Merge pie-platform-release to aosp-master - DO NOT MERGE

Change-Id: I8aed6ae19ef7a6a5b744aa6f5a4e6ca9de2e8eb4
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..cceb80d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,30 @@
+# Maven build folders
+target/
+
+# IntelliJ project files
+*.iml
+*.ipr
+*.iws
+.idea/
+
+# Eclipse project files
+.project
+.classpath
+.settings/
+
+# NetBeans user configuration
+nbactions.xml
+nb-configuration.xml
+
+# Python runtime files
+*.py[co]
+
+# ANTLR C# target build folders
+/runtime/CSharp3/Sources/Antlr3.Runtime/bin/
+/runtime/CSharp3/Sources/Antlr3.Runtime/obj/
+/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/bin/
+/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/obj/
+/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/bin/
+/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/obj/
+/runtime/CSharp3/Sources/Antlr3.Runtime.Test/bin/
+/runtime/CSharp3/Sources/Antlr3.Runtime.Test/obj/
diff --git a/Android.bp b/Android.bp
index 104331d..c51b357 100644
--- a/Android.bp
+++ b/Android.bp
@@ -14,7 +14,10 @@
 
 java_library_host {
     name: "antlr-runtime",
-    srcs: ["antlr-3.4/runtime/Java/src/main/java/**/*.java"],
+    srcs: ["runtime/Java/src/main/java/**/*.java"],
     //Remove DOTTreeGenerator.java, so that we don't have the StringTemplate library as a dependency
-    exclude_srcs: ["antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java"],
+    exclude_srcs: ["runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java"],
+    errorprone: {
+        javacflags: ["-Xep:MissingOverride:OFF"],  // b/73499927
+    },
 }
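For context, Error Prone's MissingOverride check warns when a method overrides a
supertype method without an @Override annotation; the javacflags entry above turns
that check off for this library (tracked in b/73499927) rather than patching the
upstream sources. A minimal sketch of the pattern involved, using illustrative
class names that are not taken from the ANTLR sources:

    class Base {
        public void consume() { }
    }

    class Derived extends Base {
        // Without -Xep:MissingOverride:OFF, Error Prone reports this method:
        // it overrides Base.consume() but is not annotated with @Override.
        public void consume() { }
    }
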
diff --git a/BUILD.txt b/BUILD.txt
new file mode 100644
index 0000000..d0d1b16
--- /dev/null
+++ b/BUILD.txt
@@ -0,0 +1,6 @@
+We have moved instructions for building ANTLR with the maven build system to:
+
+http://www.antlr.org/wiki/display/ANTLR3/Building+ANTLR+with+Maven
+
+The notes are by Jim Idle (and are a bit out of date but we hope to
+update them).
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..a19780f
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,18 @@
+name: "antlr3"
+description: "ANTLR (ANother Tool for Language Recognition) is a language tool that provides a framework for constructing recognizers, interpreters, compilers, and translators from grammatical descriptions containing actions in a variety of target languages."
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://github.com/antlr/antlr3"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://github.com/antlr/antlr3/archive/3.5.2.zip"
+  }
+  version: "3.5.2"
+  last_upgrade_date {
+    year: 2018
+    month: 8
+    day: 28
+  }
+}
diff --git a/README.txt b/README.txt
new file mode 100644
index 0000000..c2fa6c4
--- /dev/null
+++ b/README.txt
@@ -0,0 +1,142 @@
+ANTLR v3.5
+January 4, 2013
+
+Terence Parr, parrt at cs usfca edu
+ANTLR project lead and supreme dictator for life
+University of San Francisco
+
+INTRODUCTION
+
+Welcome to ANTLR v3!  ANTLR (ANother Tool for Language Recognition) is
+a language tool that provides a framework for constructing
+recognizers, interpreters, compilers, and translators from grammatical
+descriptions containing actions in a variety of target
+languages. ANTLR provides excellent support for tree construction,
+tree walking, translation, error recovery, and error reporting. I've
+been working on parser generators for 25 years and on this particular
+version of ANTLR for 9 years.
+
+You should use v3 in conjunction with ANTLRWorks:
+
+    http://www.antlr.org/works/index.html
+
+and gUnit (grammar unit testing tool included in distribution):
+
+    http://www.antlr.org/wiki/display/ANTLR3/gUnit+-+Grammar+Unit+Testing
+
+The book will also help you a great deal (printed May 15, 2007); you
+can also buy the PDF:
+
+    http://www.pragmaticprogrammer.com/titles/tpantlr/index.html
+
+2nd book, Language Implementation Patterns:
+
+    http://pragprog.com/titles/tpdsl/language-implementation-patterns
+
+See the getting started document:
+
+    http://www.antlr.org/wiki/display/ANTLR3/FAQ+-+Getting+Started
+
+You also have the examples plus the source to guide you.
+
+See the wiki FAQ:
+
+    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
+
+and general doc root:
+
+    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+3+Wiki+Home
+
+Please help add/update FAQ entries.
+
+If all else fails, you can buy support or ask the antlr-interest list:
+
+    http://www.antlr.org/support.html
+
+Per the license in LICENSE.txt, this software is not guaranteed to
+work and might even destroy all life on this planet:
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+
+EXAMPLES
+
+ANTLR v3 sample grammars:
+
+    https://github.com/antlr/examples-v3
+
+Examples from Language Implementation Patterns:
+
+    http://www.pragprog.com/titles/tpdsl/source_code
+
+----------------------------------------------------------------------
+
+What is ANTLR?
+
+ANTLR stands for (AN)other (T)ool for (L)anguage (R)ecognition
+and generates LL(*) recursive-descent parsers. ANTLR is a language tool
+that provides a framework for constructing recognizers, compilers, and
+translators from grammatical descriptions containing actions.
+Target language list:
+
+http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
+
+----------------------------------------------------------------------
+
+How is ANTLR v3 different than ANTLR v2?
+
+See "What is the difference between ANTLR v2 and v3?"
+
+    http://www.antlr.org/wiki/pages/viewpage.action?pageId=719
+
+See migration guide:
+
+    http://www.antlr.org/wiki/display/ANTLR3/Migrating+from+ANTLR+2+to+ANTLR+3
+
+----------------------------------------------------------------------
+
+How do I install this damn thing?
+
+You will have grabbed either of these:
+
+	http://antlr.org/download/antlr-3.5-complete-no-st3.jar
+	http://antlr.org/download/antlr-3.5-complete.jar
+
+Each contains all of the jars you need combined into one. Add
+antlr-3.5-complete.jar to your CLASSPATH or pass it on the command line; e.g., on Unix:
+
+$ java -cp "/usr/local/lib/antlr-3.5-complete.jar:$CLASSPATH" org.antlr.Tool Test.g
+
+Source + java binaries: Just untar antlr-3.5.tar.gz and you'll get:
+
+antlr-3.5/BUILD.txt
+antlr-3.5/antlr3-maven-plugin
+antlr-3.5/antlrjar.xml
+antlr-3.5/antlrsources.xml
+antlr-3.5/gunit
+antlr-3.5/gunit-maven-plugin
+antlr-3.5/pom.xml
+antlr-3.5/runtime
+antlr-3.5/tool
+antlr-3.5/lib
+
+Please see the FAQ
+
+    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
+
+-------------------------
+
+How can I contribute to ANTLR v3?
+
+http://www.antlr.org/wiki/pages/viewpage.action?pageId=33947666
diff --git a/README.version b/README.version
deleted file mode 100644
index 7270f7b..0000000
--- a/README.version
+++ /dev/null
@@ -1,3 +0,0 @@
-URL: https://github.com/antlr/website-antlr3/raw/gh-pages/download/antlr-3.4.tar.gz
-Version: 3.4
-BugComponent: 99142
diff --git a/antlr-3.4/BUILD.txt b/antlr-3.4/BUILD.txt
deleted file mode 100644
index f6e41c3..0000000
--- a/antlr-3.4/BUILD.txt
+++ /dev/null
@@ -1,493 +0,0 @@
- [The "BSD license"]
- Copyright (c) 2010 Terence Parr
- Maven Plugin - Copyright (c) 2009      Jim Idle
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-============================================================================
-
-This file contains the build instructions for the ANTLR toolset as
-of version 3.1.3 and beyond.
-
-The ANTLR toolset must be built using the Maven build system as
-this build system updates the version numbers and controls the
-whole build process. However, if you just want the latest build
-and do not care to learn anything about Maven, then visit the 'target'
-directories (for jars) under the depot mirror root here:
-
-   http://antlr.org/depot
-
-If you are looking for the latest released version of ANTLR, then
-visit the downloads page on the main antlr.org website.
-
-These instructions are mainly for the ANTLR development team,
-though you are free to build ANTLR yourself of course.
-
-Source code Structure
------------------------
-
-The main development branch of ANTLR is stored within the Perforce SCM at:
-
-   //depot/code/antlr/main/...
-
-release branches are stored in Perforce like so:
-
-   //depot/code/antlr/release-3.1.3/...
-
-In this top level directory, you will find a master build file for
-Maven called pom.xml and you will also note that there are a number of
-subdirectories:
-
- tool                  - The ANTLR tool itself
- runtime/Java          - The ANTLR Java runtime
- runtime/X             - The runtime for language target X
- gunit                 - The grammar test tool
- antlr3-maven-plugin   - The plugin tool for Maven allowing Maven
- 		          projects to process ANTLR grammars.
-
-Each of these sub-directories also contains a file pom.xml that
-controls the build of each sub-component (or module in Maven
-parlance).
-
-Build Parameters
------------------
-
-Alongside each pom.xml (other than for the antlr3-maven-plugin), you
-will see that there is a file called antlr.config. This file is called
-a filter and should contain a set of key/value pairs in the same
-manner as Java properties files:
-
-antlr.something="Some config thang!"
-
-When the build of any component happens, any values in the
-antlr.config for the master build file and any values in the
-antlr.config file for each component are made available to the
-build. This is mainly used by the resource processor, which will
-filter any file it finds under: src/main/resources/** and replace any
-references such as ${antlr.something} with the actual value at the
-time of the build.
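-
-For illustration (the resource file name and key below are hypothetical, not
-taken from the ANTLR tree), a file src/main/resources/build-info.properties
-containing:
-
-   built.with=${antlr.something}
-
-would be copied to target/classes during the build with the reference replaced
-by the value from antlr.config:
-
-   built.with="Some config thang!"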
-
-Building
---------
-
-Building ANTLR is trivial, assuming that you have loaded Maven version
-3.0.3 or better on to your build system and installed it as explained
-here:
-
-http://maven.apache.org/download.html
-
-Note that the ANTLR toolset will ONLY build with version 3.0.3 of Maven
-as of release 3.4.
-
-If you are unfamiliar with Maven (and even if you are), the best
-resource for learning about it is The Definitive Guide:
-
-http://www.sonatype.com/books/maven-book/reference/public-book.html
-
-The instructions here assume that Maven is installed and working correctly.
-
-If this is the first time you have built the ANTLR toolset, you will
-possibly need to install the master pom in your local repository
-(however the build may be able to locate this in the ANTLR snapshot or
-release repository). If you try to build sub-modules on their own (as
-in run the mvn command in the sub directory for that tool, such as
-runtime/Java), and you receive a message that maven cannot find the
-master pom, then execute this in the main (or release) directory:
-
-mvn -N install
-
-This command will install the master build pom in your local maven
-repository (it's ~/.m2 on UNIX) and individual builds of sub-modules
-will now work correctly.
-
-To build then, simply cd into the master build directory
-(e.g. $P4ROOT//code/antlr/main) and type:
-
-mvn -Dmaven.test.skip=true
-
-Assuming that everything is correctly installed and synchronized, then
-ANTLR will build and skip any unit tests in the modules (the ANTLR
-tool tests can take a long time).
-
-This command will build each of the tools in the correct order and
-will create the jar artifacts of all the components in your local
-development Maven repository (which takes precedence over remote
-repositories by default). At the end of the build you should see:
-
-[INFO] ------------------------------------------------------------------------
-[INFO] Reactor Summary:
-[INFO] ------------------------------------------------------------------------
-[INFO] ANTLR Master build control POM ........................ SUCCESS [1.373s]
-[INFO] Antlr 3 Runtime ....................................... SUCCESS [0.879s]
-[INFO] ANTLR Grammar Tool .................................... SUCCESS [5.431s]
-[INFO] Maven plugin for ANTLR V3 ............................. SUCCESS [1.277s]
-[INFO] ANTLR gUnit ........................................... SUCCESS [1.566s]
-[INFO] Maven plugin for gUnit ANTLR V3 ....................... SUCCESS [0.079s]
-[INFO] ------------------------------------------------------------------------
-[INFO] ------------------------------------------------------------------------
-[INFO] BUILD SUCCESSFUL
-[INFO] ------------------------------------------------------------------------
-[INFO] Total time: 11 seconds
-
-However, unless you are using Maven exclusively in your projects, you
-will most likely want to build the ANTLR Uber Jar, which is an
-executable jar containing all the components that ANTLR needs to build
-and run parsers (note that at runtime, you need only the runtime
-components you use, such as the Java runtime and say stringtemplate).
-
-Because the Uber jar is not something we want to deploy to Maven
-repositories it is built with a special invocation of Maven:
-
-mvn -Dmaven.test.skip=true package assembly:assembly
-
-Note that Maven will appear to build everything twice, which is a
-quirk of how it calculates the dependencies and makes sure it has
-everything packaged up so it can build the uber-jar assembly.
-
-Somewhere in the build output (towards the end), you will find a line
-like this:
-
-[INFO] Building jar: /home/jimi/antlrsrc/code/antlr/main/target/antlr-master-3.4-SNAPSHOT-completejar.jar
-
-This is the executable jar that you need and you can either copy it
-somewhere or, like me, you can create this script (assuming UNIX)
-somewhere in your PATH:
-
-#! /bin/bash
-java -jar ~/antlrsrc/code/antlr/main/target/antlr-master-3.4-SNAPSHOT-completejar.jar $*
-
-Version Numbering
--------------------
-
-The first and Golden rule is that any pom files stored under the main
-branch of the toolset should never be modified to contain a release
-version number. They should always contain a.b.c-SNAPSHOT
-(e.g. 3.1.3-SNAPSHOT). Only release branches should have their pom
-version numbers set to a release version. You can release as many
-SNAPSHOTS as you like, but only one release version. However, release
-versions may be updated with a patch level: 3.1.3-1, 3.1.3-2 and so
-on.
-
-Fortunately, Maven helps us with the version numbering in a number of
-ways. Firstly, the pom.xml files for the various modules do not
-specify a version of the artifacts themselves. They pick up their
-version number from the master build pom.  However, there is a catch,
-because they need to know what version of the parent pom they inherit
-from and so they DO mention the version number. However, this does
-prevent accidentally releasing different versions of sub-modules than
-the master pom describes.
-
-Fortunately once again, Maven has a neat way of helping us change the
-version.  All you need do is check out all the pom.xml files from
-perforce, then modify the <version>a.b.c-SNAPSHOT</version> in the
-master pom. When the version number is correct in the master pom, you
-make sure your working directory is the location of the master pom and
-type:
-
-mvn versions:update-child-modules
-
-This command will then update the child pom.xml files to reflect the
-version number defined in the master pom.xml.
-
-There is unfortunately one last catch here though and that is that the
-antlr3-maven-plugin and the gunit-maven-plugin are not able to use the
-parent pom. The reason for this is subtle but makes sense as doing so
-would create a circular dependency between the ANTLR tool (which uses
-the plugin to build its own grammar files), and the plugins (which
-use the tool to build grammar files and gunit to test).
-
-This catch-22 situation means that the pom.xml file in the
-antlr3-maven-plugin directory and the one in the gunit-maven-plugin
-directory MUST be updated manually (or we must write a script to do
-this).
-
-Finally, we need to remember that because the tool is dependent on the
-antlr3-maven-plugin and the plugin is itself dependent on the
-tool, that we must manually update the versions of each that they
-reference. So, when we bump the version of the toolset to say
-3.1.4-SNAPSHOT, we need to change the antlr3-maven-plugin pom.xml and
-the gunit-maven-plugin pom.xml to reference that version of the antlr
-tool. The tool itself is always built with the prior released version
-of the plugin, so when we release we must change the main branch of
-the tool to use the newly released version of the plugin. This is
-covered in the release checklist.
-
-Deploying
-----------
-
-Deploying the tools at the current version is relatively easy, but to
-deploy to the ANTLR repositories (snapshot or release) you must have
-been granted access to the Sonatype OSS repositories' ANTLR login. 
-Few people will have this access of course.
-
-Next, because we do not publish access information for antlr.org, you
-will need to configure the repository server names locally. You do
-this by creating (or adding to) the file:
-
-~/.m2/settings.xml
-
-Which should look like this:
-
-<?xml version="1.0" encoding="UTF-8"?>
-<settings>
-  <servers>
-    <server>
-      <id>sonatype-nexus-snapshots</id>
-      <username>xxxxxxx</username>
-      <password>xxxxxxx</password>
-    </server>
-    <server>
-      <id>sonatype-nexus-staging</id>
-      <username>xxxxxxx</username>
-      <password>xxxxxxx</password>
-    </server>
-  </servers>
-</settings>
-
-When this configuration is in place, you will be able to deploy the components,
-either individually or from the master directory:
-
-mvn -Dmaven.test.skip=true -Ddeploy deploy
-
-You will then see lots of information about checking existing version
-information and so on, and the components will be deployed once you
-supply the ANTLR public key passphrase to sign the jars.
-
-Note that so long as the artifacts are versioned with a.b.c-SNAPSHOT
-then deployment will always be to the development snapshot
-directory. When the artifacts are versioned with a release version
-then deployment will be to the release staging repository, which
-will then be mirrored around the world once closed and released.
-The Sonatype documentation should be consulted.
-
-Release Checklist
-------------------
-
-Here is the procedure to use to make a release of ANTLR. Note that we
-should really use the mvn release:release command, but the perforce
-plugin for Maven is not commercial quality and I want to rewrite it.
-
-For this checklist, let's assume that the current development version
-of ANTLR is 3.1.3-SNAPSHOT. This means that it will probably (but not
-necessarily) become release version 3.1.3 and that the development
-version will bump to 3.1.4-SNAPSHOT.
-
-0) Run a build of the main branch and check that it builds and
-   passes as many tests as you want it to.
-
-1) First make a branch from main into the target release
-   directory. Then submit this to perforce. You could change version
-   numbers before submitting, but doing that in separate stages will
-   keep things sane.
-
---- Use main development branch from here ---
-
-2) Before we deploy the release, we want to update the versions of the
-   development branch, so we don't deploy what is now the new release
-   as an older snapshot (this is not super important, but procedure is
-   good right?).
-
-   Check out all the pom.xml files (and if you are using any
-   antlr.config parameters that must change, then do that too).
-
-3) Edit the master pom.xml in the main directory and change the version from
-   3.1.3-SNAPSHOT to 3.1.4-SNAPSHOT.
-
-4) Edit the pom.xml file for antlr3-maven-plugin under the main
-   directory and change the version from 3.1.3-SNAPSHOT to
-   3.1.4-SNAPSHOT. Do the same for the pom.xml in the
-   gunit-maven-plugin directory.
-
-   Update the pom.xml for the archetype manually too.
-
-5) Now (from the main directory), run the command:
-
-         mvn versions:update-child-modules
-
-      You should see:
-
-         [INFO] [versions:update-child-modules]
-         [INFO] Module: gunit
-         [INFO]   Parent is org.antlr:antlr-master:3.1.4-SNAPSHOT
-         [INFO] Module: runtime/Java
-         [INFO]   Parent is org.antlr:antlr-master:3.1.4-SNAPSHOT
-         [INFO] Module: tool
-         [INFO]   Parent is org.antlr:antlr-master:3.1.4-SNAPSHOT
-
-6) Run a build of the main branch:
-
-         mvn -Dmaven.test.skip=true
-
-       All should be good.
-
-7) Submit the pom changes of the main branch to perforce.
-
-8) Deploy the new snapshot as a placeholder for the next release. It
-   will go to the snapshot repository of course:
-
-	  mvn -N deploy
-          mvn -Dmaven.test.skip=true deploy
-
-9) You are now finished with the main development branch and should change
-   working directories to the release branch you made earlier.
-
---- Use release branch from here ---
-
-10) Check out all the pom.xml files in the release branch (and if you are
-    using any antlr.config parameters that must change, then do that too).
-
-11) Edit the master pom.xml in the release-3.1.3 directory and change
-    the version from 3.1.3-SNAPSHOT to 3.1.3.
-
-12) Edit the pom.xml file for antlr3-maven-plugin under the
-    release-3.1.3 directory and change the version from 3.1.3-SNAPSHOT
-    to 3.1.3. Also change the version of the tool that this
-    pom.xml references from 3.1.3-SNAPSHOT to 3.1.3 as we are now
-    releasing the plugin of course and it needs to reference the
-    version we are about to release. You will find this reference in
-    the dependencies section of the antlr3-maven-plugin pom.xml. Also
-    change the version references in the pom for gunit-maven-plugin.
-
-13)  Now (from the release-3.1.3 directory), run the command:
-
-           mvn versions:update-child-modules
-
-        You should see:
-
-	[INFO] [versions:update-child-modules]
-	[INFO] Module: gunit
-	[INFO]   Parent was org.antlr:antlr-master:3.1.3-SNAPSHOT,
-	       now org.antlr:antlr-master:3.1.3
-	[INFO] Module: runtime/Java
-	[INFO]   Parent was org.antlr:antlr-master:3.1.3-SNAPSHOT,
-	       now org.antlr:antlr-master:3.1.3
-	[INFO] Module: tool
-	[INFO]   Parent was org.antlr:antlr-master:3.1.3-SNAPSHOT,
-	       now org.antlr:antlr-master:3.1.3
-
-14)  Run a build of the release-3.1.3 branch:
-
-           mvn   # Note I am letting unit tests run here!
-
-        All should be good, or as good as it gets ;-)
-
-15)  Submit the pom changes of the release-3.1.3 branch to perforce.
-
-16)  Deploy the new release (this is it guys, make sure you are happy):
-
-	  mvn -N deploy
-          mvn -Dmaven.test.skip=true deploy
-
-        Note that we must skip the tests as Maven will not let you
-        deploy releases that fail any junit tests.
-
-17) The final step is that we must update the main branch pom.xml for
-     the tool to reference the newly released version of the
-     antlr3-maven-plugin. This is because each release of ANTLR is
-     built with the prior release of ANTLR, and we have just released
-     a new version. Edit the pom.xml for the tool (main/tool/pom.xml)
-     under the main (that's the MAIN branch, not the release branch)
-     and find the dependency reference to the antlr plugin. If you
-     just released say 3.1.3, then the tool should now reference
-     version 3.1.3 of the plugin. Having done this, you should
-     probably rebuild the main branch and let it run the junit
-     tests. Later, I will automate this dependency update as mvn can
-     do this for us.
-
-18)  Having deployed the release to maven, you will want to create the
-     uber jar for the new release, to make it downloadable from the
-     antlr.org website. This is a repeat of the earlier described step
-     to build the uber jar:
-
-       mvn -Dmaven.test.skip=true package assembly:assembly
-
-     Maven will produce the uber jar in the target directory:
-
-	antlr-master-3.1.3-completejar.jar
-
-     And this is the complete jar that can be downloaded from the web site. You
-     may wish to produce an md5 checksum to go with the jar:
-
-     md5sum target/antlr-master-3.1.3-completejar.jar
-     xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  target/antlr-master-3.1.3-completejar.jar
-
-     The command you just ran will also produce a second jar:
-
-        antlr-master-3.1.3-src.jar
-
-     This is the source code for everything you just deployed and can
-     be unjarred and built from scratch using the very procedures
-     described here, which means you will now be reading this
-     BUILD.txt file forever.
-
-19)  Reward anyone around you with good beer.
-
-
-Miscellany
------------
-
-It was a little tricky to get all the interdependencies correct
-because ANTLR builds itself using itself and the maven plugin
-references the ANTLR Tool as well. Hence the maven tool is not a child
-project of the master pom.xml file, even though it is built by it.
-
-An observant person will note that when the assembly:assembly phase is
-run, it invokes the build of the ANTLR tool using the version of
-the Maven plugin that it has just built, and this results in the
-plugin using the version of ANTLR tool that it has just built. This is
-safe because everything will already be up to date and so we package
-up the version of the tool that we expect, but the Maven plugin we
-deploy will use the correct version of ANTLR, even though there is
-technically a circular dependency.
-
-The master pom.xml does give us a way to cause the build of the ANTLR
-tool to use itself to build itself. This is because in
-dependencyManagement in the master pom.xml, we can reference the
-current version of the Tool and the Maven plugin, even though the
-pom.xml for the tool itself refers to the previous version of the
-plugin.
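-
-As a sketch (the version number is illustrative), such a dependencyManagement
-entry in the master pom.xml referencing the current Tool looks something like:
-
-   <dependencyManagement>
-     <dependencies>
-       <dependency>
-         <groupId>org.antlr</groupId>
-         <artifactId>antlr</artifactId>
-         <version>3.1.4-SNAPSHOT</version>
-       </dependency>
-     </dependencies>
-   </dependencyManagement>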
-
-What happens is that if we first cd into the tool and maven
-directories and build ANTLR, it will build itself with the prior
-version and this will deploy locally (.m2). We can then clean build
-from the master pom and when ANTLR asks for the prior version of the
-tool, the master pom.xml will override it and build with the interim
-versions we just built manually.
-
-However, strictly speaking, we need a third build where we rebuild the
-tool again with the version of the tool that was built with itself and
-not deploy the version that was built by the version of itself that
-was built by a prior version of itself. I decided that this was not
-particularly useful and complicates things too much. Building with a
-prior version of the tool is fine and if there was ever a need to, we
-could release twice in quick succession.
-
-I have occasionally seen the Maven reactor screw up (or perhaps it is
-the ANTLR tool) when building. If this happens you will see an ANTLR
-Panic - cannot find en.stg message. If this happens to you, then just
-rerun the build and it will eventually work.
-
-Jim Idle - March 2009
-
diff --git a/antlr-3.4/antlr.config b/antlr-3.4/antlr.config
deleted file mode 100644
index 00ac54e..0000000
--- a/antlr-3.4/antlr.config
+++ /dev/null
@@ -1 +0,0 @@
-fred=99
diff --git a/antlr-3.4/antlr3-maven-archetype/pom.xml b/antlr-3.4/antlr3-maven-archetype/pom.xml
deleted file mode 100644
index 6de349c..0000000
--- a/antlr-3.4/antlr3-maven-archetype/pom.xml
+++ /dev/null
@@ -1,132 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-
-    
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>org.antlr</groupId>
-    <artifactId>antlr3-maven-archetype</artifactId>
-    <version>3.4</version>
-    <packaging>maven-archetype</packaging>
-    <name>ANTLR3 Maven Archetype 3.4</name>
-    
-    <properties>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    </properties>
-    
-    <!--
-
-    Inherit from the ANTLR master pom, which tells us what
-    version we are and allows us to inherit dependencies
-    and so on.
-
-    Unfortunately, because of a bug in the archetype plugin
-    we cannot use the parent pom because it causes the
-    artifactId in the generated pom to be set to antlr3-maven-archetype
-    We will reinstate this parent usage when that is fixed.
-    
-    <parent>
-        <groupId>org.antlr</groupId>
-        <artifactId>antlr-master</artifactId>
-        <version>3.4</version>
-    </parent>
-    -->
-    <!--
-        The ANTLR Maven artifacts are now released via the Sonatype OSS
-        repository, which means that they are synced to Maven central
-        within a few minutes of hitting the release repo for Sonatype.
-        To enable this, we inherit from the Sonatype provided parent
-        pom. However, we must also configure our .m2/settings.xml to include
-        the snapshot and staging server and the Sonatype password. This
-        means that only ANTLR developers can release the artifacts, but
-        anyone can build locally.
-      -->
-    <parent>
-        <groupId>org.sonatype.oss</groupId>
-        <artifactId>oss-parent</artifactId>
-        <version>7</version>
-    </parent> 
-    
-    <profiles>
-        <profile>
-            <id>release-sign-artifacts</id>
-            <activation>
-                <property>
-                    <name>deploy</name>
-                    <value>true</value>
-                </property>
-            </activation>
-            <build>
-                <plugins>
-                    <plugin>
-                        <groupId>org.apache.maven.plugins</groupId>
-                        <artifactId>maven-gpg-plugin</artifactId>
-                        <version>1.3</version>
-                        <executions>
-                            <execution>
-                                <id>sign-artifacts</id>
-                                <phase>verify</phase>
-                                <goals>
-                                   <goal>sign</goal>
-                                </goals>
-                            </execution>
-                        </executions>
-                    </plugin>
-                </plugins>
-            </build>
-        </profile>
-    </profiles>
-    
-  <build>
-
-    <extensions>
-
-      <extension>
-        <groupId>org.apache.maven.archetype</groupId>
-        <artifactId>archetype-packaging</artifactId>
-        <version>2.0</version>
-      </extension>
-
-    </extensions>
-
-        <plugins>
-
-            <plugin>
-                <artifactId>maven-archetype-plugin</artifactId>
-                <version>2.0</version>
-                <extensions>true</extensions>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-source-plugin</artifactId>
-                <version>2.1.2</version>
-                <executions>
-                    <execution>
-                        <id>attach-sources</id>
-                        <goals>
-                            <goal>jar</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-			
-            <plugin>
-				<groupId>org.apache.maven.plugins</groupId>
-				<artifactId>maven-javadoc-plugin</artifactId>
-                <version>2.8</version>
-				<executions>
-					<execution>
-						<id>attach-javadocs</id>
-						<goals>
-							<goal>jar</goal>
-						</goals>
-					</execution>
-				</executions>
-			</plugin>
-            
-        </plugins>
-
-  </build>
-
-</project>
diff --git a/antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/pom.xml b/antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/pom.xml
deleted file mode 100644
index e8db114..0000000
--- a/antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/pom.xml
+++ /dev/null
@@ -1,182 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <!-- =======================================================================
-         A quickstart pom.xml that creates a sample project that uses ANTLR 3.x
-         grammars. You should replace the sample grammars in src/main/antlr3
-         with your own grammar files and use packages.
-
-         A .g file in
-          
-            src/main/antlr3/com/temporalwave
-
-          belongs in the package
-          
-            com.temporalwave
-
-         See http://antlr.org/antlr3-maven-plugin for more details.
-
-         This project produces both a jar file of the project and an executable
-         jar file that contains all the dependencies so you can run it standalone.
-         See below for more details.
-         
-         Archetype by Jim Idle (jimi@temporal-wave.com) - Oct 2009
-         Report bugs to the ANTLR interest list at http://www.antlr.org
-
-         Generated by antlr3-maven-archetype version 3.4
-         =======================================================================
-      -->
-
-    <!-- This is your organization's normal group name
-         such as org.antlr
-         All the artifacts you create will be under this
-         group id.
-      -->
-    <groupId>${groupId}</groupId>
-
-    <!-- This is how maven knows your artifact
-      -->
-    <artifactId>${artifactId}</artifactId>
-
-    <!-- This is the human oriented name for the package
-         so you can call it anything you like
-      -->
-    <name>ANTLR3 project: ${package}</name>
-
-    <!-- This is the version of YOUR project -->
-    <version>${version}</version>
-
-    <packaging>jar</packaging>
-    <url>http://antlr.org</url>
-
-    <dependencies>
-
-        <!--
-          We need to have the ANTLR runtime jar when running and compiling.
-        -->
-        <dependency>
-            <groupId>org.antlr</groupId>
-            <artifactId>antlr-runtime</artifactId>
-            <version>3.4</version>
-            <scope>compile</scope>
-        </dependency>
-
-    </dependencies>
-
-  <!--
-
-    Tell Maven which other artifacts we need in order to
-    build with the ANTLR Tool. Here we also make the default
-    goal be install so that you can just type mvn at the command
-    line instead of mvn install. And we add the java compiler plugin
-    for convenience to show how you can use 1.6 source files but
-    generate 1.4 compatible .class files (as few people seem to
-    know about the jsr14 target).
-    -->
-    <build>
-
-        <defaultGoal>install</defaultGoal>
-
-        <plugins>
-
-            <plugin>
-
-                <groupId>org.antlr</groupId>
-                <artifactId>antlr3-maven-plugin</artifactId>
-                <version>3.4</version>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>antlr</goal>
-                        </goals>
-                    </execution>
-                </executions>
-
-            </plugin>
-
-            <!--
-              Strictly speaking, we did not need to generate this for you from
-              the prototype, but we use it to illustrate how you can get
-              the JDK 6 Java compiler to accept 1.5 or 1.6 targeted source code
-              but produce class files that are compatible with JRE 1.4. As
-              Michael Caine might not say, "Not a lot of people know that!"
-              -->
-            <plugin>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>2.0.2</version>
-                <configuration>
-                    <source>1.6</source>
-                    <target>jsr14</target>
-                    <sourceDirectory>src</sourceDirectory>
-                </configuration>
-            </plugin>
-
-            <plugin>
-                
-                <!--
-
-                    Build an uber-jar that is packaged with all the other dependencies,
-                    such as the antlr-runtime and so on. This will be useful
-                    for developers, who then do not need to download anything else or
-                    remember that they need antlr.jar in their CLASSPATH and so
-                    on.
-
-                    You can delete this plugin of course and you will then
-                    get a jar file with only the code generated and included
-                    directly in this project. With this plugin though you will
-                    find that when you build with:
-
-                       mvn install
-
-                    There will be an executable jar generated. You can run this
-                    as:
-
-                      java -jar ${artifactId}-${version}-jar-with-dependencies.jar demosource.dmo
-
-                    assuming you have a file called demosource.dmo to attempt a parse.
-
-                  -->
-                <artifactId>maven-assembly-plugin</artifactId>
-
-                <configuration>
-                    <descriptorRefs>
-                        <descriptorRef>jar-with-dependencies</descriptorRef>
-                    </descriptorRefs>
-                    <!--
-
-                        Specify that we want the resulting jar to be executable
-                        via java -jar, which we do by modifying the manifest
-                        of course.
-                      -->
-                    <archive>
-
-                        <manifest>
-                            <mainClass>${package}.Main</mainClass>
-                        </manifest>
-                    </archive>
-
-                </configuration>
-
-                <!--
-
-                    We don't want to have to specifically ask for the uber jar, so we attach the
-                    running of this plugin to the execution of the package life-cycle
-                    phase.
-                  -->
-                <executions>
-                    <execution>
-                        <id>make-assembly</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>attached</goal>
-                        </goals>
-                    </execution>
-                </executions>
-
-            </plugin>
-
-        </plugins>
-    </build>
-
-</project>
diff --git a/antlr-3.4/antlr3-maven-plugin/pom.xml b/antlr-3.4/antlr3-maven-plugin/pom.xml
deleted file mode 100644
index 58dc982..0000000
--- a/antlr-3.4/antlr3-maven-plugin/pom.xml
+++ /dev/null
@@ -1,361 +0,0 @@
-<!--
-
- [The "BSD license"]
-
- ANTLR        - Copyright (c) 2005-2010 Terence Parr
- Maven Plugin - Copyright (c) 2009      Jim Idle
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-  -->
-
-
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-
-    <!--
-        The ANTLR Maven artifacts are now released via the Sonatype OSS
-        repository, which means that they are synced to Maven central
-        within a few minutes of hitting the release repo for Sonatype.
-        To enable this, we inherit from the Sonatype provided parent
-        pom. However, we must also configure our .m2/settings.xml to include
-        the snapshot and staging server and the Sonatype password. This
-        means that only ANTLR developers can release the artifacts, but
-        anyone can build locally.
-      -->
-    <parent>
-        <groupId>org.sonatype.oss</groupId>
-        <artifactId>oss-parent</artifactId>
-        <version>7</version>
-    </parent>  
-    
-    <!-- Maven model we are inheriting from
-      -->
-    <modelVersion>4.0.0</modelVersion>
-
-    <!--
-
-     Now that the ANTLR project has adopted Maven with a vengeance,
-     all ANTLR tools will be grouped under org.antlr and will be
-     controlled by a project member.
-     -->
-    <groupId>org.antlr</groupId>
-
-
-    <!--
-
-     This is the ANTLR plugin for ANTLR version 3.1.3 and above. It might
-     have been best to change the name of the plugin as the 3.1.2 plugins
-     behave a little differently, however for the sake of one transitional
-     phase to a much better plugin, it was decided that the name should
-     remain the same.
-      -->
-    <artifactId>antlr3-maven-plugin</artifactId>
-    <packaging>maven-plugin</packaging>
-
-    <!-- Note that as this plugin depends on the ANTLR tool itself
-         we cannot use the parent pom to control the version number
-         and MUST update <version> in this pom manually!
-         -->
-    <version>3.4</version>
-    <name>Maven plugin for ANTLR V3.4</name>
-    <prerequisites>
-        <maven>2.0</maven>
-    </prerequisites>
-
-    <!--
-     Where does our actual project live on the interwebs.
-      -->
-    <url>http://antlr.org</url>
-
-    <properties>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    </properties>
-
-    <description>
-
-This is the brand new, re-written from scratch plugin for ANTLR v3.
-
-Previous valiant efforts all suffered from being unable to modify the ANTLR Tool
-itself to provide support not just for Maven oriented things but any other tool
-that might wish to invoke ANTLR without resorting to the command line interface.
-
-Rather than try to shoe-horn new code into the existing Mojo (in fact I think that
-by incorporating a patch supplied by someone I ended up with two versions of the
-Mojo), I elected to rewrite everything from scratch, including the documentation, so
-that we might end up with a perfect Mojo that can do everything that ANTLR v3 supports
-such as imported grammar processing, proper support for library directories and
-locating token files from generated sources, and so on.
-
-In the end I decided to also change the ANTLR Tool.java code so that it
-would be the provider of all the things that a build tool needs, rather than
-delegating things to 5 different tools. So, things like dependencies, dependency
-sorting, option tracking, generating sources and so on are all folded back
-in to ANTLR's Tool.java code, where they belong, and they now provide a
-public interface to anyone that might want to interface with them.
-
-One other goal of this rewrite was to completely document the whole thing
-to death. Hence even this pom has more comments than functional elements,
-in case I get run over by a bus or fall off a cliff while skiing.
-
-Jim Idle - March 2009
-
-    </description>
-
-    <profiles>
-        <profile>
-            <id>release-sign-artifacts</id>
-            <activation>
-                <property>
-                    <name>deploy</name>
-                    <value>true</value>
-                </property>
-            </activation>
-            <build>
-                <plugins>
-                    <plugin>
-                        <groupId>org.apache.maven.plugins</groupId>
-                        <artifactId>maven-gpg-plugin</artifactId>
-                        <version>1.3</version>
-                        <executions>
-                            <execution>
-                                <id>sign-artifacts</id>
-                                <phase>verify</phase>
-                                <goals>
-                                   <goal>sign</goal>
-                                </goals>
-                            </execution>
-                        </executions>
-                    </plugin>
-                </plugins>
-            </build>
-        </profile>
-    </profiles>
-
-
-    <developers>
-
-        <developer>
-            <name>Jim Idle</name>
-            <url>http://www.temporal-wave.com</url>
-            <roles>
-                <role>Originator, version 3.1.3+</role>
-            </roles>
-        </developer>
-
-        <developer>
-            <name>Terence Parr</name>
-            <url>http://antlr.org/wiki/display/~admin/Home</url>
-            <roles>
-                <role>Project lead - ANTLR</role>
-            </roles>
-        </developer>
-
-        <developer>
-            <name>David Holroyd</name>
-            <url>http://david.holroyd.me.uk/</url>
-            <roles>
-                <role>Originator - prior version</role>
-            </roles>
-        </developer>
-
-        <developer>
-            <name>Kenny MacDermid</name>
-            <url>mailto:kenny "at" kmdconsulting.ca</url>
-            <roles>
-                <role>Contributor - prior versions</role>
-            </roles>
-        </developer>
-
-    </developers>
-
-    <!-- Where do we track bugs for this project?
-      -->
-    <issueManagement>
-        <system>JIRA</system>
-        <url>http://antlr.org/jira/browse/ANTLR</url>
-    </issueManagement>
-
-    <!-- Location of the license description for this project
-      -->
-    <licenses>
-        <license>
-            <distribution>repo</distribution>
-            <name>The BSD License</name>
-            <url>http://www.antlr.org/LICENSE.txt </url>
-        </license>
-    </licenses>
-    
-    <!-- Ancillary information for completeness
-      -->
-    <inceptionYear>2009</inceptionYear>
-
-    <mailingLists>
-        <mailingList>
-            <archive>http://antlr.markmail.org/</archive>
-            <otherArchives>
-                <otherArchive>http://www.antlr.org/pipermail/antlr-interest/</otherArchive>
-            </otherArchives>
-            <name>ANTLR Users</name>
-            <subscribe>http://www.antlr.org/mailman/listinfo/antlr-interest/</subscribe>
-            <unsubscribe>http://www.antlr.org/mailman/options/antlr-interest/</unsubscribe>
-            <post>antlr-interest@antlr.org</post>
-        </mailingList>
-    </mailingLists>
-
-    <organization>
-        <name>ANTLR.org</name>
-        <url>http://www.antlr.org</url>
-    </organization>
-    <!-- ============================================================================= -->
-
-    <!--
-
-     What are we dependent on for the Mojos to execute? We need the
-     plugin API itself and of course we need the ANTLR Tool and runtime
-     and any of their dependencies, which we inherit. The Tool itself provides
-     us with all the dependencies, so we need only name it here.
-      -->
-    <dependencies>
-
-        <!--
-          The things we need to build the target language recognizer
-          -->
-        <dependency>
-            <groupId>org.apache.maven</groupId>
-            <artifactId>maven-plugin-api</artifactId>
-            <version>2.0</version>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.maven</groupId>
-            <artifactId>maven-project</artifactId>
-            <version>2.0</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.codehaus.plexus</groupId>
-            <artifactId>plexus-compiler-api</artifactId>
-            <version>1.5.3</version>
-        </dependency>
-
-        <!--
-         The version of ANTLR tool that this version of the plugin controls.
-         We have decided that this should be in lockstep with ANTLR itself, other
-         than -1 -2 -3 etc patch releases.
-          -->
-        <dependency>
-            <groupId>org.antlr</groupId>
-            <artifactId>antlr</artifactId>
-            <version>3.4</version>
-        </dependency>
-
-        <!--
-          Testing requirements...
-          -->
-        <dependency>
-
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <version>4.8.2</version>
-            <scope>test</scope>
-
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.maven.shared</groupId>
-            <artifactId>maven-plugin-testing-harness</artifactId>
-            <version>1.0</version>
-            <scope>test</scope>
-        </dependency>
-        
-    </dependencies>
-    
-    <build>
-
-        <defaultGoal>install</defaultGoal>
-
-        <plugins>
-
-            <plugin>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>2.0.2</version>
-                <configuration>
-                    <source>1.6</source>
-                    <target>jsr14</target>
-                </configuration>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-site-plugin</artifactId>
-                <version>2.0</version>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-project-info-reports-plugin</artifactId>
-                <version>2.1.1</version>
-                <configuration>
-                    <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-                </configuration>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-source-plugin</artifactId>
-                <version>2.1.2</version>
-                <executions>
-                    <execution>
-                        <id>attach-sources</id>
-                        <goals>
-                            <goal>jar</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-
-			<plugin>
-				<groupId>org.apache.maven.plugins</groupId>
-				<artifactId>maven-javadoc-plugin</artifactId>
-                <version>2.8</version>
-				<executions>
-					<execution>
-						<id>attach-javadocs</id>
-						<goals>
-							<goal>jar</goal>
-						</goals>
-					</execution>
-				</executions>
-			</plugin>
-            
-        </plugins>
-
-    </build>
-
-</project>
diff --git a/antlr-3.4/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3ErrorLog.java b/antlr-3.4/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3ErrorLog.java
deleted file mode 100644
index bf2c3c6..0000000
--- a/antlr-3.4/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3ErrorLog.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- [The "BSD licence"]
-
- ANTLR        - Copyright (c) 2005-2008 Terence Parr
- Maven Plugin - Copyright (c) 2009      Jim Idle
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.mojo.antlr3;
-
-import org.antlr.tool.ANTLRErrorListener;
-import org.antlr.tool.Message;
-import org.antlr.tool.ToolMessage;
-import org.apache.maven.plugin.logging.Log;
-
-/**
- * The Maven plexus container gives us a Log logging provider
- * which we can use to install an error listener for the ANTLR
- * tool to report errors by.
- */
-public class Antlr3ErrorLog implements ANTLRErrorListener {
-
-    private Log log;
-
-    /**
-     * Instantiate an ANTLR ErrorListener that communicates any messages
-     * it receives to the Maven error sink.
-     *
-     * @param log The Maven Error Log
-     */
-    public Antlr3ErrorLog(Log log) {
-        this.log = log;
-    }
-
-    /**
-     * Sends an informational message to the Maven log sink.
-     * @param message The message to send to Maven
-     */
-    public void info(String message) {
-        log.info(message);
-    }
-
-    /**
-     * Sends an error message from ANTLR analysis to the Maven Log sink.
-     *
-     * @param message The message to send to Maven.
-     */
-    public void error(Message message) {
-        log.error(message.toString());
-    }
-
-    /**
-     * Sends a warning message to the Maven log sink.
-     *
-     * @param message
-     */
-    public void warning(Message message) {
-        log.warn(message.toString());
-    }
-
-    /**
-     * Sends an error message from the ANTLR tool to the Maven Log sink.
-     * @param toolMessage
-     */
-    public void error(ToolMessage toolMessage) {
-        log.error(toolMessage.toString());
-    }
-}
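A minimal sketch of how a caller could wire this listener between the ANTLR Tool and a Maven Log, assuming ANTLR 3.x's static ErrorManager.setErrorListener hook and Maven's SystemStreamLog; the grammar path is hypothetical:

import org.antlr.Tool;
import org.antlr.mojo.antlr3.Antlr3ErrorLog;
import org.antlr.tool.ErrorManager;
import org.apache.maven.plugin.logging.Log;
import org.apache.maven.plugin.logging.SystemStreamLog;

public class ErrorLogWiring {
    public static void main(String[] args) {
        // Console-backed Maven Log; a real Mojo would use getLog() instead.
        Log log = new SystemStreamLog();

        // Assumed ANTLR 3.x hook: route tool diagnostics through the listener above.
        ErrorManager.setErrorListener(new Antlr3ErrorLog(log));

        Tool tool = new Tool();
        tool.addGrammarFile("src/main/antlr3/Example.g"); // hypothetical grammar
        tool.process();                                   // warnings/errors now reach the Maven log
    }
}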
diff --git a/antlr-3.4/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3Mojo.java b/antlr-3.4/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3Mojo.java
deleted file mode 100644
index e7225d3..0000000
--- a/antlr-3.4/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3Mojo.java
+++ /dev/null
@@ -1,493 +0,0 @@
-/**
-[The "BSD licence"]
-
-ANTLR        - Copyright (c) 2005-2008 Terence Parr
-Maven Plugin - Copyright (c) 2009      Jim Idle
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-3. The name of the author may not be used to endorse or promote products
-derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/* ========================================================================
- * This is the definitive ANTLR3 Mojo set. All other sets are belong to us.
- */
-package org.antlr.mojo.antlr3;
-
-import org.apache.maven.plugin.AbstractMojo;
-import org.apache.maven.plugin.MojoExecutionException;
-import org.apache.maven.plugin.MojoFailureException;
-import org.apache.maven.project.MavenProject;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import org.antlr.Tool;
-import org.antlr.runtime.RecognitionException;
-import org.apache.maven.plugin.logging.Log;
-import org.codehaus.plexus.compiler.util.scan.InclusionScanException;
-import org.codehaus.plexus.compiler.util.scan.SimpleSourceInclusionScanner;
-import org.codehaus.plexus.compiler.util.scan.SourceInclusionScanner;
-import org.codehaus.plexus.compiler.util.scan.mapping.SourceMapping;
-import org.codehaus.plexus.compiler.util.scan.mapping.SuffixMapping;
-
-/**
- * Goal that picks up all the ANTLR grammars in a project and moves those that
- * are required for generation of the compilable sources into the location
- * that we use to compile them, such as target/generated-sources/antlr3 ...
- *
- * @goal antlr
- * 
- * @phase process-sources
- * @requiresDependencyResolution compile
- * @requiresProject true
- * 
- * @author <a href="mailto:jimi@temporal-wave.com">Jim Idle</a>
- */
-public class Antlr3Mojo
-        extends AbstractMojo {
-
-    // First, let's deal with the options that the ANTLR tool itself
-    // can be configured by.
-    //
-    /**
-     * If set to true, then after the tool has processed an input grammar file
-     * it will report various statistics about the parser, such as information
-     * on cyclic DFAs, which rules may use backtracking, and so on.
-     *
-     * @parameter default-value="false"
-     */
-    protected boolean report;
-    /**
-     * If set to true, then the ANTLR tool will print a version of the input
-     * grammar which is devoid of any actions that may be present in the input file.
-     *
-     * @parameter default-value="false"
-     */
-    protected boolean printGrammar;
-    /**
-     * If set to true, then the code generated by the ANTLR code generator will
-     * be set to debug mode. This means that when run, the code will 'hang' and
-     * wait for a debug connection on a TCP port (49100 by default).
-     *
-     * @parameter default-value="false"
-     */
-    protected boolean debug;
-    /**
-     * If set to true, then the generated parser will compute and report on
-     * profile information at runtime.
-     *
-     * @parameter default-value="false"
-     */
-    protected boolean profile;
-    /**
-     * If set to true, then the ANTLR tool will generate a description of the NFA
-     * for each rule in <a href="http://www.graphviz.org">Dot format</a>
-     * 
-     * @parameter default-value="false"
-     */
-    protected boolean nfa;
-    /**
-     * If set to true then the ANTLR tool will generate a description of the DFA
-     * for each decision in the grammar in <a href="http://www.graphviz.org">Dot format</a>
-     * 
-     * @parameter default-value="false"
-     */
-    protected boolean dfa;
-    /**
-     * If set to true, the generated parser code will log rule entry and exit points
-     * to stdout as an aid to debugging.
-     *
-     * @parameter default-value="false"
-     */
-    protected boolean trace;
-    /**
-     * If this parameter is set, it indicates that any warning or error messages returned
-     * by ANTLR should be formatted in the specified way. Currently, ANTLR supports the
-     * built-in formats of antlr, gnu and vs2005.
-     *
-     * @parameter default-value="antlr"
-     */
-    protected String messageFormat;
-    /**
-     * If this parameter is set to true, then ANTLR will report all sorts of things
-     * about what it is doing such as the names of files and the version of ANTLR and so on.
-     *
-     * @parameter default-value="true"
-     */
-    protected boolean verbose;
-
-    /**
-     * The number of alts, beyond which ANTLR will not generate a switch statement
-     * for the DFA.
-     *
-     * @parameter default-value="300"
-     */
-    private int maxSwitchCaseLabels;
-
-    /**
-     * The number of alts, below which ANTLR will not choose to generate a switch
-     * statement over an if statement.
-     */
-    private int minSwitchAlts;
-
-    /* --------------------------------------------------------------------
-     * The following are Maven-specific parameters, rather than specifically
-     * options that the ANTLR tool can use.
-     */
-    /**
-     * Provides an explicit list of all the grammars that should
-     * be included in the generate phase of the plugin. Note that the plugin
-     * is smart enough to realize that imported grammars should be included but
-     * not acted upon directly by the ANTLR Tool.
-     *
-     * Unless otherwise specified, the include list scans for and includes all
-     * files that end in ".g" in any directory beneath src/main/antlr3. Note that
-     * this version of the plugin looks for the directory antlr3 and not the directory
-     * antlr, so as to avoid clashes and confusion for projects that use both v2 and v3 grammars
-     * such as ANTLR itself.
-     *
-     * @parameter
-     */
-    protected Set includes = new HashSet();
-    /**
-     * Provides an explicit list of any grammars that should be excluded from
-     * the generate phase of the plugin. Files listed here will not be sent for
-     * processing by the ANTLR tool.
-     *
-     * @parameter 
-     */
-    protected Set excludes = new HashSet();
-    /**
-     * @parameter expression="${project}"
-     * @required
-     * @readonly
-     */
-    protected MavenProject project;
-    /**
-     * Specifies the Antlr directory containing grammar files. For
-     * antlr version 3.x we default this to a directory in the tree
-     * called antlr3 because the antlr directory is occupied by version
-     * 2.x grammars.
-     *
-     * @parameter default-value="${basedir}/src/main/antlr3"
-     * @required
-     */
-    private File sourceDirectory;
-    /**
-     * Location for generated Java files. For antlr version 3.x we default
-     * this to a directory in the tree called antlr3 because the antlr
-     * directory is occupied by version 2.x grammars.
-     *
-     * @parameter default-value="${project.build.directory}/generated-sources/antlr3"
-     * @required
-     */
-    private File outputDirectory;
-    /**
-     * Location for imported token files, e.g. <code>.tokens</code> and imported grammars.
-     * Note that ANTLR will not try to process grammars that it finds to be imported
-     * into other grammars (in the same processing session).
-     *
-     * @parameter default-value="${basedir}/src/main/antlr3/imports"
-     */
-    private File libDirectory;
-
-    public File getSourceDirectory() {
-        return sourceDirectory;
-    }
-
-    public File getOutputDirectory() {
-        return outputDirectory;
-    }
-
-    public File getLibDirectory() {
-        return libDirectory;
-    }
-
-    void addSourceRoot(File outputDir) {
-        project.addCompileSourceRoot(outputDir.getPath());
-    }
-    /**
-     * An instance of the ANTLR build tool
-     */
-    protected Tool tool;
-
-    /**
-     * The main entry point for this Mojo, it is responsible for converting
-     * ANTLR 3.x grammars into the target language specified by the grammar.
-     * 
-     * @throws org.apache.maven.plugin.MojoExecutionException When something is discovered such as a missing source
-     * @throws org.apache.maven.plugin.MojoFailureException When something really bad happens such as not being able to create the ANTLR Tool
-     */
-    public void execute()
-            throws MojoExecutionException, MojoFailureException {
-
-        Log log = getLog();
-
-        // Check to see if the user asked for debug information, then dump all the
-        // parameters we have picked up if they did.
-        //
-        if (log.isDebugEnabled()) {
-
-            // Excludes
-            //
-            for (String e : (Set<String>) excludes) {
-
-                log.debug("ANTLR: Exclude: " + e);
-            }
-
-            // Includes
-            //
-            for (String e : (Set<String>) includes) {
-
-                log.debug("ANTLR: Include: " + e);
-            }
-
-            // Output location
-            //
-            log.debug("ANTLR: Output: " + outputDirectory);
-
-            // Library directory
-            //
-            log.debug("ANTLR: Library: " + libDirectory);
-
-            // Flags
-            //
-            log.debug("ANTLR: report              : " + report);
-            log.debug("ANTLR: printGrammar        : " + printGrammar);
-            log.debug("ANTLR: debug               : " + debug);
-            log.debug("ANTLR: profile             : " + profile);
-            log.debug("ANTLR: nfa                 : " + nfa);
-            log.debug("ANTLR: dfa                 : " + dfa);
-            log.debug("ANTLR: trace               : " + trace);
-            log.debug("ANTLR: messageFormat       : " + messageFormat);
-            log.debug("ANTLR: maxSwitchCaseLabels : " + maxSwitchCaseLabels);
-            log.debug("ANTLR: minSwitchAlts       : " + minSwitchAlts);
-            log.debug("ANTLR: verbose             : " + verbose);
-        }
-
-        // Ensure that the output directory path is all intact so that
-        // ANTLR can just write into it.
-        //
-        File outputDir = getOutputDirectory();
-
-        if (!outputDir.exists()) {
-            outputDir.mkdirs();
-        }
-
-        // First thing we need is an instance of the ANTLR build tool
-        //
-        try {
-            // ANTLR Tool build interface
-            //
-            tool = new Tool();
-        } catch (Exception e) {
-            log.error("The attempt to create the ANTLR build tool failed, see exception report for details");
-
-            throw new MojoFailureException("Jim failed you!");
-        }
-
-        // Next we need to set the options given to us in the pom into the
-        // tool instance we have created.
-        //
-        tool.setDebug(debug);
-        tool.setGenerate_DFA_dot(dfa);
-        tool.setGenerate_NFA_dot(nfa);
-        tool.setProfile(profile);
-        tool.setReport(report);
-        tool.setPrintGrammar(printGrammar);
-        tool.setTrace(trace);
-        tool.setVerbose(verbose);
-        tool.setMessageFormat(messageFormat);
-        tool.setMaxSwitchCaseLabels(maxSwitchCaseLabels);
-        tool.setMinSwitchAlts(minSwitchAlts);
-
-        // Where do we want ANTLR to produce its output? (Base directory)
-        //
-        if (log.isDebugEnabled())
-        {
-            log.debug("Output directory base will be " + outputDirectory.getAbsolutePath());
-        }
-        tool.setOutputDirectory(outputDirectory.getAbsolutePath());
-
-        // Tell ANTLR that we always want the output files to be produced in the output directory
-        // using the same relative path as the input file was to the input directory.
-        //
-        tool.setForceRelativeOutput(true);
-
-        // Where do we want ANTLR to look for .tokens and import grammars?
-        //
-        tool.setLibDirectory(libDirectory.getAbsolutePath());
-
-        if (!sourceDirectory.exists()) {
-            if (log.isInfoEnabled()) {
-                log.info("No ANTLR grammars to compile in " + sourceDirectory.getAbsolutePath());
-            }
-            return;
-        } else {
-            if (log.isInfoEnabled()) {
-                log.info("ANTLR: Processing source directory " + sourceDirectory.getAbsolutePath());
-            }
-        }
-
-        // Set working directory for ANTLR to be the base source directory
-        //
-        tool.setInputDirectory(sourceDirectory.getAbsolutePath());
-
-        try {
-
-            // Now pick up all the files and process them with the Tool
-            //
-            processGrammarFiles(sourceDirectory, outputDirectory);
-
-        } catch (InclusionScanException ie) {
-
-            log.error(ie);
-            throw new MojoExecutionException("Fatal error occured while evaluating the names of the grammar files to analyze");
-
-        } catch (Exception e) {
-
-            getLog().error(e);
-            throw new MojoExecutionException(e.getMessage());
-        }
-
-
-
-        tool.process();
-
-        // If any of the grammar files caused errors but did not throw exceptions
-        // then we should have accumulated errors in the counts
-        //
-        if (tool.getNumErrors() > 0) {
-            throw new MojoExecutionException("ANTLR caught " + tool.getNumErrors() + " build errors.");
-        }
-
-        // All looks good, so we need to tell Maven about the sources that
-        // we just created.
-        //
-        if (project != null) {
-            // Tell Maven that there are some new source files underneath
-            // the output directory.
-            //
-            addSourceRoot(this.getOutputDirectory());
-        }
-
-    }
-
-
-    /**
-     * Scans the source directory for grammar files and adds each one to the
-     * ANTLR tool's list of grammars to process.
-     *
-     * @param sourceDirectory The base directory containing the grammar files
-     * @param outputDirectory The directory in which generated sources are placed
-     * @throws org.antlr.runtime.RecognitionException
-     * @throws java.io.IOException
-     * @throws org.codehaus.plexus.compiler.util.scan.InclusionScanException
-     */
-    private void processGrammarFiles(File sourceDirectory, File outputDirectory)
-            throws RecognitionException, IOException, InclusionScanException {
-        // Which files under the source set should we be looking for as grammar files
-        //
-        SourceMapping mapping = new SuffixMapping("g", Collections.EMPTY_SET);
-
-        // What are the sets of includes (defaulted or otherwise).
-        //
-        Set includes = getIncludesPatterns();
-
-        // Now, to the excludes, we need to add the imports directory
-        // as this is autoscanned for imported grammars and so is auto-excluded from the
-        // set of grammar files we should be analyzing.
-        //
-        excludes.add("imports/**");
-
-        SourceInclusionScanner scan = new SimpleSourceInclusionScanner(includes, excludes);
-
-        scan.addSourceMapping(mapping);
-        Set grammarFiles = scan.getIncludedSources(sourceDirectory, null);
-
-        if (grammarFiles.isEmpty()) {
-            if (getLog().isInfoEnabled()) {
-                getLog().info("No grammars to process");
-            }
-        } else {
-
-            // Tell the ANTLR tool that we want sorted build mode
-            //
-            tool.setMake(true);
-            
-            // Iterate each grammar file we were given and add it into the tool's list of
-            // grammars to process.
-            //
-            for (File grammar : (Set<File>) grammarFiles) {
-
-                if (getLog().isDebugEnabled()) {
-                    getLog().debug("Grammar file '" + grammar.getPath() + "' detected.");
-                }
-
-
-                String relPath = findSourceSubdir(sourceDirectory, grammar.getPath()) + grammar.getName();
-
-                if (getLog().isDebugEnabled()) {
-                    getLog().debug("  ... relative path is: " + relPath);
-                }
-                tool.addGrammarFile(relPath);
-
-            }
-
-        }
-
-
-    }
-
-    public Set getIncludesPatterns() {
-        if (includes == null || includes.isEmpty()) {
-            return Collections.singleton("**/*.g");
-        }
-        return includes;
-    }
-
-    /**
-     * Given the source directory File object and the full PATH to a
-     * grammar, produce the path to the named grammar file relative to the
-     * sourceDirectory. This will then allow ANTLR to
-     * produce output relative to the base of the output directory and
-     * reflect the input organization of the grammar files.
-     *
-     * @param sourceDirectory The source directory File object
-     * @param grammarFileName The full path to the input grammar file
-     * @return The path to the grammar file relative to the source directory
-     */
-    private String findSourceSubdir(File sourceDirectory, String grammarFileName) {
-        String srcPath = sourceDirectory.getPath() + File.separator;
-
-        if (!grammarFileName.startsWith(srcPath)) {
-            throw new IllegalArgumentException("expected " + grammarFileName + " to be prefixed with " + sourceDirectory);
-        }
-
-        File unprefixedGrammarFileName = new File(grammarFileName.substring(srcPath.length()));
-
-        return unprefixedGrammarFileName.getParent() + File.separator;
-    }
-}
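Reduced to its essentials, the Mojo above is a thin wrapper around the ANTLR Tool API. Below is a minimal standalone sketch of that call sequence, using only the Tool methods visible in the code above, with hypothetical paths standing in for the Maven defaults:

import org.antlr.Tool;

public class RunAntlr3 {
    public static void main(String[] args) {
        Tool tool = new Tool();

        // Mirror the defaults the Mojo wires in from its @parameter annotations.
        tool.setInputDirectory("src/main/antlr3");                  // sourceDirectory
        tool.setOutputDirectory("target/generated-sources/antlr3"); // outputDirectory
        tool.setLibDirectory("src/main/antlr3/imports");            // libDirectory (.tokens and import grammars)
        tool.setForceRelativeOutput(true); // keep the input package layout under the output directory
        tool.setMake(true);                // only regenerate out-of-date grammars

        // Grammar paths are given relative to the input directory, as findSourceSubdir() computes them.
        tool.addGrammarFile("org/foo/bar/parser.g"); // hypothetical grammar

        tool.process();
        if (tool.getNumErrors() > 0) {
            throw new IllegalStateException("ANTLR reported " + tool.getNumErrors() + " build errors");
        }
    }
}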
diff --git a/antlr-3.4/antlr3-maven-plugin/src/site/apt/examples/import.apt b/antlr-3.4/antlr3-maven-plugin/src/site/apt/examples/import.apt
deleted file mode 100644
index 06a49f1..0000000
--- a/antlr-3.4/antlr3-maven-plugin/src/site/apt/examples/import.apt
+++ /dev/null
@@ -1,8 +0,0 @@
-Imported Grammar Files
-
- In order to have the ANTLR plugin automatically locate and use grammars used
- as imports in your main .g files, you need to place the imported grammar
- files in the imports directory beneath the root directory of your grammar
- files (which is <<<src/main/antlr3>>> by default of course).
-
- For a default layout, place your import grammars in the directory: <<<src/main/antlr3/imports>>>
diff --git a/antlr-3.4/antlr3-maven-plugin/src/site/apt/examples/libraries.apt b/antlr-3.4/antlr3-maven-plugin/src/site/apt/examples/libraries.apt
deleted file mode 100644
index 73ce796..0000000
--- a/antlr-3.4/antlr3-maven-plugin/src/site/apt/examples/libraries.apt
+++ /dev/null
@@ -1,47 +0,0 @@
-Libraries
-
- The introduction of the import directive in a grammar allows reuse of common grammar files
- as well as the ability to divide up functional components of large grammars. However, it has
- caused some confusion in regard to the fact that generated vocab files (<<<xxx.tokens>>>) can also
- be searched for with the <<<<libDirectory>>>> directive.
-
- This has confused two separate functions and imposes a structure upon the layout of
- your grammar files in certain cases. If you have grammars that both use the import
- directive and also require the use of a vocab file then you will need to locate
- the grammar that generates the .tokens file alongside the grammar that uses it. This
- is because you will need to use the <<<<libDirectory>>>> directive to specify the
- location of your imported grammars and ANTLR will not find any vocab files in
- this directory.
-
- The .tokens files for any grammars are generated within the same output directory structure
- as the .java files. So, wherever the .java files are generated, you will also find the .tokens
- files. ANTLR looks for .tokens files in both the <<<<libDirectory>>>> and the output directory
- where it is placing the generated .java files. Hence when you locate the grammars that generate
- .tokens files in the same source directory as the ones that use the .tokens files, then
- the Maven plugin will find the expected .tokens files.
-
- The <<<<libDirectory>>>> is specified like any other directory parameter in Maven. Here is an
- example:
-
-+--
-<plugin>
-    <groupId>org.antlr</groupId>
-    <artifactId>antlr3-maven-plugin</artifactId>
-    <version>3.1.3-1</version>
-
-    <executions>
-
-        <execution>
-            <configuration>
-                <goals>
-                    <goal>antlr</goal>
-                </goals>
-                <libDirectory>src/main/antlr_imports</libDirectory>
-            </configuration>
-        </execution>
-    </executions>
-</plugin>
-+--
-
-
-
diff --git a/antlr-3.4/antlr3-maven-plugin/src/site/apt/examples/simple.apt b/antlr-3.4/antlr3-maven-plugin/src/site/apt/examples/simple.apt
deleted file mode 100644
index 3e36e84..0000000
--- a/antlr-3.4/antlr3-maven-plugin/src/site/apt/examples/simple.apt
+++ /dev/null
@@ -1,40 +0,0 @@
-Simple configuration
-
- If your grammar files are organized into the default locations as described in the {{{../index.html}introduction}},
- then configuring the pom.xml file for your project is as simple as adding this to it
-
-+--
-<plugins>
-<plugin>
-    <groupId>org.antlr</groupId>
-    <artifactId>antlr3-maven-plugin</artifactId>
-    <version>3.1.3-1</version>
-    <executions>
-        <execution>
-            <goals>
-                <goal>antlr</goal>
-            </goals>
-        </execution>
-    </executions>
-</plugin>
-...
-</plugins>
-+--
-
- When the mvn command is executed, all grammar files under <<<src/main/antlr3>>>, except any
- import grammars under <<<src/main/antlr3/imports>>>, will be analyzed and converted to
- Java source code in the output directory <<<target/generated-sources/antlr3>>>.
-
- Your input files under <<<antlr3>>> should be stored in subdirectories that
- reflect the package structure of your Java parsers. If your grammar file parser.g contains:
-
-+---
-@header {
-package org.jimi.themuss;
-}
-+---
-
- Then the .g file should be stored in: <<<src/main/antlr3/org/jimi/themuss/parser.g>>>. This way
- the generated .java files will correctly reflect the package structure in which they will
- finally rest as classes.
-
diff --git a/antlr-3.4/antlr3-maven-plugin/src/site/apt/index.apt b/antlr-3.4/antlr3-maven-plugin/src/site/apt/index.apt
deleted file mode 100644
index 2b2495a..0000000
--- a/antlr-3.4/antlr3-maven-plugin/src/site/apt/index.apt
+++ /dev/null
@@ -1,63 +0,0 @@
-         -------------
-         ANTLR v3 Maven Plugin
-         -------------
-         Jim Idle
-         -------------
-         March 2009
-         -------------
-
-ANTLR v3 Maven plugin
-
- The ANTLR v3 Maven plugin was completely rewritten as of version 3.1.3; if you are familiar
- with prior versions, you should note that there are some behavioral differences that make
- it worthwhile reading this documentation. 
-
- The job of the plugin is essentially to tell the standard ANTLR parser generator where the
- input grammar files are and where the output files should be generated. As with all Maven
- plugins, there are defaults, which you are advised to comply with, but are not forced to
- comply with.
-
- This version of the plugin allows full control over ANTLR and allows configuration of all
- options that are useful for a build system. The code required to calculate dependencies,
- check the build order, and otherwise work with your grammar files is built into the ANTLR
- tool as of version 3.1.3 of ANTLR and this plugin.
-
-* Plugin Versioning
-
- The plugin version tracks the version of the ANTLR tool that it controls. Hence if you
- use version 3.1.3 of the plugin, you will build your grammars using version 3.1.3 of the
- ANTLR tool, version 3.2 of the plugin will use version 3.2 of the ANTLR tool and so on.
-
- You may also find that there are patch versions of the plugin such as 3.1.3-1, 3.1.3-2 and
- so on. Use the latest patch release of the plugin.
-
- The current version of the plugin is shown at the top of this page after the <<Last Deployed>> date.
- 
-
-* Default directories
-
- As with all Maven plugins, this plugin will automatically default to standard locations
- for your grammar and import files. Organizing your source code to reflect this standard
- layout will greatly reduce the configuration effort required. The standard layout looks
- like this:
-
-+--
- src/main/
-      |
-      +--- antlr3/... .g files organized in the required package structure
-             |
-             +--- imports/  .g files that are imported by other grammars.
-+--
-
- If your grammar is intended to be part of a package called org.foo.bar then you would
- place it in the directory <<<src/main/antlr3/org/foo/bar>>>. The plugin will then produce
- .java and .tokens files in the output directory <<<target/generated-sources/antlr3/org/foo/bar>>>.
- When the Java files are compiled, they will be in the correct location for the javac
- compiler without any special configuration. The generated java files are automatically
- submitted for compilation by the plugin.
-
- The <<<src/main/antlr3/imports>>> directory is treated in a special way. It should contain
- any grammar files that are imported by other grammar files (do not make subdirectories here.)
- Such files are never built on their own, but the plugin will automatically tell the ANTLR
- tool to look in this directory for library files.
-
diff --git a/antlr-3.4/antlr3-maven-plugin/src/site/apt/usage.apt.vm b/antlr-3.4/antlr3-maven-plugin/src/site/apt/usage.apt.vm
deleted file mode 100644
index 9b7ad0f..0000000
--- a/antlr-3.4/antlr3-maven-plugin/src/site/apt/usage.apt.vm
+++ /dev/null
@@ -1,193 +0,0 @@
-Usage
-
- The Maven plugin for ANTLR is simple to use but is at its simplest when you use the default
- layout for your grammars, like so:
-
-+--
- src/main/
-      |
-      +--- antlr3/... .g files organized in the required package structure
-             |
-             +--- imports/  .g files that are imported by other grammars.
-+--
-
- However, if you are not able to use this structure for whatever reason, you
- can configure the locations of the grammar files, where library/import files
- are located and where the output files should be generated.
-
-* Plugin Descriptor
-
- The current version of the plugin is shown at the top of this page after the <<Last Deployed>> date.
-
- The full layout of the descriptor (at least, those parts that are not standard Maven things),
- showing the default values of the configuration options, is as follows:
-
-+--
-<plugin>
-    <groupId>org.antlr</groupId>
-    <artifactId>antlr3-maven-plugin</artifactId>
-    <version>3.1.3-1</version>
-
-    <executions>
-        
-        <execution>
-            <configuration>
-                <goals>
-                    <goal>antlr</goal>
-                </goals>
-                <conversionTimeout>10000</conversionTimeout>
-                <debug>false</debug>
-                <dfa>false</dfa>
-                <nfa>false</nfa>
-                <excludes><exclude/></excludes>
-                <includes><include/></includes>
-                <libDirectory>src/main/antlr3/imports</libDirectory>
-                <messageFormat>antlr</messageFormat>
-                <outputDirectory>target/generated-sources/antlr3</outputDirectory>
-                <printGrammar>false</printGrammar>
-                <profile>false</profile>
-                <report>false</report>
-                <sourceDirectory>src/main/antlr3</sourceDirectory>
-                <trace>false</trace>
-                <verbose>true</verbose>
-            </configuration>
-        </execution>
-    </executions>
-
-</plugin>
-+--
-
- Note that you can create multiple executions, and thus build some grammars with different
- options from others (such as setting the debug option, for instance).
-
-** Configuration parameters
-
-*** report
-
-    If set to true, then after the tool has processed an input grammar file
-    it will report various statistics about the parser, such as information
-    on cyclic DFAs, which rules may use backtracking, and so on.
-
-    default-value="false"
-
-*** printGrammar
-
-    If set to true, then the ANTLR tool will print a version of the input
-    grammar which is devoid of any actions that may be present in the input file.
-
-    default-value = "false"
-
-*** debug
-
-     If set to true, then the code generated by the ANTLR code generator will
-     be set to debug mode. This means that when run, the code will 'hang' and
-     wait for a debug connection on a TCP port (49100 by default).
-     
-     default-value="false"
-     
-*** profile
-
-     If set to true, then the generated parser will compute and report on
-     profile information at runtime.
-     
-     default-value="false"
-     
-*** nfa
-
-     If set to true, then the ANTLR tool will generate a description of the NFA
-     for each rule in <a href="http://www.graphviz.org">Dot format</a>
-     
-     default-value="false"
-     
-*** dfa
-
-     If set to true then the ANTLR tool will generate a description of the DFA
-     for each decision in the grammar in <a href="http://www.graphviz.org">Dot format</a>
-     
-     default-value="false"
-     
-*** trace
-
-     If set to true, the generated parser code will log rule entry and exit points
-     to stdout as an aid to debugging.
-     
-     default-value="false"
-     
-*** messageFormat
-
-     If this parameter is set, it indicates that any warning or error messages returned
-     by ANTLR should be formatted in the specified way. Currently, ANTLR supports the
-     built-in formats of antlr, gnu and vs2005.
-
-     default-value="antlr"
-     
-*** verbose
-
-     If this parameter is set to true, then ANTLR will report all sorts of things
-     about what it is doing such as the names of files and the version of ANTLR and so on.
-     
-     default-value="true"
-     
-*** conversionTimeout
-
-     The number of milliseconds ANTLR will wait for analysis of each
-     alternative in the grammar to complete before giving up. You may raise
-     this value if ANTLR gives up on a complicated alt and tells you that
-     there are lots of ambiguities, but you know that it just needed to spend
-     more time on it. Note that this is an absolute time and not CPU time.
-     
-     default-value="10000"
-     
-*** includes
-
-     Provides an explicit list of all the grammars that should
-     be included in the generate phase of the plugin. Note that the plugin
-     is smart enough to realize that imported grammars should be included but
-     not acted upon directly by the ANTLR Tool.
-     
-     Unless otherwise specified, the include list scans for and includes all
-     files that end in ".g" in any directory beneath src/main/antlr3. Note that
-     this version of the plugin looks for the directory antlr3 and not the directory
-     antlr, so as to avoid clashes and confusion for projects that use both v2 and v3 grammars
-     such as ANTLR itself.
-     
-*** excludes
-
-     Provides an explicit list of any grammars that should be excluded from
-     the generate phase of the plugin. Files listed here will not be sent for
-     processing by the ANTLR tool.
-     
-*** sourceDirectory
-
-     Specifies the Antlr directory containing grammar files. For
-     antlr version 3.x we default this to a directory in the tree
-     called antlr3 because the antlr directory is occupied by version
-     2.x grammars.
-
-     <<NB>> Take careful note that the default location for antlr grammars
-     is now <<antlr3>> and NOT <<antlr>>
-
-     default-value="<<<${basedir}/src/main/antlr3>>>"
-     
-*** outputDirectory
-
-     Location for generated Java files. For antlr version 3.x we default
-     this to a directory in the tree called antlr3 because the antlr
-     directory is occupied by version 2.x grammars.
-     
-     default-value="<<<${project.build.directory}/generated-sources/antlr3>>>"
-     
-*** libDirectory
-
-     Location for imported token files, e.g. <code>.tokens</code> and imported grammars.
-     Note that ANTLR will not try to process grammars that it finds in this directory, but
-     will include this directory in the search for .tokens files and import grammars.
-
-     <<NB>> If you change the lib directory from the default but the directory is
-     still under <<<${basedir}/src/main/antlr3>>>, then you will need to exclude
-     the grammars from processing specifically, using the <<<<excludes>>>> option.
-
-     default-value="<<<${basedir}/src/main/antlr3/imports>>>"
-
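The includes and excludes parameters above drive a plexus source-inclusion scan. Below is a minimal sketch, using the same scanner classes as the plugin, of how the set of .g files is selected; the directory and patterns shown are the documented defaults, and the class name is hypothetical:

import java.io.File;
import java.util.Collections;
import java.util.Set;
import org.codehaus.plexus.compiler.util.scan.SimpleSourceInclusionScanner;
import org.codehaus.plexus.compiler.util.scan.SourceInclusionScanner;
import org.codehaus.plexus.compiler.util.scan.mapping.SuffixMapping;

public class GrammarScan {
    public static void main(String[] args) throws Exception {
        // Default include pattern, plus the auto-excluded imports directory.
        Set<String> includes = Collections.singleton("**/*.g");
        Set<String> excludes = Collections.singleton("imports/**");

        SourceInclusionScanner scanner = new SimpleSourceInclusionScanner(includes, excludes);
        scanner.addSourceMapping(new SuffixMapping("g", Collections.<String>emptySet()));

        // Scan the default source directory for matching grammar files.
        Set<?> grammars = scanner.getIncludedSources(new File("src/main/antlr3"), null);
        for (Object grammar : grammars) {
            System.out.println(grammar); // each entry is a java.io.File
        }
    }
}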
diff --git a/antlr-3.4/antlr3-maven-plugin/src/site/site.xml b/antlr-3.4/antlr3-maven-plugin/src/site/site.xml
deleted file mode 100644
index 7d0c52b..0000000
--- a/antlr-3.4/antlr3-maven-plugin/src/site/site.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<project name="ANTLR v3 Maven plugin">
-
-  <publishDate position="left"/>
-  <version position="left"/>
-
-  <poweredBy>
-    <logo name="ANTLR Web Site" href="http://antlr.org/"
-          img="http://www.antlr.org/wiki/download/attachments/292/ANTLR3"/>
-  </poweredBy>
-
-  <body>
-    <links>
-      <item name="Antlr Web Site" href="http://www.antlr.org/"/>
-    </links>
-
-    <menu name="Overview">
-      <item name="Introduction" href="index.html"/>
-      <item name="Usage" href="usage.html"/>
-    </menu>
-
-    <menu name="Examples">
-      <item name="Simple configurations" href="examples/simple.html"/>
-      <item name="Using library directories" href="examples/libraries.html"/>
-      <item name="Using imported grammars" href="examples/import.html"/>
-    </menu>
-
-    <menu ref="reports" />
-    <menu ref="modules" />
-
-  </body>
-</project>
diff --git a/antlr-3.4/antlrjar.xml b/antlr-3.4/antlrjar.xml
deleted file mode 100644
index 6988f98..0000000
--- a/antlr-3.4/antlrjar.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-    This file defines what goes into the ANTLR uber jar, which includes
-    all of the classes we need to run an executable jar in standalone
-    mode.
-  -->
-<assembly>
-
-    <!--
-        This is the suffix that will be used to name the uber jar
-        once it is jarred up.
-      -->
-    <id>completejar</id>
-
-    <!--
-        The only output format we need is the executable jar file
-      -->
-    <formats>
-        <format>jar</format>
-    </formats>
-
-    <!--
-        Make all jars unpack at the same level and don't include
-        any extraneous directories.
-      -->
-    <includeBaseDirectory>false</includeBaseDirectory>
-
-    <!--
-        Which of the modules that the master pom builds do we
-        wish to include in the uber jar. We are including
-        dependencies, so we only need to name the Tool module
-        and the gunit module.
-      -->
-    <moduleSets>
-
-        <moduleSet>
-            
-            <includes>
-                
-                <include>org.antlr:antlr</include>
-          
-            </includes>
-            <!--
-                Of the binaries, such as the dependencies that the
-                above modules need, which do we want and which do we not.
-                Currently we want all the dependencies in the Tool jar.
-              -->
-            <binaries>
-
-                <dependencySets>
-                    <dependencySet>
-                        <!--
-                            Exclude the antlr-master pom from the jar - we don't need it
-                            and it causes silly things to happen.
-                          -->
-                        <useProjectArtifact>false</useProjectArtifact>
-                    
-                        <!--
-                            Unpack the binary dependencies so we have a nice
-                            uber jar that can run with java -jar and need not have
-                            CLASSPATH configured and so on.
-                          -->
-                        <unpack>true</unpack>
-                    </dependencySet>
-                </dependencySets>
-
-
-            </binaries>
-
-        </moduleSet>
-       
-    </moduleSets>
-
-    <!--
-        What do we want to include in the jar from each project
-      -->
-    <fileSets>
-        <fileSet>
-
-            <!--
-                We need the output classes and resources etc.
-              -->
-            <directory>${project.build.outputDirectory}</directory>
-        </fileSet>
-    </fileSets>
-
-</assembly>
diff --git a/antlr-3.4/antlrsources.xml b/antlr-3.4/antlrsources.xml
deleted file mode 100644
index 953c7fc..0000000
--- a/antlr-3.4/antlrsources.xml
+++ /dev/null
@@ -1,172 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-
-    This is the assembly descriptor for building a full source code
-    distribution of ANTLR and all its related components. This assembly
-    only includes the Java oriented source code, hence only the Java runtime
-    is included in the resulting jar.
-
-    The resulting jar is a distribution that can be expanded with:
-
-      jar xvf antlr-master-3.x.x-src.jar
-
-    The output directory will be antlr-master-3.x.x and in here will be
-    the BUILD.txt file, which explains how to build ANTLR.
-
-    Jim Idle - May, 2009
- -->
-<assembly>
-
-    <!-- The name of this assembly descriptor, which is referenced in
-         the master pom.xml using <assemblyRef> (although in fact we
-         reference the file name that contains it to avoid cluttering
-         the pom).
-      -->
-    <id>src</id>
-
-    <!-- We have elected to produce only a jar output and to use the line
-         endings of whatever platform we are running on. More formats
-         can be added for simultaneous production, such as <format>zip</format>
-      -->
-    <formats>
-        <format>jar</format>
-    </formats>
-
-    <!--
-        The resulting archives will have a base directory named after the master
-        artifact, rather than just extract into the current directory.
-      -->
-    <includeBaseDirectory>true</includeBaseDirectory>
-
-    <!-- We need to describe the source code of each of the modules we want
-         included in the archive. In the main this is because we did not store
-         the modules in perforce using directory names that match the module
-         names. This was for historic reasons as we already moved everything
-         about massively, just to move to Maven in the first place.
-      -->
-    <moduleSets>
-
-        <!-- Describe the gUnit source code.
-          -->
-        <moduleSet>
-
-            <!-- The Maven artifact name tells the assembly artifact a bunch
-                 of information to start with, such as its location in the current
-                 tree and so on.
-              -->
-            <includes>
-                <include>org.antlr:antlr-runtime</include>
-            </includes>
-
-            <!-- What sources do we wish to include from this module?
-              -->
-            <sources>
-
-                <!-- Because the Java runtime source is not in a directory
-                     called antlr-runtime, directly underneath the master
-                     directory, we need to map the output directory so that
-                     instead of starting with the name of the artifact, it
-                     is in the place where the build expects it.
-                  -->
-                <outputDirectoryMapping>runtime/Java</outputDirectoryMapping>
-
-                <fileSets>
-                     <!-- We have one file set, being the src sub-directory, which in
-                          the output archive, we also want to be called src/
-                       -->
-                    <fileSet>
-                        <directory>src</directory>
-                        <outputDirectory>src</outputDirectory>
-                    </fileSet>
-
-                    <!-- In the base runtime/Java directory, we need to include a number
-                         of files that either document the module or control the
-                         build. These are not underneath the src directory of course
-                         so they need to be named here (which nicely documents what
-                         is included).
-                      -->
-                    <fileSet>
-                        <includes>
-                            <include>pom.xml</include>
-                            <include>doxyfile</include>
-                            <include>antlr.config</include>
-                        </includes>
-                    </fileSet>
-                </fileSets>
-            </sources>
-
-        </moduleSet>
-
-        <!-- Describe the ANTLR tool source code.
-          -->
-        <moduleSet>
-            <includes>
-                <include>org.antlr:antlr</include>
-            </includes>
-
-            <!-- What sources do we wish to include from this module?
-              -->
-            <sources>
-
-                <!-- Because the tool source code is not in a directory
-                     called antlr, nor directly underneath the master
-                     directory, we need to map the output directory so that
-                     instead of starting with the name of the artifact, it
-                     is in the place where the build expects it.
-                  -->
-                <outputDirectoryMapping>tool</outputDirectoryMapping>
-
-
-                <fileSets>
-
-                    <!-- We have one file set, being the src sub-directory, which in
-                         the output archive, we also want to be called src/
-                      -->
-                    <fileSet>
-                        <directory>src</directory>
-                        <outputDirectory>src</outputDirectory>
-                    </fileSet>
-
-                    <!-- In the base tool directory, we need to include a number
-                         of files that either document the module or control the
-                         build. These are not underneath the src directory of course
-                         so they need to be named here (which nicely documents what
-                         is included).
-                      -->
-                    <fileSet>
-                        <includes>
-                            <include>pom.xml</include>
-                            <include>CHANGES.txt</include>
-                            <include>LICENSE.txt</include>
-                            <include>README.txt</include>
-                            <include>antlr.config</include>
-                        </includes>
-                    </fileSet>
-
-                </fileSets>
-
-            </sources>
-
-        </moduleSet>
-
-    </moduleSets>
-
-    <!-- In the base directory of the master build directory (the root of all
-         the other sources), there are a number of files that describe or control
-         the build (such as the master pom.xml and the BUILD.txt files). Hence
-         we need to describe them in their own fileset. No output mapping is required here
-         of course.
-      -->
-    <fileSets>
-        <fileSet>
-            <includes>
-                <include>pom.xml</include>
-                <include>antlrjar.xml</include>
-                <include>antlrsources.xml</include>
-                <include>BUILD.txt</include>
-            </includes>
-        </fileSet>
-    </fileSets>
-
-</assembly>
diff --git a/antlr-3.4/gunit-maven-plugin/pom.xml b/antlr-3.4/gunit-maven-plugin/pom.xml
deleted file mode 100644
index a11a50e..0000000
--- a/antlr-3.4/gunit-maven-plugin/pom.xml
+++ /dev/null
@@ -1,269 +0,0 @@
-<!--
-
- [The "BSD license"]
-
- ANTLR        - Copyright (c) 2005-2010 Terence Parr
- Maven Plugin - Copyright (c) 2009      Jim Idle
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-    <!--
-        The ANTLR Maven artifacts are now released via the Sonatype OSS
-        repository, which means that they are synced to Maven Central
-        within a few minutes of hitting the release repo for Sonatype.
-        To enable this, we inherit from the Sonatype-provided parent
-        pom. However, we must also configure our .m2/settings.xml to include
-        the snapshot and staging server and the Sonatype password. This
-        means that only ANTLR developers can release the artifacts, but
-        anyone can build locally.
-      -->
-    <parent>
-        <groupId>org.sonatype.oss</groupId>
-        <artifactId>oss-parent</artifactId>
-        <version>7</version>
-    </parent> 
-    
-    <modelVersion>4.0.0</modelVersion>
-
-    <prerequisites>
-        <maven>2.0</maven>
-    </prerequisites>
-
-    <groupId>org.antlr</groupId>
-    <artifactId>maven-gunit-plugin</artifactId>
-    <packaging>maven-plugin</packaging>
-    <version>3.4</version>
-
-    <name>Maven plugin for gUnit ANTLR V3.4</name>
-	<description>A Maven plugin for incorporating gUnit testing of grammars</description>
-    <url>http://antlr.org</url>
-
-    <properties>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    </properties>
-
-    <!-- Where do we track bugs for this project?
-      -->
-    <issueManagement>
-        <system>JIRA</system>
-        <url>http://antlr.org/jira/browse/ANTLR</url>
-    </issueManagement>
-
-    <!-- Location of the license description for this project
-      -->
-    <licenses>
-        <license>
-            <distribution>repo</distribution>
-            <name>The BSD License</name>
-            <url>http://www.antlr.org/LICENSE.txt </url>
-        </license>
-    </licenses>
-
-    <profiles>
-        <profile>
-            <id>release-sign-artifacts</id>
-            <activation>
-                <property>
-                    <name>deploy</name>
-                    <value>true</value>
-                </property>
-            </activation>
-            <build>
-                <plugins>
-                    <plugin>
-                        <groupId>org.apache.maven.plugins</groupId>
-                        <artifactId>maven-gpg-plugin</artifactId>
-                        <version>1.3</version>
-                        <executions>
-                            <execution>
-                                <id>sign-artifacts</id>
-                                <phase>verify</phase>
-                                <goals>
-                                   <goal>sign</goal>
-                                </goals>
-                            </execution>
-                        </executions>
-                    </plugin>
-                </plugins>
-            </build>
-        </profile>
-    </profiles>
-    
-    <!-- Ancillary information for completeness
-      -->
-    <inceptionYear>2009</inceptionYear>
-
-    <mailingLists>
-        <mailingList>
-            <archive>http://antlr.markmail.org/</archive>
-            <otherArchives>
-                <otherArchive>http://www.antlr.org/pipermail/antlr-interest/</otherArchive>
-            </otherArchives>
-            <name>ANTLR Users</name>
-            <subscribe>http://www.antlr.org/mailman/listinfo/antlr-interest/</subscribe>
-            <unsubscribe>http://www.antlr.org/mailman/options/antlr-interest/</unsubscribe>
-            <post>antlr-interest@antlr.org</post>
-        </mailingList>
-    </mailingLists>
-
-    <organization>
-        <name>ANTLR.org</name>
-        <url>http://www.antlr.org</url>
-    </organization>
-    <!-- ============================================================================= -->
-
-    <!--
-
-     What are we dependent on for the Mojos to execute? We need the
-     plugin API itself and of course we need the ANTLR Tool and runtime
-     and any of their dependencies, which we inherit. The Tool itself provides
-     us with all the dependencies, so we need only name it here.
-      -->
-    <dependencies>
-
-        <!--
-          The things we need to build the target language recognizer
-          -->
-        <dependency>
-            <groupId>org.apache.maven</groupId>
-            <artifactId>maven-plugin-api</artifactId>
-            <version>2.0</version>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.maven</groupId>
-            <artifactId>maven-project</artifactId>
-            <version>2.0</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.codehaus.plexus</groupId>
-            <artifactId>plexus-compiler-api</artifactId>
-            <version>1.5.3</version>
-        </dependency>
-
-        <!--
-         The version of ANTLR tool that this version of the plugin controls.
-         We have decided that this should be in lockstep with ANTLR itself, other
-         than -1 -2 -3 etc patch releases.
-          -->
-        <dependency>
-            <groupId>org.antlr</groupId>
-            <artifactId>antlr</artifactId>
-            <version>3.4</version>
-        </dependency>
-
-        <!--
-         Dependency on the gUnit artifact.
-        -->
-        <dependency>
-            <groupId>${groupId}</groupId>
-            <artifactId>gunit</artifactId>
-            <version>3.4</version>
-        </dependency>
-
-        <!--
-          Testing requirements...
-          -->
-        <dependency>
-
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <version>4.8.2</version>
-            <scope>test</scope>
-
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.maven.shared</groupId>
-            <artifactId>maven-plugin-testing-harness</artifactId>
-            <version>1.0</version>
-            <scope>test</scope>
-        </dependency>
-        
-    </dependencies>
-    
-    <build>
-
-        <defaultGoal>install</defaultGoal>
-        <plugins>
-            
-            <plugin>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>2.0.2</version>
-                <configuration>
-                    <source>1.6</source>
-                    <target>jsr14</target>
-                </configuration>
-            </plugin>
-            
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-project-info-reports-plugin</artifactId>
-                <version>2.4</version>
-                <configuration>
-                    <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-                </configuration>
-            </plugin>
-
-            <plugin>  
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-source-plugin</artifactId>
-                <version>2.1.2</version>
-                <executions>
-                    <execution>
-                        <id>attach-sources</id>
-                        <goals>
-                            <goal>jar</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-			
-            <plugin>
-				<groupId>org.apache.maven.plugins</groupId>
-				<artifactId>maven-javadoc-plugin</artifactId>
-                <version>2.8</version>
-				<executions>
-					<execution>
-						<id>attach-javadocs</id>
-						<goals>
-							<goal>jar</goal>
-						</goals>
-					</execution>
-				</executions>
-			</plugin>
-            
-        </plugins>
-
-    </build>
-
-</project>
diff --git a/antlr-3.4/gunit-maven-plugin/src/main/java/org/antlr/mojo/antlr3/GUnitExecuteMojo.java b/antlr-3.4/gunit-maven-plugin/src/main/java/org/antlr/mojo/antlr3/GUnitExecuteMojo.java
deleted file mode 100644
index db3f569..0000000
--- a/antlr-3.4/gunit-maven-plugin/src/main/java/org/antlr/mojo/antlr3/GUnitExecuteMojo.java
+++ /dev/null
@@ -1,410 +0,0 @@
-package org.antlr.mojo.antlr3;
-
-import java.util.List;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.io.File;
-import java.io.IOException;
-import java.io.Writer;
-import java.io.FileWriter;
-import java.io.BufferedWriter;
-import java.net.URL;
-import java.net.MalformedURLException;
-import java.net.URLClassLoader;
-
-import org.apache.maven.plugin.AbstractMojo;
-import org.apache.maven.plugin.MojoExecutionException;
-import org.apache.maven.plugin.MojoFailureException;
-import org.apache.maven.project.MavenProject;
-import org.apache.maven.artifact.Artifact;
-import org.apache.maven.artifact.DependencyResolutionRequiredException;
-import org.apache.maven.artifact.versioning.ArtifactVersion;
-import org.apache.maven.artifact.versioning.DefaultArtifactVersion;
-import org.apache.maven.artifact.versioning.OverConstrainedVersionException;
-import org.codehaus.plexus.util.StringUtils;
-import org.codehaus.plexus.util.FileUtils;
-import org.codehaus.plexus.compiler.util.scan.mapping.SourceMapping;
-import org.codehaus.plexus.compiler.util.scan.mapping.SuffixMapping;
-import org.codehaus.plexus.compiler.util.scan.SourceInclusionScanner;
-import org.codehaus.plexus.compiler.util.scan.SimpleSourceInclusionScanner;
-import org.codehaus.plexus.compiler.util.scan.InclusionScanException;
-import org.antlr.runtime.ANTLRFileStream;
-import org.antlr.runtime.RecognitionException;
-import org.antlr.gunit.GrammarInfo;
-import org.antlr.gunit.gUnitExecutor;
-import org.antlr.gunit.AbstractTest;
-import org.antlr.gunit.Interp;
-
-/**
- * Takes gUnit scripts and directly performs testing.
- *
- * @goal gunit
- *
- * @phase test
- * @requiresDependencyResolution test
- * @requiresProject true
- *
- * @author Steve Ebersole
- */
-public class GUnitExecuteMojo extends AbstractMojo {
-	public static final String ANTLR_GROUP_ID = "org.antlr";
-	public static final String ANTLR_ARTIFACT_NAME = "antlr";
-	public static final String ANTLR_RUNTIME_ARTIFACT_NAME = "antlr-runtime";
-
-	/**
-     * INTERNAL : The Maven Project to which we are attached
-     *
-     * @parameter expression="${project}"
-     * @required
-     */
-    private MavenProject project;
-
-	/**
-	 * INTERNAL : The artifacts associated with the dependencies defined as part
-	 * of our configuration within the project to which we are attached.
-	 *
-	 * @parameter expression="${plugin.artifacts}"
-     * @required
-     * @readonly
-	 */
-	private List<Artifact> pluginArtifacts;
-
-	/**
-     * Specifies the directory containing the gUnit testing files.
-     *
-     * @parameter expression="${basedir}/src/test/gunit"
-     * @required
-     */
-    private File sourceDirectory;
-
-    /**
-     * A set of patterns for matching files from the sourceDirectory that
-     * should be included as gUnit source files.
-     *
-     * @parameter
-     */
-    private Set includes;
-
-    /**
-     * A set of exclude patterns.
-     *
-     * @parameter
-     */
-    private Set excludes;
-
-	/**
-     * Specifies directory to which gUnit reports should get written.
-     *
-     * @parameter expression="${basedir}/target/gunit-report"
-     * @required
-     */
-    private File reportDirectory;
-
-	/**
-	 * Should gUnit functionality be completely bypassed?
-	 * <p/>
-	 * By default we skip gUnit tests if the user requested that all testing be skipped using 'maven.test.skip'.
-	 *
-	 * @parameter expression="${maven.test.skip}"
-	 */
-	private boolean skip;
-
-	public Set getIncludePatterns() {
-		return includes == null || includes.isEmpty()
-				? Collections.singleton( "**/*.testsuite" )
-				: includes;
-	}
-
-	public Set getExcludePatterns() {
-		return excludes == null
-				? Collections.emptySet()
-				: excludes;
-	}
-
-
-	public final void execute() throws MojoExecutionException, MojoFailureException {
-		if ( skip ) {
-			getLog().info( "Skipping gUnit processing" );
-			return;
-		}
-		Artifact pluginAntlrArtifact = determinePluginAntlrArtifact();
-
-		validateProjectsAntlrVersion( determineArtifactVersion( pluginAntlrArtifact ) );
-
-		performExecution( determineProjectCompileScopeClassLoader( pluginAntlrArtifact ) );
-	}
-
-	private Artifact determinePluginAntlrArtifact() throws MojoExecutionException {
-		for ( Artifact artifact : pluginArtifacts ) {
-			boolean match = ANTLR_GROUP_ID.equals( artifact.getGroupId() )
-					&& ANTLR_ARTIFACT_NAME.equals( artifact.getArtifactId() );
-			if ( match ) {
-				return artifact;
-			}
-		}
-		throw new MojoExecutionException(
-				"Unexpected state : could not locate " + ANTLR_GROUP_ID + ':' + ANTLR_ARTIFACT_NAME +
-						" in plugin dependencies"
-		);
-	}
-
-	private ArtifactVersion determineArtifactVersion(Artifact artifact) throws MojoExecutionException {
-		try {
-			return artifact.getVersion() != null
-					? new DefaultArtifactVersion( artifact.getVersion() )
-					: artifact.getSelectedVersion();
-		}
-		catch ( OverConstrainedVersionException e ) {
-			throw new MojoExecutionException( "artifact [" + artifact.getId() + "] defined an overly constrained version range" );
-		}
-	}
-
-	private void validateProjectsAntlrVersion(ArtifactVersion pluginAntlrVersion) throws MojoExecutionException {
-		Artifact antlrArtifact = null;
-		Artifact antlrRuntimeArtifact = null;
-
-		if ( project.getCompileArtifacts() != null ) {
-			for ( Object o : project.getCompileArtifacts() ) {
-				final Artifact artifact = ( Artifact ) o;
-				if ( ANTLR_GROUP_ID.equals( artifact.getGroupId() ) ) {
-					if ( ANTLR_ARTIFACT_NAME.equals( artifact.getArtifactId() ) ) {
-						antlrArtifact = artifact;
-						break;
-					}
-					if ( ANTLR_RUNTIME_ARTIFACT_NAME.equals( artifact.getArtifactId() ) ) {
-						antlrRuntimeArtifact = artifact;
-					}
-				}
-			}
-		}
-
-		validateBuildTimeArtifact( antlrArtifact, pluginAntlrVersion );
-		validateRunTimeArtifact( antlrRuntimeArtifact, pluginAntlrVersion );
-	}
-
-	@SuppressWarnings(value = "unchecked")
-	protected void validateBuildTimeArtifact(Artifact antlrArtifact, ArtifactVersion pluginAntlrVersion)
-			throws MojoExecutionException {
-		if ( antlrArtifact == null ) {
-			validateMissingBuildtimeArtifact();
-			return;
-		}
-
-		// otherwise, lets make sure they match...
-		ArtifactVersion projectAntlrVersion = determineArtifactVersion( antlrArtifact );
-		if ( pluginAntlrVersion.compareTo( projectAntlrVersion ) != 0 ) {
-			getLog().warn(
-					"Encountered " + ANTLR_GROUP_ID + ':' + ANTLR_ARTIFACT_NAME + ':' + projectAntlrVersion.toString() +
-							" which did not match Antlr version used by plugin [" + pluginAntlrVersion.toString() + "]"
-			);
-		}
-	}
-
-	protected void validateMissingBuildtimeArtifact() {
-		// generally speaking, it's OK for the project not to define a dependency on the build-time artifact...
-	}
-
-	@SuppressWarnings(value = "unchecked")
-	protected void validateRunTimeArtifact(Artifact antlrRuntimeArtifact, ArtifactVersion pluginAntlrVersion)
-			throws MojoExecutionException {
-		if ( antlrRuntimeArtifact == null ) {
-			// this is possible if the project instead depends on the build-time (or full) artifact.
-			return;
-		}
-
-		ArtifactVersion projectAntlrVersion = determineArtifactVersion( antlrRuntimeArtifact );
-		if ( pluginAntlrVersion.compareTo( projectAntlrVersion ) != 0 ) {
-			getLog().warn(
-					"Encountered " + ANTLR_GROUP_ID + ':' + ANTLR_RUNTIME_ARTIFACT_NAME + ':' + projectAntlrVersion.toString() +
-							" which did not match Antlr version used by plugin [" + pluginAntlrVersion.toString() + "]"
-			);
-		}
-	}
-
-	/**
-	 * Builds the classloader to pass to gUnit.
-	 *
-	 * @param antlrArtifact The plugin's (our) Antlr dependency artifact.
-	 *
-	 * @return The classloader for gUnit to use
-	 *
-	 * @throws MojoExecutionException Problem resolving artifacts to {@link java.net.URL urls}.
-	 */
-	private ClassLoader determineProjectCompileScopeClassLoader(Artifact antlrArtifact)
-			throws MojoExecutionException {
-		ArrayList<URL> classPathUrls = new ArrayList<URL>();
-		getLog().info( "Adding Antlr artifact : " + antlrArtifact.getId() );
-		classPathUrls.add( resolveLocalURL( antlrArtifact ) );
-
-		for ( String path : classpathElements() ) {
-			try {
-				getLog().info( "Adding project compile classpath element : " + path );
-				classPathUrls.add( new File( path ).toURI().toURL() );
-			}
-			catch ( MalformedURLException e ) {
-				throw new MojoExecutionException( "Unable to build path URL [" + path + "]" );
-			}
-		}
-
-		return new URLClassLoader( classPathUrls.toArray( new URL[classPathUrls.size()] ), getClass().getClassLoader() );
-	}
-
-	protected static URL resolveLocalURL(Artifact artifact) throws MojoExecutionException {
-		try {
-			return artifact.getFile().toURI().toURL();
-		}
-		catch ( MalformedURLException e ) {
-			throw new MojoExecutionException( "Unable to resolve artifact url : " + artifact.getId(), e );
-		}
-	}
-
-	@SuppressWarnings( "unchecked" )
-	private List<String> classpathElements() throws MojoExecutionException {
-		try {
-			// todo : should we combine both compile and test scoped elements?
-			return ( List<String> ) project.getTestClasspathElements();
-		}
-		catch ( DependencyResolutionRequiredException e ) {
-			throw new MojoExecutionException( "Call to Project#getTestClasspathElements required dependency resolution" );
-		}
-	}
-
-	private void performExecution(ClassLoader projectCompileScopeClassLoader) throws MojoExecutionException {
-		getLog().info( "gUnit report directory : " + reportDirectory.getAbsolutePath() );
-		if ( !reportDirectory.exists() ) {
-			boolean directoryCreated = reportDirectory.mkdirs();
-			if ( !directoryCreated ) {
-				getLog().warn( "mkdirs() reported problem creating report directory" );
-			}
-		}
-
-		Result runningResults = new Result();
-		ArrayList<String> failureNames = new ArrayList<String>();
-
-		System.out.println();
-		System.out.println( "-----------------------------------------------------------" );
-		System.out.println( " G U N I T   R E S U L T S" );
-		System.out.println( "-----------------------------------------------------------" );
-
-		for ( File script : collectIncludedSourceGrammars() ) {
-			final String scriptPath = script.getAbsolutePath();
-			System.out.println( "Executing script " + scriptPath );
-			try {
-				String scriptBaseName = StringUtils.chompLast( FileUtils.basename( script.getName() ), "." );
-
-				ANTLRFileStream antlrStream = new ANTLRFileStream( scriptPath );
-				GrammarInfo grammarInfo = Interp.parse( antlrStream );
-				gUnitExecutor executor = new gUnitExecutor(
-						grammarInfo,
-						projectCompileScopeClassLoader,
-						script.getParentFile().getAbsolutePath()
-				);
-
-				String report = executor.execTest();
-				writeReportFile( new File( reportDirectory, scriptBaseName + ".txt" ), report );
-
-				Result testResult = new Result();
-				testResult.tests = executor.numOfTest;
-				testResult.failures = executor.numOfFailure;
-				testResult.invalids = executor.numOfInvalidInput;
-
-				System.out.println( testResult.render() );
-
-				runningResults.add( testResult );
-				for ( AbstractTest test : executor.failures ) {
-					failureNames.add( scriptBaseName + "#" + test.getHeader() );
-				}
-			}
-			catch ( IOException e ) {
-				throw new MojoExecutionException( "Could not open specified script file", e );
-			}
-			catch ( RecognitionException e ) {
-				throw new MojoExecutionException( "Could not parse gUnit script", e );
-			}
-		}
-
-		System.out.println();
-		System.out.println( "Summary :" );
-		if ( ! failureNames.isEmpty() ) {
-			System.out.println( "  Found " + failureNames.size() + " failures" );
-			for ( String name : failureNames ) {
-				System.out.println( "    - " + name );
-			}
-		}
-		System.out.println( runningResults.render() );
-		System.out.println();
-
-		if ( runningResults.failures > 0 ) {
-			throw new MojoExecutionException( "Found gUnit test failures" );
-		}
-
-		if ( runningResults.invalids > 0 ) {
-			throw new MojoExecutionException( "Found invalid gUnit tests" );
-		}
-	}
-
-	private Set<File> collectIncludedSourceGrammars() throws MojoExecutionException {
-		SourceMapping mapping = new SuffixMapping( "g", Collections.EMPTY_SET );
-        SourceInclusionScanner scan = new SimpleSourceInclusionScanner( getIncludePatterns(), getExcludePatterns() );
-        scan.addSourceMapping( mapping );
-		try {
-			Set scanResults = scan.getIncludedSources( sourceDirectory, null );
-			Set<File> results = new HashSet<File>();
-			for ( Object result : scanResults ) {
-				if ( result instanceof File ) {
-					results.add( ( File ) result );
-				}
-				else if ( result instanceof String ) {
-					results.add( new File( ( String ) result ) );
-				}
-				else {
-					throw new MojoExecutionException( "Unexpected result type from scanning [" + result.getClass().getName() + "]" );
-				}
-			}
-			return results;
-		}
-		catch ( InclusionScanException e ) {
-			throw new MojoExecutionException( "Error determining gUnit sources", e );
-		}
-	}
-
-	private void writeReportFile(File reportFile, String results) {
-		try {
-			Writer writer = new FileWriter( reportFile );
-			writer = new BufferedWriter( writer );
-			try {
-				writer.write( results );
-				writer.flush();
-			}
-			finally {
-				try {
-					writer.close();
-				}
-				catch ( IOException ignore ) {
-				}
-			}
-		}
-		catch ( IOException e ) {
-			getLog().warn(  "Error writing gUnit report file", e );
-		}
-	}
-
-	private static class Result {
-		private int tests = 0;
-		private int failures = 0;
-		private int invalids = 0;
-
-		public String render() {
-			return String.format( "Tests run: %d,  Failures: %d,  Invalid: %d", tests, failures, invalids );
-		}
-
-		public void add(Result result) {
-			this.tests += result.tests;
-			this.failures += result.failures;
-			this.invalids += result.invalids;
-		}
-	}
-
-}
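Aside (not part of the deleted sources): determineProjectCompileScopeClassLoader() above composes the classloader handed to gUnit from the plugin's own ANTLR artifact plus the project's test classpath elements, converting each entry to a file URL and parenting the result on the plugin's loader. The standalone sketch below isolates that composition using plain File paths; the class name, helper method, and the "target/classes" path are illustrative assumptions only.

    import java.io.File;
    import java.net.MalformedURLException;
    import java.net.URL;
    import java.net.URLClassLoader;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Sketch only: mirrors how the mojo above builds the classloader for gUnit,
    // but takes plain File paths instead of Maven Artifact/classpath-element types.
    public class ClassLoaderSketch {

        static ClassLoader forPaths(List<File> entries) throws MalformedURLException {
            List<URL> urls = new ArrayList<URL>();
            for (File entry : entries) {
                // Each jar or classes directory becomes one URL on the loader.
                urls.add(entry.toURI().toURL());
            }
            // Parent on the current loader so gUnit's own classes stay visible,
            // just as the mojo parents on getClass().getClassLoader().
            return new URLClassLoader(urls.toArray(new URL[urls.size()]),
                    ClassLoaderSketch.class.getClassLoader());
        }

        public static void main(String[] args) throws Exception {
            ClassLoader loader = forPaths(Arrays.asList(new File("target/classes")));
            System.out.println(loader);
        }
    }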
diff --git a/antlr-3.4/gunit/pom.xml b/antlr-3.4/gunit/pom.xml
deleted file mode 100644
index cb35b6b..0000000
--- a/antlr-3.4/gunit/pom.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>org.antlr</groupId>
-    <artifactId>gunit</artifactId>
-    <packaging>jar</packaging>
-  
-    <name>ANTLR gUnit v3.4</name>
-  <!--
-
-    Inherit from the ANTLR master pom, which tells us what
-    version we are and allows us to inherit dependencies
-    and so on.
-
-    -->
-    <parent>
-        <groupId>org.antlr</groupId>
-        <artifactId>antlr-master</artifactId>
-        <version>3.4</version>
-    </parent>
-
-    <url>http://www.antlr.org/wiki/display/ANTLR3/gUnit+-+Grammar+Unit+Testing</url>
-  
-  <!--
-
-    Tell Maven which other artifacts we need in order to
-    build, run and test the ANTLR Tool. The ANTLR Tool uses earlier versions
-    of ANTLR at runtime (for the moment), uses the current
-    released version of ANTLR String template, but obviously is
-    reliant on the latest snapshot of the runtime, which will either be
-    taken from the antlr-snapshot repository, or your local .m2
-    repository if you built and installed that locally.
-
-    -->
-    <dependencies>
-
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <version>4.8.2</version>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.antlr</groupId>
-            <artifactId>antlr</artifactId>
-            <version>${project.version}</version>
-            <scope>compile</scope>
-            
-        </dependency>
-
-        <dependency>
-            <groupId>org.antlr</groupId>
-            <artifactId>ST4</artifactId>
-            <version>4.0.4</version>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.antlr</groupId>
-            <artifactId>stringtemplate</artifactId>
-            <version>3.2.1</version>
-            <scope>compile</scope>
-        </dependency>
-        
-    </dependencies>
-
-    <build>
-
-        <defaultGoal>install</defaultGoal>
-
-        <plugins>
-
-            <plugin>
-                <groupId>org.antlr</groupId>
-                <artifactId>antlr3-maven-plugin</artifactId>
-                <version>${project.version}</version>
-                <configuration></configuration>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>antlr</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-
-            <plugin>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <configuration>
-                    <source>1.6</source>
-                    <target>jsr14</target>
-                    <sourceDirectory>src</sourceDirectory>
-                </configuration>
-            </plugin>
-
-            <plugin>
-                <artifactId>maven-surefire-plugin</artifactId>
-                <version>2.9</version>
-            </plugin>
-
-            <plugin>
-                <groupId>org.codehaus.mojo</groupId>
-                <artifactId>findbugs-maven-plugin</artifactId>
-                <version>2.3.2</version>
-                <configuration>
-                    <findbugsXmlOutput>true</findbugsXmlOutput>
-                    <findbugsXmlWithMessages>true</findbugsXmlWithMessages>
-                    <xmlOutput>true</xmlOutput>
-                </configuration>
-            </plugin>
-
-        </plugins>
-
-        <extensions>
-            <extension>
-                <groupId>org.apache.maven.wagon</groupId>
-                <artifactId>wagon-ssh-external</artifactId>
-                <version>1.0-beta-2</version>
-            </extension>
-        </extensions>
-
-    
-    </build>
-
-</project>
diff --git a/antlr-3.4/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/StGUnit.g b/antlr-3.4/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/StGUnit.g
deleted file mode 100644
index 1701214..0000000
--- a/antlr-3.4/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/StGUnit.g
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
-[The "BSD licence"]
-Copyright (c) 2007-2008 Leon Jen-Yuan Su
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-grammar StGUnit;
-
-options {language=Java;}
-
-tokens {
-	OK = 'OK';
-	FAIL = 'FAIL';
-	DOC_COMMENT;
-}
-
-@header {
-package org.antlr.gunit.swingui.parsers;
-import org.antlr.gunit.swingui.model.*;
-import org.antlr.gunit.swingui.runner.*;
-}
-
-@lexer::header {package org.antlr.gunit.swingui.parsers;}
-
-@members {
-public TestSuiteAdapter adapter;
-}
-
-gUnitDef
-	:	'gunit' name=id {adapter.setGrammarName($name.text);}
-	    ('walks' id)? ';' 
-		header? suite*
-	;
-
-header
-	:	'@header' ACTION
-	;
-		
-suite
-	:	(	parserRule=RULE_REF ('walks' RULE_REF)? 
-	        {adapter.startRule($parserRule.text);}
-		|	lexerRule=TOKEN_REF 
-			{adapter.startRule($lexerRule.text);}
-		)
-		':'
-		test+
-		{adapter.endRule();}
-	;
-
-test
-	:	input expect
-		{adapter.addTestCase($input.in, $expect.out);}
-	;
-	
-expect returns [ITestCaseOutput out]
-	:	OK			{$out = adapter.createBoolOutput(true);}
-	|	FAIL		{$out = adapter.createBoolOutput(false);}
-	|	'returns' RETVAL {$out = adapter.createReturnOutput($RETVAL.text);}
-	|	'->' output {$out = adapter.createStdOutput($output.text);}
-	|	'->' AST	{$out = adapter.createAstOutput($AST.text);}
-	;
-
-input returns [ITestCaseInput in]
-	:	STRING 		{$in = adapter.createStringInput($STRING.text);}
-	|	ML_STRING	{$in = adapter.createMultiInput($ML_STRING.text);}
-	|	fileInput	{$in = adapter.createFileInput($fileInput.path);}
-	;
-
-output
-	:	STRING
-	|	ML_STRING
-	|	ACTION
-	;
-	
-fileInput returns [String path]
-	:	id {$path = $id.text;} (EXT {$path += $EXT.text;})? 
-	;
-
-id 	:	TOKEN_REF
-	|	RULE_REF
-	;
-
-// L E X I C A L   R U L E S
-
-SL_COMMENT
- 	:	'//' ~('\r'|'\n')* '\r'? '\n' {$channel=HIDDEN;}
-	;
-
-ML_COMMENT
-	:	'/*' {$channel=HIDDEN;} .* '*/'
-	;
-
-STRING
-	:	'"' ( ESC | ~('\\'|'"') )* '"'
-	;
-
-ML_STRING
-	:	'<<' .* '>>' 
-	;
-
-TOKEN_REF
-	:	'A'..'Z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
-	;
-
-RULE_REF
-	:	'a'..'z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
-	;
-
-EXT	:	'.'('a'..'z'|'A'..'Z'|'0'..'9')+;
-
-RETVAL	:	NESTED_RETVAL
-	;
-
-fragment
-NESTED_RETVAL :
-	'['
-	(	options {greedy=false;}
-	:	NESTED_RETVAL
-	|	.
-	)*
-	']'
-	;
-
-AST	:	NESTED_AST (' '? NESTED_AST)*;
-
-fragment
-NESTED_AST :
-	'('
-	(	options {greedy=false;}
-	:	NESTED_AST
-	|	.
-	)*
-	')'
-	;
-
-ACTION
-	:	NESTED_ACTION
-	;
-
-fragment
-NESTED_ACTION :
-	'{'
-	(	options {greedy=false; k=3;}
-	:	NESTED_ACTION
-	|	STRING_LITERAL
-	|	CHAR_LITERAL
-	|	.
-	)*
-	'}'
-	;
-
-fragment
-CHAR_LITERAL
-	:	'\'' ( ESC | ~('\''|'\\') ) '\''
-	;
-
-fragment
-STRING_LITERAL
-	:	'"' ( ESC | ~('\\'|'"') )* '"'
-	;
-
-fragment
-ESC	:	'\\'
-		(	'n'
-		|	'r'
-		|	't'
-		|	'b'
-		|	'f'
-		|	'"'
-		|	'\''
-		|	'\\'
-		|	'>'
-		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
-		|	. // unknown, leave as it is
-		)
-	;
-	
-fragment
-XDIGIT :
-		'0' .. '9'
-	|	'a' .. 'f'
-	|	'A' .. 'F'
-	;
-
-WS	:	(	' '
-		|	'\t'
-		|	'\r'? '\n'
-		)+
-		{$channel=HIDDEN;}
-	;
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/AbstractTest.java b/antlr-3.4/gunit/src/main/java/org/antlr/gunit/AbstractTest.java
deleted file mode 100644
index 158bf04..0000000
--- a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/AbstractTest.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2007 Kenny MacDermid
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.gunit;
-
-public abstract class AbstractTest implements ITestCase {
-	// store essential individual test result for string template
-	protected String header;
-	protected String actual;
-	
-	protected boolean hasErrorMsg;
-	
-	private String testedRuleName;
-	private int testCaseIndex;
-	
-	// TODO: remove these. They're only used as part of a refactor to keep the
-	//       code cleaner. It is a mock-instanceOf() replacement.
-	public abstract int getType();
-	public abstract String getText();
-	
-	public abstract String getExpected();
-	// return an escaped string of the expected result
-	public String getExpectedResult() {
-		String expected = getExpected();
-		if ( expected!=null ) expected = JUnitCodeGen.escapeForJava(expected);
-		return expected;
-	}
-	public abstract String getResult(gUnitTestResult testResult);
-	public String getHeader() { return this.header; }
-	public String getActual() { return this.actual; }
-	// return an escaped string of the actual result
-	public String getActualResult() {
-		String actual = getActual();
-		// there is no need to escape the error message from ANTLR 
-		if ( actual!=null && !hasErrorMsg ) actual = JUnitCodeGen.escapeForJava(actual);
-		return actual;
-	}
-	
-	public String getTestedRuleName() { return this.testedRuleName; }
-	public int getTestCaseIndex() { return this.testCaseIndex; }
-	
-	public void setHeader(String rule, String lexicalRule, String treeRule, int numOfTest, int line) {
-		StringBuffer buf = new StringBuffer();
-		buf.append("test" + numOfTest + " (");
-		if ( treeRule!=null ) {
-			buf.append(treeRule+" walks ");
-		}
-		if ( lexicalRule!=null ) {
-			buf.append(lexicalRule + ", line"+line+")" + " - ");
-		}
-		else buf.append(rule + ", line"+line+")" + " - ");
-		this.header = buf.toString();
-	}
-	public void setActual(String actual) { this.actual = actual; }
-	
-	public void setTestedRuleName(String testedRuleName) { this.testedRuleName = testedRuleName; }
-	public void setTestCaseIndex(int testCaseIndex) { this.testCaseIndex = testCaseIndex; }
-	
-}
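For reference, setHeader() above builds the per-test header from the rule name, test index, and source line; for a plain parser-rule test (no tree or lexical rule) the else-branch produces strings such as "test3 (expr, line12) - ". The minimal sketch below re-derives that format without depending on the deleted class; the class and method names are illustrative only.

    // Sketch only: reproduces the header format AbstractTest.setHeader() builds
    // for a plain parser-rule test (no tree rule, no lexical rule).
    public class HeaderFormatSketch {

        static String header(String rule, int numOfTest, int line) {
            // Mirrors the else-branch of setHeader(): "test<N> (<rule>, line<L>) - "
            return "test" + numOfTest + " (" + rule + ", line" + line + ") - ";
        }

        public static void main(String[] args) {
            System.out.println(header("expr", 3, 12));   // prints: test3 (expr, line12) -
        }
    }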
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/JUnitCodeGen.java b/antlr-3.4/gunit/src/main/java/org/antlr/gunit/JUnitCodeGen.java
deleted file mode 100644
index 4713cf7..0000000
--- a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/JUnitCodeGen.java
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2007-2008 Leon Jen-Yuan Su
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.gunit;
-
-import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.stringtemplate.StringTemplateGroup;
-import org.antlr.stringtemplate.StringTemplateGroupLoader;
-import org.antlr.stringtemplate.CommonGroupLoader;
-import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
-
-import java.io.*;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.logging.ConsoleHandler;
-import java.util.logging.Handler;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-public class JUnitCodeGen {
-    public GrammarInfo grammarInfo;
-    public Map<String, String> ruleWithReturn;
-    private final String testsuiteDir;
-    private String outputDirectoryPath = ".";
-
-    private final static Handler console = new ConsoleHandler();
-    private static final Logger logger = Logger.getLogger(JUnitCodeGen.class.getName());
-    static {
-        logger.addHandler(console);
-    }
-
-    public JUnitCodeGen(GrammarInfo grammarInfo, String testsuiteDir) throws ClassNotFoundException {
-        this( grammarInfo, determineClassLoader(), testsuiteDir);
-    }
-
-    private static ClassLoader determineClassLoader() {
-        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-        if ( classLoader == null ) {
-            classLoader = JUnitCodeGen.class.getClassLoader();
-        }
-        return classLoader;
-    }
-
-    public JUnitCodeGen(GrammarInfo grammarInfo, ClassLoader classLoader, String testsuiteDir) throws ClassNotFoundException {
-        this.grammarInfo = grammarInfo;
-        this.testsuiteDir = testsuiteDir;
-        /** Map the names of rules that have a return value to their return types */
-        ruleWithReturn = new HashMap<String, String>();
-        Class parserClass = locateParserClass( grammarInfo, classLoader );
-        Method[] methods = parserClass.getDeclaredMethods();
-        for(Method method : methods) {
-            if ( !method.getReturnType().getName().equals("void") ) {
-                ruleWithReturn.put(method.getName(), method.getReturnType().getName().replace('$', '.'));
-            }
-        }
-    }
-
-    private Class locateParserClass(GrammarInfo grammarInfo, ClassLoader classLoader) throws ClassNotFoundException {
-        String parserClassName = grammarInfo.getGrammarName() + "Parser";
-        if ( grammarInfo.getGrammarPackage() != null ) {
-            parserClassName = grammarInfo.getGrammarPackage()+ "." + parserClassName;
-        }
-        return classLoader.loadClass( parserClassName );
-    }
-
-    public String getOutputDirectoryPath() {
-        return outputDirectoryPath;
-    }
-
-    public void setOutputDirectoryPath(String outputDirectoryPath) {
-        this.outputDirectoryPath = outputDirectoryPath;
-    }
-
-    public void compile() throws IOException{
-        String junitFileName;
-        if ( grammarInfo.getTreeGrammarName()!=null ) {
-            junitFileName = "Test"+grammarInfo.getTreeGrammarName();
-        }
-        else {
-            junitFileName = "Test"+grammarInfo.getGrammarName();
-        }
-        String lexerName = grammarInfo.getGrammarName()+"Lexer";
-        String parserName = grammarInfo.getGrammarName()+"Parser";
-
-        StringTemplateGroupLoader loader = new CommonGroupLoader("org/antlr/gunit", null);
-        StringTemplateGroup.registerGroupLoader(loader);
-        StringTemplateGroup.registerDefaultLexer(AngleBracketTemplateLexer.class);
-        StringBuffer buf = compileToBuffer(junitFileName, lexerName, parserName);
-        writeTestFile(".", junitFileName+".java", buf.toString());
-    }
-
-    public StringBuffer compileToBuffer(String className, String lexerName, String parserName) {
-        StringTemplateGroup group = StringTemplateGroup.loadGroup("junit");
-        StringBuffer buf = new StringBuffer();
-        buf.append(genClassHeader(group, className, lexerName, parserName));
-        buf.append(genTestRuleMethods(group));
-        buf.append("\n\n}");
-        return buf;
-    }
-
-    protected String genClassHeader(StringTemplateGroup group, String junitFileName, String lexerName, String parserName) {
-        StringTemplate classHeaderST = group.getInstanceOf("classHeader");
-        if ( grammarInfo.getTestPackage()!=null ) {	// Set up class package if there is
-            classHeaderST.setAttribute("header", "package "+grammarInfo.getTestPackage()+";");
-        }
-        classHeaderST.setAttribute("junitFileName", junitFileName);
-
-        String lexerPath = null;
-        String parserPath = null;
-        String treeParserPath = null;
-        String packagePath = null;
-        boolean isTreeGrammar = false;
-        boolean hasPackage = false;
-        /** Set up appropriate class path for parser/tree parser if using package */
-        if ( grammarInfo.getGrammarPackage()!=null ) {
-            hasPackage = true;
-            packagePath = "./"+grammarInfo.getGrammarPackage().replace('.', '/');
-            lexerPath = grammarInfo.getGrammarPackage()+"."+lexerName;
-            parserPath = grammarInfo.getGrammarPackage()+"."+parserName;
-            if ( grammarInfo.getTreeGrammarName()!=null ) {
-                treeParserPath = grammarInfo.getGrammarPackage()+"."+grammarInfo.getTreeGrammarName();
-                isTreeGrammar = true;
-            }
-        }
-        else {
-            lexerPath = lexerName;
-            parserPath = parserName;
-            if ( grammarInfo.getTreeGrammarName()!=null ) {
-                treeParserPath = grammarInfo.getTreeGrammarName();
-                isTreeGrammar = true;
-            }
-        }
-        // also set up custom tree adaptor if necessary
-        String treeAdaptorPath = null;
-        boolean hasTreeAdaptor = false;
-        if ( grammarInfo.getAdaptor()!=null ) {
-            hasTreeAdaptor = true;
-            treeAdaptorPath = grammarInfo.getAdaptor();
-        }
-        classHeaderST.setAttribute("hasTreeAdaptor", hasTreeAdaptor);
-        classHeaderST.setAttribute("treeAdaptorPath", treeAdaptorPath);
-        classHeaderST.setAttribute("hasPackage", hasPackage);
-        classHeaderST.setAttribute("packagePath", packagePath);
-        classHeaderST.setAttribute("lexerPath", lexerPath);
-        classHeaderST.setAttribute("parserPath", parserPath);
-        classHeaderST.setAttribute("treeParserPath", treeParserPath);
-        classHeaderST.setAttribute("isTreeGrammar", isTreeGrammar);
-        return classHeaderST.toString();
-    }
-
-    protected String genTestRuleMethods(StringTemplateGroup group) {
-        StringBuffer buf = new StringBuffer();
-        if ( grammarInfo.getTreeGrammarName()!=null ) {	// Generate JUnit code for tree grammar rules
-            genTreeMethods(group, buf);
-        }
-        else {	// Generate JUnit code for parser grammar rules
-            genParserMethods(group, buf);
-        }
-        return buf.toString();
-    }
-
-    private void genParserMethods(StringTemplateGroup group, StringBuffer buf) {
-        for ( gUnitTestSuite ts: grammarInfo.getRuleTestSuites() ) {
-            int i = 0;
-            for ( gUnitTestInput input: ts.testSuites.keySet() ) {	// each rule may contain multiple tests
-                i++;
-                StringTemplate testRuleMethodST;
-                /** If rule has multiple return values or ast*/
-                if ( ts.testSuites.get(input).getType()== gUnitParser.ACTION && ruleWithReturn.containsKey(ts.getRuleName()) ) {
-                    testRuleMethodST = group.getInstanceOf("testRuleMethod2");
-                    String outputString = ts.testSuites.get(input).getText();
-                    testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(ts.getRuleName())+i);
-                    testRuleMethodST.setAttribute("testRuleName", '"'+ts.getRuleName()+'"');
-                    testRuleMethodST.setAttribute("test", input);
-                    testRuleMethodST.setAttribute("returnType", ruleWithReturn.get(ts.getRuleName()));
-                    testRuleMethodST.setAttribute("expecting", outputString);
-                }
-                else {
-                    String testRuleName;
-                    // need to determine whether it's a test for parser rule or lexer rule
-                    if ( ts.isLexicalRule() ) testRuleName = ts.getLexicalRuleName();
-                    else testRuleName = ts.getRuleName();
-                    testRuleMethodST = group.getInstanceOf("testRuleMethod");
-                    String outputString = ts.testSuites.get(input).getText();
-                    testRuleMethodST.setAttribute("isLexicalRule", ts.isLexicalRule());
-                    testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(testRuleName)+i);
-                    testRuleMethodST.setAttribute("testRuleName", '"'+testRuleName+'"');
-                    testRuleMethodST.setAttribute("test", input);
-                    testRuleMethodST.setAttribute("tokenType", getTypeString(ts.testSuites.get(input).getType()));
-
-                    // normalize whitespace
-                    outputString = normalizeTreeSpec(outputString);
-
-                    if ( ts.testSuites.get(input).getType()==gUnitParser.ACTION ) {	// trim ';' at the end of ACTION if there is...
-                        //testRuleMethodST.setAttribute("expecting", outputString.substring(0, outputString.length()-1));
-                        testRuleMethodST.setAttribute("expecting", outputString);
-                    }
-                    else if ( ts.testSuites.get(input).getType()==gUnitParser.RETVAL ) {	// Expected: RETVAL
-                        testRuleMethodST.setAttribute("expecting", outputString);
-                    }
-                    else {	// Attach "" to expected STRING or AST
-                        // strip newlines for (...) tree stuff
-                        outputString = outputString.replaceAll("\n", "");
-                        testRuleMethodST.setAttribute("expecting", '"'+escapeForJava(outputString)+'"');
-                    }
-                }
-                buf.append(testRuleMethodST.toString());
-            }
-        }
-    }
-
-    private void genTreeMethods(StringTemplateGroup group, StringBuffer buf) {
-        for ( gUnitTestSuite ts: grammarInfo.getRuleTestSuites() ) {
-            int i = 0;
-            for ( gUnitTestInput input: ts.testSuites.keySet() ) {	// each rule may contain multiple tests
-                i++;
-                StringTemplate testRuleMethodST;
-                /** If rule has multiple return values or ast*/
-                if ( ts.testSuites.get(input).getType()== gUnitParser.ACTION && ruleWithReturn.containsKey(ts.getTreeRuleName()) ) {
-                    testRuleMethodST = group.getInstanceOf("testTreeRuleMethod2");
-                    String outputString = ts.testSuites.get(input).getText();
-                    testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(ts.getTreeRuleName())+"_walks_"+
-                                                                changeFirstCapital(ts.getRuleName())+i);
-                    testRuleMethodST.setAttribute("testTreeRuleName", '"'+ts.getTreeRuleName()+'"');
-                    testRuleMethodST.setAttribute("testRuleName", '"'+ts.getRuleName()+'"');
-                    testRuleMethodST.setAttribute("test", input);
-                    testRuleMethodST.setAttribute("returnType", ruleWithReturn.get(ts.getTreeRuleName()));
-                    testRuleMethodST.setAttribute("expecting", outputString);
-                }
-                else {
-                    testRuleMethodST = group.getInstanceOf("testTreeRuleMethod");
-                    String outputString = ts.testSuites.get(input).getText();
-                    testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(ts.getTreeRuleName())+"_walks_"+
-                                                                changeFirstCapital(ts.getRuleName())+i);
-                    testRuleMethodST.setAttribute("testTreeRuleName", '"'+ts.getTreeRuleName()+'"');
-                    testRuleMethodST.setAttribute("testRuleName", '"'+ts.getRuleName()+'"');
-                    testRuleMethodST.setAttribute("test", input);
-                    testRuleMethodST.setAttribute("tokenType", getTypeString(ts.testSuites.get(input).getType()));
-
-                    if ( ts.testSuites.get(input).getType()==gUnitParser.ACTION ) {	// trim ';' at the end of ACTION if there is...
-                        //testRuleMethodST.setAttribute("expecting", outputString.substring(0, outputString.length()-1));
-                        testRuleMethodST.setAttribute("expecting", outputString);
-                    }
-                    else if ( ts.testSuites.get(input).getType()==gUnitParser.RETVAL ) {	// Expected: RETVAL
-                        testRuleMethodST.setAttribute("expecting", outputString);
-                    }
-                    else {	// Attach "" to expected STRING or AST
-                        testRuleMethodST.setAttribute("expecting", '"'+escapeForJava(outputString)+'"');
-                    }
-                }
-                buf.append(testRuleMethodST.toString());
-            }
-        }
-    }
-
-    // return a meaningful gUnit token type name instead of using the magic number
-    public String getTypeString(int type) {
-        String typeText;
-        switch (type) {
-            case gUnitParser.OK :
-                typeText = "org.antlr.gunit.gUnitParser.OK";
-                break;
-            case gUnitParser.FAIL :
-                typeText = "org.antlr.gunit.gUnitParser.FAIL";
-                break;
-            case gUnitParser.STRING :
-                typeText = "org.antlr.gunit.gUnitParser.STRING";
-                break;
-            case gUnitParser.ML_STRING :
-                typeText = "org.antlr.gunit.gUnitParser.ML_STRING";
-                break;
-            case gUnitParser.RETVAL :
-                typeText = "org.antlr.gunit.gUnitParser.RETVAL";
-                break;
-            case gUnitParser.AST :
-                typeText = "org.antlr.gunit.gUnitParser.AST";
-                break;
-            default :
-                typeText = "org.antlr.gunit.gUnitParser.EOF";
-                break;
-        }
-        return typeText;
-    }
-
-    protected void writeTestFile(String dir, String fileName, String content) {
-        try {
-            File f = new File(dir, fileName);
-            FileWriter w = new FileWriter(f);
-            BufferedWriter bw = new BufferedWriter(w);
-            bw.write(content);
-            bw.close();
-            w.close();
-        }
-        catch (IOException ioe) {
-            logger.log(Level.SEVERE, "can't write file", ioe);
-        }
-    }
-
-    public static String escapeForJava(String inputString) {
-        // Gotta escape literal backslash before putting in specials that use escape.
-        inputString = inputString.replace("\\", "\\\\");
-        // Then double quotes need escaping (singles are OK of course).
-        inputString = inputString.replace("\"", "\\\"");
-        // note: replace a literal newline with the two-character sequence "\n", a tab with "\t", and so on
-        inputString = inputString.replace("\n", "\\n").replace("\t", "\\t").replace("\r", "\\r").replace("\b", "\\b").replace("\f", "\\f");
-
-        return inputString;
-    }
-
-    protected String changeFirstCapital(String ruleName) {
-        String firstChar = String.valueOf(ruleName.charAt(0));
-        return firstChar.toUpperCase()+ruleName.substring(1);
-    }
-
-    public static String normalizeTreeSpec(String t) {
-        List<String> words = new ArrayList<String>();
-        int i = 0;
-        StringBuilder word = new StringBuilder();
-        while ( i<t.length() ) {
-            if ( t.charAt(i)=='(' || t.charAt(i)==')' ) {
-                if ( word.length()>0 ) {
-                    words.add(word.toString());
-                    word.setLength(0);
-                }
-                words.add(String.valueOf(t.charAt(i)));
-                i++;
-                continue;
-            }
-            if ( Character.isWhitespace(t.charAt(i)) ) {
-                // upon WS, save word
-                if ( word.length()>0 ) {
-                    words.add(word.toString());
-                    word.setLength(0);
-                }
-                i++;
-                continue;
-            }
-
-            // ... "x" or ...("x"
-            if ( t.charAt(i)=='"' && (i-1)>=0 &&
-                 (t.charAt(i-1)=='(' || Character.isWhitespace(t.charAt(i-1))) )
-            {
-                i++;
-                while ( i<t.length() && t.charAt(i)!='"' ) {
-                    if ( t.charAt(i)=='\\' &&
-                         (i+1)<t.length() && t.charAt(i+1)=='"' ) // handle \"
-                    {
-                        word.append('"');
-                        i+=2;
-                        continue;
-                    }
-                    word.append(t.charAt(i));
-                    i++;
-                }
-                i++; // skip final "
-                words.add(word.toString());
-                word.setLength(0);
-                continue;
-            }
-            word.append(t.charAt(i));
-            i++;
-        }
-        if ( word.length()>0 ) {
-            words.add(word.toString());
-        }
-        //System.out.println("words="+words);
-        StringBuilder buf = new StringBuilder();
-        for (int j=0; j<words.size(); j++) {
-            if ( j>0 && !words.get(j).equals(")") &&
-                 !words.get(j-1).equals("(") ) {
-                buf.append(' ');
-            }
-            buf.append(words.get(j));
-        }
-        return buf.toString();
-    }
-
-}
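The two public static helpers above are easy to exercise on their own: escapeForJava() doubles backslashes and quotes and rewrites control characters as escape sequences, while normalizeTreeSpec() collapses extra whitespace in a tree specification. A small usage sketch, assuming a pre-3.5 gUnit build (containing the class deleted here) is still on the classpath; the demo class name and sample inputs are illustrative only.

    import org.antlr.gunit.JUnitCodeGen;

    // Usage sketch for the two static helpers defined in JUnitCodeGen above.
    public class CodeGenHelpersDemo {
        public static void main(String[] args) {
            // Escapes quotes and control characters:
            // prints  line1\nline2 \"quoted\"
            System.out.println(JUnitCodeGen.escapeForJava("line1\nline2 \"quoted\""));

            // Collapses extra whitespace in a tree spec:
            // prints  (MULT (ADD 1 2) 3)
            System.out.println(JUnitCodeGen.normalizeTreeSpec("(MULT  (ADD 1 2)   3)"));
        }
    }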
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitBaseTest.java b/antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitBaseTest.java
deleted file mode 100644
index bf089c3..0000000
--- a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitBaseTest.java
+++ /dev/null
@@ -1,489 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2007-2008 Leon, Jen-Yuan Su
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.gunit;
-
-import junit.framework.TestCase;
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.CommonTree;
-import org.antlr.runtime.tree.CommonTreeNodeStream;
-import org.antlr.runtime.tree.TreeAdaptor;
-import org.antlr.runtime.tree.TreeNodeStream;
-import org.antlr.stringtemplate.StringTemplate;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.PrintStream;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-
-/** All gUnit-generated JUnit classes should extend this class,
- *  which implements the essential methods for triggering the
- *  ANTLR parser/tree walker.
- */
-public abstract class gUnitBaseTest extends TestCase {
-	
-	public String treeAdaptorPath;
-	public String packagePath;
-	public String lexerPath;
-	public String parserPath;
-	public String treeParserPath;
-	
-	protected String stdout;
-	protected String stderr;
-	
-	private PrintStream console = System.out;
-	private PrintStream consoleErr = System.err;
-	
-	// Invoke target lexer.rule
-	public String execLexer(String testRuleName, int line, String testInput, boolean isFile) throws Exception {
-		CharStream input;
-		/** Set up ANTLR input stream based on input source, file or String */
-		if ( isFile ) {
-			String filePath = testInput;
-			File testInputFile = new File(filePath);
-			// if input test file is not found under the current dir, also try to look for it under the package dir
-			if ( !testInputFile.exists() && packagePath!=null ) {
-				testInputFile = new File(packagePath, filePath);
-				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
-			}
-			input = new ANTLRFileStream(filePath);
-		}
-		else {
-			input = new ANTLRStringStream(testInput);
-		}
-		Class lexer = null;
-		PrintStream ps = null;		// for redirecting stdout later
-		PrintStream ps2 = null;		// for redirecting stderr later
-        try {
-            /** Use Reflection to create instances of lexer and parser */
-        	lexer = Class.forName(lexerPath);
-            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
-            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);        
-            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args   
-            Lexer lexObj = (Lexer)lexConstructor.newInstance(lexArgs);				// makes new instance of lexer    
-            input.setLine(line);
-
-            Method ruleName = lexer.getMethod("m"+testRuleName, new Class[0]);
-            
-            /** Start of I/O Redirecting */
-            ByteArrayOutputStream out = new ByteArrayOutputStream();
-            ByteArrayOutputStream err = new ByteArrayOutputStream();
-            ps = new PrintStream(out);
-            ps2 = new PrintStream(err);
-            System.setOut(ps);
-            System.setErr(ps2);
-            /** End of redirecting */
-
-            /** Invoke lexer rule, and get the current index in CharStream */
-            ruleName.invoke(lexObj, new Object[0]);
-            Method ruleName2 = lexer.getMethod("getCharIndex", new Class[0]);
-            int currentIndex = (Integer) ruleName2.invoke(lexObj, new Object[0]);
-            if ( currentIndex!=input.size() ) {
-            	ps2.println("extra text found, '"+input.substring(currentIndex, input.size()-1)+"'");
-            }
-			
-            this.stdout = null;
-			this.stderr = null;
-            
-			if ( err.toString().length()>0 ) {
-				this.stderr = err.toString();
-				return this.stderr;
-			}
-			if ( out.toString().length()>0 ) {
-				this.stdout = out.toString();
-			}
-			if ( err.toString().length()==0 && out.toString().length()==0 ) {
-				return null;
-			}
-        } catch (ClassNotFoundException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (SecurityException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (NoSuchMethodException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (IllegalArgumentException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (InstantiationException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (IllegalAccessException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (InvocationTargetException e) {	// This exception could be caused from ANTLR Runtime Exception, e.g. MismatchedTokenException
-        	if ( e.getCause()!=null ) this.stderr = e.getCause().toString();
-			else this.stderr = e.toString();
-        	return this.stderr;
-        } finally {
-        	try {
-        		if ( ps!=null ) ps.close();
-    			if ( ps2!=null ) ps2.close();
-    			System.setOut(console);			// Reset standard output
-    			System.setErr(consoleErr);		// Reset standard err out
-        	} catch (Exception e) {
-        		e.printStackTrace();
-        	}
-        }
-        return this.stdout;
-	}
-	
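As an aside, execLexer() above (and execParser() below) wrap the reflective rule invocation in the same stdout/stderr capture pattern: swap System.out and System.err for PrintStreams over byte buffers, then restore the originals in a finally block. The standalone sketch below isolates just that pattern; the class name and messages are illustrative only.

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    // Sketch only: the capture/restore pattern used around the reflective calls
    // in execLexer() and execParser(), reduced to a standalone example.
    public class CaptureSketch {
        public static void main(String[] args) {
            PrintStream console = System.out;          // keep the real streams
            PrintStream consoleErr = System.err;
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            ByteArrayOutputStream err = new ByteArrayOutputStream();
            try {
                System.setOut(new PrintStream(out));
                System.setErr(new PrintStream(err));
                System.out.println("captured");        // stands in for the invoked rule's output
            } finally {
                System.setOut(console);                // always restore, as the finally blocks here do
                System.setErr(consoleErr);
            }
            console.println("stdout was: " + out.toString().trim());
        }
    }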
-	// Invoke target parser.rule
-	
-	public Object execParser(String testRuleName, int line, String testInput, boolean isFile) throws Exception {
-		CharStream input;
-		/** Set up ANTLR input stream based on input source, file or String */
-		if ( isFile ) {
-			String filePath = testInput;
-			File testInputFile = new File(filePath);
-			// if input test file is not found under the current dir, also try to look for it under the package dir
-			if ( !testInputFile.exists() && packagePath!=null ) {
-				testInputFile = new File(packagePath, filePath);
-				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
-			}
-			input = new ANTLRFileStream(filePath);
-		}
-		else {
-			input = new ANTLRStringStream(testInput);
-		}
-		Class lexer = null;
-		Class parser = null;
-		PrintStream ps = null;		// for redirecting stdout later
-		PrintStream ps2 = null;		// for redirecting stderr later
-        ByteArrayOutputStream out = null;
-        ByteArrayOutputStream err = null;
-		try {
-			/** Use Reflection to create instances of lexer and parser */
-			lexer = Class.forName(lexerPath);
-            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
-            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);
-            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args   
-            Lexer lexObj = (Lexer)lexConstructor.newInstance(lexArgs);				// makes new instance of lexer
-            input.setLine(line);
-
-            CommonTokenStream tokens = new CommonTokenStream(lexObj);
-            parser = Class.forName(parserPath);
-            Class[] parArgTypes = new Class[]{TokenStream.class};				// assign type to parser's args
-            Constructor parConstructor = parser.getConstructor(parArgTypes);
-            Object[] parArgs = new Object[]{tokens};							// assign value to parser's args  
-            Parser parObj = (Parser)parConstructor.newInstance(parArgs);				// makes new instance of parser
-            
-            // set up customized tree adaptor if necessary
-            if ( treeAdaptorPath!=null ) {
-            	parArgTypes = new Class[]{TreeAdaptor.class};
-            	Method _setTreeAdaptor = parser.getMethod("setTreeAdaptor", parArgTypes);
-            	Class _treeAdaptor = Class.forName(treeAdaptorPath);
-            	_setTreeAdaptor.invoke(parObj, _treeAdaptor.newInstance());
-            }
-
-            Method ruleName = parser.getMethod(testRuleName);
-
-            /** Start of I/O Redirecting */
-            out = new ByteArrayOutputStream();
-            err = new ByteArrayOutputStream();
-            ps = new PrintStream(out);
-            ps2 = new PrintStream(err);
-            System.setOut(ps);
-            System.setErr(ps2);
-            /** End of redirecting */
-
-			/** Invoke grammar rule, and store if there is a return value */
-            Object ruleReturn = ruleName.invoke(parObj);
-            String astString = null;
-            String stString = null;
-            /** If rule has return value, determine if it contains an AST or a ST */
-            if ( ruleReturn!=null ) {
-                if ( ruleReturn.getClass().toString().indexOf(testRuleName+"_return")>0 ) {
-                	try {	// NullPointerException may happen here...
-                		Class _return = Class.forName(parserPath+"$"+testRuleName+"_return");
-                		Method[] methods = _return.getDeclaredMethods();
-                		for(Method method : methods) {
-			                if ( method.getName().equals("getTree") ) {
-			                	Method returnName = _return.getMethod("getTree");
-		                    	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
-		                    	astString = tree.toStringTree();
-			                }
-			                else if ( method.getName().equals("getTemplate") ) {
-			                	Method returnName = _return.getMethod("getTemplate");
-			                	StringTemplate st = (StringTemplate) returnName.invoke(ruleReturn);
-			                	stString = st.toString();
-			                }
-			            }
-                	}
-                	catch(Exception e) {
-                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
-                	}
-                }
-            }
-
-			this.stdout = "";
-			this.stderr = "";
-			
-			/** Invalid input */
-            if ( tokens.index()!=tokens.size()-1 ) {
-            	//throw new InvalidInputException();
-            	this.stderr += "Stopped parsing at token index "+tokens.index()+": ";
-            }
-            
-			// retVal could be actual return object from rule, stderr or stdout
-            this.stdout += out.toString();
-            this.stderr += err.toString();
-
-			if ( err.toString().length()>0 ) return this.stderr;
-			if ( out.toString().length()>0 ) return this.stdout;
-			if ( astString!=null ) {	// Return toStringTree of AST
-				return astString;
-			}
-			else if ( stString!=null ) {// Return toString of ST
-				return stString;
-			}
-			if ( ruleReturn!=null ) {
-				return ruleReturn;
-			}
-			if ( err.toString().length()==0 && out.toString().length()==0 ) {
-				return null;
-			}
-		}
-        catch (ClassNotFoundException e) {
-			e.printStackTrace(); System.exit(1);
-		}
-        catch (SecurityException e) {
-			e.printStackTrace(); System.exit(1);
-		}
-        catch (NoSuchMethodException e) {
-			e.printStackTrace(); System.exit(1);
-		}
-        catch (IllegalAccessException e) {
-			e.printStackTrace(); System.exit(1);
-		}
-        catch (InvocationTargetException e) {
-            this.stdout = out.toString();
-            this.stderr = err.toString();
-
-			if ( e.getCause()!=null ) this.stderr += e.getCause().toString();
-			else this.stderr += e.toString();
-        	return this.stderr;
-		} finally {
-        	try {
-        		if ( ps!=null ) ps.close();
-    			if ( ps2!=null ) ps2.close();
-    			System.setOut(console);			// Reset standard output
-    			System.setErr(consoleErr);		// Reset standard err out
-        	} catch (Exception e) {
-        		e.printStackTrace();
-        	}
-        }
-		return this.stdout;
-	}
-	
-	// Invoke target parser.rule
-	public Object execTreeParser(String testTreeRuleName, String testRuleName, String testInput, boolean isFile) throws Exception {
-		CharStream input;
-		if ( isFile ) {
-			String filePath = testInput;
-			File testInputFile = new File(filePath);
-			// if input test file is not found under the current dir, also try to look for it under the package dir
-			if ( !testInputFile.exists() && packagePath!=null ) {
-				testInputFile = new File(packagePath, filePath);
-				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
-			}
-			input = new ANTLRFileStream(filePath);
-		}
-		else {
-			input = new ANTLRStringStream(testInput);
-		}
-		Class lexer = null;
-		Class parser = null;
-		Class treeParser = null;
-		PrintStream ps = null;		// for redirecting stdout later
-		PrintStream ps2 = null;		// for redirecting stderr later
-		try {
-			/** Use Reflection to create instances of lexer and parser */
-        	lexer = Class.forName(lexerPath);
-            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
-            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);        
-            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args   
-            Object lexObj = lexConstructor.newInstance(lexArgs);				// makes new instance of lexer    
-            
-            CommonTokenStream tokens = new CommonTokenStream((Lexer) lexObj);
-            
-            parser = Class.forName(parserPath);
-            Class[] parArgTypes = new Class[]{TokenStream.class};				// assign type to parser's args
-            Constructor parConstructor = parser.getConstructor(parArgTypes);
-            Object[] parArgs = new Object[]{tokens};							// assign value to parser's args  
-            Object parObj = parConstructor.newInstance(parArgs);				// makes new instance of parser      
-            
-            // set up customized tree adaptor if necessary
-            TreeAdaptor customTreeAdaptor = null; 
-            if ( treeAdaptorPath!=null ) {
-            	parArgTypes = new Class[]{TreeAdaptor.class};
-            	Method _setTreeAdaptor = parser.getMethod("setTreeAdaptor", parArgTypes);
-            	Class _treeAdaptor = Class.forName(treeAdaptorPath);
-            	customTreeAdaptor = (TreeAdaptor) _treeAdaptor.newInstance();
-            	_setTreeAdaptor.invoke(parObj, customTreeAdaptor);
-            }
-            
-            Method ruleName = parser.getMethod(testRuleName);
-
-            /** Start of I/O Redirecting */
-            ByteArrayOutputStream out = new ByteArrayOutputStream();
-            ByteArrayOutputStream err = new ByteArrayOutputStream();
-            ps = new PrintStream(out);
-            ps2 = new PrintStream(err);
-            System.setOut(ps);
-            System.setErr(ps2);
-            /** End of redirecting */
-
-            /** Invoke grammar rule, and get the return value */
-            Object ruleReturn = ruleName.invoke(parObj);
-            
-            Class _return = Class.forName(parserPath+"$"+testRuleName+"_return");            	
-        	Method returnName = _return.getMethod("getTree");
-        	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
-
-        	// Walk resulting tree; create tree nodes stream first
-        	CommonTreeNodeStream nodes;
-        	if ( customTreeAdaptor!=null ) {
-        		nodes = new CommonTreeNodeStream(customTreeAdaptor, tree);
-        	}
-        	else {
-        		nodes = new CommonTreeNodeStream(tree);
-        	}
-        	// AST nodes have payloads that point into the token stream
-        	nodes.setTokenStream(tokens);
-        	// Create a tree walker attached to the nodes stream
-        	treeParser = Class.forName(treeParserPath);
-            Class[] treeParArgTypes = new Class[]{TreeNodeStream.class};		// assign type to tree parser's args
-            Constructor treeParConstructor = treeParser.getConstructor(treeParArgTypes);
-            Object[] treeParArgs = new Object[]{nodes};							// assign value to tree parser's args  
-            Object treeParObj = treeParConstructor.newInstance(treeParArgs);	// makes new instance of tree parser      
-        	// Invoke the tree rule, and store the return value if there is one
-            Method treeRuleName = treeParser.getMethod(testTreeRuleName);
-            Object treeRuleReturn = treeRuleName.invoke(treeParObj);
-            
-            String astString = null;
-            String stString = null;
-            /** If tree rule has return value, determine if it contains an AST or a ST */
-            if ( treeRuleReturn!=null ) {
-                if ( treeRuleReturn.getClass().toString().indexOf(testTreeRuleName+"_return")>0 ) {
-                	try {	// NullPointerException may happen here...
-                		Class _treeReturn = Class.forName(treeParserPath+"$"+testTreeRuleName+"_return");
-                		Method[] methods = _treeReturn.getDeclaredMethods();
-			            for(Method method : methods) {
-			                if ( method.getName().equals("getTree") ) {
-			                	Method treeReturnName = _treeReturn.getMethod("getTree");
-		                    	CommonTree returnTree = (CommonTree) treeReturnName.invoke(treeRuleReturn);
-		                        astString = returnTree.toStringTree();
-			                }
-			                else if ( method.getName().equals("getTemplate") ) {
-			                	Method treeReturnName = _treeReturn.getMethod("getTemplate");	// use the tree parser's return class
-			                	StringTemplate st = (StringTemplate) treeReturnName.invoke(treeRuleReturn);
-			                	stString = st.toString();
-			                }
-			            }
-                	}
-                	catch(Exception e) {
-                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
-                	}
-                }
-            }
-
-			this.stdout = null;
-			this.stderr = null;
-			
-			/** Invalid input */
-            if ( tokens.index()!=tokens.size()-1 ) {
-            	throw new InvalidInputException();
-            }
-			
-			// retVal could be the actual return object from the rule, stderr, or stdout
-			if ( err.toString().length()>0 ) {
-				this.stderr = err.toString();
-				return this.stderr;
-			}
-			if ( out.toString().length()>0 ) {
-				this.stdout = out.toString();
-			}
-			if ( astString!=null ) {	// Return toStringTree of AST
-				return astString;
-			}
-			else if ( stString!=null ) {// Return toString of ST
-				return stString;
-			}
-			if ( treeRuleReturn!=null ) {
-				return treeRuleReturn;
-			}
-			if ( err.toString().length()==0 && out.toString().length()==0 ) {
-				return null;
-			}
-		} catch (ClassNotFoundException e) {
-			e.printStackTrace(); System.exit(1);
-		} catch (SecurityException e) {
-			e.printStackTrace(); System.exit(1);
-		} catch (NoSuchMethodException e) {
-			e.printStackTrace(); System.exit(1);
-		} catch (IllegalAccessException e) {
-			e.printStackTrace(); System.exit(1);
-		} catch (InvocationTargetException e) {
-			if ( e.getCause()!=null ) this.stderr = e.getCause().toString();
-			else this.stderr = e.toString();
-        	return this.stderr;
-		} finally {
-        	try {
-        		if ( ps!=null ) ps.close();
-    			if ( ps2!=null ) ps2.close();
-    			System.setOut(console);			// Reset standard output
-    			System.setErr(consoleErr);		// Reset standard err out
-        	} catch (Exception e) {
-        		e.printStackTrace();
-        	}
-        }
-		return stdout;
-	}
-	
-	// Modify the return value if the expected token type is OK or FAIL
-	public Object examineExecResult(int tokenType, Object retVal) {	
-		if ( tokenType==gUnitParser.OK ) {	// expected Token: OK
-			if ( this.stderr==null ) {
-				return "OK";
-			}
-			else {
-				return "FAIL, "+this.stderr;
-			}
-		}
-		else if ( tokenType==gUnitParser.FAIL ) {	// expected Token: FAIL
-			if ( this.stderr!=null ) {
-				return "FAIL";
-			}
-			else {
-				return "OK";
-			}
-		}
-		else {	// return the same object for the other token types
-			return retVal;
-		}		
-	}
-	
-}
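
The deleted test harness above drives generated lexers and parsers purely through reflection: load the class by name, construct it around a CharStream/TokenStream, then invoke the rule method by name. As a minimal standalone sketch of that pattern (not part of this patch; the class names "MyLexer"/"MyParser" and the rule name are hypothetical placeholders for ANTLR-generated code):

    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.CharStream;
    import org.antlr.runtime.CommonTokenStream;
    import org.antlr.runtime.Lexer;
    import org.antlr.runtime.TokenStream;

    import java.lang.reflect.Constructor;
    import java.lang.reflect.Method;

    public class ReflectiveRuleRunner {
        // Load a generated lexer/parser pair by name and invoke a single rule method,
        // mirroring the Class.forName/getConstructor/getMethod chain used above.
        public static Object runRule(String lexerName, String parserName,
                                     String ruleName, String text) throws Exception {
            CharStream chars = new ANTLRStringStream(text);

            Class<?> lexerClass = Class.forName(lexerName);           // e.g. "MyLexer"
            Constructor<?> lexCtor = lexerClass.getConstructor(CharStream.class);
            Lexer lexer = (Lexer) lexCtor.newInstance(chars);

            TokenStream tokens = new CommonTokenStream(lexer);

            Class<?> parserClass = Class.forName(parserName);         // e.g. "MyParser"
            Constructor<?> parCtor = parserClass.getConstructor(TokenStream.class);
            Object parser = parCtor.newInstance(tokens);

            Method rule = parserClass.getMethod(ruleName);            // e.g. "prog"
            return rule.invoke(parser);   // the rule's <rule>_return object, or null
        }
    }

A caller would pass the fully qualified generated class names, exactly as the executor derives them from the grammar name and optional package.
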
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitExecutor.java b/antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitExecutor.java
deleted file mode 100644
index 8498c65..0000000
--- a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitExecutor.java
+++ /dev/null
@@ -1,653 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2007-2008 Leon Jen-Yuan Su
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.gunit;
-
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.CommonTree;
-import org.antlr.runtime.tree.CommonTreeNodeStream;
-import org.antlr.runtime.tree.TreeAdaptor;
-import org.antlr.runtime.tree.TreeNodeStream;
-import org.antlr.stringtemplate.CommonGroupLoader;
-import org.antlr.stringtemplate.StringTemplate;
-import org.antlr.stringtemplate.StringTemplateGroup;
-import org.antlr.stringtemplate.StringTemplateGroupLoader;
-import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.List;
-
-public class gUnitExecutor implements ITestSuite {
-	public GrammarInfo grammarInfo;
-
-	private final ClassLoader grammarClassLoader;
-
-	private final String testsuiteDir;
-
-	public int numOfTest;
-
-	public int numOfSuccess;
-
-	public int numOfFailure;
-
-	private String title;
-
-	public int numOfInvalidInput;
-
-	private String parserName;
-
-	private String lexerName;
-
-	public List<AbstractTest> failures;
-	public List<AbstractTest> invalids;
-
-	private PrintStream console = System.out;
-    private PrintStream consoleErr = System.err;
-
-    public gUnitExecutor(GrammarInfo grammarInfo, String testsuiteDir) {
-    	this( grammarInfo, determineClassLoader(), testsuiteDir);
-    }
-
-    private static ClassLoader determineClassLoader() {
-    	ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    	if ( classLoader == null ) {
-    		classLoader = gUnitExecutor.class.getClassLoader();
-    	}
-    	return classLoader;
-    }
-
-	public gUnitExecutor(GrammarInfo grammarInfo, ClassLoader grammarClassLoader, String testsuiteDir) {
-		this.grammarInfo = grammarInfo;
-		this.grammarClassLoader = grammarClassLoader;
-		this.testsuiteDir = testsuiteDir;
-		numOfTest = 0;
-		numOfSuccess = 0;
-		numOfFailure = 0;
-		numOfInvalidInput = 0;
-		failures = new ArrayList<AbstractTest>();
-		invalids = new ArrayList<AbstractTest>();
-	}
-
-	protected ClassLoader getGrammarClassLoader() {
-		return grammarClassLoader;
-	}
-
-	protected final Class classForName(String name) throws ClassNotFoundException {
-		return getGrammarClassLoader().loadClass( name );
-	}
-
-	public String execTest() throws IOException{
-		// Set up string template for testing result
-		StringTemplate testResultST = getTemplateGroup().getInstanceOf("testResult");
-		try {
-			/** Set up appropriate path for parser/lexer if using package */
-			if (grammarInfo.getGrammarPackage()!=null ) {
-				parserName = grammarInfo.getGrammarPackage()+"."+grammarInfo.getGrammarName()+"Parser";
-				lexerName = grammarInfo.getGrammarPackage()+"."+grammarInfo.getGrammarName()+"Lexer";
-			}
-			else {
-				parserName = grammarInfo.getGrammarName()+"Parser";
-				lexerName = grammarInfo.getGrammarName()+"Lexer";
-			}
-
-			/*** Start Unit/Functional Testing ***/
-			// Execute unit tests for parser, lexer and tree grammar
-			if ( grammarInfo.getTreeGrammarName()!=null ) {
-				title = "executing testsuite for tree grammar:"+grammarInfo.getTreeGrammarName()+" walks "+parserName;
-			}
-			else {
-				title = "executing testsuite for grammar:"+grammarInfo.getGrammarName();
-			}
-			executeTests();
-			// End of execution of unit testing
-
-			// Fill in the template holes with the test results
-			testResultST.setAttribute("title", title);
-			testResultST.setAttribute("num_of_test", numOfTest);
-			testResultST.setAttribute("num_of_failure", numOfFailure);
-			if ( numOfFailure>0 ) {
-				testResultST.setAttribute("failure", failures);
-			}
-			if ( numOfInvalidInput>0 ) {
-				testResultST.setAttribute("has_invalid", true);
-				testResultST.setAttribute("num_of_invalid", numOfInvalidInput);
-				testResultST.setAttribute("invalid", invalids);
-			}
-		}
-		catch (Exception e) {
-            e.printStackTrace();
-            System.exit(1);
-        }
-		return testResultST.toString();
-	}
-
-	private StringTemplateGroup getTemplateGroup() {
-		StringTemplateGroupLoader loader = new CommonGroupLoader("org/antlr/gunit", null);
-		StringTemplateGroup.registerGroupLoader(loader);
-		StringTemplateGroup.registerDefaultLexer(AngleBracketTemplateLexer.class);
-		StringTemplateGroup group = StringTemplateGroup.loadGroup("gUnitTestResult");
-		return group;
-	}
-
-	// TODO: throw more specific exceptions
-	private gUnitTestResult runCorrectParser(String parserName, String lexerName, String rule, String lexicalRule, String treeRule, gUnitTestInput input) throws Exception
-	{
-		if ( lexicalRule!=null ) return runLexer(lexerName, lexicalRule, input);
-		else if ( treeRule!=null ) return runTreeParser(parserName, lexerName, rule, treeRule, input);
-		else return runParser(parserName, lexerName, rule, input);
-	}
-
-	private void executeTests() throws Exception {
-		for ( gUnitTestSuite ts: grammarInfo.getRuleTestSuites() ) {
-			String rule = ts.getRuleName();
-			String lexicalRule = ts.getLexicalRuleName();
-			String treeRule = ts.getTreeRuleName();
-			for ( gUnitTestInput input: ts.testSuites.keySet() ) {	// each rule may contain multiple tests
-				numOfTest++;
-				// Run parser, and get the return value, stdout, or stderr if any
-				gUnitTestResult result = null;
-				AbstractTest test = ts.testSuites.get(input);
-				try {
-					// TODO: create a -debug option to turn on logging, which shows progress of running tests
-					//System.out.print(numOfTest + ". Running rule: " + rule + "; input: '" + input.testInput + "'");
-					result = runCorrectParser(parserName, lexerName, rule, lexicalRule, treeRule, input);
-					// TODO: create a -debug option to turn on logging, which shows progress of running tests
-					//System.out.println("; Expecting " + test.getExpected() + "; Success?: " + test.getExpected().equals(test.getResult(result)));
-				} catch ( InvalidInputException e) {
-					numOfInvalidInput++;
-                    test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.line);
-					test.setActual(input.input);
-					invalids.add(test);
-					continue;
-				}	// TODO: ensure there's no other exceptions required to be handled here...
-
-				String expected = test.getExpected();
-				String actual = test.getResult(result);
-				test.setActual(actual);
-
-				if (actual == null) {
-					numOfFailure++;
-                    test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.line);
-					test.setActual("null");
-					failures.add(test);
-					onFail(test);
-				}
-				// the 2nd condition is used for the assertFAIL test of a lexer rule because BooleanTest returns an err msg instead of 'FAIL' if isLexerTest
-				else if ( expected.equals(actual) || (expected.equals("FAIL")&&!actual.equals("OK") ) ) {
-					numOfSuccess++;
-					onPass(test);
-				}
-				// TODO: something with ACTIONS - at least create action test type and throw exception.
-				else if ( ts.testSuites.get(input).getType()==gUnitParser.ACTION ) {	// expected Token: ACTION
-					numOfFailure++;
-                    test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.line);
-					test.setActual("\t"+"{ACTION} is not supported in the grammarInfo yet...");
-					failures.add(test);
-					onFail(test);
-				}
-				else {
-					numOfFailure++;
-                    test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.line);
-					failures.add(test);
-					onFail(test);
-				}
-			}	// end of 2nd for-loop: tests for individual rule
-		}	// end of 1st for-loop: testsuites for grammar
-	}
-
-	// TODO: throw proper exceptions
-	protected gUnitTestResult runLexer(String lexerName, String testRuleName, gUnitTestInput testInput) throws Exception {
-		CharStream input;
-		Class lexer = null;
-		PrintStream ps = null;		// for redirecting stdout later
-		PrintStream ps2 = null;		// for redirecting stderr later
-		try {
-			/** Set up ANTLR input stream based on input source, file or String */
-			input = getANTLRInputStream(testInput);
-
-            /** Use Reflection to create instances of lexer and parser */
-        	lexer = classForName(lexerName);
-            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
-            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);
-            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args
-            Object lexObj = lexConstructor.newInstance(lexArgs);				// makes new instance of lexer
-
-            Method ruleName = lexer.getMethod("m"+testRuleName, new Class[0]);
-
-            /** Start of I/O Redirecting */
-            ByteArrayOutputStream out = new ByteArrayOutputStream();
-            ByteArrayOutputStream err = new ByteArrayOutputStream();
-            ps = new PrintStream(out);
-            ps2 = new PrintStream(err);
-            System.setOut(ps);
-            System.setErr(ps2);
-            /** End of redirecting */
-
-            /** Invoke lexer rule, and get the current index in CharStream */
-            ruleName.invoke(lexObj, new Object[0]);
-            Method ruleName2 = lexer.getMethod("getCharIndex", new Class[0]);
-            int currentIndex = (Integer) ruleName2.invoke(lexObj, new Object[0]);
-            if ( currentIndex!=input.size() ) {
-            	ps2.print("extra text found, '"+input.substring(currentIndex, input.size()-1)+"'");
-            }
-
-			if ( err.toString().length()>0 ) {
-				gUnitTestResult testResult = new gUnitTestResult(false, err.toString(), true);
-				testResult.setError(err.toString());
-				return testResult;
-			}
-			String stdout = null;
-			if ( out.toString().length()>0 ) {
-				stdout = out.toString();
-			}
-			return new gUnitTestResult(true, stdout, true);
-		} catch (IOException e) {
-			return getTestExceptionResult(e);
-        } catch (ClassNotFoundException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (SecurityException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (NoSuchMethodException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (IllegalArgumentException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (InstantiationException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (IllegalAccessException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (InvocationTargetException e) {	// This exception could be caused from ANTLR Runtime Exception, e.g. MismatchedTokenException
-        	return getTestExceptionResult(e);
-        } finally {
-        	try {
-        		if ( ps!=null ) ps.close();
-    			if ( ps2!=null ) ps2.close();
-    			System.setOut(console);			// Reset standard output
-    			System.setErr(consoleErr);		// Reset standard err out
-        	} catch (Exception e) {
-        		e.printStackTrace();
-        	}
-        }
-        // TODO: verify this:
-        throw new Exception("This should be unreachable?");
-	}
-
-	// TODO: throw proper exceptions
-	protected gUnitTestResult runParser(String parserName, String lexerName, String testRuleName, gUnitTestInput testInput) throws Exception {
-		CharStream input;
-		Class lexer = null;
-		Class parser = null;
-		PrintStream ps = null;		// for redirecting stdout later
-		PrintStream ps2 = null;		// for redirecting stderr later
-		try {
-			/** Set up ANTLR input stream based on input source, file or String */
-			input = getANTLRInputStream(testInput);
-
-            /** Use Reflection to create instances of lexer and parser */
-        	lexer = classForName(lexerName);
-            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
-            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);
-            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args
-            Object lexObj = lexConstructor.newInstance(lexArgs);				// makes new instance of lexer
-
-            CommonTokenStream tokens = new CommonTokenStream((Lexer) lexObj);
-
-            parser = classForName(parserName);
-            Class[] parArgTypes = new Class[]{TokenStream.class};				// assign type to parser's args
-            Constructor parConstructor = parser.getConstructor(parArgTypes);
-            Object[] parArgs = new Object[]{tokens};							// assign value to parser's args
-            Object parObj = parConstructor.newInstance(parArgs);				// makes new instance of parser
-
-            // set up customized tree adaptor if necessary
-            if ( grammarInfo.getAdaptor()!=null ) {
-            	parArgTypes = new Class[]{TreeAdaptor.class};
-            	Method _setTreeAdaptor = parser.getMethod("setTreeAdaptor", parArgTypes);
-            	Class _treeAdaptor = classForName(grammarInfo.getAdaptor());
-            	_setTreeAdaptor.invoke(parObj, _treeAdaptor.newInstance());
-            }
-
-            Method ruleName = parser.getMethod(testRuleName);
-
-            /** Start of I/O Redirecting */
-            ByteArrayOutputStream out = new ByteArrayOutputStream();
-            ByteArrayOutputStream err = new ByteArrayOutputStream();
-            ps = new PrintStream(out);
-            ps2 = new PrintStream(err);
-            System.setOut(ps);
-            System.setErr(ps2);
-            /** End of redirecting */
-
-            /** Invoke grammar rule, and store the return value if there is one */
-            Object ruleReturn = ruleName.invoke(parObj);
-            String astString = null;
-            String stString = null;
-            /** If rule has return value, determine if it contains an AST or a ST */
-            if ( ruleReturn!=null ) {
-                if ( ruleReturn.getClass().toString().indexOf(testRuleName+"_return")>0 ) {
-                	try {	// NullPointerException may happen here...
-                		Class _return = classForName(parserName+"$"+testRuleName+"_return");
-                		Method[] methods = _return.getDeclaredMethods();
-                		for(Method method : methods) {
-			                if ( method.getName().equals("getTree") ) {
-			                	Method returnName = _return.getMethod("getTree");
-		                    	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
-		                    	astString = tree.toStringTree();
-			                }
-			                else if ( method.getName().equals("getTemplate") ) {
-			                	Method returnName = _return.getMethod("getTemplate");
-			                	StringTemplate st = (StringTemplate) returnName.invoke(ruleReturn);
-			                	stString = st.toString();
-			                }
-			            }
-                	}
-                	catch(Exception e) {
-                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
-                	}
-                }
-            }
-
-            /** Invalid input */
-            if ( tokens.index()!=tokens.size()-1 ) {
-            	//throw new InvalidInputException();
-            	ps2.print("Invalid input");
-            }
-
-			if ( err.toString().length()>0 ) {
-				gUnitTestResult testResult = new gUnitTestResult(false, err.toString());
-				testResult.setError(err.toString());
-				return testResult;
-			}
-			String stdout = null;
-			// TODO: need to deal with the case which has both ST return value and stdout
-			if ( out.toString().length()>0 ) {
-				stdout = out.toString();
-			}
-			if ( astString!=null ) {	// Return toStringTree of AST
-				return new gUnitTestResult(true, stdout, astString);
-			}
-			else if ( stString!=null ) {// Return toString of ST
-				return new gUnitTestResult(true, stdout, stString);
-			}
-
-			if ( ruleReturn!=null ) {
-				// TODO: currently only works for a single return with int or String value
-				return new gUnitTestResult(true, stdout, String.valueOf(ruleReturn));
-			}
-			return new gUnitTestResult(true, stdout, stdout);
-		} catch (IOException e) {
-			return getTestExceptionResult(e);
-		} catch (ClassNotFoundException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (SecurityException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (NoSuchMethodException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (IllegalArgumentException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (InstantiationException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (IllegalAccessException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (InvocationTargetException e) {	// This exception could be caused from ANTLR Runtime Exception, e.g. MismatchedTokenException
-        	return getTestExceptionResult(e);
-        } finally {
-        	try {
-        		if ( ps!=null ) ps.close();
-    			if ( ps2!=null ) ps2.close();
-    			System.setOut(console);			// Reset standard output
-    			System.setErr(consoleErr);		// Reset standard err out
-        	} catch (Exception e) {
-        		e.printStackTrace();
-        	}
-        }
-        // TODO: verify this:
-        throw new Exception("This should be unreachable?");
-	}
-
-	protected gUnitTestResult runTreeParser(String parserName, String lexerName, String testRuleName, String testTreeRuleName, gUnitTestInput testInput) throws Exception {
-		CharStream input;
-		String treeParserPath;
-		Class lexer = null;
-		Class parser = null;
-		Class treeParser = null;
-		PrintStream ps = null;		// for redirecting stdout later
-		PrintStream ps2 = null;		// for redirecting stderr later
-		try {
-			/** Set up ANTLR input stream based on input source, file or String */
-			input = getANTLRInputStream(testInput);
-
-			/** Set up appropriate path for tree parser if using package */
-			if ( grammarInfo.getGrammarPackage()!=null ) {
-				treeParserPath = grammarInfo.getGrammarPackage()+"."+grammarInfo.getTreeGrammarName();
-			}
-			else {
-				treeParserPath = grammarInfo.getTreeGrammarName();
-			}
-
-            /** Use Reflection to create instances of lexer and parser */
-        	lexer = classForName(lexerName);
-            Class[] lexArgTypes = new Class[]{CharStream.class};				// assign type to lexer's args
-            Constructor lexConstructor = lexer.getConstructor(lexArgTypes);
-            Object[] lexArgs = new Object[]{input};								// assign value to lexer's args
-            Object lexObj = lexConstructor.newInstance(lexArgs);				// makes new instance of lexer
-
-            CommonTokenStream tokens = new CommonTokenStream((Lexer) lexObj);
-
-            parser = classForName(parserName);
-            Class[] parArgTypes = new Class[]{TokenStream.class};				// assign type to parser's args
-            Constructor parConstructor = parser.getConstructor(parArgTypes);
-            Object[] parArgs = new Object[]{tokens};							// assign value to parser's args
-            Object parObj = parConstructor.newInstance(parArgs);				// makes new instance of parser
-
-            // set up customized tree adaptor if necessary
-            TreeAdaptor customTreeAdaptor = null;
-            if ( grammarInfo.getAdaptor()!=null ) {
-            	parArgTypes = new Class[]{TreeAdaptor.class};
-            	Method _setTreeAdaptor = parser.getMethod("setTreeAdaptor", parArgTypes);
-            	Class _treeAdaptor = classForName(grammarInfo.getAdaptor());
-            	customTreeAdaptor = (TreeAdaptor) _treeAdaptor.newInstance();
-            	_setTreeAdaptor.invoke(parObj, customTreeAdaptor);
-            }
-
-            Method ruleName = parser.getMethod(testRuleName);
-
-            /** Start of I/O Redirecting */
-            ByteArrayOutputStream out = new ByteArrayOutputStream();
-            ByteArrayOutputStream err = new ByteArrayOutputStream();
-            ps = new PrintStream(out);
-            ps2 = new PrintStream(err);
-            System.setOut(ps);
-            System.setErr(ps2);
-            /** End of redirecting */
-
-            /** Invoke grammar rule, and get the return value */
-            Object ruleReturn = ruleName.invoke(parObj);
-
-            Class _return = classForName(parserName+"$"+testRuleName+"_return");
-        	Method returnName = _return.getMethod("getTree");
-        	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
-
-        	// Walk resulting tree; create tree nodes stream first
-        	CommonTreeNodeStream nodes;
-        	if ( customTreeAdaptor!=null ) {
-        		nodes = new CommonTreeNodeStream(customTreeAdaptor, tree);
-        	}
-        	else {
-        		nodes = new CommonTreeNodeStream(tree);
-        	}
-        	// AST nodes have payloads that point into the token stream
-        	nodes.setTokenStream(tokens);
-        	// Create a tree walker attached to the nodes stream
-        	treeParser = classForName(treeParserPath);
-            Class[] treeParArgTypes = new Class[]{TreeNodeStream.class};		// assign type to tree parser's args
-            Constructor treeParConstructor = treeParser.getConstructor(treeParArgTypes);
-            Object[] treeParArgs = new Object[]{nodes};							// assign value to tree parser's args
-            Object treeParObj = treeParConstructor.newInstance(treeParArgs);	// makes new instance of tree parser
-        	// Invoke the tree rule, and store the return value if there is one
-            Method treeRuleName = treeParser.getMethod(testTreeRuleName);
-            Object treeRuleReturn = treeRuleName.invoke(treeParObj);
-
-            String astString = null;
-            String stString = null;
-            /** If tree rule has return value, determine if it contains an AST or a ST */
-            if ( treeRuleReturn!=null ) {
-                if ( treeRuleReturn.getClass().toString().indexOf(testTreeRuleName+"_return")>0 ) {
-                	try {	// NullPointerException may happen here...
-                		Class _treeReturn = classForName(treeParserPath+"$"+testTreeRuleName+"_return");
-                		Method[] methods = _treeReturn.getDeclaredMethods();
-			            for(Method method : methods) {
-			                if ( method.getName().equals("getTree") ) {
-			                	Method treeReturnName = _treeReturn.getMethod("getTree");
-		                    	CommonTree returnTree = (CommonTree) treeReturnName.invoke(treeRuleReturn);
-		                        astString = returnTree.toStringTree();
-			                }
-			                else if ( method.getName().equals("getTemplate") ) {
-			                	Method treeReturnName = _treeReturn.getMethod("getTemplate");	// use the tree parser's return class
-			                	StringTemplate st = (StringTemplate) treeReturnName.invoke(treeRuleReturn);
-			                	stString = st.toString();
-			                }
-			            }
-                	}
-                	catch(Exception e) {
-                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
-                	}
-                }
-            }
-
-            /** Invalid input */
-            if ( tokens.index()!=tokens.size()-1 ) {
-            	//throw new InvalidInputException();
-            	ps2.print("Invalid input");
-            }
-
-			if ( err.toString().length()>0 ) {
-				gUnitTestResult testResult = new gUnitTestResult(false, err.toString());
-				testResult.setError(err.toString());
-				return testResult;
-			}
-
-			String stdout = null;
-			// TODO: need to deal with the case which has both ST return value and stdout
-			if ( out.toString().length()>0 ) {
-				stdout = out.toString();
-			}
-			if ( astString!=null ) {	// Return toStringTree of AST
-				return new gUnitTestResult(true, stdout, astString);
-			}
-			else if ( stString!=null ) {// Return toString of ST
-				return new gUnitTestResult(true, stdout, stString);
-			}
-
-			if ( treeRuleReturn!=null ) {
-				// TODO: again, currently only works for a single return with int or String value
-				return new gUnitTestResult(true, stdout, String.valueOf(treeRuleReturn));
-			}
-			return new gUnitTestResult(true, stdout, stdout);
-		} catch (IOException e) {
-			return getTestExceptionResult(e);
-		} catch (ClassNotFoundException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (SecurityException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (NoSuchMethodException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (IllegalArgumentException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (InstantiationException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (IllegalAccessException e) {
-        	e.printStackTrace(); System.exit(1);
-        } catch (InvocationTargetException e) {	// note: This exception could be caused from ANTLR Runtime Exception...
-        	return getTestExceptionResult(e);
-        } finally {
-        	try {
-        		if ( ps!=null ) ps.close();
-    			if ( ps2!=null ) ps2.close();
-    			System.setOut(console);			// Reset standard output
-    			System.setErr(consoleErr);		// Reset standard err out
-        	} catch (Exception e) {
-        		e.printStackTrace();
-        	}
-        }
-        // TODO: verify this:
-        throw new Exception("Should not be reachable?");
-	}
-
-	// Create ANTLR input stream based on input source, file or String
-	private CharStream getANTLRInputStream(gUnitTestInput testInput) throws IOException {
-		CharStream input;
-		if ( testInput.isFile) {
-			String filePath = testInput.input;
-			File testInputFile = new File(filePath);
-			// if input test file is not found under the current dir, try to look for it under the dir where the testsuite file is located
-			if ( !testInputFile.exists() ) {
-				testInputFile = new File(this.testsuiteDir, filePath);
-				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
-				// if still not found, also try to look for it under the package dir
-				else if ( grammarInfo.getGrammarPackage()!=null ) {
-					testInputFile = new File("."+File.separator+grammarInfo.getGrammarPackage().replace(".", File.separator), filePath);
-					if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
-				}
-			}
-			input = new ANTLRFileStream(filePath);
-		}
-		else {
-			input = new ANTLRStringStream(testInput.input);
-		}
-		return input;
-	}
-
-	// store the cause of the exception, or the exception name, in a gUnitTestResult instance
-	private gUnitTestResult getTestExceptionResult(Exception e) {
-		gUnitTestResult testResult;
-    	if ( e.getCause()!=null ) {
-    		testResult = new gUnitTestResult(false, e.getCause().toString(), true);
-    		testResult.setError(e.getCause().toString());
-    	}
-    	else {
-    		testResult = new gUnitTestResult(false, e.toString(), true);
-    		testResult.setError(e.toString());
-    	}
-    	return testResult;
-	}
-
-
-    public void onPass(ITestCase passTest) {
-
-    }
-
-    public void onFail(ITestCase failTest) {
-
-    }
-
-}
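
Every run*() method in the deleted executor captures stdout/stderr by swapping System.out/System.err for PrintStreams over ByteArrayOutputStreams, then restoring the console streams in a finally block. A minimal sketch of that capture-and-restore idiom, shown for stdout only (the helper name is made up for illustration):

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    public final class CapturedOutput {
        // Run a task while System.out is redirected to an in-memory buffer,
        // then restore the real console stream no matter what the task did.
        public static String captureStdout(Runnable task) {
            PrintStream console = System.out;
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            PrintStream ps = new PrintStream(buf);
            System.setOut(ps);
            try {
                task.run();
            } finally {
                ps.close();
                System.setOut(console);   // always reset standard output
            }
            return buf.toString();
        }
    }

The same shape applies to System.err; the executor redirects both at once and inspects the captured error text to decide whether a test passed.
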
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/TestCaseEditController.java b/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/TestCaseEditController.java
deleted file mode 100644
index 30cf0ae..0000000
--- a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/TestCaseEditController.java
+++ /dev/null
@@ -1,633 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2009 Shaoting Cai
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-package org.antlr.gunit.swingui;
-
-import org.antlr.gunit.swingui.model.*;
-import org.antlr.gunit.swingui.ImageFactory;
-import java.awt.*;
-import java.awt.event.*;
-import java.util.HashMap;
-import javax.swing.*;
-import javax.swing.event.*;
-
-/**
- *
- * @author scai
- */
-public class TestCaseEditController implements IController {
-
-    private JPanel view = new JPanel();
-
-    private JScrollPane scroll;
-    private JPanel paneDetail;
-    private AbstractEditorPane paneDetailInput, paneDetailOutput;
-    private JToolBar toolbar;
-    private JList listCases;
-    private ListModel listModel ;
-
-    public ActionListener onTestCaseNumberChange;
-
-    /* EDITORS */
-    private InputFileEditor editInputFile;
-    private InputStringEditor editInputString;
-    private InputMultiEditor editInputMulti;
-    private OutputResultEditor editOutputResult;
-    private OutputAstEditor editOutputAST;
-    private OutputStdEditor editOutputStd;
-    private OutputReturnEditor editOutputReturn;
-    
-    private JComboBox comboInputType, comboOutputType;
-
-    /* TYPE NAME */
-    private static final String IN_TYPE_STRING = "Single-line Text";
-    private static final String IN_TYPE_MULTI = "Multi-line Text";
-    private static final String IN_TYPE_FILE = "Disk File";
-    private static final String OUT_TYPE_BOOL = "OK or Fail";
-    private static final String OUT_TYPE_AST = "AST";
-    private static final String OUT_TYPE_STD = "Standard Output";
-    private static final String OUT_TYPE_RET = "Return Value";
-
-    private static final String DEFAULT_IN_SCRIPT = "";
-    private static final String DEFAULT_OUT_SCRIPT = "";
-
-    private static final Object[] INPUT_TYPE =  {
-        IN_TYPE_STRING, IN_TYPE_MULTI, IN_TYPE_FILE
-    };
-
-    private static final Object[] OUTPUT_TYPE = {
-        OUT_TYPE_BOOL, OUT_TYPE_AST, OUT_TYPE_STD, OUT_TYPE_RET
-    };
-
-    /* SIZE */
-    private static final int TEST_CASE_DETAIL_WIDTH = 300;
-    private static final int TEST_EDITOR_WIDTH = 280;
-    private static final int TEST_CASE_DETAIL_HEIGHT = 250;
-    private static final int TEST_EDITOR_HEIGHT = 120;
-
-    /* MODEL */
-    private Rule currentRule = null;
-    private TestCase currentTestCase = null;
-
-    /* END OF MODEL*/
-
-    private static final HashMap<Class, String> TypeNameTable;
-    static {
-        TypeNameTable = new HashMap<Class, String> ();
-        TypeNameTable.put(TestCaseInputString.class, IN_TYPE_STRING);
-        TypeNameTable.put(TestCaseInputMultiString.class, IN_TYPE_MULTI);
-        TypeNameTable.put(TestCaseInputFile.class, IN_TYPE_FILE);
-
-        TypeNameTable.put(TestCaseOutputResult.class, OUT_TYPE_BOOL);
-        TypeNameTable.put(TestCaseOutputAST.class, OUT_TYPE_AST);
-        TypeNameTable.put(TestCaseOutputStdOut.class, OUT_TYPE_STD);
-        TypeNameTable.put(TestCaseOutputReturn.class, OUT_TYPE_RET);
-    }
-
-    //private WorkSpaceView owner;
-
-    public TestCaseEditController(WorkSpaceView workspace) {
-        //this.owner = workspace;
-        initComponents();
-    }
-
-    public TestCaseEditController() {
-        initComponents();
-    }
-
-    public void OnLoadRule(Rule rule) {
-        if(rule == null) throw new IllegalArgumentException("Null");
-        this.currentRule = rule;
-        this.currentTestCase = null;
-        this.listModel = rule;
-        this.listCases.setModel(this.listModel);      
-    }
-
-    public void setCurrentTestCase(TestCase testCase) {
-        if(testCase == null) throw new IllegalArgumentException("Null");
-        this.listCases.setSelectedValue(testCase, true);
-        this.currentTestCase = testCase;
-    }
-
-    public Rule getCurrentRule() {
-        return this.currentRule;
-    }
-    
-    private void initComponents() {
-
-        /* CASE LIST */
-        listCases = new JList();
-        listCases.addListSelectionListener(new TestCaseListSelectionListener());
-        listCases.setCellRenderer(listRenderer);
-        listCases.setOpaque(false);
-        
-        scroll = new JScrollPane(listCases);
-        scroll.setBorder(BorderFactory.createTitledBorder(
-                BorderFactory.createEmptyBorder(), "Test Cases"));
-        scroll.setOpaque(false);
-        scroll.setViewportBorder(BorderFactory.createEtchedBorder());
-
-        /* CASE DETAIL */
-
-        editInputString = new InputStringEditor();
-        editInputMulti = new InputMultiEditor();
-        editInputFile = new InputFileEditor();
-
-        editOutputResult = new OutputResultEditor();
-        editOutputAST = new OutputAstEditor();
-        editOutputStd = new OutputStdEditor();
-        editOutputReturn = new OutputReturnEditor();
-        
-        paneDetail = new JPanel();
-        paneDetail.setBorder(BorderFactory.createEmptyBorder());
-        paneDetail.setOpaque(false);
-
-        comboInputType = new JComboBox(INPUT_TYPE);
-        comboInputType.addActionListener(new ActionListener() {
-            public void actionPerformed(ActionEvent event) {
-                OnInputTestCaseTypeChanged(comboInputType.getSelectedItem());
-            }
-        });
-        comboOutputType = new JComboBox(OUTPUT_TYPE);
-        comboOutputType.addActionListener(new ActionListener() {
-            public void actionPerformed(ActionEvent event) {
-                OnOutputTestCaseTypeChanged(comboOutputType.getSelectedItem());
-            }
-        });
-        paneDetailInput = new InputEditorPane(comboInputType);
-        paneDetailOutput = new OutputEditorPane(comboOutputType);
-
-        BoxLayout layout = new BoxLayout(paneDetail, BoxLayout.PAGE_AXIS);
-        paneDetail.setLayout(layout);
-        
-        paneDetail.add(this.paneDetailInput);
-        paneDetail.add(this.paneDetailOutput);
-
-        /* TOOLBAR */
-        toolbar = new JToolBar("Edit TestCases", JToolBar.VERTICAL);
-        toolbar.setFloatable(false);
-        toolbar.add(new AddTestCaseAction());
-        toolbar.add(new RemoveTestCaseAction());
-
-        /* COMPOSITE */
-        view.setLayout(new BorderLayout());
-        view.setBorder(BorderFactory.createEmptyBorder());
-        view.setOpaque(false);
-        view.add(toolbar, BorderLayout.WEST);
-        view.add(scroll, BorderLayout.CENTER);
-        view.add(paneDetail, BorderLayout.EAST);
-    }
-
-    private void updateInputEditor() {
-        JComponent editor = null;
-
-        if(currentTestCase != null ) {
-            ITestCaseInput input = this.currentTestCase.getInput();
-            if(input instanceof TestCaseInputString) {
-                this.editInputString.setText(input.getScript());
-                editor = this.editInputString;
-                comboInputType.setSelectedItem(IN_TYPE_STRING);
-            } else if(input instanceof TestCaseInputMultiString) {
-                this.editInputMulti.setText(input.getScript());
-                editor = this.editInputMulti.getView();
-                comboInputType.setSelectedItem(IN_TYPE_MULTI);
-            } else if(input instanceof TestCaseInputFile) {
-                this.editInputFile.setText(input.getScript());
-                editor = this.editInputFile;
-                comboInputType.setSelectedItem(IN_TYPE_FILE);
-            } else {
-                throw new Error("Wrong type");
-            }
-        }
-        
-        paneDetailInput.setEditor(editor);
-    }
-
-    private void updateOutputEditor() {
-        JComponent editor = null;
-        
-        if(currentTestCase != null) {
-            
-            ITestCaseOutput output = this.currentTestCase.getOutput();
-
-            if(output instanceof TestCaseOutputAST) {
-
-                this.editOutputAST.setText(output.getScript());
-                editor = this.editOutputAST.getView();
-                comboOutputType.setSelectedItem(OUT_TYPE_AST);
-
-            } else if(output instanceof TestCaseOutputResult) {
-
-                this.editOutputResult.setValue(output.getScript());
-                editor = this.editOutputResult;
-                comboOutputType.setSelectedItem(OUT_TYPE_BOOL);
-
-            } else if(output instanceof TestCaseOutputStdOut) {
-
-                this.editOutputStd.setText(output.getScript());
-                editor = this.editOutputStd.getView();
-                comboOutputType.setSelectedItem(OUT_TYPE_STD);
-
-            } else if(output instanceof TestCaseOutputReturn) {
-
-                this.editOutputReturn.setText(output.getScript());
-                editor = this.editOutputReturn.getView();
-                comboOutputType.setSelectedItem(OUT_TYPE_RET);
-
-            } else {
-
-                throw new Error("Wrong type");
-                
-            }
-
-        }
-        this.paneDetailOutput.setEditor(editor);
-    }
-
-    private void OnInputTestCaseTypeChanged(Object inputTypeStr) {
-        if(this.currentTestCase != null) {
-            ITestCaseInput input ;
-            if(inputTypeStr == IN_TYPE_STRING) {
-                input = new TestCaseInputString(DEFAULT_IN_SCRIPT);
-            } else if(inputTypeStr == IN_TYPE_MULTI) {
-                input = new TestCaseInputMultiString(DEFAULT_IN_SCRIPT);
-            } else if(inputTypeStr == IN_TYPE_FILE) {
-                input = new TestCaseInputFile(DEFAULT_IN_SCRIPT);
-            } else {
-                throw new Error("Wrong Type");
-            }
-
-            if(input.getClass().equals(this.currentTestCase.getInput().getClass()))
-                return ;
-
-            this.currentTestCase.setInput(input);
-        }
-        this.updateInputEditor();
-    }
-
-    private void OnOutputTestCaseTypeChanged(Object outputTypeStr) {
-        if(this.currentTestCase != null) {
-
-            ITestCaseOutput output ;
-            if(outputTypeStr == OUT_TYPE_AST) {
-                output = new TestCaseOutputAST(DEFAULT_OUT_SCRIPT);
-            } else if(outputTypeStr == OUT_TYPE_BOOL) {
-                output = new TestCaseOutputResult(false);
-            } else if(outputTypeStr == OUT_TYPE_STD) {
-                output = new TestCaseOutputStdOut(DEFAULT_OUT_SCRIPT);
-            } else if(outputTypeStr == OUT_TYPE_RET) {
-                output = new TestCaseOutputReturn(DEFAULT_OUT_SCRIPT);
-            } else {
-                throw new Error("Wrong Type");
-            }
-
-            if(output.getClass().equals(this.currentTestCase.getOutput().getClass()))
-                return ;
-
-            this.currentTestCase.setOutput(output);
-        }
-        this.updateOutputEditor();
-    }
-
-
-    private void OnTestCaseSelected(TestCase testCase) {
-        //if(testCase == null) throw new RuntimeException("Null TestCase");
-        this.currentTestCase = testCase;
-        updateInputEditor();
-        updateOutputEditor();
-
-    }
-
-    private void OnAddTestCase() {
-        if(currentRule == null) return;
-        
-        final TestCase newCase = new TestCase(
-                new TestCaseInputString(""),
-                new TestCaseOutputResult(true));
-        this.currentRule.addTestCase(newCase);
-        setCurrentTestCase(newCase);
-
-        this.listCases.setSelectedValue(newCase, true);
-        this.listCases.updateUI();
-        this.OnTestCaseSelected(newCase);
-        this.onTestCaseNumberChange.actionPerformed(null);
-    }
-
-    private void OnRemoveTestCase() {
-        if(currentTestCase == null) return;
-        currentRule.removeElement(currentTestCase);
-        listCases.updateUI();
-
-        final TestCase nextActiveCase = listCases.isSelectionEmpty() ?
-            null : (TestCase) listCases.getSelectedValue() ;
-        OnTestCaseSelected(nextActiveCase);
-        this.onTestCaseNumberChange.actionPerformed(null);
-    }
-
-    public Object getModel() {
-        return currentRule;
-    }
-
-    public Component getView() {
-        return view;
-    }
-
-    /* EDITOR CONTAINER */
-
-    abstract public class AbstractEditorPane extends JPanel {
-
-        private JComboBox combo;
-        private JComponent editor;
-        private String title;
-        private JLabel placeHolder = new JLabel();
-
-        public AbstractEditorPane(JComboBox comboBox, String title) {
-            this.combo = comboBox;
-            this.editor = placeHolder;
-            this.title = title;
-            this.initComponents();
-        }
-
-        private void initComponents() {
-            placeHolder.setPreferredSize(new Dimension(
-                    TEST_CASE_DETAIL_WIDTH, TEST_CASE_DETAIL_HEIGHT));
-            this.setLayout(new BoxLayout(this, BoxLayout.Y_AXIS));
-            this.add(combo, BorderLayout.NORTH);
-            this.add(editor, BorderLayout.CENTER);
-            this.setOpaque(false);
-            this.setBorder(BorderFactory.createTitledBorder(title));
-            this.setPreferredSize(new Dimension(
-                    TEST_CASE_DETAIL_WIDTH, TEST_CASE_DETAIL_HEIGHT));
-        }
-
-        public void setEditor(JComponent newEditor) {
-            if(newEditor == null) newEditor = placeHolder;
-            this.remove(editor);
-            this.add(newEditor);
-            this.editor = newEditor;
-            this.updateUI();
-        }
-    }
-
-    public class InputEditorPane extends AbstractEditorPane {
-        public InputEditorPane(JComboBox comboBox) {
-            super(comboBox, "Input");
-        }
-    }
-
-    public class OutputEditorPane extends AbstractEditorPane {
-        public OutputEditorPane(JComboBox comboBox) {
-            super(comboBox, "Output");
-        }
-    }
-
-    /* INPUT EDITORS */
-
-    public class InputStringEditor extends JTextField implements CaretListener {
-        public InputStringEditor() {
-            super();
-
-            this.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
-            this.addCaretListener(this);
-        }
-
-        public void caretUpdate(CaretEvent arg0) {
-            currentTestCase.getInput().setScript(getText());
-            listCases.updateUI();
-        }
-    }
-
-    public class InputMultiEditor implements CaretListener {
-        private JTextArea textArea = new JTextArea(20, 30);
-        private JScrollPane scroll = new JScrollPane(textArea,
-                JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
-                JScrollPane.HORIZONTAL_SCROLLBAR_ALWAYS);
-
-        public InputMultiEditor() {
-            super();
-            scroll.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
-            textArea.addCaretListener(this);
-        }
-
-        public void caretUpdate(CaretEvent arg0) {
-            currentTestCase.getInput().setScript(getText());
-            listCases.updateUI();
-        }
-
-        public String getText() {
-            return textArea.getText();
-        }
-
-        public void setText(String text) {
-            textArea.setText(text);
-        }
-
-        public JComponent getView() {
-            return scroll;
-        }
-    }
-
-    public class InputFileEditor extends InputStringEditor {};
-
-    public class OutputResultEditor extends JPanel implements ActionListener {
-        
-        private JToggleButton tbFail, tbOk;
-
-        public OutputResultEditor() {
-            super();
-
-            tbFail = new JToggleButton("Fail");
-            tbOk = new JToggleButton("OK");
-            ButtonGroup group = new ButtonGroup();
-            group.add(tbFail);
-            group.add(tbOk);
-
-            this.add(tbFail);
-            this.add(tbOk);
-
-            this.tbFail.addActionListener(this);
-            this.tbOk.addActionListener(this);
-
-            this.setPreferredSize(
-                    new Dimension(TEST_EDITOR_WIDTH, 100));
-        }
-
-        public void actionPerformed(ActionEvent e) {
-            TestCaseOutputResult output =
-                    (TestCaseOutputResult) currentTestCase.getOutput();
-
-            if(e.getSource() == tbFail) {
-                output.setScript(false);
-            } else {
-                output.setScript(true);
-            }
-
-            listCases.updateUI();
-        }
-
-        public void setValue(String value) {
-            if(TestCaseOutputResult.OK.equals(value)) {
-                this.tbOk.setSelected(true);
-            } else {
-                this.tbFail.setSelected(true);
-            }
-        }
-    }
-    
-
-    public class OutputAstEditor implements CaretListener {
-        private JTextArea textArea = new JTextArea(20, 30);
-        private JScrollPane scroll = new JScrollPane(textArea,
-                JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
-                JScrollPane.HORIZONTAL_SCROLLBAR_ALWAYS);
-
-        public OutputAstEditor() {
-            super();
-            scroll.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
-            textArea.addCaretListener(this);
-        }
-
-        public void caretUpdate(CaretEvent arg0) {
-            currentTestCase.getOutput().setScript(getText());
-            listCases.updateUI();
-        }
-
-        public void setText(String text) {
-            this.textArea.setText(text);
-        }
-
-        public String getText() {
-            return this.textArea.getText();
-        }
-
-        public JScrollPane getView() {
-            return this.scroll;
-        }
-    }
-
-
-    public class OutputStdEditor extends OutputAstEditor {}
-    public class OutputReturnEditor extends OutputAstEditor {}
-
-    /* EVENT HANDLERS */
-
-    private class TestCaseListSelectionListener implements ListSelectionListener {
-
-        public void valueChanged(ListSelectionEvent e) {
-            
-            if(e.getValueIsAdjusting()) return;
-            final JList list = (JList) e.getSource();
-            final TestCase value = (TestCase) list.getSelectedValue();
-            if(value != null) OnTestCaseSelected(value);
-            
-        }
-
-    }
-
-    /* ACTIONS */
-
-    private class AddTestCaseAction extends AbstractAction {
-        public AddTestCaseAction() {
-            super("Add", ImageFactory.getSingleton().ADD);
-            putValue(SHORT_DESCRIPTION, "Add a gUnit test case.");
-        }
-        public void actionPerformed(ActionEvent e) {
-            OnAddTestCase();
-        }
-    }
-
-    private class RemoveTestCaseAction extends AbstractAction {
-        public RemoveTestCaseAction() {
-            super("Remove", ImageFactory.getSingleton().DELETE);
-            putValue(SHORT_DESCRIPTION, "Remove a gUnit test case.");
-        }
-        public void actionPerformed(ActionEvent e) {
-            OnRemoveTestCase();
-        }
-    }
-
-    /* CELL RENDERERS */
-
-    private static TestCaseListRenderer listRenderer
-            = new TestCaseListRenderer();
-
-    private static class TestCaseListRenderer implements ListCellRenderer {
-
-        private static Font IN_FONT = new Font("mono", Font.PLAIN, 12);
-        private static Font OUT_FONT = new Font("default", Font.BOLD, 12);
-
-        public static String clamp(String text, int len) {
-            if(text.length() > len) {
-                return text.substring(0, len - 3).concat("...");
-            } else {
-                return text;
-            }
-        }
-
-        public static String clampAtNewLine(String text) {
-            int pos = text.indexOf('\n');
-            if(pos >= 0) {
-                return text.substring(0, pos).concat("...");
-            } else {
-                return text;
-            }
-        }
-
-        public Component getListCellRendererComponent(
-                JList list, Object value, int index,
-                boolean isSelected, boolean hasFocus) {
-
-            final JPanel pane = new JPanel();
-            
-            if (value instanceof TestCase) {
-                final TestCase item = (TestCase) value;
-
-                // create components
-                final JLabel labIn = new JLabel(
-                        clamp(clampAtNewLine(item.getInput().getScript()), 18));
-                final JLabel labOut = new JLabel(
-                        clamp(clampAtNewLine(item.getOutput().getScript()), 18));
-                labOut.setFont(OUT_FONT);
-                labIn.setFont(IN_FONT);
-
-                labIn.setIcon(item.getInput() instanceof TestCaseInputFile ?
-                    ImageFactory.getSingleton().FILE16 :
-                    ImageFactory.getSingleton().EDIT16);
-
-                pane.setBorder(BorderFactory.createEtchedBorder());
-                pane.setLayout(new BoxLayout(pane, BoxLayout.Y_AXIS));
-                pane.add(labIn);
-                pane.add(labOut);
-                pane.setBackground(isSelected ? Color.LIGHT_GRAY : Color.WHITE);
-            } 
-
-            return pane;
-        }
-    }
-
-}
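A note on the renderer above: each list cell shows the test case's input and output scripts, first cut at the first newline by clampAtNewLine() and then truncated to 18 characters by clamp(). A minimal, self-contained Java sketch of that composition (the sample script is invented; the helpers mirror the deleted TestCaseListRenderer):

    public class ClampDemo {
        static String clamp(String text, int len) {
            // Truncate to len characters total, spending the last three on "...".
            return text.length() > len ? text.substring(0, len - 3) + "..." : text;
        }

        static String clampAtNewLine(String text) {
            // Keep only the first line of a multi-line script.
            int pos = text.indexOf('\n');
            return pos >= 0 ? text.substring(0, pos) + "..." : text;
        }

        public static void main(String[] args) {
            String script = "int x = 1;\nint y = 2;";
            // Prints "int x = 1;..." -- already short enough that clamp() leaves it alone.
            System.out.println(clamp(clampAtNewLine(script), 18));
        }
    }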
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuite.java b/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuite.java
deleted file mode 100644
index 06e5227..0000000
--- a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuite.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2009 Shaoting Cai
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.gunit.swingui.model;
-
-import java.io.*;
-import java.util.*;
-import org.antlr.runtime.*;
-
-public class TestSuite {
-
-    protected List<Rule> rules ;
-    protected String grammarName ;
-    protected CommonTokenStream tokens;
-    protected File testSuiteFile;      
-
-    protected TestSuite(String gname, File testFile) {
-        grammarName = gname;
-        testSuiteFile = testFile;
-        rules = new ArrayList<Rule>();
-    }
-    
-    /* Get the gUnit test suite file. */
-    public File getTestSuiteFile() {
-        return testSuiteFile;
-    }       
-
-    public void addRule(Rule currentRule) {
-        if(currentRule == null) throw new IllegalArgumentException("Null rule");
-        rules.add(currentRule);
-    }
-
-    // Return true if a rule with the same name is already in the suite.
-    public boolean hasRule(Rule rule) {
-        for(Rule r: rules) {
-            if(r.getName().equals(rule.getName())) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    public int getRuleCount() {
-        return rules.size();
-    }
-    
-    public void setRules(List<Rule> newRules) {
-        rules.clear();
-        rules.addAll(newRules);
-    }
-
-    /* GETTERS AND SETTERS */
-
-    public void setGrammarName(String name) { grammarName = name;}
-
-    public String getGrammarName() { return grammarName; }
-
-    public Rule getRule(int index) { return rules.get(index); }
-
-    public CommonTokenStream getTokens() { return tokens; }
-    
-    public void setTokens(CommonTokenStream ts) { tokens = ts; }
-
-    public Rule getRule(String name) {
-        for(Rule rule: rules) {
-            if(rule.getName().equals(name)) {
-                return rule;
-            }
-        }
-        return null;
-    }
-    
-    // only for stringtemplate use
-    public List getRulesForStringTemplate() {return rules;}
-    
-}
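For orientation, a minimal sketch of driving this model. It assumes same-package access (the constructor above is protected) and assumes Rule exposes a single-argument constructor taking the rule name, which is not shown in this diff:

    package org.antlr.gunit.swingui.model;

    import java.io.File;

    public class TestSuiteSketch {
        public static void main(String[] args) {
            // Hypothetical grammar and gUnit file names.
            TestSuite suite = new TestSuite("SimpleCalc", new File("SimpleCalc.gunit"));
            suite.addRule(new Rule("expression"));   // Rule(String) is assumed.

            System.out.println(suite.getGrammarName());                // SimpleCalc
            System.out.println(suite.getRuleCount());                  // 1
            System.out.println(suite.getRule("expression") != null);   // true
        }
    }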
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/runner/ParserLoader.java b/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/runner/ParserLoader.java
deleted file mode 100644
index 23f5aa0..0000000
--- a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/runner/ParserLoader.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2009 Shaoting Cai
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.gunit.swingui.runner;
-
-import java.io.*;
-import java.util.HashMap;
-
-/**
- * Class loader for the parsers and lexers generated by ANTLR.
- * @author Shaoting
- */
-public class ParserLoader extends ClassLoader {
-
-    private HashMap<String, Class> classList;
-    private String grammar;
-
-    /**
-     * Create a class loader for antlr parser/lexer.
-     * @param grammarName
-     * @param classDir
-     */
-    public ParserLoader(String grammarName, String classDir) throws IOException, ClassNotFoundException {
-
-        final String lexerName = grammarName + "Lexer";
-
-        // load all the class files in the "classDir" related to the grammarName
-        File dir = new File(classDir);
-        if(dir.isDirectory()) {
-            classList = new HashMap<String, Class>();
-            grammar = grammarName;
-            File[] files = dir.listFiles(new ClassFilenameFilter(grammarName));
-            for(File f : files) {
-
-                // load class data
-                final InputStream in = new BufferedInputStream(new FileInputStream(f));
-                final byte[] classData = new byte[in.available()];
-                in.read(classData);
-                in.close();
-
-                // define class
-                final Class newClass = defineClass(null, classData, 0, classData.length);
-                assert(newClass != null);
-                resolveClass(newClass);
-
-                // save to hashtable
-                final String fileName = f.getName();
-                final String className = fileName.substring(0, fileName.lastIndexOf("."));
-                classList.put(className, newClass);
-                //System.out.println("adding: " + className);
-            }
-        } else {
-            throw new IOException(classDir + " is not a directory.");
-        }
-
-        if(classList.isEmpty() || !classList.containsKey(lexerName)) {
-            throw new ClassNotFoundException(lexerName + " not found.");
-        }
-
-    }
-
-
-
-    @Override
-    public synchronized Class loadClass(String name, boolean resolve) throws ClassNotFoundException {
-        //System.out.print("loading: " + name);
-        if(name.startsWith(grammar)) {
-            if(classList.containsKey(name)) {
-                //System.out.println(" .... found");
-                return classList.get(name);
-            } else {
-                //System.out.println(" .... not found");
-                throw new ClassNotFoundException(name);
-            }
-            
-        } else {
-            final Class c = findSystemClass(name);
-            //System.out.println(" .... system found " + c.getName());
-            return c;
-        }
-    }
-
-    /**
-     * Accepts grammarname...($...)?.class
-     */
-    protected static class ClassFilenameFilter implements FilenameFilter {
-
-        private String grammarName;
-
-        protected ClassFilenameFilter(String name) {
-            grammarName = name;
-        }
-
-        public boolean accept(File dir, String name) {
-            return name.startsWith(grammarName) && name.endsWith(".class");
-        }
-
-    }
-
-}
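For context, the loader above reads every <grammarName>*.class file in the given directory into memory, defines the classes, and then serves any class whose name starts with the grammar name from that map, delegating everything else to the system class loader. A hedged usage sketch (the grammar name and class directory are illustrative; the lexer is assumed to have been generated beforehand by ANTLR/gUnit):

    import java.io.IOException;

    import org.antlr.gunit.swingui.runner.ParserLoader;

    public class ParserLoaderSketch {
        public static void main(String[] args) throws IOException, ClassNotFoundException {
            // Directory assumed to contain SimpleCalcLexer.class, SimpleCalcParser.class, ...
            ParserLoader loader = new ParserLoader("SimpleCalc", "/tmp/gunit-classes");

            // Served from the in-memory map because the name starts with "SimpleCalc";
            // a name like java.lang.String would fall through to findSystemClass().
            Class<?> lexerClass = loader.loadClass("SimpleCalcLexer");
            System.out.println("Loaded " + lexerClass.getName());
        }
    }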
diff --git a/antlr-3.4/lib/antlr-3.4-complete.jar b/antlr-3.4/lib/antlr-3.4-complete.jar
deleted file mode 100644
index 9c985c7..0000000
--- a/antlr-3.4/lib/antlr-3.4-complete.jar
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/pom.xml b/antlr-3.4/pom.xml
deleted file mode 100644
index c2f9cc7..0000000
--- a/antlr-3.4/pom.xml
+++ /dev/null
@@ -1,311 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-
-    <!--
-        The ANTLR Maven artifacts are now released via the Sonatype OSS
-        repository, which means that they are synced to Maven Central
-        within a few minutes of hitting the release repo for Sonatype.
-        To enable this, we inherit from the Sonatype-provided parent
-        pom. However, we must also configure our .m2/settings.xml to include
-        the snapshot and staging servers and the Sonatype password. This
-        means that only ANTLR developers can release the artifacts, but
-        anyone can build locally.
-      -->
-    <parent>
-        <groupId>org.sonatype.oss</groupId>
-        <artifactId>oss-parent</artifactId>
-        <version>7</version>
-    </parent>    
-    
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>org.antlr</groupId>
-    <artifactId>antlr-master</artifactId>
-    <packaging>pom</packaging>
-    <version>3.4</version>
-    <name>ANTLR Master build control POM 3.4</name>
-    <url>http://maven.apache.org</url>
-
-
-
-  <!--
-    What version of ANTLR are we building? This sets the
-    version number for all other things that are built
-    as part of an ANTLR release, unless they override or
-    ignore it. We do this via a properties file for this
-    pom.
-    -->
-
-  <!--
-     This is the master pom for building the ANTLR
-     toolset and runtime (Java) at the specific level
-     defined above. Hence we specify here the modules that
-     this pom will build when it is built under certain profiles.
-    -->
-
-  <!--
-    Make sure that the build is not platform dependent (i.e., ensure that
-    all the files in the source tree are treated as UTF-8).
-    -->
-    <properties>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    </properties>
-
-        <profiles>
-            <profile>
-                <id>standard</id>
-                <activation>
-                    <activeByDefault>true</activeByDefault>
-                </activation>
-                <modules>
-                    <module>runtime/Java</module>
-                    <module>tool</module>
-                    <module>antlr3-maven-plugin</module>
-                    <module>gunit</module>
-                    <module>gunit-maven-plugin</module>
-                    <module>antlr3-maven-archetype</module>
-                </modules>
-            </profile>
-
-            <!--
-                Activate this profile to build ONLY the Uber jar, which is the
-                ANTLR tool and its dependencies (no plugins etc).
-                
-                mvn -Duber -DskipTests package assembly:assembly
-                
-              -->
-            <profile>
-                <id>uber</id>
-                <activation>
-                    <property>
-                        <name>uber</name>
-                        <value>true</value>
-                    </property>
-                </activation>
-                <modules>
-                    <module>runtime/Java</module>
-                    <module>tool</module>
-                </modules>
-                <build>
-                    <plugins>
-                        <plugin>
-
-                        <!--
-
-                            Build an uber-jar for the ANTLR Tool that is packaged with all the other dependencies,
-                            such as the antlr-runtime and stringtemplate etc. This will be useful
-                            for developers, who then do not need to download anything else or
-                            remember that they need stringtemplate.jar in their CLASSPATH and so
-                            on.
-
-                            This does not preclude any of the module generated jars from
-                            being used on their own of course.
-
-                            Here, we also build a master source jar as I was unable to persuade
-                            this plugin to use multiple configurations and not have the thing
-                            screw up because of multiple modules :-(
-
-                          -->
-
-                            <artifactId>maven-assembly-plugin</artifactId>
-                            <version>2.2.1</version>
-                            <!--
-                                Do not make the child modules build an assembly
-                              -->
-                            <inherited>false</inherited>
-
-                            <configuration>
-                                <descriptors>
-                                    <descriptor>antlrjar.xml</descriptor>
-                                    <descriptor>antlrsources.xml</descriptor>
-                                </descriptors>
-                                    <!--
-
-                                        Specify that we want the resulting jar to be executable
-                                        via java -jar, which we do by modifying the manifest
-                                        of course.
-                                      -->
-                                <archive>
-                                    <manifest>
-                                        <mainClass>org.antlr.Tool</mainClass>
-                                    </manifest>
-                                </archive>
-                            </configuration>
-
-                        </plugin>
-                    </plugins>
-                </build>
-            </profile>
-            
-            <profile>
-                <id>release-sign-artifacts</id>
-                <activation>
-                    <property>
-                        <name>deploy</name>
-                        <value>true</value>
-                    </property>
-                </activation>
-                <modules>
-                    <module>runtime/Java</module>
-                    <module>tool</module>
-                    <module>antlr3-maven-plugin</module>
-                    <module>gunit</module>
-                    <module>gunit-maven-plugin</module>
-                    <module>antlr3-maven-archetype</module>
-                </modules>
-                <build>
-                    <plugins>
-                        
-                        <plugin>
-                            <groupId>org.apache.maven.plugins</groupId>
-                            <artifactId>maven-gpg-plugin</artifactId>
-                            <version>1.3</version>
-                            <executions>
-                                <execution>
-                                    <id>sign-artifacts</id>
-                                    <phase>verify</phase>
-                                    <goals>
-                                       <goal>sign</goal>
-                                    </goals>
-                                </execution>
-                            </executions>
-                        </plugin>
-                        
-                        <plugin>
-                            <groupId>org.apache.maven.plugins</groupId>
-                            <artifactId>maven-javadoc-plugin</artifactId>
-                            <version>2.8</version>
-                            <executions>
-                                <execution>
-                                    <id>attach-javadocs</id>
-                                    <goals>
-                                        <goal>jar</goal>
-                                    </goals>
-                                </execution>
-                            </executions>
-                        </plugin>
-                        
-                        <plugin>
-                            <groupId>org.apache.maven.plugins</groupId>
-                            <artifactId>maven-source-plugin</artifactId>
-                            <version>2.1.2</version>
-                            <executions>
-                                <execution>
-                                    <id>attach-sources</id>
-                                    <goals>
-                                       <goal>jar</goal>
-                                    </goals>
-                                </execution>
-                            </executions>
-                        </plugin>
-                        
-                    </plugins>
-                </build>
-            </profile>
-            
-    </profiles>
-
-  <!--
-
-    Tell Maven which other artifacts we need in order to
-    build, run and test the ANTLR jars.
-    This is the master pom, and so it only contains those
-    dependencies that are common to all the modules below
-    or are just included for test
-    -->
-    <dependencyManagement>
-
-        <dependencies>
-
-            <dependency>
-                <groupId>junit</groupId>
-                <artifactId>junit</artifactId>
-                <version>4.8.2</version>
-                <scope>test</scope>
-            </dependency>
-
-
-        </dependencies>
-
-    </dependencyManagement>
-
-    <build>
-
-        <defaultGoal>install</defaultGoal>
-
-        <!--
-            The following filter definition means that both the master
-            project and the subprojects will read in a file located in the
-            same directory as the pom.xml and set any properties
-            that are defined there in the standard x=y format. These
-            properties can then be referenced via ${x} in any resource
-            file specified in any pom. So, there is a master antlr.config
-            file in the same location as this pom.xml file and here you can
-            define anything that is relevant to all the modules that we
-            build here. However each module also has an antlr.config file
-            where you can override property values from the master file or
-            define things that are only relevant to that module.
-          -->
-        <filters>
-            <filter>antlr.config</filter>
-        </filters>
-
-        <resources>
-            <resource>
-                <directory>src/main/resources</directory>
-                <filtering>true</filtering>
-            </resource>
-        </resources>
-
-        <plugins>
-
-             <plugin>
-                <groupId>org.codehaus.mojo</groupId>
-                <artifactId>buildnumber-maven-plugin</artifactId>
-                <version>1.0-beta-2</version>
-                <configuration>
-                  <format>{0,date,MMM dd, yyyy} {0,time,kk:mm:ss}</format>
-                  <items>
-                    <item>timestamp</item>
-                  </items>
-                </configuration>
-                <executions>
-                  <execution>
-                    <phase>validate</phase>
-                    <goals>
-                      <goal>create</goal>
-                    </goals>
-                  </execution>
-                </executions>
-             </plugin>
-
-            <plugin>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>2.3.2</version>
-                <configuration>
-                    <source>1.6</source>
-                    <target>jsr14</target>
-                    <sourceDirectory>src</sourceDirectory>
-                </configuration>
-            </plugin>
-
-            <plugin>
-                <artifactId>maven-surefire-plugin</artifactId>
-                <version>2.9</version>
-            </plugin>
-
-            <plugin>
-                <groupId>org.codehaus.mojo</groupId>
-                <artifactId>findbugs-maven-plugin</artifactId>
-                <version>2.3.2</version>
-                <configuration>
-                    <findbugsXmlOutput>true</findbugsXmlOutput>
-                    <findbugsXmlWithMessages>true</findbugsXmlWithMessages>
-                    <xmlOutput>true</xmlOutput>
-                </configuration>
-            </plugin>
-            
-        </plugins>
-
-    </build>
-</project>
diff --git a/antlr-3.4/runtime/C/C.sln b/antlr-3.4/runtime/C/C.sln
deleted file mode 100644
index f841177..0000000
--- a/antlr-3.4/runtime/C/C.sln
+++ /dev/null
@@ -1,53 +0,0 @@
-Microsoft Visual Studio Solution File, Format Version 10.00
-# Visual Studio 2008
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "C", "C.vcproj", "{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}"
-EndProject
-Global
-	GlobalSection(SourceCodeControl) = preSolution
-		SccNumberOfProjects = 2
-		SccProjectName0 = Perforce\u0020Project
-		SccLocalPath0 = ..\\..
-		SccProvider0 = MSSCCI:Perforce\u0020SCM
-		SccProjectFilePathRelativizedFromConnection0 = runtime\\C\\
-		SccProjectUniqueName1 = C.vcproj
-		SccLocalPath1 = ..\\..
-		SccProjectFilePathRelativizedFromConnection1 = runtime\\C\\
-	EndGlobalSection
-	GlobalSection(SolutionConfigurationPlatforms) = preSolution
-		Debug|Win32 = Debug|Win32
-		Debug|x64 = Debug|x64
-		DebugDLL|Win32 = DebugDLL|Win32
-		DebugDLL|x64 = DebugDLL|x64
-		Deployment|Win32 = Deployment|Win32
-		Deployment|x64 = Deployment|x64
-		Release|Win32 = Release|Win32
-		Release|x64 = Release|x64
-		ReleaseDLL|Win32 = ReleaseDLL|Win32
-		ReleaseDLL|x64 = ReleaseDLL|x64
-	EndGlobalSection
-	GlobalSection(ProjectConfigurationPlatforms) = postSolution
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Debug|Win32.ActiveCfg = Debug|Win32
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Debug|Win32.Build.0 = Debug|Win32
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Debug|x64.ActiveCfg = Debug|x64
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Debug|x64.Build.0 = Debug|x64
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.DebugDLL|Win32.ActiveCfg = DebugDLL|Win32
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.DebugDLL|Win32.Build.0 = DebugDLL|Win32
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.DebugDLL|x64.ActiveCfg = DebugDLL|x64
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.DebugDLL|x64.Build.0 = DebugDLL|x64
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Deployment|Win32.ActiveCfg = DebugDLL|Win32
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Deployment|Win32.Build.0 = DebugDLL|Win32
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Deployment|x64.ActiveCfg = DebugDLL|x64
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Deployment|x64.Build.0 = DebugDLL|x64
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Release|Win32.ActiveCfg = Release|Win32
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Release|Win32.Build.0 = Release|Win32
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Release|x64.ActiveCfg = Release|x64
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Release|x64.Build.0 = Release|x64
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.ReleaseDLL|Win32.ActiveCfg = ReleaseDLL|Win32
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.ReleaseDLL|Win32.Build.0 = ReleaseDLL|Win32
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.ReleaseDLL|x64.ActiveCfg = ReleaseDLL|x64
-		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.ReleaseDLL|x64.Build.0 = ReleaseDLL|x64
-	EndGlobalSection
-	GlobalSection(SolutionProperties) = preSolution
-		HideSolutionNode = FALSE
-	EndGlobalSection
-EndGlobal
diff --git a/antlr-3.4/runtime/C/C.vcproj b/antlr-3.4/runtime/C/C.vcproj
deleted file mode 100644
index 16d9c2b..0000000
--- a/antlr-3.4/runtime/C/C.vcproj
+++ /dev/null
@@ -1,1043 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="9.00"
-	Name="C"
-	ProjectGUID="{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}"
-	RootNamespace="C"
-	SccProjectName="Perforce Project"
-	SccLocalPath="..\.."
-	SccProvider="MSSCCI:Perforce SCM"
-	Keyword="Win32Proj"
-	TargetFrameworkVersion="131072"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-		<Platform
-			Name="x64"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			OutputDirectory="Debug"
-			IntermediateDirectory="Debug"
-			ConfigurationType="4"
-			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
-			CharacterSet="2"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="0"
-				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;;&quot;$(SolutionDir)\..\..\include&quot;"
-				PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
-				StringPooling="true"
-				MinimalRebuild="false"
-				BasicRuntimeChecks="3"
-				RuntimeLibrary="3"
-				StructMemberAlignment="0"
-				EnableFunctionLevelLinking="true"
-				EnableEnhancedInstructionSet="0"
-				FloatingPointModel="0"
-				FloatingPointExceptions="true"
-				DisableLanguageExtensions="false"
-				UsePrecompiledHeader="0"
-				ExpandAttributedSource="true"
-				AssemblerOutput="2"
-				BrowseInformation="1"
-				WarningLevel="4"
-				WarnAsError="false"
-				Detect64BitPortabilityProblems="false"
-				DebugInformationFormat="3"
-				CallingConvention="0"
-				CompileAs="0"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				AdditionalDependencies="Ws2_32.lib"
-				OutputFile="$(OutDir)/antlr3cd.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Debug|x64"
-			OutputDirectory="$(PlatformName)\$(ConfigurationName)"
-			IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
-			ConfigurationType="4"
-			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
-			CharacterSet="2"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-				TargetEnvironment="3"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="0"
-				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;;&quot;$(SolutionDir)\..\..\include&quot;"
-				PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
-				StringPooling="true"
-				MinimalRebuild="false"
-				BasicRuntimeChecks="3"
-				RuntimeLibrary="3"
-				StructMemberAlignment="0"
-				EnableFunctionLevelLinking="true"
-				EnableEnhancedInstructionSet="0"
-				FloatingPointModel="0"
-				FloatingPointExceptions="true"
-				DisableLanguageExtensions="false"
-				UsePrecompiledHeader="0"
-				ExpandAttributedSource="true"
-				AssemblerOutput="2"
-				BrowseInformation="1"
-				WarningLevel="4"
-				WarnAsError="false"
-				Detect64BitPortabilityProblems="false"
-				DebugInformationFormat="3"
-				CallingConvention="0"
-				CompileAs="0"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				AdditionalDependencies="Ws2_32.lib"
-				OutputFile="$(OutDir)/antlr3cd.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			OutputDirectory="Release"
-			IntermediateDirectory="Release"
-			ConfigurationType="4"
-			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
-			CharacterSet="2"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="3"
-				InlineFunctionExpansion="2"
-				EnableIntrinsicFunctions="true"
-				FavorSizeOrSpeed="1"
-				OmitFramePointers="true"
-				EnableFiberSafeOptimizations="true"
-				WholeProgramOptimization="true"
-				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
-				PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
-				StringPooling="true"
-				ExceptionHandling="1"
-				RuntimeLibrary="2"
-				BufferSecurityCheck="false"
-				EnableEnhancedInstructionSet="2"
-				FloatingPointModel="2"
-				DisableLanguageExtensions="false"
-				RuntimeTypeInfo="false"
-				UsePrecompiledHeader="0"
-				AssemblerListingLocation=".\asm\release"
-				WarningLevel="4"
-				WarnAsError="true"
-				Detect64BitPortabilityProblems="false"
-				DebugInformationFormat="3"
-				CallingConvention="0"
-				CompileAs="1"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				AdditionalOptions="/LTCG"
-				AdditionalDependencies="Ws2_32.lib"
-				OutputFile="$(OutDir)/antlr3c.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-				ValidateIntelliSense="true"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|x64"
-			OutputDirectory="$(PlatformName)\$(ConfigurationName)"
-			IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
-			ConfigurationType="4"
-			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
-			CharacterSet="2"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-				TargetEnvironment="3"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="3"
-				InlineFunctionExpansion="2"
-				EnableIntrinsicFunctions="true"
-				FavorSizeOrSpeed="1"
-				OmitFramePointers="true"
-				EnableFiberSafeOptimizations="true"
-				WholeProgramOptimization="true"
-				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
-				PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
-				StringPooling="true"
-				ExceptionHandling="1"
-				RuntimeLibrary="2"
-				BufferSecurityCheck="false"
-				EnableEnhancedInstructionSet="0"
-				FloatingPointModel="2"
-				RuntimeTypeInfo="false"
-				UsePrecompiledHeader="0"
-				AssemblerListingLocation=".\asm\release"
-				WarningLevel="4"
-				WarnAsError="true"
-				Detect64BitPortabilityProblems="false"
-				DebugInformationFormat="3"
-				CallingConvention="0"
-				CompileAs="1"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-				AdditionalOptions="/LTCG"
-				AdditionalDependencies="Ws2_32.lib"
-				OutputFile="$(OutDir)/antlr3c.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-				ValidateIntelliSense="true"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="ReleaseDLL|Win32"
-			OutputDirectory="$(ConfigurationName)"
-			IntermediateDirectory="$(ConfigurationName)"
-			ConfigurationType="2"
-			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
-			CharacterSet="2"
-			WholeProgramOptimization="1"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="3"
-				InlineFunctionExpansion="2"
-				EnableIntrinsicFunctions="true"
-				FavorSizeOrSpeed="1"
-				OmitFramePointers="true"
-				EnableFiberSafeOptimizations="true"
-				WholeProgramOptimization="true"
-				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
-				PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
-				StringPooling="true"
-				ExceptionHandling="1"
-				RuntimeLibrary="2"
-				BufferSecurityCheck="false"
-				EnableEnhancedInstructionSet="2"
-				FloatingPointModel="2"
-				DisableLanguageExtensions="false"
-				RuntimeTypeInfo="false"
-				UsePrecompiledHeader="0"
-				AssemblerListingLocation=".\asm\release"
-				WarningLevel="4"
-				WarnAsError="true"
-				Detect64BitPortabilityProblems="false"
-				DebugInformationFormat="3"
-				CallingConvention="0"
-				CompileAs="0"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="Ws2_32.lib"
-				OutputFile="$(OutDir)\antlr3c.dll"
-				Version="3.1.1"
-				OptimizeReferences="2"
-				EnableCOMDATFolding="2"
-				RandomizedBaseAddress="1"
-				DataExecutionPrevention="0"
-				ImportLibrary="$(TargetDir)$(TargetName)_dll.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-				ValidateIntelliSense="true"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="ReleaseDLL|x64"
-			OutputDirectory="$(PlatformName)\$(ConfigurationName)"
-			IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
-			ConfigurationType="2"
-			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
-			CharacterSet="2"
-			WholeProgramOptimization="1"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-				TargetEnvironment="3"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="3"
-				InlineFunctionExpansion="2"
-				EnableIntrinsicFunctions="true"
-				FavorSizeOrSpeed="1"
-				OmitFramePointers="true"
-				EnableFiberSafeOptimizations="true"
-				WholeProgramOptimization="true"
-				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
-				PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
-				StringPooling="true"
-				ExceptionHandling="1"
-				RuntimeLibrary="2"
-				BufferSecurityCheck="false"
-				EnableEnhancedInstructionSet="0"
-				FloatingPointModel="2"
-				DisableLanguageExtensions="false"
-				RuntimeTypeInfo="false"
-				UsePrecompiledHeader="0"
-				AssemblerListingLocation=".\asm\release"
-				WarningLevel="4"
-				WarnAsError="true"
-				Detect64BitPortabilityProblems="false"
-				DebugInformationFormat="3"
-				CallingConvention="0"
-				CompileAs="0"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="Ws2_32.lib"
-				OutputFile="$(OutDir)\antlr3c64.dll"
-				Version="3.1.1"
-				OptimizeReferences="2"
-				EnableCOMDATFolding="2"
-				RandomizedBaseAddress="1"
-				DataExecutionPrevention="0"
-				ImportLibrary="$(TargetDir)$(TargetName)_dll.lib"
-				TargetMachine="17"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-				ValidateIntelliSense="true"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="DebugDLL|Win32"
-			OutputDirectory="$(ConfigurationName)"
-			IntermediateDirectory="$(ConfigurationName)"
-			ConfigurationType="2"
-			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
-			CharacterSet="2"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-				Outputs="$(TargetDir)$(TargetName)_dll.lib"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="0"
-				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
-				PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
-				StringPooling="true"
-				MinimalRebuild="false"
-				BasicRuntimeChecks="3"
-				RuntimeLibrary="3"
-				StructMemberAlignment="0"
-				EnableFunctionLevelLinking="true"
-				EnableEnhancedInstructionSet="0"
-				FloatingPointModel="0"
-				FloatingPointExceptions="true"
-				DisableLanguageExtensions="false"
-				UsePrecompiledHeader="0"
-				ExpandAttributedSource="true"
-				AssemblerOutput="2"
-				BrowseInformation="1"
-				WarningLevel="4"
-				WarnAsError="false"
-				Detect64BitPortabilityProblems="false"
-				DebugInformationFormat="3"
-				CallingConvention="0"
-				CompileAs="0"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="Ws2_32.lib"
-				OutputFile="$(OutDir)\antlr3cd.dll"
-				Version="3.1.1"
-				GenerateDebugInformation="true"
-				RandomizedBaseAddress="1"
-				DataExecutionPrevention="0"
-				ImportLibrary="$(TargetDir)$(TargetName)_dll.lib"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="DebugDLL|x64"
-			OutputDirectory="$(PlatformName)\$(ConfigurationName)"
-			IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
-			ConfigurationType="2"
-			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
-			CharacterSet="2"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-				Outputs="$(TargetDir)$(TargetName)_dll.lib"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-				TargetEnvironment="3"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="0"
-				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
-				PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
-				StringPooling="true"
-				MinimalRebuild="false"
-				BasicRuntimeChecks="3"
-				RuntimeLibrary="3"
-				StructMemberAlignment="0"
-				EnableFunctionLevelLinking="true"
-				EnableEnhancedInstructionSet="0"
-				FloatingPointModel="0"
-				FloatingPointExceptions="true"
-				DisableLanguageExtensions="false"
-				UsePrecompiledHeader="0"
-				ExpandAttributedSource="true"
-				AssemblerOutput="2"
-				BrowseInformation="1"
-				WarningLevel="4"
-				WarnAsError="false"
-				Detect64BitPortabilityProblems="false"
-				DebugInformationFormat="3"
-				CallingConvention="0"
-				CompileAs="0"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLinkerTool"
-				AdditionalDependencies="Ws2_32.lib"
-				OutputFile="$(OutDir)\antlr3c64d.dll"
-				Version="3.1.1"
-				GenerateDebugInformation="true"
-				RandomizedBaseAddress="1"
-				DataExecutionPrevention="0"
-				ImportLibrary="$(TargetDir)$(TargetName)_dll.lib"
-				TargetMachine="17"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCManifestTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCAppVerifierTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="Source Files"
-			Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
-			UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
-			>
-			<File
-				RelativePath=".\src\antlr3baserecognizer.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3basetree.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3basetreeadaptor.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3bitset.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3collections.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3commontoken.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3commontree.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3commontreeadaptor.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3commontreenodestream.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3convertutf.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3cyclicdfa.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3debughandlers.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3encodings.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3exception.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3filestream.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3inputstream.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3intstream.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3lexer.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3parser.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3rewritestreams.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3string.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3tokenstream.c"
-				>
-			</File>
-			<File
-				RelativePath=".\src\antlr3treeparser.c"
-				>
-			</File>
-		</Filter>
-		<Filter
-			Name="Header Files"
-			Filter="h;hpp;hxx;hm;inl;inc;xsd"
-			UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
-			>
-			<File
-				RelativePath=".\include\antlr3.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3baserecognizer.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3basetree.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3basetreeadaptor.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3bitset.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3collections.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3commontoken.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3commontree.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3commontreeadaptor.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3commontreenodestream.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3convertutf.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3cyclicdfa.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3debugeventlistener.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3defs.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3encodings.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3errors.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3exception.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3filestream.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3input.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3interfaces.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3intstream.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3lexer.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3memory.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3parser.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3parsetree.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3recognizersharedstate.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3rewritestreams.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3string.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3tokenstream.h"
-				>
-			</File>
-			<File
-				RelativePath=".\include\antlr3treeparser.h"
-				>
-			</File>
-		</Filter>
-		<Filter
-			Name="Templates"
-			Filter=".stg"
-			>
-			<File
-				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\AST.stg"
-				>
-			</File>
-			<File
-				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\ASTDbg.stg"
-				>
-			</File>
-			<File
-				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\ASTParser.stg"
-				>
-			</File>
-			<File
-				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\ASTTreeParser.stg"
-				>
-			</File>
-			<File
-				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\C.stg"
-				>
-			</File>
-			<File
-				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\Dbg.stg"
-				>
-			</File>
-		</Filter>
-		<Filter
-			Name="Java"
-			Filter="*.java"
-			>
-			<File
-				RelativePath="..\..\tool\src\main\java\org\antlr\codegen\CTarget.java"
-				>
-			</File>
-		</Filter>
-		<Filter
-			Name="Doxygen"
-			>
-			<File
-				RelativePath=".\doxygen\atsections.dox"
-				>
-			</File>
-			<File
-				RelativePath=".\doxygen\build.dox"
-				>
-			</File>
-			<File
-				RelativePath=".\doxygen\buildrec.dox"
-				>
-			</File>
-			<File
-				RelativePath=".\doxygen\changes31.dox"
-				>
-			</File>
-			<File
-				RelativePath=".\doxygen\doxygengroups.dox"
-				>
-			</File>
-			<File
-				RelativePath=".\doxygen\generate.dox"
-				>
-			</File>
-			<File
-				RelativePath=".\doxygen\interop.dox"
-				>
-			</File>
-			<File
-				RelativePath=".\doxygen\mainpage.dox"
-				>
-			</File>
-			<File
-				RelativePath=".\doxygen\runtime.dox"
-				>
-			</File>
-			<File
-				RelativePath=".\doxygen\using.dox"
-				>
-			</File>
-		</Filter>
-	</Files>
-	<Globals>
-		<Global
-			Name="DevPartner_IsInstrumented"
-			Value="0"
-		/>
-	</Globals>
-</VisualStudioProject>
diff --git a/antlr-3.4/runtime/C/dist/libantlr3c-3.4.tar.gz b/antlr-3.4/runtime/C/dist/libantlr3c-3.4.tar.gz
deleted file mode 100644
index daeb313..0000000
--- a/antlr-3.4/runtime/C/dist/libantlr3c-3.4.tar.gz
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/C/include/antlr3convertutf.h b/antlr-3.4/runtime/C/include/antlr3convertutf.h
deleted file mode 100644
index 79cc82c..0000000
--- a/antlr-3.4/runtime/C/include/antlr3convertutf.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2001-2004 Unicode, Inc.
- * 
- * Disclaimer
- * 
- * This source code is provided as is by Unicode, Inc. No claims are
- * made as to fitness for any particular purpose. No warranties of any
- * kind are expressed or implied. The recipient agrees to determine
- * applicability of information provided. If this file has been
- * purchased on magnetic or optical media from Unicode, Inc., the
- * sole remedy for any claim will be exchange of defective media
- * within 90 days of receipt.
- * 
- * Limitations on Rights to Redistribute This Code
- * 
- * Unicode, Inc. hereby grants the right to freely use the information
- * supplied in this file in the creation of products supporting the
- * Unicode Standard, and to make copies of this file in any form
- * for internal or external distribution as long as this notice
- * remains attached.
- */
-
-/* ---------------------------------------------------------------------
-
-    Conversions between UTF32, UTF-16, and UTF-8.  Header file.
-
-    Several functions are included here, forming a complete set of
-    conversions between the three formats.  UTF-7 is not included
-    here, but is handled in a separate source file.
-
-    Each of these routines takes pointers to input buffers and output
-    buffers.  The input buffers are const.
-
-    Each routine converts the text between *sourceStart and sourceEnd,
-    putting the result into the buffer between *targetStart and
-    targetEnd. Note: the end pointers are *after* the last item: e.g. 
-    *(sourceEnd - 1) is the last item.
-
-    The return result indicates whether the conversion was successful,
-    and if not, whether the problem was in the source or target buffers.
-    (Only the first encountered problem is indicated.)
-
-    After the conversion, *sourceStart and *targetStart are both
-    updated to point to the end of last text successfully converted in
-    the respective buffers.
-
-    Input parameters:
-	sourceStart - pointer to a pointer to the source buffer.
-		The contents of this are modified on return so that
-		it points at the next thing to be converted.
-	targetStart - similarly, pointer to pointer to the target buffer.
-	sourceEnd, targetEnd - respectively pointers to the ends of the
-		two buffers, for overflow checking only.
-
-    These conversion functions take a ConversionFlags argument. When this
-    flag is set to strict, both irregular sequences and isolated surrogates
-    will cause an error.  When the flag is set to lenient, both irregular
-    sequences and isolated surrogates are converted.
-
-    Whether the flag is strict or lenient, all illegal sequences will cause
-    an error return. This includes sequences such as: <F4 90 80 80>, <C0 80>,
-    or <A0> in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code
-    must check for illegal sequences.
-
-    When the flag is set to lenient, characters over 0x10FFFF are converted
-    to the replacement character; otherwise (when the flag is set to strict)
-    they constitute an error.
-
-    Output parameters:
-	The value "sourceIllegal" is returned from some routines if the input
-	sequence is malformed.  When "sourceIllegal" is returned, the source
-	value will point to the illegal value that caused the problem. E.g.,
-	in UTF-8 when a sequence is malformed, it points to the start of the
-	malformed sequence.  
-
-    Author: Mark E. Davis, 1994.
-    Rev History: Rick McGowan, fixes & updates May 2001.
-		 Fixes & updates, Sept 2001.
-
------------------------------------------------------------------------- */
-
-/* ---------------------------------------------------------------------
-    The following 4 definitions are compiler-specific.
-    The C standard does not guarantee that wchar_t has at least
-    16 bits, so wchar_t is no less portable than unsigned short!
-    All should be unsigned values to avoid sign extension during
-    bit mask & shift operations.
------------------------------------------------------------------------- */
-
-
-// Changes for ANTLR3 - Jim Idle, January 2008.
-// builtin types defined for Unicode types changed to
-// aliases for the types that are system determined by
-// ANTLR at compile time.
-//
-// typedef unsigned long	UTF32;	/* at least 32 bits */
-// typedef unsigned short	UTF16;	/* at least 16 bits */
-// typedef unsigned char	UTF8;	/* typically 8 bits */
-// typedef unsigned char	Boolean; /* 0 or 1 */
-
-#ifndef	_ANTLR3_CONVERTUTF_H
-#define	_ANTLR3_CONVERTUTF_H
-
-#include	<antlr3defs.h>
-
-typedef ANTLR3_UINT32	UTF32;	/* at least 32 bits */
-typedef ANTLR3_UINT16	UTF16;	/* at least 16 bits */
-typedef ANTLR3_UINT8	UTF8;	/* typically 8 bits */
-
-/* Some fundamental constants */
-#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
-#define UNI_MAX_BMP (UTF32)0x0000FFFF
-#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
-#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
-#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
-
-#define UNI_SUR_HIGH_START  (UTF32)0xD800
-#define UNI_SUR_HIGH_END    (UTF32)0xDBFF
-#define UNI_SUR_LOW_START   (UTF32)0xDC00
-#define UNI_SUR_LOW_END     (UTF32)0xDFFF
-#define false	            ANTLR3_FALSE
-#define true	            ANTLR3_TRUE
-#define halfShift           ((UTF32)10)
-#define halfBase            ((UTF32)0x0010000UL)
-#define halfMask            ((UTF32)0x3FFUL)
-
-typedef enum {
-	conversionOK, 		/* conversion successful */
-	sourceExhausted,	/* partial character in source, but hit end */
-	targetExhausted,	/* insuff. room in target for conversion */
-	sourceIllegal		/* source sequence is illegal/malformed */
-} ConversionResult;
-
-typedef enum {
-	strictConversion = 0,
-	lenientConversion
-} ConversionFlags;
-
-/* This is for C++ and does no harm in C */
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-ConversionResult ConvertUTF8toUTF16 (
-		const UTF8** sourceStart, const UTF8* sourceEnd, 
-		UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
-
-ConversionResult ConvertUTF16toUTF8 (
-		const UTF16** sourceStart, const UTF16* sourceEnd, 
-		UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
-		
-ConversionResult ConvertUTF8toUTF32 (
-		const UTF8** sourceStart, const UTF8* sourceEnd, 
-		UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
-
-ConversionResult ConvertUTF32toUTF8 (
-		const UTF32** sourceStart, const UTF32* sourceEnd, 
-		UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
-		
-ConversionResult ConvertUTF16toUTF32 (
-		const UTF16** sourceStart, const UTF16* sourceEnd, 
-		UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
-
-ConversionResult ConvertUTF32toUTF16 (
-		const UTF32** sourceStart, const UTF32* sourceEnd, 
-		UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
-
-ANTLR3_BOOLEAN isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-/* --------------------------------------------------------------------- */
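For reference, a minimal sketch (not from the original header) of how these conversion routines are typically driven; buffer names and lengths are illustrative. With strictConversion, malformed input is rejected and the source pointer is left on the offending sequence:

    #include <stddef.h>
    #include <antlr3convertutf.h>

    /* Convert a UTF-8 buffer to UTF-16, rejecting malformed input. */
    static ConversionResult
    toUTF16(const UTF8 *in, size_t inLen, UTF16 *out, size_t outCap)
    {
        const UTF8  *src    = in;
        const UTF8  *srcEnd = in + inLen;
        UTF16       *dst    = out;
        UTF16       *dstEnd = out + outCap;

        ConversionResult r = ConvertUTF8toUTF16(&src, srcEnd, &dst, dstEnd, strictConversion);
        if (r == sourceIllegal)
        {
            /* src now points at the start of the malformed sequence */
        }
        return r;
    }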
diff --git a/antlr-3.4/runtime/C/include/antlr3debugeventlistener.h b/antlr-3.4/runtime/C/include/antlr3debugeventlistener.h
deleted file mode 100644
index c9cd6ce..0000000
--- a/antlr-3.4/runtime/C/include/antlr3debugeventlistener.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/**
- * \file
- * The definition of all debugging events that a recognizer can trigger.
- *
- * \remark
- *  From the java implementation by Terence Parr...
- *  I did not create a separate AST debugging interface as it would create
- *  lots of extra classes and DebugParser has a dbg var defined, which makes
- *  it hard to change to ASTDebugEventListener.  I looked hard at this issue
- *  and it is easier to understand as one monolithic event interface for all
- *  possible events.  Hopefully, adding ST debugging stuff won't be bad.  Leave
- *  for future. 4/26/2006.
- */
-
-#ifndef	ANTLR3_DEBUG_EVENT_LISTENER_H
-#define	ANTLR3_DEBUG_EVENT_LISTENER_H
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3defs.h>
-#include    <antlr3basetree.h>
-#include    <antlr3commontoken.h>
-
-
-/// Default debugging port
-///
-#define DEFAULT_DEBUGGER_PORT		0xBFCC
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** The ANTLR3 debugging interface for communicating with ANTLR Works. Function comments
- *  mostly taken from the Java version.
- */
-typedef struct ANTLR3_DEBUG_EVENT_LISTENER_struct
-{
-	/// The port number which the debug listener should listen on for a connection
-	///
-	ANTLR3_UINT32		port;
-
-	/// The socket structure we receive after a successful accept on the serverSocket
-	///
-	SOCKET				socket;
-
-	/** The version of the debugging protocol supported by the providing
-	 *  instance of the debug event listener.
-	 */
-	int					PROTOCOL_VERSION;
-
-	/// The name of the grammar file that we are debugging
-	///
-	pANTLR3_STRING		grammarFileName;
-
-	/// Indicates whether we have already connected or not
-	///
-	ANTLR3_BOOLEAN		initialized;
-
-	/// Used to serialize the values of any particular token we need to
-	/// send back to the debugger.
-	///
-	pANTLR3_STRING		tokenString;
-
-
-	/// Allows the debug event system to access the adapter in use
-	/// by the recognizer, if this is a tree parser of some sort.
-	///
-	pANTLR3_BASE_TREE_ADAPTOR	adaptor;
-
-	/// Wait for a connection from the debugger and initiate the
-	/// debugging session.
-	///
-	ANTLR3_BOOLEAN	(*handshake)		(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-
-	/** The parser has just entered a rule.  No decision has been made about
-	 *  which alt is predicted.  This is fired AFTER init actions have been
-	 *  executed.  Attributes are defined and available etc...
-	 */
-	void			(*enterRule)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName);
-
-	/** Because rules can have lots of alternatives, it is very useful to
-	 *  know which alt you are entering.  This is 1..n for n alts.
-	 */
-	void			(*enterAlt)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int alt);
-
-	/** This is the last thing executed before leaving a rule.  It is
-	 *  executed even if an exception is thrown.  This is triggered after
-	 *  error reporting and recovery have occurred (unless the exception is
-	 *  not caught in this rule).  This implies an "exitAlt" event.
-	 */
-	void			(*exitRule)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName);
-
-	/** Track entry into any (...) subrule or other EBNF construct
-	 */
-	void			(*enterSubRule)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
-	
-	void			(*exitSubRule)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
-
-	/** Every decision, fixed k or arbitrary, has an enter/exit event
-	 *  so that a GUI can easily track what LT/consume events are
-	 *  associated with prediction.  You will see a single enter/exit
-	 *  subrule but multiple enter/exit decision events, one for each
-	 *  loop iteration.
-	 */
-	void			(*enterDecision)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
-
-	void			(*exitDecision)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
-
-	/** An input token was consumed; matched by any kind of element.
-	 *  Trigger after the token was matched by things like match(), matchAny().
-	 */
-	void			(*consumeToken)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t);
-
-	/** An off-channel input token was consumed.
-	 *  Trigger after the token was matched by things like match(), matchAny().
-	 *  (unless of course the hidden token is the first thing in the input stream).
-	 */
-	void			(*consumeHiddenToken)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t);
-
-	/** Somebody (anybody) looked ahead.  Note that this actually gets
-	 *  triggered by both LA and LT calls.  The debugger will want to know
-	 *  which Token object was examined.  Like consumeToken, this indicates
-	 *  what token was seen at that depth.  A remote debugger cannot look
-	 *  ahead into a file it doesn't have so LT events must pass the token
-	 *  even if the info is redundant.
-	 */
-	void			(*LT)				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_COMMON_TOKEN t);
-
-	/** The parser is going to look arbitrarily ahead; mark this location,
-	 *  the token stream's marker is sent in case you need it.
-	 */
-	void			(*mark)				(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker);
-
-	/** After an arbitrarily long lookahead as with a cyclic DFA (or with
-	 *  any backtrack), this informs the debugger that stream should be
-	 *  rewound to the position associated with marker.
-	 */
-	void			(*rewind)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker);
-
-	/** Rewind to the input position of the last marker.
-	 *  Used currently only after a cyclic DFA and just
-	 *  before starting a sem/syn predicate to get the
-	 *  input position back to the start of the decision.
-	 *  Do not "pop" the marker off the state.  mark(i)
-	 *  and rewind(i) should balance still.
-	 */
-	void			(*rewindLast)		(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-
-	void			(*beginBacktrack)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level);
-
-	void			(*endBacktrack)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level, ANTLR3_BOOLEAN successful);
-
-	/** To watch a parser move through the grammar, the parser needs to
-	 *  inform the debugger what line/charPos it is passing in the grammar.
-	 *  For now, this does not know how to switch from one grammar to the
-	 *  other and back for island grammars etc...
-	 *
-	 *  This should also allow breakpoints because the debugger can stop
-	 *  the parser whenever it hits this line/pos.
-	 */
-	void			(*location)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int line, int pos);
-
-	/** A recognition exception occurred such as NoViableAltException.  I made
-	 *  this a generic event so that I can alter the exception hierarchy later
-	 *  without having to alter all the debug objects.
-	 *
-	 *  Upon error, the stack of enter rule/subrule must be properly unwound.
-	 *  If no viable alt occurs it is within an enter/exit decision, which
-	 *  also must be rewound.  Even the rewind for each mark must be unwound.
-	 *  In the Java target this is pretty easy using try/finally, if a bit
-	 *  ugly in the generated code.  The rewind is generated in DFA.predict()
-	 *  actually so no code needs to be generated for that.  For languages
-	 *  w/o this "finally" feature (C++?), the target implementor will have
-	 *  to build an event stack or something.
-	 *
-	 *  Across a socket for remote debugging, only the RecognitionException
-	 *  data fields are transmitted.  The token object or whatever that
-	 *  caused the problem was the last object referenced by LT.  The
-	 *  immediately preceding LT event should hold the unexpected Token or
-	 *  char.
-	 *
-	 *  Here is a sample event trace for grammar:
-	 *
-	 *  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
-     *    | D
-     *    ;
-     *
-	 *  The sequence for this rule (with no viable alt in the subrule) for
-	 *  input 'c c' (there are 3 tokens) is:
-	 *
-	 *		commence
-	 *		LT(1)
-	 *		enterRule b
-	 *		location 7 1
-	 *		enter decision 3
-	 *		LT(1)
-	 *		exit decision 3
-	 *		enterAlt1
-	 *		location 7 5
-	 *		LT(1)
-	 *		consumeToken [c/<4>,1:0]
-	 *		location 7 7
-	 *		enterSubRule 2
-	 *		enter decision 2
-	 *		LT(1)
-	 *		LT(1)
-	 *		recognitionException NoViableAltException 2 1 2
-	 *		exit decision 2
-	 *		exitSubRule 2
-	 *		beginResync
-	 *		LT(1)
-	 *		consumeToken [c/<4>,1:1]
-	 *		LT(1)
-	 *		endResync
-	 *		LT(-1)
-	 *		exitRule b
-	 *		terminate
-	 */
-	void			(*recognitionException)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_EXCEPTION e);
-
-	/** Indicates the recognizer is about to consume tokens to resynchronize
-	 *  the parser.  Any consume events from here until the recovered event
-	 *  are not part of the parse--they are dead tokens.
-	 */
-	void			(*beginResync)			(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-
-	/** Indicates that the recognizer has finished consuming tokens in order
-	 *  to resynchronize.  There may be multiple beginResync/endResync pairs
-	 *  before the recognizer comes out of errorRecovery mode (in which
-	 *  multiple errors are suppressed).  This will be useful
-	 *  in a GUI where you will probably want to grey out tokens that are consumed
-	 *  but not matched to anything in the grammar.  Anything between
-	 *  a beginResync/endResync pair was tossed out by the parser.
-	 */
-	void			(*endResync)			(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-
-	/** A semantic predicate was evaluated with this result and action text
-	*/
-	void			(*semanticPredicate)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_BOOLEAN result, const char * predicate);
-
-	/** Announce that parsing has begun.  Not technically useful except for
-	 *  sending events over a socket.  A GUI for example will launch a thread
-	 *  to connect and communicate with a remote parser.  The thread will want
-	 *  to notify the GUI when a connection is made.  ANTLR parsers
-	 *  trigger this upon entry to the first rule (the ruleLevel is used to
-	 *  figure this out).
-	 */
-	void			(*commence)				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-
-	/** Parsing is over; successfully or not.  Mostly useful for telling
-	 *  remote debugging listeners that it's time to quit.  When the rule
-	 *  invocation level goes to zero at the end of a rule, we are done
-	 *  parsing.
-	 */
-	void			(*terminate)			(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-
-	/// Retrieve the acknowledgement response from the debugger. In fact this
-	/// response is never used at the moment, so we just read whatever
-	/// is in the socket buffer and throw it away.
-	///
-	void			(*ack)					(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-
-	// T r e e  P a r s i n g
-
-	/** Input for a tree parser is an AST, but we know nothing for sure
-	 *  about a node except its type and text (obtained from the adaptor).
-	 *  This is the analog of the consumeToken method.  The ID is usually 
-	 *  the memory address of the node.
-	 *  If the type is UP or DOWN, then
-	 *  the ID is not really meaningful as it's fixed--there is
-	 *  just one UP node and one DOWN navigation node.
-	 *
-	 *  Note that unlike the Java version, the node type of the C parsers
-	 *  is always fixed as pANTLR3_BASE_TREE because all such structures
-	 *  contain a super pointer to their parent, which is generally COMMON_TREE and within
-	 *  that there is a super pointer that can point to a user type that encapsulates it.
-	 *  Almost akin to saying that it is an interface pointer except we don't need to
-	 *  know what the interface is in full, just those bits that are the base.
-	 * @param t
-	 */
-	void			(*consumeNode)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
-
-	/** The tree parser looked ahead.  If the type is UP or DOWN,
-	 *  then the ID is not really meaningful as it's fixed--there is
-	 *  just one UP node and one DOWN navigation node.
-	 */
-	void			(*LTT)					(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_BASE_TREE t);
-
-
-	// A S T  E v e n t s
-
-	/** A nil was created (even nil nodes have a unique ID...
-	 *  they are not "null" per se).  As of 4/28/2006, this
-	 *  seems to be uniquely triggered when starting a new subtree
-	 *  such as when entering a subrule in automatic mode and when
-	 *  building a tree in rewrite mode.
-     *
- 	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only t.ID is set.
-	 */
-	void			(*nilNode)				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
-
-	/** If a syntax error occurs, recognizers bracket the error
-	 *  with an error node if they are building ASTs. This event
-	 *  notifies the listener that this is the case
-	 */
-	void			(*errorNode)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
-
-	/** Announce a new node built from token elements such as type etc...
-	 * 
-	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only t.ID, type, text are
-	 *  set.
-	 */
-	void			(*createNode)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
-
-	/** Announce a new node built from an existing token.
-	 *
-	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only node.ID and token.tokenIndex
-	 *  are set.
-	 */
-	void			(*createNodeTok)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE node, pANTLR3_COMMON_TOKEN token);
-
-	/** Make a node the new root of an existing root.  See
-	 *
-	 *  Note: the newRootID parameter is possibly different
-	 *  than the TreeAdaptor.becomeRoot() newRoot parameter.
-	 *  In our case, it will always be the result of calling
-	 *  TreeAdaptor.becomeRoot() and not root_n or whatever.
-	 *
-	 *  The listener should assume that this event occurs
-	 *  only when the current subrule (or rule) subtree is
-	 *  being reset to newRootID.
-	 * 
-	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only IDs are set.
-	 *
-	 *  @see org.antlr.runtime.tree.TreeAdaptor.becomeRoot()
-	 */
-	void			(*becomeRoot)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE newRoot, pANTLR3_BASE_TREE oldRoot);
-
-	/** Make childID a child of rootID.
-	 *
-	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only IDs are set.
-	 * 
-	 *  @see org.antlr.runtime.tree.TreeAdaptor.addChild()
-	 */
-	void			(*addChild)				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE root, pANTLR3_BASE_TREE child);
-
-	/** Set the start/stop token index for a subtree root or node.
-	 *
-	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only t.ID is set.
-	 */
-	void			(*setTokenBoundaries)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t, ANTLR3_MARKER tokenStartIndex, ANTLR3_MARKER tokenStopIndex);
-
-	/// Free up the resources allocated to this structure
-	///
-	void			(*free)					(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-
-}
-	ANTLR3_DEBUG_EVENT_LISTENER;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
diff --git a/antlr-3.4/runtime/C/include/antlr3defs.h b/antlr-3.4/runtime/C/include/antlr3defs.h
deleted file mode 100644
index 2435b02..0000000
--- a/antlr-3.4/runtime/C/include/antlr3defs.h
+++ /dev/null
@@ -1,632 +0,0 @@
-/** \file
- * Basic type and constant definitions for ANTLR3 Runtime.
- */
-#ifndef	_ANTLR3DEFS_H
-#define	_ANTLR3DEFS_H
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/* Following are for generated code, they are not referenced internally!!!
- */
-#if !defined(ANTLR3_HUGE) && !defined(ANTLR3_AVERAGE) && !defined(ANTLR3_SMALL)
-#define	ANTLR3_AVERAGE
-#endif
-
-#ifdef	ANTLR3_HUGE
-#ifndef	ANTLR3_SIZE_HINT
-#define	ANTLR3_SIZE_HINT        2049
-#endif
-#ifndef	ANTLR3_LIST_SIZE_HINT
-#define	ANTLR3_LIST_SIZE_HINT   127
-#endif
-#endif
-
-#ifdef	ANTLR3_AVERAGE
-#ifndef	ANTLR3_SIZE_HINT
-#define	ANTLR3_SIZE_HINT        1025
-#define	ANTLR3_LIST_SIZE_HINT   63
-#endif
-#endif
-
-#ifdef	ANTLR3_SMALL
-#ifndef	ANTLR3_SIZE_HINT
-#define	ANTLR3_SIZE_HINT        211
-#define	ANTLR3_LIST_SIZE_HINT   31
-#endif
-#endif
-
-// Definitions that indicate the encoding scheme for character streams, strings, etc.
-//
-/// Indicates Big Endian for encodings where this makes sense
-///
-#define ANTLR3_BE           1
-
-/// Indicates Little Endian for encodings where this makes sense
-///
-#define ANTLR3_LE           2
-
-/// General latin-1 or other 8 bit encoding scheme such as straight ASCII
-///
-#define ANTLR3_ENC_8BIT     4
-
-/// UTF-8 encoding scheme
-///
-#define ANTLR3_ENC_UTF8     8
-
-/// UTF-16 encoding scheme (which also covers UCS2 as that does not have surrogates)
-///
-#define ANTLR3_ENC_UTF16        16
-#define ANTLR3_ENC_UTF16BE      16 + ANTLR3_BE
-#define ANTLR3_ENC_UTF16LE      16 + ANTLR3_LE
-
-/// UTF-32 encoding scheme (basically straight 32 bit)
-///
-#define ANTLR3_ENC_UTF32        32
-#define ANTLR3_ENC_UTF32BE      32 + ANTLR3_BE
-#define ANTLR3_ENC_UTF32LE      32 + ANTLR3_LE
-
-/// Input is 8 bit EBCDIC (which we convert to 8 bit ASCII on the fly)
-///
-#define ANTLR3_ENC_EBCDIC       64
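As an illustration (not part of the original header), these ANTLR3_ENC_* values are what gets passed as the encoding argument of the input stream constructors declared later in this file; the file name below is hypothetical:

    /* Open a (hypothetical) file as a UTF-8 character stream. */
    static pANTLR3_INPUT_STREAM
    openUtf8Input(void)
    {
        return antlr3FileStreamNew((pANTLR3_UINT8)"input.txt", ANTLR3_ENC_UTF8);
    }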
-
-/* Common definitions come first
- */
-#include    <antlr3errors.h>
-
-/* Work out what operating system/compiler this is. We just do this once
- * here and use an internal symbol after this.
- */
-#ifdef	_WIN64
-
-# ifndef	ANTLR3_WINDOWS
-#   define	ANTLR3_WINDOWS
-# endif
-# define	ANTLR3_WIN64
-# define	ANTLR3_USE_64BIT
-
-#else
-
-#ifdef	_WIN32
-# ifndef	ANTLR3_WINDOWS
-#  define	ANTLR3_WINDOWS
-# endif
-
-#define	ANTLR3_WIN32
-#endif
-
-#endif
-
-#ifdef	ANTLR3_WINDOWS 
-
-#ifndef WIN32_LEAN_AND_MEAN
-#define	WIN32_LEAN_AND_MEAN
-#endif
-
-/* Allow VC 8 (vs2005) and above to use 'secure' versions of various functions such as sprintf
- */
-#ifndef	_CRT_SECURE_NO_DEPRECATE 
-#define	_CRT_SECURE_NO_DEPRECATE 
-#endif
-
-#include    <windows.h>
-#include    <stdlib.h>
-#include    <winsock.h>
-#include    <stdio.h>
-#include    <sys/types.h>
-#include    <sys/stat.h>
-#include    <stdarg.h>
-
-#define	ANTLR3_API      __declspec(dllexport)
-#define	ANTLR3_CDECL    __cdecl
-#define ANTLR3_FASTCALL __fastcall
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef __MINGW32__
-// Standard Windows types
-//
-typedef	INT32	ANTLR3_CHAR,	*pANTLR3_CHAR;
-typedef	UINT32	ANTLR3_UCHAR,	*pANTLR3_UCHAR;
-
-typedef	INT8	ANTLR3_INT8,	*pANTLR3_INT8;
-typedef	INT16	ANTLR3_INT16,	*pANTLR3_INT16;
-typedef	INT32	ANTLR3_INT32,	*pANTLR3_INT32;
-typedef	INT64	ANTLR3_INT64,	*pANTLR3_INT64;
-typedef	UINT8	ANTLR3_UINT8,	*pANTLR3_UINT8;
-typedef	UINT16	ANTLR3_UINT16,	*pANTLR3_UINT16;
-typedef	UINT32	ANTLR3_UINT32,	*pANTLR3_UINT32;
-typedef	UINT64	ANTLR3_UINT64,	*pANTLR3_UINT64;
-typedef UINT64  ANTLR3_BITWORD, *pANTLR3_BITWORD;
-typedef	UINT8	ANTLR3_BOOLEAN, *pANTLR3_BOOLEAN;
-
-#else
-// Mingw uses stdint.h and fails to define standard Microsoft typedefs
-// such as UINT16, hence we must use stdint.h for Mingw.
-//
-#include <stdint.h>
-typedef int32_t     ANTLR3_CHAR,    *pANTLR3_CHAR;
-typedef uint32_t    ANTLR3_UCHAR,   *pANTLR3_UCHAR;
-
-typedef int8_t	    ANTLR3_INT8,    *pANTLR3_INT8;
-typedef int16_t	    ANTLR3_INT16,   *pANTLR3_INT16;
-typedef int32_t	    ANTLR3_INT32,   *pANTLR3_INT32;
-typedef int64_t	    ANTLR3_INT64,   *pANTLR3_INT64;
-
-typedef uint8_t	    ANTLR3_UINT8,   *pANTLR3_UINT8;
-typedef uint16_t    ANTLR3_UINT16,  *pANTLR3_UINT16;
-typedef uint32_t    ANTLR3_UINT32,  *pANTLR3_UINT32;
-typedef uint64_t    ANTLR3_UINT64,  *pANTLR3_UINT64;
-typedef uint64_t    ANTLR3_BITWORD, *pANTLR3_BITWORD;
-
-typedef	uint8_t	    ANTLR3_BOOLEAN, *pANTLR3_BOOLEAN;
-
-#endif
-
-
-
-#define	ANTLR3_UINT64_LIT(lit)  lit##ULL
-
-#define	ANTLR3_INLINE	        __inline
-
-typedef FILE *	    ANTLR3_FDSC;
-typedef	struct stat ANTLR3_FSTAT_STRUCT;
-
-#ifdef	ANTLR3_USE_64BIT
-#define	ANTLR3_FUNC_PTR(ptr)	(void *)((ANTLR3_UINT64)(ptr))
-#define ANTLR3_UINT64_CAST(ptr) ((ANTLR3_UINT64)(ptr))
-#define	ANTLR3_UINT32_CAST(ptr)	(ANTLR3_UINT32)((ANTLR3_UINT64)(ptr))
-typedef ANTLR3_INT64		ANTLR3_MARKER;			
-typedef ANTLR3_UINT64		ANTLR3_INTKEY;
-#else
-#define	ANTLR3_FUNC_PTR(ptr)	(void *)((ANTLR3_UINT32)(ptr))
-#define ANTLR3_UINT64_CAST(ptr) (ANTLR3_UINT64)((ANTLR3_UINT32)(ptr))
-#define	ANTLR3_UINT32_CAST(ptr)	(ANTLR3_UINT32)(ptr)
-typedef	ANTLR3_INT32		ANTLR3_MARKER;
-typedef ANTLR3_UINT32		ANTLR3_INTKEY;
-#endif
-
-#ifdef	ANTLR3_WIN32
-#endif
-
-#ifdef	ANTLR3_WIN64
-#endif
-
-
-typedef	int			ANTLR3_SALENT;								// Type used for size of accept structure
-typedef struct sockaddr_in	ANTLR3_SOCKADDRT, * pANTLR3_SOCKADDRT;	// Type used for socket address declaration
-typedef struct sockaddr		ANTLR3_SOCKADDRC, * pANTLR3_SOCKADDRC;	// Type used for cast on accept()
-
-#define	ANTLR3_CLOSESOCKET	closesocket
-
-#ifdef __cplusplus
-}
-#endif
-
-/* Over-zealous warnings, such as complaints about strdup, can be
- * turned off.
- */
-
-/* Don't complain about "deprecated" functions such as strdup
- */
-#pragma warning( disable : 4996 )
-
-#else
-
-/* Include configure generated header file
- */
-#include	<antlr3config.h>
-
-#include <stdio.h>
-
-#if HAVE_STDINT_H
-# include <stdint.h>
-#endif
-
-#if HAVE_SYS_TYPES_H
-# include <sys/types.h>
-#endif
-
-#if HAVE_SYS_STAT_H
-# include <sys/stat.h>
-#endif
-
-#if STDC_HEADERS
-# include   <stdlib.h>
-# include   <stddef.h>
-# include   <stdarg.h>
-#else
-# if HAVE_STDLIB_H
-#  include  <stdlib.h>
-# endif
-# if HAVE_STDARG_H
-#  include  <stdarg.h>
-# endif
-#endif
-
-#if HAVE_STRING_H
-# if !STDC_HEADERS && HAVE_MEMORY_H
-#  include <memory.h>
-# endif
-# include <string.h>
-#endif
-
-#if HAVE_STRINGS_H
-# include <strings.h>
-#endif
-
-#if HAVE_INTTYPES_H
-# include <inttypes.h>
-#endif
-
-#if HAVE_UNISTD_H
-# include <unistd.h>
-#endif
-
-#ifdef HAVE_NETINET_IN_H
-#include	<netinet/in.h>
-#endif
-
-#ifdef HAVE_SOCKET_H
-# include	<socket.h>
-#else
-# if HAVE_SYS_SOCKET_H
-#  include	<sys/socket.h>
-# endif
-#endif
-
-#ifdef HAVE_NETINET_TCP_H
-#include	<netinet/tcp.h>
-#endif
-
-#ifdef HAVE_ARPA_NAMESER_H
-#include <arpa/nameser.h> /* DNS HEADER struct */
-#endif
-
-#ifdef HAVE_NETDB_H
-#include <netdb.h>
-#endif
-
-
-#ifdef HAVE_SYS_RESOLVE_H
-#include	<sys/resolv.h>
-#endif
-
-#ifdef HAVE_RESOLVE_H
-#include	<resolv.h>
-#endif
-
-
-#ifdef	HAVE_MALLOC_H
-# include    <malloc.h>
-#else
-# ifdef	HAVE_SYS_MALLOC_H
-#  include    <sys/malloc.h>
-# endif
-#endif
-
-#ifdef  HAVE_CTYPE_H
-# include   <ctype.h>
-#endif
-
-/* Some platforms define a macro, index() in string.h. AIX is
- * one of these for instance. We must get rid of that definition
- * as we use ->index all over the place. defining macros like this in system header
- * files is a really bad idea, but I doubt that IBM will listen to me ;-)
- */
-#ifdef	index
-#undef	index
-#endif
-
-#define _stat   stat
-
-// SOCKET not defined on Unix
-// 
-typedef	int	SOCKET;
-
-#define ANTLR3_API
-#define	ANTLR3_CDECL
-#define ANTLR3_FASTCALL
-
-#ifdef	__hpux
-
- // HPUX is always different usually for no good reason. Tru64 should have kicked it
- // into touch and everyone knows it ;-)
- //
- typedef struct sockaddr_in ANTLR3_SOCKADDRT, * pANTLR3_SOCKADDRT;	// Type used for socket address declaration
- typedef void *		    pANTLR3_SOCKADDRC;				// Type used for cast on accept()
- typedef int		    ANTLR3_SALENT;
-
-#else
-
-# if defined(_AIX) || __GNUC__ > 3 
-
-   typedef  socklen_t   ANTLR3_SALENT;
-
-# else
-
-   typedef  size_t	ANTLR3_SALENT;
-
-# endif
-
-   typedef struct sockaddr_in   ANTLR3_SOCKADDRT, * pANTLR3_SOCKADDRT;	// Type used for socket address declaration
-   typedef struct sockaddr	* pANTLR3_SOCKADDRC;                    // Type used for cast on accept()
-
-#endif
-
-#define INVALID_SOCKET      ((SOCKET)-1)
-#define	ANTLR3_CLOSESOCKET  close
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Inherit type definitions for autoconf
- */
-typedef int32_t	    ANTLR3_CHAR,    *pANTLR3_CHAR;
-typedef uint32_t    ANTLR3_UCHAR,   *pANTLR3_UCHAR;
-
-typedef int8_t	    ANTLR3_INT8,    *pANTLR3_INT8;
-typedef int16_t	    ANTLR3_INT16,   *pANTLR3_INT16;
-typedef int32_t	    ANTLR3_INT32,   *pANTLR3_INT32;
-typedef int64_t	    ANTLR3_INT64,   *pANTLR3_INT64;
-
-typedef uint8_t	    ANTLR3_UINT8,   *pANTLR3_UINT8;
-typedef uint16_t    ANTLR3_UINT16,  *pANTLR3_UINT16;
-typedef uint32_t    ANTLR3_UINT32,  *pANTLR3_UINT32;
-typedef uint64_t    ANTLR3_UINT64,  *pANTLR3_UINT64;
-typedef uint64_t    ANTLR3_BITWORD, *pANTLR3_BITWORD;
-
-typedef uint32_t    ANTLR3_BOOLEAN, *pANTLR3_BOOLEAN;
-
-#define ANTLR3_INLINE   inline
-#define	ANTLR3_API
-
-typedef FILE *	    ANTLR3_FDSC;
-typedef	struct stat ANTLR3_FSTAT_STRUCT;
-
-#ifdef	ANTLR3_USE_64BIT
-#define	ANTLR3_FUNC_PTR(ptr)    (void *)((ANTLR3_UINT64)(ptr))
-#define ANTLR3_UINT64_CAST(ptr)	((ANTLR3_UINT64)(ptr))
-#define	ANTLR3_UINT32_CAST(ptr) (ANTLR3_UINT32)((ANTLR3_UINT64)(ptr))
-typedef ANTLR3_INT64		ANTLR3_MARKER;
-typedef ANTLR3_UINT64		ANTLR3_INTKEY;
-#else
-#define	ANTLR3_FUNC_PTR(ptr)	(void *)((ANTLR3_UINT32)(ptr))
-#define ANTLR3_UINT64_CAST(ptr) (ANTLR3_UINT64)((ANTLR3_UINT32)(ptr))
-#define	ANTLR3_UINT32_CAST(ptr)	(ANTLR3_UINT32)(ptr)
-typedef	ANTLR3_INT32		ANTLR3_MARKER;
-typedef ANTLR3_UINT32		ANTLR3_INTKEY;
-#endif
-#define	ANTLR3_UINT64_LIT(lit)	lit##ULL
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-#ifdef ANTLR3_USE_64BIT
-#define ANTLR3_TRIE_DEPTH 63
-#else
-#define ANTLR3_TRIE_DEPTH 31
-#endif
-/* Pre-declare the typedefs for all the interfaces, then
- * they can be inter-dependent and we will let the linker
- * sort it out for us.
- */
-#include    <antlr3interfaces.h>
-
-// Include the unicode.org conversion library header.
-//
-#include    <antlr3convertutf.h>
-
-/* Prototypes
- */
-#ifndef ANTLR3_MALLOC
-/// Default definition of ANTLR3_MALLOC. You can override this before including
-/// antlr3.h if you wish to use your own implementation.
-///
-#define	ANTLR3_MALLOC(request)          malloc  ((size_t)(request))
-#endif
-
-#ifndef ANTLR3_CALLOC
-/// Default definition of ANTLR3_CALLOC. You can override this before including
-/// antlr3.h if you wish to use your own implementation.
-///
-#define	ANTLR3_CALLOC(numEl, elSize)    calloc  (numEl, (size_t)(elSize))
-#endif
-
-#ifndef ANTLR3_REALLOC
-/// Default definition of ANTLR3_REALLOC. You can override this before including
-/// antlr3.h if you wish to use your own implementation.
-///
-#define ANTLR3_REALLOC(current, request)    realloc ((void *)(current), (size_t)(request))
-#endif
-#ifndef ANTLR3_FREE
-/// Default definition of ANTLR3_FREE. You can override this before including
-/// antlr3.h if you wish to use your own implementation.
-///
-#define	ANTLR3_FREE(ptr)		free    ((void *)(ptr))
-#endif
-#ifndef ANTLR3_FREE_FUNC						
-/// Default definition of ANTLR3_FREE_FUNC. You can override this before including
-/// antlr3.h if you wish to use your own implementation.
-///
-#define	ANTLR3_FREE_FUNC		free
-#endif
-#ifndef ANTLR3_STRDUP
-/// Default definition of ANTLR3_STRDUP. You can override this before including
-/// antlr3.h if you wish to use your own implementation.
-///
-#define	ANTLR3_STRDUP(instr)		(pANTLR3_UINT8)(strdup  ((const char *)(instr)))
-#endif
-#ifndef ANTLR3_MEMCPY
-/// Default definition of ANTLR3_MEMCPY. You can override this before including
-/// antlr3.h if you wish to use your own implementation.
-///
-#define	ANTLR3_MEMCPY(target, source, size) memcpy((void *)(target), (const void *)(source), (size_t)(size))
-#endif
-#ifndef ANTLR3_MEMMOVE
-/// Default definition of ANTLR3_MEMMOVE. You can override this before including
-/// antlr3.h if you wish to use your own implementation.
-///
-#define	ANTLR3_MEMMOVE(target, source, size)    memmove((void *)(target), (const void *)(source), (size_t)(size))
-#endif
-#ifndef ANTLR3_MEMSET
-/// Default definition of ANTLR3_MEMSET. You can override this before including
-/// antlr3.h if you wish to use your own implementation.
-///
-#define	ANTLR3_MEMSET(target, byte, size)   memset((void *)(target), (int)(byte), (size_t)(size))
-#endif
-
-#ifndef	ANTLR3_PRINTF
-/// Default definition of printf, set this to something other than printf before including antlr3.h
-/// if your system does not have a printf. Note that you can define this to be <code>//</code>
-/// without harming the runtime.
-///
-#define	ANTLR3_PRINTF   printf
-#endif
-
-#ifndef	ANTLR3_FPRINTF
-/// Default definition of fprintf, set this to something other than fprintf before including antlr3.h
-/// if your system does not have a fprintf. Note that you can define this to be <code>//</code>
-/// without harming the runtime. 
-///
-#define	ANTLR3_FPRINTF	fprintf
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-ANTLR3_API pANTLR3_INT_TRIE antlr3IntTrieNew    (ANTLR3_UINT32 depth);
-
-ANTLR3_API pANTLR3_BITSET   antlr3BitsetNew	(ANTLR3_UINT32 numBits);
-ANTLR3_API pANTLR3_BITSET   antlr3BitsetOf	(ANTLR3_INT32 bit, ...);
-ANTLR3_API pANTLR3_BITSET   antlr3BitsetList	(pANTLR3_HASH_TABLE list);
-ANTLR3_API pANTLR3_BITSET   antlr3BitsetCopy	(pANTLR3_BITSET_LIST blist);
-ANTLR3_API pANTLR3_BITSET   antlr3BitsetLoad    (pANTLR3_BITSET_LIST blist);
-ANTLR3_API void             antlr3BitsetSetAPI  (pANTLR3_BITSET bitset);
-
-
-ANTLR3_API pANTLR3_BASE_RECOGNIZER  antlr3BaseRecognizerNew                     (ANTLR3_UINT32 type, ANTLR3_UINT32 sizeHint, pANTLR3_RECOGNIZER_SHARED_STATE state);
-ANTLR3_API void			    antlr3RecognitionExceptionNew               (pANTLR3_BASE_RECOGNIZER recognizer);
-ANTLR3_API void			    antlr3MTExceptionNew                        (pANTLR3_BASE_RECOGNIZER recognizer);
-ANTLR3_API void			    antlr3MTNExceptionNew                       (pANTLR3_BASE_RECOGNIZER recognizer);
-ANTLR3_API pANTLR3_HASH_TABLE	    antlr3HashTableNew                          (ANTLR3_UINT32 sizeHint);
-ANTLR3_API ANTLR3_UINT32	    antlr3Hash                                  (void * key, ANTLR3_UINT32 keylen);
-ANTLR3_API pANTLR3_HASH_ENUM	    antlr3EnumNew                               (pANTLR3_HASH_TABLE table);
-ANTLR3_API pANTLR3_LIST		    antlr3ListNew                               (ANTLR3_UINT32 sizeHint);
-ANTLR3_API pANTLR3_VECTOR_FACTORY   antlr3VectorFactoryNew                      (ANTLR3_UINT32 sizeHint);
-ANTLR3_API pANTLR3_VECTOR	    antlr3VectorNew                             (ANTLR3_UINT32 sizeHint);
-ANTLR3_API pANTLR3_STACK	    antlr3StackNew                              (ANTLR3_UINT32 sizeHint);
-ANTLR3_API void                     antlr3SetVectorApi                          (pANTLR3_VECTOR vector, ANTLR3_UINT32 sizeHint);
-ANTLR3_API ANTLR3_UCHAR		    antlr3c8toAntlrc                            (ANTLR3_INT8 inc);
-ANTLR3_API pANTLR3_TOPO             antlr3TopoNew();
-
-ANTLR3_API pANTLR3_EXCEPTION	    antlr3ExceptionNew                          (ANTLR3_UINT32 exception, void * name, void * message, ANTLR3_BOOLEAN freeMessage);
-
-
-ANTLR3_API pANTLR3_INPUT_STREAM     antlr3FileStreamNew                         (pANTLR3_UINT8 fileName, ANTLR3_UINT32 encoding);
-ANTLR3_API pANTLR3_INPUT_STREAM     antlr3StringStreamNew                       (pANTLR3_UINT8 data, ANTLR3_UINT32 encoding, ANTLR3_UINT32 size, pANTLR3_UINT8 name);
-
-ANTLR3_API pANTLR3_INT_STREAM	    antlr3IntStreamNew                          (void);
-
-ANTLR3_API pANTLR3_STRING_FACTORY   antlr3StringFactoryNew                      (ANTLR3_UINT32 encoding);
-
-ANTLR3_API pANTLR3_COMMON_TOKEN	    antlr3CommonTokenNew                        (ANTLR3_UINT32 ttype);
-ANTLR3_API pANTLR3_TOKEN_FACTORY    antlr3TokenFactoryNew                       (pANTLR3_INPUT_STREAM input);
-ANTLR3_API void			    antlr3SetTokenAPI                           (pANTLR3_COMMON_TOKEN token);
-
-ANTLR3_API pANTLR3_LEXER	    antlr3LexerNewStream                        (ANTLR3_UINT32 sizeHint, pANTLR3_INPUT_STREAM input, pANTLR3_RECOGNIZER_SHARED_STATE state);
-ANTLR3_API pANTLR3_LEXER	    antlr3LexerNew                              (ANTLR3_UINT32 sizeHint, pANTLR3_RECOGNIZER_SHARED_STATE state);
-ANTLR3_API pANTLR3_PARSER	    antlr3ParserNewStreamDbg                    (ANTLR3_UINT32 sizeHint, pANTLR3_TOKEN_STREAM tstream, pANTLR3_DEBUG_EVENT_LISTENER dbg, pANTLR3_RECOGNIZER_SHARED_STATE state);
-ANTLR3_API pANTLR3_PARSER	    antlr3ParserNewStream                       (ANTLR3_UINT32 sizeHint, pANTLR3_TOKEN_STREAM tstream, pANTLR3_RECOGNIZER_SHARED_STATE state);
-ANTLR3_API pANTLR3_PARSER           antlr3ParserNew                             (ANTLR3_UINT32 sizeHint, pANTLR3_RECOGNIZER_SHARED_STATE state);
-
-ANTLR3_API pANTLR3_COMMON_TOKEN_STREAM  antlr3CommonTokenStreamSourceNew        (ANTLR3_UINT32 hint, pANTLR3_TOKEN_SOURCE source);
-ANTLR3_API pANTLR3_COMMON_TOKEN_STREAM	antlr3CommonTokenStreamNew              (ANTLR3_UINT32 hint);
-ANTLR3_API pANTLR3_COMMON_TOKEN_STREAM	antlr3CommonTokenDebugStreamSourceNew   (ANTLR3_UINT32 hint, pANTLR3_TOKEN_SOURCE source, pANTLR3_DEBUG_EVENT_LISTENER debugger);
-
-ANTLR3_API pANTLR3_BASE_TREE_ADAPTOR	ANTLR3_TREE_ADAPTORNew                  (pANTLR3_STRING_FACTORY strFactory);
-ANTLR3_API pANTLR3_BASE_TREE_ADAPTOR	ANTLR3_TREE_ADAPTORDebugNew             (pANTLR3_STRING_FACTORY strFactory, pANTLR3_DEBUG_EVENT_LISTENER	debugger);
-ANTLR3_API pANTLR3_COMMON_TREE		antlr3CommonTreeNew                     (void);
-ANTLR3_API pANTLR3_COMMON_TREE		antlr3CommonTreeNewFromTree             (pANTLR3_COMMON_TREE tree);
-ANTLR3_API pANTLR3_COMMON_TREE		antlr3CommonTreeNewFromToken            (pANTLR3_COMMON_TOKEN tree);
-ANTLR3_API pANTLR3_ARBORETUM		antlr3ArboretumNew                      (pANTLR3_STRING_FACTORY factory);
-ANTLR3_API void				antlr3SetCTAPI                          (pANTLR3_COMMON_TREE tree);
-ANTLR3_API pANTLR3_BASE_TREE		antlr3BaseTreeNew                       (pANTLR3_BASE_TREE tree);
-
-ANTLR3_API void				antlr3BaseTreeAdaptorInit               (pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_DEBUG_EVENT_LISTENER debugger);
-
-ANTLR3_API pANTLR3_TREE_PARSER		antlr3TreeParserNewStream               (ANTLR3_UINT32 sizeHint, pANTLR3_COMMON_TREE_NODE_STREAM ctnstream, pANTLR3_RECOGNIZER_SHARED_STATE state);
-
-ANTLR3_API ANTLR3_INT32			antlr3dfaspecialTransition              (void * ctx, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_INT_STREAM is, pANTLR3_CYCLIC_DFA dfa, ANTLR3_INT32 s);
-ANTLR3_API ANTLR3_INT32			antlr3dfaspecialStateTransition         (void * ctx, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_INT_STREAM is, pANTLR3_CYCLIC_DFA dfa, ANTLR3_INT32 s);
-ANTLR3_API ANTLR3_INT32			antlr3dfapredict                        (void * ctx, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_INT_STREAM is, pANTLR3_CYCLIC_DFA cdfa);
-
-ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM  antlr3CommonTreeNodeStreamNewTree   (pANTLR3_BASE_TREE tree, ANTLR3_UINT32 hint);
-ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM  antlr3CommonTreeNodeStreamNew       (pANTLR3_STRING_FACTORY strFactory, ANTLR3_UINT32 hint);
-ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM  antlr3UnbufTreeNodeStreamNewTree    (pANTLR3_BASE_TREE tree, ANTLR3_UINT32 hint);
-ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM  antlr3UnbufTreeNodeStreamNew        (pANTLR3_STRING_FACTORY strFactory, ANTLR3_UINT32 hint);
-ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM  antlr3CommonTreeNodeStreamNewStream (pANTLR3_COMMON_TREE_NODE_STREAM inStream);
-ANTLR3_API pANTLR3_TREE_NODE_STREAM         antlr3TreeNodeStreamNew             ();
-ANTLR3_API void				    fillBufferExt                       (pANTLR3_COMMON_TOKEN_STREAM tokenStream);
-
-ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
-    antlr3RewriteRuleTOKENStreamNewAE	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description);
-ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
-    antlr3RewriteRuleTOKENStreamNewAEE	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement);
-ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
-    antlr3RewriteRuleTOKENStreamNewAEV	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector);
-
-ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
-    antlr3RewriteRuleNODEStreamNewAE	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description);
-ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
-    antlr3RewriteRuleNODEStreamNewAEE	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement);
-ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
-    antlr3RewriteRuleNODEStreamNewAEV	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector);
-
-ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
-    antlr3RewriteRuleSubtreeStreamNewAE	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description);
-ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
-    antlr3RewriteRuleSubtreeStreamNewAEE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement);
-ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
-    antlr3RewriteRuleSubtreeStreamNewAEV(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector);
-
-ANTLR3_API pANTLR3_DEBUG_EVENT_LISTENER antlr3DebugListenerNew();
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif	/* _ANTLR3DEFS_H	*/
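A minimal sketch of the override mechanism documented in this header (not from the original source): define the allocation macros before including antlr3.h and the runtime routes its allocations through them. The my_pool_* functions are hypothetical application-supplied allocators:

    /* my_pool_alloc, my_pool_calloc and my_pool_free are hypothetical. */
    #define ANTLR3_MALLOC(request)          my_pool_alloc((size_t)(request))
    #define ANTLR3_CALLOC(numEl, elSize)    my_pool_calloc((numEl), (size_t)(elSize))
    #define ANTLR3_FREE(ptr)                my_pool_free((void *)(ptr))
    #define ANTLR3_FREE_FUNC                my_pool_free

    #include <antlr3.h>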
diff --git a/antlr-3.4/runtime/C/src/antlr3baserecognizer.c b/antlr-3.4/runtime/C/src/antlr3baserecognizer.c
deleted file mode 100644
index e2eccc6..0000000
--- a/antlr-3.4/runtime/C/src/antlr3baserecognizer.c
+++ /dev/null
@@ -1,2235 +0,0 @@
-/** \file
- * Contains the base functions that all recognizers require.
- * Any function can be overridden by a lexer/parser/tree parser or by the
- * ANTLR3 programmer.
- * 
- * \addtogroup pANTLR3_BASE_RECOGNIZER
- * @{
- */
-#include    <antlr3baserecognizer.h>
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef	ANTLR3_WINDOWS
-#pragma warning( disable : 4100 )
-#endif
-
-/* Interface functions - standard implementations cover the parser and tree parser
- * almost completely, but are overridden by the parser or tree parser as needed. The lexer overrides
- * most of these functions.
- */
-static void					beginResync					(pANTLR3_BASE_RECOGNIZER recognizer);
-static pANTLR3_BITSET		computeErrorRecoverySet	    (pANTLR3_BASE_RECOGNIZER recognizer);
-static void					endResync					(pANTLR3_BASE_RECOGNIZER recognizer);
-static void					beginBacktrack				(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 level);
-static void					endBacktrack				(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 level, ANTLR3_BOOLEAN successful);
-
-static void *				match						(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow);
-static void					matchAny					(pANTLR3_BASE_RECOGNIZER recognizer);
-static void					mismatch					(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow);
-static ANTLR3_BOOLEAN		mismatchIsUnwantedToken		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, ANTLR3_UINT32 ttype);
-static ANTLR3_BOOLEAN		mismatchIsMissingToken		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, pANTLR3_BITSET_LIST follow);
-static void					reportError					(pANTLR3_BASE_RECOGNIZER recognizer);
-static pANTLR3_BITSET		computeCSRuleFollow			(pANTLR3_BASE_RECOGNIZER recognizer);
-static pANTLR3_BITSET		combineFollows				(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_BOOLEAN exact);
-static void					displayRecognitionError	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_UINT8 * tokenNames);
-static void					recover						(pANTLR3_BASE_RECOGNIZER recognizer);
-static void	*				recoverFromMismatchedToken  (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow);
-static void	*				recoverFromMismatchedSet    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET_LIST follow);
-static ANTLR3_BOOLEAN		recoverFromMismatchedElement(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET_LIST follow);
-static void					consumeUntil				(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 tokenType);
-static void					consumeUntilSet				(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET set);
-static pANTLR3_STACK		getRuleInvocationStack	    (pANTLR3_BASE_RECOGNIZER recognizer);
-static pANTLR3_STACK		getRuleInvocationStackNamed (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_UINT8 name);
-static pANTLR3_HASH_TABLE	toStrings					(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_HASH_TABLE);
-static ANTLR3_MARKER		getRuleMemoization			(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_INTKEY ruleIndex, ANTLR3_MARKER ruleParseStart);
-static ANTLR3_BOOLEAN		alreadyParsedRule			(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_MARKER ruleIndex);
-static void					memoize						(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_MARKER ruleIndex, ANTLR3_MARKER ruleParseStart);
-static ANTLR3_BOOLEAN		synpred						(pANTLR3_BASE_RECOGNIZER recognizer, void * ctx, void (*predicate)(void * ctx));
-static void					reset						(pANTLR3_BASE_RECOGNIZER recognizer);
-static void					freeBR						(pANTLR3_BASE_RECOGNIZER recognizer);
-static void *				getCurrentInputSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream);
-static void *				getMissingSymbol			(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
-															ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow);
-static ANTLR3_UINT32		getNumberOfSyntaxErrors		(pANTLR3_BASE_RECOGNIZER recognizer);
-
-ANTLR3_API pANTLR3_BASE_RECOGNIZER
-antlr3BaseRecognizerNew(ANTLR3_UINT32 type, ANTLR3_UINT32 sizeHint, pANTLR3_RECOGNIZER_SHARED_STATE state)
-{
-    pANTLR3_BASE_RECOGNIZER recognizer;
-
-    // Allocate memory for the structure
-    //
-    recognizer	    = (pANTLR3_BASE_RECOGNIZER) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_BASE_RECOGNIZER));
-
-    if	(recognizer == NULL)
-    {
-		// Allocation failed
-		//
-		return	NULL;
-    }
-
-	
-	// If we have been supplied with a pre-existing recognizer state
-	// then we just install it, otherwise we must create one from scratch
-	//
-	if	(state == NULL)
-	{
-		recognizer->state = (pANTLR3_RECOGNIZER_SHARED_STATE) ANTLR3_CALLOC(1, (size_t)sizeof(ANTLR3_RECOGNIZER_SHARED_STATE));
-
-		if	(recognizer->state == NULL)
-		{
-			ANTLR3_FREE(recognizer);
-			return	NULL;
-		}
-
-		// Initialize any new recognizer state
-		//
-		recognizer->state->errorRecovery	= ANTLR3_FALSE;
-		recognizer->state->lastErrorIndex	= -1;
-		recognizer->state->failed		= ANTLR3_FALSE;
-		recognizer->state->errorCount		= 0;
-		recognizer->state->backtracking		= 0;
-		recognizer->state->following		= NULL;
-		recognizer->state->ruleMemo		= NULL;
-		recognizer->state->tokenNames		= NULL;
-		recognizer->state->sizeHint             = sizeHint;
-		recognizer->state->tokSource		= NULL;
-                recognizer->state->tokFactory           = NULL;
-
-		// Rather than check to see if we must initialize
-		// the stack every time we are asked for a new rewrite stream
-		// we just always create an empty stack and then just
-		// free it when the base recognizer is freed.
-		//
-		recognizer->state->rStreams		= antlr3VectorNew(0);  // We don't know the size.
-
-		if	(recognizer->state->rStreams == NULL)
-		{
-			// Out of memory
-			//
-			ANTLR3_FREE(recognizer->state);
-			ANTLR3_FREE(recognizer);
-			return	NULL;
-		}
-	}
-	else
-	{
-		// Install the one we were given, and do not reset it here
-		// as it will either already have been initialized or will
-		// be in a state that needs to be preserved.
-		//
-		recognizer->state = state;
-	}
-		
-    // Install the BR API
-    //
-    recognizer->alreadyParsedRule           = alreadyParsedRule;
-    recognizer->beginResync                 = beginResync;
-    recognizer->combineFollows              = combineFollows;
-    recognizer->beginBacktrack              = beginBacktrack;
-    recognizer->endBacktrack                = endBacktrack;
-    recognizer->computeCSRuleFollow         = computeCSRuleFollow;
-    recognizer->computeErrorRecoverySet     = computeErrorRecoverySet;
-    recognizer->consumeUntil                = consumeUntil;
-    recognizer->consumeUntilSet             = consumeUntilSet;
-    recognizer->displayRecognitionError     = displayRecognitionError;
-    recognizer->endResync                   = endResync;
-    recognizer->exConstruct                 = antlr3MTExceptionNew;
-    recognizer->getRuleInvocationStack      = getRuleInvocationStack;
-    recognizer->getRuleInvocationStackNamed = getRuleInvocationStackNamed;
-    recognizer->getRuleMemoization          = getRuleMemoization;
-    recognizer->match                       = match;
-    recognizer->matchAny                    = matchAny;
-    recognizer->memoize                     = memoize;
-    recognizer->mismatch                    = mismatch;
-    recognizer->mismatchIsUnwantedToken     = mismatchIsUnwantedToken;
-    recognizer->mismatchIsMissingToken      = mismatchIsMissingToken;
-    recognizer->recover                     = recover;
-    recognizer->recoverFromMismatchedElement= recoverFromMismatchedElement;
-    recognizer->recoverFromMismatchedSet    = recoverFromMismatchedSet;
-    recognizer->recoverFromMismatchedToken  = recoverFromMismatchedToken;
-    recognizer->getNumberOfSyntaxErrors     = getNumberOfSyntaxErrors;
-    recognizer->reportError                 = reportError;
-    recognizer->reset                       = reset;
-    recognizer->synpred                     = synpred;
-    recognizer->toStrings                   = toStrings;
-    recognizer->getCurrentInputSymbol       = getCurrentInputSymbol;
-    recognizer->getMissingSymbol            = getMissingSymbol;
-    recognizer->debugger                    = NULL;
-
-    recognizer->free = freeBR;
-
-    /* Initialize variables
-     */
-    recognizer->type			= type;
-
-
-    return  recognizer;
-}
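A hedged sketch (not from the original source) of creating and disposing of a base recognizer directly; generated parsers normally do this via antlr3ParserNew() and friends, and the ANTLR3_TYPE_PARSER constant is assumed here:

    static void
    demoBaseRecognizer(void)
    {
        /* NULL shared state means the constructor allocates a fresh one. */
        pANTLR3_BASE_RECOGNIZER rec = antlr3BaseRecognizerNew(ANTLR3_TYPE_PARSER, ANTLR3_SIZE_HINT, NULL);

        if (rec != NULL)
        {
            /* ... use the recognizer ... */
            rec->free(rec);     /* also releases the shared state it allocated */
        }
    }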
-static void	
-freeBR	    (pANTLR3_BASE_RECOGNIZER recognizer)
-{
-    pANTLR3_EXCEPTION thisE;
-
-	// Did we have a state allocated?
-	//
-	if	(recognizer->state != NULL)
-	{
-		// Free any rule memoization we set up
-		//
-		if	(recognizer->state->ruleMemo != NULL)
-		{
-			recognizer->state->ruleMemo->free(recognizer->state->ruleMemo);
-			recognizer->state->ruleMemo = NULL;
-		}
-
-		// Free any exception space we have left around
-		//
-		thisE = recognizer->state->exception;
-		if	(thisE != NULL)
-		{
-			thisE->freeEx(thisE);
-		}
-
-		// Free any rewrite streams we have allocated
-		//
-		if	(recognizer->state->rStreams != NULL)
-		{
-			recognizer->state->rStreams->free(recognizer->state->rStreams);
-		}
-
-		// Free up any token factory we created (error recovery for instance)
-		//
-		if	(recognizer->state->tokFactory != NULL)
-		{
-			recognizer->state->tokFactory->close(recognizer->state->tokFactory);
-		}
-		// Free the shared state memory
-		//
-		ANTLR3_FREE(recognizer->state);
-	}
-
-	// Free the actual recognizer space
-	//
-    ANTLR3_FREE(recognizer);
-}
-
-/**
- * Creates a new Mismatched Token Exception and inserts it in the recognizer
- * exception stack.
- * 
- * \param recognizer
- * Context pointer for this recognizer
- * 
- */
-ANTLR3_API	void
-antlr3MTExceptionNew(pANTLR3_BASE_RECOGNIZER recognizer)
-{
-    /* Create a basic recognition exception structure
-     */
-    antlr3RecognitionExceptionNew(recognizer);
-
-    /* Now update it to indicate this is a Mismatched token exception
-     */
-    recognizer->state->exception->name		= ANTLR3_MISMATCHED_EX_NAME;
-    recognizer->state->exception->type		= ANTLR3_MISMATCHED_TOKEN_EXCEPTION;
-
-    return;
-}
-
-ANTLR3_API	void
-antlr3RecognitionExceptionNew(pANTLR3_BASE_RECOGNIZER recognizer)
-{
-	pANTLR3_EXCEPTION				ex;
-	pANTLR3_LEXER					lexer;
-	pANTLR3_PARSER					parser;
-	pANTLR3_TREE_PARSER				tparser;
-
-	pANTLR3_INPUT_STREAM			ins;
-	pANTLR3_INT_STREAM				is;
-	pANTLR3_COMMON_TOKEN_STREAM	    cts;
-	pANTLR3_TREE_NODE_STREAM	    tns;
-
-	ins	    = NULL;
-	cts	    = NULL;
-	tns	    = NULL;
-	is	    = NULL;
-	lexer   = NULL;
-	parser  = NULL;
-	tparser = NULL;
-
-	switch	(recognizer->type)
-	{
-	case	ANTLR3_TYPE_LEXER:
-
-		lexer	= (pANTLR3_LEXER) (recognizer->super);
-		ins	= lexer->input;
-		is	= ins->istream;
-
-		break;
-
-	case	ANTLR3_TYPE_PARSER:
-
-		parser  = (pANTLR3_PARSER) (recognizer->super);
-		cts	= (pANTLR3_COMMON_TOKEN_STREAM)(parser->tstream->super);
-		is	= parser->tstream->istream;
-
-		break;
-
-	case	ANTLR3_TYPE_TREE_PARSER:
-
-		tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-		tns	= tparser->ctnstream->tnstream;
-		is	= tns->istream;
-
-		break;
-
-	default:
-
-		ANTLR3_FPRINTF(stderr, "Base recognizer function antlr3RecognitionExceptionNew called by unknown parser type - provide override for this function\n");
-		return;
-
-		break;
-	}
-
-	/* Create a basic exception structure
-	 */
-	ex = antlr3ExceptionNew(ANTLR3_RECOGNITION_EXCEPTION,
-		(void *)ANTLR3_RECOGNITION_EX_NAME,
-		NULL,
-		ANTLR3_FALSE);
-
-	/* Rest of information depends on the base type of the 
-	 * input stream.
-	 */
-	switch  (is->type & ANTLR3_INPUT_MASK)
-	{
-	case    ANTLR3_CHARSTREAM:
-
-		ex->c			= is->_LA		    	(is, 1);					/* Current input character			*/
-		ex->line		= ins->getLine			(ins);						/* Line number comes from stream		*/
-		ex->charPositionInLine	= ins->getCharPositionInLine	(ins);	    /* Line offset also comes from the stream   */
-		ex->index		= is->index			(is);
-		ex->streamName		= ins->fileName;
-		ex->message		= "Unexpected character";
-		break;
-
-	case    ANTLR3_TOKENSTREAM:
-
-		ex->token		= cts->tstream->_LT						(cts->tstream, 1);	    /* Current input token			    */
-		ex->line		= ((pANTLR3_COMMON_TOKEN)(ex->token))->getLine			(ex->token);
-		ex->charPositionInLine	= ((pANTLR3_COMMON_TOKEN)(ex->token))->getCharPositionInLine	(ex->token);
-		ex->index		= cts->tstream->istream->index					(cts->tstream->istream);
-		if	(((pANTLR3_COMMON_TOKEN)(ex->token))->type == ANTLR3_TOKEN_EOF)
-		{
-			ex->streamName		= NULL;
-		}
-		else
-		{
-			ex->streamName		= ((pANTLR3_COMMON_TOKEN)(ex->token))->input->fileName;
-		}
-		ex->message		= "Unexpected token";
-		break;
-
-	case    ANTLR3_COMMONTREENODE:
-
-		ex->token		= tns->_LT						    (tns, 1);	    /* Current input tree node			    */
-		ex->line		= ((pANTLR3_BASE_TREE)(ex->token))->getLine		    (ex->token);
-		ex->charPositionInLine	= ((pANTLR3_BASE_TREE)(ex->token))->getCharPositionInLine   (ex->token);
-		ex->index		= tns->istream->index					    (tns->istream);
-
-		// Are you ready for this? Deep breath now...
-		//
-		{
-			pANTLR3_COMMON_TREE tnode;
-
-			tnode		= ((pANTLR3_COMMON_TREE)(((pANTLR3_BASE_TREE)(ex->token))->super));
-
-			if	(tnode->token    == NULL)
-			{
-				ex->streamName = ((pANTLR3_BASE_TREE)(ex->token))->strFactory->newStr(((pANTLR3_BASE_TREE)(ex->token))->strFactory, (pANTLR3_UINT8)"-unknown source-");
-			}
-			else
-			{
-				if	(tnode->token->input == NULL)
-				{
-					ex->streamName		= NULL;
-				}
-				else
-				{
-					ex->streamName		= tnode->token->input->fileName;
-				}
-			}
-			ex->message		= "Unexpected node";
-		}
-		break;
-	}
-
-	ex->input						= is;
-	ex->nextException				= recognizer->state->exception;	/* So we don't leak the memory */
-	recognizer->state->exception	= ex;
-	recognizer->state->error	    = ANTLR3_TRUE;	    /* Exception is outstanding	*/
-
-	return;
-}
-
-
-/// Match current input symbol against ttype.  Upon error, do one token
-/// insertion or deletion if possible.  
-/// To turn off single token insertion or deletion error
-/// recovery, override mismatchRecover() and have it call
-/// plain mismatch(), which does not recover.  Then any error
-/// in a rule will cause an exception and immediate exit from
-/// rule.  Rule would recover by resynchronizing to the set of
-/// symbols that can follow rule ref.
-///
-static void *
-match(	pANTLR3_BASE_RECOGNIZER recognizer,
-		ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow)
-{
-    pANTLR3_PARSER			parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-	void					* matchedSymbol;
-
-    switch	(recognizer->type)
-    {
-		case	ANTLR3_TYPE_PARSER:
-
-			parser  = (pANTLR3_PARSER) (recognizer->super);
-			tparser	= NULL;
-			is	= parser->tstream->istream;
-
-			break;
-
-		case	ANTLR3_TYPE_TREE_PARSER:
-
-			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-			parser	= NULL;
-			is	= tparser->ctnstream->tnstream->istream;
-
-			break;
-
-		default:
-		    
-			ANTLR3_FPRINTF(stderr, "Base recognizer function 'match' called by unknown parser type - provide override for this function\n");
-			return NULL;
-
-			break;
-    }
-
-	// Pick up the current input token/node for assignment to labels
-	//
-	matchedSymbol = recognizer->getCurrentInputSymbol(recognizer, is);
-
-    if	(is->_LA(is, 1) == ttype)
-    {
-		// The token was the one we were told to expect
-		//
-		is->consume(is);									// Consume that token from the stream
-		recognizer->state->errorRecovery	= ANTLR3_FALSE;	// Not in error recovery now (if we were)
-		recognizer->state->failed			= ANTLR3_FALSE;	// The match was a success
-		return matchedSymbol;								// We are done
-    }
-
-    // We did not find the expected token type. If we are backtracking then
-    // we just set the failed flag and return.
-    //
-    if	(recognizer->state->backtracking > 0)
-    {
-		// Backtracking is going on
-		//
-		recognizer->state->failed  = ANTLR3_TRUE;
-		return matchedSymbol;
-	}
-
-    // We did not find the expected token and there is no backtracking
-    // going on, so we mismatch, which creates an exception in the recognizer exception
-    // stack.
-    //
-	matchedSymbol = recognizer->recoverFromMismatchedToken(recognizer, ttype, follow);
-    return matchedSymbol;
-}
-
-/// Consumes the next token, whatever it is, and resets the recognizer state
-/// so that it is not in error.
-///
-/// \param recognizer
-/// Recognizer context pointer
-///
-static void
-matchAny(pANTLR3_BASE_RECOGNIZER recognizer)
-{
-    pANTLR3_PARSER	    parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-
-    switch	(recognizer->type)
-    {
-		case	ANTLR3_TYPE_PARSER:
-
-			parser  = (pANTLR3_PARSER) (recognizer->super);
-			tparser	= NULL;
-			is	= parser->tstream->istream;
-
-			break;
-
-		case	ANTLR3_TYPE_TREE_PARSER:
-
-			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-			parser	= NULL;
-			is	= tparser->ctnstream->tnstream->istream;
-
-			break;
-
-		default:
-		    
-			ANTLR3_FPRINTF(stderr, "Base recognizer function 'matchAny' called by unknown parser type - provide override for this function\n");
-			return;
-
-		break;
-    }
-    recognizer->state->errorRecovery	= ANTLR3_FALSE;
-    recognizer->state->failed		    = ANTLR3_FALSE;
-    is->consume(is);
-
-    return;
-}
-/// Work out whether the mismatch is down to a spurious (unwanted) token; that is,
-/// the token after the current one is the token we were actually expecting.
-///
-static ANTLR3_BOOLEAN
-mismatchIsUnwantedToken(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, ANTLR3_UINT32 ttype)
-{
-	ANTLR3_UINT32 nextt;
-
-	nextt = is->_LA(is, 2);
-
-	if	(nextt == ttype)
-	{
-		if	(recognizer->state->exception != NULL)
-		{
-			recognizer->state->exception->expecting = nextt;
-		}
-		return ANTLR3_TRUE;		// This token is unknown, but the next one is the one we wanted
-	}
-	else
-	{
-		return ANTLR3_FALSE;	// Neither this token, nor the one following is the one we wanted
-	}
-}
-
-/// Work out whether the mismatch is down to a missing token; that is, the current
-/// token is one that could legitimately follow the token we were expecting.
-///
-static ANTLR3_BOOLEAN
-mismatchIsMissingToken(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, pANTLR3_BITSET_LIST follow)
-{
-	ANTLR3_BOOLEAN	retcode;
-	pANTLR3_BITSET	followClone;
-	pANTLR3_BITSET	viableTokensFollowingThisRule;
-
-	if	(follow == NULL)
-	{
-		// There is no information about the tokens that can follow the last one
-		// hence we must say that the current one we found is not a member of the 
-		// follow set and does not indicate a missing token. We will just consume this
-		// single token and see if the parser works it out from there.
-		//
-		return	ANTLR3_FALSE;
-	}
-
-	followClone						= NULL;
-	viableTokensFollowingThisRule	= NULL;
-
-	// The C bitset maps are laid down at compile time by the
-	// C code generation, hence we cannot modify them in place.
-	// So, in order to remove EOR (if we need to), we
-	// clone the static bitset.
-	//
-	followClone = antlr3BitsetLoad(follow);
-	if	(followClone == NULL)
-	{
-		return ANTLR3_FALSE;
-	}
-
-	// Compute what can follow this grammar reference
-	//
-	if	(followClone->isMember(followClone, ANTLR3_EOR_TOKEN_TYPE))
-	{
-		// EOR can follow, but if we are not the start symbol, we
-		// need to remove it.
-		//
-		if	(recognizer->state->following->vector->count >= 0)
-		{
-			followClone->remove(followClone, ANTLR3_EOR_TOKEN_TYPE);
-		}
-
-		// Now compute the viable tokens that can follow this rule, according to context
-		// and make them part of the follow set.
-		//
-		viableTokensFollowingThisRule = recognizer->computeCSRuleFollow(recognizer);
-		followClone->borInPlace(followClone, viableTokensFollowingThisRule);
-	}
-
-	/// if current token is consistent with what could come after set
-	/// then we know we're missing a token; error recovery is free to
-	/// "insert" the missing token
-	///
-	/// BitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
-	/// in the follow set to indicate that the follow of the start symbol is
-	/// in the set (EOF can follow).
-	///
-	if	(		followClone->isMember(followClone, is->_LA(is, 1))
-			||	followClone->isMember(followClone, ANTLR3_EOR_TOKEN_TYPE)
-		)
-	{
-		retcode = ANTLR3_TRUE;
-	}
-	else
-	{
-		retcode	= ANTLR3_FALSE;
-	}
-
-	if	(viableTokensFollowingThisRule != NULL)
-	{
-		viableTokensFollowingThisRule->free(viableTokensFollowingThisRule);
-	}
-	if	(followClone != NULL)
-	{
-		followClone->free(followClone);
-	}
-
-	return retcode;
-
-}
-
-/// Factor out what to do upon token mismatch so tree parsers can behave
-/// differently.  Override and call mismatchRecover(input, ttype, follow)
-/// to get single token insertion and deletion.  Use this to turn off
-/// single token insertion and deletion. Override mismatchRecover
-/// to call this instead.
-///
-/// \remark mismatch only works for parsers and must be overridden for anything else.
-///
-static	void
-mismatch(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow)
-{
-    pANTLR3_PARSER	    parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-
-    // Install a mismatched token exception in the exception stack
-    //
-    antlr3MTExceptionNew(recognizer);
-    recognizer->state->exception->expecting    = ttype;
-
-    switch	(recognizer->type)
-    {
-		case	ANTLR3_TYPE_PARSER:
-
-			parser  = (pANTLR3_PARSER) (recognizer->super);
-			tparser	= NULL;
-			is	= parser->tstream->istream;
-
-			break;
-
-		default:
-		    
-			ANTLR3_FPRINTF(stderr, "Base recognizer function 'mismatch' called by unknown parser type - provide override for this function\n");
-			return;
-
-			break;
-    }
-
-	if	(mismatchIsUnwantedToken(recognizer, is, ttype))
-	{
-		// Create a basic recognition exception structure
-		//
-	    antlr3RecognitionExceptionNew(recognizer);
-		
-		// Now update it to indicate this is an unwanted token exception
-		//
-		recognizer->state->exception->name		= ANTLR3_UNWANTED_TOKEN_EXCEPTION_NAME;
-		recognizer->state->exception->type		= ANTLR3_UNWANTED_TOKEN_EXCEPTION;
-
-		return;
-	}
-	
-	if	(mismatchIsMissingToken(recognizer, is, follow))
-	{
-		// Create a basic recognition exception structure
-		//
-	    antlr3RecognitionExceptionNew(recognizer);
-		
-		// Now update it to indicate this is a missing token exception
-		//
-		recognizer->state->exception->name		= ANTLR3_MISSING_TOKEN_EXCEPTION_NAME;
-		recognizer->state->exception->type		= ANTLR3_MISSING_TOKEN_EXCEPTION;
-
-		return;
-	}
-
-	// Just a mismatched token is all we can determine
-	//
-	antlr3MTExceptionNew(recognizer);
-
-	return;
-}
-/// Report a recognition problem.
-///
-/// This method sets errorRecovery to indicate the parser is recovering
-/// not parsing.  Once in recovery mode, no errors are generated.
-/// To get out of recovery mode, the parser must successfully match
-/// a token (after a resync).  So it will go:
-///
-///		1. error occurs
-///		2. enter recovery mode, report error
-///		3. consume until token found in resynch set
-///		4. try to resume parsing
-///		5. next match() will reset errorRecovery mode
-///
-/// If you override, make sure to update errorCount if you care about that.
-///
-static void			
-reportError		    (pANTLR3_BASE_RECOGNIZER recognizer)
-{
-    	// Invoke the debugger event if there is a debugger listening to us
-	//
-	if	(recognizer->debugger != NULL)
-	{
-		recognizer->debugger->recognitionException(recognizer->debugger, recognizer->state->exception);
-	}
-
-    if	(recognizer->state->errorRecovery == ANTLR3_TRUE)
-    {
-		// Already in error recovery so don't display another error while doing so
-		//
-		return;
-    }
-
-    // Signal we are in error recovery now
-    //
-    recognizer->state->errorRecovery = ANTLR3_TRUE;
-	
-	// Indicate this recognizer had an error while processing.
-	//
-	recognizer->state->errorCount++;
-
-	// Call the error display routine
-	//
-    recognizer->displayRecognitionError(recognizer, recognizer->state->tokenNames);
-}
-
-static void
-beginBacktrack		(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 level)
-{
-	if	(recognizer->debugger != NULL)
-	{
-		recognizer->debugger->beginBacktrack(recognizer->debugger, level);
-	}
-}
-
-static void
-endBacktrack		(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 level, ANTLR3_BOOLEAN successful)
-{
-	if	(recognizer->debugger != NULL)
-	{
-		recognizer->debugger->endBacktrack(recognizer->debugger, level, successful);
-	}
-}
-static void			
-beginResync		    (pANTLR3_BASE_RECOGNIZER recognizer)
-{
-	if	(recognizer->debugger != NULL)
-	{
-		recognizer->debugger->beginResync(recognizer->debugger);
-	}
-}
-
-static void			
-endResync		    (pANTLR3_BASE_RECOGNIZER recognizer)
-{
-	if	(recognizer->debugger != NULL)
-	{
-		recognizer->debugger->endResync(recognizer->debugger);
-	}
-}
-
-/// Compute the error recovery set for the current rule.
-/// Documentation below is from the Java implementation.
-///
-/// During rule invocation, the parser pushes the set of tokens that can
-/// follow that rule reference on the stack; this amounts to
-/// computing FIRST of what follows the rule reference in the
-/// enclosing rule. This local follow set only includes tokens
-/// from within the rule; i.e., the FIRST computation done by
-/// ANTLR stops at the end of a rule.
-//
-/// EXAMPLE
-//
-/// When you find a "no viable alt exception", the input is not
-/// consistent with any of the alternatives for rule r.  The best
-/// thing to do is to consume tokens until you see something that
-/// can legally follow a call to r *or* any rule that called r.
-/// You don't want the exact set of viable next tokens because the
-/// input might just be missing a token--you might consume the
-/// rest of the input looking for one of the missing tokens.
-///
-/// Consider grammar:
-///
-/// a : '[' b ']'
-///   | '(' b ')'
-///   ;
-/// b : c '^' INT ;
-/// c : ID
-///   | INT
-///   ;
-///
-/// At each rule invocation, the set of tokens that could follow
-/// that rule is pushed on a stack.  Here are the various "local"
-/// follow sets:
-///
-/// FOLLOW(b1_in_a) = FIRST(']') = ']'
-/// FOLLOW(b2_in_a) = FIRST(')') = ')'
-/// FOLLOW(c_in_b) = FIRST('^') = '^'
-///
-/// Upon erroneous input "[]", the call chain is
-///
-/// a -> b -> c
-///
-/// and, hence, the follow context stack is:
-///
-/// depth  local follow set     after call to rule
-///   0         <EOF>                    a (from main())
-///   1          ']'                     b
-///   2          '^'                     c
-///
-/// Notice that ')' is not included, because b would have to have
-/// been called from a different context in rule a for ')' to be
-/// included.
-///
-/// For error recovery, we cannot consider FOLLOW(c)
-/// (context-sensitive or otherwise).  We need the combined set of
-/// all context-sensitive FOLLOW sets--the set of all tokens that
-/// could follow any reference in the call chain.  We need to
-/// resync to one of those tokens.  Note that FOLLOW(c)='^' and if
-/// we resync'd to that token, we'd consume until EOF.  We need to
-/// sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
-/// In this case, for input "[]", LA(1) is in this set so we would
-/// not consume anything and after printing an error rule c would
-/// return normally.  It would not find the required '^' though.
-/// At this point, it gets a mismatched token error and throws an
-/// exception (since LA(1) is not in the viable following token
-/// set).  The rule exception handler tries to recover, but finds
-/// the same recovery set and doesn't consume anything.  Rule b
-/// exits normally returning to rule a.  Now it finds the ']' (and
-/// with the successful match exits errorRecovery mode).
-///
-/// So, you can see that the parser walks up call chain looking
-/// for the token that was a member of the recovery set.
-///
-/// Errors are not generated in errorRecovery mode.
-///
-/// ANTLR's error recovery mechanism is based upon original ideas:
-///
-/// "Algorithms + Data Structures = Programs" by Niklaus Wirth
-///
-/// and
-///
-/// "A note on error recovery in recursive descent parsers":
-/// http://portal.acm.org/citation.cfm?id=947902.947905
-///
-/// Later, Josef Grosch had some good ideas:
-///
-/// "Efficient and Comfortable Error Recovery in Recursive Descent
-/// Parsers":
-/// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
-///
-/// Like Grosch I implemented local FOLLOW sets that are combined
-/// at run-time upon error to avoid overhead during parsing.
-///
-static pANTLR3_BITSET		
-computeErrorRecoverySet	    (pANTLR3_BASE_RECOGNIZER recognizer)
-{
-    return   recognizer->combineFollows(recognizer, ANTLR3_FALSE);
-}
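To make the follow-stack walk-through above concrete, here is a minimal standalone C99 sketch. It is not part of the ANTLR3 runtime: the token constants and the plain bitmask representation are invented stand-ins for pANTLR3_BITSET and combineFollows(), reproducing the "[]" example from the comment.

/* Standalone illustration: union the "local" FOLLOW sets pushed during
 * the call chain a -> b -> c for erroneous input "[]".
 */
#include <stdio.h>

enum { TOK_EOF = 1, TOK_RBRACK = 2, TOK_RPAREN = 3, TOK_CARET = 4 };

int main(void)
{
    /* Local follow sets pushed at each rule invocation, bottom to top. */
    unsigned followStack[3] = {
        1u << TOK_EOF,      /* after call to a (from main)   */
        1u << TOK_RBRACK,   /* after call to b from a: ']'   */
        1u << TOK_CARET     /* after call to c from b: '^'   */
    };

    /* Error recovery set = union of everything on the stack, which is
     * what combineFollows() computes with bitsets.
     */
    unsigned recoverySet = 0;
    for (int i = 2; i >= 0; i--)
    {
        recoverySet |= followStack[i];
    }

    /* For "[]", LA(1) is ']' and is a member of the combined set, so
     * recover() consumes nothing; c returns, b exits, a matches ']'.
     * Note that ')' is not in the set, as the comment above explains.
     */
    printf("']' in recovery set: %s\n",
           (recoverySet & (1u << TOK_RBRACK)) ? "yes" : "no");
    printf("')' in recovery set: %s\n",
           (recoverySet & (1u << TOK_RPAREN)) ? "yes" : "no");
    return 0;
}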
-
-/// Compute the context-sensitive FOLLOW set for current rule.
-/// Documentation below is from the Java runtime.
-///
-/// This is the set of token types that can follow a specific rule
-/// reference given a specific call chain.  You get the set of
-/// viable tokens that can possibly come next (look ahead depth 1)
-/// given the current call chain.  Contrast this with the
-/// definition of plain FOLLOW for rule r:
-///
-///  FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
-///
-/// where x in T* and alpha, beta in V*; T is set of terminals and
-/// V is the set of terminals and non terminals.  In other words,
-/// FOLLOW(r) is the set of all tokens that can possibly follow
-/// references to r in *any* sentential form (context).  At
-/// runtime, however, we know precisely which context applies as
-/// we have the call chain.  We may compute the exact (rather
-/// than covering superset) set of following tokens.
-///
-/// For example, consider grammar:
-///
-/// stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
-///      | "return" expr '.'
-///      ;
-/// expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
-/// atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
-///      | '(' expr ')'
-///      ;
-///
-/// The FOLLOW sets are all inclusive whereas context-sensitive
-/// FOLLOW sets are precisely what could follow a rule reference.
-/// For input "i=(3);", here is the derivation:
-///
-/// stat => ID '=' expr ';'
-///      => ID '=' atom ('+' atom)* ';'
-///      => ID '=' '(' expr ')' ('+' atom)* ';'
-///      => ID '=' '(' atom ')' ('+' atom)* ';'
-///      => ID '=' '(' INT ')' ('+' atom)* ';'
-///      => ID '=' '(' INT ')' ';'
-///
-/// At the "3" token, you'd have a call chain of
-///
-///   stat -> expr -> atom -> expr -> atom
-///
-/// What can follow that specific nested ref to atom?  Exactly ')'
-/// as you can see by looking at the derivation of this specific
-/// input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
-///
-/// You want the exact viable token set when recovering from a
-/// token mismatch.  Upon token mismatch, if LA(1) is member of
-/// the viable next token set, then you know there is most likely
-/// a missing token in the input stream.  "Insert" one by just not
-/// throwing an exception.
-///
-static pANTLR3_BITSET		
-computeCSRuleFollow	    (pANTLR3_BASE_RECOGNIZER recognizer)
-{
-    return   recognizer->combineFollows(recognizer, ANTLR3_FALSE);
-}
-
-/// Compute the current followset for the input stream.
-///
-static pANTLR3_BITSET		
-combineFollows		    (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_BOOLEAN exact)
-{
-    pANTLR3_BITSET	followSet;
-    pANTLR3_BITSET	localFollowSet;
-    ANTLR3_UINT32	top;
-    ANTLR3_UINT32	i;
-
-    top	= recognizer->state->following->size(recognizer->state->following);
-
-    followSet	    = antlr3BitsetNew(0);
-	localFollowSet	= NULL;
-
-    for (i = top; i>0; i--)
-    {
-		localFollowSet = antlr3BitsetLoad((pANTLR3_BITSET_LIST) recognizer->state->following->get(recognizer->state->following, i-1));
-
-		if  (localFollowSet != NULL)
-		{
-			followSet->borInPlace(followSet, localFollowSet);
-
-			if	(exact == ANTLR3_TRUE)
-			{
-				if	(localFollowSet->isMember(localFollowSet, ANTLR3_EOR_TOKEN_TYPE) == ANTLR3_FALSE)
-				{
-					// Only leave EOR in the set if at top (start rule); this lets us know
-					// if we have to include the follow(start rule); I.E., EOF
-					//
-					if	(i>1)
-					{
-						followSet->remove(followSet, ANTLR3_EOR_TOKEN_TYPE);
-					}
-				}
-				else
-				{
-					break;	// Cannot see End Of Rule from here, just drop out
-				}
-			}
-			localFollowSet->free(localFollowSet);
-			localFollowSet = NULL;
-		}
-    }
-
-	if	(localFollowSet != NULL)
-	{
-		localFollowSet->free(localFollowSet);
-	}
-    return  followSet;
-}
-
-/// Standard/Example error display method.
-/// No generic error message display function could possibly do everything correctly
-/// for all possible parsers. Hence you are provided with this example routine, which
-/// you should override in your parser/tree parser to do as you will.
-///
-/// Here we depart somewhat from the Java runtime as that has now split up a lot
-/// of the error display routines into separate units. However, there is little advantage
-/// to this in the C version as you will probably implement all such routines as a 
-/// separate translation unit, rather than install them all as pointers to functions
-/// in the base recognizer.
-///
-static void			
-displayRecognitionError	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_UINT8 * tokenNames)
-{
-	pANTLR3_PARSER			parser;
-	pANTLR3_TREE_PARSER	    tparser;
-	pANTLR3_INT_STREAM	    is;
-	pANTLR3_STRING			ttext;
-	pANTLR3_STRING			ftext;
-	pANTLR3_EXCEPTION	    ex;
-	pANTLR3_COMMON_TOKEN    theToken;
-	pANTLR3_BASE_TREE	    theBaseTree;
-	pANTLR3_COMMON_TREE	    theCommonTree;
-
-	// Retrieve some info for easy reading.
-	//
-	ex	    =		recognizer->state->exception;
-	ttext   =		NULL;
-
-	// See if there is a 'filename' we can use
-	//
-	if	(ex->streamName == NULL)
-	{
-		if	(((pANTLR3_COMMON_TOKEN)(ex->token))->type == ANTLR3_TOKEN_EOF)
-		{
-			ANTLR3_FPRINTF(stderr, "-end of input-(");
-		}
-		else
-		{
-			ANTLR3_FPRINTF(stderr, "-unknown source-(");
-		}
-	}
-	else
-	{
-		ftext = ex->streamName->to8(ex->streamName);
-		ANTLR3_FPRINTF(stderr, "%s(", ftext->chars);
-	}
-
-	// Next comes the line number
-	//
-
-	ANTLR3_FPRINTF(stderr, "%d) ", recognizer->state->exception->line);
-	ANTLR3_FPRINTF(stderr, " : error %d : %s", 
-										recognizer->state->exception->type,
-					(pANTLR3_UINT8)	   (recognizer->state->exception->message));
-
-
-	// How we determine the next piece is dependent on which thing raised the
-	// error.
-	//
-	switch	(recognizer->type)
-	{
-	case	ANTLR3_TYPE_PARSER:
-
-		// Prepare the knowledge we know we have
-		//
-		parser	    = (pANTLR3_PARSER) (recognizer->super);
-		tparser	    = NULL;
-		is			= parser->tstream->istream;
-		theToken    = (pANTLR3_COMMON_TOKEN)(recognizer->state->exception->token);
-		ttext	    = theToken->toString(theToken);
-
-		ANTLR3_FPRINTF(stderr, ", at offset %d", recognizer->state->exception->charPositionInLine);
-		if  (theToken != NULL)
-		{
-			if (theToken->type == ANTLR3_TOKEN_EOF)
-			{
-				ANTLR3_FPRINTF(stderr, ", at <EOF>");
-			}
-			else
-			{
-				// Guard against null text in a token
-				//
-				ANTLR3_FPRINTF(stderr, "\n    near %s\n    ", ttext == NULL ? (pANTLR3_UINT8)"<no text for the token>" : ttext->chars);
-			}
-		}
-		break;
-
-	case	ANTLR3_TYPE_TREE_PARSER:
-
-		tparser		= (pANTLR3_TREE_PARSER) (recognizer->super);
-		parser		= NULL;
-		is			= tparser->ctnstream->tnstream->istream;
-		theBaseTree	= (pANTLR3_BASE_TREE)(recognizer->state->exception->token);
-		ttext		= theBaseTree->toStringTree(theBaseTree);
-
-		if  (theBaseTree != NULL)
-		{
-			theCommonTree	= (pANTLR3_COMMON_TREE)	    theBaseTree->super;
-
-			if	(theCommonTree != NULL)
-			{
-				theToken	= (pANTLR3_COMMON_TOKEN)    theBaseTree->getToken(theBaseTree);
-			}
-			ANTLR3_FPRINTF(stderr, ", at offset %d", theBaseTree->getCharPositionInLine(theBaseTree));
-			ANTLR3_FPRINTF(stderr, ", near %s", ttext->chars);
-		}
-		break;
-
-	default:
-
-		ANTLR3_FPRINTF(stderr, "Base recognizer function displayRecognitionError called by unknown parser type - provide override for this function\n");
-		return;
-		break;
-	}
-
-	// Although this function should generally be provided by the implementation, this one
-	// should be as helpful as possible for grammar developers and serve as an example
-	// of what you can do with each exception type. In general, when you make up your
-	// 'real' handler, you should debug the routine with all possible errors you expect
-	// which will then let you be as specific as possible about all circumstances.
-	//
-	// Note that in the general case, errors thrown by tree parsers indicate a problem
-	// with the output of the parser or with the tree grammar itself. The job of the parser
-	// is to produce a perfect (in traversal terms) syntactically correct tree, so errors
-	// at that stage should really be semantic errors that your own code determines and handles
-	// in whatever way is appropriate.
-	//
-	switch  (ex->type)
-	{
-	case	ANTLR3_UNWANTED_TOKEN_EXCEPTION:
-
-		// Indicates that the recognizer was fed a token which seems to be
-		// spurious input. We can detect this when the token that follows
-		// this unwanted token would normally be part of the syntactically
-		// correct stream. Then we can see that the token we are looking at
-		// is just something that should not be there and throw this exception.
-		//
-		if	(tokenNames == NULL)
-		{
-			ANTLR3_FPRINTF(stderr, " : Extraneous input...");
-		}
-		else
-		{
-			if	(ex->expecting == ANTLR3_TOKEN_EOF)
-			{
-				ANTLR3_FPRINTF(stderr, " : Extraneous input - expected <EOF>\n");
-			}
-			else
-			{
-				ANTLR3_FPRINTF(stderr, " : Extraneous input - expected %s ...\n", tokenNames[ex->expecting]);
-			}
-		}
-		break;
-
-	case	ANTLR3_MISSING_TOKEN_EXCEPTION:
-
-		// Indicates that the recognizer detected that the token we just
-		// hit would be valid syntactically if preceded by a particular
-		// token. Perhaps a missing ';' at line end or a missing ',' in an
-		// expression list, and the like.
-		//
-		if	(tokenNames == NULL)
-		{
-			ANTLR3_FPRINTF(stderr, " : Missing token (%d)...\n", ex->expecting);
-		}
-		else
-		{
-			if	(ex->expecting == ANTLR3_TOKEN_EOF)
-			{
-				ANTLR3_FPRINTF(stderr, " : Missing <EOF>\n");
-			}
-			else
-			{
-				ANTLR3_FPRINTF(stderr, " : Missing %s \n", tokenNames[ex->expecting]);
-			}
-		}
-		break;
-
-	case	ANTLR3_RECOGNITION_EXCEPTION:
-
-		// Indicates that the recognizer received a token
-		// in the input that was not predicted. This is the basic exception type 
-		// from which all others are derived. So we assume it was a syntax error.
-		// You may get this if there are no more tokens and more are needed
-		// to complete a parse, for instance.
-		//
-		ANTLR3_FPRINTF(stderr, " : syntax error...\n");    
-		break;
-
-	case    ANTLR3_MISMATCHED_TOKEN_EXCEPTION:
-
-		// We were expecting to see one thing and got another. This is the
-		// most common error if we could not detect a missing or unwanted token.
-		// Here you can spend your efforts to
-		// derive more useful error messages based on the expected
-		// token set and the last token and so on. The error following
-		// bitmaps do a good job of reducing the set that we were looking
-		// for down to something small. Knowing what you are parsing may
-		// allow you to be even more specific about an error.
-		//
-		if	(tokenNames == NULL)
-		{
-			ANTLR3_FPRINTF(stderr, " : syntax error...\n");
-		}
-		else
-		{
-			if	(ex->expecting == ANTLR3_TOKEN_EOF)
-			{
-				ANTLR3_FPRINTF(stderr, " : expected <EOF>\n");
-			}
-			else
-			{
-				ANTLR3_FPRINTF(stderr, " : expected %s ...\n", tokenNames[ex->expecting]);
-			}
-		}
-		break;
-
-	case	ANTLR3_NO_VIABLE_ALT_EXCEPTION:
-
-		// We could not pick any alt decision from the input given
-		// so god knows what happened - however, when you examine your grammar,
-		// you should be able to see why. It means that at the point where the current
-		// token occurred, the DFA indicates nowhere to go from here.
-		//
-		ANTLR3_FPRINTF(stderr, " : cannot match to any predicted input...\n");
-
-		break;
-
-	case	ANTLR3_MISMATCHED_SET_EXCEPTION:
-
-		{
-			ANTLR3_UINT32	  count;
-			ANTLR3_UINT32	  bit;
-			ANTLR3_UINT32	  size;
-			ANTLR3_UINT32	  numbits;
-			pANTLR3_BITSET	  errBits;
-
-			// This means we were able to deal with one of a set of
-			// possible tokens at this point, but we did not see any
-			// member of that set.
-			//
-			ANTLR3_FPRINTF(stderr, " : unexpected input...\n  expected one of : ");
-
-			// What tokens could we have accepted at this point in the
-			// parse?
-			//
-			count   = 0;
-			errBits = antlr3BitsetLoad		(ex->expectingSet);
-			numbits = errBits->numBits		(errBits);
-			size    = errBits->size			(errBits);
-
-			if  (size > 0)
-			{
-				// However many tokens we could have dealt with here, it is usually
-				// not useful to print ALL of the set. I arbitrarily chose 8,
-				// but you should do whatever makes sense for you of course.
-				// No token number 0, so look for bit 1 and on.
-				//
-				for	(bit = 1; bit < numbits && count < 8 && count < size; bit++)
-				{
-					// TODO: This doesn't look right - should be asking if the bit is set!!
-					//
-					if  (tokenNames[bit])
-					{
-						ANTLR3_FPRINTF(stderr, "%s%s", count > 0 ? ", " : "", tokenNames[bit]); 
-						count++;
-					}
-				}
-				ANTLR3_FPRINTF(stderr, "\n");
-			}
-			else
-			{
-				ANTLR3_FPRINTF(stderr, "Actually dude, we didn't seem to be expecting anything here, or at least\n");
-				ANTLR3_FPRINTF(stderr, "I could not work out what I was expecting, like so many of us these days!\n");
-			}
-		}
-		break;
-
-	case	ANTLR3_EARLY_EXIT_EXCEPTION:
-
-		// We entered a loop requiring a number of token sequences
-		// but found a token that ended that sequence earlier than
-		// we should have done.
-		//
-		ANTLR3_FPRINTF(stderr, " : missing elements...\n");
-		break;
-
-	default:
-
-		// We don't handle any other exceptions here, but you can
-		// if you wish. If we get an exception that hits this point
-		// then we are just going to report what we know about the
-		// token.
-		//
-		ANTLR3_FPRINTF(stderr, " : syntax not recognized...\n");
-		break;
-	}
-
-	// Here you have the token that was in error which, if this is
-	// the standard implementation, will tell you the line and offset
-	// and also record the address of the start of the line in the
-	// input stream. You could therefore print the source line and so on.
-	// Generally though, I would expect that your lexer/parser will keep
-	// its own map of lines and source pointers or whatever as there
-	// are a lot of specific things you need to know about the input
-	// to do something like that.
-	// Here is where you do it though :-).
-	//
-}
-
-/// Return how many syntax errors were detected by this recognizer
-///
-static ANTLR3_UINT32
-getNumberOfSyntaxErrors(pANTLR3_BASE_RECOGNIZER recognizer)
-{
-	return	recognizer->state->errorCount;
-}
-
-/// Recover from an error found on the input stream.  Mostly this is
-/// NoViableAlt exceptions, but could be a mismatched token that
-/// the match() routine could not recover from.
-///
-static void			
-recover			    (pANTLR3_BASE_RECOGNIZER recognizer)
-{
-    // Used to compute the follow set of tokens
-    //
-    pANTLR3_BITSET			followSet;
-    pANTLR3_PARSER			parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-
-    switch	(recognizer->type)
-    {
-		case	ANTLR3_TYPE_PARSER:
-
-		parser  = (pANTLR3_PARSER) (recognizer->super);
-		tparser	= NULL;
-		is		= parser->tstream->istream;
-
-	break;
-
-    case	ANTLR3_TYPE_TREE_PARSER:
-
-		tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-		parser	= NULL;
-		is		= tparser->ctnstream->tnstream->istream;
-
-	break;
-
-    default:
-	    
-		ANTLR3_FPRINTF(stderr, "Base recognizer function recover called by unknown parser type - provide override for this function\n");
-		return;
-
-	break;
-    }
-
-	// Are we about to repeat the same error?
-	//
-    if	(recognizer->state->lastErrorIndex == is->index(is))
-    {
-		// The last error was at the same token index point. This must be a case
-		// where LT(1) is in the recovery token set so nothing is
-		// consumed. Consume a single token, at least to prevent
-		// an infinite loop; this is a failsafe.
-		//
-		is->consume(is);
-    }
-
-    // Record error index position
-    //
-    recognizer->state->lastErrorIndex	 = is->index(is);
-    
-    // Work out the follows set for error recovery
-    //
-    followSet	= recognizer->computeErrorRecoverySet(recognizer);
-
-    // Call resync hook (for debuggers and so on)
-    //
-    recognizer->beginResync(recognizer);
-
-    // Consume tokens until we have resynced to something in the follows set
-    //
-    recognizer->consumeUntilSet(recognizer, followSet);
-
-    // End resync hook 
-    //
-    recognizer->endResync(recognizer);
-
-    // Destroy the temporary bitset we produced.
-    //
-    followSet->free(followSet);
-
-    // Reset the inError flag so we don't re-report the exception
-    //
-    recognizer->state->error	= ANTLR3_FALSE;
-    recognizer->state->failed	= ANTLR3_FALSE;
-}
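The resync behaviour just implemented reduces to a short loop plus a failsafe. The sketch below is a standalone illustration only, not runtime code: the token array, index handling, and recovery set are invented stand-ins for pANTLR3_INT_STREAM, lastErrorIndex, and computeErrorRecoverySet().

/* Standalone illustration of recover(): skip tokens until LA(1) is in the
 * recovery set, with the "same error index" failsafe to avoid looping.
 */
#include <stdio.h>

#define TOK_EOF 0

static int isMember(int tok, const int *set, int n)
{
    for (int i = 0; i < n; i++) if (set[i] == tok) return 1;
    return 0;
}

int main(void)
{
    /* Hypothetical token stream and recovery set. */
    int input[]        = { 7, 8, 9, 4, 5, TOK_EOF };
    int recoverySet[]  = { 4, TOK_EOF };
    int index          = 0;
    int lastErrorIndex = -1;

    /* Failsafe: if we already errored here without consuming, eat one token. */
    if (lastErrorIndex == index && input[index] != TOK_EOF)
        index++;
    lastErrorIndex = index;

    /* Resync: consume until a member of the recovery set appears. */
    while (input[index] != TOK_EOF && !isMember(input[index], recoverySet, 2))
        index++;

    printf("resynced at index %d (token %d)\n", index, input[index]);
    return 0;
}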
-
-
-/// Attempt to recover from a single missing or extra token.
-///
-/// EXTRA TOKEN
-///
-/// LA(1) is not what we are looking for.  If LA(2) has the right token,
-/// however, then assume LA(1) is some extra spurious token.  Delete it
-/// and LA(2) as if we were doing a normal match(), which advances the
-/// input.
-///
-/// MISSING TOKEN
-///
-/// If current token is consistent with what could come after
-/// ttype then it is ok to "insert" the missing token, else throw
-/// exception For example, Input "i=(3;" is clearly missing the
-/// ')'.  When the parser returns from the nested call to expr, it
-/// will have call chain:
-///
-///    stat -> expr -> atom
-///
-/// and it will be trying to match the ')' at this point in the
-/// derivation:
-///
-///       => ID '=' '(' INT ')' ('+' atom)* ';'
-///                          ^
-/// match() will see that ';' doesn't match ')' and report a
-/// mismatched token error.  To recover, it sees that LA(1)==';'
-/// is in the set of tokens that can follow the ')' token
-/// reference in rule atom.  It can assume that you forgot the ')'.
-///
-/// The exception that was passed in, in the java implementation is
-/// sorted in the recognizer exception stack in the C version. To 'throw' it we set the
-/// error flag and rules cascade back when this is set.
-///
-static void *	
-recoverFromMismatchedToken  (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow)
-{
-	pANTLR3_PARSER			  parser;
-	pANTLR3_TREE_PARSER	      tparser;
-	pANTLR3_INT_STREAM	      is;
-	void					* matchedSymbol;
-
-
-
-	switch	(recognizer->type)
-	{
-	case	ANTLR3_TYPE_PARSER:
-
-		parser  = (pANTLR3_PARSER) (recognizer->super);
-		tparser	= NULL;
-		is	= parser->tstream->istream;
-
-		break;
-
-	case	ANTLR3_TYPE_TREE_PARSER:
-
-		tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-		parser	= NULL;
-		is	= tparser->ctnstream->tnstream->istream;
-
-		break;
-
-	default:
-
-		ANTLR3_FPRINTF(stderr, "Base recognizer function recoverFromMismatchedToken called by unknown parser type - provide override for this function\n");
-		return NULL;
-
-		break;
-	}
-
-	// Create an exception if we need one
-	//
-	if	(recognizer->state->exception == NULL)
-	{
-		antlr3RecognitionExceptionNew(recognizer);
-	}
-
-	// If the next token after the one we are looking at in the input stream
-	// is what we are looking for then we remove the one we have discovered
-	// from the stream by consuming it, then consume this next one along too as
-	// if nothing had happened.
-	//
-	if	( recognizer->mismatchIsUnwantedToken(recognizer, is, ttype) == ANTLR3_TRUE)
-	{
-		recognizer->state->exception->type		= ANTLR3_UNWANTED_TOKEN_EXCEPTION;
-		recognizer->state->exception->message	= ANTLR3_UNWANTED_TOKEN_EXCEPTION_NAME;
-
-		// Call resync hook (for debuggers and so on)
-		//
-		if	(recognizer->debugger != NULL)
-		{
-			recognizer->debugger->beginResync(recognizer->debugger);
-		}
-
-		// "delete" the extra token
-		//
-		recognizer->beginResync(recognizer);
-		is->consume(is);
-		recognizer->endResync(recognizer);
-		// End resync hook 
-		//
-		if	(recognizer->debugger != NULL)
-		{
-			recognizer->debugger->endResync(recognizer->debugger);
-		}
-
-		// Print out the error after we consume so that ANTLRWorks sees the
-		// token in the exception.
-		//
-		recognizer->reportError(recognizer);
-
-		// Return the token we are actually matching
-		//
-		matchedSymbol = recognizer->getCurrentInputSymbol(recognizer, is);
-
-		// Consume the token that the rule actually expected to get as if everything
-		// was hunky dory.
-		//
-		is->consume(is);
-
-		recognizer->state->error  = ANTLR3_FALSE;	// Exception is not outstanding any more
-
-		return	matchedSymbol;
-	}
-
-	// Single token deletion (Unwanted above) did not work
-	// so we see if we can insert a token instead by calculating which
-	// token would be missing
-	//
-	if	(mismatchIsMissingToken(recognizer, is, follow))
-	{
-		// We can fake the missing token and proceed
-		//
-		matchedSymbol = recognizer->getMissingSymbol(recognizer, is, recognizer->state->exception, ttype, follow);
-		recognizer->state->exception->type		= ANTLR3_MISSING_TOKEN_EXCEPTION;
-		recognizer->state->exception->message	= ANTLR3_MISSING_TOKEN_EXCEPTION_NAME;
-		recognizer->state->exception->token		= matchedSymbol;
-		recognizer->state->exception->expecting	= ttype;
-
-		// Print out the error after we insert so that ANTLRWorks sees the
-		// token in the exception.
-		//
-		recognizer->reportError(recognizer);
-
-		recognizer->state->error  = ANTLR3_FALSE;	// Exception is not outstanding any more
-
-		return	matchedSymbol;
-	}
-
-
-	// Neither deleting nor inserting tokens allows recovery, so we
-	// must just report the exception.
-	//
-	recognizer->state->error	    = ANTLR3_TRUE;
-	return NULL;
-}
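The two recovery decisions described above (delete a spurious token, or fake a missing one) come down to two lookahead tests. The following standalone sketch is illustrative only; the token constants, LA() helper, and follow array are made up and merely mirror what mismatchIsUnwantedToken() and mismatchIsMissingToken() check.

/* Standalone illustration of single-token deletion and insertion decisions. */
#include <stdio.h>

#define TOK_SEMI   1
#define TOK_RPAREN 2
#define TOK_INT    3

/* A toy two-token lookahead window standing in for is->_LA(is, 1..2). */
static int LA(const int *input, int k) { return input[k - 1]; }

/* Is the current token in the set that may follow the expected one? */
static int inFollow(int tok, const int *follow, int n)
{
    for (int i = 0; i < n; i++) if (follow[i] == tok) return 1;
    return 0;
}

int main(void)
{
    int followOfRparen[] = { TOK_SEMI };        /* what may follow ')' */

    /* MISSING TOKEN: input "i=(3;" - we expect ')' but see ';'.
     * ';' can follow ')', so we pretend ')' was there and carry on. */
    int missing[] = { TOK_SEMI, 0 };
    if (LA(missing, 1) != TOK_RPAREN &&
        inFollow(LA(missing, 1), followOfRparen, 1))
    {
        printf("insert missing ')' and continue\n");
    }

    /* EXTRA TOKEN: we expect ')' but see INT, and the *next* token is ')'.
     * Delete the spurious INT, then match ')' as normal. */
    int extra[] = { TOK_INT, TOK_RPAREN };
    if (LA(extra, 1) != TOK_RPAREN && LA(extra, 2) == TOK_RPAREN)
    {
        printf("delete spurious token, then match ')'\n");
    }
    return 0;
}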
-
-static void *
-recoverFromMismatchedSet	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET_LIST follow)
-{
-    pANTLR3_PARSER			parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-	pANTLR3_COMMON_TOKEN	matchedSymbol;
-
-    switch	(recognizer->type)
-    {
-    case	ANTLR3_TYPE_PARSER:
-
-		parser  = (pANTLR3_PARSER) (recognizer->super);
-		tparser	= NULL;
-		is	= parser->tstream->istream;
-
-	break;
-
-    case	ANTLR3_TYPE_TREE_PARSER:
-
-		tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-		parser	= NULL;
-		is	= tparser->ctnstream->tnstream->istream;
-
-	break;
-
-    default:
-	    
-		ANTLR3_FPRINTF(stderr, "Base recognizer function recoverFromMismatchedSet called by unknown parser type - provide override for this function\n");
-		return NULL;
-
-	break;
-    }
-
-	if	(recognizer->mismatchIsMissingToken(recognizer, is, follow) == ANTLR3_TRUE)
-	{
-		// We can fake the missing token and proceed
-		//
-		matchedSymbol = recognizer->getMissingSymbol(recognizer, is, recognizer->state->exception, ANTLR3_TOKEN_INVALID, follow);
-		recognizer->state->exception->type	= ANTLR3_MISSING_TOKEN_EXCEPTION;
-		recognizer->state->exception->token	= matchedSymbol;
-
-		// Print out the error after we insert so that ANTLRWorks sees the
-		// token in the exception.
-		//
-		recognizer->reportError(recognizer);
-
-		recognizer->state->error  = ANTLR3_FALSE;	// Exception is not outstanding any more
-
-		return	matchedSymbol;
-	}
-
-    // TODO - Single token deletion like in recoverFromMismatchedToken()
-    //
-    recognizer->state->error	= ANTLR3_TRUE;
-	recognizer->state->failed	= ANTLR3_TRUE;
-	return NULL;
-}
-
-/// This code is factored out from mismatched token and mismatched set
-///  recovery.  It handles "single token insertion" error recovery for
-/// both.  No tokens are consumed to recover from insertions.  Return
-/// true if recovery was possible else return false.
-///
-static ANTLR3_BOOLEAN	
-recoverFromMismatchedElement	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET_LIST followBits)
-{
-    pANTLR3_BITSET	    viableToksFollowingRule;
-    pANTLR3_BITSET	    follow;
-    pANTLR3_PARSER	    parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-
-    switch	(recognizer->type)
-    {
-    case	ANTLR3_TYPE_PARSER:
-
-		parser  = (pANTLR3_PARSER) (recognizer->super);
-		tparser	= NULL;
-		is	= parser->tstream->istream;
-
-	break;
-
-    case	ANTLR3_TYPE_TREE_PARSER:
-
-		tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-		parser	= NULL;
-		is	= tparser->ctnstream->tnstream->istream;
-
-	break;
-
-    default:
-	    
-		ANTLR3_FPRINTF(stderr, "Base recognizer function recover called by unknown parser type - provide override for this function\n");
-		return ANTLR3_FALSE;
-
-	break;
-    }
-
-    follow	= antlr3BitsetLoad(followBits);
-
-    if	(follow == NULL)
-    {
-		/* The follow set is NULL, which means we don't know what can come 
-		 * next, so we "hit and hope" by just signifying that we cannot
-		 * recover, which will just cause the next token to be consumed,
-		 * which might dig us out.
-		 */
-		return	ANTLR3_FALSE;
-    }
-
-    /* We have a bitmap for the follow set, hence we can compute 
-     * what can follow this grammar element reference.
-     */
-    if	(follow->isMember(follow, ANTLR3_EOR_TOKEN_TYPE) == ANTLR3_TRUE)
-    {
-		/* First we need to know which of the available tokens are viable
-		 * to follow this reference.
-		 */
-		viableToksFollowingRule	= recognizer->computeCSRuleFollow(recognizer);
-
-		/* Remove the EOR token, which we do not wish to compute with
-		 */
-		follow->remove(follow, ANTLR3_EOR_TOKEN_TYPE);
-		viableToksFollowingRule->free(viableToksFollowingRule);
-		/* We now have the computed set of what can follow the current token
-		 */
-    }
-
-    /* We can now see if the current token works with the set of tokens
-     * that could follow the current grammar reference. If it looks like it
-     * is consistent, then we can "insert" that token by not throwing
-     * an exception and assuming that we saw it. 
-     */
-    if	( follow->isMember(follow, is->_LA(is, 1)) == ANTLR3_TRUE)
-    {
-		/* report the error, but don't cause any rules to abort and stuff
-		 */
-		recognizer->reportError(recognizer);
-		if	(follow != NULL)
-		{
-			follow->free(follow);
-		}
-		recognizer->state->error			= ANTLR3_FALSE;
-		recognizer->state->failed			= ANTLR3_FALSE;
-		return ANTLR3_TRUE;	/* Success in recovery	*/
-    }
-
-    if	(follow != NULL)
-    {
-		follow->free(follow);
-    }
-
-    /* We could not find anything viable to do, so this is going to 
-     * cause an exception.
-     */
-    return  ANTLR3_FALSE;
-}
-
-/// Eat tokens from the input stream until we get one of JUST the right type
-///
-static void		
-consumeUntil	(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 tokenType)
-{
-    ANTLR3_UINT32			ttype;
-    pANTLR3_PARSER			parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-
-    switch	(recognizer->type)
-    {
-		case	ANTLR3_TYPE_PARSER:
-
-			parser  = (pANTLR3_PARSER) (recognizer->super);
-			tparser	= NULL;
-			is	= parser->tstream->istream;
-
-			break;
-
-		case	ANTLR3_TYPE_TREE_PARSER:
-
-			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-			parser	= NULL;
-			is	= tparser->ctnstream->tnstream->istream;
-
-			break;
-
-		default:
-		    
-			ANTLR3_FPRINTF(stderr, "Base recognizer function 'consumeUntil' called by unknown parser type - provide override for this function\n");
-			return;
-
-			break;
-    }
-
-    // What do we have at the moment?
-    //
-    ttype	= is->_LA(is, 1);
-
-    // Start eating tokens until we get to the one we want.
-    //
-    while   (ttype != ANTLR3_TOKEN_EOF && ttype != tokenType)
-    {
-		is->consume(is);
-		ttype	= is->_LA(is, 1);
-    }
-}
-
-/// Eat tokens from the input stream until we find one that
-/// belongs to the supplied set.
-///
-static void		
-consumeUntilSet			    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET set)
-{
-    ANTLR3_UINT32	    ttype;
-    pANTLR3_PARSER	    parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-
-    switch	(recognizer->type)
-    {
-		case	ANTLR3_TYPE_PARSER:
-
-			parser  = (pANTLR3_PARSER) (recognizer->super);
-			tparser	= NULL;
-			is	= parser->tstream->istream;
-
-			break;
-
-		case	ANTLR3_TYPE_TREE_PARSER:
-
-			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-			parser	= NULL;
-			is	= tparser->ctnstream->tnstream->istream;
-
-			break;
-
-		default:
-		    
-			ANTLR3_FPRINTF(stderr, "Base recognizer function 'consumeUntilSet' called by unknown parser type - provide override for this function\n");
-			return;
-
-			break;
-    }
-
-    // What do we have at the moment?
-    //
-    ttype	= is->_LA(is, 1);
-
-    // Start eating tokens until we get to one we want.
-    //
-    while   (ttype != ANTLR3_TOKEN_EOF && set->isMember(set, ttype) == ANTLR3_FALSE)
-    {
-		is->consume(is);
-		ttype	= is->_LA(is, 1);
-    }
-}
-
-/** Return the rule invocation stack (how we got here in the parse).
- *  In the Java version Ter just asks the JVM for all the information,
- *  but in C we don't get this information, so I am going to do nothing 
- *  right now.
- */
-static pANTLR3_STACK	
-getRuleInvocationStack		    (pANTLR3_BASE_RECOGNIZER recognizer)
-{
-    return NULL;
-}
-
-static pANTLR3_STACK	
-getRuleInvocationStackNamed	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_UINT8 name)
-{
-    return NULL;
-}
-
-/** Convenience method for template rewrites - NYI.
- */
-static pANTLR3_HASH_TABLE	
-toStrings			    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_HASH_TABLE tokens)
-{
-    return NULL;
-}
-
-static	void ANTLR3_CDECL
-freeIntTrie    (void * trie)
-{
-    ((pANTLR3_INT_TRIE)trie)->free((pANTLR3_INT_TRIE)trie);
-}
-
-
-/** Pointer to a function to return whether the rule has parsed input starting at the supplied 
- *  start index before. If the rule has not parsed input starting from the supplied start index,
- *  then it will return ANTLR3_MEMO_RULE_UNKNOWN. If it has parsed from the supplied start point
- *  then it will return the point where it last stopped parsing after that start point.
- *
- * \remark
- * The rule memos are an ANTLR3_LIST of ANTLR3_LISTS, however if this becomes any kind of performance
- * issue (it probably won't, the hash tables are pretty quick) then we could make a special int only
- * version of the table.
- */
-static ANTLR3_MARKER	
-getRuleMemoization		    (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_INTKEY ruleIndex, ANTLR3_MARKER ruleParseStart)
-{
-    /* The rule memos are an ANTLR3_LIST of ANTLR3_LIST.
-     */
-    pANTLR3_INT_TRIE	ruleList;
-    ANTLR3_MARKER	stopIndex;
-    pANTLR3_TRIE_ENTRY	entry;
-
-    /* See if we have a list in the ruleMemos for this rule, and if not, then create one
-     * as we will need it eventually if we are being asked for the memo here.
-     */
-    entry	= recognizer->state->ruleMemo->get(recognizer->state->ruleMemo, (ANTLR3_INTKEY)ruleIndex);
-
-    if	(entry == NULL)
-    {
-		/* Did not find it, so create a new one for it, with a bit depth based on the 
-		 * size of the input stream. We need the bit depth to incorporate the number of
-		 * bits required to represent the largest possible stop index in the input, which is the
-		 * last character. An int stream is free to return the largest 64 bit offset if it has
-		 * no idea of the size, but you should remember that this will cause the leftmost
-		 * bit match algorithm to run to 63 bits, which will be the whole time spent in the trie ;-)
-		 */
-		ruleList    = antlr3IntTrieNew(63);	/* Depth is theoretically 64 bits, but probably not ;-)	*/
-
-		if (ruleList != NULL)
-		{
-			recognizer->state->ruleMemo->add(recognizer->state->ruleMemo, (ANTLR3_INTKEY)ruleIndex, ANTLR3_HASH_TYPE_STR, 0, ANTLR3_FUNC_PTR(ruleList), freeIntTrie);
-		}
-
-		/* We cannot have a stopIndex in a trie we have just created of course
-		 */
-		return	MEMO_RULE_UNKNOWN;
-    }
-
-    ruleList	= (pANTLR3_INT_TRIE) (entry->data.ptr);
-
-    /* See if there is a stop index associated with the supplied start index.
-     */
-    stopIndex	= 0;
-
-    entry = ruleList->get(ruleList, ruleParseStart);
-    if (entry != NULL)
-    {
-		stopIndex = (ANTLR3_MARKER)(entry->data.intVal);
-    }
-
-    if	(stopIndex == 0)
-    {
-		return MEMO_RULE_UNKNOWN;
-    }
-
-    return  stopIndex;
-}
-
-/** Has this rule already parsed input at the current index in the
- *  input stream?  Return ANTLR3_TRUE if we have and ANTLR3_FALSE
- *  if we have not.
- *
- *  This method has a side-effect: if we have seen this input for
- *  this rule and successfully parsed before, then seek ahead to
- *  1 past the stop token matched for this rule last time.
- */
-static ANTLR3_BOOLEAN	
-alreadyParsedRule		    (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_MARKER ruleIndex)
-{
-    ANTLR3_MARKER			stopIndex;
-    pANTLR3_LEXER			lexer;
-    pANTLR3_PARSER			parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-
-    switch	(recognizer->type)
-    {
-		case	ANTLR3_TYPE_PARSER:
-
-			parser  = (pANTLR3_PARSER) (recognizer->super);
-			tparser	= NULL;
-			lexer	= NULL;
-			is	= parser->tstream->istream;
-
-			break;
-
-		case	ANTLR3_TYPE_TREE_PARSER:
-
-			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-			parser	= NULL;
-			lexer	= NULL;
-			is	= tparser->ctnstream->tnstream->istream;
-
-			break;
-
-		case	ANTLR3_TYPE_LEXER:
-
-			lexer	= (pANTLR3_LEXER)   (recognizer->super);
-			parser	= NULL;
-			tparser	= NULL;
-			is	= lexer->input->istream;
-			break;
-
-		default:
-		    
-			ANTLR3_FPRINTF(stderr, "Base recognizer function 'alreadyParsedRule' called by unknown parser type - provide override for this function\n");
-			return ANTLR3_FALSE;
-
-			break;
-    }
-
-    /* See if we have a memo marker for this.
-     */
-    stopIndex	    = recognizer->getRuleMemoization(recognizer, ruleIndex, is->index(is));
-
-    if	(stopIndex  == MEMO_RULE_UNKNOWN)
-    {
-		return ANTLR3_FALSE;
-    }
-
-    if	(stopIndex == MEMO_RULE_FAILED)
-    {
-		recognizer->state->failed = ANTLR3_TRUE;
-    }
-    else
-    {
-		is->seek(is, stopIndex+1);
-    }
-
-    /* If here then the rule was executed for this input already
-     */
-    return  ANTLR3_TRUE;
-}
-
-/** Record whether or not this rule parsed the input at this position
- *  successfully.
- */
-static void		
-memoize	(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_MARKER ruleIndex, ANTLR3_MARKER ruleParseStart)
-{
-    /* The rule memos are an ANTLR3_LIST of ANTLR3_LIST.
-     */
-    pANTLR3_INT_TRIE	    ruleList;
-    pANTLR3_TRIE_ENTRY	    entry;
-    ANTLR3_MARKER	    stopIndex;
-    pANTLR3_LEXER	    lexer;
-    pANTLR3_PARSER	    parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-
-    switch	(recognizer->type)
-    {
-		case	ANTLR3_TYPE_PARSER:
-
-			parser  = (pANTLR3_PARSER) (recognizer->super);
-			tparser	= NULL;
-			is	= parser->tstream->istream;
-
-			break;
-
-		case	ANTLR3_TYPE_TREE_PARSER:
-
-			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-			parser	= NULL;
-			is	= tparser->ctnstream->tnstream->istream;
-
-			break;
-
-		case	ANTLR3_TYPE_LEXER:
-
-			lexer	= (pANTLR3_LEXER)   (recognizer->super);
-			parser	= NULL;
-			tparser	= NULL;
-			is		= lexer->input->istream;
-			break;
-
-		default:
-		    
-			ANTLR3_FPRINTF(stderr, "Base recognizer function consumeUntilSet called by unknown parser type - provide override for this function\n");
-			return;
-
-			break;
-    }
-    
-    stopIndex	= recognizer->state->failed == ANTLR3_TRUE ? MEMO_RULE_FAILED : is->index(is) - 1;
-
-    entry	= recognizer->state->ruleMemo->get(recognizer->state->ruleMemo, (ANTLR3_INTKEY)ruleIndex);
-
-    if	(entry != NULL)
-    {
-		ruleList = (pANTLR3_INT_TRIE)(entry->data.ptr);
-
-		/* If we don't already have this entry, append it. The memoize trie does not
-		 * accept duplicates, so it won't add the entry if it is already there, and we
-		 * just ignore the return code as we don't care either way.
-		 */
-		ruleList->add(ruleList, ruleParseStart, ANTLR3_HASH_TYPE_INT, stopIndex, NULL, NULL);
-    }
-}
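A minimal sketch of the memoization contract shared by getRuleMemoization(), alreadyParsedRule(), and memoize(): remember, per (rule, start index), either the stop index or a failure marker, and on a later attempt either skip ahead or fail immediately. This is a standalone illustration assuming a plain array instead of the ANTLR3_INT_TRIE; the sentinel values and sizes are invented.

/* Standalone illustration of rule memoization for one hypothetical rule. */
#include <stdio.h>

#define MEMO_UNKNOWN (-1)
#define MEMO_FAILED  (-2)
#define INPUT_LEN    16

int main(void)
{
    /* memo[start] = stop index for the rule, or a sentinel. */
    int memo[INPUT_LEN];
    for (int i = 0; i < INPUT_LEN; i++) memo[i] = MEMO_UNKNOWN;

    /* First parse attempt: the rule matched input [3..7]. */
    memo[3] = 7;
    /* Another attempt starting at index 9 failed. */
    memo[9] = MEMO_FAILED;

    /* Later, during backtracking, the same rule is tried again at index 3. */
    int start = 3;
    int stop  = memo[start];
    if (stop == MEMO_UNKNOWN)
        printf("not memoized - parse the rule normally\n");
    else if (stop == MEMO_FAILED)
        printf("memoized failure - set the failed flag, do not reparse\n");
    else
        printf("memoized success - seek to %d and skip the rule\n", stop + 1);
    return 0;
}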
-/** A syntactic predicate.  Returns true/false depending on whether
- *  the specified grammar fragment matches the current input stream.
- *  This resets the failed instance var afterwards.
- */
-static ANTLR3_BOOLEAN	
-synpred	(pANTLR3_BASE_RECOGNIZER recognizer, void * ctx, void (*predicate)(void * ctx))
-{
-    ANTLR3_MARKER   start;
-    pANTLR3_PARSER	    parser;
-    pANTLR3_TREE_PARSER	    tparser;
-    pANTLR3_INT_STREAM	    is;
-
-    switch	(recognizer->type)
-    {
-		case	ANTLR3_TYPE_PARSER:
-
-			parser  = (pANTLR3_PARSER) (recognizer->super);
-			tparser	= NULL;
-			is	= parser->tstream->istream;
-
-			break;
-
-		case	ANTLR3_TYPE_TREE_PARSER:
-
-			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
-			parser	= NULL;
-			is	= tparser->ctnstream->tnstream->istream;
-
-			break;
-
-		default:
-		    
-			ANTLR3_FPRINTF(stderr, "Base recognizer function 'synPred' called by unknown parser type - provide override for this function\n");
-			return ANTLR3_FALSE;
-
-			break;
-    }
-
-    /* Begin backtracking so we can get back to where we started after trying out
-     * the syntactic predicate.
-     */
-    start   = is->mark(is);
-    recognizer->state->backtracking++;
-
-    /* Try the syntactical predicate
-     */
-    predicate(ctx);
-
-    /* Reset
-     */
-    is->rewind(is, start);
-    recognizer->state->backtracking--;
-
-    if	(recognizer->state->failed == ANTLR3_TRUE)
-    {
-		/* Predicate failed
-		 */
-		recognizer->state->failed = ANTLR3_FALSE;
-		return	ANTLR3_FALSE;
-    }
-    else
-    {
-		/* Predicate was successful
-		 */
-		recognizer->state->failed	= ANTLR3_FALSE;
-		return	ANTLR3_TRUE;
-    }
-}
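The mark/try/rewind shape of synpred() in isolation, as a standalone sketch. The toy stream type and fragment function below are hypothetical stand-ins for the int stream and a generated predicate; only the control flow (mark, speculate, rewind, reset failed) mirrors the function above.

/* Standalone illustration of speculative matching with position restore. */
#include <stdio.h>

struct toy_stream { int pos; int failed; };

/* Hypothetical grammar fragment: matches only if 3 tokens remain. */
static void tryFragment(struct toy_stream *s)
{
    int remaining = 10 - s->pos;
    if (remaining >= 3) s->pos += 3;    /* speculative consumption */
    else                s->failed = 1;  /* fragment did not match  */
}

static int synpredToy(struct toy_stream *s)
{
    int mark = s->pos;          /* is->mark(is)                    */
    int ok;

    tryFragment(s);             /* speculatively run the predicate */
    s->pos = mark;              /* is->rewind(is, mark)            */

    ok = !s->failed;            /* did the fragment match?         */
    s->failed = 0;              /* reset failed, as synpred() does */
    return ok;
}

int main(void)
{
    struct toy_stream s = { 8, 0 };
    printf("predicate %s, position restored to %d\n",
           synpredToy(&s) ? "matched" : "failed", s.pos);
    return 0;
}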
-
-static void
-reset(pANTLR3_BASE_RECOGNIZER recognizer)
-{
-    if	(recognizer->state->following != NULL)
-    {
-		recognizer->state->following->free(recognizer->state->following);
-    }
-
-	// Reset the state flags
-	//
-	recognizer->state->errorRecovery	= ANTLR3_FALSE;
-	recognizer->state->lastErrorIndex	= -1;
-	recognizer->state->failed			= ANTLR3_FALSE;
-	recognizer->state->errorCount		= 0;
-	recognizer->state->backtracking		= 0;
-	recognizer->state->following		= NULL;
-
-	if	(recognizer->state != NULL)
-	{
-		if	(recognizer->state->ruleMemo != NULL)
-		{
-			recognizer->state->ruleMemo->free(recognizer->state->ruleMemo);
-			recognizer->state->ruleMemo = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */
-		}
-	}
-	
-
-    // Install a new following set
-    //
-    recognizer->state->following   = antlr3StackNew(8);
-
-}
-
-// Default implementation is for parser and assumes a token stream as supplied by the runtime.
-// You MAY need to override this function if the standard TOKEN_STREAM is not what you are using.
-//
-static void *				
-getCurrentInputSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream)
-{
-	return ((pANTLR3_TOKEN_STREAM)istream->super)->_LT((pANTLR3_TOKEN_STREAM)istream->super, 1);
-}
-
-// Default implementation is for parser and assumes a token stream as supplied by the runtime.
-// You MAY need to override this function if the standard COMMON_TOKEN_STREAM is not what you are using.
-//
-static void *				
-getMissingSymbol			(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
-									ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow)
-{
-	pANTLR3_TOKEN_STREAM			ts;
-	pANTLR3_COMMON_TOKEN_STREAM		cts;
-	pANTLR3_COMMON_TOKEN			token;
-	pANTLR3_COMMON_TOKEN			current;
-	pANTLR3_STRING					text;
-
-	// Dereference the standard pointers
-	//
-	ts		= (pANTLR3_TOKEN_STREAM)istream->super;
-	cts		= (pANTLR3_COMMON_TOKEN_STREAM)ts->super;
-	
-	// Work out what to use as the current symbol for line and offset information, etc.
-	// If we are at EOF, we use the token before EOF
-	//
-	current	= ts->_LT(ts, 1);
-	if	(current->getType(current) == ANTLR3_TOKEN_EOF)
-	{
-		current = ts->_LT(ts, -1);
-	}
-
-	// Create a new empty token
-	//
-	if	(recognizer->state->tokFactory == NULL)
-	{
-		// We don't yet have a token factory for making tokens, so
-		// we just make a fake one using the input stream of the current
-		// token.
-		//
-		recognizer->state->tokFactory = antlr3TokenFactoryNew(current->input);
-	}
-	token	= recognizer->state->tokFactory->newToken(recognizer->state->tokFactory);
-
-	// Set some of the token properties based on the current token
-	//
-	token->setLine					(token, current->getLine(current));
-	token->setCharPositionInLine	(token, current->getCharPositionInLine(current));
-	token->setChannel				(token, ANTLR3_TOKEN_DEFAULT_CHANNEL);
-	token->setType					(token, expectedTokenType);
-    token->user1                    = current->user1;
-    token->user2                    = current->user2;
-    token->user3                    = current->user3;
-    token->custom                   = current->custom;
-    token->lineStart                = current->lineStart;
-    
-	// Create the token text that shows it has been inserted
-	//
-	token->setText8(token, (pANTLR3_UINT8)"<missing ");
-	text = token->getText(token);
-
-	if	(text != NULL)
-	{
-		text->append8(text, (const char *)recognizer->state->tokenNames[expectedTokenType]);
-		text->append8(text, (const char *)">");
-	}
-	
-	// Finally return the pointer to our new token
-	//
-	return	token;
-}
-
-
-#ifdef	ANTLR3_WINDOWS
-#pragma warning( default : 4100 )
-#endif
-
-/// @}
-///
-
diff --git a/antlr-3.4/runtime/C/src/antlr3basetree.c b/antlr-3.4/runtime/C/src/antlr3basetree.c
deleted file mode 100644
index bbc81e7..0000000
--- a/antlr-3.4/runtime/C/src/antlr3basetree.c
+++ /dev/null
@@ -1,489 +0,0 @@
-#include    <antlr3basetree.h>
-
-#ifdef	ANTLR3_WINDOWS
-#pragma warning( disable : 4100 )
-#endif
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-static void				*	getChild			(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i);
-static ANTLR3_UINT32		getChildCount		(pANTLR3_BASE_TREE tree);
-static ANTLR3_UINT32		getCharPositionInLine
-(pANTLR3_BASE_TREE tree);
-static ANTLR3_UINT32		getLine				(pANTLR3_BASE_TREE tree);
-static pANTLR3_BASE_TREE    
-getFirstChildWithType
-(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 type);
-static void					addChild			(pANTLR3_BASE_TREE tree, pANTLR3_BASE_TREE child);
-static void					addChildren			(pANTLR3_BASE_TREE tree, pANTLR3_LIST kids);
-static void					replaceChildren		(pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE t);
-
-static	void				freshenPACIndexesAll(pANTLR3_BASE_TREE tree);
-static	void				freshenPACIndexes	(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 offset);
-
-static void					setChild			(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i, void * child);
-static void				*	deleteChild			(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i);
-static void				*	dupTree				(pANTLR3_BASE_TREE tree);
-static pANTLR3_STRING		toStringTree		(pANTLR3_BASE_TREE tree);
-
-
-ANTLR3_API pANTLR3_BASE_TREE
-antlr3BaseTreeNew(pANTLR3_BASE_TREE  tree)
-{
-	/* api */
-	tree->getChild				= getChild;
-	tree->getChildCount			= getChildCount;
-	tree->addChild				= (void (*)(pANTLR3_BASE_TREE, void *))(addChild);
-	tree->addChildren			= addChildren;
-	tree->setChild				= setChild;
-	tree->deleteChild			= deleteChild;
-	tree->dupTree				= dupTree;
-	tree->toStringTree			= toStringTree;
-	tree->getCharPositionInLine	= getCharPositionInLine;
-	tree->getLine				= getLine;
-	tree->replaceChildren		= replaceChildren;
-	tree->freshenPACIndexesAll	= freshenPACIndexesAll;
-	tree->freshenPACIndexes		= freshenPACIndexes;
-	tree->getFirstChildWithType	= (void *(*)(pANTLR3_BASE_TREE, ANTLR3_UINT32))(getFirstChildWithType);
-	tree->children				= NULL;
-	tree->strFactory			= NULL;
-
-	/* Rest must be filled in by caller.
-	*/
-	return  tree;
-}
-
-static ANTLR3_UINT32	
-getCharPositionInLine	(pANTLR3_BASE_TREE tree)
-{
-	return  0;
-}
-
-static ANTLR3_UINT32	
-getLine	(pANTLR3_BASE_TREE tree)
-{
-	return  0;
-}
-static pANTLR3_BASE_TREE
-getFirstChildWithType	(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 type)
-{
-	ANTLR3_UINT32   i;
-	ANTLR3_UINT32   cs;
-
-	pANTLR3_BASE_TREE	t;
-	if	(tree->children != NULL)
-	{
-		cs	= tree->children->size(tree->children);
-		for	(i = 0; i < cs; i++)
-		{
-			t = (pANTLR3_BASE_TREE) (tree->children->get(tree->children, i));
-			if  (tree->getType(t) == type)
-			{
-				return  (pANTLR3_BASE_TREE)t;
-			}
-		}
-	}
-	return  NULL;
-}
-
-
-
-static void    *
-getChild		(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i)
-{
-	if	(      tree->children == NULL
-		|| i >= tree->children->size(tree->children))
-	{
-		return NULL;
-	}
-	return  tree->children->get(tree->children, i);
-}
-
-
-static ANTLR3_UINT32
-getChildCount	(pANTLR3_BASE_TREE tree)
-{
-	if	(tree->children == NULL)
-	{
-		return 0;
-	}
-	else
-	{
-		return	tree->children->size(tree->children);
-	}
-}
-
-void	    
-addChild (pANTLR3_BASE_TREE tree, pANTLR3_BASE_TREE child)
-{
-	ANTLR3_UINT32   n;
-	ANTLR3_UINT32   i;
-
-	if	(child == NULL)
-	{
-		return;
-	}
-
-	if	(child->isNilNode(child) == ANTLR3_TRUE)
-	{
-		if  (child->children != NULL && child->children == tree->children)
-		{
-			// TODO: Change to exception rather than ANTLR3_FPRINTF?
-			//
-			ANTLR3_FPRINTF(stderr, "ANTLR3: An attempt was made to add a child list to itself!\n");
-			return;
-		}
-
-        // Add all of the children's children to this list
-        //
-        if (child->children != NULL)
-        {
-            if (tree->children == NULL)
-            {
-                // We are building the tree structure here, so we need not
-                // worry about duplication of pointers as the tree node
-                // factory will only clean up each node once. So we just
-                // copy in the child's children pointer as the child is
-                // a nil node (has no root itself).
-                //
-                tree->children = child->children;
-                child->children = NULL;
-                freshenPACIndexesAll(tree);
-                
-            }
-            else
-            {
-                // Need to copy the children
-                //
-                n = child->children->size(child->children);
-
-                for (i = 0; i < n; i++)
-                {
-                    pANTLR3_BASE_TREE entry;
-                    entry = child->children->get(child->children, i);
-
-                    // ANTLR3 lists can be sparse, unlike Array Lists
-                    //
-                    if (entry != NULL)
-                    {
-                        tree->children->add(tree->children, entry, (void (ANTLR3_CDECL *) (void *))child->free);
-                    }
-                }
-            }
-		}
-	}
-	else
-	{
-		// Tree we are adding is not a Nil and might have children to copy
-		//
-		if  (tree->children == NULL)
-		{
-			// No children in the tree we are adding to, so create a new list on
-			// the fly to hold them.
-			//
-			tree->createChildrenList(tree);
-		}
-
-		tree->children->add(tree->children, child, (void (ANTLR3_CDECL *)(void *))child->free);
-		
-	}
-}
-
-/// Add all elements of the supplied list as children of this node
-///
-static void
-addChildren	(pANTLR3_BASE_TREE tree, pANTLR3_LIST kids)
-{
-	ANTLR3_UINT32    i;
-	ANTLR3_UINT32    s;
-
-	s = kids->size(kids);
-	for	(i = 0; i<s; i++)
-	{
-		tree->addChild(tree, (pANTLR3_BASE_TREE)(kids->get(kids, i+1)));
-	}
-}
-
-
-static    void
-setChild	(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i, void * child)
-{
-	if	(tree->children == NULL)
-	{
-		tree->createChildrenList(tree);
-	}
-	tree->children->set(tree->children, i, child, NULL, ANTLR3_FALSE);
-}
-
-static void    *
-deleteChild	(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i)
-{
-	if	( tree->children == NULL)
-	{
-		return	NULL;
-	}
-
-	return  tree->children->remove(tree->children, i);
-}
-
-static void    *
-dupTree		(pANTLR3_BASE_TREE tree)
-{
-	pANTLR3_BASE_TREE	newTree;
-	ANTLR3_UINT32	i;
-	ANTLR3_UINT32	s;
-
-	newTree = tree->dupNode	    (tree);
-
-	if	(tree->children != NULL)
-	{
-		s	    = tree->children->size  (tree->children);
-
-		for	(i = 0; i < s; i++)
-		{
-			pANTLR3_BASE_TREE    t;
-			pANTLR3_BASE_TREE    newNode;
-
-			t   = (pANTLR3_BASE_TREE) tree->children->get(tree->children, i);
-
-			if  (t!= NULL)
-			{
-				newNode	    = t->dupTree(t);
-				newTree->addChild(newTree, newNode);
-			}
-		}
-	}
-
-	return newTree;
-}
-
-static pANTLR3_STRING
-toStringTree	(pANTLR3_BASE_TREE tree)
-{
-	pANTLR3_STRING  string;
-	ANTLR3_UINT32   i;
-	ANTLR3_UINT32   n;
-	pANTLR3_BASE_TREE   t;
-
-	if	(tree->children == NULL || tree->children->size(tree->children) == 0)
-	{
-		return	tree->toString(tree);
-	}
-
-	/* Need a new string with nothing at all in it.
-	*/
-	string	= tree->strFactory->newRaw(tree->strFactory);
-
-	if	(tree->isNilNode(tree) == ANTLR3_FALSE)
-	{
-		string->append8	(string, "(");
-		string->appendS	(string, tree->toString(tree));
-		string->append8	(string, " ");
-	}
-	if	(tree->children != NULL)
-	{
-		n = tree->children->size(tree->children);
-
-		for	(i = 0; i < n; i++)
-		{   
-			t   = (pANTLR3_BASE_TREE) tree->children->get(tree->children, i);
-
-			if  (i > 0)
-			{
-				string->append8(string, " ");
-			}
-			string->appendS(string, t->toStringTree(t));
-		}
-	}
-	if	(tree->isNilNode(tree) == ANTLR3_FALSE)
-	{
-		string->append8(string,")");
-	}
-
-	return  string;
-}
-
-/// Delete children from start to stop and replace with t even if t is
-/// a list (nil-root tree). Num of children can increase or decrease.
-/// For huge child lists, inserting children can force walking rest of
-/// children to set their child index; could be slow.
-///
-static void					
-replaceChildren		(pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE newTree)
-{
-	ANTLR3_INT32	replacingHowMany;		// How many nodes will go away
-	ANTLR3_INT32	replacingWithHowMany;	// How many nodes will replace them
-	ANTLR3_INT32	numNewChildren;			// Tracking variable
-	ANTLR3_INT32	delta;					// Difference in new vs existing count
-
-	ANTLR3_INT32	i;
-	ANTLR3_INT32	j;
-
-	pANTLR3_VECTOR	newChildren;			// Iterator for whatever we are going to add in
-	ANTLR3_BOOLEAN	freeNewChildren;		// Whether we created the iterator locally or reused it
-
-	if	(parent->children == NULL)
-	{
-		ANTLR3_FPRINTF(stderr, "replaceChildren call: Indexes are invalid; no children in list for %s", parent->getText(parent)->chars);
-		return;
-	}
-
-	// Either use the existing list of children in the supplied nil node, or build a vector of the
-	// tree we were given if it is not a nil node, so that we can treat both situations exactly the same
-	//
-	if	(newTree->isNilNode(newTree))
-	{
-		newChildren = newTree->children;
-		freeNewChildren = ANTLR3_FALSE;		// We must NOT free this memory
-	}
-	else
-	{
-		newChildren = antlr3VectorNew(1);
-		if	(newChildren == NULL)
-		{
-			ANTLR3_FPRINTF(stderr, "replaceChildren: out of memory!!");
-			exit(1);
-		}
-		newChildren->add(newChildren, (void *)newTree, NULL);
-
-		freeNewChildren = ANTLR3_TRUE;		// We must free this memory
-	}
-
-	// Initialize
-	//
-	replacingHowMany		= stopChildIndex - startChildIndex + 1;
-	replacingWithHowMany	= newChildren->size(newChildren);
-	delta					= replacingHowMany - replacingWithHowMany;
-	numNewChildren			= newChildren->size(newChildren);
-
-	// If it is the same number of nodes, then do a direct replacement
-	//
-	if	(delta == 0)
-	{
-		pANTLR3_BASE_TREE	child;
-
-		// Same number of nodes
-		//
-		j	= 0;
-		for	(i = startChildIndex; i <= stopChildIndex; i++)
-		{
-			child = (pANTLR3_BASE_TREE) newChildren->get(newChildren, j);
-			parent->children->set(parent->children, i, child, NULL, ANTLR3_FALSE);
-			child->setParent(child, parent);
-			child->setChildIndex(child, i);
-		}
-	}
-	else if (delta > 0)
-	{
-		ANTLR3_UINT32	indexToDelete;
-
-		// Fewer nodes than there were before;
-		// reuse what we have, then delete the rest
-		//
-		for	(j = 0; j < numNewChildren; j++)
-		{
-			parent->children->set(parent->children, startChildIndex + j, newChildren->get(newChildren, j), NULL, ANTLR3_FALSE);
-		}
-
-		// We just delete the same index position until done
-		//
-		indexToDelete = startChildIndex + numNewChildren;
-
-		for	(j = indexToDelete; j <= (ANTLR3_INT32)stopChildIndex; j++)
-		{
-			parent->children->remove(parent->children, indexToDelete);
-		}
-
-		parent->freshenPACIndexes(parent, startChildIndex);
-	}
-	else
-	{
-		ANTLR3_UINT32 numToInsert;
-
-		// More nodes than there were before
-		// Use what we can, then start adding
-		//
-		for	(j = 0; j < replacingHowMany; j++)
-		{
-			parent->children->set(parent->children, startChildIndex + j, newChildren->get(newChildren, j), NULL, ANTLR3_FALSE);
-		}
-
-		numToInsert = replacingWithHowMany - replacingHowMany;
-
-		for	(j = replacingHowMany; j < replacingWithHowMany; j++)
-		{
-			parent->children->add(parent->children, newChildren->get(newChildren, j), NULL);
-		}
-
-		parent->freshenPACIndexes(parent, startChildIndex);
-	}
-
-	if	(freeNewChildren == ANTLR3_TRUE)
-	{
-		ANTLR3_FREE(newChildren->elements);
-		newChildren->elements = NULL;
-		newChildren->size = 0;
-		ANTLR3_FREE(newChildren);		// Will not free the nodes
-	}
-}
-
-/// Set the parent and child indexes for all children of the
-/// supplied tree.
-///
-static	void
-freshenPACIndexesAll(pANTLR3_BASE_TREE tree)
-{
-	tree->freshenPACIndexes(tree, 0);
-}
-
-/// Set the parent and child indexes for some of the children of the
-/// supplied tree, starting with the child at the supplied index.
-///
-static	void
-freshenPACIndexes	(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 offset)
-{
-	ANTLR3_UINT32	count;
-	ANTLR3_UINT32	c;
-
-	count	= tree->getChildCount(tree);		// How many children do we have 
-
-	// Loop from the supplied index and set the indexes and parent
-	//
-	for	(c = offset; c < count; c++)
-	{
-		pANTLR3_BASE_TREE	child;
-
-		child = tree->getChild(tree, c);
-
-		child->setChildIndex(child, c);
-		child->setParent(child, tree);
-	}
-}
-
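antlr3basetree.c, removed above, carries the generic child-list handling shared by all tree implementations; its toStringTree() renders a tree in ANTLR's LISP-style form, where a nil (list) root adds no parentheses and a real root prints as "(root child1 child2 ...)". A compact sketch of that same recursion over an illustrative fixed-size node struct (not the pANTLR3_BASE_TREE vtable or its string factory) is shown below.

#include <stdio.h>

/* Illustrative node type; the real runtime uses pANTLR3_BASE_TREE with a
 * children vector and a string factory rather than fixed arrays.          */
typedef struct node {
    const char  *text;       /* NULL marks a nil (list) root               */
    struct node *kids[8];    /* NULL-terminated child list                 */
} node;

static void printStringTree(const node *t)
{
    int i;
    int isNil = (t->text == NULL);

    if (t->kids[0] == NULL) {                /* leaf: just the token text  */
        printf("%s", isNil ? "nil" : t->text);
        return;
    }
    if (!isNil)
        printf("(%s ", t->text);
    for (i = 0; t->kids[i] != NULL; i++) {
        if (i > 0)
            printf(" ");
        printStringTree(t->kids[i]);
    }
    if (!isNil)
        printf(")");
}

int main(void)
{
    node b = { "b", { NULL } };
    node c = { "c", { NULL } };
    node a = { "a", { &b, &c, NULL } };
    printStringTree(&a);                     /* prints: (a b c) */
    printf("\n");
    return 0;
}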
diff --git a/antlr-3.4/runtime/C/src/antlr3basetreeadaptor.c b/antlr-3.4/runtime/C/src/antlr3basetreeadaptor.c
deleted file mode 100644
index e35878f..0000000
--- a/antlr-3.4/runtime/C/src/antlr3basetreeadaptor.c
+++ /dev/null
@@ -1,909 +0,0 @@
-/** \file
- * Contains the base functions that all tree adaptors start with.
- * This implementation can then be overridden by any higher implementation.
- * 
- */
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3basetreeadaptor.h>
-
-#ifdef	ANTLR3_WINDOWS
-#pragma warning( disable : 4100 )
-#endif
-
-/* Interface functions
- */
-static	pANTLR3_BASE_TREE	nilNode					(pANTLR3_BASE_TREE_ADAPTOR adaptor);
-static	pANTLR3_BASE_TREE	dbgNil					(pANTLR3_BASE_TREE_ADAPTOR adaptor);
-static	pANTLR3_BASE_TREE	dupTree					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	pANTLR3_BASE_TREE	dbgDupTree				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	pANTLR3_BASE_TREE	dupTreeTT				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE parent);
-static	void				addChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE child);
-static	void				dbgAddChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE child);
-static	pANTLR3_BASE_TREE	becomeRoot				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE newRoot, pANTLR3_BASE_TREE oldRoot);
-static	pANTLR3_BASE_TREE	dbgBecomeRoot			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE newRoot, pANTLR3_BASE_TREE oldRoot);
-static	pANTLR3_BASE_TREE	rulePostProcessing		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE root);
-static	void				addChildToken			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN child);
-static	void				dbgAddChildToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN child);
-static	pANTLR3_BASE_TREE	becomeRootToken			(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * newRoot, pANTLR3_BASE_TREE oldRoot);
-static	pANTLR3_BASE_TREE	dbgBecomeRootToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * newRoot, pANTLR3_BASE_TREE oldRoot);
-static	pANTLR3_BASE_TREE	createTypeToken			(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken);
-static	pANTLR3_BASE_TREE	dbgCreateTypeToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken);
-static	pANTLR3_BASE_TREE	createTypeTokenText		(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken, pANTLR3_UINT8 text);
-static	pANTLR3_BASE_TREE	dbgCreateTypeTokenText	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken, pANTLR3_UINT8 text);
-static	pANTLR3_BASE_TREE	createTypeText			(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text);
-static	pANTLR3_BASE_TREE	dbgCreateTypeText		(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text);
-static	ANTLR3_UINT32		getType					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	void				setType					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 type);
-static	pANTLR3_STRING		getText					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	void				setText					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_STRING t);
-static	void				setText8				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_UINT8 t);
-static	pANTLR3_BASE_TREE	getChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i);
-static	ANTLR3_UINT32		getChildCount			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	ANTLR3_UINT32		getUniqueID				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	ANTLR3_BOOLEAN		isNilNode				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	pANTLR3_STRING		makeDot					(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * theTree);
-
-/** Given a pointer to a base tree adaptor structure (which is usually embedded in the
- *  super class that implements the tree adaptor used in the parse), initialize its
- *  function pointers and so on.
- */
-ANTLR3_API void
-antlr3BaseTreeAdaptorInit(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_DEBUG_EVENT_LISTENER	debugger)
-{
-	// Initialize the interface
-	//
-	if	(debugger == NULL)
-	{
-		adaptor->nilNode				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR)) 								
-																				nilNode;
-		adaptor->addChild				= (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))								
-																				addChild;
-		adaptor->becomeRoot				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))				
-																				becomeRoot;
-		adaptor->addChildToken			= (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, pANTLR3_COMMON_TOKEN))	
-																				addChildToken;
-		adaptor->becomeRootToken		= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
-																				becomeRootToken;
-		adaptor->createTypeToken		= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_COMMON_TOKEN))
-																				createTypeToken;
-		adaptor->createTypeTokenText	= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_COMMON_TOKEN, pANTLR3_UINT8))
-																				createTypeTokenText;
-		adaptor->createTypeText			= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_UINT8))
-																				createTypeText;
-		adaptor->dupTree				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))		 				
-																				dupTree;
-	}
-	else
-	{
-		adaptor->nilNode				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR))
-                                                                                dbgNil;
-		adaptor->addChild				= (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
-                                                                                dbgAddChild;
-		adaptor->becomeRoot				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
-																				dbgBecomeRoot;
-		adaptor->addChildToken			= (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, pANTLR3_COMMON_TOKEN))
-                                                                                dbgAddChildToken;
-		adaptor->becomeRootToken		= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
-                                                                                dbgBecomeRootToken;
-		adaptor->createTypeToken		= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_COMMON_TOKEN))
-                                                                                dbgCreateTypeToken;
-		adaptor->createTypeTokenText	= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_COMMON_TOKEN, pANTLR3_UINT8))
-                                                                                dbgCreateTypeTokenText;
-		adaptor->createTypeText			= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_UINT8))
-                                                                                dbgCreateTypeText;
-		adaptor->dupTree				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                                                dbgDupTree;
-		debugger->adaptor				= adaptor;
-	}
-
-	adaptor->dupTreeTT				=  (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
-                                                                                dupTreeTT;
-	adaptor->rulePostProcessing		=  (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                                                rulePostProcessing;
-	adaptor->getType				=  (ANTLR3_UINT32 (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                                                getType;
-	adaptor->setType				=  (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32))
-																				setType;
-	adaptor->getText				=  (pANTLR3_STRING (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                                                getText;
-	adaptor->setText8				=  (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_UINT8))
-																				setText8;
-	adaptor->setText				=  (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_STRING))
-                                                                                setText;
-	adaptor->getChild				=  (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32))
-                                                                                getChild;
-	adaptor->getChildCount			=  (ANTLR3_UINT32 (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                                                getChildCount;
-	adaptor->getUniqueID			=  (ANTLR3_UINT32 (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                                                getUniqueID;
-	adaptor->isNilNode				=  (ANTLR3_BOOLEAN (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                                                isNilNode;
-
-	adaptor->makeDot				=  (pANTLR3_STRING  (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
-																				makeDot;
-	
-	/* Remaining functions filled in by the caller.
-	 */
-	return;
-}
-
-static void
-defineDotNodes(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * t, pANTLR3_STRING dotSpec )
-{
-	// How many nodes are we talking about?
-	//
-	int	nCount;
-	int i;
-    pANTLR3_BASE_TREE child;
-	char	buff[64];
-	pANTLR3_STRING	text;
-	int		j;
-
-
-
-
-
-	// Count the nodes
-	//
-	nCount = adaptor->getChildCount(adaptor, t);
-
-	if	(nCount == 0)
-	{
-		// This will already have been included as a child of another node
-		// so there is nothing to add.
-		//
-		return;
-	}
-
-	// For each child of the current tree, define a node using the
-	// memory address of the node to name it
-	//
-	for	(i = 0; i<nCount; i++)
-	{
-
-		// Pick up a pointer for the child
-		//
-		child = adaptor->getChild(adaptor, t, i);
-
-		// Name the node
-		//
-		sprintf(buff, "\tn%p[label=\"", child);
-		dotSpec->append8(dotSpec, buff);
-		text = adaptor->getText(adaptor, child);
-		for (j = 0; j < (ANTLR3_INT32)(text->len); j++)
-		{
-            switch(text->charAt(text, j))
-            {
-                case '"':
-
-                    dotSpec->append8(dotSpec, "\\\"");
-                    break;
-
-                case '\n':
-
-                    dotSpec->append8(dotSpec, "\\n");
-                    break;
-
-                case '\r':
-
-                    dotSpec->append8(dotSpec, "\\r");
-                    break;
-
-                default:
-
-                    dotSpec->addc(dotSpec, text->charAt(text, j));
-                    break;
-            }
-		}
-		dotSpec->append8(dotSpec, "\"]\n");
-
-		// And now define the children of this child (if any)
-		//
-		defineDotNodes(adaptor, child, dotSpec);
-	}
-	
-	// Done
-	//
-	return;
-}
-
-static void
-defineDotEdges(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * t, pANTLR3_STRING dotSpec)
-{
-	// How many nodes are we talking about?
-	//
-	int	nCount;
-	int i;
-
-	if	(t == NULL)
-	{
-		// No tree, so do nothing
-		//
-		return;
-	}
-
-	// Count the nodes
-	//
-	nCount = adaptor->getChildCount(adaptor, t);
-
-	if	(nCount == 0)
-	{
-		// This will already have been included as a child of another node
-		// so there is nothing to add.
-		//
-		return;
-	}
-
-	// For each child, define an edge from this parent, then process
-	// and children of this child in the same way
-	//
-	for	(i=0; i<nCount; i++)
-	{
-		pANTLR3_BASE_TREE child;
-		char	buff[128];
-        pANTLR3_STRING text;
-        int                 j;
-
-		// Next child
-		//
-		child	= adaptor->getChild(adaptor, t, i);
-
-		// Create the edge relation
-		//
-		sprintf(buff, "\t\tn%p -> n%p\t\t// ",  t, child);
-        
-		dotSpec->append8(dotSpec, buff);
-
-		// Document the relationship
-		//
-        text = adaptor->getText(adaptor, t);
-		for (j = 0; j < (ANTLR3_INT32)(text->len); j++)
-        {
-                switch(text->charAt(text, j))
-                {
-                    case '"':
-
-                        dotSpec->append8(dotSpec, "\\\"");
-                        break;
-
-                    case '\n':
-
-                        dotSpec->append8(dotSpec, "\\n");
-                        break;
-
-                    case '\r':
-
-                        dotSpec->append8(dotSpec, "\\r");
-                        break;
-
-                    default:
-
-                        dotSpec->addc(dotSpec, text->charAt(text, j));
-                        break;
-                }
-        }
-
-        dotSpec->append8(dotSpec, " -> ");
-
-        text = adaptor->getText(adaptor, child);
-        for (j = 0; j < (ANTLR3_INT32)(text->len); j++)
-        {
-                switch(text->charAt(text, j))
-                {
-                    case '"':
-
-                        dotSpec->append8(dotSpec, "\\\"");
-                        break;
-
-                    case '\n':
-
-                        dotSpec->append8(dotSpec, "\\n");
-                        break;
-
-                    case '\r':
-
-                        dotSpec->append8(dotSpec, "\\r");
-                        break;
-
-                    default:
-
-                        dotSpec->addc(dotSpec, text->charAt(text, j));
-                        break;
-                }
-        }
-		dotSpec->append8(dotSpec, "\n");
-
-        
-		// Define edges for this child
-		//
-		defineDotEdges(adaptor, child, dotSpec);
-	}
-
-	// Done
-	//
-	return;
-}
-
-/// Produce a DOT specification for graphviz
-//
-static pANTLR3_STRING
-makeDot	(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * theTree)
-{
-	// The string we are building up
-	//
-	pANTLR3_STRING		dotSpec;
-	char                buff[64];
-	pANTLR3_STRING      text;
-	int                 j;
-
-	dotSpec = adaptor->strFactory->newStr8
-		
-		(
-			adaptor->strFactory,
-
-			// Default look and feel
-			//
-			(pANTLR3_UINT8)
-			"digraph {\n\n"
-			"\tordering=out;\n"
-			"\tranksep=.4;\n"
-			"\tbgcolor=\"lightgrey\";  node [shape=box, fixedsize=false, fontsize=12, fontname=\"Helvetica-bold\", fontcolor=\"blue\"\n"
-			"\twidth=.25, height=.25, color=\"black\", fillcolor=\"white\", style=\"filled, solid, bold\"];\n\n"
-			"\tedge [arrowsize=.5, color=\"black\", style=\"bold\"]\n\n"
-		);
-
-    if	(theTree == NULL)
-	{
-		// No tree, so create a blank spec
-		//
-		dotSpec->append8(dotSpec, "n0[label=\"EMPTY TREE\"]\n");
-		return dotSpec;
-	}
-
-    sprintf(buff, "\tn%p[label=\"", theTree);
-	dotSpec->append8(dotSpec, buff);
-    text = adaptor->getText(adaptor, theTree);
-    for (j = 0; j < (ANTLR3_INT32)(text->len); j++)
-    {
-            switch(text->charAt(text, j))
-            {
-                case '"':
-
-                    dotSpec->append8(dotSpec, "\\\"");
-                    break;
-
-                case '\n':
-
-                    dotSpec->append8(dotSpec, "\\n");
-                    break;
-
-                case '\r':
-
-                    dotSpec->append8(dotSpec, "\\r");
-                    break;
-
-                default:
-
-                    dotSpec->addc(dotSpec, text->charAt(text, j));
-                    break;
-            }
-    }
-	dotSpec->append8(dotSpec, "\"]\n");
-
-	// First produce the node definitions
-	//
-	defineDotNodes(adaptor, theTree, dotSpec);
-	dotSpec->append8(dotSpec, "\n");
-	defineDotEdges(adaptor, theTree, dotSpec);
-	
-	// Terminate the spec
-	//
-	dotSpec->append8(dotSpec, "\n}");
-
-	// Result
-	//
-	return dotSpec;
-}
-
-
-/** Create and return a nil tree node (no token payload)
- */
-static	pANTLR3_BASE_TREE	
-nilNode	    (pANTLR3_BASE_TREE_ADAPTOR adaptor)
-{
-	return	adaptor->create(adaptor, NULL);
-}
-
-static	pANTLR3_BASE_TREE	
-dbgNil	    (pANTLR3_BASE_TREE_ADAPTOR adaptor)
-{
-	pANTLR3_BASE_TREE t;
-
-	t = adaptor->create				(adaptor, NULL);
-	adaptor->debugger->createNode	(adaptor->debugger, t);
-
-	return	t;
-}
-
-/** Return a duplicate of the entire tree (implementation provided by the 
- *  BASE_TREE interface.)
- */
-static	pANTLR3_BASE_TREE	
-dupTree  (pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
-{
-	return	adaptor->dupTreeTT(adaptor, t, NULL);
-}
-
-pANTLR3_BASE_TREE
-dupTreeTT			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE parent)
-{
-	pANTLR3_BASE_TREE	newTree;
-	pANTLR3_BASE_TREE	child;
-	pANTLR3_BASE_TREE	newSubTree;
-	ANTLR3_UINT32		n;
-	ANTLR3_UINT32		i;
-
-	if	(t == NULL)
-	{
-		return NULL;
-	}
-	newTree = t->dupNode(t);
-
-	// Ensure new subtree root has parent/child index set
-	//
-	adaptor->setChildIndex		(adaptor, newTree, t->getChildIndex(t));
-	adaptor->setParent			(adaptor, newTree, parent);
-	n = adaptor->getChildCount	(adaptor, t);
-
-	for	(i=0; i < n; i++)
-	{
-		child = adaptor->getChild		(adaptor, t, i);
-		newSubTree = adaptor->dupTreeTT	(adaptor, child, t);
-		adaptor->addChild				(adaptor, newTree, newSubTree);
-	}
-	return	newTree;
-}
-
-/// Sends the required debugging events for duplicating a tree
-/// to the debugger.
-///
-static void
-simulateTreeConstruction(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE tree)
-{
-	ANTLR3_UINT32		n;
-	ANTLR3_UINT32		i;
-	pANTLR3_BASE_TREE	child;
-
-	// Send the create node event
-	//
-	adaptor->debugger->createNode(adaptor->debugger, tree);
-
-	n = adaptor->getChildCount(adaptor, tree);
-	for	(i = 0; i < n; i++)
-	{
-		child = adaptor->getChild(adaptor, tree, i);
-		simulateTreeConstruction(adaptor, child);
-		adaptor->debugger->addChild(adaptor->debugger, tree, child);
-	}
-}
-
-pANTLR3_BASE_TREE
-dbgDupTree		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE tree)
-{
-	pANTLR3_BASE_TREE t;
-
-	// Call the normal dup tree mechanism first
-	//
-	t = adaptor->dupTreeTT(adaptor, tree, NULL);
-
-	// In order to tell the debugger what we have just done, we now
-	// simulate the tree building mechanism. This will fire
-	// lots of debugging events to the client and look like we
-	// duped the tree.
-	//
-	simulateTreeConstruction(adaptor, t);
-
-	return t;
-}
-
-/** Add a child to the tree t.  If child is a flat tree (a list), make all
- *  in list children of t. Warning: if t has no children, but child does
- *  and child isNilNode then it is ok to move children to t via
- *  t.children = child.children; i.e., without copying the array.  This
- *  is for construction and I'm not sure it's completely general for
- *  a tree's addChild method to work this way.  Make sure you differentiate
- *  between your tree's addChild and this parser tree construction addChild
- *  if it's not ok to move children to t with a simple assignment.
- */
-static	void	
-addChild (pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE child)
-{
-	if	(t != NULL && child != NULL)
-	{
-		t->addChild(t, child);
-	}
-}
-static	void	
-dbgAddChild (pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE child)
-{
-	if	(t != NULL && child != NULL)
-	{
-		t->addChild(t, child);
-		adaptor->debugger->addChild(adaptor->debugger, t, child);
-	}
-}
-/** Use the adaptor implementation to add a child node with the supplied token
- */
-static	void		
-addChildToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN child)
-{
-	if	(t != NULL && child != NULL)
-	{
-		adaptor->addChild(adaptor, t, adaptor->create(adaptor, child));
-	}
-}
-static	void		
-dbgAddChildToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN child)
-{
-	pANTLR3_BASE_TREE	tc;
-
-	if	(t != NULL && child != NULL)
-	{
-		tc = adaptor->create(adaptor, child);
-		adaptor->addChild(adaptor, t, tc);
-		adaptor->debugger->addChild(adaptor->debugger, t, tc);
-	}
-}
-
-/** If oldRoot is a nil root, just copy or move the children to newRoot.
- *  If not a nil root, make oldRoot a child of newRoot.
- *
- * \code
- *    old=^(nil a b c), new=r yields ^(r a b c)
- *    old=^(a b c), new=r yields ^(r ^(a b c))
- * \endcode
- *
- *  If newRoot is a nil-rooted single child tree, use the single
- *  child as the new root node.
- *
- * \code
- *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
- *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
- * \endcode
- *
- *  If oldRoot was null, it's ok, just return newRoot (even if isNilNode).
- *
- * \code
- *    old=null, new=r yields r
- *    old=null, new=^(nil r) yields ^(nil r)
- * \endcode
- *
- *  Return newRoot.  Throw an exception if newRoot is not a
- *  simple node or nil root with a single child node--it must be a root
- *  node.  If newRoot is <code>^(nil x)</code> return x as newRoot.
- *
- *  Be advised that it's ok for newRoot to point at oldRoot's
- *  children; i.e., you don't have to copy the list.  We are
- *  constructing these nodes so we should have this control for
- *  efficiency.
- */
-static	pANTLR3_BASE_TREE	
-becomeRoot	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE newRootTree, pANTLR3_BASE_TREE oldRootTree)
-{
-    pANTLR3_BASE_TREE saveRoot;
-
-	/* Protect against tree rewrites if we are in some sort of error
-	 * state, but have tried to recover. In C we can end up with a null pointer
-	 * for a tree that was not produced.
-	 */
-	if	(newRootTree == NULL)
-	{
-		return	oldRootTree;
-	}
-
-	/* root is just the new tree as is if there is no
-	 * current root tree.
-	 */
-	if	(oldRootTree == NULL)
-	{
-		return	newRootTree;
-	}
-
-	/* Produce ^(nil real-node)
-	 */
-	if	(newRootTree->isNilNode(newRootTree))
-	{
-		if	(newRootTree->getChildCount(newRootTree) > 1)
-		{
-			/* TODO: Handle tree exceptions 
-			 */
-			ANTLR3_FPRINTF(stderr, "More than one node as root! TODO: Create tree exception handling\n");
-			return newRootTree;
-		}
-
-		/* The new root is the first child, keep track of the original newRoot
-         * because if it was a Nil Node, then we can reuse it now.
-		 */
-        saveRoot    = newRootTree;
-		newRootTree = newRootTree->getChild(newRootTree, 0);
-
-        // Reclaim the old nilNode()
-        //
-        saveRoot->reuse(saveRoot);
-	}
-
-	/* Add old root into new root. addChild takes care of the case where oldRoot
-	 * is a flat list (nil-rooted tree). All children of oldRoot are added to
-	 * new root.
-	 */
-	newRootTree->addChild(newRootTree, oldRootTree);
-
-    // If the oldroot tree was a nil node, then we know at this point
-    // it has become orphaned by the rewrite logic, so we tell it to do
-    // whatever it needs to do to be reused.
-    //
-    if  (oldRootTree->isNilNode(oldRootTree))
-    {
-        // We have taken an old Root Tree and appended all its children to the new
-        // root. In addition, though, it was a nil node, which means the generated code
-        // will not reuse it again, so we will reclaim it here. First we want to zero out
-        // any pointers it was carrying around. We are just the baseTree handler, so we
-        // don't necessarily know how to do this for the real node; we just ask the tree itself
-        // to do it.
-        //
-        oldRootTree->reuse(oldRootTree);
-    }
-	/* Always returns new root structure
-	 */
-	return	newRootTree;
-
-}
-static	pANTLR3_BASE_TREE	
-dbgBecomeRoot	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE newRootTree, pANTLR3_BASE_TREE oldRootTree)
-{
-	pANTLR3_BASE_TREE t;
-	
-	t = becomeRoot(adaptor, newRootTree, oldRootTree);
-
-	adaptor->debugger->becomeRoot(adaptor->debugger, newRootTree, oldRootTree);
-
-	return t;
-}
-/** Transform ^(nil x) to x 
- */
-static	pANTLR3_BASE_TREE	
-   rulePostProcessing	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE root)
-{
-    pANTLR3_BASE_TREE saveRoot;
-
-    // Keep track of the root we are given. If it is a nilNode, then we
-    // can reuse it rather than orphaning it!
-    //
-    saveRoot = root;
-
-	if (root != NULL && root->isNilNode(root))
-	{
-		if	(root->getChildCount(root) == 0)
-		{
-			root = NULL;
-		}
-		else if	(root->getChildCount(root) == 1)
-		{
-			root = root->getChild(root, 0);
-			root->setParent(root, NULL);
-			root->setChildIndex(root, -1);
-
-            // The root we were given was a nil node, with one child, which means it has
-            // been abandoned and would be lost in the node factory. However
-            // nodes can be flagged as reusable to prevent this terrible waste.
-            //
-            saveRoot->reuse(saveRoot);
-		}
-	}
-
-	return root;
-}
- 
-/** Use the adaptor interface to set a new tree node with the supplied token
- *  to the root of the tree.
- */
-static	pANTLR3_BASE_TREE	
-   becomeRootToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * newRoot, pANTLR3_BASE_TREE oldRoot)
-{
-	return	adaptor->becomeRoot(adaptor, adaptor->create(adaptor, newRoot), oldRoot);
-}
-static	pANTLR3_BASE_TREE	
-dbgBecomeRootToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * newRoot, pANTLR3_BASE_TREE oldRoot)
-{
-	pANTLR3_BASE_TREE	t;
-
-	t =	adaptor->becomeRoot(adaptor, adaptor->create(adaptor, newRoot), oldRoot);
-
-	adaptor->debugger->becomeRoot(adaptor->debugger,t, oldRoot);
-
-	return t;
-}
-
-/** Use the super class supplied create() method to create a new node
- *  from the supplied token.
- */
-static	pANTLR3_BASE_TREE	
-createTypeToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken)
-{
-	/* Create the new token
-	 */
-	fromToken = adaptor->createTokenFromToken(adaptor, fromToken);
-
-	/* Set the type of the new token to that supplied
-	 */
-	fromToken->setType(fromToken, tokenType);
-
-	/* Return a new node based upon this token
-	 */
-	return	adaptor->create(adaptor, fromToken);
-}
-static	pANTLR3_BASE_TREE	
-dbgCreateTypeToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken)
-{
-	pANTLR3_BASE_TREE t;
-
-	t = createTypeToken(adaptor, tokenType, fromToken);
-
-	adaptor->debugger->createNode(adaptor->debugger, t);
-
-	return t;
-}
-
-static	pANTLR3_BASE_TREE	
-createTypeTokenText	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken, pANTLR3_UINT8 text)
-{
-	/* Create the new token
-	 */
-	fromToken = adaptor->createTokenFromToken(adaptor, fromToken);
-
-	/* Set the type of the new token to that supplied
-	 */
-	fromToken->setType(fromToken, tokenType);
-
-	/* Set the text of the token accordingly
-	 */
-	fromToken->setText8(fromToken, text);
-
-	/* Return a new node based upon this token
-	 */
-	return	adaptor->create(adaptor, fromToken);
-}
-static	pANTLR3_BASE_TREE	
-dbgCreateTypeTokenText	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken, pANTLR3_UINT8 text)
-{
-	pANTLR3_BASE_TREE t;
-
-	t = createTypeTokenText(adaptor, tokenType, fromToken, text);
-
-	adaptor->debugger->createNode(adaptor->debugger, t);
-
-	return t;
-}
-
-static	pANTLR3_BASE_TREE	
-   createTypeText	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text)
-{
-	pANTLR3_COMMON_TOKEN	fromToken;
-
-	/* Create the new token
-	 */
-	fromToken = adaptor->createToken(adaptor, tokenType, text);
-
-	/* Return a new node based upon this token
-	 */
-	return	adaptor->create(adaptor, fromToken);
-}
-static	pANTLR3_BASE_TREE	
-   dbgCreateTypeText	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text)
-{
-	pANTLR3_BASE_TREE t;
-
-	t = createTypeText(adaptor, tokenType, text);
-
-	adaptor->debugger->createNode(adaptor->debugger, t);
-
-	return t;
-
-}
-/** Dummy implementation - will be supplied by super class
- */
-static	ANTLR3_UINT32	
-   getType		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
-{
-	return	0;
-}
-
-/** Dummy implementation - will be supplied by super class
- */
-static	void		
-   setType		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 type)
-{
-	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement setType()\n");
-}
-
-/** Dummy implementation - will be supplied by super class
- */
-static	pANTLR3_STRING	
-   getText		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
-{
-	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement getText()\n");
-	return	NULL;
-}
-
-/** Dummy implementation - will be supplied by super class
- */
-static	void		
-   setText		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_STRING t)
-{
-	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement setText()\n");
-}
-/** Dummy implementation - will be supplied by super class
- */
-static	void		
-setText8		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_UINT8 t)
-{
-	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement setText()\n");
-}
-
-static	pANTLR3_BASE_TREE	
-   getChild		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i)
-{
-	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement getChild()\n");
-	return NULL;
-}
-
-static	ANTLR3_UINT32	
-   getChildCount	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE tree)
-{
-	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement getChildCount()\n");
-	return 0;
-}
-
-/** Returns a uniqueID for the node. Because this is the C implementation
- *  we can just use its address suitably converted/cast to an integer.
- */
-static	ANTLR3_UINT32	
-   getUniqueID		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE node)
-{
-	return	ANTLR3_UINT32_CAST(node);
-}
-
-static	ANTLR3_BOOLEAN
-isNilNode					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
-{
-	return t->isNilNode(t);
-}
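The removed antlr3basetreeadaptor.c above implements becomeRoot(), whose documented rewrite rules boil down to a small case analysis: a NULL new root keeps the old root, a NULL old root returns the new root unchanged, ^(nil r) is first reduced to r, and the old root (or, if it is a nil list, all of its children) is then appended under the new root. The condensed sketch below mirrors that case analysis over an illustrative struct; the node type, addChild helper, and main() are assumptions for demonstration, not the adaptor's real function-pointer API.

#include <stdio.h>

/* Illustrative node; the runtime's becomeRoot works on pANTLR3_BASE_TREE
 * through the tree adaptor's function-pointer table instead.              */
typedef struct node {
    const char  *text;        /* NULL marks a nil (list) root              */
    int          nKids;
    struct node *kids[8];
} node;

static void addChild(node *t, node *child)
{
    int i;
    if (child == NULL)
        return;
    if (child->text == NULL) {                 /* nil child: adopt its kids */
        for (i = 0; i < child->nKids; i++)
            t->kids[t->nKids++] = child->kids[i];
    } else {
        t->kids[t->nKids++] = child;
    }
}

/* becomeRoot rules documented in the removed file:
 *   old=^(nil a b c), new=r      yields ^(r a b c)
 *   old=^(a b c),     new=r      yields ^(r ^(a b c))
 *   old=NULL,         new=r      yields r
 *   new=^(nil r)                 is reduced to r before attaching          */
static node *becomeRoot(node *newRoot, node *oldRoot)
{
    if (newRoot == NULL) return oldRoot;
    if (oldRoot == NULL) return newRoot;
    if (newRoot->text == NULL && newRoot->nKids == 1)
        newRoot = newRoot->kids[0];            /* unwrap ^(nil r)            */
    addChild(newRoot, oldRoot);
    return newRoot;
}

int main(void)
{
    node a = { "a" }, b = { "b" }, r = { "r" };
    node nilRoot = { NULL };                   /* a nil list root            */
    node *root;

    addChild(&nilRoot, &a);
    addChild(&nilRoot, &b);                    /* old = ^(nil a b)           */
    root = becomeRoot(&r, &nilRoot);           /* yields ^(r a b)            */
    printf("%s has %d children\n", root->text, root->nKids);
    return 0;
}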
diff --git a/antlr-3.4/runtime/C/src/antlr3bitset.c b/antlr-3.4/runtime/C/src/antlr3bitset.c
deleted file mode 100644
index 4e63c79..0000000
--- a/antlr-3.4/runtime/C/src/antlr3bitset.c
+++ /dev/null
@@ -1,681 +0,0 @@
-///
-/// \file
-/// Contains the C implementation of ANTLR3 bitsets as adapted from Terence Parr's
-/// Java implementation.
-///
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3bitset.h>
-
-// External interface
-//
-
-static	pANTLR3_BITSET  antlr3BitsetClone		(pANTLR3_BITSET inSet);
-static	pANTLR3_BITSET  antlr3BitsetOR			(pANTLR3_BITSET bitset1, pANTLR3_BITSET bitset2);
-static	void			antlr3BitsetORInPlace	(pANTLR3_BITSET bitset, pANTLR3_BITSET bitset2);
-static	ANTLR3_UINT32	antlr3BitsetSize		(pANTLR3_BITSET bitset);
-static	void			antlr3BitsetAdd			(pANTLR3_BITSET bitset, ANTLR3_INT32 bit);
-static	ANTLR3_BOOLEAN	antlr3BitsetEquals		(pANTLR3_BITSET bitset1, pANTLR3_BITSET bitset2);
-static	ANTLR3_BOOLEAN	antlr3BitsetMember		(pANTLR3_BITSET bitset, ANTLR3_UINT32 bit);
-static	ANTLR3_UINT32	antlr3BitsetNumBits		(pANTLR3_BITSET bitset);
-static	void			antlr3BitsetRemove		(pANTLR3_BITSET bitset, ANTLR3_UINT32 bit);
-static	ANTLR3_BOOLEAN	antlr3BitsetIsNil		(pANTLR3_BITSET bitset);
-static	pANTLR3_INT32	antlr3BitsetToIntList	(pANTLR3_BITSET bitset);
-
-// Local functions
-//
-static	void			growToInclude		(pANTLR3_BITSET bitset, ANTLR3_INT32 bit);
-static	void			grow				(pANTLR3_BITSET bitset, ANTLR3_INT32 newSize);
-static	ANTLR3_UINT64	bitMask				(ANTLR3_UINT32 bitNumber);
-static	ANTLR3_UINT32	numWordsToHold		(ANTLR3_UINT32 bit);
-static	ANTLR3_UINT32	wordNumber			(ANTLR3_UINT32 bit);
-static	void			antlr3BitsetFree	(pANTLR3_BITSET bitset);
-
-static void
-antlr3BitsetFree(pANTLR3_BITSET bitset)
-{
-    if	(bitset->blist.bits != NULL)
-    {
-		ANTLR3_FREE(bitset->blist.bits);
-		bitset->blist.bits = NULL;
-    }
-    ANTLR3_FREE(bitset);
-
-    return;
-}
-
-ANTLR3_API pANTLR3_BITSET
-antlr3BitsetNew(ANTLR3_UINT32 numBits)
-{
-	pANTLR3_BITSET  bitset;
-
-	ANTLR3_UINT32   numelements;
-
-	// Allocate memory for the bitset structure itself
-	//
-	bitset  = (pANTLR3_BITSET) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_BITSET));
-
-	if	(bitset == NULL)
-	{
-		return	NULL;
-	}
-
-	// Avoid memory thrashing at the up front expense of a few bytes
-	//
-	if	(numBits < (8 * ANTLR3_BITSET_BITS))
-	{
-		numBits = 8 * ANTLR3_BITSET_BITS;
-	}
-
-	// Now we need to allocate the memory for the number of bits asked for
-	// in multiples of ANTLR3_UINT64. 
-	//
-	numelements	= ((numBits -1) >> ANTLR3_BITSET_LOG_BITS) + 1;
-
-	bitset->blist.bits    = (pANTLR3_BITWORD) ANTLR3_MALLOC((size_t)(numelements * sizeof(ANTLR3_BITWORD)));
-	memset(bitset->blist.bits, 0, (size_t)(numelements * sizeof(ANTLR3_BITWORD)));
-	bitset->blist.length  = numelements;
-
-	if	(bitset->blist.bits == NULL)
-	{
-		ANTLR3_FREE(bitset);
-		return	NULL;
-	}
-
-	antlr3BitsetSetAPI(bitset);
-
-
-	// All seems good
-	//
-	return  bitset;
-}
-
-ANTLR3_API void
-antlr3BitsetSetAPI(pANTLR3_BITSET bitset)
-{
-    bitset->clone		=    antlr3BitsetClone;
-    bitset->bor			=    antlr3BitsetOR;
-    bitset->borInPlace	=    antlr3BitsetORInPlace;
-    bitset->size		=    antlr3BitsetSize;
-    bitset->add			=    antlr3BitsetAdd;
-    bitset->grow		=    grow;
-    bitset->equals		=    antlr3BitsetEquals;
-    bitset->isMember	=    antlr3BitsetMember;
-    bitset->numBits		=    antlr3BitsetNumBits;
-    bitset->remove		=    antlr3BitsetRemove;
-    bitset->isNilNode		=    antlr3BitsetIsNil;
-    bitset->toIntList	=    antlr3BitsetToIntList;
-
-    bitset->free		=    antlr3BitsetFree;
-}
-
-ANTLR3_API pANTLR3_BITSET
-antlr3BitsetCopy(pANTLR3_BITSET_LIST blist)
-{
-    pANTLR3_BITSET  bitset;
-	int				numElements;
-
-    // Allocate memory for the bitset structure itself
-    //
-    bitset  = (pANTLR3_BITSET) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_BITSET));
-
-    if	(bitset == NULL)
-    {
-		return	NULL;
-    }
-
-	numElements = blist->length;
-
-    // Avoid memory thrashing at the expense of a few more bytes
-    //
-    if	(numElements < 8)
-    {
-		numElements = 8;
-    }
-
-    // Install the length in ANTLR3_UINT64 units
-    //
-    bitset->blist.length  = numElements;
-
-    bitset->blist.bits    = (pANTLR3_BITWORD)ANTLR3_MALLOC((size_t)(numElements * sizeof(ANTLR3_BITWORD)));
-
-    if	(bitset->blist.bits == NULL)
-    {
-		ANTLR3_FREE(bitset);
-		return	NULL;
-    }
-
-	ANTLR3_MEMCPY(bitset->blist.bits, blist->bits, (ANTLR3_UINT64)(numElements * sizeof(ANTLR3_BITWORD)));
-
-    // All seems good
-    //
-    return  bitset;
-}
-
-static pANTLR3_BITSET
-antlr3BitsetClone(pANTLR3_BITSET inSet)
-{
-    pANTLR3_BITSET  bitset;
-
-    // Allocate memory for the bitset structure itself
-    //
-    bitset  = antlr3BitsetNew(ANTLR3_BITSET_BITS * inSet->blist.length);
-
-    if	(bitset == NULL)
-    {
-		return	NULL;
-    }
-
-    // Install the actual bits in the source set
-    //
-    ANTLR3_MEMCPY(bitset->blist.bits, inSet->blist.bits, (ANTLR3_UINT64)(inSet->blist.length * sizeof(ANTLR3_BITWORD)));
-
-    // All seems good
-    //
-    return  bitset;
-}
-
-
-ANTLR3_API pANTLR3_BITSET
-antlr3BitsetList(pANTLR3_HASH_TABLE list)
-{
-    pANTLR3_BITSET		bitSet;
-    pANTLR3_HASH_ENUM	en;
-    pANTLR3_HASH_KEY	key;
-    ANTLR3_UINT64		bit;
-
-    // We have no idea what exactly is in the list
-    // so create a default bitset and then just add stuff
-    // as we enumerate.
-    //
-    bitSet  = antlr3BitsetNew(0);
-
-    en		= antlr3EnumNew(list);
-
-    while   (en->next(en, &key, (void **)(&bit)) == ANTLR3_SUCCESS)
-    {
-		bitSet->add(bitSet, (ANTLR3_UINT32)bit);
-    }
-    en->free(en);
-
-    return NULL;
-}
-
-///
-/// \brief
-/// Creates a new bitset with at least one 64 bit set of bits, but as
-/// many 64 bit sets as are required.
-///
-/// \param[in] bset
-/// A variable number of bits to add to the set, ending in -1 (impossible bit).
-/// 
-/// \returns
-/// A new bit set with all of the specified bitmaps in it and the API
-/// initialized.
-/// 
-/// Call as:
-///  - pANTLR3_BITSET = antlrBitsetLoad(bset, bset11, ..., -1);
-///  - pANTLR3_BITSET = antlrBitsetOf(-1);  Create empty bitset 
-///
-/// \remarks
-/// Stdargs function - must supply -1 as last parameter, which is NOT
-/// added to the set.
-/// 
-///
-ANTLR3_API pANTLR3_BITSET
-antlr3BitsetLoad(pANTLR3_BITSET_LIST inBits)
-{
-	pANTLR3_BITSET  bitset;
-	ANTLR3_UINT32  count;
-
-	// Allocate memory for the bitset structure itself
-	// the input parameter is the bit number (0 based)
-	// to include in the bitset, so we need at least
-	// bit + 1 bits. If any arguments indicate
-	// a bit higher than the default number of bits (0 means default size)
-	// then Add() will take care
-	// of it.
-	//
-	bitset  = antlr3BitsetNew(0);
-
-	if	(bitset == NULL)
-	{
-		return	NULL;
-	}
-
-	if	(inBits != NULL)
-	{
-		// Now we can add the element bits into the set
-		//
-		count=0;
-		while (count < inBits->length)
-		{
-			if  (bitset->blist.length <= count)
-			{
-				bitset->grow(bitset, count+1);
-			}
-
-			bitset->blist.bits[count] = *((inBits->bits)+count);
-			count++;
-		}
-	}
-
-	// return the new bitset
-	//
-	return  bitset;
-}
-
-///
-/// \brief
-/// Creates a new bitset with at least one element, but as
-/// many elements as are required.
-/// 
-/// \param[in] bit
-/// A variable number of bits to add to the set, ending in -1 (impossible bit).
-/// 
-/// \returns
-/// A new bit set with all of the specified elements added into it.
-/// 
-/// Call as:
-///  - pANTLR3_BITSET = antlrBitsetOf(n, n1, n2, -1);
-///  - pANTLR3_BITSET = antlrBitsetOf(-1);  Create empty bitset 
-///
-/// \remarks
-/// Stdargs function - must supply -1 as last parameter, which is NOT
-/// added to the set.
-/// 
-///
-ANTLR3_API pANTLR3_BITSET
-antlr3BitsetOf(ANTLR3_INT32 bit, ...)
-{
-    pANTLR3_BITSET  bitset;
-
-    va_list ap;
-
-    // Allocate memory for the bitset structure itself
-    // the input parameter is the bit number (0 based)
-    // to include in the bitset, so we need at least
-    // bit + 1 bits. If any arguments indicate
-    // a bit higher than the default number of bits (0 means default size)
-    // then Add() will take care
-    // of it.
-    //
-    bitset  = antlr3BitsetNew(0);
-
-    if	(bitset == NULL)
-    {
-		return	NULL;
-    }
-
-    // Now we can add the element bits into the set
-    //
-    va_start(ap, bit);
-    while   (bit != -1)
-    {
-		antlr3BitsetAdd(bitset, bit);
-		bit = va_arg(ap, ANTLR3_UINT32);
-    }
-    va_end(ap);
-
-    // return the new bitset
-    //
-    return  bitset;
-}
-
-static pANTLR3_BITSET
-antlr3BitsetOR(pANTLR3_BITSET bitset1, pANTLR3_BITSET bitset2)
-{
-    pANTLR3_BITSET  bitset;
-
-    if	(bitset1 == NULL)
-    {
-		return antlr3BitsetClone(bitset2);
-    }
-
-    if	(bitset2 == NULL)
-    {
-		return	antlr3BitsetClone(bitset1);
-    }
-
-    // Allocate memory for the newly ordered bitset structure itself.
-    //
-    bitset  = antlr3BitsetClone(bitset1);
-    
-    antlr3BitsetORInPlace(bitset, bitset2);
-
-    return  bitset;
-
-}
-
-static void
-antlr3BitsetAdd(pANTLR3_BITSET bitset, ANTLR3_INT32 bit)
-{
-    ANTLR3_UINT32   word;
-
-    word    = wordNumber(bit);
-
-    if	(word	>= bitset->blist.length)
-    {
-		growToInclude(bitset, bit);
-    }
-
-    bitset->blist.bits[word] |= bitMask(bit);
-
-}
-
-static void
-grow(pANTLR3_BITSET bitset, ANTLR3_INT32 newSize)
-{
-    pANTLR3_BITWORD   newBits;
-
-    // Space for newly sized bitset - TODO: come back to this and use realloc? It may
-    // be more efficient...
-    //
-    newBits = (pANTLR3_BITWORD) ANTLR3_CALLOC(1, (size_t)(newSize * sizeof(ANTLR3_BITWORD)));
-    if	(bitset->blist.bits != NULL)
-    {
-		// Copy existing bits
-		//
-		ANTLR3_MEMCPY((void *)newBits, (const void *)bitset->blist.bits, (size_t)(bitset->blist.length * sizeof(ANTLR3_BITWORD)));
-
-		// Out with the old bits... de de de derrr
-		//
-		ANTLR3_FREE(bitset->blist.bits);
-    }
-
-    // In with the new bits... keerrrang.
-    //
-    bitset->blist.bits      = newBits;
-    bitset->blist.length    = newSize;
-}
-
-static void
-growToInclude(pANTLR3_BITSET bitset, ANTLR3_INT32 bit)
-{
-	ANTLR3_UINT32	bl;
-	ANTLR3_UINT32	nw;
-
-	bl = (bitset->blist.length << 1);
-	nw = numWordsToHold(bit);
-
-	if	(bl > nw)
-	{
-		bitset->grow(bitset, bl);
-	}
-	else
-	{
-		bitset->grow(bitset, nw);
-	}
-}
-
-static void
-antlr3BitsetORInPlace(pANTLR3_BITSET bitset, pANTLR3_BITSET bitset2)
-{
-    ANTLR3_UINT32   minimum;
-    ANTLR3_UINT32   i;
-
-    if	(bitset2 == NULL)
-    {
-		return;
-    }
-
-
-    // First make sure that the target bitset is big enough
-    // for the new bits to be ored in.
-    //
-    if	(bitset->blist.length < bitset2->blist.length)
-    {
-		growToInclude(bitset, (bitset2->blist.length * sizeof(ANTLR3_BITWORD)));
-    }
-    
-    // OR the minimum number of words after any resizing went on
-    //
-    if	(bitset->blist.length < bitset2->blist.length)
-	{
-		minimum = bitset->blist.length;
-	}
-	else
-	{
-		minimum = bitset2->blist.length;
-	}
-
-    for	(i = minimum; i > 0; i--)
-    {
-		bitset->blist.bits[i-1] |= bitset2->blist.bits[i-1];
-    }
-}
-
-static ANTLR3_UINT64
-bitMask(ANTLR3_UINT32 bitNumber)
-{
-    return  ((ANTLR3_UINT64)1) << (bitNumber & (ANTLR3_BITSET_MOD_MASK));
-}
-
-static ANTLR3_UINT32
-antlr3BitsetSize(pANTLR3_BITSET bitset)
-{
-    ANTLR3_UINT32   degree;
-    ANTLR3_INT32   i;
-    ANTLR3_INT8    bit;
-    
-    // TODO: Come back to this, it may be faster to & with 0x01
-    // then shift right a copy of the 4 bits, than shift left a constant of 1.
-    // But then again, the optimizer might just work this out
-    // anyway.
-    //
-    degree  = 0;
-    for	(i = bitset->blist.length - 1; i>= 0; i--)
-    {
-		if  (bitset->blist.bits[i] != 0)
-		{
-			for	(bit = ANTLR3_BITSET_BITS - 1; bit >= 0; bit--)
-			{
-				if  ((bitset->blist.bits[i] & (((ANTLR3_BITWORD)1) << bit)) != 0)
-				{
-					degree++;
-				}
-			}
-		}
-    }
-    return degree;
-}
-
-static ANTLR3_BOOLEAN
-antlr3BitsetEquals(pANTLR3_BITSET bitset1, pANTLR3_BITSET bitset2)
-{
-    ANTLR3_INT32   minimum;
-    ANTLR3_INT32   i;
-
-    if	(bitset1 == NULL || bitset2 == NULL)
-    {
-	return	ANTLR3_FALSE;
-    }
-
-    // Work out the minimum comparison set
-    //
-    if	(bitset1->blist.length < bitset2->blist.length)
-    {
-		minimum = bitset1->blist.length;
-    }
-    else
-    {
-		minimum = bitset2->blist.length;
-    }
-
-    // Make sure the explicitly set bits in common are equal
-    //
-    for	(i = minimum - 1; i >=0 ; i--)
-    {
-		if  (bitset1->blist.bits[i] != bitset2->blist.bits[i])
-		{
-			return  ANTLR3_FALSE;
-		}
-    }
-
-    // Now make sure the bits of the larger set are all turned
-    // off.
-    //
-    if	(bitset1->blist.length > (ANTLR3_UINT32)minimum)
-    {
-		for (i = minimum ; (ANTLR3_UINT32)i < bitset1->blist.length; i++)
-		{
-			if	(bitset1->blist.bits[i] != 0)
-			{
-				return	ANTLR3_FALSE;
-			}
-		}
-    }
-    else if (bitset2->blist.length > (ANTLR3_UINT32)minimum)
-    {
-		for (i = minimum; (ANTLR3_UINT32)i < bitset2->blist.length; i++)
-		{
-			if	(bitset2->blist.bits[i] != 0)
-			{
-				return	ANTLR3_FALSE;
-			}
-		}
-    }
-
-    return  ANTLR3_TRUE;
-}
-
-static ANTLR3_BOOLEAN
-antlr3BitsetMember(pANTLR3_BITSET bitset, ANTLR3_UINT32 bit)
-{
-    ANTLR3_UINT32    wordNo;
-
-    wordNo  = wordNumber(bit);
-
-    if	(wordNo >= bitset->blist.length)
-    {
-		return	ANTLR3_FALSE;
-    }
-    
-    if	((bitset->blist.bits[wordNo] & bitMask(bit)) == 0)
-    {
-		return	ANTLR3_FALSE;
-    }
-    else
-    {
-		return	ANTLR3_TRUE;
-    }
-}
-
-static void
-antlr3BitsetRemove(pANTLR3_BITSET bitset, ANTLR3_UINT32 bit)
-{
-    ANTLR3_UINT32    wordNo;
-
-    wordNo  = wordNumber(bit);
-
-    if	(wordNo < bitset->blist.length)
-    {
-		bitset->blist.bits[wordNo] &= ~(bitMask(bit));
-    }
-}
-static ANTLR3_BOOLEAN
-antlr3BitsetIsNil(pANTLR3_BITSET bitset)
-{
-   ANTLR3_INT32    i;
-
-   for	(i = bitset->blist.length -1; i>= 0; i--)
-   {
-       if   (bitset->blist.bits[i] != 0)
-       {
-			return ANTLR3_FALSE;
-       }
-   }
-   
-   return   ANTLR3_TRUE;
-}
-
-static ANTLR3_UINT32
-numWordsToHold(ANTLR3_UINT32 bit)
-{
-    return  (bit >> ANTLR3_BITSET_LOG_BITS) + 1;
-}
-
-static	ANTLR3_UINT32
-wordNumber(ANTLR3_UINT32 bit)
-{
-    return  bit >> ANTLR3_BITSET_LOG_BITS;
-}
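-
-// Worked example of the mapping implemented by the two helpers above, assuming
-// the usual 64-bit ANTLR3_BITWORD (ANTLR3_BITSET_LOG_BITS == 6 and
-// ANTLR3_BITSET_MOD_MASK == 0x3F):
-//
-//    bit 67:  wordNumber(67) = 67 >> 6             = 1
-//             bitMask(67)    = 1ULL << (67 & 0x3F) = 1ULL << 3
-//
-// so antlr3BitsetAdd(bitset, 67) ORs 0x08 into blist.bits[1], and
-// antlr3BitsetMember(bitset, 67) tests that same word against the same mask.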
-
-static ANTLR3_UINT32
-antlr3BitsetNumBits(pANTLR3_BITSET bitset)
-{
-    return  bitset->blist.length << ANTLR3_BITSET_LOG_BITS;
-}
-
-/** Produce an integer list of all the bits that are turned on
- *  in this bitset. Used for error processing in the main as the bitset
- *  represents a number of integer tokens which we use for follow sets
- *  and so on.
- *
- *  The first entry holds the length of the returned array (the number of set bits plus one).
- */
-static	pANTLR3_INT32	
-antlr3BitsetToIntList	(pANTLR3_BITSET bitset)
-{
-    ANTLR3_UINT32   numInts;	    // How many integers we will need
-    ANTLR3_UINT32   numBits;	    // How many bits are in the set
-    ANTLR3_UINT32   i;
-    ANTLR3_UINT32   index;
-
-    pANTLR3_INT32  intList;
-
-    numInts = bitset->size(bitset) + 1;
-    numBits = bitset->numBits(bitset);
- 
-    intList = (pANTLR3_INT32)ANTLR3_MALLOC(numInts * sizeof(ANTLR3_INT32));
-
-    if	(intList == NULL)
-    {
-		return NULL;	// Out of memory
-    }
-
-    intList[0] = numInts;
-
-    // Enumerate the bits that are turned on
-    //
-    for	(i = 0, index = 1; i<numBits; i++)
-    {
-		if  (bitset->isMember(bitset, i) == ANTLR3_TRUE)
-		{
-			intList[index++]    = i;
-		}
-    }
-
-    // Result set
-    //
-    return  intList;
-}
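-
-// Illustrative consumer sketch for the list produced above (not part of the
-// runtime). It assumes the usual interface wiring of this file: toIntList()
-// maps to antlr3BitsetToIntList() and free() releases the bitset; <stdio.h>
-// is assumed for printf(), and error handling is elided.
-//
-static void exampleConsumeIntList(void)
-{
-    pANTLR3_BITSET follow = antlr3BitsetOf(4, 9, 12, -1);
-    pANTLR3_INT32  tokens = follow->toIntList(follow);
-    ANTLR3_INT32   i;
-
-    // tokens[0] is the array length (set-bit count + 1); the bit numbers
-    // occupy tokens[1] .. tokens[tokens[0] - 1]
-    for (i = 1; i < tokens[0]; i++)
-    {
-        printf("token type %d is in the follow set\n", tokens[i]);
-    }
-
-    ANTLR3_FREE(tokens);
-    follow->free(follow);
-}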
-
diff --git a/antlr-3.4/runtime/C/src/antlr3collections.c b/antlr-3.4/runtime/C/src/antlr3collections.c
deleted file mode 100644
index d9e22e9..0000000
--- a/antlr-3.4/runtime/C/src/antlr3collections.c
+++ /dev/null
@@ -1,2741 +0,0 @@
-/// \file
-/// Provides a number of useful functions that are roughly equivalent
-/// to java HashTable and List for the purposes of Antlr 3 C runtime.
-/// Also usable by the C programmer for things like symbol table pointers
-/// and so on.
-///
-///
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3.h>
-
-#include "antlr3collections.h"
-
-// Interface functions for hash table
-//
-
-// String based keys
-//
-static void					antlr3HashDelete    (pANTLR3_HASH_TABLE table, void * key);
-static void *				antlr3HashGet	(pANTLR3_HASH_TABLE table, void * key);
-static pANTLR3_HASH_ENTRY   antlr3HashRemove    (pANTLR3_HASH_TABLE table, void * key);
-static ANTLR3_INT32			antlr3HashPut	(pANTLR3_HASH_TABLE table, void * key, void * element, void (ANTLR3_CDECL *freeptr)(void *));
-
-// Integer based keys (Lists and so on)
-//
-static void					antlr3HashDeleteI   (pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key);
-static void *				antlr3HashGetI	(pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key);
-static pANTLR3_HASH_ENTRY   antlr3HashRemoveI   (pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key);
-static ANTLR3_INT32			antlr3HashPutI	(pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key, void * element, void (ANTLR3_CDECL *freeptr)(void *));
-
-static void					antlr3HashFree	(pANTLR3_HASH_TABLE table);
-static ANTLR3_UINT32	    antlr3HashSize	(pANTLR3_HASH_TABLE table);
-
-// -----------
-
-// Interface functions for enumeration
-//
-static int	    antlr3EnumNext	    (pANTLR3_HASH_ENUM en, pANTLR3_HASH_KEY * key, void ** data);
-static void	    antlr3EnumFree	    (pANTLR3_HASH_ENUM en);
-
-// Interface functions for List
-//
-static void				antlr3ListFree	(pANTLR3_LIST list);
-static void				antlr3ListDelete(pANTLR3_LIST list, ANTLR3_INTKEY key);
-static void *			antlr3ListGet	(pANTLR3_LIST list, ANTLR3_INTKEY key);
-static ANTLR3_INT32		antlr3ListPut	(pANTLR3_LIST list, ANTLR3_INTKEY key, void * element, void (ANTLR3_CDECL *freeptr)(void *));
-static ANTLR3_INT32		antlr3ListAdd   (pANTLR3_LIST list, void * element, void (ANTLR3_CDECL *freeptr)(void *));
-static void *			antlr3ListRemove(pANTLR3_LIST list, ANTLR3_INTKEY key);
-static ANTLR3_UINT32	antlr3ListSize	(pANTLR3_LIST list);
-
-// Interface functions for Stack
-//
-static void				antlr3StackFree	(pANTLR3_STACK  stack);
-static void *			antlr3StackPop	(pANTLR3_STACK	stack);
-static void *			antlr3StackGet	(pANTLR3_STACK	stack, ANTLR3_INTKEY key);
-static ANTLR3_BOOLEAN	antlr3StackPush	(pANTLR3_STACK	stack, void * element, void (ANTLR3_CDECL *freeptr)(void *));
-static ANTLR3_UINT32	antlr3StackSize	(pANTLR3_STACK	stack);
-static void *			antlr3StackPeek	(pANTLR3_STACK	stack);
-
-// Interface functions for vectors
-//
-static	void ANTLR3_CDECL	antlr3VectorFree	(pANTLR3_VECTOR vector);
-static	void				antlr3VectorDel		(pANTLR3_VECTOR vector, ANTLR3_UINT32 entry);
-static	void *				antlr3VectorGet		(pANTLR3_VECTOR vector, ANTLR3_UINT32 entry);
-static	void *				antrl3VectorRemove	(pANTLR3_VECTOR vector, ANTLR3_UINT32 entry);
-static	void				antlr3VectorClear	(pANTLR3_VECTOR vector);
-static	ANTLR3_UINT32		antlr3VectorAdd		(pANTLR3_VECTOR vector, void * element, void (ANTLR3_CDECL *freeptr)(void *));
-static	ANTLR3_UINT32		antlr3VectorSet		(pANTLR3_VECTOR vector, ANTLR3_UINT32 entry, void * element, void (ANTLR3_CDECL *freeptr)(void *), ANTLR3_BOOLEAN freeExisting);
-static	ANTLR3_UINT32		antlr3VectorSize    (pANTLR3_VECTOR vector);
-static	ANTLR3_BOOLEAN      antlr3VectorSwap	(pANTLR3_VECTOR vector, ANTLR3_UINT32 entry1, ANTLR3_UINT32 entry2);
-
-static  void                newPool             (pANTLR3_VECTOR_FACTORY factory);
-static  void				closeVectorFactory  (pANTLR3_VECTOR_FACTORY factory);
-static	pANTLR3_VECTOR		newVector			(pANTLR3_VECTOR_FACTORY factory);
-static	void				returnVector		(pANTLR3_VECTOR_FACTORY factory, pANTLR3_VECTOR vector);
-
-
-// Interface functions for int TRIE
-//
-static	pANTLR3_TRIE_ENTRY	intTrieGet		(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key);
-static	ANTLR3_BOOLEAN		intTrieDel		(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key);
-static	ANTLR3_BOOLEAN		intTrieAdd		(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key, ANTLR3_UINT32 type, ANTLR3_INTKEY intType, void * data, void (ANTLR3_CDECL *freeptr)(void *));
-static	void				intTrieFree		(pANTLR3_INT_TRIE trie);
-
-
-// Interface functions for topological sorter
-//
-static  void            addEdge          (pANTLR3_TOPO topo, ANTLR3_UINT32 edge, ANTLR3_UINT32 dependency);
-static  pANTLR3_UINT32  sortToArray      (pANTLR3_TOPO topo);
-static  void            sortVector       (pANTLR3_TOPO topo, pANTLR3_VECTOR v);
-static  void            freeTopo         (pANTLR3_TOPO topo);
-
-// Local function to advance enumeration structure pointers
-//
-static void antlr3EnumNextEntry(pANTLR3_HASH_ENUM en);
-
-pANTLR3_HASH_TABLE
-antlr3HashTableNew(ANTLR3_UINT32 sizeHint)
-{
-	// All we have to do is create the hashtable tracking structure
-	// and allocate memory for the requested number of buckets.
-	//
-	pANTLR3_HASH_TABLE	table;
-
-	ANTLR3_UINT32	bucket;	// Used to traverse the buckets
-
-	table   = ANTLR3_MALLOC(sizeof(ANTLR3_HASH_TABLE));
-
-	// Error out if no memory left
-	if	(table	== NULL)
-	{
-		return	NULL;
-	}
-
-	// Allocate memory for the buckets
-	//
-	table->buckets = (pANTLR3_HASH_BUCKET) ANTLR3_MALLOC((size_t) (sizeof(ANTLR3_HASH_BUCKET) * sizeHint)); 
-
-	if	(table->buckets == NULL)
-	{
-		ANTLR3_FREE((void *)table);
-		return	NULL;
-	}
-
-	// Modulo of the table, (bucket count).
-	//
-	table->modulo   = sizeHint;
-
-	table->count    = 0;	    /* Nothing in there yet ( I hope)	*/
-
-	/* Initialize the buckets to empty
-	*/
-	for	(bucket = 0; bucket < sizeHint; bucket++)
-	{
-		table->buckets[bucket].entries = NULL;
-	}
-
-	/* Exclude duplicate entries by default
-	*/
-	table->allowDups	= ANTLR3_FALSE;
-
-    /* Assume that keys should be strduped before they are
-     * entered in the table.
-     */
-    table->doStrdup     = ANTLR3_TRUE;
-
-	/* Install the interface
-	*/
-
-	table->get		=  antlr3HashGet;
-	table->put		=  antlr3HashPut;
-	table->del		=  antlr3HashDelete;
-	table->remove	=  antlr3HashRemove;
-
-	table->getI		=  antlr3HashGetI;
-	table->putI		=  antlr3HashPutI;
-	table->delI		=  antlr3HashDeleteI;
-	table->removeI	=  antlr3HashRemoveI;
-
-	table->size		=  antlr3HashSize;
-	table->free		=  antlr3HashFree;
-
-	return  table;
-}
-
-static void
-antlr3HashFree(pANTLR3_HASH_TABLE table)
-{
-    ANTLR3_UINT32	bucket;	/* Used to traverse the buckets	*/
-
-    pANTLR3_HASH_BUCKET	thisBucket;
-    pANTLR3_HASH_ENTRY	entry;
-    pANTLR3_HASH_ENTRY	nextEntry;
-
-    /* Free the table, all buckets and all entries, and all the
-     * keys and data (if the table exists)
-     */
-    if	(table	!= NULL)
-    {
-	for	(bucket = 0; bucket < table->modulo; bucket++)
-	{
-	    thisBucket	= &(table->buckets[bucket]);
-
-	    /* Allow sparse tables, though we don't create them as such at present
-	     */
-	    if	( thisBucket != NULL)
-	    {
-		entry	= thisBucket->entries;
-
-		/* Search all entries in the bucket and free them up
-		 */
-		while	(entry != NULL)
-		{
-		    /* Save next entry - we do not want to access memory in entry after we
-		     * have freed it.
-		     */
-		    nextEntry	= entry->nextEntry;
-
-		    /* Free any data pointer, this only happens if the user supplied
-		     * a pointer to a routine that knows how to free the structure they
-		     * added to the table.
-		     */
-		    if	(entry->free != NULL)
-		    {
-			entry->free(entry->data);
-		    }
-
-		    /* Free the key memory - we know that we allocated this
-		     */
-		    if	(entry->keybase.type == ANTLR3_HASH_TYPE_STR && entry->keybase.key.sKey != NULL)
-		    {
-			ANTLR3_FREE(entry->keybase.key.sKey);
-		    }
-
-		    /* Free this entry
-		     */
-		    ANTLR3_FREE(entry);
-		    entry   = nextEntry;    /* Load next pointer to see if we should free it */
-		}
-		/* Invalidate the current pointer
-		 */
-		thisBucket->entries = NULL;
-	    }
-	}
-
-	/* Now we can free the bucket memory
-	 */
-	ANTLR3_FREE(table->buckets);
-    }
-
-    /* Now we free the memory for the table itself
-     */
-    ANTLR3_FREE(table);
-}
-
-/** return the current size of the hash table
- */
-static ANTLR3_UINT32	antlr3HashSize	    (pANTLR3_HASH_TABLE table)
-{
-    return  table->count;
-}
-
-/** Remove a numeric keyed entry from a hash table if it exists,
- *  no error if it does not exist.
- */
-static pANTLR3_HASH_ENTRY   antlr3HashRemoveI   (pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key)
-{
-    ANTLR3_UINT32	    hash;
-    pANTLR3_HASH_BUCKET	    bucket;
-    pANTLR3_HASH_ENTRY	    entry;
-    pANTLR3_HASH_ENTRY	    * nextPointer;
-
-    /* First we need to know the hash of the provided key
-     */
-    hash    = (ANTLR3_UINT32)(key % (ANTLR3_INTKEY)(table->modulo));
-
-    /* Knowing the hash, we can find the bucket
-     */
-    bucket  = table->buckets + hash;
-
-    /* Now, we traverse the entries in the bucket until
-     * we find the key or the end of the entries in the bucket. 
-     * We track the element prior to the one we are examining
-     * as we need to set its next pointer to the next pointer
-     * of the entry we are deleting (if we find it).
-     */
-    entry	    =   bucket->entries;    /* Entry to examine					    */
-    nextPointer	    = & bucket->entries;    /* Where to put the next pointer of the deleted entry   */
-
-    while   (entry != NULL)
-    {
-	/* See if this is the entry we wish to delete
-	 */
-	if  (entry->keybase.key.iKey == key)
-	{
-	    /* It was the correct entry, so we set the next pointer
-	     * of the previous entry to the next pointer of this
-	     * located one, which takes it out of the chain.
-	     */
-	    (*nextPointer)		= entry->nextEntry;
-
-	    table->count--;
-
-	    return entry;
-	}
-	else
-	{
-	    /* We found an entry but it wasn't the one that was wanted, so
-	     * move to the next one, if any.
-	     */
-	    nextPointer	= & (entry->nextEntry);	    /* Address of the next pointer in the current entry	    */
-	    entry	= entry->nextEntry;	    /* Address of the next element in the bucket (if any)   */
-	}
-    }
-
-    return NULL;  /* Not found */
-}
-
-/** Remove the element in the hash table for a particular
- *  key value, if it exists - no error if it does not.
- */
-static pANTLR3_HASH_ENTRY
-antlr3HashRemove(pANTLR3_HASH_TABLE table, void * key)
-{
-    ANTLR3_UINT32	    hash;
-    pANTLR3_HASH_BUCKET	    bucket;
-    pANTLR3_HASH_ENTRY	    entry;
-    pANTLR3_HASH_ENTRY	    * nextPointer;
-
-    /* First we need to know the hash of the provided key
-     */
-    hash    = antlr3Hash(key, (ANTLR3_UINT32)strlen((const char *)key));
-
-    /* Knowing the hash, we can find the bucket
-     */
-    bucket  = table->buckets + (hash % table->modulo);
-
-    /* Now, we traverse the entries in the bucket until
-     * we find the key or the end of the entries in the bucket. 
-     * We track the element prior to the one we are examining
-     * as we need to set its next pointer to the next pointer
-     * of the entry we are deleting (if we find it).
-     */
-    entry	    =   bucket->entries;    /* Entry to examine					    */
-    nextPointer	    = & bucket->entries;    /* Where to put the next pointer of the deleted entry   */
-
-    while   (entry != NULL)
-    {
-	/* See if this is the entry we wish to delete
-	 */
-	if  (strcmp((const char *)key, (const char *)entry->keybase.key.sKey) == 0)
-	{
-	    /* It was the correct entry, so we set the next pointer
-	     * of the previous entry to the next pointer of this
-	     * located one, which takes it out of the chain.
-	     */
-	    (*nextPointer)		= entry->nextEntry;
-
-	    /* Release the key - if we allocated that
-	     */
-        if (table->doStrdup == ANTLR3_TRUE)
-        {
-            ANTLR3_FREE(entry->keybase.key.sKey);
-        }
-	    entry->keybase.key.sKey	= NULL;
-
-	    table->count--;
-
-	    return entry;
-	}
-	else
-	{
-	    /* We found an entry but it wasn't the one that was wanted, so
-	     * move to the next one, if any.
-	     */
-	    nextPointer	= & (entry->nextEntry);	    /* Address of the next pointer in the current entry	    */
-	    entry	= entry->nextEntry;	    /* Address of the next element in the bucket (if any)   */
-	}
-    }
-
-    return NULL;  /* Not found */
-}
-
-/** Takes the element with the supplied key out of the list, and deletes the data
- *  calling the supplied free() routine if any. 
- */
-static void
-antlr3HashDeleteI    (pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key)
-{
-    pANTLR3_HASH_ENTRY	entry;
-
-    entry = antlr3HashRemoveI(table, key);
-	
-    /* Now we can free the elements and the entry in order
-     */
-    if	(entry != NULL && entry->free != NULL)
-    {
-	/* Call programmer supplied function to release this entry data
-	 */
-	entry->free(entry->data);
-	entry->data = NULL;
-    }
-    /* Finally release the space for this entry block.
-     */
-    ANTLR3_FREE(entry);
-}
-
-/** Takes the element with the supplied key out of the list, and deletes the data
- *  calling the supplied free() routine if any. 
- */
-static void
-antlr3HashDelete    (pANTLR3_HASH_TABLE table, void * key)
-{
-    pANTLR3_HASH_ENTRY	entry;
-
-    entry = antlr3HashRemove(table, key);
-	
-    /* Now we can free the elements and the entry in order
-     */
-    if	(entry != NULL && entry->free != NULL)
-    {
-	/* Call programmer supplied function to release this entry data
-	 */
-	entry->free(entry->data);
-	entry->data = NULL;
-    }
-    /* Finally release the space for this entry block.
-     */
-    ANTLR3_FREE(entry);
-}
-
-/** Return the element pointer in the hash table for a particular
- *  key value, or NULL if it does not exist (or was itself NULL).
- */
-static void *
-antlr3HashGetI(pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key)
-{
-    ANTLR3_UINT32	    hash;
-    pANTLR3_HASH_BUCKET	    bucket;
-    pANTLR3_HASH_ENTRY	    entry;
-
-    /* First we need to know the hash of the provided key
-     */
-    hash    = (ANTLR3_UINT32)(key % (ANTLR3_INTKEY)(table->modulo));
-
-    /* Knowing the hash, we can find the bucket
-     */
-    bucket  = table->buckets + hash;
-
-    /* Now we can inspect the key at each entry in the bucket
-     * and see if we have a match.
-     */
-    entry   = bucket->entries;
-
-    while   (entry != NULL)
-    {
-	if  (entry->keybase.key.iKey == key)
-	{
-	    /* Match was found, return the data pointer for this entry
-	     */
-	    return  entry->data;
-	}
-	entry = entry->nextEntry;
-    }
-
-    /* If we got here, then we did not find the key
-     */
-    return  NULL;
-}
-
-/** Return the element pointer in the hash table for a particular
- *  key value, or NULL if it does not exist (or was itself NULL).
- */
-static void *
-antlr3HashGet(pANTLR3_HASH_TABLE table, void * key)
-{
-    ANTLR3_UINT32	    hash;
-    pANTLR3_HASH_BUCKET	    bucket;
-    pANTLR3_HASH_ENTRY	    entry;
-
-
-    /* First we need to know the hash of the provided key
-     */
-    hash    = antlr3Hash(key, (ANTLR3_UINT32)strlen((const char *)key));
-
-    /* Knowing the hash, we can find the bucket
-     */
-    bucket  = table->buckets + (hash % table->modulo);
-
-    /* Now we can inspect the key at each entry in the bucket
-     * and see if we have a match.
-     */
-    entry   = bucket->entries;
-
-    while   (entry != NULL)
-    {
-	if  (strcmp((const char *)key, (const char *)entry->keybase.key.sKey) == 0)
-	{
-	    /* Match was found, return the data pointer for this entry
-	     */
-	    return  entry->data;
-	}
-	entry = entry->nextEntry;
-    }
-
-    /* If we got here, then we did not find the key
-     */
-    return  NULL;
-}
-
-/** Add the element pointer into the table, based upon the 
- *  hash of the provided key.
- */
-static	ANTLR3_INT32
-antlr3HashPutI(pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key, void * element, void (ANTLR3_CDECL *freeptr)(void *))
-{
-	ANTLR3_UINT32	    hash;
-	pANTLR3_HASH_BUCKET	    bucket;
-	pANTLR3_HASH_ENTRY	    entry;
-	pANTLR3_HASH_ENTRY	    * newPointer;
-
-	/* First we need to know the hash of the provided key
-	*/
-	hash    = (ANTLR3_UINT32)(key % (ANTLR3_INTKEY)(table->modulo));
-
-	/* Knowing the hash, we can find the bucket
-	*/
-	bucket  = table->buckets + hash;
-
-	/* Knowing the bucket, we can traverse the entries until
-	* we find a NULL pointer or we find that this is already 
-	* in the table and duplicates were not allowed.
-	*/
-	newPointer	= &bucket->entries;
-
-	while   (*newPointer !=  NULL)
-	{
-		/* The value at new pointer is pointing to an existing entry.
-		* If duplicates are allowed then we don't care what it is, but
-		* must reject this add if the key is the same as the one we are
-		* supplied with.
-		*/
-		if  (table->allowDups == ANTLR3_FALSE)
-		{
-			if	((*newPointer)->keybase.key.iKey == key)
-			{
-				return	ANTLR3_ERR_HASHDUP;
-			}
-		}
-
-		/* Point to the next entry pointer of the current entry we
-		* are traversing, if it is NULL we will create our new
-		* structure and point this to it.
-		*/
-		newPointer = &((*newPointer)->nextEntry);
-	}
-
-	/* newPointer is now pointing at the pointer where we need to
-	* add our new entry, so let's create the entry and add it in.
-	*/
-	entry   = (pANTLR3_HASH_ENTRY)ANTLR3_MALLOC((size_t)sizeof(ANTLR3_HASH_ENTRY));
-
-	if	(entry == NULL)
-	{
-		return	ANTLR3_ERR_NOMEM;
-	}
-
-	entry->data			= element;		/* Install the data element supplied			*/
-	entry->free			= freeptr;		/* Function that knows how to release the entry		*/
-	entry->keybase.type		= ANTLR3_HASH_TYPE_INT;	/* Indicate the key type stored here for when we free	*/
-	entry->keybase.key.iKey	= key;			/* Record the key value					*/
-	entry->nextEntry		= NULL;			/* Ensure that the forward pointer ends the chain	*/
-
-	*newPointer	= entry;    /* Install the next entry in this bucket	*/
-
-	table->count++;
-
-	return  ANTLR3_SUCCESS;
-}
-
-
-/** Add the element pointer into the table, based upon the 
- *  hash of the provided key.
- */
-static	ANTLR3_INT32
-antlr3HashPut(pANTLR3_HASH_TABLE table, void * key, void * element, void (ANTLR3_CDECL *freeptr)(void *))
-{
-	ANTLR3_UINT32	    hash;
-	pANTLR3_HASH_BUCKET	    bucket;
-	pANTLR3_HASH_ENTRY	    entry;
-	pANTLR3_HASH_ENTRY	    * newPointer;
-
-	/* First we need to know the hash of the provided key
-	*/
-	hash    = antlr3Hash(key, (ANTLR3_UINT32)strlen((const char *)key));
-
-	/* Knowing the hash, we can find the bucket
-	*/
-	bucket  = table->buckets + (hash % table->modulo);
-
-	/* Knowing the bucket, we can traverse the entries until
-	* we find a NULL pointer or we find that this is already 
-	* in the table and duplicates were not allowed.
-	*/
-	newPointer	= &bucket->entries;
-
-	while   (*newPointer !=  NULL)
-	{
-		/* The value at new pointer is pointing to an existing entry.
-		* If duplicates are allowed then we don't care what it is, but
-		* must reject this add if the key is the same as the one we are
-		* supplied with.
-		*/
-		if  (table->allowDups == ANTLR3_FALSE)
-		{
-			if	(strcmp((const char*) key, (const char *)(*newPointer)->keybase.key.sKey) == 0)
-			{
-				return	ANTLR3_ERR_HASHDUP;
-			}
-		}
-
-		/* Point to the next entry pointer of the current entry we
-		* are traversing, if it is NULL we will create our new
-		* structure and point this to it.
-		*/
-		newPointer = &((*newPointer)->nextEntry);
-	}
-
-	/* newPointer is now pointing at the pointer where we need to
-	* add our new entry, so let's create the entry and add it in.
-	*/
-	entry   = (pANTLR3_HASH_ENTRY)ANTLR3_MALLOC((size_t)sizeof(ANTLR3_HASH_ENTRY));
-
-	if	(entry == NULL)
-	{
-		return	ANTLR3_ERR_NOMEM;
-	}
-
-	entry->data			= element;					/* Install the data element supplied				*/
-	entry->free			= freeptr;					/* Function that knows how to release the entry	    */
-	entry->keybase.type	= ANTLR3_HASH_TYPE_STR;     /* Indicate the key type stored here for free()	    */
-    if  (table->doStrdup == ANTLR3_TRUE)
-    {
-        entry->keybase.key.sKey	= ANTLR3_STRDUP(key);	/* Record the key value								*/
-    }
-    else
-    {
-        entry->keybase.key.sKey	= key;                  /* Record the key value								*/
-    }
-	entry->nextEntry		= NULL;					/* Ensure that the forward pointer ends the chain   */
-
-	*newPointer	= entry;    /* Install the next entry in this bucket	*/
-
-	table->count++;
-
-	return  ANTLR3_SUCCESS;
-}
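-
-// Illustrative sketch of the string-keyed interface installed by
-// antlr3HashTableNew() (not part of the runtime). The payload is a string
-// literal, so no free routine is supplied; the NOMEM case is ignored for brevity.
-//
-static void exampleHashTableUsage(void)
-{
-    pANTLR3_HASH_TABLE  symbols;
-    void              * type;
-
-    symbols = antlr3HashTableNew(11);                       // 11 buckets
-
-    symbols->put(symbols, (void *)"counter", (void *)"int", NULL);
-
-    type = symbols->get(symbols, (void *)"counter");        // -> "int"
-    (void)type;
-
-    symbols->del(symbols, (void *)"counter");               // remove; nothing to free
-    symbols->free(symbols);
-}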
-
-/** \brief Creates an enumeration structure to traverse the hash table.
- *
- * \param table Table to enumerate
- * \return Pointer to enumeration structure.
- */
-pANTLR3_HASH_ENUM
-antlr3EnumNew	(pANTLR3_HASH_TABLE table)
-{
-    pANTLR3_HASH_ENUM	en;
-
-    /* Allocate structure memory
-     */
-    en    = (pANTLR3_HASH_ENUM) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_HASH_ENUM));
-
-    /* Check that the allocation was good 
-     */
-    if	(en == NULL)
-    {
-	return	(pANTLR3_HASH_ENUM) ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
-    }
-    
-    /* Initialize the start pointers
-    */
-    en->table	= table;
-    en->bucket	= 0;				/* First bucket		    */
-    en->entry	= en->table->buckets->entries;	/* First entry to return    */
-
-    /* Special case in that the first bucket may not have anything in it
-     * but the antlr3EnumNext() function expects that the en->entry is
-     * set to the next valid pointer. Hence if it is not a valid element
-     * pointer, attempt to find the next one that is (the table may be empty,
-     * of course).
-     */
-    if	(en->entry == NULL)
-    {
-	antlr3EnumNextEntry(en);
-    }
-
-    /* Install the interface
-     */
-    en->free	=  antlr3EnumFree;
-    en->next	=  antlr3EnumNext;
-
-    /* All is good
-     */
-    return  en;
-}
-
-/** \brief Return the next entry in the hashtable being traversed by the supplied
- *         enumeration.
- *
- * \param[in] en Pointer to the enumeration tracking structure
- * \param key	 Pointer to void pointer, where the key pointer is returned.
- * \param data	 Pointer to void pointer where the data pointer is returned.
- * \return 
- *	- ANTLR3_SUCCESS if there was a next key
- *	- ANTLR3_FAIL	 if there were no more keys
- *
- * \remark
- *  No checking of input structure is performed!
- */
-static int
-antlr3EnumNext	(pANTLR3_HASH_ENUM en, pANTLR3_HASH_KEY * key, void ** data)
-{
-    /* If the current entry is valid, then use it
-     */
-    if  (en->bucket >= en->table->modulo)
-    {
-        /* Already exhausted the table
-         */
-        return	ANTLR3_FAIL;
-    }
-
-    /* Pointers are already set to the current entry to return, or
-     * we would not be at this point in the logic flow.
-     */
-    *key	= &(en->entry->keybase);
-    *data	= en->entry->data;
-
-    /* Return pointers are set up, so now we move the element
-     * pointer to the next in the table (if any).
-     */
-    antlr3EnumNextEntry(en);
-
-    return	ANTLR3_SUCCESS;
-}
-
-/** \brief Local function to advance the entry pointer of an enumeration 
- * structure to the next valid entry (if there is one).
- *
- * \param[in] en Pointer to ANTLR3 enumeration structure returned by antlr3EnumNew()
- *
- * \remark
- *   - The function always leaves the pointers pointing at a valid entry if there
- *     is one, so if the entry pointer is NULL when this function exits, there were
- *     no more entries in the table.
- */
-static void
-antlr3EnumNextEntry(pANTLR3_HASH_ENUM en)
-{
-    pANTLR3_HASH_BUCKET	bucket;
-
-    /* See if the current entry pointer is valid first of all
-     */
-    if	(en->entry != NULL)
-    {
-	/* Current entry was a valid point, see if there is another
-	 * one in the chain.
-	 */
-	if  (en->entry->nextEntry != NULL)
-	{
-	    /* Next entry in the enumeration is just the next entry
-	     * in the chain.
-	     */
-	    en->entry = en->entry->nextEntry;
-	    return;
-	}
-    }
-
-    /* There were no more entries in the current bucket, if there are
-     * more buckets then chase them until we find an entry.
-     */
-    en->bucket++;
-
-    while   (en->bucket < en->table->modulo)
-    {
-	/* There was one more bucket, see if it has any elements in it
-	 */
-	bucket	= en->table->buckets + en->bucket;
-
-	if  (bucket->entries != NULL)
-	{
-	    /* There was an entry in this bucket, so we can use it
-	     * for the next entry in the enumeration.
-	     */
-	    en->entry	= bucket->entries;
-	    return;
-	}
-
-	/* There was nothing in the bucket we just examined, move to the
-	 * next one.
-	 */
-	en->bucket++;
-    }
-
-    /* Here we have exhausted all buckets and the enumeration pointer will 
-     * have its bucket count = table->modulo which signifies that we are done.
-     */
-}
-
-/** \brief Frees up the memory structures that represent a hash table
- *  enumeration.
- * \param[in] en Pointer to ANTLR3 enumeration structure returned by antlr3EnumNew()
- */
-static void
-antlr3EnumFree	(pANTLR3_HASH_ENUM en)
-{
-    /* Nothing to check, we just free it.
-     */
-    ANTLR3_FREE(en);
-}
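-
-// Illustrative sketch of walking a populated table with the enumeration above
-// (not part of the runtime); the NOMEM sentinel returned by antlr3EnumNew() is
-// ignored for brevity.
-//
-static void exampleEnumUsage(pANTLR3_HASH_TABLE table)
-{
-    pANTLR3_HASH_ENUM   en;
-    pANTLR3_HASH_KEY    key;
-    void              * data;
-
-    en = antlr3EnumNew(table);
-
-    while (en->next(en, &key, &data) == ANTLR3_SUCCESS)
-    {
-        // key->key.sKey (or key->key.iKey) identifies the entry, data is its payload
-    }
-
-    en->free(en);
-}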
-
-/** Given an input key of arbitrary length, return a hash value of
- *  it. This can then be used (with suitable modulo) to index other
- *  structures.
- */
-ANTLR3_API ANTLR3_UINT32
-antlr3Hash(void * key, ANTLR3_UINT32 keylen)
-{
-    /* Accumulate the hash value of the key
-     */
-    ANTLR3_UINT32   hash;
-    pANTLR3_UINT8   keyPtr;
-    ANTLR3_UINT32   i1;
-
-    hash    = 0;
-    keyPtr  = (pANTLR3_UINT8) key;
-
-    /* Iterate the key and accumulate the hash
-     */
-    while(keylen > 0)
-    {
-	hash = (hash << 4) + (*(keyPtr++));
-
-	if ((i1=hash&0xf0000000) != 0)
-	{
-		hash = hash ^ (i1 >> 24);
-		hash = hash ^ i1;
-	}
-	keylen--;
-    }
-
-    return  hash;
-}
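-
-// Worked example: this is how the string-keyed put/get routines above map a
-// key to its bucket (strlen() as already used by those routines):
-//
-//    hash   = antlr3Hash((void *)"ident", (ANTLR3_UINT32)strlen("ident"));
-//    bucket = table->buckets + (hash % table->modulo);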
-
-ANTLR3_API  pANTLR3_LIST
-antlr3ListNew	(ANTLR3_UINT32 sizeHint)
-{
-    pANTLR3_LIST    list;
-
-    /* Allocate memory
-     */
-    list    = (pANTLR3_LIST)ANTLR3_MALLOC((size_t)sizeof(ANTLR3_LIST));
-
-    if	(list == NULL)
-    {
-	return	(pANTLR3_LIST)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
-    }
-
-    /* Now we need to add a new table
-     */
-    list->table	= antlr3HashTableNew(sizeHint);
-
-    if	(list->table == (pANTLR3_HASH_TABLE)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM))
-    {
-	return	(pANTLR3_LIST)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
-    }
-
-    /* Allocation was good, install interface
-     */
-    list->free	    =  antlr3ListFree;
-    list->del	    =  antlr3ListDelete;
-    list->get	    =  antlr3ListGet;
-    list->add	    =  antlr3ListAdd;
-    list->remove    =  antlr3ListRemove;
-    list->put	    =  antlr3ListPut;
-    list->size	    =  antlr3ListSize;
-
-    return  list;
-}
-
-static ANTLR3_UINT32	antlr3ListSize	    (pANTLR3_LIST list)
-{
-    return  list->table->size(list->table);
-}
-
-static void
-antlr3ListFree	(pANTLR3_LIST list)
-{
-    /* Free the hashtable that stores the list
-     */
-    list->table->free(list->table);
-
-    /* Free the allocation for the list itself
-     */
-    ANTLR3_FREE(list);
-}
-
-static void
-antlr3ListDelete    (pANTLR3_LIST list, ANTLR3_INTKEY key)
-{
-    list->table->delI(list->table, key);
-}
-
-static void *
-antlr3ListGet	    (pANTLR3_LIST list, ANTLR3_INTKEY key)
-{
-    return list->table->getI(list->table, key);
-}
-
-/** Add the supplied element to the list, at the next available key
- */
-static ANTLR3_INT32	antlr3ListAdd   (pANTLR3_LIST list, void * element, void (ANTLR3_CDECL *freeptr)(void *))
-{
-    ANTLR3_INTKEY   key;
-
-    key	    = list->table->size(list->table) + 1;
-    return list->put(list, key, element, freeptr);
-}
-
-/** Remove from the list, but don't free the element, just send it back to the
- *  caller.
- */
-static	void *
-antlr3ListRemove	    (pANTLR3_LIST list, ANTLR3_INTKEY key)
-{
-    pANTLR3_HASH_ENTRY	    entry;
-
-    entry = list->table->removeI(list->table, key);
-
-    if	(entry != NULL)
-    {
-        return  entry->data;
-    }
-    else
-    {
-	return	NULL;
-    }
-}
-
-static	ANTLR3_INT32
-antlr3ListPut	    (pANTLR3_LIST list, ANTLR3_INTKEY key, void * element, void (ANTLR3_CDECL *freeptr)(void *))
-{
-    return  list->table->putI(list->table, key, element, freeptr);
-}
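-
-// Illustrative sketch of the list interface installed by antlr3ListNew() (not
-// part of the runtime). add() chooses the next integer key itself (size + 1);
-// error handling is elided.
-//
-static void exampleListUsage(void)
-{
-    pANTLR3_LIST    list;
-    void          * element;
-
-    list = antlr3ListNew(31);
-
-    list->add(list, (void *)"first",  NULL);    // stored under key 1
-    list->add(list, (void *)"second", NULL);    // stored under key 2
-
-    element = list->get(list, 2);               // -> "second"
-    element = list->remove(list, 1);            // hand back "first" without freeing it
-    (void)element;
-
-    list->free(list);
-}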
-
-ANTLR3_API  pANTLR3_STACK
-antlr3StackNew	(ANTLR3_UINT32 sizeHint)
-{
-    pANTLR3_STACK   stack;
-
-    /* Allocate memory
-     */
-    stack    = (pANTLR3_STACK)ANTLR3_MALLOC((size_t)sizeof(ANTLR3_STACK));
-
-    if	(stack == NULL)
-    {
-	return	(pANTLR3_STACK)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
-    }
-
-    /* Now we need to add a new table
-     */
-    stack->vector   = antlr3VectorNew(sizeHint);
-    stack->top	    = NULL;
-
-    if	(stack->vector == (pANTLR3_VECTOR)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM))
-    {
-	return	(pANTLR3_STACK)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
-    }
-
-    /* Looks good, now add the interface
-     */
-    stack->get	=  antlr3StackGet;
-    stack->free	=  antlr3StackFree;
-    stack->pop	=  antlr3StackPop;
-    stack->push	=  antlr3StackPush;
-    stack->size	=  antlr3StackSize;
-    stack->peek	=  antlr3StackPeek;
-
-    return  stack;
-}
-
-static ANTLR3_UINT32	antlr3StackSize	    (pANTLR3_STACK stack)
-{
-    return  stack->vector->count;
-}
-
-
-static void
-antlr3StackFree	(pANTLR3_STACK  stack)
-{
-    /* Free the list that supports the stack
-     */
-    stack->vector->free(stack->vector);
-    stack->vector   = NULL;
-    stack->top	    = NULL;
-
-    ANTLR3_FREE(stack);
-}
-
-static void *
-antlr3StackPop	(pANTLR3_STACK	stack)
-{
-    // Delete the element that is currently at the top of the stack
-    //
-    stack->vector->del(stack->vector, stack->vector->count - 1);
-
-    // And get the element that is the now the top of the stack (if anything)
-    // NOTE! This is not quite like a 'real' stack, which would normally return you
-    // the current top of the stack, then remove it from the stack.
-    // TODO: Review this, it is correct for follow sets which is what this was done for
-    //       but is not as obvious when using it as a 'real' stack.
-    //
-    stack->top = stack->vector->get(stack->vector, stack->vector->count - 1);
-    return stack->top;
-}
-
-static void *
-antlr3StackGet	(pANTLR3_STACK stack, ANTLR3_INTKEY key)
-{
-    return  stack->vector->get(stack->vector, (ANTLR3_UINT32)key);
-}
-
-static void *
-antlr3StackPeek	(pANTLR3_STACK	stack)
-{
-    return  stack->top;
-}
-
-static ANTLR3_BOOLEAN 
-antlr3StackPush	(pANTLR3_STACK stack, void * element, void (ANTLR3_CDECL *freeptr)(void *))
-{
-    stack->top	= element;
-    return (ANTLR3_BOOLEAN)(stack->vector->add(stack->vector, element, freeptr));
-}
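-
-// Illustrative sketch of the stack interface (not part of the runtime),
-// showing the pop() behaviour noted above: pop() discards the current top and
-// returns the NEW top, so peek() is the way to read the top before popping.
-//
-static void exampleStackUsage(void)
-{
-    pANTLR3_STACK   scopes;
-    void          * top;
-
-    scopes = antlr3StackNew(8);
-
-    scopes->push(scopes, (void *)"outer", NULL);
-    scopes->push(scopes, (void *)"inner", NULL);
-
-    top = scopes->peek(scopes);     // -> "inner"
-    top = scopes->pop (scopes);     // discards "inner", returns "outer"
-    (void)top;
-
-    scopes->free(scopes);
-}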
-
-ANTLR3_API  pANTLR3_VECTOR
-antlr3VectorNew	(ANTLR3_UINT32 sizeHint)
-{
-	pANTLR3_VECTOR  vector;
-
-
-	// Allocate memory for the vector structure itself
-	//
-	vector  = (pANTLR3_VECTOR) ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_VECTOR)));
-
-	if	(vector == NULL)
-	{
-		return	(pANTLR3_VECTOR)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
-	}
-
-	// Now fill in the defaults
-	//
-    antlr3SetVectorApi(vector, sizeHint);
-
-	// And everything is hunky dory
-	//
-	return  vector;
-}
-
-ANTLR3_API void
-antlr3SetVectorApi  (pANTLR3_VECTOR vector, ANTLR3_UINT32 sizeHint)
-{
-    ANTLR3_UINT32   initialSize;
-
-    // Allow the vector size to be chosen by us, so the input size hint can be zero
-    //
-    if	(sizeHint > ANTLR3_VECTOR_INTERNAL_SIZE)
-    {
-        initialSize = sizeHint;
-    }
-    else
-    {
-        initialSize = ANTLR3_VECTOR_INTERNAL_SIZE;
-    }
-
-    if  (sizeHint > ANTLR3_VECTOR_INTERNAL_SIZE)
-    {
-        vector->elements	= (pANTLR3_VECTOR_ELEMENT)ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_VECTOR_ELEMENT) * initialSize));
-    }
-    else
-    {
-        vector->elements    = vector->internal;
-    }
-
-    if	(vector->elements == NULL)
-    {
-        ANTLR3_FREE(vector);
-        return;
-    }
-
-    // Memory allocated successfully
-    //
-    vector->count			= 0;			// No entries yet of course
-    vector->elementsSize    = initialSize;  // Available entries
-
-    // Now we can install the API
-    //
-    vector->add	    = antlr3VectorAdd;
-    vector->del	    = antlr3VectorDel;
-    vector->get	    = antlr3VectorGet;
-    vector->free    = antlr3VectorFree;
-    vector->set	    = antlr3VectorSet;
-    vector->remove  = antrl3VectorRemove;
-    vector->clear   = antlr3VectorClear;
-    vector->size    = antlr3VectorSize;
-    vector->swap    = antlr3VectorSwap;
-
-    // Assume that this is not a factory made vector
-    //
-    vector->factoryMade	= ANTLR3_FALSE;
-}
-
-// Clear the entries in a vector.
-// Clearing the vector leaves its capacity the same but
-// it walks the entries first to see if any of them
-// have a free routine that must be called.
-//
-static	void				
-antlr3VectorClear	(pANTLR3_VECTOR vector)
-{
-	ANTLR3_UINT32   entry;
-
-	// We must traverse every entry in the vector and if it has
-	// a pointer to a free function then we call it with
-	// the entry pointer
-	//
-	for	(entry = 0; entry < vector->count; entry++)
-	{
-		if  (vector->elements[entry].freeptr != NULL)
-		{
-			vector->elements[entry].freeptr(vector->elements[entry].element);
-		}
-		vector->elements[entry].freeptr    = NULL;
-		vector->elements[entry].element    = NULL;
-	}
-
-	// Having called any free pointers, we just reset the entry count
-	// back to zero.
-	//
-	vector->count	= 0;
-}
-
-static	
-void	ANTLR3_CDECL	antlr3VectorFree    (pANTLR3_VECTOR vector)
-{
-	ANTLR3_UINT32   entry;
-
-	// We must traverse every entry in the vector and if it has
-	// a pointer to a free function then we call it with
-	// the entry pointer
-	//
-	for	(entry = 0; entry < vector->count; entry++)
-	{
-		if  (vector->elements[entry].freeptr != NULL)
-		{
-			vector->elements[entry].freeptr(vector->elements[entry].element);
-		}
-		vector->elements[entry].freeptr    = NULL;
-		vector->elements[entry].element    = NULL;
-	}
-
-	if	(vector->factoryMade == ANTLR3_FALSE)
-	{
-		// The entries are freed, so free the element allocation
-		//
-        if  (vector->elementsSize > ANTLR3_VECTOR_INTERNAL_SIZE)
-        {
-            ANTLR3_FREE(vector->elements);
-        }
-		vector->elements = NULL;
-
-		// Finally, free the allocation for the vector itself
-		//
-		ANTLR3_FREE(vector);
-	}
-}
-
-static	void		antlr3VectorDel	    (pANTLR3_VECTOR vector, ANTLR3_UINT32 entry)
-{
-	// Check this is a valid request first
-	//
-	if	(entry >= vector->count)
-	{
-		return;
-	}
-
-	// Valid request, check for free pointer and call it if present
-	//
-	if	(vector->elements[entry].freeptr != NULL)
-	{
-		vector->elements[entry].freeptr(vector->elements[entry].element);
-		vector->elements[entry].freeptr    = NULL;
-	}
-
-	if	(entry == vector->count - 1)
-	{
-		// Ensure the pointer is never reused by accident, but otherwise just 
-		// decrement the count.
-		//
-		vector->elements[entry].element    = NULL;
-	}
-	else
-	{
-		// Need to shuffle trailing pointers back over the deleted entry
-		//
-		ANTLR3_MEMMOVE(vector->elements + entry, vector->elements + entry + 1, sizeof(ANTLR3_VECTOR_ELEMENT) * (vector->count - entry - 1));
-	}
-
-	// One less entry in the vector now
-	//
-	vector->count--;
-}
-
-static	void *		antlr3VectorGet     (pANTLR3_VECTOR vector, ANTLR3_UINT32 entry)
-{
-	// Ensure this is a valid request
-	//
-	if	(entry < vector->count)
-	{
-		return	vector->elements[entry].element;
-	}
-	else
-	{
-		// I know nothing, Mr. Fawlty!
-		//
-		return	NULL;
-	}
-}
-
-/// Remove the entry from the vector, but do not free any entry, even if it has
-/// a free pointer.
-///
-static	void *		antrl3VectorRemove  (pANTLR3_VECTOR vector, ANTLR3_UINT32 entry)
-{
-	void * element;
-
-	// Check this is a valid request first 
-	//
-	if	(entry >= vector->count)
-	{
-		return NULL;
-	}
-
-	// Valid request, return the stored pointer
-	//
-
-	element				    = vector->elements[entry].element;
-
-	if	(entry == vector->count - 1)
-	{
-		// Ensure the pointer is never reused by accident, but otherwise just 
-		// decrement the count.
-		///
-		vector->elements[entry].element    = NULL;
-		vector->elements[entry].freeptr    = NULL;
-	}
-	else
-	{
-		// Need to shuffle trailing pointers back over the deleted entry
-		//
-		ANTLR3_MEMMOVE(vector->elements + entry, vector->elements + entry + 1, sizeof(ANTLR3_VECTOR_ELEMENT) * (vector->count - entry - 1));
-	}
-
-	// One less entry in the vector now
-	//
-	vector->count--;
-
-	return  element;
-}
-
-static  void
-antlr3VectorResize  (pANTLR3_VECTOR vector, ANTLR3_UINT32 hint)
-{
-	ANTLR3_UINT32	newSize;
-
-	// Need to resize the element pointers. We double the allocation
-	// we already have unless asked for a specific increase.
-    //
-    if (hint == 0 || hint < vector->elementsSize)
-    {
-        newSize = vector->elementsSize * 2;
-    }
-    else
-    {
-        newSize = hint * 2;
-    }
-
-    // Now we know how many we need, so we see if we have just expanded
-    // past the built in vector elements or were already past that
-    //
-    if  (vector->elementsSize > ANTLR3_VECTOR_INTERNAL_SIZE)
-    {
-        // We were already larger than the internal size, so we just
-        // use realloc so that the pointers are copied for us
-        //
-        vector->elements	= (pANTLR3_VECTOR_ELEMENT)ANTLR3_REALLOC(vector->elements, (sizeof(ANTLR3_VECTOR_ELEMENT)* newSize));
-    }
-    else
-    {
-        // The current size was less than or equal to the internal array size and as we always start
-        // with a size that is at least the maximum internal size, then we must need to allocate new memory
-        // for external pointers. We don't want to take the time to calculate if a requested element
-        // is part of the internal or external entries, so we copy the internal ones to the new space
-        //
-        vector->elements	= (pANTLR3_VECTOR_ELEMENT)ANTLR3_MALLOC((sizeof(ANTLR3_VECTOR_ELEMENT)* newSize));
-        ANTLR3_MEMCPY(vector->elements, vector->internal, ANTLR3_VECTOR_INTERNAL_SIZE * sizeof(ANTLR3_VECTOR_ELEMENT));
-    }
-
-	vector->elementsSize	= newSize;
-}
-
-/// Add the supplied pointer and freeing function pointer to the list,
-/// expanding the vector if needed.
-///
-static	ANTLR3_UINT32    antlr3VectorAdd	    (pANTLR3_VECTOR vector, void * element, void (ANTLR3_CDECL *freeptr)(void *))
-{
-	// Do we need to resize the vector table?
-	//
-	if	(vector->count == vector->elementsSize)
-	{
-		antlr3VectorResize(vector, 0);	    // Give no hint, we let it double the current allocation
-	}
-
-	// Insert the new entry
-	//
-	vector->elements[vector->count].element	= element;
-	vector->elements[vector->count].freeptr	= freeptr;
-
-	vector->count++;	    // One more element counted
-
-	return  (ANTLR3_UINT32)(vector->count);
-
-}
-
-/// Replace the element at the specified entry point with the supplied
-/// entry.
-///
-static	ANTLR3_UINT32    
-antlr3VectorSet	    (pANTLR3_VECTOR vector, ANTLR3_UINT32 entry, void * element, void (ANTLR3_CDECL *freeptr)(void *), ANTLR3_BOOLEAN freeExisting)
-{
-
-	// If the vector is currently not big enough, then we expand it
-	//
-	if (entry >= vector->elementsSize)
-	{
-		antlr3VectorResize(vector, entry);	// We will get at least this many 
-	}
-
-	// Valid request, replace the current one, freeing any prior entry if told to
-	//
-	if	(		entry < vector->count						// If actually replacing an element
-			&&	freeExisting								// And told to free any existing element
-			&&	vector->elements[entry].freeptr != NULL		// And the existing element has a free pointer
-		)
-	{
-		vector->elements[entry].freeptr(vector->elements[entry].element);
-	}
-
-	// Install the new pointers
-	//
-	vector->elements[entry].freeptr	= freeptr;
-	vector->elements[entry].element	= element;
-
-	if (entry >= vector->count)
-	{
-		vector->count = entry + 1;
-	}
-	return  (ANTLR3_UINT32)(entry);	    // Indicates the replacement was successful
-
-}
-
-/// Replace the element at the specified entry point with the supplied
-/// entry.
-///
-static	ANTLR3_BOOLEAN
-antlr3VectorSwap	    (pANTLR3_VECTOR vector, ANTLR3_UINT32 entry1, ANTLR3_UINT32 entry2)
-{
-
-    void               * tempEntry;
-    void (ANTLR3_CDECL *freeptr)(void *);
-
-	// If the vector is currently not big enough, then we do nothing
-	//
-	if (entry1 >= vector->elementsSize || entry2 >= vector->elementsSize)
-	{
-        return ANTLR3_FALSE;
-	}
-
-	// Valid request, swap them
-	//
-    tempEntry   = vector->elements[entry1].element;
-    freeptr     = vector->elements[entry1].freeptr;
-
-	// Install the new pointers
-	//
-    vector->elements[entry1].freeptr	= vector->elements[entry2].freeptr;
-	vector->elements[entry1].element	= vector->elements[entry2].element;
-
-	vector->elements[entry2].freeptr	= freeptr;
-	vector->elements[entry2].element	= tempEntry;
-
-	return  ANTLR3_TRUE;
-
-}
-
-static	ANTLR3_UINT32   antlr3VectorSize    (pANTLR3_VECTOR vector)
-{
-    return  vector->count;
-}
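-
-// Illustrative sketch of the vector interface installed by antlr3SetVectorApi()
-// (not part of the runtime). Elements are added with a NULL free pointer so
-// del() and free() will not try to release them; error handling is elided.
-//
-static void exampleVectorUsage(void)
-{
-    pANTLR3_VECTOR  v;
-
-    v = antlr3VectorNew(0);                          // 0 => use the internal default size
-
-    v->add(v, (void *)"a", NULL);                    // entry 0
-    v->add(v, (void *)"b", NULL);                    // entry 1
-    v->set(v, 1, (void *)"B", NULL, ANTLR3_FALSE);   // replace entry 1, do not free "b"
-
-    v->swap(v, 0, 1);                                // now "B", "a"
-    v->del (v, 0);                                   // remove "B", shuffle "a" down
-
-    v->free(v);
-}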
-
-#ifdef ANTLR3_WINDOWS
-#pragma warning	(push)
-#pragma warning (disable : 4100)
-#endif
-/// Vector factory creation
-///
-ANTLR3_API pANTLR3_VECTOR_FACTORY
-antlr3VectorFactoryNew	    (ANTLR3_UINT32 sizeHint)
-{
-	pANTLR3_VECTOR_FACTORY  factory;
-
-	// Allocate memory for the factory
-	//
-	factory = (pANTLR3_VECTOR_FACTORY)ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_VECTOR_FACTORY)));
-
-	if	(factory == NULL)
-	{
-		return	NULL;
-	}
-
-	// Factory memory is good, so create a new vector pool
-	//
-    factory->pools      = NULL;
-    factory->thisPool   = -1;
-
-    newPool(factory);
-
-    // Initialize the API, ignore the hint as this algorithm does
-    // a better job really.
-    //
-    antlr3SetVectorApi(&(factory->unTruc), ANTLR3_VECTOR_INTERNAL_SIZE);
-    
-    factory->unTruc.factoryMade = ANTLR3_TRUE;
-
-	// Install the factory API
-	//
-	factory->close			= closeVectorFactory;
-	factory->newVector		= newVector;
-	factory->returnVector	= returnVector;
-
-	// Create a stack to accumulate reusable vectors
-	//
-	factory->freeStack		= antlr3StackNew(16);
-	return  factory;
-}
-#ifdef ANTLR3_WINDOWS
-#pragma warning	(pop)
-#endif
-
-static	void				
-returnVector		(pANTLR3_VECTOR_FACTORY factory, pANTLR3_VECTOR vector)
-{
-	// First we need to clear out anything that is still in the vector
-	//
-	vector->clear(vector);
-
-	// We have a free stack available so we can add the vector we were
-	// given into the free chain. The vector has to have come from this
-	// factory, so we already know how to release its memory when it
-	// dies by virtue of the factory being closed.
-	//
-	factory->freeStack->push(factory->freeStack, vector, NULL);
-
-	// TODO: remove this line once happy printf("Returned vector %08X to the pool, stack size is %d\n", vector, factory->freeStack->size(factory->freeStack));
-}
-
-static void
-newPool(pANTLR3_VECTOR_FACTORY factory)
-{
-    /* Increment factory count
-     */
-    factory->thisPool++;
-
-    /* Ensure we have enough pointers allocated
-     */
-    factory->pools = (pANTLR3_VECTOR *)
-		     ANTLR3_REALLOC(	(void *)factory->pools,	    /* Current pools pointer (starts at NULL)	*/
-					(ANTLR3_UINT32)((factory->thisPool + 1) * sizeof(pANTLR3_VECTOR *))	/* Memory for new pool pointers */
-					);
-
-    /* Allocate a new pool for the factory
-     */
-    factory->pools[factory->thisPool]	=
-			    (pANTLR3_VECTOR)
-				ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_VECTOR) * ANTLR3_FACTORY_VPOOL_SIZE));
-
-
-    /* Reset the counters
-     */
-    factory->nextVector	= 0;
-
-    /* Done
-     */
-    return;
-}
-
-static  void		
-closeVectorFactory  (pANTLR3_VECTOR_FACTORY factory)
-{
-    pANTLR3_VECTOR      pool;
-    ANTLR3_INT32        poolCount;
-    ANTLR3_UINT32       limit;
-    ANTLR3_UINT32       vector;
-    pANTLR3_VECTOR      check;
-
-	// First, see if we have a free chain stack to release.
-	//
-	if	(factory->freeStack != NULL)
-	{
-		factory->freeStack->free(factory->freeStack);
-	}
-
-    /* We iterate the vector pools one at a time
-     */
-    for (poolCount = 0; poolCount <= factory->thisPool; poolCount++)
-    {
-        /* Pointer to current pool
-         */
-        pool = factory->pools[poolCount];
-
-        /* Work out how many tokens we need to check in this pool.
-         */
-        limit = (poolCount == factory->thisPool ? factory->nextVector : ANTLR3_FACTORY_VPOOL_SIZE);
-
-        /* Marginal condition, we might be at the start of a brand new pool
-         * where the nextToken is 0 and nothing has been allocated.
-         */
-        if (limit > 0)
-        {
-            /* We have some vectors allocated from this pool
-             */
-            for (vector = 0; vector < limit; vector++)
-            {
-                /* Next one in the chain
-                 */
-                check = pool + vector;
-
-                // Call the free function on each of the vectors in the pool,
-                // which in turn will cause any elements it holds that also have a free
-                // pointer to be freed. However, because any vector may be in any other
-                // vector, we don't free the element allocations yet. We do that in
-                // a specific pass, coming up next. The vector free function knows that
-                // this is a factory allocated pool vector and so it won't free things it
-                // should not.
-                //
-                check->free(check);
-            }
-        }
-    }
-
-    /* We iterate the vector pools one at a time once again, but this time
-     * we are going to free up any allocated element pointers. Note that we are doing this
-     * so that we do not try to release vectors twice. When building ASTs we just copy
-     * the vectors all over the place and they may be embedded in this vector pool
-     * numerous times.
-     */
-    for (poolCount = 0; poolCount <= factory->thisPool; poolCount++)
-    {
-        /* Pointer to current pool
-         */
-        pool = factory->pools[poolCount];
-
-        /* Work out how many tokens we need to check in this pool.
-         */
-        limit = (poolCount == factory->thisPool ? factory->nextVector : ANTLR3_FACTORY_VPOOL_SIZE);
-
-        /* Marginal condition, we might be at the start of a brand new pool
-         * where the nextToken is 0 and nothing has been allocated.
-         */
-        if (limit > 0)
-        {
-            /* We have some vectors allocated from this pool
-             */
-            for (vector = 0; vector < limit; vector++)
-            {
-                /* Next one in the chain
-                 */
-                check = pool + vector;
-
-                // Anything in here should be factory made, but we do this just
-                // to triple check. We just free up the elements if they were
-                // allocated beyond the internal size.
-                //
-                if (check->factoryMade == ANTLR3_TRUE && check->elementsSize > ANTLR3_VECTOR_INTERNAL_SIZE)
-                {
-                    ANTLR3_FREE(check->elements);
-                    check->elements = NULL;
-                }
-            }
-        }
-
-        // We can now free this pool allocation as we have called free on every element in every vector
-        // and freed any memory for pointers that grew beyond the internal size limit.
-        //
-        ANTLR3_FREE(factory->pools[poolCount]);
-        factory->pools[poolCount] = NULL;
-    }
-
-    /* All the pools are deallocated we can free the pointers to the pools
-     * now.
-     */
-    ANTLR3_FREE(factory->pools);
-
-    /* Finally, we can free the space for the factory itself
-     */
-    ANTLR3_FREE(factory);
-
-}
-
-static pANTLR3_VECTOR
-newVector(pANTLR3_VECTOR_FACTORY factory)
-{
-    pANTLR3_VECTOR vector;
-
-	// If we have anything on the reclaim stack, reuse it
-	//
-	vector = factory->freeStack->peek(factory->freeStack);
-
-	if  (vector != NULL)
-	{
-		// Cool we got something we could reuse
-		//
-		factory->freeStack->pop(factory->freeStack);
-
-		// TODO: remove this line once happy printf("Reused vector %08X from stack, size is now %d\n", vector, factory->freeStack->size(factory->freeStack));
-		return vector;
-
-	}
-
-	// See if we need a new vector pool before allocating a new
-    // one
-    //
-    if (factory->nextVector >= ANTLR3_FACTORY_VPOOL_SIZE)
-    {
-        // We ran out of vectors in the current pool, so we need a new pool
-        //
-        newPool(factory);
-    }
-
-    // Assuming everything went well (we are trying for performance here, so we do minimal
-    // error checking), we can work out what the pointer is to the next vector.
-    //
-    vector = factory->pools[factory->thisPool] + factory->nextVector;
-    factory->nextVector++;
-
-    // We have our token pointer now, so we can initialize it to the predefined model.
-    //
-    antlr3SetVectorApi(vector, ANTLR3_VECTOR_INTERNAL_SIZE);
-    vector->factoryMade = ANTLR3_TRUE;
-
-    // We know that the pool vectors are created at the default size, which means they
-    // will start off using their internal entry pointers. We must initialize our pool vector
-    // to point to its own internal entry table and not the pre-made one.
-    //
-    vector->elements = vector->internal;
-
-		// TODO: remove this line once happy printf("Used a new vector at %08X from the pools as nothing on the reuse stack\n", vector);
-
-    // And we are done
-    //
-    return vector;
-}
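-
-// Illustrative sketch of the factory above (not part of the runtime): vectors
-// are handed out from pools, may be handed back for reuse, and everything is
-// released in one pass by close().
-//
-static void exampleVectorFactoryUsage(void)
-{
-    pANTLR3_VECTOR_FACTORY  factory;
-    pANTLR3_VECTOR          children;
-
-    factory  = antlr3VectorFactoryNew(0);
-
-    children = factory->newVector(factory);
-    children->add(children, (void *)"child", NULL);
-
-    factory->returnVector(factory, children);    // cleared and queued for reuse
-    factory->close(factory);                     // frees the pools and the free stack
-}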
-
-/** Array of left most significant bit positions for an 8 bit
- *  element provides an efficient way to find the highest bit
- *  that is set in an n byte value (n>0). Assuming the values will all hit the data cache,
- *  coding without conditional elements should allow branch
- *  prediction to work well and of course a parallel instruction cache
- *  will whip through this. Otherwise we must loop shifting a one
- *  bit and masking. The values we tend to be placing in our integer
- *  patricia trie are usually a lot lower than the 64 bits we
- *  allow for the key. Hence there is a lot of redundant looping and
- *  shifting in a while loop. Whereas, the lookup table is just
- *  a few ands and indirect lookups, while testing for 0. This
- *  is likely to be done in parallel on many processors available
- *  when I wrote this. If this code survives as long as yacc, then
- *  I may already be dead by the time you read this and maybe there is
- *  a single machine instruction to perform the operation. What
- *  else are you going to do with all those transistors? Jim 2007
- *
- * The table is probably obvious but it is just the number 0..7
- * of the MSB in each integer value 0..255
- */
-static ANTLR3_UINT8 bitIndex[256] = 
-{ 
-    0,													// 0 - Just for padding
-    0,													// 1
-    1, 1,												// 2..3
-    2, 2, 2, 2,											// 4..7
-    3, 3, 3, 3, 3, 3, 3, 3,								// 8+
-    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,	    // 16+
-    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,	    // 32+
-	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,	    
-    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,	    // 64+
-	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 
-    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,	    // 128+
-	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 
-	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
-};
-
-/** Rather than use the bit index of a trie node to shift
- *  0x01 left that many times, then & with the result, it is
- *  faster to use the bit index as an index into this table
- *  which holds precomputed masks for any of the 64 bits
- *  we need to mask off singly. The data values will stay in
- *  cache while ever a trie is in heavy use, such as in
- *  memoization. It is also pretty enough to be ASCII art.
- */
-static ANTLR3_UINT64 bitMask[64] = 
-{
-    0x0000000000000001ULL, 0x0000000000000002ULL, 0x0000000000000004ULL, 0x0000000000000008ULL,
-    0x0000000000000010ULL, 0x0000000000000020ULL, 0x0000000000000040ULL, 0x0000000000000080ULL,
-    0x0000000000000100ULL, 0x0000000000000200ULL, 0x0000000000000400ULL, 0x0000000000000800ULL,
-    0x0000000000001000ULL, 0x0000000000002000ULL, 0x0000000000004000ULL, 0x0000000000008000ULL,
-    0x0000000000010000ULL, 0x0000000000020000ULL, 0x0000000000040000ULL, 0x0000000000080000ULL,
-    0x0000000000100000ULL, 0x0000000000200000ULL, 0x0000000000400000ULL, 0x0000000000800000ULL,
-    0x0000000001000000ULL, 0x0000000002000000ULL, 0x0000000004000000ULL, 0x0000000008000000ULL,
-    0x0000000010000000ULL, 0x0000000020000000ULL, 0x0000000040000000ULL, 0x0000000080000000ULL,
-    0x0000000100000000ULL, 0x0000000200000000ULL, 0x0000000400000000ULL, 0x0000000800000000ULL,
-    0x0000001000000000ULL, 0x0000002000000000ULL, 0x0000004000000000ULL, 0x0000008000000000ULL,
-    0x0000010000000000ULL, 0x0000020000000000ULL, 0x0000040000000000ULL, 0x0000080000000000ULL,
-    0x0000100000000000ULL, 0x0000200000000000ULL, 0x0000400000000000ULL, 0x0000800000000000ULL,
-    0x0001000000000000ULL, 0x0002000000000000ULL, 0x0004000000000000ULL, 0x0008000000000000ULL,
-    0x0010000000000000ULL, 0x0020000000000000ULL, 0x0040000000000000ULL, 0x0080000000000000ULL,
-    0x0100000000000000ULL, 0x0200000000000000ULL, 0x0400000000000000ULL, 0x0800000000000000ULL,
-    0x1000000000000000ULL, 0x2000000000000000ULL, 0x4000000000000000ULL, 0x8000000000000000ULL
-};
-
-/* INT TRIE Implementation of depth 64 bits, being the number of bits
- * in a 64 bit integer. 
- */
-
-pANTLR3_INT_TRIE
-antlr3IntTrieNew(ANTLR3_UINT32 depth)
-{
-	pANTLR3_INT_TRIE	trie;
-
-	trie    = (pANTLR3_INT_TRIE) ANTLR3_CALLOC(1, sizeof(ANTLR3_INT_TRIE));	/* Base memory required	*/
-
-	if (trie == NULL)
-	{
-		return	(pANTLR3_INT_TRIE) ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
-	}
-
-	/* Now we need to allocate the root node. This makes it easier
-	 * to use the tree as we don't have to do anything special 
-	 * for the root node.
-	 */
-	trie->root	= (pANTLR3_INT_TRIE_NODE) ANTLR3_CALLOC(1, sizeof(ANTLR3_INT_TRIE_NODE));
-
-	if (trie->root == NULL)
-	{
-		ANTLR3_FREE(trie);
-		return	(pANTLR3_INT_TRIE) ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
-	}
-
-	trie->add	= intTrieAdd;
-	trie->del	= intTrieDel;
-	trie->free	= intTrieFree;
-	trie->get	= intTrieGet;
-
-	/* Now we seed the root node with the index being the
-	 * highest left most bit we want to test, which limits the
-	 * keys in the trie. This is the trie 'depth'. The limit for
-	 * this implementation is 63 (bits 0..63).
-	 */
-	trie->root->bitNum = depth;
-
-	/* And as we have nothing in here yet, we set both child pointers
-	 * of the root node to point back to itself.
-	 */
-	trie->root->leftN	= trie->root;
-	trie->root->rightN	= trie->root;
-	trie->count			= 0;
-
-	/* Finally, note that the key for this root node is 0 because
-	 * we use calloc() to initialise it.
-	 */
-
-	return trie;
-}
-
-/** Search the int Trie and return a pointer to the first bucket indexed
- *  by the key if it is contained in the trie, otherwise NULL.
- */
-static	pANTLR3_TRIE_ENTRY   
-intTrieGet	(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key)
-{
-	pANTLR3_INT_TRIE_NODE    thisNode; 
-	pANTLR3_INT_TRIE_NODE    nextNode; 
-
-	if (trie->count == 0)
-	{
-		return NULL;	    /* Nothing in this trie yet	*/
-	}
-	/* Starting at the root node in the trie, compare the bit index
-	 * of the current node with its next child node (starts left from root).
-	 * When the bit index of the child node is greater than the bit index of the current node
-	 * then by definition (as the bit index decreases as we descend the trie)
-	 * we have reached a 'backward' pointer. A backward pointer means we
-	 * have reached the only node that can be reached by the bits given us so far
-	 * and it must either be the key we are looking for, or if not then it
-	 * means the entry was not in the trie, and we return NULL. A backward pointer
-	 * points back in to the tree structure rather than down (deeper) within the
-	 * tree branches.
-	 */
-	thisNode	= trie->root;		/* Start at the root node		*/
-	nextNode	= thisNode->leftN;	/* Examine the left node from the root	*/
-
-	/* While we are descending the tree nodes...
-	 */
-	while (thisNode->bitNum > nextNode->bitNum)
-	{
-		/* Next node now becomes the new 'current' node
-		 */
-		thisNode    = nextNode;
-
-		/* We now test the bit indicated by the bitmap in the next node
-		 * in the key we are searching for. The new next node is the
-		 * right node if that bit is set and the left node if it is not.
-		 */
-		if (key & bitMask[nextNode->bitNum])
-		{
-			nextNode = nextNode->rightN;	/* 1 is right	*/
-		}
-		else
-		{
-			nextNode = nextNode->leftN;		/* 0 is left	*/
-		}
-	}
-
-	/* Here we have reached a node where the bitMap index is lower than
-	 * its parent. This means it is pointing backward in the tree and
-	 * must therefore be a terminal node, being the only point that can
-	 * be reached with the bits seen so far. It is either the actual key
-	 * we wanted, or if that key is not in the trie it is another key
-	 * that is currently the only one that can be reached by those bits.
-	 * That situation would obviously change if the key was to be added
-	 * to the trie.
-	 *
-	 * Hence it only remains to test whether this is actually the key or not.
-	 */
-	if (nextNode->key == key)
-	{
-		/* This was the key, so return the entry pointer
-		 */
-		return	nextNode->buckets;
-	}
-	else
-	{
-		return	NULL;	/* That key is not in the trie (note that we set the pointer to -1 if no payload) */
-	}
-}
-
-
-static	ANTLR3_BOOLEAN		
-intTrieDel	(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key)
-{
-    pANTLR3_INT_TRIE_NODE   p;
-
-    p=trie->root;
-    key = key;
-
-    return ANTLR3_FALSE;
-}
-
-/** Add an entry into the INT trie.
- *  Basically we descend the trie as we do when searching it, which will
- *  locate the only node in the trie that can be reached by the bit pattern of the
- *  key. If the key is actually at that node, then if the trie accepts duplicates
- *  we add the supplied data in a new chained bucket to that data node. If it does
- *  not accept duplicates then we merely return FALSE in case the caller wants to know
- *  whether the key was already in the trie.
- *  If the node we locate is not the key we are looking to add, then we insert a new node
- *  into the trie with a bit index of the leftmost differing bit and the left or right 
- *  node pointing to itself or the data node we are inserting 'before'. 
- */
-static	ANTLR3_BOOLEAN		
-intTrieAdd	(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key, ANTLR3_UINT32 type, ANTLR3_INTKEY intVal, void * data, void (ANTLR3_CDECL *freeptr)(void *))
-{
-	pANTLR3_INT_TRIE_NODE   thisNode;
-	pANTLR3_INT_TRIE_NODE   nextNode;
-	pANTLR3_INT_TRIE_NODE   entNode;
-	ANTLR3_UINT32			depth;
-	pANTLR3_TRIE_ENTRY	    newEnt;
-	pANTLR3_TRIE_ENTRY	    nextEnt;
-	ANTLR3_INTKEY		    xorKey;
-
-	/* Cache the bit depth of this trie, which is always the highest index, 
-	 * which is in the root node
-	 */
-	depth   = trie->root->bitNum;
-
-	thisNode	= trie->root;		/* Start with the root node	    */
-	nextNode	= trie->root->leftN;	/* And assume we start to the left  */
-
-	/* Now find the only node that can be currently reached by the bits in the
-	 * key we are being asked to insert.
-	 */
-	while (thisNode->bitNum  > nextNode->bitNum)
-	{
-		/* Still descending the structure, next node becomes current.
-		 */
-		thisNode = nextNode;
-
-		if (key & bitMask[nextNode->bitNum])
-		{
-			/* Bit at the required index was 1, so traverse the right node from here
-			 */
-			nextNode = nextNode->rightN;
-		}
-		else
-		{
-			/* Bit at the required index was 0, so we traverse to the left
-			 */
-			nextNode = nextNode->leftN;
-		}
-	}
-	/* Here we have located the only node that can be reached by the
-	 * bits in the requested key. It could in fact be that key or the node
-	 * we need to use to insert the new key.
-	 */
-	if (nextNode->key == key)
-	{
-		/* We have located an exact match, but we will only append to the bucket chain
-		 * if this trie accepts duplicate keys.
-		 */
-		if (trie->allowDups == ANTLR3_TRUE)
-		{
-			/* Yes, we are accepting duplicates
-			 */
-			newEnt = (pANTLR3_TRIE_ENTRY)ANTLR3_CALLOC(1, sizeof(ANTLR3_TRIE_ENTRY));
-
-			if (newEnt == NULL)
-			{
-				/* Out of memory, all we can do is return the fact that the insert failed.
-				 */
-				return	ANTLR3_FALSE;
-			}
-
-			/* Otherwise insert this in the chain
-			*/
-			newEnt->type	= type;
-			newEnt->freeptr	= freeptr;
-			if (type == ANTLR3_HASH_TYPE_STR)
-			{
-				newEnt->data.ptr = data;
-			}
-			else
-			{
-				newEnt->data.intVal = intVal;
-			}
-
-			/* We want to be able to traverse the stored elements in the order that they were
-			 * added as duplicate keys. We might need to revise this opinion if we end up having many duplicate keys
-			 * as perhaps reverse order is just as good, so long as it is ordered.
-			 */
-			nextEnt = nextNode->buckets;
-			while (nextEnt->next != NULL)
-			{
-				nextEnt = nextEnt->next;    
-			}
-			nextEnt->next = newEnt;
-
-			trie->count++;
-			return  ANTLR3_TRUE;
-		}
-		else
-		{
-			/* We found the key is already there and we are not allowed duplicates in this
-			 * trie.
-			 */
-			return  ANTLR3_FALSE;
-		}
-	}
-
-	/* Here we have discovered the only node that can be reached by the bits in the key
-	 * but we have found that this node is not the key we need to insert. We must find
-	 * the leftmost bit by which the current key for that node and the new key we are going 
-	 * to insert, differ. While this nested series of ifs may look a bit strange, experimentation
-	 * showed that it allows a machine code path that works well with predicated execution
-	 */
-	xorKey = (key ^ nextNode->key);   /* Gives 1 bits only where they differ then we find the left most 1 bit*/
-
-	/* Most common case is a 32 bit key really
-	 */
-#ifdef	ANTLR3_USE_64BIT
-	if	(xorKey & 0xFFFFFFFF00000000)
-	{
-		if  (xorKey & 0xFFFF000000000000)
-		{
-			if	(xorKey & 0xFF00000000000000)
-			{
-				depth = 56 + bitIndex[((xorKey & 0xFF00000000000000)>>56)];
-			}
-			else
-			{
-				depth = 48 + bitIndex[((xorKey & 0x00FF000000000000)>>48)];
-			}
-		}
-		else
-		{
-			if	(xorKey & 0x0000FF0000000000)
-			{
-				depth = 40 + bitIndex[((xorKey & 0x0000FF0000000000)>>40)];
-			}
-			else
-			{
-				depth = 32 + bitIndex[((xorKey & 0x000000FF00000000)>>32)];
-			}
-		}
-	}
-	else
-#endif
-	{
-		if  (xorKey & 0x00000000FFFF0000)
-		{
-			if	(xorKey & 0x00000000FF000000)
-			{
-				depth = 24 + bitIndex[((xorKey & 0x00000000FF000000)>>24)];
-			}
-			else
-			{
-				depth = 16 + bitIndex[((xorKey & 0x0000000000FF0000)>>16)];
-			}
-		}
-		else
-		{
-			if	(xorKey & 0x000000000000FF00)
-			{
-				depth = 8 + bitIndex[((xorKey & 0x000000000000FF00)>>8)];
-			}
-			else
-			{
-				depth = bitIndex[xorKey & 0x00000000000000FF];
-			}
-		}
-	}
-
-    /* We have located the leftmost differing bit, indicated by the depth variable. So, we know what
-     * bit index we are to insert the new entry at. There are two cases: either the two keys
-     * differ at a bit position that is not currently part of the bit testing, that is, a bit
-     * that is currently being skipped in the indexed comparisons, or they differ on a bit
-     * that is merely lower down in the current bit search. If the bit index chain went bit 4, bit 2 and they differ
-     * at bit 3, then we have the "skipped" bit case. But if that chain was Bit 4, Bit 2 and they differ at bit 1
-     * then we have the easy bit <pun>.
-     *
-     * So, set up to descend the tree again, but this time looking for the insert point
-     * according to whether we skip the bit that differs or not.
-     */
-    thisNode	= trie->root;
-    entNode	= trie->root->leftN;
-
-    /* Note the slight difference in the checks here to cover both cases
-     */
-    while (thisNode->bitNum > entNode->bitNum && entNode->bitNum > depth)
-    {
-	/* Still descending the structure, next node becomes current.
-	 */
-	thisNode = entNode;
-
-	if (key & bitMask[entNode->bitNum])
-	{
-	    /* Bit at the required index was 1, so traverse the right node from here
-	     */
-	    entNode = entNode->rightN;
-	}
-	else
-	{
-	    /* Bit at the required index was 0, so we traverse to the left
-	     */
-	    entNode = entNode->leftN;
-	}
-    }
-
-    /* We have located the correct insert point for this new key, so we need
-     * to allocate our entry and insert it etc.
-     */
-    nextNode	= (pANTLR3_INT_TRIE_NODE)ANTLR3_CALLOC(1, sizeof(ANTLR3_INT_TRIE_NODE));
-    if (nextNode == NULL)
-    {
-	/* All that work and no memory - bummer.
-	 */
-	return	ANTLR3_FALSE;
-    }
-
-    /* Build a new entry block for the new node
-     */
-    newEnt = (pANTLR3_TRIE_ENTRY)ANTLR3_CALLOC(1, sizeof(ANTLR3_TRIE_ENTRY));
-
-    if (newEnt == NULL)
-    {
-	/* Out of memory, all we can do is return the fact that the insert failed.
-	 */
-	return	ANTLR3_FALSE;
-    }
-
-    /* Otherwise enter this in our new node
-    */
-    newEnt->type	= type;
-    newEnt->freeptr	= freeptr;
-    if (type == ANTLR3_HASH_TYPE_STR)
-    {
-	newEnt->data.ptr = data;
-    }
-    else
-    {
-	newEnt->data.intVal = intVal;
-    }
-    /* Install it
-     */
-    nextNode->buckets	= newEnt;
-    nextNode->key	= key;
-    nextNode->bitNum	= depth;
-
-    /* Work out the right and left pointers for this new node, which involve
-     * terminating with the current found node either right or left according
-     * to whether the current index bit is 1 or 0
-     */
-    if (key & bitMask[depth])
-    {
-	nextNode->leftN	    = entNode;	    /* Terminates at previous position	*/
-	nextNode->rightN    = nextNode;	    /* Terminates with itself		*/
-    }
-    else
-    {
-	nextNode->rightN   = entNode;	    /* Terminates at previous position	*/
-	nextNode->leftN    = nextNode;	    /* Terminates with itself		*/		
-    }
-
-    /* Finally, we need to change the pointers at the node we located
-     * for inserting. If the key bit at its index is set then the right
-     * pointer for that node becomes the newly created node, otherwise the left 
-     * pointer does.
-     */
-    if (key & bitMask[thisNode->bitNum] )
-    {
-	thisNode->rightN    = nextNode;
-    }
-    else
-    {
-	thisNode->leftN	    = nextNode;
-    }
-
-    /* Et voila
-     */
-    trie->count++;
-    return  ANTLR3_TRUE;
-
-}
-/** Release memory allocated to this tree.
- *  Basic algorithm is that we do a depth first left descent and free
- *  up any nodes that are not backward pointers.
- */
-static void
-freeIntNode(pANTLR3_INT_TRIE_NODE node)
-{
-    pANTLR3_TRIE_ENTRY	thisEntry;
-    pANTLR3_TRIE_ENTRY	nextEntry;
-
-    /* If this node has a left pointer that is not a back pointer
-     * then recursively call to free this
-     */
-    if (node->bitNum > node->leftN->bitNum)
-    {
-	/* We have a left node that needs descending, so do it.
-	 */
-	freeIntNode(node->leftN);
-    }
-
-    /* The left nodes from here should now be dealt with, so 
-     * we need to descend any right nodes that are not back pointers
-     */
-    if (node->bitNum > node->rightN->bitNum)
-    {
-	/* There are some right nodes to descend and deal with.
-	 */
-	freeIntNode(node->rightN);
-    }
-
-    /* Now all the children are dealt with, we can destroy
-     * this node too
-     */
-    thisEntry	= node->buckets;
-
-    while (thisEntry != NULL)
-    {
-	nextEntry   = thisEntry->next;
-
-	/* Do we need to call a custom free pointer for this string entry?
-	 */
-	if (thisEntry->type == ANTLR3_HASH_TYPE_STR && thisEntry->freeptr != NULL)
-	{
-	    thisEntry->freeptr(thisEntry->data.ptr);
-	}
-
-	/* Now free the data for this bucket entry
-	 */
-	ANTLR3_FREE(thisEntry);
-	thisEntry = nextEntry;	    /* See if there are any more to free    */
-    }
-
-    /* The bucket entry is now gone, so we can free the memory for
-     * the entry itself.
-     */
-    ANTLR3_FREE(node);
-
-    /* And that should be it for everything under this node and itself
-     */
-}
-
-/** Called to free all nodes and the structure itself.
- */
-static	void			
-intTrieFree	(pANTLR3_INT_TRIE trie)
-{
-    /* Descend from the root and free all the nodes
-     */
-    freeIntNode(trie->root);
-
-    /* the nodes are all gone now, so we need only free the memory
-     * for the structure itself
-     */
-    ANTLR3_FREE(trie);
-}
-
-
-/**
- * Allocate and initialize a new ANTLR3 topological sorter, which can be
- * used to define edges that identify numerical node indexes that depend on other
- * numerical node indexes, which can then be sorted topologically such that
- * any node is sorted after all its dependent nodes.
- *
- * Use:
- *
- * \verbatim
-
-  pANTLR3_TOPO topo;
-  topo = antlr3NewTopo();
-
-  if (topo == NULL) { out of memory }
-
-  topo->addEdge(topo, 3, 0); // Node 3 depends on node 0
-  topo->addEdge(topo, 0, 1); // Node 0 depends on node 1
-  topo->sortVector(topo, myVector); // Sort the vector in place (node numbers are the vector entry numbers)
-
- * \endverbatim
- */
-ANTLR3_API pANTLR3_TOPO
-antlr3TopoNew()
-{
-    pANTLR3_TOPO topo = (pANTLR3_TOPO)ANTLR3_MALLOC(sizeof(ANTLR3_TOPO));
-
-    if  (topo == NULL)
-    {
-        return NULL;
-    }
-
-    // Initialize variables
-    //
-
-    topo->visited   = NULL;                 // Don't know how big it is yet
-    topo->limit     = 1;                    // No edges added yet
-    topo->edges     = NULL;                 // No edges added yet
-    topo->sorted    = NULL;                 // Nothing sorted at the start
-    topo->cycle     = NULL;                 // No cycles at the start
-    topo->cycleMark = 0;                    // No cycles at the start
-    topo->hasCycle  = ANTLR3_FALSE;         // No cycle at the start
-    
-    // API
-    //
-    topo->addEdge       = addEdge;
-    topo->sortToArray   = sortToArray;
-    topo->sortVector    = sortVector;
-    topo->free          = freeTopo;
-
-    return topo;
-}
-// Topological sorter
-//
-static  void
-addEdge          (pANTLR3_TOPO topo, ANTLR3_UINT32 edge, ANTLR3_UINT32 dependency)
-{
-    ANTLR3_UINT32   i;
-    ANTLR3_UINT32   maxEdge;
-    pANTLR3_BITSET  edgeDeps;
-
-    if (edge>dependency)
-    {
-        maxEdge = edge;
-    }
-    else
-    {
-        maxEdge = dependency;
-    }
-    // We need to add an edge to say that the node indexed by 'edge' is
-    // dependent on the node indexed by 'dependency'
-    //
-
-    // First, see if we have enough room in the edges array to add the edge.
-    //
-    if (topo->edges == NULL)
-    {
-        // We don't have any edges yet, so create an array to hold them
-        //
-        topo->edges = ANTLR3_CALLOC(sizeof(pANTLR3_BITSET) * (maxEdge + 1), 1);
-        if (topo->edges == NULL)
-        {
-            return;
-        }
-
-        // Set the limit to what we have now
-        //
-        topo->limit = maxEdge + 1;
-    }
-    else if (topo->limit <= maxEdge)
-    {
-        // We have some edges, but not enough
-        //
-        topo->edges = ANTLR3_REALLOC(topo->edges, sizeof(pANTLR3_BITSET) * (maxEdge + 1));
-        if (topo->edges == NULL)
-        {
-            return;
-        }
-
-        // Initialize the new bitmaps to indicate we have no edges defined yet
-        //
-        for (i = topo->limit; i <= maxEdge; i++)
-        {
-            *((topo->edges) + i) = NULL;
-        }
-
-        // Set the limit to what we have now
-        //
-        topo->limit = maxEdge + 1;
-    }
-
-    // If the edge was flagged as depending on itself, then we just
-    // do nothing as it means this routine was just called to add it
-    // into the list of nodes.
-    //
-    if  (edge == dependency)
-    {
-        return;
-    }
-
-    // Pick up the bit map for the requested edge
-    //
-    edgeDeps = *((topo->edges) + edge);
-
-    if  (edgeDeps == NULL)
-    {
-        // No edges are defined yet for this node
-        //
-        edgeDeps                = antlr3BitsetNew(0);
-        *((topo->edges) + edge) = edgeDeps;
-        if (edgeDeps == NULL )
-        {
-            return;  // Out of memory
-        }
-    }
-
-    // Set the bit in the bitmap that corresponds to the requested
-    // dependency.
-    //
-    edgeDeps->add(edgeDeps, dependency);
-
-    // And we are all set
-    //
-    return;
-}
-
-
-/**
- * Given a starting node, descend its dependent nodes (ones that it has edges
- * to) until we find one without edges. Having found a node without edges, we have
- * discovered the bottom of a depth first search, which we can then ascend, adding
- * the nodes in order from the bottom, which gives us the dependency order.
- */
-static void
-DFS(pANTLR3_TOPO topo, ANTLR3_UINT32 node)
-{
-    pANTLR3_BITSET edges;
-
-    // Guard against a revisit and check for cycles
-    //
-    if  (topo->hasCycle == ANTLR3_TRUE)
-    {
-        return; // We don't do anything else if we found a cycle
-    }
-
-    if  (topo->visited->isMember(topo->visited, node))
-    {
-        // Check to see if we found a cycle. To do this we search the
-        // current cycle stack and see if we find this node already in the stack.
-        //
-        ANTLR3_UINT32   i;
-
-        for (i=0; i<topo->cycleMark; i++)
-        {
-            if  (topo->cycle[i] == node)
-            {
-                // Stop! We found a cycle in the input, so rejig the cycle
-                // stack so that it only contains the cycle and set the cycle flag
-                // which will tell the caller what happened
-                //
-                ANTLR3_UINT32 l;
-
-                for (l = i; l < topo->cycleMark; l++)
-                {
-                    topo->cycle[l - i] = topo->cycle[l];    // Move to zero base in the cycle list
-                }
-
-                // Recalculate the limit
-                //
-                topo->cycleMark -= i;
-
-                // Signal disaster
-                //
-                topo->hasCycle = ANTLR3_TRUE;
-            }
-        }
-        return;
-    }
-
-    // So far, no cycles have been found and we have not visited this node yet,
-    // so this node needs to go into the cycle stack before we continue
-    // then we will take it out of the stack once we have descended all its
-    // dependencies.
-    //
-    topo->cycle[topo->cycleMark++] = node;
-
-    // First flag that we have visited this node
-    //
-    topo->visited->add(topo->visited, node);
-
-    // Now, if this node has edges, then we want to ensure we visit
-    // them all before we drop through and add this node into the sorted
-    // list.
-    //
-    edges = *((topo->edges) + node);
-    if  (edges != NULL)
-    {
-        // We have some edges, so visit each of the edge nodes
-        // that have not already been visited.
-        //
-        ANTLR3_UINT32   numBits;	    // How many bits are in the set
-        ANTLR3_UINT32   i;
-        ANTLR3_UINT32   range;
-
-        numBits = edges->numBits(edges);
-        range   = edges->size(edges);   // Number of set bits
-
-        // Stop if we exhaust the bit list or have checked the
-        // number of edges that this node refers to (so we don't
-        // check bits at the end that cannot possibly be set).
-        //
-        for (i=0; i<= numBits && range > 0; i++)
-        {
-            if  (edges->isMember(edges, i))
-            {
-                range--;        // About to check another one
-
-                // Found an edge, make sure we visit and descend it
-                //
-                DFS(topo, i);
-            }
-        }
-    }
-
-    // At this point we will have visited all the dependencies
-    // of this node and they will be ordered (even if there are cycles)
-    // So we just add the node into the sorted list at the
-    // current index position.
-    //
-    topo->sorted[topo->limit++] = node;
-
-    // Remove this node from the cycle list if we have not detected a cycle
-    //
-    if  (topo->hasCycle == ANTLR3_FALSE)
-    {
-        topo->cycleMark--;
-    }
-
-    return;
-}
-
-static  pANTLR3_UINT32
-sortToArray      (pANTLR3_TOPO topo)
-{
-    ANTLR3_UINT32 v;
-    ANTLR3_UINT32 oldLimit;
-
-    // Guard against being called with no edges defined
-    //
-    if  (topo->edges == NULL)
-    {
-        return 0;
-    }
-    // First we need a vector to populate with enough
-    // entries to accommodate the sorted list and another to accommodate
-    // the maximum cycle we could detect, which is all nodes, such as 0->1->2->3->0
-    //
-    topo->sorted    = ANTLR3_MALLOC(topo->limit * sizeof(ANTLR3_UINT32));
-    topo->cycle     = ANTLR3_MALLOC(topo->limit * sizeof(ANTLR3_UINT32));
-
-    // Next we need an empty bitset to show whether we have visited a node
-    // or not. This is the bit that gives us linear time of course as we are essentially
-    // dropping through the nodes in depth first order and when we get to a node that
-    // has no edges, we pop back up the stack adding the nodes we traversed in reverse
-    // order.
-    //
-    topo->visited   = antlr3BitsetNew(0);
-
-    // Now traverse the nodes as if we were just going left to right, but
-    // then descend each node unless it has already been visited.
-    //
-    oldLimit    = topo->limit;     // Number of nodes to traverse linearly
-    topo->limit = 0;               // Next entry in the sorted table
-
-    for (v = 0; v < oldLimit; v++)
-    {
-        // If we did not already visit this node, then descend it until we
-        // get a node without edges or arrive at a node we have already visited.
-        //
-        if  (topo->visited->isMember(topo->visited, v) == ANTLR3_FALSE)
-        {
-            // We have not visited this one so descend it
-            //
-            DFS(topo, v);
-        }
-
-        // Break the loop if we detect a cycle as we have no need to go any
-        // further
-        //
-        if  (topo->hasCycle == ANTLR3_TRUE)
-        {
-            break;
-        }
-    }
-
-    // Reset the limit to the number we recorded; if we hit a
-    // cycle, then limit will have stopped at the node where we
-    // discovered the cycle, but in order to free the edge bitmaps
-    // we need to know how many we may have allocated and traverse them all.
-    //
-    topo->limit = oldLimit;
-
-    // Having traversed all the nodes we were given, we
-    // are guaranteed to have ordered all the nodes or detected a
-    // cycle.
-    //
-    return topo->sorted;
-}
-
-static  void
-sortVector       (pANTLR3_TOPO topo, pANTLR3_VECTOR v)
-{
-    // To sort a vector, we first perform the
-    // sort to an array, then use the results to reorder the vector
-    // we are given. This is just a convenience routine that allows you to
-    // sort the children of a tree node into topological order before or
-    // during an AST walk. This can be useful for optimizations that require
-    // dag reorders and also when the input stream defines things that are
-    // interdependent and you want to walk the list of the generated trees
-    // for those things in topological order so you can ignore the interdependencies
-    // at that point.
-    //
-    ANTLR3_UINT32 i;
-
-    // Used as a lookup index to find the current location in the vector of
-    // the vector entry that was originally at position [0], [1], [2] etc
-    //
-    pANTLR3_UINT32  vIndex;
-
-    // Sort into an array, then we can use the array that is
-    // stored in the topo
-    //
-    if  (topo->sortToArray(topo) == 0)
-    {
-        return;     // There were no edges
-    }
-
-    if  (topo->hasCycle == ANTLR3_TRUE)
-    {
-        return;  // Do nothing if we detected a cycle
-    }
-
-    // Ensure that the vector we are sorting is at least as big as
-    // the input sequence we were asked to sort. It does not matter if it is
-    // bigger, as that probably just means that nodes numbered higher than the
-    // limit had no dependencies and so can be left alone.
-    //
-    if  (topo->limit > v->count)
-    {
-        // We can only sort the entries that we have dude! The caller is
-        // responsible for ensuring the vector is the correct one and is the
-        // correct size etc.
-        //
-        topo->limit = v->count;
-    }
-    // We need to know the locations of each of the entries
-    // in the vector as we don't want to duplicate them in a new vector. We
-    // just use an indirection table to get the vector entry for a particular sequence
-    // according to where we moved it last. Then we can just swap vector entries until
-    // we are done :-)
-    //
-    vIndex = ANTLR3_MALLOC(topo->limit * sizeof(ANTLR3_UINT32));
-
-    // Start index, each vector entry is located where you think it is
-    //
-    for (i = 0; i < topo->limit; i++)
-    {
-        vIndex[i] = i;
-    }
-
-    // Now we traverse the sorted array and move the entries of
-    // the vector around according to the sort order and the indirection
-    // table we just created. The index tells us where in the vector the
-    // original element entry n is now located via vIndex[n].
-    //
-    for (i=0; i < topo->limit; i++)
-    {
-        ANTLR3_UINT32   ind;
-
-        // If the vector entry at i is already the one that it
-        // should be, then we skip moving it of course.
-        //
-        if  (vIndex[topo->sorted[i]] == i)
-        {
-            continue;
-        }
-
-        // The vector entry at i, should be replaced with the
-        // vector entry indicated by topo->sorted[i]. The vector entry
-        // at topo->sorted[i] may have already been swapped out though, so we
-        // find where it is now and move it from there to i.
-        //
-        ind     = vIndex[topo->sorted[i]];
-        v->swap(v, i, ind);
-
-        // Update our index. The element at i is now the one we wanted
-        // to be sorted here and the element we swapped out is now the
-        // element that was at i just before we swapped it. If you are lost now
-        // don't worry about it, we are just reindexing on the fly is all.
-        //
-        vIndex[topo->sorted[i]] = i;
-        vIndex[i] = ind;
-    }
-
-    // Having traversed all the entries, we have sorted the vector in place.
-    //
-    ANTLR3_FREE(vIndex);
-    return;
-}
-
-static  void
-freeTopo             (pANTLR3_TOPO topo)
-{
-    ANTLR3_UINT32   i;
-
-    // Free the result vector
-    //
-    if  (topo->sorted != NULL)
-    {
-        ANTLR3_FREE(topo->sorted);
-        topo->sorted = NULL;
-    }
-
-    // Free the visited map
-    //
-    if  (topo->visited != NULL)
-    {
-
-        topo->visited->free(topo->visited);
-        topo->visited = NULL;
-    }
-
-    // Free any edgemaps
-    //
-    if  (topo->edges != NULL)
-    {
-        pANTLR3_BITSET edgeList;
-
-        
-        for (i=0; i<topo->limit; i++)
-        {
-            edgeList = *((topo->edges) + i);
-            if  (edgeList != NULL)
-            {
-                edgeList->free(edgeList);
-            }
-        }
-
-        ANTLR3_FREE(topo->edges);
-    }
-    topo->edges = NULL;
-    
-    // Free any cycle map
-    //
-    if  (topo->cycle != NULL)
-    {
-        ANTLR3_FREE(topo->cycle);
-    }
-
-    ANTLR3_FREE(topo);
-}
diff --git a/antlr-3.4/runtime/C/src/antlr3commontoken.c b/antlr-3.4/runtime/C/src/antlr3commontoken.c
deleted file mode 100644
index 2627431..0000000
--- a/antlr-3.4/runtime/C/src/antlr3commontoken.c
+++ /dev/null
@@ -1,586 +0,0 @@
-/**
- * Contains the default implementation of the common token used within
- * the runtime. Custom tokens should create this structure and then append to it using the
- * custom pointer to install their own structure and API.
- */
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3.h>
-
-/* Token API
- */
-static  pANTLR3_STRING	getText					(pANTLR3_COMMON_TOKEN token);
-static  void			setText					(pANTLR3_COMMON_TOKEN token, pANTLR3_STRING text);
-static  void			setText8				(pANTLR3_COMMON_TOKEN token, pANTLR3_UINT8 text);
-static	ANTLR3_UINT32   getType					(pANTLR3_COMMON_TOKEN token);
-static  void			setType					(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 type);
-static  ANTLR3_UINT32   getLine					(pANTLR3_COMMON_TOKEN token);
-static  void			setLine					(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 line);
-static  ANTLR3_INT32    getCharPositionInLine	(pANTLR3_COMMON_TOKEN token);
-static  void			setCharPositionInLine	(pANTLR3_COMMON_TOKEN token, ANTLR3_INT32 pos);
-static  ANTLR3_UINT32   getChannel				(pANTLR3_COMMON_TOKEN token);
-static  void			setChannel				(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 channel);
-static  ANTLR3_MARKER   getTokenIndex			(pANTLR3_COMMON_TOKEN token);
-static  void			setTokenIndex			(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER);
-static  ANTLR3_MARKER   getStartIndex			(pANTLR3_COMMON_TOKEN token);
-static  void			setStartIndex			(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER index);
-static  ANTLR3_MARKER   getStopIndex			(pANTLR3_COMMON_TOKEN token);
-static  void			setStopIndex			(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER index);
-static  pANTLR3_STRING  toString				(pANTLR3_COMMON_TOKEN token);
-
-/* Factory API
- */
-static	void			factoryClose	(pANTLR3_TOKEN_FACTORY factory);
-static	pANTLR3_COMMON_TOKEN	newToken	(void);
-static  void			setInputStream	(pANTLR3_TOKEN_FACTORY factory, pANTLR3_INPUT_STREAM input);
-static	void                    factoryReset    (pANTLR3_TOKEN_FACTORY factory);
-
-/* Internal management functions
- */
-static	void			newPool		(pANTLR3_TOKEN_FACTORY factory);
-static	pANTLR3_COMMON_TOKEN    newPoolToken	(pANTLR3_TOKEN_FACTORY factory);
-
-
-ANTLR3_API pANTLR3_COMMON_TOKEN
-antlr3CommonTokenNew(ANTLR3_UINT32 ttype)
-{
-	pANTLR3_COMMON_TOKEN    token;
-
-	// Create a raw token with the interface installed
-	//
-	token   = newToken();
-
-	if	(token != NULL)
-	{
-		token->setType(token, ttype);
-	}
-
-	// All good
-	//
-	return  token;
-}
-
-ANTLR3_API pANTLR3_TOKEN_FACTORY
-antlr3TokenFactoryNew(pANTLR3_INPUT_STREAM input)
-{
-    pANTLR3_TOKEN_FACTORY   factory;
-
-    /* allocate memory
-     */
-    factory	= (pANTLR3_TOKEN_FACTORY) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_TOKEN_FACTORY));
-
-    if	(factory == NULL)
-    {
-	return	NULL;
-    }
-
-    /* Install factory API
-     */
-    factory->newToken	    = newPoolToken;
-    factory->close	    = factoryClose;
-    factory->setInputStream = setInputStream;
-    factory->reset          = factoryReset;
-    
-    /* Allocate the initial pool
-     */
-    factory->thisPool	= -1;
-    factory->pools      = NULL;
-    factory->maxPool    = -1;
-    newPool(factory);
-
-    /* Factory space is good, we now want to initialize our cheating token,
-     * which once it is initialized is the model for all tokens we manufacture
-     */
-    antlr3SetTokenAPI(&factory->unTruc);
-
-    /* Set some initial variables for future copying
-     */
-    factory->unTruc.factoryMade	= ANTLR3_TRUE;
-
-    // Input stream
-    //
-    setInputStream(factory, input);
-    
-    return  factory;
-
-}
-
-static void
-setInputStream	(pANTLR3_TOKEN_FACTORY factory, pANTLR3_INPUT_STREAM input)
-{
-    factory->input          =  input;
-    factory->unTruc.input   =  input;
-	if	(input != NULL)
-	{
-		factory->unTruc.strFactory	= input->strFactory;
-	}
-	else
-	{
-		factory->unTruc.strFactory = NULL;
-    }
-}
-
-static void
-newPool(pANTLR3_TOKEN_FACTORY factory)
-{
-    /* Increment factory count
-     */
-    factory->thisPool++;
-
-    // If we were reusing this token factory then we may already have a pool
-    // allocated. If we exceeded the max available then we must allocate a new
-    // one.
-    if  (factory->thisPool > factory->maxPool)
-    {
-        /* Ensure we have enough pointers allocated
-         */
-        factory->pools = (pANTLR3_COMMON_TOKEN *)
-		         ANTLR3_REALLOC(	(void *)factory->pools,	    /* Current pools pointer (starts at NULL)	*/
-					    (ANTLR3_UINT32)((factory->thisPool + 1) * sizeof(pANTLR3_COMMON_TOKEN *))	/* Memory for new pool pointers */
-					    );
-
-        /* Allocate a new pool for the factory
-         */
-        factory->pools[factory->thisPool]	=
-			        (pANTLR3_COMMON_TOKEN) 
-				    ANTLR3_CALLOC(1, (size_t)(sizeof(ANTLR3_COMMON_TOKEN) * ANTLR3_FACTORY_POOL_SIZE));
-
-        // We now have a new pool and can track it as the maximum we have created so far
-        //
-        factory->maxPool = factory->thisPool;
-    }
-
-    /* Reset the counters
-     */
-    factory->nextToken	= 0;
-  
-    /* Done
-     */
-    return;
-}
-
-static pANTLR3_COMMON_TOKEN
-newPoolToken(pANTLR3_TOKEN_FACTORY factory)
-{
-    pANTLR3_COMMON_TOKEN token;
-
-    /* See if we need a new token pool before allocating a new
-     * one
-     */
-    if (factory->nextToken >= ANTLR3_FACTORY_POOL_SIZE)
-    {
-        /* We ran out of tokens in the current pool, so we need a new pool
-         */
-        newPool(factory);
-    }
-
-    /* Assuming everything went well (we are trying for performance here, so doing minimal
-     * error checking), we can work out what the pointer is to the next token.
-     */
-    token = factory->pools[factory->thisPool] + factory->nextToken;
-    factory->nextToken++;
-
-    /* We have our token pointer now, so we can initialize it to the predefined model.
-     * We only need do this though if the token is not already initialized, we just check
-     * an api function pointer for this as they are allocated via calloc.
-     */
-    if  (token->setStartIndex == NULL)
-    {
-        antlr3SetTokenAPI(token);
-
-        // It is factory made, and we need to copy the string factory pointer
-        //
-        token->factoryMade  = ANTLR3_TRUE;
-        token->strFactory   = factory->input == NULL ? NULL : factory->input->strFactory;
-        token->input        = factory->input;
-    }
-
-    /* And we are done
-     */
-    return token;
-}
-
-static	void
-factoryReset	    (pANTLR3_TOKEN_FACTORY factory)
-{
-    // Just start again with pool #0 when we are
-    // called.
-    //
-    factory->thisPool   = -1;
-    newPool(factory);
-}
-
-static	void
-factoryClose	    (pANTLR3_TOKEN_FACTORY factory)
-{
-    pANTLR3_COMMON_TOKEN    pool;
-    ANTLR3_INT32	    poolCount;
-    ANTLR3_UINT32	    limit;
-    ANTLR3_UINT32	    token;
-    pANTLR3_COMMON_TOKEN    check;
-
-    /* We iterate the token pools one at a time
-     */
-    for	(poolCount = 0; poolCount <= factory->thisPool; poolCount++)
-    {
-	/* Pointer to current pool
-	 */
-	pool	= factory->pools[poolCount];
-
-	/* Work out how many tokens we need to check in this pool.
-	 */
-	limit	= (poolCount == factory->thisPool ? factory->nextToken : ANTLR3_FACTORY_POOL_SIZE);
-	
-	/* Marginal condition, we might be at the start of a brand new pool
-	 * where the nextToken is 0 and nothing has been allocated.
-	 */
-	if  (limit > 0)
-	{
-	    /* We have some tokens allocated from this pool
-	     */
-	    for (token = 0; token < limit; token++)
-	    {
-		/* Next one in the chain
-		 */
-		check	= pool + token;
-
-		/* If the programmer made this a custom token, then
-		 * see if we need to call their free routine.
-		 */
-		if  (check->custom != NULL && check->freeCustom != NULL)
-		{
-		    check->freeCustom(check->custom);
-		    check->custom = NULL;
-		}
-	    }
-	}
-
-	/* We can now free this pool allocation
-	 */
-	ANTLR3_FREE(factory->pools[poolCount]);
-	factory->pools[poolCount] = NULL;
-    }
-
-    /* All the pools are deallocated, so we can free the pointers to the pools
-     * now.
-     */
-    ANTLR3_FREE(factory->pools);
-
-    /* Finally, we can free the space for the factory itself
-     */
-    ANTLR3_FREE(factory);
-}
-
-
-static	pANTLR3_COMMON_TOKEN	
-newToken(void)
-{
-    pANTLR3_COMMON_TOKEN    token;
-
-    /* Allocate memory for this
-     */
-    token   = (pANTLR3_COMMON_TOKEN) ANTLR3_CALLOC(1, (size_t)(sizeof(ANTLR3_COMMON_TOKEN)));
-
-    if	(token == NULL)
-    {
-	return	NULL;
-    }
-
-    // Install the API
-    //
-    antlr3SetTokenAPI(token);
-    token->factoryMade = ANTLR3_FALSE;
-
-    return  token;
-}
-
-ANTLR3_API void
-antlr3SetTokenAPI(pANTLR3_COMMON_TOKEN token)
-{
-    token->getText		    = getText;
-    token->setText		    = setText;
-    token->setText8		    = setText8;
-    token->getType		    = getType;
-    token->setType		    = setType;
-    token->getLine		    = getLine;
-    token->setLine		    = setLine;
-    token->getCharPositionInLine    = getCharPositionInLine;
-    token->setCharPositionInLine    = setCharPositionInLine;
-    token->getChannel		    = getChannel;
-    token->setChannel		    = setChannel;
-    token->getTokenIndex	    = getTokenIndex;
-    token->setTokenIndex	    = setTokenIndex;
-    token->getStartIndex	    = getStartIndex;
-    token->setStartIndex	    = setStartIndex;
-    token->getStopIndex		    = getStopIndex;
-    token->setStopIndex		    = setStopIndex;
-    token->toString		    = toString;
-
-    return;
-}
-
-static  pANTLR3_STRING  getText			(pANTLR3_COMMON_TOKEN token)
-{
-	switch (token->textState)
-	{
-		case ANTLR3_TEXT_STRING:
-
-			// Someone already created a string for this token, so we just
-			// use it.
-			//
-			return	token->tokText.text;
-			break;
-    
-		case ANTLR3_TEXT_CHARP:
-
-			// We had a straight text pointer installed, now we
-			// must convert it to a string. Note we have to do this here
-			// or otherwise setText8() will just install the same char*
-			//
-			if	(token->strFactory != NULL)
-			{
-				token->tokText.text	= token->strFactory->newStr8(token->strFactory, (pANTLR3_UINT8)token->tokText.chars);
-				token->textState	= ANTLR3_TEXT_STRING;
-				return token->tokText.text;
-			}
-			else
-			{
-				// We cannot do anything here
-				//
-				return NULL;
-			}
-			break;
-
-		default:
-
-			// EOF is a special case
-			//
-			if (token->type == ANTLR3_TOKEN_EOF)
-			{
-				token->tokText.text				= token->strFactory->newStr8(token->strFactory, (pANTLR3_UINT8)"<EOF>");
-				token->textState				= ANTLR3_TEXT_STRING;
-				token->tokText.text->factory	= token->strFactory;
-				return token->tokText.text;
-			}
-
-
-			// We had nothing installed in the token, create a new string
-			// from the input stream
-			//
-
-			if	(token->input != NULL)
-			{
-			
-				return	token->input->substr(	token->input, 
-												token->getStartIndex(token), 
- 												token->getStopIndex(token)
-											);
-			}
-
-			// Nothing to return, there is no input stream
-			//
-			return NULL;
-			break;
-	}
-}
-static  void		setText8		(pANTLR3_COMMON_TOKEN token, pANTLR3_UINT8 text)
-{
-	// No text to set, so ignore
-	//
-	if	(text == NULL) return;
-
-	switch	(token->textState)
-	{
-		case	ANTLR3_TEXT_NONE:
-		case	ANTLR3_TEXT_CHARP:	// Caller must free before setting again, if it needs to be freed
-
-			// Nothing in there yet, or just a char *, so just set the
-			// text as a pointer
-			//
-			token->textState		= ANTLR3_TEXT_CHARP;
-			token->tokText.chars	= (pANTLR3_UCHAR)text;
-			break;
-
-		default:
-
-			// It was already a pANTLR3_STRING, so just override it
-			//
-			token->tokText.text->set8(token->tokText.text, (const char *)text);
-			break;
-	}
-
-	// We are done 
-	//
-	return;
-}
-
-/** \brief Install the supplied text string as the text for the token.
- * The method assumes that the existing text (if any) was created by a factory
- * and so does not attempt to release any memory it is using. Text not created
- * by a string factory (not advised) should be released prior to this call.
- */
-static  void		setText			(pANTLR3_COMMON_TOKEN token, pANTLR3_STRING text)
-{
-	// Merely replaces an existing pre-defined text with the supplied
-	// string
-	//
-	token->textState	= ANTLR3_TEXT_STRING;
-	token->tokText.text	= text;
-
-	/* We are done 
-	*/
-	return;
-}
-
-static	ANTLR3_UINT32   getType			(pANTLR3_COMMON_TOKEN token)
-{
-    return  token->type;
-}
-
-static  void		setType			(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 type)
-{
-    token->type = type;
-}
-
-static  ANTLR3_UINT32   getLine			(pANTLR3_COMMON_TOKEN token)
-{
-    return  token->line;
-}
-
-static  void		setLine			(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 line)
-{
-    token->line = line;
-}
-
-static  ANTLR3_INT32    getCharPositionInLine	(pANTLR3_COMMON_TOKEN token)
-{
-    return  token->charPosition;
-}
-
-static  void		setCharPositionInLine	(pANTLR3_COMMON_TOKEN token, ANTLR3_INT32 pos)
-{
-    token->charPosition = pos;
-}
-
-static  ANTLR3_UINT32   getChannel		(pANTLR3_COMMON_TOKEN token)
-{
-    return  token->channel;
-}
-
-static  void		setChannel		(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 channel)
-{
-    token->channel  = channel;
-}
-
-static  ANTLR3_MARKER   getTokenIndex		(pANTLR3_COMMON_TOKEN token)
-{
-    return  token->index;
-}
-
-static  void		setTokenIndex		(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER index)
-{
-    token->index    = index;
-}
-
-static  ANTLR3_MARKER   getStartIndex		(pANTLR3_COMMON_TOKEN token)
-{
-	return  token->start == -1 ? (ANTLR3_MARKER)(token->input->data) : token->start;
-}
-
-static  void		setStartIndex		(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER start)
-{
-    token->start    = start;
-}
-
-static  ANTLR3_MARKER   getStopIndex		(pANTLR3_COMMON_TOKEN token)
-{
-    return  token->stop;
-}
-
-static  void		setStopIndex		(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER stop)
-{
-    token->stop	= stop;
-}
-
-static  pANTLR3_STRING    toString		(pANTLR3_COMMON_TOKEN token)
-{
-    pANTLR3_STRING  text;
-    pANTLR3_STRING  outtext;
-
-    text    =	token->getText(token);
-    
-    if	(text == NULL)
-    {
-		return NULL;
-    }
-
-	if	(text->factory == NULL)
-	{
-		return text;		// This usually means it is the EOF token
-	}
-
-    /* A new empty string to assemble all the stuff in
-     */
-    outtext = text->factory->newRaw(text->factory);
-
-    /* Now we use our handy dandy string utility to assemble
-     * the reporting string
-     * return "[@"+getTokenIndex()+","+start+":"+stop+"='"+txt+"',<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+"]";
-     */
-    outtext->append8(outtext, "[Index: ");
-    outtext->addi   (outtext, (ANTLR3_INT32)token->getTokenIndex(token));
-    outtext->append8(outtext, " (Start: ");
-    outtext->addi   (outtext, (ANTLR3_INT32)token->getStartIndex(token));
-    outtext->append8(outtext, "-Stop: ");
-    outtext->addi   (outtext, (ANTLR3_INT32)token->getStopIndex(token));
-    outtext->append8(outtext, ") ='");
-    outtext->appendS(outtext, text);
-    outtext->append8(outtext, "', type<");
-    outtext->addi   (outtext, token->type);
-    outtext->append8(outtext, "> ");
-
-    if	(token->getChannel(token) > ANTLR3_TOKEN_DEFAULT_CHANNEL)
-    {
-		outtext->append8(outtext, "(channel = ");
-		outtext->addi	(outtext, (ANTLR3_INT32)token->getChannel(token));
-		outtext->append8(outtext, ") ");
-    }
-
-    outtext->append8(outtext, "Line: ");
-    outtext->addi   (outtext, (ANTLR3_INT32)token->getLine(token));
-    outtext->append8(outtext, " LinePos:");
-    outtext->addi   (outtext, token->getCharPositionInLine(token));
-    outtext->addc   (outtext, ']');
-
-    return  outtext;
-}
-
diff --git a/antlr-3.4/runtime/C/src/antlr3commontree.c b/antlr-3.4/runtime/C/src/antlr3commontree.c
deleted file mode 100644
index 65de38f..0000000
--- a/antlr-3.4/runtime/C/src/antlr3commontree.c
+++ /dev/null
@@ -1,547 +0,0 @@
-// \file
-//
-// Implementation of ANTLR3 CommonTree, which you can use as a
-// starting point for your own tree. Though it is often easier just to tag things on
-// to the user pointer in the tree unless you are building a different type
-// of structure.
-//
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3commontree.h>
-
-
-static pANTLR3_COMMON_TOKEN getToken				(pANTLR3_BASE_TREE tree);
-static pANTLR3_BASE_TREE    dupNode					(pANTLR3_BASE_TREE tree);
-static ANTLR3_BOOLEAN	    isNilNode					(pANTLR3_BASE_TREE tree);
-static ANTLR3_UINT32	    getType					(pANTLR3_BASE_TREE tree);
-static pANTLR3_STRING	    getText					(pANTLR3_BASE_TREE tree);
-static ANTLR3_UINT32	    getLine					(pANTLR3_BASE_TREE tree);
-static ANTLR3_UINT32	    getCharPositionInLine	(pANTLR3_BASE_TREE tree);
-static pANTLR3_STRING	    toString				(pANTLR3_BASE_TREE tree);
-static pANTLR3_BASE_TREE	getParent				(pANTLR3_BASE_TREE tree);
-static void					setParent				(pANTLR3_BASE_TREE tree, pANTLR3_BASE_TREE parent);
-static void    				setChildIndex			(pANTLR3_BASE_TREE tree, ANTLR3_INT32 i);
-static ANTLR3_INT32			getChildIndex			(pANTLR3_BASE_TREE tree);
-static void					createChildrenList		(pANTLR3_BASE_TREE tree);
-static void                 reuse                   (pANTLR3_BASE_TREE tree);
-
-// Factory functions for the Arboretum
-//
-static void					newPool				(pANTLR3_ARBORETUM factory);
-static pANTLR3_BASE_TREE    newPoolTree			(pANTLR3_ARBORETUM factory);
-static pANTLR3_BASE_TREE    newFromTree			(pANTLR3_ARBORETUM factory, pANTLR3_COMMON_TREE tree);
-static pANTLR3_BASE_TREE    newFromToken		(pANTLR3_ARBORETUM factory, pANTLR3_COMMON_TOKEN token);
-static void					factoryClose		(pANTLR3_ARBORETUM factory);
-
-ANTLR3_API pANTLR3_ARBORETUM
-antlr3ArboretumNew(pANTLR3_STRING_FACTORY strFactory)
-{
-    pANTLR3_ARBORETUM   factory;
-
-    // Allocate memory
-    //
-    factory	= (pANTLR3_ARBORETUM) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_ARBORETUM));
-    if	(factory == NULL)
-    {
-		return	NULL;
-    }
-
-	// Install a vector factory to create, track and free() any child
-	// node lists.
-	//
-	factory->vFactory					= antlr3VectorFactoryNew(0);
-	if	(factory->vFactory == NULL)
-	{
-		free(factory);
-		return	NULL;
-	}
-
-    // We also keep a reclaim stack, so that any Nil nodes that are
-    // orphaned are not just left in the pool but are reused, otherwise
-    // we create 6 times as many nilNodes as ordinary nodes and use loads of
-    // memory. Perhaps at some point, the analysis phase will generate better
-    // code and we won't need to do this here.
-    //
-    factory->nilStack       =  antlr3StackNew(0);
-
-    // Install factory API
-    //
-    factory->newTree	    =  newPoolTree;
-    factory->newFromTree    =  newFromTree;
-    factory->newFromToken   =  newFromToken;
-    factory->close			=  factoryClose;
-
-    // Allocate the initial pool
-    //
-    factory->thisPool	= -1;
-    factory->pools		= NULL;
-    newPool(factory);
-
-    // Factory space is good, we now want to initialize our cheating token,
-    // which once it is initialized is the model for all tokens we manufacture
-    //
-    antlr3SetCTAPI(&factory->unTruc);
-
-    // Set some initial variables for future copying, including a string factory
-    // that we can use later for converting trees to strings.
-    //
-	factory->unTruc.factory				= factory;
-    factory->unTruc.baseTree.strFactory	= strFactory;
-
-    return  factory;
-
-}
-
-static void
-newPool(pANTLR3_ARBORETUM factory)
-{
-    // Increment factory count
-    //
-    factory->thisPool++;
-
-    // Ensure we have enough pointers allocated
-    //
-    factory->pools = (pANTLR3_COMMON_TREE *)
-					ANTLR3_REALLOC(	(void *)factory->pools,										// Current pools pointer (starts at NULL)
-					(ANTLR3_UINT32)((factory->thisPool + 1) * sizeof(pANTLR3_COMMON_TREE *))	// Memory for new pool pointers
-					);
-
-    // Allocate a new pool for the factory
-    //
-    factory->pools[factory->thisPool]	=
-			    (pANTLR3_COMMON_TREE) 
-				ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_COMMON_TREE) * ANTLR3_FACTORY_POOL_SIZE));
-
-
-    // Reset the counters
-    //
-    factory->nextTree	= 0;
-  
-    // Done
-    //
-    return;
-}
-
-static	pANTLR3_BASE_TREE    
-newPoolTree	    (pANTLR3_ARBORETUM factory)
-{
-	pANTLR3_COMMON_TREE    tree;
-
-    // If we have anything on the reclaim stack, reuse that sucker first
-    //
-    tree = factory->nilStack->peek(factory->nilStack);
-
-    if  (tree != NULL)
-    {
-        // Cool, we got something we can reuse; it will have been cleaned up by
-        // whatever put it back on the stack (for instance, if it had a child vector,
-        // that will have been cleared to hold zero entries and will be reused too).
-        // It is the base tree pointer that is placed on the stack, of course.
-        //
-        factory->nilStack->pop(factory->nilStack);
-        return (pANTLR3_BASE_TREE)tree;
-
-    }
-	// See if we need a new tree pool before allocating a new tree
-	//
-	if	(factory->nextTree >= ANTLR3_FACTORY_POOL_SIZE)
-	{
-		// We ran out of tree slots in the current pool, so we need a new pool
-		//
-		newPool(factory);
-	}
-
-	// Assuming everything went well - we are trying for performance here so doing minimal
-	// error checking - then we can work out what the pointer is to the next commontree.
-	//
-	tree   = factory->pools[factory->thisPool] + factory->nextTree;
-	factory->nextTree++;
-
-	// We have our tree pointer now, so we can initialize it to the predefined model.
-	//
-    antlr3SetCTAPI(tree);
-
-    // Set some initial variables for future copying, including a string factory
-    // that we can use later for converting trees to strings.
-    //
-	tree->factory				= factory;
-    tree->baseTree.strFactory	= factory->unTruc.baseTree.strFactory;
-
-	// The super points to the common tree, so we must override the one used by
-	// the pre-built tree, as otherwise we will always point to the same initial
-	// common tree and might spend 3 hours trying to debug why - this would never
-	// happen to me of course! :-(
-	//
-	tree->baseTree.super	= tree;
-
-
-	// And we are done
-	//
-	return  &(tree->baseTree);
-}
-
-
-static pANTLR3_BASE_TREE	    
-newFromTree(pANTLR3_ARBORETUM factory, pANTLR3_COMMON_TREE tree)
-{
-	pANTLR3_BASE_TREE	newTree;
-
-	newTree = factory->newTree(factory);
-
-	if	(newTree == NULL)
-	{
-		return	NULL;
-	}
-
-	// Pick up the payload we had in the supplied tree
-	//
-	((pANTLR3_COMMON_TREE)(newTree->super))->token   = tree->token;
-	newTree->u		    = tree->baseTree.u;							// Copy any user pointer
-
-	return  newTree;
-}
-
-static pANTLR3_BASE_TREE	    
-newFromToken(pANTLR3_ARBORETUM factory, pANTLR3_COMMON_TOKEN token)
-{
-	pANTLR3_BASE_TREE	newTree;
-
-	newTree = factory->newTree(factory);
-
-	if	(newTree == NULL)
-	{
-		return	NULL;
-	}
-
-	// Pick up the payload from the supplied token
-	//
-	((pANTLR3_COMMON_TREE)(newTree->super))->token = token;
-
-	return newTree;
-}
-
-static	void
-factoryClose	    (pANTLR3_ARBORETUM factory)
-{
-	ANTLR3_INT32	    poolCount;
-
-	// First close the vector factory that supplied all the child pointer
-	// vectors.
-	//
-	factory->vFactory->close(factory->vFactory);
-
-    if  (factory->nilStack !=  NULL)
-    {
-        factory->nilStack->free(factory->nilStack);
-    }
-
-	// We now JUST free the pools because the C runtime CommonToken based tree
-	// cannot contain anything that was not made by this factory.
-	//
-	for	(poolCount = 0; poolCount <= factory->thisPool; poolCount++)
-	{
-		// We can now free this pool allocation
-		//
-		ANTLR3_FREE(factory->pools[poolCount]);
-		factory->pools[poolCount] = NULL;
-	}
-
-	// All the pools are deallocated we can free the pointers to the pools
-	// now.
-	//
-	ANTLR3_FREE(factory->pools);
-
-	// Finally, we can free the space for the factory itself
-	//
-	ANTLR3_FREE(factory);
-}
-
-
-ANTLR3_API void 
-antlr3SetCTAPI(pANTLR3_COMMON_TREE tree)
-{
-    // Init base tree
-    //
-    antlr3BaseTreeNew(&(tree->baseTree));
-
-    // We need a pointer to ourselves for 
-    // the payload and few functions that we
-    // provide.
-    //
-    tree->baseTree.super    =  tree;
-
-    // Common tree overrides
-
-    tree->baseTree.isNilNode                = isNilNode;
-    tree->baseTree.toString					= toString;
-    tree->baseTree.dupNode					= (void *(*)(pANTLR3_BASE_TREE))(dupNode);
-    tree->baseTree.getLine					= getLine;
-    tree->baseTree.getCharPositionInLine	= getCharPositionInLine;
-    tree->baseTree.toString					= toString;
-    tree->baseTree.getType					= getType;
-    tree->baseTree.getText					= getText;
-    tree->baseTree.getToken					= getToken;
-	tree->baseTree.getParent				= getParent;
-	tree->baseTree.setParent				= setParent;
-	tree->baseTree.setChildIndex			= setChildIndex;
-	tree->baseTree.getChildIndex			= getChildIndex;
-	tree->baseTree.createChildrenList		= createChildrenList;
-    tree->baseTree.reuse                    = reuse;
-	tree->baseTree.free						= NULL;	    // Factory trees have no free function
-    tree->baseTree.u                        = NULL;     // Initialize user pointer            
-
-	tree->baseTree.children	= NULL;
-
-    tree->token				= NULL;	// No token as yet
-    tree->startIndex		= 0;
-    tree->stopIndex			= 0;
-	tree->parent			= NULL;	// No parent yet
-	tree->childIndex		= -1;
-
-    return;
-}
-
-// --------------------------------------
-// Non factory node constructors.
-//
-
-ANTLR3_API pANTLR3_COMMON_TREE
-antlr3CommonTreeNew()
-{
-	pANTLR3_COMMON_TREE	tree;
-	tree    = ANTLR3_CALLOC(1, sizeof(ANTLR3_COMMON_TREE));
-
-	if	(tree == NULL)
-	{
-		return NULL;
-	}
-
-	antlr3SetCTAPI(tree);
-
-	return tree;
-}
-
-ANTLR3_API pANTLR3_COMMON_TREE	    
-antlr3CommonTreeNewFromToken(pANTLR3_COMMON_TOKEN token)
-{
-	pANTLR3_COMMON_TREE	newTree;
-
-	newTree = antlr3CommonTreeNew();
-
-	if	(newTree == NULL)
-	{
-		return	NULL;
-	}
-
-	// Pick up the payload from the supplied token
-	//
-	newTree->token = token;
-	return newTree;
-}
-
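-// Illustrative sketch: using the non-factory constructors directly. `tok` is
-// assumed to come from elsewhere; nodes built this way are allocated with
-// ANTLR3_CALLOC rather than drawn from an arboretum pool.
-//
-static void commonTreeSketch(pANTLR3_COMMON_TOKEN tok)
-{
-    pANTLR3_COMMON_TREE node;
-
-    node = antlr3CommonTreeNewFromToken(tok);
-    if  (node == NULL)
-    {
-        return;
-    }
-
-    // antlr3SetCTAPI() has installed the common tree overrides, so the base
-    // tree interface below resolves to the functions defined in this file.
-    //
-    if  (node->baseTree.isNilNode(&(node->baseTree)) == ANTLR3_FALSE)
-    {
-        ANTLR3_UINT32 type = node->baseTree.getType(&(node->baseTree));
-        (void)type;     // e.g. dispatch on the token type here
-    }
-}
-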
-/// Create a new vector for holding child nodes using the inbuilt
-/// vector factory.
-///
-static void
-createChildrenList  (pANTLR3_BASE_TREE tree)
-{
-	tree->children = ((pANTLR3_COMMON_TREE)(tree->super))->factory->vFactory->newVector(((pANTLR3_COMMON_TREE)(tree->super))->factory->vFactory);
-}
-
-
-static pANTLR3_COMMON_TOKEN 
-getToken			(pANTLR3_BASE_TREE tree)
-{
-    // The token is the payload of the common tree or other implementor,
-    // so it is stored within ourselves, which is the super pointer. Note
-	// that whatever the actual token is, it is passed around by its pointer
-	// to the common token implementation, which it may of course surround
-	// with its own super structure.
-    //
-    return  ((pANTLR3_COMMON_TREE)(tree->super))->token;
-}
-
-static pANTLR3_BASE_TREE    
-dupNode			(pANTLR3_BASE_TREE tree)
-{
-    // The node we are duplicating is in fact the common tree (that's why we are here)
-    // so we use the super pointer to duplicate.
-    //
-    pANTLR3_COMMON_TREE	    theOld;
-    
-	theOld	= (pANTLR3_COMMON_TREE)(tree->super);
-
-	// The pointer we return is the base implementation of course
-    //
-	return  theOld->factory->newFromTree(theOld->factory, theOld);
-}
-
-static ANTLR3_BOOLEAN	    
-isNilNode			(pANTLR3_BASE_TREE tree)
-{
-	// This is a Nil tree if it has no payload (Token in our case)
-	//
-	if	(((pANTLR3_COMMON_TREE)(tree->super))->token == NULL)
-	{
-		return ANTLR3_TRUE;
-	}
-	else
-	{
-		return ANTLR3_FALSE;
-	}
-}
-
-static ANTLR3_UINT32	    
-getType			(pANTLR3_BASE_TREE tree)
-{
-	pANTLR3_COMMON_TREE    theTree;
-
-	theTree = (pANTLR3_COMMON_TREE)(tree->super);
-
-	if	(theTree->token == NULL)
-	{
-		return	0;
-	}
-	else
-	{
-		return	theTree->token->getType(theTree->token);
-	}
-}
-
-static pANTLR3_STRING	    
-getText			(pANTLR3_BASE_TREE tree)
-{
-	return	tree->toString(tree);
-}
-
-static ANTLR3_UINT32	    getLine			(pANTLR3_BASE_TREE tree)
-{
-	pANTLR3_COMMON_TREE	    cTree;
-	pANTLR3_COMMON_TOKEN    token;
-
-	cTree   = (pANTLR3_COMMON_TREE)(tree->super);
-
-	token   = cTree->token;
-
-	if	(token == NULL || token->getLine(token) == 0)
-	{
-		if  (tree->getChildCount(tree) > 0)
-		{
-			pANTLR3_BASE_TREE	child;
-
-			child   = (pANTLR3_BASE_TREE)tree->getChild(tree, 0);
-			return child->getLine(child);
-		}
-		return 0;
-	}
-	return  token->getLine(token);
-}
-
-static ANTLR3_UINT32	    getCharPositionInLine	(pANTLR3_BASE_TREE tree)
-{
-	pANTLR3_COMMON_TOKEN    token;
-
-	token   = ((pANTLR3_COMMON_TREE)(tree->super))->token;
-
-	if	(token == NULL || token->getCharPositionInLine(token) == -1)
-	{
-		if  (tree->getChildCount(tree) > 0)
-		{
-			pANTLR3_BASE_TREE	child;
-
-			child   = (pANTLR3_BASE_TREE)tree->getChild(tree, 0);
-
-			return child->getCharPositionInLine(child);
-		}
-		return 0;
-	}
-	return  token->getCharPositionInLine(token);
-}
-
-static pANTLR3_STRING	    toString			(pANTLR3_BASE_TREE tree)
-{
-	if  (tree->isNilNode(tree) == ANTLR3_TRUE)
-	{
-		pANTLR3_STRING  nilNode;
-
-		nilNode	= tree->strFactory->newPtr(tree->strFactory, (pANTLR3_UINT8)"nil", 3);
-
-		return nilNode;
-	}
-
-	return	((pANTLR3_COMMON_TREE)(tree->super))->token->getText(((pANTLR3_COMMON_TREE)(tree->super))->token);
-}
-
-static pANTLR3_BASE_TREE	
-getParent				(pANTLR3_BASE_TREE tree)
-{
-	return & (((pANTLR3_COMMON_TREE)(tree->super))->parent->baseTree);
-}
-
-static void					
-setParent				(pANTLR3_BASE_TREE tree, pANTLR3_BASE_TREE parent)
-{
-	((pANTLR3_COMMON_TREE)(tree->super))->parent = parent == NULL ? NULL : ((pANTLR3_COMMON_TREE)(parent->super))->parent;
-}
-
-static void    				
-setChildIndex			(pANTLR3_BASE_TREE tree, ANTLR3_INT32 i)
-{
-	((pANTLR3_COMMON_TREE)(tree->super))->childIndex = i;
-}
-static	ANTLR3_INT32			
-getChildIndex			(pANTLR3_BASE_TREE tree )
-{
-	return ((pANTLR3_COMMON_TREE)(tree->super))->childIndex;
-}
-
-/** Clean up any child vector that the tree might have, so it can be reused,
- *  then add it into the reuse stack.
- */
-static void
-reuse                   (pANTLR3_BASE_TREE tree)
-{
-    pANTLR3_COMMON_TREE	    cTree;
-
-	cTree   = (pANTLR3_COMMON_TREE)(tree->super);
-
-    if  (cTree->factory != NULL)
-    {
-
-        if  (cTree->baseTree.children != NULL)
-        {
-            
-            cTree->baseTree.children->clear(cTree->baseTree.children);
-        }
-       cTree->factory->nilStack->push(cTree->factory->nilStack, tree, NULL);
-       
-    }
-}
diff --git a/antlr-3.4/runtime/C/src/antlr3commontreeadaptor.c b/antlr-3.4/runtime/C/src/antlr3commontreeadaptor.c
deleted file mode 100644
index abce6f0..0000000
--- a/antlr-3.4/runtime/C/src/antlr3commontreeadaptor.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/** \file
- * This is the standard tree adaptor used by the C runtime unless the grammar
- * source file says to use anything different. It embeds a BASE_TREE to which
- * it adds its own implementation of anything that the base tree is not 
- * good for, plus a number of methods that any other adaptor type
- * needs to implement too.
- * \ingroup pANTLR3_COMMON_TREE_ADAPTOR
- */
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3commontreeadaptor.h>
-
-#ifdef	ANTLR3_WINDOWS
-#pragma warning( disable : 4100 )
-#endif
-
-/* BASE_TREE_ADAPTOR overrides... */
-static	pANTLR3_BASE_TREE		dupNode					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE treeNode);
-static	pANTLR3_BASE_TREE		create					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN payload);
-static	pANTLR3_BASE_TREE		dbgCreate				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN payload);
-static	pANTLR3_COMMON_TOKEN	createToken				(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text);
-static	pANTLR3_COMMON_TOKEN	createTokenFromToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN fromToken);
-static	pANTLR3_COMMON_TOKEN    getToken				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	pANTLR3_STRING			getText					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	ANTLR3_UINT32			getType					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	pANTLR3_BASE_TREE		getChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i);
-static	ANTLR3_UINT32			getChildCount			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	void					replaceChildren			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE t);
-static	void					setDebugEventListener	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_DEBUG_EVENT_LISTENER debugger);
-static  void					setChildIndex			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_INT32 i);
-static  ANTLR3_INT32			getChildIndex			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static	void					setParent				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE child, pANTLR3_BASE_TREE parent);
-static	pANTLR3_BASE_TREE    	getParent				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE child);
-static  void					setChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i, pANTLR3_BASE_TREE child);
-static	void					deleteChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i);
-static	pANTLR3_BASE_TREE		errorNode				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_TOKEN_STREAM ctnstream, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken, pANTLR3_EXCEPTION e);
-/* Methods specific to each tree adaptor
- */
-static	void			setTokenBoundaries		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken);
-static	void			dbgSetTokenBoundaries	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken);
-static	ANTLR3_MARKER   getTokenStartIndex		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-static  ANTLR3_MARKER   getTokenStopIndex		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
-
-static	void		ctaFree			(pANTLR3_BASE_TREE_ADAPTOR adaptor);
-
-/** Create a new tree adaptor. Note that despite the fact that this is
- *  creating a new COMMON_TREE adaptor, we return the address of the
- *  BASE_TREE interface, as should any other adaptor that wishes to be 
- *  used as the tree element of a tree parse/build. It needs to be given the
- *  address of a valid string factory as we do not know what the originating
- *  input stream encoding type was. This way we can rely on just using
- *  the original input stream's string factory or one of the correct type
- *  which the user supplies us.
- */
-ANTLR3_API pANTLR3_BASE_TREE_ADAPTOR
-ANTLR3_TREE_ADAPTORNew(pANTLR3_STRING_FACTORY strFactory)
-{
-	pANTLR3_COMMON_TREE_ADAPTOR	cta;
-
-	// First job is to create the memory we need for the tree adaptor interface.
-	//
-	cta	= (pANTLR3_COMMON_TREE_ADAPTOR) ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_COMMON_TREE_ADAPTOR)));
-
-	if	(cta == NULL)
-	{
-		return	NULL;
-	}
-
-	// Memory is initialized, so initialize the base tree adaptor
-	//
-	antlr3BaseTreeAdaptorInit(&(cta->baseAdaptor), NULL);
-
-	// Install our interface overrides. The casts look strange, but they allow
-	// generated code to treat these methods as returning void *
-	//
-	cta->baseAdaptor.dupNode				=  (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
-													dupNode;
-	cta->baseAdaptor.create					=  (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_COMMON_TOKEN))
-													create;
-	cta->baseAdaptor.createToken			=  
-													createToken;
-	cta->baseAdaptor.createTokenFromToken   =  
-													createTokenFromToken;
-	cta->baseAdaptor.setTokenBoundaries	    =  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, pANTLR3_COMMON_TOKEN, pANTLR3_COMMON_TOKEN))
-													setTokenBoundaries;
-	cta->baseAdaptor.getTokenStartIndex	    =  (ANTLR3_MARKER  (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                    getTokenStartIndex;
-	cta->baseAdaptor.getTokenStopIndex	    =  (ANTLR3_MARKER  (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                    getTokenStopIndex;
-	cta->baseAdaptor.getText				=  (pANTLR3_STRING (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                    getText;
-	cta->baseAdaptor.getType				=  (ANTLR3_UINT32  (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                    getType;
-	cta->baseAdaptor.getChild				=  (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32))
-                                                    getChild;
-	cta->baseAdaptor.setChild				=  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32, void *))
-                                                    setChild;
-	cta->baseAdaptor.setParent				=  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
-                                                    setParent;
-    cta->baseAdaptor.getParent				=  (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                    getParent;
-	cta->baseAdaptor.setChildIndex			=  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32))
-                                                    setChildIndex;
-	cta->baseAdaptor.deleteChild			=  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32))
-                                                    deleteChild;
-	cta->baseAdaptor.getChildCount			=  (ANTLR3_UINT32  (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                    getChildCount;
-	cta->baseAdaptor.getChildIndex			=  (ANTLR3_INT32  (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
-                                                    getChildIndex;
-	cta->baseAdaptor.free					=  (void  (*) (pANTLR3_BASE_TREE_ADAPTOR))
-                                                    ctaFree;
-	cta->baseAdaptor.setDebugEventListener	=  
-													setDebugEventListener;
-	cta->baseAdaptor.replaceChildren		=  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_INT32, ANTLR3_INT32, void *))
-                                                    replaceChildren;
-	cta->baseAdaptor.errorNode				=  (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_TOKEN_STREAM, pANTLR3_COMMON_TOKEN, pANTLR3_COMMON_TOKEN, pANTLR3_EXCEPTION))
-                                                    errorNode;
-
-	// Install the super class pointer
-	//
-	cta->baseAdaptor.super	    = cta;
-
-	// Install a tree factory for creating new tree nodes
-	//
-	cta->arboretum  = antlr3ArboretumNew(strFactory);
-
-	// Install a token factory for imaginary tokens, these imaginary
-	// tokens do not require access to the input stream so we can
-	// dummy the creation of it, but they will need a string factory.
-	//
-	cta->baseAdaptor.tokenFactory						= antlr3TokenFactoryNew(NULL);
-	cta->baseAdaptor.tokenFactory->unTruc.strFactory	= strFactory;
-
-	// Allow the base tree adaptor to share the tree factory's string factory.
-	//
-	cta->baseAdaptor.strFactory	= strFactory;
-
-	// Return the address of the base adaptor interface.
-	//
-	return  &(cta->baseAdaptor);
-}
-
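-// Illustrative sketch: creating the common tree adaptor and building a node
-// from a real token. `strFactory` and `tok` are assumed to come from the
-// surrounding parser setup; the cast mirrors the generated-code convention
-// of passing nodes around as void *.
-//
-static void adaptorUsageSketch(pANTLR3_STRING_FACTORY strFactory, pANTLR3_COMMON_TOKEN tok)
-{
-    pANTLR3_BASE_TREE_ADAPTOR   adaptor;
-    pANTLR3_BASE_TREE           node;
-
-    adaptor = ANTLR3_TREE_ADAPTORNew(strFactory);
-    if  (adaptor == NULL)
-    {
-        return;
-    }
-
-    // create() is installed with a void * signature for the generated code,
-    // hence the cast back to the base tree interface.
-    //
-    node = (pANTLR3_BASE_TREE) adaptor->create(adaptor, tok);
-    (void)node;
-
-    // Freeing the adaptor closes its arboretum and token factory too
-    //
-    adaptor->free(adaptor);
-}
-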
-/// Debugging version of the tree adaptor. This is not normally called directly, as
-/// generated code calls setDebugEventListener instead, which changes a normal token
-/// stream to a debugging stream and means that a user's instantiation code does not
-/// need to be changed just to debug with AW (ANTLRWorks).
-///
-ANTLR3_API pANTLR3_BASE_TREE_ADAPTOR
-ANTLR3_TREE_ADAPTORDebugNew(pANTLR3_STRING_FACTORY strFactory, pANTLR3_DEBUG_EVENT_LISTENER	debugger)
-{
-	pANTLR3_BASE_TREE_ADAPTOR	ta;
-
-	// Create a normal one first
-	//
-	ta	= ANTLR3_TREE_ADAPTORNew(strFactory);
-	
-	if	(ta != NULL)
-	{
-		// Reinitialize as a debug version
-		//
-		antlr3BaseTreeAdaptorInit(ta, debugger);
-		ta->create				= (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_COMMON_TOKEN))
-									dbgCreate;
-		ta->setTokenBoundaries	= (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, pANTLR3_COMMON_TOKEN, pANTLR3_COMMON_TOKEN))
-									dbgSetTokenBoundaries;
-	}
-
-	return	ta;
-}
-
-/// Causes an existing common tree adaptor to become a debug version
-///
-static	void
-setDebugEventListener	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_DEBUG_EVENT_LISTENER debugger)
-{
-	// Reinitialize as a debug version
-	//
-	antlr3BaseTreeAdaptorInit(adaptor, debugger);
-
-	adaptor->create				= (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_COMMON_TOKEN))
-                                    dbgCreate;
-	adaptor->setTokenBoundaries	= (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, pANTLR3_COMMON_TOKEN, pANTLR3_COMMON_TOKEN))
-                                    dbgSetTokenBoundaries;
-
-}
-
-static void
-ctaFree(pANTLR3_BASE_TREE_ADAPTOR adaptor)
-{
-    pANTLR3_COMMON_TREE_ADAPTOR cta;
-
-    cta	= (pANTLR3_COMMON_TREE_ADAPTOR)(adaptor->super);
-
-    /* Free the tree factory we created
-     */
-    cta->arboretum->close(((pANTLR3_COMMON_TREE_ADAPTOR)(adaptor->super))->arboretum);
-
-    /* Free the token factory we created
-     */
-    adaptor->tokenFactory->close(adaptor->tokenFactory);
-
-    /* Free the super pointer, as it is this that was allocated
-     * and is the common tree structure.
-     */
-    ANTLR3_FREE(adaptor->super);
-}
-
-/* BASE_TREE_ADAPTOR overrides */
-
-static	pANTLR3_BASE_TREE
-errorNode				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_TOKEN_STREAM ctnstream, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken, pANTLR3_EXCEPTION e)
-{
-	// Use the supplied common tree node stream to get another tree from the factory
-	// TODO: Look at creating the erronode as in Java, but this is complicated by the
-	// TODO: Look at creating the error node as in Java, but this is complicated by the
-	// want something in the tree that isn't a NULL pointer.
-	//
-	return adaptor->createTypeText(adaptor, ANTLR3_TOKEN_INVALID, (pANTLR3_UINT8)"Tree Error Node");
-
-}
-
-/** Duplicate the supplied node.
- */
-static	pANTLR3_BASE_TREE
-dupNode		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE treeNode)
-{
-	return  treeNode == NULL ? NULL : treeNode->dupNode(treeNode);
-}
-
-static	pANTLR3_BASE_TREE
-create		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN payload)
-{
-    pANTLR3_BASE_TREE	ct;
-    
-    /* Create a new common tree as this is what this adaptor deals with
-     */
-    ct = ((pANTLR3_COMMON_TREE_ADAPTOR)(adaptor->super))->arboretum->newFromToken(((pANTLR3_COMMON_TREE_ADAPTOR)(adaptor->super))->arboretum, payload);
-
-    /* But all adaptors return the pointer to the base interface.
-     */
-    return  ct;
-}
-static	pANTLR3_BASE_TREE
-dbgCreate		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN payload)
-{
-	pANTLR3_BASE_TREE	ct;
-
-	ct = create(adaptor, payload);
-	adaptor->debugger->createNode(adaptor->debugger, ct);
-
-	return ct;
-}
-
-/** Tell me how to create a token for use with imaginary token nodes.
- *  For example, there is probably no input symbol associated with imaginary
- *  token DECL, but you need to create it as a payload or whatever for
- *  the DECL node as in ^(DECL type ID).
- *
- *  If you care what the token payload objects' type is, you should
- *  override this method and any other createToken variant.
- */
-static	pANTLR3_COMMON_TOKEN
-createToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text)
-{
-    pANTLR3_COMMON_TOKEN    newToken;
-
-    newToken	= adaptor->tokenFactory->newToken(adaptor->tokenFactory);
-
-    if	(newToken != NULL)
-    {	
-		newToken->textState		= ANTLR3_TEXT_CHARP;
-		newToken->tokText.chars = (pANTLR3_UCHAR)text;
-		newToken->setType(newToken, tokenType);
-		newToken->input				= adaptor->tokenFactory->input;
-        newToken->strFactory        = adaptor->strFactory;
-    }
-    return  newToken;
-}
-
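-// Illustrative sketch: manufacturing an imaginary node such as the DECL/BLOCK
-// examples above. `blockType` stands in for a token type constant that a
-// generated parser would supply; the text is just an illustrative constant.
-//
-static pANTLR3_BASE_TREE makeImaginaryNode(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 blockType)
-{
-    pANTLR3_COMMON_TOKEN    imaginary;
-
-    // No input symbol backs this token; the adaptor's token factory makes it
-    //
-    imaginary = adaptor->createToken(adaptor, blockType, (pANTLR3_UINT8)"BLOCK");
-
-    return (pANTLR3_BASE_TREE) adaptor->create(adaptor, imaginary);
-}
-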
-/** Tell me how to create a token for use with imaginary token nodes.
- *  For example, there is probably no input symbol associated with imaginary
- *  token DECL, but you need to create it as a payload or whatever for
- *  the DECL node as in ^(DECL type ID).
- *
- *  This is a variant of createToken where the new token is derived from
- *  an actual real input token.  Typically this is for converting '{'
- *  tokens to BLOCK etc...  You'll see
- *
- *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
- *
- *  If you care what the token payload objects' type is, you should
- *  override this method and any other createToken variant.
- *
- * NB: this being C, it is not so easy to extend the types of createToken.
- *     We will have to see if anyone needs to do this and add any variants to
- *     this interface.
- */
-static	pANTLR3_COMMON_TOKEN
-createTokenFromToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN fromToken)
-{
-    pANTLR3_COMMON_TOKEN    newToken;
-
-    newToken	= adaptor->tokenFactory->newToken(adaptor->tokenFactory);
-    
-    if	(newToken != NULL)
-    {
-		// Create the text using our own string factory to avoid complicating
-		// commontoken.
-		//
-		pANTLR3_STRING	text;
-
-		newToken->toString  = fromToken->toString;
-
-		if	(fromToken->textState == ANTLR3_TEXT_CHARP)
-		{
-			newToken->textState		= ANTLR3_TEXT_CHARP;
-			newToken->tokText.chars	= fromToken->tokText.chars;
-		}
-		else
-		{
-			text						= fromToken->getText(fromToken);
-			newToken->textState			= ANTLR3_TEXT_STRING;
-			newToken->tokText.text	    = adaptor->strFactory->newPtr(adaptor->strFactory, text->chars, text->len);
-		}
-
-		newToken->setLine				(newToken, fromToken->getLine(fromToken));
-		newToken->setTokenIndex			(newToken, fromToken->getTokenIndex(fromToken));
-		newToken->setCharPositionInLine	(newToken, fromToken->getCharPositionInLine(fromToken));
-		newToken->setChannel			(newToken, fromToken->getChannel(fromToken));
-		newToken->setType				(newToken, fromToken->getType(fromToken));
-    }
-
-    return  newToken;
-}
-
-/* Specific methods for a TreeAdaptor */
-
-/** Track start/stop token for subtree root created for a rule.
- *  Only works with CommonTree nodes.  For rules that match nothing,
- *  seems like this will yield start=i and stop=i-1 in a nil node.
- *  Might be useful info, so I'll not force it to be i..i.
- */
-static	void
-setTokenBoundaries	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken)
-{
-	ANTLR3_MARKER   start;
-	ANTLR3_MARKER   stop;
-
-	pANTLR3_COMMON_TREE	    ct;
-
-	if	(t == NULL)
-	{
-		return;
-	}
-
-	if	( startToken != NULL)
-	{
-		start = startToken->getTokenIndex(startToken);
-	}
-	else
-	{
-		start = 0;
-	}
-
-	if	( stopToken != NULL)
-	{
-		stop = stopToken->getTokenIndex(stopToken);
-	}
-	else
-	{
-		stop = 0;
-	}
-
-	ct	= (pANTLR3_COMMON_TREE)(t->super);
-
-	ct->startIndex  = start;
-	ct->stopIndex   = stop;
-
-}
-static	void
-dbgSetTokenBoundaries	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken)
-{
-	setTokenBoundaries(adaptor, t, startToken, stopToken);
-
-	if	(t != NULL && startToken != NULL && stopToken != NULL)
-	{
-		adaptor->debugger->setTokenBoundaries(adaptor->debugger, t, startToken->getTokenIndex(startToken), stopToken->getTokenIndex(stopToken));
-	}
-}
-
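-// Illustrative sketch: recording and reading back token boundaries on a rule's
-// subtree root `t`, where `start` and `stop` are assumed to be the first and
-// last real tokens matched by the rule.
-//
-static void boundariesSketch(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t,
-                             pANTLR3_COMMON_TOKEN start, pANTLR3_COMMON_TOKEN stop)
-{
-    ANTLR3_MARKER   first;
-    ANTLR3_MARKER   last;
-
-    adaptor->setTokenBoundaries(adaptor, t, start, stop);
-
-    // The indexes come straight back off the common tree payload
-    //
-    first = adaptor->getTokenStartIndex(adaptor, t);
-    last  = adaptor->getTokenStopIndex (adaptor, t);
-    (void)first;
-    (void)last;
-}
-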
-static	ANTLR3_MARKER   
-getTokenStartIndex	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
-{
-    return  ((pANTLR3_COMMON_TREE)(t->super))->startIndex;
-}
-
-static	ANTLR3_MARKER   
-getTokenStopIndex	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
-{
-    return  ((pANTLR3_COMMON_TREE)(t->super))->stopIndex;
-}
-
-static	pANTLR3_STRING
-getText		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
-{
-    return  t->getText(t);
-}
-
-static	ANTLR3_UINT32
-getType		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
-{
-    return  t->getType(t);
-}
-
-static	void					
-replaceChildren
-(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE t)
-{
-	if	(parent != NULL)
-	{
-		parent->replaceChildren(parent, startChildIndex, stopChildIndex, t);
-	}
-}
-
-static	pANTLR3_BASE_TREE
-getChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i)
-{
-	return t->getChild(t, i);
-}
-static  void
-setChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i, pANTLR3_BASE_TREE child)
-{
-	t->setChild(t, i, child);
-}
-
-static	void
-deleteChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i)
-{
-	t->deleteChild(t, i);
-}
-
-static	ANTLR3_UINT32
-getChildCount			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
-{
-	return t->getChildCount(t);
-}
-
-static  void
-setChildIndex			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_INT32 i)
-{
-	t->setChildIndex(t, i);
-}
-
-static  ANTLR3_INT32
-getChildIndex			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
-{
-	return t->getChildIndex(t);
-}
-static	void
-setParent				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE child, pANTLR3_BASE_TREE parent)
-{
-	child->setParent(child, parent);
-}
-static	pANTLR3_BASE_TREE
-getParent				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE child)
-{
-	return child->getParent(child);
-}
diff --git a/antlr-3.4/runtime/C/src/antlr3commontreenodestream.c b/antlr-3.4/runtime/C/src/antlr3commontreenodestream.c
deleted file mode 100644
index a759d34..0000000
--- a/antlr-3.4/runtime/C/src/antlr3commontreenodestream.c
+++ /dev/null
@@ -1,968 +0,0 @@
-/// \file
-/// Defines the implementation of the common node stream the default
-/// tree node stream used by ANTLR.
-///
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3commontreenodestream.h>
-
-#ifdef	ANTLR3_WINDOWS
-#pragma warning( disable : 4100 )
-#endif
-
-// COMMON TREE STREAM API
-//
-static	void						addNavigationNode			(pANTLR3_COMMON_TREE_NODE_STREAM ctns, ANTLR3_UINT32 ttype);
-static	ANTLR3_BOOLEAN				hasUniqueNavigationNodes	(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
-static	pANTLR3_BASE_TREE			newDownNode					(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
-static	pANTLR3_BASE_TREE			newUpNode					(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
-static	void						reset						(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
-static	void						push						(pANTLR3_COMMON_TREE_NODE_STREAM ctns, ANTLR3_INT32 index);
-static	ANTLR3_INT32				pop							(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
-//static	ANTLR3_INT32				index						(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
-static	ANTLR3_UINT32				getLookaheadSize			(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
-// TREE NODE STREAM API
-//
-static	pANTLR3_BASE_TREE_ADAPTOR   getTreeAdaptor				(pANTLR3_TREE_NODE_STREAM tns);
-static	pANTLR3_BASE_TREE			getTreeSource				(pANTLR3_TREE_NODE_STREAM tns);
-static	pANTLR3_BASE_TREE			_LT							(pANTLR3_TREE_NODE_STREAM tns, ANTLR3_INT32 k);
-static	pANTLR3_BASE_TREE			get							(pANTLR3_TREE_NODE_STREAM tns, ANTLR3_INT32 k);
-static	void						setUniqueNavigationNodes	(pANTLR3_TREE_NODE_STREAM tns, ANTLR3_BOOLEAN uniqueNavigationNodes);
-static	pANTLR3_STRING				toString					(pANTLR3_TREE_NODE_STREAM tns);
-static	pANTLR3_STRING				toStringSS					(pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE start, pANTLR3_BASE_TREE stop);
-static	void						toStringWork				(pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE start, pANTLR3_BASE_TREE stop, pANTLR3_STRING buf);
-static	void						replaceChildren				(pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE t);
-
-// INT STREAM API
-//
-static	void						consume						(pANTLR3_INT_STREAM is);
-static	ANTLR3_MARKER				tindex						(pANTLR3_INT_STREAM is);
-static	ANTLR3_UINT32				_LA							(pANTLR3_INT_STREAM is, ANTLR3_INT32 i);
-static	ANTLR3_MARKER				mark						(pANTLR3_INT_STREAM is);
-static	void						release						(pANTLR3_INT_STREAM is, ANTLR3_MARKER marker);
-static	void						rewindMark					(pANTLR3_INT_STREAM is, ANTLR3_MARKER marker);
-static	void						rewindLast					(pANTLR3_INT_STREAM is);
-static	void						seek						(pANTLR3_INT_STREAM is, ANTLR3_MARKER index);
-static	ANTLR3_UINT32				size						(pANTLR3_INT_STREAM is);
-
-
-// Helper functions
-//
-static	void						fillBuffer					(pANTLR3_COMMON_TREE_NODE_STREAM ctns, pANTLR3_BASE_TREE t);
-static	void						fillBufferRoot				(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
-
-// Constructors
-//
-static	void						antlr3TreeNodeStreamFree			(pANTLR3_TREE_NODE_STREAM tns);
-static	void						antlr3CommonTreeNodeStreamFree		(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
-
-ANTLR3_API pANTLR3_TREE_NODE_STREAM
-antlr3TreeNodeStreamNew()
-{
-    pANTLR3_TREE_NODE_STREAM stream;
-
-    // Memory for the interface structure
-    //
-    stream  = (pANTLR3_TREE_NODE_STREAM) ANTLR3_CALLOC(1, sizeof(ANTLR3_TREE_NODE_STREAM));
-
-    if	(stream == NULL)
-    {
-		return	NULL;
-    }
-
-    // Install basic API 
-    //
-	stream->replaceChildren = replaceChildren;
-    stream->free			= antlr3TreeNodeStreamFree;
-    
-    return stream;
-}
-
-static void
-antlr3TreeNodeStreamFree(pANTLR3_TREE_NODE_STREAM stream)
-{   
-    ANTLR3_FREE(stream);
-}
-
-ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM
-antlr3CommonTreeNodeStreamNewTree(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 hint)
-{
-	pANTLR3_COMMON_TREE_NODE_STREAM stream;
-
-	stream = antlr3CommonTreeNodeStreamNew(tree->strFactory, hint);
-
-	if	(stream == NULL)
-	{
-		return	NULL;
-	}
-	stream->root    = tree;
-
-	return stream;
-}
-
-ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM
-antlr3CommonTreeNodeStreamNewStream(pANTLR3_COMMON_TREE_NODE_STREAM inStream)
-{
-	pANTLR3_COMMON_TREE_NODE_STREAM stream;
-
-	// Memory for the interface structure
-	//
-	stream  = (pANTLR3_COMMON_TREE_NODE_STREAM) ANTLR3_CALLOC(1, sizeof(ANTLR3_COMMON_TREE_NODE_STREAM));
-
-	if	(stream == NULL)
-	{
-		return	NULL;
-	}
-
-	// Copy in all the reusable parts of the originating stream and create new
-	// pieces where necessary.
-	//
-
-	// String factory for tree walker
-	//
-	stream->stringFactory		= inStream->stringFactory;
-
-	// Reuse the adaptor from the originating common tree node stream
-	//
-	stream->adaptor				= inStream->adaptor;
-
-	// Create space for the tree node stream interface
-	//
-	stream->tnstream	    = antlr3TreeNodeStreamNew();
-
-	if	(stream->tnstream == NULL)
-	{
-		stream->free				(stream);
-
-		return	NULL;
-	}
-
-	// Create space for the INT_STREAM interface
-	//
-	stream->tnstream->istream		    =  antlr3IntStreamNew();
-
-	if	(stream->tnstream->istream == NULL)
-	{
-		stream->tnstream->free		(stream->tnstream);
-		stream->free				(stream);
-
-		return	NULL;
-	}
-
-	// Install the common tree node stream API
-	//
-	stream->addNavigationNode		    =  addNavigationNode;
-	stream->hasUniqueNavigationNodes    =  hasUniqueNavigationNodes;
-	stream->newDownNode					=  newDownNode;
-	stream->newUpNode					=  newUpNode;
-	stream->reset						=  reset;
-	stream->push						=  push;
-	stream->pop							=  pop;
-	stream->getLookaheadSize			=  getLookaheadSize;
-
-	stream->free			    =  antlr3CommonTreeNodeStreamFree;
-
-	// Install the tree node stream API
-	//
-	stream->tnstream->getTreeAdaptor			=  getTreeAdaptor;
-	stream->tnstream->getTreeSource				=  getTreeSource;
-	stream->tnstream->_LT						=  _LT;
-	stream->tnstream->setUniqueNavigationNodes	=  setUniqueNavigationNodes;
-	stream->tnstream->toString					=  toString;
-	stream->tnstream->toStringSS				=  toStringSS;
-	stream->tnstream->toStringWork				=  toStringWork;
-	stream->tnstream->get						=  get;
-
-	// Install INT_STREAM interface
-	//
-	stream->tnstream->istream->consume	    =  consume;
-	stream->tnstream->istream->index	    =  tindex;
-	stream->tnstream->istream->_LA			=  _LA;
-	stream->tnstream->istream->mark			=  mark;
-	stream->tnstream->istream->release	    =  release;
-	stream->tnstream->istream->rewind	    =  rewindMark;
-	stream->tnstream->istream->rewindLast   =  rewindLast;
-	stream->tnstream->istream->seek			=  seek;
-	stream->tnstream->istream->size			=  size;
-
-	// Initialize data elements of INT stream
-	//
-	stream->tnstream->istream->type			= ANTLR3_COMMONTREENODE;
-	stream->tnstream->istream->super	    =  (stream->tnstream);
-
-	// Initialize data elements of TREE stream
-	//
-	stream->tnstream->ctns =  stream;
-
-	// Initialize data elements of the COMMON TREE NODE stream
-	//
-	stream->super					= NULL;
-	stream->uniqueNavigationNodes	= ANTLR3_FALSE;
-	stream->markers					= NULL;
-	stream->nodeStack				= inStream->nodeStack;
-
-	// Create the node list map
-	//
-	stream->nodes	= antlr3VectorNew(DEFAULT_INITIAL_BUFFER_SIZE);
-	stream->p		= -1;
-
-	// Install the navigation nodes
-	//
-	antlr3SetCTAPI(&(stream->UP));
-	antlr3SetCTAPI(&(stream->DOWN));
-	antlr3SetCTAPI(&(stream->EOF_NODE));
-	antlr3SetCTAPI(&(stream->INVALID_NODE));
-
-	stream->UP.token						= inStream->UP.token;
-	inStream->UP.token->strFactory			= stream->stringFactory;
-	stream->DOWN.token						= inStream->DOWN.token;
-	inStream->DOWN.token->strFactory		= stream->stringFactory;
-	stream->EOF_NODE.token					= inStream->EOF_NODE.token;
-	inStream->EOF_NODE.token->strFactory	= stream->stringFactory;
-	stream->INVALID_NODE.token				= inStream->INVALID_NODE.token;
-	inStream->INVALID_NODE.token->strFactory= stream->stringFactory;
-
-	// Reuse the root tree of the originating stream
-	//
-	stream->root		= inStream->root;
-
-	// Signal that this is a rewriting stream so we don't
-	// free the originating tree. Anything that we rewrite or
-	// duplicate here will be done through the adaptor or 
-	// the original tree factory.
-	//
-	stream->isRewriter	= ANTLR3_TRUE;
-	return stream;
-}
-
-ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM
-antlr3CommonTreeNodeStreamNew(pANTLR3_STRING_FACTORY strFactory, ANTLR3_UINT32 hint)
-{
-	pANTLR3_COMMON_TREE_NODE_STREAM stream;
-	pANTLR3_COMMON_TOKEN			token;
-
-	// Memory for the interface structure
-	//
-	stream  = (pANTLR3_COMMON_TREE_NODE_STREAM) ANTLR3_CALLOC(1, sizeof(ANTLR3_COMMON_TREE_NODE_STREAM));
-
-	if	(stream == NULL)
-	{
-		return	NULL;
-	}
-
-	// String factory for tree walker
-	//
-	stream->stringFactory		= strFactory;
-
-	// Create an adaptor for the common tree node stream
-	//
-	stream->adaptor				= ANTLR3_TREE_ADAPTORNew(strFactory);
-
-	if	(stream->adaptor == NULL)
-	{
-		stream->free(stream);
-		return	NULL;
-	}
-
-	// Create space for the tree node stream interface
-	//
-	stream->tnstream	    = antlr3TreeNodeStreamNew();
-
-	if	(stream->tnstream == NULL)
-	{
-		stream->adaptor->free		(stream->adaptor);
-		stream->free				(stream);
-
-		return	NULL;
-	}
-
-	// Create space for the INT_STREAM interface
-	//
-	stream->tnstream->istream		    =  antlr3IntStreamNew();
-
-	if	(stream->tnstream->istream == NULL)
-	{
-		stream->adaptor->free		(stream->adaptor);
-		stream->tnstream->free		(stream->tnstream);
-		stream->free				(stream);
-
-		return	NULL;
-	}
-
-	// Install the common tree node stream API
-	//
-	stream->addNavigationNode		    =  addNavigationNode;
-	stream->hasUniqueNavigationNodes    =  hasUniqueNavigationNodes;
-	stream->newDownNode					=  newDownNode;
-	stream->newUpNode					=  newUpNode;
-	stream->reset						=  reset;
-	stream->push						=  push;
-	stream->pop							=  pop;
-
-	stream->free			    =  antlr3CommonTreeNodeStreamFree;
-
-	// Install the tree node stream API
-	//
-	stream->tnstream->getTreeAdaptor			=  getTreeAdaptor;
-	stream->tnstream->getTreeSource				=  getTreeSource;
-	stream->tnstream->_LT						=  _LT;
-	stream->tnstream->setUniqueNavigationNodes	=  setUniqueNavigationNodes;
-	stream->tnstream->toString					=  toString;
-	stream->tnstream->toStringSS				=  toStringSS;
-	stream->tnstream->toStringWork				=  toStringWork;
-	stream->tnstream->get						=  get;
-
-	// Install INT_STREAM interface
-	//
-	stream->tnstream->istream->consume	    =  consume;
-	stream->tnstream->istream->index	    =  tindex;
-	stream->tnstream->istream->_LA			=  _LA;
-	stream->tnstream->istream->mark			=  mark;
-	stream->tnstream->istream->release	    =  release;
-	stream->tnstream->istream->rewind	    =  rewindMark;
-	stream->tnstream->istream->rewindLast   =  rewindLast;
-	stream->tnstream->istream->seek			=  seek;
-	stream->tnstream->istream->size			=  size;
-
-	// Initialize data elements of INT stream
-	//
-	stream->tnstream->istream->type			= ANTLR3_COMMONTREENODE;
-	stream->tnstream->istream->super	    =  (stream->tnstream);
-
-	// Initialize data elements of TREE stream
-	//
-	stream->tnstream->ctns =  stream;
-
-	// Initialize data elements of the COMMON TREE NODE stream
-	//
-	stream->super					= NULL;
-	stream->uniqueNavigationNodes	= ANTLR3_FALSE;
-	stream->markers					= NULL;
-	stream->nodeStack				= antlr3StackNew(INITIAL_CALL_STACK_SIZE);
-
-	// Create the node list map
-	//
-	if	(hint == 0)
-	{
-		hint = DEFAULT_INITIAL_BUFFER_SIZE;
-	}
-	stream->nodes	= antlr3VectorNew(hint);
-	stream->p		= -1;
-
-	// Install the navigation nodes     
-	//
-	antlr3SetCTAPI(&(stream->UP));
-	antlr3SetCTAPI(&(stream->DOWN));
-	antlr3SetCTAPI(&(stream->EOF_NODE));
-	antlr3SetCTAPI(&(stream->INVALID_NODE));
-
-	token						= antlr3CommonTokenNew(ANTLR3_TOKEN_UP);
-	token->strFactory			= strFactory;
-	token->textState			= ANTLR3_TEXT_CHARP;
-	token->tokText.chars		= (pANTLR3_UCHAR)"UP";
-	stream->UP.token			= token;
-
-	token						= antlr3CommonTokenNew(ANTLR3_TOKEN_DOWN);
-	token->strFactory			= strFactory;
-	token->textState			= ANTLR3_TEXT_CHARP;
-	token->tokText.chars		= (pANTLR3_UCHAR)"DOWN";
-	stream->DOWN.token			= token;
-
-	token						= antlr3CommonTokenNew(ANTLR3_TOKEN_EOF);
-	token->strFactory			= strFactory;
-	token->textState			= ANTLR3_TEXT_CHARP;
-	token->tokText.chars		= (pANTLR3_UCHAR)"EOF";
-	stream->EOF_NODE.token		= token;
-
-	token						= antlr3CommonTokenNew(ANTLR3_TOKEN_INVALID);
-	token->strFactory			= strFactory;
-	token->textState			= ANTLR3_TEXT_CHARP;
-	token->tokText.chars		= (pANTLR3_UCHAR)"INVALID";
-	stream->INVALID_NODE.token	= token;
-
-
-	return  stream;
-}
-
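-// Illustrative sketch: walking a finished AST with a common tree node stream.
-// `tree` is assumed to be the root returned by a parser rule, with a valid
-// strFactory attached; passing 0 as the hint selects DEFAULT_INITIAL_BUFFER_SIZE.
-//
-static void nodeStreamSketch(pANTLR3_BASE_TREE tree)
-{
-    pANTLR3_COMMON_TREE_NODE_STREAM nodes;
-    pANTLR3_INT_STREAM              is;
-
-    nodes = antlr3CommonTreeNodeStreamNewTree(tree, 0);
-    if  (nodes == NULL)
-    {
-        return;
-    }
-
-    is = nodes->tnstream->istream;
-
-    // LA(1)/consume() drive the flattened node sequence just as a token
-    // stream drives a parser; the EOF node terminates the walk.
-    //
-    while (is->_LA(is, 1) != ANTLR3_TOKEN_EOF)
-    {
-        is->consume(is);
-    }
-
-    nodes->free(nodes);
-}
-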
-/// Free up any resources that belong to this common tree node stream.
-///
-static	void			    antlr3CommonTreeNodeStreamFree  (pANTLR3_COMMON_TREE_NODE_STREAM ctns)
-{
-
-	// If this is a rewriting stream, then certain resources
-	// belong to the originating node stream and we do not
-	// free them here.
-	//
-	if	(ctns->isRewriter != ANTLR3_TRUE)
-	{
-		ctns->adaptor			->free  (ctns->adaptor);
-
-		if	(ctns->nodeStack != NULL)
-		{
-			ctns->nodeStack->free(ctns->nodeStack);
-		}
-
-		ANTLR3_FREE(ctns->INVALID_NODE.token);
-		ANTLR3_FREE(ctns->EOF_NODE.token);
-		ANTLR3_FREE(ctns->DOWN.token);
-		ANTLR3_FREE(ctns->UP.token);
-	}
-	
-	if	(ctns->nodes != NULL)
-	{
-		ctns->nodes			->free  (ctns->nodes);
-	}
-	ctns->tnstream->istream ->free  (ctns->tnstream->istream);
-    ctns->tnstream			->free  (ctns->tnstream);
-
-
-    ANTLR3_FREE(ctns);
-}
-
-// ------------------------------------------------------------------------------
-// Local helpers
-//
-
-/// Walk and fill the tree node buffer from the root tree
-///
-static void
-fillBufferRoot(pANTLR3_COMMON_TREE_NODE_STREAM ctns)
-{
-	// Call the generic buffer routine with the root as the
-	// argument
-	//
-	fillBuffer(ctns, ctns->root);
-	ctns->p = 0;					// Indicate we are at buffer start
-}
-
-/// Walk tree with depth-first-search and fill nodes buffer.
-/// Don't add in DOWN, UP nodes if the supplied tree is a list (t->isNilNode() is true),
-/// such as the root tree is.
-///
-static void
-fillBuffer(pANTLR3_COMMON_TREE_NODE_STREAM ctns, pANTLR3_BASE_TREE t)
-{
-	ANTLR3_BOOLEAN	nilNode;
-	ANTLR3_UINT32	nCount;
-	ANTLR3_UINT32	c;
-
-	nilNode = ctns->adaptor->isNilNode(ctns->adaptor, t);
-
-	// If the supplied node is not a nil (list) node then we
-	// add in the node itself to the vector
-	//
-	if	(nilNode == ANTLR3_FALSE)
-	{
-		ctns->nodes->add(ctns->nodes, t, NULL);	
-	}
-
-	// Only add a DOWN node if the tree is not a nil tree and
-	// the tree does have children.
-	//
-	nCount = t->getChildCount(t);
-
-	if	(nilNode == ANTLR3_FALSE && nCount>0)
-	{
-		ctns->addNavigationNode(ctns, ANTLR3_TOKEN_DOWN);
-	}
-
-	// We always add any children the tree contains, which is
-	// a recursive call to this function, which will cause similar
-	// recursion and implement a depth first addition
-	//
-	for	(c = 0; c < nCount; c++)
-	{
-		fillBuffer(ctns, ctns->adaptor->getChild(ctns->adaptor, t, c));
-	}
-
-	// If the tree had children and was not a nil (list) node, then we
-	// need to add an UP node here to match the DOWN node
-	//
-	if	(nilNode == ANTLR3_FALSE && nCount > 0)
-	{
-		ctns->addNavigationNode(ctns, ANTLR3_TOKEN_UP);
-	}
-}
-
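-// Illustrative note: for a tree such as ^(PLUS A ^(MULT B C)) (token names are
-// just examples), the routine above serializes the nodes buffer as
-//
-//     PLUS DOWN A MULT DOWN B C UP UP
-//
-// which is how the UP/DOWN navigation nodes encode the two-dimensional tree
-// in a one-dimensional lookahead stream.
-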
-
-// ------------------------------------------------------------------------------
-// Interface functions
-//
-
-/// Reset the input stream to the start of the input nodes.
-///
-static	void		
-reset	    (pANTLR3_COMMON_TREE_NODE_STREAM ctns)
-{
-	if	(ctns->p != -1)
-	{
-		ctns->p									= 0;
-	}
-	ctns->tnstream->istream->lastMarker		= 0;
-
-
-	// Free and reset the node stack only if this is not
-	// a rewriter, which is going to reuse the originating
-	// node streams node stack
-	//
-	if  (ctns->isRewriter != ANTLR3_TRUE)
-    {
-		if	(ctns->nodeStack != NULL)
-		{
-			ctns->nodeStack->free(ctns->nodeStack);
-			ctns->nodeStack = antlr3StackNew(INITIAL_CALL_STACK_SIZE);
-		}
-	}
-}
-
-
-static pANTLR3_BASE_TREE
-LB(pANTLR3_TREE_NODE_STREAM tns, ANTLR3_INT32 k)
-{
-	if	( k==0)
-	{
-		return	&(tns->ctns->INVALID_NODE.baseTree);
-	}
-
-	if	( (tns->ctns->p - k) < 0)
-	{
-		return	&(tns->ctns->INVALID_NODE.baseTree);
-	}
-
-	return tns->ctns->nodes->get(tns->ctns->nodes, tns->ctns->p - k);
-}
-
-/// Get tree node at current input pointer + i ahead where i=1 is next node.
-/// i<0 indicates nodes in the past.  So -1 is previous node and -2 is
-/// two nodes ago. LT(0) is undefined. In this implementation, LT(0) and any
-/// index that resolves to a negative absolute position return the INVALID node,
-/// while any i at or beyond the end of the buffer returns the EOF node.
-///
-/// This is analogous to the _LT() method of the TokenStream, but this
-/// returns a tree node instead of a token.  Makes code gen identical
-/// for both parser and tree grammars. :)
-///
-static	pANTLR3_BASE_TREE	    
-_LT	    (pANTLR3_TREE_NODE_STREAM tns, ANTLR3_INT32 k)
-{
-	if	(tns->ctns->p == -1)
-	{
-		fillBufferRoot(tns->ctns);
-	}
-
-	if	(k < 0)
-	{
-		return LB(tns, -k);
-	}
-	else if	(k == 0)
-	{
-		return	&(tns->ctns->INVALID_NODE.baseTree);
-	}
-
-	// k was a legitimate request, 
-	//
-	if	(( tns->ctns->p + k - 1) >= (ANTLR3_INT32)(tns->ctns->nodes->count))
-	{
-		return &(tns->ctns->EOF_NODE.baseTree);
-	}
-
-	return	tns->ctns->nodes->get(tns->ctns->nodes, tns->ctns->p + k - 1);
-}
-
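-// Illustrative sketch: peeking at the next node before deciding whether to
-// descend, assuming `tns` is the tree node stream interface of a common tree
-// node stream that has already been attached to a root tree.
-//
-static ANTLR3_BOOLEAN nextNodeIsDown(pANTLR3_TREE_NODE_STREAM tns)
-{
-    pANTLR3_BASE_TREE   next;
-
-    next = tns->_LT(tns, 1);    // The node that the next consume() would take
-
-    return next->getType(next) == ANTLR3_TOKEN_DOWN ? ANTLR3_TRUE : ANTLR3_FALSE;
-}
-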
-/// Where is this stream pulling nodes from?  This is not the name, but
-/// the object that provides node objects.
-///
-static	pANTLR3_BASE_TREE	    
-getTreeSource	(pANTLR3_TREE_NODE_STREAM tns)
-{
-    return  tns->ctns->root;
-}
-
-/// Consume the next node from the input stream
-///
-static	void		    
-consume	(pANTLR3_INT_STREAM is)
-{
-    pANTLR3_TREE_NODE_STREAM		tns;
-    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
-
-    tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
-    ctns    = tns->ctns;
-
-	if	(ctns->p == -1)
-	{
-		fillBufferRoot(ctns);
-	}
-	ctns->p++;
-}
-
-static	ANTLR3_UINT32	    
-_LA	    (pANTLR3_INT_STREAM is, ANTLR3_INT32 i)
-{
-	pANTLR3_TREE_NODE_STREAM		tns;
-	pANTLR3_BASE_TREE				t;
-
-	tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
-
-	// Ask LT for the 'token' at that position
-	//
-	t = tns->_LT(tns, i);
-
-	if	(t == NULL)
-	{
-		return	ANTLR3_TOKEN_INVALID;
-	}
-
-	// Token node was there so return the type of it
-	//
-	return  t->getType(t);
-}
-
-/// Mark the state of the input stream so that we can come back to it
-/// after a syntactic predicate and so on.
-///
-static	ANTLR3_MARKER	    
-mark	(pANTLR3_INT_STREAM is)
-{
-	pANTLR3_TREE_NODE_STREAM		tns;
-	pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
-
-	tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
-	ctns    = tns->ctns;
-
-	if	(tns->ctns->p == -1)
-	{
-		fillBufferRoot(tns->ctns);
-	}
-
-	// Return the current mark point
-	//
-	ctns->tnstream->istream->lastMarker = ctns->tnstream->istream->index(ctns->tnstream->istream);
-
-	return ctns->tnstream->istream->lastMarker;
-}
-
-static	void		    
-release	(pANTLR3_INT_STREAM is, ANTLR3_MARKER marker)
-{
-}
-
-/// Rewind the current state of the tree walk to the state it
-/// was in when mark() was called and it returned marker.  Also,
-/// wipe out the lookahead which will force reloading a few nodes
-/// but it is better than making a copy of the lookahead buffer
-/// upon mark().
-///
-static	void		    
-rewindMark	    (pANTLR3_INT_STREAM is, ANTLR3_MARKER marker)
-{
-	is->seek(is, marker);
-}
-
-static	void		    
-rewindLast	(pANTLR3_INT_STREAM is)
-{
-   is->seek(is, is->lastMarker);
-}
-
-/// consume() ahead until we hit index.  Can't just jump ahead--must
-/// spit out the navigation nodes.
-///
-static	void		    
-seek	(pANTLR3_INT_STREAM is, ANTLR3_MARKER index)
-{
-    pANTLR3_TREE_NODE_STREAM		tns;
-    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
-
-    tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
-    ctns    = tns->ctns;
-
-	ctns->p = ANTLR3_UINT32_CAST(index);
-}
-
-static	ANTLR3_MARKER		    
-tindex	(pANTLR3_INT_STREAM is)
-{
-    pANTLR3_TREE_NODE_STREAM		tns;
-    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
-
-    tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
-    ctns    = tns->ctns;
-
-	return (ANTLR3_MARKER)(ctns->p);
-}
-
-/// Expensive to compute the size of the whole tree while parsing.
-/// This method only returns how much input has been seen so far.  So
-/// after parsing it returns the true size.
-///
-static	ANTLR3_UINT32		    
-size	(pANTLR3_INT_STREAM is)
-{
-    pANTLR3_TREE_NODE_STREAM		tns;
-    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
-
-    tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
-    ctns    = tns->ctns;
-
-	if	(ctns->p == -1)
-	{
-		fillBufferRoot(ctns);
-	}
-
-	return ctns->nodes->size(ctns->nodes);
-}
-
-/// As we flatten the tree, we use UP, DOWN nodes to represent
-/// the tree structure.  When debugging we need unique nodes
-/// so we instantiate new ones when uniqueNavigationNodes is true.
-///
-static	void		    
-addNavigationNode	    (pANTLR3_COMMON_TREE_NODE_STREAM ctns, ANTLR3_UINT32 ttype)
-{
-	pANTLR3_BASE_TREE	    node;
-
-	node = NULL;
-
-	if	(ttype == ANTLR3_TOKEN_DOWN)
-	{
-		if  (ctns->hasUniqueNavigationNodes(ctns) == ANTLR3_TRUE)
-		{
-			node    = ctns->newDownNode(ctns);
-		}
-		else
-		{
-			node    = &(ctns->DOWN.baseTree);
-		}
-	}
-	else
-	{
-		if  (ctns->hasUniqueNavigationNodes(ctns) == ANTLR3_TRUE)
-		{
-			node    = ctns->newUpNode(ctns);
-		}
-		else
-		{
-			node    = &(ctns->UP.baseTree);
-		}
-	}
-
-	// Now add the node we decided upon.
-	//
-	ctns->nodes->add(ctns->nodes, node, NULL);
-}
-
-
-static	pANTLR3_BASE_TREE_ADAPTOR			    
-getTreeAdaptor	(pANTLR3_TREE_NODE_STREAM tns)
-{
-    return  tns->ctns->adaptor;
-}
-
-static	ANTLR3_BOOLEAN	    
-hasUniqueNavigationNodes	    (pANTLR3_COMMON_TREE_NODE_STREAM ctns)
-{
-    return  ctns->uniqueNavigationNodes;
-}
-
-static	void		    
-setUniqueNavigationNodes	    (pANTLR3_TREE_NODE_STREAM tns, ANTLR3_BOOLEAN uniqueNavigationNodes)
-{
-    tns->ctns->uniqueNavigationNodes = uniqueNavigationNodes;
-}
-
-
-/// Print out the entire tree including DOWN/UP nodes.  Uses
-/// a recursive walk.  Mostly useful for testing as it yields
-/// the token types not text.
-///
-static	pANTLR3_STRING	    
-toString	    (pANTLR3_TREE_NODE_STREAM tns)
-{
-
-    return  tns->toStringSS(tns, tns->ctns->root, NULL);
-}
-
-static	pANTLR3_STRING	    
-toStringSS	    (pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE start, pANTLR3_BASE_TREE stop)
-{
-    pANTLR3_STRING  buf;
-
-    buf = tns->ctns->stringFactory->newRaw(tns->ctns->stringFactory);
-
-    tns->toStringWork(tns, start, stop, buf);
-
-    return  buf;
-}
-
-static	void	    
-toStringWork	(pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE p, pANTLR3_BASE_TREE stop, pANTLR3_STRING buf)
-{
-
-	ANTLR3_UINT32   n;
-	ANTLR3_UINT32   c;
-
-	if	(!p->isNilNode(p) )
-	{
-		pANTLR3_STRING	text;
-
-		text	= p->toString(p);
-
-		if  (text == NULL)
-		{
-			text = tns->ctns->stringFactory->newRaw(tns->ctns->stringFactory);
-
-			text->addc	(text, ' ');
-			text->addi	(text, p->getType(p));
-		}
-
-		buf->appendS(buf, text);
-	}
-
-	if	(p == stop)
-	{
-		return;		/* Finished */
-	}
-
-	n = p->getChildCount(p);
-
-	if	(n > 0 && ! p->isNilNode(p) )
-	{
-		buf->addc   (buf, ' ');
-		buf->addi   (buf, ANTLR3_TOKEN_DOWN);
-	}
-
-	for	(c = 0; c<n ; c++)
-	{
-		pANTLR3_BASE_TREE   child;
-
-		child = p->getChild(p, c);
-		tns->toStringWork(tns, child, stop, buf);
-	}
-
-	if	(n > 0 && ! p->isNilNode(p) )
-	{
-		buf->addc   (buf, ' ');
-		buf->addi   (buf, ANTLR3_TOKEN_UP);
-	}
-}
-
-static	ANTLR3_UINT32	    
-getLookaheadSize	(pANTLR3_COMMON_TREE_NODE_STREAM ctns)
-{
-    return	ctns->tail < ctns->head 
-	    ?	(ctns->lookAheadLength - ctns->head + ctns->tail)
-	    :	(ctns->tail - ctns->head);
-}
-
-static	pANTLR3_BASE_TREE	    
-newDownNode		(pANTLR3_COMMON_TREE_NODE_STREAM ctns)
-{
-    pANTLR3_COMMON_TREE	    dNode;
-    pANTLR3_COMMON_TOKEN    token;
-
-    token					= antlr3CommonTokenNew(ANTLR3_TOKEN_DOWN);
-	token->textState		= ANTLR3_TEXT_CHARP;
-	token->tokText.chars	= (pANTLR3_UCHAR)"DOWN";
-    dNode					= antlr3CommonTreeNewFromToken(token);
-
-    return  &(dNode->baseTree);
-}
-
-static	pANTLR3_BASE_TREE	    
-newUpNode		(pANTLR3_COMMON_TREE_NODE_STREAM ctns)
-{
-    pANTLR3_COMMON_TREE	    uNode;
-    pANTLR3_COMMON_TOKEN    token;
-
-    token					= antlr3CommonTokenNew(ANTLR3_TOKEN_UP);
-	token->textState		= ANTLR3_TEXT_CHARP;
-	token->tokText.chars	= (pANTLR3_UCHAR)"UP";
-    uNode					= antlr3CommonTreeNewFromToken(token);
-
-    return  &(uNode->baseTree);
-}
-
-/// Replace the children of parent from startChildIndex to stopChildIndex with t,
-/// which might be a list.  The number of children may be different
-/// after this call.  The stream is notified because it is walking the
-/// tree and might need to know you are monkeying with the underlying
-/// tree.  Also, it might be able to modify the node stream to avoid
-/// re-streaming for future phases.
-///
-/// If parent is NULL, do nothing; we must be at the root of the overall tree
-/// and cannot replace whatever points to the parent externally.
-///
-static	void						
-replaceChildren				(pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE t)
-{
-	if	(parent != NULL)
-	{
-		pANTLR3_BASE_TREE_ADAPTOR	adaptor;
-		pANTLR3_COMMON_TREE_ADAPTOR	cta;
-
-		adaptor	= tns->getTreeAdaptor(tns);
-		cta		= (pANTLR3_COMMON_TREE_ADAPTOR)(adaptor->super);
-
-		adaptor->replaceChildren(adaptor, parent, startChildIndex, stopChildIndex, t);
-	}
-}
-
-static	pANTLR3_BASE_TREE
-get							(pANTLR3_TREE_NODE_STREAM tns, ANTLR3_INT32 k)
-{
-	if	(tns->ctns->p == -1)
-	{
-		fillBufferRoot(tns->ctns);
-	}
-
-	return tns->ctns->nodes->get(tns->ctns->nodes, k);
-}
-
-static	void
-push						(pANTLR3_COMMON_TREE_NODE_STREAM ctns, ANTLR3_INT32 index)
-{
-	ctns->nodeStack->push(ctns->nodeStack, ANTLR3_FUNC_PTR(ctns->p), NULL);	// Save current index
-	ctns->tnstream->istream->seek(ctns->tnstream->istream, index);
-}
-
-static	ANTLR3_INT32
-pop							(pANTLR3_COMMON_TREE_NODE_STREAM ctns)
-{
-	ANTLR3_INT32	retVal;
-
-	retVal = ANTLR3_UINT32_CAST(ctns->nodeStack->pop(ctns->nodeStack));
-	ctns->tnstream->istream->seek(ctns->tnstream->istream, retVal);
-	return retVal;
-}
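
For reference, the DOWN/UP flattening performed by addNavigationNode() and toStringWork() in the deleted node-stream code above can be shown with a minimal stand-alone sketch. This is illustrative only and not part of the deleted file; the Node type here is hypothetical and unrelated to pANTLR3_BASE_TREE.

// Illustrative sketch: a tree such as (PLUS a b) flattens to "PLUS DOWN a b UP",
// which is the order in which the node stream emits navigation nodes.
#include <stdio.h>

typedef struct Node {
    const char   *text;      // token text for the node
    int           nChildren; // number of children
    struct Node **children;  // child pointers
} Node;

static void flatten(const Node *p)
{
    printf(" %s", p->text);
    if (p->nChildren > 0) {
        printf(" DOWN");                       // descend marker
        for (int c = 0; c < p->nChildren; c++)
            flatten(p->children[c]);
        printf(" UP");                         // ascend marker
    }
}

int main(void)
{
    Node a = { "a", 0, NULL }, b = { "b", 0, NULL };
    Node *kids[] = { &a, &b };
    Node plus = { "PLUS", 2, kids };
    flatten(&plus);                            // prints: PLUS DOWN a b UP
    printf("\n");
    return 0;
}
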
diff --git a/antlr-3.4/runtime/C/src/antlr3convertutf.c b/antlr-3.4/runtime/C/src/antlr3convertutf.c
deleted file mode 100644
index 7c2f060..0000000
--- a/antlr-3.4/runtime/C/src/antlr3convertutf.c
+++ /dev/null
@@ -1,532 +0,0 @@
-/*
- * Copyright 2001-2004 Unicode, Inc.
- * 
- * Disclaimer
- * 
- * This source code is provided as is by Unicode, Inc. No claims are
- * made as to fitness for any particular purpose. No warranties of any
- * kind are expressed or implied. The recipient agrees to determine
- * applicability of information provided. If this file has been
- * purchased on magnetic or optical media from Unicode, Inc., the
- * sole remedy for any claim will be exchange of defective media
- * within 90 days of receipt.
- * 
- * Limitations on Rights to Redistribute This Code
- * 
- * Unicode, Inc. hereby grants the right to freely use the information
- * supplied in this file in the creation of products supporting the
- * Unicode Standard, and to make copies of this file in any form
- * for internal or external distribution as long as this notice
- * remains attached.
- */
-
-/* ---------------------------------------------------------------------
-
-    Conversions between UTF32, UTF-16, and UTF-8. Source code file.
-    Author: Mark E. Davis, 1994.
-    Rev History: Rick McGowan, fixes & updates May 2001.
-    Sept 2001: fixed const & error conditions per
-	mods suggested by S. Parent & A. Lillich.
-    June 2002: Tim Dodd added detection and handling of incomplete
-	source sequences, enhanced error detection, added casts
-	to eliminate compiler warnings.
-    July 2003: slight mods to back out aggressive FFFE detection.
-    Jan 2004: updated switches in from-UTF8 conversions.
-    Oct 2004: updated to use UNI_MAX_LEGAL_UTF32 in UTF-32 conversions.
-
-    See the header file "ConvertUTF.h" for complete documentation.
-
------------------------------------------------------------------------- */
-
-
-#include "antlr3convertutf.h"
-
-#ifdef CVTUTF_DEBUG
-#include <stdio.h>
-#endif
-
-
-
-/* --------------------------------------------------------------------- */
-
-ConversionResult ConvertUTF32toUTF16 (
-	const UTF32** sourceStart, const UTF32* sourceEnd, 
-	UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
-    ConversionResult result = conversionOK;
-    const UTF32* source = *sourceStart;
-    UTF16* target = *targetStart;
-    while (source < sourceEnd) {
-	UTF32 ch;
-	if (target >= targetEnd) {
-	    result = targetExhausted; break;
-	}
-	ch = *source++;
-	if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
-	    /* UTF-16 surrogate values are illegal in UTF-32; 0xffff or 0xfffe are both reserved values */
-	    if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
-		if (flags == strictConversion) {
-		    --source; /* return to the illegal value itself */
-		    result = sourceIllegal;
-		    break;
-		} else {
-		    *target++ = UNI_REPLACEMENT_CHAR;
-		}
-	    } else {
-		*target++ = (UTF16)ch; /* normal case */
-	    }
-	} else if (ch > UNI_MAX_LEGAL_UTF32) {
-	    if (flags == strictConversion) {
-		result = sourceIllegal;
-	    } else {
-		*target++ = UNI_REPLACEMENT_CHAR;
-	    }
-	} else {
-	    /* target is a character in range 0xFFFF - 0x10FFFF. */
-	    if (target + 1 >= targetEnd) {
-		--source; /* Back up source pointer! */
-		result = targetExhausted; break;
-	    }
-	    ch -= halfBase;
-	    *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
-	    *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
-	}
-    }
-    *sourceStart = source;
-    *targetStart = target;
-    return result;
-}
-
-/* --------------------------------------------------------------------- */
-
-ConversionResult ConvertUTF16toUTF32 (
-	const UTF16** sourceStart, const UTF16* sourceEnd, 
-	UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
-    ConversionResult result = conversionOK;
-    const UTF16* source = *sourceStart;
-    UTF32* target = *targetStart;
-    UTF32 ch, ch2;
-    while (source < sourceEnd) {
-	const UTF16* oldSource = source; /*  In case we have to back up because of target overflow. */
-	ch = *source++;
-	/* If we have a surrogate pair, convert to UTF32 first. */
-	if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
-	    /* If the 16 bits following the high surrogate are in the source buffer... */
-	    if (source < sourceEnd) {
-		ch2 = *source;
-		/* If it's a low surrogate, convert to UTF32. */
-		if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) {
-		    ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
-			+ (ch2 - UNI_SUR_LOW_START) + halfBase;
-		    ++source;
-		} else if (flags == strictConversion) { /* it's an unpaired high surrogate */
-		    --source; /* return to the illegal value itself */
-		    result = sourceIllegal;
-		    break;
-		}
-	    } else { /* We don't have the 16 bits following the high surrogate. */
-		--source; /* return to the high surrogate */
-		result = sourceExhausted;
-		break;
-	    }
-	} else if (flags == strictConversion) {
-	    /* UTF-16 surrogate values are illegal in UTF-32 */
-	    if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) {
-		--source; /* return to the illegal value itself */
-		result = sourceIllegal;
-		break;
-	    }
-	}
-	if (target >= targetEnd) {
-	    source = oldSource; /* Back up source pointer! */
-	    result = targetExhausted; break;
-	}
-	*target++ = ch;
-    }
-    *sourceStart = source;
-    *targetStart = target;
-#ifdef CVTUTF_DEBUG
-if (result == sourceIllegal) {
-    ANTLR3_FPRINTF(stderr, "ConvertUTF16toUTF32 illegal seq 0x%04x,%04x\n", ch, ch2);
-    fflush(stderr);
-}
-#endif
-    return result;
-}
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Index into the table below with the first byte of a UTF-8 sequence to
- * get the number of trailing bytes that are supposed to follow it.
- * Note that *legal* UTF-8 values can't have 4 or 5 trailing bytes. The table is
- * left as-is for anyone who may want to do such conversion, which was
- * allowed in earlier algorithms.
- */
-static const char trailingBytesForUTF8[256] = {
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
-    2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
-};
-
-/*
- * Magic values subtracted from a buffer value during UTF8 conversion.
- * This table contains as many values as there might be trailing bytes
- * in a UTF-8 sequence.
- */
-static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, 
-		     0x03C82080UL, 0xFA082080UL, 0x82082080UL };
-
-/*
- * Once the bits are split out into bytes of UTF-8, this is a mask OR-ed
- * into the first byte, depending on how many bytes follow.  There are
- * as many entries in this table as there are UTF-8 sequence types.
- * (I.e., one byte sequence, two byte... etc.). Remember that sequences
- * for *legal* UTF-8 will be 4 or fewer bytes total.
- */
-static const UTF8 firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC };
-
-/* --------------------------------------------------------------------- */
-
-/* The interface converts a whole buffer to avoid function-call overhead.
- * Constants have been gathered. Loops & conditionals have been removed as
- * much as possible for efficiency, in favor of drop-through switches.
- * (See "Note A" at the bottom of the file for equivalent code.)
- * If your compiler supports it, the "isLegalUTF8" call can be turned
- * into an inline function.
- */
-
-/* --------------------------------------------------------------------- */
-
-ConversionResult ConvertUTF16toUTF8 (
-	const UTF16** sourceStart, const UTF16* sourceEnd, 
-	UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) {
-    ConversionResult result = conversionOK;
-    const UTF16* source = *sourceStart;
-    UTF8* target = *targetStart;
-    while (source < sourceEnd) {
-	UTF32 ch;
-	unsigned short bytesToWrite = 0;
-	const UTF32 byteMask = 0xBF;
-	const UTF32 byteMark = 0x80; 
-	const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */
-	ch = *source++;
-	/* If we have a surrogate pair, convert to UTF32 first. */
-	if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
-	    /* If the 16 bits following the high surrogate are in the source buffer... */
-	    if (source < sourceEnd) {
-		UTF32 ch2 = *source;
-		/* If it's a low surrogate, convert to UTF32. */
-		if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) {
-		    ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
-			+ (ch2 - UNI_SUR_LOW_START) + halfBase;
-		    ++source;
-		} else if (flags == strictConversion) { /* it's an unpaired high surrogate */
-		    --source; /* return to the illegal value itself */
-		    result = sourceIllegal;
-		    break;
-		}
-	    } else { /* We don't have the 16 bits following the high surrogate. */
-		--source; /* return to the high surrogate */
-		result = sourceExhausted;
-		break;
-	    }
-        } else if (flags == strictConversion) {
-	    /* UTF-16 surrogate values are illegal in UTF-32 */
-	    if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) {
-		--source; /* return to the illegal value itself */
-		result = sourceIllegal;
-		break;
-	    }
-	}
-	/* Figure out how many bytes the result will require */
-	if (ch < (UTF32)0x80) {	     bytesToWrite = 1;
-	} else if (ch < (UTF32)0x800) {     bytesToWrite = 2;
-	} else if (ch < (UTF32)0x10000) {   bytesToWrite = 3;
-	} else if (ch < (UTF32)0x110000) {  bytesToWrite = 4;
-	} else {			    bytesToWrite = 3;
-					    ch = UNI_REPLACEMENT_CHAR;
-	}
-
-	target += bytesToWrite;
-	if (target > targetEnd) {
-	    source = oldSource; /* Back up source pointer! */
-	    target -= bytesToWrite; result = targetExhausted; break;
-	}
-	switch (bytesToWrite) { /* note: everything falls through. */
-	    case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
-	    case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
-	    case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
-	    case 1: *--target =  (UTF8)(ch | firstByteMark[bytesToWrite]);
-	}
-	target += bytesToWrite;
-    }
-    *sourceStart = source;
-    *targetStart = target;
-    return result;
-}
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Utility routine to tell whether a sequence of bytes is legal UTF-8.
- * This must be called with the length pre-determined by the first byte.
- * If not calling this from ConvertUTF8to*, then the length can be set by:
- *  length = trailingBytesForUTF8[*source]+1;
- * and the sequence is illegal right away if there aren't that many bytes
- * available.
- * If presented with a length > 4, this returns false.  The Unicode
- * definition of UTF-8 goes up to 4-byte sequences.
- */
-
-static ANTLR3_BOOLEAN
-isLegalUTF8(const UTF8 *source, int length) {
-    UTF8 a;
-    const UTF8 *srcptr = source+length;
-    switch (length) {
-    default: return false;
-	/* Everything else falls through when "true"... */
-    case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
-    case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
-    case 2: if ((a = (*--srcptr)) > 0xBF) return false;
-
-	switch (*source) {
-	    /* no fall-through in this inner switch */
-	    case 0xE0: if (a < 0xA0) return false; break;
-	    case 0xED: if (a > 0x9F) return false; break;
-	    case 0xF0: if (a < 0x90) return false; break;
-	    case 0xF4: if (a > 0x8F) return false; break;
-	    default:   if (a < 0x80) return false;
-	}
-
-    case 1: if (*source >= 0x80 && *source < 0xC2) return false;
-    }
-    if (*source > 0xF4) return false;
-    return true;
-}
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Exported function to return whether a UTF-8 sequence is legal or not.
- * This is not used here; it's just exported.
- */
-ANTLR3_BOOLEAN
-isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) {
-    int length = trailingBytesForUTF8[*source]+1;
-    if (source+length > sourceEnd) {
-	return false;
-    }
-    return isLegalUTF8(source, length);
-}
-
-/* --------------------------------------------------------------------- */
-
-ConversionResult ConvertUTF8toUTF16 (
-	const UTF8** sourceStart, const UTF8* sourceEnd, 
-	UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
-    ConversionResult result = conversionOK;
-    const UTF8* source = *sourceStart;
-    UTF16* target = *targetStart;
-    while (source < sourceEnd) {
-	UTF32 ch = 0;
-	unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
-	if (source + extraBytesToRead >= sourceEnd) {
-	    result = sourceExhausted; break;
-	}
-	/* Do this check whether lenient or strict */
-	if (! isLegalUTF8(source, extraBytesToRead+1)) {
-	    result = sourceIllegal;
-	    break;
-	}
-	/*
-	 * The cases all fall through. See "Note A" below.
-	 */
-	switch (extraBytesToRead) {
-	    case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */
-	    case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */
-	    case 3: ch += *source++; ch <<= 6;
-	    case 2: ch += *source++; ch <<= 6;
-	    case 1: ch += *source++; ch <<= 6;
-	    case 0: ch += *source++;
-	}
-	ch -= offsetsFromUTF8[extraBytesToRead];
-
-	if (target >= targetEnd) {
-	    source -= (extraBytesToRead+1); /* Back up source pointer! */
-	    result = targetExhausted; break;
-	}
-	if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
-	    /* UTF-16 surrogate values are illegal in UTF-32 */
-	    if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
-		if (flags == strictConversion) {
-		    source -= (extraBytesToRead+1); /* return to the illegal value itself */
-		    result = sourceIllegal;
-		    break;
-		} else {
-		    *target++ = UNI_REPLACEMENT_CHAR;
-		}
-	    } else {
-		*target++ = (UTF16)ch; /* normal case */
-	    }
-	} else if (ch > UNI_MAX_UTF16) {
-	    if (flags == strictConversion) {
-		result = sourceIllegal;
-		source -= (extraBytesToRead+1); /* return to the start */
-		break; /* Bail out; shouldn't continue */
-	    } else {
-		*target++ = UNI_REPLACEMENT_CHAR;
-	    }
-	} else {
-	    /* target is a character in range 0xFFFF - 0x10FFFF. */
-	    if (target + 1 >= targetEnd) {
-		source -= (extraBytesToRead+1); /* Back up source pointer! */
-		result = targetExhausted; break;
-	    }
-	    ch -= halfBase;
-	    *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
-	    *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
-	}
-    }
-    *sourceStart = source;
-    *targetStart = target;
-    return result;
-}
-
-/* --------------------------------------------------------------------- */
-
-ConversionResult ConvertUTF32toUTF8 (
-	const UTF32** sourceStart, const UTF32* sourceEnd, 
-	UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) {
-    ConversionResult result = conversionOK;
-    const UTF32* source = *sourceStart;
-    UTF8* target = *targetStart;
-    while (source < sourceEnd) {
-	UTF32 ch;
-	unsigned short bytesToWrite = 0;
-	const UTF32 byteMask = 0xBF;
-	const UTF32 byteMark = 0x80; 
-	ch = *source++;
-	if (flags == strictConversion ) {
-	    /* UTF-16 surrogate values are illegal in UTF-32 */
-	    if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
-		--source; /* return to the illegal value itself */
-		result = sourceIllegal;
-		break;
-	    }
-	}
-	/*
-	 * Figure out how many bytes the result will require. Turn any
-	 * illegally large UTF32 values (> Plane 17) into replacement chars.
-	 */
-	if (ch < (UTF32)0x80) {	     bytesToWrite = 1;
-	} else if (ch < (UTF32)0x800) {     bytesToWrite = 2;
-	} else if (ch < (UTF32)0x10000) {   bytesToWrite = 3;
-	} else if (ch <= UNI_MAX_LEGAL_UTF32) {  bytesToWrite = 4;
-	} else {			    bytesToWrite = 3;
-					    ch = UNI_REPLACEMENT_CHAR;
-					    result = sourceIllegal;
-	}
-	
-	target += bytesToWrite;
-	if (target > targetEnd) {
-	    --source; /* Back up source pointer! */
-	    target -= bytesToWrite; result = targetExhausted; break;
-	}
-	switch (bytesToWrite) { /* note: everything falls through. */
-	    case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
-	    case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
-	    case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
-	    case 1: *--target = (UTF8) (ch | firstByteMark[bytesToWrite]);
-	}
-	target += bytesToWrite;
-    }
-    *sourceStart = source;
-    *targetStart = target;
-    return result;
-}
-
-/* --------------------------------------------------------------------- */
-
-ConversionResult ConvertUTF8toUTF32 (
-	const UTF8** sourceStart, const UTF8* sourceEnd, 
-	UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
-    ConversionResult result = conversionOK;
-    const UTF8* source = *sourceStart;
-    UTF32* target = *targetStart;
-    while (source < sourceEnd) {
-	UTF32 ch = 0;
-	unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
-	if (source + extraBytesToRead >= sourceEnd) {
-	    result = sourceExhausted; break;
-	}
-	/* Do this check whether lenient or strict */
-	if (! isLegalUTF8(source, extraBytesToRead+1)) {
-	    result = sourceIllegal;
-	    break;
-	}
-	/*
-	 * The cases all fall through. See "Note A" below.
-	 */
-	switch (extraBytesToRead) {
-	    case 5: ch += *source++; ch <<= 6;
-	    case 4: ch += *source++; ch <<= 6;
-	    case 3: ch += *source++; ch <<= 6;
-	    case 2: ch += *source++; ch <<= 6;
-	    case 1: ch += *source++; ch <<= 6;
-	    case 0: ch += *source++;
-	}
-	ch -= offsetsFromUTF8[extraBytesToRead];
-
-	if (target >= targetEnd) {
-	    source -= (extraBytesToRead+1); /* Back up the source pointer! */
-	    result = targetExhausted; break;
-	}
-	if (ch <= UNI_MAX_LEGAL_UTF32) {
-	    /*
-	     * UTF-16 surrogate values are illegal in UTF-32, and anything
-	     * over Plane 17 (> 0x10FFFF) is illegal.
-	     */
-	    if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
-		if (flags == strictConversion) {
-		    source -= (extraBytesToRead+1); /* return to the illegal value itself */
-		    result = sourceIllegal;
-		    break;
-		} else {
-		    *target++ = UNI_REPLACEMENT_CHAR;
-		}
-	    } else {
-		*target++ = ch;
-	    }
-	} else { /* i.e., ch > UNI_MAX_LEGAL_UTF32 */
-	    result = sourceIllegal;
-	    *target++ = UNI_REPLACEMENT_CHAR;
-	}
-    }
-    *sourceStart = source;
-    *targetStart = target;
-    return result;
-}
-
-/* ---------------------------------------------------------------------
-
-    Note A.
-    The fall-through switches in UTF-8 reading code save a
-    temp variable, some decrements & conditionals.  The switches
-    are equivalent to the following loop:
-	{
-	    int tmpBytesToRead = extraBytesToRead+1;
-	    do {
-		ch += *source++;
-		--tmpBytesToRead;
-		if (tmpBytesToRead) ch <<= 6;
-	    } while (tmpBytesToRead > 0);
-	}
-    In UTF-8 writing code, the switches on "bytesToWrite" are
-    similarly unrolled loops.
-
-   --------------------------------------------------------------------- */
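
As a worked example of the surrogate-pair arithmetic used by ConvertUTF32toUTF16() and ConvertUTF16toUTF32() above, here is a small sketch (illustrative only, not part of the deleted file), assuming the usual ConvertUTF constants halfShift = 10, halfBase = 0x10000 and halfMask = 0x3FF from the companion header, which is not shown in this diff.

// Illustrative sketch: split one supplementary-plane code point into a
// UTF-16 surrogate pair, mirroring the math in ConvertUTF32toUTF16().
#include <stdio.h>

int main(void)
{
    const unsigned long halfShift = 10;
    const unsigned long halfBase  = 0x0010000UL;
    const unsigned long halfMask  = 0x3FFUL;

    unsigned long ch = 0x1F600UL;            // a supplementary-plane code point
    unsigned long v  = ch - halfBase;        // 0x0F600
    unsigned int  hi = (unsigned int)((v >> halfShift) + 0xD800); // high surrogate: 0xD83D
    unsigned int  lo = (unsigned int)((v & halfMask)   + 0xDC00); // low surrogate:  0xDE00

    printf("U+%05lX -> %04X %04X\n", ch, hi, lo);   // U+1F600 -> D83D DE00
    return 0;
}
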
diff --git a/antlr-3.4/runtime/C/src/antlr3debughandlers.c b/antlr-3.4/runtime/C/src/antlr3debughandlers.c
deleted file mode 100644
index d5f177a..0000000
--- a/antlr-3.4/runtime/C/src/antlr3debughandlers.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/// \file
-/// Provides the debugging functions invoked by a recognizer
-/// built using the debug generator mode of the antlr tool.
-/// See antlr3debugeventlistener.h for documentation.
-///
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3.h>
-
-// Not everyone wishes to include the debugger stuff in their final deployment because
-// it will then rely on being linked with the socket libraries. Hence if the programmer turns
-// off the debugging, we do some dummy stuff that satisfies compilers etc. but means there is
-// no debugger and no reliance on the socket libraries. If you set this flag, then using the -debug
-// option to generate your code will produce code that just crashes, but then I presume you are smart
-// enough to realize that building the libraries without debugger support means you can't call the
-// debugger ;-)
-// 
-#ifdef ANTLR3_NODEBUGGER
-ANTLR3_API pANTLR3_DEBUG_EVENT_LISTENER
-antlr3DebugListenerNew()
-{
-		ANTLR3_PRINTF("C runtime was compiled without debugger support. This program will crash!!");
-		return NULL;
-}
-#else
-
-static	ANTLR3_BOOLEAN	handshake		(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-static	void	enterRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName);
-static	void	enterAlt				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int alt);
-static	void	exitRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName);
-static	void	enterSubRule			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
-static	void	exitSubRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
-static	void	enterDecision			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
-static	void	exitDecision			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
-static	void	consumeToken			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t);
-static	void	consumeHiddenToken		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t);
-static	void	LT						(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_COMMON_TOKEN t);
-static	void	mark					(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker);
-static	void	rewindMark				(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker);
-static	void	rewindLast				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-static	void	beginBacktrack			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level);
-static	void	endBacktrack			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level, ANTLR3_BOOLEAN successful);
-static	void	location				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int line, int pos);
-static	void	recognitionException	(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_EXCEPTION e);
-static	void	beginResync				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-static	void	endResync				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-static	void	semanticPredicate		(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_BOOLEAN result, const char * predicate);
-static	void	commence				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-static	void	terminate				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-static	void	consumeNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
-static	void	LTT						(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_BASE_TREE t);
-static	void	nilNode					(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
-static	void	errorNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
-static	void	createNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
-static	void	createNodeTok			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE node, pANTLR3_COMMON_TOKEN token);
-static	void	becomeRoot				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE newRoot, pANTLR3_BASE_TREE oldRoot);
-static	void	addChild				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE root, pANTLR3_BASE_TREE child);
-static	void	setTokenBoundaries		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t, ANTLR3_MARKER tokenStartIndex, ANTLR3_MARKER tokenStopIndex);
-static	void	ack						(pANTLR3_DEBUG_EVENT_LISTENER delboy);
-
-/// Create and initialize a new debug event listener that can be connected to
-/// by ANTLRWorks and any other debugger via a socket.
-///
-ANTLR3_API pANTLR3_DEBUG_EVENT_LISTENER
-antlr3DebugListenerNew()
-{
-	pANTLR3_DEBUG_EVENT_LISTENER	delboy;
-
-	delboy = ANTLR3_CALLOC(1, sizeof(ANTLR3_DEBUG_EVENT_LISTENER));
-
-	if	(delboy == NULL)
-	{
-		return NULL;
-	}
-
-	// Initialize the API
-	//
-	delboy->addChild				= addChild;
-	delboy->becomeRoot				= becomeRoot;
-	delboy->beginBacktrack			= beginBacktrack;
-	delboy->beginResync				= beginResync;
-	delboy->commence				= commence;
-	delboy->consumeHiddenToken		= consumeHiddenToken;
-	delboy->consumeNode				= consumeNode;
-	delboy->consumeToken			= consumeToken;
-	delboy->createNode				= createNode;
-	delboy->createNodeTok			= createNodeTok;
-	delboy->endBacktrack			= endBacktrack;
-	delboy->endResync				= endResync;
-	delboy->enterAlt				= enterAlt;
-	delboy->enterDecision			= enterDecision;
-	delboy->enterRule				= enterRule;
-	delboy->enterSubRule			= enterSubRule;
-	delboy->exitDecision			= exitDecision;
-	delboy->exitRule				= exitRule;
-	delboy->exitSubRule				= exitSubRule;
-	delboy->handshake				= handshake;
-	delboy->location				= location;
-	delboy->LT						= LT;
-	delboy->LTT						= LTT;
-	delboy->mark					= mark;
-	delboy->nilNode					= nilNode;
-	delboy->recognitionException	= recognitionException;
-	delboy->rewind					= rewindMark;
-	delboy->rewindLast				= rewindLast;
-	delboy->semanticPredicate		= semanticPredicate;
-	delboy->setTokenBoundaries		= setTokenBoundaries;
-	delboy->terminate				= terminate;
-	delboy->errorNode				= errorNode;
-
-	delboy->PROTOCOL_VERSION		= 2;	// ANTLR 3.1 is at protocol version 2
-
-	delboy->port					= DEFAULT_DEBUGGER_PORT;
-
-	return delboy;
-}
-
-pANTLR3_DEBUG_EVENT_LISTENER
-antlr3DebugListenerNewPort(ANTLR3_UINT32 port)
-{
-	pANTLR3_DEBUG_EVENT_LISTENER	delboy;
-
-	delboy		 = antlr3DebugListenerNew();
-
-	if	(delboy != NULL)
-	{
-		delboy->port = port;
-	}
-
-	return delboy;
-}
-
-//--------------------------------------------------------------------------------
-// Support functions for sending stuff over the socket interface
-//
-static int 
-sockSend(SOCKET sock, const char * ptr, int len)
-{
-	int		sent;
-	int		thisSend;
-
-	sent	= 0;
-		
-	while	(sent < len)
-	{
-		// Send as many bytes as we can
-		//
-		thisSend =	send(sock, ptr, len - sent, 0);
-
-		// Check for errors and tell the user if we got one
-		//
-		if	(thisSend	== -1)
-		{
-			return	ANTLR3_FALSE;
-		}
-
-		// Increment our offset by how many we were able to send
-		//
-		ptr			+= thisSend;
-		sent		+= thisSend;
-	}
-	return	ANTLR3_TRUE;
-}
-
-static	ANTLR3_BOOLEAN	
-handshake				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
-{
-	/// Connection structure with which to wait and accept a connection from
-	/// a debugger.
-	///
-	SOCKET				serverSocket;
-
-	// Connection structures to deal with the client after we accept the connection
-	// and the server while we accept a connection.
-	//
-	ANTLR3_SOCKADDRT	client;
-	ANTLR3_SOCKADDRT	server;
-
-	// Buffer to construct our message in
-	//
-	char	message[256];
-
-	// Specifies the length of the connection structure to accept()
-	// Windows uses int, everyone else uses size_t
-	//
-	ANTLR3_SALENT				sockaddr_len;
-
-	// Option holder for setsockopt()
-	//
-	int		optVal;
-
-	if	(delboy->initialized == ANTLR3_FALSE)
-	{
-		// Windows requires us to initialize WinSock.
-		//
-#ifdef ANTLR3_WINDOWS
-		{
-			WORD		wVersionRequested;
-			WSADATA		wsaData;
-			int			err;			// Return code from WSAStartup
-
-			// We must initialise the Windows socket system when the DLL is loaded.
-			// We are asking for Winsock 1.1 or better as we don't need anything
-			// too complicated for this.
-			//
-			wVersionRequested = MAKEWORD( 1, 1);
-
-			err = WSAStartup( wVersionRequested, &wsaData );
-
-			if ( err != 0 ) 
-			{
-				// Tell the user that we could not find a usable
-				// WinSock DLL
-				//
-				return FALSE;
-			}
-		}
-#endif
-
-		// Create the server socket, we are the server because we just wait until
-		// a debugger connects to the port we are listening on.
-		//
-		serverSocket	= socket(AF_INET, SOCK_STREAM, 0);
-
-		if	(serverSocket == INVALID_SOCKET)
-		{
-			return ANTLR3_FALSE;
-		}
-
-		// Set the listening port
-		//
-		server.sin_port			= htons((unsigned short)delboy->port);
-		server.sin_family		= AF_INET;
-		server.sin_addr.s_addr	= htonl (INADDR_ANY);
-
-		// We could allow a rebind on the same addr/port pair I suppose, but
-		// I imagine that most people will just want to debug one parser at a time.
-		// Maybe change this at some point, but rejecting the bind at this point will ensure
-		// that people realize they have left something running in the background.
-		//
-		if	(bind(serverSocket, (pANTLR3_SOCKADDRC)&server, sizeof(server)) == -1)
-		{
-			return ANTLR3_FALSE;
-		}
-
-		// We have bound the socket to the port and address so we now ask the TCP subsystem
-		// to start listening on that address/port
-		//
-		if	(listen(serverSocket, 1) == -1)
-		{
-			// Some error, just fail
-			//
-			return	ANTLR3_FALSE;
-		}
-
-		// Now we can try to accept a connection on the port
-		//
-		sockaddr_len	= sizeof(client);
-		delboy->socket	= accept(serverSocket, (pANTLR3_SOCKADDRC)&client, &sockaddr_len);
-
-		// Having accepted a connection, we can stop listening and close down the socket
-		//
-		shutdown		(serverSocket, 0x02);
-		ANTLR3_CLOSESOCKET		(serverSocket);
-
-		if	(delboy->socket == -1)
-		{
-			return ANTLR3_FALSE;
-		}
-
-		// Disable Nagle as this is essentially a chat exchange
-		//
-		optVal	= 1;
-		setsockopt(delboy->socket, SOL_SOCKET, TCP_NODELAY, (const void *)&optVal, sizeof(optVal));
-		
-	}
-
-	// We now have a good socket connection with the debugging client, so we
-	// send it the protocol version we are using and the name of the grammar
-	// that we represent.
-	//
-	sprintf		(message, "ANTLR %d\n", delboy->PROTOCOL_VERSION);
-	sockSend	(delboy->socket, message, (int)strlen(message));
-	sprintf		(message, "grammar \"%s\n", delboy->grammarFileName->chars);
-	sockSend	(delboy->socket, message, (int)strlen(message));
-	ack			(delboy);
-
-	delboy->initialized = ANTLR3_TRUE;
-
-	return	ANTLR3_TRUE;
-}
-
-// Send the supplied text and wait for an ack from the client
-static void
-transmit(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * ptr)
-{
-	sockSend(delboy->socket, ptr, (int)strlen(ptr));
-	ack(delboy);
-}
-
-static	void
-ack						(pANTLR3_DEBUG_EVENT_LISTENER delboy)
-{
-	// Local buffer to read the next character in to
-	//
-	char	buffer;
-	int		rCount;
-
-	// Ack terminates in a line feed, so we just wait for
-	// one of those. Speed is not of the essence so we don't need
-	// to buffer the input or anything.
-	//
-	do
-	{
-		rCount = recv(delboy->socket, &buffer, 1, 0);
-	}
-	while	(rCount == 1 && buffer != '\n');
-
-	// If the socket was closed on us, then we will get an error or
-	// (with a graceful close), 0. We can assume that the debugger stopped for some reason
-	// (such as Java crashing again). Therefore we just exit the program
-	// completely if we don't get the terminating '\n' for the ack.
-	//
-	if	(rCount != 1)
-	{
-		ANTLR3_PRINTF("Exiting debugger as remote client closed the socket\n");
-		ANTLR3_PRINTF("Received char count was %d, and last char received was %02X\n", rCount, buffer);
-		exit(0);
-	}
-}
-
-// Given a buffer string and a source string, serialize the
-// text, escaping any newlines and carriage returns. We have no need
-// for speed here, this is the debugger.
-//
-void
-serializeText(pANTLR3_STRING buffer, pANTLR3_STRING text)
-{
-	ANTLR3_UINT32	c;
-	ANTLR3_UCHAR	character;
-
-	// strings lead in with a "
-	//
-	buffer->append(buffer, "\t\"");
-
-	if	(text == NULL)
-	{
-		return;
-	}
-
-	// Now we replace carriage returns, linefeeds and the escape
-	// lead-in character '%' with their hex equivalents
-	// prefixed by '%'
-	//
-	for	(c = 0; c < text->len; c++)
-	{
-		switch	(character = text->charAt(text, c))
-		{
-			case	'\n':
-
-				buffer->append(buffer, "%0A");
-				break;
-
-			case	'\r':
-			
-				buffer->append(buffer, "%0D");
-				break;
-
-			case	'\\':
-
-				buffer->append(buffer, "%25");
-				break;
-
-				// Other characters: The Song Remains the Same.
-				//
-			default:
-					
-				buffer->addc(buffer, character);
-				break;
-		}
-	}
-}
-
-// Given a token, create a stringified version of it, in the supplied
-// buffer. We create a string for this in the debug 'object', if there 
-// is not one there already, and then reuse it here if asked to do this
-// again.
-//
-pANTLR3_STRING
-serializeToken(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t)
-{
-	// Do we already have a serialization buffer?
-	//
-	if	(delboy->tokenString == NULL)
-	{
-		// No, so create one, using the string factory that
-		// the grammar name used, which is guaranteed to exist.
-		// 64 bytes will do us here for starters. 
-		//
-		delboy->tokenString = delboy->grammarFileName->factory->newSize(delboy->grammarFileName->factory, 64);
-	}
-
-	// Empty string
-	//
-	delboy->tokenString->set(delboy->tokenString, (const char *)"");
-
-	// Now we serialize the elements of the token. Note that the debugger only
-	// uses 32 bits.
-	//
-	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(t->getTokenIndex(t)));
-	delboy->tokenString->addc(delboy->tokenString, '\t');
-	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(t->getType(t)));
-	delboy->tokenString->addc(delboy->tokenString, '\t');
-	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(t->getChannel(t)));
-	delboy->tokenString->addc(delboy->tokenString, '\t');
-	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(t->getLine(t)));
-	delboy->tokenString->addc(delboy->tokenString, '\t');
-	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(t->getCharPositionInLine(t)));
-
-	// Now send the text that the token represents.
-	//
-	serializeText(delboy->tokenString, t->getText(t));
-
-	// Finally, as the debugger is a Java program it will expect to get UTF-8
-	// encoded strings. We don't use UTF-8 internally to the C runtime, so we 
-	// must force encode it. We have a method to do this in the string class, but
-	// it returns malloc space that we must free afterwards.
-	//
-	return delboy->tokenString->toUTF8(delboy->tokenString);
-}
-
-// Given a tree node, create a stringified version of it in the supplied
-// buffer.
-//
-pANTLR3_STRING
-serializeNode(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE node)
-{
-	pANTLR3_COMMON_TOKEN	token;
-
-
-	// Do we already have a serialization buffer?
-	//
-	if	(delboy->tokenString == NULL)
-	{
-		// No, so create one, using the string factory that
-		// the grammar name used, which is guaranteed to exist.
-		// 64 bytes will do us here for starters. 
-		//
-		delboy->tokenString = delboy->grammarFileName->factory->newSize(delboy->grammarFileName->factory, 64);
-	}
-
-	// Empty string
-	//
-	delboy->tokenString->set(delboy->tokenString, (const char *)"");
-
-	// Protect against bugs/errors etc
-	//
-	if	(node == NULL)
-	{
-		return delboy->tokenString;
-	}
-
-	// Now we serialize the elements of the node. Note that the debugger only
-	// uses 32 bits.
-	//
-	delboy->tokenString->addc(delboy->tokenString, '\t');
-
-	// Adaptor ID
-	//
-	delboy->tokenString->addi(delboy->tokenString, delboy->adaptor->getUniqueID(delboy->adaptor, node));
-	delboy->tokenString->addc(delboy->tokenString, '\t');
-
-	// Type of the current token (which may be imaginary)
-	//
-	delboy->tokenString->addi(delboy->tokenString, delboy->adaptor->getType(delboy->adaptor, node));
-
-	// See if we have an actual token or just an imaginary
-	//
-	token	= delboy->adaptor->getToken(delboy->adaptor, node);
-
-	delboy->tokenString->addc(delboy->tokenString, '\t');
-	if	(token != NULL)
-	{
-		// Real token
-		//
-		delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(token->getLine(token)));
-		delboy->tokenString->addc(delboy->tokenString, ' ');
-		delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(token->getCharPositionInLine(token)));
-	}
-	else
-	{
-		// Imaginary tokens have no location
-		//
-		delboy->tokenString->addi(delboy->tokenString, -1);
-		delboy->tokenString->addc(delboy->tokenString, '\t');
-		delboy->tokenString->addi(delboy->tokenString, -1);
-	}
-
-	// Start Index of the node
-	//
-	delboy->tokenString->addc(delboy->tokenString, '\t');
-	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_UINT32)(delboy->adaptor->getTokenStartIndex(delboy->adaptor, node)));
-
-	// Now send the text that the node represents.
-	//
-	serializeText(delboy->tokenString, delboy->adaptor->getText(delboy->adaptor, node));
-
-	// Finally, as the debugger is a Java program it will expect to get UTF-8
-	// encoded strings. We don't use UTF-8 internally to the C runtime, so we 
-	// must force encode it. We have a method to do this in the string class, but
-	// there is no utf8 string implementation as of yet
-	//
-	return delboy->tokenString->toUTF8(delboy->tokenString);
-}
-
-//------------------------------------------------------------------------------------------------------------------
-// EVENTS
-//
-static	void
-enterRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName)
-{
-	char	buffer[512];
-
-	// Create the message (speed is not of the essence)
-	//
-	sprintf(buffer, "enterRule\t%s\t%s\n", grammarFileName, ruleName);
-	transmit(delboy, buffer);
-}
-
-static	void	
-enterAlt				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int alt)
-{
-	char	buffer[512];
-
-	// Create the message (speed is not of the essence)
-	//
-	sprintf(buffer, "enterAlt\t%d\n", alt);
-	transmit(delboy, buffer);
-}
-
-static	void	
-exitRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName)
-{
-	char	buffer[512];
-
-	// Create the message (speed is not of the essence)
-	//
-	sprintf(buffer, "exitRule\t%s\t%s\n", grammarFileName, ruleName);
-	transmit(delboy, buffer);
-}
-
-static	void	
-enterSubRule			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber)
-{
-	char	buffer[512];
-
-	// Create the message (speed is not of the essence)
-	//
-	sprintf(buffer, "enterSubRule\t%d\n", decisionNumber);
-	transmit(delboy, buffer);
-}
-
-static	void	
-exitSubRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber)
-{
-	char	buffer[512];
-
-	// Create the message (speed is not of the essence)
-	//
-	sprintf(buffer, "exitSubRule\t%d\n", decisionNumber);
-	transmit(delboy, buffer);
-}
-
-static	void	
-enterDecision			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber)
-{
-	char	buffer[512];
-
-	// Create the message (speed is not of the essence)
-	//
-	sprintf(buffer, "enterDecision\t%d\n", decisionNumber);
-	transmit(delboy, buffer);
-
-}
-
-static	void	
-exitDecision			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber)
-{
-	char	buffer[512];
-
-	// Create the message (speed is not of the essence)
-	//
-	sprintf(buffer, "exitDecision\t%d\n", decisionNumber);
-	transmit(delboy, buffer);
-}
-
-static	void	
-consumeToken			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t)
-{
-	pANTLR3_STRING msg;
-
-	// Create the serialized token
-	//
-	msg = serializeToken(delboy, t);
-
-	// Insert the debug event indicator
-	//
-	msg->insert8(msg, 0, "consumeToken\t");
-
-	msg->addc(msg, '\n');
-
-	// Transmit the message and wait for ack
-	//
-	transmit(delboy, (const char *)(msg->chars));
-}
-
-static	void	
-consumeHiddenToken		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t)
-{
-	pANTLR3_STRING msg;
-
-	// Create the serialized token
-	//
-	msg = serializeToken(delboy, t);
-
-	// Insert the debug event indicator
-	//
-	msg->insert8(msg, 0, "consumeHiddenToken\t");
-
-	msg->addc(msg, '\n');
-
-	// Transmit the message and wait for ack
-	//
-	transmit(delboy, (const char *)(msg->chars));
-}
-
-// Looking at the next token event.
-//
-static	void	
-LT						(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_COMMON_TOKEN t)
-{
-	pANTLR3_STRING msg;
-
-	if	(t != NULL)
-	{
-		// Create the serialized token
-		//
-		msg = serializeToken(delboy, t);
-
-		// Insert the index parameter
-		//
-		msg->insert8(msg, 0, "\t");
-		msg->inserti(msg, 0, i);
-
-		// Insert the debug event indicator
-		//
-		msg->insert8(msg, 0, "LT\t");
-
-		msg->addc(msg, '\n');
-
-		// Transmit the message and wait for ack
-		//
-		transmit(delboy, (const char *)(msg->chars));
-	}
-}
-
-static	void	
-mark					(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker)
-{
-	char buffer[128];
-
-	sprintf(buffer, "mark\t%d\n", (ANTLR3_UINT32)(marker & 0xFFFFFFFF));
-
-	// Transmit the message and wait for ack
-	//
-	transmit(delboy, buffer);
-}
-
-static	void	
-rewindMark					(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker)
-{
-	char buffer[128];
-
-	sprintf(buffer, "rewind\t%d\n", (ANTLR3_UINT32)(marker & 0xFFFFFFFF));
-
-	// Transmit the message and wait for ack
-	//
-	transmit(delboy, buffer);
-
-}
-
-static	void	
-rewindLast				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
-{
-	transmit(delboy, (const char *)"rewind\n");
-}
-
-static	void	
-beginBacktrack			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level)
-{
-	char buffer[128];
-
-	sprintf(buffer, "beginBacktrack\t%d\n", (ANTLR3_UINT32)(level & 0xFFFFFFFF));
-
-	// Transmit the message and wait for ack
-	//
-	transmit(delboy, buffer);
-}
-
-static	void	
-endBacktrack			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level, ANTLR3_BOOLEAN successful)
-{
-	char buffer[128];
-
-	sprintf(buffer, "endBacktrack\t%d\t%d\n", level, successful);
-
-	// Transmit the message and wait for ack
-	//
-	transmit(delboy, buffer);
-}
-
-static	void	
-location				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int line, int pos)
-{
-	char buffer[128];
-
-	sprintf(buffer, "location\t%d\t%d\n", line, pos);
-
-	// Transmit the message and wait for ack
-	//
-	transmit(delboy, buffer);
-}
-
-static	void	
-recognitionException	(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_EXCEPTION e)
-{
-	char	buffer[256];
-
-	sprintf(buffer, "exception\t%s\t%d\t%d\t%d\n", (char *)(e->name), (ANTLR3_INT32)(e->index), e->line, e->charPositionInLine);
-
-	// Transmit the message and wait for ack
-	//
-	transmit(delboy, buffer);
-}
-
-static	void	
-beginResync				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
-{
-	transmit(delboy, (const char *)"beginResync\n");
-}
-
-static	void	
-endResync				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
-{
-	transmit(delboy, (const char *)"endResync\n");
-}
-
-static	void	
-semanticPredicate		(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_BOOLEAN result, const char * predicate)
-{
-	unsigned char * buffer;
-	unsigned char * out;
-
-	if	(predicate != NULL)
-	{
-		buffer	= (unsigned char *)ANTLR3_MALLOC(64 + 2*strlen(predicate));
-
-		if	(buffer != NULL)
-		{
-			out = buffer + sprintf((char *)buffer, "semanticPredicate\t%s\t", result == ANTLR3_TRUE ? "true" : "false");
-
-			while (*predicate != '\0')
-			{
-				switch(*predicate)
-				{
-					case	'\n':
-						
-						*out++	= '%';
-						*out++	= '0';
-						*out++	= 'A';
-						break;
-
-					case	'\r':
-
-						*out++	= '%';
-						*out++	= '0';
-						*out++	= 'D';
-						break;
-
-					case	'%':
-
-						*out++	= '%';
-						*out++	= '0';
-						*out++	= 'D';
-						break;
-
-
-					default:
-
-						*out++	= *predicate;
-						break;
-				}
-
-				predicate++;
-			}
-			*out++	= '\n';
-			*out++	= '\0';
-		}
-
-		// Send it and wait for the ack
-		//
-		transmit(delboy, (const char *)buffer);
-	}
-}
-
-#ifdef ANTLR3_WINDOWS
-#pragma warning	(push)
-#pragma warning (disable : 4100)
-#endif
-
-static	void	
-commence				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
-{
-	// Nothing to see here
-	//
-}
-
-#ifdef ANTLR3_WINDOWS
-#pragma warning	(pop)
-#endif
-
-static	void	
-terminate				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
-{
-	// Terminate sequence
-	//
-	sockSend(delboy->socket, "terminate\n", 10);		// Send out the command
-}
-
-//----------------------------------------------------------------
-// Tree parsing events
-//
-static	void	
-consumeNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t)
-{
-	pANTLR3_STRING	buffer;
-
-	buffer = serializeNode	(delboy, t);
-
-	// Now prepend the command
-	//
-	buffer->insert8	(buffer, 0, "consumeNode\t");
-	buffer->addc	(buffer, '\n');
-
-	// Send to the debugger and wait for the ack
-	//
-	transmit		(delboy, (const char *)(delboy->tokenString->toUTF8(delboy->tokenString)->chars));
-}
-
-static	void	
-LTT						(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_BASE_TREE t)
-{
-	pANTLR3_STRING	buffer;
-
-	buffer = serializeNode	(delboy, t);
-
-	// Now prepend the command
-	//
-	buffer->insert8	(buffer, 0, "\t");
-	buffer->inserti	(buffer, 0, i);
-	buffer->insert8	(buffer, 0, "LN\t");
-	buffer->addc	(buffer, '\n');
-
-	// Send to the debugger and wait for the ack
-	//
-	transmit		(delboy, (const char *)(delboy->tokenString->toUTF8(delboy->tokenString)->chars));
-}
-
-static	void	
-nilNode					(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t)
-{
-	char	buffer[128];
-	sprintf(buffer, "nilNode\t%d\n", delboy->adaptor->getUniqueID(delboy->adaptor, t));
-	transmit(delboy, buffer);
-}
-
-static	void	
-createNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t)
-{
-	// Do we already have a serialization buffer?
-	//
-	if	(delboy->tokenString == NULL)
-	{
-		// No, so create one, using the string factory that
-		// the grammar name used, which is guaranteed to exist.
-		// 64 bytes will do us here for starters. 
-		//
-		delboy->tokenString = delboy->grammarFileName->factory->newSize(delboy->grammarFileName->factory, 64);
-	}
-
-	// Empty string
-	//
-	delboy->tokenString->set8(delboy->tokenString, (const char *)"createNodeFromTokenElements ");
-
-	// Now we serialize the elements of the node. Note that the debugger only
-	// uses 32 bits.
-	//
-	// Adaptor ID
-	//
-	delboy->tokenString->addi(delboy->tokenString, delboy->adaptor->getUniqueID(delboy->adaptor, t));
-	delboy->tokenString->addc(delboy->tokenString, '\t');
-
-	// Type of the current token (which may be imaginary)
-	//
-	delboy->tokenString->addi(delboy->tokenString, delboy->adaptor->getType(delboy->adaptor, t));
-
-	// The text that this node represents
-	//
-	serializeText(delboy->tokenString, delboy->adaptor->getText(delboy->adaptor, t));
-	delboy->tokenString->addc(delboy->tokenString, '\n');
-
-	// Finally, as the debugger is a Java program it will expect to get UTF-8
-	// encoded strings. We don't use UTF-8 internally to the C runtime, so we 
-	// must force encode it. We have a method to do this in the string class, but
-	// there is no utf8 string implementation as of yet
-	//
-	transmit(delboy, (const char *)(delboy->tokenString->toUTF8(delboy->tokenString)->chars));
-
-}
-static void
-errorNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t)
-{
-	// Do we already have a serialization buffer?
-	//
-	if	(delboy->tokenString == NULL)
-	{
-		// No, so create one, using the string factory that
-		// the grammar name used, which is guaranteed to exist.
-		// 64 bytes will do us here for starters. 
-		//
-		delboy->tokenString = delboy->grammarFileName->factory->newSize(delboy->grammarFileName->factory, 64);
-	}
-
-	// Empty string
-	//
-	delboy->tokenString->set8(delboy->tokenString, (const char *)"errorNode\t");
-
-	// Now we serialize the elements of the node. Note that the debugger only
-	// uses 32 bits.
-	//
-	// Adaptor ID
-	//
-	delboy->tokenString->addi(delboy->tokenString, delboy->adaptor->getUniqueID(delboy->adaptor, t));
-	delboy->tokenString->addc(delboy->tokenString, '\t');
-
-	// Type of the current token (which is an error)
-	//
-	delboy->tokenString->addi(delboy->tokenString, ANTLR3_TOKEN_INVALID);
-
-	// The text that this node represents
-	//
-	serializeText(delboy->tokenString, delboy->adaptor->getText(delboy->adaptor, t));
-	delboy->tokenString->addc(delboy->tokenString, '\n');
-
-	// Finally, as the debugger is a Java program it will expect to get UTF-8
-	// encoded strings. We don't use UTF-8 internally to the C runtime, so we 
-	// must force encode it. We have a method to do this in the string class, but
-	// there is no utf8 string implementation as of yet
-	//
-	transmit(delboy, (const char *)(delboy->tokenString->toUTF8(delboy->tokenString)->chars));
-
-}
-
-static	void	
-createNodeTok			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE node, pANTLR3_COMMON_TOKEN token)
-{
-	char	buffer[128];
-
-	sprintf(buffer, "createNode\t%d\t%d\n",	delboy->adaptor->getUniqueID(delboy->adaptor, node), (ANTLR3_UINT32)token->getTokenIndex(token));
-
-	transmit(delboy, buffer);
-}
-
-static	void	
-becomeRoot				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE newRoot, pANTLR3_BASE_TREE oldRoot)
-{
-	char	buffer[128];
-
-	sprintf(buffer, "becomeRoot\t%d\t%d\n",	delboy->adaptor->getUniqueID(delboy->adaptor, newRoot),
-											delboy->adaptor->getUniqueID(delboy->adaptor, oldRoot)
-											);
-	transmit(delboy, buffer);
-}
-
-
-static	void	
-addChild				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE root, pANTLR3_BASE_TREE child)
-{
-	char	buffer[128];
-
-	sprintf(buffer, "addChild\t%d\t%d\n",	delboy->adaptor->getUniqueID(delboy->adaptor, root),
-											delboy->adaptor->getUniqueID(delboy->adaptor, child)
-											);
-	transmit(delboy, buffer);
-}
-
-static	void	
-setTokenBoundaries		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t, ANTLR3_MARKER tokenStartIndex, ANTLR3_MARKER tokenStopIndex)
-{
-	char	buffer[128];
-
-	sprintf(buffer, "becomeRoot\t%d\t%d\t%d\n",	delboy->adaptor->getUniqueID(delboy->adaptor, t),
-												(ANTLR3_UINT32)tokenStartIndex,
-												(ANTLR3_UINT32)tokenStopIndex
-											);
-	transmit(delboy, buffer);
-}
-#endif
-
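
For orientation, the deleted listener above speaks a simple line-oriented protocol: each event is a command name plus tab-separated 32-bit fields, any token text is appended after a '\t"' lead-in with %XX escapes for newlines and carriage returns, and every line ends with '\n' before the listener waits for the client's ack. The sketch below (not part of the deleted file; the field values are made up for illustration) shows the shape of one consumeToken line as serializeToken()/serializeText() would assemble it.

// Illustrative sketch: format one consumeToken event line by hand.
#include <stdio.h>

int main(void)
{
    char msg[256];
    // command  index  type  channel  line  column  "escaped-text
    snprintf(msg, sizeof(msg),
             "consumeToken\t%d\t%d\t%d\t%d\t%d\t\"%s\n",
             5, 42, 0, 3, 17, "id%0A");      // a '\n' inside the text travels as %0A
    fputs(msg, stdout);                      // the real code then waits for an ack line
    return 0;
}
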
diff --git a/antlr-3.4/runtime/C/src/antlr3inputstream.c b/antlr-3.4/runtime/C/src/antlr3inputstream.c
deleted file mode 100644
index e3f1c26..0000000
--- a/antlr-3.4/runtime/C/src/antlr3inputstream.c
+++ /dev/null
@@ -1,2050 +0,0 @@
-/// \file
-/// Base functions to initialize and manipulate any input stream
-///
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3input.h>
-
-// -----------------------------------
-// Generic 8 bit input such as latin-1
-//
-
-// 8Bit INT Stream API
-//
-static	    void	    antlr38BitConsume		(pANTLR3_INT_STREAM is);
-static	    ANTLR3_UCHAR    antlr38BitLA		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
-static	    ANTLR3_UCHAR    antlr38BitLA_ucase		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
-static	    ANTLR3_MARKER   antlr38BitIndex		(pANTLR3_INT_STREAM is);
-static	    ANTLR3_MARKER   antlr38BitMark		(pANTLR3_INT_STREAM is);
-static	    void	    antlr38BitRewind		(pANTLR3_INT_STREAM is, ANTLR3_MARKER mark);
-static	    void	    antlr38BitRewindLast	(pANTLR3_INT_STREAM is);
-static	    void	    antlr38BitRelease		(pANTLR3_INT_STREAM is, ANTLR3_MARKER mark);
-static	    void	    antlr38BitSeek		(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint);
-static	    pANTLR3_STRING  antlr38BitGetSourceName	(pANTLR3_INT_STREAM is);
-
-// 8Bit Charstream API functions
-//
-static	    void	    antlr3InputClose		(pANTLR3_INPUT_STREAM input);
-static	    void	    antlr3InputReset		(pANTLR3_INPUT_STREAM input);
-static      void            antlr38BitReuse            (pANTLR3_INPUT_STREAM input, pANTLR3_UINT8 inString, ANTLR3_UINT32 size, pANTLR3_UINT8 name);
-static	    void *	    antlr38BitLT		(pANTLR3_INPUT_STREAM input, ANTLR3_INT32 lt);
-static	    ANTLR3_UINT32   antlr38BitSize		(pANTLR3_INPUT_STREAM input);
-static	    pANTLR3_STRING  antlr38BitSubstr		(pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop);
-static	    ANTLR3_UINT32   antlr38BitGetLine		(pANTLR3_INPUT_STREAM input);
-static	    void	  * antlr38BitGetLineBuf	(pANTLR3_INPUT_STREAM input);
-static	    ANTLR3_UINT32   antlr38BitGetCharPosition	(pANTLR3_INPUT_STREAM input);
-static	    void	    antlr38BitSetLine		(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 line);
-static	    void	    antlr38BitSetCharPosition	(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 position);
-static	    void	    antlr38BitSetNewLineChar	(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 newlineChar);
-static	    void	    antlr38BitSetUcaseLA	(pANTLR3_INPUT_STREAM input, ANTLR3_BOOLEAN flag);
-
-// -----------------------------------
-// UTF16 (also covers UCS2)
-//
-// INT Stream API
-//
-static	    void	    antlr3UTF16Consume	        (pANTLR3_INT_STREAM is);
-static	    ANTLR3_UCHAR    antlr3UTF16LA		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
-static	    void	    antlr3UTF16ConsumeLE        (pANTLR3_INT_STREAM is);
-static	    ANTLR3_UCHAR    antlr3UTF16LALE		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
-static	    void	    antlr3UTF16ConsumeBE        (pANTLR3_INT_STREAM is);
-static	    ANTLR3_UCHAR    antlr3UTF16LABE		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
-static	    ANTLR3_MARKER   antlr3UTF16Index		(pANTLR3_INT_STREAM is);
-static	    void	    antlr3UTF16Seek		(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint);
-
-// UTF16 Charstream API functions
-//
-static	    pANTLR3_STRING	antlr3UTF16Substr	(pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop);
-
-// -----------------------------------
-// UTF32 (also covers UCS4)
-//
-// INT Stream API
-//
-static	    void	    antlr3UTF32Consume	        (pANTLR3_INT_STREAM is);
-static	    ANTLR3_UCHAR    antlr3UTF32LA		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
-static	    ANTLR3_UCHAR    antlr3UTF32LALE		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
-static	    ANTLR3_UCHAR    antlr3UTF32LABE		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
-static	    ANTLR3_MARKER   antlr3UTF32Index		(pANTLR3_INT_STREAM is);
-static	    void	    antlr3UTF32Seek		(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint);
-
-// UTF32 Charstream API functions
-//
-static	    pANTLR3_STRING  antlr3UTF32Substr	        (pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop);
-
-// ------------------------------------
-// UTF-8
-//
-static	    void	    antlr3UTF8Consume	        (pANTLR3_INT_STREAM is);
-static	    ANTLR3_UCHAR    antlr3UTF8LA		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
-
-// ------------------------------------
-// EBCDIC
-//
-static	    ANTLR3_UCHAR    antlr3EBCDICLA		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
-
-/// \brief Common function to setup function interface for an 8 bit input stream.
-///
-/// \param input Input stream context pointer
-///
-/// \remark
-///   - Many of the 8 bit oriented file stream handling functions will be usable
-///     by any, or at least some, other input streams. It is therefore perfectly acceptable
-///     to call this function to install the 8 bit handler and then override just those functions
-///     that would not work for the particular input encoding, such as consume, for instance.
-/// 
-void 
-antlr38BitSetupStream	(pANTLR3_INPUT_STREAM input)
-{
-    // Build a string factory for this stream
-    //
-    input->strFactory	= antlr3StringFactoryNew(input->encoding);
-
-    // Default stream API set up is for 8Bit, so we are done
-    //
-}
-
-void
-antlr3GenericSetupStream  (pANTLR3_INPUT_STREAM input)
-{
-    /* Install function pointers for an 8 bit input
-     */
-
-    /* Allocate stream interface
-     */
-    input->istream		= antlr3IntStreamNew();
-    input->istream->type        = ANTLR3_CHARSTREAM;
-    input->istream->super       = input;
-
-    /* Intstream API
-     */
-    input->istream->consume	    = antlr38BitConsume;	    // Consume the next 8 bit character in the buffer			
-    input->istream->_LA		    = antlr38BitLA;	            // Return the UTF32 character at offset n (1 based)			
-    input->istream->index	    = antlr38BitIndex;	            // Current index (offset from the first character)
-    input->istream->mark	    = antlr38BitMark;		    // Record the current lex state for later restore
-    input->istream->rewind	    = antlr38BitRewind;	            // How to rewind the input to a supplied mark
-    input->istream->rewindLast	    = antlr38BitRewindLast;	    // How to rewind the input to the most recent mark
-    input->istream->seek	    = antlr38BitSeek;		    // How to seek to a specific point in the stream		    
-    input->istream->release	    = antlr38BitRelease;	    // Reset marks after mark n									
-    input->istream->getSourceName   = antlr38BitGetSourceName;      // Return a string that names the input source
-
-    /* Charstream API
-     */
-    input->close		    =  antlr3InputClose;	    // Close down the stream completely										
-    input->free			    =  antlr3InputClose;	    // Synonym for free														
-    input->reset		    =  antlr3InputReset;	    // Reset input to start	
-    input->reuse                    =  antlr38BitReuse;             // Install a new input string and reset
-    input->_LT			    =  antlr38BitLT;		    // Same as _LA for 8 bit file										
-    input->size			    =  antlr38BitSize;		    // Return the size of the input buffer									
-    input->substr		    =  antlr38BitSubstr;	    // Return a string from the input stream								
-    input->getLine		    =  antlr38BitGetLine;	    // Return the current line number in the input stream					
-    input->getLineBuf		    =  antlr38BitGetLineBuf;	    // Return a pointer to the start of the current line being consumed	    
-    input->getCharPositionInLine    =  antlr38BitGetCharPosition;   // Return the offset into the current line of input						
-    input->setLine		    =  antlr38BitSetLine;	    // Set the input stream line number (does not set buffer pointers)	    
-    input->setCharPositionInLine    =  antlr38BitSetCharPosition;   // Set the offset in to the current line (does not set any pointers)   
-    input->SetNewLineChar	    =  antlr38BitSetNewLineChar;    // Set the value of the newline trigger character						
-    input->setUcaseLA		    =  antlr38BitSetUcaseLA;        // Changes the LA function to return upper case always
-
-    input->charByteSize		    = 1;		// Size in bytes of characters in this stream.
-
-    /* Initialize entries for tables etc
-     */
-    input->markers  = NULL;
-
-    /* Set up the input stream brand new
-     */
-    input->reset(input);
-    
-    /* Install default line separator character (it can be replaced
-     * by the grammar programmer later)
-     */
-    input->SetNewLineChar(input, (ANTLR3_UCHAR)'\n');
-}
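
For readers unfamiliar with the pattern used by antlr3GenericSetupStream above, here is a minimal standalone sketch of the same idea: install a default set of function pointers, then let an encoding-specific setup overwrite only the handlers that differ. Every name below is illustrative; nothing here is part of the ANTLR3 API.

#include <stdio.h>

typedef struct toy_stream toy_stream;
struct toy_stream {
    const char *data;                    /* backing buffer                */
    unsigned    next;                    /* index of the next element     */
    unsigned    size;                    /* number of elements in data    */
    int  (*la)(toy_stream *, int);       /* lookahead, 1 based            */
    void (*consume)(toy_stream *);       /* advance by one element        */
};

static int  toy_la(toy_stream *s, int k) { unsigned i = s->next + (unsigned)k - 1; return i < s->size ? s->data[i] : -1; }
static void toy_consume(toy_stream *s)   { if (s->next < s->size) s->next++; }

/* "Generic" setup: install the default handlers. An encoding-specific
 * setup would call this first and then replace only la/consume as needed.
 */
static void toy_setup(toy_stream *s, const char *data, unsigned size)
{
    s->data = data; s->next = 0; s->size = size;
    s->la = toy_la; s->consume = toy_consume;
}

int main(void)
{
    toy_stream s;
    toy_setup(&s, "abc", 3);
    printf("%c\n", (char)s.la(&s, 1));   /* a */
    s.consume(&s);
    printf("%c\n", (char)s.la(&s, 1));   /* b */
    return 0;
}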
-
-static pANTLR3_STRING
-antlr38BitGetSourceName(pANTLR3_INT_STREAM is)
-{
-	return	is->streamName;
-}
-
-/** \brief Close down an input stream and free any memory allocated by it.
- *
- * \param input Input stream context pointer
- */
-static void
-antlr3InputClose(pANTLR3_INPUT_STREAM input)
-{
-    // Close any markers in the input stream
-    //
-    if	(input->markers != NULL)
-    {
-		input->markers->free(input->markers);
-		input->markers = NULL;
-    }
-
-    // Close the string factory
-    //
-    if	(input->strFactory != NULL)
-    {
-		input->strFactory->close(input->strFactory);
-    }
-
-    // Free the input stream buffer if we allocated it
-    //
-    if	(input->isAllocated && input->data != NULL)
-    {
-		ANTLR3_FREE(input->data);
-		input->data = NULL;
-    }
-    
-    input->istream->free(input->istream);
-
-    // Finally, free the space for the structure itself
-    //
-    ANTLR3_FREE(input);
-
-    // Done
-    //
-}
-
-static void		
-antlr38BitSetUcaseLA		(pANTLR3_INPUT_STREAM input, ANTLR3_BOOLEAN flag)
-{
-	if	(flag)
-	{
-		// Return the upper case version of the characters
-		//
-		input->istream->_LA		    =  antlr38BitLA_ucase;
-	}
-	else
-	{
-		// Return the raw characters as they are in the buffer
-		//
-		input->istream->_LA		    =  antlr38BitLA;
-	}
-}
-
-
-/** \brief Reset a re-startable input stream to the start
- *
- * \param input Input stream context pointer
- */
-static void
-antlr3InputReset(pANTLR3_INPUT_STREAM input)
-{
-
-    input->nextChar		= input->data;	/* Input at first character */
-    input->line			= 1;		/* starts at line 1	    */
-    input->charPositionInLine	= -1;
-    input->currentLine		= input->data;
-    input->markDepth		= 0;		/* Reset markers	    */
-    
-    /* Clear out the markers table if it is there
-     */
-    if	(input->markers != NULL)
-    {
-        input->markers->clear(input->markers);
-    }
-    else
-    {
-        /* Install a new markers table
-         */
-        input->markers  = antlr3VectorNew(0);
-    }
-}
-
-/** Install new source code into a working input stream so that the
- *  input stream can be reused.
- */
-static void
-antlr38BitReuse(pANTLR3_INPUT_STREAM input, pANTLR3_UINT8 inString, ANTLR3_UINT32 size, pANTLR3_UINT8 name)
-{
-    input->isAllocated	= ANTLR3_FALSE;
-    input->data		= inString;
-    input->sizeBuf	= size;
-    
-    // Now we can set up the file name. As we are reusing the stream, there may already
-    // be a string that we can reuse for holding the filename.
-    //
-	if	(input->istream->streamName == NULL) 
-	{
-		input->istream->streamName	= input->strFactory->newStr(input->strFactory, name == NULL ? (pANTLR3_UINT8)"-memory-" : name);
-		input->fileName		= input->istream->streamName;
-	}
-	else
-	{
-		input->istream->streamName->set(input->istream->streamName,  (name == NULL ? (const char *)"-memory-" : (const char *)name));
-	}
-
-    input->reset(input);
-}
-
-/** \brief Consume the next character in an 8 bit input stream
- *
- * \param input Input stream context pointer
- */
-static void
-antlr38BitConsume(pANTLR3_INT_STREAM is)
-{
-    pANTLR3_INPUT_STREAM input;
-
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-    {	
-	/* Indicate one more character in this line
-	 */
-	input->charPositionInLine++;
-	
-	if  ((ANTLR3_UCHAR)(*((pANTLR3_UINT8)input->nextChar)) == input->newlineChar)
-	{
-	    /* Reset for start of a new line of input
-	     */
-	    input->line++;
-	    input->charPositionInLine	= 0;
-	    input->currentLine		= (void *)(((pANTLR3_UINT8)input->nextChar) + 1);
-	}
-
-	/* Increment to next character position
-	 */
-	input->nextChar = (void *)(((pANTLR3_UINT8)input->nextChar) + 1);
-    }
-}
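
As a quick sanity check of the bookkeeping in antlr38BitConsume, the fragment below (standalone, nothing ANTLR3-specific) walks a small buffer with the same rule: bump the column for every character and reset it when the newline trigger is seen.

#include <stdio.h>

int main(void)
{
    const char *buf  = "ab\ncd";
    unsigned    line = 1;
    int         col  = -1;              /* same starting convention as antlr3InputReset */

    for (const char *p = buf; *p != '\0'; ++p)
    {
        col++;                          /* one more character on this line        */
        if (*p == '\n')
        {
            line++;                     /* newline trigger: move to the next line */
            col = 0;
        }
    }
    printf("line %u, column %d\n", line, col);   /* prints: line 2, column 2 */
    return 0;
}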
-
-/** \brief Return the input element assuming an 8 bit ascii input
- *
- * \param[in] input Input stream context pointer
- * \param[in] la 1 based offset of next input stream element
- *
- * \return Next input character in internal ANTLR3 encoding (UTF32)
- */
-static ANTLR3_UCHAR 
-antlr38BitLA(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
-{
-    pANTLR3_INPUT_STREAM input;
-	
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-    {
-		return	ANTLR3_CHARSTREAM_EOF;
-    }
-    else
-    {
-		return	(ANTLR3_UCHAR)(*((pANTLR3_UINT8)input->nextChar + la - 1));
-    }
-}
-
-/** \brief Return the input element assuming an 8 bit input and
- *         always return the UPPER CASE character.
- *		   Note that this is 8 bit and so we assume that the toupper
- *		   function will use the correct locale for 8 bits.
- *
- * \param[in] input Input stream context pointer
- * \param[in] la 1 based offset of next input stream element
- *
- * \return Next input character in internal ANTLR3 encoding (UTF32)
- */
-static ANTLR3_UCHAR
-antlr38BitLA_ucase	(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
-{
-    pANTLR3_INPUT_STREAM input;
-	
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-    {
-		return	ANTLR3_CHARSTREAM_EOF;
-    }
-    else
-    {
-		return	(ANTLR3_UCHAR)toupper((*((pANTLR3_UINT8)input->nextChar + la - 1)));
-    }
-}
-
-
-/** \brief Return the input element assuming an 8 bit ascii input
- *
- * \param[in] input Input stream context pointer
- * \param[in] lt 1 based offset of next input stream element
- *
- * \return Next input character in internal ANTLR3 encoding (UTF32)
- */
-static void * 
-antlr38BitLT(pANTLR3_INPUT_STREAM input, ANTLR3_INT32 lt)
-{
-    /* Casting is horrible but it means no warnings and LT should never be called
-     * on a character stream anyway, I think. If it is, then the void * will need to be
-     * cast back in a similar manner. Yuck! But this means that LT for token streams and
-     * tree streams is correct.
-     */
-    return (ANTLR3_FUNC_PTR(input->istream->_LA(input->istream, lt)));
-}
-
-/** \brief Calculate the current index in the input stream.
- * \param[in] input Input stream context pointer
- */
-static ANTLR3_MARKER
-antlr38BitIndex(pANTLR3_INT_STREAM is)
-{
-    pANTLR3_INPUT_STREAM input;
-
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    return  (ANTLR3_MARKER)(((pANTLR3_UINT8)input->nextChar));
-}
-
-/** \brief Return the size of the current input stream which, as an 8 bit file,
- *   is the total input. Other implementations may provide more sophisticated
- *   handling for non-recoverable streams and so on.
- *
- * \param[in] input Input stream context pointer
- */
-static	ANTLR3_UINT32 
-antlr38BitSize(pANTLR3_INPUT_STREAM input)
-{
-    return  input->sizeBuf;
-}
-
-/** \brief Mark the current input point in an 8 bit stream
- *  such as a file stream, where all the input is available in the
- *  buffer.
- *
- * \param[in] is Input stream context pointer
- */
-static ANTLR3_MARKER
-antlr38BitMark	(pANTLR3_INT_STREAM is)
-{
-    pANTLR3_LEX_STATE	    state;
-    pANTLR3_INPUT_STREAM    input;
-
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    /* New mark point 
-     */
-    input->markDepth++;
-
-    /* See if we are revisiting a mark; if we are, we can just reuse the vector
-     * entry, otherwise we need a new one
-     */
-    if	(input->markDepth > input->markers->count)
-    {	
-	state	= ANTLR3_MALLOC(sizeof(ANTLR3_LEX_STATE));
-
-	/* Add it to the table
-	 */
-	input->markers->add(input->markers, state, ANTLR3_FREE_FUNC);	/* No special structure, just free() on delete */
-    }
-    else
-    {
-	state	= (pANTLR3_LEX_STATE)input->markers->get(input->markers, input->markDepth - 1);
-
-	/* Assume no errors for speed, it will just blow up if the table failed
-	 * for some reason, hence lots of unit tests on the tables ;-)
-	 */
-    }
-
-    /* We have created or retrieved the state, so update it with the current
-     * elements of the lexer state.
-     */
-    state->charPositionInLine	= input->charPositionInLine;
-    state->currentLine		= input->currentLine;
-    state->line			= input->line;
-    state->nextChar		= input->nextChar;
-
-    is->lastMarker  = input->markDepth;
-
-    /* And that's it
-     */
-    return  input->markDepth;
-}
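
The mark/rewind contract above boils down to "snapshot the cursor state, hand out a 1 based marker, restore on demand". Below is a minimal sketch of just that contract; all names are invented here and there are no ANTLR3 dependencies.

#include <stdio.h>

typedef struct { unsigned pos, line, col; } lex_state;

static lex_state marks[16];
static unsigned  markDepth;

static unsigned mark(unsigned pos, unsigned line, unsigned col)
{
    marks[markDepth].pos  = pos;
    marks[markDepth].line = line;
    marks[markDepth].col  = col;
    return ++markDepth;                 /* 1-based marker, like markDepth above */
}

static void rewind_to(unsigned marker, unsigned *pos, unsigned *line, unsigned *col)
{
    lex_state *s = &marks[marker - 1];
    *pos = s->pos; *line = s->line; *col = s->col;
    markDepth = marker - 1;             /* release this mark and any above it   */
}

int main(void)
{
    unsigned pos = 10, line = 2, col = 3;
    unsigned m = mark(pos, line, col);
    pos = 42; line = 5; col = 0;        /* speculative lexing moved the cursor  */
    rewind_to(m, &pos, &line, &col);
    printf("%u %u %u\n", pos, line, col);   /* back to 10 2 3 */
    return 0;
}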
-/** \brief Rewind the lexer input to the state specified by the last produced mark.
- * 
- * \param[in] input Input stream context pointer
- *
- * \remark
- * Assumes 8 Bit input stream.
- */
-static void
-antlr38BitRewindLast	(pANTLR3_INT_STREAM is)
-{
-    is->rewind(is, is->lastMarker);
-}
-
-/** \brief Rewind the lexer input to the state specified by the supplied mark.
- * 
- * \param[in] input Input stream context pointer
- *
- * \remark
- * Assumes 8 Bit input stream.
- */
-static void
-antlr38BitRewind	(pANTLR3_INT_STREAM is, ANTLR3_MARKER mark)
-{
-    pANTLR3_LEX_STATE	state;
-    pANTLR3_INPUT_STREAM input;
-
-    input   = ((pANTLR3_INPUT_STREAM) is->super);
-
-    /* Perform any clean up of the marks
-     */
-    input->istream->release(input->istream, mark);
-
-    /* Find the supplied mark state 
-     */
-    state   = (pANTLR3_LEX_STATE)input->markers->get(input->markers, (ANTLR3_UINT32)(mark - 1));
-
-    /* Seek input pointer to the requested point (note we supply the void *pointer
-     * to whatever is implementing the int stream to seek).
-     */
-    antlr38BitSeek(is, (ANTLR3_MARKER)(state->nextChar));
-
-    /* Restore the rest of the information from the mark
-     */
-    input->charPositionInLine	= state->charPositionInLine;
-    input->currentLine		= state->currentLine;
-    input->line			= state->line;
-    input->nextChar		= state->nextChar;
-
-    /* And we are done
-     */
-}
-
-/** \brief Release the supplied mark, and any marks above it, so that they can be reused.
- * 
- * \param[in] input Input stream context pointer
- *
- * \remark
- * Assumes 8 Bit input stream.
- */
-static void
-antlr38BitRelease	(pANTLR3_INT_STREAM is, ANTLR3_MARKER mark)
-{
-    pANTLR3_INPUT_STREAM input;
-
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    /* We don't do much here in fact, as we never free any higher marks in
-     * the markers table; we just reuse any memory allocated for them.
-     */
-    input->markDepth	= (ANTLR3_UINT32)(mark - 1);
-}
-
-/** \brief Seek the 8 bit input stream to the supplied seek point.
- * 
- * \param[in] input Input stream context pointer
- *
- * \remark
- * Assumes 8 Bit input stream.
- */
-static void
-antlr38BitSeek	(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint)
-{
-	ANTLR3_INT32   count;
-	pANTLR3_INPUT_STREAM input;
-
-	input   = ANTLR3_FUNC_PTR(((pANTLR3_INPUT_STREAM) is->super));
-
-	/* If the requested seek point is less than the current
-	* input point, then we assume that we are resetting from a mark
-	* and do not need to scan, but can just set to there.
-	*/
-	if	(seekPoint <= (ANTLR3_MARKER)(input->nextChar))
-	{
-		input->nextChar	= ((pANTLR3_UINT8) seekPoint);
-	}
-	else
-	{
-		count	= (ANTLR3_UINT32)(seekPoint - (ANTLR3_MARKER)(input->nextChar));
-
-		while (count--)
-		{
-			is->consume(is);
-		}
-	}
-}
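
The rule implemented by antlr38BitSeek is worth calling out: seeking backwards (typically to a mark) is a plain pointer assignment, while seeking forwards goes through consume so that line/column counters stay correct. A toy version of just that decision, with an illustrative stand-in for consume:

#include <stdio.h>

static void consume_one(const char **p) { (*p)++; }   /* stand-in for a real consume() */

static void toy_seek(const char **next, const char *target)
{
    if (target <= *next)
        *next = target;                  /* backwards: direct assignment           */
    else
        while (*next < target)
            consume_one(next);           /* forwards: keep bookkeeping via consume */
}

int main(void)
{
    const char *buf    = "hello";
    const char *cursor = buf;

    toy_seek(&cursor, buf + 4);
    printf("%c\n", *cursor);             /* o */
    toy_seek(&cursor, buf + 1);
    printf("%c\n", *cursor);             /* e */
    return 0;
}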
-/** Return a substring of the 8 bit input stream in
- *  newly allocated memory.
- *
- * \param input Input stream context pointer
- * \param start Offset in input stream where the string starts
- * \param stop  Offset in the input stream where the string ends.
- */
-static pANTLR3_STRING
-antlr38BitSubstr		(pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop)
-{
-	return  input->strFactory->newPtr(input->strFactory, (pANTLR3_UINT8)start, (ANTLR3_UINT32)(stop - start + 1));
-}
-
-/** \brief Return the line number as understood by the 8 bit input stream.
- *
- * \param input Input stream context pointer
- * \return	Line number in input stream that we believe we are working on.
- */
-static ANTLR3_UINT32   
-antlr38BitGetLine		(pANTLR3_INPUT_STREAM input)
-{
-    return  input->line;
-}
-
-/** Return a pointer into the input stream that points at the start
- *  of the current input line as triggered by the end of line character installed
- *  for the stream ('\n' unless told differently).
- *
- * \param[in] input 
- */
-static void	  * 
-antlr38BitGetLineBuf	(pANTLR3_INPUT_STREAM input)
-{
-    return  input->currentLine;
-}
-
-/** Return the current offset into the current line in the input stream.
- *
- * \param input Input stream context pointer
- * \return      Current line offset
- */
-static ANTLR3_UINT32
-antlr38BitGetCharPosition	(pANTLR3_INPUT_STREAM input)
-{
-    return  input->charPositionInLine;
-}
-
-/** Set the current line number as understood by the input stream.
- *
- * \param input Input stream context pointer
- * \param line  Line number to tell the input stream we are on
- *
- * \remark
- *  This function does not change any pointers; it just allows the programmer to set the
- *  line number according to some external criterion, such as finding a lexed directive
- *  like #nnn "file.c", so that error reporting and so on is in sync
- *  with the original source format.
- */
-static void
-antlr38BitSetLine		(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 line)
-{
-    input->line	= line;
-}
-
-/** Set the current offset in the current line to be a particular setting.
- *
- * \param[in] input    Input stream context pointer
- * \param[in] position New setting for current offset.
- *
- * \remark
- * This does not set the actual pointers in the input stream; it is purely for reporting
- * purposes and so on, as per antlr38BitSetLine().
- */
-static void
-antlr38BitSetCharPosition	(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 position)
-{
-    input->charPositionInLine = position;
-}
-
-/** Set the newline trigger character in the input stream to the supplied parameter.
- *
- * \param[in] input	    Input stream context pointer
- * \param[in] newlineChar   Character to set to be the newline trigger.
- *
- * \remark
- *  - The supplied newlineChar is in UTF32 encoding (for ASCII and latin1 etc. the
- *    values are the same), but the input stream catered for by this function is 8 bit
- *    only, so it is up to the programmer to ensure that the character supplied is valid.
- */
-static void 
-antlr38BitSetNewLineChar	(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 newlineChar)
-{
-    input->newlineChar	= newlineChar;
-}
-
-
-/// \brief Common function to setup function interface for a UTF16 or UCS2 input stream.
-///
-/// \param input Input stream context pointer
-///
-/// \remark
-///  - Strictly speaking, there is no such thing as a UCS2 input stream as the term
-///    tends to confuse the notions of character encoding, unicode and so on. UCS2 is
-///    essentially UTF16 without any surrogates and so the standard UTF16
-///    input stream is able to handle it without any special code.
-///
-void 
-antlr3UTF16SetupStream	(pANTLR3_INPUT_STREAM input, ANTLR3_BOOLEAN machineBigEndian, ANTLR3_BOOLEAN inputBigEndian)
-{
-    // Build a string factory for this stream. This is a UTF16 string factory which is a standard
-    // part of the ANTLR3 string. The string factory is then passed through the whole chain 
-    // of lexer->parser->tree->treeparser and so on.
-    //
-    input->strFactory	= antlr3StringFactoryNew(input->encoding);
-
-    // Generic API that does not care about endianness.
-    //
-    input->istream->index	    =  antlr3UTF16Index;            // Calculate current index in input stream, UTF16 based
-    input->substr		    =  antlr3UTF16Substr;	    // Return a string from the input stream
-    input->istream->seek	    =  antlr3UTF16Seek;		    // How to seek to a specific point in the stream
-
-    // We must install different UTF16 routines according to whether the input
-    // has the same endianness as the machine we are executing on. If it does not,
-    // then we must install methods that convert the endianness on the fly as they go.
-    //
-
-    switch (machineBigEndian)
-    {
-        case    ANTLR3_TRUE:
-
-            // Machine is Big Endian, if the input is also then install the 
-            // methods that do not access input by bytes and reverse them.
-            // Otherwise install endian aware methods.
-            //
-            if  (inputBigEndian == ANTLR3_TRUE) 
-            {
-                // Input is machine compatible
-                //
-                input->istream->consume	    =  antlr3UTF16Consume;	    // Consume the next UTF16 character in the buffer
-                input->istream->_LA         =  antlr3UTF16LA;		    // Return the UTF32 character at offset n (1 based)    
-            }
-            else
-            {
-                // Need to use methods that know that the input is little endian
-                //
-                input->istream->consume	    =  antlr3UTF16ConsumeLE;	    // Consume the next UTF16 character in the buffer
-                input->istream->_LA         =  antlr3UTF16LALE;		    // Return the UTF32 character at offset n (1 based) 
-            }
-            break;
-
-        case    ANTLR3_FALSE:
-
-            // Machine is Little Endian, if the input is also then install the 
-            // methods that do not access input by bytes and reverse them.
-            // Otherwise install endian aware methods.
-            //
-            if  (inputBigEndian == ANTLR3_FALSE) 
-            {
-                // Input is machine compatible
-                //
-                input->istream->consume	    =  antlr3UTF16Consume;	    // Consume the next UTF16 character in the buffer
-                input->istream->_LA         =  antlr3UTF16LA;		    // Return the UTF32 character at offset n (1 based)    
-            }
-            else
-            {
-                // Need to use methods that know that the input is Big Endian
-                //
-                input->istream->consume	    =  antlr3UTF16ConsumeBE;	    // Consume the next UTF16 character in the buffer
-                input->istream->_LA         =  antlr3UTF16LABE;		    // Return the UTF32 character at offset n (1 based) 
-            }
-            break;
-    }
-
-        
-    input->charByteSize		    = 2;			    // Size in bytes of characters in this stream.
-
-}
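
The machineBigEndian flag that drives the switch above has to come from somewhere; one common way to derive it at run time (shown here as a standalone sketch, not the runtime's own mechanism) is to inspect the first byte of a known multi-byte value.

#include <stdio.h>

static int machine_is_big_endian(void)
{
    const unsigned short probe = 0x0102;
    /* A big endian machine stores the high byte (0x01) first in memory. */
    return *(const unsigned char *)&probe == 0x01;
}

int main(void)
{
    printf("machine is %s endian\n", machine_is_big_endian() ? "big" : "little");
    return 0;
}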
-
-/// \brief Consume the next character in a UTF16 input stream
-///
-/// \param input Input stream context pointer
-///
-static void
-antlr3UTF16Consume(pANTLR3_INT_STREAM is)
-{
-	pANTLR3_INPUT_STREAM input;
-        UTF32   ch;
-        UTF32   ch2;
-
-	input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-        // Buffer size is always in bytes
-        //
-	if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-	{	
-		// Indicate one more character in this line
-		//
-		input->charPositionInLine++;
-
-		if  ((ANTLR3_UCHAR)(*((pANTLR3_UINT16)input->nextChar)) == input->newlineChar)
-		{
-			// Reset for start of a new line of input
-			//
-			input->line++;
-			input->charPositionInLine	= 0;
-			input->currentLine		= (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
-		}
-
-		// Increment to next character position, accounting for any surrogates
-		//
-                // Next char in natural machine byte order
-                //
-                ch  = *((UTF16*)input->nextChar);
-
-                // We consumed one 16 bit character
-                //
-		input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
-
-                // If we have a surrogate pair then we need to consume
-                // a following valid LO surrogate.
-                //
-                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
-
-                    // If the 16 bits following the high surrogate are in the source buffer...
-                    //
-                    if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-                    {
-                        // Next character is in natural machine byte order
-                        //
-                        ch2 = *((UTF16*)input->nextChar);
-
-                        // If it's a valid low surrogate, consume it
-                        //
-                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
-                        {
-                            // We consumed one 16 bit character
-                            //
-		            input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
-                        }
-                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                        // it.
-                        //
-                    } 
-                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                    // it because the buffer ended
-                    //
-                } 
-                // Note that we did not check for an invalid low surrogate here, or the fact that the
-                // lo surrogate was missing. We just picked out one 16 bit character unless the character
-                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
-                //
-	}
-}
-
-/// \brief Return the input element assuming a UTF16 input in natural machine byte order
-///
-/// \param[in] input Input stream context pointer
-/// \param[in] la 1 based offset of next input stream element
-///
-/// \return Next input character in internal ANTLR3 encoding (UTF32)
-///
-static ANTLR3_UCHAR 
-antlr3UTF16LA(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
-{
-	pANTLR3_INPUT_STREAM input;
-        UTF32   ch;
-        UTF32   ch2;
-        UTF16   * nextChar;
-
-        // Find the input interface and where we are currently pointing to
-        // in the input stream
-        //
-	input       = ((pANTLR3_INPUT_STREAM) (is->super));
-        nextChar    = input->nextChar;
-
-        // If a positive offset then advance forward, else retreat
-        //
-        if  (la >= 0)
-        {
-            while   (--la > 0 && (pANTLR3_UINT8)nextChar < ((pANTLR3_UINT8)input->data) + input->sizeBuf )
-            {
-                // Advance our copy of the input pointer
-                //
-                // Next char in natural machine byte order
-                //
-                ch  = *nextChar++;
-
-                // If we have a surrogate pair then we need to consume
-                // a following valid LO surrogate.
-                //
-                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
-                {
-                    // If the 16 bits following the high surrogate are in the source buffer...
-                    //
-                    if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-                    {
-                        // Next character is in natural machine byte order
-                        //
-                        ch2 = *nextChar;
-
-                        // If it's a valid low surrogate, consume it
-                        //
-                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
-                        {
-                            // We consumed one 16 bit character
-                            //
-		            nextChar++;
-                        }
-                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                        // it.
-                        //
-                    } 
-                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                    // it because the buffer ended
-                    //
-                }
-                // Note that we did not check for an invalid low surrogate here, or the fact that the
-                // lo surrogate was missing. We just picked out one 16 bit character unless the character
-                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
-                //
-            }
-        }
-        else
-        {
-            // We need to go backwards from our input point
-            //
-            while   (la++ < 0 && (pANTLR3_UINT8)nextChar > (pANTLR3_UINT8)input->data )
-            {
-                // Get the previous 16 bit character
-                //
-                ch = *--nextChar;
-
-                // If we found a low surrogate then go back one more character if
-                // the hi surrogate is there
-                //
-                if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) 
-                {
-                    ch2 = *(nextChar-1);
-                    if (ch2 >= UNI_SUR_HIGH_START && ch2 <= UNI_SUR_HIGH_END) 
-                    {
-                        // Yes, there is a high surrogate to match it so decrement one more and point to that
-                        //
-                        nextChar--;
-                    }
-                }
-            }
-        }
-
-        // Our local copy of nextChar is now pointing to either the correct character or end of file
-        //
-        // Input buffer size is always in bytes
-        //
-	if	( (pANTLR3_UINT8)nextChar >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-	{
-		return	ANTLR3_CHARSTREAM_EOF;
-	}
-	else
-	{
-            // Pick up the next 16 bit character (native machine byte order)
-            //
-            ch = *nextChar++;
-
-            // If we have a surrogate pair then we need to consume
-            // a following valid LO surrogate.
-            //
-            if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
-            {
-                // If the 16 bits following the high surrogate are in the source buffer...
-                //
-                if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-                {
-                    // Next character is in natural machine byte order
-                    //
-                    ch2 = *nextChar;
-
-                    // If it's a valid low surrogate, consume it
-                    //
-                    if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
-                    {
-                        // Construct the UTF32 code point
-                        //
-                        ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
-			    + (ch2 - UNI_SUR_LOW_START) + halfBase;
-                    }
-                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                    // it.
-                    //
-                } 
-                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                // it because the buffer ended
-                //
-            }
-        }
-        return ch;
-}
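
The surrogate arithmetic in the return path above uses the names halfShift, halfBase and the UNI_SUR_* limits, which are defined elsewhere in the runtime; the standalone check below uses the conventional Unicode values for them (stated here as an assumption, not taken from this file) to show how a high/low pair becomes a single UTF32 code point.

#include <stdio.h>

/* Conventional Unicode surrogate constants (assumed values, not taken from this file). */
#define UNI_SUR_HIGH_START 0xD800u
#define UNI_SUR_LOW_START  0xDC00u
#define HALF_SHIFT         10
#define HALF_BASE          0x10000u

int main(void)
{
    unsigned hi = 0xD83Du;               /* high surrogate */
    unsigned lo = 0xDE00u;               /* low surrogate  */
    unsigned cp = ((hi - UNI_SUR_HIGH_START) << HALF_SHIFT)
                + (lo - UNI_SUR_LOW_START) + HALF_BASE;
    printf("U+%04X\n", cp);              /* prints U+1F600 */
    return 0;
}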
-
-
-/// \brief Calculate the current index in the input stream.
-/// \param[in] input Input stream context pointer
-///
-static ANTLR3_MARKER 
-antlr3UTF16Index(pANTLR3_INT_STREAM is)
-{
-    pANTLR3_INPUT_STREAM input;
-
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    return  (ANTLR3_MARKER)(input->nextChar);
-}
-
-/// \brief Seek the UTF16 input stream to the supplied seek point.
-///
-/// \param[in] input Input stream context pointer
-///
-/// \remark
-/// Assumes UTF16 input stream.
-///
-static void
-antlr3UTF16Seek	(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint)
-{
-	pANTLR3_INPUT_STREAM input;
-
-	input   = ((pANTLR3_INPUT_STREAM) is->super);
-
-	// If the requested seek point is less than the current
-	// input point, then we assume that we are resetting from a mark
-	// and do not need to scan, but can just set to there as rewind will
-        // reset line numbers and so on.
-	//
-	if	(seekPoint <= (ANTLR3_MARKER)(input->nextChar))
-	{
-		input->nextChar	= (void *)seekPoint;
-	}
-	else
-	{
-            // Call consume until we reach the asked for seek point or EOF
-            //
-            while (is->_LA(is, 1) != ANTLR3_CHARSTREAM_EOF && seekPoint > (ANTLR3_MARKER)input->nextChar)
-	    {
-		is->consume(is);
-	    }
-	}
-}
-/// \brief Return a substring of the UTF16 input stream in
-///  newly allocated memory.
-///
-/// \param input Input stream context pointer
-/// \param start Offset in input stream where the string starts
-/// \param stop  Offset in the input stream where the string ends.
-///
-static pANTLR3_STRING
-antlr3UTF16Substr		(pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop)
-{
-    return  input->strFactory->newPtr(input->strFactory, (pANTLR3_UINT8)start, ((ANTLR3_UINT32_CAST(stop - start))/2) + 1);
-}
-
-/// \brief Consume the next character in a UTF16 input stream when the input is Little Endian and the machine is not.
-/// Note that the UTF16 routines do not do any substantial verification of the input stream because, for performance's
-/// sake, we assume it is validly encoded. So if a low surrogate is found at the current input position then we
-/// just consume it. Surrogate pairs should be seen as Hi, Lo. So if we have a Lo first, then the input stream
-/// is fubar but we just ignore that.
-///
-/// \param input Input stream context pointer
-///
-static void
-antlr3UTF16ConsumeLE(pANTLR3_INT_STREAM is)
-{
-	pANTLR3_INPUT_STREAM input;
-        UTF32   ch;
-        UTF32   ch2;
-
-	input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-        // Buffer size is always in bytes
-        //
-	if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-	{	
-		// Indicate one more character in this line
-		//
-		input->charPositionInLine++;
-
-		if  ((ANTLR3_UCHAR)(*((pANTLR3_UINT16)input->nextChar)) == input->newlineChar)
-		{
-			// Reset for start of a new line of input
-			//
-			input->line++;
-			input->charPositionInLine	= 0;
-			input->currentLine		= (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
-		}
-
-		// Increment to next character position, accounting for any surrogates
-		//
-                // Next char in little endian form
-                //
-                ch  = *((pANTLR3_UINT8)input->nextChar) + (*((pANTLR3_UINT8)input->nextChar + 1) <<8);
-
-                // We consumed one 16 bit character
-                //
-		input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
-
-                // If we have a surrogate pair then we need to consume
-                // a following valid LO surrogate.
-                //
-                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
-
-                    // If the 16 bits following the high surrogate are in the source buffer...
-                    //
-                    if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-                    {
-                        ch2 = *((pANTLR3_UINT8)input->nextChar) + (*((pANTLR3_UINT8)input->nextChar + 1) <<8);
-
-                        // If it's a valid low surrogate, consume it
-                        //
-                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
-                        {
-                            // We consumed one 16 bit character
-                            //
-		            input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
-                        }
-                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                        // it.
-                        //
-                    } 
-                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                    // it because the buffer ended
-                    //
-                } 
-                // Note that we did not check for an invalid low surrogate here, or the fact that the
-                // lo surrogate was missing. We just picked out one 16 bit character unless the character
-                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
-                //
-	}
-}
-
-/// \brief Return the input element assuming a UTF16 input when the input is Little Endian and the machine is not
-///
-/// \param[in] input Input stream context pointer
-/// \param[in] la 1 based offset of next input stream element
-///
-/// \return Next input character in internal ANTLR3 encoding (UTF32)
-///
-static ANTLR3_UCHAR 
-antlr3UTF16LALE(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
-{
-	pANTLR3_INPUT_STREAM input;
-        UTF32           ch;
-        UTF32           ch2;
-        pANTLR3_UCHAR   nextChar;
-
-        // Find the input interface and where we are currently pointing to
-        // in the input stream
-        //
-	input       = ((pANTLR3_INPUT_STREAM) (is->super));
-        nextChar    = input->nextChar;
-
-        // If a positive offset then advance forward, else retreat
-        //
-        if  (la >= 0)
-        {
-            while   (--la > 0 && (pANTLR3_UINT8)nextChar < ((pANTLR3_UINT8)input->data) + input->sizeBuf )
-            {
-                // Advance our copy of the input pointer
-                //
-                // Next char in Little Endian byte order
-                //
-                ch  = (*nextChar) + (*(nextChar+1) << 8);
-                nextChar += 2;
-
-                // If we have a surrogate pair then we need to consume
-                // a following valid LO surrogate.
-                //
-                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
-                {
-                    // If the 16 bits following the high surrogate are in the source buffer...
-                    //
-                    if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-                    {
-                        // Next character is in little endian byte order
-                        //
-                        ch2 = (*nextChar) + (*(nextChar+1) << 8);
-
-                        // If it's a valid low surrogate, consume it
-                        //
-                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
-                        {
-                            // We consumed one 16 bit character
-                            //
-		            nextChar += 2;
-                        }
-                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                        // it.
-                        //
-                    } 
-                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                    // it because the buffer ended
-                    //
-                }
-                // Note that we did not check for an invalid low surrogate here, or the fact that the
-                // lo surrogate was missing. We just picked out one 16 bit character unless the character
-                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
-                //
-            }
-        }
-        else
-        {
-            // We need to go backwards from our input point
-            //
-            while   (la++ < 0 && (pANTLR3_UINT8)nextChar > (pANTLR3_UINT8)input->data )
-            {
-                // Get the previous 16 bit character
-                //
-                ch = *(nextChar - 2) + (*(nextChar - 1) << 8);
-                nextChar -= 2;
-
-                // If we found a low surrogate then go back one more character if
-                // the hi surrogate is there
-                //
-                if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) 
-                {
-                    ch2 = *(nextChar - 2) + (*(nextChar - 1) << 8);
-                    if (ch2 >= UNI_SUR_HIGH_START && ch2 <= UNI_SUR_HIGH_END) 
-                    {
-                        // Yes, there is a high surrogate to match it so decrement one more and point to that
-                        //
-                        nextChar -=2;
-                    }
-                }
-            }
-        }
-
-        // Our local copy of nextChar is now pointing to either the correct character or end of file
-        //
-        // Input buffer size is always in bytes
-        //
-	if	( (pANTLR3_UINT8)nextChar >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-	{
-		return	ANTLR3_CHARSTREAM_EOF;
-	}
-	else
-	{
-            // Pick up the next 16 bit character (little endian byte order)
-            //
-            ch = (*nextChar) + (*(nextChar+1) << 8);
-            nextChar += 2;
-
-            // If we have a surrogate pair then we need to consume
-            // a following valid LO surrogate.
-            //
-            if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
-            {
-                // If the 16 bits following the high surrogate are in the source buffer...
-                //
-                if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-                {
-                    // Next character is in little endian byte order
-                    //
-                    ch2 = (*nextChar) + (*(nextChar+1) << 8);
-
-                    // If it's a valid low surrogate, consume it
-                    //
-                    if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
-                    {
-                        // Construct the UTF32 code point
-                        //
-                        ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
-			    + (ch2 - UNI_SUR_LOW_START) + halfBase;
-                    }
-                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                    // it.
-                    //
-                } 
-                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                // it because the buffer ended
-                //
-            }
-        }
-        return ch;
-}
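
Since antlr3UTF16LALE assembles each 16 bit code unit from individual bytes, a tiny standalone example of the two byte orderings may help; it is independent of the ANTLR3 types and of the host machine's own endianness.

#include <stdio.h>

int main(void)
{
    const unsigned char bytes[2] = { 0x3D, 0xD8 };     /* the value 0xD83D stored little endian */

    unsigned le = bytes[0] | (bytes[1] << 8);          /* little endian read: low byte first    */
    unsigned be = (bytes[0] << 8) | bytes[1];          /* big endian read: high byte first      */

    printf("LE read: 0x%04X  BE read: 0x%04X\n", le, be);   /* 0xD83D vs 0x3DD8 */
    return 0;
}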
-
-/// \brief Consume the next character in a UTF16 input stream when the input is Big Endian and the machine is not
-///
-/// \param input Input stream context pointer
-///
-static void
-antlr3UTF16ConsumeBE(pANTLR3_INT_STREAM is)
-{
-	pANTLR3_INPUT_STREAM input;
-        UTF32   ch;
-        UTF32   ch2;
-
-	input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-        // Buffer size is always in bytes
-        //
-	if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-	{	
-		// Indicate one more character in this line
-		//
-		input->charPositionInLine++;
-
-		if  ((ANTLR3_UCHAR)(*((pANTLR3_UINT16)input->nextChar)) == input->newlineChar)
-		{
-			// Reset for start of a new line of input
-			//
-			input->line++;
-			input->charPositionInLine	= 0;
-			input->currentLine		= (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
-		}
-
-		// Increment to next character position, accounting for any surrogates
-		//
-                // Next char in big endian form
-                //
-                ch  = *((pANTLR3_UINT8)input->nextChar + 1) + (*((pANTLR3_UINT8)input->nextChar ) <<8);
-
-                // We consumed one 16 bit character
-                //
-		input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
-
-                // If we have a surrogate pair then we need to consume
-                // a following valid LO surrogate.
-                //
-                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
-
-                    // If the 16 bits following the high surrogate are in the source buffer...
-                    //
-                    if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-                    {
-                        // Big endian
-                        //
-                        ch2 = *((pANTLR3_UINT8)input->nextChar + 1) + (*((pANTLR3_UINT8)input->nextChar ) <<8);
-
-                        // If it's a valid low surrogate, consume it
-                        //
-                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
-                        {
-                            // We consumed one 16 bit character
-                            //
-		            input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
-                        }
-                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                        // it.
-                        //
-                    } 
-                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                    // it because the buffer ended
-                    //
-                } 
-                // Note that we did not check for an invalid low surrogate here, or the fact that the
-                // lo surrogate was missing. We just picked out one 16 bit character unless the character
-                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
-                //
-	}
-}
-
-/// \brief Return the input element assuming a UTF16 input when the input is Big Endian and the machine is not
-///
-/// \param[in] input Input stream context pointer
-/// \param[in] la 1 based offset of next input stream element
-///
-/// \return Next input character in internal ANTLR3 encoding (UTF32)
-///
-static ANTLR3_UCHAR 
-antlr3UTF16LABE(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
-{
-	pANTLR3_INPUT_STREAM input;
-        UTF32           ch;
-        UTF32           ch2;
-        pANTLR3_UCHAR   nextChar;
-
-        // Find the input interface and where we are currently pointing to
-        // in the input stream
-        //
-	input       = ((pANTLR3_INPUT_STREAM) (is->super));
-        nextChar    = input->nextChar;
-
-        // If a positive offset then advance forward, else retreat
-        //
-        if  (la >= 0)
-        {
-            while   (--la > 0 && (pANTLR3_UINT8)nextChar < ((pANTLR3_UINT8)input->data) + input->sizeBuf )
-            {
-                // Advance our copy of the input pointer
-                //
-                // Next char in Big Endian byte order
-                //
-                ch  = ((*nextChar) << 8) + *(nextChar+1);
-                nextChar += 2;
-
-                // If we have a surrogate pair then we need to consume
-                // a following valid LO surrogate.
-                //
-                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
-                {
-                    // If the 16 bits following the high surrogate are in the source buffer...
-                    //
-                    if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-                    {
-                        // Next character is in big endian byte order
-                        //
-                        ch2 = ((*nextChar) << 8) + *(nextChar+1);
-
-                        // If it's a valid low surrogate, consume it
-                        //
-                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
-                        {
-                            // We consumed one 16 bit character
-                            //
-		            nextChar += 2;
-                        }
-                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                        // it.
-                        //
-                    } 
-                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                    // it because the buffer ended
-                    //
-                }
-                // Note that we did not check for an invalid low surrogate here, or the fact that the
-                // lo surrogate was missing. We just picked out one 16 bit character unless the character
-                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
-                //
-            }
-        }
-        else
-        {
-            // We need to go backwards from our input point
-            //
-            while   (la++ < 0 && (pANTLR3_UINT8)nextChar > (pANTLR3_UINT8)input->data )
-            {
-                // Get the previous 16 bit character
-                //
-                ch = (*(nextChar - 2) << 8) + *(nextChar - 1);
-                nextChar -= 2;
-
-                // If we found a low surrogate then go back one more character if
-                // the hi surrogate is there
-                //
-                if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) 
-                {
-                    ch2 = (*(nextChar - 2) << 8) + *(nextChar - 1);
-                    if (ch2 >= UNI_SUR_HIGH_START && ch2 <= UNI_SUR_HIGH_END) 
-                    {
-                        // Yes, there is a high surrogate to match it so decrement one more and point to that
-                        //
-                        nextChar -=2;
-                    }
-                }
-            }
-        }
-
-        // Our local copy of nextChar is now pointing to either the correct character or end of file
-        //
-        // Input buffer size is always in bytes
-        //
-	if	( (pANTLR3_UINT8)nextChar >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-	{
-		return	ANTLR3_CHARSTREAM_EOF;
-	}
-	else
-	{
-            // Pick up the next 16 bit character (big endian byte order)
-            //
-            ch = ((*nextChar) << 8) + *(nextChar+1);
-            nextChar += 2;
-
-            // If we have a surrogate pair then we need to consume
-            // a following valid LO surrogate.
-            //
-            if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
-            {
-                // If the 16 bits following the high surrogate are in the source buffer...
-                //
-                if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-                {
-                    // Next character is in big endian byte order
-                    //
-                    ch2 = ((*nextChar) << 8) + *(nextChar+1);
-
-                    // If it's a valid low surrogate, consume it
-                    //
-                    if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
-                    {
-                        // Construct the UTF32 code point
-                        //
-                        ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
-			    + (ch2 - UNI_SUR_LOW_START) + halfBase;
-                    }
-                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                    // it.
-                    //
-                } 
-                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
-                // it because the buffer ended
-                //
-            }
-        }
-        return ch;
-}
-
-/// \brief Common function to setup function interface for a UTF32 input stream.
-///
-/// \param input Input stream context pointer
-///
-void 
-antlr3UTF32SetupStream	(pANTLR3_INPUT_STREAM input, ANTLR3_BOOLEAN machineBigEndian, ANTLR3_BOOLEAN inputBigEndian)
-{
-    // Build a string factory for this stream. This is a UTF32 string factory which is a standard
-    // part of the ANTLR3 string. The string factory is then passed through the whole chain of lexer->parser->tree->treeparser
-    // and so on.
-    //
-    input->strFactory	= antlr3StringFactoryNew(input->encoding);
-
-    // Generic API that does not care about endianness.
-    //
-    input->istream->index	    =  antlr3UTF32Index;            // Calculate current index in input stream, UTF32 based
-    input->substr		    =  antlr3UTF32Substr;	    // Return a string from the input stream
-    input->istream->seek	    =  antlr3UTF32Seek;		    // How to seek to a specific point in the stream
-    input->istream->consume	    =  antlr3UTF32Consume;	    // Consume the next UTF32 character in the buffer
-
-    // We must install different UTF32 LA routines according to whether the input
-    // has the same endianness as the machine we are executing on. If it does not,
-    // then we must install methods that convert the endianness on the fly as they go.
-    //
-    switch (machineBigEndian)
-    {
-        case    ANTLR3_TRUE:
-
-            // Machine is Big Endian, if the input is also then install the 
-            // methods that do not access input by bytes and reverse them.
-            // Otherwise install endian aware methods.
-            //
-            if  (inputBigEndian == ANTLR3_TRUE) 
-            {
-                // Input is machine compatible
-                //
-                input->istream->_LA         =  antlr3UTF32LA;		    // Return the UTF32 character at offset n (1 based)    
-            }
-            else
-            {
-                // Need to use methods that know that the input is little endian
-                //
-                input->istream->_LA         =  antlr3UTF32LALE;		    // Return the UTF32 character at offset n (1 based) 
-            }
-            break;
-
-        case    ANTLR3_FALSE:
-
-            // Machine is Little Endian, if the input is also then install the 
-            // methods that do not access input by bytes and reverse them.
-            // Otherwise install endian aware methods.
-            //
-            if  (inputBigEndian == ANTLR3_FALSE) 
-            {
-                // Input is machine compatible
-                //
-                input->istream->_LA         =  antlr3UTF32LA;		    // Return the UTF32 character at offset n (1 based)    
-            }
-            else
-            {
-                // Need to use methods that know that the input is Big Endian
-                //
-                input->istream->_LA         =  antlr3UTF32LABE;		    // Return the UTF32 character at offset n (1 based) 
-            }
-            break;
-    }
-
-    input->charByteSize		    = 4;			    // Size in bytes of characters in this stream.
-}
-
-/** \brief Consume the next character in a UTF32 input stream
- *
- * \param input Input stream context pointer
- */
-static void
-antlr3UTF32Consume(pANTLR3_INT_STREAM is)
-{
-    pANTLR3_INPUT_STREAM input;
-
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    // SizeBuf is always in bytes
-    //
-    if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-    {	
-	/* Indicate one more character in this line
-	 */
-	input->charPositionInLine++;
-	
-	if  ((ANTLR3_UCHAR)(*((pANTLR3_UINT32)input->nextChar)) == input->newlineChar)
-	{
-	    /* Reset for start of a new line of input
-	     */
-	    input->line++;
-	    input->charPositionInLine	= 0;
-	    input->currentLine		= (void *)(((pANTLR3_UINT32)input->nextChar) + 1);
-	}
-
-	/* Increment to next character position
-	 */
-	input->nextChar = (void *)(((pANTLR3_UINT32)input->nextChar) + 1);
-    }
-}
-
-/// \brief Calculate the current index in the input stream.
-/// \param[in] input Input stream context pointer
-///
-static ANTLR3_MARKER 
-antlr3UTF32Index(pANTLR3_INT_STREAM is)
-{
-    pANTLR3_INPUT_STREAM input;
-
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    return  (ANTLR3_MARKER)(input->nextChar);
-}
-
-/// \brief Return a substring of the UTF32 input stream in
-///  newly allocated memory.
-///
-/// \param input Input stream context pointer
-/// \param start Offset in input stream where the string starts
-/// \param stop  Offset in the input stream where the string ends.
-///
-static pANTLR3_STRING
-antlr3UTF32Substr		(pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop)
-{
-    return  input->strFactory->newPtr(input->strFactory, (pANTLR3_UINT8)start, ((ANTLR3_UINT32_CAST(stop - start))/4) + 1);
-}
-
-/// \brief Rewind the lexer input to the state specified by the supplied mark.
-///
-/// \param[in] input Input stream context pointer
-///
-/// \remark
-/// Assumes UTF32 input stream.
-///
-static void
-antlr3UTF32Seek	(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint)
-{
-	pANTLR3_INPUT_STREAM input;
-
-	input   = ((pANTLR3_INPUT_STREAM) is->super);
-
-	// If the requested seek point is less than the current
-	// input point, then we assume that we are resetting from a mark
-	// and do not need to scan, but can just set to there as rewind will
-        // reset line numbers and so on.
-	//
-	if	(seekPoint <= (ANTLR3_MARKER)(input->nextChar))
-	{
-		input->nextChar	= (void *)seekPoint;
-	}
-	else
-	{
-            // Call consume until we reach the asked for seek point or EOF
-            //
-            while (is->_LA(is, 1) != ANTLR3_CHARSTREAM_EOF && seekPoint < (ANTLR3_MARKER)input->nextChar)
-	    {
-		is->consume(is);
-	    }
-	}
-}
-
-/** \brief Return the input element assuming a UTF32 input in natural machine byte order
- *
- * \param[in] input Input stream context pointer
- * \param[in] la 1 based offset of next input stream element
- *
- * \return Next input character in internal ANTLR3 encoding (UTF32)
- */
-static ANTLR3_UCHAR 
-antlr3UTF32LA(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
-{
-    pANTLR3_INPUT_STREAM input;
-	
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-    {
-		return	ANTLR3_CHARSTREAM_EOF;
-    }
-    else
-    {
-		return	(ANTLR3_UCHAR)(*((pANTLR3_UINT32)input->nextChar + la - 1));
-    }
-}
-
-/** \brief Return the input element assuming a UTF32 input in little endian byte order
- *
- * \param[in] input Input stream context pointer
- * \param[in] la 1 based offset of next input stream element
- *
- * \return Next input character in internal ANTLR3 encoding (UTF32)
- */
-static ANTLR3_UCHAR 
-antlr3UTF32LALE(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
-{
-    pANTLR3_INPUT_STREAM input;
-	
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-    {
-		return	ANTLR3_CHARSTREAM_EOF;
-    }
-    else
-    {
-        ANTLR3_UCHAR   c;
-
-        c = (ANTLR3_UCHAR)(*((pANTLR3_UINT32)input->nextChar + la - 1));
-
-        // Swap endianness to Big Endian
-        //
-        return (c>>24) | ((c<<8) & 0x00FF0000) | ((c>>8) & 0x0000FF00) | (c<<24);
-    }
-}
-
-/** \brief Return the input element assuming a UTF32 input in big endian byte order
- *
- * \param[in] input Input stream context pointer
- * \param[in] la 1 based offset of next input stream element
- *
- * \return Next input character in internal ANTLR3 encoding (UTF32)
- * \remark This is the same code as the LE version but separated in case there are better optimisations for the endian swap
- */
-static ANTLR3_UCHAR 
-antlr3UTF32LABE(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
-{
-    pANTLR3_INPUT_STREAM input;
-	
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-    {
-		return	ANTLR3_CHARSTREAM_EOF;
-    }
-    else
-    {
-        ANTLR3_UCHAR   c;
-
-        c = (ANTLR3_UCHAR)(*((pANTLR3_UINT32)input->nextChar + la - 1));
-
-        // Swap endianness to Little Endian
-        //
-        return (c>>24) | ((c<<8) & 0x00FF0000) | ((c>>8) & 0x0000FF00) | (c<<24);
-    }
-}
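The only difference between the endian-aware lookahead routines and the plain one is the 32-bit byte swap above. As a self-contained sketch (plain C for illustration, not part of the runtime), the same expression can be checked on a known value:

/* Standalone sketch of the 32-bit byte swap used by antlr3UTF32LALE and
 * antlr3UTF32LABE; swapping a byte-reversed 'A' recovers code point 0x41.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
swap32(uint32_t c)
{
    return (c >> 24) | ((c << 8) & 0x00FF0000) | ((c >> 8) & 0x0000FF00) | (c << 24);
}

int main(void)
{
    printf("%08X\n", swap32(0x41000000u));  /* prints 00000041 */
    return 0;
}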
-
-
-/// \brief Common function to set up the function interface for a UTF8 input stream.
-///
-/// \param input Input stream context pointer
-///
-void 
-antlr3UTF8SetupStream	(pANTLR3_INPUT_STREAM input)
-{
-    // Build a string factory for this stream. This is a string factory for the stream's encoding, which is a standard
-    // part of the ANTLR3 string implementation. The string factory is then passed through the whole chain of lexer->parser->tree->treeparser
-    // and so on.
-    //
-    input->strFactory	= antlr3StringFactoryNew(input->encoding);
-
-    // Generic API that does not care about endianness.
-    //
-    input->istream->consume	= antlr3UTF8Consume;	// Consume the next UTF8 character in the buffer
-    input->istream->_LA         = antlr3UTF8LA;         // Return the UTF32 character at offset n (1 based)
-    input->charByteSize		= 0;	                // Size in bytes of characters in this stream (0 => variable width UTF8).
-}
-
-// ------------------------------------------------------
-// Following is from Unicode.org (see antlr3convertutf.c)
-//
-
-/// Index into the table below with the first byte of a UTF-8 sequence to
-/// get the number of trailing bytes that are supposed to follow it.
-/// Note that *legal* UTF-8 sequences can't have 4 or 5 trailing bytes. The table is
-/// left as-is for anyone who may want to do such a conversion, which was
-/// allowed in earlier algorithms.
-///
-static const ANTLR3_UINT32 trailingBytesForUTF8[256] = {
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
-    2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
-};
-
-/// Magic values subtracted from a buffer value during UTF8 conversion.
-/// This table contains as many values as there might be trailing bytes
-/// in a UTF-8 sequence.
-///
-static const UTF32 offsetsFromUTF8[6] = 
-    {   0x00000000UL, 0x00003080UL, 0x000E2080UL, 
-	0x03C82080UL, 0xFA082080UL, 0x82082080UL 
-    };
-
-// End of Unicode.org tables
-// -------------------------
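To see how the two tables combine, take the euro sign, bytes E2 82 AC: trailingBytesForUTF8[0xE2] is 2, the fall-through accumulation produces 0xE412C, and subtracting offsetsFromUTF8[2] (0x000E2080) leaves the code point 0x20AC. The sketch below mirrors the switch used in antlr3UTF8Consume and antlr3UTF8LA further down and is illustrative only; it assumes the two tables above are in scope.

// Illustrative decode of a single UTF8 character using the tables above.
static ANTLR3_UCHAR
utf8DecodeOne(const ANTLR3_UINT8 *p)
{
    ANTLR3_UINT32 extra = trailingBytesForUTF8[*p];
    ANTLR3_UCHAR  ch    = 0;

    switch (extra)                      // cases deliberately fall through
    {
        case 5: ch += *p++; ch <<= 6;
        case 4: ch += *p++; ch <<= 6;
        case 3: ch += *p++; ch <<= 6;
        case 2: ch += *p++; ch <<= 6;
        case 1: ch += *p++; ch <<= 6;
        case 0: ch += *p++;
    }
    return ch - offsetsFromUTF8[extra]; // E2 82 AC  =>  0x20AC
}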
-
-
-/** \brief Consume the next character in a UTF8 input stream
- *
- * \param input Input stream context pointer
- */
-static void
-antlr3UTF8Consume(pANTLR3_INT_STREAM is)
-{
-    pANTLR3_INPUT_STREAM    input;
-    ANTLR3_UINT32           extraBytesToRead;
-    ANTLR3_UCHAR            ch;
-    pANTLR3_UINT8           nextChar;
-
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    nextChar = input->nextChar;
-
-    if	(nextChar < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-    {	
-	// Indicate one more character in this line
-	//
-	input->charPositionInLine++;
-	
-        // Are there more bytes needed to make up the whole thing?
-        //
-        extraBytesToRead = trailingBytesForUTF8[*nextChar];
-
-        if	(nextChar + extraBytesToRead >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-        {
-            input->nextChar = (((pANTLR3_UINT8)input->data) + input->sizeBuf);
-            return;
-        }
-
-        // Cases deliberately fall through (see note A in antlr3convertutf.c).
-        // Legal UTF8 is at most 4 bytes, but 6 bytes could be used in old UTF8, so
-        // we allow it.
-        //
-        ch  = 0;
-       	switch (extraBytesToRead) {
-	    case 5: ch += *nextChar++; ch <<= 6;
-	    case 4: ch += *nextChar++; ch <<= 6;
-	    case 3: ch += *nextChar++; ch <<= 6;
-	    case 2: ch += *nextChar++; ch <<= 6;
-	    case 1: ch += *nextChar++; ch <<= 6;
-	    case 0: ch += *nextChar++;
-	}
-
-        // Magically correct the input value
-        //
-	ch -= offsetsFromUTF8[extraBytesToRead];
-	if  (ch == input->newlineChar)
-	{
-	    /* Reset for start of a new line of input
-	     */
-	    input->line++;
-	    input->charPositionInLine	= 0;
-	    input->currentLine		= (void *)nextChar;
-	}
-
-        // Update input pointer
-        //
-        input->nextChar = nextChar;
-    }
-}
-/** \brief Return the input element assuming a UTF8 input
- *
- * \param[in] input Input stream context pointer
- * \param[in] la 1 based offset of next input stream element
- *
- * \return Next input character in internal ANTLR3 encoding (UTF32)
- */
-static ANTLR3_UCHAR 
-antlr3UTF8LA(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
-{
-    pANTLR3_INPUT_STREAM    input;
-    ANTLR3_UINT32           extraBytesToRead;
-    ANTLR3_UCHAR            ch;
-    pANTLR3_UINT8           nextChar;
-
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    nextChar = input->nextChar;
-
-    // Do we need to traverse forwards or backwards?
-    // - LA(0) is treated as LA(1) and we assume that the nextChar is
-    //   already positioned.
-    // - LA(n), n>1 means we must traverse forward n-1 characters, catering for UTF8 encoding
-    // - LA(-n) means we must traverse backwards n characters
-    //
-    if (la > 1) {
-
-        // Make sure that we have at least one character left before trying to
-        // loop through the buffer.
-        //
-        if	(nextChar < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-        {	
-            // Now traverse n-1 characters forward
-            //
-            while (--la > 0)
-            {
-                // Does the next character require trailing bytes?
-                // If so advance the pointer by that many bytes as well as advancing
-                // one position for what will be at least a single byte character.
-                //
-                nextChar += trailingBytesForUTF8[*nextChar] + 1;
-
-                // Does that calculation take us past the byte length of the buffer?
-                //
-                if	(nextChar >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-                {
-                    return ANTLR3_CHARSTREAM_EOF;
-                }
-            }
-        }
-        else
-        {
-            return ANTLR3_CHARSTREAM_EOF;
-        }
-    }
-    else
-    {
-        // LA is 1, 0 or negative; for LA(-n) we decrease the pointer by n character positions
-        //
-        while   (nextChar > (pANTLR3_UINT8)input->data && la++ < 0)
-        {
-            // Traversing backwards in UTF8 means decrementing by one,
-            // then continuing to decrement while the byte we land on
-            // is flagged as being a trailing byte of an encoded code point.
-            // Trailing UTF8 bytes always start with 10 in binary. We assume that
-            // the UTF8 is well formed and do not check boundary conditions.
-            //
-            nextChar--;
-            while ((*nextChar & 0xC0) == 0x80)
-            {
-                nextChar--;
-            }
-        }
-    }
-
-    // nextChar is now pointing at the UTF8 encoded character that we need to
-    // decode and return.
-    //
-    // Are there more bytes needed to make up the whole thing?
-    //
-    extraBytesToRead = trailingBytesForUTF8[*nextChar];
-    if	(nextChar + extraBytesToRead >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-    {
-        return ANTLR3_CHARSTREAM_EOF;
-    }
-
-    // Cases deliberately fall through (see note A in antlr3convertutf.c)
-    // 
-    ch  = 0;
-    switch (extraBytesToRead) {
-            case 5: ch += *nextChar++; ch <<= 6;
-            case 4: ch += *nextChar++; ch <<= 6;
-            case 3: ch += *nextChar++; ch <<= 6;
-            case 2: ch += *nextChar++; ch <<= 6;
-            case 1: ch += *nextChar++; ch <<= 6;
-            case 0: ch += *nextChar++;
-    }
-
-    // Magically correct the input value
-    //
-    ch -= offsetsFromUTF8[extraBytesToRead];
-
-    return ch;
-}
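The negative lookahead branch above leans on one property of UTF8: continuation bytes always carry 10 as their two most significant bits. A minimal standalone sketch of that backward step, under the same assumption of well formed input that the runtime itself makes:

// Sketch: move back to the lead byte of the previous UTF8 character by
// skipping continuation bytes (10xxxxxx). Assumes well formed UTF8.
static const ANTLR3_UINT8 *
utf8Prev(const ANTLR3_UINT8 *p)
{
    p--;
    while ((*p & 0xC0) == 0x80)   // 0xC0 masks the top two bits
    {
        p--;
    }
    return p;                     // now at the lead byte of the previous character
}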
-
-// EBCDIC to ASCII conversion table
-//
-// This is for EBCDIC EDF04 translated to ISO-8859-1, which is the usually accepted POSIX
-// translation; the character tables are published all over the interweb.
-// 
-const ANTLR3_UCHAR e2a[256] =
-{
-    0x00, 0x01, 0x02, 0x03, 0x85, 0x09, 0x86, 0x7f,
-    0x87, 0x8d, 0x8e, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-    0x10, 0x11, 0x12, 0x13, 0x8f, 0x0a, 0x08, 0x97,
-    0x18, 0x19, 0x9c, 0x9d, 0x1c, 0x1d, 0x1e, 0x1f,
-    0x80, 0x81, 0x82, 0x83, 0x84, 0x92, 0x17, 0x1b,
-    0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x05, 0x06, 0x07, 
-    0x90, 0x91, 0x16, 0x93, 0x94, 0x95, 0x96, 0x04,
-    0x98, 0x99, 0x9a, 0x9b, 0x14, 0x15, 0x9e, 0x1a,
-    0x20, 0xa0, 0xe2, 0xe4, 0xe0, 0xe1, 0xe3, 0xe5,
-    0xe7, 0xf1, 0x60, 0x2e, 0x3c, 0x28, 0x2b, 0x7c,
-    0x26, 0xe9, 0xea, 0xeb, 0xe8, 0xed, 0xee, 0xef,
-    0xec, 0xdf, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x9f,
-    0x2d, 0x2f, 0xc2, 0xc4, 0xc0, 0xc1, 0xc3, 0xc5,
-    0xc7, 0xd1, 0x5e, 0x2c, 0x25, 0x5f, 0x3e, 0x3f,
-    0xf8, 0xc9, 0xca, 0xcb, 0xc8, 0xcd, 0xce, 0xcf,
-    0xcc, 0xa8, 0x3a, 0x23, 0x40, 0x27, 0x3d, 0x22,
-    0xd8, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
-    0x68, 0x69, 0xab, 0xbb, 0xf0, 0xfd, 0xfe, 0xb1,
-    0xb0, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
-    0x71, 0x72, 0xaa, 0xba, 0xe6, 0xb8, 0xc6, 0xa4,
-    0xb5, 0xaf, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
-    0x79, 0x7a, 0xa1, 0xbf, 0xd0, 0xdd, 0xde, 0xae,
-    0xa2, 0xa3, 0xa5, 0xb7, 0xa9, 0xa7, 0xb6, 0xbc,
-    0xbd, 0xbe, 0xac, 0x5b, 0x5c, 0x5d, 0xb4, 0xd7,
-    0xf9, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
-    0x48, 0x49, 0xad, 0xf4, 0xf6, 0xf2, 0xf3, 0xf5,
-    0xa6, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
-    0x51, 0x52, 0xb9, 0xfb, 0xfc, 0xdb, 0xfa, 0xff,
-    0xd9, 0xf7, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
-    0x59, 0x5a, 0xb2, 0xd4, 0xd6, 0xd2, 0xd3, 0xd5,
-    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
-    0x38, 0x39, 0xb3, 0x7b, 0xdc, 0x7d, 0xda, 0x7e
-};
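A quick way to read the table (illustrative spot checks only, not part of the runtime): the EBCDIC letters, digits and space land on their familiar ASCII values, so the LA routine below needs nothing more than a single lookup.

#include <assert.h>

// Illustrative spot checks against the e2a table above.
static void
e2aSanityCheck(void)
{
    assert(e2a[0xC1] == 0x41);   // EBCDIC 'A'   -> ASCII 'A'
    assert(e2a[0xF0] == 0x30);   // EBCDIC '0'   -> ASCII '0'
    assert(e2a[0x40] == 0x20);   // EBCDIC space -> ASCII space
}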
-
-/// \brief Common function to set up the function interface for an EBCDIC input stream.
-///
-/// \param input Input stream context pointer
-///
-void 
-antlr3EBCDICSetupStream	(pANTLR3_INPUT_STREAM input)
-{
-    // EBCDIC streams can use the standard 8 bit string factory
-    //
-    input->strFactory	= antlr3StringFactoryNew(input->encoding);
-
-    // Generic API that does not care about endianness.
-    //
-    input->istream->_LA         = antlr3EBCDICLA;       // Return the UTF32 character at offset n (1 based)    
-    input->charByteSize		= 1;	                // Size in bytes of characters in this stream.
-}
-
-/// \brief Return the input element assuming an 8 bit EBCDIC input
-///
-/// \param[in] input Input stream context pointer
-/// \param[in] la 1 based offset of next input stream element
-///
-/// \return Next input character in internal ANTLR3 encoding (UTF32) after translation
-///         from EBCDIC to ASCII
-///
-static ANTLR3_UCHAR 
-antlr3EBCDICLA(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
-{
-    pANTLR3_INPUT_STREAM input;
-
-    input   = ((pANTLR3_INPUT_STREAM) (is->super));
-
-    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
-    {
-        return	ANTLR3_CHARSTREAM_EOF;
-    }
-    else
-    {
-        // Translate the required character via the constant conversion table
-        //
-        return	e2a[(*((pANTLR3_UINT8)input->nextChar + la - 1))];
-    }
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/C/src/antlr3lexer.c b/antlr-3.4/runtime/C/src/antlr3lexer.c
deleted file mode 100644
index d981ab7..0000000
--- a/antlr-3.4/runtime/C/src/antlr3lexer.c
+++ /dev/null
@@ -1,904 +0,0 @@
-/** \file
- *
- * Base implementation of an antlr 3 lexer.
- *
- * An ANTLR3 lexer implements a base recognizer, a token source and
- * a lexer interface. It constructs a base recognizer with default
- * functions, then overrides any of these that are lexer specific (replacing
- * the usual default implementation of the base recognizer).
- */
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3lexer.h>
-
-static void					mTokens						(pANTLR3_LEXER lexer);
-static void					setCharStream				(pANTLR3_LEXER lexer,  pANTLR3_INPUT_STREAM input);
-static void					pushCharStream				(pANTLR3_LEXER lexer,  pANTLR3_INPUT_STREAM input);
-static void					popCharStream				(pANTLR3_LEXER lexer);
-
-static void					emitNew						(pANTLR3_LEXER lexer,  pANTLR3_COMMON_TOKEN token);
-static pANTLR3_COMMON_TOKEN emit						(pANTLR3_LEXER lexer);
-static ANTLR3_BOOLEAN	    matchs						(pANTLR3_LEXER lexer, ANTLR3_UCHAR * string);
-static ANTLR3_BOOLEAN	    matchc						(pANTLR3_LEXER lexer, ANTLR3_UCHAR c);
-static ANTLR3_BOOLEAN	    matchRange					(pANTLR3_LEXER lexer, ANTLR3_UCHAR low, ANTLR3_UCHAR high);
-static void					matchAny					(pANTLR3_LEXER lexer);
-static void					recover						(pANTLR3_LEXER lexer);
-static ANTLR3_UINT32	    getLine						(pANTLR3_LEXER lexer);
-static ANTLR3_MARKER	    getCharIndex				(pANTLR3_LEXER lexer);
-static ANTLR3_UINT32	    getCharPositionInLine		(pANTLR3_LEXER lexer);
-static pANTLR3_STRING	    getText						(pANTLR3_LEXER lexer);
-static pANTLR3_COMMON_TOKEN nextToken					(pANTLR3_TOKEN_SOURCE toksource);
-
-static void					displayRecognitionError	    (pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 * tokenNames);
-static void					reportError					(pANTLR3_BASE_RECOGNIZER rec);
-static void *				getCurrentInputSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream);
-static void *				getMissingSymbol			(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
-															ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow);
-
-static void					reset						(pANTLR3_BASE_RECOGNIZER rec);
-
-static void					freeLexer					(pANTLR3_LEXER lexer);
-
-
-ANTLR3_API pANTLR3_LEXER
-antlr3LexerNew(ANTLR3_UINT32 sizeHint, pANTLR3_RECOGNIZER_SHARED_STATE state)
-{
-    pANTLR3_LEXER   lexer;
-    pANTLR3_COMMON_TOKEN	specialT;
-
-	/* Allocate memory
-	*/
-	lexer   = (pANTLR3_LEXER) ANTLR3_MALLOC(sizeof(ANTLR3_LEXER));
-
-	if	(lexer == NULL)
-	{
-		return	NULL;
-	}
-
-	/* Now we need to create the base recognizer
-	*/
-	lexer->rec	    =  antlr3BaseRecognizerNew(ANTLR3_TYPE_LEXER, sizeHint, state);
-
-	if	(lexer->rec == NULL)
-	{
-		lexer->free(lexer);
-		return	NULL;
-	}
-	lexer->rec->super  =  lexer;
-
-	lexer->rec->displayRecognitionError	    = displayRecognitionError;
-	lexer->rec->reportError					= reportError;
-	lexer->rec->reset						= reset;
-	lexer->rec->getCurrentInputSymbol		= getCurrentInputSymbol;
-	lexer->rec->getMissingSymbol			= getMissingSymbol;
-
-	/* Now install the token source interface
-	*/
-	if	(lexer->rec->state->tokSource == NULL) 
-	{
-		lexer->rec->state->tokSource	= (pANTLR3_TOKEN_SOURCE)ANTLR3_CALLOC(1, sizeof(ANTLR3_TOKEN_SOURCE));
-
-		if	(lexer->rec->state->tokSource == NULL) 
-		{
-			lexer->rec->free(lexer->rec);
-			lexer->free(lexer);
-
-			return	NULL;
-		}
-		lexer->rec->state->tokSource->super    =  lexer;
-
-		/* Install the default nextToken() method, which may be overridden
-		 * by generated code, or by anything else in fact.
-		 */
-		lexer->rec->state->tokSource->nextToken	    =  nextToken;
-		lexer->rec->state->tokSource->strFactory    = NULL;
-
-		lexer->rec->state->tokFactory				= NULL;
-	}
-
-    /* Install the lexer API
-     */
-    lexer->setCharStream			=  setCharStream;
-    lexer->mTokens					= (void (*)(void *))(mTokens);
-    lexer->setCharStream			=  setCharStream;
-    lexer->pushCharStream			=  pushCharStream;
-    lexer->popCharStream			=  popCharStream;
-    lexer->emit						=  emit;
-    lexer->emitNew					=  emitNew;
-    lexer->matchs					=  matchs;
-    lexer->matchc					=  matchc;
-    lexer->matchRange				=  matchRange;
-    lexer->matchAny					=  matchAny;
-    lexer->recover					=  recover;
-    lexer->getLine					=  getLine;
-    lexer->getCharIndex				=  getCharIndex;
-    lexer->getCharPositionInLine    =  getCharPositionInLine;
-    lexer->getText					=  getText;
-    lexer->free						=  freeLexer;
-    
-    /* Initialise the eof token
-     */
-    specialT					= &(lexer->rec->state->tokSource->eofToken);
-    antlr3SetTokenAPI	  (specialT);
-    specialT->setType	  (specialT, ANTLR3_TOKEN_EOF);
-    specialT->factoryMade		= ANTLR3_TRUE;					// Prevent things trying to free() it
-    specialT->strFactory        = NULL;
-	specialT->textState			= ANTLR3_TEXT_NONE;
-	specialT->custom			= NULL;
-	specialT->user1				= 0;
-	specialT->user2				= 0;
-	specialT->user3				= 0;
-
-	// Initialize the skip token.
-	//
-    specialT					= &(lexer->rec->state->tokSource->skipToken);
-    antlr3SetTokenAPI	  (specialT);
-    specialT->setType	  (specialT, ANTLR3_TOKEN_INVALID);
-    specialT->factoryMade		= ANTLR3_TRUE;					// Prevent things trying to free() it
-    specialT->strFactory        = NULL;
-	specialT->custom			= NULL;
-	specialT->user1				= 0;
-	specialT->user2				= 0;
-	specialT->user3				= 0;
-    return  lexer;
-}
-
-static void
-reset	(pANTLR3_BASE_RECOGNIZER rec)
-{
-    pANTLR3_LEXER   lexer;
-
-    lexer   = rec->super;
-
-    lexer->rec->state->token			    = NULL;
-    lexer->rec->state->type			    = ANTLR3_TOKEN_INVALID;
-    lexer->rec->state->channel			    = ANTLR3_TOKEN_DEFAULT_CHANNEL;
-    lexer->rec->state->tokenStartCharIndex	    = -1;
-    lexer->rec->state->tokenStartCharPositionInLine = -1;
-    lexer->rec->state->tokenStartLine		    = -1;
-
-    lexer->rec->state->text	                    = NULL;
-
-    // OK - that's all hunky dory, but we may well have had
-    // a token factory that needs a reset. Do that here
-    //
-    if  (lexer->rec->state->tokFactory != NULL)
-    {
-        lexer->rec->state->tokFactory->reset(lexer->rec->state->tokFactory);
-    }
-}
-
-///
-/// \brief
-/// Returns the next available token from the current input stream.
-/// 
-/// \param toksource
-/// Points to the implementation of a token source. The lexer is 
-/// addressed by the super structure pointer.
-/// 
-/// \returns
-/// The next token in the current input stream or the EOF token
-/// if there are no more tokens.
-/// 
-/// \remarks
-/// Write remarks for nextToken here.
-/// 
-/// \see nextToken
-///
-ANTLR3_INLINE static pANTLR3_COMMON_TOKEN
-nextTokenStr	    (pANTLR3_TOKEN_SOURCE toksource)
-{
-    pANTLR3_LEXER                   lexer;
-    pANTLR3_RECOGNIZER_SHARED_STATE state;
-    pANTLR3_INPUT_STREAM            input;
-    pANTLR3_INT_STREAM              istream;
-
-    lexer   = (pANTLR3_LEXER)(toksource->super);
-    state   = lexer->rec->state;
-    input   = lexer->input;
-    istream = input->istream;
-
-    /// Loop until we get a non skipped token or EOF
-    ///
-    for	(;;)
-    {
-        // Get rid of any previous token (the token factory takes care of
-        // any de-allocation when this token is finally used up).
-        //
-        state->token		    = NULL;
-        state->error		    = ANTLR3_FALSE;	    // Start out without an exception
-        state->failed		    = ANTLR3_FALSE;
-
-        // Now call the matching rules and see if we can generate a new token
-        //
-        for	(;;)
-        {
-            // Record the start of the token in our input stream.
-            //
-            state->channel			    = ANTLR3_TOKEN_DEFAULT_CHANNEL;
-            state->tokenStartCharIndex	            = (ANTLR3_MARKER)(((pANTLR3_UINT8)input->nextChar));
-            state->tokenStartCharPositionInLine     = input->charPositionInLine;
-            state->tokenStartLine		    = input->line;
-            state->text			            = NULL;
-            state->custom                           = NULL;
-            state->user1                            = 0;
-            state->user2                            = 0;
-            state->user3                            = 0;
-
-            if  (istream->_LA(istream, 1) == ANTLR3_CHARSTREAM_EOF)
-            {
-                // Reached the end of the current stream, nothing more to do if this is
-                // the last in the stack.
-                //
-                pANTLR3_COMMON_TOKEN    teof = &(toksource->eofToken);
-
-                teof->setStartIndex (teof, lexer->getCharIndex(lexer));
-                teof->setStopIndex  (teof, lexer->getCharIndex(lexer));
-                teof->setLine	    (teof, lexer->getLine(lexer));
-                teof->factoryMade = ANTLR3_TRUE;	// This isn't really manufactured but it stops things from trying to free it
-                return  teof;
-            }
-
-            state->token		= NULL;
-            state->error		= ANTLR3_FALSE;	    // Start out without an exception
-            state->failed		= ANTLR3_FALSE;
-
-            // Call the generated lexer, see if it can get a new token together.
-            //
-            lexer->mTokens(lexer->ctx);
-
-            if  (state->error  == ANTLR3_TRUE)
-            {
-                // Recognition exception, report it and try to recover.
-                //
-                state->failed	    = ANTLR3_TRUE;
-                lexer->rec->reportError(lexer->rec);
-                lexer->recover(lexer); 
-            }
-            else
-            {
-                if (state->token == NULL)
-                {
-                    // Emit the real token, which adds it in to the token stream basically
-                    //
-                    emit(lexer);
-                }
-                else if	(state->token ==  &(toksource->skipToken))
-                {
-                    // A real token could have been generated, but "Computer says naaaaah" and
-                    // it is just something we need to skip altogether.
-                    //
-                    continue;
-                }
-
-                // Good token, not skipped, not EOF token
-                //
-                return  state->token;
-            }
-        }
-    }
-}
-
-/**
- * \brief
- * Default implementation of the nextToken() call for a lexer.
- * 
- * \param toksource
- * Points to the implementation of a token source. The lexer is 
- * addressed by the super structure pointer.
- * 
- * \returns
- * The next token in the current input stream or the EOF token
- * if there are no more tokens in any input stream in the stack.
- * 
- * Write detailed description for nextToken here.
- * 
- * \remarks
- * Write remarks for nextToken here.
- * 
- * \see nextTokenStr
- */
-static pANTLR3_COMMON_TOKEN
-nextToken	    (pANTLR3_TOKEN_SOURCE toksource)
-{
-	pANTLR3_COMMON_TOKEN tok;
-
-	// Find the next token in the current stream
-	//
-	tok = nextTokenStr(toksource);
-
-	// If we got to the EOF token then switch to the previous
-	// input stream if there were any and just return the
-	// EOF if there are none. We must check the next token
-	// in any outstanding input stream we pop into the active
-	// role to see if it was sitting at EOF after PUSHing the
-	// stream we just consumed, otherwise we will return EOF
-	// on the reinstalled input stream, when in actual fact
-	// there might be more input streams to POP before the
-	// real EOF of the whole logical input stream. Hence we
-	// use a while loop here until we find something in the stream
-	// that isn't EOF or we reach the actual end of the last input
-	// stream on the stack.
-	//
-	while	(tok->type == ANTLR3_TOKEN_EOF)
-	{
-		pANTLR3_LEXER   lexer;
-
-		lexer   = (pANTLR3_LEXER)(toksource->super);
-
-		if  (lexer->rec->state->streams != NULL && lexer->rec->state->streams->size(lexer->rec->state->streams) > 0)
-		{
-			// We have another input stream in the stack so we
-			// need to revert to it, then resume the loop to check
-			// it wasn't sitting at EOF itself.
-			//
-			lexer->popCharStream(lexer);
-			tok = nextTokenStr(toksource);
-		}
-		else
-		{
-			// There were no more streams on the input stack
-			// so this EOF is the 'real' logical EOF for
-			// the input stream. So we just exit the loop and 
-			// return the EOF we have found.
-			//
-			break;
-		}
-		
-	}
-
-	// return whatever token we have, which may be EOF
-	//
-	return  tok;
-}
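Because the loop above only returns EOF once every stacked stream is exhausted, callers can treat the token source as one flat stream. A hedged usage sketch, assuming a lexer has already been constructed (for example with antlr3LexerNewStream below):

// Sketch of a token pumping loop over an already constructed lexer; callers
// see exactly one EOF no matter how many streams were pushed and popped.
pANTLR3_TOKEN_SOURCE source = lexer->rec->state->tokSource;
pANTLR3_COMMON_TOKEN token;

do
{
    token = source->nextToken(source);
    // ... hand the token to a token stream / parser here ...
}
while (token->type != ANTLR3_TOKEN_EOF);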
-
-ANTLR3_API pANTLR3_LEXER
-antlr3LexerNewStream(ANTLR3_UINT32 sizeHint, pANTLR3_INPUT_STREAM input, pANTLR3_RECOGNIZER_SHARED_STATE state)
-{
-    pANTLR3_LEXER   lexer;
-
-    // Create a basic lexer first
-    //
-    lexer   = antlr3LexerNew(sizeHint, state);
-
-    if	(lexer != NULL) 
-    {
-		// Install the input stream and reset the lexer
-		//
-		setCharStream(lexer, input);
-    }
-
-    return  lexer;
-}
-
-static void mTokens	    (pANTLR3_LEXER lexer)
-{
-    if	(lexer)	    // Fool compiler, avoid pragmas
-    {
-		ANTLR3_FPRINTF(stderr, "lexer->mTokens(): Error: No lexer rules were added to the lexer yet!\n");
-    }
-}
-
-static void			
-reportError		    (pANTLR3_BASE_RECOGNIZER rec)
-{
-    // Indicate this recognizer had an error while processing.
-	//
-	rec->state->errorCount++;
-
-    rec->displayRecognitionError(rec, rec->state->tokenNames);
-}
-
-#ifdef	ANTLR3_WINDOWS
-#pragma warning( disable : 4100 )
-#endif
-
-/** Default lexer error handler (works for 8 bit streams only!!!)
- */
-static void			
-displayRecognitionError	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_UINT8 * tokenNames)
-{
-    pANTLR3_LEXER			lexer;
-	pANTLR3_EXCEPTION	    ex;
-	pANTLR3_STRING			ftext;
-
-    lexer   = (pANTLR3_LEXER)(recognizer->super);
-	ex		= lexer->rec->state->exception;
-
-	// See if there is a 'filename' we can use
-    //
-    if	(ex->name == NULL)
-    {
-		ANTLR3_FPRINTF(stderr, "-unknown source-(");
-    }
-    else
-    {
-		ftext = ex->streamName->to8(ex->streamName);
-		ANTLR3_FPRINTF(stderr, "%s(", ftext->chars);
-    }
-
-    ANTLR3_FPRINTF(stderr, "%d) ", recognizer->state->exception->line);
-    ANTLR3_FPRINTF(stderr, ": lexer error %d :\n\t%s at offset %d, ", 
-						ex->type,
-						(pANTLR3_UINT8)	   (ex->message),
-					    ex->charPositionInLine+1
-		    );
-	{
-		ANTLR3_INT32	width;
-
-		width	= ANTLR3_UINT32_CAST(( (pANTLR3_UINT8)(lexer->input->data) + (lexer->input->size(lexer->input) )) - (pANTLR3_UINT8)(ex->index));
-
-		if	(width >= 1)
-		{			
-			if	(isprint(ex->c))
-			{
-				ANTLR3_FPRINTF(stderr, "near '%c' :\n", ex->c);
-			}
-			else
-			{
-				ANTLR3_FPRINTF(stderr, "near char(%#02X) :\n", (ANTLR3_UINT8)(ex->c));
-			}
-			ANTLR3_FPRINTF(stderr, "\t%.*s\n", width > 20 ? 20 : width ,((pANTLR3_UINT8)ex->index));
-		}
-		else
-		{
-			ANTLR3_FPRINTF(stderr, "(end of input).\n\t This indicates a poorly specified lexer RULE\n\t or unterminated input element such as: \"STRING[\"]\n");
-			ANTLR3_FPRINTF(stderr, "\t The lexer was matching from line %d, offset %d, which\n\t ", 
-								(ANTLR3_UINT32)(lexer->rec->state->tokenStartLine),
-								(ANTLR3_UINT32)(lexer->rec->state->tokenStartCharPositionInLine)
-								);
-			width = ANTLR3_UINT32_CAST(((pANTLR3_UINT8)(lexer->input->data)+(lexer->input->size(lexer->input))) - (pANTLR3_UINT8)(lexer->rec->state->tokenStartCharIndex));
-
-			if	(width >= 1)
-			{
-				ANTLR3_FPRINTF(stderr, "looks like this:\n\t\t%.*s\n", width > 20 ? 20 : width ,(pANTLR3_UINT8)(lexer->rec->state->tokenStartCharIndex));
-			}
-			else
-			{
-				ANTLR3_FPRINTF(stderr, "is also the end of the line, so you must check your lexer rules\n");
-			}
-		}
-	}
-}
-
-static void setCharStream   (pANTLR3_LEXER lexer,  pANTLR3_INPUT_STREAM input)
-{
-    /* Install the input interface
-     */
-    lexer->input	= input;
-
-    /* We may need a token factory for the lexer; we don't destroy any existing factory
-     * until the lexer is destroyed, as people may still be using the tokens it produced.
-     * TODO: Later I will provide a dup() method for a token so that it can extract itself
-     * out of the factory. 
-     */
-    if	(lexer->rec->state->tokFactory == NULL)
-    {
-	lexer->rec->state->tokFactory	= antlr3TokenFactoryNew(input);
-    }
-    else
-    {
-	/* When the input stream is being changed on the fly, rather than
-	 * at the start of a new lexer, then we must tell the tokenFactory
-	 * which input stream to adorn the tokens with so that when they
-	 * are asked to provide their original input strings they can
-	 * do so from the correct text stream.
-	 */
-	lexer->rec->state->tokFactory->setInputStream(lexer->rec->state->tokFactory, input);
-    }
-
-    /* Propagate the string factory so that we preserve the encoding form from
-     * the input stream.
-     */
-    if	(lexer->rec->state->tokSource->strFactory == NULL)
-    {
-        lexer->rec->state->tokSource->strFactory	= input->strFactory;
-
-        // Set the newly acquired string factory up for our pre-made tokens
-        // for EOF.
-        //
-        if (lexer->rec->state->tokSource->eofToken.strFactory == NULL)
-        {
-            lexer->rec->state->tokSource->eofToken.strFactory = input->strFactory;
-        }
-    }
-
-    /* This is a lexer, install the appropriate exception creator
-     */
-    lexer->rec->exConstruct = antlr3RecognitionExceptionNew;
-
-    /* Set the current token to nothing
-     */
-    lexer->rec->state->token		= NULL;
-    lexer->rec->state->text			= NULL;
-    lexer->rec->state->tokenStartCharIndex	= -1;
-
-    /* Copy the name of the char stream to the token source
-     */
-    lexer->rec->state->tokSource->fileName = input->fileName;
-}
-
-/*!
- * \brief
- * Change to a new input stream, remembering the old one.
- * 
- * \param lexer
- * Pointer to the lexer instance to switch input streams for.
- * 
- * \param input
- * New input stream to install as the current one.
- * 
- * Switches the current character input stream to 
- * a new one, saving the old one, which we will revert to at the end of this 
- * new one.
- */
-static void
-pushCharStream  (pANTLR3_LEXER lexer,  pANTLR3_INPUT_STREAM input)
-{
-	// Do we need a new input stream stack?
-	//
-	if	(lexer->rec->state->streams == NULL)
-	{
-		// This is the first call to stack a new
-		// stream and so we must create the stack first.
-		//
-		lexer->rec->state->streams = antlr3StackNew(0);
-
-		if  (lexer->rec->state->streams == NULL)
-		{
-			// Could not do this, we just fail to push it.
-			// TODO: Consider if this is what we want to do, but then
-			//       any programmer can override this method to do something else.
-			return;
-		}
-	}
-
-	// We have a stack, so we can save the current input stream
-	// into it.
-	//
-	lexer->input->istream->mark(lexer->input->istream);
-	lexer->rec->state->streams->push(lexer->rec->state->streams, lexer->input, NULL);
-
-	// And now we can install this new one
-	//
-	lexer->setCharStream(lexer, input);
-}
-
-/*!
- * \brief
- * Stops using the current input stream and reverts to any prior
- * input stream on the stack.
- * 
- * \param lexer
- * Description of parameter lexer.
- * 
- * Pointer to a function that abandons the current input stream, whether it
- * is empty or not and reverts to the previous stacked input stream.
- *
- * \remark
- * The function fails silently if there are no prior input streams.
- */
-static void
-popCharStream   (pANTLR3_LEXER lexer)
-{
-    pANTLR3_INPUT_STREAM input;
-
-    // If we do not have a stream stack or we are already at the
-    // stack bottom, then do nothing.
-    //
-    if	(lexer->rec->state->streams != NULL && lexer->rec->state->streams->size(lexer->rec->state->streams) > 0)
-    {
-	// We just leave the current stream to its fate, we do not close
-	// it or anything as we do not know what the programmer intended
-	// for it. This method can always be overridden of course.
-	// So just find out what was currently saved on the stack and use
-	// that now, then pop it from the stack.
-	//
-	input	= (pANTLR3_INPUT_STREAM)(lexer->rec->state->streams->top);
-	lexer->rec->state->streams->pop(lexer->rec->state->streams);
-
-	// Now install the stream as the current one.
-	//
-	lexer->setCharStream(lexer, input);
-	lexer->input->istream->rewindLast(lexer->input->istream);
-    }
-    return;
-}
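Together, pushCharStream and popCharStream give the include-file pattern: a lexer action pushes the nested stream, and nextToken pops it again when that stream reaches EOF. A sketch of the pushing side only; how the nested pANTLR3_INPUT_STREAM is built is deliberately left out and no particular constructor is implied.

// Sketch: divert the lexer to a nested input stream (e.g. an included file).
// 'nested' is assumed to be a pANTLR3_INPUT_STREAM created elsewhere.
static void
includeStream(pANTLR3_LEXER lexer, pANTLR3_INPUT_STREAM nested)
{
    lexer->pushCharStream(lexer, nested);   // marks and stacks the current stream
    // Lexing now continues in 'nested'; when it reaches EOF, nextToken()
    // calls popCharStream() and resumes the saved stream where it left off.
}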
-
-static void emitNew	    (pANTLR3_LEXER lexer,  pANTLR3_COMMON_TOKEN token)
-{
-    lexer->rec->state->token    = token;	/* Voila!   */
-}
-
-static pANTLR3_COMMON_TOKEN
-emit	    (pANTLR3_LEXER lexer)
-{
-    pANTLR3_COMMON_TOKEN	token;
-
-    /* We could check pointers to token factories and so on, but
-    * we are in code that we want to run as fast as possible
-    * so we are not checking any errors. So make sure you have installed an input stream before
-    * trying to emit a new token.
-    */
-    token   = lexer->rec->state->tokFactory->newToken(lexer->rec->state->tokFactory);
-
-    /* Install the supplied information, and some other bits we already know
-    * get added automatically, such as the input stream it is associated with
-    * (though it can all be overridden of course)
-    */
-    token->type		    = lexer->rec->state->type;
-    token->channel	    = lexer->rec->state->channel;
-    token->start	    = lexer->rec->state->tokenStartCharIndex;
-    token->stop		    = lexer->getCharIndex(lexer) - 1;
-    token->line		    = lexer->rec->state->tokenStartLine;
-    token->charPosition	= lexer->rec->state->tokenStartCharPositionInLine;
-
-    if	(lexer->rec->state->text != NULL)
-    {
-        token->textState	    = ANTLR3_TEXT_STRING;
-        token->tokText.text	    = lexer->rec->state->text;
-    }
-    else
-    {
-        token->textState	= ANTLR3_TEXT_NONE;
-    }
-    token->lineStart	= lexer->input->currentLine;
-    token->user1	= lexer->rec->state->user1;
-    token->user2	= lexer->rec->state->user2;
-    token->user3	= lexer->rec->state->user3;
-    token->custom	= lexer->rec->state->custom;
-
-    lexer->rec->state->token	    = token;
-
-    return  token;
-}
-
-/**
- * Free the resources allocated by a lexer
- */
-static void 
-freeLexer    (pANTLR3_LEXER lexer)
-{
-	// This may have been a delegate or delegator lexer, in which case the
-	// state may already have been freed (and set to NULL therefore)
-	// so we ignore the state if we don't have it.
-	//
-	if	(lexer->rec->state != NULL)
-	{
-		if	(lexer->rec->state->streams != NULL)
-		{
-			lexer->rec->state->streams->free(lexer->rec->state->streams);
-		}
-		if	(lexer->rec->state->tokFactory != NULL)
-		{
-			lexer->rec->state->tokFactory->close(lexer->rec->state->tokFactory);
-			lexer->rec->state->tokFactory = NULL;
-		}
-		if	(lexer->rec->state->tokSource != NULL)
-		{
-			ANTLR3_FREE(lexer->rec->state->tokSource);
-			lexer->rec->state->tokSource = NULL;
-		}
-	}
-	if	(lexer->rec != NULL)
-	{
-		lexer->rec->free(lexer->rec);
-		lexer->rec = NULL;
-	}
-	ANTLR3_FREE(lexer);
-}
-
-/** Implementation of matchs for the lexer, overrides any
- *  base implementation in the base recognizer. 
- *
- *  \remark
- *  Note that the generated code lays down arrays of ints for constant
- *  strings so that they are in UTF32 form!
- */
-static ANTLR3_BOOLEAN
-matchs(pANTLR3_LEXER lexer, ANTLR3_UCHAR * string)
-{
-	while   (*string != ANTLR3_STRING_TERMINATOR)
-	{
-		if  (lexer->input->istream->_LA(lexer->input->istream, 1) != (*string))
-		{
-			if	(lexer->rec->state->backtracking > 0)
-			{
-				lexer->rec->state->failed = ANTLR3_TRUE;
-				return ANTLR3_FALSE;
-			}
-
-			lexer->rec->exConstruct(lexer->rec);
-			lexer->rec->state->failed	 = ANTLR3_TRUE;
-
-			/* TODO: Implement exception creation more fully perhaps
-			 */
-			lexer->recover(lexer);
-			return  ANTLR3_FALSE;
-		}
-
-		/* Matched correctly, do consume it
-		 */
-		lexer->input->istream->consume(lexer->input->istream);
-		string++;
-
-		/* Reset any failed indicator
-		 */
-		lexer->rec->state->failed = ANTLR3_FALSE;
-	}
-
-
-	return  ANTLR3_TRUE;
-}
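The remark above matters if matchs() is ever called by hand: the argument is not a C string but an array of UTF32 code points terminated by ANTLR3_STRING_TERMINATOR, which is how generated lexers lay literals down. A hand-written equivalent, for illustration only:

// Hand-written equivalent of a generated keyword literal: one UTF32 code
// point per element, terminated by ANTLR3_STRING_TERMINATOR.
static ANTLR3_UCHAR lit_while[] =
{
    'w', 'h', 'i', 'l', 'e', ANTLR3_STRING_TERMINATOR
};

// ... inside a lexer rule:
// lexer->matchs(lexer, lit_while);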
-
-/** Implementation of matchc for the lexer, overrides any
- *  base implementation in the base recognizer. 
- *
- *  \remark
- *  Note that the generated code lays down arrays of ints for constant
- *  strings so that they are in UTF32 form!
- */
-static ANTLR3_BOOLEAN
-matchc(pANTLR3_LEXER lexer, ANTLR3_UCHAR c)
-{
-	if	(lexer->input->istream->_LA(lexer->input->istream, 1) == c)
-	{
-		/* Matched correctly, do consume it
-		 */
-		lexer->input->istream->consume(lexer->input->istream);
-
-		/* Reset any failed indicator
-		 */
-		lexer->rec->state->failed = ANTLR3_FALSE;
-
-		return	ANTLR3_TRUE;
-	}
-
-	/* Failed to match, exception and recovery time.
-	 */
-	if	(lexer->rec->state->backtracking > 0)
-	{
-		lexer->rec->state->failed  = ANTLR3_TRUE;
-		return	ANTLR3_FALSE;
-	}
-
-	lexer->rec->exConstruct(lexer->rec);
-
-	/* TODO: Implement exception creation more fully perhaps
-	 */
-	lexer->recover(lexer);
-
-	return  ANTLR3_FALSE;
-}
-
-/** Implementation of match range for the lexer, overrides any
- *  base implementation in the base recognizer. 
- *
- *  \remark
- *  Note that the generated code lays down arrays of ints for constant
- *  strings so that they are in UTF32 form!
- */
-static ANTLR3_BOOLEAN
-matchRange(pANTLR3_LEXER lexer, ANTLR3_UCHAR low, ANTLR3_UCHAR high)
-{
-    ANTLR3_UCHAR    c;
-
-    /* What is in the stream at the moment?
-     */
-    c	= lexer->input->istream->_LA(lexer->input->istream, 1);
-    if	( c >= low && c <= high)
-    {
-	/* Matched correctly, consume it
-	 */
-	lexer->input->istream->consume(lexer->input->istream);
-
-	/* Reset any failed indicator
-	 */
-	lexer->rec->state->failed = ANTLR3_FALSE;
-
-	return	ANTLR3_TRUE;
-    }
-    
-    /* Failed to match, exception and recovery time.
-     */
-
-    if	(lexer->rec->state->backtracking > 0)
-    {
-	lexer->rec->state->failed  = ANTLR3_TRUE;
-	return	ANTLR3_FALSE;
-    }
-
-    lexer->rec->exConstruct(lexer->rec);
-
-    /* TODO: Implement exception creation more fully
-     */
-    lexer->recover(lexer);
-
-    return  ANTLR3_FALSE;
-}
-
-static void
-matchAny	    (pANTLR3_LEXER lexer)
-{
-    lexer->input->istream->consume(lexer->input->istream);
-}
-
-static void
-recover	    (pANTLR3_LEXER lexer)
-{
-    lexer->input->istream->consume(lexer->input->istream);
-}
-
-static ANTLR3_UINT32
-getLine	    (pANTLR3_LEXER lexer)
-{
-    return  lexer->input->getLine(lexer->input);
-}
-
-static ANTLR3_UINT32
-getCharPositionInLine	(pANTLR3_LEXER lexer)
-{
-    return  lexer->input->charPositionInLine;
-}
-
-static ANTLR3_MARKER	getCharIndex	    (pANTLR3_LEXER lexer)
-{
-    return lexer->input->istream->index(lexer->input->istream);
-}
-
-static pANTLR3_STRING
-getText	    (pANTLR3_LEXER lexer)
-{
-	if (lexer->rec->state->text)
-	{
-		return	lexer->rec->state->text;
-
-	}
-	return  lexer->input->substr(
-									lexer->input, 
-									lexer->rec->state->tokenStartCharIndex,
-									lexer->getCharIndex(lexer) - lexer->input->charByteSize
-							);
-
-}
-
-static void *				
-getCurrentInputSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream)
-{
-	return NULL;
-}
-
-static void *				
-getMissingSymbol			(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
-									ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow)
-{
-	return NULL;
-}
diff --git a/antlr-3.4/runtime/C/src/antlr3rewritestreams.c b/antlr-3.4/runtime/C/src/antlr3rewritestreams.c
deleted file mode 100644
index 9afb6e1..0000000
--- a/antlr-3.4/runtime/C/src/antlr3rewritestreams.c
+++ /dev/null
@@ -1,844 +0,0 @@
-/// \file
-/// Implementation of token/tree streams that are used by the
-/// tree re-write rules to manipulate the tokens and trees produced
-/// by rules that are subject to rewrite directives.
-///
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3rewritestreams.h>
-
-// Static support function forward declarations for the stream types.
-//
-static	void				reset			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream); 
-static	void				add				(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el, void (ANTLR3_CDECL *freePtr)(void *));
-static	void *				next			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	pANTLR3_BASE_TREE	nextTree		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	void *				nextToken		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	void *				_next			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	void *				dupTok			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el);
-static	void *				dupTree			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el);
-static	void *				dupTreeNode		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el);
-static	pANTLR3_BASE_TREE	toTree			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element);
-static	pANTLR3_BASE_TREE	toTreeNode		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element);
-static	ANTLR3_BOOLEAN		hasNext			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	pANTLR3_BASE_TREE	nextNode		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	pANTLR3_BASE_TREE	nextNodeNode	(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	pANTLR3_BASE_TREE	nextNodeToken	(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	ANTLR3_UINT32		size			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	void *				getDescription	(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	void				freeRS			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-static	void				expungeRS		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
-
-
-// Place a now unused rewrite stream back on the rewrite stream pool
-// so we can reuse it if we need to.
-//
-static void
-freeRS	(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-	// Before placing the stream back in the pool, we
-	// need to clear any vector it has. This is so any
-	// free pointers that are associated with the
-	// entries are called.
-	//
-	if	(stream->elements != NULL)
-	{
-		// Factory generated vectors can be returned to the
-		// vector factory for later reuse.
-		//
-		if	(stream->elements->factoryMade == ANTLR3_TRUE)
-		{
-			pANTLR3_VECTOR_FACTORY factory = ((pANTLR3_COMMON_TREE_ADAPTOR)(stream->adaptor->super))->arboretum->vFactory;
-			factory->returnVector(factory, stream->elements);
-
-			stream->elements = NULL;
-		} 
-		else
-		{
-			// Other vectors we just clear, so that they can be reused if they come off the
-			// rewrite stream free stack later.
-			//
-			stream->elements->clear(stream->elements);
-			stream->freeElements = ANTLR3_TRUE;
-		}
-	}
-	else
-	{
-		stream->freeElements = ANTLR3_FALSE; // Just in case
-	}
-
-	// Add the stream into the recognizer stream stack vector
-	// adding the stream memory free routine so that
-	// it is thrown away when the stack vector is destroyed
-	//
-	stream->rec->state->rStreams->add(stream->rec->state->rStreams, stream, (void(*)(void *))expungeRS);
-}
-
-/** Do special nilNode reuse detection for node streams.
- */
-static void
-freeNodeRS(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-    pANTLR3_BASE_TREE tree;
-
-    // Before placing the stream back in the pool, we
-	// need to clear any vector it has. This is so any
-	// free pointers that are associated with the
-	// entries are called. However, if this particular function is called
-    // then we know that the entries in the stream are definitely
-    // tree nodes. Hence we check to see if any of them were nilNodes and,
-    // if they were, we can reuse them.
-	//
-	if	(stream->elements != NULL)
-	{
-        // We have some elements to traverse
-        //
-        ANTLR3_UINT32 i;
-
-        for (i = 1; i<= stream->elements->count; i++)
-        {
-            tree = (pANTLR3_BASE_TREE)(stream->elements->elements[i-1].element);
-            if  (tree != NULL && tree->isNilNode(tree))
-            {
-                // Had to remove this for now, check is not comprehensive enough
-                // tree->reuse(tree);
-            }
-
-        }
-		// Factory generated vectors can be returned to the
-		// vector factory for later reuse.
-		//
-		if	(stream->elements->factoryMade == ANTLR3_TRUE)
-		{
-			pANTLR3_VECTOR_FACTORY factory = ((pANTLR3_COMMON_TREE_ADAPTOR)(stream->adaptor->super))->arboretum->vFactory;
-			factory->returnVector(factory, stream->elements);
-
-			stream->elements = NULL;
-		} 
-		else
-		{
-			stream->elements->clear(stream->elements);
-			stream->freeElements = ANTLR3_TRUE;
-		}
-	}
-	else
-	{
-        if  (stream->singleElement != NULL)
-        {
-            tree = (pANTLR3_BASE_TREE)(stream->singleElement);
-            if  (tree->isNilNode(tree))
-            {
-                // Had to remove this for now, check is not comprehensive enough
-              //   tree->reuse(tree);
-            }
-        }
-        stream->singleElement = NULL;
-		stream->freeElements = ANTLR3_FALSE; // Just in case
-	}
-
-	// Add the stream into the recognizer stream stack vector
-	// adding the stream memory free routine so that
-	// it is thrown away when the stack vector is destroyed
-	//
-	stream->rec->state->rStreams->add(stream->rec->state->rStreams, stream, (void(*)(void *))expungeRS);
-}
-static void
-expungeRS(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-
-	if (stream->freeElements == ANTLR3_TRUE && stream->elements != NULL)
-	{
-		stream->elements->free(stream->elements);
-	}
-	ANTLR3_FREE(stream);
-}
-
-// Functions for creating streams
-//
-static  pANTLR3_REWRITE_RULE_ELEMENT_STREAM 
-antlr3RewriteRuleElementStreamNewAE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description)
-{
-	pANTLR3_REWRITE_RULE_ELEMENT_STREAM	stream;
-
-	// First - do we already have a rewrite stream that was returned
-	// to the pool? If we do, then we will just reuse it by resetting
-	// the generic interface.
-	//
-	if	(rec->state->rStreams->count > 0)
-	{
-		// Remove the entry from the vector. We do not
-		// cause it to be freed by using remove.
-		//
-		stream = rec->state->rStreams->remove(rec->state->rStreams, rec->state->rStreams->count - 1);
-
-		// We found a stream we can reuse.
-		// If the stream had a vector, then it will have been cleared
-		// when the freeRS was called that put it in this stack
-		//
-	}
-	else
-	{
-		// Ok, we need to allocate a new one as there were none on the stack.
-		// First job is to create the memory we need.
-		//
-		stream	= (pANTLR3_REWRITE_RULE_ELEMENT_STREAM) ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_REWRITE_RULE_ELEMENT_STREAM)));
-
-		if	(stream == NULL)
-		{
-			return	NULL;
-		}
-		stream->elements		= NULL;
-		stream->freeElements	= ANTLR3_FALSE;
-	}
-
-	// Populate the generic interface
-	//
-	stream->rec				= rec;
-	stream->reset			= reset;
-	stream->add				= add;
-	stream->next			= next;
-	stream->nextTree		= nextTree;
-	stream->nextNode		= nextNode;
-	stream->nextToken		= nextToken;
-	stream->_next			= _next;
-	stream->hasNext			= hasNext;
-	stream->size			= size;
-	stream->getDescription  = getDescription;
-	stream->toTree			= toTree;
-	stream->free			= freeRS;
-	stream->singleElement	= NULL;
-
-	// Reset the stream to empty.
-	//
-
-	stream->cursor			= 0;
-	stream->dirty			= ANTLR3_FALSE;
-
-	// Install the description
-	//
-	stream->elementDescription	= description;
-
-	// Install the adaptor
-	//
-	stream->adaptor		= adaptor;
-
-	return stream;
-}
-
-static pANTLR3_REWRITE_RULE_ELEMENT_STREAM 
-antlr3RewriteRuleElementStreamNewAEE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement)
-{
-	pANTLR3_REWRITE_RULE_ELEMENT_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAE(adaptor, rec, description);
-
-	if (stream == NULL)
-	{
-		return NULL;
-	}
-
-	// Stream seems good so we need to add the supplied element
-	//
-	if	(oneElement != NULL)
-	{
-		stream->add(stream, oneElement, NULL);
-	}
-	return stream;
-}
-
-static pANTLR3_REWRITE_RULE_ELEMENT_STREAM 
-antlr3RewriteRuleElementStreamNewAEV(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector)
-{
-	pANTLR3_REWRITE_RULE_ELEMENT_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAE(adaptor, rec, description);
-
-	if (stream == NULL)
-	{
-		return stream;
-	}
-
-	// Stream seems good so we need to install the vector we were
-	// given. We assume that someone else is going to free the
-	// vector.
-	//
-	if	(stream->elements != NULL && stream->elements->factoryMade == ANTLR3_FALSE && stream->freeElements == ANTLR3_TRUE )
-	{
-		stream->elements->free(stream->elements);
-	}
-	stream->elements		= vector;
-	stream->freeElements	= ANTLR3_FALSE;
-	return stream;
-}
-
-// Token rewrite stream ...
-//
-ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
-antlr3RewriteRuleTOKENStreamNewAE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description)
-{
-	pANTLR3_REWRITE_RULE_TOKEN_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAE(adaptor, rec, description);
-
-	if (stream == NULL)
-	{
-		return stream;
-	}
-
-	// Install the token based overrides
-	//
-	stream->dup			= dupTok;
-	stream->nextNode	= nextNodeToken;
-
-	// No nextNode implementation for a token rewrite stream
-	//
-	return stream;
-}
-
-ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
-antlr3RewriteRuleTOKENStreamNewAEE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement)
-{
-	pANTLR3_REWRITE_RULE_TOKEN_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAEE(adaptor, rec, description, oneElement);
-
-	if	(stream == NULL)
-	{
-		return	NULL;
-	}
-
-	// Install the token based overrides
-	//
-	stream->dup			= dupTok;
-	stream->nextNode	= nextNodeToken;
-
-	// nextNode is overridden (nextNodeToken) to wrap each token in a new AST node
-	//
-	return stream;
-}
-
-ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
-antlr3RewriteRuleTOKENStreamNewAEV(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector)
-{
-	pANTLR3_REWRITE_RULE_TOKEN_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAEV(adaptor, rec, description, vector);
-
-	if	(stream == NULL)
-	{
-		return	NULL;
-	}
-
-	// Install the token based overrides
-	//
-	stream->dup			= dupTok;
-	stream->nextNode	= nextNodeToken;
-
-	// nextNode is overridden (nextNodeToken) to wrap each token in a new AST node
-	//
-	return stream;
-}
-
-// Subtree rewrite stream
-//
-ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
-antlr3RewriteRuleSubtreeStreamNewAE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description)
-{
-	pANTLR3_REWRITE_RULE_SUBTREE_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAE(adaptor, rec, description);
-
-	if (stream == NULL)
-	{
-		return stream;
-	}
-
-	// Install the subtree based overrides
-	//
-	stream->dup			= dupTree;
-	stream->nextNode	= nextNode;
-	stream->free		= freeNodeRS;
-	return stream;
-
-}
-ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
-antlr3RewriteRuleSubtreeStreamNewAEE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement)
-{
-	pANTLR3_REWRITE_RULE_SUBTREE_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAEE(adaptor, rec, description, oneElement);
-
-	if (stream == NULL)
-	{
-		return stream;
-	}
-
-	// Install the subtree based overrides
-	//
-	stream->dup			= dupTree;
-	stream->nextNode	= nextNode;
-	stream->free		= freeNodeRS;
-
-	return stream;
-}
-
-ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
-antlr3RewriteRuleSubtreeStreamNewAEV(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector)
-{
-	pANTLR3_REWRITE_RULE_SUBTREE_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAEV(adaptor, rec, description, vector);
-
-	if (stream == NULL)
-	{
-		return NULL;
-	}
-
-	// Install the subtree based overrides
-	//
-	stream->dup			= dupTree;
-	stream->nextNode	= nextNode;
-	stream->free		= freeNodeRS;
-
-	return stream;
-}
-// Node rewrite stream ...
-//
-ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
-antlr3RewriteRuleNODEStreamNewAE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description)
-{
-	pANTLR3_REWRITE_RULE_NODE_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAE(adaptor, rec, description);
-
-	if (stream == NULL)
-	{
-		return stream;
-	}
-
-	// Install the node based overrides
-	//
-	stream->dup			= dupTreeNode;
-	stream->toTree		= toTreeNode;
-	stream->nextNode	= nextNodeNode;
-	stream->free		= freeNodeRS;
-
-	return stream;
-}
-
-ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
-antlr3RewriteRuleNODEStreamNewAEE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement)
-{
-	pANTLR3_REWRITE_RULE_NODE_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAEE(adaptor, rec, description, oneElement);
-
-	if	(stream == NULL)
-	{
-		return	NULL;
-	}
-
-	// Install the node based overrides
-	//
-	stream->dup			= dupTreeNode;
-	stream->toTree		= toTreeNode;
-	stream->nextNode	= nextNodeNode;
-	stream->free		= freeNodeRS;
-
-	return stream;
-}
-
-ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
-antlr3RewriteRuleNODEStreamNewAEV(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector)
-{
-	pANTLR3_REWRITE_RULE_NODE_STREAM	stream;
-
-	// First job is to create the memory we need.
-	//
-	stream	= antlr3RewriteRuleElementStreamNewAEV(adaptor, rec, description, vector);
-
-	if	(stream == NULL)
-	{
-		return	NULL;
-	}
-
-	// Install the Node based overrides
-	//
-	stream->dup			= dupTreeNode;
-	stream->toTree		= toTreeNode;
-	stream->nextNode	= nextNodeNode;
-	stream->free		= freeNodeRS;
-
-	return stream;
-}
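-
-// Illustrative usage sketch (editorial example, not original runtime code):
-// roughly how generated rewrite code drives a token stream built by the
-// constructors above. The 'adaptor', 'rec', 'tok' and 'root' values are
-// assumed to be supplied by the generated parser, and adaptor->addChild is
-// assumed from the base tree adaptor interface.
-//
-static void
-exampleTokenRewrite (pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, void * tok, pANTLR3_BASE_TREE root)
-{
-	pANTLR3_REWRITE_RULE_TOKEN_STREAM	ids;
-
-	// Create a stream with a description used only for error reporting
-	//
-	ids = antlr3RewriteRuleTOKENStreamNewAE(adaptor, rec, (pANTLR3_UINT8)"token ID");
-
-	// Record a matched token, then drain the stream, wrapping each token
-	// in a new AST node via nextNode (nextNodeToken above)
-	//
-	ids->add(ids, tok, NULL);
-	while (ids->hasNext(ids))
-	{
-		adaptor->addChild(adaptor, root, ids->nextNode(ids));
-	}
-	ids->free(ids);
-}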
-
-//----------------------------------------------------------------------
-// Static support functions 
-
-/// Reset the condition of this stream so that it appears we have
-/// not consumed any of its elements.  Elements themselves are untouched.
-///
-static void		
-reset    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-	stream->dirty	= ANTLR3_TRUE;
-	stream->cursor	= 0;
-}
-
-// Add a new pANTLR3_BASE_TREE to this stream
-//
-static void		
-add	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el, void (ANTLR3_CDECL *freePtr)(void *))
-{
-	if (el== NULL)
-	{
-		return;
-	}
-	// As we may be reusing a stream, we may already have allocated
-	// a rewrite stream vector. If we have, then it will be empty if
-	// we hold either zero or just one element in the rewrite stream
-	//
-	if (stream->elements != NULL && stream->elements->count > 0)
-	{
-		// We already have >1 entries in the stream. So we can just add this new element to the existing
-		// collection. 
-		//
-		stream->elements->add(stream->elements, el, freePtr);
-		return;
-	}
-	if (stream->singleElement == NULL)
-	{
-		stream->singleElement = el;
-		return;
-	}
-
-	// If we got here then we had only the one element so far
-	// and we must now create a vector to hold a collection of them
-	//
-	if	(stream->elements == NULL)
-	{
-        pANTLR3_VECTOR_FACTORY factory = ((pANTLR3_COMMON_TREE_ADAPTOR)(stream->adaptor->super))->arboretum->vFactory;
-
-        
-		stream->elements		= factory->newVector(factory);
-		stream->freeElements	= ANTLR3_TRUE;			// We allocated the vector, so we must free it later
-	}
-    
-	stream->elements->add	(stream->elements, stream->singleElement, freePtr);
-	stream->elements->add	(stream->elements, el, freePtr);
-	stream->singleElement	= NULL;
-
-	return;
-}
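-
-// Illustrative sketch (editorial example, not original runtime code) of the
-// single element optimisation implemented above: the first add() parks the
-// element in 'singleElement' and only a second add() allocates a vector from
-// the adaptor's factory and moves both elements into it. The element
-// pointers are assumed to come from the caller.
-//
-static void
-exampleAddTwo (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * elA, void * elB)
-{
-	stream->add(stream, elA, NULL);		// singleElement == elA, no vector allocated yet
-	stream->add(stream, elB, NULL);		// vector allocated, holds elA then elB, singleElement reset to NULL
-						// stream->size(stream) now reports 2
-}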
-
-/// Return the next element in the stream.  If out of elements, throw
-/// an exception unless size()==1.  If size is 1, then return elements[0].
-/// Return a duplicate node/subtree if stream is out of elements and
-/// size==1.  If we've already used the element, dup (dirty bit set).
-///
-static pANTLR3_BASE_TREE
-nextTree(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream) 
-{
-	ANTLR3_UINT32		n;
-	void			*  el;
-
-	n = stream->size(stream);
-
-	if ( stream->dirty || (stream->cursor >=n && n==1) ) 
-	{
-		// if out of elements and size is 1, dup
-		//
-		el = stream->_next(stream);
-		return stream->dup(stream, el);
-	}
-
-	// test size above then fetch
-	//
-	el = stream->_next(stream);
-	return el;
-}
-
-/// Return the next element for a caller that wants just the token
-///
-static	void *
-nextToken		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-	return stream->_next(stream);
-}
-
-/// Return the next element in the stream.  If out of elements, throw
-/// an exception unless size()==1.  If size is 1, then return elements[0].
-///
-static void *	
-next	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-	ANTLR3_UINT32   s;
-
-	s = stream->size(stream);
-	if (stream->cursor >= s && s == 1)
-	{
-		pANTLR3_BASE_TREE el;
-
-		el = stream->_next(stream);
-
-		return	stream->dup(stream, el);
-	}
-
-	return stream->_next(stream);
-}
-
-/// Do the work of getting the next element, making sure that it's
-/// a tree node or subtree.  Deal with the optimization of single-
-/// element list versus list of size > 1.  Throw an exception (or something similar)
-/// if the stream is empty or we're out of elements and size>1.
-/// You can override in a 'subclass' if necessary.
-///
-static void *
-_next    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-	ANTLR3_UINT32		n;
-	pANTLR3_BASE_TREE	t;
-
-	n = stream->size(stream);
-
-	if (n == 0)
-	{
-		// This means that the stream is empty
-		//
-		return NULL;	// Caller must cope with this
-	}
-
-	// Traversed all the available elements already?
-	//
-	if (stream->cursor >= n)
-	{
-		if (n == 1)
-		{
-			// Special case: the stream holds a single element, so callers will just dup it repeatedly
-			//
-			return stream->toTree(stream, stream->singleElement);
-		}
-
-		// Out of elements and the size is not 1, so we cannot assume
-		// that we just duplicate the entry n times (such as ID ent+ -> ^(ID ent)+)
-		// This means we ran out of elements earlier than was expected.
-		//
-		return NULL;	// Caller must cope with this
-	}
-
-	// Elements available either for duping or just available
-	//
-	if (stream->singleElement != NULL)
-	{
-		stream->cursor++;   // Cursor advances even for single element as this tells us to dup()
-		return stream->toTree(stream, stream->singleElement);
-	}
-
-	// More than just a single element so we extract it from the 
-	// vector.
-	//
-	t = stream->toTree(stream, stream->elements->get(stream->elements, stream->cursor));
-	stream->cursor++;
-	return t;
-}
-
-#ifdef ANTLR3_WINDOWS
-#pragma warning(push)
-#pragma warning(disable : 4100)
-#endif
-/// When constructing trees, sometimes we need to dup a token or AST
-/// subtree.  Dup'ing a token means just creating another AST node
-/// around it.  For trees, you must call the adaptor.dupTree().
-///
-static void *	
-dupTok	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el)
-{
-	ANTLR3_FPRINTF(stderr, "dup() cannot be called on a token rewrite stream!!");
-	return NULL;
-}
-#ifdef ANTLR3_WINDOWS
-#pragma warning(pop)
-#endif
-
-/// When constructing trees, sometimes we need to dup a token or AST
-/// subtree.  Dup'ing a token means just creating another AST node
-/// around it.  For trees, you must call the adaptor.dupTree().
-///
-static void *	
-dupTree	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element)
-{
-	return stream->adaptor->dupNode(stream->adaptor, (pANTLR3_BASE_TREE)element);
-}
-
-#ifdef ANTLR3_WINDOWS
-#pragma warning(push)
-#pragma warning(disable : 4100)
-#endif
-/// When constructing trees, sometimes we need to dup a token or AST
-/// subtree.  Dup'ing a token means just creating another AST node
-/// around it.  For trees, you must call the adaptor.dupTree().
-///
-static void *	
-dupTreeNode	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element)
-{
-	ANTLR3_FPRINTF(stderr, "dup() cannot be called on a node rewrite stream!!!");
-	return NULL;
-}
-
-
-/// We don't explicitly convert to a tree unless the call goes to
-/// nextTree, which means rewrites can be heterogeneous
-///
-static pANTLR3_BASE_TREE	
-toTree   (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element)
-{
-	return (pANTLR3_BASE_TREE)element;
-}
-#ifdef ANTLR3_WINDOWS
-#pragma warning(pop)
-#endif
-
-/// Ensure stream emits trees; tokens must be converted to AST nodes.
-/// AST nodes can be passed through unmolested.
-///
-#ifdef ANTLR3_WINDOWS
-#pragma warning(push)
-#pragma warning(disable : 4100)
-#endif
-
-static pANTLR3_BASE_TREE	
-toTreeNode   (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element)
-{
-	return stream->adaptor->dupNode(stream->adaptor, (pANTLR3_BASE_TREE)element);
-}
-
-#ifdef ANTLR3_WINDOWS
-#pragma warning(pop)
-#endif
-
-/// Returns ANTLR3_TRUE if there is a next element available
-///
-static ANTLR3_BOOLEAN	
-hasNext  (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-	if (	(stream->singleElement != NULL && stream->cursor < 1)
-		||	(stream->elements != NULL && stream->cursor < stream->elements->size(stream->elements)))
-	{
-		return ANTLR3_TRUE;
-	}
-	else
-	{
-		return ANTLR3_FALSE;
-	}
-}
-
-/// Get the next token from the list and create a node for it
-/// This is the implementation for token streams.
-///
-static pANTLR3_BASE_TREE
-nextNodeToken(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-	return stream->adaptor->create(stream->adaptor, stream->_next(stream));
-}
-
-static pANTLR3_BASE_TREE
-nextNodeNode(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-	return stream->_next(stream);
-}
-
-/// Treat next element as a single node even if it's a subtree.
-/// This is used instead of next() when the result has to be a
-/// tree root node.  Also prevents us from duplicating recently-added
-/// children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
-/// must dup the type node, but ID has been added.
-///
-/// Referencing a rule result twice is ok; dup the entire tree as
-/// we can't be adding trees; e.g., expr expr.
-///
-static pANTLR3_BASE_TREE	
-nextNode (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-
-	ANTLR3_UINT32	n;
-	pANTLR3_BASE_TREE	el = stream->_next(stream);
-
-	n = stream->size(stream);
-	if (stream->dirty == ANTLR3_TRUE || (stream->cursor > n && n == 1))
-	{
-		// We are out of elements and the size is 1, which means we just 
-		// dup the node that we have
-		//
-		return	stream->adaptor->dupNode(stream->adaptor, el);
-	}
-
-	// We were not out of nodes, so the one we received is the one to return
-	//
-	return  el;
-}
-
-/// Number of elements available in the stream
-///
-static ANTLR3_UINT32	
-size	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-	ANTLR3_UINT32   n = 0;
-
-	/// Should be a count of one if singleElement is set. I copied this
-	/// logic from the java implementation, which I suspect is just guarding
-	/// against someone setting singleElement and forgetting to NULL it out
-	///
-	if (stream->singleElement != NULL)
-	{
-		n = 1;
-	}
-	else
-	{
-		if (stream->elements != NULL)
-		{
-			return (ANTLR3_UINT32)(stream->elements->count);
-		}
-	}
-	return n;
-}
-
-/// Returns the description string, substituting "<unknown source>" if none was supplied.
-///
-static void *	
-getDescription  (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
-{
-	if (stream->elementDescription == NULL)
-	{
-		stream->elementDescription = "<unknown source>";
-	}
-
-	return  stream->elementDescription;
-}
diff --git a/antlr-3.4/runtime/C/src/antlr3string.c b/antlr-3.4/runtime/C/src/antlr3string.c
deleted file mode 100644
index b29c020..0000000
--- a/antlr-3.4/runtime/C/src/antlr3string.c
+++ /dev/null
@@ -1,1402 +0,0 @@
-/** \file
- * Implementation of the ANTLR3 string and string factory classes
- */
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3string.h>
-
-/* Factory API
- */
-static    pANTLR3_STRING    newRaw8	(pANTLR3_STRING_FACTORY factory);
-static    pANTLR3_STRING    newRawUTF16	(pANTLR3_STRING_FACTORY factory);
-static    pANTLR3_STRING    newSize8	(pANTLR3_STRING_FACTORY factory, ANTLR3_UINT32 size);
-static    pANTLR3_STRING    newSizeUTF16	(pANTLR3_STRING_FACTORY factory, ANTLR3_UINT32 size);
-static    pANTLR3_STRING    newPtr8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string, ANTLR3_UINT32 size);
-static    pANTLR3_STRING    newPtrUTF16_8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string, ANTLR3_UINT32 size);
-static    pANTLR3_STRING    newPtrUTF16_UTF16	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string, ANTLR3_UINT32 size);
-static    pANTLR3_STRING    newStr8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string);
-static    pANTLR3_STRING    newStrUTF16_8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string);
-static    pANTLR3_STRING    newStrUTF16_UTF16	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string);
-static    void		    destroy	(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING string);
-static    pANTLR3_STRING    printable8	(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING string);
-static    pANTLR3_STRING    printableUTF16	(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING string);
-static    void		    closeFactory(pANTLR3_STRING_FACTORY factory);
-
-/* String API
- */
-static    pANTLR3_UINT8	    set8	(pANTLR3_STRING string, const char * chars);
-static    pANTLR3_UINT8	    setUTF16_8	(pANTLR3_STRING string, const char * chars);
-static    pANTLR3_UINT8	    setUTF16_UTF16	(pANTLR3_STRING string, const char * chars);
-static    pANTLR3_UINT8	    append8	(pANTLR3_STRING string, const char * newbit);
-static    pANTLR3_UINT8	    appendUTF16_8	(pANTLR3_STRING string, const char * newbit);
-static    pANTLR3_UINT8	    appendUTF16_UTF16	(pANTLR3_STRING string, const char * newbit);
-static	  pANTLR3_UINT8	    insert8	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit);
-static	  pANTLR3_UINT8	    insertUTF16_8	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit);
-static	  pANTLR3_UINT8	    insertUTF16_UTF16	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit);
-
-static    pANTLR3_UINT8	    setS	(pANTLR3_STRING string, pANTLR3_STRING chars);
-static    pANTLR3_UINT8	    appendS	(pANTLR3_STRING string, pANTLR3_STRING newbit);
-static	  pANTLR3_UINT8	    insertS	(pANTLR3_STRING string, ANTLR3_UINT32 point, pANTLR3_STRING newbit);
-
-static    pANTLR3_UINT8	    addc8	(pANTLR3_STRING string, ANTLR3_UINT32 c);
-static    pANTLR3_UINT8	    addcUTF16	(pANTLR3_STRING string, ANTLR3_UINT32 c);
-static    pANTLR3_UINT8	    addi8	(pANTLR3_STRING string, ANTLR3_INT32 i);
-static    pANTLR3_UINT8	    addiUTF16	(pANTLR3_STRING string, ANTLR3_INT32 i);
-static	  pANTLR3_UINT8	    inserti8	(pANTLR3_STRING string, ANTLR3_UINT32 point, ANTLR3_INT32 i);
-static	  pANTLR3_UINT8	    insertiUTF16	(pANTLR3_STRING string, ANTLR3_UINT32 point, ANTLR3_INT32 i);
-
-static    ANTLR3_UINT32     compare8	(pANTLR3_STRING string, const char * compStr);
-static    ANTLR3_UINT32     compareUTF16_8	(pANTLR3_STRING string, const char * compStr);
-static    ANTLR3_UINT32     compareUTF16_UTF16(pANTLR3_STRING string, const char * compStr);
-static    ANTLR3_UINT32     compareS	(pANTLR3_STRING string, pANTLR3_STRING compStr);
-static    ANTLR3_UCHAR      charAt8	(pANTLR3_STRING string, ANTLR3_UINT32 offset);
-static    ANTLR3_UCHAR      charAtUTF16	(pANTLR3_STRING string, ANTLR3_UINT32 offset);
-static    pANTLR3_STRING    subString8	(pANTLR3_STRING string, ANTLR3_UINT32 startIndex, ANTLR3_UINT32 endIndex);
-static    pANTLR3_STRING    subStringUTF16	(pANTLR3_STRING string, ANTLR3_UINT32 startIndex, ANTLR3_UINT32 endIndex);
-static	  ANTLR3_INT32	    toInt32_8	(pANTLR3_STRING string);
-static	  ANTLR3_INT32	    toInt32_UTF16  (pANTLR3_STRING string);
-static	  pANTLR3_STRING    to8_8		(pANTLR3_STRING string);
-static	  pANTLR3_STRING    to8_UTF16		(pANTLR3_STRING string);
-static	pANTLR3_STRING		toUTF8_8	(pANTLR3_STRING string);
-static	pANTLR3_STRING		toUTF8_UTF16	(pANTLR3_STRING string);
-
-/* Local helpers
- */
-static	void			stringInit8	(pANTLR3_STRING string);
-static	void			stringInitUTF16	(pANTLR3_STRING string);
-static	void	ANTLR3_CDECL	stringFree	(pANTLR3_STRING string);
-
-ANTLR3_API pANTLR3_STRING_FACTORY 
-antlr3StringFactoryNew(ANTLR3_UINT32 encoding)
-{
-	pANTLR3_STRING_FACTORY  factory;
-
-	/* Allocate memory
-	*/
-	factory	= (pANTLR3_STRING_FACTORY) ANTLR3_CALLOC(1, sizeof(ANTLR3_STRING_FACTORY));
-
-	if	(factory == NULL)
-	{
-		return	NULL;
-	}
-
-	/* Now we make a new list to track the strings.
-	*/
-	factory->strings	= antlr3VectorNew(0);
-	factory->index	= 0;
-
-	if	(factory->strings == NULL)
-	{
-		ANTLR3_FREE(factory);
-		return	NULL;
-	}
-
-    // Install the API
-    //
-    // TODO: These encodings need equivalent functions to the
-    // UTF16 and 8 bit ones if those encodings are ever to be fully supported in the STRING API.
-	// The STRING API was intended as a quick and dirty convenience for people who did not
-	// want to worry much about memory and performance. It is much more efficient to use the
-	// pointers within the tokens directly, so the string functions are not implemented for
-	// the newer encodings and use of these interfaces is discouraged.
-    // We install the standard 8 and 16 bit functions for UTF-8 and UTF-16, but they
-	// will not be useful beyond returning the text.
-	//
-    switch(encoding)
-    {
-		case    ANTLR3_ENC_UTF32:
-			break;
-
-		case    ANTLR3_ENC_UTF32BE:
-			break;
-
-		case    ANTLR3_ENC_UTF32LE:
-			break;
-
-		case    ANTLR3_ENC_UTF16BE:
-		case    ANTLR3_ENC_UTF16LE:
-		case    ANTLR3_ENC_UTF16:
-
-			factory->newRaw	    =  newRawUTF16;
-			factory->newSize	=  newSizeUTF16;
-			factory->newPtr	    =  newPtrUTF16_UTF16;
-			factory->newPtr8	=  newPtrUTF16_8;
-			factory->newStr	    =  newStrUTF16_UTF16;
-			factory->newStr8	=  newStrUTF16_8;
-			factory->printable	=  printableUTF16;
-			factory->destroy	=  destroy;
-			factory->close	    =  closeFactory;
-			break;
-	 
-		case    ANTLR3_ENC_UTF8:
-		case    ANTLR3_ENC_EBCDIC:
-		case    ANTLR3_ENC_8BIT:
-		default:
-
-			factory->newRaw	    =  newRaw8;
-			factory->newSize	=  newSize8;
-			factory->newPtr	    =  newPtr8;
-			factory->newPtr8	=  newPtr8;
-			factory->newStr	    =  newStr8;
-			factory->newStr8	=  newStr8;
-			factory->printable	=  printable8;
-			factory->destroy	=  destroy;
-			factory->close	    =  closeFactory;
-			break;
-    }
-	return  factory;
-}
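-
-// Illustrative sketch (editorial example, not original runtime code) of the
-// typical lifecycle of a factory and one of its strings, using the 8 bit API
-// installed above. The literal text is arbitrary.
-//
-static void
-exampleStringFactoryUse (void)
-{
-	pANTLR3_STRING_FACTORY	factory;
-	pANTLR3_STRING		str;
-
-	factory = antlr3StringFactoryNew(ANTLR3_ENC_8BIT);
-	if	(factory == NULL)
-	{
-		return;
-	}
-
-	str = factory->newStr8(factory, (pANTLR3_UINT8)"count: ");
-	str->addi   (str, 42);			// appends the decimal rendering "42"
-	str->append8(str, " items");		// appends 8 bit text
-
-	// Closing the factory frees every string it allocated, including 'str'
-	//
-	factory->close(factory);
-}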
-
-
-/**
- *
- * \param factory 
- * \return 
- */
-static    pANTLR3_STRING    
-newRaw8	(pANTLR3_STRING_FACTORY factory)
-{
-    pANTLR3_STRING  string;
-
-    string  = (pANTLR3_STRING) ANTLR3_MALLOC(sizeof(ANTLR3_STRING));
-
-    if	(string == NULL)
-    {
-		return	NULL;
-    }
-
-    /* Structure is allocated, now fill in the API etc.
-     */
-    stringInit8(string);
-    string->factory = factory;
-
-    /* Add the string into the allocated list
-     */
-    factory->strings->set(factory->strings, factory->index, (void *) string, (void (ANTLR3_CDECL *)(void *))(stringFree), ANTLR3_TRUE);
-    string->index   = factory->index++;
-
-    return string;
-}
-/**
- *
- * \param factory 
- * \return 
- */
-static    pANTLR3_STRING    
-newRawUTF16	(pANTLR3_STRING_FACTORY factory)
-{
-    pANTLR3_STRING  string;
-
-    string  = (pANTLR3_STRING) ANTLR3_MALLOC(sizeof(ANTLR3_STRING));
-
-    if	(string == NULL)
-    {
-		return	NULL;
-    }
-
-    /* Structure is allocated, now fill in the API etc.
-     */
-    stringInitUTF16(string);
-    string->factory = factory;
-
-    /* Add the string into the allocated list
-     */
-    factory->strings->set(factory->strings, factory->index, (void *) string, (void (ANTLR3_CDECL *)(void *))(stringFree), ANTLR3_TRUE);
-    string->index   = factory->index++;
-
-    return string;
-}
-static	 
-void	ANTLR3_CDECL stringFree  (pANTLR3_STRING string)
-{
-    /* First free the string itself if there was anything in it
-     */
-    if	(string->chars)
-    {
-	ANTLR3_FREE(string->chars);
-    }
-
-    /* Now free the space for this string
-     */
-    ANTLR3_FREE(string);
-
-    return;
-}
-/**
- *
- * \param string 
- * \return 
- */
-static	void
-stringInit8  (pANTLR3_STRING string)
-{
-    string->len			= 0;
-    string->size		= 0;
-    string->chars		= NULL;
-    string->encoding	= ANTLR3_ENC_8BIT ;
-
-    /* API for 8 bit strings*/
-
-    string->set		= set8;
-    string->set8	= set8;
-    string->append	= append8;
-    string->append8	= append8;
-    string->insert	= insert8;
-    string->insert8	= insert8;
-    string->addi	= addi8;
-    string->inserti	= inserti8;
-    string->addc	= addc8;
-    string->charAt	= charAt8;
-    string->compare	= compare8;
-    string->compare8	= compare8;
-    string->subString	= subString8;
-    string->toInt32	= toInt32_8;
-    string->to8		= to8_8;
-    string->toUTF8	= toUTF8_8;
-    string->compareS	= compareS;
-    string->setS	= setS;
-    string->appendS	= appendS;
-    string->insertS	= insertS;
-
-}
-/**
- *
- * \param string 
- * \return 
- */
-static	void
-stringInitUTF16  (pANTLR3_STRING string)
-{
-    string->len		= 0;
-    string->size	= 0;
-    string->chars	= NULL;
-    string->encoding	= ANTLR3_ENC_UTF16;	// UTF16 API is installed below
-
-    /* API for UTF16 strings */
-
-    string->set		= setUTF16_UTF16;
-    string->set8	= setUTF16_8;
-    string->append	= appendUTF16_UTF16;
-    string->append8	= appendUTF16_8;
-    string->insert	= insertUTF16_UTF16;
-    string->insert8	= insertUTF16_8;
-    string->addi	= addiUTF16;
-    string->inserti	= insertiUTF16;
-    string->addc	= addcUTF16;
-    string->charAt	= charAtUTF16;
-    string->compare	= compareUTF16_UTF16;
-    string->compare8	= compareUTF16_8;
-    string->subString	= subStringUTF16;
-    string->toInt32	= toInt32_UTF16;
-    string->to8		= to8_UTF16;
-    string->toUTF8	= toUTF8_UTF16;
-
-    string->compareS	= compareS;
-    string->setS	= setS;
-    string->appendS	= appendS;
-    string->insertS	= insertS;
-}
-/**
- *
- * \param string 
- * \return 
- * TODO: Implement UTF-8
- */
-static	void
-stringInitUTF8  (pANTLR3_STRING string)
-{
-    string->len	    = 0;
-    string->size    = 0;
-    string->chars   = NULL;
-
-    /* API */
-
-}
-
-// Convert an 8 bit string into a UTF8 representation, which is in fact just a copy of
-// the string itself, as we make no assumptions about the 8 bit encoding.
-//
-static	pANTLR3_STRING		
-toUTF8_8	(pANTLR3_STRING string)
-{
-	return string->factory->newPtr(string->factory, (pANTLR3_UINT8)(string->chars), string->len);
-}
-
-// Convert a UTF16 string into a UTF8 representation using the Unicode.org
-// supplied C algorithms, which are now contained within the ANTLR3 C runtime
-// as permitted by the Unicode license (see antlr3convertutf.c/.h).
-// UCS2 has the same encoding as UTF16, so we can use the UTF16 converter.
-//
-static	pANTLR3_STRING	
-toUTF8_UTF16	(pANTLR3_STRING string)
-{
-
-    UTF8	      * outputEnd;	
-    UTF16	      * inputEnd;
-    pANTLR3_STRING	utf8String;
-
-    ConversionResult	cResult;
-
-    // Allocate the output buffer, which needs to accommodate potentially
-    // 3X (in bytes) the input size (in chars).
-    //
-    utf8String	= string->factory->newStr8(string->factory, (pANTLR3_UINT8)"");
-
-    if	(utf8String != NULL)
-    {
-        // Free existing allocation
-        //
-        ANTLR3_FREE(utf8String->chars);
-
-        // Reallocate according to maximum expected size
-        //
-        utf8String->size	= string->len *3;
-        utf8String->chars	= (pANTLR3_UINT8)ANTLR3_MALLOC(utf8String->size +1);
-
-        if	(utf8String->chars != NULL)
-        {
-            inputEnd  = (UTF16 *)	(string->chars);
-            outputEnd = (UTF8 *)	(utf8String->chars);
-
-            // Call the Unicode converter
-            //
-            cResult =  ConvertUTF16toUTF8
-                (
-                (const UTF16**)&inputEnd, 
-                ((const UTF16 *)(string->chars)) + string->len, 
-                &outputEnd, 
-                outputEnd + utf8String->size - 1,
-                lenientConversion
-                );
-
-            // We don't really care if things failed or not here, we just converted
-            // everything that was vaguely possible and stopped when it wasn't. It is
-            // up to the grammar programmer to verify that the input is sensible.
-            //
-            utf8String->len = ANTLR3_UINT32_CAST(((pANTLR3_UINT8)outputEnd) - utf8String->chars);
-
-            *outputEnd = '\0';		// Always null terminate (outputEnd points one past the last byte written)
-        }
-    }
-    return utf8String;
-}
-
-/**
- * Creates a new string with enough capacity for size 8 bit characters plus a terminator.
- *
- * \param[in] factory - Pointer to the string factory that owns strings
- * \param[in] size - In characters
- * \return pointer to the new string.
- */
-static    pANTLR3_STRING    
-newSize8	(pANTLR3_STRING_FACTORY factory, ANTLR3_UINT32 size)
-{
-    pANTLR3_STRING  string;
-
-    string  = factory->newRaw(factory);
-
-    if	(string == NULL)
-    {
-        return	string;
-    }
-
-    /* Always add one more byte for a terminator ;-)
-    */
-    string->chars	= (pANTLR3_UINT8) ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_UINT8) * (size+1)));
-    *(string->chars)	= '\0';
-    string->size	= size + 1;
-
-
-    return string;
-}
-/**
- * Creates a new string with enough capacity for size UTF16 characters plus a terminator.
- *
- * \param[in] factory - Pointer to the string factory that owns strings
- * \param[in] size - In characters (count double for surrogate pairs!!!)
- * \return pointer to the new string.
- */
-static    pANTLR3_STRING    
-newSizeUTF16	(pANTLR3_STRING_FACTORY factory, ANTLR3_UINT32 size)
-{
-    pANTLR3_STRING  string;
-
-    string  = factory->newRaw(factory);
-
-    if	(string == NULL)
-    {
-        return	string;
-    }
-
-    /* Always add one more byte for a terminator ;-)
-    */	
-    string->chars	= (pANTLR3_UINT8) ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_UINT16) * (size+1)));
-    *(string->chars)	= '\0';
-    string->size	= size+1;	/* Size is always in characters, as is len */
-
-    return string;
-}
-
-/** Creates a new 8 bit string initialized with the 8 bit characters at the 
- *  supplied ptr, of pre-determined size.
- * \param[in] factory - Pointer to the string factory that owns the strings
- * \param[in] ptr - Pointer to 8 bit encoded characters
- * \return pointer to the new string
- */
-static    pANTLR3_STRING    
-newPtr8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr, ANTLR3_UINT32 size)
-{
-	pANTLR3_STRING  string;
-
-	string  = factory->newSize(factory, size);
-
-	if	(string == NULL)
-	{
-		return	NULL;
-	}
-
-	if	(size <= 0)
-	{
-		return	string;
-	}
-
-	if	(ptr != NULL)
-	{
-		ANTLR3_MEMMOVE(string->chars, (const void *)ptr, size);
-		*(string->chars + size) = '\0';	    /* Terminate, these strings are usually used for Token streams and printing etc.	*/
-		string->len = size;
-	}
-
-	return  string;
-}
-
-/** Creates a new UTF16 string initialized with the 8 bit characters at the 
- *  supplied 8 bit character ptr, of pre-determined size.
- * \param[in] factory - Pointer to the string factory that owns the strings
- * \param[in] ptr - Pointer to 8 bit encoded characters
- * \return pointer to the new string
- */
-static    pANTLR3_STRING    
-newPtrUTF16_8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr, ANTLR3_UINT32 size)
-{
-	pANTLR3_STRING  string;
-
-	/* newSize accepts size in characters, not bytes
-	*/
-	string  = factory->newSize(factory, size);
-
-	if	(string == NULL)
-	{
-		return	NULL;
-	}
-
-	if	(size <= 0)
-	{
-		return	string;
-	}
-
-	if	(ptr != NULL)
-	{
-		pANTLR3_UINT16	out;
-		ANTLR3_INT32    inSize;
-
-		out = (pANTLR3_UINT16)(string->chars);
-		inSize	= size;
-
-		while	(inSize-- > 0)
-		{
-			*out++ = (ANTLR3_UINT16)(*ptr++);
-		}
-
-		/* Terminate, these strings are usually used for Token streams and printing etc.	
-		*/
-		*(((pANTLR3_UINT16)(string->chars)) + size) = '\0';
-
-		string->len = size;
-	}
-
-	return  string;
-}
-
-/** Creates a new UTF16 string initialized with the UTF16 characters at the 
- *  supplied ptr, of pre-determined size.
- * \param[in] factory - Pointer to the string factory that owns the strings
- * \param[in] ptr - Pointer to UTF16 encoded characters
- * \return pointer to the new string
- */
-static    pANTLR3_STRING    
-newPtrUTF16_UTF16	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr, ANTLR3_UINT32 size)
-{
-	pANTLR3_STRING  string;
-
-	string  = factory->newSize(factory, size);
-
-	if	(string == NULL)
-	{
-		return	NULL;
-	}
-
-	if	(size <= 0)
-	{
-		return	string;
-	}
-
-	if	(ptr != NULL)
-	{
-		ANTLR3_MEMMOVE(string->chars, (const void *)ptr, (size * sizeof(ANTLR3_UINT16)));
-
-		/* Terminate, these strings are usually used for Token streams and printing etc.	
-		*/
-		*(((pANTLR3_UINT16)(string->chars)) + size) = '\0';	    
-		string->len = size;
-	}
-
-	return  string;
-}
-
-/** Create a new 8 bit string from the supplied, null terminated, 8 bit string pointer.
- * \param[in] factory - Pointer to the string factory that owns strings.
- * \param[in] ptr - Pointer to the 8 bit encoded string
- * \return Pointer to the newly initialized string
- */
-static    pANTLR3_STRING    
-newStr8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr)
-{
-    return factory->newPtr8(factory, ptr, (ANTLR3_UINT32)strlen((const char *)ptr));
-}
-
-/** Create a new UTF16 string from the supplied, null terminated, 8 bit string pointer.
- * \param[in] factory - Pointer to the string factory that owns strings.
- * \param[in] ptr - Pointer to the 8 bit encoded string
- * \return Pointer to the newly initialized string
- */
-static    pANTLR3_STRING    
-newStrUTF16_8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr)
-{
-    return factory->newPtr8(factory, ptr, (ANTLR3_UINT32)strlen((const char *)ptr));
-}
-
-/** Create a new UTF16 string from the supplied, null terminated, UTF16 string pointer.
- * \param[in] factory - Pointer to the string factory that owns strings.
- * \param[in] ptr - Pointer to the UTF16 encoded string
- * \return Pointer to the newly initialized string
- */
-static    pANTLR3_STRING    
-newStrUTF16_UTF16	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr)
-{
-    pANTLR3_UINT16  in;
-    ANTLR3_UINT32   count;
-
-    /** First, determine the length of the input string
-     */
-    in	    = (pANTLR3_UINT16)ptr;
-    count   = 0;
-
-    while   (*in++ != '\0')
-    {
-		count++;
-    }
-    return factory->newPtr(factory, ptr, count);
-}
-
-static    void		    
-destroy	(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING string)
-{
-    // Record which string we are deleting
-    //
-    ANTLR3_UINT32 strIndex = string->index;
-    
-    // Ensure that the string was not factory made, or we would try
-    // to delete memory that wasn't allocated outside the factory
-    // block.
-    // Remove the specific indexed string from the vector
-    //
-    factory->strings->del(factory->strings, strIndex);
-
-    // One less string in the vector, so decrement the factory index
-    // so that the next string allocated is indexed correctly with
-    // respect to the vector.
-    //
-    factory->index--;
-
-    // Now we have to reindex the strings in the vector that followed
-    // the one we just deleted. We only do this if the one we just deleted
-    // was not the last one.
-    //
-    if  (strIndex < factory->index)
-    {
-        // We must reindex the strings after the one we just deleted.
-        // The one that follows the one we just deleted is also out
-        // of whack, so we start there.
-        //
-        ANTLR3_UINT32 i;
-
-        for (i = strIndex; i < factory->index; i++)
-        {
-            // Renumber the entry
-            //
-            ((pANTLR3_STRING)(factory->strings->elements[i].element))->index = i;
-        }
-    }
-
-    // The string has been destroyed and the elements of the factory are reindexed.
-    //
-
-}
-
-static    pANTLR3_STRING    
-printable8(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING instr)
-{
-    pANTLR3_STRING  string;
-    
-    /* We don't need to be too efficient here, this is mostly for error messages and so on.
-     */
-    pANTLR3_UINT8   scannedText;
-    ANTLR3_UINT32   i;
-
-    /* Assume we need at most twice the space in order to escape the control characters
-     */
-    string  = factory->newSize(factory, instr->len *2 + 1);
-
-    /* Scan through and replace unprintable (in terms of this routine)
-     * characters
-     */
-    scannedText = string->chars;
-
-    for	(i = 0; i < instr->len; i++)
-    {
-		if (*(instr->chars + i) == '\n')
-		{
-			*scannedText++ = '\\';
-			*scannedText++ = 'n';
-		}
-		else if (*(instr->chars + i) == '\r')
-		{
-			*scannedText++ = '\\';
-			*scannedText++ = 'r';
-		}
-		else if	(!isprint(*(instr->chars +i)))
-		{
-			*scannedText++ = '?';
-		}
-		else
-		{
-			*scannedText++ = *(instr->chars + i);
-		}
-    }
-    *scannedText  = '\0';
-
-    string->len	= (ANTLR3_UINT32)(scannedText - string->chars);
-    
-    return  string;
-}
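-
-// Worked example (editorial, with assumed input): given an 8 bit string whose
-// text is "line1\nline2", printable8 returns a new string "line1\\nline2";
-// a '\r' becomes "\\r" and any other non printing byte becomes '?'.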
-
-static    pANTLR3_STRING    
-printableUTF16(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING instr)
-{
-    pANTLR3_STRING  string;
-    
-    /* We don't need to be too efficient here, this is mostly for error messages and so on.
-     */
-    pANTLR3_UINT16  scannedText;
-    pANTLR3_UINT16  inText;
-    ANTLR3_UINT32   i;
-    ANTLR3_UINT32   outLen;
-
-    /* Assume we need at most twice the space in order to escape the control characters
-     */
-    string  = factory->newSize(factory, instr->len *2 + 1);
-
-    /* Scan through and replace unprintable (in terms of this routine)
-     * characters
-     */
-    scannedText = (pANTLR3_UINT16)(string->chars);
-    inText	= (pANTLR3_UINT16)(instr->chars);
-    outLen	= 0;
-
-    for	(i = 0; i < instr->len; i++)
-    {
-		if (*(inText + i) == '\n')
-		{
-			*scannedText++   = '\\';
-			*scannedText++   = 'n';
-			outLen	    += 2;
-		}
-		else if (*(inText + i) == '\r')
-		{
-			*scannedText++   = '\\';
-			*scannedText++   = 'r';
-			outLen	    += 2;
-		}
-		else if	(!isprint(*(inText +i)))
-		{
-			*scannedText++ = '?';
-			outLen++;
-		}
-		else
-		{
-			*scannedText++ = *(inText + i);
-			outLen++;
-		}
-    }
-    *scannedText  = '\0';
-
-    string->len	= outLen;
-    
-    return  string;
-}
-
-/** Close the factory, freeing every string it allocated along with
- *  the factory structure itself.
- */
-static    void		    
-closeFactory	(pANTLR3_STRING_FACTORY factory)
-{
-    /* Delete the vector we were tracking the strings with, this will
-     * causes all the allocated strings to be deallocated too
-     */
-    factory->strings->free(factory->strings);
-
-    /* Delete the space for the factory itself
-     */
-    ANTLR3_FREE((void *)factory);
-}
-
-static    pANTLR3_UINT8   
-append8	(pANTLR3_STRING string, const char * newbit)
-{
-    ANTLR3_UINT32 len;
-
-    len	= (ANTLR3_UINT32)strlen(newbit);
-
-    if	(string->size < (string->len + len + 1))
-    {
-		string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(string->len + len + 1));
-		string->size	= string->len + len + 1;
-    }
-
-    /* Note we copy one more byte than the strlen in order to get the trailing '\0'
-     */
-    ANTLR3_MEMMOVE((void *)(string->chars + string->len), newbit, (ANTLR3_UINT32)(len+1));
-    string->len	+= len;
-
-    return string->chars;
-}
-
-static    pANTLR3_UINT8   
-appendUTF16_8	(pANTLR3_STRING string, const char * newbit)
-{
-    ANTLR3_UINT32   len;
-    pANTLR3_UINT16  apPoint;
-    ANTLR3_UINT32   count;
-
-    len	= (ANTLR3_UINT32)strlen(newbit);
-
-    if	(string->size < (string->len + len + 1))
-    {
-		string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)((sizeof(ANTLR3_UINT16)*(string->len + len + 1))));
-		string->size	= string->len + len + 1;
-    }
-
-    apPoint = ((pANTLR3_UINT16)string->chars) + string->len;
-    string->len	+= len;
-
-    for	(count = 0; count < len; count++)
-    {
-		*apPoint++   = *(newbit + count);
-    }
-    *apPoint = '\0';
-
-    return string->chars;
-}
-
-static    pANTLR3_UINT8   
-appendUTF16_UTF16	(pANTLR3_STRING string, const char * newbit)
-{
-    ANTLR3_UINT32 len;
-    pANTLR3_UINT16  in;
-
-    /** First, determine the length of the input string
-     */
-    in	    = (pANTLR3_UINT16)newbit;
-    len   = 0;
-
-    while   (*in++ != '\0')
-    {
-		len++;
-    }
-
-    if	(string->size < (string->len + len + 1))
-    {
-		string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)( sizeof(ANTLR3_UINT16) *(string->len + len + 1) ));
-		string->size	= string->len + len + 1;
-    }
-
-    /* Note we copy one more byte than the strlen in order to get the trailing delimiter
-     */
-    ANTLR3_MEMMOVE((void *)(((pANTLR3_UINT16)string->chars) + string->len), newbit, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(len+1)));
-    string->len	+= len;
-
-    return string->chars;
-}
-
-static    pANTLR3_UINT8   
-set8	(pANTLR3_STRING string, const char * chars)
-{
-    ANTLR3_UINT32	len;
-
-    len = (ANTLR3_UINT32)strlen(chars);
-    if	(string->size < len + 1)
-    {
-		string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(len + 1));
-		string->size	= len + 1;
-    }
-
-    /* Note we copy one more byte than the strlen in order to get the trailing '\0'
-     */
-    ANTLR3_MEMMOVE((void *)(string->chars), chars, (ANTLR3_UINT32)(len+1));
-    string->len	    = len;
-
-    return  string->chars;
-
-}
-
-static    pANTLR3_UINT8   
-setUTF16_8	(pANTLR3_STRING string, const char * chars)
-{
-    ANTLR3_UINT32	len;
-    ANTLR3_UINT32	count;
-    pANTLR3_UINT16	apPoint;
-
-    len = (ANTLR3_UINT32)strlen(chars);
-    if	(string->size < len + 1)
-	{
-		string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(len + 1)));
-		string->size	= len + 1;
-    }
-    apPoint = ((pANTLR3_UINT16)string->chars);
-    string->len	= len;
-
-    for	(count = 0; count < string->len; count++)
-    {
-		*apPoint++   = *(chars + count);
-    }
-    *apPoint = '\0';
-
-    return  string->chars;
-}
-
-static    pANTLR3_UINT8   
-setUTF16_UTF16    (pANTLR3_STRING string, const char * chars)
-{
-    ANTLR3_UINT32   len;
-    pANTLR3_UINT16  in;
-
-    /** First, determine the length of the input string
-     */
-    in	    = (pANTLR3_UINT16)chars;
-    len   = 0;
-
-    while   (*in++ != '\0')
-    {
-		len++;
-    }
-
-    if	(string->size < len + 1)
-    {
-		string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(len + 1)));
-		string->size	= len + 1;
-    }
-
-    /* Note we copy one more byte than the strlen in order to get the trailing '\0'
-     */
-    ANTLR3_MEMMOVE((void *)(string->chars), chars, (ANTLR3_UINT32)((len+1) * sizeof(ANTLR3_UINT16)));
-    string->len	    = len;
-
-    return  string->chars;
-
-}
-
-static    pANTLR3_UINT8   
-addc8	(pANTLR3_STRING string, ANTLR3_UINT32 c)
-{
-    if	(string->size < string->len + 2)
-    {
-		string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(string->len + 2));
-		string->size	= string->len + 2;
-    }
-    *(string->chars + string->len)	= (ANTLR3_UINT8)c;
-    *(string->chars + string->len + 1)	= '\0';
-    string->len++;
-
-    return  string->chars;
-}
-
-static    pANTLR3_UINT8   
-addcUTF16	(pANTLR3_STRING string, ANTLR3_UINT32 c)
-{
-    pANTLR3_UINT16  ptr;
-
-    if	(string->size < string->len + 2)
-    {
-		string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16) * (string->len + 2)));
-		string->size	= string->len + 2;
-    }
-    ptr	= (pANTLR3_UINT16)(string->chars);
-
-    *(ptr + string->len)	= (ANTLR3_UINT16)c;
-    *(ptr + string->len + 1)	= '\0';
-    string->len++;
-
-    return  string->chars;
-}
-
-static    pANTLR3_UINT8   
-addi8	(pANTLR3_STRING string, ANTLR3_INT32 i)
-{
-    ANTLR3_UINT8	    newbit[32];
-
-    sprintf((char *)newbit, "%d", i);
-
-    return  string->append8(string, (const char *)newbit);
-}
-static    pANTLR3_UINT8   
-addiUTF16	(pANTLR3_STRING string, ANTLR3_INT32 i)
-{
-    ANTLR3_UINT8	    newbit[32];
-
-    sprintf((char *)newbit, "%d", i);
-
-    return  string->append8(string, (const char *)newbit);
-}
-
-static	  pANTLR3_UINT8
-inserti8    (pANTLR3_STRING string, ANTLR3_UINT32 point, ANTLR3_INT32 i)
-{
-    ANTLR3_UINT8	    newbit[32];
-
-    sprintf((char *)newbit, "%d", i);
-    return  string->insert8(string, point, (const char *)newbit);
-}
-static	  pANTLR3_UINT8
-insertiUTF16    (pANTLR3_STRING string, ANTLR3_UINT32 point, ANTLR3_INT32 i)
-{
-    ANTLR3_UINT8	    newbit[32];
-
-    sprintf((char *)newbit, "%d", i);
-    return  string->insert8(string, point, (const char *)newbit);
-}
-
-static	pANTLR3_UINT8
-insert8	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit)
-{
-    ANTLR3_UINT32	len;
-
-    if	(point >= string->len)
-    {
-		return	string->append(string, newbit);
-    }
- 
-    len	= (ANTLR3_UINT32)strlen(newbit);
-
-    if	(len == 0)
-    {
-		return	string->chars;
-    }
-
-    if	(string->size < (string->len + len + 1))
-    {
-		string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(string->len + len + 1));
-		string->size	= string->len + len + 1;
-    }
-
-    /* Shift up the characters that follow the insertion point, including the terminator
-     */
-    ANTLR3_MEMMOVE((void *)(string->chars + point + len), (void *)(string->chars + point), (ANTLR3_UINT32)(string->len - point + 1));
-
-    /* Note we copy the exact number of bytes
-     */
-    ANTLR3_MEMMOVE((void *)(string->chars + point), newbit, (ANTLR3_UINT32)(len));
-    
-    string->len += len;
-
-    return  string->chars;
-}
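-
-// Worked example of the insertion semantics above (editorial, with assumed
-// content; 'string' is an 8 bit string created elsewhere):
-//
-//   string->set8   (string, "Hello world");	// len = 11
-//   string->insert8(string, 5, ",");		// -> "Hello, world", len = 12
-//   string->insert8(string, 99, "!");		// point >= len, so this appends -> "Hello, world!"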
-
-static	pANTLR3_UINT8
-insertUTF16_8	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit)
-{
-    ANTLR3_UINT32	len;
-    ANTLR3_UINT32	count;
-    pANTLR3_UINT16	inPoint;
-
-    if	(point >= string->len)
-    {
-		return	string->append8(string, newbit);
-    }
- 
-    len	= (ANTLR3_UINT32)strlen(newbit);
-
-    if	(len == 0)
-    {
-		return	string->chars;
-    }
-
-    if	(string->size < (string->len + len + 1))
-    {
-	string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(string->len + len + 1)));
-	string->size	= string->len + len + 1;
-    }
-
-    /* Shift up the characters that follow the insertion point, including the terminator
-     */
-    ANTLR3_MEMMOVE((void *)(((pANTLR3_UINT16)string->chars) + point + len), (void *)(((pANTLR3_UINT16)string->chars) + point), (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(string->len - point + 1)));
-
-    string->len += len;
-    
-    inPoint = ((pANTLR3_UINT16)(string->chars))+point;
-    for	(count = 0; count<len; count++)
-    {
-		*(inPoint + count) = (ANTLR3_UINT16)(*(newbit+count));
-    }
-
-    return  string->chars;
-}
-
-static	pANTLR3_UINT8
-insertUTF16_UTF16	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit)
-{
-    ANTLR3_UINT32	len;
-    pANTLR3_UINT16	in;
-
-    if	(point >= string->len)
-    {
-		return	string->append(string, newbit);
-    }
- 
-    /** First, determine the length of the input string
-     */
-    in	    = (pANTLR3_UINT16)newbit;
-    len	    = 0;
-
-    while   (*in++ != '\0')
-    {
-		len++;
-    }
-
-    if	(len == 0)
-    {
-		return	string->chars;
-    }
-
-    if	(string->size < (string->len + len + 1))
-    {
-		string->chars	= (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(string->len + len + 1)));
-		string->size	= string->len + len + 1;
-    }
-
-    /* Shift up the characters that follow the insertion point, including the terminator
-     */
-    ANTLR3_MEMMOVE((void *)(((pANTLR3_UINT16)string->chars) + point + len), (void *)(((pANTLR3_UINT16)string->chars) + point), (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(string->len - point + 1)));
-
-
-    /* Note we copy the exact number of characters
-     */
-    ANTLR3_MEMMOVE((void *)(((pANTLR3_UINT16)string->chars) + point), newbit, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(len)));
-    
-    string->len += len;
-
-    return  string->chars;
-}
-
-static    pANTLR3_UINT8	    setS	(pANTLR3_STRING string, pANTLR3_STRING chars)
-{
-    return  string->set(string, (const char *)(chars->chars));
-}
-
-static    pANTLR3_UINT8	    appendS	(pANTLR3_STRING string, pANTLR3_STRING newbit)
-{
-    /* We may be passed an empty string, in which case we just return the current pointer
-     */
-    if	(newbit == NULL || newbit->len == 0 || newbit->size == 0 || newbit->chars == NULL)
-    {
-		return	string->chars;
-    }
-    else
-    {
-		return  string->append(string, (const char *)(newbit->chars));
-    }
-}
-
-static	  pANTLR3_UINT8	    insertS	(pANTLR3_STRING string, ANTLR3_UINT32 point, pANTLR3_STRING newbit)
-{
-    return  string->insert(string, point, (const char *)(newbit->chars));
-}
-
-/* Function that compares the text of a string to the supplied
- * 8 bit character string and returns a result a la strcmp()
- */
-static ANTLR3_UINT32   
-compare8	(pANTLR3_STRING string, const char * compStr)
-{
-    return  strcmp((const char *)(string->chars), compStr);
-}
-
-/* Function that compares the text of a string with the supplied character string
- * (which is assumed to be in the same encoding as the string itself) and returns a result
- * a la strcmp()
- */
-static ANTLR3_UINT32   
-compareUTF16_8	(pANTLR3_STRING string, const char * compStr)
-{
-    pANTLR3_UINT16  ourString;
-    ANTLR3_UINT32   charDiff;
-
-    ourString	= (pANTLR3_UINT16)(string->chars);
-
-    while   (((ANTLR3_UCHAR)(*ourString) != '\0') && ((ANTLR3_UCHAR)(*compStr) != '\0'))
-    {
-		charDiff = *ourString - *compStr;
-		if  (charDiff != 0)
-		{
-			return charDiff;
-		}
-		ourString++;
-		compStr++;
-    }
-
-    /* At this point, one of the strings was terminated
-     */
-    return (ANTLR3_UINT32)((ANTLR3_UCHAR)(*ourString) - (ANTLR3_UCHAR)(*compStr));
-
-}
-
-/* Function that compares the text of a string with the supplied character string
- * (which is assumed to be in the same encoding as the string itself) and returns a result
- * a la strcmp()
- */
-static ANTLR3_UINT32   
-compareUTF16_UTF16	(pANTLR3_STRING string, const char * compStr8)
-{
-    pANTLR3_UINT16  ourString;
-    pANTLR3_UINT16  compStr;
-    ANTLR3_UINT32   charDiff;
-
-    ourString	= (pANTLR3_UINT16)(string->chars);
-    compStr	= (pANTLR3_UINT16)(compStr8);
-
-    while   (((ANTLR3_UCHAR)(*ourString) != '\0') && ((ANTLR3_UCHAR)(*((pANTLR3_UINT16)compStr)) != '\0'))
-    {
-		charDiff = *ourString - *compStr;
-		if  (charDiff != 0)
-		{
-			return charDiff;
-		}
-		ourString++;
-		compStr++;
-    }
-
-    /* At this point, one of the strings was terminated
-     */
-    return (ANTLR3_UINT32)((ANTLR3_UCHAR)(*ourString) - (ANTLR3_UCHAR)(*compStr));
-}
-
-/* Function that compares the text of a string with the supplied string
- * (which is assumed to be in the same encoding as the string itself) and returns a result
- * a la strcmp()
- */
-static ANTLR3_UINT32   
-compareS    (pANTLR3_STRING string, pANTLR3_STRING compStr)
-{
-    return  string->compare(string, (const char *)compStr->chars);
-}
-
-
-/* Function that returns the character indexed at the supplied
- * offset as a 32 bit character.
- */
-static ANTLR3_UCHAR    
-charAt8	    (pANTLR3_STRING string, ANTLR3_UINT32 offset)
-{
-    if	(offset > string->len)
-    {
-		return (ANTLR3_UCHAR)'\0';
-    }
-    else
-    {
-		return  (ANTLR3_UCHAR)(*(string->chars + offset));
-    }
-}
-
-/* Function that returns the character indexed at the supplied
- * offset as a 32 bit character.
- */
-static ANTLR3_UCHAR    
-charAtUTF16    (pANTLR3_STRING string, ANTLR3_UINT32 offset)
-{
-    if	(offset > string->len)
-    {
-		return (ANTLR3_UCHAR)'\0';
-    }
-    else
-    {
-		return  (ANTLR3_UCHAR)(*((pANTLR3_UINT16)(string->chars) + offset));
-    }
-}
-
-/* Function that returns a substring of the supplied string a la .subString(s,e)
- * in Java runtimes.
- */
-static pANTLR3_STRING
-subString8   (pANTLR3_STRING string, ANTLR3_UINT32 startIndex, ANTLR3_UINT32 endIndex)
-{
-    pANTLR3_STRING newStr;
-
-    if	(endIndex > string->len)
-    {
-		endIndex = string->len + 1;
-    }
-    newStr  = string->factory->newPtr(string->factory, string->chars + startIndex, endIndex - startIndex);
-
-    return newStr;
-}
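-
-// Worked example (editorial, with assumed content): for an 8 bit string whose
-// text is "Hello world", subString(string, 0, 5) returns a new string "Hello".
-// endIndex is exclusive and, when it runs past the string, is clamped to
-// len + 1 before the copy is taken.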
-
-/* Returns a substring of the supplied string a la .subString(s,e)
- * in Java runtimes.
- */
-static pANTLR3_STRING
-subStringUTF16  (pANTLR3_STRING string, ANTLR3_UINT32 startIndex, ANTLR3_UINT32 endIndex)
-{
-    pANTLR3_STRING newStr;
-
-    if	(endIndex > string->len)
-    {
-		endIndex = string->len + 1;
-    }
-    newStr  = string->factory->newPtr(string->factory, (pANTLR3_UINT8)((pANTLR3_UINT16)(string->chars) + startIndex), endIndex - startIndex);
-
-    return newStr;
-}
-
-/* Function that can convert the characters in the string to an integer
- */
-static ANTLR3_INT32
-toInt32_8	    (struct ANTLR3_STRING_struct * string)
-{
-    return  atoi((const char *)(string->chars));
-}
-
-/* Function that can convert the characters in the string to an integer
- */
-static ANTLR3_INT32
-toInt32_UTF16       (struct ANTLR3_STRING_struct * string)
-{
-    pANTLR3_UINT16  input;
-    ANTLR3_INT32   value;
-    ANTLR3_BOOLEAN  negate;
-
-    value   = 0;
-    input   = (pANTLR3_UINT16)(string->chars);
-    negate  = ANTLR3_FALSE;
-
-    if	(*input == (ANTLR3_UCHAR)'-')
-    {
-		negate = ANTLR3_TRUE;
-		input++;
-    }
-    else if (*input == (ANTLR3_UCHAR)'+')
-    {
-		input++;
-    }
-
-    while   (*input != '\0' && isdigit(*input))
-    {
-		value	 = value * 10;
-		value	+= ((ANTLR3_UINT32)(*input) - (ANTLR3_UINT32)'0');
-		input++;
-    }
-
-    return negate ? -value : value;
-}
-
-/* Function that returns a pointer to an 8 bit version of the string,
- * which in this case is just the string itself, as it is
- * 8 bit encoding anyway.
- */
-static	  pANTLR3_STRING	    to8_8	(pANTLR3_STRING string)
-{
-    return  string;
-}
-
-/* Function that returns an 8 bit version of the string,
- * which in this case is returning all the UTF16 characters
- * narrowed back into 8 bits, with characters that are too large
- * replaced with '_'
- */
-static	  pANTLR3_STRING    to8_UTF16	(pANTLR3_STRING string)
-{
-	pANTLR3_STRING  newStr;
-	ANTLR3_UINT32   i;
-
-	/* Create a new 8 bit string
-	*/
-	newStr  = newRaw8(string->factory);
-
-	if	(newStr == NULL)
-	{
-		return	NULL;
-	}
-
-	/* Always add one more byte for a terminator
-	*/
-	newStr->chars   = (pANTLR3_UINT8) ANTLR3_MALLOC((size_t)(string->len + 1));
-	newStr->size    = string->len + 1;
-	newStr->len	    = string->len;
-
-	/* Now copy each UTF16 character, narrowing it to an 8 bit character of
-	* some sort.
-	*/
-	for	(i=0; i<string->len; i++)
-	{
-		ANTLR3_UCHAR	c;
-
-		c = *(((pANTLR3_UINT16)(string->chars)) + i);
-
-		*(newStr->chars + i) = (ANTLR3_UINT8)(c > 255 ? '_' : c);
-	}
-
-	/* Terminate
-	*/
-	*(newStr->chars + newStr->len) = '\0';
-
-	return newStr;
-}
diff --git a/antlr-3.4/runtime/C/src/antlr3treeparser.c b/antlr-3.4/runtime/C/src/antlr3treeparser.c
deleted file mode 100644
index b7e035a..0000000
--- a/antlr-3.4/runtime/C/src/antlr3treeparser.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/** \file
- *  Implementation of the tree parser and overrides for the base recognizer
- */
-
-// [The "BSD licence"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include    <antlr3treeparser.h>
-
-/* BASE Recognizer overrides
- */
-static void				mismatch	    (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow);
-
-/* Tree parser API
- */
-static void			setTreeNodeStream	    (pANTLR3_TREE_PARSER parser, pANTLR3_COMMON_TREE_NODE_STREAM input);
-static pANTLR3_COMMON_TREE_NODE_STREAM	
-					getTreeNodeStream	    (pANTLR3_TREE_PARSER parser);
-static void			freeParser				(pANTLR3_TREE_PARSER parser);    
-static void *		getCurrentInputSymbol	(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream);
-static void *		getMissingSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
-												ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow);
-
-
-ANTLR3_API pANTLR3_TREE_PARSER
-antlr3TreeParserNewStream(ANTLR3_UINT32 sizeHint, pANTLR3_COMMON_TREE_NODE_STREAM ctnstream, pANTLR3_RECOGNIZER_SHARED_STATE state)
-{
-	pANTLR3_TREE_PARSER	    parser;
-
-	/** Allocate tree parser memory
-	*/
-	parser  =(pANTLR3_TREE_PARSER) ANTLR3_MALLOC(sizeof(ANTLR3_TREE_PARSER));
-
-	if	(parser == NULL)
-	{
-		return	NULL;
-	}
-
-	/* Create and install a base recognizer which does most of the work for us
-	*/
-	parser->rec =  antlr3BaseRecognizerNew(ANTLR3_TYPE_PARSER, sizeHint, state);
-
-	if	(parser->rec == NULL)
-	{
-		parser->free(parser);
-		return	NULL;
-	}
-
-	/* Ensure we can track back to the tree parser super structure
-	* from the base recognizer structure
-	*/
-	parser->rec->super	= parser;
-	parser->rec->type	= ANTLR3_TYPE_TREE_PARSER;
-
-	/* Install our base recognizer overrides
-	*/
-	parser->rec->mismatch				= mismatch;
-	parser->rec->exConstruct			= antlr3MTNExceptionNew;
-	parser->rec->getCurrentInputSymbol	= getCurrentInputSymbol;
-	parser->rec->getMissingSymbol		= getMissingSymbol;
-
-	/* Install tree parser API
-	*/
-	parser->getTreeNodeStream	=  getTreeNodeStream;
-	parser->setTreeNodeStream	=  setTreeNodeStream;
-	parser->free		=  freeParser;
-
-	/* Install the tree node stream
-	*/
-	parser->setTreeNodeStream(parser, ctnstream);
-
-	return  parser;
-}
-
-/**
- * \brief
- * Creates a new Mismatched Tree Node Exception and inserts it into the recognizer
- * exception stack.
- * 
- * \param recognizer
- * Context pointer for this recognizer
- * 
- */
-ANTLR3_API	void
-antlr3MTNExceptionNew(pANTLR3_BASE_RECOGNIZER recognizer)
-{
-    /* Create a basic recognition exception structure
-     */
-    antlr3RecognitionExceptionNew(recognizer);
-
-    /* Now update it to indicate this is a Mismatched token exception
-     */
-    recognizer->state->exception->name		= ANTLR3_MISMATCHED_TREE_NODE_NAME;
-    recognizer->state->exception->type		= ANTLR3_MISMATCHED_TREE_NODE_EXCEPTION;
-
-    return;
-}
-
-
-static void
-freeParser	(pANTLR3_TREE_PARSER parser)
-{
-	if	(parser->rec != NULL)
-	{
-		// This may have been a delegate or delegator parser, in which case the
-		// state may already have been freed (and set to NULL therefore)
-		// so we ignore the state if we don't have it.
-		//
-		if	(parser->rec->state != NULL)
-		{
-			if	(parser->rec->state->following != NULL)
-			{
-				parser->rec->state->following->free(parser->rec->state->following);
-				parser->rec->state->following = NULL;
-			}
-		}
-	    parser->rec->free(parser->rec);
-	    parser->rec	= NULL;
-    }
-
-    ANTLR3_FREE(parser);
-}
-
-/** Set the input stream and reset the parser
- */
-static void
-setTreeNodeStream	(pANTLR3_TREE_PARSER parser, pANTLR3_COMMON_TREE_NODE_STREAM input)
-{
-    parser->ctnstream = input;
-    parser->rec->reset		(parser->rec);
-    parser->ctnstream->reset	(parser->ctnstream);
-}
-
-/** Return a pointer to the input stream
- */
-static pANTLR3_COMMON_TREE_NODE_STREAM
-getTreeNodeStream	(pANTLR3_TREE_PARSER parser)
-{
-    return  parser->ctnstream;
-}
-
-
-/** Override for standard base recognizer mismatch function
- *  as we have DOWN/UP nodes in the stream that have no line info,
- *  plus we want to alter the exception type.
- */
-static void
-mismatch	    (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow)
-{
-    recognizer->exConstruct(recognizer);
-    recognizer->recoverFromMismatchedToken(recognizer, ttype, follow);
-}
-
-#ifdef ANTLR3_WINDOWS
-#pragma warning	(push)
-#pragma warning (disable : 4100)
-#endif
-
-// Default implementation is for the tree parser and assumes the common tree node stream
-// supplied by the runtime. You MAY need to override this function if that is not what you are using.
-//
-static void *				
-getCurrentInputSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream)
-{
-	pANTLR3_TREE_NODE_STREAM		tns;
-    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
-
-    tns	    = (pANTLR3_TREE_NODE_STREAM)(istream->super);
-    ctns    = tns->ctns;
-	return tns->_LT(tns, 1);
-}
-
-
-// Default implementation is for the tree parser and assumes the common tree node stream
-// supplied by the runtime. You MAY need to override this function if the standard BASE_TREE is not what you are using.
-//
-static void *				
-getMissingSymbol			(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
-									ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow)
-{
-	pANTLR3_TREE_NODE_STREAM		tns;
-    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
-	pANTLR3_BASE_TREE				node;
-	pANTLR3_BASE_TREE				current;
-	pANTLR3_COMMON_TOKEN			token;
-	pANTLR3_STRING					text;
-    ANTLR3_INT32                   i;
-
-	// Dereference the standard pointers
-	//
-    tns	    = (pANTLR3_TREE_NODE_STREAM)(istream->super);
-    ctns    = tns->ctns;
-    
-	// Create a new empty node, by stealing the current one, or the previous one if the current one is EOF
-	//
-	current	= tns->_LT(tns, 1);
-    i       = -1;
-
-	if	(current == &ctns->EOF_NODE.baseTree)
-	{
-		current = tns->_LT(tns, -1);
-        i--;
-	}
-    while (((pANTLR3_COMMON_TREE)(current->super))->factory == NULL)
-	{
-		current = tns->_LT(tns, i--);
-    }
-
-	node	= current->dupNode(current);
-
-	// Find the newly duplicated token
-	//
-	token	= node->getToken(node);
-
-	// Create the token text that shows it has been inserted
-	//
-	token->setText8			(token, (pANTLR3_UINT8)"<missing ");
-	text = token->getText	(token);
-	text->append8			(text, (const char *)recognizer->state->tokenNames[expectedTokenType]);
-	text->append8			(text, (const char *)">");
-	
-	// Finally return the pointer to our new node
-	//
-	return	node;
-}
-#ifdef ANTLR3_WINDOWS
-#pragma warning	(pop)
-#endif
-
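
getMissingSymbol above recovers from a missing token by duplicating a nearby real node (the current one, or the previous one at EOF) and overwriting its token text with a marker of the form "<missing TOKENNAME>" via setText8/append8. A minimal standalone sketch of just that marker construction (the helper name and fixed-size buffer are illustrative, not part of the runtime):

    #include <stdio.h>

    /* Build the "<missing TOKEN>" marker text that the error-recovery
     * code above attaches to the synthesized node.
     */
    static void missing_token_text(const char *tokenName, char *out, size_t outSize)
    {
        snprintf(out, outSize, "<missing %s>", tokenName);
    }

    int main(void)
    {
        char buf[64];

        missing_token_text("ID", buf, sizeof buf);
        puts(buf);    /* prints "<missing ID>" */
        return 0;
    }

In the runtime itself the marker goes onto a duplicate of an existing node, so downstream tree walkers still see a structurally valid tree after recovery.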
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Antlr3.Runtime.Debug.csproj b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Antlr3.Runtime.Debug.csproj
deleted file mode 100644
index 5ca19d8..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Antlr3.Runtime.Debug.csproj
+++ /dev/null
@@ -1,84 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup>
-    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
-    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
-    <ProductVersion>9.0.30729</ProductVersion>
-    <SchemaVersion>2.0</SchemaVersion>
-    <ProjectGuid>{5EE27A90-B023-42C9-AAF1-52B0424C5D0B}</ProjectGuid>
-    <OutputType>Library</OutputType>
-    <AppDesignerFolder>Properties</AppDesignerFolder>
-    <RootNamespace>Antlr.Runtime.Debug</RootNamespace>
-    <AssemblyName>Antlr3.Runtime.Debug</AssemblyName>
-    <TargetFrameworkVersion>v2.0</TargetFrameworkVersion>
-    <FileAlignment>512</FileAlignment>
-    <SccProjectName>Perforce Project</SccProjectName>
-    <SccLocalPath>..\..\..\..\..\..</SccLocalPath>
-    <SccAuxPath>
-    </SccAuxPath>
-    <SccProvider>MSSCCI:Perforce SCM</SccProvider>
-    <SignAssembly>true</SignAssembly>
-    <AssemblyOriginatorKeyFile>..\..\..\..\..\..\..\keys\antlr\Key.snk</AssemblyOriginatorKeyFile>
-    <TargetFrameworkProfile>
-    </TargetFrameworkProfile>
-  </PropertyGroup>
-  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
-    <DebugSymbols>true</DebugSymbols>
-    <DebugType>full</DebugType>
-    <Optimize>false</Optimize>
-    <OutputPath>bin\Debug\</OutputPath>
-    <DefineConstants>DEBUG;TRACE</DefineConstants>
-    <ErrorReport>prompt</ErrorReport>
-    <WarningLevel>4</WarningLevel>
-  </PropertyGroup>
-  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
-    <DebugType>pdbonly</DebugType>
-    <Optimize>true</Optimize>
-    <OutputPath>bin\Release\</OutputPath>
-    <DefineConstants>TRACE</DefineConstants>
-    <ErrorReport>prompt</ErrorReport>
-    <WarningLevel>4</WarningLevel>
-  </PropertyGroup>
-  <ItemGroup>
-    <Reference Include="System" />
-  </ItemGroup>
-  <ItemGroup>
-    <Compile Include="BlankDebugEventListener.cs" />
-    <Compile Include="DebugEventHub.cs" />
-    <Compile Include="DebugEventListenerConstants.cs" />
-    <Compile Include="DebugEventRepeater.cs" />
-    <Compile Include="DebugEventSocketProxy.cs" />
-    <Compile Include="DebugParser.cs" />
-    <Compile Include="DebugTokenStream.cs" />
-    <Compile Include="DebugTreeAdaptor.cs" />
-    <Compile Include="DebugTreeNodeStream.cs" />
-    <Compile Include="DebugTreeParser.cs" />
-    <Compile Include="JavaExtensions\ExceptionExtensions.cs" />
-    <Compile Include="Misc\DoubleKeyMap`3.cs" />
-    <Compile Include="Misc\Stats.cs" />
-    <None Include="..\..\..\..\..\..\..\keys\antlr\Key.snk">
-      <Link>Key.snk</Link>
-    </None>
-    <None Include="ParserDebugger.cs" />
-    <Compile Include="ParseTreeBuilder.cs" />
-    <Compile Include="Profiler.cs" />
-    <Compile Include="Properties\AssemblyInfo.cs" />
-    <Compile Include="RemoteDebugEventSocketListener.cs" />
-    <Compile Include="TraceDebugEventListener.cs" />
-    <Compile Include="Tracer.cs" />
-  </ItemGroup>
-  <ItemGroup>
-    <ProjectReference Include="..\Antlr3.Runtime\Antlr3.Runtime.csproj">
-      <Project>{8FDC0A87-9005-4D5A-AB75-E55CEB575559}</Project>
-      <Name>Antlr3.Runtime</Name>
-    </ProjectReference>
-  </ItemGroup>
-  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
-       Other similar extension points exist, see Microsoft.Common.targets.
-  <Target Name="BeforeBuild">
-  </Target>
-  <Target Name="AfterBuild">
-  </Target>
-  -->
-</Project>
\ No newline at end of file
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/ParseTreeBuilder.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/ParseTreeBuilder.cs
deleted file mode 100644
index deeaf3b..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/ParseTreeBuilder.cs
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.Debug
-{
-    using System.Collections.Generic;
-    using ParseTree = Antlr.Runtime.Tree.ParseTree;
-
-    /** <summary>
-     *  This parser listener tracks rule entry/exit and token matches
-     *  to build a simple parse tree using ParseTree nodes.
-     *  </summary>
-     */
-    public class ParseTreeBuilder : BlankDebugEventListener
-    {
-        public const string EPSILON_PAYLOAD = "<epsilon>";
-
-        Stack<ParseTree> callStack = new Stack<ParseTree>();
-        List<IToken> hiddenTokens = new List<IToken>();
-        int backtracking = 0;
-
-        public ParseTreeBuilder( string grammarName )
-        {
-            ParseTree root = Create( "<grammar " + grammarName + ">" );
-            callStack.Push( root );
-        }
-
-        public virtual ParseTree Tree
-        {
-            get
-            {
-                return callStack.Peek();
-            }
-        }
-
-        /** <summary>
-         *  What kind of node to create.  Creation is factored out into this
-         *  method so that subclasses can override it.
-         *  </summary>
-         */
-        public virtual ParseTree Create( object payload )
-        {
-            return new ParseTree( payload );
-        }
-
-        public virtual ParseTree EpsilonNode()
-        {
-            return Create( EPSILON_PAYLOAD );
-        }
-
-        /** <summary>Backtracking or cyclic DFA, don't want to add nodes to tree</summary> */
-        public override void EnterDecision( int d, bool couldBacktrack )
-        {
-            backtracking++;
-        }
-        public override void ExitDecision( int i )
-        {
-            backtracking--;
-        }
-
-        public override void EnterRule( string filename, string ruleName )
-        {
-            if ( backtracking > 0 )
-                return;
-            ParseTree parentRuleNode = callStack.Peek();
-            ParseTree ruleNode = Create( ruleName );
-            parentRuleNode.AddChild( ruleNode );
-            callStack.Push( ruleNode );
-        }
-
-        public override void ExitRule( string filename, string ruleName )
-        {
-            if ( backtracking > 0 )
-                return;
-            ParseTree ruleNode = callStack.Peek();
-            if ( ruleNode.ChildCount == 0 )
-            {
-                ruleNode.AddChild( EpsilonNode() );
-            }
-            callStack.Pop();
-        }
-
-        public override void ConsumeToken( IToken token )
-        {
-            if ( backtracking > 0 )
-                return;
-            ParseTree ruleNode = callStack.Peek();
-            ParseTree elementNode = Create( token );
-            elementNode.hiddenTokens = this.hiddenTokens;
-            this.hiddenTokens = new List<IToken>();
-            ruleNode.AddChild( elementNode );
-        }
-
-        public override void ConsumeHiddenToken( IToken token )
-        {
-            if ( backtracking > 0 )
-                return;
-            hiddenTokens.Add( token );
-        }
-
-        public override void RecognitionException( RecognitionException e )
-        {
-            if ( backtracking > 0 )
-                return;
-            ParseTree ruleNode = callStack.Peek();
-            ParseTree errorNode = Create( e );
-            ruleNode.AddChild( errorNode );
-        }
-    }
-}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Properties/AssemblyInfo.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Properties/AssemblyInfo.cs
deleted file mode 100644
index b8c988f..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Properties/AssemblyInfo.cs
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-using System;
-using System.Reflection;
-using System.Runtime.InteropServices;
-
-// General Information about an assembly is controlled through the following 
-// set of attributes. Change these attribute values to modify the information
-// associated with an assembly.
-[assembly: AssemblyTitle( "Antlr3.Runtime.Debug" )]
-[assembly: AssemblyDescription( "" )]
-[assembly: AssemblyConfiguration( "" )]
-[assembly: AssemblyCompany( "Pixel Mine, Inc." )]
-[assembly: AssemblyProduct( "Antlr3.Runtime.Debug" )]
-[assembly: AssemblyCopyright( "Copyright © Pixel Mine 2010" )]
-[assembly: AssemblyTrademark( "" )]
-[assembly: AssemblyCulture( "" )]
-[assembly: CLSCompliant( true )]
-
-// Setting ComVisible to false makes the types in this assembly not visible 
-// to COM components.  If you need to access a type in this assembly from 
-// COM, set the ComVisible attribute to true on that type.
-[assembly: ComVisible( false )]
-
-// The following GUID is for the ID of the typelib if this project is exposed to COM
-[assembly: Guid( "9f8fa018-6766-404c-9e72-551407e1b173" )]
-
-/* Version information for an assembly consists of four values in the following order:
- *
- *   Major.Minor.Build.Revision
- *
- * These values are updated according to the following:
- *   1. Major.Minor follows the ANTLR release schedule
- *   2. Build is incremented each time the C# port is packaged for release (regardless
- *      of whether it's an incremental or nightly). The value resets to zero whenever
- *      the Major or Minor version is incremented.
- *   3. Revision is the Perforce changelist number associated with the release.
- */
-[assembly: AssemblyVersion("3.3.4.8517")]
-[assembly: AssemblyFileVersion("3.3.4.8517")]
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Antlr3.Runtime.JavaExtensions.csproj b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Antlr3.Runtime.JavaExtensions.csproj
deleted file mode 100644
index 959a9f9..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Antlr3.Runtime.JavaExtensions.csproj
+++ /dev/null
@@ -1,95 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup>
-    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
-    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
-    <ProductVersion>9.0.30729</ProductVersion>
-    <SchemaVersion>2.0</SchemaVersion>
-    <ProjectGuid>{A7EEC557-EB14-451C-9616-B7A61F4ECE69}</ProjectGuid>
-    <OutputType>Library</OutputType>
-    <AppDesignerFolder>Properties</AppDesignerFolder>
-    <RootNamespace>Antlr3.Runtime.JavaExtensions</RootNamespace>
-    <AssemblyName>Antlr3.Runtime.JavaExtensions</AssemblyName>
-    <TargetFrameworkVersion>v3.5</TargetFrameworkVersion>
-    <FileAlignment>512</FileAlignment>
-    <SccProjectName>SAK</SccProjectName>
-    <SccLocalPath>SAK</SccLocalPath>
-    <SccAuxPath>SAK</SccAuxPath>
-    <SccProvider>SAK</SccProvider>
-    <TargetFrameworkProfile>Client</TargetFrameworkProfile>
-  </PropertyGroup>
-  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
-    <DebugSymbols>true</DebugSymbols>
-    <DebugType>full</DebugType>
-    <Optimize>false</Optimize>
-    <OutputPath>bin\Debug\</OutputPath>
-    <DefineConstants>DEBUG;TRACE</DefineConstants>
-    <ErrorReport>prompt</ErrorReport>
-    <WarningLevel>4</WarningLevel>
-  </PropertyGroup>
-  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
-    <DebugType>pdbonly</DebugType>
-    <Optimize>true</Optimize>
-    <OutputPath>bin\Release\</OutputPath>
-    <DefineConstants>TRACE</DefineConstants>
-    <ErrorReport>prompt</ErrorReport>
-    <WarningLevel>4</WarningLevel>
-  </PropertyGroup>
-  <PropertyGroup>
-    <SignAssembly>true</SignAssembly>
-  </PropertyGroup>
-  <PropertyGroup>
-    <AssemblyOriginatorKeyFile>..\..\..\..\..\..\..\keys\antlr\Key.snk</AssemblyOriginatorKeyFile>
-  </PropertyGroup>
-  <ItemGroup>
-    <Reference Include="System" />
-    <Reference Include="System.Core">
-      <RequiredTargetFramework>3.5</RequiredTargetFramework>
-    </Reference>
-    <Reference Include="System.Xml.Linq">
-      <RequiredTargetFramework>3.5</RequiredTargetFramework>
-    </Reference>
-    <Reference Include="System.Data.DataSetExtensions">
-      <RequiredTargetFramework>3.5</RequiredTargetFramework>
-    </Reference>
-    <Reference Include="System.Data" />
-    <Reference Include="System.Xml" />
-  </ItemGroup>
-  <ItemGroup>
-    <Compile Include="DictionaryExtensions.cs" />
-    <Compile Include="ExceptionExtensions.cs" />
-    <Compile Include="IOExtensions.cs" />
-    <Compile Include="LexerExtensions.cs" />
-    <Compile Include="JSystem.cs" />
-    <Compile Include="ListExtensions.cs" />
-    <Compile Include="ObjectExtensions.cs" />
-    <Compile Include="Properties\AssemblyInfo.cs" />
-    <Compile Include="SetExtensions.cs" />
-    <Compile Include="StackExtensions.cs" />
-    <Compile Include="StringBuilderExtensions.cs" />
-    <Compile Include="StringExtensions.cs" />
-    <Compile Include="StringTokenizer.cs" />
-    <Compile Include="SubList.cs" />
-    <Compile Include="TreeExtensions.cs" />
-    <Compile Include="TypeExtensions.cs" />
-  </ItemGroup>
-  <ItemGroup>
-    <ProjectReference Include="..\Antlr3.Runtime\Antlr3.Runtime.csproj">
-      <Project>{8FDC0A87-9005-4D5A-AB75-E55CEB575559}</Project>
-      <Name>Antlr3.Runtime</Name>
-    </ProjectReference>
-  </ItemGroup>
-  <ItemGroup>
-    <None Include="..\..\..\..\..\..\..\keys\antlr\Key.snk">
-      <Link>Key.snk</Link>
-    </None>
-  </ItemGroup>
-  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
-       Other similar extension points exist, see Microsoft.Common.targets.
-  <Target Name="BeforeBuild">
-  </Target>
-  <Target Name="AfterBuild">
-  </Target>
-  -->
-</Project>
\ No newline at end of file
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Properties/AssemblyInfo.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Properties/AssemblyInfo.cs
deleted file mode 100644
index 8aa2671..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Properties/AssemblyInfo.cs
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2010 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-using System;
-using System.Reflection;
-using System.Runtime.InteropServices;
-
-// General Information about an assembly is controlled through the following 
-// set of attributes. Change these attribute values to modify the information
-// associated with an assembly.
-[assembly: AssemblyTitle( "Antlr3.Runtime.JavaExtensions" )]
-[assembly: AssemblyDescription( "" )]
-[assembly: AssemblyConfiguration( "" )]
-[assembly: AssemblyCompany( "Pixel Mine, Inc." )]
-[assembly: AssemblyProduct( "Antlr3.Runtime.JavaExtensions" )]
-[assembly: AssemblyCopyright( "Copyright © Pixel Mine 2010" )]
-[assembly: AssemblyTrademark( "" )]
-[assembly: AssemblyCulture( "" )]
-[assembly: CLSCompliant( true )]
-
-// Setting ComVisible to false makes the types in this assembly not visible 
-// to COM components.  If you need to access a type in this assembly from 
-// COM, set the ComVisible attribute to true on that type.
-[assembly: ComVisible( false )]
-
-// The following GUID is for the ID of the typelib if this project is exposed to COM
-[assembly: Guid( "ad48c7f7-0b1d-4b1e-9602-83425cb5699f" )]
-
-/* Version information for an assembly consists of four values in the following order:
- *
- *   Major.Minor.Build.Revision
- *
- * These values are updated according to the following:
- *   1. Major.Minor follows the ANTLR release schedule
- *   2. Build is incremented each time the C# port is packaged for release (regardless
- *      of whether it's an incremental or nightly). The value resets to zero whenever
- *      the Major or Minor version is incremented.
- *   3. Revision is the Perforce changelist number associated with the release.
- */
-[assembly: AssemblyVersion("3.3.4.8517")]
-[assembly: AssemblyFileVersion("3.3.4.8517")]
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringExtensions.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringExtensions.cs
deleted file mode 100644
index 8432512..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringExtensions.cs
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.JavaExtensions
-{
-    using ObsoleteAttribute = System.ObsoleteAttribute;
-    using Regex = System.Text.RegularExpressions.Regex;
-    using StringBuilder = System.Text.StringBuilder;
-
-    public static class StringExtensions
-    {
-#if DEBUG
-        [Obsolete]
-        public static char charAt( this string str, int index )
-        {
-            return str[index];
-        }
-
-        [Obsolete]
-        public static bool endsWith( this string str, string value )
-        {
-            return str.EndsWith( value );
-        }
-
-        [Obsolete]
-        public static int indexOf( this string str, char value )
-        {
-            return str.IndexOf( value );
-        }
-
-        [Obsolete]
-        public static int indexOf( this string str, char value, int startIndex )
-        {
-            return str.IndexOf( value, startIndex );
-        }
-
-        [Obsolete]
-        public static int indexOf( this string str, string value )
-        {
-            return str.IndexOf( value );
-        }
-
-        [Obsolete]
-        public static int indexOf( this string str, string value, int startIndex )
-        {
-            return str.IndexOf( value, startIndex );
-        }
-
-        [Obsolete]
-        public static int lastIndexOf( this string str, char value )
-        {
-            return str.LastIndexOf( value );
-        }
-
-        [Obsolete]
-        public static int lastIndexOf( this string str, string value )
-        {
-            return str.LastIndexOf( value );
-        }
-
-        [Obsolete]
-        public static int length( this string str )
-        {
-            return str.Length;
-        }
-
-        [Obsolete]
-        public static string replace(this string str, char oldValue, char newValue)
-        {
-            return str.Replace(oldValue, newValue);
-        }
-#endif
-
-        public static string replaceAll( this string str, string regex, string newValue )
-        {
-            return Regex.Replace( str, regex, newValue );
-        }
-
-        public static string replaceFirst( this string str, string regex, string replacement )
-        {
-            return Regex.Replace( str, regex, replacement );
-        }
-
-#if DEBUG
-        [Obsolete]
-        public static bool startsWith( this string str, string value )
-        {
-            return str.StartsWith( value );
-        }
-
-        [Obsolete]
-        public static string substring( this string str, int startOffset )
-        {
-            return str.Substring( startOffset );
-        }
-
-        [Obsolete]
-        public static string substring(this string str, int startOffset, int endOffset)
-        {
-            return str.Substring( startOffset, endOffset - startOffset );
-        }
-
-        [Obsolete]
-        public static char[] toCharArray( this string str )
-        {
-            return str.ToCharArray();
-        }
-
-        [Obsolete]
-        public static string toUpperCase( this string str )
-        {
-            return str.ToUpperInvariant();
-        }
-
-        [Obsolete]
-        public static string trim( this string str )
-        {
-            return str.Trim();
-        }
-#endif
-    }
-}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Antlr3.Runtime.Test.csproj b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Antlr3.Runtime.Test.csproj
deleted file mode 100644
index 11206dc..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Antlr3.Runtime.Test.csproj
+++ /dev/null
@@ -1,206 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup>
-    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
-    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
-    <ProductVersion>9.0.30729</ProductVersion>
-    <SchemaVersion>2.0</SchemaVersion>
-    <ProjectGuid>{19B965DE-5100-4064-A580-159644F6980E}</ProjectGuid>
-    <OutputType>Library</OutputType>
-    <AppDesignerFolder>Properties</AppDesignerFolder>
-    <RootNamespace>Antlr3.Runtime.Test</RootNamespace>
-    <AssemblyName>Antlr3.Runtime.Test</AssemblyName>
-    <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
-    <FileAlignment>512</FileAlignment>
-    <ProjectTypeGuids>{3AC096D0-A1C2-E12C-1390-A8335801FDAB};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}</ProjectTypeGuids>
-    <SccProjectName>SAK</SccProjectName>
-    <SccLocalPath>SAK</SccLocalPath>
-    <SccAuxPath>SAK</SccAuxPath>
-    <SccProvider>SAK</SccProvider>
-  </PropertyGroup>
-  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
-    <DebugSymbols>true</DebugSymbols>
-    <DebugType>full</DebugType>
-    <Optimize>false</Optimize>
-    <OutputPath>bin\Debug\</OutputPath>
-    <DefineConstants>DEBUG;TRACE</DefineConstants>
-    <ErrorReport>prompt</ErrorReport>
-    <WarningLevel>4</WarningLevel>
-  </PropertyGroup>
-  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
-    <DebugType>pdbonly</DebugType>
-    <Optimize>true</Optimize>
-    <OutputPath>bin\Release\</OutputPath>
-    <DefineConstants>TRACE</DefineConstants>
-    <ErrorReport>prompt</ErrorReport>
-    <WarningLevel>4</WarningLevel>
-  </PropertyGroup>
-  <ItemGroup>
-    <Reference Include="Microsoft.VisualStudio.QualityTools.UnitTestFramework, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL" />
-    <Reference Include="System" />
-    <Reference Include="System.Core">
-      <RequiredTargetFramework>3.5</RequiredTargetFramework>
-    </Reference>
-    <Reference Include="vjslib" />
-  </ItemGroup>
-  <ItemGroup>
-    <Compile Include="Composition\Program.cs" />
-    <Compile Include="Properties\AssemblyInfo.cs" />
-  </ItemGroup>
-  <ItemGroup>
-    <ProjectReference Include="..\..\..\..\..\..\antlrcs\main\Antlr3.StringTemplate\Antlr3.StringTemplate.csproj">
-      <Project>{B5910BE2-DE21-4AA9-95C1-486F42B9E794}</Project>
-      <Name>Antlr3.StringTemplate</Name>
-    </ProjectReference>
-    <ProjectReference Include="..\Antlr3.Runtime.Debug\Antlr3.Runtime.Debug.csproj">
-      <Project>{5EE27A90-B023-42C9-AAF1-52B0424C5D0B}</Project>
-      <Name>Antlr3.Runtime.Debug</Name>
-    </ProjectReference>
-    <ProjectReference Include="..\Antlr3.Runtime.JavaExtensions\Antlr3.Runtime.JavaExtensions.csproj">
-      <Project>{A7EEC557-EB14-451C-9616-B7A61F4ECE69}</Project>
-      <Name>Antlr3.Runtime.JavaExtensions</Name>
-    </ProjectReference>
-    <ProjectReference Include="..\Antlr3.Runtime\Antlr3.Runtime.csproj">
-      <Project>{8FDC0A87-9005-4D5A-AB75-E55CEB575559}</Project>
-      <Name>Antlr3.Runtime</Name>
-    </ProjectReference>
-  </ItemGroup>
-  <ItemGroup>
-    <Antlr3 Include="SimpleExpression.g3">
-      <Generator>MSBuild:Compile</Generator>
-    </Antlr3>
-  </ItemGroup>
-  <ItemGroup>
-    <Compile Include="SemanticPredicateReduction.g3.lexer.cs">
-      <DependentUpon>SemanticPredicateReduction.g3</DependentUpon>
-    </Compile>
-    <Compile Include="SemanticPredicateReduction.g3.parser.cs">
-      <DependentUpon>SemanticPredicateReduction.g3</DependentUpon>
-    </Compile>
-    <Compile Include="SimpleExpressionLexerHelper.cs">
-      <DependentUpon>SimpleExpression.g3</DependentUpon>
-    </Compile>
-    <Compile Include="SimpleExpressionParserHelper.cs">
-      <DependentUpon>SimpleExpression.g3</DependentUpon>
-    </Compile>
-  </ItemGroup>
-  <ItemGroup>
-    <Antlr3 Include="FastSimpleExpression.g3">
-      <Generator>MSBuild:Compile</Generator>
-    </Antlr3>
-    <Compile Include="FastSimpleExpressionLexerHelper.cs">
-      <DependentUpon>FastSimpleExpression.g3</DependentUpon>
-    </Compile>
-    <Compile Include="FastSimpleExpressionParserHelper.cs">
-      <DependentUpon>FastSimpleExpression.g3</DependentUpon>
-    </Compile>
-  </ItemGroup>
-  <ItemGroup>
-    <Antlr3 Include="JavaCompat\Expr.g3">
-      <Generator>MSBuild:Compile</Generator>
-    </Antlr3>
-  </ItemGroup>
-  <ItemGroup>
-    <Antlr3 Include="BuildOptions\DebugGrammar.g3">
-      <!--<GrammarOptions>-debug</GrammarOptions>-->
-      <Generator>MSBuild:Compile</Generator>
-    </Antlr3>
-    <Compile Include="BuildOptions\DebugGrammarLexerHelper.cs">
-      <DependentUpon>DebugGrammar.g3</DependentUpon>
-    </Compile>
-    <Compile Include="BuildOptions\DebugGrammarParserHelper.cs">
-      <DependentUpon>DebugGrammar.g3</DependentUpon>
-    </Compile>
-  </ItemGroup>
-  <ItemGroup>
-    <Antlr3 Include="BuildOptions\DebugTreeGrammar.g3">
-      <!--<GrammarOptions>-debug</GrammarOptions>-->
-      <Generator>MSBuild:Compile</Generator>
-    </Antlr3>
-    <Compile Include="BuildOptions\DebugTreeGrammarHelper.cs">
-      <DependentUpon>DebugTreeGrammar.g3</DependentUpon>
-    </Compile>
-    <Compile Include="SlimParsing\ITokenSource`1.cs" />
-    <Compile Include="SlimParsing\ITokenStream`1.cs" />
-    <Compile Include="SlimParsing\SlimLexer.cs" />
-    <Compile Include="SlimParsing\SlimStringStream.cs" />
-    <Compile Include="SlimParsing\SlimToken.cs" />
-    <Compile Include="SlimParsing\SlimTokenStream.cs" />
-    <Compile Include="SlimParsing\Tree\ITreeAdaptor`1.cs" />
-    <Compile Include="SlimParsing\Tree\ITreeFactory.cs" />
-    <Compile Include="SlimParsing\Tree\ITreeNodeStream`1.cs" />
-    <Compile Include="StringTemplateOutput.g3.lexer.cs">
-      <DependentUpon>StringTemplateOutput.g3</DependentUpon>
-    </Compile>
-    <Compile Include="StringTemplateOutput.g3.parser.cs">
-      <DependentUpon>StringTemplateOutput.g3</DependentUpon>
-    </Compile>
-    <Compile Include="TestActionFeatures.g3.lexer.cs">
-      <DependentUpon>TestActionFeatures.g3</DependentUpon>
-    </Compile>
-    <Compile Include="TestActionFeatures.g3.parser.cs">
-      <DependentUpon>TestActionFeatures.g3</DependentUpon>
-    </Compile>
-    <Compile Include="TestDotTreeGenerator.cs" />
-    <Compile Include="TestExpressionFeatures.g3.lexer.cs">
-      <DependentUpon>TestExpressionFeatures.g3</DependentUpon>
-    </Compile>
-    <Compile Include="TestExpressionFeatures.g3.parser.cs">
-      <DependentUpon>TestExpressionFeatures.g3</DependentUpon>
-    </Compile>
-    <Compile Include="TestFastLexer.cs" />
-  </ItemGroup>
-  <ItemGroup>
-    <None Include="BuildOptions\ProfileGrammar.g3">
-      <!--<GrammarOptions>-profile</GrammarOptions>-->
-      <Generator>MSBuild:Compile</Generator>
-    </None>
-    <None Include="BuildOptions\ProfileGrammarLexerHelper.cs">
-      <DependentUpon>ProfileGrammar.g3</DependentUpon>
-    </None>
-    <None Include="BuildOptions\ProfileGrammarParserHelper.cs">
-      <DependentUpon>ProfileGrammar.g3</DependentUpon>
-    </None>
-    <Antlr3 Include="StringTemplateOutput.g3">
-      <Generator>MSBuild:Compile</Generator>
-    </Antlr3>
-    <Antlr3 Include="TestActionFeatures.g3">
-      <Generator>MSBuild:Compile</Generator>
-    </Antlr3>
-    <Antlr3 Include="SemanticPredicateReduction.g3">
-      <Generator>MSBuild:Compile</Generator>
-    </Antlr3>
-    <Antlr3 Include="Composition\Reduce.g3" />
-    <Antlr3 Include="Composition\Simplify.g3" />
-    <Antlr3 Include="Composition\VecMath.g3" />
-    <None Include="Composition\VecMath_Lexer.g3" />
-    <None Include="Composition\VecMath_Parser.g3" />
-    <None Include="TestExpressionFeatures.g3">
-      <Generator>MSBuild:Compile</Generator>
-    </None>
-  </ItemGroup>
-  <ItemGroup>
-    <None Include="BuildOptions\ProfileTreeGrammar.g3">
-      <!--<GrammarOptions>-profile</GrammarOptions>-->
-      <Generator>MSBuild:Compile</Generator>
-    </None>
-    <None Include="BuildOptions\ProfileTreeGrammarHelper.cs">
-      <DependentUpon>ProfileTreeGrammar.g3</DependentUpon>
-    </None>
-  </ItemGroup>
-  <PropertyGroup>
-    <!-- Folder containing AntlrBuildTask.dll -->
-    <AntlrBuildTaskPath>$(SolutionDir)bin\Bootstrap</AntlrBuildTaskPath>
-    <!-- Path to the ANTLR Tool itself. -->
-    <AntlrToolPath>$(SolutionDir)bin\Bootstrap\Antlr3.exe</AntlrToolPath>
-  </PropertyGroup>
-  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="$(ProjectDir)..\..\..\..\..\..\antlrcs\main\bin\Bootstrap\Antlr3.targets" />
-  <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
-       Other similar extension points exist, see Microsoft.Common.targets.
-  <Target Name="BeforeBuild">
-  </Target>
-  <Target Name="AfterBuild">
-  </Target>
-  -->
-</Project>
\ No newline at end of file
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammar.g3 b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammar.g3
deleted file mode 100644
index 36b1884..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammar.g3
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-grammar DebugGrammar;
-
-options
-{
-	language=CSharp3;
-	output=AST;
-	ASTLabelType=CommonTree;
-}
-
-tokens
-{
-	// define pseudo-operations
-	FUNC;
-	CALL;
-}
-
-// START:stat
-prog: ( stat )*
-    ;
-
-stat:   expr NEWLINE                    -> expr
-    |   ID '=' expr NEWLINE             -> ^('=' ID expr)
-    |   func NEWLINE                    -> func
-    |   NEWLINE                         -> // ignore
-    ;
-
-func:   ID  '(' formalPar ')' '=' expr  -> ^(FUNC ID formalPar expr)
-    ;
-	finally {
-	  functionDefinitions.Add($func.tree);
-	}
-
-formalPar
-    :   ID
-	|   INT
-	;
-
-// END:stat
-
-// START:expr
-expr:   multExpr (('+'^|'-'^) multExpr)*
-    ;
-
-multExpr
-    :   atom (('*'|'/'|'%')^ atom)*
-    ;
-
-atom:   INT
-    |   ID
-    |   '(' expr ')'    -> expr
-    |   ID '(' expr ')' -> ^(CALL ID expr)
-    ;
-// END:expr
-
-// START:tokens
-ID  :   ('a'..'z'|'A'..'Z')+
-	;
-
-INT :   '0'..'9'+
-    ;
-
-NEWLINE
-    :	'\r'? '\n'
-    ;
-
-WS  :   (' '|'\t')+ { Skip(); }
-    ;
-// END:tokens
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarParserHelper.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarParserHelper.cs
deleted file mode 100644
index 95beb20..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarParserHelper.cs
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-using System.Collections.Generic;
-using Antlr.Runtime.Tree;
-
-partial class DebugGrammarParser
-{
-    /** List of function definitions. Must point at the FUNC nodes. */
-    List<CommonTree> functionDefinitions = new List<CommonTree>();
-}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammar.g3 b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammar.g3
deleted file mode 100644
index b16a73e..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammar.g3
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-tree grammar DebugTreeGrammar;
-
-options
-{
-	language=CSharp3;
-	tokenVocab=DebugGrammar;
-	ASTLabelType=CommonTree;
-}
-
-// START:members
-@header
-{
-//import java.util.Map;
-//import java.util.HashMap;
-using BigInteger = java.math.BigInteger;
-using Console = System.Console;
-}
-// END:members
-
-// START:rules
-prog:   stat*
-    ;
-
-stat:   expr                       { string result = $expr.value.ToString();
-                                     Console.Out.WriteLine($expr.value + " (about " + result[0] + "*10^" + (result.Length-1) + ")");
-                                   }
-    |   ^('=' ID expr)             { globalMemory[$ID.text] = $expr.value; }
-    |   ^(FUNC .+)	               // ignore FUNCs - we added them to functionDefinitions already in parser.
-    ;
-
-expr returns [BigInteger value]
-    :   ^('+' a=expr b=expr)       { $value = $a.value.add($b.value); }
-    |   ^('-' a=expr b=expr)       { $value = $a.value.subtract($b.value); }
-    |   ^('*' a=expr b=expr)       { $value = $a.value.multiply($b.value); }
-    |   ^('/' a=expr b=expr)       { $value = $a.value.divide($b.value); }
-    |   ^('%' a=expr b=expr)       { $value = $a.value.remainder($b.value); }
-    |   ID                         { $value = getValue($ID.text); }
-    |   INT                        { $value = new BigInteger($INT.text); }
-    |   call                       { $value = $call.value; }
-    ;
-
-call returns [BigInteger value]
-    :   ^(CALL ID expr)            { BigInteger p = $expr.value;
-                                     CommonTree funcRoot = findFunction($ID.text, p);
-                                     if (funcRoot == null) {
-                                         Console.Error.WriteLine("No match found for " + $ID.text + "(" + p + ")");
-                                     } else {
-                                         // Here we set up the local evaluator to run over the
-                                         // function definition with the parameter value.
-                                         // This re-reads a sub-AST of our input AST!
-                                         DebugTreeGrammar e = new DebugTreeGrammar(funcRoot, functionDefinitions, globalMemory, p);
-                                         $value = e.expr();
-                                     }
-                                   }
-    ;
-// END:rules
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammarHelper.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammarHelper.cs
deleted file mode 100644
index af83214..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammarHelper.cs
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-using System.Collections.Generic;
-using Antlr.Runtime.Tree;
-
-using BigInteger = java.math.BigInteger;
-using Console = System.Console;
-
-partial class DebugTreeGrammar
-{
-    /** Points to functions tracked by tree builder. */
-    private List<CommonTree> functionDefinitions;
-
-    /** Remember local variables. Currently, this is only the function parameter.
-     */
-    private readonly IDictionary<string, BigInteger> localMemory = new Dictionary<string, BigInteger>();
-
-    /** Remember global variables set by =. */
-    private IDictionary<string, BigInteger> globalMemory = new Dictionary<string, BigInteger>();
-
-    /** Set up an evaluator with a node stream; and a set of function definition ASTs. */
-    public DebugTreeGrammar( CommonTreeNodeStream nodes, List<CommonTree> functionDefinitions )
-        : this( nodes )
-    {
-        this.functionDefinitions = functionDefinitions;
-    }
-
-    /** Set up a local evaluator for a nested function call. The evaluator gets the definition
-     *  tree of the function; the set of all defined functions (to find locally called ones); a
-     *  pointer to the global variable memory; and the value of the function parameter to be
-     *  added to the local memory.
-     */
-    private DebugTreeGrammar( CommonTree function,
-                 List<CommonTree> functionDefinitions,
-                 IDictionary<string, BigInteger> globalMemory,
-                 BigInteger paramValue )
-        // Expected tree for function: ^(FUNC ID ( INT | ID ) expr)
-        : this( new CommonTreeNodeStream( function.GetChild( 2 ) ), functionDefinitions )
-    {
-        this.globalMemory = globalMemory;
-        localMemory[function.GetChild( 1 ).Text] = paramValue;
-    }
-
-    /** Find matching function definition for a function name and parameter
-     *  value. The first definition is returned where (a) the name matches
-     *  and (b) the formal parameter agrees if it is defined as constant.
-     */
-    private CommonTree findFunction( string name, BigInteger paramValue )
-    {
-        foreach ( CommonTree f in functionDefinitions )
-        {
-            // Expected tree for f: ^(FUNC ID (ID | INT) expr)
-            if ( f.GetChild( 0 ).Text.Equals( name ) )
-            {
-                // Check whether parameter matches
-                CommonTree formalPar = (CommonTree)f.GetChild( 1 );
-                if ( formalPar.Token.Type == INT
-                    && !new BigInteger( formalPar.Token.Text ).Equals( paramValue ) )
-                {
-                    // Constant in formalPar list does not match actual value -> no match.
-                    continue;
-                }
-                // Parameter (value for INT formal arg) and function name both agree!
-                return f;
-            }
-        }
-        return null;
-    }
-
-    /** Get value of name up call stack. */
-    internal BigInteger getValue( string name )
-    {
-        BigInteger value;
-        if ( localMemory.TryGetValue( name, out value ) && value != null )
-        {
-            return value;
-        }
-        if ( globalMemory.TryGetValue( name, out value ) && value != null )
-        {
-            return value;
-        }
-        // not found in local memory or global memory
-        Console.Error.WriteLine( "undefined variable " + name );
-        return new BigInteger( "0" );
-    }
-}
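
The helper above resolves a variable name by checking the call-local memory first, then the global table, and finally warning and returning zero. A minimal stand-alone sketch of that scoping policy, using System.Numerics.BigInteger in place of the java.math.BigInteger alias the deleted file depends on; the ScopedMemory class and its members are illustrative names, not part of the runtime:

    using System;
    using System.Collections.Generic;
    using System.Numerics;

    class ScopedMemory
    {
        // Call-local bindings (here, just the function parameter) shadow the globals.
        private readonly IDictionary<string, BigInteger> local = new Dictionary<string, BigInteger>();
        private readonly IDictionary<string, BigInteger> global;

        public ScopedMemory(IDictionary<string, BigInteger> global) { this.global = global; }

        public void BindLocal(string name, BigInteger value) { local[name] = value; }

        public BigInteger GetValue(string name)
        {
            BigInteger value;
            if (local.TryGetValue(name, out value)) return value;   // innermost scope wins
            if (global.TryGetValue(name, out value)) return value;  // then globals assigned with '='
            Console.Error.WriteLine("undefined variable " + name);  // same fallback as the helper above
            return BigInteger.Zero;
        }
    }

Each nested function call in the deleted evaluator builds a fresh local map seeded with the parameter value while sharing the global map, which is what the private constructor above does.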
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammar.g3 b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammar.g3
deleted file mode 100644
index 5f8de16..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammar.g3
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-grammar ProfileGrammar;
-
-options
-{
-	language=CSharp3;
-	output=AST;
-	ASTLabelType=CommonTree;
-}
-
-tokens
-{
-	// define pseudo-operations
-	FUNC;
-	CALL;
-}
-
-// START:stat
-prog: ( stat )*
-    ;
-
-stat:   expr NEWLINE                    -> expr
-    |   ID '=' expr NEWLINE             -> ^('=' ID expr)
-    |   func NEWLINE                    -> func
-    |   NEWLINE                         -> // ignore
-    ;
-
-func:   ID  '(' formalPar ')' '=' expr  -> ^(FUNC ID formalPar expr)
-    ;
-	finally {
-	  functionDefinitions.Add($func.tree);
-	}
-
-formalPar
-    :   ID
-	|   INT
-	;
-
-// END:stat
-
-// START:expr
-expr:   multExpr (('+'^|'-'^) multExpr)*
-    ;
-
-multExpr
-    :   atom (('*'|'/'|'%')^ atom)*
-    ;
-
-atom:   INT
-    |   ID
-    |   '(' expr ')'    -> expr
-    |   ID '(' expr ')' -> ^(CALL ID expr)
-    ;
-// END:expr
-
-// START:tokens
-ID  :   ('a'..'z'|'A'..'Z')+
-	;
-
-INT :   '0'..'9'+
-    ;
-
-NEWLINE
-    :	'\r'? '\n'
-    ;
-
-WS  :   (' '|'\t')+ { Skip(); }
-    ;
-// END:tokens
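
ProfileGrammar accepts newline-terminated statements: bare expressions, assignments with '=', and single-parameter function definitions, and builds an AST (output=AST). A minimal sketch of driving it with the C# runtime; ProfileGrammarLexer and ProfileGrammarParser are the conventionally named generated classes and the prog() return shape is assumed here rather than taken from the deleted test project:

    using Antlr.Runtime;
    using Antlr.Runtime.Tree;

    class ProfileDemo
    {
        static void Main()
        {
            // A function definition followed by a call -- input the grammar accepts.
            var input = new ANTLRStringStream("double(x) = x + x\ndouble(21)\n");
            var lexer = new ProfileGrammarLexer(input);                // generated lexer (assumed name)
            var parser = new ProfileGrammarParser(new CommonTokenStream(lexer));
            var ast = (CommonTree)parser.prog().Tree;                  // prog is the start rule above
            System.Console.WriteLine(ast.ToStringTree());              // e.g. nested ^(FUNC ...) / ^(CALL ...) nodes
        }
    }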
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarParserHelper.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarParserHelper.cs
deleted file mode 100644
index ddd7533..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarParserHelper.cs
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-using System.Collections.Generic;
-using Antlr.Runtime.Tree;
-
-partial class ProfileGrammarParser
-{
-    /** List of function definitions. Must point at the FUNC nodes. */
-    List<CommonTree> functionDefinitions = new List<CommonTree>();
-}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammar.g3 b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammar.g3
deleted file mode 100644
index f6786db..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammar.g3
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-tree grammar ProfileTreeGrammar;
-
-options
-{
-	language=CSharp3;
-	tokenVocab=ProfileGrammar;
-	ASTLabelType=CommonTree;
-}
-
-// START:members
-@header
-{
-//import java.util.Map;
-//import java.util.HashMap;
-using BigInteger = java.math.BigInteger;
-using Console = System.Console;
-}
-// END:members
-
-// START:rules
-prog:   stat*
-    ;
-
-stat:   expr                       { string result = $expr.value.ToString();
-                                     Console.Out.WriteLine($expr.value + " (about " + result[0] + "*10^" + (result.Length-1) + ")");
-                                   }
-    |   ^('=' ID expr)             { globalMemory[$ID.text] = $expr.value; }
-    |   ^(FUNC .+)	               // ignore FUNCs - we added them to functionDefinitions already in parser.
-    ;
-
-expr returns [BigInteger value]
-    :   ^('+' a=expr b=expr)       { $value = $a.value.add($b.value); }
-    |   ^('-' a=expr b=expr)       { $value = $a.value.subtract($b.value); }
-    |   ^('*' a=expr b=expr)       { $value = $a.value.multiply($b.value); }
-    |   ^('/' a=expr b=expr)       { $value = $a.value.divide($b.value); }
-    |   ^('%' a=expr b=expr)       { $value = $a.value.remainder($b.value); }
-    |   ID                         { $value = getValue($ID.text); }
-    |   INT                        { $value = new BigInteger($INT.text); }
-    |   call                       { $value = $call.value; }
-    ;
-
-call returns [BigInteger value]
-    :   ^(CALL ID expr)            { BigInteger p = $expr.value;
-                                     CommonTree funcRoot = findFunction($ID.text, p);
-                                     if (funcRoot == null) {
-                                         Console.Error.WriteLine("No match found for " + $ID.text + "(" + p + ")");
-                                     } else {
-                                         // Here we set up the local evaluator to run over the
-                                         // function definition with the parameter value.
-                                         // This re-reads a sub-AST of our input AST!
-                                         ProfileTreeGrammar e = new ProfileTreeGrammar(funcRoot, functionDefinitions, globalMemory, p);
-                                         $value = e.expr();
-                                     }
-                                   }
-    ;
-// END:rules
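
The tree grammar above is the second pass: the parser builds the AST and collects FUNC subtrees, then a CommonTreeNodeStream feeds that AST to this walker, which computes BigInteger results and prints them. A rough sketch of the two-pass pipeline, with the generated class names assumed as in the previous example and an empty function list passed for brevity (a real driver would hand over the FUNC subtrees gathered during parsing):

    using System.Collections.Generic;
    using Antlr.Runtime;
    using Antlr.Runtime.Tree;

    class EvalPipeline
    {
        static void Main()
        {
            var input = new ANTLRStringStream("3 * 4\n");
            var parser = new ProfileGrammarParser(
                new CommonTokenStream(new ProfileGrammarLexer(input)));   // pass 1: build the AST
            var ast = (CommonTree)parser.prog().Tree;

            var nodes = new CommonTreeNodeStream(ast);                    // pass 2: walk the AST
            var walker = new ProfileTreeGrammar(nodes, new List<CommonTree>());
            walker.prog();                                                // evaluates and prints "12 (about 1*10^1)"
        }
    }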
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammarHelper.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammarHelper.cs
deleted file mode 100644
index 47cc8a8..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammarHelper.cs
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-using System.Collections.Generic;
-using Antlr.Runtime.Tree;
-
-using BigInteger = java.math.BigInteger;
-using Console = System.Console;
-
-partial class ProfileTreeGrammar
-{
-    /** Points to functions tracked by tree builder. */
-    private List<CommonTree> functionDefinitions;
-
-    /** Remember local variables. Currently, this is only the function parameter.
-     */
-    private readonly IDictionary<string, BigInteger> localMemory = new Dictionary<string, BigInteger>();
-
-    /** Remember global variables set by =. */
-    private IDictionary<string, BigInteger> globalMemory = new Dictionary<string, BigInteger>();
-
-    /** Set up an evaluator with a node stream; and a set of function definition ASTs. */
-    public ProfileTreeGrammar( CommonTreeNodeStream nodes, List<CommonTree> functionDefinitions )
-        : this( nodes )
-    {
-        this.functionDefinitions = functionDefinitions;
-    }
-
-    /** Set up a local evaluator for a nested function call. The evaluator gets the definition
-     *  tree of the function; the set of all defined functions (to find locally called ones); a
-     *  pointer to the global variable memory; and the value of the function parameter to be
-     *  added to the local memory.
-     */
-    private ProfileTreeGrammar( CommonTree function,
-                 List<CommonTree> functionDefinitions,
-                 IDictionary<string, BigInteger> globalMemory,
-                 BigInteger paramValue )
-        // Expected tree for function: ^(FUNC ID ( INT | ID ) expr)
-        : this( new CommonTreeNodeStream( function.GetChild( 2 ) ), functionDefinitions )
-    {
-        this.globalMemory = globalMemory;
-        localMemory[function.GetChild( 1 ).Text] = paramValue;
-    }
-
-    /** Find matching function definition for a function name and parameter
-     *  value. The first definition is returned where (a) the name matches
-     *  and (b) the formal parameter agrees if it is defined as constant.
-     */
-    private CommonTree findFunction( string name, BigInteger paramValue )
-    {
-        foreach ( CommonTree f in functionDefinitions )
-        {
-            // Expected tree for f: ^(FUNC ID (ID | INT) expr)
-            if ( f.GetChild( 0 ).Text.Equals( name ) )
-            {
-                // Check whether parameter matches
-                CommonTree formalPar = (CommonTree)f.GetChild( 1 );
-                if ( formalPar.Token.Type == INT
-                    && !new BigInteger( formalPar.Token.Text ).Equals( paramValue ) )
-                {
-                    // Constant in formalPar list does not match actual value -> no match.
-                    continue;
-                }
-                // Parameter (value for INT formal arg) and function name both agree!
-                return f;
-            }
-        }
-        return null;
-    }
-
-    /** Get value of name up call stack. */
-    public BigInteger getValue( string name )
-    {
-        BigInteger value;
-        if ( localMemory.TryGetValue( name, out value ) && value != null )
-        {
-            return value;
-        }
-        if ( globalMemory.TryGetValue( name, out value ) && value != null )
-        {
-            return value;
-        }
-        // not found in local memory or global memory
-        Console.Error.WriteLine( "undefined variable " + name );
-        return new BigInteger( "0" );
-    }
-}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Reduce.g3 b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Reduce.g3
deleted file mode 100644
index 0ed570b..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Reduce.g3
+++ /dev/null
@@ -1,26 +0,0 @@
-tree grammar Reduce;
-
-options
-{
-    tokenVocab=VecMath;      
-    ASTLabelType=CommonTree; 
-    output=AST;              
-    filter=true;             
-    language=CSharp3;
-}
-
-
-@members 
-{ 
-   //public override IAstRuleReturnScope Topdown() { return topdown(); }
-   public override IAstRuleReturnScope Bottomup() { return bottomup(); } 
-} 
-
-
-/** Rewrite: x+x to be 2*x, 2*x to be x<<1, x<<n<<m to be x<<(n+m) */
-bottomup
-    :  ^(PLUS i=INT j=INT {$i.int==$j.int}?) -> ^(MULT["*"] INT["2"] $j)
-    |  ^(MULT x=INT {$x.int==2}? y=.)        -> ^(SHIFT["<<"] $y INT["1"])
-    |  ^(SHIFT ^(SHIFT e=. n=INT) m=INT)
-       -> ^(SHIFT["<<"] $e INT[($n.int+$m.int).ToString()])
-    ;
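
Reduce is a filter tree grammar (filter=true) whose bottomup alternatives perform the strength reductions named in the comment. Read against an input AST, the rewrites behave roughly as sketched below; the tree notation is ANTLR's ^(root children) form and the concrete trees are illustrative:

    ^('+' 4 4)              ->  ^('*' "2" 4)        // x+x with equal INT operands becomes 2*x
    ^('*' 2 y)              ->  ^('<<' y "1")       // 2*x becomes x<<1
    ^('<<' ^('<<' e 3) 2)   ->  ^('<<' e "5")       // x<<n<<m folds to x<<(n+m)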
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Simplify.g3 b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Simplify.g3
deleted file mode 100644
index fd1fded..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Simplify.g3
+++ /dev/null
@@ -1,25 +0,0 @@
-tree grammar Simplify;
-
-options {
-    tokenVocab=VecMath;    
-    ASTLabelType=CommonTree;
-    output=AST;
-    language=CSharp3;
-    filter=true;
-    //rewrite=true;
-}
-
-@members 
-{ 
-   public override IAstRuleReturnScope Topdown() { return topdown(); }
-   public override IAstRuleReturnScope Bottomup() { return bottomup(); } 
-} 
-
-
-topdown
-    :   ^( MULT INT ^(VEC (e+=.)+) ) -> ^(VEC ^(MULT INT $e)+)
-    ;
-
-bottomup
-    :  ^(MULT a=. b=INT {$b.int==0}?) -> $b // x*0 -> 0
-    ;
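
Simplify combines a topdown pass that distributes a scalar multiplication over the elements of a VEC node with a bottomup pass that collapses multiplication by zero. Illustratively (same tree notation, concrete trees assumed):

    ^(MULT 4 ^(VEC 1 x 3))  ->  ^(VEC ^(MULT 4 1) ^(MULT 4 x) ^(MULT 4 3))   // topdown
    ^(MULT x 0)             ->  0                                            // bottomup: x*0 -> 0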
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpression.g3 b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpression.g3
deleted file mode 100644
index 1c51490..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpression.g3
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-grammar FastSimpleExpression;
-
-options
-{
-	language=CSharp3;
-	//slim=true;
-	output=AST;
-	ASTLabelType=CommonTree;
-}
-
-@lexer::superClass{Antlr.Runtime.SlimLexer}
-
-public
-expression
-	:	additive_expression
-		EOF
-	;
-
-additive_expression
-	:	multiplicative_expression
-		(	('+'^ | '-'^)
-			multiplicative_expression
-		)*
-	;
-
-multiplicative_expression
-	:	atom
-		(	('*'^ | '/'^ | '%'^)
-			atom
-		)*
-	;
-
-atom
-	:	IDENTIFIER
-	|	NUMBER
-	;
-
-//
-// LEXER
-//
-
-IDENTIFIER
-	:	('a'..'z' | 'A'..'Z' | '_')
-		('a'..'z' | 'A'..'Z' | '_' | '0'..'9')*
-	;
-
-NUMBER
-	:	'0'..'9'+
-	;
-
-WS
-	:	(' ' | '\t' | '\n' | '\r' | '\f')
-		{$channel = Hidden;}
-	;
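
FastSimpleExpression encodes operator precedence by layering rules (additive over multiplicative over atoms) and uses the '^' suffix to make each operator the root of its subtree, while @lexer::superClass plugs SlimLexer in as the lexer base class. For an input such as 1+2*3 the layering yields an AST of roughly the following shape (illustrative):

    1 + 2 * 3   ->   ^('+' 1 ^('*' 2 3))   // '*' binds tighter because it is matched in the inner rule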
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/JavaCompat/Expr.g3 b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/JavaCompat/Expr.g3
deleted file mode 100644
index 65e7c5d..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/JavaCompat/Expr.g3
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-grammar Expr;
-
-/*
-	I had to make the following changes to the basic Expr grammar to make it work with the CSharp3 target in "Java compatibility mode".
-	For reference, see http://www.antlr.org/wiki/display/ANTLR3/Expression+evaluator.
-
-	Add an options section to set the language to CSharp3.
-
-	In the @header section, add:
-		// 'member' is obsolete
-		#pragma warning disable 612
-		using Antlr.Runtime.JavaExtensions;
-
-	In the @header section, replace:
-			import java.util.HashMap;
-		with:
-			using HashMap = System.Collections.Generic.Dictionary<object,object>;
-
-	Change all instances of "System.out" with "JSystem.@out".
-
-	Change all instances of "System.err" with "JSystem.err".
-	
-	Change all instances of "skip()" with "Skip()".
- */
-
-options
-{
-	language=CSharp3;
-}
-
-@header {
-// 'member' is obsolete
-#pragma warning disable 612
-
-using Antlr.Runtime.JavaExtensions;
-using HashMap = System.Collections.Generic.Dictionary<object,object>;
-using Integer = java.lang.Integer;
-}
-
-@members {
-/** Map variable name to Integer object holding value */
-HashMap memory = new HashMap();
-}
-
-prog:   stat+ ;
-
-stat:   expr NEWLINE {JSystem.@out.println($expr.value);}
-    |   ID '=' expr NEWLINE
-        {memory.put($ID.text, new Integer($expr.value));}
-    |   NEWLINE
-    ;
-
-expr returns [int value]
-    :   e=multExpr {$value = $e.value;}
-        (   '+' e=multExpr {$value += $e.value;}
-        |   '-' e=multExpr {$value -= $e.value;}
-        )*
-    ;
-
-multExpr returns [int value]
-    :   e=atom {$value = $e.value;} ('*' e=atom {$value *= $e.value;})*
-    ; 
-
-atom returns [int value]
-    :   INT {$value = Integer.parseInt($INT.text);}
-    |   ID
-        {
-        Integer v = (Integer)memory.get($ID.text);
-        if ( v!=null ) $value = v.intValue();
-        else JSystem.err.println("undefined variable "+$ID.text);
-        }
-    |   '(' expr ')' {$value = $expr.value;}
-    ;
-
-ID  :   ('a'..'z'|'A'..'Z')+ ;
-INT :   '0'..'9'+ ;
-NEWLINE:'\r'? '\n' ;
-WS  :   (' '|'\t')+ {Skip();} ;
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Properties/AssemblyInfo.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Properties/AssemblyInfo.cs
deleted file mode 100644
index fd2190d..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Properties/AssemblyInfo.cs
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-using System;
-using System.Reflection;
-using System.Runtime.InteropServices;
-
-// General Information about an assembly is controlled through the following 
-// set of attributes. Change these attribute values to modify the information
-// associated with an assembly.
-[assembly: AssemblyTitle( "Antlr3.Runtime.Test" )]
-[assembly: AssemblyDescription( "" )]
-[assembly: AssemblyConfiguration( "" )]
-[assembly: AssemblyCompany( "Pixel Mine, Inc." )]
-[assembly: AssemblyProduct( "Antlr3.Runtime.Test" )]
-[assembly: AssemblyCopyright("Copyright © Sam Harwell 2011")]
-[assembly: AssemblyTrademark( "" )]
-[assembly: AssemblyCulture( "" )]
-[assembly: CLSCompliant( true )]
-
-// Setting ComVisible to false makes the types in this assembly not visible 
-// to COM components.  If you need to access a type in this assembly from 
-// COM, set the ComVisible attribute to true on that type.
-[assembly: ComVisible( false )]
-
-// The following GUID is for the ID of the typelib if this project is exposed to COM
-[assembly: Guid( "1352b15b-eded-4380-9122-acde32f7ff38" )]
-
-/* Version information for an assembly consists of four values in the following order:
- *
- *   Major.Minor.Build.Revision
- *
- * These values are updated according to the following:
- *   1. Major.Minor follows the ANTLR release schedule
- *   2. Build is incremented each time the C# port is packaged for release (regardless
- *      of whether it's an incremental or nightly). The value resets to zero whenever
- *      the Major or Minor version is incremented.
- *   3. Revision is the Perforce changelist number associated with the release.
- */
-[assembly: AssemblyVersion("3.3.2.8098")]
-[assembly: AssemblyFileVersion("3.3.2.8098")]
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRStringStream.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRStringStream.cs
deleted file mode 100644
index 9904680..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRStringStream.cs
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime
-{
-    using System.Collections.Generic;
-    using ArgumentException = System.ArgumentException;
-    using ArgumentOutOfRangeException = System.ArgumentOutOfRangeException;
-    using ArgumentNullException = System.ArgumentNullException;
-
-    /** <summary>
-     *  A pretty quick CharStream that pulls all data from an array
-     *  directly.  Every method call counts in the lexer.  Java's
-     *  strings aren't very good so I'm avoiding them.
-     *  </summary>
-     */
-    [System.Serializable]
-    public class ANTLRStringStream : ICharStream
-    {
-        /** <summary>The data being scanned</summary> */
-        protected char[] data;
-
-        /** <summary>How many characters are actually in the buffer</summary> */
-        protected int n;
-
-        /** <summary>0..n-1 index into string of next char</summary> */
-        protected int p = 0;
-
-        /** <summary>line number 1..n within the input</summary> */
-        int line = 1;
-
-        /** <summary>The index of the character relative to the beginning of the line 0..n-1</summary> */
-        int charPositionInLine = 0;
-
-        /** <summary>tracks how deep mark() calls are nested</summary> */
-        protected int markDepth = 0;
-
-        /** <summary>
-         *  A list of CharStreamState objects that tracks the stream state
-         *  values line, charPositionInLine, and p that can change as you
-         *  move through the input stream.  Indexed from 1..markDepth.
-         *  A null is kept @ index 0.  Create upon first call to mark().
-         *  </summary>
-         */
-        protected IList<CharStreamState> markers;
-
-        /** <summary>Track the last mark() call result value for use in rewind().</summary> */
-        protected int lastMarker;
-
-        /** <summary>What is name or source of this char stream?</summary> */
-        public string name;
-
-        /** <summary>Copy data in string to a local char array</summary> */
-        public ANTLRStringStream( string input )
-            : this( input, null )
-        {
-        }
-
-        public ANTLRStringStream( string input, string sourceName )
-            : this( input.ToCharArray(), input.Length, sourceName )
-        {
-        }
-
-        /** <summary>This is the preferred constructor as no data is copied</summary> */
-        public ANTLRStringStream( char[] data, int numberOfActualCharsInArray )
-            : this( data, numberOfActualCharsInArray, null )
-        {
-        }
-
-        public ANTLRStringStream( char[] data, int numberOfActualCharsInArray, string sourceName )
-        {
-            if (data == null)
-                throw new ArgumentNullException("data");
-            if (numberOfActualCharsInArray < 0)
-                throw new ArgumentOutOfRangeException();
-            if (numberOfActualCharsInArray > data.Length)
-                throw new ArgumentException();
-
-            this.data = data;
-            this.n = numberOfActualCharsInArray;
-            this.name = sourceName;
-        }
-
-        protected ANTLRStringStream()
-        {
-            this.data = new char[0];
-        }
-
-        /** <summary>
-         *  Return the current input symbol index 0..n where n indicates the
-         *  last symbol has been read.  The index is the index of char to
-         *  be returned from LA(1).
-         *  </summary>
-         */
-        public virtual int Index
-        {
-            get
-            {
-                return p;
-            }
-        }
-        public virtual int Line
-        {
-            get
-            {
-                return line;
-            }
-            set
-            {
-                line = value;
-            }
-        }
-        public virtual int CharPositionInLine
-        {
-            get
-            {
-                return charPositionInLine;
-            }
-            set
-            {
-                charPositionInLine = value;
-            }
-        }
-
-        /** <summary>
-         *  Reset the stream so that it's in the same state it was
-         *  when the object was created *except* the data array is not
-         *  touched.
-         *  </summary>
-         */
-        public virtual void Reset()
-        {
-            p = 0;
-            line = 1;
-            charPositionInLine = 0;
-            markDepth = 0;
-        }
-
-        public virtual void Consume()
-        {
-            //System.out.println("prev p="+p+", c="+(char)data[p]);
-            if ( p < n )
-            {
-                charPositionInLine++;
-                if ( data[p] == '\n' )
-                {
-                    /*
-                    System.out.println("newline char found on line: "+line+
-                                       "@ pos="+charPositionInLine);
-                    */
-                    line++;
-                    charPositionInLine = 0;
-                }
-                p++;
-                //System.out.println("p moves to "+p+" (c='"+(char)data[p]+"')");
-            }
-        }
-
-        public virtual int LA( int i )
-        {
-            if ( i == 0 )
-            {
-                return 0; // undefined
-            }
-            if ( i < 0 )
-            {
-                i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
-                if ( ( p + i - 1 ) < 0 )
-                {
-                    return CharStreamConstants.EndOfFile; // invalid; no char before first char
-                }
-            }
-
-            if ( ( p + i - 1 ) >= n )
-            {
-                //System.out.println("char LA("+i+")=EOF; p="+p);
-                return CharStreamConstants.EndOfFile;
-            }
-            //System.out.println("char LA("+i+")="+(char)data[p+i-1]+"; p="+p);
-            //System.out.println("LA("+i+"); p="+p+" n="+n+" data.length="+data.length);
-            return data[p + i - 1];
-        }
-
-        public virtual int LT( int i )
-        {
-            return LA( i );
-        }
-
-        public virtual int Count
-        {
-            get
-            {
-                return n;
-            }
-        }
-
-        public virtual int Mark()
-        {
-            if ( markers == null )
-            {
-                markers = new List<CharStreamState>();
-                markers.Add( null ); // depth 0 means no backtracking, leave blank
-            }
-            markDepth++;
-            CharStreamState state = null;
-            if ( markDepth >= markers.Count )
-            {
-                state = new CharStreamState();
-                markers.Add( state );
-            }
-            else
-            {
-                state = markers[markDepth];
-            }
-            state.p = p;
-            state.line = line;
-            state.charPositionInLine = charPositionInLine;
-            lastMarker = markDepth;
-            return markDepth;
-        }
-
-        public virtual void Rewind( int m )
-        {
-            if (m < 0)
-                throw new ArgumentOutOfRangeException();
-
-            //if (m > markDepth)
-            //    throw new ArgumentException();
-
-            CharStreamState state = markers[m];
-            // restore stream state
-            Seek( state.p );
-            line = state.line;
-            charPositionInLine = state.charPositionInLine;
-            Release( m );
-        }
-
-        public virtual void Rewind()
-        {
-            Rewind( lastMarker );
-        }
-
-        public virtual void Release( int marker )
-        {
-            // unwind any other markers made after m and release m
-            markDepth = marker;
-            // release this marker
-            markDepth--;
-        }
-
-        /** <summary>
-         *  consume() ahead until p==index; can't just set p=index as we must
-         *  update line and charPositionInLine.
-         *  </summary>
-         */
-        public virtual void Seek( int index )
-        {
-            if ( index <= p )
-            {
-                p = index; // just jump; don't update stream state (line, ...)
-                return;
-            }
-            // seek forward, consume until p hits index
-            while ( p < index )
-            {
-                Consume();
-            }
-        }
-
-        public virtual string Substring( int start, int length )
-        {
-            if (start < 0)
-                throw new ArgumentOutOfRangeException();
-            if (length < 0)
-                throw new ArgumentOutOfRangeException();
-            if (start + length > data.Length)
-                throw new ArgumentException();
-
-            if (length == 0)
-                return string.Empty;
-
-            return new string( data, start, length );
-        }
-
-        public virtual string SourceName
-        {
-            get
-            {
-                return name;
-            }
-        }
-
-        public override string ToString()
-        {
-            return new string(data);
-        }
-    }
-}
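
ANTLRStringStream keeps line and charPositionInLine bookkeeping as it consumes characters, and supports speculative scanning through Mark()/Rewind(), which snapshot and restore that state. A minimal usage sketch against the API shown above (the demo class name is illustrative):

    using Antlr.Runtime;

    class MarkRewindDemo
    {
        static void Main()
        {
            ICharStream input = new ANTLRStringStream("abc\ndef");
            int m = input.Mark();                         // snapshot p, line, charPositionInLine
            input.Consume();                              // past 'a'
            input.Consume();                              // past 'b'
            System.Console.WriteLine((char)input.LA(1));  // prints 'c'
            input.Rewind(m);                              // restore the snapshot and release the marker
            System.Console.WriteLine((char)input.LA(1));  // prints 'a' again
        }
    }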
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Antlr3.Runtime.csproj b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Antlr3.Runtime.csproj
deleted file mode 100644
index 6de4bf7..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Antlr3.Runtime.csproj
+++ /dev/null
@@ -1,146 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <PropertyGroup>
-    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
-    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
-    <ProductVersion>9.0.30729</ProductVersion>
-    <SchemaVersion>2.0</SchemaVersion>
-    <ProjectGuid>{8FDC0A87-9005-4D5A-AB75-E55CEB575559}</ProjectGuid>
-    <OutputType>Library</OutputType>
-    <AppDesignerFolder>Properties</AppDesignerFolder>
-    <RootNamespace>Antlr.Runtime</RootNamespace>
-    <AssemblyName>Antlr3.Runtime</AssemblyName>
-    <TargetFrameworkVersion>v2.0</TargetFrameworkVersion>
-    <FileAlignment>512</FileAlignment>
-    <SccProjectName>SAK</SccProjectName>
-    <SccLocalPath>SAK</SccLocalPath>
-    <SccAuxPath>SAK</SccAuxPath>
-    <SccProvider>SAK</SccProvider>
-    <SignAssembly>true</SignAssembly>
-    <AssemblyOriginatorKeyFile>..\..\..\..\..\..\..\keys\antlr\Key.snk</AssemblyOriginatorKeyFile>
-    <TargetFrameworkProfile>
-    </TargetFrameworkProfile>
-  </PropertyGroup>
-  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
-    <DebugSymbols>true</DebugSymbols>
-    <DebugType>full</DebugType>
-    <Optimize>false</Optimize>
-    <OutputPath>bin\Debug\</OutputPath>
-    <DefineConstants>DEBUG;TRACE</DefineConstants>
-    <ErrorReport>prompt</ErrorReport>
-    <WarningLevel>4</WarningLevel>
-  </PropertyGroup>
-  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
-    <DebugType>pdbonly</DebugType>
-    <Optimize>true</Optimize>
-    <OutputPath>bin\Release\</OutputPath>
-    <DefineConstants>TRACE</DefineConstants>
-    <ErrorReport>prompt</ErrorReport>
-    <WarningLevel>4</WarningLevel>
-  </PropertyGroup>
-  <ItemGroup>
-    <Reference Include="System" />
-  </ItemGroup>
-  <ItemGroup>
-    <Compile Include="ANTLRFileStream.cs" />
-    <Compile Include="ANTLRInputStream.cs" />
-    <Compile Include="ANTLRReaderStream.cs" />
-    <Compile Include="ANTLRStringStream.cs" />
-    <Compile Include="AstParserRuleReturnScope`2.cs" />
-    <Compile Include="BaseRecognizer.cs" />
-    <Compile Include="BitSet.cs" />
-    <Compile Include="BufferedTokenStream.cs" />
-    <Compile Include="CharStreamConstants.cs" />
-    <Compile Include="CharStreamState.cs" />
-    <Compile Include="ClassicToken.cs" />
-    <Compile Include="CommonToken.cs" />
-    <Compile Include="CommonTokenStream.cs" />
-    <Compile Include="Debug\IDebugEventListener.cs" />
-    <Compile Include="DFA.cs" />
-    <Compile Include="EarlyExitException.cs" />
-    <Compile Include="FailedPredicateException.cs" />
-    <Compile Include="GrammarRuleAttribute.cs" />
-    <Compile Include="IAstRuleReturnScope.cs" />
-    <Compile Include="IAstRuleReturnScope`1.cs" />
-    <Compile Include="ICharStream.cs" />
-    <Compile Include="IIntStream.cs" />
-    <Compile Include="IRuleReturnScope.cs" />
-    <Compile Include="IRuleReturnScope`1.cs" />
-    <Compile Include="ITemplateRuleReturnScope.cs" />
-    <Compile Include="ITemplateRuleReturnScope`1.cs" />
-    <Compile Include="IToken.cs" />
-    <Compile Include="ITokenSource.cs" />
-    <Compile Include="ITokenStream.cs" />
-    <Compile Include="ITokenStreamInformation.cs" />
-    <Compile Include="LegacyCommonTokenStream.cs" />
-    <Compile Include="Lexer.cs" />
-    <Compile Include="Misc\FastQueue.cs" />
-    <Compile Include="Misc\FunctionDelegates.cs" />
-    <Compile Include="Misc\ListStack`1.cs" />
-    <Compile Include="Misc\LookaheadStream.cs" />
-    <Compile Include="MismatchedNotSetException.cs" />
-    <Compile Include="MismatchedRangeException.cs" />
-    <Compile Include="MismatchedSetException.cs" />
-    <Compile Include="MismatchedTokenException.cs" />
-    <Compile Include="MismatchedTreeNodeException.cs" />
-    <Compile Include="MissingTokenException.cs" />
-    <Compile Include="NoViableAltException.cs" />
-    <Compile Include="Parser.cs" />
-    <Compile Include="ParserRuleReturnScope.cs" />
-    <Compile Include="Properties\AssemblyInfo.cs" />
-    <Compile Include="RecognitionException.cs" />
-    <Compile Include="RecognizerSharedState.cs" />
-    <Compile Include="TemplateParserRuleReturnScope`2.cs" />
-    <Compile Include="TokenChannels.cs" />
-    <Compile Include="TokenRewriteStream.cs" />
-    <Compile Include="Tokens.cs" />
-    <Compile Include="TokenTypes.cs" />
-    <Compile Include="Tree\AstTreeRuleReturnScope`2.cs" />
-    <Compile Include="Tree\BaseTree.cs" />
-    <Compile Include="Tree\BaseTreeAdaptor.cs" />
-    <Compile Include="Tree\AntlrRuntime_BaseTreeDebugView.cs" />
-    <Compile Include="Tree\BufferedTreeNodeStream.cs" />
-    <Compile Include="Tree\CommonErrorNode.cs" />
-    <Compile Include="Tree\CommonTree.cs" />
-    <Compile Include="Tree\CommonTreeAdaptor.cs" />
-    <Compile Include="Tree\CommonTreeNodeStream.cs" />
-    <Compile Include="Tree\DotTreeGenerator.cs" />
-    <Compile Include="Tree\ITree.cs" />
-    <Compile Include="Tree\ITreeAdaptor.cs" />
-    <Compile Include="Tree\ITreeNodeStream.cs" />
-    <Compile Include="Tree\ITreeVisitorAction.cs" />
-    <Compile Include="Tree\ParseTree.cs" />
-    <Compile Include="Tree\RewriteCardinalityException.cs" />
-    <Compile Include="Tree\RewriteEarlyExitException.cs" />
-    <Compile Include="Tree\RewriteEmptyStreamException.cs" />
-    <Compile Include="Tree\RewriteRuleElementStream.cs" />
-    <Compile Include="Tree\RewriteRuleNodeStream.cs" />
-    <Compile Include="Tree\RewriteRuleSubtreeStream.cs" />
-    <Compile Include="Tree\RewriteRuleTokenStream.cs" />
-    <Compile Include="Tree\TemplateTreeRuleReturnScope`2.cs" />
-    <Compile Include="Tree\TreeFilter.cs" />
-    <Compile Include="Tree\TreeIterator.cs" />
-    <Compile Include="Tree\TreeParser.cs" />
-    <Compile Include="Tree\TreePatternLexer.cs" />
-    <Compile Include="Tree\TreePatternParser.cs" />
-    <Compile Include="Tree\TreeRewriter.cs" />
-    <Compile Include="Tree\TreeRuleReturnScope`1.cs" />
-    <Compile Include="Tree\TreeVisitor.cs" />
-    <Compile Include="Tree\TreeWizard.cs" />
-    <Compile Include="UnbufferedTokenStream.cs" />
-    <Compile Include="UnwantedTokenException.cs" />
-  </ItemGroup>
-  <ItemGroup>
-    <None Include="..\..\..\..\..\..\..\keys\antlr\Key.snk">
-      <Link>Key.snk</Link>
-    </None>
-  </ItemGroup>
-  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
-       Other similar extension points exist, see Microsoft.Common.targets.
-  <Target Name="BeforeBuild">
-  </Target>
-  <Target Name="AfterBuild">
-  </Target>
-  -->
-</Project>
\ No newline at end of file
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/BaseRecognizer.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/BaseRecognizer.cs
deleted file mode 100644
index 5ba18f5..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/BaseRecognizer.cs
+++ /dev/null
@@ -1,1184 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime
-{
-    using System.Collections.Generic;
-
-    using ArgumentNullException = System.ArgumentNullException;
-    using Array = System.Array;
-    using Conditional = System.Diagnostics.ConditionalAttribute;
-    using Exception = System.Exception;
-    using IDebugEventListener = Antlr.Runtime.Debug.IDebugEventListener;
-    using MethodBase = System.Reflection.MethodBase;
-    using NotSupportedException = System.NotSupportedException;
-    using Regex = System.Text.RegularExpressions.Regex;
-    using StackFrame = System.Diagnostics.StackFrame;
-    using StackTrace = System.Diagnostics.StackTrace;
-    using TextWriter = System.IO.TextWriter;
-    using Type = System.Type;
-
-    /** <summary>
-     *  A generic recognizer that can handle recognizers generated from
-     *  lexer, parser, and tree grammars.  This is all the parsing
-     *  support code essentially; most of it is error recovery stuff and
-     *  backtracking.
-     *  </summary>
-     */
-    public abstract class BaseRecognizer
-    {
-        public const int MemoRuleFailed = -2;
-        public const int MemoRuleUnknown = -1;
-        public const int InitialFollowStackSize = 100;
-
-        // copies from Token object for convenience in actions
-        public const int DefaultTokenChannel = TokenChannels.Default;
-        public const int Hidden = TokenChannels.Hidden;
-
-        public const string NextTokenRuleName = "nextToken";
-
-        /** <summary>
-         *  State of a lexer, parser, or tree parser are collected into a state
-         *  object so the state can be shared.  This sharing is needed to
-         *  have one grammar import others and share same error variables
-         *  and other state variables.  It's a kind of explicit multiple
-         *  inheritance via delegation of methods and shared state.
-         *  </summary>
-         */
-        protected internal RecognizerSharedState state;
-
-        public BaseRecognizer()
-            : this(new RecognizerSharedState())
-        {
-        }
-
-        public BaseRecognizer( RecognizerSharedState state )
-        {
-            if ( state == null )
-            {
-                state = new RecognizerSharedState();
-            }
-            this.state = state;
-            InitDFAs();
-        }
-
-        public TextWriter TraceDestination
-        {
-            get;
-            set;
-        }
-
-        protected virtual void InitDFAs()
-        {
-        }
-
-        /** <summary>reset the parser's state; subclasses must rewind the input stream</summary> */
-        public virtual void Reset()
-        {
-            // wack everything related to error recovery
-            if ( state == null )
-            {
-                return; // no shared state work to do
-            }
-            state._fsp = -1;
-            state.errorRecovery = false;
-            state.lastErrorIndex = -1;
-            state.failed = false;
-            state.syntaxErrors = 0;
-            // wack everything related to backtracking and memoization
-            state.backtracking = 0;
-            for ( int i = 0; state.ruleMemo != null && i < state.ruleMemo.Length; i++ )
-            { // wipe cache
-                state.ruleMemo[i] = null;
-            }
-        }
-
-
-        /** <summary>
-         *  Match current input symbol against ttype.  Attempt
-         *  single token insertion or deletion error recovery.  If
-         *  that fails, throw MismatchedTokenException.
-         *  </summary>
-         *
-         *  <remarks>
-         *  To turn off single token insertion or deletion error
-         *  recovery, override recoverFromMismatchedToken() and have it
-         *  throw an exception. See TreeParser.recoverFromMismatchedToken().
-         *  This way any error in a rule will cause an exception and
-         *  immediate exit from rule.  Rule would recover by resynchronizing
-         *  to the set of symbols that can follow rule ref.
-         *  </remarks>
-         */
-        public virtual object Match( IIntStream input, int ttype, BitSet follow )
-        {
-            //System.out.println("match "+((TokenStream)input).LT(1));
-            object matchedSymbol = GetCurrentInputSymbol( input );
-            if ( input.LA( 1 ) == ttype )
-            {
-                input.Consume();
-                state.errorRecovery = false;
-                state.failed = false;
-                return matchedSymbol;
-            }
-            if ( state.backtracking > 0 )
-            {
-                state.failed = true;
-                return matchedSymbol;
-            }
-            matchedSymbol = RecoverFromMismatchedToken( input, ttype, follow );
-            return matchedSymbol;
-        }
-
-        /** <summary>Match the wildcard: in a symbol</summary> */
-        public virtual void MatchAny( IIntStream input )
-        {
-            state.errorRecovery = false;
-            state.failed = false;
-            input.Consume();
-        }
-
-        public virtual bool MismatchIsUnwantedToken( IIntStream input, int ttype )
-        {
-            return input.LA( 2 ) == ttype;
-        }
-
-        public virtual bool MismatchIsMissingToken( IIntStream input, BitSet follow )
-        {
-            if ( follow == null )
-            {
-                // we have no information about the follow; we can only consume
-                // a single token and hope for the best
-                return false;
-            }
-            // compute what can follow this grammar element reference
-            if ( follow.Member( TokenTypes.EndOfRule ) )
-            {
-                BitSet viableTokensFollowingThisRule = ComputeContextSensitiveRuleFOLLOW();
-                follow = follow.Or( viableTokensFollowingThisRule );
-                if ( state._fsp >= 0 )
-                { // remove EOR if we're not the start symbol
-                    follow.Remove( TokenTypes.EndOfRule );
-                }
-            }
-            // if current token is consistent with what could come after set
-            // then we know we're missing a token; error recovery is free to
-            // "insert" the missing token
-
-            //System.out.println("viable tokens="+follow.toString(getTokenNames()));
-            //System.out.println("LT(1)="+((TokenStream)input).LT(1));
-
-            // BitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
-            // in the follow set to indicate that falling off the end of the start
-            // rule is in the set (EOF can follow).
-            if ( follow.Member( input.LA( 1 ) ) || follow.Member( TokenTypes.EndOfRule ) )
-            {
-                //System.out.println("LT(1)=="+((TokenStream)input).LT(1)+" is consistent with what follows; inserting...");
-                return true;
-            }
-            return false;
-        }
-
-        /** <summary>Report a recognition problem.</summary>
-         *
-         *  <remarks>
-         *  This method sets errorRecovery to indicate the parser is recovering
-         *  not parsing.  Once in recovery mode, no errors are generated.
-         *  To get out of recovery mode, the parser must successfully match
-         *  a token (after a resync).  So it will go:
-         *
-         * 		1. error occurs
-         * 		2. enter recovery mode, report error
-         * 		3. consume until token found in resynch set
-         * 		4. try to resume parsing
-         * 		5. next match() will reset errorRecovery mode
-         *
-         *  If you override, make sure to update syntaxErrors if you care about that.
-         *  </remarks>
-         */
-        public virtual void ReportError( RecognitionException e )
-        {
-            // if we've already reported an error and have not matched a token
-            // yet successfully, don't report any errors.
-            if ( state.errorRecovery )
-            {
-                //System.err.print("[SPURIOUS] ");
-                return;
-            }
-            state.syntaxErrors++; // don't count spurious
-            state.errorRecovery = true;
-
-            DisplayRecognitionError( this.TokenNames, e );
-        }
-
-        public virtual void DisplayRecognitionError( string[] tokenNames,
-                                            RecognitionException e )
-        {
-            string hdr = GetErrorHeader( e );
-            string msg = GetErrorMessage( e, tokenNames );
-            EmitErrorMessage( hdr + " " + msg );
-        }
-
-        /** <summary>What error message should be generated for the various exception types?</summary>
-         *
-         *  <remarks>
-         *  Not very object-oriented code, but I like having all error message
-         *  generation within one method rather than spread among all of the
-         *  exception classes. This also makes it much easier for the exception
-         *  handling because the exception classes do not have to have pointers back
-         *  to this object to access utility routines and so on. Also, changing
-         *  the message for an exception type would be difficult because you
-         *  would have to subclass the exception, but then somehow get ANTLR
-         *  to make those kinds of exception objects instead of the default.
-         *  This looks weird, but trust me--it makes the most sense in terms
-         *  of flexibility.
-         *
-         *  For grammar debugging, you will want to override this to add
-         *  more information, such as the rule invocation stack with
-         *  GetRuleInvocationStack() and,
-         *  for no viable alts, the decision description and state etc...
-         *
-         *  Override this to change the message generated for one or more
-         *  exception types.
-         *  </remarks>
-         */
-        public virtual string GetErrorMessage( RecognitionException e, string[] tokenNames )
-        {
-            string msg = e.Message;
-            if ( e is UnwantedTokenException )
-            {
-                UnwantedTokenException ute = (UnwantedTokenException)e;
-                string tokenName = "<unknown>";
-                if ( ute.Expecting == TokenTypes.EndOfFile )
-                {
-                    tokenName = "EndOfFile";
-                }
-                else
-                {
-                    tokenName = tokenNames[ute.Expecting];
-                }
-                msg = "extraneous input " + GetTokenErrorDisplay( ute.UnexpectedToken ) +
-                    " expecting " + tokenName;
-            }
-            else if ( e is MissingTokenException )
-            {
-                MissingTokenException mte = (MissingTokenException)e;
-                string tokenName = "<unknown>";
-                if ( mte.Expecting == TokenTypes.EndOfFile )
-                {
-                    tokenName = "EndOfFile";
-                }
-                else
-                {
-                    tokenName = tokenNames[mte.Expecting];
-                }
-                msg = "missing " + tokenName + " at " + GetTokenErrorDisplay( e.Token );
-            }
-            else if ( e is MismatchedTokenException )
-            {
-                MismatchedTokenException mte = (MismatchedTokenException)e;
-                string tokenName = "<unknown>";
-                if ( mte.Expecting == TokenTypes.EndOfFile )
-                {
-                    tokenName = "EndOfFile";
-                }
-                else
-                {
-                    tokenName = tokenNames[mte.Expecting];
-                }
-                msg = "mismatched input " + GetTokenErrorDisplay( e.Token ) +
-                    " expecting " + tokenName;
-            }
-            else if ( e is MismatchedTreeNodeException )
-            {
-                MismatchedTreeNodeException mtne = (MismatchedTreeNodeException)e;
-                string tokenName = "<unknown>";
-                if ( mtne.Expecting == TokenTypes.EndOfFile )
-                {
-                    tokenName = "EndOfFile";
-                }
-                else
-                {
-                    tokenName = tokenNames[mtne.Expecting];
-                }
-                // workaround for a .NET framework bug (NullReferenceException)
-                string nodeText = ( mtne.Node != null ) ? mtne.Node.ToString() ?? string.Empty : string.Empty;
-                msg = "mismatched tree node: " + nodeText + " expecting " + tokenName;
-            }
-            else if ( e is NoViableAltException )
-            {
-                //NoViableAltException nvae = (NoViableAltException)e;
-                // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
-                // and "(decision="+nvae.decisionNumber+") and
-                // "state "+nvae.stateNumber
-                msg = "no viable alternative at input " + GetTokenErrorDisplay( e.Token );
-            }
-            else if ( e is EarlyExitException )
-            {
-                //EarlyExitException eee = (EarlyExitException)e;
-                // for development, can add "(decision="+eee.decisionNumber+")"
-                msg = "required (...)+ loop did not match anything at input " +
-                    GetTokenErrorDisplay( e.Token );
-            }
-            else if ( e is MismatchedSetException )
-            {
-                MismatchedSetException mse = (MismatchedSetException)e;
-                msg = "mismatched input " + GetTokenErrorDisplay( e.Token ) +
-                    " expecting set " + mse.Expecting;
-            }
-            else if ( e is MismatchedNotSetException )
-            {
-                MismatchedNotSetException mse = (MismatchedNotSetException)e;
-                msg = "mismatched input " + GetTokenErrorDisplay( e.Token ) +
-                    " expecting set " + mse.Expecting;
-            }
-            else if ( e is FailedPredicateException )
-            {
-                FailedPredicateException fpe = (FailedPredicateException)e;
-                msg = "rule " + fpe.RuleName + " failed predicate: {" +
-                    fpe.PredicateText + "}?";
-            }
-            return msg;
-        }
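As a rough sketch of the override suggested in the remarks above (assuming a
hypothetical generated parser subclass), one could append the rule invocation
stack to the default message for grammar debugging:

        public override string GetErrorMessage( RecognitionException e, string[] tokenNames )
        {
            string msg = base.GetErrorMessage( e, tokenNames );
            IList<string> stack = GetRuleInvocationStack();
            return msg + " (rule stack: " + string.Join( " ", new List<string>( stack ).ToArray() ) + ")";
        }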
-
-        /** <summary>
-         *  Get number of recognition errors (lexer, parser, tree parser).  Each
-         *  recognizer tracks its own number, so the parser and lexer each have
-         *  a separate count.  This does not count the spurious errors found
-         *  between an error and the next valid token match.
-         *  </summary>
-         *
-         *  <seealso cref="reportError()"/>
-         */
-        public virtual int NumberOfSyntaxErrors
-        {
-            get
-            {
-                return state.syntaxErrors;
-            }
-        }
-
-        /** <summary>What is the error header, normally line/character position information?</summary> */
-        public virtual string GetErrorHeader( RecognitionException e )
-        {
-            string prefix = SourceName ?? string.Empty;
-            if (prefix.Length > 0)
-                prefix += ' ';
-
-            return string.Format("{0}line {1}:{2}", prefix, e.Line, e.CharPositionInLine + 1);
-        }
-
-        /** <summary>
-         *  How should a token be displayed in an error message? The default
-         *  is to display just the text, but during development you might
-         *  want to have a lot of information spit out.  Override in that case
-         *  to use t.ToString() (which, for CommonToken, dumps everything about
-         *  the token). This is better than forcing you to override a method in
-         *  your token objects because you don't have to go modify your lexer
-         *  so that it creates a new token class.
-         *  </summary>
-         */
-        public virtual string GetTokenErrorDisplay( IToken t )
-        {
-            string s = t.Text;
-            if ( s == null )
-            {
-                if ( t.Type == TokenTypes.EndOfFile )
-                {
-                    s = "<EOF>";
-                }
-                else
-                {
-                    s = "<" + t.Type + ">";
-                }
-            }
-            s = Regex.Replace( s, "\n", "\\n" );
-            s = Regex.Replace( s, "\r", "\\r" );
-            s = Regex.Replace( s, "\t", "\\t" );
-            return "'" + s + "'";
-        }
-
-        /** <summary>Override this method to change where error messages go</summary> */
-        public virtual void EmitErrorMessage( string msg )
-        {
-            if (TraceDestination != null)
-                TraceDestination.WriteLine( msg );
-        }
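The summary above invites redirecting error output; as a minimal sketch, a
hypothetical parser subclass could send messages to standard error instead of
the TraceDestination writer:

        public override void EmitErrorMessage( string msg )
        {
            System.Console.Error.WriteLine( msg );
        }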
-
-        /** <summary>
-         *  Recover from an error found on the input stream.  This is
-         *  for NoViableAlt and mismatched symbol exceptions.  If you enable
-         *  single token insertion and deletion, this will usually not
-         *  handle mismatched symbol exceptions but there could be a mismatched
-         *  token that the match() routine could not recover from.
-         *  </summary>
-         */
-        public virtual void Recover( IIntStream input, RecognitionException re )
-        {
-            if ( state.lastErrorIndex == input.Index )
-            {
-                // uh oh, another error at same token index; must be a case
-                // where LT(1) is in the recovery token set so nothing is
-                // consumed; consume a single token so at least to prevent
-                // an infinite loop; this is a failsafe.
-                input.Consume();
-            }
-            state.lastErrorIndex = input.Index;
-            BitSet followSet = ComputeErrorRecoverySet();
-            BeginResync();
-            ConsumeUntil( input, followSet );
-            EndResync();
-        }
-
-        /** <summary>
-         *  A hook to listen in on the token consumption during error recovery.
-         *  The DebugParser subclasses this to fire events to the listener.
-         *  </summary>
-         */
-        public virtual void BeginResync()
-        {
-        }
-
-        public virtual void EndResync()
-        {
-        }
-
-        /*  Compute the error recovery set for the current rule.  During
-         *  rule invocation, the parser pushes the set of tokens that can
-         *  follow that rule reference on the stack; this amounts to
-         *  computing FIRST of what follows the rule reference in the
-         *  enclosing rule. This local follow set only includes tokens
-         *  from within the rule; i.e., the FIRST computation done by
-         *  ANTLR stops at the end of a rule.
-         *
-         *  EXAMPLE
-         *
-         *  When you find a "no viable alt exception", the input is not
-         *  consistent with any of the alternatives for rule r.  The best
-         *  thing to do is to consume tokens until you see something that
-         *  can legally follow a call to r *or* any rule that called r.
-         *  You don't want the exact set of viable next tokens because the
-         *  input might just be missing a token--you might consume the
-         *  rest of the input looking for one of the missing tokens.
-         *
-         *  Consider grammar:
-         *
-         *  a : '[' b ']'
-         *    | '(' b ')'
-         *    ;
-         *  b : c '^' INT ;
-         *  c : ID
-         *    | INT
-         *    ;
-         *
-         *  At each rule invocation, the set of tokens that could follow
-         *  that rule is pushed on a stack.  Here are the various "local"
-         *  follow sets:
-         *
-         *  FOLLOW(b1_in_a) = FIRST(']') = ']'
-         *  FOLLOW(b2_in_a) = FIRST(')') = ')'
-         *  FOLLOW(c_in_b) = FIRST('^') = '^'
-         *
-         *  Upon erroneous input "[]", the call chain is
-         *
-         *  a -> b -> c
-         *
-         *  and, hence, the follow context stack is:
-         *
-         *  depth  local follow set     after call to rule
-         *    0         <EOF>                    a (from main())
-         *    1          ']'                     b
-         *    2          '^'                     c
-         *
-         *  Notice that ')' is not included, because b would have to have
-         *  been called from a different context in rule a for ')' to be
-         *  included.
-         *
-         *  For error recovery, we cannot consider FOLLOW(c)
-         *  (context-sensitive or otherwise).  We need the combined set of
-         *  all context-sensitive FOLLOW sets--the set of all tokens that
-         *  could follow any reference in the call chain.  We need to
-         *  resync to one of those tokens.  Note that FOLLOW(c)='^' and if
-         *  we resync'd to that token, we'd consume until EOF.  We need to
-         *  sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
-         *  In this case, for input "[]", LA(1) is in this set so we would
-         *  not consume anything and, after printing an error, rule c would
-         *  return normally.  It would not find the required '^' though.
-         *  At this point, it gets a mismatched token error and throws an
-         *  exception (since LA(1) is not in the viable following token
-         *  set).  The rule exception handler tries to recover, but finds
-         *  the same recovery set and doesn't consume anything.  Rule b
-         *  exits normally returning to rule a.  Now it finds the ']' (and
-         *  with the successful match exits errorRecovery mode).
-         *
-         *  So, you can see that the parser walks up the call chain looking
-         *  for a token that is a member of the recovery set.
-         *
-         *  Errors are not generated in errorRecovery mode.
-         *
-         *  ANTLR's error recovery mechanism is based upon original ideas:
-         *
-         *  "Algorithms + Data Structures = Programs" by Niklaus Wirth
-         *
-         *  and
-         *
-         *  "A note on error recovery in recursive descent parsers":
-         *  http://portal.acm.org/citation.cfm?id=947902.947905
-         *
-         *  Later, Josef Grosch had some good ideas:
-         *
-         *  "Efficient and Comfortable Error Recovery in Recursive Descent
-         *  Parsers":
-         *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
-         *
-         *  Like Grosch I implemented local FOLLOW sets that are combined
-         *  at run-time upon error to avoid overhead during parsing.
-         */
-        protected virtual BitSet ComputeErrorRecoverySet()
-        {
-            return CombineFollows( false );
-        }
-
-        /** <summary>
-         *  Compute the context-sensitive FOLLOW set for current rule.
-         *  This is set of token types that can follow a specific rule
-         *  reference given a specific call chain.  You get the set of
-         *  viable tokens that can possibly come next (lookahead depth 1)
-         *  given the current call chain.  Contrast this with the
-         *  definition of plain FOLLOW for rule r:
-         *  </summary>
-         *
-         *   FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
-         *
-         *  where x in T and alpha, beta in V*; T is the set of terminals and
-         *  V is the set of terminals and nonterminals.  In other words,
-         *  FOLLOW(r) is the set of all tokens that can possibly follow
-         *  references to r in *any* sentential form (context).  At
-         *  runtime, however, we know precisely which context applies as
-         *  we have the call chain.  We may compute the exact (rather
-         *  than covering superset) set of following tokens.
-         *
-         *  For example, consider grammar:
-         *
-         *  stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
-         *       | "return" expr '.'
-         *       ;
-         *  expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
-         *  atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
-         *       | '(' expr ')'
-         *       ;
-         *
-         *  The FOLLOW sets are all inclusive whereas context-sensitive
-         *  FOLLOW sets are precisely what could follow a rule reference.
-         *  For input "i=(3);", here is the derivation:
-         *
-         *  stat => ID '=' expr ';'
-         *       => ID '=' atom ('+' atom)* ';'
-         *       => ID '=' '(' expr ')' ('+' atom)* ';'
-         *       => ID '=' '(' atom ')' ('+' atom)* ';'
-         *       => ID '=' '(' INT ')' ('+' atom)* ';'
-         *       => ID '=' '(' INT ')' ';'
-         *
-         *  At the "3" token, you'd have a call chain of
-         *
-         *    stat -> expr -> atom -> expr -> atom
-         *
-         *  What can follow that specific nested ref to atom?  Exactly ')'
-         *  as you can see by looking at the derivation of this specific
-         *  input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
-         *
-         *  You want the exact viable token set when recovering from a
-         *  token mismatch.  Upon token mismatch, if LA(1) is member of
-         *  the viable next token set, then you know there is most likely
-         *  a missing token in the input stream.  "Insert" one by just not
-         *  throwing an exception.
-         */
-        protected virtual BitSet ComputeContextSensitiveRuleFOLLOW()
-        {
-            return CombineFollows( true );
-        }
-
-        // what is exact? it seems to only add sets from above on stack
-        // if EOR is in set i.  When it sees a set w/o EOR, it stops adding.
-        // Why would we ever want them all?  Maybe no viable alt instead of
-        // mismatched token?
-        protected virtual BitSet CombineFollows(bool exact)
-        {
-            int top = state._fsp;
-            BitSet followSet = new BitSet();
-            for ( int i = top; i >= 0; i-- )
-            {
-                BitSet localFollowSet = (BitSet)state.following[i];
-                /*
-                System.out.println("local follow depth "+i+"="+
-                                   localFollowSet.toString(getTokenNames())+")");
-                 */
-                followSet.OrInPlace( localFollowSet );
-                if ( exact )
-                {
-                    // can we see end of rule?
-                    if ( localFollowSet.Member( TokenTypes.EndOfRule ) )
-                    {
-                        // Only leave EOR in set if at top (start rule); this lets
-                        // us know if we have to include follow(start rule); i.e., EOF
-                        if ( i > 0 )
-                        {
-                            followSet.Remove( TokenTypes.EndOfRule );
-                        }
-                    }
-                    else
-                    { // can't see end of rule, quit
-                        break;
-                    }
-                }
-            }
-            return followSet;
-        }
-
-        /** <summary>Attempt to recover from a single missing or extra token.</summary>
-         *
-         *  EXTRA TOKEN
-         *
-         *  LA(1) is not what we are looking for.  If LA(2) has the right token,
-         *  however, then assume LA(1) is some extra spurious token.  Delete it
-         *  and then match LA(2) as if we were doing a normal match(), which
-         *  advances the input.
-         *
-         *  MISSING TOKEN
-         *
-         *  If the current token is consistent with what could come after
-         *  ttype, then it is ok to "insert" the missing token; otherwise, throw
-         *  an exception.  For example, the input "i=(3;" is clearly missing the
-         *  ')'.  When the parser returns from the nested call to expr, it
-         *  will have call chain:
-         *
-         *    stat -> expr -> atom
-         *
-         *  and it will be trying to match the ')' at this point in the
-         *  derivation:
-         *
-         *       => ID '=' '(' INT ')' ('+' atom)* ';'
-         *                          ^
-         *  match() will see that ';' doesn't match ')' and report a
-         *  mismatched token error.  To recover, it sees that LA(1)==';'
-         *  is in the set of tokens that can follow the ')' token
-         *  reference in rule atom.  It can assume that you forgot the ')'.
-         */
-        protected virtual object RecoverFromMismatchedToken( IIntStream input, int ttype, BitSet follow )
-        {
-            RecognitionException e = null;
-            // if next token is what we are looking for then "delete" this token
-            if ( MismatchIsUnwantedToken( input, ttype ) )
-            {
-                e = new UnwantedTokenException( ttype, input, TokenNames );
-                /*
-                System.err.println("recoverFromMismatchedToken deleting "+
-                                   ((TokenStream)input).LT(1)+
-                                   " since "+((TokenStream)input).LT(2)+" is what we want");
-                 */
-                BeginResync();
-                input.Consume(); // simply delete extra token
-                EndResync();
-                ReportError( e );  // report after consuming so AW sees the token in the exception
-                // we want to return the token we're actually matching
-                object matchedSymbol = GetCurrentInputSymbol( input );
-                input.Consume(); // move past ttype token as if all were ok
-                return matchedSymbol;
-            }
-            // can't recover with single token deletion, try insertion
-            if ( MismatchIsMissingToken( input, follow ) )
-            {
-                object inserted = GetMissingSymbol( input, e, ttype, follow );
-                e = new MissingTokenException( ttype, input, inserted );
-                ReportError( e );  // report after inserting so AW sees the token in the exception
-                return inserted;
-            }
-            // even that didn't work; must throw the exception
-            e = new MismatchedTokenException(ttype, input, TokenNames);
-            throw e;
-        }
-
-        /** Not currently used */
-        public virtual object RecoverFromMismatchedSet( IIntStream input,
-                                               RecognitionException e,
-                                               BitSet follow )
-        {
-            if ( MismatchIsMissingToken( input, follow ) )
-            {
-                // System.out.println("missing token");
-                ReportError( e );
-                // we don't know how to conjure up a token for sets yet
-                return GetMissingSymbol( input, e, TokenTypes.Invalid, follow );
-            }
-            // TODO do single token deletion like above for Token mismatch
-            throw e;
-        }
-
-        /** <summary>
-         *  Match needs to return the current input symbol, which gets put
-         *  into the label for the associated token ref; e.g., x=ID.  Token
-         *  and tree parsers need to return different objects. Rather than test
-         *  for input stream type or change the IntStream interface, I use
-         *  a simple method to ask the recognizer to tell me what the current
-         *  input symbol is.
-         *  </summary>
-         *
-         *  <remarks>This is ignored for lexers.</remarks>
-         */
-        protected virtual object GetCurrentInputSymbol( IIntStream input )
-        {
-            return null;
-        }
-
-        /** <summary>Conjure up a missing token during error recovery.</summary>
-         *
-         *  <remarks>
-         *  The recognizer attempts to recover from single missing
-         *  symbols. But, actions might refer to that missing symbol.
-         *  For example, x=ID {f($x);}. The action clearly assumes
-         *  that there has been an identifier matched previously and that
-         *  $x points at that token. If that token is missing, but
-         *  the next token in the stream is what we want we assume that
-         *  this token is missing and we keep going. Because we
-         *  have to return some token to replace the missing token,
-         *  we have to conjure one up. This method gives the user control
-         *  over the tokens returned for missing tokens. Mostly,
-         *  you will want to create something special for identifier
-         *  tokens. For literals such as '{' and ',', the default
-         *  action in the parser or tree parser works. It simply creates
-         *  a CommonToken of the appropriate type. The text will be the token.
-         *  If you change what tokens must be created by the lexer,
-         *  override this method to create the appropriate tokens.
-         *  </remarks>
-         */
-        protected virtual object GetMissingSymbol( IIntStream input,
-                                          RecognitionException e,
-                                          int expectedTokenType,
-                                          BitSet follow )
-        {
-            return null;
-        }
-
-        public virtual void ConsumeUntil( IIntStream input, int tokenType )
-        {
-            //System.out.println("consumeUntil "+tokenType);
-            int ttype = input.LA( 1 );
-            while ( ttype != TokenTypes.EndOfFile && ttype != tokenType )
-            {
-                input.Consume();
-                ttype = input.LA( 1 );
-            }
-        }
-
-        /** <summary>Consume tokens until one matches the given token set</summary> */
-        public virtual void ConsumeUntil( IIntStream input, BitSet set )
-        {
-            //System.out.println("consumeUntil("+set.toString(getTokenNames())+")");
-            int ttype = input.LA( 1 );
-            while ( ttype != TokenTypes.EndOfFile && !set.Member( ttype ) )
-            {
-                //System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
-                input.Consume();
-                ttype = input.LA( 1 );
-            }
-        }
-
-        /** <summary>Push a rule's follow set using our own hardcoded stack</summary> */
-        protected void PushFollow( BitSet fset )
-        {
-            if ( ( state._fsp + 1 ) >= state.following.Length )
-            {
-                Array.Resize(ref state.following, state.following.Length * 2);
-            }
-            state.following[++state._fsp] = fset;
-        }
-
-        protected void PopFollow()
-        {
-            state._fsp--;
-        }
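As a rough sketch, generated code is expected to bracket a rule invocation with
these calls, pushing the tokens that may follow the reference (the BitSet field
name below is hypothetical):

        PushFollow( FOLLOW_expr_in_stat ); // tokens that may follow this expr reference
        expr();                            // invoke the referenced rule
        PopFollow();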
-
-        /** <summary>
-         *  Return the list of rule names in your parser instance
-         *  leading up to a call to this method.  You could override this if
-         *  you want more details, such as the file/line info of where
-         *  in the generated parser code a rule is invoked.
-         *  </summary>
-         *
-         *  <remarks>
-         *  This is very useful for error messages and for context-sensitive
-         *  error recovery.
-         *  </remarks>
-         */
-        public virtual IList<string> GetRuleInvocationStack()
-        {
-            return GetRuleInvocationStack( new StackTrace(true) );
-        }
-
-        /** <summary>
-         *  A more general version of GetRuleInvocationStack where you can
-         *  pass in the StackTrace of, for example, a RecognitionException
-         *  to get its rule stack trace.
-         *  </summary>
-         */
-        public static IList<string> GetRuleInvocationStack(StackTrace trace)
-        {
-            if (trace == null)
-                throw new ArgumentNullException("trace");
-
-            List<string> rules = new List<string>();
-            StackFrame[] stack = trace.GetFrames() ?? new StackFrame[0];
-
-            for (int i = stack.Length - 1; i >= 0; i--)
-            {
-                StackFrame frame = stack[i];
-                MethodBase method = frame.GetMethod();
-                GrammarRuleAttribute[] attributes = (GrammarRuleAttribute[])method.GetCustomAttributes(typeof(GrammarRuleAttribute), true);
-                if (attributes != null && attributes.Length > 0)
-                    rules.Add(attributes[0].Name);
-            }
-
-            return rules;
-        }
-
-        public virtual int BacktrackingLevel
-        {
-            get
-            {
-                return state.backtracking;
-            }
-            set
-            {
-                state.backtracking = value;
-            }
-        }
-
-        /** <summary>Return whether or not a backtracking attempt failed.</summary> */
-        public virtual bool Failed
-        {
-            get
-            {
-                return state.failed;
-            }
-        }
-
-        /** <summary>
-         *  Used to print out token names like ID during debugging and
-         *  error reporting.  The generated parsers override this property
-         *  to point to their string[] of token names.
-         *  </summary>
-         */
-        public virtual string[] TokenNames
-        {
-            get
-            {
-                return null;
-            }
-        }
-
-        /** <summary>
-         *  For debugging and other purposes, you might want the grammar name.
-         *  ANTLR generates an implementation of this property.
-         *  </summary>
-         */
-        public virtual string GrammarFileName
-        {
-            get
-            {
-                return null;
-            }
-        }
-
-        public abstract string SourceName
-        {
-            get;
-        }
-
-        /** <summary>
-         *  A convenience method for use most often with template rewrites.
-         *  Convert a list of tokens to a list of their text strings.
-         *  </summary>
-         */
-        public virtual List<string> ToStrings( ICollection<IToken> tokens )
-        {
-            if ( tokens == null )
-                return null;
-
-            List<string> strings = new List<string>( tokens.Count );
-            foreach ( IToken token in tokens )
-            {
-                strings.Add( token.Text );
-            }
-
-            return strings;
-        }
-
-        /** <summary>
-         *  Given a rule number and a start token index number, return
-         *  MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
-         *  start index.  If this rule has parsed input starting from the
-         *  start index before, then return where the rule stopped parsing.
-         *  It returns the index of the last token matched by the rule.
-         *  </summary>
-         *
-         *  <remarks>
-         *  For now we use a simple dictionary keyed by the start index.
-         *  Later, we can make a special one for ints and also one that
-         *  tosses out data after we commit past input position i.
-         *  </remarks>
-         */
-        public virtual int GetRuleMemoization( int ruleIndex, int ruleStartIndex )
-        {
-            if ( state.ruleMemo[ruleIndex] == null )
-            {
-                state.ruleMemo[ruleIndex] = new Dictionary<int, int>();
-            }
-
-            int stopIndex;
-            if ( !state.ruleMemo[ruleIndex].TryGetValue( ruleStartIndex, out stopIndex ) )
-                return MemoRuleUnknown;
-
-            return stopIndex;
-        }
-
-        /** <summary>
-         *  Has this rule already parsed input at the current index in the
-         *  input stream?  Return false if the memoized result is MEMO_RULE_UNKNOWN.
-         *  If we attempted but failed to parse properly before, set
-         *  state.failed and return true.
-         *  </summary>
-         *
-         *  <remarks>
-         *  This method has a side-effect: if we have seen this input for
-         *  this rule and successfully parsed before, then seek ahead to
-         *  1 past the stop token matched for this rule last time.
-         *  </remarks>
-         */
-        public virtual bool AlreadyParsedRule( IIntStream input, int ruleIndex )
-        {
-            int stopIndex = GetRuleMemoization( ruleIndex, input.Index );
-            if ( stopIndex == MemoRuleUnknown )
-            {
-                return false;
-            }
-            if ( stopIndex == MemoRuleFailed )
-            {
-                //System.out.println("rule "+ruleIndex+" will never succeed");
-                state.failed = true;
-            }
-            else
-            {
-                //System.out.println("seen rule "+ruleIndex+" before; skipping ahead to @"+(stopIndex+1)+" failed="+state.failed);
-                input.Seek( stopIndex + 1 ); // jump to one past stop token
-            }
-            return true;
-        }
-
-        /** <summary>
-         *  Record whether or not this rule parsed the input at this position
-         *  successfully.  Uses a standard dictionary for now.
-         *  </summary>
-         */
-        public virtual void Memoize( IIntStream input,
-                            int ruleIndex,
-                            int ruleStartIndex )
-        {
-            int stopTokenIndex = state.failed ? MemoRuleFailed : input.Index - 1;
-            if ( state.ruleMemo == null )
-            {
-                if (TraceDestination != null)
-                    TraceDestination.WriteLine( "!!!!!!!!! memo array is null for " + GrammarFileName );
-            }
-            if ( ruleIndex >= state.ruleMemo.Length )
-            {
-                if (TraceDestination != null)
-                    TraceDestination.WriteLine("!!!!!!!!! memo size is " + state.ruleMemo.Length + ", but rule index is " + ruleIndex);
-            }
-            if ( state.ruleMemo[ruleIndex] != null )
-            {
-                state.ruleMemo[ruleIndex][ruleStartIndex] = stopTokenIndex;
-            }
-        }
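As a rough sketch, a generated rule in a parser with backtracking and
memoization enabled is expected to use these hooks along the following lines
(the rule index 7, the rule name, and the "input" token stream field of the
generated parser are illustrative):

        public void expr()
        {
            int expr_StartIndex = input.Index;
            try
            {
                if ( state.backtracking > 0 && AlreadyParsedRule( input, 7 ) )
                    return; // reuse the earlier result (or its recorded failure)

                // ... match the body of the rule here ...
            }
            finally
            {
                if ( state.backtracking > 0 )
                    Memoize( input, 7, expr_StartIndex );
            }
        }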
-
-        /** <summary>Return how many rule/input-index pairs there are in total.</summary>
-         *  TODO: this includes synpreds. :(
-         */
-        public virtual int GetRuleMemoizationCacheSize()
-        {
-            int n = 0;
-            for ( int i = 0; state.ruleMemo != null && i < state.ruleMemo.Length; i++ )
-            {
-                var ruleMap = state.ruleMemo[i];
-                if ( ruleMap != null )
-                {
-                    n += ruleMap.Count; // how many input indexes are recorded?
-                }
-            }
-            return n;
-        }
-
-        public virtual void TraceIn(string ruleName, int ruleIndex, object inputSymbol)
-        {
-            if (TraceDestination == null)
-                return;
-
-            TraceDestination.Write("enter " + ruleName + " " + inputSymbol);
-            if (state.backtracking > 0)
-            {
-                TraceDestination.Write(" backtracking=" + state.backtracking);
-            }
-            TraceDestination.WriteLine();
-        }
-
-        public virtual void TraceOut(string ruleName, int ruleIndex, object inputSymbol)
-        {
-            if (TraceDestination == null)
-                return;
-
-            TraceDestination.Write("exit " + ruleName + " " + inputSymbol);
-            if (state.backtracking > 0)
-            {
-                TraceDestination.Write(" backtracking=" + state.backtracking);
-                if (state.failed)
-                    TraceDestination.Write(" failed");
-                else
-                    TraceDestination.Write(" succeeded");
-            }
-            TraceDestination.WriteLine();
-        }
-
-        #region Debugging support
-        public virtual IDebugEventListener DebugListener
-        {
-            get
-            {
-                return null;
-            }
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugEnterRule(string grammarFileName, string ruleName)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.EnterRule(grammarFileName, ruleName);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugExitRule(string grammarFileName, string ruleName)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.ExitRule(grammarFileName, ruleName);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugEnterSubRule(int decisionNumber)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.EnterSubRule(decisionNumber);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugExitSubRule(int decisionNumber)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.ExitSubRule(decisionNumber);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugEnterAlt(int alt)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.EnterAlt(alt);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugEnterDecision(int decisionNumber, bool couldBacktrack)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.EnterDecision(decisionNumber, couldBacktrack);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugExitDecision(int decisionNumber)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.ExitDecision(decisionNumber);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugLocation(int line, int charPositionInLine)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.Location(line, charPositionInLine);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugSemanticPredicate(bool result, string predicate)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.SemanticPredicate(result, predicate);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugBeginBacktrack(int level)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.BeginBacktrack(level);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugEndBacktrack(int level, bool successful)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.EndBacktrack(level, successful);
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugRecognitionException(RecognitionException ex)
-        {
-            IDebugEventListener dbg = DebugListener;
-            if (dbg != null)
-                dbg.RecognitionException(ex);
-        }
-        #endregion
-    }
-}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/CommonTokenStream.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/CommonTokenStream.cs
deleted file mode 100644
index a1e4a29..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/CommonTokenStream.cs
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime
-{
-    using System.Collections.Generic;
-
-    using InvalidOperationException = System.InvalidOperationException;
-    using StringBuilder = System.Text.StringBuilder;
-
-    /** <summary>
-     *  The most common stream of tokens is one where every token is buffered up
-     *  and tokens are prefiltered for a certain channel (the parser will only
-     *  see these tokens and cannot change the filter channel number during the
-     *  parse).
-     *  </summary>
-     *
-     *  <remarks>TODO: how to access the full token stream?  How to track all tokens matched per rule?</remarks>
-     */
-    [System.Serializable]
-    public class CommonTokenStream : BufferedTokenStream
-    {
-        /** Skip tokens on any channel but this one; this is how we skip whitespace... */
-        private int _channel;
-
-        public CommonTokenStream()
-        {
-        }
-
-        public CommonTokenStream(ITokenSource tokenSource)
-            : this(tokenSource, TokenChannels.Default)
-        {
-        }
-
-        public CommonTokenStream(ITokenSource tokenSource, int channel)
-            : base(tokenSource)
-        {
-            this._channel = channel;
-        }
-
-        public int Channel
-        {
-            get
-            {
-                return _channel;
-            }
-        }
-
-        /** Reset this token stream by setting its token source. */
-        public override ITokenSource TokenSource
-        {
-            get
-            {
-                return base.TokenSource;
-            }
-            set
-            {
-                base.TokenSource = value;
-                _channel = TokenChannels.Default;
-            }
-        }
-
-        /** Always leave p on an on-channel token. */
-        public override void Consume()
-        {
-            if (_p == -1)
-                Setup();
-            _p++;
-            _p = SkipOffTokenChannels(_p);
-        }
-
-        protected override IToken LB(int k)
-        {
-            if (k == 0 || (_p - k) < 0)
-                return null;
-
-            int i = _p;
-            int n = 1;
-            // find k good tokens looking backwards
-            while (n <= k)
-            {
-                // skip off-channel tokens
-                i = SkipOffTokenChannelsReverse(i - 1);
-                n++;
-            }
-            if (i < 0)
-                return null;
-            return _tokens[i];
-        }
-
-        public override IToken LT(int k)
-        {
-            if (_p == -1)
-                Setup();
-            if (k == 0)
-                return null;
-            if (k < 0)
-                return LB(-k);
-            int i = _p;
-            int n = 1; // we know tokens[p] is a good one
-            // find k good tokens
-            while (n < k)
-            {
-                // skip off-channel tokens
-                i = SkipOffTokenChannels(i + 1);
-                n++;
-            }
-
-            if (i > Range)
-                Range = i;
-
-            return _tokens[i];
-        }
-
-        /** Given a starting index, return the index of the first on-channel
-         *  token.
-         */
-        protected virtual int SkipOffTokenChannels(int i)
-        {
-            Sync(i);
-            while (_tokens[i].Channel != _channel)
-            {
-                // also stops at EOF (it's on channel)
-                i++;
-                Sync(i);
-            }
-            return i;
-        }
-
-        protected virtual int SkipOffTokenChannelsReverse(int i)
-        {
-            while (i >= 0 && ((IToken)_tokens[i]).Channel != _channel)
-            {
-                i--;
-            }
-
-            return i;
-        }
-
-        protected override void Setup()
-        {
-            _p = 0;
-            _p = SkipOffTokenChannels(_p);
-        }
-    }
-}
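As a sketch of the usual wiring for this stream ("MyLexer" and "MyParser" stand
in for classes generated from a grammar):

        ICharStream chars = new ANTLRStringStream( "x = 1;" );
        MyLexer lexer = new MyLexer( chars );
        CommonTokenStream tokens = new CommonTokenStream( lexer ); // parser sees only default-channel tokens
        MyParser parser = new MyParser( tokens );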
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/DFA.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/DFA.cs
deleted file mode 100644
index 37e2d06..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/DFA.cs
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime
-{
-    using ConditionalAttribute = System.Diagnostics.ConditionalAttribute;
-    using Console = System.Console;
-    using IDebugEventListener = Antlr.Runtime.Debug.IDebugEventListener;
-
-    public delegate int SpecialStateTransitionHandler( DFA dfa, int s, IIntStream input );
-
-    /** <summary>A DFA implemented as a set of transition tables.</summary>
-     *
-     *  <remarks>
-     *  Any state that has a semantic predicate edge is special; those states
-     *  are generated with if-then-else structures in a specialStateTransition()
-     *  which is generated by cyclicDFA template.
-     *
-     *  There are at most 32767 states (16-bit signed short).
-     *  Could get away with byte sometimes but would have to generate different
-     *  types and the simulation code too.  For a point of reference, the Java
-     *  lexer's Tokens rule DFA has roughly 326 states.
-     *  </remarks>
-     */
-    public class DFA
-    {
-        protected short[] eot;
-        protected short[] eof;
-        protected char[] min;
-        protected char[] max;
-        protected short[] accept;
-        protected short[] special;
-        protected short[][] transition;
-
-        protected int decisionNumber;
-
-        /** <summary>Which recognizer encloses this DFA?  Needed to check backtracking</summary> */
-        protected BaseRecognizer recognizer;
-
-        public readonly bool debug = false;
-
-        public DFA()
-            : this( new SpecialStateTransitionHandler( SpecialStateTransitionDefault ) )
-        {
-        }
-
-        public DFA( SpecialStateTransitionHandler specialStateTransition )
-        {
-            this.SpecialStateTransition = specialStateTransition ?? new SpecialStateTransitionHandler( SpecialStateTransitionDefault );
-        }
-
-        public virtual string Description
-        {
-            get
-            {
-                return "n/a";
-            }
-        }
-
-        /** <summary>
-         *  From the input stream, predict what alternative will succeed
-         *  using this DFA (representing the covering regular approximation
-         *  to the underlying CFL).  Return an alternative number 1..n.  Throw
-         *  an exception upon error.
-         *  </summary>
-         */
-        public virtual int Predict( IIntStream input )
-        {
-            if ( debug )
-            {
-                Console.Error.WriteLine( "Enter DFA.predict for decision " + decisionNumber );
-            }
-            int mark = input.Mark(); // remember where decision started in input
-            int s = 0; // we always start at s0
-            try
-            {
-                for ( ; ; )
-                {
-                    if ( debug )
-                        Console.Error.WriteLine( "DFA " + decisionNumber + " state " + s + " LA(1)=" + (char)input.LA( 1 ) + "(" + input.LA( 1 ) +
-                                           "), index=" + input.Index );
-                    int specialState = special[s];
-                    if ( specialState >= 0 )
-                    {
-                        if ( debug )
-                        {
-                            Console.Error.WriteLine( "DFA " + decisionNumber +
-                                " state " + s + " is special state " + specialState );
-                        }
-                        s = SpecialStateTransition( this, specialState, input );
-                        if ( debug )
-                        {
-                            Console.Error.WriteLine( "DFA " + decisionNumber +
-                                " returns from special state " + specialState + " to " + s );
-                        }
-                        if ( s == -1 )
-                        {
-                            NoViableAlt( s, input );
-                            return 0;
-                        }
-                        input.Consume();
-                        continue;
-                    }
-                    if ( accept[s] >= 1 )
-                    {
-                        if ( debug )
-                            Console.Error.WriteLine( "accept; predict " + accept[s] + " from state " + s );
-                        return accept[s];
-                    }
-                    // look for a normal char transition
-                    char c = (char)input.LA( 1 ); // -1 == \uFFFF, all tokens fit in 65000 space
-                    if ( c >= min[s] && c <= max[s] )
-                    {
-                        int snext = transition[s][c - min[s]]; // move to next state
-                        if ( snext < 0 )
-                        {
-                            // was in range but not a normal transition
-                            // must check EOT, which is like the else clause.
-                            // eot[s]>=0 indicates that an EOT edge goes to another
-                            // state.
-                            if ( eot[s] >= 0 )
-                            {  // EOT Transition to accept state?
-                                if ( debug )
-                                    Console.Error.WriteLine( "EOT transition" );
-                                s = eot[s];
-                                input.Consume();
-                                // TODO: I had this as return accept[eot[s]]
-                                // which assumed here that the EOT edge always
-                                // went to an accept...faster to do this, but
-                                // what about predicated edges coming from EOT
-                                // target?
-                                continue;
-                            }
-                            NoViableAlt( s, input );
-                            return 0;
-                        }
-                        s = snext;
-                        input.Consume();
-                        continue;
-                    }
-                    if ( eot[s] >= 0 )
-                    {  // EOT Transition?
-                        if ( debug )
-                            Console.Error.WriteLine( "EOT transition" );
-                        s = eot[s];
-                        input.Consume();
-                        continue;
-                    }
-                    if ( c == unchecked( (char)TokenTypes.EndOfFile ) && eof[s] >= 0 )
-                    {  // EOF Transition to accept state?
-                        if ( debug )
-                            Console.Error.WriteLine( "accept via EOF; predict " + accept[eof[s]] + " from " + eof[s] );
-                        return accept[eof[s]];
-                    }
-                    // not in range and not EOF/EOT, must be invalid symbol
-                    if ( debug )
-                    {
-                        Console.Error.WriteLine( "min[" + s + "]=" + min[s] );
-                        Console.Error.WriteLine( "max[" + s + "]=" + max[s] );
-                        Console.Error.WriteLine( "eot[" + s + "]=" + eot[s] );
-                        Console.Error.WriteLine( "eof[" + s + "]=" + eof[s] );
-                        for ( int p = 0; p < transition[s].Length; p++ )
-                        {
-                            Console.Error.Write( transition[s][p] + " " );
-                        }
-                        Console.Error.WriteLine();
-                    }
-                    NoViableAlt( s, input );
-                    return 0;
-                }
-            }
-            finally
-            {
-                input.Rewind( mark );
-            }
-        }
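As a rough sketch, a generated recognizer consults a DFA at a decision point
roughly as follows (the field name "dfa3" and the alternative bodies are
illustrative):

        int alt = dfa3.Predict( input ); // 1..n; returns 0 with state.failed set while backtracking
        switch ( alt )
        {
        case 1:
            // match alternative 1
            break;
        case 2:
            // match alternative 2
            break;
        }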
-
-        protected virtual void NoViableAlt( int s, IIntStream input )
-        {
-            if ( recognizer.state.backtracking > 0 )
-            {
-                recognizer.state.failed = true;
-                return;
-            }
-            NoViableAltException nvae =
-                new NoViableAltException( Description,
-                                         decisionNumber,
-                                         s,
-                                         input );
-            Error( nvae );
-            throw nvae;
-        }
-
-        /** <summary>A hook for the debugging interface.</summary> */
-        public virtual void Error( NoViableAltException nvae )
-        {
-        }
-
-        public SpecialStateTransitionHandler SpecialStateTransition
-        {
-            get;
-            private set;
-        }
-        //public virtual int specialStateTransition( int s, IntStream input )
-        //{
-        //    return -1;
-        //}
-
-        static int SpecialStateTransitionDefault( DFA dfa, int s, IIntStream input )
-        {
-            return -1;
-        }
-
-        /** <summary>
-         *  Given a string that holds a run-length encoding of some unsigned shorts
-         *  like "\x01\x02\x03\x09", convert to short[] {2,9,9,9}.  We do this to avoid
-         *  static short[] initializers, which generate so much init code that the class won't
-         *  compile. :(
-         *  </summary>
-         */
-        public static short[] UnpackEncodedString( string encodedString )
-        {
-            // walk first to find how big it is.
-            int size = 0;
-            for ( int i = 0; i < encodedString.Length; i += 2 )
-            {
-                size += encodedString[i];
-            }
-            short[] data = new short[size];
-            int di = 0;
-            for ( int i = 0; i < encodedString.Length; i += 2 )
-            {
-                char n = encodedString[i];
-                char v = encodedString[i + 1];
-                // add v n times to data
-                for ( int j = 1; j <= n; j++ )
-                {
-                    data[di++] = (short)v;
-                }
-            }
-            return data;
-        }
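For example, per the encoding described above, each character pair is a
(count, value) run:

        short[] data = DFA.UnpackEncodedString( "\x01\x02\x03\x09" );
        // data now holds { 2, 9, 9, 9 }: one 2 followed by three 9s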
-
-        /** <summary>Hideous duplication of code, but I need different typed arrays out :(</summary> */
-        public static char[] UnpackEncodedStringToUnsignedChars( string encodedString )
-        {
-            // walk first to find how big it is.
-            int size = 0;
-            for ( int i = 0; i < encodedString.Length; i += 2 )
-            {
-                size += encodedString[i];
-            }
-            char[] data = new char[size];
-            int di = 0;
-            for ( int i = 0; i < encodedString.Length; i += 2 )
-            {
-                char n = encodedString[i];
-                char v = encodedString[i + 1];
-                // add v n times to data
-                for ( int j = 1; j <= n; j++ )
-                {
-                    data[di++] = v;
-                }
-            }
-            return data;
-        }
-
-        [Conditional("ANTLR_DEBUG")]
-        protected virtual void DebugRecognitionException(RecognitionException ex)
-        {
-            IDebugEventListener dbg = recognizer.DebugListener;
-            if (dbg != null)
-                dbg.RecognitionException(ex);
-        }
-    }
-}
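For reference, UnpackEncodedString above reads the encoded string as (count, value) character pairs and expands each pair into 'count' copies of 'value'; the generated DFA tables are shipped in this packed form to keep static initializers small. A minimal usage sketch, reusing the example from the doc comment (the variable name is illustrative):

    // Illustrative only: "\u0001\u0002\u0003\u0009" encodes the pairs (1,2) and (3,9),
    // i.e. one 2 followed by three 9s.
    short[] table = DFA.UnpackEncodedString("\u0001\u0002\u0003\u0009");
    // table => { 2, 9, 9, 9 }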
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Lexer.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Lexer.cs
deleted file mode 100644
index cf478c6..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Lexer.cs
+++ /dev/null
@@ -1,435 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime
-{
-    using ConditionalAttribute = System.Diagnostics.ConditionalAttribute;
-
-    /** <summary>
-     *  A lexer is a recognizer that draws input symbols from a character stream.
-     *  Lexer grammars result in a subclass of this object. A Lexer object
-     *  uses simplified match() and error recovery mechanisms in the interest
-     *  of speed.
-     *  </summary>
-     */
-    public abstract class Lexer : BaseRecognizer, ITokenSource
-    {
-        /** <summary>Where is the lexer drawing characters from?</summary> */
-        protected ICharStream input;
-
-        public Lexer()
-        {
-        }
-
-        public Lexer( ICharStream input )
-        {
-            this.input = input;
-        }
-
-        public Lexer( ICharStream input, RecognizerSharedState state )
-            : base(state)
-        {
-            this.input = input;
-        }
-
-        #region Properties
-        public string Text
-        {
-            /** <summary>Return the text matched so far for the current token or any text override.</summary> */
-            get
-            {
-                if ( state.text != null )
-                {
-                    return state.text;
-                }
-                return input.Substring( state.tokenStartCharIndex, CharIndex - state.tokenStartCharIndex );
-            }
-            /** <summary>Set the complete text of this token; it wipes any previous changes to the text.</summary> */
-            set
-            {
-                state.text = value;
-            }
-        }
-        public int Line
-        {
-            get
-            {
-                return input.Line;
-            }
-            set
-            {
-                input.Line = value;
-            }
-        }
-        public int CharPositionInLine
-        {
-            get
-            {
-                return input.CharPositionInLine;
-            }
-            set
-            {
-                input.CharPositionInLine = value;
-            }
-        }
-        #endregion
-
-        public override void Reset()
-        {
-            base.Reset(); // reset all recognizer state variables
-            // whack Lexer state variables
-            if ( input != null )
-            {
-                input.Seek( 0 ); // rewind the input
-            }
-            if ( state == null )
-            {
-                return; // no shared state work to do
-            }
-            state.token = null;
-            state.type = TokenTypes.Invalid;
-            state.channel = TokenChannels.Default;
-            state.tokenStartCharIndex = -1;
-            state.tokenStartCharPositionInLine = -1;
-            state.tokenStartLine = -1;
-            state.text = null;
-        }
-
-        /** <summary>Return a token from this source; i.e., match a token on the char stream.</summary> */
-        public virtual IToken NextToken()
-        {
-            for ( ; ; )
-            {
-                state.token = null;
-                state.channel = TokenChannels.Default;
-                state.tokenStartCharIndex = input.Index;
-                state.tokenStartCharPositionInLine = input.CharPositionInLine;
-                state.tokenStartLine = input.Line;
-                state.text = null;
-                if ( input.LA( 1 ) == CharStreamConstants.EndOfFile )
-                {
-                    IToken eof = new CommonToken((ICharStream)input, CharStreamConstants.EndOfFile, TokenChannels.Default, input.Index, input.Index);
-                    eof.Line = Line;
-                    eof.CharPositionInLine = CharPositionInLine;
-                    return eof;
-                }
-                try
-                {
-                    ParseNextToken();
-                    if ( state.token == null )
-                    {
-                        Emit();
-                    }
-                    else if ( state.token == Tokens.Skip )
-                    {
-                        continue;
-                    }
-                    return state.token;
-                }
-                catch (MismatchedRangeException mre)
-                {
-                    ReportError(mre);
-                    // MatchRange() routine has already called recover()
-                }
-                catch (MismatchedTokenException mte)
-                {
-                    ReportError(mte);
-                    // Match() routine has already called recover()
-                }
-                catch ( RecognitionException re )
-                {
-                    ReportError( re );
-                    Recover( re ); // throw out current char and try again
-                }
-            }
-        }
-
-        /** <summary>
-         *  Instruct the lexer to skip creating a token for current lexer rule
-         *  and look for another token.  nextToken() knows to keep looking when
-         *  a lexer rule finishes with token set to SKIP_TOKEN.  Recall that
-         *  if token==null at end of any token rule, it creates one for you
-         *  and emits it.
-         *  </summary>
-         */
-        public virtual void Skip()
-        {
-            state.token = Tokens.Skip;
-        }
-
-        /** <summary>This is the lexer entry point that sets instance var 'token'</summary> */
-        public abstract void mTokens();
-
-        public virtual ICharStream CharStream
-        {
-            get
-            {
-                return input;
-            }
-            /** <summary>Set the char stream and reset the lexer</summary> */
-            set
-            {
-                input = null;
-                Reset();
-                input = value;
-            }
-        }
-
-        public override string SourceName
-        {
-            get
-            {
-                return input.SourceName;
-            }
-        }
-
-        /** <summary>
-         *  Currently does not support multiple emits per nextToken invocation
-         *  for efficiency reasons.  Subclass and override this method and
-         *  nextToken (to push tokens into a list and pull from that list rather
-         *  than a single variable as this implementation does).
-         *  </summary>
-         */
-        public virtual void Emit( IToken token )
-        {
-            state.token = token;
-        }
-
-        /** <summary>
-         *  The standard method called to automatically emit a token at the
-         *  outermost lexical rule.  The token object should point into the
-         *  char buffer start..stop.  If there is a text override in 'text',
-         *  use that to set the token's text.  Override this method to emit
-         *  custom Token objects.
-         *  </summary>
-         *
-         *  <remarks>
-         *  If you are building trees, then you should also override
-         *  Parser or TreeParser.getMissingSymbol().
-         *  </remarks>
-         */
-        public virtual IToken Emit()
-        {
-            IToken t = new CommonToken( input, state.type, state.channel, state.tokenStartCharIndex, CharIndex - 1 );
-            t.Line = state.tokenStartLine;
-            t.Text = state.text;
-            t.CharPositionInLine = state.tokenStartCharPositionInLine;
-            Emit( t );
-            return t;
-        }
-
-        public virtual void Match( string s )
-        {
-            int i = 0;
-            while ( i < s.Length )
-            {
-                if ( input.LA( 1 ) != s[i] )
-                {
-                    if ( state.backtracking > 0 )
-                    {
-                        state.failed = true;
-                        return;
-                    }
-                    MismatchedTokenException mte = new MismatchedTokenException(s[i], input, TokenNames);
-                    Recover( mte );
-                    throw mte;
-                }
-                i++;
-                input.Consume();
-                state.failed = false;
-            }
-        }
-
-        public virtual void MatchAny()
-        {
-            input.Consume();
-        }
-
-        public virtual void Match( int c )
-        {
-            if ( input.LA( 1 ) != c )
-            {
-                if ( state.backtracking > 0 )
-                {
-                    state.failed = true;
-                    return;
-                }
-                MismatchedTokenException mte = new MismatchedTokenException(c, input, TokenNames);
-                Recover( mte );  // don't really recover; just consume in lexer
-                throw mte;
-            }
-            input.Consume();
-            state.failed = false;
-        }
-
-        public virtual void MatchRange( int a, int b )
-        {
-            if ( input.LA( 1 ) < a || input.LA( 1 ) > b )
-            {
-                if ( state.backtracking > 0 )
-                {
-                    state.failed = true;
-                    return;
-                }
-                MismatchedRangeException mre = new MismatchedRangeException(a, b, input);
-                Recover( mre );
-                throw mre;
-            }
-            input.Consume();
-            state.failed = false;
-        }
-
-        /** <summary>What is the index of the current character of lookahead?</summary> */
-        public virtual int CharIndex
-        {
-            get
-            {
-                return input.Index;
-            }
-        }
-
-        public override void ReportError( RecognitionException e )
-        {
-            /** TODO: not thought about recovery in lexer yet.
-             *
-            // if we've already reported an error and have not matched a token
-            // yet successfully, don't report any errors.
-            if ( errorRecovery ) {
-                //System.err.print("[SPURIOUS] ");
-                return;
-            }
-            errorRecovery = true;
-             */
-
-            DisplayRecognitionError( this.TokenNames, e );
-        }
-
-        public override string GetErrorMessage( RecognitionException e, string[] tokenNames )
-        {
-            string msg = null;
-            if ( e is MismatchedTokenException )
-            {
-                MismatchedTokenException mte = (MismatchedTokenException)e;
-                msg = "mismatched character " + GetCharErrorDisplay( e.Character ) + " expecting " + GetCharErrorDisplay( mte.Expecting );
-            }
-            else if ( e is NoViableAltException )
-            {
-                NoViableAltException nvae = (NoViableAltException)e;
-                // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
-                // and "(decision="+nvae.decisionNumber+") and
-                // "state "+nvae.stateNumber
-                msg = "no viable alternative at character " + GetCharErrorDisplay( e.Character );
-            }
-            else if ( e is EarlyExitException )
-            {
-                EarlyExitException eee = (EarlyExitException)e;
-                // for development, can add "(decision="+eee.decisionNumber+")"
-                msg = "required (...)+ loop did not match anything at character " + GetCharErrorDisplay( e.Character );
-            }
-            else if ( e is MismatchedNotSetException )
-            {
-                MismatchedNotSetException mse = (MismatchedNotSetException)e;
-                msg = "mismatched character " + GetCharErrorDisplay( e.Character ) + " expecting set " + mse.Expecting;
-            }
-            else if ( e is MismatchedSetException )
-            {
-                MismatchedSetException mse = (MismatchedSetException)e;
-                msg = "mismatched character " + GetCharErrorDisplay( e.Character ) + " expecting set " + mse.Expecting;
-            }
-            else if ( e is MismatchedRangeException )
-            {
-                MismatchedRangeException mre = (MismatchedRangeException)e;
-                msg = "mismatched character " + GetCharErrorDisplay( e.Character ) + " expecting set " +
-                      GetCharErrorDisplay( mre.A ) + ".." + GetCharErrorDisplay( mre.B );
-            }
-            else
-            {
-                msg = base.GetErrorMessage( e, tokenNames );
-            }
-            return msg;
-        }
-
-        public virtual string GetCharErrorDisplay( int c )
-        {
-            string s = ( (char)c ).ToString();
-            switch ( c )
-            {
-            case TokenTypes.EndOfFile:
-                s = "<EOF>";
-                break;
-            case '\n':
-                s = "\\n";
-                break;
-            case '\t':
-                s = "\\t";
-                break;
-            case '\r':
-                s = "\\r";
-                break;
-            }
-            return "'" + s + "'";
-        }
-
-        /** <summary>
-         *  Lexers can normally match any char in its vocabulary after matching
-         *  a token, so do the easy thing and just kill a character and hope
-         *  it all works out.  You can instead use the rule invocation stack
-         *  to do sophisticated error recovery if you are in a fragment rule.
-         *  </summary>
-         */
-        public virtual void Recover( RecognitionException re )
-        {
-            //System.out.println("consuming char "+(char)input.LA(1)+" during recovery");
-            //re.printStackTrace();
-            input.Consume();
-        }
-
-        [Conditional("ANTLR_TRACE")]
-        public virtual void TraceIn( string ruleName, int ruleIndex )
-        {
-            string inputSymbol = ( (char)input.LT( 1 ) ) + " line=" + Line + ":" + CharPositionInLine;
-            base.TraceIn( ruleName, ruleIndex, inputSymbol );
-        }
-
-        [Conditional("ANTLR_TRACE")]
-        public virtual void TraceOut( string ruleName, int ruleIndex )
-        {
-            string inputSymbol = ( (char)input.LT( 1 ) ) + " line=" + Line + ":" + CharPositionInLine;
-            base.TraceOut( ruleName, ruleIndex, inputSymbol );
-        }
-
-        protected virtual void ParseNextToken()
-        {
-            mTokens();
-        }
-    }
-}
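For context, a generated lexer subclasses the Lexer above and implements mTokens(); callers then pull tokens through NextToken() until the end-of-file token comes back. A rough driver sketch under that assumption (MyLexer stands in for a hypothetical generated lexer; ANTLRStringStream is the runtime's in-memory character stream):

    ICharStream input = new ANTLRStringStream("some input text");
    MyLexer lexer = new MyLexer(input);   // hypothetical generated subclass of Lexer
    for (IToken t = lexer.NextToken(); t.Type != CharStreamConstants.EndOfFile; t = lexer.NextToken())
    {
        System.Console.WriteLine("type={0} text='{1}' at {2}:{3}",
                                 t.Type, t.Text, t.Line, t.CharPositionInLine);
    }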
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/FastQueue.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/FastQueue.cs
deleted file mode 100644
index 2dc5bfc..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/FastQueue.cs
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.Misc
-{
-    using System.Collections.Generic;
-    using ArgumentException = System.ArgumentException;
-    using InvalidOperationException = System.InvalidOperationException;
-
-    /** A queue that can dequeue and get(i) in O(1) and grow arbitrarily large.
-     *  A linked list is fast at dequeue but slow at get(i).  An array is
-     *  the reverse.  This is O(1) for both operations.
-     *
-     *  List grows until you dequeue last element at end of buffer. Then
-     *  it resets to start filling at 0 again.  If adds/removes are balanced, the
-     *  buffer will not grow too large.
-     *
-     *  No iterator stuff as that's not how we'll use it.
-     */
-    public class FastQueue<T>
-    {
-        /** <summary>dynamically-sized buffer of elements</summary> */
-        internal List<T> _data = new List<T>();
-        /** <summary>index of next element to fill</summary> */
-        internal int _p = 0;
-
-        public virtual int Count
-        {
-            get
-            {
-                return _data.Count - _p;
-            }
-        }
-
-        /// <summary>
-        /// How deep have we gone?
-        /// </summary>
-        public virtual int Range
-        {
-            get;
-            protected set;
-        }
-
-        /** <summary>
-         *  Return element i elements ahead of current element.  i==0 gets
-         *  current element.  This is not an absolute index into the data list
-         *  since p defines the start of the real list.
-         *  </summary>
-         */
-        public virtual T this[int i]
-        {
-            get
-            {
-                int absIndex = _p + i;
-                if (absIndex >= _data.Count)
-                    throw new ArgumentException(string.Format("queue index {0} > last index {1}", absIndex, _data.Count - 1));
-                if (absIndex < 0)
-                    throw new ArgumentException(string.Format("queue index {0} < 0", absIndex));
-
-                if (absIndex > Range)
-                    Range = absIndex;
-
-                return _data[absIndex];
-            }
-        }
-
-        /** <summary>Get and remove first element in queue</summary> */
-        public virtual T Dequeue()
-        {
-            if (Count == 0)
-                throw new InvalidOperationException();
-
-            T o = this[0];
-            _p++;
-            // have we hit end of buffer?
-            if ( _p == _data.Count )
-            {
-                // if so, it's an opportunity to start filling at index 0 again
-                Clear(); // size goes to 0, but retains memory
-            }
-            return o;
-        }
-
-        public virtual void Enqueue( T o )
-        {
-            _data.Add( o );
-        }
-
-        public virtual T Peek()
-        {
-            return this[0];
-        }
-
-        public virtual void Clear()
-        {
-            _p = 0;
-            _data.Clear();
-        }
-
-        /** <summary>Return string of current buffer contents; non-destructive</summary> */
-        public override string ToString()
-        {
-            System.Text.StringBuilder buf = new System.Text.StringBuilder();
-            int n = Count;
-            for ( int i = 0; i < n; i++ )
-            {
-                buf.Append( this[i] );
-                if ( ( i + 1 ) < n )
-                    buf.Append( " " );
-            }
-            return buf.ToString();
-        }
-    }
-}
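For reference, a small usage sketch of FastQueue&lt;T&gt; above: Enqueue, Dequeue and the indexer are all O(1), and the indexer is relative to the current head of the queue rather than to the backing list (the values below are illustrative):

    var q = new Antlr.Runtime.Misc.FastQueue<string>();
    q.Enqueue("a");
    q.Enqueue("b");
    q.Enqueue("c");
    string first = q.Dequeue();   // "a"; the head index advances instead of shifting data
    string head = q[0];           // "b" (index 0 is always the current head)
    int remaining = q.Count;      // 2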
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/LookaheadStream.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/LookaheadStream.cs
deleted file mode 100644
index 24dc0cb..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/LookaheadStream.cs
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.Misc
-{
-    using ArgumentException = System.ArgumentException;
-    using InvalidOperationException = System.InvalidOperationException;
-
-    /** <summary>
-     *  A lookahead queue that knows how to mark/release locations
-     *  in the buffer for backtracking purposes. Any markers force the FastQueue
-     *  superclass to keep all tokens until no more markers; then can reset
-     *  to avoid growing a huge buffer.
-     *  </summary>
-     */
-    public abstract class LookaheadStream<T>
-        : FastQueue<T>
-        where T : class
-    {
-        /** Absolute token index. It's the index of the symbol about to be
-         *  read via LT(1). Goes from 0 to numtokens.
-         */
-        private int _currentElementIndex = 0;
-
-        private T _previousElement;
-
-        /** Track object returned by nextElement upon end of stream;
-         *  Return it later when they ask for LT past the end of input.
-         */
-        T _eof = null;
-
-        /** <summary>Track the last mark() call result value for use in rewind().</summary> */
-        int _lastMarker;
-
-        /** <summary>tracks how deep mark() calls are nested</summary> */
-        int _markDepth;
-
-        public T EndOfFile
-        {
-            get
-            {
-                return _eof;
-            }
-            protected set
-            {
-                _eof = value;
-            }
-        }
-
-        public T PreviousElement
-        {
-            get
-            {
-                return _previousElement;
-            }
-        }
-
-        public override void Clear()
-        {
-            base.Clear();
-            _currentElementIndex = 0;
-            _p = 0;
-            _previousElement = null;
-        }
-
-        /** <summary>
-         *  Implement nextElement to supply a stream of elements to this
-         *  lookahead buffer.  Return eof upon end of the stream we're pulling from.
-         *  </summary>
-         */
-        public abstract T NextElement();
-
-        public abstract bool IsEndOfFile(T o);
-
-        /** <summary>Get and remove first element in queue; override FastQueue.remove()</summary> */
-        public override T Dequeue()
-        {
-            T o = this[0];
-            _p++;
-            // have we hit end of buffer and not backtracking?
-            if ( _p == _data.Count && _markDepth == 0 )
-            {
-                // if so, it's an opportunity to start filling at index 0 again
-                Clear(); // size goes to 0, but retains memory
-            }
-            return o;
-        }
-
-        /** <summary>Make sure we have at least one element to remove, even if EOF</summary> */
-        public virtual void Consume()
-        {
-            SyncAhead(1);
-            _previousElement = Dequeue();
-            _currentElementIndex++;
-        }
-
-        /** <summary>
-         *  Make sure we have 'need' elements from current position p. Last valid
-         *  p index is data.size()-1.  p+need-1 is the data index 'need' elements
-         *  ahead.  If we need 1 element, (p+1-1)==p must be &lt; data.size().
-         *  </summary>
-         */
-        protected virtual void SyncAhead( int need )
-        {
-            int n = ( _p + need - 1 ) - _data.Count + 1; // how many more elements do we need?
-            if ( n > 0 )
-                Fill( n );                 // out of elements?
-        }
-
-        /** <summary>add n elements to buffer</summary> */
-        public virtual void Fill( int n )
-        {
-            for ( int i = 0; i < n; i++ )
-            {
-                T o = NextElement();
-                if ( IsEndOfFile(o) )
-                    _eof = o;
-
-                _data.Add( o );
-            }
-        }
-
-        /** <summary>Size of entire stream is unknown; we only know buffer size from FastQueue</summary> */
-        public override int Count
-        {
-            get
-            {
-                throw new System.NotSupportedException( "streams are of unknown size" );
-            }
-        }
-
-        public virtual T LT( int k )
-        {
-            if ( k == 0 )
-            {
-                return null;
-            }
-            if ( k < 0 )
-            {
-                return LB(-k);
-            }
-
-            SyncAhead( k );
-            if ((_p + k - 1) > _data.Count)
-                return _eof;
-
-            return this[k - 1];
-        }
-
-        public virtual int Index
-        {
-            get
-            {
-                return _currentElementIndex;
-            }
-        }
-
-        public virtual int Mark()
-        {
-            _markDepth++;
-            _lastMarker = _p; // track where we are in buffer, not absolute token index
-            return _lastMarker;
-        }
-
-        public virtual void Release( int marker )
-        {
-            if (_markDepth == 0)
-                throw new InvalidOperationException();
-
-            _markDepth--;
-        }
-
-        public virtual void Rewind( int marker )
-        {
-            Seek( marker );
-            Release( marker );
-        }
-
-        public virtual void Rewind()
-        {
-            Rewind( _lastMarker );
-        }
-
-        /** <summary>
-         *  Seek to a 0-indexed position within data buffer.  Can't handle
-         *  case where you seek beyond end of existing buffer.  Normally used
-         *  to seek backwards in the buffer. Does not force loading of nodes.
-         *  Doesn't seek to absolute position in input stream since this stream
-         *  is unbuffered. Seeks only into our moving window of elements.
-         *  </summary>
-         */
-        public virtual void Seek( int index )
-        {
-            _p = index;
-        }
-
-        protected virtual T LB(int k)
-        {
-            if (k == 1)
-                return _previousElement;
-
-            throw new ArgumentException("can't look backwards more than one token in this stream");
-        }
-    }
-}
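For context, LookaheadStream&lt;T&gt; above is abstract: a concrete stream supplies NextElement() and IsEndOfFile(), and callers wrap speculative consumption in Mark()/Rewind(). A minimal sketch under those assumptions, using null as the end-of-file sentinel; the subclass and all names in it are illustrative only:

    class ArrayLookaheadStream : Antlr.Runtime.Misc.LookaheadStream<string>
    {
        private readonly string[] _items;
        private int _next;

        public ArrayLookaheadStream(string[] items) { _items = items; }

        // Pull the next element, or null once the array is exhausted.
        public override string NextElement()
        {
            return _next < _items.Length ? _items[_next++] : null;
        }

        public override bool IsEndOfFile(string o) { return o == null; }
    }

    // Usage (e.g. inside a test): Mark(), look ahead or consume speculatively, then Rewind().
    var stream = new ArrayLookaheadStream(new[] { "x", "y", "z" });
    int marker = stream.Mark();
    string second = stream.LT(2);   // "y", filled into the buffer on demand
    stream.Consume();               // moves past "x"
    stream.Rewind(marker);          // back to the position recorded by Mark()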
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/NoViableAltException.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/NoViableAltException.cs
deleted file mode 100644
index 6478c6f..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/NoViableAltException.cs
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime
-{
-    using ArgumentNullException = System.ArgumentNullException;
-    using Exception = System.Exception;
-    using SerializationInfo = System.Runtime.Serialization.SerializationInfo;
-    using StreamingContext = System.Runtime.Serialization.StreamingContext;
-
-    [System.Serializable]
-    public class NoViableAltException : RecognitionException
-    {
-        private readonly string _grammarDecisionDescription;
-        private readonly int _decisionNumber;
-        private readonly int _stateNumber;
-
-        public NoViableAltException()
-        {
-        }
-
-        public NoViableAltException(string grammarDecisionDescription)
-        {
-            this._grammarDecisionDescription = grammarDecisionDescription;
-        }
-
-        public NoViableAltException(string message, string grammarDecisionDescription)
-            : base(message)
-        {
-            this._grammarDecisionDescription = grammarDecisionDescription;
-        }
-
-        public NoViableAltException(string message, string grammarDecisionDescription, Exception innerException)
-            : base(message, innerException)
-        {
-            this._grammarDecisionDescription = grammarDecisionDescription;
-        }
-
-        public NoViableAltException(string grammarDecisionDescription, int decisionNumber, int stateNumber, IIntStream input)
-            : base(input)
-        {
-            this._grammarDecisionDescription = grammarDecisionDescription;
-            this._decisionNumber = decisionNumber;
-            this._stateNumber = stateNumber;
-        }
-
-        public NoViableAltException(string message, string grammarDecisionDescription, int decisionNumber, int stateNumber, IIntStream input)
-            : base(message, input)
-        {
-            this._grammarDecisionDescription = grammarDecisionDescription;
-            this._decisionNumber = decisionNumber;
-            this._stateNumber = stateNumber;
-        }
-
-        public NoViableAltException(string message, string grammarDecisionDescription, int decisionNumber, int stateNumber, IIntStream input, Exception innerException)
-            : base(message, input, innerException)
-        {
-            this._grammarDecisionDescription = grammarDecisionDescription;
-            this._decisionNumber = decisionNumber;
-            this._stateNumber = stateNumber;
-        }
-
-        protected NoViableAltException(SerializationInfo info, StreamingContext context)
-            : base(info, context)
-        {
-            if (info == null)
-                throw new ArgumentNullException("info");
-
-            this._grammarDecisionDescription = info.GetString("GrammarDecisionDescription");
-            this._decisionNumber = info.GetInt32("DecisionNumber");
-            this._stateNumber = info.GetInt32("StateNumber");
-        }
-
-        public int DecisionNumber
-        {
-            get
-            {
-                return _decisionNumber;
-            }
-        }
-
-        public string GrammarDecisionDescription
-        {
-            get
-            {
-                return _grammarDecisionDescription;
-            }
-        }
-
-        public int StateNumber
-        {
-            get
-            {
-                return _stateNumber;
-            }
-        }
-
-        public override void GetObjectData(SerializationInfo info, StreamingContext context)
-        {
-            if (info == null)
-                throw new ArgumentNullException("info");
-
-            base.GetObjectData(info, context);
-            info.AddValue("GrammarDecisionDescription", _grammarDecisionDescription);
-            info.AddValue("DecisionNumber", _decisionNumber);
-            info.AddValue("StateNumber", _stateNumber);
-        }
-
-        public override string ToString()
-        {
-            if ( Input is ICharStream )
-            {
-                return "NoViableAltException('" + (char)UnexpectedType + "'@[" + GrammarDecisionDescription + "])";
-            }
-            else
-            {
-                return "NoViableAltException(" + UnexpectedType + "@[" + GrammarDecisionDescription + "])";
-            }
-        }
-    }
-}
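For reference, the extra fields NoViableAltException above carries (decision number, DFA state and the grammar decision description) are what a custom error handler would typically report. A hedged sketch; parser and someRule() are hypothetical stand-ins for a generated parser and one of its rule methods:

    try
    {
        parser.someRule();   // hypothetical generated rule method
    }
    catch (NoViableAltException nvae)
    {
        System.Console.Error.WriteLine(
            "no viable alternative: decision {0}, DFA state {1}, at <<{2}>>",
            nvae.DecisionNumber, nvae.StateNumber, nvae.GrammarDecisionDescription);
    }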
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Properties/AssemblyInfo.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Properties/AssemblyInfo.cs
deleted file mode 100644
index 1489fde..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Properties/AssemblyInfo.cs
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-using System;
-using System.Reflection;
-using System.Runtime.InteropServices;
-using System.Security;
-
-// General Information about an assembly is controlled through the following 
-// set of attributes. Change these attribute values to modify the information
-// associated with an assembly.
-[assembly: AssemblyTitle( "Antlr3.Runtime" )]
-[assembly: AssemblyDescription( "" )]
-[assembly: AssemblyConfiguration( "" )]
-[assembly: AssemblyCompany( "Pixel Mine, Inc." )]
-[assembly: AssemblyProduct( "Antlr3.Runtime" )]
-[assembly: AssemblyCopyright("Copyright © Sam Harwell 2011")]
-[assembly: AssemblyTrademark( "" )]
-[assembly: AssemblyCulture( "" )]
-[assembly: CLSCompliant( true )]
-[assembly: AllowPartiallyTrustedCallers]
-
-// Setting ComVisible to false makes the types in this assembly not visible 
-// to COM components.  If you need to access a type in this assembly from 
-// COM, set the ComVisible attribute to true on that type.
-[assembly: ComVisible( false )]
-
-// The following GUID is for the ID of the typelib if this project is exposed to COM
-[assembly: Guid( "7a0b4db7-f127-4cf5-ac2c-e294957efcd6" )]
-
-/* Version information for an assembly consists of four values in the following order:
- *
- *   Major.Minor.Build.Revision
- *
- * These values are updated according to the following:
- *   1. Major.Minor follows the ANTLR release schedule
- *   2. Build is incremented each time the C# port is packaged for release (regardless
- *      of whether it's an incremental or nightly). The value resets to zero whenever
- *      the Major or Minor version is incremented.
- *   3. Revision is the Perforce changelist number associated with the release.
- */
-[assembly: AssemblyVersion("3.3.4.8517")]
-[assembly: AssemblyFileVersion("3.3.4.8517")]
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/RecognitionException.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/RecognitionException.cs
deleted file mode 100644
index f0c5662..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/RecognitionException.cs
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime
-{
-    using Antlr.Runtime.Tree;
-
-    using ArgumentException = System.ArgumentException;
-    using ArgumentNullException = System.ArgumentNullException;
-    using Exception = System.Exception;
-    using SerializationInfo = System.Runtime.Serialization.SerializationInfo;
-    using StreamingContext = System.Runtime.Serialization.StreamingContext;
-
-    /** <summary>The root of the ANTLR exception hierarchy.</summary>
-     *
-     *  <remarks>
-     *  To avoid English-only error messages and to generally make things
-     *  as flexible as possible, these exceptions are not created with strings,
-     *  but rather the information necessary to generate an error.  Then
-     *  the various reporting methods in Parser and Lexer can be overridden
-     *  to generate a localized error message.  For example, MismatchedToken
-     *  exceptions are built with the expected token type.
-     *  So, don't expect getMessage() to return anything.
-     *
-     *  Note that as of Java 1.4, you can access the stack trace, which means
-     *  that you can compute the complete trace of rules from the start symbol.
-     *  This gives you considerable context information with which to generate
-     *  useful error messages.
-     *
-     *  ANTLR generates code that throws exceptions upon recognition error and
-     *  also generates code to catch these exceptions in each rule.  If you
-     *  want to quit upon first error, you can turn off the automatic error
-     *  handling mechanism using rulecatch action, but you still need to
-     *  override methods mismatch and recoverFromMismatchSet.
-     *
-     *  In general, the recognition exceptions can track where in a grammar a
-     *  problem occurred and/or what was the expected input.  While the parser
-     *  knows its state (such as current input symbol and line info) that
-     *  state can change before the exception is reported so current token index
-     *  is computed and stored at exception time.  From this info, you can
-     *  perhaps print an entire line of input not just a single token, for example.
-     *  Better to just say the recognizer had a problem and then let the parser
-     *  figure out a fancy report.
-     *  </remarks>
-     */
-    [System.Serializable]
-    public class RecognitionException : Exception
-    {
-        /** <summary>What input stream did the error occur in?</summary> */
-        private IIntStream _input;
-
-        /** <summary>What is index of token/char were we looking at when the error occurred?</summary> */
-        private int _index;
-
-        /** <summary>
-         *  The current Token when an error occurred.  Since not all streams
-         *  can retrieve the ith Token, we have to track the Token object.
-         *  For parsers.  Even when it's a tree parser, token might be set.
-         *  </summary>
-         */
-        private IToken _token;
-
-        /** <summary>
-         *  If this is a tree parser exception, node is set to the node with
-         *  the problem.
-         *  </summary>
-         */
-        private object _node;
-
-        /** <summary>The current char when an error occurred. For lexers.</summary> */
-        private int _c;
-
-        /** <summary>
-         *  Track the line (1-based) at which the error occurred in case this is
-         *  generated from a lexer.  We need to track this since the
-         *  unexpected char doesn't carry the line info.
-         *  </summary>
-         */
-        private int _line;
-
-        /// <summary>
-        /// The 0-based index into the line where the error occurred.
-        /// </summary>
-        private int _charPositionInLine;
-
-        /** <summary>
-         *  If you are parsing a tree node stream, you will encounter some
-         *  imaginary nodes w/o line/col info.  We now search backwards looking
-         *  for most recent token with line/col info, but notify getErrorHeader()
-         *  that info is approximate.
-         *  </summary>
-         */
-        private bool _approximateLineInfo;
-
-        /** <summary>Used for remote debugger deserialization</summary> */
-        public RecognitionException()
-            : this("A recognition error occurred.", null, null)
-        {
-        }
-
-        public RecognitionException( IIntStream input )
-            : this("A recognition error occurred.", input, null)
-        {
-        }
-
-        public RecognitionException(string message)
-            : this(message, null, null)
-        {
-        }
-
-        public RecognitionException(string message, IIntStream input)
-            : this(message, input, null)
-        {
-        }
-
-        public RecognitionException(string message, Exception innerException)
-            : this(message, null, innerException)
-        {
-        }
-
-        public RecognitionException(string message, IIntStream input, Exception innerException)
-            : base(message, innerException)
-        {
-            this._input = input;
-            if (input != null)
-            {
-                this._index = input.Index;
-                if (input is ITokenStream)
-                {
-                    this._token = ((ITokenStream)input).LT(1);
-                    this._line = _token.Line;
-                    this._charPositionInLine = _token.CharPositionInLine;
-                }
-
-                ITreeNodeStream tns = input as ITreeNodeStream;
-                if (tns != null)
-                {
-                    ExtractInformationFromTreeNodeStream(tns);
-                }
-                else
-                {
-                    ICharStream charStream = input as ICharStream;
-                    if (charStream != null)
-                    {
-                        this._c = input.LA(1);
-                        this._line = ((ICharStream)input).Line;
-                        this._charPositionInLine = ((ICharStream)input).CharPositionInLine;
-                    }
-                    else
-                    {
-                        this._c = input.LA(1);
-                    }
-                }
-            }
-        }
-
-        protected RecognitionException(SerializationInfo info, StreamingContext context)
-            : base(info, context)
-        {
-            if (info == null)
-                throw new ArgumentNullException("info");
-
-            _index = info.GetInt32("Index");
-            _c = info.GetInt32("C");
-            _line = info.GetInt32("Line");
-            _charPositionInLine = info.GetInt32("CharPositionInLine");
-            _approximateLineInfo = info.GetBoolean("ApproximateLineInfo");
-        }
-
-        /** <summary>Return the token type or char of the unexpected input element</summary> */
-        public virtual int UnexpectedType
-        {
-            get
-            {
-                if ( _input is ITokenStream )
-                {
-                    return _token.Type;
-                }
-
-                ITreeNodeStream treeNodeStream = _input as ITreeNodeStream;
-                if ( treeNodeStream != null )
-                {
-                    ITreeAdaptor adaptor = treeNodeStream.TreeAdaptor;
-                    return adaptor.GetType( _node );
-                }
-
-                return _c;
-            }
-        }
-
-        public bool ApproximateLineInfo
-        {
-            get
-            {
-                return _approximateLineInfo;
-            }
-            protected set
-            {
-                _approximateLineInfo = value;
-            }
-        }
-
-        public IIntStream Input
-        {
-            get
-            {
-                return _input;
-            }
-            protected set
-            {
-                _input = value;
-            }
-        }
-
-        public IToken Token
-        {
-            get
-            {
-                return _token;
-            }
-            set
-            {
-                _token = value;
-            }
-        }
-
-        public object Node
-        {
-            get
-            {
-                return _node;
-            }
-            protected set
-            {
-                _node = value;
-            }
-        }
-
-        public int Character
-        {
-            get
-            {
-                return _c;
-            }
-            protected set
-            {
-                _c = value;
-            }
-        }
-
-        public int Index
-        {
-            get
-            {
-                return _index;
-            }
-            protected set
-            {
-                _index = value;
-            }
-        }
-
-        public int Line
-        {
-            get
-            {
-                return _line;
-            }
-            set
-            {
-                _line = value;
-            }
-        }
-
-        public int CharPositionInLine
-        {
-            get
-            {
-                return _charPositionInLine;
-            }
-            set
-            {
-                _charPositionInLine = value;
-            }
-        }
-
-        public override void GetObjectData(SerializationInfo info, StreamingContext context)
-        {
-            if (info == null)
-                throw new ArgumentNullException("info");
-
-            base.GetObjectData(info, context);
-            info.AddValue("Index", _index);
-            info.AddValue("C", _c);
-            info.AddValue("Line", _line);
-            info.AddValue("CharPositionInLine", _charPositionInLine);
-            info.AddValue("ApproximateLineInfo", _approximateLineInfo);
-        }
-
-        protected virtual void ExtractInformationFromTreeNodeStream(ITreeNodeStream input)
-        {
-            this._node = input.LT(1);
-            ITokenStreamInformation streamInformation = input as ITokenStreamInformation;
-            if (streamInformation != null)
-            {
-                IToken lastToken = streamInformation.LastToken;
-                IToken lastRealToken = streamInformation.LastRealToken;
-                if (lastRealToken != null)
-                {
-                    this._token = lastRealToken;
-                    this._line = lastRealToken.Line;
-                    this._charPositionInLine = lastRealToken.CharPositionInLine;
-                    this._approximateLineInfo = lastRealToken.Equals(lastToken);
-                }
-            }
-            else
-            {
-                ITreeAdaptor adaptor = input.TreeAdaptor;
-                IToken payload = adaptor.GetToken(_node);
-                if (payload != null)
-                {
-                    this._token = payload;
-                    if (payload.Line <= 0)
-                    {
-                        // imaginary node; no line/pos info; scan backwards
-                        int i = -1;
-                        object priorNode = input.LT(i);
-                        while (priorNode != null)
-                        {
-                            IToken priorPayload = adaptor.GetToken(priorNode);
-                            if (priorPayload != null && priorPayload.Line > 0)
-                            {
-                                // we found the most recent real line / pos info
-                                this._line = priorPayload.Line;
-                                this._charPositionInLine = priorPayload.CharPositionInLine;
-                                this._approximateLineInfo = true;
-                                break;
-                            }
-                            --i;
-                            try
-                            {
-                                priorNode = input.LT(i);
-                            }
-                            catch (ArgumentException)
-                            {
-                                priorNode = null;
-                            }
-                        }
-                    }
-                    else
-                    {
-                        // node created from real token
-                        this._line = payload.Line;
-                        this._charPositionInLine = payload.CharPositionInLine;
-                    }
-                }
-                else if (this._node is Tree.ITree)
-                {
-                    this._line = ((Tree.ITree)this._node).Line;
-                    this._charPositionInLine = ((Tree.ITree)this._node).CharPositionInLine;
-                    if (this._node is CommonTree)
-                    {
-                        this._token = ((CommonTree)this._node).Token;
-                    }
-                }
-                else
-                {
-                    int type = adaptor.GetType(this._node);
-                    string text = adaptor.GetText(this._node);
-                    this._token = new CommonToken(type, text);
-                }
-            }
-        }
-    }
-}
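For context, RecognitionException above snapshots the error position at throw time (unexpected symbol type, 1-based line, 0-based column, and whether the position is only approximate). A minimal reporting sketch built from those properties; the method name is illustrative:

    static string Describe(RecognitionException e)
    {
        // Line is 1-based, CharPositionInLine is 0-based, per the fields above.
        return string.Format(
            "line {0}:{1} unexpected element type {2}{3}",
            e.Line, e.CharPositionInLine, e.UnexpectedType,
            e.ApproximateLineInfo ? " (position approximate)" : "");
    }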
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BaseTree.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BaseTree.cs
deleted file mode 100644
index 79f3d97..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BaseTree.cs
+++ /dev/null
@@ -1,532 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.Tree
-{
-    using System;
-    using System.Collections.Generic;
-
-    using StringBuilder = System.Text.StringBuilder;
-
-    /** <summary>
-     *  A generic tree implementation with no payload.  You must subclass to
-     *  actually have any user data.  ANTLR v3 uses a list of children approach
-     *  instead of the child-sibling approach in v2.  A flat tree (a list) is
-     *  an empty node whose children represent the list.  An empty, but
-     *  non-null node is called "nil".
-     *  </summary>
-     */
-    [System.Serializable]
-    [System.Diagnostics.DebuggerTypeProxy(typeof(AntlrRuntime_BaseTreeDebugView))]
-    public abstract class BaseTree : ITree
-    {
-        private IList<ITree> _children;
-
-        public BaseTree()
-        {
-        }
-
-        /** <summary>
-         *  Creating a new node from an existing node does nothing for BaseTree,
-         *  as there are no fields other than the children list, which cannot
-         *  be copied because the children are not considered part of this node.
-         *  </summary>
-         */
-        public BaseTree( ITree node )
-        {
-        }
-
-        /** <summary>
-         *  Get the children internal List; note that if you directly mess with
-         *  the list, do so at your own risk.
-         *  </summary>
-         */
-        public virtual IList<ITree> Children
-        {
-            get
-            {
-                return _children;
-            }
-
-            private set
-            {
-                _children = value;
-            }
-        }
-
-        #region ITree Members
-
-        public virtual int ChildCount
-        {
-            get
-            {
-                if ( Children == null )
-                    return 0;
-
-                return Children.Count;
-            }
-        }
-
-        /** <summary>BaseTree doesn't track parent pointers.</summary> */
-        public virtual ITree Parent
-        {
-            get
-            {
-                return null;
-            }
-            set
-            {
-            }
-        }
-
-        /** <summary>BaseTree doesn't track child indexes.</summary> */
-        public virtual int ChildIndex
-        {
-            get
-            {
-                return 0;
-            }
-            set
-            {
-            }
-        }
-
-        public virtual bool IsNil
-        {
-            get
-            {
-                return false;
-            }
-        }
-
-        public abstract int TokenStartIndex
-        {
-            get;
-            set;
-        }
-
-        public abstract int TokenStopIndex
-        {
-            get;
-            set;
-        }
-
-        public abstract int Type
-        {
-            get;
-            set;
-        }
-
-        public abstract string Text
-        {
-            get;
-            set;
-        }
-
-        public virtual int Line
-        {
-            get;
-            set;
-        }
-
-        public virtual int CharPositionInLine
-        {
-            get;
-            set;
-        }
-
-        #endregion
-
-        public virtual ITree GetChild( int i )
-        {
-            if (i < 0)
-                throw new ArgumentOutOfRangeException();
-
-            if ( Children == null || i >= Children.Count )
-                return null;
-
-            return Children[i];
-        }
-
-        public virtual ITree GetFirstChildWithType( int type )
-        {
-            // guard against leaf nodes that have no children list
-            if ( Children == null )
-                return null;
-
-            foreach ( ITree child in Children )
-            {
-                if ( child.Type == type )
-                    return child;
-            }
-
-            return null;
-        }
-
-        /** <summary>Add t as child of this node.</summary>
-         *
-         *  <remarks>
-         *  Warning: if this node has no children but t does,
-         *  and t IsNil, then this routine moves t's children to this node via
-         *  this.Children = t.Children; i.e., without copying the list.
-         *  </remarks>
-         */
-        public virtual void AddChild( ITree t )
-        {
-            //System.out.println("add child "+t.toStringTree()+" "+this.toStringTree());
-            //System.out.println("existing children: "+children);
-            if ( t == null )
-            {
-                return; // do nothing upon addChild(null)
-            }
-            if ( t.IsNil )
-            {
-                // t is an empty node possibly with children
-                BaseTree childTree = t as BaseTree;
-                if ( childTree != null && this.Children != null && this.Children == childTree.Children )
-                {
-                    throw new Exception( "attempt to add child list to itself" );
-                }
-                // just add all of childTree's children to this
-                if ( t.ChildCount > 0 )
-                {
-                    if ( this.Children != null || childTree == null )
-                    {
-                        if ( this.Children == null )
-                            this.Children = CreateChildrenList();
-
-                        // must copy, this has children already
-                        int n = t.ChildCount;
-                        for ( int i = 0; i < n; i++ )
-                        {
-                            ITree c = t.GetChild( i );
-                            this.Children.Add( c );
-                            // handle double-link stuff for each child of nil root
-                            c.Parent = this;
-                            c.ChildIndex = Children.Count - 1;
-                        }
-                    }
-                    else
-                    {
-                        // no children for this but t is a BaseTree with children;
-                        // just set the pointer and call the general freshener routine
-                        this.Children = childTree.Children;
-                        this.FreshenParentAndChildIndexes();
-                    }
-                }
-            }
-            else
-            {
-                // child is not nil (don't care about children)
-                if ( Children == null )
-                {
-                    Children = CreateChildrenList(); // create children list on demand
-                }
-                Children.Add( t );
-                t.Parent = this;
-                t.ChildIndex = Children.Count - 1;
-            }
-            // System.out.println("now children are: "+children);
-        }
-
-        /** <summary>Add all elements of kids list as children of this node</summary> */
-        public virtual void AddChildren( IEnumerable<ITree> kids )
-        {
-            if (kids == null)
-                throw new ArgumentNullException("kids");
-
-            foreach ( ITree t in kids )
-                AddChild( t );
-        }
-
-        public virtual void SetChild( int i, ITree t )
-        {
-            if (i < 0)
-                throw new ArgumentOutOfRangeException("i");
-
-            if ( t == null )
-            {
-                return;
-            }
-            if ( t.IsNil )
-            {
-                throw new ArgumentException( "Can't set single child to a list" );
-            }
-            if ( Children == null )
-            {
-                Children = CreateChildrenList();
-            }
-            Children[i] = t;
-            t.Parent = this;
-            t.ChildIndex = i;
-        }
-
-        public virtual object DeleteChild( int i )
-        {
-            if (i < 0)
-                throw new ArgumentOutOfRangeException("i");
-            if (i >= ChildCount)
-                throw new ArgumentException();
-
-            if ( Children == null )
-                return null;
-
-            ITree killed = Children[i];
-            Children.RemoveAt( i );
-            // walk rest and decrement their child indexes
-            this.FreshenParentAndChildIndexes( i );
-            return killed;
-        }
-
-        /** <summary>
-         *  Delete children from start to stop and replace with t even if t is
-         *  a list (nil-root tree).  num of children can increase or decrease.
-         *  For huge child lists, inserting children can force walking rest of
-         *  children to set their childindex; could be slow.
-         *  </summary>
-         */
-        public virtual void ReplaceChildren( int startChildIndex, int stopChildIndex, object t )
-        {
-            if (startChildIndex < 0)
-                throw new ArgumentOutOfRangeException();
-            if (stopChildIndex < 0)
-                throw new ArgumentOutOfRangeException();
-            if (t == null)
-                throw new ArgumentNullException("t");
-            if (stopChildIndex < startChildIndex)
-                throw new ArgumentException();
-
-            /*
-            System.out.println("replaceChildren "+startChildIndex+", "+stopChildIndex+
-                               " with "+((BaseTree)t).toStringTree());
-            System.out.println("in="+toStringTree());
-            */
-            if ( Children == null )
-            {
-                throw new ArgumentException( "indexes invalid; no children in list" );
-            }
-            int replacingHowMany = stopChildIndex - startChildIndex + 1;
-            int replacingWithHowMany;
-            ITree newTree = (ITree)t;
-            IList<ITree> newChildren = null;
-            // normalize to a list of children to add: newChildren
-            if ( newTree.IsNil )
-            {
-                BaseTree baseTree = newTree as BaseTree;
-                if ( baseTree != null && baseTree.Children != null )
-                {
-                    newChildren = baseTree.Children;
-                }
-                else
-                {
-                    newChildren = CreateChildrenList();
-                    int n = newTree.ChildCount;
-                    for ( int i = 0; i < n; i++ )
-                        newChildren.Add( newTree.GetChild( i ) );
-                }
-            }
-            else
-            {
-                newChildren = new List<ITree>( 1 );
-                newChildren.Add( newTree );
-            }
-            replacingWithHowMany = newChildren.Count;
-            int numNewChildren = newChildren.Count;
-            int delta = replacingHowMany - replacingWithHowMany;
-            // if same number of nodes, do direct replace
-            if ( delta == 0 )
-            {
-                int j = 0; // index into new children
-                for ( int i = startChildIndex; i <= stopChildIndex; i++ )
-                {
-                    ITree child = newChildren[j];
-                    Children[i] = child;
-                    child.Parent = this;
-                    child.ChildIndex = i;
-                    j++;
-                }
-            }
-            else if ( delta > 0 )
-            {
-                // fewer new nodes than there were
-                // set children and then delete extra
-                for ( int j = 0; j < numNewChildren; j++ )
-                {
-                    Children[startChildIndex + j] = newChildren[j];
-                }
-                int indexToDelete = startChildIndex + numNewChildren;
-                for ( int c = indexToDelete; c <= stopChildIndex; c++ )
-                {
-                    // delete same index, shifting everybody down each time
-                    Children.RemoveAt( indexToDelete );
-                }
-                FreshenParentAndChildIndexes( startChildIndex );
-            }
-            else
-            {
-                // more new nodes than were there before
-                // fill in as many children as we can (replacingHowMany) w/o moving data
-                for ( int j = 0; j < replacingHowMany; j++ )
-                {
-                    Children[startChildIndex + j] = newChildren[j];
-                }
-                int numToInsert = replacingWithHowMany - replacingHowMany;
-                for ( int j = replacingHowMany; j < replacingWithHowMany; j++ )
-                {
-                    Children.Insert( startChildIndex + j, newChildren[j] );
-                }
-                FreshenParentAndChildIndexes( startChildIndex );
-            }
-            //System.out.println("out="+toStringTree());
-        }
-
-        /** <summary>Override in a subclass to change the impl of children list</summary> */
-        protected virtual IList<ITree> CreateChildrenList()
-        {
-            return new List<ITree>();
-        }
-
-        /** <summary>Set the parent and child index values for all child of t</summary> */
-        public virtual void FreshenParentAndChildIndexes()
-        {
-            FreshenParentAndChildIndexes( 0 );
-        }
-
-        public virtual void FreshenParentAndChildIndexes( int offset )
-        {
-            int n = ChildCount;
-            for ( int c = offset; c < n; c++ )
-            {
-                ITree child = GetChild( c );
-                child.ChildIndex = c;
-                child.Parent = this;
-            }
-        }
-
-        public virtual void SanityCheckParentAndChildIndexes()
-        {
-            SanityCheckParentAndChildIndexes( null, -1 );
-        }
-
-        public virtual void SanityCheckParentAndChildIndexes( ITree parent, int i )
-        {
-            if ( parent != this.Parent )
-            {
-                throw new InvalidOperationException( "parents don't match; expected " + parent + " found " + this.Parent );
-            }
-            if ( i != this.ChildIndex )
-            {
-                throw new InvalidOperationException( "child indexes don't match; expected " + i + " found " + this.ChildIndex );
-            }
-            int n = this.ChildCount;
-            for ( int c = 0; c < n; c++ )
-            {
-                BaseTree child = (BaseTree)this.GetChild( c );
-                child.SanityCheckParentAndChildIndexes( this, c );
-            }
-        }
-
-        /** <summary>Walk upwards looking for ancestor with this token type.</summary> */
-        public virtual bool HasAncestor( int ttype )
-        {
-            return GetAncestor( ttype ) != null;
-        }
-
-        /** <summary>Walk upwards and get first ancestor with this token type.</summary> */
-        public virtual ITree GetAncestor( int ttype )
-        {
-            ITree t = this;
-            t = t.Parent;
-            while ( t != null )
-            {
-                if ( t.Type == ttype )
-                    return t;
-                t = t.Parent;
-            }
-            return null;
-        }
-
-        /** <summary>
-         *  Return a list of all ancestors of this node.  The first node of
-         *  the list is the root and the last is the parent of this node.
-         *  </summary>
-         */
-        public virtual IList<ITree> GetAncestors()
-        {
-            if ( Parent == null )
-                return null;
-
-            List<ITree> ancestors = new List<ITree>();
-            ITree t = this;
-            t = t.Parent;
-            while ( t != null )
-            {
-                ancestors.Insert( 0, t ); // insert at start
-                t = t.Parent;
-            }
-            return ancestors;
-        }
-
-        /** <summary>Print out a whole tree not just a node</summary> */
-        public virtual string ToStringTree()
-        {
-            if ( Children == null || Children.Count == 0 )
-            {
-                return this.ToString();
-            }
-            StringBuilder buf = new StringBuilder();
-            if ( !IsNil )
-            {
-                buf.Append( "(" );
-                buf.Append( this.ToString() );
-                buf.Append( ' ' );
-            }
-            for ( int i = 0; Children != null && i < Children.Count; i++ )
-            {
-                ITree t = Children[i];
-                if ( i > 0 )
-                {
-                    buf.Append( ' ' );
-                }
-                buf.Append( t.ToStringTree() );
-            }
-            if ( !IsNil )
-            {
-                buf.Append( ")" );
-            }
-            return buf.ToString();
-        }
-
-        /** <summary>Override to say how a node (not a tree) should look as text</summary> */
-        public override abstract string ToString();
-
-        #region Tree Members
-        public abstract ITree DupNode();
-        #endregion
-    }
-}
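Note: the BaseTree API deleted above is normally used through CommonTree, the runtime's concrete subclass that carries a CommonToken payload (both classes appear elsewhere in this runtime). Below is a minimal, hedged sketch of the child-management and printing members defined in this file (AddChild, ChildCount, GetChild, ToStringTree); the CommonTree(IToken) constructor is an assumption about the companion class, not something shown in this diff.

    using Antlr.Runtime;
    using Antlr.Runtime.Tree;

    class BaseTreeSketch
    {
        static void Main()
        {
            // Build the tree (+ 1 2) by hand; token types 1 and 2 are
            // arbitrary illustration values, not real grammar constants.
            CommonTree plus = new CommonTree(new CommonToken(1, "+"));
            plus.AddChild(new CommonTree(new CommonToken(2, "1")));
            plus.AddChild(new CommonTree(new CommonToken(2, "2")));

            // ChildCount/GetChild come from BaseTree; ToStringTree prints "(+ 1 2)".
            for (int i = 0; i < plus.ChildCount; i++)
                System.Console.WriteLine(plus.GetChild(i).Text);
            System.Console.WriteLine(plus.ToStringTree());
        }
    }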
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTreeNodeStream.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTreeNodeStream.cs
deleted file mode 100644
index 45c46be..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTreeNodeStream.cs
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.Tree
-{
-    using System.Collections.Generic;
-    using Antlr.Runtime.Misc;
-
-    using StringBuilder = System.Text.StringBuilder;
-    using NotSupportedException = System.NotSupportedException;
-
-    [System.Serializable]
-    public class CommonTreeNodeStream : LookaheadStream<object>, ITreeNodeStream
-    {
-        public const int DEFAULT_INITIAL_BUFFER_SIZE = 100;
-        public const int INITIAL_CALL_STACK_SIZE = 10;
-
-        /** <summary>Pull nodes from which tree?</summary> */
-        object _root;
-
-        /** <summary>If this tree (root) was created from a token stream, track it.</summary> */
-        protected ITokenStream tokens;
-
-        /** <summary>What tree adaptor was used to build these trees</summary> */
-        [System.NonSerialized]
-        ITreeAdaptor _adaptor;
-
-        /** The tree iterator we are using */
-        TreeIterator _it;
-
-        /** <summary>Stack of indexes used for push/pop calls</summary> */
-        Stack<int> _calls;
-
-        /** <summary>Tree (nil A B C) trees like flat A B C streams</summary> */
-        bool _hasNilRoot = false;
-
-        /** <summary>Tracks tree depth.  Level=0 means we're at root node level.</summary> */
-        int _level = 0;
-
-        public CommonTreeNodeStream( object tree )
-            : this( new CommonTreeAdaptor(), tree )
-        {
-        }
-
-        public CommonTreeNodeStream( ITreeAdaptor adaptor, object tree )
-        {
-            this._root = tree;
-            this._adaptor = adaptor;
-            _it = new TreeIterator( adaptor, _root );
-        }
-
-        #region Properties
-
-        public virtual string SourceName
-        {
-            get
-            {
-                if ( TokenStream == null )
-                    return null;
-
-                return TokenStream.SourceName;
-            }
-        }
-
-        public virtual ITokenStream TokenStream
-        {
-            get
-            {
-                return tokens;
-            }
-            set
-            {
-                tokens = value;
-            }
-        }
-
-        public virtual ITreeAdaptor TreeAdaptor
-        {
-            get
-            {
-                return _adaptor;
-            }
-
-            set
-            {
-                _adaptor = value;
-            }
-        }
-
-        public virtual object TreeSource
-        {
-            get
-            {
-                return _root;
-            }
-        }
-
-        public virtual bool UniqueNavigationNodes
-        {
-            get
-            {
-                return false;
-            }
-
-            set
-            {
-            }
-        }
-
-        #endregion
-
-        public virtual void Reset()
-        {
-            base.Clear();
-            _it.Reset();
-            _hasNilRoot = false;
-            _level = 0;
-            if ( _calls != null )
-                _calls.Clear();
-        }
-
-        public override object NextElement()
-        {
-            _it.MoveNext();
-            object t = _it.Current;
-            //System.out.println("pulled "+adaptor.getType(t));
-            if ( t == _it.up )
-            {
-                _level--;
-                if ( _level == 0 && _hasNilRoot )
-                {
-                    _it.MoveNext();
-                    return _it.Current; // don't give last UP; get EOF
-                }
-            }
-            else if ( t == _it.down )
-            {
-                _level++;
-            }
-
-            if ( _level == 0 && TreeAdaptor.IsNil( t ) )
-            {
-                // if nil root, scarf nil, DOWN
-                _hasNilRoot = true;
-                _it.MoveNext();
-                t = _it.Current; // t is now DOWN, so get first real node next
-                _level++;
-                _it.MoveNext();
-                t = _it.Current;
-            }
-
-            return t;
-        }
-
-        public override bool IsEndOfFile(object o)
-        {
-            return TreeAdaptor.GetType(o) == CharStreamConstants.EndOfFile;
-        }
-
-        public virtual int LA( int i )
-        {
-            return TreeAdaptor.GetType( LT( i ) );
-        }
-
-        /** Make stream jump to a new location, saving old location.
-         *  Switch back with pop().
-         */
-        public virtual void Push( int index )
-        {
-            if ( _calls == null )
-            {
-                _calls = new Stack<int>();
-            }
-            _calls.Push( _p ); // save current index
-            Seek( index );
-        }
-
-        /** Seek back to previous index saved during last push() call.
-         *  Return top of stack (return index).
-         */
-        public virtual int Pop()
-        {
-            int ret = _calls.Pop();
-            Seek( ret );
-            return ret;
-        }
-
-        #region Tree rewrite interface
-
-        public virtual void ReplaceChildren( object parent, int startChildIndex, int stopChildIndex, object t )
-        {
-            if ( parent != null )
-            {
-                TreeAdaptor.ReplaceChildren( parent, startChildIndex, stopChildIndex, t );
-            }
-        }
-
-        #endregion
-
-        public virtual string ToString( object start, object stop )
-        {
-            // we'll have to walk from start to stop in tree; we're not keeping
-            // a complete node stream buffer
-            return "n/a";
-        }
-
-        /** <summary>For debugging; destructive: moves tree iterator to end.</summary> */
-        public virtual string ToTokenTypeString()
-        {
-            Reset();
-            StringBuilder buf = new StringBuilder();
-            object o = LT( 1 );
-            int type = TreeAdaptor.GetType( o );
-            while ( type != TokenTypes.EndOfFile )
-            {
-                buf.Append( " " );
-                buf.Append( type );
-                Consume();
-                o = LT( 1 );
-                type = TreeAdaptor.GetType( o );
-            }
-            return buf.ToString();
-        }
-    }
-}
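For reference, the CommonTreeNodeStream removed here flattens a tree into a stream of nodes with imaginary DOWN/UP markers. A short sketch of pulling nodes by hand, using only members shown in this file (tree is assumed to be a CommonTree root built elsewhere):

    CommonTreeNodeStream nodes = new CommonTreeNodeStream(tree);

    // Walk the flattened form: root, DOWN, children..., UP, ..., EOF.
    while (nodes.LA(1) != TokenTypes.EndOfFile)
    {
        object node = nodes.LT(1);   // current tree node (or a DOWN/UP navigation node)
        System.Console.Write(" " + nodes.TreeAdaptor.GetType(node));
        nodes.Consume();
    }

    // Equivalent debugging helper defined above:
    nodes.Reset();
    System.Console.WriteLine(nodes.ToTokenTypeString());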
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeNodeStream.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeNodeStream.cs
deleted file mode 100644
index b133f39..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeNodeStream.cs
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.Tree
-{
-    /** <summary>A stream of tree nodes, accessing nodes from a tree of some kind</summary> */
-    public interface ITreeNodeStream : IIntStream
-    {
-        /** <summary>
-         *  Get a tree node at an absolute index i; 0..n-1.
-         *  If you don't want to buffer up nodes, then this method makes no
-         *  sense for you.
-         *  </summary>
-         */
-        object this[int i]
-        {
-            get;
-        }
-
-        /** <summary>
-         *  Get tree node at current input pointer + i ahead where i=1 is next node.
-         *  i&lt;0 indicates nodes in the past.  So LT(-1) is previous node, but
-         *  implementations are not required to provide results for k &lt; -1.
-         *  LT(0) is undefined.  For i&gt;=n, return null.
-         *  Return null for LT(0) and any index that results in an absolute address
-         *  that is negative.
-         *  </summary>
-         *
-         *  <remarks>
-         *  This is analogous to the LT() method of the TokenStream, but this
-         *  returns a tree node instead of a token.  Makes code gen identical
-         *  for both parser and tree grammars. :)
-         *  </remarks>
-         */
-        object LT( int k );
-
-        /** <summary>
-         *  Where is this stream pulling nodes from?  This is not the name, but
-         *  the object that provides node objects.
-         *  </summary>
-         */
-        object TreeSource
-        {
-            get;
-        }
-
-        /** <summary>
-         *  If the tree associated with this stream was created from a TokenStream,
-         *  you can specify it here.  Used to do rule $text attribute in tree
-         *  parser.  Optional unless you use tree parser rule text attribute
-         *  or output=template and rewrite=true options.
-         *  </summary>
-         */
-        ITokenStream TokenStream
-        {
-            get;
-        }
-
-        /** <summary>
-         *  What adaptor can tell me how to interpret/navigate nodes and
-         *  trees.  E.g., get text of a node.
-         *  </summary>
-         */
-        ITreeAdaptor TreeAdaptor
-        {
-            get;
-        }
-
-        /** <summary>
-         *  As we flatten the tree, we use UP, DOWN nodes to represent
-         *  the tree structure.  When debugging we need unique nodes
-         *  so we have to instantiate new ones.  When doing normal tree
-         *  parsing, it's slow and a waste of memory to create unique
-         *  navigation nodes.  Default should be false.
-         *  </summary>
-         */
-        bool UniqueNavigationNodes
-        {
-            get;
-            set;
-        }
-
-        /** <summary>
-         *  Return the text of all nodes from start to stop, inclusive.
-         *  If the stream does not buffer all the nodes then it can still
-         *  walk recursively from start until stop.  You can always return
-         *  null or "" too, but users should not access $ruleLabel.text in
-         *  an action of course in that case.
-         *  </summary>
-         */
-        string ToString( object start, object stop );
-
-
-        #region REWRITING TREES (used by tree parser)
-
-        /** <summary>
-         *  Replace from start to stop child index of parent with t, which might
-         *  be a list.  Number of children may be different
-         *  after this call.  The stream is notified because it is walking the
-         *  tree and might need to know you are monkeying with the underlying
-         *  tree.  Also, it might be able to modify the node stream to avoid
-         *  restreaming for future phases.
-         *  </summary>
-         *
-         *  <remarks>
-         *  If parent is null, don't do anything; must be at root of overall tree.
-         *  Can't replace whatever points to the parent externally.  Do nothing.
-         *  </remarks>
-         */
-        void ReplaceChildren( object parent, int startChildIndex, int stopChildIndex, object t );
-
-        #endregion
-
-    }
-}
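The ReplaceChildren hook documented above is how generated tree parsers notify the stream when a rewrite changes the tree underneath it. A hedged fragment, reusing the CommonTreeNodeStream implementation and the adaptor.Create call that appears later in TreeParser.cs; token type 5 is an arbitrary illustration value, and the node returned by LT(1) is assumed to have at least one child:

    ITreeNodeStream stream = new CommonTreeNodeStream(tree);   // tree built elsewhere
    ITreeAdaptor adaptor = stream.TreeAdaptor;

    object parent = stream.LT(1);                              // next node in the stream
    object replacement = adaptor.Create(new CommonToken(5, "new"));

    // Replace child 0 of parent and tell the stream about the surgery.
    stream.ReplaceChildren(parent, 0, 0, replacement);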
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleElementStream.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleElementStream.cs
deleted file mode 100644
index 8e3d5b0..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleElementStream.cs
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.Tree
-{
-    using System.Collections.Generic;
-    using IList = System.Collections.IList;
-
-    /** <summary>
-     *  A generic list of elements tracked in an alternative to be used in
-     *  a -> rewrite rule.  We need to subclass to fill in the next() method,
-     *  which returns either an AST node wrapped around a token payload or
-     *  an existing subtree.
-     *  </summary>
-     *
-     *  <remarks>
-     *  Once you start next()ing, do not try to add more elements.  It will
-     *  break the cursor tracking I believe.
-     *
-     *  TODO: add mechanism to detect/puke on modification after reading from stream
-     *  </remarks>
-     *
-     *  <see cref="RewriteRuleSubtreeStream"/>
-     *  <see cref="RewriteRuleTokenStream"/>
-     */
-    [System.Serializable]
-    public abstract class RewriteRuleElementStream
-    {
-        /** <summary>
-         *  Cursor 0..n-1.  If singleElement!=null, cursor is 0 until you next(),
-         *  which bumps it to 1 meaning no more elements.
-         *  </summary>
-         */
-        protected int cursor = 0;
-
-        /** <summary>Track single elements w/o creating a list.  Upon 2nd add, alloc list.</summary> */
-        protected object singleElement;
-
-        /** <summary>The list of tokens or subtrees we are tracking.</summary> */
-        protected IList elements;
-
-        /** <summary>Once a node / subtree has been used in a stream, it must be dup'd
-         *  from then on.  Streams are reset after subrules so that the streams
-         *  can be reused in future subrules.  So, reset must set a dirty bit.
-         *  If dirty, then next() always returns a dup.
-         *
-         *  I wanted to use "naughty bit" here, but couldn't think of a way
-         *  to use "naughty".
-         *  </summary>
-         */
-        protected bool dirty = false;
-
-        /** <summary>The element or stream description; usually has name of the token or
-         *  rule reference that this list tracks.  Can include rulename too, but
-         *  the exception would track that info.
-         *  </summary>
-         */
-        protected string elementDescription;
-        protected ITreeAdaptor adaptor;
-
-        public RewriteRuleElementStream( ITreeAdaptor adaptor, string elementDescription )
-        {
-            this.elementDescription = elementDescription;
-            this.adaptor = adaptor;
-        }
-
-        /** <summary>Create a stream with one element</summary> */
-        public RewriteRuleElementStream( ITreeAdaptor adaptor, string elementDescription, object oneElement )
-            : this( adaptor, elementDescription )
-        {
-            Add( oneElement );
-        }
-
-        /** <summary>Create a stream, but feed off an existing list</summary> */
-        public RewriteRuleElementStream( ITreeAdaptor adaptor, string elementDescription, IList elements )
-            : this( adaptor, elementDescription )
-        {
-            this.singleElement = null;
-            this.elements = elements;
-        }
-
-        /** <summary>
-         *  Reset the condition of this stream so that it appears we have
-         *  not consumed any of its elements.  Elements themselves are untouched.
-         *  Once we reset the stream, any future use will need duplicates.  Set
-         *  the dirty bit.
-         *  </summary>
-         */
-        public virtual void Reset()
-        {
-            cursor = 0;
-            dirty = true;
-        }
-
-        public virtual void Add( object el )
-        {
-            //System.out.println("add '"+elementDescription+"' is "+el);
-            if ( el == null )
-            {
-                return;
-            }
-            if ( elements != null )
-            { // if in list, just add
-                elements.Add( el );
-                return;
-            }
-            if ( singleElement == null )
-            { // no elements yet, track w/o list
-                singleElement = el;
-                return;
-            }
-            // adding 2nd element, move to list
-            elements = new List<object>( 5 );
-            elements.Add( singleElement );
-            singleElement = null;
-            elements.Add( el );
-        }
-
-        /** <summary>
-         *  Return the next element in the stream.  If out of elements, throw
-         *  an exception unless size()==1.  If size is 1, then return elements[0].
-         *  Return a duplicate node/subtree if stream is out of elements and
-         *  size==1.  If we've already used the element, dup (dirty bit set).
-         *  </summary>
-         */
-        public virtual object NextTree()
-        {
-            int n = Count;
-            if ( dirty || ( cursor >= n && n == 1 ) )
-            {
-                // if out of elements and size is 1, dup
-                object el = NextCore();
-                return Dup( el );
-            }
-            // test size above then fetch
-            object el2 = NextCore();
-            return el2;
-        }
-
-        /** <summary>
-         *  Do the work of getting the next element, making sure that it's
-         *  a tree node or subtree.  Deal with the optimization of single-
-         *  element list versus list of size > 1.  Throw an exception
-         *  if the stream is empty or we're out of elements and size>1.
-         *  protected so you can override in a subclass if necessary.
-         *  </summary>
-         */
-        protected virtual object NextCore()
-        {
-            int n = Count;
-            if ( n == 0 )
-            {
-                throw new RewriteEmptyStreamException( elementDescription );
-            }
-            if ( cursor >= n )
-            { // out of elements?
-                if ( n == 1 )
-                {  // if size is 1, it's ok; return and we'll dup
-                    return ToTree( singleElement );
-                }
-                // out of elements and size was not 1, so we can't dup
-                throw new RewriteCardinalityException( elementDescription );
-            }
-            // we have elements
-            if ( singleElement != null )
-            {
-                cursor++; // move cursor even for single element list
-                return ToTree( singleElement );
-            }
-            // must have more than one in list, pull from elements
-            object o = ToTree( elements[cursor] );
-            cursor++;
-            return o;
-        }
-
-        /** <summary>
-         *  When constructing trees, sometimes we need to dup a token or AST
-         * 	subtree.  Dup'ing a token means just creating another AST node
-         *  around it.  For trees, you must call the adaptor.dupTree() unless
-         *  the element is for a tree root; then it must be a node dup.
-         *  </summary>
-         */
-        protected abstract object Dup( object el );
-
-        /** <summary>
-         *  Ensure stream emits trees; tokens must be converted to AST nodes.
-         *  AST nodes can be passed through unmolested.
-         *  </summary>
-         */
-        protected virtual object ToTree( object el )
-        {
-            return el;
-        }
-
-        public virtual bool HasNext
-        {
-            get
-            {
-                return ( singleElement != null && cursor < 1 ) ||
-                      ( elements != null && cursor < elements.Count );
-            }
-        }
-
-        public virtual int Count
-        {
-            get
-            {
-                int n = 0;
-                if ( singleElement != null )
-                {
-                    n = 1;
-                }
-                if ( elements != null )
-                {
-                    return elements.Count;
-                }
-                return n;
-            }
-        }
-
-        public virtual string Description
-        {
-            get
-            {
-                return elementDescription;
-            }
-        }
-    }
-}
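Generated parsers use the concrete subclasses referenced in the <see> tags above (RewriteRuleSubtreeStream, RewriteRuleTokenStream) to collect elements matched in an alternative and replay them in a -> rewrite. A hedged sketch of that pattern, assuming RewriteRuleSubtreeStream keeps the base-class constructor shape and that the adaptor offers the usual Nil/AddChild members:

    ITreeAdaptor adaptor = new CommonTreeAdaptor();
    RewriteRuleSubtreeStream stream_expr =
        new RewriteRuleSubtreeStream(adaptor, "rule expr");

    // While matching, each matched subtree is recorded...
    stream_expr.Add(exprTree1);   // exprTree1/exprTree2: subtrees produced elsewhere
    stream_expr.Add(exprTree2);

    // ...and during the rewrite they are replayed in order under a new root.
    object root = adaptor.Nil();
    while (stream_expr.HasNext)
        adaptor.AddChild(root, stream_expr.NextTree());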
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeFilter.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeFilter.cs
deleted file mode 100644
index ef7b412..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeFilter.cs
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.Tree
-{
-    using Antlr.Runtime.Misc;
-
-    public class TreeFilter : TreeParser
-    {
-        protected ITokenStream originalTokenStream;
-        protected ITreeAdaptor originalAdaptor;
-
-        public TreeFilter( ITreeNodeStream input )
-            : this( input, new RecognizerSharedState() )
-        {
-        }
-        public TreeFilter( ITreeNodeStream input, RecognizerSharedState state )
-            : base( input, state )
-        {
-            originalAdaptor = input.TreeAdaptor;
-            originalTokenStream = input.TokenStream;
-        }
-
-        public virtual void ApplyOnce( object t, Action whichRule )
-        {
-            if ( t == null )
-                return;
-
-            try
-            {
-                // share TreeParser object but not parsing-related state
-                state = new RecognizerSharedState();
-                input = new CommonTreeNodeStream( originalAdaptor, t );
-                ( (CommonTreeNodeStream)input ).TokenStream = originalTokenStream;
-                BacktrackingLevel = 1;
-                whichRule();
-                BacktrackingLevel = 0;
-            }
-            catch ( RecognitionException )
-            {
-            }
-        }
-
-        public virtual void Downup( object t )
-        {
-            TreeVisitor v = new TreeVisitor( new CommonTreeAdaptor() );
-            Func<object, object> pre = ( o ) =>
-            {
-                ApplyOnce( o, Topdown );
-                return o;
-            };
-            Func<object, object> post = ( o ) =>
-            {
-                ApplyOnce( o, Bottomup );
-                return o;
-            };
-            v.Visit( t, pre, post );
-        }
-
-        // methods the downup strategy uses to do the up and down rules.
-        // to override, just define tree grammar rule topdown and turn on
-        // filter=true.
-        protected virtual void Topdown()
-        {
-        }
-        protected virtual void Bottomup()
-        {
-        }
-    }
-}
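As the trailing comment notes, TreeFilter is normally the superclass of a tree grammar compiled with filter=true: Downup visits every node, trying Topdown on the way down and Bottomup on the way up. A hand-written subclass, for illustration only, that just prints each node it is applied to:

    using Antlr.Runtime;
    using Antlr.Runtime.Tree;

    class PrintingFilter : TreeFilter
    {
        public PrintingFilter(ITreeNodeStream input) : base(input) { }

        // In generated code these rules come from the tree grammar;
        // here they only peek at the current node via the inherited stream.
        protected override void Topdown()
        {
            System.Console.WriteLine("down: " + input.LT(1));
        }

        protected override void Bottomup()
        {
            System.Console.WriteLine("up:   " + input.LT(1));
        }
    }

    // usage: new PrintingFilter(new CommonTreeNodeStream(tree)).Downup(tree);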
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeParser.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeParser.cs
deleted file mode 100644
index 927ee23..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeParser.cs
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.Tree
-{
-    using ConditionalAttribute = System.Diagnostics.ConditionalAttribute;
-    using Regex = System.Text.RegularExpressions.Regex;
-    using RegexOptions = System.Text.RegularExpressions.RegexOptions;
-
-    /** <summary>
-     *  A parser for a stream of tree nodes.  "tree grammars" result in a subclass
-     *  of this.  All the error reporting and recovery is shared with Parser via
-     *  the BaseRecognizer superclass.
-     *  </summary>
-    */
-    public class TreeParser : BaseRecognizer
-    {
-        public const int DOWN = TokenTypes.Down;
-        public const int UP = TokenTypes.Up;
-
-        // precompiled regex used by inContext
-        static string dotdot = ".*[^.]\\.\\.[^.].*";
-        static string doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*";
-        static Regex dotdotPattern = new Regex( dotdot, RegexOptions.Compiled );
-        static Regex doubleEtcPattern = new Regex( doubleEtc, RegexOptions.Compiled );
-
-        protected ITreeNodeStream input;
-
-        public TreeParser( ITreeNodeStream input )
-            : base() // highlight that we go to super to set state object
-        {
-            SetTreeNodeStream( input );
-        }
-
-        public TreeParser( ITreeNodeStream input, RecognizerSharedState state )
-            : base( state ) // share the state object with another parser
-        {
-            SetTreeNodeStream( input );
-        }
-
-        public override void Reset()
-        {
-            base.Reset(); // reset all recognizer state variables
-            if ( input != null )
-            {
-                input.Seek( 0 ); // rewind the input
-            }
-        }
-
-        /** <summary>Set the input stream</summary> */
-        public virtual void SetTreeNodeStream( ITreeNodeStream input )
-        {
-            this.input = input;
-        }
-
-        public virtual ITreeNodeStream GetTreeNodeStream()
-        {
-            return input;
-        }
-
-        public override string SourceName
-        {
-            get
-            {
-                return input.SourceName;
-            }
-        }
-
-        protected override object GetCurrentInputSymbol( IIntStream input )
-        {
-            return ( (ITreeNodeStream)input ).LT( 1 );
-        }
-
-        protected override object GetMissingSymbol( IIntStream input,
-                                          RecognitionException e,
-                                          int expectedTokenType,
-                                          BitSet follow )
-        {
-            string tokenText =
-                "<missing " + TokenNames[expectedTokenType] + ">";
-            ITreeAdaptor adaptor = ((ITreeNodeStream)e.Input).TreeAdaptor;
-            return adaptor.Create(new CommonToken(expectedTokenType, tokenText));
-        }
-
-        /** <summary>
-         *  Matching '.' in a tree parser has special meaning: skip the node, or
-         *  the entire subtree if the node has children.  If it has children,
-         *  scan until the corresponding UP node.
-         *  </summary>
-         */
-        public override void MatchAny( IIntStream ignore )
-        {
-            state.errorRecovery = false;
-            state.failed = false;
-            // always consume the current node
-            input.Consume();
-            // if the next node is DOWN, then the current node is a subtree:
-            // skip to corresponding UP. must count nesting level to get right UP
-            int look = input.LA( 1 );
-            if ( look == DOWN )
-            {
-                input.Consume();
-                int level = 1;
-                while ( level > 0 )
-                {
-                    switch ( input.LA( 1 ) )
-                    {
-                    case DOWN:
-                        level++;
-                        break;
-                    case UP:
-                        level--;
-                        break;
-                    case TokenTypes.EndOfFile:
-                        return;
-                    default:
-                        break;
-                    }
-                    input.Consume();
-                }
-            }
-        }
-
-        /** <summary>
-         *  We have DOWN/UP nodes in the stream that have no line info, so override.
-         *  We also want to alter the exception type.  Don't try to recover
-         *  from tree parser errors inline...
-         *  </summary>
-         */
-        protected override object RecoverFromMismatchedToken( IIntStream input, int ttype, BitSet follow )
-        {
-            throw new MismatchedTreeNodeException( ttype, (ITreeNodeStream)input );
-        }
-
-        /** <summary>
-         *  Prefix the error message with the grammar name; the message is
-         *  always intended for the programmer, since the parser (not the
-         *  user) built the input tree.
-         *  </summary>
-         */
-        public override string GetErrorHeader( RecognitionException e )
-        {
-            return GrammarFileName + ": node from " +
-                   ( e.ApproximateLineInfo ? "after " : "" ) + "line " + e.Line + ":" + e.CharPositionInLine;
-        }
-
-        /** <summary>
-         *  Tree parsers parse nodes, which usually have a token object as
-         *  payload. Set the exception token and do the default behavior.
-         *  </summary>
-         */
-        public override string GetErrorMessage( RecognitionException e, string[] tokenNames )
-        {
-            if ( this is TreeParser )
-            {
-                ITreeAdaptor adaptor = ( (ITreeNodeStream)e.Input ).TreeAdaptor;
-                e.Token = adaptor.GetToken( e.Node );
-                if ( e.Token == null )
-                { // could be an UP/DOWN node
-                    e.Token = new CommonToken( adaptor.GetType( e.Node ),
-                                              adaptor.GetText( e.Node ) );
-                }
-            }
-            return base.GetErrorMessage( e, tokenNames );
-        }
-
-        [Conditional("ANTLR_TRACE")]
-        public virtual void TraceIn( string ruleName, int ruleIndex )
-        {
-            base.TraceIn( ruleName, ruleIndex, input.LT( 1 ) );
-        }
-
-        [Conditional("ANTLR_TRACE")]
-        public virtual void TraceOut( string ruleName, int ruleIndex )
-        {
-            base.TraceOut( ruleName, ruleIndex, input.LT( 1 ) );
-        }
-
-    }
-}
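Driving a generated subclass of the TreeParser removed above follows the same shape in every target: wrap the AST from the parser in a CommonTreeNodeStream, attach the original token stream so $text works, and call an entry rule. A hedged sketch; MyTreeGrammar and its prog rule are hypothetical generated names, and parserResult/tokens are assumed to come from an earlier lexer/parser run:

    CommonTree tree = (CommonTree)parserResult.Tree;   // AST from the parser rule's return scope

    CommonTreeNodeStream nodes = new CommonTreeNodeStream(tree);
    nodes.TokenStream = tokens;                        // optional unless tree rules use $text

    MyTreeGrammar walker = new MyTreeGrammar(nodes);   // hypothetical generated tree parser
    walker.prog();                                     // hypothetical entry rule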
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeRewriter.cs b/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeRewriter.cs
deleted file mode 100644
index b610c2c..0000000
--- a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeRewriter.cs
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * [The "BSD licence"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-namespace Antlr.Runtime.Tree
-{
-    using Antlr.Runtime.Misc;
-
-    using Console = System.Console;
-
-    public class TreeRewriter : TreeParser
-    {
-        protected bool showTransformations;
-
-        protected ITokenStream originalTokenStream;
-        protected ITreeAdaptor originalAdaptor;
-
-        Func<IAstRuleReturnScope> topdown_func;
-        Func<IAstRuleReturnScope> bottomup_func;
-
-        public TreeRewriter( ITreeNodeStream input )
-            : this( input, new RecognizerSharedState() )
-        {
-        }
-        public TreeRewriter( ITreeNodeStream input, RecognizerSharedState state )
-            : base( input, state )
-        {
-            originalAdaptor = input.TreeAdaptor;
-            originalTokenStream = input.TokenStream;
-            topdown_func = () => Topdown();
-            bottomup_func = () => Bottomup();
-        }
-
-        public virtual object ApplyOnce( object t, Func<IAstRuleReturnScope> whichRule )
-        {
-            if ( t == null )
-                return null;
-
-            try
-            {
-                // share TreeParser object but not parsing-related state
-                state = new RecognizerSharedState();
-                input = new CommonTreeNodeStream( originalAdaptor, t );
-                ( (CommonTreeNodeStream)input ).TokenStream = originalTokenStream;
-                BacktrackingLevel = 1;
-                IAstRuleReturnScope r = whichRule();
-                BacktrackingLevel = 0;
-                if ( Failed )
-                    return t;
-
-                if (showTransformations && r != null && !t.Equals(r.Tree) && r.Tree != null)
-                    ReportTransformation(t, r.Tree);
-
-                if ( r != null && r.Tree != null )
-                    return r.Tree;
-                else
-                    return t;
-            }
-            catch ( RecognitionException )
-            {
-            }
-
-            return t;
-        }
-
-        public virtual object ApplyRepeatedly( object t, Func<IAstRuleReturnScope> whichRule )
-        {
-            bool treeChanged = true;
-            while ( treeChanged )
-            {
-                object u = ApplyOnce( t, whichRule );
-                treeChanged = !t.Equals( u );
-                t = u;
-            }
-            return t;
-        }
-
-        public virtual object Downup( object t )
-        {
-            return Downup( t, false );
-        }
-
-        public virtual object Downup( object t, bool showTransformations )
-        {
-            this.showTransformations = showTransformations;
-            TreeVisitor v = new TreeVisitor( new CommonTreeAdaptor() );
-            t = v.Visit( t, ( o ) => ApplyOnce( o, topdown_func ), ( o ) => ApplyRepeatedly( o, bottomup_func ) );
-            return t;
-        }
-
-        // methods the downup strategy uses to do the up and down rules.
-        // to override, just define tree grammar rule topdown and turn on
-        // filter=true.
-        public virtual IAstRuleReturnScope Topdown()
-        {
-            return null;
-        }
-
-        public virtual IAstRuleReturnScope Bottomup()
-        {
-            return null;
-        }
-
-        /** Override this if you need transformation tracing to go somewhere
-         *  other than stdout or if you're not using ITree-derived trees.
-         */
-        protected virtual void ReportTransformation(object oldTree, object newTree)
-        {
-            ITree old = oldTree as ITree;
-            ITree @new = newTree as ITree;
-            string oldMessage = old != null ? old.ToStringTree() : "??";
-            string newMessage = @new != null ? @new.ToStringTree() : "??";
-            Console.WriteLine("{0} -> {1}", oldMessage, newMessage);
-        }
-    }
-}
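
The Downup/ApplyRepeatedly pattern above keeps re-running a rewrite rule on a
subtree until a pass produces no change. A rough Java sketch of just that
fixed-point loop (the FixedPointRewrite class and its UnaryOperator-based rule
are invented for illustration; they are not part of the runtime API):

    import java.util.function.UnaryOperator;

    final class FixedPointRewrite {
        /** Apply 'rule' until it stops changing the tree (cf. ApplyRepeatedly above). */
        static Object applyRepeatedly(Object tree, UnaryOperator<Object> rule) {
            boolean changed = true;
            while (changed) {
                Object u = rule.apply(tree);   // one pass; returns its input when nothing matches
                changed = !tree.equals(u);     // fixed point reached when a pass changes nothing
                tree = u;
            }
            return tree;
        }
    }
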
diff --git a/antlr-3.4/runtime/Java/doxyfile b/antlr-3.4/runtime/Java/doxyfile
deleted file mode 100644
index 8eba9a0..0000000
--- a/antlr-3.4/runtime/Java/doxyfile
+++ /dev/null
@@ -1,264 +0,0 @@
-# Doxyfile 1.5.2
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-DOXYFILE_ENCODING      = UTF-8
-PROJECT_NAME           = "ANTLR API"
-PROJECT_NUMBER         = 3.4
-OUTPUT_DIRECTORY       = api
-CREATE_SUBDIRS         = NO
-OUTPUT_LANGUAGE        = English
-BRIEF_MEMBER_DESC      = YES
-REPEAT_BRIEF           = YES
-ABBREVIATE_BRIEF       = "The $name class" \
-                         "The $name widget" \
-                         "The $name file" \
-                         is \
-                         provides \
-                         specifies \
-                         contains \
-                         represents \
-                         a \
-                         an \
-                         the
-ALWAYS_DETAILED_SEC    = YES
-INLINE_INHERITED_MEMB  = NO
-FULL_PATH_NAMES        = YES
-STRIP_FROM_PATH        = /Applications/
-STRIP_FROM_INC_PATH    = 
-SHORT_NAMES            = NO
-JAVADOC_AUTOBRIEF      = NO
-MULTILINE_CPP_IS_BRIEF = NO
-DETAILS_AT_TOP         = NO
-INHERIT_DOCS           = YES
-SEPARATE_MEMBER_PAGES  = NO
-TAB_SIZE               = 8
-ALIASES                = 
-OPTIMIZE_OUTPUT_FOR_C  = NO
-OPTIMIZE_OUTPUT_JAVA   = YES
-BUILTIN_STL_SUPPORT    = NO
-CPP_CLI_SUPPORT        = NO
-DISTRIBUTE_GROUP_DOC   = NO
-SUBGROUPING            = YES
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-EXTRACT_ALL            = YES
-EXTRACT_PRIVATE        = YES
-EXTRACT_STATIC         = YES
-EXTRACT_LOCAL_CLASSES  = YES
-EXTRACT_LOCAL_METHODS  = NO
-HIDE_UNDOC_MEMBERS     = NO
-HIDE_UNDOC_CLASSES     = NO
-HIDE_FRIEND_COMPOUNDS  = NO
-HIDE_IN_BODY_DOCS      = NO
-INTERNAL_DOCS          = NO
-CASE_SENSE_NAMES       = NO
-HIDE_SCOPE_NAMES       = NO
-SHOW_INCLUDE_FILES     = YES
-INLINE_INFO            = YES
-SORT_MEMBER_DOCS       = YES
-SORT_BRIEF_DOCS        = NO
-SORT_BY_SCOPE_NAME     = NO
-GENERATE_TODOLIST      = YES
-GENERATE_TESTLIST      = NO
-GENERATE_BUGLIST       = NO
-GENERATE_DEPRECATEDLIST= NO
-ENABLED_SECTIONS       = 
-MAX_INITIALIZER_LINES  = 30
-SHOW_USED_FILES        = YES
-SHOW_DIRECTORIES       = NO
-FILE_VERSION_FILTER    = 
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-QUIET                  = NO
-WARNINGS               = YES
-WARN_IF_UNDOCUMENTED   = YES
-WARN_IF_DOC_ERROR      = YES
-WARN_NO_PARAMDOC       = NO
-WARN_FORMAT            = "$file:$line: $text"
-WARN_LOGFILE           = 
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-INPUT                  = /Users/parrt/antlr/code/antlr/main/runtime/Java/src
-INPUT_ENCODING         = UTF-8
-FILE_PATTERNS          = *.c \
-                         *.cc \
-                         *.cxx \
-                         *.cpp \
-                         *.c++ \
-                         *.d \
-                         *.java \
-                         *.ii \
-                         *.ixx \
-                         *.ipp \
-                         *.i++ \
-                         *.inl \
-                         *.h \
-                         *.hh \
-                         *.hxx \
-                         *.hpp \
-                         *.h++ \
-                         *.idl \
-                         *.odl \
-                         *.cs \
-                         *.php \
-                         *.php3 \
-                         *.inc \
-                         *.m \
-                         *.mm \
-                         *.dox \
-                         *.py
-RECURSIVE              = YES
-EXCLUDE                = 
-EXCLUDE_SYMLINKS       = NO
-EXCLUDE_PATTERNS       = 
-EXCLUDE_SYMBOLS        = java::util \
-                         java::io
-EXAMPLE_PATH           = 
-EXAMPLE_PATTERNS       = *
-EXAMPLE_RECURSIVE      = NO
-IMAGE_PATH             = 
-INPUT_FILTER           = 
-FILTER_PATTERNS        = 
-FILTER_SOURCE_FILES    = NO
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-SOURCE_BROWSER         = YES
-INLINE_SOURCES         = NO
-STRIP_CODE_COMMENTS    = YES
-REFERENCED_BY_RELATION = NO
-REFERENCES_RELATION    = NO
-REFERENCES_LINK_SOURCE = YES
-USE_HTAGS              = NO
-VERBATIM_HEADERS       = YES
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-ALPHABETICAL_INDEX     = NO
-COLS_IN_ALPHA_INDEX    = 5
-IGNORE_PREFIX          = 
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-GENERATE_HTML          = YES
-HTML_OUTPUT            = .
-HTML_FILE_EXTENSION    = .html
-HTML_HEADER            = 
-HTML_FOOTER            = 
-HTML_STYLESHEET        = 
-HTML_ALIGN_MEMBERS     = YES
-GENERATE_HTMLHELP      = NO
-CHM_FILE               = 
-HHC_LOCATION           = 
-GENERATE_CHI           = NO
-BINARY_TOC             = NO
-TOC_EXPAND             = NO
-DISABLE_INDEX          = NO
-ENUM_VALUES_PER_LINE   = 4
-GENERATE_TREEVIEW      = NO
-TREEVIEW_WIDTH         = 250
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-GENERATE_LATEX         = NO
-LATEX_OUTPUT           = latex
-LATEX_CMD_NAME         = latex
-MAKEINDEX_CMD_NAME     = makeindex
-COMPACT_LATEX          = NO
-PAPER_TYPE             = a4wide
-EXTRA_PACKAGES         = 
-LATEX_HEADER           = 
-PDF_HYPERLINKS         = NO
-USE_PDFLATEX           = YES
-LATEX_BATCHMODE        = NO
-LATEX_HIDE_INDICES     = NO
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-GENERATE_RTF           = NO
-RTF_OUTPUT             = rtf
-COMPACT_RTF            = NO
-RTF_HYPERLINKS         = NO
-RTF_STYLESHEET_FILE    = 
-RTF_EXTENSIONS_FILE    = 
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-GENERATE_MAN           = NO
-MAN_OUTPUT             = man
-MAN_EXTENSION          = .3
-MAN_LINKS              = NO
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-GENERATE_XML           = NO
-XML_OUTPUT             = xml
-XML_SCHEMA             = 
-XML_DTD                = 
-XML_PROGRAMLISTING     = YES
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-GENERATE_AUTOGEN_DEF   = NO
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-GENERATE_PERLMOD       = NO
-PERLMOD_LATEX          = NO
-PERLMOD_PRETTY         = YES
-PERLMOD_MAKEVAR_PREFIX = 
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor   
-#---------------------------------------------------------------------------
-ENABLE_PREPROCESSING   = YES
-MACRO_EXPANSION        = NO
-EXPAND_ONLY_PREDEF     = NO
-SEARCH_INCLUDES        = YES
-INCLUDE_PATH           = 
-INCLUDE_FILE_PATTERNS  = 
-PREDEFINED             = 
-EXPAND_AS_DEFINED      = 
-SKIP_FUNCTION_MACROS   = YES
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references   
-#---------------------------------------------------------------------------
-TAGFILES               = 
-GENERATE_TAGFILE       = 
-ALLEXTERNALS           = NO
-EXTERNAL_GROUPS        = YES
-PERL_PATH              = /usr/bin/perl
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool   
-#---------------------------------------------------------------------------
-CLASS_DIAGRAMS         = NO
-MSCGEN_PATH            = /Applications/Doxygen.app/Contents/Resources/
-HIDE_UNDOC_RELATIONS   = YES
-HAVE_DOT               = YES
-CLASS_GRAPH            = YES
-COLLABORATION_GRAPH    = YES
-GROUP_GRAPHS           = YES
-UML_LOOK               = NO
-TEMPLATE_RELATIONS     = NO
-INCLUDE_GRAPH          = YES
-INCLUDED_BY_GRAPH      = YES
-CALL_GRAPH             = NO
-CALLER_GRAPH           = NO
-GRAPHICAL_HIERARCHY    = YES
-DIRECTORY_GRAPH        = YES
-DOT_IMAGE_FORMAT       = png
-DOT_PATH               = /Applications/Doxygen.app/Contents/Resources/
-DOTFILE_DIRS           = 
-DOT_GRAPH_MAX_NODES    = 50
-DOT_TRANSPARENT        = NO
-DOT_MULTI_TARGETS      = NO
-GENERATE_LEGEND        = YES
-DOT_CLEANUP            = YES
-#---------------------------------------------------------------------------
-# Configuration::additions related to the search engine   
-#---------------------------------------------------------------------------
-SEARCHENGINE           = NO
diff --git a/antlr-3.4/runtime/Java/pom.xml b/antlr-3.4/runtime/Java/pom.xml
deleted file mode 100644
index 6c8b13b..0000000
--- a/antlr-3.4/runtime/Java/pom.xml
+++ /dev/null
@@ -1,96 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.antlr</groupId>
-  <artifactId>antlr-runtime</artifactId>
-  <packaging>jar</packaging>
-  
-  <!--
-    Inherit from the ANTLR master pom, which tells us what
-    version we are and allows us to inherit dependencies
-    and so on.
-    -->
-  <parent>
-      <groupId>org.antlr</groupId>
-      <artifactId>antlr-master</artifactId>
-      <version>3.4</version>
-  </parent>
-  
-
-  <name>Antlr 3.4 Runtime</name>
-
-  <description>A framework for constructing recognizers, compilers, and translators from grammatical descriptions containing Java, C#, C++, or Python actions.</description>
-  <url>http://www.antlr.org</url>
-      <developers>
-        <developer>
-            <name>Terence Parr</name>
-            <organization>USFCA</organization>
-            <organizationUrl>http://www.cs.usfca.edu</organizationUrl>
-            <email>parrt@antlr.org</email>
-            <roles>
-                <role>Project Leader</role>
-                <role>Developer - Java Target</role>
-            </roles>
-            <timezone>PST</timezone>
-        </developer>
-        <developer>
-            <name>Jim Idle</name>
-            <organization>Temporal Wave LLC</organization>
-            <organizationUrl>http://www.temporal-wave.com</organizationUrl>
-            <email>jimi@temporal-wave.com</email>
-            <roles>
-                <role>Developer - Maven stuff</role>
-                <role>Developer - C Target</role>
-            </roles>
-            <timezone>PST</timezone>
-        </developer>
-    </developers>
-    
-  <scm>
-    <url>http://fisheye2.cenqua.com/browse/antlr</url>
-    <connection>http://fisheye2.cenqua.com/browse/antlr</connection>
-  </scm>
-  
-  <dependencies>
-    
-    <dependency>
-      <groupId>org.antlr</groupId>
-      <artifactId>stringtemplate</artifactId>
-      <version>3.2.1</version>
-      <scope>compile</scope>
-    </dependency>
-    
-    
-    <dependency>
-        <groupId>antlr</groupId>
-        <artifactId>antlr</artifactId>
-        <version>2.7.7</version>
-        <scope>compile</scope>
-    </dependency>    
-
-  </dependencies>
-  
-<build>
-    <defaultGoal>install</defaultGoal>
-    <plugins>
-        <plugin>
-            <artifactId>maven-compiler-plugin</artifactId>
-            <version>2.0.2</version>
-            <configuration>
-                <source>1.6</source>
-                <target>jsr14</target>
-            </configuration>
-        </plugin>
-        <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>findbugs-maven-plugin</artifactId>
-            <version>2.3.2</version>
-            <configuration>
-                <findbugsXmlOutput>true</findbugsXmlOutput>
-                <findbugsXmlWithMessages>true</findbugsXmlWithMessages>
-                <xmlOutput>true</xmlOutput>
-            </configuration>
-        </plugin>
-    </plugins>
-</build>
-</project>
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRFileStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRFileStream.java
deleted file mode 100644
index 27ef58b..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRFileStream.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import java.io.*;
-
-/** This is a char buffer stream that is loaded from a file
- *  all at once when you construct the object.  This looks very
- *  much like an ANTLRReaderStream or ANTLRInputStream, but it's a special case
- *  since we know the exact size of the object to load.  We can avoid lots
- *  of data copying. 
- */
-public class ANTLRFileStream extends ANTLRStringStream {
-	protected String fileName;
-
-	public ANTLRFileStream(String fileName) throws IOException {
-		this(fileName, null);
-	}
-
-	public ANTLRFileStream(String fileName, String encoding) throws IOException {
-		this.fileName = fileName;
-		load(fileName, encoding);
-	}
-
-	public void load(String fileName, String encoding)
-		throws IOException
-	{
-		if ( fileName==null ) {
-			return;
-		}
-		File f = new File(fileName);
-		int size = (int)f.length();
-		InputStreamReader isr;
-		FileInputStream fis = new FileInputStream(fileName);
-		if ( encoding!=null ) {
-			isr = new InputStreamReader(fis, encoding);
-		}
-		else {
-			isr = new InputStreamReader(fis);
-		}
-		try {
-			data = new char[size];
-			super.n = isr.read(data);
-		}
-		finally {
-			isr.close();
-		}
-	}
-
-	public String getSourceName() {
-		return fileName;
-	}
-}
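
ANTLRFileStream simply slurps the whole file into the char[] buffer and then
serves it through the CharStream interface. A minimal usage sketch that drives
that interface directly (the CountLines class and the UTF-8 choice are
illustrative only; normally the stream is handed to a generated lexer):

    import org.antlr.runtime.ANTLRFileStream;
    import org.antlr.runtime.CharStream;

    public class CountLines {
        public static void main(String[] args) throws Exception {
            // Load the whole file up front; the second argument picks the encoding.
            CharStream input = new ANTLRFileStream(args[0], "UTF-8");
            int newlines = 0;
            while (input.LA(1) != CharStream.EOF) {   // LA(1) peeks at the next char
                if (input.LA(1) == '\n') newlines++;
                input.consume();                      // advance one character
            }
            System.out.println(newlines + " line breaks in " + input.getSourceName());
        }
    }
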
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRReaderStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRReaderStream.java
deleted file mode 100644
index 24d51ad..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRReaderStream.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import java.io.*;
-
-/** Vacuum all input from a Reader and then treat it like a StringStream.
- *  Manage the buffer manually to avoid unnecessary data copying.
- *
- *  If you need encoding, use ANTLRInputStream.
- */
-public class ANTLRReaderStream extends ANTLRStringStream {
-	public static final int READ_BUFFER_SIZE = 1024;
-	public static final int INITIAL_BUFFER_SIZE = 1024;
-
-	public ANTLRReaderStream() {
-	}
-
-	public ANTLRReaderStream(Reader r) throws IOException {
-		this(r, INITIAL_BUFFER_SIZE, READ_BUFFER_SIZE);
-	}
-
-	public ANTLRReaderStream(Reader r, int size) throws IOException {
-		this(r, size, READ_BUFFER_SIZE);
-	}
-
-	public ANTLRReaderStream(Reader r, int size, int readChunkSize) throws IOException {
-		load(r, size, readChunkSize);
-	}
-
-	public void load(Reader r, int size, int readChunkSize)
-		throws IOException
-	{
-		if ( r==null ) {
-			return;
-		}
-		if ( size<=0 ) {
-			size = INITIAL_BUFFER_SIZE;
-		}
-		if ( readChunkSize<=0 ) {
-			readChunkSize = READ_BUFFER_SIZE;
-		}
-		// System.out.println("load "+size+" in chunks of "+readChunkSize);
-		try {
-			// alloc initial buffer size.
-			data = new char[size];
-			// read all the data in chunks of readChunkSize
-			int numRead=0;
-			int p = 0;
-			do {
-				if ( p+readChunkSize > data.length ) { // overflow?
-					// System.out.println("### overflow p="+p+", data.length="+data.length);
-					char[] newdata = new char[data.length*2]; // resize
-					System.arraycopy(data, 0, newdata, 0, data.length);
-					data = newdata;
-				}
-				numRead = r.read(data, p, readChunkSize);
-				// System.out.println("read "+numRead+" chars; p was "+p+" is now "+(p+numRead));
-				p += numRead;
-			} while (numRead!=-1); // while not EOF
-			// set the actual size of the data available;
-			// EOF subtracted one above in p+=numRead; add one back
-			super.n = p+1;
-			//System.out.println("n="+n);
-		}
-		finally {
-			r.close();
-		}
-	}
-}
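
ANTLRReaderStream drains any java.io.Reader into the same char[] buffer,
reading in READ_BUFFER_SIZE chunks and doubling the buffer when it fills. A
small illustrative sketch (the demo class name and sample text are made up):

    import java.io.StringReader;
    import org.antlr.runtime.ANTLRReaderStream;

    public class ReaderStreamDemo {
        public static void main(String[] args) throws Exception {
            ANTLRReaderStream input =
                new ANTLRReaderStream(new StringReader("a = b + 3;"));
            // Once loaded it behaves like an in-memory ANTLRStringStream:
            System.out.println(input.size());                          // chars buffered: 10
            System.out.println(input.substring(0, input.size() - 1));  // the whole text back
        }
    }
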
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRStringStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRStringStream.java
deleted file mode 100644
index 17af23c..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRStringStream.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/** A pretty quick CharStream that pulls all data from an array
- *  directly.  Every method call counts in the lexer.  Java's
- *  strings aren't very good so I'm avoiding them.
- */
-public class ANTLRStringStream implements CharStream {
-	/** The data being scanned */
-	protected char[] data;
-
-	/** How many characters are actually in the buffer */
-	protected int n;
-
-	/** 0..n-1 index into string of next char */
-	protected int p=0;
-
-	/** line number 1..n within the input */
-	protected int line = 1;
-
-	/** The index of the character relative to the beginning of the line 0..n-1 */
-	protected int charPositionInLine = 0;
-
-	/** tracks how deep mark() calls are nested */
-	protected int markDepth = 0;
-
-	/** A list of CharStreamState objects that tracks the stream state
-	 *  values line, charPositionInLine, and p that can change as you
-	 *  move through the input stream.  Indexed from 1..markDepth.
-     *  A null is kept @ index 0.  Created upon first call to mark().
-	 */
-	protected List markers;
-
-	/** Track the last mark() call result value for use in rewind(). */
-	protected int lastMarker;
-
-	/** What is name or source of this char stream? */
-	public String name;
-
-	public ANTLRStringStream() {
-	}
-
-	/** Copy data in string to a local char array */
-	public ANTLRStringStream(String input) {
-		this();
-		this.data = input.toCharArray();
-		this.n = input.length();
-	}
-
-	/** This is the preferred constructor as no data is copied */
-	public ANTLRStringStream(char[] data, int numberOfActualCharsInArray) {
-		this();
-		this.data = data;
-		this.n = numberOfActualCharsInArray;
-	}
-
-	/** Reset the stream so that it's in the same state it was
-	 *  when the object was created *except* the data array is not
-	 *  touched.
-	 */
-	public void reset() {
-		p = 0;
-		line = 1;
-		charPositionInLine = 0;
-		markDepth = 0;
-	}
-
-    public void consume() {
-		//System.out.println("prev p="+p+", c="+(char)data[p]);
-        if ( p < n ) {
-			charPositionInLine++;
-			if ( data[p]=='\n' ) {
-				/*
-				System.out.println("newline char found on line: "+line+
-								   "@ pos="+charPositionInLine);
-				*/
-				line++;
-				charPositionInLine=0;
-			}
-            p++;
-			//System.out.println("p moves to "+p+" (c='"+(char)data[p]+"')");
-        }
-    }
-
-    public int LA(int i) {
-		if ( i==0 ) {
-			return 0; // undefined
-		}
-		if ( i<0 ) {
-			i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
-			if ( (p+i-1) < 0 ) {
-				return CharStream.EOF; // invalid; no char before first char
-			}
-		}
-
-		if ( (p+i-1) >= n ) {
-            //System.out.println("char LA("+i+")=EOF; p="+p);
-            return CharStream.EOF;
-        }
-        //System.out.println("char LA("+i+")="+(char)data[p+i-1]+"; p="+p);
-		//System.out.println("LA("+i+"); p="+p+" n="+n+" data.length="+data.length);
-		return data[p+i-1];
-    }
-
-	public int LT(int i) {
-		return LA(i);
-	}
-
-	/** Return the current input symbol index 0..n where n indicates the
-     *  last symbol has been read.  The index is the index of the char to
-	 *  be returned from LA(1).
-     */
-    public int index() {
-        return p;
-    }
-
-	public int size() {
-		return n;
-	}
-
-	public int mark() {
-        if ( markers==null ) {
-            markers = new ArrayList();
-            markers.add(null); // depth 0 means no backtracking, leave blank
-        }
-        markDepth++;
-		CharStreamState state = null;
-		if ( markDepth>=markers.size() ) {
-			state = new CharStreamState();
-			markers.add(state);
-		}
-		else {
-			state = (CharStreamState)markers.get(markDepth);
-		}
-		state.p = p;
-		state.line = line;
-		state.charPositionInLine = charPositionInLine;
-		lastMarker = markDepth;
-		return markDepth;
-    }
-
-    public void rewind(int m) {
-		CharStreamState state = (CharStreamState)markers.get(m);
-		// restore stream state
-		seek(state.p);
-		line = state.line;
-		charPositionInLine = state.charPositionInLine;
-		release(m);
-	}
-
-	public void rewind() {
-		rewind(lastMarker);
-	}
-
-	public void release(int marker) {
-		// unwind any other markers made after m and release m
-		markDepth = marker;
-		// release this marker
-		markDepth--;
-	}
-
-	/** consume() ahead until p==index; can't just set p=index as we must
-	 *  update line and charPositionInLine.
-	 */
-	public void seek(int index) {
-		if ( index<=p ) {
-			p = index; // just jump; don't update stream state (line, ...)
-			return;
-		}
-		// seek forward, consume until p hits index
-		while ( p<index ) {
-			consume();
-		}
-	}
-
-	public String substring(int start, int stop) {
-		return new String(data,start,stop-start+1);
-	}
-
-	public int getLine() {
-		return line;
-	}
-
-	public int getCharPositionInLine() {
-		return charPositionInLine;
-	}
-
-	public void setLine(int line) {
-		this.line = line;
-	}
-
-	public void setCharPositionInLine(int pos) {
-		this.charPositionInLine = pos;
-	}
-
-	public String getSourceName() {
-		return name;
-	}
-
-    public String toString() { return new String(data); }
-}
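
The mark()/rewind() machinery above is what backtracking lexers and parsers
lean on: mark() snapshots p, line, and charPositionInLine; rewind() restores
the snapshot and releases the marker. A short illustrative sketch (the demo
class and sample input are made up):

    import org.antlr.runtime.ANTLRStringStream;

    public class MarkRewindDemo {
        public static void main(String[] args) {
            ANTLRStringStream input = new ANTLRStringStream("ab\ncd");
            int m = input.mark();            // snapshot: p=0, line 1, col 0
            input.consume();                 // 'a'
            input.consume();                 // 'b'
            input.consume();                 // '\n' -> line becomes 2, col resets
            System.out.println(input.getLine() + ":" + input.getCharPositionInLine()); // 2:0
            input.rewind(m);                 // speculation failed; restore the snapshot
            System.out.println((char) input.LA(1));                                    // 'a'
        }
    }
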
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/BaseRecognizer.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/BaseRecognizer.java
deleted file mode 100644
index 667664d..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/BaseRecognizer.java
+++ /dev/null
@@ -1,886 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/** A generic recognizer that can handle recognizers generated from
- *  lexer, parser, and tree grammars.  This is all the parsing
- *  support code essentially; most of it is error recovery stuff and
- *  backtracking.
- */
-public abstract class BaseRecognizer {
-	public static final int MEMO_RULE_FAILED = -2;
-	public static final int MEMO_RULE_UNKNOWN = -1;
-	public static final int INITIAL_FOLLOW_STACK_SIZE = 100;
-
-	// copies from Token object for convenience in actions
-	public static final int DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL;
-	public static final int HIDDEN = Token.HIDDEN_CHANNEL;
-
-	public static final String NEXT_TOKEN_RULE_NAME = "nextToken";
-
-	/** State of a lexer, parser, or tree parser are collected into a state
-	 *  object so the state can be shared.  This sharing is needed to
-	 *  have one grammar import others and share same error variables
-	 *  and other state variables.  It's a kind of explicit multiple
-	 *  inheritance via delegation of methods and shared state.
-	 */
-	protected RecognizerSharedState state;
-
-	public BaseRecognizer() {
-		state = new RecognizerSharedState();
-	}
-
-	public BaseRecognizer(RecognizerSharedState state) {
-		if ( state==null ) {
-			state = new RecognizerSharedState();
-		}
-		this.state = state;
-	}
-
-	/** Reset the parser's state; subclasses must rewind the input stream. */
-	public void reset() {
-		// wack everything related to error recovery
-		if ( state==null ) {
-			return; // no shared state work to do
-		}
-		state._fsp = -1;
-		state.errorRecovery = false;
-		state.lastErrorIndex = -1;
-		state.failed = false;
-		state.syntaxErrors = 0;
-		// wack everything related to backtracking and memoization
-		state.backtracking = 0;
-		for (int i = 0; state.ruleMemo!=null && i < state.ruleMemo.length; i++) { // wipe cache
-			state.ruleMemo[i] = null;
-		}
-	}
-
-
-	/** Match current input symbol against ttype.  Attempt
-	 *  single token insertion or deletion error recovery.  If
-	 *  that fails, throw MismatchedTokenException.
-	 *
-	 *  To turn off single token insertion or deletion error
-	 *  recovery, override recoverFromMismatchedToken() and have it
-     *  throw an exception. See TreeParser.recoverFromMismatchedToken().
-     *  This way any error in a rule will cause an exception and
-     *  immediate exit from rule.  Rule would recover by resynchronizing
-     *  to the set of symbols that can follow rule ref.
-	 */
-	public Object match(IntStream input, int ttype, BitSet follow)
-		throws RecognitionException
-	{
-		//System.out.println("match "+((TokenStream)input).LT(1));
-		Object matchedSymbol = getCurrentInputSymbol(input);
-		if ( input.LA(1)==ttype ) {
-			input.consume();
-			state.errorRecovery = false;
-			state.failed = false;
-			return matchedSymbol;
-		}
-		if ( state.backtracking>0 ) {
-			state.failed = true;
-			return matchedSymbol;
-		}
-		matchedSymbol = recoverFromMismatchedToken(input, ttype, follow);
-		return matchedSymbol;
-	}
-
-	/** Match the wildcard (any symbol): always consumes the current input symbol. */
-	public void matchAny(IntStream input) {
-		state.errorRecovery = false;
-		state.failed = false;
-		input.consume();
-	}
-
-	public boolean mismatchIsUnwantedToken(IntStream input, int ttype) {
-		return input.LA(2)==ttype;
-	}
-
-	public boolean mismatchIsMissingToken(IntStream input, BitSet follow) {
-		if ( follow==null ) {
-			// we have no information about the follow; we can only consume
-			// a single token and hope for the best
-			return false;
-		}
-		// compute what can follow this grammar element reference
-		if ( follow.member(Token.EOR_TOKEN_TYPE) ) {
-			BitSet viableTokensFollowingThisRule = computeContextSensitiveRuleFOLLOW();
-			follow = follow.or(viableTokensFollowingThisRule);
-            if ( state._fsp>=0 ) { // remove EOR if we're not the start symbol
-                follow.remove(Token.EOR_TOKEN_TYPE);
-            }
-		}
-		// if current token is consistent with what could come after set
-		// then we know we're missing a token; error recovery is free to
-		// "insert" the missing token
-
-		//System.out.println("viable tokens="+follow.toString(getTokenNames()));
-		//System.out.println("LT(1)="+((TokenStream)input).LT(1));
-
-		// BitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
-		// in the follow set to indicate that the follow of the start symbol is
-		// in the set (EOF can follow).
-		if ( follow.member(input.LA(1)) || follow.member(Token.EOR_TOKEN_TYPE) ) {
-			//System.out.println("LT(1)=="+((TokenStream)input).LT(1)+" is consistent with what follows; inserting...");
-			return true;
-		}
-		return false;
-	}
-
-	/** Report a recognition problem.
-	 *
-	 *  This method sets errorRecovery to indicate the parser is recovering,
-	 *  not parsing.  Once in recovery mode, no errors are generated.
-	 *  To get out of recovery mode, the parser must successfully match
-	 *  a token (after a resync).  So it will go:
-	 *
-	 * 		1. error occurs
-	 * 		2. enter recovery mode, report error
-	 * 		3. consume until token found in resynch set
-	 * 		4. try to resume parsing
-	 * 		5. next match() will reset errorRecovery mode
-	 *
-	 *  If you override, make sure to update syntaxErrors if you care about that.
-	 */
-	public void reportError(RecognitionException e) {
-		// if we've already reported an error and have not matched a token
-		// yet successfully, don't report any errors.
-		if ( state.errorRecovery ) {
-			//System.err.print("[SPURIOUS] ");
-			return;
-		}
-		state.syntaxErrors++; // don't count spurious
-		state.errorRecovery = true;
-
-		displayRecognitionError(this.getTokenNames(), e);
-	}
-
-	public void displayRecognitionError(String[] tokenNames,
-										RecognitionException e)
-	{
-		String hdr = getErrorHeader(e);
-		String msg = getErrorMessage(e, tokenNames);
-		emitErrorMessage(hdr+" "+msg);
-	}
-
-	/** What error message should be generated for the various
-	 *  exception types?
-	 *
-	 *  Not very object-oriented code, but I like having all error message
-	 *  generation within one method rather than spread among all of the
-	 *  exception classes. This also makes it much easier for the exception
-	 *  handling because the exception classes do not have to have pointers back
-	 *  to this object to access utility routines and so on. Also, changing
-	 *  the message for an exception type would be difficult because you
-	 *  would have to subclass the exception, but then somehow get ANTLR
-	 *  to make those kinds of exception objects instead of the default.
-	 *  This looks weird, but trust me--it makes the most sense in terms
-	 *  of flexibility.
-	 *
-	 *  For grammar debugging, you will want to override this to add
-	 *  more information such as the stack frame with
-	 *  getRuleInvocationStack(e, this.getClass().getName()) and,
-	 *  for no viable alts, the decision description and state etc...
-	 *
-	 *  Override this to change the message generated for one or more
-	 *  exception types.
-	 */
-	public String getErrorMessage(RecognitionException e, String[] tokenNames) {
-		String msg = e.getMessage();
-		if ( e instanceof UnwantedTokenException ) {
-			UnwantedTokenException ute = (UnwantedTokenException)e;
-			String tokenName="<unknown>";
-			if ( ute.expecting== Token.EOF ) {
-				tokenName = "EOF";
-			}
-			else {
-				tokenName = tokenNames[ute.expecting];
-			}
-			msg = "extraneous input "+getTokenErrorDisplay(ute.getUnexpectedToken())+
-				" expecting "+tokenName;
-		}
-		else if ( e instanceof MissingTokenException ) {
-			MissingTokenException mte = (MissingTokenException)e;
-			String tokenName="<unknown>";
-			if ( mte.expecting== Token.EOF ) {
-				tokenName = "EOF";
-			}
-			else {
-				tokenName = tokenNames[mte.expecting];
-			}
-			msg = "missing "+tokenName+" at "+getTokenErrorDisplay(e.token);
-		}
-		else if ( e instanceof MismatchedTokenException ) {
-			MismatchedTokenException mte = (MismatchedTokenException)e;
-			String tokenName="<unknown>";
-			if ( mte.expecting== Token.EOF ) {
-				tokenName = "EOF";
-			}
-			else {
-				tokenName = tokenNames[mte.expecting];
-			}
-			msg = "mismatched input "+getTokenErrorDisplay(e.token)+
-				" expecting "+tokenName;
-		}
-		else if ( e instanceof MismatchedTreeNodeException ) {
-			MismatchedTreeNodeException mtne = (MismatchedTreeNodeException)e;
-			String tokenName="<unknown>";
-			if ( mtne.expecting==Token.EOF ) {
-				tokenName = "EOF";
-			}
-			else {
-				tokenName = tokenNames[mtne.expecting];
-			}
-			msg = "mismatched tree node: "+mtne.node+
-				" expecting "+tokenName;
-		}
-		else if ( e instanceof NoViableAltException ) {
-			//NoViableAltException nvae = (NoViableAltException)e;
-			// for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
-			// and "(decision="+nvae.decisionNumber+") and
-			// "state "+nvae.stateNumber
-			msg = "no viable alternative at input "+getTokenErrorDisplay(e.token);
-		}
-		else if ( e instanceof EarlyExitException ) {
-			//EarlyExitException eee = (EarlyExitException)e;
-			// for development, can add "(decision="+eee.decisionNumber+")"
-			msg = "required (...)+ loop did not match anything at input "+
-				getTokenErrorDisplay(e.token);
-		}
-		else if ( e instanceof MismatchedSetException ) {
-			MismatchedSetException mse = (MismatchedSetException)e;
-			msg = "mismatched input "+getTokenErrorDisplay(e.token)+
-				" expecting set "+mse.expecting;
-		}
-		else if ( e instanceof MismatchedNotSetException ) {
-			MismatchedNotSetException mse = (MismatchedNotSetException)e;
-			msg = "mismatched input "+getTokenErrorDisplay(e.token)+
-				" expecting set "+mse.expecting;
-		}
-		else if ( e instanceof FailedPredicateException ) {
-			FailedPredicateException fpe = (FailedPredicateException)e;
-			msg = "rule "+fpe.ruleName+" failed predicate: {"+
-				fpe.predicateText+"}?";
-		}
-		return msg;
-	}
-
-	/** Get number of recognition errors (lexer, parser, tree parser).  Each
-	 *  recognizer tracks its own number.  So parser and lexer each have a
-	 *  separate count.  Does not count the spurious errors found between
-	 *  an error and the next valid token match.
-	 *
-	 *  See also reportError()
-	 */
-	public int getNumberOfSyntaxErrors() {
-		return state.syntaxErrors;
-	}
-
-	/** What is the error header, normally line/character position information? */
-	public String getErrorHeader(RecognitionException e) {
-		if ( getSourceName()!=null )
-			return getSourceName()+" line "+e.line+":"+e.charPositionInLine;
-				
-		return "line "+e.line+":"+e.charPositionInLine;
-	}
-
-	/** How should a token be displayed in an error message? The default
-	 *  is to display just the text, but during development you might
-	 *  want to have a lot of information spit out.  Override in that case
-	 *  to use t.toString() (which, for CommonToken, dumps everything about
-	 *  the token). This is better than forcing you to override a method in
-	 *  your token objects because you don't have to go modify your lexer
-	 *  so that it creates a new Java type.
-	 */
-	public String getTokenErrorDisplay(Token t) {
-		String s = t.getText();
-		if ( s==null ) {
-			if ( t.getType()==Token.EOF ) {
-				s = "<EOF>";
-			}
-			else {
-				s = "<"+t.getType()+">";
-			}
-		}
-		s = s.replaceAll("\n","\\\\n");
-		s = s.replaceAll("\r","\\\\r");
-		s = s.replaceAll("\t","\\\\t");
-		return "'"+s+"'";
-	}
-
-	/** Override this method to change where error messages go */
-	public void emitErrorMessage(String msg) {
-		System.err.println(msg);
-	}
-
-	/** Recover from an error found on the input stream.  This is
-	 *  for NoViableAlt and mismatched symbol exceptions.  If you enable
-	 *  single token insertion and deletion, this will usually not
-	 *  handle mismatched symbol exceptions but there could be a mismatched
-	 *  token that the match() routine could not recover from.
-	 */
-	public void recover(IntStream input, RecognitionException re) {
-		if ( state.lastErrorIndex==input.index() ) {
-			// uh oh, another error at same token index; must be a case
-			// where LT(1) is in the recovery token set so nothing is
-			// consumed; consume a single token so at least to prevent
-			// an infinite loop; this is a failsafe.
-			input.consume();
-		}
-		state.lastErrorIndex = input.index();
-		BitSet followSet = computeErrorRecoverySet();
-		beginResync();
-		consumeUntil(input, followSet);
-		endResync();
-	}
-
-	/** A hook to listen in on the token consumption during error recovery.
-	 *  The DebugParser subclasses this to fire events to the listener.
-	 */
-	public void beginResync() {
-	}
-
-	public void endResync() {
-	}
-
-	/*  Compute the error recovery set for the current rule.  During
-	 *  rule invocation, the parser pushes the set of tokens that can
-	 *  follow that rule reference on the stack; this amounts to
-	 *  computing FIRST of what follows the rule reference in the
-	 *  enclosing rule. This local follow set only includes tokens
-	 *  from within the rule; i.e., the FIRST computation done by
-	 *  ANTLR stops at the end of a rule.
-	 *
-	 *  EXAMPLE
-	 *
-	 *  When you find a "no viable alt exception", the input is not
-	 *  consistent with any of the alternatives for rule r.  The best
-	 *  thing to do is to consume tokens until you see something that
-	 *  can legally follow a call to r *or* any rule that called r.
-	 *  You don't want the exact set of viable next tokens because the
-	 *  input might just be missing a token--you might consume the
-	 *  rest of the input looking for one of the missing tokens.
-	 *
-	 *  Consider grammar:
-	 *
-	 *  a : '[' b ']'
-	 *    | '(' b ')'
-	 *    ;
-	 *  b : c '^' INT ;
-	 *  c : ID
-	 *    | INT
-	 *    ;
-	 *
-	 *  At each rule invocation, the set of tokens that could follow
-	 *  that rule is pushed on a stack.  Here are the various "local"
-	 *  follow sets:
-	 *
-	 *  FOLLOW(b1_in_a) = FIRST(']') = ']'
-	 *  FOLLOW(b2_in_a) = FIRST(')') = ')'
-	 *  FOLLOW(c_in_b) = FIRST('^') = '^'
-	 *
-	 *  Upon erroneous input "[]", the call chain is
-	 *
-	 *  a -> b -> c
-	 *
-	 *  and, hence, the follow context stack is:
-	 *
-	 *  depth  local follow set     after call to rule
-	 *    0         <EOF>                    a (from main())
-	 *    1          ']'                     b
-	 *    2          '^'                     c
-	 *
-	 *  Notice that ')' is not included, because b would have to have
-	 *  been called from a different context in rule a for ')' to be
-	 *  included.
-	 *
-	 *  For error recovery, we cannot consider FOLLOW(c)
-	 *  (context-sensitive or otherwise).  We need the combined set of
-	 *  all context-sensitive FOLLOW sets--the set of all tokens that
-	 *  could follow any reference in the call chain.  We need to
-	 *  resync to one of those tokens.  Note that FOLLOW(c)='^' and if
-	 *  we resync'd to that token, we'd consume until EOF.  We need to
-	 *  sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
-	 *  In this case, for input "[]", LA(1) is in this set so we would
-	 *  not consume anything and after printing an error rule c would
-	 *  return normally.  It would not find the required '^' though.
-	 *  At this point, it gets a mismatched token error and throws an
-	 *  exception (since LA(1) is not in the viable following token
-	 *  set).  The rule exception handler tries to recover, but finds
-	 *  the same recovery set and doesn't consume anything.  Rule b
-	 *  exits normally returning to rule a.  Now it finds the ']' (and
-	 *  with the successful match exits errorRecovery mode).
-	 *
-	 *  So, you can see that the parser walks up the call chain looking
-	 *  for the token that was a member of the recovery set.
-	 *
-	 *  Errors are not generated in errorRecovery mode.
-	 *
-	 *  ANTLR's error recovery mechanism is based upon original ideas:
-	 *
-	 *  "Algorithms + Data Structures = Programs" by Niklaus Wirth
-	 *
-	 *  and
-	 *
-	 *  "A note on error recovery in recursive descent parsers":
-	 *  http://portal.acm.org/citation.cfm?id=947902.947905
-	 *
-	 *  Later, Josef Grosch had some good ideas:
-	 *
-	 *  "Efficient and Comfortable Error Recovery in Recursive Descent
-	 *  Parsers":
-	 *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
-	 *
-	 *  Like Grosch I implemented local FOLLOW sets that are combined
-	 *  at run-time upon error to avoid overhead during parsing.
-	 */
-	protected BitSet computeErrorRecoverySet() {
-		return combineFollows(false);
-	}
-
-	/** Compute the context-sensitive FOLLOW set for current rule.
-	 *  This is set of token types that can follow a specific rule
-	 *  reference given a specific call chain.  You get the set of
-	 *  viable tokens that can possibly come next (lookahead depth 1)
-	 *  given the current call chain.  Contrast this with the
-	 *  definition of plain FOLLOW for rule r:
-	 *
-	 *   FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
-	 *
-	 *  where x in T* and alpha, beta in V*; T is set of terminals and
-	 *  V is the set of terminals and nonterminals.  In other words,
-	 *  FOLLOW(r) is the set of all tokens that can possibly follow
-	 *  references to r in *any* sentential form (context).  At
-	 *  runtime, however, we know precisely which context applies as
-	 *  we have the call chain.  We may compute the exact (rather
-	 *  than covering superset) set of following tokens.
-	 *
-	 *  For example, consider grammar:
-	 *
-	 *  stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
-	 *       | "return" expr '.'
-	 *       ;
-	 *  expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
-	 *  atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
-	 *       | '(' expr ')'
-	 *       ;
-	 *
-	 *  The FOLLOW sets are all inclusive whereas context-sensitive
-	 *  FOLLOW sets are precisely what could follow a rule reference.
-	 *  For input "i=(3);", here is the derivation:
-	 *
-	 *  stat => ID '=' expr ';'
-	 *       => ID '=' atom ('+' atom)* ';'
-	 *       => ID '=' '(' expr ')' ('+' atom)* ';'
-	 *       => ID '=' '(' atom ')' ('+' atom)* ';'
-	 *       => ID '=' '(' INT ')' ('+' atom)* ';'
-	 *       => ID '=' '(' INT ')' ';'
-	 *
-	 *  At the "3" token, you'd have a call chain of
-	 *
-	 *    stat -> expr -> atom -> expr -> atom
-	 *
-	 *  What can follow that specific nested ref to atom?  Exactly ')'
-	 *  as you can see by looking at the derivation of this specific
-	 *  input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
-	 *
-	 *  You want the exact viable token set when recovering from a
-	 *  token mismatch.  Upon token mismatch, if LA(1) is member of
-	 *  the viable next token set, then you know there is most likely
-	 *  a missing token in the input stream.  "Insert" one by just not
-	 *  throwing an exception.
-	 */
-	protected BitSet computeContextSensitiveRuleFOLLOW() {
-		return combineFollows(true);
-	}
-
-	// what is exact? it seems to only add sets from above on stack
-	// if EOR is in set i.  When it sees a set w/o EOR, it stops adding.
-	// Why would we ever want them all?  Maybe no viable alt instead of
-	// mismatched token?
-	protected BitSet combineFollows(boolean exact) {
-		int top = state._fsp;
-		BitSet followSet = new BitSet();
-		for (int i=top; i>=0; i--) {
-			BitSet localFollowSet = (BitSet)state.following[i];
-			/*
-			System.out.println("local follow depth "+i+"="+
-							   localFollowSet.toString(getTokenNames())+")");
-			 */
-			followSet.orInPlace(localFollowSet);
-			if ( exact ) {
-				// can we see end of rule?
-				if ( localFollowSet.member(Token.EOR_TOKEN_TYPE) ) {
-					// Only leave EOR in set if at top (start rule); this lets
-					// us know if have to include follow(start rule); i.e., EOF
-					if ( i>0 ) {
-						followSet.remove(Token.EOR_TOKEN_TYPE);
-					}
-				}
-				else { // can't see end of rule, quit
-					break;
-				}
-			}
-		}
-		return followSet;
-	}
-
-	/** Attempt to recover from a single missing or extra token.
-	 *
-	 *  EXTRA TOKEN
-	 *
-	 *  LA(1) is not what we are looking for.  If LA(2) has the right token,
-	 *  however, then assume LA(1) is some extra spurious token.  Delete it
-	 *  and match LA(2) as if we were doing a normal match(), which advances the
-	 *  input.
-	 *
-	 *  MISSING TOKEN
-	 *
-	 *  If current token is consistent with what could come after
-	 *  ttype then it is ok to "insert" the missing token, else throw an
-	 *  exception.  For example, input "i=(3;" is clearly missing the
-	 *  ')'.  When the parser returns from the nested call to expr, it
-	 *  will have call chain:
-	 *
-	 *    stat -> expr -> atom
-	 *
-	 *  and it will be trying to match the ')' at this point in the
-	 *  derivation:
-	 *
-	 *       => ID '=' '(' INT ')' ('+' atom)* ';'
-	 *                          ^
-	 *  match() will see that ';' doesn't match ')' and report a
-	 *  mismatched token error.  To recover, it sees that LA(1)==';'
-	 *  is in the set of tokens that can follow the ')' token
-	 *  reference in rule atom.  It can assume that you forgot the ')'.
-	 */
-	protected Object recoverFromMismatchedToken(IntStream input, int ttype, BitSet follow)
-		throws RecognitionException
-	{
-		RecognitionException e = null;
-		// if next token is what we are looking for then "delete" this token
-		if ( mismatchIsUnwantedToken(input, ttype) ) {
-			e = new UnwantedTokenException(ttype, input);
-			/*
-			System.err.println("recoverFromMismatchedToken deleting "+
-							   ((TokenStream)input).LT(1)+
-							   " since "+((TokenStream)input).LT(2)+" is what we want");
-			 */
-			beginResync();
-			input.consume(); // simply delete extra token
-			endResync();
-			reportError(e);  // report after consuming so AW sees the token in the exception
-			// we want to return the token we're actually matching
-			Object matchedSymbol = getCurrentInputSymbol(input);
-			input.consume(); // move past ttype token as if all were ok
-			return matchedSymbol;
-		}
-		// can't recover with single token deletion, try insertion
-		if ( mismatchIsMissingToken(input, follow) ) {
-			Object inserted = getMissingSymbol(input, e, ttype, follow);
-			e = new MissingTokenException(ttype, input, inserted);
-			reportError(e);  // report after inserting so AW sees the token in the exception
-			return inserted;
-		}
-		// even that didn't work; must throw the exception
-		e = new MismatchedTokenException(ttype, input);
-		throw e;
-	}
-
-	/** Not currently used */
-	public Object recoverFromMismatchedSet(IntStream input,
-										   RecognitionException e,
-										   BitSet follow)
-		throws RecognitionException
-	{
-		if ( mismatchIsMissingToken(input, follow) ) {
-			// System.out.println("missing token");
-			reportError(e);
-			// we don't know how to conjure up a token for sets yet
-			return getMissingSymbol(input, e, Token.INVALID_TOKEN_TYPE, follow);
-		}
-		// TODO do single token deletion like above for Token mismatch
-		throw e;
-	}
-
-	/** Match needs to return the current input symbol, which gets put
-	 *  into the label for the associated token ref; e.g., x=ID.  Token
-	 *  and tree parsers need to return different objects. Rather than test
-	 *  for input stream type or change the IntStream interface, I use
-	 *  a simple method to ask the recognizer to tell me what the current
-	 *  input symbol is.
-	 * 
-	 *  This is ignored for lexers.
-	 */
-	protected Object getCurrentInputSymbol(IntStream input) { return null; }
-
-	/** Conjure up a missing token during error recovery.
-	 *
-	 *  The recognizer attempts to recover from single missing
-	 *  symbols. But, actions might refer to that missing symbol.
-	 *  For example, x=ID {f($x);}. The action clearly assumes
-	 *  that there has been an identifier matched previously and that
-	 *  $x points at that token. If that token is missing, but
-	 *  the next token in the stream is what we want, we assume that
-	 *  this token is missing and we keep going. Because we
-	 *  have to return some token to replace the missing token,
-	 *  we have to conjure one up. This method gives the user control
-	 *  over the tokens returned for missing tokens. Mostly,
-	 *  you will want to create something special for identifier
-	 *  tokens. For literals such as '{' and ',', the default
-	 *  action in the parser or tree parser works. It simply creates
-	 *  a CommonToken of the appropriate type. The text will be the token.
-	 *  If you change what tokens must be created by the lexer,
-	 *  override this method to create the appropriate tokens.
-	 */
-	protected Object getMissingSymbol(IntStream input,
-									  RecognitionException e,
-									  int expectedTokenType,
-									  BitSet follow)
-	{
-		return null;
-	}
-
-	public void consumeUntil(IntStream input, int tokenType) {
-		//System.out.println("consumeUntil "+tokenType);
-		int ttype = input.LA(1);
-		while (ttype != Token.EOF && ttype != tokenType) {
-			input.consume();
-			ttype = input.LA(1);
-		}
-	}
-
-	/** Consume tokens until one matches the given token set */
-	public void consumeUntil(IntStream input, BitSet set) {
-		//System.out.println("consumeUntil("+set.toString(getTokenNames())+")");
-		int ttype = input.LA(1);
-		while (ttype != Token.EOF && !set.member(ttype) ) {
-			//System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
-			input.consume();
-			ttype = input.LA(1);
-		}
-	}
-
-	/** Push a rule's follow set using our own hardcoded stack */
-	protected void pushFollow(BitSet fset) {
-		if ( (state._fsp +1)>=state.following.length ) {
-			BitSet[] f = new BitSet[state.following.length*2];
-			System.arraycopy(state.following, 0, f, 0, state.following.length);
-			state.following = f;
-		}
-		state.following[++state._fsp] = fset;
-	}
-
-	/** Return List<String> of the rules in your parser instance
-	 *  leading up to a call to this method.  You could override if
-	 *  you want more details such as the file/line info of where
-	 *  in the parser java code a rule is invoked.
-	 *
-	 *  This is very useful for error messages and for context-sensitive
-	 *  error recovery.
-	 */
-	public List getRuleInvocationStack() {
-		String parserClassName = getClass().getName();
-		return getRuleInvocationStack(new Throwable(), parserClassName);
-	}
-
-	/** A more general version of getRuleInvocationStack where you can
-	 *  pass in, for example, a RecognitionException to get its rule
-	 *  stack trace.  This routine is shared with all recognizers, hence,
-	 *  static.
-	 *
-	 *  TODO: move to a utility class or something; weird having lexer call this
-	 */
-	public static List getRuleInvocationStack(Throwable e,
-											  String recognizerClassName)
-	{
-		List rules = new ArrayList();
-		StackTraceElement[] stack = e.getStackTrace();
-		int i = 0;
-		for (i=stack.length-1; i>=0; i--) {
-			StackTraceElement t = stack[i];
-			if ( t.getClassName().startsWith("org.antlr.runtime.") ) {
-				continue; // skip support code such as this method
-			}
-			if ( t.getMethodName().equals(NEXT_TOKEN_RULE_NAME) ) {
-				continue;
-			}
-			if ( !t.getClassName().equals(recognizerClassName) ) {
-				continue; // must not be part of this parser
-			}
-            rules.add(t.getMethodName());
-		}
-		return rules;
-	}
-
-    public int getBacktrackingLevel() { return state.backtracking; }
-
-    public void setBacktrackingLevel(int n) { state.backtracking = n; }
-
-    /** Return whether or not a backtracking attempt failed. */
-    public boolean failed() { return state.failed; }
-
-	/** Used to print out token names like ID during debugging and
-	 *  error reporting.  The generated parsers implement a method
-	 *  that overrides this to point to their String[] tokenNames.
-	 */
-	public String[] getTokenNames() {
-		return null;
-	}
-
-	/** For debugging and other purposes, might want the grammar name.
-	 *  Have ANTLR generate an implementation for this method.
-	 */
-	public String getGrammarFileName() {
-		return null;
-	}
-
-	public abstract String getSourceName();
-
-	/** A convenience method for use most often with template rewrites.
-	 *  Convert a List<Token> to List<String>
-	 */
-	public List toStrings(List tokens) {
-		if ( tokens==null ) return null;
-		List strings = new ArrayList(tokens.size());
-		for (int i=0; i<tokens.size(); i++) {
-			strings.add(((Token)tokens.get(i)).getText());
-		}
-		return strings;
-	}
-
-	/** Given a rule number and a start token index number, return
-	 *  MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
-	 *  start index.  If this rule has parsed input starting from the
-	 *  start index before, then return where the rule stopped parsing.
-	 *  It returns the index of the last token matched by the rule.
-	 *
-	 *  For now we use a hashtable and just the slow Object-based one.
-	 *  Later, we can make a special one for ints and also one that
-	 *  tosses out data after we commit past input position i.
-	 */
-	public int getRuleMemoization(int ruleIndex, int ruleStartIndex) {
-		if ( state.ruleMemo[ruleIndex]==null ) {
-			state.ruleMemo[ruleIndex] = new HashMap();
-		}
-		Integer stopIndexI =
-			(Integer)state.ruleMemo[ruleIndex].get(new Integer(ruleStartIndex));
-		if ( stopIndexI==null ) {
-			return MEMO_RULE_UNKNOWN;
-		}
-		return stopIndexI.intValue();
-	}
-
-	/** Has this rule already parsed input at the current index in the
-	 *  input stream?  Return the stop token index or MEMO_RULE_UNKNOWN.
-	 *  If we attempted but failed to parse properly before, return
-	 *  MEMO_RULE_FAILED.
-	 *
-	 *  This method has a side-effect: if we have seen this input for
-	 *  this rule and successfully parsed before, then seek ahead to
-	 *  1 past the stop token matched for this rule last time.
-	 */
-	public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
-		int stopIndex = getRuleMemoization(ruleIndex, input.index());
-		if ( stopIndex==MEMO_RULE_UNKNOWN ) {
-			return false;
-		}
-		if ( stopIndex==MEMO_RULE_FAILED ) {
-			//System.out.println("rule "+ruleIndex+" will never succeed");
-			state.failed=true;
-		}
-		else {
-			//System.out.println("seen rule "+ruleIndex+" before; skipping ahead to @"+(stopIndex+1)+" failed="+state.failed);
-			input.seek(stopIndex+1); // jump to one past stop token
-		}
-		return true;
-	}
-
-	/** Record whether or not this rule parsed the input at this position
-	 *  successfully.  Use a standard java hashtable for now.
-	 */
-	public void memoize(IntStream input,
-						int ruleIndex,
-						int ruleStartIndex)
-	{
-		int stopTokenIndex = state.failed?MEMO_RULE_FAILED:input.index()-1;
-		if ( state.ruleMemo==null ) {
-			System.err.println("!!!!!!!!! memo array is null for "+ getGrammarFileName());
-		}
-		if ( ruleIndex >= state.ruleMemo.length ) {
-			System.err.println("!!!!!!!!! memo size is "+state.ruleMemo.length+", but rule index is "+ruleIndex);
-		}
-		if ( state.ruleMemo[ruleIndex]!=null ) {
-			state.ruleMemo[ruleIndex].put(
-				new Integer(ruleStartIndex), new Integer(stopTokenIndex)
-			);
-		}
-	}
-
-	/** return how many rule/input-index pairs there are in total.
-	 *  TODO: this includes synpreds. :(
-	 */
-	public int getRuleMemoizationCacheSize() {
-		int n = 0;
-		for (int i = 0; state.ruleMemo!=null && i < state.ruleMemo.length; i++) {
-			Map ruleMap = state.ruleMemo[i];
-			if ( ruleMap!=null ) {
-				n += ruleMap.size(); // how many input indexes are recorded?
-			}
-		}
-		return n;
-	}
-
-	public void traceIn(String ruleName, int ruleIndex, Object inputSymbol)  {
-		System.out.print("enter "+ruleName+" "+inputSymbol);
-		if ( state.backtracking>0 ) {
-			System.out.print(" backtracking="+state.backtracking);
-		}
-		System.out.println();
-	}
-
-	public void traceOut(String ruleName,
-						 int ruleIndex,
-						 Object inputSymbol)
-	{
-		System.out.print("exit "+ruleName+" "+inputSymbol);
-		if ( state.backtracking>0 ) {
-            System.out.print(" backtracking="+state.backtracking);
-            if ( state.failed ) System.out.print(" failed");
-            else System.out.print(" succeeded");
-        }
-		System.out.println();
-	}
-
-}
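For context on the recovery hooks deleted above: getMissingSymbol() is what single-token-insertion recovery calls to conjure a placeholder token that actions such as {f($x);} can still reference. Below is a minimal sketch of the kind of override a token parser supplies; it assumes a generated subclass provides getTokenNames(), and the class name is made up for illustration, not taken from this change.

import org.antlr.runtime.*;

// Sketch only: build a CommonToken for the missing symbol, borrowing
// line/column info from the token the parser is currently looking at.
public class RecoveringParser extends Parser {
    public RecoveringParser(TokenStream input) { super(input); }

    @Override
    protected Object getMissingSymbol(IntStream input,
                                      RecognitionException e,
                                      int expectedTokenType,
                                      BitSet follow) {
        String text = (expectedTokenType == Token.EOF)
                ? "<missing EOF>"
                : "<missing " + getTokenNames()[expectedTokenType] + ">"; // names come from the generated parser
        CommonToken missing = new CommonToken(expectedTokenType, text);
        Token current = ((TokenStream) input).LT(1);
        missing.setLine(current.getLine());
        missing.setCharPositionInLine(current.getCharPositionInLine());
        return missing;
    }
}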
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/BitSet.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/BitSet.java
deleted file mode 100644
index f013a26..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/BitSet.java
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import java.util.List;
-
-/**A stripped-down version of org.antlr.misc.BitSet that is just
- * good enough to handle runtime requirements such as FOLLOW sets
- * for automatic error recovery.
- */
-public class BitSet implements Cloneable {
-    protected final static int BITS = 64;    // number of bits / long
-    protected final static int LOG_BITS = 6; // 2^6 == 64
-
-    /* We will often need to do a mod operator (i mod nbits).  It
-     * turns out that, for powers of two, this mod operation is the
-     * same as (i & (nbits-1)).  Since mod is slow, we use a
-     * precomputed mod mask to do the mod instead.
-     */
-    protected final static int MOD_MASK = BITS - 1;
-
-    /** The actual data bits */
-    protected long bits[];
-
-    /** Construct a bitset of size one word (64 bits) */
-    public BitSet() {
-        this(BITS);
-    }
-
-    /** Construction from a static array of longs */
-    public BitSet(long[] bits_) {
-        bits = bits_;
-    }
-
-	/** Construction from a list of integers */
-	public BitSet(List items) {
-		this();
-		for (int i = 0; i < items.size(); i++) {
-			Integer v = (Integer) items.get(i);
-			add(v.intValue());
-		}
-	}
-
-    /** Construct a bitset given the size
-     * @param nbits The size of the bitset in bits
-     */
-    public BitSet(int nbits) {
-        bits = new long[((nbits - 1) >> LOG_BITS) + 1];
-    }
-
-	public static BitSet of(int el) {
-		BitSet s = new BitSet(el + 1);
-		s.add(el);
-		return s;
-	}
-
-	public static BitSet of(int a, int b) {
-		BitSet s = new BitSet(Math.max(a,b)+1);
-		s.add(a);
-		s.add(b);
-		return s;
-	}
-
-	public static BitSet of(int a, int b, int c) {
-		BitSet s = new BitSet();
-		s.add(a);
-		s.add(b);
-		s.add(c);
-		return s;
-	}
-
-	public static BitSet of(int a, int b, int c, int d) {
-		BitSet s = new BitSet();
-		s.add(a);
-		s.add(b);
-		s.add(c);
-		s.add(d);
-		return s;
-	}
-
-	/** return this | a in a new set */
-	public BitSet or(BitSet a) {
-		if ( a==null ) {
-			return this;
-		}
-		BitSet s = (BitSet)this.clone();
-		s.orInPlace(a);
-		return s;
-	}
-
-	/** or this element into this set (grow as necessary to accommodate) */
-	public void add(int el) {
-		int n = wordNumber(el);
-		if (n >= bits.length) {
-			growToInclude(el);
-		}
-		bits[n] |= bitMask(el);
-	}
-
-	/**
-	 * Grows the set to a larger number of bits.
-	 * @param bit element that must fit in set
-	 */
-	public void growToInclude(int bit) {
-		int newSize = Math.max(bits.length << 1, numWordsToHold(bit));
-		long newbits[] = new long[newSize];
-		System.arraycopy(bits, 0, newbits, 0, bits.length);
-		bits = newbits;
-	}
-
-	public void orInPlace(BitSet a) {
-		if ( a==null ) {
-			return;
-		}
-		// If this is smaller than a, grow this first
-		if (a.bits.length > bits.length) {
-			setSize(a.bits.length);
-		}
-		int min = Math.min(bits.length, a.bits.length);
-		for (int i = min - 1; i >= 0; i--) {
-			bits[i] |= a.bits[i];
-		}
-	}
-
-	/**
-	 * Sets the size of a set.
-	 * @param nwords how many words the new set should be
-	 */
-	private void setSize(int nwords) {
-		long newbits[] = new long[nwords];
-		int n = Math.min(nwords, bits.length);
-		System.arraycopy(bits, 0, newbits, 0, n);
-		bits = newbits;
-	}
-
-    private final static long bitMask(int bitNumber) {
-        int bitPosition = bitNumber & MOD_MASK; // bitNumber mod BITS
-        return 1L << bitPosition;
-    }
-
-    public Object clone() {
-        BitSet s;
-        try {
-            s = (BitSet)super.clone();
-            s.bits = new long[bits.length];
-            System.arraycopy(bits, 0, s.bits, 0, bits.length);
-        }
-        catch (CloneNotSupportedException e) {
-            throw new InternalError();
-        }
-        return s;
-    }
-
-    public int size() {
-        int deg = 0;
-        for (int i = bits.length - 1; i >= 0; i--) {
-            long word = bits[i];
-            if (word != 0L) {
-                for (int bit = BITS - 1; bit >= 0; bit--) {
-                    if ((word & (1L << bit)) != 0) {
-                        deg++;
-                    }
-                }
-            }
-        }
-        return deg;
-    }
-
-    public boolean equals(Object other) {
-        if ( other == null || !(other instanceof BitSet) ) {
-            return false;
-        }
-
-        BitSet otherSet = (BitSet)other;
-
-        int n = Math.min(this.bits.length, otherSet.bits.length);
-
-        // for any bits in common, compare
-        for (int i=0; i<n; i++) {
-            if (this.bits[i] != otherSet.bits[i]) {
-                return false;
-            }
-        }
-
-        // make sure any extra bits are off
-
-        if (this.bits.length > n) {
-            for (int i = n; i<this.bits.length; i++) { // start at n, the first extra word
-                if (this.bits[i] != 0) {
-                    return false;
-                }
-            }
-        }
-        else if (otherSet.bits.length > n) {
-            for (int i = n; i<otherSet.bits.length; i++) { // start at n, the first extra word
-                if (otherSet.bits[i] != 0) {
-                    return false;
-                }
-            }
-        }
-
-        return true;
-    }
-
-    public boolean member(int el) {
-		if ( el<0 ) {
-			return false;
-		}
-        int n = wordNumber(el);
-        if (n >= bits.length) return false;
-        return (bits[n] & bitMask(el)) != 0;
-    }
-
-	// remove this element from this set
-	public void remove(int el) {
-		int n = wordNumber(el);
-		if (n < bits.length) {
-			bits[n] &= ~bitMask(el);
-		}
-	}
-
-    public boolean isNil() {
-        for (int i = bits.length - 1; i >= 0; i--) {
-            if (bits[i] != 0) return false;
-        }
-        return true;
-    }
-
-    private final int numWordsToHold(int el) {
-        return (el >> LOG_BITS) + 1;
-    }
-
-    public int numBits() {
-        return bits.length << LOG_BITS; // num words * bits per word
-    }
-
-    /** Return how much space is being used by the bits array (in words),
-     *  not how many bits are actually set.
-     */
-    public int lengthInLongWords() {
-        return bits.length;
-    }
-
-    /**Is this contained within a? */
-    /*
-	public boolean subset(BitSet a) {
-        if (a == null || !(a instanceof BitSet)) return false;
-        return this.and(a).equals(this);
-    }
-	*/
-
-    public int[] toArray() {
-        int[] elems = new int[size()];
-        int en = 0;
-        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
-            if (member(i)) {
-                elems[en++] = i;
-            }
-        }
-        return elems;
-    }
-
-    public long[] toPackedArray() {
-        return bits;
-    }
-
-	private final static int wordNumber(int bit) {
-		return bit >> LOG_BITS; // bit / BITS
-	}
-
-	public String toString() {
-		return toString(null);
-	}
-
-	public String toString(String[] tokenNames) {
-		StringBuffer buf = new StringBuffer();
-		String separator = ",";
-		boolean havePrintedAnElement = false;
-		buf.append('{');
-
-		for (int i = 0; i < (bits.length << LOG_BITS); i++) {
-			if (member(i)) {
-				if (i > 0 && havePrintedAnElement ) {
-					buf.append(separator);
-				}
-				if ( tokenNames!=null ) {
-					buf.append(tokenNames[i]);
-				}
-				else {
-					buf.append(i);
-				}
-				havePrintedAnElement = true;
-			}
-		}
-		buf.append('}');
-		return buf.toString();
-	}
-
-
-}
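BitSet, deleted above, is the runtime's representation of FOLLOW sets used by the error-recovery code. A small, self-contained usage sketch follows; the token type constants are hypothetical and only serve the example.

import org.antlr.runtime.BitSet;

public class FollowSetDemo {
    // Hypothetical token types, for illustration only.
    static final int ID = 4, COMMA = 5, RPAREN = 6;

    public static void main(String[] args) {
        BitSet followExpr = BitSet.of(COMMA, RPAREN); // e.g. FOLLOW(expr)
        BitSet followArg  = BitSet.of(RPAREN);        // e.g. FOLLOW(arg)

        BitSet combined = followExpr.or(followArg);   // union, returned as a new set
        System.out.println(combined.member(COMMA));   // true
        System.out.println(combined.member(ID));      // false
        System.out.println(combined);                 // {5,6}
    }
}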
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/BufferedTokenStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/BufferedTokenStream.java
deleted file mode 100644
index 7742d4f..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/BufferedTokenStream.java
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.runtime;
-
-import java.util.List;
-import java.util.ArrayList;
-import java.util.NoSuchElementException;
-
-/** Buffer all input tokens but do on-demand fetching of new tokens from
- *  lexer. Useful when the parser or lexer has to set context/mode info before
- *  proper lexing of future tokens. The ST template parser needs this,
- *  for example, because it has to constantly flip back and forth between
- *  inside/outside templates. E.g., <names:{hi, <it>}> has to parse names
- *  as part of an expression but "hi, <it>" as a nested template.
- *
- *  You can't use this stream if you pass whitespace or other off-channel
- *  tokens to the parser. The stream can't ignore off-channel tokens.
- *  (UnbufferedTokenStream is the same way.)
- *
- *  This is not a subclass of UnbufferedTokenStream because I don't want
- *  to confuse the small moving window of tokens it uses with the full buffer.
- */
-public class BufferedTokenStream implements TokenStream {
-    protected TokenSource tokenSource;
-
-    /** Record every single token pulled from the source so we can reproduce
-     *  chunks of it later.  The buffer in LookaheadStream overlaps sometimes
-     *  as its moving window moves through the input.  This list captures
-     *  everything so we can access complete input text.
-     */
-    protected List<Token> tokens = new ArrayList<Token>(100);
-
-    /** Track the last mark() call result value for use in rewind(). */
-    protected int lastMarker;
-
-    /** The index into the tokens list of the current token (next token
-     *  to consume).  tokens[p] should be LT(1).  p=-1 indicates need
-     *  to initialize with first token.  The ctor doesn't get a token.
-     *  First call to LT(1) or whatever gets the first token and sets p=0;
-     */
-    protected int p = -1;
-
-	protected int range = -1; // how deep have we gone?
-
-    public BufferedTokenStream() {;}
-
-    public BufferedTokenStream(TokenSource tokenSource) {
-        this.tokenSource = tokenSource;
-    }
-
-    public TokenSource getTokenSource() { return tokenSource; }
-
-	public int index() { return p; }
-
-	public int range() { return range; }
-
-    public int mark() {
-        if ( p == -1 ) setup();
-		lastMarker = index();
-		return lastMarker;
-	}
-
-	public void release(int marker) {
-		// no resources to release
-	}
-
-    public void rewind(int marker) {
-        seek(marker);
-    }
-
-    public void rewind() {
-        seek(lastMarker);
-    }
-
-    public void reset() {
-        p = 0;
-        lastMarker = 0;
-    }
-
-    public void seek(int index) { p = index; }
-
-    public int size() { return tokens.size(); }
-
-    /** Move the input pointer to the next incoming token.  The stream
-     *  must become active with LT(1) available.  consume() simply
-     *  moves the input pointer so that LT(1) points at the next
-     *  input symbol. Consume at least one token.
-     *
-     *  Walk past any token not on the channel the parser is listening to.
-     */
-    public void consume() {
-        if ( p == -1 ) setup();
-        p++;
-        sync(p);
-    }
-
-    /** Make sure index i in tokens has a token. */
-    protected void sync(int i) {
-        int n = i - tokens.size() + 1; // how many more elements do we need?
-        //System.out.println("sync("+i+") needs "+n);
-        if ( n > 0 ) fetch(n);
-    }
-
-    /** add n elements to buffer */
-    protected void fetch(int n) {
-        for (int i=1; i<=n; i++) {
-            Token t = tokenSource.nextToken();
-            t.setTokenIndex(tokens.size());
-            //System.out.println("adding "+t+" at index "+tokens.size());
-            tokens.add(t);
-            if ( t.getType()==Token.EOF ) break;
-        }
-    }
-
-    public Token get(int i) {
-        if ( i < 0 || i >= tokens.size() ) {
-            throw new NoSuchElementException("token index "+i+" out of range 0.."+(tokens.size()-1));
-        }
-        return tokens.get(i);
-    }
-
-	/** Get all tokens from start..stop inclusively */
-	public List get(int start, int stop) {
-		if ( start<0 || stop<0 ) return null;
-		if ( p == -1 ) setup();
-		List subset = new ArrayList();
-		if ( stop>=tokens.size() ) stop = tokens.size()-1;
-		for (int i = start; i <= stop; i++) {
-			Token t = tokens.get(i);
-			if ( t.getType()==Token.EOF ) break;
-			subset.add(t);
-		}
-		return subset;
-	}
-
-	public int LA(int i) { return LT(i).getType(); }
-
-    protected Token LB(int k) {
-        if ( (p-k)<0 ) return null;
-        return tokens.get(p-k);
-    }
-
-    public Token LT(int k) {
-        if ( p == -1 ) setup();
-        if ( k==0 ) return null;
-        if ( k < 0 ) return LB(-k);
-
-		int i = p + k - 1;
-		sync(i);
-        if ( i >= tokens.size() ) { // return EOF token
-            // EOF must be last token
-            return tokens.get(tokens.size()-1);
-        }
-		if ( i>range ) range = i; 		
-        return tokens.get(i);
-    }
-
-    protected void setup() { sync(0); p = 0; }
-
-    /** Reset this token stream by setting its token source. */
-    public void setTokenSource(TokenSource tokenSource) {
-        this.tokenSource = tokenSource;
-        tokens.clear();
-        p = -1;
-    }
-    
-    public List getTokens() { return tokens; }
-
-    public List getTokens(int start, int stop) {
-        return getTokens(start, stop, (BitSet)null);
-    }
-
-    /** Given a start and stop index, return a List of all tokens in
-     *  the token type BitSet.  Return null if no tokens were found.  This
-     *  method looks at both on and off channel tokens.
-     */
-    public List getTokens(int start, int stop, BitSet types) {
-        if ( p == -1 ) setup();
-        if ( stop>=tokens.size() ) stop=tokens.size()-1;
-        if ( start<0 ) start=0;
-        if ( start>stop ) return null;
-
-        // list = tokens[start:stop]:{Token t, t.getType() in types}
-        List<Token> filteredTokens = new ArrayList<Token>();
-        for (int i=start; i<=stop; i++) {
-            Token t = tokens.get(i);
-            if ( types==null || types.member(t.getType()) ) {
-                filteredTokens.add(t);
-            }
-        }
-        if ( filteredTokens.size()==0 ) {
-            filteredTokens = null;
-        }
-        return filteredTokens;
-    }
-
-    public List getTokens(int start, int stop, List types) {
-        return getTokens(start,stop,new BitSet(types));
-    }
-
-    public List getTokens(int start, int stop, int ttype) {
-        return getTokens(start,stop,BitSet.of(ttype));
-    }
-
-    public String getSourceName() {	return tokenSource.getSourceName();	}
-
-    /** Grab *all* tokens from stream and return string */
-    public String toString() {
-        if ( p == -1 ) setup();
-        fill();
-        return toString(0, tokens.size()-1);
-    }
-
-    public String toString(int start, int stop) {
-        if ( start<0 || stop<0 ) return null;
-        if ( p == -1 ) setup();
-        if ( stop>=tokens.size() ) stop = tokens.size()-1;
-        StringBuffer buf = new StringBuffer();
-        for (int i = start; i <= stop; i++) {
-            Token t = tokens.get(i);
-            if ( t.getType()==Token.EOF ) break;
-            buf.append(t.getText());
-        }
-        return buf.toString();
-    }
-
-    public String toString(Token start, Token stop) {
-        if ( start!=null && stop!=null ) {
-            return toString(start.getTokenIndex(), stop.getTokenIndex());
-        }
-        return null;
-    }
-
-    /** Get all tokens from lexer until EOF */
-    public void fill() {
-        if ( p == -1 ) setup();
-        if ( tokens.get(p).getType()==Token.EOF ) return;
-
-        int i = p+1;
-        sync(i);
-        while ( tokens.get(i).getType()!=Token.EOF ) {
-            i++;
-            sync(i);
-        }
-    }
-}
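BufferedTokenStream, deleted above, buffers every token and fetches lazily from its TokenSource. The sketch below is self-contained: instead of a generated lexer it uses a tiny hand-rolled TokenSource (and a made-up ID token type) just to show fill() and getTokens().

import java.util.Arrays;
import java.util.Iterator;
import org.antlr.runtime.BufferedTokenStream;
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.Token;
import org.antlr.runtime.TokenSource;

public class BufferedStreamDemo {
    // Hypothetical token type, for illustration only.
    static final int ID = 4;

    // Stands in for a generated lexer: serves the given words, then EOF.
    static TokenSource source(String... words) {
        final Iterator<String> it = Arrays.asList(words).iterator();
        return new TokenSource() {
            public Token nextToken() {
                return it.hasNext() ? new CommonToken(ID, it.next())
                                    : new CommonToken(Token.EOF, "<EOF>");
            }
            public String getSourceName() { return "demo"; }
        };
    }

    public static void main(String[] args) {
        BufferedTokenStream tokens = new BufferedTokenStream(source("a", "b", "c"));
        tokens.fill();                      // pull everything up to EOF
        for (Object o : tokens.getTokens()) {
            Token t = (Token) o;
            System.out.println(t.getTokenIndex() + ": " + t.getText());
        }
    }
}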
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ClassicToken.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ClassicToken.java
deleted file mode 100644
index 72c2bd9..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ClassicToken.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-/** A Token object like we'd use in ANTLR 2.x; has an actual string created
- *  and associated with this object.  These objects are needed for imaginary
- *  tree nodes that have payload objects.  We need to create a Token object
- *  that has a string; the tree node will point at this token.  CommonToken
- *  has indexes into a char stream and hence cannot be used to introduce
- *  new strings.
- */
-public class ClassicToken implements Token {
-	protected String text;
-	protected int type;
-	protected int line;
-	protected int charPositionInLine;
-	protected int channel=DEFAULT_CHANNEL;
-
-	/** What token number is this from 0..n-1 tokens */
-	protected int index;
-
-	public ClassicToken(int type) {
-		this.type = type;
-	}
-
-	public ClassicToken(Token oldToken) {
-		text = oldToken.getText();
-		type = oldToken.getType();
-		line = oldToken.getLine();
-		charPositionInLine = oldToken.getCharPositionInLine();
-		channel = oldToken.getChannel();
-	}
-
-	public ClassicToken(int type, String text) {
-		this.type = type;
-		this.text = text;
-	}
-
-	public ClassicToken(int type, String text, int channel) {
-		this.type = type;
-		this.text = text;
-		this.channel = channel;
-	}
-
-	public int getType() {
-		return type;
-	}
-
-	public void setLine(int line) {
-		this.line = line;
-	}
-
-	public String getText() {
-		return text;
-	}
-
-	public void setText(String text) {
-		this.text = text;
-	}
-
-	public int getLine() {
-		return line;
-	}
-
-	public int getCharPositionInLine() {
-		return charPositionInLine;
-	}
-
-	public void setCharPositionInLine(int charPositionInLine) {
-		this.charPositionInLine = charPositionInLine;
-	}
-
-	public int getChannel() {
-		return channel;
-	}
-
-	public void setChannel(int channel) {
-		this.channel = channel;
-	}
-
-	public void setType(int type) {
-		this.type = type;
-	}
-
-	public int getTokenIndex() {
-		return index;
-	}
-
-	public void setTokenIndex(int index) {
-		this.index = index;
-	}
-
-	public CharStream getInputStream() {
-		return null;
-	}
-
-	public void setInputStream(CharStream input) {
-	}
-	
-	public String toString() {
-		String channelStr = "";
-		if ( channel>0 ) {
-			channelStr=",channel="+channel;
-		}
-		String txt = getText();
-		if ( txt!=null ) {
-			txt = txt.replaceAll("\n","\\\\n");
-			txt = txt.replaceAll("\r","\\\\r");
-			txt = txt.replaceAll("\t","\\\\t");
-		}
-		else {
-			txt = "<no text>";
-		}
-		return "[@"+getTokenIndex()+",'"+txt+"',<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+"]";
-	}
-}
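ClassicToken, deleted above, exists so that imaginary tree nodes can carry a free-standing string without pointing into a char stream. A short sketch; the imaginary token type is made up for the example.

import org.antlr.runtime.ClassicToken;
import org.antlr.runtime.Token;

public class ImaginaryTokenDemo {
    // Hypothetical imaginary token type, for illustration only.
    static final int FUNC_DECL = 40;

    public static void main(String[] args) {
        // No start/stop indexes needed; the text lives in the token itself.
        Token root = new ClassicToken(FUNC_DECL, "FUNC_DECL");
        System.out.println(root);   // [@0,'FUNC_DECL',<40>,0:0]
    }
}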
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/CommonToken.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/CommonToken.java
deleted file mode 100644
index 6bd9b89..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/CommonToken.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import java.io.Serializable;
-
-public class CommonToken implements Token, Serializable {
-	protected int type;
-	protected int line;
-	protected int charPositionInLine = -1; // set to invalid position
-	protected int channel=DEFAULT_CHANNEL;
-	protected transient CharStream input;
-
-	/** We need to be able to change the text once in a while.  If
-	 *  this is non-null, then getText should return this.  Note that
-	 *  start/stop are not affected by changing this.
-	  */
-	protected String text;
-
-	/** What token number is this from 0..n-1 tokens; < 0 implies invalid index */
-	protected int index = -1;
-
-	/** The char position into the input buffer where this token starts */
-	protected int start;
-
-	/** The char position into the input buffer where this token stops */
-	protected int stop;
-
-	public CommonToken(int type) {
-		this.type = type;
-	}
-
-	public CommonToken(CharStream input, int type, int channel, int start, int stop) {
-		this.input = input;
-		this.type = type;
-		this.channel = channel;
-		this.start = start;
-		this.stop = stop;
-	}
-
-	public CommonToken(int type, String text) {
-		this.type = type;
-		this.channel = DEFAULT_CHANNEL;
-		this.text = text;
-	}
-
-	public CommonToken(Token oldToken) {
-		text = oldToken.getText();
-		type = oldToken.getType();
-		line = oldToken.getLine();
-		index = oldToken.getTokenIndex();
-		charPositionInLine = oldToken.getCharPositionInLine();
-		channel = oldToken.getChannel();
-        input = oldToken.getInputStream();
-		if ( oldToken instanceof CommonToken ) {
-			start = ((CommonToken)oldToken).start;
-			stop = ((CommonToken)oldToken).stop;
-		}
-	}
-
-	public int getType() {
-		return type;
-	}
-
-	public void setLine(int line) {
-		this.line = line;
-	}
-
-	public String getText() {
-		if ( text!=null ) {
-			return text;
-		}
-		if ( input==null ) {
-			return null;
-		}
-		int n = input.size();
-		if ( start<n && stop<n) {
-			return input.substring(start,stop);
-		}
-		else {
-			return "<EOF>";
-		}
-	}
-
-	/** Override the text for this token.  getText() will return this text
-	 *  rather than pulling from the buffer.  Note that this does not mean
-	 *  that start/stop indexes are not valid.  It means that the input
-	 *  was converted to a new string in the token object.
-	 */
-	public void setText(String text) {
-		this.text = text;
-	}
-
-	public int getLine() {
-		return line;
-	}
-
-	public int getCharPositionInLine() {
-		return charPositionInLine;
-	}
-
-	public void setCharPositionInLine(int charPositionInLine) {
-		this.charPositionInLine = charPositionInLine;
-	}
-
-	public int getChannel() {
-		return channel;
-	}
-
-	public void setChannel(int channel) {
-		this.channel = channel;
-	}
-
-	public void setType(int type) {
-		this.type = type;
-	}
-
-	public int getStartIndex() {
-		return start;
-	}
-
-	public void setStartIndex(int start) {
-		this.start = start;
-	}
-
-	public int getStopIndex() {
-		return stop;
-	}
-
-	public void setStopIndex(int stop) {
-		this.stop = stop;
-	}
-
-	public int getTokenIndex() {
-		return index;
-	}
-
-	public void setTokenIndex(int index) {
-		this.index = index;
-	}
-
-	public CharStream getInputStream() {
-		return input;
-	}
-
-	public void setInputStream(CharStream input) {
-		this.input = input;
-	}
-
-	public String toString() {
-		String channelStr = "";
-		if ( channel>0 ) {
-			channelStr=",channel="+channel;
-		}
-		String txt = getText();
-		if ( txt!=null ) {
-			txt = txt.replaceAll("\n","\\\\n");
-			txt = txt.replaceAll("\r","\\\\r");
-			txt = txt.replaceAll("\t","\\\\t");
-		}
-		else {
-			txt = "<no text>";
-		}
-		return "[@"+getTokenIndex()+","+start+":"+stop+"='"+txt+"',<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+"]";
-	}
-}
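CommonToken, deleted above, normally resolves its text lazily from the char stream via start/stop, and setText() overrides that without touching the indexes. A self-contained sketch; the ID token type is hypothetical.

import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.CharStream;
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.Token;

public class CommonTokenDemo {
    // Hypothetical token type, for illustration only.
    static final int ID = 4;

    public static void main(String[] args) {
        CharStream chars = new ANTLRStringStream("hello world");

        // getText() pulls chars start..stop (inclusive) from the stream.
        CommonToken t = new CommonToken(chars, ID, Token.DEFAULT_CHANNEL, 0, 4);
        System.out.println(t.getText());    // hello

        // Overriding the text does not change the start/stop indexes.
        t.setText("HELLO");
        System.out.println(t.getText());                                  // HELLO
        System.out.println(t.getStartIndex() + ".." + t.getStopIndex());  // 0..4
    }
}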
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/CommonTokenStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/CommonTokenStream.java
deleted file mode 100644
index 2bea7ec..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/CommonTokenStream.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.runtime;
-
-/** The most common stream of tokens is one where every token is buffered up
- *  and tokens are filtered for a certain channel (the parser will only
- *  see these tokens).
- *
- *  Even though it buffers all of the tokens, this token stream pulls tokens
- *  from the tokens source on demand. In other words, until you ask for a
- *  token using consume(), LT(), etc. the stream does not pull from the lexer.
- *
- *  The only difference between this stream and BufferedTokenStream superclass
- *  is that this stream knows how to ignore off channel tokens. There may be
- *  a performance advantage to using the superclass if you don't pass
- *  whitespace and comments etc. to the parser on a hidden channel (i.e.,
- *  you set $channel instead of calling skip() in lexer rules.)
- *
- *  @see org.antlr.runtime.UnbufferedTokenStream
- *  @see org.antlr.runtime.BufferedTokenStream
- */
-public class CommonTokenStream extends BufferedTokenStream {
-    /** Skip tokens on any channel but this one; this is how we skip whitespace... */
-    protected int channel = Token.DEFAULT_CHANNEL;
-
-    public CommonTokenStream() { ; }
-
-    public CommonTokenStream(TokenSource tokenSource) {
-        super(tokenSource);
-    }
-
-    public CommonTokenStream(TokenSource tokenSource, int channel) {
-        this(tokenSource);
-        this.channel = channel;
-    }
-
-    /** Always leave p on an on-channel token. */
-    public void consume() {
-        if ( p == -1 ) setup();
-        p++;
-        sync(p);
-        while ( tokens.get(p).getChannel()!=channel ) {
-            p++;
-            sync(p);
-        }
-    }
-
-    protected Token LB(int k) {
-        if ( k==0 || (p-k)<0 ) return null;
-
-        int i = p;
-        int n = 1;
-        // find k good tokens looking backwards
-        while ( n<=k ) {
-            // skip off-channel tokens
-            i = skipOffTokenChannelsReverse(i-1);
-            n++;
-        }
-        if ( i<0 ) return null;
-        return tokens.get(i);
-    }
-
-    public Token LT(int k) {
-        //System.out.println("enter LT("+k+")");
-        if ( p == -1 ) setup();
-        if ( k == 0 ) return null;
-        if ( k < 0 ) return LB(-k);
-        int i = p;
-        int n = 1; // we know tokens[p] is a good one
-        // find k good tokens
-        while ( n<k ) {
-            // skip off-channel tokens
-            i = skipOffTokenChannels(i+1);
-            n++;
-        }
-		if ( i>range ) range = i;
-        return tokens.get(i);
-    }
-
-    /** Given a starting index, return the index of the first on-channel
-     *  token.
-     */
-    protected int skipOffTokenChannels(int i) {
-        sync(i);
-        while ( tokens.get(i).getChannel()!=channel ) { // also stops at EOF (it's onchannel)
-            i++;
-            sync(i);
-        }
-        return i;
-    }
-
-    protected int skipOffTokenChannelsReverse(int i) {
-        while ( i>=0 && ((Token)tokens.get(i)).getChannel()!=channel ) {
-            i--;
-        }
-        return i;
-    }
-
-	public void reset() {
-		super.reset();
-		p = skipOffTokenChannels(0);
-	}
-
-	protected void setup() {
-        p = 0;
-        sync(0);
-        int i = 0;
-        while ( tokens.get(i).getChannel()!=channel ) {
-            i++;
-            sync(i);
-        }
-        p = i;
-    }
-
-	/** Count EOF just once. */
-	public int getNumberOfOnChannelTokens() {
-		int n = 0;
-		fill();
-		for (int i = 0; i < tokens.size(); i++) {
-			Token t = tokens.get(i);
-			if ( t.getChannel()==channel ) n++;
-			if ( t.getType()==Token.EOF ) break;
-		}
-		return n;
-	}
-
-    /** Reset this token stream by setting its token source. */
-    public void setTokenSource(TokenSource tokenSource) {
-        super.setTokenSource(tokenSource);
-        channel = Token.DEFAULT_CHANNEL;
-    }
-}
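CommonTokenStream, deleted above, is the stream a parser normally reads from: it buffers everything but LT()/LA() skip tokens that are not on the parser's channel. The sketch is self-contained; the hand-rolled TokenSource and token types stand in for a generated lexer whose WS rule sets $channel = HIDDEN.

import java.util.Arrays;
import java.util.Iterator;
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.Token;
import org.antlr.runtime.TokenSource;

public class ChannelFilterDemo {
    // Hypothetical token types, for illustration only.
    static final int ID = 4, WS = 5;

    static Token hidden(CommonToken t) { t.setChannel(Token.HIDDEN_CHANNEL); return t; }

    public static void main(String[] args) {
        // "a", hidden whitespace, "b", then EOF.
        final Iterator<Token> it = Arrays.<Token>asList(
                new CommonToken(ID, "a"),
                hidden(new CommonToken(WS, " ")),
                new CommonToken(ID, "b")).iterator();
        TokenSource source = new TokenSource() {
            public Token nextToken() {
                return it.hasNext() ? it.next() : new CommonToken(Token.EOF, "<EOF>");
            }
            public String getSourceName() { return "demo"; }
        };

        CommonTokenStream tokens = new CommonTokenStream(source);
        System.out.println(tokens.LT(1).getText());               // a
        System.out.println(tokens.LT(2).getText());               // b -- hidden WS skipped
        System.out.println(tokens.getNumberOfOnChannelTokens());  // 3 (a, b, EOF)
        System.out.println(tokens.size());                        // 4 -- WS is still buffered
    }
}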
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/FailedPredicateException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/FailedPredicateException.java
deleted file mode 100644
index 5bef1bd..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/FailedPredicateException.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-/** A semantic predicate failed during validation.  Validation of predicates
- *  occurs when parsing the alternative normally, just like matching a token.
- *  Disambiguating predicate evaluation occurs when we hoist a predicate into
- *  a prediction decision.
- */
-public class FailedPredicateException extends RecognitionException {
-	public String ruleName;
-	public String predicateText;
-
-	/** Used for remote debugger deserialization */
-	public FailedPredicateException() {;}
-
-	public FailedPredicateException(IntStream input,
-									String ruleName,
-									String predicateText)
-	{
-		super(input);
-		this.ruleName = ruleName;
-		this.predicateText = predicateText;
-	}
-
-	public String toString() {
-		return "FailedPredicateException("+ruleName+",{"+predicateText+"}?)";
-	}
-}
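FailedPredicateException, deleted above, is what a generated rule throws when a gated {...}? semantic predicate evaluates to false while the alternative is being matched. The sketch below just constructs one to show what it carries; the rule name and predicate text are made up for illustration.

import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.FailedPredicateException;

public class PredicateDemo {
    public static void main(String[] args) {
        // In practice this is thrown by a generated rule method and caught as
        // a RecognitionException; here we build one directly for illustration.
        FailedPredicateException fpe = new FailedPredicateException(
                new ANTLRStringStream("input"),
                "declaration",                 // hypothetical rule name
                "isTypeName(input.LT(1))");    // hypothetical predicate text
        System.out.println(fpe);  // FailedPredicateException(declaration,{isTypeName(input.LT(1))}?)
        System.out.println(fpe.ruleName + " / " + fpe.predicateText);
    }
}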
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/LegacyCommonTokenStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/LegacyCommonTokenStream.java
deleted file mode 100644
index f9c5e39..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/LegacyCommonTokenStream.java
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import java.util.*;
-
-/** The most common stream of tokens is one where every token is buffered up
- *  and tokens are prefiltered for a certain channel (the parser will only
- *  see these tokens and cannot change the filter channel number during the
- *  parse).
- *
- *  TODO: how to access the full token stream?  How to track all tokens matched per rule?
- */
-public class LegacyCommonTokenStream implements TokenStream {
-    protected TokenSource tokenSource;
-
-	/** Record every single token pulled from the source so we can reproduce
-	 *  chunks of it later.
-	 */
-	protected List tokens;
-
-	/** Map<tokentype, channel> to override some Tokens' channel numbers */
-	protected Map channelOverrideMap;
-
-	/** Set<tokentype>; discard any tokens with this type */
-	protected Set discardSet;
-
-	/** Skip tokens on any channel but this one; this is how we skip whitespace... */
-	protected int channel = Token.DEFAULT_CHANNEL;
-
-	/** By default, track all incoming tokens */
-	protected boolean discardOffChannelTokens = false;
-
-	/** Track the last mark() call result value for use in rewind(). */
-	protected int lastMarker;
-
-	protected int range = -1; // how deep have we gone?	
-
-	/** The index into the tokens list of the current token (next token
-     *  to consume).  p==-1 indicates that the tokens list is empty
-     */
-    protected int p = -1;
-
-	public LegacyCommonTokenStream() {
-		tokens = new ArrayList(500);
-	}
-
-	public LegacyCommonTokenStream(TokenSource tokenSource) {
-	    this();
-		this.tokenSource = tokenSource;
-	}
-
-	public LegacyCommonTokenStream(TokenSource tokenSource, int channel) {
-		this(tokenSource);
-		this.channel = channel;
-	}
-
-	/** Reset this token stream by setting its token source. */
-	public void setTokenSource(TokenSource tokenSource) {
-		this.tokenSource = tokenSource;
-		tokens.clear();
-		p = -1;
-		channel = Token.DEFAULT_CHANNEL;
-	}
-
-	/** Load all tokens from the token source and put in tokens.
-	 *  This is done upon first LT request because you might want to
-	 *  set some token type / channel overrides before filling buffer.
-	 */
-	protected void fillBuffer() {
-		int index = 0;
-		Token t = tokenSource.nextToken();
-		while ( t!=null && t.getType()!=CharStream.EOF ) {
-			boolean discard = false;
-			// is there a channel override for token type?
-			if ( channelOverrideMap!=null ) {
-				Integer channelI = (Integer)
-					channelOverrideMap.get(new Integer(t.getType()));
-				if ( channelI!=null ) {
-					t.setChannel(channelI.intValue());
-				}
-			}
-			if ( discardSet!=null &&
-				 discardSet.contains(new Integer(t.getType())) )
-			{
-				discard = true;
-			}
-			else if ( discardOffChannelTokens && t.getChannel()!=this.channel ) {
-				discard = true;
-			}
-			if ( !discard )	{
-				t.setTokenIndex(index);
-				tokens.add(t);
-				index++;
-			}
-			t = tokenSource.nextToken();
-		}
-		// leave p pointing at first token on channel
-		p = 0;
-		p = skipOffTokenChannels(p);
-    }
-
-	/** Move the input pointer to the next incoming token.  The stream
-	 *  must become active with LT(1) available.  consume() simply
-	 *  moves the input pointer so that LT(1) points at the next
-	 *  input symbol. Consume at least one token.
-	 *
-	 *  Walk past any token not on the channel the parser is listening to.
-	 */
-	public void consume() {
-		if ( p<tokens.size() ) {
-            p++;
-			p = skipOffTokenChannels(p); // leave p on valid token
-        }
-    }
-
-	/** Given a starting index, return the index of the first on-channel
-	 *  token.
-	 */
-	protected int skipOffTokenChannels(int i) {
-		int n = tokens.size();
-		while ( i<n && ((Token)tokens.get(i)).getChannel()!=channel ) {
-			i++;
-		}
-		return i;
-	}
-
-	protected int skipOffTokenChannelsReverse(int i) {
-		while ( i>=0 && ((Token)tokens.get(i)).getChannel()!=channel ) {
-			i--;
-		}
-		return i;
-	}
-
-	/** A simple filter mechanism whereby you can tell this token stream
-	 *  to force all tokens of type ttype to be on channel.  For example,
-	 *  when interpreting, we cannot exec actions so we need to tell
-	 *  the stream to force all WS and NEWLINE to be a different, ignored
-	 *  channel.
-	 */
-	public void setTokenTypeChannel(int ttype, int channel) {
-		if ( channelOverrideMap==null ) {
-			channelOverrideMap = new HashMap();
-		}
-        channelOverrideMap.put(new Integer(ttype), new Integer(channel));
-	}
-
-	public void discardTokenType(int ttype) {
-		if ( discardSet==null ) {
-			discardSet = new HashSet();
-		}
-        discardSet.add(new Integer(ttype));
-	}
-
-	public void discardOffChannelTokens(boolean discardOffChannelTokens) {
-		this.discardOffChannelTokens = discardOffChannelTokens;
-	}
-
-	public List getTokens() {
-		if ( p == -1 ) {
-			fillBuffer();
-		}
-		return tokens;
-	}
-
-	public List getTokens(int start, int stop) {
-		return getTokens(start, stop, (BitSet)null);
-	}
-
-	/** Given a start and stop index, return a List of all tokens in
-	 *  the token type BitSet.  Return null if no tokens were found.  This
-	 *  method looks at both on and off channel tokens.
-	 */
-	public List getTokens(int start, int stop, BitSet types) {
-		if ( p == -1 ) {
-			fillBuffer();
-		}
-		if ( stop>=tokens.size() ) {
-			stop=tokens.size()-1;
-		}
-		if ( start<0 ) {
-			start=0;
-		}
-		if ( start>stop ) {
-			return null;
-		}
-
-		// list = tokens[start:stop]:{Token t, t.getType() in types}
-		List filteredTokens = new ArrayList();
-		for (int i=start; i<=stop; i++) {
-			Token t = (Token)tokens.get(i);
-			if ( types==null || types.member(t.getType()) ) {
-				filteredTokens.add(t);
-			}
-		}
-		if ( filteredTokens.size()==0 ) {
-			filteredTokens = null;
-		}
-		return filteredTokens;
-	}
-
-	public List getTokens(int start, int stop, List types) {
-		return getTokens(start,stop,new BitSet(types));
-	}
-
-	public List getTokens(int start, int stop, int ttype) {
-		return getTokens(start,stop,BitSet.of(ttype));
-	}
-
-	/** Get the kth token of lookahead from the current position, where k=1
-	 *  is the first symbol of lookahead.
-	 */
-	public Token LT(int k) {
-		if ( p == -1 ) {
-			fillBuffer();
-		}
-		if ( k==0 ) {
-			return null;
-		}
-		if ( k<0 ) {
-			return LB(-k);
-		}
-		//System.out.print("LT(p="+p+","+k+")=");
-		if ( (p+k-1) >= tokens.size() ) {
-            return (Token)tokens.get(tokens.size()-1);
-		}
-		//System.out.println(tokens.get(p+k-1));
-		int i = p;
-		int n = 1;
-		// find k good tokens
-		while ( n<k ) {
-			// skip off-channel tokens
-			i = skipOffTokenChannels(i+1); // leave p on valid token
-			n++;
-		}
-		if ( i>=tokens.size() ) {
-            return (Token)tokens.get(tokens.size()-1); // must be EOF
-		}
-
-		if ( i>range ) range = i;
-        return (Token)tokens.get(i);
-    }
-
-	/** Look backwards k on-channel tokens. */
-	protected Token LB(int k) {
-		//System.out.print("LB(p="+p+","+k+") ");
-		if ( p == -1 ) {
-			fillBuffer();
-		}
-		if ( k==0 ) {
-			return null;
-		}
-		if ( (p-k)<0 ) {
-			return null;
-		}
-
-		int i = p;
-		int n = 1;
-		// find k good tokens looking backwards
-		while ( n<=k ) {
-			// skip off-channel tokens
-			i = skipOffTokenChannelsReverse(i-1); // leave p on valid token
-			n++;
-		}
-		if ( i<0 ) {
-			return null;
-		}
-		return (Token)tokens.get(i);
-	}
-
-	/** Return absolute token i; ignore which channel the tokens are on;
-	 *  that is, count all tokens not just on-channel tokens.
-	 */
-	public Token get(int i) {
-		return (Token)tokens.get(i);
-	}
-
-	/** Get all tokens from start..stop inclusively */
-	public List get(int start, int stop) {
-		if ( p == -1 ) fillBuffer();
-		if ( start<0 || stop<0 ) return null;
-		return tokens.subList(start, stop);
-	}
-
-	public int LA(int i) {
-        return LT(i).getType();
-    }
-
-    public int mark() {
-		if ( p == -1 ) {
-			fillBuffer();
-		}
-		lastMarker = index();
-		return lastMarker;
-	}
-
-	public void release(int marker) {
-		// no resources to release
-	}
-
-	public int size() {
-		return tokens.size();
-	}
-
-    public int index() {
-        return p;
-    }
-
-	public int range() {
-		return range;
-	}
-
-	public void rewind(int marker) {
-		seek(marker);
-	}
-
-	public void rewind() {
-		seek(lastMarker);
-	}
-
-	public void reset() {
-		p = 0;
-		lastMarker = 0;
-	}
-	
-	public void seek(int index) {
-		p = index;
-	}
-
-	public TokenSource getTokenSource() {
-		return tokenSource;
-	}
-
-	public String getSourceName() {
-		return getTokenSource().getSourceName();
-	}
-
-	public String toString() {
-		if ( p == -1 ) {
-			fillBuffer();
-		}
-		return toString(0, tokens.size()-1);
-	}
-
-	public String toString(int start, int stop) {
-		if ( start<0 || stop<0 ) {
-			return null;
-		}
-		if ( p == -1 ) {
-			fillBuffer();
-		}
-		if ( stop>=tokens.size() ) {
-			stop = tokens.size()-1;
-		}
- 		StringBuffer buf = new StringBuffer();
-		for (int i = start; i <= stop; i++) {
-			Token t = (Token)tokens.get(i);
-			buf.append(t.getText());
-		}
-		return buf.toString();
-	}
-
-	public String toString(Token start, Token stop) {
-		if ( start!=null && stop!=null ) {
-			return toString(start.getTokenIndex(), stop.getTokenIndex());
-		}
-		return null;
-	}
-}
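LegacyCommonTokenStream, deleted above, keeps the older eager-fill model and adds per-token-type overrides that must be installed before the buffer fills. A self-contained sketch; the TokenSource and token types are made-up stand-ins for a generated lexer.

import java.util.Arrays;
import java.util.Iterator;
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.LegacyCommonTokenStream;
import org.antlr.runtime.Token;
import org.antlr.runtime.TokenSource;

public class LegacyStreamDemo {
    // Hypothetical token types, for illustration only.
    static final int ID = 4, WS = 5, COMMENT = 6;

    public static void main(String[] args) {
        final Iterator<Token> it = Arrays.<Token>asList(
                new CommonToken(ID, "x"),
                new CommonToken(WS, " "),
                new CommonToken(COMMENT, "// note"),
                new CommonToken(ID, "y")).iterator();
        TokenSource source = new TokenSource() {
            public Token nextToken() {
                return it.hasNext() ? it.next() : new CommonToken(Token.EOF, "<EOF>");
            }
            public String getSourceName() { return "demo"; }
        };

        LegacyCommonTokenStream tokens = new LegacyCommonTokenStream(source);
        // Install overrides before the first LT() call triggers fillBuffer().
        tokens.setTokenTypeChannel(WS, Token.HIDDEN_CHANNEL); // push WS off channel
        tokens.discardTokenType(COMMENT);                     // drop comments entirely

        System.out.println(tokens.LT(1).getText()); // x
        System.out.println(tokens.LT(2).getText()); // y -- WS skipped, COMMENT discarded
        System.out.println(tokens.size());          // 3: x, the hidden WS, and y
    }
}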
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/Lexer.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/Lexer.java
deleted file mode 100644
index a1a24e1..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/Lexer.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-/** A lexer is a recognizer that draws input symbols from a character stream.
- *  Lexer grammars result in a subclass of this object. A Lexer object
- *  uses simplified match() and error recovery mechanisms in the interest
- *  of speed.
- */
-public abstract class Lexer extends BaseRecognizer implements TokenSource {
-	/** Where is the lexer drawing characters from? */
-	protected CharStream input;
-
-	public Lexer() {
-	}
-
-	public Lexer(CharStream input) {
-		this.input = input;
-	}
-
-	public Lexer(CharStream input, RecognizerSharedState state) {
-		super(state);
-		this.input = input;
-	}
-
-	public void reset() {
-		super.reset(); // reset all recognizer state variables
-		// clear the lexer-specific state variables
-		if ( input!=null ) {
-			input.seek(0); // rewind the input
-		}
-		if ( state==null ) {
-			return; // no shared state work to do
-		}
-		state.token = null;
-		state.type = Token.INVALID_TOKEN_TYPE;
-		state.channel = Token.DEFAULT_CHANNEL;
-		state.tokenStartCharIndex = -1;
-		state.tokenStartCharPositionInLine = -1;
-		state.tokenStartLine = -1;
-		state.text = null;
-	}
-
-	/** Return a token from this source; i.e., match a token on the char
-	 *  stream.
-	 */
-	public Token nextToken() {
-		while (true) {
-			state.token = null;
-			state.channel = Token.DEFAULT_CHANNEL;
-			state.tokenStartCharIndex = input.index();
-			state.tokenStartCharPositionInLine = input.getCharPositionInLine();
-			state.tokenStartLine = input.getLine();
-			state.text = null;
-			if ( input.LA(1)==CharStream.EOF ) {
-                Token eof = new CommonToken((CharStream)input,Token.EOF,
-                                            Token.DEFAULT_CHANNEL,
-                                            input.index(),input.index());
-                eof.setLine(getLine());
-                eof.setCharPositionInLine(getCharPositionInLine());
-                return eof;
-			}
-			try {
-				mTokens();
-				if ( state.token==null ) {
-					emit();
-				}
-				else if ( state.token==Token.SKIP_TOKEN ) {
-					continue;
-				}
-				return state.token;
-			}
-			catch (MismatchedRangeException re) {
-				reportError(re);
-				// matchRange() routine has already called recover()
-			}
-			catch (MismatchedTokenException re) {
-				reportError(re);
-				// match() routine has already called recover()
-			}
-			catch (RecognitionException re) {
-				reportError(re);
-				recover(re); // throw out current char and try again
-			}
-		}
-	}
-
-	/** Instruct the lexer to skip creating a token for the current lexer rule
-	 *  and look for another token.  nextToken() knows to keep looking when
-	 *  a lexer rule finishes with token set to SKIP_TOKEN.  Recall that
-	 *  if token==null at end of any token rule, it creates one for you
-	 *  and emits it.
-	 */
-	public void skip() {
-		state.token = Token.SKIP_TOKEN;
-	}
-
-	/** This is the lexer entry point that sets instance var 'token' */
-	public abstract void mTokens() throws RecognitionException;
-
-	/** Set the char stream and reset the lexer */
-	public void setCharStream(CharStream input) {
-		this.input = null;
-		reset();
-		this.input = input;
-	}
-
-	public CharStream getCharStream() {
-		return this.input;
-	}
-
-	public String getSourceName() {
-		return input.getSourceName();
-	}
-
-	/** Currently does not support multiple emits per nextToken invocation
-	 *  for efficiency reasons.  Subclass and override this method and
-	 *  nextToken (to push tokens into a list and pull from that list rather
-	 *  than a single variable as this implementation does).
-	 */
-	public void emit(Token token) {
-		state.token = token;
-	}
-
-	/** The standard method called to automatically emit a token at the
-	 *  outermost lexical rule.  The token object should point into the
-	 *  char buffer start..stop.  If there is a text override in 'text',
-	 *  use that to set the token's text.  Override this method to emit
-	 *  custom Token objects.
-	 *
-	 *  If you are building trees, then you should also override
-	 *  Parser or TreeParser.getMissingSymbol().
-	 */
-	public Token emit() {
-		Token t = new CommonToken(input, state.type, state.channel, state.tokenStartCharIndex, getCharIndex()-1);
-		t.setLine(state.tokenStartLine);
-		t.setText(state.text);
-		t.setCharPositionInLine(state.tokenStartCharPositionInLine);
-		emit(t);
-		return t;
-	}
-
-	public void match(String s) throws MismatchedTokenException {
-		int i = 0;
-		while ( i<s.length() ) {
-			if ( input.LA(1)!=s.charAt(i) ) {
-				if ( state.backtracking>0 ) {
-					state.failed = true;
-					return;
-				}
-				MismatchedTokenException mte =
-					new MismatchedTokenException(s.charAt(i), input);
-				recover(mte);
-				throw mte;
-			}
-			i++;
-			input.consume();
-			state.failed = false;
-		}
-	}
-
-	public void matchAny() {
-		input.consume();
-	}
-
-	public void match(int c) throws MismatchedTokenException {
-		if ( input.LA(1)!=c ) {
-			if ( state.backtracking>0 ) {
-				state.failed = true;
-				return;
-			}
-			MismatchedTokenException mte =
-				new MismatchedTokenException(c, input);
-			recover(mte);  // don't really recover; just consume in lexer
-			throw mte;
-		}
-		input.consume();
-		state.failed = false;
-	}
-
-	public void matchRange(int a, int b)
-		throws MismatchedRangeException
-	{
-		if ( input.LA(1)<a || input.LA(1)>b ) {
-			if ( state.backtracking>0 ) {
-				state.failed = true;
-				return;
-			}
-			MismatchedRangeException mre =
-				new MismatchedRangeException(a,b,input);
-			recover(mre);
-			throw mre;
-		}
-		input.consume();
-		state.failed = false;
-	}
-
-	public int getLine() {
-		return input.getLine();
-	}
-
-	public int getCharPositionInLine() {
-		return input.getCharPositionInLine();
-	}
-
-	/** What is the index of the current character of lookahead? */
-	public int getCharIndex() {
-		return input.index();
-	}
-
-	/** Return the text matched so far for the current token or any
-	 *  text override.
-	 */
-	public String getText() {
-		if ( state.text!=null ) {
-			return state.text;
-		}
-		return input.substring(state.tokenStartCharIndex,getCharIndex()-1);
-	}
-
-	/** Set the complete text of this token; it wipes any previous
-	 *  changes to the text.
-	 */
-	public void setText(String text) {
-		state.text = text;
-	}
-
-	public void reportError(RecognitionException e) {
-		/** TODO: not thought about recovery in lexer yet.
-		 *
-		// if we've already reported an error and have not matched a token
-		// yet successfully, don't report any errors.
-		if ( errorRecovery ) {
-			//System.err.print("[SPURIOUS] ");
-			return;
-		}
-		errorRecovery = true;
-		 */
-
-		displayRecognitionError(this.getTokenNames(), e);
-	}
-
-	public String getErrorMessage(RecognitionException e, String[] tokenNames) {
-		String msg = null;
-		if ( e instanceof MismatchedTokenException ) {
-			MismatchedTokenException mte = (MismatchedTokenException)e;
-			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting "+getCharErrorDisplay(mte.expecting);
-		}
-		else if ( e instanceof NoViableAltException ) {
-			NoViableAltException nvae = (NoViableAltException)e;
-			// for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
-			// and "(decision="+nvae.decisionNumber+") and
-			// "state "+nvae.stateNumber
-			msg = "no viable alternative at character "+getCharErrorDisplay(e.c);
-		}
-		else if ( e instanceof EarlyExitException ) {
-			EarlyExitException eee = (EarlyExitException)e;
-			// for development, can add "(decision="+eee.decisionNumber+")"
-			msg = "required (...)+ loop did not match anything at character "+getCharErrorDisplay(e.c);
-		}
-		else if ( e instanceof MismatchedNotSetException ) {
-			MismatchedNotSetException mse = (MismatchedNotSetException)e;
-			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+mse.expecting;
-		}
-		else if ( e instanceof MismatchedSetException ) {
-			MismatchedSetException mse = (MismatchedSetException)e;
-			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+mse.expecting;
-		}
-		else if ( e instanceof MismatchedRangeException ) {
-			MismatchedRangeException mre = (MismatchedRangeException)e;
-			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+
-				  getCharErrorDisplay(mre.a)+".."+getCharErrorDisplay(mre.b);
-		}
-		else {
-			msg = super.getErrorMessage(e, tokenNames);
-		}
-		return msg;
-	}
-
-	public String getCharErrorDisplay(int c) {
-		String s = String.valueOf((char)c);
-		switch ( c ) {
-			case Token.EOF :
-				s = "<EOF>";
-				break;
-			case '\n' :
-				s = "\\n";
-				break;
-			case '\t' :
-				s = "\\t";
-				break;
-			case '\r' :
-				s = "\\r";
-				break;
-		}
-		return "'"+s+"'";
-	}
-
-	/** Lexers can normally match any char in its vocabulary after matching
-	 *  a token, so do the easy thing and just kill a character and hope
-	 *  it all works out.  You can instead use the rule invocation stack
-	 *  to do sophisticated error recovery if you are in a fragment rule.
-	 */
-	public void recover(RecognitionException re) {
-		//System.out.println("consuming char "+(char)input.LA(1)+" during recovery");
-		//re.printStackTrace();
-		input.consume();
-	}
-
-	public void traceIn(String ruleName, int ruleIndex)  {
-		String inputSymbol = ((char)input.LT(1))+" line="+getLine()+":"+getCharPositionInLine();
-		super.traceIn(ruleName, ruleIndex, inputSymbol);
-	}
-
-	public void traceOut(String ruleName, int ruleIndex)  {
-		String inputSymbol = ((char)input.LT(1))+" line="+getLine()+":"+getCharPositionInLine();
-		super.traceOut(ruleName, ruleIndex, inputSymbol);
-	}
-}
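The nextToken() loop above drives everything: it calls the generated mTokens(), auto-emits a token when the rule did not, keeps looping on SKIP_TOKEN, and recovers from recognition errors by consuming one character. A minimal sketch of pulling tokens until EOF, assuming a hypothetical generated lexer named MyLexer:

import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.CharStream;
import org.antlr.runtime.Token;

public class LexDemo {
    public static void main(String[] args) {
        CharStream input = new ANTLRStringStream("a = b + 1;");
        MyLexer lexer = new MyLexer(input); // hypothetical generated lexer
        for (Token t = lexer.nextToken(); t.getType() != Token.EOF; t = lexer.nextToken()) {
            // Each token carries its type, text and position, as set by emit().
            System.out.println("type=" + t.getType() + " text='" + t.getText()
                    + "' at " + t.getLine() + ":" + t.getCharPositionInLine());
        }
    }
}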
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedNotSetException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedNotSetException.java
deleted file mode 100644
index 49ceb27..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedNotSetException.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-public class MismatchedNotSetException extends MismatchedSetException {
-	/** Used for remote debugger deserialization */
-	public MismatchedNotSetException() {;}
-
-	public MismatchedNotSetException(BitSet expecting, IntStream input) {
-		super(expecting, input);
-	}
-
-	public String toString() {
-		return "MismatchedNotSetException("+getUnexpectedType()+"!="+expecting+")";
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedRangeException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedRangeException.java
deleted file mode 100644
index 23b3d87..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedRangeException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-public class MismatchedRangeException extends RecognitionException {
-	public int a,b;
-
-	/** Used for remote debugger deserialization */
-	public MismatchedRangeException() {;}
-
-	public MismatchedRangeException(int a, int b, IntStream input) {
-		super(input);
-		this.a = a;
-		this.b = b;
-	}
-
-	public String toString() {
-		return "MismatchedRangeException("+getUnexpectedType()+" not in ["+a+","+b+"])";
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedSetException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedSetException.java
deleted file mode 100644
index 9bfa530..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedSetException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-public class MismatchedSetException extends RecognitionException {
-	public BitSet expecting;
-
-	/** Used for remote debugger deserialization */
-	public MismatchedSetException() {;}
-
-	public MismatchedSetException(BitSet expecting, IntStream input) {
-		super(input);
-		this.expecting = expecting;
-	}
-
-	public String toString() {
-		return "MismatchedSetException("+getUnexpectedType()+"!="+expecting+")";
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTokenException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTokenException.java
deleted file mode 100644
index 07ae814..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTokenException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-/** A mismatched char or Token or tree node */
-public class MismatchedTokenException extends RecognitionException {
-	public int expecting = Token.INVALID_TOKEN_TYPE;
-
-	/** Used for remote debugger deserialization */
-	public MismatchedTokenException() {;}
-
-	public MismatchedTokenException(int expecting, IntStream input) {
-		super(input);
-		this.expecting = expecting;
-	}
-
-	public String toString() {
-		return "MismatchedTokenException("+getUnexpectedType()+"!="+expecting+")";
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTreeNodeException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTreeNodeException.java
deleted file mode 100644
index 99c834d..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTreeNodeException.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import org.antlr.runtime.tree.TreeNodeStream;
-import org.antlr.runtime.tree.Tree;
-
-/** A tree parser encountered a tree node other than the one it expected.
- */
-public class MismatchedTreeNodeException extends RecognitionException {
-	public int expecting;
-
-	public MismatchedTreeNodeException() {
-	}
-
-	public MismatchedTreeNodeException(int expecting, TreeNodeStream input) {
-		super(input);
-		this.expecting = expecting;
-	}
-
-	public String toString() {
-		return "MismatchedTreeNodeException("+getUnexpectedType()+"!="+expecting+")";
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MissingTokenException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MissingTokenException.java
deleted file mode 100644
index 9eda1f2..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/MissingTokenException.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-/** We were expecting a token but it was not found.  The current token
- *  is actually what we wanted next.  Used for tree node errors too.
- */
-public class MissingTokenException extends MismatchedTokenException {
-	public Object inserted;
-	/** Used for remote debugger deserialization */
-	public MissingTokenException() {;}
-
-	public MissingTokenException(int expecting, IntStream input, Object inserted) {
-		super(expecting, input);
-		this.inserted = inserted;
-	}
-
-	public int getMissingType() {
-		return expecting;
-	}
-
-	public String toString() {
-		if ( inserted!=null && token!=null ) {
-			return "MissingTokenException(inserted "+inserted+" at "+token.getText()+")";
-		}
-		if ( token!=null ) {
-			return "MissingTokenException(at "+token.getText()+")";
-		}
-		return "MissingTokenException";
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/NoViableAltException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/NoViableAltException.java
deleted file mode 100644
index 889045f..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/NoViableAltException.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-public class NoViableAltException extends RecognitionException {
-	public String grammarDecisionDescription;
-	public int decisionNumber;
-	public int stateNumber;
-
-	/** Used for remote debugger deserialization */
-	public NoViableAltException() {;}
-	
-	public NoViableAltException(String grammarDecisionDescription,
-								int decisionNumber,
-								int stateNumber,
-								IntStream input)
-	{
-		super(input);
-		this.grammarDecisionDescription = grammarDecisionDescription;
-		this.decisionNumber = decisionNumber;
-		this.stateNumber = stateNumber;
-	}
-
-	public String toString() {
-		if ( input instanceof CharStream ) {
-			return "NoViableAltException('"+(char)getUnexpectedType()+"'@["+grammarDecisionDescription+"])";
-		}
-		else {
-			return "NoViableAltException("+getUnexpectedType()+"@["+grammarDecisionDescription+"])";
-		}
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/Parser.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/Parser.java
deleted file mode 100644
index 030cf72..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/Parser.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import org.antlr.runtime.debug.DebugTokenStream;
-
-/** A parser for TokenStreams.  "parser grammars" result in a subclass
- *  of this.
- */
-public class Parser extends BaseRecognizer {
-	public TokenStream input;
-
-	public Parser(TokenStream input) {
-		super(); // highlight that we go to super to set state object
-		setTokenStream(input);
-    }
-
-	public Parser(TokenStream input, RecognizerSharedState state) {
-		super(state); // share the state object with another parser
-		this.input = input;
-    }
-
-	public void reset() {
-		super.reset(); // reset all recognizer state variables
-		if ( input!=null ) {
-			input.seek(0); // rewind the input
-		}
-	}
-
-	protected Object getCurrentInputSymbol(IntStream input) {
-		return ((TokenStream)input).LT(1);
-	}
-
-	protected Object getMissingSymbol(IntStream input,
-									  RecognitionException e,
-									  int expectedTokenType,
-									  BitSet follow)
-	{
-		String tokenText = null;
-		if ( expectedTokenType==Token.EOF ) tokenText = "<missing EOF>";
-		else tokenText = "<missing "+getTokenNames()[expectedTokenType]+">";
-		CommonToken t = new CommonToken(expectedTokenType, tokenText);
-		Token current = ((TokenStream)input).LT(1);
-		if ( current.getType() == Token.EOF ) {
-			current = ((TokenStream)input).LT(-1);
-		}
-		t.line = current.getLine();
-		t.charPositionInLine = current.getCharPositionInLine();
-		t.channel = DEFAULT_TOKEN_CHANNEL;
-		t.input = current.getInputStream();
-		return t;
-	}
-
-	/** Set the token stream and reset the parser */
-	public void setTokenStream(TokenStream input) {
-		this.input = null;
-		reset();
-		this.input = input;
-	}
-
-    public TokenStream getTokenStream() {
-		return input;
-	}
-
-	public String getSourceName() {
-		return input.getSourceName();
-	}
-
-	public void traceIn(String ruleName, int ruleIndex)  {
-		super.traceIn(ruleName, ruleIndex, input.LT(1));
-	}
-
-	public void traceOut(String ruleName, int ruleIndex)  {
-		super.traceOut(ruleName, ruleIndex, input.LT(1));
-	}
-}
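A Parser reads from a TokenStream; during error recovery getMissingSymbol() conjures a CommonToken stamped with the current token's line and column so downstream code still sees a plausible symbol. Typical wiring of lexer, token stream and parser looks like the sketch below; MyLexer, MyParser and the start rule prog() are hypothetical generated names:

import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.RecognitionException;

public class ParseDemo {
    public static void main(String[] args) throws RecognitionException {
        MyLexer lexer = new MyLexer(new ANTLRStringStream("a = b + 1;")); // hypothetical
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        MyParser parser = new MyParser(tokens);                           // hypothetical
        parser.prog(); // hypothetical start rule
        System.out.println(parser.getNumberOfSyntaxErrors() + " syntax error(s)");
    }
}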
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ParserRuleReturnScope.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ParserRuleReturnScope.java
deleted file mode 100644
index e3e38ce..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ParserRuleReturnScope.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import org.antlr.runtime.tree.CommonTree;
-
-/** Rules that return more than a single value must return an object
- *  containing all the values.  Besides the properties defined in
- *  RuleLabelScope.predefinedRulePropertiesScope there may be user-defined
- *  return values.  This class simply defines the minimum properties that
- *  are always defined and methods to access the others that might be
- *  available depending on output option such as template and tree.
- *
- *  Note that text is not an actual property of the return value; it is computed
- *  from start and stop using the input stream's toString() method.  I
- *  could add a ctor to this so that we can pass in and store the input
- *  stream, but I'm not sure we want to do that.  It would seem to be undefined
- *  to get the .text property anyway if the rule matches tokens from multiple
- *  input streams.
- *
- *  I do not use getters for fields of objects that are used simply to
- *  group values such as this aggregate.  The getters/setters are there to
- *  satisfy the superclass interface.
- */
-public class ParserRuleReturnScope extends RuleReturnScope {
-	public Token start, stop;
-	public Object getStart() { return start; }
-	public Object getStop() { return stop; }
-
-	public Object tree; // only used when output=AST
-	public Object getTree() { return tree; }
-}
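Since text is not stored on the return scope, it has to be recomputed from the start and stop tokens via the token stream's toString(Token,Token). A short sketch, assuming a hypothetical generated rule method expr() whose return type extends ParserRuleReturnScope:

import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.ParserRuleReturnScope;
import org.antlr.runtime.RecognitionException;
import org.antlr.runtime.tree.CommonTree;

public class RuleReturnDemo {
    // tokens and parser are assumed to be wired up as in the earlier parser sketch.
    static void dump(CommonTokenStream tokens, MyParser parser) throws RecognitionException {
        ParserRuleReturnScope r = parser.expr();            // hypothetical rule
        String text = tokens.toString(r.start, r.stop);     // recompute .text from the start..stop tokens
        CommonTree ast = (CommonTree) r.getTree();          // non-null only when output=AST
        System.out.println(text + " -> " + (ast == null ? "no tree" : ast.toStringTree()));
    }
}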
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/RecognitionException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/RecognitionException.java
deleted file mode 100644
index 3e79f99..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/RecognitionException.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import org.antlr.runtime.tree.*;
-
-/** The root of the ANTLR exception hierarchy.
- *
- *  To avoid English-only error messages and to generally make things
- *  as flexible as possible, these exceptions are not created with strings,
- *  but rather the information necessary to generate an error.  Then
- *  the various reporting methods in Parser and Lexer can be overridden
- *  to generate a localized error message.  For example, MismatchedToken
- *  exceptions are built with the expected token type.
- *  So, don't expect getMessage() to return anything.
- *
- *  Note that as of Java 1.4, you can access the stack trace, which means
- *  that you can compute the complete trace of rules from the start symbol.
- *  This gives you considerable context information with which to generate
- *  useful error messages.
- *
- *  ANTLR generates code that throws exceptions upon recognition error and
- *  also generates code to catch these exceptions in each rule.  If you
- *  want to quit upon first error, you can turn off the automatic error
- *  handling mechanism using rulecatch action, but you still need to
- *  override methods mismatch and recoverFromMismatchSet.
- *
- *  In general, the recognition exceptions can track where in a grammar a
- *  problem occurred and/or what was the expected input.  While the parser
- *  knows its state (such as current input symbol and line info) that
- *  state can change before the exception is reported so current token index
- *  is computed and stored at exception time.  From this info, you can
- *  perhaps print an entire line of input not just a single token, for example.
- *  Better to just say the recognizer had a problem and then let the parser
- *  figure out a fancy report.
- */
-public class RecognitionException extends Exception {
-	/** What input stream did the error occur in? */
-	public transient IntStream input;
-
-	/** What is index of token/char were we looking at when the error occurred? */
-	public int index;
-
-	/** The current Token when an error occurred.  Since not all streams
-	 *  can retrieve the ith Token, we have to track the Token object.
-	 *  For parsers.  Even when it's a tree parser, token might be set.
-	 */
-	public Token token;
-
-	/** If this is a tree parser exception, node is set to the node with
-	 *  the problem.
-	 */
-	public Object node;
-
-	/** The current char when an error occurred. For lexers. */
-	public int c;
-
-	/** Track the line at which the error occurred in case this is
-	 *  generated from a lexer.  We need to track this since the
-	 *  unexpected char doesn't carry the line info.
-	 */
-	public int line;
-
-	public int charPositionInLine;
-
-	/** If you are parsing a tree node stream, you will encounter some
-	 *  imaginary nodes w/o line/col info.  We now search backwards looking
-	 *  for most recent token with line/col info, but notify getErrorHeader()
-	 *  that info is approximate.
-	 */
-	public boolean approximateLineInfo;
-
-	/** Used for remote debugger deserialization */
-	public RecognitionException() {
-	}
-
-	public RecognitionException(IntStream input) {
-		this.input = input;
-		this.index = input.index();
-		if ( input instanceof TokenStream ) {
-			this.token = ((TokenStream)input).LT(1);
-			this.line = token.getLine();
-			this.charPositionInLine = token.getCharPositionInLine();
-		}
-		if ( input instanceof TreeNodeStream ) {
-			extractInformationFromTreeNodeStream(input);
-		}
-		else if ( input instanceof CharStream ) {
-			this.c = input.LA(1);
-			this.line = ((CharStream)input).getLine();
-			this.charPositionInLine = ((CharStream)input).getCharPositionInLine();
-		}
-		else {
-			this.c = input.LA(1);
-		}
-	}
-
-	protected void extractInformationFromTreeNodeStream(IntStream input) {
-		TreeNodeStream nodes = (TreeNodeStream)input;
-		this.node = nodes.LT(1);
-		TreeAdaptor adaptor = nodes.getTreeAdaptor();
-		Token payload = adaptor.getToken(node);
-		if ( payload!=null ) {
-			this.token = payload;
-			if ( payload.getLine()<= 0 ) {
-				// imaginary node; no line/pos info; scan backwards
-				int i = -1;
-				Object priorNode = nodes.LT(i);
-				while ( priorNode!=null ) {
-					Token priorPayload = adaptor.getToken(priorNode);
-					if ( priorPayload!=null && priorPayload.getLine()>0 ) {
-						// we found the most recent real line / pos info
-						this.line = priorPayload.getLine();
-						this.charPositionInLine = priorPayload.getCharPositionInLine();
-						this.approximateLineInfo = true;
-						break;
-					}
-					--i;
-					priorNode = nodes.LT(i);
-				}
-			}
-			else { // node created from real token
-				this.line = payload.getLine();
-				this.charPositionInLine = payload.getCharPositionInLine();
-			}
-		}
-		else if ( this.node instanceof Tree) {
-			this.line = ((Tree)this.node).getLine();
-			this.charPositionInLine = ((Tree)this.node).getCharPositionInLine();
-			if ( this.node instanceof CommonTree) {
-				this.token = ((CommonTree)this.node).token;
-			}
-		}
-		else {
-			int type = adaptor.getType(this.node);
-			String text = adaptor.getText(this.node);
-			this.token = new CommonToken(type, text);
-		}
-	}
-
-	/** Return the token type or char of the unexpected input element */
-	public int getUnexpectedType() {
-		if ( input instanceof TokenStream ) {
-			return token.getType();
-		}
-		else if ( input instanceof TreeNodeStream ) {
-			TreeNodeStream nodes = (TreeNodeStream)input;
-			TreeAdaptor adaptor = nodes.getTreeAdaptor();
-			return adaptor.getType(node);
-		}
-		else {
-			return c;
-		}
-	}
-}
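Because the exception carries raw facts (token, char, line, column) instead of a message, callers can build whatever diagnostic they like. A small sketch that catches one around a hypothetical generated start rule and formats a report from the public fields:

import org.antlr.runtime.RecognitionException;

public class ErrorReportDemo {
    // parser is assumed to be a hypothetical ANTLR-generated parser, wired as in the earlier sketch.
    static void parseAndReport(MyParser parser) {
        try {
            parser.prog(); // hypothetical start rule
        } catch (RecognitionException e) {
            // getMessage() is intentionally empty; assemble the report from the fields instead.
            String where = "line " + e.line + ":" + e.charPositionInLine
                    + (e.approximateLineInfo ? " (approximate)" : "");
            String what = (e.token != null) ? "token '" + e.token.getText() + "'" : "char code " + e.c;
            System.err.println("recognition error at " + where + " near " + what);
        }
    }
}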
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/RecognizerSharedState.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/RecognizerSharedState.java
deleted file mode 100644
index 068ac3b..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/RecognizerSharedState.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import java.util.Map;
-
-/** The set of fields needed by an abstract recognizer to recognize input
- *  and recover from errors etc...  As a separate state object, it can be
- *  shared among multiple grammars; e.g., when one grammar imports another.
- *
- *  These fields are publicly visible but the actual state pointer per
- *  parser is protected.
- */
-public class RecognizerSharedState {
-	/** Track the set of token types that can follow any rule invocation.
-	 *  Stack grows upwards.  When it hits the max, it grows 2x in size
-	 *  and keeps going.
-	 */
-	public BitSet[] following = new BitSet[BaseRecognizer.INITIAL_FOLLOW_STACK_SIZE];
-	public int _fsp = -1;
-
-	/** This is true when we see an error and before having successfully
-	 *  matched a token.  Prevents generation of more than one error message
-	 *  per error.
-	 */
-	public boolean errorRecovery = false;
-
-	/** The index into the input stream where the last error occurred.
-	 * 	This is used to prevent infinite loops where an error is found
-	 *  but no token is consumed during recovery...another error is found,
-	 *  ad nauseam.  This is a failsafe mechanism to guarantee that at least
-	 *  one token/tree node is consumed for two errors.
-	 */
-	public int lastErrorIndex = -1;
-
-	/** In lieu of a return value, this indicates that a rule or token
-	 *  has failed to match.  Reset to false upon valid token match.
-	 */
-	public boolean failed = false;
-
-	/** Did the recognizer encounter a syntax error?  Track how many. */
-	public int syntaxErrors = 0;
-
-	/** If 0, no backtracking is going on.  Safe to exec actions etc...
-	 *  If >0 then it's the level of backtracking.
-	 */
-	public int backtracking = 0;
-
-	/** An array[size num rules] of Map<Integer,Integer> that tracks
-	 *  the stop token index for each rule.  ruleMemo[ruleIndex] is
-	 *  the memoization table for ruleIndex.  For key ruleStartIndex, you
-	 *  get back the stop token for associated rule or MEMO_RULE_FAILED.
-	 *
-	 *  This is only used if rule memoization is on (which it is by default).
-	 */
-	public Map[] ruleMemo;
-
-
-	// LEXER FIELDS (must be in same state object to avoid casting
-	//               constantly in generated code and Lexer object) :(
-
-
-	/** The goal of all lexer rules/methods is to create a token object.
-	 *  This is an instance variable as multiple rules may collaborate to
-	 *  create a single token.  nextToken will return this object after
-	 *  matching lexer rule(s).  If you subclass to allow multiple token
-	 *  emissions, then set this to the last token to be matched or
-	 *  something nonnull so that the auto token emit mechanism will not
-	 *  emit another token.
-	 */
-    public Token token;
-
-	/** What character index in the stream did the current token start at?
-	 *  Needed, for example, to get the text for current token.  Set at
-	 *  the start of nextToken.
- 	 */
-	public int tokenStartCharIndex = -1;
-
-	/** The line on which the first character of the token resides */
-	public int tokenStartLine;
-
-	/** The character position of first character within the line */
-	public int tokenStartCharPositionInLine;
-
-	/** The channel number for the current token */
-	public int channel;
-
-	/** The token type for the current token */
-	public int type;
-
-	/** You can set the text for the current token to override what is in
-	 *  the input char buffer.  Use setText() or set this instance variable directly.
- 	 */
-	public String text;
-
-    public RecognizerSharedState() {;}
-    
-    public RecognizerSharedState(RecognizerSharedState state) {
-        if ( this.following.length < state.following.length ) {
-            this.following = new BitSet[state.following.length];
-        }
-        System.arraycopy(state.following, 0, this.following, 0, state.following.length);
-        this._fsp = state._fsp;
-        this.errorRecovery = state.errorRecovery;
-        this.lastErrorIndex = state.lastErrorIndex;
-        this.failed = state.failed;
-        this.syntaxErrors = state.syntaxErrors;
-        this.backtracking = state.backtracking;
-        if ( state.ruleMemo!=null ) {
-            this.ruleMemo = new Map[state.ruleMemo.length];
-            System.arraycopy(state.ruleMemo, 0, this.ruleMemo, 0, state.ruleMemo.length);
-        }
-        this.token = state.token;
-        this.tokenStartCharIndex = state.tokenStartCharIndex;
-        this.tokenStartCharPositionInLine = state.tokenStartCharPositionInLine;
-        this.channel = state.channel;
-        this.type = state.type;
-        this.text = state.text;
-    }
-}
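The copy constructor above is what lets composite (imported) grammars work: every delegate recognizer is handed the same RecognizerSharedState, so the follow-set stack, error-recovery flags and syntax-error count stay consistent across grammars. A hedged sketch using the (input, state) constructors, with hypothetical generated parser classes:

import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.RecognizerSharedState;

public class SharedStateDemo {
    static void wire(CommonTokenStream tokens) {
        RecognizerSharedState state = new RecognizerSharedState();
        MyParser main = new MyParser(tokens, state);                   // hypothetical generated parser
        MyDelegateParser helper = new MyDelegateParser(tokens, state); // hypothetical delegate parser
        // Both recognizers now report into the same counters and flags.
        System.out.println("shared syntax errors so far: " + state.syntaxErrors);
    }
}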
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/SerializedGrammar.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/SerializedGrammar.java
deleted file mode 100644
index a609053..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/SerializedGrammar.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.runtime;
-
-import java.io.IOException;
-import java.io.FileInputStream;
-import java.io.BufferedInputStream;
-import java.io.DataInputStream;
-import java.util.List;
-import java.util.ArrayList;
-
-public class SerializedGrammar {
-    public static final String COOKIE = "$ANTLR";
-    public static final int FORMAT_VERSION = 1;
-    //public static org.antlr.tool.Grammar gr; // TESTING ONLY; remove later
-
-    public String name;
-    public char type; // in {l, p, t, c}
-    public List rules;
-
-    class Rule {
-        String name;
-        Block block;
-        public Rule(String name, Block block) {
-            this.name = name;
-            this.block = block;
-        }
-        public String toString() {
-            return name+":"+block;
-        }
-    }
-
-    class Block {
-        List[] alts;
-        public Block(List[] alts) {
-            this.alts = alts;
-        }
-        public String toString() {
-            StringBuffer buf = new StringBuffer();
-            buf.append("(");
-            for (int i = 0; i < alts.length; i++) {
-                List alt = alts[i];
-                if ( i>0 ) buf.append("|");
-                buf.append(alt.toString());
-            }
-            buf.append(")");
-            return buf.toString();
-        }
-    }
-
-    class TokenRef {
-        int ttype;
-        public TokenRef(int ttype) { this.ttype = ttype; }
-        public String toString() { return String.valueOf(ttype); }
-    }
-
-    class RuleRef {
-        int ruleIndex;
-        public RuleRef(int ruleIndex) { this.ruleIndex = ruleIndex; }
-        public String toString() { return String.valueOf(ruleIndex); }
-    }
-
-    public SerializedGrammar(String filename) throws IOException {
-        System.out.println("loading "+filename);
-        FileInputStream fis = new FileInputStream(filename);
-        BufferedInputStream bos = new BufferedInputStream(fis);
-        DataInputStream in = new DataInputStream(bos);
-        readFile(in);
-        in.close();
-    }
-
-    protected void readFile(DataInputStream in) throws IOException {
-        String cookie = readString(in); // get $ANTLR
-        if ( !cookie.equals(COOKIE) ) throw new IOException("not a serialized grammar file");
-        int version = in.readByte();
-        char grammarType = (char)in.readByte();
-        this.type = grammarType;
-        String grammarName = readString(in);
-        this.name = grammarName;
-        System.out.println(grammarType+" grammar "+grammarName);
-        int numRules = in.readShort();
-        System.out.println("num rules = "+numRules);
-        rules = readRules(in, numRules);
-    }
-
-    protected List readRules(DataInputStream in, int numRules) throws IOException {
-        List rules = new ArrayList();
-        for (int i=0; i<numRules; i++) {
-            Rule r = readRule(in);
-            rules.add(r);
-        }
-        return rules;
-    }
-
-    protected Rule readRule(DataInputStream in) throws IOException {
-        byte R = in.readByte();
-        if ( R!='R' ) throw new IOException("missing R on start of rule");
-        String name = readString(in);
-        System.out.println("rule: "+name);
-        byte B = in.readByte();
-        Block b = readBlock(in);
-        byte period = in.readByte();
-        if ( period!='.' ) throw new IOException("missing . on end of rule");
-        return new Rule(name, b);
-    }
-
-    protected Block readBlock(DataInputStream in) throws IOException {
-        int nalts = in.readShort();
-        List[] alts = new List[nalts];
-        //System.out.println("enter block n="+nalts);
-        for (int i=0; i<nalts; i++) {
-            List alt = readAlt(in);
-            alts[i] = alt;
-        }
-        //System.out.println("exit block");
-        return new Block(alts);
-    }
-
-    protected List readAlt(DataInputStream in) throws IOException {
-        List alt = new ArrayList();
-        byte A = in.readByte();
-        if ( A!='A' ) throw new IOException("missing A on start of alt");
-        byte cmd = in.readByte();
-        while ( cmd!=';' ) {
-            switch (cmd) {
-                case 't' :
-                    int ttype = in.readShort();
-                    alt.add(new TokenRef(ttype));
-                    //System.out.println("read token "+gr.getTokenDisplayName(ttype));
-                    break;
-                case 'r' :
-                    int ruleIndex = in.readShort();
-                    alt.add(new RuleRef(ruleIndex));
-                    //System.out.println("read rule "+gr.getRuleName(ruleIndex));
-                    break;
-                case '.' : // wildcard
-                    break;
-                case '-' : // range
-                    int from = in.readChar();
-                    int to = in.readChar();
-                    break;
-                case '~' : // not
-                    int notThisTokenType = in.readShort();
-                    break;
-                case 'B' : // nested block
-                    Block b = readBlock(in);
-                    alt.add(b);
-                    break;
-            }
-            cmd = in.readByte();
-        }
-        //System.out.println("exit alt");
-        return alt;
-    }
-
-    protected String readString(DataInputStream in) throws IOException {
-        byte c = in.readByte();
-        StringBuffer buf = new StringBuffer();
-        while ( c!=';' ) {
-            buf.append((char)c);
-            c = in.readByte();
-        }
-        return buf.toString();
-    }
-
-    public String toString() {
-        StringBuffer buf = new StringBuffer();
-        buf.append(type+" grammar "+name);
-        buf.append(rules);
-        return buf.toString();
-    }
-}
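SerializedGrammar reads ANTLR's binary grammar serialization: a "$ANTLR" cookie, a format version byte, the grammar type and name, then each rule as R <name>, a block of alternatives, and a closing '.'. Loading and dumping a file is a constructor call plus toString(), sketched below with a hypothetical file name:

import java.io.IOException;
import org.antlr.runtime.SerializedGrammar;

public class LoadSerializedGrammar {
    public static void main(String[] args) throws IOException {
        // "T.g.ser" is a hypothetical path to a serialized grammar file.
        SerializedGrammar g = new SerializedGrammar("T.g.ser");
        System.out.println(g); // prints "<type> grammar <name>" followed by the rule list
    }
}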
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/Token.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/Token.java
deleted file mode 100644
index b8eb95e..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/Token.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-public interface Token {
-	public static final int EOR_TOKEN_TYPE = 1;
-
-	/** imaginary tree navigation type; traverse "get child" link */
-	public static final int DOWN = 2;
-	/** imaginary tree navigation type; finish with a child list */
-	public static final int UP = 3;
-
-	public static final int MIN_TOKEN_TYPE = UP+1;
-
-    public static final int EOF = CharStream.EOF;
-    // TODO: remove once we go ANTLR v3.3
-    public static final Token EOF_TOKEN = new CommonToken(EOF);
-
-	public static final int INVALID_TOKEN_TYPE = 0;
-	public static final Token INVALID_TOKEN = new CommonToken(INVALID_TOKEN_TYPE);
-
-	/** In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
-	 *  will avoid creating a token for this symbol and try to fetch another.
-	 */
-	public static final Token SKIP_TOKEN = new CommonToken(INVALID_TOKEN_TYPE);
-
-	/** All tokens go to the parser (unless skip() is called in that rule)
-	 *  on a particular "channel".  The parser tunes to a particular channel
-	 *  so that whitespace etc... can go to the parser on a "hidden" channel.
-	 */
-	public static final int DEFAULT_CHANNEL = 0;
-	
-	/** Anything on different channel than DEFAULT_CHANNEL is not parsed
-	 *  by parser.
-	 */
-	public static final int HIDDEN_CHANNEL = 99;
-
-	/** Get the text of the token */
-	public String getText();
-	public void setText(String text);
-
-	public int getType();
-	public void setType(int ttype);
-	/**  The line number on which this token was matched; line=1..n */
-	public int getLine();
-    public void setLine(int line);
-
-	/** The index of the first character relative to the beginning of the line 0..n-1 */
-	public int getCharPositionInLine();
-	public void setCharPositionInLine(int pos);
-
-	public int getChannel();
-	public void setChannel(int channel);
-
-	/** An index from 0..n-1 of the token object in the input stream.
-	 *  This must be valid in order to use the ANTLRWorks debugger.
-	 */
-	public int getTokenIndex();
-	public void setTokenIndex(int index);
-
-	/** From what character stream was this token created?  You don't have to
-	 *  implement but it's nice to know where a Token comes from if you have
-	 *  include files etc... on the input.
-	 */
-	public CharStream getInputStream();
-	public void setInputStream(CharStream input);
-}
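The channel constants above are what let a lexer hide whitespace and comments from the parser: the parser tunes to DEFAULT_CHANNEL and never sees anything routed to HIDDEN_CHANNEL. A minimal sketch of that mechanism, assuming the runtime's CommonToken implementation and arbitrary token types chosen only for illustration:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;

    public class ChannelSketch {
        public static void main(String[] args) {
            final int WS = 4, ID = 5;              // hypothetical token types
            Token ws = new CommonToken(WS, "   ");
            ws.setChannel(Token.HIDDEN_CHANNEL);   // a parser tuned to DEFAULT_CHANNEL skips this
            Token id = new CommonToken(ID, "x");   // CommonToken defaults to DEFAULT_CHANNEL
            System.out.println(ws.getChannel() + " vs " + id.getChannel()); // 99 vs 0
        }
    }

In a grammar the same effect is normally achieved with {$channel=HIDDEN;} in the whitespace lexer rule.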
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/TokenRewriteStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/TokenRewriteStream.java
deleted file mode 100644
index 8437441..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/TokenRewriteStream.java
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import java.util.*;
-
-/** Useful for dumping out the input stream after doing some
- *  augmentation or other manipulations.
- *
- *  You can insert stuff, replace, and delete chunks.  Note that the
- *  operations are done lazily--only if you convert the buffer to a
- *  String.  This is very efficient because you are not moving data around
- *  all the time.  As the buffer of tokens is converted to strings, the
- *  toString() method(s) check to see if there is an operation at the
- *  current index.  If so, the operation is done and then normal String
- *  rendering continues on the buffer.  This is like having multiple Turing
- *  machine instruction streams (programs) operating on a single input tape. :)
- *
- *  Since the operations are done lazily at toString-time, operations do not
- *  screw up the token index values.  That is, an insert operation at token
- *  index i does not change the index values for tokens i+1..n-1.
- *
- *  Because operations never actually alter the buffer, you may always get
- *  the original token stream back without undoing anything.  Since
- *  the instructions are queued up, you can easily simulate transactions and
- *  roll back any changes if there is an error just by removing instructions.
- *  For example,
- *
- *   CharStream input = new ANTLRFileStream("input");
- *   TLexer lex = new TLexer(input);
- *   TokenRewriteStream tokens = new TokenRewriteStream(lex);
- *   T parser = new T(tokens);
- *   parser.startRule();
- *
- * 	 Then in the rules, you can execute
- *      Token t,u;
- *      ...
- *      input.insertAfter(t, "text to put after t");
- *      input.insertAfter(u, "text after u");
- *      System.out.println(tokens.toString());
- *
- *  Actually, you have to cast the 'input' to a TokenRewriteStream. :(
- *
- *  You can also have multiple "instruction streams" and get multiple
- *  rewrites from a single pass over the input.  Just name the instruction
- *  streams and use that name again when printing the buffer.  This could be
- *  useful for generating a C file and also its header file--all from the
- *  same buffer:
- *
- *      tokens.insertAfter("pass1", t, "text to put after t");
- *      tokens.insertAfter("pass2", u, "text after u");
- *      System.out.println(tokens.toString("pass1"));
- *      System.out.println(tokens.toString("pass2"));
- *
- *  If you don't use named rewrite streams, a "default" stream is used as
- *  the first example shows.
- */
-public class TokenRewriteStream extends CommonTokenStream {
-	public static final String DEFAULT_PROGRAM_NAME = "default";
-    public static final int PROGRAM_INIT_SIZE = 100;
-	public static final int MIN_TOKEN_INDEX = 0;
-
-	// Define the rewrite operation hierarchy
-
-	class RewriteOperation {
-        /** What index into rewrites List are we? */
-        protected int instructionIndex;
-        /** Token buffer index. */
-        protected int index;
-		protected Object text;
-
-		protected RewriteOperation(int index) {
-			this.index = index;
-		}
-
-		protected RewriteOperation(int index, Object text) {
-			this.index = index;
-			this.text = text;
-		}
-		/** Execute the rewrite operation by possibly adding to the buffer.
-		 *  Return the index of the next token to operate on.
-		 */
-		public int execute(StringBuffer buf) {
-			return index;
-		}
-		public String toString() {
-			String opName = getClass().getName();
-			int $index = opName.indexOf('$');
-			opName = opName.substring($index+1, opName.length());
-			return "<"+opName+"@"+tokens.get(index)+
-				   ":\""+text+"\">";
-		}
-	}
-
-	class InsertBeforeOp extends RewriteOperation {
-		public InsertBeforeOp(int index, Object text) {
-			super(index,text);
-		}
-		public int execute(StringBuffer buf) {
-			buf.append(text);
-			if ( tokens.get(index).getType()!=Token.EOF ) {
-				buf.append(tokens.get(index).getText());
-			}
-			return index+1;
-		}
-	}
-
-	/** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
-	 *  instructions.
-	 */
-	class ReplaceOp extends RewriteOperation {
-		protected int lastIndex;
-		public ReplaceOp(int from, int to, Object text) {
-			super(from,text);
-			lastIndex = to;
-		}
-		public int execute(StringBuffer buf) {
-			if ( text!=null ) {
-				buf.append(text);
-			}
-			return lastIndex+1;
-		}
-		public String toString() {
-			if ( text==null ) {
-				return "<DeleteOp@"+tokens.get(index)+
-					   ".."+tokens.get(lastIndex)+">";
-			}
-			return "<ReplaceOp@"+tokens.get(index)+
-				   ".."+tokens.get(lastIndex)+":\""+text+"\">";
-		}
-	}
-
-	/** You may have multiple, named streams of rewrite operations.
-	 *  I'm calling these things "programs."
-	 *  Maps String (name) -> rewrite (List)
-	 */
-	protected Map programs = null;
-
-	/** Map String (program name) -> Integer index */
-	protected Map lastRewriteTokenIndexes = null;
-
-	public TokenRewriteStream() {
-		init();
-	}
-
-	protected void init() {
-		programs = new HashMap();
-		programs.put(DEFAULT_PROGRAM_NAME, new ArrayList(PROGRAM_INIT_SIZE));
-		lastRewriteTokenIndexes = new HashMap();
-	}
-
-	public TokenRewriteStream(TokenSource tokenSource) {
-	    super(tokenSource);
-		init();
-	}
-
-	public TokenRewriteStream(TokenSource tokenSource, int channel) {
-		super(tokenSource, channel);
-		init();
-	}
-
-	public void rollback(int instructionIndex) {
-		rollback(DEFAULT_PROGRAM_NAME, instructionIndex);
-	}
-
-	/** Rollback the instruction stream for a program so that
-	 *  the indicated instruction (via instructionIndex) is no
-	 *  longer in the stream.  UNTESTED!
-	 */
-	public void rollback(String programName, int instructionIndex) {
-		List is = (List)programs.get(programName);
-		if ( is!=null ) {
-			programs.put(programName, is.subList(MIN_TOKEN_INDEX,instructionIndex));
-		}
-	}
-
-	public void deleteProgram() {
-		deleteProgram(DEFAULT_PROGRAM_NAME);
-	}
-
-	/** Reset the program so that no instructions exist */
-	public void deleteProgram(String programName) {
-		rollback(programName, MIN_TOKEN_INDEX);
-	}
-
-	public void insertAfter(Token t, Object text) {
-		insertAfter(DEFAULT_PROGRAM_NAME, t, text);
-	}
-
-	public void insertAfter(int index, Object text) {
-		insertAfter(DEFAULT_PROGRAM_NAME, index, text);
-	}
-
-	public void insertAfter(String programName, Token t, Object text) {
-		insertAfter(programName,t.getTokenIndex(), text);
-	}
-
-	public void insertAfter(String programName, int index, Object text) {
-		// to insert after, just insert before next index (even if past end)
-		insertBefore(programName,index+1, text);
-	}
-
-	public void insertBefore(Token t, Object text) {
-		insertBefore(DEFAULT_PROGRAM_NAME, t, text);
-	}
-
-	public void insertBefore(int index, Object text) {
-		insertBefore(DEFAULT_PROGRAM_NAME, index, text);
-	}
-
-	public void insertBefore(String programName, Token t, Object text) {
-		insertBefore(programName, t.getTokenIndex(), text);
-	}
-
-	public void insertBefore(String programName, int index, Object text) {
-		RewriteOperation op = new InsertBeforeOp(index,text);
-		List rewrites = getProgram(programName);
-        op.instructionIndex = rewrites.size();
-        rewrites.add(op);		
-	}
-
-	public void replace(int index, Object text) {
-		replace(DEFAULT_PROGRAM_NAME, index, index, text);
-	}
-
-	public void replace(int from, int to, Object text) {
-		replace(DEFAULT_PROGRAM_NAME, from, to, text);
-	}
-
-	public void replace(Token indexT, Object text) {
-		replace(DEFAULT_PROGRAM_NAME, indexT, indexT, text);
-	}
-
-	public void replace(Token from, Token to, Object text) {
-		replace(DEFAULT_PROGRAM_NAME, from, to, text);
-	}
-
-	public void replace(String programName, int from, int to, Object text) {
-		if ( from > to || from<0 || to<0 || to >= tokens.size() ) {
-			throw new IllegalArgumentException("replace: range invalid: "+from+".."+to+"(size="+tokens.size()+")");
-		}
-		RewriteOperation op = new ReplaceOp(from, to, text);
-		List rewrites = getProgram(programName);
-        op.instructionIndex = rewrites.size();
-        rewrites.add(op);
-	}
-
-	public void replace(String programName, Token from, Token to, Object text) {
-		replace(programName,
-				from.getTokenIndex(),
-				to.getTokenIndex(),
-				text);
-	}
-
-	public void delete(int index) {
-		delete(DEFAULT_PROGRAM_NAME, index, index);
-	}
-
-	public void delete(int from, int to) {
-		delete(DEFAULT_PROGRAM_NAME, from, to);
-	}
-
-	public void delete(Token indexT) {
-		delete(DEFAULT_PROGRAM_NAME, indexT, indexT);
-	}
-
-	public void delete(Token from, Token to) {
-		delete(DEFAULT_PROGRAM_NAME, from, to);
-	}
-
-	public void delete(String programName, int from, int to) {
-		replace(programName,from,to,null);
-	}
-
-	public void delete(String programName, Token from, Token to) {
-		replace(programName,from,to,null);
-	}
-
-	public int getLastRewriteTokenIndex() {
-		return getLastRewriteTokenIndex(DEFAULT_PROGRAM_NAME);
-	}
-
-	protected int getLastRewriteTokenIndex(String programName) {
-		Integer I = (Integer)lastRewriteTokenIndexes.get(programName);
-		if ( I==null ) {
-			return -1;
-		}
-		return I.intValue();
-	}
-
-	protected void setLastRewriteTokenIndex(String programName, int i) {
-		lastRewriteTokenIndexes.put(programName, new Integer(i));
-	}
-
-	protected List getProgram(String name) {
-		List is = (List)programs.get(name);
-		if ( is==null ) {
-			is = initializeProgram(name);
-		}
-		return is;
-	}
-
-	private List initializeProgram(String name) {
-		List is = new ArrayList(PROGRAM_INIT_SIZE);
-		programs.put(name, is);
-		return is;
-	}
-
-	public String toOriginalString() {
-        fill();
-		return toOriginalString(MIN_TOKEN_INDEX, size()-1);
-	}
-
-	public String toOriginalString(int start, int end) {
-		StringBuffer buf = new StringBuffer();
-		for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i<tokens.size(); i++) {
-			if ( get(i).getType()!=Token.EOF ) buf.append(get(i).getText());
-		}
-		return buf.toString();
-	}
-
-	public String toString() {
-        fill();
-		return toString(MIN_TOKEN_INDEX, size()-1);
-	}
-
-	public String toString(String programName) {
-        fill();
-		return toString(programName, MIN_TOKEN_INDEX, size()-1);
-	}
-
-	public String toString(int start, int end) {
-		return toString(DEFAULT_PROGRAM_NAME, start, end);
-	}
-
-	public String toString(String programName, int start, int end) {
-		List rewrites = (List)programs.get(programName);
-
-        // ensure start/end are in range
-        if ( end>tokens.size()-1 ) end = tokens.size()-1;
-        if ( start<0 ) start = 0;
-
-        if ( rewrites==null || rewrites.size()==0 ) {
-			return toOriginalString(start,end); // no instructions to execute
-		}
-		StringBuffer buf = new StringBuffer();
-
-		// First, optimize instruction stream
-		Map indexToOp = reduceToSingleOperationPerIndex(rewrites);
-
-        // Walk buffer, executing instructions and emitting tokens
-        int i = start;
-        while ( i <= end && i < tokens.size() ) {
-			RewriteOperation op = (RewriteOperation)indexToOp.get(new Integer(i));
-			indexToOp.remove(new Integer(i)); // remove so any left have index size-1
-			Token t = (Token) tokens.get(i);
-			if ( op==null ) {
-				// no operation at that index, just dump token
-				if ( t.getType()!=Token.EOF ) buf.append(t.getText());
-				i++; // move to next token
-			}
-			else {
-				i = op.execute(buf); // execute operation and skip
-			}
-		}
-
-        // include stuff after end if it's last index in buffer
-        // So, if they did an insertAfter(lastValidIndex, "foo"), include
-        // foo if end==lastValidIndex.
-        if ( end==tokens.size()-1 ) {
-            // Scan any remaining operations after last token
-            // should be included (they will be inserts).
-            Iterator it = indexToOp.values().iterator();
-            while (it.hasNext()) {
-                RewriteOperation op = (RewriteOperation)it.next();
-                if ( op.index >= tokens.size()-1 ) buf.append(op.text);
-            }
-        }
-        return buf.toString();
-	}
-
-	/** We need to combine operations and report invalid operations (like
-	 *  overlapping replaces that are not completely nested).  Inserts to
-	 *  same index need to be combined etc...   Here are the cases:
-	 *
-	 *  I.i.u I.j.v								leave alone, nonoverlapping
-	 *  I.i.u I.i.v								combine: Iivu
-	 *
-	 *  R.i-j.u R.x-y.v	| i-j in x-y			delete first R
-	 *  R.i-j.u R.i-j.v							delete first R
-	 *  R.i-j.u R.x-y.v	| x-y in i-j			ERROR
-	 *  R.i-j.u R.x-y.v	| boundaries overlap	ERROR
-	 *
-	 *  Delete special case of replace (text==null):
-	 *  D.i-j.u D.x-y.v	| boundaries overlap	combine to min(left)..max(right)
-	 *
-	 *  I.i.u R.x-y.v | i in (x+1)-y			delete I (since insert before
-	 *											we're not deleting i)
-	 *  I.i.u R.x-y.v | i not in (x+1)-y		leave alone, nonoverlapping
-	 *  R.x-y.v I.i.u | i in x-y				ERROR
-	 *  R.x-y.v I.x.u 							R.x-y.uv (combine, delete I)
-	 *  R.x-y.v I.i.u | i not in x-y			leave alone, nonoverlapping
-	 *
-	 *  I.i.u = insert u before op @ index i
-	 *  R.x-y.u = replace x-y indexed tokens with u
-	 *
-	 *  First we need to examine replaces.  For any replace op:
-	 *
-	 * 		1. wipe out any insertions before op within that range.
-	 *		2. Drop any replace op before that is contained completely within
-	 *         that range.
-	 *		3. Throw exception upon boundary overlap with any previous replace.
-	 *
-	 *  Then we can deal with inserts:
-	 *
-	 * 		1. for any inserts to same index, combine even if not adjacent.
-	 * 		2. for any prior replace with same left boundary, combine this
-	 *         insert with replace and delete this replace.
-	 * 		3. throw exception if index in same range as previous replace
-	 *
-	 *  Don't actually delete; make op null in list. Easier to walk list.
-	 *  Later we can throw as we add to index -> op map.
-	 *
-	 *  Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
-	 *  inserted stuff would be before the replace range.  But, if you
-	 *  add tokens in front of a method body '{' and then delete the method
-	 *  body, I think the stuff before the '{' you added should disappear too.
-	 *
-	 *  Return a map from token index to operation.
-	 */
-	protected Map reduceToSingleOperationPerIndex(List rewrites) {
-//		System.out.println("rewrites="+rewrites);
-
-		// WALK REPLACES
-		for (int i = 0; i < rewrites.size(); i++) {
-			RewriteOperation op = (RewriteOperation)rewrites.get(i);
-			if ( op==null ) continue;
-			if ( !(op instanceof ReplaceOp) ) continue;
-			ReplaceOp rop = (ReplaceOp)rewrites.get(i);
-			// Wipe prior inserts within range
-			List inserts = getKindOfOps(rewrites, InsertBeforeOp.class, i);
-			for (int j = 0; j < inserts.size(); j++) {
-				InsertBeforeOp iop = (InsertBeforeOp) inserts.get(j);
-				if ( iop.index == rop.index ) {
-					// E.g., insert before 2, delete 2..2; update replace
-					// text to include insert before, kill insert
-					rewrites.set(iop.instructionIndex, null);
-					rop.text = iop.text.toString() + (rop.text!=null?rop.text.toString():"");
-				}
-				else if ( iop.index > rop.index && iop.index <= rop.lastIndex ) {
-                    // delete insert as it's a no-op.
-                    rewrites.set(iop.instructionIndex, null);
-				}
-			}
-			// Drop any prior replaces contained within
-			List prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
-			for (int j = 0; j < prevReplaces.size(); j++) {
-				ReplaceOp prevRop = (ReplaceOp) prevReplaces.get(j);
-				if ( prevRop.index>=rop.index && prevRop.lastIndex <= rop.lastIndex ) {
-                    // delete replace as it's a no-op.
-                    rewrites.set(prevRop.instructionIndex, null);
-					continue;
-				}
-				// throw exception unless disjoint or identical
-				boolean disjoint =
-					prevRop.lastIndex<rop.index || prevRop.index > rop.lastIndex;
-				boolean same =
-					prevRop.index==rop.index && prevRop.lastIndex==rop.lastIndex;
-				// Delete special case of replace (text==null):
-				// D.i-j.u D.x-y.v	| boundaries overlap	combine to min(left)..max(right)
-				if ( prevRop.text==null && rop.text==null && !disjoint ) {
-					//System.out.println("overlapping deletes: "+prevRop+", "+rop);
-					rewrites.set(prevRop.instructionIndex, null); // kill first delete
-					rop.index = Math.min(prevRop.index, rop.index);
-					rop.lastIndex = Math.max(prevRop.lastIndex, rop.lastIndex);
-					//System.out.println("new rop "+rop);
-				}
-				else if ( !disjoint && !same ) {
-					throw new IllegalArgumentException("replace op boundaries of "+rop+
-													   " overlap with previous "+prevRop);
-				}
-			}
-		}
-
-		// WALK INSERTS
-		for (int i = 0; i < rewrites.size(); i++) {
-			RewriteOperation op = (RewriteOperation)rewrites.get(i);
-			if ( op==null ) continue;
-			if ( !(op instanceof InsertBeforeOp) ) continue;
-			InsertBeforeOp iop = (InsertBeforeOp)rewrites.get(i);
-			// combine current insert with prior if any at same index
-			List prevInserts = getKindOfOps(rewrites, InsertBeforeOp.class, i);
-			for (int j = 0; j < prevInserts.size(); j++) {
-				InsertBeforeOp prevIop = (InsertBeforeOp) prevInserts.get(j);
-				if ( prevIop.index == iop.index ) { // combine objects
-					// convert to strings...we're in process of toString'ing
-					// whole token buffer so no lazy eval issue with any templates
-					iop.text = catOpText(iop.text,prevIop.text);
-                    // delete redundant prior insert
-                    rewrites.set(prevIop.instructionIndex, null);
-				}
-			}
-			// look for replaces where iop.index is in range; error
-			List prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
-			for (int j = 0; j < prevReplaces.size(); j++) {
-				ReplaceOp rop = (ReplaceOp) prevReplaces.get(j);
-				if ( iop.index == rop.index ) {
-					rop.text = catOpText(iop.text,rop.text);
-					rewrites.set(i, null);  // delete current insert
-					continue;
-				}
-				if ( iop.index >= rop.index && iop.index <= rop.lastIndex ) {
-					throw new IllegalArgumentException("insert op "+iop+
-													   " within boundaries of previous "+rop);
-				}
-			}
-		}
-		// System.out.println("rewrites after="+rewrites);
-		Map m = new HashMap();
-		for (int i = 0; i < rewrites.size(); i++) {
-			RewriteOperation op = (RewriteOperation)rewrites.get(i);
-			if ( op==null ) continue; // ignore deleted ops
-			if ( m.get(new Integer(op.index))!=null ) {
-				throw new Error("should only be one op per index");
-			}
-			m.put(new Integer(op.index), op);
-		}
-		//System.out.println("index to op: "+m);
-		return m;
-	}
-
-	protected String catOpText(Object a, Object b) {
-		String x = "";
-		String y = "";
-		if ( a!=null ) x = a.toString();
-		if ( b!=null ) y = b.toString();
-		return x+y;
-	}
-	protected List getKindOfOps(List rewrites, Class kind) {
-		return getKindOfOps(rewrites, kind, rewrites.size());
-	}
-
-    /** Get all operations before an index of a particular kind */
-    protected List getKindOfOps(List rewrites, Class kind, int before) {
-		List ops = new ArrayList();
-		for (int i=0; i<before && i<rewrites.size(); i++) {
-			RewriteOperation op = (RewriteOperation)rewrites.get(i);
-			if ( op==null ) continue; // ignore deleted
-			if ( op.getClass() == kind ) ops.add(op);
-		}		
-		return ops;
-	}
-
-	public String toDebugString() {
-		return toDebugString(MIN_TOKEN_INDEX, size()-1);
-	}
-
-	public String toDebugString(int start, int end) {
-		StringBuffer buf = new StringBuffer();
-		for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i<tokens.size(); i++) {
-			buf.append(get(i));
-		}
-		return buf.toString();
-	}
-}
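A self-contained sketch of the lazy-rewrite workflow described in the class comment above. The inline TokenSource stands in for a generated lexer, and the token types are arbitrary illustration values; every TokenRewriteStream call used here is declared in the file above. Overlapping replace operations would be rejected later, at toString() time, with an IllegalArgumentException from reduceToSingleOperationPerIndex().

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.TokenRewriteStream;
    import org.antlr.runtime.TokenSource;

    public class RewriteSketch {
        public static void main(String[] args) {
            // "int x;" as four tokens plus EOF; types 4..7 are made up.
            final Token[] toks = {
                new CommonToken(4, "int"), new CommonToken(5, " "),
                new CommonToken(6, "x"), new CommonToken(7, ";"),
                new CommonToken(Token.EOF)
            };
            TokenSource src = new TokenSource() {
                int i = 0;
                public Token nextToken() { return toks[Math.min(i++, toks.length - 1)]; }
                public String getSourceName() { return "inline"; }
            };

            TokenRewriteStream tokens = new TokenRewriteStream(src);
            tokens.fill();                          // buffer everything up front (inherited helper)
            tokens.insertBefore(0, "/* decl */ ");  // queue an insert before token 0
            tokens.replace(2, "y");                 // queue a replace of token 2 ("x")
            // Nothing has been moved yet; the queued operations execute lazily here:
            System.out.println(tokens.toString());          // "/* decl */ int y;"
            System.out.println(tokens.toOriginalString());  // "int x;"
        }
    }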
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/TokenStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/TokenStream.java
deleted file mode 100644
index 1b43c14..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/TokenStream.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import java.util.List;
-
-/** A stream of tokens accessing tokens from a TokenSource */
-public interface TokenStream extends IntStream {
-    /** Get Token at current input pointer + i ahead where i=1 is next Token.
-	 *  i<0 indicates tokens in the past.  So -1 is previous token and -2 is
-	 *  two tokens ago. LT(0) is undefined.  For i>=n, return Token.EOF_TOKEN.
-	 *  Return null for LT(0) and any index that results in an absolute address
-	 *  that is negative.
-	 */
-    public Token LT(int k);
-
-	/** How far ahead has the stream been asked to look?  The return
-	 *  value is a valid index from 0..n-1.
-	 */
-	int range();
-	
-	/** Get a token at an absolute index i; 0..n-1.  This is really only
-	 *  needed for profiling and debugging and token stream rewriting.
-	 *  If you don't want to buffer up tokens, then this method makes no
-	 *  sense for you.  Naturally you can't use the rewrite stream feature.
-	 *  I believe DebugTokenStream can easily be altered to not use
-	 *  this method, removing the dependency.
-	 */
-	public Token get(int i);
-
-	/** Where is this stream pulling tokens from?  This is not the name, but
-	 *  the object that provides Token objects.
-	 */
-	public TokenSource getTokenSource();
-
-	/** Return the text of all tokens from start to stop, inclusive.
-	 *  If the stream does not buffer all the tokens then it can just
-	 *  return "" or null.  Users should not, of course, access $ruleLabel.text
-	 *  in an action in that case.
-	 */
-	public String toString(int start, int stop);
-
-	/** Because the user is not required to use a token with an index stored
-	 *  in it, we must provide a means for two token objects themselves to
-	 *  indicate the start/end location.  Most often this will just delegate
-	 *  to the other toString(int,int).  This is also parallel with
-	 *  the TreeNodeStream.toString(Object,Object).
-	 */
-	public String toString(Token start, Token stop);
-}
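A short sketch of the LT()/consume() contract documented above, using CommonTokenStream, the standard buffered implementation of this interface. The inline TokenSource and token types are illustrative stand-ins for a generated lexer.

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.CommonTokenStream;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.TokenSource;

    public class LookaheadSketch {
        public static void main(String[] args) {
            final Token[] toks = { new CommonToken(4, "a"), new CommonToken(5, "b"),
                                   new CommonToken(Token.EOF) };
            TokenSource src = new TokenSource() {
                int i = 0;
                public Token nextToken() { return toks[Math.min(i++, toks.length - 1)]; }
                public String getSourceName() { return "inline"; }
            };
            CommonTokenStream ts = new CommonTokenStream(src);
            System.out.println(ts.LT(1).getText());  // "a" -- LT(1) is the next token
            ts.consume();                            // advance past "a"
            System.out.println(ts.LT(1).getText());  // "b"
            System.out.println(ts.LT(-1).getText()); // "a" -- LT(-1) is the previous token
        }
    }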
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/UnbufferedTokenStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/UnbufferedTokenStream.java
deleted file mode 100644
index 0b0e979..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/UnbufferedTokenStream.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-import org.antlr.runtime.misc.LookaheadStream;
-
-import java.util.List;
-import java.util.NoSuchElementException;
-
-/** A token stream that pulls tokens from the token source on-demand and
- *  without tracking a complete buffer of the tokens. This stream buffers
- *  the minimum number of tokens possible.  It's the same as
- *  OnDemandTokenStream except that OnDemandTokenStream buffers all tokens.
- *
- *  You can't use this stream if you pass whitespace or other off-channel
- *  tokens to the parser. The stream can't ignore off-channel tokens.
- * 
- *  You can only look backwards 1 token: LT(-1).
- *
- *  Use this when you need to read from a socket or other infinite stream.
- *
- *  @see BufferedTokenStream
- *  @see CommonTokenStream
- */
-public class UnbufferedTokenStream extends LookaheadStream<Token> implements TokenStream {
-	protected TokenSource tokenSource;
-    protected int tokenIndex = 0; // simple counter to set token index in tokens
-
-    /** Skip tokens on any channel but this one; this is how we skip whitespace... */
-    protected int channel = Token.DEFAULT_CHANNEL;
-
-	public UnbufferedTokenStream(TokenSource tokenSource) {
-		this.tokenSource = tokenSource;
-	}
-
-	public Token nextElement() {
-		Token t = tokenSource.nextToken();
-        t.setTokenIndex(tokenIndex++);
-		return t;
-	}
-
-    public boolean isEOF(Token o) { return o.getType() == Token.EOF; }    
-
-	public TokenSource getTokenSource() { return tokenSource; }
-
-	public String toString(int start, int stop) { return "n/a"; }
-
-	public String toString(Token start, Token stop) { return "n/a"; }
-
-    public int LA(int i) { return LT(i).getType(); }
-
-    public Token get(int i) {
-        throw new UnsupportedOperationException("Absolute token indexes are meaningless in an unbuffered stream");
-    }
-
-	public String getSourceName() {	return tokenSource.getSourceName();	}
-}
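A sketch of the socket use case mentioned above. XLexer is a hypothetical ANTLR-generated lexer and the endpoint is made up; ANTLRInputStream wraps any java.io.InputStream as a CharStream. Only single-token lookahead is used, and the loop assumes the stream surfaces the EOF token through LT(1)/LA(1), which is how generated parsers detect end of input.

    import java.net.Socket;
    import org.antlr.runtime.ANTLRInputStream;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.UnbufferedTokenStream;

    public class UnbufferedSketch {
        public static void main(String[] args) throws Exception {
            Socket s = new Socket("localhost", 9999);                          // hypothetical endpoint
            XLexer lex = new XLexer(new ANTLRInputStream(s.getInputStream())); // hypothetical lexer
            UnbufferedTokenStream tokens = new UnbufferedTokenStream(lex);
            while (tokens.LA(1) != Token.EOF) {
                System.out.println(tokens.LT(1).getText()); // peek at the next token...
                tokens.consume();                           // ...then throw it away for good
            }
            // tokens.get(0) would throw: absolute indexes are meaningless here.
        }
    }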
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/UnwantedTokenException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/UnwantedTokenException.java
deleted file mode 100644
index feb7445..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/UnwantedTokenException.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime;
-
-/** An extra token while parsing a TokenStream */
-public class UnwantedTokenException extends MismatchedTokenException {
-	/** Used for remote debugger deserialization */
-	public UnwantedTokenException() {;}
-
-	public UnwantedTokenException(int expecting, IntStream input) {
-		super(expecting, input);
-	}
-
-	public Token getUnexpectedToken() {
-		return token;
-	}
-
-	public String toString() {
-		String exp = ", expected "+expecting;
-		if ( expecting==Token.INVALID_TOKEN_TYPE ) {
-			exp = "";
-		}
-		if ( token==null ) {
-			return "UnwantedTokenException(found="+null+exp+")";
-		}
-		return "UnwantedTokenException(found="+token.getText()+exp+")";
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/BlankDebugEventListener.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/BlankDebugEventListener.java
deleted file mode 100755
index d70aa26..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/BlankDebugEventListener.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.Token;
-
-/** A blank listener that does nothing; useful for real classes so
- *  they don't have to have lots of blank methods and are less
- *  sensitive to updates to the debug interface.
- */
-public class BlankDebugEventListener implements DebugEventListener {
-	public void enterRule(String grammarFileName, String ruleName) {}
-	public void exitRule(String grammarFileName, String ruleName) {}
-	public void enterAlt(int alt) {}
-	public void enterSubRule(int decisionNumber) {}
-	public void exitSubRule(int decisionNumber) {}
-	public void enterDecision(int decisionNumber, boolean couldBacktrack) {}
-	public void exitDecision(int decisionNumber) {}
-	public void location(int line, int pos) {}
-	public void consumeToken(Token token) {}
-	public void consumeHiddenToken(Token token) {}
-	public void LT(int i, Token t) {}
-	public void mark(int i) {}
-	public void rewind(int i) {}
-	public void rewind() {}
-	public void beginBacktrack(int level) {}
-	public void endBacktrack(int level, boolean successful) {}
-	public void recognitionException(RecognitionException e) {}
-	public void beginResync() {}
-	public void endResync() {}
-	public void semanticPredicate(boolean result, String predicate) {}
-	public void commence() {}
-	public void terminate() {}
-
-	// Tree parsing stuff
-
-	public void consumeNode(Object t) {}
-	public void LT(int i, Object t) {}
-
-	// AST Stuff
-
-	public void nilNode(Object t) {}
-	public void errorNode(Object t) {}
-	public void createNode(Object t) {}
-	public void createNode(Object node, Token token) {}
-	public void becomeRoot(Object newRoot, Object oldRoot) {}
-	public void addChild(Object root, Object child) {}
-	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {}
-}
-
-
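The point of this blank implementation is that a concrete listener only overrides the events it cares about. A small hypothetical tracer, sketched below, overrides four events and inherits the empty bodies for everything else; a recognizer generated with -debug can then be pointed at it.

    import org.antlr.runtime.Token;
    import org.antlr.runtime.debug.BlankDebugEventListener;

    public class RuleTraceListener extends BlankDebugEventListener {
        private int depth = 0;
        private int consumed = 0;

        @Override public void enterRule(String grammarFileName, String ruleName) {
            System.out.println(indent() + "enter " + ruleName);
            depth++;
        }
        @Override public void exitRule(String grammarFileName, String ruleName) {
            depth--;
            System.out.println(indent() + "exit  " + ruleName);
        }
        @Override public void consumeToken(Token t) { consumed++; }
        @Override public void terminate() {
            System.out.println("consumed " + consumed + " tokens");
        }

        private String indent() {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < depth; i++) sb.append("  ");
            return sb.toString();
        }
    }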
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventHub.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventHub.java
deleted file mode 100644
index 7bfe6a8..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventHub.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.Token;
-import org.antlr.runtime.RecognitionException;
-
-import java.util.List;
-import java.util.ArrayList;
-
-/** Broadcast debug events to multiple listeners.  Lets you debug and still
- *  use the event mechanism to build parse trees etc...  Not thread-safe.
- *  Don't add events in one thread while parser fires events in another.
- * 
- *  @see DebugEventRepeater
- */
-public class DebugEventHub implements DebugEventListener {
-	protected List listeners = new ArrayList();
-
-	public DebugEventHub(DebugEventListener listener) {
-		listeners.add(listener);
-	}
-
-	public DebugEventHub(DebugEventListener a, DebugEventListener b) {
-		listeners.add(a);
-		listeners.add(b);
-	}
-
-	/** Add another listener to broadcast events too.  Not thread-safe.
-	 *  Don't add events in one thread while parser fires events in another.
-	 */
-	public void addListener(DebugEventListener listener) {
-		listeners.add(listener);
-	}
-	
-	/* To avoid a mess like this:
-		public void enterRule(final String ruleName) {
-			broadcast(new Code(){
-				public void exec(DebugEventListener listener) {listener.enterRule(ruleName);}}
-				);
-		}
-		I am dup'ing the for-loop in each.  Where are Java closures!? blech!
-	 */
-
-	public void enterRule(String grammarFileName, String ruleName) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.enterRule(grammarFileName,ruleName);
-		}
-	}
-
-	public void exitRule(String grammarFileName, String ruleName) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.exitRule(grammarFileName, ruleName);
-		}
-	}
-
-	public void enterAlt(int alt) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.enterAlt(alt);
-		}
-	}
-
-	public void enterSubRule(int decisionNumber) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.enterSubRule(decisionNumber);
-		}
-	}
-
-	public void exitSubRule(int decisionNumber) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.exitSubRule(decisionNumber);
-		}
-	}
-
-	public void enterDecision(int decisionNumber, boolean couldBacktrack) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.enterDecision(decisionNumber, couldBacktrack);
-		}
-	}
-
-	public void exitDecision(int decisionNumber) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.exitDecision(decisionNumber);
-		}
-	}
-
-	public void location(int line, int pos) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.location(line, pos);
-		}
-	}
-
-	public void consumeToken(Token token) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.consumeToken(token);
-		}
-	}
-
-	public void consumeHiddenToken(Token token) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.consumeHiddenToken(token);
-		}
-	}
-
-	public void LT(int index, Token t) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.LT(index, t);
-		}
-	}
-
-	public void mark(int index) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.mark(index);
-		}
-	}
-
-	public void rewind(int index) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.rewind(index);
-		}
-	}
-
-	public void rewind() {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.rewind();
-		}
-	}
-
-	public void beginBacktrack(int level) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.beginBacktrack(level);
-		}
-	}
-
-	public void endBacktrack(int level, boolean successful) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.endBacktrack(level, successful);
-		}
-	}
-
-	public void recognitionException(RecognitionException e) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.recognitionException(e);
-		}
-	}
-
-	public void beginResync() {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.beginResync();
-		}
-	}
-
-	public void endResync() {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.endResync();
-		}
-	}
-
-	public void semanticPredicate(boolean result, String predicate) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.semanticPredicate(result, predicate);
-		}
-	}
-
-	public void commence() {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.commence();
-		}
-	}
-
-	public void terminate() {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.terminate();
-		}
-	}
-
-
-	// Tree parsing stuff
-
-	public void consumeNode(Object t) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.consumeNode(t);
-		}
-	}
-
-	public void LT(int index, Object t) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.LT(index, t);
-		}
-	}
-
-
-	// AST Stuff
-
-	public void nilNode(Object t) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.nilNode(t);
-		}
-	}
-
-	public void errorNode(Object t) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.errorNode(t);
-		}
-	}
-
-	public void createNode(Object t) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.createNode(t);
-		}
-	}
-
-	public void createNode(Object node, Token token) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.createNode(node, token);
-		}
-	}
-
-	public void becomeRoot(Object newRoot, Object oldRoot) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.becomeRoot(newRoot, oldRoot);
-		}
-	}
-
-	public void addChild(Object root, Object child) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.addChild(root, child);
-		}
-	}
-
-	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
-		for (int i = 0; i < listeners.size(); i++) {
-			DebugEventListener listener = (DebugEventListener)listeners.get(i);
-			listener.setTokenBoundaries(t, tokenStartIndex, tokenStopIndex);
-		}
-	}
-}
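A short sketch of fanning one stream of debug events out to several listeners with DebugEventHub. RuleTraceListener is the hypothetical listener sketched earlier; normally the hub is handed to a debug-enabled recognizer rather than driven by hand, but firing a few events directly shows the broadcast behaviour.

    import org.antlr.runtime.debug.BlankDebugEventListener;
    import org.antlr.runtime.debug.DebugEventHub;
    import org.antlr.runtime.debug.DebugEventListener;

    public class HubSketch {
        public static void main(String[] args) {
            DebugEventListener tracer = new RuleTraceListener();   // hypothetical, sketched above
            DebugEventListener extra  = new BlankDebugEventListener();
            DebugEventHub hub = new DebugEventHub(tracer, extra);
            hub.addListener(new BlankDebugEventListener());        // not thread-safe, per the class comment
            hub.enterRule("T.g", "start");                         // each event reaches every listener
            hub.exitRule("T.g", "start");
            hub.terminate();
        }
    }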
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventListener.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventListener.java
deleted file mode 100644
index 163b5cd..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventListener.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.Token;
-
-/** All debugging events that a recognizer can trigger.
- *
- *  I did not create a separate AST debugging interface as it would create
- *  lots of extra classes and DebugParser has a dbg var defined, which makes
- *  it hard to change to ASTDebugEventListener.  I looked hard at this issue
- *  and it is easier to understand as one monolithic event interface for all
- *  possible events.  Hopefully, adding ST debugging stuff won't be bad.  Leave
- *  for future. 4/26/2006.
- */
-public interface DebugEventListener {
-	/** Moved to version 2 for v3.1: added grammar name to enter/exit Rule */
-	public static final String PROTOCOL_VERSION = "2";
-	
-	/** serialized version of true */
-	public static final int TRUE = 1;
-	public static final int FALSE = 0;
-
-	/** The parser has just entered a rule.  No decision has been made about
-	 *  which alt is predicted.  This is fired AFTER init actions have been
-	 *  executed.  Attributes are defined and available etc...
-	 *  The grammarFileName allows composite grammars to jump around among
-	 *  multiple grammar files.
-	 */
-	public void enterRule(String grammarFileName, String ruleName);
-
-	/** Because rules can have lots of alternatives, it is very useful to
-	 *  know which alt you are entering.  This is 1..n for n alts.
-	 */
-	public void enterAlt(int alt);
-
-	/** This is the last thing executed before leaving a rule.  It is
-	 *  executed even if an exception is thrown.  This is triggered after
-	 *  error reporting and recovery have occurred (unless the exception is
-	 *  not caught in this rule).  This implies an "exitAlt" event.
-	 *  The grammarFileName allows composite grammars to jump around among
-	 *  multiple grammar files.
-	 */
-	public void exitRule(String grammarFileName, String ruleName);
-
-	/** Track entry into any (...) subrule or other EBNF construct */
-	public void enterSubRule(int decisionNumber);
-
-	public void exitSubRule(int decisionNumber);
-
-	/** Every decision, fixed k or arbitrary, has an enter/exit event
-	 *  so that a GUI can easily track what LT/consume events are
-	 *  associated with prediction.  You will see a single enter/exit
-	 *  subrule but multiple enter/exit decision events, one for each
-	 *  loop iteration.
-	 */
-	public void enterDecision(int decisionNumber, boolean couldBacktrack);
-
-	public void exitDecision(int decisionNumber);
-
-	/** An input token was consumed; matched by any kind of element.
-	 *  Trigger after the token was matched by things like match(), matchAny().
-	 */
-	public void consumeToken(Token t);
-
-	/** An off-channel input token was consumed.
-	 *  Trigger after the token was matched by things like match(), matchAny().
-	 *  (unless, of course, the hidden token is the first token in the input stream).
-	 */
-	public void consumeHiddenToken(Token t);
-
-	/** Somebody (anybody) looked ahead.  Note that this actually gets
-	 *  triggered by both LA and LT calls.  The debugger will want to know
-	 *  which Token object was examined.  Like consumeToken, this indicates
-	 *  what token was seen at that depth.  A remote debugger cannot look
-	 *  ahead into a file it doesn't have so LT events must pass the token
-	 *  even if the info is redundant.
-	 */
-	public void LT(int i, Token t);
-
-	/** The parser is going to look arbitrarily ahead; mark this location,
-	 *  the token stream's marker is sent in case you need it.
-	 */
-	public void mark(int marker);
-
-	/** After an arbitrarily long lookahead as with a cyclic DFA (or with
-	 *  any backtrack), this informs the debugger that the stream should be
-	 *  rewound to the position associated with marker.
-	 */
-	public void rewind(int marker);
-
-	/** Rewind to the input position of the last marker.
-	 *  Used currently only after a cyclic DFA and just
-	 *  before starting a sem/syn predicate to get the
-	 *  input position back to the start of the decision.
-	 *  Do not "pop" the marker off the state.  mark(i)
-	 *  and rewind(i) should balance still.
-	 */
-	public void rewind();
-
-	public void beginBacktrack(int level);
-
-	public void endBacktrack(int level, boolean successful);
-
-	/** To watch a parser move through the grammar, the parser needs to
-	 *  inform the debugger what line/charPos it is passing in the grammar.
-	 *  For now, this does not know how to switch from one grammar to the
-	 *  other and back for island grammars etc...
-	 *
-	 *  This should also allow breakpoints because the debugger can stop
-	 *  the parser whenever it hits this line/pos.
-	 */
-	public void location(int line, int pos);
-
-	/** A recognition exception occurred such as NoViableAltException.  I made
-	 *  this a generic event so that I can alter the exception hierarchy later
-	 *  without having to alter all the debug objects.
-	 *
-	 *  Upon error, the stack of enter rule/subrule must be properly unwound.
-	 *  If no viable alt occurs it is within an enter/exit decision, which
-	 *  also must be rewound.  Even the rewind for each mark must be unwound.
-	 *  In the Java target this is pretty easy using try/finally, if a bit
-	 *  ugly in the generated code.  The rewind is generated in DFA.predict()
-	 *  actually so no code needs to be generated for that.  For languages
-	 *  w/o this "finally" feature (C++?), the target implementor will have
-	 *  to build an event stack or something.
-	 *
-	 *  Across a socket for remote debugging, only the RecognitionException
-	 *  data fields are transmitted.  The token object or whatever that
-	 *  caused the problem was the last object referenced by LT.  The
-	 *  immediately preceding LT event should hold the unexpected Token or
-	 *  char.
-	 *
-	 *  Here is a sample event trace for grammar:
-	 *
-	 *  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
-     *    | D
-     *    ;
-     *
-	 *  The sequence for this rule (with no viable alt in the subrule) for
-	 *  input 'c c' (there are 3 tokens) is:
-	 *
-	 *		commence
-	 *		LT(1)
-	 *		enterRule b
-	 *		location 7 1
-	 *		enter decision 3
-	 *		LT(1)
-	 *		exit decision 3
-	 *		enterAlt1
-	 *		location 7 5
-	 *		LT(1)
-	 *		consumeToken [c/<4>,1:0]
-	 *		location 7 7
-	 *		enterSubRule 2
-	 *		enter decision 2
-	 *		LT(1)
-	 *		LT(1)
-	 *		recognitionException NoViableAltException 2 1 2
-	 *		exit decision 2
-	 *		exitSubRule 2
-	 *		beginResync
-	 *		LT(1)
-	 *		consumeToken [c/<4>,1:1]
-	 *		LT(1)
-	 *		endResync
-	 *		LT(-1)
-	 *		exitRule b
-	 *		terminate
-	 */
-	public void recognitionException(RecognitionException e);
-
-	/** Indicates the recognizer is about to consume tokens to resynchronize
-	 *  the parser.  Any consume events from here until the recovered event
-	 *  are not part of the parse--they are dead tokens.
-	 */
-	public void beginResync();
-
-	/** Indicates that the recognizer has finished consuming tokens in order
-	 *  to resynchronize.  There may be multiple beginResync/endResync pairs
-	 *  before the recognizer comes out of errorRecovery mode (in which
-	 *  multiple errors are suppressed).  This will be useful
-	 *  in a GUI where you probably want to grey out tokens that are consumed
-	 *  but not matched to anything in grammar.  Anything between
-	 *  a beginResync/endResync pair was tossed out by the parser.
-	 */
-	public void endResync();
-
-	/** A semantic predicate was evaluated with this result and action text */
-	public void semanticPredicate(boolean result, String predicate);
-
-	/** Announce that parsing has begun.  Not technically useful except for
-	 *  sending events over a socket.  A GUI for example will launch a thread
-	 *  to connect and communicate with a remote parser.  The thread will want
-	 *  to notify the GUI when a connection is made.  ANTLR parsers
-	 *  trigger this upon entry to the first rule (the ruleLevel is used to
-	 *  figure this out).
-	 */
-	public void commence();
-
-	/** Parsing is over; successfully or not.  Mostly useful for telling
-	 *  remote debugging listeners that it's time to quit.  When the rule
-	 *  invocation level goes to zero at the end of a rule, we are done
-	 *  parsing.
-	 */
-	public void terminate();
-
-
-	// T r e e  P a r s i n g
-
-	/** Input for a tree parser is an AST, but we know nothing for sure
-	 *  about a node except its type and text (obtained from the adaptor).
-	 *  This is the analog of the consumeToken method.  Again, the ID is
-	 *  usually the hashCode of the node, so it only works if hashCode is
-	 *  not overridden.  If the type is UP or DOWN, then
-	 *  the ID is not really meaningful as it's fixed--there is
-	 *  just one UP node and one DOWN navigation node.
-	 * @param t
-	 */
-	public void consumeNode(Object t);
-
-	/** The tree parser looked ahead.  If the type is UP or DOWN,
-	 *  then the ID is not really meaningful as it's fixed--there is
-	 *  just one UP node and one DOWN navigation node.
-	 */
-	public void LT(int i, Object t);
-
-
-	// A S T  E v e n t s
-
-	/** A nil was created (even nil nodes have a unique ID...
-	 *  they are not "null" per se).  As of 4/28/2006, this
-	 *  seems to be uniquely triggered when starting a new subtree
-	 *  such as when entering a subrule in automatic mode and when
-	 *  building a tree in rewrite mode.
-     *
- 	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only t.ID is set.
-	 */
-	public void nilNode(Object t);
-
-	/** Upon syntax error, recognizers bracket the error with an error node
-	 *  if they are building ASTs.
-	 * @param t
-	 */
-	public void errorNode(Object t);
-
-	/** Announce a new node built from token elements such as type etc...
-	 * 
-	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only t.ID, type, text are
-	 *  set.
-	 */
-	public void createNode(Object t);
-
-	/** Announce a new node built from an existing token.
-	 *
-	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only node.ID and token.tokenIndex
-	 *  are set.
-	 */
-	public void createNode(Object node, Token token);
-
-	/** Make a node the new root of an existing root.
-	 *
-	 *  Note: the newRootID parameter is possibly different
-	 *  than the TreeAdaptor.becomeRoot() newRoot parameter.
-	 *  In our case, it will always be the result of calling
-	 *  TreeAdaptor.becomeRoot() and not root_n or whatever.
-	 *
-	 *  The listener should assume that this event occurs
-	 *  only when the current subrule (or rule) subtree is
-	 *  being reset to newRootID.
-	 * 
-	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only IDs are set.
-	 *
-	 *  @see org.antlr.runtime.tree.TreeAdaptor.becomeRoot()
-	 */
-	public void becomeRoot(Object newRoot, Object oldRoot);
-
-	/** Make childID a child of rootID.
-	 *
-	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only IDs are set.
-	 * 
-	 *  @see org.antlr.runtime.tree.TreeAdaptor.addChild()
-	 */
-	public void addChild(Object root, Object child);
-
-	/** Set the start/stop token index for a subtree root or node.
-	 *
-	 *  If you are receiving this event over a socket via
-	 *  RemoteDebugEventSocketListener then only t.ID is set.
-	 */
-	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex);
-}
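The beginResync/endResync javadoc above hints at the intended use: anything consumed between those two events was thrown away by error recovery. A minimal illustrative sketch (not part of this change; class and field names are mine), built on BlankDebugEventListener from the same package:

    import java.util.ArrayList;
    import java.util.List;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.debug.BlankDebugEventListener;

    /** Collects the "dead" tokens consumed during resync; a GUI could grey these out. */
    class ResyncTracker extends BlankDebugEventListener {
        private boolean resyncing = false;
        private final List<Token> deadTokens = new ArrayList<Token>();

        @Override public void beginResync() { resyncing = true; }
        @Override public void endResync()   { resyncing = false; }

        @Override public void consumeToken(Token t) {
            if (resyncing) deadTokens.add(t);   // tossed out by the parser
        }

        public List<Token> getDeadTokens() { return deadTokens; }
    }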
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventRepeater.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventRepeater.java
deleted file mode 100644
index 8fb6b66..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventRepeater.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.Token;
-import org.antlr.runtime.RecognitionException;
-
-/** A simple event repeater (proxy) that delegates all functionality to the
- *  listener sent into the ctor.  Useful if you want to listen in on a few
- *  debug events w/o interrupting the debugger.  Just subclass the repeater
- *  and override the methods you want to listen in on.  Remember to call
- *  the method in this class so the event will continue on to the original
- *  recipient.
- *
- *  @see DebugEventHub
- */
-public class DebugEventRepeater implements DebugEventListener {
-	protected DebugEventListener listener;
-
-	public DebugEventRepeater(DebugEventListener listener) {
-		this.listener = listener;
-	}
-	
-	public void enterRule(String grammarFileName, String ruleName) { listener.enterRule(grammarFileName, ruleName); }
-	public void exitRule(String grammarFileName, String ruleName) { listener.exitRule(grammarFileName, ruleName); }
-	public void enterAlt(int alt) { listener.enterAlt(alt); }
-	public void enterSubRule(int decisionNumber) { listener.enterSubRule(decisionNumber); }
-	public void exitSubRule(int decisionNumber) { listener.exitSubRule(decisionNumber); }
-	public void enterDecision(int decisionNumber, boolean couldBacktrack) { listener.enterDecision(decisionNumber, couldBacktrack); }
-	public void exitDecision(int decisionNumber) { listener.exitDecision(decisionNumber); }
-	public void location(int line, int pos) { listener.location(line, pos); }
-	public void consumeToken(Token token) { listener.consumeToken(token); }
-	public void consumeHiddenToken(Token token) { listener.consumeHiddenToken(token); }
-	public void LT(int i, Token t) { listener.LT(i, t); }
-	public void mark(int i) { listener.mark(i); }
-	public void rewind(int i) { listener.rewind(i); }
-	public void rewind() { listener.rewind(); }
-	public void beginBacktrack(int level) { listener.beginBacktrack(level); }
-	public void endBacktrack(int level, boolean successful) { listener.endBacktrack(level, successful); }
-	public void recognitionException(RecognitionException e) { listener.recognitionException(e); }
-	public void beginResync() { listener.beginResync(); }
-	public void endResync() { listener.endResync(); }
-	public void semanticPredicate(boolean result, String predicate) { listener.semanticPredicate(result, predicate); }
-	public void commence() { listener.commence(); }
-	public void terminate() { listener.terminate(); }
-
-	// Tree parsing stuff
-
-	public void consumeNode(Object t) { listener.consumeNode(t); }
-	public void LT(int i, Object t) { listener.LT(i, t); }
-
-	// AST Stuff
-
-	public void nilNode(Object t) { listener.nilNode(t); }
-	public void errorNode(Object t) { listener.errorNode(t); }
-	public void createNode(Object t) { listener.createNode(t); }
-	public void createNode(Object node, Token token) { listener.createNode(node, token); }
-	public void becomeRoot(Object newRoot, Object oldRoot) { listener.becomeRoot(newRoot, oldRoot); }
-	public void addChild(Object root, Object child) { listener.addChild(root, child); }
-	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
-		listener.setTokenBoundaries(t, tokenStartIndex, tokenStopIndex);
-	}
-}
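As the class comment says, the expected pattern is to subclass the repeater, override only the events of interest, and call super so the original listener still sees everything. An illustrative sketch (names are mine, not part of this change):

    import org.antlr.runtime.debug.DebugEventListener;
    import org.antlr.runtime.debug.DebugEventRepeater;

    /** Prints rule entries while forwarding every event to the wrapped listener. */
    class RuleTracer extends DebugEventRepeater {
        public RuleTracer(DebugEventListener original) { super(original); }

        @Override
        public void enterRule(String grammarFileName, String ruleName) {
            System.out.println("enter " + grammarFileName + ":" + ruleName);
            super.enterRule(grammarFileName, ruleName);   // keep the chain intact
        }
    }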
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventSocketProxy.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventSocketProxy.java
deleted file mode 100644
index 3b480ad..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventSocketProxy.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.Token;
-import org.antlr.runtime.BaseRecognizer;
-import org.antlr.runtime.tree.TreeAdaptor;
-
-import java.io.*;
-import java.net.ServerSocket;
-import java.net.Socket;
-
-/** A proxy debug event listener that forwards events over a socket to
- *  a debugger (or any other listener) using a simple text-based protocol;
- *  one event per line.  ANTLRWorks listens on server socket with a
- *  RemoteDebugEventSocketListener instance.  These two objects must therefore
- *  be kept in sync.  New events must be handled on both sides of socket.
- */
-public class DebugEventSocketProxy extends BlankDebugEventListener {
-	public static final int DEFAULT_DEBUGGER_PORT = 49100; // was 49153
-	protected int port = DEFAULT_DEBUGGER_PORT;
-	protected ServerSocket serverSocket;
-	protected Socket socket;
-	protected String grammarFileName;
-	protected PrintWriter out;
-	protected BufferedReader in;
-
-	/** Who am I debugging? */
-	protected BaseRecognizer recognizer;
-
-	/** Almost certainly the recognizer will have an adaptor set, but
-	 *  we don't know how to cast it (Parser or TreeParser) to get at
-	 *  the adaptor field.  It must be set via a constructor. :(
-	 */
-	protected TreeAdaptor adaptor;
-
-	public DebugEventSocketProxy(BaseRecognizer recognizer, TreeAdaptor adaptor) {
-		this(recognizer, DEFAULT_DEBUGGER_PORT, adaptor);
-	}
-
-	public DebugEventSocketProxy(BaseRecognizer recognizer, int port, TreeAdaptor adaptor) {
-		this.grammarFileName = recognizer.getGrammarFileName();
-		this.adaptor = adaptor;
-		this.port = port;
-	}
-
-	public void handshake() throws IOException {
-		if ( serverSocket==null ) {
-			serverSocket = new ServerSocket(port);
-			socket = serverSocket.accept();
-			socket.setTcpNoDelay(true);
-			OutputStream os = socket.getOutputStream();
-			OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
-			out = new PrintWriter(new BufferedWriter(osw));
-			InputStream is = socket.getInputStream();
-			InputStreamReader isr = new InputStreamReader(is, "UTF8");
-			in = new BufferedReader(isr);
-			out.println("ANTLR "+ DebugEventListener.PROTOCOL_VERSION);
-			out.println("grammar \""+ grammarFileName);
-			out.flush();
-			ack();
-		}
-	}
-
-	public void commence() {
-		// don't bother sending event; listener will trigger upon connection
-	}
-
-	public void terminate() {
-		transmit("terminate");
-		out.close();
-		try {
-			socket.close();
-		}
-		catch (IOException ioe) {
-			ioe.printStackTrace(System.err);
-		}
-	}
-
-	protected void ack() {
-		try {
-			in.readLine();
-		}
-		catch (IOException ioe) {
-			ioe.printStackTrace(System.err);
-		}
-	}
-
-	protected void transmit(String event) {
-		out.println(event);
-		out.flush();
-		ack();
-	}
-
-	public void enterRule(String grammarFileName, String ruleName) {
-		transmit("enterRule\t"+grammarFileName+"\t"+ruleName);
-	}
-
-	public void enterAlt(int alt) {
-		transmit("enterAlt\t"+alt);
-	}
-
-	public void exitRule(String grammarFileName, String ruleName) {
-		transmit("exitRule\t"+grammarFileName+"\t"+ruleName);
-	}
-
-	public void enterSubRule(int decisionNumber) {
-		transmit("enterSubRule\t"+decisionNumber);
-	}
-
-	public void exitSubRule(int decisionNumber) {
-		transmit("exitSubRule\t"+decisionNumber);
-	}
-
-	public void enterDecision(int decisionNumber, boolean couldBacktrack) {
-		transmit("enterDecision\t"+decisionNumber+"\t"+couldBacktrack);
-	}
-
-	public void exitDecision(int decisionNumber) {
-		transmit("exitDecision\t"+decisionNumber);
-	}
-
-	public void consumeToken(Token t) {
-		String buf = serializeToken(t);
-		transmit("consumeToken\t"+buf);
-	}
-
-	public void consumeHiddenToken(Token t) {
-		String buf = serializeToken(t);
-		transmit("consumeHiddenToken\t"+buf);
-	}
-
-	public void LT(int i, Token t) {
-        if(t != null)
-            transmit("LT\t"+i+"\t"+serializeToken(t));
-	}
-
-	public void mark(int i) {
-		transmit("mark\t"+i);
-	}
-
-	public void rewind(int i) {
-		transmit("rewind\t"+i);
-	}
-
-	public void rewind() {
-		transmit("rewind");
-	}
-
-	public void beginBacktrack(int level) {
-		transmit("beginBacktrack\t"+level);
-	}
-
-	public void endBacktrack(int level, boolean successful) {
-		transmit("endBacktrack\t"+level+"\t"+(successful?TRUE:FALSE));
-	}
-
-	public void location(int line, int pos) {
-		transmit("location\t"+line+"\t"+pos);
-	}
-
-	public void recognitionException(RecognitionException e) {
-		StringBuffer buf = new StringBuffer(50);
-		buf.append("exception\t");
-		buf.append(e.getClass().getName());
-		// dump only the data common to all exceptions for now
-		buf.append("\t");
-		buf.append(e.index);
-		buf.append("\t");
-		buf.append(e.line);
-		buf.append("\t");
-		buf.append(e.charPositionInLine);
-		transmit(buf.toString());
-	}
-
-	public void beginResync() {
-		transmit("beginResync");
-	}
-
-	public void endResync() {
-		transmit("endResync");
-	}
-
-	public void semanticPredicate(boolean result, String predicate) {
-		StringBuffer buf = new StringBuffer(50);
-		buf.append("semanticPredicate\t");
-		buf.append(result);
-		serializeText(buf, predicate);
-		transmit(buf.toString());
-	}
-
-	// A S T  P a r s i n g  E v e n t s
-
-	public void consumeNode(Object t) {
-		StringBuffer buf = new StringBuffer(50);
-		buf.append("consumeNode");
-		serializeNode(buf, t);
-		transmit(buf.toString());
-	}
-
-	public void LT(int i, Object t) {
-		int ID = adaptor.getUniqueID(t);
-		String text = adaptor.getText(t);
-		int type = adaptor.getType(t);
-		StringBuffer buf = new StringBuffer(50);
-		buf.append("LN\t"); // lookahead node; distinguish from LT in protocol
-		buf.append(i);
-		serializeNode(buf, t);
-		transmit(buf.toString());
-	}
-
-	protected void serializeNode(StringBuffer buf, Object t) {
-		int ID = adaptor.getUniqueID(t);
-		String text = adaptor.getText(t);
-		int type = adaptor.getType(t);
-		buf.append("\t");
-		buf.append(ID);
-		buf.append("\t");
-		buf.append(type);
-		Token token = adaptor.getToken(t);
-		int line = -1;
-		int pos = -1;
-		if ( token!=null ) {
-			line = token.getLine();
-			pos = token.getCharPositionInLine();
-		}
-		buf.append("\t");
-		buf.append(line);
-		buf.append("\t");
-		buf.append(pos);
-		int tokenIndex = adaptor.getTokenStartIndex(t);
-		buf.append("\t");
-		buf.append(tokenIndex);
-		serializeText(buf, text);
-	}
-
-
-	// A S T  E v e n t s
-
-	public void nilNode(Object t) {
-		int ID = adaptor.getUniqueID(t);
-		transmit("nilNode\t"+ID);
-	}
-
-	public void errorNode(Object t) {
-		int ID = adaptor.getUniqueID(t);
-		String text = t.toString();
-		StringBuffer buf = new StringBuffer(50);
-		buf.append("errorNode\t");
-		buf.append(ID);
-		buf.append("\t");
-		buf.append(Token.INVALID_TOKEN_TYPE);
-		serializeText(buf, text);
-		transmit(buf.toString());
-	}
-
-	public void createNode(Object t) {
-		int ID = adaptor.getUniqueID(t);
-		String text = adaptor.getText(t);
-		int type = adaptor.getType(t);
-		StringBuffer buf = new StringBuffer(50);
-		buf.append("createNodeFromTokenElements\t");
-		buf.append(ID);
-		buf.append("\t");
-		buf.append(type);
-		serializeText(buf, text);
-		transmit(buf.toString());
-	}
-
-	public void createNode(Object node, Token token) {
-		int ID = adaptor.getUniqueID(node);
-		int tokenIndex = token.getTokenIndex();
-		transmit("createNode\t"+ID+"\t"+tokenIndex);
-	}
-
-	public void becomeRoot(Object newRoot, Object oldRoot) {
-		int newRootID = adaptor.getUniqueID(newRoot);
-		int oldRootID = adaptor.getUniqueID(oldRoot);
-		transmit("becomeRoot\t"+newRootID+"\t"+oldRootID);
-	}
-
-	public void addChild(Object root, Object child) {
-		int rootID = adaptor.getUniqueID(root);
-		int childID = adaptor.getUniqueID(child);
-		transmit("addChild\t"+rootID+"\t"+childID);
-	}
-
-	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
-		int ID = adaptor.getUniqueID(t);
-		transmit("setTokenBoundaries\t"+ID+"\t"+tokenStartIndex+"\t"+tokenStopIndex);
-	}
-
-
-    // support
-
-    public void setTreeAdaptor(TreeAdaptor adaptor) { this.adaptor = adaptor; }
-    public TreeAdaptor getTreeAdaptor() { return adaptor; }
-
-    protected String serializeToken(Token t) {
-        StringBuffer buf = new StringBuffer(50);
-        buf.append(t.getTokenIndex()); buf.append('\t');
-        buf.append(t.getType()); buf.append('\t');
-        buf.append(t.getChannel()); buf.append('\t');
-        buf.append(t.getLine()); buf.append('\t');
-		buf.append(t.getCharPositionInLine());
-		serializeText(buf, t.getText());
-		return buf.toString();
-	}
-
-	protected void serializeText(StringBuffer buf, String text) {
-		buf.append("\t\"");
-		if ( text==null ) {
-			text = "";
-		}
-		// escape \n and \r in all text so each token appears to exist on one line
-		// this escaping is slow but easy to understand
-		text = escapeNewlines(text);
-		buf.append(text);
-	}
-
-	protected String escapeNewlines(String txt) {
-		txt = txt.replaceAll("%","%25");   // escape all escape char ;)
-		txt = txt.replaceAll("\n","%0A");  // escape \n
-		txt = txt.replaceAll("\r","%0D");  // escape \r
-		return txt;
-	}
-}
-
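serializeText/escapeNewlines above keep the one-event-per-line protocol invariant by percent-encoding '%', '\n' and '\r'. A standalone sketch of the same transformation, for reference (not part of this change):

    public class EscapeDemo {
        public static void main(String[] args) {
            // Same order as escapeNewlines(): '%' first, so the escape
            // character itself round-trips safely.
            String txt = "line1\nline2 100%";
            txt = txt.replaceAll("%", "%25");
            txt = txt.replaceAll("\n", "%0A");
            txt = txt.replaceAll("\r", "%0D");
            System.out.println(txt);   // prints: line1%0Aline2 100%25
        }
    }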
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugParser.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugParser.java
deleted file mode 100644
index 49d78e7..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugParser.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.*;
-
-import java.io.IOException;
-
-public class DebugParser extends Parser {
-	/** Who to notify when events in the parser occur. */
-	protected DebugEventListener dbg = null;
-
-	/** Used to differentiate between fixed lookahead and cyclic DFA decisions
-	 *  while profiling.
- 	 */
-	public boolean isCyclicDecision = false;
-
-	/** Create a normal parser except wrap the token stream in a debug
-	 *  proxy that fires consume events.
-	 */
-	public DebugParser(TokenStream input, DebugEventListener dbg, RecognizerSharedState state) {
-		super(input instanceof DebugTokenStream?input:new DebugTokenStream(input,dbg), state);
-		setDebugListener(dbg);
-	}
-
-	public DebugParser(TokenStream input, RecognizerSharedState state) {
-		super(input instanceof DebugTokenStream?input:new DebugTokenStream(input,null), state);
-	}
-
-	public DebugParser(TokenStream input, DebugEventListener dbg) {
-		this(input instanceof DebugTokenStream?input:new DebugTokenStream(input,dbg), dbg, null);
-	}
-
-	/** Provide a new debug event listener for this parser.  Notify the
-	 *  input stream too that it should send events to this listener.
-	 */
-	public void setDebugListener(DebugEventListener dbg) {
-		if ( input instanceof DebugTokenStream ) {
-			((DebugTokenStream)input).setDebugListener(dbg);
-		}
-		this.dbg = dbg;
-	}
-
-	public DebugEventListener getDebugListener() {
-		return dbg;
-	}
-
-	public void reportError(IOException e) {
-		System.err.println(e);
-		e.printStackTrace(System.err);
-	}
-
-	public void beginResync() {
-		dbg.beginResync();
-	}
-
-	public void endResync() {
-		dbg.endResync();
-	}
-
-	public void beginBacktrack(int level) {
-		dbg.beginBacktrack(level);
-	}
-
-	public void endBacktrack(int level, boolean successful) {
-		dbg.endBacktrack(level,successful);		
-	}
-
-	public void reportError(RecognitionException e) {
-		super.reportError(e);
-		dbg.recognitionException(e);
-	}
-}
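Wiring a listener into a debug parser typically follows the (input, dbg) constructor pattern shown above. TLexer, TParser and the rule prog are placeholders for classes a grammar generated with -debug would provide (an assumption), and ResyncTracker is the sketch shown earlier:

    // Fragment, illustrative only.
    TLexer lexer = new TLexer(new ANTLRStringStream("c c"));
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    ResyncTracker tracker = new ResyncTracker();
    TParser parser = new TParser(tokens, tracker);   // parser extends DebugParser
    parser.prog();                                   // placeholder start rule
    System.out.println(tracker.getDeadTokens());     // tokens eaten during resync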
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTokenStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTokenStream.java
deleted file mode 100644
index 9a7a75f..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTokenStream.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.*;
-
-import java.util.List;
-
-public class DebugTokenStream implements TokenStream {
-	protected DebugEventListener dbg;
-	public TokenStream input;
-	protected boolean initialStreamState = true;
-
-	/** Track the last mark() call result value for use in rewind(). */
-	protected int lastMarker;
-
-	public DebugTokenStream(TokenStream input, DebugEventListener dbg) {
-		this.input = input;
-		setDebugListener(dbg);
-		// force TokenStream to get at least first valid token
-		// so we know if there are any hidden tokens first in the stream
-		input.LT(1);
-	}
-
-	public void setDebugListener(DebugEventListener dbg) {
-		this.dbg = dbg;
-	}
-
-	public void consume() {
-		if ( initialStreamState ) {
-			consumeInitialHiddenTokens();
-		}
-		int a = input.index();
-		Token t = input.LT(1);
-		input.consume();
-		int b = input.index();
-		dbg.consumeToken(t);
-		if ( b>a+1 ) {
-			// then we consumed more than one token; must be off channel tokens
-			for (int i=a+1; i<b; i++) {
-				dbg.consumeHiddenToken(input.get(i));
-			}
-		}
-	}
-
-	/* consume all initial off-channel tokens */
-	protected void consumeInitialHiddenTokens() {
-		int firstOnChannelTokenIndex = input.index();
-		for (int i=0; i<firstOnChannelTokenIndex; i++) {
-			dbg.consumeHiddenToken(input.get(i));
-		}
-		initialStreamState = false;
-	}
-
-	public Token LT(int i) {
-		if ( initialStreamState ) {
-			consumeInitialHiddenTokens();
-		}
-		dbg.LT(i, input.LT(i));
-		return input.LT(i);
-	}
-
-	public int LA(int i) {
-		if ( initialStreamState ) {
-			consumeInitialHiddenTokens();
-		}
-		dbg.LT(i, input.LT(i));
-		return input.LA(i);
-	}
-
-	public Token get(int i) {
-		return input.get(i);
-	}
-
-	public int mark() {
-		lastMarker = input.mark();
-		dbg.mark(lastMarker);
-		return lastMarker;
-	}
-
-	public int index() {
-		return input.index();
-	}
-
-	public int range() {
-		return input.range();
-	}
-
-	public void rewind(int marker) {
-		dbg.rewind(marker);
-		input.rewind(marker);
-	}
-
-	public void rewind() {
-		dbg.rewind();
-		input.rewind(lastMarker);
-	}
-
-	public void release(int marker) {
-	}
-
-	public void seek(int index) {
-		// TODO: implement seek in dbg interface
-		// db.seek(index);
-		input.seek(index);
-	}
-
-	public int size() {
-		return input.size();
-	}
-
-	public TokenSource getTokenSource() {
-		return input.getTokenSource();
-	}
-
-	public String getSourceName() {
-		return getTokenSource().getSourceName();
-	}
-
-	public String toString() {
-		return input.toString();
-	}
-
-	public String toString(int start, int stop) {
-		return input.toString(start,stop);
-	}
-
-	public String toString(Token start, Token stop) {
-		return input.toString(start,stop);
-	}
-}
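Because consume() above reports any off-channel tokens it skipped, wrapping a stream directly is a quick way to observe consumeHiddenToken events. TLexer is again a placeholder, and this assumes the lexer routes whitespace to the hidden channel:

    // Fragment, illustrative only.
    CommonTokenStream tokens =
        new CommonTokenStream(new TLexer(new ANTLRStringStream("c   c")));
    DebugTokenStream dbg = new DebugTokenStream(tokens,
        new BlankDebugEventListener() {
            @Override public void consumeHiddenToken(Token t) {
                System.out.println("hidden: " + t);   // e.g. the whitespace run
            }
        });
    while (dbg.LA(1) != Token.EOF) dbg.consume();     // drive the stream manually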
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java
deleted file mode 100644
index 1884bd6..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.Token;
-import org.antlr.runtime.TokenStream;
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.tree.TreeAdaptor;
-
-/** A TreeAdaptor proxy that fires debugging events to a DebugEventListener
- *  delegate and uses the TreeAdaptor delegate to do the actual work.  All
- *  AST events are triggered by this adaptor; no code gen changes are needed
- *  in generated rules.  Debugging events are triggered *after* invoking
- *  tree adaptor routines.
- *
- *  Trees created with actions in rewrite actions like "-> ^(ADD {foo} {bar})"
- *  cannot be tracked as they might not use the adaptor to create foo, bar.
- *  The debug listener has to deal with tree node IDs for which it did
- *  not see a createNode event.  A single <unknown> node is sufficient even
- *  if it represents a whole tree.
- */
-public class DebugTreeAdaptor implements TreeAdaptor {
-	protected DebugEventListener dbg;
-	protected TreeAdaptor adaptor;
-
-	public DebugTreeAdaptor(DebugEventListener dbg, TreeAdaptor adaptor) {
-		this.dbg = dbg;
-		this.adaptor = adaptor;
-	}
-
-	public Object create(Token payload) {
-		if ( payload.getTokenIndex() < 0 ) {
-			// could be token conjured up during error recovery
-			return create(payload.getType(), payload.getText());
-		}
-		Object node = adaptor.create(payload);
-		dbg.createNode(node, payload);
-		return node;
-	}
-
-	public Object errorNode(TokenStream input, Token start, Token stop,
-							RecognitionException e)
-	{
-		Object node = adaptor.errorNode(input, start, stop, e);
-		if ( node!=null ) {
-			dbg.errorNode(node);
-		}
-		return node;
-	}
-
-	public Object dupTree(Object tree) {
-		Object t = adaptor.dupTree(tree);
-		// walk the tree and emit create and add child events
-		// to simulate what dupTree has done. dupTree does not call this debug
-		// adaptor, so I must simulate the events here.
-		simulateTreeConstruction(t);
-		return t;
-	}
-
-	/** ^(A B C): emit create A, create B, add child, ...*/
-	protected void simulateTreeConstruction(Object t) {
-		dbg.createNode(t);
-		int n = adaptor.getChildCount(t);
-		for (int i=0; i<n; i++) {
-			Object child = adaptor.getChild(t, i);
-			simulateTreeConstruction(child);
-			dbg.addChild(t, child);
-		}
-	}
-
-	public Object dupNode(Object treeNode) {
-		Object d = adaptor.dupNode(treeNode);
-		dbg.createNode(d);
-		return d;
-	}
-
-	public Object nil() {
-		Object node = adaptor.nil();
-		dbg.nilNode(node);
-		return node;
-	}
-
-	public boolean isNil(Object tree) {
-		return adaptor.isNil(tree);
-	}
-
-	public void addChild(Object t, Object child) {
-		if ( t==null || child==null ) {
-			return;
-		}
-		adaptor.addChild(t,child);
-		dbg.addChild(t, child);
-	}
-
-	public Object becomeRoot(Object newRoot, Object oldRoot) {
-		Object n = adaptor.becomeRoot(newRoot, oldRoot);
-		dbg.becomeRoot(newRoot, oldRoot);
-		return n;
-	}
-
-	public Object rulePostProcessing(Object root) {
-		return adaptor.rulePostProcessing(root);
-	}
-
-	public void addChild(Object t, Token child) {
-		Object n = this.create(child);
-		this.addChild(t, n);
-	}
-
-	public Object becomeRoot(Token newRoot, Object oldRoot) {
-		Object n = this.create(newRoot);
-		adaptor.becomeRoot(n, oldRoot);
-		dbg.becomeRoot(newRoot, oldRoot);
-		return n;
-	}
-
-	public Object create(int tokenType, Token fromToken) {
-		Object node = adaptor.create(tokenType, fromToken);
-		dbg.createNode(node);
-		return node;
-	}
-
-	public Object create(int tokenType, Token fromToken, String text) {
-		Object node = adaptor.create(tokenType, fromToken, text);
-		dbg.createNode(node);
-		return node;
-	}
-
-	public Object create(int tokenType, String text) {
-		Object node = adaptor.create(tokenType, text);
-		dbg.createNode(node);
-		return node;
-	}
-
-	public int getType(Object t) {
-		return adaptor.getType(t);
-	}
-
-	public void setType(Object t, int type) {
-		adaptor.setType(t, type);
-	}
-
-	public String getText(Object t) {
-		return adaptor.getText(t);
-	}
-
-	public void setText(Object t, String text) {
-		adaptor.setText(t, text);
-	}
-
-	public Token getToken(Object t) {
-		return adaptor.getToken(t);
-	}
-
-	public void setTokenBoundaries(Object t, Token startToken, Token stopToken) {
-		adaptor.setTokenBoundaries(t, startToken, stopToken);
-		if ( t!=null && startToken!=null && stopToken!=null ) {
-			dbg.setTokenBoundaries(
-				t, startToken.getTokenIndex(),
-				stopToken.getTokenIndex());
-		}
-	}
-
-	public int getTokenStartIndex(Object t) {
-		return adaptor.getTokenStartIndex(t);
-	}
-
-	public int getTokenStopIndex(Object t) {
-		return adaptor.getTokenStopIndex(t);
-	}
-
-	public Object getChild(Object t, int i) {
-		return adaptor.getChild(t, i);
-	}
-
-	public void setChild(Object t, int i, Object child) {
-		adaptor.setChild(t, i, child);
-	}
-
-	public Object deleteChild(Object t, int i) {
-		return adaptor.deleteChild(t, i);
-	}
-
-	public int getChildCount(Object t) {
-		return adaptor.getChildCount(t);
-	}
-
-	public int getUniqueID(Object node) {
-		return adaptor.getUniqueID(node);
-	}
-
-	public Object getParent(Object t) {
-		return adaptor.getParent(t);
-	}
-
-	public int getChildIndex(Object t) {
-		return adaptor.getChildIndex(t);
-	}
-
-	public void setParent(Object t, Object parent) {
-		adaptor.setParent(t, parent);
-	}
-
-	public void setChildIndex(Object t, int index) {
-		adaptor.setChildIndex(t, index);
-	}
-
-	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
-		adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t);
-	}
-
-	// support
-
-	public DebugEventListener getDebugListener() {
-		return dbg;
-	}
-
-	public void setDebugListener(DebugEventListener dbg) {
-		this.dbg = dbg;
-	}
-
-	public TreeAdaptor getTreeAdaptor() {
-		return adaptor;
-	}
-}
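Since every AST operation funnels through the adaptor, wrapping a CommonTreeAdaptor is enough to see nilNode/createNode/addChild events fire. An illustrative sketch (not part of this change; 5 stands in for some real token type):

    // Fragment, illustrative only.
    DebugEventListener listener = new BlankDebugEventListener() {
        @Override public void createNode(Object t) {
            System.out.println("created " + t);
        }
    };
    TreeAdaptor adaptor = new DebugTreeAdaptor(listener, new CommonTreeAdaptor());
    Object root = adaptor.nil();            // fires nilNode
    Object id   = adaptor.create(5, "x");   // fires createNode
    adaptor.addChild(root, id);             // fires addChild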
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeNodeStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeNodeStream.java
deleted file mode 100644
index 92ff009..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeNodeStream.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.tree.TreeAdaptor;
-import org.antlr.runtime.tree.TreeNodeStream;
-import org.antlr.runtime.TokenStream;
-
-/** Debug any tree node stream.  The constructor accepts the stream
- *  and a debug listener.  As node stream calls come in, debug events
- *  are triggered.
- */
-public class DebugTreeNodeStream implements TreeNodeStream {
-	protected DebugEventListener dbg;
-	protected TreeAdaptor adaptor;
-	protected TreeNodeStream input;
-	protected boolean initialStreamState = true;
-
-	/** Track the last mark() call result value for use in rewind(). */
-	protected int lastMarker;
-
-	public DebugTreeNodeStream(TreeNodeStream input,
-							   DebugEventListener dbg)
-	{
-		this.input = input;
-		this.adaptor = input.getTreeAdaptor();
-		this.input.setUniqueNavigationNodes(true);
-		setDebugListener(dbg);
-	}
-
-	public void setDebugListener(DebugEventListener dbg) {
-		this.dbg = dbg;
-	}
-
-	public TreeAdaptor getTreeAdaptor() {
-		return adaptor;
-	}
-
-	public void consume() {
-		Object node = input.LT(1);
-		input.consume();
-		dbg.consumeNode(node);
-	}
-
-	public Object get(int i) {
-		return input.get(i);
-	}
-
-	public Object LT(int i) {
-		Object node = input.LT(i);
-		int ID = adaptor.getUniqueID(node);
-		String text = adaptor.getText(node);
-		int type = adaptor.getType(node);
-		dbg.LT(i, node);
-		return node;
-	}
-
-	public int LA(int i) {
-		Object node = input.LT(i);
-		int ID = adaptor.getUniqueID(node);
-		String text = adaptor.getText(node);
-		int type = adaptor.getType(node);
-		dbg.LT(i, node);
-		return type;
-	}
-
-	public int mark() {
-		lastMarker = input.mark();
-		dbg.mark(lastMarker);
-		return lastMarker;
-	}
-
-	public int index() {
-		return input.index();
-	}
-
-	public void rewind(int marker) {
-		dbg.rewind(marker);
-		input.rewind(marker);
-	}
-
-	public void rewind() {
-		dbg.rewind();
-		input.rewind(lastMarker);
-	}
-
-	public void release(int marker) {
-	}
-
-	public void seek(int index) {
-		// TODO: implement seek in dbg interface
-		// db.seek(index);
-		input.seek(index);
-	}
-
-	public int size() {
-		return input.size();
-	}
-
-    public void reset() { ; }
-
-    public Object getTreeSource() {
-		return input;
-	}
-
-	public String getSourceName() {
-		return getTokenStream().getSourceName();
-	}
-
-	public TokenStream getTokenStream() {
-		return input.getTokenStream();
-	}
-
-	/** It is normally this object that instructs the node stream to
-	 *  create unique nav nodes, but to satisfy the interface, we have to
-	 *  define it.  It might be better to ignore the parameter, but
-	 *  there might be a use for it later, so I'll leave it.
-	 */
-	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) {
-		input.setUniqueNavigationNodes(uniqueNavigationNodes);
-	}
-
-	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
-		input.replaceChildren(parent, startChildIndex, stopChildIndex, t);
-	}
-
-	public String toString(Object start, Object stop) {
-		return input.toString(start,stop);
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeParser.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeParser.java
deleted file mode 100644
index 6e1ece8..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeParser.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.TreeNodeStream;
-import org.antlr.runtime.tree.TreeParser;
-
-import java.io.IOException;
-
-public class DebugTreeParser extends TreeParser {
-	/** Who to notify when events in the parser occur. */
-	protected DebugEventListener dbg = null;
-
-	/** Used to differentiate between fixed lookahead and cyclic DFA decisions
-	 *  while profiling.
- 	 */
-	public boolean isCyclicDecision = false;
-
-	/** Create a normal tree parser except wrap the tree node stream in a
-	 *  debug proxy that fires consume events.
-	 */
-	public DebugTreeParser(TreeNodeStream input, DebugEventListener dbg, RecognizerSharedState state) {
-		super(input instanceof DebugTreeNodeStream?input:new DebugTreeNodeStream(input,dbg), state);
-		setDebugListener(dbg);
-	}
-
-	public DebugTreeParser(TreeNodeStream input, RecognizerSharedState state) {
-		super(input instanceof DebugTreeNodeStream?input:new DebugTreeNodeStream(input,null), state);
-	}
-
-	public DebugTreeParser(TreeNodeStream input, DebugEventListener dbg) {
-		this(input instanceof DebugTreeNodeStream?input:new DebugTreeNodeStream(input,dbg), dbg, null);
-	}
-
-	/** Provide a new debug event listener for this parser.  Notify the
-	 *  input stream too that it should send events to this listener.
-	 */
-	public void setDebugListener(DebugEventListener dbg) {
-		if ( input instanceof DebugTreeNodeStream ) {
-			((DebugTreeNodeStream)input).setDebugListener(dbg);
-		}
-		this.dbg = dbg;
-	}
-
-	public DebugEventListener getDebugListener() {
-		return dbg;
-	}
-
-	public void reportError(IOException e) {
-		System.err.println(e);
-		e.printStackTrace(System.err);
-	}
-
-	public void reportError(RecognitionException e) {
-		dbg.recognitionException(e);
-	}
-
-	protected Object getMissingSymbol(IntStream input,
-									  RecognitionException e,
-									  int expectedTokenType,
-									  BitSet follow)
-	{
-		Object o = super.getMissingSymbol(input, e, expectedTokenType, follow);
-		dbg.consumeNode(o);
-		return o;
-	}
-
-	public void beginResync() {
-		dbg.beginResync();
-	}
-
-	public void endResync() {
-		dbg.endResync();
-	}
-
-	public void beginBacktrack(int level) {
-		dbg.beginBacktrack(level);
-	}
-
-	public void endBacktrack(int level, boolean successful) {
-		dbg.endBacktrack(level,successful);		
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/ParseTreeBuilder.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/ParseTreeBuilder.java
deleted file mode 100644
index 13c6ed0..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/ParseTreeBuilder.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.Token;
-import org.antlr.runtime.tree.ParseTree;
-
-import java.util.Stack;
-import java.util.ArrayList;
-import java.util.List;
-
-/** This parser listener tracks rule entry/exit and token matches
- *  to build a simple parse tree using ParseTree nodes.
- */
-public class ParseTreeBuilder extends BlankDebugEventListener {
-	public static final String EPSILON_PAYLOAD = "<epsilon>";
-	
-	Stack callStack = new Stack();
-	List hiddenTokens = new ArrayList();
-	int backtracking = 0;
-
-	public ParseTreeBuilder(String grammarName) {
-		ParseTree root = create("<grammar "+grammarName+">");
-		callStack.push(root);
-	}
-
-	public ParseTree getTree() {
-		return (ParseTree)callStack.elementAt(0);
-	}
-
-	/**  What kind of node to create.  You might want to override this,
-	 *   so node creation is factored out here.
-	 */
-	public ParseTree create(Object payload) {
-		return new ParseTree(payload);
-	}
-
-	public ParseTree epsilonNode() {
-		return create(EPSILON_PAYLOAD);
-	}
-
-	/** Backtracking or cyclic DFA; don't want to add nodes to the tree */
-	public void enterDecision(int d, boolean couldBacktrack) { backtracking++; }
-	public void exitDecision(int i) { backtracking--; }
-
-	public void enterRule(String filename, String ruleName) {
-		if ( backtracking>0 ) return;
-		ParseTree parentRuleNode = (ParseTree)callStack.peek();
-		ParseTree ruleNode = create(ruleName);
-		parentRuleNode.addChild(ruleNode);
-		callStack.push(ruleNode);
-	}
-
-	public void exitRule(String filename, String ruleName) {
-		if ( backtracking>0 ) return;
-		ParseTree ruleNode = (ParseTree)callStack.peek();
-		if ( ruleNode.getChildCount()==0 ) {
-			ruleNode.addChild(epsilonNode());
-		}
-		callStack.pop();		
-	}
-
-	public void consumeToken(Token token) {
-		if ( backtracking>0 ) return;
-		ParseTree ruleNode = (ParseTree)callStack.peek();
-		ParseTree elementNode = create(token);
-		elementNode.hiddenTokens = this.hiddenTokens;
-		this.hiddenTokens = new ArrayList();
-		ruleNode.addChild(elementNode);
-	}
-
-	public void consumeHiddenToken(Token token) {
-		if ( backtracking>0 ) return;
-		hiddenTokens.add(token);
-	}
-
-	public void recognitionException(RecognitionException e) {
-		if ( backtracking>0 ) return;
-		ParseTree ruleNode = (ParseTree)callStack.peek();
-		ParseTree errorNode = create(e);
-		ruleNode.addChild(errorNode);
-	}
-}
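Typical use of ParseTreeBuilder: hand it to a parser generated with -debug, run the start rule, then print the resulting parse tree. TLexer, TParser and prog are placeholders (an assumption about the generated names):

    // Fragment, illustrative only.
    CommonTokenStream tokens =
        new CommonTokenStream(new TLexer(new ANTLRStringStream("c c")));
    ParseTreeBuilder builder = new ParseTreeBuilder("T");
    TParser parser = new TParser(tokens, builder);   // (input, dbg) constructor
    parser.prog();                                   // placeholder start rule
    System.out.println(builder.getTree().toStringTree());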
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/Profiler.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/Profiler.java
deleted file mode 100644
index aea9a17..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/Profiler.java
+++ /dev/null
@@ -1,734 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.*;
-import org.antlr.runtime.misc.DoubleKeyMap;
-
-import java.util.*;
-
-/** Using the debug event interface, track what is happening in the parser
- *  and record statistics about the runtime.
- */
-public class Profiler extends BlankDebugEventListener {
-	public static final String DATA_SEP = "\t";
-	public static final String newline = System.getProperty("line.separator");
-
-	static boolean dump = false;
-
-	public static class ProfileStats {
-		public String Version;
-		public String name;
-		public int numRuleInvocations;
-		public int numUniqueRulesInvoked;
-		public int numDecisionEvents;
-		public int numDecisionsCovered;
-		public int numDecisionsThatPotentiallyBacktrack;
-		public int numDecisionsThatDoBacktrack;
-		public int maxRuleInvocationDepth;
-		public float avgkPerDecisionEvent;
-		public float avgkPerBacktrackingDecisionEvent;
-		public float averageDecisionPercentBacktracks;
-		public int numBacktrackOccurrences; // doesn't count gated DFA edges
-
-		public int numFixedDecisions;
-		public int minDecisionMaxFixedLookaheads;
-		public int maxDecisionMaxFixedLookaheads;
-		public int avgDecisionMaxFixedLookaheads;
-		public int stddevDecisionMaxFixedLookaheads;
-		public int numCyclicDecisions;
-		public int minDecisionMaxCyclicLookaheads;
-		public int maxDecisionMaxCyclicLookaheads;
-		public int avgDecisionMaxCyclicLookaheads;
-		public int stddevDecisionMaxCyclicLookaheads;
-//		int Stats.min(toArray(decisionMaxSynPredLookaheads);
-//		int Stats.max(toArray(decisionMaxSynPredLookaheads);
-//		int Stats.avg(toArray(decisionMaxSynPredLookaheads);
-//		int Stats.stddev(toArray(decisionMaxSynPredLookaheads);
-		public int numSemanticPredicates;
-		public int numTokens;
-		public int numHiddenTokens;
-		public int numCharsMatched;
-		public int numHiddenCharsMatched;
-		public int numReportedErrors;
-		public int numMemoizationCacheHits;
-		public int numMemoizationCacheMisses;
-		public int numGuessingRuleInvocations;
-		public int numMemoizationCacheEntries;
-	}
-
-	public static class DecisionDescriptor {
-		public int decision;
-		public String fileName;
-		public String ruleName;
-		public int line;
-		public int pos;
-		public boolean couldBacktrack;
-
-		public int n;
-		public float avgk; // avg across all decision events
-		public int maxk;
-		public int numBacktrackOccurrences;
-		public int numSemPredEvals;
-	}
-
-	// all about a specific exec of a single decision
-	public static class DecisionEvent {
-		public DecisionDescriptor decision;
-		public int startIndex;
-		public int k;
-		public boolean backtracks; // doesn't count gated DFA edges
-		public boolean evalSemPred;
-		public long startTime;
-		public long stopTime;
-		public int numMemoizationCacheHits;
-		public int numMemoizationCacheMisses;
-	}
-
-	/** Because I may change the stats, I need to track that for later
-	 *  computations to be consistent.
-	 */
-	public static final String Version = "3";
-	public static final String RUNTIME_STATS_FILENAME = "runtime.stats";
-
-	/** Ack, should not store parser; can't do remote stuff.  Well, we pass
-	 *  input stream around too so I guess it's ok.
-	 */
-	public DebugParser parser = null;
-
-	// working variables
-
-	protected int ruleLevel = 0;
-	//protected int decisionLevel = 0;
-	protected Token lastRealTokenTouchedInDecision;
-	protected Set<String> uniqueRules = new HashSet<String>();
-	protected Stack<String> currentGrammarFileName = new Stack();
-	protected Stack<String> currentRuleName = new Stack();
-	protected Stack<Integer> currentLine = new Stack();
-	protected Stack<Integer> currentPos = new Stack();
-
-	// Vector<DecisionStats>
-	//protected Vector decisions = new Vector(200); // need setSize
-	protected DoubleKeyMap<String,Integer, DecisionDescriptor> decisions =
-		new DoubleKeyMap<String,Integer, DecisionDescriptor>();
-
-	// Record a DecisionData for each decision we hit while parsing
-	protected List<DecisionEvent> decisionEvents = new ArrayList<DecisionEvent>();
-	protected Stack<DecisionEvent> decisionStack = new Stack<DecisionEvent>();
-
-	protected int backtrackDepth;
-	
-	ProfileStats stats = new ProfileStats();
-
-	public Profiler() {
-	}
-
-	public Profiler(DebugParser parser) {
-		this.parser = parser;
-	}
-
-	public void enterRule(String grammarFileName, String ruleName) {
-//		System.out.println("enterRule "+grammarFileName+":"+ruleName);
-		ruleLevel++;
-		stats.numRuleInvocations++;
-		uniqueRules.add(grammarFileName+":"+ruleName);
-		stats.maxRuleInvocationDepth = Math.max(stats.maxRuleInvocationDepth, ruleLevel);
-		currentGrammarFileName.push( grammarFileName );
-		currentRuleName.push( ruleName );
-	}
-
-	public void exitRule(String grammarFileName, String ruleName) {
-		ruleLevel--;
-		currentGrammarFileName.pop();
-		currentRuleName.pop();
-	}
-
-	/** Track memoization; this is not part of standard debug interface
-	 *  but is triggered by profiling.  Code gen inserts an override
-	 *  in the recognizer that triggers this method.
-	 *  Called from alreadyParsedRule().
-	 */
-	public void examineRuleMemoization(IntStream input,
-									   int ruleIndex,
-									   int stopIndex, // index or MEMO_RULE_UNKNOWN...
-									   String ruleName)
-	{
-		if (dump) System.out.println("examine memo "+ruleName+" at "+input.index()+": "+stopIndex);
-		if ( stopIndex==BaseRecognizer.MEMO_RULE_UNKNOWN ) {
-			//System.out.println("rule "+ruleIndex+" missed @ "+input.index());
-			stats.numMemoizationCacheMisses++;
-			stats.numGuessingRuleInvocations++; // we'll have to enter
-			currentDecision().numMemoizationCacheMisses++;
-		}
-		else {
-			// regardless of rule success/failure, if in cache, we have a cache hit
-			//System.out.println("rule "+ruleIndex+" hit @ "+input.index());
-			stats.numMemoizationCacheHits++;
-			currentDecision().numMemoizationCacheHits++;
-		}
-	}
-
-	/** Warning: doesn't track success/failure, just unique recording event */
-	public void memoize(IntStream input,
-						int ruleIndex,
-						int ruleStartIndex,
-						String ruleName)
-	{
-		// count how many entries go into table
-		if (dump) System.out.println("memoize "+ruleName);
-		stats.numMemoizationCacheEntries++;
-	}
-
-	@Override
-	public void location(int line, int pos) {
-		currentLine.push(line);
-		currentPos.push(pos);
-	}
-
-	public void enterDecision(int decisionNumber, boolean couldBacktrack) {
-		lastRealTokenTouchedInDecision = null;
-		stats.numDecisionEvents++;
-		int startingLookaheadIndex = parser.getTokenStream().index();
-		TokenStream input = parser.getTokenStream();
-		if ( dump ) System.out.println("enterDecision canBacktrack="+couldBacktrack+" "+ decisionNumber +
-						   " backtrack depth " + backtrackDepth +
-						   " @ " + input.get(input.index()) +
-						   " rule " +locationDescription());
-		String g = (String) currentGrammarFileName.peek();
-		DecisionDescriptor descriptor = decisions.get(g, decisionNumber);
-		if ( descriptor == null ) {
-			descriptor = new DecisionDescriptor();
-			decisions.put(g, decisionNumber, descriptor);
-			descriptor.decision = decisionNumber;
-			descriptor.fileName = (String)currentGrammarFileName.peek();
-			descriptor.ruleName = (String)currentRuleName.peek();
-			descriptor.line = (Integer)currentLine.peek();
-			descriptor.pos = (Integer)currentPos.peek();
-			descriptor.couldBacktrack = couldBacktrack;
-		}
-		descriptor.n++;
-
-		DecisionEvent d = new DecisionEvent();
-		decisionStack.push(d);
-		d.decision = descriptor;
-		d.startTime = System.currentTimeMillis();
-		d.startIndex = startingLookaheadIndex;
-	}
-
-	public void exitDecision(int decisionNumber) {
-		DecisionEvent d = decisionStack.pop();
-		d.stopTime = System.currentTimeMillis();
-
-		int lastTokenIndex = lastRealTokenTouchedInDecision.getTokenIndex();
-		int numHidden = getNumberOfHiddenTokens(d.startIndex, lastTokenIndex);
-		int depth = lastTokenIndex - d.startIndex - numHidden + 1; // +1 counts consuming start token as 1
-		d.k = depth;
-		d.decision.maxk = Math.max(d.decision.maxk, depth);
-
-		if (dump) System.out.println("exitDecision "+decisionNumber+" in "+d.decision.ruleName+
-						   " lookahead "+d.k +" max token "+lastRealTokenTouchedInDecision);
-		decisionEvents.add(d); // done with decision; track all
-	}
-
-	public void consumeToken(Token token) {
-		if (dump) System.out.println("consume token "+token);
-		if ( !inDecision() ) {
-			stats.numTokens++;
-			return;
-		}
-		if ( lastRealTokenTouchedInDecision==null ||
-			 lastRealTokenTouchedInDecision.getTokenIndex() < token.getTokenIndex() )
-		{
-			lastRealTokenTouchedInDecision = token;
-		}
-		DecisionEvent d = currentDecision();
-		// compute lookahead depth
-		int thisRefIndex = token.getTokenIndex();
-		int numHidden = getNumberOfHiddenTokens(d.startIndex, thisRefIndex);
-		int depth = thisRefIndex - d.startIndex - numHidden + 1; // +1 counts consuming start token as 1
-		//d.maxk = Math.max(d.maxk, depth);
-		if (dump) System.out.println("consume "+thisRefIndex+" "+depth+" tokens ahead in "+
-						   d.decision.ruleName+"-"+d.decision.decision+" start index "+d.startIndex);		
-	}
-
-	/** The parser is in a decision if the decision depth > 0.  This
-	 *  works for backtracking also, which can have nested decisions.
-	 */
-	public boolean inDecision() {
-		return decisionStack.size()>0;
-	}
-
-	public void consumeHiddenToken(Token token) {
-		//System.out.println("consume hidden token "+token);
-		if ( !inDecision() ) stats.numHiddenTokens++;
-	}
-
-	/** Track refs to lookahead if in a fixed/nonfixed decision.
-	 */
-	public void LT(int i, Token t) {
-		if ( inDecision() && i>0 ) {
-			DecisionEvent d = currentDecision();
-			if (dump) System.out.println("LT("+i+")="+t+" index "+t.getTokenIndex()+" relative to "+d.decision.ruleName+"-"+
-							   d.decision.decision+" start index "+d.startIndex);
-			if ( lastRealTokenTouchedInDecision==null ||
-				 lastRealTokenTouchedInDecision.getTokenIndex() < t.getTokenIndex() )
-			{
-				lastRealTokenTouchedInDecision = t;
-				if (dump) System.out.println("set last token "+lastRealTokenTouchedInDecision);
-			}
-			// get starting index off stack
-//			int stackTop = lookaheadStack.size()-1;
-//			Integer startingIndex = (Integer)lookaheadStack.get(stackTop);
-//			// compute lookahead depth
-//			int thisRefIndex = parser.getTokenStream().index();
-//			int numHidden =
-//				getNumberOfHiddenTokens(startingIndex.intValue(), thisRefIndex);
-//			int depth = i + thisRefIndex - startingIndex.intValue() - numHidden;
-//			/*
-//			System.out.println("LT("+i+") @ index "+thisRefIndex+" is depth "+depth+
-//				" max is "+maxLookaheadInCurrentDecision);
-//			*/
-//			if ( depth>maxLookaheadInCurrentDecision ) {
-//				maxLookaheadInCurrentDecision = depth;
-//			}
-//			d.maxk = currentDecision()/
-		}
-	}
-
-	/** Track backtracking decisions.  You'll see a fixed or cyclic decision
-	 *  and then a backtrack.
-	 *
-	 * 		enter rule
-	 * 		...
-	 * 		enter decision
-	 * 		LA and possibly consumes (for cyclic DFAs)
-	 * 		begin backtrack level
-	 * 		mark m
-	 * 		rewind m
-	 * 		end backtrack level, success
-	 * 		exit decision
-	 * 		...
-	 * 		exit rule
-	 */
-	public void beginBacktrack(int level) {
-		if (dump) System.out.println("enter backtrack "+level);
-		backtrackDepth++;
-		DecisionEvent e = currentDecision();
-		if ( e.decision.couldBacktrack ) {
-			stats.numBacktrackOccurrences++;
-			e.decision.numBacktrackOccurrences++;
-			e.backtracks = true;
-		}
-	}
-
-	/** Successful or not, track how much lookahead synpreds use */
-	public void endBacktrack(int level, boolean successful) {
-		if (dump) System.out.println("exit backtrack "+level+": "+successful);
-		backtrackDepth--;		
-	}
-
-	@Override
-	public void mark(int i) {
-		if (dump) System.out.println("mark "+i);
-	}
-
-	@Override
-	public void rewind(int i) {
-		if (dump) System.out.println("rewind "+i);
-	}
-
-	@Override
-	public void rewind() {
-		if (dump) System.out.println("rewind");
-	}
-
-
-
-	protected DecisionEvent currentDecision() {
-		return decisionStack.peek();
-	}
-
-	public void recognitionException(RecognitionException e) {
-		stats.numReportedErrors++;
-	}
-
-	public void semanticPredicate(boolean result, String predicate) {
-		stats.numSemanticPredicates++;
-		if ( inDecision() ) {
-			DecisionEvent d = currentDecision();
-			d.evalSemPred = true;
-			d.decision.numSemPredEvals++;
-			if (dump) System.out.println("eval "+predicate+" in "+d.decision.ruleName+"-"+
-							   d.decision.decision);
-		}
-	}
-
-	public void terminate() {
-		for (DecisionEvent e : decisionEvents) {
-			//System.out.println("decision "+e.decision.decision+": k="+e.k);
-			e.decision.avgk += e.k;
-			stats.avgkPerDecisionEvent += e.k;
-			if ( e.backtracks ) { // doesn't count gated syn preds on DFA edges
-				stats.avgkPerBacktrackingDecisionEvent += e.k;
-			}
-		}
-		stats.averageDecisionPercentBacktracks = 0.0f;
-		for (DecisionDescriptor d : decisions.values()) {
-			stats.numDecisionsCovered++;
-			d.avgk /= (double)d.n;
-			if ( d.couldBacktrack ) {
-				stats.numDecisionsThatPotentiallyBacktrack++;
-				float percentBacktracks = d.numBacktrackOccurrences / (float)d.n;
-				//System.out.println("dec "+d.decision+" backtracks "+percentBacktracks*100+"%");
-				stats.averageDecisionPercentBacktracks += percentBacktracks;
-			}
-			// ignore rules that backtrack along gated DFA edges
-			if ( d.numBacktrackOccurrences > 0 ) {
-				stats.numDecisionsThatDoBacktrack++;
-			}
-		}
-		stats.averageDecisionPercentBacktracks /= stats.numDecisionsThatPotentiallyBacktrack;
-		stats.averageDecisionPercentBacktracks *= 100; // it's a percentage
-		stats.avgkPerDecisionEvent /= stats.numDecisionEvents;
-		stats.avgkPerBacktrackingDecisionEvent /= (double)stats.numBacktrackOccurrences;
-
-		System.err.println(toString());
-		System.err.println(getDecisionStatsDump());
-
-//		String stats = toNotifyString();
-//		try {
-//			Stats.writeReport(RUNTIME_STATS_FILENAME,stats);
-//		}
-//		catch (IOException ioe) {
-//			System.err.println(ioe);
-//			ioe.printStackTrace(System.err);
-//		}
-	}
-
-	public void setParser(DebugParser parser) {
-		this.parser = parser;
-	}
-
-	// R E P O R T I N G
-
-	public String toNotifyString() {
-		StringBuffer buf = new StringBuffer();
-		buf.append(Version);
-		buf.append('\t');
-		buf.append(parser.getClass().getName());
-//		buf.append('\t');
-//		buf.append(numRuleInvocations);
-//		buf.append('\t');
-//		buf.append(maxRuleInvocationDepth);
-//		buf.append('\t');
-//		buf.append(numFixedDecisions);
-//		buf.append('\t');
-//		buf.append(Stats.min(decisionMaxFixedLookaheads));
-//		buf.append('\t');
-//		buf.append(Stats.max(decisionMaxFixedLookaheads));
-//		buf.append('\t');
-//		buf.append(Stats.avg(decisionMaxFixedLookaheads));
-//		buf.append('\t');
-//		buf.append(Stats.stddev(decisionMaxFixedLookaheads));
-//		buf.append('\t');
-//		buf.append(numCyclicDecisions);
-//		buf.append('\t');
-//		buf.append(Stats.min(decisionMaxCyclicLookaheads));
-//		buf.append('\t');
-//		buf.append(Stats.max(decisionMaxCyclicLookaheads));
-//		buf.append('\t');
-//		buf.append(Stats.avg(decisionMaxCyclicLookaheads));
-//		buf.append('\t');
-//		buf.append(Stats.stddev(decisionMaxCyclicLookaheads));
-//		buf.append('\t');
-//		buf.append(numBacktrackDecisions);
-//		buf.append('\t');
-//		buf.append(Stats.min(toArray(decisionMaxSynPredLookaheads)));
-//		buf.append('\t');
-//		buf.append(Stats.max(toArray(decisionMaxSynPredLookaheads)));
-//		buf.append('\t');
-//		buf.append(Stats.avg(toArray(decisionMaxSynPredLookaheads)));
-//		buf.append('\t');
-//		buf.append(Stats.stddev(toArray(decisionMaxSynPredLookaheads)));
-//		buf.append('\t');
-//		buf.append(numSemanticPredicates);
-//		buf.append('\t');
-//		buf.append(parser.getTokenStream().size());
-//		buf.append('\t');
-//		buf.append(numHiddenTokens);
-//		buf.append('\t');
-//		buf.append(numCharsMatched);
-//		buf.append('\t');
-//		buf.append(numHiddenCharsMatched);
-//		buf.append('\t');
-//		buf.append(numberReportedErrors);
-//		buf.append('\t');
-//		buf.append(numMemoizationCacheHits);
-//		buf.append('\t');
-//		buf.append(numMemoizationCacheMisses);
-//		buf.append('\t');
-//		buf.append(numGuessingRuleInvocations);
-//		buf.append('\t');
-//		buf.append(numMemoizationCacheEntries);
-		return buf.toString();
-	}
-
-	public String toString() {
-		return toString(getReport());
-	}
-
-	public ProfileStats getReport() {
-//		TokenStream input = parser.getTokenStream();
-//		for (int i=0; i<input.size()&& lastRealTokenTouchedInDecision !=null&&i<= lastRealTokenTouchedInDecision.getTokenIndex(); i++) {
-//			Token t = input.get(i);
-//			if ( t.getChannel()!=Token.DEFAULT_CHANNEL ) {
-//				stats.numHiddenTokens++;
-//				stats.numHiddenCharsMatched += t.getText().length();
-//			}
-//		}
-		stats.Version = Version;
-		stats.name = parser.getClass().getName();
-		stats.numUniqueRulesInvoked = uniqueRules.size();
-		//stats.numCharsMatched = lastTokenConsumed.getStopIndex() + 1;
-		return stats;
-	}
-
-	public DoubleKeyMap getDecisionStats() {
-		return decisions;
-	}
-
-	public List getDecisionEvents() {
-		return decisionEvents;
-	}
-
-	public static String toString(ProfileStats stats) {
-		StringBuffer buf = new StringBuffer();
-		buf.append("ANTLR Runtime Report; Profile Version ");
-		buf.append(stats.Version);
-		buf.append(newline);
-		buf.append("parser name ");
-		buf.append(stats.name);
-		buf.append(newline);
-		buf.append("Number of rule invocations ");
-		buf.append(stats.numRuleInvocations);
-		buf.append(newline);
-		buf.append("Number of unique rules visited ");
-		buf.append(stats.numUniqueRulesInvoked);
-		buf.append(newline);
-		buf.append("Number of decision events ");
-		buf.append(stats.numDecisionEvents);
-		buf.append(newline);
-		buf.append("Overall average k per decision event ");
-		buf.append(stats.avgkPerDecisionEvent);
-		buf.append(newline);
-		buf.append("Number of backtracking occurrences (can be multiple per decision) ");
-		buf.append(stats.numBacktrackOccurrences);
-		buf.append(newline);
-		buf.append("Overall average k per decision event that backtracks ");
-		buf.append(stats.avgkPerBacktrackingDecisionEvent);
-		buf.append(newline);
-		buf.append("Number of rule invocations while backtracking ");
-		buf.append(stats.numGuessingRuleInvocations);
-		buf.append(newline);
-		buf.append("num decisions that potentially backtrack ");
-		buf.append(stats.numDecisionsThatPotentiallyBacktrack);
-		buf.append(newline);
-		buf.append("num decisions that do backtrack ");
-		buf.append(stats.numDecisionsThatDoBacktrack);
-		buf.append(newline);
-		buf.append("num decisions that potentially backtrack but don't ");
-		buf.append(stats.numDecisionsThatPotentiallyBacktrack - stats.numDecisionsThatDoBacktrack);
-		buf.append(newline);
-		buf.append("average % of time a potentially backtracking decision backtracks ");
-		buf.append(stats.averageDecisionPercentBacktracks);
-		buf.append(newline);
-		buf.append("num unique decisions covered ");
-		buf.append(stats.numDecisionsCovered);
-		buf.append(newline);
-		buf.append("max rule invocation nesting depth ");
-		buf.append(stats.maxRuleInvocationDepth);
-		buf.append(newline);
-
-//		buf.append("number of fixed lookahead decisions ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("min lookahead used in a fixed lookahead decision ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("max lookahead used in a fixed lookahead decision ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("average lookahead depth used in fixed lookahead decisions ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("standard deviation of depth used in fixed lookahead decisions ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("number of arbitrary lookahead decisions ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("min lookahead used in an arbitrary lookahead decision ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("max lookahead used in an arbitrary lookahead decision ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("average lookahead depth used in arbitrary lookahead decisions ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("standard deviation of depth used in arbitrary lookahead decisions ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("number of evaluated syntactic predicates ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("min lookahead used in a syntactic predicate ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("max lookahead used in a syntactic predicate ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("average lookahead depth used in syntactic predicates ");
-//		buf.append();
-//		buf.append('\n');
-//		buf.append("standard deviation of depth used in syntactic predicates ");
-//		buf.append();
-//		buf.append('\n');
-		buf.append("rule memoization cache size ");
-		buf.append(stats.numMemoizationCacheEntries);
-		buf.append(newline);
-		buf.append("number of rule memoization cache hits ");
-		buf.append(stats.numMemoizationCacheHits);
-		buf.append(newline);
-		buf.append("number of rule memoization cache misses ");
-		buf.append(stats.numMemoizationCacheMisses);
-		buf.append(newline);
-//		buf.append("number of evaluated semantic predicates ");
-//		buf.append();
-//		buf.append(newline);
-		buf.append("number of tokens ");
-		buf.append(stats.numTokens);
-		buf.append(newline);
-		buf.append("number of hidden tokens ");
-		buf.append(stats.numHiddenTokens);
-		buf.append(newline);
-		buf.append("number of char ");
-		buf.append(stats.numCharsMatched);
-		buf.append(newline);
-		buf.append("number of hidden char ");
-		buf.append(stats.numHiddenCharsMatched);
-		buf.append(newline);
-		buf.append("number of syntax errors ");
-		buf.append(stats.numReportedErrors);
-		buf.append(newline);
-		return buf.toString();
-	}
-
-	public String getDecisionStatsDump() {
-		StringBuffer buf = new StringBuffer();
-		buf.append("location");
-		buf.append(DATA_SEP);
-		buf.append("n");
-		buf.append(DATA_SEP);
-		buf.append("avgk");
-		buf.append(DATA_SEP);
-		buf.append("maxk");
-		buf.append(DATA_SEP);
-		buf.append("synpred");
-		buf.append(DATA_SEP);
-		buf.append("sempred");
-		buf.append(DATA_SEP);
-		buf.append("canbacktrack");
-		buf.append("\n");
-		for (String fileName : decisions.keySet()) {
-			for (int d : decisions.keySet(fileName)) {
-				DecisionDescriptor s = decisions.get(fileName, d);
-				buf.append(s.decision);
-				buf.append("@");
-				buf.append(locationDescription(s.fileName,s.ruleName,s.line,s.pos)); // decision number
-				buf.append(DATA_SEP);
-				buf.append(s.n);
-				buf.append(DATA_SEP);
-				buf.append(String.format("%.2f",s.avgk));
-				buf.append(DATA_SEP);
-				buf.append(s.maxk);
-				buf.append(DATA_SEP);
-				buf.append(s.numBacktrackOccurrences);
-				buf.append(DATA_SEP);
-				buf.append(s.numSemPredEvals);
-				buf.append(DATA_SEP);
-				buf.append(s.couldBacktrack ?"1":"0");
-				buf.append(newline);
-			}
-		}
-		return buf.toString();
-	}
-
-	protected int[] trim(int[] X, int n) {
-		if ( n<X.length ) {
-			int[] trimmed = new int[n];
-			System.arraycopy(X,0,trimmed,0,n);
-			X = trimmed;
-		}
-		return X;
-	}
-
-	protected int[] toArray(List a) {
-		int[] x = new int[a.size()];
-		for (int i = 0; i < a.size(); i++) {
-			Integer I = (Integer) a.get(i);
-			x[i] = I.intValue();
-		}
-		return x;
-	}
-
-	/** Get num hidden tokens between i..j inclusive */
-	public int getNumberOfHiddenTokens(int i, int j) {
-		int n = 0;
-		TokenStream input = parser.getTokenStream();
-		for (int ti = i; ti<input.size() && ti <= j; ti++) {
-			Token t = input.get(ti);
-			if ( t.getChannel()!=Token.DEFAULT_CHANNEL ) {
-				n++;
-			}
-		}
-		return n;
-	}
-
-	protected String locationDescription() {
-		return locationDescription(
-			currentGrammarFileName.peek(),
-			currentRuleName.peek(),
-			currentLine.peek(),
-			currentPos.peek());
-	}
-
-	protected String locationDescription(String file, String rule, int line, int pos) {
-		return file+":"+line+":"+pos+"(" + rule + ")";
-	}
-}
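
The deleted Profiler above computes a decision's lookahead depth as lastTokenIndex - startIndex - numHidden + 1 (see exitDecision and consumeToken): hidden-channel tokens between the decision start and the deepest token touched do not count toward k, while the start token itself does. Below is a minimal, self-contained sketch of that arithmetic; the class and method names are illustrative and not taken from the patch.

    final class LookaheadDepthSketch {
        /** channels[i] is the channel of token i; 0 stands in for the default channel. */
        static int depth(int startIndex, int lastTokenIndex, int[] channels) {
            int hidden = 0;
            for (int i = startIndex; i < channels.length && i <= lastTokenIndex; i++) {
                if (channels[i] != 0) hidden++;              // off-channel tokens don't add to k
            }
            return lastTokenIndex - startIndex - hidden + 1; // +1 counts the start token
        }

        public static void main(String[] args) {
            // decision starts at token 3, deepest token touched is 7,
            // tokens 4 and 6 sit on a hidden channel, so k = 3
            int[] channels = {0, 0, 0, 0, 99, 0, 99, 0};
            System.out.println(depth(3, 7, channels));
        }
    }
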
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
deleted file mode 100644
index 933fdae..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.Token;
-import org.antlr.runtime.CharStream;
-import org.antlr.runtime.tree.BaseTree;
-import org.antlr.runtime.tree.Tree;
-
-import java.io.*;
-import java.net.ConnectException;
-import java.net.Socket;
-import java.util.StringTokenizer;
-
-public class RemoteDebugEventSocketListener implements Runnable {
-	static final int MAX_EVENT_ELEMENTS = 8;
-	DebugEventListener listener;
-	String machine;
-	int port;
-	Socket channel = null;
-	PrintWriter out;
-	BufferedReader in;
-	String event;
-	/** Version of ANTLR (dictates events) */
-	public String version;
-	public String grammarFileName;
-	/** Track the last token index we saw during a consume.  If same, then
-	 *  set a flag that we have a problem.
-	 */
-	int previousTokenIndex = -1;
-	boolean tokenIndexesInvalid = false;
-
-	public static class ProxyToken implements Token {
-		int index;
-		int type;
-		int channel;
-		int line;
-		int charPos;
-		String text;
-		public ProxyToken(int index) { this.index = index; }		
-		public ProxyToken(int index, int type, int channel,
-						  int line, int charPos, String text)
-		{
-			this.index = index;
-			this.type = type;
-			this.channel = channel;
-			this.line = line;
-			this.charPos = charPos;
-			this.text = text;
-		}
-		public String getText() {
-			return text;
-		}
-		public void setText(String text) {
-			this.text = text;
-		}
-		public int getType() {
-			return type;
-		}
-		public void setType(int ttype) {
-			this.type = ttype;
-		}
-		public int getLine() {
-			return line;
-		}
-		public void setLine(int line) {
-			this.line = line;
-		}
-		public int getCharPositionInLine() {
-			return charPos;
-		}
-		public void setCharPositionInLine(int pos) {
-			this.charPos = pos;
-		}
-		public int getChannel() {
-			return channel;
-		}
-		public void setChannel(int channel) {
-			this.channel = channel;
-		}
-		public int getTokenIndex() {
-			return index;
-		}
-		public void setTokenIndex(int index) {
-			this.index = index;
-		}
-		public CharStream getInputStream() {
-			return null;
-		}
-		public void setInputStream(CharStream input) {
-		}
-		public String toString() {
-			String channelStr = "";
-			if ( channel!=Token.DEFAULT_CHANNEL ) {
-				channelStr=",channel="+channel;
-			}
-			return "["+getText()+"/<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+",@"+index+"]";
-		}
-	}
-
-	public static class ProxyTree extends BaseTree {
-		public int ID;
-		public int type;
-		public int line = 0;
-		public int charPos = -1;
-		public int tokenIndex = -1;
-		public String text;
-		
-		public ProxyTree(int ID, int type, int line, int charPos, int tokenIndex, String text) {
-			this.ID = ID;
-			this.type = type;
-			this.line = line;
-			this.charPos = charPos;
-			this.tokenIndex = tokenIndex;
-			this.text = text;
-		}
-
-		public ProxyTree(int ID) { this.ID = ID; }
-
-		public int getTokenStartIndex() { return tokenIndex; }
-		public void setTokenStartIndex(int index) {	}
-		public int getTokenStopIndex() { return 0; }
-		public void setTokenStopIndex(int index) { }
-		public Tree dupNode() {	return null; }
-		public int getType() { return type; }
-		public String getText() { return text; }
-		public String toString() {
-			return "fix this";
-		}
-	}
-
-	public RemoteDebugEventSocketListener(DebugEventListener listener,
-										  String machine,
-										  int port) throws IOException
-	{
-		this.listener = listener;
-		this.machine = machine;
-		this.port = port;
-
-        if( !openConnection() ) {
-            throw new ConnectException();
-        }
-	}
-
-	protected void eventHandler() {
-		try {
-			handshake();
-			event = in.readLine();
-			while ( event!=null ) {
-				dispatch(event);
-				ack();
-				event = in.readLine();
-			}
-		}
-		catch (Exception e) {
-			System.err.println(e);
-			e.printStackTrace(System.err);
-		}
-		finally {
-            closeConnection();
-		}
-	}
-
-    protected boolean openConnection() {
-        boolean success = false;
-        try {
-            channel = new Socket(machine, port);
-            channel.setTcpNoDelay(true);
-			OutputStream os = channel.getOutputStream();
-			OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
-			out = new PrintWriter(new BufferedWriter(osw));
-			InputStream is = channel.getInputStream();
-			InputStreamReader isr = new InputStreamReader(is, "UTF8");
-			in = new BufferedReader(isr);
-            success = true;
-        } catch(Exception e) {
-            System.err.println(e);
-        }
-        return success;
-    }
-
-    protected void closeConnection() {
-        try {
-            in.close(); in = null;
-            out.close(); out = null;
-            channel.close(); channel=null;
-        }
-        catch (Exception e) {
-            System.err.println(e);
-            e.printStackTrace(System.err);
-        }
-        finally {
-            if ( in!=null ) {
-                try {in.close();} catch (IOException ioe) {
-                    System.err.println(ioe);
-                }
-            }
-            if ( out!=null ) {
-                out.close();
-            }
-            if ( channel!=null ) {
-                try {channel.close();} catch (IOException ioe) {
-                    System.err.println(ioe);
-                }
-            }
-        }
-
-    }
-
-	protected void handshake() throws IOException {
-		String antlrLine = in.readLine();
-		String[] antlrElements = getEventElements(antlrLine);
-		version = antlrElements[1];
-		String grammarLine = in.readLine();
-		String[] grammarElements = getEventElements(grammarLine);
-		grammarFileName = grammarElements[1];
-		ack();
-		listener.commence(); // inform listener after handshake
-	}
-
-	protected void ack() {
-        out.println("ack");
-		out.flush();
-	}
-
-	protected void dispatch(String line) {
-        //System.out.println("event: "+line);
-        String[] elements = getEventElements(line);
-		if ( elements==null || elements[0]==null ) {
-			System.err.println("unknown debug event: "+line);
-			return;
-		}
-		if ( elements[0].equals("enterRule") ) {
-			listener.enterRule(elements[1], elements[2]);
-		}
-		else if ( elements[0].equals("exitRule") ) {
-			listener.exitRule(elements[1], elements[2]);
-		}
-		else if ( elements[0].equals("enterAlt") ) {
-			listener.enterAlt(Integer.parseInt(elements[1]));
-		}
-		else if ( elements[0].equals("enterSubRule") ) {
-			listener.enterSubRule(Integer.parseInt(elements[1]));
-		}
-		else if ( elements[0].equals("exitSubRule") ) {
-			listener.exitSubRule(Integer.parseInt(elements[1]));
-		}
-		else if ( elements[0].equals("enterDecision") ) {
-			listener.enterDecision(Integer.parseInt(elements[1]), elements[2].equals("true"));
-		}
-		else if ( elements[0].equals("exitDecision") ) {
-			listener.exitDecision(Integer.parseInt(elements[1]));
-		}
-		else if ( elements[0].equals("location") ) {
-			listener.location(Integer.parseInt(elements[1]),
-							  Integer.parseInt(elements[2]));
-		}
-		else if ( elements[0].equals("consumeToken") ) {
-			ProxyToken t = deserializeToken(elements, 1);
-			if ( t.getTokenIndex() == previousTokenIndex ) {
-				tokenIndexesInvalid = true;
-			}
-			previousTokenIndex = t.getTokenIndex();
-			listener.consumeToken(t);
-		}
-		else if ( elements[0].equals("consumeHiddenToken") ) {
-			ProxyToken t = deserializeToken(elements, 1);
-			if ( t.getTokenIndex() == previousTokenIndex ) {
-				tokenIndexesInvalid = true;
-			}
-			previousTokenIndex = t.getTokenIndex();
-			listener.consumeHiddenToken(t);
-		}
-		else if ( elements[0].equals("LT") ) {
-			Token t = deserializeToken(elements, 2);
-			listener.LT(Integer.parseInt(elements[1]), t);
-		}
-		else if ( elements[0].equals("mark") ) {
-			listener.mark(Integer.parseInt(elements[1]));
-		}
-		else if ( elements[0].equals("rewind") ) {
-			if ( elements[1]!=null ) {
-				listener.rewind(Integer.parseInt(elements[1]));
-			}
-			else {
-				listener.rewind();
-			}
-		}
-		else if ( elements[0].equals("beginBacktrack") ) {
-			listener.beginBacktrack(Integer.parseInt(elements[1]));
-		}
-		else if ( elements[0].equals("endBacktrack") ) {
-			int level = Integer.parseInt(elements[1]);
-			int successI = Integer.parseInt(elements[2]);
-			listener.endBacktrack(level, successI==DebugEventListener.TRUE);
-		}
-		else if ( elements[0].equals("exception") ) {
-			String excName = elements[1];
-			String indexS = elements[2];
-			String lineS = elements[3];
-			String posS = elements[4];
-			Class excClass = null;
-			try {
-				excClass = Class.forName(excName);
-				RecognitionException e =
-					(RecognitionException)excClass.newInstance();
-				e.index = Integer.parseInt(indexS);
-				e.line = Integer.parseInt(lineS);
-				e.charPositionInLine = Integer.parseInt(posS);
-				listener.recognitionException(e);
-			}
-			catch (ClassNotFoundException cnfe) {
-				System.err.println("can't find class "+cnfe);
-				cnfe.printStackTrace(System.err);
-			}
-			catch (InstantiationException ie) {
-				System.err.println("can't instantiate class "+ie);
-				ie.printStackTrace(System.err);
-			}
-			catch (IllegalAccessException iae) {
-				System.err.println("can't access class "+iae);
-				iae.printStackTrace(System.err);
-			}
-		}
-		else if ( elements[0].equals("beginResync") ) {
-			listener.beginResync();
-		}
-		else if ( elements[0].equals("endResync") ) {
-			listener.endResync();
-		}
-		else if ( elements[0].equals("terminate") ) {
-			listener.terminate();
-		}
-		else if ( elements[0].equals("semanticPredicate") ) {
-			Boolean result = Boolean.valueOf(elements[1]);
-			String predicateText = elements[2];
-			predicateText = unEscapeNewlines(predicateText);
-			listener.semanticPredicate(result.booleanValue(),
-									   predicateText);
-		}
-		else if ( elements[0].equals("consumeNode") ) {
-			ProxyTree node = deserializeNode(elements, 1);
-			listener.consumeNode(node);
-		}
-		else if ( elements[0].equals("LN") ) {
-			int i = Integer.parseInt(elements[1]);
-			ProxyTree node = deserializeNode(elements, 2);
-			listener.LT(i, node);
-		}
-		else if ( elements[0].equals("createNodeFromTokenElements") ) {
-			int ID = Integer.parseInt(elements[1]);
-			int type = Integer.parseInt(elements[2]);
-			String text = elements[3];
-			text = unEscapeNewlines(text);
-			ProxyTree node = new ProxyTree(ID, type, -1, -1, -1, text);
-			listener.createNode(node);
-		}
-		else if ( elements[0].equals("createNode") ) {
-			int ID = Integer.parseInt(elements[1]);
-			int tokenIndex = Integer.parseInt(elements[2]);
-			// create dummy node/token filled with ID, tokenIndex
-			ProxyTree node = new ProxyTree(ID);
-			ProxyToken token = new ProxyToken(tokenIndex);
-			listener.createNode(node, token);
-		}
-		else if ( elements[0].equals("nilNode") ) {
-			int ID = Integer.parseInt(elements[1]);
-			ProxyTree node = new ProxyTree(ID);
-			listener.nilNode(node);
-		}
-		else if ( elements[0].equals("errorNode") ) {
-			// TODO: do we need a special tree here?
-			int ID = Integer.parseInt(elements[1]);
-			int type = Integer.parseInt(elements[2]);
-			String text = elements[3];
-			text = unEscapeNewlines(text);
-			ProxyTree node = new ProxyTree(ID, type, -1, -1, -1, text);
-			listener.errorNode(node);
-		}
-		else if ( elements[0].equals("becomeRoot") ) {
-			int newRootID = Integer.parseInt(elements[1]);
-			int oldRootID = Integer.parseInt(elements[2]);
-			ProxyTree newRoot = new ProxyTree(newRootID);
-			ProxyTree oldRoot = new ProxyTree(oldRootID);
-			listener.becomeRoot(newRoot, oldRoot);
-		}
-		else if ( elements[0].equals("addChild") ) {
-			int rootID = Integer.parseInt(elements[1]);
-			int childID = Integer.parseInt(elements[2]);
-			ProxyTree root = new ProxyTree(rootID);
-			ProxyTree child = new ProxyTree(childID);
-			listener.addChild(root, child);
-		}
-		else if ( elements[0].equals("setTokenBoundaries") ) {
-			int ID = Integer.parseInt(elements[1]);
-			ProxyTree node = new ProxyTree(ID);
-			listener.setTokenBoundaries(
-				node,
-				Integer.parseInt(elements[2]),
-				Integer.parseInt(elements[3]));
-		}
-		else {
-			System.err.println("unknown debug event: "+line);
-		}
-	}
-
-	protected ProxyTree deserializeNode(String[] elements, int offset) {
-		int ID = Integer.parseInt(elements[offset+0]);
-		int type = Integer.parseInt(elements[offset+1]);
-		int tokenLine = Integer.parseInt(elements[offset+2]);
-		int charPositionInLine = Integer.parseInt(elements[offset+3]);
-		int tokenIndex = Integer.parseInt(elements[offset+4]);
-		String text = elements[offset+5];
-		text = unEscapeNewlines(text);
-		return new ProxyTree(ID, type, tokenLine, charPositionInLine, tokenIndex, text);
-	}
-
-	protected ProxyToken deserializeToken(String[] elements,
-										  int offset)
-	{
-		String indexS = elements[offset+0];
-		String typeS = elements[offset+1];
-		String channelS = elements[offset+2];
-		String lineS = elements[offset+3];
-		String posS = elements[offset+4];
-		String text = elements[offset+5];
-		text = unEscapeNewlines(text);
-		int index = Integer.parseInt(indexS);
-		ProxyToken t =
-			new ProxyToken(index,
-						   Integer.parseInt(typeS),
-						   Integer.parseInt(channelS),
-						   Integer.parseInt(lineS),
-						   Integer.parseInt(posS),
-						   text);
-		return t;
-	}
-
-	/** Create a thread to listen to the remote running recognizer */
-	public void start() {
-		Thread t = new Thread(this);
-		t.start();
-	}
-
-	public void run() {
-		eventHandler();
-	}
-
-	// M i s c
-
-	public String[] getEventElements(String event) {
-		if ( event==null ) {
-			return null;
-		}
-		String[] elements = new String[MAX_EVENT_ELEMENTS];
-		String str = null; // a string element if present (must be last)
-		try {
-			int firstQuoteIndex = event.indexOf('"');
-			if ( firstQuoteIndex>=0 ) {
-				// treat specially; has a string argument like "a comment\n
-				// Note that the string is terminated by \n not end quote.
-				// Easier to parse that way.
-				String eventWithoutString = event.substring(0,firstQuoteIndex);
-				str = event.substring(firstQuoteIndex+1,event.length());
-				event = eventWithoutString;
-			}
-			StringTokenizer st = new StringTokenizer(event, "\t", false);
-			int i = 0;
-			while ( st.hasMoreTokens() ) {
-				if ( i>=MAX_EVENT_ELEMENTS ) {
-					// ErrorManager.internalError("event has more than "+MAX_EVENT_ELEMENTS+" args: "+event);
-					return elements;
-				}
-				elements[i] = st.nextToken();
-				i++;
-			}
-			if ( str!=null ) {
-				elements[i] = str;
-			}
-		}
-		catch (Exception e) {
-			e.printStackTrace(System.err);
-		}
-		return elements;
-	}
-
-	protected String unEscapeNewlines(String txt) {
-		// this unescape is slow but easy to understand
-		txt = txt.replaceAll("%0A","\n");  // unescape \n
-		txt = txt.replaceAll("%0D","\r");  // unescape \r
-		txt = txt.replaceAll("%25","%");   // undo escaped escape chars
-		return txt;
-	}
-
-	public boolean tokenIndexesAreInvalid() {
-		return false;
-		//return tokenIndexesInvalid;
-	}
-
-}
-
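
The getEventElements and unEscapeNewlines helpers above define the listener's text protocol: each event is one line of tab-separated fields, an optional trailing string argument begins at the first '"' and runs to the end of the line, and newlines, carriage returns, and literal percent signs arrive escaped as %0A, %0D, and %25. The following is a rough standalone sketch of that parsing; the names are illustrative and it skips the fixed-size element array the real code uses.

    import java.util.ArrayList;
    import java.util.List;

    final class EventLineSketch {
        static List<String> split(String line) {
            List<String> fields = new ArrayList<>();
            int quote = line.indexOf('"');
            String head = quote >= 0 ? line.substring(0, quote) : line;
            for (String f : head.split("\t")) {
                if (!f.isEmpty()) fields.add(f);                     // tab-separated fixed fields
            }
            if (quote >= 0) {
                fields.add(unescape(line.substring(quote + 1)));     // trailing string argument
            }
            return fields;
        }

        static String unescape(String s) {
            return s.replace("%0A", "\n").replace("%0D", "\r").replace("%25", "%");
        }

        public static void main(String[] args) {
            // e.g. a consumeToken-style event whose token text contains an escaped newline
            System.out.println(split("consumeToken\t5\t4\t0\t1\t0\t\"id%0A"));
        }
    }
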
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/TraceDebugEventListener.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/TraceDebugEventListener.java
deleted file mode 100644
index de9366d..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/TraceDebugEventListener.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.Token;
-import org.antlr.runtime.tree.TreeAdaptor;
-
-/** Print out (most of) the events... Useful for debugging, testing... */
-public class TraceDebugEventListener extends BlankDebugEventListener {
-	TreeAdaptor adaptor;
-
-	public TraceDebugEventListener(TreeAdaptor adaptor) {
-		this.adaptor = adaptor;
-	}
-
-	public void enterRule(String ruleName) { System.out.println("enterRule "+ruleName); }
-	public void exitRule(String ruleName) { System.out.println("exitRule "+ruleName); }
-	public void enterSubRule(int decisionNumber) { System.out.println("enterSubRule"); }
-	public void exitSubRule(int decisionNumber) { System.out.println("exitSubRule"); }
-	public void location(int line, int pos) {System.out.println("location "+line+":"+pos);}
-
-	// Tree parsing stuff
-
-	public void consumeNode(Object t) {
-		int ID = adaptor.getUniqueID(t);
-		String text = adaptor.getText(t);
-		int type = adaptor.getType(t);
-		System.out.println("consumeNode "+ID+" "+text+" "+type);
-	}
-
-	public void LT(int i, Object t) {
-		int ID = adaptor.getUniqueID(t);
-		String text = adaptor.getText(t);
-		int type = adaptor.getType(t);
-		System.out.println("LT "+i+" "+ID+" "+text+" "+type);
-	}
-
-
-	// AST stuff
-	public void nilNode(Object t) {System.out.println("nilNode "+adaptor.getUniqueID(t));}
-
-	public void createNode(Object t) {
-		int ID = adaptor.getUniqueID(t);
-		String text = adaptor.getText(t);
-		int type = adaptor.getType(t);
-		System.out.println("create "+ID+": "+text+", "+type);
-	}
-
-	public void createNode(Object node, Token token) {
-		int ID = adaptor.getUniqueID(node);
-		String text = adaptor.getText(node);
-		int tokenIndex = token.getTokenIndex();
-		System.out.println("create "+ID+": "+tokenIndex);
-	}
-
-	public void becomeRoot(Object newRoot, Object oldRoot) {
-		System.out.println("becomeRoot "+adaptor.getUniqueID(newRoot)+", "+
-						   adaptor.getUniqueID(oldRoot));
-	}
-
-	public void addChild(Object root, Object child) {
-		System.out.println("addChild "+adaptor.getUniqueID(root)+", "+
-						   adaptor.getUniqueID(child));
-	}
-
-	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
-		System.out.println("setTokenBoundaries "+adaptor.getUniqueID(t)+", "+
-						   tokenStartIndex+", "+tokenStopIndex);
-	}
-}
-
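
The trace listener above simply prints each event it receives. Here is a hypothetical direct invocation, only to show the output format; in real use a debug-enabled parser fires these callbacks, and the demo class name is an assumption.

    import org.antlr.runtime.debug.TraceDebugEventListener;
    import org.antlr.runtime.tree.CommonTreeAdaptor;

    final class TraceDemo {
        public static void main(String[] args) {
            TraceDebugEventListener trace =
                new TraceDebugEventListener(new CommonTreeAdaptor());
            trace.enterRule("expr");   // prints: enterRule expr
            trace.location(3, 7);      // prints: location 3:7
            trace.exitRule("expr");    // prints: exitRule expr
        }
    }
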
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/Tracer.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/Tracer.java
deleted file mode 100644
index c2c73da..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/debug/Tracer.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.debug;
-
-import org.antlr.runtime.IntStream;
-import org.antlr.runtime.TokenStream;
-
-/** The default tracer mimics the traceParser behavior of ANTLR 2.x.
- *  This listens for debugging events from the parser and implies
- *  that you cannot debug and trace at the same time.
- */
-public class Tracer extends BlankDebugEventListener {
-	public IntStream input;
-	protected int level = 0;
-
-	public Tracer(IntStream input) {
-		this.input = input;
-	}
-
-	public void enterRule(String ruleName) {
-		for (int i=1; i<=level; i++) {System.out.print(" ");}
-		System.out.println("> "+ruleName+" lookahead(1)="+getInputSymbol(1));
-		level++;
-	}
-
-	public void exitRule(String ruleName) {
-		level--;
-		for (int i=1; i<=level; i++) {System.out.print(" ");}
-		System.out.println("< "+ruleName+" lookahead(1)="+getInputSymbol(1));
-	}
-
-	public Object getInputSymbol(int k) {
-		if ( input instanceof TokenStream ) {
-			return ((TokenStream)input).LT(k);
-		}
-		return new Character((char)input.LA(k));
-	}
-}
-
-
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/FastQueue.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/FastQueue.java
deleted file mode 100644
index 08843dd..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/FastQueue.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.misc;
-
-import java.util.List;
-import java.util.ArrayList;
-import java.util.NoSuchElementException;
-
-/** A queue that can dequeue and get(i) in O(1) and grow arbitrarily large.
- *  A linked list is fast at dequeue but slow at get(i).  An array is
- *  the reverse.  This is O(1) for both operations.
- *
- *  List grows until you dequeue last element at end of buffer. Then
- *  it resets to start filling at 0 again.  If adds/removes are balanced, the
- *  buffer will not grow too large.
- *
- *  No iterator stuff as that's not how we'll use it.
- */
-public class FastQueue<T> {
-    /** dynamically-sized buffer of elements */
-    protected List<T> data = new ArrayList<T>();
-    /** index of next element to fill */
-    protected int p = 0;
-	protected int range = -1; // how deep have we gone?	
-
-    public void reset() { clear(); }
-    public void clear() { p = 0; data.clear(); }
-
-    /** Get and remove first element in queue */
-    public T remove() {
-        T o = elementAt(0);
-        p++;
-        // have we hit end of buffer?
-        if ( p == data.size() ) {
-            // if so, it's an opportunity to start filling at index 0 again
-            clear(); // size goes to 0, but retains memory
-        }
-        return o;
-    }
-
-    public void add(T o) { data.add(o); }
-
-    public int size() { return data.size() - p; }
-
-	public int range() { return range; }
-
-    public T head() { return elementAt(0); }
-
-    /** Return element i elements ahead of current element.  i==0 gets
-     *  current element.  This is not an absolute index into the data list
-     *  since p defines the start of the real list.
-     */
-    public T elementAt(int i) {
-		int absIndex = p + i;
-		if ( absIndex >= data.size() ) {
-            throw new NoSuchElementException("queue index "+ absIndex +" > last index "+(data.size()-1));
-        }
-        if ( absIndex < 0 ) {
-            throw new NoSuchElementException("queue index "+ absIndex +" < 0");
-        }
-		if ( absIndex>range ) range = absIndex;
-        return data.get(absIndex);
-    }
-
-    /** Return string of current buffer contents; non-destructive */
-    public String toString() {
-        StringBuffer buf = new StringBuffer();
-        int n = size();
-        for (int i=0; i<n; i++) {
-            buf.append(elementAt(i));
-            if ( (i+1)<n ) buf.append(" ");
-        }
-        return buf.toString();
-    }
-}
\ No newline at end of file
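
FastQueue above gets O(1) remove() and elementAt(i) by advancing a start index p instead of shifting elements; the backing list is cleared only once p reaches the end, so balanced add/remove traffic keeps the buffer small. A small usage sketch against the API shown in the deleted file (the demo class name is illustrative):

    import org.antlr.runtime.misc.FastQueue;

    final class FastQueueDemo {
        public static void main(String[] args) {
            FastQueue<String> q = new FastQueue<String>();
            q.add("a"); q.add("b"); q.add("c");
            System.out.println(q.head());        // a
            System.out.println(q.remove());      // a; only p moves, nothing is shifted
            System.out.println(q.elementAt(1));  // c, i.e. one past the current head
            System.out.println(q.size());        // 2
        }
    }
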
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/LookaheadStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/LookaheadStream.java
deleted file mode 100644
index 6f19c44..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/LookaheadStream.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.misc;
-
-import org.antlr.runtime.Token;
-
-import java.util.NoSuchElementException;
-
-/** A lookahead queue that knows how to mark/release locations
- *  in the buffer for backtracking purposes. Any markers force the FastQueue
- *  superclass to keep all tokens until no more markers; then can reset
- *  to avoid growing a huge buffer.
- */
-public abstract class LookaheadStream<T> extends FastQueue<T> {
-    public static final int UNINITIALIZED_EOF_ELEMENT_INDEX = Integer.MAX_VALUE;
-
-    /** Absolute token index. It's the index of the symbol about to be
-	 *  read via LT(1). Goes from 0 to numtokens.
-     */
-    protected int currentElementIndex = 0;
-
-    protected T prevElement;
-
-    /** Track object returned by nextElement upon end of stream;
-     *  Return it later when they ask for LT past end of input.
-     */
-    public T eof = null;
-
-    /** Track the last mark() call result value for use in rewind(). */
-    protected int lastMarker;
-
-    /** tracks how deep mark() calls are nested */
-    protected int markDepth = 0;
-
-    public void reset() {
-        super.reset();
-        currentElementIndex = 0;
-        p = 0;
-        prevElement=null;        
-    }
-    
-    /** Implement nextElement to supply a stream of elements to this
-     *  lookahead buffer.  Return eof upon end of the stream we're pulling from.
-     */
-    public abstract T nextElement();
-
-    public abstract boolean isEOF(T o);
-
-    /** Get and remove first element in queue; override FastQueue.remove();
-     *  it's the same, just checks for backtracking.
-     */
-    public T remove() {
-        T o = elementAt(0);
-        p++;
-        // have we hit end of buffer and not backtracking?
-        if ( p == data.size() && markDepth==0 ) {
-            // if so, it's an opportunity to start filling at index 0 again
-            clear(); // size goes to 0, but retains memory
-        }
-        return o;
-    }
-
-    /** Make sure we have at least one element to remove, even if EOF */
-    public void consume() {
-        syncAhead(1);
-        prevElement = remove();
-        currentElementIndex++;
-    }
-
-    /** Make sure we have 'need' elements from current position p. Last valid
-     *  p index is data.size()-1.  p+need-1 is the data index 'need' elements
-     *  ahead.  If we need 1 element, (p+1-1)==p must be < data.size().
-     */
-    protected void syncAhead(int need) {
-        int n = (p+need-1) - data.size() + 1; // how many more elements we need?
-        if ( n > 0 ) fill(n);                 // out of elements?
-    }
-
-    /** add n elements to buffer */
-    public void fill(int n) {
-        for (int i=1; i<=n; i++) {
-            T o = nextElement();
-            if ( isEOF(o) ) eof = o;
-            data.add(o);
-        }
-    }
-
-    /** Size of entire stream is unknown; we only know buffer size from FastQueue */
-    public int size() { throw new UnsupportedOperationException("streams are of unknown size"); }
-
-    public T LT(int k) {
-		if ( k==0 ) {
-			return null;
-		}
-		if ( k<0 ) return LB(-k);
-		//System.out.print("LT(p="+p+","+k+")=");
-        syncAhead(k);
-        if ( (p+k-1) > data.size() ) return eof;
-        return elementAt(k-1);
-	}
-
-    public int index() { return currentElementIndex; }
-
-	public int mark() {
-        markDepth++;
-        lastMarker = p; // track where we are in buffer not absolute token index
-        return lastMarker;
-	}
-
-	public void release(int marker) {
-		// no resources to release
-	}
-
-	public void rewind(int marker) {
-        markDepth--;
-        seek(marker); // assume marker is top
-        // release(marker); // waste of call; it does nothing in this class
-    }
-
-	public void rewind() {
-        seek(lastMarker); // rewind but do not release marker
-    }
-
-    /** Seek to a 0-indexed position within data buffer.  Can't handle
-     *  case where you seek beyond end of existing buffer.  Normally used
-     *  to seek backwards in the buffer. Does not force loading of nodes.
-     *  Doesn't seek to absolute position in input stream since this stream
-     *  is unbuffered. Seeks only into our moving window of elements.
-     */
-    public void seek(int index) { p = index; }
-
-    protected T LB(int k) {
-        if ( k==1 ) return prevElement;
-        throw new NoSuchElementException("can't look backwards more than one token in this stream");
-    }
-}
\ No newline at end of file
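
In LookaheadStream above, mark() pins the buffer (remove() skips its reset while markDepth > 0) and rewind(marker) seeks back to the recorded buffer position, which is how backtracking replays already-consumed elements. An illustrative concrete subclass over a fixed array follows; the class name and the "<EOF>" sentinel are assumptions, not from the file.

    import org.antlr.runtime.misc.LookaheadStream;

    final class StringLookahead extends LookaheadStream<String> {
        private final String[] src;
        private int next = 0;
        StringLookahead(String... src) { this.src = src; }

        @Override public String nextElement() {
            return next < src.length ? src[next++] : "<EOF>";
        }
        @Override public boolean isEOF(String o) { return "<EOF>".equals(o); }

        public static void main(String[] args) {
            StringLookahead s = new StringLookahead("x", "y", "z");
            int m = s.mark();            // pin the buffer before speculating
            s.consume();                 // x
            s.consume();                 // y
            s.rewind(m);                 // back to the marked buffer position
            System.out.println(s.LT(1)); // x again
        }
    }
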
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTree.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTree.java
deleted file mode 100644
index 34dd050..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTree.java
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/** A generic tree implementation with no payload.  You must subclass to
- *  actually have any user data.  ANTLR v3 uses a list of children approach
- *  instead of the child-sibling approach in v2.  A flat tree (a list) is
- *  an empty node whose children represent the list.  An empty, but
- *  non-null node is called "nil".
- */
-public abstract class BaseTree implements Tree {
-	protected List children;
-
-	public BaseTree() {
-	}
-
-	/** Create a new node from an existing node does nothing for BaseTree
-	 *  as there are no fields other than the children list, which cannot
-	 *  be copied as the children are not considered part of this node.
-	 */
-	public BaseTree(Tree node) {
-	}
-
-	public Tree getChild(int i) {
-		if ( children==null || i>=children.size() ) {
-			return null;
-		}
-		return (Tree)children.get(i);
-	}
-
-	/** Get the children internal List; note that if you directly mess with
-	 *  the list, do so at your own risk.
-	 */
-	public List getChildren() {
-		return children;
-	}
-
-	public Tree getFirstChildWithType(int type) {
-		for (int i = 0; children!=null && i < children.size(); i++) {
-			Tree t = (Tree) children.get(i);
-			if ( t.getType()==type ) {
-				return t;
-			}
-		}
-		return null;
-	}
-
-	public int getChildCount() {
-		if ( children==null ) {
-			return 0;
-		}
-		return children.size();
-	}
-
-	/** Add t as child of this node.
-	 *
-	 *  Warning: if t has no children, but child does
-	 *  and child isNil then this routine moves children to t via
-	 *  t.children = child.children; i.e., without copying the array.
-	 */
-	public void addChild(Tree t) {
-		//System.out.println("add child "+t.toStringTree()+" "+this.toStringTree());
-		//System.out.println("existing children: "+children);
-		if ( t==null ) {
-			return; // do nothing upon addChild(null)
-		}
-		BaseTree childTree = (BaseTree)t;
-		if ( childTree.isNil() ) { // t is an empty node possibly with children
-			if ( this.children!=null && this.children == childTree.children ) {
-				throw new RuntimeException("attempt to add child list to itself");
-			}
-			// just add all of childTree's children to this
-			if ( childTree.children!=null ) {
-				if ( this.children!=null ) { // must copy, this has children already
-					int n = childTree.children.size();
-					for (int i = 0; i < n; i++) {
-						Tree c = (Tree)childTree.children.get(i);
-						this.children.add(c);
-						// handle double-link stuff for each child of nil root
-						c.setParent(this);
-						c.setChildIndex(children.size()-1);
-					}
-				}
-				else {
-					// no children for this but t has children; just set pointer
-					// call general freshener routine
-					this.children = childTree.children;
-					this.freshenParentAndChildIndexes();
-				}
-			}
-		}
-		else { // child is not nil (don't care about children)
-			if ( children==null ) {
-				children = createChildrenList(); // create children list on demand
-			}
-			children.add(t);
-			childTree.setParent(this);
-			childTree.setChildIndex(children.size()-1);
-		}
-		// System.out.println("now children are: "+children);
-	}
-
-	/** Add all elements of kids list as children of this node */
-	public void addChildren(List kids) {
-		for (int i = 0; i < kids.size(); i++) {
-			Tree t = (Tree) kids.get(i);
-			addChild(t);
-		}
-	}
-
-	public void setChild(int i, Tree t) {
-		if ( t==null ) {
-			return;
-		}
-		if ( t.isNil() ) {
-			throw new IllegalArgumentException("Can't set single child to a list");
-		}
-		if ( children==null ) {
-			children = createChildrenList();
-		}
-		children.set(i, t);
-		t.setParent(this);
-		t.setChildIndex(i);
-	}
-
-	/** Insert child t at child position i (0..n-1) by shifting children
-		i..n-1 to the right one position. Sets parent / indexes properly
-	 	but, unlike addChild, does NOT collapse nil-rooted t's that come in here.
-	 */
-	public void insertChild(int i, Object t) {
-		if ( children==null ) return;
-		children.add(i, t);
-		// walk others to increment their child indexes
-		// set index, parent of this one too
-		this.freshenParentAndChildIndexes(i);
-	}
-
-	public Object deleteChild(int i) {
-		if ( children==null ) {
-			return null;
-		}
-		Tree killed = (Tree)children.remove(i);
-		// walk rest and decrement their child indexes
-		this.freshenParentAndChildIndexes(i);
-		return killed;
-	}
-
-	/** Delete children from start to stop and replace with t even if t is
-	 *  a list (nil-root tree).  The number of children can increase or decrease.
-	 *  For huge child lists, inserting children can force walking the rest of
-	 *  the children to set their child index; this could be slow.
-	 */
-	public void replaceChildren(int startChildIndex, int stopChildIndex, Object t) {
-		/*
-		System.out.println("replaceChildren "+startChildIndex+", "+stopChildIndex+
-						   " with "+((BaseTree)t).toStringTree());
-		System.out.println("in="+toStringTree());
-		*/
-		if ( children==null ) {
-			throw new IllegalArgumentException("indexes invalid; no children in list");
-		}
-		int replacingHowMany = stopChildIndex - startChildIndex + 1;
-		int replacingWithHowMany;
-		BaseTree newTree = (BaseTree)t;
-		List newChildren = null;
-		// normalize to a list of children to add: newChildren
-		if ( newTree.isNil() ) {
-			newChildren = newTree.children;
-		}
-		else {
-			newChildren = new ArrayList(1);
-			newChildren.add(newTree);
-		}
-		replacingWithHowMany = newChildren.size();
-		int numNewChildren = newChildren.size();
-		int delta = replacingHowMany - replacingWithHowMany;
-		// if same number of nodes, do direct replace
-		if ( delta == 0 ) {
-			int j = 0; // index into new children
-			for (int i=startChildIndex; i<=stopChildIndex; i++) {
-				BaseTree child = (BaseTree)newChildren.get(j);
-				children.set(i, child);
-				child.setParent(this);
-				child.setChildIndex(i);
-                j++;
-            }
-		}
-		else if ( delta > 0 ) { // fewer new nodes than there were
-			// set children and then delete extra
-			for (int j=0; j<numNewChildren; j++) {
-				children.set(startChildIndex+j, newChildren.get(j));
-			}
-			int indexToDelete = startChildIndex+numNewChildren;
-			for (int c=indexToDelete; c<=stopChildIndex; c++) {
-				// delete same index, shifting everybody down each time
-				children.remove(indexToDelete);
-			}
-			freshenParentAndChildIndexes(startChildIndex);
-		}
-		else { // more new nodes than were there before
-			// fill in as many children as we can (replacingHowMany) w/o moving data
-			for (int j=0; j<replacingHowMany; j++) {
-				children.set(startChildIndex+j, newChildren.get(j));
-			}
-			int numToInsert = replacingWithHowMany-replacingHowMany;
-			for (int j=replacingHowMany; j<replacingWithHowMany; j++) {
-				children.add(startChildIndex+j, newChildren.get(j));
-			}
-			freshenParentAndChildIndexes(startChildIndex);
-		}
-		//System.out.println("out="+toStringTree());
-	}
-
-	/** Override in a subclass to change the impl of children list */
-	protected List createChildrenList() {
-		return new ArrayList();
-	}
-
-	public boolean isNil() {
-		return false;
-	}
-
-	/** Set the parent and child index values for all children of this node */
-	public void freshenParentAndChildIndexes() {
-		freshenParentAndChildIndexes(0);
-	}
-
-	public void freshenParentAndChildIndexes(int offset) {
-		int n = getChildCount();
-		for (int c = offset; c < n; c++) {
-			Tree child = (Tree)getChild(c);
-			child.setChildIndex(c);
-			child.setParent(this);
-		}
-	}
-
-	public void freshenParentAndChildIndexesDeeply() {
-		freshenParentAndChildIndexesDeeply(0);
-	}
-
-	public void freshenParentAndChildIndexesDeeply(int offset) {
-		int n = getChildCount();
-		for (int c = offset; c < n; c++) {
-			BaseTree child = (BaseTree)getChild(c);
-			child.setChildIndex(c);
-			child.setParent(this);
-			child.freshenParentAndChildIndexesDeeply();
-		}
-	}
-
-	public void sanityCheckParentAndChildIndexes() {
-		sanityCheckParentAndChildIndexes(null, -1);
-	}
-
-	public void sanityCheckParentAndChildIndexes(Tree parent, int i) {
-		if ( parent!=this.getParent() ) {
-			throw new IllegalStateException("parents don't match; expected "+parent+" found "+this.getParent());
-		}
-		if ( i!=this.getChildIndex() ) {
-			throw new IllegalStateException("child indexes don't match; expected "+i+" found "+this.getChildIndex());
-		}
-		int n = this.getChildCount();
-		for (int c = 0; c < n; c++) {
-			CommonTree child = (CommonTree)this.getChild(c);
-			child.sanityCheckParentAndChildIndexes(this, c);
-		}
-	}
-
-	/** BaseTree doesn't track child indexes. */
-	public int getChildIndex() {
-		return 0;
-	}
-	public void setChildIndex(int index) {
-	}
-
-	/** BaseTree doesn't track parent pointers. */
-	public Tree getParent() {
-		return null;
-	}
-
-    public void setParent(Tree t) {
-	}
-
-    /** Walk upwards looking for ancestor with this token type. */
-    public boolean hasAncestor(int ttype) { return getAncestor(ttype)!=null; }
-
-    /** Walk upwards and get first ancestor with this token type. */
-    public Tree getAncestor(int ttype) {
-        Tree t = this;
-        t = t.getParent();
-        while ( t!=null ) {
-            if ( t.getType()==ttype ) return t;
-            t = t.getParent();
-        }
-        return null;
-    }
-
-    /** Return a list of all ancestors of this node.  The first node of
-     *  the list is the root and the last is the parent of this node.
-     */
-    public List getAncestors() {
-        if ( getParent()==null ) return null;
-        List ancestors = new ArrayList();
-        Tree t = this;
-        t = t.getParent();
-        while ( t!=null ) {
-            ancestors.add(0, t); // insert at start
-            t = t.getParent();
-        }
-        return ancestors;
-    }
-
-    /** Print out a whole tree not just a node */
-    public String toStringTree() {
-		if ( children==null || children.size()==0 ) {
-			return this.toString();
-		}
-		StringBuffer buf = new StringBuffer();
-		if ( !isNil() ) {
-			buf.append("(");
-			buf.append(this.toString());
-			buf.append(' ');
-		}
-		for (int i = 0; children!=null && i < children.size(); i++) {
-			Tree t = (Tree)children.get(i);
-			if ( i>0 ) {
-				buf.append(' ');
-			}
-			buf.append(t.toStringTree());
-		}
-		if ( !isNil() ) {
-			buf.append(")");
-		}
-		return buf.toString();
-	}
-
-    public int getLine() {
-		return 0;
-	}
-
-	public int getCharPositionInLine() {
-		return 0;
-	}
-
-	/** Override to say how a node (not a tree) should look as text */
-	public abstract String toString();
-}
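
The BaseTree class removed above treats a token-less "nil" node as a flat list whose children are absorbed by addChild. A minimal sketch of that behavior, using the concrete CommonTree subclass from this same runtime; the class name NilListSketch and the ID token type value are made up for illustration:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;

    public class NilListSketch {
        public static void main(String[] args) {
            int ID = 5;  // arbitrary token type, only for this example
            CommonTree root = new CommonTree(new CommonToken(ID, "root"));
            CommonTree list = new CommonTree();  // no token payload => isNil() == true
            list.addChild(new CommonTree(new CommonToken(ID, "a")));
            list.addChild(new CommonTree(new CommonToken(ID, "b")));
            // Adding a nil node collapses it: a and b become root's own children.
            root.addChild(list);
            System.out.println(root.toStringTree());   // (root a b)
            System.out.println(root.getChildCount());  // 2
        }
    }
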
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTreeAdaptor.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTreeAdaptor.java
deleted file mode 100644
index 33140b1..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTreeAdaptor.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-import org.antlr.runtime.TokenStream;
-import org.antlr.runtime.RecognitionException;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/** A TreeAdaptor that works with any Tree implementation. */
-public abstract class BaseTreeAdaptor implements TreeAdaptor {
-	/** System.identityHashCode() is not always unique; we have to
-	 *  track ourselves.  That's ok, it's only for debugging, though it's
-	 *  expensive: we have to create a hashtable with all tree nodes in it.
-	 */
-	protected Map treeToUniqueIDMap;
-	protected int uniqueNodeID = 1;
-
-	public Object nil() {
-		return create(null);
-	}
-
-	/** Create a tree node that holds the start and stop tokens associated
-	 *  with an error.
-	 *
-	 *  If you specify your own kind of tree nodes, you will likely have to
-	 *  override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
-	 *  if there is no token payload, but you might have to set a different
-	 *  token type for a different node type.
-     *
-     *  You don't have to subclass CommonErrorNode; you will likely need to
-     *  subclass your own tree node class to avoid class cast exception.
-	 */
-	public Object errorNode(TokenStream input, Token start, Token stop,
-							RecognitionException e)
-	{
-		CommonErrorNode t = new CommonErrorNode(input, start, stop, e);
-		//System.out.println("returning error node '"+t+"' @index="+input.index());
-		return t;
-	}
-
-	public boolean isNil(Object tree) {
-		return ((Tree)tree).isNil();
-	}
-
-	public Object dupTree(Object tree) {
-		return dupTree(tree, null);
-	}
-
-	/** This is generic in the sense that it will work with any kind of
-	 *  tree (not just the Tree interface).  It invokes the adaptor routines,
-	 *  not the tree node routines, to do the construction.
-	 */
-	public Object dupTree(Object t, Object parent) {
-		if ( t==null ) {
-			return null;
-		}
-		Object newTree = dupNode(t);
-		// ensure new subtree root has parent/child index set
-		setChildIndex(newTree, getChildIndex(t)); // same index in new tree
-		setParent(newTree, parent);
-		int n = getChildCount(t);
-		for (int i = 0; i < n; i++) {
-			Object child = getChild(t, i);
-			Object newSubTree = dupTree(child, t);
-			addChild(newTree, newSubTree);
-		}
-		return newTree;
-	}
-
-	/** Add a child to the tree t.  If child is a flat tree (a list), make all
-	 *  in list children of t.  Warning: if t has no children, but child does
-	 *  and child isNil then you can decide it is ok to move children to t via
-	 *  t.children = child.children; i.e., without copying the array.  Just
-	 *  make sure that this is consistent with how the user will build
-	 *  ASTs.
-	 */
-	public void addChild(Object t, Object child) {
-		if ( t!=null && child!=null ) {
-			((Tree)t).addChild((Tree)child);
-		}
-	}
-
-	/** If oldRoot is a nil root, just copy or move the children to newRoot.
-	 *  If not a nil root, make oldRoot a child of newRoot.
-	 *
-	 *    old=^(nil a b c), new=r yields ^(r a b c)
-	 *    old=^(a b c), new=r yields ^(r ^(a b c))
-	 *
-	 *  If newRoot is a nil-rooted single child tree, use the single
-	 *  child as the new root node.
-	 *
-	 *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
-	 *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
-	 *
-	 *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
-	 *
-	 *    old=null, new=r yields r
-	 *    old=null, new=^(nil r) yields ^(nil r)
-	 *
-	 *  Return newRoot.  Throw an exception if newRoot is not a
-	 *  simple node or nil root with a single child node--it must be a root
-	 *  node.  If newRoot is ^(nil x) return x as newRoot.
-	 *
-	 *  Be advised that it's ok for newRoot to point at oldRoot's
-	 *  children; i.e., you don't have to copy the list.  We are
-	 *  constructing these nodes so we should have this control for
-	 *  efficiency.
-	 */
-	public Object becomeRoot(Object newRoot, Object oldRoot) {
-        //System.out.println("becomeroot new "+newRoot.toString()+" old "+oldRoot);
-        Tree newRootTree = (Tree)newRoot;
-		Tree oldRootTree = (Tree)oldRoot;
-		if ( oldRoot==null ) {
-			return newRoot;
-		}
-		// handle ^(nil real-node)
-		if ( newRootTree.isNil() ) {
-            int nc = newRootTree.getChildCount();
-            if ( nc==1 ) newRootTree = (Tree)newRootTree.getChild(0);
-            else if ( nc >1 ) {
-				// TODO: make tree run time exceptions hierarchy
-				throw new RuntimeException("more than one node as root (TODO: make exception hierarchy)");
-			}
-        }
-		// add oldRoot to newRoot; addChild takes care of case where oldRoot
-		// is a flat list (i.e., nil-rooted tree).  All children of oldRoot
-		// are added to newRoot.
-		newRootTree.addChild(oldRootTree);
-		return newRootTree;
-	}
-
-	/** Transform ^(nil x) to x and nil to null */
-	public Object rulePostProcessing(Object root) {
-		//System.out.println("rulePostProcessing: "+((Tree)root).toStringTree());
-		Tree r = (Tree)root;
-		if ( r!=null && r.isNil() ) {
-			if ( r.getChildCount()==0 ) {
-				r = null;
-			}
-			else if ( r.getChildCount()==1 ) {
-				r = (Tree)r.getChild(0);
-				// whoever invokes rule will set parent and child index
-				r.setParent(null);
-				r.setChildIndex(-1);
-			}
-		}
-		return r;
-	}
-
-	public Object becomeRoot(Token newRoot, Object oldRoot) {
-		return becomeRoot(create(newRoot), oldRoot);
-	}
-
-	public Object create(int tokenType, Token fromToken) {
-		fromToken = createToken(fromToken);
-		//((ClassicToken)fromToken).setType(tokenType);
-		fromToken.setType(tokenType);
-		Tree t = (Tree)create(fromToken);
-		return t;
-	}
-
-	public Object create(int tokenType, Token fromToken, String text) {
-        if (fromToken == null) return create(tokenType, text);
-		fromToken = createToken(fromToken);
-		fromToken.setType(tokenType);
-		fromToken.setText(text);
-		Tree t = (Tree)create(fromToken);
-		return t;
-	}
-
-	public Object create(int tokenType, String text) {
-		Token fromToken = createToken(tokenType, text);
-		Tree t = (Tree)create(fromToken);
-		return t;
-	}
-
-	public int getType(Object t) {
-		return ((Tree)t).getType();
-	}
-
-	public void setType(Object t, int type) {
-		throw new NoSuchMethodError("don't know enough about Tree node");
-	}
-
-	public String getText(Object t) {
-		return ((Tree)t).getText();
-	}
-
-	public void setText(Object t, String text) {
-		throw new NoSuchMethodError("don't know enough about Tree node");
-	}
-
-	public Object getChild(Object t, int i) {
-		return ((Tree)t).getChild(i);
-	}
-
-	public void setChild(Object t, int i, Object child) {
-		((Tree)t).setChild(i, (Tree)child);
-	}
-
-	public Object deleteChild(Object t, int i) {
-		return ((Tree)t).deleteChild(i);
-	}
-
-	public int getChildCount(Object t) {
-		return ((Tree)t).getChildCount();
-	}
-
-	public int getUniqueID(Object node) {
-		if ( treeToUniqueIDMap==null ) {
-			 treeToUniqueIDMap = new HashMap();
-		}
-		Integer prevID = (Integer)treeToUniqueIDMap.get(node);
-		if ( prevID!=null ) {
-			return prevID.intValue();
-		}
-		int ID = uniqueNodeID;
-		treeToUniqueIDMap.put(node, new Integer(ID));
-		uniqueNodeID++;
-		return ID;
-		// GC makes these nonunique:
-		// return System.identityHashCode(node);
-	}
-
-	/** Tell me how to create a token for use with imaginary token nodes.
-	 *  For example, there is probably no input symbol associated with imaginary
-	 *  token DECL, but you need to create it as a payload or whatever for
-	 *  the DECL node as in ^(DECL type ID).
-	 *
-	 *  If you care what the token payload objects' type is, you should
-	 *  override this method and any other createToken variant.
-	 */
-	public abstract Token createToken(int tokenType, String text);
-
-	/** Tell me how to create a token for use with imaginary token nodes.
-	 *  For example, there is probably no input symbol associated with imaginary
-	 *  token DECL, but you need to create it as a payload or whatever for
-	 *  the DECL node as in ^(DECL type ID).
-	 *
-	 *  This is a variant of createToken where the new token is derived from
-	 *  an actual real input token.  Typically this is for converting '{'
-	 *  tokens to BLOCK etc...  You'll see
-	 *
-	 *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
-	 *
-	 *  If you care what the token payload objects' type is, you should
-	 *  override this method and any other createToken variant.
-	 */
-	public abstract Token createToken(Token fromToken);
-}
-
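
BaseTreeAdaptor.becomeRoot above implements the ^(...) construction rules spelled out in its comment. A minimal sketch of the old=^(nil a b), new=r case, assuming CommonTreeAdaptor as the concrete factory; BecomeRootSketch and the T token type value are made up for illustration:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.Tree;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class BecomeRootSketch {
        public static void main(String[] args) {
            int T = 4;  // arbitrary token type, only for this example
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            Object flat = adaptor.nil();  // build ^(nil a b)
            adaptor.addChild(flat, adaptor.create(new CommonToken(T, "a")));
            adaptor.addChild(flat, adaptor.create(new CommonToken(T, "b")));
            Object r = adaptor.create(new CommonToken(T, "r"));
            // old=^(nil a b), new=r yields ^(r a b), per the comment above
            Object result = adaptor.becomeRoot(r, flat);
            System.out.println(((Tree) result).toStringTree());  // (r a b)
        }
    }
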
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/BufferedTreeNodeStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/BufferedTreeNodeStream.java
deleted file mode 100644
index d9a2a7e..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/BufferedTreeNodeStream.java
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-import org.antlr.runtime.TokenStream;
-import org.antlr.runtime.misc.IntArray;
-import java.util.*;
-
-/** A buffered stream of tree nodes.  Nodes can be from a tree of ANY kind.
- *
- *  This node stream sucks all nodes out of the tree specified in
- *  the constructor during construction and makes pointers into
- *  the tree using an array of Object pointers. The stream necessarily
- *  includes pointers to DOWN and UP and EOF nodes.
- *
- *  This stream knows how to mark/release for backtracking.
- *
- *  This stream is most suitable for tree interpreters that need to
- *  jump around a lot or for tree parsers requiring speed (at cost of memory).
- *  There is some duplicated functionality here with UnBufferedTreeNodeStream
- *  but just in bookkeeping, not tree walking etc...
- *
- *  TARGET DEVELOPERS:
- *
- *  This is the old CommonTreeNodeStream that buffered up entire node stream.
- *  No need to implement really as new CommonTreeNodeStream is much better
- *  and covers what we need.
- *
- *  @see CommonTreeNodeStream
- */
-public class BufferedTreeNodeStream implements TreeNodeStream {
-	public static final int DEFAULT_INITIAL_BUFFER_SIZE = 100;
-	public static final int INITIAL_CALL_STACK_SIZE = 10;
-
-    protected class StreamIterator implements Iterator {
-		int i = 0;
-		public boolean hasNext() {
-			return i<nodes.size();
-		}
-
-		public Object next() {
-			int current = i;
-			i++;
-			if ( current < nodes.size() ) {
-				return nodes.get(current);
-			}
-			return eof;
-		}
-
-		public void remove() {
-			throw new RuntimeException("cannot remove nodes from stream");
-		}
-	}
-
-	// all these navigation nodes are shared and hence they
-	// cannot contain any line/column info
-
-	protected Object down;
-	protected Object up;
-	protected Object eof;
-
-	/** The complete mapping from stream index to tree node.
-	 *  This buffer includes pointers to DOWN, UP, and EOF nodes.
-	 *  It is built upon ctor invocation.  The elements are type
-	 *  Object as we don't know what the trees look like.
-	 *
-	 *  Load upon first need of the buffer so we can set token types
-	 *  of interest for reverseIndexing.  Slows us down a wee bit to
-	 *  do all of the if p==-1 testing everywhere though.
-	 */
-	protected List nodes;
-
-	/** Pull nodes from which tree? */
-	protected Object root;
-
-	/** If this tree (root) was created from a token stream, track it. */
-	protected TokenStream tokens;
-
-	/** What tree adaptor was used to build these trees */
-	TreeAdaptor adaptor;
-
-	/** Reuse same DOWN, UP navigation nodes unless this is true */
-	protected boolean uniqueNavigationNodes = false;
-
-	/** The index into the nodes list of the current node (next node
-	 *  to consume).  If -1, nodes array not filled yet.
-	 */
-	protected int p = -1;
-
-	/** Track the last mark() call result value for use in rewind(). */
-	protected int lastMarker;
-
-	/** Stack of indexes used for push/pop calls */
-	protected IntArray calls;
-
-	public BufferedTreeNodeStream(Object tree) {
-		this(new CommonTreeAdaptor(), tree);
-	}
-
-	public BufferedTreeNodeStream(TreeAdaptor adaptor, Object tree) {
-		this(adaptor, tree, DEFAULT_INITIAL_BUFFER_SIZE);
-	}
-
-	public BufferedTreeNodeStream(TreeAdaptor adaptor, Object tree, int initialBufferSize) {
-		this.root = tree;
-		this.adaptor = adaptor;
-		nodes = new ArrayList(initialBufferSize);
-		down = adaptor.create(Token.DOWN, "DOWN");
-		up = adaptor.create(Token.UP, "UP");
-		eof = adaptor.create(Token.EOF, "EOF");
-	}
-
-	/** Walk tree with depth-first-search and fill nodes buffer.
-	 *  Don't add DOWN, UP nodes if it's a list (t.isNil() is true).
-	 */
-	protected void fillBuffer() {
-		fillBuffer(root);
-		//System.out.println("revIndex="+tokenTypeToStreamIndexesMap);
-		p = 0; // buffer of nodes initialized now
-	}
-
-	public void fillBuffer(Object t) {
-		boolean nil = adaptor.isNil(t);
-		if ( !nil ) {
-			nodes.add(t); // add this node
-		}
-		// add DOWN node if t has children
-		int n = adaptor.getChildCount(t);
-		if ( !nil && n>0 ) {
-			addNavigationNode(Token.DOWN);
-		}
-		// and now add all its children
-		for (int c=0; c<n; c++) {
-			Object child = adaptor.getChild(t,c);
-			fillBuffer(child);
-		}
-		// add UP node if t has children
-		if ( !nil && n>0 ) {
-			addNavigationNode(Token.UP);
-		}
-	}
-
-	/** What is the stream index for node? 0..n-1
-	 *  Return -1 if node not found.
-	 */
-	protected int getNodeIndex(Object node) {
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		for (int i = 0; i < nodes.size(); i++) {
-			Object t = (Object) nodes.get(i);
-			if ( t==node ) {
-				return i;
-			}
-		}
-		return -1;
-	}
-
-	/** As we flatten the tree, we use UP, DOWN nodes to represent
-	 *  the tree structure.  When debugging we need unique nodes
-	 *  so instantiate new ones when uniqueNavigationNodes is true.
-	 */
-	protected void addNavigationNode(final int ttype) {
-		Object navNode = null;
-		if ( ttype==Token.DOWN ) {
-			if ( hasUniqueNavigationNodes() ) {
-				navNode = adaptor.create(Token.DOWN, "DOWN");
-			}
-			else {
-				navNode = down;
-			}
-		}
-		else {
-			if ( hasUniqueNavigationNodes() ) {
-				navNode = adaptor.create(Token.UP, "UP");
-			}
-			else {
-				navNode = up;
-			}
-		}
-		nodes.add(navNode);
-	}
-
-	public Object get(int i) {
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		return nodes.get(i);
-	}
-
-	public Object LT(int k) {
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		if ( k==0 ) {
-			return null;
-		}
-		if ( k<0 ) {
-			return LB(-k);
-		}
-		//System.out.print("LT(p="+p+","+k+")=");
-		if ( (p+k-1) >= nodes.size() ) {
-			return eof;
-		}
-		return nodes.get(p+k-1);
-	}
-
-	public Object getCurrentSymbol() { return LT(1); }
-
-/*
-	public Object getLastTreeNode() {
-		int i = index();
-		if ( i>=size() ) {
-			i--; // if at EOF, have to start one back
-		}
-		System.out.println("start last node: "+i+" size=="+nodes.size());
-		while ( i>=0 &&
-			(adaptor.getType(get(i))==Token.EOF ||
-			 adaptor.getType(get(i))==Token.UP ||
-			 adaptor.getType(get(i))==Token.DOWN) )
-		{
-			i--;
-		}
-		System.out.println("stop at node: "+i+" "+nodes.get(i));
-		return nodes.get(i);
-	}
-*/
-	
-	/** Look backwards k nodes */
-	protected Object LB(int k) {
-		if ( k==0 ) {
-			return null;
-		}
-		if ( (p-k)<0 ) {
-			return null;
-		}
-		return nodes.get(p-k);
-	}
-
-	public Object getTreeSource() {
-		return root;
-	}
-
-	public String getSourceName() {
-		return getTokenStream().getSourceName();
-	}
-
-	public TokenStream getTokenStream() {
-		return tokens;
-	}
-
-	public void setTokenStream(TokenStream tokens) {
-		this.tokens = tokens;
-	}
-
-	public TreeAdaptor getTreeAdaptor() {
-		return adaptor;
-	}
-
-	public void setTreeAdaptor(TreeAdaptor adaptor) {
-		this.adaptor = adaptor;
-	}
-
-	public boolean hasUniqueNavigationNodes() {
-		return uniqueNavigationNodes;
-	}
-
-	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) {
-		this.uniqueNavigationNodes = uniqueNavigationNodes;
-	}
-
-	public void consume() {
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		p++;
-	}
-
-	public int LA(int i) {
-		return adaptor.getType(LT(i));
-	}
-
-	public int mark() {
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		lastMarker = index();
-		return lastMarker;
-	}
-
-	public void release(int marker) {
-		// no resources to release
-	}
-
-	public int index() {
-		return p;
-	}
-
-	public void rewind(int marker) {
-		seek(marker);
-	}
-
-	public void rewind() {
-		seek(lastMarker);
-	}
-
-	public void seek(int index) {
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		p = index;
-	}
-
-	/** Make stream jump to a new location, saving old location.
-	 *  Switch back with pop().
-	 */
-	public void push(int index) {
-		if ( calls==null ) {
-			calls = new IntArray();
-		}
-		calls.push(p); // save current index
-		seek(index);
-	}
-
-	/** Seek back to previous index saved during last push() call.
-	 *  Return top of stack (return index).
-	 */
-	public int pop() {
-		int ret = calls.pop();
-		seek(ret);
-		return ret;
-	}
-
-	public void reset() {
-		p = 0;
-		lastMarker = 0;
-        if (calls != null) {
-            calls.clear();
-        }
-    }
-
-	public int size() {
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		return nodes.size();
-	}
-
-	public Iterator iterator() {
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		return new StreamIterator();
-	}
-
-	// TREE REWRITE INTERFACE
-
-	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
-		if ( parent!=null ) {
-			adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t);
-		}
-	}
-
-	/** Used for testing, just return the token type stream */
-	public String toTokenTypeString() {
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		StringBuffer buf = new StringBuffer();
-		for (int i = 0; i < nodes.size(); i++) {
-			Object t = (Object) nodes.get(i);
-			buf.append(" ");
-			buf.append(adaptor.getType(t));
-		}
-		return buf.toString();
-	}
-
-	/** Debugging */
-	public String toTokenString(int start, int stop) {
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		StringBuffer buf = new StringBuffer();
-		for (int i = start; i < nodes.size() && i <= stop; i++) {
-			Object t = (Object) nodes.get(i);
-			buf.append(" ");
-			buf.append(adaptor.getToken(t));
-		}
-		return buf.toString();
-	}
-
-	public String toString(Object start, Object stop) {
-		System.out.println("toString");
-		if ( start==null || stop==null ) {
-			return null;
-		}
-		if ( p==-1 ) {
-			fillBuffer();
-		}
-		//System.out.println("stop: "+stop);
-		if ( start instanceof CommonTree )
-			System.out.print("toString: "+((CommonTree)start).getToken()+", ");
-		else
-			System.out.println(start);
-		if ( stop instanceof CommonTree )
-			System.out.println(((CommonTree)stop).getToken());
-		else
-			System.out.println(stop);
-		// if we have the token stream, use that to dump text in order
-		if ( tokens!=null ) {
-			int beginTokenIndex = adaptor.getTokenStartIndex(start);
-			int endTokenIndex = adaptor.getTokenStopIndex(stop);
-			// if it's a tree, use start/stop index from start node
-			// else use token range from start/stop nodes
-			if ( adaptor.getType(stop)==Token.UP ) {
-				endTokenIndex = adaptor.getTokenStopIndex(start);
-			}
-			else if ( adaptor.getType(stop)==Token.EOF ) {
-				endTokenIndex = size()-2; // don't use EOF
-			}
-			return tokens.toString(beginTokenIndex, endTokenIndex);
-		}
-		// walk nodes looking for start
-		Object t = null;
-		int i = 0;
-		for (; i < nodes.size(); i++) {
-			t = nodes.get(i);
-			if ( t==start ) {
-				break;
-			}
-		}
-		// now walk until we see stop, filling string buffer with text
-		StringBuffer buf = new StringBuffer();
-		t = nodes.get(i);
-		while ( t!=stop ) {
-			String text = adaptor.getText(t);
-			if ( text==null ) {
-				text = " "+String.valueOf(adaptor.getType(t));
-			}
-			buf.append(text);
-			i++;
-			t = nodes.get(i);
-		}
-		// include stop node too
-		String text = adaptor.getText(stop);
-		if ( text==null ) {
-			text = " "+String.valueOf(adaptor.getType(stop));
-		}
-		buf.append(text);
-		return buf.toString();
-	}
-}
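
BufferedTreeNodeStream above flattens the whole tree into a node buffer with DOWN/UP markers on first access. A minimal sketch, assuming CommonTree nodes; BufferedStreamSketch and the PLUS/INT token type values are made up for illustration:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.BufferedTreeNodeStream;
    import org.antlr.runtime.tree.CommonTree;

    public class BufferedStreamSketch {
        public static void main(String[] args) {
            int PLUS = 4, INT = 5;  // arbitrary token types, only for this example
            CommonTree plus = new CommonTree(new CommonToken(PLUS, "+"));
            plus.addChild(new CommonTree(new CommonToken(INT, "3")));
            plus.addChild(new CommonTree(new CommonToken(INT, "4")));
            BufferedTreeNodeStream nodes = new BufferedTreeNodeStream(plus);
            // The buffer holds: + DOWN 3 4 UP, so this prints the corresponding
            // token types (DOWN and UP are the Token.DOWN / Token.UP constants).
            System.out.println(nodes.toTokenTypeString());
        }
    }
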
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonErrorNode.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonErrorNode.java
deleted file mode 100644
index 26b9933..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonErrorNode.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.*;
-
-/** A node representing erroneous token range in token stream */
-public class CommonErrorNode extends CommonTree {
-	public IntStream input;
-	public Token start;
-	public Token stop;
-	public RecognitionException trappedException;
-
-	public CommonErrorNode(TokenStream input, Token start, Token stop,
-						   RecognitionException e)
-	{
-		//System.out.println("start: "+start+", stop: "+stop);
-		if ( stop==null ||
-			 (stop.getTokenIndex() < start.getTokenIndex() &&
-			  stop.getType()!=Token.EOF) )
-		{
-			// sometimes resync does not consume a token (when LT(1) is
-			// in the follow set), so stop will be one token to the left of start; adjust.
-			// Also handle case where start is the first token and no token
-			// is consumed during recovery; LT(-1) will return null.
-			stop = start;
-		}
-		this.input = input;
-		this.start = start;
-		this.stop = stop;
-		this.trappedException = e;
-	}
-
-	public boolean isNil() {
-		return false;
-	}
-
-	public int getType() {
-		return Token.INVALID_TOKEN_TYPE;
-	}
-
-	public String getText() {
-		String badText = null;
-		if ( start instanceof Token ) {
-			int i = ((Token)start).getTokenIndex();
-			int j = ((Token)stop).getTokenIndex();
-			if ( ((Token)stop).getType() == Token.EOF ) {
-				j = ((TokenStream)input).size();
-			}
-			badText = ((TokenStream)input).toString(i, j);
-		}
-		else if ( start instanceof Tree ) {
-			badText = ((TreeNodeStream)input).toString(start, stop);
-		}
-		else {
-			// people should subclass if they alter the tree type so this
-			// next one is for sure correct.
-			badText = "<unknown>";
-		}
-		return badText;
-	}
-
-	public String toString() {
-		if ( trappedException instanceof MissingTokenException ) {
-			return "<missing type: "+
-				   ((MissingTokenException)trappedException).getMissingType()+
-				   ">";
-		}
-		else if ( trappedException instanceof UnwantedTokenException ) {
-			return "<extraneous: "+
-				   ((UnwantedTokenException)trappedException).getUnexpectedToken()+
-				   ", resync="+getText()+">";
-		}
-		else if ( trappedException instanceof MismatchedTokenException ) {
-			return "<mismatched token: "+trappedException.token+", resync="+getText()+">";
-		}
-		else if ( trappedException instanceof NoViableAltException ) {
-			return "<unexpected: "+trappedException.token+
-				   ", resync="+getText()+">";
-		}
-		return "<error: "+getText()+">";
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTree.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTree.java
deleted file mode 100644
index 91c59de..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTree.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-
-/** A tree node that is a wrapper for a Token object.  After the 3.0 release
- *  while building tree rewrite stuff, it became clear that computing
- *  parent and child index is very difficult and cumbersome.  Better to
- *  spend the space in every tree node.  If you don't want these extra
- *  fields, it's easy to cut them out in your own BaseTree subclass.
- */
-public class CommonTree extends BaseTree {
-	/** A single token is the payload */
-	public Token token;
-
-	/** What token indexes bracket all tokens associated with this node
-	 *  and below?
-	 */
-	protected int startIndex=-1, stopIndex=-1;
-
-	/** Who is the parent node of this node; if null, implies node is root */
-	public CommonTree parent;
-
-	/** What index is this node in the child list? Range: 0..n-1 */
-	public int childIndex = -1;
-
-	public CommonTree() { }
-	
-	public CommonTree(CommonTree node) {
-		super(node);
-		this.token = node.token;
-		this.startIndex = node.startIndex;
-		this.stopIndex = node.stopIndex;
-	}
-
-	public CommonTree(Token t) {
-		this.token = t;
-	}
-
-	public Token getToken() {
-		return token;
-	}
-
-	public Tree dupNode() {
-		return new CommonTree(this);
-	}
-
-	public boolean isNil() {
-		return token==null;
-	}
-
-	public int getType() {
-		if ( token==null ) {
-			return Token.INVALID_TOKEN_TYPE;
-		}
-		return token.getType();
-	}
-
-	public String getText() {
-		if ( token==null ) {
-			return null;
-		}
-		return token.getText();
-	}
-
-	public int getLine() {
-		if ( token==null || token.getLine()==0 ) {
-			if ( getChildCount()>0 ) {
-				return getChild(0).getLine();
-			}
-			return 0;
-		}
-		return token.getLine();
-	}
-
-	public int getCharPositionInLine() {
-		if ( token==null || token.getCharPositionInLine()==-1 ) {
-			if ( getChildCount()>0 ) {
-				return getChild(0).getCharPositionInLine();
-			}
-			return 0;
-		}
-		return token.getCharPositionInLine();
-	}
-
-	public int getTokenStartIndex() {
-		if ( startIndex==-1 && token!=null ) {
-			return token.getTokenIndex();
-		}
-		return startIndex;
-	}
-
-	public void setTokenStartIndex(int index) {
-		startIndex = index;
-	}
-
-	public int getTokenStopIndex() {
-		if ( stopIndex==-1 && token!=null ) {
-			return token.getTokenIndex();
-		}
-		return stopIndex;
-	}
-
-	public void setTokenStopIndex(int index) {
-		stopIndex = index;
-	}
-
-    /** For every node in this subtree, make sure its start/stop tokens
-     *  are set.  Walk depth first, visit bottom up.  Only updates nodes
-     *  with at least one token index < 0.
-     */
-    public void setUnknownTokenBoundaries() {
-        if ( children==null ) {
-            if ( startIndex<0 || stopIndex<0 ) {
-                startIndex = stopIndex = token.getTokenIndex();
-            }
-            return;
-        }
-        for (int i=0; i<children.size(); i++) {
-            ((CommonTree)children.get(i)).setUnknownTokenBoundaries();
-        }
-        if ( startIndex>=0 && stopIndex>=0 ) return; // already set
-        if ( children.size() > 0 ) {
-            CommonTree firstChild = (CommonTree)children.get(0);
-            CommonTree lastChild = (CommonTree)children.get(children.size()-1);
-            startIndex = firstChild.getTokenStartIndex();
-            stopIndex = lastChild.getTokenStopIndex();
-        }
-    }
-
-	public int getChildIndex() {
-		return childIndex;
-	}
-
-	public Tree getParent() {
-		return parent;
-	}
-
-	public void setParent(Tree t) {
-		this.parent = (CommonTree)t;
-	}
-
-	public void setChildIndex(int index) {
-		this.childIndex = index;
-	}
-
-	public String toString() {
-		if ( isNil() ) {
-			return "nil";
-		}
-		if ( getType()==Token.INVALID_TOKEN_TYPE ) {
-			return "<errornode>";
-		}
-		if ( token==null ) {
-			return null;
-		}
-		return token.getText();
-	}
-}
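
CommonTree above stores the parent pointer and child index that BaseTree itself does not track, and its dupNode() copies only the node, never the children. A minimal sketch; CommonTreeSketch and the ID token type value are made up for illustration:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;

    public class CommonTreeSketch {
        public static void main(String[] args) {
            int ID = 4;  // arbitrary token type, only for this example
            CommonTree decl = new CommonTree(new CommonToken(ID, "decl"));
            CommonTree name = new CommonTree(new CommonToken(ID, "x"));
            decl.addChild(name);  // BaseTree.addChild wires parent/childIndex
            System.out.println(name.getParent() == decl);    // true
            System.out.println(name.getChildIndex());        // 0
            System.out.println(decl.toStringTree());         // (decl x)
            CommonTree copy = (CommonTree) decl.dupNode();   // node only, no children
            System.out.println(copy.getChildCount());        // 0
        }
    }
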
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeAdaptor.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeAdaptor.java
deleted file mode 100644
index ebf560b..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeAdaptor.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.CommonToken;
-import org.antlr.runtime.Token;
-
-/** A TreeAdaptor that works with any Tree implementation.  It really just
- *  provides factory methods; all the work is done by BaseTreeAdaptor.
- *  If you would like tokens other than CommonToken objects to be created,
- *  you need to override this and then set the parser tree adaptor to
- *  use your subclass.
- *
- *  To get your parser to build nodes of a different type, override
- *  create(Token), errorNode(), and to be safe, YourTreeClass.dupNode().
- *  dupNode is called to duplicate nodes during rewrite operations.
- */
-public class CommonTreeAdaptor extends BaseTreeAdaptor {
-	/** Duplicate a node.  This is part of the factory;
-	 *	override if you want another kind of node to be built.
-	 *
-	 *  I could use reflection to prevent having to override this
-	 *  but reflection is slow.
-	 */
-	public Object dupNode(Object t) {
-		if ( t==null ) return null;
-		return ((Tree)t).dupNode();
-	}
-
-	public Object create(Token payload) {
-		return new CommonTree(payload);
-	}
-
-	/** Tell me how to create a token for use with imaginary token nodes.
-	 *  For example, there is probably no input symbol associated with imaginary
-	 *  token DECL, but you need to create it as a payload or whatever for
-	 *  the DECL node as in ^(DECL type ID).
-	 *
-	 *  If you care what the token payload objects' type is, you should
-	 *  override this method and any other createToken variant.
-	 */
-	public Token createToken(int tokenType, String text) {
-		return new CommonToken(tokenType, text);
-	}
-
-	/** Tell me how to create a token for use with imaginary token nodes.
-	 *  For example, there is probably no input symbol associated with imaginary
-	 *  token DECL, but you need to create it as a payload or whatever for
-	 *  the DECL node as in ^(DECL type ID).
-	 *
-	 *  This is a variant of createToken where the new token is derived from
-	 *  an actual real input token.  Typically this is for converting '{'
-	 *  tokens to BLOCK etc...  You'll see
-	 *
-	 *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
-	 *
-	 *  If you care what the token payload objects' type is, you should
-	 *  override this method and any other createToken variant.
-	 */
-	public Token createToken(Token fromToken) {
-		return new CommonToken(fromToken);
-	}
-
-	/** Track start/stop token for subtree root created for a rule.
-	 *  Only works with Tree nodes.  For rules that match nothing,
-	 *  seems like this will yield start=i and stop=i-1 in a nil node.
-	 *  Might be useful info so I'll not force to be i..i.
-	 */
-	public void setTokenBoundaries(Object t, Token startToken, Token stopToken) {
-		if ( t==null ) return;
-		int start = 0;
-		int stop = 0;
-		if ( startToken!=null ) start = startToken.getTokenIndex();
-		if ( stopToken!=null ) stop = stopToken.getTokenIndex();
-		((Tree)t).setTokenStartIndex(start);
-		((Tree)t).setTokenStopIndex(stop);
-	}
-
-	public int getTokenStartIndex(Object t) {
-		if ( t==null ) return -1;
-		return ((Tree)t).getTokenStartIndex();
-	}
-
-	public int getTokenStopIndex(Object t) {
-		if ( t==null ) return -1;
-		return ((Tree)t).getTokenStopIndex();
-	}
-
-	public String getText(Object t) {
-		if ( t==null ) return null;
-		return ((Tree)t).getText();
-	}
-
-    public int getType(Object t) {
-		if ( t==null ) return Token.INVALID_TOKEN_TYPE;
-		return ((Tree)t).getType();
-	}
-
-	/** What is the Token associated with this node?  If
-	 *  you are not using CommonTree, then you must
-	 *  override this in your own adaptor.
-	 */
-	public Token getToken(Object t) {
-		if ( t instanceof CommonTree ) {
-			return ((CommonTree)t).getToken();
-		}
-		return null; // no idea what to do
-	}
-
-	public Object getChild(Object t, int i) {
-		if ( t==null ) return null;
-        return ((Tree)t).getChild(i);
-    }
-
-    public int getChildCount(Object t) {
-		if ( t==null ) return 0;
-        return ((Tree)t).getChildCount();
-    }
-
-	public Object getParent(Object t) {
-		if ( t==null ) return null;
-        return ((Tree)t).getParent();
-	}
-
-	public void setParent(Object t, Object parent) {
-        if ( t!=null ) ((Tree)t).setParent((Tree)parent);
-	}
-
-	public int getChildIndex(Object t) {
-        if ( t==null ) return 0;
-		return ((Tree)t).getChildIndex();
-	}
-
-	public void setChildIndex(Object t, int index) {
-        if ( t!=null ) ((Tree)t).setChildIndex(index);
-	}
-
-	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
-		if ( parent!=null ) {
-			((Tree)parent).replaceChildren(startChildIndex, stopChildIndex, t);
-		}
-	}
-}
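
The CommonTreeAdaptor comment above says to override create(Token), errorNode(), and (to be safe) dupNode() when you want nodes of a different type. A minimal sketch of just the create() override; MyNode, MyNodeAdaptor, and the scope field are made up for illustration:

    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;

    // Hypothetical node class carrying one extra field.
    class MyNode extends CommonTree {
        String scope;  // made-up payload for the example
        MyNode(Token t) { super(t); }
    }

    // Factory override so tree construction builds MyNode instances.
    class MyNodeAdaptor extends CommonTreeAdaptor {
        @Override
        public Object create(Token payload) {
            return new MyNode(payload);
        }
        // A complete version would also override errorNode() and give MyNode
        // its own dupNode(), as the class comment above advises.
    }

    // Typical wiring for an AST-producing parser: parser.setTreeAdaptor(new MyNodeAdaptor());
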
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeNodeStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeNodeStream.java
deleted file mode 100644
index dcdbdd5..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeNodeStream.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-import org.antlr.runtime.TokenStream;
-import org.antlr.runtime.misc.LookaheadStream;
-import org.antlr.runtime.misc.IntArray;
-
-import java.util.*;
-
-public class CommonTreeNodeStream extends LookaheadStream<Object> implements TreeNodeStream {
-	public static final int DEFAULT_INITIAL_BUFFER_SIZE = 100;
-	public static final int INITIAL_CALL_STACK_SIZE = 10;
-
-	/** Pull nodes from which tree? */
-	protected Object root;
-
-	/** If this tree (root) was created from a token stream, track it. */
-	protected TokenStream tokens;
-
-	/** What tree adaptor was used to build these trees */
-	TreeAdaptor adaptor;
-
-    /** The tree iterator we are using */
-    protected TreeIterator it;
-
-    /** Stack of indexes used for push/pop calls */
-    protected IntArray calls;
-
-    /** Treat (nil A B C) trees like flat A B C streams */
-    protected boolean hasNilRoot = false;
-
-    /** Tracks tree depth.  Level=0 means we're at root node level. */
-    protected int level = 0;
-
-	public CommonTreeNodeStream(Object tree) {
-		this(new CommonTreeAdaptor(), tree);
-	}
-
-	public CommonTreeNodeStream(TreeAdaptor adaptor, Object tree) {
-		this.root = tree;
-		this.adaptor = adaptor;
-        it = new TreeIterator(adaptor,root);
-	}
-
-    public void reset() {
-        super.reset();
-        it.reset();
-        hasNilRoot = false;
-        level = 0;
-        if ( calls != null ) calls.clear();
-    }
-
-    /** Pull elements from tree iterator.  Track tree level 0..max_level.
-     *  If the tree is nil-rooted, don't emit the initial nil and DOWN nodes nor the final UP.
-     */
-    public Object nextElement() {
-        Object t = it.next();
-        //System.out.println("pulled "+adaptor.getType(t));
-        if ( t == it.up ) {
-            level--;
-            if ( level==0 && hasNilRoot ) return it.next(); // don't give last UP; get EOF
-        }
-        else if ( t == it.down ) level++;
-        if ( level==0 && adaptor.isNil(t) ) { // if nil root, scarf nil, DOWN
-            hasNilRoot = true;
-            t = it.next(); // t is now DOWN, so get first real node next
-            level++;
-            t = it.next();
-        }
-        return t;
-    }
-
-    public boolean isEOF(Object o) { return adaptor.getType(o) == Token.EOF; }
-
-    public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) { }
-
-	public Object getTreeSource() {	return root; }
-
-	public String getSourceName() { return getTokenStream().getSourceName(); }
-
-	public TokenStream getTokenStream() { return tokens; }
-
-	public void setTokenStream(TokenStream tokens) { this.tokens = tokens; }
-
-	public TreeAdaptor getTreeAdaptor() { return adaptor; }
-
-	public void setTreeAdaptor(TreeAdaptor adaptor) { this.adaptor = adaptor; }
-
-    public Object get(int i) {
-        throw new UnsupportedOperationException("Absolute node indexes are meaningless in an unbuffered stream");
-    }
-
-    public int LA(int i) { return adaptor.getType(LT(i)); }
-
-    /** Make stream jump to a new location, saving old location.
-     *  Switch back with pop().
-     */
-    public void push(int index) {
-        if ( calls==null ) {
-            calls = new IntArray();
-        }
-        calls.push(p); // save current index
-        seek(index);
-    }
-
-    /** Seek back to previous index saved during last push() call.
-     *  Return top of stack (return index).
-     */
-    public int pop() {
-        int ret = calls.pop();
-        seek(ret);
-        return ret;
-    }
-
-	// TREE REWRITE INTERFACE
-
-	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
-		if ( parent!=null ) {
-			adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t);
-		}
-	}
-
-	public String toString(Object start, Object stop) {
-        // we'll have to walk from start to stop in tree; we're not keeping
-        // a complete node stream buffer
-        return "n/a";
-	}
-
-    /** For debugging; destructive: moves tree iterator to end. */
-    public String toTokenTypeString() {
-        reset();
-		StringBuffer buf = new StringBuffer();
-        Object o = LT(1);
-        int type = adaptor.getType(o);
-        while ( type!=Token.EOF ) {
-            buf.append(" ");
-            buf.append(type);
-            consume();
-            o = LT(1);
-            type = adaptor.getType(o);
-		}
-		return buf.toString();
-    }
-}
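
CommonTreeNodeStream above streams a tree one node at a time, emitting DOWN/UP navigation nodes and finishing with EOF, which is how generated tree parsers walk it via LA/LT/consume. A minimal sketch of a manual walk; NodeStreamSketch and the PLUS/INT token type values are made up for illustration:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeNodeStream;

    public class NodeStreamSketch {
        public static void main(String[] args) {
            int PLUS = 4, INT = 5;  // arbitrary token types, only for this example
            CommonTree plus = new CommonTree(new CommonToken(PLUS, "+"));
            plus.addChild(new CommonTree(new CommonToken(INT, "3")));
            plus.addChild(new CommonTree(new CommonToken(INT, "4")));
            CommonTreeNodeStream nodes = new CommonTreeNodeStream(plus);
            // Nodes come out as: + DOWN 3 4 UP, then EOF ends the walk.
            while (nodes.LA(1) != Token.EOF) {
                System.out.println(nodes.LA(1));
                nodes.consume();
            }
        }
    }
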
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java
deleted file mode 100644
index 6c519d1..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.stringtemplate.StringTemplate;
-
-import java.util.HashMap;
-
-/** A utility class to generate DOT diagrams (graphviz) from
- *  arbitrary trees.  You can pass in your own templates and
- *  can pass in any kind of tree or use the Tree interface methods.
- *  I wanted this kept separate so that you don't have to include
- *  ST just to use the org.antlr.runtime.tree.* package.
- *  This is a set of non-static methods so you can subclass
- *  to override.  For example, here is an invocation:
- *
- *      CharStream input = new ANTLRInputStream(System.in);
- *      TLexer lex = new TLexer(input);
- *      CommonTokenStream tokens = new CommonTokenStream(lex);
- *      TParser parser = new TParser(tokens);
- *      TParser.e_return r = parser.e();
- *      Tree t = (Tree)r.tree;
- *      System.out.println(t.toStringTree());
- *      DOTTreeGenerator gen = new DOTTreeGenerator();
- *      StringTemplate st = gen.toDOT(t);
- *      System.out.println(st);
- */
-public class DOTTreeGenerator {
-
-	public static StringTemplate _treeST =
-		new StringTemplate(
-			"digraph {\n\n" +
-			"\tordering=out;\n" +
-			"\tranksep=.4;\n" +
-			"\tbgcolor=\"lightgrey\"; node [shape=box, fixedsize=false, fontsize=12, fontname=\"Helvetica-bold\", fontcolor=\"blue\"\n" +
-			"\t\twidth=.25, height=.25, color=\"black\", fillcolor=\"white\", style=\"filled, solid, bold\"];\n" +
-			"\tedge [arrowsize=.5, color=\"black\", style=\"bold\"]\n\n" +
-			"  $nodes$\n" +
-			"  $edges$\n" +
-			"}\n");
-
-	public static StringTemplate _nodeST =
-			new StringTemplate("$name$ [label=\"$text$\"];\n");
-
-	public static StringTemplate _edgeST =
-			new StringTemplate("$parent$ -> $child$ // \"$parentText$\" -> \"$childText$\"\n");
-
-	/** Track node to number mapping so we can get proper node name back */
-	HashMap nodeToNumberMap = new HashMap();
-
-	/** Track node number so we can get unique node names */
-	int nodeNumber = 0;
-
-	public StringTemplate toDOT(Object tree,
-								TreeAdaptor adaptor,
-								StringTemplate _treeST,
-								StringTemplate _edgeST)
-	{
-		StringTemplate treeST = _treeST.getInstanceOf();
-		nodeNumber = 0;
-		toDOTDefineNodes(tree, adaptor, treeST);
-		nodeNumber = 0;
-		toDOTDefineEdges(tree, adaptor, treeST);
-		/*
-		if ( adaptor.getChildCount(tree)==0 ) {
-            // single node, don't do edge.
-            treeST.add("nodes", adaptor.getText(tree));
-        }
-        */
-		return treeST;
-	}
-
-	public StringTemplate toDOT(Object tree,
-								TreeAdaptor adaptor)
-	{
-		return toDOT(tree, adaptor, _treeST, _edgeST);
-	}
-
-	/** Generate DOT (graphviz) for a whole tree not just a node.
-	 *  For example, 3+4*5 should generate:
-	 *
-	 * digraph {
-	 *   node [shape=plaintext, fixedsize=true, fontsize=11, fontname="Courier",
-	 *         width=.4, height=.2];
-	 *   edge [arrowsize=.7]
-	 *   "+"->3
-	 *   "+"->"*"
-	 *   "*"->4
-	 *   "*"->5
-	 * }
-	 *
-	 * Return the ST, not a string, in case people want to alter it.
-	 *
-	 * Takes a Tree interface object.
-	 */
-	public StringTemplate toDOT(Tree tree) {
-		return toDOT(tree, new CommonTreeAdaptor());
-	}
-
-	protected void toDOTDefineNodes(Object tree,
-									TreeAdaptor adaptor,
-									StringTemplate treeST)
-	{
-		if ( tree==null ) {
-			return;
-		}
-		int n = adaptor.getChildCount(tree);
-		if ( n==0 ) {
-			// must have already dumped as child from previous
-			// invocation; do nothing
-			return;
-		}
-
-		// define parent node
-		StringTemplate parentNodeST = getNodeST(adaptor, tree);
-		treeST.setAttribute("nodes", parentNodeST);
-
-		// for each child, do a "<unique-name> [label=text]" node def
-		for (int i = 0; i < n; i++) {
-			Object child = adaptor.getChild(tree, i);
-			StringTemplate nodeST = getNodeST(adaptor, child);
-			treeST.setAttribute("nodes", nodeST);
-			toDOTDefineNodes(child, adaptor, treeST);
-		}
-	}
-
-	protected void toDOTDefineEdges(Object tree,
-									TreeAdaptor adaptor,
-									StringTemplate treeST)
-	{
-		if ( tree==null ) {
-			return;
-		}
-		int n = adaptor.getChildCount(tree);
-		if ( n==0 ) {
-			// must have already dumped as child from previous
-			// invocation; do nothing
-			return;
-		}
-
-		String parentName = "n"+getNodeNumber(tree);
-
-		// for each child, do a parent -> child edge using unique node names
-		String parentText = adaptor.getText(tree);
-		for (int i = 0; i < n; i++) {
-			Object child = adaptor.getChild(tree, i);
-			String childText = adaptor.getText(child);
-			String childName = "n"+getNodeNumber(child);
-			StringTemplate edgeST = _edgeST.getInstanceOf();
-			edgeST.setAttribute("parent", parentName);
-			edgeST.setAttribute("child", childName);
-			edgeST.setAttribute("parentText", fixString(parentText));
-			edgeST.setAttribute("childText", fixString(childText));
-			treeST.setAttribute("edges", edgeST);
-			toDOTDefineEdges(child, adaptor, treeST);
-		}
-	}
-
-	protected StringTemplate getNodeST(TreeAdaptor adaptor, Object t) {
-		String text = adaptor.getText(t);
-		StringTemplate nodeST = _nodeST.getInstanceOf();
-		String uniqueName = "n"+getNodeNumber(t);
-		nodeST.setAttribute("name", uniqueName);
-
-		nodeST.setAttribute("text", fixString(text));
-		return nodeST;
-	}
-
-	protected int getNodeNumber(Object t) {
-		Integer nI = (Integer)nodeToNumberMap.get(t);
-		if ( nI!=null ) {
-			return nI.intValue();
-		}
-		else {
-			nodeToNumberMap.put(t, new Integer(nodeNumber));
-			nodeNumber++;
-			return nodeNumber-1;
-		}
-	}
-
-    protected String fixString(String in)
-    {
-        String text = in;
-
-        if (text!=null) {
-
-            text = text.replaceAll("\"", "\\\\\"");
-            text = text.replaceAll("\\t", "    ");
-            text = text.replaceAll("\\n", "\\\\n");
-            text = text.replaceAll("\\r", "\\\\r");
-            if  (text.length() > 20)    {
-                text = text.substring(0, 8) + "..." + text.substring(text.length()-8);
-            }
-
-        }
-
-        return text;
-    }
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/ParseTree.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/ParseTree.java
deleted file mode 100644
index 5811c55..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/ParseTree.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-
-import java.util.List;
-
-/** A record of the rules used to match a token sequence.  The tokens
- *  end up as the leaves of this tree and rule nodes are the interior nodes.
- *  This really adds no functionality; it is just an alias for CommonTree
- *  that is more meaningful (specific) and holds a String to display for a node.
- */
-public class ParseTree extends BaseTree {
-	public Object payload;
-	public List hiddenTokens;
-
-	public ParseTree(Object label) {
-		this.payload = label;
-	}
-
-	public Tree dupNode() {
-		return null;
-	}
-
-	public int getType() {
-		return 0;
-	}
-
-	public String getText() {
-		return toString();
-	}
-
-	public int getTokenStartIndex() {
-		return 0;
-	}
-
-	public void setTokenStartIndex(int index) {
-	}
-
-	public int getTokenStopIndex() {
-		return 0;
-	}
-
-	public void setTokenStopIndex(int index) {
-	}
-
-	public String toString() {
-		if ( payload instanceof Token ) {
-			Token t = (Token)payload;
-			if ( t.getType() == Token.EOF ) {
-				return "<EOF>";
-			}
-			return t.getText();
-		}
-		return payload.toString();
-	}
-
-	/** Emit a token and all hidden nodes before it.  The EOF node holds all
-	 *  hidden tokens after the last real token.
-	 */
-	public String toStringWithHiddenTokens() {
-		StringBuffer buf = new StringBuffer();
-		if ( hiddenTokens!=null ) {
-			for (int i = 0; i < hiddenTokens.size(); i++) {
-				Token hidden = (Token) hiddenTokens.get(i);
-				buf.append(hidden.getText());
-			}
-		}
-		String nodeText = this.toString();
-		if ( !nodeText.equals("<EOF>") ) buf.append(nodeText);
-		return buf.toString();
-	}
-
-	/** Print out the leaves of this tree, which means printing original
-	 *  input back out.
-	 */
-	public String toInputString() {
-		StringBuffer buf = new StringBuffer();
-		_toStringLeaves(buf);
-		return buf.toString();
-	}
-
-	public void _toStringLeaves(StringBuffer buf) {
-		if ( payload instanceof Token ) { // leaf node token?
-			buf.append(this.toStringWithHiddenTokens());
-			return;
-		}
-		for (int i = 0; children!=null && i < children.size(); i++) {
-			ParseTree t = (ParseTree)children.get(i);
-			t._toStringLeaves(buf);
-		}
-	}
-}
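
For reference, a minimal, self-contained sketch of how the ParseTree class removed above can be used; the token types (4, 5) and texts are arbitrary placeholders, not values defined by ANTLR:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.ParseTree;

    public class ParseTreeDemo {
        public static void main(String[] args) {
            // interior nodes carry rule names (Strings); leaves carry tokens
            ParseTree expr = new ParseTree("expr");
            expr.addChild(new ParseTree(new CommonToken(4, "3")));
            expr.addChild(new ParseTree(new CommonToken(5, "+")));
            expr.addChild(new ParseTree(new CommonToken(4, "4")));
            System.out.println(expr.toStringTree());   // expected: (expr 3 + 4)
            System.out.println(expr.toInputString());  // expected: 3+4
        }
    }
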
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteCardinalityException.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteCardinalityException.java
deleted file mode 100644
index 7f909cd..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteCardinalityException.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-/** Base class for all exceptions thrown during AST rewrite construction.
- *  This signifies a case where the cardinalities of two or more elements
- *  in a subrule differ: (ID INT)+ where |ID|!=|INT|
- */
-public class RewriteCardinalityException extends RuntimeException {
-	public String elementDescription;
-
-	public RewriteCardinalityException(String elementDescription) {
-		this.elementDescription = elementDescription;
-	}
-
-	public String getMessage() {
-		if ( elementDescription!=null ) {
-			return elementDescription;
-		}
-		return null;
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleElementStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleElementStream.java
deleted file mode 100644
index 61f1860..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleElementStream.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/** A generic list of elements tracked in an alternative to be used in
- *  a -> rewrite rule.  We need to subclass to fill in the next() method,
- *  which returns either an AST node wrapped around a token payload or
- *  an existing subtree.
- *
- *  Once you start next()ing, do not try to add more elements.  It will
- *  break the cursor tracking I believe.
- *
- *  @see org.antlr.runtime.tree.RewriteRuleSubtreeStream
- *  @see org.antlr.runtime.tree.RewriteRuleTokenStream
- *
- *  TODO: add mechanism to detect/puke on modification after reading from stream
- */
-public abstract class RewriteRuleElementStream {
-	/** Cursor 0..n-1.  If singleElement!=null, cursor is 0 until you next(),
-	 *  which bumps it to 1 meaning no more elements.
-	 */
-	protected int cursor = 0;
-
-	/** Track single elements w/o creating a list.  Upon 2nd add, alloc list */
-	protected Object singleElement;
-
-	/** The list of tokens or subtrees we are tracking */
-	protected List elements;
-
-	/** Once a node / subtree has been used in a stream, it must be dup'd
-	 *  from then on.  Streams are reset after subrules so that the streams
-	 *  can be reused in future subrules.  So, reset must set a dirty bit.
-	 *  If dirty, then next() always returns a dup.
-	 *
-	 *  I wanted to use "naughty bit" here, but couldn't think of a way
-	 *  to use "naughty".
-	 *
-	 *  TODO: unused?
-	 */
-	protected boolean dirty = false;
-
-	/** The element or stream description; usually has name of the token or
-	 *  rule reference that this list tracks.  Can include rulename too, but
-	 *  the exception would track that info.
-	 */
-	protected String elementDescription;
-	protected TreeAdaptor adaptor;
-
-	public RewriteRuleElementStream(TreeAdaptor adaptor, String elementDescription) {
-		this.elementDescription = elementDescription;
-		this.adaptor = adaptor;
-	}
-
-	/** Create a stream with one element */
-	public RewriteRuleElementStream(TreeAdaptor adaptor,
-									String elementDescription,
-									Object oneElement)
-	{
-		this(adaptor, elementDescription);
-		add(oneElement);
-	}
-
-	/** Create a stream, but feed off an existing list */
-	public RewriteRuleElementStream(TreeAdaptor adaptor,
-									String elementDescription,
-									List elements)
-	{
-		this(adaptor, elementDescription);
-		this.singleElement = null;
-		this.elements = elements;
-	}
-
-	/** Reset the condition of this stream so that it appears we have
-	 *  not consumed any of its elements.  Elements themselves are untouched.
-	 *  Once we reset the stream, any future use will need duplicates.  Set
-	 *  the dirty bit.
-	 */
-	public void reset() {
-		cursor = 0;
-		dirty = true;
-	}
-
-	public void add(Object el) {
-		//System.out.println("add '"+elementDescription+"' is "+el);
-		if ( el==null ) {
-			return;
-		}
-		if ( elements!=null ) { // if in list, just add
-			elements.add(el);
-			return;
-		}
-		if ( singleElement == null ) { // no elements yet, track w/o list
-			singleElement = el;
-			return;
-		}
-		// adding 2nd element, move to list
-		elements = new ArrayList(5);
-		elements.add(singleElement);
-		singleElement = null;
-		elements.add(el);
-	}
-
-	/** Return the next element in the stream.  If out of elements, throw
-	 *  an exception unless size()==1.  If size is 1, then return elements[0].
-	 *  Return a duplicate node/subtree if stream is out of elements and
-	 *  size==1.  If we've already used the element, dup (dirty bit set).
-	 */
-	public Object nextTree() {
-		int n = size();
-		if ( dirty || (cursor>=n && n==1) ) {
-			// if out of elements and size is 1, dup
-			Object el = _next();
-			return dup(el);
-		}
-		// test size above then fetch
-		Object el = _next();
-		return el;
-	}
-
-	/** do the work of getting the next element, making sure that it's
-	 *  a tree node or subtree.  Deal with the optimization of single-
-	 *  element list versus list of size > 1.  Throw an exception
-	 *  if the stream is empty or we're out of elements and size>1.
-	 *  protected so you can override in a subclass if necessary.
-	 */
-	protected Object _next() {
-		int n = size();
-		if ( n ==0 ) {
-			throw new RewriteEmptyStreamException(elementDescription);
-		}
-		if ( cursor>= n) { // out of elements?
-			if ( n ==1 ) {  // if size is 1, it's ok; return and we'll dup
-				return toTree(singleElement);
-			}
-			// out of elements and size was not 1, so we can't dup
-			throw new RewriteCardinalityException(elementDescription);
-		}
-		// we have elements
-		if ( singleElement!=null ) {
-			cursor++; // move cursor even for single element list
-			return toTree(singleElement);
-		}
-		// must have more than one in list, pull from elements
-		Object o = toTree(elements.get(cursor));
-		cursor++;
-		return o;
-	}
-
-	/** When constructing trees, sometimes we need to dup a token or AST
-	 * 	subtree.  Dup'ing a token means just creating another AST node
-	 *  around it.  For trees, you must call the adaptor.dupTree() unless
-	 *  the element is for a tree root; then it must be a node dup.
-	 */
-	protected abstract Object dup(Object el);
-
-	/** Ensure stream emits trees; tokens must be converted to AST nodes.
-	 *  AST nodes can be passed through unmolested.
-	 */
-	protected Object toTree(Object el) {
-		return el;
-	}
-
-	public boolean hasNext() {
-		 return (singleElement != null && cursor < 1) ||
-			   (elements!=null && cursor < elements.size());
-	}
-
-	public int size() {
-		int n = 0;
-		if ( singleElement != null ) {
-			n = 1;
-		}
-		if ( elements!=null ) {
-			return elements.size();
-		}
-		return n;
-	}
-
-	public String getDescription() {
-		return elementDescription;
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleNodeStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleNodeStream.java
deleted file mode 100644
index 713e9ff..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleNodeStream.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import java.util.List;
-
-/** Queues up nodes matched on left side of -> in a tree parser. This is
- *  the analog of RewriteRuleTokenStream for normal parsers. 
- */
-public class RewriteRuleNodeStream extends RewriteRuleElementStream {
-
-	public RewriteRuleNodeStream(TreeAdaptor adaptor, String elementDescription) {
-		super(adaptor, elementDescription);
-	}
-
-	/** Create a stream with one element */
-	public RewriteRuleNodeStream(TreeAdaptor adaptor,
-								 String elementDescription,
-								 Object oneElement)
-	{
-		super(adaptor, elementDescription, oneElement);
-	}
-
-	/** Create a stream, but feed off an existing list */
-	public RewriteRuleNodeStream(TreeAdaptor adaptor,
-								 String elementDescription,
-								 List elements)
-	{
-		super(adaptor, elementDescription, elements);
-	}
-
-	public Object nextNode() {
-		return _next();
-	}
-
-	protected Object toTree(Object el) {
-		return adaptor.dupNode(el);
-	}
-
-	protected Object dup(Object el) {
-		// we dup every node, so we don't have to worry about calling dup;
-		// nextNode() short-circuits next(), so dup is never called.
-		throw new UnsupportedOperationException("dup can't be called for a node stream.");
-	}
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
deleted file mode 100644
index 5189f21..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import java.util.List;
-
-public class RewriteRuleSubtreeStream extends RewriteRuleElementStream {
-
-	public RewriteRuleSubtreeStream(TreeAdaptor adaptor, String elementDescription) {
-		super(adaptor, elementDescription);
-	}
-
-	/** Create a stream with one element */
-	public RewriteRuleSubtreeStream(TreeAdaptor adaptor,
-									String elementDescription,
-									Object oneElement)
-	{
-		super(adaptor, elementDescription, oneElement);
-	}
-
-	/** Create a stream, but feed off an existing list */
-	public RewriteRuleSubtreeStream(TreeAdaptor adaptor,
-									String elementDescription,
-									List elements)
-	{
-		super(adaptor, elementDescription, elements);
-	}
-
-	/** Treat next element as a single node even if it's a subtree.
-	 *  This is used instead of next() when the result has to be a
-	 *  tree root node.  Also prevents us from duplicating recently-added
-	 *  children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
-	 *  must dup the type node, but ID has been added.
-	 *
-	 *  Referencing a rule result twice is ok; dup entire tree as
-	 *  we can't be adding trees as root; e.g., expr expr.
-	 *
-	 *  Hideous code duplication here with super.next().  Can't think of
-	 *  a proper way to refactor.  This needs to always call dup node
-	 *  and super.next() doesn't know which to call: dup node or dup tree.
-	 */
-	public Object nextNode() {
-		//System.out.println("nextNode: elements="+elements+", singleElement="+((Tree)singleElement).toStringTree());
-		int n = size();
-		if ( dirty || (cursor>=n && n==1) ) {
-			// if out of elements and size is 1, dup (at most a single node
-			// since this is for making root nodes).
-			Object el = _next();
-			return adaptor.dupNode(el);
-		}
-		// test size above then fetch
-		Object tree = _next();
-		while (adaptor.isNil(tree) && adaptor.getChildCount(tree) == 1)
-			tree = adaptor.getChild(tree, 0);
-		//System.out.println("_next="+((Tree)tree).toStringTree());
-		Object el = adaptor.dupNode(tree); // dup just the root (want node here)
-		return el;
-	}
-
-	protected Object dup(Object el) {
-		return adaptor.dupTree(el);
-	}
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleTokenStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleTokenStream.java
deleted file mode 100644
index 4cd7b08..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleTokenStream.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-
-import java.util.List;
-
-public class RewriteRuleTokenStream extends RewriteRuleElementStream {
-
-	public RewriteRuleTokenStream(TreeAdaptor adaptor, String elementDescription) {
-		super(adaptor, elementDescription);
-	}
-
-	/** Create a stream with one element */
-	public RewriteRuleTokenStream(TreeAdaptor adaptor,
-								  String elementDescription,
-								  Object oneElement)
-	{
-		super(adaptor, elementDescription, oneElement);
-	}
-
-	/** Create a stream, but feed off an existing list */
-	public RewriteRuleTokenStream(TreeAdaptor adaptor,
-								  String elementDescription,
-								  List elements)
-	{
-		super(adaptor, elementDescription, elements);
-	}
-
-	/** Get next token from stream and make a node for it */
-	public Object nextNode() {
-		Token t = (Token)_next();
-		return adaptor.create(t);
-	}
-
-	public Token nextToken() {
-		return (Token)_next();
-	}
-
-	/** Don't convert to a tree unless they explicitly call nextTree.
-	 *  This way we can do hetero tree nodes in rewrite.
-	 */
-	protected Object toTree(Object el) {
-		return el;
-	}
-
-	protected Object dup(Object el) {
-		throw new UnsupportedOperationException("dup can't be called for a token stream.");
-	}
-}
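
The RewriteRule*Stream classes deleted above are normally instantiated by generated parser code; the sketch below only exercises the shared add()/hasNext()/nextNode() behaviour directly. The token type 4 and the "token ID" description are arbitrary for illustration:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.RewriteRuleTokenStream;

    public class RewriteStreamDemo {
        public static void main(String[] args) {
            CommonTreeAdaptor adaptor = new CommonTreeAdaptor();
            RewriteRuleTokenStream ids = new RewriteRuleTokenStream(adaptor, "token ID");
            ids.add(new CommonToken(4, "a"));  // first add is tracked without a list
            ids.add(new CommonToken(4, "b"));  // second add moves both into a list
            while (ids.hasNext()) {
                // nextNode() wraps each queued token in a fresh tree node
                System.out.println(ids.nextNode());  // prints a, then b
            }
        }
    }
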
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/Tree.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/Tree.java
deleted file mode 100644
index 7875be3..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/Tree.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-
-import java.util.List;
-
-/** What does a tree look like?  ANTLR has a number of support classes
- *  such as CommonTreeNodeStream that work on these kinds of trees.  You
- *  don't have to make your trees implement this interface, but if you do,
- *  you'll be able to use more support code.
- *
- *  NOTE: When constructing trees, ANTLR can build any kind of tree; it can
- *  even use Token objects as trees if you add a child list to your tokens.
- *
- *  This is a tree node without any payload; just navigation and factory stuff.
- */
-public interface Tree {
-	public static final Tree INVALID_NODE = new CommonTree(Token.INVALID_TOKEN);
-
-	Tree getChild(int i);
-
-	int getChildCount();
-
-	// Tree tracks parent and child index now > 3.0
-
-	public Tree getParent();
-
-	public void setParent(Tree t);
-
-    /** Is there a node above with token type ttype? */
-    public boolean hasAncestor(int ttype);
-
-    /** Walk upwards and get first ancestor with this token type. */
-    public Tree getAncestor(int ttype);
-
-    /** Return a list of all ancestors of this node.  The first node of
-     *  list is the root and the last is the parent of this node.
-     */
-    public List getAncestors();
-
-    /** This node is what child index? 0..n-1 */
-	public int getChildIndex();
-
-	public void setChildIndex(int index);
-
-	/** Set the parent and child index values for all children */
-	public void freshenParentAndChildIndexes();
-
-	/** Add t as a child to this node.  If t is null, do nothing.  If t
-	 *  is nil, add all children of t to this' children.
-	 */
-	void addChild(Tree t);
-
-	/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
-	public void setChild(int i, Tree t);
-
-	public Object deleteChild(int i);
-
-	/** Delete children from start to stop and replace with t even if t is
-	 *  a list (nil-root tree).  The number of children can increase or decrease.
-	 *  For huge child lists, inserting children can force walking the rest of the
-	 *  children to set their child index; this could be slow.
-	 */
-	public void replaceChildren(int startChildIndex, int stopChildIndex, Object t);	
-
-	/** Indicates the node is a nil node but may still have children, meaning
-	 *  the tree is a flat list.
-	 */
-	boolean isNil();
-
-	/**  What is the smallest token index (indexing from 0) for this node
-	 *   and its children?
-	 */
-	int getTokenStartIndex();
-
-	void setTokenStartIndex(int index);
-
-	/**  What is the largest token index (indexing from 0) for this node
-	 *   and its children?
-	 */
-	int getTokenStopIndex();
-
-	void setTokenStopIndex(int index);
-
-	Tree dupNode();
-
-	/** Return a token type; needed for tree parsing */
-	int getType();
-
-	String getText();
-
-	/** In case we don't have a token payload, what is the line for errors? */
-	int getLine();
-
-	int getCharPositionInLine();
-
-	String toStringTree();
-
-	String toString();
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeFilter.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeFilter.java
deleted file mode 100644
index b6a7e05..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeFilter.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.RecognizerSharedState;
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.TokenStream;
-
-/**
- Cut-n-paste from material I'm not using in the book anymore (edit later
- to make sense):
-
- Now, how are we going to test these tree patterns against every
-subtree in our original tree?  In what order should we visit nodes?
-For this application, it turns out we need a simple ``apply once''
-rule application strategy and a ``down then up'' tree traversal
-strategy.  Let's look at rule application first.
-
-As we visit each node, we need to see if any of our patterns match. If
-a pattern matches, we execute the associated tree rewrite and move on
-to the next node. In other words, we only look for a single rule
-application opportunity (we'll see below that we sometimes need to
-repeatedly apply rules). The following method applies a rule in a @cl
-TreeParser (derived from a tree grammar) to a tree:
-
-Here is where we referenced code/walking/patterns/TreePatternMatcher.java:
-
-It uses reflection to look up the appropriate rule within the generated
-tree parser class (@cl Simplify in this case). Most of the time, the
-rule will not match the tree.  To avoid issuing syntax errors and
-attempting error recovery, it bumps up the backtracking level.  Upon
-failure, the invoked rule immediately returns. If you don't plan on
-using this technique in your own ANTLR-based application, don't sweat
-the details. This method boils down to ``call a rule to match a tree,
-executing any embedded actions and rewrite rules.''
-
-At this point, we know how to define tree grammar rules and how to
-apply them to a particular subtree. The final piece of the tree
-pattern matcher is the actual tree traversal. We have to get the
-correct node visitation order.  In particular, we need to perform the
-scalar-vector multiply transformation on the way down (preorder) and
-we need to reduce multiply-by-zero subtrees on the way up (postorder).
-
-To implement a top-down visitor, we do a depth first walk of the tree,
-executing an action in the preorder position. To get a bottom-up
-visitor, we execute an action in the postorder position.  ANTLR
-provides a standard @cl TreeVisitor class with a depth first search @v
-visit method. That method executes either a @m pre or @m post method
-or both. In our case, we need to call @m applyOnce in both. On the way
-down, we'll look for @r vmult patterns. On the way up,
-we'll look for @r mult0 patterns.
- */
-public class TreeFilter extends TreeParser {
-    public interface fptr {
-        public void rule() throws RecognitionException;
-    }
-
-    protected TokenStream originalTokenStream;
-    protected TreeAdaptor originalAdaptor;
-
-    public TreeFilter(TreeNodeStream input) {
-        this(input, new RecognizerSharedState());
-    }
-    public TreeFilter(TreeNodeStream input, RecognizerSharedState state) {
-        super(input, state);
-        originalAdaptor = input.getTreeAdaptor();
-        originalTokenStream = input.getTokenStream();
-    }
-
-    public void applyOnce(Object t, fptr whichRule) {
-        if ( t==null ) return;
-        try {
-            // share TreeParser object but not parsing-related state
-            state = new RecognizerSharedState();
-            input = new CommonTreeNodeStream(originalAdaptor, t);
-            ((CommonTreeNodeStream)input).setTokenStream(originalTokenStream);
-            setBacktrackingLevel(1);
-            whichRule.rule();
-            setBacktrackingLevel(0);
-        }
-        catch (RecognitionException e) { ; }
-    }
-
-    public void downup(Object t) {
-        TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor());
-        TreeVisitorAction actions = new TreeVisitorAction() {
-            public Object pre(Object t)  { applyOnce(t, topdown_fptr); return t; }
-            public Object post(Object t) { applyOnce(t, bottomup_fptr); return t; }
-        };
-        v.visit(t, actions);
-    }
-        
-    fptr topdown_fptr = new fptr() {
-        public void rule() throws RecognitionException {
-            topdown();
-        }
-    };
-
-    fptr bottomup_fptr = new fptr() {
-        public void rule() throws RecognitionException {
-            bottomup();
-        }
-    };
-
-    // methods the downup strategy uses to do the up and down rules.
-    // to override, just define tree grammar rule topdown and turn on
-    // filter=true.
-    public void topdown() throws RecognitionException {;}
-    public void bottomup() throws RecognitionException {;}
-}
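
As a sketch of the downup() traversal described in the comment above: topdown()/bottomup() are normally generated from a tree grammar with filter=true, but a hand-written subclass can illustrate the visiting order. The PrintFilter name, tree shape, and token types below are invented for the example:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.RecognitionException;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeNodeStream;
    import org.antlr.runtime.tree.TreeFilter;

    public class PrintFilter extends TreeFilter {
        public PrintFilter(CommonTreeNodeStream input) { super(input); }

        @Override
        public void topdown() throws RecognitionException {
            // applyOnce() has rewound input to the current subtree root
            System.out.println("pre:  " + input.LT(1));
        }

        @Override
        public void bottomup() throws RecognitionException {
            System.out.println("post: " + input.LT(1));
        }

        public static void main(String[] args) {
            // build ^('+' 3 4) by hand; token types are arbitrary here
            CommonTree plus = new CommonTree(new CommonToken(1, "+"));
            plus.addChild(new CommonTree(new CommonToken(2, "3")));
            plus.addChild(new CommonTree(new CommonToken(2, "4")));
            new PrintFilter(new CommonTreeNodeStream(plus)).downup(plus);
        }
    }
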
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeIterator.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeIterator.java
deleted file mode 100644
index 43ead6d..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeIterator.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-import org.antlr.runtime.CommonToken;
-import org.antlr.runtime.misc.FastQueue;
-
-import java.util.Iterator;
-
-/** Return a node stream from a doubly-linked tree whose nodes
- *  know what child index they are at.  remove() is not supported.
- *
- *  Emits navigation nodes (DOWN, UP, and EOF) to show the tree structure.
- */
-public class TreeIterator implements Iterator {
-    protected TreeAdaptor adaptor;
-    protected Object root;
-    protected Object tree;
-    protected boolean firstTime = true;
-
-    // navigation nodes to return during walk and at end
-    public Object up;
-    public Object down;
-    public Object eof;
-
-    /** If we emit UP/DOWN nodes, we need to spit out multiple nodes per
-     *  next() call.
-     */
-    protected FastQueue nodes;
-
-    public TreeIterator(Object tree) {
-        this(new CommonTreeAdaptor(),tree);
-    }
-
-    public TreeIterator(TreeAdaptor adaptor, Object tree) {
-        this.adaptor = adaptor;
-        this.tree = tree;
-        this.root = tree;
-        nodes = new FastQueue();
-        down = adaptor.create(Token.DOWN, "DOWN");
-        up = adaptor.create(Token.UP, "UP");
-        eof = adaptor.create(Token.EOF, "EOF");
-    }
-
-    public void reset() {
-        firstTime = true;
-        tree = root;
-        nodes.clear();
-    }
-
-    public boolean hasNext() {
-        if ( firstTime ) return root!=null;
-        if ( nodes!=null && nodes.size()>0 ) return true;
-        if ( tree==null ) return false;
-        if ( adaptor.getChildCount(tree)>0 ) return true;
-        return adaptor.getParent(tree)!=null; // back at root?
-    }
-
-    public Object next() {
-        if ( firstTime ) { // initial condition
-            firstTime = false;
-            if ( adaptor.getChildCount(tree)==0 ) { // single node tree (special)
-                nodes.add(eof);
-                return tree;
-            }
-            return tree;
-        }
-        // if any queued up, use those first
-        if ( nodes!=null && nodes.size()>0 ) return nodes.remove();
-
-        // no nodes left?
-        if ( tree==null ) return eof;
-
-        // next node will be child 0 if any children
-        if ( adaptor.getChildCount(tree)>0 ) {
-            tree = adaptor.getChild(tree, 0);
-            nodes.add(tree); // real node is next after DOWN
-            return down;
-        }
-        // if no children, look for next sibling of tree or ancestor
-        Object parent = adaptor.getParent(tree);
-        // while we're out of siblings, keep popping back up towards root
-        while ( parent!=null &&
-                adaptor.getChildIndex(tree)+1 >= adaptor.getChildCount(parent) )
-        {
-            nodes.add(up); // we're moving back up
-            tree = parent;
-            parent = adaptor.getParent(tree);
-        }
-        // no nodes left?
-        if ( parent==null ) {
-            tree = null; // back at root? nothing left then
-            nodes.add(eof); // add to queue, might have UP nodes in there
-            return nodes.remove();
-        }
-
-        // must have found a node with an unvisited sibling
-        // move to it and return it
-        int nextSiblingIndex = adaptor.getChildIndex(tree) + 1;
-        tree = adaptor.getChild(parent, nextSiblingIndex);
-        nodes.add(tree); // add to queue, might have UP nodes in there
-        return nodes.remove();
-    }
-
-    public void remove() { throw new UnsupportedOperationException(); }
-}
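
A minimal sketch of the flattened walk produced by the TreeIterator deleted above; the tree and token types are made up for the example:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.TreeIterator;

    public class TreeIteratorDemo {
        public static void main(String[] args) {
            // build ^('+' 3 4) by hand; token types are arbitrary here
            CommonTree plus = new CommonTree(new CommonToken(1, "+"));
            plus.addChild(new CommonTree(new CommonToken(2, "3")));
            plus.addChild(new CommonTree(new CommonToken(2, "4")));

            TreeIterator it = new TreeIterator(plus);
            while (it.hasNext()) {
                System.out.print(it.next() + " ");  // expected: + DOWN 3 4 UP EOF
            }
            System.out.println();
        }
    }
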
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeNodeStream.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeNodeStream.java
deleted file mode 100644
index df0ad34..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeNodeStream.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.IntStream;
-import org.antlr.runtime.TokenStream;
-
-/** A stream of tree nodes, accessing nodes from a tree of some kind */
-public interface TreeNodeStream extends IntStream {
-	/** Get a tree node at an absolute index i; 0..n-1.
-	 *  If you don't want to buffer up nodes, then this method makes no
-	 *  sense for you.
-	 */
-	public Object get(int i);
-
-	/** Get tree node at current input pointer + i ahead where i=1 is next node.
-	 *  i<0 indicates nodes in the past.  So LT(-1) is previous node, but
-	 *  implementations are not required to provide results for k < -1.
-	 *  LT(0) is undefined.  For i>=n, return null.
-	 *  Return null for LT(0) and any index that results in an absolute address
-	 *  that is negative.
-	 *
-	 *  This is analogous to the LT() method of the TokenStream, but this
-	 *  returns a tree node instead of a token.  Makes code gen identical
-	 *  for both parser and tree grammars. :)
-	 */
-	public Object LT(int k);
-
-	/** Where is this stream pulling nodes from?  This is not the name, but
-	 *  the object that provides node objects.
-	 */
-	public Object getTreeSource();
-
-	/** If the tree associated with this stream was created from a TokenStream,
-	 *  you can specify it here.  Used to do rule $text attribute in tree
-	 *  parser.  Optional unless you use tree parser rule text attribute
-	 *  or output=template and rewrite=true options.
-	 */
-	public TokenStream getTokenStream();
-
-	/** What adaptor can tell me how to interpret/navigate nodes and
-	 *  trees.  E.g., get text of a node.
-	 */
-	public TreeAdaptor getTreeAdaptor();
-
-	/** As we flatten the tree, we use UP, DOWN nodes to represent
-	 *  the tree structure.  When debugging we need unique nodes
-	 *  so we have to instantiate new ones.  When doing normal tree
-	 *  parsing, it's slow and a waste of memory to create unique
-	 *  navigation nodes.  The default should be false.
-	 */
-	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes);
-
-    /** Reset the tree node stream in such a way that it acts like
-     *  a freshly constructed stream.
-     */
-    public void reset();
-
-	/** Return the text of all nodes from start to stop, inclusive.
-	 *  If the stream does not buffer all the nodes then it can still
-	 *  walk recursively from start until stop.  You can always return
-	 *  null or "" too, but in that case users should of course not access
-	 *  $ruleLabel.text in an action.
-	 */
-	public String toString(Object start, Object stop);
-
-
-	// REWRITING TREES (used by tree parser)
-
-	/** Replace the children of parent from child index start to stop with t,
-	 *  which might be a list.  The number of children may be different
-	 *  after this call.  The stream is notified because it is walking the
-	 *  tree and might need to know you are monkeying with the underlying
-	 *  tree.  Also, it might be able to modify the node stream to avoid
-	 *  restreaming for future phases.
-	 *
-	 *  If parent is null, don't do anything; must be at root of overall tree.
-	 *  Can't replace whatever points to the parent externally.  Do nothing.
-	 */
-	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t);
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeParser.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeParser.java
deleted file mode 100644
index c40f81f..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeParser.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.*;
-
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/** A parser for a stream of tree nodes.  "tree grammars" result in a subclass
- *  of this.  All the error reporting and recovery is shared with Parser via
- *  the BaseRecognizer superclass.
-*/
-public class TreeParser extends BaseRecognizer {
-	public static final int DOWN = Token.DOWN;
-	public static final int UP = Token.UP;
-
-    // precompiled regex used by inContext
-    static String dotdot = ".*[^.]\\.\\.[^.].*";
-    static String doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*";
-    static Pattern dotdotPattern = Pattern.compile(dotdot);
-    static Pattern doubleEtcPattern = Pattern.compile(doubleEtc);
-
-	protected TreeNodeStream input;
-
-	public TreeParser(TreeNodeStream input) {
-		super(); // highlight that we go to super to set state object
-		setTreeNodeStream(input);
-	}
-
-	public TreeParser(TreeNodeStream input, RecognizerSharedState state) {
-		super(state); // share the state object with another parser
-		setTreeNodeStream(input);
-    }
-
-	public void reset() {
-		super.reset(); // reset all recognizer state variables
-		if ( input!=null ) {
-			input.seek(0); // rewind the input
-		}
-	}
-
-	/** Set the input stream */
-	public void setTreeNodeStream(TreeNodeStream input) {
-		this.input = input;
-	}
-
-	public TreeNodeStream getTreeNodeStream() {
-		return input;
-	}
-
-	public String getSourceName() {
-		return input.getSourceName();
-	}
-
-	protected Object getCurrentInputSymbol(IntStream input) {
-		return ((TreeNodeStream)input).LT(1);
-	}
-
-	protected Object getMissingSymbol(IntStream input,
-									  RecognitionException e,
-									  int expectedTokenType,
-									  BitSet follow)
-	{
-		String tokenText =
-			"<missing "+getTokenNames()[expectedTokenType]+">";
-        TreeAdaptor adaptor = ((TreeNodeStream)e.input).getTreeAdaptor();
-        return adaptor.create(new CommonToken(expectedTokenType, tokenText));
-	}
-
-    /** Matching '.' in a tree parser has special meaning: skip the node, or the
-	 *  entire subtree if the node has children.  If there are children, scan until
-	 *  the corresponding UP node.
-	 */
-	public void matchAny(IntStream ignore) { // ignore stream, copy of input
-		state.errorRecovery = false;
-		state.failed = false;
-		Object look = input.LT(1);
-		if ( input.getTreeAdaptor().getChildCount(look)==0 ) {
-			input.consume(); // not subtree, consume 1 node and return
-			return;
-		}
-		// current node is a subtree, skip to corresponding UP.
-		// must count nesting level to get right UP
-		int level=0;
-		int tokenType = input.getTreeAdaptor().getType(look);
-		while ( tokenType!=Token.EOF && !(tokenType==UP && level==0) ) {
-			input.consume();
-			look = input.LT(1);
-			tokenType = input.getTreeAdaptor().getType(look);
-			if ( tokenType == DOWN ) {
-				level++;
-			}
-			else if ( tokenType == UP ) {
-				level--;
-			}
-		}
-		input.consume(); // consume UP
-	}
-
-    /** We have DOWN/UP nodes in the stream that have no line info, so override;
-	 *  plus we want to alter the exception type.  Don't try to recover
-	 *  from tree parser errors inline...
-     */
-    protected Object recoverFromMismatchedToken(IntStream input,
-                                                int ttype,
-                                                BitSet follow)
-        throws RecognitionException
-    {
-        throw new MismatchedTreeNodeException(ttype, (TreeNodeStream)input);
-    }
-
-    /** Prefix the error message with the grammar name because the message is
-	 *  always intended for the programmer; the parser, not the user, built
-	 *  the input tree.
-	 */
-	public String getErrorHeader(RecognitionException e) {
-		return getGrammarFileName()+": node from "+
-			   (e.approximateLineInfo?"after ":"")+"line "+e.line+":"+e.charPositionInLine;
-	}
-
-	/** Tree parsers parse nodes; nodes usually have a token object as
-	 *  payload.  Set the exception token and do the default behavior.
-	 */
-	public String getErrorMessage(RecognitionException e, String[] tokenNames) {
-		if ( this instanceof TreeParser ) {
-			TreeAdaptor adaptor = ((TreeNodeStream)e.input).getTreeAdaptor();
-			e.token = adaptor.getToken(e.node);
-			if ( e.token==null ) { // could be an UP/DOWN node
-				e.token = new CommonToken(adaptor.getType(e.node),
-										  adaptor.getText(e.node));
-			}
-		}
-		return super.getErrorMessage(e, tokenNames);
-	}
-
-	/** Check if current node in input has a context.  Context means sequence
-	 *  of nodes towards root of tree.  For example, you might say context
-	 *  is "MULT" which means my parent must be MULT.  "CLASS VARDEF" says
-	 *  the current node must be a child of a VARDEF whose parent is a CLASS node.
-	 *  You can use "..." to mean zero-or-more nodes.  "METHOD ... VARDEF"
-	 *  means my parent is VARDEF and somewhere above that is a METHOD node.
-	 *  The first node in the context is not necessarily the root.  The context
-	 *  matcher stops matching and returns true when it runs out of context.
-	 *  There is no way to force the first node to be the root.
-	 */
-	public boolean inContext(String context) {
-		return inContext(input.getTreeAdaptor(), getTokenNames(), input.LT(1), context);
-	}
-
-	/** The worker for inContext.  It's static and full of parameters for
-	 *  testing purposes.
-	 */
-	public static boolean inContext(TreeAdaptor adaptor,
-									String[] tokenNames,
-									Object t,
-									String context)
-	{
-		Matcher dotdotMatcher = dotdotPattern.matcher(context);
-		Matcher doubleEtcMatcher = doubleEtcPattern.matcher(context);
-		if ( dotdotMatcher.find() ) { // don't allow "..", must be "..."
-			throw new IllegalArgumentException("invalid syntax: ..");
-		}
-		if ( doubleEtcMatcher.find() ) { // don't allow double "..."
-			throw new IllegalArgumentException("invalid syntax: ... ...");
-		}
-		context = context.replaceAll("\\.\\.\\.", " ... "); // ensure spaces around ...
-		context = context.trim();
-		String[] nodes = context.split("\\s+");
-		int ni = nodes.length-1;
-		t = adaptor.getParent(t);
-		while ( ni>=0 && t!=null ) {
-			if ( nodes[ni].equals("...") ) {
-				// walk upwards until we see nodes[ni-1] then continue walking
-				if ( ni==0 ) return true; // ... at start is no-op
-				String goal = nodes[ni-1];
-				Object ancestor = getAncestor(adaptor, tokenNames, t, goal);
-				if ( ancestor==null ) return false;
-				t = ancestor;
-				ni--;
-			}
-			String name = tokenNames[adaptor.getType(t)];
-			if ( !name.equals(nodes[ni]) ) {
-				//System.err.println("not matched: "+nodes[ni]+" at "+t);
-				return false;
-			}
-			// advance to parent and to previous element in context node list
-			ni--;
-			t = adaptor.getParent(t);
-		}
-
-		if ( t==null && ni>=0 ) return false; // at root but more nodes to match
-		return true;
-	}
-
-	/** Helper for static inContext */
-	protected static Object getAncestor(TreeAdaptor adaptor, String[] tokenNames, Object t, String goal) {
-		while ( t!=null ) {
-			String name = tokenNames[adaptor.getType(t)];
-			if ( name.equals(goal) ) return t;
-			t = adaptor.getParent(t);
-		}
-		return null;
-	}
-
-	public void traceIn(String ruleName, int ruleIndex)  {
-		super.traceIn(ruleName, ruleIndex, input.LT(1));
-	}
-
-	public void traceOut(String ruleName, int ruleIndex)  {
-		super.traceOut(ruleName, ruleIndex, input.LT(1));
-	}
-}
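The inContext() helpers deleted above walk the parent chain of the current node, consuming context words from right to left and treating "..." as "zero or more intervening ancestors". Below is a minimal standalone sketch of that walk; the Node class and its fields are made-up stand-ins for adaptor.getParent() and the token-name lookup in the real runtime.

// Standalone sketch of the parent-chain matching behind inContext().
class ContextSketch {
    static class Node {
        final String name;
        final Node parent;
        Node(String name, Node parent) { this.name = name; this.parent = parent; }
    }

    /** True if the ancestors of t (nearest first) match the space-separated
     *  context, where "..." skips zero or more ancestors. */
    static boolean inContext(Node t, String context) {
        String[] words = context.trim().split("\\s+");
        int wi = words.length - 1;
        Node p = t.parent;                      // start at the parent, like the runtime
        while (wi >= 0 && p != null) {
            if (words[wi].equals("...")) {
                if (wi == 0) return true;       // "..." at the start matches anything above
                p = ancestorNamed(p, words[wi - 1]);
                if (p == null) return false;
                wi--;
            }
            if (!p.name.equals(words[wi])) return false;
            wi--;
            p = p.parent;
        }
        return !(p == null && wi >= 0);         // fail only if context words are left over
    }

    static Node ancestorNamed(Node t, String goal) {
        for (Node p = t; p != null; p = p.parent)
            if (p.name.equals(goal)) return p;
        return null;
    }

    public static void main(String[] args) {
        Node cls    = new Node("CLASS", null);
        Node method = new Node("METHOD", cls);
        Node vardef = new Node("VARDEF", method);
        Node id     = new Node("ID", vardef);
        System.out.println(inContext(id, "CLASS ... VARDEF")); // true
        System.out.println(inContext(id, "MULT"));             // false
    }
}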
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRewriter.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRewriter.java
deleted file mode 100644
index 91aee93..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRewriter.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.RecognizerSharedState;
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.TokenStream;
-
-public class TreeRewriter extends TreeParser {
-    public interface fptr {
-        public Object rule() throws RecognitionException;
-    }
-
-    protected boolean showTransformations = false;
-
-    protected TokenStream originalTokenStream;
-    protected TreeAdaptor originalAdaptor;
-    
-    public TreeRewriter(TreeNodeStream input) {
-        this(input, new RecognizerSharedState());
-    }
-    public TreeRewriter(TreeNodeStream input, RecognizerSharedState state) {
-        super(input, state);
-        originalAdaptor = input.getTreeAdaptor();
-        originalTokenStream = input.getTokenStream();        
-    }
-
-    public Object applyOnce(Object t, fptr whichRule) {
-        if ( t==null ) return null;
-        try {
-            // share TreeParser object but not parsing-related state
-            state = new RecognizerSharedState();
-            input = new CommonTreeNodeStream(originalAdaptor, t);
-            ((CommonTreeNodeStream)input).setTokenStream(originalTokenStream);
-            setBacktrackingLevel(1);
-            TreeRuleReturnScope r = (TreeRuleReturnScope)whichRule.rule();
-            setBacktrackingLevel(0);
-            if ( failed() ) return t;
-            if ( showTransformations &&
-                 r!=null && !t.equals(r.getTree()) && r.getTree()!=null )
-            {
-                reportTransformation(t, r.getTree());
-            }
-            if ( r!=null && r.getTree()!=null ) return r.getTree();
-            else return t;
-        }
-        catch (RecognitionException e) { ; }
-        return t;
-    }
-
-    public Object applyRepeatedly(Object t, fptr whichRule) {
-        boolean treeChanged = true;
-        while ( treeChanged ) {
-            Object u = applyOnce(t, whichRule);
-            treeChanged = !t.equals(u);
-            t = u;
-        }
-        return t;
-    }
-
-    public Object downup(Object t) { return downup(t, false); }
-
-    public Object downup(Object t, boolean showTransformations) {
-        this.showTransformations = showTransformations;
-        TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor());
-        TreeVisitorAction actions = new TreeVisitorAction() {
-            public Object pre(Object t)  { return applyOnce(t, topdown_fptr); }
-            public Object post(Object t) { return applyRepeatedly(t, bottomup_ftpr); }
-        };
-        t = v.visit(t, actions);
-        return t;
-    }
-
-    /** Override this if you need transformation tracing to go somewhere
-     *  other than stdout or if you're not using Tree-derived trees.
-     */
-    public void reportTransformation(Object oldTree, Object newTree) {
-        System.out.println(((Tree)oldTree).toStringTree()+" -> "+
-                           ((Tree)newTree).toStringTree());
-    }
-
-    fptr topdown_fptr = new fptr() {
-        public Object rule() throws RecognitionException { return topdown(); }
-    };
-    
-    fptr bottomup_ftpr = new fptr() {
-        public Object rule() throws RecognitionException { return bottomup(); }
-    };
-
-    // methods the downup strategy uses to do the up and down rules.
-    // to override, just define tree grammar rule topdown and turn on
-    // filter=true.
-    public Object topdown() throws RecognitionException { return null; }
-    public Object bottomup() throws RecognitionException { return null; }
-}
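TreeRewriter.downup() above applies the topdown rule once on the way down and the bottomup rule to a fixpoint on the way back up. The applyOnce/applyRepeatedly split can be sketched independently of trees; the string input and the "aa" -> "a" rule below are invented for illustration.

// Self-contained sketch of the applyOnce/applyRepeatedly fixpoint loop, with
// strings standing in for subtrees and a toy rule in place of a tree-grammar rule.
class RewriteSketch {
    interface Rule { String apply(String t); }          // plays the role of fptr

    /** Apply the rule once; if it does not apply (null), keep the original. */
    static String applyOnce(String t, Rule r) {
        String u = r.apply(t);
        return u != null ? u : t;
    }

    /** Reapply until nothing changes, like applyRepeatedly(). */
    static String applyRepeatedly(String t, Rule r) {
        while (true) {
            String u = applyOnce(t, r);
            if (u.equals(t)) return t;
            t = u;
        }
    }

    public static void main(String[] args) {
        Rule collapse = t -> t.contains("aa") ? t.replaceFirst("aa", "a") : null;
        System.out.println(applyRepeatedly("baaaab", collapse)); // prints: bab
    }
}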
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRuleReturnScope.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRuleReturnScope.java
deleted file mode 100644
index 4ea65c0..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRuleReturnScope.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.RuleReturnScope;
-
-/** This is identical to the ParserRuleReturnScope except that
- *  the start property is a tree node, not a Token object,
- *  when you are parsing trees.  To be generic the tree node types
- *  have to be Object.
- */
-public class TreeRuleReturnScope extends RuleReturnScope {
-	/** First node or root node of tree matched for this rule. */
-	public Object start;
-	public Object getStart() { return start; }	
-}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeWizard.java b/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeWizard.java
deleted file mode 100644
index 666cfd6..0000000
--- a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeWizard.java
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.runtime.tree;
-
-import org.antlr.runtime.Token;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/** Build and navigate trees with this object.  Must know about the names
- *  of tokens so you have to pass in a map or array of token names (from which
- *  this class can build the map).  I.e., Token DECL means nothing unless the
- *  class can translate it to a token type.
- *
- *  In order to create nodes and navigate, this class needs a TreeAdaptor.
- *
- *  This class can build a token type -> node index for repeated use or for
- *  iterating over the various nodes with a particular type.
- *
- *  This class works in conjunction with the TreeAdaptor rather than moving
- *  all this functionality into the adaptor.  An adaptor helps build and
- *  navigate trees using methods.  This class helps you do it with string
- *  patterns like "(A B C)".  You can create a tree from that pattern or
- *  match subtrees against it.
- */
-public class TreeWizard {
-	protected TreeAdaptor adaptor;
-	protected Map tokenNameToTypeMap;
-
-	public interface ContextVisitor {
-		// TODO: should this be called visit or something else?
-		public void visit(Object t, Object parent, int childIndex, Map labels);
-	}
-
-	public static abstract class Visitor implements ContextVisitor {
-		public void visit(Object t, Object parent, int childIndex, Map labels) {
-			visit(t);
-		}
-		public abstract void visit(Object t);
-	}
-
-	/** When using %label:TOKENNAME in a tree for parse(), we must
-	 *  track the label.
-	 */
-	public static class TreePattern extends CommonTree {
-		public String label;
-		public boolean hasTextArg;
-		public TreePattern(Token payload) {
-			super(payload);
-		}
-		public String toString() {
-			if ( label!=null ) {
-				return "%"+label+":"+super.toString();
-			}
-			else {
-				return super.toString();				
-			}
-		}
-	}
-
-	public static class WildcardTreePattern extends TreePattern {
-		public WildcardTreePattern(Token payload) {
-			super(payload);
-		}
-	}
-
-	/** This adaptor creates TreePattern objects for use during scan() */
-	public static class TreePatternTreeAdaptor extends CommonTreeAdaptor {
-		public Object create(Token payload) {
-			return new TreePattern(payload);
-		}
-	}
-
-	// TODO: build indexes for the wizard
-
-	/** During fillBuffer(), we can make a reverse index from a set
-	 *  of token types of interest to the list of indexes into the
-	 *  node stream.  This lets us convert a node pointer to a
-	 *  stream index semi-efficiently for a list of interesting
-	 *  nodes such as function definition nodes (you'll want to seek
-	 *  to their bodies for an interpreter).  Also useful for doing
-	 *  dynamic searches; i.e., go find me all PLUS nodes.
-	protected Map tokenTypeToStreamIndexesMap;
-
-	/** If tokenTypesToReverseIndex set to INDEX_ALL then indexing
-	 *  occurs for all token types.
-	public static final Set INDEX_ALL = new HashSet();
-
-	/** A set of token types user would like to index for faster lookup.
-	 *  If this is INDEX_ALL, then all token types are tracked.  If null,
-	 *  then none are indexed.
-	protected Set tokenTypesToReverseIndex = null;
-	*/
-
-	public TreeWizard(TreeAdaptor adaptor) {
-		this.adaptor = adaptor;
-	}
-
-	public TreeWizard(TreeAdaptor adaptor, Map tokenNameToTypeMap) {
-		this.adaptor = adaptor;
-		this.tokenNameToTypeMap = tokenNameToTypeMap;
-	}
-
-	public TreeWizard(TreeAdaptor adaptor, String[] tokenNames) {
-		this.adaptor = adaptor;
-		this.tokenNameToTypeMap = computeTokenTypes(tokenNames);
-	}
-
-	public TreeWizard(String[] tokenNames) {
-		this(new CommonTreeAdaptor(), tokenNames);
-	}
-
-	/** Compute a Map<String, Integer> that is an inverted index of
-	 *  tokenNames (which maps int token types to names).
-	 */
-	public Map computeTokenTypes(String[] tokenNames) {
-		Map m = new HashMap();
-		if ( tokenNames==null ) {
-			return m;
-		}
-		for (int ttype = Token.MIN_TOKEN_TYPE; ttype < tokenNames.length; ttype++) {
-			String name = tokenNames[ttype];
-			m.put(name, new Integer(ttype));
-		}
-		return m;
-	}
-
-	/** Using the map of token names to token types, return the type. */
-	public int getTokenType(String tokenName) {
-	 	if ( tokenNameToTypeMap==null ) {
-			 return Token.INVALID_TOKEN_TYPE;
-		 }
-		Integer ttypeI = (Integer)tokenNameToTypeMap.get(tokenName);
-		if ( ttypeI!=null ) {
-			return ttypeI.intValue();
-		}
-		return Token.INVALID_TOKEN_TYPE;
-	}
-
-	/** Walk the entire tree and make a token type to nodes mapping.
-	 *  For now, use recursion but later nonrecursive version may be
-	 *  more efficient.  Returns Map<Integer, List> where the List is
-	 *  of your AST node type.  The Integer is the token type of the node.
-	 *
-	 *  TODO: save this index so that find and visit are faster
-	 */
-	public Map index(Object t) {
-		Map m = new HashMap();
-		_index(t, m);
-		return m;
-	}
-
-	/** Do the work for index */
-	protected void _index(Object t, Map m) {
-		if ( t==null ) {
-			return;
-		}
-		int ttype = adaptor.getType(t);
-		List elements = (List)m.get(new Integer(ttype));
-		if ( elements==null ) {
-			elements = new ArrayList();
-			m.put(new Integer(ttype), elements);
-		}
-		elements.add(t);
-		int n = adaptor.getChildCount(t);
-		for (int i=0; i<n; i++) {
-			Object child = adaptor.getChild(t, i);
-			_index(child, m);
-		}
-	}
-
-	/** Return a List of tree nodes with token type ttype */
-	public List find(Object t, int ttype) {
-		final List nodes = new ArrayList();
-		visit(t, ttype, new TreeWizard.Visitor() {
-			public void visit(Object t) {
-				nodes.add(t);
-			}
-		});
-		return nodes;
-	}
-
-	/** Return a List of subtrees matching pattern. */
-	public List find(Object t, String pattern) {
-		final List subtrees = new ArrayList();
-		// Create a TreePattern from the pattern
-		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
-		TreePatternParser parser =
-			new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
-		final TreePattern tpattern = (TreePattern)parser.pattern();
-		// don't allow invalid patterns
-		if ( tpattern==null ||
-			 tpattern.isNil() ||
-			 tpattern.getClass()==WildcardTreePattern.class )
-		{
-			return null;
-		}
-		int rootTokenType = tpattern.getType();
-		visit(t, rootTokenType, new TreeWizard.ContextVisitor() {
-			public void visit(Object t, Object parent, int childIndex, Map labels) {
-				if ( _parse(t, tpattern, null) ) {
-					subtrees.add(t);
-				}
-			}
-		});
-		return subtrees;
-	}
-
-	public Object findFirst(Object t, int ttype) {
-		return null;
-	}
-
-	public Object findFirst(Object t, String pattern) {
-		return null;
-	}
-
-	/** Visit every ttype node in t, invoking the visitor.  This is a quicker
-	 *  version of the general visit(t, pattern) method.  The labels arg
-	 *  of the visitor action method is never set (it's null) since using
-	 *  a token type rather than a pattern doesn't let us set a label.
-	 */
-	public void visit(Object t, int ttype, ContextVisitor visitor) {
-		_visit(t, null, 0, ttype, visitor);
-	}
-
-	/** Do the recursive work for visit */
-	protected void _visit(Object t, Object parent, int childIndex, int ttype, ContextVisitor visitor) {
-		if ( t==null ) {
-			return;
-		}
-		if ( adaptor.getType(t)==ttype ) {
-			visitor.visit(t, parent, childIndex, null);
-		}
-		int n = adaptor.getChildCount(t);
-		for (int i=0; i<n; i++) {
-			Object child = adaptor.getChild(t, i);
-			_visit(child, t, i, ttype, visitor);
-		}
-	}
-
-	/** For all subtrees that match the pattern, execute the visit action.
-	 *  The implementation uses the root node of the pattern in combination
-	 *  with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
-	 *  Patterns with wildcard roots are also not allowed.
-	 */
-	public void visit(Object t, final String pattern, final ContextVisitor visitor) {
-		// Create a TreePattern from the pattern
-		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
-		TreePatternParser parser =
-			new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
-		final TreePattern tpattern = (TreePattern)parser.pattern();
-		// don't allow invalid patterns
-		if ( tpattern==null ||
-			 tpattern.isNil() ||
-			 tpattern.getClass()==WildcardTreePattern.class )
-		{
-			return;
-		}
-		final Map labels = new HashMap(); // reused for each _parse
-		int rootTokenType = tpattern.getType();
-		visit(t, rootTokenType, new TreeWizard.ContextVisitor() {
-			public void visit(Object t, Object parent, int childIndex, Map unusedlabels) {
-				// the unusedlabels arg is null because visit-by-token-type doesn't set it.
-				labels.clear();
-				if ( _parse(t, tpattern, labels) ) {
-					visitor.visit(t, parent, childIndex, labels);
-				}
-			}
-		});
-	}
-
-	/** Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
-	 *  on the various nodes and '.' (dot) as the node/subtree wildcard,
-	 *  return true if the pattern matches and fill the labels Map with
-	 *  the labels pointing at the appropriate nodes.  Return false if
-	 *  the pattern is malformed or the tree does not match.
-	 *
-	 *  If a node specifies a text arg in pattern, then that must match
-	 *  for that node in t.
-	 *
-	 *  TODO: what's a better way to indicate bad pattern? Exceptions are a hassle 
-	 */
-	public boolean parse(Object t, String pattern, Map labels) {
-		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
-		TreePatternParser parser =
-			new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
-		TreePattern tpattern = (TreePattern)parser.pattern();
-		/*
-		System.out.println("t="+((Tree)t).toStringTree());
-		System.out.println("scant="+tpattern.toStringTree());
-		*/
-		boolean matched = _parse(t, tpattern, labels);
-		return matched;
-	}
-
-	public boolean parse(Object t, String pattern) {
-		return parse(t, pattern, null);
-	}
-
-	/** Do the work for parse. Check to see if the t2 pattern fits the
-	 *  structure and token types in t1.  Check text if the pattern has
-	 *  text arguments on nodes.  Fill labels map with pointers to nodes
-	 *  in tree matched against nodes in pattern with labels.
-	 */
-	protected boolean _parse(Object t1, TreePattern tpattern, Map labels) {
-		// make sure both are non-null
-		if ( t1==null || tpattern==null ) {
-			return false;
-		}
-		// check roots (wildcard matches anything)
-		if ( tpattern.getClass() != WildcardTreePattern.class ) {
-			if ( adaptor.getType(t1) != tpattern.getType() ) return false;
-            // if pattern has text, check node text
-			if ( tpattern.hasTextArg && !adaptor.getText(t1).equals(tpattern.getText()) ) {
-				return false;
-			}
-		}
-		if ( tpattern.label!=null && labels!=null ) {
-			// map label in pattern to node in t1
-			labels.put(tpattern.label, t1);
-		}
-		// check children
-		int n1 = adaptor.getChildCount(t1);
-		int n2 = tpattern.getChildCount();
-		if ( n1 != n2 ) {
-			return false;
-		}
-		for (int i=0; i<n1; i++) {
-			Object child1 = adaptor.getChild(t1, i);
-			TreePattern child2 = (TreePattern)tpattern.getChild(i);
-			if ( !_parse(child1, child2, labels) ) {
-				return false;
-			}
-		}
-		return true;
-	}
-
-	/** Create a tree or node from the indicated tree pattern that closely
-	 *  follows ANTLR tree grammar tree element syntax:
-	 *
-	 * 		(root child1 ... child2).
-	 *
-	 *  You can also just pass in a node: ID
-	 * 
-	 *  Any node can have a text argument: ID[foo]
-	 *  (notice there are no quotes around foo--it's clear it's a string).
-	 *
-	 *  nil is a special name meaning "give me a nil node".  Useful for
-	 *  making lists: (nil A B C) is a list of A B C.
- 	 */
-	public Object create(String pattern) {
-		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
-		TreePatternParser parser = new TreePatternParser(tokenizer, this, adaptor);
-		Object t = parser.pattern();
-		return t;
-	}
-
-	/** Compare t1 and t2; return true if token types, text, and structure match exactly.
-	 *  The trees are examined in their entirety so that (A B) does not match
-	 *  (A B C) nor (A (B C)). 
-	 // TODO: allow them to pass in a comparator
-	 *  TODO: have a version that is nonstatic so it can use instance adaptor
-	 *
-	 *  I cannot rely on the tree node's equals() implementation as I make
-	 *  no constraints at all on the node types or their interfaces.
-	 */
-	public static boolean equals(Object t1, Object t2, TreeAdaptor adaptor) {
-		return _equals(t1, t2, adaptor);
-	}
-
-	/** Compare type, structure, and text of two trees, assuming adaptor in
-	 *  this instance of a TreeWizard.
-	 */
-	public boolean equals(Object t1, Object t2) {
-		return _equals(t1, t2, adaptor);
-	}
-
-	protected static boolean _equals(Object t1, Object t2, TreeAdaptor adaptor) {
-		// make sure both are non-null
-		if ( t1==null || t2==null ) {
-			return false;
-		}
-		// check roots
-		if ( adaptor.getType(t1) != adaptor.getType(t2) ) {
-			return false;
-		}
-		if ( !adaptor.getText(t1).equals(adaptor.getText(t2)) ) {
-			return false;
-		}
-		// check children
-		int n1 = adaptor.getChildCount(t1);
-		int n2 = adaptor.getChildCount(t2);
-		if ( n1 != n2 ) {
-			return false;
-		}
-		for (int i=0; i<n1; i++) {
-			Object child1 = adaptor.getChild(t1, i);
-			Object child2 = adaptor.getChild(t2, i);
-			if ( !_equals(child1, child2, adaptor) ) {
-				return false;
-			}
-		}
-		return true;
-	}
-
-	// TODO: next stuff taken from CommonTreeNodeStream
-	
-		/** Given a node, add this to the reverse index tokenTypeToStreamIndexesMap.
-	 *  You can override this method to alter how indexing occurs.  The
-	 *  default is to create a
-	 *
-	 *    Map<Integer token type,ArrayList<Integer stream index>>
-	 *
-	 *  This data structure allows you to find all nodes with type INT in order.
-	 *
-	 *  If you really need to find a node of type, say, FUNC quickly then perhaps
-	 *
- *    Map<Integer token type, Map<Object tree node, Integer stream index>>
-	 *
-	 *  would be better for you.  The interior maps map a tree node to
-	 *  the index so you don't have to search linearly for a specific node.
-	 *
-	 *  If you change this method, you will likely need to change
-	 *  getNodeIndex(), which extracts information.
-	protected void fillReverseIndex(Object node, int streamIndex) {
-		//System.out.println("revIndex "+node+"@"+streamIndex);
-		if ( tokenTypesToReverseIndex==null ) {
-			return; // no indexing if this is empty (nothing of interest)
-		}
-		if ( tokenTypeToStreamIndexesMap==null ) {
-			tokenTypeToStreamIndexesMap = new HashMap(); // first indexing op
-		}
-		int tokenType = adaptor.getType(node);
-		Integer tokenTypeI = new Integer(tokenType);
-		if ( !(tokenTypesToReverseIndex==INDEX_ALL ||
-			   tokenTypesToReverseIndex.contains(tokenTypeI)) )
-		{
-			return; // tokenType not of interest
-		}
-		Integer streamIndexI = new Integer(streamIndex);
-		ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
-		if ( indexes==null ) {
-			indexes = new ArrayList(); // no list yet for this token type
-			indexes.add(streamIndexI); // not there yet, add
-			tokenTypeToStreamIndexesMap.put(tokenTypeI, indexes);
-		}
-		else {
-			if ( !indexes.contains(streamIndexI) ) {
-				indexes.add(streamIndexI); // not there yet, add
-			}
-		}
-	}
-
-	/** Track the indicated token type in the reverse index.  Call this
-	 *  repeatedly for each type or use variant with Set argument to
-	 *  set all at once.
-	 * @param tokenType
-	public void reverseIndex(int tokenType) {
-		if ( tokenTypesToReverseIndex==null ) {
-			tokenTypesToReverseIndex = new HashSet();
-		}
-		else if ( tokenTypesToReverseIndex==INDEX_ALL ) {
-			return;
-		}
-		tokenTypesToReverseIndex.add(new Integer(tokenType));
-	}
-
-	/** Track the indicated token types in the reverse index. Set
-	 *  to INDEX_ALL to track all token types.
-	public void reverseIndex(Set tokenTypes) {
-		tokenTypesToReverseIndex = tokenTypes;
-	}
-
-	/** Given a node pointer, return its index into the node stream.
-	 *  This is not its Token stream index.  If there is no reverse map
-	 *  from node to stream index or the map does not contain entries
-	 *  for node's token type, a linear search of entire stream is used.
-	 *
-	 *  Return -1 if exact node pointer not in stream.
-	public int getNodeIndex(Object node) {
-		//System.out.println("get "+node);
-		if ( tokenTypeToStreamIndexesMap==null ) {
-			return getNodeIndexLinearly(node);
-		}
-		int tokenType = adaptor.getType(node);
-		Integer tokenTypeI = new Integer(tokenType);
-		ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
-		if ( indexes==null ) {
-			//System.out.println("found linearly; stream index = "+getNodeIndexLinearly(node));
-			return getNodeIndexLinearly(node);
-		}
-		for (int i = 0; i < indexes.size(); i++) {
-			Integer streamIndexI = (Integer)indexes.get(i);
-			Object n = get(streamIndexI.intValue());
-			if ( n==node ) {
-				//System.out.println("found in index; stream index = "+streamIndexI);
-				return streamIndexI.intValue(); // found it!
-			}
-		}
-		return -1;
-	}
-
-	*/
-}
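For reference, typical use of the TreeWizard deleted above looks roughly like the following. The PLUS/INT token names are invented for the example, real token types start at Token.MIN_TOKEN_TYPE (4), and the sketch assumes the ANTLR 3 Java runtime is still on the classpath.

import java.util.HashMap;
import java.util.Map;

import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.CommonTreeAdaptor;
import org.antlr.runtime.tree.TreeWizard;

// Rough usage sketch for TreeWizard; token names and types are made up.
public class TreeWizardDemo {
    public static void main(String[] args) {
        // indexes 0..3 are reserved, so PLUS=4 and INT=5 here
        String[] tokenNames = { "<invalid>", "<EOR>", "<DOWN>", "<UP>", "PLUS", "INT" };
        TreeWizard wiz = new TreeWizard(new CommonTreeAdaptor(), tokenNames);

        // build a tree from a pattern string
        CommonTree t = (CommonTree) wiz.create("(PLUS INT[3] INT[4])");

        // match it against a labeled pattern; '.' is the node/subtree wildcard
        Map labels = new HashMap();
        if (wiz.parse(t, "(PLUS %left:INT %right:.)", labels)) {
            System.out.println("left  = " + labels.get("left"));   // 3
            System.out.println("right = " + labels.get("right"));  // 4
        }

        // collect every subtree whose root matches a pattern
        System.out.println(wiz.find(t, "INT"));
    }
}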
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework.zip b/antlr-3.4/runtime/ObjC/ANTLR.framework.zip
deleted file mode 100644
index 8340758..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework.zip
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/ANTLR b/antlr-3.4/runtime/ObjC/ANTLR.framework/ANTLR
deleted file mode 100755
index 67c1d3a..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/ANTLR
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLR.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLR.h
deleted file mode 100755
index 671e783..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLR.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRTreeException.h>
-
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseMapElement.h
deleted file mode 100644
index b9100ac..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseMapElement.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-//  ANTLRBaseMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-@interface ANTLRBaseMapElement : ANTLRLinkBase {
-    NSNumber *index;
-}
-
-@property (retain, getter=getIndex, setter=setIndex:) NSNumber *index;
-
-+ (id) newANTLRBaseMapElement;
-+ (id) newANTLRBaseMapElementWithIndex:(NSNumber *)anIdx;
-- (id) init;
-- (id) initWithAnIndex:(NSNumber *)anIdx;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSNumber *)getIndex;
-- (void)setIndex:(NSNumber *)anIdx;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseRecognizer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseRecognizer.h
deleted file mode 100755
index 1a922bd..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseRecognizer.h
+++ /dev/null
@@ -1,183 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import <Foundation/Foundation.h>
-
-#import "ANTLRIntStream.h"
-
-// This is an abstract superclass for lexers and parsers.
-
-#define ANTLR_MEMO_RULE_FAILED -2
-#define ANTLR_MEMO_RULE_UNKNOWN -1
-#define ANTLR_INITIAL_FOLLOW_STACK_SIZE 100
-
-#import "ANTLRMapElement.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRToken.h"
-#import "ANTLRRecognizerSharedState.h"
-#import "ANTLRRecognitionException.h"
-#import "ANTLRMissingTokenException.h"
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRMismatchedTreeNodeException.h"
-#import "ANTLRUnwantedTokenException.h"
-#import "ANTLRNoViableAltException.h"
-#import "ANTLREarlyExitException.h"
-#import "ANTLRMismatchedSetException.h"
-#import "ANTLRMismatchedNotSetException.h"
-#import "ANTLRFailedPredicateException.h"
-
-@interface ANTLRBaseRecognizer : NSObject {
-	ANTLRRecognizerSharedState *state;	// the state of this recognizer. Might be shared with other recognizers, e.g. in grammar import scenarios.
-	NSString *grammarFileName;			// where the grammar came from; filled in by code generation
-//    BOOL failed;
-    NSString *sourceName;
-//    NSInteger numberOfSyntaxErrors;
-    NSArray *tokenNames;
-}
-
-@property (retain, getter=getState, setter=setState) ANTLRRecognizerSharedState *state;
-@property (retain, getter=getGrammarFileName, setter=setGrammarFileName) NSString *grammarFileName;
-//@property (assign, getter=getFailed, setter=setFailed) BOOL failed;
-@property (retain, getter=getTokenNames, setter=setTokenNames) NSArray *tokenNames;
-@property (retain, getter=getSourceName, setter=setSourceName) NSString *sourceName;
-//@property (assign, getter=getNumberOfSyntaxErrors, setter=setNumberOfSyntaxErrors) NSInteger numberOfSyntaxErrors;
-
-+ (void) initialize;
-
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizer;
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizerWithRuleLen:(NSInteger)aLen;
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizer:(ANTLRRecognizerSharedState *)aState;
-
-+ (NSArray *)getTokenNames;
-+ (void)setTokenNames:(NSArray *)aTokNamArray;
-+ (void)setGrammarFileName:(NSString *)aFileName;
-
-- (id) init;
-- (id) initWithLen:(NSInteger)aLen;
-- (id) initWithState:(ANTLRRecognizerSharedState *)aState;
-
-- (void) dealloc;
-
-// simple accessors
-- (NSInteger) getBacktrackingLevel;
-- (void) setBacktrackingLevel:(NSInteger) level;
-
-- (BOOL) getFailed;
-- (void) setFailed: (BOOL) flag;
-
-- (ANTLRRecognizerSharedState *) getState;
-- (void) setState:(ANTLRRecognizerSharedState *) theState;
-
-// reset this recognizer - might be extended by codegeneration/grammar
-- (void) reset;
-
-/** Match needs to return the current input symbol, which gets put
- *  into the label for the associated token ref; e.g., x=ID.  Token
- *  and tree parsers need to return different objects. Rather than test
- *  for input stream type or change the IntStream interface, I use
- *  a simple method to ask the recognizer to tell me what the current
- *  input symbol is.
- * 
- *  This is ignored for lexers.
- */
-- (id) getInput;
-
-- (void)skip;
-
-// do actual matching of tokens/characters
-- (id) match:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)ttype Follow:(ANTLRBitSet *)follow;
-- (void) matchAny:(id<ANTLRIntStream>)anInput;
-- (BOOL) mismatchIsUnwantedToken:(id<ANTLRIntStream>)anInput TokenType:(NSInteger) ttype;
-- (BOOL) mismatchIsMissingToken:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)follow;
-
-// error reporting and recovery
-- (void) reportError:(ANTLRRecognitionException *)e;
-- (void) displayRecognitionError:(NSArray *)theTokNams Exception:(ANTLRRecognitionException *)e;
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(NSArray *)theTokNams;
-- (NSInteger) getNumberOfSyntaxErrors;
-- (NSString *)getErrorHeader:(ANTLRRecognitionException *)e;
-- (NSString *)getTokenErrorDisplay:(id<ANTLRToken>)t;
-- (void) emitErrorMessage:(NSString *)msg;
-- (void) recover:(id<ANTLRIntStream>)anInput Exception:(ANTLRRecognitionException *)e;
-
-// begin hooks for debugger
-- (void) beginResync;
-- (void) endResync;
-// end hooks for debugger
-
-// compute the bitsets necessary to do matching and recovery
-- (ANTLRBitSet *)computeErrorRecoverySet;
-- (ANTLRBitSet *)computeContextSensitiveRuleFOLLOW;
-- (ANTLRBitSet *)combineFollows:(BOOL) exact;
-
-- (id<ANTLRToken>) recoverFromMismatchedToken:(id<ANTLRIntStream>)anInput 
-                                    TokenType:(NSInteger)ttype 
-                                       Follow:(ANTLRBitSet *)follow;
-                                    
-- (id<ANTLRToken>)recoverFromMismatchedSet:(id<ANTLRIntStream>)anInput
-                                    Exception:(ANTLRRecognitionException *)e
-                                    Follow:(ANTLRBitSet *)follow;
-
-- (id) getCurrentInputSymbol:(id<ANTLRIntStream>)anInput;
-- (id) getMissingSymbol:(id<ANTLRIntStream>)anInput
-              Exception:(ANTLRRecognitionException *)e
-              TokenType:(NSInteger) expectedTokenType
-                Follow:(ANTLRBitSet *)follow;
-
-// helper methods for recovery. try to resync somewhere
-- (void) consumeUntilTType:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)ttype;
-- (void) consumeUntilFollow:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)bitSet;
-- (void) pushFollow:(ANTLRBitSet *)fset;
-- (ANTLRBitSet *)popFollow;
-
-// to be used by the debugger to do reporting. maybe hook in incremental stuff here, too.
-- (NSMutableArray *) getRuleInvocationStack;
-- (NSMutableArray *) getRuleInvocationStack:(ANTLRRecognitionException *)exception
-					             Recognizer:(NSString *)recognizerClassName;
-
-- (NSArray *) getTokenNames;
-- (NSString *)getGrammarFileName;
-- (NSString *)getSourceName;
-- (NSMutableArray *) toStrings:(NSArray *)tokens;
-// support for memoization
-- (NSInteger) getRuleMemoization:(NSInteger)ruleIndex StartIndex:(NSInteger)ruleStartIndex;
-- (BOOL) alreadyParsedRule:(id<ANTLRIntStream>)anInput RuleIndex:(NSInteger)ruleIndex;
-- (void) memoize:(id<ANTLRIntStream>)anInput
-	     RuleIndex:(NSInteger)ruleIndex
-	    StartIndex:(NSInteger)ruleStartIndex;
-- (NSInteger) getRuleMemoizationCacheSize;
-- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol;
-- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol;
-
-
-// support for syntactic predicates. these are called indirectly to support funky stuff in grammars,
-// like supplying selectors instead of writing code directly into the actions of the grammar.
-- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment;
-// stream:(id<ANTLRIntStream>)anInput;
-
-@end
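The match/recover methods declared above follow the same shape in every runtime: compare the lookahead against the expected type, consume on success, otherwise report the mismatch and synthesize a placeholder symbol so parsing can continue. A toy, runtime-independent Java sketch of that idea follows; the token constants and the fabricated return value are made up.

import java.util.ArrayDeque;
import java.util.Deque;

// Toy sketch of match-or-recover with single-token insertion; not the real API.
class MatchSketch {
    static final int EOF = -1, ID = 1, SEMI = 2;

    final Deque<Integer> input = new ArrayDeque<>();

    MatchSketch(int... tokens) { for (int t : tokens) input.add(t); }

    int la() { return input.isEmpty() ? EOF : input.peek(); }

    /** Consume the expected token, or report the mismatch and fabricate one. */
    int match(int expected) {
        if (la() == expected) return input.poll();
        System.err.println("mismatch: expected " + expected + ", found " + la());
        return expected;                 // "missing token" stand-in; input left alone
    }

    public static void main(String[] args) {
        MatchSketch p = new MatchSketch(ID, ID);   // the SEMI is missing
        p.match(ID);
        p.match(SEMI);                             // reports and fabricates a SEMI
        p.match(ID);                               // still matches the remaining ID
    }
}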
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseStack.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseStack.h
deleted file mode 100644
index 5069031..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseStack.h
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-//  ANTLRBaseRecognizer.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRBaseStack : ANTLRPtrBuffer {
-	//ANTLRRuleStack *fNext;
-    // TStringPool *fPool;
-    NSInteger LastHash;
-}
-
-//@property (copy) ANTLRRuleStack *fNext;
-@property (getter=getLastHash, setter=setLastHash) NSInteger LastHash;
-
-// Construction/Destruction
-+ (ANTLRBaseStack *)newANTLRBaseStack;
-+ (ANTLRBaseStack *)newANTLRBaseStackWithLen:(NSInteger)cnt;
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger)count;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-
-- (NSInteger)getLastHash;
-- (void)setLastHash:(NSInteger)aVal;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseTree.h
deleted file mode 100755
index 96513f8..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseTree.h
+++ /dev/null
@@ -1,199 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTree.h"
-
-@protocol ANTLRBaseTree <ANTLRTree>
-
-@property (retain, getter=getChildren, setter=setChildren) NSMutableArray *children;
-
-+ (id<ANTLRBaseTree>) newANTLRBaseTree;
-+ (id<ANTLRBaseTree>) newANTLRBaseTree:(id<ANTLRBaseTree>)node;
-
-- (id<ANTLRBaseTree>) init;
-- (id<ANTLRBaseTree>) initWith:(id<ANTLRTree>)node;
-
-- (id<ANTLRBaseTree>) getChild:(NSUInteger)i;
-- (NSMutableArray *)getChildren;
-- (void) setChildren:(NSMutableArray *)anArray;
-- (id<ANTLRBaseTree>)getFirstChildWithType:(NSInteger)type;
-- (NSUInteger) getChildCount;
-
-// Add t as a child to this node.  If t is null, do nothing.  If t
-//  is nil, add all children of t to this' children.
-
-- (void) addChild:(id<ANTLRTree>) tree;
-- (void) addChildren:(NSArray *) theChildren;
-//- (void) removeAllChildren;
-
-- (void) setChild:(NSInteger) i With:(id<ANTLRTree>)t;
-- (id) deleteChild:(NSInteger) i;
-- (NSMutableArray *) createChildrenList;
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-// Indicates the node is a nil node but may still have children, meaning
-// the tree is a flat list.
-
-- (BOOL) isNil;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex:(NSInteger) index;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (void) freshenParentAndChildIndexes;
-- (void) freshenParentAndChildIndexes:(NSInteger) offset;
-- (void) sanityCheckParentAndChildIndexes;
-- (void) sanityCheckParentAndChildIndexes:(id<ANTLRTree>) parent At:(NSInteger) i;
-
-- (NSInteger) getChildIndex;
-- (void) setChildIndex:(NSInteger)i;
-
-- (id<ANTLRTree>)getAncestor:(NSInteger)ttype;
-- (NSMutableArray *)getAncestors;
-
-#pragma mark Copying
-- (id) copyWithZone:(NSZone *)aZone;	// the children themselves are not copied here!
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-#pragma mark Tree Parser support
-- (NSInteger) getType;
-- (NSString *) getText;
-// In case we don't have a token payload, what is the line for errors?
-- (NSInteger) getLine;
-- (NSInteger) getCharPositionInLine;
-
-
-#pragma mark Informational
-- (NSString *) treeDescription;
-- (NSString *) description;
-
-- (NSString *) toString;
-- (NSString *) toStringTree;
-
-@end
-
-@interface ANTLRBaseTree : NSObject <ANTLRTree>
-{
-	NSMutableArray *children;
-    NSException *anException;
-}
-
-@property (retain, getter=getChildren, setter=setChildren) NSMutableArray *children;
-
-+ (id<ANTLRBaseTree>) newANTLRBaseTree;
-+ (id<ANTLRBaseTree>) newANTLRBaseTree:(id<ANTLRBaseTree>)node;
-         
-- (id<ANTLRTree>) init;
-- (id<ANTLRBaseTree>) initWith:(id<ANTLRTree>)node;
-
-- (id<ANTLRBaseTree>) getChild:(NSUInteger)i;
-- (NSMutableArray *)getChildren;
-- (void) setChildren:(NSMutableArray *)anArray;
-- (id<ANTLRBaseTree>)getFirstChildWithType:(NSInteger)type;
-- (NSUInteger) getChildCount;
-
-//- (void) removeAllChildren;
-
-// Add t as a child to this node.  If t is null, do nothing.  If t
-//  is nil, add all children of t to this' children.
-
-- (void) addChild:(id<ANTLRTree>) tree;
-- (void) addChildren:(NSArray *) theChildren;
-
-- (void) setChild:(NSInteger) i With:(id<ANTLRTree>)t;
-- (id) deleteChild:(NSInteger) i;
-- (NSMutableArray *) createChildrenList;
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-// Indicates the node is a nil node but may still have children, meaning
-	// the tree is a flat list.
-
-- (BOOL) isNil;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex:(NSInteger) index;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (void) freshenParentAndChildIndexes;
-- (void) freshenParentAndChildIndexes:(NSInteger) offset;
-- (void) sanityCheckParentAndChildIndexes;
-- (void) sanityCheckParentAndChildIndexes:(id<ANTLRTree>) parent At:(NSInteger) i;
-
-- (NSInteger) getChildIndex;
-- (void) setChildIndex:(NSInteger)i;
-
-- (BOOL) hasAncestor:(NSInteger) ttype;
-- (id<ANTLRTree>)getAncestor:(NSInteger)ttype;
-- (NSMutableArray *)getAncestors;
-
-- (id) copyWithZone:(NSZone *)aZone;
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-	// Return a token type; needed for tree parsing
-- (NSInteger) getType;
-- (NSString *) getText;
-
-	// In case we don't have a token payload, what is the line for errors?
-- (NSInteger) getLine;
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger)pos;
-
-- (NSString *) treeDescription;
-- (NSString *) description;
-- (NSString *) toString;
-- (NSString *) toStringTree;
-
-@end
-
-@interface ANTLRTreeNavigationNode : ANTLRBaseTree {
-}
-- (id) copyWithZone:(NSZone *)aZone;
-@end
-
-@interface ANTLRTreeNavigationNodeDown : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeDown *) getNavigationNodeDown;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-@interface ANTLRTreeNavigationNodeUp : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeUp *) getNavigationNodeUp;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-@interface ANTLRTreeNavigationNodeEOF : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeEOF *) getNavigationNodeEOF;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-extern ANTLRTreeNavigationNodeDown *navigationNodeDown;
-extern ANTLRTreeNavigationNodeUp *navigationNodeUp;
-extern ANTLRTreeNavigationNodeEOF *navigationNodeEOF;
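The "nil node ... flat list" comments in the header above describe the nil-rooted list idiom shared by the runtimes. In the Java runtime it looks roughly like this; token type 4 and the text labels are arbitrary values for illustration, and the sketch assumes the ANTLR 3 Java runtime is available.

import org.antlr.runtime.CommonToken;
import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.CommonTreeAdaptor;

// Sketch of a nil-rooted flat list: a root with children but no real payload.
public class NilListDemo {
    public static void main(String[] args) {
        CommonTreeAdaptor adaptor = new CommonTreeAdaptor();
        CommonTree list = (CommonTree) adaptor.nil();    // nil root: isNil() == true
        adaptor.addChild(list, adaptor.create(new CommonToken(4, "A")));
        adaptor.addChild(list, adaptor.create(new CommonToken(4, "B")));
        adaptor.addChild(list, adaptor.create(new CommonToken(4, "C")));
        System.out.println(list.isNil());        // true: no real root, just children
        System.out.println(list.toStringTree()); // A B C
    }
}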
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseTreeAdaptor.h
deleted file mode 100644
index b4f8dad..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBaseTreeAdaptor.h
+++ /dev/null
@@ -1,163 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRCommonErrorNode.h"
-#import "ANTLRUniqueIDMap.h"
-
-@interface ANTLRBaseTreeAdaptor : NSObject <ANTLRTreeAdaptor, NSCopying> {
-    ANTLRUniqueIDMap *treeToUniqueIDMap;
-	NSInteger uniqueNodeID;
-}
-
-@property (retain, getter=getTreeToUniqueIDMap, setter=setTreeToUniqueIDMap:) ANTLRUniqueIDMap *treeToUniqueIDMap;
-@property (getter=getUniqueNodeID, setter=setUniqueNodeID:) NSInteger uniqueNodeID;
-
-+ (id<ANTLRTreeAdaptor>) newEmptyTree;
-
-- (id) init;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id) emptyNode;
-
-- (ANTLRUniqueIDMap *)getTreeToUniqueIDMap;
-- (void) setTreeToUniqueIDMap:(ANTLRUniqueIDMap *)aMapNode;
-
-- (NSInteger)getUniqueID;
-- (void) setUniqueNodeID:(NSInteger)aUniqueNodeID;
-
-/** create tree node that holds the start and stop tokens associated
- *  with an error.
- *
- *  If you specify your own kind of tree nodes, you will likely have to
- *  override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
- *  if no token payload but you might have to set token type for diff
- *  node type.
- *
- *  You don't have to subclass CommonErrorNode; you will likely need to
- *  subclass your own tree node class to avoid class cast exception.
- */
-- (id) errorNode:(id<ANTLRTokenStream>)anInput
-            From:(id<ANTLRToken>)startToken
-              To:(id<ANTLRToken>)stopToken
-       Exception:(NSException *) e;
-
-- (BOOL) isNil:(id<ANTLRTree>) aTree;
-
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree;
-/** This is generic in the sense that it will work with any kind of
- *  tree (not just Tree interface).  It invokes the adaptor routines
- *  not the tree node routines to do the construction.  
- */
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree Parent:(id<ANTLRTree>)parent;
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)aNode;
-/** Add a child to the tree t.  If child is a flat tree (a list), make all
- *  in list children of t.  Warning: if t has no children, but child does
- *  and child isNil then you can decide it is ok to move children to t via
- *  t.children = child.children; i.e., without copying the array.  Just
- *  make sure that this is consistent with have the user will build
- *  ASTs.
- */
-- (void) addChild:(id<ANTLRTree>)aChild toTree:(id<ANTLRTree>)aTree;
-
-/** If oldRoot is a nil root, just copy or move the children to newRoot.
- *  If not a nil root, make oldRoot a child of newRoot.
- *
- *    old=^(nil a b c), new=r yields ^(r a b c)
- *    old=^(a b c), new=r yields ^(r ^(a b c))
- *
- *  If newRoot is a nil-rooted single child tree, use the single
- *  child as the new root node.
- *
- *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
- *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
- *
- *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
- *
- *    old=null, new=r yields r
- *    old=null, new=^(nil r) yields ^(nil r)
- *
- *  Return newRoot.  Throw an exception if newRoot is not a
- *  simple node or nil root with a single child node--it must be a root
- *  node.  If newRoot is ^(nil x) return x as newRoot.
- *
- *  Be advised that it's ok for newRoot to point at oldRoot's
- *  children; i.e., you don't have to copy the list.  We are
- *  constructing these nodes so we should have this control for
- *  efficiency.
- */
-- (id<ANTLRTree>)becomeRoot:(id<ANTLRTree>)aNewRoot old:(id<ANTLRTree>)oldRoot;
-
-/** Transform ^(nil x) to x and nil to null */
-- (id<ANTLRTree>)rulePostProcessing:(id<ANTLRTree>)aRoot;
-
-- (id<ANTLRTree>)becomeRootfromToken:(id<ANTLRToken>)aNewRoot old:(id<ANTLRTree>)oldRoot;
-
-- (id<ANTLRTree>)createTree:(NSInteger)aTType With:(id<ANTLRToken>)aFromToken;
-
-- (id<ANTLRTree>)createTree:(NSInteger)aTType FromToken:(id<ANTLRToken>)aFromToken Text:(NSString *)theText;
-
-- (id<ANTLRTree>)createTree:(NSInteger)aTType Text:(NSString *)theText;
-
-- (NSInteger) getType:(id<ANTLRTree>)aTree;
-
-- (void) setType:(id<ANTLRTree>)aTree Type:(NSInteger)type;
-
-- (NSString *)getText:(id<ANTLRTree>)aTree;
-
-- (void) setText:(id<ANTLRTree>)aTree Text:(NSString *)theText;
-
-- (id<ANTLRTree>) getChild:(id<ANTLRTree>)aTree At:(NSInteger)i;
-
-- (void) setChild:(id<ANTLRTree>)aTree At:(NSInteger)index Child:(id<ANTLRTree>)aChild;
-
-- (id<ANTLRTree>) deleteChild:(id<ANTLRTree>)aTree Index:(NSInteger)index;
-
-- (NSInteger) getChildCount:(id<ANTLRTree>)aTree;
-
-- (NSInteger) getUniqueID:(id<ANTLRTree>)node;
-
-/** Tell me how to create a token for use with imaginary token nodes.
- *  For example, there is probably no input symbol associated with imaginary
- *  token DECL, but you need to create it as a payload or whatever for
- *  the DECL node as in ^(DECL type ID).
- *
- *  This is a variant of createToken where the new token is derived from
- *  an actual real input token.  Typically this is for converting '{'
- *  tokens to BLOCK etc...  You'll see
- *
- *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
- *
- *  If you care what the token payload objects' type is, you should
- *  override this method and any other createToken variant.
- */
-- (id<ANTLRToken>)createToken:(NSInteger)aTType Text:(NSString *)theText;
-
-- (id<ANTLRToken>)createToken:(id<ANTLRToken>)aFromToken;
-
-@end
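
The becomeRoot:/addChild: comments in this header describe how ANTLR's rewrite rules assemble ASTs through an adaptor rather than through the tree nodes directly. A minimal Objective-C sketch of that flow, assuming a concrete adaptor such as ANTLRCommonTreeAdaptor (declared further down in this change) and token type values that are purely illustrative:

    // Token type values here are arbitrary placeholders, not real grammar constants.
    #define PLUS_TYPE 4
    #define INT_TYPE  5

    ANTLRCommonTreeAdaptor *adaptor = [ANTLRCommonTreeAdaptor newANTLRCommonTreeAdaptor];

    // Build a flat, nil-rooted child list: ^(nil 1 2)
    id<ANTLRTree> list = [adaptor emptyNode];
    [adaptor addChild:[adaptor createTree:INT_TYPE Text:@"1"] toTree:list];
    [adaptor addChild:[adaptor createTree:INT_TYPE Text:@"2"] toTree:list];

    // Hoist a new root over it: old=^(nil 1 2), new=+  yields  ^(+ 1 2)
    id<ANTLRTree> plus = [adaptor createTree:PLUS_TYPE Text:@"+"];
    id<ANTLRTree> expr = [adaptor becomeRoot:plus old:list];
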
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBitSet.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBitSet.h
deleted file mode 100755
index a1be117..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBitSet.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import <CoreFoundation/CoreFoundation.h>
-#import "ANTLRToken.h"
-
-#define BITS (sizeof(NSUInteger) * 8)
-#define LOG_BITS ((sizeof(NSUInteger)==8)?6:5)
-
-// A simple wrapper around CoreFoundation bit vectors to shield the rest of the implementation
-// from the specifics of the BitVector initialization and query functions.
-// This is fast, so there is no need to reinvent the wheel just yet.
-
-@interface ANTLRBitSet : NSObject < NSMutableCopying > {
-	CFMutableBitVectorRef bitVector;
-}
-
-#pragma mark Class Methods
-
-+ (ANTLRBitSet *) newANTLRBitSet;
-+ (ANTLRBitSet *) newANTLRBitSetWithType:(ANTLRTokenType)type;
-/** Construct a ANTLRBitSet given the size
- * @param nbits The size of the ANTLRBitSet in bits
- */
-+ (ANTLRBitSet *) newANTLRBitSetWithNBits:(NSUInteger)nbits;
-+ (ANTLRBitSet *) newANTLRBitSetWithArray:(NSMutableArray *)types;
-+ (ANTLRBitSet *) newANTLRBitSetWithBits:(const unsigned long long *)theBits Count:(NSUInteger)longCount;
-
-+ (ANTLRBitSet *) of:(NSUInteger)el;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c And4:(NSUInteger)d;
-
-#pragma mark Initializer
-
-- (ANTLRBitSet *) init;
-- (ANTLRBitSet *) initWithType:(ANTLRTokenType)type;
-- (ANTLRBitSet *) initWithNBits:(NSUInteger)nbits;
-- (ANTLRBitSet *) initWithBitVector:(CFMutableBitVectorRef)theBitVector;
-- (ANTLRBitSet *) initWithBits:(const unsigned long long const*)theBits Count:(NSUInteger)theCount;
-- (ANTLRBitSet *) initWithArrayOfBits:(NSArray *)theArray;
-
-#pragma mark Operations
-- (ANTLRBitSet *) or:(ANTLRBitSet *) aBitSet;
-- (void) orInPlace:(ANTLRBitSet *) aBitSet;
-- (void) add:(NSUInteger) bit;
-- (void) remove:(NSUInteger) bit;
-- (void) setAllBits:(BOOL) aState;
-
-- (NSInteger) numBits;
-- (NSUInteger) size;
-- (void) setSize:(NSUInteger) noOfWords;
-
-#pragma mark Informational
-- (unsigned long long) bitMask:(NSUInteger) bitNumber;
-- (BOOL) member:(NSUInteger)bitNumber;
-- (BOOL) isNil;
-- (NSString *) toString;
-- (NSString *) description;
-
-#pragma mark NSCopying support
-
-- (id) mutableCopyWithZone:(NSZone *) theZone;
-
-
-//private
-- (CFMutableBitVectorRef) _bitVector;
-@end
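
The header above presents ANTLRBitSet as a thin wrapper over a CoreFoundation bit vector, used by the runtime for token-type follow sets. A small usage sketch that sticks to the methods declared above:

    ANTLRBitSet *follow = [ANTLRBitSet newANTLRBitSetWithNBits:64];
    [follow add:5];
    [follow add:9];

    ANTLRBitSet *other  = [ANTLRBitSet of:5 And2:7];
    ANTLRBitSet *merged = [follow or:other];   // -or: returns a new set; -orInPlace: mutates the receiver
    NSLog(@"%@ member(7)=%d", [merged toString], (int)[merged member:7]);
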
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBufferedTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBufferedTokenStream.h
deleted file mode 100644
index 198a6f7..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBufferedTokenStream.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenStream.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRBitSet.h"
-
-@interface ANTLRBufferedTokenStream : NSObject <ANTLRTokenStream> 
-{
-id<ANTLRTokenSource> tokenSource;
-    
-    /** Record every single token pulled from the source so we can reproduce
-     *  chunks of it later.  The buffer in LookaheadStream overlaps sometimes
-     *  as its moving window moves through the input.  This list captures
-     *  everything so we can access complete input text.
-     */
-NSMutableArray *tokens;
-    
-    /** Track the last mark() call result value for use in rewind(). */
-NSInteger lastMarker;
-    
-    /** The index into the tokens list of the current token (next token
-     *  to consume).  tokens[p] should be LT(1).  p=-1 indicates need
-     *  to initialize with first token.  The ctor doesn't get a token.
-     *  First call to LT(1) or whatever gets the first token and sets p=0;
-     */
-NSInteger p;
-    
-NSInteger range; // how deep have we gone?
-    
-}
-@property (retain, getter=getTokenSource,setter=setTokenSource) id<ANTLRTokenSource> tokenSource;
-@property (retain, getter=getTokens,setter=setTokens) NSMutableArray *tokens;
-@property (assign, getter=getLastMarker,setter=setLastMarker) NSInteger lastMarker;
-@property (assign, getter=getIndex,setter=setIndex) NSInteger p;
-@property (assign, getter=getRange,setter=setRange) NSInteger range;
-
-+ (ANTLRBufferedTokenStream *) newANTLRBufferedTokenStream;
-+ (ANTLRBufferedTokenStream *) newANTLRBufferedTokenStreamWith:(id<ANTLRTokenSource>)aSource;
-- (id) initWithSource:(id<ANTLRTokenSource>)aSource;
-- (id) copyWithZone:(NSZone *)aZone;
-- (NSInteger) getIndex;
-- (void) setIndex:(NSInteger)index;
-- (NSInteger) getRange;
-- (void) setRange:(NSInteger)anInt;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) reset;
-- (void) seek:(NSInteger) index;
-- (NSInteger) size;
-- (void) consume;
-- (void) sync:(NSInteger) i;
-- (void) fetch:(NSInteger) n;
-- (id<ANTLRToken>) getToken:(NSInteger) i;
-- (NSMutableArray *)getFrom:(NSInteger)startIndex To:(NSInteger) stopIndex;
-- (NSInteger) LA:(NSInteger)k;
-- (id<ANTLRToken>) LT:(NSInteger) k;
-- (id<ANTLRToken>) LB:(NSInteger) k;
-- (void) setup;
-- (id<ANTLRTokenSource>) getTokenSource;
-- (void) setTokenSource:(id<ANTLRTokenSource>) aTokenSource;
-- (NSMutableArray *)getTokens;
-- (NSString *) getSourceName;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex With:(ANTLRBitSet *)types;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithList:(NSMutableArray *)types;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithType:(NSInteger)ttype;
-- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startIndex ToToken:(id<ANTLRToken>)stopIndex;
-- (void) fill;
-
-@end
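
The comments in this header explain that ANTLRBufferedTokenStream records every token pulled from its source so lookahead and rewinds can be replayed from the buffer. A sketch of the basic mark/rewind pattern; the token source `lexer` (typically a generated lexer conforming to id<ANTLRTokenSource>) is assumed to exist:

    ANTLRBufferedTokenStream *stream =
        [ANTLRBufferedTokenStream newANTLRBufferedTokenStreamWith:lexer];   // `lexer` is assumed

    NSInteger marker = [stream mark];     // remember the current position
    id<ANTLRToken> ahead = [stream LT:1]; // look ahead without losing the token
    [stream consume];
    [stream rewind:marker];               // return to the marked position
    NSInteger sameType = [stream LA:1];   // same token type as before the consume
    NSLog(@"%@ %ld", ahead, (long)sameType);
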
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBufferedTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBufferedTreeNodeStream.h
deleted file mode 100644
index 8618ea2..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRBufferedTreeNodeStream.h
+++ /dev/null
@@ -1,156 +0,0 @@
-//
-//  ANTLRBufferedTreeNodeStream.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRCommonTreeNodeStream.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRTreeIterator.h"
-#import "ANTLRIntArray.h"
-
-#define DEFAULT_INITIAL_BUFFER_SIZE 100
-#define INITIAL_CALL_STACK_SIZE 10
-
-#ifdef DONTUSENOMO
-@interface ANTLRStreamIterator : ANTLRTreeIterator
-{
-    NSInteger idx;
-    ANTLRBufferedTreeNodeStream input;
-    NSMutableArray *nodes;
-}
-
-+ (id) newANTLRStreamIterator:(ANTLRBufferedTreeNodeStream *) theStream;
-
-- (id) initWithStream:(ANTLRBufferedTreeNodeStream *) theStream;
-
-- (BOOL) hasNext;
-- (id) next;
-- (void) remove;
-@end
-#endif
-
-@interface ANTLRBufferedTreeNodeStream : NSObject <ANTLRTreeNodeStream> 
-{
-	id<ANTLRTree> up;
-	id<ANTLRTree> down;
-	id<ANTLRTree> eof;
-	
-	NSMutableArray *nodes;
-	
-	id<ANTLRTree> root; // root
-	
-	id<ANTLRTokenStream> tokens;
-	ANTLRCommonTreeAdaptor *adaptor;
-	
-	BOOL uniqueNavigationNodes;
-	NSInteger p;
-	NSInteger lastMarker;
-	ANTLRIntArray *calls;
-	
-	NSEnumerator *e;
-    id currentSymbol;
-	
-}
-
-@property (retain, getter=getUp, setter=setUp:) id<ANTLRTree> up;
-@property (retain, getter=getDown, setter=setDown:) id<ANTLRTree> down;
-@property (retain, getter=getEof, setter=setEof:) id<ANTLRTree> eof;
-@property (retain, getter=getNodes, setter=setNodes:) NSMutableArray *nodes;
-@property (retain, getter=getTreeSource, setter=setTreeSource:) id<ANTLRTree> root;
-@property (retain, getter=getTokenStream, setter=setTokenStream:) id<ANTLRTokenStream> tokens;
-@property (retain, getter=getAdaptor, setter=setAdaptor:) ANTLRCommonTreeAdaptor *adaptor;
-@property (assign, getter=getUniqueNavigationNodes, setter=setUniqueNavigationNodes:) BOOL uniqueNavigationNodes;
-@property (assign, getter=getIndex, setter=setIndex:) NSInteger p;
-@property (assign, getter=getLastMarker, setter=setLastMarker:) NSInteger lastMarker;
-@property (retain, getter=getCalls, setter=setCalls:) ANTLRIntArray *calls;
-@property (retain, getter=getEnum, setter=setEnum:) NSEnumerator *e;
-@property (retain, getter=getCurrentSymbol, setter=setCurrentSymbol:) id currentSymbol;
-
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTree>)tree;
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTreeAdaptor>)adaptor Tree:(id<ANTLRTree>)tree;
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTreeAdaptor>)adaptor Tree:(id<ANTLRTree>)tree withBufferSize:(NSInteger)initialBufferSize;
-
-#pragma mark Constructor
-- (id) initWithTree:(id<ANTLRTree>)tree;
-- (id) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)anAdaptor Tree:(id<ANTLRTree>)tree;
-- (id) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)anAdaptor Tree:(id<ANTLRTree>)tree WithBufferSize:(NSInteger)bufferSize;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-// protected methods. DO NOT USE
-#pragma mark Protected Methods
-- (void) fillBuffer;
-- (void) fillBufferWithTree:(id<ANTLRTree>) tree;
-- (NSInteger) getNodeIndex:(id<ANTLRTree>) node;
-- (void) addNavigationNode:(NSInteger) type;
-- (id) getNode:(NSInteger) i;
-- (id) LT:(NSInteger) k;
-- (id) getCurrentSymbol;
-- (id) LB:(NSInteger) i;
-#pragma mark General Methods
-- (NSString *) getSourceName;
-
-- (id<ANTLRTokenStream>) getTokenStream;
-- (void) setTokenStream:(id<ANTLRTokenStream>) tokens;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>) anAdaptor;
-
-- (BOOL)getUniqueNavigationNodes;
-- (void) setUniqueNavigationNodes:(BOOL)aVal;
-
-- (void) consume;
-- (NSInteger) LA:(NSInteger) i;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (NSInteger) getIndex;
-- (void) setIndex:(NSInteger) idx;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) seek:(NSInteger) idx;
-
-- (void) push:(NSInteger) i;
-- (NSInteger) pop;
-
-- (void) reset;
-- (NSUInteger) count;
-- (NSEnumerator *) objectEnumerator;
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-
-- (NSString *) toTokenTypeString;
-- (NSString *) toTokenString:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *) toStringFromNode:(id)aStart ToNode:(id)aStop;
-
-// getters and setters
-- (NSMutableArray *) getNodes;
-- (id<ANTLRTree>) getEof;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCharStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCharStream.h
deleted file mode 100755
index 379734b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCharStream.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRIntStream.h"
-
-#define	ANTLRCharStreamEOF -1
-
-
-@protocol ANTLRCharStream < ANTLRIntStream >
-
-- (NSString *) substringWithRange:(NSRange) theRange;
-
-/** Get the ith character of lookahead.  This is the same usually as
- *  LA(i).  This will be used for labels in the generated
- *  lexer code.  I'd prefer to return a char here type-wise, but it's
- *  probably better to be 32-bit clean and be consistent with LA.
- */
-- (NSInteger)LT:(NSInteger) i;
-
-// ANTLR tracks the line information automatically
-- (NSInteger) getLine;
-
-// Because this stream can rewind, we need to be able to reset the line
-- (void) setLine:(NSInteger) theLine;
-
-// The index of the character relative to the beginning of the line 0..n-1
-- (NSInteger) getCharPositionInLine;
-
-- (void) setCharPositionInLine:(NSInteger) thePos;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCharStreamState.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCharStreamState.h
deleted file mode 100644
index 2787c76..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCharStreamState.h
+++ /dev/null
@@ -1,58 +0,0 @@
-//
-//  ANTLRCharStreamState.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c)  2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRCharStreamState : NSObject
-{
-NSInteger p;
-NSInteger line;
-NSInteger charPositionInLine;
-}
-
-@property (getter=getP,setter=setP:) NSInteger p;
-@property (getter=getLine,setter=setLine:) NSInteger line;
-@property (getter=getCharPositionInLine,setter=setCharPositionInLine:) NSInteger charPositionInLine;
-
-+ newANTLRCharStreamState;
-
-- (id) init;
-
-- (NSInteger) getP;
-- (void) setP: (NSInteger) anIndex;
-
-- (NSInteger) getLine;
-- (void) setLine: (NSInteger) aLine;
-
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger)aCharPositionInLine;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonErrorNode.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonErrorNode.h
deleted file mode 100644
index 79badc1..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonErrorNode.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-//  ANTLRCommonErrorNode.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-#import "ANTLRTokenStream.h"
-//#import "ANTLRIntStream.h"
-//#import "ANTLRToken.h"
-#import "ANTLRUnWantedTokenException.h"
-
-@interface ANTLRCommonErrorNode : ANTLRCommonTree
-{
-id<ANTLRIntStream> input;
-id<ANTLRToken> startToken;
-id<ANTLRToken> stopToken;
-ANTLRRecognitionException *trappedException;
-}
-
-+ (id) newANTLRCommonErrorNode:(id<ANTLRTokenStream>)anInput
-                  From:(id<ANTLRToken>)startToken
-                    To:(id<ANTLRToken>)stopToken
-                     Exception:(ANTLRRecognitionException *) e;
-
-- (id) initWithInput:(id<ANTLRTokenStream>)anInput
-                From:(id<ANTLRToken>)startToken
-                  To:(id<ANTLRToken>)stopToken
-           Exception:(ANTLRRecognitionException *) e;
-- (BOOL) isNil;
-
-- (NSInteger) getType;
-
-- (NSString *) getText;
-
-- (NSString *) toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonToken.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonToken.h
deleted file mode 100755
index 8662378..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonToken.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRCharStream.h"
-
-@interface ANTLRCommonToken : NSObject < ANTLRToken > {
-	NSString *text;
-	NSInteger type;
-	// information about the Token's position in the input stream
-	NSUInteger line;
-	NSUInteger charPositionInLine;
-	NSUInteger channel;
-	// this token's position in the TokenStream
-	NSUInteger index;
-	
-	// indices into the CharStream to avoid copying the text
-	// can manually override the text by using -setText:
-	NSUInteger startIndex;
-	NSUInteger stopIndex;
-	// the actual input stream this token was found in
-	id<ANTLRCharStream> input;
-}
-
-@property (retain, getter=getText, setter=setText:) NSString *text;
-@property (assign, getter=getType, setter=setType:) NSInteger type;
-@property (assign, getter=getLine, setter=setLine:) NSUInteger line;
-@property (assign, getter=getCharPositionInLine, setter=setCharPositionInLine:) NSUInteger charPositionInLine;
-@property (assign, getter=getChannel, setter=setChannel:) NSUInteger channel;
-@property (assign, getter=getTokenIndex, setter=setTokenIndex:) NSUInteger index;
-@property (assign, getter=getStart, setter=setStart:) NSUInteger startIndex;
-@property (assign, getter=getStop, setter=setStop:) NSUInteger stopIndex;
-@property (retain, getter=getInput, setter=setInput:) id<ANTLRCharStream> input;
-
-+ (void) initialize;
-+ (ANTLRCommonToken *) newANTLRCommonToken;
-+ (ANTLRCommonToken *) newANTLRCommonToken:(id<ANTLRCharStream>)anInput
-                                      Type:(NSInteger)aTType
-                                   Channel:(NSInteger)aChannel
-                                     Start:(NSInteger)aStart
-                                      Stop:(NSInteger)aStop;
-+ (ANTLRCommonToken *) newANTLRCommonToken:(ANTLRTokenType)aType;
-+ (id<ANTLRToken>) newANTLRCommonToken:(NSInteger)tokenType Text:(NSString *)tokenText;
-+ (id<ANTLRToken>) newANTLRCommonTokenWithToken:(id<ANTLRToken>)fromToken;
-+ (id<ANTLRToken>) eofToken;
-+ (id<ANTLRToken>) skipToken;
-+ (id<ANTLRToken>) invalidToken;
-+ (ANTLRTokenChannel) defaultChannel;
-
-// designated initializer. This is used as the default way to initialize a Token in the generated code.
-- (ANTLRCommonToken *) init;
-- (ANTLRCommonToken *) initWithInput:(id<ANTLRCharStream>)anInput
-                                Type:(NSInteger)aTType
-                             Channel:(NSInteger)aChannel
-                               Start:(NSInteger)theStart
-                                Stop:(NSInteger)theStop;
-- (ANTLRCommonToken *) initWithToken:(ANTLRCommonToken *)aToken;
-- (ANTLRCommonToken *) initWithType:(ANTLRTokenType)aType;
-- (ANTLRCommonToken *) initWithType:(ANTLRTokenType)aTType Text:(NSString *)tokenText;
-
-- (id<ANTLRCharStream>) getInput;
-- (void) setInput: (id<ANTLRCharStream>) anInput;
-
-- (NSUInteger) getStart;
-- (void) setStart: (NSUInteger) aStart;
-
-- (NSUInteger) getStop;
-- (void) setStop: (NSUInteger) aStop;
-
-// the index of this Token into the TokenStream
-- (NSUInteger) getTokenIndex;
-- (void) setTokenIndex: (NSUInteger) aTokenIndex;
-
-// conform to NSCopying
-- (id) copyWithZone:(NSZone *)theZone;
-
-- (NSString *) description;
-- (NSString *) toString;
-
-@end
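
ANTLRCommonToken is the default token implementation: beyond type and text it carries line/column and the start/stop character indexes into the originating ANTLRCharStream. A minimal construction sketch using only the factory and accessor methods declared above; the token type value 4 is an arbitrary placeholder:

    ANTLRCommonToken *tok =
        (ANTLRCommonToken *)[ANTLRCommonToken newANTLRCommonToken:4 Text:@"hello"];
    [tok setLine:1];
    [tok setCharPositionInLine:0];
    NSLog(@"type=%ld text=%@ line=%lu",
          (long)[tok getType], [tok getText], (unsigned long)[tok getLine]);
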
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTokenStream.h
deleted file mode 100755
index 59f9d5e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTokenStream.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenStream.h"
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRBufferedTokenStream.h"
-
-@interface ANTLRCommonTokenStream : ANTLRBufferedTokenStream < ANTLRTokenStream >
-{
-	NSMutableDictionary *channelOverride;
-	NSInteger channel;
-}
-
-@property (retain, getter=getChannelOverride,setter=setChannelOverride) NSMutableDictionary *channelOverride;
-@property (assign, getter=getChannel,setter=setChannel) NSInteger channel;
-
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStream;
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStreamWithTokenSource:(id<ANTLRTokenSource>)theTokenSource;
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStreamWithTokenSource:(id<ANTLRTokenSource>)theTokenSource
-                                                               Channel:(NSInteger)aChannel;
-
-- (id) init;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource Channel:(NSInteger)aChannel;
-
-- (id<ANTLRTokenSource>) getTokenSource;
-- (void) setTokenSource: (id<ANTLRTokenSource>) aTokenSource;
-
-- (void) consume;
-- (id<ANTLRToken>) LT:(NSInteger)k;
-- (id<ANTLRToken>) LB:(NSInteger)k;
-
-- (NSInteger) skipOffChannelTokens:(NSInteger) i;
-- (NSInteger) skipOffChannelTokensReverse:(NSInteger) i;
-
-- (void)setup;
-
-- (NSArray *) tokensInRange:(NSRange)aRange;
-- (NSArray *) tokensInRange:(NSRange)aRange inBitSet:(ANTLRBitSet *)aBitSet;
-- (NSArray *) tokensInRange:(NSRange)aRange withTypes:(NSArray *)tokenTypes;
-- (NSArray *) tokensInRange:(NSRange)aRange withType:(NSInteger)tokenType;
-
-- (id<ANTLRToken>) getToken:(NSInteger)i;
-
-- (NSInteger) size;
-- (NSInteger) getIndex;
-- (void) rewind;
-- (void) rewind:(NSInteger)marker;
-- (void) seek:(NSInteger)index;
-
-- (NSString *) toString;
-- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger)getChannel;
-- (void)setChannel:(NSInteger)aChannel;
-
-- (NSMutableDictionary *)getChannelOverride;
-- (void)setChannelOverride:(NSMutableDictionary *)anOverride;
-
-@end
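
ANTLRCommonTokenStream narrows the buffered token stream to a single channel (the default channel unless another is requested at construction). A usage sketch; the generated lexer `lexer` and the EOF constant name ANTLRTokenTypeEOF are assumptions here, not declarations taken from this header:

    ANTLRCommonTokenStream *tokens =
        [ANTLRCommonTokenStream newANTLRCommonTokenStreamWithTokenSource:lexer];   // `lexer` is assumed

    while ([tokens LA:1] != ANTLRTokenTypeEOF) {   // EOF constant name is assumed
        id next = [tokens LT:1];                   // only on-channel tokens are returned
        NSLog(@"%@", next);
        [tokens consume];
    }
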
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTree.h
deleted file mode 100755
index 0966051..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTree.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonToken.h"
-#import "ANTLRBaseTree.h"
-
-@interface ANTLRCommonTree : ANTLRBaseTree <ANTLRTree> {
-	ANTLRCommonToken *token;
-	NSInteger startIndex;
-	NSInteger stopIndex;
-    ANTLRCommonTree *parent;
-    NSInteger childIndex;
-}
-
-@property (retain, getter=getANTLRCommonToken, setter=setANTLRCommonToken) ANTLRCommonToken *token;
-@property (assign, getter=getTokenStartIndex, setter=setTokenStartIndex) NSInteger startIndex;
-@property (assign, getter=getTokenStopIndex, setter=setTokenStopIndex) NSInteger stopIndex;
-@property (retain, getter=getParent, setter=setParent:) ANTLRCommonTree *parent;
-@property (assign, getter=getChildIndex, setter=setChildIndex) NSInteger childIndex;
-
-+ (ANTLRCommonTree *) invalidNode;
-+ (ANTLRCommonTree *) newANTLRCommonTree;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithTree:(ANTLRCommonTree *)aTree;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithToken:(ANTLRCommonToken *)aToken;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithTokenType:(NSInteger)tokenType;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithTokenType:(NSInteger)aTType Text:(NSString *)theText;
-#ifdef DONTUSEYET
-+ (id<ANTLRTree>) newANTLRCommonTreeWithTokenType:(NSInteger)tokenType;
-+ (id<ANTLRTree>) newANTLRCommonTreeWithToken:(id<ANTLRToken>)fromToken TokenType:(NSInteger)tokenType;
-+ (id<ANTLRTree>) newANTLRCommonTreeWithToken:(id<ANTLRToken>)fromToken TokenType:(NSInteger)tokenType Text:(NSString *)tokenText;
-+ (id<ANTLRTree>) newANTLRCommonTreeWithToken:(id<ANTLRToken>)fromToken Text:(NSString *)tokenText;
-#endif
-
-- (id) init;
-- (id) initWithTreeNode:(ANTLRCommonTree *)aNode;
-- (id) initWithToken:(ANTLRCommonToken *)aToken;
-- (id) initWithTokenType:(NSInteger)aTokenType;
-- (id) initWithTokenType:(NSInteger)aTokenType Text:(NSString *)theText;
-
-- (id<ANTLRTree>) copyWithZone:(NSZone *)aZone;
-
-- (BOOL) isNil;
-
-- (ANTLRCommonToken *) getToken;
-- (void) setToken:(ANTLRCommonToken *)aToken;
-- (id<ANTLRTree>) dupNode;
-- (NSInteger) getType;
-- (NSString *) getText;
-- (NSUInteger) getLine;
-- (NSUInteger) getCharPositionInLine;
-- (ANTLRCommonTree *) getParent;
-- (void) setParent:(ANTLRCommonTree *) t;
-
-#ifdef DONTUSENOMO
-- (NSString *) treeDescription;
-#endif
-- (NSString *) description;
-- (void) setUnknownTokenBoundaries;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex: (NSInteger) aStartIndex;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex: (NSInteger) aStopIndex;
-
-@end
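
ANTLRCommonTree is the stock tree node: it wraps an ANTLRCommonToken payload and tracks its parent, child index, and token start/stop boundaries. A one-node sketch using only the declarations above (child-list operations live on ANTLRBaseTree, which is outside this hunk); the token type 5 is an arbitrary placeholder:

    ANTLRCommonTree *node =
        [ANTLRCommonTree newANTLRCommonTreeWithTokenType:5 Text:@"x"];
    NSLog(@"type=%ld text=%@ isNil=%d",
          (long)[node getType], [node getText], (int)[node isNil]);
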
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTreeAdaptor.h
deleted file mode 100755
index 53287e6..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTreeAdaptor.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRBaseTreeAdaptor.h"
-
-@interface ANTLRCommonTreeAdaptor : ANTLRBaseTreeAdaptor {
-}
-
-+ (id<ANTLRTree>) newEmptyTree;
-+ (ANTLRCommonTreeAdaptor *)newANTLRCommonTreeAdaptor;
-- (id) init;
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)t;    
-- (ANTLRCommonTree *)createTree:(ANTLRCommonToken *)aToken;
-- (ANTLRCommonTree *)createTree:(NSInteger)tokenType Text:(NSString *)text;
-- (id<ANTLRToken>)createToken:(NSInteger)tokenType Text:(NSString *)text;
-- (void) setTokenBoundaries:(id<ANTLRTree>)t From:(id<ANTLRToken>)startToken To:(id<ANTLRToken>)stopToken;
-- (NSInteger)getTokenStartIndex:(id<ANTLRTree>)t;
-- (NSInteger)getTokenStopIndex:(id<ANTLRTree>)t;
-- (NSString *)getText:(id<ANTLRTree>)t;
-- (void)setText:(id<ANTLRTree>)t Text:(NSString *)text;
-- (NSInteger)getType:(id<ANTLRTree>)t;
-- (void) setType:(id<ANTLRTree>)t Type:(NSInteger)tokenType;
-- (id<ANTLRToken>)getToken:(id<ANTLRTree>)t;
-- (id<ANTLRTree>)getChild:(id<ANTLRTree>)t At:(NSInteger)i;
-- (void) setChild:(id<ANTLRTree>)t At:(NSInteger)i Child:(id<ANTLRTree>)child;
-- (NSInteger)getChildCount:(id<ANTLRTree>)t;
-- (id<ANTLRTree>)getParent:(id<ANTLRTree>)t;
-- (void)setParent:(id<ANTLRTree>)t With:(id<ANTLRTree>)parent;
-- (NSInteger)getChildIndex:(id<ANTLRTree>)t;
-- (void)setChildIndex:(id<ANTLRTree>)t With:(NSInteger)index;
-- (void)replaceChildren:(id<ANTLRTree>)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id<ANTLRTree>)t;
-- (id)copyWithZone:(NSZone *)zone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTreeNodeStream.h
deleted file mode 100755
index 4c68f2e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRCommonTreeNodeStream.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-#import "ANTLRCommonTreeNodeStream.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRTreeNodeStream.h"
-#import "ANTLRTreeIterator.h"
-#import "ANTLRIntArray.h"
-
-@interface ANTLRCommonTreeNodeStream : ANTLRLookaheadStream <ANTLRTreeNodeStream> {
-#define DEFAULT_INITIAL_BUFFER_SIZE 100
-#define INITIAL_CALL_STACK_SIZE 10
-    
-/** Pull nodes from which tree? */
-id root;
-    
-/** If this tree (root) was created from a token stream, track it. */
-id <ANTLRTokenStream> tokens;
-    
-	/** What tree adaptor was used to build these trees */
-ANTLRCommonTreeAdaptor *adaptor;
-    
-/** The tree iterator we using */
-ANTLRTreeIterator *it;
-    
-/** Stack of indexes used for push/pop calls */
-ANTLRIntArray *calls;    
-    
-/** Tree (nil A B C) trees like flat A B C streams */
-BOOL hasNilRoot;
-    
-/** Tracks tree depth.  Level=0 means we're at root node level. */
-NSInteger level;
-}
-@property (retain, getter=getRoot, setter=setRoot:) ANTLRCommonTree *root;
-@property (retain, getter=getTokens,setter=setTokens:) id<ANTLRTokenStream> tokens;
-@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) ANTLRCommonTreeAdaptor *adaptor;
-
-+ (ANTLRCommonTreeNodeStream *) newANTLRCommonTreeNodeStream:(ANTLRCommonTree *)theTree;
-+ (ANTLRCommonTreeNodeStream *) newANTLRCommonTreeNodeStream:(id<ANTLRTreeAdaptor>)anAdaptor Tree:(ANTLRCommonTree *)theTree;
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)adaptor Tree:(ANTLRCommonTree *)theTree;
-    
-- (void) reset;
-    
-    /** Pull elements from tree iterator.  Track tree level 0..max_level.
-     *  If nil rooted tree, don't give initial nil and DOWN nor final UP.
-     */
-- (id) nextElement;
-    
-- (BOOL) isEOF:(id<ANTLRTree>) o;
-- (void) setUniqueNavigationNodes:(BOOL) uniqueNavigationNodes;
-    
-- (id) getTreeSource;
-    
-- (NSString *) getSourceName;
-    
-- (id<ANTLRTokenStream>) getTokenStream;
-    
-- (void) setTokenStream:(id<ANTLRTokenStream>) tokens;
-    
-- (ANTLRCommonTreeAdaptor *) getTreeAdaptor;
-    
-- (void) setTreeAdaptor:(ANTLRCommonTreeAdaptor *) adaptor;
-    
-- (NSInteger) LA:(NSInteger) i;
-    
-    /** Make stream jump to a new location, saving old location.
-     *  Switch back with pop().
-     */
-- (ANTLRCommonTree *)getNode:(NSInteger) i;
-
-- (void) push:(NSInteger) index;
-    
-    /** Seek back to previous index saved during last push() call.
-     *  Return top of stack (return index).
-     */
-- (NSInteger) pop;
-    
-// TREE REWRITE INTERFACE
-    
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-    
-- (NSString *) toStringFromNode:(id<ANTLRTree>)startNode ToNode:(id<ANTLRTree>)stopNode;
-
-/** For debugging; destructive: moves tree iterator to end. */
-- (NSString *) toTokenTypeString;
-
-@end
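
ANTLRCommonTreeNodeStream flattens a tree into a node stream, emitting the DOWN/UP navigation nodes described earlier so a tree grammar can match structure. A small sketch; `root` is assumed to be an ANTLRCommonTree built elsewhere (for example by a parser, or via a tree adaptor as sketched above):

    ANTLRCommonTreeNodeStream *nodes =
        [ANTLRCommonTreeNodeStream newANTLRCommonTreeNodeStream:root];   // `root` is assumed

    // Debug dump of the flattened node/DOWN/UP token types.
    // Per the header comment this is destructive: it moves the tree iterator to the end.
    NSLog(@"%@", [nodes toTokenTypeString]);
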
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDFA.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDFA.h
deleted file mode 100755
index 9094a3d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDFA.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRNoViableAltException.h"
-
-@interface ANTLRDFA : NSObject {
-	// the tables are set by subclasses to their own static versions.
-	const int *eot;
-	const int *eof;
-	const unichar *min;
-	const unichar *max;
-	const int *accept;
-	const int *special;
-	const int **transition;
-	
-	ANTLRBaseRecognizer *recognizer;
-	NSInteger decisionNumber;
-    NSInteger len;
-}
-
-@property (retain, getter=getRecognizer,setter=setRecognizer:) ANTLRBaseRecognizer *recognizer;
-@property (assign, getter=getDecision,setter=setDecision:) NSInteger decisionNumber;
-@property (assign, getter=getLen,setter=setLen:) NSInteger len;
-
-- (id) initWithRecognizer:(id) theRecognizer;
-// simulate the DFA using the static tables and predict an alternative
-- (NSInteger) predict:(id<ANTLRCharStream>)anInput;
-- (void) noViableAlt:(NSInteger)state Stream:(id<ANTLRIntStream>)anInput;
-
-- (NSInteger) specialStateTransition:(NSInteger)state Stream:(id<ANTLRIntStream>)anInput;
-// - (NSInteger) specialStateTransition:(NSInteger) state;
-//- (unichar) specialTransition:(unichar) state symbol:(NSInteger) symbol;
-
-// hook for debugger support
-- (void) error:(ANTLRNoViableAltException *)nvae;
-
-- (NSString *) description;
-- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment;
-
-+ (void) setIsEmittingDebugInfo:(BOOL) shouldEmitDebugInfo;
-
-- (NSInteger)getDecision;
-- (void)setDecision:(NSInteger)aDecison;
-
-- (ANTLRBaseRecognizer *)getRecognizer;
-- (void)setRecognizer:(ANTLRBaseRecognizer *)aRecognizer;
-- (NSInteger)length;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebug.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebug.h
deleted file mode 100755
index 87383c9..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebug.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDebugEventListener.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugParser.h"
-#import "ANTLRDebugTokenStream.h"
-#import "ANTLRDebugTreeParser.h"
-#import "ANTLRDebugTreeNodeStream.h"
-#import "ANTLRDebugTreeAdaptor.h"
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugEventListener.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugEventListener.h
deleted file mode 100755
index c2bee6c..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugEventListener.h
+++ /dev/null
@@ -1,275 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRDebugEventListener 
-
-#define ANTLRDebugProtocolVersion 1
-
-/** The parser has just entered a rule.  No decision has been made about
-*  which alt is predicted.  This is fired AFTER init actions have been
-*  executed.  Attributes are defined and available etc...
-*/
-- (void) enterRule:(NSString *)ruleName;
-
-/** Because rules can have lots of alternatives, it is very useful to
-*  know which alt you are entering.  This is 1..n for n alts.
-*/
-- (void) enterAlt:(NSInteger)alt;
-
-/** This is the last thing executed before leaving a rule.  It is
-*  executed even if an exception is thrown.  This is triggered after
-*  error reporting and recovery have occurred (unless the exception is
-*  not caught in this rule).  This implies an "exitAlt" event.
-*/
-- (void) exitRule:(NSString *)ruleName;
-
-/** Track entry into any (...) subrule or other EBNF construct */
-- (void) enterSubRule:(NSInteger)decisionNumber;
-
-- (void) exitSubRule:(NSInteger)decisionNumber;
-
-/** Every decision, fixed k or arbitrary, has an enter/exit event
-*  so that a GUI can easily track what LT/consume events are
-*  associated with prediction.  You will see a single enter/exit
-*  subrule but multiple enter/exit decision events, one for each
-*  loop iteration.
-*/
-- (void) enterDecision:(NSInteger)decisionNumber;
-
-- (void) exitDecision:(NSInteger)decisionNumber;
-
-/** An input token was consumed; matched by any kind of element.
-*  Trigger after the token was matched by things like match(), matchAny().
-*/
-- (void) consumeToken:(id<ANTLRToken>)t;
-
-/** An off-channel input token was consumed.
-*  Trigger after the token was matched by things like match(), matchAny().
-*  (unless of course the hidden token is first stuff in the input stream).
-*/
-- (void) consumeHiddenToken:(id<ANTLRToken>)t;
-
-/** Somebody (anybody) looked ahead.  Note that this actually gets
-*  triggered by both LA and LT calls.  The debugger will want to know
-*  which Token object was examined.  Like consumeToken, this indicates
-*  what token was seen at that depth.  A remote debugger cannot look
-*  ahead into a file it doesn't have so LT events must pass the token
-*  even if the info is redundant.
-*/
-- (void) LT:(NSInteger)i foundToken:(id<ANTLRToken>)t;
-
-/** The parser is going to look arbitrarily ahead; mark this location,
-*  the token stream's marker is sent in case you need it.
-*/
-- (void) mark:(NSInteger)marker;
-
-/** After an arbitrarily long lookahead as with a cyclic DFA (or with
-*  any backtrack), this informs the debugger that stream should be
-*  rewound to the position associated with marker.
-*/
-- (void) rewind:(NSInteger)marker;
-
-/** Rewind to the input position of the last marker.
-*  Used currently only after a cyclic DFA and just
-*  before starting a sem/syn predicate to get the
-*  input position back to the start of the decision.
-*  Do not "pop" the marker off the state.  mark(i)
-*  and rewind(i) should balance still.
-*/
-- (void) rewind;
-
-- (void) beginBacktrack:(NSInteger)level;
-
-- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful;
-
-/** To watch a parser move through the grammar, the parser needs to
-*  inform the debugger what line/charPos it is passing in the grammar.
-*  For now, this does not know how to switch from one grammar to the
-*  other and back for island grammars etc...
-*
-*  This should also allow breakpoints because the debugger can stop
-*  the parser whenever it hits this line/pos.
-*/
-- (void) locationLine:(NSInteger)line column:(NSInteger)pos;
-
-/** A recognition exception occurred such as NoViableAltException.  I made
-*  this a generic event so that I can alter the exception hierarchy later
-*  without having to alter all the debug objects.
-*
-*  Upon error, the stack of enter rule/subrule must be properly unwound.
-*  If no viable alt occurs it is within an enter/exit decision, which
-*  also must be rewound.  Even the rewind for each mark must be unwound.
-*  In the Java target this is pretty easy using try/finally, if a bit
-*  ugly in the generated code.  The rewind is generated in DFA.predict()
-*  actually so no code needs to be generated for that.  For languages
-*  w/o this "finally" feature (C++?), the target implementor will have
-*  to build an event stack or something.
-*
-*  Across a socket for remote debugging, only the RecognitionException
-*  data fields are transmitted.  The token object or whatever that
-*  caused the problem was the last object referenced by LT.  The
-*  immediately preceding LT event should hold the unexpected Token or
-*  char.
-*
-*  Here is a sample event trace for grammar:
-*
-*  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
-*    | D
-*    ;
-*
-*  The sequence for this rule (with no viable alt in the subrule) for
-*  input 'c c' (there are 3 tokens) is:
-*
-*		commence
-*		LT(1)
-*		enterRule b
-*		location 7 1
-*		enter decision 3
-*		LT(1)
-*		exit decision 3
-*		enterAlt1
-*		location 7 5
-*		LT(1)
-*		consumeToken [c/<4>,1:0]
-*		location 7 7
-*		enterSubRule 2
-*		enter decision 2
-*		LT(1)
-*		LT(1)
-*		recognitionException NoViableAltException 2 1 2
-*		exit decision 2
-*		exitSubRule 2
-*		beginResync
-*		LT(1)
-*		consumeToken [c/<4>,1:1]
-*		LT(1)
-*		endResync
-*		LT(-1)
-*		exitRule b
-*		terminate
-*/
-- (void) recognitionException:(ANTLRRecognitionException *)e;
-
-/** Indicates the recognizer is about to consume tokens to resynchronize
-*  the parser.  Any consume events from here until the recovered event
-*  are not part of the parse--they are dead tokens.
-*/
-- (void) beginResync;
-
-/** Indicates that the recognizer has finished consuming tokens in order
-*  to resynchronize.  There may be multiple beginResync/endResync pairs
-*  before the recognizer comes out of errorRecovery mode (in which
-*  multiple errors are suppressed).  This will be useful
-*  in a GUI where you probably want to grey out tokens that are consumed
-*  but not matched to anything in the grammar.  Anything between
-*  a beginResync/endResync pair was tossed out by the parser.
-*/
-- (void) endResync;
-
-/** A semantic predicate was evaluated with this result and action text */
-- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result;
-
-/** Announce that parsing has begun.  Not technically useful except for
-*  sending events over a socket.  A GUI for example will launch a thread
-*  to connect and communicate with a remote parser.  The thread will want
-*  to notify the GUI when a connection is made.  ANTLR parsers
-*  trigger this upon entry to the first rule (the ruleLevel is used to
-*  figure this out).
-*/
-- (void) commence;
-
-/** Parsing is over; successfully or not.  Mostly useful for telling
-*  remote debugging listeners that it's time to quit.  When the rule
-*  invocation level goes to zero at the end of a rule, we are done
-*  parsing.
-*/
-- (void) terminate;
-
-
-// T r e e  P a r s i n g
-
-/** Input for a tree parser is an AST, but we know nothing for sure
-*  about a node except its type and text (obtained from the adaptor).
-*  This is the analog of the consumeToken method.  Again, the ID is
-*  the hashCode usually of the node so it only works if hashCode is
-*  not implemented.  If the type is UP or DOWN, then
-*  the ID is not really meaningful as it's fixed--there is
-*  just one UP node and one DOWN navigation node.
-*/
-- (void) consumeNode:(NSInteger)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-/** The tree parser looked ahead.  If the type is UP or DOWN,
-*  then the ID is not really meaningful as it's fixed--there is
-*  just one UP node and one DOWN navigation node.
-*/
-- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-
-// A S T  E v e n t s
-
-/** A nil was created (even nil nodes have a unique ID...
-*  they are not "null" per se).  As of 4/28/2006, this
-*  seems to be uniquely triggered when starting a new subtree
-*  such as when entering a subrule in automatic mode and when
-*  building a tree in rewrite mode.
-*/
-- (void) createNilNode:(unsigned)hash;
-
-/** Announce a new node built from text */
-- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type;
-
-/** Announce a new node built from an existing token */
-- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex;
-
-/** Make a node the new root of an existing root.  See
-*
-*  Note: the newRootID parameter is possibly different
-*  than the TreeAdaptor.becomeRoot() newRoot parameter.
-*  In our case, it will always be the result of calling
-*  TreeAdaptor.becomeRoot() and not root_n or whatever.
-*
-*  The listener should assume that this event occurs
-*  only when the current subrule (or rule) subtree is
-*  being reset to newRootID.
-*
-*/
-- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash;
-
-/** Make childID a child of rootID.
-*  @see org.antlr.runtime.tree.TreeAdaptor.addChild()
-*/
-- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash;
-
-/** Set the token start/stop token index for a subtree root or node */
-- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSUInteger)tokenStartIndex To:(NSUInteger)tokenStopIndex;
-
-- (void) waitForDebuggerConnection;
-
-@end
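
For readers skimming the deleted ObjC runtime, the protocol above is the whole contract a debugger
integration has to satisfy. As a hedged illustration (not taken from these sources), a minimal
tracing listener might look like the sketch below; only a handful of methods are shown, and the
class name is hypothetical.

#import <Foundation/Foundation.h>
#import "ANTLRDebugEventListener.h"

// Minimal sketch: log a few of the protocol's events to the console.
@interface TraceListener : NSObject <ANTLRDebugEventListener>
@end

@implementation TraceListener
- (void) commence                         { NSLog(@"commence"); }
- (void) enterRule:(NSString *)ruleName   { NSLog(@"enterRule %@", ruleName); }
- (void) exitRule:(NSString *)ruleName    { NSLog(@"exitRule  %@", ruleName); }
- (void) consumeToken:(id<ANTLRToken>)t   { NSLog(@"consume   %@", t); }
- (void) recognitionException:(ANTLRRecognitionException *)e { NSLog(@"error: %@", e); }
- (void) terminate                        { NSLog(@"terminate"); }
// Every remaining method declared by the protocol still needs at least a
// no-op implementation, because the debug parser/proxy invokes all of them.
@end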
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugEventProxy.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugEventProxy.h
deleted file mode 100755
index 59bf67b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugEventProxy.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRDebugEventListener.h"
-#import <sys/socket.h>
-#import <netinet/in.h>
-#import <netinet/tcp.h>
-#include <arpa/inet.h>
-
-// default port for ANTLRWorks
-#define DEFAULT_DEBUGGER_PORT 0xC001
-
-@interface ANTLRDebugEventProxy : NSObject <ANTLRDebugEventListener> {
-	int serverSocket;
-	
-	struct sockaddr debugger_sockaddr;
-	socklen_t debugger_socklen;
-	int debuggerSocket;
-	NSFileHandle *debuggerFH;
-	
-	NSString *grammarName;
-	int debuggerPort;
-}
-
-- (id) init;
-- (id) initWithGrammarName:(NSString *)aGrammarName debuggerPort:(NSInteger)aPort;
-- (void) waitForDebuggerConnection;
-- (void) waitForAck;
-- (void) sendToDebugger:(NSString *)message;
-- (void) sendToDebugger:(NSString *)message waitForResponse:(BOOL)wait;
-
-- (NSInteger) serverSocket;
-- (void) setServerSocket: (NSInteger) aServerSocket;
-
-- (NSInteger) debuggerSocket;
-- (void) setDebuggerSocket: (NSInteger) aDebuggerSocket;
-
-- (NSString *) grammarName;
-- (void) setGrammarName: (NSString *) aGrammarName;
-
-- (NSInteger) debuggerPort;
-- (void) setDebuggerPort: (NSInteger) aDebuggerPort;
-
-- (NSString *) escapeNewlines:(NSString *)aString;
-
-#pragma mark -
-
-#pragma mark DebugEventListener Protocol
-- (void) enterRule:(NSString *)ruleName;
-- (void) enterAlt:(NSInteger)alt;
-- (void) exitRule:(NSString *)ruleName;
-- (void) enterSubRule:(NSInteger)decisionNumber;
-- (void) exitSubRule:(NSInteger)decisionNumber;
-- (void) enterDecision:(NSInteger)decisionNumber;
-- (void) exitDecision:(NSInteger)decisionNumber;
-- (void) consumeToken:(id<ANTLRToken>)t;
-- (void) consumeHiddenToken:(id<ANTLRToken>)t;
-- (void) LT:(NSInteger)i foundToken:(id<ANTLRToken>)t;
-- (void) mark:(NSInteger)marker;
-- (void) rewind:(NSInteger)marker;
-- (void) rewind;
-- (void) beginBacktrack:(NSInteger)level;
-- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful;
-- (void) locationLine:(NSInteger)line column:(NSInteger)pos;
-- (void) recognitionException:(ANTLRRecognitionException *)e;
-- (void) beginResync;
-- (void) endResync;
-- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result;
-- (void) commence;
-- (void) terminate;
-
-
-#pragma mark Tree Parsing
-- (void) consumeNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-
-#pragma mark AST Events
-
-- (void) createNilNode:(unsigned)hash;
-- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type;
-- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex;
-- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash;
-- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash;
-- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSInteger)tokenStartIndex To:(NSInteger)tokenStopIndex;
-
-@end
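
A hedged usage sketch for the proxy declared above; the grammar name and variable names are
illustrative, and only the methods and the DEFAULT_DEBUGGER_PORT macro come from the header.

// Open a proxy for grammar "T" on the default ANTLRWorks port (0xC001 = 49153)
// and block until the debugger attaches before parsing starts.
ANTLRDebugEventProxy *proxy =
    [[ANTLRDebugEventProxy alloc] initWithGrammarName:@"T"
                                         debuggerPort:DEFAULT_DEBUGGER_PORT];
[proxy waitForDebuggerConnection];
// From here on, each ANTLRDebugEventListener event is serialized as a text
// message and written to the connected socket via sendToDebugger:.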
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugParser.h
deleted file mode 100755
index b23ff50..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugParser.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugTokenStream.h"
-
-@interface ANTLRDebugParser : ANTLRParser {
-	id<ANTLRDebugEventListener> debugListener;
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream;
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-			  debuggerPort:(NSInteger)portNumber;
-// designated initializer
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-			 debugListener:(id<ANTLRDebugEventListener>)theDebugListener
-			  debuggerPort:(NSInteger)portNumber;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-@end
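
And a hedged sketch of the designated initializer declared above; aTokenStream and aListener
stand for an existing id<ANTLRTokenStream> and id<ANTLRDebugEventListener> (for example, the
proxy shown earlier).

ANTLRDebugParser *parser =
    [[ANTLRDebugParser alloc] initWithTokenStream:aTokenStream
                                    debugListener:aListener
                                     debuggerPort:DEFAULT_DEBUGGER_PORT];
// With a listener supplied, every rule entry/exit, token consume and error in
// this parser is reported through the ANTLRDebugEventListener protocol.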
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTokenStream.h
deleted file mode 100755
index 335b002..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRDebugTokenStream.h"
-#import "ANTLRDebugEventListener.h"
-
-@interface ANTLRDebugTokenStream : NSObject <ANTLRTokenStream>
-{
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTokenStream> input;
-	BOOL initialStreamState;
-    NSInteger lastMarker;
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream debugListener:(id<ANTLRDebugEventListener>)debugger;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTokenStream>) getInput;
-- (void) setInput:(id<ANTLRTokenStream>)aTokenStream;
-
-- (void) consume;
-- (id<ANTLRToken>) getToken:(NSInteger)index;
-- (NSInteger) getIndex;
-- (void) release:(NSInteger)marker;
-- (void) seek:(NSInteger)index;
-- (NSInteger) size;
-- (id<ANTLRTokenSource>) getTokenSource;
-- (NSString *) getSourceName;
-- (NSString *) toString;
-- (NSString *) toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTreeAdaptor.h
deleted file mode 100755
index 41965fa..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTreeAdaptor.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRBaseTreeAdaptor.h"
-#import "ANTLRDebugEventListener.h"
-
-@interface ANTLRDebugTreeAdaptor : ANTLRBaseTreeAdaptor {
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTreeAdaptor> treeAdaptor;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor debugListener:(id<ANTLRDebugEventListener>)aDebugListener;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor: (id<ANTLRTreeAdaptor>) aTreeAdaptor;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTreeNodeStream.h
deleted file mode 100755
index 70f9939..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTreeNodeStream.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRDebugEventListener.h"
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTreeNodeStream.h"
-
-@interface ANTLRDebugTreeNodeStream : NSObject <ANTLRTreeNodeStream> {
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTreeAdaptor> treeAdaptor;
-	id<ANTLRTreeNodeStream> input;
-	BOOL initialStreamState;
-}
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream debugListener:(id<ANTLRDebugEventListener>)debugger;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTreeNodeStream>) getInput;
-- (void) setInput: (id<ANTLRTreeNodeStream>) aTreeNodeStream;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor: (id<ANTLRTreeAdaptor>) aTreeAdaptor;
-
-#pragma mark ANTLRTreeNodeStream conformance
-
-- (id) LT:(NSInteger)k;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setUniqueNavigationNodes:(BOOL)flag;
-
-#pragma mark ANTLRIntStream conformance
-- (void) consume;
-- (NSInteger) LA:(NSUInteger) i;
-- (NSUInteger) mark;
-- (NSUInteger) getIndex;
-- (void) rewind:(NSUInteger) marker;
-- (void) rewind;
-- (void) release:(NSUInteger) marker;
-- (void) seek:(NSUInteger) index;
-- (NSUInteger) size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTreeParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTreeParser.h
deleted file mode 100755
index cbeac76..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRDebugTreeParser.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeParser.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugTreeNodeStream.h"
-
-@interface ANTLRDebugTreeParser : ANTLRTreeParser {
-	id<ANTLRDebugEventListener> debugListener;
-}
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream;
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-				 debuggerPort:(NSInteger)portNumber;
-	// designated initializer
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-				debugListener:(id<ANTLRDebugEventListener>)theDebugListener
-				 debuggerPort:(NSInteger)portNumber;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLREarlyExitException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLREarlyExitException.h
deleted file mode 100755
index 1a89bbb..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLREarlyExitException.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLREarlyExitException : ANTLRRecognitionException {
-	int decisionNumber;
-}
-
-+ (ANTLREarlyExitException *) exceptionWithStream:(id<ANTLRIntStream>) anInputStream decisionNumber:(NSInteger) aDecisionNumber;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream decisionNumber:(NSInteger) aDecisionNumber;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRError.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRError.h
deleted file mode 100644
index f2657af..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRError.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRError.h
-//  ANTLR
-//
-//  Created by Ian Michell on 30/03/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-// [The "BSD licence"]
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-#define ANTLRErrorDomain @"ANTLRError"
-
-#define ANTLRIllegalArgumentException @"ANTLRIllegalArgumentException"
-#define ANTLRIllegalStateException @"IllegalStateException"
-//#define ANTLRRuntimeException @"RuntimeException"
-//#define ANTLRNoSuchMethodException @"NoSuchMethodException"
-//#define ANTLRNoSuchElementException @"NoSuchElementException"
-//#define ANTLRUnsupportedOperationException @"UnsupportedOperationException"
-
-
-/*typedef enum
-{
-	ANTLRIllegalState = 1,
-	ANTLRIllegalArgument = 2,
-	ANTLRRecognitionError = 3,
-	ANTLRMissingTokenError = 4,
-	ANTLRUnwantedTokenError = 5,
-	ANTLRMismatechedTokenError = 6,
-	ANTLRNoViableAltError = 7
-	
-} ANTLRErrorCode;*/
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRFailedPredicateException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRFailedPredicateException.h
deleted file mode 100755
index 9788cba..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRFailedPredicateException.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-
-@interface ANTLRFailedPredicateException : ANTLRRecognitionException
-{
-	NSString *predicate;
-	NSString *ruleName;
-}
-
-@property (retain, getter=getPredicate, setter=setPredicate:) NSString *predicate;
-@property (retain, getter=getRuleName, setter=setRuleName:) NSString *ruleName;
-
-+ (ANTLRFailedPredicateException *) exceptionWithRuleName:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<ANTLRIntStream>)theStream;
-- (ANTLRFailedPredicateException *) initWithRuleName:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<ANTLRIntStream>)theStream;
-
-#ifdef DONTUSEYET
-- (NSString *) getPredicate;
-- (void) setPredicate:(NSString *)thePredicate;
-- (NSString *) getRuleName;
-- (void) setRuleName:(NSString *)theRuleName;
-#endif
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRFastQueue.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRFastQueue.h
deleted file mode 100644
index cf81817..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRFastQueue.h
+++ /dev/null
@@ -1,68 +0,0 @@
-//
-//  ANTLRFastQueue.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRFastQueue : NSObject <NSCopying>
-{
-	NSAutoreleasePool *pool;
-	NSMutableArray *data;
-	NSInteger p;
-}
-
-@property (retain, getter=getPool, setter=setPool) NSAutoreleasePool *pool;
-@property (retain, getter=getData, setter=setData) NSMutableArray *data;
-@property (assign, getter=getP, setter=setP) NSInteger p;
-
-+ (id) newANTLRFastQueue;
-
-- (id) init;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (void) reset;
-- (id) remove;
-- (void) addObject:(id) o;
-- (NSInteger) count;
-- (NSInteger) size;
-- (id) head;
-- (id) objectAtIndex:(NSInteger) i;
-- (void) clear;
-- (NSString *) toString;
-- (NSAutoreleasePool *)getPool;
-- (void)setPool:(NSAutoreleasePool *)aPool;
-- (NSMutableArray *)getData;
-- (void)setData:(NSMutableArray *)myData;
-- (NSInteger) getP;
-- (void) setP:(NSInteger)anInt;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashMap.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashMap.h
deleted file mode 100644
index 04aca7b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashMap.h
+++ /dev/null
@@ -1,102 +0,0 @@
-//
-//  ANTLRHashMap.h
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-#import "ANTLRMapElement.h"
-
-#define GLOBAL_SCOPE       0
-#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRHashMap : ANTLRLinkBase {
-	//	ANTLRHashMap *fNext;
-    //    TStringPool *fPool;
-    NSInteger Scope;
-    NSInteger LastHash;
-    NSInteger BuffSize;
-    ANTLRMapElement *ptrBuffer[HASHSIZE];
-    NSInteger mode;
-}
-
-//@property (copy) ANTLRHashMap *fNext;
-//@property (copy) TStringPool *fPool;
-@property (getter=getScope, setter=setScope:) NSInteger Scope;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
-
-// Construction/Destruction
-+ (id)newANTLRHashMap;
-+ (id)newANTLRHashMapWithLen:(NSInteger)aBuffSize;
-- (id)init;
-- (id)initWithLen:(NSInteger)aBuffSize;
-- (void)dealloc;
-- (ANTLRHashMap *)PushScope:( ANTLRHashMap **)map;
-- (ANTLRHashMap *)PopScope:( ANTLRHashMap **)map;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-// Instance Methods
-/*    form hash value for string s */
-- (NSInteger)hash:(NSString *)s;
-/*   look for s in ptrBuffer  */
-- (ANTLRHashMap *)findscope:(int)level;
-/*   look for s in ptrBuffer  */
-- (id)lookup:(NSString *)s Scope:(int)scope;
-/*   look for s in ptrBuffer  */
-- (id)install:(ANTLRMapElement *)sym Scope:(int)scope;
-/*   look for s in ptrBuffer  */
-- (void)deleteANTLRHashMap:(ANTLRMapElement *)np;
-- (int)RemoveSym:(NSString *)s;
-- (void)delete_chain:(ANTLRMapElement *)np;
-#ifdef DONTUSEYET
-- (int)bld_symtab:(KW_TABLE *)toknams;
-#endif
-- (ANTLRMapElement **)getptrBuffer;
-- (ANTLRMapElement *)getptrBufferEntry:(int)idx;
-- (void)setptrBuffer:(ANTLRMapElement *)np Index:(int)idx;
-- (NSInteger)getScope;
-- (void)setScope:(NSInteger)i;
-- (ANTLRMapElement *)getTType:(NSString *)name;
-- (ANTLRMapElement *)getNameInList:(NSInteger)ttype;
-- (void)putNode:(NSString *)name TokenType:(NSInteger)ttype;
-- (NSInteger)getMode;
-- (void)setMode:(NSInteger)aMode;
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx;
-- (id) objectAtIndex:(NSInteger)idx;
-- (void) setObject:(id)aRule atIndex:(NSInteger)idx;
-- (void)addObject:(id)anObject;
-- (ANTLRMapElement *) getName:(NSString *)aName;
-- (void) putName:(NSString *)name Node:(id)aNode;
-
-- (NSEnumerator *)objectEnumerator;
-- (BOOL) hasNext;
-- (ANTLRMapElement *)nextObject;
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashMap.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashMap.m
deleted file mode 100644
index a23426b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashMap.m
+++ /dev/null
@@ -1,521 +0,0 @@
-//
-//  ANTLRHashMap.m
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRHashMap.h"
-
-static NSInteger itIndex;
-
-/*
- * Start of ANTLRHashMap
- */
-@implementation ANTLRHashMap
-
-@synthesize Scope;
-@synthesize LastHash;
-
-+(id)newANTLRHashMap
-{
-    ANTLRHashMap *aNewANTLRHashMap;
-    
-    aNewANTLRHashMap = [[ANTLRHashMap alloc] init];
-	return( aNewANTLRHashMap );
-}
-
-+(id)newANTLRHashMapWithLen:(NSInteger)aBuffSize
-{
-    ANTLRHashMap *aNewANTLRHashMap;
-    
-    aNewANTLRHashMap = [[ANTLRHashMap alloc] initWithLen:aBuffSize];
-	return( aNewANTLRHashMap );
-}
-
--(id)init
-{
-    NSInteger idx;
-    
-	if ((self = [super init]) != nil) {
-		fNext = nil;
-        BuffSize = HASHSIZE;
-		Scope = 0;
-		if ( fNext != nil ) {
-			Scope = ((ANTLRHashMap *)fNext)->Scope+1;
-			for( idx = 0; idx < BuffSize; idx++ ) {
-				ptrBuffer[idx] = ((ANTLRHashMap *)fNext)->ptrBuffer[idx];
-			}
-		}
-        mode = 0;
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)aBuffSize
-{
-    NSInteger idx;
-    
-	if ((self = [super init]) != nil) {
-		fNext = nil;
-        BuffSize = aBuffSize;
-		Scope = 0;
-		if ( fNext != nil ) {
-			Scope = ((ANTLRHashMap *)fNext)->Scope+1;
-			for( idx = 0; idx < BuffSize; idx++ ) {
-				ptrBuffer[idx] = ((ANTLRHashMap *)fNext)->ptrBuffer[idx];
-			}
-		}
-        mode = 0;
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-	
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp && tmp != [((ANTLRHashMap *)fNext) getptrBufferEntry:idx] ) {
-                rtmp = tmp;
-                // tmp = [tmp getfNext];
-                tmp = (ANTLRMapElement *)tmp.fNext;
-                [rtmp dealloc];
-            }
-        }
-    }
-	[super dealloc];
-}
-
-- (NSInteger)count
-{
-    id anElement;
-    NSInteger aCnt = 0;
-    
-    for (NSInteger i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aCnt++;
-        }
-    }
-    return aCnt;
-}
-                          
-- (NSInteger) size
-{
-    id anElement;
-    NSInteger aSize = 0;
-    
-    for (NSInteger i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize += sizeof(id);
-        }
-    }
-    return aSize;
-}
-                                  
-                                  
--(void)deleteANTLRHashMap:(ANTLRMapElement *)np
-{
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-    
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp && tmp != (ANTLRLinkBase *)[((ANTLRHashMap *)fNext) getptrBufferEntry:idx] ) {
-                rtmp = tmp;
-                tmp = [tmp getfNext];
-                [rtmp dealloc];
-            }
-        }
-    }
-}
-
--(ANTLRHashMap *)PushScope:(ANTLRHashMap **)map
-{
-    NSInteger idx;
-    ANTLRHashMap *htmp;
-    
-    htmp = [ANTLRHashMap newANTLRHashMap];
-    if ( *map != nil ) {
-        ((ANTLRHashMap *)htmp)->fNext = *map;
-        [htmp setScope:[((ANTLRHashMap *)htmp->fNext) getScope]+1];
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            htmp->ptrBuffer[idx] = ((ANTLRHashMap *)htmp->fNext)->ptrBuffer[idx];
-        }
-    }
-    //    gScopeLevel++;
-    *map = htmp;
-    return( htmp );
-}
-
--(ANTLRHashMap *)PopScope:(ANTLRHashMap **)map
-{
-    NSInteger idx;
-    ANTLRMapElement *tmp;
-	ANTLRHashMap *htmp;
-    
-    htmp = *map;
-    if ( (*map)->fNext != nil ) {
-        *map = (ANTLRHashMap *)htmp->fNext;
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            if ( htmp->ptrBuffer[idx] == nil ||
-                htmp->ptrBuffer[idx] == (*map)->ptrBuffer[idx] ) {
-                break;
-            }
-            tmp = htmp->ptrBuffer[idx];
-            /*
-             * must deal with parms, locals and labels at some point
-             * can not forget the debuggers
-             */
-            htmp->ptrBuffer[idx] = [tmp getfNext];
-            [ tmp dealloc];
-        }
-        *map = (ANTLRHashMap *)htmp->fNext;
-        //        gScopeLevel--;
-    }
-    return( htmp );
-}
-
-#ifdef USERDOC
-/*
- *  HASH        hash entry to get index to table
- *  NSInteger hash( ANTLRHashMap *self, char *s );
- *
- *     Inputs:  char *s             string to find
- *
- *     Returns: NSInteger                 hashed value
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)hash:(NSString *)s       /*    form hash value for string s */
-{
-	NSInteger hashval;
-	const char *tmp;
-    
-	tmp = [s cStringUsingEncoding:NSASCIIStringEncoding];
-	for( hashval = 0; *tmp != '\0'; )
-        hashval += *tmp++;
-	self->LastHash = hashval % BuffSize;
-	return( self->LastHash );
-}
-
-#ifdef USERDOC
-/*
- *  FINDSCOPE  search hashed list for entry
- *  ANTLRHashMap *findscope( ANTLRHashMap *self, NSInteger scope );
- *
- *     Inputs:  NSInteger       scope -- scope level to find
- *
- *     Returns: ANTLRHashMap   pointer to ptrBuffer of proper scope level
- *
- *  Last Revision 9/03/90
- */
-#endif
--(ANTLRHashMap *)findscope:(NSInteger)scope
-{
-    if ( self->Scope == scope ) {
-        return( self );
-    }
-    else if ( fNext ) {
-        return( [((ANTLRHashMap *)fNext) findscope:scope] );
-    }
-    return( nil );              /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  LOOKUP  search hashed list for entry
- *  ANTLRMapElement *lookup( ANTLRHashMap *self, char *s, NSInteger scope );
- *
- *     Inputs:  char     *s          string to find
- *
- *     Returns: ANTLRMapElement  *           pointer to entry
- *
- *  Last Revision 9/03/90
- */
-#endif
--(id)lookup:(NSString *)s Scope:(NSInteger)scope
-{
-    ANTLRMapElement *np;
-    
-    for( np = self->ptrBuffer[[self hash:s]]; np != nil; np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  INSTALL search hashed list for entry
- *  NSInteger install( ANTLRHashMap *self, ANTLRMapElement *sym, NSInteger scope );
- *
- *     Inputs:  ANTLRMapElement    *sym   -- symbol ptr to install
- *              NSInteger         scope -- level to find
- *
- *     Returns: Boolean     TRUE   if installed
- *                          FALSE  if already in table
- *
- *  Last Revision 9/03/90
- */
-#endif
--(ANTLRMapElement *)install:(ANTLRMapElement *)sym Scope:(NSInteger)scope
-{
-    ANTLRMapElement *np;
-    
-    np = [self lookup:[sym getName] Scope:scope ];
-    if ( np == nil ) {
-        [sym retain];
-        [sym setFNext:self->ptrBuffer[ self->LastHash ]];
-        self->ptrBuffer[ self->LastHash ] = sym;
-        return( self->ptrBuffer[ self->LastHash ] );
-    }
-    return( nil );            /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  RemoveSym  search hashed list for entry
- *  NSInteger RemoveSym( ANTLRHashMap *self, char *s );
- *
- *     Inputs:  char     *s          string to find
- *
- *     Returns: NSInteger      indicator of SUCCESS OR FAILURE
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)RemoveSym:(NSString *)s
-{
-    ANTLRMapElement *np, *tmp;
-    NSInteger idx;
-    
-    idx = [self hash:s];
-    for ( tmp = self->ptrBuffer[idx], np = self->ptrBuffer[idx]; np != nil; np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            tmp = [np getfNext];             /* get the next link  */
-            [np dealloc];
-            return( SUCCESS );            /* report SUCCESS     */
-        }
-        tmp = [np getfNext];              //  BAD!!!!!!
-    }
-    return( FAILURE );                    /*   not found      */
-}
-
--(void)delete_chain:(ANTLRMapElement *)np
-{
-    if ( [np getfNext] != nil )
-		[self delete_chain:[np getfNext]];
-	[np dealloc];
-}
-
-#ifdef DONTUSEYET
--(NSInteger)bld_symtab:(KW_TABLE *)toknams
-{
-    NSInteger i;
-    ANTLRMapElement *np;
-    
-    for( i = 0; *(toknams[i].name) != '\0'; i++ ) {
-        // install symbol in ptrBuffer
-        np = [ANTLRMapElement newANTLRMapElement:[NSString stringWithFormat:@"%s", toknams[i].name]];
-        //        np->fType = toknams[i].toknum;
-        [self install:np Scope:0];
-    }
-    return( SUCCESS );
-}
-#endif
-
--(ANTLRMapElement *)getptrBufferEntry:(NSInteger)idx
-{
-	return( ptrBuffer[idx] );
-}
-
--(ANTLRMapElement **)getptrBuffer
-{
-	return( ptrBuffer );
-}
-
--(void)setptrBuffer:(ANTLRMapElement *)np Index:(NSInteger)idx
-{
-	if ( idx < BuffSize ) {
-        [np retain];
-		ptrBuffer[idx] = np;
-    }
-}
-
--(NSInteger)getScope
-{
-	return( Scope );
-}
-
--(void)setScope:(NSInteger)i
-{
-	Scope = i;
-}
-
-- (ANTLRMapElement *)getTType:(NSString *)name
-{
-    return [self lookup:name Scope:0];
-}
-
-/*
- * works only for maplist indexed not by name but by TokenNumber
- */
-- (ANTLRMapElement *)getNameInList:(NSInteger)ttype
-{
-    ANTLRMapElement *np;
-    NSInteger aTType;
-
-    aTType = ttype % BuffSize;
-    for( np = self->ptrBuffer[aTType]; np != nil; np = [np getfNext] ) {
-        if ( [np.index integerValue] == ttype ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-- (ANTLRLinkBase *)getName:(NSString *)name
-{
-    return [self lookup:name Scope:0]; /*  nil if not found      */    
-}
-
-- (void)putNode:(NSString *)name TokenType:(NSInteger)ttype
-{
-    ANTLRMapElement *np;
-    
-    // install symbol in ptrBuffer
-    np = [ANTLRMapElement newANTLRMapElementWithName:[NSString stringWithString:name] Type:ttype];
-    //        np->fType = toknams[i].toknum;
-    [self install:np Scope:0];
-}
-
-- (NSInteger)getMode
-{
-    return mode;
-}
-
-- (void)setMode:(NSInteger)aMode
-{
-    mode = aMode;
-}
-
-- (void) addObject:(id)aRule
-{
-    NSInteger idx;
-
-    idx = [self count];
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-/* this may have to handle linking into the chain
- */
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    if (aRule != ptrBuffer[idx]) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (id)objectAtIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    return ptrBuffer[idx];
-}
-
-/* this will never link into the chain
- */
-- (void) setObject:(id)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    if (aRule != ptrBuffer[idx]) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (void)putName:(NSString *)name Node:(id)aNode
-{
-    ANTLRMapElement *np;
-    
-    np = [self lookup:name Scope:0 ];
-    if ( np == nil ) {
-        np = [ANTLRMapElement newANTLRMapElementWithName:name Node:aNode];
-        if (ptrBuffer[LastHash] != nil)
-            [ptrBuffer[LastHash] release];
-        [np retain];
-        np.fNext = ptrBuffer[ LastHash ];
-        ptrBuffer[ LastHash ] = np;
-    }
-    return;    
-}
-
-- (NSEnumerator *)objectEnumerator
-{
-    NSEnumerator *anEnumerator = nil;
-
-    itIndex = 0;
-    return anEnumerator;
-}
-
-- (BOOL)hasNext
-{
-    if (self && [self count] < BuffSize-1) {
-        return YES;
-    }
-    return NO;
-}
-
-- (ANTLRMapElement *)nextObject
-{
-    if (self && itIndex < BuffSize-1) {
-        return ptrBuffer[itIndex];
-    }
-    return nil;
-}
-
-@end
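
The -hash: method above simply sums the ASCII codes of the key and reduces the sum modulo
BuffSize (HASHSIZE, i.e. 101, unless -initWithLen: was used). A hedged worked example with an
arbitrary key:

//  key @"rule":  'r' 114 + 'u' 117 + 'l' 108 + 'e' 101  =  440
//  LastHash = 440 % 101 = 36, so -install:Scope: chains the new
//  ANTLRMapElement into ptrBuffer[36] and -lookup:Scope: walks that chain.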
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashRule.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashRule.h
deleted file mode 100644
index f1558e8..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashRule.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-//  ANTLRHashRule.h
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuleMemo.h"
-#import "ANTLRPtrBuffer.h"
-
-#define GLOBAL_SCOPE       0
-#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRHashRule : ANTLRPtrBuffer {
-	//	ANTLRHashRule *fNext;
-    //    TStringPool *fPool;
-    NSInteger LastHash;
-    NSInteger mode;
-}
-
-//@property (copy) ANTLRHashRule *fNext;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
-
-// Construction/Destruction
-+ (id)newANTLRHashRule;
-+ (id)newANTLRHashRuleWithLen:(NSInteger)aBuffSize;
-- (id)init;
-- (id)initWithLen:(NSInteger)aBuffSize;
-- (void)dealloc;
-
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-
-// Instance Methods
-- (void)deleteANTLRHashRule:(ANTLRRuleMemo *)np;
-- (void)delete_chain:(ANTLRRuleMemo *)np;
-- (ANTLRRuleMemo **)getPtrBuffer;
-- (void)setPtrBuffer:(ANTLRRuleMemo **)np;
-- (NSNumber *)getRuleMemoStopIndex:(NSInteger)aStartIndex;
-- (void)putRuleMemoAtStartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex;
-- (NSInteger)getMode;
-- (void)setMode:(NSInteger)aMode;
-- (void) insertObject:(ANTLRRuleMemo *)aRule atIndex:(NSInteger)Index;
-- (ANTLRRuleMemo *) objectAtIndex:(NSInteger)Index;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashRule.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashRule.m
deleted file mode 100644
index 93ce3a1..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRHashRule.m
+++ /dev/null
@@ -1,281 +0,0 @@
-//
-//  ANTLRHashRule.m
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-#define ANTLR_MEMO_RULE_UNKNOWN -1
-
-#import "ANTLRHashRule.h"
-
-/*
- * Start of ANTLRHashRule
- */
-@implementation ANTLRHashRule
-
-@synthesize LastHash;
-
-+(id)newANTLRHashRule
-{
-    ANTLRHashRule *aNewANTLRHashRule;
-    
-    aNewANTLRHashRule = [[ANTLRHashRule alloc] init];
-	return( aNewANTLRHashRule );
-}
-
-+(id)newANTLRHashRuleWithLen:(NSInteger)aBuffSize
-{
-    ANTLRHashRule *aNewANTLRHashRule;
-    
-    aNewANTLRHashRule = [[ANTLRHashRule alloc] initWithLen:aBuffSize];
-	return( aNewANTLRHashRule );
-}
-
--(id)init
-{
-	if ((self = [super initWithLen:HASHSIZE]) != nil) {
-		fNext = nil;
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)aBuffSize
-{
-	if ((self = [super initWithLen:aBuffSize]) != nil) {
-		fNext = nil;
-        mode = 0;
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-    ANTLRRuleMemo *tmp, *rtmp;
-    int Index;
-	
-    if ( self.fNext != nil ) {
-        for( Index = 0; Index < BuffSize; Index++ ) {
-            tmp = ptrBuffer[Index];
-            while ( tmp && tmp != ptrBuffer[Index] ) {
-                rtmp = tmp;
-                // tmp = [tmp getfNext];
-                tmp = (ANTLRRuleMemo *)tmp.fNext;
-                [rtmp dealloc];
-            }
-        }
-    }
-	[super dealloc];
-}
-
-- (NSInteger)count
-{
-    id anElement;
-    NSInteger aCnt = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        anElement = ptrBuffer[i];
-        if ( anElement != nil ) {
-            aCnt++;
-        }
-    }
-    return aCnt;
-}
-                          
-- (NSInteger) length
-{
-    return BuffSize;
-}
-
-- (NSInteger) size
-{
-    id anElement;
-    NSInteger aSize = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize += sizeof(id);
-        }
-    }
-    return aSize;
-}
-                                  
-                                  
--(void)deleteANTLRHashRule:(ANTLRRuleMemo *)np
-{
-    ANTLRRuleMemo *tmp, *rtmp;
-    int Index;
-    
-    if ( self.fNext != nil ) {
-        for( Index = 0; Index < BuffSize; Index++ ) {
-            tmp = ptrBuffer[Index];
-            while ( tmp && tmp != ptrBuffer[Index ] ) {
-                rtmp = tmp;
-                tmp = tmp.fNext;
-                [rtmp dealloc];
-            }
-        }
-    }
-}
-
--(void)delete_chain:(ANTLRRuleMemo *)np
-{
-    if ( np.fNext != nil )
-		[self delete_chain:np.fNext];
-	[np dealloc];
-}
-
--(ANTLRRuleMemo **)getPtrBuffer
-{
-	return( ptrBuffer );
-}
-
--(void)setPtrBuffer:(ANTLRRuleMemo **)np
-{
-	ptrBuffer = np;
-}
-
-- (NSNumber *)getRuleMemoStopIndex:(NSInteger)aStartIndex
-{
-    ANTLRRuleMemo *aRule;
-    NSNumber *stopIndex;
-    NSInteger anIndex;
-    
-    anIndex = ( aStartIndex >= BuffSize ) ? aStartIndex %= BuffSize : aStartIndex;
-    if ((aRule = ptrBuffer[anIndex]) == nil) {
-        return nil;
-    }
-    stopIndex = [aRule getStopIndex:aStartIndex];
-    return stopIndex;
-}
-
-- (void)putRuleMemo:(ANTLRRuleMemo *)aRule AtStartIndex:(NSInteger)aStartIndex
-{
-    NSInteger anIndex;
-    
-    anIndex = (aStartIndex >= BuffSize) ? aStartIndex %= BuffSize : aStartIndex;
-    if ( ptrBuffer[anIndex] == nil ) {
-        ptrBuffer[anIndex] = aRule;
-        [aRule retain];
-    }
-    else {
-        do {
-            if ( [aRule.startIndex integerValue] == aStartIndex ) {
-                [aRule setStartIndex:aRule.stopIndex];
-                return;
-            }
-            aRule = aRule.fNext;
-        } while ( aRule != nil );
-    }
-}
-
-- (void)putRuleMemoAtStartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex
-{
-    ANTLRRuleMemo *aRule, *newRule;
-    NSInteger anIndex;
-    NSInteger aMatchIndex;
-
-    anIndex = (aStartIndex >= BuffSize) ? aStartIndex %= BuffSize : aStartIndex;
-    if ((aRule = ptrBuffer[anIndex]) == nil ) {
-        aRule = [ANTLRRuleMemo newANTLRRuleMemoWithStartIndex:[NSNumber numberWithInteger:aStartIndex]
-                                                    StopIndex:[NSNumber numberWithInteger:aStopIndex]];
-        [aRule retain];
-        ptrBuffer[anIndex] = aRule;
-    }
-    else {
-        aMatchIndex = [aRule.startIndex integerValue];
-        if ( aStartIndex > aMatchIndex ) {
-            if ( aRule != ptrBuffer[anIndex] ) {
-                [aRule retain];
-            }
-            aRule.fNext = ptrBuffer[anIndex];
-            ptrBuffer[anIndex] = aRule;
-            return;
-        }
-        while (aRule.fNext != nil) {
-            aMatchIndex = [((ANTLRRuleMemo *)aRule.fNext).startIndex integerValue];
-            if ( aStartIndex > aMatchIndex ) {
-                newRule = [ANTLRRuleMemo newANTLRRuleMemoWithStartIndex:[NSNumber numberWithInteger:aStartIndex]
-                                                              StopIndex:[NSNumber numberWithInteger:aStopIndex]];
-                [newRule retain];
-                newRule.fNext = aRule.fNext;
-                aRule.fNext = newRule;
-                return;
-            }
-            if ( aMatchIndex == aStartIndex ) {
-                [aRule setStartIndex:aRule.stopIndex];
-                return;
-            }
-            aRule = aRule.fNext;
-        }
-    }
-}
-
-- (NSInteger)getLastHash
-{
-    return LastHash;
-}
-
-- (void)setLastHash:(NSInteger)aHash
-{
-    LastHash = aHash;
-}
-
-- (NSInteger)getMode
-{
-    return mode;
-}
-
-- (void)setMode:(NSInteger)aMode
-{
-    mode = aMode;
-}
-
-- (void) insertObject:(ANTLRRuleMemo *)aRule atIndex:(NSInteger)anIndex
-{
-    NSInteger Index;
-    
-    Index = ( anIndex >= BuffSize ) ? anIndex %= BuffSize : anIndex;
-    if (aRule != ptrBuffer[Index]) {
-        if (ptrBuffer[Index] != nil) {
-            [ptrBuffer[Index] release];
-        }
-        [aRule retain];
-    }
-    ptrBuffer[Index] = aRule;
-}
-
-- (ANTLRRuleMemo *)objectAtIndex:(NSInteger)anIndex
-{
-    NSInteger anIdx;
-
-    anIdx = ( anIndex >= BuffSize ) ? anIndex %= BuffSize : anIndex;
-    return ptrBuffer[anIdx];
-}
-
-
-@end
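
The memo table implemented above keys each rule invocation by its start token index and records the stop index reached, so a backtracking parser can skip (or fail fast on) a rule it has already attempted at the same position. A minimal Java sketch of that idea, using invented names rather than the runtime's actual memoization API:

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative only: per-rule memoization mapping a rule's start token
    // index to the token index where the rule stopped (or a failure sentinel).
    final class RuleMemo {
        private static final int MEMO_FAILED = -2;   // sentinel: rule failed here before

        private final Map<Integer, Integer> startToStop = new HashMap<>();

        void memoize(int startIndex, int stopIndex) {
            startToStop.put(startIndex, stopIndex);
        }

        void memoizeFailure(int startIndex) {
            startToStop.put(startIndex, MEMO_FAILED);
        }

        /** Returns the memoized stop index, MEMO_FAILED, or -1 if unknown. */
        int lookup(int startIndex) {
            Integer stop = startToStop.get(startIndex);
            return stop == null ? -1 : stop;
        }
    }
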
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRIntArray.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRIntArray.h
deleted file mode 100644
index 5269b23..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRIntArray.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-//  ANTLRIntArray.h
-//  ANTLR
-//
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-
-#define ANTLR_INT_ARRAY_INITIAL_SIZE 10
-
-@interface ANTLRIntArray : ANTLRPtrBuffer 
-{
-}
-
-+ (ANTLRIntArray *)newANTLRIntArray;
-+ (ANTLRIntArray *)newANTLRIntArrayWithLen:(NSInteger)aLen;
-
-- (id) init;
-- (id) initWithLen:(NSInteger)aLen;
-
-- (void) dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (void) addInteger:(NSInteger) v;
-- (void) push:(NSInteger) v;
-- (NSInteger) pop;
-- (NSInteger) integerAtIndex:(NSInteger) i;
-- (void) insertInteger:(NSInteger)anInteger AtIndex:(NSInteger) idx;
-- (NSInteger) size;
-- (void) reset;
-
-- (NSInteger) count;
-- (NSInteger) size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRIntStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRIntStream.h
deleted file mode 100755
index 3790cd9..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRIntStream.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-@protocol ANTLRIntStream < NSObject, NSCopying >
-
-- (void) consume;
-
-// Get unichar at current input pointer + i ahead where i=1 is next character as int for including ANTLRCharStreamEOF (-1) in the data range
-- (NSInteger) LA:(NSInteger) i;
-
-// Tell the stream to start buffering if it hasn't already.  Return
-// current input position, index(), or some other marker so that
-// when passed to rewind() you get back to the same spot.
-// rewind(mark()) should not affect the input cursor.
-// TODO: problem in that lexer stream returns not index but some marker 
-
-- (NSInteger) mark;
-
-// Return the current input symbol index 0..n where n indicates the
-// last symbol has been read.
-
-- (NSInteger) getIndex;
-
-// Reset the stream so that next call to index would return marker.
-// The marker will usually be -index but it doesn't have to be.  It's
-// just a marker to indicate what state the stream was in.  This is
-// essentially calling -release: and -seek:.  If there are markers
-// created after this marker argument, this routine must unroll them
-// like a stack.  Assume the state the stream was in when this marker
-// was created.
-
-- (void) rewind;
-- (void) rewind:(NSInteger) marker;
-
-// You may want to commit to a backtrack but don't want to force the
-// stream to keep bookkeeping objects around for a marker that is
-// no longer necessary.  This will have the same behavior as
-// rewind() except it releases resources without the backward seek.
-
-- (void) release:(NSInteger) marker;
-
-// Set the input cursor to the position indicated by index.  This is
-// normally used to seek ahead in the input stream.  No buffering is
-// required to do this unless you know your stream will use seek to
-// move backwards such as when backtracking.
-// This is different from rewind in its multi-directional
-// requirement and in that its argument is strictly an input cursor (index).
-//
-// For char streams, seeking forward must update the stream state such
-// as line number.  For seeking backwards, you will be presumably
-// backtracking using the mark/rewind mechanism that restores state and
-// so this method does not need to update state when seeking backwards.
-//
-// Currently, this method is only used for efficient backtracking, but
-// in the future it may be used for incremental parsing.
-
-- (void) seek:(NSInteger) index;
-
-/** Only makes sense for streams that buffer everything up probably, but
- *  might be useful to display the entire stream or for testing.  This
- *  value includes a single EOF.
- */
-- (NSUInteger) size;
-/** Where are you getting symbols from?  Normally, implementations will
- *  pass the buck all the way to the lexer who can ask its input stream
- *  for the file name or whatever.
- */
-- (NSString *)getSourceName;
-
-@end
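
The comments in the deleted ANTLRIntStream.h spell out the mark/rewind contract: mark returns a marker for the current position and rewind(marker) restores it, which is what makes speculative matching possible. A self-contained Java sketch of that contract, using a hypothetical SymbolStream interface rather than the real runtime types:

    // Sketch only: the interface and class names are invented for illustration.
    interface SymbolStream {
        int LA(int i);            // look ahead: 1 = next symbol, -1 = EOF
        void consume();           // advance past the current symbol
        int mark();               // remember the current position, return a marker
        void rewind(int marker);  // restore the position saved by mark()
    }

    final class SpeculativeMatch {
        /** Tries to consume the symbols in 'expected'; on failure the stream is rewound. */
        static boolean tryMatch(SymbolStream input, int[] expected) {
            int marker = input.mark();
            for (int symbol : expected) {
                if (input.LA(1) != symbol) {
                    input.rewind(marker);   // undo everything consumed so far
                    return false;
                }
                input.consume();
            }
            return true;
        }
    }
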
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLexer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLexer.h
deleted file mode 100755
index 5cfb36f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLexer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenSource.h"
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRRecognizerSharedState.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRRecognitionException.h"
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRMismatchedRangeException.h"
-
-@interface ANTLRLexer : ANTLRBaseRecognizer <ANTLRTokenSource> {
-	id<ANTLRCharStream> input;      ///< The character stream we pull tokens out of.
-	NSUInteger ruleNestingLevel;
-}
-
-@property (retain, getter=getInput, setter=setInput:) id<ANTLRCharStream> input;
-@property (getter=getRuleNestingLevel, setter=setRuleNestingLevel) NSUInteger ruleNestingLevel;
-
-#pragma mark Initializer
-- (id) initWithCharStream:(id<ANTLRCharStream>) anInput;
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput State:(ANTLRRecognizerSharedState *)state;
-
-- (id) copyWithZone:(NSZone *)zone;
-
-- (void) reset;
-
-// - (ANTLRRecognizerSharedState *) state;
-
-#pragma mark Tokens
-- (id<ANTLRToken>)getToken;
-- (void) setToken: (id<ANTLRToken>) aToken;
-- (id<ANTLRToken>) nextToken;
-- (void) mTokens;		// abstract, defined in generated sources
-- (void) skip;
-- (id<ANTLRCharStream>) getInput;
-- (void) setInput:(id<ANTLRCharStream>)aCharStream;
-
-- (void) emit;
-- (void) emit:(id<ANTLRToken>)aToken;
-
-#pragma mark Matching
-- (void) matchString:(NSString *)aString;
-- (void) matchAny;
-- (void) matchChar:(unichar) aChar;
-- (void) matchRangeFromChar:(unichar)fromChar to:(unichar)toChar;
-
-#pragma mark Informational
-- (NSUInteger) getLine;
-- (NSUInteger) getCharPositionInLine;
-- (NSInteger) getIndex;
-- (NSString *) getText;
-- (void) setText:(NSString *) theText;
-
-// error handling
-- (void) reportError:(ANTLRRecognitionException *)e;
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(NSMutableArray *)tokenNames;
-- (NSString *)getCharErrorDisplay:(NSInteger)c;
-- (void) recover:(ANTLRRecognitionException *)e;
-- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLexerRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLexerRuleReturnScope.h
deleted file mode 100755
index 18ae374..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLexerRuleReturnScope.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@interface ANTLRLexerRuleReturnScope : NSObject {
-	int startToken;
-	int stopToken;
-}
-
-- (NSInteger) getStart;
-- (void) setStart: (NSInteger) aStart;
-
-- (NSInteger) getStop;
-- (void) setStop: (NSInteger) aStop;
-
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLinkBase.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLinkBase.h
deleted file mode 100644
index 21019e6..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLinkBase.h
+++ /dev/null
@@ -1,74 +0,0 @@
-//
-//  ANTLRLinkBase.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/14/10.
-//  [The "BSD licence"]
-//  Copyright (c) 2010 Alan Condit
-//  All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@protocol ANTLRLinkList <NSObject>
-
-+ (id<ANTLRLinkList>)newANTLRLinkBase;
-+ (id<ANTLRLinkList>)newANTLRLinkBase:(id<ANTLRLinkList>)np Prev:(id<ANTLRLinkList>)pp;
-
-- (void) dealloc;
-
-- (id<ANTLRLinkList>) append:(id<ANTLRLinkList>)node;
-- (id<ANTLRLinkList>) insert:(id<ANTLRLinkList>)node;
-
-- (id<ANTLRLinkList>) getfNext;
-- (void) setFNext:(id<ANTLRLinkList>)np;
-- (id<ANTLRLinkList>)getfPrev;
-- (void) setFPrev:(id<ANTLRLinkList>)pp;
-
-@end
-
-@interface ANTLRLinkBase : NSObject <ANTLRLinkList> {
-	id<ANTLRLinkList> fPrev;
-	id<ANTLRLinkList> fNext;
-}
-
-@property (retain, getter=getfPrev, setter=setFPrev:) id<ANTLRLinkList> fPrev;
-@property (retain, getter=getfNext, setter=setFNext:) id<ANTLRLinkList> fNext;
-
-+ (id<ANTLRLinkList>)newANTLRLinkBase;
-+ (id<ANTLRLinkList>)newANTLRLinkBase:(id<ANTLRLinkList>)np Prev:(id<ANTLRLinkList>)pp;
-- (id<ANTLRLinkList>)init;
-- (id<ANTLRLinkList>)initWithPtr:(id)np Prev:(id)pp;
-- (void)dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id<ANTLRLinkList>)append:(id<ANTLRLinkList>)node;
-- (id<ANTLRLinkList>)insert:(id<ANTLRLinkList>)node;
-
-- (id<ANTLRLinkList>)getfNext;
-- (void)setFNext:(id<ANTLRLinkList>) np;
-- (id<ANTLRLinkList>)getfPrev;
-- (void)setFPrev:(id<ANTLRLinkList>) pp;
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLookaheadStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLookaheadStream.h
deleted file mode 100644
index ad48ff5..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRLookaheadStream.h
+++ /dev/null
@@ -1,73 +0,0 @@
-//
-//  ANTLRLookaheadStream.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-//  [The "BSD licence"]
-//  Copyright (c) 2010 Ian Michell 2010 Alan Condit
-//  All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRFastQueue.h"
-
-#define UNITIALIZED_EOF_ELEMENT_INDEX NSIntegerMax
-
-@interface ANTLRLookaheadStream : ANTLRFastQueue
-{
-	id eof;
-	NSInteger eofElementIndex;
-	NSInteger lastMarker;
-	NSInteger markDepth;
-}
-
-@property (readwrite, retain, getter=getEof, setter=setEof) id eof;
-@property (assign, getter=getEofElementIndex, setter=setEofElementIndex) NSInteger eofElementIndex;
-@property (assign, getter=getLastMarker, setter=setLastMarker) NSInteger lastMarker;
-@property (assign, getter=getMarkDepth, setter=setMarkDepth) NSInteger markDepth;
-
-- (id) initWithEOF:(id) o;
-- (id) nextElement;
-- (void) consume;
-- (void) sync:(NSInteger) need;
-- (void) fill:(NSInteger) n;
-- (id) LT:(NSInteger) i;
-- (id) LB:(NSInteger) i;
-- (id) currentSymbol;
-- (NSInteger) getIndex;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) seek:(NSInteger) i;
-- (id) getEof;
-- (void) setEof:(id) anID;
-- (NSInteger) getEofElementIndex;
-- (void) setEofElementIndex:(NSInteger) anInt;
-- (NSInteger) getLastMarker;
-- (void) setLastMarker:(NSInteger) anInt;
-- (NSInteger) getMarkDepth;
-- (void) setMarkDepth:(NSInteger) anInt;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMap.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMap.h
deleted file mode 100644
index 80ad486..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMap.h
+++ /dev/null
@@ -1,82 +0,0 @@
-//
-//  ANTLRMap.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-#import "ANTLRMapElement.h"
-
-//#define GLOBAL_SCOPE      0
-//#define LOCAL_SCOPE       1
-#define HASHSIZE            101
-#define HBUFSIZE            0x2000
-
-@interface ANTLRMap : ANTLRPtrBuffer {
-	//ANTLRMap *fNext; // found in superclass
-    // TStringPool *fPool;
-    NSInteger lastHash;
-}
-
-//@property (copy) ANTLRMap *fNext;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger lastHash;
-
-// Construction/Destruction
-+ (id)newANTLRMap;
-+ (id)newANTLRMapWithLen:(NSInteger)aHashSize;
-
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-// Instance Methods
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-/* form hash value for string s */
--(NSInteger)hash:(NSString *)s;
-/*   look for s in ptrBuffer  */
--(id)lookup:(NSString *)s;
-/* look for s in ptrBuffer  */
--(id)install:(ANTLRMapElement *)sym;
-/*
- * delete entry from list
- */
-- (void)deleteANTLRMap:(ANTLRMapElement *)np;
-- (NSInteger)RemoveSym:(NSString *)s;
-- (void)delete_chain:(ANTLRMapElement *)np;
-- (ANTLRMapElement *)getTType:(NSString *)name;
-- (ANTLRMapElement *)getName:(NSInteger)ttype;
-- (NSInteger)getNode:(ANTLRMapElement *)aNode;
-- (void)putNode:(NSInteger)aTType Node:(id)aNode;
-- (void)putName:(NSString *)name TType:(NSInteger)ttype;
-- (void)putName:(NSString *)name Node:(id)aNode;
-
-@end
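
The ANTLRMap declared above is a classic chained hash table keyed by strings: hash the name into a fixed array of HASHSIZE buckets and walk the collision chain on lookup or install. A small, illustrative Java equivalent (invented names, not the runtime's API):

    // Illustrative chained hash table keyed by strings.
    final class NameMap<V> {
        private static final int HASHSIZE = 101;

        private static final class Entry<V> {
            final String name;
            V value;
            Entry<V> next;
            Entry(String name, V value, Entry<V> next) {
                this.name = name;
                this.value = value;
                this.next = next;
            }
        }

        @SuppressWarnings("unchecked")
        private final Entry<V>[] buckets = new Entry[HASHSIZE];

        private int hash(String s) {
            int h = 0;
            for (int i = 0; i < s.length(); i++) {
                h = 31 * h + s.charAt(i);
            }
            return (h & 0x7fffffff) % HASHSIZE;
        }

        void install(String name, V value) {
            int b = hash(name);
            for (Entry<V> e = buckets[b]; e != null; e = e.next) {
                if (e.name.equals(name)) { e.value = value; return; }   // replace in place
            }
            buckets[b] = new Entry<>(name, value, buckets[b]);          // prepend to chain
        }

        V lookup(String name) {
            for (Entry<V> e = buckets[hash(name)]; e != null; e = e.next) {
                if (e.name.equals(name)) return e.value;
            }
            return null;
        }
    }
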
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMapElement.h
deleted file mode 100644
index e20d01c..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMapElement.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//
-//  ANTLRMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-
-@interface ANTLRMapElement : ANTLRBaseMapElement {
-    NSString *name;
-    id        node;
-}
-@property (retain, getter=getName, setter=setName:) NSString *name;
-@property (retain, getter=getNode, setter=setNode:) id node;
-
-+ (id) newANTLRMapElement;
-+ (id) newANTLRMapElementWithName:(NSString *)aName Type:(NSInteger)aTType;
-+ (id) newANTLRMapElementWithNode:(NSInteger)aTType Node:(id)aNode;
-+ (id) newANTLRMapElementWithName:(NSString *)aName Node:(id)aNode;
-+ (id) newANTLRMapElementWithObj1:(id)anObj1 Obj2:(id)anObj2;
-- (id) init;
-- (id) initWithName:(NSString *)aName Type:(NSInteger)aTType;
-- (id) initWithNode:(NSInteger)aTType Node:(id)aNode;
-- (id) initWithName:(NSString *)aName Node:(id)aNode;
-- (id) initWithObj1:(id)anObj1 Obj2:(id)anObj2;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger) count;
-- (NSInteger) size;
-- (NSString *)getName;
-- (void)setName:(NSString *)aName;
-- (id)getNode;
-- (void)setNode:(id)aNode;
-- (void)putNode:(id)aNode;
-- (void)putNode:(id)aNode With:(NSInteger)uniqueID;
-//- (void)setObject:(id)aNode atIndex:anIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedNotSetException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedNotSetException.h
deleted file mode 100644
index 57391d5..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedNotSetException.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-//  ANTLRMismatchedNotSetException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/13/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRBitSet.h"
-
-@interface ANTLRMismatchedNotSetException : ANTLRRecognitionException
-{
-    ANTLRBitSet *expecting;
-}
-@property (retain, getter=getExpecting, setter=setExpecting) ANTLRBitSet *expecting;
-
-- (ANTLRMismatchedNotSetException *)newANTLRMismatchedNotSetException;
-- (ANTLRMismatchedNotSetException *)newANTLRMismatchedNotSetException:(id<ANTLRIntStream>)anInput
-                                                               Follow:(ANTLRBitSet *)expecting;
-
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)expecting;
-
-- (NSString *)toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedRangeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedRangeException.h
deleted file mode 100755
index abda3bb..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedRangeException.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRIntStream;
-
-
-@interface ANTLRMismatchedRangeException : ANTLRRecognitionException {
-	NSRange range;
-}
-
-+ (id) exceptionWithRange:(NSRange) aRange stream:(id<ANTLRIntStream>) theInput;
-- (id) initWithRange:(NSRange) aRange stream:(id<ANTLRIntStream>) theInput;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedSetException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedSetException.h
deleted file mode 100755
index 3bd45fc..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedSetException.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLRMismatchedSetException : ANTLRRecognitionException {
-	NSSet *expecting;
-}
-
-@property (retain, getter=getExpecting, setter=setExpecting:) NSSet *expecting;
-
-+ (id) exceptionWithSet:(NSSet *) theExpectedSet stream:(id<ANTLRIntStream>) theStream;
-- (id) initWithSet:(NSSet *) theExpectedSet stream:(id<ANTLRIntStream>) theStream;
-
-- (NSSet *) getExpecting;
-- (void) setExpecting: (NSSet *) anExpectedSet;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedTokenException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedTokenException.h
deleted file mode 100755
index 5e1d77d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedTokenException.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRBitSet.h"
-
-@protocol ANTLRIntStream;
-
-@interface ANTLRMismatchedTokenException : ANTLRRecognitionException {
-	NSInteger expecting;
-	unichar expectingChar;
-	BOOL isTokenType;
-}
-
-@property (assign, getter=getExpecting, setter=setExpecting:) NSInteger expecting;
-@property (assign, getter=getExpectingChar, setter=setExpectingChar:) unichar expectingChar;
-@property (assign, getter=getIsTokenType, setter=setIsTokenType:) BOOL isTokenType;
-
-+ (id) newANTLRMismatchedTokenException:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-+ (id) newANTLRMismatchedTokenExceptionMissing:(NSInteger)expectedTokenType
-                                        Stream:(id<ANTLRIntStream>)anInput
-                                         Token:(id<ANTLRToken>)inserted;
-+ (id) newANTLRMismatchedTokenExceptionChar:(unichar)expectedCharacter Stream:(id<ANTLRIntStream>)anInput;
-+ (id) newANTLRMismatchedTokenExceptionStream:(id<ANTLRIntStream>)anInput
-                                    Exception:(NSException *)e
-                                       Follow:(ANTLRBitSet *)follow;
-- (id) initWithTokenType:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
--(id) initWithTokenType:(NSInteger)expectedTokenType
-                 Stream:(id<ANTLRIntStream>)anInput
-                  Token:(id<ANTLRToken>)inserted;
-- (id) initWithCharacter:(unichar)expectedCharacter Stream:(id<ANTLRIntStream>)anInput;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedTreeNodeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedTreeNodeException.h
deleted file mode 100755
index b61ab51..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMismatchedTreeNodeException.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRIntStream;
-
-@interface ANTLRMismatchedTreeNodeException : ANTLRRecognitionException {
-	NSInteger expecting;
-}
-
-@property (getter=getExpecting, setter=setExpecting) NSInteger expecting;
-
-+ (id) newANTLRMismatchedTreeNodeException:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-- (id) initWithTokenType:(NSInteger) expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMissingTokenException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMissingTokenException.h
deleted file mode 100644
index 1398e25..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRMissingTokenException.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-//  ANTLRMissingTokenException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRToken.h"
-
-@interface ANTLRMissingTokenException : ANTLRMismatchedTokenException {
-    id<ANTLRToken> inserted;
-}
-/** Used for remote debugger deserialization */
-+ (id) newANTLRMissingTokenException;
-+ (id) newANTLRMissingTokenException:(NSInteger)expected
-                              Stream:(id<ANTLRIntStream>)anInput
-                                With:(id<ANTLRToken>)insertedToken;
-- (id) init;
-- (id) init:(NSInteger)expected Stream:(id<ANTLRIntStream>)anInput With:(id<ANTLRToken>)insertedToken;
-
-- (NSInteger) getMissingType;
-
-- (NSString *)toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRNoViableAltException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRNoViableAltException.h
deleted file mode 100755
index b71baff..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRNoViableAltException.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRIntStream.h"
-
-@interface ANTLRNoViableAltException : ANTLRRecognitionException {
-	int decisionNumber;
-	int stateNumber;
-}
-
-+ (ANTLRNoViableAltException *) newANTLRNoViableAltException:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<ANTLRIntStream>)theStream;
-- (ANTLRNoViableAltException *) initWithDecision:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<ANTLRIntStream>)theStream;
-
-- (void)setDecisionNumber:(NSInteger)decisionNumber;
-- (void)setStateNumber:(NSInteger)stateNumber;
-
-
-@end
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRNodeMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRNodeMapElement.h
deleted file mode 100644
index 1c0c916..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRNodeMapElement.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-//  ANTLRRuleMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-#import "ANTLRTree.h"
-
-@interface ANTLRNodeMapElement : ANTLRBaseMapElement {
-    id<ANTLRTree> node;
-}
-
-@property (retain, getter=getNode, setter=setNode:) id node;
-
-+ (void)initialize;
-
-+ (id) newANTLRNodeMapElement;
-+ (id) newANTLRNodeMapElementWithIndex:(id)anIndex Node:(id<ANTLRTree>)aNode;
-- (id) init;
-- (id) initWithAnIndex:(id)anIndex Node:(id)aNode;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id<ANTLRTree>)getNode;
-- (void)setNode:(id<ANTLRTree>)aNode;
-
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRParseTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRParseTree.h
deleted file mode 100644
index 92554e3..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRParseTree.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-//  ANTLRParseTree.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/12/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseTree.h"
-#import "ANTLRCommonToken.h"
-
-@interface ANTLRParseTree : ANTLRBaseTree {
-	id<ANTLRToken> payload;
-	NSMutableArray *hiddenTokens;
-}
-/** A record of the rules used to match a token sequence.  The tokens
- *  end up as the leaves of this tree and rule nodes are the interior nodes.
- *  This really adds no functionality, it is just an alias for CommonTree
- *  that is more meaningful (specific) and holds a String to display for a node.
- */
-+ (ANTLRParseTree *)newANTLRParseTree:(id<ANTLRToken>)label;
-- (id)initWithLabel:(id<ANTLRToken>)label;
-
-- (id<ANTLRTree>)dupNode;
-- (NSInteger)getType;
-- (NSString *)getText;
-- (NSInteger)getTokenStartIndex;
-- (void)setTokenStartIndex:(NSInteger)index;
-- (NSInteger)getTokenStopIndex;
-- (void)setTokenStopIndex:(NSInteger)index;
-- (NSString *)toString;
-- (NSString *)toStringWithHiddenTokens;
-- (NSString *)toInputString;
-- (void)_toStringLeaves:(NSMutableString *)buf;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRParser.h
deleted file mode 100755
index 5ddaf50..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRParser.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRTokenStream.h"
-
-@interface ANTLRParser : ANTLRBaseRecognizer {
-	id<ANTLRTokenStream> input;
-}
-+ (ANTLRParser *)newANTLRParser:(id<ANTLRTokenStream>)anInput;
-+ (ANTLRParser *)newANTLRParser:(id<ANTLRTokenStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream;
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream State:(ANTLRRecognizerSharedState *)aState;
-
-- (id<ANTLRTokenStream>) getInput;
-- (void) setInput: (id<ANTLRTokenStream>) anInput;
-
-- (void) reset;
-
-- (id) getCurrentInputSymbol:(id<ANTLRTokenStream>)anInput;
-- (ANTLRCommonToken *)getMissingSymbol:(id<ANTLRTokenStream>)input
-                             Exception:(ANTLRRecognitionException *)e
-                                 TType:(NSInteger)expectedTokenType
-                                BitSet:(ANTLRBitSet *)follow;
-- (void) setTokenStream:(id<ANTLRTokenStream>)anInput;
-- (id<ANTLRTokenStream>)getTokenStream;
-- (NSString *)getSourceName;
-
-- (void) traceIn:(NSString *)ruleName Index:(int)ruleIndex;
-- (void) traceOut:(NSString *)ruleName Index:(NSInteger) ruleIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRParserRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRParserRuleReturnScope.h
deleted file mode 100755
index aef3dd0..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRParserRuleReturnScope.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRRuleReturnScope.h"
-
-@interface ANTLRParserRuleReturnScope : ANTLRRuleReturnScope {
-	id<ANTLRToken> startToken;
-	id<ANTLRToken> stopToken;
-}
-@property (retain, getter=getStart, setter=setStart:) id<ANTLRToken> startToken;
-@property (retain, getter=getStop, setter=setStop:)   id<ANTLRToken> stopToken;
-
-- (id<ANTLRToken>) getStart;
-- (void) setStart: (id<ANTLRToken>) aStart;
-
-- (id<ANTLRToken>) getStop;
-- (void) setStop: (id<ANTLRToken>) aStop;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRPtrBuffer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRPtrBuffer.h
deleted file mode 100644
index 188f597..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRPtrBuffer.h
+++ /dev/null
@@ -1,91 +0,0 @@
-//
-//  ANTLRPtrBuffer.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define BUFFSIZE         101
-
-@interface ANTLRPtrBuffer : ANTLRLinkBase {
-	//ANTLRPtrBuffer *fNext;
-    NSInteger BuffSize;
-    NSMutableData *buffer;
-    id *ptrBuffer;
-    NSInteger count;
-    NSInteger ptr;
-}
-
-@property (getter=getBuffSize, setter=setBuffSize:) NSInteger BuffSize;
-@property (retain, getter=getBuffer, setter=setBuffer:) NSMutableData *buffer;
-@property (retain, getter=getPtrBuffer, setter=setPtrBuffer:) id *ptrBuffer;
-@property (getter=getCount, setter=setCount:) NSInteger count;
-@property (getter=getPtr, setter=setPtr:) NSInteger ptr;
-
-// Contruction/Destruction
-+(ANTLRPtrBuffer *)newANTLRPtrBuffer;
-+(ANTLRPtrBuffer *)newANTLRPtrBufferWithLen:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-
-- (NSMutableData *)getBuffer;
-- (void)setBuffer:(NSMutableData *)np;
-- (NSInteger)getCount;
-- (void)setCount:(NSInteger)aCount;
-- (id *)getPtrBuffer;
-- (void)setPtrBuffer:(id *)np;
-- (NSInteger)getPtr;
-- (void)setPtr:(NSInteger)np;
-
-- (void) push:(id) v;
-- (id) pop;
-- (id) peek;
-
-- (void) addObject:(id) v;
-- (void) addObjectsFromArray:(ANTLRPtrBuffer *)anArray;
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx;
-- (id)   objectAtIndex:(NSInteger)idx;
-- (void) removeAllObjects;
-
-- (void) ensureCapacity:(NSInteger) index;
-
-@end
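For reference only, and not part of the deletion itself: a minimal sketch of how the ANTLRPtrBuffer API declared above was used as a LIFO buffer. It relies solely on the methods visible in this header; the literal strings are placeholder payloads.

    #import "ANTLRPtrBuffer.h"

    // Push a few objects onto the buffer, peek at the top, then pop it back off.
    ANTLRPtrBuffer *buf = [ANTLRPtrBuffer newANTLRPtrBufferWithLen:16];
    [buf push:@"first"];
    [buf push:@"second"];
    id top = [buf peek];                      // @"second", still on the buffer
    id popped = [buf pop];                    // @"second", now removed
    NSLog(@"count=%ld popped=%@ top=%@", (long)[buf count], popped, top);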
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRecognitionException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRecognitionException.h
deleted file mode 100755
index 853dc0e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRecognitionException.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuntimeException.h"
-#import "ANTLRToken.h"
-#import "ANTLRIntStream.h"
-#import "ANTLRTree.h"
-
-@interface ANTLRRecognitionException : ANTLRRuntimeException {
-	id<ANTLRIntStream> input;
-	NSInteger index;
-	id<ANTLRToken> token;
-	id<ANTLRTree> node;
-	unichar c;
-	NSInteger line;
-	NSInteger charPositionInLine;
-}
-
-@property (retain, getter=getStream, setter=setStream:) id<ANTLRIntStream> input;
-@property (retain, getter=getToken, setter=setToken:) id<ANTLRToken>token;
-@property (retain, getter=getNode, setter=setNode:) id<ANTLRTree>node;
-@property (getter=getLine, setter=setLine:) NSInteger line;
-@property (getter=getCharPositionInLine, setter=setCharPositionInLine:) NSInteger charPositionInLine;
-
-+ (ANTLRRecognitionException *) newANTLRRecognitionException;
-+ (ANTLRRecognitionException *) exceptionWithStream:(id<ANTLRIntStream>) anInputStream; 
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream reason:(NSString *)aReason;
-- (NSInteger) unexpectedType;
-- (id<ANTLRToken>)getUnexpectedToken;
-
-- (id<ANTLRIntStream>) getStream;
-- (void) setStream: (id<ANTLRIntStream>) aStream;
-
-- (id<ANTLRToken>) getToken;
-- (void) setToken: (id<ANTLRToken>) aToken;
-
-- (id<ANTLRTree>) getNode;
-- (void) setNode: (id<ANTLRTree>) aNode;
-
-- (NSString *)getMessage;
-
-- (NSInteger)getCharPositionInLine;
-- (void)setCharPositionInLine:(NSInteger)aPos;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRecognizerSharedState.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRecognizerSharedState.h
deleted file mode 100755
index 0430b79..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRecognizerSharedState.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRRuleStack.h"
-
-@interface ANTLRRecognizerSharedState : NSObject {
-	NSMutableArray *following;          // a stack of FOLLOW bitsets used for context sensitive prediction and recovery
-    NSInteger _fsp;                     // Follow stack pointer
-	BOOL errorRecovery;                 // are we recovering?
-	NSInteger lastErrorIndex;
-	BOOL failed;                        // indicate that some match failed
-    NSInteger syntaxErrors;
-	NSInteger backtracking;             // the level of backtracking
-	ANTLRRuleStack *ruleMemo;			// store previous results of matching rules so we don't have to do it again. Hook in incremental stuff here, too.
-
-	id<ANTLRToken> token;
-	NSInteger  tokenStartCharIndex;
-	NSUInteger tokenStartLine;
-	NSUInteger tokenStartCharPositionInLine;
-	NSUInteger channel;
-	NSUInteger type;
-	NSString   *text;
-}
-
-@property (retain, getter=getFollowing, setter=setFollowing:) NSMutableArray *following;
-@property (assign) NSInteger _fsp;
-@property (assign) BOOL errorRecovery;
-@property (assign) NSInteger lastErrorIndex;
-@property (assign, getter=getFailed, setter=setFailed:) BOOL failed;
-@property (assign) NSInteger syntaxErrors;
-@property (assign, getter=getBacktracking, setter=setBacktracking) NSInteger backtracking;
-@property (retain, getter=getRuleMemo, setter=setRuleMemo:) ANTLRRuleStack *ruleMemo;
-@property (copy, getter=getToken, setter=setToken) id<ANTLRToken> token;
-@property (getter=getType,setter=setType:) NSUInteger type;
-@property (getter=getChannel,setter=setChannel:) NSUInteger channel;
-@property (getter=getTokenStartLine,setter=setTokenStartLine:) NSUInteger tokenStartLine;
-@property (getter=getCharPositionInLine,setter=setCharPositionInLine:) NSUInteger tokenStartCharPositionInLine;
-@property (getter=getTokenStartCharIndex,setter=setTokenStartCharIndex:) NSInteger tokenStartCharIndex;
-@property (retain, getter=getText, setter=setText) NSString *text;
-
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedState;
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedStateWithRuleLen:(NSInteger)aLen;
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedState:(ANTLRRecognizerSharedState *)aState;
-
-- (id) init;
-- (id) initWithRuleLen:(NSInteger)aLen;
-- (id) initWithState:(ANTLRRecognizerSharedState *)state;
-
-- (id<ANTLRToken>) getToken;
-- (void) setToken:(id<ANTLRToken>) theToken;
-
-- (NSUInteger) getType;
-- (void) setType:(NSUInteger) theTokenType;
-
-- (NSUInteger) getChannel;
-- (void) setChannel:(NSUInteger) theChannel;
-
-- (NSUInteger) getTokenStartLine;
-- (void) setTokenStartLine:(NSUInteger) theTokenStartLine;
-
-- (NSUInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSUInteger) theCharPosition;
-
-- (NSInteger) getTokenStartCharIndex;
-- (void) setTokenStartCharIndex:(NSInteger) theTokenStartCharIndex;
-
-- (NSString *) getText;
-- (void) setText:(NSString *) theText;
-
-
-- (NSMutableArray *) getFollowing;
-- (void)setFollowing:(NSMutableArray *)aFollow;
-- (ANTLRRuleStack *) getRuleMemo;
-- (void)setRuleMemo:(ANTLRRuleStack *)aRuleMemo;
-- (BOOL) isErrorRecovery;
-- (void) setIsErrorRecovery: (BOOL) flag;
-
-- (BOOL) getFailed;
-- (void) setFailed: (BOOL) flag;
-
-- (NSInteger)  getBacktracking;
-- (void) setBacktracking:(NSInteger) value;
-- (void) increaseBacktracking;
-- (void) decreaseBacktracking;
-- (BOOL) isBacktracking;
-
-- (NSInteger) lastErrorIndex;
-- (void) setLastErrorIndex:(NSInteger) value;
-
-@end
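For reference only: a sketch of the backtracking bookkeeping this shared-state object carries for a recognizer, using only methods declared above. Generated recognizers normally drive these calls themselves; the sketch just makes the protocol visible.

    #import "ANTLRRecognizerSharedState.h"

    ANTLRRecognizerSharedState *state = [ANTLRRecognizerSharedState newANTLRRecognizerSharedState];
    [state increaseBacktracking];             // enter a speculative (syntactic predicate) match
    if ([state isBacktracking]) {
        [state setFailed:NO];                 // failures are recorded here instead of being reported
    }
    [state decreaseBacktracking];             // leave the speculative region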
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRewriteRuleElementStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRewriteRuleElementStream.h
deleted file mode 100755
index 132a0cc..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRewriteRuleElementStream.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-
-typedef union {
-    id single;
-    NSMutableArray *multiple;
-} Elements;
-
-// TODO: this should be separated into stream and enumerator classes
-@interface ANTLRRewriteRuleElementStream : NSObject {
-    NSInteger cursor;
-    BOOL dirty;        ///< indicates whether the stream should return copies of its elements, set to true after a call to -reset
-    BOOL isSingleElement;
-    Elements elements;
-    
-    NSString *elementDescription;
-    id<ANTLRTreeAdaptor> treeAdaptor;
-}
-
-@property (assign, getter=GetCursor, setter=SetCursor:) NSInteger cursor;
-@property (assign, getter=Getdirty, setter=Setdirty:) BOOL dirty;
-@property (assign, getter=GetIsSingleElement, setter=SetIsSingleElement:) BOOL isSingleElement;
-@property (assign, getter=GetElement, setter=SetElement:) Elements elements;
-@property (assign, getter=GetElementDescription, setter=SetElementDescription:) NSString *elementDescription;
-@property (retain, getter=GetTreeAdaptor, setter=SetTreeAdaptor:) id<ANTLRTreeAdaptor> treeAdaptor;
-
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription;
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription
-                                                            element:(id)anElement;
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription
-                                                           elements:(NSArray *)theElements;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-
-- (void)reset;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor;
-
-- (void) addElement:(id)anElement;
-- (NSInteger) size;
- 
-- (BOOL) hasNext;
-- (id<ANTLRTree>) nextTree;
-- (id<ANTLRTree>) _next;       // internal: TODO: redesign if necessary. maybe delegate
-
-- (id) copyElement:(id)element;
-- (id) toTree:(id)element;
-
-- (NSString *) getDescription;
-- (void) setDescription:(NSString *)description;
-
-@end
-
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRewriteRuleSubtreeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRewriteRuleSubtreeStream.h
deleted file mode 100755
index 1d18b24..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRewriteRuleSubtreeStream.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRewriteRuleElementStream.h"
-
-@interface ANTLRRewriteRuleSubtreeStream : ANTLRRewriteRuleElementStream {
-
-}
-
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription;
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-                                                             element:(id)anElement;
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-                                                            elements:(NSArray *)theElements;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-
-- (id) nextNode;
-- (id) dup:(id)element;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRewriteRuleTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRewriteRuleTokenStream.h
deleted file mode 100755
index 3a516de..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRewriteRuleTokenStream.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRewriteRuleElementStream.h"
-
-
-@interface ANTLRRewriteRuleTokenStream : ANTLRRewriteRuleElementStream {
-
-}
-
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)anAdaptor
-                          description:(NSString *)elementDescription;
-/** Create a stream with one element */
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)adaptor
-                          description:(NSString *)elementDescription
-                              element:(id) oneElement;
-/** Create a stream, but feed off an existing list */
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)adaptor
-                          description:(NSString *)elementDescription
-                             elements:(NSMutableArray *)elements;
-
-- (id) init;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor 
-               description:(NSString *)aDescription
-                   element:(id)element;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription
-                  elements:(NSMutableArray *)elements;
-                               
-/** Get next token from stream and make a node for it */
-- (id) nextNode;
-
-- (id) nextToken;
-
-/** Don't convert to a tree unless they explicitly call nextTree.
- *  This way we can do hetero tree nodes in rewrite.
- */
-- (id<ANTLRTree>) toTree:(id<ANTLRToken>)element;
-
-@end
\ No newline at end of file
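For reference only: a fragment showing how a generated rule fed this token stream during an AST rewrite. Here adaptor (an id<ANTLRTreeAdaptor>) and idToken (an id<ANTLRToken>) are placeholders for values the generated parser already holds; only methods declared in this header and its ANTLRRewriteRuleElementStream superclass are used.

    #import "ANTLRRewriteRuleTokenStream.h"

    ANTLRRewriteRuleTokenStream *stream =
        [ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:adaptor
                                                        description:@"token ID"];
    [stream addElement:idToken];              // collected while matching the rule
    while ([stream hasNext]) {
        id node = [stream nextNode];          // the adaptor wraps the token in a tree node
        (void)node;                           // generated code would attach it to the rewrite tree
    }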
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleMapElement.h
deleted file mode 100644
index e040b18..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleMapElement.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRRuleMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-
-@interface ANTLRRuleMapElement : ANTLRBaseMapElement {
-    NSNumber *ruleNum;
-}
-
-@property (retain, getter=getRuleNum, setter=setRuleNum:) NSNumber *ruleNum;
-
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElement;
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElementWithIndex:(NSNumber *)anIdx;
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElementWithIndex:(NSNumber *)anIdx RuleNum:(NSNumber *)aRuleNum;
-- (id) init;
-- (id) initWithAnIndex:(NSNumber *)anIdx;
-- (id) initWithAnIndex:(NSNumber *)anIdx RuleNum:(NSNumber *)aRuleNum;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSNumber *)getRuleNum;
-- (void)setRuleNum:(NSNumber *)aRuleNum;
-
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleMemo.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleMemo.h
deleted file mode 100644
index 63a5ae2..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleMemo.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-//  ANTLRRuleMemo.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-@interface ANTLRRuleMemo : ANTLRLinkBase {
-    NSNumber *startIndex;
-    NSNumber *stopIndex;
-}
-
-@property (retain, getter=getStartIndex, setter=setStartIndex) NSNumber *startIndex;
-@property (retain, getter=getStopIndex, setter=setStopIndex) NSNumber *stopIndex;
-
-+ (ANTLRRuleMemo *)newANTLRRuleMemo;
-+ (ANTLRRuleMemo *)newANTLRRuleMemoWithStartIndex:(NSNumber *)aStartIndex StopIndex:(NSNumber *)aStopIndex;
-
-- (id) init;
-- (id) initWithStartIndex:(NSNumber *)aStartIndex StopIndex:(NSNumber *)aStopIndex;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-- (ANTLRRuleMemo *)getRuleWithStartIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStartIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStopIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStartIndex;
-- (void)setStartIndex:(NSNumber *)aStartIndex;
-- (NSNumber *)getStopIndex;
-- (void)setStopIndex:(NSNumber *)aStopIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleReturnScope.h
deleted file mode 100644
index 4750c16..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleReturnScope.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRRuleReturnScope.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-
-@interface ANTLRRuleReturnScope : NSObject <NSCopying> {
-
-}
-
-/** Return the start token or tree */
-- (id<ANTLRToken>) getStart;
-
-/** Return the stop token or tree */
-- (id<ANTLRToken>) getStop;
-
-/** Has a value potentially if output=AST; */
-- (id) getNode;
-
-/** Has a value potentially if output=template; Don't use StringTemplate
- *  type as it then causes a dependency with ST lib.
- */
-- (id) getTemplate;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleStack.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleStack.h
deleted file mode 100644
index 12d450b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleStack.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-//  ANTLRRuleStack.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseStack.h"
-#import "ANTLRHashRule.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRRuleStack : ANTLRBaseStack {
-}
-
-// Contruction/Destruction
-+(ANTLRRuleStack *)newANTLRRuleStack;
-+(ANTLRRuleStack *)newANTLRRuleStack:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-- (ANTLRHashRule *) pop;
-
-- (void) insertObject:(ANTLRHashRule *)aHashRule atIndex:(NSInteger)idx;
-- (ANTLRHashRule *)objectAtIndex:(NSInteger)idx;
-- (void)putHashRuleAtRuleIndex:(NSInteger)aRuleIndex StartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex;
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleStack.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleStack.m
deleted file mode 100644
index 909192f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuleStack.m
+++ /dev/null
@@ -1,147 +0,0 @@
-//
-//  ANTLRRuleStack.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRRuleStack.h"
-#import "ANTLRTree.h"
-
-/*
- * Start of ANTLRRuleStack
- */
-@implementation ANTLRRuleStack
-
-+ (ANTLRRuleStack *)newANTLRRuleStack
-{
-    return [[ANTLRRuleStack alloc] init];
-}
-
-+ (ANTLRRuleStack *)newANTLRRuleStack:(NSInteger)cnt
-{
-    return [[ANTLRRuleStack alloc] initWithLen:cnt];
-}
-
-- (id)init
-{
-	if ((self = [super init]) != nil) {
-	}
-    return( self );
-}
-
-- (id)initWithLen:(NSInteger)cnt
-{
-	if ((self = [super initWithLen:cnt]) != nil) {
-	}
-    return( self );
-}
-
-- (void)dealloc
-{
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    return [super copyWithZone:aZone];
-}
-
-- (NSInteger)count
-{
-    ANTLRRuleMemo *anElement;
-    NSInteger aCnt = 0;
-    for( int i = 0; i < BuffSize; i++ ) {
-        if ((anElement = ptrBuffer[i]) != nil)
-            aCnt++;
-    }
-    return aCnt;
-}
-
-- (NSInteger)size
-{
-    ANTLRRuleMemo *anElement;
-    NSInteger aSize = 0;
-    for( int i = 0; i < BuffSize; i++ ) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize++;
-        }
-    }
-    return aSize;
-}
-
-- (ANTLRHashRule *)pop
-{
-    return (ANTLRHashRule *)[super pop];
-}
-
-- (void) insertObject:(ANTLRHashRule *)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        NSLog( @"In ANTLRRuleStack attempting to insert aRule at Index %d, but Buffer is only %d long\n", idx, BuffSize );
-        [self ensureCapacity:idx];
-    }
-    if ( aRule != ptrBuffer[idx] ) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (ANTLRHashRule *)objectAtIndex:(NSInteger)idx
-{
-    if (idx < BuffSize) {
-        return ptrBuffer[idx];
-    }
-    return nil;
-}
-
-- (void)putHashRuleAtRuleIndex:(NSInteger)aRuleIndex StartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex
-{
-    ANTLRHashRule *aHashRule;
-    ANTLRRuleMemo *aRuleMemo;
-
-    if (aRuleIndex >= BuffSize) {
-        NSLog( @"putHashRuleAtRuleIndex attempting to insert aRule at Index %d, but Buffer is only %d long\n", aRuleIndex, BuffSize );
-        [self ensureCapacity:aRuleIndex];
-    }
-    if ((aHashRule = ptrBuffer[aRuleIndex]) == nil) {
-        aHashRule = [[ANTLRHashRule newANTLRHashRuleWithLen:17] retain];
-        ptrBuffer[aRuleIndex] = aHashRule;
-    }
-    if (( aRuleMemo = [aHashRule objectAtIndex:aStartIndex] ) == nil ) {
-        aRuleMemo = [[ANTLRRuleMemo newANTLRRuleMemo] retain];
-        [aHashRule insertObject:aRuleMemo atIndex:aStartIndex];
-    }
-    [aRuleMemo setStartIndex:[NSNumber numberWithInteger:aStartIndex]];
-    [aRuleMemo setStopIndex:[NSNumber numberWithInteger:aStopIndex]];
-}
-
-@end
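For reference only: the memoization entry point implemented above, exercised directly. A start/stop index pair is stored for a rule number and read back; only methods declared in ANTLRRuleStack.h are used, and the numeric indices are arbitrary examples.

    #import "ANTLRRuleStack.h"

    ANTLRRuleStack *memo = [ANTLRRuleStack newANTLRRuleStack:64];
    [memo putHashRuleAtRuleIndex:3 StartIndex:10 StopIndex:42];   // rule 3 matched input 10..42
    ANTLRHashRule *slot = [memo objectAtIndex:3];
    NSLog(@"slot 3 = %@, %ld rule(s) memoized", slot, (long)[memo count]);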
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuntimeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuntimeException.h
deleted file mode 100644
index 6cf0918..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRRuntimeException.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-//  ANTLRRuntimeException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/5/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@interface ANTLRRuntimeException : NSException
-{
-}
-
-+ (ANTLRRuntimeException *) newANTLRNoSuchElementException:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRIllegalArgumentException:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRRuntimeException:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRRuntimeException:(NSString *)aName reason:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRRuntimeException:(NSString *)aName reason:(NSString *)aReason userInfo:aUserInfo;
-
-- (id) init;
-- (id)initWithRuntime:(NSString *)aReason;
-- (id)initWithReason:(NSString *)aReason;
-- (id)initWithName:(NSString *)aName reason:(NSString *)aReason;
-- (id)initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-- (NSString *) Description;
-
-//    - (void)setDecisionNumber:(NSInteger)decisionNumber;
-//    - (void)setStateNumber:(NSInteger)stateNumber;
-@end
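For reference only: the factory methods above wrap NSException, so a runtime error was raised along these lines (the message text is an arbitrary example).

    #import "ANTLRRuntimeException.h"

    @throw [ANTLRRuntimeException newANTLRIllegalArgumentException:@"index must be >= 0"];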
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRStreamEnumerator.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRStreamEnumerator.h
deleted file mode 100644
index a0e0f69..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRStreamEnumerator.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-//  ANTLRStreamEnumertor.h
-//  ANTLR
-//
-//  Created by Ian Michell on 29/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRStreamEnumerator : NSEnumerator 
-{
-	NSInteger i;
-	id eof;
-	NSMutableArray *nodes;
-}
-
--(id) initWithNodes:(NSMutableArray *) n andEOF:(id) o;
--(BOOL) hasNext;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRStringStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRStringStream.h
deleted file mode 100755
index 2b13c7d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRStringStream.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCharStream.h"
-#import "ANTLRCharStreamState.h"
-#import "ANTLRPtrBuffer.h"
-
-@interface ANTLRStringStream : NSObject < ANTLRCharStream > {
-	NSString *data;
-	NSInteger n;
-	NSInteger p;
-	NSInteger line;
-	NSInteger charPositionInLine;
-	NSInteger markDepth;
-	ANTLRPtrBuffer *markers;
-	NSInteger lastMarker;
-	NSString *name;
-    ANTLRCharStreamState *charState;
-}
-
-@property (retain, getter=getData,setter=setData:) NSString *data;
-@property (getter=getP,setter=setP:) NSInteger p;
-@property (getter=getN,setter=setN:) NSInteger n;
-@property (getter=getLine,setter=setLine:) NSInteger line;
-@property (getter=getCharPositionInLine,setter=setCharPositionInLine:) NSInteger charPositionInLine;
-@property (getter=getMarkDepth,setter=setMarkDepth:) NSInteger markDepth;
-@property (retain, getter=getMarkers, setter=setMarkers:) ANTLRPtrBuffer *markers;
-@property (getter=getLastMarker,setter=setLastMarker:) NSInteger lastMarker;
-@property (retain, getter=getSourceName, setter=setSourceName:) NSString *name;
-@property (retain, getter=getCharState, setter=setCharState:) ANTLRCharStreamState *charState;
-
-+ newANTLRStringStream;
-
-+ newANTLRStringStream:(NSString *)aString;
-
-+ newANTLRStringStream:(char *)myData Count:(NSInteger)numBytes;
-
-- (id) init;
-
-// this initializer copies the string
-- (id) initWithString:(NSString *) theString;
-
-// This is the preferred constructor as no data is copied
-- (id) initWithStringNoCopy:(NSString *) theString;
-
-- (id) initWithData:(char *)myData Count:(NSInteger)numBytes;
-
-- (void) dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-// reset the stream's state, but keep the data to feed off
-- (void) reset;
-// consume one character from the stream
-- (void) consume;
-
-// look ahead i characters
-- (NSInteger) LA:(NSInteger) i;
-- (NSInteger) LT:(NSInteger) i;
-
-// returns the position of the current input symbol
-- (NSInteger) getIndex;
-// total length of the input data
-- (NSInteger) size;
-
-// seek and rewind in the stream
-- (NSInteger) mark;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) release:(NSInteger) marker;
-- (void) seek:(NSInteger) index;
-
-// provide the streams data (e.g. for tokens using indices)
-- (NSString *) substring:(NSInteger)startIndex To:(NSInteger)stopIndex;
-- (NSString *) substringWithRange:(NSRange) theRange;
-
-// used for tracking the current position in the input stream
-- (NSInteger) getLine;
-- (void) setLine:(NSInteger) theLine;
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger) thePos;
-
-- (NSInteger) getN;
-- (void) setN:(NSInteger)num;
-
-- (NSInteger) getP;
-- (void) setP:(NSInteger)num;
-
-- (ANTLRPtrBuffer *)getMarkers;
-- (void) setMarkers:(ANTLRPtrBuffer *)aMarkerList;
-
-- (NSString *)getSourceName;
-
-- (NSString *)toString;
-
-// accessors to the raw data of this stream
-- (NSString *) getData;
-- (void) setData: (NSString *) aData;
-
-
-@end
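For reference only: a sketch of the character-stream API declared above, driving lookahead, mark, and rewind over a literal input string. Everything used here is declared in this header; the input text is an arbitrary example.

    #import "ANTLRStringStream.h"

    ANTLRStringStream *input = [ANTLRStringStream newANTLRStringStream:@"int x = 1;"];
    NSInteger c = [input LA:1];               // look at the first character without consuming it
    NSInteger marker = [input mark];          // remember this position
    [input consume];
    [input consume];
    [input rewind:marker];                    // back to where we marked
    NSLog(@"size=%ld line=%ld LA(1)=%c", (long)[input size], (long)[input getLine], (char)c);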
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRSymbolStack.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRSymbolStack.h
deleted file mode 100644
index 169df9f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRSymbolStack.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//
-//  ANTLRSymbolStack.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseStack.h"
-// #import "ANTLRSymbolScope.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRSymbolsScope : NSObject
-{
-    
-}
-
-+ (ANTLRSymbolsScope *)newANTLRSymbolsScope;
-
-- (id)init;
-@end
-
-
-@interface ANTLRSymbolStack : ANTLRBaseStack {
-}
-
-// Construction/Destruction
-+(ANTLRSymbolStack *)newANTLRSymbolStack;
-+(ANTLRSymbolStack *)newANTLRSymbolStackWithLen:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-
--(ANTLRSymbolsScope *)getHashMapEntry:(NSInteger)idx;
-
--(ANTLRSymbolsScope **)getHashMap;
-
--(ANTLRSymbolsScope *) pop;
-
-- (void) insertObject:(ANTLRSymbolsScope *)aScope atIndex:(NSInteger)idx;
-- (ANTLRSymbolsScope *)objectAtIndex:(NSInteger)idx;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRSymbolStack.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRSymbolStack.m
deleted file mode 100644
index 1dd6775..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRSymbolStack.m
+++ /dev/null
@@ -1,123 +0,0 @@
-//
-//  ANTLRSymbolStack.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRSymbolStack.h"
-#import "ANTLRTree.h"
-
-
-@implementation ANTLRSymbolsScope
-
-+ (ANTLRSymbolsScope *)newANTLRSymbolsScope
-{
-    return( [[ANTLRSymbolsScope alloc] init] );
-}
-
-- (id)init
-{
-    if ((self = [super init]) != nil) {
-    }
-    return (self);
-}
-
-@end
-
-/*
- * Start of ANTLRSymbolStack
- */
-@implementation ANTLRSymbolStack
-
-+(ANTLRSymbolStack *)newANTLRSymbolStack
-{
-    return [[ANTLRSymbolStack alloc] init];
-}
-
-+(ANTLRSymbolStack *)newANTLRSymbolStackWithLen:(NSInteger)cnt
-{
-    return [[ANTLRSymbolStack alloc] initWithLen:cnt];
-}
-
--(id)init
-{
-	if ((self = [super init]) != nil) {
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)cnt
-{
-	if ((self = [super initWithLen:cnt]) != nil) {
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    return [super copyWithZone:aZone];
-}
-
--(ANTLRSymbolsScope *)getHashMapEntry:(NSInteger)idx
-{
-	return( (ANTLRSymbolsScope *)[super objectAtIndex:idx] );
-}
-
--(ANTLRSymbolsScope **)getHashMap
-{
-	return( (ANTLRSymbolsScope **)ptrBuffer );
-}
-
--(ANTLRSymbolsScope *) pop
-{
-    return (ANTLRSymbolsScope *)[super pop];
-}
-
-- (void) insertObject:(ANTLRSymbolsScope *)aRule atIndex:(NSInteger)idx
-{
-    if (aRule != ptrBuffer[idx]) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (ANTLRSymbolsScope *)objectAtIndex:(NSInteger)idx
-{
-    return (ANTLRSymbolsScope *)[super objectAtIndex:idx];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRToken+DebuggerSupport.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRToken+DebuggerSupport.h
deleted file mode 100755
index 659e763..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRToken+DebuggerSupport.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-//  ANTLRToken+DebuggerSupport.h
-//  ANTLR
-//
-//  Created by Kay Röpke on 03.12.2006.
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-
-@interface ANTLRCommonToken(DebuggerSupport)
-
-- (NSString *)debuggerDescription;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRToken.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRToken.h
deleted file mode 100755
index 64524f0..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRToken.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-
-typedef enum {
-	ANTLRTokenTypeEOF = -1,
-	ANTLRTokenTypeInvalid,
-	ANTLRTokenTypeEOR,
-	ANTLRTokenTypeDOWN,
-	ANTLRTokenTypeUP,
-	ANTLRTokenTypeMIN
-} ANTLRTokenType;
-
-typedef enum {
-	ANTLRTokenChannelDefault = 0,
-    ANTLRTokenChannelHidden = 99
-} ANTLRTokenChannel;
-
-#define HIDDEN 99
-
-@protocol ANTLRToken < NSObject, NSCopying >
-
-// The singleton eofToken instance.
-+ (id<ANTLRToken>) eofToken;
-// The default channel for this class of Tokens
-+ (ANTLRTokenChannel) defaultChannel;
-
-// provide hooks to explicitly set the text as opposed to using the indices into the CharStream
-- (NSString *) getText;
-- (void) setText:(NSString *) theText;
-
-- (NSInteger) getType;
-- (void) setType: (NSInteger) aType;
-
-// ANTLR v3 provides automatic line and position tracking. Subclasses do not need to
-// override these, if they do not want to store line/pos tracking information
-- (NSUInteger) getLine;
-- (void) setLine: (NSUInteger) aLine;
-
-- (NSUInteger) getCharPositionInLine;
-- (void) setCharPositionInLine: (NSUInteger) aCharPositionInLine;
-
-// explicitly change the channel this Token is on. The default parser implementation
-// just sees the defaultChannel.
-// A common idiom is to put whitespace tokens on channel 99.
-- (NSUInteger) getChannel;
-- (void) setChannel: (NSUInteger) aChannel;
-
-// the index of this Token into the TokenStream
-- (NSUInteger) getTokenIndex;
-- (void) setTokenIndex: (NSUInteger) aTokenIndex;
-- (NSString *)toString;
-
-@end
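
Illustrative only: routing a whitespace token onto the hidden channel mentioned in the comments above. The token is assumed to come from a concrete class such as ANTLRCommonToken, declared elsewhere in the runtime.

#import "ANTLRToken.h"

static void hideWhitespace(id<ANTLRToken> tok)
{
    // Parsers only see the default channel, so hidden tokens are skipped.
    [tok setChannel:ANTLRTokenChannelHidden];   // channel 99
}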
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTokenRewriteStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTokenRewriteStream.h
deleted file mode 100644
index 0d8681f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTokenRewriteStream.h
+++ /dev/null
@@ -1,170 +0,0 @@
-//
-//  ANTLRTokenRewriteStream.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/19/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTokenStream.h"
-#import "ANTLRLinkBase.h"
-#import "ANTLRHashMap.h"
-#import "ANTLRMapElement.h"
-#import "ANTLRTokenSource.h"
-
-// Define the rewrite operation hierarchy
-
-@interface ANTLRRewriteOperation : ANTLRCommonTokenStream
-{
-/** What index into rewrites List are we? */
-NSInteger instructionIndex;
-/** Token buffer index. */
-NSInteger index;
-NSString *text;
-}
-
-@property (getter=getInstructionIndex, setter=setInstructionIndex:) NSInteger instructionIndex;
-@property (getter=getIndex, setter=setIndex:) NSInteger index;
-@property (retain, getter=getText, setter=setText:) NSString *text;
-
-+ (ANTLRRewriteOperation *) newANTLRRewriteOperation:(NSInteger)index Text:(NSString *)text;
-
-- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText;
-
-/** Execute the rewrite operation by possibly adding to the buffer.
- *  Return the index of the next token to operate on.
- */
-- (NSInteger) execute:(NSString *)buf;
-
-- (NSString *)toString;
-- (NSInteger) indexOf:(char)aChar inString:(NSString *)aString;
-@end
-
-@interface ANTLRInsertBeforeOp : ANTLRRewriteOperation {
-}
-
-+ (ANTLRInsertBeforeOp *) newANTLRInsertBeforeOp:(NSInteger)anIndex Text:(NSString *)theText;
-- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText;
-
-@end
-
-/** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
- *  instructions.
- */
-@interface ANTLRReplaceOp : ANTLRRewriteOperation {
-    NSInteger lastIndex;
-}
-
-@property (getter=getLastIndex, setter=setLastIndex:) NSInteger lastIndex;
-
-+ (ANTLRReplaceOp *) newANTLRReplaceOp:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString*)theText;
-- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-
-- (NSInteger) execute:(NSString *)buf;
-- (NSString *)toString;
-
-@end
-
-@interface ANTLRDeleteOp : ANTLRReplaceOp {
-}
-+ (ANTLRDeleteOp *) newANTLRDeleteOp:(NSInteger)from ToIndex:(NSInteger)to;
-
-- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to;
-
-- (NSString *)toString;
-
-@end
-
-
-@interface ANTLRTokenRewriteStream : ANTLRCommonTokenStream {
-/** You may have multiple, named streams of rewrite operations.
- *  I'm calling these things "programs."
- *  Maps String (name) -> rewrite (List)
- */
-ANTLRHashMap *programs;
-
-/** Map String (program name) -> Integer index */
-ANTLRHashMap *lastRewriteTokenIndexes;
-}
-
-@property (retain, getter=getPrograms, setter=setPrograms:) ANTLRHashMap *programs;
-@property (retain, getter=getLastRewriteTokenIndexes, setter=setLastRewriteTokenIndexes:) ANTLRHashMap *lastRewriteTokenIndexes;
-
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream;
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream:(id<ANTLRTokenSource>) aTokenSource;
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream:(id<ANTLRTokenSource>) aTokenSource Channel:(NSInteger)aChannel;
-
-- (id) init;
-- (id)initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource;
-- (id)initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource Channel:(NSInteger)aChannel;
-
-- (ANTLRHashMap *)getPrograms;
-- (void)setPrograms:(ANTLRHashMap *)aProgList;
-
-- (void) rollback:(NSInteger)instructionIndex;
-- (void) rollback:(NSString *)programName Index:(NSInteger)anInstructionIndex;
-- (void) deleteProgram;
-- (void) deleteProgram:(NSString *)programName;
-- (void) insertAfterToken:(id<ANTLRToken>)t Text:(NSString *)theText;
-- (void) insertAfterIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) insertAfterProgNam:(NSString *)programName Index:(NSInteger)anIndex Text:(NSString *)theText;
-
-
-- (void) insertBeforeToken:(id<ANTLRToken>)t Text:(NSString *)theText;
-- (void) insertBeforeIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) insertBeforeProgName:(NSString *)programName Index:(NSInteger)index Text:(NSString *)theText;
-- (void) replaceFromIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) replaceFromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-- (void) replaceFromToken:(id<ANTLRToken>)indexT Text:(NSString *)theText;
-- (void) replaceFromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to Text:(NSString *)theText;
-- (void) replaceProgNam:(NSString *)programName Token:(id<ANTLRToken>)from Token:(id<ANTLRToken>)to Text:(NSString *)theText;
-- (void) replaceProgNam:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-- (void) delete:(NSInteger)anIndex;
-- (void) delete:(NSInteger)from ToIndex:(NSInteger)to;
-- (void) deleteToken:(id<ANTLRToken>)indexT;
-- (void) deleteFromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to;
-- (void) delete:(NSString *)programName FromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to;
-- (void) delete:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to;
-- (NSInteger)getLastRewriteTokenIndex;
-- (NSInteger)getLastRewriteTokenIndex:(NSString *)programName;
-- (void)setLastRewriteTokenIndex:(NSString *)programName Index:(NSInteger)anInt;
-- (ANTLRHashMap *) getProgram:(NSString *)name;
-- (ANTLRHashMap *) initializeProgram:(NSString *)name;
-- (NSString *)toOriginalString;
-- (NSString *)toOriginalString:(NSInteger)start End:(NSInteger)end;
-- (NSString *)toString;
-- (NSString *)toString:(NSString *)programName;
-- (NSString *)toStringFromStart:(NSInteger)start ToEnd:(NSInteger)end;
-- (NSString *)toString:(NSString *)programName FromStart:(NSInteger)start ToEnd:(NSInteger)end;
-- (ANTLRHashMap *)reduceToSingleOperationPerIndex:(ANTLRHashMap *)rewrites;
-- (ANTLRHashMap *)getKindOfOps:(ANTLRHashMap *)rewrites KindOfClass:(Class)kind;
-- (ANTLRHashMap *)getKindOfOps:(ANTLRHashMap *)rewrites KindOfClass:(Class)kind Index:(NSInteger)before;
-- (NSString *)catOpText:(id)a PrevText:(id)b;
-- (NSMutableString *)toDebugString;
-- (NSMutableString *)toDebugStringFromStart:(NSInteger)start ToEnd:(NSInteger)end;
-                    
-@end
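
A hedged sketch of the rewrite-program idea this header describes. The token source is assumed to be any generated ANTLR lexer, and the indices and replacement text are hypothetical.

#import "ANTLRTokenRewriteStream.h"

static NSString *annotate(id<ANTLRTokenSource> lexer)
{
    ANTLRTokenRewriteStream *tokens =
        [ANTLRTokenRewriteStream newANTLRTokenRewriteStream:lexer];
    // Edits are queued against the default program and only applied when
    // rendering; the underlying token buffer is never modified.
    [tokens insertBeforeIndex:0 Text:@"/* generated */ "];
    [tokens replaceFromIndex:3 ToIndex:5 Text:@"<replaced>"];
    return [tokens toString];   // original text with the queued edits applied
}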
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTokenSource.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTokenSource.h
deleted file mode 100755
index 4d6b6ee..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTokenSource.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRToken.h"
-
-// Anything that responds to -nextToken can be treated as a lexer.
-// For instance, this can be a flex lexer, a handwritten one, or even
-// a proxy for a remotely running token source (database, lexer, whatever).
-@protocol ANTLRTokenSource <NSObject, NSCopying>
-
-- (id<ANTLRToken>) nextToken;
-- (NSString *)getSourceName;
-
-@end
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTokenStream.h
deleted file mode 100755
index c104578..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRIntStream.h"
-#import "ANTLRToken.h"
-
-@protocol ANTLRTokenStream < ANTLRIntStream >
-
-// Get Token at current input pointer + i ahead where i=1 is next Token.
-// i<0 indicates tokens in the past.  So -1 is previous token and -2 is
-// two tokens ago. LT:0 is undefined.  For i>=n, return Token.EOFToken.
-// Return null for LT:0 and any index that results in an absolute address
-// that is negative.
-
-- (id<ANTLRToken>) LT:(NSInteger) i;
-
-- (id<ANTLRToken>) getToken:(NSUInteger) i;
-
-- (id) getTokenSource;
-
-- (NSString *) toString;
-/** Return the text of all tokens from start to stop, inclusive.
- *  If the stream does not buffer all the tokens then it can just
- *  return "" or null;  Users should not access $ruleLabel.text in
- *  an action of course in that case.
- */
-- (NSString *)toStringFromStart:(NSInteger)startIdx ToEnd:(NSInteger)stopIdx;
-
-/** Because the user is not required to use a token with an index stored
- *  in it, we must provide a means for two token objects themselves to
- *  indicate the start/end location.  Most often this will just delegate
- *  to the other toString(int,int).  This is also parallel with
- *  the TreeNodeStream.toString(Object,Object).
- */
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-
-@end
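
The LT: contract documented above, in a short assumed usage; the concrete stream (e.g. ANTLRCommonTokenStream) is declared elsewhere in the runtime.

#import <Foundation/Foundation.h>
#import "ANTLRTokenStream.h"

static void peekAround(id<ANTLRTokenStream> tokens)
{
    id<ANTLRToken> next = [tokens LT:1];    // next token to be consumed
    id<ANTLRToken> prev = [tokens LT:-1];   // most recently consumed token
    NSLog(@"next=%@ prev=%@", [next getText], [prev getText]);
}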
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTree.h
deleted file mode 100755
index f269b2d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTree.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-@protocol ANTLRTree < NSObject, NSCopying >
-
-//+ (id<ANTLRTree>) invalidNode;
-
-- (id<ANTLRTree>) getChild:(NSUInteger)index;
-- (NSUInteger) getChildCount;
-
-// Tree tracks parent and child index as of ANTLR > 3.0
-
-- (id<ANTLRTree>)getParent;
-
-- (void) setParent:(id<ANTLRTree>)t;
-
-/** Is there a node above with token type ttype? */
-- (BOOL) hasAncestor:(NSInteger)ttype;
-
-/** Walk upwards and get first ancestor with this token type. */
-- (id<ANTLRTree>) getAncestor:(NSInteger) ttype;
-
-/** Return a list of all ancestors of this node.  The first node of
- *  list is the root and the last is the parent of this node.
- */
-- (NSMutableArray *) getAncestors;
-
-/** This node is what child index? 0..n-1 */
-- (NSInteger) getChildIndex;
-
-- (void) setChildIndex:(NSInteger) index;
-
-/** Set the parent and child index values for all children */
-- (void) freshenParentAndChildIndexes;
-
-/** Add t as a child to this node.  If t is null, do nothing.  If t
- *  is nil, add all children of t to this' children.
- */
-- (void) addChild:(id<ANTLRTree>) t;
-
-/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
-- (void) setChild:(NSInteger)i With:(id<ANTLRTree>) t;
-
-- (id) deleteChild:(NSInteger) i;
-
-/** Delete children from start to stop and replace with t even if t is
- *  a list (nil-root tree).  num of children can increase or decrease.
- *  For huge child lists, inserting children can force walking rest of
- *  children to set their childindex; could be slow.
- */
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t;	
-
-- (NSArray *) getChildren;
-// Add every tree in theChildren as a child of this node; each entry is
-//  handled the same way addChild: handles a single tree.
-
-- (void) addChildren:(NSArray *) theChildren;
-//- (void) removeAllChildren;
-
-// Indicates the node is a nil node but may still have children, meaning
-// the tree is a flat list.
-
-- (BOOL) isNil;
-
-/**  What is the smallest token index (indexing from 0) for this node
- *   and its children?
- */
-- (NSInteger) getTokenStartIndex;
-
-- (void) setTokenStartIndex:(NSInteger) index;
-
-/**  What is the largest token index (indexing from 0) for this node
- *   and its children?
- */
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (id<ANTLRTree>) dupNode;
-
-- (NSString *) toString;
-
-#pragma mark Copying
-- (id) copyWithZone:(NSZone *)aZone;	// the children themselves are not copied here!
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-#pragma mark Tree Parser support
-- (NSInteger) getType;
-- (NSString *) getText;
-// In case we don't have a token payload, what is the line for errors?
-- (NSInteger) getLine;
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger)pos;
-
-#pragma mark Informational
-- (NSString *) treeDescription;
-- (NSString *) description;
-
-@end
-
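For reference, a sketch that walks one level of a tree through the protocol above; the node is assumed to be an ANTLRCommonTree produced by a parser, and the function name is illustrative.

#import <Foundation/Foundation.h>
#import "ANTLRTree.h"

static void printChildren(id<ANTLRTree> node)
{
    for (NSUInteger i = 0; i < [node getChildCount]; i++) {
        id<ANTLRTree> child = [node getChild:i];
        NSLog(@"type=%ld text=%@", (long)[child getType], [child getText]);
    }
}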
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeAdaptor.h
deleted file mode 100755
index e6579cf..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeAdaptor.h
+++ /dev/null
@@ -1,159 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRToken.h"
-#import "ANTLRBaseTree.h"
-#import "ANTLRTokenStream.h"
-
-#pragma warning tree/node diction is broken.
-
-@protocol ANTLRTreeAdaptor <NSObject, NSCopying>
-
-#pragma mark Construction
-
-+ (id<ANTLRTree>) newEmptyTree;
-
-- (id<ANTLRTree>) createTree:(id<ANTLRToken>)payload;
-
-#pragma mark ANTLRTreeAdaptor implementation
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)aNode;	// copies just the node
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree;	// copies the entire subtree, recursively
-
-/** Return a nil node (an empty but non-null node) that can hold
- *  a list of element as the children.  If you want a flat tree (a list)
- *  use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
- */
-- (id) emptyNode;
-
-/** Return a tree node representing an error.  This node records the
- *  tokens consumed during error recovery.  The start token indicates the
- *  input symbol at which the error was detected.  The stop token indicates
- *  the last symbol consumed during recovery.
- *
- *  You must specify the input stream so that the erroneous text can
- *  be packaged up in the error node.  The exception could be useful
- *  to some applications; default implementation stores ptr to it in
- *  the CommonErrorNode.
- *
- *  This only makes sense during token parsing, not tree parsing.
- *  Tree parsing should happen only when parsing and tree construction
- *  succeed.
- */
-- (id) errorNode:(id<ANTLRTokenStream>)anInput
-            From:(id<ANTLRToken>)aStartToken
-              To:(id<ANTLRToken>)aStopToken
-       Exception:(NSException *) e;
-
-/** Is tree considered a nil node used to make lists of child nodes? */
-- (BOOL) isNil:(id<ANTLRTree>)aTree;
-
-
-- (void) addChild:(id<ANTLRTree>)child toTree:(id<ANTLRTree>)aTree;
-
-/** If oldRoot is a nil root, just copy or move the children to newRoot.
- *  If not a nil root, make oldRoot a child of newRoot.
- *
- *    old=^(nil a b c), new=r yields ^(r a b c)
- *    old=^(a b c), new=r yields ^(r ^(a b c))
- *
- *  If newRoot is a nil-rooted single child tree, use the single
- *  child as the new root node.
- *
- *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
- *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
- *
- *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
- *
- *    old=null, new=r yields r
- *    old=null, new=^(nil r) yields ^(nil r)
- *
- *  Return newRoot.  Throw an exception if newRoot is not a
- *  simple node or nil root with a single child node--it must be a root
- *  node.  If newRoot is ^(nil x) return x as newRoot.
- *
- *  Be advised that it's ok for newRoot to point at oldRoot's
- *  children; i.e., you don't have to copy the list.  We are
- *  constructing these nodes so we should have this control for
- *  efficiency.
- */
-- (id) becomeRoot:(id<ANTLRTree>)newRoot old:(id<ANTLRTree>)oldRoot;
-
-- (id) rulePostProcessing:(id<ANTLRTree>)root;
-
-#pragma mark Rewrite Rules
-                           
-- (NSUInteger) getUniqueID:(id<ANTLRTree>)aNode;
-
-- (id<ANTLRTree>) createTree:(NSInteger)tokenType FromToken:(id<ANTLRToken>)fromToken;
-- (id<ANTLRTree>) createTree:(NSInteger)tokenType FromToken:(id<ANTLRToken>)fromToken Text:(NSString *)text;
-- (id<ANTLRTree>) createTree:(NSInteger)tokenType Text:(NSString *)text;
-
-#pragma mark Content
-
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)aNode;
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree;
-
-- (NSInteger) getType:(id<ANTLRTree>)aNode;
-- (void) setType:(id<ANTLRTree>)aNode Type:(NSInteger)tokenType;
-
-- (NSString *) getText:(id<ANTLRTree>)aNode;
-- (void) setText:(id<ANTLRTree>)aNode Text:(NSString *)tokenText;
-
-- (id<ANTLRToken>) getToken:(id<ANTLRTree>)t;
-
-- (void) setTokenBoundaries:(id<ANTLRTree>)aTree From:(id<ANTLRToken>)startToken To:(id<ANTLRToken>)stopToken;
-- (NSInteger) getTokenStartIndex:(id<ANTLRTree>)aTree;
-- (NSInteger) getTokenStopIndex:(id<ANTLRTree>)aTree;
-
-#pragma mark Navigation / Tree Parsing
-
-/** Get a child 0..n-1 node */
-- (id<ANTLRTree>) getChild:(id<ANTLRTree>)aNode At:(NSInteger) i;
-/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
-- (void) setChild:(id<ANTLRTree>)aTree At:(NSInteger)index Child:(id<ANTLRTree>)child;
-/** Remove ith child and shift children down from right. */
-- (id<ANTLRTree>) deleteChild:(id<ANTLRTree>)t Index:(NSInteger)index;
-
-/** How many children?  If 0, then this is a leaf node */
-- (NSInteger) getChildCount:(id<ANTLRTree>) aTree;
-
-/** Who is the parent node of this node; if null, implies node is root.
- *  If your node type doesn't handle this, it's ok but the tree rewrites
- *  in tree parsers need this functionality.
- */
-- (id<ANTLRTree>)getParent:(id<ANTLRTree>)t;
-- (void) setParent:(id<ANTLRTree>)t With:(id<ANTLRTree>)parent;
-
-/** What index is this node in the child list? Range: 0..n-1
- *  If your node type doesn't handle this, it's ok but the tree rewrites
- *  in tree parsers need this functionality.
- */
-- (NSInteger) getChildIndex:(id<ANTLRTree>)t;
-- (void) setChildIndex:(id<ANTLRTree>)t With:(NSInteger)index;
-
-- (void) replaceChildren:(id<ANTLRTree>)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id<ANTLRTree>)t;
-
-@end
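
A sketch of the nil-list versus rooted-tree behaviour documented for becomeRoot:old: above. ANTLRCommonTreeAdaptor is assumed to be the concrete adaptor, and the helper name is illustrative.

#import "ANTLRCommonTreeAdaptor.h"

static id buildRootedList(id<ANTLRTreeAdaptor> adaptor,
                          id<ANTLRTree> r, id<ANTLRTree> a, id<ANTLRTree> b)
{
    id<ANTLRTree> list = [adaptor emptyNode];   // ^(nil) -- a flat list
    [adaptor addChild:a toTree:list];
    [adaptor addChild:b toTree:list];           // ^(nil a b)
    return [adaptor becomeRoot:r old:list];     // ^(r a b), as documented above
}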
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeException.h
deleted file mode 100755
index 8ec5c45..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeException.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLRTreeException : ANTLRRecognitionException {
-	id<ANTLRTree> oldRoot;
-	id<ANTLRTree> newRoot;
-}
-
-+ (id) exceptionWithOldRoot:(id<ANTLRTree>)theOldRoot newRoot:(id<ANTLRTree>)theNewRoot stream:(id<ANTLRIntStream>)aStream;
-- (id) initWithOldRoot:(id<ANTLRTree>)theOldRoot newRoot:(id<ANTLRTree>)theNewRoot stream:(id<ANTLRIntStream>)aStream;
-
-- (void) setOldRoot:(id<ANTLRTree>)aTree;
-- (void) setNewRoot:(id<ANTLRTree>)aTree;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeIterator.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeIterator.h
deleted file mode 100644
index e6d5e71..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeIterator.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//
-//  ANTLRTreeIterator.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRFastQueue.h"
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTree.h"
-
-@interface ANTLRTreeIterator : NSObject 
-{
-	id<ANTLRTreeAdaptor> adaptor;
-	id<ANTLRTree> root;
-	id<ANTLRTree> tree;
-	BOOL firstTime;
-	id<ANTLRTree> up;
-	id<ANTLRTree> down;
-	id<ANTLRTree> eof;
-	
-	ANTLRFastQueue *nodes;
-}
-
-@property(retain, readwrite) id<ANTLRTree> up;
-@property(retain, readwrite) id<ANTLRTree> down;
-@property(retain, readwrite) id<ANTLRTree> eof;
-
-+ newANTRLTreeIterator;
-+ (ANTLRTreeIterator *) newANTRLTreeIteratorWithAdaptor:(ANTLRCommonTreeAdaptor *)adaptor
-                                                andTree:(id<ANTLRTree>)tree;
-- (id) init;
-- (id) initWithTree:(id<ANTLRTree>) t;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>) a andTree:(id<ANTLRTree>) t;
-
-- (void) reset;
-- (BOOL) hasNext;
-- (id) nextObject;
-- (NSArray *) allObjects;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeNodeStream.h
deleted file mode 100755
index bf6342c..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeNodeStream.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRIntStream.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRCommonTreeAdaptor.h"
-
-@protocol ANTLRTreeNodeStream < ANTLRIntStream > 
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)theAdaptor Tree:(ANTLRCommonTree *)theTree;
-
-- (id) LT:(NSInteger)k;
-- (id) getTreeSource;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (id<ANTLRTokenStream>) getTokenStream; 
-- (void) setUniqueNavigationNodes:(BOOL)flag;
-
-- (id) getNode:(NSInteger) idx;
-
-- (NSString *) toStringFromNode:(id)startNode ToNode:(id)stopNode;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeParser.h
deleted file mode 100755
index e2f01ee..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeParser.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRTreeNodeStream.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRMismatchedTreeNodeException.h"
-
-@interface ANTLRTreeParser : ANTLRBaseRecognizer {
-	id<ANTLRTreeNodeStream> input;
-}
-
-@property (retain, getter=getInput, setter=setInput:) id<ANTLRTreeNodeStream> input;
-
-+ (id) newANTLRTreeParser:(id<ANTLRTreeNodeStream>)anInput;
-+ (id) newANTLRTreeParser:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)state;
-
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)theInput;
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)theInput
-                State:(ANTLRRecognizerSharedState *)state;
-
-
-- (id<ANTLRTreeNodeStream>)getInput;
-- (void) setInput:(id<ANTLRTreeNodeStream>)anInput;
-
-- (void) setTreeNodeStream:(id<ANTLRTreeNodeStream>) anInput;
-- (id<ANTLRTreeNodeStream>) getTreeNodeStream;
-
-- (NSString *)getSourceName;
-
-- (id) getCurrentInputSymbol:(id<ANTLRIntStream>) anInput;
-
-- (id) getMissingSymbol:(id<ANTLRIntStream>)input
-              Exception:(ANTLRRecognitionException *) e
-          ExpectedToken:(NSInteger) expectedTokenType
-                 BitSet:(ANTLRBitSet *)follow;
-
-/** Matching '.' in a tree parser has special meaning: skip the node, or the
- *  entire subtree if the node has children, scanning until the
- *  corresponding UP node.
- */
-- (void) matchAny:(id<ANTLRIntStream>)ignore;
-
-/** We have DOWN/UP nodes in the stream that have no line info, so we override;
- *  plus we want to alter the exception type.  Don't try to recover
- *  from tree parser errors inline...
- */
-- (id) recoverFromMismatchedToken:(id<ANTLRIntStream>)anInput
-                             Type:(NSInteger)ttype
-                           Follow:(ANTLRBitSet *)follow;
-
-/** Prefix the error message with the grammar name; the message is always
- *  intended for the programmer, since the parser, not the user, built
- *  the input tree.
- */
-- (NSString *)getErrorHeader:(ANTLRRecognitionException *)e;
-
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(NSArray *) tokenNames;
-
-- (void) traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-- (void) traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreePatternLexer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreePatternLexer.h
deleted file mode 100644
index f6059d3..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreePatternLexer.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//
-//  ANTLRTreePatternLexer.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-typedef enum {
-	ANTLRLexerTokenTypeEOF = -1,
-	ANTLRLexerTokenTypeInvalid,
-	ANTLRLexerTokenTypeBEGIN,
-	ANTLRLexerTokenTypeEND,
-	ANTLRLexerTokenTypeID,
-	ANTLRLexerTokenTypeARG,
-	ANTLRLexerTokenTypePERCENT,
-	ANTLRLexerTokenTypeCOLON,
-	ANTLRLexerTokenTypeDOT,
-} ANTLRLexerTokenType;
-
-
-@interface ANTLRTreePatternLexer : NSObject {
-
-/** The tree pattern to lex like "(A B C)" */
-NSString *pattern;
-    
-/** Index into input string */
-NSInteger p;
-    
-/** Current char */
-NSInteger c;
-    
-/** How long is the pattern, in characters? */
-NSInteger n;
-    
-/** Set when token type is ID or ARG (name mimics Java's StreamTokenizer) */
-NSMutableData *sval;
-char *data;
-    
-BOOL error;
-
-}
-
-@property (retain, getter=getPattern, setter=setPattern:) NSString *pattern;
-@property (getter=getP, setter=setP:) NSInteger p;
-@property (getter=getC, setter=setC:) NSInteger c;
-@property (getter=getN, setter=setN:) NSInteger n;
-@property (retain, getter=getSval, setter=setSval:) NSMutableData *sval;
-@property (assign, getter=getData, setter=setData:) char *data;
-@property (getter=getError, setter=setError) BOOL error;
-
-+ (ANTLRTreePatternLexer *)newANTLRTreePatternLexer:(NSString *)aPattern;
-- (id) init;
-- (id) initWithPattern:(NSString *)aPattern;
-- (NSInteger) nextToken;
-- (void) consume;
-- (NSString *)toString;
-
-- (NSMutableData *)getSval;
-- (void) setSval:(NSMutableData *)aSval;
-
-@end
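
An assumed driver loop for the pattern lexer above, tokenizing a pattern such as "(A B C)"; the loop body is a placeholder.

#import <Foundation/Foundation.h>
#import "ANTLRTreePatternLexer.h"

static void lexPattern(void)
{
    ANTLRTreePatternLexer *lexer =
        [ANTLRTreePatternLexer newANTLRTreePatternLexer:@"(A B C)"];
    NSInteger t;
    while ((t = [lexer nextToken]) != ANTLRLexerTokenTypeEOF) {
        NSLog(@"pattern token type %ld", (long)t);
    }
}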
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreePatternParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreePatternParser.h
deleted file mode 100644
index f6d6dc6..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreePatternParser.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//
-//  ANTLRTreePatternParser.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreePatternLexer.h"
-#import "ANTLRTreeWizard.h"
-#import "ANTLRTreeAdaptor.h"
-
-@interface ANTLRTreePatternParser : NSObject {
-
-ANTLRTreePatternLexer *tokenizer;
-NSInteger ttype;
-ANTLRTreeWizard *wizard;
-id<ANTLRTreeAdaptor> adaptor;
-    
-}
-
-+ (ANTLRTreePatternParser *)newANTLRTreePatternParser:(ANTLRTreePatternLexer *)aTokenizer
-                                               Wizard:(ANTLRTreeWizard *)aWizard
-                                              Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (id) init;
-- (id) initWithTokenizer:(ANTLRTreePatternLexer *)tokenizer
-                  Wizard:(ANTLRTreeWizard *)aWizard
-                 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (id<ANTLRTree>) pattern;
-- (id<ANTLRTree>) parseTree;
-- (id<ANTLRTree>) parseNode;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeRewriter.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeRewriter.h
deleted file mode 100644
index aee873e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeRewriter.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-//  ANTLRTreeRewriter.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeParser.h"
-
-@interface ANTLRfptr : NSObject {
-    id  actor;
-    SEL ruleSEL;
-}
-
-+ (ANTLRfptr *)newANTLRfptrWithRule:(SEL)aRuleAction withObject:(id)anObject;
--initWithRule:(SEL)ruleAction withObject:(id)anObject;
-
-- (id)rule;
-
-@end
-
-@interface ANTLRTreeRewriter : ANTLRTreeParser {
-    BOOL showTransformations;
-    id<ANTLRTokenStream> originalTokenStream;
-    id<ANTLRTreeAdaptor> originalAdaptor;
-    ANTLRfptr *rule;
-    ANTLRfptr *topdown_fptr;
-    ANTLRfptr *bottomup_ftpr;
-}
-
-+ (ANTLRTreeRewriter *) newANTLRTreeRewriter:(id<ANTLRTreeNodeStream>)anInput;
-+ (ANTLRTreeRewriter *) newANTLRTreeRewriter:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-- (id)initWithStream:(id<ANTLRTreeNodeStream>)anInput;
-- (id)initWithStream:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-- (ANTLRTreeRewriter *) applyOnce:(id<ANTLRTree>)t Rule:(ANTLRfptr *)whichRule;
-- (ANTLRTreeRewriter *) applyRepeatedly:(id<ANTLRTree>)t Rule:(ANTLRfptr *)whichRule;
-- (ANTLRTreeRewriter *) downup:(id<ANTLRTree>)t;
-- (ANTLRTreeRewriter *) pre:(id<ANTLRTree>)t;
-- (ANTLRTreeRewriter *) post:(id<ANTLRTree>)t;
-- (ANTLRTreeRewriter *) downup:(id<ANTLRTree>)t XForm:(BOOL)aShowTransformations;
-- (void)reportTransformation:(id<ANTLRTree>)oldTree Tree:(id<ANTLRTree>)newTree;
-- (ANTLRTreeRewriter *) topdown_fptr;
-- (ANTLRTreeRewriter *) bottomup_fptr;
-- (ANTLRTreeRewriter *) topdown;
-- (ANTLRTreeRewriter *) bottomup;
-
-@end
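
A minimal usage sketch against the declarations above (in practice ANTLRTreeRewriter is subclassed by a generated tree grammar that supplies the topdown/bottomup rules); `nodes` and `t` are assumed to come from an earlier tree parse:

    #import "ANTLRTreeRewriter.h"

    // Sketch only: nodes (id<ANTLRTreeNodeStream>) and t (id<ANTLRTree>)
    // are assumed to have been produced elsewhere.
    static void rewriteAST(id<ANTLRTreeNodeStream> nodes, id<ANTLRTree> t) {
        ANTLRTreeRewriter *rewriter = [ANTLRTreeRewriter newANTLRTreeRewriter:nodes];
        // Apply the registered topdown rules on the way down and the bottomup
        // rules on the way up; XForm:YES reports each transformation applied.
        [rewriter downup:t XForm:YES];
    }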
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeRuleReturnScope.h
deleted file mode 100644
index ea8a487..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeRuleReturnScope.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//
-//  ANTLRTreeRuleReturnScope.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuleReturnScope.h"
-#import "ANTLRCommonTree.h"
-
-@interface ANTLRTreeRuleReturnScope : ANTLRRuleReturnScope {
-    ANTLRCommonTree *startNode;
-}
-
-@property (retain, getter=getStart, setter=setStart:) ANTLRCommonTree *startNode;
-
-/** First node or root node of tree matched for this rule. */
-
-- (ANTLRCommonTree *)getStart;
-- (void)setStart:(ANTLRCommonTree *)aStartNode;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeVisitor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeVisitor.h
deleted file mode 100644
index 1f167bb..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeVisitor.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-//  ANTLRTreeVisitor.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTreeVisitorAction.h"
-
-@interface ANTLRTreeVisitor : NSObject {
-   id<ANTLRTreeAdaptor> adaptor;
-}
-+ (ANTLRTreeVisitor *)newANTLRTreeVisitor:(id<ANTLRTreeAdaptor>) anAdaptor;
-+ (ANTLRTreeVisitor *)newANTLRTreeVisitor;
-- (id)init;
-- (id)initWithAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (ANTLRTreeVisitor *)visit:(id<ANTLRTree>)t Action:(ANTLRTreeVisitorAction *)action;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeVisitorAction.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeVisitorAction.h
deleted file mode 100644
index c9c0856..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeVisitorAction.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-//  ANTLRTreeVisitorAction.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRTreeVisitorAction : NSObject
-{
-
-}
-
-+ (ANTLRTreeVisitorAction *)newANTLRTreeVisitorAction;
-- (id) init;
-
-/** Execute an action before visiting children of t.  Return t or
- *  a rewritten t.  It is up to the visitor to decide what to do
- *  with the return value.  Children of returned value will be
- *  visited if using TreeVisitor.visit().
- */
-- (ANTLRTreeVisitorAction *)pre:(ANTLRTreeVisitorAction *) t;
-
-/** Execute an action after visiting children of t.  Return t or
- *  a rewritten t.  It is up to the visitor to decide what to do
- *  with the return value.
- */
-- (ANTLRTreeVisitorAction *)post:(ANTLRTreeVisitorAction *) t;
-
-@end
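
The pre:/post: hooks above are easiest to see with a small subclass. The sketch below is illustrative only (the LoggingAction name is made up); it mirrors the declared signatures, where the argument is the node currently being visited:

    #import "ANTLRTreeVisitor.h"
    #import "ANTLRTreeVisitorAction.h"

    @interface LoggingAction : ANTLRTreeVisitorAction
    @end

    @implementation LoggingAction
    - (ANTLRTreeVisitorAction *)pre:(ANTLRTreeVisitorAction *)t {
        NSLog(@"entering %@", t);   // runs before children of t are visited
        return t;                   // return t, or a rewritten t
    }
    - (ANTLRTreeVisitorAction *)post:(ANTLRTreeVisitorAction *)t {
        NSLog(@"leaving %@", t);    // runs after children of t have been visited
        return t;
    }
    @end

    // Usage, assuming an adaptor and a tree `t` already exist:
    //   ANTLRTreeVisitor *v = [ANTLRTreeVisitor newANTLRTreeVisitor:adaptor];
    //   [v visit:t Action:[[LoggingAction alloc] init]];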
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeWizard.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeWizard.h
deleted file mode 100644
index d952572..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRTreeWizard.h
+++ /dev/null
@@ -1,134 +0,0 @@
-//
-//  ANTLRTreeWizard.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRMapElement.h"
-#import "ANTLRMap.h"
-
-@class ANTLRVisitor;
-
-@protocol ANTLRContextVisitor <NSObject>
-// TODO: should this be called visit or something else?
-- (void) visit:(id<ANTLRTree>)t Parent:(id<ANTLRTree>)parent ChildIndex:(NSInteger)childIndex Map:(ANTLRMap *)labels;
-
-@end
-
-@interface ANTLRVisitor : NSObject <ANTLRContextVisitor> {
-    NSInteger action;
-    id actor;
-    id object1;
-    id object2;
-}
-+ (ANTLRVisitor *)newANTLRVisitor:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2;
-- (id) initWithAction:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2;
-
-- (void) visit:(id<ANTLRTree>)t;
-- (void) visit:(id<ANTLRTree>)t Parent:(id<ANTLRTree>)parent ChildIndex:(NSInteger)childIndex Map:(ANTLRMap *)labels;
-
-@end
-
-/** When using %label:TOKENNAME in a tree for parse(), we must
- *  track the label.
- */
-@interface ANTLRTreePattern : ANTLRCommonTree {
-    NSString *label;
-    BOOL      hasTextArg;
-}
-@property (retain, getter=getLabel, setter=setLabel:) NSString *label;
-@property (assign, getter=getHasTextArg, setter=setHasTextArg:) BOOL hasTextArg;
-
-+ (ANTLRTreePattern *)newANTLRTreePattern:(id<ANTLRToken>)payload;
-
-- (id) initWithToken:(id<ANTLRToken>)payload;
-- (NSString *)toString;
-@end
-
-@interface ANTLRWildcardTreePattern : ANTLRTreePattern {
-}
-
-+ (ANTLRWildcardTreePattern *)newANTLRWildcardTreePattern:(id<ANTLRToken>)payload;
-- (id) initWithToken:(id<ANTLRToken>)payload;
-@end
-
-/** This adaptor creates TreePattern objects for use during scan() */
-@interface ANTLRTreePatternTreeAdaptor : ANTLRCommonTreeAdaptor {
-}
-+ (ANTLRTreePatternTreeAdaptor *)newTreeAdaptor;
-#ifdef DONTUSENOMO
-+ (ANTLRTreePatternTreeAdaptor *)newTreeAdaptor:(id<ANTLRToken>)payload;
-#endif
-- (id) init;
-#ifdef DONTUSENOMO
-- initWithToken:(id<ANTLRToken>)payload;
-#endif
-- (id<ANTLRTree>)createTreePattern:(id<ANTLRToken>)payload;
-
-@end
-
-@interface ANTLRTreeWizard : NSObject {
-	id<ANTLRTreeAdaptor> adaptor;
-	ANTLRMap *tokenNameToTypeMap;
-}
-+ (ANTLRTreeWizard *) newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)anAdaptor;
-+ (ANTLRTreeWizard *)newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)adaptor Map:(ANTLRMap *)aTokenNameToTypeMap;
-+ (ANTLRTreeWizard *)newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)adaptor TokenNames:(NSArray *)theTokNams;
-+ (ANTLRTreeWizard *)newANTLRTreeWizardWithTokenNames:(NSArray *)theTokNams;
-- (id) init;
-- (id) initWithAdaptor:(id<ANTLRTreeAdaptor>)adaptor;
-- (id) initWithAdaptor:(id<ANTLRTreeAdaptor>)adaptor Map:(ANTLRMap *)tokenNameToTypeMap;
-- (id) initWithTokenNames:(NSArray *)theTokNams;
-- (id) initWithTokenNames:(id<ANTLRTreeAdaptor>)anAdaptor TokenNames:(NSArray *)theTokNams;
-- (ANTLRMap *)computeTokenTypes:(NSArray *)theTokNams;
-- (NSInteger)getTokenType:(NSString *)tokenName;
-- (ANTLRMap *)index:(id<ANTLRTree>)t;
-- (void) _index:(id<ANTLRTree>)t Map:(ANTLRMap *)m;
-- (NSMutableArray *)find:(id<ANTLRTree>) t Pattern:(NSString *)pattern;
-- (ANTLRTreeWizard *)findFirst:(id<ANTLRTree>) t Type:(NSInteger)ttype;
-- (ANTLRTreeWizard *)findFirst:(id<ANTLRTree>) t Pattern:(NSString *)pattern;
-- (void) visit:(id<ANTLRTree>)t Type:(NSInteger)ttype Visitor:(ANTLRVisitor *)visitor;
-- (void) _visit:(id<ANTLRTree>)t
-         Parent:(id<ANTLRTree>)parent
-     ChildIndex:(NSInteger)childIndex
-           Type:(NSInteger)ttype
-        Visitor:(ANTLRVisitor *)visitor;
-- (void)visit:(id<ANTLRTree>)t Pattern:(NSString *)pattern Visitor:(ANTLRVisitor *)visitor;
-- (BOOL)parse:(id<ANTLRTree>)t Pattern:(NSString *)pattern Map:(ANTLRMap *)labels;
-- (BOOL) parse:(id<ANTLRTree>) t Pattern:(NSString *)pattern;
-- (BOOL) _parse:(id<ANTLRTree>)t1 Pattern:(ANTLRTreePattern *)tpattern Map:(ANTLRMap *)labels;
-- (id<ANTLRTree>) createTree:(NSString *)pattern;
-- (BOOL)equals:(id)t1 O2:(id)t2 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (BOOL)equals:(id)t1 O2:(id)t2;
-- (BOOL) _equals:(id)t1 O2:(id)t2 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-
-@end
-
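A sketch of typical wizard use, assuming a token-name vocabulary and an existing AST (`someTree`); the token names and patterns here are illustrative only:

    #import "ANTLRTreeWizard.h"

    static void demoWizard(id<ANTLRTree> someTree) {
        // Illustrative vocabulary; a real one comes from the generated parser.
        NSArray *tokenNames = [NSArray arrayWithObjects:
            @"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", @"ID", @"INT", @"PLUS", nil];
        ANTLRTreeWizard *wiz =
            [ANTLRTreeWizard newANTLRTreeWizardWithTokenNames:tokenNames];

        // Build a tree directly from a pattern string.
        id<ANTLRTree> add = [wiz createTree:@"(PLUS INT INT)"];
        NSLog(@"built: %@", add);

        // Structurally match patterns against an existing AST.
        if ([wiz parse:someTree Pattern:@"(PLUS . .)"])
            NSLog(@"someTree is an addition node");
        NSLog(@"ID nodes: %@", [wiz find:someTree Pattern:@"ID"]);
    }
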
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUnbufferedCommonTreeNodeStreamState.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUnbufferedCommonTreeNodeStreamState.h
deleted file mode 100755
index 9e79d86..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUnbufferedCommonTreeNodeStreamState.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-
-@interface ANTLRUnbufferedCommonTreeNodeStreamState : NSObject {
-	ANTLRCommonTree *currentNode;
-	ANTLRCommonTree *previousNode;
-
-	int currentChildIndex;
-	int absoluteNodeIndex;
-	unsigned int nodeStackSize;
-	unsigned int indexStackSize;
-	
-	NSMutableArray *lookahead;
-}
-
-- (ANTLRCommonTree *) currentNode;
-- (void) setCurrentNode: (ANTLRCommonTree *) aCurrentNode;
-
-- (ANTLRCommonTree *) previousNode;
-- (void) setPreviousNode: (ANTLRCommonTree *) aPreviousNode;
-
-- (NSInteger) currentChildIndex;
-- (void) setCurrentChildIndex: (NSInteger) aCurrentChildIndex;
-
-- (NSInteger) absoluteNodeIndex;
-- (void) setAbsoluteNodeIndex: (NSInteger) anAbsoluteNodeIndex;
-
-- (NSUInteger) nodeStackSize;
-- (void) setNodeStackSize: (NSUInteger) aNodeStackSize;
-
-- (NSUInteger) indexStackSize;
-- (void) setIndexStackSize: (NSUInteger) anIndexStackSize;
-
-- (NSMutableArray *) lookahead;
-- (void) setLookahead: (NSMutableArray *) aLookahead;
-
-- (void) addToLookahead: (id)lookaheadObject;
-- (void) removeFromLookahead: (id)lookaheadObject;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUnbufferedTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUnbufferedTokenStream.h
deleted file mode 100644
index e4f8630..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUnbufferedTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//
-//  ANTLRUnbufferedTokenStream.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/12/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuntimeException.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRToken.h"
-
-@interface ANTLRUnbufferedTokenStream : ANTLRLookaheadStream {
-	id<ANTLRTokenSource> tokenSource;
-    NSInteger tokenIndex; // simple counter to set token index in tokens
-    NSInteger channel;
-}
-
-@property (retain, getter=getTokenSource, setter=setTokenSource:) id<ANTLRTokenSource> tokenSource;
-@property (getter=getTokenIndex, setter=setTokenIndex) NSInteger tokenIndex;
-@property (getter=getChannel, setter=setChannel:) NSInteger channel;
-
-+ (ANTLRUnbufferedTokenStream *)newANTLRUnbufferedTokenStream:(id<ANTLRTokenSource>)aTokenSource;
-- (id) init;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource;
-
-- (id<ANTLRToken>)nextElement;
-- (BOOL)isEOF:(id<ANTLRToken>) aToken;
-- (id<ANTLRTokenSource>)getTokenSource;
-- (NSString *)toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *)toStringFromToken:(id<ANTLRToken>)aStart ToEnd:(id<ANTLRToken>)aStop;
-- (NSInteger)LA:(NSInteger)anIdx;
-- (id<ANTLRToken>)objectAtIndex:(NSInteger)anIdx;
-- (NSString *)getSourceName;
-
-
-@end
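
Typical construction wraps a token source (normally a generated lexer, which is assumed here) and reads tokens on demand:

    #import "ANTLRUnbufferedTokenStream.h"

    static void dumpFirstToken(id<ANTLRTokenSource> source) {
        ANTLRUnbufferedTokenStream *tokens =
            [ANTLRUnbufferedTokenStream newANTLRUnbufferedTokenStream:source];
        NSInteger la1 = [tokens LA:1];             // token type of the lookahead token
        id<ANTLRToken> tok = [tokens nextElement]; // pull the next token from the source
        NSLog(@"LA(1)=%ld first token=%@", (long)la1, tok);
    }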
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUniqueIDMap.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUniqueIDMap.h
deleted file mode 100644
index a805bd5..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUniqueIDMap.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//
-//  ANTLRUniqueIDMap.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/7/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-#import "ANTLRNodeMapElement.h"
-
-#define SUCCESS             0
-#define FAILURE             -1
-#define HASHSIZE            101
-#define HBUFSIZE            0x2000
-
-@interface ANTLRUniqueIDMap : ANTLRPtrBuffer {
-    NSInteger lastHash;
-}
-
-@property (getter=getLastHash, setter=setLastHash) NSInteger lastHash;
-
-+ (id)newANTLRUniqueIDMap;
-+ (id)newANTLRUniqueIDMapWithLen:(NSInteger)aHashSize;
-
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-// Instance Methods
-- (NSInteger)count;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-
-- (void)deleteANTLRUniqueIDMap:(ANTLRNodeMapElement *)np;
-- (void)delete_chain:(ANTLRNodeMapElement *)np;
-- (id)getNode:(id<ANTLRTree>)aNode;
-- (void)putID:(id)anID Node:(id<ANTLRTree>)aNode;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUnwantedTokenException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUnwantedTokenException.h
deleted file mode 100644
index 2945bfe..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ANTLRUnwantedTokenException.h
+++ /dev/null
@@ -1,47 +0,0 @@
-//
-//  ANTLRUnwantedTokenException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRMismatchedTokenException.h"
-
-@interface ANTLRUnwantedTokenException : ANTLRMismatchedTokenException {
-
-}
-+ (ANTLRUnwantedTokenException *)newANTLRUnwantedTokenException;
-+ (ANTLRUnwantedTokenException *)newANTLRUnwantedTokenException:(NSInteger)expected Stream:(id<ANTLRIntStream>)anInput;
-
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInput And:(NSInteger)expected;
-- (id<ANTLRToken>)getUnexpectedToken;
-- (NSString *)toString;
-                     
-    
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Resources/English.lproj/InfoPlist.strings b/antlr-3.4/runtime/ObjC/ANTLR.framework/Resources/English.lproj/InfoPlist.strings
deleted file mode 100644
index fa1b75f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Resources/English.lproj/InfoPlist.strings
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Resources/Info.plist b/antlr-3.4/runtime/ObjC/ANTLR.framework/Resources/Info.plist
deleted file mode 100644
index 24436a3..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Resources/Info.plist
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-	<key>CFBundleDevelopmentRegion</key>
-	<string>English</string>
-	<key>CFBundleExecutable</key>
-	<string>ANTLR</string>
-	<key>CFBundleIdentifier</key>
-	<string>org.antlr.antlrframework</string>
-	<key>CFBundleInfoDictionaryVersion</key>
-	<string>6.0</string>
-	<key>CFBundleName</key>
-	<string>ANTLR</string>
-	<key>CFBundlePackageType</key>
-	<string>FMWK</string>
-	<key>CFBundleSignature</key>
-	<string>????</string>
-	<key>CFBundleVersion</key>
-	<string>1.0</string>
-</dict>
-</plist>
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/ANTLR b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/ANTLR
deleted file mode 100755
index 67c1d3a..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/ANTLR
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLR.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLR.h
deleted file mode 100755
index 671e783..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLR.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRTreeException.h>
-
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
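
Client code normally pulls the runtime in through this umbrella header rather than the individual headers, e.g.:

    #import <ANTLR/ANTLR.h>   // framework-style import; brings in the headers listed above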
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseMapElement.h
deleted file mode 100644
index b9100ac..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseMapElement.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-//  ANTLRBaseMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-@interface ANTLRBaseMapElement : ANTLRLinkBase {
-    NSNumber *index;
-}
-
-@property (retain, getter=getIndex, setter=setIndex:) NSNumber *index;
-
-+ (id) newANTLRBaseMapElement;
-+ (id) newANTLRBaseMapElementWithIndex:(NSNumber *)anIdx;
-- (id) init;
-- (id) initWithAnIndex:(NSNumber *)anIdx;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSNumber *)getIndex;
-- (void)setIndex:(NSNumber *)anIdx;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseRecognizer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseRecognizer.h
deleted file mode 100755
index 1a922bd..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseRecognizer.h
+++ /dev/null
@@ -1,183 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import <Foundation/Foundation.h>
-
-#import "ANTLRIntStream.h"
-
-// This is an abstract superclass for lexers and parsers.
-
-#define ANTLR_MEMO_RULE_FAILED -2
-#define ANTLR_MEMO_RULE_UNKNOWN -1
-#define ANTLR_INITIAL_FOLLOW_STACK_SIZE 100
-
-#import "ANTLRMapElement.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRToken.h"
-#import "ANTLRRecognizerSharedState.h"
-#import "ANTLRRecognitionException.h"
-#import "ANTLRMissingTokenException.h"
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRMismatchedTreeNodeException.h"
-#import "ANTLRUnwantedTokenException.h"
-#import "ANTLRNoViableAltException.h"
-#import "ANTLREarlyExitException.h"
-#import "ANTLRMismatchedSetException.h"
-#import "ANTLRMismatchedNotSetException.h"
-#import "ANTLRFailedPredicateException.h"
-
-@interface ANTLRBaseRecognizer : NSObject {
-	ANTLRRecognizerSharedState *state;	// the state of this recognizer. Might be shared with other recognizers, e.g. in grammar import scenarios.
-	NSString *grammarFileName;			// where did the grammar come from? Filled in by code generation.
-//    BOOL failed;
-    NSString *sourceName;
-//    NSInteger numberOfSyntaxErrors;
-    NSArray *tokenNames;
-}
-
-@property (retain, getter=getState, setter=setState) ANTLRRecognizerSharedState *state;
-@property (retain, getter=getGrammarFileName, setter=setGrammarFileName) NSString *grammarFileName;
-//@property (assign, getter=getFailed, setter=setFailed) BOOL failed;
-@property (retain, getter=getTokenNames, setter=setTokenNames) NSArray *tokenNames;
-@property (retain, getter=getSourceName, setter=setSourceName) NSString *sourceName;
-//@property (assign, getter=getNumberOfSyntaxErrors, setter=setNumberOfSyntaxErrors) NSInteger numberOfSyntaxErrors;
-
-+ (void) initialize;
-
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizer;
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizerWithRuleLen:(NSInteger)aLen;
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizer:(ANTLRRecognizerSharedState *)aState;
-
-+ (NSArray *)getTokenNames;
-+ (void)setTokenNames:(NSArray *)aTokNamArray;
-+ (void)setGrammarFileName:(NSString *)aFileName;
-
-- (id) init;
-- (id) initWithLen:(NSInteger)aLen;
-- (id) initWithState:(ANTLRRecognizerSharedState *)aState;
-
-- (void) dealloc;
-
-// simple accessors
-- (NSInteger) getBacktrackingLevel;
-- (void) setBacktrackingLevel:(NSInteger) level;
-
-- (BOOL) getFailed;
-- (void) setFailed: (BOOL) flag;
-
-- (ANTLRRecognizerSharedState *) getState;
-- (void) setState:(ANTLRRecognizerSharedState *) theState;
-
-// reset this recognizer - might be extended by codegeneration/grammar
-- (void) reset;
-
-/** Match needs to return the current input symbol, which gets put
- *  into the label for the associated token ref; e.g., x=ID.  Token
- *  and tree parsers need to return different objects. Rather than test
- *  for input stream type or change the IntStream interface, I use
- *  a simple method to ask the recognizer to tell me what the current
- *  input symbol is.
- * 
- *  This is ignored for lexers.
- */
-- (id) getInput;
-
-- (void)skip;
-
-// do actual matching of tokens/characters
-- (id) match:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)ttype Follow:(ANTLRBitSet *)follow;
-- (void) matchAny:(id<ANTLRIntStream>)anInput;
-- (BOOL) mismatchIsUnwantedToken:(id<ANTLRIntStream>)anInput TokenType:(NSInteger) ttype;
-- (BOOL) mismatchIsMissingToken:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)follow;
-
-// error reporting and recovery
-- (void) reportError:(ANTLRRecognitionException *)e;
-- (void) displayRecognitionError:(NSArray *)theTokNams Exception:(ANTLRRecognitionException *)e;
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(NSArray *)theTokNams;
-- (NSInteger) getNumberOfSyntaxErrors;
-- (NSString *)getErrorHeader:(ANTLRRecognitionException *)e;
-- (NSString *)getTokenErrorDisplay:(id<ANTLRToken>)t;
-- (void) emitErrorMessage:(NSString *)msg;
-- (void) recover:(id<ANTLRIntStream>)anInput Exception:(ANTLRRecognitionException *)e;
-
-// begin hooks for debugger
-- (void) beginResync;
-- (void) endResync;
-// end hooks for debugger
-
-// compute the bitsets necessary to do matching and recovery
-- (ANTLRBitSet *)computeErrorRecoverySet;
-- (ANTLRBitSet *)computeContextSensitiveRuleFOLLOW;
-- (ANTLRBitSet *)combineFollows:(BOOL) exact;
-
-- (id<ANTLRToken>) recoverFromMismatchedToken:(id<ANTLRIntStream>)anInput 
-                                    TokenType:(NSInteger)ttype 
-                                       Follow:(ANTLRBitSet *)follow;
-                                    
-- (id<ANTLRToken>)recoverFromMismatchedSet:(id<ANTLRIntStream>)anInput
-                                    Exception:(ANTLRRecognitionException *)e
-                                    Follow:(ANTLRBitSet *)follow;
-
-- (id) getCurrentInputSymbol:(id<ANTLRIntStream>)anInput;
-- (id) getMissingSymbol:(id<ANTLRIntStream>)anInput
-              Exception:(ANTLRRecognitionException *)e
-              TokenType:(NSInteger) expectedTokenType
-                Follow:(ANTLRBitSet *)follow;
-
-// helper methods for recovery. try to resync somewhere
-- (void) consumeUntilTType:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)ttype;
-- (void) consumeUntilFollow:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)bitSet;
-- (void) pushFollow:(ANTLRBitSet *)fset;
-- (ANTLRBitSet *)popFollow;
-
-// to be used by the debugger to do reporting. maybe hook in incremental stuff here, too.
-- (NSMutableArray *) getRuleInvocationStack;
-- (NSMutableArray *) getRuleInvocationStack:(ANTLRRecognitionException *)exception
-					             Recognizer:(NSString *)recognizerClassName;
-
-- (NSArray *) getTokenNames;
-- (NSString *)getGrammarFileName;
-- (NSString *)getSourceName;
-- (NSMutableArray *) toStrings:(NSArray *)tokens;
-// support for memoization
-- (NSInteger) getRuleMemoization:(NSInteger)ruleIndex StartIndex:(NSInteger)ruleStartIndex;
-- (BOOL) alreadyParsedRule:(id<ANTLRIntStream>)anInput RuleIndex:(NSInteger)ruleIndex;
-- (void) memoize:(id<ANTLRIntStream>)anInput
-	     RuleIndex:(NSInteger)ruleIndex
-	    StartIndex:(NSInteger)ruleStartIndex;
-- (NSInteger) getRuleMemoizationCacheSize;
-- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol;
-- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol;
-
-
-// support for syntactic predicates. these are called indirectly to support funky stuff in grammars,
-// like supplying selectors instead of writing code directly into the actions of the grammar.
-- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment;
-// stream:(id<ANTLRIntStream>)anInput;
-
-@end
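
The error-reporting hooks declared above (reportError:, getErrorMessage:TokenNames:, emitErrorMessage:) are the usual customization points. A sketch of a subclass that collects messages instead of printing them (class and ivar names are made up; in practice the override would live in a generated parser subclass):

    #import "ANTLRBaseRecognizer.h"

    @interface CollectingRecognizer : ANTLRBaseRecognizer {
        NSMutableArray *errorMessages;   // hypothetical storage for reported errors
    }
    @end

    @implementation CollectingRecognizer
    - (void) emitErrorMessage:(NSString *)msg {
        if (errorMessages == nil)
            errorMessages = [[NSMutableArray alloc] init];
        [errorMessages addObject:msg];   // collect instead of writing to stderr
    }
    @end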
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseStack.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseStack.h
deleted file mode 100644
index 5069031..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseStack.h
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-//  ANTLRBaseRecognizer.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRBaseStack : ANTLRPtrBuffer {
-	//ANTLRRuleStack *fNext;
-    // TStringPool *fPool;
-    NSInteger LastHash;
-}
-
-//@property (copy) ANTLRRuleStack *fNext;
-@property (getter=getLastHash, setter=setLastHash) NSInteger LastHash;
-
-// Construction/Destruction
-+ (ANTLRBaseStack *)newANTLRBaseStack;
-+ (ANTLRBaseStack *)newANTLRBaseStackWithLen:(NSInteger)cnt;
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger)count;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-
-- (NSInteger)getLastHash;
-- (void)setLastHash:(NSInteger)aVal;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseTree.h
deleted file mode 100755
index 96513f8..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseTree.h
+++ /dev/null
@@ -1,199 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTree.h"
-
-@protocol ANTLRBaseTree <ANTLRTree>
-
-@property (retain, getter=getChildren, setter=setChildren) NSMutableArray *children;
-
-+ (id<ANTLRBaseTree>) newANTLRBaseTree;
-+ (id<ANTLRBaseTree>) newANTLRBaseTree:(id<ANTLRBaseTree>)node;
-
-- (id<ANTLRBaseTree>) init;
-- (id<ANTLRBaseTree>) initWith:(id<ANTLRTree>)node;
-
-- (id<ANTLRBaseTree>) getChild:(NSUInteger)i;
-- (NSMutableArray *)getChildren;
-- (void) setChildren:(NSMutableArray *)anArray;
-- (id<ANTLRBaseTree>)getFirstChildWithType:(NSInteger)type;
-- (NSUInteger) getChildCount;
-
-// Add t as a child to this node.  If t is null, do nothing.  If t
-//  is nil, add all children of t to this' children.
-
-- (void) addChild:(id<ANTLRTree>) tree;
-- (void) addChildren:(NSArray *) theChildren;
-//- (void) removeAllChildren;
-
-- (void) setChild:(NSInteger) i With:(id<ANTLRTree>)t;
-- (id) deleteChild:(NSInteger) i;
-- (NSMutableArray *) createChildrenList;
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-// Indicates the node is a nil node but may still have children, meaning
-// the tree is a flat list.
-
-- (BOOL) isNil;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex:(NSInteger) index;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (void) freshenParentAndChildIndexes;
-- (void) freshenParentAndChildIndexes:(NSInteger) offset;
-- (void) sanityCheckParentAndChildIndexes;
-- (void) sanityCheckParentAndChildIndexes:(id<ANTLRTree>) parent At:(NSInteger) i;
-
-- (NSInteger) getChildIndex;
-- (void) setChildIndex:(NSInteger)i;
-
-- (id<ANTLRTree>)getAncestor:(NSInteger)ttype;
-- (NSMutableArray *)getAncestors;
-
-#pragma mark Copying
-- (id) copyWithZone:(NSZone *)aZone;	// the children themselves are not copied here!
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-#pragma mark Tree Parser support
-- (NSInteger) getType;
-- (NSString *) getText;
-// In case we don't have a token payload, what is the line for errors?
-- (NSInteger) getLine;
-- (NSInteger) getCharPositionInLine;
-
-
-#pragma mark Informational
-- (NSString *) treeDescription;
-- (NSString *) description;
-
-- (NSString *) toString;
-- (NSString *) toStringTree;
-
-@end
-
-@interface ANTLRBaseTree : NSObject <ANTLRTree>
-{
-	NSMutableArray *children;
-    NSException *anException;
-}
-
-@property (retain, getter=getChildren, setter=setChildren) NSMutableArray *children;
-
-+ (id<ANTLRBaseTree>) newANTLRBaseTree;
-+ (id<ANTLRBaseTree>) newANTLRBaseTree:(id<ANTLRBaseTree>)node;
-         
-- (id<ANTLRTree>) init;
-- (id<ANTLRBaseTree>) initWith:(id<ANTLRTree>)node;
-
-- (id<ANTLRBaseTree>) getChild:(NSUInteger)i;
-- (NSMutableArray *)getChildren;
-- (void) setChildren:(NSMutableArray *)anArray;
-- (id<ANTLRBaseTree>)getFirstChildWithType:(NSInteger)type;
-- (NSUInteger) getChildCount;
-
-//- (void) removeAllChildren;
-
-// Add t as a child to this node.  If t is null, do nothing.  If t
-//  is nil, add all children of t to this' children.
-
-- (void) addChild:(id<ANTLRTree>) tree;
-- (void) addChildren:(NSArray *) theChildren;
-
-- (void) setChild:(NSInteger) i With:(id<ANTLRTree>)t;
-- (id) deleteChild:(NSInteger) i;
-- (NSMutableArray *) createChildrenList;
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-// Indicates the node is a nil node but may still have children, meaning
-// the tree is a flat list.
-
-- (BOOL) isNil;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex:(NSInteger) index;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (void) freshenParentAndChildIndexes;
-- (void) freshenParentAndChildIndexes:(NSInteger) offset;
-- (void) sanityCheckParentAndChildIndexes;
-- (void) sanityCheckParentAndChildIndexes:(id<ANTLRTree>) parent At:(NSInteger) i;
-
-- (NSInteger) getChildIndex;
-- (void) setChildIndex:(NSInteger)i;
-
-- (BOOL) hasAncestor:(NSInteger) ttype;
-- (id<ANTLRTree>)getAncestor:(NSInteger)ttype;
-- (NSMutableArray *)getAncestors;
-
-- (id) copyWithZone:(NSZone *)aZone;
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-	// Return a token type; needed for tree parsing
-- (NSInteger) getType;
-- (NSString *) getText;
-
-	// In case we don't have a token payload, what is the line for errors?
-- (NSInteger) getLine;
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger)pos;
-
-- (NSString *) treeDescription;
-- (NSString *) description;
-- (NSString *) toString;
-- (NSString *) toStringTree;
-
-@end
-
-@interface ANTLRTreeNavigationNode : ANTLRBaseTree {
-}
-- (id) copyWithZone:(NSZone *)aZone;
-@end
-
-@interface ANTLRTreeNavigationNodeDown : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeDown *) getNavigationNodeDown;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-@interface ANTLRTreeNavigationNodeUp : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeUp *) getNavigationNodeUp;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-@interface ANTLRTreeNavigationNodeEOF : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeEOF *) getNavigationNodeEOF;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-extern ANTLRTreeNavigationNodeDown *navigationNodeDown;
-extern ANTLRTreeNavigationNodeUp *navigationNodeUp;
-extern ANTLRTreeNavigationNodeEOF *navigationNodeEOF;
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseTreeAdaptor.h
deleted file mode 100644
index b4f8dad..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBaseTreeAdaptor.h
+++ /dev/null
@@ -1,163 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRCommonErrorNode.h"
-#import "ANTLRUniqueIDMap.h"
-
-@interface ANTLRBaseTreeAdaptor : NSObject <ANTLRTreeAdaptor, NSCopying> {
-    ANTLRUniqueIDMap *treeToUniqueIDMap;
-	NSInteger uniqueNodeID;
-}
-
-@property (retain, getter=getTreeToUniqueIDMap, setter=setTreeToUniqueIDMap:) ANTLRUniqueIDMap *treeToUniqueIDMap;
-@property (getter=getUniqueNodeID, setter=setUniqueNodeID:) NSInteger uniqueNodeID;
-
-+ (id<ANTLRTreeAdaptor>) newEmptyTree;
-
-- (id) init;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id) emptyNode;
-
-- (ANTLRUniqueIDMap *)getTreeToUniqueIDMap;
-- (void) setTreeToUniqueIDMap:(ANTLRUniqueIDMap *)aMapNode;
-
-- (NSInteger)getUniqueID;
-- (void) setUniqueNodeID:(NSInteger)aUniqueNodeID;
-
-/** create tree node that holds the start and stop tokens associated
- *  with an error.
- *
- *  If you specify your own kind of tree nodes, you will likely have to
- *  override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
- *  if no token payload but you might have to set token type for diff
- *  node type.
- *
- *  You don't have to subclass CommonErrorNode; you will likely need to
- *  subclass your own tree node class to avoid class cast exception.
- */
-- (id) errorNode:(id<ANTLRTokenStream>)anInput
-            From:(id<ANTLRToken>)startToken
-              To:(id<ANTLRToken>)stopToken
-       Exception:(NSException *) e;
-
-- (BOOL) isNil:(id<ANTLRTree>) aTree;
-
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree;
-/** This is generic in the sense that it will work with any kind of
- *  tree (not just Tree interface).  It invokes the adaptor routines
- *  not the tree node routines to do the construction.  
- */
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree Parent:(id<ANTLRTree>)parent;
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)aNode;
-/** Add a child to the tree t.  If child is a flat tree (a list), make all
- *  in list children of t.  Warning: if t has no children, but child does
- *  and child isNil then you can decide it is ok to move children to t via
- *  t.children = child.children; i.e., without copying the array.  Just
- *  make sure that this is consistent with how the user will build
- *  ASTs.
- */
-- (void) addChild:(id<ANTLRTree>)aChild toTree:(id<ANTLRTree>)aTree;
-
-/** If oldRoot is a nil root, just copy or move the children to newRoot.
- *  If not a nil root, make oldRoot a child of newRoot.
- *
- *    old=^(nil a b c), new=r yields ^(r a b c)
- *    old=^(a b c), new=r yields ^(r ^(a b c))
- *
- *  If newRoot is a nil-rooted single child tree, use the single
- *  child as the new root node.
- *
- *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
- *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
- *
- *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
- *
- *    old=null, new=r yields r
- *    old=null, new=^(nil r) yields ^(nil r)
- *
- *  Return newRoot.  Throw an exception if newRoot is not a
- *  simple node or nil root with a single child node--it must be a root
- *  node.  If newRoot is ^(nil x) return x as newRoot.
- *
- *  Be advised that it's ok for newRoot to point at oldRoot's
- *  children; i.e., you don't have to copy the list.  We are
- *  constructing these nodes so we should have this control for
- *  efficiency.
- */
-- (id<ANTLRTree>)becomeRoot:(id<ANTLRTree>)aNewRoot old:(id<ANTLRTree>)oldRoot;
-
-/** Transform ^(nil x) to x and nil to null */
-- (id<ANTLRTree>)rulePostProcessing:(id<ANTLRTree>)aRoot;
-
-- (id<ANTLRTree>)becomeRootfromToken:(id<ANTLRToken>)aNewRoot old:(id<ANTLRTree>)oldRoot;
-
-- (id<ANTLRTree>)createTree:(NSInteger)aTType With:(id<ANTLRToken>)aFromToken;
-
-- (id<ANTLRTree>)createTree:(NSInteger)aTType FromToken:(id<ANTLRToken>)aFromToken Text:(NSString *)theText;
-
-- (id<ANTLRTree>)createTree:(NSInteger)aTType Text:(NSString *)theText;
-
-- (NSInteger) getType:(id<ANTLRTree>)aTree;
-
-- (void) setType:(id<ANTLRTree>)aTree Type:(NSInteger)type;
-
-- (NSString *)getText:(id<ANTLRTree>)aTree;
-
-- (void) setText:(id<ANTLRTree>)aTree Text:(NSString *)theText;
-
-- (id<ANTLRTree>) getChild:(id<ANTLRTree>)aTree At:(NSInteger)i;
-
-- (void) setChild:(id<ANTLRTree>)aTree At:(NSInteger)index Child:(id<ANTLRTree>)aChild;
-
-- (id<ANTLRTree>) deleteChild:(id<ANTLRTree>)aTree Index:(NSInteger)index;
-
-- (NSInteger) getChildCount:(id<ANTLRTree>)aTree;
-
-- (NSInteger) getUniqueID:(id<ANTLRTree>)node;
-
-/** Tell me how to create a token for use with imaginary token nodes.
- *  For example, there is probably no input symbol associated with imaginary
- *  token DECL, but you need to create it as a payload or whatever for
- *  the DECL node as in ^(DECL type ID).
- *
- *  This is a variant of createToken where the new token is derived from
- *  an actual real input token.  Typically this is for converting '{'
- *  tokens to BLOCK etc...  You'll see
- *
- *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
- *
- *  If you care what the token payload objects' type is, you should
- *  override this method and any other createToken variant.
- */
-- (id<ANTLRToken>)createToken:(NSInteger)aTType Text:(NSString *)theText;
-
-- (id<ANTLRToken>)createToken:(id<ANTLRToken>)aFromToken;
-
-@end
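For orientation, a minimal sketch of how a generated rule drives the adaptor API declared above; token types 4 and 5 are placeholders, and the concrete ANTLRCommonTreeAdaptor it instantiates is the class removed further down in this diff (the base adaptor supplies emptyNode, addChild:toTree:, becomeRoot:old: and rulePostProcessing:).

    #import "ANTLRCommonTreeAdaptor.h"

    // Build ^(r a b): collect children under a nil root, then hoist the new root.
    static id<ANTLRTree> buildExampleTree(void) {
        ANTLRCommonTreeAdaptor *adaptor = [ANTLRCommonTreeAdaptor newANTLRCommonTreeAdaptor];
        id<ANTLRTree> list = [adaptor emptyNode];                          // ^(nil)
        [adaptor addChild:[adaptor createTree:4 Text:@"a"] toTree:list];   // 4, 5 = placeholder token types
        [adaptor addChild:[adaptor createTree:4 Text:@"b"] toTree:list];
        id<ANTLRTree> r = [adaptor createTree:5 Text:@"r"];
        id<ANTLRTree> root = [adaptor becomeRoot:r old:list];              // ^(nil a b), r  ->  ^(r a b)
        return [adaptor rulePostProcessing:root];                          // ^(nil x) -> x, nil -> null
    }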
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBitSet.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBitSet.h
deleted file mode 100755
index a1be117..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBitSet.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import <CoreFoundation/CoreFoundation.h>
-#import "ANTLRToken.h"
-
-#define BITS (sizeof(NSUInteger) * 8)
-#define LOG_BITS ((sizeof(NSUInteger)==8)?6:5)
-
-// A simple wrapper around CoreFoundation bit vectors to shield the rest of the implementation
-// from the specifics of the BitVector initialization and query functions.
-// This is fast, so there is no need to reinvent the wheel just yet.
-
-@interface ANTLRBitSet : NSObject < NSMutableCopying > {
-	CFMutableBitVectorRef bitVector;
-}
-
-#pragma mark Class Methods
-
-+ (ANTLRBitSet *) newANTLRBitSet;
-+ (ANTLRBitSet *) newANTLRBitSetWithType:(ANTLRTokenType)type;
-/** Construct a ANTLRBitSet given the size
- * @param nbits The size of the ANTLRBitSet in bits
- */
-+ (ANTLRBitSet *) newANTLRBitSetWithNBits:(NSUInteger)nbits;
-+ (ANTLRBitSet *) newANTLRBitSetWithArray:(NSMutableArray *)types;
-+ (ANTLRBitSet *) newANTLRBitSetWithBits:(const unsigned long long *)theBits Count:(NSUInteger)longCount;
-
-+ (ANTLRBitSet *) of:(NSUInteger)el;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c And4:(NSUInteger)d;
-
-#pragma mark Initializer
-
-- (ANTLRBitSet *) init;
-- (ANTLRBitSet *) initWithType:(ANTLRTokenType)type;
-- (ANTLRBitSet *) initWithNBits:(NSUInteger)nbits;
-- (ANTLRBitSet *) initWithBitVector:(CFMutableBitVectorRef)theBitVector;
-- (ANTLRBitSet *) initWithBits:(const unsigned long long *)theBits Count:(NSUInteger)theCount;
-- (ANTLRBitSet *) initWithArrayOfBits:(NSArray *)theArray;
-
-#pragma mark Operations
-- (ANTLRBitSet *) or:(ANTLRBitSet *) aBitSet;
-- (void) orInPlace:(ANTLRBitSet *) aBitSet;
-- (void) add:(NSUInteger) bit;
-- (void) remove:(NSUInteger) bit;
-- (void) setAllBits:(BOOL) aState;
-
-- (NSInteger) numBits;
-- (NSUInteger) size;
-- (void) setSize:(NSUInteger) noOfWords;
-
-#pragma mark Informational
-- (unsigned long long) bitMask:(NSUInteger) bitNumber;
-- (BOOL) member:(NSUInteger)bitNumber;
-- (BOOL) isNil;
-- (NSString *) toString;
-- (NSString *) description;
-
-#pragma mark NSCopying support
-
-- (id) mutableCopyWithZone:(NSZone *) theZone;
-
-
-//private
-- (CFMutableBitVectorRef) _bitVector;
-@end
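A short usage sketch of the set operations declared above; the numeric token types are placeholders rather than real grammar constants.

    #import "ANTLRBitSet.h"

    static void followSetExample(void) {
        ANTLRBitSet *follow = [ANTLRBitSet of:4 And2:5 And3:9];   // seed with three token types
        [follow add:12];
        if ([follow member:5]) {
            NSLog(@"follow set = %@ (size %lu)", [follow toString], (unsigned long)[follow size]);
        }
        [follow remove:12];
    }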
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBufferedTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBufferedTokenStream.h
deleted file mode 100644
index 198a6f7..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBufferedTokenStream.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenStream.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRBitSet.h"
-
-@interface ANTLRBufferedTokenStream : NSObject <ANTLRTokenStream> 
-{
-id<ANTLRTokenSource> tokenSource;
-    
-    /** Record every single token pulled from the source so we can reproduce
-     *  chunks of it later.  The buffer in LookaheadStream overlaps sometimes
-     *  as its moving window moves through the input.  This list captures
-     *  everything so we can access complete input text.
-     */
-NSMutableArray *tokens;
-    
-    /** Track the last mark() call result value for use in rewind(). */
-NSInteger lastMarker;
-    
-    /** The index into the tokens list of the current token (next token
-     *  to consume).  tokens[p] should be LT(1).  p=-1 indicates need
-     *  to initialize with first token.  The ctor doesn't get a token.
-     *  First call to LT(1) or whatever gets the first token and sets p=0;
-     */
-NSInteger p;
-    
-NSInteger range; // how deep have we gone?
-    
-}
-@property (retain, getter=getTokenSource,setter=setTokenSource) id<ANTLRTokenSource> tokenSource;
-@property (retain, getter=getTokens,setter=setTokens) NSMutableArray *tokens;
-@property (assign, getter=getLastMarker,setter=setLastMarker) NSInteger lastMarker;
-@property (assign, getter=getIndex,setter=setIndex) NSInteger p;
-@property (assign, getter=getRange,setter=setRange) NSInteger range;
-
-+ (ANTLRBufferedTokenStream *) newANTLRBufferedTokenStream;
-+ (ANTLRBufferedTokenStream *) newANTLRBufferedTokenStreamWith:(id<ANTLRTokenSource>)aSource;
-- (id) initWithSource:(id<ANTLRTokenSource>)aSource;
-- (id) copyWithZone:(NSZone *)aZone;
-- (NSInteger) getIndex;
-- (void) setIndex:(NSInteger)index;
-- (NSInteger) getRange;
-- (void) setRange:(NSInteger)anInt;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) reset;
-- (void) seek:(NSInteger) index;
-- (NSInteger) size;
-- (void) consume;
-- (void) sync:(NSInteger) i;
-- (void) fetch:(NSInteger) n;
-- (id<ANTLRToken>) getToken:(NSInteger) i;
-- (NSMutableArray *)getFrom:(NSInteger)startIndex To:(NSInteger) stopIndex;
-- (NSInteger) LA:(NSInteger)k;
-- (id<ANTLRToken>) LT:(NSInteger) k;
-- (id<ANTLRToken>) LB:(NSInteger) k;
-- (void) setup;
-- (id<ANTLRTokenSource>) getTokenSource;
-- (void) setTokenSource:(id<ANTLRTokenSource>) aTokenSource;
-- (NSMutableArray *)getTokens;
-- (NSString *) getSourceName;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex With:(ANTLRBitSet *)types;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithList:(NSMutableArray *)types;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithType:(NSInteger)ttype;
-- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startIndex ToToken:(id<ANTLRToken>)stopIndex;
-- (void) fill;
-
-@end
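A sketch of the mark/rewind contract described above; aSource stands in for any id<ANTLRTokenSource>, typically a generated lexer that is outside this hunk.

    #import "ANTLRBufferedTokenStream.h"

    static void speculate(id<ANTLRTokenSource> aSource) {
        ANTLRBufferedTokenStream *stream =
            [ANTLRBufferedTokenStream newANTLRBufferedTokenStreamWith:aSource];
        NSInteger marker = [stream mark];   // remember the current position
        [stream consume];                   // speculatively consume one token
        NSLog(@"lookahead type after consume: %ld", (long)[stream LA:1]);
        [stream rewind:marker];             // give the tokens back
    }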
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBufferedTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBufferedTreeNodeStream.h
deleted file mode 100644
index 8618ea2..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRBufferedTreeNodeStream.h
+++ /dev/null
@@ -1,156 +0,0 @@
-//
-//  ANTLRBufferedTreeNodeStream.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRCommonTreeNodeStream.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRTreeIterator.h"
-#import "ANTLRIntArray.h"
-
-#define DEFAULT_INITIAL_BUFFER_SIZE 100
-#define INITIAL_CALL_STACK_SIZE 10
-
-#ifdef DONTUSENOMO
-@interface ANTLRStreamIterator : ANTLRTreeIterator
-{
-    NSInteger idx;
-    ANTLRBufferedTreeNodeStream *input;
-    NSMutableArray *nodes;
-}
-
-+ (id) newANTLRStreamIterator:(ANTLRBufferedTreeNodeStream *) theStream;
-
-- (id) initWithStream:(ANTLRBufferedTreeNodeStream *) theStream;
-
-- (BOOL) hasNext;
-- (id) next;
-- (void) remove;
-@end
-#endif
-
-@interface ANTLRBufferedTreeNodeStream : NSObject <ANTLRTreeNodeStream> 
-{
-	id<ANTLRTree> up;
-	id<ANTLRTree> down;
-	id<ANTLRTree> eof;
-	
-	NSMutableArray *nodes;
-	
-	id<ANTLRTree> root; // root
-	
-	id<ANTLRTokenStream> tokens;
-	ANTLRCommonTreeAdaptor *adaptor;
-	
-	BOOL uniqueNavigationNodes;
-	NSInteger p;
-	NSInteger lastMarker;
-	ANTLRIntArray *calls;
-	
-	NSEnumerator *e;
-    id currentSymbol;
-	
-}
-
-@property (retain, getter=getUp, setter=setUp:) id<ANTLRTree> up;
-@property (retain, getter=getDown, setter=setDown:) id<ANTLRTree> down;
-@property (retain, getter=getEof, setter=setEof:) id<ANTLRTree> eof;
-@property (retain, getter=getNodes, setter=setNodes:) NSMutableArray *nodes;
-@property (retain, getter=getTreeSource, setter=setTreeSource:) id<ANTLRTree> root;
-@property (retain, getter=getTokenStream, setter=setTokenStream:) id<ANTLRTokenStream> tokens;
-@property (retain, getter=getAdaptor, setter=setAdaptor:) ANTLRCommonTreeAdaptor *adaptor;
-@property (assign, getter=getUniqueNavigationNodes, setter=setUniqueNavigationNodes:) BOOL uniqueNavigationNodes;
-@property (assign, getter=getIndex, setter=setIndex:) NSInteger p;
-@property (assign, getter=getLastMarker, setter=setLastMarker:) NSInteger lastMarker;
-@property (retain, getter=getCalls, setter=setCalls:) ANTLRIntArray *calls;
-@property (retain, getter=getEnum, setter=setEnum:) NSEnumerator *e;
-@property (retain, getter=getCurrentSymbol, setter=setCurrentSymbol:) id currentSymbol;
-
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTree>)tree;
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTreeAdaptor>)adaptor Tree:(id<ANTLRTree>)tree;
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTreeAdaptor>)adaptor Tree:(id<ANTLRTree>)tree withBufferSize:(NSInteger)initialBufferSize;
-
-#pragma mark Constructor
-- (id) initWithTree:(id<ANTLRTree>)tree;
-- (id) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)anAdaptor Tree:(id<ANTLRTree>)tree;
-- (id) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)anAdaptor Tree:(id<ANTLRTree>)tree WithBufferSize:(NSInteger)bufferSize;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-// protected methods. DO NOT USE
-#pragma mark Protected Methods
-- (void) fillBuffer;
-- (void) fillBufferWithTree:(id<ANTLRTree>) tree;
-- (NSInteger) getNodeIndex:(id<ANTLRTree>) node;
-- (void) addNavigationNode:(NSInteger) type;
-- (id) getNode:(NSInteger) i;
-- (id) LT:(NSInteger) k;
-- (id) getCurrentSymbol;
-- (id) LB:(NSInteger) i;
-#pragma mark General Methods
-- (NSString *) getSourceName;
-
-- (id<ANTLRTokenStream>) getTokenStream;
-- (void) setTokenStream:(id<ANTLRTokenStream>) tokens;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>) anAdaptor;
-
-- (BOOL)getUniqueNavigationNodes;
-- (void) setUniqueNavigationNodes:(BOOL)aVal;
-
-- (void) consume;
-- (NSInteger) LA:(NSInteger) i;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (NSInteger) getIndex;
-- (void) setIndex:(NSInteger) idx;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) seek:(NSInteger) idx;
-
-- (void) push:(NSInteger) i;
-- (NSInteger) pop;
-
-- (void) reset;
-- (NSUInteger) count;
-- (NSEnumerator *) objectEnumerator;
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-
-- (NSString *) toTokenTypeString;
-- (NSString *) toTokenString:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *) toStringFromNode:(id)aStart ToNode:(id)aStop;
-
-// getters and setters
-- (NSMutableArray *) getNodes;
-- (id<ANTLRTree>) getEof;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCharStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCharStream.h
deleted file mode 100755
index 379734b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCharStream.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRIntStream.h"
-
-#define	ANTLRCharStreamEOF -1
-
-
-@protocol ANTLRCharStream < ANTLRIntStream >
-
-- (NSString *) substringWithRange:(NSRange) theRange;
-
-/** Get the ith character of lookahead.  This is usually the same as
- *  LA(i).  This will be used for labels in the generated
- *  lexer code.  I'd prefer to return a char here type-wise, but it's
- *  probably better to be 32-bit clean and be consistent with LA.
- */
-- (NSInteger)LT:(NSInteger) i;
-
-// ANTLR tracks the line information automatically
-- (NSInteger) getLine;
-
-// Because this stream can rewind, we need to be able to reset the line
-- (void) setLine:(NSInteger) theLine;
-
-// The index of the character relative to the beginning of the line 0..n-1
-- (NSInteger) getCharPositionInLine;
-
-- (void) setCharPositionInLine:(NSInteger) thePos;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCharStreamState.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCharStreamState.h
deleted file mode 100644
index 2787c76..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCharStreamState.h
+++ /dev/null
@@ -1,58 +0,0 @@
-//
-//  ANTLRCharStreamState.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c)  2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRCharStreamState : NSObject
-{
-NSInteger p;
-NSInteger line;
-NSInteger charPositionInLine;
-}
-
-@property (getter=getP,setter=setP:) NSInteger p;
-@property (getter=getLine,setter=setLine:) NSInteger line;
-@property (getter=getCharPositionInLine,setter=setCharPositionInLine:) NSInteger charPositionInLine;
-
-+ newANTLRCharStreamState;
-
-- (id) init;
-
-- (NSInteger) getP;
-- (void) setP: (NSInteger) anIndex;
-
-- (NSInteger) getLine;
-- (void) setLine: (NSInteger) aLine;
-
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger)aCharPositionInLine;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonErrorNode.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonErrorNode.h
deleted file mode 100644
index 79badc1..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonErrorNode.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-//  ANTLRCommonErrorNode.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-#import "ANTLRTokenStream.h"
-//#import "ANTLRIntStream.h"
-//#import "ANTLRToken.h"
-#import "ANTLRUnWantedTokenException.h"
-
-@interface ANTLRCommonErrorNode : ANTLRCommonTree
-{
-id<ANTLRIntStream> input;
-id<ANTLRToken> startToken;
-id<ANTLRToken> stopToken;
-ANTLRRecognitionException *trappedException;
-}
-
-+ (id) newANTLRCommonErrorNode:(id<ANTLRTokenStream>)anInput
-                  From:(id<ANTLRToken>)startToken
-                    To:(id<ANTLRToken>)stopToken
-                     Exception:(ANTLRRecognitionException *) e;
-
-- (id) initWithInput:(id<ANTLRTokenStream>)anInput
-                From:(id<ANTLRToken>)startToken
-                  To:(id<ANTLRToken>)stopToken
-           Exception:(ANTLRRecognitionException *) e;
-- (BOOL) isNil;
-
-- (NSInteger) getType;
-
-- (NSString *) getText;
-
-- (NSString *) toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonToken.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonToken.h
deleted file mode 100755
index 8662378..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonToken.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRCharStream.h"
-
-@interface ANTLRCommonToken : NSObject < ANTLRToken > {
-	NSString *text;
-	NSInteger type;
-	// information about the Token's position in the input stream
-	NSUInteger line;
-	NSUInteger charPositionInLine;
-	NSUInteger channel;
-	// this token's position in the TokenStream
-	NSUInteger index;
-	
-	// indices into the CharStream to avoid copying the text
-	// can manually override the text by using -setText:
-	NSUInteger startIndex;
-	NSUInteger stopIndex;
-	// the actual input stream this token was found in
-	id<ANTLRCharStream> input;
-}
-
-@property (retain, getter=getText, setter=setText:) NSString *text;
-@property (assign, getter=getType, setter=setType:) NSInteger type;
-@property (assign, getter=getLine, setter=setLine:) NSUInteger line;
-@property (assign, getter=getCharPositionInLine, setter=setCharPositionInLine:) NSUInteger charPositionInLine;
-@property (assign, getter=getChannel, setter=setChannel:) NSUInteger channel;
-@property (assign, getter=getTokenIndex, setter=setTokenIndex:) NSUInteger index;
-@property (assign, getter=getStart, setter=setStart:) NSUInteger startIndex;
-@property (assign, getter=getStop, setter=setStop:) NSUInteger stopIndex;
-@property (retain, getter=getInput, setter=setInput:) id<ANTLRCharStream> input;
-
-+ (void) initialize;
-+ (ANTLRCommonToken *) newANTLRCommonToken;
-+ (ANTLRCommonToken *) newANTLRCommonToken:(id<ANTLRCharStream>)anInput
-                                      Type:(NSInteger)aTType
-                                   Channel:(NSInteger)aChannel
-                                     Start:(NSInteger)aStart
-                                      Stop:(NSInteger)aStop;
-+ (ANTLRCommonToken *) newANTLRCommonToken:(ANTLRTokenType)aType;
-+ (id<ANTLRToken>) newANTLRCommonToken:(NSInteger)tokenType Text:(NSString *)tokenText;
-+ (id<ANTLRToken>) newANTLRCommonTokenWithToken:(id<ANTLRToken>)fromToken;
-+ (id<ANTLRToken>) eofToken;
-+ (id<ANTLRToken>) skipToken;
-+ (id<ANTLRToken>) invalidToken;
-+ (ANTLRTokenChannel) defaultChannel;
-
-// designated initializer. This is used as the default way to initialize a Token in the generated code.
-- (ANTLRCommonToken *) init;
-- (ANTLRCommonToken *) initWithInput:(id<ANTLRCharStream>)anInput
-                                Type:(NSInteger)aTType
-                             Channel:(NSInteger)aChannel
-                               Start:(NSInteger)theStart
-                                Stop:(NSInteger)theStop;
-- (ANTLRCommonToken *) initWithToken:(ANTLRCommonToken *)aToken;
-- (ANTLRCommonToken *) initWithType:(ANTLRTokenType)aType;
-- (ANTLRCommonToken *) initWithType:(ANTLRTokenType)aTType Text:(NSString *)tokenText;
-
-- (id<ANTLRCharStream>) getInput;
-- (void) setInput: (id<ANTLRCharStream>) anInput;
-
-- (NSUInteger) getStart;
-- (void) setStart: (NSUInteger) aStart;
-
-- (NSUInteger) getStop;
-- (void) setStop: (NSUInteger) aStop;
-
-// the index of this Token into the TokenStream
-- (NSUInteger) getTokenIndex;
-- (void) setTokenIndex: (NSUInteger) aTokenIndex;
-
-// conform to NSCopying
-- (id) copyWithZone:(NSZone *)theZone;
-
-- (NSString *) description;
-- (NSString *) toString;
-
-@end
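A minimal sketch of building an imaginary token with the factory declared above; token type 4 and the text are placeholders.

    #import "ANTLRCommonToken.h"

    static void tokenExample(void) {
        ANTLRCommonToken *tok =
            (ANTLRCommonToken *)[ANTLRCommonToken newANTLRCommonToken:4 Text:@"hello"];
        [tok setLine:1];                    // position info is optional for imaginary tokens
        [tok setCharPositionInLine:0];
        NSLog(@"%@", [tok description]);
    }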
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTokenStream.h
deleted file mode 100755
index 59f9d5e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTokenStream.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenStream.h"
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRBufferedTokenStream.h"
-
-@interface ANTLRCommonTokenStream : ANTLRBufferedTokenStream < ANTLRTokenStream >
-{
-	NSMutableDictionary *channelOverride;
-	NSInteger channel;
-}
-
-@property (retain, getter=getChannelOverride,setter=setChannelOverride) NSMutableDictionary *channelOverride;
-@property (assign, getter=getChannel,setter=setChannel) NSInteger channel;
-
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStream;
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStreamWithTokenSource:(id<ANTLRTokenSource>)theTokenSource;
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStreamWithTokenSource:(id<ANTLRTokenSource>)theTokenSource
-                                                               Channel:(NSInteger)aChannel;
-
-- (id) init;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource Channel:(NSInteger)aChannel;
-
-- (id<ANTLRTokenSource>) getTokenSource;
-- (void) setTokenSource: (id<ANTLRTokenSource>) aTokenSource;
-
-- (void) consume;
-- (id<ANTLRToken>) LT:(NSInteger)k;
-- (id<ANTLRToken>) LB:(NSInteger)k;
-
-- (NSInteger) skipOffChannelTokens:(NSInteger) i;
-- (NSInteger) skipOffChannelTokensReverse:(NSInteger) i;
-
-- (void)setup;
-
-- (NSArray *) tokensInRange:(NSRange)aRange;
-- (NSArray *) tokensInRange:(NSRange)aRange inBitSet:(ANTLRBitSet *)aBitSet;
-- (NSArray *) tokensInRange:(NSRange)aRange withTypes:(NSArray *)tokenTypes;
-- (NSArray *) tokensInRange:(NSRange)aRange withType:(NSInteger)tokenType;
-
-- (id<ANTLRToken>) getToken:(NSInteger)i;
-
-- (NSInteger) size;
-- (NSInteger) getIndex;
-- (void) rewind;
-- (void) rewind:(NSInteger)marker;
-- (void) seek:(NSInteger)index;
-
-- (NSString *) toString;
-- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger)getChannel;
-- (void)setChannel:(NSInteger)aChannel;
-
-- (NSMutableDictionary *)getChannelOverride;
-- (void)setChannelOverride:(NSMutableDictionary *)anOverride;
-
-@end
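The usual front-end wiring, sketched with the lexer passed in as a parameter; any id<ANTLRTokenSource> (normally a generated lexer, which is not part of this diff) will do.

    #import "ANTLRCommonTokenStream.h"

    static void frontEndExample(id<ANTLRTokenSource> lexer) {
        ANTLRCommonTokenStream *tokens =
            [ANTLRCommonTokenStream newANTLRCommonTokenStreamWithTokenSource:lexer];
        id<ANTLRToken> first = [tokens LT:1];            // forces the stream to buffer as needed
        NSLog(@"first token: %@", first);
        NSLog(@"buffered text: %@", [tokens toString]);  // text of the buffered tokens
    }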
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTree.h
deleted file mode 100755
index 0966051..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTree.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonToken.h"
-#import "ANTLRBaseTree.h"
-
-@interface ANTLRCommonTree : ANTLRBaseTree <ANTLRTree> {
-	ANTLRCommonToken *token;
-	NSInteger startIndex;
-	NSInteger stopIndex;
-    ANTLRCommonTree *parent;
-    NSInteger childIndex;
-}
-
-@property (retain, getter=getANTLRCommonToken, setter=setANTLRCommonToken) ANTLRCommonToken *token;
-@property (assign, getter=getTokenStartIndex, setter=setTokenStartIndex) NSInteger startIndex;
-@property (assign, getter=getTokenStopIndex, setter=setTokenStopIndex) NSInteger stopIndex;
-@property (retain, getter=getParent, setter=setParent:) ANTLRCommonTree *parent;
-@property (assign, getter=getChildIndex, setter=setChildIndex) NSInteger childIndex;
-
-+ (ANTLRCommonTree *) invalidNode;
-+ (ANTLRCommonTree *) newANTLRCommonTree;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithTree:(ANTLRCommonTree *)aTree;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithToken:(ANTLRCommonToken *)aToken;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithTokenType:(NSInteger)tokenType;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithTokenType:(NSInteger)aTType Text:(NSString *)theText;
-#ifdef DONTUSEYET
-+ (id<ANTLRTree>) newANTLRCommonTreeWithTokenType:(NSInteger)tokenType;
-+ (id<ANTLRTree>) newANTLRCommonTreeWithToken:(id<ANTLRToken>)fromToken TokenType:(NSInteger)tokenType;
-+ (id<ANTLRTree>) newANTLRCommonTreeWithToken:(id<ANTLRToken>)fromToken TokenType:(NSInteger)tokenType Text:(NSString *)tokenText;
-+ (id<ANTLRTree>) newANTLRCommonTreeWithToken:(id<ANTLRToken>)fromToken Text:(NSString *)tokenText;
-#endif
-
-- (id) init;
-- (id) initWithTreeNode:(ANTLRCommonTree *)aNode;
-- (id) initWithToken:(ANTLRCommonToken *)aToken;
-- (id) initWithTokenType:(NSInteger)aTokenType;
-- (id) initWithTokenType:(NSInteger)aTokenType Text:(NSString *)theText;
-
-- (id<ANTLRTree>) copyWithZone:(NSZone *)aZone;
-
-- (BOOL) isNil;
-
-- (ANTLRCommonToken *) getToken;
-- (void) setToken:(ANTLRCommonToken *)aToken;
-- (id<ANTLRTree>) dupNode;
-- (NSInteger) getType;
-- (NSString *) getText;
-- (NSUInteger) getLine;
-- (NSUInteger) getCharPositionInLine;
-- (ANTLRCommonTree *) getParent;
-- (void) setParent:(ANTLRCommonTree *) t;
-
-#ifdef DONTUSENOMO
-- (NSString *) treeDescription;
-#endif
-- (NSString *) description;
-- (void) setUnknownTokenBoundaries;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex: (NSInteger) aStartIndex;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex: (NSInteger) aStopIndex;
-
-@end
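A sketch of creating a node directly via the factories above; the token type and text are placeholders.

    #import "ANTLRCommonTree.h"

    static void treeNodeExample(void) {
        ANTLRCommonTree *node =
            [ANTLRCommonTree newANTLRCommonTreeWithTokenType:4 Text:@"id"];
        NSLog(@"type=%ld text=%@ nil?=%d",
              (long)[node getType], [node getText], [node isNil]);
    }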
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTreeAdaptor.h
deleted file mode 100755
index 53287e6..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTreeAdaptor.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRBaseTreeAdaptor.h"
-
-@interface ANTLRCommonTreeAdaptor : ANTLRBaseTreeAdaptor {
-}
-
-+ (id<ANTLRTree>) newEmptyTree;
-+ (ANTLRCommonTreeAdaptor *)newANTLRCommonTreeAdaptor;
-- (id) init;
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)t;    
-- (ANTLRCommonTree *)createTree:(ANTLRCommonToken *)aToken;
-- (ANTLRCommonTree *)createTree:(NSInteger)tokenType Text:(NSString *)text;
-- (id<ANTLRToken>)createToken:(NSInteger)tokenType Text:(NSString *)text;
-- (void) setTokenBoundaries:(id<ANTLRTree>)t From:(id<ANTLRToken>)startToken To:(id<ANTLRToken>)stopToken;
-- (NSInteger)getTokenStartIndex:(id<ANTLRTree>)t;
-- (NSInteger)getTokenStopIndex:(id<ANTLRTree>)t;
-- (NSString *)getText:(id<ANTLRTree>)t;
-- (void)setText:(id<ANTLRTree>)t Text:(NSString *)text;
-- (NSInteger)getType:(id<ANTLRTree>)t;
-- (void) setType:(id<ANTLRTree>)t Type:(NSInteger)tokenType;
-- (id<ANTLRToken>)getToken:(id<ANTLRTree>)t;
-- (id<ANTLRTree>)getChild:(id<ANTLRTree>)t At:(NSInteger)i;
-- (void) setChild:(id<ANTLRTree>)t At:(NSInteger)i Child:(id<ANTLRTree>)child;
-- (NSInteger)getChildCount:(id<ANTLRTree>)t;
-- (id<ANTLRTree>)getParent:(id<ANTLRTree>)t;
-- (void)setParent:(id<ANTLRTree>)t With:(id<ANTLRTree>)parent;
-- (NSInteger)getChildIndex:(id<ANTLRTree>)t;
-- (void)setChildIndex:(id<ANTLRTree>)t With:(NSInteger)index;
-- (void)replaceChildren:(id<ANTLRTree>)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id<ANTLRTree>)t;
-- (id)copyWithZone:(NSZone *)zone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTreeNodeStream.h
deleted file mode 100755
index 4c68f2e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRCommonTreeNodeStream.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-#import "ANTLRCommonTreeNodeStream.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRTreeNodeStream.h"
-#import "ANTLRTreeIterator.h"
-#import "ANTLRIntArray.h"
-
-@interface ANTLRCommonTreeNodeStream : ANTLRLookaheadStream <ANTLRTreeNodeStream> {
-#define DEFAULT_INITIAL_BUFFER_SIZE 100
-#define INITIAL_CALL_STACK_SIZE 10
-    
-/** Pull nodes from which tree? */
-id root;
-    
-/** If this tree (root) was created from a token stream, track it. */
-id <ANTLRTokenStream> tokens;
-    
-	/** What tree adaptor was used to build these trees */
-ANTLRCommonTreeAdaptor *adaptor;
-    
-/** The tree iterator we are using */
-ANTLRTreeIterator *it;
-    
-/** Stack of indexes used for push/pop calls */
-ANTLRIntArray *calls;    
-    
-/** Treat (nil A B C) trees like flat A B C streams */
-BOOL hasNilRoot;
-    
-/** Tracks tree depth.  Level=0 means we're at root node level. */
-NSInteger level;
-}
-@property (retain, getter=getRoot, setter=setRoot:) ANTLRCommonTree *root;
-@property (retain, getter=getTokens,setter=setTokens:) id<ANTLRTokenStream> tokens;
-@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) ANTLRCommonTreeAdaptor *adaptor;
-
-+ (ANTLRCommonTreeNodeStream *) newANTLRCommonTreeNodeStream:(ANTLRCommonTree *)theTree;
-+ (ANTLRCommonTreeNodeStream *) newANTLRCommonTreeNodeStream:(id<ANTLRTreeAdaptor>)anAdaptor Tree:(ANTLRCommonTree *)theTree;
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)adaptor Tree:(ANTLRCommonTree *)theTree;
-    
-- (void) reset;
-    
-    /** Pull elements from tree iterator.  Track tree level 0..max_level.
-     *  If the tree is nil-rooted, don't emit the initial nil and DOWN, nor the final UP.
-     */
-- (id) nextElement;
-    
-- (BOOL) isEOF:(id<ANTLRTree>) o;
-- (void) setUniqueNavigationNodes:(BOOL) uniqueNavigationNodes;
-    
-- (id) getTreeSource;
-    
-- (NSString *) getSourceName;
-    
-- (id<ANTLRTokenStream>) getTokenStream;
-    
-- (void) setTokenStream:(id<ANTLRTokenStream>) tokens;
-    
-- (ANTLRCommonTreeAdaptor *) getTreeAdaptor;
-    
-- (void) setTreeAdaptor:(ANTLRCommonTreeAdaptor *) adaptor;
-    
-- (NSInteger) LA:(NSInteger) i;
-    
-    /** Make stream jump to a new location, saving old location.
-     *  Switch back with pop().
-     */
-- (ANTLRCommonTree *)getNode:(NSInteger) i;
-
-- (void) push:(NSInteger) index;
-    
-    /** Seek back to previous index saved during last push() call.
-     *  Return top of stack (return index).
-     */
-- (NSInteger) pop;
-    
-// TREE REWRITE INTERFACE
-    
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-    
-- (NSString *) toStringFromNode:(id<ANTLRTree>)startNode ToNode:(id<ANTLRTree>)stopNode;
-
-/** For debugging; destructive: moves tree iterator to end. */
-- (NSString *) toTokenTypeString;
-
-@end
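A sketch of handing a parser-built AST to a tree walker; the walker class is generated from a tree grammar and is not part of this diff, so it appears only as a comment.

    #import "ANTLRCommonTreeNodeStream.h"

    static void walkExample(ANTLRCommonTree *ast, id<ANTLRTokenStream> tokens) {
        ANTLRCommonTreeNodeStream *nodes =
            [ANTLRCommonTreeNodeStream newANTLRCommonTreeNodeStream:ast];
        [nodes setTokenStream:tokens];   // keep the token text reachable from the nodes
        // MyTreeWalker *walker = [MyTreeWalker newMyTreeWalker:nodes];  // hypothetical generated class
        // [walker topRule];
        NSLog(@"%@", [nodes toTokenTypeString]);  // debugging aid declared above; moves the iterator to the end
    }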
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDFA.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDFA.h
deleted file mode 100755
index 9094a3d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDFA.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRNoViableAltException.h"
-
-@interface ANTLRDFA : NSObject {
-	// the tables are set by subclasses to their own static versions.
-	const int *eot;
-	const int *eof;
-	const unichar *min;
-	const unichar *max;
-	const int *accept;
-	const int *special;
-	const int **transition;
-	
-	ANTLRBaseRecognizer *recognizer;
-	NSInteger decisionNumber;
-    NSInteger len;
-}
-
-@property (retain, getter=getRecognizer,setter=setRecognizer:) ANTLRBaseRecognizer *recognizer;
-@property (assign, getter=getDecision,setter=setDecision:) NSInteger decisionNumber;
-@property (assign, getter=getLen,setter=setLen:) NSInteger len;
-
-- (id) initWithRecognizer:(id) theRecognizer;
-// simulate the DFA using the static tables and predict an alternative
-- (NSInteger) predict:(id<ANTLRCharStream>)anInput;
-- (void) noViableAlt:(NSInteger)state Stream:(id<ANTLRIntStream>)anInput;
-
-- (NSInteger) specialStateTransition:(NSInteger)state Stream:(id<ANTLRIntStream>)anInput;
-// - (NSInteger) specialStateTransition:(NSInteger) state;
-//- (unichar) specialTransition:(unichar) state symbol:(NSInteger) symbol;
-
-// hook for debugger support
-- (void) error:(ANTLRNoViableAltException *)nvae;
-
-- (NSString *) description;
-- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment;
-
-+ (void) setIsEmittingDebugInfo:(BOOL) shouldEmitDebugInfo;
-
-- (NSInteger)getDecision;
-- (void)setDecision:(NSInteger)aDecison;
-
-- (ANTLRBaseRecognizer *)getRecognizer;
-- (void)setRecognizer:(ANTLRBaseRecognizer *)aRecognizer;
-- (NSInteger)length;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebug.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebug.h
deleted file mode 100755
index 87383c9..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebug.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDebugEventListener.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugParser.h"
-#import "ANTLRDebugTokenStream.h"
-#import "ANTLRDebugTreeParser.h"
-#import "ANTLRDebugTreeNodeStream.h"
-#import "ANTLRDebugTreeAdaptor.h"
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugEventListener.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugEventListener.h
deleted file mode 100755
index c2bee6c..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugEventListener.h
+++ /dev/null
@@ -1,275 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRDebugEventListener 
-
-#define ANTLRDebugProtocolVersion 1
-
-/** The parser has just entered a rule.  No decision has been made about
-*  which alt is predicted.  This is fired AFTER init actions have been
-*  executed.  Attributes are defined and available etc...
-*/
-- (void) enterRule:(NSString *)ruleName;
-
-/** Because rules can have lots of alternatives, it is very useful to
-*  know which alt you are entering.  This is 1..n for n alts.
-*/
-- (void) enterAlt:(NSInteger)alt;
-
-/** This is the last thing executed before leaving a rule.  It is
-*  executed even if an exception is thrown.  This is triggered after
-*  error reporting and recovery have occurred (unless the exception is
-											   *  not caught in this rule).  This implies an "exitAlt" event.
-*/
-- (void) exitRule:(NSString *)ruleName;
-
-/** Track entry into any (...) subrule or other EBNF construct */
-- (void) enterSubRule:(NSInteger)decisionNumber;
-
-- (void) exitSubRule:(NSInteger)decisionNumber;
-
-/** Every decision, fixed k or arbitrary, has an enter/exit event
-*  so that a GUI can easily track what LT/consume events are
-*  associated with prediction.  You will see a single enter/exit
-*  subrule but multiple enter/exit decision events, one for each
-*  loop iteration.
-*/
-- (void) enterDecision:(NSInteger)decisionNumber;
-
-- (void) exitDecision:(NSInteger)decisionNumber;
-
-/** An input token was consumed; matched by any kind of element.
-*  Trigger after the token was matched by things like match(), matchAny().
-*/
-- (void) consumeToken:(id<ANTLRToken>)t;
-
-/** An off-channel input token was consumed.
-*  Trigger after the token was matched by things like match(), matchAny().
-*  (unless of course the hidden token is first stuff in the input stream).
-*/
-- (void) consumeHiddenToken:(id<ANTLRToken>)t;
-
-/** Somebody (anybody) looked ahead.  Note that this actually gets
-*  triggered by both LA and LT calls.  The debugger will want to know
-*  which Token object was examined.  Like consumeToken, this indicates
-*  what token was seen at that depth.  A remote debugger cannot look
-*  ahead into a file it doesn't have so LT events must pass the token
-*  even if the info is redundant.
-*/
-- (void) LT:(NSInteger)i foundToken:(id<ANTLRToken>)t;
-
-/** The parser is going to look arbitrarily ahead; mark this location,
-*  the token stream's marker is sent in case you need it.
-*/
-- (void) mark:(NSInteger)marker;
-
-/** After an arbitrarily long lookahead as with a cyclic DFA (or with
-*  any backtrack), this informs the debugger that stream should be
-*  rewound to the position associated with marker.
-*/
-- (void) rewind:(NSInteger)marker;
-
-/** Rewind to the input position of the last marker.
-*  Used currently only after a cyclic DFA and just
-*  before starting a sem/syn predicate to get the
-*  input position back to the start of the decision.
-*  Do not "pop" the marker off the state.  mark(i)
-*  and rewind(i) should balance still.
-*/
-- (void) rewind;
-
-- (void) beginBacktrack:(NSInteger)level;
-
-- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful;
-
-/** To watch a parser move through the grammar, the parser needs to
-*  inform the debugger what line/charPos it is passing in the grammar.
-*  For now, this does not know how to switch from one grammar to the
-*  other and back for island grammars etc...
-*
-*  This should also allow breakpoints because the debugger can stop
-*  the parser whenever it hits this line/pos.
-*/
-- (void) locationLine:(NSInteger)line column:(NSInteger)pos;
-
-/** A recognition exception occurred such as NoViableAltException.  I made
-*  this a generic event so that I can alter the exception hierarchy later
-*  without having to alter all the debug objects.
-*
-*  Upon error, the stack of enter rule/subrule must be properly unwound.
-*  If a no-viable-alt occurs, it is within an enter/exit decision, which
-*  also must be rewound.  Even the rewind for each mark must be unwound.
-*  In the Java target this is pretty easy using try/finally, if a bit
-*  ugly in the generated code.  The rewind is generated in DFA.predict()
-*  actually so no code needs to be generated for that.  For languages
-*  w/o this "finally" feature (C++?), the target implementor will have
-*  to build an event stack or something.
-*
-*  Across a socket for remote debugging, only the RecognitionException
-*  data fields are transmitted.  The token object or whatever that
-*  caused the problem was the last object referenced by LT.  The
-*  immediately preceding LT event should hold the unexpected Token or
-*  char.
-*
-*  Here is a sample event trace for grammar:
-*
-*  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
-*    | D
-*    ;
-*
-*  The sequence for this rule (with no viable alt in the subrule) for
-*  input 'c c' (there are 3 tokens) is:
-*
-*		commence
-*		LT(1)
-*		enterRule b
-*		location 7 1
-*		enter decision 3
-*		LT(1)
-*		exit decision 3
-*		enterAlt1
-*		location 7 5
-*		LT(1)
-*		consumeToken [c/<4>,1:0]
-*		location 7 7
-*		enterSubRule 2
-*		enter decision 2
-*		LT(1)
-*		LT(1)
-*		recognitionException NoViableAltException 2 1 2
-*		exit decision 2
-*		exitSubRule 2
-*		beginResync
-*		LT(1)
-*		consumeToken [c/<4>,1:1]
-*		LT(1)
-*		endResync
-*		LT(-1)
-*		exitRule b
-*		terminate
-*/
-- (void) recognitionException:(ANTLRRecognitionException *)e;
-
-/** Indicates the recognizer is about to consume tokens to resynchronize
-*  the parser.  Any consume events from here until the recovered event
-*  are not part of the parse--they are dead tokens.
-*/
-- (void) beginResync;
-
-/** Indicates that the recognizer has finished consuming tokens in order
-*  to resynchronize.  There may be multiple beginResync/endResync pairs
-*  before the recognizer comes out of errorRecovery mode (in which
-*  multiple errors are suppressed).  This will be useful
-*  in a GUI where you probably want to grey out tokens that are consumed
-*  but not matched to anything in the grammar.  Anything between
-*  a beginResync/endResync pair was tossed out by the parser.
-*/
-- (void) endResync;
-
-/** A semantic predicate was evaluated with this result and action text */
-- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result;
-
-/** Announce that parsing has begun.  Not technically useful except for
-*  sending events over a socket.  A GUI for example will launch a thread
-*  to connect and communicate with a remote parser.  The thread will want
-*  to notify the GUI when a connection is made.  ANTLR parsers
-*  trigger this upon entry to the first rule (the ruleLevel is used to
-*  figure this out).
-*/
-- (void) commence;
-
-/** Parsing is over; successfully or not.  Mostly useful for telling
-*  remote debugging listeners that it's time to quit.  When the rule
-*  invocation level goes to zero at the end of a rule, we are done
-*  parsing.
-*/
-- (void) terminate;
-
-
-// T r e e  P a r s i n g
-
-/** Input for a tree parser is an AST, but we know nothing for sure
-*  about a node except its type and text (obtained from the adaptor).
-*  This is the analog of the consumeToken method.  Again, the ID is
-*  usually the hashCode of the node, so it only works if hashCode is
-*  not overridden (i.e., the default identity hash is used).  If the
-*  type is UP or DOWN, then the ID is not really meaningful as it's
-*  fixed--there is just one UP node and one DOWN navigation node.
-*/
-- (void) consumeNode:(NSInteger)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-/** The tree parser looked ahead.  If the type is UP or DOWN,
-*  then the ID is not really meaningful as it's fixed--there is
-*  just one UP node and one DOWN navigation node.
-*/
-- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-
-// A S T  E v e n t s
-
-/** A nil was created (even nil nodes have a unique ID...
-*  they are not "null" per se).  As of 4/28/2006, this
-*  seems to be uniquely triggered when starting a new subtree
-*  such as when entering a subrule in automatic mode and when
-*  building a tree in rewrite mode.
-*/
-- (void) createNilNode:(unsigned)hash;
-
-/** Announce a new node built from text */
-- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type;
-
-/** Announce a new node built from an existing token */
-- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex;
-
-/** Make a node the new root of an existing root.  See TreeAdaptor.becomeRoot().
-*
-*  Note: the newRootID parameter is possibly different
-*  than the TreeAdaptor.becomeRoot() newRoot parameter.
-*  In our case, it will always be the result of calling
-*  TreeAdaptor.becomeRoot() and not root_n or whatever.
-*
-*  The listener should assume that this event occurs
-*  only when the current subrule (or rule) subtree is
-*  being reset to newRootID.
-*
-*/
-- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash;
-
-/** Make childID a child of rootID.
-*  @see org.antlr.runtime.tree.TreeAdaptor.addChild()
-*/
-- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash;
-
-/** Set the start/stop token index for a subtree root or node */
-- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSUInteger)tokenStartIndex To:(NSUInteger)tokenStopIndex;
-
-- (void) waitForDebuggerConnection;
-
-@end
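
The comments above spell out the debug event protocol a listener receives during a parse. As a minimal sketch (the class name LoggingDebugListener is hypothetical, not part of the runtime), a listener conforming to this protocol might simply log a representative subset of the events; a real listener must also implement the remaining required methods. It assumes the ANTLRDebugEventListener.h header above is still on the include path:

    #import <Foundation/Foundation.h>
    #import "ANTLRDebugEventListener.h"

    /* Hypothetical listener sketch; only a representative subset of the
     * required protocol methods is shown. */
    @interface LoggingDebugListener : NSObject <ANTLRDebugEventListener>
    @end

    @implementation LoggingDebugListener

    - (void) commence                        { NSLog(@"parse started"); }
    - (void) terminate                       { NSLog(@"parse finished"); }
    - (void) enterRule:(NSString *)ruleName  { NSLog(@"enter rule %@", ruleName); }
    - (void) exitRule:(NSString *)ruleName   { NSLog(@"exit rule %@", ruleName); }
    - (void) enterDecision:(NSInteger)d      { NSLog(@"enter decision %ld", (long)d); }
    - (void) exitDecision:(NSInteger)d       { NSLog(@"exit decision %ld", (long)d); }

    - (void) consumeToken:(id<ANTLRToken>)t
    {
        // tokens consumed between beginResync/endResync are dead tokens
        NSLog(@"consume %@", t);
    }

    - (void) recognitionException:(ANTLRRecognitionException *)e
    {
        // enter/exit and mark/rewind events are unwound around this, as described above
        NSLog(@"recognition error: %@", e);
    }

    // ... remaining ANTLRDebugEventListener methods omitted for brevity ...

    @end
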
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugEventProxy.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugEventProxy.h
deleted file mode 100755
index 59bf67b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugEventProxy.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRDebugEventListener.h"
-#import <sys/socket.h>
-#import <netinet/in.h>
-#import <netinet/tcp.h>
-#include <arpa/inet.h>
-
-// default port for ANTLRWorks
-#define DEFAULT_DEBUGGER_PORT 0xC001
-
-@interface ANTLRDebugEventProxy : NSObject <ANTLRDebugEventListener> {
-	int serverSocket;
-	
-	struct sockaddr debugger_sockaddr;
-	socklen_t debugger_socklen;
-	int debuggerSocket;
-	NSFileHandle *debuggerFH;
-	
-	NSString *grammarName;
-	int debuggerPort;
-}
-
-- (id) init;
-- (id) initWithGrammarName:(NSString *)aGrammarName debuggerPort:(NSInteger)aPort;
-- (void) waitForDebuggerConnection;
-- (void) waitForAck;
-- (void) sendToDebugger:(NSString *)message;
-- (void) sendToDebugger:(NSString *)message waitForResponse:(BOOL)wait;
-
-- (NSInteger) serverSocket;
-- (void) setServerSocket: (NSInteger) aServerSocket;
-
-- (NSInteger) debuggerSocket;
-- (void) setDebuggerSocket: (NSInteger) aDebuggerSocket;
-
-- (NSString *) grammarName;
-- (void) setGrammarName: (NSString *) aGrammarName;
-
-- (NSInteger) debuggerPort;
-- (void) setDebuggerPort: (NSInteger) aDebuggerPort;
-
-- (NSString *) escapeNewlines:(NSString *)aString;
-
-#pragma mark -
-
-#pragma mark DebugEventListener Protocol
-- (void) enterRule:(NSString *)ruleName;
-- (void) enterAlt:(NSInteger)alt;
-- (void) exitRule:(NSString *)ruleName;
-- (void) enterSubRule:(NSInteger)decisionNumber;
-- (void) exitSubRule:(NSInteger)decisionNumber;
-- (void) enterDecision:(NSInteger)decisionNumber;
-- (void) exitDecision:(NSInteger)decisionNumber;
-- (void) consumeToken:(id<ANTLRToken>)t;
-- (void) consumeHiddenToken:(id<ANTLRToken>)t;
-- (void) LT:(NSInteger)i foundToken:(id<ANTLRToken>)t;
-- (void) mark:(NSInteger)marker;
-- (void) rewind:(NSInteger)marker;
-- (void) rewind;
-- (void) beginBacktrack:(NSInteger)level;
-- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful;
-- (void) locationLine:(NSInteger)line column:(NSInteger)pos;
-- (void) recognitionException:(ANTLRRecognitionException *)e;
-- (void) beginResync;
-- (void) endResync;
-- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result;
-- (void) commence;
-- (void) terminate;
-
-
-#pragma mark Tree Parsing
-- (void) consumeNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-
-#pragma mark AST Events
-
-- (void) createNilNode:(unsigned)hash;
-- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type;
-- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex;
-- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash;
-- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash;
-- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSInteger)tokenStartIndex To:(NSInteger)tokenStopIndex;
-
-@end
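
This proxy is the socket end of the protocol: it serializes each debug event as text and sends it to a remote debugger, ANTLRWorks by default, on DEFAULT_DEBUGGER_PORT (0xC001). A hedged sketch of the intended wiring; the grammar name "T" and the helper name are placeholders:

    #import <Foundation/Foundation.h>
    #import "ANTLRDebugEventProxy.h"

    // Create a proxy, block until the remote debugger attaches, then use it
    // anywhere an id<ANTLRDebugEventListener> is expected.
    static ANTLRDebugEventProxy *attachToANTLRWorks(void)
    {
        ANTLRDebugEventProxy *proxy =
            [[ANTLRDebugEventProxy alloc] initWithGrammarName:@"T"
                                                 debuggerPort:DEFAULT_DEBUGGER_PORT];
        [proxy waitForDebuggerConnection];   // accepts the connection on port 0xC001
        return proxy;
    }
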
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugParser.h
deleted file mode 100755
index b23ff50..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugParser.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugTokenStream.h"
-
-@interface ANTLRDebugParser : ANTLRParser {
-	id<ANTLRDebugEventListener> debugListener;
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream;
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-			  debuggerPort:(NSInteger)portNumber;
-// designated initializer
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-			 debugListener:(id<ANTLRDebugEventListener>)theDebugListener
-			  debuggerPort:(NSInteger)portNumber;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTokenStream.h
deleted file mode 100755
index 335b002..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRDebugTokenStream.h"
-#import "ANTLRDebugEventListener.h"
-
-@interface ANTLRDebugTokenStream : NSObject <ANTLRTokenStream>
-{
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTokenStream> input;
-	BOOL initialStreamState;
-    NSInteger lastMarker;
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream debugListener:(id<ANTLRDebugEventListener>)debugger;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTokenStream>) getInput;
-- (void) setInput:(id<ANTLRTokenStream>)aTokenStream;
-
-- (void) consume;
-- (id<ANTLRToken>) getToken:(NSInteger)index;
-- (NSInteger) getIndex;
-- (void) release:(NSInteger)marker;
-- (void) seek:(NSInteger)index;
-- (NSInteger) size;
-- (id<ANTLRTokenSource>) getTokenSource;
-- (NSString *) getSourceName;
-- (NSString *) toString;
-- (NSString *) toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-@end
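
ANTLRDebugTokenStream looks to be a plain decorator: it wraps an existing id<ANTLRTokenStream> and reports consume/LT traffic to the debug listener before delegating to the wrapped stream. A small sketch under that assumption; the helper name is ours:

    #import "ANTLRDebugTokenStream.h"

    // Wrap any token stream so that its consume/LT activity reaches 'listener'.
    static ANTLRDebugTokenStream *wrapForDebugging(id<ANTLRTokenStream> tokens,
                                                   id<ANTLRDebugEventListener> listener)
    {
        return [[ANTLRDebugTokenStream alloc] initWithTokenStream:tokens
                                                    debugListener:listener];
    }
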
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTreeAdaptor.h
deleted file mode 100755
index 41965fa..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTreeAdaptor.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRBaseTreeAdaptor.h"
-#import "ANTLRDebugEventListener.h"
-
-@interface ANTLRDebugTreeAdaptor : ANTLRBaseTreeAdaptor {
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTreeAdaptor> treeAdaptor;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor debugListener:(id<ANTLRDebugEventListener>)aDebugListener;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor: (id<ANTLRTreeAdaptor>) aTreeAdaptor;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTreeNodeStream.h
deleted file mode 100755
index 70f9939..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTreeNodeStream.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRDebugEventListener.h"
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTreeNodeStream.h"
-
-@interface ANTLRDebugTreeNodeStream : NSObject <ANTLRTreeNodeStream> {
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTreeAdaptor> treeAdaptor;
-	id<ANTLRTreeNodeStream> input;
-	BOOL initialStreamState;
-}
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream debugListener:(id<ANTLRDebugEventListener>)debugger;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTreeNodeStream>) getInput;
-- (void) setInput: (id<ANTLRTreeNodeStream>) aTreeNodeStream;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor: (id<ANTLRTreeAdaptor>) aTreeAdaptor;
-
-#pragma mark ANTLRTreeNodeStream conformance
-
-- (id) LT:(NSInteger)k;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setUniqueNavigationNodes:(BOOL)flag;
-
-#pragma mark ANTLRIntStream conformance
-- (void) consume;
-- (NSInteger) LA:(NSUInteger) i;
-- (NSUInteger) mark;
-- (NSUInteger) getIndex;
-- (void) rewind:(NSUInteger) marker;
-- (void) rewind;
-- (void) release:(NSUInteger) marker;
-- (void) seek:(NSUInteger) index;
-- (NSUInteger) size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTreeParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTreeParser.h
deleted file mode 100755
index cbeac76..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRDebugTreeParser.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeParser.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugTreeNodeStream.h"
-
-@interface ANTLRDebugTreeParser : ANTLRTreeParser {
-	id<ANTLRDebugEventListener> debugListener;
-}
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream;
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-				 debuggerPort:(NSInteger)portNumber;
-	// designated initializer
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-				debugListener:(id<ANTLRDebugEventListener>)theDebugListener
-				 debuggerPort:(NSInteger)portNumber;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLREarlyExitException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLREarlyExitException.h
deleted file mode 100755
index 1a89bbb..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLREarlyExitException.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLREarlyExitException : ANTLRRecognitionException {
-	int decisionNumber;
-}
-
-+ (ANTLREarlyExitException *) exceptionWithStream:(id<ANTLRIntStream>) anInputStream decisionNumber:(NSInteger) aDecisionNumber;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream decisionNumber:(NSInteger) aDecisionNumber;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRError.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRError.h
deleted file mode 100644
index f2657af..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRError.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRError.h
-//  ANTLR
-//
-//  Created by Ian Michell on 30/03/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-// [The "BSD licence"]
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-#define ANTLRErrorDomain @"ANTLRError"
-
-#define ANTLRIllegalArgumentException @"ANTLRIllegalArgumentException"
-#define ANTLRIllegalStateException @"IllegalStateException"
-//#define ANTLRRuntimeException @"RuntimeException"
-//#define ANTLRNoSuchMethodException @"NoSuchMethodException"
-//#define ANTLRNoSuchElementException @"NoSuchElementException"
-//#define ANTLRUnsupportedOperationException @"UnsupportedOperationException"
-
-
-/*typedef enum
-{
-	ANTLRIllegalState = 1,
-	ANTLRIllegalArgument = 2,
-	ANTLRRecognitionError = 3,
-	ANTLRMissingTokenError = 4,
-	ANTLRUnwantedTokenError = 5,
-	ANTLRMismatechedTokenError = 6,
-	ANTLRNoViableAltError = 7
-	
-} ANTLRErrorCode;*/
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRFailedPredicateException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRFailedPredicateException.h
deleted file mode 100755
index 9788cba..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRFailedPredicateException.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-
-@interface ANTLRFailedPredicateException : ANTLRRecognitionException
-{
-	NSString *predicate;
-	NSString *ruleName;
-}
-
-@property (retain, getter=getPredicate, setter=setPredicate:) NSString *predicate;
-@property (retain, getter=getRuleName, setter=setRuleName:) NSString *ruleName;
-
-+ (ANTLRFailedPredicateException *) exceptionWithRuleName:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<ANTLRIntStream>)theStream;
-- (ANTLRFailedPredicateException *) initWithRuleName:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<ANTLRIntStream>)theStream;
-
-#ifdef DONTUSEYET
-- (NSString *) getPredicate;
-- (void) setPredicate:(NSString *)thePredicate;
-- (NSString *) getRuleName;
-- (void) setRuleName:(NSString *)theRuleName;
-#endif
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRFastQueue.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRFastQueue.h
deleted file mode 100644
index cf81817..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRFastQueue.h
+++ /dev/null
@@ -1,68 +0,0 @@
-//
-//  ANTLRFastQueue.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRFastQueue : NSObject <NSCopying>
-{
-	NSAutoreleasePool *pool;
-	NSMutableArray *data;
-	NSInteger p;
-}
-
-@property (retain, getter=getPool, setter=setPool) NSAutoreleasePool *pool;
-@property (retain, getter=getData, setter=setData) NSMutableArray *data;
-@property (assign, getter=getP, setter=setP) NSInteger p;
-
-+ (id) newANTLRFastQueue;
-
-- (id) init;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (void) reset;
-- (id) remove;
-- (void) addObject:(id) o;
-- (NSInteger) count;
-- (NSInteger) size;
-- (id) head;
-- (id) objectAtIndex:(NSInteger) i;
-- (void) clear;
-- (NSString *) toString;
-- (NSAutoreleasePool *)getPool;
-- (void)setPool:(NSAutoreleasePool *)aPool;
-- (NSMutableArray *)getData;
-- (void)setData:(NSMutableArray *)myData;
-- (NSInteger) getP;
-- (void) setP:(NSInteger)anInt;
-
-@end
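
ANTLRFastQueue is a simple FIFO (the runtime apparently uses it to buffer lookahead): addObject: appends, head peeks at the front, remove pops it. A usage sketch assuming the header above:

    #import <Foundation/Foundation.h>
    #import "ANTLRFastQueue.h"

    static void fastQueueDemo(void)
    {
        ANTLRFastQueue *q = [ANTLRFastQueue newANTLRFastQueue];
        [q addObject:@"a"];
        [q addObject:@"b"];
        NSLog(@"head=%@ count=%ld", [q head], (long)[q count]);  // head=a, count=2
        NSLog(@"removed %@", [q remove]);                        // a
        NSLog(@"removed %@", [q remove]);                        // b
    }
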
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashMap.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashMap.h
deleted file mode 100644
index 04aca7b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashMap.h
+++ /dev/null
@@ -1,102 +0,0 @@
-//
-//  ANTLRHashMap.h
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-#import "ANTLRMapElement.h"
-
-#define GLOBAL_SCOPE       0
-#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRHashMap : ANTLRLinkBase {
-	//	ANTLRHashMap *fNext;
-    //    TStringPool *fPool;
-    NSInteger Scope;
-    NSInteger LastHash;
-    NSInteger BuffSize;
-    ANTLRMapElement *ptrBuffer[HASHSIZE];
-    NSInteger mode;
-}
-
-//@property (copy) ANTLRHashMap *fNext;
-//@property (copy) TStringPool *fPool;
-@property (getter=getScope, setter=setScope:) NSInteger Scope;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
-
-// Construction/Destruction
-+ (id)newANTLRHashMap;
-+ (id)newANTLRHashMapWithLen:(NSInteger)aBuffSize;
-- (id)init;
-- (id)initWithLen:(NSInteger)aBuffSize;
-- (void)dealloc;
-- (ANTLRHashMap *)PushScope:( ANTLRHashMap **)map;
-- (ANTLRHashMap *)PopScope:( ANTLRHashMap **)map;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-// Instance Methods
-/*    form hash value for string s */
-- (NSInteger)hash:(NSString *)s;
-/*   find the map for scope level 'level'  */
-- (ANTLRHashMap *)findscope:(int)level;
-/*   look for s in ptrBuffer  */
-- (id)lookup:(NSString *)s Scope:(int)scope;
-/*   install sym in ptrBuffer  */
-- (id)install:(ANTLRMapElement *)sym Scope:(int)scope;
-/*   delete the map's element chains  */
-- (void)deleteANTLRHashMap:(ANTLRMapElement *)np;
-- (int)RemoveSym:(NSString *)s;
-- (void)delete_chain:(ANTLRMapElement *)np;
-#ifdef DONTUSEYET
-- (int)bld_symtab:(KW_TABLE *)toknams;
-#endif
-- (ANTLRMapElement **)getptrBuffer;
-- (ANTLRMapElement *)getptrBufferEntry:(int)idx;
-- (void)setptrBuffer:(ANTLRMapElement *)np Index:(int)idx;
-- (NSInteger)getScope;
-- (void)setScope:(NSInteger)i;
-- (ANTLRMapElement *)getTType:(NSString *)name;
-- (ANTLRMapElement *)getNameInList:(NSInteger)ttype;
-- (void)putNode:(NSString *)name TokenType:(NSInteger)ttype;
-- (NSInteger)getMode;
-- (void)setMode:(NSInteger)aMode;
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx;
-- (id) objectAtIndex:(NSInteger)idx;
-- (void) setObject:(id)aRule atIndex:(NSInteger)idx;
-- (void)addObject:(id)anObject;
-- (ANTLRMapElement *) getName:(NSString *)aName;
-- (void) putName:(NSString *)name Node:(id)aNode;
-
-- (NSEnumerator *)objectEnumerator;
-- (BOOL) hasNext;
-- (ANTLRMapElement *)nextObject;
-@end
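
This class is a small scoped symbol table: names are hashed into ptrBuffer, install:/lookup: add and find ANTLRMapElement entries, and PushScope:/PopScope: chain whole maps so an inner scope shadows the outer one. A hedged sketch of the intended use, via the convenience methods declared above:

    #import <Foundation/Foundation.h>
    #import "ANTLRHashMap.h"

    static void hashMapDemo(void)
    {
        ANTLRHashMap *map = [ANTLRHashMap newANTLRHashMap];
        [map putNode:@"ID" TokenType:4];            // installs under the hash slot for "ID"
        ANTLRMapElement *e = [map getTType:@"ID"];  // lookup:@"ID" Scope:0 underneath
        NSLog(@"found %@", e);

        [map PushScope:&map];                       // nested scope chained to the old map
        // ... install locals here; they shadow the outer scope ...
        [map PopScope:&map];                        // back to the outer scope
    }
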
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashMap.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashMap.m
deleted file mode 100644
index a23426b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashMap.m
+++ /dev/null
@@ -1,521 +0,0 @@
-//
-//  ANTLRHashMap.m
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRHashMap.h"
-
-static NSInteger itIndex;
-
-/*
- * Start of ANTLRHashMap
- */
-@implementation ANTLRHashMap
-
-@synthesize Scope;
-@synthesize LastHash;
-
-+(id)newANTLRHashMap
-{
-    ANTLRHashMap *aNewANTLRHashMap;
-    
-    aNewANTLRHashMap = [[ANTLRHashMap alloc] init];
-	return( aNewANTLRHashMap );
-}
-
-+(id)newANTLRHashMapWithLen:(NSInteger)aBuffSize
-{
-    ANTLRHashMap *aNewANTLRHashMap;
-    
-    aNewANTLRHashMap = [[ANTLRHashMap alloc] initWithLen:aBuffSize];
-	return( aNewANTLRHashMap );
-}
-
--(id)init
-{
-    NSInteger idx;
-    
-	if ((self = [super init]) != nil) {
-		fNext = nil;
-        BuffSize = HASHSIZE;
-		Scope = 0;
-		if ( fNext != nil ) {
-			Scope = ((ANTLRHashMap *)fNext)->Scope+1;
-			for( idx = 0; idx < BuffSize; idx++ ) {
-				ptrBuffer[idx] = ((ANTLRHashMap *)fNext)->ptrBuffer[idx];
-			}
-		}
-        mode = 0;
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)aBuffSize
-{
-    NSInteger idx;
-    
-	if ((self = [super init]) != nil) {
-		fNext = nil;
-        BuffSize = aBuffSize;
-		Scope = 0;
-		if ( fNext != nil ) {
-			Scope = ((ANTLRHashMap *)fNext)->Scope+1;
-			for( idx = 0; idx < BuffSize; idx++ ) {
-				ptrBuffer[idx] = ((ANTLRHashMap *)fNext)->ptrBuffer[idx];
-			}
-		}
-        mode = 0;
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-	
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp && tmp != [((ANTLRHashMap *)fNext) getptrBufferEntry:idx] ) {
-                rtmp = tmp;
-                // tmp = [tmp getfNext];
-                tmp = (ANTLRMapElement *)tmp.fNext;
-                [rtmp dealloc];
-            }
-        }
-    }
-	[super dealloc];
-}
-
-- (NSInteger)count
-{
-    id anElement;
-    NSInteger aCnt = 0;
-    
-    for (NSInteger i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aCnt++;
-        }
-    }
-    return aCnt;
-}
-                          
-- (NSInteger) size
-{
-    id anElement;
-    NSInteger aSize = 0;
-    
-    for (NSInteger i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize += sizeof(id);
-        }
-    }
-    return aSize;
-}
-                                  
-                                  
--(void)deleteANTLRHashMap:(ANTLRMapElement *)np
-{
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-    
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp && tmp != (ANTLRLinkBase *)[((ANTLRHashMap *)fNext) getptrBufferEntry:idx] ) {
-                rtmp = tmp;
-                tmp = [tmp getfNext];
-                [rtmp dealloc];
-            }
-        }
-    }
-}
-
--(ANTLRHashMap *)PushScope:(ANTLRHashMap **)map
-{
-    NSInteger idx;
-    ANTLRHashMap *htmp;
-    
-    htmp = [ANTLRHashMap newANTLRHashMap];
-    if ( *map != nil ) {
-        ((ANTLRHashMap *)htmp)->fNext = *map;
-        [htmp setScope:[((ANTLRHashMap *)htmp->fNext) getScope]+1];
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            htmp->ptrBuffer[idx] = ((ANTLRHashMap *)htmp->fNext)->ptrBuffer[idx];
-        }
-    }
-    //    gScopeLevel++;
-    *map = htmp;
-    return( htmp );
-}
-
--(ANTLRHashMap *)PopScope:(ANTLRHashMap **)map
-{
-    NSInteger idx;
-    ANTLRMapElement *tmp;
-	ANTLRHashMap *htmp;
-    
-    htmp = *map;
-    if ( (*map)->fNext != nil ) {
-        *map = (ANTLRHashMap *)htmp->fNext;
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            if ( htmp->ptrBuffer[idx] == nil ||
-                htmp->ptrBuffer[idx] == (*map)->ptrBuffer[idx] ) {
-                break;
-            }
-            tmp = htmp->ptrBuffer[idx];
-            /*
-             * must deal with parms, locals and labels at some point
-             * can not forget the debuggers
-             */
-            htmp->ptrBuffer[idx] = [tmp getfNext];
-            [ tmp dealloc];
-        }
-        *map = (ANTLRHashMap *)htmp->fNext;
-        //        gScopeLevel--;
-    }
-    return( htmp );
-}
-
-#ifdef USERDOC
-/*
- *  HASH        hash entry to get index to table
- *  NSInteger hash( ANTLRHashMap *self, char *s );
- *
- *     Inputs:  char *s             string to find
- *
- *     Returns: NSInteger                 hashed value
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)hash:(NSString *)s       /*    form hash value for string s */
-{
-	NSInteger hashval;
-	const char *tmp;
-    
-	tmp = [s cStringUsingEncoding:NSASCIIStringEncoding];
-	for( hashval = 0; *tmp != '\0'; )
-        hashval += *tmp++;
-	self->LastHash = hashval % BuffSize;
-	return( self->LastHash );
-}
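
A concrete check of the additive hash above: for the key @"ID" the character codes 73 ('I') and 68 ('D') sum to 141, and with the default HASHSIZE of 101 the slot is 141 % 101 = 40. A one-off sketch:

    #import "ANTLRHashMap.h"

    static void hashDemo(void)
    {
        ANTLRHashMap *m = [ANTLRHashMap newANTLRHashMap];   // BuffSize == HASHSIZE == 101
        NSLog(@"slot for ID = %ld", (long)[m hash:@"ID"]);  // (73 + 68) % 101 == 40
    }
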
-
-#ifdef USERDOC
-/*
- *  FINDSCOPE  search hashed list for entry
- *  ANTLRHashMap *findscope( ANTLRHashMap *self, NSInteger scope );
- *
- *     Inputs:  NSInteger       scope -- scope level to find
- *
- *     Returns: ANTLRHashMap   pointer to ptrBuffer of proper scope level
- *
- *  Last Revision 9/03/90
- */
-#endif
--(ANTLRHashMap *)findscope:(NSInteger)scope
-{
-    if ( self->Scope == scope ) {
-        return( self );
-    }
-    else if ( fNext ) {
-        return( [((ANTLRHashMap *)fNext) findscope:scope] );
-    }
-    return( nil );              /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  LOOKUP  search hashed list for entry
- *  ANTLRMapElement *lookup( ANTLRHashMap *self, char *s, NSInteger scope );
- *
- *     Inputs:  char     *s          string to find
- *
- *     Returns: ANTLRMapElement  *           pointer to entry
- *
- *  Last Revision 9/03/90
- */
-#endif
--(id)lookup:(NSString *)s Scope:(NSInteger)scope
-{
-    ANTLRMapElement *np;
-    
-    for( np = self->ptrBuffer[[self hash:s]]; np != nil; np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  INSTALL search hashed list for entry
- *  NSInteger install( ANTLRHashMap *self, ANTLRMapElement *sym, NSInteger scope );
- *
- *     Inputs:  ANTLRMapElement    *sym   -- symbol ptr to install
- *              NSInteger         scope -- level to find
- *
- *     Returns: Boolean     TRUE   if installed
- *                          FALSE  if already in table
- *
- *  Last Revision 9/03/90
- */
-#endif
--(ANTLRMapElement *)install:(ANTLRMapElement *)sym Scope:(NSInteger)scope
-{
-    ANTLRMapElement *np;
-    
-    np = [self lookup:[sym getName] Scope:scope ];
-    if ( np == nil ) {
-        [sym retain];
-        [sym setFNext:self->ptrBuffer[ self->LastHash ]];
-        self->ptrBuffer[ self->LastHash ] = sym;
-        return( self->ptrBuffer[ self->LastHash ] );
-    }
-    return( nil );            /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  RemoveSym  search hashed list for entry
- *  NSInteger RemoveSym( ANTLRHashMap *self, char *s );
- *
- *     Inputs:  char     *s          string to find
- *
- *     Returns: NSInteger      indicator of SUCCESS OR FAILURE
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)RemoveSym:(NSString *)s
-{
-    ANTLRMapElement *np, *tmp;
-    NSInteger idx;
-    
-    idx = [self hash:s];
-    for ( tmp = self->ptrBuffer[idx], np = self->ptrBuffer[idx]; np != nil; np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            tmp = [np getfNext];             /* get the next link  */
-            [np dealloc];
-            return( SUCCESS );            /* report SUCCESS     */
-        }
-        tmp = [np getfNext];              //  BAD!!!!!!
-    }
-    return( FAILURE );                    /*   not found      */
-}
-
--(void)delete_chain:(ANTLRMapElement *)np
-{
-    if ( [np getfNext] != nil )
-		[self delete_chain:[np getfNext]];
-	[np dealloc];
-}
-
-#ifdef DONTUSEYET
--(NSInteger)bld_symtab:(KW_TABLE *)toknams
-{
-    NSInteger i;
-    ANTLRMapElement *np;
-    
-    for( i = 0; *(toknams[i].name) != '\0'; i++ ) {
-        // install symbol in ptrBuffer
-        np = [ANTLRMapElement newANTLRMapElement:[NSString stringWithFormat:@"%s", toknams[i].name]];
-        //        np->fType = toknams[i].toknum;
-        [self install:np Scope:0];
-    }
-    return( SUCCESS );
-}
-#endif
-
--(ANTLRMapElement *)getptrBufferEntry:(NSInteger)idx
-{
-	return( ptrBuffer[idx] );
-}
-
--(ANTLRMapElement **)getptrBuffer
-{
-	return( ptrBuffer );
-}
-
--(void)setptrBuffer:(ANTLRMapElement *)np Index:(NSInteger)idx
-{
-	if ( idx < BuffSize ) {
-        [np retain];
-		ptrBuffer[idx] = np;
-    }
-}
-
--(NSInteger)getScope
-{
-	return( Scope );
-}
-
--(void)setScopeScope:(NSInteger)i
-{
-	Scope = i;
-}
-
-- (ANTLRMapElement *)getTType:(NSString *)name
-{
-    return [self lookup:name Scope:0];
-}
-
-/*
- * works only for map lists indexed by token number rather than by name
- */
-- (ANTLRMapElement *)getNameInList:(NSInteger)ttype
-{
-    ANTLRMapElement *np;
-    NSInteger aTType;
-
-    aTType = ttype % BuffSize;
-    for( np = self->ptrBuffer[aTType]; np != nil; np = [np getfNext] ) {  /* use the bounded index */
-        if ( [np.index integerValue] == ttype ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-- (ANTLRLinkBase *)getName:(NSString *)name
-{
-    return [self lookup:name Scope:0]; /*  nil if not found      */    
-}
-
-- (void)putNode:(NSString *)name TokenType:(NSInteger)ttype
-{
-    ANTLRMapElement *np;
-    
-    // install symbol in ptrBuffer
-    np = [ANTLRMapElement newANTLRMapElementWithName:[NSString stringWithString:name] Type:ttype];
-    //        np->fType = toknams[i].toknum;
-    [self install:np Scope:0];
-}
-
-- (NSInteger)getMode
-{
-    return mode;
-}
-
-- (void)setMode:(NSInteger)aMode
-{
-    mode = aMode;
-}
-
-- (void) addObject:(id)aRule
-{
-    NSInteger idx;
-
-    idx = [self count];
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-/* this may have to handle linking into the chain
- */
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    if (aRule != ptrBuffer[idx]) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (id)objectAtIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    return ptrBuffer[idx];
-}
-
-/* this will never link into the chain
- */
-- (void) setObject:(id)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    if (aRule != ptrBuffer[idx]) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (void)putName:(NSString *)name Node:(id)aNode
-{
-    ANTLRMapElement *np;
-    
-    np = [self lookup:name Scope:0 ];
-    if ( np == nil ) {
-        np = [ANTLRMapElement newANTLRMapElementWithName:name Node:aNode];
-        if (ptrBuffer[LastHash] != nil)
-            [ptrBuffer[LastHash] release];
-        [np retain];
-        np.fNext = ptrBuffer[ LastHash ];
-        ptrBuffer[ LastHash ] = np;
-    }
-    return;    
-}
-
-- (NSEnumerator *)objectEnumerator
-{
-    NSEnumerator *anEnumerator = nil;   // enumeration is unimplemented; return nil, not an uninitialized pointer
-
-    itIndex = 0;
-    return anEnumerator;
-}
-
-- (BOOL)hasNext
-{
-    if (self && [self count] < BuffSize-1) {
-        return YES;
-    }
-    return NO;
-}
-
-- (ANTLRMapElement *)nextObject
-{
-    if (self && itIndex < BuffSize-1) {
-        return ptrBuffer[itIndex];
-    }
-    return nil;
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashRule.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashRule.h
deleted file mode 100644
index f1558e8..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashRule.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-//  ANTLRHashRule.h
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuleMemo.h"
-#import "ANTLRPtrBuffer.h"
-
-#define GLOBAL_SCOPE       0
-#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRHashRule : ANTLRPtrBuffer {
-	//	ANTLRHashRule *fNext;
-    //    TStringPool *fPool;
-    NSInteger LastHash;
-    NSInteger mode;
-}
-
-//@property (copy) ANTLRHashRule *fNext;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
-
-// Construction/Destruction
-+ (id)newANTLRHashRule;
-+ (id)newANTLRHashRuleWithLen:(NSInteger)aBuffSize;
-- (id)init;
-- (id)initWithLen:(NSInteger)aBuffSize;
-- (void)dealloc;
-
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-
-// Instance Methods
-- (void)deleteANTLRHashRule:(ANTLRRuleMemo *)np;
-- (void)delete_chain:(ANTLRRuleMemo *)np;
-- (ANTLRRuleMemo **)getPtrBuffer;
-- (void)setPtrBuffer:(ANTLRRuleMemo **)np;
-- (NSNumber *)getRuleMemoStopIndex:(NSInteger)aStartIndex;
-- (void)putRuleMemoAtStartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex;
-- (NSInteger)getMode;
-- (void)setMode:(NSInteger)aMode;
-- (void) insertObject:(ANTLRRuleMemo *)aRule atIndex:(NSInteger)Index;
-- (ANTLRRuleMemo *) objectAtIndex:(NSInteger)Index;
-
-@end
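
ANTLRHashRule appears to be the per-rule memoization table used when backtracking: putRuleMemoAtStartIndex:StopIndex: records where a rule attempted at a given input index ended, and getRuleMemoStopIndex: retrieves that answer so the rule is not re-parsed at the same position. A hedged sketch:

    #import <Foundation/Foundation.h>
    #import "ANTLRHashRule.h"

    static void ruleMemoDemo(void)
    {
        ANTLRHashRule *memo = [ANTLRHashRule newANTLRHashRule];
        [memo putRuleMemoAtStartIndex:5 StopIndex:9];        // rule attempted at 5 ended at 9
        NSNumber *stop = [memo getRuleMemoStopIndex:5];
        if (stop != nil)
            NSLog(@"memoized: skip ahead to token index %ld", (long)[stop integerValue]);
    }
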
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashRule.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashRule.m
deleted file mode 100644
index 93ce3a1..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRHashRule.m
+++ /dev/null
@@ -1,281 +0,0 @@
-//
-//  ANTLRHashRule.m
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-#define ANTLR_MEMO_RULE_UNKNOWN -1
-
-#import "ANTLRHashRule.h"
-
-/*
- * Start of ANTLRHashRule
- */
-@implementation ANTLRHashRule
-
-@synthesize LastHash;
-
-+(id)newANTLRHashRule
-{
-    ANTLRHashRule *aNewANTLRHashRule;
-    
-    aNewANTLRHashRule = [[ANTLRHashRule alloc] init];
-	return( aNewANTLRHashRule );
-}
-
-+(id)newANTLRHashRuleWithLen:(NSInteger)aBuffSize
-{
-    ANTLRHashRule *aNewANTLRHashRule;
-    
-    aNewANTLRHashRule = [[ANTLRHashRule alloc] initWithLen:aBuffSize];
-	return( aNewANTLRHashRule );
-}
-
--(id)init
-{
-	if ((self = [super initWithLen:HASHSIZE]) != nil) {
-		fNext = nil;
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)aBuffSize
-{
-	if ((self = [super initWithLen:aBuffSize]) != nil) {
-		fNext = nil;
-        mode = 0;
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-    ANTLRRuleMemo *tmp, *rtmp;
-    int Index;
-	
-    if ( self.fNext != nil ) {
-        for( Index = 0; Index < BuffSize; Index++ ) {
-            tmp = ptrBuffer[Index];
-            while ( tmp && tmp != ptrBuffer[Index] ) {
-                rtmp = tmp;
-                // tmp = [tmp getfNext];
-                tmp = (ANTLRRuleMemo *)tmp.fNext;
-                [rtmp dealloc];
-            }
-        }
-    }
-	[super dealloc];
-}
-
-- (NSInteger)count
-{
-    id anElement;
-    NSInteger aCnt = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        anElement = ptrBuffer[i];
-        if ( anElement != nil ) {
-            aCnt++;
-        }
-    }
-    return aCnt;
-}
-                          
-- (NSInteger) length
-{
-    return BuffSize;
-}
-
-- (NSInteger) size
-{
-    id anElement;
-    NSInteger aSize = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize += sizeof(id);
-        }
-    }
-    return aSize;
-}
-                                  
-                                  
--(void)deleteANTLRHashRule:(ANTLRRuleMemo *)np
-{
-    ANTLRRuleMemo *tmp, *rtmp;
-    int Index;
-    
-    if ( self.fNext != nil ) {
-        for( Index = 0; Index < BuffSize; Index++ ) {
-            tmp = ptrBuffer[Index];
-            while ( tmp && tmp != ptrBuffer[Index ] ) {
-                rtmp = tmp;
-                tmp = tmp.fNext;
-                [rtmp dealloc];
-            }
-        }
-    }
-}
-
--(void)delete_chain:(ANTLRRuleMemo *)np
-{
-    if ( np.fNext != nil )
-		[self delete_chain:np.fNext];
-	[np dealloc];
-}
-
--(ANTLRRuleMemo **)getPtrBuffer
-{
-	return( ptrBuffer );
-}
-
--(void)setPtrBuffer:(ANTLRRuleMemo **)np
-{
-	ptrBuffer = np;
-}
-
-- (NSNumber *)getRuleMemoStopIndex:(NSInteger)aStartIndex
-{
-    ANTLRRuleMemo *aRule;
-    NSNumber *stopIndex;
-    NSInteger anIndex;
-    
-    anIndex = ( aStartIndex >= BuffSize ) ? aStartIndex %= BuffSize : aStartIndex;
-    if ((aRule = ptrBuffer[anIndex]) == nil) {
-        return nil;
-    }
-    stopIndex = [aRule getStopIndex:aStartIndex];
-    return stopIndex;
-}
-
-- (void)putRuleMemo:(ANTLRRuleMemo *)aRule AtStartIndex:(NSInteger)aStartIndex
-{
-    NSInteger anIndex;
-    
-    anIndex = (aStartIndex >= BuffSize) ? aStartIndex %= BuffSize : aStartIndex;
-    if ( ptrBuffer[anIndex] == nil ) {
-        ptrBuffer[anIndex] = aRule;
-        [aRule retain];
-    }
-    else {
-        do {
-            if ( [aRule.startIndex integerValue] == aStartIndex ) {
-                [aRule setStartIndex:aRule.stopIndex];
-                return;
-            }
-            aRule = aRule.fNext;
-        } while ( aRule != nil );
-    }
-}
-
-- (void)putRuleMemoAtStartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex
-{
-    ANTLRRuleMemo *aRule, *newRule;
-    NSInteger anIndex;
-    NSInteger aMatchIndex;
-
-    anIndex = (aStartIndex >= BuffSize) ? aStartIndex %= BuffSize : aStartIndex;
-    if ((aRule = ptrBuffer[anIndex]) == nil ) {
-        aRule = [ANTLRRuleMemo newANTLRRuleMemoWithStartIndex:[NSNumber numberWithInteger:aStartIndex]
-                                                    StopIndex:[NSNumber numberWithInteger:aStopIndex]];
-        [aRule retain];
-        ptrBuffer[anIndex] = aRule;
-    }
-    else {
-        aMatchIndex = [aRule.startIndex integerValue];
-        if ( aStartIndex > aMatchIndex ) {
-            if ( aRule != ptrBuffer[anIndex] ) {
-                [aRule retain];
-            }
-            aRule.fNext = ptrBuffer[anIndex];
-            ptrBuffer[anIndex] = aRule;
-            return;
-        }
-        while (aRule.fNext != nil) {
-            aMatchIndex = [((ANTLRRuleMemo *)aRule.fNext).startIndex integerValue];
-            if ( aStartIndex > aMatchIndex ) {
-                newRule = [ANTLRRuleMemo newANTLRRuleMemoWithStartIndex:[NSNumber numberWithInteger:aStartIndex]
-                                                              StopIndex:[NSNumber numberWithInteger:aStopIndex]];
-                [newRule retain];
-                newRule.fNext = aRule.fNext;
-                aRule.fNext = newRule;
-                return;
-            }
-            if ( aMatchIndex == aStartIndex ) {
-                [aRule setStartIndex:aRule.stopIndex];
-                return;
-            }
-            aRule = aRule.fNext;
-        }
-    }
-}
-
-- (NSInteger)getLastHash
-{
-    return LastHash;
-}
-
-- (void)setLastHash:(NSInteger)aHash
-{
-    LastHash = aHash;
-}
-
-- (NSInteger)getMode
-{
-    return mode;
-}
-
-- (void)setMode:(NSInteger)aMode
-{
-    mode = aMode;
-}
-
-- (void) insertObject:(ANTLRRuleMemo *)aRule atIndex:(NSInteger)anIndex
-{
-    NSInteger Index;
-    
-    Index = ( anIndex >= BuffSize ) ? anIndex %= BuffSize : anIndex;
-    if (aRule != ptrBuffer[Index]) {
-        if (ptrBuffer[Index] != nil) {
-            [ptrBuffer[Index] release];
-        }
-        [aRule retain];
-    }
-    ptrBuffer[Index] = aRule;
-}
-
-- (ANTLRRuleMemo *)objectAtIndex:(NSInteger)anIndex
-{
-    NSInteger anIdx;
-
-    anIdx = ( anIndex >= BuffSize ) ? anIndex %= BuffSize : anIndex;
-    return ptrBuffer[anIdx];
-}
-
-
-@end
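ANTLRHashRule above is the rule-memoization table used by backtracking recognizers: rule start indexes hash into ptrBuffer buckets that chain ANTLRRuleMemo entries through fNext. A minimal usage sketch, based only on the declarations in the removed header (manual retain/release assumed, as in the surrounding code; the expected lookup result is inferred from the implementation above):

    #import "ANTLRHashRule.h"

    static void MemoizeExample(void)
    {
        // Record that a rule starting at token index 12 matched through token 17,
        // then look the result up the next time the same start index is attempted.
        ANTLRHashRule *memo = [ANTLRHashRule newANTLRHashRuleWithLen:HASHSIZE];
        [memo putRuleMemoAtStartIndex:12 StopIndex:17];
        NSNumber *stop = [memo getRuleMemoStopIndex:12];   // nil means "not memoized yet"
        if ( stop != nil )
            NSLog(@"already parsed; skip to %ld", (long)[stop integerValue]);
        [memo release];                                    // class uses manual retain/release
    }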
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRIntArray.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRIntArray.h
deleted file mode 100644
index 5269b23..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRIntArray.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-//  ANTLRIntArray.h
-//  ANTLR
-//
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-
-#define ANTLR_INT_ARRAY_INITIAL_SIZE 10
-
-@interface ANTLRIntArray : ANTLRPtrBuffer 
-{
-}
-
-+ (ANTLRIntArray *)newANTLRIntArray;
-+ (ANTLRIntArray *)newANTLRIntArrayWithLen:(NSInteger)aLen;
-
-- (id) init;
-- (id) initWithLen:(NSInteger)aLen;
-
-- (void) dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (void) addInteger:(NSInteger) v;
-- (void) push:(NSInteger) v;
-- (NSInteger) pop;
-- (NSInteger) integerAtIndex:(NSInteger) i;
-- (void) insertInteger:(NSInteger)anInteger AtIndex:(NSInteger) idx;
-- (NSInteger) size;
-- (void) reset;
-
-- (NSInteger) count;
-- (NSInteger) size;
-
-@end
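ANTLRIntArray layers a small integer stack/array API over ANTLRPtrBuffer. A short sketch of the declared calls; the expected values in the comments assume the usual LIFO push/pop semantics:

    #import "ANTLRIntArray.h"

    static void IntArrayExample(void)
    {
        ANTLRIntArray *stack = [ANTLRIntArray newANTLRIntArrayWithLen:ANTLR_INT_ARRAY_INITIAL_SIZE];
        [stack push:3];
        [stack push:7];
        NSInteger top   = [stack pop];                // expected 7
        [stack addInteger:42];                        // append at the end
        NSInteger first = [stack integerAtIndex:0];   // expected 3
        NSLog(@"popped %ld, first %ld, count %ld", (long)top, (long)first, (long)[stack count]);
        [stack release];
    }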
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRIntStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRIntStream.h
deleted file mode 100755
index 3790cd9..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRIntStream.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-@protocol ANTLRIntStream < NSObject, NSCopying >
-
-- (void) consume;
-
-// Get unichar at current input pointer + i ahead where i=1 is next character as int for including ANTLRCharStreamEOF (-1) in the data range
-- (NSInteger) LA:(NSInteger) i;
-
-// Tell the stream to start buffering if it hasn't already.  Return
-// current input position, index(), or some other marker so that
-// when passed to rewind() you get back to the same spot.
-// rewind(mark()) should not affect the input cursor.
-// TODO: problem in that lexer stream returns not index but some marker 
-
-- (NSInteger) mark;
-
-// Return the current input symbol index 0..n where n indicates the
-// last symbol has been read.
-
-- (NSInteger) getIndex;
-
-// Reset the stream so that next call to index would return marker.
-// The marker will usually be -index but it doesn't have to be.  It's
-// just a marker to indicate what state the stream was in.  This is
-// essentially calling -release: and -seek:.  If there are markers
-// created after this marker argument, this routine must unroll them
-// like a stack.  Assume the state the stream was in when this marker
-// was created.
-
-- (void) rewind;
-- (void) rewind:(NSInteger) marker;
-
-// You may want to commit to a backtrack but don't want to force the
-// stream to keep bookkeeping objects around for a marker that is
-// no longer necessary.  This will have the same behavior as
-// rewind() except it releases resources without the backward seek.
-
-- (void) release:(NSInteger) marker;
-
-// Set the input cursor to the position indicated by index.  This is
-// normally used to seek ahead in the input stream.  No buffering is
-// required to do this unless you know your stream will use seek to
-// move backwards such as when backtracking.
-// This is different from rewind in its multi-directional
-// requirement and in that its argument is strictly an input cursor (index).
-//
-// For char streams, seeking forward must update the stream state such
-// as line number.  For seeking backwards, you will be presumably
-// backtracking using the mark/rewind mechanism that restores state and
-// so this method does not need to update state when seeking backwards.
-//
-// Currently, this method is only used for efficient backtracking, but
-// in the future it may be used for incremental parsing.
-
-- (void) seek:(NSInteger) index;
-
-/** Only makes sense for streams that buffer everything up probably, but
- *  might be useful to display the entire stream or for testing.  This
- *  value includes a single EOF.
- */
-- (NSUInteger) size;
-/** Where are you getting symbols from?  Normally, implementations will
- *  pass the buck all the way to the lexer who can ask its input stream
- *  for the file name or whatever.
- */
-- (NSString *)getSourceName;
-
-@end
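The comments above spell out the mark/rewind/seek contract that every input stream in this runtime honors. A minimal sketch of the usual speculative-match pattern, using only the protocol methods declared here:

    #import "ANTLRIntStream.h"

    static BOOL SpeculativelyMatchesLetterA(id<ANTLRIntStream> input)
    {
        // mark returns a marker for the current position; rewind: restores it,
        // so the caller can look ahead and consume without committing.
        NSInteger marker = [input mark];
        BOOL matched = ([input LA:1] == 'a');
        if ( matched )
            [input consume];
        [input rewind:marker];     // back to where we started, per the contract above
        return matched;
    }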
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLexer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLexer.h
deleted file mode 100755
index 5cfb36f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLexer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenSource.h"
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRRecognizerSharedState.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRRecognitionException.h"
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRMismatchedRangeException.h"
-
-@interface ANTLRLexer : ANTLRBaseRecognizer <ANTLRTokenSource> {
-	id<ANTLRCharStream> input;      ///< The character stream we pull tokens out of.
-	NSUInteger ruleNestingLevel;
-}
-
-@property (retain, getter=getInput, setter=setInput:) id<ANTLRCharStream> input;
-@property (getter=getRuleNestingLevel, setter=setRuleNestingLevel) NSUInteger ruleNestingLevel;
-
-#pragma mark Initializer
-- (id) initWithCharStream:(id<ANTLRCharStream>) anInput;
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput State:(ANTLRRecognizerSharedState *)state;
-
-- (id) copyWithZone:(NSZone *)zone;
-
-- (void) reset;
-
-// - (ANTLRRecognizerSharedState *) state;
-
-#pragma mark Tokens
-- (id<ANTLRToken>)getToken;
-- (void) setToken: (id<ANTLRToken>) aToken;
-- (id<ANTLRToken>) nextToken;
-- (void) mTokens;		// abstract, defined in generated sources
-- (void) skip;
-- (id<ANTLRCharStream>) getInput;
-- (void) setInput:(id<ANTLRCharStream>)aCharStream;
-
-- (void) emit;
-- (void) emit:(id<ANTLRToken>)aToken;
-
-#pragma mark Matching
-- (void) matchString:(NSString *)aString;
-- (void) matchAny;
-- (void) matchChar:(unichar) aChar;
-- (void) matchRangeFromChar:(unichar)fromChar to:(unichar)toChar;
-
-#pragma mark Informational
-- (NSUInteger) getLine;
-- (NSUInteger) getCharPositionInLine;
-- (NSInteger) getIndex;
-- (NSString *) getText;
-- (void) setText:(NSString *) theText;
-
-// error handling
-- (void) reportError:(ANTLRRecognitionException *)e;
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(NSMutableArray *)tokenNames;
-- (NSString *)getCharErrorDisplay:(NSInteger)c;
-- (void) recover:(ANTLRRecognitionException *)e;
-- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-
-@end
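ANTLRLexer is the abstract base for generated lexers: a grammar produces a subclass that implements mTokens, and callers pull tokens through nextToken. A hedged sketch of the usual drive loop; getType, getText, and ANTLRTokenTypeEOF are assumed to come from the ANTLRToken.h this header imports and are not declared in this file:

    #import "ANTLRLexer.h"

    static void PrintAllTokens(ANTLRLexer *lexer)   // lexer is a generated subclass
    {
        id<ANTLRToken> token;
        // nextToken keeps calling mTokens until the char stream is exhausted.
        while ( (token = [lexer nextToken]) != nil && [token getType] != ANTLRTokenTypeEOF ) {
            NSLog(@"%@", [token getText]);
        }
    }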
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLexerRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLexerRuleReturnScope.h
deleted file mode 100755
index 18ae374..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLexerRuleReturnScope.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@interface ANTLRLexerRuleReturnScope : NSObject {
-	int startToken;
-	int stopToken;
-}
-
-- (NSInteger) getStart;
-- (void) setStart: (NSInteger) aStart;
-
-- (NSInteger) getStop;
-- (void) setStop: (NSInteger) aStop;
-
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLinkBase.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLinkBase.h
deleted file mode 100644
index 21019e6..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLinkBase.h
+++ /dev/null
@@ -1,74 +0,0 @@
-//
-//  ANTLRLinkBase.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/14/10.
-//  [The "BSD licence"]
-//  Copyright (c) 2010 Alan Condit
-//  All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@protocol ANTLRLinkList <NSObject>
-
-+ (id<ANTLRLinkList>)newANTLRLinkBase;
-+ (id<ANTLRLinkList>)newANTLRLinkBase:(id<ANTLRLinkList>)np Prev:(id<ANTLRLinkList>)pp;
-
-- (void) dealloc;
-
-- (id<ANTLRLinkList>) append:(id<ANTLRLinkList>)node;
-- (id<ANTLRLinkList>) insert:(id<ANTLRLinkList>)node;
-
-- (id<ANTLRLinkList>) getfNext;
-- (void) setFNext:(id<ANTLRLinkList>)np;
-- (id<ANTLRLinkList>)getfPrev;
-- (void) setFPrev:(id<ANTLRLinkList>)pp;
-
-@end
-
-@interface ANTLRLinkBase : NSObject <ANTLRLinkList> {
-	id<ANTLRLinkList> fPrev;
-	id<ANTLRLinkList> fNext;
-}
-
-@property (retain, getter=getfPrev, setter=setFPrev:) id<ANTLRLinkList> fPrev;
-@property (retain, getter=getfNext, setter=setFNext:) id<ANTLRLinkList> fNext;
-
-+ (id<ANTLRLinkList>)newANTLRLinkBase;
-+ (id<ANTLRLinkList>)newANTLRLinkBase:(id<ANTLRLinkList>)np Prev:(id<ANTLRLinkList>)pp;
-- (id<ANTLRLinkList>)init;
-- (id<ANTLRLinkList>)initWithPtr:(id)np Prev:(id)pp;
-- (void)dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id<ANTLRLinkList>)append:(id<ANTLRLinkList>)node;
-- (id<ANTLRLinkList>)insert:(id<ANTLRLinkList>)node;
-
-- (id<ANTLRLinkList>)getfNext;
-- (void)setFNext:(id<ANTLRLinkList>) np;
-- (id<ANTLRLinkList>)getfPrev;
-- (void)setFPrev:(id<ANTLRLinkList>) pp;
-@end
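ANTLRLinkBase is the doubly linked node that ANTLRRuleMemo and friends chain through fNext/fPrev. A small sketch of the declared API; the exact linking behavior of append: lives in the implementation file removed elsewhere in this change, so the comment describes the expected result only:

    #import "ANTLRLinkBase.h"

    static void LinkExample(void)
    {
        id<ANTLRLinkList> head = [ANTLRLinkBase newANTLRLinkBase];
        id<ANTLRLinkList> tail = [ANTLRLinkBase newANTLRLinkBase];
        [head append:tail];                          // expected: head.fNext -> tail, tail.fPrev -> head
        NSLog(@"node after head: %@", [head getfNext]);
    }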
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLookaheadStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLookaheadStream.h
deleted file mode 100644
index ad48ff5..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRLookaheadStream.h
+++ /dev/null
@@ -1,73 +0,0 @@
-//
-//  ANTLRLookaheadStream.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-//  [The "BSD licence"]
-//  Copyright (c) 2010 Ian Michell 2010 Alan Condit
-//  All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRFastQueue.h"
-
-#define UNITIALIZED_EOF_ELEMENT_INDEX NSIntegerMax
-
-@interface ANTLRLookaheadStream : ANTLRFastQueue
-{
-	id eof;
-	NSInteger eofElementIndex;
-	NSInteger lastMarker;
-	NSInteger markDepth;
-}
-
-@property (readwrite, retain, getter=getEof, setter=setEof) id eof;
-@property (assign, getter=getEofElementIndex, setter=setEofElementIndex) NSInteger eofElementIndex;
-@property (assign, getter=getLastMarker, setter=setLastMarker) NSInteger lastMarker;
-@property (assign, getter=getMarkDepth, setter=setMarkDepth) NSInteger markDepth;
-
-- (id) initWithEOF:(id) o;
-- (id) nextElement;
-- (void) consume;
-- (void) sync:(NSInteger) need;
-- (void) fill:(NSInteger) n;
-- (id) LT:(NSInteger) i;
-- (id) LB:(NSInteger) i;
-- (id) currentSymbol;
-- (NSInteger) getIndex;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) seek:(NSInteger) i;
-- (id) getEof;
-- (void) setEof:(id) anID;
-- (NSInteger) getEofElementIndex;
-- (void) setEofElementIndex:(NSInteger) anInt;
-- (NSInteger) getLastMarker;
-- (void) setLastMarker:(NSInteger) anInt;
-- (NSInteger) getMarkDepth;
-- (void) setMarkDepth:(NSInteger) anInt;
-
-@end
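ANTLRLookaheadStream buffers elements produced by nextElement in its ANTLRFastQueue superclass so that LT: can look arbitrarily far ahead. A small helper sketch using only the methods declared above:

    #import "ANTLRLookaheadStream.h"

    static id PeekTwoAhead(ANTLRLookaheadStream *stream)
    {
        // LT:1 is the current element, LT:2 the one after it; the stream fills
        // its queue from nextElement as needed to satisfy the request.
        id ahead = [stream LT:2];
        [stream consume];          // advance past the current element
        return ahead;
    }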
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMap.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMap.h
deleted file mode 100644
index 80ad486..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMap.h
+++ /dev/null
@@ -1,82 +0,0 @@
-//
-//  ANTLRMap.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-#import "ANTLRMapElement.h"
-
-//#define GLOBAL_SCOPE      0
-//#define LOCAL_SCOPE       1
-#define HASHSIZE            101
-#define HBUFSIZE            0x2000
-
-@interface ANTLRMap : ANTLRPtrBuffer {
-	//ANTLRMap *fNext; // found in superclass
-    // TStringPool *fPool;
-    NSInteger lastHash;
-}
-
-//@property (copy) ANTLRMap *fNext;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger lastHash;
-
-// Construction/Destruction
-+ (id)newANTLRMap;
-+ (id)newANTLRMapWithLen:(NSInteger)aHashSize;
-
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-// Instance Methods
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-/* form hash value for string s */
--(NSInteger)hash:(NSString *)s;
-/*   look for s in ptrBuffer  */
--(id)lookup:(NSString *)s;
-/* look for s in ptrBuffer  */
--(id)install:(ANTLRMapElement *)sym;
-/*
- * delete entry from list
- */
-- (void)deleteANTLRMap:(ANTLRMapElement *)np;
-- (NSInteger)RemoveSym:(NSString *)s;
-- (void)delete_chain:(ANTLRMapElement *)np;
-- (ANTLRMapElement *)getTType:(NSString *)name;
-- (ANTLRMapElement *)getName:(NSInteger)ttype;
-- (NSInteger)getNode:(ANTLRMapElement *)aNode;
-- (void)putNode:(NSInteger)aTType Node:(id)aNode;
-- (void)putName:(NSString *)name TType:(NSInteger)ttype;
-- (void)putName:(NSString *)name Node:(id)aNode;
-
-@end
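ANTLRMap is the hashed name-to-token-type table the generated recognizers use. A short sketch of the declared calls; whether the reverse lookup by type finds an entry installed via putName:TType: depends on the implementation removed elsewhere in this change:

    #import "ANTLRMap.h"

    static void TokenNameMapExample(void)
    {
        ANTLRMap *map = [ANTLRMap newANTLRMapWithLen:HASHSIZE];
        [map putName:@"ID" TType:4];
        ANTLRMapElement *byName = [map getTType:@"ID"];   // look up by token name
        ANTLRMapElement *byType = [map getName:4];        // reverse lookup by token type
        NSLog(@"by name: %@, by type: %@", [byName getName], [byType getName]);
        [map release];
    }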
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMapElement.h
deleted file mode 100644
index e20d01c..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMapElement.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//
-//  ANTLRMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-
-@interface ANTLRMapElement : ANTLRBaseMapElement {
-    NSString *name;
-    id        node;
-}
-@property (retain, getter=getName, setter=setName:) NSString *name;
-@property (retain, getter=getNode, setter=setNode:) id node;
-
-+ (id) newANTLRMapElement;
-+ (id) newANTLRMapElementWithName:(NSString *)aName Type:(NSInteger)aTType;
-+ (id) newANTLRMapElementWithNode:(NSInteger)aTType Node:(id)aNode;
-+ (id) newANTLRMapElementWithName:(NSString *)aName Node:(id)aNode;
-+ (id) newANTLRMapElementWithObj1:(id)anObj1 Obj2:(id)anObj2;
-- (id) init;
-- (id) initWithName:(NSString *)aName Type:(NSInteger)aTType;
-- (id) initWithNode:(NSInteger)aTType Node:(id)aNode;
-- (id) initWithName:(NSString *)aName Node:(id)aNode;
-- (id) initWithObj1:(id)anObj1 Obj2:(id)anObj2;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger) count;
-- (NSInteger) size;
-- (NSString *)getName;
-- (void)setName:(NSString *)aName;
-- (id)getNode;
-- (void)setNode:(id)aNode;
-- (void)putNode:(id)aNode;
-- (void)putNode:(id)aNode With:(NSInteger)uniqueID;
-//- (void)setObject:(id)aNode atIndex:anIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedNotSetException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedNotSetException.h
deleted file mode 100644
index 57391d5..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedNotSetException.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-//  ANTLRMismatchedNotSetException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/13/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRBitSet.h"
-
-@interface ANTLRMismatchedNotSetException : ANTLRRecognitionException
-{
-    ANTLRBitSet *expecting;
-}
-@property (retain, getter=getExpecting, setter=setExpecting) ANTLRBitSet *expecting;
-
-- (ANTLRMismatchedNotSetException *)newANTLRMismatchedNotSetException;
-- (ANTLRMismatchedNotSetException *)newANTLRMismatchedNotSetException:(id<ANTLRIntStream>)anInput
-                                                               Follow:(ANTLRBitSet *)expecting;
-
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)expecting;
-
-- (NSString *)toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedRangeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedRangeException.h
deleted file mode 100755
index abda3bb..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedRangeException.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRIntStream;
-
-
-@interface ANTLRMismatchedRangeException : ANTLRRecognitionException {
-	NSRange range;
-}
-
-+ (id) exceptionWithRange:(NSRange) aRange stream:(id<ANTLRIntStream>) theInput;
-- (id) initWithRange:(NSRange) aRange stream:(id<ANTLRIntStream>) theInput;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedSetException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedSetException.h
deleted file mode 100755
index 3bd45fc..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedSetException.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLRMismatchedSetException : ANTLRRecognitionException {
-	NSSet *expecting;
-}
-
-@property (retain, getter=getExpecting, setter=setExpecting:) NSSet *expecting;
-
-+ (id) exceptionWithSet:(NSSet *) theExpectedSet stream:(id<ANTLRIntStream>) theStream;
-- (id) initWithSet:(NSSet *) theExpectedSet stream:(id<ANTLRIntStream>) theStream;
-
-- (NSSet *) getExpecting;
-- (void) setExpecting: (NSSet *) anExpectedSet;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedTokenException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedTokenException.h
deleted file mode 100755
index 5e1d77d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedTokenException.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRBitSet.h"
-
-@protocol ANTLRIntStream;
-
-@interface ANTLRMismatchedTokenException : ANTLRRecognitionException {
-	NSInteger expecting;
-	unichar expectingChar;
-	BOOL isTokenType;
-}
-
-@property (assign, getter=getExpecting, setter=setExpecting:) NSInteger expecting;
-@property (assign, getter=getExpectingChar, setter=setExpectingChar:) unichar expectingChar;
-@property (assign, getter=getIsTokenType, setter=setIsTokenType:) BOOL isTokenType;
-
-+ (id) newANTLRMismatchedTokenException:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-+ (id) newANTLRMismatchedTokenExceptionMissing:(NSInteger)expectedTokenType
-                                        Stream:(id<ANTLRIntStream>)anInput
-                                         Token:(id<ANTLRToken>)inserted;
-+ (id) newANTLRMismatchedTokenExceptionChar:(unichar)expectedCharacter Stream:(id<ANTLRIntStream>)anInput;
-+ (id) newANTLRMismatchedTokenExceptionStream:(id<ANTLRIntStream>)anInput
-                                    Exception:(NSException *)e
-                                       Follow:(ANTLRBitSet *)follow;
-- (id) initWithTokenType:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
--(id) initWithTokenType:(NSInteger)expectedTokenType
-                 Stream:(id<ANTLRIntStream>)anInput
-                  Token:(id<ANTLRToken>)inserted;
-- (id) initWithCharacter:(unichar)expectedCharacter Stream:(id<ANTLRIntStream>)anInput;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedTreeNodeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedTreeNodeException.h
deleted file mode 100755
index b61ab51..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMismatchedTreeNodeException.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRIntStream;
-
-@interface ANTLRMismatchedTreeNodeException : ANTLRRecognitionException {
-	NSInteger expecting;
-}
-
-@property (getter=getExpecting, setter=setExpecting) NSInteger expecting;
-
-+ (id) newANTLRMismatchedTreeNodeException:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-- (id) initWithTokenType:(NSInteger) expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMissingTokenException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMissingTokenException.h
deleted file mode 100644
index 1398e25..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRMissingTokenException.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-//  ANTLRMissingTokenException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRToken.h"
-
-@interface ANTLRMissingTokenException : ANTLRMismatchedTokenException {
-    id<ANTLRToken> inserted;
-}
-/** Used for remote debugger deserialization */
-+ (id) newANTLRMissingTokenException;
-+ (id) newANTLRMissingTokenException:(NSInteger)expected
-                              Stream:(id<ANTLRIntStream>)anInput
-                                With:(id<ANTLRToken>)insertedToken;
-- (id) init;
-- (id) init:(NSInteger)expected Stream:(id<ANTLRIntStream>)anInput With:(id<ANTLRToken>)insertedToken;
-
-- (NSInteger) getMissingType;
-
-- (NSString *)toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRNoViableAltException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRNoViableAltException.h
deleted file mode 100755
index b71baff..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRNoViableAltException.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRIntStream.h"
-
-@interface ANTLRNoViableAltException : ANTLRRecognitionException {
-	int decisionNumber;
-	int stateNumber;
-}
-
-+ (ANTLRNoViableAltException *) newANTLRNoViableAltException:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<ANTLRIntStream>)theStream;
-- (ANTLRNoViableAltException *) initWithDecision:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<ANTLRIntStream>)theStream;
-
-- (void)setDecisionNumber:(NSInteger)decisionNumber;
-- (void)setStateNumber:(NSInteger)stateNumber;
-
-
-@end
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRNodeMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRNodeMapElement.h
deleted file mode 100644
index 1c0c916..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRNodeMapElement.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-//  ANTLRRuleMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-#import "ANTLRTree.h"
-
-@interface ANTLRNodeMapElement : ANTLRBaseMapElement {
-    id<ANTLRTree> node;
-}
-
-@property (retain, getter=getNode, setter=setNode:) id node;
-
-+ (void)initialize;
-
-+ (id) newANTLRNodeMapElement;
-+ (id) newANTLRNodeMapElementWithIndex:(id)anIndex Node:(id<ANTLRTree>)aNode;
-- (id) init;
-- (id) initWithAnIndex:(id)anIndex Node:(id)aNode;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id<ANTLRTree>)getNode;
-- (void)setNode:(id<ANTLRTree>)aNode;
-
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRParseTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRParseTree.h
deleted file mode 100644
index 92554e3..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRParseTree.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-//  ANTLRParseTree.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/12/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseTree.h"
-#import "ANTLRCommonToken.h"
-
-@interface ANTLRParseTree : ANTLRBaseTree {
-	id<ANTLRToken> payload;
-	NSMutableArray *hiddenTokens;
-}
-/** A record of the rules used to match a token sequence.  The tokens
- *  end up as the leaves of this tree and rule nodes are the interior nodes.
- *  This really adds no functionality, it is just an alias for CommonTree
- *  that is more meaningful (specific) and holds a String to display for a node.
- */
-+ (ANTLRParseTree *)newANTLRParseTree:(id<ANTLRToken>)label;
-- (id)initWithLabel:(id<ANTLRToken>)label;
-
-- (id<ANTLRTree>)dupNode;
-- (NSInteger)getType;
-- (NSString *)getText;
-- (NSInteger)getTokenStartIndex;
-- (void)setTokenStartIndex:(NSInteger)index;
-- (NSInteger)getTokenStopIndex;
-- (void)setTokenStopIndex:(NSInteger)index;
-- (NSString *)toString;
-- (NSString *)toStringWithHiddenTokens;
-- (NSString *)toInputString;
-- (void)_toStringLeaves:(NSMutableString *)buf;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRParser.h
deleted file mode 100755
index 5ddaf50..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRParser.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRTokenStream.h"
-
-@interface ANTLRParser : ANTLRBaseRecognizer {
-	id<ANTLRTokenStream> input;
-}
-+ (ANTLRParser *)newANTLRParser:(id<ANTLRTokenStream>)anInput;
-+ (ANTLRParser *)newANTLRParser:(id<ANTLRTokenStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream;
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream State:(ANTLRRecognizerSharedState *)aState;
-
-- (id<ANTLRTokenStream>) getInput;
-- (void) setInput: (id<ANTLRTokenStream>) anInput;
-
-- (void) reset;
-
-- (id) getCurrentInputSymbol:(id<ANTLRTokenStream>)anInput;
-- (ANTLRCommonToken *)getMissingSymbol:(id<ANTLRTokenStream>)input
-                             Exception:(ANTLRRecognitionException *)e
-                                 TType:(NSInteger)expectedTokenType
-                                BitSet:(ANTLRBitSet *)follow;
-- (void) setTokenStream:(id<ANTLRTokenStream>)anInput;
-- (id<ANTLRTokenStream>)getTokenStream;
-- (NSString *)getSourceName;
-
-- (void) traceIn:(NSString *)ruleName Index:(int)ruleIndex;
-- (void) traceOut:(NSString *)ruleName Index:(NSInteger) ruleIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRParserRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRParserRuleReturnScope.h
deleted file mode 100755
index aef3dd0..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRParserRuleReturnScope.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRRuleReturnScope.h"
-
-@interface ANTLRParserRuleReturnScope : ANTLRRuleReturnScope {
-	id<ANTLRToken> startToken;
-	id<ANTLRToken> stopToken;
-}
-@property (retain, getter=getStart, setter=setStart:) id<ANTLRToken> startToken;
-@property (retain, getter=getStop, setter=setStop:)   id<ANTLRToken> stopToken;
-
-- (id<ANTLRToken>) getStart;
-- (void) setStart: (id<ANTLRToken>) aStart;
-
-- (id<ANTLRToken>) getStop;
-- (void) setStop: (id<ANTLRToken>) aStop;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRPtrBuffer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRPtrBuffer.h
deleted file mode 100644
index 188f597..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRPtrBuffer.h
+++ /dev/null
@@ -1,91 +0,0 @@
-//
-//  ANTLRPtrBuffer.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define BUFFSIZE         101
-
-@interface ANTLRPtrBuffer : ANTLRLinkBase {
-	//ANTLRPtrBuffer *fNext;
-    NSInteger BuffSize;
-    NSMutableData *buffer;
-    id *ptrBuffer;
-    NSInteger count;
-    NSInteger ptr;
-}
-
-@property (getter=getBuffSize, setter=setBuffSize:) NSInteger BuffSize;
-@property (retain, getter=getBuffer, setter=setBuffer:) NSMutableData *buffer;
-@property (retain, getter=getPtrBuffer, setter=setPtrBuffer:) id *ptrBuffer;
-@property (getter=getCount, setter=setCount:) NSInteger count;
-@property (getter=getPtr, setter=setPtr:) NSInteger ptr;
-
-// Contruction/Destruction
-+(ANTLRPtrBuffer *)newANTLRPtrBuffer;
-+(ANTLRPtrBuffer *)newANTLRPtrBufferWithLen:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-
-- (NSMutableData *)getBuffer;
-- (void)setBuffer:(NSMutableData *)np;
-- (NSInteger)getCount;
-- (void)setCount:(NSInteger)aCount;
-- (id *)getPtrBuffer;
-- (void)setPtrBuffer:(id *)np;
-- (NSInteger)getPtr;
-- (void)setPtr:(NSInteger)np;
-
-- (void) push:(id) v;
-- (id) pop;
-- (id) peek;
-
-- (void) addObject:(id) v;
-- (void) addObjectsFromArray:(ANTLRPtrBuffer *)anArray;
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx;
-- (id)   objectAtIndex:(NSInteger)idx;
-- (void) removeAllObjects;
-
-- (void) ensureCapacity:(NSInteger) index;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRecognitionException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRecognitionException.h
deleted file mode 100755
index 853dc0e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRecognitionException.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuntimeException.h"
-#import "ANTLRToken.h"
-#import "ANTLRIntStream.h"
-#import "ANTLRTree.h"
-
-@interface ANTLRRecognitionException : ANTLRRuntimeException {
-	id<ANTLRIntStream> input;
-	NSInteger index;
-	id<ANTLRToken> token;
-	id<ANTLRTree> node;
-	unichar c;
-	NSInteger line;
-	NSInteger charPositionInLine;
-}
-
-@property (retain, getter=getStream, setter=setStream:) id<ANTLRIntStream> input;
-@property (retain, getter=getToken, setter=setToken:) id<ANTLRToken>token;
-@property (retain, getter=getNode, setter=setNode:) id<ANTLRTree>node;
-@property (getter=getLine, setter=setLine:) NSInteger line;
-@property (getter=getCharPositionInLine, setter=setCharPositionInLine:) NSInteger charPositionInLine;
-
-+ (ANTLRRecognitionException *) newANTLRRecognitionException;
-+ (ANTLRRecognitionException *) exceptionWithStream:(id<ANTLRIntStream>) anInputStream; 
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream reason:(NSString *)aReason;
-- (NSInteger) unexpectedType;
-- (id<ANTLRToken>)getUnexpectedToken;
-
-- (id<ANTLRIntStream>) getStream;
-- (void) setStream: (id<ANTLRIntStream>) aStream;
-
-- (id<ANTLRToken>) getToken;
-- (void) setToken: (id<ANTLRToken>) aToken;
-
-- (id<ANTLRTree>) getNode;
-- (void) setNode: (id<ANTLRTree>) aNode;
-
-- (NSString *)getMessage;
-
-- (NSInteger)getCharPositionInLine;
-- (void)setCharPositionInLine:(NSInteger)aPos;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRecognizerSharedState.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRecognizerSharedState.h
deleted file mode 100755
index 0430b79..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRecognizerSharedState.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRRuleStack.h"
-
-@interface ANTLRRecognizerSharedState : NSObject {
-	NSMutableArray *following;          // a stack of FOLLOW bitsets used for context sensitive prediction and recovery
-    NSInteger _fsp;                     // Follow stack pointer
-	BOOL errorRecovery;                 // are we recovering?
-	NSInteger lastErrorIndex;
-	BOOL failed;                        // indicate that some match failed
-    NSInteger syntaxErrors;
-	NSInteger backtracking;             // the level of backtracking
-	ANTLRRuleStack *ruleMemo;			// store previous results of matching rules so we don't have to do it again. Hook in incremental stuff here, too.
-
-	id<ANTLRToken> token;
-	NSInteger  tokenStartCharIndex;
-	NSUInteger tokenStartLine;
-	NSUInteger tokenStartCharPositionInLine;
-	NSUInteger channel;
-	NSUInteger type;
-	NSString   *text;
-}
-
-@property (retain, getter=getFollowing, setter=setFollowing:) NSMutableArray *following;
-@property (assign) NSInteger _fsp;
-@property (assign) BOOL errorRecovery;
-@property (assign) NSInteger lastErrorIndex;
-@property (assign, getter=getFailed, setter=setFailed:) BOOL failed;
-@property (assign) NSInteger syntaxErrors;
-@property (assign, getter=getBacktracking, setter=setBacktracking) NSInteger backtracking;
-@property (retain, getter=getRuleMemo, setter=setRuleMemo:) ANTLRRuleStack *ruleMemo;
-@property (copy, getter=getToken, setter=setToken) id<ANTLRToken> token;
-@property (getter=getType,setter=setType:) NSUInteger type;
-@property (getter=getChannel,setter=setChannel:) NSUInteger channel;
-@property (getter=getTokenStartLine,setter=setTokenStartLine:) NSUInteger tokenStartLine;
-@property (getter=getCharPositionInLine,setter=setCharPositionInLine:) NSUInteger tokenStartCharPositionInLine;
-@property (getter=getTokenStartCharIndex,setter=setTokenStartCharIndex:) NSInteger tokenStartCharIndex;
-@property (retain, getter=getText, setter=setText) NSString *text;
-
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedState;
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedStateWithRuleLen:(NSInteger)aLen;
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedState:(ANTLRRecognizerSharedState *)aState;
-
-- (id) init;
-- (id) initWithRuleLen:(NSInteger)aLen;
-- (id) initWithState:(ANTLRRecognizerSharedState *)state;
-
-- (id<ANTLRToken>) getToken;
-- (void) setToken:(id<ANTLRToken>) theToken;
-
-- (NSUInteger) getType;
-- (void) setType:(NSUInteger) theTokenType;
-
-- (NSUInteger) getChannel;
-- (void) setChannel:(NSUInteger) theChannel;
-
-- (NSUInteger) getTokenStartLine;
-- (void) setTokenStartLine:(NSUInteger) theTokenStartLine;
-
-- (NSUInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSUInteger) theCharPosition;
-
-- (NSInteger) getTokenStartCharIndex;
-- (void) setTokenStartCharIndex:(NSInteger) theTokenStartCharIndex;
-
-- (NSString *) getText;
-- (void) setText:(NSString *) theText;
-
-
-- (NSMutableArray *) getFollowing;
-- (void)setFollowing:(NSMutableArray *)aFollow;
-- (ANTLRRuleStack *) getRuleMemo;
-- (void)setRuleMemo:(ANTLRRuleStack *)aRuleMemo;
-- (BOOL) isErrorRecovery;
-- (void) setIsErrorRecovery: (BOOL) flag;
-
-- (BOOL) getFailed;
-- (void) setFailed: (BOOL) flag;
-
-- (NSInteger)  getBacktracking;
-- (void) setBacktracking:(NSInteger) value;
-- (void) increaseBacktracking;
-- (void) decreaseBacktracking;
-- (BOOL) isBacktracking;
-
-- (NSInteger) lastErrorIndex;
-- (void) setLastErrorIndex:(NSInteger) value;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRewriteRuleElementStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRewriteRuleElementStream.h
deleted file mode 100755
index 132a0cc..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRewriteRuleElementStream.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-
-typedef union {
-    id single;
-    NSMutableArray *multiple;
-} Elements;
-
-// TODO: this should be separated into stream and enumerator classes
-@interface ANTLRRewriteRuleElementStream : NSObject {
-    NSInteger cursor;
-    BOOL dirty;        ///< indicates whether the stream should return copies of its elements, set to true after a call to -reset
-    BOOL isSingleElement;
-    Elements elements;
-    
-    NSString *elementDescription;
-    id<ANTLRTreeAdaptor> treeAdaptor;
-}
-
-@property (assign, getter=GetCursor, setter=SetCursor:) NSInteger cursor;
-@property (assign, getter=Getdirty, setter=Setdirty:) BOOL dirty;
-@property (assign, getter=GetIsSingleElement, setter=SetIsSingleElement:) BOOL isSingleElement;
-@property (assign, getter=GetElement, setter=SetElement:) Elements elements;
-@property (assign, getter=GetElementDescription, setter=SetElementDescription:) NSString *elementDescription;
-@property (retain, getter=GetTreeAdaptor, setter=SetTreeAdaptor:) id<ANTLRTreeAdaptor> treeAdaptor;
-
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription;
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription
-                                                            element:(id)anElement;
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription
-                                                           elements:(NSArray *)theElements;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-
-- (void)reset;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor;
-
-- (void) addElement:(id)anElement;
-- (NSInteger) size;
- 
-- (BOOL) hasNext;
-- (id<ANTLRTree>) nextTree;
-- (id<ANTLRTree>) _next;       // internal: TODO: redesign if necessary. maybe delegate
-
-- (id) copyElement:(id)element;
-- (id) toTree:(id)element;
-
-- (NSString *) getDescription;
-- (void) setDescription:(NSString *)description;
-
-@end
-
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRewriteRuleSubtreeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRewriteRuleSubtreeStream.h
deleted file mode 100755
index 1d18b24..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRewriteRuleSubtreeStream.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRewriteRuleElementStream.h"
-
-@interface ANTLRRewriteRuleSubtreeStream : ANTLRRewriteRuleElementStream {
-
-}
-
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription;
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-                                                             element:(id)anElement;
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-                                                            elements:(NSArray *)theElements;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-
-- (id) nextNode;
-- (id) dup:(id)element;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRewriteRuleTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRewriteRuleTokenStream.h
deleted file mode 100755
index 3a516de..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRewriteRuleTokenStream.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRewriteRuleElementStream.h"
-
-
-@interface ANTLRRewriteRuleTokenStream : ANTLRRewriteRuleElementStream {
-
-}
-
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)anAdaptor
-                          description:(NSString *)elementDescription;
-/** Create a stream with one element */
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)adaptor
-                          description:(NSString *)elementDescription
-                              element:(id) oneElement;
-/** Create a stream, but feed off an existing list */
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)adaptor
-                          description:(NSString *)elementDescription
-                             elements:(NSMutableArray *)elements;
-
-- (id) init;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor 
-               description:(NSString *)aDescription
-                   element:(id)element;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription
-                  elements:(NSMutableArray *)elements;
-                               
-/** Get next token from stream and make a node for it */
-- (id) nextNode;
-
-- (id) nextToken;
-
-/** Don't convert to a tree unless they explicitly call nextTree.
- *  This way we can do hetero tree nodes in rewrite.
- */
-- (id<ANTLRTree>) toTree:(id<ANTLRToken>)element;
-
-@end
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleMapElement.h
deleted file mode 100644
index e040b18..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleMapElement.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRRuleMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-
-@interface ANTLRRuleMapElement : ANTLRBaseMapElement {
-    NSNumber *ruleNum;
-}
-
-@property (retain, getter=getRuleNum, setter=setRuleNum:) NSNumber *ruleNum;
-
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElement;
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElementWithIndex:(NSNumber *)anIdx;
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElementWithIndex:(NSNumber *)anIdx RuleNum:(NSNumber *)aRuleNum;
-- (id) init;
-- (id) initWithAnIndex:(NSNumber *)anIdx;
-- (id) initWithAnIndex:(NSNumber *)anIdx RuleNum:(NSNumber *)aRuleNum;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSNumber *)getRuleNum;
-- (void)setRuleNum:(NSNumber *)aRuleNum;
-
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleMemo.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleMemo.h
deleted file mode 100644
index 63a5ae2..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleMemo.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-//  ANTLRRuleMemo.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-@interface ANTLRRuleMemo : ANTLRLinkBase {
-    NSNumber *startIndex;
-    NSNumber *stopIndex;
-}
-
-@property (retain, getter=getStartIndex, setter=setStartIndex) NSNumber *startIndex;
-@property (retain, getter=getStopIndex, setter=setStopIndex) NSNumber *stopIndex;
-
-+ (ANTLRRuleMemo *)newANTLRRuleMemo;
-+ (ANTLRRuleMemo *)newANTLRRuleMemoWithStartIndex:(NSNumber *)aStartIndex StopIndex:(NSNumber *)aStopIndex;
-
-- (id) init;
-- (id) initWithStartIndex:(NSNumber *)aStartIndex StopIndex:(NSNumber *)aStopIndex;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-- (ANTLRRuleMemo *)getRuleWithStartIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStartIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStopIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStartIndex;
-- (void)setStartIndex:(NSNumber *)aStartIndex;
-- (NSNumber *)getStopIndex;
-- (void)setStopIndex:(NSNumber *)aStopIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleReturnScope.h
deleted file mode 100644
index 4750c16..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleReturnScope.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRRuleReturnScope.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-
-@interface ANTLRRuleReturnScope : NSObject <NSCopying> {
-
-}
-
-/** Return the start token or tree */
-- (id<ANTLRToken>) getStart;
-
-/** Return the stop token or tree */
-- (id<ANTLRToken>) getStop;
-
-/** Has a value potentially if output=AST; */
-- (id) getNode;
-
-/** Has a value potentially if output=template; Don't use StringTemplate
- *  type as it then causes a dependency with ST lib.
- */
-- (id) getTemplate;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleStack.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleStack.h
deleted file mode 100644
index 12d450b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleStack.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-//  ANTLRRuleStack.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseStack.h"
-#import "ANTLRHashRule.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRRuleStack : ANTLRBaseStack {
-}
-
-// Contruction/Destruction
-+(ANTLRRuleStack *)newANTLRRuleStack;
-+(ANTLRRuleStack *)newANTLRRuleStack:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-- (ANTLRHashRule *) pop;
-
-- (void) insertObject:(ANTLRHashRule *)aHashRule atIndex:(NSInteger)idx;
-- (ANTLRHashRule *)objectAtIndex:(NSInteger)idx;
-- (void)putHashRuleAtRuleIndex:(NSInteger)aRuleIndex StartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex;
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleStack.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleStack.m
deleted file mode 100644
index 909192f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuleStack.m
+++ /dev/null
@@ -1,147 +0,0 @@
-//
-//  ANTLRRuleStack.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRRuleStack.h"
-#import "ANTLRTree.h"
-
-/*
- * Start of ANTLRRuleStack
- */
-@implementation ANTLRRuleStack
-
-+ (ANTLRRuleStack *)newANTLRRuleStack
-{
-    return [[ANTLRRuleStack alloc] init];
-}
-
-+ (ANTLRRuleStack *)newANTLRRuleStack:(NSInteger)cnt
-{
-    return [[ANTLRRuleStack alloc] initWithLen:cnt];
-}
-
-- (id)init
-{
-	if ((self = [super init]) != nil) {
-	}
-    return( self );
-}
-
-- (id)initWithLen:(NSInteger)cnt
-{
-	if ((self = [super initWithLen:cnt]) != nil) {
-	}
-    return( self );
-}
-
-- (void)dealloc
-{
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    return [super copyWithZone:aZone];
-}
-
-- (NSInteger)count
-{
-    ANTLRRuleMemo *anElement;
-    NSInteger aCnt = 0;
-    for( int i = 0; i < BuffSize; i++ ) {
-        if ((anElement = ptrBuffer[i]) != nil)
-            aCnt++;
-    }
-    return aCnt;
-}
-
-- (NSInteger)size
-{
-    ANTLRRuleMemo *anElement;
-    NSInteger aSize = 0;
-    for( int i = 0; i < BuffSize; i++ ) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize++;
-        }
-    }
-    return aSize;
-}
-
-- (ANTLRHashRule *)pop
-{
-    return (ANTLRHashRule *)[super pop];
-}
-
-- (void) insertObject:(ANTLRHashRule *)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        NSLog( @"In ANTLRRuleStack attempting to insert aRule at Index %d, but Buffer is only %d long\n", idx, BuffSize );
-        [self ensureCapacity:idx];
-    }
-    if ( aRule != ptrBuffer[idx] ) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (ANTLRHashRule *)objectAtIndex:(NSInteger)idx
-{
-    if (idx < BuffSize) {
-        return ptrBuffer[idx];
-    }
-    return nil;
-}
-
-- (void)putHashRuleAtRuleIndex:(NSInteger)aRuleIndex StartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex
-{
-    ANTLRHashRule *aHashRule;
-    ANTLRRuleMemo *aRuleMemo;
-
-    if (aRuleIndex >= BuffSize) {
-        NSLog( @"putHashRuleAtRuleIndex attempting to insert aRule at Index %d, but Buffer is only %d long\n", aRuleIndex, BuffSize );
-        [self ensureCapacity:aRuleIndex];
-    }
-    if ((aHashRule = ptrBuffer[aRuleIndex]) == nil) {
-        aHashRule = [[ANTLRHashRule newANTLRHashRuleWithLen:17] retain];
-        ptrBuffer[aRuleIndex] = aHashRule;
-    }
-    if (( aRuleMemo = [aHashRule objectAtIndex:aStartIndex] ) == nil ) {
-        aRuleMemo = [[ANTLRRuleMemo newANTLRRuleMemo] retain];
-        [aHashRule insertObject:aRuleMemo atIndex:aStartIndex];
-    }
-    [aRuleMemo setStartIndex:[NSNumber numberWithInteger:aStartIndex]];
-    [aRuleMemo setStopIndex:[NSNumber numberWithInteger:aStopIndex]];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuntimeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuntimeException.h
deleted file mode 100644
index 6cf0918..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRRuntimeException.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-//  ANTLRRuntimeException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/5/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@interface ANTLRRuntimeException : NSException
-{
-}
-
-+ (ANTLRRuntimeException *) newANTLRNoSuchElementException:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRIllegalArgumentException:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRRuntimeException:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRRuntimeException:(NSString *)aName reason:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRRuntimeException:(NSString *)aName reason:(NSString *)aReason userInfo:aUserInfo;
-
-- (id) init;
-- (id)initWithRuntime:(NSString *)aReason;
-- (id)initWithReason:(NSString *)aReason;
-- (id)initWithName:(NSString *)aName reason:(NSString *)aReason;
-- (id)initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-- (NSString *) Description;
-
-//    - (void)setDecisionNumber:(NSInteger)decisionNumber;
-//    - (void)setStateNumber:(NSInteger)stateNumber;
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRStreamEnumerator.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRStreamEnumerator.h
deleted file mode 100644
index a0e0f69..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRStreamEnumerator.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-//  ANTLRStreamEnumertor.h
-//  ANTLR
-//
-//  Created by Ian Michell on 29/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRStreamEnumerator : NSEnumerator 
-{
-	NSInteger i;
-	id eof;
-	NSMutableArray *nodes;
-}
-
--(id) initWithNodes:(NSMutableArray *) n andEOF:(id) o;
--(BOOL) hasNext;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRStringStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRStringStream.h
deleted file mode 100755
index 2b13c7d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRStringStream.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCharStream.h"
-#import "ANTLRCharStreamState.h"
-#import "ANTLRPtrBuffer.h"
-
-@interface ANTLRStringStream : NSObject < ANTLRCharStream > {
-	NSString *data;
-	NSInteger n;
-	NSInteger p;
-	NSInteger line;
-	NSInteger charPositionInLine;
-	NSInteger markDepth;
-	ANTLRPtrBuffer *markers;
-	NSInteger lastMarker;
-	NSString *name;
-    ANTLRCharStreamState *charState;
-}
-
-@property (retain, getter=getData,setter=setData:) NSString *data;
-@property (getter=getP,setter=setP:) NSInteger p;
-@property (getter=getN,setter=setN:) NSInteger n;
-@property (getter=getLine,setter=setLine:) NSInteger line;
-@property (getter=getCharPositionInLine,setter=setCharPositionInLine:) NSInteger charPositionInLine;
-@property (getter=getMarkDepth,setter=setMarkDepth:) NSInteger markDepth;
-@property (retain, getter=getMarkers, setter=setMarkers:) ANTLRPtrBuffer *markers;
-@property (getter=getLastMarker,setter=setLastMarker:) NSInteger lastMarker;
-@property (retain, getter=getSourceName, setter=setSourceName:) NSString *name;
-@property (retain, getter=getCharState, setter=setCharState:) ANTLRCharStreamState *charState;
-
-+ newANTLRStringStream;
-
-+ newANTLRStringStream:(NSString *)aString;
-
-+ newANTLRStringStream:(char *)myData Count:(NSInteger)numBytes;
-
-- (id) init;
-
-// this initializer copies the string
-- (id) initWithString:(NSString *) theString;
-
-// This is the preferred constructor as no data is copied
-- (id) initWithStringNoCopy:(NSString *) theString;
-
-- (id) initWithData:(char *)myData Count:(NSInteger)numBytes;
-
-- (void) dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-// reset the stream's state, but keep the data to feed off
-- (void) reset;
-// consume one character from the stream
-- (void) consume;
-
-// look ahead i characters
-- (NSInteger) LA:(NSInteger) i;
-- (NSInteger) LT:(NSInteger) i;
-
-// returns the position of the current input symbol
-- (NSInteger) getIndex;
-// total length of the input data
-- (NSInteger) size;
-
-// seek and rewind in the stream
-- (NSInteger) mark;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) release:(NSInteger) marker;
-- (void) seek:(NSInteger) index;
-
-// provide the streams data (e.g. for tokens using indices)
-- (NSString *) substring:(NSInteger)startIndex To:(NSInteger)stopIndex;
-- (NSString *) substringWithRange:(NSRange) theRange;
-
-// used for tracking the current position in the input stream
-- (NSInteger) getLine;
-- (void) setLine:(NSInteger) theLine;
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger) thePos;
-
-- (NSInteger) getN;
-- (void) setN:(NSInteger)num;
-
-- (NSInteger) getP;
-- (void) setP:(NSInteger)num;
-
-- (ANTLRPtrBuffer *)getMarkers;
-- (void) setMarkers:(ANTLRPtrBuffer *)aMarkerList;
-
-- (NSString *)getSourceName;
-
-- (NSString *)toString;
-
-// accessors to the raw data of this stream
-- (NSString *) getData;
-- (void) setData: (NSString *) aData;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRSymbolStack.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRSymbolStack.h
deleted file mode 100644
index 169df9f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRSymbolStack.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//
-//  ANTLRSymbolStack.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseStack.h"
-// #import "ANTLRSymbolScope.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRSymbolsScope : NSObject
-{
-    
-}
-
-+ (ANTLRSymbolsScope *)newANTLRSymbolsScope;
-
-- (id)init;
-@end
-
-
-@interface ANTLRSymbolStack : ANTLRBaseStack {
-}
-
-// Construction/Destruction
-+(ANTLRSymbolStack *)newANTLRSymbolStack;
-+(ANTLRSymbolStack *)newANTLRSymbolStackWithLen:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-
--(ANTLRSymbolsScope *)getHashMapEntry:(NSInteger)idx;
-
--(ANTLRSymbolsScope **)getHashMap;
-
--(ANTLRSymbolsScope *) pop;
-
-- (void) insertObject:(ANTLRSymbolsScope *)aScope atIndex:(NSInteger)idx;
-- (ANTLRSymbolsScope *)objectAtIndex:(NSInteger)idx;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRSymbolStack.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRSymbolStack.m
deleted file mode 100644
index 1dd6775..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRSymbolStack.m
+++ /dev/null
@@ -1,123 +0,0 @@
-//
-//  ANTLRSymbolStack.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRSymbolStack.h"
-#import "ANTLRTree.h"
-
-
-@implementation ANTLRSymbolsScope
-
-+ (ANTLRSymbolsScope *)newANTLRSymbolsScope
-{
-    return( [[ANTLRSymbolsScope alloc] init] );
-}
-
-- (id)init
-{
-    if ((self = [super init]) != nil) {
-    }
-    return (self);
-}
-
-@end
-
-/*
- * Start of ANTLRSymbolStack
- */
-@implementation ANTLRSymbolStack
-
-+(ANTLRSymbolStack *)newANTLRSymbolStack
-{
-    return [[ANTLRSymbolStack alloc] init];
-}
-
-+(ANTLRSymbolStack *)newANTLRSymbolStackWithLen:(NSInteger)cnt
-{
-    return [[ANTLRSymbolStack alloc] initWithLen:cnt];
-}
-
--(id)init
-{
-	if ((self = [super init]) != nil) {
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)cnt
-{
-	if ((self = [super initWithLen:cnt]) != nil) {
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    return [super copyWithZone:aZone];
-}
-
--(ANTLRSymbolsScope *)getHashMapEntry:(NSInteger)idx
-{
-	return( (ANTLRSymbolsScope *)[super objectAtIndex:idx] );
-}
-
--(ANTLRSymbolsScope **)getHashMap
-{
-	return( (ANTLRSymbolsScope **)ptrBuffer );
-}
-
--(ANTLRSymbolsScope *) pop
-{
-    return (ANTLRSymbolsScope *)[super pop];
-}
-
-- (void) insertObject:(ANTLRSymbolsScope *)aRule atIndex:(NSInteger)idx
-{
-    if (aRule != ptrBuffer[idx]) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (ANTLRSymbolsScope *)objectAtIndex:(NSInteger)idx
-{
-    return (ANTLRSymbolsScope *)[super objectAtIndex:idx];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRToken+DebuggerSupport.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRToken+DebuggerSupport.h
deleted file mode 100755
index 659e763..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRToken+DebuggerSupport.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-//  ANTLRToken+DebuggerSupport.h
-//  ANTLR
-//
-//  Created by Kay Röpke on 03.12.2006.
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-
-@interface ANTLRCommonToken(DebuggerSupport)
-
-- (NSString *)debuggerDescription;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRToken.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRToken.h
deleted file mode 100755
index 64524f0..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRToken.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-
-typedef enum {
-	ANTLRTokenTypeEOF = -1,
-	ANTLRTokenTypeInvalid,
-	ANTLRTokenTypeEOR,
-	ANTLRTokenTypeDOWN,
-	ANTLRTokenTypeUP,
-	ANTLRTokenTypeMIN
-} ANTLRTokenType;
-
-typedef enum {
-	ANTLRTokenChannelDefault = 0,
-    ANTLRTokenChannelHidden = 99
-} ANTLRTokenChannel;
-
-#define HIDDEN 99
-
-@protocol ANTLRToken < NSObject, NSCopying >
-
-// The singleton eofToken instance.
-+ (id<ANTLRToken>) eofToken;
-// The default channel for this class of Tokens
-+ (ANTLRTokenChannel) defaultChannel;
-
-// provide hooks to explicitly set the text, as opposed to using the indices into the CharStream
-- (NSString *) getText;
-- (void) setText:(NSString *) theText;
-
-- (NSInteger) getType;
-- (void) setType: (NSInteger) aType;
-
-// ANTLR v3 provides automatic line and position tracking. Subclasses do not need to
-// override these, if they do not want to store line/pos tracking information
-- (NSUInteger) getLine;
-- (void) setLine: (NSUInteger) aLine;
-
-- (NSUInteger) getCharPositionInLine;
-- (void) setCharPositionInLine: (NSUInteger) aCharPositionInLine;
-
-// explicitly change the channel this Token is on. The default parser implementation
-// just sees the defaultChannel.
-// A common idiom is to put whitespace tokens on channel 99.
-- (NSUInteger) getChannel;
-- (void) setChannel: (NSUInteger) aChannel;
-
-// the index of this Token into the TokenStream
-- (NSUInteger) getTokenIndex;
-- (void) setTokenIndex: (NSUInteger) aTokenIndex;
-- (NSString *)toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTokenRewriteStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTokenRewriteStream.h
deleted file mode 100644
index 0d8681f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTokenRewriteStream.h
+++ /dev/null
@@ -1,170 +0,0 @@
-//
-//  ANTLRTokenRewriteStream.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/19/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTokenStream.h"
-#import "ANTLRLinkBase.h"
-#import "ANTLRHashMap.h"
-#import "ANTLRMapElement.h"
-#import "ANTLRTokenSource.h"
-
-// Define the rewrite operation hierarchy
-
-@interface ANTLRRewriteOperation : ANTLRCommonTokenStream
-{
-/** What index into rewrites List are we? */
-NSInteger instructionIndex;
-/** Token buffer index. */
-NSInteger index;
-NSString *text;
-}
-
-@property (getter=getInstructionIndex, setter=setInstructionIndex:) NSInteger instructionIndex;
-@property (getter=getIndex, setter=setIndex:) NSInteger index;
-@property (retain, getter=getText, setter=setText:) NSString *text;
-
-+ (ANTLRRewriteOperation *) newANTLRRewriteOperation:(NSInteger)index Text:(NSString *)text;
-
-- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText;
-
-/** Execute the rewrite operation by possibly adding to the buffer.
- *  Return the index of the next token to operate on.
- */
-- (NSInteger) execute:(NSString *)buf;
-
-- (NSString *)toString;
-- (NSInteger) indexOf:(char)aChar inString:(NSString *)aString;
-@end
-
-@interface ANTLRInsertBeforeOp : ANTLRRewriteOperation {
-}
-
-+ (ANTLRInsertBeforeOp *) newANTLRInsertBeforeOp:(NSInteger)anIndex Text:(NSString *)theText;
-- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText;
-
-@end
-
-/** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
- *  instructions.
- */
-@interface ANTLRReplaceOp : ANTLRRewriteOperation {
-    NSInteger lastIndex;
-}
-
-@property (getter=getLastIndex, setter=setLastIndex:) NSInteger lastIndex;
-
-+ (ANTLRReplaceOp *) newANTLRReplaceOp:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString*)theText;
-- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-
-- (NSInteger) execute:(NSString *)buf;
-- (NSString *)toString;
-
-@end
-
-@interface ANTLRDeleteOp : ANTLRReplaceOp {
-}
-+ (ANTLRDeleteOp *) newANTLRDeleteOp:(NSInteger)from ToIndex:(NSInteger)to;
-
-- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to;
-
-- (NSString *)toString;
-
-@end
-
-
-@interface ANTLRTokenRewriteStream : ANTLRCommonTokenStream {
-/** You may have multiple, named streams of rewrite operations.
- *  I'm calling these things "programs."
- *  Maps String (name) -> rewrite (List)
- */
-ANTLRHashMap *programs;
-
-/** Map String (program name) -> Integer index */
-ANTLRHashMap *lastRewriteTokenIndexes;
-}
-
-@property (retain, getter=getPrograms, setter=setPrograms:) ANTLRHashMap *programs;
-@property (retain, getter=getLastRewriteTokenIndexes, setter=setLastRewriteTokenIndexes:) ANTLRHashMap *lastRewriteTokenIndexes;
-
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream;
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream:(id<ANTLRTokenSource>) aTokenSource;
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream:(id<ANTLRTokenSource>) aTokenSource Channel:(NSInteger)aChannel;
-
-- (id) init;
-- (id)initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource;
-- (id)initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource Channel:(NSInteger)aChannel;
-
-- (ANTLRHashMap *)getPrograms;
-- (void)setPrograms:(ANTLRHashMap *)aProgList;
-
-- (void) rollback:(NSInteger)instructionIndex;
-- (void) rollback:(NSString *)programName Index:(NSInteger)anInstructionIndex;
-- (void) deleteProgram;
-- (void) deleteProgram:(NSString *)programName;
-- (void) insertAfterToken:(id<ANTLRToken>)t Text:(NSString *)theText;
-- (void) insertAfterIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) insertAfterProgNam:(NSString *)programName Index:(NSInteger)anIndex Text:(NSString *)theText;
-
-
-- (void) insertBeforeToken:(id<ANTLRToken>)t Text:(NSString *)theText;
-- (void) insertBeforeIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) insertBeforeProgName:(NSString *)programName Index:(NSInteger)index Text:(NSString *)theText;
-- (void) replaceFromIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) replaceFromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-- (void) replaceFromToken:(id<ANTLRToken>)indexT Text:(NSString *)theText;
-- (void) replaceFromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to Text:(NSString *)theText;
-- (void) replaceProgNam:(NSString *)programName Token:(id<ANTLRToken>)from Token:(id<ANTLRToken>)to Text:(NSString *)theText;
-- (void) replaceProgNam:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-- (void) delete:(NSInteger)anIndex;
-- (void) delete:(NSInteger)from ToIndex:(NSInteger)to;
-- (void) deleteToken:(id<ANTLRToken>)indexT;
-- (void) deleteFromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to;
-- (void) delete:(NSString *)programName FromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to;
-- (void) delete:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to;
-- (NSInteger)getLastRewriteTokenIndex;
-- (NSInteger)getLastRewriteTokenIndex:(NSString *)programName;
-- (void)setLastRewriteTokenIndex:(NSString *)programName Index:(NSInteger)anInt;
-- (ANTLRHashMap *) getProgram:(NSString *)name;
-- (ANTLRHashMap *) initializeProgram:(NSString *)name;
-- (NSString *)toOriginalString;
-- (NSString *)toOriginalString:(NSInteger)start End:(NSInteger)end;
-- (NSString *)toString;
-- (NSString *)toString:(NSString *)programName;
-- (NSString *)toStringFromStart:(NSInteger)start ToEnd:(NSInteger)end;
-- (NSString *)toString:(NSString *)programName FromStart:(NSInteger)start ToEnd:(NSInteger)end;
-- (ANTLRHashMap *)reduceToSingleOperationPerIndex:(ANTLRHashMap *)rewrites;
-- (ANTLRHashMap *)getKindOfOps:(ANTLRHashMap *)rewrites KindOfClass:(Class)kind;
-- (ANTLRHashMap *)getKindOfOps:(ANTLRHashMap *)rewrites KindOfClass:(Class)kind Index:(NSInteger)before;
-- (NSString *)catOpText:(id)a PrevText:(id)b;
-- (NSMutableString *)toDebugString;
-- (NSMutableString *)toDebugStringFromStart:(NSInteger)start ToEnd:(NSInteger)end;
-                    
-@end
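
The comments above describe named "programs" of rewrite instructions that are only applied when the stream is rendered. Below is a minimal sketch of that workflow against the default program, using only methods declared above; the token source argument and the concrete indices are assumptions, and ownership bookkeeping is omitted.

#import "ANTLRTokenRewriteStream.h"

// Hedged sketch: queue insert/replace/delete instructions, then render the
// edited text. The original token text is left untouched until toString runs.
static NSString *RewriteExample(id<ANTLRTokenSource> lexer)
{
    ANTLRTokenRewriteStream *tokens =
        [ANTLRTokenRewriteStream newANTLRTokenRewriteStream:lexer];
    [tokens insertBeforeIndex:0 Text:@"/* header */ "];   // queued, not applied yet
    [tokens replaceFromIndex:3 ToIndex:5 Text:@"fixed"];
    [tokens delete:7 ToIndex:9];
    return [tokens toString];                             // edits applied here
}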
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTokenSource.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTokenSource.h
deleted file mode 100755
index 4d6b6ee..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTokenSource.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRToken.h"
-
-// Anything that responds to -nextToken can be treated as a lexer.
-// For instance this can be a flex lexer or a handwritten one or even
-// a proxy for a remotely running token source (database, lexer, whatever).
-@protocol ANTLRTokenSource <NSObject, NSCopying>
-
-- (id<ANTLRToken>) nextToken;
-- (NSString *)getSourceName;
-
-@end
\ No newline at end of file
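
As the comment above notes, anything answering -nextToken can stand in for a lexer. A minimal sketch that drains such a source until EOF and skips hidden-channel tokens, combining only methods declared in the ANTLRTokenSource and ANTLRToken protocols; the source passed in is an assumption.

#import "ANTLRTokenSource.h"

// Hedged sketch: print every visible token produced by a token source.
static void DumpVisibleTokens(id<ANTLRTokenSource> source)
{
    id<ANTLRToken> t;
    while ((t = [source nextToken]) != nil && [t getType] != ANTLRTokenTypeEOF) {
        if ([t getChannel] == ANTLRTokenChannelHidden)
            continue;                                // e.g. whitespace on channel 99
        NSLog(@"%lu:%lu %@", (unsigned long)[t getLine],
              (unsigned long)[t getCharPositionInLine], [t getText]);
    }
}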
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTokenStream.h
deleted file mode 100755
index c104578..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRIntStream.h"
-#import "ANTLRToken.h"
-
-@protocol ANTLRTokenStream < ANTLRIntStream >
-
-// Get Token at current input pointer + i ahead where i=1 is next Token.
-// i<0 indicates tokens in the past.  So -1 is previous token and -2 is
-// two tokens ago. LT:0 is undefined.  For i>=n, return Token.EOFToken.
-// Return null for LT:0 and any index that results in an absolute address
-// that is negative.
-
-- (id<ANTLRToken>) LT:(NSInteger) i;
-
-- (id<ANTLRToken>) getToken:(NSUInteger) i;
-
-- (id) getTokenSource;
-
-- (NSString *) toString;
-/** Return the text of all tokens from start to stop, inclusive.
- *  If the stream does not buffer all the tokens then it can just
- *  return "" or null;  Users should not access $ruleLabel.text in
- *  an action of course in that case.
- */
-- (NSString *)toStringFromStart:(NSInteger)startIdx ToEnd:(NSInteger)stopIdx;
-
-/** Because the user is not required to use a token with an index stored
- *  in it, we must provide a means for two token objects themselves to
- *  indicate the start/end location.  Most often this will just delegate
- *  to the other toString(int,int).  This is also parallel with
- *  the TreeNodeStream.toString(Object,Object).
- */
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-
-@end
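
A minimal sketch of the LT: contract documented above: LT:1 is the next token, negative indices look back, and out-of-range lookback returns nil. Only protocol methods are used; the stream passed in is an assumption.

#import "ANTLRTokenStream.h"

// Hedged sketch: peek forward and backward without consuming anything.
static void PeekAround(id<ANTLRTokenStream> tokens)
{
    id<ANTLRToken> next = [tokens LT:1];     // next token to be consumed
    id<ANTLRToken> prev = [tokens LT:-1];    // most recently consumed token, or nil
    NSLog(@"next=%@ prev=%@", [next getText], prev ? [prev getText] : @"<none>");
}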
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTree.h
deleted file mode 100755
index f269b2d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTree.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-@protocol ANTLRTree < NSObject, NSCopying >
-
-//+ (id<ANTLRTree>) invalidNode;
-
-- (id<ANTLRTree>) getChild:(NSUInteger)index;
-- (NSUInteger) getChildCount;
-
-// Tree tracks parent and child index now > 3.0
-
-- (id<ANTLRTree>)getParent;
-
-- (void) setParent:(id<ANTLRTree>)t;
-
-/** Is there a node above with token type ttype? */
-- (BOOL) hasAncestor:(NSInteger)ttype;
-
-/** Walk upwards and get first ancestor with this token type. */
-- (id<ANTLRTree>) getAncestor:(NSInteger) ttype;
-
-/** Return a list of all ancestors of this node.  The first node of
- *  list is the root and the last is the parent of this node.
- */
-- (NSMutableArray *) getAncestors;
-
-/** This node is what child index? 0..n-1 */
-- (NSInteger) getChildIndex;
-
-- (void) setChildIndex:(NSInteger) index;
-
-/** Set the parent and child index values for all children */
-- (void) freshenParentAndChildIndexes;
-
-/** Add t as a child to this node.  If t is null, do nothing.  If t
- *  is nil, add all children of t to this' children.
- */
-- (void) addChild:(id<ANTLRTree>) t;
-
-/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
-- (void) setChild:(NSInteger)i With:(id<ANTLRTree>) t;
-
-- (id) deleteChild:(NSInteger) i;
-
-/** Delete children from start to stop and replace with t even if t is
- *  a list (nil-root tree).  num of children can increase or decrease.
- *  For huge child lists, inserting children can force walking rest of
- *  children to set their childindex; could be slow.
- */
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t;	
-
-- (NSArray *) getChildren;
-// Add t as a child to this node.  If t is null, do nothing.  If t
-//  is nil, add all children of t to this' children.
-
-- (void) addChildren:(NSArray *) theChildren;
-//- (void) removeAllChildren;
-
-// Indicates the node is a nil node but may still have children, meaning
-// the tree is a flat list.
-
-- (BOOL) isNil;
-
-/**  What is the smallest token index (indexing from 0) for this node
- *   and its children?
- */
-- (NSInteger) getTokenStartIndex;
-
-- (void) setTokenStartIndex:(NSInteger) index;
-
-/**  What is the largest token index (indexing from 0) for this node
- *   and its children?
- */
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (id<ANTLRTree>) dupNode;
-
-- (NSString *) toString;
-
-#pragma mark Copying
-- (id) copyWithZone:(NSZone *)aZone;	// the children themselves are not copied here!
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-#pragma mark Tree Parser support
-- (NSInteger) getType;
-- (NSString *) getText;
-// In case we don't have a token payload, what is the line for errors?
-- (NSInteger) getLine;
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger)pos;
-
-#pragma mark Informational
-- (NSString *) treeDescription;
-- (NSString *) description;
-
-@end
-
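
A minimal sketch of walking a tree through the protocol above, honoring the nil-node convention for flat lists; the tree handed in is an assumption.

#import "ANTLRTree.h"

// Hedged sketch: depth-first dump. Nil (list) roots get no label of their own.
static void DumpTree(id<ANTLRTree> t, NSUInteger depth)
{
    if (![t isNil]) {
        NSLog(@"%*s%@", (int)(2 * depth), "", [t getText]);
    }
    for (NSUInteger i = 0; i < [t getChildCount]; i++) {
        DumpTree([t getChild:i], depth + 1);
    }
}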
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeAdaptor.h
deleted file mode 100755
index e6579cf..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeAdaptor.h
+++ /dev/null
@@ -1,159 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRToken.h"
-#import "ANTLRBaseTree.h"
-#import "ANTLRTokenStream.h"
-
-#pragma warning tree/node distinction is broken.
-
-@protocol ANTLRTreeAdaptor <NSObject, NSCopying>
-
-#pragma mark Construction
-
-+ (id<ANTLRTree>) newEmptyTree;
-
-- (id<ANTLRTree>) createTree:(id<ANTLRToken>)payload;
-
-#pragma mark ANTLRTreeAdaptor implementation
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)aNode;	// copies just the node
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree;	// copies the entire subtree, recursively
-
-/** Return a nil node (an empty but non-null node) that can hold
- *  a list of elements as the children.  If you want a flat tree (a list)
- *  use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
- */
-- (id) emptyNode;
-
-/** Return a tree node representing an error.  This node records the
- *  tokens consumed during error recovery.  The start token indicates the
- *  input symbol at which the error was detected.  The stop token indicates
- *  the last symbol consumed during recovery.
- *
- *  You must specify the input stream so that the erroneous text can
- *  be packaged up in the error node.  The exception could be useful
- *  to some applications; default implementation stores ptr to it in
- *  the CommonErrorNode.
- *
- *  This only makes sense during token parsing, not tree parsing.
- *  Tree parsing should happen only when parsing and tree construction
- *  succeed.
- */
-- (id) errorNode:(id<ANTLRTokenStream>)anInput
-            From:(id<ANTLRToken>)aStartToken
-              To:(id<ANTLRToken>)aStopToken
-       Exception:(NSException *) e;
-
-/** Is tree considered a nil node used to make lists of child nodes? */
-- (BOOL) isNil:(id<ANTLRTree>)aTree;
-
-
-- (void) addChild:(id<ANTLRTree>)child toTree:(id<ANTLRTree>)aTree;
-
-/** If oldRoot is a nil root, just copy or move the children to newRoot.
- *  If not a nil root, make oldRoot a child of newRoot.
- *
- *    old=^(nil a b c), new=r yields ^(r a b c)
- *    old=^(a b c), new=r yields ^(r ^(a b c))
- *
- *  If newRoot is a nil-rooted single child tree, use the single
- *  child as the new root node.
- *
- *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
- *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
- *
- *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
- *
- *    old=null, new=r yields r
- *    old=null, new=^(nil r) yields ^(nil r)
- *
- *  Return newRoot.  Throw an exception if newRoot is not a
- *  simple node or nil root with a single child node--it must be a root
- *  node.  If newRoot is ^(nil x) return x as newRoot.
- *
- *  Be advised that it's ok for newRoot to point at oldRoot's
- *  children; i.e., you don't have to copy the list.  We are
- *  constructing these nodes so we should have this control for
- *  efficiency.
- */
-- (id) becomeRoot:(id<ANTLRTree>)newRoot old:(id<ANTLRTree>)oldRoot;
-
-- (id) rulePostProcessing:(id<ANTLRTree>)root;
-
-#pragma mark Rewrite Rules
-                           
-- (NSUInteger) getUniqueID:(id<ANTLRTree>)aNode;
-
-- (id<ANTLRTree>) createTree:(NSInteger)tokenType FromToken:(id<ANTLRToken>)fromToken;
-- (id<ANTLRTree>) createTree:(NSInteger)tokenType FromToken:(id<ANTLRToken>)fromToken Text:(NSString *)text;
-- (id<ANTLRTree>) createTree:(NSInteger)tokenType Text:(NSString *)text;
-
-#pragma mark Content
-
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)aNode;
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree;
-
-- (NSInteger) getType:(id<ANTLRTree>)aNode;
-- (void) setType:(id<ANTLRTree>)aNode Type:(NSInteger)tokenType;
-
-- (NSString *) getText:(id<ANTLRTree>)aNode;
-- (void) setText:(id<ANTLRTree>)aNode Text:(NSString *)tokenText;
-
-- (id<ANTLRToken>) getToken:(id<ANTLRTree>)t;
-
-- (void) setTokenBoundaries:(id<ANTLRTree>)aTree From:(id<ANTLRToken>)startToken To:(id<ANTLRToken>)stopToken;
-- (NSInteger) getTokenStartIndex:(id<ANTLRTree>)aTree;
-- (NSInteger) getTokenStopIndex:(id<ANTLRTree>)aTree;
-
-#pragma mark Navigation / Tree Parsing
-
-/** Get a child 0..n-1 node */
-- (id<ANTLRTree>) getChild:(id<ANTLRTree>)aNode At:(NSInteger) i;
-/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
-- (void) setChild:(id<ANTLRTree>)aTree At:(NSInteger)index Child:(id<ANTLRTree>)child;
-/** Remove ith child and shift children down from right. */
-- (id<ANTLRTree>) deleteChild:(id<ANTLRTree>)t Index:(NSInteger)index;
-
-/** How many children?  If 0, then this is a leaf node */
-- (NSInteger) getChildCount:(id<ANTLRTree>) aTree;
-
-/** Who is the parent node of this node; if null, implies node is root.
- *  If your node type doesn't handle this, it's ok but the tree rewrites
- *  in tree parsers need this functionality.
- */
-- (id<ANTLRTree>)getParent:(id<ANTLRTree>)t;
-- (void) setParent:(id<ANTLRTree>)t With:(id<ANTLRTree>)parent;
-
-/** What index is this node in the child list? Range: 0..n-1
- *  If your node type doesn't handle this, it's ok but the tree rewrites
- *  in tree parsers need this functionality.
- */
-- (NSInteger) getChildIndex:(id<ANTLRTree>)t;
-- (void) setChildIndex:(id<ANTLRTree>)t With:(NSInteger)index;
-
-- (void) replaceChildren:(id<ANTLRTree>)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id<ANTLRTree>)t;
-
-@end
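
The becomeRoot:old: comment above spells out the ^(nil a b c) cases; below is a minimal sketch of the common pattern of collecting operands under a nil root and then hoisting an operator over them. Only adaptor methods declared above are used; the token-type value and operand tokens are assumptions, and ownership bookkeeping is omitted.

#import "ANTLRTreeAdaptor.h"

// Hedged sketch: build ^(+ a b) from two operand tokens.
static id BuildPlusTree(id<ANTLRTreeAdaptor> adaptor, NSInteger plusType,
                        id<ANTLRToken> aTok, id<ANTLRToken> bTok)
{
    id root = [adaptor emptyNode];                           // ^(nil ...)
    [adaptor addChild:[adaptor createTree:aTok] toTree:root];
    [adaptor addChild:[adaptor createTree:bTok] toTree:root];
    id plus = [adaptor createTree:plusType Text:@"+"];       // the new root
    return [adaptor becomeRoot:plus old:root];               // ^(+ a b)
}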
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeException.h
deleted file mode 100755
index 8ec5c45..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeException.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLRTreeException : ANTLRRecognitionException {
-	id<ANTLRTree> oldRoot;
-	id<ANTLRTree> newRoot;
-}
-
-+ (id) exceptionWithOldRoot:(id<ANTLRTree>)theOldRoot newRoot:(id<ANTLRTree>)theNewRoot stream:(id<ANTLRIntStream>)aStream;
-- (id) initWithOldRoot:(id<ANTLRTree>)theOldRoot newRoot:(id<ANTLRTree>)theNewRoot stream:(id<ANTLRIntStream>)aStream;
-
-- (void) setOldRoot:(id<ANTLRTree>)aTree;
-- (void) setNewRoot:(id<ANTLRTree>)aTree;
-
-@end
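
A minimal sketch of raising this exception from tree-rewrite code when a proposed new root is not acceptable; the surrounding roots and stream are assumptions.

#import "ANTLRTreeException.h"
#import "ANTLRIntStream.h"

// Hedged sketch: wrap the offending roots and the stream in the exception.
static void RaiseTreeError(id<ANTLRTree> oldRoot, id<ANTLRTree> newRoot,
                           id<ANTLRIntStream> stream)
{
    @throw [ANTLRTreeException exceptionWithOldRoot:oldRoot
                                            newRoot:newRoot
                                             stream:stream];
}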
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeIterator.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeIterator.h
deleted file mode 100644
index e6d5e71..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeIterator.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//
-//  ANTLRTreeIterator.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRFastQueue.h"
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTree.h"
-
-@interface ANTLRTreeIterator : NSObject 
-{
-	id<ANTLRTreeAdaptor> adaptor;
-	id<ANTLRTree> root;
-	id<ANTLRTree> tree;
-	BOOL firstTime;
-	id<ANTLRTree> up;
-	id<ANTLRTree> down;
-	id<ANTLRTree> eof;
-	
-	ANTLRFastQueue *nodes;
-}
-
-@property(retain, readwrite) id<ANTLRTree> up;
-@property(retain, readwrite) id<ANTLRTree> down;
-@property(retain, readwrite) id<ANTLRTree> eof;
-
-+ newANTRLTreeIterator;
-+ (ANTLRTreeIterator *) newANTRLTreeIteratorWithAdaptor:(ANTLRCommonTreeAdaptor *)adaptor
-                                                andTree:(id<ANTLRTree>)tree;
-- (id) init;
-- (id) initWithTree:(id<ANTLRTree>) t;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>) a andTree:(id<ANTLRTree>) t;
-
-- (void) reset;
-- (BOOL) hasNext;
-- (id) nextObject;
-- (NSArray *) allObjects;
-
-@end
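
A minimal sketch of the hasNext/nextObject loop this iterator supports, built with the factory declared above (whose "ANTRL" spelling is kept exactly as declared); the adaptor and tree arguments are assumptions, and ownership bookkeeping is omitted.

#import "ANTLRTreeIterator.h"

// Hedged sketch: visit nodes in iterator order; UP/DOWN/EOF markers included.
static void IterateTree(ANTLRCommonTreeAdaptor *adaptor, id<ANTLRTree> tree)
{
    ANTLRTreeIterator *it =
        [ANTLRTreeIterator newANTRLTreeIteratorWithAdaptor:adaptor andTree:tree];
    while ([it hasNext]) {
        NSLog(@"%@", [it nextObject]);
    }
}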
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeNodeStream.h
deleted file mode 100755
index bf6342c..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeNodeStream.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRIntStream.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRCommonTreeAdaptor.h"
-
-@protocol ANTLRTreeNodeStream < ANTLRIntStream > 
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)theAdaptor Tree:(ANTLRCommonTree *)theTree;
-
-- (id) LT:(NSInteger)k;
-- (id) getTreeSource;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (id<ANTLRTokenStream>) getTokenStream; 
-- (void) setUniqueNavigationNodes:(BOOL)flag;
-
-- (id) getNode:(NSInteger) idx;
-
-- (NSString *) toStringFromNode:(id)startNode ToNode:(id)stopNode;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeParser.h
deleted file mode 100755
index e2f01ee..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeParser.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRTreeNodeStream.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRMismatchedTreeNodeException.h"
-
-@interface ANTLRTreeParser : ANTLRBaseRecognizer {
-	id<ANTLRTreeNodeStream> input;
-}
-
-@property (retain, getter=getInput, setter=setInput:) id<ANTLRTreeNodeStream> input;
-
-+ (id) newANTLRTreeParser:(id<ANTLRTreeNodeStream>)anInput;
-+ (id) newANTLRTreeParser:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)state;
-
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)theInput;
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)theInput
-                State:(ANTLRRecognizerSharedState *)state;
-
-
-- (id<ANTLRTreeNodeStream>)getInput;
-- (void) setInput:(id<ANTLRTreeNodeStream>)anInput;
-
-- (void) setTreeNodeStream:(id<ANTLRTreeNodeStream>) anInput;
-- (id<ANTLRTreeNodeStream>) getTreeNodeStream;
-
-- (NSString *)getSourceName;
-
-- (id) getCurrentInputSymbol:(id<ANTLRIntStream>) anInput;
-
-- (id) getMissingSymbol:(id<ANTLRIntStream>)input
-              Exception:(ANTLRRecognitionException *) e
-          ExpectedToken:(NSInteger) expectedTokenType
-                 BitSet:(ANTLRBitSet *)follow;
-
-/** Match '.' in tree parser has special meaning.  Skip node or
- *  entire tree if node has children.  If children, scan until
- *  corresponding UP node.
- */
-- (void) matchAny:(id<ANTLRIntStream>)ignore;
-
-/** We have DOWN/UP nodes in the stream that have no line info; override.
- *  Plus, we want to alter the exception type.  Don't try to recover
- *  from tree parser errors inline...
- */
-- (id) recoverFromMismatchedToken:(id<ANTLRIntStream>)anInput
-                             Type:(NSInteger)ttype
-                           Follow:(ANTLRBitSet *)follow;
-
-/** Prefix error message with the grammar name because message is
- *  always intended for the programmer because the parser built
- *  the input tree not the user.
- */
-- (NSString *)getErrorHeader:(ANTLRRecognitionException *)e;
-
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(NSArray *) tokenNames;
-
-- (void) traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-- (void) traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreePatternLexer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreePatternLexer.h
deleted file mode 100644
index f6059d3..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreePatternLexer.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//
-//  ANTLRTreePatternLexer.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-typedef enum {
-	ANTLRLexerTokenTypeEOF = -1,
-	ANTLRLexerTokenTypeInvalid,
-	ANTLRLexerTokenTypeBEGIN,
-	ANTLRLexerTokenTypeEND,
-	ANTLRLexerTokenTypeID,
-	ANTLRLexerTokenTypeARG,
-	ANTLRLexerTokenTypePERCENT,
-	ANTLRLexerTokenTypeCOLON,
-	ANTLRLexerTokenTypeDOT,
-} ANTLRLexerTokenType;
-
-
-@interface ANTLRTreePatternLexer : NSObject {
-
-/** The tree pattern to lex like "(A B C)" */
-NSString *pattern;
-    
-/** Index into input string */
-NSInteger p;
-    
-/** Current char */
-NSInteger c;
-    
-/** How long is the pattern in char? */
-NSInteger n;
-    
-/** Set when token type is ID or ARG (name mimics Java's StreamTokenizer) */
-NSMutableData *sval;
-char *data;
-    
-BOOL error;
-
-}
-
-@property (retain, getter=getPattern, setter=setPattern:) NSString *pattern;
-@property (getter=getP, setter=setP:) NSInteger p;
-@property (getter=getC, setter=setC:) NSInteger c;
-@property (getter=getN, setter=setN:) NSInteger n;
-@property (retain, getter=getSval, setter=setSval:) NSMutableData *sval;
-@property (assign, getter=getData, setter=setData:) char *data;
-@property (getter=getError, setter=setError) BOOL error;
-
-+ (ANTLRTreePatternLexer *)newANTLRTreePatternLexer:(NSString *)aPattern;
-- (id) init;
-- (id) initWithPattern:(NSString *)aPattern;
-- (NSInteger) nextToken;
-- (void) consume;
-- (NSString *)toString;
-
-- (NSMutableData *)getSval;
-- (void) setSval:(NSMutableData *)aSval;
-
-@end
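
A minimal sketch of tokenizing a pattern such as "(A B C)" with the lexer above, looping until ANTLRLexerTokenTypeEOF. Decoding sval as UTF-8 is an assumption (it is only meaningful for ID/ARG tokens), and ownership follows manual reference counting as in the surrounding runtime.

#import "ANTLRTreePatternLexer.h"

// Hedged sketch: list the token types (and, for ID/ARG, the captured text).
static void LexPattern(void)
{
    ANTLRTreePatternLexer *lexer =
        [ANTLRTreePatternLexer newANTLRTreePatternLexer:@"(A B C)"];
    NSInteger ttype;
    while ((ttype = [lexer nextToken]) != ANTLRLexerTokenTypeEOF) {
        NSString *text = [[[NSString alloc] initWithData:[lexer getSval]
                                                encoding:NSUTF8StringEncoding] autorelease];
        NSLog(@"type=%ld text=%@", (long)ttype, text);
    }
}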
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreePatternParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreePatternParser.h
deleted file mode 100644
index f6d6dc6..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreePatternParser.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//
-//  ANTLRTreePatternParser.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreePatternLexer.h"
-#import "ANTLRTreeWizard.h"
-#import "ANTLRTreeAdaptor.h"
-
-@interface ANTLRTreePatternParser : NSObject {
-
-ANTLRTreePatternLexer *tokenizer;
-NSInteger ttype;
-ANTLRTreeWizard *wizard;
-id<ANTLRTreeAdaptor> adaptor;
-    
-}
-
-+ (ANTLRTreePatternParser *)newANTLRTreePatternParser:(ANTLRTreePatternLexer *)aTokenizer
-                                               Wizard:(ANTLRTreeWizard *)aWizard
-                                              Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (id) init;
-- (id) initWithTokenizer:(ANTLRTreePatternLexer *)tokenizer
-                  Wizard:(ANTLRTreeWizard *)aWizard
-                 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (id<ANTLRTree>) pattern;
-- (id<ANTLRTree>) parseTree;
-- (id<ANTLRTree>) parseNode;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeRewriter.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeRewriter.h
deleted file mode 100644
index aee873e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeRewriter.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-//  ANTLRTreeRewriter.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeParser.h"
-
-@interface ANTLRfptr : NSObject {
-    id  actor;
-    SEL ruleSEL;
-}
-
-+ (ANTLRfptr *)newANTLRfptrWithRule:(SEL)aRuleAction withObject:(id)anObject;
--initWithRule:(SEL)ruleAction withObject:(id)anObject;
-
-- (id)rule;
-
-@end
-
-@interface ANTLRTreeRewriter : ANTLRTreeParser {
-    BOOL showTransformations;
-    id<ANTLRTokenStream> originalTokenStream;
-    id<ANTLRTreeAdaptor> originalAdaptor;
-    ANTLRfptr *rule;
-    ANTLRfptr *topdown_fptr;
-    ANTLRfptr *bottomup_ftpr;
-}
-
-+ (ANTLRTreeRewriter *) newANTLRTreeRewriter:(id<ANTLRTreeNodeStream>)anInput;
-+ (ANTLRTreeRewriter *) newANTLRTreeRewriter:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-- (id)initWithStream:(id<ANTLRTreeNodeStream>)anInput;
-- (id)initWithStream:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-- (ANTLRTreeRewriter *) applyOnce:(id<ANTLRTree>)t Rule:(ANTLRfptr *)whichRule;
-- (ANTLRTreeRewriter *) applyRepeatedly:(id<ANTLRTree>)t Rule:(ANTLRfptr *)whichRule;
-- (ANTLRTreeRewriter *) downup:(id<ANTLRTree>)t;
-- (ANTLRTreeRewriter *) pre:(id<ANTLRTree>)t;
-- (ANTLRTreeRewriter *) post:(id<ANTLRTree>)t;
-- (ANTLRTreeRewriter *) downup:(id<ANTLRTree>)t XForm:(BOOL)aShowTransformations;
-- (void)reportTransformation:(id<ANTLRTree>)oldTree Tree:(id<ANTLRTree>)newTree;
-- (ANTLRTreeRewriter *) topdown_fptr;
-- (ANTLRTreeRewriter *) bottomup_ftpr;
-- (ANTLRTreeRewriter *) topdown;
-- (ANTLRTreeRewriter *) bottomup;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeRuleReturnScope.h
deleted file mode 100644
index ea8a487..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeRuleReturnScope.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//
-//  ANTLRTreeRuleReturnScope.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuleReturnScope.h"
-#import "ANTLRCommonTree.h"
-
-@interface ANTLRTreeRuleReturnScope : ANTLRRuleReturnScope {
-    ANTLRCommonTree *startNode;
-}
-
-@property (retain, getter=getStart, setter=setStart:) ANTLRCommonTree *startNode;
-
-/** First node or root node of tree matched for this rule. */
-
-- (ANTLRCommonTree *)getStart;
-- (void)setStart:(ANTLRCommonTree *)aStartNode;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeVisitor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeVisitor.h
deleted file mode 100644
index 1f167bb..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeVisitor.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-//  ANTLRTreeVisitor.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTreeVisitorAction.h"
-
-@interface ANTLRTreeVisitor : NSObject {
-   id<ANTLRTreeAdaptor> adaptor;
-}
-+ (ANTLRTreeVisitor *)newANTLRTreeVisitor:(id<ANTLRTreeAdaptor>) anAdaptor;
-+ (ANTLRTreeVisitor *)newANTLRTreeVisitor;
-- (id)init;
-- (id)initWithAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (ANTLRTreeVisitor *)visit:(id<ANTLRTree>)t Action:(ANTLRTreeVisitorAction *)action;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeVisitorAction.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeVisitorAction.h
deleted file mode 100644
index c9c0856..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeVisitorAction.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-//  ANTLRTreeVisitorAction.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRTreeVisitorAction : NSObject
-{
-
-}
-
-+ (ANTLRTreeVisitorAction *)newANTLRTreeVisitorAction;
-- (id) init;
-
-/** Execute an action before visiting children of t.  Return t or
- *  a rewritten t.  It is up to the visitor to decide what to do
- *  with the return value.  Children of returned value will be
- *  visited if using TreeVisitor.visit().
- */
-- (ANTLRTreeVisitorAction *)pre:(ANTLRTreeVisitorAction *) t;
-
-/** Execute an action after visiting children of t.  Return t or
- *  a rewritten t.  It is up to the visitor to decide what to do
- *  with the return value.
- */
-- (ANTLRTreeVisitorAction *)post:(ANTLRTreeVisitorAction *) t;
-
-@end
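A minimal usage sketch (illustrative only, not part of this diff) of how the pre:/post: hooks declared above were typically combined with the ANTLRTreeVisitor from the preceding header: subclass ANTLRTreeVisitorAction, override both hooks, and pass an instance to visit:Action:. LoggingVisitorAction, `adaptor`, and `tree` are assumed names, not identifiers from this change.

#import "ANTLRTreeVisitor.h"
#import "ANTLRTreeVisitorAction.h"

@interface LoggingVisitorAction : ANTLRTreeVisitorAction
@end

@implementation LoggingVisitorAction
// Invoked before the children of t are visited; per the comments above,
// returning t unchanged keeps the original subtree.
- (ANTLRTreeVisitorAction *)pre:(ANTLRTreeVisitorAction *)t
{
    NSLog(@"pre:  %@", t);
    return t;
}

// Invoked after the children of t have been visited.
- (ANTLRTreeVisitorAction *)post:(ANTLRTreeVisitorAction *)t
{
    NSLog(@"post: %@", t);
    return t;
}
@end

// Typical call site, with `adaptor` (id<ANTLRTreeAdaptor>) and `tree`
// (id<ANTLRTree>) supplied by generated parser code:
//   ANTLRTreeVisitor *visitor = [ANTLRTreeVisitor newANTLRTreeVisitor:adaptor];
//   [visitor visit:tree Action:[[LoggingVisitorAction alloc] init]];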
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeWizard.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeWizard.h
deleted file mode 100644
index d952572..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRTreeWizard.h
+++ /dev/null
@@ -1,134 +0,0 @@
-//
-//  ANTLRTreeWizard.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRMapElement.h"
-#import "ANTLRMap.h"
-
-@class ANTLRVisitor;
-
-@protocol ANTLRContextVisitor <NSObject>
-// TODO: should this be called visit or something else?
-- (void) visit:(id<ANTLRTree>)t Parent:(id<ANTLRTree>)parent ChildIndex:(NSInteger)childIndex Map:(ANTLRMap *)labels;
-
-@end
-
-@interface ANTLRVisitor : NSObject <ANTLRContextVisitor> {
-    NSInteger action;
-    id actor;
-    id object1;
-    id object2;
-}
-+ (ANTLRVisitor *)newANTLRVisitor:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2;
-- (id) initWithAction:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2;
-
-- (void) visit:(id<ANTLRTree>)t;
-- (void) visit:(id<ANTLRTree>)t Parent:(id<ANTLRTree>)parent ChildIndex:(NSInteger)childIndex Map:(ANTLRMap *)labels;
-
-@end
-
-/** When using %label:TOKENNAME in a tree for parse(), we must
- *  track the label.
- */
-@interface ANTLRTreePattern : ANTLRCommonTree {
-    NSString *label;
-    BOOL      hasTextArg;
-}
-@property (retain, getter=getLabel, setter=setLabel:) NSString *label;
-@property (assign, getter=getHasTextArg, setter=setHasTextArg:) BOOL hasTextArg;
-
-+ (ANTLRTreePattern *)newANTLRTreePattern:(id<ANTLRToken>)payload;
-
-- (id) initWithToken:(id<ANTLRToken>)payload;
-- (NSString *)toString;
-@end
-
-@interface ANTLRWildcardTreePattern : ANTLRTreePattern {
-}
-
-+ (ANTLRWildcardTreePattern *)newANTLRWildcardTreePattern:(id<ANTLRToken>)payload;
-- (id) initWithToken:(id<ANTLRToken>)payload;
-@end
-
-/** This adaptor creates TreePattern objects for use during scan() */
-@interface ANTLRTreePatternTreeAdaptor : ANTLRCommonTreeAdaptor {
-}
-+ (ANTLRTreePatternTreeAdaptor *)newTreeAdaptor;
-#ifdef DONTUSENOMO
-+ (ANTLRTreePatternTreeAdaptor *)newTreeAdaptor:(id<ANTLRToken>)payload;
-#endif
-- (id) init;
-#ifdef DONTUSENOMO
-- initWithToken:(id<ANTLRToken>)payload;
-#endif
-- (id<ANTLRTree>)createTreePattern:(id<ANTLRToken>)payload;
-
-@end
-
-@interface ANTLRTreeWizard : NSObject {
-	id<ANTLRTreeAdaptor> adaptor;
-	ANTLRMap *tokenNameToTypeMap;
-}
-+ (ANTLRTreeWizard *) newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)anAdaptor;
-+ (ANTLRTreeWizard *)newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)adaptor Map:(ANTLRMap *)aTokenNameToTypeMap;
-+ (ANTLRTreeWizard *)newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)adaptor TokenNames:(NSArray *)theTokNams;
-+ (ANTLRTreeWizard *)newANTLRTreeWizardWithTokenNames:(NSArray *)theTokNams;
-- (id) init;
-- (id) initWithAdaptor:(id<ANTLRTreeAdaptor>)adaptor;
-- (id) initWithAdaptor:(id<ANTLRTreeAdaptor>)adaptor Map:(ANTLRMap *)tokenNameToTypeMap;
-- (id) initWithTokenNames:(NSArray *)theTokNams;
-- (id) initWithTokenNames:(id<ANTLRTreeAdaptor>)anAdaptor TokenNames:(NSArray *)theTokNams;
-- (ANTLRMap *)computeTokenTypes:(NSArray *)theTokNams;
-- (NSInteger)getTokenType:(NSString *)tokenName;
-- (ANTLRMap *)index:(id<ANTLRTree>)t;
-- (void) _index:(id<ANTLRTree>)t Map:(ANTLRMap *)m;
-- (NSMutableArray *)find:(id<ANTLRTree>) t Pattern:(NSString *)pattern;
-- (ANTLRTreeWizard *)findFirst:(id<ANTLRTree>) t Type:(NSInteger)ttype;
-- (ANTLRTreeWizard *)findFirst:(id<ANTLRTree>) t Pattern:(NSString *)pattern;
-- (void) visit:(id<ANTLRTree>)t Type:(NSInteger)ttype Visitor:(ANTLRVisitor *)visitor;
-- (void) _visit:(id<ANTLRTree>)t
-         Parent:(id<ANTLRTree>)parent
-     ChildIndex:(NSInteger)childIndex
-           Type:(NSInteger)ttype
-        Visitor:(ANTLRVisitor *)visitor;
-- (void)visit:(id<ANTLRTree>)t Pattern:(NSString *)pattern Visitor:(ANTLRVisitor *)visitor;
-- (BOOL)parse:(id<ANTLRTree>)t Pattern:(NSString *)pattern Map:(ANTLRMap *)labels;
-- (BOOL) parse:(id<ANTLRTree>) t Pattern:(NSString *)pattern;
-- (BOOL) _parse:(id<ANTLRTree>)t1 Pattern:(ANTLRTreePattern *)tpattern Map:(ANTLRMap *)labels;
-- (id<ANTLRTree>) createTree:(NSString *)pattern;
-- (BOOL)equals:(id)t1 O2:(id)t2 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (BOOL)equals:(id)t1 O2:(id)t2;
-- (BOOL) _equals:(id)t1 O2:(id)t2 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-
-@end
-
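The doc comments in the header above describe the wizard's pattern facilities (%label:TOKENNAME tracking, parse(), scan()). The following is a small usage sketch under stated assumptions: `adaptor`, `tokenNames`, and `tree` are expected to come from generated parser code, and the PLUS/INT token names are placeholders rather than identifiers from this change.

#import "ANTLRTreeWizard.h"

static void inspectTree(id<ANTLRTreeAdaptor> adaptor,
                        NSArray *tokenNames,
                        id<ANTLRTree> tree)
{
    ANTLRTreeWizard *wizard =
        [ANTLRTreeWizard newANTLRTreeWizard:adaptor TokenNames:tokenNames];

    // Collect every subtree whose structure matches the pattern.
    NSMutableArray *adds = [wizard find:tree Pattern:@"(PLUS INT INT)"];
    NSLog(@"found %lu additions", (unsigned long)[adds count]);

    // Structural test against the whole tree; the %label:TOKENNAME form
    // described above is used with parse:Pattern:Map: to capture nodes.
    if ([wizard parse:tree Pattern:@"(PLUS INT INT)"]) {
        NSLog(@"tree is a two-operand addition");
    }

    // Build a small prototype tree directly from a pattern string.
    id<ANTLRTree> proto = [wizard createTree:@"(PLUS INT INT)"];
    NSLog(@"prototype: %@", proto);
}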
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUnbufferedCommonTreeNodeStreamState.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUnbufferedCommonTreeNodeStreamState.h
deleted file mode 100755
index 9e79d86..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUnbufferedCommonTreeNodeStreamState.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-
-@interface ANTLRUnbufferedCommonTreeNodeStreamState : NSObject {
-	ANTLRCommonTree *currentNode;
-	ANTLRCommonTree *previousNode;
-
-	int currentChildIndex;
-	int absoluteNodeIndex;
-	unsigned int nodeStackSize;
-	unsigned int indexStackSize;
-	
-	NSMutableArray *lookahead;
-}
-
-- (ANTLRCommonTree *) currentNode;
-- (void) setCurrentNode: (ANTLRCommonTree *) aCurrentNode;
-
-- (ANTLRCommonTree *) previousNode;
-- (void) setPreviousNode: (ANTLRCommonTree *) aPreviousNode;
-
-- (NSInteger) currentChildIndex;
-- (void) setCurrentChildIndex: (NSInteger) aCurrentChildIndex;
-
-- (NSInteger) absoluteNodeIndex;
-- (void) setAbsoluteNodeIndex: (NSInteger) anAbsoluteNodeIndex;
-
-- (NSUInteger) nodeStackSize;
-- (void) setNodeStackSize: (NSUInteger) aNodeStackSize;
-
-- (NSUInteger) indexStackSize;
-- (void) setIndexStackSize: (NSUInteger) anIndexStackSize;
-
-- (NSMutableArray *) lookahead;
-- (void) setLookahead: (NSMutableArray *) aLookahead;
-
-- (void) addToLookahead: (id)lookaheadObject;
-- (void) removeFromLookahead: (id)lookaheadObject;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUnbufferedTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUnbufferedTokenStream.h
deleted file mode 100644
index e4f8630..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUnbufferedTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//
-//  ANTLRUnbufferedTokenStream.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/12/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuntimeException.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRToken.h"
-
-@interface ANTLRUnbufferedTokenStream : ANTLRLookaheadStream {
-	id<ANTLRTokenSource> tokenSource;
-    NSInteger tokenIndex; // simple counter to set token index in tokens
-    NSInteger channel;
-}
-
-@property (retain, getter=getTokenSource, setter=setTokenSource:) id<ANTLRTokenSource> tokenSource;
-@property (getter=getTokenIndex, setter=setTokenIndex) NSInteger tokenIndex;
-@property (getter=getChannel, setter=setChannel:) NSInteger channel;
-
-+ (ANTLRUnbufferedTokenStream *)newANTLRUnbufferedTokenStream:(id<ANTLRTokenSource>)aTokenSource;
-- (id) init;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource;
-
-- (id<ANTLRToken>)nextElement;
-- (BOOL)isEOF:(id<ANTLRToken>) aToken;
-- (id<ANTLRTokenSource>)getTokenSource;
-- (NSString *)toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *)toStringFromToken:(id<ANTLRToken>)aStart ToEnd:(id<ANTLRToken>)aStop;
-- (NSInteger)LA:(NSInteger)anIdx;
-- (id<ANTLRToken>)objectAtIndex:(NSInteger)anIdx;
-- (NSString *)getSourceName;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUniqueIDMap.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUniqueIDMap.h
deleted file mode 100644
index a805bd5..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUniqueIDMap.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//
-//  ANTLRUniqueIDMap.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/7/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-#import "ANTLRNodeMapElement.h"
-
-#define SUCCESS             0
-#define FAILURE             -1
-#define HASHSIZE            101
-#define HBUFSIZE            0x2000
-
-@interface ANTLRUniqueIDMap : ANTLRPtrBuffer {
-    NSInteger lastHash;
-}
-
-@property (getter=getLastHash, setter=setLastHash) NSInteger lastHash;
-
-+ (id)newANTLRUniqueIDMap;
-+ (id)newANTLRUniqueIDMapWithLen:(NSInteger)aHashSize;
-
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-// Instance Methods
-- (NSInteger)count;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-
-- (void)deleteANTLRUniqueIDMap:(ANTLRNodeMapElement *)np;
-- (void)delete_chain:(ANTLRNodeMapElement *)np;
-- (id)getNode:(id<ANTLRTree>)aNode;
-- (void)putID:(id)anID Node:(id<ANTLRTree>)aNode;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUnwantedTokenException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUnwantedTokenException.h
deleted file mode 100644
index 2945bfe..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ANTLRUnwantedTokenException.h
+++ /dev/null
@@ -1,47 +0,0 @@
-//
-//  ANTLRUnwantedTokenException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRMismatchedTokenException.h"
-
-@interface ANTLRUnwantedTokenException : ANTLRMismatchedTokenException {
-
-}
-+ (ANTLRUnwantedTokenException *)newANTLRUnwantedTokenException;
-+ (ANTLRUnwantedTokenException *)newANTLRUnwantedTokenException:(NSInteger)expected Stream:(id<ANTLRIntStream>)anInput;
-
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInput And:(NSInteger)expected;
-- (id<ANTLRToken>)getUnexpectedToken;
-- (NSString *)toString;
-                     
-    
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Resources/English.lproj/InfoPlist.strings b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Resources/English.lproj/InfoPlist.strings
deleted file mode 100644
index fa1b75f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Resources/English.lproj/InfoPlist.strings
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Resources/Info.plist b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Resources/Info.plist
deleted file mode 100644
index 24436a3..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Resources/Info.plist
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-	<key>CFBundleDevelopmentRegion</key>
-	<string>English</string>
-	<key>CFBundleExecutable</key>
-	<string>ANTLR</string>
-	<key>CFBundleIdentifier</key>
-	<string>org.antlr.antlrframework</string>
-	<key>CFBundleInfoDictionaryVersion</key>
-	<string>6.0</string>
-	<key>CFBundleName</key>
-	<string>ANTLR</string>
-	<key>CFBundlePackageType</key>
-	<string>FMWK</string>
-	<key>CFBundleSignature</key>
-	<string>????</string>
-	<key>CFBundleVersion</key>
-	<string>1.0</string>
-</dict>
-</plist>
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/ANTLR b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/ANTLR
deleted file mode 100755
index 67c1d3a..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/ANTLR
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLR.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLR.h
deleted file mode 100755
index 671e783..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLR.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRTreeException.h>
-
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseMapElement.h
deleted file mode 100644
index b9100ac..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseMapElement.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-//  ANTLRBaseMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-@interface ANTLRBaseMapElement : ANTLRLinkBase {
-    NSNumber *index;
-}
-
-@property (retain, getter=getIndex, setter=setIndex:) NSNumber *index;
-
-+ (id) newANTLRBaseMapElement;
-+ (id) newANTLRBaseMapElementWithIndex:(NSNumber *)anIdx;
-- (id) init;
-- (id) initWithAnIndex:(NSNumber *)anIdx;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSNumber *)getIndex;
-- (void)setIndex:(NSNumber *)anIdx;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseRecognizer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseRecognizer.h
deleted file mode 100755
index 1a922bd..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseRecognizer.h
+++ /dev/null
@@ -1,183 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import <Foundation/Foundation.h>
-
-#import "ANTLRIntStream.h"
-
-// This is an abstract superclass for lexers and parsers.
-
-#define ANTLR_MEMO_RULE_FAILED -2
-#define ANTLR_MEMO_RULE_UNKNOWN -1
-#define ANTLR_INITIAL_FOLLOW_STACK_SIZE 100
-
-#import "ANTLRMapElement.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRToken.h"
-#import "ANTLRRecognizerSharedState.h"
-#import "ANTLRRecognitionException.h"
-#import "ANTLRMissingTokenException.h"
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRMismatchedTreeNodeException.h"
-#import "ANTLRUnwantedTokenException.h"
-#import "ANTLRNoViableAltException.h"
-#import "ANTLREarlyExitException.h"
-#import "ANTLRMismatchedSetException.h"
-#import "ANTLRMismatchedNotSetException.h"
-#import "ANTLRFailedPredicateException.h"
-
-@interface ANTLRBaseRecognizer : NSObject {
-	ANTLRRecognizerSharedState *state;	// the state of this recognizer. Might be shared with other recognizers, e.g. in grammar import scenarios.
-	NSString *grammarFileName;			// where did the grammar come from. filled in by codegeneration
-//    BOOL failed;
-    NSString *sourceName;
-//    NSInteger numberOfSyntaxErrors;
-    NSArray *tokenNames;
-}
-
-@property (retain, getter=getState, setter=setState) ANTLRRecognizerSharedState *state;
-@property (retain, getter=getGrammarFileName, setter=setGrammarFileName) NSString *grammarFileName;
-//@property (assign, getter=getFailed, setter=setFailed) BOOL failed;
-@property (retain, getter=getTokenNames, setter=setTokenNames) NSArray *tokenNames;
-@property (retain, getter=getSourceName, setter=setSourceName) NSString *sourceName;
-//@property (assign, getter=getNumberOfSyntaxErrors, setter=setNumberOfSyntaxErrors) NSInteger numberOfSyntaxErrors;
-
-+ (void) initialize;
-
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizer;
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizerWithRuleLen:(NSInteger)aLen;
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizer:(ANTLRRecognizerSharedState *)aState;
-
-+ (NSArray *)getTokenNames;
-+ (void)setTokenNames:(NSArray *)aTokNamArray;
-+ (void)setGrammarFileName:(NSString *)aFileName;
-
-- (id) init;
-- (id) initWithLen:(NSInteger)aLen;
-- (id) initWithState:(ANTLRRecognizerSharedState *)aState;
-
-- (void) dealloc;
-
-// simple accessors
-- (NSInteger) getBacktrackingLevel;
-- (void) setBacktrackingLevel:(NSInteger) level;
-
-- (BOOL) getFailed;
-- (void) setFailed: (BOOL) flag;
-
-- (ANTLRRecognizerSharedState *) getState;
-- (void) setState:(ANTLRRecognizerSharedState *) theState;
-
-// reset this recognizer - might be extended by codegeneration/grammar
-- (void) reset;
-
-/** Match needs to return the current input symbol, which gets put
- *  into the label for the associated token ref; e.g., x=ID.  Token
- *  and tree parsers need to return different objects. Rather than test
- *  for input stream type or change the IntStream interface, I use
- *  a simple method to ask the recognizer to tell me what the current
- *  input symbol is.
- * 
- *  This is ignored for lexers.
- */
-- (id) getInput;
-
-- (void)skip;
-
-// do actual matching of tokens/characters
-- (id) match:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)ttype Follow:(ANTLRBitSet *)follow;
-- (void) matchAny:(id<ANTLRIntStream>)anInput;
-- (BOOL) mismatchIsUnwantedToken:(id<ANTLRIntStream>)anInput TokenType:(NSInteger) ttype;
-- (BOOL) mismatchIsMissingToken:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)follow;
-
-// error reporting and recovery
-- (void) reportError:(ANTLRRecognitionException *)e;
-- (void) displayRecognitionError:(NSArray *)theTokNams Exception:(ANTLRRecognitionException *)e;
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(NSArray *)theTokNams;
-- (NSInteger) getNumberOfSyntaxErrors;
-- (NSString *)getErrorHeader:(ANTLRRecognitionException *)e;
-- (NSString *)getTokenErrorDisplay:(id<ANTLRToken>)t;
-- (void) emitErrorMessage:(NSString *)msg;
-- (void) recover:(id<ANTLRIntStream>)anInput Exception:(ANTLRRecognitionException *)e;
-
-// begin hooks for debugger
-- (void) beginResync;
-- (void) endResync;
-// end hooks for debugger
-
-// compute the bitsets necessary to do matching and recovery
-- (ANTLRBitSet *)computeErrorRecoverySet;
-- (ANTLRBitSet *)computeContextSensitiveRuleFOLLOW;
-- (ANTLRBitSet *)combineFollows:(BOOL) exact;
-
-- (id<ANTLRToken>) recoverFromMismatchedToken:(id<ANTLRIntStream>)anInput 
-                                    TokenType:(NSInteger)ttype 
-                                       Follow:(ANTLRBitSet *)follow;
-                                    
-- (id<ANTLRToken>)recoverFromMismatchedSet:(id<ANTLRIntStream>)anInput
-                                    Exception:(ANTLRRecognitionException *)e
-                                    Follow:(ANTLRBitSet *)follow;
-
-- (id) getCurrentInputSymbol:(id<ANTLRIntStream>)anInput;
-- (id) getMissingSymbol:(id<ANTLRIntStream>)anInput
-              Exception:(ANTLRRecognitionException *)e
-              TokenType:(NSInteger) expectedTokenType
-                Follow:(ANTLRBitSet *)follow;
-
-// helper methods for recovery. try to resync somewhere
-- (void) consumeUntilTType:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)ttype;
-- (void) consumeUntilFollow:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)bitSet;
-- (void) pushFollow:(ANTLRBitSet *)fset;
-- (ANTLRBitSet *)popFollow;
-
-// to be used by the debugger to do reporting. maybe hook in incremental stuff here, too.
-- (NSMutableArray *) getRuleInvocationStack;
-- (NSMutableArray *) getRuleInvocationStack:(ANTLRRecognitionException *)exception
-					             Recognizer:(NSString *)recognizerClassName;
-
-- (NSArray *) getTokenNames;
-- (NSString *)getGrammarFileName;
-- (NSString *)getSourceName;
-- (NSMutableArray *) toStrings:(NSArray *)tokens;
-// support for memoization
-- (NSInteger) getRuleMemoization:(NSInteger)ruleIndex StartIndex:(NSInteger)ruleStartIndex;
-- (BOOL) alreadyParsedRule:(id<ANTLRIntStream>)anInput RuleIndex:(NSInteger)ruleIndex;
-- (void) memoize:(id<ANTLRIntStream>)anInput
-	     RuleIndex:(NSInteger)ruleIndex
-	    StartIndex:(NSInteger)ruleStartIndex;
-- (NSInteger) getRuleMemoizationCacheSize;
-- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol;
-- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol;
-
-
-// support for syntactic predicates. these are called indirectly to support funky stuff in grammars,
-// like supplying selectors instead of writing code directly into the actions of the grammar.
-- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment;
-// stream:(id<ANTLRIntStream>)anInput;
-
-@end
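The match comment above explains that match returns the consumed input symbol so generated code can bind it to a label (x=ID). Below is a hedged sketch of that call pattern; the token type and follow set stand in for generated constants, and matchIdentifier is an illustrative helper, not part of the runtime or this change.

#import "ANTLRBaseRecognizer.h"

static id<ANTLRToken> matchIdentifier(ANTLRBaseRecognizer *recognizer,
                                      id<ANTLRIntStream> input,
                                      NSInteger idTokenType,
                                      ANTLRBitSet *follow)
{
    // match consumes the current symbol when it has the expected type
    // (attempting recovery via the follow set otherwise) and returns it,
    // which is what lets generated code implement labels such as x=ID.
    id<ANTLRToken> x = [recognizer match:input TokenType:idTokenType Follow:follow];
    if ([recognizer getFailed]) {
        return nil;   // under backtracking, failure is flagged rather than thrown
    }
    return x;
}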
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseStack.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseStack.h
deleted file mode 100644
index 5069031..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseStack.h
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-//  ANTLRBaseRecognizer.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRBaseStack : ANTLRPtrBuffer {
-	//ANTLRRuleStack *fNext;
-    // TStringPool *fPool;
-    NSInteger LastHash;
-}
-
-//@property (copy) ANTLRRuleStack *fNext;
-@property (getter=getLastHash, setter=setLastHash) NSInteger LastHash;
-
-// Contruction/Destruction
-+ (ANTLRBaseStack *)newANTLRBaseStack;
-+ (ANTLRBaseStack *)newANTLRBaseStackWithLen:(NSInteger)cnt;
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger)count;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-
-- (NSInteger)getLastHash;
-- (void)setLastHash:(NSInteger)aVal;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseTree.h
deleted file mode 100755
index 96513f8..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseTree.h
+++ /dev/null
@@ -1,199 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTree.h"
-
-@protocol ANTLRBaseTree <ANTLRTree>
-
-@property (retain, getter=getChildren, setter=setChildren) NSMutableArray *children;
-
-+ (id<ANTLRBaseTree>) newANTLRBaseTree;
-+ (id<ANTLRBaseTree>) newANTLRBaseTree:(id<ANTLRBaseTree>)node;
-
-- (id<ANTLRBaseTree>) init;
-- (id<ANTLRBaseTree>) initWith:(id<ANTLRTree>)node;
-
-- (id<ANTLRBaseTree>) getChild:(NSUInteger)i;
-- (NSMutableArray *)getChildren;
-- (void) setChildren:(NSMutableArray *)anArray;
-- (id<ANTLRBaseTree>)getFirstChildWithType:(NSInteger)type;
-- (NSUInteger) getChildCount;
-
-// Add t as a child to this node.  If t is null, do nothing.  If t
-//  is nil, add all children of t to this' children.
-
-- (void) addChild:(id<ANTLRTree>) tree;
-- (void) addChildren:(NSArray *) theChildren;
-//- (void) removeAllChildren;
-
-- (void) setChild:(NSInteger) i With:(id<ANTLRTree>)t;
-- (id) deleteChild:(NSInteger) i;
-- (NSMutableArray *) createChildrenList;
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-// Indicates the node is a nil node but may still have children, meaning
-// the tree is a flat list.
-
-- (BOOL) isNil;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex:(NSInteger) index;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (void) freshenParentAndChildIndexes;
-- (void) freshenParentAndChildIndexes:(NSInteger) offset;
-- (void) sanityCheckParentAndChildIndexes;
-- (void) sanityCheckParentAndChildIndexes:(id<ANTLRTree>) parent At:(NSInteger) i;
-
-- (NSInteger) getChildIndex;
-- (void) setChildIndex:(NSInteger)i;
-
-- (id<ANTLRTree>)getAncestor:(NSInteger)ttype;
-- (NSMutableArray *)getAncestors;
-
-#pragma mark Copying
-- (id) copyWithZone:(NSZone *)aZone;	// the children themselves are not copied here!
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-#pragma mark Tree Parser support
-- (NSInteger) getType;
-- (NSString *) getText;
-// In case we don't have a token payload, what is the line for errors?
-- (NSInteger) getLine;
-- (NSInteger) getCharPositionInLine;
-
-
-#pragma mark Informational
-- (NSString *) treeDescription;
-- (NSString *) description;
-
-- (NSString *) toString;
-- (NSString *) toStringTree;
-
-@end
-
-@interface ANTLRBaseTree : NSObject <ANTLRTree>
-{
-	NSMutableArray *children;
-    NSException *anException;
-}
-
-@property (retain, getter=getChildren, setter=setChildren) NSMutableArray *children;
-
-+ (id<ANTLRBaseTree>) newANTLRBaseTree;
-+ (id<ANTLRBaseTree>) newANTLRBaseTree:(id<ANTLRBaseTree>)node;
-         
-- (id<ANTLRTree>) init;
-- (id<ANTLRBaseTree>) initWith:(id<ANTLRTree>)node;
-
-- (id<ANTLRBaseTree>) getChild:(NSUInteger)i;
-- (NSMutableArray *)getChildren;
-- (void) setChildren:(NSMutableArray *)anArray;
-- (id<ANTLRBaseTree>)getFirstChildWithType:(NSInteger)type;
-- (NSUInteger) getChildCount;
-
-//- (void) removeAllChildren;
-
-// Add t as a child to this node.  If t is null, do nothing.  If t
-//  is nil, add all children of t to this' children.
-
-- (void) addChild:(id<ANTLRTree>) tree;
-- (void) addChildren:(NSArray *) theChildren;
-
-- (void) setChild:(NSInteger) i With:(id<ANTLRTree>)t;
-- (id) deleteChild:(NSInteger) i;
-- (NSMutableArray *) createChildrenList;
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-// Indicates the node is a nil node but may still have children, meaning
-	// the tree is a flat list.
-
-- (BOOL) isNil;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex:(NSInteger) index;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (void) freshenParentAndChildIndexes;
-- (void) freshenParentAndChildIndexes:(NSInteger) offset;
-- (void) sanityCheckParentAndChildIndexes;
-- (void) sanityCheckParentAndChildIndexes:(id<ANTLRTree>) parent At:(NSInteger) i;
-
-- (NSInteger) getChildIndex;
-- (void) setChildIndex:(NSInteger)i;
-
-- (BOOL) hasAncestor:(NSInteger) ttype;
-- (id<ANTLRTree>)getAncestor:(NSInteger)ttype;
-- (NSMutableArray *)getAncestors;
-
-- (id) copyWithZone:(NSZone *)aZone;
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-	// Return a token type; needed for tree parsing
-- (NSInteger) getType;
-- (NSString *) getText;
-
-	// In case we don't have a token payload, what is the line for errors?
-- (NSInteger) getLine;
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger)pos;
-
-- (NSString *) treeDescription;
-- (NSString *) description;
-- (NSString *) toString;
-- (NSString *) toStringTree;
-
-@end
-
-@interface ANTLRTreeNavigationNode : ANTLRBaseTree {
-}
-- (id) copyWithZone:(NSZone *)aZone;
-@end
-
-@interface ANTLRTreeNavigationNodeDown : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeDown *) getNavigationNodeDown;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-@interface ANTLRTreeNavigationNodeUp : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeUp *) getNavigationNodeUp;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-@interface ANTLRTreeNavigationNodeEOF : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeEOF *) getNavigationNodeEOF;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-extern ANTLRTreeNavigationNodeDown *navigationNodeDown;
-extern ANTLRTreeNavigationNodeUp *navigationNodeUp;
-extern ANTLRTreeNavigationNodeEOF *navigationNodeEOF;
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseTreeAdaptor.h
deleted file mode 100644
index b4f8dad..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBaseTreeAdaptor.h
+++ /dev/null
@@ -1,163 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRCommonErrorNode.h"
-#import "ANTLRUniqueIDMap.h"
-
-@interface ANTLRBaseTreeAdaptor : NSObject <ANTLRTreeAdaptor, NSCopying> {
-    ANTLRUniqueIDMap *treeToUniqueIDMap;
-	NSInteger uniqueNodeID;
-}
-
-@property (retain, getter=getTreeToUniqueIDMap, setter=setTreeToUniqueIDMap:) ANTLRUniqueIDMap *treeToUniqueIDMap;
-@property (getter=getUniqueNodeID, setter=setUniqueNodeID:) NSInteger uniqueNodeID;
-
-+ (id<ANTLRTreeAdaptor>) newEmptyTree;
-
-- (id) init;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id) emptyNode;
-
-- (ANTLRUniqueIDMap *)getTreeToUniqueIDMap;
-- (void) setTreeToUniqueIDMap:(ANTLRUniqueIDMap *)aMapNode;
-
-- (NSInteger)getUniqueID;
-- (void) setUniqueNodeID:(NSInteger)aUniqueNodeID;
-
-/** create tree node that holds the start and stop tokens associated
- *  with an error.
- *
- *  If you specify your own kind of tree nodes, you will likely have to
- *  override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
- *  if no token payload but you might have to set token type for diff
- *  node type.
- *
- *  You don't have to subclass CommonErrorNode; you will likely need to
- *  subclass your own tree node class to avoid class cast exception.
- */
-- (id) errorNode:(id<ANTLRTokenStream>)anInput
-            From:(id<ANTLRToken>)startToken
-              To:(id<ANTLRToken>)stopToken
-       Exception:(NSException *) e;
-
-- (BOOL) isNil:(id<ANTLRTree>) aTree;
-
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree;
-/** This is generic in the sense that it will work with any kind of
- *  tree (not just Tree interface).  It invokes the adaptor routines
- *  not the tree node routines to do the construction.  
- */
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree Parent:(id<ANTLRTree>)parent;
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)aNode;
-/** Add a child to the tree t.  If child is a flat tree (a list), make all
- *  in list children of t.  Warning: if t has no children, but child does
- *  and child isNil then you can decide it is ok to move children to t via
- *  t.children = child.children; i.e., without copying the array.  Just
- *  make sure that this is consistent with have the user will build
- *  ASTs.
- */
-- (void) addChild:(id<ANTLRTree>)aChild toTree:(id<ANTLRTree>)aTree;
-
-/** If oldRoot is a nil root, just copy or move the children to newRoot.
- *  If not a nil root, make oldRoot a child of newRoot.
- *
- *    old=^(nil a b c), new=r yields ^(r a b c)
- *    old=^(a b c), new=r yields ^(r ^(a b c))
- *
- *  If newRoot is a nil-rooted single child tree, use the single
- *  child as the new root node.
- *
- *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
- *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
- *
- *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
- *
- *    old=null, new=r yields r
- *    old=null, new=^(nil r) yields ^(nil r)
- *
- *  Return newRoot.  Throw an exception if newRoot is not a
- *  simple node or nil root with a single child node--it must be a root
- *  node.  If newRoot is ^(nil x) return x as newRoot.
- *
- *  Be advised that it's ok for newRoot to point at oldRoot's
- *  children; i.e., you don't have to copy the list.  We are
- *  constructing these nodes so we should have this control for
- *  efficiency.
- */
-- (id<ANTLRTree>)becomeRoot:(id<ANTLRTree>)aNewRoot old:(id<ANTLRTree>)oldRoot;
-
-/** Transform ^(nil x) to x and nil to null */
-- (id<ANTLRTree>)rulePostProcessing:(id<ANTLRTree>)aRoot;
-
-- (id<ANTLRTree>)becomeRootfromToken:(id<ANTLRToken>)aNewRoot old:(id<ANTLRTree>)oldRoot;
-
-- (id<ANTLRTree>)createTree:(NSInteger)aTType With:(id<ANTLRToken>)aFromToken;
-
-- (id<ANTLRTree>)createTree:(NSInteger)aTType FromToken:(id<ANTLRToken>)aFromToken Text:(NSString *)theText;
-
-- (id<ANTLRTree>)createTree:(NSInteger)aTType Text:(NSString *)theText;
-
-- (NSInteger) getType:(id<ANTLRTree>)aTree;
-
-- (void) setType:(id<ANTLRTree>)aTree Type:(NSInteger)type;
-
-- (NSString *)getText:(id<ANTLRTree>)aTree;
-
-- (void) setText:(id<ANTLRTree>)aTree Text:(NSString *)theText;
-
-- (id<ANTLRTree>) getChild:(id<ANTLRTree>)aTree At:(NSInteger)i;
-
-- (void) setChild:(id<ANTLRTree>)aTree At:(NSInteger)index Child:(id<ANTLRTree>)aChild;
-
-- (id<ANTLRTree>) deleteChild:(id<ANTLRTree>)aTree Index:(NSInteger)index;
-
-- (NSInteger) getChildCount:(id<ANTLRTree>)aTree;
-
-- (NSInteger) getUniqueID:(id<ANTLRTree>)node;
-
-/** Tell me how to create a token for use with imaginary token nodes.
- *  For example, there is probably no input symbol associated with imaginary
- *  token DECL, but you need to create it as a payload or whatever for
- *  the DECL node as in ^(DECL type ID).
- *
- *  This is a variant of createToken where the new token is derived from
- *  an actual real input token.  Typically this is for converting '{'
- *  tokens to BLOCK etc...  You'll see
- *
- *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
- *
- *  If you care what the token payload objects' type is, you should
- *  override this method and any other createToken variant.
- */
-- (id<ANTLRToken>)createToken:(NSInteger)aTType Text:(NSString *)theText;
-
-- (id<ANTLRToken>)createToken:(id<ANTLRToken>)aFromToken;
-
-@end
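
Since the nil-root rewrite rules documented above are the heart of this adaptor contract, a minimal sketch of how they compose follows, using only methods declared in this header and in the ANTLRCommonTreeAdaptor header further below; ID_TYPE and PLUS_TYPE are illustrative placeholder token types, not constants from this patch.

#import "ANTLRCommonTreeAdaptor.h"

// Sketch only: builds ^(+ a b) from a flat ^(nil a b) list via the adaptor
// routines declared above. ID_TYPE and PLUS_TYPE are placeholder values.
static id<ANTLRTree> buildPlusTree(void) {
    const NSInteger ID_TYPE = 4, PLUS_TYPE = 5;
    ANTLRCommonTreeAdaptor *adaptor = [ANTLRCommonTreeAdaptor newANTLRCommonTreeAdaptor];
    id<ANTLRTree> list = [adaptor emptyNode];                               // ^(nil)
    [adaptor addChild:[adaptor createTree:ID_TYPE Text:@"a"] toTree:list];
    [adaptor addChild:[adaptor createTree:ID_TYPE Text:@"b"] toTree:list];
    // old=^(nil a b), new=+  yields  ^(+ a b)
    id<ANTLRTree> root = [adaptor becomeRoot:[adaptor createTree:PLUS_TYPE Text:@"+"] old:list];
    return [adaptor rulePostProcessing:root];                               // ^(nil x) -> x
}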
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBitSet.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBitSet.h
deleted file mode 100755
index a1be117..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBitSet.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import <CoreFoundation/CoreFoundation.h>
-#import "ANTLRToken.h"
-
-#define BITS (sizeof(NSUInteger) * 8)
-#define LOG_BITS ((sizeof(NSUInteger)==8)?6:5)
-
-// A simple wrapper around CoreFoundation bit vectors to shield the rest of the implementation
-// from the specifics of the BitVector initialization and query functions.
-// This is fast, so there is no need to reinvent the wheel just yet.
-
-@interface ANTLRBitSet : NSObject < NSMutableCopying > {
-	CFMutableBitVectorRef bitVector;
-}
-
-#pragma mark Class Methods
-
-+ (ANTLRBitSet *) newANTLRBitSet;
-+ (ANTLRBitSet *) newANTLRBitSetWithType:(ANTLRTokenType)type;
-/** Construct a ANTLRBitSet given the size
- * @param nbits The size of the ANTLRBitSet in bits
- */
-+ (ANTLRBitSet *) newANTLRBitSetWithNBits:(NSUInteger)nbits;
-+ (ANTLRBitSet *) newANTLRBitSetWithArray:(NSMutableArray *)types;
-+ (ANTLRBitSet *) newANTLRBitSetWithBits:(const unsigned long long *)theBits Count:(NSUInteger)longCount;
-
-+ (ANTLRBitSet *) of:(NSUInteger)el;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c And4:(NSUInteger)d;
-
-#pragma mark Initializer
-
-- (ANTLRBitSet *) init;
-- (ANTLRBitSet *) initWithType:(ANTLRTokenType)type;
-- (ANTLRBitSet *) initWithNBits:(NSUInteger)nbits;
-- (ANTLRBitSet *) initWithBitVector:(CFMutableBitVectorRef)theBitVector;
-- (ANTLRBitSet *) initWithBits:(const unsigned long long const*)theBits Count:(NSUInteger)theCount;
-- (ANTLRBitSet *) initWithArrayOfBits:(NSArray *)theArray;
-
-#pragma mark Operations
-- (ANTLRBitSet *) or:(ANTLRBitSet *) aBitSet;
-- (void) orInPlace:(ANTLRBitSet *) aBitSet;
-- (void) add:(NSUInteger) bit;
-- (void) remove:(NSUInteger) bit;
-- (void) setAllBits:(BOOL) aState;
-
-- (NSInteger) numBits;
-- (NSUInteger) size;
-- (void) setSize:(NSUInteger) noOfWords;
-
-#pragma mark Informational
-- (unsigned long long) bitMask:(NSUInteger) bitNumber;
-- (BOOL) member:(NSUInteger)bitNumber;
-- (BOOL) isNil;
-- (NSString *) toString;
-- (NSString *) description;
-
-#pragma mark NSCopying support
-
-- (id) mutableCopyWithZone:(NSZone *) theZone;
-
-
-//private
-- (CFMutableBitVectorRef) _bitVector;
-@end
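
As a quick orientation to the bit-set API declared above, a usage sketch in the follow-set style it is typically used for; the token-type values are arbitrary placeholders.

#import "ANTLRBitSet.h"

// Sketch: build a set of token types and test membership.
static void bitSetDemo(void) {
    ANTLRBitSet *follow = [ANTLRBitSet newANTLRBitSetWithNBits:64];
    [follow add:4];                                  // e.g. a SEMI token type
    [follow add:7];                                  // e.g. an RPAREN token type
    if ([follow member:4]) {
        NSLog(@"follow set: %@", [follow toString]); // prints the set contents
    }
    [follow remove:7];
}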
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBufferedTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBufferedTokenStream.h
deleted file mode 100644
index 198a6f7..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBufferedTokenStream.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenStream.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRBitSet.h"
-
-@interface ANTLRBufferedTokenStream : NSObject <ANTLRTokenStream> 
-{
-id<ANTLRTokenSource> tokenSource;
-    
-    /** Record every single token pulled from the source so we can reproduce
-     *  chunks of it later.  The buffer in LookaheadStream overlaps sometimes
-     *  as its moving window moves through the input.  This list captures
-     *  everything so we can access complete input text.
-     */
-NSMutableArray *tokens;
-    
-    /** Track the last mark() call result value for use in rewind(). */
-NSInteger lastMarker;
-    
-    /** The index into the tokens list of the current token (next token
-     *  to consume).  tokens[p] should be LT(1).  p=-1 indicates need
-     *  to initialize with first token.  The ctor doesn't get a token.
-     *  First call to LT(1) or whatever gets the first token and sets p=0;
-     */
-NSInteger p;
-    
-NSInteger range; // how deep have we gone?
-    
-}
-@property (retain, getter=getTokenSource,setter=setTokenSource) id<ANTLRTokenSource> tokenSource;
-@property (retain, getter=getTokens,setter=setTokens) NSMutableArray *tokens;
-@property (assign, getter=getLastMarker,setter=setLastMarker) NSInteger lastMarker;
-@property (assign, getter=getIndex,setter=setIndex) NSInteger p;
-@property (assign, getter=getRange,setter=setRange) NSInteger range;
-
-+ (ANTLRBufferedTokenStream *) newANTLRBufferedTokenStream;
-+ (ANTLRBufferedTokenStream *) newANTLRBufferedTokenStreamWith:(id<ANTLRTokenSource>)aSource;
-- (id) initWithSource:(id<ANTLRTokenSource>)aSource;
-- (id) copyWithZone:(NSZone *)aZone;
-- (NSInteger) getIndex;
-- (void) setIndex:(NSInteger)index;
-- (NSInteger) getRange;
-- (void) setRange:(NSInteger)anInt;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) reset;
-- (void) seek:(NSInteger) index;
-- (NSInteger) size;
-- (void) consume;
-- (void) sync:(NSInteger) i;
-- (void) fetch:(NSInteger) n;
-- (id<ANTLRToken>) getToken:(NSInteger) i;
-- (NSMutableArray *)getFrom:(NSInteger)startIndex To:(NSInteger) stopIndex;
-- (NSInteger) LA:(NSInteger)k;
-- (id<ANTLRToken>) LT:(NSInteger) k;
-- (id<ANTLRToken>) LB:(NSInteger) k;
-- (void) setup;
-- (id<ANTLRTokenSource>) getTokenSource;
-- (void) setTokenSource:(id<ANTLRTokenSource>) aTokenSource;
-- (NSMutableArray *)getTokens;
-- (NSString *) getSourceName;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex With:(ANTLRBitSet *)types;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithList:(NSMutableArray *)types;
-- (NSMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithType:(NSInteger)ttype;
-- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startIndex ToToken:(id<ANTLRToken>)stopIndex;
-- (void) fill;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBufferedTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBufferedTreeNodeStream.h
deleted file mode 100644
index 8618ea2..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRBufferedTreeNodeStream.h
+++ /dev/null
@@ -1,156 +0,0 @@
-//
-//  ANTLRBufferedTreeNodeStream.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRCommonTreeNodeStream.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRTreeIterator.h"
-#import "ANTLRIntArray.h"
-
-#define DEFAULT_INITIAL_BUFFER_SIZE 100
-#define INITIAL_CALL_STACK_SIZE 10
-
-#ifdef DONTUSENOMO
-@interface ANTLRStreamIterator : ANTLRTreeIterator
-{
-    NSInteger idx;
-    ANTLRBufferedTreeNodeStream input;
-    NSMutableArray *nodes;
-}
-
-+ (id) newANTLRStreamIterator:(ANTLRBufferedTreeNodeStream *) theStream;
-
-- (id) initWithStream:(ANTLRBufferedTreeNodeStream *) theStream;
-
-- (BOOL) hasNext;
-- (id) next;
-- (void) remove;
-@end
-#endif
-
-@interface ANTLRBufferedTreeNodeStream : NSObject <ANTLRTreeNodeStream> 
-{
-	id<ANTLRTree> up;
-	id<ANTLRTree> down;
-	id<ANTLRTree> eof;
-	
-	NSMutableArray *nodes;
-	
-	id<ANTLRTree> root; // root
-	
-	id<ANTLRTokenStream> tokens;
-	ANTLRCommonTreeAdaptor *adaptor;
-	
-	BOOL uniqueNavigationNodes;
-	NSInteger p;
-	NSInteger lastMarker;
-	ANTLRIntArray *calls;
-	
-	NSEnumerator *e;
-    id currentSymbol;
-	
-}
-
-@property (retain, getter=getUp, setter=setUp:) id<ANTLRTree> up;
-@property (retain, getter=getDown, setter=setDown:) id<ANTLRTree> down;
-@property (retain, getter=getEof, setter=setEof:) id<ANTLRTree> eof;
-@property (retain, getter=getNodes, setter=setNodes:) NSMutableArray *nodes;
-@property (retain, getter=getTreeSource, setter=setTreeSource:) id<ANTLRTree> root;
-@property (retain, getter=getTokenStream, setter=setTokenStream:) id<ANTLRTokenStream> tokens;
-@property (retain, getter=getAdaptor, setter=setAdaptor:) ANTLRCommonTreeAdaptor *adaptor;
-@property (assign, getter=getUniqueNavigationNodes, setter=setUniqueNavigationNodes:) BOOL uniqueNavigationNodes;
-@property (assign, getter=getIndex, setter=setIndex:) NSInteger p;
-@property (assign, getter=getLastMarker, setter=setLastMarker:) NSInteger lastMarker;
-@property (retain, getter=getCalls, setter=setCalls:) ANTLRIntArray *calls;
-@property (retain, getter=getEnum, setter=setEnum:) NSEnumerator *e;
-@property (retain, getter=getCurrentSymbol, setter=setCurrentSymbol:) id currentSymbol;
-
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTree>)tree;
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTreeAdaptor>)adaptor Tree:(id<ANTLRTree>)tree;
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTreeAdaptor>)adaptor Tree:(id<ANTLRTree>)tree withBufferSize:(NSInteger)initialBufferSize;
-
-#pragma mark Constructor
-- (id) initWithTree:(id<ANTLRTree>)tree;
-- (id) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)anAdaptor Tree:(id<ANTLRTree>)tree;
-- (id) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)anAdaptor Tree:(id<ANTLRTree>)tree WithBufferSize:(NSInteger)bufferSize;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-// protected methods. DO NOT USE
-#pragma mark Protected Methods
-- (void) fillBuffer;
-- (void) fillBufferWithTree:(id<ANTLRTree>) tree;
-- (NSInteger) getNodeIndex:(id<ANTLRTree>) node;
-- (void) addNavigationNode:(NSInteger) type;
-- (id) getNode:(NSInteger) i;
-- (id) LT:(NSInteger) k;
-- (id) getCurrentSymbol;
-- (id) LB:(NSInteger) i;
-#pragma mark General Methods
-- (NSString *) getSourceName;
-
-- (id<ANTLRTokenStream>) getTokenStream;
-- (void) setTokenStream:(id<ANTLRTokenStream>) tokens;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>) anAdaptor;
-
-- (BOOL)getUniqueNavigationNodes;
-- (void) setUniqueNavigationNodes:(BOOL)aVal;
-
-- (void) consume;
-- (NSInteger) LA:(NSInteger) i;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (NSInteger) getIndex;
-- (void) setIndex:(NSInteger) idx;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) seek:(NSInteger) idx;
-
-- (void) push:(NSInteger) i;
-- (NSInteger) pop;
-
-- (void) reset;
-- (NSUInteger) count;
-- (NSEnumerator *) objectEnumerator;
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-
-- (NSString *) toTokenTypeString;
-- (NSString *) toTokenString:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *) toStringFromNode:(id)aStart ToNode:(id)aStop;
-
-// getters and setters
-- (NSMutableArray *) getNodes;
-- (id<ANTLRTree>) getEof;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCharStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCharStream.h
deleted file mode 100755
index 379734b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCharStream.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRIntStream.h"
-
-#define	ANTLRCharStreamEOF -1
-
-
-@protocol ANTLRCharStream < ANTLRIntStream >
-
-- (NSString *) substringWithRange:(NSRange) theRange;
-
-/** Get the ith character of lookahead.  This is usually the same as
- *  LA(i).  This will be used for labels in the generated
- *  lexer code.  I'd prefer to return a char here type-wise, but it's
- *  probably better to be 32-bit clean and be consistent with LA.
- */
-- (NSInteger)LT:(NSInteger) i;
-
-// ANTLR tracks the line information automatically
-- (NSInteger) getLine;
-
-// Because this stream can rewind, we need to be able to reset the line
-- (void) setLine:(NSInteger) theLine;
-
-// The index of the character relative to the beginning of the line 0..n-1
-- (NSInteger) getCharPositionInLine;
-
-- (void) setCharPositionInLine:(NSInteger) thePos;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCharStreamState.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCharStreamState.h
deleted file mode 100644
index 2787c76..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCharStreamState.h
+++ /dev/null
@@ -1,58 +0,0 @@
-//
-//  ANTLRCharStreamState.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c)  2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRCharStreamState : NSObject
-{
-NSInteger p;
-NSInteger line;
-NSInteger charPositionInLine;
-}
-
-@property (getter=getP,setter=setP:) NSInteger p;
-@property (getter=getLine,setter=setLine:) NSInteger line;
-@property (getter=getCharPositionInLine,setter=setCharPositionInLine:) NSInteger charPositionInLine;
-
-+ newANTLRCharStreamState;
-
-- (id) init;
-
-- (NSInteger) getP;
-- (void) setP: (NSInteger) anIndex;
-
-- (NSInteger) getLine;
-- (void) setLine: (NSInteger) aLine;
-
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger)aCharPositionInLine;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonErrorNode.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonErrorNode.h
deleted file mode 100644
index 79badc1..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonErrorNode.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-//  ANTLRCommonErrorNode.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-#import "ANTLRTokenStream.h"
-//#import "ANTLRIntStream.h"
-//#import "ANTLRToken.h"
-#import "ANTLRUnWantedTokenException.h"
-
-@interface ANTLRCommonErrorNode : ANTLRCommonTree
-{
-id<ANTLRIntStream> input;
-id<ANTLRToken> startToken;
-id<ANTLRToken> stopToken;
-ANTLRRecognitionException *trappedException;
-}
-
-+ (id) newANTLRCommonErrorNode:(id<ANTLRTokenStream>)anInput
-                  From:(id<ANTLRToken>)startToken
-                    To:(id<ANTLRToken>)stopToken
-                     Exception:(ANTLRRecognitionException *) e;
-
-- (id) initWithInput:(id<ANTLRTokenStream>)anInput
-                From:(id<ANTLRToken>)startToken
-                  To:(id<ANTLRToken>)stopToken
-           Exception:(ANTLRRecognitionException *) e;
-- (BOOL) isNil;
-
-- (NSInteger) getType;
-
-- (NSString *) getText;
-
-- (NSString *) toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonToken.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonToken.h
deleted file mode 100755
index 8662378..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonToken.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRCharStream.h"
-
-@interface ANTLRCommonToken : NSObject < ANTLRToken > {
-	NSString *text;
-	NSInteger type;
-	// information about the Token's position in the input stream
-	NSUInteger line;
-	NSUInteger charPositionInLine;
-	NSUInteger channel;
-	// this token's position in the TokenStream
-	NSUInteger index;
-	
-	// indices into the CharStream to avoid copying the text
-	// can manually override the text by using -setText:
-	NSUInteger startIndex;
-	NSUInteger stopIndex;
-	// the actual input stream this token was found in
-	id<ANTLRCharStream> input;
-}
-
-@property (retain, getter=getText, setter=setText:) NSString *text;
-@property (assign, getter=getType, setter=setType:) NSInteger type;
-@property (assign, getter=getLine, setter=setLine:) NSUInteger line;
-@property (assign, getter=getCharPositionInLine, setter=setCharPositionInLine:) NSUInteger charPositionInLine;
-@property (assign, getter=getChannel, setter=setChannel:) NSUInteger channel;
-@property (assign, getter=getTokenIndex, setter=setTokenIndex:) NSUInteger index;
-@property (assign, getter=getStart, setter=setStart:) NSUInteger startIndex;
-@property (assign, getter=getStop, setter=setStop:) NSUInteger stopIndex;
-@property (retain, getter=getInput, setter=setInput:) id<ANTLRCharStream> input;
-
-+ (void) initialize;
-+ (ANTLRCommonToken *) newANTLRCommonToken;
-+ (ANTLRCommonToken *) newANTLRCommonToken:(id<ANTLRCharStream>)anInput
-                                      Type:(NSInteger)aTType
-                                   Channel:(NSInteger)aChannel
-                                     Start:(NSInteger)aStart
-                                      Stop:(NSInteger)aStop;
-+ (ANTLRCommonToken *) newANTLRCommonToken:(ANTLRTokenType)aType;
-+ (id<ANTLRToken>) newANTLRCommonToken:(NSInteger)tokenType Text:(NSString *)tokenText;
-+ (id<ANTLRToken>) newANTLRCommonTokenWithToken:(id<ANTLRToken>)fromToken;
-+ (id<ANTLRToken>) eofToken;
-+ (id<ANTLRToken>) skipToken;
-+ (id<ANTLRToken>) invalidToken;
-+ (ANTLRTokenChannel) defaultChannel;
-
-// designated initializer. This is used as the default way to initialize a Token in the generated code.
-- (ANTLRCommonToken *) init;
-- (ANTLRCommonToken *) initWithInput:(id<ANTLRCharStream>)anInput
-                                Type:(NSInteger)aTType
-                             Channel:(NSInteger)aChannel
-                               Start:(NSInteger)theStart
-                                Stop:(NSInteger)theStop;
-- (ANTLRCommonToken *) initWithToken:(ANTLRCommonToken *)aToken;
-- (ANTLRCommonToken *) initWithType:(ANTLRTokenType)aType;
-- (ANTLRCommonToken *) initWithType:(ANTLRTokenType)aTType Text:(NSString *)tokenText;
-
-- (id<ANTLRCharStream>) getInput;
-- (void) setInput: (id<ANTLRCharStream>) anInput;
-
-- (NSUInteger) getStart;
-- (void) setStart: (NSUInteger) aStart;
-
-- (NSUInteger) getStop;
-- (void) setStop: (NSUInteger) aStop;
-
-// the index of this Token into the TokenStream
-- (NSUInteger) getTokenIndex;
-- (void) setTokenIndex: (NSUInteger) aTokenIndex;
-
-// conform to NSCopying
-- (id) copyWithZone:(NSZone *)theZone;
-
-- (NSString *) description;
-- (NSString *) toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTokenStream.h
deleted file mode 100755
index 59f9d5e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTokenStream.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenStream.h"
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRBufferedTokenStream.h"
-
-@interface ANTLRCommonTokenStream : ANTLRBufferedTokenStream < ANTLRTokenStream >
-{
-	NSMutableDictionary *channelOverride;
-	NSInteger channel;
-}
-
-@property (retain, getter=getChannelOverride,setter=setChannelOverride) NSMutableDictionary *channelOverride;
-@property (assign, getter=getChannel,setter=setChannel) NSInteger channel;
-
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStream;
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStreamWithTokenSource:(id<ANTLRTokenSource>)theTokenSource;
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStreamWithTokenSource:(id<ANTLRTokenSource>)theTokenSource
-                                                               Channel:(NSInteger)aChannel;
-
-- (id) init;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource Channel:(NSInteger)aChannel;
-
-- (id<ANTLRTokenSource>) getTokenSource;
-- (void) setTokenSource: (id<ANTLRTokenSource>) aTokenSource;
-
-- (void) consume;
-- (id<ANTLRToken>) LT:(NSInteger)k;
-- (id<ANTLRToken>) LB:(NSInteger)k;
-
-- (NSInteger) skipOffChannelTokens:(NSInteger) i;
-- (NSInteger) skipOffChannelTokensReverse:(NSInteger) i;
-
-- (void)setup;
-
-- (NSArray *) tokensInRange:(NSRange)aRange;
-- (NSArray *) tokensInRange:(NSRange)aRange inBitSet:(ANTLRBitSet *)aBitSet;
-- (NSArray *) tokensInRange:(NSRange)aRange withTypes:(NSArray *)tokenTypes;
-- (NSArray *) tokensInRange:(NSRange)aRange withType:(NSInteger)tokenType;
-
-- (id<ANTLRToken>) getToken:(NSInteger)i;
-
-- (NSInteger) size;
-- (NSInteger) getIndex;
-- (void) rewind;
-- (void) rewind:(NSInteger)marker;
-- (void) seek:(NSInteger)index;
-
-- (NSString *) toString;
-- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger)getChannel;
-- (void)setChannel:(NSInteger)aChannel;
-
-- (NSMutableDictionary *)getChannelOverride;
-- (void)setChannelOverride:(NSMutableDictionary *)anOverride;
-
-@end
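
For reference, a minimal sketch of how a generated parser typically drives this class; the token source (normally a generated lexer) is assumed to be supplied by the caller.

#import "ANTLRCommonTokenStream.h"

// Sketch: buffer tokens from a source and peek at the stream.
static void peekAtTokens(id<ANTLRTokenSource> lexer) {
    ANTLRCommonTokenStream *tokens =
        [ANTLRCommonTokenStream newANTLRCommonTokenStreamWithTokenSource:lexer];
    id<ANTLRToken> ahead = [tokens LT:1];   // 1-based lookahead; does not consume
    NSLog(@"next on-channel token: %@", ahead);
    [tokens consume];                        // advance past it
    NSLog(@"buffered tokens so far: %ld", (long)[tokens size]);
}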
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTree.h
deleted file mode 100755
index 0966051..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTree.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonToken.h"
-#import "ANTLRBaseTree.h"
-
-@interface ANTLRCommonTree : ANTLRBaseTree <ANTLRTree> {
-	ANTLRCommonToken *token;
-	NSInteger startIndex;
-	NSInteger stopIndex;
-    ANTLRCommonTree *parent;
-    NSInteger childIndex;
-}
-
-@property (retain, getter=getANTLRCommonToken, setter=setANTLRCommonToken) ANTLRCommonToken *token;
-@property (assign, getter=getTokenStartIndex, setter=setTokenStartIndex) NSInteger startIndex;
-@property (assign, getter=getTokenStopIndex, setter=setTokenStopIndex) NSInteger stopIndex;
-@property (retain, getter=getParent, setter=setParent:) ANTLRCommonTree *parent;
-@property (assign, getter=getChildIndex, setter=setChildIndex) NSInteger childIndex;
-
-+ (ANTLRCommonTree *) invalidNode;
-+ (ANTLRCommonTree *) newANTLRCommonTree;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithTree:(ANTLRCommonTree *)aTree;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithToken:(ANTLRCommonToken *)aToken;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithTokenType:(NSInteger)tokenType;
-+ (ANTLRCommonTree *) newANTLRCommonTreeWithTokenType:(NSInteger)aTType Text:(NSString *)theText;
-#ifdef DONTUSEYET
-+ (id<ANTLRTree>) newANTLRCommonTreeWithTokenType:(NSInteger)tokenType;
-+ (id<ANTLRTree>) newANTLRCommonTreeWithToken:(id<ANTLRToken>)fromToken TokenType:(NSInteger)tokenType;
-+ (id<ANTLRTree>) newANTLRCommonTreeWithToken:(id<ANTLRToken>)fromToken TokenType:(NSInteger)tokenType Text:(NSString *)tokenText;
-+ (id<ANTLRTree>) newANTLRCommonTreeWithToken:(id<ANTLRToken>)fromToken Text:(NSString *)tokenText;
-#endif
-
-- (id) init;
-- (id) initWithTreeNode:(ANTLRCommonTree *)aNode;
-- (id) initWithToken:(ANTLRCommonToken *)aToken;
-- (id) initWithTokenType:(NSInteger)aTokenType;
-- (id) initWithTokenType:(NSInteger)aTokenType Text:(NSString *)theText;
-
-- (id<ANTLRTree>) copyWithZone:(NSZone *)aZone;
-
-- (BOOL) isNil;
-
-- (ANTLRCommonToken *) getToken;
-- (void) setToken:(ANTLRCommonToken *)aToken;
-- (id<ANTLRTree>) dupNode;
-- (NSInteger) getType;
-- (NSString *) getText;
-- (NSUInteger) getLine;
-- (NSUInteger) getCharPositionInLine;
-- (ANTLRCommonTree *) getParent;
-- (void) setParent:(ANTLRCommonTree *) t;
-
-#ifdef DONTUSENOMO
-- (NSString *) treeDescription;
-#endif
-- (NSString *) description;
-- (void) setUnknownTokenBoundaries;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex: (NSInteger) aStartIndex;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex: (NSInteger) aStopIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTreeAdaptor.h
deleted file mode 100755
index 53287e6..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTreeAdaptor.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRBaseTreeAdaptor.h"
-
-@interface ANTLRCommonTreeAdaptor : ANTLRBaseTreeAdaptor {
-}
-
-+ (id<ANTLRTree>) newEmptyTree;
-+ (ANTLRCommonTreeAdaptor *)newANTLRCommonTreeAdaptor;
-- (id) init;
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)t;    
-- (ANTLRCommonTree *)createTree:(ANTLRCommonToken *)aToken;
-- (ANTLRCommonTree *)createTree:(NSInteger)tokenType Text:(NSString *)text;
-- (id<ANTLRToken>)createToken:(NSInteger)tokenType Text:(NSString *)text;
-- (void) setTokenBoundaries:(id<ANTLRTree>)t From:(id<ANTLRToken>)startToken To:(id<ANTLRToken>)stopToken;
-- (NSInteger)getTokenStartIndex:(id<ANTLRTree>)t;
-- (NSInteger)getTokenStopIndex:(id<ANTLRTree>)t;
-- (NSString *)getText:(id<ANTLRTree>)t;
-- (void)setText:(id<ANTLRTree>)t Text:(NSString *)text;
-- (NSInteger)getType:(id<ANTLRTree>)t;
-- (void) setType:(id<ANTLRTree>)t Type:(NSInteger)tokenType;
-- (id<ANTLRToken>)getToken:(id<ANTLRTree>)t;
-- (id<ANTLRTree>)getChild:(id<ANTLRTree>)t At:(NSInteger)i;
-- (void) setChild:(id<ANTLRTree>)t At:(NSInteger)i Child:(id<ANTLRTree>)child;
-- (NSInteger)getChildCount:(id<ANTLRTree>)t;
-- (id<ANTLRTree>)getParent:(id<ANTLRTree>)t;
-- (void)setParent:(id<ANTLRTree>)t With:(id<ANTLRTree>)parent;
-- (NSInteger)getChildIndex:(id<ANTLRTree>)t;
-- (void)setChildIndex:(id<ANTLRTree>)t With:(NSInteger)index;
-- (void)replaceChildren:(id<ANTLRTree>)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id<ANTLRTree>)t;
-- (id)copyWithZone:(NSZone *)zone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTreeNodeStream.h
deleted file mode 100755
index 4c68f2e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRCommonTreeNodeStream.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-#import "ANTLRCommonTreeNodeStream.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRTreeNodeStream.h"
-#import "ANTLRTreeIterator.h"
-#import "ANTLRIntArray.h"
-
-@interface ANTLRCommonTreeNodeStream : ANTLRLookaheadStream <ANTLRTreeNodeStream> {
-#define DEFAULT_INITIAL_BUFFER_SIZE 100
-#define INITIAL_CALL_STACK_SIZE 10
-    
-/** Pull nodes from which tree? */
-id root;
-    
-/** If this tree (root) was created from a token stream, track it. */
-id <ANTLRTokenStream> tokens;
-    
-	/** What tree adaptor was used to build these trees */
-ANTLRCommonTreeAdaptor *adaptor;
-    
-/** The tree iterator we are using */
-ANTLRTreeIterator *it;
-    
-/** Stack of indexes used for push/pop calls */
-ANTLRIntArray *calls;    
-    
-/** Treat (nil A B C) trees like flat A B C streams */
-BOOL hasNilRoot;
-    
-/** Tracks tree depth.  Level=0 means we're at root node level. */
-NSInteger level;
-}
-@property (retain, getter=getRoot, setter=setRoot:) ANTLRCommonTree *root;
-@property (retain, getter=getTokens,setter=setTokens:) id<ANTLRTokenStream> tokens;
-@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) ANTLRCommonTreeAdaptor *adaptor;
-
-+ (ANTLRCommonTreeNodeStream *) newANTLRCommonTreeNodeStream:(ANTLRCommonTree *)theTree;
-+ (ANTLRCommonTreeNodeStream *) newANTLRCommonTreeNodeStream:(id<ANTLRTreeAdaptor>)anAdaptor Tree:(ANTLRCommonTree *)theTree;
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)adaptor Tree:(ANTLRCommonTree *)theTree;
-    
-- (void) reset;
-    
-    /** Pull elements from tree iterator.  Track tree level 0..max_level.
-     *  If the tree is nil-rooted, don't emit the initial nil and DOWN nor the final UP.
-     */
-- (id) nextElement;
-    
-- (BOOL) isEOF:(id<ANTLRTree>) o;
-- (void) setUniqueNavigationNodes:(BOOL) uniqueNavigationNodes;
-    
-- (id) getTreeSource;
-    
-- (NSString *) getSourceName;
-    
-- (id<ANTLRTokenStream>) getTokenStream;
-    
-- (void) setTokenStream:(id<ANTLRTokenStream>) tokens;
-    
-- (ANTLRCommonTreeAdaptor *) getTreeAdaptor;
-    
-- (void) setTreeAdaptor:(ANTLRCommonTreeAdaptor *) adaptor;
-    
-- (NSInteger) LA:(NSInteger) i;
-    
-- (ANTLRCommonTree *)getNode:(NSInteger) i;
-
-    /** Make stream jump to a new location, saving old location.
-     *  Switch back with pop().
-     */
-- (void) push:(NSInteger) index;
-    
-    /** Seek back to previous index saved during last push() call.
-     *  Return top of stack (return index).
-     */
-- (NSInteger) pop;
-    
-// TREE REWRITE INTERFACE
-    
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-    
-- (NSString *) toStringFromNode:(id<ANTLRTree>)startNode ToNode:(id<ANTLRTree>)stopNode;
-
-/** For debugging; destructive: moves tree iterator to end. */
-- (NSString *) toTokenTypeString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDFA.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDFA.h
deleted file mode 100755
index 9094a3d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDFA.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRNoViableAltException.h"
-
-@interface ANTLRDFA : NSObject {
-	// the tables are set by subclasses to their own static versions.
-	const int *eot;
-	const int *eof;
-	const unichar *min;
-	const unichar *max;
-	const int *accept;
-	const int *special;
-	const int **transition;
-	
-	ANTLRBaseRecognizer *recognizer;
-	NSInteger decisionNumber;
-    NSInteger len;
-}
-
-@property (retain, getter=getRecognizer,setter=setRecognizer:) ANTLRBaseRecognizer *recognizer;
-@property (assign, getter=getDecision,setter=setDecision:) NSInteger decisionNumber;
-@property (assign, getter=getLen,setter=setLen:) NSInteger len;
-
-- (id) initWithRecognizer:(id) theRecognizer;
-// simulate the DFA using the static tables and predict an alternative
-- (NSInteger) predict:(id<ANTLRCharStream>)anInput;
-- (void) noViableAlt:(NSInteger)state Stream:(id<ANTLRIntStream>)anInput;
-
-- (NSInteger) specialStateTransition:(NSInteger)state Stream:(id<ANTLRIntStream>)anInput;
-// - (NSInteger) specialStateTransition:(NSInteger) state;
-//- (unichar) specialTransition:(unichar) state symbol:(NSInteger) symbol;
-
-// hook for debugger support
-- (void) error:(ANTLRNoViableAltException *)nvae;
-
-- (NSString *) description;
-- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment;
-
-+ (void) setIsEmittingDebugInfo:(BOOL) shouldEmitDebugInfo;
-
-- (NSInteger)getDecision;
-- (void)setDecision:(NSInteger)aDecison;
-
-- (ANTLRBaseRecognizer *)getRecognizer;
-- (void)setRecognizer:(ANTLRBaseRecognizer *)aRecognizer;
-- (NSInteger)length;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebug.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebug.h
deleted file mode 100755
index 87383c9..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebug.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDebugEventListener.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugParser.h"
-#import "ANTLRDebugTokenStream.h"
-#import "ANTLRDebugTreeParser.h"
-#import "ANTLRDebugTreeNodeStream.h"
-#import "ANTLRDebugTreeAdaptor.h"
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugEventListener.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugEventListener.h
deleted file mode 100755
index c2bee6c..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugEventListener.h
+++ /dev/null
@@ -1,275 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRDebugEventListener 
-
-#define ANTLRDebugProtocolVersion 1
-
-/** The parser has just entered a rule.  No decision has been made about
-*  which alt is predicted.  This is fired AFTER init actions have been
-*  executed.  Attributes are defined and available etc...
-*/
-- (void) enterRule:(NSString *)ruleName;
-
-/** Because rules can have lots of alternatives, it is very useful to
-*  know which alt you are entering.  This is 1..n for n alts.
-*/
-- (void) enterAlt:(NSInteger)alt;
-
-/** This is the last thing executed before leaving a rule.  It is
-*  executed even if an exception is thrown.  This is triggered after
-*  error reporting and recovery have occurred (unless the exception is
-*  not caught in this rule).  This implies an "exitAlt" event.
-*/
-- (void) exitRule:(NSString *)ruleName;
-
-/** Track entry into any (...) subrule or other EBNF construct */
-- (void) enterSubRule:(NSInteger)decisionNumber;
-
-- (void) exitSubRule:(NSInteger)decisionNumber;
-
-/** Every decision, fixed k or arbitrary, has an enter/exit event
-*  so that a GUI can easily track what LT/consume events are
-*  associated with prediction.  You will see a single enter/exit
-*  subrule but multiple enter/exit decision events, one for each
-*  loop iteration.
-*/
-- (void) enterDecision:(NSInteger)decisionNumber;
-
-- (void) exitDecision:(NSInteger)decisionNumber;
-
-/** An input token was consumed; matched by any kind of element.
-*  Trigger after the token was matched by things like match(), matchAny().
-*/
-- (void) consumeToken:(id<ANTLRToken>)t;
-
-/** An off-channel input token was consumed.
-*  Trigger after the token was matched by things like match(), matchAny().
-*  (unless of course the hidden token is the first thing in the input stream).
-*/
-- (void) consumeHiddenToken:(id<ANTLRToken>)t;
-
-/** Somebody (anybody) looked ahead.  Note that this actually gets
-*  triggered by both LA and LT calls.  The debugger will want to know
-*  which Token object was examined.  Like consumeToken, this indicates
-*  what token was seen at that depth.  A remote debugger cannot look
-*  ahead into a file it doesn't have so LT events must pass the token
-*  even if the info is redundant.
-*/
-- (void) LT:(NSInteger)i foundToken:(id<ANTLRToken>)t;
-
-/** The parser is going to look arbitrarily ahead; mark this location,
-*  the token stream's marker is sent in case you need it.
-*/
-- (void) mark:(NSInteger)marker;
-
-/** After an arbitrarily long lookahead as with a cyclic DFA (or with
-*  any backtrack), this informs the debugger that stream should be
-*  rewound to the position associated with marker.
-*/
-- (void) rewind:(NSInteger)marker;
-
-/** Rewind to the input position of the last marker.
-*  Used currently only after a cyclic DFA and just
-*  before starting a sem/syn predicate to get the
-*  input position back to the start of the decision.
-*  Do not "pop" the marker off the state.  mark(i)
-*  and rewind(i) should balance still.
-*/
-- (void) rewind;
-
-- (void) beginBacktrack:(NSInteger)level;
-
-- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful;
-
-/** To watch a parser move through the grammar, the parser needs to
-*  inform the debugger what line/charPos it is passing in the grammar.
-*  For now, this does not know how to switch from one grammar to the
-*  other and back for island grammars etc...
-*
-*  This should also allow breakpoints because the debugger can stop
-*  the parser whenever it hits this line/pos.
-*/
-- (void) locationLine:(NSInteger)line column:(NSInteger)pos;
-
-/** A recognition exception occurred such as NoViableAltException.  I made
-*  this a generic event so that I can alter the exception hierarchy later
-*  without having to alter all the debug objects.
-*
-*  Upon error, the stack of enter rule/subrule must be properly unwound.
-*  If no viable alt occurs it is within an enter/exit decision, which
-*  also must be rewound.  Even the rewind for each mark must be unwound.
-*  In the Java target this is pretty easy using try/finally, if a bit
-*  ugly in the generated code.  The rewind is generated in DFA.predict()
-*  actually so no code needs to be generated for that.  For languages
-*  w/o this "finally" feature (C++?), the target implementor will have
-*  to build an event stack or something.
-*
-*  Across a socket for remote debugging, only the RecognitionException
-*  data fields are transmitted.  The token object or whatever that
-*  caused the problem was the last object referenced by LT.  The
-*  immediately preceding LT event should hold the unexpected Token or
-*  char.
-*
-*  Here is a sample event trace for grammar:
-*
-*  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
-*    | D
-*    ;
-*
-*  The sequence for this rule (with no viable alt in the subrule) for
-*  input 'c c' (there are 3 tokens) is:
-*
-*		commence
-*		LT(1)
-*		enterRule b
-*		location 7 1
-*		enter decision 3
-*		LT(1)
-*		exit decision 3
-*		enterAlt1
-*		location 7 5
-*		LT(1)
-*		consumeToken [c/<4>,1:0]
-*		location 7 7
-*		enterSubRule 2
-*		enter decision 2
-*		LT(1)
-*		LT(1)
-*		recognitionException NoViableAltException 2 1 2
-*		exit decision 2
-*		exitSubRule 2
-*		beginResync
-*		LT(1)
-*		consumeToken [c/<4>,1:1]
-*		LT(1)
-*		endResync
-*		LT(-1)
-*		exitRule b
-*		terminate
-*/
-- (void) recognitionException:(ANTLRRecognitionException *)e;
-
-/** Indicates the recognizer is about to consume tokens to resynchronize
-*  the parser.  Any consume events from here until the recovered event
-*  are not part of the parse--they are dead tokens.
-*/
-- (void) beginResync;
-
-/** Indicates that the recognizer has finished consuming tokens in order
-*  to resynchronize.  There may be multiple beginResync/endResync pairs
-*  before the recognizer comes out of errorRecovery mode (in which
-*  multiple errors are suppressed).  This will be useful
-*  in a GUI where you probably want to grey out tokens that are consumed
-*  but not matched to anything in the grammar.  Anything between
-*  a beginResync/endResync pair was tossed out by the parser.
-*/
-- (void) endResync;
-
-/** A semantic predicate was evaluated with this result and action text */
-- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result;
-
-/** Announce that parsing has begun.  Not technically useful except for
-*  sending events over a socket.  A GUI for example will launch a thread
-*  to connect and communicate with a remote parser.  The thread will want
-*  to notify the GUI when a connection is made.  ANTLR parsers
-*  trigger this upon entry to the first rule (the ruleLevel is used to
-*  figure this out).
-*/
-- (void) commence;
-
-/** Parsing is over; successfully or not.  Mostly useful for telling
-*  remote debugging listeners that it's time to quit.  When the rule
-*  invocation level goes to zero at the end of a rule, we are done
-*  parsing.
-*/
-- (void) terminate;
-
-
-// T r e e  P a r s i n g
-
-/** Input for a tree parser is an AST, but we know nothing for sure
-*  about a node except its type and text (obtained from the adaptor).
-*  This is the analog of the consumeToken method.  Again, the ID is
-*  the hashCode usually of the node so it only works if hashCode is
-*  not implemented.  If the type is UP or DOWN, then
-*  the ID is not really meaningful as it's fixed--there is
-*  just one UP node and one DOWN navigation node.
-*/
-- (void) consumeNode:(NSInteger)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-/** The tree parser looked ahead.  If the type is UP or DOWN,
-*  then the ID is not really meaningful as it's fixed--there is
-*  just one UP node and one DOWN navigation node.
-*/
-- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-
-// A S T  E v e n t s
-
-/** A nil was created (even nil nodes have a unique ID...
-*  they are not "null" per se).  As of 4/28/2006, this
-*  seems to be uniquely triggered when starting a new subtree
-*  such as when entering a subrule in automatic mode and when
-*  building a tree in rewrite mode.
-*/
-- (void) createNilNode:(unsigned)hash;
-
-/** Announce a new node built from text */
-- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type;
-
-/** Announce a new node built from an existing token */
-- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex;
-
-/** Make a node the new root of an existing root.  See
-*
-*  Note: the newRootID parameter is possibly different
-*  than the TreeAdaptor.becomeRoot() newRoot parameter.
-*  In our case, it will always be the result of calling
-*  TreeAdaptor.becomeRoot() and not root_n or whatever.
-*
-*  The listener should assume that this event occurs
-*  only when the current subrule (or rule) subtree is
-*  being reset to newRootID.
-*
-*/
-- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash;
-
-/** Make childID a child of rootID.
-*  @see org.antlr.runtime.tree.TreeAdaptor.addChild()
-*/
-- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash;
-
-/** Set the token start/stop token index for a subtree root or node */
-- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSUInteger)tokenStartIndex To:(NSUInteger)tokenStopIndex;
-
-- (void) waitForDebuggerConnection;
-
-@end
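
For orientation, a minimal conformer to the ANTLRDebugEventListener protocol removed above could simply log each callback. The sketch below is illustrative only: the class name LoggingDebugListener is invented, only a handful of the protocol's methods are shown, and because the protocol declares no optional methods a complete listener would implement the remaining callbacks in the same style.

    #import "ANTLRDebugEventListener.h"

    @interface LoggingDebugListener : NSObject <ANTLRDebugEventListener>
    @end

    @implementation LoggingDebugListener

    // Rule and alternative tracking
    - (void) enterRule:(NSString *)ruleName { NSLog(@"enterRule %@", ruleName); }
    - (void) exitRule:(NSString *)ruleName  { NSLog(@"exitRule %@", ruleName); }
    - (void) enterAlt:(NSInteger)alt        { NSLog(@"enter alt %ld", (long)alt); }

    // Token consumption and errors
    - (void) consumeToken:(id<ANTLRToken>)t { NSLog(@"consumeToken %@", t); }
    - (void) recognitionException:(ANTLRRecognitionException *)e {
        NSLog(@"recognitionException %@", e);
    }

    // Session boundaries
    - (void) commence  { NSLog(@"commence"); }
    - (void) terminate { NSLog(@"terminate"); }

    // ... mark:, rewind:, beginResync, endResync, the tree/AST events, etc.
    // would follow the same pattern in a full implementation.

    @end

Wiring such a listener into a generated parser would produce a trace along the lines of the sample event sequence documented in the protocol comments above.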
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugEventProxy.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugEventProxy.h
deleted file mode 100755
index 59bf67b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugEventProxy.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRDebugEventListener.h"
-#import <sys/socket.h>
-#import <netinet/in.h>
-#import <netinet/tcp.h>
-#include <arpa/inet.h>
-
-// default port for ANTLRWorks
-#define DEFAULT_DEBUGGER_PORT 0xC001
-
-@interface ANTLRDebugEventProxy : NSObject <ANTLRDebugEventListener> {
-	int serverSocket;
-	
-	struct sockaddr debugger_sockaddr;
-	socklen_t debugger_socklen;
-	int debuggerSocket;
-	NSFileHandle *debuggerFH;
-	
-	NSString *grammarName;
-	int debuggerPort;
-}
-
-- (id) init;
-- (id) initWithGrammarName:(NSString *)aGrammarName debuggerPort:(NSInteger)aPort;
-- (void) waitForDebuggerConnection;
-- (void) waitForAck;
-- (void) sendToDebugger:(NSString *)message;
-- (void) sendToDebugger:(NSString *)message waitForResponse:(BOOL)wait;
-
-- (NSInteger) serverSocket;
-- (void) setServerSocket: (NSInteger) aServerSocket;
-
-- (NSInteger) debuggerSocket;
-- (void) setDebuggerSocket: (NSInteger) aDebuggerSocket;
-
-- (NSString *) grammarName;
-- (void) setGrammarName: (NSString *) aGrammarName;
-
-- (NSInteger) debuggerPort;
-- (void) setDebuggerPort: (NSInteger) aDebuggerPort;
-
-- (NSString *) escapeNewlines:(NSString *)aString;
-
-#pragma mark -
-
-#pragma mark DebugEventListener Protocol
-- (void) enterRule:(NSString *)ruleName;
-- (void) enterAlt:(NSInteger)alt;
-- (void) exitRule:(NSString *)ruleName;
-- (void) enterSubRule:(NSInteger)decisionNumber;
-- (void) exitSubRule:(NSInteger)decisionNumber;
-- (void) enterDecision:(NSInteger)decisionNumber;
-- (void) exitDecision:(NSInteger)decisionNumber;
-- (void) consumeToken:(id<ANTLRToken>)t;
-- (void) consumeHiddenToken:(id<ANTLRToken>)t;
-- (void) LT:(NSInteger)i foundToken:(id<ANTLRToken>)t;
-- (void) mark:(NSInteger)marker;
-- (void) rewind:(NSInteger)marker;
-- (void) rewind;
-- (void) beginBacktrack:(NSInteger)level;
-- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful;
-- (void) locationLine:(NSInteger)line column:(NSInteger)pos;
-- (void) recognitionException:(ANTLRRecognitionException *)e;
-- (void) beginResync;
-- (void) endResync;
-- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result;
-- (void) commence;
-- (void) terminate;
-
-
-#pragma mark Tree Parsing
-- (void) consumeNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-
-#pragma mark AST Events
-
-- (void) createNilNode:(unsigned)hash;
-- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type;
-- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex;
-- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash;
-- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash;
-- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSInteger)tokenStartIndex To:(NSInteger)tokenStopIndex;
-
-@end
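
Based on the initializers declared above, a typical way to stand the proxy up is sketched below; the grammar name is invented, error handling is omitted, and the surrounding parser setup is assumed to exist elsewhere.

    #import "ANTLRDebugEventProxy.h"

    // Wait for ANTLRWorks to attach on the default port (0xC001 == 49153),
    // then forward debug events over the socket.
    ANTLRDebugEventProxy *proxy =
        [[ANTLRDebugEventProxy alloc] initWithGrammarName:@"SimpleCalc"
                                             debuggerPort:DEFAULT_DEBUGGER_PORT];
    [proxy waitForDebuggerConnection];   // blocks until the remote debugger connects
    [proxy commence];                    // start of the event stream
    // ... run the parser; each listener callback is serialized to the debugger ...
    [proxy terminate];                   // end of the event stream

Because the proxy itself conforms to ANTLRDebugEventListener, it can be handed to the debug parser and tree parser classes in place of a local listener.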
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugParser.h
deleted file mode 100755
index b23ff50..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugParser.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugTokenStream.h"
-
-@interface ANTLRDebugParser : ANTLRParser {
-	id<ANTLRDebugEventListener> debugListener;
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream;
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-			  debuggerPort:(NSInteger)portNumber;
-// designated initializer
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-			 debugListener:(id<ANTLRDebugEventListener>)theDebugListener
-			  debuggerPort:(NSInteger)portNumber;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTokenStream.h
deleted file mode 100755
index 335b002..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRDebugTokenStream.h"
-#import "ANTLRDebugEventListener.h"
-
-@interface ANTLRDebugTokenStream : NSObject <ANTLRTokenStream>
-{
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTokenStream> input;
-	BOOL initialStreamState;
-    NSInteger lastMarker;
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream debugListener:(id<ANTLRDebugEventListener>)debugger;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTokenStream>) getInput;
-- (void) setInput:(id<ANTLRTokenStream>)aTokenStream;
-
-- (void) consume;
-- (id<ANTLRToken>) getToken:(NSInteger)index;
-- (NSInteger) getIndex;
-- (void) release:(NSInteger)marker;
-- (void) seek:(NSInteger)index;
-- (NSInteger) size;
-- (id<ANTLRTokenSource>) getTokenSource;
-- (NSString *) getSourceName;
-- (NSString *) toString;
-- (NSString *) toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTreeAdaptor.h
deleted file mode 100755
index 41965fa..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTreeAdaptor.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRBaseTreeAdaptor.h"
-#import "ANTLRDebugEventListener.h"
-
-@interface ANTLRDebugTreeAdaptor : ANTLRBaseTreeAdaptor {
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTreeAdaptor> treeAdaptor;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor debugListener:(id<ANTLRDebugEventListener>)aDebugListener;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor: (id<ANTLRTreeAdaptor>) aTreeAdaptor;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTreeNodeStream.h
deleted file mode 100755
index 70f9939..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTreeNodeStream.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRDebugEventListener.h"
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTreeNodeStream.h"
-
-@interface ANTLRDebugTreeNodeStream : NSObject <ANTLRTreeNodeStream> {
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTreeAdaptor> treeAdaptor;
-	id<ANTLRTreeNodeStream> input;
-	BOOL initialStreamState;
-}
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream debugListener:(id<ANTLRDebugEventListener>)debugger;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTreeNodeStream>) getInput;
-- (void) setInput: (id<ANTLRTreeNodeStream>) aTreeNodeStream;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor: (id<ANTLRTreeAdaptor>) aTreeAdaptor;
-
-#pragma mark ANTLRTreeNodeStream conformance
-
-- (id) LT:(NSInteger)k;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setUniqueNavigationNodes:(BOOL)flag;
-
-#pragma mark ANTLRIntStream conformance
-- (void) consume;
-- (NSInteger) LA:(NSUInteger) i;
-- (NSUInteger) mark;
-- (NSUInteger) getIndex;
-- (void) rewind:(NSUInteger) marker;
-- (void) rewind;
-- (void) release:(NSUInteger) marker;
-- (void) seek:(NSUInteger) index;
-- (NSUInteger) size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTreeParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTreeParser.h
deleted file mode 100755
index cbeac76..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRDebugTreeParser.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeParser.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugTreeNodeStream.h"
-
-@interface ANTLRDebugTreeParser : ANTLRTreeParser {
-	id<ANTLRDebugEventListener> debugListener;
-}
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream;
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-				 debuggerPort:(NSInteger)portNumber;
-	// designated initializer
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-				debugListener:(id<ANTLRDebugEventListener>)theDebugListener
-				 debuggerPort:(NSInteger)portNumber;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLREarlyExitException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLREarlyExitException.h
deleted file mode 100755
index 1a89bbb..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLREarlyExitException.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLREarlyExitException : ANTLRRecognitionException {
-	int decisionNumber;
-}
-
-+ (ANTLREarlyExitException *) exceptionWithStream:(id<ANTLRIntStream>) anInputStream decisionNumber:(NSInteger) aDecisionNumber;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream decisionNumber:(NSInteger) aDecisionNumber;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRError.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRError.h
deleted file mode 100644
index f2657af..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRError.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRError.h
-//  ANTLR
-//
-//  Created by Ian Michell on 30/03/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-// [The "BSD licence"]
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-#define ANTLRErrorDomain @"ANTLRError"
-
-#define ANTLRIllegalArgumentException @"ANTLRIllegalArgumentException"
-#define ANTLRIllegalStateException @"IllegalStateException"
-//#define ANTLRRuntimeException @"RuntimeException"
-//#define ANTLRNoSuchMethodException @"NoSuchMethodException"
-//#define ANTLRNoSuchElementException @"NoSuchElementException"
-//#define ANTLRUnsupportedOperationException @"UnsupportedOperationException"
-
-
-/*typedef enum
-{
-	ANTLRIllegalState = 1,
-	ANTLRIllegalArgument = 2,
-	ANTLRRecognitionError = 3,
-	ANTLRMissingTokenError = 4,
-	ANTLRUnwantedTokenError = 5,
-	ANTLRMismatechedTokenError = 6,
-	ANTLRNoViableAltError = 7
-	
-} ANTLRErrorCode;*/
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRFailedPredicateException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRFailedPredicateException.h
deleted file mode 100755
index 9788cba..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRFailedPredicateException.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-
-@interface ANTLRFailedPredicateException : ANTLRRecognitionException
-{
-	NSString *predicate;
-	NSString *ruleName;
-}
-
-@property (retain, getter=getPredicate, setter=setPredicate:) NSString *predicate;
-@property (retain, getter=getRuleName, setter=setRuleName:) NSString *ruleName;
-
-+ (ANTLRFailedPredicateException *) exceptionWithRuleName:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<ANTLRIntStream>)theStream;
-- (ANTLRFailedPredicateException *) initWithRuleName:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<ANTLRIntStream>)theStream;
-
-#ifdef DONTUSEYET
-- (NSString *) getPredicate;
-- (void) setPredicate:(NSString *)thePredicate;
-- (NSString *) getRuleName;
-- (void) setRuleName:(NSString *)theRuleName;
-#endif
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRFastQueue.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRFastQueue.h
deleted file mode 100644
index cf81817..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRFastQueue.h
+++ /dev/null
@@ -1,68 +0,0 @@
-//
-//  ANTLRFastQueue.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRFastQueue : NSObject <NSCopying>
-{
-	NSAutoreleasePool *pool;
-	NSMutableArray *data;
-	NSInteger p;
-}
-
-@property (retain, getter=getPool, setter=setPool:) NSAutoreleasePool *pool;
-@property (retain, getter=getData, setter=setData:) NSMutableArray *data;
-@property (assign, getter=getP, setter=setP:) NSInteger p;
-
-+ (id) newANTLRFastQueue;
-
-- (id) init;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (void) reset;
-- (id) remove;
-- (void) addObject:(id) o;
-- (NSInteger) count;
-- (NSInteger) size;
-- (id) head;
-- (id) objectAtIndex:(NSInteger) i;
-- (void) clear;
-- (NSString *) toString;
-- (NSAutoreleasePool *)getPool;
-- (void)setPool:(NSAutoreleasePool *)aPool;
-- (NSMutableArray *)getData;
-- (void)setData:(NSMutableArray *)myData;
-- (NSInteger) getP;
-- (void) setP:(NSInteger)anInt;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashMap.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashMap.h
deleted file mode 100644
index 04aca7b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashMap.h
+++ /dev/null
@@ -1,102 +0,0 @@
-//
-//  ANTLRHashMap.h
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-#import "ANTLRMapElement.h"
-
-#define GLOBAL_SCOPE       0
-#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRHashMap : ANTLRLinkBase {
-	//	ANTLRHashMap *fNext;
-    //    TStringPool *fPool;
-    NSInteger Scope;
-    NSInteger LastHash;
-    NSInteger BuffSize;
-    ANTLRMapElement *ptrBuffer[HASHSIZE];
-    NSInteger mode;
-}
-
-//@property (copy) ANTLRHashMap *fNext;
-//@property (copy) TStringPool *fPool;
-@property (getter=getScope, setter=setScope:) NSInteger Scope;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
-
-// Construction/Destruction
-+ (id)newANTLRHashMap;
-+ (id)newANTLRHashMapWithLen:(NSInteger)aBuffSize;
-- (id)init;
-- (id)initWithLen:(NSInteger)aBuffSize;
-- (void)dealloc;
-- (ANTLRHashMap *)PushScope:( ANTLRHashMap **)map;
-- (ANTLRHashMap *)PopScope:( ANTLRHashMap **)map;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-// Instance Methods
-/*    form hash value for string s */
-- (NSInteger)hash:(NSString *)s;
-/*   look for s in ptrBuffer  */
-- (ANTLRHashMap *)findscope:(int)level;
-/*   look for s in ptrBuffer  */
-- (id)lookup:(NSString *)s Scope:(int)scope;
-/*   look for s in ptrBuffer  */
-- (id)install:(ANTLRMapElement *)sym Scope:(int)scope;
-/*   look for s in ptrBuffer  */
-- (void)deleteANTLRHashMap:(ANTLRMapElement *)np;
-- (int)RemoveSym:(NSString *)s;
-- (void)delete_chain:(ANTLRMapElement *)np;
-#ifdef DONTUSEYET
-- (int)bld_symtab:(KW_TABLE *)toknams;
-#endif
-- (ANTLRMapElement **)getptrBuffer;
-- (ANTLRMapElement *)getptrBufferEntry:(int)idx;
-- (void)setptrBuffer:(ANTLRMapElement *)np Index:(int)idx;
-- (NSInteger)getScope;
-- (void)setScope:(NSInteger)i;
-- (ANTLRMapElement *)getTType:(NSString *)name;
-- (ANTLRMapElement *)getNameInList:(NSInteger)ttype;
-- (void)putNode:(NSString *)name TokenType:(NSInteger)ttype;
-- (NSInteger)getMode;
-- (void)setMode:(NSInteger)aMode;
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx;
-- (id) objectAtIndex:(NSInteger)idx;
-- (void) setObject:(id)aRule atIndex:(NSInteger)idx;
-- (void)addObject:(id)anObject;
-- (ANTLRMapElement *) getName:(NSString *)aName;
-- (void) putName:(NSString *)name Node:(id)aNode;
-
-- (NSEnumerator *)objectEnumerator;
-- (BOOL) hasNext;
-- (ANTLRMapElement *)nextObject;
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashMap.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashMap.m
deleted file mode 100644
index a23426b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashMap.m
+++ /dev/null
@@ -1,521 +0,0 @@
-//
-//  ANTLRHashMap.m
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRHashMap.h"
-
-static NSInteger itIndex;
-
-/*
- * Start of ANTLRHashMap
- */
-@implementation ANTLRHashMap
-
-@synthesize Scope;
-@synthesize LastHash;
-
-+(id)newANTLRHashMap
-{
-    ANTLRHashMap *aNewANTLRHashMap;
-    
-    aNewANTLRHashMap = [[ANTLRHashMap alloc] init];
-	return( aNewANTLRHashMap );
-}
-
-+(id)newANTLRHashMapWithLen:(NSInteger)aBuffSize
-{
-    ANTLRHashMap *aNewANTLRHashMap;
-    
-    aNewANTLRHashMap = [[ANTLRHashMap alloc] initWithLen:aBuffSize];
-	return( aNewANTLRHashMap );
-}
-
--(id)init
-{
-    NSInteger idx;
-    
-	if ((self = [super init]) != nil) {
-		fNext = nil;
-        BuffSize = HASHSIZE;
-		Scope = 0;
-		if ( fNext != nil ) {
-			Scope = ((ANTLRHashMap *)fNext)->Scope+1;
-			for( idx = 0; idx < BuffSize; idx++ ) {
-				ptrBuffer[idx] = ((ANTLRHashMap *)fNext)->ptrBuffer[idx];
-			}
-		}
-        mode = 0;
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)aBuffSize
-{
-    NSInteger idx;
-    
-	if ((self = [super init]) != nil) {
-		fNext = nil;
-        BuffSize = aBuffSize;
-		Scope = 0;
-		if ( fNext != nil ) {
-			Scope = ((ANTLRHashMap *)fNext)->Scope+1;
-			for( idx = 0; idx < BuffSize; idx++ ) {
-				ptrBuffer[idx] = ((ANTLRHashMap *)fNext)->ptrBuffer[idx];
-			}
-		}
-        mode = 0;
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-	
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp && tmp != [((ANTLRHashMap *)fNext) getptrBufferEntry:idx] ) {
-                rtmp = tmp;
-                // tmp = [tmp getfNext];
-                tmp = (ANTLRMapElement *)tmp.fNext;
-                [rtmp dealloc];
-            }
-        }
-    }
-	[super dealloc];
-}
-
-- (NSInteger)count
-{
-    id anElement;
-    NSInteger aCnt = 0;
-    
-    for (NSInteger i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aCnt++;
-        }
-    }
-    return aCnt;
-}
-                          
-- (NSInteger) size
-{
-    id anElement;
-    NSInteger aSize = 0;
-    
-    for (NSInteger i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize += sizeof(id);
-        }
-    }
-    return aSize;
-}
-                                  
-                                  
--(void)deleteANTLRHashMap:(ANTLRMapElement *)np
-{
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-    
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp && tmp != (ANTLRLinkBase *)[((ANTLRHashMap *)fNext) getptrBufferEntry:idx] ) {
-                rtmp = tmp;
-                tmp = [tmp getfNext];
-                [rtmp dealloc];
-            }
-        }
-    }
-}
-
--(ANTLRHashMap *)PushScope:(ANTLRHashMap **)map
-{
-    NSInteger idx;
-    ANTLRHashMap *htmp;
-    
-    htmp = [ANTLRHashMap newANTLRHashMap];
-    if ( *map != nil ) {
-        ((ANTLRHashMap *)htmp)->fNext = *map;
-        [htmp setScope:[((ANTLRHashMap *)htmp->fNext) getScope]+1];
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            htmp->ptrBuffer[idx] = ((ANTLRHashMap *)htmp->fNext)->ptrBuffer[idx];
-        }
-    }
-    //    gScopeLevel++;
-    *map = htmp;
-    return( htmp );
-}
-
--(ANTLRHashMap *)PopScope:(ANTLRHashMap **)map
-{
-    NSInteger idx;
-    ANTLRMapElement *tmp;
-	ANTLRHashMap *htmp;
-    
-    htmp = *map;
-    if ( (*map)->fNext != nil ) {
-        *map = (ANTLRHashMap *)htmp->fNext;
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            if ( htmp->ptrBuffer[idx] == nil ||
-                htmp->ptrBuffer[idx] == (*map)->ptrBuffer[idx] ) {
-                break;
-            }
-            tmp = htmp->ptrBuffer[idx];
-            /*
-             * must deal with parms, locals and labels at some point
-             * can not forget the debuggers
-             */
-            htmp->ptrBuffer[idx] = [tmp getfNext];
-            [ tmp dealloc];
-        }
-        *map = (ANTLRHashMap *)htmp->fNext;
-        //        gScopeLevel--;
-    }
-    return( htmp );
-}
-
-#ifdef USERDOC
-/*
- *  HASH        hash entry to get index to table
- *  NSInteger hash( ANTLRHashMap *self, char *s );
- *
- *     Inputs:  char *s             string to find
- *
- *     Returns: NSInteger                 hashed value
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)hash:(NSString *)s       /*    form hash value for string s */
-{
-	NSInteger hashval;
-	const char *tmp;
-    
-	tmp = [s cStringUsingEncoding:NSASCIIStringEncoding];
-	for( hashval = 0; *tmp != '\0'; )
-        hashval += *tmp++;
-	self->LastHash = hashval % BuffSize;
-	return( self->LastHash );
-}
-
-#ifdef USERDOC
-/*
- *  FINDSCOPE  search hashed list for entry
- *  ANTLRHashMap *findscope( ANTLRHashMap *self, NSInteger scope );
- *
- *     Inputs:  NSInteger       scope -- scope level to find
- *
- *     Returns: ANTLRHashMap   pointer to ptrBuffer of proper scope level
- *
- *  Last Revision 9/03/90
- */
-#endif
--(ANTLRHashMap *)findscope:(NSInteger)scope
-{
-    if ( self->Scope == scope ) {
-        return( self );
-    }
-    else if ( fNext ) {
-        return( [((ANTLRHashMap *)fNext) findscope:scope] );
-    }
-    return( nil );              /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  LOOKUP  search hashed list for entry
- *  ANTLRMapElement *lookup( ANTLRHashMap *self, char *s, NSInteger scope );
- *
- *     Inputs:  char     *s          string to find
- *
- *     Returns: ANTLRMapElement  *           pointer to entry
- *
- *  Last Revision 9/03/90
- */
-#endif
--(id)lookup:(NSString *)s Scope:(NSInteger)scope
-{
-    ANTLRMapElement *np;
-    
-    for( np = self->ptrBuffer[[self hash:s]]; np != nil; np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  INSTALL search hashed list for entry
- *  NSInteger install( ANTLRHashMap *self, ANTLRMapElement *sym, NSInteger scope );
- *
- *     Inputs:  ANTLRMapElement    *sym   -- symbol ptr to install
- *              NSInteger         scope -- level to find
- *
- *     Returns: Boolean     TRUE   if installed
- *                          FALSE  if already in table
- *
- *  Last Revision 9/03/90
- */
-#endif
--(ANTLRMapElement *)install:(ANTLRMapElement *)sym Scope:(NSInteger)scope
-{
-    ANTLRMapElement *np;
-    
-    np = [self lookup:[sym getName] Scope:scope ];
-    if ( np == nil ) {
-        [sym retain];
-        [sym setFNext:self->ptrBuffer[ self->LastHash ]];
-        self->ptrBuffer[ self->LastHash ] = sym;
-        return( self->ptrBuffer[ self->LastHash ] );
-    }
-    return( nil );            /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  RemoveSym  search hashed list for entry
- *  NSInteger RemoveSym( ANTLRHashMap *self, char *s );
- *
- *     Inputs:  char     *s          string to find
- *
- *     Returns: NSInteger      indicator of SUCCESS OR FAILURE
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)RemoveSym:(NSString *)s
-{
-    ANTLRMapElement *np, *tmp;
-    NSInteger idx;
-    
-    idx = [self hash:s];
-    for ( tmp = self->ptrBuffer[idx], np = self->ptrBuffer[idx]; np != nil; np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            tmp = [np getfNext];             /* get the next link  */
-            [np dealloc];
-            return( SUCCESS );            /* report SUCCESS     */
-        }
-        tmp = [np getfNext];              //  BAD!!!!!!
-    }
-    return( FAILURE );                    /*   not found      */
-}
-
--(void)delete_chain:(ANTLRMapElement *)np
-{
-    if ( [np getfNext] != nil )
-		[self delete_chain:[np getfNext]];
-	[np dealloc];
-}
-
-#ifdef DONTUSEYET
--(NSInteger)bld_symtab:(KW_TABLE *)toknams
-{
-    NSInteger i;
-    ANTLRMapElement *np;
-    
-    for( i = 0; *(toknams[i].name) != '\0'; i++ ) {
-        // install symbol in ptrBuffer
-        np = [ANTLRMapElement newANTLRMapElement:[NSString stringWithFormat:@"%s", toknams[i].name]];
-        //        np->fType = toknams[i].toknum;
-        [self install:np Scope:0];
-    }
-    return( SUCCESS );
-}
-#endif
-
--(ANTLRMapElement *)getptrBufferEntry:(NSInteger)idx
-{
-	return( ptrBuffer[idx] );
-}
-
--(ANTLRMapElement **)getptrBuffer
-{
-	return( ptrBuffer );
-}
-
--(void)setptrBuffer:(ANTLRMapElement *)np Index:(NSInteger)idx
-{
-	if ( idx < BuffSize ) {
-        [np retain];
-		ptrBuffer[idx] = np;
-    }
-}
-
--(NSInteger)getScope
-{
-	return( Scope );
-}
-
--(void)setScope:(NSInteger)i
-{
-	Scope = i;
-}
-
-- (ANTLRMapElement *)getTType:(NSString *)name
-{
-    return [self lookup:name Scope:0];
-}
-
-/*
- * works only for a map list indexed by TokenNumber rather than by name
- */
-- (ANTLRMapElement *)getNameInList:(NSInteger)ttype
-{
-    ANTLRMapElement *np;
-    NSInteger aTType;
-
-    aTType = ttype % BuffSize;
-    for( np = self->ptrBuffer[aTType]; np != nil; np = [np getfNext] ) {   // use the reduced index so large token types stay in range
-        if ( [np.index integerValue] == ttype ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-- (ANTLRLinkBase *)getName:(NSString *)name
-{
-    return [self lookup:name Scope:0]; /*  nil if not found      */    
-}
-
-- (void)putNode:(NSString *)name TokenType:(NSInteger)ttype
-{
-    ANTLRMapElement *np;
-    
-    // install symbol in ptrBuffer
-    np = [ANTLRMapElement newANTLRMapElementWithName:[NSString stringWithString:name] Type:ttype];
-    //        np->fType = toknams[i].toknum;
-    [self install:np Scope:0];
-}
-
-- (NSInteger)getMode
-{
-    return mode;
-}
-
-- (void)setMode:(NSInteger)aMode
-{
-    mode = aMode;
-}
-
-- (void) addObject:(id)aRule
-{
-    NSInteger idx;
-
-    idx = [self count];
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-/* this may have to handle linking into the chain
- */
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    if (aRule != ptrBuffer[idx]) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (id)objectAtIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    return ptrBuffer[idx];
-}
-
-/* this will never link into the chain
- */
-- (void) setObject:(id)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    if (aRule != ptrBuffer[idx]) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (void)putName:(NSString *)name Node:(id)aNode
-{
-    ANTLRMapElement *np;
-    
-    np = [self lookup:name Scope:0 ];
-    if ( np == nil ) {
-        np = [ANTLRMapElement newANTLRMapElementWithName:name Node:aNode];
-        if (ptrBuffer[LastHash] != nil)
-            [ptrBuffer[LastHash] release];
-        [np retain];
-        np.fNext = ptrBuffer[ LastHash ];
-        ptrBuffer[ LastHash ] = np;
-    }
-    return;    
-}
-
-- (NSEnumerator *)objectEnumerator
-{
-    NSEnumerator *anEnumerator = nil;   // no enumerator object is built; iteration uses hasNext/nextObject below
-
-    itIndex = 0;
-    return anEnumerator;
-}
-
-- (BOOL)hasNext
-{
-    if (self && [self count] < BuffSize-1) {
-        return YES;
-    }
-    return NO;
-}
-
-- (ANTLRMapElement *)nextObject
-{
-    if (self && itIndex < BuffSize-1) {
-        return ptrBuffer[itIndex++];   // advance so successive calls walk the buffer
-    }
-    return nil;
-}
-
-@end
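
The hashing scheme used throughout the removed map is simply the sum of the string's ASCII character codes reduced modulo the bucket count (HASHSIZE, 101 by default). A standalone restatement of that logic, useful for seeing which bucket a given rule or token name lands in, might look like the sketch below; the function name is made up.

    #import <Foundation/Foundation.h>

    // Mirrors -[ANTLRHashMap hash:]: sum of ASCII codes, modulo the table size.
    static NSInteger ANTLRStyleHash(NSString *s, NSInteger buffSize) {
        NSInteger hashval = 0;
        const char *tmp = [s cStringUsingEncoding:NSASCIIStringEncoding];
        while (tmp != NULL && *tmp != '\0')
            hashval += *tmp++;
        return hashval % buffSize;
    }

    // Because the sum ignores character order, anagrams collide:
    // ANTLRStyleHash(@"expr", 101) == ANTLRStyleHash(@"rpxe", 101).

Collisions are handled by chaining ANTLRMapElement entries through their fNext links, which is why lookup: walks a bucket via getfNext until the names match.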
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashRule.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashRule.h
deleted file mode 100644
index f1558e8..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashRule.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-//  ANTLRHashRule.h
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuleMemo.h"
-#import "ANTLRPtrBuffer.h"
-
-#define GLOBAL_SCOPE       0
-#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRHashRule : ANTLRPtrBuffer {
-	//	ANTLRHashRule *fNext;
-    //    TStringPool *fPool;
-    NSInteger LastHash;
-    NSInteger mode;
-}
-
-//@property (copy) ANTLRHashRule *fNext;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
-
-// Construction/Destruction
-+ (id)newANTLRHashRule;
-+ (id)newANTLRHashRuleWithLen:(NSInteger)aBuffSize;
-- (id)init;
-- (id)initWithLen:(NSInteger)aBuffSize;
-- (void)dealloc;
-
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-
-// Instance Methods
-- (void)deleteANTLRHashRule:(ANTLRRuleMemo *)np;
-- (void)delete_chain:(ANTLRRuleMemo *)np;
-- (ANTLRRuleMemo **)getPtrBuffer;
-- (void)setPtrBuffer:(ANTLRRuleMemo **)np;
-- (NSNumber *)getRuleMemoStopIndex:(NSInteger)aStartIndex;
-- (void)putRuleMemoAtStartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex;
-- (NSInteger)getMode;
-- (void)setMode:(NSInteger)aMode;
-- (void) insertObject:(ANTLRRuleMemo *)aRule atIndex:(NSInteger)Index;
-- (ANTLRRuleMemo *) objectAtIndex:(NSInteger)Index;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashRule.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashRule.m
deleted file mode 100644
index 93ce3a1..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRHashRule.m
+++ /dev/null
@@ -1,281 +0,0 @@
-//
-//  ANTLRHashRule.m
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-#define ANTLR_MEMO_RULE_UNKNOWN -1
-
-#import "ANTLRHashRule.h"
-
-/*
- * Start of ANTLRHashRule
- */
-@implementation ANTLRHashRule
-
-@synthesize LastHash;
-
-+(id)newANTLRHashRule
-{
-    ANTLRHashRule *aNewANTLRHashRule;
-    
-    aNewANTLRHashRule = [[ANTLRHashRule alloc] init];
-	return( aNewANTLRHashRule );
-}
-
-+(id)newANTLRHashRuleWithLen:(NSInteger)aBuffSize
-{
-    ANTLRHashRule *aNewANTLRHashRule;
-    
-    aNewANTLRHashRule = [[ANTLRHashRule alloc] initWithLen:aBuffSize];
-	return( aNewANTLRHashRule );
-}
-
--(id)init
-{
-	if ((self = [super initWithLen:HASHSIZE]) != nil) {
-		fNext = nil;
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)aBuffSize
-{
-	if ((self = [super initWithLen:aBuffSize]) != nil) {
-		fNext = nil;
-        mode = 0;
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-    ANTLRRuleMemo *tmp, *rtmp;
-    int Index;
-	
-    if ( self.fNext != nil ) {
-        for( Index = 0; Index < BuffSize; Index++ ) {
-            tmp = ptrBuffer[Index];
-            while ( tmp && tmp != ptrBuffer[Index] ) {
-                rtmp = tmp;
-                // tmp = [tmp getfNext];
-                tmp = (ANTLRRuleMemo *)tmp.fNext;
-                [rtmp dealloc];
-            }
-        }
-    }
-	[super dealloc];
-}
-
-- (NSInteger)count
-{
-    id anElement;
-    NSInteger aCnt = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        anElement = ptrBuffer[i];
-        if ( anElement != nil ) {
-            aCnt++;
-        }
-    }
-    return aCnt;
-}
-                          
-- (NSInteger) length
-{
-    return BuffSize;
-}
-
-- (NSInteger) size
-{
-    id anElement;
-    NSInteger aSize = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize += sizeof(id);
-        }
-    }
-    return aSize;
-}
-                                  
-                                  
--(void)deleteANTLRHashRule:(ANTLRRuleMemo *)np
-{
-    ANTLRRuleMemo *tmp, *rtmp;
-    int Index;
-    
-    if ( self.fNext != nil ) {
-        for( Index = 0; Index < BuffSize; Index++ ) {
-            tmp = ptrBuffer[Index];
-            while ( tmp && tmp != ptrBuffer[Index ] ) {
-                rtmp = tmp;
-                tmp = tmp.fNext;
-                [rtmp dealloc];
-            }
-        }
-    }
-}
-
--(void)delete_chain:(ANTLRRuleMemo *)np
-{
-    if ( np.fNext != nil )
-		[self delete_chain:np.fNext];
-	[np dealloc];
-}
-
--(ANTLRRuleMemo **)getPtrBuffer
-{
-	return( ptrBuffer );
-}
-
--(void)setPtrBuffer:(ANTLRRuleMemo **)np
-{
-	ptrBuffer = np;
-}
-
-- (NSNumber *)getRuleMemoStopIndex:(NSInteger)aStartIndex
-{
-    ANTLRRuleMemo *aRule;
-    NSNumber *stopIndex;
-    NSInteger anIndex;
-    
-    anIndex = ( aStartIndex >= BuffSize ) ? aStartIndex %= BuffSize : aStartIndex;
-    if ((aRule = ptrBuffer[anIndex]) == nil) {
-        return nil;
-    }
-    stopIndex = [aRule getStopIndex:aStartIndex];
-    return stopIndex;
-}
-
-- (void)putRuleMemo:(ANTLRRuleMemo *)aRule AtStartIndex:(NSInteger)aStartIndex
-{
-    NSInteger anIndex;
-    
-    anIndex = (aStartIndex >= BuffSize) ? aStartIndex %= BuffSize : aStartIndex;
-    if ( ptrBuffer[anIndex] == nil ) {
-        ptrBuffer[anIndex] = aRule;
-        [aRule retain];
-    }
-    else {
-        do {
-            if ( [aRule.startIndex integerValue] == aStartIndex ) {
-                [aRule setStartIndex:aRule.stopIndex];
-                return;
-            }
-            aRule = aRule.fNext;
-        } while ( aRule != nil );
-    }
-}
-
-- (void)putRuleMemoAtStartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex
-{
-    ANTLRRuleMemo *aRule, *newRule;
-    NSInteger anIndex;
-    NSInteger aMatchIndex;
-
-    anIndex = (aStartIndex >= BuffSize) ? aStartIndex %= BuffSize : aStartIndex;
-    if ((aRule = ptrBuffer[anIndex]) == nil ) {
-        aRule = [ANTLRRuleMemo newANTLRRuleMemoWithStartIndex:[NSNumber numberWithInteger:aStartIndex]
-                                                    StopIndex:[NSNumber numberWithInteger:aStopIndex]];
-        [aRule retain];
-        ptrBuffer[anIndex] = aRule;
-    }
-    else {
-        aMatchIndex = [aRule.startIndex integerValue];
-        if ( aStartIndex > aMatchIndex ) {
-            if ( aRule != ptrBuffer[anIndex] ) {
-                [aRule retain];
-            }
-            aRule.fNext = ptrBuffer[anIndex];
-            ptrBuffer[anIndex] = aRule;
-            return;
-        }
-        while (aRule.fNext != nil) {
-            aMatchIndex = [((ANTLRRuleMemo *)aRule.fNext).startIndex integerValue];
-            if ( aStartIndex > aMatchIndex ) {
-                newRule = [ANTLRRuleMemo newANTLRRuleMemoWithStartIndex:[NSNumber numberWithInteger:aStartIndex]
-                                                              StopIndex:[NSNumber numberWithInteger:aStopIndex]];
-                [newRule retain];
-                newRule.fNext = aRule.fNext;
-                aRule.fNext = newRule;
-                return;
-            }
-            if ( aMatchIndex == aStartIndex ) {
-                [aRule setStartIndex:aRule.stopIndex];
-                return;
-            }
-            aRule = aRule.fNext;
-        }
-    }
-}
-
-- (NSInteger)getLastHash
-{
-    return LastHash;
-}
-
-- (void)setLastHash:(NSInteger)aHash
-{
-    LastHash = aHash;
-}
-
-- (NSInteger)getMode
-{
-    return mode;
-}
-
-- (void)setMode:(NSInteger)aMode
-{
-    mode = aMode;
-}
-
-- (void) insertObject:(ANTLRRuleMemo *)aRule atIndex:(NSInteger)anIndex
-{
-    NSInteger Index;
-    
-    Index = ( anIndex >= BuffSize ) ? anIndex %= BuffSize : anIndex;
-    if (aRule != ptrBuffer[Index]) {
-        if (ptrBuffer[Index] != nil) {
-            [ptrBuffer[Index] release];
-        }
-        [aRule retain];
-    }
-    ptrBuffer[Index] = aRule;
-}
-
-- (ANTLRRuleMemo *)objectAtIndex:(NSInteger)anIndex
-{
-    NSInteger anIdx;
-
-    anIdx = ( anIndex >= BuffSize ) ? anIndex %= BuffSize : anIndex;
-    return ptrBuffer[anIdx];
-}
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRIntArray.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRIntArray.h
deleted file mode 100644
index 5269b23..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRIntArray.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-//  ANTLRIntArray.h
-//  ANTLR
-//
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-
-#define ANTLR_INT_ARRAY_INITIAL_SIZE 10
-
-@interface ANTLRIntArray : ANTLRPtrBuffer 
-{
-}
-
-+ (ANTLRIntArray *)newANTLRIntArray;
-+ (ANTLRIntArray *)newANTLRIntArrayWithLen:(NSInteger)aLen;
-
-- (id) init;
-- (id) initWithLen:(NSInteger)aLen;
-
-- (void) dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (void) addInteger:(NSInteger) v;
-- (void) push:(NSInteger) v;
-- (NSInteger) pop;
-- (NSInteger) integerAtIndex:(NSInteger) i;
-- (void) insertInteger:(NSInteger)anInteger AtIndex:(NSInteger) idx;
-- (NSInteger) size;
-- (void) reset;
-
-- (NSInteger) count;
-- (NSInteger) size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRIntStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRIntStream.h
deleted file mode 100755
index 3790cd9..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRIntStream.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-@protocol ANTLRIntStream < NSObject, NSCopying >
-
-- (void) consume;
-
-// Get unichar at current input pointer + i ahead where i=1 is next character as int for including ANTLRCharStreamEOF (-1) in the data range
-- (NSInteger) LA:(NSInteger) i;
-
-// Tell the stream to start buffering if it hasn't already.  Return
-// current input position, index(), or some other marker so that
-// when passed to rewind() you get back to the same spot.
-// rewind(mark()) should not affect the input cursor.
-// TODO: problem in that lexer stream returns not index but some marker 
-
-- (NSInteger) mark;
-
-// Return the current input symbol index 0..n where n indicates the
-// last symbol has been read.
-
-- (NSInteger) getIndex;
-
-// Reset the stream so that next call to index would return marker.
-// The marker will usually be -index but it doesn't have to be.  It's
-// just a marker to indicate what state the stream was in.  This is
-// essentially calling -release: and -seek:.  If there are markers
-// created after this marker argument, this routine must unroll them
-// like a stack.  Assume the state the stream was in when this marker
-// was created.
-
-- (void) rewind;
-- (void) rewind:(NSInteger) marker;
-
-// You may want to commit to a backtrack but don't want to force the
-// stream to keep bookkeeping objects around for a marker that is
-// no longer necessary.  This will have the same behavior as
-// rewind() except it releases resources without the backward seek.
-
-- (void) release:(NSInteger) marker;
-
-// Set the input cursor to the position indicated by index.  This is
-// normally used to seek ahead in the input stream.  No buffering is
-// required to do this unless you know your stream will use seek to
-// move backwards such as when backtracking.
-// This is different from rewind in its multi-directional
-// requirement and in that its argument is strictly an input cursor (index).
-//
-// For char streams, seeking forward must update the stream state such
-// as line number.  For seeking backwards, you will be presumably
-// backtracking using the mark/rewind mechanism that restores state and
-// so this method does not need to update state when seeking backwards.
-//
-// Currently, this method is only used for efficient backtracking, but
-// in the future it may be used for incremental parsing.
-
-- (void) seek:(NSInteger) index;
-
-/** Only makes sense for streams that buffer everything up probably, but
- *  might be useful to display the entire stream or for testing.  This
- *  value includes a single EOF.
- */
-- (NSUInteger) size;
-/** Where are you getting symbols from?  Normally, implementations will
- *  pass the buck all the way to the lexer who can ask its input stream
- *  for the file name or whatever.
- */
-- (NSString *)getSourceName;
-
-@end
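The ANTLRIntStream protocol removed above is the runtime's lowest-level input abstraction: mark returns a bookmark for the current position, LA: peeks ahead without consuming, and rewind: restores the cursor so speculative matching leaves no trace. A minimal sketch of that idiom, assuming only the methods declared in the deleted header and any conforming stream object; the helper name speculateTwoSymbols is illustrative, not part of the runtime.

#import <Cocoa/Cocoa.h>
#import "ANTLRIntStream.h"

// Speculatively test whether the next two symbols are c1 then c2, without
// leaving the stream cursor moved. Works against any id<ANTLRIntStream>.
static BOOL speculateTwoSymbols(id<ANTLRIntStream> input, NSInteger c1, NSInteger c2)
{
    NSInteger marker = [input mark];        // bookmark the current position
    BOOL matched = ([input LA:1] == c1);    // peek one symbol ahead
    if (matched) {
        [input consume];                    // advance past the first symbol
        matched = ([input LA:1] == c2);     // peek at what is now next
    }
    [input rewind:marker];                  // restore the cursor to the bookmark
    return matched;
}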
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLexer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLexer.h
deleted file mode 100755
index 5cfb36f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLexer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenSource.h"
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRRecognizerSharedState.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRRecognitionException.h"
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRMismatchedRangeException.h"
-
-@interface ANTLRLexer : ANTLRBaseRecognizer <ANTLRTokenSource> {
-	id<ANTLRCharStream> input;      ///< The character stream we pull tokens out of.
-	NSUInteger ruleNestingLevel;
-}
-
-@property (retain, getter=getInput, setter=setInput:) id<ANTLRCharStream> input;
-@property (getter=getRuleNestingLevel, setter=setRuleNestingLevel) NSUInteger ruleNestingLevel;
-
-#pragma mark Initializer
-- (id) initWithCharStream:(id<ANTLRCharStream>) anInput;
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput State:(ANTLRRecognizerSharedState *)state;
-
-- (id) copyWithZone:(NSZone *)zone;
-
-- (void) reset;
-
-// - (ANTLRRecognizerSharedState *) state;
-
-#pragma mark Tokens
-- (id<ANTLRToken>)getToken;
-- (void) setToken: (id<ANTLRToken>) aToken;
-- (id<ANTLRToken>) nextToken;
-- (void) mTokens;		// abstract, defined in generated sources
-- (void) skip;
-- (id<ANTLRCharStream>) getInput;
-- (void) setInput:(id<ANTLRCharStream>)aCharStream;
-
-- (void) emit;
-- (void) emit:(id<ANTLRToken>)aToken;
-
-#pragma mark Matching
-- (void) matchString:(NSString *)aString;
-- (void) matchAny;
-- (void) matchChar:(unichar) aChar;
-- (void) matchRangeFromChar:(unichar)fromChar to:(unichar)toChar;
-
-#pragma mark Informational
-- (NSUInteger) getLine;
-- (NSUInteger) getCharPositionInLine;
-- (NSInteger) getIndex;
-- (NSString *) getText;
-- (void) setText:(NSString *) theText;
-
-// error handling
-- (void) reportError:(ANTLRRecognitionException *)e;
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(NSMutableArray *)tokenNames;
-- (NSString *)getCharErrorDisplay:(NSInteger)c;
-- (void) recover:(ANTLRRecognitionException *)e;
-- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLexerRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLexerRuleReturnScope.h
deleted file mode 100755
index 18ae374..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLexerRuleReturnScope.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@interface ANTLRLexerRuleReturnScope : NSObject {
-	int startToken;
-	int stopToken;
-}
-
-- (NSInteger) getStart;
-- (void) setStart: (NSInteger) aStart;
-
-- (NSInteger) getStop;
-- (void) setStop: (NSInteger) aStop;
-
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLinkBase.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLinkBase.h
deleted file mode 100644
index 21019e6..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLinkBase.h
+++ /dev/null
@@ -1,74 +0,0 @@
-//
-//  ANTLRLinkBase.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/14/10.
-//  [The "BSD licence"]
-//  Copyright (c) 2010 Alan Condit
-//  All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@protocol ANTLRLinkList <NSObject>
-
-+ (id<ANTLRLinkList>)newANTLRLinkBase;
-+ (id<ANTLRLinkList>)newANTLRLinkBase:(id<ANTLRLinkList>)np Prev:(id<ANTLRLinkList>)pp;
-
-- (void) dealloc;
-
-- (id<ANTLRLinkList>) append:(id<ANTLRLinkList>)node;
-- (id<ANTLRLinkList>) insert:(id<ANTLRLinkList>)node;
-
-- (id<ANTLRLinkList>) getfNext;
-- (void) setFNext:(id<ANTLRLinkList>)np;
-- (id<ANTLRLinkList>)getfPrev;
-- (void) setFPrev:(id<ANTLRLinkList>)pp;
-
-@end
-
-@interface ANTLRLinkBase : NSObject <ANTLRLinkList> {
-	id<ANTLRLinkList> fPrev;
-	id<ANTLRLinkList> fNext;
-}
-
-@property (retain, getter=getfPrev, setter=setFPrev:) id<ANTLRLinkList> fPrev;
-@property (retain, getter=getfNext, setter=setFNext:) id<ANTLRLinkList> fNext;
-
-+ (id<ANTLRLinkList>)newANTLRLinkBase;
-+ (id<ANTLRLinkList>)newANTLRLinkBase:(id<ANTLRLinkList>)np Prev:(id<ANTLRLinkList>)pp;
-- (id<ANTLRLinkList>)init;
-- (id<ANTLRLinkList>)initWithPtr:(id)np Prev:(id)pp;
-- (void)dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id<ANTLRLinkList>)append:(id<ANTLRLinkList>)node;
-- (id<ANTLRLinkList>)insert:(id<ANTLRLinkList>)node;
-
-- (id<ANTLRLinkList>)getfNext;
-- (void)setFNext:(id<ANTLRLinkList>) np;
-- (id<ANTLRLinkList>)getfPrev;
-- (void)setFPrev:(id<ANTLRLinkList>) pp;
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLookaheadStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLookaheadStream.h
deleted file mode 100644
index ad48ff5..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRLookaheadStream.h
+++ /dev/null
@@ -1,73 +0,0 @@
-//
-//  ANTLRLookaheadStream.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-//  [The "BSD licence"]
-//  Copyright (c) 2010 Ian Michell 2010 Alan Condit
-//  All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRFastQueue.h"
-
-#define UNITIALIZED_EOF_ELEMENT_INDEX NSIntegerMax
-
-@interface ANTLRLookaheadStream : ANTLRFastQueue
-{
-	id eof;
-	NSInteger eofElementIndex;
-	NSInteger lastMarker;
-	NSInteger markDepth;
-}
-
-@property (readwrite, retain, getter=getEof, setter=setEof) id eof;
-@property (assign, getter=getEofElementIndex, setter=setEofElementIndex) NSInteger eofElementIndex;
-@property (assign, getter=getLastMarker, setter=setLastMarker) NSInteger lastMarker;
-@property (assign, getter=getMarkDepth, setter=setMarkDepth) NSInteger markDepth;
-
-- (id) initWithEOF:(id) o;
-- (id) nextElement;
-- (void) consume;
-- (void) sync:(NSInteger) need;
-- (void) fill:(NSInteger) n;
-- (id) LT:(NSInteger) i;
-- (id) LB:(NSInteger) i;
-- (id) currentSymbol;
-- (NSInteger) getIndex;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) seek:(NSInteger) i;
-- (id) getEof;
-- (void) setEof:(id) anID;
-- (NSInteger) getEofElementIndex;
-- (void) setEofElementIndex:(NSInteger) anInt;
-- (NSInteger) getLastMarker;
-- (void) setLastMarker:(NSInteger) anInt;
-- (NSInteger) getMarkDepth;
-- (void) setMarkDepth:(NSInteger) anInt;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMap.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMap.h
deleted file mode 100644
index 80ad486..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMap.h
+++ /dev/null
@@ -1,82 +0,0 @@
-//
-//  ANTLRMap.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-#import "ANTLRMapElement.h"
-
-//#define GLOBAL_SCOPE      0
-//#define LOCAL_SCOPE       1
-#define HASHSIZE            101
-#define HBUFSIZE            0x2000
-
-@interface ANTLRMap : ANTLRPtrBuffer {
-	//ANTLRMap *fNext; // found in superclass
-    // TStringPool *fPool;
-    NSInteger lastHash;
-}
-
-//@property (copy) ANTLRMap *fNext;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger lastHash;
-
-// Contruction/Destruction
-+ (id)newANTLRMap;
-+ (id)newANTLRMapWithLen:(NSInteger)aHashSize;
-
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-// Instance Methods
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-/* form hash value for string s */
--(NSInteger)hash:(NSString *)s;
-/*   look for s in ptrBuffer  */
--(id)lookup:(NSString *)s;
-/* look for s in ptrBuffer  */
--(id)install:(ANTLRMapElement *)sym;
-/*
- * delete entry from list
- */
-- (void)deleteANTLRMap:(ANTLRMapElement *)np;
-- (NSInteger)RemoveSym:(NSString *)s;
-- (void)delete_chain:(ANTLRMapElement *)np;
-- (ANTLRMapElement *)getTType:(NSString *)name;
-- (ANTLRMapElement *)getName:(NSInteger)ttype;
-- (NSInteger)getNode:(ANTLRMapElement *)aNode;
-- (void)putNode:(NSInteger)aTType Node:(id)aNode;
-- (void)putName:(NSString *)name TType:(NSInteger)ttype;
-- (void)putName:(NSString *)name Node:(id)aNode;
-
-@end
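The ANTLRMap interface deleted above is a small hand-rolled hash table keyed by string, which the runtime uses to associate token names with token types and tree nodes. A minimal sketch of the name-to-type usage, assuming the constructor and accessors declared in the removed header behave as their names suggest and that newANTLRMap returns an owning reference (manual retain/release, as elsewhere in this runtime); demoTokenTypeMap is an illustrative name only.

#import <Cocoa/Cocoa.h>
#import "ANTLRMap.h"

// Store a token name with its integer token type, then look the entry back up.
static void demoTokenTypeMap(void)
{
    ANTLRMap *tokenTypes = [ANTLRMap newANTLRMap];        // convenience constructor from the header above
    [tokenTypes putName:@"ID" TType:4];                   // record name -> type
    ANTLRMapElement *entry = [tokenTypes getTType:@"ID"]; // fetch the element stored for "ID"
    NSLog(@"ID entry: %@", entry);
    [tokenTypes release];                                 // assumed owning reference (MRC style)
}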
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMapElement.h
deleted file mode 100644
index e20d01c..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMapElement.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//
-//  ANTLRMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-
-@interface ANTLRMapElement : ANTLRBaseMapElement {
-    NSString *name;
-    id        node;
-}
-@property (retain, getter=getName, setter=setName:) NSString *name;
-@property (retain, getter=getNode, setter=setNode:) id node;
-
-+ (id) newANTLRMapElement;
-+ (id) newANTLRMapElementWithName:(NSString *)aName Type:(NSInteger)aTType;
-+ (id) newANTLRMapElementWithNode:(NSInteger)aTType Node:(id)aNode;
-+ (id) newANTLRMapElementWithName:(NSString *)aName Node:(id)aNode;
-+ (id) newANTLRMapElementWithObj1:(id)anObj1 Obj2:(id)anObj2;
-- (id) init;
-- (id) initWithName:(NSString *)aName Type:(NSInteger)aTType;
-- (id) initWithNode:(NSInteger)aTType Node:(id)aNode;
-- (id) initWithName:(NSString *)aName Node:(id)aNode;
-- (id) initWithObj1:(id)anObj1 Obj2:(id)anObj2;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger) count;
-- (NSInteger) size;
-- (NSString *)getName;
-- (void)setName:(NSString *)aName;
-- (id)getNode;
-- (void)setNode:(id)aNode;
-- (void)putNode:(id)aNode;
-- (void)putNode:(id)aNode With:(NSInteger)uniqueID;
-//- (void)setObject:(id)aNode atIndex:anIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedNotSetException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedNotSetException.h
deleted file mode 100644
index 57391d5..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedNotSetException.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-//  ANTLRMismatchedNotSetException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/13/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRBitSet.h"
-
-@interface ANTLRMismatchedNotSetException : ANTLRRecognitionException
-{
-    ANTLRBitSet *expecting;
-}
-@property (retain, getter=getExpecting, setter=setExpecting) ANTLRBitSet *expecting;
-
-- (ANTLRMismatchedNotSetException *)newANTLRMismatchedNotSetException;
-- (ANTLRMismatchedNotSetException *)newANTLRMismatchedNotSetException:(id<ANTLRIntStream>)anInput
-                                                               Follow:(ANTLRBitSet *)expecting;
-
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)expecting;
-
-- (NSString *)toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedRangeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedRangeException.h
deleted file mode 100755
index abda3bb..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedRangeException.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRIntStream;
-
-
-@interface ANTLRMismatchedRangeException : ANTLRRecognitionException {
-	NSRange range;
-}
-
-+ (id) exceptionWithRange:(NSRange) aRange stream:(id<ANTLRIntStream>) theInput;
-- (id) initWithRange:(NSRange) aRange stream:(id<ANTLRIntStream>) theInput;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedSetException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedSetException.h
deleted file mode 100755
index 3bd45fc..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedSetException.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLRMismatchedSetException : ANTLRRecognitionException {
-	NSSet *expecting;
-}
-
-@property (retain, getter=getExpecting, setter=setExpecting:) NSSet *expecting;
-
-+ (id) exceptionWithSet:(NSSet *) theExpectedSet stream:(id<ANTLRIntStream>) theStream;
-- (id) initWithSet:(NSSet *) theExpectedSet stream:(id<ANTLRIntStream>) theStream;
-
-- (NSSet *) getExpecting;
-- (void) setExpecting: (NSSet *) anExpectedSet;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedTokenException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedTokenException.h
deleted file mode 100755
index 5e1d77d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedTokenException.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRBitSet.h"
-
-@protocol ANTLRIntStream;
-
-@interface ANTLRMismatchedTokenException : ANTLRRecognitionException {
-	NSInteger expecting;
-	unichar expectingChar;
-	BOOL isTokenType;
-}
-
-@property (assign, getter=getExpecting, setter=setExpecting:) NSInteger expecting;
-@property (assign, getter=getExpectingChar, setter=setExpectingChar:) unichar expectingChar;
-@property (assign, getter=getIsTokenType, setter=setIsTokenType:) BOOL isTokenType;
-
-+ (id) newANTLRMismatchedTokenException:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-+ (id) newANTLRMismatchedTokenExceptionMissing:(NSInteger)expectedTokenType
-                                        Stream:(id<ANTLRIntStream>)anInput
-                                         Token:(id<ANTLRToken>)inserted;
-+ (id) newANTLRMismatchedTokenExceptionChar:(unichar)expectedCharacter Stream:(id<ANTLRIntStream>)anInput;
-+ (id) newANTLRMismatchedTokenExceptionStream:(id<ANTLRIntStream>)anInput
-                                    Exception:(NSException *)e
-                                       Follow:(ANTLRBitSet *)follow;
-- (id) initWithTokenType:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
--(id) initWithTokenType:(NSInteger)expectedTokenType
-                 Stream:(id<ANTLRIntStream>)anInput
-                  Token:(id<ANTLRToken>)inserted;
-- (id) initWithCharacter:(unichar)expectedCharacter Stream:(id<ANTLRIntStream>)anInput;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedTreeNodeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedTreeNodeException.h
deleted file mode 100755
index b61ab51..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMismatchedTreeNodeException.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRIntStream;
-
-@interface ANTLRMismatchedTreeNodeException : ANTLRRecognitionException {
-	NSInteger expecting;
-}
-
-@property (getter=getExpecting, setter=setExpecting) NSInteger expecting;
-
-+ (id) newANTLRMismatchedTreeNodeException:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-- (id) initWithTokenType:(NSInteger) expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMissingTokenException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMissingTokenException.h
deleted file mode 100644
index 1398e25..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRMissingTokenException.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-//  ANTLRMissingTokenException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRToken.h"
-
-@interface ANTLRMissingTokenException : ANTLRMismatchedTokenException {
-    id<ANTLRToken> inserted;
-}
-/** Used for remote debugger deserialization */
-+ (id) newANTLRMissingTokenException;
-+ (id) newANTLRMissingTokenException:(NSInteger)expected
-                              Stream:(id<ANTLRIntStream>)anInput
-                                With:(id<ANTLRToken>)insertedToken;
-- (id) init;
-- (id) init:(NSInteger)expected Stream:(id<ANTLRIntStream>)anInput With:(id<ANTLRToken>)insertedToken;
-
-- (NSInteger) getMissingType;
-
-- (NSString *)toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRNoViableAltException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRNoViableAltException.h
deleted file mode 100755
index b71baff..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRNoViableAltException.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRIntStream.h"
-
-@interface ANTLRNoViableAltException : ANTLRRecognitionException {
-	int decisionNumber;
-	int stateNumber;
-}
-
-+ (ANTLRNoViableAltException *) newANTLRNoViableAltException:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<ANTLRIntStream>)theStream;
-- (ANTLRNoViableAltException *) initWithDecision:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<ANTLRIntStream>)theStream;
-
-- (void)setDecisionNumber:(NSInteger)decisionNumber;
-- (void)setStateNumber:(NSInteger)stateNumber;
-
-
-@end
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRNodeMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRNodeMapElement.h
deleted file mode 100644
index 1c0c916..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRNodeMapElement.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-//  ANTLRRuleMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-#import "ANTLRTree.h"
-
-@interface ANTLRNodeMapElement : ANTLRBaseMapElement {
-    id<ANTLRTree> node;
-}
-
-@property (retain, getter=getNode, setter=setNode:) id node;
-
-+ (void)initialize;
-
-+ (id) newANTLRNodeMapElement;
-+ (id) newANTLRNodeMapElementWithIndex:(id)anIndex Node:(id<ANTLRTree>)aNode;
-- (id) init;
-- (id) initWithAnIndex:(id)anIndex Node:(id)aNode;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id<ANTLRTree>)getNode;
-- (void)setNode:(id<ANTLRTree>)aNode;
-
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRParseTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRParseTree.h
deleted file mode 100644
index 92554e3..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRParseTree.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-//  ANTLRParseTree.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/12/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseTree.h"
-#import "ANTLRCommonToken.h"
-
-@interface ANTLRParseTree : ANTLRBaseTree {
-	id<ANTLRToken> payload;
-	NSMutableArray *hiddenTokens;
-}
-/** A record of the rules used to match a token sequence.  The tokens
- *  end up as the leaves of this tree and rule nodes are the interior nodes.
- *  This really adds no functionality, it is just an alias for CommonTree
- *  that is more meaningful (specific) and holds a String to display for a node.
- */
-+ (ANTLRParseTree *)newANTLRParseTree:(id<ANTLRToken>)label;
-- (id)initWithLabel:(id<ANTLRToken>)label;
-
-- (id<ANTLRTree>)dupNode;
-- (NSInteger)getType;
-- (NSString *)getText;
-- (NSInteger)getTokenStartIndex;
-- (void)setTokenStartIndex:(NSInteger)index;
-- (NSInteger)getTokenStopIndex;
-- (void)setTokenStopIndex:(NSInteger)index;
-- (NSString *)toString;
-- (NSString *)toStringWithHiddenTokens;
-- (NSString *)toInputString;
-- (void)_toStringLeaves:(NSMutableString *)buf;
-
-@end
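
A minimal usage sketch of the ANTLRParseTree API declared in the removed header above. The helper name and the id<ANTLRToken> argument are placeholders for illustration only; they assume a token obtained elsewhere from the runtime.

    #import "ANTLRParseTree.h"

    // Wrap a matched token in a parse-tree leaf and print its text forms.
    static void DumpParseTreeLeaf(id<ANTLRToken> aToken) {
        ANTLRParseTree *leaf = [ANTLRParseTree newANTLRParseTree:aToken];
        NSLog(@"leaf: %@", [leaf toString]);
        NSLog(@"with hidden tokens: %@", [leaf toStringWithHiddenTokens]);
    }
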
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRParser.h
deleted file mode 100755
index 5ddaf50..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRParser.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRTokenStream.h"
-
-@interface ANTLRParser : ANTLRBaseRecognizer {
-	id<ANTLRTokenStream> input;
-}
-+ (ANTLRParser *)newANTLRParser:(id<ANTLRTokenStream>)anInput;
-+ (ANTLRParser *)newANTLRParser:(id<ANTLRTokenStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream;
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream State:(ANTLRRecognizerSharedState *)aState;
-
-- (id<ANTLRTokenStream>) getInput;
-- (void) setInput: (id<ANTLRTokenStream>) anInput;
-
-- (void) reset;
-
-- (id) getCurrentInputSymbol:(id<ANTLRTokenStream>)anInput;
-- (ANTLRCommonToken *)getMissingSymbol:(id<ANTLRTokenStream>)input
-                             Exception:(ANTLRRecognitionException *)e
-                                 TType:(NSInteger)expectedTokenType
-                                BitSet:(ANTLRBitSet *)follow;
-- (void) setTokenStream:(id<ANTLRTokenStream>)anInput;
-- (id<ANTLRTokenStream>)getTokenStream;
-- (NSString *)getSourceName;
-
-- (void) traceIn:(NSString *)ruleName Index:(int)ruleIndex;
-- (void) traceOut:(NSString *)ruleName Index:(NSInteger) ruleIndex;
-
-@end
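
For context, a small sketch of how the ANTLRParser base class above is constructed over a token stream; generated parsers subclass it. The function name and the tokens argument are illustrative assumptions, not part of the runtime.

    #import "ANTLRParser.h"

    // Build a parser over an existing token stream and inspect its source name.
    static void DescribeParser(id<ANTLRTokenStream> tokens) {
        ANTLRParser *parser = [ANTLRParser newANTLRParser:tokens];
        NSLog(@"parsing %@", [parser getSourceName]);
        [parser reset];   // clear any prior recognition state
    }
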
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRParserRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRParserRuleReturnScope.h
deleted file mode 100755
index aef3dd0..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRParserRuleReturnScope.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRRuleReturnScope.h"
-
-@interface ANTLRParserRuleReturnScope : ANTLRRuleReturnScope {
-	id<ANTLRToken> startToken;
-	id<ANTLRToken> stopToken;
-}
-@property (retain, getter=getStart, setter=setStart:) id<ANTLRToken> startToken;
-@property (retain, getter=getStop, setter=setStop:)   id<ANTLRToken> stopToken;
-
-- (id<ANTLRToken>) getStart;
-- (void) setStart: (id<ANTLRToken>) aStart;
-
-- (id<ANTLRToken>) getStop;
-- (void) setStop: (id<ANTLRToken>) aStop;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRPtrBuffer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRPtrBuffer.h
deleted file mode 100644
index 188f597..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRPtrBuffer.h
+++ /dev/null
@@ -1,91 +0,0 @@
-//
-//  ANTLRPtrBuffer.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define BUFFSIZE         101
-
-@interface ANTLRPtrBuffer : ANTLRLinkBase {
-	//ANTLRPtrBuffer *fNext;
-    NSInteger BuffSize;
-    NSMutableData *buffer;
-    id *ptrBuffer;
-    NSInteger count;
-    NSInteger ptr;
-}
-
-@property (getter=getBuffSize, setter=setBuffSize:) NSInteger BuffSize;
-@property (retain, getter=getBuffer, setter=setBuffer:) NSMutableData *buffer;
-@property (retain, getter=getPtrBuffer, setter=setPtrBuffer:) id *ptrBuffer;
-@property (getter=getCount, setter=setCount:) NSInteger count;
-@property (getter=getPtr, setter=setPtr:) NSInteger ptr;
-
-// Contruction/Destruction
-+(ANTLRPtrBuffer *)newANTLRPtrBuffer;
-+(ANTLRPtrBuffer *)newANTLRPtrBufferWithLen:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-
-- (NSMutableData *)getBuffer;
-- (void)setBuffer:(NSMutableData *)np;
-- (NSInteger)getCount;
-- (void)setCount:(NSInteger)aCount;
-- (id *)getPtrBuffer;
-- (void)setPtrBuffer:(id *)np;
-- (NSInteger)getPtr;
-- (void)setPtr:(NSInteger)np;
-
-- (void) push:(id) v;
-- (id) pop;
-- (id) peek;
-
-- (void) addObject:(id) v;
-- (void) addObjectsFromArray:(ANTLRPtrBuffer *)anArray;
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx;
-- (id)   objectAtIndex:(NSInteger)idx;
-- (void) removeAllObjects;
-
-- (void) ensureCapacity:(NSInteger) index;
-
-@end
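
A brief sketch of the ANTLRPtrBuffer API declared above, which behaves as a growable object stack with indexed access. The demo function and the NSString elements are illustrative only.

    #import "ANTLRPtrBuffer.h"

    // Push, inspect, and pop objects from the pointer buffer.
    static void PtrBufferDemo(void) {
        ANTLRPtrBuffer *buf = [ANTLRPtrBuffer newANTLRPtrBufferWithLen:16];
        [buf push:@"first"];
        [buf addObject:@"second"];
        NSLog(@"top = %@, count = %ld", [buf peek], (long)[buf count]);
        id popped = [buf pop];
        NSLog(@"popped %@", popped);
    }
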
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRecognitionException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRecognitionException.h
deleted file mode 100755
index 853dc0e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRecognitionException.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuntimeException.h"
-#import "ANTLRToken.h"
-#import "ANTLRIntStream.h"
-#import "ANTLRTree.h"
-
-@interface ANTLRRecognitionException : ANTLRRuntimeException {
-	id<ANTLRIntStream> input;
-	NSInteger index;
-	id<ANTLRToken> token;
-	id<ANTLRTree> node;
-	unichar c;
-	NSInteger line;
-	NSInteger charPositionInLine;
-}
-
-@property (retain, getter=getStream, setter=setStream:) id<ANTLRIntStream> input;
-@property (retain, getter=getToken, setter=setToken:) id<ANTLRToken>token;
-@property (retain, getter=getNode, setter=setNode:) id<ANTLRTree>node;
-@property (getter=getLine, setter=setLine:) NSInteger line;
-@property (getter=getCharPositionInLine, setter=setCharPositionInLine:) NSInteger charPositionInLine;
-
-+ (ANTLRRecognitionException *) newANTLRRecognitionException;
-+ (ANTLRRecognitionException *) exceptionWithStream:(id<ANTLRIntStream>) anInputStream; 
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream reason:(NSString *)aReason;
-- (NSInteger) unexpectedType;
-- (id<ANTLRToken>)getUnexpectedToken;
-
-- (id<ANTLRIntStream>) getStream;
-- (void) setStream: (id<ANTLRIntStream>) aStream;
-
-- (id<ANTLRToken>) getToken;
-- (void) setToken: (id<ANTLRToken>) aToken;
-
-- (id<ANTLRTree>) getNode;
-- (void) setNode: (id<ANTLRTree>) aNode;
-
-- (NSString *)getMessage;
-
-- (NSInteger)getCharPositionInLine;
-- (void)setCharPositionInLine:(NSInteger)aPos;
-
-@end
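
A hypothetical error-reporting sketch using only the accessors declared in the ANTLRRecognitionException header above; the helper name is a placeholder.

    #import "ANTLRRecognitionException.h"

    // Report where recognition failed, using the position the exception carries.
    static void LogRecognitionError(ANTLRRecognitionException *e) {
        NSLog(@"%@ at %ld:%ld", [e getMessage],
              (long)[e getLine], (long)[e getCharPositionInLine]);
    }
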
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRecognizerSharedState.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRecognizerSharedState.h
deleted file mode 100755
index 0430b79..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRecognizerSharedState.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRRuleStack.h"
-
-@interface ANTLRRecognizerSharedState : NSObject {
-	NSMutableArray *following;          // a stack of FOLLOW bitsets used for context sensitive prediction and recovery
-    NSInteger _fsp;                     // Follow stack pointer
-	BOOL errorRecovery;                 // are we recovering?
-	NSInteger lastErrorIndex;
-	BOOL failed;                        // indicate that some match failed
-    NSInteger syntaxErrors;
-	NSInteger backtracking;             // the level of backtracking
-	ANTLRRuleStack *ruleMemo;			// store previous results of matching rules so we don't have to do it again. Hook in incremental stuff here, too.
-
-	id<ANTLRToken> token;
-	NSInteger  tokenStartCharIndex;
-	NSUInteger tokenStartLine;
-	NSUInteger tokenStartCharPositionInLine;
-	NSUInteger channel;
-	NSUInteger type;
-	NSString   *text;
-}
-
-@property (retain, getter=getFollowing, setter=setFollowing:) NSMutableArray *following;
-@property (assign) NSInteger _fsp;
-@property (assign) BOOL errorRecovery;
-@property (assign) NSInteger lastErrorIndex;
-@property (assign, getter=getFailed, setter=setFailed:) BOOL failed;
-@property (assign) NSInteger syntaxErrors;
-@property (assign, getter=getBacktracking, setter=setBacktracking) NSInteger backtracking;
-@property (retain, getter=getRuleMemo, setter=setRuleMemo:) ANTLRRuleStack *ruleMemo;
-@property (copy, getter=getToken, setter=setToken) id<ANTLRToken> token;
-@property (getter=getType,setter=setType:) NSUInteger type;
-@property (getter=getChannel,setter=setChannel:) NSUInteger channel;
-@property (getter=getTokenStartLine,setter=setTokenStartLine:) NSUInteger tokenStartLine;
-@property (getter=getCharPositionInLine,setter=setCharPositionInLine:) NSUInteger tokenStartCharPositionInLine;
-@property (getter=getTokenStartCharIndex,setter=setTokenStartCharIndex:) NSInteger tokenStartCharIndex;
-@property (retain, getter=getText, setter=setText) NSString *text;
-
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedState;
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedStateWithRuleLen:(NSInteger)aLen;
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedState:(ANTLRRecognizerSharedState *)aState;
-
-- (id) init;
-- (id) initWithRuleLen:(NSInteger)aLen;
-- (id) initWithState:(ANTLRRecognizerSharedState *)state;
-
-- (id<ANTLRToken>) getToken;
-- (void) setToken:(id<ANTLRToken>) theToken;
-
-- (NSUInteger) getType;
-- (void) setType:(NSUInteger) theTokenType;
-
-- (NSUInteger) getChannel;
-- (void) setChannel:(NSUInteger) theChannel;
-
-- (NSUInteger) getTokenStartLine;
-- (void) setTokenStartLine:(NSUInteger) theTokenStartLine;
-
-- (NSUInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSUInteger) theCharPosition;
-
-- (NSInteger) getTokenStartCharIndex;
-- (void) setTokenStartCharIndex:(NSInteger) theTokenStartCharIndex;
-
-- (NSString *) getText;
-- (void) setText:(NSString *) theText;
-
-
-- (NSMutableArray *) getFollowing;
-- (void)setFollowing:(NSMutableArray *)aFollow;
-- (ANTLRRuleStack *) getRuleMemo;
-- (void)setRuleMemo:(ANTLRRuleStack *)aRuleMemo;
-- (BOOL) isErrorRecovery;
-- (void) setIsErrorRecovery: (BOOL) flag;
-
-- (BOOL) getFailed;
-- (void) setFailed: (BOOL) flag;
-
-- (NSInteger)  getBacktracking;
-- (void) setBacktracking:(NSInteger) value;
-- (void) increaseBacktracking;
-- (void) decreaseBacktracking;
-- (BOOL) isBacktracking;
-
-- (NSInteger) lastErrorIndex;
-- (void) setLastErrorIndex:(NSInteger) value;
-
-@end
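
A minimal sketch of reading the recovery and backtracking flags kept in the shared state declared above; the function name is illustrative and the state object is assumed to come from a recognizer.

    #import "ANTLRRecognizerSharedState.h"

    // Inspect the flags a recognizer keeps in its shared state.
    static void LogRecognizerState(ANTLRRecognizerSharedState *state) {
        if ([state isErrorRecovery])
            NSLog(@"recovering from a syntax error");
        if ([state isBacktracking])
            NSLog(@"backtracking at depth %ld", (long)[state getBacktracking]);
    }
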
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRewriteRuleElementStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRewriteRuleElementStream.h
deleted file mode 100755
index 132a0cc..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRewriteRuleElementStream.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-
-typedef union {
-    id single;
-    NSMutableArray *multiple;
-} Elements;
-
-// TODO: this should be separated into stream and enumerator classes
-@interface ANTLRRewriteRuleElementStream : NSObject {
-    NSInteger cursor;
-    BOOL dirty;        ///< indicates whether the stream should return copies of its elements, set to true after a call to -reset
-    BOOL isSingleElement;
-    Elements elements;
-    
-    NSString *elementDescription;
-    id<ANTLRTreeAdaptor> treeAdaptor;
-}
-
-@property (assign, getter=GetCursor, setter=SetCursor:) NSInteger cursor;
-@property (assign, getter=Getdirty, setter=Setdirty:) BOOL dirty;
-@property (assign, getter=GetIsSingleElement, setter=SetIsSingleElement:) BOOL isSingleElement;
-@property (assign, getter=GetElement, setter=SetElement:) Elements elements;
-@property (assign, getter=GetElementDescription, setter=SetElementDescription:) NSString *elementDescription;
-@property (retain, getter=GetTreeAdaptor, setter=SetTreeAdaptor:) id<ANTLRTreeAdaptor> treeAdaptor;
-
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription;
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription
-                                                            element:(id)anElement;
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription
-                                                           elements:(NSArray *)theElements;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-
-- (void)reset;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor;
-
-- (void) addElement:(id)anElement;
-- (NSInteger) size;
- 
-- (BOOL) hasNext;
-- (id<ANTLRTree>) nextTree;
-- (id<ANTLRTree>) _next;       // internal: TODO: redesign if necessary. maybe delegate
-
-- (id) copyElement:(id)element;
-- (id) toTree:(id)element;
-
-- (NSString *) getDescription;
-- (void) setDescription:(NSString *)description;
-
-@end
-
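
A sketch of the generic rewrite-stream pattern declared above: elements collected during a match are replayed while the rewrite tree is built. The helper name is illustrative; concrete grammars use the subtree and token subclasses that follow.

    #import "ANTLRRewriteRuleElementStream.h"

    // Replay every collected element, then reset the cursor.
    static void DrainElementStream(ANTLRRewriteRuleElementStream *stream) {
        while ([stream hasNext]) {
            id<ANTLRTree> t = [stream nextTree];
            NSLog(@"next element: %@", t);
        }
        [stream reset];
    }
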
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRewriteRuleSubtreeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRewriteRuleSubtreeStream.h
deleted file mode 100755
index 1d18b24..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRewriteRuleSubtreeStream.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRewriteRuleElementStream.h"
-
-@interface ANTLRRewriteRuleSubtreeStream : ANTLRRewriteRuleElementStream {
-
-}
-
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription;
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-                                                             element:(id)anElement;
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-                                                            elements:(NSArray *)theElements;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-
-- (id) nextNode;
-- (id) dup:(id)element;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRewriteRuleTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRewriteRuleTokenStream.h
deleted file mode 100755
index 3a516de..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRewriteRuleTokenStream.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRewriteRuleElementStream.h"
-
-
-@interface ANTLRRewriteRuleTokenStream : ANTLRRewriteRuleElementStream {
-
-}
-
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)anAdaptor
-                          description:(NSString *)elementDescription;
-/** Create a stream with one element */
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)adaptor
-                          description:(NSString *)elementDescription
-                              element:(id) oneElement;
-/** Create a stream, but feed off an existing list */
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)adaptor
-                          description:(NSString *)elementDescription
-                             elements:(NSMutableArray *)elements;
-
-- (id) init;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor 
-               description:(NSString *)aDescription
-                   element:(id)element;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription
-                  elements:(NSMutableArray *)elements;
-                               
-/** Get next token from stream and make a node for it */
-- (id) nextNode;
-
-- (id) nextToken;
-
-/** Don't convert to a tree unless they explicitly call nextTree.
- *  This way we can do hetero tree nodes in rewrite.
- */
-- (id<ANTLRTree>) toTree:(id<ANTLRToken>)element;
-
-@end
\ No newline at end of file
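
A small sketch of the token variant declared above: the stream is seeded with a matched token and then asked for a tree node during the rewrite. The function name, the "ID" description, and the adaptor/token arguments are illustrative assumptions.

    #import "ANTLRRewriteRuleTokenStream.h"

    // Turn a matched token into a tree node via the rewrite token stream.
    static id BuildNodeFromMatchedToken(id<ANTLRTreeAdaptor> adaptor, id<ANTLRToken> tok) {
        ANTLRRewriteRuleTokenStream *stream =
            [ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:adaptor
                                                            description:@"ID"
                                                                element:tok];
        return [stream nextNode];   // makes a node for the next token in the stream
    }
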
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleMapElement.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleMapElement.h
deleted file mode 100644
index e040b18..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleMapElement.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRRuleMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-
-@interface ANTLRRuleMapElement : ANTLRBaseMapElement {
-    NSNumber *ruleNum;
-}
-
-@property (retain, getter=getRuleNum, setter=setRuleNum:) NSNumber *ruleNum;
-
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElement;
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElementWithIndex:(NSNumber *)anIdx;
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElementWithIndex:(NSNumber *)anIdx RuleNum:(NSNumber *)aRuleNum;
-- (id) init;
-- (id) initWithAnIndex:(NSNumber *)anIdx;
-- (id) initWithAnIndex:(NSNumber *)anIdx RuleNum:(NSNumber *)aRuleNum;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSNumber *)getRuleNum;
-- (void)setRuleNum:(NSNumber *)aRuleNum;
-
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleMemo.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleMemo.h
deleted file mode 100644
index 63a5ae2..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleMemo.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-//  ANTLRRuleMemo.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-@interface ANTLRRuleMemo : ANTLRLinkBase {
-    NSNumber *startIndex;
-    NSNumber *stopIndex;
-}
-
-@property (retain, getter=getStartIndex, setter=setStartIndex) NSNumber *startIndex;
-@property (retain, getter=getStopIndex, setter=setStopIndex) NSNumber *stopIndex;
-
-+ (ANTLRRuleMemo *)newANTLRRuleMemo;
-+ (ANTLRRuleMemo *)newANTLRRuleMemoWithStartIndex:(NSNumber *)aStartIndex StopIndex:(NSNumber *)aStopIndex;
-
-- (id) init;
-- (id) initWithStartIndex:(NSNumber *)aStartIndex StopIndex:(NSNumber *)aStopIndex;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-- (ANTLRRuleMemo *)getRuleWithStartIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStartIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStopIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStartIndex;
-- (void)setStartIndex:(NSNumber *)aStartIndex;
-- (NSNumber *)getStopIndex;
-- (void)setStopIndex:(NSNumber *)aStopIndex;
-
-@end
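
For illustration, a one-line helper showing what the memo above records: the token range a rule previously matched, so the recognizer can skip re-parsing it. The helper name is a placeholder.

    #import "ANTLRRuleMemo.h"

    // Capture the [start..stop] token range of a successful rule match.
    static ANTLRRuleMemo *MemoizeRange(NSInteger start, NSInteger stop) {
        return [ANTLRRuleMemo newANTLRRuleMemoWithStartIndex:[NSNumber numberWithInteger:start]
                                                   StopIndex:[NSNumber numberWithInteger:stop]];
    }
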
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleReturnScope.h
deleted file mode 100644
index 4750c16..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleReturnScope.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRRuleReturnScope.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-
-@interface ANTLRRuleReturnScope : NSObject <NSCopying> {
-
-}
-
-/** Return the start token or tree */
-- (id<ANTLRToken>) getStart;
-
-/** Return the stop token or tree */
-- (id<ANTLRToken>) getStop;
-
-/** Has a value potentially if output=AST; */
-- (id) getNode;
-
-/** Has a value potentially if output=template; Don't use StringTemplate
- *  type as it then causes a dependency with ST lib.
- */
-- (id) getTemplate;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
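
A minimal sketch of how callers typically read a rule return scope as documented above; the helper name is illustrative and the scope is assumed to come from a generated rule method.

    #import "ANTLRRuleReturnScope.h"

    // Read the matched token range, and the AST node when output=AST.
    static void InspectRuleResult(ANTLRRuleReturnScope *ret) {
        NSLog(@"start=%@ stop=%@", [ret getStart], [ret getStop]);
        id ast = [ret getNode];    // non-nil only when tree output is enabled
        if (ast != nil)
            NSLog(@"ast: %@", ast);
    }
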
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleStack.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleStack.h
deleted file mode 100644
index 12d450b..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleStack.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-//  ANTLRRuleStack.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseStack.h"
-#import "ANTLRHashRule.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRRuleStack : ANTLRBaseStack {
-}
-
-// Contruction/Destruction
-+(ANTLRRuleStack *)newANTLRRuleStack;
-+(ANTLRRuleStack *)newANTLRRuleStack:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-- (ANTLRHashRule *) pop;
-
-- (void) insertObject:(ANTLRHashRule *)aHashRule atIndex:(NSInteger)idx;
-- (ANTLRHashRule *)objectAtIndex:(NSInteger)idx;
-- (void)putHashRuleAtRuleIndex:(NSInteger)aRuleIndex StartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex;
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleStack.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleStack.m
deleted file mode 100644
index 909192f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuleStack.m
+++ /dev/null
@@ -1,147 +0,0 @@
-//
-//  ANTLRRuleStack.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRRuleStack.h"
-#import "ANTLRTree.h"
-
-/*
- * Start of ANTLRRuleStack
- */
-@implementation ANTLRRuleStack
-
-+ (ANTLRRuleStack *)newANTLRRuleStack
-{
-    return [[ANTLRRuleStack alloc] init];
-}
-
-+ (ANTLRRuleStack *)newANTLRRuleStack:(NSInteger)cnt
-{
-    return [[ANTLRRuleStack alloc] initWithLen:cnt];
-}
-
-- (id)init
-{
-	if ((self = [super init]) != nil) {
-	}
-    return( self );
-}
-
-- (id)initWithLen:(NSInteger)cnt
-{
-	if ((self = [super initWithLen:cnt]) != nil) {
-	}
-    return( self );
-}
-
-- (void)dealloc
-{
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    return [super copyWithZone:aZone];
-}
-
-- (NSInteger)count
-{
-    ANTLRRuleMemo *anElement;
-    NSInteger aCnt = 0;
-    for( int i = 0; i < BuffSize; i++ ) {
-        if ((anElement = ptrBuffer[i]) != nil)
-            aCnt++;
-    }
-    return aCnt;
-}
-
-- (NSInteger)size
-{
-    ANTLRRuleMemo *anElement;
-    NSInteger aSize = 0;
-    for( int i = 0; i < BuffSize; i++ ) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize++;
-        }
-    }
-    return aSize;
-}
-
-- (ANTLRHashRule *)pop
-{
-    return (ANTLRHashRule *)[super pop];
-}
-
-- (void) insertObject:(ANTLRHashRule *)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        NSLog( @"In ANTLRRuleStack attempting to insert aRule at Index %d, but Buffer is only %d long\n", idx, BuffSize );
-        [self ensureCapacity:idx];
-    }
-    if ( aRule != ptrBuffer[idx] ) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (ANTLRHashRule *)objectAtIndex:(NSInteger)idx
-{
-    if (idx < BuffSize) {
-        return ptrBuffer[idx];
-    }
-    return nil;
-}
-
-- (void)putHashRuleAtRuleIndex:(NSInteger)aRuleIndex StartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex
-{
-    ANTLRHashRule *aHashRule;
-    ANTLRRuleMemo *aRuleMemo;
-
-    if (aRuleIndex >= BuffSize) {
-        NSLog( @"putHashRuleAtRuleIndex attempting to insert aRule at Index %d, but Buffer is only %d long\n", aRuleIndex, BuffSize );
-        [self ensureCapacity:aRuleIndex];
-    }
-    if ((aHashRule = ptrBuffer[aRuleIndex]) == nil) {
-        aHashRule = [[ANTLRHashRule newANTLRHashRuleWithLen:17] retain];
-        ptrBuffer[aRuleIndex] = aHashRule;
-    }
-    if (( aRuleMemo = [aHashRule objectAtIndex:aStartIndex] ) == nil ) {
-        aRuleMemo = [[ANTLRRuleMemo newANTLRRuleMemo] retain];
-        [aHashRule insertObject:aRuleMemo atIndex:aStartIndex];
-    }
-    [aRuleMemo setStartIndex:[NSNumber numberWithInteger:aStartIndex]];
-    [aRuleMemo setStopIndex:[NSNumber numberWithInteger:aStopIndex]];
-}
-
-@end
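
A short sketch of the memoization path implemented above: putHashRuleAtRuleIndex:StartIndex:StopIndex: lazily creates the per-rule hash and memo entry. The helper name and arguments are illustrative only.

    #import "ANTLRRuleStack.h"

    // Record that rule #ruleIndex already matched tokens [start..stop].
    static void MemoizeRuleMatch(ANTLRRuleStack *ruleMemo,
                                 NSInteger ruleIndex, NSInteger start, NSInteger stop) {
        [ruleMemo putHashRuleAtRuleIndex:ruleIndex StartIndex:start StopIndex:stop];
        NSLog(@"rule stack now holds %ld non-empty slot(s)", (long)[ruleMemo size]);
    }
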
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuntimeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuntimeException.h
deleted file mode 100644
index 6cf0918..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRRuntimeException.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-//  ANTLRRuntimeException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/5/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@interface ANTLRRuntimeException : NSException
-{
-}
-
-+ (ANTLRRuntimeException *) newANTLRNoSuchElementException:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRIllegalArgumentException:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRRuntimeException:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRRuntimeException:(NSString *)aName reason:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newANTLRRuntimeException:(NSString *)aName reason:(NSString *)aReason userInfo:aUserInfo;
-
-- (id) init;
-- (id)initWithRuntime:(NSString *)aReason;
-- (id)initWithReason:(NSString *)aReason;
-- (id)initWithName:(NSString *)aName reason:(NSString *)aReason;
-- (id)initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-- (NSString *) Description;
-
-//    - (void)setDecisionNumber:(NSInteger)decisionNumber;
-//    - (void)setStateNumber:(NSInteger)stateNumber;
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRStreamEnumerator.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRStreamEnumerator.h
deleted file mode 100644
index a0e0f69..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRStreamEnumerator.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-//  ANTLRStreamEnumertor.h
-//  ANTLR
-//
-//  Created by Ian Michell on 29/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRStreamEnumerator : NSEnumerator 
-{
-	NSInteger i;
-	id eof;
-	NSMutableArray *nodes;
-}
-
--(id) initWithNodes:(NSMutableArray *) n andEOF:(id) o;
--(BOOL) hasNext;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRStringStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRStringStream.h
deleted file mode 100755
index 2b13c7d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRStringStream.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCharStream.h"
-#import "ANTLRCharStreamState.h"
-#import "ANTLRPtrBuffer.h"
-
-@interface ANTLRStringStream : NSObject < ANTLRCharStream > {
-	NSString *data;
-	NSInteger n;
-	NSInteger p;
-	NSInteger line;
-	NSInteger charPositionInLine;
-	NSInteger markDepth;
-	ANTLRPtrBuffer *markers;
-	NSInteger lastMarker;
-	NSString *name;
-    ANTLRCharStreamState *charState;
-}
-
-@property (retain, getter=getData,setter=setData:) NSString *data;
-@property (getter=getP,setter=setP:) NSInteger p;
-@property (getter=getN,setter=setN:) NSInteger n;
-@property (getter=getLine,setter=setLine:) NSInteger line;
-@property (getter=getCharPositionInLine,setter=setCharPositionInLine:) NSInteger charPositionInLine;
-@property (getter=getMarkDepth,setter=setMarkDepth:) NSInteger markDepth;
-@property (retain, getter=getMarkers, setter=setMarkers:) ANTLRPtrBuffer *markers;
-@property (getter=getLastMarker,setter=setLastMarker:) NSInteger lastMarker;
-@property (retain, getter=getSourceName, setter=setSourceName:) NSString *name;
-@property (retain, getter=getCharState, setter=setCharState:) ANTLRCharStreamState *charState;
-
-+ newANTLRStringStream;
-
-+ newANTLRStringStream:(NSString *)aString;
-
-+ newANTLRStringStream:(char *)myData Count:(NSInteger)numBytes;
-
-- (id) init;
-
-// this initializer copies the string
-- (id) initWithString:(NSString *) theString;
-
-// This is the preferred constructor as no data is copied
-- (id) initWithStringNoCopy:(NSString *) theString;
-
-- (id) initWithData:(char *)myData Count:(NSInteger)numBytes;
-
-- (void) dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-// reset the stream's state, but keep the data to feed off
-- (void) reset;
-// consume one character from the stream
-- (void) consume;
-
-// look ahead i characters
-- (NSInteger) LA:(NSInteger) i;
-- (NSInteger) LT:(NSInteger) i;
-
-// returns the position of the current input symbol
-- (NSInteger) getIndex;
-// total length of the input data
-- (NSInteger) size;
-
-// seek and rewind in the stream
-- (NSInteger) mark;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) release:(NSInteger) marker;
-- (void) seek:(NSInteger) index;
-
-// provide the streams data (e.g. for tokens using indices)
-- (NSString *) substring:(NSInteger)startIndex To:(NSInteger)stopIndex;
-- (NSString *) substringWithRange:(NSRange) theRange;
-
-// used for tracking the current position in the input stream
-- (NSInteger) getLine;
-- (void) setLine:(NSInteger) theLine;
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger) thePos;
-
-- (NSInteger) getN;
-- (void) setN:(NSInteger)num;
-
-- (NSInteger) getP;
-- (void) setP:(NSInteger)num;
-
-- (ANTLRPtrBuffer *)getMarkers;
-- (void) setMarkers:(ANTLRPtrBuffer *)aMarkerList;
-
-- (NSString *)getSourceName;
-
-- (NSString *)toString;
-
-// accessors to the raw data of this stream
-- (NSString *) getData;
-- (void) setData: (NSString *) aData;
-
-
-@end
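
A minimal sketch of the character-stream API declared above: LA: peeks, consume advances, and mark/rewind give backtracking over the input. The function name and the scan-to-space loop are illustrative assumptions.

    #import "ANTLRStringStream.h"

    // Scan forward to the first space, then rewind to the marked position.
    static void ScanFirstWord(NSString *source) {
        ANTLRStringStream *input = [ANTLRStringStream newANTLRStringStream:source];
        NSInteger m = [input mark];
        while ([input getIndex] < [input size] && [input LA:1] != ' ') {
            [input consume];
        }
        NSLog(@"scanned up to index %ld", (long)[input getIndex]);
        [input rewind:m];   // back to where we marked
    }
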
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRSymbolStack.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRSymbolStack.h
deleted file mode 100644
index 169df9f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRSymbolStack.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//
-//  ANTLRSymbolStack.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseStack.h"
-// #import "ANTLRSymbolScope.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRSymbolsScope : NSObject
-{
-    
-}
-
-+ (ANTLRSymbolsScope *)newANTLRSymbolsScope;
-
-- (id)init;
-@end
-
-
-@interface ANTLRSymbolStack : ANTLRBaseStack {
-}
-
-// Construction/Destruction
-+(ANTLRSymbolStack *)newANTLRSymbolStack;
-+(ANTLRSymbolStack *)newANTLRSymbolStackWithLen:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-
--(ANTLRSymbolsScope *)getHashMapEntry:(NSInteger)idx;
-
--(ANTLRSymbolsScope **)getHashMap;
-
--(ANTLRSymbolsScope *) pop;
-
-- (void) insertObject:(ANTLRSymbolsScope *)aScope atIndex:(NSInteger)idx;
-- (ANTLRSymbolsScope *)objectAtIndex:(NSInteger)idx;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRSymbolStack.m b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRSymbolStack.m
deleted file mode 100644
index 1dd6775..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRSymbolStack.m
+++ /dev/null
@@ -1,123 +0,0 @@
-//
-//  ANTLRSymbolStack.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRSymbolStack.h"
-#import "ANTLRTree.h"
-
-
-@implementation ANTLRSymbolsScope
-
-+ (ANTLRSymbolsScope *)newANTLRSymbolsScope
-{
-    return( [[ANTLRSymbolsScope alloc] init] );
-}
-
-- (id)init
-{
-    if ((self = [super init]) != nil) {
-    }
-    return (self);
-}
-
-@end
-
-/*
- * Start of ANTLRSymbolStack
- */
-@implementation ANTLRSymbolStack
-
-+(ANTLRSymbolStack *)newANTLRSymbolStack
-{
-    return [[ANTLRSymbolStack alloc] init];
-}
-
-+(ANTLRSymbolStack *)newANTLRSymbolStackWithLen:(NSInteger)cnt
-{
-    return [[ANTLRSymbolStack alloc] initWithLen:cnt];
-}
-
--(id)init
-{
-	if ((self = [super init]) != nil) {
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)cnt
-{
-	if ((self = [super initWithLen:cnt]) != nil) {
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    return [super copyWithZone:aZone];
-}
-
--(ANTLRSymbolsScope *)getHashMapEntry:(NSInteger)idx
-{
-	return( (ANTLRSymbolsScope *)[super objectAtIndex:idx] );
-}
-
--(ANTLRSymbolsScope **)getHashMap
-{
-	return( (ANTLRSymbolsScope **)ptrBuffer );
-}
-
--(ANTLRSymbolsScope *) pop
-{
-    return (ANTLRSymbolsScope *)[super pop];
-}
-
-- (void) insertObject:(ANTLRSymbolsScope *)aRule atIndex:(NSInteger)idx
-{
-    if (aRule != ptrBuffer[idx]) {
-        if (ptrBuffer[idx] != nil) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (ANTLRSymbolsScope *)objectAtIndex:(NSInteger)idx
-{
-    return (ANTLRSymbolsScope *)[super objectAtIndex:idx];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRToken+DebuggerSupport.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRToken+DebuggerSupport.h
deleted file mode 100755
index 659e763..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRToken+DebuggerSupport.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-//  ANTLRToken+DebuggerSupport.h
-//  ANTLR
-//
-//  Created by Kay Röpke on 03.12.2006.
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-
-@interface ANTLRCommonToken(DebuggerSupport)
-
-- (NSString *)debuggerDescription;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRToken.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRToken.h
deleted file mode 100755
index 64524f0..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRToken.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-
-typedef enum {
-	ANTLRTokenTypeEOF = -1,
-	ANTLRTokenTypeInvalid,
-	ANTLRTokenTypeEOR,
-	ANTLRTokenTypeDOWN,
-	ANTLRTokenTypeUP,
-	ANTLRTokenTypeMIN
-} ANTLRTokenType;
-
-typedef enum {
-	ANTLRTokenChannelDefault = 0,
-    ANTLRTokenChannelHidden = 99
-} ANTLRTokenChannel;
-
-#define HIDDEN 99
-
-@protocol ANTLRToken < NSObject, NSCopying >
-
-// The singleton eofToken instance.
-+ (id<ANTLRToken>) eofToken;
-// The default channel for this class of Tokens
-+ (ANTLRTokenChannel) defaultChannel;
-
-// provide hooks to explicitly set the text as opposed to using the indices into the CharStream
-- (NSString *) getText;
-- (void) setText:(NSString *) theText;
-
-- (NSInteger) getType;
-- (void) setType: (NSInteger) aType;
-
-// ANTLR v3 provides automatic line and position tracking. Subclasses do not need to
-// override these, if they do not want to store line/pos tracking information
-- (NSUInteger) getLine;
-- (void) setLine: (NSUInteger) aLine;
-
-- (NSUInteger) getCharPositionInLine;
-- (void) setCharPositionInLine: (NSUInteger) aCharPositionInLine;
-
-// explicitly change the channel this Token is on. The default parser implementation
-// just sees the defaultChannel
-// Common idiom is to put whitespace tokens on channel 99.
-- (NSUInteger) getChannel;
-- (void) setChannel: (NSUInteger) aChannel;
-
-// the index of this Token into the TokenStream
-- (NSUInteger) getTokenIndex;
-- (void) setTokenIndex: (NSUInteger) aTokenIndex;
-- (NSString *)toString;
-
-@end
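
The token protocol above maps onto org.antlr.runtime.Token and CommonToken in the Java runtime, which define the same EOF (-1), default channel (0), and hidden channel (99) constants. A small sketch of the type/channel accessors; the WS token type is an arbitrary stand-in for a generated lexer constant:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;

    public class TokenDemo {
        // Hypothetical token type; generated lexers define their own constants.
        static final int WS = 4;

        public static void main(String[] args) {
            CommonToken ws = new CommonToken(WS, "  ");
            ws.setChannel(Token.HIDDEN_CHANNEL);   // 99: the parser ignores it
            ws.setLine(1);
            ws.setCharPositionInLine(3);
            System.out.println(ws.getChannel());   // 99
            System.out.println(Token.EOF);         // -1, same value as ANTLRTokenTypeEOF
        }
    }
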
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTokenRewriteStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTokenRewriteStream.h
deleted file mode 100644
index 0d8681f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTokenRewriteStream.h
+++ /dev/null
@@ -1,170 +0,0 @@
-//
-//  ANTLRTokenRewriteStream.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/19/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTokenStream.h"
-#import "ANTLRLinkBase.h"
-#import "ANTLRHashMap.h"
-#import "ANTLRMapElement.h"
-#import "ANTLRTokenSource.h"
-
-// Define the rewrite operation hierarchy
-
-@interface ANTLRRewriteOperation : ANTLRCommonTokenStream
-{
-/** What index into rewrites List are we? */
-NSInteger instructionIndex;
-/** Token buffer index. */
-NSInteger index;
-NSString *text;
-}
-
-@property (getter=getInstructionIndex, setter=setInstructionIndex:) NSInteger instructionIndex;
-@property (getter=getIndex, setter=setIndex:) NSInteger index;
-@property (retain, getter=getText, setter=setText:) NSString *text;
-
-+ (ANTLRRewriteOperation *) newANTLRRewriteOperation:(NSInteger)index Text:(NSString *)text;
-
-- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText;
-
-/** Execute the rewrite operation by possibly adding to the buffer.
- *  Return the index of the next token to operate on.
- */
-- (NSInteger) execute:(NSString *)buf;
-
-- (NSString *)toString;
-- (NSInteger) indexOf:(char)aChar inString:(NSString *)aString;
-@end
-
-@interface ANTLRInsertBeforeOp : ANTLRRewriteOperation {
-}
-
-+ (ANTLRInsertBeforeOp *) newANTLRInsertBeforeOp:(NSInteger)anIndex Text:(NSString *)theText;
-- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText;
-
-@end
-
-/** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
- *  instructions.
- */
-@interface ANTLRReplaceOp : ANTLRRewriteOperation {
-    NSInteger lastIndex;
-}
-
-@property (getter=getLastIndex, setter=setLastIndex:) NSInteger lastIndex;
-
-+ (ANTLRReplaceOp *) newANTLRReplaceOp:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString*)theText;
-- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-
-- (NSInteger) execute:(NSString *)buf;
-- (NSString *)toString;
-
-@end
-
-@interface ANTLRDeleteOp : ANTLRReplaceOp {
-}
-+ (ANTLRDeleteOp *) newANTLRDeleteOp:(NSInteger)from ToIndex:(NSInteger)to;
-
-- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to;
-
-- (NSString *)toString;
-
-@end
-
-
-@interface ANTLRTokenRewriteStream : ANTLRCommonTokenStream {
-/** You may have multiple, named streams of rewrite operations.
- *  I'm calling these things "programs."
- *  Maps String (name) -> rewrite (List)
- */
-ANTLRHashMap *programs;
-
-/** Map String (program name) -> Integer index */
-ANTLRHashMap *lastRewriteTokenIndexes;
-}
-
-@property (retain, getter=getPrograms, setter=setPrograms:) ANTLRHashMap *programs;
-@property (retain, getter=getLastRewriteTokenIndexes, setter=setLastRewriteTokenIndexes:) ANTLRHashMap *lastRewriteTokenIndexes;
-
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream;
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream:(id<ANTLRTokenSource>) aTokenSource;
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream:(id<ANTLRTokenSource>) aTokenSource Channel:(NSInteger)aChannel;
-
-- (id) init;
-- (id)initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource;
-- (id)initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource Channel:(NSInteger)aChannel;
-
-- (ANTLRHashMap *)getPrograms;
-- (void)setPrograms:(ANTLRHashMap *)aProgList;
-
-- (void) rollback:(NSInteger)instructionIndex;
-- (void) rollback:(NSString *)programName Index:(NSInteger)anInstructionIndex;
-- (void) deleteProgram;
-- (void) deleteProgram:(NSString *)programName;
-- (void) insertAfterToken:(id<ANTLRToken>)t Text:(NSString *)theText;
-- (void) insertAfterIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) insertAfterProgNam:(NSString *)programName Index:(NSInteger)anIndex Text:(NSString *)theText;
-
-
-- (void) insertBeforeToken:(id<ANTLRToken>)t Text:(NSString *)theText;
-- (void) insertBeforeIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) insertBeforeProgName:(NSString *)programName Index:(NSInteger)index Text:(NSString *)theText;
-- (void) replaceFromIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) replaceFromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-- (void) replaceFromToken:(id<ANTLRToken>)indexT Text:(NSString *)theText;
-- (void) replaceFromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to Text:(NSString *)theText;
-- (void) replaceProgNam:(NSString *)programName Token:(id<ANTLRToken>)from Token:(id<ANTLRToken>)to Text:(NSString *)theText;
-- (void) replaceProgNam:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-- (void) delete:(NSInteger)anIndex;
-- (void) delete:(NSInteger)from ToIndex:(NSInteger)to;
-- (void) deleteToken:(id<ANTLRToken>)indexT;
-- (void) deleteFromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to;
-- (void) delete:(NSString *)programName FromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to;
-- (void) delete:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to;
-- (NSInteger)getLastRewriteTokenIndex;
-- (NSInteger)getLastRewriteTokenIndex:(NSString *)programName;
-- (void)setLastRewriteTokenIndex:(NSString *)programName Index:(NSInteger)anInt;
-- (ANTLRHashMap *) getProgram:(NSString *)name;
-- (ANTLRHashMap *) initializeProgram:(NSString *)name;
-- (NSString *)toOriginalString;
-- (NSString *)toOriginalString:(NSInteger)start End:(NSInteger)end;
-- (NSString *)toString;
-- (NSString *)toString:(NSString *)programName;
-- (NSString *)toStringFromStart:(NSInteger)start ToEnd:(NSInteger)end;
-- (NSString *)toString:(NSString *)programName FromStart:(NSInteger)start ToEnd:(NSInteger)end;
-- (ANTLRHashMap *)reduceToSingleOperationPerIndex:(ANTLRHashMap *)rewrites;
-- (ANTLRHashMap *)getKindOfOps:(ANTLRHashMap *)rewrites KindOfClass:(Class)kind;
-- (ANTLRHashMap *)getKindOfOps:(ANTLRHashMap *)rewrites KindOfClass:(Class)kind Index:(NSInteger)before;
-- (NSString *)catOpText:(id)a PrevText:(id)b;
-- (NSMutableString *)toDebugString;
-- (NSMutableString *)toDebugStringFromStart:(NSInteger)start ToEnd:(NSInteger)end;
-                    
-@end
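
The rewrite-stream header above corresponds to the Java runtime's TokenRewriteStream, which records insert/replace/delete instructions per named program and only applies them when the stream is rendered back to text. A sketch under that assumption; MyLexer is a placeholder for any generated ANTLR 3 lexer, and the token indices assume a hypothetical tokenization of the input:

    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.TokenRewriteStream;

    public class RewriteDemo {
        public static void main(String[] args) {
            // "MyLexer" stands in for any generated ANTLR 3 lexer class.
            MyLexer lexer = new MyLexer(new ANTLRStringStream("int x = 1;"));
            TokenRewriteStream tokens = new TokenRewriteStream(lexer);

            // Assuming token indices 0..4 are: int, x, =, 1, ;
            tokens.insertBefore(0, "/* patched */ ");
            tokens.replace(3, "42");   // swap the literal token's text
            tokens.delete(4);          // drop the trailing ';'

            System.out.println(tokens.toString());          // text with the edits applied
            System.out.println(tokens.toOriginalString());  // the untouched input
        }
    }
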
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTokenSource.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTokenSource.h
deleted file mode 100755
index 4d6b6ee..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTokenSource.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRToken.h"
-
-// Anything that responds to -nextToken can be treated as a lexer.
-// For instance this can be a flex lexer or a handwritten one or even
-// a proxy for a remotely running token source (database, lexer, whatever).
-@protocol ANTLRTokenSource <NSObject, NSCopying>
-
-- (id<ANTLRToken>) nextToken;
-- (NSString *)getSourceName;
-
-@end
\ No newline at end of file
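
The protocol above only asks for nextToken and a source name, so any object that hands out tokens can stand in for a lexer. A minimal Java TokenSource backed by a canned list, returning the runtime's EOF token once the list is exhausted:

    import java.util.Iterator;
    import java.util.List;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.TokenSource;

    // A "lexer" that is just a canned list of tokens.
    public class ListTokenSource implements TokenSource {
        private final Iterator<Token> it;

        public ListTokenSource(List<Token> tokens) {
            this.it = tokens.iterator();
        }

        @Override
        public Token nextToken() {
            return it.hasNext() ? it.next() : Token.EOF_TOKEN;  // EOF forever after
        }

        @Override
        public String getSourceName() {
            return "canned token list";
        }
    }
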
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTokenStream.h
deleted file mode 100755
index c104578..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRIntStream.h"
-#import "ANTLRToken.h"
-
-@protocol ANTLRTokenStream < ANTLRIntStream >
-
-// Get Token at current input pointer + i ahead where i=1 is next Token.
-// i<0 indicates tokens in the past.  So -1 is previous token and -2 is
-// two tokens ago. LT:0 is undefined.  For i>=n, return Token.EOFToken.
-// Return null for LT:0 and any index that results in an absolute address
-// that is negative.
-
-- (id<ANTLRToken>) LT:(NSInteger) i;
-
-- (id<ANTLRToken>) getToken:(NSUInteger) i;
-
-- (id) getTokenSource;
-
-- (NSString *) toString;
-/** Return the text of all tokens from start to stop, inclusive.
- *  If the stream does not buffer all the tokens then it can just
- *  return "" or null;  Users should not access $ruleLabel.text in
- *  an action of course in that case.
- */
-- (NSString *)toStringFromStart:(NSInteger)startIdx ToEnd:(NSInteger)stopIdx;
-
-/** Because the user is not required to use a token with an index stored
- *  in it, we must provide a means for two token objects themselves to
- *  indicate the start/end location.  Most often this will just delegate
- *  to the other toString(int,int).  This is also parallel with
- *  the TreeNodeStream.toString(Object,Object).
- */
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-
-@end
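
The LT contract documented above (LT:1 is the next token, LT:-1 the previous one, EOF past the end) is the same one CommonTokenStream implements in the Java runtime. A sketch reusing the ListTokenSource above; token types 4 and 5 are arbitrary:

    import java.util.Arrays;
    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.CommonTokenStream;
    import org.antlr.runtime.Token;

    public class LookaheadDemo {
        public static void main(String[] args) {
            Token a = new CommonToken(4, "a");
            Token b = new CommonToken(5, "b");
            CommonTokenStream tokens = new CommonTokenStream(
                    new ListTokenSource(Arrays.asList(a, b)));  // source from the sketch above

            System.out.println(tokens.LT(1).getText());   // "a": next token
            tokens.consume();
            System.out.println(tokens.LT(1).getText());   // "b"
            System.out.println(tokens.LT(-1).getText());  // "a": previous token
            System.out.println(tokens.LT(2).getType() == Token.EOF);  // true: past the end
        }
    }
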
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTree.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTree.h
deleted file mode 100755
index f269b2d..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTree.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-@protocol ANTLRTree < NSObject, NSCopying >
-
-//+ (id<ANTLRTree>) invalidNode;
-
-- (id<ANTLRTree>) getChild:(NSUInteger)index;
-- (NSUInteger) getChildCount;
-
-// Tree tracks parent and child index now > 3.0
-
-- (id<ANTLRTree>)getParent;
-
-- (void) setParent:(id<ANTLRTree>)t;
-
-/** Is there is a node above with token type ttype? */
-- (BOOL) hasAncestor:(NSInteger)ttype;
-
-/** Walk upwards and get first ancestor with this token type. */
-- (id<ANTLRTree>) getAncestor:(NSInteger) ttype;
-
-/** Return a list of all ancestors of this node.  The first node of
- *  list is the root and the last is the parent of this node.
- */
-- (NSMutableArray *) getAncestors;
-
-/** This node is what child index? 0..n-1 */
-- (NSInteger) getChildIndex;
-
-- (void) setChildIndex:(NSInteger) index;
-
-/** Set the parent and child index values for all children */
-- (void) freshenParentAndChildIndexes;
-
-/** Add t as a child to this node.  If t is null, do nothing.  If t
- *  is nil, add all children of t to this' children.
- */
-- (void) addChild:(id<ANTLRTree>) t;
-
-/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
-- (void) setChild:(NSInteger)i With:(id<ANTLRTree>) t;
-
-- (id) deleteChild:(NSInteger) i;
-
-/** Delete children from start to stop and replace with t even if t is
- *  a list (nil-root tree).  num of children can increase or decrease.
- *  For huge child lists, inserting children can force walking rest of
- *  children to set their childindex; could be slow.
- */
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t;	
-
-- (NSArray *) getChildren;
-// Add t as a child to this node.  If t is null, do nothing.  If t
-//  is nil, add all children of t to this' children.
-
-- (void) addChildren:(NSArray *) theChildren;
-//- (void) removeAllChildren;
-
-// Indicates the node is a nil node but may still have children, meaning
-// the tree is a flat list.
-
-- (BOOL) isNil;
-
-/**  What is the smallest token index (indexing from 0) for this node
- *   and its children?
- */
-- (NSInteger) getTokenStartIndex;
-
-- (void) setTokenStartIndex:(NSInteger) index;
-
-/**  What is the largest token index (indexing from 0) for this node
- *   and its children?
- */
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (id<ANTLRTree>) dupNode;
-
-- (NSString *) toString;
-
-#pragma mark Copying
-- (id) copyWithZone:(NSZone *)aZone;	// the children themselves are not copied here!
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-#pragma mark Tree Parser support
-- (NSInteger) getType;
-- (NSString *) getText;
-// In case we don't have a token payload, what is the line for errors?
-- (NSInteger) getLine;
-- (NSInteger) getCharPositionInLine;
-- (void) setCharPositionInLine:(NSInteger)pos;
-
-#pragma mark Informational
-- (NSString *) treeDescription;
-- (NSString *) description;
-
-@end
-
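
The tree protocol above matches the Java runtime's Tree/CommonTree. A short sketch of the child bookkeeping and the nil-node notion; token types are arbitrary:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;

    public class TreeDemo {
        public static void main(String[] args) {
            CommonTree plus = new CommonTree(new CommonToken(10, "+"));
            plus.addChild(new CommonTree(new CommonToken(11, "1")));
            plus.addChild(new CommonTree(new CommonToken(11, "2")));

            System.out.println(plus.getChildCount());             // 2
            System.out.println(plus.getChild(0).getText());       // "1"
            System.out.println(plus.getChild(0).getChildIndex()); // 0
            System.out.println(plus.isNil());                     // false: it has a token payload
            System.out.println(plus.toStringTree());              // (+ 1 2)
        }
    }
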
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeAdaptor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeAdaptor.h
deleted file mode 100755
index e6579cf..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeAdaptor.h
+++ /dev/null
@@ -1,159 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRToken.h"
-#import "ANTLRBaseTree.h"
-#import "ANTLRTokenStream.h"
-
-#pragma warning tree/node diction is broken.
-
-@protocol ANTLRTreeAdaptor <NSObject, NSCopying>
-
-#pragma mark Construction
-
-+ (id<ANTLRTree>) newEmptyTree;
-
-- (id<ANTLRTree>) createTree:(id<ANTLRToken>)payload;
-
-#pragma mark ANTLRTreeAdaptor implementation
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)aNode;	// copies just the node
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree;	// copies the entire subtree, recursively
-
-/** Return a nil node (an empty but non-null node) that can hold
- *  a list of element as the children.  If you want a flat tree (a list)
- *  use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
- */
-- (id) emptyNode;
-
-/** Return a tree node representing an error.  This node records the
- *  tokens consumed during error recovery.  The start token indicates the
- *  input symbol at which the error was detected.  The stop token indicates
- *  the last symbol consumed during recovery.
- *
- *  You must specify the input stream so that the erroneous text can
- *  be packaged up in the error node.  The exception could be useful
- *  to some applications; default implementation stores ptr to it in
- *  the CommonErrorNode.
- *
- *  This only makes sense during token parsing, not tree parsing.
- *  Tree parsing should happen only when parsing and tree construction
- *  succeed.
- */
-- (id) errorNode:(id<ANTLRTokenStream>)anInput
-            From:(id<ANTLRToken>)aStartToken
-              To:(id<ANTLRToken>)aStopToken
-       Exception:(NSException *) e;
-
-/** Is tree considered a nil node used to make lists of child nodes? */
-- (BOOL) isNil:(id<ANTLRTree>)aTree;
-
-
-- (void) addChild:(id<ANTLRTree>)child toTree:(id<ANTLRTree>)aTree;
-
-/** If oldRoot is a nil root, just copy or move the children to newRoot.
- *  If not a nil root, make oldRoot a child of newRoot.
- *
- *    old=^(nil a b c), new=r yields ^(r a b c)
- *    old=^(a b c), new=r yields ^(r ^(a b c))
- *
- *  If newRoot is a nil-rooted single child tree, use the single
- *  child as the new root node.
- *
- *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
- *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
- *
- *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
- *
- *    old=null, new=r yields r
- *    old=null, new=^(nil r) yields ^(nil r)
- *
- *  Return newRoot.  Throw an exception if newRoot is not a
- *  simple node or nil root with a single child node--it must be a root
- *  node.  If newRoot is ^(nil x) return x as newRoot.
- *
- *  Be advised that it's ok for newRoot to point at oldRoot's
- *  children; i.e., you don't have to copy the list.  We are
- *  constructing these nodes so we should have this control for
- *  efficiency.
- */
-- (id) becomeRoot:(id<ANTLRTree>)newRoot old:(id<ANTLRTree>)oldRoot;
-
-- (id) rulePostProcessing:(id<ANTLRTree>)root;
-
-#pragma mark Rewrite Rules
-                           
-- (NSUInteger) getUniqueID:(id<ANTLRTree>)aNode;
-
-- (id<ANTLRTree>) createTree:(NSInteger)tokenType FromToken:(id<ANTLRToken>)fromToken;
-- (id<ANTLRTree>) createTree:(NSInteger)tokenType FromToken:(id<ANTLRToken>)fromToken Text:(NSString *)text;
-- (id<ANTLRTree>) createTree:(NSInteger)tokenType Text:(NSString *)text;
-
-#pragma mark Content
-
-- (id<ANTLRTree>)dupNode:(id<ANTLRTree>)aNode;
-- (id<ANTLRTree>)dupTree:(id<ANTLRTree>)aTree;
-
-- (NSInteger) getType:(id<ANTLRTree>)aNode;
-- (void) setType:(id<ANTLRTree>)aNode Type:(NSInteger)tokenType;
-
-- (NSString *) getText:(id<ANTLRTree>)aNode;
-- (void) setText:(id<ANTLRTree>)aNode Text:(NSString *)tokenText;
-
-- (id<ANTLRToken>) getToken:(id<ANTLRTree>)t;
-
-- (void) setTokenBoundaries:(id<ANTLRTree>)aTree From:(id<ANTLRToken>)startToken To:(id<ANTLRToken>)stopToken;
-- (NSInteger) getTokenStartIndex:(id<ANTLRTree>)aTree;
-- (NSInteger) getTokenStopIndex:(id<ANTLRTree>)aTree;
-
-#pragma mark Navigation / Tree Parsing
-
-/** Get a child 0..n-1 node */
-- (id<ANTLRTree>) getChild:(id<ANTLRTree>)aNode At:(NSInteger) i;
-/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
-- (void) setChild:(id<ANTLRTree>)aTree At:(NSInteger)index Child:(id<ANTLRTree>)child;
-/** Remove ith child and shift children down from right. */
-- (id<ANTLRTree>) deleteChild:(id<ANTLRTree>)t Index:(NSInteger)index;
-
-/** How many children?  If 0, then this is a leaf node */
-- (NSInteger) getChildCount:(id<ANTLRTree>) aTree;
-
-/** Who is the parent node of this node; if null, implies node is root.
- *  If your node type doesn't handle this, it's ok but the tree rewrites
- *  in tree parsers need this functionality.
- */
-- (id<ANTLRTree>)getParent:(id<ANTLRTree>)t;
-- (void) setParent:(id<ANTLRTree>)t With:(id<ANTLRTree>)parent;
-
-/** What index is this node in the child list? Range: 0..n-1
- *  If your node type doesn't handle this, it's ok but the tree rewrites
- *  in tree parsers need this functionality.
- */
-- (NSInteger) getChildIndex:(id<ANTLRTree>)t;
-- (void) setChildIndex:(id<ANTLRTree>)t With:(NSInteger)index;
-
-- (void) replaceChildren:(id<ANTLRTree>)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id<ANTLRTree>)t;
-
-@end
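
The becomeRoot comment above is easiest to see in code. A Java sketch with CommonTreeAdaptor showing old=^(nil a b c), new=r yielding ^(r a b c); token types are arbitrary:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;

    public class AdaptorDemo {
        public static void main(String[] args) {
            CommonTreeAdaptor adaptor = new CommonTreeAdaptor();

            // old = ^(nil a b c): a flat list under a nil root
            Object list = adaptor.nil();
            adaptor.addChild(list, adaptor.create(new CommonToken(4, "a")));
            adaptor.addChild(list, adaptor.create(new CommonToken(4, "b")));
            adaptor.addChild(list, adaptor.create(new CommonToken(4, "c")));

            // new = r; becomeRoot hoists r above the list: ^(r a b c)
            Object r = adaptor.create(new CommonToken(5, "r"));
            CommonTree tree = (CommonTree) adaptor.becomeRoot(r, list);

            System.out.println(tree.toStringTree());  // (r a b c)
        }
    }
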
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeException.h
deleted file mode 100755
index 8ec5c45..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeException.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLRTreeException : ANTLRRecognitionException {
-	id<ANTLRTree> oldRoot;
-	id<ANTLRTree> newRoot;
-}
-
-+ (id) exceptionWithOldRoot:(id<ANTLRTree>)theOldRoot newRoot:(id<ANTLRTree>)theNewRoot stream:(id<ANTLRIntStream>)aStream;
-- (id) initWithOldRoot:(id<ANTLRTree>)theOldRoot newRoot:(id<ANTLRTree>)theNewRoot stream:(id<ANTLRIntStream>)aStream;
-
-- (void) setOldRoot:(id<ANTLRTree>)aTree;
-- (void) setNewRoot:(id<ANTLRTree>)aTree;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeIterator.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeIterator.h
deleted file mode 100644
index e6d5e71..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeIterator.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//
-//  ANTLRTreeIterator.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRFastQueue.h"
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTree.h"
-
-@interface ANTLRTreeIterator : NSObject 
-{
-	id<ANTLRTreeAdaptor> adaptor;
-	id<ANTLRTree> root;
-	id<ANTLRTree> tree;
-	BOOL firstTime;
-	id<ANTLRTree> up;
-	id<ANTLRTree> down;
-	id<ANTLRTree> eof;
-	
-	ANTLRFastQueue *nodes;
-}
-
-@property(retain, readwrite) id<ANTLRTree> up;
-@property(retain, readwrite) id<ANTLRTree> down;
-@property(retain, readwrite) id<ANTLRTree> eof;
-
-+ newANTRLTreeIterator;
-+ (ANTLRTreeIterator *) newANTRLTreeIteratorWithAdaptor:(ANTLRCommonTreeAdaptor *)adaptor
-                                                andTree:(id<ANTLRTree>)tree;
-- (id) init;
-- (id) initWithTree:(id<ANTLRTree>) t;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>) a andTree:(id<ANTLRTree>) t;
-
-- (void) reset;
-- (BOOL) hasNext;
-- (id) nextObject;
-- (NSArray *) allObjects;
-
-@end
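
The iterator above walks a tree in a flat order, emitting the UP/DOWN navigation nodes it declares as properties. A hedged Java sketch using the runtime's TreeIterator, assuming it yields the node sequence described here:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.TreeIterator;

    public class TreeIteratorDemo {
        public static void main(String[] args) {
            CommonTree root = new CommonTree(new CommonToken(10, "+"));
            root.addChild(new CommonTree(new CommonToken(11, "1")));
            root.addChild(new CommonTree(new CommonToken(11, "2")));

            // Walks the tree in a flat, lexer-like order, emitting navigation nodes.
            TreeIterator it = new TreeIterator(root);
            while (it.hasNext()) {
                System.out.println(it.next());  // expected roughly: +, DOWN, 1, 2, UP, then an EOF marker
            }
        }
    }
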
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeNodeStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeNodeStream.h
deleted file mode 100755
index bf6342c..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeNodeStream.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRIntStream.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRCommonTreeAdaptor.h"
-
-@protocol ANTLRTreeNodeStream < ANTLRIntStream > 
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)theAdaptor Tree:(ANTLRCommonTree *)theTree;
-
-- (id) LT:(NSInteger)k;
-- (id) getTreeSource;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (id<ANTLRTokenStream>) getTokenStream; 
-- (void) setUniqueNavigationNodes:(BOOL)flag;
-
-- (id) getNode:(NSInteger) idx;
-
-- (NSString *) toStringFromNode:(id)startNode ToNode:(id)stopNode;
-
-@end
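
A tree node stream flattens a tree (with imaginary DOWN/UP nodes) so a tree parser can consume it like a token stream. A Java sketch with CommonTreeNodeStream; token types are arbitrary:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeNodeStream;

    public class NodeStreamDemo {
        public static void main(String[] args) {
            CommonTree root = new CommonTree(new CommonToken(10, "+"));
            root.addChild(new CommonTree(new CommonToken(11, "1")));
            root.addChild(new CommonTree(new CommonToken(11, "2")));

            // Flatten the tree into a node stream a tree parser could consume.
            CommonTreeNodeStream nodes = new CommonTreeNodeStream(root);
            System.out.println(nodes.LT(1));                    // the "+" node
            System.out.println(nodes.getTreeSource() == root);  // true
        }
    }

A generated tree grammar would normally be constructed over nodes, usually after setTokenStream() so its rules can still reach the original token text.
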
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeParser.h
deleted file mode 100755
index e2f01ee..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeParser.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRTreeNodeStream.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRMismatchedTreeNodeException.h"
-
-@interface ANTLRTreeParser : ANTLRBaseRecognizer {
-	id<ANTLRTreeNodeStream> input;
-}
-
-@property (retain, getter=getInput, setter=setInput:) id<ANTLRTreeNodeStream> input;
-
-+ (id) newANTLRTreeParser:(id<ANTLRTreeNodeStream>)anInput;
-+ (id) newANTLRTreeParser:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)state;
-
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)theInput;
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)theInput
-                State:(ANTLRRecognizerSharedState *)state;
-
-
-- (id<ANTLRTreeNodeStream>)getInput;
-- (void) setInput:(id<ANTLRTreeNodeStream>)anInput;
-
-- (void) setTreeNodeStream:(id<ANTLRTreeNodeStream>) anInput;
-- (id<ANTLRTreeNodeStream>) getTreeNodeStream;
-
-- (NSString *)getSourceName;
-
-- (id) getCurrentInputSymbol:(id<ANTLRIntStream>) anInput;
-
-- (id) getMissingSymbol:(id<ANTLRIntStream>)input
-              Exception:(ANTLRRecognitionException *) e
-          ExpectedToken:(NSInteger) expectedTokenType
-                 BitSet:(ANTLRBitSet *)follow;
-
-/** Match '.' in tree parser has special meaning.  Skip node or
- *  entire tree if node has children.  If children, scan until
- *  corresponding UP node.
- */
-- (void) matchAny:(id<ANTLRIntStream>)ignore;
-
-/** We have DOWN/UP nodes in the stream that have no line info; override.
- *  plus we want to alter the exception type.  Don't try to recover
- *  from tree parser errors inline...
- */
-- (id) recoverFromMismatchedToken:(id<ANTLRIntStream>)anInput
-                             Type:(NSInteger)ttype
-                           Follow:(ANTLRBitSet *)follow;
-
-/** Prefix error message with the grammar name because message is
- *  always intended for the programmer because the parser built
- *  the input tree not the user.
- */
-- (NSString *)getErrorHeader:(ANTLRRecognitionException *)e;
-
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(NSArray *) tokenNames;
-
-- (void) traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-- (void) traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreePatternLexer.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreePatternLexer.h
deleted file mode 100644
index f6059d3..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreePatternLexer.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//
-//  ANTLRTreePatternLexer.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-typedef enum {
-	ANTLRLexerTokenTypeEOF = -1,
-	ANTLRLexerTokenTypeInvalid,
-	ANTLRLexerTokenTypeBEGIN,
-	ANTLRLexerTokenTypeEND,
-	ANTLRLexerTokenTypeID,
-	ANTLRLexerTokenTypeARG,
-	ANTLRLexerTokenTypePERCENT,
-	ANTLRLexerTokenTypeCOLON,
-	ANTLRLexerTokenTypeDOT,
-} ANTLRLexerTokenType;
-
-
-@interface ANTLRTreePatternLexer : NSObject {
-
-/** The tree pattern to lex like "(A B C)" */
-NSString *pattern;
-    
-/** Index into input string */
-NSInteger p;
-    
-/** Current char */
-NSInteger c;
-    
-/** How long is the pattern in char? */
-NSInteger n;
-    
-/** Set when token type is ID or ARG (name mimics Java's StreamTokenizer) */
-NSMutableData *sval;
-char *data;
-    
-BOOL error;
-
-}
-
-@property (retain, getter=getPattern, setter=setPattern:) NSString *pattern;
-@property (getter=getP, setter=setP:) NSInteger p;
-@property (getter=getC, setter=setC:) NSInteger c;
-@property (getter=getN, setter=setN:) NSInteger n;
-@property (retain, getter=getSval, setter=setSval:) NSMutableData *sval;
-@property (assign, getter=getData, setter=setData:) char *data;
-@property (getter=getError, setter=setError) BOOL error;
-
-+ (ANTLRTreePatternLexer *)newANTLRTreePatternLexer:(NSString *)aPattern;
-- (id) init;
-- (id) initWithPattern:(NSString *)aPattern;
-- (NSInteger) nextToken;
-- (void) consume;
-- (NSString *)toString;
-
-- (NSMutableData *)getSval;
-- (void) setSval:(NSMutableData *)aSval;
-
-@end
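
The pattern language lexed above — "(A B C)", bracketed text arguments, and %label: prefixes — is what TreeWizard consumes in the Java runtime. A hedged sketch; the token-name array and types are made up, and the first four entries mimic the placeholder names generated recognizers use:

    import java.util.HashMap;
    import java.util.Map;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeWizard;

    public class WizardDemo {
        public static void main(String[] args) {
            // Index in the array is the token type; names are invented for the sketch.
            String[] tokenNames = {"<invalid>", "<EOR>", "<DOWN>", "<UP>", "PLUS", "INT"};
            TreeWizard wizard = new TreeWizard(new CommonTreeAdaptor(), tokenNames);

            // Build ^(PLUS INT INT) straight from the pattern language.
            CommonTree t = (CommonTree) wizard.create("(PLUS INT[1] INT[2])");
            System.out.println(t.toStringTree());   // (PLUS 1 2)

            // Structural match with a label bound to the first operand.
            Map<String, Object> labels = new HashMap<String, Object>();
            if (wizard.parse(t, "(PLUS %left:INT INT)", labels)) {
                System.out.println(labels.get("left"));  // 1
            }
        }
    }
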
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreePatternParser.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreePatternParser.h
deleted file mode 100644
index f6d6dc6..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreePatternParser.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//
-//  ANTLRTreePatternParser.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreePatternLexer.h"
-#import "ANTLRTreeWizard.h"
-#import "ANTLRTreeAdaptor.h"
-
-@interface ANTLRTreePatternParser : NSObject {
-
-ANTLRTreePatternLexer *tokenizer;
-NSInteger ttype;
-ANTLRTreeWizard *wizard;
-id<ANTLRTreeAdaptor> adaptor;
-    
-}
-
-+ (ANTLRTreePatternParser *)newANTLRTreePatternParser:(ANTLRTreePatternLexer *)aTokenizer
-                                               Wizard:(ANTLRTreeWizard *)aWizard
-                                              Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (id) init;
-- (id) initWithTokenizer:(ANTLRTreePatternLexer *)tokenizer
-                  Wizard:(ANTLRTreeWizard *)aWizard
-                 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (id<ANTLRTree>) pattern;
-- (id<ANTLRTree>) parseTree;
-- (id<ANTLRTree>) parseNode;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeRewriter.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeRewriter.h
deleted file mode 100644
index aee873e..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeRewriter.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-//  ANTLRTreeRewriter.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeParser.h"
-
-@interface ANTLRfptr : NSObject {
-    id  actor;
-    SEL ruleSEL;
-}
-
-+ (ANTLRfptr *)newANTLRfptrWithRule:(SEL)aRuleAction withObject:(id)anObject;
--initWithRule:(SEL)ruleAction withObject:(id)anObject;
-
-- (id)rule;
-
-@end
-
-@interface ANTLRTreeRewriter : ANTLRTreeParser {
-    BOOL showTransformations;
-    id<ANTLRTokenStream> originalTokenStream;
-    id<ANTLRTreeAdaptor> originalAdaptor;
-    ANTLRfptr *rule;
-    ANTLRfptr *topdown_fptr;
-    ANTLRfptr *bottomup_ftpr;
-}
-
-+ (ANTLRTreeRewriter *) newANTLRTreeRewriter:(id<ANTLRTreeNodeStream>)anInput;
-+ (ANTLRTreeRewriter *) newANTLRTreeRewriter:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-- (id)initWithStream:(id<ANTLRTreeNodeStream>)anInput;
-- (id)initWithStream:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-- (ANTLRTreeRewriter *) applyOnce:(id<ANTLRTree>)t Rule:(ANTLRfptr *)whichRule;
-- (ANTLRTreeRewriter *) applyRepeatedly:(id<ANTLRTree>)t Rule:(ANTLRfptr *)whichRule;
-- (ANTLRTreeRewriter *) downup:(id<ANTLRTree>)t;
-- (ANTLRTreeRewriter *) pre:(id<ANTLRTree>)t;
-- (ANTLRTreeRewriter *) post:(id<ANTLRTree>)t;
-- (ANTLRTreeRewriter *) downup:(id<ANTLRTree>)t XForm:(BOOL)aShowTransformations;
-- (void)reportTransformation:(id<ANTLRTree>)oldTree Tree:(id<ANTLRTree>)newTree;
-- (ANTLRTreeRewriter *) topdown_fptr;
-- (ANTLRTreeRewriter *) bottomup_ftpr;
-- (ANTLRTreeRewriter *) topdown;
-- (ANTLRTreeRewriter *) bottomup;
-
-@end
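
For orientation, a minimal usage sketch of the rewriter interface removed above, assuming a tree node stream and tree produced by an earlier parse (both placeholders, not part of this change); a real subclass would install its own rule selectors through ANTLRfptr before invoking downup:.

#import "ANTLRTreeRewriter.h"

// Illustrative only: drives the downup pass declared in the header above.
// "nodes" (id<ANTLRTreeNodeStream>) and "t" (id<ANTLRTree>) are assumed inputs;
// error handling is omitted.
static void rewriteTree(id<ANTLRTreeNodeStream> nodes, id<ANTLRTree> t)
{
    ANTLRTreeRewriter *rewriter = [ANTLRTreeRewriter newANTLRTreeRewriter:nodes];
    [rewriter downup:t XForm:YES];   // YES requests reportTransformation: output
}
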
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeRuleReturnScope.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeRuleReturnScope.h
deleted file mode 100644
index ea8a487..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeRuleReturnScope.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//
-//  ANTLRTreeRuleReturnScope.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuleReturnScope.h"
-#import "ANTLRCommonTree.h"
-
-@interface ANTLRTreeRuleReturnScope : ANTLRRuleReturnScope {
-    ANTLRCommonTree *startNode;
-}
-
-@property (retain, getter=getStart, setter=setStart:) ANTLRCommonTree *startNode;
-
-/** First node or root node of tree matched for this rule. */
-
-- (ANTLRCommonTree *)getStart;
-- (void)setStart:(ANTLRCommonTree *)aStartNode;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeVisitor.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeVisitor.h
deleted file mode 100644
index 1f167bb..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeVisitor.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-//  ANTLRTreeVisitor.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTreeVisitorAction.h"
-
-@interface ANTLRTreeVisitor : NSObject {
-   id<ANTLRTreeAdaptor> adaptor;
-}
-+ (ANTLRTreeVisitor *)newANTLRTreeVisitor:(id<ANTLRTreeAdaptor>) anAdaptor;
-+ (ANTLRTreeVisitor *)newANTLRTreeVisitor;
-- (id)init;
-- (id)initWithAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (ANTLRTreeVisitor *)visit:(id<ANTLRTree>)t Action:(ANTLRTreeVisitorAction *)action;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeVisitorAction.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeVisitorAction.h
deleted file mode 100644
index c9c0856..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeVisitorAction.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-//  ANTLRTreeVisitorAction.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRTreeVisitorAction : NSObject
-{
-
-}
-
-+ (ANTLRTreeVisitorAction *)newANTLRTreeVisitorAction;
-- (id) init;
-
-/** Execute an action before visiting children of t.  Return t or
- *  a rewritten t.  It is up to the visitor to decide what to do
- *  with the return value.  Children of returned value will be
- *  visited if using TreeVisitor.visit().
- */
-- (ANTLRTreeVisitorAction *)pre:(ANTLRTreeVisitorAction *) t;
-
-/** Execute an action after visiting children of t.  Return t or
- *  a rewritten t.  It is up to the visitor to decide what to do
- *  with the return value.
- */
-- (ANTLRTreeVisitorAction *)post:(ANTLRTreeVisitorAction *) t;
-
-@end
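
The pre:/post: contract documented in the header above is driven through ANTLRTreeVisitor's visit:Action: (removed earlier in this series of hunks). A minimal sketch follows; the subclass name TraceAction and the adaptor/tree arguments are assumptions, not anything defined in this patch.

#import "ANTLRTreeVisitor.h"
#import "ANTLRTreeVisitorAction.h"

// Sketch: a pass-through action that returns each visited value unchanged.
// The signatures copy the header above verbatim (note the parameter is declared
// as ANTLRTreeVisitorAction * even though the comments describe the visited
// tree node t).
@interface TraceAction : ANTLRTreeVisitorAction
@end

@implementation TraceAction
- (ANTLRTreeVisitorAction *)pre:(ANTLRTreeVisitorAction *)t  { return t; }  // before children
- (ANTLRTreeVisitorAction *)post:(ANTLRTreeVisitorAction *)t { return t; }  // after children
@end

static void traceTree(id<ANTLRTree> tree, id<ANTLRTreeAdaptor> adaptor)
{
    ANTLRTreeVisitor *v = [ANTLRTreeVisitor newANTLRTreeVisitor:adaptor];
    [v visit:tree Action:[TraceAction new]];
}
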
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeWizard.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeWizard.h
deleted file mode 100644
index d952572..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRTreeWizard.h
+++ /dev/null
@@ -1,134 +0,0 @@
-//
-//  ANTLRTreeWizard.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRMapElement.h"
-#import "ANTLRMap.h"
-
-@class ANTLRVisitor;
-
-@protocol ANTLRContextVisitor <NSObject>
-// TODO: should this be called visit or something else?
-- (void) visit:(id<ANTLRTree>)t Parent:(id<ANTLRTree>)parent ChildIndex:(NSInteger)childIndex Map:(ANTLRMap *)labels;
-
-@end
-
-@interface ANTLRVisitor : NSObject <ANTLRContextVisitor> {
-    NSInteger action;
-    id actor;
-    id object1;
-    id object2;
-}
-+ (ANTLRVisitor *)newANTLRVisitor:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2;
-- (id) initWithAction:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2;
-
-- (void) visit:(id<ANTLRTree>)t;
-- (void) visit:(id<ANTLRTree>)t Parent:(id<ANTLRTree>)parent ChildIndex:(NSInteger)childIndex Map:(ANTLRMap *)labels;
-
-@end
-
-/** When using %label:TOKENNAME in a tree for parse(), we must
- *  track the label.
- */
-@interface ANTLRTreePattern : ANTLRCommonTree {
-    NSString *label;
-    BOOL      hasTextArg;
-}
-@property (retain, getter=getLabel, setter=setLabel:) NSString *label;
-@property (assign, getter=getHasTextArg, setter=setHasTextArg:) BOOL hasTextArg;
-
-+ (ANTLRTreePattern *)newANTLRTreePattern:(id<ANTLRToken>)payload;
-
-- (id) initWithToken:(id<ANTLRToken>)payload;
-- (NSString *)toString;
-@end
-
-@interface ANTLRWildcardTreePattern : ANTLRTreePattern {
-}
-
-+ (ANTLRWildcardTreePattern *)newANTLRWildcardTreePattern:(id<ANTLRToken>)payload;
-- (id) initWithToken:(id<ANTLRToken>)payload;
-@end
-
-/** This adaptor creates TreePattern objects for use during scan() */
-@interface ANTLRTreePatternTreeAdaptor : ANTLRCommonTreeAdaptor {
-}
-+ (ANTLRTreePatternTreeAdaptor *)newTreeAdaptor;
-#ifdef DONTUSENOMO
-+ (ANTLRTreePatternTreeAdaptor *)newTreeAdaptor:(id<ANTLRToken>)payload;
-#endif
-- (id) init;
-#ifdef DONTUSENOMO
-- initWithToken:(id<ANTLRToken>)payload;
-#endif
-- (id<ANTLRTree>)createTreePattern:(id<ANTLRToken>)payload;
-
-@end
-
-@interface ANTLRTreeWizard : NSObject {
-	id<ANTLRTreeAdaptor> adaptor;
-	ANTLRMap *tokenNameToTypeMap;
-}
-+ (ANTLRTreeWizard *) newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)anAdaptor;
-+ (ANTLRTreeWizard *)newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)adaptor Map:(ANTLRMap *)aTokenNameToTypeMap;
-+ (ANTLRTreeWizard *)newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)adaptor TokenNames:(NSArray *)theTokNams;
-+ (ANTLRTreeWizard *)newANTLRTreeWizardWithTokenNames:(NSArray *)theTokNams;
-- (id) init;
-- (id) initWithAdaptor:(id<ANTLRTreeAdaptor>)adaptor;
-- (id) initWithAdaptor:(id<ANTLRTreeAdaptor>)adaptor Map:(ANTLRMap *)tokenNameToTypeMap;
-- (id) initWithTokenNames:(NSArray *)theTokNams;
-- (id) initWithTokenNames:(id<ANTLRTreeAdaptor>)anAdaptor TokenNames:(NSArray *)theTokNams;
-- (ANTLRMap *)computeTokenTypes:(NSArray *)theTokNams;
-- (NSInteger)getTokenType:(NSString *)tokenName;
-- (ANTLRMap *)index:(id<ANTLRTree>)t;
-- (void) _index:(id<ANTLRTree>)t Map:(ANTLRMap *)m;
-- (NSMutableArray *)find:(id<ANTLRTree>) t Pattern:(NSString *)pattern;
-- (ANTLRTreeWizard *)findFirst:(id<ANTLRTree>) t Type:(NSInteger)ttype;
-- (ANTLRTreeWizard *)findFirst:(id<ANTLRTree>) t Pattern:(NSString *)pattern;
-- (void) visit:(id<ANTLRTree>)t Type:(NSInteger)ttype Visitor:(ANTLRVisitor *)visitor;
-- (void) _visit:(id<ANTLRTree>)t
-         Parent:(id<ANTLRTree>)parent
-     ChildIndex:(NSInteger)childIndex
-           Type:(NSInteger)ttype
-        Visitor:(ANTLRVisitor *)visitor;
-- (void)visit:(id<ANTLRTree>)t Pattern:(NSString *)pattern Visitor:(ANTLRVisitor *)visitor;
-- (BOOL)parse:(id<ANTLRTree>)t Pattern:(NSString *)pattern Map:(ANTLRMap *)labels;
-- (BOOL) parse:(id<ANTLRTree>) t Pattern:(NSString *)pattern;
-- (BOOL) _parse:(id<ANTLRTree>)t1 Pattern:(ANTLRTreePattern *)tpattern Map:(ANTLRMap *)labels;
-- (id<ANTLRTree>) createTree:(NSString *)pattern;
-- (BOOL)equals:(id)t1 O2:(id)t2 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (BOOL)equals:(id)t1 O2:(id)t2;
-- (BOOL) _equals:(id)t1 O2:(id)t2 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-
-@end
-
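
A short sketch of the wizard API removed above, using only methods declared in that header; the token-name list and the pattern strings below are illustrative assumptions, not values taken from this change.

#import "ANTLRTreeWizard.h"

// Sketch: build a wizard from token names, materialize a tree from a pattern
// string, then locate matching subtrees.
static void wizardDemo(void)
{
    NSArray *tokenNames = [NSArray arrayWithObjects:
        @"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", @"PLUS", @"INT", nil];  // made-up token table
    ANTLRTreeWizard *wiz = [ANTLRTreeWizard newANTLRTreeWizardWithTokenNames:tokenNames];
    id<ANTLRTree> t = [wiz createTree:@"(PLUS INT INT)"];
    NSMutableArray *sums = [wiz find:t Pattern:@"(PLUS . .)"];      // '.' matches any child
    BOOL matched = [wiz parse:t Pattern:@"(PLUS INT INT)"];         // %label:TOKENNAME labels need the Map: variant
    NSLog(@"found %lu PLUS nodes, matched=%d", (unsigned long)[sums count], matched);
}
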
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUnbufferedCommonTreeNodeStreamState.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUnbufferedCommonTreeNodeStreamState.h
deleted file mode 100755
index 9e79d86..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUnbufferedCommonTreeNodeStreamState.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-
-@interface ANTLRUnbufferedCommonTreeNodeStreamState : NSObject {
-	ANTLRCommonTree *currentNode;
-	ANTLRCommonTree *previousNode;
-
-	int currentChildIndex;
-	int absoluteNodeIndex;
-	unsigned int nodeStackSize;
-	unsigned int indexStackSize;
-	
-	NSMutableArray *lookahead;
-}
-
-- (ANTLRCommonTree *) currentNode;
-- (void) setCurrentNode: (ANTLRCommonTree *) aCurrentNode;
-
-- (ANTLRCommonTree *) previousNode;
-- (void) setPreviousNode: (ANTLRCommonTree *) aPreviousNode;
-
-- (NSInteger) currentChildIndex;
-- (void) setCurrentChildIndex: (NSInteger) aCurrentChildIndex;
-
-- (NSInteger) absoluteNodeIndex;
-- (void) setAbsoluteNodeIndex: (NSInteger) anAbsoluteNodeIndex;
-
-- (NSUInteger) nodeStackSize;
-- (void) setNodeStackSize: (NSUInteger) aNodeStackSize;
-
-- (NSUInteger) indexStackSize;
-- (void) setIndexStackSize: (NSUInteger) anIndexStackSize;
-
-- (NSMutableArray *) lookahead;
-- (void) setLookahead: (NSMutableArray *) aLookahead;
-
-- (void) addToLookahead: (id)lookaheadObject;
-- (void) removeFromLookahead: (id)lookaheadObject;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUnbufferedTokenStream.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUnbufferedTokenStream.h
deleted file mode 100644
index e4f8630..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUnbufferedTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//
-//  ANTLRUnbufferedTokenStream.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/12/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuntimeException.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRToken.h"
-
-@interface ANTLRUnbufferedTokenStream : ANTLRLookaheadStream {
-	id<ANTLRTokenSource> tokenSource;
-    NSInteger tokenIndex; // simple counter to set token index in tokens
-    NSInteger channel;
-}
-
-@property (retain, getter=getTokenSource, setter=setTokenSource:) id<ANTLRTokenSource> tokenSource;
-@property (getter=getTokenIndex, setter=setTokenIndex) NSInteger tokenIndex;
-@property (getter=getChannel, setter=setChannel:) NSInteger channel;
-
-+ (ANTLRUnbufferedTokenStream *)newANTLRUnbufferedTokenStream:(id<ANTLRTokenSource>)aTokenSource;
-- (id) init;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource;
-
-- (id<ANTLRToken>)nextElement;
-- (BOOL)isEOF:(id<ANTLRToken>) aToken;
-- (id<ANTLRTokenSource>)getTokenSource;
-- (NSString *)toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *)toStringFromToken:(id<ANTLRToken>)aStart ToEnd:(id<ANTLRToken>)aStop;
-- (NSInteger)LA:(NSInteger)anIdx;
-- (id<ANTLRToken>)objectAtIndex:(NSInteger)anIdx;
-- (NSString *)getSourceName;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUniqueIDMap.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUniqueIDMap.h
deleted file mode 100644
index a805bd5..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUniqueIDMap.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//
-//  ANTLRUniqueIDMap.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/7/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-#import "ANTLRNodeMapElement.h"
-
-#define SUCCESS             0
-#define FAILURE             -1
-#define HASHSIZE            101
-#define HBUFSIZE            0x2000
-
-@interface ANTLRUniqueIDMap : ANTLRPtrBuffer {
-    NSInteger lastHash;
-}
-
-@property (getter=getLastHash, setter=setLastHash) NSInteger lastHash;
-
-+ (id)newANTLRUniqueIDMap;
-+ (id)newANTLRUniqueIDMapWithLen:(NSInteger)aHashSize;
-
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-// Instance Methods
-- (NSInteger)count;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-
-- (void)deleteANTLRUniqueIDMap:(ANTLRNodeMapElement *)np;
-- (void)delete_chain:(ANTLRNodeMapElement *)np;
-- (id)getNode:(id<ANTLRTree>)aNode;
-- (void)putID:(id)anID Node:(id<ANTLRTree>)aNode;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUnwantedTokenException.h b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUnwantedTokenException.h
deleted file mode 100644
index 2945bfe..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ANTLRUnwantedTokenException.h
+++ /dev/null
@@ -1,47 +0,0 @@
-//
-//  ANTLRUnwantedTokenException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRMismatchedTokenException.h"
-
-@interface ANTLRUnwantedTokenException : ANTLRMismatchedTokenException {
-
-}
-+ (ANTLRUnwantedTokenException *)newANTLRUnwantedTokenException;
-+ (ANTLRUnwantedTokenException *)newANTLRUnwantedTokenException:(NSInteger)expected Stream:(id<ANTLRIntStream>)anInput;
-
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInput And:(NSInteger)expected;
-- (id<ANTLRToken>)getUnexpectedToken;
-- (NSString *)toString;
-                     
-    
-@end
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Resources/English.lproj/InfoPlist.strings b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Resources/English.lproj/InfoPlist.strings
deleted file mode 100644
index fa1b75f..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Resources/English.lproj/InfoPlist.strings
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Resources/Info.plist b/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Resources/Info.plist
deleted file mode 100644
index 24436a3..0000000
--- a/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Resources/Info.plist
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-	<key>CFBundleDevelopmentRegion</key>
-	<string>English</string>
-	<key>CFBundleExecutable</key>
-	<string>ANTLR</string>
-	<key>CFBundleIdentifier</key>
-	<string>org.antlr.antlrframework</string>
-	<key>CFBundleInfoDictionaryVersion</key>
-	<string>6.0</string>
-	<key>CFBundleName</key>
-	<string>ANTLR</string>
-	<key>CFBundlePackageType</key>
-	<string>FMWK</string>
-	<key>CFBundleSignature</key>
-	<string>????</string>
-	<key>CFBundleVersion</key>
-	<string>1.0</string>
-</dict>
-</plist>
diff --git a/antlr-3.4/runtime/ObjC/Framework/ACBTree.h b/antlr-3.4/runtime/ObjC/Framework/ACBTree.h
deleted file mode 100644
index ba19527..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ACBTree.h
+++ /dev/null
@@ -1,97 +0,0 @@
-//
-//  ACBtree.h
-//  ST4
-//
-//  Created by Alan Condit on 4/18/11.
-//  Copyright 2011 Alan Condit. All rights reserved.
-//
-
-typedef enum {
-    BTNODE,
-    LEAF
-} NodeType;
-
-#import <Foundation/Foundation.h>
-
-@class AMutableDictionary;
-
-#define BTNODESIZE 11
-#define BTHNODESIZE ((BTNODESIZE-1)/2)
-#define BTKeySize  38
-#define FAILURE -1
-#define SUCCESS 0
-
-@interface ACBKey : NSObject {
-    NSInteger recnum;               /*  record number                   */
-    __strong NSString *key;         /*  key pointer id                  */
-    char      kstr[BTKeySize];      /*  key entry                       */
-}
-
-@property (assign) NSInteger recnum;
-@property (retain) NSString *key;
-
-+ (ACBKey *)newKey;
-+ (ACBKey *)newKeyWithKStr:(NSString *)aKey;
-- (id) init;
-- (id) initWithKStr:(NSString *)aKey;
-
-@end
-
-@interface ACBTree : NSObject {
-    __strong AMutableDictionary *dict;  /* The dictionary that this node belongs to */
-    __strong ACBTree *lnode;            /* pointer to left node            */
-    __strong ACBTree *rnode;            /* pointer to right node           */
-    __strong ACBKey  **keys;            /* pointer to keys                 */
-    __strong ACBTree **btNodes;         /* pointers to btNodes             */
-    __strong ACBKey  *keyArray[BTNODESIZE];
-    __strong ACBTree *btNodeArray[BTNODESIZE];
-    NSInteger lnodeid;                  /* nodeid of left node             */
-    NSInteger rnodeid;                  /* nodeid of right node            */
-    NSInteger nodeid;                   /* node id                         */
-    NSInteger nodeType;                 /* 1 = node, 2 = leaf, -1 = unused */
-    NSInteger numkeys;                  /* number of active entries        */
-    NSInteger numrecs;                  /* number of records               */
-    NSInteger updtd;                    /* modified since update flag      */
-    NSInteger keylen;                   /* length of key                   */
-    NSInteger kidx;
-}
-
-@property (retain) AMutableDictionary *dict;
-@property (retain) ACBTree  *lnode;
-@property (retain) ACBTree  *rnode;
-@property (assign) ACBKey   **keys;
-@property (assign) ACBTree  **btNodes;
-@property (assign) NSInteger lnodeid;
-@property (assign) NSInteger rnodeid;
-@property (assign) NSInteger nodeid;
-@property (assign) NSInteger nodeType;
-@property (assign) NSInteger numkeys;
-@property (assign) NSInteger numrecs;
-@property (assign) NSInteger updtd;
-@property (assign) NSInteger keylen;
-@property (assign) NSInteger kidx;
-
-+ (ACBTree *) newNodeWithDictionary:(AMutableDictionary *)theDict;
-
-- (id)initWithDictionary:(AMutableDictionary *)theDict;
-
-- (ACBTree *)createnode:(ACBKey *)kp0;
-- (ACBTree *)deletekey:(NSString *)dkey;
-- (ACBTree *)insertkey:(ACBKey *)ikp value:(id)value;
-- (ACBKey *)internaldelete:(ACBKey *)dkp;
-- (ACBTree *) internalinsert:(ACBKey *)key value:(id)value split:(NSInteger *)h;
-- (ACBTree *) insert:(ACBKey *)key value:(id)value index:(NSInteger)hi split:(NSInteger *)h;
-- (NSInteger)delfrmnode:(ACBKey *)ikp;
-- (NSInteger)insinnode:(ACBKey *)key value:(id)value;
-- (void)mergenode:(NSInteger)i;
-- (ACBTree *)splitnode:(NSInteger)idx;
-- (ACBTree *)search:(id)key;
-- (NSInteger)searchnode:(id)key match:(BOOL)match;
-- (void)borrowleft:(NSInteger)i;
-- (void)borrowright:(NSInteger)i;
-- (void)rotateleft:(NSInteger)j;
-- (void)rotateright:(NSInteger)j;
-- (NSInteger) keyWalkLeaves;
-- (NSInteger) objectWalkLeaves;
-- (void)dealloc;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ACBTree.m b/antlr-3.4/runtime/ObjC/Framework/ACBTree.m
deleted file mode 100644
index 99c0cda..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ACBTree.m
+++ /dev/null
@@ -1,721 +0,0 @@
-//
-//  ACBTree.m
-//  ST4
-//
-//  Created by Alan Condit on 4/18/11.
-//  Copyright 2011 Alan Condit. All rights reserved.
-//
-
-#import <Cocoa/Cocoa.h>
-#import "ACBTree.h"
-#import "AMutableDictionary.h"
-#import "ANTLRRuntimeException.h"
-
-@class AMutableDictionary;
-
-@implementation ACBKey
-
-static NSInteger RECNUM = 0;
-
-@synthesize recnum;
-@synthesize key;
-
-+ (ACBKey *)newKey
-{
-    return [[ACBKey alloc] init];
-}
-
-+ (ACBKey *)newKeyWithKStr:(NSString *)aKey
-{
-    return [[ACBKey alloc] initWithKStr:(NSString *)aKey];
-}
-
-- (id) init
-{
-    self =[super init];
-    if ( self != nil ) {
-        recnum = RECNUM++;
-    }
-    return self;
-}
-
-- (id) initWithKStr:(NSString *)aKey
-{
-    self =[super init];
-    if ( self != nil ) {
-        NSInteger len;
-        recnum = RECNUM++;
-        key = aKey;
-        len = [aKey length];
-        if ( len >= BTKeySize ) {
-            len = BTKeySize - 1;
-        }
-        strncpy( kstr, [aKey cStringUsingEncoding:NSASCIIStringEncoding], len);
-        kstr[len] = '\0';
-    }
-    return self;
-}
-
-@end
-
-@implementation ACBTree
-
-@synthesize dict;
-@synthesize lnode;
-@synthesize rnode;
-@synthesize keys;
-@synthesize btNodes;
-@synthesize lnodeid;
-@synthesize rnodeid;
-@synthesize nodeid;
-@synthesize nodeType;
-@synthesize numkeys;
-@synthesize numrecs;
-@synthesize updtd;
-@synthesize keylen;
-@synthesize kidx;
-
-+ (ACBTree *) newNodeWithDictionary:(AMutableDictionary *)theDict
-{
-    return [[ACBTree alloc] initWithDictionary:theDict];
-}
-
-- (id)initWithDictionary:(AMutableDictionary *)theDict
-{
-    self = [super init];
-    if (self) {
-        // Initialization code here.
-        dict = theDict;
-        nodeid = theDict.nxt_nodeid++;
-        keys = keyArray;
-        btNodes = btNodeArray;
-        if ( nodeid == 0 ) {
-            numkeys = 0;
-        }
-    }
-    
-    return self;
-}
-
-- (ACBTree *)createnode:(ACBKey *)kp
-{
-    ACBTree *tmp;
-    
-    tmp = [ACBTree newNodeWithDictionary:dict];
-    tmp.nodeType = nodeType;
-    tmp.lnode = self;
-    tmp.rnode = self.rnode;
-    self.rnode = tmp;
-    //tmp.btNodes[0] = self;
-    //tmp.keys[0] = kp;
-    tmp.updtd = YES;
-    tmp.numrecs = ((nodeType == LEAF)?1:numrecs);
-    updtd = YES;
-    tmp.numkeys = 1;
-    [tmp retain];
-    return(tmp);
-}
-
-- (ACBTree *)deletekey:(NSString *)dkey
-{
-    ACBKey /* *del, */ *dkp;
-    ACBTree *told, *sNode;
-    BOOL mustRelease = NO;
-
-    if ( [dkey isKindOfClass:[NSString class]] ) {
-        dkp = [ACBKey newKeyWithKStr:dkey];
-        mustRelease = YES;
-    }
-    else if ( [dkey isKindOfClass:[ACBKey class]] )
-        dkp = (ACBKey *)dkey;
-    else
-        @throw [ANTLRIllegalArgumentException newException:[NSString stringWithFormat:@"Don't understand this key:\"%@\"", dkey]];
-    sNode = [self search:dkp.key];
-    if ( sNode == nil || [sNode searchnode:dkp.key match:YES] == FAILURE ) {
-        if ( mustRelease ) [dkp release];
-        return(self);
-    }
-    told = dict.root;
-    /* del = */[self internaldelete:dkp];
-    
-    /*  check for shrink at the root  */
-    if ( numkeys == 1 && nodeType != LEAF ) {
-        told = btNodes[0];
-        told.nodeid = 1;
-        told.updtd = YES;
-        dict.root = told;
-    }
-#ifdef DONTUSENOMO
-    if (debug == 'd') [self printtree];
-#endif
-    if ( mustRelease ) [dkp release];
-    return(told);
-}
-
-/** insertKey is the insertion entry point
- *  It determines if the key exists in the tree already
- *  it calls internalInsert to determine if the key already exists in the tree,
- *  and returns the node to be updated
- */
-- (ACBTree *)insertkey:(ACBKey *)kp value:(id)value
-{
-    ACBTree *tnew, *q;
-    NSInteger h, nodeNum;
-    
-    tnew = self;
-    q = [self internalinsert:kp value:value split:&h];
-    /*  check for growth at the root  */
-    if ( q != nil ) {
-        tnew = [[ACBTree newNodeWithDictionary:dict] retain];
-        tnew.nodeType = BTNODE;
-        nodeNum = tnew.nodeid;
-        tnew.nodeid = 0;
-        self.nodeid = nodeNum;
-        [tnew insert:self.keys[numkeys-1] value:self index:0 split:&h];
-        [tnew insert:q.keys[q.numkeys-1] value:q index:1 split:&h];
-        tnew.numrecs = self.numrecs + q.numrecs;
-        tnew.lnodeid = self.nodeid;
-        tnew.rnodeid = self.rnodeid;
-        self.rnodeid = tnew.nodeid;
-        tnew.lnode = self;
-        tnew.rnode = self.rnode;
-        self.rnode = tnew;
-        /* affected by nodeid swap */
-        // newnode.lnodeid = tnew.btNodes[0].nodeid;
-    }
-    //dict.root = t;
-    //l.reccnt++;
-    return(tnew);
-}
-
-- (ACBTree *)search:(NSString *)kstr
-{
-    NSInteger i, ret;
-    NSInteger srchlvl = 0;
-    ACBTree *t;
-
-    t = self;
-    if ( self.numkeys == 0 && self.nodeType == LEAF )
-        return nil;
-    while (t != nil) {
-        for (i = 0; i < t.numkeys; i++) {
-            ret = [t.keys[i].key compare:kstr];
-            if ( ret >= 0 ) {
-                if ( t.nodeType == LEAF ) {
-                    if ( ret == 0 ) return (t);    /* node containing keyentry found */
-                    else return nil;
-                }
-                else {
-                    break;
-                }
-            }
-        }
-        srchlvl++;
-        if ( t.nodeType == BTNODE ) t = t.btNodes[i];
-        else {
-            t = nil;
-        }
-    }
-    return(nil);          /* entry not found */
-}
-
-/** SEARCHNODE
- *  calling parameters --
- *      BKEY PTR for key to search for.
- *      TYPE for exact match(YES) or position(NO)
- *  returns -- i
- *      i == FAILURE when match required but does not exist.
- *      i == t.numkeys if no existing insertion branch found.
- *      otherwise i == insertion branch.
- */
-- (NSInteger)searchnode:(NSString *)kstr match:(BOOL)match
-{
-    NSInteger i, ret;
-    for ( i = 0; i < numkeys; i++ ) {
-        ret = [keys[i].key compare:kstr];
-        if ( ret >= 0 ) {         /* key node found */
-            if ( ret == 0 && match == NO ) {
-                return FAILURE;
-            }
-            else if ( ret > 0 &&  match == YES ) {
-                return FAILURE;
-            }
-            break;
-        }
-    }
-    if ( i == numkeys && match == YES ) {
-        i = FAILURE;
-    }
-    return(i);
-}
-
-- (ACBKey *)internaldelete:(ACBKey *)dkp
-{
-    NSInteger i, nkey;
-    __strong ACBKey *del = nil;
-    ACBTree *tsb;
-    NSInteger srchlvl = 0;
-    
-    /* find deletion branch */
-    if ( self.nodeType != LEAF ) {
-        srchlvl++;
-        /* search for end of tree */
-        i = [self searchnode:dkp.key match:NO];
-        del = [btNodes[i] internaldelete:dkp];
-        srchlvl--;
-        /* if not LEAF propagate back high key    */
-        tsb = btNodes[i];
-        nkey = tsb.numkeys - 1;
-    }
-    /***  the bottom of the tree has been reached       ***/
-    else {                   /* set up deletion ptrs      */
-        if ( [self delfrmnode:dkp] == SUCCESS ) {
-            if ( numkeys < BTHNODESIZE+1 ) {
-                del = dkp;
-            }
-            else {
-                del = nil;
-            }
-            dkp.recnum = nodeid;
-            return(del);
-        }
-    }
-    /***       indicate deletion to be done            ***/
-    if ( del != nil ) {
-        /*** the key in "del" has to be deleted from in present node ***/
-        if ( btNodes[i].numkeys >= BTHNODESIZE+1 ) {
-            /* node does not need balancing */
-            del = nil;
-            self.keys[i] = tsb.keys[nkey];
-        }
-        else {                         /* node requires balancing */
-            if ( i == 0 ) {
-                [self rotateright:0];
-                self.btNodes[0] = tsb;
-            } else if ( i < numkeys-1 ) {     /* look to the right first */
-                if ( self.btNodes[i+1].numkeys > BTHNODESIZE+1 ) {  /* carry from right */
-                    [self borrowright:i];
-                }
-                else {           /* merge present node with right node */
-                    [self mergenode:i];
-                }
-            }
-            else {                      /* look to the left */
-                if ( i > 0 ) {          /* carry or merge with left node */
-                    if ( self.btNodes[i-1].numkeys > BTHNODESIZE+1 ) { /* carry from left */
-                        [self borrowleft:i];
-                    }
-                    else { /*** merge present node with left node ***/
-                        i--;
-                        [self mergenode:i];
-                        tsb = self.btNodes[i];
-                    }
-                }
-            }
-        self.keys[i] = tsb.keys[nkey];
-        }
-    }
-    numrecs--;
-    updtd = TRUE;
-    return(del);
-}
-
-/** Search key kp on B-tree with root t; if found increment counter.
- *  otherwise insert an item with key kp in tree.  If an ACBKey
- *  emerges to be passed to a lower level, then assign it to kp;
- *  h = "tree t has become higher"
- */
-- (ACBTree *) internalinsert:(ACBKey *)kp value:(id)value split:(NSInteger *)h
-{
-    /* search key ins on node t^; h = false  */
-    NSInteger i, ret;
-    ACBTree *q, *tmp;
-    
-    for (i = 0; i < numkeys; i++) {
-        ret = [keys[i].key compare:kp.key];
-        if ( ret >= 0 ) {
-            if ( nodeType == LEAF && ret == 0 ) return (self);    /* node containing keyentry found */
-            break;
-        }
-    }
-    if ( nodeType == LEAF ) { /*  key goes in this node  */
-        q = [self insert:kp value:value index:i split:h];
-    }
-    else  { /* nodeType == BTNODE */
-        /*  key is not on this node  */
-        q = [self.btNodes[i] internalinsert:kp value:value split:h];
-        if ( *h ) {
-            [self insert:kp value:q index:i split:h];
-        }
-        else {
-            self.numrecs++;
-        }
-        tmp = self.btNodes[numkeys-1];
-        keys[numkeys-1] = tmp.keys[tmp.numkeys-1];
-        if ( i != numkeys-1 ) {
-            tmp = self.btNodes[i];
-            keys[i] = tmp.keys[tmp.numkeys-1];
-        }
-        updtd = YES;
-    } /* search */
-    return q;
-}
-
-/** Do the actual insertion or split and insert
- *  insert key to the right of t.keys[hi] 
- */
-- (ACBTree *) insert:(ACBKey *)kp value:(id)value index:(NSInteger)hi split:(NSInteger *)h
-{
-    ACBTree *b;
-    
-    if ( numkeys < BTNODESIZE ) {
-        *h = NO;
-        [self rotateright:hi];
-        keys[hi] = kp;
-        btNodes[hi] = value;
-        numrecs++;
-        numkeys++;
-        updtd = YES;
-        //[kp retain];
-        return nil;
-    }
-    else { /*  node t is full; split it and assign the emerging ACBKey to olditem  */
-        b = [self splitnode:hi];
-        if ( hi <= BTHNODESIZE ) {              /* insert key in left page */
-            [self rotateright:hi];
-            keys[hi] = kp;
-            btNodes[hi] = value;
-            numrecs++;
-            numkeys++;
-        }
-        else {                                  /* insert key in right page */
-            hi -= BTHNODESIZE;
-            if ( b.rnode == nil ) hi--;
-            [b rotateright:hi];
-            b.keys[hi] = kp;
-            b.btNodes[hi] = value;
-            b.numrecs++;
-            b.numkeys++;
-        }
-        numkeys = b.numkeys = BTHNODESIZE+1;
-        b.updtd = updtd = YES;
-    }
-    return b;
-} /* insert */
-
-- (void)borrowleft:(NSInteger)i
-{
-    ACBTree *t0, *t1;
-    NSInteger nkey;
-    
-    t0 = btNodes[i];
-    t1 = btNodes[i-1];
-    nkey = t1.numkeys-1;
-    [t0 insinnode:t1.keys[nkey] value:t1.btNodes[nkey]];
-    [t1 delfrmnode:t1.keys[nkey]];
-    nkey--;
-    keys[i-1] = t1.keys[nkey];
-    keys[i-1].recnum = t1.nodeid;
-}
-
-- (void)borrowright:(NSInteger)i
-{
-    ACBTree *t0, *t1;
-    NSInteger nkey;
-    
-    t0 = btNodes[i];
-    t1 = btNodes[i+1];
-    [t0 insinnode:t1.keys[0] value:t1.btNodes[0]];
-    [t1 delfrmnode:t1.keys[0]];
-    nkey = t0.numkeys - 1;
-    keys[i] = t0.keys[nkey];
-    keys[i].recnum = t0.nodeid;
-}
-
-- (NSInteger)delfrmnode:(ACBKey *)ikp
-{
-    NSInteger j;
-    
-    j = [self searchnode:ikp.key match:YES];
-    if (j == FAILURE) {
-        return(FAILURE);
-    }
-    ACBKey *k0 = nil;
-    ACBTree *n0 = nil;
-    if ( self.nodeType == LEAF ) {
-        k0 = self.keys[j];
-        n0 = self.btNodes[j];
-    }
-    [self rotateleft:j];
-    self.numkeys--;
-    numrecs -= ((self.nodeType == LEAF)?1:btNodes[j].numrecs);
-    if ( k0 ) [k0 release];
-    if ( n0 ) [n0 release];
-    updtd = TRUE;
-    return(SUCCESS);
-}
-
-- (NSInteger)insinnode:(ACBKey *)ikp value:(id)value
-{
-    NSInteger j;
-    
-    j = [self searchnode:ikp.key match:NO];
-    [self rotateright:j];
-    keys[j] = ikp;
-    btNodes[j] = value;
-    numkeys++;
-    if ( nodeType == LEAF ) {
-        numrecs++;
-    }
-    else {
-        numrecs += btNodes[j].numrecs;
-    }
-    updtd = TRUE;
-    return(j);
-}
-
-- (void)mergenode:(NSInteger)i
-{
-    ACBTree *t0, *t1, *tr;
-    NSInteger j, k, nkeys;
-    
-    t0 = btNodes[i];
-    t1 = btNodes[i+1];
-    /*** move keys and pointers from
-     t1 node to t0 node           ***/
-    for (j=t0.numkeys, k=0; j < BTNODESIZE && k < t1.numkeys; j++, k++) {
-        t0.keys[j] = t1.keys[k];
-        t0.btNodes[j] = t1.btNodes[k];
-        t0.numkeys++;
-    }
-    t0.numrecs += t1.numrecs;
-    t0.rnode = t1.rnode;
-    t0.rnodeid = t1.rnodeid;
-    t0.updtd = YES;
-    nkeys = t0.numkeys - 1;
-    keys[i] = t0.keys[nkeys]; /* update key to point to new high key */
-    [self rotateleft:i+1]; /* copy over the keys and nodes */
-    
-    t1.nodeType = -1;
-    if (t1.rnodeid != 0xffff && i < numkeys - 2) {
-        tr = btNodes[i+1];
-        tr.lnodeid = t0.nodeid;
-        tr.lnode = t0;
-        tr.updtd = YES;
-    }
-    self.numkeys--;
-    updtd = YES;
-}
-
-- (ACBTree *)splitnode:(NSInteger)idx
-{
-    ACBTree *t1;
-    NSInteger j, k;
-    
-    k = (idx <= BTHNODESIZE) ? BTHNODESIZE : BTHNODESIZE+1;
-    /*** create new node ***/
-    // checknode(l, t, k);
-    t1 = [ACBTree newNodeWithDictionary:dict];
-    t1.nodeType = nodeType;
-    t1.rnode = self.rnode;
-    self.rnode = t1;
-    t1.lnode = self;
-    self.updtd = t1.updtd = YES;
-    /*** move keys and pointers ***/
-    NSInteger i = 0;
-    for (j = k; j < BTNODESIZE; j++, i++ ) {
-        t1.keys[i] = keys[j];
-        t1.btNodes[i] = btNodes[j];
-        t1.numrecs += ((nodeType == LEAF) ? 1 : btNodes[j].numrecs);
-        numrecs     -= ((nodeType == LEAF) ? 1 : btNodes[j].numrecs);
-        keys[j] = nil;
-        btNodes[j] = nil;
-    }
-    t1.numkeys  = BTNODESIZE-k;
-    self.numkeys = k;
-    return(t1);
-}
-
-#ifdef DONTUSENOMO
-freetree(l, t)
-FIDB *l;
-ACBTree *t;
-{
-    ACBTree *tmp;
-    NSInteger i;
-    
-    if (dict.root == nil) return(SUCCESS);
-    if (t.nodeid == 1) {
-        srchlvl = 0;
-    }
-    else srchlvl++;
-    for (i = 0; i < t.numkeys; i++) {
-        tmp = t.btNodes[i];
-        if (tmp != nil) {
-            if (tmp.nodeType == LEAF) {
-                free(tmp);    /* free the leaf */
-                if (tmp == l.rrnode) {
-                    l.rrnode = nil;
-                }
-                t.btNodes[i] = nil;
-                l.chknode.nods_inuse--;
-                /*              putpage(l, l.chknode, 0);
-                 */
-            }
-            else {
-                freetree(l, tmp); /* continue up the tree */
-                srchlvl--;        /* decrement the srchlvl on return */
-            }
-        }
-    }
-    free(t); /* free the node entered with */
-    if (t == l.rrnode) {
-        l.rrnode = nil;
-    }
-    l.chknode.nods_inuse--;
-    /*     putpage(l, l.chknode, 0);
-     */
-    t = nil;
-}
-
-- (void) notfound:(ACBKey *)kp
-{
-    /* error routine to perform if entry was expected and not found */
-}
-
-- (void)printtree:(ACBTree *)t
-{
-    BYTE *str;
-    NSInteger i, j;
-    NSUInteger *pdate, *ptime;
-    
-    syslst = stdprn;
-    if ( t.nodeid == 1 ) {
-        srchlvl = 0;
-    }
-    else srchlvl++;
-    for (j = 0; j < t.numkeys; j++) {
-        checknode(l, t, j);
-        if ( t.btNodes[j] != nil ) [self printtree:t.btNodes[j]];
-    }
-    NSLog(@"Nodeid = %d, nodeType = %s, numkeys = %d, numrecs = %d\n",
-          t.nodeid, (t.nodeType == BTNODE)?@"NODE":@"LEAF", t.numkeys, t.numrecs);
-    NSLog(@"Left nodeid = %d, Right nodeid = %d\n", t.lnodeid, t.rnodeid);
-    for (i = 0; i < t.numkeys; i++) {
-        NSLog(@"     t.keys[%d] recnum = %d, keyval = %@",
-              i, t.keys[i].recnum, t.keys[i]);
-        str = t.keys[i].kstr;
-        pdate = (NSUInteger *) (str + 6);
-        ptime = (NSUInteger *) (str + 8);
-        NSLog(@" date = %04.4x,  time = %04.4x\n",
-              *pdate, *ptime);
-    }
-}
-
-- (BOOL)puttree:(ACBTree *)t
-{
-    NSInteger i;
-    if (t.nodeType != LEAF) {
-        for (i = 0; i < t.numkeys; i++) {
-            if ( t.btNodes[i] != nil ) puttree(l, t.btNodes[i]);
-        }
-    }
-    if ( t.updtd ) {
-        putnode(l, t, t.nodeid);
-        return(YES);
-    }
-    return(NO);
-}
-
-#endif
-
-/** ROTATELEFT -- rotate keys from right to the left
- *  starting at position j
- */
-- (void)rotateleft:(NSInteger)j
-{
-    while ( j+1 < numkeys ) {
-        keys[j] = keys[j+1];
-        btNodes[j] = btNodes[j+1];
-        j++;
-    }
-}
-
-/** ROTATERIGHT -- rotate keys to the right by 1 position
- *  starting at the last key down to position j.
- */
-- (void)rotateright:(NSInteger)j
-{
-    NSInteger k;
-    
-    for ( k = numkeys; k > j; k-- ) {
-        keys[k] = keys[k-1];
-        btNodes[k] = btNodes[k-1];
-    }
-    keys[j] = nil;
-    btNodes[j] = nil;
-}
-
-- (NSInteger) keyWalkLeaves
-{
-    NSInteger i, idx = 0;
-    NSInteger keycnt;
-    ACBTree *t;
-
-    if ( self != dict.root ) {
-        return 0; // maybe I need to throw an exception here
-    }
-    t = self;
-    self.dict.data = [[NSMutableData dataWithLength:(numkeys * sizeof(id))] retain];
-    self.dict.ptrBuffer = [self.dict.data mutableBytes];
-    while ( t != nil && t.nodeType != LEAF ) {
-        t = t.btNodes[0];
-    }
-    do {
-        keycnt = t.numkeys;
-        for ( i = 0; i < keycnt; i++ ) {
-            if ( t.btNodes[i] != nil ) {
-                dict.ptrBuffer[idx++] = (id) t.keys[i].key;
-            }
-        }
-        t = t.rnode;
-    } while ( t != nil );
-    return( idx );
-}
-
-- (NSInteger) objectWalkLeaves
-{
-    NSInteger i, idx = 0;
-    NSInteger keycnt;
-    ACBTree *t;
-    
-    if ( self != dict.root ) {
-        return 0; // maybe I need to throw an exception here
-    }
-    t = self;
-    self.dict.data = [[NSMutableData dataWithLength:(numrecs * sizeof(id))] retain];
-    self.dict.ptrBuffer = [self.dict.data mutableBytes];
-    while ( t != nil && t.nodeType != LEAF ) {
-        t = t.btNodes[0];
-    }
-    do {
-        keycnt = t.numkeys;
-        for ( i = 0; i < keycnt; i++ ) {
-            if ( t.btNodes[i] != nil ) {
-                dict.ptrBuffer[idx++] = (id) t.btNodes[i];
-            }
-        }
-        t = t.rnode;
-    } while ( t != nil );
-    return( idx );
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ACBTree" );
-#endif
-    [super dealloc];
-}
-
-@end
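
The implementation above is the B-tree that backs AMutableDictionary's key/value storage. A minimal sketch of the caller-side insert-and-lookup flow it provides, assuming a root node previously created with newNodeWithDictionary: (how the owning dictionary builds that root is outside this file).

#import "ACBTree.h"

// Sketch: insertkey: may split and return a new root, so the result must
// replace the old root; search: returns the leaf holding the key (or nil),
// and for LEAF nodes btNodes[i] is the stored value.
static ACBTree *addEntry(ACBTree *root, NSString *aKey, id value)
{
    ACBKey *kp = [ACBKey newKeyWithKStr:aKey];   // key text is truncated to BTKeySize-1 chars
    return [root insertkey:kp value:value];      // possibly a brand-new root after a split
}

static id lookup(ACBTree *root, NSString *aKey)
{
    ACBTree *leaf = [root search:aKey];
    if ( leaf == nil ) return nil;               // key not present
    NSInteger i = [leaf searchnode:aKey match:YES];
    return (i == FAILURE) ? nil : (id)leaf.btNodes[i];
}
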
diff --git a/antlr-3.4/runtime/ObjC/Framework/AMutableArray.h b/antlr-3.4/runtime/ObjC/Framework/AMutableArray.h
deleted file mode 100644
index 28d0797..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/AMutableArray.h
+++ /dev/null
@@ -1,50 +0,0 @@
-//
-//  AMutableArray.h
-//  a_ST4
-//
-//  Created by Alan Condit on 3/12/11.
-//  Copyright 2011 Alan's MachineWorks. All rights reserved.
-//
-
-#import <Cocoa/Cocoa.h>
-#import "ArrayIterator.h"
-
-@class ArrayIterator;
-
-@interface AMutableArray : NSMutableArray {
-    NSInteger BuffSize;
-    NSInteger count;
-    __strong NSMutableData *buffer;
-    __strong id *ptrBuffer;
-}
-
-+ (id) newArray;
-+ (id) arrayWithCapacity:(NSInteger)size;
-
-- (id) init;
-- (id) initWithCapacity:(NSInteger)size;
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (void) addObject:(id)anObject;
-- (void) addObjectsFromArray:(NSArray *)anArray;
-- (id) objectAtIndex:(NSInteger)anIdx;
-- (void) insertObject:(id)anObject atIndex:(NSInteger)anIdx;
-- (void) removeAllObjects;
-- (void) removeLastObject;
-- (void) removeObjectAtIndex:(NSInteger)idx;
-- (void) replaceObjectAtIndex:(NSInteger)idx withObject:(id)obj;
-- (NSInteger) count;
-- (void)setCount:(NSInteger)cnt;
-//- (NSUInteger)countByEnumeratingWithState:(NSFastEnumerationState *)state objects:(id *)stackbuf count:(NSUInteger)len;
-- (NSArray *) allObjects;
-- (ArrayIterator *) objectEnumerator;
-- (void) ensureCapacity:(NSInteger) index;
-- (NSString *) description;
-- (NSString *) toString;
-
-@property (assign) NSInteger BuffSize;
-@property (assign, getter=count, setter=setCount:) NSInteger count;
-@property (retain) NSMutableData *buffer;
-@property (assign) id *ptrBuffer;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/AMutableArray.m b/antlr-3.4/runtime/ObjC/Framework/AMutableArray.m
deleted file mode 100644
index e6d1eb8..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/AMutableArray.m
+++ /dev/null
@@ -1,296 +0,0 @@
-//
-//  AMutableArray.m
-//  a_ST4
-//
-//  Created by Alan Condit on 3/12/11.
-//  Copyright 2011 Alan's MachineWorks. All rights reserved.
-//
-#import "AMutableArray.h"
-#import "ArrayIterator.h"
-
-#define BUFFSIZE 25
-
-@implementation AMutableArray
-
-@synthesize BuffSize;
-@synthesize buffer;
-@synthesize ptrBuffer;
-//@synthesize count;
-
-
-+ (id) newArray
-{
-    return [[AMutableArray alloc] init];
-}
-
-+ (id) arrayWithCapacity:(NSInteger)size
-{
-    return [[AMutableArray alloc] initWithCapacity:size];
-}
-
-- (id) init
-{
-    self=[super init];
-    if ( self != nil ) {
-        BuffSize = BUFFSIZE;
-        buffer = [[NSMutableData dataWithLength:(BuffSize * sizeof(id))] retain];
-        ptrBuffer = (id *)[buffer mutableBytes];
-        for( int idx = 0; idx < BuffSize; idx++ ) {
-            ptrBuffer[idx] = nil;
-        }
-    }
-    return self;
-}
-
-- (id) initWithCapacity:(NSInteger)len
-{
-    self=[super init];
-    if ( self != nil ) {
-        BuffSize = (len >= BUFFSIZE) ? len : BUFFSIZE;
-        buffer = [[NSMutableData dataWithLength:(BuffSize * sizeof(id))] retain];
-        ptrBuffer = (id *)[buffer mutableBytes];
-        for( int idx = 0; idx < BuffSize; idx++ ) {
-            ptrBuffer[idx] = nil;
-        }
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in AMutableArray" );
-#endif
-    if ( count ) [self removeAllObjects];
-    if ( buffer ) [buffer release];
-    [super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    AMutableArray *copy;
-    
-    copy = [[[self class] allocWithZone:aZone] init];
-    if ( buffer ) {
-        copy.buffer = [buffer copyWithZone:aZone];
-    }
-    copy.ptrBuffer = [copy.buffer mutableBytes];
-    copy.count = count;
-    copy.BuffSize = BuffSize;
-    return copy;
-}
-
-- (void) addObject:(id)anObject
-{
-    if ( anObject == nil ) anObject = [NSNull null];
-    [anObject retain];
-	[self ensureCapacity:count];
-	ptrBuffer[count++] = anObject;
-}
-
-- (void) addObjectsFromArray:(NSArray *)otherArray
-{
-    NSInteger cnt, i;
-    cnt = [otherArray count];
-    [self ensureCapacity:count+cnt];
-    for( i = 0; i < cnt; i++) {
-        [self addObject:[otherArray objectAtIndex:i]];
-    }
-    return;
-}
-
-- (id) objectAtIndex:(NSInteger)anIdx
-{
-    id obj;
-    if ( anIdx < 0 || anIdx >= count ) {
-        @throw [NSException exceptionWithName:NSRangeException
-                                       reason:[NSString stringWithFormat:@"Attempt to retrieve objectAtIndex %d past end", anIdx]
-                                     userInfo:nil];
-        return nil;
-    }
-    ptrBuffer = [buffer mutableBytes];
-    obj = ptrBuffer[anIdx];
-    if ( obj == [NSNull null] ) {
-        obj = nil;
-    }
-    return obj;
-}
-
-- (void) insertObject:(id)anObject atIndex:(NSInteger)anIdx
-{
-    if ( anObject == nil ) {
-        // store nil as NSNull so it round-trips through objectAtIndex:, matching addObject:
-        anObject = [NSNull null];
-    }
-    if ( anIdx < 0 || anIdx > count ) {
-        @throw [NSException exceptionWithName:NSRangeException reason:@"Attempt to insertObjectAtIndex past end" userInfo:nil];
-    }
-    if ( count == BuffSize ) {
-        [self ensureCapacity:count];
-    }
-    if ( anIdx < count ) {
-        for (int i = count; i > anIdx; i--) {
-            ptrBuffer[i] = ptrBuffer[i-1];
-        }
-    }
-    ptrBuffer[anIdx] = [anObject retain];
-    count++;
-}
-
-- (void) removeObjectAtIndex:(NSInteger)idx;
-{
-    id tmp;
-    if (idx < 0 || idx >= count) {
-        @throw [NSException exceptionWithName:NSRangeException reason:@"Attempt to removeObjectAtIndex past end" userInfo:nil];
-    }
-    else if (count) {
-        tmp = ptrBuffer[idx];
-        if ( tmp ) [tmp release];
-        for (int i = idx; i < count-1; i++) {
-            ptrBuffer[i] = ptrBuffer[i+1];
-        }
-        count--;
-    }
-}
-
-- (void) removeLastObject
-{
-    id tmp;
-    if (count == 0) {
-        @throw [NSException exceptionWithName:NSRangeException reason:@"Attempt to removeLastObject from an empty array" userInfo:nil];
-    }
-    count--;
-    tmp = ptrBuffer[count];
-    if ( tmp ) [tmp release];
-    ptrBuffer[count] = nil;
-}
-
-- (void)removeAllObjects
-{
-    id tmp;
-    if (count == 0) {
-        @throw [NSException exceptionWithName:NSRangeException reason:@"Attempt to removeAllObjects from an empty array" userInfo:nil];
-    }
-    int i;
-    for ( i = 0; i < BuffSize; i++ ) {
-        if (i < count) {
-            tmp = ptrBuffer[i];
-            if ( tmp ) [tmp release];
-        }
-        ptrBuffer[i] = nil;
-    }
-    count = 0;
-}
-
-- (void) replaceObjectAtIndex:(NSInteger)idx withObject:(id)obj
-{
-    id tmp;
-    if ( obj == nil ) {
-        obj = [NSNull null];
-    }
-    if ( idx < 0 || idx >= count ) {
-        @throw [NSException exceptionWithName:NSRangeException reason:@"Attempt to replace object past end" userInfo:nil];
-   }
-    if ( count ) {
-        [obj retain];
-        tmp = ptrBuffer[idx];
-        if ( tmp ) [tmp release];
-        ptrBuffer[idx] = obj;
-    }
-}
-
-- (NSInteger) count
-{
-    return count;
-}
-
-- (void) setCount:(NSInteger)cnt
-{
-    count = cnt;
-}
-
-- (NSArray *) allObjects
-{
-    return [NSArray arrayWithObjects:ptrBuffer count:count];
-}
-
-- (ArrayIterator *) objectEnumerator
-{
-    return [ArrayIterator newIterator:[self allObjects]];
-}
-
-// This is where all the magic happens.
-// You have two choices when implementing this method:
-// 1) Use the stack based array provided by stackbuf. If you do this, then you must respect the value of 'len'.
-// 2) Return your own array of objects. If you do this, return the full length of the array returned until you run out of objects, then return 0. For example, a linked-array implementation may return each array in order until you iterate through all arrays.
-// In either case, state->itemsPtr MUST be a valid array (non-nil). This sample takes approach #1, using stackbuf to store results.
-- (NSUInteger)countByEnumeratingWithState:(NSFastEnumerationState *)state objects:(id *)stackbuf count:(NSUInteger)len
-{
-    NSUInteger cnt = 0;
-    // This is the initialization condition, so we'll do one-time setup here.
-    // Ensure that you never set state->state back to 0, or use another method to detect initialization
-    // (such as using one of the values of state->extra).
-    if (state->state == 0) {
-        // We are not tracking mutations, so we'll set state->mutationsPtr to point into one of our extra values,
-        // since these values are not otherwise used by the protocol.
-        // If your class was mutable, you may choose to use an internal variable that is updated when the class is mutated.
-        // state->mutationsPtr MUST NOT be NULL.
-        state->mutationsPtr = &state->extra[0];
-    }
-    // Now we provide items, which we track with state->state, and determine if we have finished iterating.
-    if (state->state < self.count) {
-        // Set state->itemsPtr to the provided buffer.
-        // Alternate implementations may set state->itemsPtr to an internal C array of objects.
-        // state->itemsPtr MUST NOT be NULL.
-        state->itemsPtr = stackbuf;
-        // Fill in the stack array, either until we've provided all items from the list
-        // or until we've provided as many items as the stack based buffer will hold.
-        while((state->state < self.count) && (cnt < len)) {
-            // For this sample, we generate the contents on the fly.
-            // A real implementation would likely just be copying objects from internal storage.
-            stackbuf[cnt++] = ptrBuffer[state->state++];
-        }
-        // state->state = ((cnt < len)? cnt : len);
-    }
-    else
-    {
-        // We've already provided all our items, so we signal we are done by returning 0.
-        cnt = 0;
-    }
-    return cnt;
-}
-
-- (NSString *) description
-{
-    NSMutableString *str;
-    NSInteger idx, cnt;
-    cnt = [self count];
-    str = [NSMutableString stringWithCapacity:30];
-    [str appendString:@"["];
-    for (idx = 0; idx < cnt; idx++ ) {
-        [str appendFormat:@"%@", [self objectAtIndex:idx]];
-    }
-    [str appendString:@"]"];
-    return str;
-}
-
-- (NSString *) toString
-{
-    return [self description];
-}
-
-- (void) ensureCapacity:(NSInteger) index
-{
-	if ((index * sizeof(id)) >= [buffer length])
-	{
-		NSInteger newSize = ([buffer length] / sizeof(id)) * 2;
-		if (index > newSize) {
-			newSize = index + 1;
-		}
-        BuffSize = newSize;
-		[buffer setLength:(BuffSize * sizeof(id))];
-        ptrBuffer = [buffer mutableBytes];
-	}
-}
-
-@end
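
The fast-enumeration comments above describe how countByEnumeratingWithState:objects:count: hands the array's internal C buffer to Objective-C's for-in loop. Below is a minimal usage sketch of the deleted AMutableArray API, illustrative only; it assumes the matching AMutableArray.h removed elsewhere in this change and manual reference counting, since the implementation uses retain/release rather than ARC.

    #import <Foundation/Foundation.h>
    #import "AMutableArray.h"

    int main(void) {
        NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

        AMutableArray *a = [AMutableArray newArray];   // starts with BUFFSIZE (25) slots
        [a addObject:@"lexer"];
        [a addObject:@"parser"];
        [a addObject:nil];                             // stored internally as NSNull

        NSLog(@"count = %ld", (long)[a count]);        // 3
        NSLog(@"a[2]  = %@", [a objectAtIndex:2]);     // NSNull is mapped back to nil

        // The NSFastEnumeration hook above is what makes for-in work here.
        for (id obj in a) {
            NSLog(@"element: %@", obj);
        }

        [a release];
        [pool drain];
        return 0;
    }
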
diff --git a/antlr-3.4/runtime/ObjC/Framework/AMutableDictionary.m b/antlr-3.4/runtime/ObjC/Framework/AMutableDictionary.m
deleted file mode 100644
index 88b85a8..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/AMutableDictionary.m
+++ /dev/null
@@ -1,264 +0,0 @@
-//
-//  AMutableDictionary.m
-//  ST4
-//
-//  Created by Alan Condit on 4/18/11.
-//  Copyright 2011 Alan Condit. All rights reserved.
-//
-
-#import <Cocoa/Cocoa.h>
-#import "AMutableDictionary.h"
-#import "ACBTree.h"
-
-@implementation AMutableDictionary
-
-@synthesize root;
-@synthesize nodes_av;
-@synthesize nodes_inuse;
-@synthesize nxt_nodeid;
-//@synthesize count;
-@synthesize data;
-@synthesize ptrBuffer;
-
-+ (AMutableDictionary *) newDictionary
-{
-    return [[AMutableDictionary alloc] init];
-}
-
-/** dictionaryWithCapacity
- *  capacity is meaningless to ACBTree because
- *  capacity is automatically increased
- */
-+ (AMutableDictionary *) dictionaryWithCapacity
-{
-    return [[AMutableDictionary alloc] init];
-}
-
-- (id)init
-{
-    self = [super init];
-    if (self) {
-        // Initialization code here.
-        nxt_nodeid = 0;
-        count = 0;
-        root = [ACBTree newNodeWithDictionary:self];
-        root.nodeType = LEAF;
-        root.numrecs = 0;
-        root.updtd = NO;
-        root.lnodeid = 1;
-        root.lnode = nil;
-        root.rnodeid = 0xffff;
-        root.rnode = nil;
-    }
-    return self;
-}
-
-/** initWithCapacity
- *  capacity is meaningless to ACBTree because
- *  capacity is automatically increased
- */
-- (id) initWithCapacity:(NSUInteger)numItems
-{
-    self = [super init];
-    if (self) {
-        // Initialization code here.
-        nxt_nodeid = 0;
-        count = 0;
-        root = [ACBTree newNodeWithDictionary:self];
-        root.nodeType = LEAF;
-        root.numrecs = 0;
-        root.updtd = NO;
-        root.lnodeid = 1;
-        root.lnode = nil;
-        root.rnodeid = 0xffff;
-        root.rnode = nil;
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in AMutableDictionary" );
-#endif
-    if ( data ) [data release];
-    if ( root ) [root release];
-    [super dealloc];
-}
-
-- (id) objectForKey:(id)aKey
-{
-    id obj = nil;
-    ACBTree *node;
-    ACBKey *kp;
-    NSInteger ret;
-    BOOL mustRelease = NO;
-
-    if ( [aKey isKindOfClass:[NSString class]] ) {
-        kp = [ACBKey newKeyWithKStr:aKey];
-        mustRelease = YES;
-    }
-    else if ( [aKey isKindOfClass:[ACBKey class]] ) {
-        kp = aKey;
-        //ACBKey *akey = [ACBKey newKey:aKey];
-    }
-    else {
-        @throw [NSException exceptionWithName:NSInvalidArgumentException
-                                       reason:[NSString stringWithFormat:@"What kind of key is this? %@", aKey]
-                                     userInfo:nil];
-        return nil; // not a key that I know how to deal with
-    }
-    node = [root search:kp.key];
-    if ( node != nil ) {
-        ret = [node searchnode:kp.key match:YES];
-        if ( ret >= 0 && ret < node.numkeys ) {
-            obj = node.btNodes[ret];
-            if ( obj == [NSNull null] ) {
-                obj = nil;
-            }
-        }
-    }
-    if ( mustRelease ) [kp release];
-    return obj;
-}
-
-- (void) setObject:(id)obj forKey:(id)aKey
-{
-    ACBKey *kp;
-    BOOL mustRelease = NO;
-    if ( [aKey isKindOfClass:[NSString class]] ) {
-        kp = [ACBKey newKeyWithKStr:aKey];
-        mustRelease = YES;
-    }
-    else if ( [aKey isKindOfClass:[ACBKey class]] ) {
-        kp = (ACBKey *)aKey;
-    }
-    else {
-        @throw [NSException exceptionWithName:NSInvalidArgumentException
-                                       reason:[NSString stringWithFormat:@"What kind of key is this? %@", aKey]
-                                     userInfo:nil];
-    }
-    if ( [root search:kp.key] == nil ) {
-        if ( obj == nil ) {
-            obj = [NSNull null];
-        }
-        root = [root insertkey:kp value:obj];
-        [kp retain];
-        [obj retain];
-        kp.recnum = count++;
-    }
-    else {
-        if ( mustRelease ) [kp release];
-        @throw [NSException exceptionWithName:NSInvalidArgumentException reason:@"key already exists" userInfo:nil];
-    }
-    return;
-}
-
-- (BOOL) isEqual:(id)object
-{
-    return [super isEqual:object];
-}
-
-- (void) removeObjectForKey:(id)aKey
-{
-    if ( [root deletekey:aKey] == SUCCESS )
-        count--;
-}
-
-- (NSUInteger) count
-{
-    return count;
-}
-
-- (NSArray *) allKeys
-{
-    NSUInteger cnt = [root keyWalkLeaves];
-    return [NSArray arrayWithObjects:ptrBuffer count:cnt];
-}
-
-- (NSArray *) allValues
-{
-    NSUInteger cnt = [root objectWalkLeaves];
-    return [NSArray arrayWithObjects:ptrBuffer count:cnt];
-}
-
-- (ArrayIterator *) keyEnumerator
-{
-    return [ArrayIterator newIterator:[self allKeys]];
-}
-
-- (ArrayIterator *) objectEnumerator
-{
-    return [ArrayIterator newIterator:[self allValues]];
-}
-
-// This is where all the magic happens.
-// You have two choices when implementing this method:
-// 1) Use the stack based array provided by stackbuf. If you do this, then you must respect the value of 'len'.
-// 2) Return your own array of objects. If you do this, return the full length of the array returned until you run out of objects, then return 0. For example, a linked-array implementation may return each array in order until you iterate through all arrays.
-// In either case, state->itemsPtr MUST be a valid array (non-nil). This sample takes approach #1, using stackbuf to store results.
-- (NSUInteger)countByEnumeratingWithState:(NSFastEnumerationState *)state objects:(id *)stackbuf count:(NSUInteger)len
-{
-    NSUInteger cnt = 0;
-    // This is the initialization condition, so we'll do one-time setup here.
-    // Ensure that you never set state->state back to 0, or use another method to detect initialization
-    // (such as using one of the values of state->extra).
-    if (state->state == 0) {
-        // We are not tracking mutations, so we'll set state->mutationsPtr to point into one of our extra values,
-        // since these values are not otherwise used by the protocol.
-        // If your class was mutable, you may choose to use an internal variable that is updated when the class is mutated.
-        // state->mutationsPtr MUST NOT be NULL.
-        state->mutationsPtr = &state->extra[0];
-        [self.root objectWalkLeaves];
-    }
-    // Now we provide items, which we track with state->state, and determine if we have finished iterating.
-    if (state->state < self.count) {
-        // Set state->itemsPtr to the provided buffer.
-        // Alternate implementations may set state->itemsPtr to an internal C array of objects.
-        // state->itemsPtr MUST NOT be NULL.
-        state->itemsPtr = stackbuf;
-        // Fill in the stack array, either until we've provided all items from the list
-        // or until we've provided as many items as the stack based buffer will hold.
-        while((state->state < self.count) && (cnt < len)) {
-            // For this sample, we generate the contents on the fly.
-            // A real implementation would likely just be copying objects from internal storage.
-            stackbuf[cnt++] = ptrBuffer[state->state++];
-        }
-        // state->state = ((cnt < len)? cnt : len);
-    }
-    else
-    {
-        // We've already provided all our items, so we signal we are done by returning 0.
-        cnt = 0;
-    }
-    return cnt;
-}
-
-- (void) clear
-{
-    if ( count ) [self removeAllObjects];
-}
-
-- (void) removeAllObjects
-{
-    root = [ACBTree newNodeWithDictionary:self];
-    root.nodeid = 0;
-    nxt_nodeid = 1;
-}
-
-- (NSInteger) nextNodeId
-{
-    return nxt_nodeid++;
-}
-
-- (NSArray *) toKeyArray
-{
-    return nil;
-}
-
-- (NSArray *) toValueArray
-{
-    return nil;
-}
-
-@end
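
AMutableDictionary, deleted above, keeps its entries in an ACBTree rather than an NSDictionary-style hash table, and its setObject:forKey: is insert-only: a second insert under an existing key throws instead of replacing the value. A minimal usage sketch under the same assumptions as before (matching headers from this change, manual reference counting):

    #import <Foundation/Foundation.h>
    #import "AMutableDictionary.h"

    int main(void) {
        NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

        AMutableDictionary *d = [AMutableDictionary newDictionary];
        [d setObject:@"parser" forKey:@"kind"];        // NSString keys are wrapped in ACBKey internally
        [d setObject:@"3.4"    forKey:@"version"];

        NSLog(@"kind  = %@", [d objectForKey:@"kind"]);
        NSLog(@"keys  = %@", [d allKeys]);             // collected by walking the tree leaves
        NSLog(@"count = %lu", (unsigned long)[d count]);

        // Insert-only semantics: re-inserting an existing key throws.
        @try {
            [d setObject:@"tree parser" forKey:@"kind"];
        }
        @catch (NSException *e) {
            NSLog(@"rejected: %@", [e reason]);
        }

        [d release];
        [pool drain];
        return 0;
    }
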
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR.h b/antlr-3.4/runtime/ObjC/Framework/ANTLR.h
deleted file mode 100644
index 3c81fc2..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR.h
+++ /dev/null
@@ -1,114 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke (c) 2011 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <ANTLR/ACBTree.h>
-#import <ANTLR/AMutableArray.h>
-#import <ANTLR/AMutableDictionary.h>
-#import <ANTLR/ANTLRBaseMapElement.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRBaseStack.h>
-#import <ANTLR/ANTLRBaseTree.h>
-#import <ANTLR/ANTLRBaseTreeAdaptor.h>
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBufferedTokenStream.h>
-#import <ANTLR/ANTLRBufferedTreeNodeStream.h>
-#import <ANTLR/ANTLRCharStream.h>
-#import <ANTLR/ANTLRCharStreamState.h>
-#import <ANTLR/ANTLRCommonErrorNode.h>
-#import <ANTLR/ANTLRCommonToken.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRDebug.h>
-#import <ANTLR/ANTLRDebugEventProxy.h>
-#import <ANTLR/ANTLRDebugEventListener.h>
-#import <ANTLR/ANTLRDebugParser.h>
-#import <ANTLR/ANTLRDebugTokenStream.h>
-#import <ANTLR/ANTLRDebugTreeAdaptor.h>
-#import <ANTLR/ANTLRDebugTreeNodeStream.h>
-#import <ANTLR/ANTLRDebugTreeParser.h>
-#import <ANTLR/ANTLRDoubleKeyMap.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRError.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRFastQueue.h>
-#import <ANTLR/ANTLRFileStream.h>
-#import <ANTLR/ANTLRHashMap.h>
-#import <ANTLR/ANTLRHashRule.h>
-#import <ANTLR/ANTLRInputStream.h>
-#import <ANTLR/ANTLRIntArray.h>
-#import <ANTLR/ANTLRIntStream.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRLexerRuleReturnScope.h>
-#import <ANTLR/ANTLRLinkBase.h>
-#import <ANTLR/ANTLRLookaheadStream.h>
-#import <ANTLR/ANTLRMapElement.h>
-#import <ANTLR/ANTLRMap.h>
-#import <ANTLR/ANTLRMismatchedNotSetException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRMissingTokenException.h>
-#import <ANTLR/ANTLRNodeMapElement.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRPtrBuffer.h>
-#import <ANTLR/ANTLRReaderStream.h>
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLRRecognizerSharedState.h>
-#import <ANTLR/ANTLRRewriteRuleElementStream.h>
-#import <ANTLR/ANTLRRewriteRuleNodeStream.h>
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
-#import <ANTLR/ANTLRRuleMemo.h>
-#import <ANTLR/ANTLRRuleStack.h>
-#import <ANTLR/ANTLRRuleReturnScope.h>
-#import <ANTLR/ANTLRRuntimeException.h>
-#import <ANTLR/ANTLRStreamEnumerator.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRSymbolStack.h>
-#import <ANTLR/ANTLRToken+DebuggerSupport.h>
-#import <ANTLR/ANTLRToken.h>
-#import <ANTLR/ANTLRTokenRewriteStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRTokenStream.h>
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeException.h>
-#import <ANTLR/ANTLRTreeIterator.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-#import <ANTLR/ANTLRUnbufferedTokenStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-#import <ANTLR/ANTLRUniqueIDMap.h>
-#import <ANTLR/ANTLRUnwantedTokenException.h>
-#import <ANTLR/ArrayIterator.h>
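
ANTLR.h, also removed here, is the framework's umbrella header: it imports every public runtime header listed above, so client code (for example, a generated lexer or parser) can pull in the whole runtime with a single line:

    #import <ANTLR/ANTLR.h>   // umbrella header for the ObjC runtime framework
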
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR.xcodeproj/project.pbxproj b/antlr-3.4/runtime/ObjC/Framework/ANTLR.xcodeproj/project.pbxproj
deleted file mode 100644
index 575fd9a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR.xcodeproj/project.pbxproj
+++ /dev/null
@@ -1,6288 +0,0 @@
-// !$*UTF8*$!
-{
-	archiveVersion = 1;
-	classes = {
-	};
-	objectVersion = 46;
-	objects = {
-
-/* Begin PBXAggregateTarget section */
-		F762873F0B71519B006AA7EF /* Regenerate all examples */ = {
-			isa = PBXAggregateTarget;
-			buildConfigurationList = F76287400B7151B9006AA7EF /* Build configuration list for PBXAggregateTarget "Regenerate all examples" */;
-			buildPhases = (
-			);
-			dependencies = (
-				F76287A70B7157C2006AA7EF /* PBXTargetDependency */,
-				F762874C0B715417006AA7EF /* PBXTargetDependency */,
-				F76287AB0B7157C2006AA7EF /* PBXTargetDependency */,
-				F79EFB140C5845A300ABAB3D /* PBXTargetDependency */,
-				F76287A90B7157C2006AA7EF /* PBXTargetDependency */,
-				1A0F347112EA43BA00496BB8 /* PBXTargetDependency */,
-				F76287AD0B7157C2006AA7EF /* PBXTargetDependency */,
-				F76287AF0B7157C2006AA7EF /* PBXTargetDependency */,
-				1A0F347312EA43BA00496BB8 /* PBXTargetDependency */,
-				1A0F347512EA43BA00496BB8 /* PBXTargetDependency */,
-			);
-			name = "Regenerate all examples";
-			productName = Untitled;
-		};
-/* End PBXAggregateTarget section */
-
-/* Begin PBXBuildFile section */
-		1A01BD9312EB5A6000428792 /* Simplifier.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A0F347F12EA444500496BB8 /* Simplifier.m */; };
-		1A0F345E12EA42D800496BB8 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		1A0F345F12EA42D800496BB8 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
-		1A0F346012EA42D800496BB8 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
-		1A0F346D12EA434F00496BB8 /* Main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A0F343012EA411F00496BB8 /* Main.m */; };
-		1A0F348212EA444500496BB8 /* PolyLexer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A0F347A12EA444500496BB8 /* PolyLexer.h */; };
-		1A0F348412EA444500496BB8 /* PolyParser.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A0F347C12EA444500496BB8 /* PolyParser.h */; };
-		1A0F348612EA444500496BB8 /* Simplifier.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A0F347E12EA444500496BB8 /* Simplifier.h */; };
-		1A0F348912EA444500496BB8 /* PolyLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A0F347B12EA444500496BB8 /* PolyLexer.m */; };
-		1A0F348A12EA444500496BB8 /* PolyParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A0F347D12EA444500496BB8 /* PolyParser.m */; };
-		1A10050611B8796D0022B434 /* ANTLRBufferedTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A18EF5511B8028D0006186A /* ANTLRBufferedTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A10050711B8796E0022B434 /* ANTLRBitSet.h in Headers */ = {isa = PBXBuildFile; fileRef = F7F218EE097AFB1A000472E9 /* ANTLRBitSet.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A10050811B879A40022B434 /* ANTLRFastQueue.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1CCC9011B6FD39002E5F53 /* ANTLRFastQueue.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A10050911B879A80022B434 /* ANTLRFailedPredicateException.h in Headers */ = {isa = PBXBuildFile; fileRef = F738D1730B07AEAA001813C4 /* ANTLRFailedPredicateException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A10050B11B879B80022B434 /* ANTLRIntArray.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1CCC9211B6FD39002E5F53 /* ANTLRIntArray.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A100ABB11E604FE006ABF94 /* ANTLRHashRule.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A100AB911E604FE006ABF94 /* ANTLRHashRule.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A100ABC11E604FE006ABF94 /* ANTLRHashRule.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A100ABA11E604FE006ABF94 /* ANTLRHashRule.m */; };
-		1A12110311D3A62B00F27B38 /* ANTLRCommonTokenTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2311D2BE4F000C72FC /* ANTLRCommonTokenTest.m */; };
-		1A12117911D3B45C00F27B38 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		1A12117A11D3B47000F27B38 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
-		1A12117B11D3B47000F27B38 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
-		1A1211D711D3BF6800F27B38 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		1A1211D811D3BF6800F27B38 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
-		1A1211D911D3BF6800F27B38 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
-		1A1211DE11D3BFC900F27B38 /* ANTLRStringStreamTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2011D2BE4F000C72FC /* ANTLRStringStreamTest.m */; };
-		1A12122B11D3C93500F27B38 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A12122A11D3C93500F27B38 /* ANTLR.framework */; };
-		1A12122C11D3C93500F27B38 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
-		1A12122D11D3C93500F27B38 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
-		1A12126211D3CA0100F27B38 /* ANTLRFastQueueTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1811D2BE4F000C72FC /* ANTLRFastQueueTest.m */; };
-		1A1212E211D3F55500F27B38 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		1A1212E311D3F55500F27B38 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
-		1A1212E411D3F55500F27B38 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
-		1A1212E711D3F59300F27B38 /* ANTLRIntArrayTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1A11D2BE4F000C72FC /* ANTLRIntArrayTest.m */; };
-		1A12131211D3F7DC00F27B38 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		1A12131311D3F7DC00F27B38 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
-		1A12131411D3F7DC00F27B38 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
-		1A12131711D3F80500F27B38 /* ANTLRCommonTreeTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2611D2BE4F000C72FC /* ANTLRCommonTreeTest.m */; };
-		1A12C95911B89F62008C9BED /* ANTLRBitSet.m in Sources */ = {isa = PBXBuildFile; fileRef = F7F218EF097AFB1A000472E9 /* ANTLRBitSet.m */; };
-		1A12C95A11B89F64008C9BED /* ANTLRBufferedTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A18EF5611B8028D0006186A /* ANTLRBufferedTokenStream.m */; };
-		1A12C95B11B89F65008C9BED /* ANTLRCommonToken.m in Sources */ = {isa = PBXBuildFile; fileRef = F777660409DC5CF400517181 /* ANTLRCommonToken.m */; };
-		1A12C95C11B89F67008C9BED /* ANTLRCommonTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F700ED950A5FF2A5005D0757 /* ANTLRCommonTokenStream.m */; };
-		1A12C95D11B89F68008C9BED /* ANTLRCommonTree.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C58E90AB3911D00282574 /* ANTLRCommonTree.m */; };
-		1A12C95E11B89F69008C9BED /* ANTLRCommonTreeAdaptor.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C59A60AB4F20A00282574 /* ANTLRCommonTreeAdaptor.m */; };
-		1A12C95F11B89F6A008C9BED /* ANTLRCommonTreeNodeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F76AA98F0CEA515A00AF044C /* ANTLRCommonTreeNodeStream.m */; };
-		1A12C96011B89F6B008C9BED /* ANTLRDebugEventListener.h in Headers */ = {isa = PBXBuildFile; fileRef = F7CECD7D0B1E5C370054CC3B /* ANTLRDebugEventListener.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A12C96111B89F6F008C9BED /* ANTLRLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7A4099209659BFB002CC781 /* ANTLRLexer.m */; };
-		1A12C96211B89F70008C9BED /* ANTLRLexerRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = F7009ADA0A1BE4AE002EDD5D /* ANTLRLexerRuleReturnScope.m */; };
-		1A12C96311B89F76008C9BED /* ANTLRLookaheadStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1CCCAA11B724B2002E5F53 /* ANTLRLookaheadStream.m */; };
-		1A12C96411B89F76008C9BED /* ANTLRMismatchedRangeException.m in Sources */ = {isa = PBXBuildFile; fileRef = F7037CEF0A0582FC0070435D /* ANTLRMismatchedRangeException.m */; };
-		1A12C96511B89F77008C9BED /* ANTLRMismatchedSetException.m in Sources */ = {isa = PBXBuildFile; fileRef = F70380BB0A07FA0D0070435D /* ANTLRMismatchedSetException.m */; };
-		1A12C96611B89F78008C9BED /* ANTLRMismatchedTokenException.m in Sources */ = {isa = PBXBuildFile; fileRef = F777668109DC719C00517181 /* ANTLRMismatchedTokenException.m */; };
-		1A12C96711B89F7A008C9BED /* ANTLRMismatchedTreeNodeException.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C5D610AB63E0B00282574 /* ANTLRMismatchedTreeNodeException.m */; };
-		1A12C96811B89F7B008C9BED /* ANTLRNoViableAltException.m in Sources */ = {isa = PBXBuildFile; fileRef = F79D598A0A0E51AB00EA3CEE /* ANTLRNoViableAltException.m */; };
-		1A12C96911B89F7E008C9BED /* ANTLRParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F700E8FA0A5FAD21005D0757 /* ANTLRParser.m */; };
-		1A12C96A11B89F7F008C9BED /* ANTLRParserRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C5ECD0AB7E5A500282574 /* ANTLRParserRuleReturnScope.m */; };
-		1A12C96B11B89F80008C9BED /* ANTLRRecognitionException.m in Sources */ = {isa = PBXBuildFile; fileRef = F777669209DC72D600517181 /* ANTLRRecognitionException.m */; };
-		1A12C96C11B89F82008C9BED /* ANTLRRecognizerSharedState.m in Sources */ = {isa = PBXBuildFile; fileRef = F7B1E5AD0CD7CF1900CE136E /* ANTLRRecognizerSharedState.m */; };
-		1A12C96D11B89F83008C9BED /* ANTLRRewriteRuleElementStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F71325870C4A060900B99F2D /* ANTLRRewriteRuleElementStream.m */; };
-		1A12C96E11B89F84008C9BED /* ANTLRRewriteRuleSubtreeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F70B11BC0C4C2B6400C3ECE0 /* ANTLRRewriteRuleSubtreeStream.m */; };
-		1A12C96F11B89F85008C9BED /* ANTLRRewriteRuleTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F70B11C20C4C2B7900C3ECE0 /* ANTLRRewriteRuleTokenStream.m */; };
-		1A12C97011B89F87008C9BED /* ANTLRStringStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F70AA7C609AA339900C3FD5E /* ANTLRStringStream.m */; };
-		1A12C97111B89F8B008C9BED /* ANTLRCharStreamState.m in Sources */ = {isa = PBXBuildFile; fileRef = F70AA7CE09AA379300C3FD5E /* ANTLRCharStreamState.m */; };
-		1A12C97211B89F8C008C9BED /* ANTLRToken+DebuggerSupport.m in Sources */ = {isa = PBXBuildFile; fileRef = F77744040B234A3400D1F89B /* ANTLRToken+DebuggerSupport.m */; };
-		1A12C97311B89F8E008C9BED /* ANTLRTreeException.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D1760B07AEAA001813C4 /* ANTLRTreeException.m */; };
-		1A12C97411B89F90008C9BED /* ANTLRTreeParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C5D550AB63C1D00282574 /* ANTLRTreeParser.m */; };
-		1A16B13C11C66492002860C7 /* ANTLRLinkBase.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A16B13A11C66492002860C7 /* ANTLRLinkBase.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A16B13D11C66492002860C7 /* ANTLRLinkBase.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A16B13B11C66492002860C7 /* ANTLRLinkBase.m */; };
-		1A1702FE11C05D4800F6978A /* ANTLRHashMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1702FC11C05D4800F6978A /* ANTLRHashMap.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A1702FF11C05D4800F6978A /* ANTLRHashMap.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1702FD11C05D4800F6978A /* ANTLRHashMap.m */; };
-		1A1BCDBB11CB01E60051A1EC /* ANTLRRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1BCDB911CB01E60051A1EC /* ANTLRRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A1BCDBC11CB01E60051A1EC /* ANTLRRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1BCDBA11CB01E60051A1EC /* ANTLRRuleReturnScope.m */; };
-		1A1BCDCF11CB0B3D0051A1EC /* ANTLRTreeRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1BCDCD11CB0B3D0051A1EC /* ANTLRTreeRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A1BCDD011CB0B3D0051A1EC /* ANTLRTreeRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1BCDCE11CB0B3D0051A1EC /* ANTLRTreeRuleReturnScope.m */; };
-		1A1BCE2A11CB1A3E0051A1EC /* ANTLRTreeRewriter.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1BCE2811CB1A3E0051A1EC /* ANTLRTreeRewriter.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A1BCE2B11CB1A3E0051A1EC /* ANTLRTreeRewriter.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1BCE2911CB1A3E0051A1EC /* ANTLRTreeRewriter.m */; };
-		1A1CCCAB11B724B2002E5F53 /* ANTLRLookaheadStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1CCCA911B724B2002E5F53 /* ANTLRLookaheadStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A1CCCC811B727B5002E5F53 /* ANTLRError.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1CCCC711B727B5002E5F53 /* ANTLRError.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A1D465B11BE73B2001575F3 /* ANTLRBaseTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1D465911BE73B2001575F3 /* ANTLRBaseTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A1D465C11BE73B2001575F3 /* ANTLRBaseTreeAdaptor.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1D465A11BE73B2001575F3 /* ANTLRBaseTreeAdaptor.m */; };
-		1A1D467011BE75C0001575F3 /* ANTLRMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1D466E11BE75C0001575F3 /* ANTLRMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A1D467111BE75C0001575F3 /* ANTLRMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1D466F11BE75C0001575F3 /* ANTLRMapElement.m */; };
-		1A1D467C11BE8E5A001575F3 /* ANTLRCommonErrorNode.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1D467A11BE8E5A001575F3 /* ANTLRCommonErrorNode.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A1D467D11BE8E5A001575F3 /* ANTLRCommonErrorNode.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1D467B11BE8E5A001575F3 /* ANTLRCommonErrorNode.m */; };
-		1A20C56512D6267500C2072A /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F700E86A0A5FA34D005D0757 /* main.m */; };
-		1A26329511C53578000DCDD4 /* ANTLRMismatchedNotSetException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A26329311C53578000DCDD4 /* ANTLRMismatchedNotSetException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A26329611C53578000DCDD4 /* ANTLRMismatchedNotSetException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A26329411C53578000DCDD4 /* ANTLRMismatchedNotSetException.m */; };
-		1A270BF911C1451200DCC8F3 /* ANTLRTreeIterator.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A270BF711C1451200DCC8F3 /* ANTLRTreeIterator.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A270BFA11C1451200DCC8F3 /* ANTLRTreeIterator.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A270BF811C1451200DCC8F3 /* ANTLRTreeIterator.m */; };
-		1A2D217511E4F57C00DFE328 /* ANTLRUniqueIDMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A2D217311E4F57C00DFE328 /* ANTLRUniqueIDMap.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A2D217611E4F57C00DFE328 /* ANTLRUniqueIDMap.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A2D217411E4F57C00DFE328 /* ANTLRUniqueIDMap.m */; };
-		1A2D218611E502DE00DFE328 /* ANTLRNodeMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A2D218411E502DE00DFE328 /* ANTLRNodeMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A2D218711E502DE00DFE328 /* ANTLRNodeMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A2D218511E502DE00DFE328 /* ANTLRNodeMapElement.m */; };
-		1A348B5811D2BF1C000C72FC /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		1A348BA511D2C6A0000C72FC /* ANTLRBitSetTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1D11D2BE4F000C72FC /* ANTLRBitSetTest.m */; };
-		1A348BA811D2C6AD000C72FC /* ANTLRCommonTokenTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2311D2BE4F000C72FC /* ANTLRCommonTokenTest.m */; };
-		1A348BAB11D2C6B8000C72FC /* ANTLRCommonTreeTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2611D2BE4F000C72FC /* ANTLRCommonTreeTest.m */; };
-		1A348BAE11D2C6C6000C72FC /* ANTLRFastQueueTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1811D2BE4F000C72FC /* ANTLRFastQueueTest.m */; };
-		1A348BAF11D2C6D3000C72FC /* ANTLRIntArrayTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1A11D2BE4F000C72FC /* ANTLRIntArrayTest.m */; };
-		1A348BB211D2C6E3000C72FC /* ANTLRStringStreamTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2011D2BE4F000C72FC /* ANTLRStringStreamTest.m */; };
-		1A348BB611D2C711000C72FC /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
-		1A348BB811D2C711000C72FC /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
-		1A348BF211D2D0E0000C72FC /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
-		1A348BF311D2D0E0000C72FC /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
-		1A348BF411D2D0E7000C72FC /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		1A348C0611D2D22B000C72FC /* ANTLRBitSetTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1D11D2BE4F000C72FC /* ANTLRBitSetTest.m */; };
-		1A3A08E611E213C500D5EE26 /* ANTLRBaseStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A3A08E411E213C500D5EE26 /* ANTLRBaseStack.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A3A08E711E213C500D5EE26 /* ANTLRBaseStack.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A3A08E511E213C500D5EE26 /* ANTLRBaseStack.m */; };
-		1A3A08EA11E213E100D5EE26 /* ANTLRSymbolStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A3A08E811E213E100D5EE26 /* ANTLRSymbolStack.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A3A08EB11E213E100D5EE26 /* ANTLRSymbolStack.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A3A08E911E213E100D5EE26 /* ANTLRSymbolStack.m */; };
-		1A3A09BE11E235BD00D5EE26 /* antlr3.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A3A09BD11E235BD00D5EE26 /* antlr3.h */; };
-		1A406B5612E8F2ED005EF037 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C5E560AB7E41000282574 /* main.m */; };
-		1A45657711C922BE0082F421 /* ANTLRRuleMemo.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A45657511C922BE0082F421 /* ANTLRRuleMemo.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A45657811C922BE0082F421 /* ANTLRRuleMemo.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A45657611C922BE0082F421 /* ANTLRRuleMemo.m */; };
-		1A45658911C9270D0082F421 /* ANTLRBaseMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A45658711C9270D0082F421 /* ANTLRBaseMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A45658A11C9270D0082F421 /* ANTLRBaseMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A45658811C9270D0082F421 /* ANTLRBaseMapElement.m */; };
-		1A4A851211CBCE3E00E4BF1B /* ANTLRTreeVisitor.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A4A851011CBCE3E00E4BF1B /* ANTLRTreeVisitor.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A4A851311CBCE3E00E4BF1B /* ANTLRTreeVisitor.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A4A851111CBCE3E00E4BF1B /* ANTLRTreeVisitor.m */; };
-		1A4A851811CBCE5500E4BF1B /* ANTLRTreeVisitorAction.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A4A851611CBCE5500E4BF1B /* ANTLRTreeVisitorAction.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A4A851911CBCE5500E4BF1B /* ANTLRTreeVisitorAction.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A4A851711CBCE5500E4BF1B /* ANTLRTreeVisitorAction.m */; };
-		1A4A851E11CBCF3700E4BF1B /* ANTLRTreeWizard.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A4A851C11CBCF3700E4BF1B /* ANTLRTreeWizard.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A4A851F11CBCF3700E4BF1B /* ANTLRTreeWizard.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A4A851D11CBCF3700E4BF1B /* ANTLRTreeWizard.m */; };
-		1A4D5AD611B55A45001C9482 /* ANTLRBaseTree.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A4D5AD411B55A45001C9482 /* ANTLRBaseTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A5EA50B11CFE7CE00E8932F /* ANTLRMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A5EA50911CFE7CE00E8932F /* ANTLRMap.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A5EA50C11CFE7CE00E8932F /* ANTLRMap.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A5EA50A11CFE7CE00E8932F /* ANTLRMap.m */; };
-		1A65B7D811B9532A00FD8754 /* ANTLRBufferedTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A65B7D611B9532A00FD8754 /* ANTLRBufferedTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A65B7D911B9532A00FD8754 /* ANTLRBufferedTreeNodeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A65B7D711B9532A00FD8754 /* ANTLRBufferedTreeNodeStream.m */; };
-		1A67885211B87ABA00A11EEC /* ANTLRBaseTree.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A4D5AD511B55A45001C9482 /* ANTLRBaseTree.m */; };
-		1A67885311B87AC400A11EEC /* ANTLRCharStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F70AA7B509AA2B8800C3FD5E /* ANTLRCharStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A67885411B87AEA00A11EEC /* ANTLRFastQueue.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1CCC9111B6FD39002E5F53 /* ANTLRFastQueue.m */; };
-		1A67885511B87AEF00A11EEC /* ANTLRIntArray.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1CCC9311B6FD39002E5F53 /* ANTLRIntArray.m */; };
-		1A6788FC11B893E100A11EEC /* ANTLRBaseRecognizer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7A4098C09659BF3002CC781 /* ANTLRBaseRecognizer.m */; };
-		1A6C451711BF4EE00039788A /* ANTLRMissingTokenException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6C451511BF4EE00039788A /* ANTLRMissingTokenException.m */; };
-		1A6C452811BF50A40039788A /* ANTLRUnwantedTokenException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6C452611BF50A40039788A /* ANTLRUnwantedTokenException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6C452911BF50A40039788A /* ANTLRUnwantedTokenException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6C452711BF50A40039788A /* ANTLRUnwantedTokenException.m */; };
-		1A75BF5911D6B3FD0096C6F5 /* ANTLRMissingTokenException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6C451411BF4EE00039788A /* ANTLRMissingTokenException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A75BFBA11D6C2B10096C6F5 /* ANTLRDFA.m in Sources */ = {isa = PBXBuildFile; fileRef = F7754E3E0A5C0A0500D0873A /* ANTLRDFA.m */; };
-		1A77EE9312E6A57C007F323A /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
-		1A77EE9412E6A57C007F323A /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
-		1A77EE9712E6A594007F323A /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		1A86B91B11EB9F6300C67A03 /* ANTLRParseTree.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A86B91911EB9F6300C67A03 /* ANTLRParseTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A86B91C11EB9F6300C67A03 /* ANTLRParseTree.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A86B91A11EB9F6300C67A03 /* ANTLRParseTree.m */; };
-		1A86BACF11EC1CD000C67A03 /* ANTLRUnbufferedTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A86BACD11EC1CD000C67A03 /* ANTLRUnbufferedTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A86BAD011EC1CD000C67A03 /* ANTLRUnbufferedTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A86BACE11EC1CD000C67A03 /* ANTLRUnbufferedTokenStream.m */; };
-		1A8ABFC611BA9B960038DBB0 /* ANTLRCharStreamState.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A8ABFC511BA9B960038DBB0 /* ANTLRCharStreamState.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A8AC00C11BAEC710038DBB0 /* ANTLRRuntimeException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A8AC00A11BAEC710038DBB0 /* ANTLRRuntimeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A8AC00D11BAEC710038DBB0 /* ANTLRRuntimeException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A8AC00B11BAEC710038DBB0 /* ANTLRRuntimeException.m */; };
-		1AAC202C11CC621A00CF56D1 /* ANTLRTreePatternLexer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AAC202A11CC621A00CF56D1 /* ANTLRTreePatternLexer.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1AAC202D11CC621A00CF56D1 /* ANTLRTreePatternLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AAC202B11CC621A00CF56D1 /* ANTLRTreePatternLexer.m */; };
-		1AAC20A511CC790300CF56D1 /* ANTLRTreePatternParser.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AAC20A311CC790300CF56D1 /* ANTLRTreePatternParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1AAC20A611CC790300CF56D1 /* ANTLRTreePatternParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AAC20A411CC790300CF56D1 /* ANTLRTreePatternParser.m */; };
-		1AB4A54211B995290076E91A /* ANTLREarlyExitException.m in Sources */ = {isa = PBXBuildFile; fileRef = F700E61A0A5F66EC005D0757 /* ANTLREarlyExitException.m */; };
-		1AB4A54311B9952A0076E91A /* ANTLRFailedPredicateException.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D1740B07AEAA001813C4 /* ANTLRFailedPredicateException.m */; };
-		1AB4A59111B9A0DA0076E91A /* ANTLRStreamEnumerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AB4A58F11B9A0DA0076E91A /* ANTLRStreamEnumerator.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1AB4A59211B9A0DA0076E91A /* ANTLRStreamEnumerator.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AB4A59011B9A0DA0076E91A /* ANTLRStreamEnumerator.m */; };
-		1AB5F47711E3869D00E065B0 /* ANTLRRuleMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AB5F47511E3869D00E065B0 /* ANTLRRuleMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1AB5F47811E3869D00E065B0 /* ANTLRRuleMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AB5F47611E3869D00E065B0 /* ANTLRRuleMapElement.m */; };
-		1AB5F51E11E3BE2E00E065B0 /* ANTLRPtrBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AB5F51C11E3BE2E00E065B0 /* ANTLRPtrBuffer.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1AB5F51F11E3BE2E00E065B0 /* ANTLRPtrBuffer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AB5F51D11E3BE2E00E065B0 /* ANTLRPtrBuffer.m */; };
-		1AC5AC9E12E7BEFE00DF0C58 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		1AC5AC9F12E7BEFE00DF0C58 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
-		1AC5ACA112E7BEFE00DF0C58 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
-		1AC5ACAD12E7BF4E00DF0C58 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AC5AC8112E7BC9100DF0C58 /* main.m */; };
-		1AC5ACD612E7C05800DF0C58 /* LangLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AC5ACC912E7C03C00DF0C58 /* LangLexer.m */; };
-		1AC5ACE612E7CE4700DF0C58 /* LangParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AC5ACCB12E7C03C00DF0C58 /* LangParser.m */; };
-		1AC5ACE712E7CE4C00DF0C58 /* LangLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AC5ACC912E7C03C00DF0C58 /* LangLexer.m */; };
-		1AC5ACE812E7CE5100DF0C58 /* LangDumpDecl.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AC5ACC612E7C03C00DF0C58 /* LangDumpDecl.m */; };
-		1ADB66F112E74341007C1661 /* FuzzyLexer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1ADB66F012E74341007C1661 /* FuzzyLexer.h */; };
-		1ADB67BA12E74E82007C1661 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
-		1AE8A96C11D9227A00D36FD6 /* ANTLRRuleStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AE8A96A11D9227A00D36FD6 /* ANTLRRuleStack.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1AE8A96D11D9227A00D36FD6 /* ANTLRRuleStack.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE8A96B11D9227A00D36FD6 /* ANTLRRuleStack.m */; };
-		1AEECE1511E7EB3C00554AAF /* ANTLRTokenRewriteStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1FFC5911CD12A400FBB452 /* ANTLRTokenRewriteStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1AEECE1611E7EB3D00554AAF /* ANTLRTokenRewriteStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1FFC5A11CD12A400FBB452 /* ANTLRTokenRewriteStream.m */; };
-		F7009ADB0A1BE4AE002EDD5D /* ANTLRLexerRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = F7009AD90A1BE4AE002EDD5D /* ANTLRLexerRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F700E61B0A5F66EC005D0757 /* ANTLREarlyExitException.h in Headers */ = {isa = PBXBuildFile; fileRef = F700E6190A5F66EC005D0757 /* ANTLREarlyExitException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F700E8FB0A5FAD21005D0757 /* ANTLRParser.h in Headers */ = {isa = PBXBuildFile; fileRef = F700E8F90A5FAD21005D0757 /* ANTLRParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F700EC670A5FDF0D005D0757 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		F700ECA40A5FDF1A005D0757 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
-		F700ECA50A5FDF1A005D0757 /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
-		F700ECD90A5FE19A005D0757 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
-		F700ECDA0A5FE19A005D0757 /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
-		F700ED7F0A5FF17C005D0757 /* ANTLRTokenSource.h in Headers */ = {isa = PBXBuildFile; fileRef = F700ED7E0A5FF17C005D0757 /* ANTLRTokenSource.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F700ED960A5FF2A5005D0757 /* ANTLRCommonTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F700ED940A5FF2A5005D0757 /* ANTLRCommonTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7037CF00A0582FC0070435D /* ANTLRMismatchedRangeException.h in Headers */ = {isa = PBXBuildFile; fileRef = F7037CEE0A0582FC0070435D /* ANTLRMismatchedRangeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7037EA60A05AFD70070435D /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		F70380BC0A07FA0D0070435D /* ANTLRMismatchedSetException.h in Headers */ = {isa = PBXBuildFile; fileRef = F70380BA0A07FA0D0070435D /* ANTLRMismatchedSetException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7048FF80B07D05400D2F326 /* TestLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7048FF70B07D05400D2F326 /* TestLexer.m */; };
-		F7048FF90B07D05800D2F326 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F7E983940A0D6A5F00F16093 /* main.m */; };
-		F70AA7A609AA2A6900C3FD5E /* ANTLR.h in Headers */ = {isa = PBXBuildFile; fileRef = F70AA7A509AA2A6900C3FD5E /* ANTLR.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F70AA7AF09AA2AC000C3FD5E /* ANTLRIntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F70AA7AD09AA2AC000C3FD5E /* ANTLRIntStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F70AA7C709AA339900C3FD5E /* ANTLRStringStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F70AA7C509AA339900C3FD5E /* ANTLRStringStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F70B11BD0C4C2B6400C3ECE0 /* ANTLRRewriteRuleSubtreeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F70B11BB0C4C2B6400C3ECE0 /* ANTLRRewriteRuleSubtreeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F70B11C30C4C2B7900C3ECE0 /* ANTLRRewriteRuleTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F70B11C10C4C2B7900C3ECE0 /* ANTLRRewriteRuleTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F71325880C4A060900B99F2D /* ANTLRRewriteRuleElementStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F71325860C4A060900B99F2D /* ANTLRRewriteRuleElementStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F72C58EA0AB3911D00282574 /* ANTLRCommonTree.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C58E80AB3911D00282574 /* ANTLRCommonTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F72C59A70AB4F20A00282574 /* ANTLRCommonTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C59A50AB4F20A00282574 /* ANTLRCommonTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F72C5B840AB52AD300282574 /* ANTLRTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C5B820AB52AD300282574 /* ANTLRTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F72C5D560AB63C1D00282574 /* ANTLRTreeParser.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C5D540AB63C1D00282574 /* ANTLRTreeParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F72C5D620AB63E0B00282574 /* ANTLRMismatchedTreeNodeException.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C5D600AB63E0B00282574 /* ANTLRMismatchedTreeNodeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F72C5E620AB7E4C900282574 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		F72C5E630AB7E4C900282574 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
-		F72C5E650AB7E4C900282574 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
-		F72C5ECE0AB7E5A500282574 /* ANTLRParserRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C5ECC0AB7E5A500282574 /* ANTLRParserRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F738D1790B07AEAA001813C4 /* ANTLRTreeException.h in Headers */ = {isa = PBXBuildFile; fileRef = F738D1750B07AEAA001813C4 /* ANTLRTreeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F738D1FC0B07B1BD001813C4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F7DD07800A7B67A7006A006C /* main.m */; };
-		F738D20D0B07B265001813C4 /* SymbolTableParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D2010B07B1CE001813C4 /* SymbolTableParser.m */; };
-		F738D20E0B07B266001813C4 /* SymbolTableLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D1FF0B07B1CE001813C4 /* SymbolTableLexer.m */; };
-		F738D2120B07B32D001813C4 /* T.g in Sources */ = {isa = PBXBuildFile; fileRef = F7DD05E40A7B14BE006A006C /* T.g */; };
-		F738D2220B07B39F001813C4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F7DD05E70A7B1572006A006C /* main.m */; };
-		F738D3190B07BDB7001813C4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F700ECE70A5FE25D005D0757 /* main.m */; };
-		F738D3610B07C105001813C4 /* CombinedLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D35E0B07C105001813C4 /* CombinedLexer.m */; };
-		F738D3620B07C105001813C4 /* CombinedParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D3600B07C105001813C4 /* CombinedParser.m */; };
-		F738D37E0B07C3BD001813C4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F79D56C00A0E287500EA3CEE /* main.m */; };
-		F73E2B740A9CFE6A005D6267 /* ANTLRTree.h in Headers */ = {isa = PBXBuildFile; fileRef = F73E2B720A9CFE6A005D6267 /* ANTLRTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F73E2B7C0A9D0AFC005D6267 /* ANTLRTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = F73E2B7A0A9D0AFC005D6267 /* ANTLRTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F741D0830B381E720024DF3F /* SimpleCWalker.m in Sources */ = {isa = PBXBuildFile; fileRef = F741D0650B3812D40024DF3F /* SimpleCWalker.m */; };
-		F741D0840B381E730024DF3F /* SimpleCParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F7715D310AC9DE9E00ED984D /* SimpleCParser.m */; };
-		F741D08E0B381EA90024DF3F /* SimpleCLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7715D2F0AC9DE9E00ED984D /* SimpleCLexer.m */; };
-		F7492F5D09C016A200B25E30 /* ANTLRBaseRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = F7A4098B09659BF3002CC781 /* ANTLRBaseRecognizer.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F76287130B714E77006AA7EF /* TLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7DD06E80A7B1700006A006C /* TLexer.m */; };
-		F76287140B714E78006AA7EF /* TParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D2240B07B3BC001813C4 /* TParser.m */; };
-		F76287150B714E82006AA7EF /* SimpleCParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D2510B07B842001813C4 /* SimpleCParser.m */; };
-		F76287160B714E83006AA7EF /* SimpleCLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D2810B07B9B6001813C4 /* SimpleCLexer.m */; };
-		F76287170B714EA9006AA7EF /* FuzzyLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F72B8D0B0AD01DCB0013F1E2 /* FuzzyLexer.m */; };
-		F763D4490A666D3D0061CD35 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		F763D51E0A66765B0061CD35 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
-		F76AA9900CEA515A00AF044C /* ANTLRCommonTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F76AA98E0CEA515A00AF044C /* ANTLRCommonTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7754E3F0A5C0A0500D0873A /* ANTLRDFA.h in Headers */ = {isa = PBXBuildFile; fileRef = F7754E3D0A5C0A0500D0873A /* ANTLRDFA.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F77744050B234A3400D1F89B /* ANTLRToken+DebuggerSupport.h in Headers */ = {isa = PBXBuildFile; fileRef = F77744030B234A3400D1F89B /* ANTLRToken+DebuggerSupport.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F77747560B23A70600D1F89B /* ANTLRDebug.h in Headers */ = {isa = PBXBuildFile; fileRef = F77747550B23A70600D1F89B /* ANTLRDebug.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F77765CC09DC583000517181 /* ANTLRToken.h in Headers */ = {isa = PBXBuildFile; fileRef = F77765CA09DC583000517181 /* ANTLRToken.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F777660509DC5CF400517181 /* ANTLRCommonToken.h in Headers */ = {isa = PBXBuildFile; fileRef = F777660309DC5CF400517181 /* ANTLRCommonToken.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F777668209DC719C00517181 /* ANTLRMismatchedTokenException.h in Headers */ = {isa = PBXBuildFile; fileRef = F777668009DC719C00517181 /* ANTLRMismatchedTokenException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F777669309DC72D600517181 /* ANTLRRecognitionException.h in Headers */ = {isa = PBXBuildFile; fileRef = F777669109DC72D600517181 /* ANTLRRecognitionException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F77766AF09DD53E800517181 /* ANTLRTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F77766AE09DD53E800517181 /* ANTLRTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F777678E09DD618000517181 /* ANTLRLexer.h in Headers */ = {isa = PBXBuildFile; fileRef = F7A4099109659BFB002CC781 /* ANTLRLexer.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F79D598B0A0E51AB00EA3CEE /* ANTLRNoViableAltException.h in Headers */ = {isa = PBXBuildFile; fileRef = F79D59890A0E51AB00EA3CEE /* ANTLRNoViableAltException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F79D5AF60A0E634900EA3CEE /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		F79D5AF70A0E634A00EA3CEE /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
-		F79D5AF80A0E634A00EA3CEE /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
-		F7B1E5B00CD7CF1900CE136E /* ANTLRRecognizerSharedState.h in Headers */ = {isa = PBXBuildFile; fileRef = F7B1E5AC0CD7CF1900CE136E /* ANTLRRecognizerSharedState.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7CD47650C64D24C00FF933A /* TreeRewriteLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7CD46360C64BB7300FF933A /* TreeRewriteLexer.m */; };
-		F7CD47660C64D24D00FF933A /* TreeRewriteParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F7CD46380C64BB7300FF933A /* TreeRewriteParser.m */; };
-		F7CD47670C64D24D00FF933A /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F7CD45FC0C64BA4B00FF933A /* main.m */; };
-		F7CD48670C64D88800FF933A /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		F7CD48680C64D88800FF933A /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
-		F7CD486A0C64D88800FF933A /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
-		F7DD06040A7B1663006A006C /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
-		F7DD06070A7B1664006A006C /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
-		F7DD06300A7B1665006A006C /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
-		F7DD06C50A7B1691006A006C /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		F7DD074C0A7B6656006A006C /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
-		F7DD074D0A7B665C006A006C /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
-		F7DD074E0A7B665D006A006C /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
-		F7DD074F0A7B665D006A006C /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
-		F7E261160B1E44320013F640 /* ANTLRDebugParser.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E261140B1E44320013F640 /* ANTLRDebugParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7E2611A0B1E443D0013F640 /* ANTLRDebugTreeParser.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E261180B1E443C0013F640 /* ANTLRDebugTreeParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7E261200B1E44E80013F640 /* ANTLRDebugTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E2611E0B1E44E80013F640 /* ANTLRDebugTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7E261240B1E44FA0013F640 /* ANTLRDebugTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E261220B1E44FA0013F640 /* ANTLRDebugTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7E261280B1E45070013F640 /* ANTLRDebugTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E261260B1E45070013F640 /* ANTLRDebugTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7E261390B1E45580013F640 /* ANTLRDebugEventProxy.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E261370B1E45580013F640 /* ANTLRDebugEventProxy.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		F7E985580A0D865E00F16093 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
-		F7E985590A0D866000F16093 /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
-		F7F4E9BA0A6E8B110092D087 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
-/* End PBXBuildFile section */
-
-/* Begin PBXBuildRule section */
-		1A0F346112EA42D800496BB8 /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			filePatterns = .g.m;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1.m,
-				$1.h,
-			);
-			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
-		};
-		1A994CF212A84FD3001853FF /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			filePatterns = .g.m;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1.h,
-				$1.m,
-			);
-			script = "/usr/bin/java -jar antlr-3.3.1.jar $1.g$2";
-		};
-		1A994D3E12A858E1001853FF /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			filePatterns = .g.m;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1.m,
-				$1.h,
-			);
-			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
-		};
-		1A994D4F12A85987001853FF /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			filePatterns = .g.m;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1.h,
-				$1.m,
-			);
-			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
-		};
-		1A994D8512A85ABE001853FF /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			filePatterns = .g.m;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1.h,
-				$1.m,
-			);
-			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1";
-		};
-		1A994DC612A85BFC001853FF /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			filePatterns = .g.m;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1.h,
-				$1.m,
-			);
-			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
-		};
-		1A994DC712A85BFC001853FF /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			filePatterns = .g.m;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1.h,
-				$1.m,
-			);
-			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
-		};
-		1A994DC812A85BFC001853FF /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			filePatterns = .g.m;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1.h,
-				$1.m,
-			);
-			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
-		};
-		1A994DC912A85BFC001853FF /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			filePatterns = .g.m;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1.h,
-				$1.m,
-			);
-			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
-		};
-		1AC5ACA212E7BEFE00DF0C58 /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			filePatterns = .g.m;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1.m,
-				$1.h,
-			);
-			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
-		};
-/* End PBXBuildRule section */
-
-/* Begin PBXContainerItemProxy section */
-		1A0F347012EA43BA00496BB8 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1A0F343B12EA425700496BB8;
-			remoteInfo = "Regenerate polydiff";
-		};
-		1A0F347212EA43BA00496BB8 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AC5AC9312E7BE0400DF0C58;
-			remoteInfo = "Regenerate treeparser";
-		};
-		1A0F347412EA43BA00496BB8 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = F7CD47610C64D23800FF933A;
-			remoteInfo = "Regenerate treerewrite";
-		};
-		1A12134411D3FDA500F27B38 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1A348BEB11D2D0A1000C72FC;
-			remoteInfo = ANTLRBitsetTest;
-		};
-		1A12134611D3FDA500F27B38 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1A1210FA11D3A5D900F27B38;
-			remoteInfo = ANTLRCommonTokenTest;
-		};
-		1A12134811D3FDA500F27B38 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1A12130B11D3F7CD00F27B38;
-			remoteInfo = ANTLRCommonTreeTest;
-		};
-		1A12134A11D3FDA500F27B38 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1A12122311D3C92400F27B38;
-			remoteInfo = ANTLRFastQueueTest;
-		};
-		1A12134C11D3FDA500F27B38 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1A1212DB11D3F53600F27B38;
-			remoteInfo = ANTLRIntArrayTest;
-		};
-		1A12134E11D3FDA500F27B38 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1A1211D011D3BF4600F27B38;
-			remoteInfo = ANTLRStringStreamTest;
-		};
-		F762874B0B715417006AA7EF /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = F76287450B7151E3006AA7EF;
-			remoteInfo = "Regenerate fuzzy";
-		};
-		F76287A60B7157C2006AA7EF /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = F762877E0B71559C006AA7EF;
-			remoteInfo = "Regenerate combined";
-		};
-		F76287A80B7157C2006AA7EF /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = F76287820B71559F006AA7EF;
-			remoteInfo = "Regenerate LL-star";
-		};
-		F76287AA0B7157C2006AA7EF /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = F76287860B7155A2006AA7EF;
-			remoteInfo = "Regenerate hoistedPredicates";
-		};
-		F76287AC0B7157C2006AA7EF /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = F762878A0B7155AB006AA7EF;
-			remoteInfo = "Regenerate scopes";
-		};
-		F76287AE0B7157C2006AA7EF /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = F762878E0B7155AF006AA7EF;
-			remoteInfo = "Regenerate simplec tree";
-		};
-		F79EFB130C5845A300ABAB3D /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = F76287780B71557E006AA7EF;
-			remoteInfo = "Regenerate lexertest-simple";
-		};
-/* End PBXContainerItemProxy section */
-
-/* Begin PBXCopyFilesBuildPhase section */
-		F706A5710A0EC357008999AB /* CopyFiles */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = "";
-			dstSubfolderSpec = 16;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXCopyFilesBuildPhase section */
-
-/* Begin PBXFileReference section */
-		0867D69BFE84028FC02AAC07 /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = /System/Library/Frameworks/Foundation.framework; sourceTree = "<absolute>"; };
-		089C1667FE841158C02AAC07 /* English */ = {isa = PBXFileReference; fileEncoding = 10; lastKnownFileType = text.plist.strings; name = English; path = English.lproj/InfoPlist.strings; sourceTree = "<group>"; };
-		1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = FuzzyLexer.h; path = /System/Library/Frameworks/Cocoa.framework; sourceTree = "<absolute>"; };
-		1A0F342D12EA411F00496BB8 /* files */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = files; sourceTree = "<group>"; };
-		1A0F342E12EA411F00496BB8 /* input */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		1A0F343012EA411F00496BB8 /* Main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Main.m; sourceTree = "<group>"; };
-		1A0F343112EA411F00496BB8 /* output */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		1A0F343212EA411F00496BB8 /* Poly.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Poly.g; sourceTree = "<group>"; };
-		1A0F343312EA411F00496BB8 /* PolyDifferentiator.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = PolyDifferentiator.g; sourceTree = "<group>"; };
-		1A0F343412EA411F00496BB8 /* PolyPrinter.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = PolyPrinter.g; sourceTree = "<group>"; };
-		1A0F343512EA411F00496BB8 /* Simplifier.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Simplifier.g; sourceTree = "<group>"; };
-		1A0F346612EA42D800496BB8 /* polydiff */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = polydiff; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A0F347812EA444500496BB8 /* Poly.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Poly.tokens; sourceTree = "<group>"; };
-		1A0F347912EA444500496BB8 /* PolyDifferentiator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = PolyDifferentiator.m; sourceTree = "<group>"; };
-		1A0F347A12EA444500496BB8 /* PolyLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PolyLexer.h; sourceTree = "<group>"; };
-		1A0F347B12EA444500496BB8 /* PolyLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = PolyLexer.m; sourceTree = "<group>"; };
-		1A0F347C12EA444500496BB8 /* PolyParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PolyParser.h; sourceTree = "<group>"; };
-		1A0F347D12EA444500496BB8 /* PolyParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = PolyParser.m; sourceTree = "<group>"; };
-		1A0F347E12EA444500496BB8 /* Simplifier.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Simplifier.h; sourceTree = "<group>"; };
-		1A0F347F12EA444500496BB8 /* Simplifier.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Simplifier.m; sourceTree = "<group>"; };
-		1A0F348012EA444500496BB8 /* Simplifier.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Simplifier.tokens; sourceTree = "<group>"; };
-		1A100AB911E604FE006ABF94 /* ANTLRHashRule.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRHashRule.h; sourceTree = "<group>"; };
-		1A100ABA11E604FE006ABF94 /* ANTLRHashRule.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRHashRule.m; sourceTree = "<group>"; };
-		1A1210FB11D3A5D900F27B38 /* ANTLRCommonTokenTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRCommonTokenTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A1210FC11D3A5DA00F27B38 /* ANTLRCommonTokenTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRCommonTokenTest-Info.plist"; sourceTree = "<group>"; };
-		1A1211D111D3BF4700F27B38 /* ANTLRStringStreamTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRStringStreamTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A1211D211D3BF4700F27B38 /* ANTLRStringStreamTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRStringStreamTest-Info.plist"; sourceTree = "<group>"; };
-		1A12122411D3C92400F27B38 /* ANTLRFastQueueTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRFastQueueTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A12122511D3C92400F27B38 /* ANTLRFastQueueTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRFastQueueTest-Info.plist"; sourceTree = "<group>"; };
-		1A12122A11D3C93500F27B38 /* ANTLR.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = ANTLR.framework; path = Library/Frameworks/ANTLR.framework; sourceTree = SDKROOT; };
-		1A1212DC11D3F53600F27B38 /* ANTLRIntArrayTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRIntArrayTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A1212DD11D3F53600F27B38 /* ANTLRIntArrayTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRIntArrayTest-Info.plist"; sourceTree = "<group>"; };
-		1A12130C11D3F7CD00F27B38 /* ANTLRCommonTreeTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRCommonTreeTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A12130D11D3F7CD00F27B38 /* ANTLRCommonTreeTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRCommonTreeTest-Info.plist"; sourceTree = "<group>"; };
-		1A16B13A11C66492002860C7 /* ANTLRLinkBase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRLinkBase.h; sourceTree = "<group>"; };
-		1A16B13B11C66492002860C7 /* ANTLRLinkBase.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRLinkBase.m; sourceTree = "<group>"; };
-		1A1702FC11C05D4800F6978A /* ANTLRHashMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRHashMap.h; sourceTree = "<group>"; };
-		1A1702FD11C05D4800F6978A /* ANTLRHashMap.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRHashMap.m; sourceTree = "<group>"; };
-		1A18EF5511B8028D0006186A /* ANTLRBufferedTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBufferedTokenStream.h; sourceTree = "<group>"; };
-		1A18EF5611B8028D0006186A /* ANTLRBufferedTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBufferedTokenStream.m; sourceTree = "<group>"; };
-		1A1BCDB911CB01E60051A1EC /* ANTLRRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRRuleReturnScope.h; sourceTree = "<group>"; };
-		1A1BCDBA11CB01E60051A1EC /* ANTLRRuleReturnScope.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRRuleReturnScope.m; sourceTree = "<group>"; };
-		1A1BCDCD11CB0B3D0051A1EC /* ANTLRTreeRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreeRuleReturnScope.h; sourceTree = "<group>"; };
-		1A1BCDCE11CB0B3D0051A1EC /* ANTLRTreeRuleReturnScope.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTreeRuleReturnScope.m; sourceTree = "<group>"; };
-		1A1BCE2811CB1A3E0051A1EC /* ANTLRTreeRewriter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreeRewriter.h; sourceTree = "<group>"; };
-		1A1BCE2911CB1A3E0051A1EC /* ANTLRTreeRewriter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTreeRewriter.m; sourceTree = "<group>"; };
-		1A1CCC9011B6FD39002E5F53 /* ANTLRFastQueue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRFastQueue.h; sourceTree = "<group>"; };
-		1A1CCC9111B6FD39002E5F53 /* ANTLRFastQueue.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRFastQueue.m; sourceTree = "<group>"; };
-		1A1CCC9211B6FD39002E5F53 /* ANTLRIntArray.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRIntArray.h; sourceTree = "<group>"; };
-		1A1CCC9311B6FD39002E5F53 /* ANTLRIntArray.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRIntArray.m; sourceTree = "<group>"; };
-		1A1CCCA911B724B2002E5F53 /* ANTLRLookaheadStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRLookaheadStream.h; sourceTree = "<group>"; };
-		1A1CCCAA11B724B2002E5F53 /* ANTLRLookaheadStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRLookaheadStream.m; sourceTree = "<group>"; };
-		1A1CCCC711B727B5002E5F53 /* ANTLRError.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRError.h; sourceTree = "<group>"; };
-		1A1D465911BE73B2001575F3 /* ANTLRBaseTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBaseTreeAdaptor.h; sourceTree = "<group>"; };
-		1A1D465A11BE73B2001575F3 /* ANTLRBaseTreeAdaptor.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBaseTreeAdaptor.m; sourceTree = "<group>"; };
-		1A1D466E11BE75C0001575F3 /* ANTLRMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRMapElement.h; sourceTree = "<group>"; };
-		1A1D466F11BE75C0001575F3 /* ANTLRMapElement.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRMapElement.m; sourceTree = "<group>"; };
-		1A1D467A11BE8E5A001575F3 /* ANTLRCommonErrorNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonErrorNode.h; sourceTree = "<group>"; };
-		1A1D467B11BE8E5A001575F3 /* ANTLRCommonErrorNode.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonErrorNode.m; sourceTree = "<group>"; };
-		1A1FFC5911CD12A400FBB452 /* ANTLRTokenRewriteStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTokenRewriteStream.h; sourceTree = "<group>"; };
-		1A1FFC5A11CD12A400FBB452 /* ANTLRTokenRewriteStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTokenRewriteStream.m; sourceTree = "<group>"; };
-		1A26329311C53578000DCDD4 /* ANTLRMismatchedNotSetException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRMismatchedNotSetException.h; sourceTree = "<group>"; };
-		1A26329411C53578000DCDD4 /* ANTLRMismatchedNotSetException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRMismatchedNotSetException.m; sourceTree = "<group>"; };
-		1A270BF711C1451200DCC8F3 /* ANTLRTreeIterator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreeIterator.h; sourceTree = "<group>"; };
-		1A270BF811C1451200DCC8F3 /* ANTLRTreeIterator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTreeIterator.m; sourceTree = "<group>"; };
-		1A2B096312E797DE00A75133 /* TestRewriteRuleTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = TestRewriteRuleTokenStream.m; path = test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.m; sourceTree = "<group>"; };
-		1A2B096612E797F600A75133 /* TestRewriteRuleTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = TestRewriteRuleTokenStream.h; path = test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.h; sourceTree = "<group>"; };
-		1A2D217311E4F57C00DFE328 /* ANTLRUniqueIDMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRUniqueIDMap.h; sourceTree = "<group>"; };
-		1A2D217411E4F57C00DFE328 /* ANTLRUniqueIDMap.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRUniqueIDMap.m; sourceTree = "<group>"; };
-		1A2D218411E502DE00DFE328 /* ANTLRNodeMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRNodeMapElement.h; sourceTree = "<group>"; };
-		1A2D218511E502DE00DFE328 /* ANTLRNodeMapElement.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRNodeMapElement.m; sourceTree = "<group>"; };
-		1A348B1711D2BE4F000C72FC /* ANTLRFastQueueTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRFastQueueTest.h; sourceTree = "<group>"; };
-		1A348B1811D2BE4F000C72FC /* ANTLRFastQueueTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRFastQueueTest.m; sourceTree = "<group>"; };
-		1A348B1911D2BE4F000C72FC /* ANTLRIntArrayTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRIntArrayTest.h; sourceTree = "<group>"; };
-		1A348B1A11D2BE4F000C72FC /* ANTLRIntArrayTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRIntArrayTest.m; sourceTree = "<group>"; };
-		1A348B1C11D2BE4F000C72FC /* ANTLRBitSetTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBitSetTest.h; sourceTree = "<group>"; };
-		1A348B1D11D2BE4F000C72FC /* ANTLRBitSetTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBitSetTest.m; sourceTree = "<group>"; };
-		1A348B1F11D2BE4F000C72FC /* ANTLRStringStreamTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRStringStreamTest.h; sourceTree = "<group>"; };
-		1A348B2011D2BE4F000C72FC /* ANTLRStringStreamTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRStringStreamTest.m; sourceTree = "<group>"; };
-		1A348B2211D2BE4F000C72FC /* ANTLRCommonTokenTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonTokenTest.h; sourceTree = "<group>"; };
-		1A348B2311D2BE4F000C72FC /* ANTLRCommonTokenTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonTokenTest.m; sourceTree = "<group>"; };
-		1A348B2511D2BE4F000C72FC /* ANTLRCommonTreeTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonTreeTest.h; sourceTree = "<group>"; };
-		1A348B2611D2BE4F000C72FC /* ANTLRCommonTreeTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonTreeTest.m; sourceTree = "<group>"; };
-		1A348B4E11D2BEE8000C72FC /* Test.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = Test.octest; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A348B4F11D2BEE8000C72FC /* Test-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "Test-Info.plist"; sourceTree = "<group>"; };
-		1A348BB511D2C711000C72FC /* Cocoa.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Cocoa.framework; path = System/Library/Frameworks/Cocoa.framework; sourceTree = SDKROOT; };
-		1A348BB711D2C711000C72FC /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = System/Library/Frameworks/CoreFoundation.framework; sourceTree = SDKROOT; };
-		1A348BEC11D2D0A1000C72FC /* ANTLRBitsetTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRBitsetTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A348BED11D2D0A1000C72FC /* ANTLRBitsetTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRBitsetTest-Info.plist"; sourceTree = "<group>"; };
-		1A3A08E411E213C500D5EE26 /* ANTLRBaseStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBaseStack.h; sourceTree = "<group>"; };
-		1A3A08E511E213C500D5EE26 /* ANTLRBaseStack.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBaseStack.m; sourceTree = "<group>"; };
-		1A3A08E811E213E100D5EE26 /* ANTLRSymbolStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRSymbolStack.h; sourceTree = "<group>"; };
-		1A3A08E911E213E100D5EE26 /* ANTLRSymbolStack.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRSymbolStack.m; sourceTree = "<group>"; };
-		1A3A09BD11E235BD00D5EE26 /* antlr3.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = antlr3.h; sourceTree = "<group>"; };
-		1A45657511C922BE0082F421 /* ANTLRRuleMemo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRRuleMemo.h; sourceTree = "<group>"; };
-		1A45657611C922BE0082F421 /* ANTLRRuleMemo.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRRuleMemo.m; sourceTree = "<group>"; };
-		1A45658711C9270D0082F421 /* ANTLRBaseMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBaseMapElement.h; sourceTree = "<group>"; };
-		1A45658811C9270D0082F421 /* ANTLRBaseMapElement.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBaseMapElement.m; sourceTree = "<group>"; };
-		1A4A851011CBCE3E00E4BF1B /* ANTLRTreeVisitor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreeVisitor.h; sourceTree = "<group>"; };
-		1A4A851111CBCE3E00E4BF1B /* ANTLRTreeVisitor.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTreeVisitor.m; sourceTree = "<group>"; };
-		1A4A851611CBCE5500E4BF1B /* ANTLRTreeVisitorAction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreeVisitorAction.h; sourceTree = "<group>"; };
-		1A4A851711CBCE5500E4BF1B /* ANTLRTreeVisitorAction.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTreeVisitorAction.m; sourceTree = "<group>"; };
-		1A4A851C11CBCF3700E4BF1B /* ANTLRTreeWizard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreeWizard.h; sourceTree = "<group>"; };
-		1A4A851D11CBCF3700E4BF1B /* ANTLRTreeWizard.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTreeWizard.m; sourceTree = "<group>"; };
-		1A4D5AD411B55A45001C9482 /* ANTLRBaseTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBaseTree.h; sourceTree = "<group>"; };
-		1A4D5AD511B55A45001C9482 /* ANTLRBaseTree.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBaseTree.m; sourceTree = "<group>"; };
-		1A5EA50911CFE7CE00E8932F /* ANTLRMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRMap.h; sourceTree = "<group>"; };
-		1A5EA50A11CFE7CE00E8932F /* ANTLRMap.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRMap.m; sourceTree = "<group>"; };
-		1A65B7D611B9532A00FD8754 /* ANTLRBufferedTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBufferedTreeNodeStream.h; sourceTree = "<group>"; };
-		1A65B7D711B9532A00FD8754 /* ANTLRBufferedTreeNodeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBufferedTreeNodeStream.m; sourceTree = "<group>"; };
-		1A6C451411BF4EE00039788A /* ANTLRMissingTokenException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRMissingTokenException.h; sourceTree = "<group>"; };
-		1A6C451511BF4EE00039788A /* ANTLRMissingTokenException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRMissingTokenException.m; sourceTree = "<group>"; };
-		1A6C452611BF50A40039788A /* ANTLRUnwantedTokenException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRUnwantedTokenException.h; sourceTree = "<group>"; };
-		1A6C452711BF50A40039788A /* ANTLRUnwantedTokenException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRUnwantedTokenException.m; sourceTree = "<group>"; };
-		1A77EE8912E6A552007F323A /* TreeRewriteRuleTokenStream.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = TreeRewriteRuleTokenStream.octest; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A77EE8A12E6A552007F323A /* TreeRewriteRuleTokenStream-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "TreeRewriteRuleTokenStream-Info.plist"; sourceTree = "<group>"; };
-		1A86B91911EB9F6300C67A03 /* ANTLRParseTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRParseTree.h; sourceTree = "<group>"; };
-		1A86B91A11EB9F6300C67A03 /* ANTLRParseTree.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRParseTree.m; sourceTree = "<group>"; };
-		1A86BACD11EC1CD000C67A03 /* ANTLRUnbufferedTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRUnbufferedTokenStream.h; sourceTree = "<group>"; };
-		1A86BACE11EC1CD000C67A03 /* ANTLRUnbufferedTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRUnbufferedTokenStream.m; sourceTree = "<group>"; };
-		1A8ABFC511BA9B960038DBB0 /* ANTLRCharStreamState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRCharStreamState.h; sourceTree = "<group>"; };
-		1A8AC00A11BAEC710038DBB0 /* ANTLRRuntimeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRRuntimeException.h; sourceTree = "<group>"; };
-		1A8AC00B11BAEC710038DBB0 /* ANTLRRuntimeException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRRuntimeException.m; sourceTree = "<group>"; };
-		1A994CE412A84F3E001853FF /* SimpleC__.gl */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleC__.gl; sourceTree = "<group>"; };
-		1A9CBD2411C9979600DA8FEF /* ANTLRUnbufferedCommonTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRUnbufferedCommonTreeNodeStream.h; sourceTree = "<group>"; };
-		1A9CBD2511C9979600DA8FEF /* ANTLRUnbufferedCommonTreeNodeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRUnbufferedCommonTreeNodeStream.m; sourceTree = "<group>"; };
-		1A9CBD2611C9979600DA8FEF /* ANTLRUnbufferedCommonTreeNodeStreamState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRUnbufferedCommonTreeNodeStreamState.h; sourceTree = "<group>"; };
-		1A9CBD2711C9979600DA8FEF /* ANTLRUnbufferedCommonTreeNodeStreamState.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRUnbufferedCommonTreeNodeStreamState.m; sourceTree = "<group>"; };
-		1AAC202A11CC621A00CF56D1 /* ANTLRTreePatternLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreePatternLexer.h; sourceTree = "<group>"; };
-		1AAC202B11CC621A00CF56D1 /* ANTLRTreePatternLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTreePatternLexer.m; sourceTree = "<group>"; };
-		1AAC20A311CC790300CF56D1 /* ANTLRTreePatternParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreePatternParser.h; sourceTree = "<group>"; };
-		1AAC20A411CC790300CF56D1 /* ANTLRTreePatternParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTreePatternParser.m; sourceTree = "<group>"; };
-		1AB4A58F11B9A0DA0076E91A /* ANTLRStreamEnumerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRStreamEnumerator.h; sourceTree = "<group>"; };
-		1AB4A59011B9A0DA0076E91A /* ANTLRStreamEnumerator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRStreamEnumerator.m; sourceTree = "<group>"; };
-		1AB5F47511E3869D00E065B0 /* ANTLRRuleMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRRuleMapElement.h; sourceTree = "<group>"; };
-		1AB5F47611E3869D00E065B0 /* ANTLRRuleMapElement.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRRuleMapElement.m; sourceTree = "<group>"; };
-		1AB5F51C11E3BE2E00E065B0 /* ANTLRPtrBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRPtrBuffer.h; sourceTree = "<group>"; };
-		1AB5F51D11E3BE2E00E065B0 /* ANTLRPtrBuffer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRPtrBuffer.m; sourceTree = "<group>"; };
-		1AC5AC7212E7BBB600DF0C58 /* files */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = files; path = treeparser/files; sourceTree = "<group>"; };
-		1AC5AC7312E7BBB600DF0C58 /* input */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = input; path = treeparser/input; sourceTree = "<group>"; };
-		1AC5AC7412E7BBB600DF0C58 /* Lang.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = Lang.g; path = treeparser/Lang.g; sourceTree = "<group>"; };
-		1AC5AC7512E7BBB600DF0C58 /* LangDumpDecl.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = LangDumpDecl.g; path = treeparser/LangDumpDecl.g; sourceTree = "<group>"; };
-		1AC5AC7712E7BBB600DF0C58 /* output */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = output; path = treeparser/output; sourceTree = "<group>"; };
-		1AC5AC7812E7BBB600DF0C58 /* README.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = README.txt; path = treeparser/README.txt; sourceTree = "<group>"; };
-		1AC5AC8112E7BC9100DF0C58 /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = main.m; path = treeparser/main.m; sourceTree = "<group>"; };
-		1AC5ACA712E7BEFE00DF0C58 /* treeparser */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = treeparser; sourceTree = BUILT_PRODUCTS_DIR; };
-		1AC5ACC412E7C03C00DF0C58 /* Lang.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = Lang.tokens; path = treeparser/Lang.tokens; sourceTree = "<group>"; };
-		1AC5ACC512E7C03C00DF0C58 /* LangDumpDecl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LangDumpDecl.h; path = treeparser/LangDumpDecl.h; sourceTree = "<group>"; };
-		1AC5ACC612E7C03C00DF0C58 /* LangDumpDecl.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = LangDumpDecl.m; path = treeparser/LangDumpDecl.m; sourceTree = "<group>"; };
-		1AC5ACC712E7C03C00DF0C58 /* LangDumpDecl.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = LangDumpDecl.tokens; path = treeparser/LangDumpDecl.tokens; sourceTree = "<group>"; };
-		1AC5ACC812E7C03C00DF0C58 /* LangLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LangLexer.h; path = treeparser/LangLexer.h; sourceTree = "<group>"; };
-		1AC5ACC912E7C03C00DF0C58 /* LangLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = LangLexer.m; path = treeparser/LangLexer.m; sourceTree = "<group>"; };
-		1AC5ACCA12E7C03C00DF0C58 /* LangParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LangParser.h; path = treeparser/LangParser.h; sourceTree = "<group>"; };
-		1AC5ACCB12E7C03C00DF0C58 /* LangParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = LangParser.m; path = treeparser/LangParser.m; sourceTree = "<group>"; };
-		1ADB66F012E74341007C1661 /* FuzzyLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FuzzyLexer.h; sourceTree = "<group>"; };
-		1ADE21F012E505D700E8A95C /* SimpleC.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleC.g; sourceTree = "<group>"; };
-		1AE8A96A11D9227A00D36FD6 /* ANTLRRuleStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRRuleStack.h; sourceTree = "<group>"; };
-		1AE8A96B11D9227A00D36FD6 /* ANTLRRuleStack.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRRuleStack.m; sourceTree = "<group>"; };
-		32DBCF5E0370ADEE00C91783 /* ANTLR_Prefix.pch */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLR_Prefix.pch; sourceTree = "<group>"; };
-		8DC2EF5A0486A6940098B216 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist; path = Info.plist; sourceTree = "<group>"; };
-		8DC2EF5B0486A6940098B216 /* ANTLR.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = ANTLR.framework; sourceTree = BUILT_PRODUCTS_DIR; };
-		F7009AD90A1BE4AE002EDD5D /* ANTLRLexerRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRLexerRuleReturnScope.h; sourceTree = "<group>"; };
-		F7009ADA0A1BE4AE002EDD5D /* ANTLRLexerRuleReturnScope.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRLexerRuleReturnScope.m; sourceTree = "<group>"; };
-		F700E6190A5F66EC005D0757 /* ANTLREarlyExitException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLREarlyExitException.h; sourceTree = "<group>"; };
-		F700E61A0A5F66EC005D0757 /* ANTLREarlyExitException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLREarlyExitException.m; sourceTree = "<group>"; };
-		F700E85E0A5FA2DE005D0757 /* Combined.g */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; name = Combined.g; path = combined/Combined.g; sourceTree = "<group>"; };
-		F700E8640A5FA31D005D0757 /* combined */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = combined; sourceTree = BUILT_PRODUCTS_DIR; };
-		F700E86A0A5FA34D005D0757 /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = main.m; path = combined/main.m; sourceTree = "<group>"; };
-		F700E8F90A5FAD21005D0757 /* ANTLRParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRParser.h; sourceTree = "<group>"; };
-		F700E8FA0A5FAD21005D0757 /* ANTLRParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRParser.m; sourceTree = "<group>"; };
-		F700ECCF0A5FE176005D0757 /* input */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		F700ECD00A5FE176005D0757 /* output */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		F700ECD70A5FE186005D0757 /* LL-star */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "LL-star"; sourceTree = BUILT_PRODUCTS_DIR; };
-		F700ECE70A5FE25D005D0757 /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		F700ED7E0A5FF17C005D0757 /* ANTLRTokenSource.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTokenSource.h; sourceTree = "<group>"; };
-		F700ED940A5FF2A5005D0757 /* ANTLRCommonTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonTokenStream.h; sourceTree = "<group>"; };
-		F700ED950A5FF2A5005D0757 /* ANTLRCommonTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonTokenStream.m; sourceTree = "<group>"; };
-		F7037CEE0A0582FC0070435D /* ANTLRMismatchedRangeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRMismatchedRangeException.h; sourceTree = "<group>"; };
-		F7037CEF0A0582FC0070435D /* ANTLRMismatchedRangeException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRMismatchedRangeException.m; sourceTree = "<group>"; };
-		F7037EA00A05AFB60070435D /* lexertest-simple */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "lexertest-simple"; sourceTree = BUILT_PRODUCTS_DIR; };
-		F7037EBD0A05B06B0070435D /* TestLexer.g */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = TestLexer.g; sourceTree = "<group>"; };
-		F70380BA0A07FA0D0070435D /* ANTLRMismatchedSetException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRMismatchedSetException.h; sourceTree = "<group>"; };
-		F70380BB0A07FA0D0070435D /* ANTLRMismatchedSetException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRMismatchedSetException.m; sourceTree = "<group>"; };
-		F7048FF50B07D05400D2F326 /* Test.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Test.tokens; sourceTree = "<group>"; };
-		F7048FF60B07D05400D2F326 /* TestLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TestLexer.h; sourceTree = "<group>"; };
-		F7048FF70B07D05400D2F326 /* TestLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TestLexer.m; sourceTree = "<group>"; };
-		F706A55B0A0EC307008999AB /* input */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		F70AA7A509AA2A6900C3FD5E /* ANTLR.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLR.h; sourceTree = "<group>"; };
-		F70AA7AD09AA2AC000C3FD5E /* ANTLRIntStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRIntStream.h; sourceTree = "<group>"; };
-		F70AA7B509AA2B8800C3FD5E /* ANTLRCharStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRCharStream.h; sourceTree = "<group>"; };
-		F70AA7C509AA339900C3FD5E /* ANTLRStringStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRStringStream.h; sourceTree = "<group>"; };
-		F70AA7C609AA339900C3FD5E /* ANTLRStringStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRStringStream.m; sourceTree = "<group>"; };
-		F70AA7CE09AA379300C3FD5E /* ANTLRCharStreamState.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRCharStreamState.m; sourceTree = "<group>"; };
-		F70B11BB0C4C2B6400C3ECE0 /* ANTLRRewriteRuleSubtreeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRRewriteRuleSubtreeStream.h; sourceTree = "<group>"; };
-		F70B11BC0C4C2B6400C3ECE0 /* ANTLRRewriteRuleSubtreeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRRewriteRuleSubtreeStream.m; sourceTree = "<group>"; };
-		F70B11C10C4C2B7900C3ECE0 /* ANTLRRewriteRuleTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRRewriteRuleTokenStream.h; sourceTree = "<group>"; };
-		F70B11C20C4C2B7900C3ECE0 /* ANTLRRewriteRuleTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRRewriteRuleTokenStream.m; sourceTree = "<group>"; };
-		F70BB390098E5BB80054FEF8 /* SenTestingKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = SenTestingKit.framework; path = Library/Frameworks/SenTestingKit.framework; sourceTree = DEVELOPER_DIR; };
-		F71325860C4A060900B99F2D /* ANTLRRewriteRuleElementStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRRewriteRuleElementStream.h; sourceTree = "<group>"; };
-		F71325870C4A060900B99F2D /* ANTLRRewriteRuleElementStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRRewriteRuleElementStream.m; sourceTree = "<group>"; };
-		F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = /System/Library/Frameworks/CoreFoundation.framework; sourceTree = "<absolute>"; };
-		F72B8CFA0AD01D380013F1E2 /* Fuzzy.g */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = Fuzzy.g; sourceTree = "<group>"; };
-		F72B8D090AD01DCB0013F1E2 /* Fuzzy.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Fuzzy.tokens; sourceTree = "<group>"; };
-		F72B8D0B0AD01DCB0013F1E2 /* FuzzyLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = FuzzyLexer.m; sourceTree = "<group>"; };
-		F72C58E80AB3911D00282574 /* ANTLRCommonTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonTree.h; sourceTree = "<group>"; };
-		F72C58E90AB3911D00282574 /* ANTLRCommonTree.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonTree.m; sourceTree = "<group>"; };
-		F72C59A50AB4F20A00282574 /* ANTLRCommonTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonTreeAdaptor.h; sourceTree = "<group>"; };
-		F72C59A60AB4F20A00282574 /* ANTLRCommonTreeAdaptor.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonTreeAdaptor.m; sourceTree = "<group>"; };
-		F72C5B820AB52AD300282574 /* ANTLRTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreeNodeStream.h; sourceTree = "<group>"; };
-		F72C5D540AB63C1D00282574 /* ANTLRTreeParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreeParser.h; sourceTree = "<group>"; };
-		F72C5D550AB63C1D00282574 /* ANTLRTreeParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTreeParser.m; sourceTree = "<group>"; };
-		F72C5D600AB63E0B00282574 /* ANTLRMismatchedTreeNodeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRMismatchedTreeNodeException.h; sourceTree = "<group>"; };
-		F72C5D610AB63E0B00282574 /* ANTLRMismatchedTreeNodeException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRMismatchedTreeNodeException.m; sourceTree = "<group>"; };
-		F72C5E2F0AB7529C00282574 /* input */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		F72C5E310AB7529C00282574 /* output */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		F72C5E560AB7E41000282574 /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		F72C5E690AB7E4C900282574 /* simplectree */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = simplectree; sourceTree = BUILT_PRODUCTS_DIR; };
-		F72C5ECC0AB7E5A500282574 /* ANTLRParserRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRParserRuleReturnScope.h; sourceTree = "<group>"; };
-		F72C5ECD0AB7E5A500282574 /* ANTLRParserRuleReturnScope.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRParserRuleReturnScope.m; sourceTree = "<group>"; };
-		F738D1730B07AEAA001813C4 /* ANTLRFailedPredicateException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRFailedPredicateException.h; sourceTree = "<group>"; };
-		F738D1740B07AEAA001813C4 /* ANTLRFailedPredicateException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRFailedPredicateException.m; sourceTree = "<group>"; };
-		F738D1750B07AEAA001813C4 /* ANTLRTreeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreeException.h; sourceTree = "<group>"; };
-		F738D1760B07AEAA001813C4 /* ANTLRTreeException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRTreeException.m; sourceTree = "<group>"; };
-		F738D1FD0B07B1CE001813C4 /* SymbolTable.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SymbolTable.tokens; sourceTree = "<group>"; };
-		F738D1FE0B07B1CE001813C4 /* SymbolTableLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SymbolTableLexer.h; sourceTree = "<group>"; };
-		F738D1FF0B07B1CE001813C4 /* SymbolTableLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SymbolTableLexer.m; sourceTree = "<group>"; };
-		F738D2000B07B1CE001813C4 /* SymbolTableParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SymbolTableParser.h; sourceTree = "<group>"; };
-		F738D2010B07B1CE001813C4 /* SymbolTableParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SymbolTableParser.m; sourceTree = "<group>"; };
-		F738D2230B07B3BC001813C4 /* TParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TParser.h; sourceTree = "<group>"; };
-		F738D2240B07B3BC001813C4 /* TParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TParser.m; sourceTree = "<group>"; };
-		F738D2510B07B842001813C4 /* SimpleCParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCParser.m; sourceTree = "<group>"; };
-		F738D27F0B07B9B6001813C4 /* SimpleC.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleC.tokens; sourceTree = "<group>"; };
-		F738D2800B07B9B6001813C4 /* SimpleCLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SimpleCLexer.h; sourceTree = "<group>"; };
-		F738D2810B07B9B6001813C4 /* SimpleCLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SimpleCLexer.m; sourceTree = "<group>"; };
-		F738D2820B07B9B6001813C4 /* SimpleCParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SimpleCParser.h; sourceTree = "<group>"; };
-		F738D35C0B07C105001813C4 /* Combined.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = Combined.tokens; path = combined/Combined.tokens; sourceTree = "<group>"; };
-		F738D35D0B07C105001813C4 /* CombinedLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = CombinedLexer.h; path = combined/CombinedLexer.h; sourceTree = "<group>"; };
-		F738D35E0B07C105001813C4 /* CombinedLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = CombinedLexer.m; path = combined/CombinedLexer.m; sourceTree = "<group>"; };
-		F738D35F0B07C105001813C4 /* CombinedParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = CombinedParser.h; path = combined/CombinedParser.h; sourceTree = "<group>"; };
-		F738D3600B07C105001813C4 /* CombinedParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = CombinedParser.m; path = combined/CombinedParser.m; sourceTree = "<group>"; };
-		F73E2B720A9CFE6A005D6267 /* ANTLRTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTree.h; sourceTree = "<group>"; };
-		F73E2B7A0A9D0AFC005D6267 /* ANTLRTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTreeAdaptor.h; sourceTree = "<group>"; };
-		F741D0640B3812D40024DF3F /* SimpleCWalker.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SimpleCWalker.h; sourceTree = "<group>"; };
-		F741D0650B3812D40024DF3F /* SimpleCWalker.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SimpleCWalker.m; sourceTree = "<group>"; };
-		F762879C0B71578D006AA7EF /* README.rtf */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.rtf; path = README.rtf; sourceTree = "<group>"; };
-		F76AA98E0CEA515A00AF044C /* ANTLRCommonTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonTreeNodeStream.h; sourceTree = "<group>"; };
-		F76AA98F0CEA515A00AF044C /* ANTLRCommonTreeNodeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonTreeNodeStream.m; sourceTree = "<group>"; };
-		F7715D1A0AC9DCE400ED984D /* SimpleC.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleC.g; sourceTree = "<group>"; };
-		F7715D1B0AC9DCE500ED984D /* SimpleCWalker.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleCWalker.g; sourceTree = "<group>"; };
-		F7715D1C0AC9DDD800ED984D /* SimpleC.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleC.tokens; sourceTree = "<group>"; };
-		F7715D1D0AC9DDD800ED984D /* SimpleCWalker.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleCWalker.tokens; sourceTree = "<group>"; };
-		F7715D2E0AC9DE9E00ED984D /* SimpleCLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SimpleCLexer.h; sourceTree = "<group>"; };
-		F7715D2F0AC9DE9E00ED984D /* SimpleCLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SimpleCLexer.m; sourceTree = "<group>"; };
-		F7715D300AC9DE9E00ED984D /* SimpleCParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SimpleCParser.h; sourceTree = "<group>"; };
-		F7715D310AC9DE9E00ED984D /* SimpleCParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SimpleCParser.m; sourceTree = "<group>"; };
-		F7754E3D0A5C0A0500D0873A /* ANTLRDFA.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRDFA.h; sourceTree = "<group>"; };
-		F7754E3E0A5C0A0500D0873A /* ANTLRDFA.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRDFA.m; sourceTree = "<group>"; };
-		F77744030B234A3400D1F89B /* ANTLRToken+DebuggerSupport.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "ANTLRToken+DebuggerSupport.h"; sourceTree = "<group>"; };
-		F77744040B234A3400D1F89B /* ANTLRToken+DebuggerSupport.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "ANTLRToken+DebuggerSupport.m"; sourceTree = "<group>"; };
-		F77747550B23A70600D1F89B /* ANTLRDebug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRDebug.h; sourceTree = "<group>"; };
-		F77765CA09DC583000517181 /* ANTLRToken.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRToken.h; sourceTree = "<group>"; };
-		F777660309DC5CF400517181 /* ANTLRCommonToken.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonToken.h; sourceTree = "<group>"; };
-		F777660409DC5CF400517181 /* ANTLRCommonToken.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonToken.m; sourceTree = "<group>"; };
-		F777668009DC719C00517181 /* ANTLRMismatchedTokenException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRMismatchedTokenException.h; sourceTree = "<group>"; };
-		F777668109DC719C00517181 /* ANTLRMismatchedTokenException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRMismatchedTokenException.m; sourceTree = "<group>"; };
-		F777669109DC72D600517181 /* ANTLRRecognitionException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRRecognitionException.h; sourceTree = "<group>"; };
-		F777669209DC72D600517181 /* ANTLRRecognitionException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRRecognitionException.m; sourceTree = "<group>"; };
-		F77766AE09DD53E800517181 /* ANTLRTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRTokenStream.h; sourceTree = "<group>"; };
-		F79D56600A0E23A400EA3CEE /* fuzzy */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = fuzzy; sourceTree = BUILT_PRODUCTS_DIR; };
-		F79D56C00A0E287500EA3CEE /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		F79D59890A0E51AB00EA3CEE /* ANTLRNoViableAltException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRNoViableAltException.h; sourceTree = "<group>"; };
-		F79D598A0A0E51AB00EA3CEE /* ANTLRNoViableAltException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRNoViableAltException.m; sourceTree = "<group>"; };
-		F7A4098B09659BF3002CC781 /* ANTLRBaseRecognizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBaseRecognizer.h; sourceTree = "<group>"; };
-		F7A4098C09659BF3002CC781 /* ANTLRBaseRecognizer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBaseRecognizer.m; sourceTree = "<group>"; };
-		F7A4099109659BFB002CC781 /* ANTLRLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRLexer.h; sourceTree = "<group>"; };
-		F7A4099209659BFB002CC781 /* ANTLRLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRLexer.m; sourceTree = "<group>"; };
-		F7B1E5AC0CD7CF1900CE136E /* ANTLRRecognizerSharedState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRRecognizerSharedState.h; sourceTree = "<group>"; };
-		F7B1E5AD0CD7CF1900CE136E /* ANTLRRecognizerSharedState.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRRecognizerSharedState.m; sourceTree = "<group>"; };
-		F7CD45FC0C64BA4B00FF933A /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		F7CD45FD0C64BA4B00FF933A /* TreeRewrite.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = TreeRewrite.g; sourceTree = "<group>"; };
-		F7CD46340C64BB7300FF933A /* TreeRewrite.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = TreeRewrite.tokens; sourceTree = "<group>"; };
-		F7CD46350C64BB7300FF933A /* TreeRewriteLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeRewriteLexer.h; sourceTree = "<group>"; };
-		F7CD46360C64BB7300FF933A /* TreeRewriteLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeRewriteLexer.m; sourceTree = "<group>"; };
-		F7CD46370C64BB7300FF933A /* TreeRewriteParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeRewriteParser.h; sourceTree = "<group>"; };
-		F7CD46380C64BB7300FF933A /* TreeRewriteParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeRewriteParser.m; sourceTree = "<group>"; };
-		F7CD475D0C64D22800FF933A /* treerewrite */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = treerewrite; sourceTree = BUILT_PRODUCTS_DIR; };
-		F7CECD7D0B1E5C370054CC3B /* ANTLRDebugEventListener.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRDebugEventListener.h; sourceTree = "<group>"; };
-		F7DD05E20A7B14BE006A006C /* input */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		F7DD05E30A7B14BE006A006C /* output */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		F7DD05E40A7B14BE006A006C /* T.g */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = T.g; sourceTree = "<group>"; };
-		F7DD05E70A7B1572006A006C /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		F7DD05EE0A7B15E1006A006C /* hoistedPredicates */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = hoistedPredicates; sourceTree = BUILT_PRODUCTS_DIR; };
-		F7DD06E70A7B1700006A006C /* TLexer.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = TLexer.h; sourceTree = "<group>"; };
-		F7DD06E80A7B1700006A006C /* TLexer.m */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.objc; path = TLexer.m; sourceTree = "<group>"; };
-		F7DD073C0A7B660A006A006C /* input */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		F7DD073D0A7B660A006A006C /* output */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		F7DD073E0A7B660A006A006C /* SymbolTable.g */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = SymbolTable.g; sourceTree = "<group>"; };
-		F7DD07440A7B6618006A006C /* scopes */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = scopes; sourceTree = BUILT_PRODUCTS_DIR; };
-		F7DD07800A7B67A7006A006C /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		F7E261140B1E44320013F640 /* ANTLRDebugParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRDebugParser.h; sourceTree = "<group>"; };
-		F7E261150B1E44320013F640 /* ANTLRDebugParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRDebugParser.m; sourceTree = "<group>"; };
-		F7E261180B1E443C0013F640 /* ANTLRDebugTreeParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRDebugTreeParser.h; sourceTree = "<group>"; };
-		F7E261190B1E443C0013F640 /* ANTLRDebugTreeParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRDebugTreeParser.m; sourceTree = "<group>"; };
-		F7E2611E0B1E44E80013F640 /* ANTLRDebugTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRDebugTokenStream.h; sourceTree = "<group>"; };
-		F7E2611F0B1E44E80013F640 /* ANTLRDebugTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRDebugTokenStream.m; sourceTree = "<group>"; };
-		F7E261220B1E44FA0013F640 /* ANTLRDebugTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRDebugTreeNodeStream.h; sourceTree = "<group>"; };
-		F7E261230B1E44FA0013F640 /* ANTLRDebugTreeNodeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRDebugTreeNodeStream.m; sourceTree = "<group>"; };
-		F7E261260B1E45070013F640 /* ANTLRDebugTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRDebugTreeAdaptor.h; sourceTree = "<group>"; };
-		F7E261270B1E45070013F640 /* ANTLRDebugTreeAdaptor.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRDebugTreeAdaptor.m; sourceTree = "<group>"; };
-		F7E261370B1E45580013F640 /* ANTLRDebugEventProxy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRDebugEventProxy.h; sourceTree = "<group>"; };
-		F7E261380B1E45580013F640 /* ANTLRDebugEventProxy.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRDebugEventProxy.m; sourceTree = "<group>"; };
-		F7E983940A0D6A5F00F16093 /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		F7EFFC8B0D164E2C008EE57E /* CHANGES.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = CHANGES.txt; sourceTree = "<group>"; };
-		F7F218EE097AFB1A000472E9 /* ANTLRBitSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBitSet.h; sourceTree = "<group>"; };
-		F7F218EF097AFB1A000472E9 /* ANTLRBitSet.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBitSet.m; sourceTree = "<group>"; };
-/* End PBXFileReference section */
-
-/* Begin PBXFrameworksBuildPhase section */
-		1A0F345D12EA42D800496BB8 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A0F345E12EA42D800496BB8 /* ANTLR.framework in Frameworks */,
-				1A0F345F12EA42D800496BB8 /* Foundation.framework in Frameworks */,
-				1A0F346012EA42D800496BB8 /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A1210F811D3A5D900F27B38 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A12117911D3B45C00F27B38 /* ANTLR.framework in Frameworks */,
-				1A12117A11D3B47000F27B38 /* Cocoa.framework in Frameworks */,
-				1A12117B11D3B47000F27B38 /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A1211CE11D3BF4600F27B38 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A1211D711D3BF6800F27B38 /* ANTLR.framework in Frameworks */,
-				1A1211D811D3BF6800F27B38 /* Cocoa.framework in Frameworks */,
-				1A1211D911D3BF6800F27B38 /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A12122111D3C92400F27B38 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A12122B11D3C93500F27B38 /* ANTLR.framework in Frameworks */,
-				1A12122C11D3C93500F27B38 /* Cocoa.framework in Frameworks */,
-				1A12122D11D3C93500F27B38 /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A1212D911D3F53600F27B38 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A1212E211D3F55500F27B38 /* ANTLR.framework in Frameworks */,
-				1A1212E311D3F55500F27B38 /* Cocoa.framework in Frameworks */,
-				1A1212E411D3F55500F27B38 /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A12130911D3F7CD00F27B38 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A12131211D3F7DC00F27B38 /* ANTLR.framework in Frameworks */,
-				1A12131311D3F7DC00F27B38 /* Cocoa.framework in Frameworks */,
-				1A12131411D3F7DC00F27B38 /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A348B4B11D2BEE8000C72FC /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A348B5811D2BF1C000C72FC /* ANTLR.framework in Frameworks */,
-				1A348BB611D2C711000C72FC /* Cocoa.framework in Frameworks */,
-				1A348BB811D2C711000C72FC /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A348BE911D2D0A1000C72FC /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A348BF211D2D0E0000C72FC /* Cocoa.framework in Frameworks */,
-				1A348BF311D2D0E0000C72FC /* CoreFoundation.framework in Frameworks */,
-				1A348BF411D2D0E7000C72FC /* ANTLR.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A77EE8612E6A552007F323A /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A77EE9312E6A57C007F323A /* Cocoa.framework in Frameworks */,
-				1A77EE9412E6A57C007F323A /* CoreFoundation.framework in Frameworks */,
-				1A77EE9712E6A594007F323A /* ANTLR.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1AC5AC9D12E7BEFE00DF0C58 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AC5AC9E12E7BEFE00DF0C58 /* ANTLR.framework in Frameworks */,
-				1AC5AC9F12E7BEFE00DF0C58 /* Foundation.framework in Frameworks */,
-				1AC5ACA112E7BEFE00DF0C58 /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		8DC2EF560486A6940098B216 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F763D51E0A66765B0061CD35 /* CoreFoundation.framework in Frameworks */,
-				1ADB67BA12E74E82007C1661 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F700E8620A5FA31D005D0757 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F700ECA40A5FDF1A005D0757 /* CoreFoundation.framework in Frameworks */,
-				F700ECA50A5FDF1A005D0757 /* FuzzyLexer.h in Frameworks */,
-				F700EC670A5FDF0D005D0757 /* ANTLR.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F700ECD50A5FE186005D0757 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F7F4E9BA0A6E8B110092D087 /* Foundation.framework in Frameworks */,
-				F763D4490A666D3D0061CD35 /* ANTLR.framework in Frameworks */,
-				F700ECD90A5FE19A005D0757 /* CoreFoundation.framework in Frameworks */,
-				F700ECDA0A5FE19A005D0757 /* FuzzyLexer.h in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F7037E9E0A05AFB60070435D /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F7037EA60A05AFD70070435D /* ANTLR.framework in Frameworks */,
-				F7E985580A0D865E00F16093 /* Foundation.framework in Frameworks */,
-				F7E985590A0D866000F16093 /* FuzzyLexer.h in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F72C5E610AB7E4C900282574 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F72C5E620AB7E4C900282574 /* ANTLR.framework in Frameworks */,
-				F72C5E630AB7E4C900282574 /* Foundation.framework in Frameworks */,
-				F72C5E650AB7E4C900282574 /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F79D565E0A0E23A400EA3CEE /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F79D5AF60A0E634900EA3CEE /* ANTLR.framework in Frameworks */,
-				F79D5AF70A0E634A00EA3CEE /* Foundation.framework in Frameworks */,
-				F79D5AF80A0E634A00EA3CEE /* FuzzyLexer.h in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F7CD475B0C64D22800FF933A /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F7CD48670C64D88800FF933A /* ANTLR.framework in Frameworks */,
-				F7CD48680C64D88800FF933A /* Foundation.framework in Frameworks */,
-				F7CD486A0C64D88800FF933A /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F7DD05EC0A7B15E1006A006C /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F7DD06040A7B1663006A006C /* FuzzyLexer.h in Frameworks */,
-				F7DD06070A7B1664006A006C /* CoreFoundation.framework in Frameworks */,
-				F7DD06300A7B1665006A006C /* Foundation.framework in Frameworks */,
-				F7DD06C50A7B1691006A006C /* ANTLR.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F7DD07420A7B6618006A006C /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F7DD074C0A7B6656006A006C /* ANTLR.framework in Frameworks */,
-				F7DD074D0A7B665C006A006C /* Foundation.framework in Frameworks */,
-				F7DD074E0A7B665D006A006C /* FuzzyLexer.h in Frameworks */,
-				F7DD074F0A7B665D006A006C /* CoreFoundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXFrameworksBuildPhase section */
-
-/* Begin PBXGroup section */
-		034768DFFF38A50411DB9C8B /* Products */ = {
-			isa = PBXGroup;
-			children = (
-				8DC2EF5B0486A6940098B216 /* ANTLR.framework */,
-				F7037EA00A05AFB60070435D /* lexertest-simple */,
-				F79D56600A0E23A400EA3CEE /* fuzzy */,
-				F700E8640A5FA31D005D0757 /* combined */,
-				F700ECD70A5FE186005D0757 /* LL-star */,
-				F7DD05EE0A7B15E1006A006C /* hoistedPredicates */,
-				F7DD07440A7B6618006A006C /* scopes */,
-				F72C5E690AB7E4C900282574 /* simplectree */,
-				F7CD475D0C64D22800FF933A /* treerewrite */,
-				1A348B4E11D2BEE8000C72FC /* Test.octest */,
-				1A348BEC11D2D0A1000C72FC /* ANTLRBitsetTest.octest */,
-				1A1210FB11D3A5D900F27B38 /* ANTLRCommonTokenTest.octest */,
-				1A1211D111D3BF4700F27B38 /* ANTLRStringStreamTest.octest */,
-				1A12122411D3C92400F27B38 /* ANTLRFastQueueTest.octest */,
-				1A1212DC11D3F53600F27B38 /* ANTLRIntArrayTest.octest */,
-				1A12130C11D3F7CD00F27B38 /* ANTLRCommonTreeTest.octest */,
-				1A77EE8912E6A552007F323A /* TreeRewriteRuleTokenStream.octest */,
-				1AC5ACA712E7BEFE00DF0C58 /* treeparser */,
-				1A0F346612EA42D800496BB8 /* polydiff */,
-			);
-			name = Products;
-			sourceTree = "<group>";
-		};
-		0867D691FE84028FC02AAC07 /* ANTLR */ = {
-			isa = PBXGroup;
-			children = (
-				F762879C0B71578D006AA7EF /* README.rtf */,
-				F7EFFC8B0D164E2C008EE57E /* CHANGES.txt */,
-				08FB77AEFE84172EC02AAC07 /* Classes */,
-				F7037EBB0A05B06B0070435D /* examples */,
-				32C88DFF0371C24200C91783 /* Other Sources */,
-				089C1665FE841158C02AAC07 /* Resources */,
-				0867D69AFE84028FC02AAC07 /* External Frameworks and Libraries */,
-				034768DFFF38A50411DB9C8B /* Products */,
-				1A348B1411D2BE4F000C72FC /* test */,
-				1A348B4F11D2BEE8000C72FC /* Test-Info.plist */,
-				1A348BB511D2C711000C72FC /* Cocoa.framework */,
-				1A348BB711D2C711000C72FC /* CoreFoundation.framework */,
-				1A348BED11D2D0A1000C72FC /* ANTLRBitsetTest-Info.plist */,
-				1A1210FC11D3A5DA00F27B38 /* ANTLRCommonTokenTest-Info.plist */,
-				1A1211D211D3BF4700F27B38 /* ANTLRStringStreamTest-Info.plist */,
-				1A12122511D3C92400F27B38 /* ANTLRFastQueueTest-Info.plist */,
-				1A12122A11D3C93500F27B38 /* ANTLR.framework */,
-				1A1212DD11D3F53600F27B38 /* ANTLRIntArrayTest-Info.plist */,
-				1A12130D11D3F7CD00F27B38 /* ANTLRCommonTreeTest-Info.plist */,
-				1A77EE8A12E6A552007F323A /* TreeRewriteRuleTokenStream-Info.plist */,
-			);
-			name = ANTLR;
-			sourceTree = "<group>";
-		};
-		0867D69AFE84028FC02AAC07 /* External Frameworks and Libraries */ = {
-			isa = PBXGroup;
-			children = (
-				F70BB390098E5BB80054FEF8 /* SenTestingKit.framework */,
-				1058C7B0FEA5585E11CA2CBB /* Linked Frameworks */,
-				1058C7B2FEA5585E11CA2CBB /* Other Frameworks */,
-			);
-			name = "External Frameworks and Libraries";
-			sourceTree = "<group>";
-		};
-		089C1665FE841158C02AAC07 /* Resources */ = {
-			isa = PBXGroup;
-			children = (
-				8DC2EF5A0486A6940098B216 /* Info.plist */,
-				089C1666FE841158C02AAC07 /* InfoPlist.strings */,
-			);
-			name = Resources;
-			sourceTree = "<group>";
-		};
-		08FB77AEFE84172EC02AAC07 /* Classes */ = {
-			isa = PBXGroup;
-			children = (
-				F7E2610F0B1E43E60013F640 /* Debugging */,
-				F7A40951096597D2002CC781 /* DFA */,
-				F7A4094C096597C4002CC781 /* Exceptions */,
-				F7F218EB097AFB0C000472E9 /* Misc */,
-				F7A4098809659BE5002CC781 /* Recognizer */,
-				1A1BCDC011CB04D20051A1EC /* Rules */,
-				F70AA7AA09AA2AAB00C3FD5E /* Streams */,
-				F7492F8D09C0171900B25E30 /* Tokens */,
-				F73E2B590A9CF83A005D6267 /* Trees */,
-			);
-			name = Classes;
-			sourceTree = "<group>";
-		};
-		1058C7B0FEA5585E11CA2CBB /* Linked Frameworks */ = {
-			isa = PBXGroup;
-			children = (
-				F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */,
-				1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */,
-			);
-			name = "Linked Frameworks";
-			sourceTree = "<group>";
-		};
-		1058C7B2FEA5585E11CA2CBB /* Other Frameworks */ = {
-			isa = PBXGroup;
-			children = (
-				0867D69BFE84028FC02AAC07 /* Foundation.framework */,
-			);
-			name = "Other Frameworks";
-			sourceTree = "<group>";
-		};
-		1A0F342C12EA411F00496BB8 /* polydiff */ = {
-			isa = PBXGroup;
-			children = (
-				1A0F347812EA444500496BB8 /* Poly.tokens */,
-				1A0F347912EA444500496BB8 /* PolyDifferentiator.m */,
-				1A0F347A12EA444500496BB8 /* PolyLexer.h */,
-				1A0F347B12EA444500496BB8 /* PolyLexer.m */,
-				1A0F347C12EA444500496BB8 /* PolyParser.h */,
-				1A0F347D12EA444500496BB8 /* PolyParser.m */,
-				1A0F347E12EA444500496BB8 /* Simplifier.h */,
-				1A0F347F12EA444500496BB8 /* Simplifier.m */,
-				1A0F348012EA444500496BB8 /* Simplifier.tokens */,
-				1A0F342D12EA411F00496BB8 /* files */,
-				1A0F342E12EA411F00496BB8 /* input */,
-				1A0F343012EA411F00496BB8 /* Main.m */,
-				1A0F343112EA411F00496BB8 /* output */,
-				1A0F343212EA411F00496BB8 /* Poly.g */,
-				1A0F343312EA411F00496BB8 /* PolyDifferentiator.g */,
-				1A0F343412EA411F00496BB8 /* PolyPrinter.g */,
-				1A0F343512EA411F00496BB8 /* Simplifier.g */,
-			);
-			path = polydiff;
-			sourceTree = "<group>";
-		};
-		1A1BCDC011CB04D20051A1EC /* Rules */ = {
-			isa = PBXGroup;
-			children = (
-				1AB5F47511E3869D00E065B0 /* ANTLRRuleMapElement.h */,
-				1AB5F47611E3869D00E065B0 /* ANTLRRuleMapElement.m */,
-				F72C5ECC0AB7E5A500282574 /* ANTLRParserRuleReturnScope.h */,
-				F72C5ECD0AB7E5A500282574 /* ANTLRParserRuleReturnScope.m */,
-				1A1BCDB911CB01E60051A1EC /* ANTLRRuleReturnScope.h */,
-				1A1BCDBA11CB01E60051A1EC /* ANTLRRuleReturnScope.m */,
-				1A1BCDCD11CB0B3D0051A1EC /* ANTLRTreeRuleReturnScope.h */,
-				1A1BCDCE11CB0B3D0051A1EC /* ANTLRTreeRuleReturnScope.m */,
-			);
-			name = Rules;
-			sourceTree = "<group>";
-		};
-		1A348B1411D2BE4F000C72FC /* test */ = {
-			isa = PBXGroup;
-			children = (
-				1A348B1511D2BE4F000C72FC /* runtime */,
-			);
-			path = test;
-			sourceTree = "<group>";
-		};
-		1A348B1511D2BE4F000C72FC /* runtime */ = {
-			isa = PBXGroup;
-			children = (
-				1A348B1611D2BE4F000C72FC /* misc */,
-				1A348B1B11D2BE4F000C72FC /* sets */,
-				1A348B1E11D2BE4F000C72FC /* stream */,
-				1A348B2111D2BE4F000C72FC /* token */,
-				1A348B2411D2BE4F000C72FC /* tree */,
-				1A77EE1912E6A03B007F323A /* RewriteRule */,
-			);
-			path = runtime;
-			sourceTree = "<group>";
-		};
-		1A348B1611D2BE4F000C72FC /* misc */ = {
-			isa = PBXGroup;
-			children = (
-				1A348B1711D2BE4F000C72FC /* ANTLRFastQueueTest.h */,
-				1A348B1811D2BE4F000C72FC /* ANTLRFastQueueTest.m */,
-				1A348B1911D2BE4F000C72FC /* ANTLRIntArrayTest.h */,
-				1A348B1A11D2BE4F000C72FC /* ANTLRIntArrayTest.m */,
-			);
-			path = misc;
-			sourceTree = "<group>";
-		};
-		1A348B1B11D2BE4F000C72FC /* sets */ = {
-			isa = PBXGroup;
-			children = (
-				1A348B1C11D2BE4F000C72FC /* ANTLRBitSetTest.h */,
-				1A348B1D11D2BE4F000C72FC /* ANTLRBitSetTest.m */,
-			);
-			path = sets;
-			sourceTree = "<group>";
-		};
-		1A348B1E11D2BE4F000C72FC /* stream */ = {
-			isa = PBXGroup;
-			children = (
-				1A348B1F11D2BE4F000C72FC /* ANTLRStringStreamTest.h */,
-				1A348B2011D2BE4F000C72FC /* ANTLRStringStreamTest.m */,
-			);
-			path = stream;
-			sourceTree = "<group>";
-		};
-		1A348B2111D2BE4F000C72FC /* token */ = {
-			isa = PBXGroup;
-			children = (
-				1A348B2211D2BE4F000C72FC /* ANTLRCommonTokenTest.h */,
-				1A348B2311D2BE4F000C72FC /* ANTLRCommonTokenTest.m */,
-			);
-			path = token;
-			sourceTree = "<group>";
-		};
-		1A348B2411D2BE4F000C72FC /* tree */ = {
-			isa = PBXGroup;
-			children = (
-				1A348B2511D2BE4F000C72FC /* ANTLRCommonTreeTest.h */,
-				1A348B2611D2BE4F000C72FC /* ANTLRCommonTreeTest.m */,
-			);
-			path = tree;
-			sourceTree = "<group>";
-		};
-		1A77EE1912E6A03B007F323A /* RewriteRule */ = {
-			isa = PBXGroup;
-			children = (
-				1A2B096312E797DE00A75133 /* TestRewriteRuleTokenStream.m */,
-				1A2B096612E797F600A75133 /* TestRewriteRuleTokenStream.h */,
-			);
-			name = RewriteRule;
-			path = ../..;
-			sourceTree = "<group>";
-		};
-		1AC5AC6D12E7BB7600DF0C58 /* treeparser */ = {
-			isa = PBXGroup;
-			children = (
-				1AC5ACC412E7C03C00DF0C58 /* Lang.tokens */,
-				1AC5ACC512E7C03C00DF0C58 /* LangDumpDecl.h */,
-				1AC5ACC612E7C03C00DF0C58 /* LangDumpDecl.m */,
-				1AC5ACC712E7C03C00DF0C58 /* LangDumpDecl.tokens */,
-				1AC5ACC812E7C03C00DF0C58 /* LangLexer.h */,
-				1AC5ACC912E7C03C00DF0C58 /* LangLexer.m */,
-				1AC5ACCA12E7C03C00DF0C58 /* LangParser.h */,
-				1AC5ACCB12E7C03C00DF0C58 /* LangParser.m */,
-				1AC5AC7212E7BBB600DF0C58 /* files */,
-				1AC5AC7312E7BBB600DF0C58 /* input */,
-				1AC5AC7412E7BBB600DF0C58 /* Lang.g */,
-				1AC5AC7512E7BBB600DF0C58 /* LangDumpDecl.g */,
-				1AC5AC7712E7BBB600DF0C58 /* output */,
-				1AC5AC7812E7BBB600DF0C58 /* README.txt */,
-				1AC5AC8112E7BC9100DF0C58 /* main.m */,
-			);
-			name = treeparser;
-			sourceTree = "<group>";
-		};
-		32C88DFF0371C24200C91783 /* Other Sources */ = {
-			isa = PBXGroup;
-			children = (
-				1A3A09BD11E235BD00D5EE26 /* antlr3.h */,
-				1A1CCCC711B727B5002E5F53 /* ANTLRError.h */,
-				32DBCF5E0370ADEE00C91783 /* ANTLR_Prefix.pch */,
-				F70AA7A509AA2A6900C3FD5E /* ANTLR.h */,
-				F77747550B23A70600D1F89B /* ANTLRDebug.h */,
-			);
-			name = "Other Sources";
-			sourceTree = "<group>";
-		};
-		F700E85D0A5FA2C0005D0757 /* combined */ = {
-			isa = PBXGroup;
-			children = (
-				F738D35C0B07C105001813C4 /* Combined.tokens */,
-				F738D35D0B07C105001813C4 /* CombinedLexer.h */,
-				F738D35E0B07C105001813C4 /* CombinedLexer.m */,
-				F738D35F0B07C105001813C4 /* CombinedParser.h */,
-				F738D3600B07C105001813C4 /* CombinedParser.m */,
-				F700E85E0A5FA2DE005D0757 /* Combined.g */,
-				F700E86A0A5FA34D005D0757 /* main.m */,
-			);
-			name = combined;
-			sourceTree = "<group>";
-		};
-		F700ECCE0A5FE176005D0757 /* LL-star */ = {
-			isa = PBXGroup;
-			children = (
-				1ADE21F012E505D700E8A95C /* SimpleC.g */,
-				F738D27F0B07B9B6001813C4 /* SimpleC.tokens */,
-				F738D2800B07B9B6001813C4 /* SimpleCLexer.h */,
-				F738D2810B07B9B6001813C4 /* SimpleCLexer.m */,
-				F738D2820B07B9B6001813C4 /* SimpleCParser.h */,
-				F738D2510B07B842001813C4 /* SimpleCParser.m */,
-				F700ECCF0A5FE176005D0757 /* input */,
-				F700ECD00A5FE176005D0757 /* output */,
-				1A994CE412A84F3E001853FF /* SimpleC__.gl */,
-				F700ECE70A5FE25D005D0757 /* main.m */,
-			);
-			path = "LL-star";
-			sourceTree = "<group>";
-		};
-		F7037EBB0A05B06B0070435D /* examples */ = {
-			isa = PBXGroup;
-			children = (
-				F700E85D0A5FA2C0005D0757 /* combined */,
-				F79D56590A0E238100EA3CEE /* fuzzy */,
-				F7DD05E10A7B14BE006A006C /* hoistedPredicates */,
-				F7037EBC0A05B06B0070435D /* lexertest-simple */,
-				F700ECCE0A5FE176005D0757 /* LL-star */,
-				1A0F342C12EA411F00496BB8 /* polydiff */,
-				F7DD073B0A7B660A006A006C /* scopes */,
-				F72C5E2D0AB7529C00282574 /* simplecTreeParser */,
-				1AC5AC6D12E7BB7600DF0C58 /* treeparser */,
-				F7CD45FB0C64BA4B00FF933A /* treerewrite */,
-			);
-			path = examples;
-			sourceTree = "<group>";
-		};
-		F7037EBC0A05B06B0070435D /* lexertest-simple */ = {
-			isa = PBXGroup;
-			children = (
-				F7048FF50B07D05400D2F326 /* Test.tokens */,
-				F7048FF60B07D05400D2F326 /* TestLexer.h */,
-				F7048FF70B07D05400D2F326 /* TestLexer.m */,
-				F7037EBD0A05B06B0070435D /* TestLexer.g */,
-				F7E983940A0D6A5F00F16093 /* main.m */,
-			);
-			path = "lexertest-simple";
-			sourceTree = "<group>";
-		};
-		F70AA7AA09AA2AAB00C3FD5E /* Streams */ = {
-			isa = PBXGroup;
-			children = (
-				F71325850C4A05DC00B99F2D /* Trees */,
-				F70AA7B509AA2B8800C3FD5E /* ANTLRCharStream.h */,
-				1A18EF5511B8028D0006186A /* ANTLRBufferedTokenStream.h */,
-				1A18EF5611B8028D0006186A /* ANTLRBufferedTokenStream.m */,
-				F700ED940A5FF2A5005D0757 /* ANTLRCommonTokenStream.h */,
-				F700ED950A5FF2A5005D0757 /* ANTLRCommonTokenStream.m */,
-				F70AA7AD09AA2AC000C3FD5E /* ANTLRIntStream.h */,
-				1A1CCCA911B724B2002E5F53 /* ANTLRLookaheadStream.h */,
-				1A1CCCAA11B724B2002E5F53 /* ANTLRLookaheadStream.m */,
-				1AB4A58F11B9A0DA0076E91A /* ANTLRStreamEnumerator.h */,
-				1AB4A59011B9A0DA0076E91A /* ANTLRStreamEnumerator.m */,
-				F70AA7C509AA339900C3FD5E /* ANTLRStringStream.h */,
-				F70AA7C609AA339900C3FD5E /* ANTLRStringStream.m */,
-				F700ED7E0A5FF17C005D0757 /* ANTLRTokenSource.h */,
-				F77766AE09DD53E800517181 /* ANTLRTokenStream.h */,
-				1A1FFC5911CD12A400FBB452 /* ANTLRTokenRewriteStream.h */,
-				1A1FFC5A11CD12A400FBB452 /* ANTLRTokenRewriteStream.m */,
-				1A86BACD11EC1CD000C67A03 /* ANTLRUnbufferedTokenStream.h */,
-				1A86BACE11EC1CD000C67A03 /* ANTLRUnbufferedTokenStream.m */,
-			);
-			name = Streams;
-			sourceTree = "<group>";
-		};
-		F71325850C4A05DC00B99F2D /* Trees */ = {
-			isa = PBXGroup;
-			children = (
-				1A9CBD2411C9979600DA8FEF /* ANTLRUnbufferedCommonTreeNodeStream.h */,
-				1A9CBD2511C9979600DA8FEF /* ANTLRUnbufferedCommonTreeNodeStream.m */,
-				1A9CBD2611C9979600DA8FEF /* ANTLRUnbufferedCommonTreeNodeStreamState.h */,
-				1A9CBD2711C9979600DA8FEF /* ANTLRUnbufferedCommonTreeNodeStreamState.m */,
-				F72C5B820AB52AD300282574 /* ANTLRTreeNodeStream.h */,
-				1A65B7D611B9532A00FD8754 /* ANTLRBufferedTreeNodeStream.h */,
-				1A65B7D711B9532A00FD8754 /* ANTLRBufferedTreeNodeStream.m */,
-				F76AA98E0CEA515A00AF044C /* ANTLRCommonTreeNodeStream.h */,
-				F76AA98F0CEA515A00AF044C /* ANTLRCommonTreeNodeStream.m */,
-				F71325860C4A060900B99F2D /* ANTLRRewriteRuleElementStream.h */,
-				F71325870C4A060900B99F2D /* ANTLRRewriteRuleElementStream.m */,
-				F70B11BB0C4C2B6400C3ECE0 /* ANTLRRewriteRuleSubtreeStream.h */,
-				F70B11BC0C4C2B6400C3ECE0 /* ANTLRRewriteRuleSubtreeStream.m */,
-				F70B11C10C4C2B7900C3ECE0 /* ANTLRRewriteRuleTokenStream.h */,
-				F70B11C20C4C2B7900C3ECE0 /* ANTLRRewriteRuleTokenStream.m */,
-			);
-			name = Trees;
-			sourceTree = "<group>";
-		};
-		F72C5E2D0AB7529C00282574 /* simplecTreeParser */ = {
-			isa = PBXGroup;
-			children = (
-				F7715D2E0AC9DE9E00ED984D /* SimpleCLexer.h */,
-				F7715D2F0AC9DE9E00ED984D /* SimpleCLexer.m */,
-				F7715D300AC9DE9E00ED984D /* SimpleCParser.h */,
-				F7715D310AC9DE9E00ED984D /* SimpleCParser.m */,
-				F741D0640B3812D40024DF3F /* SimpleCWalker.h */,
-				F741D0650B3812D40024DF3F /* SimpleCWalker.m */,
-				F72C5E2F0AB7529C00282574 /* input */,
-				F72C5E310AB7529C00282574 /* output */,
-				F7715D1C0AC9DDD800ED984D /* SimpleC.tokens */,
-				F7715D1D0AC9DDD800ED984D /* SimpleCWalker.tokens */,
-				F7715D1A0AC9DCE400ED984D /* SimpleC.g */,
-				F7715D1B0AC9DCE500ED984D /* SimpleCWalker.g */,
-				F72C5E560AB7E41000282574 /* main.m */,
-			);
-			path = simplecTreeParser;
-			sourceTree = "<group>";
-		};
-		F73E2B590A9CF83A005D6267 /* Trees */ = {
-			isa = PBXGroup;
-			children = (
-				1A1D465911BE73B2001575F3 /* ANTLRBaseTreeAdaptor.h */,
-				1A1D465A11BE73B2001575F3 /* ANTLRBaseTreeAdaptor.m */,
-				1A4D5AD411B55A45001C9482 /* ANTLRBaseTree.h */,
-				1A4D5AD511B55A45001C9482 /* ANTLRBaseTree.m */,
-				1A8ABFC511BA9B960038DBB0 /* ANTLRCharStreamState.h */,
-				F70AA7CE09AA379300C3FD5E /* ANTLRCharStreamState.m */,
-				F72C58E80AB3911D00282574 /* ANTLRCommonTree.h */,
-				F72C58E90AB3911D00282574 /* ANTLRCommonTree.m */,
-				F72C59A50AB4F20A00282574 /* ANTLRCommonTreeAdaptor.h */,
-				F72C59A60AB4F20A00282574 /* ANTLRCommonTreeAdaptor.m */,
-				F73E2B720A9CFE6A005D6267 /* ANTLRTree.h */,
-				F73E2B7A0A9D0AFC005D6267 /* ANTLRTreeAdaptor.h */,
-				1A270BF711C1451200DCC8F3 /* ANTLRTreeIterator.h */,
-				1A270BF811C1451200DCC8F3 /* ANTLRTreeIterator.m */,
-				1AAC202A11CC621A00CF56D1 /* ANTLRTreePatternLexer.h */,
-				1AAC202B11CC621A00CF56D1 /* ANTLRTreePatternLexer.m */,
-				1AAC20A311CC790300CF56D1 /* ANTLRTreePatternParser.h */,
-				1AAC20A411CC790300CF56D1 /* ANTLRTreePatternParser.m */,
-				1A1BCE2811CB1A3E0051A1EC /* ANTLRTreeRewriter.h */,
-				1A1BCE2911CB1A3E0051A1EC /* ANTLRTreeRewriter.m */,
-				1A4A851011CBCE3E00E4BF1B /* ANTLRTreeVisitor.h */,
-				1A4A851111CBCE3E00E4BF1B /* ANTLRTreeVisitor.m */,
-				1A4A851611CBCE5500E4BF1B /* ANTLRTreeVisitorAction.h */,
-				1A4A851711CBCE5500E4BF1B /* ANTLRTreeVisitorAction.m */,
-				1A4A851C11CBCF3700E4BF1B /* ANTLRTreeWizard.h */,
-				1A4A851D11CBCF3700E4BF1B /* ANTLRTreeWizard.m */,
-			);
-			name = Trees;
-			sourceTree = "<group>";
-		};
-		F7492F8D09C0171900B25E30 /* Tokens */ = {
-			isa = PBXGroup;
-			children = (
-				F77765CA09DC583000517181 /* ANTLRToken.h */,
-				F777660309DC5CF400517181 /* ANTLRCommonToken.h */,
-				F777660409DC5CF400517181 /* ANTLRCommonToken.m */,
-			);
-			name = Tokens;
-			sourceTree = "<group>";
-		};
-		F77744070B234A3B00D1F89B /* Debugging Categories */ = {
-			isa = PBXGroup;
-			children = (
-				F77744030B234A3400D1F89B /* ANTLRToken+DebuggerSupport.h */,
-				F77744040B234A3400D1F89B /* ANTLRToken+DebuggerSupport.m */,
-			);
-			name = "Debugging Categories";
-			sourceTree = "<group>";
-		};
-		F79D56590A0E238100EA3CEE /* fuzzy */ = {
-			isa = PBXGroup;
-			children = (
-				1ADB66F012E74341007C1661 /* FuzzyLexer.h */,
-				F72B8D090AD01DCB0013F1E2 /* Fuzzy.tokens */,
-				F72B8CFA0AD01D380013F1E2 /* Fuzzy.g */,
-				F72B8D0B0AD01DCB0013F1E2 /* FuzzyLexer.m */,
-				F706A55B0A0EC307008999AB /* input */,
-				F79D56C00A0E287500EA3CEE /* main.m */,
-			);
-			path = fuzzy;
-			sourceTree = "<group>";
-		};
-		F7A4094C096597C4002CC781 /* Exceptions */ = {
-			isa = PBXGroup;
-			children = (
-				F738D1730B07AEAA001813C4 /* ANTLRFailedPredicateException.h */,
-				F738D1740B07AEAA001813C4 /* ANTLRFailedPredicateException.m */,
-				1A26329311C53578000DCDD4 /* ANTLRMismatchedNotSetException.h */,
-				1A26329411C53578000DCDD4 /* ANTLRMismatchedNotSetException.m */,
-				F7037CEE0A0582FC0070435D /* ANTLRMismatchedRangeException.h */,
-				F7037CEF0A0582FC0070435D /* ANTLRMismatchedRangeException.m */,
-				F70380BA0A07FA0D0070435D /* ANTLRMismatchedSetException.h */,
-				F70380BB0A07FA0D0070435D /* ANTLRMismatchedSetException.m */,
-				F777668009DC719C00517181 /* ANTLRMismatchedTokenException.h */,
-				F777668109DC719C00517181 /* ANTLRMismatchedTokenException.m */,
-				F72C5D600AB63E0B00282574 /* ANTLRMismatchedTreeNodeException.h */,
-				F72C5D610AB63E0B00282574 /* ANTLRMismatchedTreeNodeException.m */,
-				1A6C451411BF4EE00039788A /* ANTLRMissingTokenException.h */,
-				1A6C451511BF4EE00039788A /* ANTLRMissingTokenException.m */,
-				1A8AC00A11BAEC710038DBB0 /* ANTLRRuntimeException.h */,
-				1A8AC00B11BAEC710038DBB0 /* ANTLRRuntimeException.m */,
-				F79D59890A0E51AB00EA3CEE /* ANTLRNoViableAltException.h */,
-				F79D598A0A0E51AB00EA3CEE /* ANTLRNoViableAltException.m */,
-				F777669109DC72D600517181 /* ANTLRRecognitionException.h */,
-				F777669209DC72D600517181 /* ANTLRRecognitionException.m */,
-				F700E6190A5F66EC005D0757 /* ANTLREarlyExitException.h */,
-				F700E61A0A5F66EC005D0757 /* ANTLREarlyExitException.m */,
-				F738D1750B07AEAA001813C4 /* ANTLRTreeException.h */,
-				F738D1760B07AEAA001813C4 /* ANTLRTreeException.m */,
-				1A6C452611BF50A40039788A /* ANTLRUnwantedTokenException.h */,
-				1A6C452711BF50A40039788A /* ANTLRUnwantedTokenException.m */,
-			);
-			name = Exceptions;
-			sourceTree = "<group>";
-		};
-		F7A40951096597D2002CC781 /* DFA */ = {
-			isa = PBXGroup;
-			children = (
-				F7754E3D0A5C0A0500D0873A /* ANTLRDFA.h */,
-				F7754E3E0A5C0A0500D0873A /* ANTLRDFA.m */,
-			);
-			name = DFA;
-			sourceTree = "<group>";
-		};
-		F7A4098809659BE5002CC781 /* Recognizer */ = {
-			isa = PBXGroup;
-			children = (
-				F7A4098B09659BF3002CC781 /* ANTLRBaseRecognizer.h */,
-				F7A4098C09659BF3002CC781 /* ANTLRBaseRecognizer.m */,
-				F7B1E5AC0CD7CF1900CE136E /* ANTLRRecognizerSharedState.h */,
-				F7B1E5AD0CD7CF1900CE136E /* ANTLRRecognizerSharedState.m */,
-				F7A4099109659BFB002CC781 /* ANTLRLexer.h */,
-				F7A4099209659BFB002CC781 /* ANTLRLexer.m */,
-				F7009AD90A1BE4AE002EDD5D /* ANTLRLexerRuleReturnScope.h */,
-				F7009ADA0A1BE4AE002EDD5D /* ANTLRLexerRuleReturnScope.m */,
-				F700E8F90A5FAD21005D0757 /* ANTLRParser.h */,
-				F700E8FA0A5FAD21005D0757 /* ANTLRParser.m */,
-				F72C5D540AB63C1D00282574 /* ANTLRTreeParser.h */,
-				F72C5D550AB63C1D00282574 /* ANTLRTreeParser.m */,
-				1A86B91911EB9F6300C67A03 /* ANTLRParseTree.h */,
-				1A86B91A11EB9F6300C67A03 /* ANTLRParseTree.m */,
-			);
-			name = Recognizer;
-			sourceTree = "<group>";
-		};
-		F7CD45FB0C64BA4B00FF933A /* treerewrite */ = {
-			isa = PBXGroup;
-			children = (
-				F7CD46340C64BB7300FF933A /* TreeRewrite.tokens */,
-				F7CD46350C64BB7300FF933A /* TreeRewriteLexer.h */,
-				F7CD46360C64BB7300FF933A /* TreeRewriteLexer.m */,
-				F7CD46370C64BB7300FF933A /* TreeRewriteParser.h */,
-				F7CD46380C64BB7300FF933A /* TreeRewriteParser.m */,
-				F7CD45FC0C64BA4B00FF933A /* main.m */,
-				F7CD45FD0C64BA4B00FF933A /* TreeRewrite.g */,
-			);
-			path = treerewrite;
-			sourceTree = "<group>";
-		};
-		F7DD05E10A7B14BE006A006C /* hoistedPredicates */ = {
-			isa = PBXGroup;
-			children = (
-				F738D2230B07B3BC001813C4 /* TParser.h */,
-				F738D2240B07B3BC001813C4 /* TParser.m */,
-				F7DD06E70A7B1700006A006C /* TLexer.h */,
-				F7DD06E80A7B1700006A006C /* TLexer.m */,
-				F7DD05E20A7B14BE006A006C /* input */,
-				F7DD05E30A7B14BE006A006C /* output */,
-				F7DD05E40A7B14BE006A006C /* T.g */,
-				F7DD05E70A7B1572006A006C /* main.m */,
-			);
-			path = hoistedPredicates;
-			sourceTree = "<group>";
-		};
-		F7DD073B0A7B660A006A006C /* scopes */ = {
-			isa = PBXGroup;
-			children = (
-				F738D1FD0B07B1CE001813C4 /* SymbolTable.tokens */,
-				F738D1FE0B07B1CE001813C4 /* SymbolTableLexer.h */,
-				F738D1FF0B07B1CE001813C4 /* SymbolTableLexer.m */,
-				F738D2000B07B1CE001813C4 /* SymbolTableParser.h */,
-				F738D2010B07B1CE001813C4 /* SymbolTableParser.m */,
-				F7DD073C0A7B660A006A006C /* input */,
-				F7DD073D0A7B660A006A006C /* output */,
-				F7DD073E0A7B660A006A006C /* SymbolTable.g */,
-				F7DD07800A7B67A7006A006C /* main.m */,
-			);
-			path = scopes;
-			sourceTree = "<group>";
-		};
-		F7E2610F0B1E43E60013F640 /* Debugging */ = {
-			isa = PBXGroup;
-			children = (
-				F77744070B234A3B00D1F89B /* Debugging Categories */,
-				F7CECD7D0B1E5C370054CC3B /* ANTLRDebugEventListener.h */,
-				F7E261370B1E45580013F640 /* ANTLRDebugEventProxy.h */,
-				F7E261380B1E45580013F640 /* ANTLRDebugEventProxy.m */,
-				F7E261140B1E44320013F640 /* ANTLRDebugParser.h */,
-				F7E261150B1E44320013F640 /* ANTLRDebugParser.m */,
-				F7E2611E0B1E44E80013F640 /* ANTLRDebugTokenStream.h */,
-				F7E2611F0B1E44E80013F640 /* ANTLRDebugTokenStream.m */,
-				F7E261180B1E443C0013F640 /* ANTLRDebugTreeParser.h */,
-				F7E261190B1E443C0013F640 /* ANTLRDebugTreeParser.m */,
-				F7E261220B1E44FA0013F640 /* ANTLRDebugTreeNodeStream.h */,
-				F7E261230B1E44FA0013F640 /* ANTLRDebugTreeNodeStream.m */,
-				F7E261260B1E45070013F640 /* ANTLRDebugTreeAdaptor.h */,
-				F7E261270B1E45070013F640 /* ANTLRDebugTreeAdaptor.m */,
-			);
-			name = Debugging;
-			sourceTree = "<group>";
-		};
-		F7F218EB097AFB0C000472E9 /* Misc */ = {
-			isa = PBXGroup;
-			children = (
-				1A100AB911E604FE006ABF94 /* ANTLRHashRule.h */,
-				1A100ABA11E604FE006ABF94 /* ANTLRHashRule.m */,
-				1A2D218411E502DE00DFE328 /* ANTLRNodeMapElement.h */,
-				1A2D218511E502DE00DFE328 /* ANTLRNodeMapElement.m */,
-				1AB5F51C11E3BE2E00E065B0 /* ANTLRPtrBuffer.h */,
-				1AB5F51D11E3BE2E00E065B0 /* ANTLRPtrBuffer.m */,
-				1A3A08E811E213E100D5EE26 /* ANTLRSymbolStack.h */,
-				1A3A08E911E213E100D5EE26 /* ANTLRSymbolStack.m */,
-				1A3A08E411E213C500D5EE26 /* ANTLRBaseStack.h */,
-				1A3A08E511E213C500D5EE26 /* ANTLRBaseStack.m */,
-				1A45658711C9270D0082F421 /* ANTLRBaseMapElement.h */,
-				1A45658811C9270D0082F421 /* ANTLRBaseMapElement.m */,
-				F7F218EE097AFB1A000472E9 /* ANTLRBitSet.h */,
-				F7F218EF097AFB1A000472E9 /* ANTLRBitSet.m */,
-				1A1D467A11BE8E5A001575F3 /* ANTLRCommonErrorNode.h */,
-				1A1D467B11BE8E5A001575F3 /* ANTLRCommonErrorNode.m */,
-				1A1CCC9011B6FD39002E5F53 /* ANTLRFastQueue.h */,
-				1A1CCC9111B6FD39002E5F53 /* ANTLRFastQueue.m */,
-				1A1702FC11C05D4800F6978A /* ANTLRHashMap.h */,
-				1A1702FD11C05D4800F6978A /* ANTLRHashMap.m */,
-				1A1CCC9211B6FD39002E5F53 /* ANTLRIntArray.h */,
-				1A1CCC9311B6FD39002E5F53 /* ANTLRIntArray.m */,
-				1A16B13A11C66492002860C7 /* ANTLRLinkBase.h */,
-				1A16B13B11C66492002860C7 /* ANTLRLinkBase.m */,
-				1A1D466E11BE75C0001575F3 /* ANTLRMapElement.h */,
-				1A1D466F11BE75C0001575F3 /* ANTLRMapElement.m */,
-				1A5EA50911CFE7CE00E8932F /* ANTLRMap.h */,
-				1A5EA50A11CFE7CE00E8932F /* ANTLRMap.m */,
-				1A45657511C922BE0082F421 /* ANTLRRuleMemo.h */,
-				1A45657611C922BE0082F421 /* ANTLRRuleMemo.m */,
-				1AE8A96A11D9227A00D36FD6 /* ANTLRRuleStack.h */,
-				1AE8A96B11D9227A00D36FD6 /* ANTLRRuleStack.m */,
-				1A2D217311E4F57C00DFE328 /* ANTLRUniqueIDMap.h */,
-				1A2D217411E4F57C00DFE328 /* ANTLRUniqueIDMap.m */,
-			);
-			name = Misc;
-			sourceTree = "<group>";
-		};
-/* End PBXGroup section */
-
-/* Begin PBXHeadersBuildPhase section */
-		8DC2EF500486A6940098B216 /* Headers */ = {
-			isa = PBXHeadersBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F70AA7A609AA2A6900C3FD5E /* ANTLR.h in Headers */,
-				1A45658911C9270D0082F421 /* ANTLRBaseMapElement.h in Headers */,
-				F7492F5D09C016A200B25E30 /* ANTLRBaseRecognizer.h in Headers */,
-				1A4D5AD611B55A45001C9482 /* ANTLRBaseTree.h in Headers */,
-				1A1D465B11BE73B2001575F3 /* ANTLRBaseTreeAdaptor.h in Headers */,
-				1A10050711B8796E0022B434 /* ANTLRBitSet.h in Headers */,
-				1A10050611B8796D0022B434 /* ANTLRBufferedTokenStream.h in Headers */,
-				1A65B7D811B9532A00FD8754 /* ANTLRBufferedTreeNodeStream.h in Headers */,
-				1A67885311B87AC400A11EEC /* ANTLRCharStream.h in Headers */,
-				1A8ABFC611BA9B960038DBB0 /* ANTLRCharStreamState.h in Headers */,
-				1A1D467C11BE8E5A001575F3 /* ANTLRCommonErrorNode.h in Headers */,
-				F777660509DC5CF400517181 /* ANTLRCommonToken.h in Headers */,
-				F700ED960A5FF2A5005D0757 /* ANTLRCommonTokenStream.h in Headers */,
-				F72C58EA0AB3911D00282574 /* ANTLRCommonTree.h in Headers */,
-				F76AA9900CEA515A00AF044C /* ANTLRCommonTreeNodeStream.h in Headers */,
-				F72C59A70AB4F20A00282574 /* ANTLRCommonTreeAdaptor.h in Headers */,
-				1A12C96011B89F6B008C9BED /* ANTLRDebugEventListener.h in Headers */,
-				F7E261390B1E45580013F640 /* ANTLRDebugEventProxy.h in Headers */,
-				F7E261160B1E44320013F640 /* ANTLRDebugParser.h in Headers */,
-				F7E261200B1E44E80013F640 /* ANTLRDebugTokenStream.h in Headers */,
-				F7E261240B1E44FA0013F640 /* ANTLRDebugTreeNodeStream.h in Headers */,
-				F7E261280B1E45070013F640 /* ANTLRDebugTreeAdaptor.h in Headers */,
-				F7E2611A0B1E443D0013F640 /* ANTLRDebugTreeParser.h in Headers */,
-				F77747560B23A70600D1F89B /* ANTLRDebug.h in Headers */,
-				F7754E3F0A5C0A0500D0873A /* ANTLRDFA.h in Headers */,
-				F700E61B0A5F66EC005D0757 /* ANTLREarlyExitException.h in Headers */,
-				1A1CCCC811B727B5002E5F53 /* ANTLRError.h in Headers */,
-				1A10050911B879A80022B434 /* ANTLRFailedPredicateException.h in Headers */,
-				1A10050811B879A40022B434 /* ANTLRFastQueue.h in Headers */,
-				1A1702FE11C05D4800F6978A /* ANTLRHashMap.h in Headers */,
-				1A10050B11B879B80022B434 /* ANTLRIntArray.h in Headers */,
-				F70AA7AF09AA2AC000C3FD5E /* ANTLRIntStream.h in Headers */,
-				F777678E09DD618000517181 /* ANTLRLexer.h in Headers */,
-				F7009ADB0A1BE4AE002EDD5D /* ANTLRLexerRuleReturnScope.h in Headers */,
-				1A16B13C11C66492002860C7 /* ANTLRLinkBase.h in Headers */,
-				1A1CCCAB11B724B2002E5F53 /* ANTLRLookaheadStream.h in Headers */,
-				1A1D467011BE75C0001575F3 /* ANTLRMapElement.h in Headers */,
-				1A5EA50B11CFE7CE00E8932F /* ANTLRMap.h in Headers */,
-				1A26329511C53578000DCDD4 /* ANTLRMismatchedNotSetException.h in Headers */,
-				F7037CF00A0582FC0070435D /* ANTLRMismatchedRangeException.h in Headers */,
-				F70380BC0A07FA0D0070435D /* ANTLRMismatchedSetException.h in Headers */,
-				F777668209DC719C00517181 /* ANTLRMismatchedTokenException.h in Headers */,
-				F72C5D620AB63E0B00282574 /* ANTLRMismatchedTreeNodeException.h in Headers */,
-				1A75BF5911D6B3FD0096C6F5 /* ANTLRMissingTokenException.h in Headers */,
-				F79D598B0A0E51AB00EA3CEE /* ANTLRNoViableAltException.h in Headers */,
-				F700E8FB0A5FAD21005D0757 /* ANTLRParser.h in Headers */,
-				F72C5ECE0AB7E5A500282574 /* ANTLRParserRuleReturnScope.h in Headers */,
-				F777669309DC72D600517181 /* ANTLRRecognitionException.h in Headers */,
-				F7B1E5B00CD7CF1900CE136E /* ANTLRRecognizerSharedState.h in Headers */,
-				F71325880C4A060900B99F2D /* ANTLRRewriteRuleElementStream.h in Headers */,
-				F70B11BD0C4C2B6400C3ECE0 /* ANTLRRewriteRuleSubtreeStream.h in Headers */,
-				F70B11C30C4C2B7900C3ECE0 /* ANTLRRewriteRuleTokenStream.h in Headers */,
-				1A45657711C922BE0082F421 /* ANTLRRuleMemo.h in Headers */,
-				1A1BCDBB11CB01E60051A1EC /* ANTLRRuleReturnScope.h in Headers */,
-				1A8AC00C11BAEC710038DBB0 /* ANTLRRuntimeException.h in Headers */,
-				1AB4A59111B9A0DA0076E91A /* ANTLRStreamEnumerator.h in Headers */,
-				F70AA7C709AA339900C3FD5E /* ANTLRStringStream.h in Headers */,
-				F77765CC09DC583000517181 /* ANTLRToken.h in Headers */,
-				F77766AF09DD53E800517181 /* ANTLRTokenStream.h in Headers */,
-				F77744050B234A3400D1F89B /* ANTLRToken+DebuggerSupport.h in Headers */,
-				F700ED7F0A5FF17C005D0757 /* ANTLRTokenSource.h in Headers */,
-				F73E2B740A9CFE6A005D6267 /* ANTLRTree.h in Headers */,
-				F73E2B7C0A9D0AFC005D6267 /* ANTLRTreeAdaptor.h in Headers */,
-				F738D1790B07AEAA001813C4 /* ANTLRTreeException.h in Headers */,
-				1A270BF911C1451200DCC8F3 /* ANTLRTreeIterator.h in Headers */,
-				F72C5B840AB52AD300282574 /* ANTLRTreeNodeStream.h in Headers */,
-				F72C5D560AB63C1D00282574 /* ANTLRTreeParser.h in Headers */,
-				1AAC202C11CC621A00CF56D1 /* ANTLRTreePatternLexer.h in Headers */,
-				1AAC20A511CC790300CF56D1 /* ANTLRTreePatternParser.h in Headers */,
-				1A1BCDCF11CB0B3D0051A1EC /* ANTLRTreeRuleReturnScope.h in Headers */,
-				1A1BCE2A11CB1A3E0051A1EC /* ANTLRTreeRewriter.h in Headers */,
-				1A4A851211CBCE3E00E4BF1B /* ANTLRTreeVisitor.h in Headers */,
-				1A4A851811CBCE5500E4BF1B /* ANTLRTreeVisitorAction.h in Headers */,
-				1A4A851E11CBCF3700E4BF1B /* ANTLRTreeWizard.h in Headers */,
-				1A6C452811BF50A40039788A /* ANTLRUnwantedTokenException.h in Headers */,
-				1AE8A96C11D9227A00D36FD6 /* ANTLRRuleStack.h in Headers */,
-				1A3A08E611E213C500D5EE26 /* ANTLRBaseStack.h in Headers */,
-				1A3A08EA11E213E100D5EE26 /* ANTLRSymbolStack.h in Headers */,
-				1A3A09BE11E235BD00D5EE26 /* antlr3.h in Headers */,
-				1AB5F47711E3869D00E065B0 /* ANTLRRuleMapElement.h in Headers */,
-				1AB5F51E11E3BE2E00E065B0 /* ANTLRPtrBuffer.h in Headers */,
-				1A2D217511E4F57C00DFE328 /* ANTLRUniqueIDMap.h in Headers */,
-				1A2D218611E502DE00DFE328 /* ANTLRNodeMapElement.h in Headers */,
-				1A100ABB11E604FE006ABF94 /* ANTLRHashRule.h in Headers */,
-				1AEECE1511E7EB3C00554AAF /* ANTLRTokenRewriteStream.h in Headers */,
-				1A86B91B11EB9F6300C67A03 /* ANTLRParseTree.h in Headers */,
-				1A86BACF11EC1CD000C67A03 /* ANTLRUnbufferedTokenStream.h in Headers */,
-				1ADB66F112E74341007C1661 /* FuzzyLexer.h in Headers */,
-				1A0F348212EA444500496BB8 /* PolyLexer.h in Headers */,
-				1A0F348412EA444500496BB8 /* PolyParser.h in Headers */,
-				1A0F348612EA444500496BB8 /* Simplifier.h in Headers */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXHeadersBuildPhase section */
-
-/* Begin PBXLegacyTarget section */
-		1A0F343B12EA425700496BB8 /* Regenerate polydiff */ = {
-			isa = PBXLegacyTarget;
-			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar Poly.g PolyDifferentiator.g Simplifier.g PolyPrinter.g";
-			buildConfigurationList = 1A0F343C12EA425700496BB8 /* Build configuration list for PBXLegacyTarget "Regenerate polydiff" */;
-			buildPhases = (
-			);
-			buildToolPath = /usr/bin/java;
-			buildWorkingDirectory = "$(PROJECT_DIR)/examples/polydiff";
-			dependencies = (
-			);
-			name = "Regenerate polydiff";
-			passBuildSettingsInEnvironment = 1;
-			productName = Untitled;
-		};
-		1AC5AC9312E7BE0400DF0C58 /* Regenerate treeparser */ = {
-			isa = PBXLegacyTarget;
-			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar Lang.g LangDumpDecl.g";
-			buildConfigurationList = 1AC5AC9412E7BE0400DF0C58 /* Build configuration list for PBXLegacyTarget "Regenerate treeparser" */;
-			buildPhases = (
-			);
-			buildToolPath = /usr/bin/java;
-			buildWorkingDirectory = "$(PROJECT_DIR)/examples/treeparser";
-			dependencies = (
-			);
-			name = "Regenerate treeparser";
-			passBuildSettingsInEnvironment = 1;
-			productName = Untitled;
-		};
-		F76287450B7151E3006AA7EF /* Regenerate fuzzy */ = {
-			isa = PBXLegacyTarget;
-			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar Fuzzy.g";
-			buildConfigurationList = F76287460B715201006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate fuzzy" */;
-			buildPhases = (
-			);
-			buildToolPath = /usr/bin/java;
-			buildWorkingDirectory = "$(PROJECT_DIR)/examples/fuzzy";
-			dependencies = (
-			);
-			name = "Regenerate fuzzy";
-			passBuildSettingsInEnvironment = 1;
-			productName = Untitled;
-		};
-		F76287780B71557E006AA7EF /* Regenerate lexertest-simple */ = {
-			isa = PBXLegacyTarget;
-			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar TestLexer.g";
-			buildConfigurationList = F76287790B71557E006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate lexertest-simple" */;
-			buildPhases = (
-			);
-			buildToolPath = /usr/bin/java;
-			buildWorkingDirectory = "$(PROJECT_DIR)/examples/lexertest-simple";
-			dependencies = (
-			);
-			name = "Regenerate lexertest-simple";
-			passBuildSettingsInEnvironment = 1;
-			productName = Untitled;
-		};
-		F762877E0B71559C006AA7EF /* Regenerate combined */ = {
-			isa = PBXLegacyTarget;
-			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar Combined.g";
-			buildConfigurationList = F762877F0B71559C006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate combined" */;
-			buildPhases = (
-			);
-			buildToolPath = /usr/bin/java;
-			buildWorkingDirectory = "$(PROJECT_DIR)/examples/combined";
-			dependencies = (
-			);
-			name = "Regenerate combined";
-			passBuildSettingsInEnvironment = 1;
-			productName = Untitled;
-		};
-		F76287820B71559F006AA7EF /* Regenerate LL-star */ = {
-			isa = PBXLegacyTarget;
-			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar  SimpleC.g";
-			buildConfigurationList = F76287830B71559F006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate LL-star" */;
-			buildPhases = (
-			);
-			buildToolPath = /usr/bin/java;
-			buildWorkingDirectory = "$(PROJECT_DIR)/examples/LL-star";
-			dependencies = (
-			);
-			name = "Regenerate LL-star";
-			passBuildSettingsInEnvironment = 1;
-			productName = Untitled;
-		};
-		F76287860B7155A2006AA7EF /* Regenerate hoistedPredicates */ = {
-			isa = PBXLegacyTarget;
-			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar  T.g";
-			buildConfigurationList = F76287870B7155A2006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate hoistedPredicates" */;
-			buildPhases = (
-			);
-			buildToolPath = /usr/bin/java;
-			buildWorkingDirectory = "$(PROJECT_DIR)/examples/hoistedPredicates";
-			dependencies = (
-			);
-			name = "Regenerate hoistedPredicates";
-			passBuildSettingsInEnvironment = 1;
-			productName = Untitled;
-		};
-		F762878A0B7155AB006AA7EF /* Regenerate scopes */ = {
-			isa = PBXLegacyTarget;
-			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar  SymbolTable.g";
-			buildConfigurationList = F762878B0B7155AB006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate scopes" */;
-			buildPhases = (
-			);
-			buildToolPath = /usr/bin/java;
-			buildWorkingDirectory = "$(PROJECT_DIR)/examples/scopes";
-			dependencies = (
-			);
-			name = "Regenerate scopes";
-			passBuildSettingsInEnvironment = 1;
-			productName = Untitled;
-		};
-		F762878E0B7155AF006AA7EF /* Regenerate simplectree */ = {
-			isa = PBXLegacyTarget;
-			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar SimpleC.g SimpleCWalker.g";
-			buildConfigurationList = F762878F0B7155AF006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate simplectree" */;
-			buildPhases = (
-			);
-			buildToolPath = /usr/bin/java;
-			buildWorkingDirectory = "$(PROJECT_DIR)/examples/simplecTreeParser";
-			dependencies = (
-			);
-			name = "Regenerate simplectree";
-			passBuildSettingsInEnvironment = 1;
-			productName = Untitled;
-		};
-		F7CD47610C64D23800FF933A /* Regenerate treerewrite */ = {
-			isa = PBXLegacyTarget;
-			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar TreeRewrite.g";
-			buildConfigurationList = F7CD47620C64D23800FF933A /* Build configuration list for PBXLegacyTarget "Regenerate treerewrite" */;
-			buildPhases = (
-			);
-			buildToolPath = /usr/bin/java;
-			buildWorkingDirectory = "$(PROJECT_DIR)/examples/treerewrite";
-			dependencies = (
-			);
-			name = "Regenerate treerewrite";
-			passBuildSettingsInEnvironment = 1;
-			productName = Untitled;
-		};
-/* End PBXLegacyTarget section */
-
-/* Begin PBXNativeTarget section */
-		1A0F345712EA42D800496BB8 /* polydiff */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A0F346212EA42D800496BB8 /* Build configuration list for PBXNativeTarget "polydiff" */;
-			buildPhases = (
-				1A0F345812EA42D800496BB8 /* Sources */,
-				1A0F345D12EA42D800496BB8 /* Frameworks */,
-			);
-			buildRules = (
-				1A0F346112EA42D800496BB8 /* PBXBuildRule */,
-			);
-			dependencies = (
-			);
-			name = polydiff;
-			productName = treerewrite;
-			productReference = 1A0F346612EA42D800496BB8 /* polydiff */;
-			productType = "com.apple.product-type.tool";
-		};
-		1A1210FA11D3A5D900F27B38 /* ANTLRCommonTokenTest */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A12110011D3A5DB00F27B38 /* Build configuration list for PBXNativeTarget "ANTLRCommonTokenTest" */;
-			buildPhases = (
-				1A1210F611D3A5D900F27B38 /* Resources */,
-				1A1210F711D3A5D900F27B38 /* Sources */,
-				1A1210F811D3A5D900F27B38 /* Frameworks */,
-				1A1210F911D3A5D900F27B38 /* ShellScript */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = ANTLRCommonTokenTest;
-			productName = ANTLRCommonTokenTest;
-			productReference = 1A1210FB11D3A5D900F27B38 /* ANTLRCommonTokenTest.octest */;
-			productType = "com.apple.product-type.bundle";
-		};
-		1A1211D011D3BF4600F27B38 /* ANTLRStringStreamTest */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A1211D611D3BF4800F27B38 /* Build configuration list for PBXNativeTarget "ANTLRStringStreamTest" */;
-			buildPhases = (
-				1A1211CC11D3BF4600F27B38 /* Resources */,
-				1A1211CD11D3BF4600F27B38 /* Sources */,
-				1A1211CE11D3BF4600F27B38 /* Frameworks */,
-				1A1211CF11D3BF4600F27B38 /* ShellScript */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = ANTLRStringStreamTest;
-			productName = ANTLRStringStreamTest;
-			productReference = 1A1211D111D3BF4700F27B38 /* ANTLRStringStreamTest.octest */;
-			productType = "com.apple.product-type.bundle";
-		};
-		1A12122311D3C92400F27B38 /* ANTLRFastQueueTest */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A12122911D3C92500F27B38 /* Build configuration list for PBXNativeTarget "ANTLRFastQueueTest" */;
-			buildPhases = (
-				1A12121F11D3C92400F27B38 /* Resources */,
-				1A12122011D3C92400F27B38 /* Sources */,
-				1A12122111D3C92400F27B38 /* Frameworks */,
-				1A12122211D3C92400F27B38 /* ShellScript */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = ANTLRFastQueueTest;
-			productName = ANTLRFastQueueTest;
-			productReference = 1A12122411D3C92400F27B38 /* ANTLRFastQueueTest.octest */;
-			productType = "com.apple.product-type.bundle";
-		};
-		1A1212DB11D3F53600F27B38 /* ANTLRIntArrayTest */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A1212E111D3F53700F27B38 /* Build configuration list for PBXNativeTarget "ANTLRIntArrayTest" */;
-			buildPhases = (
-				1A1212D711D3F53600F27B38 /* Resources */,
-				1A1212D811D3F53600F27B38 /* Sources */,
-				1A1212D911D3F53600F27B38 /* Frameworks */,
-				1A1212DA11D3F53600F27B38 /* ShellScript */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = ANTLRIntArrayTest;
-			productName = ANTLRIntArrayTest;
-			productReference = 1A1212DC11D3F53600F27B38 /* ANTLRIntArrayTest.octest */;
-			productType = "com.apple.product-type.bundle";
-		};
-		1A12130B11D3F7CD00F27B38 /* ANTLRCommonTreeTest */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A12131111D3F7CE00F27B38 /* Build configuration list for PBXNativeTarget "ANTLRCommonTreeTest" */;
-			buildPhases = (
-				1A12130711D3F7CD00F27B38 /* Resources */,
-				1A12130811D3F7CD00F27B38 /* Sources */,
-				1A12130911D3F7CD00F27B38 /* Frameworks */,
-				1A12130A11D3F7CD00F27B38 /* ShellScript */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = ANTLRCommonTreeTest;
-			productName = ANTLRCommonTreeTest;
-			productReference = 1A12130C11D3F7CD00F27B38 /* ANTLRCommonTreeTest.octest */;
-			productType = "com.apple.product-type.bundle";
-		};
-		1A348B4D11D2BEE8000C72FC /* Test */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A348B5311D2BEE9000C72FC /* Build configuration list for PBXNativeTarget "Test" */;
-			buildPhases = (
-				1A348B4911D2BEE8000C72FC /* Resources */,
-				1A348B4A11D2BEE8000C72FC /* Sources */,
-				1A348B4B11D2BEE8000C72FC /* Frameworks */,
-				1A348B4C11D2BEE8000C72FC /* ShellScript */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				1A12134511D3FDA500F27B38 /* PBXTargetDependency */,
-				1A12134711D3FDA500F27B38 /* PBXTargetDependency */,
-				1A12134911D3FDA500F27B38 /* PBXTargetDependency */,
-				1A12134B11D3FDA500F27B38 /* PBXTargetDependency */,
-				1A12134D11D3FDA500F27B38 /* PBXTargetDependency */,
-				1A12134F11D3FDA500F27B38 /* PBXTargetDependency */,
-			);
-			name = Test;
-			productName = Test;
-			productReference = 1A348B4E11D2BEE8000C72FC /* Test.octest */;
-			productType = "com.apple.product-type.bundle";
-		};
-		1A348BEB11D2D0A1000C72FC /* ANTLRBitsetTest */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A348BF111D2D0A2000C72FC /* Build configuration list for PBXNativeTarget "ANTLRBitsetTest" */;
-			buildPhases = (
-				1A348BE711D2D0A1000C72FC /* Resources */,
-				1A348BE811D2D0A1000C72FC /* Sources */,
-				1A348BE911D2D0A1000C72FC /* Frameworks */,
-				1A348BEA11D2D0A1000C72FC /* ShellScript */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = ANTLRBitsetTest;
-			productName = ANTLRBitsetTest;
-			productReference = 1A348BEC11D2D0A1000C72FC /* ANTLRBitsetTest.octest */;
-			productType = "com.apple.product-type.bundle";
-		};
-		1A77EE8812E6A552007F323A /* TreeRewriteRuleTokenStream */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A77EE8E12E6A553007F323A /* Build configuration list for PBXNativeTarget "TreeRewriteRuleTokenStream" */;
-			buildPhases = (
-				1A77EE8412E6A552007F323A /* Resources */,
-				1A77EE8512E6A552007F323A /* Sources */,
-				1A77EE8612E6A552007F323A /* Frameworks */,
-				1A77EE8712E6A552007F323A /* ShellScript */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = TreeRewriteRuleTokenStream;
-			productName = TreeRewriteRuleTokenStream;
-			productReference = 1A77EE8912E6A552007F323A /* TreeRewriteRuleTokenStream.octest */;
-			productType = "com.apple.product-type.bundle";
-		};
-		1AC5AC9812E7BEFE00DF0C58 /* treeparser */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1AC5ACA312E7BEFE00DF0C58 /* Build configuration list for PBXNativeTarget "treeparser" */;
-			buildPhases = (
-				1AC5AC9912E7BEFE00DF0C58 /* Sources */,
-				1AC5AC9D12E7BEFE00DF0C58 /* Frameworks */,
-			);
-			buildRules = (
-				1AC5ACA212E7BEFE00DF0C58 /* PBXBuildRule */,
-			);
-			dependencies = (
-			);
-			name = treeparser;
-			productName = treerewrite;
-			productReference = 1AC5ACA712E7BEFE00DF0C58 /* treeparser */;
-			productType = "com.apple.product-type.tool";
-		};
-		8DC2EF4F0486A6940098B216 /* ANTLR */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1DEB91AD08733DA50010E9CD /* Build configuration list for PBXNativeTarget "ANTLR" */;
-			buildPhases = (
-				8DC2EF500486A6940098B216 /* Headers */,
-				8DC2EF540486A6940098B216 /* Sources */,
-				8DC2EF560486A6940098B216 /* Frameworks */,
-				1A994CC412A84A46001853FF /* ShellScript */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = ANTLR;
-			productInstallPath = "$(HOME)/Library/Frameworks";
-			productName = ANTLR;
-			productReference = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */;
-			productType = "com.apple.product-type.framework";
-		};
-		F700E8630A5FA31D005D0757 /* combined */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = F700E86C0A5FA34D005D0757 /* Build configuration list for PBXNativeTarget "combined" */;
-			buildPhases = (
-				F700E8610A5FA31D005D0757 /* Sources */,
-				F700E8620A5FA31D005D0757 /* Frameworks */,
-			);
-			buildRules = (
-				1A994DC612A85BFC001853FF /* PBXBuildRule */,
-			);
-			dependencies = (
-			);
-			name = combined;
-			productName = combined;
-			productReference = F700E8640A5FA31D005D0757 /* combined */;
-			productType = "com.apple.product-type.tool";
-		};
-		F700ECD60A5FE186005D0757 /* LL-star */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = F700ECDC0A5FE1BF005D0757 /* Build configuration list for PBXNativeTarget "LL-star" */;
-			buildPhases = (
-				F700ECD40A5FE186005D0757 /* Sources */,
-				F700ECD50A5FE186005D0757 /* Frameworks */,
-			);
-			buildRules = (
-				1A994CF212A84FD3001853FF /* PBXBuildRule */,
-			);
-			dependencies = (
-			);
-			name = "LL-star";
-			productName = "LL-star";
-			productReference = F700ECD70A5FE186005D0757 /* LL-star */;
-			productType = "com.apple.product-type.tool";
-		};
-		F7037E9F0A05AFB60070435D /* lexertest-simple */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = F7037EB80A05AFEF0070435D /* Build configuration list for PBXNativeTarget "lexertest-simple" */;
-			buildPhases = (
-				F7037E9D0A05AFB60070435D /* Sources */,
-				F7037E9E0A05AFB60070435D /* Frameworks */,
-			);
-			buildRules = (
-				1A994DC912A85BFC001853FF /* PBXBuildRule */,
-			);
-			dependencies = (
-			);
-			name = "lexertest-simple";
-			productName = "lexertest-simple";
-			productReference = F7037EA00A05AFB60070435D /* lexertest-simple */;
-			productType = "com.apple.product-type.tool";
-		};
-		F72C5E5A0AB7E4C900282574 /* simplectree */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = F72C5E660AB7E4C900282574 /* Build configuration list for PBXNativeTarget "simplectree" */;
-			buildPhases = (
-				F72C5E5D0AB7E4C900282574 /* Sources */,
-				F72C5E610AB7E4C900282574 /* Frameworks */,
-			);
-			buildRules = (
-				1A994D4F12A85987001853FF /* PBXBuildRule */,
-			);
-			dependencies = (
-			);
-			name = simplectree;
-			productName = scopes;
-			productReference = F72C5E690AB7E4C900282574 /* simplectree */;
-			productType = "com.apple.product-type.tool";
-		};
-		F79D565F0A0E23A400EA3CEE /* fuzzy */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = F79D566F0A0E23D600EA3CEE /* Build configuration list for PBXNativeTarget "fuzzy" */;
-			buildPhases = (
-				F79D565D0A0E23A400EA3CEE /* Sources */,
-				F79D565E0A0E23A400EA3CEE /* Frameworks */,
-				F706A5710A0EC357008999AB /* CopyFiles */,
-			);
-			buildRules = (
-				1A994DC712A85BFC001853FF /* PBXBuildRule */,
-			);
-			dependencies = (
-			);
-			name = fuzzy;
-			productName = fuzzy;
-			productReference = F79D56600A0E23A400EA3CEE /* fuzzy */;
-			productType = "com.apple.product-type.tool";
-		};
-		F7CD475C0C64D22800FF933A /* treerewrite */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = F7CD477C0C64D27000FF933A /* Build configuration list for PBXNativeTarget "treerewrite" */;
-			buildPhases = (
-				F7CD475A0C64D22800FF933A /* Sources */,
-				F7CD475B0C64D22800FF933A /* Frameworks */,
-			);
-			buildRules = (
-				1A994D3E12A858E1001853FF /* PBXBuildRule */,
-			);
-			dependencies = (
-			);
-			name = treerewrite;
-			productName = treerewrite;
-			productReference = F7CD475D0C64D22800FF933A /* treerewrite */;
-			productType = "com.apple.product-type.tool";
-		};
-		F7DD05ED0A7B15E1006A006C /* hoistedPredicates */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = F7DD05F00A7B1640006A006C /* Build configuration list for PBXNativeTarget "hoistedPredicates" */;
-			buildPhases = (
-				F7DD05EB0A7B15E1006A006C /* Sources */,
-				F7DD05EC0A7B15E1006A006C /* Frameworks */,
-			);
-			buildRules = (
-				1A994DC812A85BFC001853FF /* PBXBuildRule */,
-			);
-			dependencies = (
-			);
-			name = hoistedPredicates;
-			productName = hoistedPredicates;
-			productReference = F7DD05EE0A7B15E1006A006C /* hoistedPredicates */;
-			productType = "com.apple.product-type.tool";
-		};
-		F7DD07430A7B6618006A006C /* scopes */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = F7DD07790A7B6682006A006C /* Build configuration list for PBXNativeTarget "scopes" */;
-			buildPhases = (
-				F7DD07410A7B6618006A006C /* Sources */,
-				F7DD07420A7B6618006A006C /* Frameworks */,
-			);
-			buildRules = (
-				1A994D8512A85ABE001853FF /* PBXBuildRule */,
-			);
-			dependencies = (
-			);
-			name = scopes;
-			productName = scopes;
-			productReference = F7DD07440A7B6618006A006C /* scopes */;
-			productType = "com.apple.product-type.tool";
-		};
-/* End PBXNativeTarget section */
-
-/* Begin PBXProject section */
-		0867D690FE84028FC02AAC07 /* Project object */ = {
-			isa = PBXProject;
-			buildConfigurationList = 1DEB91B108733DA50010E9CD /* Build configuration list for PBXProject "ANTLR" */;
-			compatibilityVersion = "Xcode 3.2";
-			developmentRegion = English;
-			hasScannedForEncodings = 1;
-			knownRegions = (
-				English,
-				Japanese,
-				French,
-				German,
-			);
-			mainGroup = 0867D691FE84028FC02AAC07 /* ANTLR */;
-			productRefGroup = 034768DFFF38A50411DB9C8B /* Products */;
-			projectDirPath = "";
-			projectRoot = "";
-			targets = (
-				8DC2EF4F0486A6940098B216 /* ANTLR */,
-				1A348B4D11D2BEE8000C72FC /* Test */,
-				1A348BEB11D2D0A1000C72FC /* ANTLRBitsetTest */,
-				1A1210FA11D3A5D900F27B38 /* ANTLRCommonTokenTest */,
-				1A12130B11D3F7CD00F27B38 /* ANTLRCommonTreeTest */,
-				1A12122311D3C92400F27B38 /* ANTLRFastQueueTest */,
-				1A1212DB11D3F53600F27B38 /* ANTLRIntArrayTest */,
-				1A1211D011D3BF4600F27B38 /* ANTLRStringStreamTest */,
-				1A77EE8812E6A552007F323A /* TreeRewriteRuleTokenStream */,
-				F762873F0B71519B006AA7EF /* Regenerate all examples */,
-				F762877E0B71559C006AA7EF /* Regenerate combined */,
-				F700E8630A5FA31D005D0757 /* combined */,
-				F76287450B7151E3006AA7EF /* Regenerate fuzzy */,
-				F79D565F0A0E23A400EA3CEE /* fuzzy */,
-				F76287860B7155A2006AA7EF /* Regenerate hoistedPredicates */,
-				F7DD05ED0A7B15E1006A006C /* hoistedPredicates */,
-				F76287780B71557E006AA7EF /* Regenerate lexertest-simple */,
-				F7037E9F0A05AFB60070435D /* lexertest-simple */,
-				F76287820B71559F006AA7EF /* Regenerate LL-star */,
-				F700ECD60A5FE186005D0757 /* LL-star */,
-				1A0F343B12EA425700496BB8 /* Regenerate polydiff */,
-				1A0F345712EA42D800496BB8 /* polydiff */,
-				F762878A0B7155AB006AA7EF /* Regenerate scopes */,
-				F7DD07430A7B6618006A006C /* scopes */,
-				F762878E0B7155AF006AA7EF /* Regenerate simplectree */,
-				F72C5E5A0AB7E4C900282574 /* simplectree */,
-				1AC5AC9312E7BE0400DF0C58 /* Regenerate treeparser */,
-				1AC5AC9812E7BEFE00DF0C58 /* treeparser */,
-				F7CD47610C64D23800FF933A /* Regenerate treerewrite */,
-				F7CD475C0C64D22800FF933A /* treerewrite */,
-			);
-		};
-/* End PBXProject section */
-
-/* Begin PBXResourcesBuildPhase section */
-		1A1210F611D3A5D900F27B38 /* Resources */ = {
-			isa = PBXResourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A1211CC11D3BF4600F27B38 /* Resources */ = {
-			isa = PBXResourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A12121F11D3C92400F27B38 /* Resources */ = {
-			isa = PBXResourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A1212D711D3F53600F27B38 /* Resources */ = {
-			isa = PBXResourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A12130711D3F7CD00F27B38 /* Resources */ = {
-			isa = PBXResourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A348B4911D2BEE8000C72FC /* Resources */ = {
-			isa = PBXResourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A348BE711D2D0A1000C72FC /* Resources */ = {
-			isa = PBXResourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A77EE8412E6A552007F323A /* Resources */ = {
-			isa = PBXResourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXResourcesBuildPhase section */
-
-/* Begin PBXShellScriptBuildPhase section */
-		1A1210F911D3A5D900F27B38 /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
-		};
-		1A1211CF11D3BF4600F27B38 /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
-		};
-		1A12122211D3C92400F27B38 /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
-		};
-		1A1212DA11D3F53600F27B38 /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
-		};
-		1A12130A11D3F7CD00F27B38 /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
-		};
-		1A348B4C11D2BEE8000C72FC /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
-		};
-		1A348BEA11D2D0A1000C72FC /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
-		};
-		1A77EE8712E6A552007F323A /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
-		};
-		1A994CC412A84A46001853FF /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "";
-		};
-/* End PBXShellScriptBuildPhase section */
-
-/* Begin PBXSourcesBuildPhase section */
-		1A0F345812EA42D800496BB8 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A0F346D12EA434F00496BB8 /* Main.m in Sources */,
-				1A0F348912EA444500496BB8 /* PolyLexer.m in Sources */,
-				1A0F348A12EA444500496BB8 /* PolyParser.m in Sources */,
-				1A01BD9312EB5A6000428792 /* Simplifier.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A1210F711D3A5D900F27B38 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A12110311D3A62B00F27B38 /* ANTLRCommonTokenTest.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A1211CD11D3BF4600F27B38 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A1211DE11D3BFC900F27B38 /* ANTLRStringStreamTest.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A12122011D3C92400F27B38 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A12126211D3CA0100F27B38 /* ANTLRFastQueueTest.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A1212D811D3F53600F27B38 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A1212E711D3F59300F27B38 /* ANTLRIntArrayTest.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A12130811D3F7CD00F27B38 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A12131711D3F80500F27B38 /* ANTLRCommonTreeTest.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A348B4A11D2BEE8000C72FC /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A348BA511D2C6A0000C72FC /* ANTLRBitSetTest.m in Sources */,
-				1A348BA811D2C6AD000C72FC /* ANTLRCommonTokenTest.m in Sources */,
-				1A348BAB11D2C6B8000C72FC /* ANTLRCommonTreeTest.m in Sources */,
-				1A348BAE11D2C6C6000C72FC /* ANTLRFastQueueTest.m in Sources */,
-				1A348BAF11D2C6D3000C72FC /* ANTLRIntArrayTest.m in Sources */,
-				1A348BB211D2C6E3000C72FC /* ANTLRStringStreamTest.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A348BE811D2D0A1000C72FC /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A348C0611D2D22B000C72FC /* ANTLRBitSetTest.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A77EE8512E6A552007F323A /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1AC5AC9912E7BEFE00DF0C58 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AC5ACAD12E7BF4E00DF0C58 /* main.m in Sources */,
-				1AC5ACE612E7CE4700DF0C58 /* LangParser.m in Sources */,
-				1AC5ACE712E7CE4C00DF0C58 /* LangLexer.m in Sources */,
-				1AC5ACE812E7CE5100DF0C58 /* LangDumpDecl.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		8DC2EF540486A6940098B216 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A67885211B87ABA00A11EEC /* ANTLRBaseTree.m in Sources */,
-				1A67885411B87AEA00A11EEC /* ANTLRFastQueue.m in Sources */,
-				1A67885511B87AEF00A11EEC /* ANTLRIntArray.m in Sources */,
-				1A6788FC11B893E100A11EEC /* ANTLRBaseRecognizer.m in Sources */,
-				1A12C95911B89F62008C9BED /* ANTLRBitSet.m in Sources */,
-				1A12C95A11B89F64008C9BED /* ANTLRBufferedTokenStream.m in Sources */,
-				1A12C95B11B89F65008C9BED /* ANTLRCommonToken.m in Sources */,
-				1A12C95C11B89F67008C9BED /* ANTLRCommonTokenStream.m in Sources */,
-				1A12C95D11B89F68008C9BED /* ANTLRCommonTree.m in Sources */,
-				1A12C95E11B89F69008C9BED /* ANTLRCommonTreeAdaptor.m in Sources */,
-				1A12C95F11B89F6A008C9BED /* ANTLRCommonTreeNodeStream.m in Sources */,
-				1A12C96111B89F6F008C9BED /* ANTLRLexer.m in Sources */,
-				1A12C96211B89F70008C9BED /* ANTLRLexerRuleReturnScope.m in Sources */,
-				1A12C96311B89F76008C9BED /* ANTLRLookaheadStream.m in Sources */,
-				1A12C96411B89F76008C9BED /* ANTLRMismatchedRangeException.m in Sources */,
-				1A12C96511B89F77008C9BED /* ANTLRMismatchedSetException.m in Sources */,
-				1A12C96611B89F78008C9BED /* ANTLRMismatchedTokenException.m in Sources */,
-				1A12C96711B89F7A008C9BED /* ANTLRMismatchedTreeNodeException.m in Sources */,
-				1A12C96811B89F7B008C9BED /* ANTLRNoViableAltException.m in Sources */,
-				1A12C96911B89F7E008C9BED /* ANTLRParser.m in Sources */,
-				1A12C96A11B89F7F008C9BED /* ANTLRParserRuleReturnScope.m in Sources */,
-				1A12C96B11B89F80008C9BED /* ANTLRRecognitionException.m in Sources */,
-				1A12C96C11B89F82008C9BED /* ANTLRRecognizerSharedState.m in Sources */,
-				1A12C96D11B89F83008C9BED /* ANTLRRewriteRuleElementStream.m in Sources */,
-				1A12C96E11B89F84008C9BED /* ANTLRRewriteRuleSubtreeStream.m in Sources */,
-				1A12C96F11B89F85008C9BED /* ANTLRRewriteRuleTokenStream.m in Sources */,
-				1A12C97011B89F87008C9BED /* ANTLRStringStream.m in Sources */,
-				1A12C97111B89F8B008C9BED /* ANTLRCharStreamState.m in Sources */,
-				1A12C97211B89F8C008C9BED /* ANTLRToken+DebuggerSupport.m in Sources */,
-				1A12C97311B89F8E008C9BED /* ANTLRTreeException.m in Sources */,
-				1A12C97411B89F90008C9BED /* ANTLRTreeParser.m in Sources */,
-				1A65B7D911B9532A00FD8754 /* ANTLRBufferedTreeNodeStream.m in Sources */,
-				1AB4A54211B995290076E91A /* ANTLREarlyExitException.m in Sources */,
-				1AB4A54311B9952A0076E91A /* ANTLRFailedPredicateException.m in Sources */,
-				1AB4A59211B9A0DA0076E91A /* ANTLRStreamEnumerator.m in Sources */,
-				1A8AC00D11BAEC710038DBB0 /* ANTLRRuntimeException.m in Sources */,
-				1A1D465C11BE73B2001575F3 /* ANTLRBaseTreeAdaptor.m in Sources */,
-				1A1D467111BE75C0001575F3 /* ANTLRMapElement.m in Sources */,
-				1A1D467D11BE8E5A001575F3 /* ANTLRCommonErrorNode.m in Sources */,
-				1A6C451711BF4EE00039788A /* ANTLRMissingTokenException.m in Sources */,
-				1A6C452911BF50A40039788A /* ANTLRUnwantedTokenException.m in Sources */,
-				1A1702FF11C05D4800F6978A /* ANTLRHashMap.m in Sources */,
-				1A270BFA11C1451200DCC8F3 /* ANTLRTreeIterator.m in Sources */,
-				1A26329611C53578000DCDD4 /* ANTLRMismatchedNotSetException.m in Sources */,
-				1A16B13D11C66492002860C7 /* ANTLRLinkBase.m in Sources */,
-				1A45657811C922BE0082F421 /* ANTLRRuleMemo.m in Sources */,
-				1A45658A11C9270D0082F421 /* ANTLRBaseMapElement.m in Sources */,
-				1A1BCDBC11CB01E60051A1EC /* ANTLRRuleReturnScope.m in Sources */,
-				1A1BCDD011CB0B3D0051A1EC /* ANTLRTreeRuleReturnScope.m in Sources */,
-				1A1BCE2B11CB1A3E0051A1EC /* ANTLRTreeRewriter.m in Sources */,
-				1A4A851311CBCE3E00E4BF1B /* ANTLRTreeVisitor.m in Sources */,
-				1A4A851911CBCE5500E4BF1B /* ANTLRTreeVisitorAction.m in Sources */,
-				1A4A851F11CBCF3700E4BF1B /* ANTLRTreeWizard.m in Sources */,
-				1AAC202D11CC621A00CF56D1 /* ANTLRTreePatternLexer.m in Sources */,
-				1AAC20A611CC790300CF56D1 /* ANTLRTreePatternParser.m in Sources */,
-				1A5EA50C11CFE7CE00E8932F /* ANTLRMap.m in Sources */,
-				1A75BFBA11D6C2B10096C6F5 /* ANTLRDFA.m in Sources */,
-				1AE8A96D11D9227A00D36FD6 /* ANTLRRuleStack.m in Sources */,
-				1A3A08E711E213C500D5EE26 /* ANTLRBaseStack.m in Sources */,
-				1A3A08EB11E213E100D5EE26 /* ANTLRSymbolStack.m in Sources */,
-				1AB5F47811E3869D00E065B0 /* ANTLRRuleMapElement.m in Sources */,
-				1AB5F51F11E3BE2E00E065B0 /* ANTLRPtrBuffer.m in Sources */,
-				1A2D217611E4F57C00DFE328 /* ANTLRUniqueIDMap.m in Sources */,
-				1A2D218711E502DE00DFE328 /* ANTLRNodeMapElement.m in Sources */,
-				1A100ABC11E604FE006ABF94 /* ANTLRHashRule.m in Sources */,
-				1AEECE1611E7EB3D00554AAF /* ANTLRTokenRewriteStream.m in Sources */,
-				1A86B91C11EB9F6300C67A03 /* ANTLRParseTree.m in Sources */,
-				1A86BAD011EC1CD000C67A03 /* ANTLRUnbufferedTokenStream.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F700E8610A5FA31D005D0757 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F738D3610B07C105001813C4 /* CombinedLexer.m in Sources */,
-				F738D3620B07C105001813C4 /* CombinedParser.m in Sources */,
-				1A20C56512D6267500C2072A /* main.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F700ECD40A5FE186005D0757 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F738D3190B07BDB7001813C4 /* main.m in Sources */,
-				F76287150B714E82006AA7EF /* SimpleCParser.m in Sources */,
-				F76287160B714E83006AA7EF /* SimpleCLexer.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F7037E9D0A05AFB60070435D /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F7048FF80B07D05400D2F326 /* TestLexer.m in Sources */,
-				F7048FF90B07D05800D2F326 /* main.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F72C5E5D0AB7E4C900282574 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A406B5612E8F2ED005EF037 /* main.m in Sources */,
-				F741D0830B381E720024DF3F /* SimpleCWalker.m in Sources */,
-				F741D0840B381E730024DF3F /* SimpleCParser.m in Sources */,
-				F741D08E0B381EA90024DF3F /* SimpleCLexer.m in Sources */,
-				1AC5ACD612E7C05800DF0C58 /* LangLexer.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F79D565D0A0E23A400EA3CEE /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F738D37E0B07C3BD001813C4 /* main.m in Sources */,
-				F76287170B714EA9006AA7EF /* FuzzyLexer.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F7CD475A0C64D22800FF933A /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F7CD47650C64D24C00FF933A /* TreeRewriteLexer.m in Sources */,
-				F7CD47660C64D24D00FF933A /* TreeRewriteParser.m in Sources */,
-				F7CD47670C64D24D00FF933A /* main.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F7DD05EB0A7B15E1006A006C /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F738D2120B07B32D001813C4 /* T.g in Sources */,
-				F738D2220B07B39F001813C4 /* main.m in Sources */,
-				F76287130B714E77006AA7EF /* TLexer.m in Sources */,
-				F76287140B714E78006AA7EF /* TParser.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		F7DD07410A7B6618006A006C /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				F738D1FC0B07B1BD001813C4 /* main.m in Sources */,
-				F738D20D0B07B265001813C4 /* SymbolTableParser.m in Sources */,
-				F738D20E0B07B266001813C4 /* SymbolTableLexer.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXSourcesBuildPhase section */
-
-/* Begin PBXTargetDependency section */
-		1A0F347112EA43BA00496BB8 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1A0F343B12EA425700496BB8 /* Regenerate polydiff */;
-			targetProxy = 1A0F347012EA43BA00496BB8 /* PBXContainerItemProxy */;
-		};
-		1A0F347312EA43BA00496BB8 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AC5AC9312E7BE0400DF0C58 /* Regenerate treeparser */;
-			targetProxy = 1A0F347212EA43BA00496BB8 /* PBXContainerItemProxy */;
-		};
-		1A0F347512EA43BA00496BB8 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = F7CD47610C64D23800FF933A /* Regenerate treerewrite */;
-			targetProxy = 1A0F347412EA43BA00496BB8 /* PBXContainerItemProxy */;
-		};
-		1A12134511D3FDA500F27B38 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1A348BEB11D2D0A1000C72FC /* ANTLRBitsetTest */;
-			targetProxy = 1A12134411D3FDA500F27B38 /* PBXContainerItemProxy */;
-		};
-		1A12134711D3FDA500F27B38 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1A1210FA11D3A5D900F27B38 /* ANTLRCommonTokenTest */;
-			targetProxy = 1A12134611D3FDA500F27B38 /* PBXContainerItemProxy */;
-		};
-		1A12134911D3FDA500F27B38 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1A12130B11D3F7CD00F27B38 /* ANTLRCommonTreeTest */;
-			targetProxy = 1A12134811D3FDA500F27B38 /* PBXContainerItemProxy */;
-		};
-		1A12134B11D3FDA500F27B38 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1A12122311D3C92400F27B38 /* ANTLRFastQueueTest */;
-			targetProxy = 1A12134A11D3FDA500F27B38 /* PBXContainerItemProxy */;
-		};
-		1A12134D11D3FDA500F27B38 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1A1212DB11D3F53600F27B38 /* ANTLRIntArrayTest */;
-			targetProxy = 1A12134C11D3FDA500F27B38 /* PBXContainerItemProxy */;
-		};
-		1A12134F11D3FDA500F27B38 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1A1211D011D3BF4600F27B38 /* ANTLRStringStreamTest */;
-			targetProxy = 1A12134E11D3FDA500F27B38 /* PBXContainerItemProxy */;
-		};
-		F762874C0B715417006AA7EF /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = F76287450B7151E3006AA7EF /* Regenerate fuzzy */;
-			targetProxy = F762874B0B715417006AA7EF /* PBXContainerItemProxy */;
-		};
-		F76287A70B7157C2006AA7EF /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = F762877E0B71559C006AA7EF /* Regenerate combined */;
-			targetProxy = F76287A60B7157C2006AA7EF /* PBXContainerItemProxy */;
-		};
-		F76287A90B7157C2006AA7EF /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = F76287820B71559F006AA7EF /* Regenerate LL-star */;
-			targetProxy = F76287A80B7157C2006AA7EF /* PBXContainerItemProxy */;
-		};
-		F76287AB0B7157C2006AA7EF /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = F76287860B7155A2006AA7EF /* Regenerate hoistedPredicates */;
-			targetProxy = F76287AA0B7157C2006AA7EF /* PBXContainerItemProxy */;
-		};
-		F76287AD0B7157C2006AA7EF /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = F762878A0B7155AB006AA7EF /* Regenerate scopes */;
-			targetProxy = F76287AC0B7157C2006AA7EF /* PBXContainerItemProxy */;
-		};
-		F76287AF0B7157C2006AA7EF /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = F762878E0B7155AF006AA7EF /* Regenerate simplectree */;
-			targetProxy = F76287AE0B7157C2006AA7EF /* PBXContainerItemProxy */;
-		};
-		F79EFB140C5845A300ABAB3D /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = F76287780B71557E006AA7EF /* Regenerate lexertest-simple */;
-			targetProxy = F79EFB130C5845A300ABAB3D /* PBXContainerItemProxy */;
-		};
-/* End PBXTargetDependency section */
-
-/* Begin PBXVariantGroup section */
-		089C1666FE841158C02AAC07 /* InfoPlist.strings */ = {
-			isa = PBXVariantGroup;
-			children = (
-				089C1667FE841158C02AAC07 /* English */,
-			);
-			name = InfoPlist.strings;
-			sourceTree = "<group>";
-		};
-/* End PBXVariantGroup section */
-
-/* Begin XCBuildConfiguration section */
-		1A0F343D12EA425700496BB8 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = polydiff;
-			};
-			name = Debug;
-		};
-		1A0F343E12EA425700496BB8 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = treerewrite;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1A0F343F12EA425700496BB8 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = treerewrite;
-			};
-			name = Release;
-		};
-		1A0F346312EA42D800496BB8 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
-				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
-				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
-				ALTERNATE_PERMISSIONS_FILES = "";
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ANTLR_DEBUG = YES;
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/polydiff";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/polydiff";
-				ARCHS = "$(NATIVE_ARCH)";
-				BUILD_VARIANTS = normal;
-				BUNDLE_LOADER = "";
-				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
-				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
-				COPYING_PRESERVES_HFS_DATA = NO;
-				COPY_PHASE_STRIP = NO;
-				CURRENT_PROJECT_VERSION = "";
-				DEAD_CODE_STRIPPING = NO;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				DEPLOYMENT_LOCATION = NO;
-				DEPLOYMENT_POSTPROCESSING = NO;
-				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
-				DYLIB_COMPATIBILITY_VERSION = "";
-				DYLIB_CURRENT_VERSION = "";
-				EXECUTABLE_EXTENSION = "";
-				EXECUTABLE_PREFIX = "";
-				EXPORTED_SYMBOLS_FILE = "";
-				FRAMEWORK_SEARCH_PATHS = "";
-				FRAMEWORK_VERSION = A;
-				GCC_ALTIVEC_EXTENSIONS = NO;
-				GCC_AUTO_VECTORIZATION = NO;
-				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
-				GCC_CW_ASM_SYNTAX = YES;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = full;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_ASM_KEYWORD = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = YES;
-				GCC_ENABLE_CPP_RTTI = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = supported;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SSE3_EXTENSIONS = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_ENABLE_TRIGRAPHS = NO;
-				GCC_FAST_MATH = NO;
-				GCC_FAST_OBJC_DISPATCH = NO;
-				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
-				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_INPUT_FILETYPE = automatic;
-				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
-				GCC_MODEL_PPC64 = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_NO_COMMON_BLOCKS = NO;
-				GCC_OBJC_CALL_CXX_CDTORS = NO;
-				GCC_ONE_BYTE_BOOL = NO;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_PREPROCESSOR_DEFINITIONS = "";
-				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
-				GCC_REUSE_STRINGS = YES;
-				GCC_SHORT_ENUMS = NO;
-				GCC_STRICT_ALIASING = NO;
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_THREADSAFE_STATICS = YES;
-				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
-				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
-				GCC_UNROLL_LOOPS = NO;
-				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
-				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
-				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
-				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
-				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
-				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
-				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
-				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
-				GCC_WARN_MISSING_PARENTHESES = NO;
-				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
-				GCC_WARN_PEDANTIC = NO;
-				GCC_WARN_SHADOW = NO;
-				GCC_WARN_SIGN_COMPARE = NO;
-				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = NO;
-				GCC_WARN_UNKNOWN_PRAGMAS = NO;
-				GCC_WARN_UNUSED_FUNCTION = NO;
-				GCC_WARN_UNUSED_LABEL = NO;
-				GCC_WARN_UNUSED_PARAMETER = NO;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				GENERATE_MASTER_OBJECT_FILE = NO;
-				GENERATE_PKGINFO_FILE = NO;
-				GENERATE_PROFILING_CODE = NO;
-				HEADER_SEARCH_PATHS = "";
-				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
-				INFOPLIST_FILE = "";
-				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
-				INFOPLIST_PREFIX_HEADER = "";
-				INFOPLIST_PREPROCESS = NO;
-				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
-				INIT_ROUTINE = "";
-				INSTALL_GROUP = "$(GROUP)";
-				INSTALL_MODE_FLAG = "a-w,a+rX";
-				INSTALL_OWNER = "$(USER)";
-				INSTALL_PATH = "$(HOME)/bin";
-				KEEP_PRIVATE_EXTERNS = NO;
-				LIBRARY_SEARCH_PATHS = "";
-				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
-				LINKER_DISPLAYS_MANGLED_NAMES = NO;
-				LINK_WITH_STANDARD_LIBRARIES = YES;
-				MACH_O_TYPE = "";
-				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
-				OBJROOT = Build/Intermediates;
-				ONLY_ACTIVE_ARCH = YES;
-				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
-				OTHER_CFLAGS = "";
-				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
-				PRELINK_FLAGS = "";
-				PRELINK_LIBS = "";
-				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
-				PRIVATE_HEADERS_FOLDER_PATH = "";
-				PRODUCT_NAME = polydiff;
-				PUBLIC_HEADERS_FOLDER_PATH = "";
-				REZ_SEARCH_PATHS = "";
-				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
-				SDKROOT = macosx10.6;
-				SECTORDER_FLAGS = "";
-				SEPARATE_STRIP = NO;
-				SEPARATE_SYMBOL_EDIT = NO;
-				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
-				SKIP_INSTALL = NO;
-				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
-				STRIPFLAGS = "";
-				STRIP_INSTALLED_PRODUCT = "";
-				STRIP_STYLE = all;
-				SYMROOT = Build;
-				TEST_HOST = "";
-				TEST_RIG = "";
-				UNEXPORTED_SYMBOLS_FILE = "";
-				USER_HEADER_SEARCH_PATHS = "";
-				VERSIONING_SYSTEM = "";
-				WARNING_CFLAGS = "";
-				WARNING_LDFLAGS = "";
-				WRAPPER_EXTENSION = "";
-				ZERO_LINK = NO;
-			};
-			name = Debug;
-		};
-		1A0F346412EA42D800496BB8 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
-				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
-				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
-				ALTERNATE_PERMISSIONS_FILES = "";
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ANTLR_DEBUG = YES;
-				ANTLR_EXTRA_JAVA_ARGS = "/Users/acondit/Projects/idea/antlr3/classes:/Users/acondit/Projects/idea/stringtemplate/classes:/Library/Java/Extensions/antlr-3.3.1.jar";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/treerewrite";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/treerewrite";
-				ARCHS = "$(NATIVE_ARCH)";
-				BUILD_VARIANTS = normal;
-				BUNDLE_LOADER = "";
-				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
-				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
-				COPYING_PRESERVES_HFS_DATA = NO;
-				COPY_PHASE_STRIP = NO;
-				CURRENT_PROJECT_VERSION = "";
-				DEAD_CODE_STRIPPING = NO;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				DEPLOYMENT_LOCATION = NO;
-				DEPLOYMENT_POSTPROCESSING = NO;
-				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
-				DYLIB_COMPATIBILITY_VERSION = "";
-				DYLIB_CURRENT_VERSION = "";
-				EXECUTABLE_EXTENSION = "";
-				EXECUTABLE_PREFIX = "";
-				EXPORTED_SYMBOLS_FILE = "";
-				FRAMEWORK_SEARCH_PATHS = "";
-				FRAMEWORK_VERSION = A;
-				GCC_ALTIVEC_EXTENSIONS = NO;
-				GCC_AUTO_VECTORIZATION = NO;
-				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
-				GCC_CW_ASM_SYNTAX = YES;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = full;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_ASM_KEYWORD = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = YES;
-				GCC_ENABLE_CPP_RTTI = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SSE3_EXTENSIONS = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_ENABLE_TRIGRAPHS = NO;
-				GCC_FAST_MATH = NO;
-				GCC_FAST_OBJC_DISPATCH = NO;
-				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
-				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_INPUT_FILETYPE = automatic;
-				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
-				GCC_MODEL_PPC64 = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_NO_COMMON_BLOCKS = NO;
-				GCC_OBJC_CALL_CXX_CDTORS = NO;
-				GCC_ONE_BYTE_BOOL = NO;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_PREPROCESSOR_DEFINITIONS = "";
-				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
-				GCC_REUSE_STRINGS = YES;
-				GCC_SHORT_ENUMS = NO;
-				GCC_STRICT_ALIASING = NO;
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_THREADSAFE_STATICS = YES;
-				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
-				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
-				GCC_UNROLL_LOOPS = NO;
-				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
-				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
-				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
-				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
-				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
-				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
-				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
-				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
-				GCC_WARN_MISSING_PARENTHESES = NO;
-				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
-				GCC_WARN_PEDANTIC = NO;
-				GCC_WARN_SHADOW = NO;
-				GCC_WARN_SIGN_COMPARE = NO;
-				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = NO;
-				GCC_WARN_UNKNOWN_PRAGMAS = NO;
-				GCC_WARN_UNUSED_FUNCTION = NO;
-				GCC_WARN_UNUSED_LABEL = NO;
-				GCC_WARN_UNUSED_PARAMETER = NO;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				GENERATE_MASTER_OBJECT_FILE = NO;
-				GENERATE_PKGINFO_FILE = NO;
-				GENERATE_PROFILING_CODE = NO;
-				HEADER_SEARCH_PATHS = "";
-				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
-				INFOPLIST_FILE = "";
-				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
-				INFOPLIST_PREFIX_HEADER = "";
-				INFOPLIST_PREPROCESS = NO;
-				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
-				INIT_ROUTINE = "";
-				INSTALL_GROUP = "$(GROUP)";
-				INSTALL_MODE_FLAG = "a-w,a+rX";
-				INSTALL_OWNER = "$(USER)";
-				INSTALL_PATH = "$(HOME)/bin";
-				KEEP_PRIVATE_EXTERNS = NO;
-				LIBRARY_SEARCH_PATHS = "";
-				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
-				LINKER_DISPLAYS_MANGLED_NAMES = NO;
-				LINK_WITH_STANDARD_LIBRARIES = YES;
-				MACH_O_TYPE = "";
-				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
-				OBJROOT = /Users/acondit/Projects/Intermediates;
-				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
-				OTHER_CFLAGS = "";
-				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
-				PRELINK_FLAGS = "";
-				PRELINK_LIBS = "";
-				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
-				PRIVATE_HEADERS_FOLDER_PATH = "";
-				PRODUCT_NAME = treerewrite;
-				PUBLIC_HEADERS_FOLDER_PATH = "";
-				REZ_SEARCH_PATHS = "";
-				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
-				SDKROOT = macosx10.4;
-				SECTORDER_FLAGS = "";
-				SEPARATE_STRIP = NO;
-				SEPARATE_SYMBOL_EDIT = NO;
-				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
-				SKIP_INSTALL = NO;
-				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
-				STRIPFLAGS = "";
-				STRIP_INSTALLED_PRODUCT = "";
-				STRIP_STYLE = all;
-				SYMROOT = /Users/acondit/Projects/Antlr/Build;
-				TEST_HOST = "";
-				TEST_RIG = "";
-				UNEXPORTED_SYMBOLS_FILE = "";
-				USER_HEADER_SEARCH_PATHS = "";
-				VERSIONING_SYSTEM = "";
-				WARNING_CFLAGS = "";
-				WARNING_LDFLAGS = "";
-				WRAPPER_EXTENSION = "";
-				ZERO_LINK = NO;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1A0F346512EA42D800496BB8 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
-				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
-				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
-				ALTERNATE_PERMISSIONS_FILES = "";
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = (
-					ppc,
-					i386,
-				);
-				BUILD_VARIANTS = normal;
-				BUNDLE_LOADER = "";
-				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
-				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
-				COPYING_PRESERVES_HFS_DATA = NO;
-				COPY_PHASE_STRIP = YES;
-				CURRENT_PROJECT_VERSION = "";
-				DEAD_CODE_STRIPPING = NO;
-				DEBUG_INFORMATION_FORMAT = stabs;
-				DEPLOYMENT_LOCATION = NO;
-				DEPLOYMENT_POSTPROCESSING = NO;
-				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
-				DYLIB_COMPATIBILITY_VERSION = "";
-				DYLIB_CURRENT_VERSION = "";
-				EXECUTABLE_EXTENSION = "";
-				EXECUTABLE_PREFIX = "";
-				EXPORTED_SYMBOLS_FILE = "";
-				FRAMEWORK_SEARCH_PATHS = "";
-				FRAMEWORK_VERSION = A;
-				GCC_ALTIVEC_EXTENSIONS = NO;
-				GCC_AUTO_VECTORIZATION = NO;
-				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
-				GCC_CW_ASM_SYNTAX = YES;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = default;
-				GCC_DYNAMIC_NO_PIC = YES;
-				GCC_ENABLE_ASM_KEYWORD = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = YES;
-				GCC_ENABLE_CPP_RTTI = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SSE3_EXTENSIONS = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = YES;
-				GCC_ENABLE_TRIGRAPHS = NO;
-				GCC_FAST_MATH = NO;
-				GCC_FAST_OBJC_DISPATCH = NO;
-				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
-				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_INPUT_FILETYPE = automatic;
-				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
-				GCC_MODEL_PPC64 = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_NO_COMMON_BLOCKS = NO;
-				GCC_OBJC_CALL_CXX_CDTORS = NO;
-				GCC_ONE_BYTE_BOOL = NO;
-				GCC_OPTIMIZATION_LEVEL = s;
-				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_PREPROCESSOR_DEFINITIONS = "";
-				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
-				GCC_REUSE_STRINGS = YES;
-				GCC_SHORT_ENUMS = NO;
-				GCC_STRICT_ALIASING = NO;
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_THREADSAFE_STATICS = YES;
-				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
-				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
-				GCC_UNROLL_LOOPS = NO;
-				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
-				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
-				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
-				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
-				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
-				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
-				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
-				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
-				GCC_WARN_MISSING_PARENTHESES = NO;
-				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
-				GCC_WARN_PEDANTIC = NO;
-				GCC_WARN_SHADOW = YES;
-				GCC_WARN_SIGN_COMPARE = YES;
-				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = YES;
-				GCC_WARN_UNKNOWN_PRAGMAS = NO;
-				GCC_WARN_UNUSED_FUNCTION = NO;
-				GCC_WARN_UNUSED_LABEL = NO;
-				GCC_WARN_UNUSED_PARAMETER = YES;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				GENERATE_MASTER_OBJECT_FILE = NO;
-				GENERATE_PKGINFO_FILE = NO;
-				GENERATE_PROFILING_CODE = NO;
-				HEADER_SEARCH_PATHS = "";
-				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
-				INFOPLIST_FILE = "";
-				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
-				INFOPLIST_PREFIX_HEADER = "";
-				INFOPLIST_PREPROCESS = NO;
-				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
-				INIT_ROUTINE = "";
-				INSTALL_GROUP = "$(GROUP)";
-				INSTALL_MODE_FLAG = "a-w,a+rX";
-				INSTALL_OWNER = "$(USER)";
-				INSTALL_PATH = "$(HOME)/bin";
-				KEEP_PRIVATE_EXTERNS = NO;
-				LIBRARY_SEARCH_PATHS = "";
-				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
-				LINKER_DISPLAYS_MANGLED_NAMES = NO;
-				LINK_WITH_STANDARD_LIBRARIES = YES;
-				MACH_O_TYPE = "";
-				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
-				OBJROOT = /Users/kroepke/Projects/Intermediates;
-				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
-				OTHER_CFLAGS = "";
-				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
-				PRELINK_FLAGS = "";
-				PRELINK_LIBS = "";
-				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
-				PRIVATE_HEADERS_FOLDER_PATH = "";
-				PRODUCT_NAME = treerewrite;
-				PUBLIC_HEADERS_FOLDER_PATH = "";
-				REZ_SEARCH_PATHS = "";
-				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
-				SDKROOT = macosx10.4;
-				SECTORDER_FLAGS = "";
-				SEPARATE_STRIP = NO;
-				SEPARATE_SYMBOL_EDIT = NO;
-				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
-				SKIP_INSTALL = NO;
-				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
-				STRIPFLAGS = "";
-				STRIP_INSTALLED_PRODUCT = "";
-				STRIP_STYLE = all;
-				SYMROOT = /Users/kroepke/Projects/Build;
-				TEST_HOST = "";
-				TEST_RIG = "";
-				UNEXPORTED_SYMBOLS_FILE = "";
-				USER_HEADER_SEARCH_PATHS = "";
-				VERSIONING_SYSTEM = "";
-				WARNING_CFLAGS = "";
-				WARNING_LDFLAGS = "";
-				WRAPPER_EXTENSION = "";
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		1A1210FD11D3A5DB00F27B38 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 1;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRCommonTokenTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRCommonTokenTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = Debug;
-		};
-		1A1210FE11D3A5DB00F27B38 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRCommonTokenTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRCommonTokenTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1A1210FF11D3A5DB00F27B38 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRCommonTokenTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRCommonTokenTest;
-				WRAPPER_EXTENSION = octest;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		1A1211D311D3BF4700F27B38 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 1;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRStringStreamTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRStringStreamTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = Debug;
-		};
-		1A1211D411D3BF4700F27B38 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRStringStreamTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRStringStreamTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1A1211D511D3BF4700F27B38 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRStringStreamTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRStringStreamTest;
-				WRAPPER_EXTENSION = octest;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		1A12122611D3C92500F27B38 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 1;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRFastQueueTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRFastQueueTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = Debug;
-		};
-		1A12122711D3C92500F27B38 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRFastQueueTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRFastQueueTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1A12122811D3C92500F27B38 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRFastQueueTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRFastQueueTest;
-				WRAPPER_EXTENSION = octest;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		1A1212DE11D3F53700F27B38 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 1;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRIntArrayTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRIntArrayTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = Debug;
-		};
-		1A1212DF11D3F53700F27B38 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRIntArrayTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRIntArrayTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1A1212E011D3F53700F27B38 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRIntArrayTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRIntArrayTest;
-				WRAPPER_EXTENSION = octest;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		1A12130E11D3F7CE00F27B38 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 1;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRCommonTreeTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRCommonTreeTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = Debug;
-		};
-		1A12130F11D3F7CE00F27B38 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRCommonTreeTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRCommonTreeTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1A12131011D3F7CE00F27B38 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRCommonTreeTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRCommonTreeTest;
-				WRAPPER_EXTENSION = octest;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		1A348B5011D2BEE8000C72FC /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 1;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "Test-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = Test;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = Debug;
-		};
-		1A348B5111D2BEE8000C72FC /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "Test-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = Test;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1A348B5211D2BEE8000C72FC /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "Test-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = Test;
-				WRAPPER_EXTENSION = octest;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		1A348BEE11D2D0A2000C72FC /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 1;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRBitsetTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRBitsetTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = Debug;
-		};
-		1A348BEF11D2D0A2000C72FC /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRBitsetTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRBitsetTest;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1A348BF011D2D0A2000C72FC /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "ANTLRBitsetTest-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = ANTLRBitsetTest;
-				WRAPPER_EXTENSION = octest;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		1A77EE8B12E6A552007F323A /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 1;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "TreeRewriteRuleTokenStream-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = TreeRewriteRuleTokenStream;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = Debug;
-		};
-		1A77EE8C12E6A552007F323A /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "TreeRewriteRuleTokenStream-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = TreeRewriteRuleTokenStream;
-				WRAPPER_EXTENSION = octest;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1A77EE8D12E6A552007F323A /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
-				INFOPLIST_FILE = "TreeRewriteRuleTokenStream-Info.plist";
-				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Cocoa,
-					"-framework",
-					SenTestingKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = TreeRewriteRuleTokenStream;
-				WRAPPER_EXTENSION = octest;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		1AC5AC9512E7BE0400DF0C58 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = treeparser;
-			};
-			name = Debug;
-		};
-		1AC5AC9612E7BE0400DF0C58 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = treerewrite;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1AC5AC9712E7BE0400DF0C58 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = treerewrite;
-			};
-			name = Release;
-		};
-		1AC5ACA412E7BEFE00DF0C58 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
-				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
-				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
-				ALTERNATE_PERMISSIONS_FILES = "";
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ANTLR_DEBUG = YES;
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/treerewrite";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/treerewrite";
-				ARCHS = "$(NATIVE_ARCH)";
-				BUILD_VARIANTS = normal;
-				BUNDLE_LOADER = "";
-				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
-				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
-				COPYING_PRESERVES_HFS_DATA = NO;
-				COPY_PHASE_STRIP = NO;
-				CURRENT_PROJECT_VERSION = "";
-				DEAD_CODE_STRIPPING = NO;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				DEPLOYMENT_LOCATION = NO;
-				DEPLOYMENT_POSTPROCESSING = NO;
-				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
-				DYLIB_COMPATIBILITY_VERSION = "";
-				DYLIB_CURRENT_VERSION = "";
-				EXECUTABLE_EXTENSION = "";
-				EXECUTABLE_PREFIX = "";
-				EXPORTED_SYMBOLS_FILE = "";
-				FRAMEWORK_SEARCH_PATHS = "";
-				FRAMEWORK_VERSION = A;
-				GCC_ALTIVEC_EXTENSIONS = NO;
-				GCC_AUTO_VECTORIZATION = NO;
-				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
-				GCC_CW_ASM_SYNTAX = YES;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = full;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_ASM_KEYWORD = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = YES;
-				GCC_ENABLE_CPP_RTTI = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = supported;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SSE3_EXTENSIONS = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_ENABLE_TRIGRAPHS = NO;
-				GCC_FAST_MATH = NO;
-				GCC_FAST_OBJC_DISPATCH = NO;
-				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
-				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_INPUT_FILETYPE = automatic;
-				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
-				GCC_MODEL_PPC64 = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_NO_COMMON_BLOCKS = NO;
-				GCC_OBJC_CALL_CXX_CDTORS = NO;
-				GCC_ONE_BYTE_BOOL = NO;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_PREPROCESSOR_DEFINITIONS = "";
-				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
-				GCC_REUSE_STRINGS = YES;
-				GCC_SHORT_ENUMS = NO;
-				GCC_STRICT_ALIASING = NO;
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_THREADSAFE_STATICS = YES;
-				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
-				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
-				GCC_UNROLL_LOOPS = NO;
-				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
-				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
-				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
-				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
-				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
-				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
-				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
-				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
-				GCC_WARN_MISSING_PARENTHESES = NO;
-				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
-				GCC_WARN_PEDANTIC = NO;
-				GCC_WARN_SHADOW = NO;
-				GCC_WARN_SIGN_COMPARE = NO;
-				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = NO;
-				GCC_WARN_UNKNOWN_PRAGMAS = NO;
-				GCC_WARN_UNUSED_FUNCTION = NO;
-				GCC_WARN_UNUSED_LABEL = NO;
-				GCC_WARN_UNUSED_PARAMETER = NO;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				GENERATE_MASTER_OBJECT_FILE = NO;
-				GENERATE_PKGINFO_FILE = NO;
-				GENERATE_PROFILING_CODE = NO;
-				HEADER_SEARCH_PATHS = "";
-				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
-				INFOPLIST_FILE = "";
-				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
-				INFOPLIST_PREFIX_HEADER = "";
-				INFOPLIST_PREPROCESS = NO;
-				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
-				INIT_ROUTINE = "";
-				INSTALL_GROUP = "$(GROUP)";
-				INSTALL_MODE_FLAG = "a-w,a+rX";
-				INSTALL_OWNER = "$(USER)";
-				INSTALL_PATH = "$(HOME)/bin";
-				KEEP_PRIVATE_EXTERNS = NO;
-				LIBRARY_SEARCH_PATHS = "";
-				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
-				LINKER_DISPLAYS_MANGLED_NAMES = NO;
-				LINK_WITH_STANDARD_LIBRARIES = YES;
-				MACH_O_TYPE = "";
-				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
-				OBJROOT = Build/Intermediates;
-				ONLY_ACTIVE_ARCH = YES;
-				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
-				OTHER_CFLAGS = "";
-				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
-				PRELINK_FLAGS = "";
-				PRELINK_LIBS = "";
-				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
-				PRIVATE_HEADERS_FOLDER_PATH = "";
-				PRODUCT_NAME = treeparser;
-				PUBLIC_HEADERS_FOLDER_PATH = "";
-				REZ_SEARCH_PATHS = "";
-				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
-				SDKROOT = macosx10.6;
-				SECTORDER_FLAGS = "";
-				SEPARATE_STRIP = NO;
-				SEPARATE_SYMBOL_EDIT = NO;
-				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
-				SKIP_INSTALL = NO;
-				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
-				STRIPFLAGS = "";
-				STRIP_INSTALLED_PRODUCT = "";
-				STRIP_STYLE = all;
-				SYMROOT = Build;
-				TEST_HOST = "";
-				TEST_RIG = "";
-				UNEXPORTED_SYMBOLS_FILE = "";
-				USER_HEADER_SEARCH_PATHS = "";
-				VERSIONING_SYSTEM = "";
-				WARNING_CFLAGS = "";
-				WARNING_LDFLAGS = "";
-				WRAPPER_EXTENSION = "";
-				ZERO_LINK = NO;
-			};
-			name = Debug;
-		};
-		1AC5ACA512E7BEFE00DF0C58 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
-				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
-				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
-				ALTERNATE_PERMISSIONS_FILES = "";
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ANTLR_DEBUG = YES;
-				ANTLR_EXTRA_JAVA_ARGS = "/Users/acondit/Projects/idea/antlr3/classes:/Users/acondit/Projects/idea/stringtemplate/classes:/Library/Java/Extensions/antlr-3.3.1.jar";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/treerewrite";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/treerewrite";
-				ARCHS = "$(NATIVE_ARCH)";
-				BUILD_VARIANTS = normal;
-				BUNDLE_LOADER = "";
-				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
-				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
-				COPYING_PRESERVES_HFS_DATA = NO;
-				COPY_PHASE_STRIP = NO;
-				CURRENT_PROJECT_VERSION = "";
-				DEAD_CODE_STRIPPING = NO;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				DEPLOYMENT_LOCATION = NO;
-				DEPLOYMENT_POSTPROCESSING = NO;
-				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
-				DYLIB_COMPATIBILITY_VERSION = "";
-				DYLIB_CURRENT_VERSION = "";
-				EXECUTABLE_EXTENSION = "";
-				EXECUTABLE_PREFIX = "";
-				EXPORTED_SYMBOLS_FILE = "";
-				FRAMEWORK_SEARCH_PATHS = "";
-				FRAMEWORK_VERSION = A;
-				GCC_ALTIVEC_EXTENSIONS = NO;
-				GCC_AUTO_VECTORIZATION = NO;
-				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
-				GCC_CW_ASM_SYNTAX = YES;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = full;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_ASM_KEYWORD = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = YES;
-				GCC_ENABLE_CPP_RTTI = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SSE3_EXTENSIONS = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_ENABLE_TRIGRAPHS = NO;
-				GCC_FAST_MATH = NO;
-				GCC_FAST_OBJC_DISPATCH = NO;
-				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
-				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_INPUT_FILETYPE = automatic;
-				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
-				GCC_MODEL_PPC64 = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_NO_COMMON_BLOCKS = NO;
-				GCC_OBJC_CALL_CXX_CDTORS = NO;
-				GCC_ONE_BYTE_BOOL = NO;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_PREPROCESSOR_DEFINITIONS = "";
-				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
-				GCC_REUSE_STRINGS = YES;
-				GCC_SHORT_ENUMS = NO;
-				GCC_STRICT_ALIASING = NO;
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_THREADSAFE_STATICS = YES;
-				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
-				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
-				GCC_UNROLL_LOOPS = NO;
-				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
-				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
-				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
-				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
-				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
-				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
-				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
-				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
-				GCC_WARN_MISSING_PARENTHESES = NO;
-				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
-				GCC_WARN_PEDANTIC = NO;
-				GCC_WARN_SHADOW = NO;
-				GCC_WARN_SIGN_COMPARE = NO;
-				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = NO;
-				GCC_WARN_UNKNOWN_PRAGMAS = NO;
-				GCC_WARN_UNUSED_FUNCTION = NO;
-				GCC_WARN_UNUSED_LABEL = NO;
-				GCC_WARN_UNUSED_PARAMETER = NO;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				GENERATE_MASTER_OBJECT_FILE = NO;
-				GENERATE_PKGINFO_FILE = NO;
-				GENERATE_PROFILING_CODE = NO;
-				HEADER_SEARCH_PATHS = "";
-				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
-				INFOPLIST_FILE = "";
-				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
-				INFOPLIST_PREFIX_HEADER = "";
-				INFOPLIST_PREPROCESS = NO;
-				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
-				INIT_ROUTINE = "";
-				INSTALL_GROUP = "$(GROUP)";
-				INSTALL_MODE_FLAG = "a-w,a+rX";
-				INSTALL_OWNER = "$(USER)";
-				INSTALL_PATH = "$(HOME)/bin";
-				KEEP_PRIVATE_EXTERNS = NO;
-				LIBRARY_SEARCH_PATHS = "";
-				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
-				LINKER_DISPLAYS_MANGLED_NAMES = NO;
-				LINK_WITH_STANDARD_LIBRARIES = YES;
-				MACH_O_TYPE = "";
-				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
-				OBJROOT = /Users/acondit/Projects/Intermediates;
-				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
-				OTHER_CFLAGS = "";
-				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
-				PRELINK_FLAGS = "";
-				PRELINK_LIBS = "";
-				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
-				PRIVATE_HEADERS_FOLDER_PATH = "";
-				PRODUCT_NAME = treerewrite;
-				PUBLIC_HEADERS_FOLDER_PATH = "";
-				REZ_SEARCH_PATHS = "";
-				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
-				SDKROOT = macosx10.4;
-				SECTORDER_FLAGS = "";
-				SEPARATE_STRIP = NO;
-				SEPARATE_SYMBOL_EDIT = NO;
-				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
-				SKIP_INSTALL = NO;
-				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
-				STRIPFLAGS = "";
-				STRIP_INSTALLED_PRODUCT = "";
-				STRIP_STYLE = all;
-				SYMROOT = /Users/acondit/Projects/Antlr/Build;
-				TEST_HOST = "";
-				TEST_RIG = "";
-				UNEXPORTED_SYMBOLS_FILE = "";
-				USER_HEADER_SEARCH_PATHS = "";
-				VERSIONING_SYSTEM = "";
-				WARNING_CFLAGS = "";
-				WARNING_LDFLAGS = "";
-				WRAPPER_EXTENSION = "";
-				ZERO_LINK = NO;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		1AC5ACA612E7BEFE00DF0C58 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
-				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
-				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
-				ALTERNATE_PERMISSIONS_FILES = "";
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = (
-					ppc,
-					i386,
-				);
-				BUILD_VARIANTS = normal;
-				BUNDLE_LOADER = "";
-				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
-				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
-				COPYING_PRESERVES_HFS_DATA = NO;
-				COPY_PHASE_STRIP = YES;
-				CURRENT_PROJECT_VERSION = "";
-				DEAD_CODE_STRIPPING = NO;
-				DEBUG_INFORMATION_FORMAT = stabs;
-				DEPLOYMENT_LOCATION = NO;
-				DEPLOYMENT_POSTPROCESSING = NO;
-				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
-				DYLIB_COMPATIBILITY_VERSION = "";
-				DYLIB_CURRENT_VERSION = "";
-				EXECUTABLE_EXTENSION = "";
-				EXECUTABLE_PREFIX = "";
-				EXPORTED_SYMBOLS_FILE = "";
-				FRAMEWORK_SEARCH_PATHS = "";
-				FRAMEWORK_VERSION = A;
-				GCC_ALTIVEC_EXTENSIONS = NO;
-				GCC_AUTO_VECTORIZATION = NO;
-				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
-				GCC_CW_ASM_SYNTAX = YES;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = default;
-				GCC_DYNAMIC_NO_PIC = YES;
-				GCC_ENABLE_ASM_KEYWORD = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = YES;
-				GCC_ENABLE_CPP_RTTI = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SSE3_EXTENSIONS = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = YES;
-				GCC_ENABLE_TRIGRAPHS = NO;
-				GCC_FAST_MATH = NO;
-				GCC_FAST_OBJC_DISPATCH = NO;
-				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
-				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_INPUT_FILETYPE = automatic;
-				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
-				GCC_MODEL_PPC64 = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_NO_COMMON_BLOCKS = NO;
-				GCC_OBJC_CALL_CXX_CDTORS = NO;
-				GCC_ONE_BYTE_BOOL = NO;
-				GCC_OPTIMIZATION_LEVEL = s;
-				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_PREPROCESSOR_DEFINITIONS = "";
-				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
-				GCC_REUSE_STRINGS = YES;
-				GCC_SHORT_ENUMS = NO;
-				GCC_STRICT_ALIASING = NO;
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_THREADSAFE_STATICS = YES;
-				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
-				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
-				GCC_UNROLL_LOOPS = NO;
-				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
-				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
-				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
-				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
-				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
-				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
-				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
-				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
-				GCC_WARN_MISSING_PARENTHESES = NO;
-				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
-				GCC_WARN_PEDANTIC = NO;
-				GCC_WARN_SHADOW = YES;
-				GCC_WARN_SIGN_COMPARE = YES;
-				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = YES;
-				GCC_WARN_UNKNOWN_PRAGMAS = NO;
-				GCC_WARN_UNUSED_FUNCTION = NO;
-				GCC_WARN_UNUSED_LABEL = NO;
-				GCC_WARN_UNUSED_PARAMETER = YES;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				GENERATE_MASTER_OBJECT_FILE = NO;
-				GENERATE_PKGINFO_FILE = NO;
-				GENERATE_PROFILING_CODE = NO;
-				HEADER_SEARCH_PATHS = "";
-				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
-				INFOPLIST_FILE = "";
-				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
-				INFOPLIST_PREFIX_HEADER = "";
-				INFOPLIST_PREPROCESS = NO;
-				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
-				INIT_ROUTINE = "";
-				INSTALL_GROUP = "$(GROUP)";
-				INSTALL_MODE_FLAG = "a-w,a+rX";
-				INSTALL_OWNER = "$(USER)";
-				INSTALL_PATH = "$(HOME)/bin";
-				KEEP_PRIVATE_EXTERNS = NO;
-				LIBRARY_SEARCH_PATHS = "";
-				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
-				LINKER_DISPLAYS_MANGLED_NAMES = NO;
-				LINK_WITH_STANDARD_LIBRARIES = YES;
-				MACH_O_TYPE = "";
-				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
-				OBJROOT = /Users/kroepke/Projects/Intermediates;
-				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
-				OTHER_CFLAGS = "";
-				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
-				PRELINK_FLAGS = "";
-				PRELINK_LIBS = "";
-				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
-				PRIVATE_HEADERS_FOLDER_PATH = "";
-				PRODUCT_NAME = treerewrite;
-				PUBLIC_HEADERS_FOLDER_PATH = "";
-				REZ_SEARCH_PATHS = "";
-				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
-				SDKROOT = macosx10.4;
-				SECTORDER_FLAGS = "";
-				SEPARATE_STRIP = NO;
-				SEPARATE_SYMBOL_EDIT = NO;
-				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
-				SKIP_INSTALL = NO;
-				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
-				STRIPFLAGS = "";
-				STRIP_INSTALLED_PRODUCT = "";
-				STRIP_STYLE = all;
-				SYMROOT = /Users/kroepke/Projects/Build;
-				TEST_HOST = "";
-				TEST_RIG = "";
-				UNEXPORTED_SYMBOLS_FILE = "";
-				USER_HEADER_SEARCH_PATHS = "";
-				VERSIONING_SYSTEM = "";
-				WARNING_CFLAGS = "";
-				WARNING_LDFLAGS = "";
-				WRAPPER_EXTENSION = "";
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		1DEB91AE08733DA50010E9CD /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_JAVA_ARGS = "/Library/Java/Extensions/antlr-2.7.7.jar:/Users/acondit/IdeaProjects/antlr3/out/production/antlr3:/Users/acondit/IdeaProjects/antlr3/out/production/stringtemplate";
-				COPY_PHASE_STRIP = NO;
-				CURRENT_PROJECT_VERSION = 1;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				DEPLOYMENT_LOCATION = NO;
-				DYLIB_COMPATIBILITY_VERSION = 1;
-				DYLIB_CURRENT_VERSION = 1;
-				FRAMEWORK_VERSION = A;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = full;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_MODEL_TUNING = G4;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = ANTLR_Prefix.pch;
-				GCC_WARN_UNINITIALIZED_AUTOS = NO;
-				INFOPLIST_FILE = Info.plist;
-				INSTALL_PATH = "$(HOME)/Library/Frameworks";
-				PRODUCT_NAME = ANTLR;
-				VERSIONING_SYSTEM = "apple-generic";
-				WRAPPER_EXTENSION = framework;
-				ZERO_LINK = NO;
-			};
-			name = Debug;
-		};
-		1DEB91AF08733DA50010E9CD /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ARCHS = (
-					ppc,
-					i386,
-				);
-				BUILD_VARIANTS = (
-					normal,
-					debug,
-				);
-				CURRENT_PROJECT_VERSION = 1;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				DYLIB_COMPATIBILITY_VERSION = 1;
-				DYLIB_CURRENT_VERSION = 1;
-				FRAMEWORK_VERSION = A;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = ANTLR_Prefix.pch;
-				INFOPLIST_FILE = Info.plist;
-				INSTALL_PATH = "$(HOME)/Library/Frameworks";
-				PRODUCT_NAME = ANTLR;
-				VERSIONING_SYSTEM = "apple-generic";
-				WRAPPER_EXTENSION = framework;
-			};
-			name = Release;
-		};
-		1DEB91B208733DA50010E9CD /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_ARGS = "";
-				ANTLR_EXTRA_JAVA_ARGS = "";
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = full;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = supported;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_OPTIMIZATION_LEVEL = 1;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_SHADOW = NO;
-				GCC_WARN_SIGN_COMPARE = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = YES;
-				GCC_WARN_UNUSED_PARAMETER = NO;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				PREBINDING = NO;
-				SDKROOT = macosx10.6;
-			};
-			name = Debug;
-		};
-		1DEB91B308733DA50010E9CD /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_ARGS = "";
-				ANTLR_EXTRA_JAVA_ARGS = "";
-				ARCHS = (
-					ppc,
-					i386,
-				);
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_SHADOW = YES;
-				GCC_WARN_SIGN_COMPARE = YES;
-				GCC_WARN_UNINITIALIZED_AUTOS = YES;
-				GCC_WARN_UNUSED_PARAMETER = YES;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				PREBINDING = NO;
-				SDKROOT = macosx10.6;
-			};
-			name = Release;
-		};
-		F700E86D0A5FA34D005D0757 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/combined";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/combined";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = YES;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 1;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				ONLY_ACTIVE_ARCH = YES;
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = combined;
-				ZERO_LINK = YES;
-			};
-			name = Debug;
-		};
-		F700E86E0A5FA34D005D0757 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				COPY_PHASE_STRIP = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = combined;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		F700ECDD0A5FE1BF005D0757 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_DEBUG = YES;
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/LL-star";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/LL-star";
-				ANTLR_X_DEBUG_ST = NO;
-				COPY_PHASE_STRIP = NO;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G4;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = "LL-star";
-				ZERO_LINK = NO;
-			};
-			name = Debug;
-		};
-		F700ECDE0A5FE1BF005D0757 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = stabs;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = "LL-star";
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		F7037EB90A05AFEF0070435D /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/lexertest-simple";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/lexertest-simple";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = YES;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = Build;
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = "lexertest-simple";
-				ZERO_LINK = NO;
-			};
-			name = Debug;
-		};
-		F7037EBA0A05AFEF0070435D /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				COPY_PHASE_STRIP = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = "lexertest-simple";
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		F72C5E670AB7E4C900282574 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_DEBUG = YES;
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/simpleCTreeParser";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/simpleCTreeParser";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_WARN_UNINITIALIZED_AUTOS = NO;
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = simplectree;
-				ZERO_LINK = NO;
-			};
-			name = Debug;
-		};
-		F72C5E680AB7E4C900282574 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				COPY_PHASE_STRIP = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = simplectree;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		F76287410B7151B9006AA7EF /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/Users/acondit/source/antlr_src/code/antlr/out/production/antlr3:/Users/acondit/source/antlr_src/code/antlr/out/production/stringtemplate";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				PRODUCT_NAME = Untitled;
-			};
-			name = Debug;
-		};
-		F76287420B7151B9006AA7EF /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_JAVA_ARGS = "ANTLR_EXTRA_JAVA_ARGS = /usr/share/java/antlr-2.7.7.jar:/Users/acondit/source/antlr_src/code/antlr/out/production/antlr3:/Users/acondit/source/antlr_src/code/antlr/out/production/stringtemplate\n";
-				COPY_PHASE_STRIP = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				PRODUCT_NAME = Untitled;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		F76287470B715201006AA7EF /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = fuzzy;
-			};
-			name = Debug;
-		};
-		F76287480B715201006AA7EF /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = fuzzy;
-			};
-			name = Release;
-		};
-		F762877A0B71557E006AA7EF /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = "lexertest-simple";
-			};
-			name = Debug;
-		};
-		F762877B0B71557E006AA7EF /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = fuzzy;
-			};
-			name = Release;
-		};
-		F76287800B71559C006AA7EF /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = combined;
-			};
-			name = Debug;
-		};
-		F76287810B71559C006AA7EF /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = combined;
-			};
-			name = Release;
-		};
-		F76287840B71559F006AA7EF /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = "LL-star";
-			};
-			name = Debug;
-		};
-		F76287850B71559F006AA7EF /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = "LL-star";
-			};
-			name = Release;
-		};
-		F76287880B7155A2006AA7EF /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = hoistedPredicates;
-			};
-			name = Debug;
-		};
-		F76287890B7155A2006AA7EF /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = hoistedPredicates;
-			};
-			name = Release;
-		};
-		F762878C0B7155AB006AA7EF /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = scopes;
-			};
-			name = Debug;
-		};
-		F762878D0B7155AB006AA7EF /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = scopes;
-			};
-			name = Release;
-		};
-		F76287900B7155AF006AA7EF /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				BUILD_SETTING = NO;
-				GRAMMAR_SETTING = NO;
-				PRODUCT_NAME = simplectree;
-			};
-			name = Debug;
-		};
-		F76287910B7155AF006AA7EF /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = "simplec tree";
-			};
-			name = Release;
-		};
-		F79D56700A0E23D600EA3CEE /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/fuzzy";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/fuzzy";
-				ANTLR_TRACE = NO;
-				ANTLR_X_DEBUG_ST = NO;
-				COPY_PHASE_STRIP = NO;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G4;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = fuzzy;
-				ZERO_LINK = NO;
-			};
-			name = Debug;
-		};
-		F79D56710A0E23D600EA3CEE /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				COPY_PHASE_STRIP = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = fuzzy;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		F7C562300CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_ARGS = "";
-				ANTLR_EXTRA_JAVA_ARGS = "";
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = full;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_SHADOW = NO;
-				GCC_WARN_SIGN_COMPARE = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = YES;
-				GCC_WARN_UNUSED_PARAMETER = NO;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				PREBINDING = NO;
-				SDKROOT = macosx10.6;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562310CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				COPY_PHASE_STRIP = NO;
-				CURRENT_PROJECT_VERSION = 1;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				DEPLOYMENT_LOCATION = NO;
-				DYLIB_COMPATIBILITY_VERSION = 1;
-				DYLIB_CURRENT_VERSION = 1;
-				FRAMEWORK_VERSION = A;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = full;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_MODEL_TUNING = G4;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = ANTLR_Prefix.pch;
-				GCC_WARN_UNINITIALIZED_AUTOS = NO;
-				INFOPLIST_FILE = Info.plist;
-				INSTALL_PATH = "$(HOME)/Library/Frameworks";
-				PRODUCT_NAME = ANTLR;
-				VERSIONING_SYSTEM = "apple-generic";
-				WRAPPER_EXTENSION = framework;
-				ZERO_LINK = NO;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562330CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/Users/acondit/source/antlr_src/code/antlr/out/production/antlr3:/Users/acondit/source/antlr_src/code/antlr/out/production/stringtemplate";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				PRODUCT_NAME = Untitled;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562340CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/lexertest-simple";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/lexertest-simple";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = YES;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = "lexertest-simple";
-				ZERO_LINK = NO;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562350CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = "lexertest-simple";
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562360CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/fuzzy";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/fuzzy";
-				ANTLR_TRACE = NO;
-				ANTLR_X_DEBUG_ST = NO;
-				COPY_PHASE_STRIP = NO;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G4;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = fuzzy;
-				ZERO_LINK = NO;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562370CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = fuzzy;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562380CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/combined";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/combined";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = YES;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = combined;
-				ZERO_LINK = YES;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562390CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = combined;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C5623A0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_DEBUG = YES;
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/LL-star";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/LL-star";
-				ANTLR_X_DEBUG_ST = NO;
-				COPY_PHASE_STRIP = NO;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G4;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = "LL-star";
-				ZERO_LINK = NO;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C5623B0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = "LL-star";
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C5623C0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/hoistedPredicates";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/hoistedPredicates";
-				ARCHS = "$(NATIVE_ARCH)";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = YES;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = hoistedPredicates;
-				ZERO_LINK = NO;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C5623D0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = hoistedPredicates;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C5623E0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/scopes";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/scopes";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = YES;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = scopes;
-				ZERO_LINK = NO;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C5623F0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = scopes;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562400CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_DEBUG = YES;
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/simpleCTreeParser";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/simpleCTreeParser";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_WARN_UNINITIALIZED_AUTOS = NO;
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = simplectree;
-				ZERO_LINK = NO;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562410CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = "simplec tree";
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562420CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
-				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
-				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
-				ALTERNATE_PERMISSIONS_FILES = "";
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ANTLR_DEBUG = YES;
-				ANTLR_EXTRA_JAVA_ARGS = "/Users/acondit/Projects/idea/antlr3/classes:/Users/acondit/Projects/idea/stringtemplate/classes:/Library/Java/Extensions/antlr-3.3.1.jar";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/treerewrite";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/treerewrite";
-				ARCHS = "$(NATIVE_ARCH)";
-				BUILD_VARIANTS = normal;
-				BUNDLE_LOADER = "";
-				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
-				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
-				COPYING_PRESERVES_HFS_DATA = NO;
-				COPY_PHASE_STRIP = NO;
-				CURRENT_PROJECT_VERSION = "";
-				DEAD_CODE_STRIPPING = NO;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				DEPLOYMENT_LOCATION = NO;
-				DEPLOYMENT_POSTPROCESSING = NO;
-				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
-				DYLIB_COMPATIBILITY_VERSION = "";
-				DYLIB_CURRENT_VERSION = "";
-				EXECUTABLE_EXTENSION = "";
-				EXECUTABLE_PREFIX = "";
-				EXPORTED_SYMBOLS_FILE = "";
-				FRAMEWORK_SEARCH_PATHS = "";
-				FRAMEWORK_VERSION = A;
-				GCC_ALTIVEC_EXTENSIONS = NO;
-				GCC_AUTO_VECTORIZATION = NO;
-				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
-				GCC_CW_ASM_SYNTAX = YES;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = full;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_ASM_KEYWORD = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = YES;
-				GCC_ENABLE_CPP_RTTI = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SSE3_EXTENSIONS = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_ENABLE_TRIGRAPHS = NO;
-				GCC_FAST_MATH = NO;
-				GCC_FAST_OBJC_DISPATCH = NO;
-				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
-				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_INPUT_FILETYPE = automatic;
-				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
-				GCC_MODEL_PPC64 = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_NO_COMMON_BLOCKS = NO;
-				GCC_OBJC_CALL_CXX_CDTORS = NO;
-				GCC_ONE_BYTE_BOOL = NO;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_PREPROCESSOR_DEFINITIONS = "";
-				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
-				GCC_REUSE_STRINGS = YES;
-				GCC_SHORT_ENUMS = NO;
-				GCC_STRICT_ALIASING = NO;
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_THREADSAFE_STATICS = YES;
-				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
-				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
-				GCC_UNROLL_LOOPS = NO;
-				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
-				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
-				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
-				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
-				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
-				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
-				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
-				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
-				GCC_WARN_MISSING_PARENTHESES = NO;
-				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
-				GCC_WARN_PEDANTIC = NO;
-				GCC_WARN_SHADOW = NO;
-				GCC_WARN_SIGN_COMPARE = NO;
-				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = NO;
-				GCC_WARN_UNKNOWN_PRAGMAS = NO;
-				GCC_WARN_UNUSED_FUNCTION = NO;
-				GCC_WARN_UNUSED_LABEL = NO;
-				GCC_WARN_UNUSED_PARAMETER = NO;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				GENERATE_MASTER_OBJECT_FILE = NO;
-				GENERATE_PKGINFO_FILE = NO;
-				GENERATE_PROFILING_CODE = NO;
-				HEADER_SEARCH_PATHS = "";
-				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
-				INFOPLIST_FILE = "";
-				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
-				INFOPLIST_PREFIX_HEADER = "";
-				INFOPLIST_PREPROCESS = NO;
-				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
-				INIT_ROUTINE = "";
-				INSTALL_GROUP = "$(GROUP)";
-				INSTALL_MODE_FLAG = "a-w,a+rX";
-				INSTALL_OWNER = "$(USER)";
-				INSTALL_PATH = "$(HOME)/bin";
-				KEEP_PRIVATE_EXTERNS = NO;
-				LIBRARY_SEARCH_PATHS = "";
-				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
-				LINKER_DISPLAYS_MANGLED_NAMES = NO;
-				LINK_WITH_STANDARD_LIBRARIES = YES;
-				MACH_O_TYPE = "";
-				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
-				OBJROOT = /Users/acondit/Projects/Intermediates;
-				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
-				OTHER_CFLAGS = "";
-				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
-				PRELINK_FLAGS = "";
-				PRELINK_LIBS = "";
-				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
-				PRIVATE_HEADERS_FOLDER_PATH = "";
-				PRODUCT_NAME = treerewrite;
-				PUBLIC_HEADERS_FOLDER_PATH = "";
-				REZ_SEARCH_PATHS = "";
-				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
-				SDKROOT = macosx10.4;
-				SECTORDER_FLAGS = "";
-				SEPARATE_STRIP = NO;
-				SEPARATE_SYMBOL_EDIT = NO;
-				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
-				SKIP_INSTALL = NO;
-				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
-				STRIPFLAGS = "";
-				STRIP_INSTALLED_PRODUCT = "";
-				STRIP_STYLE = all;
-				SYMROOT = /Users/acondit/Projects/Antlr/Build;
-				TEST_HOST = "";
-				TEST_RIG = "";
-				UNEXPORTED_SYMBOLS_FILE = "";
-				USER_HEADER_SEARCH_PATHS = "";
-				VERSIONING_SYSTEM = "";
-				WARNING_CFLAGS = "";
-				WARNING_LDFLAGS = "";
-				WRAPPER_EXTENSION = "";
-				ZERO_LINK = NO;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7C562430CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = treerewrite;
-			};
-			name = "Debug with StringTemplate Debug";
-		};
-		F7CD47630C64D23800FF933A /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = treerewrite;
-			};
-			name = Debug;
-		};
-		F7CD47640C64D23800FF933A /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				PRODUCT_NAME = treerewrite;
-			};
-			name = Release;
-		};
-		F7CD477D0C64D27000FF933A /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
-				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
-				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
-				ALTERNATE_PERMISSIONS_FILES = "";
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ANTLR_DEBUG = YES;
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/treerewrite";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/treerewrite";
-				ARCHS = "$(NATIVE_ARCH)";
-				BUILD_VARIANTS = normal;
-				BUNDLE_LOADER = "";
-				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
-				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
-				COPYING_PRESERVES_HFS_DATA = NO;
-				COPY_PHASE_STRIP = NO;
-				CURRENT_PROJECT_VERSION = "";
-				DEAD_CODE_STRIPPING = NO;
-				DEBUG_INFORMATION_FORMAT = dwarf;
-				DEPLOYMENT_LOCATION = NO;
-				DEPLOYMENT_POSTPROCESSING = NO;
-				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
-				DYLIB_COMPATIBILITY_VERSION = "";
-				DYLIB_CURRENT_VERSION = "";
-				EXECUTABLE_EXTENSION = "";
-				EXECUTABLE_PREFIX = "";
-				EXPORTED_SYMBOLS_FILE = "";
-				FRAMEWORK_SEARCH_PATHS = "";
-				FRAMEWORK_VERSION = A;
-				GCC_ALTIVEC_EXTENSIONS = NO;
-				GCC_AUTO_VECTORIZATION = NO;
-				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
-				GCC_CW_ASM_SYNTAX = YES;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = full;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_ASM_KEYWORD = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = YES;
-				GCC_ENABLE_CPP_RTTI = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = supported;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SSE3_EXTENSIONS = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_ENABLE_TRIGRAPHS = NO;
-				GCC_FAST_MATH = NO;
-				GCC_FAST_OBJC_DISPATCH = NO;
-				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
-				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_INPUT_FILETYPE = automatic;
-				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
-				GCC_MODEL_PPC64 = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_NO_COMMON_BLOCKS = NO;
-				GCC_OBJC_CALL_CXX_CDTORS = NO;
-				GCC_ONE_BYTE_BOOL = NO;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_PREPROCESSOR_DEFINITIONS = "";
-				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
-				GCC_REUSE_STRINGS = YES;
-				GCC_SHORT_ENUMS = NO;
-				GCC_STRICT_ALIASING = NO;
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_THREADSAFE_STATICS = YES;
-				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
-				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
-				GCC_UNROLL_LOOPS = NO;
-				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
-				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
-				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
-				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
-				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
-				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
-				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
-				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
-				GCC_WARN_MISSING_PARENTHESES = NO;
-				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
-				GCC_WARN_PEDANTIC = NO;
-				GCC_WARN_SHADOW = NO;
-				GCC_WARN_SIGN_COMPARE = NO;
-				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = NO;
-				GCC_WARN_UNKNOWN_PRAGMAS = NO;
-				GCC_WARN_UNUSED_FUNCTION = NO;
-				GCC_WARN_UNUSED_LABEL = NO;
-				GCC_WARN_UNUSED_PARAMETER = NO;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				GENERATE_MASTER_OBJECT_FILE = NO;
-				GENERATE_PKGINFO_FILE = NO;
-				GENERATE_PROFILING_CODE = NO;
-				HEADER_SEARCH_PATHS = "";
-				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
-				INFOPLIST_FILE = "";
-				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
-				INFOPLIST_PREFIX_HEADER = "";
-				INFOPLIST_PREPROCESS = NO;
-				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
-				INIT_ROUTINE = "";
-				INSTALL_GROUP = "$(GROUP)";
-				INSTALL_MODE_FLAG = "a-w,a+rX";
-				INSTALL_OWNER = "$(USER)";
-				INSTALL_PATH = "$(HOME)/bin";
-				KEEP_PRIVATE_EXTERNS = NO;
-				LIBRARY_SEARCH_PATHS = "";
-				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
-				LINKER_DISPLAYS_MANGLED_NAMES = NO;
-				LINK_WITH_STANDARD_LIBRARIES = YES;
-				MACH_O_TYPE = "";
-				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
-				OBJROOT = Build/Intermediates;
-				ONLY_ACTIVE_ARCH = YES;
-				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
-				OTHER_CFLAGS = "";
-				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
-				PRELINK_FLAGS = "";
-				PRELINK_LIBS = "";
-				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
-				PRIVATE_HEADERS_FOLDER_PATH = "";
-				PRODUCT_NAME = treerewrite;
-				PUBLIC_HEADERS_FOLDER_PATH = "";
-				REZ_SEARCH_PATHS = "";
-				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
-				SDKROOT = macosx10.6;
-				SECTORDER_FLAGS = "";
-				SEPARATE_STRIP = NO;
-				SEPARATE_SYMBOL_EDIT = NO;
-				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
-				SKIP_INSTALL = NO;
-				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
-				STRIPFLAGS = "";
-				STRIP_INSTALLED_PRODUCT = "";
-				STRIP_STYLE = all;
-				SYMROOT = Build;
-				TEST_HOST = "";
-				TEST_RIG = "";
-				UNEXPORTED_SYMBOLS_FILE = "";
-				USER_HEADER_SEARCH_PATHS = "";
-				VERSIONING_SYSTEM = "";
-				WARNING_CFLAGS = "";
-				WARNING_LDFLAGS = "";
-				WRAPPER_EXTENSION = "";
-				ZERO_LINK = NO;
-			};
-			name = Debug;
-		};
-		F7CD477E0C64D27000FF933A /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
-				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
-				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
-				ALTERNATE_PERMISSIONS_FILES = "";
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = (
-					ppc,
-					i386,
-				);
-				BUILD_VARIANTS = normal;
-				BUNDLE_LOADER = "";
-				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
-				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
-				COPYING_PRESERVES_HFS_DATA = NO;
-				COPY_PHASE_STRIP = YES;
-				CURRENT_PROJECT_VERSION = "";
-				DEAD_CODE_STRIPPING = NO;
-				DEBUG_INFORMATION_FORMAT = stabs;
-				DEPLOYMENT_LOCATION = NO;
-				DEPLOYMENT_POSTPROCESSING = NO;
-				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
-				DYLIB_COMPATIBILITY_VERSION = "";
-				DYLIB_CURRENT_VERSION = "";
-				EXECUTABLE_EXTENSION = "";
-				EXECUTABLE_PREFIX = "";
-				EXPORTED_SYMBOLS_FILE = "";
-				FRAMEWORK_SEARCH_PATHS = "";
-				FRAMEWORK_VERSION = A;
-				GCC_ALTIVEC_EXTENSIONS = NO;
-				GCC_AUTO_VECTORIZATION = NO;
-				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
-				GCC_CW_ASM_SYNTAX = YES;
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_DEBUGGING_SYMBOLS = default;
-				GCC_DYNAMIC_NO_PIC = YES;
-				GCC_ENABLE_ASM_KEYWORD = YES;
-				GCC_ENABLE_CPP_EXCEPTIONS = YES;
-				GCC_ENABLE_CPP_RTTI = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_PASCAL_STRINGS = YES;
-				GCC_ENABLE_SSE3_EXTENSIONS = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = YES;
-				GCC_ENABLE_TRIGRAPHS = NO;
-				GCC_FAST_MATH = NO;
-				GCC_FAST_OBJC_DISPATCH = NO;
-				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
-				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
-				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
-				GCC_INPUT_FILETYPE = automatic;
-				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
-				GCC_MODEL_PPC64 = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_NO_COMMON_BLOCKS = NO;
-				GCC_OBJC_CALL_CXX_CDTORS = NO;
-				GCC_ONE_BYTE_BOOL = NO;
-				GCC_OPTIMIZATION_LEVEL = s;
-				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				GCC_PREPROCESSOR_DEFINITIONS = "";
-				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
-				GCC_REUSE_STRINGS = YES;
-				GCC_SHORT_ENUMS = NO;
-				GCC_STRICT_ALIASING = NO;
-				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
-				GCC_THREADSAFE_STATICS = YES;
-				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
-				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
-				GCC_UNROLL_LOOPS = NO;
-				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
-				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
-				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
-				GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
-				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
-				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
-				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
-				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
-				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
-				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
-				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
-				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
-				GCC_WARN_MISSING_PARENTHESES = NO;
-				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
-				GCC_WARN_PEDANTIC = NO;
-				GCC_WARN_SHADOW = YES;
-				GCC_WARN_SIGN_COMPARE = YES;
-				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
-				GCC_WARN_UNINITIALIZED_AUTOS = YES;
-				GCC_WARN_UNKNOWN_PRAGMAS = NO;
-				GCC_WARN_UNUSED_FUNCTION = NO;
-				GCC_WARN_UNUSED_LABEL = NO;
-				GCC_WARN_UNUSED_PARAMETER = YES;
-				GCC_WARN_UNUSED_VALUE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				GENERATE_MASTER_OBJECT_FILE = NO;
-				GENERATE_PKGINFO_FILE = NO;
-				GENERATE_PROFILING_CODE = NO;
-				HEADER_SEARCH_PATHS = "";
-				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
-				INFOPLIST_FILE = "";
-				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
-				INFOPLIST_PREFIX_HEADER = "";
-				INFOPLIST_PREPROCESS = NO;
-				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
-				INIT_ROUTINE = "";
-				INSTALL_GROUP = "$(GROUP)";
-				INSTALL_MODE_FLAG = "a-w,a+rX";
-				INSTALL_OWNER = "$(USER)";
-				INSTALL_PATH = "$(HOME)/bin";
-				KEEP_PRIVATE_EXTERNS = NO;
-				LIBRARY_SEARCH_PATHS = "";
-				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
-				LINKER_DISPLAYS_MANGLED_NAMES = NO;
-				LINK_WITH_STANDARD_LIBRARIES = YES;
-				MACH_O_TYPE = "";
-				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
-				OBJROOT = /Users/kroepke/Projects/Intermediates;
-				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
-				OTHER_CFLAGS = "";
-				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
-				PRELINK_FLAGS = "";
-				PRELINK_LIBS = "";
-				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
-				PRIVATE_HEADERS_FOLDER_PATH = "";
-				PRODUCT_NAME = treerewrite;
-				PUBLIC_HEADERS_FOLDER_PATH = "";
-				REZ_SEARCH_PATHS = "";
-				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
-				SDKROOT = macosx10.4;
-				SECTORDER_FLAGS = "";
-				SEPARATE_STRIP = NO;
-				SEPARATE_SYMBOL_EDIT = NO;
-				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
-				SKIP_INSTALL = NO;
-				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
-				STRIPFLAGS = "";
-				STRIP_INSTALLED_PRODUCT = "";
-				STRIP_STYLE = all;
-				SYMROOT = /Users/kroepke/Projects/Build;
-				TEST_HOST = "";
-				TEST_RIG = "";
-				UNEXPORTED_SYMBOLS_FILE = "";
-				USER_HEADER_SEARCH_PATHS = "";
-				VERSIONING_SYSTEM = "";
-				WARNING_CFLAGS = "";
-				WARNING_LDFLAGS = "";
-				WRAPPER_EXTENSION = "";
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		F7DD05F10A7B1640006A006C /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate\n";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/hoistedPredicates";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/hoistedPredicates";
-				ARCHS = "$(NATIVE_ARCH)";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = YES;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = hoistedPredicates;
-				ZERO_LINK = NO;
-			};
-			name = Debug;
-		};
-		F7DD05F20A7B1640006A006C /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ARCHS = "$(NATIVE_ARCH)";
-				COPY_PHASE_STRIP = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_ENABLE_SYMBOL_SEPARATION = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = hoistedPredicates;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-		F7DD077A0A7B6682006A006C /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
-				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/scopes";
-				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/scopes";
-				COPY_PHASE_STRIP = NO;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_FIX_AND_CONTINUE = YES;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
-				GCC_MODEL_TUNING = G5;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = scopes;
-				ZERO_LINK = NO;
-			};
-			name = Debug;
-		};
-		F7DD077B0A7B6682006A006C /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				COPY_PHASE_STRIP = YES;
-				GCC_ENABLE_FIX_AND_CONTINUE = NO;
-				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
-				GCC_MODEL_TUNING = G5;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
-				INSTALL_PATH = "$(HOME)/bin";
-				OTHER_LDFLAGS = (
-					"-framework",
-					Foundation,
-					"-framework",
-					AppKit,
-				);
-				PREBINDING = NO;
-				PRODUCT_NAME = scopes;
-				ZERO_LINK = NO;
-			};
-			name = Release;
-		};
-/* End XCBuildConfiguration section */
-
-/* Begin XCConfigurationList section */
-		1A0F343C12EA425700496BB8 /* Build configuration list for PBXLegacyTarget "Regenerate polydiff" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A0F343D12EA425700496BB8 /* Debug */,
-				1A0F343E12EA425700496BB8 /* Debug with StringTemplate Debug */,
-				1A0F343F12EA425700496BB8 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1A0F346212EA42D800496BB8 /* Build configuration list for PBXNativeTarget "polydiff" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A0F346312EA42D800496BB8 /* Debug */,
-				1A0F346412EA42D800496BB8 /* Debug with StringTemplate Debug */,
-				1A0F346512EA42D800496BB8 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1A12110011D3A5DB00F27B38 /* Build configuration list for PBXNativeTarget "ANTLRCommonTokenTest" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A1210FD11D3A5DB00F27B38 /* Debug */,
-				1A1210FE11D3A5DB00F27B38 /* Debug with StringTemplate Debug */,
-				1A1210FF11D3A5DB00F27B38 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1A1211D611D3BF4800F27B38 /* Build configuration list for PBXNativeTarget "ANTLRStringStreamTest" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A1211D311D3BF4700F27B38 /* Debug */,
-				1A1211D411D3BF4700F27B38 /* Debug with StringTemplate Debug */,
-				1A1211D511D3BF4700F27B38 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1A12122911D3C92500F27B38 /* Build configuration list for PBXNativeTarget "ANTLRFastQueueTest" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A12122611D3C92500F27B38 /* Debug */,
-				1A12122711D3C92500F27B38 /* Debug with StringTemplate Debug */,
-				1A12122811D3C92500F27B38 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1A1212E111D3F53700F27B38 /* Build configuration list for PBXNativeTarget "ANTLRIntArrayTest" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A1212DE11D3F53700F27B38 /* Debug */,
-				1A1212DF11D3F53700F27B38 /* Debug with StringTemplate Debug */,
-				1A1212E011D3F53700F27B38 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1A12131111D3F7CE00F27B38 /* Build configuration list for PBXNativeTarget "ANTLRCommonTreeTest" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A12130E11D3F7CE00F27B38 /* Debug */,
-				1A12130F11D3F7CE00F27B38 /* Debug with StringTemplate Debug */,
-				1A12131011D3F7CE00F27B38 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1A348B5311D2BEE9000C72FC /* Build configuration list for PBXNativeTarget "Test" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A348B5011D2BEE8000C72FC /* Debug */,
-				1A348B5111D2BEE8000C72FC /* Debug with StringTemplate Debug */,
-				1A348B5211D2BEE8000C72FC /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1A348BF111D2D0A2000C72FC /* Build configuration list for PBXNativeTarget "ANTLRBitsetTest" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A348BEE11D2D0A2000C72FC /* Debug */,
-				1A348BEF11D2D0A2000C72FC /* Debug with StringTemplate Debug */,
-				1A348BF011D2D0A2000C72FC /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1A77EE8E12E6A553007F323A /* Build configuration list for PBXNativeTarget "TreeRewriteRuleTokenStream" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A77EE8B12E6A552007F323A /* Debug */,
-				1A77EE8C12E6A552007F323A /* Debug with StringTemplate Debug */,
-				1A77EE8D12E6A552007F323A /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1AC5AC9412E7BE0400DF0C58 /* Build configuration list for PBXLegacyTarget "Regenerate treeparser" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1AC5AC9512E7BE0400DF0C58 /* Debug */,
-				1AC5AC9612E7BE0400DF0C58 /* Debug with StringTemplate Debug */,
-				1AC5AC9712E7BE0400DF0C58 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1AC5ACA312E7BEFE00DF0C58 /* Build configuration list for PBXNativeTarget "treeparser" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1AC5ACA412E7BEFE00DF0C58 /* Debug */,
-				1AC5ACA512E7BEFE00DF0C58 /* Debug with StringTemplate Debug */,
-				1AC5ACA612E7BEFE00DF0C58 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1DEB91AD08733DA50010E9CD /* Build configuration list for PBXNativeTarget "ANTLR" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1DEB91AE08733DA50010E9CD /* Debug */,
-				F7C562310CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				1DEB91AF08733DA50010E9CD /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		1DEB91B108733DA50010E9CD /* Build configuration list for PBXProject "ANTLR" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1DEB91B208733DA50010E9CD /* Debug */,
-				F7C562300CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				1DEB91B308733DA50010E9CD /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F700E86C0A5FA34D005D0757 /* Build configuration list for PBXNativeTarget "combined" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F700E86D0A5FA34D005D0757 /* Debug */,
-				F7C562380CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F700E86E0A5FA34D005D0757 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F700ECDC0A5FE1BF005D0757 /* Build configuration list for PBXNativeTarget "LL-star" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F700ECDD0A5FE1BF005D0757 /* Debug */,
-				F7C5623A0CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F700ECDE0A5FE1BF005D0757 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F7037EB80A05AFEF0070435D /* Build configuration list for PBXNativeTarget "lexertest-simple" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F7037EB90A05AFEF0070435D /* Debug */,
-				F7C562340CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F7037EBA0A05AFEF0070435D /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F72C5E660AB7E4C900282574 /* Build configuration list for PBXNativeTarget "simplectree" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F72C5E670AB7E4C900282574 /* Debug */,
-				F7C562400CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F72C5E680AB7E4C900282574 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F76287400B7151B9006AA7EF /* Build configuration list for PBXAggregateTarget "Regenerate all examples" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F76287410B7151B9006AA7EF /* Debug */,
-				F7C562330CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F76287420B7151B9006AA7EF /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F76287460B715201006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate fuzzy" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F76287470B715201006AA7EF /* Debug */,
-				F7C562370CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F76287480B715201006AA7EF /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F76287790B71557E006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate lexertest-simple" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F762877A0B71557E006AA7EF /* Debug */,
-				F7C562350CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F762877B0B71557E006AA7EF /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F762877F0B71559C006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate combined" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F76287800B71559C006AA7EF /* Debug */,
-				F7C562390CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F76287810B71559C006AA7EF /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F76287830B71559F006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate LL-star" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F76287840B71559F006AA7EF /* Debug */,
-				F7C5623B0CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F76287850B71559F006AA7EF /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F76287870B7155A2006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate hoistedPredicates" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F76287880B7155A2006AA7EF /* Debug */,
-				F7C5623D0CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F76287890B7155A2006AA7EF /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F762878B0B7155AB006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate scopes" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F762878C0B7155AB006AA7EF /* Debug */,
-				F7C5623F0CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F762878D0B7155AB006AA7EF /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F762878F0B7155AF006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate simplectree" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F76287900B7155AF006AA7EF /* Debug */,
-				F7C562410CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F76287910B7155AF006AA7EF /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F79D566F0A0E23D600EA3CEE /* Build configuration list for PBXNativeTarget "fuzzy" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F79D56700A0E23D600EA3CEE /* Debug */,
-				F7C562360CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F79D56710A0E23D600EA3CEE /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F7CD47620C64D23800FF933A /* Build configuration list for PBXLegacyTarget "Regenerate treerewrite" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F7CD47630C64D23800FF933A /* Debug */,
-				F7C562430CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F7CD47640C64D23800FF933A /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F7CD477C0C64D27000FF933A /* Build configuration list for PBXNativeTarget "treerewrite" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F7CD477D0C64D27000FF933A /* Debug */,
-				F7C562420CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F7CD477E0C64D27000FF933A /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F7DD05F00A7B1640006A006C /* Build configuration list for PBXNativeTarget "hoistedPredicates" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F7DD05F10A7B1640006A006C /* Debug */,
-				F7C5623C0CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F7DD05F20A7B1640006A006C /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-		F7DD07790A7B6682006A006C /* Build configuration list for PBXNativeTarget "scopes" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				F7DD077A0A7B6682006A006C /* Debug */,
-				F7C5623E0CD513D400727DB0 /* Debug with StringTemplate Debug */,
-				F7DD077B0A7B6682006A006C /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Debug;
-		};
-/* End XCConfigurationList section */
-	};
-	rootObject = 0867D690FE84028FC02AAC07 /* Project object */;
-}
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.pbxproj b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.pbxproj
deleted file mode 100644
index 1596463..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.pbxproj
+++ /dev/null
@@ -1,3048 +0,0 @@
-// !$*UTF8*$!
-{
-	archiveVersion = 1;
-	classes = {
-	};
-	objectVersion = 46;
-	objects = {
-
-/* Begin PBXBuildFile section */
-		1A048D21134E8C1100005F57 /* antlr3.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D01134E8C1000005F57 /* antlr3.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D22134E8C1100005F57 /* ANTLRBaseMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D02134E8C1000005F57 /* ANTLRBaseMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D23134E8C1100005F57 /* ANTLRBaseRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D03134E8C1000005F57 /* ANTLRBaseRecognizer.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D24134E8C1100005F57 /* ANTLRBaseStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D04134E8C1000005F57 /* ANTLRBaseStack.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D25134E8C1100005F57 /* ANTLRBaseTree.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D05134E8C1000005F57 /* ANTLRBaseTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D26134E8C1100005F57 /* ANTLRBaseTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D06134E8C1000005F57 /* ANTLRBaseTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D27134E8C1100005F57 /* ANTLRBitSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D07134E8C1000005F57 /* ANTLRBitSet.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D28134E8C1100005F57 /* ANTLRBufferedTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D08134E8C1000005F57 /* ANTLRBufferedTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D29134E8C1100005F57 /* ANTLRBufferedTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D09134E8C1000005F57 /* ANTLRBufferedTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D2A134E8C1100005F57 /* ANTLRCharStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D0A134E8C1000005F57 /* ANTLRCharStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D2B134E8C1100005F57 /* ANTLRCharStreamState.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D0B134E8C1000005F57 /* ANTLRCharStreamState.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D2C134E8C1100005F57 /* ANTLRCommonErrorNode.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D0C134E8C1000005F57 /* ANTLRCommonErrorNode.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D2D134E8C1100005F57 /* ANTLRCommonToken.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D0D134E8C1000005F57 /* ANTLRCommonToken.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D2E134E8C1100005F57 /* ANTLRCommonTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D0E134E8C1000005F57 /* ANTLRCommonTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D2F134E8C1100005F57 /* ANTLRCommonTree.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D0F134E8C1000005F57 /* ANTLRCommonTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D30134E8C1100005F57 /* ANTLRCommonTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D10134E8C1000005F57 /* ANTLRCommonTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D31134E8C1100005F57 /* ANTLRCommonTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D11134E8C1000005F57 /* ANTLRCommonTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D33134E8C1100005F57 /* ANTLRDebug.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D13134E8C1100005F57 /* ANTLRDebug.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D34134E8C1100005F57 /* ANTLRDebugEventListener.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D14134E8C1100005F57 /* ANTLRDebugEventListener.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D35134E8C1100005F57 /* ANTLRDebugEventProxy.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D15134E8C1100005F57 /* ANTLRDebugEventProxy.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D36134E8C1100005F57 /* ANTLRDebugParser.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D16134E8C1100005F57 /* ANTLRDebugParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D37134E8C1100005F57 /* ANTLRDebugTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D17134E8C1100005F57 /* ANTLRDebugTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D38134E8C1100005F57 /* ANTLRDebugTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D18134E8C1100005F57 /* ANTLRDebugTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D39134E8C1100005F57 /* ANTLRDebugTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D19134E8C1100005F57 /* ANTLRDebugTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D3A134E8C1100005F57 /* ANTLRDebugTreeParser.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D1A134E8C1100005F57 /* ANTLRDebugTreeParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D3B134E8C1100005F57 /* ANTLRDFA.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D1B134E8C1100005F57 /* ANTLRDFA.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D3C134E8C1100005F57 /* ANTLRDoubleKeyMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D1C134E8C1100005F57 /* ANTLRDoubleKeyMap.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D3D134E8C1100005F57 /* ANTLREarlyExitException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D1D134E8C1100005F57 /* ANTLREarlyExitException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D3E134E8C1100005F57 /* ANTLRError.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D1E134E8C1100005F57 /* ANTLRError.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D3F134E8C1100005F57 /* ANTLRFailedPredicateException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D1F134E8C1100005F57 /* ANTLRFailedPredicateException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A048D40134E8C1100005F57 /* ANTLRFastQueue.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A048D20134E8C1100005F57 /* ANTLRFastQueue.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A63BC6E134F5DE5002EDFB4 /* FuzzyLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BC6B134F5DE4002EDFB4 /* FuzzyLexer.m */; };
-		1A63BC6F134F5DE5002EDFB4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BC6C134F5DE5002EDFB4 /* main.m */; };
-		1A63BC70134F5E43002EDFB4 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1A63BD89134F5FF3002EDFB4 /* CombinedLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BC7E134F5EB1002EDFB4 /* CombinedLexer.m */; };
-		1A63BD8A134F5FF3002EDFB4 /* CombinedParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BC80134F5EB1002EDFB4 /* CombinedParser.m */; };
-		1A63BD8B134F5FF3002EDFB4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BC81134F5EB1002EDFB4 /* main.m */; };
-		1A63BD94134F606A002EDFB4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BC9E134F5EB2002EDFB4 /* main.m */; };
-		1A63BD95134F606A002EDFB4 /* TestLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCA9134F5EB2002EDFB4 /* TestLexer.m */; };
-		1A63BDAF134F614D002EDFB4 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1A63BDB4134F6154002EDFB4 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1A63BDB9134F615A002EDFB4 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1A63BDBE134F6160002EDFB4 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1A63BDC3134F6167002EDFB4 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1A63BDC7134F61E4002EDFB4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCB0134F5EB2002EDFB4 /* main.m */; };
-		1A63BDC8134F61E8002EDFB4 /* SimpleCLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCB6134F5EB2002EDFB4 /* SimpleCLexer.m */; };
-		1A63BDC9134F61EC002EDFB4 /* SimpleCParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCB8134F5EB2002EDFB4 /* SimpleCParser.m */; };
-		1A63BDCA134F6218002EDFB4 /* Main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCBC134F5EB2002EDFB4 /* Main.m */; };
-		1A63BDCB134F6218002EDFB4 /* PolyDifferentiator.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCCB134F5EB2002EDFB4 /* PolyDifferentiator.m */; };
-		1A63BDCC134F6218002EDFB4 /* PolyLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCCD134F5EB2002EDFB4 /* PolyLexer.m */; };
-		1A63BDCD134F6218002EDFB4 /* PolyParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCCF134F5EB2002EDFB4 /* PolyParser.m */; };
-		1A63BDCE134F6218002EDFB4 /* PolyPrinter.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCD2134F5EB2002EDFB4 /* PolyPrinter.m */; };
-		1A63BDCF134F6218002EDFB4 /* Simplifier.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCD6134F5EB2002EDFB4 /* Simplifier.m */; };
-		1A63BDDD134F6258002EDFB4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCDB134F5EB2002EDFB4 /* main.m */; };
-		1A63BDDE134F6258002EDFB4 /* SymbolTableLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCE7134F5EB2002EDFB4 /* SymbolTableLexer.m */; };
-		1A63BDDF134F6258002EDFB4 /* SymbolTableParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCE9134F5EB2002EDFB4 /* SymbolTableParser.m */; };
-		1A63BDE1134F626A002EDFB4 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1A63BDE7134F62CB002EDFB4 /* SimpleCLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BD00134F5EB2002EDFB4 /* SimpleCLexer.m */; };
-		1A63BDE8134F62D0002EDFB4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BCED134F5EB2002EDFB4 /* main.m */; };
-		1A63BDEA134F62D0002EDFB4 /* SimpleCTP.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BD07134F5EB2002EDFB4 /* SimpleCTP.m */; };
-		1A63BDEB134F62D0002EDFB4 /* SimpleCWalker.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BD0B134F5EB2002EDFB4 /* SimpleCWalker.m */; };
-		1A63BDEE134F932E002EDFB4 /* ANTLRIntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AE72368134E8AB4001C3F35 /* ANTLRIntStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A63BDEF134F93A5002EDFB4 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1A63BDF0134F93AC002EDFB4 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1A63BDF1134FAB4B002EDFB4 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1A63BDF2134FAB60002EDFB4 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1A63BDF3134FAB63002EDFB4 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1A63BDF4134FAF58002EDFB4 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1A63BDF5134FB55B002EDFB4 /* TreeRewriteLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BD2B134F5EB2002EDFB4 /* TreeRewriteLexer.m */; };
-		1A63BDF6134FB55F002EDFB4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BD21134F5EB2002EDFB4 /* main.m */; };
-		1A63BDF7134FB564002EDFB4 /* TreeRewriteParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BD2D134F5EB2002EDFB4 /* TreeRewriteParser.m */; };
-		1A63BE05134FB807002EDFB4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BC94134F5EB2002EDFB4 /* main.m */; };
-		1A63BE06134FB80B002EDFB4 /* TLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BC99134F5EB2002EDFB4 /* TLexer.m */; };
-		1A63BE07134FB80E002EDFB4 /* TParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BC9B134F5EB2002EDFB4 /* TParser.m */; };
-		1A63BE08134FB814002EDFB4 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1A63BE09134FB818002EDFB4 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1A63BE0C134FB855002EDFB4 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1A6B1CD7134E8CF70016A47D /* ANTLRFileStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1C97134E8CF70016A47D /* ANTLRFileStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CD8134E8CF70016A47D /* ANTLRHashMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1C98134E8CF70016A47D /* ANTLRHashMap.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CD9134E8CF70016A47D /* ANTLRHashRule.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1C99134E8CF70016A47D /* ANTLRHashRule.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CDA134E8CF70016A47D /* ANTLRInputStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1C9A134E8CF70016A47D /* ANTLRInputStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CDB134E8CF70016A47D /* ANTLRIntArray.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1C9B134E8CF70016A47D /* ANTLRIntArray.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CDC134E8CF70016A47D /* ANTLRLexer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1C9C134E8CF70016A47D /* ANTLRLexer.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CDD134E8CF70016A47D /* ANTLRLexerRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1C9D134E8CF70016A47D /* ANTLRLexerRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CDE134E8CF70016A47D /* ANTLRLexerState.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1C9E134E8CF70016A47D /* ANTLRLexerState.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CDF134E8CF70016A47D /* ANTLRLinkBase.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1C9F134E8CF70016A47D /* ANTLRLinkBase.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CE0134E8CF70016A47D /* ANTLRLookaheadStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CA0134E8CF70016A47D /* ANTLRLookaheadStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CE1134E8CF70016A47D /* ANTLRMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CA1134E8CF70016A47D /* ANTLRMap.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CE2134E8CF70016A47D /* ANTLRMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CA2134E8CF70016A47D /* ANTLRMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CE3134E8CF70016A47D /* ANTLRMismatchedNotSetException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CA3134E8CF70016A47D /* ANTLRMismatchedNotSetException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CE4134E8CF70016A47D /* ANTLRMismatchedRangeException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CA4134E8CF70016A47D /* ANTLRMismatchedRangeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CE5134E8CF70016A47D /* ANTLRMismatchedSetException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CA5134E8CF70016A47D /* ANTLRMismatchedSetException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CE6134E8CF70016A47D /* ANTLRMismatchedTokenException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CA6134E8CF70016A47D /* ANTLRMismatchedTokenException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CE7134E8CF70016A47D /* ANTLRMismatchedTreeNodeException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CA7134E8CF70016A47D /* ANTLRMismatchedTreeNodeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CE8134E8CF70016A47D /* ANTLRMissingTokenException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CA8134E8CF70016A47D /* ANTLRMissingTokenException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CE9134E8CF70016A47D /* ANTLRNodeMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CA9134E8CF70016A47D /* ANTLRNodeMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CEA134E8CF70016A47D /* ANTLRNoViableAltException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CAA134E8CF70016A47D /* ANTLRNoViableAltException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CEB134E8CF70016A47D /* ANTLRParser.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CAB134E8CF70016A47D /* ANTLRParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CEC134E8CF70016A47D /* ANTLRParserRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CAC134E8CF70016A47D /* ANTLRParserRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CED134E8CF80016A47D /* ANTLRParseTree.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CAD134E8CF70016A47D /* ANTLRParseTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CEE134E8CF80016A47D /* ANTLRPtrBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CAE134E8CF70016A47D /* ANTLRPtrBuffer.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CEF134E8CF80016A47D /* ANTLRPtrStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CAF134E8CF70016A47D /* ANTLRPtrStack.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CF0134E8CF80016A47D /* ANTLRReaderStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CB0134E8CF70016A47D /* ANTLRReaderStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CF1134E8CF80016A47D /* ANTLRRecognitionException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CB1134E8CF70016A47D /* ANTLRRecognitionException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CF2134E8CF80016A47D /* ANTLRRecognizerSharedState.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CB2134E8CF70016A47D /* ANTLRRecognizerSharedState.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CF3134E8CF80016A47D /* ANTLRRewriteRuleElementStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CB3134E8CF70016A47D /* ANTLRRewriteRuleElementStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CF4134E8CF80016A47D /* ANTLRRewriteRuleNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CB4134E8CF70016A47D /* ANTLRRewriteRuleNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CF5134E8CF80016A47D /* ANTLRRewriteRuleSubtreeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CB5134E8CF70016A47D /* ANTLRRewriteRuleSubtreeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CF6134E8CF80016A47D /* ANTLRRewriteRuleTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CB6134E8CF70016A47D /* ANTLRRewriteRuleTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CF7134E8CF80016A47D /* ANTLRRuleMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CB7134E8CF70016A47D /* ANTLRRuleMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CF8134E8CF80016A47D /* ANTLRRuleMemo.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CB8134E8CF70016A47D /* ANTLRRuleMemo.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CF9134E8CF80016A47D /* ANTLRRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CB9134E8CF70016A47D /* ANTLRRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CFA134E8CF80016A47D /* ANTLRRuleStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CBA134E8CF70016A47D /* ANTLRRuleStack.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CFB134E8CF80016A47D /* ANTLRRuntimeException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CBB134E8CF70016A47D /* ANTLRRuntimeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CFC134E8CF80016A47D /* ANTLRStreamEnumerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CBC134E8CF70016A47D /* ANTLRStreamEnumerator.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CFD134E8CF80016A47D /* ANTLRStringStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CBD134E8CF70016A47D /* ANTLRStringStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CFE134E8CF80016A47D /* ANTLRStringStreamState.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CBE134E8CF70016A47D /* ANTLRStringStreamState.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1CFF134E8CF80016A47D /* ANTLRSymbolStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CBF134E8CF70016A47D /* ANTLRSymbolStack.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D00134E8CF80016A47D /* ANTLRToken.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CC0134E8CF70016A47D /* ANTLRToken.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D01134E8CF80016A47D /* ANTLRToken+DebuggerSupport.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CC1134E8CF70016A47D /* ANTLRToken+DebuggerSupport.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D02134E8CF80016A47D /* ANTLRTokenRewriteStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CC2134E8CF70016A47D /* ANTLRTokenRewriteStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D03134E8CF80016A47D /* ANTLRTokenSource.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CC3134E8CF70016A47D /* ANTLRTokenSource.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D04134E8CF80016A47D /* ANTLRTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CC4134E8CF70016A47D /* ANTLRTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D05134E8CF80016A47D /* ANTLRTree.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CC5134E8CF70016A47D /* ANTLRTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D06134E8CF80016A47D /* ANTLRTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CC6134E8CF70016A47D /* ANTLRTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D07134E8CF80016A47D /* ANTLRTreeException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CC7134E8CF70016A47D /* ANTLRTreeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D08134E8CF90016A47D /* ANTLRTreeIterator.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CC8134E8CF70016A47D /* ANTLRTreeIterator.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D09134E8CF90016A47D /* ANTLRTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CC9134E8CF70016A47D /* ANTLRTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D0A134E8CF90016A47D /* ANTLRTreeParser.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CCA134E8CF70016A47D /* ANTLRTreeParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D0B134E8CF90016A47D /* ANTLRTreePatternLexer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CCB134E8CF70016A47D /* ANTLRTreePatternLexer.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D0C134E8CF90016A47D /* ANTLRTreePatternParser.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CCC134E8CF70016A47D /* ANTLRTreePatternParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D0D134E8CF90016A47D /* ANTLRTreeRewriter.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CCD134E8CF70016A47D /* ANTLRTreeRewriter.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D0E134E8CF90016A47D /* ANTLRTreeRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CCE134E8CF70016A47D /* ANTLRTreeRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D0F134E8CF90016A47D /* ANTLRTreeVisitor.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CCF134E8CF70016A47D /* ANTLRTreeVisitor.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D10134E8CF90016A47D /* ANTLRTreeVisitorAction.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CD0134E8CF70016A47D /* ANTLRTreeVisitorAction.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D11134E8CF90016A47D /* ANTLRTreeWizard.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CD1134E8CF70016A47D /* ANTLRTreeWizard.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D12134E8CF90016A47D /* ANTLRUnbufferedCommonTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CD2134E8CF70016A47D /* ANTLRUnbufferedCommonTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D13134E8CF90016A47D /* ANTLRUnbufferedCommonTreeNodeStreamState.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CD3134E8CF70016A47D /* ANTLRUnbufferedCommonTreeNodeStreamState.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D14134E8CF90016A47D /* ANTLRUnbufferedTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CD4134E8CF70016A47D /* ANTLRUnbufferedTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D15134E8CF90016A47D /* ANTLRUniqueIDMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CD5134E8CF70016A47D /* ANTLRUniqueIDMap.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D16134E8CF90016A47D /* ANTLRUnwantedTokenException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6B1CD6134E8CF70016A47D /* ANTLRUnwantedTokenException.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1A6B1D64134E8DEB0016A47D /* ANTLRFastQueueTest.h in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D4A134E8DA10016A47D /* ANTLRFastQueueTest.h */; };
-		1A6B1D65134E8DEB0016A47D /* ANTLRFastQueueTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D4B134E8DA10016A47D /* ANTLRFastQueueTest.m */; };
-		1A6B1D66134E8DEB0016A47D /* ANTLRIntArrayTest.h in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D4C134E8DA10016A47D /* ANTLRIntArrayTest.h */; };
-		1A6B1D67134E8DEB0016A47D /* ANTLRIntArrayTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D4D134E8DA10016A47D /* ANTLRIntArrayTest.m */; };
-		1A6B1D68134E8DEB0016A47D /* ANTLRRecognizerTest.h in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D4F134E8DA10016A47D /* ANTLRRecognizerTest.h */; };
-		1A6B1D69134E8DEB0016A47D /* ANTLRRecognizerTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D50134E8DA10016A47D /* ANTLRRecognizerTest.m */; };
-		1A6B1D6A134E8DEB0016A47D /* ANTLRBitSetTest.h in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D52134E8DA10016A47D /* ANTLRBitSetTest.h */; };
-		1A6B1D6B134E8DEB0016A47D /* ANTLRBitSetTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D53134E8DA10016A47D /* ANTLRBitSetTest.m */; };
-		1A6B1D6C134E8DEB0016A47D /* ANTLRStringStreamTest.h in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D55134E8DA10016A47D /* ANTLRStringStreamTest.h */; };
-		1A6B1D6D134E8DEB0016A47D /* ANTLRStringStreamTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D56134E8DA10016A47D /* ANTLRStringStreamTest.m */; };
-		1A6B1D6E134E8DEB0016A47D /* TestRewriteRuleTokenStream.h in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D58134E8DA10016A47D /* TestRewriteRuleTokenStream.h */; };
-		1A6B1D6F134E8DEB0016A47D /* TestRewriteRuleTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D59134E8DA10016A47D /* TestRewriteRuleTokenStream.m */; };
-		1A6B1D70134E8DEB0016A47D /* ANTLRCommonTokenTest.h in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D5B134E8DA10016A47D /* ANTLRCommonTokenTest.h */; };
-		1A6B1D71134E8DEB0016A47D /* ANTLRCommonTokenTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D5C134E8DA10016A47D /* ANTLRCommonTokenTest.m */; };
-		1A6B1D72134E8DEB0016A47D /* ANTLRCommonErrorNodeTest.h in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D5E134E8DA10016A47D /* ANTLRCommonErrorNodeTest.h */; };
-		1A6B1D73134E8DEB0016A47D /* ANTLRCommonErrorNodeTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D5F134E8DA10016A47D /* ANTLRCommonErrorNodeTest.m */; };
-		1A6B1D74134E8DEB0016A47D /* ANTLRCommonTreeAdaptorTest.h in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D60134E8DA10016A47D /* ANTLRCommonTreeAdaptorTest.h */; };
-		1A6B1D75134E8DEC0016A47D /* ANTLRCommonTreeAdaptorTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D61134E8DA10016A47D /* ANTLRCommonTreeAdaptorTest.m */; };
-		1A6B1D76134E8DEC0016A47D /* ANTLRCommonTreeTest.h in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D62134E8DA10016A47D /* ANTLRCommonTreeTest.h */; };
-		1A6B1D77134E8DEC0016A47D /* ANTLRCommonTreeTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6B1D63134E8DA10016A47D /* ANTLRCommonTreeTest.m */; };
-		1A6B1D79134EA0970016A47D /* SenTestingKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A6B1D78134EA0970016A47D /* SenTestingKit.framework */; };
-		1A76A02F134FD4160041634F /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BD1B134F5EB2002EDFB4 /* main.m */; };
-		1A76A030134FD4A00041634F /* LangLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BD17134F5EB2002EDFB4 /* LangLexer.m */; };
-		1A76A031134FD4A40041634F /* LangParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BD19134F5EB2002EDFB4 /* LangParser.m */; };
-		1A76A032134FD4B90041634F /* LangDumpDecl.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A63BD14134F5EB2002EDFB4 /* LangDumpDecl.m */; };
-		1AAC1C3D134FD6A500B2DC68 /* ANTLR.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AAC1C3C134FD6A500B2DC68 /* ANTLR.h */; settings = {ATTRIBUTES = (Public, ); }; };
-		1AB7FE15134FBF900059474B /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1AB7FE16134FBF9F0059474B /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1AB7FE17134FBFB20059474B /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1AB7FE18134FC0800059474B /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1AE7231C134E860B001C3F35 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1AE72326134E860B001C3F35 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 1AE72324134E860B001C3F35 /* InfoPlist.strings */; };
-		1AE7232E134E860B001C3F35 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE7231B134E860B001C3F35 /* Cocoa.framework */; };
-		1AE72331134E860B001C3F35 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72318134E860B001C3F35 /* ANTLR.framework */; };
-		1AE72337134E860B001C3F35 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 1AE72335134E860B001C3F35 /* InfoPlist.strings */; };
-		1AE7233A134E860B001C3F35 /* ANTLRTests.h in Resources */ = {isa = PBXBuildFile; fileRef = 1AE72339134E860B001C3F35 /* ANTLRTests.h */; };
-		1AE7233C134E860B001C3F35 /* ANTLRTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7233B134E860B001C3F35 /* ANTLRTests.m */; };
-		1AE72347134E89BF001C3F35 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72345134E89BF001C3F35 /* CoreFoundation.framework */; };
-		1AE72348134E89BF001C3F35 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1AE72346134E89BF001C3F35 /* Foundation.framework */; };
-		1AE7239F134E8AB4001C3F35 /* ANTLRBaseMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72349134E8AB4001C3F35 /* ANTLRBaseMapElement.m */; };
-		1AE723A0134E8AB4001C3F35 /* ANTLRBaseRecognizer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7234A134E8AB4001C3F35 /* ANTLRBaseRecognizer.m */; };
-		1AE723A1134E8AB4001C3F35 /* ANTLRBaseStack.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7234B134E8AB4001C3F35 /* ANTLRBaseStack.m */; };
-		1AE723A2134E8AB4001C3F35 /* ANTLRBaseTree.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7234C134E8AB4001C3F35 /* ANTLRBaseTree.m */; };
-		1AE723A3134E8AB4001C3F35 /* ANTLRBaseTreeAdaptor.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7234D134E8AB4001C3F35 /* ANTLRBaseTreeAdaptor.m */; };
-		1AE723A4134E8AB4001C3F35 /* ANTLRBitSet.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7234E134E8AB4001C3F35 /* ANTLRBitSet.m */; };
-		1AE723A5134E8AB4001C3F35 /* ANTLRBufferedTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7234F134E8AB4001C3F35 /* ANTLRBufferedTokenStream.m */; };
-		1AE723A6134E8AB4001C3F35 /* ANTLRBufferedTreeNodeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72350134E8AB4001C3F35 /* ANTLRBufferedTreeNodeStream.m */; };
-		1AE723A7134E8AB4001C3F35 /* ANTLRCharStreamState.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72351134E8AB4001C3F35 /* ANTLRCharStreamState.m */; };
-		1AE723A8134E8AB4001C3F35 /* ANTLRCommonErrorNode.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72352134E8AB4001C3F35 /* ANTLRCommonErrorNode.m */; };
-		1AE723A9134E8AB4001C3F35 /* ANTLRCommonToken.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72353134E8AB4001C3F35 /* ANTLRCommonToken.m */; };
-		1AE723AA134E8AB4001C3F35 /* ANTLRCommonTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72354134E8AB4001C3F35 /* ANTLRCommonTokenStream.m */; };
-		1AE723AB134E8AB4001C3F35 /* ANTLRCommonTree.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72355134E8AB4001C3F35 /* ANTLRCommonTree.m */; };
-		1AE723AC134E8AB4001C3F35 /* ANTLRCommonTreeAdaptor.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72356134E8AB4001C3F35 /* ANTLRCommonTreeAdaptor.m */; };
-		1AE723AD134E8AB4001C3F35 /* ANTLRCommonTreeNodeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72357134E8AB4001C3F35 /* ANTLRCommonTreeNodeStream.m */; };
-		1AE723AE134E8AB4001C3F35 /* ANTLRDebugEventProxy.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72358134E8AB4001C3F35 /* ANTLRDebugEventProxy.m */; };
-		1AE723AF134E8AB4001C3F35 /* ANTLRDebugParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72359134E8AB4001C3F35 /* ANTLRDebugParser.m */; };
-		1AE723B0134E8AB4001C3F35 /* ANTLRDebugTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7235A134E8AB4001C3F35 /* ANTLRDebugTokenStream.m */; };
-		1AE723B1134E8AB4001C3F35 /* ANTLRDebugTreeAdaptor.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7235B134E8AB4001C3F35 /* ANTLRDebugTreeAdaptor.m */; };
-		1AE723B2134E8AB4001C3F35 /* ANTLRDebugTreeNodeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7235C134E8AB4001C3F35 /* ANTLRDebugTreeNodeStream.m */; };
-		1AE723B3134E8AB4001C3F35 /* ANTLRDebugTreeParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7235D134E8AB4001C3F35 /* ANTLRDebugTreeParser.m */; };
-		1AE723B4134E8AB4001C3F35 /* ANTLRDFA.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7235E134E8AB4001C3F35 /* ANTLRDFA.m */; };
-		1AE723B5134E8AB4001C3F35 /* ANTLRDoubleKeyMap.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7235F134E8AB4001C3F35 /* ANTLRDoubleKeyMap.m */; };
-		1AE723B6134E8AB4001C3F35 /* ANTLREarlyExitException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72360134E8AB4001C3F35 /* ANTLREarlyExitException.m */; };
-		1AE723B7134E8AB4001C3F35 /* ANTLRFailedPredicateException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72361134E8AB4001C3F35 /* ANTLRFailedPredicateException.m */; };
-		1AE723B8134E8AB4001C3F35 /* ANTLRFastQueue.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72362134E8AB4001C3F35 /* ANTLRFastQueue.m */; };
-		1AE723B9134E8AB4001C3F35 /* ANTLRFileStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72363134E8AB4001C3F35 /* ANTLRFileStream.m */; };
-		1AE723BA134E8AB5001C3F35 /* ANTLRHashMap.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72364134E8AB4001C3F35 /* ANTLRHashMap.m */; };
-		1AE723BB134E8AB5001C3F35 /* ANTLRHashRule.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72365134E8AB4001C3F35 /* ANTLRHashRule.m */; };
-		1AE723BC134E8AB5001C3F35 /* ANTLRInputStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72366134E8AB4001C3F35 /* ANTLRInputStream.m */; };
-		1AE723BD134E8AB5001C3F35 /* ANTLRIntArray.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72367134E8AB4001C3F35 /* ANTLRIntArray.m */; };
-		1AE723BF134E8AB5001C3F35 /* ANTLRLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72369134E8AB4001C3F35 /* ANTLRLexer.m */; };
-		1AE723C0134E8AB5001C3F35 /* ANTLRLexerRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7236A134E8AB4001C3F35 /* ANTLRLexerRuleReturnScope.m */; };
-		1AE723C1134E8AB5001C3F35 /* ANTLRLexerState.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7236B134E8AB4001C3F35 /* ANTLRLexerState.m */; };
-		1AE723C2134E8AB5001C3F35 /* ANTLRLinkBase.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7236C134E8AB4001C3F35 /* ANTLRLinkBase.m */; };
-		1AE723C3134E8AB5001C3F35 /* ANTLRLookaheadStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7236D134E8AB4001C3F35 /* ANTLRLookaheadStream.m */; };
-		1AE723C4134E8AB5001C3F35 /* ANTLRMap.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7236E134E8AB4001C3F35 /* ANTLRMap.m */; };
-		1AE723C5134E8AB5001C3F35 /* ANTLRMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7236F134E8AB4001C3F35 /* ANTLRMapElement.m */; };
-		1AE723C6134E8AB5001C3F35 /* ANTLRMismatchedNotSetException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72370134E8AB4001C3F35 /* ANTLRMismatchedNotSetException.m */; };
-		1AE723C7134E8AB5001C3F35 /* ANTLRMismatchedRangeException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72371134E8AB4001C3F35 /* ANTLRMismatchedRangeException.m */; };
-		1AE723C8134E8AB5001C3F35 /* ANTLRMismatchedSetException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72372134E8AB4001C3F35 /* ANTLRMismatchedSetException.m */; };
-		1AE723C9134E8AB5001C3F35 /* ANTLRMismatchedTokenException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72373134E8AB4001C3F35 /* ANTLRMismatchedTokenException.m */; };
-		1AE723CA134E8AB5001C3F35 /* ANTLRMismatchedTreeNodeException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72374134E8AB4001C3F35 /* ANTLRMismatchedTreeNodeException.m */; };
-		1AE723CB134E8AB5001C3F35 /* ANTLRMissingTokenException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72375134E8AB4001C3F35 /* ANTLRMissingTokenException.m */; };
-		1AE723CC134E8AB5001C3F35 /* ANTLRNodeMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72376134E8AB4001C3F35 /* ANTLRNodeMapElement.m */; };
-		1AE723CD134E8AB5001C3F35 /* ANTLRNoViableAltException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72377134E8AB4001C3F35 /* ANTLRNoViableAltException.m */; };
-		1AE723CE134E8AB5001C3F35 /* ANTLRParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72378134E8AB4001C3F35 /* ANTLRParser.m */; };
-		1AE723CF134E8AB5001C3F35 /* ANTLRParserRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72379134E8AB4001C3F35 /* ANTLRParserRuleReturnScope.m */; };
-		1AE723D0134E8AB5001C3F35 /* ANTLRParseTree.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7237A134E8AB4001C3F35 /* ANTLRParseTree.m */; };
-		1AE723D1134E8AB5001C3F35 /* ANTLRPtrBuffer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7237B134E8AB4001C3F35 /* ANTLRPtrBuffer.m */; };
-		1AE723D2134E8AB5001C3F35 /* ANTLRPtrStack.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7237C134E8AB4001C3F35 /* ANTLRPtrStack.m */; };
-		1AE723D3134E8AB5001C3F35 /* ANTLRReaderStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7237D134E8AB4001C3F35 /* ANTLRReaderStream.m */; };
-		1AE723D4134E8AB5001C3F35 /* ANTLRRecognitionException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7237E134E8AB4001C3F35 /* ANTLRRecognitionException.m */; };
-		1AE723D5134E8AB5001C3F35 /* ANTLRRecognizerSharedState.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7237F134E8AB4001C3F35 /* ANTLRRecognizerSharedState.m */; };
-		1AE723D6134E8AB5001C3F35 /* ANTLRRewriteRuleElementStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72380134E8AB4001C3F35 /* ANTLRRewriteRuleElementStream.m */; };
-		1AE723D7134E8AB5001C3F35 /* ANTLRRewriteRuleNodeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72381134E8AB4001C3F35 /* ANTLRRewriteRuleNodeStream.m */; };
-		1AE723D8134E8AB5001C3F35 /* ANTLRRewriteRuleSubtreeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72382134E8AB4001C3F35 /* ANTLRRewriteRuleSubtreeStream.m */; };
-		1AE723D9134E8AB5001C3F35 /* ANTLRRewriteRuleTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72383134E8AB4001C3F35 /* ANTLRRewriteRuleTokenStream.m */; };
-		1AE723DA134E8AB5001C3F35 /* ANTLRRuleMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72384134E8AB4001C3F35 /* ANTLRRuleMapElement.m */; };
-		1AE723DB134E8AB5001C3F35 /* ANTLRRuleMemo.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72385134E8AB4001C3F35 /* ANTLRRuleMemo.m */; };
-		1AE723DC134E8AB5001C3F35 /* ANTLRRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72386134E8AB4001C3F35 /* ANTLRRuleReturnScope.m */; };
-		1AE723DD134E8AB5001C3F35 /* ANTLRRuleStack.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72387134E8AB4001C3F35 /* ANTLRRuleStack.m */; };
-		1AE723DE134E8AB5001C3F35 /* ANTLRRuntimeException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72388134E8AB4001C3F35 /* ANTLRRuntimeException.m */; };
-		1AE723DF134E8AB5001C3F35 /* ANTLRStreamEnumerator.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72389134E8AB4001C3F35 /* ANTLRStreamEnumerator.m */; };
-		1AE723E0134E8AB5001C3F35 /* ANTLRStringStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7238A134E8AB4001C3F35 /* ANTLRStringStream.m */; };
-		1AE723E1134E8AB5001C3F35 /* ANTLRSymbolStack.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7238B134E8AB4001C3F35 /* ANTLRSymbolStack.m */; };
-		1AE723E2134E8AB5001C3F35 /* ANTLRToken+DebuggerSupport.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7238C134E8AB4001C3F35 /* ANTLRToken+DebuggerSupport.m */; };
-		1AE723E3134E8AB6001C3F35 /* ANTLRTokenRewriteStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7238D134E8AB4001C3F35 /* ANTLRTokenRewriteStream.m */; };
-		1AE723E5134E8AB6001C3F35 /* ANTLRTreeAdaptor.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7238F134E8AB4001C3F35 /* ANTLRTreeAdaptor.m */; };
-		1AE723E6134E8AB6001C3F35 /* ANTLRTreeException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72390134E8AB4001C3F35 /* ANTLRTreeException.m */; };
-		1AE723E7134E8AB6001C3F35 /* ANTLRTreeIterator.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72391134E8AB4001C3F35 /* ANTLRTreeIterator.m */; };
-		1AE723E8134E8AB6001C3F35 /* ANTLRTreeParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72392134E8AB4001C3F35 /* ANTLRTreeParser.m */; };
-		1AE723E9134E8AB6001C3F35 /* ANTLRTreePatternLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72393134E8AB4001C3F35 /* ANTLRTreePatternLexer.m */; };
-		1AE723EA134E8AB6001C3F35 /* ANTLRTreePatternParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72394134E8AB4001C3F35 /* ANTLRTreePatternParser.m */; };
-		1AE723EB134E8AB6001C3F35 /* ANTLRTreeRewriter.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72395134E8AB4001C3F35 /* ANTLRTreeRewriter.m */; };
-		1AE723EC134E8AB6001C3F35 /* ANTLRTreeRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72396134E8AB4001C3F35 /* ANTLRTreeRuleReturnScope.m */; };
-		1AE723ED134E8AB6001C3F35 /* ANTLRTreeVisitor.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72397134E8AB4001C3F35 /* ANTLRTreeVisitor.m */; };
-		1AE723EE134E8AB6001C3F35 /* ANTLRTreeVisitorAction.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72398134E8AB4001C3F35 /* ANTLRTreeVisitorAction.m */; };
-		1AE723EF134E8AB6001C3F35 /* ANTLRTreeWizard.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE72399134E8AB4001C3F35 /* ANTLRTreeWizard.m */; };
-		1AE723F0134E8AB6001C3F35 /* ANTLRUnbufferedCommonTreeNodeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7239A134E8AB4001C3F35 /* ANTLRUnbufferedCommonTreeNodeStream.m */; };
-		1AE723F1134E8AB6001C3F35 /* ANTLRUnbufferedCommonTreeNodeStreamState.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7239B134E8AB4001C3F35 /* ANTLRUnbufferedCommonTreeNodeStreamState.m */; };
-		1AE723F2134E8AB6001C3F35 /* ANTLRUnbufferedTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7239C134E8AB4001C3F35 /* ANTLRUnbufferedTokenStream.m */; };
-		1AE723F3134E8AB6001C3F35 /* ANTLRUniqueIDMap.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7239D134E8AB4001C3F35 /* ANTLRUniqueIDMap.m */; };
-		1AE723F4134E8AB6001C3F35 /* ANTLRUnwantedTokenException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE7239E134E8AB4001C3F35 /* ANTLRUnwantedTokenException.m */; };
-/* End PBXBuildFile section */
-
-/* Begin PBXBuildRule section */
-		1A63BDEC134F649F002EDFB4 /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			name = .g.m;
-			outputFiles = (
-				$1Lexer.h,
-				$1Lexer.m,
-				$1Parser.h,
-				$1Parser.m,
-			);
-			script = "-jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
-		};
-		1A63BDED134F6810002EDFB4 /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			outputFiles = (
-				$1Lexer.h,
-				$1Lexer.m,
-				$1Parser.h,
-				$1Parser.m,
-			);
-			script = "-jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
-		};
-		1A76A02C134FC7540041634F /* PBXBuildRule */ = {
-			isa = PBXBuildRule;
-			compilerSpec = com.apple.compilers.proxy.script;
-			fileType = pattern.proxy;
-			isEditable = 1;
-			name = "Files '.g.m' using Script";
-			outputFiles = (
-				treeRewriteLexer.h,
-				treeRewriteLexer.m,
-				treeRewriteParser.h,
-				treeRewriteParser.m,
-			);
-			script = "-jar /Library/Java/Extensions/antlr-3.3.1.jar treeRewrite.g";
-		};
-/* End PBXBuildRule section */
-
-/* Begin PBXContainerItemProxy section */
-		1A63BD9E134F6093002EDFB4 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-		1A63BDA0134F609B002EDFB4 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-		1A63BDA2134F60A7002EDFB4 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-		1A63BDA4134F60B0002EDFB4 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-		1A63BDA6134F60BC002EDFB4 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-		1A63BDA8134F60C3002EDFB4 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-		1A63BDAA134F60CC002EDFB4 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-		1A63BDAC134F60D2002EDFB4 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-		1A63BDE5134F629B002EDFB4 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-		1A63BE0A134FB824002EDFB4 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-		1AE7232F134E860B001C3F35 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 1AE7230E134E860A001C3F35 /* Project object */;
-			proxyType = 1;
-			remoteGlobalIDString = 1AE72317134E860B001C3F35;
-			remoteInfo = ANTLR;
-		};
-/* End PBXContainerItemProxy section */
-
-/* Begin PBXCopyFilesBuildPhase section */
-		1A63BC5F134F5DAB002EDFB4 /* CopyFiles */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = /usr/share/man/man1/;
-			dstSubfolderSpec = 0;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 1;
-		};
-		1A63BD30134F5F1E002EDFB4 /* CopyFiles */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = /usr/share/man/man1/;
-			dstSubfolderSpec = 0;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 1;
-		};
-		1A63BD3D134F5F36002EDFB4 /* CopyFiles */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = /usr/share/man/man1/;
-			dstSubfolderSpec = 0;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 1;
-		};
-		1A63BD4A134F5F43002EDFB4 /* CopyFiles */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = /usr/share/man/man1/;
-			dstSubfolderSpec = 0;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 1;
-		};
-		1A63BD57134F5F4D002EDFB4 /* CopyFiles */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = /usr/share/man/man1/;
-			dstSubfolderSpec = 0;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 1;
-		};
-		1A63BD64134F5F5E002EDFB4 /* CopyFiles */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = /usr/share/man/man1/;
-			dstSubfolderSpec = 0;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 1;
-		};
-		1A63BD71134F5F67002EDFB4 /* CopyFiles */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = /usr/share/man/man1/;
-			dstSubfolderSpec = 0;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 1;
-		};
-		1A63BD7E134F5F71002EDFB4 /* Copy Files */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = /usr/share/man/man1/;
-			dstSubfolderSpec = 0;
-			files = (
-			);
-			name = "Copy Files";
-			runOnlyForDeploymentPostprocessing = 1;
-		};
-		1A63BDD2134F6233002EDFB4 /* CopyFiles */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = /usr/share/man/man1/;
-			dstSubfolderSpec = 0;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 1;
-		};
-		1A63BDFA134FB75E002EDFB4 /* CopyFiles */ = {
-			isa = PBXCopyFilesBuildPhase;
-			buildActionMask = 2147483647;
-			dstPath = /usr/share/man/man1/;
-			dstSubfolderSpec = 0;
-			files = (
-			);
-			runOnlyForDeploymentPostprocessing = 1;
-		};
-/* End PBXCopyFilesBuildPhase section */
-
-/* Begin PBXFileReference section */
-		1A048D01134E8C1000005F57 /* antlr3.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = antlr3.h; path = ../antlr3.h; sourceTree = "<group>"; };
-		1A048D02134E8C1000005F57 /* ANTLRBaseMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRBaseMapElement.h; path = ../ANTLRBaseMapElement.h; sourceTree = "<group>"; };
-		1A048D03134E8C1000005F57 /* ANTLRBaseRecognizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRBaseRecognizer.h; path = ../ANTLRBaseRecognizer.h; sourceTree = "<group>"; };
-		1A048D04134E8C1000005F57 /* ANTLRBaseStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRBaseStack.h; path = ../ANTLRBaseStack.h; sourceTree = "<group>"; };
-		1A048D05134E8C1000005F57 /* ANTLRBaseTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRBaseTree.h; path = ../ANTLRBaseTree.h; sourceTree = "<group>"; };
-		1A048D06134E8C1000005F57 /* ANTLRBaseTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRBaseTreeAdaptor.h; path = ../ANTLRBaseTreeAdaptor.h; sourceTree = "<group>"; };
-		1A048D07134E8C1000005F57 /* ANTLRBitSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRBitSet.h; path = ../ANTLRBitSet.h; sourceTree = "<group>"; };
-		1A048D08134E8C1000005F57 /* ANTLRBufferedTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRBufferedTokenStream.h; path = ../ANTLRBufferedTokenStream.h; sourceTree = "<group>"; };
-		1A048D09134E8C1000005F57 /* ANTLRBufferedTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRBufferedTreeNodeStream.h; path = ../ANTLRBufferedTreeNodeStream.h; sourceTree = "<group>"; };
-		1A048D0A134E8C1000005F57 /* ANTLRCharStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRCharStream.h; path = ../ANTLRCharStream.h; sourceTree = "<group>"; };
-		1A048D0B134E8C1000005F57 /* ANTLRCharStreamState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRCharStreamState.h; path = ../ANTLRCharStreamState.h; sourceTree = "<group>"; };
-		1A048D0C134E8C1000005F57 /* ANTLRCommonErrorNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRCommonErrorNode.h; path = ../ANTLRCommonErrorNode.h; sourceTree = "<group>"; };
-		1A048D0D134E8C1000005F57 /* ANTLRCommonToken.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRCommonToken.h; path = ../ANTLRCommonToken.h; sourceTree = "<group>"; };
-		1A048D0E134E8C1000005F57 /* ANTLRCommonTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRCommonTokenStream.h; path = ../ANTLRCommonTokenStream.h; sourceTree = "<group>"; };
-		1A048D0F134E8C1000005F57 /* ANTLRCommonTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRCommonTree.h; path = ../ANTLRCommonTree.h; sourceTree = "<group>"; };
-		1A048D10134E8C1000005F57 /* ANTLRCommonTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRCommonTreeAdaptor.h; path = ../ANTLRCommonTreeAdaptor.h; sourceTree = "<group>"; };
-		1A048D11134E8C1000005F57 /* ANTLRCommonTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRCommonTreeNodeStream.h; path = ../ANTLRCommonTreeNodeStream.h; sourceTree = "<group>"; };
-		1A048D12134E8C1100005F57 /* ANTLRCommonTreeTest-Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; name = "ANTLRCommonTreeTest-Info.plist"; path = "../ANTLRCommonTreeTest-Info.plist"; sourceTree = "<group>"; };
-		1A048D13134E8C1100005F57 /* ANTLRDebug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRDebug.h; path = ../ANTLRDebug.h; sourceTree = "<group>"; };
-		1A048D14134E8C1100005F57 /* ANTLRDebugEventListener.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRDebugEventListener.h; path = ../ANTLRDebugEventListener.h; sourceTree = "<group>"; };
-		1A048D15134E8C1100005F57 /* ANTLRDebugEventProxy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRDebugEventProxy.h; path = ../ANTLRDebugEventProxy.h; sourceTree = "<group>"; };
-		1A048D16134E8C1100005F57 /* ANTLRDebugParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRDebugParser.h; path = ../ANTLRDebugParser.h; sourceTree = "<group>"; };
-		1A048D17134E8C1100005F57 /* ANTLRDebugTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRDebugTokenStream.h; path = ../ANTLRDebugTokenStream.h; sourceTree = "<group>"; };
-		1A048D18134E8C1100005F57 /* ANTLRDebugTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRDebugTreeAdaptor.h; path = ../ANTLRDebugTreeAdaptor.h; sourceTree = "<group>"; };
-		1A048D19134E8C1100005F57 /* ANTLRDebugTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRDebugTreeNodeStream.h; path = ../ANTLRDebugTreeNodeStream.h; sourceTree = "<group>"; };
-		1A048D1A134E8C1100005F57 /* ANTLRDebugTreeParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRDebugTreeParser.h; path = ../ANTLRDebugTreeParser.h; sourceTree = "<group>"; };
-		1A048D1B134E8C1100005F57 /* ANTLRDFA.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRDFA.h; path = ../ANTLRDFA.h; sourceTree = "<group>"; };
-		1A048D1C134E8C1100005F57 /* ANTLRDoubleKeyMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRDoubleKeyMap.h; path = ../ANTLRDoubleKeyMap.h; sourceTree = "<group>"; };
-		1A048D1D134E8C1100005F57 /* ANTLREarlyExitException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLREarlyExitException.h; path = ../ANTLREarlyExitException.h; sourceTree = "<group>"; };
-		1A048D1E134E8C1100005F57 /* ANTLRError.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRError.h; path = ../ANTLRError.h; sourceTree = "<group>"; };
-		1A048D1F134E8C1100005F57 /* ANTLRFailedPredicateException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRFailedPredicateException.h; path = ../ANTLRFailedPredicateException.h; sourceTree = "<group>"; };
-		1A048D20134E8C1100005F57 /* ANTLRFastQueue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRFastQueue.h; path = ../ANTLRFastQueue.h; sourceTree = "<group>"; };
-		1A63BC61134F5DAB002EDFB4 /* Fuzzy */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = Fuzzy; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A63BC6B134F5DE4002EDFB4 /* FuzzyLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = FuzzyLexer.m; sourceTree = "<group>"; };
-		1A63BC6C134F5DE5002EDFB4 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		1A63BC79134F5EB1002EDFB4 /* antlr3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = antlr3.h; sourceTree = "<group>"; };
-		1A63BC7A134F5EB1002EDFB4 /* Combined.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = Combined.g; sourceTree = "<group>"; };
-		1A63BC7B134F5EB1002EDFB4 /* Combined.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Combined.tokens; sourceTree = "<group>"; };
-		1A63BC7C134F5EB1002EDFB4 /* Combined__.gl */ = {isa = PBXFileReference; lastKnownFileType = text; path = Combined__.gl; sourceTree = "<group>"; };
-		1A63BC7D134F5EB1002EDFB4 /* CombinedLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = CombinedLexer.h; sourceTree = "<group>"; };
-		1A63BC7E134F5EB1002EDFB4 /* CombinedLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = CombinedLexer.m; sourceTree = "<group>"; };
-		1A63BC7F134F5EB1002EDFB4 /* CombinedParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = CombinedParser.h; sourceTree = "<group>"; };
-		1A63BC80134F5EB1002EDFB4 /* CombinedParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = CombinedParser.m; sourceTree = "<group>"; };
-		1A63BC81134F5EB1002EDFB4 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		1A63BC83134F5EB1002EDFB4 /* Combined.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Combined.tokens; sourceTree = "<group>"; };
-		1A63BC85134F5EB1002EDFB4 /* antlr3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = antlr3.h; sourceTree = "<group>"; };
-		1A63BC86134F5EB1002EDFB4 /* Fuzzy.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = Fuzzy.g; sourceTree = "<group>"; };
-		1A63BC88134F5EB2002EDFB4 /* Fuzzy.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Fuzzy.tokens; sourceTree = "<group>"; };
-		1A63BC89134F5EB2002EDFB4 /* FuzzyLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = FuzzyLexer.h; sourceTree = "<group>"; };
-		1A63BC8B134F5EB2002EDFB4 /* input */ = {isa = PBXFileReference; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		1A63BC8E134F5EB2002EDFB4 /* Fuzzy.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Fuzzy.m; sourceTree = "<group>"; };
-		1A63BC8F134F5EB2002EDFB4 /* Fuzzy.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Fuzzy.tokens; sourceTree = "<group>"; };
-		1A63BC90134F5EB2002EDFB4 /* FuzzyLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = FuzzyLexer.h; sourceTree = "<group>"; };
-		1A63BC92134F5EB2002EDFB4 /* antlr3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = antlr3.h; sourceTree = "<group>"; };
-		1A63BC93134F5EB2002EDFB4 /* input */ = {isa = PBXFileReference; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		1A63BC94134F5EB2002EDFB4 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		1A63BC95134F5EB2002EDFB4 /* output */ = {isa = PBXFileReference; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		1A63BC96134F5EB2002EDFB4 /* T.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = T.g; sourceTree = "<group>"; };
-		1A63BC97134F5EB2002EDFB4 /* T.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = T.tokens; sourceTree = "<group>"; };
-		1A63BC98134F5EB2002EDFB4 /* TLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TLexer.h; sourceTree = "<group>"; };
-		1A63BC99134F5EB2002EDFB4 /* TLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = TLexer.m; sourceTree = "<group>"; };
-		1A63BC9A134F5EB2002EDFB4 /* TParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TParser.h; sourceTree = "<group>"; };
-		1A63BC9B134F5EB2002EDFB4 /* TParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = TParser.m; sourceTree = "<group>"; };
-		1A63BC9D134F5EB2002EDFB4 /* antlr3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = antlr3.h; sourceTree = "<group>"; };
-		1A63BC9E134F5EB2002EDFB4 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		1A63BCA0134F5EB2002EDFB4 /* Test.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Test.tokens; sourceTree = "<group>"; };
-		1A63BCA1134F5EB2002EDFB4 /* TestLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TestLexer.h; sourceTree = "<group>"; };
-		1A63BCA2134F5EB2002EDFB4 /* Testlexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Testlexer.m; sourceTree = "<group>"; };
-		1A63BCA3134F5EB2002EDFB4 /* TestLexer.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = TestLexer.tokens; sourceTree = "<group>"; };
-		1A63BCA4134F5EB2002EDFB4 /* TestLexerLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TestLexerLexer.h; sourceTree = "<group>"; };
-		1A63BCA5134F5EB2002EDFB4 /* Test.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Test.tokens; sourceTree = "<group>"; };
-		1A63BCA6134F5EB2002EDFB4 /* TestLexer.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = TestLexer.g; sourceTree = "<group>"; };
-		1A63BCA7134F5EB2002EDFB4 /* TestLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TestLexer.h; sourceTree = "<group>"; };
-		1A63BCA8134F5EB2002EDFB4 /* TestLexer.h.old */ = {isa = PBXFileReference; lastKnownFileType = text; path = TestLexer.h.old; sourceTree = "<group>"; };
-		1A63BCA9134F5EB2002EDFB4 /* TestLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = TestLexer.m; sourceTree = "<group>"; };
-		1A63BCAA134F5EB2002EDFB4 /* TestLexer.m.old */ = {isa = PBXFileReference; lastKnownFileType = text; path = TestLexer.m.old; sourceTree = "<group>"; };
-		1A63BCAB134F5EB2002EDFB4 /* TestLexer.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = TestLexer.tokens; sourceTree = "<group>"; };
-		1A63BCAC134F5EB2002EDFB4 /* TestLexerLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TestLexerLexer.h; sourceTree = "<group>"; };
-		1A63BCAE134F5EB2002EDFB4 /* antlr3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = antlr3.h; sourceTree = "<group>"; };
-		1A63BCAF134F5EB2002EDFB4 /* input */ = {isa = PBXFileReference; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		1A63BCB0134F5EB2002EDFB4 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		1A63BCB1134F5EB2002EDFB4 /* output */ = {isa = PBXFileReference; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		1A63BCB2134F5EB2002EDFB4 /* SimpleC.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleC.g; sourceTree = "<group>"; };
-		1A63BCB3134F5EB2002EDFB4 /* SimpleC.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleC.tokens; sourceTree = "<group>"; };
-		1A63BCB4134F5EB2002EDFB4 /* SimpleC__.gl */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleC__.gl; sourceTree = "<group>"; };
-		1A63BCB5134F5EB2002EDFB4 /* SimpleCLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SimpleCLexer.h; sourceTree = "<group>"; };
-		1A63BCB6134F5EB2002EDFB4 /* SimpleCLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCLexer.m; sourceTree = "<group>"; };
-		1A63BCB7134F5EB2002EDFB4 /* SimpleCParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SimpleCParser.h; sourceTree = "<group>"; };
-		1A63BCB8134F5EB2002EDFB4 /* SimpleCParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCParser.m; sourceTree = "<group>"; };
-		1A63BCBA134F5EB2002EDFB4 /* files */ = {isa = PBXFileReference; lastKnownFileType = text; path = files; sourceTree = "<group>"; };
-		1A63BCBB134F5EB2002EDFB4 /* input */ = {isa = PBXFileReference; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		1A63BCBC134F5EB2002EDFB4 /* Main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Main.m; sourceTree = "<group>"; };
-		1A63BCBD134F5EB2002EDFB4 /* output */ = {isa = PBXFileReference; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		1A63BCBF134F5EB2002EDFB4 /* Poly.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Poly.tokens; sourceTree = "<group>"; };
-		1A63BCC0134F5EB2002EDFB4 /* PolyDifferentiator.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = PolyDifferentiator.m; sourceTree = "<group>"; };
-		1A63BCC1134F5EB2002EDFB4 /* PolyLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = PolyLexer.h; sourceTree = "<group>"; };
-		1A63BCC2134F5EB2002EDFB4 /* PolyLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = PolyLexer.m; sourceTree = "<group>"; };
-		1A63BCC3134F5EB2002EDFB4 /* PolyParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = PolyParser.h; sourceTree = "<group>"; };
-		1A63BCC4134F5EB2002EDFB4 /* PolyParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = PolyParser.m; sourceTree = "<group>"; };
-		1A63BCC5134F5EB2002EDFB4 /* Simplifier.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = Simplifier.h; sourceTree = "<group>"; };
-		1A63BCC6134F5EB2002EDFB4 /* Simplifier.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Simplifier.m; sourceTree = "<group>"; };
-		1A63BCC7134F5EB2002EDFB4 /* Simplifier.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Simplifier.tokens; sourceTree = "<group>"; };
-		1A63BCC8134F5EB2002EDFB4 /* Poly.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = Poly.g; sourceTree = "<group>"; };
-		1A63BCC9134F5EB2002EDFB4 /* Poly.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Poly.tokens; sourceTree = "<group>"; };
-		1A63BCCA134F5EB2002EDFB4 /* PolyDifferentiator.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = PolyDifferentiator.g; sourceTree = "<group>"; };
-		1A63BCCB134F5EB2002EDFB4 /* PolyDifferentiator.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = PolyDifferentiator.m; sourceTree = "<group>"; };
-		1A63BCCC134F5EB2002EDFB4 /* PolyLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = PolyLexer.h; sourceTree = "<group>"; };
-		1A63BCCD134F5EB2002EDFB4 /* PolyLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = PolyLexer.m; sourceTree = "<group>"; };
-		1A63BCCE134F5EB2002EDFB4 /* PolyParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = PolyParser.h; sourceTree = "<group>"; };
-		1A63BCCF134F5EB2002EDFB4 /* PolyParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = PolyParser.m; sourceTree = "<group>"; };
-		1A63BCD0134F5EB2002EDFB4 /* PolyPrinter.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = PolyPrinter.g; sourceTree = "<group>"; };
-		1A63BCD1134F5EB2002EDFB4 /* PolyPrinter.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = PolyPrinter.h; sourceTree = "<group>"; };
-		1A63BCD2134F5EB2002EDFB4 /* PolyPrinter.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = PolyPrinter.m; sourceTree = "<group>"; };
-		1A63BCD3134F5EB2002EDFB4 /* PolyPrinter.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = PolyPrinter.tokens; sourceTree = "<group>"; };
-		1A63BCD4134F5EB2002EDFB4 /* Simplifier.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = Simplifier.g; sourceTree = "<group>"; };
-		1A63BCD5134F5EB2002EDFB4 /* Simplifier.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = Simplifier.h; sourceTree = "<group>"; };
-		1A63BCD6134F5EB2002EDFB4 /* Simplifier.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Simplifier.m; sourceTree = "<group>"; };
-		1A63BCD7134F5EB2002EDFB4 /* Simplifier.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Simplifier.tokens; sourceTree = "<group>"; };
-		1A63BCD9134F5EB2002EDFB4 /* antlr3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = antlr3.h; sourceTree = "<group>"; };
-		1A63BCDA134F5EB2002EDFB4 /* input */ = {isa = PBXFileReference; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		1A63BCDB134F5EB2002EDFB4 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		1A63BCDC134F5EB2002EDFB4 /* output */ = {isa = PBXFileReference; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		1A63BCDE134F5EB2002EDFB4 /* SymbolTable.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = SymbolTable.tokens; sourceTree = "<group>"; };
-		1A63BCDF134F5EB2002EDFB4 /* SymbolTableLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SymbolTableLexer.h; sourceTree = "<group>"; };
-		1A63BCE0134F5EB2002EDFB4 /* SymbolTableLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SymbolTableLexer.m; sourceTree = "<group>"; };
-		1A63BCE1134F5EB2002EDFB4 /* SymbolTableParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SymbolTableParser.h; sourceTree = "<group>"; };
-		1A63BCE2134F5EB2002EDFB4 /* SymbolTableParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SymbolTableParser.m; sourceTree = "<group>"; };
-		1A63BCE3134F5EB2002EDFB4 /* SymbolTable.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = SymbolTable.g; sourceTree = "<group>"; };
-		1A63BCE4134F5EB2002EDFB4 /* SymbolTable.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = SymbolTable.tokens; sourceTree = "<group>"; };
-		1A63BCE5134F5EB2002EDFB4 /* SymbolTable__.gl */ = {isa = PBXFileReference; lastKnownFileType = text; path = SymbolTable__.gl; sourceTree = "<group>"; };
-		1A63BCE6134F5EB2002EDFB4 /* SymbolTableLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SymbolTableLexer.h; sourceTree = "<group>"; };
-		1A63BCE7134F5EB2002EDFB4 /* SymbolTableLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SymbolTableLexer.m; sourceTree = "<group>"; };
-		1A63BCE8134F5EB2002EDFB4 /* SymbolTableParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SymbolTableParser.h; sourceTree = "<group>"; };
-		1A63BCE9134F5EB2002EDFB4 /* SymbolTableParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SymbolTableParser.m; sourceTree = "<group>"; };
-		1A63BCEB134F5EB2002EDFB4 /* antlr3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = antlr3.h; sourceTree = "<group>"; };
-		1A63BCEC134F5EB2002EDFB4 /* input */ = {isa = PBXFileReference; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		1A63BCED134F5EB2002EDFB4 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		1A63BCEE134F5EB2002EDFB4 /* output */ = {isa = PBXFileReference; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		1A63BCF0134F5EB2002EDFB4 /* SimpleC.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleC.tokens; sourceTree = "<group>"; };
-		1A63BCF1134F5EB2002EDFB4 /* SimpleCLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SimpleCLexer.h; sourceTree = "<group>"; };
-		1A63BCF2134F5EB2002EDFB4 /* SimpleCLexer.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = SimpleCLexer.java; sourceTree = "<group>"; };
-		1A63BCF3134F5EB2002EDFB4 /* SimpleCLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCLexer.m; sourceTree = "<group>"; };
-		1A63BCF4134F5EB2002EDFB4 /* SimpleCParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SimpleCParser.h; sourceTree = "<group>"; };
-		1A63BCF5134F5EB2002EDFB4 /* SimpleCParser.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = SimpleCParser.java; sourceTree = "<group>"; };
-		1A63BCF6134F5EB2002EDFB4 /* SimpleCParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCParser.m; sourceTree = "<group>"; };
-		1A63BCF7134F5EB2002EDFB4 /* SimpleCTP.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SimpleCTP.h; sourceTree = "<group>"; };
-		1A63BCF8134F5EB2002EDFB4 /* SimpleCTP.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = SimpleCTP.java; sourceTree = "<group>"; };
-		1A63BCF9134F5EB2002EDFB4 /* SimpleCTP.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCTP.m; sourceTree = "<group>"; };
-		1A63BCFA134F5EB2002EDFB4 /* SimpleCTP.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleCTP.tokens; sourceTree = "<group>"; };
-		1A63BCFB134F5EB2002EDFB4 /* SimpleC.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleC.g; sourceTree = "<group>"; };
-		1A63BCFC134F5EB2002EDFB4 /* SimpleC.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleC.tokens; sourceTree = "<group>"; };
-		1A63BCFD134F5EB2002EDFB4 /* SimpleC__.gl */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleC__.gl; sourceTree = "<group>"; };
-		1A63BCFE134F5EB2002EDFB4 /* SimpleCLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SimpleCLexer.h; sourceTree = "<group>"; };
-		1A63BCFF134F5EB2002EDFB4 /* SimpleCLexer.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = SimpleCLexer.java; sourceTree = "<group>"; };
-		1A63BD00134F5EB2002EDFB4 /* SimpleCLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCLexer.m; sourceTree = "<group>"; };
-		1A63BD01134F5EB2002EDFB4 /* SimpleCParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SimpleCParser.h; sourceTree = "<group>"; };
-		1A63BD02134F5EB2002EDFB4 /* SimpleCParser.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = SimpleCParser.java; sourceTree = "<group>"; };
-		1A63BD03134F5EB2002EDFB4 /* SimpleCParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCParser.m; sourceTree = "<group>"; };
-		1A63BD04134F5EB2002EDFB4 /* SimpleCTP.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleCTP.g; sourceTree = "<group>"; };
-		1A63BD05134F5EB2002EDFB4 /* SimpleCTP.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SimpleCTP.h; sourceTree = "<group>"; };
-		1A63BD06134F5EB2002EDFB4 /* SimpleCTP.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = SimpleCTP.java; sourceTree = "<group>"; };
-		1A63BD07134F5EB2002EDFB4 /* SimpleCTP.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCTP.m; sourceTree = "<group>"; };
-		1A63BD08134F5EB2002EDFB4 /* SimpleCTP.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleCTP.tokens; sourceTree = "<group>"; };
-		1A63BD09134F5EB2002EDFB4 /* SimpleCWalker.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleCWalker.g; sourceTree = "<group>"; };
-		1A63BD0A134F5EB2002EDFB4 /* SimpleCWalker.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = SimpleCWalker.h; sourceTree = "<group>"; };
-		1A63BD0B134F5EB2002EDFB4 /* SimpleCWalker.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCWalker.m; sourceTree = "<group>"; };
-		1A63BD0C134F5EB2002EDFB4 /* SimpleCWalker.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = SimpleCWalker.tokens; sourceTree = "<group>"; };
-		1A63BD0E134F5EB2002EDFB4 /* files */ = {isa = PBXFileReference; lastKnownFileType = text; path = files; sourceTree = "<group>"; };
-		1A63BD0F134F5EB2002EDFB4 /* input */ = {isa = PBXFileReference; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
-		1A63BD10134F5EB2002EDFB4 /* Lang.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = Lang.g; sourceTree = "<group>"; };
-		1A63BD11134F5EB2002EDFB4 /* Lang.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = Lang.tokens; sourceTree = "<group>"; };
-		1A63BD12134F5EB2002EDFB4 /* LangDumpDecl.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = LangDumpDecl.g; sourceTree = "<group>"; };
-		1A63BD13134F5EB2002EDFB4 /* LangDumpDecl.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = LangDumpDecl.h; sourceTree = "<group>"; };
-		1A63BD14134F5EB2002EDFB4 /* LangDumpDecl.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = LangDumpDecl.m; sourceTree = "<group>"; };
-		1A63BD15134F5EB2002EDFB4 /* LangDumpDecl.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = LangDumpDecl.tokens; sourceTree = "<group>"; };
-		1A63BD16134F5EB2002EDFB4 /* LangLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = LangLexer.h; sourceTree = "<group>"; };
-		1A63BD17134F5EB2002EDFB4 /* LangLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = LangLexer.m; sourceTree = "<group>"; };
-		1A63BD18134F5EB2002EDFB4 /* LangParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = LangParser.h; sourceTree = "<group>"; };
-		1A63BD19134F5EB2002EDFB4 /* LangParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = LangParser.m; sourceTree = "<group>"; };
-		1A63BD1A134F5EB2002EDFB4 /* Main.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = Main.java; sourceTree = "<group>"; };
-		1A63BD1B134F5EB2002EDFB4 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		1A63BD1C134F5EB2002EDFB4 /* output */ = {isa = PBXFileReference; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
-		1A63BD1E134F5EB2002EDFB4 /* README.txt */ = {isa = PBXFileReference; lastKnownFileType = text; path = README.txt; sourceTree = "<group>"; };
-		1A63BD20134F5EB2002EDFB4 /* antlr3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = antlr3.h; sourceTree = "<group>"; };
-		1A63BD21134F5EB2002EDFB4 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
-		1A63BD23134F5EB2002EDFB4 /* TreeRewrite.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = TreeRewrite.tokens; sourceTree = "<group>"; };
-		1A63BD24134F5EB2002EDFB4 /* TreeRewriteLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TreeRewriteLexer.h; sourceTree = "<group>"; };
-		1A63BD25134F5EB2002EDFB4 /* TreeRewriteLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = TreeRewriteLexer.m; sourceTree = "<group>"; };
-		1A63BD26134F5EB2002EDFB4 /* TreeRewriteParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TreeRewriteParser.h; sourceTree = "<group>"; };
-		1A63BD27134F5EB2002EDFB4 /* TreeRewriteParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = TreeRewriteParser.m; sourceTree = "<group>"; };
-		1A63BD28134F5EB2002EDFB4 /* TreeRewrite.g */ = {isa = PBXFileReference; lastKnownFileType = text; path = TreeRewrite.g; sourceTree = "<group>"; };
-		1A63BD29134F5EB2002EDFB4 /* TreeRewrite.tokens */ = {isa = PBXFileReference; lastKnownFileType = text; path = TreeRewrite.tokens; sourceTree = "<group>"; };
-		1A63BD2A134F5EB2002EDFB4 /* TreeRewriteLexer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TreeRewriteLexer.h; sourceTree = "<group>"; };
-		1A63BD2B134F5EB2002EDFB4 /* TreeRewriteLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = TreeRewriteLexer.m; sourceTree = "<group>"; };
-		1A63BD2C134F5EB2002EDFB4 /* TreeRewriteParser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TreeRewriteParser.h; sourceTree = "<group>"; };
-		1A63BD2D134F5EB2002EDFB4 /* TreeRewriteParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = TreeRewriteParser.m; sourceTree = "<group>"; };
-		1A63BD32134F5F1E002EDFB4 /* combined */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = combined; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A63BD3F134F5F36002EDFB4 /* lexertest-simple */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "lexertest-simple"; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A63BD4C134F5F43002EDFB4 /* LL-start */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "LL-start"; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A63BD59134F5F4D002EDFB4 /* polydiff */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = polydiff; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A63BD66134F5F5E002EDFB4 /* simplecTreeParser */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = simplecTreeParser; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A63BD73134F5F67002EDFB4 /* treeparser */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = treeparser; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A63BD80134F5F71002EDFB4 /* treerewrite */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = treerewrite; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A63BDD4134F6233002EDFB4 /* scopes */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = scopes; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A63BDFC134FB75E002EDFB4 /* hoistedPredicates */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = hoistedPredicates; sourceTree = BUILT_PRODUCTS_DIR; };
-		1A6B1C97134E8CF70016A47D /* ANTLRFileStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRFileStream.h; path = ../ANTLRFileStream.h; sourceTree = "<group>"; };
-		1A6B1C98134E8CF70016A47D /* ANTLRHashMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRHashMap.h; path = ../ANTLRHashMap.h; sourceTree = "<group>"; };
-		1A6B1C99134E8CF70016A47D /* ANTLRHashRule.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRHashRule.h; path = ../ANTLRHashRule.h; sourceTree = "<group>"; };
-		1A6B1C9A134E8CF70016A47D /* ANTLRInputStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRInputStream.h; path = ../ANTLRInputStream.h; sourceTree = "<group>"; };
-		1A6B1C9B134E8CF70016A47D /* ANTLRIntArray.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRIntArray.h; path = ../ANTLRIntArray.h; sourceTree = "<group>"; };
-		1A6B1C9C134E8CF70016A47D /* ANTLRLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRLexer.h; path = ../ANTLRLexer.h; sourceTree = "<group>"; };
-		1A6B1C9D134E8CF70016A47D /* ANTLRLexerRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRLexerRuleReturnScope.h; path = ../ANTLRLexerRuleReturnScope.h; sourceTree = "<group>"; };
-		1A6B1C9E134E8CF70016A47D /* ANTLRLexerState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRLexerState.h; path = ../ANTLRLexerState.h; sourceTree = "<group>"; };
-		1A6B1C9F134E8CF70016A47D /* ANTLRLinkBase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRLinkBase.h; path = ../ANTLRLinkBase.h; sourceTree = "<group>"; };
-		1A6B1CA0134E8CF70016A47D /* ANTLRLookaheadStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRLookaheadStream.h; path = ../ANTLRLookaheadStream.h; sourceTree = "<group>"; };
-		1A6B1CA1134E8CF70016A47D /* ANTLRMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRMap.h; path = ../ANTLRMap.h; sourceTree = "<group>"; };
-		1A6B1CA2134E8CF70016A47D /* ANTLRMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRMapElement.h; path = ../ANTLRMapElement.h; sourceTree = "<group>"; };
-		1A6B1CA3134E8CF70016A47D /* ANTLRMismatchedNotSetException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRMismatchedNotSetException.h; path = ../ANTLRMismatchedNotSetException.h; sourceTree = "<group>"; };
-		1A6B1CA4134E8CF70016A47D /* ANTLRMismatchedRangeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRMismatchedRangeException.h; path = ../ANTLRMismatchedRangeException.h; sourceTree = "<group>"; };
-		1A6B1CA5134E8CF70016A47D /* ANTLRMismatchedSetException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRMismatchedSetException.h; path = ../ANTLRMismatchedSetException.h; sourceTree = "<group>"; };
-		1A6B1CA6134E8CF70016A47D /* ANTLRMismatchedTokenException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRMismatchedTokenException.h; path = ../ANTLRMismatchedTokenException.h; sourceTree = "<group>"; };
-		1A6B1CA7134E8CF70016A47D /* ANTLRMismatchedTreeNodeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRMismatchedTreeNodeException.h; path = ../ANTLRMismatchedTreeNodeException.h; sourceTree = "<group>"; };
-		1A6B1CA8134E8CF70016A47D /* ANTLRMissingTokenException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRMissingTokenException.h; path = ../ANTLRMissingTokenException.h; sourceTree = "<group>"; };
-		1A6B1CA9134E8CF70016A47D /* ANTLRNodeMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRNodeMapElement.h; path = ../ANTLRNodeMapElement.h; sourceTree = "<group>"; };
-		1A6B1CAA134E8CF70016A47D /* ANTLRNoViableAltException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRNoViableAltException.h; path = ../ANTLRNoViableAltException.h; sourceTree = "<group>"; };
-		1A6B1CAB134E8CF70016A47D /* ANTLRParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRParser.h; path = ../ANTLRParser.h; sourceTree = "<group>"; };
-		1A6B1CAC134E8CF70016A47D /* ANTLRParserRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRParserRuleReturnScope.h; path = ../ANTLRParserRuleReturnScope.h; sourceTree = "<group>"; };
-		1A6B1CAD134E8CF70016A47D /* ANTLRParseTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRParseTree.h; path = ../ANTLRParseTree.h; sourceTree = "<group>"; };
-		1A6B1CAE134E8CF70016A47D /* ANTLRPtrBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRPtrBuffer.h; path = ../ANTLRPtrBuffer.h; sourceTree = "<group>"; };
-		1A6B1CAF134E8CF70016A47D /* ANTLRPtrStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRPtrStack.h; path = ../ANTLRPtrStack.h; sourceTree = "<group>"; };
-		1A6B1CB0134E8CF70016A47D /* ANTLRReaderStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRReaderStream.h; path = ../ANTLRReaderStream.h; sourceTree = "<group>"; };
-		1A6B1CB1134E8CF70016A47D /* ANTLRRecognitionException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRecognitionException.h; path = ../ANTLRRecognitionException.h; sourceTree = "<group>"; };
-		1A6B1CB2134E8CF70016A47D /* ANTLRRecognizerSharedState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRecognizerSharedState.h; path = ../ANTLRRecognizerSharedState.h; sourceTree = "<group>"; };
-		1A6B1CB3134E8CF70016A47D /* ANTLRRewriteRuleElementStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRewriteRuleElementStream.h; path = ../ANTLRRewriteRuleElementStream.h; sourceTree = "<group>"; };
-		1A6B1CB4134E8CF70016A47D /* ANTLRRewriteRuleNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRewriteRuleNodeStream.h; path = ../ANTLRRewriteRuleNodeStream.h; sourceTree = "<group>"; };
-		1A6B1CB5134E8CF70016A47D /* ANTLRRewriteRuleSubtreeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRewriteRuleSubtreeStream.h; path = ../ANTLRRewriteRuleSubtreeStream.h; sourceTree = "<group>"; };
-		1A6B1CB6134E8CF70016A47D /* ANTLRRewriteRuleTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRewriteRuleTokenStream.h; path = ../ANTLRRewriteRuleTokenStream.h; sourceTree = "<group>"; };
-		1A6B1CB7134E8CF70016A47D /* ANTLRRuleMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRuleMapElement.h; path = ../ANTLRRuleMapElement.h; sourceTree = "<group>"; };
-		1A6B1CB8134E8CF70016A47D /* ANTLRRuleMemo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRuleMemo.h; path = ../ANTLRRuleMemo.h; sourceTree = "<group>"; };
-		1A6B1CB9134E8CF70016A47D /* ANTLRRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRuleReturnScope.h; path = ../ANTLRRuleReturnScope.h; sourceTree = "<group>"; };
-		1A6B1CBA134E8CF70016A47D /* ANTLRRuleStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRuleStack.h; path = ../ANTLRRuleStack.h; sourceTree = "<group>"; };
-		1A6B1CBB134E8CF70016A47D /* ANTLRRuntimeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRRuntimeException.h; path = ../ANTLRRuntimeException.h; sourceTree = "<group>"; };
-		1A6B1CBC134E8CF70016A47D /* ANTLRStreamEnumerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRStreamEnumerator.h; path = ../ANTLRStreamEnumerator.h; sourceTree = "<group>"; };
-		1A6B1CBD134E8CF70016A47D /* ANTLRStringStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRStringStream.h; path = ../ANTLRStringStream.h; sourceTree = "<group>"; };
-		1A6B1CBE134E8CF70016A47D /* ANTLRStringStreamState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRStringStreamState.h; path = ../ANTLRStringStreamState.h; sourceTree = "<group>"; };
-		1A6B1CBF134E8CF70016A47D /* ANTLRSymbolStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRSymbolStack.h; path = ../ANTLRSymbolStack.h; sourceTree = "<group>"; };
-		1A6B1CC0134E8CF70016A47D /* ANTLRToken.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRToken.h; path = ../ANTLRToken.h; sourceTree = "<group>"; };
-		1A6B1CC1134E8CF70016A47D /* ANTLRToken+DebuggerSupport.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ANTLRToken+DebuggerSupport.h"; path = "../ANTLRToken+DebuggerSupport.h"; sourceTree = "<group>"; };
-		1A6B1CC2134E8CF70016A47D /* ANTLRTokenRewriteStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTokenRewriteStream.h; path = ../ANTLRTokenRewriteStream.h; sourceTree = "<group>"; };
-		1A6B1CC3134E8CF70016A47D /* ANTLRTokenSource.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTokenSource.h; path = ../ANTLRTokenSource.h; sourceTree = "<group>"; };
-		1A6B1CC4134E8CF70016A47D /* ANTLRTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTokenStream.h; path = ../ANTLRTokenStream.h; sourceTree = "<group>"; };
-		1A6B1CC5134E8CF70016A47D /* ANTLRTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTree.h; path = ../ANTLRTree.h; sourceTree = "<group>"; };
-		1A6B1CC6134E8CF70016A47D /* ANTLRTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreeAdaptor.h; path = ../ANTLRTreeAdaptor.h; sourceTree = "<group>"; };
-		1A6B1CC7134E8CF70016A47D /* ANTLRTreeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreeException.h; path = ../ANTLRTreeException.h; sourceTree = "<group>"; };
-		1A6B1CC8134E8CF70016A47D /* ANTLRTreeIterator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreeIterator.h; path = ../ANTLRTreeIterator.h; sourceTree = "<group>"; };
-		1A6B1CC9134E8CF70016A47D /* ANTLRTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreeNodeStream.h; path = ../ANTLRTreeNodeStream.h; sourceTree = "<group>"; };
-		1A6B1CCA134E8CF70016A47D /* ANTLRTreeParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreeParser.h; path = ../ANTLRTreeParser.h; sourceTree = "<group>"; };
-		1A6B1CCB134E8CF70016A47D /* ANTLRTreePatternLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreePatternLexer.h; path = ../ANTLRTreePatternLexer.h; sourceTree = "<group>"; };
-		1A6B1CCC134E8CF70016A47D /* ANTLRTreePatternParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreePatternParser.h; path = ../ANTLRTreePatternParser.h; sourceTree = "<group>"; };
-		1A6B1CCD134E8CF70016A47D /* ANTLRTreeRewriter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreeRewriter.h; path = ../ANTLRTreeRewriter.h; sourceTree = "<group>"; };
-		1A6B1CCE134E8CF70016A47D /* ANTLRTreeRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreeRuleReturnScope.h; path = ../ANTLRTreeRuleReturnScope.h; sourceTree = "<group>"; };
-		1A6B1CCF134E8CF70016A47D /* ANTLRTreeVisitor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreeVisitor.h; path = ../ANTLRTreeVisitor.h; sourceTree = "<group>"; };
-		1A6B1CD0134E8CF70016A47D /* ANTLRTreeVisitorAction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreeVisitorAction.h; path = ../ANTLRTreeVisitorAction.h; sourceTree = "<group>"; };
-		1A6B1CD1134E8CF70016A47D /* ANTLRTreeWizard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRTreeWizard.h; path = ../ANTLRTreeWizard.h; sourceTree = "<group>"; };
-		1A6B1CD2134E8CF70016A47D /* ANTLRUnbufferedCommonTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRUnbufferedCommonTreeNodeStream.h; path = ../ANTLRUnbufferedCommonTreeNodeStream.h; sourceTree = "<group>"; };
-		1A6B1CD3134E8CF70016A47D /* ANTLRUnbufferedCommonTreeNodeStreamState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRUnbufferedCommonTreeNodeStreamState.h; path = ../ANTLRUnbufferedCommonTreeNodeStreamState.h; sourceTree = "<group>"; };
-		1A6B1CD4134E8CF70016A47D /* ANTLRUnbufferedTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRUnbufferedTokenStream.h; path = ../ANTLRUnbufferedTokenStream.h; sourceTree = "<group>"; };
-		1A6B1CD5134E8CF70016A47D /* ANTLRUniqueIDMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRUniqueIDMap.h; path = ../ANTLRUniqueIDMap.h; sourceTree = "<group>"; };
-		1A6B1CD6134E8CF70016A47D /* ANTLRUnwantedTokenException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLRUnwantedTokenException.h; path = ../ANTLRUnwantedTokenException.h; sourceTree = "<group>"; };
-		1A6B1D1F134E8DA10016A47D /* BaseTest.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = BaseTest.java; sourceTree = "<group>"; };
-		1A6B1D20134E8DA10016A47D /* DebugTestAutoAST.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = DebugTestAutoAST.java; sourceTree = "<group>"; };
-		1A6B1D21134E8DA10016A47D /* DebugTestCompositeGrammars.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = DebugTestCompositeGrammars.java; sourceTree = "<group>"; };
-		1A6B1D22134E8DA10016A47D /* DebugTestRewriteAST.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = DebugTestRewriteAST.java; sourceTree = "<group>"; };
-		1A6B1D23134E8DA10016A47D /* ErrorQueue.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = ErrorQueue.java; sourceTree = "<group>"; };
-		1A6B1D24134E8DA10016A47D /* TestASTConstruction.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestASTConstruction.java; sourceTree = "<group>"; };
-		1A6B1D25134E8DA10016A47D /* TestAttributes.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestAttributes.java; sourceTree = "<group>"; };
-		1A6B1D26134E8DA10016A47D /* TestAutoAST.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestAutoAST.java; sourceTree = "<group>"; };
-		1A6B1D27134E8DA10016A47D /* TestBufferedTreeNodeStream.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestBufferedTreeNodeStream.java; sourceTree = "<group>"; };
-		1A6B1D28134E8DA10016A47D /* TestCharDFAConversion.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestCharDFAConversion.java; sourceTree = "<group>"; };
-		1A6B1D29134E8DA10016A47D /* TestCommonTokenStream.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestCommonTokenStream.java; sourceTree = "<group>"; };
-		1A6B1D2A134E8DA10016A47D /* TestCompositeGrammars.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestCompositeGrammars.java; sourceTree = "<group>"; };
-		1A6B1D2B134E8DA10016A47D /* TestDFAConversion.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestDFAConversion.java; sourceTree = "<group>"; };
-		1A6B1D2C134E8DA10016A47D /* TestDFAMatching.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestDFAMatching.java; sourceTree = "<group>"; };
-		1A6B1D2D134E8DA10016A47D /* TestFastQueue.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestFastQueue.java; sourceTree = "<group>"; };
-		1A6B1D2E134E8DA10016A47D /* TestHeteroAST.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestHeteroAST.java; sourceTree = "<group>"; };
-		1A6B1D2F134E8DA10016A47D /* TestInterpretedLexing.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestInterpretedLexing.java; sourceTree = "<group>"; };
-		1A6B1D30134E8DA10016A47D /* TestInterpretedParsing.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestInterpretedParsing.java; sourceTree = "<group>"; };
-		1A6B1D31134E8DA10016A47D /* TestIntervalSet.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestIntervalSet.java; sourceTree = "<group>"; };
-		1A6B1D32134E8DA10016A47D /* TestJavaCodeGeneration.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestJavaCodeGeneration.java; sourceTree = "<group>"; };
-		1A6B1D33134E8DA10016A47D /* TestLeftRecursion.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestLeftRecursion.java; sourceTree = "<group>"; };
-		1A6B1D34134E8DA10016A47D /* TestLexer.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestLexer.java; sourceTree = "<group>"; };
-		1A6B1D35134E8DA10016A47D /* TestMessages.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestMessages.java; sourceTree = "<group>"; };
-		1A6B1D36134E8DA10016A47D /* TestNFAConstruction.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestNFAConstruction.java; sourceTree = "<group>"; };
-		1A6B1D37134E8DA10016A47D /* TestRewriteAST.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestRewriteAST.java; sourceTree = "<group>"; };
-		1A6B1D38134E8DA10016A47D /* TestRewriteTemplates.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestRewriteTemplates.java; sourceTree = "<group>"; };
-		1A6B1D39134E8DA10016A47D /* TestSemanticPredicateEvaluation.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestSemanticPredicateEvaluation.java; sourceTree = "<group>"; };
-		1A6B1D3A134E8DA10016A47D /* TestSemanticPredicates.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestSemanticPredicates.java; sourceTree = "<group>"; };
-		1A6B1D3B134E8DA10016A47D /* TestSets.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestSets.java; sourceTree = "<group>"; };
-		1A6B1D3C134E8DA10016A47D /* TestSymbolDefinitions.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestSymbolDefinitions.java; sourceTree = "<group>"; };
-		1A6B1D3D134E8DA10016A47D /* TestSyntacticPredicateEvaluation.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestSyntacticPredicateEvaluation.java; sourceTree = "<group>"; };
-		1A6B1D3E134E8DA10016A47D /* TestSyntaxErrors.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestSyntaxErrors.java; sourceTree = "<group>"; };
-		1A6B1D3F134E8DA10016A47D /* TestTemplates.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestTemplates.java; sourceTree = "<group>"; };
-		1A6B1D40134E8DA10016A47D /* TestTokenRewriteStream.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestTokenRewriteStream.java; sourceTree = "<group>"; };
-		1A6B1D41134E8DA10016A47D /* TestTopologicalSort.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestTopologicalSort.java; sourceTree = "<group>"; };
-		1A6B1D42134E8DA10016A47D /* TestTreeGrammarRewriteAST.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestTreeGrammarRewriteAST.java; sourceTree = "<group>"; };
-		1A6B1D43134E8DA10016A47D /* TestTreeIterator.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestTreeIterator.java; sourceTree = "<group>"; };
-		1A6B1D44134E8DA10016A47D /* TestTreeNodeStream.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestTreeNodeStream.java; sourceTree = "<group>"; };
-		1A6B1D45134E8DA10016A47D /* TestTreeParsing.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestTreeParsing.java; sourceTree = "<group>"; };
-		1A6B1D46134E8DA10016A47D /* TestTrees.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestTrees.java; sourceTree = "<group>"; };
-		1A6B1D47134E8DA10016A47D /* TestTreeWizard.java */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.java; path = TestTreeWizard.java; sourceTree = "<group>"; };
-		1A6B1D4A134E8DA10016A47D /* ANTLRFastQueueTest.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ANTLRFastQueueTest.h; sourceTree = "<group>"; };
-		1A6B1D4B134E8DA10016A47D /* ANTLRFastQueueTest.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ANTLRFastQueueTest.m; sourceTree = "<group>"; };
-		1A6B1D4C134E8DA10016A47D /* ANTLRIntArrayTest.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ANTLRIntArrayTest.h; sourceTree = "<group>"; };
-		1A6B1D4D134E8DA10016A47D /* ANTLRIntArrayTest.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ANTLRIntArrayTest.m; sourceTree = "<group>"; };
-		1A6B1D4F134E8DA10016A47D /* ANTLRRecognizerTest.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ANTLRRecognizerTest.h; sourceTree = "<group>"; };
-		1A6B1D50134E8DA10016A47D /* ANTLRRecognizerTest.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ANTLRRecognizerTest.m; sourceTree = "<group>"; };
-		1A6B1D52134E8DA10016A47D /* ANTLRBitSetTest.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ANTLRBitSetTest.h; sourceTree = "<group>"; };
-		1A6B1D53134E8DA10016A47D /* ANTLRBitSetTest.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ANTLRBitSetTest.m; sourceTree = "<group>"; };
-		1A6B1D55134E8DA10016A47D /* ANTLRStringStreamTest.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ANTLRStringStreamTest.h; sourceTree = "<group>"; };
-		1A6B1D56134E8DA10016A47D /* ANTLRStringStreamTest.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ANTLRStringStreamTest.m; sourceTree = "<group>"; };
-		1A6B1D58134E8DA10016A47D /* TestRewriteRuleTokenStream.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TestRewriteRuleTokenStream.h; sourceTree = "<group>"; };
-		1A6B1D59134E8DA10016A47D /* TestRewriteRuleTokenStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = TestRewriteRuleTokenStream.m; sourceTree = "<group>"; };
-		1A6B1D5B134E8DA10016A47D /* ANTLRCommonTokenTest.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonTokenTest.h; sourceTree = "<group>"; };
-		1A6B1D5C134E8DA10016A47D /* ANTLRCommonTokenTest.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonTokenTest.m; sourceTree = "<group>"; };
-		1A6B1D5E134E8DA10016A47D /* ANTLRCommonErrorNodeTest.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonErrorNodeTest.h; sourceTree = "<group>"; };
-		1A6B1D5F134E8DA10016A47D /* ANTLRCommonErrorNodeTest.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonErrorNodeTest.m; sourceTree = "<group>"; };
-		1A6B1D60134E8DA10016A47D /* ANTLRCommonTreeAdaptorTest.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonTreeAdaptorTest.h; sourceTree = "<group>"; };
-		1A6B1D61134E8DA10016A47D /* ANTLRCommonTreeAdaptorTest.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonTreeAdaptorTest.m; sourceTree = "<group>"; };
-		1A6B1D62134E8DA10016A47D /* ANTLRCommonTreeTest.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ANTLRCommonTreeTest.h; sourceTree = "<group>"; };
-		1A6B1D63134E8DA10016A47D /* ANTLRCommonTreeTest.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ANTLRCommonTreeTest.m; sourceTree = "<group>"; };
-		1A6B1D78134EA0970016A47D /* SenTestingKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = SenTestingKit.framework; path = Library/Frameworks/SenTestingKit.framework; sourceTree = DEVELOPER_DIR; };
-		1AAC1C3C134FD6A500B2DC68 /* ANTLR.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ANTLR.h; path = ../ANTLR.h; sourceTree = "<group>"; };
-		1AE72318134E860B001C3F35 /* ANTLR.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = ANTLR.framework; sourceTree = BUILT_PRODUCTS_DIR; };
-		1AE7231B134E860B001C3F35 /* Cocoa.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Cocoa.framework; path = System/Library/Frameworks/Cocoa.framework; sourceTree = SDKROOT; };
-		1AE7231E134E860B001C3F35 /* AppKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AppKit.framework; path = System/Library/Frameworks/AppKit.framework; sourceTree = SDKROOT; };
-		1AE7231F134E860B001C3F35 /* CoreData.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreData.framework; path = System/Library/Frameworks/CoreData.framework; sourceTree = SDKROOT; };
-		1AE72323134E860B001C3F35 /* ANTLR-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLR-Info.plist"; sourceTree = "<group>"; };
-		1AE72325134E860B001C3F35 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = "<group>"; };
-		1AE72327134E860B001C3F35 /* ANTLR-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "ANTLR-Prefix.pch"; sourceTree = "<group>"; };
-		1AE7232D134E860B001C3F35 /* ANTLRTests.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRTests.octest; sourceTree = BUILT_PRODUCTS_DIR; };
-		1AE72334134E860B001C3F35 /* ANTLRTests-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRTests-Info.plist"; sourceTree = "<group>"; };
-		1AE72336134E860B001C3F35 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = "<group>"; };
-		1AE72338134E860B001C3F35 /* ANTLRTests-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "ANTLRTests-Prefix.pch"; sourceTree = "<group>"; };
-		1AE72339134E860B001C3F35 /* ANTLRTests.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ANTLRTests.h; sourceTree = "<group>"; };
-		1AE7233B134E860B001C3F35 /* ANTLRTests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ANTLRTests.m; sourceTree = "<group>"; };
-		1AE72345134E89BF001C3F35 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = System/Library/Frameworks/CoreFoundation.framework; sourceTree = SDKROOT; };
-		1AE72346134E89BF001C3F35 /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; };
-		1AE72349134E8AB4001C3F35 /* ANTLRBaseMapElement.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRBaseMapElement.m; path = ../ANTLRBaseMapElement.m; sourceTree = SOURCE_ROOT; };
-		1AE7234A134E8AB4001C3F35 /* ANTLRBaseRecognizer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRBaseRecognizer.m; path = ../ANTLRBaseRecognizer.m; sourceTree = SOURCE_ROOT; };
-		1AE7234B134E8AB4001C3F35 /* ANTLRBaseStack.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRBaseStack.m; path = ../ANTLRBaseStack.m; sourceTree = SOURCE_ROOT; };
-		1AE7234C134E8AB4001C3F35 /* ANTLRBaseTree.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRBaseTree.m; path = ../ANTLRBaseTree.m; sourceTree = SOURCE_ROOT; };
-		1AE7234D134E8AB4001C3F35 /* ANTLRBaseTreeAdaptor.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRBaseTreeAdaptor.m; path = ../ANTLRBaseTreeAdaptor.m; sourceTree = SOURCE_ROOT; };
-		1AE7234E134E8AB4001C3F35 /* ANTLRBitSet.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRBitSet.m; path = ../ANTLRBitSet.m; sourceTree = SOURCE_ROOT; };
-		1AE7234F134E8AB4001C3F35 /* ANTLRBufferedTokenStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRBufferedTokenStream.m; path = ../ANTLRBufferedTokenStream.m; sourceTree = SOURCE_ROOT; };
-		1AE72350134E8AB4001C3F35 /* ANTLRBufferedTreeNodeStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRBufferedTreeNodeStream.m; path = ../ANTLRBufferedTreeNodeStream.m; sourceTree = SOURCE_ROOT; };
-		1AE72351134E8AB4001C3F35 /* ANTLRCharStreamState.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRCharStreamState.m; path = ../ANTLRCharStreamState.m; sourceTree = SOURCE_ROOT; };
-		1AE72352134E8AB4001C3F35 /* ANTLRCommonErrorNode.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRCommonErrorNode.m; path = ../ANTLRCommonErrorNode.m; sourceTree = SOURCE_ROOT; };
-		1AE72353134E8AB4001C3F35 /* ANTLRCommonToken.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRCommonToken.m; path = ../ANTLRCommonToken.m; sourceTree = SOURCE_ROOT; };
-		1AE72354134E8AB4001C3F35 /* ANTLRCommonTokenStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRCommonTokenStream.m; path = ../ANTLRCommonTokenStream.m; sourceTree = SOURCE_ROOT; };
-		1AE72355134E8AB4001C3F35 /* ANTLRCommonTree.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRCommonTree.m; path = ../ANTLRCommonTree.m; sourceTree = SOURCE_ROOT; };
-		1AE72356134E8AB4001C3F35 /* ANTLRCommonTreeAdaptor.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRCommonTreeAdaptor.m; path = ../ANTLRCommonTreeAdaptor.m; sourceTree = SOURCE_ROOT; };
-		1AE72357134E8AB4001C3F35 /* ANTLRCommonTreeNodeStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRCommonTreeNodeStream.m; path = ../ANTLRCommonTreeNodeStream.m; sourceTree = SOURCE_ROOT; };
-		1AE72358134E8AB4001C3F35 /* ANTLRDebugEventProxy.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRDebugEventProxy.m; path = ../ANTLRDebugEventProxy.m; sourceTree = SOURCE_ROOT; };
-		1AE72359134E8AB4001C3F35 /* ANTLRDebugParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRDebugParser.m; path = ../ANTLRDebugParser.m; sourceTree = SOURCE_ROOT; };
-		1AE7235A134E8AB4001C3F35 /* ANTLRDebugTokenStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRDebugTokenStream.m; path = ../ANTLRDebugTokenStream.m; sourceTree = SOURCE_ROOT; };
-		1AE7235B134E8AB4001C3F35 /* ANTLRDebugTreeAdaptor.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRDebugTreeAdaptor.m; path = ../ANTLRDebugTreeAdaptor.m; sourceTree = SOURCE_ROOT; };
-		1AE7235C134E8AB4001C3F35 /* ANTLRDebugTreeNodeStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRDebugTreeNodeStream.m; path = ../ANTLRDebugTreeNodeStream.m; sourceTree = SOURCE_ROOT; };
-		1AE7235D134E8AB4001C3F35 /* ANTLRDebugTreeParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRDebugTreeParser.m; path = ../ANTLRDebugTreeParser.m; sourceTree = SOURCE_ROOT; };
-		1AE7235E134E8AB4001C3F35 /* ANTLRDFA.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRDFA.m; path = ../ANTLRDFA.m; sourceTree = SOURCE_ROOT; };
-		1AE7235F134E8AB4001C3F35 /* ANTLRDoubleKeyMap.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRDoubleKeyMap.m; path = ../ANTLRDoubleKeyMap.m; sourceTree = SOURCE_ROOT; };
-		1AE72360134E8AB4001C3F35 /* ANTLREarlyExitException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLREarlyExitException.m; path = ../ANTLREarlyExitException.m; sourceTree = SOURCE_ROOT; };
-		1AE72361134E8AB4001C3F35 /* ANTLRFailedPredicateException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRFailedPredicateException.m; path = ../ANTLRFailedPredicateException.m; sourceTree = SOURCE_ROOT; };
-		1AE72362134E8AB4001C3F35 /* ANTLRFastQueue.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRFastQueue.m; path = ../ANTLRFastQueue.m; sourceTree = SOURCE_ROOT; };
-		1AE72363134E8AB4001C3F35 /* ANTLRFileStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRFileStream.m; path = ../ANTLRFileStream.m; sourceTree = SOURCE_ROOT; };
-		1AE72364134E8AB4001C3F35 /* ANTLRHashMap.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRHashMap.m; path = ../ANTLRHashMap.m; sourceTree = SOURCE_ROOT; };
-		1AE72365134E8AB4001C3F35 /* ANTLRHashRule.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRHashRule.m; path = ../ANTLRHashRule.m; sourceTree = SOURCE_ROOT; };
-		1AE72366134E8AB4001C3F35 /* ANTLRInputStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRInputStream.m; path = ../ANTLRInputStream.m; sourceTree = SOURCE_ROOT; };
-		1AE72367134E8AB4001C3F35 /* ANTLRIntArray.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRIntArray.m; path = ../ANTLRIntArray.m; sourceTree = SOURCE_ROOT; };
-		1AE72368134E8AB4001C3F35 /* ANTLRIntStream.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = ANTLRIntStream.h; path = ../ANTLRIntStream.h; sourceTree = SOURCE_ROOT; };
-		1AE72369134E8AB4001C3F35 /* ANTLRLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRLexer.m; path = ../ANTLRLexer.m; sourceTree = SOURCE_ROOT; };
-		1AE7236A134E8AB4001C3F35 /* ANTLRLexerRuleReturnScope.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRLexerRuleReturnScope.m; path = ../ANTLRLexerRuleReturnScope.m; sourceTree = SOURCE_ROOT; };
-		1AE7236B134E8AB4001C3F35 /* ANTLRLexerState.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRLexerState.m; path = ../ANTLRLexerState.m; sourceTree = SOURCE_ROOT; };
-		1AE7236C134E8AB4001C3F35 /* ANTLRLinkBase.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRLinkBase.m; path = ../ANTLRLinkBase.m; sourceTree = SOURCE_ROOT; };
-		1AE7236D134E8AB4001C3F35 /* ANTLRLookaheadStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRLookaheadStream.m; path = ../ANTLRLookaheadStream.m; sourceTree = SOURCE_ROOT; };
-		1AE7236E134E8AB4001C3F35 /* ANTLRMap.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRMap.m; path = ../ANTLRMap.m; sourceTree = SOURCE_ROOT; };
-		1AE7236F134E8AB4001C3F35 /* ANTLRMapElement.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRMapElement.m; path = ../ANTLRMapElement.m; sourceTree = SOURCE_ROOT; };
-		1AE72370134E8AB4001C3F35 /* ANTLRMismatchedNotSetException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRMismatchedNotSetException.m; path = ../ANTLRMismatchedNotSetException.m; sourceTree = SOURCE_ROOT; };
-		1AE72371134E8AB4001C3F35 /* ANTLRMismatchedRangeException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRMismatchedRangeException.m; path = ../ANTLRMismatchedRangeException.m; sourceTree = SOURCE_ROOT; };
-		1AE72372134E8AB4001C3F35 /* ANTLRMismatchedSetException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRMismatchedSetException.m; path = ../ANTLRMismatchedSetException.m; sourceTree = SOURCE_ROOT; };
-		1AE72373134E8AB4001C3F35 /* ANTLRMismatchedTokenException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRMismatchedTokenException.m; path = ../ANTLRMismatchedTokenException.m; sourceTree = SOURCE_ROOT; };
-		1AE72374134E8AB4001C3F35 /* ANTLRMismatchedTreeNodeException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRMismatchedTreeNodeException.m; path = ../ANTLRMismatchedTreeNodeException.m; sourceTree = SOURCE_ROOT; };
-		1AE72375134E8AB4001C3F35 /* ANTLRMissingTokenException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRMissingTokenException.m; path = ../ANTLRMissingTokenException.m; sourceTree = SOURCE_ROOT; };
-		1AE72376134E8AB4001C3F35 /* ANTLRNodeMapElement.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRNodeMapElement.m; path = ../ANTLRNodeMapElement.m; sourceTree = SOURCE_ROOT; };
-		1AE72377134E8AB4001C3F35 /* ANTLRNoViableAltException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRNoViableAltException.m; path = ../ANTLRNoViableAltException.m; sourceTree = SOURCE_ROOT; };
-		1AE72378134E8AB4001C3F35 /* ANTLRParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRParser.m; path = ../ANTLRParser.m; sourceTree = SOURCE_ROOT; };
-		1AE72379134E8AB4001C3F35 /* ANTLRParserRuleReturnScope.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRParserRuleReturnScope.m; path = ../ANTLRParserRuleReturnScope.m; sourceTree = SOURCE_ROOT; };
-		1AE7237A134E8AB4001C3F35 /* ANTLRParseTree.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRParseTree.m; path = ../ANTLRParseTree.m; sourceTree = SOURCE_ROOT; };
-		1AE7237B134E8AB4001C3F35 /* ANTLRPtrBuffer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRPtrBuffer.m; path = ../ANTLRPtrBuffer.m; sourceTree = SOURCE_ROOT; };
-		1AE7237C134E8AB4001C3F35 /* ANTLRPtrStack.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRPtrStack.m; path = ../ANTLRPtrStack.m; sourceTree = SOURCE_ROOT; };
-		1AE7237D134E8AB4001C3F35 /* ANTLRReaderStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRReaderStream.m; path = ../ANTLRReaderStream.m; sourceTree = SOURCE_ROOT; };
-		1AE7237E134E8AB4001C3F35 /* ANTLRRecognitionException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRecognitionException.m; path = ../ANTLRRecognitionException.m; sourceTree = SOURCE_ROOT; };
-		1AE7237F134E8AB4001C3F35 /* ANTLRRecognizerSharedState.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRecognizerSharedState.m; path = ../ANTLRRecognizerSharedState.m; sourceTree = SOURCE_ROOT; };
-		1AE72380134E8AB4001C3F35 /* ANTLRRewriteRuleElementStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRewriteRuleElementStream.m; path = ../ANTLRRewriteRuleElementStream.m; sourceTree = SOURCE_ROOT; };
-		1AE72381134E8AB4001C3F35 /* ANTLRRewriteRuleNodeStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRewriteRuleNodeStream.m; path = ../ANTLRRewriteRuleNodeStream.m; sourceTree = SOURCE_ROOT; };
-		1AE72382134E8AB4001C3F35 /* ANTLRRewriteRuleSubtreeStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRewriteRuleSubtreeStream.m; path = ../ANTLRRewriteRuleSubtreeStream.m; sourceTree = SOURCE_ROOT; };
-		1AE72383134E8AB4001C3F35 /* ANTLRRewriteRuleTokenStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRewriteRuleTokenStream.m; path = ../ANTLRRewriteRuleTokenStream.m; sourceTree = SOURCE_ROOT; };
-		1AE72384134E8AB4001C3F35 /* ANTLRRuleMapElement.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRuleMapElement.m; path = ../ANTLRRuleMapElement.m; sourceTree = SOURCE_ROOT; };
-		1AE72385134E8AB4001C3F35 /* ANTLRRuleMemo.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRuleMemo.m; path = ../ANTLRRuleMemo.m; sourceTree = SOURCE_ROOT; };
-		1AE72386134E8AB4001C3F35 /* ANTLRRuleReturnScope.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRuleReturnScope.m; path = ../ANTLRRuleReturnScope.m; sourceTree = SOURCE_ROOT; };
-		1AE72387134E8AB4001C3F35 /* ANTLRRuleStack.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRuleStack.m; path = ../ANTLRRuleStack.m; sourceTree = SOURCE_ROOT; };
-		1AE72388134E8AB4001C3F35 /* ANTLRRuntimeException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRRuntimeException.m; path = ../ANTLRRuntimeException.m; sourceTree = SOURCE_ROOT; };
-		1AE72389134E8AB4001C3F35 /* ANTLRStreamEnumerator.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRStreamEnumerator.m; path = ../ANTLRStreamEnumerator.m; sourceTree = SOURCE_ROOT; };
-		1AE7238A134E8AB4001C3F35 /* ANTLRStringStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRStringStream.m; path = ../ANTLRStringStream.m; sourceTree = SOURCE_ROOT; };
-		1AE7238B134E8AB4001C3F35 /* ANTLRSymbolStack.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRSymbolStack.m; path = ../ANTLRSymbolStack.m; sourceTree = SOURCE_ROOT; };
-		1AE7238C134E8AB4001C3F35 /* ANTLRToken+DebuggerSupport.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = "ANTLRToken+DebuggerSupport.m"; path = "../ANTLRToken+DebuggerSupport.m"; sourceTree = SOURCE_ROOT; };
-		1AE7238D134E8AB4001C3F35 /* ANTLRTokenRewriteStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTokenRewriteStream.m; path = ../ANTLRTokenRewriteStream.m; sourceTree = SOURCE_ROOT; };
-		1AE7238F134E8AB4001C3F35 /* ANTLRTreeAdaptor.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreeAdaptor.m; path = ../ANTLRTreeAdaptor.m; sourceTree = SOURCE_ROOT; };
-		1AE72390134E8AB4001C3F35 /* ANTLRTreeException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreeException.m; path = ../ANTLRTreeException.m; sourceTree = SOURCE_ROOT; };
-		1AE72391134E8AB4001C3F35 /* ANTLRTreeIterator.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreeIterator.m; path = ../ANTLRTreeIterator.m; sourceTree = SOURCE_ROOT; };
-		1AE72392134E8AB4001C3F35 /* ANTLRTreeParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreeParser.m; path = ../ANTLRTreeParser.m; sourceTree = SOURCE_ROOT; };
-		1AE72393134E8AB4001C3F35 /* ANTLRTreePatternLexer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreePatternLexer.m; path = ../ANTLRTreePatternLexer.m; sourceTree = SOURCE_ROOT; };
-		1AE72394134E8AB4001C3F35 /* ANTLRTreePatternParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreePatternParser.m; path = ../ANTLRTreePatternParser.m; sourceTree = SOURCE_ROOT; };
-		1AE72395134E8AB4001C3F35 /* ANTLRTreeRewriter.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreeRewriter.m; path = ../ANTLRTreeRewriter.m; sourceTree = SOURCE_ROOT; };
-		1AE72396134E8AB4001C3F35 /* ANTLRTreeRuleReturnScope.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreeRuleReturnScope.m; path = ../ANTLRTreeRuleReturnScope.m; sourceTree = SOURCE_ROOT; };
-		1AE72397134E8AB4001C3F35 /* ANTLRTreeVisitor.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreeVisitor.m; path = ../ANTLRTreeVisitor.m; sourceTree = SOURCE_ROOT; };
-		1AE72398134E8AB4001C3F35 /* ANTLRTreeVisitorAction.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreeVisitorAction.m; path = ../ANTLRTreeVisitorAction.m; sourceTree = SOURCE_ROOT; };
-		1AE72399134E8AB4001C3F35 /* ANTLRTreeWizard.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRTreeWizard.m; path = ../ANTLRTreeWizard.m; sourceTree = SOURCE_ROOT; };
-		1AE7239A134E8AB4001C3F35 /* ANTLRUnbufferedCommonTreeNodeStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRUnbufferedCommonTreeNodeStream.m; path = ../ANTLRUnbufferedCommonTreeNodeStream.m; sourceTree = SOURCE_ROOT; };
-		1AE7239B134E8AB4001C3F35 /* ANTLRUnbufferedCommonTreeNodeStreamState.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRUnbufferedCommonTreeNodeStreamState.m; path = ../ANTLRUnbufferedCommonTreeNodeStreamState.m; sourceTree = SOURCE_ROOT; };
-		1AE7239C134E8AB4001C3F35 /* ANTLRUnbufferedTokenStream.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRUnbufferedTokenStream.m; path = ../ANTLRUnbufferedTokenStream.m; sourceTree = SOURCE_ROOT; };
-		1AE7239D134E8AB4001C3F35 /* ANTLRUniqueIDMap.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRUniqueIDMap.m; path = ../ANTLRUniqueIDMap.m; sourceTree = SOURCE_ROOT; };
-		1AE7239E134E8AB4001C3F35 /* ANTLRUnwantedTokenException.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; name = ANTLRUnwantedTokenException.m; path = ../ANTLRUnwantedTokenException.m; sourceTree = SOURCE_ROOT; };
-/* End PBXFileReference section */
-
-/* Begin PBXFrameworksBuildPhase section */
-		1A63BC5E134F5DAB002EDFB4 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BC70134F5E43002EDFB4 /* ANTLR.framework in Frameworks */,
-				1A63BDF1134FAB4B002EDFB4 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD2F134F5F1E002EDFB4 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BDEF134F93A5002EDFB4 /* ANTLR.framework in Frameworks */,
-				1A63BDF0134F93AC002EDFB4 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD3C134F5F36002EDFB4 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BDF2134FAB60002EDFB4 /* ANTLR.framework in Frameworks */,
-				1A63BDF3134FAB63002EDFB4 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD49134F5F43002EDFB4 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AB7FE18134FC0800059474B /* ANTLR.framework in Frameworks */,
-				1A63BDAF134F614D002EDFB4 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD56134F5F4D002EDFB4 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BE0C134FB855002EDFB4 /* ANTLR.framework in Frameworks */,
-				1A63BDB4134F6154002EDFB4 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD63134F5F5E002EDFB4 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AB7FE17134FBFB20059474B /* ANTLR.framework in Frameworks */,
-				1A63BDB9134F615A002EDFB4 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD70134F5F67002EDFB4 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AB7FE15134FBF900059474B /* ANTLR.framework in Frameworks */,
-				1A63BDBE134F6160002EDFB4 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD7D134F5F71002EDFB4 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AB7FE16134FBF9F0059474B /* ANTLR.framework in Frameworks */,
-				1A63BDC3134F6167002EDFB4 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BDD1134F6233002EDFB4 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BDF4134FAF58002EDFB4 /* ANTLR.framework in Frameworks */,
-				1A63BDE1134F626A002EDFB4 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BDF9134FB75E002EDFB4 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BE08134FB814002EDFB4 /* ANTLR.framework in Frameworks */,
-				1A63BE09134FB818002EDFB4 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1AE72314134E860B001C3F35 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AE7231C134E860B001C3F35 /* Cocoa.framework in Frameworks */,
-				1AE72347134E89BF001C3F35 /* CoreFoundation.framework in Frameworks */,
-				1AE72348134E89BF001C3F35 /* Foundation.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1AE72329134E860B001C3F35 /* Frameworks */ = {
-			isa = PBXFrameworksBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A6B1D79134EA0970016A47D /* SenTestingKit.framework in Frameworks */,
-				1AE72331134E860B001C3F35 /* ANTLR.framework in Frameworks */,
-				1AE7232E134E860B001C3F35 /* Cocoa.framework in Frameworks */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXFrameworksBuildPhase section */
-
-/* Begin PBXGroup section */
-		1A048CFF134E8B9100005F57 /* Classes */ = {
-			isa = PBXGroup;
-			children = (
-				1AAC1C3C134FD6A500B2DC68 /* ANTLR.h */,
-				1A048D01134E8C1000005F57 /* antlr3.h */,
-				1A048D02134E8C1000005F57 /* ANTLRBaseMapElement.h */,
-				1A048D03134E8C1000005F57 /* ANTLRBaseRecognizer.h */,
-				1A048D04134E8C1000005F57 /* ANTLRBaseStack.h */,
-				1A048D05134E8C1000005F57 /* ANTLRBaseTree.h */,
-				1A048D06134E8C1000005F57 /* ANTLRBaseTreeAdaptor.h */,
-				1A048D07134E8C1000005F57 /* ANTLRBitSet.h */,
-				1A048D08134E8C1000005F57 /* ANTLRBufferedTokenStream.h */,
-				1A048D09134E8C1000005F57 /* ANTLRBufferedTreeNodeStream.h */,
-				1A048D0A134E8C1000005F57 /* ANTLRCharStream.h */,
-				1A048D0B134E8C1000005F57 /* ANTLRCharStreamState.h */,
-				1A048D0C134E8C1000005F57 /* ANTLRCommonErrorNode.h */,
-				1A048D0D134E8C1000005F57 /* ANTLRCommonToken.h */,
-				1A048D0E134E8C1000005F57 /* ANTLRCommonTokenStream.h */,
-				1A048D0F134E8C1000005F57 /* ANTLRCommonTree.h */,
-				1A048D10134E8C1000005F57 /* ANTLRCommonTreeAdaptor.h */,
-				1A048D11134E8C1000005F57 /* ANTLRCommonTreeNodeStream.h */,
-				1A048D12134E8C1100005F57 /* ANTLRCommonTreeTest-Info.plist */,
-				1A048D13134E8C1100005F57 /* ANTLRDebug.h */,
-				1A048D14134E8C1100005F57 /* ANTLRDebugEventListener.h */,
-				1A048D15134E8C1100005F57 /* ANTLRDebugEventProxy.h */,
-				1A048D16134E8C1100005F57 /* ANTLRDebugParser.h */,
-				1A048D17134E8C1100005F57 /* ANTLRDebugTokenStream.h */,
-				1A048D18134E8C1100005F57 /* ANTLRDebugTreeAdaptor.h */,
-				1A048D19134E8C1100005F57 /* ANTLRDebugTreeNodeStream.h */,
-				1A048D1A134E8C1100005F57 /* ANTLRDebugTreeParser.h */,
-				1A048D1B134E8C1100005F57 /* ANTLRDFA.h */,
-				1A048D1C134E8C1100005F57 /* ANTLRDoubleKeyMap.h */,
-				1A048D1D134E8C1100005F57 /* ANTLREarlyExitException.h */,
-				1A048D1E134E8C1100005F57 /* ANTLRError.h */,
-				1A048D1F134E8C1100005F57 /* ANTLRFailedPredicateException.h */,
-				1A048D20134E8C1100005F57 /* ANTLRFastQueue.h */,
-				1A6B1C97134E8CF70016A47D /* ANTLRFileStream.h */,
-				1A6B1C98134E8CF70016A47D /* ANTLRHashMap.h */,
-				1A6B1C99134E8CF70016A47D /* ANTLRHashRule.h */,
-				1A6B1C9A134E8CF70016A47D /* ANTLRInputStream.h */,
-				1A6B1C9B134E8CF70016A47D /* ANTLRIntArray.h */,
-				1AE72368134E8AB4001C3F35 /* ANTLRIntStream.h */,
-				1A6B1C9C134E8CF70016A47D /* ANTLRLexer.h */,
-				1A6B1C9D134E8CF70016A47D /* ANTLRLexerRuleReturnScope.h */,
-				1A6B1C9E134E8CF70016A47D /* ANTLRLexerState.h */,
-				1A6B1C9F134E8CF70016A47D /* ANTLRLinkBase.h */,
-				1A6B1CA0134E8CF70016A47D /* ANTLRLookaheadStream.h */,
-				1A6B1CA1134E8CF70016A47D /* ANTLRMap.h */,
-				1A6B1CA2134E8CF70016A47D /* ANTLRMapElement.h */,
-				1A6B1CA3134E8CF70016A47D /* ANTLRMismatchedNotSetException.h */,
-				1A6B1CA4134E8CF70016A47D /* ANTLRMismatchedRangeException.h */,
-				1A6B1CA5134E8CF70016A47D /* ANTLRMismatchedSetException.h */,
-				1A6B1CA6134E8CF70016A47D /* ANTLRMismatchedTokenException.h */,
-				1A6B1CA7134E8CF70016A47D /* ANTLRMismatchedTreeNodeException.h */,
-				1A6B1CA8134E8CF70016A47D /* ANTLRMissingTokenException.h */,
-				1A6B1CA9134E8CF70016A47D /* ANTLRNodeMapElement.h */,
-				1A6B1CAA134E8CF70016A47D /* ANTLRNoViableAltException.h */,
-				1A6B1CAB134E8CF70016A47D /* ANTLRParser.h */,
-				1A6B1CAC134E8CF70016A47D /* ANTLRParserRuleReturnScope.h */,
-				1A6B1CAD134E8CF70016A47D /* ANTLRParseTree.h */,
-				1A6B1CAE134E8CF70016A47D /* ANTLRPtrBuffer.h */,
-				1A6B1CAF134E8CF70016A47D /* ANTLRPtrStack.h */,
-				1A6B1CB0134E8CF70016A47D /* ANTLRReaderStream.h */,
-				1A6B1CB1134E8CF70016A47D /* ANTLRRecognitionException.h */,
-				1A6B1CB2134E8CF70016A47D /* ANTLRRecognizerSharedState.h */,
-				1A6B1CB3134E8CF70016A47D /* ANTLRRewriteRuleElementStream.h */,
-				1A6B1CB4134E8CF70016A47D /* ANTLRRewriteRuleNodeStream.h */,
-				1A6B1CB5134E8CF70016A47D /* ANTLRRewriteRuleSubtreeStream.h */,
-				1A6B1CB6134E8CF70016A47D /* ANTLRRewriteRuleTokenStream.h */,
-				1A6B1CB7134E8CF70016A47D /* ANTLRRuleMapElement.h */,
-				1A6B1CB8134E8CF70016A47D /* ANTLRRuleMemo.h */,
-				1A6B1CB9134E8CF70016A47D /* ANTLRRuleReturnScope.h */,
-				1A6B1CBA134E8CF70016A47D /* ANTLRRuleStack.h */,
-				1A6B1CBB134E8CF70016A47D /* ANTLRRuntimeException.h */,
-				1A6B1CBC134E8CF70016A47D /* ANTLRStreamEnumerator.h */,
-				1A6B1CBD134E8CF70016A47D /* ANTLRStringStream.h */,
-				1A6B1CBE134E8CF70016A47D /* ANTLRStringStreamState.h */,
-				1A6B1CBF134E8CF70016A47D /* ANTLRSymbolStack.h */,
-				1A6B1CC0134E8CF70016A47D /* ANTLRToken.h */,
-				1A6B1CC1134E8CF70016A47D /* ANTLRToken+DebuggerSupport.h */,
-				1A6B1CC2134E8CF70016A47D /* ANTLRTokenRewriteStream.h */,
-				1A6B1CC3134E8CF70016A47D /* ANTLRTokenSource.h */,
-				1A6B1CC4134E8CF70016A47D /* ANTLRTokenStream.h */,
-				1A6B1CC5134E8CF70016A47D /* ANTLRTree.h */,
-				1A6B1CC6134E8CF70016A47D /* ANTLRTreeAdaptor.h */,
-				1A6B1CC7134E8CF70016A47D /* ANTLRTreeException.h */,
-				1A6B1CC8134E8CF70016A47D /* ANTLRTreeIterator.h */,
-				1A6B1CC9134E8CF70016A47D /* ANTLRTreeNodeStream.h */,
-				1A6B1CCA134E8CF70016A47D /* ANTLRTreeParser.h */,
-				1A6B1CCB134E8CF70016A47D /* ANTLRTreePatternLexer.h */,
-				1A6B1CCC134E8CF70016A47D /* ANTLRTreePatternParser.h */,
-				1A6B1CCD134E8CF70016A47D /* ANTLRTreeRewriter.h */,
-				1A6B1CCE134E8CF70016A47D /* ANTLRTreeRuleReturnScope.h */,
-				1A6B1CCF134E8CF70016A47D /* ANTLRTreeVisitor.h */,
-				1A6B1CD0134E8CF70016A47D /* ANTLRTreeVisitorAction.h */,
-				1A6B1CD1134E8CF70016A47D /* ANTLRTreeWizard.h */,
-				1A6B1CD2134E8CF70016A47D /* ANTLRUnbufferedCommonTreeNodeStream.h */,
-				1A6B1CD3134E8CF70016A47D /* ANTLRUnbufferedCommonTreeNodeStreamState.h */,
-				1A6B1CD4134E8CF70016A47D /* ANTLRUnbufferedTokenStream.h */,
-				1A6B1CD5134E8CF70016A47D /* ANTLRUniqueIDMap.h */,
-				1A6B1CD6134E8CF70016A47D /* ANTLRUnwantedTokenException.h */,
-				1AE72349134E8AB4001C3F35 /* ANTLRBaseMapElement.m */,
-				1AE7234A134E8AB4001C3F35 /* ANTLRBaseRecognizer.m */,
-				1AE7234B134E8AB4001C3F35 /* ANTLRBaseStack.m */,
-				1AE7234C134E8AB4001C3F35 /* ANTLRBaseTree.m */,
-				1AE7234D134E8AB4001C3F35 /* ANTLRBaseTreeAdaptor.m */,
-				1AE7234E134E8AB4001C3F35 /* ANTLRBitSet.m */,
-				1AE7234F134E8AB4001C3F35 /* ANTLRBufferedTokenStream.m */,
-				1AE72350134E8AB4001C3F35 /* ANTLRBufferedTreeNodeStream.m */,
-				1AE72351134E8AB4001C3F35 /* ANTLRCharStreamState.m */,
-				1AE72352134E8AB4001C3F35 /* ANTLRCommonErrorNode.m */,
-				1AE72353134E8AB4001C3F35 /* ANTLRCommonToken.m */,
-				1AE72354134E8AB4001C3F35 /* ANTLRCommonTokenStream.m */,
-				1AE72355134E8AB4001C3F35 /* ANTLRCommonTree.m */,
-				1AE72356134E8AB4001C3F35 /* ANTLRCommonTreeAdaptor.m */,
-				1AE72357134E8AB4001C3F35 /* ANTLRCommonTreeNodeStream.m */,
-				1AE72358134E8AB4001C3F35 /* ANTLRDebugEventProxy.m */,
-				1AE72359134E8AB4001C3F35 /* ANTLRDebugParser.m */,
-				1AE7235A134E8AB4001C3F35 /* ANTLRDebugTokenStream.m */,
-				1AE7235B134E8AB4001C3F35 /* ANTLRDebugTreeAdaptor.m */,
-				1AE7235C134E8AB4001C3F35 /* ANTLRDebugTreeNodeStream.m */,
-				1AE7235D134E8AB4001C3F35 /* ANTLRDebugTreeParser.m */,
-				1AE7235E134E8AB4001C3F35 /* ANTLRDFA.m */,
-				1AE7235F134E8AB4001C3F35 /* ANTLRDoubleKeyMap.m */,
-				1AE72360134E8AB4001C3F35 /* ANTLREarlyExitException.m */,
-				1AE72361134E8AB4001C3F35 /* ANTLRFailedPredicateException.m */,
-				1AE72362134E8AB4001C3F35 /* ANTLRFastQueue.m */,
-				1AE72363134E8AB4001C3F35 /* ANTLRFileStream.m */,
-				1AE72364134E8AB4001C3F35 /* ANTLRHashMap.m */,
-				1AE72365134E8AB4001C3F35 /* ANTLRHashRule.m */,
-				1AE72366134E8AB4001C3F35 /* ANTLRInputStream.m */,
-				1AE72367134E8AB4001C3F35 /* ANTLRIntArray.m */,
-				1AE72369134E8AB4001C3F35 /* ANTLRLexer.m */,
-				1AE7236A134E8AB4001C3F35 /* ANTLRLexerRuleReturnScope.m */,
-				1AE7236B134E8AB4001C3F35 /* ANTLRLexerState.m */,
-				1AE7236C134E8AB4001C3F35 /* ANTLRLinkBase.m */,
-				1AE7236D134E8AB4001C3F35 /* ANTLRLookaheadStream.m */,
-				1AE7236E134E8AB4001C3F35 /* ANTLRMap.m */,
-				1AE7236F134E8AB4001C3F35 /* ANTLRMapElement.m */,
-				1AE72370134E8AB4001C3F35 /* ANTLRMismatchedNotSetException.m */,
-				1AE72371134E8AB4001C3F35 /* ANTLRMismatchedRangeException.m */,
-				1AE72372134E8AB4001C3F35 /* ANTLRMismatchedSetException.m */,
-				1AE72373134E8AB4001C3F35 /* ANTLRMismatchedTokenException.m */,
-				1AE72374134E8AB4001C3F35 /* ANTLRMismatchedTreeNodeException.m */,
-				1AE72375134E8AB4001C3F35 /* ANTLRMissingTokenException.m */,
-				1AE72376134E8AB4001C3F35 /* ANTLRNodeMapElement.m */,
-				1AE72377134E8AB4001C3F35 /* ANTLRNoViableAltException.m */,
-				1AE72378134E8AB4001C3F35 /* ANTLRParser.m */,
-				1AE72379134E8AB4001C3F35 /* ANTLRParserRuleReturnScope.m */,
-				1AE7237A134E8AB4001C3F35 /* ANTLRParseTree.m */,
-				1AE7237B134E8AB4001C3F35 /* ANTLRPtrBuffer.m */,
-				1AE7237C134E8AB4001C3F35 /* ANTLRPtrStack.m */,
-				1AE7237D134E8AB4001C3F35 /* ANTLRReaderStream.m */,
-				1AE7237E134E8AB4001C3F35 /* ANTLRRecognitionException.m */,
-				1AE7237F134E8AB4001C3F35 /* ANTLRRecognizerSharedState.m */,
-				1AE72380134E8AB4001C3F35 /* ANTLRRewriteRuleElementStream.m */,
-				1AE72381134E8AB4001C3F35 /* ANTLRRewriteRuleNodeStream.m */,
-				1AE72382134E8AB4001C3F35 /* ANTLRRewriteRuleSubtreeStream.m */,
-				1AE72383134E8AB4001C3F35 /* ANTLRRewriteRuleTokenStream.m */,
-				1AE72384134E8AB4001C3F35 /* ANTLRRuleMapElement.m */,
-				1AE72385134E8AB4001C3F35 /* ANTLRRuleMemo.m */,
-				1AE72386134E8AB4001C3F35 /* ANTLRRuleReturnScope.m */,
-				1AE72387134E8AB4001C3F35 /* ANTLRRuleStack.m */,
-				1AE72388134E8AB4001C3F35 /* ANTLRRuntimeException.m */,
-				1AE72389134E8AB4001C3F35 /* ANTLRStreamEnumerator.m */,
-				1AE7238A134E8AB4001C3F35 /* ANTLRStringStream.m */,
-				1AE7238B134E8AB4001C3F35 /* ANTLRSymbolStack.m */,
-				1AE7238C134E8AB4001C3F35 /* ANTLRToken+DebuggerSupport.m */,
-				1AE7238D134E8AB4001C3F35 /* ANTLRTokenRewriteStream.m */,
-				1AE7238F134E8AB4001C3F35 /* ANTLRTreeAdaptor.m */,
-				1AE72390134E8AB4001C3F35 /* ANTLRTreeException.m */,
-				1AE72391134E8AB4001C3F35 /* ANTLRTreeIterator.m */,
-				1AE72392134E8AB4001C3F35 /* ANTLRTreeParser.m */,
-				1AE72393134E8AB4001C3F35 /* ANTLRTreePatternLexer.m */,
-				1AE72394134E8AB4001C3F35 /* ANTLRTreePatternParser.m */,
-				1AE72395134E8AB4001C3F35 /* ANTLRTreeRewriter.m */,
-				1AE72396134E8AB4001C3F35 /* ANTLRTreeRuleReturnScope.m */,
-				1AE72397134E8AB4001C3F35 /* ANTLRTreeVisitor.m */,
-				1AE72398134E8AB4001C3F35 /* ANTLRTreeVisitorAction.m */,
-				1AE72399134E8AB4001C3F35 /* ANTLRTreeWizard.m */,
-				1AE7239A134E8AB4001C3F35 /* ANTLRUnbufferedCommonTreeNodeStream.m */,
-				1AE7239B134E8AB4001C3F35 /* ANTLRUnbufferedCommonTreeNodeStreamState.m */,
-				1AE7239C134E8AB4001C3F35 /* ANTLRUnbufferedTokenStream.m */,
-				1AE7239D134E8AB4001C3F35 /* ANTLRUniqueIDMap.m */,
-				1AE7239E134E8AB4001C3F35 /* ANTLRUnwantedTokenException.m */,
-			);
-			name = Classes;
-			sourceTree = "<group>";
-		};
-		1A63BC77134F5EB1002EDFB4 /* examples */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BC78134F5EB1002EDFB4 /* combined */,
-				1A63BC84134F5EB1002EDFB4 /* fuzzy */,
-				1A63BC91134F5EB2002EDFB4 /* hoistedPredicates */,
-				1A63BC9C134F5EB2002EDFB4 /* lexertest-simple */,
-				1A63BCAD134F5EB2002EDFB4 /* LL-star */,
-				1A63BCB9134F5EB2002EDFB4 /* polydiff */,
-				1A63BCD8134F5EB2002EDFB4 /* scopes */,
-				1A63BCEA134F5EB2002EDFB4 /* simplecTreeParser */,
-				1A63BD0D134F5EB2002EDFB4 /* treeparser */,
-				1A63BD1F134F5EB2002EDFB4 /* treerewrite */,
-			);
-			name = examples;
-			path = ../examples;
-			sourceTree = "<group>";
-		};
-		1A63BC78134F5EB1002EDFB4 /* combined */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BC79134F5EB1002EDFB4 /* antlr3.h */,
-				1A63BC7A134F5EB1002EDFB4 /* Combined.g */,
-				1A63BC7B134F5EB1002EDFB4 /* Combined.tokens */,
-				1A63BC7C134F5EB1002EDFB4 /* Combined__.gl */,
-				1A63BC7D134F5EB1002EDFB4 /* CombinedLexer.h */,
-				1A63BC7E134F5EB1002EDFB4 /* CombinedLexer.m */,
-				1A63BC7F134F5EB1002EDFB4 /* CombinedParser.h */,
-				1A63BC80134F5EB1002EDFB4 /* CombinedParser.m */,
-				1A63BC81134F5EB1002EDFB4 /* main.m */,
-				1A63BC82134F5EB1002EDFB4 /* output1 */,
-			);
-			path = combined;
-			sourceTree = "<group>";
-		};
-		1A63BC82134F5EB1002EDFB4 /* output1 */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BC83134F5EB1002EDFB4 /* Combined.tokens */,
-			);
-			path = output1;
-			sourceTree = "<group>";
-		};
-		1A63BC84134F5EB1002EDFB4 /* fuzzy */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BC85134F5EB1002EDFB4 /* antlr3.h */,
-				1A63BC86134F5EB1002EDFB4 /* Fuzzy.g */,
-				1A63BC88134F5EB2002EDFB4 /* Fuzzy.tokens */,
-				1A63BC89134F5EB2002EDFB4 /* FuzzyLexer.h */,
-				1A63BC6B134F5DE4002EDFB4 /* FuzzyLexer.m */,
-				1A63BC8B134F5EB2002EDFB4 /* input */,
-				1A63BC6C134F5DE5002EDFB4 /* main.m */,
-				1A63BC8D134F5EB2002EDFB4 /* output1 */,
-			);
-			path = fuzzy;
-			sourceTree = "<group>";
-		};
-		1A63BC8D134F5EB2002EDFB4 /* output1 */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BC8E134F5EB2002EDFB4 /* Fuzzy.m */,
-				1A63BC8F134F5EB2002EDFB4 /* Fuzzy.tokens */,
-				1A63BC90134F5EB2002EDFB4 /* FuzzyLexer.h */,
-			);
-			path = output1;
-			sourceTree = "<group>";
-		};
-		1A63BC91134F5EB2002EDFB4 /* hoistedPredicates */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BC92134F5EB2002EDFB4 /* antlr3.h */,
-				1A63BC93134F5EB2002EDFB4 /* input */,
-				1A63BC94134F5EB2002EDFB4 /* main.m */,
-				1A63BC95134F5EB2002EDFB4 /* output */,
-				1A63BC96134F5EB2002EDFB4 /* T.g */,
-				1A63BC97134F5EB2002EDFB4 /* T.tokens */,
-				1A63BC98134F5EB2002EDFB4 /* TLexer.h */,
-				1A63BC99134F5EB2002EDFB4 /* TLexer.m */,
-				1A63BC9A134F5EB2002EDFB4 /* TParser.h */,
-				1A63BC9B134F5EB2002EDFB4 /* TParser.m */,
-			);
-			path = hoistedPredicates;
-			sourceTree = "<group>";
-		};
-		1A63BC9C134F5EB2002EDFB4 /* lexertest-simple */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BC9D134F5EB2002EDFB4 /* antlr3.h */,
-				1A63BC9E134F5EB2002EDFB4 /* main.m */,
-				1A63BC9F134F5EB2002EDFB4 /* output1 */,
-				1A63BCA5134F5EB2002EDFB4 /* Test.tokens */,
-				1A63BCA6134F5EB2002EDFB4 /* TestLexer.g */,
-				1A63BCA7134F5EB2002EDFB4 /* TestLexer.h */,
-				1A63BCA8134F5EB2002EDFB4 /* TestLexer.h.old */,
-				1A63BCA9134F5EB2002EDFB4 /* TestLexer.m */,
-				1A63BCAA134F5EB2002EDFB4 /* TestLexer.m.old */,
-				1A63BCAB134F5EB2002EDFB4 /* TestLexer.tokens */,
-				1A63BCAC134F5EB2002EDFB4 /* TestLexerLexer.h */,
-			);
-			path = "lexertest-simple";
-			sourceTree = "<group>";
-		};
-		1A63BC9F134F5EB2002EDFB4 /* output1 */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BCA0134F5EB2002EDFB4 /* Test.tokens */,
-				1A63BCA1134F5EB2002EDFB4 /* TestLexer.h */,
-				1A63BCA2134F5EB2002EDFB4 /* Testlexer.m */,
-				1A63BCA3134F5EB2002EDFB4 /* TestLexer.tokens */,
-				1A63BCA4134F5EB2002EDFB4 /* TestLexerLexer.h */,
-			);
-			path = output1;
-			sourceTree = "<group>";
-		};
-		1A63BCAD134F5EB2002EDFB4 /* LL-star */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BCAE134F5EB2002EDFB4 /* antlr3.h */,
-				1A63BCAF134F5EB2002EDFB4 /* input */,
-				1A63BCB0134F5EB2002EDFB4 /* main.m */,
-				1A63BCB1134F5EB2002EDFB4 /* output */,
-				1A63BCB2134F5EB2002EDFB4 /* SimpleC.g */,
-				1A63BCB3134F5EB2002EDFB4 /* SimpleC.tokens */,
-				1A63BCB4134F5EB2002EDFB4 /* SimpleC__.gl */,
-				1A63BCB5134F5EB2002EDFB4 /* SimpleCLexer.h */,
-				1A63BCB6134F5EB2002EDFB4 /* SimpleCLexer.m */,
-				1A63BCB7134F5EB2002EDFB4 /* SimpleCParser.h */,
-				1A63BCB8134F5EB2002EDFB4 /* SimpleCParser.m */,
-			);
-			path = "LL-star";
-			sourceTree = "<group>";
-		};
-		1A63BCB9134F5EB2002EDFB4 /* polydiff */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BCBA134F5EB2002EDFB4 /* files */,
-				1A63BCBB134F5EB2002EDFB4 /* input */,
-				1A63BCBC134F5EB2002EDFB4 /* Main.m */,
-				1A63BCBD134F5EB2002EDFB4 /* output */,
-				1A63BCBE134F5EB2002EDFB4 /* output1 */,
-				1A63BCC8134F5EB2002EDFB4 /* Poly.g */,
-				1A63BCC9134F5EB2002EDFB4 /* Poly.tokens */,
-				1A63BCCA134F5EB2002EDFB4 /* PolyDifferentiator.g */,
-				1A63BCCB134F5EB2002EDFB4 /* PolyDifferentiator.m */,
-				1A63BCCC134F5EB2002EDFB4 /* PolyLexer.h */,
-				1A63BCCD134F5EB2002EDFB4 /* PolyLexer.m */,
-				1A63BCCE134F5EB2002EDFB4 /* PolyParser.h */,
-				1A63BCCF134F5EB2002EDFB4 /* PolyParser.m */,
-				1A63BCD0134F5EB2002EDFB4 /* PolyPrinter.g */,
-				1A63BCD1134F5EB2002EDFB4 /* PolyPrinter.h */,
-				1A63BCD2134F5EB2002EDFB4 /* PolyPrinter.m */,
-				1A63BCD3134F5EB2002EDFB4 /* PolyPrinter.tokens */,
-				1A63BCD4134F5EB2002EDFB4 /* Simplifier.g */,
-				1A63BCD5134F5EB2002EDFB4 /* Simplifier.h */,
-				1A63BCD6134F5EB2002EDFB4 /* Simplifier.m */,
-				1A63BCD7134F5EB2002EDFB4 /* Simplifier.tokens */,
-			);
-			path = polydiff;
-			sourceTree = "<group>";
-		};
-		1A63BCBE134F5EB2002EDFB4 /* output1 */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BCBF134F5EB2002EDFB4 /* Poly.tokens */,
-				1A63BCC0134F5EB2002EDFB4 /* PolyDifferentiator.m */,
-				1A63BCC1134F5EB2002EDFB4 /* PolyLexer.h */,
-				1A63BCC2134F5EB2002EDFB4 /* PolyLexer.m */,
-				1A63BCC3134F5EB2002EDFB4 /* PolyParser.h */,
-				1A63BCC4134F5EB2002EDFB4 /* PolyParser.m */,
-				1A63BCC5134F5EB2002EDFB4 /* Simplifier.h */,
-				1A63BCC6134F5EB2002EDFB4 /* Simplifier.m */,
-				1A63BCC7134F5EB2002EDFB4 /* Simplifier.tokens */,
-			);
-			path = output1;
-			sourceTree = "<group>";
-		};
-		1A63BCD8134F5EB2002EDFB4 /* scopes */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BCD9134F5EB2002EDFB4 /* antlr3.h */,
-				1A63BCDA134F5EB2002EDFB4 /* input */,
-				1A63BCDB134F5EB2002EDFB4 /* main.m */,
-				1A63BCDC134F5EB2002EDFB4 /* output */,
-				1A63BCDD134F5EB2002EDFB4 /* output1 */,
-				1A63BCE3134F5EB2002EDFB4 /* SymbolTable.g */,
-				1A63BCE4134F5EB2002EDFB4 /* SymbolTable.tokens */,
-				1A63BCE5134F5EB2002EDFB4 /* SymbolTable__.gl */,
-				1A63BCE6134F5EB2002EDFB4 /* SymbolTableLexer.h */,
-				1A63BCE7134F5EB2002EDFB4 /* SymbolTableLexer.m */,
-				1A63BCE8134F5EB2002EDFB4 /* SymbolTableParser.h */,
-				1A63BCE9134F5EB2002EDFB4 /* SymbolTableParser.m */,
-			);
-			path = scopes;
-			sourceTree = "<group>";
-		};
-		1A63BCDD134F5EB2002EDFB4 /* output1 */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BCDE134F5EB2002EDFB4 /* SymbolTable.tokens */,
-				1A63BCDF134F5EB2002EDFB4 /* SymbolTableLexer.h */,
-				1A63BCE0134F5EB2002EDFB4 /* SymbolTableLexer.m */,
-				1A63BCE1134F5EB2002EDFB4 /* SymbolTableParser.h */,
-				1A63BCE2134F5EB2002EDFB4 /* SymbolTableParser.m */,
-			);
-			path = output1;
-			sourceTree = "<group>";
-		};
-		1A63BCEA134F5EB2002EDFB4 /* simplecTreeParser */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BCEB134F5EB2002EDFB4 /* antlr3.h */,
-				1A63BCEC134F5EB2002EDFB4 /* input */,
-				1A63BCED134F5EB2002EDFB4 /* main.m */,
-				1A63BCEE134F5EB2002EDFB4 /* output */,
-				1A63BCEF134F5EB2002EDFB4 /* output1 */,
-				1A63BCFB134F5EB2002EDFB4 /* SimpleC.g */,
-				1A63BCFC134F5EB2002EDFB4 /* SimpleC.tokens */,
-				1A63BCFD134F5EB2002EDFB4 /* SimpleC__.gl */,
-				1A63BCFE134F5EB2002EDFB4 /* SimpleCLexer.h */,
-				1A63BCFF134F5EB2002EDFB4 /* SimpleCLexer.java */,
-				1A63BD00134F5EB2002EDFB4 /* SimpleCLexer.m */,
-				1A63BD01134F5EB2002EDFB4 /* SimpleCParser.h */,
-				1A63BD02134F5EB2002EDFB4 /* SimpleCParser.java */,
-				1A63BD03134F5EB2002EDFB4 /* SimpleCParser.m */,
-				1A63BD04134F5EB2002EDFB4 /* SimpleCTP.g */,
-				1A63BD05134F5EB2002EDFB4 /* SimpleCTP.h */,
-				1A63BD06134F5EB2002EDFB4 /* SimpleCTP.java */,
-				1A63BD07134F5EB2002EDFB4 /* SimpleCTP.m */,
-				1A63BD08134F5EB2002EDFB4 /* SimpleCTP.tokens */,
-				1A63BD09134F5EB2002EDFB4 /* SimpleCWalker.g */,
-				1A63BD0A134F5EB2002EDFB4 /* SimpleCWalker.h */,
-				1A63BD0B134F5EB2002EDFB4 /* SimpleCWalker.m */,
-				1A63BD0C134F5EB2002EDFB4 /* SimpleCWalker.tokens */,
-			);
-			path = simplecTreeParser;
-			sourceTree = "<group>";
-		};
-		1A63BCEF134F5EB2002EDFB4 /* output1 */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BCF0134F5EB2002EDFB4 /* SimpleC.tokens */,
-				1A63BCF1134F5EB2002EDFB4 /* SimpleCLexer.h */,
-				1A63BCF2134F5EB2002EDFB4 /* SimpleCLexer.java */,
-				1A63BCF3134F5EB2002EDFB4 /* SimpleCLexer.m */,
-				1A63BCF4134F5EB2002EDFB4 /* SimpleCParser.h */,
-				1A63BCF5134F5EB2002EDFB4 /* SimpleCParser.java */,
-				1A63BCF6134F5EB2002EDFB4 /* SimpleCParser.m */,
-				1A63BCF7134F5EB2002EDFB4 /* SimpleCTP.h */,
-				1A63BCF8134F5EB2002EDFB4 /* SimpleCTP.java */,
-				1A63BCF9134F5EB2002EDFB4 /* SimpleCTP.m */,
-				1A63BCFA134F5EB2002EDFB4 /* SimpleCTP.tokens */,
-			);
-			path = output1;
-			sourceTree = "<group>";
-		};
-		1A63BD0D134F5EB2002EDFB4 /* treeparser */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BD0E134F5EB2002EDFB4 /* files */,
-				1A63BD0F134F5EB2002EDFB4 /* input */,
-				1A63BD10134F5EB2002EDFB4 /* Lang.g */,
-				1A63BD11134F5EB2002EDFB4 /* Lang.tokens */,
-				1A63BD12134F5EB2002EDFB4 /* LangDumpDecl.g */,
-				1A63BD13134F5EB2002EDFB4 /* LangDumpDecl.h */,
-				1A63BD14134F5EB2002EDFB4 /* LangDumpDecl.m */,
-				1A63BD15134F5EB2002EDFB4 /* LangDumpDecl.tokens */,
-				1A63BD16134F5EB2002EDFB4 /* LangLexer.h */,
-				1A63BD17134F5EB2002EDFB4 /* LangLexer.m */,
-				1A63BD18134F5EB2002EDFB4 /* LangParser.h */,
-				1A63BD19134F5EB2002EDFB4 /* LangParser.m */,
-				1A63BD1A134F5EB2002EDFB4 /* Main.java */,
-				1A63BD1B134F5EB2002EDFB4 /* main.m */,
-				1A63BD1C134F5EB2002EDFB4 /* output */,
-				1A63BD1D134F5EB2002EDFB4 /* output1 */,
-				1A63BD1E134F5EB2002EDFB4 /* README.txt */,
-			);
-			path = treeparser;
-			sourceTree = "<group>";
-		};
-		1A63BD1D134F5EB2002EDFB4 /* output1 */ = {
-			isa = PBXGroup;
-			children = (
-			);
-			path = output1;
-			sourceTree = "<group>";
-		};
-		1A63BD1F134F5EB2002EDFB4 /* treerewrite */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BD20134F5EB2002EDFB4 /* antlr3.h */,
-				1A63BD21134F5EB2002EDFB4 /* main.m */,
-				1A63BD22134F5EB2002EDFB4 /* output1 */,
-				1A63BD28134F5EB2002EDFB4 /* TreeRewrite.g */,
-				1A63BD29134F5EB2002EDFB4 /* TreeRewrite.tokens */,
-				1A63BD2A134F5EB2002EDFB4 /* TreeRewriteLexer.h */,
-				1A63BD2B134F5EB2002EDFB4 /* TreeRewriteLexer.m */,
-				1A63BD2C134F5EB2002EDFB4 /* TreeRewriteParser.h */,
-				1A63BD2D134F5EB2002EDFB4 /* TreeRewriteParser.m */,
-			);
-			path = treerewrite;
-			sourceTree = "<group>";
-		};
-		1A63BD22134F5EB2002EDFB4 /* output1 */ = {
-			isa = PBXGroup;
-			children = (
-				1A63BD23134F5EB2002EDFB4 /* TreeRewrite.tokens */,
-				1A63BD24134F5EB2002EDFB4 /* TreeRewriteLexer.h */,
-				1A63BD25134F5EB2002EDFB4 /* TreeRewriteLexer.m */,
-				1A63BD26134F5EB2002EDFB4 /* TreeRewriteParser.h */,
-				1A63BD27134F5EB2002EDFB4 /* TreeRewriteParser.m */,
-			);
-			path = output1;
-			sourceTree = "<group>";
-		};
-		1A6B1D1D134E8DA10016A47D /* ANTLRTests */ = {
-			isa = PBXGroup;
-			children = (
-				1A6B1D1E134E8DA10016A47D /* java */,
-				1A6B1D48134E8DA10016A47D /* runtime */,
-			);
-			name = ANTLRTests;
-			path = ../test;
-			sourceTree = "<group>";
-		};
-		1A6B1D1E134E8DA10016A47D /* java */ = {
-			isa = PBXGroup;
-			children = (
-				1A6B1D1F134E8DA10016A47D /* BaseTest.java */,
-				1A6B1D20134E8DA10016A47D /* DebugTestAutoAST.java */,
-				1A6B1D21134E8DA10016A47D /* DebugTestCompositeGrammars.java */,
-				1A6B1D22134E8DA10016A47D /* DebugTestRewriteAST.java */,
-				1A6B1D23134E8DA10016A47D /* ErrorQueue.java */,
-				1A6B1D24134E8DA10016A47D /* TestASTConstruction.java */,
-				1A6B1D25134E8DA10016A47D /* TestAttributes.java */,
-				1A6B1D26134E8DA10016A47D /* TestAutoAST.java */,
-				1A6B1D27134E8DA10016A47D /* TestBufferedTreeNodeStream.java */,
-				1A6B1D28134E8DA10016A47D /* TestCharDFAConversion.java */,
-				1A6B1D29134E8DA10016A47D /* TestCommonTokenStream.java */,
-				1A6B1D2A134E8DA10016A47D /* TestCompositeGrammars.java */,
-				1A6B1D2B134E8DA10016A47D /* TestDFAConversion.java */,
-				1A6B1D2C134E8DA10016A47D /* TestDFAMatching.java */,
-				1A6B1D2D134E8DA10016A47D /* TestFastQueue.java */,
-				1A6B1D2E134E8DA10016A47D /* TestHeteroAST.java */,
-				1A6B1D2F134E8DA10016A47D /* TestInterpretedLexing.java */,
-				1A6B1D30134E8DA10016A47D /* TestInterpretedParsing.java */,
-				1A6B1D31134E8DA10016A47D /* TestIntervalSet.java */,
-				1A6B1D32134E8DA10016A47D /* TestJavaCodeGeneration.java */,
-				1A6B1D33134E8DA10016A47D /* TestLeftRecursion.java */,
-				1A6B1D34134E8DA10016A47D /* TestLexer.java */,
-				1A6B1D35134E8DA10016A47D /* TestMessages.java */,
-				1A6B1D36134E8DA10016A47D /* TestNFAConstruction.java */,
-				1A6B1D37134E8DA10016A47D /* TestRewriteAST.java */,
-				1A6B1D38134E8DA10016A47D /* TestRewriteTemplates.java */,
-				1A6B1D39134E8DA10016A47D /* TestSemanticPredicateEvaluation.java */,
-				1A6B1D3A134E8DA10016A47D /* TestSemanticPredicates.java */,
-				1A6B1D3B134E8DA10016A47D /* TestSets.java */,
-				1A6B1D3C134E8DA10016A47D /* TestSymbolDefinitions.java */,
-				1A6B1D3D134E8DA10016A47D /* TestSyntacticPredicateEvaluation.java */,
-				1A6B1D3E134E8DA10016A47D /* TestSyntaxErrors.java */,
-				1A6B1D3F134E8DA10016A47D /* TestTemplates.java */,
-				1A6B1D40134E8DA10016A47D /* TestTokenRewriteStream.java */,
-				1A6B1D41134E8DA10016A47D /* TestTopologicalSort.java */,
-				1A6B1D42134E8DA10016A47D /* TestTreeGrammarRewriteAST.java */,
-				1A6B1D43134E8DA10016A47D /* TestTreeIterator.java */,
-				1A6B1D44134E8DA10016A47D /* TestTreeNodeStream.java */,
-				1A6B1D45134E8DA10016A47D /* TestTreeParsing.java */,
-				1A6B1D46134E8DA10016A47D /* TestTrees.java */,
-				1A6B1D47134E8DA10016A47D /* TestTreeWizard.java */,
-			);
-			path = java;
-			sourceTree = "<group>";
-		};
-		1A6B1D48134E8DA10016A47D /* runtime */ = {
-			isa = PBXGroup;
-			children = (
-				1A6B1D49134E8DA10016A47D /* misc */,
-				1A6B1D4E134E8DA10016A47D /* recognizer */,
-				1A6B1D51134E8DA10016A47D /* sets */,
-				1A6B1D54134E8DA10016A47D /* stream */,
-				1A6B1D57134E8DA10016A47D /* TestRewriteRuleTokenStream */,
-				1A6B1D5A134E8DA10016A47D /* token */,
-				1A6B1D5D134E8DA10016A47D /* tree */,
-			);
-			path = runtime;
-			sourceTree = "<group>";
-		};
-		1A6B1D49134E8DA10016A47D /* misc */ = {
-			isa = PBXGroup;
-			children = (
-				1A6B1D4A134E8DA10016A47D /* ANTLRFastQueueTest.h */,
-				1A6B1D4B134E8DA10016A47D /* ANTLRFastQueueTest.m */,
-				1A6B1D4C134E8DA10016A47D /* ANTLRIntArrayTest.h */,
-				1A6B1D4D134E8DA10016A47D /* ANTLRIntArrayTest.m */,
-			);
-			path = misc;
-			sourceTree = "<group>";
-		};
-		1A6B1D4E134E8DA10016A47D /* recognizer */ = {
-			isa = PBXGroup;
-			children = (
-				1A6B1D4F134E8DA10016A47D /* ANTLRRecognizerTest.h */,
-				1A6B1D50134E8DA10016A47D /* ANTLRRecognizerTest.m */,
-			);
-			path = recognizer;
-			sourceTree = "<group>";
-		};
-		1A6B1D51134E8DA10016A47D /* sets */ = {
-			isa = PBXGroup;
-			children = (
-				1A6B1D52134E8DA10016A47D /* ANTLRBitSetTest.h */,
-				1A6B1D53134E8DA10016A47D /* ANTLRBitSetTest.m */,
-			);
-			path = sets;
-			sourceTree = "<group>";
-		};
-		1A6B1D54134E8DA10016A47D /* stream */ = {
-			isa = PBXGroup;
-			children = (
-				1A6B1D55134E8DA10016A47D /* ANTLRStringStreamTest.h */,
-				1A6B1D56134E8DA10016A47D /* ANTLRStringStreamTest.m */,
-			);
-			path = stream;
-			sourceTree = "<group>";
-		};
-		1A6B1D57134E8DA10016A47D /* TestRewriteRuleTokenStream */ = {
-			isa = PBXGroup;
-			children = (
-				1A6B1D58134E8DA10016A47D /* TestRewriteRuleTokenStream.h */,
-				1A6B1D59134E8DA10016A47D /* TestRewriteRuleTokenStream.m */,
-			);
-			path = TestRewriteRuleTokenStream;
-			sourceTree = "<group>";
-		};
-		1A6B1D5A134E8DA10016A47D /* token */ = {
-			isa = PBXGroup;
-			children = (
-				1A6B1D5B134E8DA10016A47D /* ANTLRCommonTokenTest.h */,
-				1A6B1D5C134E8DA10016A47D /* ANTLRCommonTokenTest.m */,
-			);
-			path = token;
-			sourceTree = "<group>";
-		};
-		1A6B1D5D134E8DA10016A47D /* tree */ = {
-			isa = PBXGroup;
-			children = (
-				1A6B1D5E134E8DA10016A47D /* ANTLRCommonErrorNodeTest.h */,
-				1A6B1D5F134E8DA10016A47D /* ANTLRCommonErrorNodeTest.m */,
-				1A6B1D60134E8DA10016A47D /* ANTLRCommonTreeAdaptorTest.h */,
-				1A6B1D61134E8DA10016A47D /* ANTLRCommonTreeAdaptorTest.m */,
-				1A6B1D62134E8DA10016A47D /* ANTLRCommonTreeTest.h */,
-				1A6B1D63134E8DA10016A47D /* ANTLRCommonTreeTest.m */,
-			);
-			path = tree;
-			sourceTree = "<group>";
-		};
-		1AE7230C134E860A001C3F35 = {
-			isa = PBXGroup;
-			children = (
-				1AE72318134E860B001C3F35 /* ANTLR.framework */,
-				1A048CFF134E8B9100005F57 /* Classes */,
-				1A6B1D1D134E8DA10016A47D /* ANTLRTests */,
-				1A63BC77134F5EB1002EDFB4 /* examples */,
-				1AE72321134E860B001C3F35 /* ANTLR */,
-				1AE72332134E860B001C3F35 /* ANTLRTests */,
-				1AE7231A134E860B001C3F35 /* Frameworks */,
-				1AE72319134E860B001C3F35 /* Products */,
-			);
-			sourceTree = "<group>";
-		};
-		1AE72319134E860B001C3F35 /* Products */ = {
-			isa = PBXGroup;
-			children = (
-				1AE7232D134E860B001C3F35 /* ANTLRTests.octest */,
-				1A63BC61134F5DAB002EDFB4 /* Fuzzy */,
-				1A63BD32134F5F1E002EDFB4 /* combined */,
-				1A63BD3F134F5F36002EDFB4 /* lexertest-simple */,
-				1A63BD4C134F5F43002EDFB4 /* LL-start */,
-				1A63BD59134F5F4D002EDFB4 /* polydiff */,
-				1A63BD66134F5F5E002EDFB4 /* simplecTreeParser */,
-				1A63BD73134F5F67002EDFB4 /* treeparser */,
-				1A63BD80134F5F71002EDFB4 /* treerewrite */,
-				1A63BDD4134F6233002EDFB4 /* scopes */,
-				1A63BDFC134FB75E002EDFB4 /* hoistedPredicates */,
-			);
-			name = Products;
-			sourceTree = "<group>";
-		};
-		1AE7231A134E860B001C3F35 /* Frameworks */ = {
-			isa = PBXGroup;
-			children = (
-				1AE7231E134E860B001C3F35 /* AppKit.framework */,
-				1AE7231B134E860B001C3F35 /* Cocoa.framework */,
-				1AE7231F134E860B001C3F35 /* CoreData.framework */,
-				1AE72345134E89BF001C3F35 /* CoreFoundation.framework */,
-				1AE72346134E89BF001C3F35 /* Foundation.framework */,
-				1A6B1D78134EA0970016A47D /* SenTestingKit.framework */,
-				1AE7231D134E860B001C3F35 /* Other Frameworks */,
-			);
-			name = Frameworks;
-			sourceTree = "<group>";
-		};
-		1AE7231D134E860B001C3F35 /* Other Frameworks */ = {
-			isa = PBXGroup;
-			children = (
-			);
-			name = "Other Frameworks";
-			sourceTree = "<group>";
-		};
-		1AE72321134E860B001C3F35 /* ANTLR */ = {
-			isa = PBXGroup;
-			children = (
-				1AE72322134E860B001C3F35 /* Supporting Files */,
-			);
-			path = ANTLR;
-			sourceTree = "<group>";
-		};
-		1AE72322134E860B001C3F35 /* Supporting Files */ = {
-			isa = PBXGroup;
-			children = (
-				1AE72323134E860B001C3F35 /* ANTLR-Info.plist */,
-				1AE72324134E860B001C3F35 /* InfoPlist.strings */,
-				1AE72327134E860B001C3F35 /* ANTLR-Prefix.pch */,
-			);
-			name = "Supporting Files";
-			sourceTree = "<group>";
-		};
-		1AE72332134E860B001C3F35 /* ANTLRTests */ = {
-			isa = PBXGroup;
-			children = (
-				1AE72339134E860B001C3F35 /* ANTLRTests.h */,
-				1AE7233B134E860B001C3F35 /* ANTLRTests.m */,
-				1AE72333134E860B001C3F35 /* Supporting Files */,
-			);
-			path = ANTLRTests;
-			sourceTree = "<group>";
-		};
-		1AE72333134E860B001C3F35 /* Supporting Files */ = {
-			isa = PBXGroup;
-			children = (
-				1AE72334134E860B001C3F35 /* ANTLRTests-Info.plist */,
-				1AE72335134E860B001C3F35 /* InfoPlist.strings */,
-				1AE72338134E860B001C3F35 /* ANTLRTests-Prefix.pch */,
-			);
-			name = "Supporting Files";
-			sourceTree = "<group>";
-		};
-/* End PBXGroup section */
-
-/* Begin PBXHeadersBuildPhase section */
-		1AE72315134E860B001C3F35 /* Headers */ = {
-			isa = PBXHeadersBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AAC1C3D134FD6A500B2DC68 /* ANTLR.h in Headers */,
-				1A048D21134E8C1100005F57 /* antlr3.h in Headers */,
-				1A048D22134E8C1100005F57 /* ANTLRBaseMapElement.h in Headers */,
-				1A048D23134E8C1100005F57 /* ANTLRBaseRecognizer.h in Headers */,
-				1A048D24134E8C1100005F57 /* ANTLRBaseStack.h in Headers */,
-				1A048D25134E8C1100005F57 /* ANTLRBaseTree.h in Headers */,
-				1A048D26134E8C1100005F57 /* ANTLRBaseTreeAdaptor.h in Headers */,
-				1A048D27134E8C1100005F57 /* ANTLRBitSet.h in Headers */,
-				1A048D28134E8C1100005F57 /* ANTLRBufferedTokenStream.h in Headers */,
-				1A048D29134E8C1100005F57 /* ANTLRBufferedTreeNodeStream.h in Headers */,
-				1A048D2A134E8C1100005F57 /* ANTLRCharStream.h in Headers */,
-				1A048D2B134E8C1100005F57 /* ANTLRCharStreamState.h in Headers */,
-				1A048D2C134E8C1100005F57 /* ANTLRCommonErrorNode.h in Headers */,
-				1A048D2D134E8C1100005F57 /* ANTLRCommonToken.h in Headers */,
-				1A048D2E134E8C1100005F57 /* ANTLRCommonTokenStream.h in Headers */,
-				1A048D2F134E8C1100005F57 /* ANTLRCommonTree.h in Headers */,
-				1A048D30134E8C1100005F57 /* ANTLRCommonTreeAdaptor.h in Headers */,
-				1A048D31134E8C1100005F57 /* ANTLRCommonTreeNodeStream.h in Headers */,
-				1A048D33134E8C1100005F57 /* ANTLRDebug.h in Headers */,
-				1A048D34134E8C1100005F57 /* ANTLRDebugEventListener.h in Headers */,
-				1A048D35134E8C1100005F57 /* ANTLRDebugEventProxy.h in Headers */,
-				1A048D36134E8C1100005F57 /* ANTLRDebugParser.h in Headers */,
-				1A048D37134E8C1100005F57 /* ANTLRDebugTokenStream.h in Headers */,
-				1A048D38134E8C1100005F57 /* ANTLRDebugTreeAdaptor.h in Headers */,
-				1A048D39134E8C1100005F57 /* ANTLRDebugTreeNodeStream.h in Headers */,
-				1A048D3A134E8C1100005F57 /* ANTLRDebugTreeParser.h in Headers */,
-				1A048D3B134E8C1100005F57 /* ANTLRDFA.h in Headers */,
-				1A048D3C134E8C1100005F57 /* ANTLRDoubleKeyMap.h in Headers */,
-				1A048D3D134E8C1100005F57 /* ANTLREarlyExitException.h in Headers */,
-				1A048D3E134E8C1100005F57 /* ANTLRError.h in Headers */,
-				1A048D3F134E8C1100005F57 /* ANTLRFailedPredicateException.h in Headers */,
-				1A048D40134E8C1100005F57 /* ANTLRFastQueue.h in Headers */,
-				1A6B1CD7134E8CF70016A47D /* ANTLRFileStream.h in Headers */,
-				1A6B1CD8134E8CF70016A47D /* ANTLRHashMap.h in Headers */,
-				1A6B1CD9134E8CF70016A47D /* ANTLRHashRule.h in Headers */,
-				1A6B1CDA134E8CF70016A47D /* ANTLRInputStream.h in Headers */,
-				1A6B1CDB134E8CF70016A47D /* ANTLRIntArray.h in Headers */,
-				1A63BDEE134F932E002EDFB4 /* ANTLRIntStream.h in Headers */,
-				1A6B1CDC134E8CF70016A47D /* ANTLRLexer.h in Headers */,
-				1A6B1CDD134E8CF70016A47D /* ANTLRLexerRuleReturnScope.h in Headers */,
-				1A6B1CDE134E8CF70016A47D /* ANTLRLexerState.h in Headers */,
-				1A6B1CDF134E8CF70016A47D /* ANTLRLinkBase.h in Headers */,
-				1A6B1CE0134E8CF70016A47D /* ANTLRLookaheadStream.h in Headers */,
-				1A6B1CE1134E8CF70016A47D /* ANTLRMap.h in Headers */,
-				1A6B1CE2134E8CF70016A47D /* ANTLRMapElement.h in Headers */,
-				1A6B1CE3134E8CF70016A47D /* ANTLRMismatchedNotSetException.h in Headers */,
-				1A6B1CE4134E8CF70016A47D /* ANTLRMismatchedRangeException.h in Headers */,
-				1A6B1CE5134E8CF70016A47D /* ANTLRMismatchedSetException.h in Headers */,
-				1A6B1CE6134E8CF70016A47D /* ANTLRMismatchedTokenException.h in Headers */,
-				1A6B1CE7134E8CF70016A47D /* ANTLRMismatchedTreeNodeException.h in Headers */,
-				1A6B1CE8134E8CF70016A47D /* ANTLRMissingTokenException.h in Headers */,
-				1A6B1CE9134E8CF70016A47D /* ANTLRNodeMapElement.h in Headers */,
-				1A6B1CEA134E8CF70016A47D /* ANTLRNoViableAltException.h in Headers */,
-				1A6B1CEB134E8CF70016A47D /* ANTLRParser.h in Headers */,
-				1A6B1CEC134E8CF70016A47D /* ANTLRParserRuleReturnScope.h in Headers */,
-				1A6B1CED134E8CF80016A47D /* ANTLRParseTree.h in Headers */,
-				1A6B1CEE134E8CF80016A47D /* ANTLRPtrBuffer.h in Headers */,
-				1A6B1CEF134E8CF80016A47D /* ANTLRPtrStack.h in Headers */,
-				1A6B1CF0134E8CF80016A47D /* ANTLRReaderStream.h in Headers */,
-				1A6B1CF1134E8CF80016A47D /* ANTLRRecognitionException.h in Headers */,
-				1A6B1CF2134E8CF80016A47D /* ANTLRRecognizerSharedState.h in Headers */,
-				1A6B1CF3134E8CF80016A47D /* ANTLRRewriteRuleElementStream.h in Headers */,
-				1A6B1CF4134E8CF80016A47D /* ANTLRRewriteRuleNodeStream.h in Headers */,
-				1A6B1CF5134E8CF80016A47D /* ANTLRRewriteRuleSubtreeStream.h in Headers */,
-				1A6B1CF6134E8CF80016A47D /* ANTLRRewriteRuleTokenStream.h in Headers */,
-				1A6B1CF7134E8CF80016A47D /* ANTLRRuleMapElement.h in Headers */,
-				1A6B1CF8134E8CF80016A47D /* ANTLRRuleMemo.h in Headers */,
-				1A6B1CF9134E8CF80016A47D /* ANTLRRuleReturnScope.h in Headers */,
-				1A6B1CFA134E8CF80016A47D /* ANTLRRuleStack.h in Headers */,
-				1A6B1CFB134E8CF80016A47D /* ANTLRRuntimeException.h in Headers */,
-				1A6B1CFC134E8CF80016A47D /* ANTLRStreamEnumerator.h in Headers */,
-				1A6B1CFD134E8CF80016A47D /* ANTLRStringStream.h in Headers */,
-				1A6B1CFE134E8CF80016A47D /* ANTLRStringStreamState.h in Headers */,
-				1A6B1CFF134E8CF80016A47D /* ANTLRSymbolStack.h in Headers */,
-				1A6B1D00134E8CF80016A47D /* ANTLRToken.h in Headers */,
-				1A6B1D01134E8CF80016A47D /* ANTLRToken+DebuggerSupport.h in Headers */,
-				1A6B1D02134E8CF80016A47D /* ANTLRTokenRewriteStream.h in Headers */,
-				1A6B1D03134E8CF80016A47D /* ANTLRTokenSource.h in Headers */,
-				1A6B1D04134E8CF80016A47D /* ANTLRTokenStream.h in Headers */,
-				1A6B1D05134E8CF80016A47D /* ANTLRTree.h in Headers */,
-				1A6B1D06134E8CF80016A47D /* ANTLRTreeAdaptor.h in Headers */,
-				1A6B1D07134E8CF80016A47D /* ANTLRTreeException.h in Headers */,
-				1A6B1D08134E8CF90016A47D /* ANTLRTreeIterator.h in Headers */,
-				1A6B1D09134E8CF90016A47D /* ANTLRTreeNodeStream.h in Headers */,
-				1A6B1D0A134E8CF90016A47D /* ANTLRTreeParser.h in Headers */,
-				1A6B1D0B134E8CF90016A47D /* ANTLRTreePatternLexer.h in Headers */,
-				1A6B1D0C134E8CF90016A47D /* ANTLRTreePatternParser.h in Headers */,
-				1A6B1D0D134E8CF90016A47D /* ANTLRTreeRewriter.h in Headers */,
-				1A6B1D0E134E8CF90016A47D /* ANTLRTreeRuleReturnScope.h in Headers */,
-				1A6B1D0F134E8CF90016A47D /* ANTLRTreeVisitor.h in Headers */,
-				1A6B1D10134E8CF90016A47D /* ANTLRTreeVisitorAction.h in Headers */,
-				1A6B1D11134E8CF90016A47D /* ANTLRTreeWizard.h in Headers */,
-				1A6B1D12134E8CF90016A47D /* ANTLRUnbufferedCommonTreeNodeStream.h in Headers */,
-				1A6B1D13134E8CF90016A47D /* ANTLRUnbufferedCommonTreeNodeStreamState.h in Headers */,
-				1A6B1D14134E8CF90016A47D /* ANTLRUnbufferedTokenStream.h in Headers */,
-				1A6B1D15134E8CF90016A47D /* ANTLRUniqueIDMap.h in Headers */,
-				1A6B1D16134E8CF90016A47D /* ANTLRUnwantedTokenException.h in Headers */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXHeadersBuildPhase section */
-
-/* Begin PBXNativeTarget section */
-		1A63BC60134F5DAB002EDFB4 /* Fuzzy */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A63BC67134F5DAC002EDFB4 /* Build configuration list for PBXNativeTarget "Fuzzy" */;
-			buildPhases = (
-				1A63BC5D134F5DAB002EDFB4 /* Sources */,
-				1A63BC5E134F5DAB002EDFB4 /* Frameworks */,
-				1A63BC5F134F5DAB002EDFB4 /* CopyFiles */,
-			);
-			buildRules = (
-				1A63BDED134F6810002EDFB4 /* PBXBuildRule */,
-			);
-			dependencies = (
-				1A63BDA3134F60A7002EDFB4 /* PBXTargetDependency */,
-			);
-			name = Fuzzy;
-			productName = Fuzzy;
-			productReference = 1A63BC61134F5DAB002EDFB4 /* Fuzzy */;
-			productType = "com.apple.product-type.tool";
-		};
-		1A63BD31134F5F1E002EDFB4 /* combined */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A63BD38134F5F1E002EDFB4 /* Build configuration list for PBXNativeTarget "combined" */;
-			buildPhases = (
-				1A63BD2E134F5F1E002EDFB4 /* Sources */,
-				1A63BD2F134F5F1E002EDFB4 /* Frameworks */,
-				1A63BD30134F5F1E002EDFB4 /* CopyFiles */,
-			);
-			buildRules = (
-				1A63BDEC134F649F002EDFB4 /* PBXBuildRule */,
-			);
-			dependencies = (
-				1A63BDA1134F609B002EDFB4 /* PBXTargetDependency */,
-			);
-			name = combined;
-			productName = combined;
-			productReference = 1A63BD32134F5F1E002EDFB4 /* combined */;
-			productType = "com.apple.product-type.tool";
-		};
-		1A63BD3E134F5F36002EDFB4 /* lexertest-simple */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A63BD45134F5F36002EDFB4 /* Build configuration list for PBXNativeTarget "lexertest-simple" */;
-			buildPhases = (
-				1A63BD3B134F5F36002EDFB4 /* Sources */,
-				1A63BD3C134F5F36002EDFB4 /* Frameworks */,
-				1A63BD3D134F5F36002EDFB4 /* CopyFiles */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				1A63BD9F134F6093002EDFB4 /* PBXTargetDependency */,
-			);
-			name = "lexertest-simple";
-			productName = "lexertest-simple";
-			productReference = 1A63BD3F134F5F36002EDFB4 /* lexertest-simple */;
-			productType = "com.apple.product-type.tool";
-		};
-		1A63BD4B134F5F43002EDFB4 /* LL-start */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A63BD52134F5F43002EDFB4 /* Build configuration list for PBXNativeTarget "LL-start" */;
-			buildPhases = (
-				1A63BD48134F5F43002EDFB4 /* Sources */,
-				1A63BD49134F5F43002EDFB4 /* Frameworks */,
-				1A63BD4A134F5F43002EDFB4 /* CopyFiles */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				1A63BDA5134F60B0002EDFB4 /* PBXTargetDependency */,
-			);
-			name = "LL-start";
-			productName = "LL-start";
-			productReference = 1A63BD4C134F5F43002EDFB4 /* LL-start */;
-			productType = "com.apple.product-type.tool";
-		};
-		1A63BD58134F5F4D002EDFB4 /* polydiff */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A63BD5F134F5F4D002EDFB4 /* Build configuration list for PBXNativeTarget "polydiff" */;
-			buildPhases = (
-				1A63BD55134F5F4D002EDFB4 /* Sources */,
-				1A63BD56134F5F4D002EDFB4 /* Frameworks */,
-				1A63BD57134F5F4D002EDFB4 /* CopyFiles */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				1A63BDA7134F60BC002EDFB4 /* PBXTargetDependency */,
-			);
-			name = polydiff;
-			productName = polydiff;
-			productReference = 1A63BD59134F5F4D002EDFB4 /* polydiff */;
-			productType = "com.apple.product-type.tool";
-		};
-		1A63BD65134F5F5E002EDFB4 /* simplecTreeParser */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A63BD6C134F5F5E002EDFB4 /* Build configuration list for PBXNativeTarget "simplecTreeParser" */;
-			buildPhases = (
-				1A63BD62134F5F5E002EDFB4 /* Sources */,
-				1A63BD63134F5F5E002EDFB4 /* Frameworks */,
-				1A63BD64134F5F5E002EDFB4 /* CopyFiles */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				1A63BDA9134F60C3002EDFB4 /* PBXTargetDependency */,
-			);
-			name = simplecTreeParser;
-			productName = simplecTreeParser;
-			productReference = 1A63BD66134F5F5E002EDFB4 /* simplecTreeParser */;
-			productType = "com.apple.product-type.tool";
-		};
-		1A63BD72134F5F67002EDFB4 /* treeparser */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A63BD79134F5F68002EDFB4 /* Build configuration list for PBXNativeTarget "treeparser" */;
-			buildPhases = (
-				1A63BD6F134F5F67002EDFB4 /* Sources */,
-				1A63BD70134F5F67002EDFB4 /* Frameworks */,
-				1A63BD71134F5F67002EDFB4 /* CopyFiles */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				1A63BDAB134F60CC002EDFB4 /* PBXTargetDependency */,
-			);
-			name = treeparser;
-			productName = treeparser;
-			productReference = 1A63BD73134F5F67002EDFB4 /* treeparser */;
-			productType = "com.apple.product-type.tool";
-		};
-		1A63BD7F134F5F71002EDFB4 /* treerewrite */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A63BD86134F5F72002EDFB4 /* Build configuration list for PBXNativeTarget "treerewrite" */;
-			buildPhases = (
-				1A63BD7C134F5F71002EDFB4 /* Sources */,
-				1A63BD7D134F5F71002EDFB4 /* Frameworks */,
-				1A63BD7E134F5F71002EDFB4 /* Copy Files */,
-			);
-			buildRules = (
-				1A76A02C134FC7540041634F /* PBXBuildRule */,
-			);
-			dependencies = (
-				1A63BDAD134F60D2002EDFB4 /* PBXTargetDependency */,
-			);
-			name = treerewrite;
-			productName = treerewrite;
-			productReference = 1A63BD80134F5F71002EDFB4 /* treerewrite */;
-			productType = "com.apple.product-type.tool";
-		};
-		1A63BDD3134F6233002EDFB4 /* scopes */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A63BDDA134F6234002EDFB4 /* Build configuration list for PBXNativeTarget "scopes" */;
-			buildPhases = (
-				1A63BDD0134F6233002EDFB4 /* Sources */,
-				1A63BDD1134F6233002EDFB4 /* Frameworks */,
-				1A63BDD2134F6233002EDFB4 /* CopyFiles */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				1A63BDE6134F629B002EDFB4 /* PBXTargetDependency */,
-			);
-			name = scopes;
-			productName = scopes;
-			productReference = 1A63BDD4134F6233002EDFB4 /* scopes */;
-			productType = "com.apple.product-type.tool";
-		};
-		1A63BDFB134FB75E002EDFB4 /* hoistedPredicates */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1A63BE02134FB75F002EDFB4 /* Build configuration list for PBXNativeTarget "hoistedPredicates" */;
-			buildPhases = (
-				1A63BDF8134FB75E002EDFB4 /* Sources */,
-				1A63BDF9134FB75E002EDFB4 /* Frameworks */,
-				1A63BDFA134FB75E002EDFB4 /* CopyFiles */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				1A63BE0B134FB824002EDFB4 /* PBXTargetDependency */,
-			);
-			name = hoistedPredicates;
-			productName = hoistedPredicates;
-			productReference = 1A63BDFC134FB75E002EDFB4 /* hoistedPredicates */;
-			productType = "com.apple.product-type.tool";
-		};
-		1AE72317134E860B001C3F35 /* ANTLR */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1AE7233F134E860B001C3F35 /* Build configuration list for PBXNativeTarget "ANTLR" */;
-			buildPhases = (
-				1AE72313134E860B001C3F35 /* Sources */,
-				1AE72314134E860B001C3F35 /* Frameworks */,
-				1AE72315134E860B001C3F35 /* Headers */,
-				1AE72316134E860B001C3F35 /* Resources */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-			);
-			name = ANTLR;
-			productName = ANTLR;
-			productReference = 1AE72318134E860B001C3F35 /* ANTLR.framework */;
-			productType = "com.apple.product-type.framework";
-		};
-		1AE7232C134E860B001C3F35 /* ANTLRTests */ = {
-			isa = PBXNativeTarget;
-			buildConfigurationList = 1AE72342134E860B001C3F35 /* Build configuration list for PBXNativeTarget "ANTLRTests" */;
-			buildPhases = (
-				1AE72328134E860B001C3F35 /* Sources */,
-				1AE72329134E860B001C3F35 /* Frameworks */,
-				1AE7232A134E860B001C3F35 /* Resources */,
-				1AE7232B134E860B001C3F35 /* ShellScript */,
-			);
-			buildRules = (
-			);
-			dependencies = (
-				1AE72330134E860B001C3F35 /* PBXTargetDependency */,
-			);
-			name = ANTLRTests;
-			productName = ANTLRTests;
-			productReference = 1AE7232D134E860B001C3F35 /* ANTLRTests.octest */;
-			productType = "com.apple.product-type.bundle";
-		};
-/* End PBXNativeTarget section */
-
-/* Begin PBXProject section */
-		1AE7230E134E860A001C3F35 /* Project object */ = {
-			isa = PBXProject;
-			attributes = {
-				ORGANIZATIONNAME = "Alan's MachineWorks";
-			};
-			buildConfigurationList = 1AE72311134E860A001C3F35 /* Build configuration list for PBXProject "ANTLR" */;
-			compatibilityVersion = "Xcode 3.2";
-			developmentRegion = English;
-			hasScannedForEncodings = 0;
-			knownRegions = (
-				en,
-			);
-			mainGroup = 1AE7230C134E860A001C3F35;
-			productRefGroup = 1AE72319134E860B001C3F35 /* Products */;
-			projectDirPath = "";
-			projectRoot = "";
-			targets = (
-				1AE72317134E860B001C3F35 /* ANTLR */,
-				1AE7232C134E860B001C3F35 /* ANTLRTests */,
-				1A63BD31134F5F1E002EDFB4 /* combined */,
-				1A63BC60134F5DAB002EDFB4 /* Fuzzy */,
-				1A63BDFB134FB75E002EDFB4 /* hoistedPredicates */,
-				1A63BD3E134F5F36002EDFB4 /* lexertest-simple */,
-				1A63BD4B134F5F43002EDFB4 /* LL-start */,
-				1A63BD58134F5F4D002EDFB4 /* polydiff */,
-				1A63BDD3134F6233002EDFB4 /* scopes */,
-				1A63BD65134F5F5E002EDFB4 /* simplecTreeParser */,
-				1A63BD72134F5F67002EDFB4 /* treeparser */,
-				1A63BD7F134F5F71002EDFB4 /* treerewrite */,
-			);
-		};
-/* End PBXProject section */
-
-/* Begin PBXResourcesBuildPhase section */
-		1AE72316134E860B001C3F35 /* Resources */ = {
-			isa = PBXResourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AE72326134E860B001C3F35 /* InfoPlist.strings in Resources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1AE7232A134E860B001C3F35 /* Resources */ = {
-			isa = PBXResourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AE72337134E860B001C3F35 /* InfoPlist.strings in Resources */,
-				1AE7233A134E860B001C3F35 /* ANTLRTests.h in Resources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXResourcesBuildPhase section */
-
-/* Begin PBXShellScriptBuildPhase section */
-		1AE7232B134E860B001C3F35 /* ShellScript */ = {
-			isa = PBXShellScriptBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-			);
-			inputPaths = (
-			);
-			outputPaths = (
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-			shellPath = /bin/sh;
-			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
-		};
-/* End PBXShellScriptBuildPhase section */
-
-/* Begin PBXSourcesBuildPhase section */
-		1A63BC5D134F5DAB002EDFB4 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BC6F134F5DE5002EDFB4 /* main.m in Sources */,
-				1A63BC6E134F5DE5002EDFB4 /* FuzzyLexer.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD2E134F5F1E002EDFB4 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BD8B134F5FF3002EDFB4 /* main.m in Sources */,
-				1A63BD89134F5FF3002EDFB4 /* CombinedLexer.m in Sources */,
-				1A63BD8A134F5FF3002EDFB4 /* CombinedParser.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD3B134F5F36002EDFB4 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BD94134F606A002EDFB4 /* main.m in Sources */,
-				1A63BD95134F606A002EDFB4 /* TestLexer.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD48134F5F43002EDFB4 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BDC7134F61E4002EDFB4 /* main.m in Sources */,
-				1A63BDC8134F61E8002EDFB4 /* SimpleCLexer.m in Sources */,
-				1A63BDC9134F61EC002EDFB4 /* SimpleCParser.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD55134F5F4D002EDFB4 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BDCA134F6218002EDFB4 /* Main.m in Sources */,
-				1A63BDCB134F6218002EDFB4 /* PolyDifferentiator.m in Sources */,
-				1A63BDCC134F6218002EDFB4 /* PolyLexer.m in Sources */,
-				1A63BDCD134F6218002EDFB4 /* PolyParser.m in Sources */,
-				1A63BDCE134F6218002EDFB4 /* PolyPrinter.m in Sources */,
-				1A63BDCF134F6218002EDFB4 /* Simplifier.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD62134F5F5E002EDFB4 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BDE8134F62D0002EDFB4 /* main.m in Sources */,
-				1A63BDEA134F62D0002EDFB4 /* SimpleCTP.m in Sources */,
-				1A63BDEB134F62D0002EDFB4 /* SimpleCWalker.m in Sources */,
-				1A63BDE7134F62CB002EDFB4 /* SimpleCLexer.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD6F134F5F67002EDFB4 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A76A02F134FD4160041634F /* main.m in Sources */,
-				1A76A032134FD4B90041634F /* LangDumpDecl.m in Sources */,
-				1A76A030134FD4A00041634F /* LangLexer.m in Sources */,
-				1A76A031134FD4A40041634F /* LangParser.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BD7C134F5F71002EDFB4 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BDF6134FB55F002EDFB4 /* main.m in Sources */,
-				1A63BDF5134FB55B002EDFB4 /* TreeRewriteLexer.m in Sources */,
-				1A63BDF7134FB564002EDFB4 /* TreeRewriteParser.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BDD0134F6233002EDFB4 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BDDD134F6258002EDFB4 /* main.m in Sources */,
-				1A63BDDE134F6258002EDFB4 /* SymbolTableLexer.m in Sources */,
-				1A63BDDF134F6258002EDFB4 /* SymbolTableParser.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1A63BDF8134FB75E002EDFB4 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1A63BE05134FB807002EDFB4 /* main.m in Sources */,
-				1A63BE06134FB80B002EDFB4 /* TLexer.m in Sources */,
-				1A63BE07134FB80E002EDFB4 /* TParser.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1AE72313134E860B001C3F35 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AE7239F134E8AB4001C3F35 /* ANTLRBaseMapElement.m in Sources */,
-				1AE723A0134E8AB4001C3F35 /* ANTLRBaseRecognizer.m in Sources */,
-				1AE723A1134E8AB4001C3F35 /* ANTLRBaseStack.m in Sources */,
-				1AE723A2134E8AB4001C3F35 /* ANTLRBaseTree.m in Sources */,
-				1AE723A3134E8AB4001C3F35 /* ANTLRBaseTreeAdaptor.m in Sources */,
-				1AE723A4134E8AB4001C3F35 /* ANTLRBitSet.m in Sources */,
-				1AE723A5134E8AB4001C3F35 /* ANTLRBufferedTokenStream.m in Sources */,
-				1AE723A6134E8AB4001C3F35 /* ANTLRBufferedTreeNodeStream.m in Sources */,
-				1AE723A7134E8AB4001C3F35 /* ANTLRCharStreamState.m in Sources */,
-				1AE723A8134E8AB4001C3F35 /* ANTLRCommonErrorNode.m in Sources */,
-				1AE723A9134E8AB4001C3F35 /* ANTLRCommonToken.m in Sources */,
-				1AE723AA134E8AB4001C3F35 /* ANTLRCommonTokenStream.m in Sources */,
-				1AE723AB134E8AB4001C3F35 /* ANTLRCommonTree.m in Sources */,
-				1AE723AC134E8AB4001C3F35 /* ANTLRCommonTreeAdaptor.m in Sources */,
-				1AE723AD134E8AB4001C3F35 /* ANTLRCommonTreeNodeStream.m in Sources */,
-				1AE723AE134E8AB4001C3F35 /* ANTLRDebugEventProxy.m in Sources */,
-				1AE723AF134E8AB4001C3F35 /* ANTLRDebugParser.m in Sources */,
-				1AE723B0134E8AB4001C3F35 /* ANTLRDebugTokenStream.m in Sources */,
-				1AE723B1134E8AB4001C3F35 /* ANTLRDebugTreeAdaptor.m in Sources */,
-				1AE723B2134E8AB4001C3F35 /* ANTLRDebugTreeNodeStream.m in Sources */,
-				1AE723B3134E8AB4001C3F35 /* ANTLRDebugTreeParser.m in Sources */,
-				1AE723B4134E8AB4001C3F35 /* ANTLRDFA.m in Sources */,
-				1AE723B5134E8AB4001C3F35 /* ANTLRDoubleKeyMap.m in Sources */,
-				1AE723B6134E8AB4001C3F35 /* ANTLREarlyExitException.m in Sources */,
-				1AE723B7134E8AB4001C3F35 /* ANTLRFailedPredicateException.m in Sources */,
-				1AE723B8134E8AB4001C3F35 /* ANTLRFastQueue.m in Sources */,
-				1AE723B9134E8AB4001C3F35 /* ANTLRFileStream.m in Sources */,
-				1AE723BA134E8AB5001C3F35 /* ANTLRHashMap.m in Sources */,
-				1AE723BB134E8AB5001C3F35 /* ANTLRHashRule.m in Sources */,
-				1AE723BC134E8AB5001C3F35 /* ANTLRInputStream.m in Sources */,
-				1AE723BD134E8AB5001C3F35 /* ANTLRIntArray.m in Sources */,
-				1AE723BF134E8AB5001C3F35 /* ANTLRLexer.m in Sources */,
-				1AE723C0134E8AB5001C3F35 /* ANTLRLexerRuleReturnScope.m in Sources */,
-				1AE723C1134E8AB5001C3F35 /* ANTLRLexerState.m in Sources */,
-				1AE723C2134E8AB5001C3F35 /* ANTLRLinkBase.m in Sources */,
-				1AE723C3134E8AB5001C3F35 /* ANTLRLookaheadStream.m in Sources */,
-				1AE723C4134E8AB5001C3F35 /* ANTLRMap.m in Sources */,
-				1AE723C5134E8AB5001C3F35 /* ANTLRMapElement.m in Sources */,
-				1AE723C6134E8AB5001C3F35 /* ANTLRMismatchedNotSetException.m in Sources */,
-				1AE723C7134E8AB5001C3F35 /* ANTLRMismatchedRangeException.m in Sources */,
-				1AE723C8134E8AB5001C3F35 /* ANTLRMismatchedSetException.m in Sources */,
-				1AE723C9134E8AB5001C3F35 /* ANTLRMismatchedTokenException.m in Sources */,
-				1AE723CA134E8AB5001C3F35 /* ANTLRMismatchedTreeNodeException.m in Sources */,
-				1AE723CB134E8AB5001C3F35 /* ANTLRMissingTokenException.m in Sources */,
-				1AE723CC134E8AB5001C3F35 /* ANTLRNodeMapElement.m in Sources */,
-				1AE723CD134E8AB5001C3F35 /* ANTLRNoViableAltException.m in Sources */,
-				1AE723CE134E8AB5001C3F35 /* ANTLRParser.m in Sources */,
-				1AE723CF134E8AB5001C3F35 /* ANTLRParserRuleReturnScope.m in Sources */,
-				1AE723D0134E8AB5001C3F35 /* ANTLRParseTree.m in Sources */,
-				1AE723D1134E8AB5001C3F35 /* ANTLRPtrBuffer.m in Sources */,
-				1AE723D2134E8AB5001C3F35 /* ANTLRPtrStack.m in Sources */,
-				1AE723D3134E8AB5001C3F35 /* ANTLRReaderStream.m in Sources */,
-				1AE723D4134E8AB5001C3F35 /* ANTLRRecognitionException.m in Sources */,
-				1AE723D5134E8AB5001C3F35 /* ANTLRRecognizerSharedState.m in Sources */,
-				1AE723D6134E8AB5001C3F35 /* ANTLRRewriteRuleElementStream.m in Sources */,
-				1AE723D7134E8AB5001C3F35 /* ANTLRRewriteRuleNodeStream.m in Sources */,
-				1AE723D8134E8AB5001C3F35 /* ANTLRRewriteRuleSubtreeStream.m in Sources */,
-				1AE723D9134E8AB5001C3F35 /* ANTLRRewriteRuleTokenStream.m in Sources */,
-				1AE723DA134E8AB5001C3F35 /* ANTLRRuleMapElement.m in Sources */,
-				1AE723DB134E8AB5001C3F35 /* ANTLRRuleMemo.m in Sources */,
-				1AE723DC134E8AB5001C3F35 /* ANTLRRuleReturnScope.m in Sources */,
-				1AE723DD134E8AB5001C3F35 /* ANTLRRuleStack.m in Sources */,
-				1AE723DE134E8AB5001C3F35 /* ANTLRRuntimeException.m in Sources */,
-				1AE723DF134E8AB5001C3F35 /* ANTLRStreamEnumerator.m in Sources */,
-				1AE723E0134E8AB5001C3F35 /* ANTLRStringStream.m in Sources */,
-				1AE723E1134E8AB5001C3F35 /* ANTLRSymbolStack.m in Sources */,
-				1AE723E2134E8AB5001C3F35 /* ANTLRToken+DebuggerSupport.m in Sources */,
-				1AE723E3134E8AB6001C3F35 /* ANTLRTokenRewriteStream.m in Sources */,
-				1AE723E5134E8AB6001C3F35 /* ANTLRTreeAdaptor.m in Sources */,
-				1AE723E6134E8AB6001C3F35 /* ANTLRTreeException.m in Sources */,
-				1AE723E7134E8AB6001C3F35 /* ANTLRTreeIterator.m in Sources */,
-				1AE723E8134E8AB6001C3F35 /* ANTLRTreeParser.m in Sources */,
-				1AE723E9134E8AB6001C3F35 /* ANTLRTreePatternLexer.m in Sources */,
-				1AE723EA134E8AB6001C3F35 /* ANTLRTreePatternParser.m in Sources */,
-				1AE723EB134E8AB6001C3F35 /* ANTLRTreeRewriter.m in Sources */,
-				1AE723EC134E8AB6001C3F35 /* ANTLRTreeRuleReturnScope.m in Sources */,
-				1AE723ED134E8AB6001C3F35 /* ANTLRTreeVisitor.m in Sources */,
-				1AE723EE134E8AB6001C3F35 /* ANTLRTreeVisitorAction.m in Sources */,
-				1AE723EF134E8AB6001C3F35 /* ANTLRTreeWizard.m in Sources */,
-				1AE723F0134E8AB6001C3F35 /* ANTLRUnbufferedCommonTreeNodeStream.m in Sources */,
-				1AE723F1134E8AB6001C3F35 /* ANTLRUnbufferedCommonTreeNodeStreamState.m in Sources */,
-				1AE723F2134E8AB6001C3F35 /* ANTLRUnbufferedTokenStream.m in Sources */,
-				1AE723F3134E8AB6001C3F35 /* ANTLRUniqueIDMap.m in Sources */,
-				1AE723F4134E8AB6001C3F35 /* ANTLRUnwantedTokenException.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-		1AE72328134E860B001C3F35 /* Sources */ = {
-			isa = PBXSourcesBuildPhase;
-			buildActionMask = 2147483647;
-			files = (
-				1AE7233C134E860B001C3F35 /* ANTLRTests.m in Sources */,
-				1A6B1D6A134E8DEB0016A47D /* ANTLRBitSetTest.h in Sources */,
-				1A6B1D6B134E8DEB0016A47D /* ANTLRBitSetTest.m in Sources */,
-				1A6B1D70134E8DEB0016A47D /* ANTLRCommonTokenTest.h in Sources */,
-				1A6B1D71134E8DEB0016A47D /* ANTLRCommonTokenTest.m in Sources */,
-				1A6B1D72134E8DEB0016A47D /* ANTLRCommonErrorNodeTest.h in Sources */,
-				1A6B1D73134E8DEB0016A47D /* ANTLRCommonErrorNodeTest.m in Sources */,
-				1A6B1D74134E8DEB0016A47D /* ANTLRCommonTreeAdaptorTest.h in Sources */,
-				1A6B1D75134E8DEC0016A47D /* ANTLRCommonTreeAdaptorTest.m in Sources */,
-				1A6B1D76134E8DEC0016A47D /* ANTLRCommonTreeTest.h in Sources */,
-				1A6B1D77134E8DEC0016A47D /* ANTLRCommonTreeTest.m in Sources */,
-				1A6B1D64134E8DEB0016A47D /* ANTLRFastQueueTest.h in Sources */,
-				1A6B1D65134E8DEB0016A47D /* ANTLRFastQueueTest.m in Sources */,
-				1A6B1D66134E8DEB0016A47D /* ANTLRIntArrayTest.h in Sources */,
-				1A6B1D67134E8DEB0016A47D /* ANTLRIntArrayTest.m in Sources */,
-				1A6B1D68134E8DEB0016A47D /* ANTLRRecognizerTest.h in Sources */,
-				1A6B1D69134E8DEB0016A47D /* ANTLRRecognizerTest.m in Sources */,
-				1A6B1D6C134E8DEB0016A47D /* ANTLRStringStreamTest.h in Sources */,
-				1A6B1D6D134E8DEB0016A47D /* ANTLRStringStreamTest.m in Sources */,
-				1A6B1D6E134E8DEB0016A47D /* TestRewriteRuleTokenStream.h in Sources */,
-				1A6B1D6F134E8DEB0016A47D /* TestRewriteRuleTokenStream.m in Sources */,
-			);
-			runOnlyForDeploymentPostprocessing = 0;
-		};
-/* End PBXSourcesBuildPhase section */
-
-/* Begin PBXTargetDependency section */
-		1A63BD9F134F6093002EDFB4 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1A63BD9E134F6093002EDFB4 /* PBXContainerItemProxy */;
-		};
-		1A63BDA1134F609B002EDFB4 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1A63BDA0134F609B002EDFB4 /* PBXContainerItemProxy */;
-		};
-		1A63BDA3134F60A7002EDFB4 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1A63BDA2134F60A7002EDFB4 /* PBXContainerItemProxy */;
-		};
-		1A63BDA5134F60B0002EDFB4 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1A63BDA4134F60B0002EDFB4 /* PBXContainerItemProxy */;
-		};
-		1A63BDA7134F60BC002EDFB4 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1A63BDA6134F60BC002EDFB4 /* PBXContainerItemProxy */;
-		};
-		1A63BDA9134F60C3002EDFB4 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1A63BDA8134F60C3002EDFB4 /* PBXContainerItemProxy */;
-		};
-		1A63BDAB134F60CC002EDFB4 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1A63BDAA134F60CC002EDFB4 /* PBXContainerItemProxy */;
-		};
-		1A63BDAD134F60D2002EDFB4 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1A63BDAC134F60D2002EDFB4 /* PBXContainerItemProxy */;
-		};
-		1A63BDE6134F629B002EDFB4 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1A63BDE5134F629B002EDFB4 /* PBXContainerItemProxy */;
-		};
-		1A63BE0B134FB824002EDFB4 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1A63BE0A134FB824002EDFB4 /* PBXContainerItemProxy */;
-		};
-		1AE72330134E860B001C3F35 /* PBXTargetDependency */ = {
-			isa = PBXTargetDependency;
-			target = 1AE72317134E860B001C3F35 /* ANTLR */;
-			targetProxy = 1AE7232F134E860B001C3F35 /* PBXContainerItemProxy */;
-		};
-/* End PBXTargetDependency section */
-
-/* Begin PBXVariantGroup section */
-		1AE72324134E860B001C3F35 /* InfoPlist.strings */ = {
-			isa = PBXVariantGroup;
-			children = (
-				1AE72325134E860B001C3F35 /* en */,
-			);
-			name = InfoPlist.strings;
-			sourceTree = "<group>";
-		};
-		1AE72335134E860B001C3F35 /* InfoPlist.strings */ = {
-			isa = PBXVariantGroup;
-			children = (
-				1AE72336134E860B001C3F35 /* en */,
-			);
-			name = InfoPlist.strings;
-			sourceTree = "<group>";
-		};
-/* End PBXVariantGroup section */
-
-/* Begin XCBuildConfiguration section */
-		1A63BC68134F5DAC002EDFB4 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				"FRAMEWORK_SEARCH_PATHS[arch=*]" = /Library/Frameworks;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				HEADER_SEARCH_PATHS = /Library/Frameworks;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-			};
-			name = Debug;
-		};
-		1A63BC69134F5DAC002EDFB4 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-			};
-			name = Release;
-		};
-		1A63BD39134F5F1E002EDFB4 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-			};
-			name = Debug;
-		};
-		1A63BD3A134F5F1E002EDFB4 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-			};
-			name = Release;
-		};
-		1A63BD46134F5F36002EDFB4 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				HEADER_SEARCH_PATHS = /Library/Frameworks;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-			};
-			name = Debug;
-		};
-		1A63BD47134F5F36002EDFB4 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-			};
-			name = Release;
-		};
-		1A63BD53134F5F43002EDFB4 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-			};
-			name = Debug;
-		};
-		1A63BD54134F5F43002EDFB4 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-			};
-			name = Release;
-		};
-		1A63BD60134F5F4D002EDFB4 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				HEADER_SEARCH_PATHS = /Library/Frameworks;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				USER_HEADER_SEARCH_PATHS = /Library/Frameworks;
-			};
-			name = Debug;
-		};
-		1A63BD61134F5F4D002EDFB4 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				USER_HEADER_SEARCH_PATHS = /Library/Frameworks;
-			};
-			name = Release;
-		};
-		1A63BD6D134F5F5E002EDFB4 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-			};
-			name = Debug;
-		};
-		1A63BD6E134F5F5E002EDFB4 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-			};
-			name = Release;
-		};
-		1A63BD7A134F5F68002EDFB4 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-			};
-			name = Debug;
-		};
-		1A63BD7B134F5F68002EDFB4 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-			};
-			name = Release;
-		};
-		1A63BD87134F5F72002EDFB4 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-			};
-			name = Debug;
-		};
-		1A63BD88134F5F72002EDFB4 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-			};
-			name = Release;
-		};
-		1A63BDDB134F6234002EDFB4 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-				USER_HEADER_SEARCH_PATHS = /Library/Frameworks;
-			};
-			name = Debug;
-		};
-		1A63BDDC134F6234002EDFB4 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = YES;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				TEST_AFTER_BUILD = YES;
-				USER_HEADER_SEARCH_PATHS = /Library/Frameworks;
-			};
-			name = Release;
-		};
-		1A63BE03134FB75F002EDFB4 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-			};
-			name = Debug;
-		};
-		1A63BE04134FB75F002EDFB4 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				ARCHS = "$(NATIVE_ARCH_ACTUAL)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = /Library/Frameworks;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				ONLY_ACTIVE_ARCH = YES;
-				PRODUCT_NAME = "$(TARGET_NAME)";
-			};
-			name = Release;
-		};
-		1AE7233D134E860B001C3F35 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ARCHS = "$(ARCHS_STANDARD_32_BIT)";
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_ENABLE_OBJC_GC = supported;
-				GCC_OPTIMIZATION_LEVEL = 0;
-				GCC_PREPROCESSOR_DEFINITIONS = DEBUG;
-				GCC_SYMBOLS_PRIVATE_EXTERN = NO;
-				GCC_VERSION = com.apple.compilers.llvm.clang.1_0;
-				GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				HEADER_SEARCH_PATHS = /Library/Frameworks;
-				MACOSX_DEPLOYMENT_TARGET = 10.6;
-				ONLY_ACTIVE_ARCH = NO;
-				SDKROOT = macosx;
-				USER_HEADER_SEARCH_PATHS = /Library/Frameworks;
-				VALID_ARCHS = i386;
-			};
-			name = Debug;
-		};
-		1AE7233E134E860B001C3F35 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ARCHS = "$(ARCHS_STANDARD_32_BIT)";
-				GCC_C_LANGUAGE_STANDARD = gnu99;
-				GCC_ENABLE_OBJC_GC = supported;
-				GCC_VERSION = com.apple.compilers.llvm.clang.1_0;
-				GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
-				GCC_WARN_ABOUT_RETURN_TYPE = YES;
-				GCC_WARN_UNUSED_VARIABLE = YES;
-				HEADER_SEARCH_PATHS = /Library/Frameworks;
-				MACOSX_DEPLOYMENT_TARGET = 10.6;
-				ONLY_ACTIVE_ARCH = NO;
-				SDKROOT = macosx;
-				USER_HEADER_SEARCH_PATHS = /Library/Frameworks;
-				VALID_ARCHS = i386;
-			};
-			name = Release;
-		};
-		1AE72340134E860B001C3F35 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				ARCHS = "$(ARCHS_STANDARD_32_BIT)";
-				COPY_PHASE_STRIP = NO;
-				DYLIB_COMPATIBILITY_VERSION = 1;
-				DYLIB_CURRENT_VERSION = 1;
-				FRAMEWORK_VERSION = A;
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "ANTLR/ANTLR-Prefix.pch";
-				INFOPLIST_FILE = "ANTLR/ANTLR-Info.plist";
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				VALID_ARCHS = i386;
-				WRAPPER_EXTENSION = framework;
-			};
-			name = Debug;
-		};
-		1AE72341134E860B001C3F35 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				ARCHS = "$(ARCHS_STANDARD_32_BIT)";
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				DYLIB_COMPATIBILITY_VERSION = 1;
-				DYLIB_CURRENT_VERSION = 1;
-				FRAMEWORK_VERSION = A;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_ENABLE_OBJC_GC = required;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "ANTLR/ANTLR-Prefix.pch";
-				INFOPLIST_FILE = "ANTLR/ANTLR-Info.plist";
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				VALID_ARCHS = i386;
-				WRAPPER_EXTENSION = framework;
-			};
-			name = Release;
-		};
-		1AE72343134E860B001C3F35 /* Debug */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = NO;
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_DYNAMIC_NO_PIC = NO;
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "ANTLRTests/ANTLRTests-Prefix.pch";
-				INFOPLIST_FILE = "ANTLRTests/ANTLRTests-Info.plist";
-				OTHER_LDFLAGS = (
-					"-framework",
-					SenTestingKit,
-				);
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				WRAPPER_EXTENSION = octest;
-			};
-			name = Debug;
-		};
-		1AE72344134E860B001C3F35 /* Release */ = {
-			isa = XCBuildConfiguration;
-			buildSettings = {
-				ALWAYS_SEARCH_USER_PATHS = NO;
-				COPY_PHASE_STRIP = YES;
-				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
-				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
-				GCC_PRECOMPILE_PREFIX_HEADER = YES;
-				GCC_PREFIX_HEADER = "ANTLRTests/ANTLRTests-Prefix.pch";
-				INFOPLIST_FILE = "ANTLRTests/ANTLRTests-Info.plist";
-				OTHER_LDFLAGS = (
-					"-framework",
-					SenTestingKit,
-				);
-				PRODUCT_NAME = "$(TARGET_NAME)";
-				WRAPPER_EXTENSION = octest;
-			};
-			name = Release;
-		};
-/* End XCBuildConfiguration section */
-
-/* Begin XCConfigurationList section */
-		1A63BC67134F5DAC002EDFB4 /* Build configuration list for PBXNativeTarget "Fuzzy" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A63BC68134F5DAC002EDFB4 /* Debug */,
-				1A63BC69134F5DAC002EDFB4 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1A63BD38134F5F1E002EDFB4 /* Build configuration list for PBXNativeTarget "combined" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A63BD39134F5F1E002EDFB4 /* Debug */,
-				1A63BD3A134F5F1E002EDFB4 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1A63BD45134F5F36002EDFB4 /* Build configuration list for PBXNativeTarget "lexertest-simple" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A63BD46134F5F36002EDFB4 /* Debug */,
-				1A63BD47134F5F36002EDFB4 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1A63BD52134F5F43002EDFB4 /* Build configuration list for PBXNativeTarget "LL-start" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A63BD53134F5F43002EDFB4 /* Debug */,
-				1A63BD54134F5F43002EDFB4 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1A63BD5F134F5F4D002EDFB4 /* Build configuration list for PBXNativeTarget "polydiff" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A63BD60134F5F4D002EDFB4 /* Debug */,
-				1A63BD61134F5F4D002EDFB4 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1A63BD6C134F5F5E002EDFB4 /* Build configuration list for PBXNativeTarget "simplecTreeParser" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A63BD6D134F5F5E002EDFB4 /* Debug */,
-				1A63BD6E134F5F5E002EDFB4 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1A63BD79134F5F68002EDFB4 /* Build configuration list for PBXNativeTarget "treeparser" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A63BD7A134F5F68002EDFB4 /* Debug */,
-				1A63BD7B134F5F68002EDFB4 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1A63BD86134F5F72002EDFB4 /* Build configuration list for PBXNativeTarget "treerewrite" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A63BD87134F5F72002EDFB4 /* Debug */,
-				1A63BD88134F5F72002EDFB4 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1A63BDDA134F6234002EDFB4 /* Build configuration list for PBXNativeTarget "scopes" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A63BDDB134F6234002EDFB4 /* Debug */,
-				1A63BDDC134F6234002EDFB4 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1A63BE02134FB75F002EDFB4 /* Build configuration list for PBXNativeTarget "hoistedPredicates" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1A63BE03134FB75F002EDFB4 /* Debug */,
-				1A63BE04134FB75F002EDFB4 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1AE72311134E860A001C3F35 /* Build configuration list for PBXProject "ANTLR" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1AE7233D134E860B001C3F35 /* Debug */,
-				1AE7233E134E860B001C3F35 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1AE7233F134E860B001C3F35 /* Build configuration list for PBXNativeTarget "ANTLR" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1AE72340134E860B001C3F35 /* Debug */,
-				1AE72341134E860B001C3F35 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-		1AE72342134E860B001C3F35 /* Build configuration list for PBXNativeTarget "ANTLRTests" */ = {
-			isa = XCConfigurationList;
-			buildConfigurations = (
-				1AE72343134E860B001C3F35 /* Debug */,
-				1AE72344134E860B001C3F35 /* Release */,
-			);
-			defaultConfigurationIsVisible = 0;
-			defaultConfigurationName = Release;
-		};
-/* End XCConfigurationList section */
-	};
-	rootObject = 1AE7230E134E860A001C3F35 /* Project object */;
-}
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/UserInterfaceState.xcuserstate b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/UserInterfaceState.xcuserstate
deleted file mode 100644
index 03da13b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/UserInterfaceState.xcuserstate
+++ /dev/null
@@ -1,99027 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-	<key>$archiver</key>
-	<string>NSKeyedArchiver</string>
-	<key>$objects</key>
-	<array>
-		<string>$null</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>6</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>7</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>8</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>9</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>10</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>11</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>12</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>13</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>14</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>15</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>16</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>17</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>18</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>19</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>20</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>21</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>22</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>23</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>24</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>25</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>26</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>27</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>28</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>29</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>30</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>31</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>32</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>33</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>34</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>35</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>36</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>37</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>38</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>39</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>40</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>41</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>222</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>309</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>395</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>471</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>636</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>764</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>824</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>884</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>964</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1044</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1104</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1204</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1264</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1324</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1403</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1463</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1530</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1587</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1644</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1704</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1787</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1871</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3031</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4325</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4382</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4453</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4530</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4590</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4667</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4724</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4801</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4861</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4940</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4997</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5076</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5133</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5274</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>8B8DDC6B-DDE2-4247-8F0D-A06E0D4B2DBD</string>
-		<string>3E880256-37A7-45EE-9711-6A7B5B8E709E</string>
-		<string>2A0687CA-9E09-43DA-AAB4-66AB0EF9AB0D</string>
-		<string>267DE2B3-8A97-43DB-B258-865995C3291E</string>
-		<string>9869D367-F4FF-47E1-9F0B-EC2129F3C250</string>
-		<string>C5B8F73C-C719-4392-B73A-2E9BC23A265E</string>
-		<string>2060A434-A276-41E2-9ADB-28BB1B8C65B3</string>
-		<string>51912CD1-86C0-4E43-BF83-A46B6AEE9BB9</string>
-		<string>33B3EA90-9D12-4F88-A34B-E2B32A707D40</string>
-		<string>984062D3-D917-4552-92A9-E57F4A3BA955</string>
-		<string>F7CA7E77-5E1E-4D51-B349-95E0260321DE</string>
-		<string>6EC9948E-0F92-459C-AF9D-782DA9D80829</string>
-		<string>DDBEAFBD-8E5A-4C34-83BD-2C4637025539</string>
-		<string>DDC0C46D-96FA-46F0-84C0-8E712DE1EB6A</string>
-		<string>B9397168-C6E4-4B33-9B07-E4E7C78BB1F5</string>
-		<string>2DE3D2D0-EE66-4B5F-8EAD-32D8585B9607</string>
-		<string>C345EBB9-FE8A-47EB-AA26-842A6A8726A3</string>
-		<string>BD9F3C49-0864-4BF6-8624-E3FE059EC4B3</string>
-		<string>8916566A-4F13-46CD-B79C-3C921EEE7D49</string>
-		<string>1F68007A-BC1A-4A45-B3EB-7C585A74C0C8</string>
-		<string>AED3ECB9-6F72-47B5-83EB-D9ED381ACAD7</string>
-		<string>65F4C6DD-94DD-44AD-8D8F-22A5797CE3AE</string>
-		<string>IDEWorkspaceDocument</string>
-		<string>94527DF4-8A4A-4B44-A86F-9E177764020F</string>
-		<string>787499DC-17C3-4BB8-94ED-F5F4A0D91345</string>
-		<string>36E20E99-C4B5-4408-9469-AD3DBB319BE0</string>
-		<string>97C4F628-88C1-41B1-9AC9-DF72045A615B</string>
-		<string>228B9835-EB0C-47C4-AF0C-562AE82B00DD</string>
-		<string>38E9C286-2E78-48F4-BE55-7F0891FDBB6A</string>
-		<string>6ABB3AFD-D548-4BC7-8153-8135D4962AA6</string>
-		<string>73A1D02F-4ED6-4C89-82A1-AD3DCB688C00</string>
-		<string>FE7B2FCF-EF2F-4DEB-8DE2-36A865A2803C</string>
-		<string>1A2E2AE9-7B63-407A-8A8F-B5149057CDD7</string>
-		<string>F47E26E1-DE28-455F-98EF-EA339320AD36</string>
-		<string>9B93B8C8-2C83-499A-AA57-E9AB210F54F5</string>
-		<string>827C8A69-A567-412B-A2F1-1FC135ED4231</string>
-		<string>9525D734-BB03-4B02-BF0B-2B807EC3DF3B</string>
-		<string>D005C949-00BD-483D-8EFC-E251FE97FA2B</string>
-		<string>C25840B0-EB05-42A5-84AC-811CFB8C95FC</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>46</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>46</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>49</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>50</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>221</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWindowToolbarIsVisible</string>
-		<string>IDEActiveWorkspaceTabController</string>
-		<string>IDEWindowFrame</string>
-		<string>IDEWorkspaceWindowControllerUniqueIdentifier</string>
-		<string>IDEWorkspaceTabController_CC7ABA9A-DE2E-4A35-BE25-9FFBEFB51B96</string>
-		<string>IDEOrderedWorkspaceTabControllers</string>
-		<false/>
-		<string>{{142, 337}, {600, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>59</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>61</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>172</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>205</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>212</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDETabLabel</string>
-		<string>AssistantEditorsLayout</string>
-		<string>IDEShowNavigator</string>
-		<string>IDEShowUtilities</string>
-		<string>IDEEditorArea</string>
-		<string>IDENavigatorArea</string>
-		<string>IDEWorkspaceTabControllerUtilityAreaSplitView</string>
-		<string>IDEWorkspaceTabControllerDesignAreaSplitView</string>
-		<string>ANTLRUnbufferedCommonTreeNodeStream.m</string>
-		<integer>0</integer>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>70</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>72</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>104</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>125</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>DefaultPersistentRepresentations</string>
-		<string>IDEEDitorArea_DebugArea</string>
-		<string>layoutTree</string>
-		<string>IDEEditorMode_Standard</string>
-		<string>ShowDebuggerArea</string>
-		<string>EditorMode</string>
-		<string>DebuggerSplitView</string>
-		<string>IDEShowEditor</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSMutableDictionary</string>
-				<string>NSDictionary</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSMutableDictionary</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>79</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>92</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>99</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>102</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEDebugArea_SplitView</string>
-		<string>IDEDebuggerAreaSplitView</string>
-		<string>LayoutMode</string>
-		<string>LayoutFocusMode</string>
-		<string>variables</string>
-		<string>console</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>81</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>DVTSplitViewItems</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>82</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>88</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>86</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>DVTIdentifier</string>
-		<string>DVTViewMagnitude</string>
-		<string>VariablesView</string>
-		<real>298</real>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSDictionary</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSDictionary</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>90</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ConsoleArea</string>
-		<real>301</real>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSMutableArray</string>
-				<string>NSArray</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSMutableArray</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>93</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>94</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>96</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>95</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>97</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<integer>1</integer>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>DBGVariablesViewFilterMode</string>
-		<integer>2</integer>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ConsoleFilterMode</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>105</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>121</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>106</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>121</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>116</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>108</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<string>Xcode.IDENavigableItemDomain.WorkspaceStructure</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>109</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>111</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>113</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>59</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>IDEArchivableStringIndexPair</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>IDEArchivableStringIndexPair</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<string>Classes</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSArray</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSArray</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>117</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.m</string>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSMutableString</string>
-				<string>NSString</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSMutableString</string>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>DVTDocumentLocation</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>DVTDocumentLocation</string>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>IDENavigableItemArchivableRepresentation</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>IDENavigableItemArchivableRepresentation</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>122</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>105</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>IDEWorkspaceTabControllerLayoutTreeNode</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>IDEWorkspaceTabControllerLayoutTreeNode</string>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>IDEWorkspaceTabControllerLayoutTree</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>IDEWorkspaceTabControllerLayoutTree</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>127</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>EditorLayout_PersistentRepresentation</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>129</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Main</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>133</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>161</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>EditorLayout_StateSavingStateDictionaries</string>
-		<string>EditorLayout_Selected</string>
-		<string>EditorLayout_Geometry</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>134</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>143</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>144</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>59</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>153</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>156</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>DocumentExtensionIdentifier</string>
-		<string>NavigableItemName</string>
-		<string>EditorState</string>
-		<string>DocumentNavigableItemName</string>
-		<string>FileDataType</string>
-		<string>DocumentURL</string>
-		<string>ArchivableRepresentation</string>
-		<string>Xcode.IDEKit.EditorDocument.SourceCode</string>
-		<string>@implementation ANTLRUnbufferedCommonTreeNodeStream</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>149</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>150</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>151</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>HideAllIssues</string>
-		<string>PrimaryDocumentTimestamp</string>
-		<string>PrimaryDocumentVisibleCharacterRange</string>
-		<string>PrimaryDocumentSelectedCharacterRange</string>
-		<real>324417698.23469198</real>
-		<string>{0, 1992}</string>
-		<string>{1697, 0}</string>
-		<string>public.objective-c-source</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>154</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.m</string>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSURL</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSURL</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>116</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>157</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>160</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>59</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {600, 600}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>164</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>165</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>168</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>167</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEEditor</string>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>170</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEDebuggerArea</string>
-		<real>115</real>
-		<true/>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>176</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>188</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode.IDEKit.Navigator.Structure</string>
-		<string>Xcode.IDEKit.Navigator.Issues</string>
-		<string>SelectedNavigator</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDESelectedObjects</string>
-		<string>IDEUnsavedDocumentFilteringEnabled</string>
-		<string>IDESCMStatusFilteringEnabled</string>
-		<string>IDERecentDocumentFilteringEnabled</string>
-		<string>IDEVisibleRect</string>
-		<string>IDENavigatorExpandedItemsBeforeFilteringSet</string>
-		<string>IDEExpandedItemsSet</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {259, 832}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>187</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSSet</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSSet</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>198</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>200</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>201</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>203</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>204</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEErrorFilteringEnabled</string>
-		<string>IDECollapsedGroups</string>
-		<string>IDEExpandedIssues</string>
-		<string>IDECollapsedFiles</string>
-		<string>IDERecentFilteringEnabled</string>
-		<string>IDEShowsByType</string>
-		<string>IDECollapsedTypes</string>
-		<string>IDESelectedNavigables</string>
-		<string>IDESchemeFilteringEnabled</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSMutableSet</string>
-				<string>NSSet</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSMutableSet</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {0, 0}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>206</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>207</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>210</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>209</integer>
-				</dict>
-			</array>
-		</dict>
-		<string></string>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>211</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>213</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>214</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>216</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>218</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>215</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>586</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>217</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>987</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>220</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEUtilitiesArea</string>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>46</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>223</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>224</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>225</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>223</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>226</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_1F24F9E0-9508-47FA-913F-2D38EC4B42F9</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>223</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{205, 359}, {600, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>227</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>228</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>285</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>295</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>301</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>SimpleCTP.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>229</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>230</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>245</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>259</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>279</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>231</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>237</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>243</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>244</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>232</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>233</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>235</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>234</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>236</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>238</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>239</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>241</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>240</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>242</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>246</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>257</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>247</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>257</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>255</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>248</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>249</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>250</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>252</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>254</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>227</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>251</integer>
-			</dict>
-		</dict>
-		<string>simplecTreeParser</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<string>examples</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>256</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>258</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>246</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>260</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>261</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>262</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>278</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>263</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>264</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>265</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>227</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>270</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>272</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>EOF</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>266</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>267</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>268</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323990081.71608299</real>
-		<string>{0, 1023}</string>
-		<string>{574, 0}</string>
-		<string>public.c-header</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>271</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>255</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>273</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>274</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>275</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>276</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>277</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>227</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>251</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>280</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>281</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>283</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>282</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>284</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>286</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>287</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>286</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>288</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode.IDEKit.Navigator.Logs</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>289</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>290</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>292</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>293</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>294</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDELogNavigatorVisibleRectStateKey</string>
-		<string>IDELogNavigatorSelectedObjectsStateKey</string>
-		<string>IDELogNavigatorExpandedItemsStateKey</string>
-		<string>IDELogNavigatorRecentFilterStateKey</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>296</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>297</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>299</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>298</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>300</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>302</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>303</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>305</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>307</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>304</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>457</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>306</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1116</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>308</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>310</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>310</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>311</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>312</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>394</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_66D45BE6-C0B4-41C3-B7F3-D54D52CBDC48</string>
-		<string>{{163, 316}, {600, 668}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>313</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>314</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>368</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>380</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>386</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>main.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>315</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>316</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>331</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>344</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>362</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>317</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>323</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>329</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>330</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>318</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>319</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>321</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>320</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>322</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>324</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>325</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>327</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>326</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>328</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>332</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>342</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>333</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>342</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>340</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>334</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>335</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>336</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>338</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>339</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>313</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>337</integer>
-			</dict>
-		</dict>
-		<string>treeparser</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>341</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>343</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>332</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>345</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>346</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>347</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>361</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>348</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>349</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>355</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>313</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>359</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>313</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>340</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>350</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>352</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>353</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>354</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>313</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>337</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>356</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>357</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323993967.40714198</real>
-		<string>{217, 1612}</string>
-		<string>{0, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>360</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/main.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>363</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>364</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>366</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>365</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>367</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>369</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>370</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>372</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>371</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>253</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>337</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>313</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>187</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>373</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>374</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>375</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>376</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>378</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>253</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>253</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>337</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>253</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>377</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>treerewrite</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>379</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Frameworks</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>381</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>382</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>384</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>383</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>399</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>385</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>387</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>388</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>390</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>392</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>389</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>457</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>391</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1116</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>393</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>310</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>396</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>396</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>397</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>398</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>470</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_789FE7FF-C37B-4DAE-8869-39BFB0787273</string>
-		<string>{{226, 336}, {600, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>399</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>400</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>451</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>456</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>462</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Build LL-start</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>401</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>402</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>417</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>429</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>445</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>403</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>409</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>415</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>416</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>404</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>405</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>407</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>406</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>408</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>410</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>411</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>413</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>412</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>414</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>418</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>427</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>419</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>427</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>425</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>420</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>421</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>424</integer>
-			</dict>
-		</dict>
-		<string>Xcode.IDENavigableItem.WorkspaceLogsDomain</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>422</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>423</integer>
-			</dict>
-		</dict>
-		<string>Build LL-start : 2:21:49 PM</string>
-		<integer>2147483647</integer>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>426</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://CF33AB74-A9A8-4346-8979-13E89E7154B5</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>428</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>418</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>430</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>431</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>432</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>444</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>433</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>434</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>423</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>435</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>423</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>437</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>438</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>439</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode.IDEKit.EditorDocument.LogDocument</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>SelectedDocumentLocations</string>
-		<string>com.apple.dt.IDE.BuildLogContentType</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>426</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>443</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>420</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>440</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>442</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>441</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>423</integer>
-			</dict>
-		</dict>
-		<integer>2147483647</integer>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>426</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>446</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>447</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>449</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>448</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>450</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>286</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>452</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>286</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>453</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>289</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>290</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>292</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>454</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>455</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>457</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>458</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>460</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>459</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>461</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>463</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>464</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>466</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>468</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>465</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>457</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>467</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1116</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>469</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>396</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>472</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>473</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>475</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>476</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>477</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>476</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>478</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>6</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>635</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWindowToolbarIsVisible</string>
-		<string>IDEActiveWorkspaceTabController</string>
-		<string>IDEWindowFrame</string>
-		<string>IDEWorkspaceWindowControllerUniqueIdentifier</string>
-		<string>IDEWorkspaceTabController_976F992C-63EB-4603-83E4-98D2941DDB9F</string>
-		<string>IDEOrderedWorkspaceTabControllers</string>
-		<string>{{205, 242}, {1127, 763}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>480</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>481</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>482</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>483</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>486</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>487</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>488</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>489</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>588</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>619</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>626</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDETabLabel</string>
-		<string>AssistantEditorsLayout</string>
-		<string>IDEShowNavigator</string>
-		<string>IDEShowUtilities</string>
-		<string>IDEEditorArea</string>
-		<string>IDENavigatorArea</string>
-		<string>IDEWorkspaceTabControllerUtilityAreaSplitView</string>
-		<string>IDEWorkspaceTabControllerDesignAreaSplitView</string>
-		<string>ANTLRTreeRewriter.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>490</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>491</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>492</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>493</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>495</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>496</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>497</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>498</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>499</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>527</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>542</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>580</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>DefaultPersistentRepresentations</string>
-		<string>IDEEDitorArea_DebugArea</string>
-		<string>layoutTree</string>
-		<string>IDEEditorMode_Standard</string>
-		<string>ShowDebuggerArea</string>
-		<string>EditorMode</string>
-		<string>DebuggerSplitView</string>
-		<string>IDEShowEditor</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>500</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>502</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>503</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>505</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>517</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>523</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>525</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEDebugArea_SplitView</string>
-		<string>IDEDebuggerAreaSplitView</string>
-		<string>LayoutMode</string>
-		<string>LayoutFocusMode</string>
-		<string>variables</string>
-		<string>console</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>508</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>DVTSplitViewItems</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>514</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>513</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>DVTIdentifier</string>
-		<string>DVTViewMagnitude</string>
-		<string>VariablesView</string>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>516</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ConsoleArea</string>
-		<real>828</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>518</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>519</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>521</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>520</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>522</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>828</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>524</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>DBGVariablesViewFilterMode</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>526</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ConsoleFilterMode</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>528</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>540</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>529</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>540</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>538</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>530</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>531</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<string>Xcode.IDENavigableItemDomain.WorkspaceStructure</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>532</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>534</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>536</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>533</integer>
-			</dict>
-		</dict>
-		<string>ANTLRTreeRewriter.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>535</integer>
-			</dict>
-		</dict>
-		<string>Classes</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>537</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>539</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeRewriter.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>541</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>528</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>543</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>544</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>EditorLayout_PersistentRepresentation</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>545</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>546</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Main</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>547</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>548</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>549</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>550</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>578</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>EditorLayout_StateSavingStateDictionaries</string>
-		<string>EditorLayout_Selected</string>
-		<string>EditorLayout_Geometry</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>551</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>552</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>553</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>554</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>555</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>556</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>557</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>558</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>559</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>560</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>566</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>533</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>574</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>575</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>577</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>NavigableItemName</string>
-		<string>ArchivableRepresentation</string>
-		<string>EditorState</string>
-		<string>DocumentNavigableItemName</string>
-		<string>FileDataType</string>
-		<string>DocumentURL</string>
-		<string>DocumentExtensionIdentifier</string>
-		<string>-applyOnce:Rule:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>538</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>530</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>561</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>562</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>563</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>564</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>533</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>535</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>565</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>571</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>572</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>573</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>PrimaryDocumentTimestamp</string>
-		<string>HideAllIssues</string>
-		<string>PrimaryDocumentVisibleCharacterRange</string>
-		<string>PrimaryDocumentSelectedCharacterRange</string>
-		<real>324441057.59489697</real>
-		<string>{3432, 2012}</string>
-		<string>{4860, 0}</string>
-		<string>public.objective-c-source</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>576</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeRewriter.m</string>
-		<string>Xcode.IDEKit.EditorDocument.SourceCode</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>579</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {1127, 717}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>581</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>582</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>585</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>583</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>584</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEEditor</string>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>586</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>587</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEDebuggerArea</string>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>589</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>590</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>591</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>592</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>602</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>590</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode.IDEKit.Navigator.Structure</string>
-		<string>Xcode.IDEKit.Navigator.Issues</string>
-		<string>SelectedNavigator</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>593</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>594</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>595</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>596</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>597</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>598</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>599</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>600</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>601</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDESelectedObjects</string>
-		<string>IDEUnsavedDocumentFilteringEnabled</string>
-		<string>IDESCMStatusFilteringEnabled</string>
-		<string>IDERecentDocumentFilteringEnabled</string>
-		<string>IDEVisibleRect</string>
-		<string>IDENavigatorExpandedItemsBeforeFilteringSet</string>
-		<string>IDEExpandedItemsSet</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {259, 832}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>603</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>604</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>605</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>606</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>607</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>608</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>609</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>610</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>611</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>612</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>613</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>614</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>615</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>616</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>617</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>618</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEErrorFilteringEnabled</string>
-		<string>IDECollapsedGroups</string>
-		<string>IDEExpandedIssues</string>
-		<string>IDECollapsedFiles</string>
-		<string>IDEVisibleRect</string>
-		<string>IDERecentFilteringEnabled</string>
-		<string>IDEShowsByType</string>
-		<string>IDECollapsedTypes</string>
-		<string>IDESelectedNavigables</string>
-		<string>IDESchemeFilteringEnabled</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {0, 0}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>620</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>621</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>624</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>623</integer>
-				</dict>
-			</array>
-		</dict>
-		<string></string>
-		<real>506</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>625</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>627</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>628</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>630</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>632</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>629</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>260</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>631</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>982</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>633</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>634</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEUtilitiesArea</string>
-		<real>340</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>476</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>637</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>637</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>638</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>7</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>639</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_DD922F63-58E5-4424-9493-5E8068570F13</string>
-		<string>{{118, -179}, {1400, 974}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>640</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>641</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>741</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>750</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>756</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRTree.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>642</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>643</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>644</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>659</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>683</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>701</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>735</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEEditorMode_Genius</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>645</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>651</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>657</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>658</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>646</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>647</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>649</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>648</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>330</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>650</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>549</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>652</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>653</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>655</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>654</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>330</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>656</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>549</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>670</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>660</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>668</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>661</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>668</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>666</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>662</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>663</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>664</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>665</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>640</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>667</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTree.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>669</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>660</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>670</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>671</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>1</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>668</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>672</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>2</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>673</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>670</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>681</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>674</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>675</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>678</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>676</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>677</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>navigableItem_name</string>
-		<string>ANTLRTree.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>679</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>680</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>identifier</string>
-		<string>Xcode.IDEKit.GeniusCategory.Counterparts</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>682</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTree.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>684</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>685</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>686</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>699</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>687</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>640</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>688</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>693</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>640</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>697</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>666</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>689</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>690</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>691</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>692</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>640</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>694</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>695</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>696</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323919127.41417199</real>
-		<string>{0, 2668}</string>
-		<string>{1505, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>698</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTree.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>700</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {880, 876}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>702</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>703</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>734</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>SplitPosition</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>704</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>705</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>719</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Alternate</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>706</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>718</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>707</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>708</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>709</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>714</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>640</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>717</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>@protocol ANTLRTree</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>666</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>710</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>711</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>712</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>713</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>640</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>715</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>716</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>696</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323919130.07890499</real>
-		<string>{382, 1955}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>698</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>700</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>720</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>732</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>721</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>677</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>722</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>726</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>677</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>730</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>681</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>723</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>724</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>725</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>676</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>677</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>679</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>680</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>727</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>728</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>729</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323919130.07915699</real>
-		<string>{825, 1423}</string>
-		<string>{3330, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>731</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTree.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>733</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {439, 876}}</string>
-		<real>0.5</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>736</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>737</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>739</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>738</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>740</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>742</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>743</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>744</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>745</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>746</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>747</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>748</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>749</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {244, 810}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>751</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>752</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>754</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>753</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>665</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>755</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>757</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>758</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>760</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>762</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>759</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>260</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>761</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>880</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>763</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>260</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>765</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>765</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>766</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>8</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>767</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>823</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_F5D326C0-04C4-47F6-8741-20A565897CC8</string>
-		<string>{{140, 349}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>768</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>801</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>809</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>815</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>769</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>770</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>785</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>789</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>795</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>771</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>777</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>783</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>784</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>772</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>773</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>775</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>774</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>776</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>778</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>779</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>781</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>780</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>782</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>786</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>787</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>787</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>788</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>786</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>790</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>791</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>792</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>794</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>793</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>796</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>797</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>799</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>798</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>800</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>802</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>803</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>804</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>805</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>806</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>807</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>808</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>810</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>811</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>813</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>812</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>814</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>816</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>817</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>819</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>821</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>818</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>661</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>820</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>921</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>822</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>765</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>825</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>826</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>882</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>825</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>9</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>883</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_932DCD76-57BC-4F32-BB6F-2C5F20724E0A</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>827</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>860</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>868</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>874</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>828</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>829</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>844</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>848</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>854</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>830</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>836</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>842</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>843</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>831</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>832</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>834</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>833</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>835</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>837</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>838</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>840</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>839</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>841</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>845</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>846</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>846</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>847</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>845</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>849</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>850</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>851</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>853</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>852</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>855</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>856</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>858</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>857</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>859</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>861</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>862</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>863</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>864</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>865</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>866</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>867</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>869</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>870</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>872</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>871</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>873</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>875</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>876</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>878</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>880</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>877</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>277</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>879</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>810</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>881</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<string>{{14, 359}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>825</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>885</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>885</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>886</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>10</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>887</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>963</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_E1713161-A393-4544-8845-403A2045B239</string>
-		<string>{{18, 382}, {600, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>888</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>889</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>944</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>949</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>955</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>SimpleCTP.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>890</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>891</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>906</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>918</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>938</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>892</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>898</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>904</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>905</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>893</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>894</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>896</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>895</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>897</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>899</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>900</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>902</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>901</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>903</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>907</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>916</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>908</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>916</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>914</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>909</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>910</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>911</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>912</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>913</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>888</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>251</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>915</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>917</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>907</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>919</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>920</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>921</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>936</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>922</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>923</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>924</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>888</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>928</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>930</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>-atom</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>925</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>926</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>927</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991249.92976499</real>
-		<string>{21219, 1327}</string>
-		<string>{40731, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>929</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>914</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>931</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>932</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>933</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>934</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>935</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>888</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>251</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>937</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {600, 578}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>939</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>940</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>942</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>941</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>943</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>286</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>945</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>286</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>946</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>289</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>290</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>292</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>948</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>950</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>951</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>953</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>952</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>954</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>956</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>957</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>959</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>961</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>958</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>457</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>960</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1116</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>962</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>885</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>965</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>966</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>49</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>965</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>11</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1043</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_9E45EA5C-8211-4C60-B769-E55D97496BF5</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>313</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>967</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1019</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1029</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1035</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>968</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>969</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>984</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>996</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1013</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>970</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>976</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>982</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>983</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>971</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>972</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>974</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>973</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>975</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>977</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>978</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>980</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>979</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>981</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>985</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>994</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>986</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>994</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>992</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>987</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>988</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>989</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>990</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>991</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>313</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>251</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>993</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>995</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>985</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>997</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>998</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>999</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1012</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1000</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1001</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1007</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>313</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1010</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>313</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>992</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1002</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1003</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1004</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1005</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1006</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>313</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>251</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1008</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1009</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323993875.32485598</real>
-		<string>{0, 1506}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1011</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/main.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1014</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1015</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1017</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1016</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1018</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1020</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1021</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1023</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1022</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>253</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>251</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>313</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>187</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1024</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1025</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1026</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1027</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1028</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>253</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>253</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>377</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>253</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>251</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>379</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1030</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1031</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1033</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1032</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1034</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1036</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1037</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1039</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1041</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1038</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>457</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1040</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1116</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1042</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>965</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1045</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1045</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1046</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>12</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1047</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1103</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_F33080DA-636E-4BF6-B5C1-24B49B17E33D</string>
-		<string>{{268, 382}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1048</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1081</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1089</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1095</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1049</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1050</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1065</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1069</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1075</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1051</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1057</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1063</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1064</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1052</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1053</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1055</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1054</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1056</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1058</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1059</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1061</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1060</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1062</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1066</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1067</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1067</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1068</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1066</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1070</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1071</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1072</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1074</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1073</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1076</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1077</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1079</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1078</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1080</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1082</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1083</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1084</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1085</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1086</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1087</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1088</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1090</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1091</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1093</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1092</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1094</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1096</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1097</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1099</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1098</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>277</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1100</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>810</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1102</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1045</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>472</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>473</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>475</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1105</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>477</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1105</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1106</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>13</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1107</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1203</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_2356F5AA-2FD8-4EDE-B754-CC2194BB1038</string>
-		<string>{{500, 210}, {1145, 763}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>480</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>481</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>482</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>483</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>486</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>487</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1108</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1109</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1195</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRTreeRuleReturnScope.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>490</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>491</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>492</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>493</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>495</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>496</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>497</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1110</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1111</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1126</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1157</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>500</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>502</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>503</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>505</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1112</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1118</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1124</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1125</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1113</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1116</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1115</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>569</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1117</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>315</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1119</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1120</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1122</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1121</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>569</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1123</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>315</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>524</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>526</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1127</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1137</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1128</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1137</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1135</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>530</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1129</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1132</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1133</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1131</integer>
-			</dict>
-		</dict>
-		<string>ANTLRTreeRuleReturnScope.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>535</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1134</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1136</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1138</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1127</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>543</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1140</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>545</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1141</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>547</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>548</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>549</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1155</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1143</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>552</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>553</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>554</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>555</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>556</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>557</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>558</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1144</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1150</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>574</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1153</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>577</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1135</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>530</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1145</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1148</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1131</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>535</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1149</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1151</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324441058.64650702</real>
-		<string>{47, 2017}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1154</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1156</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {885, 717}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1158</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1161</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>583</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1160</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>586</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1162</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1164</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>590</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>591</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>589</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1165</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1164</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1186</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode.IDEKit.Navigator.BatchFind</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1167</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1172</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1175</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEBatchFindNavigatorCollapsedGroups</string>
-		<string>IDEBatchFindNavigatorFindMode</string>
-		<string>IDEBatchFindNavigatorSelectedRowIndexes</string>
-		<string>IDEBatchFindNavigatorReplaceString</string>
-		<string>IDEBatchFindNavigatorScrollPosition</string>
-		<string>IDEBatchFindNavigatorFindString</string>
-		<string>IDEBatchFindNavigatorShowsOptions</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSMutableIndexSet</string>
-				<string>NSIndexSet</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSMutableIndexSet</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSLength</key>
-			<integer>3</integer>
-			<key>NSLocation</key>
-			<integer>23</integer>
-			<key>NSRangeCount</key>
-			<integer>1</integer>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSIndexSet</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSIndexSet</string>
-		</dict>
-		<integer>294</integer>
-		<string>ANTLRLexerRuleReturnScope</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>603</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>604</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>605</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>606</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>607</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>608</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>609</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>610</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>611</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>612</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {168, 651}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>593</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>594</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>595</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>596</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>597</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>598</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>599</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1188</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {259, 832}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1190</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1193</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1192</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>506</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1194</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1196</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1197</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1199</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1201</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1198</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>260</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1200</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>885</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>633</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1202</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>340</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1105</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1205</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1205</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1206</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>14</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1207</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1263</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_F6D47396-B5A1-43DA-8459-FA61D4CD1CA9</string>
-		<string>{{98, 359}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1241</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1249</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1255</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1209</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1210</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1225</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1229</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1235</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1211</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1223</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1224</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1212</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1213</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1215</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1214</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1216</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1218</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1221</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1220</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1222</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1226</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1227</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1227</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1228</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1226</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1230</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1231</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1232</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1234</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1233</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1236</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1237</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1239</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1238</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1240</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1242</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1243</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1244</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1245</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1246</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1247</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1248</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1250</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1251</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1253</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1252</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1254</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1256</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1257</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1259</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1261</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1258</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>661</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1260</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>921</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1262</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1205</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1265</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1266</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1322</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1265</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>15</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1323</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_CDAD906A-B3FD-4DE9-9B32-BA0E6A2D92D2</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1267</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1300</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1308</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1314</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1268</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1284</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1288</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1294</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1270</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1276</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1282</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1283</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1271</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1272</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1274</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1273</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1275</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1277</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1278</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1280</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1279</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1281</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1285</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1286</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1286</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1287</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1285</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1289</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1290</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1293</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1292</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1295</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1296</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1298</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1297</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1299</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1301</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1302</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1303</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1304</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1305</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1306</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1307</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1309</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1310</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1312</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1311</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1313</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1315</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1316</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1318</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1320</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1317</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>661</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1319</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>921</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1321</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<string>{{77, 382}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1265</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1325</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1325</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>49</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>16</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1326</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1402</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_0FE96BB2-94D6-44F4-B2EB-13242DED799C</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1327</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1328</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1380</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1388</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1394</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRTests.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1329</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1330</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1345</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1357</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1374</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1331</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1337</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1343</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1344</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1332</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1333</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1335</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1334</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1336</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1338</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1339</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1341</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1340</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1342</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1346</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1355</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1347</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1355</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1353</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1348</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1349</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1350</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1352</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1327</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1351</integer>
-			</dict>
-		</dict>
-		<string>ANTLRTests</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1354</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1356</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1346</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1358</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1359</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1360</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1373</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1361</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1327</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1362</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1327</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1366</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1368</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1363</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1364</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1365</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323970804.32803798</real>
-		<string>{0, 344}</string>
-		<string>{344, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1367</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1353</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1369</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1370</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1371</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1372</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1327</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1351</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1375</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1376</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1378</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1377</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1379</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1381</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1382</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1383</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1384</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1385</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1386</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1387</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1389</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1390</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1392</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1391</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1393</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1395</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1396</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1398</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1400</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1397</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>260</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1399</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1313</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1401</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1325</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1404</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1404</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1405</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>17</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1406</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1462</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_9D6F2215-F4FA-46B0-90C4-C0671DE09770</string>
-		<string>{{247, 338}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1407</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1440</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1448</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1454</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1408</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1409</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1424</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1428</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1434</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1410</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1416</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1422</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1423</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1411</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1412</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1414</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1413</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1415</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1417</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1418</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1420</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1419</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1421</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1425</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1426</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1426</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1427</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1425</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1429</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1430</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1431</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1433</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1432</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1435</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1436</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1438</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1437</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1439</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1441</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1442</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1443</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1444</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1445</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1446</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1447</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1449</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1450</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1452</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1451</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1453</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1455</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1456</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1458</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1460</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1457</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>277</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1459</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>810</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1461</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1404</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1464</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1464</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1465</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>18</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1466</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1529</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_DD4A7D9F-63E4-467E-A0FD-C95A8591EAC6</string>
-		<string>{{245, 338}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1467</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1500</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1521</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1468</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1469</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1488</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1470</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1476</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1482</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1483</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1471</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1472</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1474</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1473</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1475</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1477</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1478</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1480</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1479</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1481</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1485</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1486</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1486</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1487</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1485</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1489</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1490</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1491</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1493</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1492</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1495</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1496</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1498</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1497</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1499</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1502</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1503</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode.IDEKit.Navigator.BatchFind</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1511</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEBatchFindNavigatorCollapsedGroups</string>
-		<string>IDEBatchFindNavigatorFindMode</string>
-		<string>IDEBatchFindNavigatorSelectedRowIndexes</string>
-		<string>IDEBatchFindNavigatorReplaceString</string>
-		<string>IDEBatchFindNavigatorScrollPosition</string>
-		<string>IDEBatchFindNavigatorFindString</string>
-		<string>IDEBatchFindNavigatorShowsOptions</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<string>id&lt;ANTLRBaseTree&gt;</string>
-		<string>createTree</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1516</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1517</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1519</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1518</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1520</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1522</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1523</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1525</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1527</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1524</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>629</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1526</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>953</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1528</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1464</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1531</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1532</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1585</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1531</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>19</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1586</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_CBA99461-DAF4-4594-8438-4FBCFC6D29E7</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1533</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1566</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1571</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1577</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1534</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1535</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1550</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1554</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1560</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1536</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1542</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1548</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1549</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1537</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1538</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1540</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1539</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1541</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1543</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1544</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1546</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1545</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1547</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1551</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1552</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1552</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1553</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1551</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1555</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1556</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1557</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1559</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1558</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1561</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1562</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1564</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1563</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1565</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1570</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1572</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1573</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1575</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1574</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1576</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1578</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1579</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1581</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1583</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1580</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>629</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1582</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>953</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1584</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<string>{{45, 336}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1531</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1588</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1589</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1642</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1588</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>20</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1643</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_F5B531A8-A5F4-4360-9351-259E374487A2</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1590</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1623</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1628</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1634</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1591</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1592</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1607</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1611</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1617</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1593</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1599</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1605</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1606</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1594</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1595</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1597</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1596</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1598</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1600</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1601</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1603</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1602</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1604</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1608</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1609</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1609</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1610</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1608</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1612</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1613</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1614</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1616</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1615</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1618</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1619</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1621</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1620</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1622</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1624</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1625</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1626</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1627</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1629</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1630</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1632</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1631</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1633</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1635</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1636</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1638</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1640</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1637</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>630</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1639</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>952</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1641</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<string>{{182, 359}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1588</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1645</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1645</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1646</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>21</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1647</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1703</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_06062238-A35F-4397-A2EB-12BF21080894</string>
-		<string>{{35, 336}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1648</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1681</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1689</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1695</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1649</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1650</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1665</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1669</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1675</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1651</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1657</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1663</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1664</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1652</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1653</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1655</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1654</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1656</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1658</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1659</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1661</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1660</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1662</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1666</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1667</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1667</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1668</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1666</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1670</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1671</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1672</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1674</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1673</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1676</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1677</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1679</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1678</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1680</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1682</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1683</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1684</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1685</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1686</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1687</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1688</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1690</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1691</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1693</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1692</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1694</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1696</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1697</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1699</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1701</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1698</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>277</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1700</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>810</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1702</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1645</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>472</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>473</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>475</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1705</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>477</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1705</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1706</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>22</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1707</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1786</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_A8605124-DF69-4621-B7EB-52B137DAB6ED</string>
-		<string>{{226, 149}, {1443, 833}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>480</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>481</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>482</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>483</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>486</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>487</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1708</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1709</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1765</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1772</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1778</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRLexerRuleReturnScope.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>490</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>491</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>492</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>493</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>495</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>496</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>497</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1710</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1711</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1726</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1739</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1759</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>500</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>502</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>503</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>505</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1712</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1718</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1724</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1725</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1713</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1714</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1716</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1715</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>717</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1717</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>725</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1719</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1720</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1722</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1721</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>717</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1723</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>725</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>524</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>526</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1727</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1737</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1728</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1737</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1735</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>530</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1729</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1730</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1732</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1733</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1731</integer>
-			</dict>
-		</dict>
-		<string>ANTLRLexerRuleReturnScope.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>535</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1734</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1736</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1738</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1727</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>543</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1740</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>545</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1741</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>547</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>548</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>549</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1742</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1757</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1743</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>552</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>553</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>554</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>555</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>556</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>557</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>558</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1744</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1745</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1751</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1731</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>574</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1755</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>577</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>-getStart</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1735</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>530</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1746</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1747</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1748</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1749</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1731</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>535</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1750</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1752</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1753</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1754</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324441055.65792698</real>
-		<string>{78, 2018}</string>
-		<string>{1759, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1756</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1758</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {1443, 787}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1760</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1761</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1763</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>583</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1762</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>586</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1764</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>589</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>591</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1164</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1766</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1164</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1769</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>593</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>594</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>595</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>596</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>597</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>598</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>599</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1767</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1768</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {259, 832}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1167</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1172</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1770</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1771</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1773</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1774</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1776</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1775</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>576</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1777</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1779</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1780</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1782</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1784</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1781</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>260</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1783</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>982</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>633</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1785</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>340</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1705</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1788</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1788</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1789</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>23</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1790</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1870</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_E89174C7-252B-475D-9A02-F71427918439</string>
-		<string>{{184, 382}, {600, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1791</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1792</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1848</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1856</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1862</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>NSObject.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1793</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1794</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1809</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1824</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1842</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1795</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1801</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1807</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1808</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1796</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1797</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1799</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1798</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1800</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1802</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1803</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1805</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1804</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1806</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1810</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1822</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1811</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1822</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1820</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1812</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1813</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<string>Xcode.IDENavigableItemDomain.FrameworkFilePath</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1814</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1815</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1817</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1818</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1791</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1816</integer>
-			</dict>
-		</dict>
-		<string>Foundation.framework</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>379</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1819</integer>
-			</dict>
-		</dict>
-		<string>MacOSX10.6</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1821</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Developer/SDKs/MacOSX10.6.sdk/System/Library/Frameworks/Foundation.framework/Versions/C/Headers/NSObject.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1823</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1810</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1825</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1826</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1827</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1841</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1828</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1791</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1829</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1791</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1832</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1834</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1830</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1831</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324417838.38949198</real>
-		<string>{0, 935}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1833</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Developer/SDKs/MacOSX10.6.sdk/System/Library/Frameworks/Foundation.framework/Versions/C/Headers/NSObject.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1840</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1812</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1835</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1836</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1837</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1838</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1839</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1791</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1816</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>379</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1819</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1821</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1843</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1844</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1846</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1845</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1847</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1849</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1850</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1851</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1852</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1853</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1854</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1855</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1857</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1858</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1860</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1859</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1861</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1863</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1864</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1866</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1868</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1865</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>586</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1867</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>987</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1869</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1788</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1872</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1873</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1874</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1875</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1876</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1877</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1878</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1879</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1880</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1881</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1882</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1883</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1915</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1916</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1921</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2001</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2022</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3028</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>LastCompletedPersistentSchemeBasedActivityReport</string>
-		<string>DocumentWindows</string>
-		<string>DebuggingWindowBehavior</string>
-		<string>ActiveRunDestination</string>
-		<string>MiniDebuggingConsole</string>
-		<string>DefaultEditorFrameSizeForURLs</string>
-		<string>RecentEditorDocumentURLs</string>
-		<string>DefaultEditorStatesForURLs</string>
-		<string>AppFocusInMiniDebugging</string>
-		<string>BreakpointsActivated</string>
-		<string>ActiveScheme</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1884</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1885</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1886</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1887</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1888</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1914</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEActivityReportTitle</string>
-		<string>IDEActivityReportCompletionSummaryStringSegments</string>
-		<string>IDEActivityReportOptions</string>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1889</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1896</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1900</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1905</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1890</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1891</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1892</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1893</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1894</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1895</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEActivityReportStringSegmentBackSeparator</string>
-		<string>IDEActivityReportStringSegmentPriority</string>
-		<string>IDEActivityReportStringSegmentStringValue</string>
-		<string> </string>
-		<real>2</real>
-		<string>Build</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1890</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1891</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1892</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1897</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1898</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1899</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>: </string>
-		<real>4</real>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1890</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1891</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1892</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1901</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1902</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1903</integer>
-				</dict>
-			</array>
-		</dict>
-		<string> │ </string>
-		<real>1</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1904</integer>
-			</dict>
-			<key>NS.data</key>
-			<data>
-			YnBsaXN0MDDUAQIDBAUIPT5UJHRvcFgkb2JqZWN0c1gkdmVyc2lv
-			blkkYXJjaGl2ZXLRBgdUcm9vdIABrQkKERIcHR4mJy0zNjlVJG51
-			bGzTCwwNDg8QXE5TQXR0cmlidXRlc1YkY2xhc3NYTlNTdHJpbmeA
-			A4AMgAJZU3VjY2VlZGVk0xMUDBUYG1pOUy5vYmplY3RzV05TLmtl
-			eXOiFheABoAJohkagASABYALVk5TRm9udFdOU0NvbG9y1B8gDCEi
-			IyQlVk5TU2l6ZVZOU05hbWVYTlNmRmxhZ3MjQCYAAAAAAACAB4AI
-			EQ0QXxARTHVjaWRhR3JhbmRlLUJvbGTSKCkqK1gkY2xhc3Nlc1ok
-			Y2xhc3NuYW1loissVk5TRm9udFhOU09iamVjdNMuLwwwMTJXTlNX
-			aGl0ZVxOU0NvbG9yU3BhY2VCMAAQA4AK0igpNDWiNSxXTlNDb2xv
-			ctIoKTc4ojgsXE5TRGljdGlvbmFyedIoKTo8ojssXxASTlNBdHRy
-			aWJ1dGVkU3RyaW5nXxASTlNBdHRyaWJ1dGVkU3RyaW5nEgABhqBf
-			EA9OU0tleWVkQXJjaGl2ZXIACAARABYAHwAoADIANQA6ADwASgBQ
-			AFcAZABrAHQAdgB4AHoAhACLAJYAngChAKMApQCoAKoArACuALUA
-			vQDGAM0A1ADdAOYA6ADqAO0BAQEGAQ8BGgEdASQBLQE0ATwBSQFM
-			AU4BUAFVAVgBYAFlAWgBdQF6AX0BkgGnAawAAAAAAAACAQAAAAAA
-			AAA/AAAAAAAAAAAAAAAAAAABvg==
-			</data>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSMutableData</string>
-				<string>NSData</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSMutableData</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1892</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1906</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1891</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1907</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1908</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1909</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1910</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1911</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1912</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEActivityReportStringSegmentDateStyle</string>
-		<string>IDEActivityReportStringSegmentType</string>
-		<string>IDEActivityReportStringSegmentTimeStyle</string>
-		<string>IDEActivityReportStringSegmentDate</string>
-		<string>Today at 7:27 PM</string>
-		<real>3</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1913</integer>
-			</dict>
-			<key>NS.time</key>
-			<real>324440863.47144002</real>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSDate</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSDate</string>
-		</dict>
-		<integer>106</integer>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>13</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1917</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1918</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1919</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1920</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEDeviceLocation</string>
-		<string>IDEDeviceArchitecture</string>
-		<string>dvtdevice-local-computer:localhost</string>
-		<string>i386</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1922</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1924</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1925</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1926</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1927</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1929</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1931</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1932</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1933</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1934</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1935</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1937</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1938</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1940</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1942</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1943</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1944</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1946</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1951</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1954</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1957</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1960</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1963</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1968</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1971</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1974</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1977</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1980</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1983</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1986</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1989</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1992</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1995</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1998</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1923</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://0CBB9AA9-E42F-400E-9F34-053023A4DD9A</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>426</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1833</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>929</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1928</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/Simplifier.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1930</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1367</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1011</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>360</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>154</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1936</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1736</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1939</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/fuzzy/FuzzyLexer.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1941</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>539</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>271</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1945</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1949</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1950</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>width</string>
-		<string>height</string>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1952</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1953</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1955</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1956</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1958</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1959</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>578</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1961</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1962</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1964</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1965</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1966</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1967</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>width</string>
-		<string>height</string>
-		<real>1095</real>
-		<real>720</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1969</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1970</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1972</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1973</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1975</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1976</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1978</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1979</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1981</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1982</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1964</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1965</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1984</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1985</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1443</real>
-		<real>787</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1987</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1988</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1990</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1991</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1964</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1965</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1993</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1994</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1127</real>
-		<real>717</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1948</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1996</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1997</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>600</real>
-		<real>600</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1964</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1965</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1999</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2000</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>899</real>
-		<real>859</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2002</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2004</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2006</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2008</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2010</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2012</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2014</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2016</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2018</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2020</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2003</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://128D3AC5-F4C6-4508-8BEA-E26CF7377619</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2005</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2007</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2009</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://DF871C4F-2A91-4770-B164-12F2B22E77CB</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2011</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeRewriter.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2013</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeAdaptor.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2015</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2017</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2019</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://CD03766F-8B5F-414D-84B4-BECC59F190F5</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2021</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2023</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2024</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2025</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>434</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2026</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2035</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2288</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2943</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2961</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEQuickLookEditor.Editor</string>
-		<string>Xcode.Xcode3ProjectSupport.EditorDocument.Xcode3Project</string>
-		<string>Xcode.IDEKit.EditorDocument.DebuggerLogDocument</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2027</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2029</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2028</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/Library/Developer/Xcode/DerivedData/ANTLR-awfvqfoybjihuiaoxllmwcgxqxnm/Build/Products/Debug/Fuzzy</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2030</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2031</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2034</integer>
-			</dict>
-			<key>IDEQuickLookPageNumber</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2032</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2033</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/Library/Developer/Xcode/DerivedData/ANTLR-awfvqfoybjihuiaoxllmwcgxqxnm/Build/Products/Debug/Fuzzy</string>
-		<real>323996585.291471</real>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>IDEQuickLookDocumentLocation</string>
-				<string>DVTDocumentLocation</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>IDEQuickLookDocumentLocation</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2036</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2038</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2037</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2039</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2040</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2041</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2042</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2043</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2044</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2045</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2046</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2047</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2048</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2076</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2077</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2078</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2079</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2080</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2081</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2087</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2095</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode3ProjectEditor_Xcode3BuildPhasesEditor</string>
-		<string>Xcode3ProjectEditor_Xcode3InfoEditor</string>
-		<string>Xcode3ProjectEditor_Xcode3ProjectInfoEditor</string>
-		<string>Xcode3ProjectEditor_Xcode3BuildSettingsEditor</string>
-		<string>Xcode3ProjectEditor_Xcode3BuildRulesEditor</string>
-		<string>Xcode3ProjectEditorPreviousProjectEditorClass</string>
-		<string>Xcode3ProjectEditor.sourceList.splitview</string>
-		<string>Xcode3ProjectEditorSelectedDocumentLocations</string>
-		<string>Xcode3ProjectEditorPreviousTargetEditorClass</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2049</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2050</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2051</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2052</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2053</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2054</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2055</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2056</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2057</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2058</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2059</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2060</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2061</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2062</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2063</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2066</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2067</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2068</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2069</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2071</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2072</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2073</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2074</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2075</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>1AE7232C134E860B001C3F35</string>
-		<string>kXcode3BuildPhasesEditorScrollPointKey</string>
-		<string>1AE72315134E860B001C3F35</string>
-		<string>1AE72328134E860B001C3F35</string>
-		<string>1AE7232B134E860B001C3F35</string>
-		<string>1AE72317134E860B001C3F35</string>
-		<string>Xcode3BuildPhasesEditorDisclosedNamesKey</string>
-		<string>1AE72313134E860B001C3F35</string>
-		<string>Xcode3BuildPhasesEditorFilterKey</string>
-		<string>1AE72314134E860B001C3F35</string>
-		<string>1AE72316134E860B001C3F35</string>
-		<string>1AE7232A134E860B001C3F35</string>
-		<string>1AE72329134E860B001C3F35</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2064</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2065</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode3HeadersBuildPhaseDisclosedIndexes</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSLength</key>
-			<integer>1</integer>
-			<key>NSLocation</key>
-			<integer>0</integer>
-			<key>NSRangeCount</key>
-			<integer>1</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2070</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Link Binary With Libraries</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Xcode3ProjectInfoEditor</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2082</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2083</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2085</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2084</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>371</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2086</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>415</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2088</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2287</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2089</integer>
-			</dict>
-			<key>selection</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2091</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2090</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/</string>
-		<real>324364227.89618599</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2092</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2093</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2094</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2095</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2096</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Editor</string>
-		<string>Target</string>
-		<string>Xcode3BuildSettingsEditorLocations</string>
-		<string>Xcode3BuildSettingsEditor</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2097</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2098</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2099</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2100</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2101</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2102</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2104</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2105</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode3BuildPropertyNameDisplayMode</string>
-		<string>Xcode3BuildPropertyValueDisplayMode</string>
-		<string>Xcode3BuildSettingsEditorDisplayMode</string>
-		<string>Selected Build Properties</string>
-		<string>Collapsed Build Property Categories</string>
-		<string>Xcode3BuildSettingsEditorMode</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2106</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2107</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2108</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2109</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2110</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2111</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2112</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2113</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2115</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2116</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2117</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2118</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2119</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2120</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2121</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2122</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2123</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2124</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2125</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2126</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2127</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2128</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2129</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2132</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2133</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2134</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2143</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2144</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2148</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2149</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2150</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2151</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2153</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2154</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2155</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2156</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2157</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2162</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2164</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2165</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2167</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2172</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2175</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2176</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2188</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2197</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2198</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2199</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2200</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2201</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2203</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2204</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2205</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2206</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2207</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2209</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2210</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2211</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2212</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2213</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2214</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2215</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2216</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2218</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2220</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2221</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2222</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2223</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2224</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2225</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2226</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2227</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2228</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2229</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2230</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2231</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2232</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2233</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2234</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2235</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2236</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2237</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2238</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2239</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2240</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2241</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2242</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2243</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2244</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2245</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2246</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2247</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2248</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2249</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2250</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2251</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2252</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2253</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2254</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2255</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2256</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2257</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2258</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2259</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2260</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2261</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2262</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2263</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2264</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2265</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2266</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2267</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2268</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2270</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2271</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2272</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2273</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2274</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2275</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2276</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2277</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2278</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2279</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2280</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2281</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2282</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2283</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2284</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2285</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2286</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Architectures||ADDITIONAL_SDKS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Architectures||ARCHS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Architectures||ONLY_ACTIVE_ARCH</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Architectures||SDKROOT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Architectures||SUPPORTED_PLATFORMS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Architectures||VALID_ARCHS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Build Locations||OBJROOT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Build Locations||SHARED_PRECOMPS_DIR</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Build Locations||SYMROOT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Build Options||BUILD_VARIANTS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Build Options||ENABLE_OPENMP_SUPPORT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Build Options||GENERATE_PROFILING_CODE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Build Options||PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Build Options||RUN_CLANG_STATIC_ANALYZER</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Build Options||SCAN_ALL_SOURCE_FILES_FOR_INCLUDES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Build Options||VALIDATE_PRODUCT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Code Signing||CODE_SIGN_ENTITLEMENTS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Code Signing||CODE_SIGN_IDENTITY</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Code Signing||CODE_SIGN_RESOURCE_RULES_PATH</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Code Signing||OTHER_CODE_SIGN_FLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Compiler Version||GCC_VERSION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||ALTERNATE_GROUP</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||ALTERNATE_MODE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||ALTERNATE_OWNER</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||ALTERNATE_PERMISSIONS_FILES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||COMBINE_HIDPI_IMAGES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||DEPLOYMENT_LOCATION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||DEPLOYMENT_POSTPROCESSING</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||DSTROOT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||INSTALL_GROUP</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||INSTALL_MODE_FLAG</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||INSTALL_OWNER</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||INSTALL_PATH</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||MACOSX_DEPLOYMENT_TARGET</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||SEPARATE_STRIP</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||SKIP_INSTALL</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||STRIPFLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||STRIP_INSTALLED_PRODUCT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Deployment||STRIP_STYLE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Kernel Module||MODULE_NAME</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Kernel Module||MODULE_START</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Kernel Module||MODULE_STOP</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Kernel Module||MODULE_VERSION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||BUNDLE_LOADER</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||DEAD_CODE_STRIPPING</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||DYLIB_COMPATIBILITY_VERSION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||DYLIB_CURRENT_VERSION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||EXPORTED_SYMBOLS_FILE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||GENERATE_MASTER_OBJECT_FILE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||INIT_ROUTINE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||KEEP_PRIVATE_EXTERNS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||LD_DYLIB_INSTALL_NAME</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||LD_GENERATE_MAP_FILE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||LD_OPENMP_FLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||LD_RUNPATH_SEARCH_PATHS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||LINKER_DISPLAYS_MANGLED_NAMES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||LINK_WITH_STANDARD_LIBRARIES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||MACH_O_TYPE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||ORDER_FILE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||OTHER_LDFLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||PRELINK_FLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||PRELINK_LIBS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||PRESERVE_DEAD_CODE_INITS_AND_TERMS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||SECTORDER_FLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||SEPARATE_SYMBOL_EDIT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||STANDARD_C_PLUS_PLUS_LIBRARY_TYPE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||UNEXPORTED_SYMBOLS_FILE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Linking||WARNING_LDFLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||APPLY_RULES_IN_COPY_FILES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||COPYING_PRESERVES_HFS_DATA</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||EXECUTABLE_EXTENSION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||EXECUTABLE_PREFIX</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||FRAMEWORK_VERSION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||GENERATE_PKGINFO_FILE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||INFOPLIST_EXPAND_BUILD_SETTINGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||INFOPLIST_FILE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||INFOPLIST_OTHER_PREPROCESSOR_FLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||INFOPLIST_OUTPUT_FORMAT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||INFOPLIST_PREFIX_HEADER</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||INFOPLIST_PREPROCESS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||INFOPLIST_PREPROCESSOR_DEFINITIONS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||PLIST_FILE_OUTPUT_FORMAT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||PRIVATE_HEADERS_FOLDER_PATH</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||PRODUCT_NAME</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||PUBLIC_HEADERS_FOLDER_PATH</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||STRINGS_FILE_OUTPUT_ENCODING</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Packaging||WRAPPER_EXTENSION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Search Paths||ALWAYS_SEARCH_USER_PATHS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Search Paths||EXCLUDED_RECURSIVE_SEARCH_PATH_SUBDIRECTORIES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Search Paths||FRAMEWORK_SEARCH_PATHS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Search Paths||HEADER_SEARCH_PATHS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Search Paths||INCLUDED_RECURSIVE_SEARCH_PATH_SUBDIRECTORIES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Search Paths||LIBRARY_SEARCH_PATHS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Search Paths||REZ_SEARCH_PATHS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Search Paths||USER_HEADER_SEARCH_PATHS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Unit Testing||OTHER_TEST_FLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Unit Testing||TEST_AFTER_BUILD</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Unit Testing||TEST_HOST</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Unit Testing||TEST_RIG</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Versioning||CURRENT_PROJECT_VERSION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Versioning||VERSIONING_SYSTEM</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Versioning||VERSION_INFO_BUILDER</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Versioning||VERSION_INFO_EXPORT_DECL</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Versioning||VERSION_INFO_FILE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Versioning||VERSION_INFO_PREFIX</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>Versioning||VERSION_INFO_SUFFIX</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_DYNAMIC_NO_PIC</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_ENABLE_KERNEL_DEVELOPMENT</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_ENABLE_OBJC_GC</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_ENABLE_SSE3_EXTENSIONS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_ENABLE_SSE41_EXTENSIONS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_ENABLE_SSE42_EXTENSIONS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_FAST_MATH</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_FAST_OBJC_DISPATCH</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_GENERATE_DEBUGGING_SYMBOLS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_INLINES_ARE_PRIVATE_EXTERN</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_NO_COMMON_BLOCKS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_REUSE_STRINGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_STRICT_ALIASING</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_SYMBOLS_PRIVATE_EXTERN</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_THREADSAFE_STATICS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||GCC_UNROLL_LOOPS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Code Generation||LLVM_LTO</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_CHAR_IS_UNSIGNED_CHAR</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_CW_ASM_SYNTAX</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_C_LANGUAGE_STANDARD</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_ENABLE_ASM_KEYWORD</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_ENABLE_BUILTIN_FUNCTIONS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_ENABLE_CPP_EXCEPTIONS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_ENABLE_CPP_RTTI</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_ENABLE_FLOATING_POINT_LIBRARY_CALLS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_ENABLE_OBJC_EXCEPTIONS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_ENABLE_PASCAL_STRINGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_ENABLE_TRIGRAPHS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_FORCE_CPU_SUBTYPE_ALL</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_INCREASE_PRECOMPILED_HEADER_SHARING</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_INPUT_FILETYPE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_LINK_WITH_DYNAMIC_LIBRARIES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_PRECOMPILE_PREFIX_HEADER</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_PREFIX_HEADER</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_SHORT_ENUMS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_USE_INDIRECT_FUNCTION_CALLS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_USE_REGISTER_FUNCTION_CALLS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||GCC_USE_STANDARD_INCLUDE_SEARCHING</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||OTHER_CFLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Language||OTHER_CPLUSPLUSFLAGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Preprocessing||GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_TREAT_IMPLICIT_FUNCTION_DECLARATIONS_AS_ERRORS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_TREAT_WARNINGS_AS_ERRORS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_64_TO_32_BIT_CONVERSION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_ABOUT_MISSING_NEWLINE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_ABOUT_MISSING_PROTOTYPES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_ABOUT_POINTER_SIGNEDNESS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_ABOUT_RETURN_TYPE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_CHECK_SWITCH_STATEMENTS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_FOUR_CHARACTER_CONSTANTS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_INHIBIT_ALL_WARNINGS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_MISSING_PARENTHESES</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_MULTIPLE_DEFINITION_TYPES_FOR_SELECTOR</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_NON_VIRTUAL_DESTRUCTOR</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_PEDANTIC</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_SHADOW</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_SIGN_COMPARE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_STRICT_SELECTOR_MATCH</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_TYPECHECK_CALLS_TO_PRINTF</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_UNDECLARED_SELECTOR</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_UNINITIALIZED_AUTOS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_UNKNOWN_PRAGMAS</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_UNUSED_FUNCTION</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_UNUSED_LABEL</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_UNUSED_PARAMETER</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_UNUSED_VALUE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||GCC_WARN_UNUSED_VARIABLE</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>LLVM compiler 2.0 - Warnings||WARNING_CFLAGS</string>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>Xcode3ProjectDocumentLocation</string>
-				<string>DVTDocumentLocation</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>Xcode3ProjectDocumentLocation</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2289</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2293</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2295</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2297</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2299</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2301</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2303</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2305</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2307</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2309</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2311</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2313</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2315</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2317</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2319</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2321</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2323</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2325</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2327</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2329</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2331</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2333</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1929</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2335</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2337</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2339</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2341</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2343</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2345</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2347</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2349</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2353</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2355</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2357</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2359</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2361</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2363</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2365</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2367</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2369</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2371</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2373</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2375</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2377</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2379</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2381</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2383</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2385</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2387</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2389</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2391</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2393</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2395</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2397</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2399</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2401</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2403</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2405</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2407</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2409</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2411</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2413</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2415</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1944</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2417</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2419</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2421</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2423</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2425</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2427</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2429</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2431</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2433</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2435</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2437</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2439</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2441</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2443</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2445</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2447</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2449</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2451</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2453</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2455</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2457</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2459</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1942</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1937</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2461</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2463</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2465</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2467</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2469</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2471</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2473</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2475</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2477</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2481</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2483</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2487</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2489</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2491</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2492</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2496</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2498</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2500</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2502</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2512</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2517</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2521</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2525</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2529</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2533</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2536</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2539</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2543</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2547</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2551</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2554</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2558</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2562</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2566</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2572</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2576</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2580</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2583</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2586</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2589</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2593</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2597</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2601</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2605</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2609</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2613</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2617</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2621</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2625</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2629</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2633</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2636</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2639</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2643</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2646</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2650</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2653</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2656</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2659</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2663</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2667</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2671</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2675</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2678</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2682</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2686</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2690</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2694</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2697</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2700</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2704</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2707</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2710</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2714</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2717</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2721</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2725</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2728</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2732</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2736</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2739</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2743</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2747</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2751</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2755</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2759</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2763</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2767</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2771</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2775</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2778</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2782</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2785</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2789</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2792</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2796</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2799</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2803</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2807</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2811</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2815</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2819</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2823</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2827</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2831</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2834</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2838</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2842</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2846</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2850</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2853</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2857</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2861</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2865</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2869</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2873</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2877</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2880</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2883</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2887</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2891</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2895</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2898</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2902</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2905</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2909</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2913</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2917</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2920</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2923</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2927</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2931</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2934</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2937</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2939</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2290</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/Main.java</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2292</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Developer/SDKs/MacOSX10.6.sdk/System/Library/Frameworks/Foundation.framework/Versions/C/Headers/NSAppleScript.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2294</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/Simplifier.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2296</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2298</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/PolyLexer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2300</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/LangLexer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2302</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRecognizerSharedState.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2304</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeIterator.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2306</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonToken.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2308</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRParseTree.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2310</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/PolyParser.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2312</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2314</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTree.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2316</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2318</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteLexer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2320</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/var/folders/g7/g7-mBaqj2RaJUU+F71D0SU+++TI/-Tmp-/objc_msgSend_disassembly_0x9881dedb.nasm</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2322</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2324</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2326</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/input</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2328</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/input</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2330</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/scopes/input</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2332</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2334</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonToken.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2336</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2338</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/Simplifier.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2340</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRFastQueue.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2342</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/LL-star/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2344</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/hoistedPredicates/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2346</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2348</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLookaheadStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2350</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2352</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treerewrite/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2354</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/fuzzy/FuzzyLexer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2356</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/scopes/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2358</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/combined/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2360</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/fuzzy/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2362</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/lexertest-simple/main.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2364</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2366</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseMapElement.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2368</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2370</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRecognizerSharedState.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2372</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2374</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRPtrBuffer.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2376</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2378</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDoubleKeyMap.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2380</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeNodeStream.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2382</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2384</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2386</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2388</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDebugTreeNodeStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2390</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeNodeStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2392</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/output</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2394</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTree.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2396</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRInputStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2398</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreePatternParser.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2400</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTree.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2402</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRPtrStack.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2404</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRewriteRuleElementStream.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2406</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/test/runtime/token/ANTLRCommonTokenTest.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2408</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTokenStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2410</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRuntimeException.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2412</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRParseTree.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2414</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTokenStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2416</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRUniqueIDMap.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2418</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2420</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeVisitor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2422</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRewriteRuleNodeStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2424</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeWizard.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2426</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2428</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2430</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2432</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2434</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2436</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Developer/SDKs/MacOSX10.6.sdk/System/Library/Frameworks/Foundation.framework/Versions/C/Headers/NSObject.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2438</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTree.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2440</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2442</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRuntimeException.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2444</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2446</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTokenStream.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2448</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRNodeMapElement.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2450</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseRecognizer.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2452</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/LangParser.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2454</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Developer/SDKs/MacOSX10.6.sdk/System/Library/Frameworks/Foundation.framework/Versions/C/Headers/NSString.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2456</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeRewriter.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2458</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRPtrStack.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2460</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTokenStream.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2462</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRStringStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2464</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2466</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreePatternParser.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2468</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLookaheadStream.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2470</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDFA.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2472</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeWizard.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2474</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2476</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2478</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRewriteRuleTokenStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2480</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/scopes/SymbolTable.g</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2482</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRMap.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2484</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDebugTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2486</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRPtrBuffer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2488</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/LangParser.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2490</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeRewriter.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1136</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2493</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2495</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2497</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeNodeStream.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2499</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2501</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2503</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRewriteRuleNodeStream.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2505</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2507</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2509</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseStack.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2511</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2513</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRecognitionException.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2516</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323994196.47168499</real>
-		<string>{0, 694}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2518</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2519</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2520</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324409866.92811</real>
-		<string>{0, 2523}</string>
-		<string>{1904, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2522</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2523</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2524</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323992265.70342797</real>
-		<string>{15643, 725}</string>
-		<string>{16364, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2526</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2527</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2528</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324431688.50582999</real>
-		<string>{0, 2923}</string>
-		<string>{1661, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2530</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2531</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2532</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991920.336025</real>
-		<string>{0, 951}</string>
-		<string>{247, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2534</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2535</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2532</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323998977.39440101</real>
-		<string>{0, 1393}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2537</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2538</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324400472.07226503</real>
-		<string>{0, 3189}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2540</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2541</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2542</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323916640.42540002</real>
-		<string>{3272, 1581}</string>
-		<string>{4662, 20}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2544</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2545</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2546</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324429732.66660303</real>
-		<string>{3267, 2188}</string>
-		<string>{3258, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2548</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2549</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2550</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324426667.94757599</real>
-		<string>{0, 2638}</string>
-		<string>{2084, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2552</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2553</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2532</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991925.66436201</real>
-		<string>{0, 737}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2555</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2556</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2557</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324432773.92324698</real>
-		<string>{2871, 1937}</string>
-		<string>{3733, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2559</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2560</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2561</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323918775.21388799</real>
-		<string>{825, 1664}</string>
-		<string>{3364, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2563</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2564</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2565</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991958.93438703</real>
-		<string>{0, 947}</string>
-		<string>{243, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323995435.26982701</real>
-		<string>{0, 904}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2570</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2571</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323963868.237019</real>
-		<string>{0, 1445}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2573</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2574</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2575</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324414176.27392399</real>
-		<string>{11291, 1545}</string>
-		<string>{12802, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2577</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2578</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2579</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324401544.86127102</real>
-		<string>{0, 2259}</string>
-		<string>{1883, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2581</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2582</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323993804.56084901</real>
-		<string>{0, 118}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2584</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2585</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323994255.81979299</real>
-		<string>{0, 7}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2587</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2588</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323995809.48964798</real>
-		<string>{0, 97}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2590</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2591</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2592</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324437798.21673</real>
-		<string>{3553, 2268}</string>
-		<string>{3868, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2594</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2595</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2596</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324432938.69757497</real>
-		<string>{394, 2191}</string>
-		<string>{9292, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2598</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2599</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2600</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324440456.98038799</real>
-		<string>{2869, 1679}</string>
-		<string>{3716, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2602</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2603</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2604</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324436297.57894599</real>
-		<string>{4094, 2581}</string>
-		<string>{4957, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2606</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2607</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2608</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991935.93480003</real>
-		<string>{0, 902}</string>
-		<string>{253, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2610</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2611</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2612</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324412765.68417799</real>
-		<string>{2555, 1260}</string>
-		<string>{3290, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2614</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2615</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2616</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323997773.22955102</real>
-		<string>{0, 1098}</string>
-		<string>{190, 239}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2618</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2619</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2620</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323997633.580167</real>
-		<string>{0, 1074}</string>
-		<string>{200, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2623</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2624</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323998098.80162501</real>
-		<string>{0, 1691}</string>
-		<string>{390, 249}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2626</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2627</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2628</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324413128.654562</real>
-		<string>{3, 2278}</string>
-		<string>{1887, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2630</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2631</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2632</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323998229.417656</real>
-		<string>{0, 2002}</string>
-		<string>{960, 242}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2634</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2635</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323998272.29509199</real>
-		<string>{0, 1289}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2637</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2638</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2532</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323993444.857252</real>
-		<string>{0, 1036}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2640</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2641</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2642</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323997879.67560601</real>
-		<string>{0, 1109}</string>
-		<string>{197, 238}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2644</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2645</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323996597.73368597</real>
-		<string>{0, 748}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2647</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2648</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2649</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323997564.08925098</real>
-		<string>{0, 955}</string>
-		<string>{211, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2651</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2652</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323997740.41328001</real>
-		<string>{0, 631}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2654</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2655</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>268</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991956.51889098</real>
-		<string>{0, 821}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2657</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2658</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323914776.143372</real>
-		<string>{0, 2221}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2660</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2661</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2662</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324431511.42497897</real>
-		<string>{0, 2454}</string>
-		<string>{2246, 11}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2664</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2665</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2666</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324400607.254843</real>
-		<string>{1283, 2182}</string>
-		<string>{6261, 1}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2668</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2669</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2670</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324436274.26169902</real>
-		<string>{7238, 2038}</string>
-		<string>{8431, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2672</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2673</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2674</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323917736.789765</real>
-		<string>{5708, 1546}</string>
-		<string>{6321, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2676</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2677</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2565</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323995781.52013803</real>
-		<string>{0, 909}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2679</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2680</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2681</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323917645.241445</real>
-		<string>{0, 1288}</string>
-		<string>{127, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2683</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2684</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2685</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324431658.89625299</real>
-		<string>{0, 2174}</string>
-		<string>{1817, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2687</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2688</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2689</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323998717.00971699</real>
-		<string>{46491, 2812}</string>
-		<string>{24064, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2691</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2692</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2693</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991960.92231297</real>
-		<string>{0, 849}</string>
-		<string>{254, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2695</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2696</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323995447.02758801</real>
-		<string>{0, 195}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2698</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2699</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324432009.839122</real>
-		<string>{0, 2419}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2701</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2702</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2703</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324422636.81480199</real>
-		<string>{4578, 1748}</string>
-		<string>{5858, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2705</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2706</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323993798.49312198</real>
-		<string>{0, 233}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2708</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2709</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324436728.64446801</real>
-		<string>{0, 1681}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2711</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2712</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2713</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323917693.27358299</real>
-		<string>{0, 1837}</string>
-		<string>{1836, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2715</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2716</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324422407.05101901</real>
-		<string>{0, 2563}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2718</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2719</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2720</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324426155.38032198</real>
-		<string>{2376, 2026}</string>
-		<string>{1582, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2722</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2723</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2724</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323917102.30272698</real>
-		<string>{0, 1158}</string>
-		<string>{543, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2726</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2727</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323916228.22060299</real>
-		<string>{0, 3469}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2729</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2730</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2731</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323963859.82644898</real>
-		<string>{2834, 1401}</string>
-		<string>{3758, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2733</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2734</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2735</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324409948.76730502</real>
-		<string>{1181, 1454}</string>
-		<string>{2325, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2737</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2738</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323916551.38835502</real>
-		<string>{0, 2730}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2740</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2741</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2742</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324426418.68622297</real>
-		<string>{0, 2343}</string>
-		<string>{1931, 13}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2744</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2745</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2746</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324412735.31168002</real>
-		<string>{1359, 1828}</string>
-		<string>{2852, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2748</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2749</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2750</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324414185.99766701</real>
-		<string>{2616, 1311}</string>
-		<string>{3656, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2752</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2753</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2754</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324440370.639184</real>
-		<string>{0, 2023}</string>
-		<string>{1752, 83}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2756</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2757</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2758</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324437798.219513</real>
-		<string>{9059, 2872}</string>
-		<string>{9164, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2760</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2761</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2762</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324431765.90748399</real>
-		<string>{1492, 2008}</string>
-		<string>{2875, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2764</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2765</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2766</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323996535.758793</real>
-		<string>{380, 2043}</string>
-		<string>{2367, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2768</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2769</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2770</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324436864.92715198</real>
-		<string>{0, 1684}</string>
-		<string>{4936, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2772</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2773</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2774</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991883.09180599</real>
-		<string>{0, 775}</string>
-		<string>{256, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2776</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2777</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2565</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991947.84153402</real>
-		<string>{0, 755}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2779</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2780</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2781</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991940.85978901</real>
-		<string>{0, 895}</string>
-		<string>{250, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2783</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2780</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2784</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991949.74583501</real>
-		<string>{239, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2786</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2787</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2788</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323993442.86846501</real>
-		<string>{584, 1666}</string>
-		<string>{233, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2790</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2791</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324417873.568241</real>
-		<string>{0, 1255}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2793</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2794</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2795</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324425413.43702298</real>
-		<string>{0, 3283}</string>
-		<string>{1739, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2797</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2798</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323968006.69858402</real>
-		<string>{0, 923}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2800</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2801</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2802</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324431829.74965</real>
-		<string>{5540, 1396}</string>
-		<string>{6662, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2804</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2805</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2806</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324437401.74129099</real>
-		<string>{0, 3346}</string>
-		<string>{1797, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2808</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2809</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2810</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324421435.024073</real>
-		<string>{102, 2861}</string>
-		<string>{193, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2812</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2813</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2814</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324426394.96874899</real>
-		<string>{0, 2146}</string>
-		<string>{1639, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2816</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2817</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2818</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324436606.93009198</real>
-		<string>{3687, 1874}</string>
-		<string>{4722, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2820</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2821</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2822</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323999071.56422198</real>
-		<string>{12932, 1171}</string>
-		<string>{13981, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2824</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2825</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2826</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323995848.84945101</real>
-		<string>{21965, 1757}</string>
-		<string>{22896, 95}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2828</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2829</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2830</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324415593.73750901</real>
-		<string>{5439, 1874}</string>
-		<string>{7064, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2832</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2833</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323917117.24232203</real>
-		<string>{2409, 1259}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2835</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2836</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2837</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324410006.29558802</real>
-		<string>{1940, 2182}</string>
-		<string>{3080, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2839</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2840</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2841</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324440890.34305298</real>
-		<string>{3432, 2012}</string>
-		<string>{4860, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2843</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2844</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2845</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324440890.34319198</real>
-		<string>{78, 2018}</string>
-		<string>{1759, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2847</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2848</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2849</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323917830.14173597</real>
-		<string>{6240, 2512}</string>
-		<string>{7297, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2851</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2852</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>151</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324431694.36976302</real>
-		<string>{0, 2678}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2854</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2855</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2856</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324431995.00957501</real>
-		<string>{3979, 1931}</string>
-		<string>{4147, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2858</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2859</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2860</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324413092.90767199</real>
-		<string>{341, 2680}</string>
-		<string>{2305, 34}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2862</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2863</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2864</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324399293.830338</real>
-		<string>{6483, 1894}</string>
-		<string>{7621, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2866</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2867</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2868</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324422940.22563303</real>
-		<string>{3038, 2830}</string>
-		<string>{5494, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2870</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2871</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2872</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323998972.84941399</real>
-		<string>{0, 1716}</string>
-		<string>{236, 19}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2874</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2875</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2876</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324440838.34989101</real>
-		<string>{0, 1840}</string>
-		<string>{1601, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2878</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2879</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323916742.91389602</real>
-		<string>{0, 3071}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2881</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2882</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323995798.68886</real>
-		<string>{0, 587}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2884</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2885</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2886</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324426367.06206697</real>
-		<string>{739, 1744}</string>
-		<string>{1634, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2888</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2889</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2890</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323919665.61871201</real>
-		<string>{2253, 1777}</string>
-		<string>{3066, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2892</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2893</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2894</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323917746.03652102</real>
-		<string>{1269, 1981}</string>
-		<string>{3133, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2896</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2897</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2532</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323999027.071006</real>
-		<string>{0, 1113}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2899</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2900</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2901</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324415638.39336997</real>
-		<string>{524, 2814}</string>
-		<string>{2988, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2903</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2904</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324441058.64298302</real>
-		<string>{47, 2017}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2906</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2907</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2908</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323999335.35239202</real>
-		<string>{2017, 2058}</string>
-		<string>{3482, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2910</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2911</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2912</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324437832.25074202</real>
-		<string>{1951, 2511}</string>
-		<string>{2100, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2914</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2915</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2916</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324421603.58039403</real>
-		<string>{2413, 2126}</string>
-		<string>{3075, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2918</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2919</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2784</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991938.742567</real>
-		<string>{0, 905}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2921</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2922</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2784</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991951.46779501</real>
-		<string>{0, 920}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2924</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2925</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2926</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324402854.21050298</real>
-		<string>{0, 2222}</string>
-		<string>{559, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2928</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2929</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2930</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324400673.29259199</real>
-		<string>{1813, 941}</string>
-		<string>{2099, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2932</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2933</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2788</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991962.84279197</real>
-		<string>{0, 924}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2935</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2936</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324400461.89105803</real>
-		<string>{0, 2382}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2938</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>926</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>927</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323991202.52781898</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2940</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2941</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2942</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324422018.73711902</real>
-		<string>{0, 2893}</string>
-		<string>{1632, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2944</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2946</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2948</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2950</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2952</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2955</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2957</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2959</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2945</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://D77EB64F-D96E-4EE1-9AD5-C2E8CC6078D1</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2947</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://F5EF2CEA-3A6E-4CE0-88F1-CD36958A1A8E</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2949</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://4C3C7F23-F763-4ED2-A9D1-2D50EAB289F4</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2951</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://2CCA3A1A-094A-4C2D-8C04-BDB027FCB6DA</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2953</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2954</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>DBGConsoleLogEditorScrollRange</string>
-		<string>{7913, 4599}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2953</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2956</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{0, 2227}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2953</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2958</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{745, 2156}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2953</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2960</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{11436, 1351}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2962</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1924</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2964</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2018</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2002</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2965</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2967</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2969</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2971</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2008</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2973</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2975</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2977</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2979</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2981</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2982</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2983</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2984</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2992</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2993</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2998</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3003</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3004</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3009</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3014</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3019</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3024</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3027</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2963</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://CF5FF470-4F4A-4184-96B3-5B2F8B4DADFF</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1923</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2966</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://F12883C6-1F90-4A68-A7B2-A84A97A444FF</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2968</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://C56D6006-8D04-4E05-8056-E1B0EAE370EC</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2970</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://35FB82FA-7610-48F5-AA8F-444EB49C032E</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2972</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://86811926-3971-46D4-A0DD-2A6798C8B85F</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2974</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://F062C011-F180-4D28-8936-769801EA2730</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2976</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://0C4538B8-C5BC-4D32-AEC7-BC703EA08568</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2978</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://4B1732E0-4B96-4015-94F9-C167BBAAE89D</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2980</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://935AF1AA-6982-47AB-B679-13A9D4F93CE6</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2985</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2986</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>SelectedDocumentLocations</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2987</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2991</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2019</integer>
-			</dict>
-			<key>expandTranscript</key>
-			<false/>
-			<key>indexPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2988</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2990</integer>
-			</dict>
-			<key>NSIndexPathData</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2989</integer>
-			</dict>
-			<key>NSIndexPathLength</key>
-			<integer>3</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1904</integer>
-			</dict>
-			<key>NS.data</key>
-			<data>
-			AFEG
-			</data>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>NSIndexPath</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>NSIndexPath</string>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>IDELogDocumentLocation</string>
-				<string>DVTDocumentLocation</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>IDELogDocumentLocation</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2985</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2994</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2995</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2991</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2966</integer>
-			</dict>
-			<key>expandTranscript</key>
-			<false/>
-			<key>indexPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2996</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2990</integer>
-			</dict>
-			<key>NSIndexPathData</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2997</integer>
-			</dict>
-			<key>NSIndexPathLength</key>
-			<integer>2</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1904</integer>
-			</dict>
-			<key>NS.data</key>
-			<data>
-			AQE=
-			</data>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2999</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3000</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2991</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2968</integer>
-			</dict>
-			<key>expandTranscript</key>
-			<false/>
-			<key>indexPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3001</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2990</integer>
-			</dict>
-			<key>NSIndexPathData</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3002</integer>
-			</dict>
-			<key>NSIndexPathLength</key>
-			<integer>2</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1904</integer>
-			</dict>
-			<key>NS.data</key>
-			<data>
-			AAI=
-			</data>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3005</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3006</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2991</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2972</integer>
-			</dict>
-			<key>expandTranscript</key>
-			<false/>
-			<key>indexPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3007</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2990</integer>
-			</dict>
-			<key>NSIndexPathData</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3008</integer>
-			</dict>
-			<key>NSIndexPathLength</key>
-			<integer>2</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1904</integer>
-			</dict>
-			<key>NS.data</key>
-			<data>
-			AL8B
-			</data>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2985</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3010</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3011</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2991</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2009</integer>
-			</dict>
-			<key>expandTranscript</key>
-			<false/>
-			<key>indexPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3012</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2990</integer>
-			</dict>
-			<key>NSIndexPathData</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3013</integer>
-			</dict>
-			<key>NSIndexPathLength</key>
-			<integer>3</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1904</integer>
-			</dict>
-			<key>NS.data</key>
-			<data>
-			ACwA
-			</data>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3015</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3016</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2991</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2974</integer>
-			</dict>
-			<key>expandTranscript</key>
-			<false/>
-			<key>indexPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3017</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2990</integer>
-			</dict>
-			<key>NSIndexPathData</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3018</integer>
-			</dict>
-			<key>NSIndexPathLength</key>
-			<integer>2</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1904</integer>
-			</dict>
-			<key>NS.data</key>
-			<data>
-			AAA=
-			</data>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3020</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3021</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2991</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2976</integer>
-			</dict>
-			<key>expandTranscript</key>
-			<false/>
-			<key>indexPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3022</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2990</integer>
-			</dict>
-			<key>NSIndexPathData</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3023</integer>
-			</dict>
-			<key>NSIndexPathLength</key>
-			<integer>3</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1904</integer>
-			</dict>
-			<key>NS.data</key>
-			<data>
-			AQMA
-			</data>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3025</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3026</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2991</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2978</integer>
-			</dict>
-			<key>expandTranscript</key>
-			<false/>
-			<key>indexPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2996</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3029</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3030</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDENameString</string>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>637</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3032</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3033</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>473</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>477</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>472</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3034</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3974</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3975</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4203</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3033</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>25</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4324</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_F18AFCAA-0B06-4A4A-B3A3-172BB6FE3FF0</string>
-		<string>IDEWorkspaceTabController_0E300015-F55C-40BA-9D74-6F096B3B8016</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3035</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3036</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3133</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3960</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3966</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRTreeAdaptor.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>642</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3037</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3038</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3053</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3064</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3083</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3127</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3039</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3045</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3051</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3052</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3040</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3041</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3043</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3042</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>309</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3044</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>459</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3046</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3047</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3049</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3048</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>309</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3050</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>459</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3054</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3062</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3055</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3062</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3060</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3056</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3057</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3058</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3059</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3035</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3061</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3063</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3054</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3065</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3066</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3067</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3081</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3068</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3069</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3074</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3035</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3078</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3080</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3060</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3070</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3071</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3072</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3073</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3035</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3075</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3076</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3077</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324436605.33686697</real>
-		<string>{6581, 1360}</string>
-		<string>{3905, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3079</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.m</string>
-		<string>-copyNode:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3082</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {769, 925}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>702</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3084</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3126</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>704</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3085</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3103</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3086</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3087</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3088</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3089</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3035</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3093</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3094</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>-newTreeWithTokenType:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3090</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3091</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3092</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324412008.05151898</real>
-		<string>{1608, 1437}</string>
-		<string>{2146, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3079</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3099</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3095</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3096</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3097</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3098</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3035</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3100</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3102</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {1239, 891}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3104</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3124</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3105</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3106</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3107</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3111</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3112</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3114</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>-createTree:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3108</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3109</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3110</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324412008.05183703</real>
-		<string>{2872, 2448}</string>
-		<string>{1770, 14}</string>
-		<string>ANTLRTreeAdaptor.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3113</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3122</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3115</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3116</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3117</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3118</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3120</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3111</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>676</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3119</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>107</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>manualDomainIdentifier</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>679</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3121</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode.IDEKit.GeniusCategory.Manual</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3123</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3125</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {727, 891}}</string>
-		<real>0.41242939233779907</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3128</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3129</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3131</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3130</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3132</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3134</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>286</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3149</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3952</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3956</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode.DebuggerKit.ThreadsStacksNavigator</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3139</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3137</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {456, 847}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>187</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3144</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>379</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>253</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3143</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>scopes</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>253</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3148</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<string>invalidNode</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3150</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3156</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3906</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3949</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3950</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3951</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>251</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3151</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>337</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3143</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3153</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3154</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3155</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>377</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>hoistedPredicates</string>
-		<string>LL-start</string>
-		<string>lexertest-simple</string>
-		<string>Fuzzy</string>
-		<string>polydiff</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3157</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3199</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3230</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3252</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3297</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3326</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3354</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3499</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3608</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3627</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3635</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3641</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3681</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3741</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3765</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3783</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3800</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3815</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3826</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3840</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3863</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3869</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3894</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3164</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3196</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>type</string>
-		<string>subissues</string>
-		<string>shortMessage</string>
-		<string>fullMessage</string>
-		<string>documentLocations</string>
-		<string>Semantic Issue</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3165</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3189</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3188</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3167</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3175</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3181</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3171</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Uncategorized</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRBaseTreeAdaptor.h:28:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3172</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3173</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>27</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>27</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$classes</key>
-			<array>
-				<string>DVTTextDocumentLocation</string>
-				<string>DVTDocumentLocation</string>
-				<string>NSObject</string>
-			</array>
-			<key>$classname</key>
-			<string>DVTTextDocumentLocation</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3176</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3178</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRBaseTreeAdaptor.m:27:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3179</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3180</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3184</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method declared here in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3185</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3186</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>124</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>124</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.h</string>
-		</dict>
-		<string>Method declared here</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3185</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3192</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Required for direct or indirect protocol 'ANTLRTreeAdaptor'</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3193</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3194</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<string>Method in protocol not implemented</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3197</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3198</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3200</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3223</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3224</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3225</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3201</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3218</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3203</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3210</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3204</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3205</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3205</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3206</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRCommonTokenStream.m:28:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3207</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3208</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>27</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>27</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3209</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTokenStream.m</string>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3211</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3212</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3212</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3213</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Previous definition is here in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTokenStream.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3214</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3215</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>84</integer>
-			<key>endingLineNumber</key>
-			<integer>50</integer>
-			<key>startingColumnNumber</key>
-			<integer>84</integer>
-			<key>startingLineNumber</key>
-			<integer>50</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3216</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTokenStream.h</string>
-		<real>0.0</real>
-		<string>Previous definition is here</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3221</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3215</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>84</integer>
-			<key>endingLineNumber</key>
-			<integer>50</integer>
-			<key>startingColumnNumber</key>
-			<integer>84</integer>
-			<key>startingLineNumber</key>
-			<integer>50</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3220</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3215</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>83</integer>
-			<key>endingLineNumber</key>
-			<integer>50</integer>
-			<key>startingColumnNumber</key>
-			<integer>73</integer>
-			<key>startingLineNumber</key>
-			<integer>50</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3222</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<string>Conflicting parameter types in implementation of 'initWithTokenSource:Channel:': 'NSUInteger'</string>
-		<string>Conflicting parameter types in implementation of 'initWithTokenSource:Channel:': 'NSUInteger' (aka 'unsigned int') vs 'NSInteger' (aka 'int')</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3226</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3228</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3208</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>83</integer>
-			<key>endingLineNumber</key>
-			<integer>72</integer>
-			<key>startingColumnNumber</key>
-			<integer>83</integer>
-			<key>startingLineNumber</key>
-			<integer>72</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3227</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3208</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>82</integer>
-			<key>endingLineNumber</key>
-			<integer>72</integer>
-			<key>startingColumnNumber</key>
-			<integer>73</integer>
-			<key>startingLineNumber</key>
-			<integer>72</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3229</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3231</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3224</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3224</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3249</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3232</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3233</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3244</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3234</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3239</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3235</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3205</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3205</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3236</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3237</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3208</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>27</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>27</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3238</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3240</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3212</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3212</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3241</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3242</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3215</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>84</integer>
-			<key>endingLineNumber</key>
-			<integer>50</integer>
-			<key>startingColumnNumber</key>
-			<integer>84</integer>
-			<key>startingLineNumber</key>
-			<integer>50</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3243</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3245</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3247</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3215</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>84</integer>
-			<key>endingLineNumber</key>
-			<integer>50</integer>
-			<key>startingColumnNumber</key>
-			<integer>84</integer>
-			<key>startingLineNumber</key>
-			<integer>50</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3246</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3215</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>83</integer>
-			<key>endingLineNumber</key>
-			<integer>50</integer>
-			<key>startingColumnNumber</key>
-			<integer>73</integer>
-			<key>startingLineNumber</key>
-			<integer>50</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3248</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3250</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3251</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3208</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>72</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>72</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3208</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>72</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>72</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3253</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3254</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3267</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3267</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3268</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Lexical or Preprocessor Issue</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3255</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3261</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3256</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3257</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3257</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3258</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../examples/simplecTreeParser/SimpleCParser.m:46:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3259</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3260</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>45</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>45</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3262</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3263</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3263</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3264</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>'ANTLR/ANTLR.h' file not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3265</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3266</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>8</integer>
-			<key>endingLineNumber</key>
-			<integer>6</integer>
-			<key>startingColumnNumber</key>
-			<integer>8</integer>
-			<key>startingLineNumber</key>
-			<integer>6</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.h</string>
-		</dict>
-		<string>'ANTLR/ANTLR.h' file not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3265</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3270</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3294</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3271</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3289</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3272</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3288</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3273</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3278</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3283</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3274</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3275</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3276</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3277</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>27</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>27</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3279</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3280</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3281</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3282</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3284</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3285</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3286</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3287</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>38</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>38</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3286</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3290</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3291</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3292</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3293</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3295</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3296</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3298</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3319</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3320</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3321</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3299</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3300</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3314</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3301</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3308</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3302</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3303</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3303</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3304</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRBaseTree.m:28:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3305</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3306</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>27</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>27</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3307</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.m</string>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3309</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3310</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3310</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3311</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Previous definition is here in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3312</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1941</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>32</integer>
-			<key>endingLineNumber</key>
-			<integer>126</integer>
-			<key>startingColumnNumber</key>
-			<integer>32</integer>
-			<key>startingLineNumber</key>
-			<integer>126</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3313</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3315</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3317</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1941</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>32</integer>
-			<key>endingLineNumber</key>
-			<integer>126</integer>
-			<key>startingColumnNumber</key>
-			<integer>32</integer>
-			<key>startingLineNumber</key>
-			<integer>126</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3316</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1941</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>30</integer>
-			<key>endingLineNumber</key>
-			<integer>126</integer>
-			<key>startingColumnNumber</key>
-			<integer>20</integer>
-			<key>startingLineNumber</key>
-			<integer>126</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3318</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<string>Conflicting parameter types in implementation of 'deleteChild:': 'NSUInteger'</string>
-		<string>Conflicting parameter types in implementation of 'deleteChild:': 'NSUInteger' (aka 'unsigned int') vs 'NSInteger' (aka 'int')</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3322</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3324</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3306</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>31</integer>
-			<key>endingLineNumber</key>
-			<integer>226</integer>
-			<key>startingColumnNumber</key>
-			<integer>31</integer>
-			<key>startingLineNumber</key>
-			<integer>226</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3323</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3306</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>29</integer>
-			<key>endingLineNumber</key>
-			<integer>226</integer>
-			<key>startingColumnNumber</key>
-			<integer>20</integer>
-			<key>startingLineNumber</key>
-			<integer>226</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3325</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3327</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3352</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3328</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3346</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3329</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3345</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3330</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3335</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3340</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3331</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3332</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3333</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3334</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>27</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>27</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3336</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3337</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3338</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3339</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3341</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3342</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3343</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3344</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>38</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>38</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3343</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3347</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3348</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3349</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3350</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<string>Incomplete implementation</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3353</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3296</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>32</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>32</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3355</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3497</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3356</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3362</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3367</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3372</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3377</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3382</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3387</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3392</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3397</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3402</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3407</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3412</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3417</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3422</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3427</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3432</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3437</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3442</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3447</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3452</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3457</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3462</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3467</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3472</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3477</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3482</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3487</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3492</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3357</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3358</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3358</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3359</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'init' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3360</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>38</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>38</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3363</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3364</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3364</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3365</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'copyWithZone:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3366</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>40</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>40</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3368</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3369</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3369</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3370</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'emptyNode' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3371</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>42</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>42</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3373</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3374</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3374</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3375</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'errorNode:From:To:Exception:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3376</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>55</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>55</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3378</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3379</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3379</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3380</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'isNil:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3381</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>60</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>60</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3383</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3384</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3384</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3385</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'dupTree:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3386</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>62</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>62</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3388</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3389</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3389</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3390</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'dupTree:Parent:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3391</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>68</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>68</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3393</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3394</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3394</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3395</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'dupNode:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3396</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>69</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>69</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3398</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3399</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3399</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3400</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'addChild:toTree:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3401</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>77</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>77</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3403</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3404</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3404</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3405</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'becomeRoot:old:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3406</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>105</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>105</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3408</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3409</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3409</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3410</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'rulePostProcessing:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3411</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>108</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>108</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3413</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3414</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3414</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3415</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'becomeRootfromToken:old:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3416</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>110</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>110</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3418</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3419</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3419</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3420</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'createTree:FromToken:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3421</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>112</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>112</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3423</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3424</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3424</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3425</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'createTree:FromToken:Text:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3426</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>114</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>114</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3428</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3429</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3429</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3430</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'createTree:Text:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3431</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>116</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>116</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3433</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3434</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3434</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3435</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getType:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3436</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>118</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>118</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3438</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3439</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3439</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3440</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'setType:Type:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3441</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>120</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>120</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3443</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3444</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3444</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3445</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getText:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3446</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>122</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>122</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3448</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3449</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3449</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3450</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'setText:Text:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3451</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>124</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>124</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3453</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3454</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3454</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3455</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getChild:At:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3456</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>126</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>126</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3458</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3459</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3459</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3460</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'setChild:At:Child:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3461</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>128</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>128</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3463</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3464</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3464</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3465</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'deleteChild:Index:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3466</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>130</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>130</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3468</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3469</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3469</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3470</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getChildCount:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3471</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>132</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>132</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3473</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3475</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getUniqueID:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3476</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>134</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>134</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3478</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3480</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getUniqueID' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3481</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>144</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>144</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3483</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3485</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'createToken:Text:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3486</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>160</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>160</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3488</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3489</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3489</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3490</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'createToken:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3491</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>162</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>162</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3493</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3495</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'newEmptyTree' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3496</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3361</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>36</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>36</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3498</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3296</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3500</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3606</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3521</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3538</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3555</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3572</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3589</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3502</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3517</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3517</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3518</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3503</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3510</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3506</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRLexerState.m:27:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3507</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3508</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3509</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.m</string>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3511</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3513</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getTokenType' not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3514</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>38</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>38</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3516</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.h</string>
-		<real>0.0</real>
-		<string>Method definition for 'getTokenType' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3519</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>38</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>38</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3520</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3522</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3534</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3534</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3535</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3523</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3528</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3524</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3525</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3526</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3508</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3527</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3529</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3530</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3530</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3531</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getChannel' not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3532</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>41</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>41</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3533</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<string>Method definition for 'getChannel' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3536</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>41</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>41</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3537</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3539</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3551</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3551</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3552</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3540</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3545</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3541</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3542</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3543</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3508</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3544</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3546</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3547</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3547</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3548</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getTokenStartLine' not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3549</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>44</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>44</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3550</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<string>Method definition for 'getTokenStartLine' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3553</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>44</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>44</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3554</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3556</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3569</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3557</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3562</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3558</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3559</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3560</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3508</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3561</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3563</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3564</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3564</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3565</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getTokenCharPositionInLine' not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3566</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>47</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>47</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3567</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<string>Method definition for 'getTokenCharPositionInLine' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3570</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>47</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>47</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3571</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3573</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3585</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3585</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3586</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3574</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3579</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3575</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3576</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3577</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3508</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3578</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3580</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3581</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3581</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3582</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getTokenStartCharIndex' not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3583</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>50</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>50</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3584</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<string>Method definition for 'getTokenStartCharIndex' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3587</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>50</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>50</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3588</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3590</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3602</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3602</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3603</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3591</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3596</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3592</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3593</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3594</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3508</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3595</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3597</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3598</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3598</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3599</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getText' not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3600</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>53</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>53</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3601</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<string>Method definition for 'getText' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3604</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3515</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>53</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>53</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3605</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3607</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3508</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>29</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>29</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3609</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3624</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3610</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3611</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3623</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3612</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3617</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3613</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3614</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3615</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3616</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3618</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3619</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3619</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3620</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'getUniqueID' not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3621</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3622</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>144</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>144</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3621</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3625</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3626</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3628</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3345</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3629</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3631</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3633</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3630</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3332</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3632</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3337</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3634</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3342</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3636</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3623</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3637</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3639</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3638</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3614</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3640</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3619</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3619</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3620</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3642</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3677</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3677</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3678</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3643</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3650</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3657</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3664</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3671</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3644</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3645</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3645</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3646</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../examples/combined/antlr3.h:59:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3647</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3648</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>58</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>58</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3649</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/combined/antlr3.h</string>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3651</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3652</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3652</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3653</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../examples/combined/CombinedLexer.h:7:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3654</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3655</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>6</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>6</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3656</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/combined/CombinedLexer.h</string>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3658</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3659</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3659</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3660</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../examples/combined/main.m:2:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3661</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3662</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>1</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>1</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3663</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/combined/main.m</string>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3665</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3666</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3666</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3667</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Duplicate protocol definition of 'ANTLRIntStream' is ignored in /Users/acondit/Library/Developer/Xcode/DerivedData/ANTLR-awfvqfoybjihuiaoxllmwcgxqxnm/Build/Products/Debug/ANTLR.framework/Headers/ANTLRIntStream.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3668</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3669</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>10</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>10</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3670</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRIntStream.h</string>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3672</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3673</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3674</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3675</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3676</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRIntStream.h</string>
-		<real>0.0</real>
-		<string>Duplicate protocol definition of 'ANTLRIntStream' is ignored</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3679</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3669</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>10</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>10</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3680</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3682</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3739</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3683</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3685</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3687</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3689</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3691</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3693</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3695</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3697</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3699</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3701</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3703</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3705</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3707</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3709</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3711</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3713</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3715</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3717</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3719</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3721</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3723</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3725</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3727</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3729</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3731</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3733</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3735</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3737</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3684</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3358</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3358</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3359</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3686</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3364</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3364</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3365</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3688</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3369</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3369</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3370</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3690</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3374</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3374</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3375</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3692</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3379</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3379</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3380</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3694</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3384</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3384</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3385</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3696</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3389</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3389</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3390</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3698</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3394</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3394</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3395</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3700</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3399</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3399</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3400</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3702</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3404</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3404</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3405</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3704</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3409</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3409</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3410</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3706</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3414</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3414</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3415</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3708</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3419</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3419</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3420</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3710</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3424</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3424</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3425</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3712</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3429</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3429</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3430</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3714</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3434</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3434</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3435</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3716</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3439</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3439</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3440</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3718</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3444</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3444</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3445</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3720</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3449</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3449</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3450</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3722</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3454</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3454</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3455</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3724</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3459</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3459</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3460</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3726</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3464</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3464</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3465</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3728</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3469</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3469</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3470</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3730</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3475</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3732</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3480</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3734</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3485</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3736</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3489</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3489</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3490</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3738</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3495</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3740</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3296</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3742</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3760</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3760</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3761</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3743</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3744</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3757</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3745</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3751</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3746</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3747</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3747</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3748</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRBufferedTreeNodeStream.m:31:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3749</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3750</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>30</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>30</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3752</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3753</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3753</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3754</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Previous definition is here in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3755</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3756</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>27</integer>
-			<key>endingLineNumber</key>
-			<integer>114</integer>
-			<key>startingColumnNumber</key>
-			<integer>27</integer>
-			<key>startingLineNumber</key>
-			<integer>114</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3755</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3758</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3759</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>25</integer>
-			<key>endingLineNumber</key>
-			<integer>114</integer>
-			<key>startingColumnNumber</key>
-			<integer>16</integer>
-			<key>startingLineNumber</key>
-			<integer>114</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.h</string>
-		</dict>
-		<string>Conflicting parameter types in implementation of 'getNode:': 'NSInteger' (aka 'int') vs 'NSUInteger' (aka 'unsigned int')</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3762</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3764</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3763</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>269</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>269</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3763</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>269</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>269</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3766</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3782</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3767</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3772</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3777</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3768</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3769</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3770</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3771</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>27</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>27</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3773</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3774</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3775</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3776</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3778</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3779</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3780</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3781</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>126</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>126</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3780</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3784</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3798</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3785</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3793</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3786</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3782</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3787</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3789</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3791</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3788</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3769</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3790</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3774</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3792</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3779</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3794</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3795</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3796</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3797</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3799</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3296</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3801</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3812</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3802</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3810</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3803</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3288</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3804</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3806</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3808</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3805</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3275</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3807</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3280</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3809</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3285</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3811</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3291</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3813</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3814</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3816</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3822</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3822</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3823</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3817</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3818</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3819</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3819</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3820</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Passing argument to parameter 'node' here</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3821</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1941</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>106</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>106</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<string>Sending 'id&lt;ANTLRTree&gt;' to parameter of incompatible type 'id&lt;ANTLRBaseTree&gt;'</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3824</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3825</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2021</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>58</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>58</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2021</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>58</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>58</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3827</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3838</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3828</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3836</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3829</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3187</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3345</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3830</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3832</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3834</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3831</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3332</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3833</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3337</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3835</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3342</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3837</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3348</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3839</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3296</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>32</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>32</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3841</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3861</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3842</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3843</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3857</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3857</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3858</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3844</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3850</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3845</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3846</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3846</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3847</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRUnbufferedCommonTreeNodeStream.m:28:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3848</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>154</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>27</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>27</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3849</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3851</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3852</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3852</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3853</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'toStringFromNode:toNode:' not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3854</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3855</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>92</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>92</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3856</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.h</string>
-		<real>0.0</real>
-		<string>Method definition for 'toStringFromNode:toNode:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3859</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3855</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>92</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>92</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3860</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3862</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>154</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>33</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>33</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3864</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3857</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3857</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3858</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3865</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3867</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3866</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3846</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3846</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3847</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3868</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3852</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3852</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3853</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3870</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3891</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3871</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3872</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3887</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3887</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3888</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3873</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3880</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3874</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3875</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3875</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3876</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRRuntimeException.m:32:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3877</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3878</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3879</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRuntimeException.m</string>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3881</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3882</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3882</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3883</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'initWithName:reason:userInfo:' not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRuntimeException.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3884</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3885</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>97</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>97</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3886</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRuntimeException.h</string>
-		<real>0.0</real>
-		<string>Method definition for 'initWithName:reason:userInfo:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3889</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3885</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>97</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>97</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3890</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3892</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3878</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>232</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>232</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3893</integer>
-			</dict>
-		</dict>
-		<real>0.0</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3895</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3902</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3902</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3903</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3896</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3897</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3898</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3898</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3899</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Previous declaration is here</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3900</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3901</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>93</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>93</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.m</string>
-		</dict>
-		<string>Duplicate declaration of method 'createTree:text:'</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3904</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3905</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>126</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>126</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3907</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3908</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3909</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3910</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3911</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3912</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3913</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3914</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3915</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3916</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3917</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3918</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3919</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3920</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3921</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3922</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3923</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3924</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3925</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3926</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3927</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3928</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3929</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3930</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3931</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3932</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3933</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3934</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3935</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3936</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3937</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3938</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3939</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3940</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3941</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3942</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3943</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3944</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3945</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3946</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3948</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRTests/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeTest.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRFailedPredicateException.h</string>
-		<string>polydiff/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.m</string>
-		<string>simplecTreeParser/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.m</string>
-		<string>polydiff/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/Simplifier.m</string>
-		<string>ANTLRTests/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/test/runtime/token/ANTLRCommonTokenTest.m</string>
-		<string>polydiff/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/PolyLexer.m</string>
-		<string>Fuzzy/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/fuzzy/main.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeRewriter.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.m</string>
-		<string>lexertest-simple/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.h</string>
-		<string>simplecTreeParser/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.h</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTokenRewriteStream.h</string>
-		<string>treeparser/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/LangParser.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDebugTreeAdaptor.m</string>
-		<string>treerewrite/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.m</string>
-		<string>simplecTreeParser/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRStringStream.m</string>
-		<string>polydiff/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.h</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRewriteRuleTokenStream.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRPtrBuffer.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.m</string>
-		<string>Fuzzy/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/fuzzy/FuzzyLexer.m</string>
-		<string>LL-start/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTree.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDebugEventProxy.m</string>
-		<string>simplecTreeParser/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDebugTreeNodeStream.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDebugTokenStream.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTokenRewriteStream.m</string>
-		<string>hoistedPredicates/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.m</string>
-		<string>treeparser/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDebugParser.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.h</string>
-		<string>ANTLRTests/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTree.m</string>
-		<string>polydiff/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/PolyParser.m</string>
-		<string>Fuzzy/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/fuzzy/FuzzyLexer.h</string>
-		<string>scopes/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRReaderStream.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDebugTreeParser.m</string>
-		<string>{{0, 0}, {317, 859}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3953</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3954</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3955</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEStackCompressionValue</string>
-		<string>IDEThreadsOrQueuesMode</string>
-		<string>IDEHideAncestorForNonInterestingFrames</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>289</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>290</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>292</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3957</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3958</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3959</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {259, 847}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3961</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3962</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3964</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3963</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>687</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3965</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>238</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3967</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3968</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3970</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3972</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3969</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>318</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3971</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>769</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3973</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<string>{{51, 7}, {1582, 1014}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>480</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>481</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>482</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>483</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>486</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>487</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3976</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3977</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4026</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4195</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRCommonTreeAdaptor.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>490</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>491</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>492</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>495</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>642</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>496</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>497</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3978</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3979</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3994</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4005</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3083</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4020</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>500</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>502</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>503</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>505</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3980</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3986</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3992</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3993</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3981</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3982</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3984</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3983</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>694</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3985</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>627</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3987</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3988</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3990</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3989</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>694</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3991</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>627</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>524</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>526</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3995</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4003</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3996</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4003</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4001</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3997</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3998</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3999</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4000</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3976</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4002</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4004</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3995</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4006</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4007</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4008</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4018</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4009</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4010</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4015</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3976</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4017</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3976</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4001</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4011</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4012</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4013</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4014</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3976</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4016</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2911</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2912</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324437832.25516802</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>2021</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4019</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {953, 916}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4021</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4022</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4024</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>583</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4023</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>514</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>586</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4025</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>402</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>591</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3134</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>286</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4027</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4030</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>590</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4184</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4028</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4029</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSLength</key>
-			<integer>1</integer>
-			<key>NSLocation</key>
-			<integer>37</integer>
-			<key>NSRangeCount</key>
-			<integer>1</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4031</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4032</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4176</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>377</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3151</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>337</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3143</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3153</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3154</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3155</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>251</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3783</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3499</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3608</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3800</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3326</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3815</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3627</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3199</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3230</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3157</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3252</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3297</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3354</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3635</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3641</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3681</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3741</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3765</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3826</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3840</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3863</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4033</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4078</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4088</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4107</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4129</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4154</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4034</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4076</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4076</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4077</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4035</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4041</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4046</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4051</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4057</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4036</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4037</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4037</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4038</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRTreeAdaptor.h:28:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4039</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4040</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>27</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>27</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4042</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4043</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4044</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4045</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>27</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>27</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4047</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4048</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4049</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4050</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4052</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4053</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4053</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4054</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Property has a previous declaration in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4055</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4056</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>76</integer>
-			<key>endingLineNumber</key>
-			<integer>103</integer>
-			<key>startingColumnNumber</key>
-			<integer>76</integer>
-			<key>startingLineNumber</key>
-			<integer>103</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4058</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4074</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4074</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4075</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4059</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4062</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4065</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4068</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4060</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4037</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4037</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4061</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4039</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4063</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4064</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4044</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4066</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4067</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4049</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4069</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4070</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4070</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4071</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Property declared here in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4072</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4073</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>76</integer>
-			<key>endingLineNumber</key>
-			<integer>31</integer>
-			<key>startingColumnNumber</key>
-			<integer>76</integer>
-			<key>startingLineNumber</key>
-			<integer>31</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.h</string>
-		</dict>
-		<string>Property declared here</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4072</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Property has a previous declaration</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4055</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4079</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4074</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4074</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4075</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4080</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4082</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4084</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4086</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4081</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4037</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4037</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4061</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4083</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4064</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4085</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4067</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4087</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4070</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4070</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4071</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4089</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3351</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4105</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4090</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3163</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4091</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4103</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4103</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4104</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4092</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4097</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4093</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4094</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4095</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4096</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>26</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>26</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4098</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4099</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4099</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4100</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Method definition for 'createTree:' not found in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4102</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>36</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>36</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		</dict>
-		<string>Method definition for 'createTree:' not found</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4106</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3296</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>32</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>32</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4108</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4109</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4127</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4127</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4128</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Parse Issue</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4110</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4116</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4121</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4111</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4112</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4112</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4113</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>In file included from /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRBufferedTreeNodeStream.h:33:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4114</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4115</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>32</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>32</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4117</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3747</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3747</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4118</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4119</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4120</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>-1</integer>
-			<key>endingLineNumber</key>
-			<integer>30</integer>
-			<key>startingColumnNumber</key>
-			<integer>-1</integer>
-			<key>startingLineNumber</key>
-			<integer>30</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4122</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4123</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4123</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4124</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Expected ';' after method prototype in /Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4125</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4126</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>53</integer>
-			<key>endingLineNumber</key>
-			<integer>41</integer>
-			<key>startingColumnNumber</key>
-			<integer>53</integer>
-			<key>startingLineNumber</key>
-			<integer>41</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.h</string>
-		</dict>
-		<string>Expected ';' after method prototype</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4125</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4133</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4133</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4137</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4131</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4132</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4133</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4133</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4134</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>The Objective-C class 'ANTLRCommonTree', which is derived from class 'ANTLRBaseTree', defines the instance method 'getLine' whose return type is 'NSUInteger'.  A method with the same name (same selector) is also defined in class 'ANTLRBaseTree' and has a return type of 'NSInteger'.  These two types are incompatible, and may result in undefined behavior for clients of these classes</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4135</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4136</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>203</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>203</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRCommonTree.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4135</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4139</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>0</integer>
-			<key>endingLineNumber</key>
-			<integer>203</integer>
-			<key>startingColumnNumber</key>
-			<integer>0</integer>
-			<key>startingLineNumber</key>
-			<integer>203</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRCommonTree.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4151</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Dead store</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4143</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4144</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4146</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Value stored to 'msg' during its initialization is never read</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4149</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4148</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>14</integer>
-			<key>endingLineNumber</key>
-			<integer>329</integer>
-			<key>startingColumnNumber</key>
-			<integer>14</integer>
-			<key>startingLineNumber</key>
-			<integer>329</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRBaseRecognizer.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4150</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>34</integer>
-			<key>endingLineNumber</key>
-			<integer>329</integer>
-			<key>startingColumnNumber</key>
-			<integer>20</integer>
-			<key>startingLineNumber</key>
-			<integer>329</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRBaseRecognizer.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4149</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4153</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>14</integer>
-			<key>endingLineNumber</key>
-			<integer>329</integer>
-			<key>startingColumnNumber</key>
-			<integer>14</integer>
-			<key>startingLineNumber</key>
-			<integer>329</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRBaseRecognizer.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4155</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4156</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4167</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4167</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4173</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Logic error</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4157</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4165</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4160</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Variable 'tpattern' declared without an initial value</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4163</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4162</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>4</integer>
-			<key>endingLineNumber</key>
-			<integer>500</integer>
-			<key>startingColumnNumber</key>
-			<integer>4</integer>
-			<key>startingLineNumber</key>
-			<integer>500</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRTreeWizard.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4164</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>30</integer>
-			<key>endingLineNumber</key>
-			<integer>500</integer>
-			<key>startingColumnNumber</key>
-			<integer>4</integer>
-			<key>startingLineNumber</key>
-			<integer>500</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRTreeWizard.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3159</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3160</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3162</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4167</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4167</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4168</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>Receiver in message expression is an uninitialized value</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4170</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>9</integer>
-			<key>endingLineNumber</key>
-			<integer>509</integer>
-			<key>startingColumnNumber</key>
-			<integer>9</integer>
-			<key>startingLineNumber</key>
-			<integer>509</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRTreeWizard.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4172</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>18</integer>
-			<key>endingLineNumber</key>
-			<integer>509</integer>
-			<key>startingColumnNumber</key>
-			<integer>10</integer>
-			<key>startingLineNumber</key>
-			<integer>509</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRTreeWizard.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3174</integer>
-			</dict>
-			<key>characterRangeLen</key>
-			<integer>0</integer>
-			<key>characterRangeLoc</key>
-			<integer>-1</integer>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4175</integer>
-			</dict>
-			<key>endingColumnNumber</key>
-			<integer>9</integer>
-			<key>endingLineNumber</key>
-			<integer>509</integer>
-			<key>startingColumnNumber</key>
-			<integer>9</integer>
-			<key>startingLineNumber</key>
-			<integer>509</integer>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLR/../ANTLRTreeWizard.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3948</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3909</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3910</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3911</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3912</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3913</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3914</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3915</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3916</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3917</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3918</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3919</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3920</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3921</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3922</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3923</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3924</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3925</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3927</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3928</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3929</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3930</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3932</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3933</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3936</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3935</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3937</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3938</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3939</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3940</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3941</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3942</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3943</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3944</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3907</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3946</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3945</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3908</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRuntimeException.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeAdaptor.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreeVisitor.m</string>
-		<string>{{0, 464}, {613, 850}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3953</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3954</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3955</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>289</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>290</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>292</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3957</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4188</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4187</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Debug treerewrite : Recording</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4190</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4193</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4192</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>678</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4194</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>238</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4196</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4197</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4199</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4201</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4198</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>629</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4200</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>953</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>633</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4202</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>481</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>480</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>482</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>483</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>486</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>487</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4204</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4205</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4260</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4310</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4316</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Build ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>490</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>491</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>492</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>493</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>495</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>496</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>497</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4206</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4207</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4222</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4234</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4254</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>500</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>502</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>503</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>505</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4214</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4220</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4221</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4209</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4210</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4212</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4211</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4213</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>683</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4215</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4216</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4218</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4217</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4219</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>683</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>524</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>526</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4223</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4232</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4224</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4232</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4230</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4225</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4226</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4229</integer>
-			</dict>
-		</dict>
-		<string>Xcode.IDENavigableItem.WorkspaceLogsDomain</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4227</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4228</integer>
-			</dict>
-		</dict>
-		<string>Build ANTLR : 7:27:43 PM</string>
-		<integer>2147483647</integer>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4231</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://128D3AC5-F4C6-4508-8BEA-E26CF7377619</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4233</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4223</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>543</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4235</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>545</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4236</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>547</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>548</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>549</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4237</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4252</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4238</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>552</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>553</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>554</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>555</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>556</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>557</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>558</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4239</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4240</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4247</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4239</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4248</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4249</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4251</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Build ANTLR : 7:27:43 PM</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4245</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4225</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4241</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4244</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4242</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4243</integer>
-			</dict>
-		</dict>
-		<string>Build ANTLR : 7:27:43 PM</string>
-		<integer>2147483647</integer>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4246</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://128D3AC5-F4C6-4508-8BEA-E26CF7377619</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2985</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>com.apple.dt.IDE.BuildLogContentType</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4250</integer>
-			</dict>
-		</dict>
-		<string>x-xcode-log://128D3AC5-F4C6-4508-8BEA-E26CF7377619</string>
-		<string>Xcode.IDEKit.EditorDocument.LogDocument</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4253</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {982, 916}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4255</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4256</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4258</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>583</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4257</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>586</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4259</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3134</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1164</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>590</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>591</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>589</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4261</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4262</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4263</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4266</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4261</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4288</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4302</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Xcode.IDEKit.Navigator.Logs</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3953</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3954</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3955</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1167</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1172</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4264</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4265</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSLength</key>
-			<integer>2</integer>
-			<key>NSLocation</key>
-			<integer>20</integer>
-			<key>NSRangeCount</key>
-			<integer>1</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>603</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>604</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>605</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>606</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>607</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>608</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>609</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>610</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>611</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>612</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4267</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4268</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4285</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4286</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4287</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>251</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3151</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>337</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3143</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3153</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3154</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3155</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>377</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3946</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4270</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3948</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3908</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3909</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3910</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4271</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3913</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4272</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3911</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3914</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4273</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4274</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3917</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4275</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4276</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3918</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3919</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4277</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3920</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3921</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4278</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3922</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3923</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3924</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3925</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4279</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3926</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3927</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3928</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4280</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3930</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3929</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4281</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4282</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3932</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3934</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3935</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3936</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3937</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3938</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3940</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3944</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4283</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4284</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3947</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLookaheadStream.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRFastQueue.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRTreePatternParser.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLexerState.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRDFA.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRPtrStack.h</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRFileStream.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseRecognizer.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBufferedTokenStream.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTokenStream.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRLookaheadStream.h</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRRewriteRuleNodeStream.m</string>
-		<string>ANTLR/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRMap.m</string>
-		<string>{{0, 4191}, {244, 850}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>593</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>594</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>595</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>596</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>597</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>598</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>599</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4289</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4292</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4293</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4290</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>535</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1731</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLR</string>
-		<string>{{0, 1762}, {244, 872}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>187</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4294</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4295</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4296</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4298</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4300</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4291</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>535</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4297</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>examples</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4299</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Products</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4297</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4301</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>treeparser</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>289</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4303</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4304</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4305</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4306</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4309</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDELogNavigatorSelectedObjectsStateKey</string>
-		<string>IDELogNavigatorExpandedItemsStateKey</string>
-		<string>IDELogNavigatorRecentFilterStateKey</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4307</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4308</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Build ANTLR : 7:27:43 PM</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4311</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4312</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4314</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4313</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>698</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4315</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>218</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4317</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4318</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4320</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4322</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4319</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>260</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4321</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>982</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>633</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4323</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>340</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3033</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>3032</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4326</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4326</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4327</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>26</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4328</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4381</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_302D15CD-F2E4-4591-BBEA-87156BE3FC43</string>
-		<string>{{24, 359}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4329</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4362</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4367</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4373</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4330</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4331</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4346</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4350</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4356</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4332</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4338</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4344</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4345</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4333</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4334</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4336</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4335</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4337</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4339</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4340</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4342</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4341</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4343</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4347</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4348</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4348</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4349</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4347</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4351</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4352</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4353</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4355</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4354</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4357</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4358</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4360</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4359</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4361</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4363</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4364</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4365</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4366</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4368</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4369</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4371</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4370</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4372</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4374</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4375</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4377</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4379</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4376</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>629</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4378</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>953</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4380</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4326</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4383</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4383</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4384</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>27</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4385</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4452</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_BF1D8188-61A0-4891-9E5B-744667FD84B4</string>
-		<string>{{247, 338}, {600, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4386</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4387</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4433</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4438</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4444</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Build scopes</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4388</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4389</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4404</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4414</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4427</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4390</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4396</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4402</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4403</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4391</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4392</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4394</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4393</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4395</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4397</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4398</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4400</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4399</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4401</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4405</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4412</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4406</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4412</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4411</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>420</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4407</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4410</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4408</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4409</integer>
-			</dict>
-		</dict>
-		<string>Build scopes : 2:22:51 PM</string>
-		<integer>2147483647</integer>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1923</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4413</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4405</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4415</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4416</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4417</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4426</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4418</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>434</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4409</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4419</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4409</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>437</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4420</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4421</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>436</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1923</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4425</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>420</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4422</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4424</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4423</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4409</integer>
-			</dict>
-		</dict>
-		<integer>2147483647</integer>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1923</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4428</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4429</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4431</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4430</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4432</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>286</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4434</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>286</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4435</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>289</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>290</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>291</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>292</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4436</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4437</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4439</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4440</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4442</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4441</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4443</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4445</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4446</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4448</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4450</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4447</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>457</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4449</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1116</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4451</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4383</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4454</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4455</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>397</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4454</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>28</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4529</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_643C9C67-AF73-4A43-899C-9F86EE0BA865</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4456</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4457</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4521</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRCommonTreeAdaptor.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4458</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4459</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4460</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4466</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4472</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4473</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4461</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4462</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4464</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4463</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4465</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4467</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4468</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4470</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4469</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4471</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4475</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4483</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4476</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4483</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4481</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4477</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4478</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4479</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4480</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4456</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4482</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4484</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4475</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4486</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4487</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4488</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4500</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4489</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4490</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4495</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4456</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4498</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4456</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4481</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4491</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4492</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4493</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4494</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4456</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4496</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4497</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324436616.01285303</real>
-		<string>{0, 2064}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4499</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4502</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4503</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4505</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4504</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4506</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4511</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4516</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4517</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4519</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4518</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4520</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4522</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4523</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4525</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4527</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4524</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>277</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4526</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>810</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4528</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4454</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4531</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4531</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4532</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>29</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4533</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4589</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_739FB955-3296-4612-AC20-1CDF97DE7A33</string>
-		<string>{{119, 336}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4534</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4575</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4581</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4535</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4536</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4551</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4555</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4561</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4537</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4543</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4549</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4550</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4538</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4539</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4541</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4540</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4542</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4544</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4545</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4547</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4546</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4548</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4552</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4553</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4553</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4554</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4552</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4556</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4557</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4558</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4560</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4559</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4562</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4563</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4565</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4564</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4566</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4570</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4571</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4572</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4573</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4574</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4576</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4577</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4579</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4578</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4580</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4582</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4583</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4585</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4587</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4584</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>661</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4586</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>921</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4588</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4531</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4591</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4591</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4592</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>30</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4593</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4666</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_814C37BB-0594-46EB-9B90-B9116756F804</string>
-		<string>{{163, 349}, {600, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1791</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4594</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4644</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4652</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4658</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4595</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4596</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4611</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4638</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4597</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4603</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4609</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4610</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4598</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4599</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4601</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4600</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4602</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4604</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4605</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4607</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4606</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4608</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4612</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4620</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4613</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4620</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4619</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1812</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4614</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4615</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4616</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4617</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4618</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1791</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1816</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>379</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1819</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1821</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4621</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4612</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4623</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4624</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4625</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4637</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4626</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1791</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4627</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1791</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4629</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4630</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4628</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1831</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324417807.90071499</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1833</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4636</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1812</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4631</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4632</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4633</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4634</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4635</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1791</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1816</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>379</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1819</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1821</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4639</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4640</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4642</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4641</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4643</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4645</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4646</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4647</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4648</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4649</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4650</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4651</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4653</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4654</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4656</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4655</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4657</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4659</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4660</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4662</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4664</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4661</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>586</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4663</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>987</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4665</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4591</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4668</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4668</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4669</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>31</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4670</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4723</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_33ED8F40-5853-4B5E-82D1-ED44495458A6</string>
-		<string>{{266, 382}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4671</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4704</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4709</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4715</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4672</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4673</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4688</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4692</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4698</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4674</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4680</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4686</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4687</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4675</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4676</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4678</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4677</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4679</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4681</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4682</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4684</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4683</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4685</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4689</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4690</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4690</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4691</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4689</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4693</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4694</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4695</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4697</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4696</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4699</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4700</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4702</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4701</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4703</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4705</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4706</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4707</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4708</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4710</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4711</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4713</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4712</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4714</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4716</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4717</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4719</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4721</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4718</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>629</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4720</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>953</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4722</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4668</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4725</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4725</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>225</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>32</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4726</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4800</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_F2B57B98-941D-4238-9379-7473B3430A7E</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4727</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4728</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4779</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4786</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4792</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRBaseTree.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4729</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4730</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4745</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4756</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4773</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4731</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4737</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4743</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4744</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4732</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4733</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4735</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4734</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4736</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4738</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4739</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4741</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4740</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4742</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4746</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4754</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4747</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4754</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4752</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4748</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4749</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4750</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4751</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4727</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4753</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/ANTLRBaseTree.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4755</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4746</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4757</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4758</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4759</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4772</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4760</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4761</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4762</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4727</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4766</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4767</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>-addChild:</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4763</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4764</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4765</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324421881.86844403</real>
-		<string>{1527, 1531}</string>
-		<string>{2218, 17}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1941</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4752</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4768</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4769</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4770</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4771</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4727</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>112</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4774</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4775</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4777</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4776</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4778</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4780</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4781</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4782</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4783</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4784</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4785</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<string>id&lt;ANTLRTreeAdaptor&gt;</string>
-		<string>id&lt;ANTLRTree&gt;</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4787</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4788</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4790</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4789</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4791</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4793</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4794</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4796</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4798</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4795</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>586</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4797</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>987</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4799</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4725</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4802</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4802</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4803</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>33</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4804</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4860</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_5A5562D3-2457-44ED-87EC-EE25A7A3221D</string>
-		<string>{{56, 349}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4805</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4838</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4846</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4852</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4806</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4807</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4822</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4826</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4832</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4808</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4814</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4820</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4821</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4809</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4810</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4812</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4811</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4813</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4815</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4816</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4818</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4817</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4819</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4823</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4824</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4824</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4825</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4823</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4827</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4828</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4829</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4831</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4830</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4833</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4834</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4836</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4835</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4837</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4839</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4840</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4841</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4842</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4843</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4844</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4845</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4847</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4848</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4850</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4849</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4851</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4853</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4854</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4856</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4858</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4855</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>661</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4857</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>921</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4859</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4802</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4862</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4863</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4864</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4862</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>34</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4865</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_EB09D177-511E-48E6-97D0-A5368084A1FF</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4862</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{49, 309}, {600, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4866</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4867</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4918</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4926</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4932</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>FuzzyLexer.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4868</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4869</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4884</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4897</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4912</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4870</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4876</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4882</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4883</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4871</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4872</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4874</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4873</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>300</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4875</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>299</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4877</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4878</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4880</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4879</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>300</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4881</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>299</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4885</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4895</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4886</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4895</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4893</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4887</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4888</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4889</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4891</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4892</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4866</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4890</integer>
-			</dict>
-		</dict>
-		<string>fuzzy</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4894</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/fuzzy/FuzzyLexer.h</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4896</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4885</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4898</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4899</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4900</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4911</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4901</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4902</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4908</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4866</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>269</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4910</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4866</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4893</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4903</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4904</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4905</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4906</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4907</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4866</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4890</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4909</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2638</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2532</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323993445.39168298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1939</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4913</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4914</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4916</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4915</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4917</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4919</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4920</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4921</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4922</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4923</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4924</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4925</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4927</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4928</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4930</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4929</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>377</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4931</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4933</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4934</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4936</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4938</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4935</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>260</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4937</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1313</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4939</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4941</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4941</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4942</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>35</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4943</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4996</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_D7C117CF-2DCE-4CBA-B705-19535B08742D</string>
-		<string>{{161, 382}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4944</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4977</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4982</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4988</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4945</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4946</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4961</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4965</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4971</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4947</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4953</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4959</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4960</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4948</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4949</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4951</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4950</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4952</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4954</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4955</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4957</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4956</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4958</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4962</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4963</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4963</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>4964</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4962</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4966</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4967</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4968</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4970</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4969</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4972</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4973</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4975</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4974</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4976</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4978</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4979</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4980</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4981</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4983</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4984</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4986</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4985</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4987</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4989</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4990</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4992</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4994</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4991</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>630</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4993</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>952</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4995</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4941</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4998</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4999</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5000</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4998</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>36</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5001</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_2ACC26B0-F5C0-41D8-832E-AD4A74EA1F07</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>4998</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{268, 360}, {600, 668}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>51</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5002</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5003</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5054</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5062</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5068</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>Simplifier.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5004</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5005</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5020</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5032</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5048</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5006</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5012</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5018</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5019</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5007</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5008</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5010</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5009</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5011</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5013</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5014</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5016</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5015</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5017</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5021</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5030</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5022</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5030</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5028</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5023</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5024</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5025</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5026</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5027</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5002</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3155</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5029</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>118</integer>
-			</dict>
-			<key>NS.string</key>
-			<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/polydiff/Simplifier.m</string>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5031</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5021</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5033</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5034</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5035</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5047</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5036</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>141</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5002</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5037</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5002</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5040</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5041</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>145</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>147</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>148</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5038</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5039</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>2524</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>323992277.437558</real>
-		<string>{15643, 721}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1928</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5028</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>107</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5042</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5043</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5044</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5045</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5046</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5002</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>3155</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>253</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>114</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5049</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5050</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5052</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5051</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5053</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5055</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5056</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>174</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>189</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>190</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>197</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5057</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5058</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5059</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>202</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5060</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5061</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5063</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5064</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5066</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5065</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>399</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5067</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>223</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5069</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5070</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5072</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5074</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5071</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>457</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5073</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>1116</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5075</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5077</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5077</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5078</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>37</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5079</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5132</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_7BDC7074-7CDB-4F9D-BA7D-78C9C1EDE74B</string>
-		<string>{{203, 336}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5080</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5113</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5118</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5124</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5081</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5082</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5097</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5101</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5107</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5083</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5089</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5095</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5096</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5084</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5085</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5087</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5086</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5088</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5090</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5091</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5093</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5092</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5094</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5098</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5099</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5099</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5100</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5098</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5102</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5103</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5104</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5106</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5105</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5108</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5109</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5111</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5110</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5112</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5114</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5115</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5116</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5117</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5119</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5120</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5122</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5121</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5123</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5125</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5126</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5128</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5130</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5127</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>629</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5129</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>953</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5131</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5077</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>42</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>43</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>44</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>45</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5134</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>47</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5134</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5135</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>38</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5136</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5189</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_2B2152F1-B07F-4225-B664-24F65D0A9243</string>
-		<string>{{224, 349}, {240, 646}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>52</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>53</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>54</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>57</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>58</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5137</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5175</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5181</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>62</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>63</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>64</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>65</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>66</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>67</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>68</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>69</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5138</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5139</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5154</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5158</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5164</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>73</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>74</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>75</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>76</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>77</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>78</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5140</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5146</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5152</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5153</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5141</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5142</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5144</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5143</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5145</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5147</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5148</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5150</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>85</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5149</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>298</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>89</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5151</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>301</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>100</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>103</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5155</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5156</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5156</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5157</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5155</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>126</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5159</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>128</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5160</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>130</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>131</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>132</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5161</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5163</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>162</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5165</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5168</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5167</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5169</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>175</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5172</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1501</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>178</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>179</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>180</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>181</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>182</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>183</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>184</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1505</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1506</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1507</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1508</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1509</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1510</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5173</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5174</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1513</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1514</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5176</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5177</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5179</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5178</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>389</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>208</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5180</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>80</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5182</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5183</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5185</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5187</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>56</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5184</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>629</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>55</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5186</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>953</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>83</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>84</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>219</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5188</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>354</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5134</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>473</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>472</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>475</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>477</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5192</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5272</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5191</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>39</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5273</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_7C9CA7DC-C1BD-4E3C-87A4-808B1D459FCA</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>480</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>481</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>482</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>483</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>486</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>487</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5193</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5194</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5250</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5258</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5264</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRTreeRuleReturnScope.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>490</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>491</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>492</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>493</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>495</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>496</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>497</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5195</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5196</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5211</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5223</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5244</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>500</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>502</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>503</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>505</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5197</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5203</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5209</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5210</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5198</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5199</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5201</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5200</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>447</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5202</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>451</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5204</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5205</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5207</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5206</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>447</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5208</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>451</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>524</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>526</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5212</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5221</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5213</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5221</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5220</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>530</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5214</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5215</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5217</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5218</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5216</integer>
-			</dict>
-		</dict>
-		<string>ANTLRTreeRuleReturnScope.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>535</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5219</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1945</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5222</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5212</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>543</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5224</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>545</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5225</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>547</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>548</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>549</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5226</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5242</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5227</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>552</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>553</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>554</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>555</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>556</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>557</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>558</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5228</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5229</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5235</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5216</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5239</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5240</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>577</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>startNode</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5220</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>530</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5230</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5231</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5232</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5233</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5216</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>535</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5234</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5236</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5237</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5238</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324440398.24771601</real>
-		<string>{0, 1988}</string>
-		<string>{1952, 0}</string>
-		<string>public.c-header</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5241</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.h</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5243</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {899, 859}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5245</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5246</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5248</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>583</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5247</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>586</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5249</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>589</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>591</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1164</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5251</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1164</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5254</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>593</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>594</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>595</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>596</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>597</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>598</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>599</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5252</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5253</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {259, 832}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1166</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1167</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1168</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1169</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1170</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1171</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>1172</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5255</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5256</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5257</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1174</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1176</integer>
-			</dict>
-			<key>NSRangeCount</key>
-			<integer>0</integer>
-		</dict>
-		<string>ANTLRTreeRuleReturn</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5259</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5260</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5262</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5261</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>489</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5263</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>370</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5265</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5266</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5268</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5270</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5267</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>260</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5269</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>885</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>633</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5271</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>340</real>
-		<string>{{163, 90}, {899, 905}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5191</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>472</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>473</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>474</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>475</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5275</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>477</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5275</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5276</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>40</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5277</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5359</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>IDEWorkspaceTabController_A37CC082-FCE5-4465-8D92-5B82D5AFB5B0</string>
-		<string>{{184, 262}, {1095, 766}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>480</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>481</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>482</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>483</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>486</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>487</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5278</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5279</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5334</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5345</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5351</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>ANTLRTreeAdaptor.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>490</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>491</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>492</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>493</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>494</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>495</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>496</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>497</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5280</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5281</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5296</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5308</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5328</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>171</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array/>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>500</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>501</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>502</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>503</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>504</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>505</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5282</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5288</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>98</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5294</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5295</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5283</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5284</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5286</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5285</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>544</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5287</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>550</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5289</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5290</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5292</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>512</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5291</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>544</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>515</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5293</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>550</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>524</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>101</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>526</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>124</integer>
-			</dict>
-			<key>geniusEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>primaryEditorContextNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5297</integer>
-			</dict>
-			<key>rootLayoutTreeNode</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5306</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>1</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5298</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5306</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5305</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>530</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5299</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5300</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5302</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5303</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5301</integer>
-			</dict>
-		</dict>
-		<string>ANTLRTreeAdaptor.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>535</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5304</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>119</integer>
-			</dict>
-			<key>documentURL</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>1930</integer>
-			</dict>
-			<key>timestamp</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>123</integer>
-			</dict>
-			<key>children</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5307</integer>
-			</dict>
-			<key>contentType</key>
-			<integer>0</integer>
-			<key>documentArchivableRepresentation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>orientation</key>
-			<integer>0</integer>
-			<key>parent</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5297</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>543</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5309</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>545</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5310</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>547</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>548</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>549</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5311</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>60</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5326</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5312</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>552</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>553</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>554</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>555</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>556</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>557</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>558</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5313</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5314</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5320</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5301</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>574</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5324</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>577</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>@implementation ANTLRTreeAdaptor</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>120</integer>
-			</dict>
-			<key>DocumentLocation</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5305</integer>
-			</dict>
-			<key>DomainIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>530</integer>
-			</dict>
-			<key>IdentifierPath</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5315</integer>
-			</dict>
-			<key>IndexOfDocumentIdentifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>60</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5316</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5317</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5318</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5301</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>535</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>110</integer>
-			</dict>
-			<key>Identifier</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5319</integer>
-			</dict>
-		</dict>
-		<string>ANTLR</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>567</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>568</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>569</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>570</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5321</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5322</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5323</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>324440596.88617098</real>
-		<string>{2169, 1696}</string>
-		<string>{3488, 0}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>155</integer>
-			</dict>
-			<key>NS.base</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>0</integer>
-			</dict>
-			<key>NS.relative</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>5325</integer>
-			</dict>
-		</dict>
-		<string>file://localhost/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/ANTLRTreeAdaptor.m</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5327</integer>
-				</dict>
-			</array>
-		</dict>
-		<string>{{0, 0}, {1095, 720}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5329</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5330</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5332</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>583</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5331</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>203</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>586</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5333</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>115</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>589</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>590</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>591</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5335</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5338</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>590</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>593</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>594</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>595</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>596</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>597</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>598</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>599</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5336</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5337</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>186</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {259, 832}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>603</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>604</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>605</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>606</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>607</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>608</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>609</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>610</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>611</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>612</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5339</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5340</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5341</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5342</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5343</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5344</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>48</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<string>{{0, 0}, {0, 0}}</string>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>199</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array/>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5346</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5347</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5349</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5348</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>509</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>622</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5350</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>211</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>71</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>507</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5352</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>91</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5353</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5355</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5357</integer>
-				</dict>
-			</array>
-		</dict>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>485</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5354</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>260</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>484</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5356</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>982</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>87</integer>
-			</dict>
-			<key>NS.keys</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>510</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>511</integer>
-				</dict>
-			</array>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>633</integer>
-				</dict>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5358</integer>
-				</dict>
-			</array>
-		</dict>
-		<real>340</real>
-		<dict>
-			<key>$class</key>
-			<dict>
-				<key>CF$UID</key>
-				<integer>115</integer>
-			</dict>
-			<key>NS.objects</key>
-			<array>
-				<dict>
-					<key>CF$UID</key>
-					<integer>5275</integer>
-				</dict>
-			</array>
-		</dict>
-	</array>
-	<key>$top</key>
-	<dict>
-		<key>State</key>
-		<dict>
-			<key>CF$UID</key>
-			<integer>1</integer>
-		</dict>
-	</dict>
-	<key>$version</key>
-	<integer>100000</integer>
-</dict>
-</plist>
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist
deleted file mode 100644
index 05301bc..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcdebugger/Breakpoints.xcbkptlist
+++ /dev/null
@@ -1,5 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<Bucket
-   type = "1"
-   version = "1.0">
-</Bucket>
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/ANTLR.xcscheme b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/ANTLR.xcscheme
deleted file mode 100644
index 0b76a62..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/ANTLR.xcscheme
+++ /dev/null
@@ -1,82 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<Scheme
-   version = "1.3">
-   <BuildAction
-      parallelizeBuildables = "YES"
-      buildImplicitDependencies = "NO">
-      <BuildActionEntries>
-         <BuildActionEntry
-            buildForTesting = "NO"
-            buildForRunning = "YES"
-            buildForProfiling = "YES"
-            buildForArchiving = "YES"
-            buildForAnalyzing = "YES">
-            <BuildableReference
-               BuildableIdentifier = "primary"
-               BlueprintIdentifier = "1AE72317134E860B001C3F35"
-               BuildableName = "ANTLR.framework"
-               BlueprintName = "ANTLR"
-               ReferencedContainer = "container:ANTLR.xcodeproj">
-            </BuildableReference>
-         </BuildActionEntry>
-         <BuildActionEntry
-            buildForTesting = "YES"
-            buildForRunning = "NO"
-            buildForProfiling = "NO"
-            buildForArchiving = "NO"
-            buildForAnalyzing = "NO">
-            <BuildableReference
-               BuildableIdentifier = "primary"
-               BlueprintIdentifier = "1AE7232C134E860B001C3F35"
-               BuildableName = "ANTLRTests.octest"
-               BlueprintName = "ANTLRTests"
-               ReferencedContainer = "container:ANTLR.xcodeproj">
-            </BuildableReference>
-         </BuildActionEntry>
-      </BuildActionEntries>
-   </BuildAction>
-   <TestAction
-      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.GDB"
-      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.GDB"
-      shouldUseLaunchSchemeArgsEnv = "YES"
-      buildConfiguration = "Debug">
-      <Testables>
-         <TestableReference
-            skipped = "NO">
-            <BuildableReference
-               BuildableIdentifier = "primary"
-               BlueprintIdentifier = "1AE7232C134E860B001C3F35"
-               BuildableName = "ANTLRTests.octest"
-               BlueprintName = "ANTLRTests"
-               ReferencedContainer = "container:ANTLR.xcodeproj">
-            </BuildableReference>
-         </TestableReference>
-      </Testables>
-   </TestAction>
-   <LaunchAction
-      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.GDB"
-      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.GDB"
-      displayScaleIsEnabled = "NO"
-      displayScale = "1.00"
-      launchStyle = "0"
-      useCustomWorkingDirectory = "NO"
-      buildConfiguration = "Debug">
-      <AdditionalOptions>
-      </AdditionalOptions>
-   </LaunchAction>
-   <ProfileAction
-      displayScaleIsEnabled = "NO"
-      displayScale = "1.00"
-      shouldUseLaunchSchemeArgsEnv = "YES"
-      savedToolIdentifier = ""
-      useCustomWorkingDirectory = "NO"
-      buildConfiguration = "Release">
-   </ProfileAction>
-   <AnalyzeAction
-      buildConfiguration = "Debug">
-   </AnalyzeAction>
-   <ArchiveAction
-      buildConfiguration = "Release"
-      revealArchiveInOrganizer = "YES">
-   </ArchiveAction>
-</Scheme>
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/Fuzzy.xcscheme b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/Fuzzy.xcscheme
deleted file mode 100644
index bb165e3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/Fuzzy.xcscheme
+++ /dev/null
@@ -1,82 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<Scheme
-   version = "1.3">
-   <BuildAction
-      parallelizeBuildables = "YES"
-      buildImplicitDependencies = "YES">
-      <BuildActionEntries>
-         <BuildActionEntry
-            buildForTesting = "YES"
-            buildForRunning = "YES"
-            buildForProfiling = "YES"
-            buildForArchiving = "YES"
-            buildForAnalyzing = "YES">
-            <BuildableReference
-               BuildableIdentifier = "primary"
-               BlueprintIdentifier = "1A63BC60134F5DAB002EDFB4"
-               BuildableName = "Fuzzy"
-               BlueprintName = "Fuzzy"
-               ReferencedContainer = "container:ANTLR.xcodeproj">
-            </BuildableReference>
-         </BuildActionEntry>
-      </BuildActionEntries>
-   </BuildAction>
-   <TestAction
-      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.GDB"
-      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.GDB"
-      shouldUseLaunchSchemeArgsEnv = "YES"
-      buildConfiguration = "Debug">
-      <Testables>
-      </Testables>
-   </TestAction>
-   <LaunchAction
-      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.GDB"
-      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.GDB"
-      displayScaleIsEnabled = "NO"
-      displayScale = "1.00"
-      launchStyle = "0"
-      useCustomWorkingDirectory = "NO"
-      buildConfiguration = "Debug">
-      <BuildableProductRunnable>
-         <BuildableReference
-            BuildableIdentifier = "primary"
-            BlueprintIdentifier = "1A63BC60134F5DAB002EDFB4"
-            BuildableName = "Fuzzy"
-            BlueprintName = "Fuzzy"
-            ReferencedContainer = "container:ANTLR.xcodeproj">
-         </BuildableReference>
-      </BuildableProductRunnable>
-      <EnvironmentVariables>
-         <EnvironmentVariable
-            value = ""
-            isEnabled = "NO">
-         </EnvironmentVariable>
-      </EnvironmentVariables>
-      <AdditionalOptions>
-      </AdditionalOptions>
-   </LaunchAction>
-   <ProfileAction
-      displayScaleIsEnabled = "NO"
-      displayScale = "1.00"
-      shouldUseLaunchSchemeArgsEnv = "YES"
-      savedToolIdentifier = ""
-      useCustomWorkingDirectory = "NO"
-      buildConfiguration = "Release">
-      <BuildableProductRunnable>
-         <BuildableReference
-            BuildableIdentifier = "primary"
-            BlueprintIdentifier = "1A63BC60134F5DAB002EDFB4"
-            BuildableName = "Fuzzy"
-            BlueprintName = "Fuzzy"
-            ReferencedContainer = "container:ANTLR.xcodeproj">
-         </BuildableReference>
-      </BuildableProductRunnable>
-   </ProfileAction>
-   <AnalyzeAction
-      buildConfiguration = "Debug">
-   </AnalyzeAction>
-   <ArchiveAction
-      buildConfiguration = "Release"
-      revealArchiveInOrganizer = "YES">
-   </ArchiveAction>
-</Scheme>
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/LL-start.xcscheme b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/LL-start.xcscheme
deleted file mode 100644
index 5e618e1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/LL-start.xcscheme
+++ /dev/null
@@ -1,76 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<Scheme
-   version = "1.3">
-   <BuildAction
-      parallelizeBuildables = "YES"
-      buildImplicitDependencies = "YES">
-      <BuildActionEntries>
-         <BuildActionEntry
-            buildForTesting = "YES"
-            buildForRunning = "YES"
-            buildForProfiling = "YES"
-            buildForArchiving = "YES"
-            buildForAnalyzing = "YES">
-            <BuildableReference
-               BuildableIdentifier = "primary"
-               BlueprintIdentifier = "1A63BD4B134F5F43002EDFB4"
-               BuildableName = "LL-start"
-               BlueprintName = "LL-start"
-               ReferencedContainer = "container:ANTLR.xcodeproj">
-            </BuildableReference>
-         </BuildActionEntry>
-      </BuildActionEntries>
-   </BuildAction>
-   <TestAction
-      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.GDB"
-      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.GDB"
-      shouldUseLaunchSchemeArgsEnv = "YES"
-      buildConfiguration = "Debug">
-      <Testables>
-      </Testables>
-   </TestAction>
-   <LaunchAction
-      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.GDB"
-      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.GDB"
-      displayScaleIsEnabled = "NO"
-      displayScale = "1.00"
-      launchStyle = "0"
-      useCustomWorkingDirectory = "NO"
-      buildConfiguration = "Debug">
-      <BuildableProductRunnable>
-         <BuildableReference
-            BuildableIdentifier = "primary"
-            BlueprintIdentifier = "1A63BD4B134F5F43002EDFB4"
-            BuildableName = "LL-start"
-            BlueprintName = "LL-start"
-            ReferencedContainer = "container:ANTLR.xcodeproj">
-         </BuildableReference>
-      </BuildableProductRunnable>
-      <AdditionalOptions>
-      </AdditionalOptions>
-   </LaunchAction>
-   <ProfileAction
-      displayScaleIsEnabled = "NO"
-      displayScale = "1.00"
-      shouldUseLaunchSchemeArgsEnv = "YES"
-      savedToolIdentifier = ""
-      useCustomWorkingDirectory = "NO"
-      buildConfiguration = "Release">
-      <BuildableProductRunnable>
-         <BuildableReference
-            BuildableIdentifier = "primary"
-            BlueprintIdentifier = "1A63BD4B134F5F43002EDFB4"
-            BuildableName = "LL-start"
-            BlueprintName = "LL-start"
-            ReferencedContainer = "container:ANTLR.xcodeproj">
-         </BuildableReference>
-      </BuildableProductRunnable>
-   </ProfileAction>
-   <AnalyzeAction
-      buildConfiguration = "Debug">
-   </AnalyzeAction>
-   <ArchiveAction
-      buildConfiguration = "Release"
-      revealArchiveInOrganizer = "YES">
-   </ArchiveAction>
-</Scheme>
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/xcschememanagement.plist b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/xcschememanagement.plist
deleted file mode 100644
index 348d7aa..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/xcschememanagement.plist
+++ /dev/null
@@ -1,132 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-	<key>SchemeUserState</key>
-	<dict>
-		<key>ANTLR.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>0</integer>
-		</dict>
-		<key>ANTLRTests.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>1</integer>
-		</dict>
-		<key>Fuzzy.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>3</integer>
-		</dict>
-		<key>LL-start.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>6</integer>
-		</dict>
-		<key>combined.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>2</integer>
-		</dict>
-		<key>hoistedPredicates.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>4</integer>
-		</dict>
-		<key>lexertest-simple.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>5</integer>
-		</dict>
-		<key>polydiff.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>7</integer>
-		</dict>
-		<key>scopes.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>8</integer>
-		</dict>
-		<key>simplecTreeParser.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>9</integer>
-		</dict>
-		<key>treeparser.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>10</integer>
-		</dict>
-		<key>treerewrite.xcscheme</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>11</integer>
-		</dict>
-	</dict>
-	<key>SuppressBuildableAutocreation</key>
-	<dict>
-		<key>1A63BC60134F5DAB002EDFB4</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1A63BD31134F5F1E002EDFB4</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1A63BD3E134F5F36002EDFB4</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1A63BD4B134F5F43002EDFB4</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1A63BD58134F5F4D002EDFB4</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1A63BD65134F5F5E002EDFB4</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1A63BD72134F5F67002EDFB4</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1A63BD7F134F5F71002EDFB4</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1A63BDD3134F6233002EDFB4</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1A63BDFB134FB75E002EDFB4</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1AE72317134E860B001C3F35</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-		<key>1AE7232C134E860B001C3F35</key>
-		<dict>
-			<key>primary</key>
-			<true/>
-		</dict>
-	</dict>
-</dict>
-</plist>
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests-Info.plist b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests-Info.plist
deleted file mode 100644
index 4814b62..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests-Info.plist
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-	<key>CFBundleDevelopmentRegion</key>
-	<string>en</string>
-	<key>CFBundleExecutable</key>
-	<string>${EXECUTABLE_NAME}</string>
-	<key>CFBundleIdentifier</key>
-	<string>AMKS.${PRODUCT_NAME:rfc1034identifier}</string>
-	<key>CFBundleInfoDictionaryVersion</key>
-	<string>6.0</string>
-	<key>CFBundlePackageType</key>
-	<string>BNDL</string>
-	<key>CFBundleShortVersionString</key>
-	<string>1.0</string>
-	<key>CFBundleSignature</key>
-	<string>????</string>
-	<key>CFBundleVersion</key>
-	<string>1</string>
-</dict>
-</plist>
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests-Prefix.pch b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests-Prefix.pch
deleted file mode 100644
index 9d05c98..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests-Prefix.pch
+++ /dev/null
@@ -1,7 +0,0 @@
-//
-// Prefix header for all source files of the 'ANTLRTests' target in the 'ANTLRTests' project
-//
-
-#ifdef __OBJC__
-    #import <Cocoa/Cocoa.h>
-#endif
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests.h b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests.h
deleted file mode 100644
index a4b4456..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests.h
+++ /dev/null
@@ -1,17 +0,0 @@
-//
-//  ANTLRTests.h
-//  ANTLRTests
-//
-//  Created by Alan Condit on 4/7/11.
-//  Copyright 2011 Alan's MachineWorks. All rights reserved.
-//
-
-#import <SenTestingKit/SenTestingKit.h>
-
-
-@interface ANTLRTests : SenTestCase {
-@private
-    
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests.m b/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests.m
deleted file mode 100644
index c0386a0..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/ANTLRTests.m
+++ /dev/null
@@ -1,28 +0,0 @@
-//
-//  ANTLRTests.m
-//  ANTLRTests
-//
-//  Created by Alan Condit on 4/7/11.
-//  Copyright 2011 Alan's MachineWorks. All rights reserved.
-//
-
-#import "ANTLRTests.h"
-
-
-@implementation ANTLRTests
-
-- (void)setUp
-{
-    [super setUp];
-    
-    // Set-up code here.
-}
-
-- (void)tearDown
-{
-    // Tear-down code here.
-    
-    [super tearDown];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseMapElement.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseMapElement.h
deleted file mode 100644
index 0815165..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseMapElement.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-//  ANTLRBaseMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-@interface ANTLRBaseMapElement : ANTLRLinkBase {
-    NSNumber *index;
-}
-
-@property (retain) NSNumber *index;
-
-+ (id) newANTLRBaseMapElement;
-+ (id) newANTLRBaseMapElementWithIndex:(NSNumber *)anIdx;
-- (id) init;
-- (id) initWithAnIndex:(NSNumber *)anIdx;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseMapElement.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseMapElement.m
deleted file mode 100644
index 7197799..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseMapElement.m
+++ /dev/null
@@ -1,95 +0,0 @@
-//
-//  ANTLRBaseMapElement.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRBaseMapElement.h"
-
-
-@implementation ANTLRBaseMapElement
-
-@synthesize index;
-
-+ (ANTLRBaseMapElement *)newANTLRBaseMapElement
-{
-    return [[ANTLRBaseMapElement alloc] init];
-}
-
-+ (ANTLRBaseMapElement *)newANTLRBaseMapElementWithIndex:(NSNumber *)aNumber
-{
-    return [[ANTLRBaseMapElement alloc] initWithAnIndex:(NSNumber *)aNumber];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil ) {
-        index = nil;
-    }
-    return (self);
-}
-
-- (id) initWithAnIndex:(NSNumber *)aNumber
-{
-    if ((self = [super init]) != nil ) {
-        index = aNumber;
-        if ( index ) [index retain];
-    }
-    return (self);
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRBaseMapElement" );
-#endif
-    if ( index ) [index release];
-    [super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRBaseMapElement *copy;
-    
-    copy = [super copyWithZone:aZone];
-    copy.index = index;
-    return( copy );
-}
-
-- (NSInteger)count
-{
-    return 1;
-}
-                          
-                          
-- (NSInteger)size
-{
-    return(  sizeof(index) );
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseRecognizer.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseRecognizer.h
deleted file mode 100644
index 346a857..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseRecognizer.h
+++ /dev/null
@@ -1,180 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import <Foundation/Foundation.h>
-
-#import "ANTLRIntStream.h"
-#import "AMutableArray.h"
-
-// This is an abstract superclass for lexers and parsers.
-
-#define ANTLR_MEMO_RULE_FAILED -2
-#define ANTLR_MEMO_RULE_UNKNOWN -1
-#define ANTLR_INITIAL_FOLLOW_STACK_SIZE 100
-
-#import "ANTLRMapElement.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRToken.h"
-#import "ANTLRRecognizerSharedState.h"
-#import "ANTLRRecognitionException.h"
-#import "ANTLRMissingTokenException.h"
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRMismatchedTreeNodeException.h"
-#import "ANTLRUnwantedTokenException.h"
-#import "ANTLRNoViableAltException.h"
-#import "ANTLREarlyExitException.h"
-#import "ANTLRMismatchedSetException.h"
-#import "ANTLRMismatchedNotSetException.h"
-#import "ANTLRFailedPredicateException.h"
-
-@interface ANTLRBaseRecognizer : NSObject {
-    __strong ANTLRRecognizerSharedState *state;  // the state of this recognizer. Might be shared with other recognizers, e.g. in grammar import scenarios.
-    __strong NSString *grammarFileName;          // where the grammar came from; filled in by code generation
-    __strong NSString *sourceName;
-    __strong AMutableArray *tokenNames;
-}
-
-+ (void) initialize;
-
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizer;
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizerWithRuleLen:(NSInteger)aLen;
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizer:(ANTLRRecognizerSharedState *)aState;
-
-+ (AMutableArray *)getTokenNames;
-+ (void)setTokenNames:(NSArray *)aTokNamArray;
-+ (void)setGrammarFileName:(NSString *)aFileName;
-
-- (id) init;
-- (id) initWithLen:(NSInteger)aLen;
-- (id) initWithState:(ANTLRRecognizerSharedState *)aState;
-
-- (void) dealloc;
-
-// simple accessors
-- (NSInteger) getBacktrackingLevel;
-- (void) setBacktrackingLevel:(NSInteger) level;
-
-- (BOOL) getFailed;
-- (void) setFailed: (BOOL) flag;
-
-- (ANTLRRecognizerSharedState *) getState;
-- (void) setState:(ANTLRRecognizerSharedState *) theState;
-
-// reset this recognizer - might be extended by codegeneration/grammar
-- (void) reset;
-
-/** Match needs to return the current input symbol, which gets put
- *  into the label for the associated token ref; e.g., x=ID.  Token
- *  and tree parsers need to return different objects. Rather than test
- *  for input stream type or change the IntStream interface, I use
- *  a simple method to ask the recognizer to tell me what the current
- *  input symbol is.
- * 
- *  This is ignored for lexers.
- */
-- (id) input;
-
-- (void)skip;
-
-// do actual matching of tokens/characters
-- (id) match:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)ttype Follow:(ANTLRBitSet *)follow;
-- (void) matchAny:(id<ANTLRIntStream>)anInput;
-- (BOOL) mismatchIsUnwantedToken:(id<ANTLRIntStream>)anInput TokenType:(NSInteger) ttype;
-- (BOOL) mismatchIsMissingToken:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)follow;
-
-// error reporting and recovery
-- (void) reportError:(ANTLRRecognitionException *)e;
-- (void) displayRecognitionError:(AMutableArray *)theTokNams Exception:(ANTLRRecognitionException *)e;
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(AMutableArray *)theTokNams;
-- (NSInteger) getNumberOfSyntaxErrors;
-- (NSString *)getErrorHeader:(ANTLRRecognitionException *)e;
-- (NSString *)getTokenErrorDisplay:(id<ANTLRToken>)t;
-- (void) emitErrorMessage:(NSString *)msg;
-- (void) recover:(id<ANTLRIntStream>)anInput Exception:(ANTLRRecognitionException *)e;
-
-// begin hooks for debugger
-- (void) beginResync;
-- (void) endResync;
-// end hooks for debugger
-
-// compute the bitsets necessary to do matching and recovery
-- (ANTLRBitSet *)computeErrorRecoverySet;
-- (ANTLRBitSet *)computeContextSensitiveRuleFOLLOW;
-- (ANTLRBitSet *)combineFollows:(BOOL) exact;
-
-- (id<ANTLRToken>) recoverFromMismatchedToken:(id<ANTLRIntStream>)anInput 
-                                    TokenType:(NSInteger)ttype 
-                                       Follow:(ANTLRBitSet *)follow;
-                                    
-- (id<ANTLRToken>)recoverFromMismatchedSet:(id<ANTLRIntStream>)anInput
-                                    Exception:(ANTLRRecognitionException *)e
-                                    Follow:(ANTLRBitSet *)follow;
-
-- (id) getCurrentInputSymbol:(id<ANTLRIntStream>)anInput;
-- (id) getMissingSymbol:(id<ANTLRIntStream>)anInput
-              Exception:(ANTLRRecognitionException *)e
-              TokenType:(NSInteger) expectedTokenType
-                Follow:(ANTLRBitSet *)follow;
-
-// helper methods for recovery. try to resync somewhere
-- (void) consumeUntilTType:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)ttype;
-- (void) consumeUntilFollow:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)bitSet;
-- (void) pushFollow:(ANTLRBitSet *)fset;
-- (ANTLRBitSet *)popFollow;
-
-// to be used by the debugger to do reporting. maybe hook in incremental stuff here, too.
-- (AMutableArray *) getRuleInvocationStack;
-- (AMutableArray *) getRuleInvocationStack:(ANTLRRecognitionException *)exception
-                                 Recognizer:(NSString *)recognizerClassName;
-
-- (AMutableArray *) getTokenNames;
-- (NSString *)getGrammarFileName;
-- (NSString *)getSourceName;
-- (AMutableArray *) toStrings:(NSArray *)tokens;
-// support for memoization
-- (NSInteger) getRuleMemoization:(NSInteger)ruleIndex StartIndex:(NSInteger)ruleStartIndex;
-- (BOOL) alreadyParsedRule:(id<ANTLRIntStream>)anInput RuleIndex:(NSInteger)ruleIndex;
-- (void) memoize:(id<ANTLRIntStream>)anInput
-         RuleIndex:(NSInteger)ruleIndex
-        StartIndex:(NSInteger)ruleStartIndex;
-- (NSInteger) getRuleMemoizationCacheSize;
-- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol;
-- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol;
-
-
-// support for syntactic predicates. these are called indirectly to support funky stuff in grammars,
-// like supplying selectors instead of writing code directly into the actions of the grammar.
-- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment;
-// stream:(id<ANTLRIntStream>)anInput;
-
-@property (retain) ANTLRRecognizerSharedState *state;
-@property (retain) NSString *grammarFileName;
-@property (retain) NSString *sourceName;
-@property (retain) AMutableArray *tokenNames;
-
-@end
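
Editor's note (not part of the original tree): the header above declares the whole recovery-oriented matching surface (match:TokenType:Follow:, pushFollow:/popFollow:, getFailed). The following is a rough, hedged sketch of how a rule body might drive that API, assuming these ObjC runtime sources are still available to compile against. HypStatParser, the HYP_* token types, and the follow-set variables are hypothetical names invented for this illustration; a real generated parser would precompute static FOLLOW bitsets rather than build them per call.

#import "ANTLRBaseRecognizer.h"

// Hypothetical token types for the example rule: stat : ID '=' expr ';' ;
enum { HYP_ID = 4, HYP_EQ = 5, HYP_SEMI = 6 };

@interface HypStatParser : ANTLRBaseRecognizer
- (void) stat:(id<ANTLRIntStream>)input;
- (void) expr:(id<ANTLRIntStream>)input;   // body elided in this sketch
@end

@implementation HypStatParser

- (void) stat:(id<ANTLRIntStream>)input
{
    // One shared bitset keeps the sketch short; real follow sets differ per reference.
    ANTLRBitSet *hypFollow = [ANTLRBitSet newANTLRBitSet];

    // Token references: match with a follow set so single-token recovery can kick in.
    [self match:input TokenType:HYP_ID Follow:hypFollow];
    if ( [self getFailed] ) return;                 // backtracking mode: fail silently

    [self match:input TokenType:HYP_EQ Follow:hypFollow];
    if ( [self getFailed] ) return;

    // Rule reference: push its local follow set, invoke the rule, pop again.
    [self pushFollow:hypFollow];
    [self expr:input];
    [self popFollow];
    if ( [self getFailed] ) return;

    [self match:input TokenType:HYP_SEMI Follow:hypFollow];
}

- (void) expr:(id<ANTLRIntStream>)input { /* elided */ }

@end
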
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseRecognizer.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseRecognizer.m
deleted file mode 100644
index 573e0dc..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseRecognizer.m
+++ /dev/null
@@ -1,1129 +0,0 @@
-//
-//  ANTLRBaseRecognizer.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRHashRule.h"
-#import "ANTLRRuleMemo.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRMap.h"
-
-extern NSInteger debug;
-
-@implementation ANTLRBaseRecognizer
-
-static AMutableArray *_tokenNames;
-static NSString *_grammarFileName;
-static NSString *NEXT_TOKEN_RULE_NAME;
-
-@synthesize state;
-@synthesize grammarFileName;
-//@synthesize failed;
-@synthesize sourceName;
-//@synthesize numberOfSyntaxErrors;
-@synthesize tokenNames;
-
-+ (void) initialize
-{
-    NEXT_TOKEN_RULE_NAME = [NSString stringWithString:@"nextToken"];
-    [NEXT_TOKEN_RULE_NAME retain];
-}
-
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizer
-{
-    return [[ANTLRBaseRecognizer alloc] init];
-}
-
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizerWithRuleLen:(NSInteger)aLen
-{
-    return [[ANTLRBaseRecognizer alloc] initWithLen:aLen];
-}
-
-+ (ANTLRBaseRecognizer *) newANTLRBaseRecognizer:(ANTLRRecognizerSharedState *)aState
-{
-	return [[ANTLRBaseRecognizer alloc] initWithState:aState];
-}
-
-+ (AMutableArray *)getTokenNames
-{
-    return _tokenNames;
-}
-
-+ (void)setTokenNames:(AMutableArray *)theTokNams
-{
-    if ( _tokenNames != theTokNams ) {
-        if ( _tokenNames ) [_tokenNames release];
-        [theTokNams retain];
-    }
-    _tokenNames = theTokNams;
-}
-
-+ (void)setGrammarFileName:(NSString *)aFileName
-{
-    if ( _grammarFileName != aFileName ) {
-        if ( _grammarFileName ) [_grammarFileName release];
-        [aFileName retain];
-    }
-    _grammarFileName = aFileName;
-}
-
-- (id) init
-{
-	if ((self = [super init]) != nil) {
-        if (state == nil) {
-            state = [[ANTLRRecognizerSharedState newANTLRRecognizerSharedState] retain];
-        }
-        tokenNames = _tokenNames;
-        if ( tokenNames ) [tokenNames retain];
-        grammarFileName = _grammarFileName;
-        if ( grammarFileName ) [grammarFileName retain];
-        state._fsp = -1;
-        state.errorRecovery = NO;		// are we recovering?
-        state.lastErrorIndex = -1;
-        state.failed = NO;				// indicate that some match failed
-        state.syntaxErrors = 0;
-        state.backtracking = 0;			// the level of backtracking
-        state.tokenStartCharIndex = -1;
-	}
-	return self;
-}
-
-- (id) initWithLen:(NSInteger)aLen
-{
-	if ((self = [super init]) != nil) {
-        if (state == nil) {
-            state = [[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:aLen] retain];
-        }
-        tokenNames = _tokenNames;
-        if ( tokenNames ) [tokenNames retain];
-        grammarFileName = _grammarFileName;
-        if ( grammarFileName ) [grammarFileName retain];
-        state._fsp = -1;
-        state.errorRecovery = NO;		// are we recovering?
-        state.lastErrorIndex = -1;
-        state.failed = NO;				// indicate that some match failed
-        state.syntaxErrors = 0;
-        state.backtracking = 0;			// the level of backtracking
-        state.tokenStartCharIndex = -1;
-	}
-	return self;
-}
-
-- (id) initWithState:(ANTLRRecognizerSharedState *)aState
-{
-	if ((self = [super init]) != nil) {
-		state = aState;
-        if (state == nil) {
-            state = [ANTLRRecognizerSharedState newANTLRRecognizerSharedState];
-        }
-        [state retain];
-        tokenNames = _tokenNames;
-        if ( tokenNames ) [tokenNames retain];
-        grammarFileName = _grammarFileName;
-        if ( grammarFileName ) [grammarFileName retain];
-        state._fsp = -1;
-        state.errorRecovery = NO;		// are we recovering?
-        state.lastErrorIndex = -1;
-        state.failed = NO;				// indicate that some match failed
-        state.syntaxErrors = 0;
-        state.backtracking = 0;			// the level of backtracking
-        state.tokenStartCharIndex = -1;
-	}
-	return self;
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRBaseRecognizer" );
-#endif
-	if ( grammarFileName ) [grammarFileName release];
-	if ( tokenNames ) [tokenNames release];
-	if ( state ) [state release];
-	[super dealloc];
-}
-
-// reset the recognizer to the initial state. does not touch the token source!
-// this can be extended by the grammar writer to reset custom ivars
-- (void) reset
-{
-    if ( state == nil )
-        return; 
-    if ( state.following != nil ) {
-        if ( [state.following count] )
-            [state.following removeAllObjects];
-    }
-    state._fsp = -1;
-    state.errorRecovery = NO;		// are we recovering?
-    state.lastErrorIndex = -1;
-    state.failed = NO;				// indicate that some match failed
-    state.syntaxErrors = 0;
-    state.backtracking = 0;			// the level of backtracking
-    state.tokenStartCharIndex = -1;
-    if ( state.ruleMemo != nil ) {
-        if ( [state.ruleMemo count] )
-            [state.ruleMemo removeAllObjects];
-    }
-}
-
-- (BOOL) getFailed
-{
-	return [state getFailed];
-}
-
-- (void) setFailed:(BOOL)flag
-{
-	[state setFailed:flag];
-}
-
-- (ANTLRRecognizerSharedState *) getState
-{
-	return state;
-}
-
-- (void) setState:(ANTLRRecognizerSharedState *) theState
-{
-	if (state != theState) {
-		if ( state ) [state release];
-		state = theState;
-		[state retain];
-	}
-}
-
-- (id)input
-{
-    return nil; // Must be overridden in inheriting class
-}
-
-- (void)skip // override in inheriting class
-{
-    return;
-}
-
--(id) match:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)ttype Follow:(ANTLRBitSet *)follow
-{
-	id matchedSymbol = [self getCurrentInputSymbol:anInput];
-	if ([anInput LA:1] == ttype) {
-		[anInput consume];
-		state.errorRecovery = NO;
-		state.failed = NO;
-		return matchedSymbol;
-	}
-	if (state.backtracking > 0) {
-		state.failed = YES;
-		return matchedSymbol;
-	}
-	matchedSymbol = [self recoverFromMismatchedToken:anInput TokenType:ttype Follow:follow];
-	return matchedSymbol;
-}
-
--(void) matchAny:(id<ANTLRIntStream>)anInput
-{
-    state.errorRecovery = NO;
-    state.failed = NO;
-    [anInput consume];
-}
-
--(BOOL) mismatchIsUnwantedToken:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)ttype
-{
-    return [anInput LA:2] == ttype;
-}
-
--(BOOL) mismatchIsMissingToken:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *) follow
-{
-    if ( follow == nil ) {
-        // we have no information about the follow; we can only consume
-        // a single token and hope for the best
-        return NO;
-    }
-    // compute what can follow this grammar element reference
-    if ( [follow member:ANTLRTokenTypeEOR] ) {
-        ANTLRBitSet *viableTokensFollowingThisRule = [self computeContextSensitiveRuleFOLLOW];
-        follow = [follow or:viableTokensFollowingThisRule];
-        if ( state._fsp >= 0 ) { // remove EOR if we're not the start symbol
-            [follow remove:(ANTLRTokenTypeEOR)];
-        }
-    }
-    // if current token is consistent with what could come after set
-    // then we know we're missing a token; error recovery is free to
-    // "insert" the missing token
-    
-    //System.out.println("viable tokens="+follow.toString(getTokenNames()));
-    //System.out.println("LT(1)="+((TokenStream)input).LT(1));
-    
-    // BitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
-    // in the follow set to indicate that falling off the start symbol is
-    // in the set (EOF can follow).
-    if ( [follow member:[anInput LA:1]] || [follow member:ANTLRTokenTypeEOR] ) {
-        //System.out.println("LT(1)=="+((TokenStream)input).LT(1)+" is consistent with what follows; inserting...");
-        return YES;
-    }
-    return NO;
-}
-
-/** Report a recognition problem.
- *
- *  This method sets errorRecovery to indicate the parser is recovering
- *  not parsing.  Once in recovery mode, no errors are generated.
- *  To get out of recovery mode, the parser must successfully match
- *  a token (after a resync).  So it will go:
- *
- * 		1. error occurs
- * 		2. enter recovery mode, report error
- * 		3. consume until token found in resynch set
- * 		4. try to resume parsing
- * 		5. next match() will reset errorRecovery mode
- *
- *  If you override, make sure to update syntaxErrors if you care about that.
- */
--(void) reportError:(ANTLRRecognitionException *) e
-{
-    // if we've already reported an error and have not matched a token
-    // yet successfully, don't report any errors.
-    if ( state.errorRecovery ) {
-        //System.err.print("[SPURIOUS] ");
-        return;
-    }
-    state.syntaxErrors++; // don't count spurious
-    state.errorRecovery = YES;
-    
-    [self displayRecognitionError:[self getTokenNames] Exception:e];
-}
-
--(void) displayRecognitionError:(AMutableArray *)theTokNams Exception:(ANTLRRecognitionException *)e
-{
-    NSString *hdr = [self getErrorHeader:e];
-    NSString *msg = [self getErrorMessage:e TokenNames:theTokNams];
-    [self emitErrorMessage:[NSString stringWithFormat:@" %@ %@", hdr, msg]];
-}
-
-/** What error message should be generated for the various
- *  exception types?
- *
- *  Not very object-oriented code, but I like having all error message
- *  generation within one method rather than spread among all of the
- *  exception classes. This also makes it much easier for the exception
- *  handling because the exception classes do not have to have pointers back
- *  to this object to access utility routines and so on. Also, changing
- *  the message for an exception type would be difficult because you
- *  would have to subclass the exception, but then somehow get ANTLR
- *  to make those kinds of exception objects instead of the default.
- *  This looks weird, but trust me--it makes the most sense in terms
- *  of flexibility.
- *
- *  For grammar debugging, you will want to override this to add
- *  more information such as the stack frame with
- *  getRuleInvocationStack(e, this.getClass().getName()) and,
- *  for no viable alts, the decision description and state etc...
- *
- *  Override this to change the message generated for one or more
- *  exception types.
- */
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(AMutableArray *)theTokNams
-{
-    // NSString *msg = [e getMessage];
-    NSString *msg;
-    if ( [e isKindOfClass:[ANTLRUnwantedTokenException class]] ) {
-        ANTLRUnwantedTokenException *ute = (ANTLRUnwantedTokenException *)e;
-        NSString *tokenName=@"<unknown>";
-        if ( ute.expecting == ANTLRTokenTypeEOF ) {
-            tokenName = @"EOF";
-        }
-        else {
-            tokenName = (NSString *)[theTokNams objectAtIndex:ute.expecting];
-        }
-        msg = [NSString stringWithFormat:@"extraneous input %@ expecting %@", [self getTokenErrorDisplay:[ute getUnexpectedToken]],
-               tokenName];
-    }
-    else if ( [e isKindOfClass:[ANTLRMissingTokenException class] ] ) {
-        ANTLRMissingTokenException *mte = (ANTLRMissingTokenException *)e;
-        NSString *tokenName=@"<unknown>";
-        if ( mte.expecting== ANTLRTokenTypeEOF ) {
-            tokenName = @"EOF";
-        }
-        else {
-            tokenName = [theTokNams objectAtIndex:mte.expecting];
-        }
-        msg = [NSString stringWithFormat:@"missing %@ at %@", tokenName, [self getTokenErrorDisplay:(e.token)] ];
-    }
-    else if ( [e isKindOfClass:[ANTLRMismatchedTokenException class]] ) {
-        ANTLRMismatchedTokenException *mte = (ANTLRMismatchedTokenException *)e;
-        NSString *tokenName=@"<unknown>";
-        if ( mte.expecting== ANTLRTokenTypeEOF ) {
-            tokenName = @"EOF";
-        }
-        else {
-            tokenName = [theTokNams objectAtIndex:mte.expecting];
-        }
-        msg = [NSString stringWithFormat:@"mismatched input %@ expecting %@",[self getTokenErrorDisplay:(e.token)], tokenName];
-    }
-    else if ( [e isKindOfClass:[ANTLRMismatchedTreeNodeException class]] ) {
-        ANTLRMismatchedTreeNodeException *mtne = (ANTLRMismatchedTreeNodeException *)e;
-        NSString *tokenName=@"<unknown>";
-        if ( mtne.expecting==ANTLRTokenTypeEOF ) {
-            tokenName = @"EOF";
-        }
-        else {
-            tokenName = [theTokNams objectAtIndex:mtne.expecting];
-        }
-        msg = [NSString stringWithFormat:@"mismatched tree node: %@ expecting %@", mtne.node, tokenName];
-    }
-    else if ( [e isKindOfClass:[ANTLRNoViableAltException class]] ) {
-        //NoViableAltException *nvae = (NoViableAltException *)e;
-        // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
-        // and "(decision="+nvae.decisionNumber+") and
-        // "state "+nvae.stateNumber
-        msg = [NSString stringWithFormat:@"no viable alternative at input %@", [self getTokenErrorDisplay:e.token]];
-    }
-    else if ( [e isKindOfClass:[ANTLREarlyExitException class]] ) {
-        //ANTLREarlyExitException *eee = (ANTLREarlyExitException *)e;
-        // for development, can add "(decision="+eee.decisionNumber+")"
-        msg = [NSString stringWithFormat:@"required (...)+ loop did not match anything at input %@", [self getTokenErrorDisplay:e.token]];
-    }
-    else if ( [e isKindOfClass:[ANTLRMismatchedSetException class]] ) {
-        ANTLRMismatchedSetException *mse = (ANTLRMismatchedSetException *)e;
-        msg = [NSString stringWithFormat:@"mismatched input %@ expecting set %@",
-               [self getTokenErrorDisplay:(e.token)],
-               mse.expecting];
-    }
-#pragma warning NotSet not yet implemented.
-    else if ( [e isKindOfClass:[ANTLRMismatchedNotSetException class] ] ) {
-        ANTLRMismatchedNotSetException *mse = (ANTLRMismatchedNotSetException *)e;
-        msg = [NSString stringWithFormat:@"mismatched input %@ expecting set %@",
-               [self getTokenErrorDisplay:(e.token)],
-               mse.expecting];
-    }
-    else if ( [e isKindOfClass:[ANTLRFailedPredicateException class]] ) {
-        ANTLRFailedPredicateException *fpe = (ANTLRFailedPredicateException *)e;
-        msg = [NSString stringWithFormat:@"rule %@ failed predicate: { %@ }?", fpe.ruleName, fpe.predicate];
-    }
-    else {
-        msg = [NSString stringWithFormat:@"Exception= %@\n", e.name];
-    }
-    return msg;
-}
-
-/** Get number of recognition errors (lexer, parser, tree parser).  Each
- *  recognizer tracks its own number.  So parser and lexer each have
- *  separate count.  Does not count the spurious errors found between
- *  an error and next valid token match
- *
- *  See also reportError()
- */
-- (NSInteger) getNumberOfSyntaxErrors
-{
-    return state.syntaxErrors;
-}
-
-/** What is the error header, normally line/character position information? */
-- (NSString *)getErrorHeader:(ANTLRRecognitionException *)e
-{
-    return [NSString stringWithFormat:@"line %d:%d", e.line, e.charPositionInLine];
-}
-
-/** How should a token be displayed in an error message? The default
- *  is to display just the text, but during development you might
- *  want to have a lot of information spit out.  Override in that case
- *  to use t.toString() (which, for CommonToken, dumps everything about
- *  the token). This is better than forcing you to override a method in
- *  your token objects because you don't have to go modify your lexer
- *  so that it creates a new Java type.
- */
-- (NSString *)getTokenErrorDisplay:(id<ANTLRToken>)t
-{
-    NSString *s = t.text;
-    if ( s == nil ) {
-        if ( t.type == ANTLRTokenTypeEOF ) {
-            s = @"<EOF>";
-        }
-        else {
-            s = [NSString stringWithFormat:@"<%@>", t.type];
-        }
-    }
-    s = [s stringByReplacingOccurrencesOfString:@"\n" withString:@"\\\\n"];
-    s = [s stringByReplacingOccurrencesOfString:@"\r" withString:@"\\\\r"];
-    s = [s stringByReplacingOccurrencesOfString:@"\t" withString:@"\\\\t"];
-    return [NSString stringWithFormat:@"\'%@\'", s];
-}
-                                        
-/** Override this method to change where error messages go */
-- (void) emitErrorMessage:(NSString *) msg
-{
-//    System.err.println(msg);
-    NSLog(@"%@", msg);
-}
-
-/** Recover from an error found on the input stream.  This is
- *  for NoViableAlt and mismatched symbol exceptions.  If you enable
- *  single token insertion and deletion, this will usually not
- *  handle mismatched symbol exceptions but there could be a mismatched
- *  token that the match() routine could not recover from.
- */
-- (void)recover:(id<ANTLRIntStream>)anInput Exception:(ANTLRRecognitionException *)re
-{
-    if ( state.lastErrorIndex == anInput.index ) {
-        // uh oh, another error at same token index; must be a case
-        // where LT(1) is in the recovery token set so nothing is
-        // consumed; consume a single token so at least to prevent
-        // an infinite loop; this is a failsafe.
-        [anInput consume];
-    }
-    state.lastErrorIndex = anInput.index;
-    ANTLRBitSet *followSet = [self computeErrorRecoverySet];
-    [self beginResync];
-    [self consumeUntilFollow:anInput Follow:followSet];
-    [self endResync];
-}
-
-- (void) beginResync
-{
-    
-}
-
-- (void) endResync
-{
-    
-}
-                            
-/*  Compute the error recovery set for the current rule.  During
- *  rule invocation, the parser pushes the set of tokens that can
- *  follow that rule reference on the stack; this amounts to
- *  computing FIRST of what follows the rule reference in the
- *  enclosing rule. This local follow set only includes tokens
- *  from within the rule; i.e., the FIRST computation done by
- *  ANTLR stops at the end of a rule.
- *
- *  EXAMPLE
- *
- *  When you find a "no viable alt exception", the input is not
- *  consistent with any of the alternatives for rule r.  The best
- *  thing to do is to consume tokens until you see something that
- *  can legally follow a call to r *or* any rule that called r.
- *  You don't want the exact set of viable next tokens because the
- *  input might just be missing a token--you might consume the
- *  rest of the input looking for one of the missing tokens.
- *
- *  Consider grammar:
- *
- *  a : '[' b ']'
- *    | '(' b ')'
- *    ;
- *  b : c '^' INT ;
- *  c : ID
- *    | INT
- *    ;
- *
- *  At each rule invocation, the set of tokens that could follow
- *  that rule is pushed on a stack.  Here are the various "local"
- *  follow sets:
- *
- *  FOLLOW(b1_in_a) = FIRST(']') = ']'
- *  FOLLOW(b2_in_a) = FIRST(')') = ')'
- *  FOLLOW(c_in_b) = FIRST('^') = '^'
- *
- *  Upon erroneous input "[]", the call chain is
- *
- *  a -> b -> c
- *
- *  and, hence, the follow context stack is:
- *
- *  depth  local follow set     after call to rule
- *    0         <EOF>                    a (from main())
- *    1          ']'                     b
- *    3          '^'                     c
- *
- *  Notice that ')' is not included, because b would have to have
- *  been called from a different context in rule a for ')' to be
- *  included.
- *
- *  For error recovery, we cannot consider FOLLOW(c)
- *  (context-sensitive or otherwise).  We need the combined set of
- *  all context-sensitive FOLLOW sets--the set of all tokens that
- *  could follow any reference in the call chain.  We need to
- *  resync to one of those tokens.  Note that FOLLOW(c)='^' and if
- *  we resync'd to that token, we'd consume until EOF.  We need to
- *  sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
- *  In this case, for input "[]", LA(1) is in this set so we would
- *  not consume anything and after printing an error rule c would
- *  return normally.  It would not find the required '^' though.
- *  At this point, it gets a mismatched token error and throws an
- *  exception (since LA(1) is not in the viable following token
- *  set).  The rule exception handler tries to recover, but finds
- *  the same recovery set and doesn't consume anything.  Rule b
- *  exits normally returning to rule a.  Now it finds the ']' (and
- *  with the successful match exits errorRecovery mode).
- *
- *  So, you can see that the parser walks up the call chain looking
- *  for the token that was a member of the recovery set.
- *
- *  Errors are not generated in errorRecovery mode.
- *
- *  ANTLR's error recovery mechanism is based upon original ideas:
- *
- *  "Algorithms + Data Structures = Programs" by Niklaus Wirth
- *
- *  and
- *
- *  "A note on error recovery in recursive descent parsers":
- *  http://portal.acm.org/citation.cfm?id=947902.947905
- *
- *  Later, Josef Grosch had some good ideas:
- *
- *  "Efficient and Comfortable Error Recovery in Recursive Descent
- *  Parsers":
- *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
- *
- *  Like Grosch I implemented local FOLLOW sets that are combined
- *  at run-time upon error to avoid overhead during parsing.
- */
-- (ANTLRBitSet *) computeErrorRecoverySet
-{
-    return [self combineFollows:NO];
-}
-
-/** Compute the context-sensitive FOLLOW set for current rule.
- *  This is set of token types that can follow a specific rule
- *  reference given a specific call chain.  You get the set of
- *  viable tokens that can possibly come next (lookahead depth 1)
- *  given the current call chain.  Contrast this with the
- *  definition of plain FOLLOW for rule r:
- *
- *   FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
- *
- *  where x in T* and alpha, beta in V*; T is set of terminals and
- *  V is the set of terminals and nonterminals.  In other words,
- *  FOLLOW(r) is the set of all tokens that can possibly follow
- *  references to r in *any* sentential form (context).  At
- *  runtime, however, we know precisely which context applies as
- *  we have the call chain.  We may compute the exact (rather
- *  than covering superset) set of following tokens.
- *
- *  For example, consider grammar:
- *
- *  stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
- *       | "return" expr '.'
- *       ;
- *  expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
- *  atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
- *       | '(' expr ')'
- *       ;
- *
- *  The FOLLOW sets are all inclusive whereas context-sensitive
- *  FOLLOW sets are precisely what could follow a rule reference.
- *  For input "i=(3);", here is the derivation:
- *
- *  stat => ID '=' expr ';'
- *       => ID '=' atom ('+' atom)* ';'
- *       => ID '=' '(' expr ')' ('+' atom)* ';'
- *       => ID '=' '(' atom ')' ('+' atom)* ';'
- *       => ID '=' '(' INT ')' ('+' atom)* ';'
- *       => ID '=' '(' INT ')' ';'
- *
- *  At the "3" token, you'd have a call chain of
- *
- *    stat -> expr -> atom -> expr -> atom
- *
- *  What can follow that specific nested ref to atom?  Exactly ')'
- *  as you can see by looking at the derivation of this specific
- *  input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
- *
- *  You want the exact viable token set when recovering from a
- *  token mismatch.  Upon token mismatch, if LA(1) is member of
- *  the viable next token set, then you know there is most likely
- *  a missing token in the input stream.  "Insert" one by just not
- *  throwing an exception.
- */
-- (ANTLRBitSet *)computeContextSensitiveRuleFOLLOW
-{
-    return [self combineFollows:YES];
-}
-
-// what is exact? it seems to only add sets from above on stack
-// if EOR is in set i.  When it sees a set w/o EOR, it stops adding.
-// Why would we ever want them all?  Maybe no viable alt instead of
-// mismatched token?
-- (ANTLRBitSet *)combineFollows:(BOOL) exact
-{
-    NSInteger top = state._fsp;
-    ANTLRBitSet *followSet = [[ANTLRBitSet newANTLRBitSet] retain];
-    for (int i = top; i >= 0; i--) {
-        ANTLRBitSet *localFollowSet = (ANTLRBitSet *)[state.following objectAtIndex:i];
-        /*
-         System.out.println("local follow depth "+i+"="+
-         localFollowSet.toString(getTokenNames())+")");
-         */
-        [followSet orInPlace:localFollowSet];
-        if ( exact ) {
-            // can we see end of rule?
-            if ( [localFollowSet member:ANTLRTokenTypeEOR] ) {
-                // Only leave EOR in set if at top (start rule); this lets
-                // us know if we have to include follow(start rule); i.e., EOF
-                if ( i > 0 ) {
-                    [followSet remove:ANTLRTokenTypeEOR];
-                }
-            }
-            else { // can't see end of rule, quit
-                break;
-            }
-        }
-    }
-    return followSet;
-}
-
-/** Attempt to recover from a single missing or extra token.
- *
- *  EXTRA TOKEN
- *
- *  LA(1) is not what we are looking for.  If LA(2) has the right token,
- *  however, then assume LA(1) is some extra spurious token.  Delete it
- *  and LA(2) as if we were doing a normal match(), which advances the
- *  input.
- *
- *  MISSING TOKEN
- *
- *  If current token is consistent with what could come after
- *  ttype then it is ok to "insert" the missing token, else throw
- *  exception For example, Input "i=(3;" is clearly missing the
- *  ')'.  When the parser returns from the nested call to expr, it
- *  will have call chain:
- *
- *    stat -> expr -> atom
- *
- *  and it will be trying to match the ')' at this point in the
- *  derivation:
- *
- *       => ID '=' '(' INT ')' ('+' atom)* ';'
- *                          ^
- *  match() will see that ';' doesn't match ')' and report a
- *  mismatched token error.  To recover, it sees that LA(1)==';'
- *  is in the set of tokens that can follow the ')' token
- *  reference in rule atom.  It can assume that you forgot the ')'.
- */
-- (id<ANTLRToken>)recoverFromMismatchedToken:(id<ANTLRIntStream>)anInput
-                       TokenType:(NSInteger)ttype
-                          Follow:(ANTLRBitSet *)follow
-{
-    ANTLRRecognitionException *e = nil;
-    // if next token is what we are looking for then "delete" this token
-    if ( [self mismatchIsUnwantedToken:anInput TokenType:ttype] ) {
-        e = [ANTLRUnwantedTokenException newException:ttype Stream:anInput];
-        /*
-         System.err.println("recoverFromMismatchedToken deleting "+
-         ((TokenStream)input).LT(1)+
-         " since "+((TokenStream)input).LT(2)+" is what we want");
-         */
-        [self beginResync];
-        [anInput consume]; // simply delete extra token
-        [self endResync];
-        [self reportError:e];  // report after consuming so AW sees the token in the exception
-                         // we want to return the token we're actually matching
-        id matchedSymbol = [self getCurrentInputSymbol:anInput];
-        [anInput consume]; // move past ttype token as if all were ok
-        return matchedSymbol;
-    }
-    // can't recover with single token deletion, try insertion
-    if ( [self mismatchIsMissingToken:anInput Follow:follow] ) {
-        id<ANTLRToken> inserted = [self getMissingSymbol:anInput Exception:e TokenType:ttype Follow:follow];
-        e = [ANTLRMissingTokenException newException:ttype Stream:anInput With:inserted];
-        [self reportError:e];  // report after inserting so AW sees the token in the exception
-        return inserted;
-    }
-    // even that didn't work; must throw the exception
-    e = [ANTLRMismatchedTokenException newException:ttype Stream:anInput];
-    @throw e;
-}
-
-/** Not currently used */
--(id) recoverFromMismatchedSet:(id<ANTLRIntStream>)anInput
-                     Exception:(ANTLRRecognitionException *)e
-                        Follow:(ANTLRBitSet *) follow
-{
-    if ( [self mismatchIsMissingToken:anInput Follow:follow] ) {
-        // System.out.println("missing token");
-        [self reportError:e];
-        // we don't know how to conjure up a token for sets yet
-        return [self getMissingSymbol:anInput Exception:e TokenType:ANTLRTokenTypeInvalid Follow:follow];
-    }
-    // TODO do single token deletion like above for Token mismatch
-    @throw e;
-}
-
-/** Match needs to return the current input symbol, which gets put
- *  into the label for the associated token ref; e.g., x=ID.  Token
- *  and tree parsers need to return different objects. Rather than test
- *  for input stream type or change the IntStream interface, I use
- *  a simple method to ask the recognizer to tell me what the current
- *  input symbol is.
- * 
- *  This is ignored for lexers.
- */
-- (id) getCurrentInputSymbol:(id<ANTLRIntStream>)anInput
-{
-    return nil;
-}
-
-/** Conjure up a missing token during error recovery.
- *
- *  The recognizer attempts to recover from single missing
- *  symbols. But, actions might refer to that missing symbol.
- *  For example, x=ID {f($x);}. The action clearly assumes
- *  that there has been an identifier matched previously and that
- *  $x points at that token. If that token is missing, but
- *  the next token in the stream is what we want we assume that
- *  this token is missing and we keep going. Because we
- *  have to return some token to replace the missing token,
- *  we have to conjure one up. This method gives the user control
- *  over the tokens returned for missing tokens. Mostly,
- *  you will want to create something special for identifier
- *  tokens. For literals such as '{' and ',', the default
- *  action in the parser or tree parser works. It simply creates
- *  a CommonToken of the appropriate type. The text will be the token.
- *  If you change what tokens must be created by the lexer,
- *  override this method to create the appropriate tokens.
- */
-- (id)getMissingSymbol:(id<ANTLRIntStream>)anInput
-             Exception:(ANTLRRecognitionException *)e
-             TokenType:(NSInteger)expectedTokenType
-                Follow:(ANTLRBitSet *)follow
-{
-    return nil;
-}
-
-
--(void) consumeUntilTType:(id<ANTLRIntStream>)anInput TokenType:(NSInteger)tokenType
-{
-    //System.out.println("consumeUntil "+tokenType);
-    int ttype = [anInput LA:1];
-    while (ttype != ANTLRTokenTypeEOF && ttype != tokenType) {
-        [anInput consume];
-        ttype = [anInput LA:1];
-    }
-}
-
-/** Consume tokens until one matches the given token set */
--(void) consumeUntilFollow:(id<ANTLRIntStream>)anInput Follow:(ANTLRBitSet *)set
-{
-    //System.out.println("consumeUntil("+set.toString(getTokenNames())+")");
-    int ttype = [anInput LA:1];
-    while (ttype != ANTLRTokenTypeEOF && ![set member:ttype] ) {
-        //System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
-        [anInput consume];
-        ttype = [anInput LA:1];
-    }
-}
-
-/** Push a rule's follow set using our own hardcoded stack */
-- (void)pushFollow:(ANTLRBitSet *)fset
-{
-    if ( (state._fsp +1) >= [state.following count] ) {
-        //        AMutableArray *f = [AMutableArray arrayWithCapacity:[[state.following] count]*2];
-        //        System.arraycopy(state.following, 0, f, 0, state.following.length);
-        //        state.following = f;
-        [state.following addObject:fset];
-        [fset retain];
-        state._fsp++;
-    }
-    else {
-        [state.following replaceObjectAtIndex:++state._fsp withObject:fset];
-    }
-}
-
-- (ANTLRBitSet *)popFollow
-{
-    ANTLRBitSet *fset;
-
-    if ( state._fsp >= 0 && [state.following count] > 0 ) {
-        fset = [state.following objectAtIndex:state._fsp--];
-        [state.following removeLastObject];
-        return fset;
-    }
-    else {
-        NSLog( @"Attempted to pop a follow when none exists on the stack\n" );
-    }
-    return nil;
-}
-
-/** Return List<String> of the rules in your parser instance
- *  leading up to a call to this method.  You could override if
- *  you want more details such as the file/line info of where
- *  in the parser java code a rule is invoked.
- *
- *  This is very useful for error messages and for context-sensitive
- *  error recovery.
- */
-- (AMutableArray *)getRuleInvocationStack
-{
-    NSString *parserClassName = [[self className] retain];
-    return [self getRuleInvocationStack:[ANTLRRecognitionException newException] Recognizer:parserClassName];
-}
-
-/** A more general version of getRuleInvocationStack where you can
- *  pass in, for example, a RecognitionException to get its rule
- *  stack trace.  This routine is shared with all recognizers, hence,
- *  static.
- *
- *  TODO: move to a utility class or something; weird having lexer call this
- */
-- (AMutableArray *)getRuleInvocationStack:(ANTLRRecognitionException *)e
-                                Recognizer:(NSString *)recognizerClassName
-{
-    // char *name;
-    AMutableArray *rules = [[AMutableArray arrayWithCapacity:20] retain];
-    NSArray *stack = [e callStackSymbols];
-    int i = 0;
-    for (i = [stack count]-1; i >= 0; i--) {
-        NSString *t = [stack objectAtIndex:i];
-        // NSLog(@"stack %d = %@\n", i, t);
-        if ( [t commonPrefixWithString:@"org.antlr.runtime." options:NSLiteralSearch] ) {
-            // id aClass = objc_getClass( [t UTF8String] );
-            continue; // skip support code such as this method
-        }
-        if ( [t isEqualTo:NEXT_TOKEN_RULE_NAME] ) {
-            // name = sel_getName(method_getName(method));
-            // NSString *aMethod = [NSString stringWithFormat:@"%s", name];
-            continue;
-        }
-        if ( ![t isEqualTo:recognizerClassName] ) {
-            // name = class_getName( [t UTF8String] );
-            continue; // must not be part of this parser
-        }
-        [rules addObject:t];
-    }
-#ifdef DONTUSEYET
-    StackTraceElement[] stack = e.getStackTrace();
-    int i = 0;
-    for (i=stack.length-1; i>=0; i--) {
-        StackTraceElement t = stack[i];
-        if ( [t getClassName().startsWith("org.antlr.runtime.") ) {
-            continue; // skip support code such as this method
-        }
-              if ( [[t getMethodName] equals:NEXT_TOKEN_RULE_NAME] ) {
-            continue;
-        }
-              if ( ![[t getClassName] equals:recognizerClassName] ) {
-            continue; // must not be part of this parser
-        }
-              [rules addObject:[t getMethodName]];
-    }
-#endif
-    [stack release];
-    return rules;
-}
-
-- (NSInteger) getBacktrackingLevel
-{
-    return [state getBacktracking];
-}
-      
-- (void) setBacktrackingLevel:(NSInteger)level
-{
-    [state setBacktracking:level];
-}
-      
-/** Used to print out token names like ID during debugging and
- *  error reporting.  The generated parsers implement a method
- *  that overrides this to point to their String[] tokenNames.
- */
-- (NSArray *)getTokenNames
-{
-    return tokenNames;
-}
-
-/** For debugging and other purposes, might want the grammar name.
- *  Have ANTLR generate an implementation for this method.
- */
-- (NSString *)getGrammarFileName
-{
-    return grammarFileName;
-}
-
-- (NSString *)getSourceName
-{
-    return nil;
-}
-
-/** A convenience method for use most often with template rewrites.
- *  Convert a List<Token> to List<String>
- */
-- (AMutableArray *)toStrings:(AMutableArray *)tokens
-{
-    if ( tokens == nil )
-        return nil;
-    AMutableArray *strings = [AMutableArray arrayWithCapacity:[tokens count]];
-    id object;
-    NSInteger i = 0;
-    for (object in tokens) {
-        [strings addObject:[object text]];
-        i++;
-    }
-    return strings;
-}
-
-/** Given a rule number and a start token index number, return
- *  ANTLR_MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
- *  start index.  If this rule has parsed input starting from the
- *  start index before, then return where the rule stopped parsing.
- *  It returns the index of the last token matched by the rule.
- *
- *  For now we use a hashtable and just the slow Object-based one.
- *  Later, we can make a special one for ints and also one that
- *  tosses out data after we commit past input position i.
- */
-- (NSInteger)getRuleMemoization:(NSInteger)ruleIndex StartIndex:(NSInteger)ruleStartIndex
-{
-    NSNumber *stopIndexI;
-    ANTLRHashRule *aHashRule;
-    if ( (aHashRule = [state.ruleMemo objectAtIndex:ruleIndex]) == nil ) {
-        aHashRule = [ANTLRHashRule newANTLRHashRuleWithLen:17];
-        [state.ruleMemo insertObject:aHashRule atIndex:ruleIndex];
-    }
-    stopIndexI = [aHashRule getRuleMemoStopIndex:ruleStartIndex];
-    if ( stopIndexI == nil ) {
-        return ANTLR_MEMO_RULE_UNKNOWN;
-    }
-    return [stopIndexI integerValue];
-}
-
-/** Has this rule already parsed input at the current index in the
- *  input stream?  Return the stop token index or MEMO_RULE_UNKNOWN.
- *  If we attempted but failed to parse properly before, return
- *  MEMO_RULE_FAILED.
- *
- *  This method has a side-effect: if we have seen this input for
- *  this rule and successfully parsed before, then seek ahead to
- *  1 past the stop token matched for this rule last time.
- */
-- (BOOL)alreadyParsedRule:(id<ANTLRIntStream>)anInput RuleIndex:(NSInteger)ruleIndex
-{
-    NSInteger aStopIndex = [self getRuleMemoization:ruleIndex StartIndex:anInput.index];
-    if ( aStopIndex == ANTLR_MEMO_RULE_UNKNOWN ) {
-        // NSLog(@"rule %d not yet encountered\n", ruleIndex);
-        return NO;
-    }
-    if ( aStopIndex == ANTLR_MEMO_RULE_FAILED ) {
-        if (debug) NSLog(@"rule %d will never succeed\n", ruleIndex);
-        state.failed = YES;
-    }
-    else {
-        if (debug) NSLog(@"seen rule %d before; skipping ahead to %d failed = %@\n", ruleIndex, aStopIndex+1, state.failed?@"YES":@"NO");
-        [anInput seek:(aStopIndex+1)]; // jump to one past stop token
-    }
-    return YES;
-}
-      
-/** Record whether or not this rule parsed the input at this position
- *  successfully.  Use a standard java hashtable for now.
- */
-- (void)memoize:(id<ANTLRIntStream>)anInput
-      RuleIndex:(NSInteger)ruleIndex
-     StartIndex:(NSInteger)ruleStartIndex
-{
-    ANTLRRuleStack *aRuleStack;
-    NSInteger stopTokenIndex;
-
-    aRuleStack = state.ruleMemo;
-    stopTokenIndex = (state.failed ? ANTLR_MEMO_RULE_FAILED : (anInput.index-1));
-    if ( aRuleStack == nil ) {
-        if (debug) NSLog(@"!!!!!!!!! memo array is nil for %@", [self getGrammarFileName]);
-        return;
-    }
-    if ( ruleIndex >= [aRuleStack length] ) {
-        if (debug) NSLog(@"!!!!!!!!! memo size is %d, but rule index is %d", [state.ruleMemo length], ruleIndex);
-        return;
-    }
-    if ( [aRuleStack objectAtIndex:ruleIndex] != nil ) {
-        [aRuleStack putHashRuleAtRuleIndex:ruleIndex StartIndex:ruleStartIndex StopIndex:stopTokenIndex];
-    }
-    return;
-}
-   
-/** return how many rule/input-index pairs there are in total.
- *  TODO: this includes synpreds. :(
- */
-- (NSInteger)getRuleMemoizationCacheSize
-{
-    ANTLRRuleStack *aRuleStack;
-    ANTLRHashRule *aHashRule;
-
-    int aCnt = 0;
-    aRuleStack = state.ruleMemo;
-    for (NSUInteger i = 0; aRuleStack != nil && i < [aRuleStack length]; i++) {
-        aHashRule = [aRuleStack objectAtIndex:i];
-        if ( aHashRule != nil ) {
-            aCnt += [aHashRule count]; // how many input indexes are recorded?
-        }
-    }
-    return aCnt;
-}
-
-#pragma warning Have to fix traceIn and traceOut.
-- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol
-{
-    NSLog(@"enter %@ %@", ruleName, inputSymbol);
-    if ( state.backtracking > 0 ) {
-        NSLog(@" backtracking=%s", ((state.backtracking==YES)?"YES":"NO"));
-    }
-    NSLog(@"\n");
-}
-
-- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol
-{
-    NSLog(@"exit %@ -- %@", ruleName, inputSymbol);
-    if ( state.backtracking > 0 ) {
-        NSLog(@" backtracking=%s %s", state.backtracking?"YES":"NO", state.failed ? "failed":"succeeded");
-    }
-    NSLog(@"\n");
-}
-
-
-// call a syntactic predicate methods using its selector. this way we can support arbitrary synpreds.
-- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment // stream:(id<ANTLRIntStream>)input
-{
-    id<ANTLRIntStream> input;
-
-    state.backtracking++;
-    // input = state.token.input;
-    input = self.input;
-    int start = [input mark];
-    @try {
-        [self performSelector:synpredFragment];
-    }
-    @catch (ANTLRRecognitionException *re) {
-        NSLog(@"impossible synpred: %@", re.name);
-    }
-    BOOL success = (state.failed == NO);
-    [input rewind:start];
-    state.backtracking--;
-    state.failed = NO;
-    return success;
-}
-              
-@end
-                               
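Editor's note (not part of the original tree): the memoization comments above (getRuleMemoization:, alreadyParsedRule:, memoize:) describe a guard that rules wrap around their bodies while backtracking. The following is a hedged sketch of that pattern; HypMemoParser, hypRule:, and rule index 7 are all hypothetical names for the illustration.

#import "ANTLRBaseRecognizer.h"

@interface HypMemoParser : ANTLRBaseRecognizer
- (void) hypRule:(id<ANTLRIntStream>)input;
@end

@implementation HypMemoParser

- (void) hypRule:(id<ANTLRIntStream>)input
{
    static const NSInteger kHypRuleIndex = 7;       // hypothetical rule number
    NSInteger ruleStart = input.index;              // where this attempt begins

    // While backtracking, skip the body if this rule already ran at this position;
    // alreadyParsedRule: seeks past the memoized stop token or flags a known failure.
    if ( state.backtracking > 0 && [self alreadyParsedRule:input RuleIndex:kHypRuleIndex] ) {
        return;
    }
    @try {
        // ... rule body: match tokens, invoke sub-rules ...
    }
    @finally {
        // Record the stop index (or the failure marker) for this start position.
        if ( state.backtracking > 0 ) {
            [self memoize:input RuleIndex:kHypRuleIndex StartIndex:ruleStart];
        }
    }
}

@end
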
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseStack.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseStack.h
deleted file mode 100644
index 2356178..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseStack.h
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-//  ANTLRBaseStack.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRBaseStack : ANTLRPtrBuffer {
-	//ANTLRRuleStack *fNext;
-    // TStringPool *fPool;
-    NSInteger LastHash;
-}
-
-//@property (copy) ANTLRRuleStack *fNext;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
-
-// Construction/Destruction
-+ (ANTLRBaseStack *)newANTLRBaseStack;
-+ (ANTLRBaseStack *)newANTLRBaseStackWithLen:(NSInteger)cnt;
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSUInteger)count;
-- (NSUInteger)size;
-/* clear -- reinitialize the maplist array */
-
-- (NSInteger)getLastHash;
-- (void)setLastHash:(NSInteger)aVal;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseStack.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseStack.m
deleted file mode 100644
index 7bd2282..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseStack.m
+++ /dev/null
@@ -1,131 +0,0 @@
-//
-//  ANTLRBaseStack.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRBaseStack.h"
-#import "ANTLRTree.h"
-
-/*
- * Start of ANTLRBaseStack
- */
-@implementation ANTLRBaseStack
-
-@synthesize LastHash;
-
-+(ANTLRBaseStack *)newANTLRBaseStack
-{
-    return [[ANTLRBaseStack alloc] init];
-}
-
-+(ANTLRBaseStack *)newANTLRBaseStackWithLen:(NSInteger)cnt
-{
-    return [[ANTLRBaseStack alloc] initWithLen:cnt];
-}
-
--(id)init
-{
-	self = [super initWithLen:HASHSIZE];
-	if ( self != nil ) {
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)cnt
-{
-	self = [super initWithLen:cnt];
-    if ( self != nil ) {
-	}
-    return( self );
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRBaseStack" );
-#endif
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRBaseStack *copy;
-    
-    copy = [super copyWithZone:aZone];
-    return copy;
-}
-
-- (NSUInteger)count
-{
-    NSUInteger aCnt = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        if (ptrBuffer[i] != nil) {
-            aCnt++;
-        }
-    }
-    return aCnt;
-}
-
-- (NSUInteger) size
-{
-    return BuffSize;
-}
-
--(void)deleteANTLRBaseStack:(ANTLRBaseStack *)np
-{
-    id tmp, rtmp;
-    NSInteger idx;
-    
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = (ANTLRLinkBase *)ptrBuffer[idx];
-            while ( tmp ) {
-                rtmp = tmp;
-                tmp = [tmp getfNext];
-                [rtmp release];
-            }
-        }
-    }
-}
-
-- (NSInteger)getLastHash
-{
-    return LastHash;
-}
-
-- (void)setLastHash:(NSInteger)aVal
-{
-    LastHash = aVal;
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTree.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTree.h
deleted file mode 100755
index 1139cd0..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTree.h
+++ /dev/null
@@ -1,210 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTree.h"
-#import "ANTLRCommonToken.h"
-#import "AMutableArray.h"
-
-@protocol ANTLRBaseTree <ANTLRTree>
-
-+ (id<ANTLRBaseTree>) INVALID_NODE;
-
-+ (id<ANTLRBaseTree>) newTree;
-+ (id<ANTLRBaseTree>) newTree:(id<ANTLRBaseTree>)node;
-
-- (id<ANTLRBaseTree>) init;
-- (id<ANTLRBaseTree>) initWith:(id<ANTLRBaseTree>)node;
-
-- (id<ANTLRBaseTree>) getChild:(NSUInteger)i;
-- (AMutableArray *)children;
-- (void) setChildren:(AMutableArray *)anArray;
-- (id<ANTLRBaseTree>)getFirstChildWithType:(NSInteger)type;
-- (NSUInteger) getChildCount;
-
-// Add t as a child to this node.  If t is nil, do nothing.  If t
-//  is a nil (flat-list) node, add all of t's children to this node's children.
-
-- (void) addChild:(id<ANTLRBaseTree>) tree;
-- (void) addChildren:(NSArray *) theChildren;
-//- (void) removeAllChildren;
-
-- (void) setChild:(NSInteger) i With:(id<ANTLRBaseTree>)t;
-- (id) deleteChild:(NSInteger) i;
-- (AMutableArray *) createChildrenList;
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-// Indicates the node is a nil node but may still have children, meaning
-// the tree is a flat list.
-
-- (BOOL) isNil;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex:(NSInteger) index;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (void) freshenParentAndChildIndexes;
-- (void) freshenParentAndChildIndexes:(NSInteger) offset;
-- (void) sanityCheckParentAndChildIndexes;
-- (void) sanityCheckParentAndChildIndexes:(id<ANTLRBaseTree>) parent At:(NSInteger) i;
-
-- (NSInteger) getChildIndex;
-- (void) setChildIndex:(NSInteger)i;
-
-- (id<ANTLRBaseTree>)getAncestor:(NSInteger)ttype;
-- (AMutableArray *)getAncestors;
-
-#pragma mark Copying
-- (id) copyWithZone:(NSZone *)aZone;	// the children themselves are not copied here!
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-#pragma mark Tree Parser support
-- (NSInteger)type;
-- (NSString *)text;
-// In case we don't have a token payload, what is the line for errors?
-- (NSUInteger)line;
-- (NSUInteger)charPositionInLine;
-
-
-#pragma mark Informational
-- (NSString *) treeDescription;
-- (NSString *) description;
-
-- (NSString *) toString;
-- (NSString *) toStringTree;
-
-@property (retain) AMutableArray *children;
-@property (retain) NSException *anException;
-
-@end
-
-@interface ANTLRBaseTree : NSObject <ANTLRTree>
-{
-	__strong AMutableArray *children;
-    __strong NSException *anException;
-}
-
-+ (id<ANTLRBaseTree>) INVALID_NODE;
-+ (id<ANTLRBaseTree>) newTree;
-+ (id<ANTLRBaseTree>) newTree:(id<ANTLRBaseTree>)node;
-         
-- (id<ANTLRBaseTree>) init;
-- (id<ANTLRBaseTree>) initWith:(id<ANTLRBaseTree>)node;
-
-- (id<ANTLRBaseTree>) getChild:(NSUInteger)i;
-- (AMutableArray *)children;
-- (void) setChildren:(AMutableArray *)anArray;
-- (id<ANTLRBaseTree>)getFirstChildWithType:(NSInteger)type;
-- (NSUInteger) getChildCount;
-
-//- (void) removeAllChildren;
-
-// Add t as a child to this node.  If t is nil, do nothing.  If t
-//  is a nil (flat-list) node, add all of t's children to this node's children.
-
-- (void) addChild:(id<ANTLRBaseTree>) tree;
-- (void) addChildren:(NSArray *) theChildren;
-
-- (void) setChild:(NSUInteger) i With:(id<ANTLRBaseTree>)t;
-- (id) deleteChild:(NSUInteger) idx;
-- (AMutableArray *) createChildrenList;
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-// Indicates the node is a nil node but may still have children, meaning
-	// the tree is a flat list.
-
-- (BOOL) isNil;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex:(NSInteger) index;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (void) freshenParentAndChildIndexes;
-- (void) freshenParentAndChildIndexes:(NSInteger) offset;
-- (void) sanityCheckParentAndChildIndexes;
-- (void) sanityCheckParentAndChildIndexes:(id<ANTLRBaseTree>)parent At:(NSInteger) i;
-
-- (NSInteger) getChildIndex;
-- (void) setChildIndex:(NSInteger)i;
-
-- (BOOL) hasAncestor:(NSInteger) ttype;
-- (id<ANTLRBaseTree>)getAncestor:(NSInteger)ttype;
-- (AMutableArray *)getAncestors;
-
-- (id) copyWithZone:(NSZone *)aZone;
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-	// Return a token type; needed for tree parsing
-- (NSInteger)type;
-- (NSString *)text;
-
-	// In case we don't have a token payload, what is the line for errors?
-- (NSUInteger)line;
-- (NSUInteger)charPositionInLine;
-- (void) setCharPositionInLine:(NSUInteger)pos;
-
-- (NSString *) treeDescription;
-- (NSString *) description;
-- (NSString *) toString;
-- (NSString *) toStringTree;
-
-@property (retain) AMutableArray *children;
-@property (retain) NSException *anException;
-
-@end
-
-@interface ANTLRTreeNavigationNode : ANTLRBaseTree {
-}
-- (id) init;
-- (id) copyWithZone:(NSZone *)aZone;
-@end
-
-@interface ANTLRTreeNavigationNodeDown : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeDown *) getNavigationNodeDown;
-- (id) init;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-@interface ANTLRTreeNavigationNodeUp : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeUp *) getNavigationNodeUp;
-- (id) init;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-@interface ANTLRTreeNavigationNodeEOF : ANTLRTreeNavigationNode {
-}
-+ (ANTLRTreeNavigationNodeEOF *) getNavigationNodeEOF;
-- (id) init;
-- (NSInteger) tokenType;
-- (NSString *) description;
-@end
-
-extern ANTLRTreeNavigationNodeDown *navigationNodeDown;
-extern ANTLRTreeNavigationNodeUp *navigationNodeUp;
-extern ANTLRTreeNavigationNodeEOF *navigationNodeEOF;
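The protocol and class above encode the nil-node ("flat list") convention that all tree construction relies on: adding a nil-rooted tree to a node splices in that tree's children instead of the tree itself. A rough sketch, assuming the concrete ANTLRCommonTree subclass and its newTreeWithToken: factory used elsewhere in this change, with rootToken, aToken and bToken standing in for real id<ANTLRToken> values.

// Sketch only: ANTLRCommonTree and the token variables are assumptions here.
id<ANTLRBaseTree> root = [ANTLRCommonTree newTreeWithToken:rootToken];
id<ANTLRBaseTree> list = [ANTLRCommonTree newTreeWithToken:nil]; // token-less => nil node
[list addChild:[ANTLRCommonTree newTreeWithToken:aToken]];
[list addChild:[ANTLRCommonTree newTreeWithToken:bToken]];
[root addChild:list];              // splices list's children under root
NSLog(@"%@", [root toStringTree]); // e.g. prints (root a b)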
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTree.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTree.m
deleted file mode 100755
index 5ba4eac..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTree.m
+++ /dev/null
@@ -1,616 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRBaseTree.h"
-#import "ANTLRBaseTreeAdaptor.h"
-#import "ANTLRToken.h"
-// TODO: this shouldn't be here...but needed for invalidNode
-#import "AMutableArray.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRRuntimeException.h"
-#import "ANTLRError.h"
-
-#pragma mark - Navigation Nodes
-ANTLRTreeNavigationNodeDown *navigationNodeDown = nil;
-ANTLRTreeNavigationNodeUp *navigationNodeUp = nil;
-ANTLRTreeNavigationNodeEOF *navigationNodeEOF = nil;
-
-
-@implementation ANTLRBaseTree
-
-static id<ANTLRBaseTree> invalidNode = nil;
-
-#pragma mark ANTLRTree protocol conformance
-
-+ (id<ANTLRBaseTree>) INVALID_NODE
-{
-	if ( invalidNode == nil ) {
-		invalidNode = [[ANTLRCommonTree alloc] initWithTokenType:ANTLRTokenTypeInvalid];
-	}
-	return invalidNode;
-}
-
-+ (id<ANTLRBaseTree>) invalidNode
-{
-	if ( invalidNode == nil ) {
-		invalidNode = [[ANTLRCommonTree alloc] initWithTokenType:ANTLRTokenTypeInvalid];
-	}
-	return invalidNode;
-}
-
-+ newTree
-{
-    return [[ANTLRBaseTree alloc] init];
-}
-
-/** Create a new node from an existing node; this does nothing for ANTLRBaseTree
- *  as there are no fields other than the children list, which cannot
- *  be copied because the children are not considered part of this node.
- */
-+ newTree:(id<ANTLRBaseTree>) node
-{
-    return [[ANTLRBaseTree alloc] initWith:(id<ANTLRBaseTree>) node];
-}
-
-- (id) init
-{
-    self = [super init];
-    if ( self != nil ) {
-        children = nil;
-        return self;
-    }
-    return nil;
-}
-
-- (id) initWith:(id<ANTLRBaseTree>)node
-{
-    self = [super init];
-    if ( self != nil ) {
-        // children = [[AMutableArray arrayWithCapacity:5] retain];
-        // [children addObject:node];
-        [self addChild:node];
-        return self;
-    }
-    return nil;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRBaseTree" );
-#endif
-	if ( children ) [children release];
-	[super dealloc];
-}
-
-- (id<ANTLRBaseTree>) getChild:(NSUInteger)i
-{
-    if ( children == nil || i >= [children count] ) {
-        return nil;
-    }
-    return (id<ANTLRBaseTree>)[children objectAtIndex:i];
-}
-
-/** Get the children internal List; note that if you directly mess with
- *  the list, do so at your own risk.
- */
-- (AMutableArray *) children
-{
-    return children;
-}
-
-- (void) setChildren:(AMutableArray *)anArray
-{
-    if ( children != anArray ) {
-        if ( children ) [children release];
-        if ( anArray ) [anArray retain];
-    }
-    children = anArray;
-}
-
-- (id<ANTLRBaseTree>) getFirstChildWithType:(NSInteger) aType
-{
-    for (NSUInteger i = 0; children != nil && i < [children count]; i++) {
-        id<ANTLRBaseTree> t = (id<ANTLRBaseTree>) [children objectAtIndex:i];
-        if ( t.type == aType ) {
-            return t;
-        }
-    }	
-    return nil;
-}
-
-- (NSUInteger) getChildCount
-{
-    if ( children == nil ) {
-        return 0;
-    }
-    return [children count];
-}
-
-/** Add t as a child of this node.
- *
- *  Warning: if this node has no children but t does, and t isNil,
- *  then this routine moves t's children to this node via
- *  children = t.children; i.e., without copying the array.
- */
-- (void) addChild:(id<ANTLRBaseTree>) t
-{
-    //System.out.println("add child "+t.toStringTree()+" "+self.toStringTree());
-    //System.out.println("existing children: "+children);
-    if ( t == nil ) {
-        return; // do nothing upon addChild(nil)
-    }
-    if ( self == (ANTLRBaseTree *)t )
-        @throw [ANTLRIllegalArgumentException newException:@"ANTLRBaseTree Can't add self to self as child"];        
-    id<ANTLRBaseTree> childTree = (id<ANTLRBaseTree>) t;
-    if ( [childTree isNil] ) { // t is an empty node possibly with children
-        if ( children != nil && children == childTree.children ) {
-            @throw [ANTLRRuntimeException newException:@"ANTLRBaseTree add child list to itself"];
-        }
-        // just add all of childTree's children to this
-        if ( childTree.children != nil ) {
-            if ( children != nil ) { // must copy, this has children already
-                int n = [childTree.children count];
-                for ( int i = 0; i < n; i++) {
-                    id<ANTLRBaseTree> c = (id<ANTLRBaseTree>)[childTree.children objectAtIndex:i];
-                    [children addObject:c];
-                    // handle double-link stuff for each child of nil root
-                    [c setParent:(id<ANTLRBaseTree>)self];
-                    [c setChildIndex:[children count]-1];
-                }
-            }
-            else {
-                // no children for this but t has children; just set pointer
-                // call general freshener routine
-                children = childTree.children;
-                [self freshenParentAndChildIndexes];
-            }
-        }
-    }
-    else { // child is not nil (don't care about children)
-        if ( children == nil ) {
-            children = [[AMutableArray arrayWithCapacity:5] retain]; // create children list on demand
-        }
-        [children addObject:t];
-        [childTree setParent:(id<ANTLRBaseTree>)self];
-        [childTree setChildIndex:[children count]-1];
-    }
-    // System.out.println("now children are: "+children);
-}
-
-/** Add all elements of kids list as children of this node */
-- (void) addChildren:(AMutableArray *) kids
-{
-    for (NSUInteger i = 0; i < [kids count]; i++) {
-        id<ANTLRBaseTree> t = (id<ANTLRBaseTree>) [kids objectAtIndex:i];
-        [self addChild:t];
-    }
-}
-
-- (void) setChild:(NSUInteger) i With:(id<ANTLRBaseTree>)t
-{
-    if ( t == nil ) {
-        return;
-    }
-    if ( [t isNil] ) {
-        @throw [ANTLRIllegalArgumentException newException:@"ANTLRBaseTree Can't set single child to a list"];        
-    }
-    if ( children == nil ) {
-        children = [[AMutableArray arrayWithCapacity:5] retain];
-    }
-    if ([children count] > i ) {
-        [children replaceObjectAtIndex:i withObject:t];
-    }
-    else {
-        [children insertObject:t atIndex:i];
-    }
-    [t setParent:(id<ANTLRBaseTree>)self];
-    [t setChildIndex:i];
-}
-
-- (id) deleteChild:(NSUInteger) idx
-{
-    if ( children == nil ) {
-        return nil;
-    }
-    id<ANTLRBaseTree> killed = (id<ANTLRBaseTree>)[children objectAtIndex:idx];
-    [children removeObjectAtIndex:idx];
-    // walk rest and decrement their child indexes
-    [self freshenParentAndChildIndexes:idx];
-    return killed;
-}
-
-/** Delete children from start to stop and replace with t even if t is
- *  a list (nil-root ANTLRTree).  num of children can increase or decrease.
- *  For huge child lists, inserting children can force walking rest of
- *  children to set their childindex; could be slow.
- */
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t
-{
-    /*
-     System.out.println("replaceChildren "+startChildIndex+", "+stopChildIndex+
-     " with "+((ANTLRBaseTree)t).toStringTree());
-     System.out.println("in="+toStringTree());
-     */
-    if ( children == nil ) {
-        @throw [ANTLRIllegalArgumentException newException:@"ANTLRBaseTree Invalid Indexes; no children in list"];        
-    }
-    int replacingHowMany = stopChildIndex - startChildIndex + 1;
-    int replacingWithHowMany;
-    id<ANTLRBaseTree> newTree = (id<ANTLRBaseTree>) t;
-    AMutableArray *newChildren = nil;
-    // normalize to a list of children to add: newChildren
-    if ( [newTree isNil] ) {
-        newChildren = newTree.children;
-    }
-    else {
-        newChildren = [AMutableArray arrayWithCapacity:5];
-        [newChildren addObject:newTree];
-    }
-    replacingWithHowMany = [newChildren count];
-    int numNewChildren = [newChildren count];
-    int delta = replacingHowMany - replacingWithHowMany;
-    // if same number of nodes, do direct replace
-    if ( delta == 0 ) {
-        int j = 0; // index into new children
-        for (int i=startChildIndex; i <= stopChildIndex; i++) {
-            id<ANTLRBaseTree> child = (id<ANTLRBaseTree>)[newChildren objectAtIndex:j];
-            [children replaceObjectAtIndex:i withObject:(id)child];
-            [child setParent:(id<ANTLRBaseTree>)self];
-            [child setChildIndex:i];
-            j++;
-        }
-    }
-    else if ( delta > 0 ) { // fewer new nodes than there were
-                            // set children and then delete extra
-        for (int j = 0; j < numNewChildren; j++) {
-            [children replaceObjectAtIndex:startChildIndex+j withObject:[newChildren objectAtIndex:j]];
-        }
-        int indexToDelete = startChildIndex+numNewChildren;
-        for (int c=indexToDelete; c<=stopChildIndex; c++) {
-            // delete same index, shifting everybody down each time
-            [children removeObjectAtIndex:indexToDelete];
-        }
-        [self freshenParentAndChildIndexes:startChildIndex];
-    }
-    else { // more new nodes than were there before
-           // fill in as many children as we can (replacingHowMany) w/o moving data
-        for (int j=0; j<replacingHowMany; j++) {
-            [children replaceObjectAtIndex:startChildIndex+j withObject:[newChildren objectAtIndex:j]];
-        }
-        //        int numToInsert = replacingWithHowMany-replacingHowMany;
-        for (int j=replacingHowMany; j<replacingWithHowMany; j++) {
-            [children insertObject:[newChildren objectAtIndex:j] atIndex:startChildIndex+j];
-        }
-        [self freshenParentAndChildIndexes:startChildIndex];
-    }
-    //System.out.println("out="+toStringTree());
-}
-
-/** Override in a subclass to change the impl of children list */
-- (AMutableArray *) createChildrenList
-{
-    return [AMutableArray arrayWithCapacity:5];
-}
-
-- (BOOL) isNil
-{
-    return NO;
-}
-
-/** Set the parent and child index values for all child of t */
-- (void) freshenParentAndChildIndexes
-{
-    [self freshenParentAndChildIndexes:0];
-}
-               
-- (void) freshenParentAndChildIndexes:(NSInteger) offset
-{
-    int n = [self getChildCount];
-    for (int i = offset; i < n; i++) {
-        id<ANTLRBaseTree> child = (id<ANTLRBaseTree>)[self getChild:i];
-        [child setChildIndex:i];
-        [child setParent:(id<ANTLRBaseTree>)self];
-    }
-}
-               
-- (void) sanityCheckParentAndChildIndexes
-{
-    [self sanityCheckParentAndChildIndexes:nil At:-1];
-}
-               
-- (void) sanityCheckParentAndChildIndexes:(id<ANTLRBaseTree>)aParent At:(NSInteger) i
-{
-    if ( aParent != [self getParent] ) {
-        @throw [ANTLRIllegalStateException newException:[NSString stringWithFormat:@"parents don't match; expected %@ found %@", aParent, [self getParent]]];
-    }
-    if ( i != [self getChildIndex] ) {
-        @throw [ANTLRIllegalStateException newException:[NSString stringWithFormat:@"child indexes don't match; expected %d found %d", i, [self getChildIndex]]];
-    }
-    int n = [self getChildCount];
-    for (int c = 0; c < n; c++) {
-        id<ANTLRBaseTree> child = (id<ANTLRBaseTree>)[self getChild:c];
-        [child sanityCheckParentAndChildIndexes:(id<ANTLRBaseTree>)self At:c];
-    }
-}
-               
-/**  What is the smallest token index (indexing from 0) for this node
- *   and its children?
- */
-- (NSInteger) getTokenStartIndex
-{
-    return 0;
-}
-
-- (void) setTokenStartIndex:(NSInteger) anIndex
-{
-}
-
-/**  What is the largest token index (indexing from 0) for this node
- *   and its children?
- */
-- (NSInteger) getTokenStopIndex
-{
-    return 0;
-}
-
-- (void) setTokenStopIndex:(NSInteger) anIndex
-{
-}
-
-- (id<ANTLRBaseTree>) dupNode
-{
-    return nil;
-}
-
-
-/** ANTLRBaseTree doesn't track child indexes. */
-- (NSInteger) getChildIndex
-{
-    return 0;
-}
-
-- (void) setChildIndex:(NSInteger) anIndex
-{
-}
-
-/** ANTLRBaseTree doesn't track parent pointers. */
-- (id<ANTLRBaseTree>) getParent
-{
-    return nil;
-}
-
-- (void) setParent:(id<ANTLRBaseTree>) t
-{
-}
-
-/** Walk upwards looking for ancestor with this token type. */
-- (BOOL) hasAncestor:(NSInteger) ttype
-{
-    return([self getAncestor:ttype] != nil);
-}
-
-/** Walk upwards and get first ancestor with this token type. */
-- (id<ANTLRBaseTree>) getAncestor:(NSInteger) ttype
-{
-    id<ANTLRBaseTree> t = (id<ANTLRBaseTree>)self;
-    t = (id<ANTLRBaseTree>)[t getParent];
-    while ( t != nil ) {
-        if ( t.type == ttype )
-            return t;
-        t = (id<ANTLRBaseTree>)[t getParent];
-    }
-    return nil;
-}
-
-/** Return a list of all ancestors of this node.  The first node of
- *  the list is the root and the last is the parent of this node.
- */
-- (AMutableArray *)getAncestors
-{
-    if ( [self getParent] == nil )
-        return nil;
-    AMutableArray *ancestors = [AMutableArray arrayWithCapacity:5];
-    id<ANTLRBaseTree> t = (id<ANTLRBaseTree>)self;
-    t = (id<ANTLRBaseTree>)[t getParent];
-    while ( t != nil ) {
-        [ancestors insertObject:t atIndex:0]; // insert at start
-        t = (id<ANTLRBaseTree>)[t getParent];
-    }
-    return ancestors;
-}
-
-- (NSInteger)type
-{
-    return ANTLRTokenTypeInvalid;
-}
-
-- (NSString *)text
-{
-    return nil;
-}
-
-- (NSUInteger)line
-{
-    return 0;
-}
-
-- (NSUInteger)charPositionInLine
-{
-    return 0;
-}
-
-- (void) setCharPositionInLine:(NSUInteger) pos
-{
-}
-
-#pragma mark Copying
-     
-     // the children themselves are not copied here!
-- (id) copyWithZone:(NSZone *)aZone
-{
-    id<ANTLRBaseTree> theCopy = [[[self class] allocWithZone:aZone] init];
-    [theCopy addChildren:self.children];
-    return theCopy;
-}
-     
-- (id) deepCopy 					// performs a deepCopyWithZone: with the default zone
-{
-    return [self deepCopyWithZone:NULL];
-}
-     
-- (id) deepCopyWithZone:(NSZone *)aZone
-{
-    id<ANTLRBaseTree> theCopy = [self copyWithZone:aZone];
-        
-    if ( [theCopy.children count] )
-        [theCopy.children removeAllObjects];
-    AMutableArray *childrenCopy = theCopy.children;
-    for (id loopItem in children) {
-        id<ANTLRBaseTree> childCopy = [loopItem deepCopyWithZone:aZone];
-        [theCopy addChild:childCopy];
-    }
-    if ( childrenCopy ) [childrenCopy release];
-    return theCopy;
-}
-     
-- (NSString *) treeDescription
-{
-    if ( children == nil || [children count] == 0 ) {
-        return [self description];
-    }
-    NSMutableString *buf = [NSMutableString stringWithCapacity:[children count]];
-    if ( ![self isNil] ) {
-        [buf appendString:@"("];
-        [buf appendString:[self toString]];
-        [buf appendString:@" "];
-    }
-    for (int i = 0; children != nil && i < [children count]; i++) {
-        id<ANTLRBaseTree> t = (id<ANTLRBaseTree>)[children objectAtIndex:i];
-        if ( i > 0 ) {
-            [buf appendString:@" "];
-        }
-        [buf appendString:[(id<ANTLRBaseTree>)t toStringTree]];
-    }
-    if ( ![self isNil] ) {
-        [buf appendString:@")"];
-    }
-    return buf;
-}
-
-/** Print out a whole tree not just a node */
-- (NSString *) toStringTree
-{
-    return [self treeDescription];
-}
-
-- (NSString *) description
-{
-    return nil;
-}
-
-/** Override to say how a node (not a tree) should look as text */
-- (NSString *) toString
-{
-    return nil;
-}
-
-@synthesize children;
-@synthesize anException;
-
-@end
-
-#pragma mark -
-
-@implementation ANTLRTreeNavigationNode
-- (id)init
-{
-    self = (ANTLRTreeNavigationNode *)[super init];
-    return self;
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-	return nil;
-}
-@end
-
-@implementation ANTLRTreeNavigationNodeDown
-+ (ANTLRTreeNavigationNodeDown *) getNavigationNodeDown
-{
-    if ( navigationNodeDown == nil )
-        navigationNodeDown = [[ANTLRTreeNavigationNodeDown alloc] init];
-    return navigationNodeDown;
-}
-
-- (id)init
-{
-    self = [super init];
-    return self;
-}
-
-- (NSInteger) tokenType { return ANTLRTokenTypeDOWN; }
-- (NSString *) description { return @"DOWN"; }
-@end
-
-@implementation ANTLRTreeNavigationNodeUp
-+ (ANTLRTreeNavigationNodeUp *) getNavigationNodeUp
-{
-    if ( navigationNodeUp == nil )
-        navigationNodeUp = [[ANTLRTreeNavigationNodeUp alloc] init];
-    return navigationNodeUp;
-}
-
-
-- (id)init
-{
-    self = [super init];
-    return self;
-}
-
-- (NSInteger) tokenType { return ANTLRTokenTypeUP; }
-- (NSString *) description { return @"UP"; }
-@end
-
-@implementation ANTLRTreeNavigationNodeEOF
-+ (ANTLRTreeNavigationNodeEOF *) getNavigationNodeEOF
-{
-    if ( navigationNodeEOF == nil )
-        navigationNodeEOF = [[ANTLRTreeNavigationNodeEOF alloc] init];
-    return navigationNodeEOF;
-}
-
-- (id)init
-{
-    self = [super init];
-    return self;
-}
-
-- (NSInteger) tokenType { return ANTLRTokenTypeEOF; }
-- (NSString *) description { return @"EOF"; }
-
-@end
-
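replaceChildrenFrom:To:With: above distinguishes three cases, equal, fewer, and more replacement nodes than the range being replaced, so a tree rewrite can splice an arbitrary subtree over a child range in one call. A short illustrative use, assuming t already has at least three children and newList is a nil-rooted list built as in the sketch above.

// Replaces children 1..2 of t with every child of the nil-rooted newList;
// the method refreshes child indexes and parent pointers itself.
[t replaceChildrenFrom:1 To:2 With:newList];
NSLog(@"%lu children after rewrite", (unsigned long)[t getChildCount]);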
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h
deleted file mode 100644
index f8ed0d2..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.h
+++ /dev/null
@@ -1,182 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRCommonErrorNode.h"
-#import "ANTLRUniqueIDMap.h"
-
-@interface ANTLRBaseTreeAdaptor : NSObject <ANTLRTreeAdaptor, NSCopying> {
-    ANTLRUniqueIDMap *treeToUniqueIDMap;
-	NSInteger uniqueNodeID;
-}
-
-- (id) init;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id) emptyNode;
-
-- (id) createNil;
-
-/** create tree node that holds the start and stop tokens associated
- *  with an error.
- *
- *  If you specify your own kind of tree nodes, you will likely have to
- *  override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
- *  if no token payload but you might have to set token type for diff
- *  node type.
- *
- *  You don't have to subclass CommonErrorNode; you will likely need to
- *  subclass your own tree node class to avoid class cast exception.
- */
-- (id) errorNode:(id<ANTLRTokenStream>)anInput
-            From:(id<ANTLRToken>)startToken
-              To:(id<ANTLRToken>)stopToken
-       Exception:(NSException *) e;
-
-- (BOOL) isNil:(id<ANTLRBaseTree>) aTree;
-
-- (id<ANTLRBaseTree>)dupTree:(id<ANTLRBaseTree>)aTree;
-
-/** This is generic in the sense that it will work with any kind of
- *  tree (not just Tree interface).  It invokes the adaptor routines
- *  not the tree node routines to do the construction.  
- */
-- (id<ANTLRBaseTree>)dupTree:(id<ANTLRBaseTree>)aTree Parent:(id<ANTLRBaseTree>)parent;
-- (id<ANTLRBaseTree>)dupNode:(id<ANTLRBaseTree>)aNode;
-/** Add a child to the tree t.  If child is a flat tree (a list), make all
- *  in list children of t.  Warning: if t has no children, but child does
- *  and child isNil then you can decide it is ok to move children to t via
- *  t.children = child.children; i.e., without copying the array.  Just
- *  make sure that this is consistent with how the user will build
- *  ASTs.
- */
-- (void) addChild:(id<ANTLRBaseTree>)aChild toTree:(id<ANTLRBaseTree>)aTree;
-
-/** If oldRoot is a nil root, just copy or move the children to newRoot.
- *  If not a nil root, make oldRoot a child of newRoot.
- *
- *    old=^(nil a b c), new=r yields ^(r a b c)
- *    old=^(a b c), new=r yields ^(r ^(a b c))
- *
- *  If newRoot is a nil-rooted single child tree, use the single
- *  child as the new root node.
- *
- *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
- *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
- *
- *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
- *
- *    old=null, new=r yields r
- *    old=null, new=^(nil r) yields ^(nil r)
- *
- *  Return newRoot.  Throw an exception if newRoot is not a
- *  simple node or nil root with a single child node--it must be a root
- *  node.  If newRoot is ^(nil x) return x as newRoot.
- *
- *  Be advised that it's ok for newRoot to point at oldRoot's
- *  children; i.e., you don't have to copy the list.  We are
- *  constructing these nodes so we should have this control for
- *  efficiency.
- */
-- (id<ANTLRBaseTree>)becomeRoot:(id<ANTLRBaseTree>)aNewRoot old:(id<ANTLRBaseTree>)oldRoot;
-
-/** Transform ^(nil x) to x and nil to null */
-- (id<ANTLRBaseTree>)rulePostProcessing:(id<ANTLRBaseTree>)aRoot;
-
-- (id<ANTLRBaseTree>)becomeRootfromToken:(id<ANTLRToken>)aNewRoot old:(id<ANTLRBaseTree>)oldRoot;
-
-- (id<ANTLRBaseTree>) create:(id<ANTLRToken>)payload;
-- (id<ANTLRBaseTree>) createTree:(NSInteger)aTType FromToken:(id<ANTLRToken>)aFromToken;
-- (id<ANTLRBaseTree>) createTree:(NSInteger)aTType FromToken:(id<ANTLRToken>)aFromToken Text:(NSString *)theText;
-- (id<ANTLRBaseTree>) createTree:(NSInteger)aTType Text:(NSString *)theText;
-
-- (NSInteger) getType:(id<ANTLRBaseTree>)aTree;
-
-- (void) setType:(id<ANTLRBaseTree>)aTree Type:(NSInteger)type;
-
-- (id<ANTLRToken>)getToken:(ANTLRCommonTree *)t;
-
-- (NSString *)getText:(ANTLRCommonTree *)aTree;
-
-- (void) setText:(id<ANTLRBaseTree>)aTree Text:(NSString *)theText;
-
-- (id<ANTLRBaseTree>) getChild:(id<ANTLRBaseTree>)aTree At:(NSInteger)i;
-
-- (void) setChild:(id<ANTLRBaseTree>)aTree At:(NSInteger)index Child:(id<ANTLRBaseTree>)aChild;
-
-- (id<ANTLRBaseTree>) deleteChild:(id<ANTLRBaseTree>)aTree Index:(NSInteger)index;
-
-- (NSInteger) getChildCount:(id<ANTLRBaseTree>)aTree;
-
-- (id<ANTLRBaseTree>) getParent:(id<ANTLRBaseTree>) t;
-
-- (void) setParent:(id<ANTLRBaseTree>)t With:(id<ANTLRBaseTree>) parent;
-
-/** What index is this node in the child list? Range: 0..n-1
- *  If your node type doesn't handle this, it's ok but the tree rewrites
- *  in tree parsers need this functionality.
- */
-- (NSInteger) getChildIndex:(id)t;
-- (void) setChildIndex:(id)t With:(NSInteger)index;
-
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t;
-
-- (NSInteger) getUniqueID:(id<ANTLRBaseTree>)node;
-
-#ifdef DONTUSENOMO
-- (NSInteger) getUniqueID;
-
-- (void) setUniqueNodeID:(NSInteger)aUniqueNodeID;
-
-- (ANTLRUniqueIDMap *)getTreeToUniqueIDMap;
-
-- (void) setTreeToUniqueIDMap:(ANTLRUniqueIDMap *)aMapNode;
-#endif
-
-/** Tell me how to create a token for use with imaginary token nodes.
- *  For example, there is probably no input symbol associated with imaginary
- *  token DECL, but you need to create it as a payload or whatever for
- *  the DECL node as in ^(DECL type ID).
- *
- *  This is a variant of createToken where the new token is derived from
- *  an actual real input token.  Typically this is for converting '{'
- *  tokens to BLOCK etc...  You'll see
- *
- *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
- *
- *  If you care what the token payload objects' type is, you should
- *  override this method and any other createToken variant.
- */
-- (id<ANTLRToken>)createToken:(NSInteger)aTType Text:(NSString *)theText;
-
-- (id<ANTLRToken>)createToken:(id<ANTLRToken>)aFromToken;
-
-@property (retain) ANTLRUniqueIDMap *treeToUniqueIDMap;
-@property (assign) NSInteger uniqueNodeID;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m
deleted file mode 100644
index 58b181d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBaseTreeAdaptor.m
+++ /dev/null
@@ -1,429 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRBaseTreeAdaptor.h"
-#import "ANTLRRuntimeException.h"
-#import "ANTLRUniqueIDMap.h"
-#import "ANTLRMapElement.h"
-#import "ANTLRCommonTree.h"
-
-@implementation ANTLRBaseTreeAdaptor
-
-@synthesize treeToUniqueIDMap;
-@synthesize uniqueNodeID;
-
-+ (id<ANTLRTree>) newEmptyTree
-{
-    return [[ANTLRCommonTree alloc] init];
-}
-
-- (id) init
-{
-    self = [super init];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRBaseTreeAdaptor *copy;
-    
-    copy = [[[self class] alloc] init];
-    if (treeToUniqueIDMap)
-        copy.treeToUniqueIDMap = [treeToUniqueIDMap copyWithZone:aZone];
-    copy.uniqueNodeID = uniqueNodeID;
-    return copy;
-}
-    
-
-- (id) createNil
-{
-    return [ANTLRCommonTree newTreeWithToken:nil];
-}
-
-- (id) emptyNode
-{
-    return [ANTLRCommonTree newTreeWithToken:nil];
-}
-
-/** create tree node that holds the start and stop tokens associated
- *  with an error.
- *
- *  If you specify your own kind of tree nodes, you will likely have to
- *  override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
- *  if no token payload but you might have to set token type for diff
- *  node type.
- *
- *  You don't have to subclass CommonErrorNode; you will likely need to
- *  subclass your own tree node class to avoid class cast exception.
- */
-- (id) errorNode:(id<ANTLRTokenStream>)anInput
-            From:(id<ANTLRToken>)startToken
-              To:(id<ANTLRToken>)stopToken
-       Exception:(ANTLRRecognitionException *) e;
-{
-    //System.out.println("returning error node '"+t+"' @index="+anInput.index());
-    return [ANTLRCommonErrorNode newANTLRCommonErrorNode:anInput
-                                                    From:startToken
-                                                      To:stopToken
-                                               Exception:e];
-}
-
-- (BOOL) isNil:(id) tree
-{
-    return [(id)tree isNil];
-}
-
-- (id)dupTree:(id)tree
-{
-    return [self dupTree:(id)tree Parent:nil];
-}
-
-/** This is generic in the sense that it will work with any kind of
- *  tree (not just Tree interface).  It invokes the adaptor routines
- *  not the tree node routines to do the construction.  
- */
-- (id)dupTree:(id)t Parent:(id)parent
-{
-    if ( t==nil ) {
-        return nil;
-    }
-    id newTree = [self dupNode:t];
-    // ensure new subtree root has parent/child index set
-    [self setChildIndex:newTree With:[self getChildIndex:t]]; // same index in new tree
-    [self setParent:newTree With:parent];
-    NSInteger n = [self getChildCount:t];
-    for (NSInteger i = 0; i < n; i++) {
-        id child = [self getChild:t At:i];
-        id newSubTree = [self dupTree:child Parent:t];
-        [self addChild:newSubTree toTree:newTree];
-    }
-    return newTree;
-}
-
-- (id)dupNode:(id)aNode
-{
-    return aNode; // override for better results :>)
-}
-/** Add a child to the tree t.  If child is a flat tree (a list), make all
- *  in list children of t.  Warning: if t has no children, but child does
- *  and child isNil then you can decide it is ok to move children to t via
- *  t.children = child.children; i.e., without copying the array.  Just
- *  make sure that this is consistent with how the user will build
- *  ASTs.
- */
-- (void) addChild:(id)child toTree:(id)t
-{
-    if ( t != nil && child != nil ) {
-        [(id)t addChild:child];
-    }
-}
-
-/** If oldRoot is a nil root, just copy or move the children to newRoot.
- *  If not a nil root, make oldRoot a child of newRoot.
- *
- *    old=^(nil a b c), new=r yields ^(r a b c)
- *    old=^(a b c), new=r yields ^(r ^(a b c))
- *
- *  If newRoot is a nil-rooted single child tree, use the single
- *  child as the new root node.
- *
- *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
- *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
- *
- *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
- *
- *    old=null, new=r yields r
- *    old=null, new=^(nil r) yields ^(nil r)
- *
- *  Return newRoot.  Throw an exception if newRoot is not a
- *  simple node or nil root with a single child node--it must be a root
- *  node.  If newRoot is ^(nil x) return x as newRoot.
- *
- *  Be advised that it's ok for newRoot to point at oldRoot's
- *  children; i.e., you don't have to copy the list.  We are
- *  constructing these nodes so we should have this control for
- *  efficiency.
- */
-- (id)becomeRoot:(id)newRoot old:(id)oldRoot
-{
-    if ( oldRoot == nil ) {
-        return newRoot;
-    }
-    //System.out.println("becomeroot new "+newRoot.toString()+" old "+oldRoot);
-    id newRootTree = (id)newRoot;
-    id oldRootTree = (id)oldRoot;
-    // handle ^(nil real-node)
-    if ( [newRootTree isNil] ) {
-        NSInteger nc = [newRootTree getChildCount];
-        if ( nc == 1 ) newRootTree = [(id)newRootTree getChild:0];
-        else if ( nc > 1 ) {
-            // TODO: make tree run time exceptions hierarchy
-            @throw [ANTLRRuntimeException newException:NSStringFromClass([self class]) reason:@"more than one node as root (TODO: make exception hierarchy)"];
-        }
-    }
-    // add oldRoot to newRoot; addChild takes care of case where oldRoot
-    // is a flat list (i.e., nil-rooted tree).  All children of oldRoot
-    // are added to newRoot.
-    [newRootTree addChild:oldRootTree];
-    return newRootTree;
-}
-
-/** Transform ^(nil x) to x and nil to null */
-- (id)rulePostProcessing:(id)root
-{
-    //System.out.println("rulePostProcessing: "+((Tree)root).toStringTree());
-    id r = (id)root;
-    if ( r != nil && [r isNil] ) {
-        if ( [r getChildCount] == 0 ) {
-            r = nil;
-        }
-        else if ( [r getChildCount] == 1 ) {
-            r = (id)[r getChild:0];
-            // whoever invokes rule will set parent and child index
-            [r setParent:nil];
-            [r setChildIndex:-1];
-        }
-    }
-    return r;
-}
-
-- (id)becomeRootfromToken:(id<ANTLRToken>)newRoot old:(id)oldRoot
-{
-    return [self becomeRoot:(id)[self create:newRoot] old:oldRoot];
-}
-
-- (id) create:(id<ANTLRToken>)aToken
-{
-    return [ANTLRCommonTree newTreeWithToken:aToken];
-}
-
-- (id)createTree:(NSInteger)tokenType FromToken:(id<ANTLRToken>)fromToken
-{
-    fromToken = [self createToken:fromToken];
-    //((ClassicToken)fromToken).setType(tokenType);
-    [fromToken setType:tokenType];
-    id t = [self create:fromToken];
-    return t;
-}
-
-- (id)createTree:(NSInteger)tokenType FromToken:(id<ANTLRToken>)fromToken Text:(NSString *)text
-{
-    if (fromToken == nil)
-        return [self createTree:tokenType Text:text];
-    fromToken = [self createToken:fromToken];
-    [fromToken setType:tokenType];
-    [fromToken setText:text];
-    id t = [self create:fromToken];
-    return t;
-}
-
-- (id)createTree:(NSInteger)tokenType Text:(NSString *)text
-{
-    id<ANTLRToken> fromToken = [self createToken:tokenType Text:text];
-    id t = (id)[self create:fromToken];
-    return t;
-}
-
-- (NSInteger) getType:(ANTLRCommonTree *) t
-{
-    return [t type];
-}
-
-- (void) setType:(id)t Type:(NSInteger)type
-{
-    @throw [ANTLRNoSuchElementException newException:@"don't know enough about Tree node"];
-}
-
-/** What is the Token associated with this node?  If
- *  you are not using ANTLRCommonTree, then you must
- *  override this in your own adaptor.
- */
-- (id<ANTLRToken>) getToken:(ANTLRCommonTree *) t
-{
-    if ( [t isKindOfClass:[ANTLRCommonTree class]] ) {
-        return [t getToken];
-    }
-    return nil; // no idea what to do
-}
-
-- (NSString *)getText:(ANTLRCommonTree *)t
-{
-    return [t text];
-}
-
-- (void) setText:(id)t Text:(NSString *)text
-{
-    @throw [ANTLRNoSuchElementException newException:@"don't know enough about Tree node"];
-}
-
-- (id) getChild:(id)t At:(NSInteger)index
-{
-    return [(id)t getChild:index ];
-}
-
-- (void) setChild:(id)t At:(NSInteger)index Child:(id)child
-{
-    [(id)t setChild:index With:(id)child];
-}
-
-- (id) deleteChild:(id)t Index:(NSInteger)index
-{
-    return [(id)t deleteChild:index];
-}
-
-- (NSInteger) getChildCount:(id)t
-{
-    return [(id)t getChildCount];
-}
-
-- (id<ANTLRBaseTree>) getParent:(id<ANTLRBaseTree>) t
-{
-    if ( t == nil )
-        return nil;
-    return (id<ANTLRBaseTree>)[t getParent];
-}
-
-- (void) setParent:(id<ANTLRBaseTree>)t With:(id<ANTLRBaseTree>) parent
-{
-    if ( t != nil )
-        [(id<ANTLRBaseTree>) t setParent:(id<ANTLRBaseTree>)parent];
-}
-
-/** What index is this node in the child list? Range: 0..n-1
- *  If your node type doesn't handle this, it's ok but the tree rewrites
- *  in tree parsers need this functionality.
- */
-- (NSInteger) getChildIndex:(id)t
-{
-    return ((ANTLRCommonTree *)t).childIndex;
-}
-
-- (void) setChildIndex:(id)t With:(NSInteger)index
-{
-    ((ANTLRCommonTree *)t).childIndex = index;
-}
-
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t
-{
-    return;
-}
-
-- (NSInteger) getUniqueID:(id)node
-{
-    if ( treeToUniqueIDMap == nil ) {
-        treeToUniqueIDMap = [ANTLRUniqueIDMap newANTLRUniqueIDMap];
-    }
-    NSNumber *prevID = [treeToUniqueIDMap getNode:node];
-    if ( prevID != nil ) {
-        return [prevID integerValue];
-    }
-    NSInteger anID = uniqueNodeID;
-    // ANTLRMapElement *aMapNode = [ANTLRMapElement newANTLRMapElementWithObj1:[NSNumber numberWithInteger:anID] Obj2:node];
-    [treeToUniqueIDMap putID:[NSNumber numberWithInteger:anID] Node:node];
-    uniqueNodeID++;
-    return anID;
-    // GCC makes these nonunique:
-    // return System.identityHashCode(node);
-}
-
-/** Tell me how to create a token for use with imaginary token nodes.
- *  For example, there is probably no input symbol associated with imaginary
- *  token DECL, but you need to create it as a payload or whatever for
- *  the DECL node as in ^(DECL type ID).
- *
- *  If you care what the token payload objects' type is, you should
- *  override this method and any other createToken variant.
- */
-- (id<ANTLRToken>) createToken:(NSInteger)aTType Text:(NSString *)text
-{
-    return nil;
-}
-
-/** Tell me how to create a token for use with imaginary token nodes.
- *  For example, there is probably no input symbol associated with imaginary
- *  token DECL, but you need to create it as a payload or whatever for
- *  the DECL node as in ^(DECL type ID).
- *
- *  This is a variant of createToken where the new token is derived from
- *  an actual real input token.  Typically this is for converting '{'
- *  tokens to BLOCK etc...  You'll see
- *
- *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
- *
- *  If you care what the token payload objects' type is, you should
- *  override this method and any other createToken variant.
- */
-- (id<ANTLRToken>) createToken:(id<ANTLRToken>) fromToken
-{
-    return nil;
-}
-
-/** Track start/stop token for subtree root created for a rule.
- *  Only works with Tree nodes.  For rules that match nothing,
- *  seems like this will yield start=i and stop=i-1 in a nil node.
- *  Might be useful info so I'll not force to be i..i.
- */
-- (void) setTokenBoundaries:(id)aTree From:(id<ANTLRToken>)startToken To:(id<ANTLRToken>)stopToken
-{
-    return;
-}
-
-- (NSInteger) getTokenStartIndex:(id)aTree
-{
-    return -1;
-}
-
-- (NSInteger) getTokenStopIndex:(id)aTree
-{
-    return -1;
-}
-
-#ifdef DONTUSENOMO
-- (NSInteger)getUniqueID
-{
-    return uniqueNodeID;
-}
-
-- (void) setUniqueNodeID:(NSInteger)aUniqueNodeID
-{
-    uniqueNodeID = aUniqueNodeID;
-}
-
-- (ANTLRUniqueIDMap *)getTreeToUniqueIDMap
-{
-    return treeToUniqueIDMap;
-}
-
-- (void) setTreeToUniqueIDMap:(ANTLRUniqueIDMap *)aMapListNode
-{
-    treeToUniqueIDMap = aMapListNode;
-}
-
-#endif
-
-@end
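becomeRoot:old: and rulePostProcessing: above are the two workhorses behind rewrite rules such as ^(ID expr): the first hoists a new node over an existing, possibly nil-rooted subtree, and the second collapses ^(nil x) down to x. A condensed sketch of the call sequence a generated rule body performs; adaptor is any concrete id<ANTLRTreeAdaptor>, and idToken/exprToken are placeholders for matched tokens.

// Assumed setup: a concrete adaptor plus two previously matched tokens.
id root_0 = [adaptor emptyNode];                             // ^(nil ...)
id root_1 = [adaptor becomeRoot:[adaptor create:idToken]
                            old:[adaptor emptyNode]];        // ID becomes a root
[adaptor addChild:[adaptor create:exprToken] toTree:root_1]; // ^(ID expr)
[adaptor addChild:root_1 toTree:root_0];
root_0 = [adaptor rulePostProcessing:root_0];                // ^(nil t) -> t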
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBitSet.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRBitSet.h
deleted file mode 100644
index 8fd75b1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBitSet.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import <CoreFoundation/CoreFoundation.h>
-#import "ANTLRToken.h"
-#import "AMutableArray.h"
-
-#define BITS (sizeof(NSUInteger) * 8)
-#define LOG_BITS ((sizeof(NSUInteger)==8)?6:5)
-
-// A simple wrapper around CoreFoundation bit vectors to shield the rest of the implementation
-// from the specifics of the BitVector initialization and query functions.
-// This is fast, so there is no need to reinvent the wheel just yet.
-
-@interface ANTLRBitSet : NSObject < NSMutableCopying > {
-	__strong CFMutableBitVectorRef bitVector;
-}
-
-#pragma mark Class Methods
-
-+ (ANTLRBitSet *) newANTLRBitSet;
-+ (ANTLRBitSet *) newANTLRBitSetWithType:(ANTLRTokenType)type;
-/** Construct an ANTLRBitSet given the size
- * @param nbits The size of the ANTLRBitSet in bits
- */
-+ (ANTLRBitSet *) newANTLRBitSetWithNBits:(NSUInteger)nbits;
-+ (ANTLRBitSet *) newANTLRBitSetWithArray:(AMutableArray *)types;
-+ (ANTLRBitSet *) newANTLRBitSetWithBits:(const unsigned long long *)theBits Count:(NSUInteger)longCount;
-
-+ (ANTLRBitSet *) of:(NSUInteger)el;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c;
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c And4:(NSUInteger)d;
-
-#pragma mark Initializer
-
-- (ANTLRBitSet *) init;
-- (ANTLRBitSet *) initWithType:(ANTLRTokenType)type;
-- (ANTLRBitSet *) initWithNBits:(NSUInteger)nbits;
-- (ANTLRBitSet *) initWithBitVector:(CFMutableBitVectorRef)theBitVector;
-- (ANTLRBitSet *) initWithBits:(const unsigned long long const*)theBits Count:(NSUInteger)theCount;
-- (ANTLRBitSet *) initWithArrayOfBits:(NSArray *)theArray;
-
-#pragma mark Operations
-- (ANTLRBitSet *) or:(ANTLRBitSet *) aBitSet;
-- (void) orInPlace:(ANTLRBitSet *) aBitSet;
-- (void) add:(NSUInteger) bit;
-- (void) remove:(NSUInteger) bit;
-- (void) setAllBits:(BOOL) aState;
-
-- (NSInteger) numBits;
-- (NSUInteger) size;
-- (void) setSize:(NSUInteger) noOfWords;
-
-#pragma mark Informational
-- (unsigned long long) bitMask:(NSUInteger) bitNumber;
-- (BOOL) member:(NSUInteger)bitNumber;
-- (BOOL) isNil;
-- (NSString *) toString;
-- (NSString *) description;
-
-#pragma mark NSCopying support
-
-- (id) mutableCopyWithZone:(NSZone *) theZone;
-
-
-//private
-- (CFMutableBitVectorRef) _bitVector;
-@property (getter=_bitVector) CFMutableBitVectorRef bitVector;
-@end
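Generated recognizers use the ANTLRBitSet interface above for follow sets: the of:And2:... constructors build small sets inline and member: answers whether a token type belongs to the set. A brief sketch using only calls declared above; TOKEN_A, TOKEN_B, TOKEN_C and nextTokenType stand in for generated token-type constants and the current lookahead.

// Placeholder token-type constants; only the declared ANTLRBitSet calls are used.
ANTLRBitSet *follow = [ANTLRBitSet of:TOKEN_A And2:TOKEN_B];
[follow add:TOKEN_C];
if ( [follow member:nextTokenType] ) {
    // the lookahead token may legally follow here; keep parsing
}
NSLog(@"%@", [follow description]);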
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBitSet.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRBitSet.m
deleted file mode 100644
index 3f77e36..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBitSet.m
+++ /dev/null
@@ -1,322 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRBitSet.h"
-
-@implementation ANTLRBitSet
-#pragma mark Class Methods
-
-+ (ANTLRBitSet *) newANTLRBitSet
-{
-    return [[ANTLRBitSet alloc] init];
-}
-
-+ (ANTLRBitSet *) newANTLRBitSetWithType:(ANTLRTokenType)type
-{
-    return [[ANTLRBitSet alloc] initWithType:type];
-}
-
-/** Construct a ANTLRBitSet given the size
- * @param nbits The size of the ANTLRBitSet in bits
- */
-+ (ANTLRBitSet *) newANTLRBitSetWithNBits:(NSUInteger)nbits
-{
-    return [[ANTLRBitSet alloc] initWithNBits:nbits];
-}
-
-+ (ANTLRBitSet *) newANTLRBitSetWithArray:(AMutableArray *)types
-{
-    return [[ANTLRBitSet alloc] initWithArrayOfBits:types];
-}
-
-+ (ANTLRBitSet *) newANTLRBitSetWithBits:(const unsigned long long *)theBits Count:(NSUInteger)longCount
-{
-    return [[ANTLRBitSet alloc] initWithBits:theBits Count:longCount];
-}
-
-
-+ (ANTLRBitSet *) of:(NSUInteger) el
-{
-    ANTLRBitSet *s = [ANTLRBitSet newANTLRBitSetWithNBits:(el + 1)];
-    [s add:el];
-    return s;
-}
-
-+ (ANTLRBitSet *) of:(NSUInteger) a And2:(NSUInteger) b
-{
-    NSInteger c = (((a>b)?a:b)+1);
-    ANTLRBitSet *s = [ANTLRBitSet newANTLRBitSetWithNBits:c];
-    [s add:a];
-    [s add:b];
-    return s;
-}
-
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c
-{
-    NSUInteger d = ((a>b)?a:b);
-    d = ((c>d)?c:d)+1;
-    ANTLRBitSet *s = [ANTLRBitSet newANTLRBitSetWithNBits:d];
-    [s add:a];
-    [s add:b];
-    [s add:c];
-    return s;
-}
-
-+ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c And4:(NSUInteger)d
-{
-    NSUInteger e = ((a>b)?a:b);
-    NSUInteger f = ((c>d)?c:d);
-    e = ((e>f)?e:f)+1;
-    ANTLRBitSet *s = [ANTLRBitSet newANTLRBitSetWithNBits:e];
-    [s add:a];
-    [s add:b];
-    [s add:c];
-    [s add:d];
-    return s;
-}
-
-// initializer
-#pragma mark Initializer
-
-- (ANTLRBitSet *) init
-{
-	if ((self = [super init]) != nil) {
-		bitVector = CFBitVectorCreateMutable(kCFAllocatorDefault,0);
-	}
-	return self;
-}
-
-- (ANTLRBitSet *) initWithType:(ANTLRTokenType)type
-{
-	if ((self = [super init]) != nil) {
-		bitVector = CFBitVectorCreateMutable(kCFAllocatorDefault,0);
-        if ((CFIndex)type >= CFBitVectorGetCount(bitVector))
-            CFBitVectorSetCount(bitVector, type+1);
-        CFBitVectorSetBitAtIndex(bitVector, type, 1);
-	}
-	return self;
-}
-
-- (ANTLRBitSet *) initWithNBits:(NSUInteger)nbits
-{
-	if ((self = [super init]) != nil) {
-        bitVector = CFBitVectorCreateMutable(kCFAllocatorDefault,0);
-        CFBitVectorSetCount( bitVector, nbits );
-	}
-	return self;
-}
-
-- (ANTLRBitSet *) initWithBitVector:(CFMutableBitVectorRef)theBitVector
-{
-	if ((self = [super init]) != nil) {
-		bitVector = theBitVector;
-	}
-	return self;
-}
-
-// Initialize the bit vector with a constant array of ulonglongs like ANTLR generates.
-// Converts to big endian, because the underlying CFBitVector works like that.
-- (ANTLRBitSet *) initWithBits:(const unsigned long long *)theBits Count:(NSUInteger)longCount
-{
-	if ((self = [super init]) != nil) {
-		unsigned int longNo;
-		CFIndex bitIdx;
-        bitVector = CFBitVectorCreateMutable ( kCFAllocatorDefault, 0 );
-		CFBitVectorSetCount( bitVector, sizeof(unsigned long long)*8*longCount );
-
-		for (longNo = 0; longNo < longCount; longNo++) {
-			for (bitIdx = 0; bitIdx < (CFIndex)sizeof(unsigned long long)*8; bitIdx++) {
-				unsigned long long swappedBits = CFSwapInt64HostToBig(theBits[longNo]);
-				if (swappedBits & (1LL << bitIdx)) {
-					CFBitVectorSetBitAtIndex(bitVector, bitIdx+(longNo*(sizeof(unsigned long long)*8)), 1);
-				}
-			}
-		}
-	}
-	return self;
-}
-
-// Initialize bit vector with an array of anything. Just test the boolValue and set the corresponding bit.
-// Note: This is big-endian!
-- (ANTLRBitSet *) initWithArrayOfBits:(NSArray *)theArray
-{
-	if ((self = [super init]) != nil) {
-        bitVector = CFBitVectorCreateMutable ( kCFAllocatorDefault, 0 );
-		id value;
-		int bit = 0;
-		for (value in theArray) {
-			if ([value boolValue] == YES) {
-                [self add:bit];
-				//CFBitVectorSetBitAtIndex(bitVector, bit, 1);
-			}
-			bit++;
-		}
-	}
-	return self;
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRBitSet" );
-#endif
-	CFRelease(bitVector);
-	[super dealloc];
-}
-
-	// operations
-#pragma mark Operations
-// return a copy of (self|aBitSet)
-- (ANTLRBitSet *) or:(ANTLRBitSet *) aBitSet
-{
-	ANTLRBitSet *bitsetCopy = [self mutableCopyWithZone:nil];
-	[bitsetCopy orInPlace:aBitSet];
-	return bitsetCopy;
-}
-
-// perform a bitwise OR operation in place by changing underlying bit vector, growing it if necessary
-- (void) orInPlace:(ANTLRBitSet *) aBitSet
-{
-	CFIndex selfCnt = CFBitVectorGetCount(bitVector);
-	CFMutableBitVectorRef otherBitVector = [aBitSet _bitVector];
-	CFIndex otherCnt = CFBitVectorGetCount(otherBitVector);
-	CFIndex maxBitCnt = selfCnt > otherCnt ? selfCnt : otherCnt;
-	CFBitVectorSetCount(bitVector,maxBitCnt);		// be sure to grow the CFBitVector manually!
-	
-	CFIndex currIdx;
-	for (currIdx = 0; currIdx < maxBitCnt; currIdx++) {
-		if (CFBitVectorGetBitAtIndex(bitVector, currIdx) | CFBitVectorGetBitAtIndex(otherBitVector, currIdx)) {
-			CFBitVectorSetBitAtIndex(bitVector, currIdx, 1);
-		}
-	}
-}
-
-// set a bit, grow the bit vector if necessary
-- (void) add:(NSUInteger) bit
-{
-	if ((CFIndex)bit >= CFBitVectorGetCount(bitVector))
-		CFBitVectorSetCount(bitVector, bit+1);
-	CFBitVectorSetBitAtIndex(bitVector, bit, 1);
-}
-
-// unset a bit
-- (void) remove:(NSUInteger) bit
-{
-	CFBitVectorSetBitAtIndex(bitVector, bit, 0);
-}
-
-- (void) setAllBits:(BOOL) aState
-{
-    for( NSInteger bit=0; bit < CFBitVectorGetCount(bitVector); bit++ ) {
-        CFBitVectorSetBitAtIndex(bitVector, bit, aState);
-    }
-}
-
-// returns the number of bits in the bit vector.
-- (NSInteger) numBits
-{
-    // return CFBitVectorGetCount(bitVector);
-    return CFBitVectorGetCountOfBit(bitVector, CFRangeMake(0, CFBitVectorGetCount(bitVector)), 1);
-}
-
-// returns the number of bits in the bit vector.
-- (NSUInteger) size
-{
-    return CFBitVectorGetCount(bitVector);
-}
-
-- (void) setSize:(NSUInteger) nBits
-{
-    CFBitVectorSetCount( bitVector, nBits );
-}
-
-#pragma mark Informational
-// return a bitmask representation of this bitvector for easy operations
-- (unsigned long long) bitMask:(NSUInteger) bitNumber
-{
-	return 1LL << bitNumber;
-}
-
-// test a bit (no pun intended)
-- (BOOL) member:(NSUInteger) bitNumber
-{
-	return CFBitVectorGetBitAtIndex(bitVector,bitNumber) ? YES : NO;
-}
-
-// are all bits off?
-- (BOOL) isNil
-{
-	return ((CFBitVectorGetCountOfBit(bitVector, CFRangeMake(0,CFBitVectorGetCount(bitVector)), 1) == 0) ? YES : NO);
-}
-
-// return a string representation of the bit vector, indicating by their bitnumber which bits are set
-- (NSString *) toString
-{
-	CFIndex length = CFBitVectorGetCount(bitVector);
-	CFIndex currBit;
-	NSMutableString *descString = [NSMutableString  stringWithString:@"{"];
-	BOOL haveInsertedBit = NO;
-	for (currBit = 0; currBit < length; currBit++) {
-		if ( CFBitVectorGetBitAtIndex(bitVector, currBit) ) {
-			if (haveInsertedBit) {
-				[descString appendString:@","];
-			}
-			[descString appendFormat:@"%d", currBit];
-			haveInsertedBit = YES;
-		}
-	}
-	[descString appendString:@"}"];
-	return descString;
-}
-
-// debugging aid. GDB invokes this automagically
-- (NSString *) description
-{
-	return [self toString];
-}
-
-	// NSCopying
-#pragma mark NSCopying support
-
-- (id) mutableCopyWithZone:(NSZone *) theZone
-{
-	ANTLRBitSet *newBitSet = [[ANTLRBitSet allocWithZone:theZone] initWithBitVector:CFBitVectorCreateMutableCopy(kCFAllocatorDefault,0,bitVector)];
-	return newBitSet;
-}
-
-- (CFMutableBitVectorRef) _bitVector
-{
-	return bitVector;
-}
-
-@synthesize bitVector;
-@end
-
-NSInteger max(NSInteger a, NSInteger b)
-{
-    return (a>b)?a:b;
-}
-
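
For reference, a minimal sketch of how the ANTLRBitSet API deleted above was typically used; the calls mirror the declarations in ANTLRBitSet.h, and the bit numbers stand in for arbitrary token types (illustrative only, not part of this change):

// Build a follow set and test membership (manual retain/release, as in the rest of this runtime).
ANTLRBitSet *follow = [ANTLRBitSet of:4 And2:7];            // bits 4 and 7 set
[follow add:9];                                             // grows the underlying CFBitVector as needed
BOOL hit = [follow member:7];                               // YES
NSLog(@"%@ (empty=%d)", [follow toString], [follow isNil]); // {4,7,9} (empty=0)
[follow release];
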
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTokenStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTokenStream.h
deleted file mode 100644
index 28d1418..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTokenStream.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenStream.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRCommonToken.h"
-#import "AMutableArray.h"
-
-@interface ANTLRBufferedTokenStream : NSObject <ANTLRTokenStream> 
-{
-__strong id<ANTLRTokenSource> tokenSource;
-    
-    /** Record every single token pulled from the source so we can reproduce
-     *  chunks of it later.  The buffer in LookaheadStream overlaps sometimes
-     *  as its moving window moves through the input.  This list captures
-     *  everything so we can access complete input text.
-     */
-__strong AMutableArray *tokens;
-    
-    /** Track the last mark() call result value for use in rewind(). */
-NSInteger lastMarker;
-    
-    /** The index into the tokens list of the current token (next token
-     *  to consume).  tokens[index] should be LT(1).  index=-1 indicates need
-     *  to initialize with first token.  The ctor doesn't get a token.
-     *  First call to LT(1) or whatever gets the first token and sets index=0;
-     */
-NSInteger index;
-    
-NSInteger range; // how deep have we gone?
-    
-}
-@property (retain, getter=getTokenSource,setter=setTokenSource:) id<ANTLRTokenSource> tokenSource;
-@property (retain, getter=getTokens,setter=setTokens:) AMutableArray *tokens;
-@property (assign, getter=getLastMarker,setter=setLastMarker:) NSInteger lastMarker;
-@property (assign) NSInteger index;
-@property (assign, getter=getRange,setter=setRange:) NSInteger range;
-
-+ (ANTLRBufferedTokenStream *) newANTLRBufferedTokenStream;
-+ (ANTLRBufferedTokenStream *) newANTLRBufferedTokenStreamWith:(id<ANTLRTokenSource>)aSource;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)aSource;
-- (void)dealloc;
-- (id) copyWithZone:(NSZone *)aZone;
-- (NSUInteger)charPositionInLine;
-- (NSUInteger)line;
-- (NSInteger) getRange;
-- (void) setRange:(NSInteger)anInt;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) reset;
-- (void) seek:(NSInteger) anIndex;
-- (NSInteger) size;
-- (void) consume;
-- (void) sync:(NSInteger) i;
-- (void) fetch:(NSInteger) n;
-- (id<ANTLRToken>) getToken:(NSInteger) i;
-- (AMutableArray *)getFrom:(NSInteger)startIndex To:(NSInteger) stopIndex;
-- (NSInteger) LA:(NSInteger)i;
-- (id<ANTLRToken>) LB:(NSInteger) k;
-- (id<ANTLRToken>) LT:(NSInteger) k;
-- (void) setup;
-- (id<ANTLRTokenSource>) getTokenSource;
-- (void) setTokenSource:(id<ANTLRTokenSource>) aTokenSource;
-- (AMutableArray *)getTokens;
-- (NSString *) getSourceName;
-- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex;
-- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex With:(ANTLRBitSet *)types;
-- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithList:(AMutableArray *)types;
-- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithType:(NSInteger)ttype;
-- (NSString *) toString;
-- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startIndex ToToken:(id<ANTLRToken>)stopIndex;
-- (void) fill;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTokenStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTokenStream.m
deleted file mode 100644
index 94802b5..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTokenStream.m
+++ /dev/null
@@ -1,392 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRBufferedTokenStream.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRRuntimeException.h"
-
-extern NSInteger debug;
-
-@implementation ANTLRBufferedTokenStream
-
-@synthesize tokenSource;
-@synthesize tokens;
-@synthesize lastMarker;
-@synthesize index;
-@synthesize range;
-
-+ (ANTLRBufferedTokenStream *) newANTLRBufferedTokenStream
-{
-    return [[ANTLRBufferedTokenStream alloc] init];
-}
-
-+ (ANTLRBufferedTokenStream *) newANTLRBufferedTokenStreamWith:(id<ANTLRTokenSource>)aSource
-{
-    return [[ANTLRBufferedTokenStream alloc] initWithTokenSource:aSource];
-}
-
-- (ANTLRBufferedTokenStream *) init
-{
-	if ((self = [super init]) != nil)
-	{
-        tokenSource = nil;
-        tokens = [[AMutableArray arrayWithCapacity:1000] retain];
-        index = -1;
-        range = -1;
-	}
-	return self;
-}
-
--(id) initWithTokenSource:(id<ANTLRTokenSource>)aSource
-{
-	if ((self = [super init]) != nil)
-	{
-        tokenSource = [aSource retain];
-        tokens = [[AMutableArray arrayWithCapacity:1000] retain];
-        index = -1;
-        range = -1;
-	}
-	return self;
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRBufferedTokenStream *copy;
-    
-    copy = [[[self class] allocWithZone:aZone] init];
-    copy.tokenSource = self.tokenSource;
-    if ( self.tokens )
-        copy.tokens = [tokens copyWithZone:aZone];
-    copy.lastMarker = self.lastMarker;
-    copy.index = self.index;
-    copy.range = self.range;
-    return copy;
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRBufferedTokenStream" );
-#endif
-    if ( tokens ) [tokens release];
-    if ( tokenSource ) [tokenSource release];
-	[super dealloc];
-}
-
-- (NSUInteger)line
-{
-    return ((ANTLRCommonToken *)[tokens objectAtIndex:index]).line;
-}
-
-- (NSUInteger)charPositionInLine
-{
-    return ((ANTLRCommonToken *)[tokens objectAtIndex:index]).charPositionInLine;
-}
-
-- (id<ANTLRTokenSource>) getTokenSource
-{
-    return tokenSource;
-}
-
-- (NSInteger) getRange
-{
-    return range;
-}
-
-- (void) setRange:(NSInteger)anInt
-{
-    range = anInt;
-}
-
-- (NSInteger) mark
-{
-    if ( index == -1 ) {
-        [self setup];
-//        [self fill];
-    }
-    lastMarker = self.index;
-    return lastMarker;
-}
-
-- (void) release:(NSInteger) marker
-{
-    // no resources to release
-}
-
-- (void) rewind:(NSInteger) marker
-{
-    [self seek:marker];
-}
-
-- (void) rewind
-{
-    [self seek:lastMarker];
-}
-
-- (void) reset
-{
-    index = 0;
-    lastMarker = 0;
-}
-
-- (void) seek:(NSInteger) anIndex
-{
-    index = anIndex;
-}
-
-- (NSInteger) size
-{
-    return [tokens count];
-}
-
-/** Move the input pointer to the next incoming token.  The stream
- *  must become active with LT(1) available.  consume() simply
- *  moves the input pointer so that LT(1) points at the next
- *  input symbol. Consume at least one token.
- *
- *  Walk past any token not on the channel the parser is listening to.
- */
-- (void) consume
-{
-    if ( index == -1 ) {
-        [self setup];
-//        [self fill];
-    }
-    index++;
-    [self sync:index];
-}
-
-/** Make sure index i in tokens has a token. */
-- (void) sync:(NSInteger) i
-{
-    // how many more elements we need?
-    NSInteger n = (i - [tokens count]) + 1;
-    if (debug > 1) NSLog(@"[self sync:%d] needs %d\n", i, n);
-    if ( n > 0 )
-        [self fetch:n];
-}
-
-/** add n elements to buffer */
-- (void) fetch:(NSInteger)n
-{
-    for (NSInteger i=1; i <= n; i++) {
-        id<ANTLRToken> t = [tokenSource nextToken];
-        [t setTokenIndex:[tokens count]];
-        if (debug > 1) NSLog(@"adding %@ at index %d\n", [t text], [tokens count]);
-        [tokens addObject:t];
-        if ( t.type == ANTLRTokenTypeEOF )
-            break;
-    }
-}
-
-- (id<ANTLRToken>) getToken:(NSInteger) i
-{
-    if ( i < 0 || i >= [tokens count] ) {
-        @throw [ANTLRNoSuchElementException newException:[NSString stringWithFormat:@"token index %d out of range 0..%d", i, [tokens count]-1]];
-    }
-    return [tokens objectAtIndex:i];
-}
-
-/** Get all tokens from start..stop inclusively */
-- (AMutableArray *)getFrom:(NSInteger)startIndex To:(NSInteger)stopIndex
-{
-    if ( startIndex < 0 || stopIndex < 0 )
-        return nil;
-    if ( index == -1 ) {
-        [self setup];
-//        [self fill];
-    }
-    AMutableArray *subset = [AMutableArray arrayWithCapacity:5];
-    if ( stopIndex >= [tokens count] )
-        stopIndex = [tokens count]-1;
-    for (NSInteger i = startIndex; i <= stopIndex; i++) {
-        id<ANTLRToken>t = [tokens objectAtIndex:i];
-        if ( t.type == ANTLRTokenTypeEOF )
-            break;
-        [subset addObject:t];
-    }
-    return subset;
-}
-
-- (NSInteger) LA:(NSInteger)i
-{
-    return [[self LT:i] type];
-}
-
-- (id<ANTLRToken>) LB:(NSInteger)k
-{
-    if ( (index - k) < 0 )
-        return nil;
-    return [tokens objectAtIndex:(index-k)];
-}
-
-- (id<ANTLRToken>) LT:(NSInteger)k
-{
-    if ( index == -1 ) {
-        [self setup];
-//        [self fill];
-    }
-    if ( k == 0 )
-        return nil;
-    if ( k < 0 )
-        return [self LB:-k];
-    
-    NSInteger i = index + k - 1;
-    [self sync:i];
-    if ( i >= [tokens count] ) { // return EOF token
-                                // EOF must be last token
-        return [tokens objectAtIndex:([tokens count]-1)];
-    }
-    if ( i > range )
-        range = i; 		
-    return [tokens objectAtIndex:i];
-}
-
-- (void) setup
-{
-    [self sync:0];
-    index = 0;
-}
-
-/** Reset this token stream by setting its token source. */
-- (void) setTokenSource:(id<ANTLRTokenSource>) aTokenSource
-{
-    tokenSource = aTokenSource;
-    if ( [tokens count] )
-        [tokens removeAllObjects];
-    index = -1;
-}
-
-- (AMutableArray *)getTokens
-{
-    return tokens;
-}
-
-- (AMutableArray *)getTokensFrom:(NSInteger) startIndex To:(NSInteger) stopIndex
-{
-    return [self getTokensFrom:startIndex To:stopIndex With:(ANTLRBitSet *)nil];
-}
-
-/** Given a start and stop index, return a List of all tokens in
- *  the token type BitSet.  Return null if no tokens were found.  This
- *  method looks at both on and off channel tokens.
- */
-- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex With:(ANTLRBitSet *)types
-{
-    if ( index == -1 ) {
-        [self setup];
-//        [self fill];
-    }
-    if ( stopIndex >= [tokens count] )
-        stopIndex = [tokens count]-1;
-    if ( startIndex < 0 )
-        startIndex = 0;
-    if ( startIndex > stopIndex )
-        return nil;
-    
-    // list = tokens[start:stop]:{Token t, t.getType() in types}
-    AMutableArray *filteredTokens = [AMutableArray arrayWithCapacity:5];
-    for (NSInteger i = startIndex; i <= stopIndex; i++) {
-        id<ANTLRToken>t = [tokens objectAtIndex:i];
-        if ( types == nil || [types member:t.type] ) {
-            [filteredTokens addObject:t];
-        }
-    }
-    if ( [filteredTokens count] == 0 ) {
-        filteredTokens = nil;
-    }
-    return filteredTokens;
-}
-
-- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithType:(NSInteger)ttype
-{
-    return [self getTokensFrom:startIndex To:stopIndex With:[ANTLRBitSet of:ttype]];
-}
-
-- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithList:(AMutableArray *)types
-{
-    return [self getTokensFrom:startIndex To:stopIndex With:[ANTLRBitSet newANTLRBitSetWithArray:types]];
-}
-            
-- (NSString *)getSourceName
-{
-    return [tokenSource getSourceName];
-}
-
-/** Grab *all* tokens from stream and return string */
-- (NSString *) toString
-{
-    if ( index == -1 ) {
-        [self setup];
-    }
-    [self fill];
-    return [self toStringFromStart:0 ToEnd:[tokens count]-1];
-}
-
-- (NSString *) toStringFromStart:(NSInteger)startIdx ToEnd:(NSInteger)stopIdx
-{
-    if ( startIdx < 0 || stopIdx < 0 )
-        return nil;
-    if ( index == -1 ) {
-        [self setup];
-    }
-    if ( stopIdx >= [tokens count] )
-        stopIdx = [tokens count]-1;
-    NSMutableString *buf = [NSMutableString stringWithCapacity:5];
-    for (NSInteger i = startIdx; i <= stopIdx; i++) {
-        id<ANTLRToken>t = [tokens objectAtIndex:i];
-        if ( t.type == ANTLRTokenTypeEOF )
-            break;
-        [buf appendString:[t text]];
-    }
-    return buf;
-}
-
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken
-{
-    if ( startToken != nil && stopToken != nil ) {
-        return [self toStringFromStart:[startToken getTokenIndex] ToEnd:[stopToken getTokenIndex]];
-    }
-    return nil;
-}
-
-/** Get all tokens from lexer until EOF */
-- (void) fill
-{
-    if ( index == -1 ) [self setup];
-    if ( [((ANTLRCommonToken *)[tokens objectAtIndex:index]) type] == ANTLRTokenTypeEOF )
-        return;
-    
-    NSInteger i = index+1;
-    [self sync:i];
-    while ( [((ANTLRCommonToken *)[tokens objectAtIndex:i]) type] != ANTLRTokenTypeEOF ) {
-        i++;
-        [self sync:i];
-    }
-}
-
-@end
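
A minimal sketch of the buffered token stream deleted above in use. Here `lexer` is assumed to be an already-constructed ANTLR-generated lexer conforming to ANTLRTokenSource; it is not defined in this change (illustrative only):

// Buffer every token from the lexer, then walk them with LT/LA/consume.
ANTLRBufferedTokenStream *tokens =
    [ANTLRBufferedTokenStream newANTLRBufferedTokenStreamWith:lexer];
[tokens fill];                                     // pull all tokens up to EOF into the buffer
NSLog(@"buffered %ld tokens from %@", (long)[tokens size], [tokens getSourceName]);
while ( [tokens LA:1] != ANTLRTokenTypeEOF ) {     // LT(1) is always the next token to consume
    id<ANTLRToken> t = [tokens LT:1];
    NSLog(@"%@", [t text]);
    [tokens consume];
}
[tokens release];
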
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.h
deleted file mode 100644
index 07555a5..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.h
+++ /dev/null
@@ -1,157 +0,0 @@
-//
-//  ANTLRBufferedTreeNodeStream.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRCommonTreeNodeStream.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRTreeIterator.h"
-#import "ANTLRIntArray.h"
-#import "AMutableArray.h"
-
-#define DEFAULT_INITIAL_BUFFER_SIZE 100
-#define INITIAL_CALL_STACK_SIZE 10
-
-#ifdef DONTUSENOMO
-@interface ANTLRStreamIterator : ANTLRTreeIterator
-{
-    NSInteger idx;
-    __strong ANTLRBufferedTreeNodeStream *input;
-    __strong AMutableArray *nodes;
-}
-
-+ (id) newANTLRStreamIterator:(ANTLRBufferedTreeNodeStream *) theStream;
-
-- (id) initWithStream:(ANTLRBufferedTreeNodeStream *) theStream;
-
-- (BOOL) hasNext;
-- (id) next;
-- (void) remove;
-@end
-#endif
-
-@interface ANTLRBufferedTreeNodeStream : NSObject <ANTLRTreeNodeStream> 
-{
-	id up;
-	id down;
-	id eof;
-	
-	AMutableArray *nodes;
-	
-	id root; // root
-	
-	id<ANTLRTokenStream> tokens;
-	ANTLRCommonTreeAdaptor *adaptor;
-	
-	BOOL uniqueNavigationNodes;
-	NSInteger index;
-	NSInteger lastMarker;
-	ANTLRIntArray *calls;
-	
-	NSEnumerator *e;
-    id currentSymbol;
-	
-}
-
-@property (retain, getter=getUp, setter=setUp:) id up;
-@property (retain, getter=getDown, setter=setDown:) id down;
-@property (retain, getter=eof, setter=setEof:) id eof;
-@property (retain, getter=getNodes, setter=setNodes:) AMutableArray *nodes;
-@property (retain, getter=getTreeSource, setter=setTreeSource:) id root;
-@property (retain, getter=getTokenStream, setter=setTokenStream:) id<ANTLRTokenStream> tokens;
-@property (retain, getter=getAdaptor, setter=setAdaptor:) ANTLRCommonTreeAdaptor *adaptor;
-@property (assign, getter=getUniqueNavigationNodes, setter=setUniqueNavigationNodes:) BOOL uniqueNavigationNodes;
-@property (assign) NSInteger index;
-@property (assign, getter=getLastMarker, setter=setLastMarker:) NSInteger lastMarker;
-@property (retain, getter=getCalls, setter=setCalls:) ANTLRIntArray *calls;
-@property (retain, getter=getEnum, setter=setEnum:) NSEnumerator *e;
-@property (retain, getter=getCurrentSymbol, setter=setCurrentSymbol:) id currentSymbol;
-
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(ANTLRCommonTree *)tree;
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTreeAdaptor>)adaptor Tree:(ANTLRCommonTree *)tree;
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTreeAdaptor>)adaptor Tree:(ANTLRCommonTree *)tree withBufferSize:(NSInteger)initialBufferSize;
-
-#pragma mark Constructor
-- (id) initWithTree:(ANTLRCommonTree *)tree;
-- (id) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)anAdaptor Tree:(ANTLRCommonTree *)tree;
-- (id) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)anAdaptor Tree:(ANTLRCommonTree *)tree WithBufferSize:(NSInteger)bufferSize;
-
-- (void)dealloc;
-- (id) copyWithZone:(NSZone *)aZone;
-
-// protected methods. DO NOT USE
-#pragma mark Protected Methods
-- (void) fillBuffer;
-- (void) fillBufferWithTree:(ANTLRCommonTree *) tree;
-- (NSInteger) getNodeIndex:(ANTLRCommonTree *) node;
-- (void) addNavigationNode:(NSInteger) type;
-- (id) getNode:(NSUInteger) i;
-- (id) LT:(NSInteger) k;
-- (id) getCurrentSymbol;
-- (id) LB:(NSInteger) i;
-#pragma mark General Methods
-- (NSString *) getSourceName;
-
-- (id<ANTLRTokenStream>) getTokenStream;
-- (void) setTokenStream:(id<ANTLRTokenStream>) tokens;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>) anAdaptor;
-
-- (BOOL)getUniqueNavigationNodes;
-- (void) setUniqueNavigationNodes:(BOOL)aVal;
-
-- (void) consume;
-- (NSInteger) LA:(NSInteger) i;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) seek:(NSInteger) idx;
-
-- (void) push:(NSInteger) i;
-- (NSInteger) pop;
-
-- (void) reset;
-- (NSUInteger) count;
-- (NSEnumerator *) objectEnumerator;
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-
-- (NSString *) toTokenTypeString;
-- (NSString *) toTokenString:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *) toStringFromNode:(id)aStart ToNode:(id)aStop;
-
-// getters and setters
-- (AMutableArray *) getNodes;
-- (id) eof;
-- (void)setEof:(id)anEOF;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.m
deleted file mode 100644
index a8f73d8..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRBufferedTreeNodeStream.m
+++ /dev/null
@@ -1,556 +0,0 @@
-//
-//  ANTLRBufferedTreeNodeStream.m
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRBufferedTreeNodeStream.h"
-#import "ANTLRStreamEnumerator.h"
-#import "ANTLRCommonTreeAdaptor.h"
-
-extern NSInteger debug;
-
-#ifdef DONTUSENOMO
-@implementation ANTLRTreeStreamIterator
-+ newANTLRTreeStreamIteratorWithNodes:(ANTLRBufferedTreeNodeStream *)theStream
-{
-    return[[ANTLRTreeStreamIterator alloc] initWithStream:theStream];
-}
-
-- (id) initWithStream:(ANTLRBufferedTreeNodeStream *)theStream
-{
-    if ((self = [super init]) != nil) {
-        idx = 0;
-        input = theStream;
-        nodes = [theStream getNodes];
-    }
-    return self;
-}
-
-- (BOOL) hasNext
-{
-    return idx < [nodes count];
-}
-
-- (id) next
-{
-    NSInteger current = idx;
-    idx++;
-    if (current < [nodes count]) {
-    }
-    return [nodes getEof];
-}
-
-- (void) remove
-{
-	@throw [ANTLRRuntimeException newException:@"cannot remove nodes from stream"];
-}
-
-@end
-#endif
-
-@implementation ANTLRBufferedTreeNodeStream
-
-@synthesize up;
-@synthesize down;
-@synthesize eof;
-@synthesize nodes;
-@synthesize root;
-@synthesize tokens;
-@synthesize adaptor;
-@synthesize uniqueNavigationNodes;
-@synthesize index;
-@synthesize lastMarker;
-@synthesize calls;
-@synthesize e;
-@synthesize currentSymbol;
-
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(ANTLRCommonTree *) aTree
-{
-    return [((ANTLRBufferedTreeNodeStream *)[ANTLRBufferedTreeNodeStream alloc]) initWithTree:(ANTLRCommonTree *)aTree];
-}
-
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTreeAdaptor>)adaptor Tree:(ANTLRCommonTree *)aTree
-{
-    return [[ANTLRBufferedTreeNodeStream alloc] initWithTreeAdaptor:adaptor Tree:(ANTLRCommonTree *)aTree];
-}
-
-+ (ANTLRBufferedTreeNodeStream *) newANTLRBufferedTreeNodeStream:(id<ANTLRTreeAdaptor>)adaptor Tree:(ANTLRCommonTree *)aTree withBufferSize:(NSInteger)initialBufferSize
-{
-    return [[ANTLRBufferedTreeNodeStream alloc] initWithTreeAdaptor:adaptor Tree:(ANTLRCommonTree *)aTree WithBufferSize:initialBufferSize];
-}
-
--(ANTLRBufferedTreeNodeStream *) init
-{
-	self = [super init];
-	if (self) {
-		index = -1;
-		uniqueNavigationNodes = NO;
-        root = [[ANTLRCommonTree alloc] init];
-        //		tokens = tree;
-        adaptor = [[[ANTLRCommonTreeAdaptor alloc] init] retain];
-        nodes = [[AMutableArray arrayWithCapacity:DEFAULT_INITIAL_BUFFER_SIZE] retain];
-        down = [[adaptor createTree:ANTLRTokenTypeDOWN Text:@"DOWN"] retain];
-        up = [[adaptor createTree:ANTLRTokenTypeUP Text:@"UP"] retain];
-        eof = [[adaptor createTree:ANTLRTokenTypeEOF Text:@"EOF"] retain];
-    }
-	return self;
-}
-
-- (ANTLRBufferedTreeNodeStream *)initWithTree:(ANTLRCommonTree *) aTree
-{
-	self = [super init];
-	if (self) {
-		index = -1;
-		uniqueNavigationNodes = NO;
-        root = aTree;
-        //		tokens = aTree;
-        adaptor = [[[ANTLRCommonTreeAdaptor alloc] init] retain];
-        nodes = [[AMutableArray arrayWithCapacity:DEFAULT_INITIAL_BUFFER_SIZE] retain];
-        down = [[adaptor createTree:ANTLRTokenTypeDOWN Text:@"DOWN"] retain];
-        up = [[adaptor createTree:ANTLRTokenTypeUP Text:@"UP"] retain];
-        eof = [[adaptor createTree:ANTLRTokenTypeEOF Text:@"EOF"] retain];
-    }
-	return self;
-}
-
--(ANTLRBufferedTreeNodeStream *) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)anAdaptor Tree:(ANTLRCommonTree *)aTree
-{
-	self = [super init];
-	if (self) {
-		index = -1;
-		uniqueNavigationNodes = NO;
-        root = aTree;
-        //		tokens = aTree;
-        adaptor = [anAdaptor retain];
-        nodes = [[AMutableArray arrayWithCapacity:DEFAULT_INITIAL_BUFFER_SIZE] retain];
-        down = [[adaptor createTree:ANTLRTokenTypeDOWN Text:@"DOWN"] retain];
-        up = [[adaptor createTree:ANTLRTokenTypeUP Text:@"UP"] retain];
-        eof = [[adaptor createTree:ANTLRTokenTypeEOF Text:@"EOF"] retain];
-    }
-	return self;
-}
-
--(ANTLRBufferedTreeNodeStream *) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)anAdaptor Tree:(ANTLRCommonTree *)aTree WithBufferSize:(NSInteger)bufferSize
-{
-	self = [super init];
-	if (self) {
-        //		down = [adaptor createToken:ANTLRTokenTypeDOWN withText:@"DOWN"];
-        //		up = [adaptor createToken:ANTLRTokenTypeDOWN withText:@"UP"];
-        //		eof = [adaptor createToken:ANTLRTokenTypeDOWN withText:@"EOF"];
-		index = -1;
-		uniqueNavigationNodes = NO;
-        root = aTree;
-        //		tokens = aTree;
-        adaptor = [anAdaptor retain];
-        nodes = [[AMutableArray arrayWithCapacity:bufferSize] retain];
-        down = [[adaptor createTree:ANTLRTokenTypeDOWN Text:@"DOWN"] retain];
-        up = [[adaptor createTree:ANTLRTokenTypeUP Text:@"UP"] retain];
-        eof = [[adaptor createTree:ANTLRTokenTypeEOF Text:@"EOF"] retain];
-	}
-	return self;
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRBufferedTreeNodeStream" );
-#endif
-    if ( adaptor ) [adaptor release];
-    if ( nodes ) [nodes release];
-    if ( root ) [root release];
-    if ( down ) [down release];
-    if ( up ) [up release];
-    if ( eof ) [eof release];
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRBufferedTreeNodeStream *copy;
-    
-    copy = [[[self class] allocWithZone:aZone] init];
-    if ( up )
-        copy.up = [up copyWithZone:aZone];
-    if ( down )
-        copy.down = [down copyWithZone:aZone];
-    if ( eof )
-        copy.eof = [eof copyWithZone:aZone];
-    if ( nodes )
-        copy.nodes = [nodes copyWithZone:aZone];
-    if ( root )
-        copy.root = [root copyWithZone:aZone];
-    if ( tokens )
-        copy.tokens = [tokens copyWithZone:aZone];
-    if ( adaptor )
-        copy.adaptor = [adaptor copyWithZone:aZone];
-    copy.uniqueNavigationNodes = self.uniqueNavigationNodes;
-    copy.index = self.index;
-    copy.lastMarker = self.lastMarker;
-    if ( calls )
-        copy.calls = [calls copyWithZone:aZone];
-    return copy;
-}
-
-// protected methods. DO NOT USE
-#pragma mark Protected Methods
--(void) fillBuffer
-{
-	[self fillBufferWithTree:root];
-	// if (debug > 1) NSLog("revIndex=%@", tokenTypeToStreamIndexesMap);
-	index = 0; // buffer of nodes intialized now
-}
-
--(void) fillBufferWithTree:(ANTLRCommonTree *) aTree
-{
-	BOOL empty = [adaptor isNil:aTree];
-	if (!empty) {
-		[nodes addObject:aTree];
-	}
-	NSInteger n = [adaptor getChildCount:aTree];
-	if (!empty && n > 0) {
-		[self addNavigationNode:ANTLRTokenTypeDOWN];
-	}
-	for (NSInteger c = 0; c < n; c++) {
-		id child = [adaptor getChild:aTree At:c];
-		[self fillBufferWithTree:child];
-	}
-	if (!empty && n > 0) {
-		[self addNavigationNode:ANTLRTokenTypeUP];
-	}
-}
-
--(NSInteger) getNodeIndex:(ANTLRCommonTree *) node
-{
-	if (index == -1) {
-		[self fillBuffer];
-	}
-	for (NSUInteger i = 0; i < [nodes count]; i++) {
-		id t = [nodes objectAtIndex:i];
-		if (t == node) {
-			return i;
-		}
-	}
-	return -1;
-}
-
--(void) addNavigationNode:(NSInteger) type
-{
-	id navNode = nil;
-	if (type == ANTLRTokenTypeDOWN) {
-		if (self.uniqueNavigationNodes) {
-			navNode = [adaptor createToken:ANTLRTokenTypeDOWN Text:@"DOWN"];
-		}
-		else {
-			navNode = down;
-		}
-
-	}
-	else {
-		if (self.uniqueNavigationNodes) {
-			navNode = [adaptor createToken:ANTLRTokenTypeUP Text:@"UP"];
-		}
-		else {
-			navNode = up;
-		}
-	}
-	[nodes addObject:navNode];
-}
-
--(id) getNode:(NSUInteger) i
-{
-	if (index == -1) {
-		[self fillBuffer];
-	}
-	return [nodes objectAtIndex:i];
-}
-
--(id) LT:(NSInteger) k
-{
-	if (index == -1) {
-		[self fillBuffer];
-	}
-	if (k == 0) {
-		return nil;
-	}
-	if (k < 0) {
-		return [self LB:-k];
-	}
-	if ((index + k - 1) >= [nodes count]) {
-		return eof;
-	}
-	return [nodes objectAtIndex:(index + k - 1)];
-}
-
--(id) getCurrentSymbol
-{
-	return [self LT:1];
-}
-
--(id) LB:(NSInteger) k
-{
-	if (k == 0) {
-		return nil;
-	}
-	if ((index - k) < 0) {
-		return nil;
-	}
-	return [nodes objectAtIndex:(index - k)];
-}
-
-- (ANTLRCommonTree *)getTreeSource
-{
-    return root;
-}
-
--(NSString *)getSourceName
-{
-	return [[self getTokenStream] getSourceName];
-}
-
-- (id<ANTLRTokenStream>)getTokenStream
-{
-    return tokens;
-}
-
-- (void) setTokenStream:(id<ANTLRTokenStream>)newtokens
-{
-    tokens = newtokens;
-}
-
-- (id<ANTLRTreeAdaptor>)getTreeAdaptor
-{
-    return adaptor;
-}
-
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-{
-    adaptor = anAdaptor;
-}
-
-- (BOOL)getUniqueNavigationNodes
-{
-    return uniqueNavigationNodes;
-}
-
-- (void) setUniqueNavigationNodes:(BOOL)aVal
-{
-    uniqueNavigationNodes = aVal;
-}
-
--(void) consume
-{
-	if (index == -1) {
-		[self fillBuffer];
-	}
-	index++;
-}
-
--(NSInteger) LA:(NSInteger) i
-{
-	return [adaptor getType:[self LT:i]];
-}
-
--(NSInteger) mark
-{
-	if (index == -1) {
-		[self fillBuffer];
-	}
-	lastMarker = self.index;
-	return lastMarker;
-}
-
--(void) release:(NSInteger) marker
-{
-	// do nothing
-}
-
--(void) rewind:(NSInteger) marker
-{
-	[self seek:marker];
-}
-
--(void) rewind
-{
-	[self seek:lastMarker];
-}
-
--(void) seek:(NSInteger) i
-{
-	if (index == -1) {
-		[self fillBuffer];
-	}
-	index = i;
-}
-
--(void) push:(NSInteger) i
-{
-	if (calls == nil) {
-		calls = [ANTLRIntArray newArrayWithLen:INITIAL_CALL_STACK_SIZE];
-	}
-	[calls push:index];
-	[self seek:i];
-}
-
--(NSInteger) pop
-{
-	NSInteger ret = [calls pop];
-	[self seek:ret];
-	return ret;
-}
-
--(void) reset
-{
-	index = 0;
-	lastMarker = 0;
-	if (calls != nil) {
-		[calls reset];
-	}
-}
-
--(NSUInteger) count
-{
-	if (index == -1) {
-		[self fillBuffer];
-	}
-	return [nodes count];
-}
-
--(NSUInteger) size
-{
-	return [self count];
-}
-
--(NSEnumerator *) objectEnumerator
-{
-	if (e == nil) {
-		e = [[ANTLRStreamEnumerator alloc] initWithNodes:nodes andEOF:eof];
-	}
-	return e;
-}
-
--(void) replaceChildren:(ANTLRCommonTree *) parent From:(NSInteger)startIdx To:(NSInteger)stopIdx With:(ANTLRCommonTree *)aTree
-{
-	if (parent != nil) {
-		[adaptor replaceChildren:parent From:startIdx To:stopIdx With:aTree];
-	}
-}
-
--(NSString *) toTokenTypeString
-{
-	if (index == -1)
-	{
-		[self fillBuffer];
-	}
-	NSMutableString *buf = [NSMutableString stringWithCapacity:10];
-	for (NSUInteger i= 0; i < [nodes count]; i++) {
-		ANTLRCommonTree * aTree = (ANTLRCommonTree *)[self getNode:i];
-		[buf appendFormat:@" %d", [adaptor getType:aTree]];
-	}
-	return buf;
-}
-
--(NSString *) toTokenString:(NSInteger)aStart ToEnd:(NSInteger)aStop
-{
-	if (index == -1) {
-		[self fillBuffer];
-	}
-	NSMutableString *buf = [NSMutableString stringWithCapacity:10];
-	for (NSUInteger i = aStart; i < [nodes count] && i <= aStop; i++) {
-		ANTLRCommonTree * t = (ANTLRCommonTree *)[self getNode:i];
-		[buf appendFormat:@" %d", [adaptor getType:t]];
-	}
-	return buf;
-}
-
--(NSString *) toStringFromNode:(id)aStart ToNode:(id)aStop
-{
-	if (aStart == nil || aStop == nil) {
-		return nil;
-	}
-	if (index == -1) {
-		[self fillBuffer];
-	}
-	
-	// if we have a token stream, use that to dump text in order
-	if ([self getTokenStream] != nil) {
-		NSInteger beginTokenIndex = [adaptor getTokenStartIndex:aStart];
-		NSInteger endTokenIndex = [adaptor getTokenStopIndex:aStop];
-		
-		if ([adaptor getType:aStop] == ANTLRTokenTypeUP) {
-			endTokenIndex = [adaptor getTokenStopIndex:aStart];
-		}
-		else if ([adaptor getType:aStop] == ANTLRTokenTypeEOF) {
-			endTokenIndex = [self count] - 2; //don't use EOF
-		}
-        [tokens toStringFromStart:beginTokenIndex ToEnd:endTokenIndex];
-	}
-	// walk nodes looking for aStart
-	ANTLRCommonTree * aTree = nil;
-	NSUInteger i = 0;
-	for (; i < [nodes count]; i++) {
-		aTree = [nodes objectAtIndex:i];
-		if (aTree == aStart) {
-			break;
-		}
-	}
-	NSMutableString *buf = [NSMutableString stringWithCapacity:10];
-	aTree = [nodes objectAtIndex:i]; // why?
-	while (aTree != aStop) {
-		NSString *text = [adaptor getText:aTree];
-		if (text == nil) {
-			text = [NSString stringWithFormat:@" %d", [adaptor getType:aTree]];
-		}
-		[buf appendString:text];
-		i++;
-		aTree = [nodes objectAtIndex:i];
-	}
-	NSString *text = [adaptor getText:aStop];
-	if (text == nil) {
-		text = [NSString stringWithFormat:@" %d", [adaptor getType:aStop]];
-	}
-	[buf appendString:text];
-	return buf;
-}
-
-// getters and setters
-- (AMutableArray *) getNodes
-{
-    return nodes;
-}
-
-- (id) eof
-{
-    return eof;
-}
-
-- (void) setEof:(id)theEOF
-{
-    eof = theEOF;
-}
-
-@end
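
A minimal sketch of the buffered tree node stream deleted above. Here `tree` is assumed to be the ANTLRCommonTree produced by a tree-building parser and `tokens` its token stream; neither is defined in this change (illustrative only):

// Flatten a tree into a node buffer with DOWN/UP navigation nodes and walk it.
ANTLRBufferedTreeNodeStream *nodes =
    [ANTLRBufferedTreeNodeStream newANTLRBufferedTreeNodeStream:tree];
[nodes setTokenStream:tokens];                     // lets toStringFromNode:ToNode: render original token text
// The first lookahead call triggers fillBuffer, which serializes the tree as
// "node DOWN children... UP" using the navigation nodes created in init.
NSLog(@"node types:%@", [nodes toTokenTypeString]);
NSLog(@"first node type = %ld", (long)[nodes LA:1]);
[nodes release];
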
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCharStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRCharStream.h
deleted file mode 100644
index f7be94b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCharStream.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRIntStream.h"
-
-#define	ANTLRCharStreamEOF -1
-
-
-@protocol ANTLRCharStream < ANTLRIntStream >
-
-- (NSString *) substringWithRange:(NSRange) theRange;
-
-/** Get the ith character of lookahead.  This is the same usually as
- *  LA(i).  This will be used for labels in the generated
- *  lexer code.  I'd prefer to return a char here type-wise, but it's
- *  probably better to be 32-bit clean and be consistent with LA.
- */
-- (NSInteger)LT:(NSInteger) i;
-
-// ANTLR tracks the line information automatically
-
-// Because this stream can rewind, we need to be able to reset the line
-
-// The index of the character relative to the beginning of the line 0..n-1
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCharStreamState.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRCharStreamState.h
deleted file mode 100644
index 0cf872f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCharStreamState.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//
-//  ANTLRCharStreamState.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c)  2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRCharStreamState : NSObject
-{
-NSInteger index;
-NSUInteger line;
-NSUInteger charPositionInLine;
-}
-
-@property (assign) NSInteger index;
-@property (assign) NSUInteger line;
-@property (assign) NSUInteger charPositionInLine;
-
-+ newANTLRCharStreamState;
-
-- (id) init;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCharStreamState.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRCharStreamState.m
deleted file mode 100755
index b2a7e3a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCharStreamState.m
+++ /dev/null
@@ -1,52 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRCharStreamState.h"
-
-
-@implementation ANTLRCharStreamState
-
-@synthesize index;
-@synthesize line;
-@synthesize charPositionInLine;
-
-+ newANTLRCharStreamState
-{
-    return [[ANTLRCharStreamState alloc] init];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil) {
-        index = 0;
-        line = 1;
-        charPositionInLine = 0;
-    }
-    return self;
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonErrorNode.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonErrorNode.h
deleted file mode 100644
index d01af00..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonErrorNode.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//
-//  ANTLRCommonErrorNode.h
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-#import "ANTLRTokenStream.h"
-//#import "ANTLRIntStream.h"
-//#import "ANTLRToken.h"
-#import "ANTLRUnWantedTokenException.h"
-
-@interface ANTLRCommonErrorNode : ANTLRCommonTree
-{
-id<ANTLRIntStream> input;
-id<ANTLRToken> startToken;
-id<ANTLRToken> stopToken;
-ANTLRRecognitionException *trappedException;
-}
-
-+ (id) newANTLRCommonErrorNode:(id<ANTLRTokenStream>)anInput
-                  From:(id<ANTLRToken>)startToken
-                    To:(id<ANTLRToken>)stopToken
-                     Exception:(ANTLRRecognitionException *) e;
-
-- (id) initWithInput:(id<ANTLRTokenStream>)anInput
-                From:(id<ANTLRToken>)startToken
-                  To:(id<ANTLRToken>)stopToken
-           Exception:(ANTLRRecognitionException *) e;
-
-- (void)dealloc;
-- (BOOL) isNil;
-
-- (NSInteger)type;
-- (NSString *)text;
-- (NSString *)toString;
-
-@property (retain) id<ANTLRIntStream> input;
-@property (retain) id<ANTLRToken> startToken;
-@property (retain) id<ANTLRToken> stopToken;
-@property (retain) ANTLRRecognitionException *trappedException;
-@end
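
A minimal sketch of how a tree-building parser's error recovery used the error node deleted above. Here `input`, `start`, `stop` and `re` are assumed to come from the surrounding recovery code and are not defined in this change (illustrative only):

// Wrap the misrecognized token range start..stop in an error node for the AST.
ANTLRCommonErrorNode *errNode =
    [ANTLRCommonErrorNode newANTLRCommonErrorNode:input From:start To:stop Exception:re];
NSLog(@"error node spans: %@", [errNode text]);    // text of the offending token range
NSLog(@"type = %ld", (long)[errNode type]);        // always ANTLRTokenTypeInvalid
[errNode release];
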
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonErrorNode.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonErrorNode.m
deleted file mode 100644
index 17fec33..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonErrorNode.m
+++ /dev/null
@@ -1,159 +0,0 @@
-//
-//  ANTLRCommonErrorNode.m
-//  ANTLR
-//
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRCommonErrorNode.h"
-#import "ANTLRMissingTokenException.h"
-#import "ANTLRNoViableAltException.h"
-#import "ANTLRTreeNodeStream.h"
-#import "ANTLRUnwantedTokenException.h"
-
-@implementation ANTLRCommonErrorNode
-
-+ (id) newANTLRCommonErrorNode:(id<ANTLRTokenStream>)anInput
-                          From:(id<ANTLRToken>)aStartToken
-                            To:(id<ANTLRToken>)aStopToken
-                     Exception:(ANTLRRecognitionException *) e
-{
-    return [[ANTLRCommonErrorNode alloc] initWithInput:anInput From:aStartToken To:aStopToken Exception:e];
-}
-
-- (id) init
-{
-    self = [super init];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-- (id) initWithInput:(id<ANTLRTokenStream>)anInput
-                From:(id<ANTLRToken>)aStartToken
-                  To:(id<ANTLRToken>)aStopToken
-           Exception:(ANTLRRecognitionException *) e
-{
-    self = [super init];
-    if ( self != nil ) {
-        //System.out.println("aStartToken: "+aStartToken+", aStopToken: "+aStopToken);
-        if ( aStopToken == nil ||
-            ([aStopToken getTokenIndex] < [aStartToken getTokenIndex] &&
-             aStopToken.type != ANTLRTokenTypeEOF) )
-        {
-            // sometimes resync does not consume a token (when LT(1) is
-            // in follow set.  So, aStopToken will be 1 to left to aStartToken. adjust.
-            // Also handle case where aStartToken is the first token and no token
-            // is consumed during recovery; LT(-1) will return null.
-            aStopToken = aStartToken;
-        }
-        input = anInput;
-        if ( input ) [input retain];
-        startToken = aStartToken;
-        if ( startToken ) [startToken retain];
-        stopToken = aStopToken;
-        if ( stopToken ) [stopToken retain];
-        trappedException = e;
-        if ( trappedException ) [trappedException retain];
-    }
-    return self;
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRCommonErrorNode" );
-#endif
-    if ( input ) [input release];
-    if ( startToken ) [startToken release];
-    if ( stopToken ) [stopToken release];
-    if ( trappedException ) [trappedException release];
-	[super dealloc];
-}
-
-- (BOOL) isNil
-{
-    return NO;
-}
-
-- (NSInteger)type
-{
-    return ANTLRTokenTypeInvalid;
-}
-
-- (NSString *)text
-{
-    NSString *badText = nil;
-    if ( [startToken isKindOfClass:[self class]] ) {
-        int i = [(id<ANTLRToken>)startToken getTokenIndex];
-        int j = [(id<ANTLRToken>)stopToken getTokenIndex];
-        if ( stopToken.type == ANTLRTokenTypeEOF ) {
-            j = [(id<ANTLRTokenStream>)input size];
-        }
-        badText = [(id<ANTLRTokenStream>)input toStringFromStart:i ToEnd:j];
-    }
-    else if ( [startToken isKindOfClass:[self class]] ) {
-        badText = [(id<ANTLRTreeNodeStream>)input toStringFromNode:startToken ToNode:stopToken];
-    }
-    else {
-        // people should subclass if they alter the tree type so this
-        // next one is for sure correct.
-        badText = @"<unknown>";
-    }
-    return badText;
-}
-
-- (NSString *)toString
-{
-    NSString *aString;
-    if ( [trappedException isKindOfClass:[ANTLRMissingTokenException class]] ) {
-        aString = [NSString stringWithFormat:@"<missing type: %@ >",
-        [(ANTLRMissingTokenException *)trappedException getMissingType]];
-        return aString;
-    }
-    else if ( [trappedException isKindOfClass:[ANTLRUnwantedTokenException class]] ) {
-        aString = [NSString stringWithFormat:@"<extraneous: %@, resync=%@>",
-        [trappedException getUnexpectedToken],
-        [self text]];
-        return aString;
-    }
-    else if ( [trappedException isKindOfClass:[ANTLRMismatchedTokenException class]] ) {
-        aString = [NSString stringWithFormat:@"<mismatched token: %@, resync=%@>", trappedException.token, [self text]];
-        return aString;
-    }
-    else if ( [trappedException isKindOfClass:[ANTLRNoViableAltException class]] ) {
-        aString = [NSString stringWithFormat:@"<unexpected:  %@, resync=%@>", trappedException.token, [self text]];
-        return aString;
-    }
-    aString = [NSString stringWithFormat:@"<error: %@>",[self text]];
-    return aString;
-}
-
-@synthesize input;
-@synthesize startToken;
-@synthesize stopToken;
-@synthesize trappedException;
-@end
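
[Editor's illustration] The pair of files above removes the ObjC port of ANTLR's error node, the tree node a parser inserts to cover the span of tokens it skipped while recovering from a syntax error. The behavior mirrors org.antlr.runtime.tree.CommonErrorNode in the Java runtime; a minimal Java sketch follows, assuming hypothetical token type numbers and a hand-rolled TokenSource in place of a generated lexer:

    import org.antlr.runtime.*;
    import org.antlr.runtime.tree.CommonErrorNode;
    import java.util.*;

    public class ErrorNodeDemo {
        // Stand-in for a generated lexer: hands out a fixed list of tokens.
        static TokenSource sourceOf(final List<Token> toks) {
            final Iterator<Token> it = toks.iterator();
            return new TokenSource() {
                public Token nextToken() { return it.hasNext() ? it.next() : Token.EOF_TOKEN; }
                public String getSourceName() { return "demo"; }
            };
        }

        public static void main(String[] args) {
            int ID = 4;  // hypothetical token type
            Token a = new CommonToken(ID, "a");
            Token b = new CommonToken(ID, "b");
            CommonTokenStream tokens = new CommonTokenStream(sourceOf(Arrays.<Token>asList(a, b)));
            tokens.fill();  // buffer everything so token indexes are assigned

            CommonErrorNode err = new CommonErrorNode(tokens, a, b, new RecognitionException());
            System.out.println(err.isNil());    // false: error nodes are real nodes
            System.out.println(err.getType());  // Token.INVALID_TOKEN_TYPE
            System.out.println(err.getText());  // text of the covered span, here "ab"
            System.out.println(err);            // <error: ab>
        }
    }

Because getType() reports the invalid token type, tree walkers can detect these nodes and skip them rather than treating them as grammar symbols.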
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonToken.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonToken.h
deleted file mode 100644
index 948e129..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonToken.h
+++ /dev/null
@@ -1,129 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRCharStream.h"
-
-@interface ANTLRCommonToken : NSObject < ANTLRToken > {
-	__strong NSString *text;
-	NSInteger type;
-	// information about the Token's position in the input stream
-	NSUInteger line;
-	NSUInteger charPositionInLine;
-	NSUInteger channel;
-	// this token's position in the TokenStream
-	NSInteger index;
-	
-	// indices into the CharStream to avoid copying the text
-	// can manually override the text by using -setText:
-	NSInteger startIndex;
-	NSInteger stopIndex;
-	// the actual input stream this token was found in
-	__strong id<ANTLRCharStream> input;
-}
-
-+ (void) initialize;
-+ (NSInteger) DEFAULT_CHANNEL;
-+ (id<ANTLRToken>)INVALID_TOKEN;
-+ (NSInteger) INVALID_TOKEN_TYPE;
-+ (id<ANTLRToken>) newToken;
-+ (id<ANTLRToken>) newToken:(id<ANTLRCharStream>)anInput
-                       Type:(NSInteger)aTType
-                    Channel:(NSInteger)aChannel
-                      Start:(NSInteger)aStart
-                       Stop:(NSInteger)aStop;
-+ (id<ANTLRToken>) newToken:(ANTLRTokenType)aType;
-+ (id<ANTLRToken>) newToken:(NSInteger)tokenType Text:(NSString *)tokenText;
-+ (id<ANTLRToken>) newTokenWithToken:(ANTLRCommonToken *)fromToken;
-+ (id<ANTLRToken>) eofToken;
-+ (id<ANTLRToken>) skipToken;
-+ (id<ANTLRToken>) invalidToken;
-+ (ANTLRTokenChannel) defaultChannel;
-
-// designated initializer. This is used as the default way to initialize a Token in the generated code.
-- (id) init;
-- (id) initWithInput:(id<ANTLRCharStream>)anInput
-                                Type:(NSInteger)aTType
-                             Channel:(NSInteger)aChannel
-                               Start:(NSInteger)theStart
-                                Stop:(NSInteger)theStop;
-- (id) initWithToken:(id<ANTLRToken>)aToken;
-- (id) initWithType:(ANTLRTokenType)aType;
-- (id) initWithType:(ANTLRTokenType)aTType Text:(NSString *)tokenText;
-
-//---------------------------------------------------------- 
-//  text 
-//---------------------------------------------------------- 
-- (NSString *)text;
-- (void) setText:(NSString *)aText;
-
-//---------------------------------------------------------- 
-//  type 
-//---------------------------------------------------------- 
-- (NSInteger)type;
-- (void) setType:(NSInteger)aType;
-
-//---------------------------------------------------------- 
-//  channel 
-//---------------------------------------------------------- 
-- (NSUInteger)channel;
-- (void) setChannel:(NSUInteger)aChannel;
-
-//---------------------------------------------------------- 
-//  input 
-//---------------------------------------------------------- 
-- (id<ANTLRCharStream>)input;
-- (void) setInput:(id<ANTLRCharStream>)anInput;
-
-- (NSInteger)getStart;
-- (void) setStart: (NSInteger)aStart;
-
-- (NSInteger)getStop;
-- (void) setStop: (NSInteger) aStop;
-
-// the index of this Token into the TokenStream
-- (NSInteger)getTokenIndex;
-- (void) setTokenIndex:(NSInteger)aTokenIndex;
-
-// conform to NSCopying
-- (id) copyWithZone:(NSZone *)theZone;
-
-- (NSString *) description;
-- (NSString *) toString;
-
-@property (retain, getter = text, setter = setText:) NSString *text;
-@property (assign) NSInteger type;
-@property (assign, getter = line, setter = setLine:) NSUInteger line;
-@property (assign, getter=charPositionInLine, setter = setCharPositionInLine:) NSUInteger charPositionInLine;
-@property (assign) NSUInteger channel;
-@property (assign) NSInteger index;
-@property (assign, getter=getStart, setter=setStart:) NSInteger startIndex;
-@property (assign, getter=getStop, setter=setStop:) NSInteger stopIndex;
-@property (retain) id<ANTLRCharStream> input;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonToken.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonToken.m
deleted file mode 100644
index ee0e018..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonToken.m
+++ /dev/null
@@ -1,397 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRCommonToken.h"
-
-static ANTLRCommonToken *SKIP_TOKEN;
-static ANTLRCommonToken *EOF_TOKEN;
-static ANTLRCommonToken *INVALID_TOKEN;
-
-@implementation ANTLRCommonToken
-
-    static NSInteger DEFAULT_CHANNEL = ANTLRTokenChannelDefault;
-    static NSInteger INVALID_TOKEN_TYPE = ANTLRTokenTypeInvalid;
-
-
-@synthesize text;
-@synthesize type;
-@synthesize line;
-@synthesize charPositionInLine;
-@synthesize channel;
-@synthesize index;
-@synthesize startIndex;
-@synthesize stopIndex;
-@synthesize input;
-
-+ (void) initialize
-{
-    EOF_TOKEN = [ANTLRCommonToken newToken:ANTLRTokenTypeEOF Text:@"EOF"];
-    SKIP_TOKEN = [ANTLRCommonToken newToken:ANTLRTokenTypeInvalid Text:@"Skip"];
-    INVALID_TOKEN = [ANTLRCommonToken newToken:ANTLRTokenTypeInvalid Text:@"Invalid"];
-    [EOF_TOKEN retain];
-    [SKIP_TOKEN retain];
-    [INVALID_TOKEN retain];
-}
-
-+ (ANTLRCommonToken *)INVALID_TOKEN
-{
-    return INVALID_TOKEN;
-}
-
-+ (NSInteger) DEFAULT_CHANNEL
-{
-    return DEFAULT_CHANNEL;
-}
-
-+ (NSInteger) INVALID_TOKEN_TYPE
-{
-    return INVALID_TOKEN_TYPE;
-}
-
-+ (ANTLRCommonToken *) newToken
-{
-    return [[ANTLRCommonToken alloc] init];
-}
-
-+ (ANTLRCommonToken *) newToken:(id<ANTLRCharStream>)anInput Type:(NSInteger)aTType Channel:(NSInteger)aChannel Start:(NSInteger)aStart Stop:(NSInteger)aStop
-{
-    return [[ANTLRCommonToken alloc] initWithInput:(id<ANTLRCharStream>)anInput Type:(NSInteger)aTType Channel:(NSInteger)aChannel Start:(NSInteger)aStart Stop:(NSInteger)aStop];
-}
-
-+ (ANTLRCommonToken *) newToken:(ANTLRTokenType)tokenType
-{
-    return( [[ANTLRCommonToken alloc] initWithType:tokenType] );
-}
-
-+ (ANTLRCommonToken *) newToken:(NSInteger)tokenType Text:(NSString *)tokenText
-{
-    return( [[ANTLRCommonToken alloc] initWithType:tokenType Text:tokenText] );
-}
-
-+ (ANTLRCommonToken *) newTokenWithToken:(ANTLRCommonToken *)fromToken
-{
-    return( [[ANTLRCommonToken alloc] initWithToken:fromToken] );
-}
-
-// return the singleton EOF Token 
-+ (id<ANTLRToken>) eofToken
-{
-    if (EOF_TOKEN == nil) {
-        EOF_TOKEN = [[ANTLRCommonToken newToken:ANTLRTokenTypeEOF Text:@"EOF"] retain];
-    }
-    return EOF_TOKEN;
-}
-
-// return the singleton skip Token 
-+ (id<ANTLRToken>) skipToken
-{
-    if (SKIP_TOKEN == nil) {
-        SKIP_TOKEN = [[ANTLRCommonToken newToken:ANTLRTokenTypeInvalid Text:@"Skip"] retain];
-    }
-    return SKIP_TOKEN;
-}
-
-// return the singleton skip Token 
-+ (id<ANTLRToken>) invalidToken
-{
-    if (INVALID_TOKEN == nil) {
-        INVALID_TOKEN = [[ANTLRCommonToken newToken:ANTLRTokenTypeInvalid Text:@"Invalid"] retain];
-    }
-    return SKIP_TOKEN;
-}
-
-// the default channel for this class of Tokens
-+ (ANTLRTokenChannel) defaultChannel
-{
-    return ANTLRTokenChannelDefault;
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil) {
-        input = nil;
-        type = ANTLRTokenTypeInvalid;
-        channel = ANTLRTokenChannelDefault;
-        startIndex = 0;
-        stopIndex = 0;
-    }
-    return self;
-}
-
-// designated initializer
-- (id) initWithInput:(id<ANTLRCharStream>)anInput
-                           Type:(NSInteger)aTType
-                             Channel:(NSInteger)aChannel
-                               Start:(NSInteger)aStart
-                                Stop:(NSInteger)aStop
-{
-    if ((self = [super init]) != nil) {
-        input = anInput;
-        if ( input ) [input retain];
-        type = aTType;
-        channel = aChannel;
-        startIndex = aStart;
-        stopIndex = aStop;
-        if (type == ANTLRTokenTypeEOF)
-            text = @"EOF";
-        else
-            text = [input substringWithRange:NSMakeRange(startIndex, (stopIndex-startIndex)+1)];
-        if ( text ) [text retain];
-    }
-    return self;
-}
-
-- (id) initWithToken:(ANTLRCommonToken *)oldToken
-{
-    if ((self = [super init]) != nil) {
-        text = [NSString stringWithString:oldToken.text];
-        if ( text ) [text retain];
-        type = oldToken.type;
-        line = oldToken.line;
-        index = oldToken.index;
-        charPositionInLine = oldToken.charPositionInLine;
-        channel = oldToken.channel;
-        input = oldToken.input;
-        if ( input ) [input retain];
-        if ( [oldToken isKindOfClass:[ANTLRCommonToken class]] ) {
-            startIndex = oldToken.startIndex;
-            stopIndex = oldToken.stopIndex;
-        }
-    }
-    return self;
-}
-
-- (id) initWithType:(ANTLRTokenType)aTType
-{
-    if ((self = [super init]) != nil) {
-        self.type = aTType;
-    }
-    return self;
-}
-
-- (id) initWithType:(ANTLRTokenType)aTType Text:(NSString *)tokenText
-{
-    if ((self = [super init]) != nil) {
-        self.type = aTType;
-        self.text = [NSString stringWithString:tokenText];
-        if ( text ) [text retain];
-    }
-    return self;
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRCommonToken" );
-#endif
-    if ( input ) [input release];
-    if ( text ) [text release];
-    [super dealloc];
-}
-
-// create a copy, including the text if available
-// the input stream is *not* copied!
-- (id) copyWithZone:(NSZone *)theZone
-{
-    ANTLRCommonToken *copy = [[[self class] allocWithZone:theZone] init];
-    
-    if (text)
-        copy.text = [text copyWithZone:nil];
-    copy.type = type;
-    copy.line = line;
-    copy.charPositionInLine = charPositionInLine;
-    copy.channel = channel;
-    copy.index = index;
-    copy.startIndex = startIndex;
-    copy.stopIndex = stopIndex;
-    copy.input = input;
-    return copy;
-}
-
-
-//---------------------------------------------------------- 
-//  charPositionInLine 
-//---------------------------------------------------------- 
-- (NSUInteger) charPositionInLine
-{
-    return charPositionInLine;
-}
-
-- (void) setCharPositionInLine:(NSUInteger)aCharPositionInLine
-{
-    charPositionInLine = aCharPositionInLine;
-}
-
-//---------------------------------------------------------- 
-//  line 
-//---------------------------------------------------------- 
-- (NSUInteger) line
-{
-    return line;
-}
-
-- (void) setLine:(NSUInteger)aLine
-{
-    line = aLine;
-}
-
-//---------------------------------------------------------- 
-//  text 
-//---------------------------------------------------------- 
-- (NSString *) text
-{
-    if (text != nil) {
-        return text;
-    }
-    if (input == nil) {
-        return nil;
-    }
-    return [input substringWithRange:NSMakeRange(startIndex, (stopIndex-startIndex)+1)];
-}
-
-- (void) setText:(NSString *)aText
-{
-    if (text != aText) {
-        if ( text ) [text release];
-        text = aText;
-        [text retain];
-    }
-}
-
-
-//---------------------------------------------------------- 
-//  type 
-//---------------------------------------------------------- 
-- (NSInteger)type
-{
-    return type;
-}
-
-- (void) setType:(NSInteger)aType
-{
-    type = aType;
-}
-
-//---------------------------------------------------------- 
-//  channel 
-//---------------------------------------------------------- 
-- (NSUInteger)channel
-{
-    return channel;
-}
-
-- (void) setChannel:(NSUInteger)aChannel
-{
-    channel = aChannel;
-}
-
-
-//---------------------------------------------------------- 
-//  input 
-//---------------------------------------------------------- 
-- (id<ANTLRCharStream>) input
-{
-    return input; 
-}
-
-- (void) setInput: (id<ANTLRCharStream>) anInput
-{
-    if (input != anInput) {
-        if ( input ) [input release];
-        [anInput retain];
-    }
-    input = anInput;
-}
-
-
-//---------------------------------------------------------- 
-//  start 
-//---------------------------------------------------------- 
-- (NSInteger) getStart
-{
-    return startIndex;
-}
-
-- (void) setStart: (NSInteger) aStart
-{
-    startIndex = aStart;
-}
-
-//---------------------------------------------------------- 
-//  stop 
-//---------------------------------------------------------- 
-- (NSInteger) getStop
-{
-    return stopIndex;
-}
-
-- (void) setStop: (NSInteger) aStop
-{
-    stopIndex = aStop;
-}
-
-//---------------------------------------------------------- 
-//  index 
-//---------------------------------------------------------- 
-- (NSInteger) getTokenIndex;
-{
-    return index;
-}
-
-- (void) setTokenIndex: (NSInteger) aTokenIndex;
-{
-    index = aTokenIndex;
-}
-
-
-// provide a textual representation for debugging
-- (NSString *) description
-{
-    NSString *channelStr;
-    NSMutableString *txtString;
-
-    channelStr = @"";
-    if ( channel > 0 ) {
-        channelStr = [NSString stringWithFormat:@",channel=%d\n", channel];
-    }
-    if ([self text] != nil) {
-        txtString = [NSMutableString stringWithString:[self text]];
-        [txtString replaceOccurrencesOfString:@"\n" withString:@"\\\\n" options:NSAnchoredSearch range:NSMakeRange(0, [txtString length])];
-        [txtString replaceOccurrencesOfString:@"\r" withString:@"\\\\r" options:NSAnchoredSearch range:NSMakeRange(0, [txtString length])];
-        [txtString replaceOccurrencesOfString:@"\t" withString:@"\\\\t" options:NSAnchoredSearch range:NSMakeRange(0, [txtString length])];
-    } else {
-        txtString = [NSMutableString stringWithString:@"<no text>"];
-    }
-    return [NSString stringWithFormat:@"[@%d, %d:%d='%@',<%d>%@,%d:%d]", index, startIndex, stopIndex, txtString, type, channelStr, line, charPositionInLine];
-}
-
-- (NSString *)toString
-{
-   return [self description];
-}
-
-@end
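
[Editor's illustration] The two files above drop the ObjC counterpart of the runtime's common token, which records type, text, channel, line/column, the token's index in the stream, and start/stop character indexes into the input so the text can be derived on demand instead of copied. A small Java sketch against org.antlr.runtime.CommonToken, with the token type number chosen arbitrarily for illustration:

    import org.antlr.runtime.*;

    public class TokenDemo {
        public static void main(String[] args) {
            int ID = 4;  // hypothetical token type

            // Imaginary token backed directly by a text string.
            CommonToken decl = new CommonToken(ID, "DECL");
            System.out.println(decl.getText());  // DECL

            // Token backed by a slice of the character stream: text is read
            // lazily from the start/stop indexes unless overridden via setText().
            CharStream input = new ANTLRStringStream("int x;");
            CommonToken id = new CommonToken(input, ID, Token.DEFAULT_CHANNEL, 4, 4);
            id.setLine(1);
            id.setCharPositionInLine(4);
            System.out.println(id.getText());  // x
            System.out.println(id);            // something like [@-1,4:4='x',<4>,1:4]
        }
    }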
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTokenStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTokenStream.h
deleted file mode 100644
index c26130e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTokenStream.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenStream.h"
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRBufferedTokenStream.h"
-#import "AMutableDictionary.h"
-
-@interface ANTLRCommonTokenStream : ANTLRBufferedTokenStream < ANTLRTokenStream >
-{
-	__strong AMutableDictionary *channelOverride;
-	NSUInteger channel;
-}
-
-@property (retain, getter=getChannelOverride,setter=setChannelOverride:) AMutableDictionary *channelOverride;
-@property (assign, getter=channel,setter=setChannel:) NSUInteger channel;
-
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStream;
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStreamWithTokenSource:(id<ANTLRTokenSource>)theTokenSource;
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStreamWithTokenSource:(id<ANTLRTokenSource>)theTokenSource
-                                                               Channel:(NSUInteger)aChannel;
-
-- (id) init;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource Channel:(NSUInteger)aChannel;
-
-- (void) consume;
-- (id<ANTLRToken>) LB:(NSInteger)k;
-- (id<ANTLRToken>) LT:(NSInteger)k;
-
-- (NSInteger) skipOffChannelTokens:(NSInteger) i;
-- (NSInteger) skipOffChannelTokensReverse:(NSInteger) i;
-
-- (void)setup;
-
-- (NSInteger) getNumberOfOnChannelTokens;
-
-// - (id<ANTLRTokenSource>) getTokenSource;
-- (void) setTokenSource: (id<ANTLRTokenSource>) aTokenSource;
-
-- (NSUInteger)channel;
-- (void)setChannel:(NSUInteger)aChannel;
-
-- (AMutableDictionary *)channelOverride;
-- (void)setChannelOverride:(AMutableDictionary *)anOverride;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-#ifdef DONTUSENOMO
-- (NSArray *) tokensInRange:(NSRange)aRange;
-- (NSArray *) tokensInRange:(NSRange)aRange inBitSet:(ANTLRBitSet *)aBitSet;
-- (NSArray *) tokensInRange:(NSRange)aRange withTypes:(NSArray *)tokenTypes;
-- (NSArray *) tokensInRange:(NSRange)aRange withType:(NSInteger)tokenType;
-
-- (id<ANTLRToken>) getToken:(NSInteger)i;
-
-- (NSInteger) size;
-- (void) rewind;
-- (void) rewind:(NSInteger)marker;
-- (void) seek:(NSInteger)index;
-
-- (NSString *) toString;
-- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-#endif
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTokenStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTokenStream.m
deleted file mode 100644
index 9ebc325..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTokenStream.m
+++ /dev/null
@@ -1,352 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRToken.h"
-#import "ANTLRCommonTokenStream.h"
-
-
-@implementation ANTLRCommonTokenStream
-
-@synthesize channelOverride;
-@synthesize channel;
-
-#pragma mark Initialization
-
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStream
-{
-    return [[ANTLRCommonTokenStream alloc] init];
-}
-
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStreamWithTokenSource:(id<ANTLRTokenSource>)theTokenSource
-{
-    return [[ANTLRCommonTokenStream alloc] initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource];
-}
-
-+ (ANTLRCommonTokenStream *)newANTLRCommonTokenStreamWithTokenSource:(id<ANTLRTokenSource>)theTokenSource Channel:(NSUInteger)aChannel
-{
-    return [[ANTLRCommonTokenStream alloc] initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource Channel:aChannel];
-}
-
-- (id) init
-{
-	if ((self = [super init]) != nil) {
-		channelOverride = [[AMutableDictionary dictionaryWithCapacity:100] retain];
-		channel = ANTLRTokenChannelDefault;
-	}
-	return self;
-}
-
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource
-{
-	if ((self = [super initWithTokenSource:theTokenSource]) != nil) {
-		channelOverride = [[AMutableDictionary dictionaryWithCapacity:100] retain];
-		channel = ANTLRTokenChannelDefault;
-	}
-	return self;
-}
-
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)theTokenSource Channel:(NSUInteger)aChannel
-{
-	if ((self = [super initWithTokenSource:theTokenSource]) != nil) {
-		channelOverride = [[AMutableDictionary dictionaryWithCapacity:100] retain];
-		channel = aChannel;
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRCommonTokenStream" );
-#endif
-	if ( channelOverride ) [channelOverride release];
-	if ( tokens ) [tokens release];
-	[self setTokenSource:nil];
-	[super dealloc];
-}
-
-/** Always leave index on an on-channel token. */
-- (void) consume
-{
-    if (index == -1) [self setup];
-    index++;
-    [self sync:index];
-    while ( ((ANTLRCommonToken *)[tokens objectAtIndex:index]).channel != channel ) {
-		index++;
-		[self sync:index];
-	}
-}
-
-#pragma mark Lookahead
-
-- (id<ANTLRToken>) LB:(NSInteger)k
-{
-	if ( k == 0 || (index-k) < 0 ) {
-		return nil;
-	}
-	int i = index;
-	int n = 1;
-    // find k good tokens looking backwards
-	while ( n <= k ) {
-		i = [self skipOffChannelTokensReverse:i-1];
-		n++;
-	}
-	if ( i < 0 ) {
-		return nil;
-	}
-	return [tokens objectAtIndex:i];
-}
-
-- (id<ANTLRToken>) LT:(NSInteger)k
-{
-	if ( index == -1 ) [self setup];
-	if ( k == 0 ) return nil;
-	if ( k < 0 ) return [self LB:-k];
-	int i = index;
-	int n = 1;
-	while ( n < k ) {
-		i = [self skipOffChannelTokens:i+1];
-		n++;
-	}
-//	if ( i >= (NSInteger)[tokens count] ) {
-//		return [ANTLRCommonToken eofToken];
-//	}
-    if ( i > range ) range = i;
-	return [tokens objectAtIndex:i];
-}
-
-#pragma mark Channels & Skipping
-
-- (NSInteger) skipOffChannelTokens:(NSInteger) idx
-{
-    [self sync:idx];
-	while ( ((ANTLRCommonToken *)[tokens objectAtIndex:idx]).channel != channel ) {
-		idx++;
-        [self sync:idx];
-	}
-	return idx;
-}
-
-- (NSInteger) skipOffChannelTokensReverse:(NSInteger) i
-{
-	while ( i >= 0 && ((ANTLRCommonToken *)[tokens objectAtIndex:i]).channel != channel ) {
-		i--;
-	}
-	return i;
-}
-
-- (void) setup
-{
-    index = 0;
-    [self sync:0];
-    int i = 0;
-    while ( ((ANTLRCommonToken *)[tokens objectAtIndex:i]).channel != channel ) {
-        i++;
-        [self sync:i];
-    }
-	// leave index pointing at first token on channel
-    index = i;
-}
-
-- (NSInteger) getNumberOfOnChannelTokens
-{
-    NSInteger n = 0;
-    [self fill];
-    for( int i = 0; i < [tokens count]; i++ ) {
-        ANTLRCommonToken *t = [tokens objectAtIndex:i];
-        if ( t.channel == channel )
-            n++;
-        if ( t.type == ANTLRTokenTypeEOF )
-            break;
-    }
-    return n;
-}
-
-/** Reset this token stream by setting its token source. */
-- (void) setTokenSource:(id<ANTLRTokenSource>)aTokenSource
-{
-    [super setTokenSource:aTokenSource];
-    channel = ANTLRTokenChannelDefault;
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRCommonTokenStream *copy;
-	
-    //    copy = [[[self class] allocWithZone:aZone] init];
-    copy = [super copyWithZone:aZone]; // allocation occurs in ANTLRBaseTree
-    if ( self.channelOverride )
-        copy.channelOverride = [channelOverride copyWithZone:aZone];
-    copy.channel = channel;
-    return copy;
-}
-
-- (NSUInteger)channel
-{
-    return channel;
-}
-
-- (void)setChannel:(NSUInteger)aChannel
-{
-    channel = aChannel;
-}
-
-- (AMutableDictionary *)channelOverride
-{
-    return channelOverride;
-}
-
-- (void)setChannelOverride:(AMutableDictionary *)anOverride
-{
-    channelOverride = anOverride;
-}
-
-#ifdef DONTUSENOMO
-#pragma mark Token access
-
-- (NSArray *) tokensInRange:(NSRange)aRange
-{
-	return [tokens subarrayWithRange:aRange];
-}
-
-#pragma mark Accessors
-
-- (id<ANTLRTokenSource>) getTokenSource
-{
-    return tokenSource; 
-}
-
-- (NSArray *) tokensInRange:(NSRange)aRange inBitSet:(ANTLRBitSet *)aBitSet
-{
-	unsigned int startIndex = aRange.location;
-	unsigned int stopIndex = aRange.location+aRange.length;
-	if ( index == -1 ) {
-		[self setup];
-	}
-	if (stopIndex >= [tokens count]) {
-		stopIndex = [tokens count] - 1;
-	}
-	AMutableArray *filteredTokens = [AMutableArray arrayWithCapacity:100];
-	unsigned int i=0;
-	for (i = startIndex; i<=stopIndex; i++) {
-		id<ANTLRToken> token = [tokens objectAtIndex:i];
-		if (aBitSet == nil || [aBitSet member:token.type]) {
-			[filteredTokens addObject:token];
-		}
-	}
-	if ([filteredTokens count]) {
-		return filteredTokens;
-	} else {
-		[filteredTokens release];
-		return nil;
-	}
-}
-
-- (NSArray *) tokensInRange:(NSRange)aRange withTypes:(NSArray *)tokenTypes
-{
-	ANTLRBitSet *bits = [[ANTLRBitSet alloc] initWithArrayOfBits:tokenTypes];
-	NSArray *returnTokens = [[self tokensInRange:aRange inBitSet:bits] retain];
-	[bits release];
-	return returnTokens;
-}
-
-- (NSArray *) tokensInRange:(NSRange)aRange withType:(NSInteger)tokenType
-{
-	ANTLRBitSet *bits = [[ANTLRBitSet alloc] init];
-	[bits add:tokenType];
-	NSArray *returnTokens = [[self tokensInRange:aRange inBitSet:bits] retain];
-	[bits release];
-	return returnTokens;
-}
-
-- (id<ANTLRToken>) getToken:(NSInteger)i
-{
-	return [tokens objectAtIndex:i];
-}
-
-- (NSInteger) size
-{
-	return [tokens count];
-}
-
-- (void) rewind
-{
-	[self seek:lastMarker];
-}
-
-- (void) rewind:(NSInteger)marker
-{
-	[self seek:marker];
-}
-
-- (void) seek:(NSInteger)anIndex
-{
-	index = anIndex;
-}
-#pragma mark toString routines
-
-- (NSString *) toString
-{
-	if ( index == -1 ) {
-		[self setup];
-	}
-	return [self toStringFromStart:0 ToEnd:[tokens count]];
-}
-
-- (NSString *) toStringFromStart:(NSInteger)startIdx ToEnd:(NSInteger) stopIdx
-{
-    NSMutableString *stringBuffer;
-    id<ANTLRToken> t;
-
-    if ( startIdx < 0 || stopIdx < 0 ) {
-        return nil;
-    }
-    if ( index == -1 ) {
-        [self setup];
-    }
-    if ( stopIdx >= [tokens count] ) {
-        stopIdx = [tokens count]-1;
-    }
-    stringBuffer = [NSMutableString stringWithCapacity:30];
-    for (int i = startIdx; i <= stopIdx; i++) {
-        t = (id<ANTLRToken>)[tokens objectAtIndex:i];
-        [stringBuffer appendString:[t text]];
-    }
-    return stringBuffer;
-}
-
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken
-{
-	if (startToken && stopToken) {
-		int startIdx = [startToken getTokenIndex];
-		int stopIdx = [stopToken getTokenIndex];
-		return [self toStringFromStart:startIdx ToEnd:stopIdx];
-	}
-	return nil;
-}
-#endif
-
-@end
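
[Editor's illustration] The token stream removed above buffers every token produced by a TokenSource and presents only the tokens on one channel: consume() and LT() step over hidden-channel tokens such as whitespace and comments, which is what lets grammars shunt those tokens aside without losing them. A Java sketch of the same behavior using org.antlr.runtime.CommonTokenStream; the token type numbers and the tiny in-memory TokenSource are assumptions made so the example runs without a generated lexer:

    import org.antlr.runtime.*;
    import java.util.*;

    public class ChannelDemo {
        // Stand-in for a generated lexer.
        static TokenSource sourceOf(final List<Token> toks) {
            final Iterator<Token> it = toks.iterator();
            return new TokenSource() {
                public Token nextToken() { return it.hasNext() ? it.next() : Token.EOF_TOKEN; }
                public String getSourceName() { return "demo"; }
            };
        }

        public static void main(String[] args) {
            int ID = 4, WS = 5;  // hypothetical token types
            CommonToken a  = new CommonToken(ID, "a");
            CommonToken sp = new CommonToken(WS, " ");
            sp.setChannel(Token.HIDDEN_CHANNEL);  // park whitespace off the default channel
            CommonToken b  = new CommonToken(ID, "b");

            CommonTokenStream tokens =
                new CommonTokenStream(sourceOf(Arrays.<Token>asList(a, sp, b)));
            System.out.println(tokens.LT(1).getText());  // a
            tokens.consume();
            System.out.println(tokens.LT(1).getText());  // b -- the hidden token is skipped
        }
    }

The hidden token stays in the buffer (tokens.get(1) still returns it); it is only invisible to channel-filtered lookahead.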
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTree.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTree.h
deleted file mode 100644
index 2becb76..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTree.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonToken.h"
-#import "ANTLRBaseTree.h"
-
-@interface ANTLRCommonTree : ANTLRBaseTree <ANTLRTree> {
-	__strong ANTLRCommonToken *token;
-	NSInteger startIndex;
-	NSInteger stopIndex;
-    __strong ANTLRCommonTree *parent;
-    NSInteger childIndex;
-}
-
-+ (ANTLRCommonTree *) invalidNode;
-+ (ANTLRCommonTree *) newTree;
-+ (ANTLRCommonTree *) newTreeWithTree:(ANTLRCommonTree *)aTree;
-+ (ANTLRCommonTree *) newTreeWithToken:(ANTLRCommonToken *)aToken;
-+ (ANTLRCommonTree *) newTreeWithTokenType:(NSInteger)tokenType;
-+ (ANTLRCommonTree *) newTreeWithTokenType:(NSInteger)aTType Text:(NSString *)theText;
-
-- (id) init;
-- (id) initWithTreeNode:(ANTLRCommonTree *)aNode;
-- (id) initWithToken:(ANTLRCommonToken *)aToken;
-- (id) initWithTokenType:(NSInteger)aTokenType;
-- (id) initWithTokenType:(NSInteger)aTokenType Text:(NSString *)theText;
-
-- (id<ANTLRBaseTree>) copyWithZone:(NSZone *)aZone;
-
-- (BOOL) isNil;
-
-- (ANTLRCommonToken *) getToken;
-- (void) setToken:(ANTLRCommonToken *)aToken;
-- (ANTLRCommonToken *) dupNode;
-- (NSInteger)type;
-- (NSString *)text;
-- (NSUInteger)line;
-- (void) setLine:(NSUInteger)aLine;
-- (NSUInteger)charPositionInLine;
-- (void) setCharPositionInLine:(NSUInteger)pos;
-- (ANTLRCommonTree *) getParent;
-- (void) setParent:(ANTLRCommonTree *) t;
-
-#ifdef DONTUSENOMO
-- (NSString *) treeDescription;
-#endif
-- (NSString *) description;
-- (void) setUnknownTokenBoundaries;
-- (NSInteger) getTokenStartIndex;
-- (void) setTokenStartIndex: (NSInteger) aStartIndex;
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex: (NSInteger) aStopIndex;
-
-/*
- @property (retain, getter=getANTLRCommonToken, setter=setANTLRCommonToken:) ANTLRCommonToken *token;
- @property (assign, getter=getTokenStartIndex, setter=setTokenStartIndex:) NSInteger startIndex;
- @property (assign, getter=getTokenStopIndex, setter=setTokenStopIndex:) NSInteger stopIndex;
- @property (retain, getter=getParent, setter=setParent:) id<ANTLRBaseTree> parentparent;
- @property (assign, getter=getChildIndex, setter=setChildIndex:) NSInteger childIndex;
- */
-
-@property (retain) ANTLRCommonToken *token;
-@property (assign) NSInteger startIndex;
-@property (assign) NSInteger stopIndex;
-@property (retain) ANTLRCommonTree *parent;
-@property (assign) NSInteger childIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTree.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTree.m
deleted file mode 100644
index 68c1fc1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTree.m
+++ /dev/null
@@ -1,345 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRCommonTree.h"
-
-
-@implementation ANTLRCommonTree
-
-+ (ANTLRCommonTree *)INVALID_NODE
-{
-	return [[ANTLRCommonTree alloc] initWithToken:[ANTLRCommonToken invalidToken]];
-}
-
-+ (ANTLRCommonTree *)invalidNode
-{
-    // Had to cast to ANTLRCommonTree * here, because GCC is dumb.
-	return [[ANTLRCommonTree alloc] initWithToken:ANTLRCommonToken.INVALID_TOKEN];
-}
-
-+ (ANTLRCommonTree *)newTree
-{
-    return [[ANTLRCommonTree alloc] init];
-}
-
-+ (ANTLRCommonTree *)newTreeWithTree:(ANTLRCommonTree *)aTree
-{
-    return [[ANTLRCommonTree alloc] initWithTreeNode:aTree];
-}
-
-+ (ANTLRCommonTree *)newTreeWithToken:(id<ANTLRToken>)aToken
-{
-	return [[ANTLRCommonTree alloc] initWithToken:aToken];
-}
-
-+ (ANTLRCommonTree *)newTreeWithTokenType:(NSInteger)aTType
-{
-	return [[ANTLRCommonTree alloc] initWithTokenType:(NSInteger)aTType];
-}
-
-+ (ANTLRCommonTree *)newTreeWithTokenType:(NSInteger)aTType Text:(NSString *)theText
-{
-	return [[ANTLRCommonTree alloc] initWithTokenType:(NSInteger)aTType Text:theText];
-}
-
-- (id)init
-{
-	self = (ANTLRCommonTree *)[super init];
-	if ( self != nil ) {
-        token = nil;
-		startIndex = -1;
-		stopIndex = -1;
-        parent = nil;
-        childIndex = -1;
-	}
-	return (ANTLRCommonTree *)self;
-}
-
-- (id)initWithTreeNode:(ANTLRCommonTree *)aNode
-{
-	self = (ANTLRCommonTree *)[super init];
-	if ( self != nil ) {
-		token = aNode.token;
-        if ( token ) [token retain];
-		startIndex = aNode.startIndex;
-		stopIndex = aNode.stopIndex;
-        parent = nil;
-        childIndex = -1;
-	}
-	return self;
-}
-
-- (id)initWithToken:(id<ANTLRToken>)aToken
-{
-	self = (ANTLRCommonTree *)[super init];
-	if ( self != nil ) {
-		token = aToken;
-        if ( token ) [token retain];
-		startIndex = -1;
-		stopIndex = -1;
-        parent = nil;
-        childIndex = -1;
-	}
-	return self;
-}
-
-- (id)initWithTokenType:(NSInteger)aTokenType
-{
-	self = (ANTLRCommonTree *)[super init];
-	if ( self != nil ) {
-		token = [[ANTLRCommonToken newToken:aTokenType] retain];
-//		startIndex = token.startIndex;
-		startIndex = -1;
-//		stopIndex = token.stopIndex;
-		stopIndex = -1;
-        parent = nil;
-        childIndex = -1;
-	}
-	return self;
-}
-
-- (id) initWithTokenType:(NSInteger)aTokenType Text:(NSString *)theText
-{
-	self = (ANTLRCommonTree *)[super init];
-	if ( self != nil ) {
-		token = [[ANTLRCommonToken newToken:aTokenType Text:theText] retain];
-//		startIndex = token.startIndex;
-		startIndex = -1;
-//		stopIndex = token.stopIndex;
-		stopIndex = -1;
-        parent = nil;
-        childIndex = -1;
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-    if ( token ) {
-        [token release];
-        token = nil;
-    }
-    if ( parent ) {
-        [parent release];
-        parent = nil;
-    }
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRCommonTree *copy;
-	
-    //    copy = [[[self class] allocWithZone:aZone] init];
-    copy = [super copyWithZone:aZone]; // allocation occurs in ANTLRBaseTree
-    if ( self.token )
-        copy.token = [self.token copyWithZone:aZone];
-    copy.startIndex = startIndex;
-    copy.stopIndex = stopIndex;
-    copy.parent = (ANTLRCommonTree *)[self.parent copyWithZone:aZone];
-    copy.childIndex = childIndex;
-    return copy;
-}
-
-- (BOOL) isNil
-{
-	return token == nil;
-}
-
-- (ANTLRCommonToken *) getToken
-{
-	return token;
-}
-
-- (void) setToken:(ANTLRCommonToken *) aToken
-{
-	if ( token != aToken ) {
-		if ( token ) [token release];
-		[aToken retain];
-		token = aToken;
-	}
-}
-
-- (ANTLRCommonTree *) dupNode
-{
-    return [ANTLRCommonTree newTreeWithTree:self ];
-}
-
-- (NSInteger)type
-{
-	if (token)
-		return token.type;
-	return ANTLRTokenTypeInvalid;
-}
-
-- (NSString *)text
-{
-	if (token)
-		return token.text;
-	return nil;
-}
-
-- (NSUInteger)line
-{
-	if (token)
-		return token.line;
-	return 0;
-}
-
-- (void) setLine:(NSUInteger)aLine
-{
-    if (token)
-        token.line = aLine;
-}
-
-- (NSUInteger)charPositionInLine
-{
-	if (token)
-		return token.charPositionInLine;
-	return 0;
-}
-
-- (void) setCharPositionInLine:(NSUInteger)pos
-{
-    if (token)
-        token.charPositionInLine = pos;
-}
-
-- (NSInteger) getTokenStartIndex
-{
-	if ( startIndex == -1 && token != nil ) {
-		return [token getTokenIndex];
-	}
-    return startIndex;
-}
-
-- (void) setTokenStartIndex: (NSInteger) aStartIndex
-{
-    startIndex = aStartIndex;
-}
-
-- (NSInteger) getTokenStopIndex
-{
-	if ( stopIndex == -1 && token != nil ) {
-		return [token getTokenIndex];
-	}
-    return stopIndex;
-}
-
-- (void) setTokenStopIndex: (NSInteger) aStopIndex
-{
-    stopIndex = aStopIndex;
-}
-
-#ifdef DONTUSENOMO
-- (NSString *) treeDescription
-{
-	if (children) {
-		NSMutableString *desc = [NSMutableString stringWithString:@"(^"];
-		[desc appendString:[self description]];
-		unsigned int childIdx;
-		for (childIdx = 0; childIdx < [children count]; childIdx++) {
-			[desc appendFormat:@"%@", [[children objectAtIndex:childIdx] treeDescription]];
-		}
-		[desc appendString:@")"];
-		return desc;
-	} else {
-		return [self description];
-	}
-}
-#endif
-
-/** For every node in this subtree, make sure it's start/stop token's
- *  are set.  Walk depth first, visit bottom up.  Only updates nodes
- *  with at least one token index < 0.
- */
-- (void) setUnknownTokenBoundaries
-{
-    if ( children == nil ) {
-        if ( startIndex<0 || stopIndex<0 ) {
-            startIndex = stopIndex = [token getTokenIndex];
-        }
-        return;
-    }
-    for (NSUInteger i=0; i < [children count]; i++) {
-        [[children objectAtIndex:i] setUnknownTokenBoundaries];
-    }
-    if ( startIndex >= 0 && stopIndex >= 0 )
-         return; // already set
-    if ( [children count] > 0 ) {
-        ANTLRCommonTree *firstChild = (ANTLRCommonTree *)[children objectAtIndex:0];
-        ANTLRCommonTree *lastChild = (ANTLRCommonTree *)[children objectAtIndex:[children count]-1];
-        startIndex = [firstChild getTokenStartIndex];
-        stopIndex = [lastChild getTokenStopIndex];
-    }
-}
-
-- (NSInteger) getChildIndex
-{
-    return childIndex;
-}
-
-- (ANTLRCommonTree *) getParent
-{
-    return parent;
-}
-
-- (void) setParent:(ANTLRCommonTree *) t
-{
-    parent = t;
-}
-
-- (void) setChildIndex:(NSInteger) anIndex
-{
-    childIndex = anIndex;
-}
-
-- (NSString *) description
-{
-    return [self toString];
-}
-
-- (NSString *) toString
-{
-    if ( [self isNil] ) {
-        return @"nil";
-    }
-    if ( [self type] == ANTLRTokenTypeInvalid ) {
-        return @"<errornode>";
-    }
-    if ( token==nil ) {
-        return nil;
-    }
-    return token.text;
-}
-
-@synthesize token;
-@synthesize startIndex;
-@synthesize stopIndex;
-@synthesize parent;
-@synthesize childIndex;
-
-@end
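
[Editor's illustration] The tree node implementation deleted above wraps a token as an AST node and caches the start/stop token indexes of the subtree it roots; a node with no token at all is a "nil" node, used as the root of flat child lists. A brief Java sketch with org.antlr.runtime.tree.CommonTree, again with made-up token type numbers:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;

    public class TreeDemo {
        public static void main(String[] args) {
            int PLUS = 4, INT = 5;  // hypothetical token types

            CommonTree root = new CommonTree(new CommonToken(PLUS, "+"));
            root.addChild(new CommonTree(new CommonToken(INT, "1")));
            root.addChild(new CommonTree(new CommonToken(INT, "2")));

            System.out.println(root.toStringTree());        // (+ 1 2)
            System.out.println(root.getChildCount());       // 2
            System.out.println(root.getChild(0).getText()); // 1

            // A token-less node is "nil"; parsers use it to hold sibling lists.
            CommonTree list = new CommonTree();
            System.out.println(list.isNil());               // true
        }
    }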
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.h
deleted file mode 100644
index e79f0ab..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRBaseTreeAdaptor.h"
-
-@interface ANTLRCommonTreeAdaptor : ANTLRBaseTreeAdaptor {
-}
-
-+ (ANTLRCommonTree *) newEmptyTree;
-+ (ANTLRCommonTreeAdaptor *)newTreeAdaptor;
-- (id) init;
-- (ANTLRCommonTree *)dupNode:(ANTLRCommonTree *)t;   
-
-- (ANTLRCommonTree *) create:(id<ANTLRToken>) payload;
-//- (ANTLRCommonTree *) createTree:(NSInteger)tokenType fromToken:(ANTLRCommonToken *)aToken;
-//- (ANTLRCommonTree *) createTree:(NSInteger)tokenType fromToken:(ANTLRCommonToken *)aToken Text:(NSString *)text;
-- (id<ANTLRToken>)createToken:(NSInteger)tokenType Text:(NSString *)text;
-- (id<ANTLRToken>)createToken:(id<ANTLRToken>)fromToken;
-- (void) setTokenBoundaries:(ANTLRCommonTree *)t From:(id<ANTLRToken>)startToken To:(id<ANTLRToken>)stopToken;
-- (NSInteger)getTokenStartIndex:(ANTLRCommonTree *)t;
-- (NSInteger)getTokenStopIndex:(ANTLRCommonTree *)t;
-- (NSString *)getText:(ANTLRCommonTree *)t;
-- (void)setText:(ANTLRCommonTree *)t Text:(NSString *)text;
-- (NSInteger)getType:(ANTLRCommonTree *)t;
-- (void) setType:(ANTLRCommonTree *)t Type:(NSInteger)tokenType;
-- (id<ANTLRToken>)getToken:(ANTLRCommonTree *)t;
-- (ANTLRCommonTree *)getChild:(ANTLRCommonTree *)t At:(NSInteger)i;
-- (void) setChild:(ANTLRCommonTree *)t At:(NSInteger)i Child:(ANTLRCommonTree *)child;
-- (NSInteger)getChildCount:(ANTLRCommonTree *)t;
-- (ANTLRCommonTree *)getParent:(ANTLRCommonTree *)t;
-- (void)setParent:(ANTLRCommonTree *)t With:(ANTLRCommonTree *)parent;
-- (NSInteger)getChildIndex:(ANTLRCommonTree *)t;
-- (void)setChildIndex:(ANTLRCommonTree *)t With:(NSInteger)index;
-- (void)replaceChildren:(ANTLRCommonTree *)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(ANTLRCommonTree *)t;
-- (id)copyWithZone:(NSZone *)zone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.m
deleted file mode 100644
index 7609698..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeAdaptor.m
+++ /dev/null
@@ -1,240 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRCommonTreeAdaptor.h"
-
-@implementation ANTLRCommonTreeAdaptor
-
-+ (ANTLRCommonTree *) newEmptyTree;
-{
-    return [ANTLRCommonTree newTree];
-}
-
-+ (ANTLRCommonTreeAdaptor *)newTreeAdaptor
-{
-    return[[ANTLRCommonTreeAdaptor alloc] init];
-}
-
-- (id) init
-{
-    self = [super init];
-    if (self) {
-    }
-    return self;
-}
-
-/** Duplicate a node.  This is part of the factory;
- *	override if you want another kind of node to be built.
- *
- *  I could use reflection to prevent having to override this
- *  but reflection is slow.
- */
-- (id) dupNode:(id<ANTLRBaseTree>)t
-{
-    if ( t==nil )
-        return nil;
-    return [ANTLRCommonTree newTree:t];
-}
-
-/** Tell me how to create a token for use with imaginary token nodes.
- *  For example, there is probably no input symbol associated with imaginary
- *  token DECL, but you need to create it as a payload or whatever for
- *  the DECL node as in ^(DECL type ID).
- *
- *  This is a variant of createToken where the new token is derived from
- *  an actual real input token.  Typically this is for converting '{'
- *  tokens to BLOCK etc...  You'll see
- *
- *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
- *
- *  If you care what the token payload objects' type is, you should
- *  override this method and any other createToken variant.
- */
-- (ANTLRCommonTree *) create:(ANTLRCommonToken *)aToken
-{
-    return [ANTLRCommonTree newTreeWithToken:aToken];
-}
-
-/** Tell me how to create a token for use with imaginary token nodes.
- *  For example, there is probably no input symbol associated with imaginary
- *  token DECL, but you need to create it as a payload or whatever for
- *  the DECL node as in ^(DECL type ID).
- *
- *  If you care what the token payload objects' type is, you should
- *  override this method and any other createToken variant.
- */
-- (ANTLRCommonTree *)createTree:(NSInteger)tokenType Text:(NSString *)text
-{
-    return [ANTLRCommonTree newTreeWithTokenType:tokenType Text:text];
-}
-
-- (id<ANTLRToken>)createToken:(NSInteger)tokenType Text:(NSString *)text
-{
-    id<ANTLRToken> fromToken = [ANTLRCommonToken newToken:tokenType Text:text];
-    return fromToken;
-}
-
-- (id<ANTLRToken>)createToken:(id<ANTLRToken>)fromToken
-{
-    return [ANTLRCommonToken newTokenWithToken:(ANTLRCommonToken *)fromToken];
-}
-
-/** Track start/stop token for subtree root created for a rule.
- *  Only works with Tree nodes.  For rules that match nothing,
- *  seems like this will yield start=i and stop=i-1 in a nil node.
- *  Might be useful info so I'll not force to be i..i.
- */
-- (void) setTokenBoundaries:(id<ANTLRBaseTree>)aTree From:(id<ANTLRToken>)startToken To:(id<ANTLRToken>)stopToken
-{
-    if ( aTree == nil )
-        return;
-    int startTokIdx = 0;
-    int stopTokIdx = 0;
-    if ( startToken != nil )
-        startTokIdx = [startToken getTokenIndex];
-    if ( stopToken != nil )
-        stopTokIdx = [stopToken getTokenIndex];
-    [(id<ANTLRBaseTree>)aTree setTokenStartIndex:startTokIdx];
-    [(id<ANTLRBaseTree>)aTree setTokenStopIndex:stopTokIdx];
-}
-
-- (NSInteger)getTokenStartIndex:(id<ANTLRBaseTree>) t
-{
-    if ( t == nil )
-        return -1;
-    return [(id<ANTLRBaseTree>)t getTokenStartIndex];
-}
-
-- (NSInteger)getTokenStopIndex:(id<ANTLRBaseTree>) t
-{
-    if ( t == nil )
-        return -1;
-    return [(id<ANTLRBaseTree>)t getTokenStopIndex];
-}
-
-- (NSString *)getText:(ANTLRCommonTree *)t
-{
-    if ( t == nil )
-        return nil;
-    return t.token.text;
-}
-
-- (void)setText:(id<ANTLRBaseTree>)t Text:(NSString *)text
-{
-    if ( t == nil )
-        return;
-}
-
-- (NSInteger)getType:(ANTLRCommonTree *)t
-{
-    if ( t==nil )
-        return ANTLRTokenTypeInvalid;
-    return t.token.type;
-}
-
-- (void) setType:(id<ANTLRBaseTree>)t Type:(NSInteger)tokenType
-{
-    if ( t==nil )
-        return;
-}
-
-/** What is the Token associated with this node?  If
- *  you are not using ANTLRCommonTree, then you must
- *  override this in your own adaptor.
- */
-- (id<ANTLRToken>) getToken:(ANTLRCommonTree *) t
-{
-    if ( [t isKindOfClass:[ANTLRCommonTree class]] ) {
-        return t.token;
-    }
-    return nil; // no idea what to do
-}
-
-- (id<ANTLRBaseTree>) getChild:(id<ANTLRBaseTree>)t At:(NSInteger)i
-{
-    if ( t == nil )
-        return nil;
-    return [(id<ANTLRBaseTree>)t getChild:i];
-}
-
-- (void) setChild:(id<ANTLRBaseTree>)t At:(NSInteger)i Child:(id<ANTLRBaseTree>)child
-{
-    if ( t == nil )
-        return;
-    [(id<ANTLRBaseTree>)t setChild:i With:child];
-}
-
-- (id) deleteChild:(id<ANTLRBaseTree>)t Index:(NSInteger)anIndex
-{
-    return [t deleteChild:anIndex];
-}
-
-- (NSInteger) getChildCount:(id<ANTLRBaseTree>) t
-{
-    if ( t == nil )
-        return 0;
-    return [(id<ANTLRBaseTree>) t getChildCount];
-}
-
-- (id<ANTLRBaseTree>) getParent:(id<ANTLRBaseTree>) t
-{
-    if ( t == nil )
-        return nil;
-    return (id<ANTLRBaseTree>)[t getParent];
-}
-
-- (void) setParent:(id<ANTLRBaseTree>)t With:(id<ANTLRBaseTree>) parent
-{
-    if ( t != nil )
-        [(id<ANTLRBaseTree>) t setParent:(id<ANTLRBaseTree>)parent];
-}
-
-- (NSInteger) getChildIndex:(id<ANTLRBaseTree>) t
-{
-    if ( t == nil )
-        return 0;
-    return [(id<ANTLRBaseTree>) t getChildIndex];
-}
-
-- (void) setChildIndex:(id<ANTLRBaseTree>)t With:(NSInteger)anIndex
-{
-    if ( t!=nil )
-        [(id<ANTLRBaseTree>)t setChildIndex:anIndex];
-}
-
-- (void) replaceChildren:(id<ANTLRBaseTree>)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id<ANTLRBaseTree>)t
-{
-    if ( parent != nil ) {
-        [(id<ANTLRBaseTree>)parent replaceChildrenFrom:startChildIndex To:stopChildIndex With:t];
-    }
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    return [[[self class] allocWithZone:aZone] init];
-}
-
-@end
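
The adaptor deleted above is the node factory a generated parser uses to build ASTs. A minimal sketch of the calls it declares (illustration only, not part of the removed sources; BLOCK stands for a hypothetical grammar-generated token type, and retain/release handling is omitted):

    #import "ANTLRCommonTreeAdaptor.h"

    // Build an imaginary BLOCK root, as in ^(BLOCK[$lc] ...), and record
    // which input tokens it spans. Every call here is declared in the
    // adaptor removed above.
    ANTLRCommonTree *makeBlockNode(ANTLRCommonToken *lc, ANTLRCommonToken *rc, NSInteger BLOCK)
    {
        ANTLRCommonTreeAdaptor *adaptor = [ANTLRCommonTreeAdaptor newTreeAdaptor];
        ANTLRCommonTree *block = [adaptor createTree:BLOCK Text:@"BLOCK"]; // imaginary node, no input symbol
        [adaptor setTokenBoundaries:block From:lc To:rc];                  // remember the '{'..'}' token range
        return block;
    }
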
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeNodeStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeNodeStream.h
deleted file mode 100644
index 52efc0f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeNodeStream.h
+++ /dev/null
@@ -1,120 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-#import "ANTLRCommonTreeNodeStream.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRTreeNodeStream.h"
-#import "ANTLRTreeIterator.h"
-#import "ANTLRIntArray.h"
-
-@interface ANTLRCommonTreeNodeStream : ANTLRLookaheadStream <ANTLRTreeNodeStream> {
-#define DEFAULT_INITIAL_BUFFER_SIZE 100
-#define INITIAL_CALL_STACK_SIZE 10
-    
-/** Pull nodes from which tree? */
-__strong id root;
-    
-/** If this tree (root) was created from a token stream, track it. */
-__strong id <ANTLRTokenStream> tokens;
-    
-	/** What tree adaptor was used to build these trees */
-__strong ANTLRCommonTreeAdaptor *adaptor;
-    
-/** The tree iterator we are using */
-__strong ANTLRTreeIterator *it;
-    
-/** Stack of indexes used for push/pop calls */
-__strong ANTLRIntArray *calls;    
-    
-/** Treat (nil A B C) trees like flat A B C streams */
-BOOL hasNilRoot;
-    
-/** Tracks tree depth.  Level=0 means we're at root node level. */
-NSInteger level;
-}
-@property (retain, getter=getRoot, setter=setRoot:) ANTLRCommonTree *root;
-@property (retain, getter=getTokens,setter=setTokens:) id<ANTLRTokenStream> tokens;
-@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) ANTLRCommonTreeAdaptor *adaptor;
-@property (assign, getter=getLevel, setter=setLevel:) NSInteger level;
-
-+ (ANTLRCommonTreeNodeStream *) newANTLRCommonTreeNodeStream:(ANTLRCommonTree *)theTree;
-+ (ANTLRCommonTreeNodeStream *) newANTLRCommonTreeNodeStream:(id<ANTLRTreeAdaptor>)anAdaptor Tree:(ANTLRCommonTree *)theTree;
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)adaptor Tree:(ANTLRCommonTree *)theTree;
-    
-- (void) reset;
-    
-    /** Pull elements from tree iterator.  Track tree level 0..max_level.
-     *  If nil rooted tree, don't give initial nil and DOWN nor final UP.
-     */
-- (id) nextElement;
-    
-- (BOOL) isEOF:(id<ANTLRBaseTree>) obj;
-- (void) setUniqueNavigationNodes:(BOOL) uniqueNavigationNodes;
-    
-- (id) getTreeSource;
-    
-- (NSString *) getSourceName;
-    
-- (id<ANTLRTokenStream>) getTokenStream;
-    
-- (void) setTokenStream:(id<ANTLRTokenStream>) tokens;
-    
-- (ANTLRCommonTreeAdaptor *) getTreeAdaptor;
-    
-- (void) setTreeAdaptor:(ANTLRCommonTreeAdaptor *) adaptor;
-    
-- (NSInteger) LA:(NSInteger) i;
-    
-    /** Make stream jump to a new location, saving old location.
-     *  Switch back with pop().
-     */
-- (ANTLRCommonTree *)getNode:(NSInteger) i;
-
-- (void) push:(NSInteger) index;
-    
-    /** Seek back to previous index saved during last push() call.
-     *  Return top of stack (return index).
-     */
-- (NSInteger) pop;
-    
-// TREE REWRITE INTERFACE
-    
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-    
-- (NSString *) toStringFromNode:(id<ANTLRBaseTree>)startNode ToNode:(id<ANTLRBaseTree>)stopNode;
-
-/** For debugging; destructive: moves tree iterator to end. */
-- (NSString *) toTokenTypeString;
-
-@property (retain) ANTLRTreeIterator *it;
-@property (retain) ANTLRIntArray *calls;
-@property BOOL hasNilRoot;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeNodeStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeNodeStream.m
deleted file mode 100644
index 23eddee..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeNodeStream.m
+++ /dev/null
@@ -1,249 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRCommonTreeNodeStream.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRIntStream.h"
-#import "ANTLRCharStream.h"
-#import "AMutableArray.h"
-#import "ANTLRCommonTreeAdaptor.h"
-
-#ifndef DEBUG_DEALLOC
-#define DEBUG_DEALLOC
-#endif
-
-@implementation ANTLRCommonTreeNodeStream
-
-@synthesize root;
-@synthesize tokens;
-@synthesize adaptor;
-@synthesize level;
-
-+ (ANTLRCommonTreeNodeStream *) newANTLRCommonTreeNodeStream:(ANTLRCommonTree *)theTree
-{
-    return [[ANTLRCommonTreeNodeStream alloc] initWithTree:theTree];
-}
-
-+ (ANTLRCommonTreeNodeStream *) newANTLRCommonTreeNodeStream:(id<ANTLRTreeAdaptor>)anAdaptor Tree:(ANTLRCommonTree *)theTree
-{
-    return [[ANTLRCommonTreeNodeStream alloc] initWithTreeAdaptor:anAdaptor Tree:theTree];
-}
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree
-{
-    if ((self = [super init]) != nil ) {
-        adaptor = [[ANTLRCommonTreeAdaptor newTreeAdaptor] retain];
-        root = [theTree retain];
-        navigationNodeEOF = [[adaptor createTree:ANTLRTokenTypeEOF Text:@"EOF"] retain]; // set EOF
-        it = [[ANTLRTreeIterator newANTRLTreeIteratorWithAdaptor:adaptor andTree:root] retain];
-        calls = [[ANTLRIntArray newArrayWithLen:INITIAL_CALL_STACK_SIZE] retain];
-        /** Treat (nil A B C) trees like flat A B C streams */
-        hasNilRoot = NO;
-        level = 0;
-    }
-    return self;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor Tree:(ANTLRCommonTree *)theTree
-{
-    if ((self = [super init]) != nil ) {
-        adaptor = [anAdaptor retain];
-        root = [theTree retain];
-        navigationNodeEOF = [[adaptor createTree:ANTLRTokenTypeEOF Text:@"EOF"] retain]; // set EOF
-        //    it = [root objectEnumerator];
-        it = [[ANTLRTreeIterator newANTRLTreeIteratorWithAdaptor:adaptor andTree:root] retain];
-        calls = [[ANTLRIntArray newArrayWithLen:INITIAL_CALL_STACK_SIZE] retain];
-        /** Treat (nil A B C) trees like flat A B C streams */
-        hasNilRoot = NO;
-        level = 0;
-    }
-    //    eof = [self isEOF]; // make sure tree iterator returns the EOF we want
-    return self;
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRCommonTreeNodeStream" );
-#endif
-    if ( root ) [root release];
-    if ( tokens ) [tokens release];
-    if ( adaptor ) [adaptor release];
-    if ( it ) [it release];
-    if ( calls ) [calls release];    
-    [super dealloc];
-}
-
-- (void) reset
-{
-    [super reset];
-    [it reset];
-    hasNilRoot = false;
-    level = 0;
-    if ( calls != nil )
-        [calls reset];  // [calls clear]; // in Java
-}
-
-/** Pull elements from tree iterator.  Track tree level 0..max_level.
- *  If nil rooted tree, don't give initial nil and DOWN nor final UP.
- */
-- (id) nextElement
-{
-    id t = [it nextObject];
-    //System.out.println("pulled "+adaptor.getType(t));
-    if ( t == [it up] ) {
-        level--;
-        if ( level==0 && hasNilRoot ) return [it nextObject]; // don't give last UP; get EOF
-    }
-    else if ( t == [it down] )
-        level++;
-    if ( level == 0 && [adaptor isNil:t] ) { // if nil root, scarf nil, DOWN
-        hasNilRoot = true;
-        t = [it nextObject]; // t is now DOWN, so get first real node next
-        level++;
-        t = [it nextObject];
-    }
-    return t;
-}
-
-- (BOOL) isEOF:(id<ANTLRBaseTree>) aTree
-{
-    return [adaptor getType:(ANTLRCommonTree *)aTree] == ANTLRTokenTypeEOF;
-}
-
-- (void) setUniqueNavigationNodes:(BOOL) uniqueNavigationNodes
-{
-}
-
-- (id) getTreeSource
-{
-    return root;
-}
-
-- (NSString *) getSourceName
-{
-    return [[self getTokenStream] getSourceName];
-}
-
-- (id<ANTLRTokenStream>) getTokenStream
-{
-    return tokens;
-}
-
-- (void) setTokenStream:(id<ANTLRTokenStream>)theTokens
-{
-    if ( tokens != theTokens ) {
-        if ( tokens ) [tokens release];
-        [theTokens retain];
-    }
-    tokens = theTokens;
-}
-
-- (ANTLRCommonTreeAdaptor *) getTreeAdaptor
-{
-    return adaptor;
-}
-
-- (void) setTreeAdaptor:(ANTLRCommonTreeAdaptor *) anAdaptor
-{
-    if ( adaptor != anAdaptor ) {
-        if ( adaptor ) [adaptor release];
-        [anAdaptor retain];
-    }
-    adaptor = anAdaptor;
-}
-
-- (ANTLRCommonTree *)getNode:(NSInteger) i
-{
-    @throw [ANTLRRuntimeException newException:@"Absolute node indexes are meaningless in an unbuffered stream"];
-    return nil;
-}
-
-- (NSInteger) LA:(NSInteger) i
-{
-    return [adaptor getType:[self LT:i]];
-}
-
-/** Make stream jump to a new location, saving old location.
- *  Switch back with pop().
- */
-- (void) push:(NSInteger) anIndex
-{
-    if ( calls == nil ) {
-        calls = [[ANTLRIntArray newArrayWithLen:INITIAL_CALL_STACK_SIZE] retain];
-    }
-    [calls push:p]; // save the current position before jumping
-    [self seek:anIndex];
-}
-
-/** Seek back to previous anIndex saved during last push() call.
- *  Return top of stack (return anIndex).
- */
-- (NSInteger) pop
-{
-    int ret = [calls pop];
-    [self seek:ret];
-    return ret;
-}    
-
-// TREE REWRITE INTERFACE
-
-- (void) replaceChildren:(id) parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) aTree
-{
-    if ( parent != nil ) {
-        [adaptor replaceChildren:parent From:startChildIndex To:stopChildIndex With:aTree];
-    }
-}
-
-- (NSString *) toStringFromNode:(id<ANTLRBaseTree>)startNode ToNode:(id<ANTLRBaseTree>)stopNode
-{
-    // we'll have to walk from start to stop in tree; we're not keeping
-    // a complete node stream buffer
-    return @"n/a";
-}
-
-/** For debugging; destructive: moves tree iterator to end. */
-- (NSString *) toTokenTypeString
-{
-    [self reset];
-    NSMutableString *buf = [NSMutableString stringWithCapacity:5];
-    id obj = [self LT:1];
-    NSInteger type = [adaptor getType:obj];
-    while ( type != ANTLRTokenTypeEOF ) {
-        [buf appendString:@" "];
-        [buf appendString:[NSString stringWithFormat:@"%d", type]];
-        [self consume];
-        obj = [self LT:1];
-        type = [adaptor getType:obj];
-    }
-    return buf;
-}
-
-@synthesize it;
-@synthesize calls;
-@synthesize hasNilRoot;
-@end
-
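
ANTLRCommonTreeNodeStream, removed above, flattens a tree into a node stream (with imaginary DOWN/UP navigation nodes) that a tree parser reads one LA(1) at a time. A hedged sketch using only methods visible in the removed files (consume is assumed to come from the ANTLRLookaheadStream superclass, as the implementation above calls it on self):

    #import "ANTLRCommonTreeNodeStream.h"

    // Print the token type of every node until the stream reports EOF.
    void dumpNodeTypes(ANTLRCommonTree *tree)
    {
        ANTLRCommonTreeNodeStream *nodes =
            [ANTLRCommonTreeNodeStream newANTLRCommonTreeNodeStream:tree];
        while ( [nodes LA:1] != ANTLRTokenTypeEOF ) {
            NSLog(@"node type %ld", (long)[nodes LA:1]);
            [nodes consume];
        }
    }
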
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDFA.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRDFA.h
deleted file mode 100644
index 9adedb1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDFA.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRNoViableAltException.h"
-
-@interface ANTLRDFA : NSObject {
-	// the tables are set by subclasses to their own static versions.
-	const int *eot;
-	const int *eof;
-	const unichar *min;
-	const unichar *max;
-	const int *accept;
-	const int *special;
-	const int **transition;
-	
-	__strong ANTLRBaseRecognizer *recognizer;
-	NSInteger decisionNumber;
-    NSInteger len;
-}
-
-- (id) initWithRecognizer:(id) theRecognizer;
-// simulate the DFA using the static tables and predict an alternative
-- (NSInteger) predict:(id<ANTLRCharStream>)anInput;
-- (void) noViableAlt:(NSInteger)state Stream:(id<ANTLRIntStream>)anInput;
-
-- (NSInteger) specialStateTransition:(NSInteger)state Stream:(id<ANTLRIntStream>)anInput;
-// - (NSInteger) specialStateTransition:(NSInteger) state;
-//- (unichar) specialTransition:(unichar) state symbol:(NSInteger) symbol;
-
-// hook for debugger support
-- (void) error:(ANTLRNoViableAltException *)nvae;
-
-- (NSString *) description;
-- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment;
-
-+ (void) setIsEmittingDebugInfo:(BOOL) shouldEmitDebugInfo;
-
-- (NSInteger)getDecision;
-- (void)setDecision:(NSInteger)aDecision;
-
-- (ANTLRBaseRecognizer *)getRecognizer;
-- (void)setRecognizer:(ANTLRBaseRecognizer *)aRecognizer;
-- (NSInteger)length;
-
-@property const int *eot;
-@property const int *eof;
-@property const unichar *min;
-@property const unichar *max;
-@property const int *accept;
-@property const int *special;
-@property const int **transition;
-
-@property (retain, getter=getRecognizer,setter=setRecognizer:) ANTLRBaseRecognizer *recognizer;
-@property (assign, getter=getDecision,setter=setDecision:) NSInteger decisionNumber;
-@property (assign, getter=getLen,setter=setLen:) NSInteger len;
-@end
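
ANTLRDFA is the base class for the DFA classes ANTLR generates; each subclass supplies the static eot/eof/min/max/accept/special/transition tables and the recognizer asks it to predict which alternative to take. A sketch of that call pattern (DFA13, input and the surrounding rule method are hypothetical, not part of these sources):

    // Inside a generated rule method of a recognizer:
    DFA13 *dfa13 = [[DFA13 alloc] initWithRecognizer:self]; // generated subclass that fills in the tables
    NSInteger alt = [dfa13 predict:input];                  // simulate the DFA against the lookahead
    switch (alt) {
        case 1: /* match the first alternative  */ break;
        case 2: /* match the second alternative */ break;
        default: break;
    }
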
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDFA.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRDFA.m
deleted file mode 100644
index b42daf5..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDFA.m
+++ /dev/null
@@ -1,262 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDFA.h"
-#import <ANTLRToken.h>
-#import <ANTLRNoViableAltException.h>
-
-NSInteger debug = 0;
-
-@implementation ANTLRDFA
-@synthesize recognizer;
-@synthesize decisionNumber;
-@synthesize len;
-
-- (id) initWithRecognizer:(ANTLRBaseRecognizer *) theRecognizer
-{
-	if ((self = [super init]) != nil) {
-		recognizer = theRecognizer;
-        [recognizer retain];
-        debug = 0;
-	}
-	return self;
-}
-
-// using the tables ANTLR generates for the DFA based prediction this method simulates the DFA
-// and returns the prediction of the alternative to be used.
-- (NSInteger) predict:(id<ANTLRIntStream>)input
-{
-    if ( debug > 2 ) {
-        NSLog(@"Enter DFA.predict for decision %d", decisionNumber);
-    }
-	int aMark = [input mark];
-	int s = 0;
-	@try {
-		while (YES) {
-			if ( debug > 2 )
-                NSLog(@"DFA %d state %d LA(1)='%c'(%x)", decisionNumber, s, (unichar)[input LA:1], [input LA:1]);
-			NSInteger specialState = special[s];
-			if (specialState >= 0) {
-				// this state is special in that it has some code associated with it. we cannot do this in a pure DFA so
-				// we signal the caller accordingly.
-				if ( debug > 2 ) {
-                    NSLog(@"DFA %d state %d is special state %d", decisionNumber, s, specialState);
-                }
-				s = [self specialStateTransition:specialState Stream:input];
-                if ( debug > 2 ) {
-                    NSLog(@"DFA %d returns from special state %d to %d", decisionNumber, specialState, s);
-                }
-                if (s == -1 ) {
-                    [self noViableAlt:s Stream:input];
-                    return 0;
-                }
-				[input consume];
-				continue;
-			}
-			if (accept[s] >= 1) {  // if this is an accepting state return the prediction
-				if ( debug > 2 ) NSLog(@"accept; predict %d from state %d", accept[s], s);
-				return accept[s];
-			}
-			// based on the lookahead lookup the next transition, consume and do transition
-			// or signal that we have no viable alternative
-			int c = [input LA:1];
-			if ( (unichar)c >= min[s] && (unichar)c <= max[s]) {
-				int snext = transition[s][c-min[s]];
-				if (snext < 0) {
-                    // was in range but not a normal transition
-                    // must check EOT, which is like the else clause.
-                    // eot[s]>=0 indicates that an EOT edge goes to another
-                    // state.
-					if (eot[s] >= 0) {
-						if ( debug > 2 ) NSLog(@"EOT transition");
-						s = eot[s];
-						[input consume];
-                        // TODO: I had this as return accept[eot[s]]
-                        // which assumed here that the EOT edge always
-                        // went to an accept...faster to do this, but
-                        // what about predicated edges coming from EOT
-                        // target?
-						continue;
-					}
-					[self noViableAlt:s Stream:input];
-					return 0;
-				}
-				s = snext;
-				[input consume];
-				continue;
-			}
-			
-			if (eot[s] >= 0) {// EOT transition? we may still accept the input in the next state
-				if ( debug > 2 ) NSLog(@"EOT transition");
-				s = eot[s];
-				[input consume];
-				continue;
-			}
-			if ( c == ANTLRTokenTypeEOF && eof[s] >= 0) {  // we are at EOF and may even accept the input.
-				if ( debug > 2 ) NSLog(@"accept via EOF; predict %d from %d", accept[eof[s]], eof[s]);
-				return accept[eof[s]];
-			}
-			if ( debug > 2 ) {
-                NSLog(@"no viable alt!\n");
-                NSLog(@"min[%d] = %d\n", s, min[s]);
-                NSLog(@"max[%d] = %d\n", s, min[s]);
-                NSLog(@"eot[%d] = %d\n", s, min[s]);
-                NSLog(@"eof[%d] = %d\n", s, min[s]);
-                for (NSInteger p = 0; p < self.len; p++) {
-                    NSLog(@"%d ", transition[s][p]);
-                }
-                NSLog(@"\n");
-            }
-			[self noViableAlt:s Stream:input];
-            return 0;
-		}
-	}
-	@finally {
-		[input rewind:aMark];
-	}
-	return 0; // silence warning
-}
-
-- (void) noViableAlt:(NSInteger)state Stream:(id<ANTLRIntStream>)anInput
-{
-	if ([recognizer.state isBacktracking]) {
-		[recognizer.state setFailed:YES];
-		return;
-	}
-	ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:decisionNumber state:state stream:anInput];
-	[self error:nvae];
-	@throw nvae;
-}
-
-- (NSInteger) specialStateTransition:(NSInteger)state Stream:(id<ANTLRIntStream>)anInput
-{
-    @throw [ANTLRNoViableAltException newException:-1 state:state stream:anInput];
-	return -1;
-}
-
-- (void) error:(ANTLRNoViableAltException *)nvae
-{
-	// empty, hook for debugger support
-}
-
-- (NSString *) description
-{
-	return @"subclass responsibility";
-}
-
-- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment
-{
-	return [recognizer evaluateSyntacticPredicate:synpredFragment];
-}
-
-+ (void) setIsEmittingDebugInfo:(BOOL) shouldEmitDebugInfo
-{
-	debug = shouldEmitDebugInfo;
-}
-
-/** Given a String that has a run-length-encoding of some unsigned shorts
- *  like "\1\2\3\9", convert to short[] {2,9,9,9}.  We do this to avoid
- *  static short[] which generates so much init code that the class won't
- *  compile. :(
- */
-- (short *) unpackEncodedString:(NSString *)encodedString
-{
-    // walk first to find how big it is.
-    int size = 0;
-    for (int i=0; i < [encodedString length]; i+=2) {
-        size += [encodedString characterAtIndex:i];
-    }
-    __strong short *data = (short *)calloc(size, sizeof(short));
-    int di = 0;
-    for (int i=0; i < [encodedString length]; i+=2) {
-        char n = [encodedString characterAtIndex:i];
-        char v = [encodedString characterAtIndex:i+1];
-        // add v n times to data
-        for (int j = 0; j < n; j++) {
-            data[di++] = v;
-        }
-    }
-    return data;
-}
-
-/** Hideous duplication of code, but I need different typed arrays out :( */
-- (char *) unpackEncodedStringToUnsignedChars:(NSString *)encodedString
-{
-    // walk first to find how big it is.
-    int size = 0;
-    for (int i=0; i < [encodedString length]; i+=2) {
-        size += [encodedString characterAtIndex:i];
-    }
-    __strong char *data = (char *)calloc(size, sizeof(char));
-    int di = 0;
-    for (int i=0; i < [encodedString length]; i+=2) {
-        char n = [encodedString characterAtIndex:i];
-        char v = [encodedString characterAtIndex:i+1];
-        // add v n times to data
-        for (int j = 0; j < n; j++) {
-            data[di++] = v;
-        }
-    }
-    return data;
-}
-
-- (NSInteger)getDecision
-{
-    return decisionNumber;
-}
-
-- (void)setDecision:(NSInteger)aDecision
-{
-    decisionNumber = aDecision;
-}
-
-- (ANTLRBaseRecognizer *)getRecognizer
-{
-    return recognizer;
-}
-
-- (void)setRecognizer:(ANTLRBaseRecognizer *)aRecognizer
-{
-    if ( recognizer != aRecognizer ) {
-        if ( recognizer ) [recognizer release];
-        [aRecognizer retain];
-    }
-    recognizer = aRecognizer;
-}
-
-- (NSInteger)length
-{
-    return len;
-}
-
-@synthesize eot;
-@synthesize eof;
-@synthesize min;
-@synthesize max;
-@synthesize accept;
-@synthesize special;
-@synthesize transition;
-@end
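
The unpackEncodedString method above decodes run-length-encoded tables stored as (count, value) pairs. A tiny worked example of that scheme (illustration only, not runtime code):

    // "\1\2\3\9" means: 1 copy of 2, then 3 copies of 9.
    unsigned short encoded[] = { 1, 2, 3, 9 };
    short out[4];
    int di = 0;
    for (int i = 0; i < 4; i += 2)
        for (int j = 0; j < encoded[i]; j++)
            out[di++] = (short)encoded[i + 1];   // out == {2, 9, 9, 9}
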
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebug.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebug.h
deleted file mode 100644
index 87383c9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebug.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDebugEventListener.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugParser.h"
-#import "ANTLRDebugTokenStream.h"
-#import "ANTLRDebugTreeParser.h"
-#import "ANTLRDebugTreeNodeStream.h"
-#import "ANTLRDebugTreeAdaptor.h"
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugEventListener.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugEventListener.h
deleted file mode 100644
index c2bee6c..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugEventListener.h
+++ /dev/null
@@ -1,275 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRDebugEventListener 
-
-#define ANTLRDebugProtocolVersion 1
-
-/** The parser has just entered a rule.  No decision has been made about
-*  which alt is predicted.  This is fired AFTER init actions have been
-*  executed.  Attributes are defined and available etc...
-*/
-- (void) enterRule:(NSString *)ruleName;
-
-/** Because rules can have lots of alternatives, it is very useful to
-*  know which alt you are entering.  This is 1..n for n alts.
-*/
-- (void) enterAlt:(NSInteger)alt;
-
-/** This is the last thing executed before leaving a rule.  It is
-*  executed even if an exception is thrown.  This is triggered after
-*  error reporting and recovery have occurred (unless the exception is
-*  not caught in this rule).  This implies an "exitAlt" event.
-*/
-- (void) exitRule:(NSString *)ruleName;
-
-/** Track entry into any (...) subrule or other EBNF construct */
-- (void) enterSubRule:(NSInteger)decisionNumber;
-
-- (void) exitSubRule:(NSInteger)decisionNumber;
-
-/** Every decision, fixed k or arbitrary, has an enter/exit event
-*  so that a GUI can easily track what LT/consume events are
-*  associated with prediction.  You will see a single enter/exit
-*  subrule but multiple enter/exit decision events, one for each
-*  loop iteration.
-*/
-- (void) enterDecision:(NSInteger)decisionNumber;
-
-- (void) exitDecision:(NSInteger)decisionNumber;
-
-/** An input token was consumed; matched by any kind of element.
-*  Trigger after the token was matched by things like match(), matchAny().
-*/
-- (void) consumeToken:(id<ANTLRToken>)t;
-
-/** An off-channel input token was consumed.
-*  Trigger after the token was matched by things like match(), matchAny().
-*  (unless of course the hidden token is first stuff in the input stream).
-*/
-- (void) consumeHiddenToken:(id<ANTLRToken>)t;
-
-/** Somebody (anybody) looked ahead.  Note that this actually gets
-*  triggered by both LA and LT calls.  The debugger will want to know
-*  which Token object was examined.  Like consumeToken, this indicates
-*  what token was seen at that depth.  A remote debugger cannot look
-*  ahead into a file it doesn't have so LT events must pass the token
-*  even if the info is redundant.
-*/
-- (void) LT:(NSInteger)i foundToken:(id<ANTLRToken>)t;
-
-/** The parser is going to look arbitrarily ahead; mark this location,
-*  the token stream's marker is sent in case you need it.
-*/
-- (void) mark:(NSInteger)marker;
-
-/** After an arbitrarily long lookahead as with a cyclic DFA (or with
-*  any backtrack), this informs the debugger that the stream should be
-*  rewound to the position associated with marker.
-*/
-- (void) rewind:(NSInteger)marker;
-
-/** Rewind to the input position of the last marker.
-*  Used currently only after a cyclic DFA and just
-*  before starting a sem/syn predicate to get the
-*  input position back to the start of the decision.
-*  Do not "pop" the marker off the state.  mark(i)
-*  and rewind(i) should balance still.
-*/
-- (void) rewind;
-
-- (void) beginBacktrack:(NSInteger)level;
-
-- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful;
-
-/** To watch a parser move through the grammar, the parser needs to
-*  inform the debugger what line/charPos it is passing in the grammar.
-*  For now, this does not know how to switch from one grammar to the
-*  other and back for island grammars etc...
-*
-*  This should also allow breakpoints because the debugger can stop
-*  the parser whenever it hits this line/pos.
-*/
-- (void) locationLine:(NSInteger)line column:(NSInteger)pos;
-
-/** A recognition exception occurred such as NoViableAltException.  I made
-*  this a generic event so that I can alter the exception hierarchy later
-*  without having to alter all the debug objects.
-*
-*  Upon error, the stack of enter rule/subrule must be properly unwound.
-*  If no viable alt occurs it is within an enter/exit decision, which
-*  also must be rewound.  Even the rewind for each mark must be unwound.
-*  In the Java target this is pretty easy using try/finally, if a bit
-*  ugly in the generated code.  The rewind is generated in DFA.predict()
-*  actually so no code needs to be generated for that.  For languages
-*  w/o this "finally" feature (C++?), the target implementor will have
-*  to build an event stack or something.
-*
-*  Across a socket for remote debugging, only the RecognitionException
-*  data fields are transmitted.  The token object or whatever that
-*  caused the problem was the last object referenced by LT.  The
-*  immediately preceding LT event should hold the unexpected Token or
-*  char.
-*
-*  Here is a sample event trace for grammar:
-*
-*  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
-*    | D
-*    ;
-*
-*  The sequence for this rule (with no viable alt in the subrule) for
-*  input 'c c' (there are 3 tokens) is:
-*
-*		commence
-*		LT(1)
-*		enterRule b
-*		location 7 1
-*		enter decision 3
-*		LT(1)
-*		exit decision 3
-*		enterAlt1
-*		location 7 5
-*		LT(1)
-*		consumeToken [c/<4>,1:0]
-*		location 7 7
-*		enterSubRule 2
-*		enter decision 2
-*		LT(1)
-*		LT(1)
-*		recognitionException NoViableAltException 2 1 2
-*		exit decision 2
-*		exitSubRule 2
-*		beginResync
-*		LT(1)
-*		consumeToken [c/<4>,1:1]
-*		LT(1)
-*		endResync
-*		LT(-1)
-*		exitRule b
-*		terminate
-*/
-- (void) recognitionException:(ANTLRRecognitionException *)e;
-
-/** Indicates the recognizer is about to consume tokens to resynchronize
-*  the parser.  Any consume events from here until the recovered event
-*  are not part of the parse--they are dead tokens.
-*/
-- (void) beginResync;
-
-/** Indicates that the recognizer has finished consuming tokens in order
-*  to resynchronize.  There may be multiple beginResync/endResync pairs
-*  before the recognizer comes out of errorRecovery mode (in which
-*  multiple errors are suppressed).  This will be useful
-*  in a GUI where you probably want to grey out tokens that are consumed
-*  but not matched to anything in the grammar.  Anything between
-*  a beginResync/endResync pair was tossed out by the parser.
-*/
-- (void) endResync;
-
-/** A semantic predicate was evaluated with this result and action text */
-- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result;
-
-/** Announce that parsing has begun.  Not technically useful except for
-*  sending events over a socket.  A GUI for example will launch a thread
-*  to connect and communicate with a remote parser.  The thread will want
-*  to notify the GUI when a connection is made.  ANTLR parsers
-*  trigger this upon entry to the first rule (the ruleLevel is used to
-*  figure this out).
-*/
-- (void) commence;
-
-/** Parsing is over; successfully or not.  Mostly useful for telling
-*  remote debugging listeners that it's time to quit.  When the rule
-*  invocation level goes to zero at the end of a rule, we are done
-*  parsing.
-*/
-- (void) terminate;
-
-
-// T r e e  P a r s i n g
-
-/** Input for a tree parser is an AST, but we know nothing for sure
-*  about a node except its type and text (obtained from the adaptor).
-*  This is the analog of the consumeToken method.  Again, the ID is
-*  the hashCode usually of the node so it only works if hashCode is
-*  not implemented.  If the type is UP or DOWN, then
-*  the ID is not really meaningful as it's fixed--there is
-*  just one UP node and one DOWN navigation node.
-*/
-- (void) consumeNode:(NSInteger)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-/** The tree parser looked ahead.  If the type is UP or DOWN,
-*  then the ID is not really meaningful as it's fixed--there is
-*  just one UP node and one DOWN navigation node.
-*/
-- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-
-// A S T  E v e n t s
-
-/** A nil was created (even nil nodes have a unique ID...
-*  they are not "null" per se).  As of 4/28/2006, this
-*  seems to be uniquely triggered when starting a new subtree
-*  such as when entering a subrule in automatic mode and when
-*  building a tree in rewrite mode.
-*/
-- (void) createNilNode:(unsigned)hash;
-
-/** Announce a new node built from text */
-- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type;
-
-/** Announce a new node built from an existing token */
-- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex;
-
-/** Make a node the new root of an existing root.  See TreeAdaptor.becomeRoot().
-*
-*  Note: the newRootID parameter is possibly different
-*  than the TreeAdaptor.becomeRoot() newRoot parameter.
-*  In our case, it will always be the result of calling
-*  TreeAdaptor.becomeRoot() and not root_n or whatever.
-*
-*  The listener should assume that this event occurs
-*  only when the current subrule (or rule) subtree is
-*  being reset to newRootID.
-*
-*/
-- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash;
-
-/** Make childID a child of rootID.
-*  @see org.antlr.runtime.tree.TreeAdaptor.addChild()
-*/
-- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash;
-
-/** Set the token start/stop token index for a subtree root or node */
-- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSUInteger)tokenStartIndex To:(NSUInteger)tokenStopIndex;
-
-- (void) waitForDebuggerConnection;
-
-@end
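
Anything that wants to observe a parse, whether a remote ANTLRWorks session or a local logger, adopts this protocol. A minimal sketch of a logging adopter (not part of these sources; the protocol declares no @optional methods, so a real implementation must provide every method, only two are shown here):

    #import "ANTLRDebugEventListener.h"

    @interface LoggingDebugListener : NSObject <ANTLRDebugEventListener>
    @end

    @implementation LoggingDebugListener
    - (void) enterRule:(NSString *)ruleName { NSLog(@"enter %@", ruleName); }
    - (void) exitRule:(NSString *)ruleName  { NSLog(@"exit %@", ruleName); }
    // ... the remaining ANTLRDebugEventListener methods would follow here ...
    @end
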
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugEventProxy.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugEventProxy.h
deleted file mode 100644
index af8cd33..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugEventProxy.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRDebugEventListener.h"
-#import <sys/socket.h>
-#import <netinet/in.h>
-#import <netinet/tcp.h>
-#include <arpa/inet.h>
-
-// default port for ANTLRWorks
-#define DEFAULT_DEBUGGER_PORT 49001
-
-@interface ANTLRDebugEventProxy : NSObject <ANTLRDebugEventListener> {
-	int serverSocket;
-	
-	struct sockaddr debugger_sockaddr;
-	socklen_t debugger_socklen;
-	int debuggerSocket;
-	NSFileHandle *debuggerFH;
-	
-	NSString *grammarName;
-	int debuggerPort;
-}
-
-- (id) init;
-- (id) initWithGrammarName:(NSString *)aGrammarName debuggerPort:(NSInteger)aPort;
-- (void) waitForDebuggerConnection;
-- (void) waitForAck;
-- (void) sendToDebugger:(NSString *)message;
-- (void) sendToDebugger:(NSString *)message waitForResponse:(BOOL)wait;
-
-- (NSInteger) serverSocket;
-- (void) setServerSocket: (NSInteger) aServerSocket;
-
-- (NSInteger) debuggerSocket;
-- (void) setDebuggerSocket: (NSInteger) aDebuggerSocket;
-
-- (NSString *) grammarName;
-- (void) setGrammarName: (NSString *) aGrammarName;
-
-- (NSInteger) debuggerPort;
-- (void) setDebuggerPort: (NSInteger) aDebuggerPort;
-
-- (NSString *) escapeNewlines:(NSString *)aString;
-
-#pragma mark -
-
-#pragma mark DebugEventListener Protocol
-- (void) enterRule:(NSString *)ruleName;
-- (void) enterAlt:(NSInteger)alt;
-- (void) exitRule:(NSString *)ruleName;
-- (void) enterSubRule:(NSInteger)decisionNumber;
-- (void) exitSubRule:(NSInteger)decisionNumber;
-- (void) enterDecision:(NSInteger)decisionNumber;
-- (void) exitDecision:(NSInteger)decisionNumber;
-- (void) consumeToken:(id<ANTLRToken>)t;
-- (void) consumeHiddenToken:(id<ANTLRToken>)t;
-- (void) LT:(NSInteger)i foundToken:(id<ANTLRToken>)t;
-- (void) mark:(NSInteger)marker;
-- (void) rewind:(NSInteger)marker;
-- (void) rewind;
-- (void) beginBacktrack:(NSInteger)level;
-- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful;
-- (void) locationLine:(NSInteger)line column:(NSInteger)pos;
-- (void) recognitionException:(ANTLRRecognitionException *)e;
-- (void) beginResync;
-- (void) endResync;
-- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result;
-- (void) commence;
-- (void) terminate;
-
-
-#pragma mark Tree Parsing
-- (void) consumeNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
-
-
-#pragma mark AST Events
-
-- (void) createNilNode:(unsigned)hash;
-- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type;
-- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex;
-- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash;
-- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash;
-- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSInteger)tokenStartIndex To:(NSInteger)tokenStopIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugEventProxy.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugEventProxy.m
deleted file mode 100644
index f68059a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugEventProxy.m
+++ /dev/null
@@ -1,370 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRToken+DebuggerSupport.h"
-#include <string.h>
-
-static NSData *newlineData = nil;
-static unsigned lengthOfUTF8Ack = 0;
-
-@implementation ANTLRDebugEventProxy
-
-+ (void) initialize
-{
-	if (!newlineData) newlineData = [@"\n" dataUsingEncoding:NSUTF8StringEncoding];
-	if (!lengthOfUTF8Ack) lengthOfUTF8Ack = [[@"ack\n" dataUsingEncoding:NSUTF8StringEncoding] length];
-}
-
-- (id) init
-{
-	return [self initWithGrammarName:nil debuggerPort:DEFAULT_DEBUGGER_PORT];
-}
-
-- (id) initWithGrammarName:(NSString *)aGrammarName debuggerPort:(NSInteger)aPort
-{
-	self = [super init];
-	if (self) {
-		serverSocket = -1;
-		[self setGrammarName:aGrammarName];
-		if (aPort == -1) aPort = DEFAULT_DEBUGGER_PORT;
-		[self setDebuggerPort:aPort];
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-	if (serverSocket != -1) 
-		shutdown(serverSocket,SHUT_RDWR);
-	serverSocket = -1;
-	[debuggerFH release];
-    [self setGrammarName:nil];
-    [super dealloc];
-}
-
-- (void) waitForDebuggerConnection
-{
-	if (serverSocket == -1) {
-		serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
-		
-		NSAssert1(serverSocket != -1, @"Failed to create debugger socket. %s", strerror(errno));
-		
-		int yes = 1;
-		setsockopt(serverSocket, SOL_SOCKET, SO_KEEPALIVE, (void *)&yes, sizeof(yes));
-		setsockopt(serverSocket, SOL_SOCKET, SO_REUSEPORT, (void *)&yes, sizeof(yes));
-		setsockopt(serverSocket, SOL_SOCKET, SO_REUSEADDR, (void *)&yes, sizeof(yes));
-		setsockopt(serverSocket, IPPROTO_TCP, TCP_NODELAY, (void *)&yes, sizeof(yes));
-
-		struct sockaddr_in server_addr;
-		bzero(&server_addr, sizeof(struct sockaddr_in));
-		server_addr.sin_family = AF_INET;
-		server_addr.sin_port = htons([self debuggerPort]);
-		server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
-		NSAssert1( bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(struct sockaddr)) != -1, @"bind(2) failed. %s", strerror(errno));
-
-		NSAssert1(listen(serverSocket,50) == 0, @"listen(2) failed. %s", strerror(errno));
-		
-		NSLog(@"ANTLR waiting for debugger attach (grammar %@)", [self grammarName]);
-		
-		debuggerSocket = accept(serverSocket, &debugger_sockaddr, &debugger_socklen);
-		NSAssert1( debuggerSocket != -1, @"accept(2) failed. %s", strerror(errno));
-		
-		debuggerFH = [[NSFileHandle alloc] initWithFileDescriptor:debuggerSocket];
-		[self sendToDebugger:[NSString stringWithFormat:@"ANTLR %d", ANTLRDebugProtocolVersion] waitForResponse:NO];
-		[self sendToDebugger:[NSString stringWithFormat:@"grammar \"%@", [self grammarName]] waitForResponse:NO];
-	}
-}
-
-- (void) waitForAck
-{
-	NSString *response;
-	@try {
-		NSData *newLine = [debuggerFH readDataOfLength:lengthOfUTF8Ack];
-		response = [[NSString alloc] initWithData:newLine encoding:NSUTF8StringEncoding];
-		if (![response isEqualToString:@"ack\n"]) @throw [NSException exceptionWithName:@"ANTLRDebugEventProxy" reason:@"illegal response from debugger" userInfo:nil];
-	}
-	@catch (NSException *e) {
-		NSLog(@"socket died or debugger misbehaved: %@ read <%@>", e, response);
-	}
-	@finally {
-		[response release];
-	}
-}
-
-- (void) sendToDebugger:(NSString *)message
-{
-	[self sendToDebugger:message waitForResponse:YES];
-}
-
-- (void) sendToDebugger:(NSString *)message waitForResponse:(BOOL)wait
-{
-	if (! debuggerFH ) return;
-	[debuggerFH writeData:[message dataUsingEncoding:NSUTF8StringEncoding]];
-	[debuggerFH writeData:newlineData];
-	if (wait) [self waitForAck];
-}
-
-- (NSInteger) serverSocket
-{
-    return serverSocket;
-}
-
-- (void) setServerSocket: (NSInteger) aServerSocket
-{
-    serverSocket = aServerSocket;
-}
-
-- (NSInteger) debuggerSocket
-{
-    return debuggerSocket;
-}
-
-- (void) setDebuggerSocket: (NSInteger) aDebuggerSocket
-{
-    debuggerSocket = aDebuggerSocket;
-}
-
-- (NSString *) grammarName
-{
-    return grammarName; 
-}
-
-- (void) setGrammarName: (NSString *) aGrammarName
-{
-    if (grammarName != aGrammarName) {
-        [aGrammarName retain];
-        [grammarName release];
-        grammarName = aGrammarName;
-    }
-}
-
-- (NSInteger) debuggerPort
-{
-    return debuggerPort;
-}
-
-- (void) setDebuggerPort: (NSInteger) aDebuggerPort
-{
-    debuggerPort = aDebuggerPort;
-}
-
-- (NSString *) escapeNewlines:(NSString *)aString
-{
-	NSMutableString *escapedText;
-	if (aString) {
-		escapedText = [NSMutableString stringWithString:aString];
-		NSRange wholeString = NSMakeRange(0,[escapedText length]);
-		[escapedText replaceOccurrencesOfString:@"%" withString:@"%25" options:0 range:wholeString];
-		[escapedText replaceOccurrencesOfString:@"\n" withString:@"%0A" options:0 range:wholeString];
-		[escapedText replaceOccurrencesOfString:@"\r" withString:@"%0D" options:0 range:wholeString];
-	} else {
-		escapedText = [NSMutableString stringWithString:@""];
-	}
-	return escapedText;
-}
-
-#pragma mark -
-
-#pragma mark DebugEventListener Protocol
-- (void) enterRule:(NSString *)ruleName
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"enterRule %@", ruleName]];
-}
-
-- (void) enterAlt:(NSInteger)alt
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"enterAlt %d", alt]]; 
-}
-
-- (void) exitRule:(NSString *)ruleName
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"exitRule %@", ruleName]];
-}
-
-- (void) enterSubRule:(NSInteger)decisionNumber
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"enterSubRule %d", decisionNumber]];
-}
-
-- (void) exitSubRule:(NSInteger)decisionNumber
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"exitSubRule %d", decisionNumber]];
-}
-
-- (void) enterDecision:(NSInteger)decisionNumber
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"enterDecision %d", decisionNumber]];
-}
-
-- (void) exitDecision:(NSInteger)decisionNumber
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"exitDecision %d", decisionNumber]];
-}
-
-- (void) consumeToken:(id<ANTLRToken>)t
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"consumeToken %@", [self escapeNewlines:[t description]]]];
-}
-
-- (void) consumeHiddenToken:(id<ANTLRToken>)t
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"consumeHiddenToken %@", [self escapeNewlines:[t description]]]];
-}
-
-- (void) LT:(NSInteger)i foundToken:(id<ANTLRToken>)t
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"LT %d %@", i, [self escapeNewlines:[t description]]]];
-}
-
-- (void) mark:(NSInteger)marker
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"mark %d", marker]];
-}
-- (void) rewind:(NSInteger)marker
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"rewind %d", marker]];
-}
-
-- (void) rewind
-{
-	[self sendToDebugger:@"rewind"];
-}
-
-- (void) beginBacktrack:(NSInteger)level
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"beginBacktrack %d", level]];
-}
-
-- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"endBacktrack %d %d", level, successful ? 1 : 0]];
-}
-
-- (void) locationLine:(NSInteger)line column:(NSInteger)pos
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"location %d %d", line, pos]];
-}
-
-- (void) recognitionException:(ANTLRRecognitionException *)e
-{
-#warning TODO: recognition exceptions
-	// these must use the names of the corresponding Java exception classes, because ANTLRWorks recreates the exception
-	// objects on the Java side.
-	// Write categories for Objective-C exceptions to provide those names
-}
-
-- (void) beginResync
-{
-	[self sendToDebugger:@"beginResync"];
-}
-	
-- (void) endResync
-{
-	[self sendToDebugger:@"endResync"];
-}
-
-- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"semanticPredicate %d %@", result?1:0, [self escapeNewlines:predicate]]];
-}
-
-- (void) commence
-{
-	// no need to send event
-}
-
-- (void) terminate
-{
-	[self sendToDebugger:@"terminate"];
-	@try {
-		[debuggerFH closeFile];
-	}
-	@finally {
-#warning TODO: make socket handling robust. too lazy now...
-		shutdown(serverSocket,SHUT_RDWR);
-		serverSocket = -1;
-	}
-}
-
-
-#pragma mark Tree Parsing
-- (void) consumeNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"consumeNode %u %d %@",
-		nodeHash,
-		type,
-		[self escapeNewlines:text]
-		]];
-}
-
-- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"LN %d %u %d %@",
-		i,
-		nodeHash,
-		type,
-		[self escapeNewlines:text]
-		]];
-}
-
-
-#pragma mark AST Events
-
-- (void) createNilNode:(unsigned)hash
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"nilNode %u", hash]];
-}
-
-- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"createNodeFromToken %u %d %@", 
-		hash,
-		type,
-		[self escapeNewlines:text]
-		]];
-}
-
-- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"createNode %u %d", hash, tokenIndex]];
-}
-
-- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"becomeRoot %u %u", newRootHash, oldRootHash]];
-}
-
-- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"addChild %u %u", treeHash, childHash]];
-}
-
-- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSInteger)tokenStartIndex To:(NSInteger)tokenStopIndex
-{
-	[self sendToDebugger:[NSString stringWithFormat:@"setTokenBoundaries %u %d %d", nodeHash, tokenStartIndex, tokenStopIndex]];
-}
-
-
-
-@end
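
Each listener method above serializes its event into a single line of text and hands it to sendToDebugger:, which writes it to the socket ANTLRWorks is attached to; newlines in token and node text are escaped so an event never spans more than one line, and the event names follow the Java debug protocol that ANTLRWorks expects. A rough mapping derived from the format strings above (the exact token rendering comes from [tok description] and is an assumption here):

    [proxy exitDecision:3];                            // "exitDecision 3"
    [proxy consumeToken:tok];                          // "consumeToken <escaped token description>"
    [proxy LT:1 foundToken:tok];                       // "LT 1 <escaped token description>"
    [proxy semanticPredicate:@"size > 0" matched:YES]; // "semanticPredicate 1 size > 0"
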
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugParser.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugParser.h
deleted file mode 100644
index ed403ef..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugParser.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugTokenStream.h"
-
-@interface ANTLRDebugParser : ANTLRParser {
-	id<ANTLRDebugEventListener> debugListener;
-}
-
-+ (id) newDebugParser:(id<ANTLRTokenStream>)theStream
-        debugListener:(id<ANTLRDebugEventListener>)debugListener;
-
-+ (id) newDebugParser:(id<ANTLRTokenStream>)theStream
-                state:(ANTLRRecognizerSharedState *)state;
-
-+ (id) newDebugParser:(id<ANTLRTokenStream>)theStream
-        debugListener:(id<ANTLRDebugEventListener>)debugListener
-                state:(ANTLRRecognizerSharedState *)state;
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream;
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-			  debuggerPort:(NSInteger)portNumber;
-// designated initializer
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-			 debugListener:(id<ANTLRDebugEventListener>)theDebugListener
-			  debuggerPort:(NSInteger)portNumber;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugParser.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugParser.m
deleted file mode 100644
index f86a8ba..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugParser.m
+++ /dev/null
@@ -1,113 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDebugParser.h"
-
-
-@implementation ANTLRDebugParser
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-{
-	return [self initWithTokenStream:theStream debugListener:nil debuggerPort:-1];
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-			  debuggerPort:(NSInteger)portNumber
-{
-	return [self initWithTokenStream:theStream debugListener:nil debuggerPort:portNumber];
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-			 debugListener:(id<ANTLRDebugEventListener>)theDebugListener
-			  debuggerPort:(NSInteger)portNumber
-{
-	id<ANTLRDebugEventListener,NSObject> debugger = nil;
-	id<ANTLRTokenStream> tokenStream = nil;
-	if (theDebugListener) {
-		debugger = [(id<ANTLRDebugEventListener,NSObject>)theDebugListener retain];
-		debugger = theDebugListener;
-	} else {
-		debugger = [[ANTLRDebugEventProxy alloc] initWithGrammarName:[self grammarFileName] debuggerPort:portNumber];
-	}
-	if (theStream && ![theStream isKindOfClass:[ANTLRDebugTokenStream class]]) {
-		tokenStream = [[ANTLRDebugTokenStream alloc] initWithTokenStream:theStream debugListener:debugger];
-	} else {
-		tokenStream = [theStream retain];
-		tokenStream = theStream;
-	}
-	self = [super initWithTokenStream:tokenStream];
-	if (self) {
-		[self setDebugListener:debugger];
-		[debugger release];
-		[tokenStream release];
-		[debugListener waitForDebuggerConnection];
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-    [self setDebugListener: nil];
-    [super dealloc];
-}
-
-- (id<ANTLRDebugEventListener>) debugListener
-{
-    return debugListener; 
-}
-
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener
-{
-    if (debugListener != aDebugListener) {
-        [(id<ANTLRDebugEventListener,NSObject>)aDebugListener retain];
-        [(id<ANTLRDebugEventListener,NSObject>)debugListener release];
-        debugListener = aDebugListener;
-    }
-}
-
-#pragma mark -
-#pragma mark Overrides
-
-- (void) beginResync
-{
-	[debugListener beginResync];
-}
-
-- (void) endResync
-{
-	[debugListener endResync];
-}
-- (void)beginBacktracking:(NSInteger)level
-{
-	[debugListener beginBacktrack:level];
-}
-
-- (void)endBacktracking:(NSInteger)level wasSuccessful:(BOOL)successful
-{
-	[debugListener endBacktrack:level wasSuccessful:successful];
-}
-
-@end
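
The designated initializer above either adopts a caller-supplied listener or creates an ANTLRDebugEventProxy on the given port, wraps the raw token stream in an ANTLRDebugTokenStream, and then blocks in waitForDebuggerConnection until ANTLRWorks attaches. A minimal usage sketch in the same manual retain/release style; MyDebugParser, the token stream setup, and the port number are illustrative assumptions, not part of this file:

    ANTLRCommonTokenStream *tokens = /* token stream built from your lexer */;
    MyDebugParser *parser =
        [[MyDebugParser alloc] initWithTokenStream:tokens
                                     debugListener:nil       // nil -> a proxy is created internally
                                      debuggerPort:49100];   // blocks here until ANTLRWorks connects
    /* invoke a start rule, then release parser and tokens as usual under MRC */
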
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTokenStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTokenStream.h
deleted file mode 100644
index 9f6c438..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRDebugTokenStream.h"
-#import "ANTLRDebugEventListener.h"
-
-@interface ANTLRDebugTokenStream : NSObject <ANTLRTokenStream>
-{
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTokenStream> input;
-	BOOL initialStreamState;
-    NSInteger lastMarker;
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream debugListener:(id<ANTLRDebugEventListener>)debugger;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTokenStream>) input;
-- (void) setInput:(id<ANTLRTokenStream>)aTokenStream;
-
-- (void) consume;
-- (id<ANTLRToken>) getToken:(NSInteger)index;
-- (NSInteger) getIndex;
-- (void) release:(NSInteger)marker;
-- (void) seek:(NSInteger)index;
-- (NSInteger) size;
-- (id<ANTLRTokenSource>) getTokenSource;
-- (NSString *) getSourceName;
-- (NSString *) toString;
-- (NSString *) toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTokenStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTokenStream.m
deleted file mode 100644
index 8a294c9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTokenStream.m
+++ /dev/null
@@ -1,204 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDebugTokenStream.h"
-
-
-@implementation ANTLRDebugTokenStream
-
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream debugListener:(id<ANTLRDebugEventListener>)debugger
-{
-	self = [super init];
-	if (self) {
-		[self setDebugListener:debugger];
-		[self setInput:theStream];
-		[self.input LT:1];	// force reading first on-channel token
-		initialStreamState = YES;
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-    [self setDebugListener:nil];
-    self.input = nil;
-    [super dealloc];
-}
-
-
-- (id<ANTLRDebugEventListener>) debugListener
-{
-    return debugListener; 
-}
-
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener
-{
-    if (debugListener != aDebugListener) {
-        [(id<ANTLRDebugEventListener,NSObject>)aDebugListener retain];
-        [(id<ANTLRDebugEventListener,NSObject>)debugListener release];
-        debugListener = aDebugListener;
-    }
-}
-
-- (id<ANTLRTokenStream>) input
-{
-    return input; 
-}
-
-- (void) setInput: (id<ANTLRTokenStream>) aTokenStream
-{
-    if (input != aTokenStream) {
-        if ( input ) [input release];
-        input = aTokenStream;
-        [input retain];
-    }
-}
-
-- (void) consumeInitialHiddenTokens
-{
-	int firstIdx = input.index;
-	for (int i = 0; i<firstIdx; i++)
-		[debugListener consumeHiddenToken:[input getToken:i]];
-	initialStreamState = NO;
-}
-
-#pragma mark -
-#pragma mark Proxy implementation
-
-// anything else that doesn't have a debugger event associated with it is simply
-// forwarded to the actual token stream
-- (void) forwardInvocation:(NSInvocation *)anInvocation
-{
-	[anInvocation invokeWithTarget:self.input];
-}
-
-- (void) consume
-{
-	if ( initialStreamState )
-		[self consumeInitialHiddenTokens];
-	int a = input.index;
-	id<ANTLRToken> token = [input LT:1];
-	[input consume];
-	int b = input.index;
-	[debugListener consumeToken:token];
-	if (b > a+1) // must have consumed hidden tokens
-		for (int i = a+1; i < b; i++)
-			[debugListener consumeHiddenToken:[input getToken:i]];
-}
-
-- (NSInteger) mark
-{
-	lastMarker = [input mark];
-	[debugListener mark:lastMarker];
-	return lastMarker;
-}
-
-- (void) rewind
-{
-	[debugListener rewind];
-	[input rewind];
-}
-
-- (void) rewind:(NSInteger)marker
-{
-	[debugListener rewind:marker];
-	[input rewind:marker];
-}
-
-- (id<ANTLRToken>) LT:(NSInteger)k
-{
-	if ( initialStreamState )
-		[self consumeInitialHiddenTokens];
-	[debugListener LT:k foundToken:[input LT:k]];
-	return [input LT:k];
-}
-
-- (NSInteger) LA:(NSInteger)k
-{
-	if ( initialStreamState )
-		[self consumeInitialHiddenTokens];
-	[debugListener LT:k foundToken:[input LT:k]];
-	return [input LA:k];
-}
-
-- (id<ANTLRToken>) getToken:(NSInteger)i
-{
-    return [input getToken:i];
-}
-
-- (NSInteger) getIndex
-{
-    return input.index;
-}
-
-- (void) release:(NSInteger) marker
-{
-}
-
-- (void) seek:(NSInteger)index
-{
-    // TODO: implement seek in dbg interface
-    // db.seek(index);
-    [input seek:index];
-}
-
-- (NSInteger) size
-{
-    return [input size];
-}
-
-- (id<ANTLRTokenSource>) getTokenSource
-{
-    return [input getTokenSource];
-}
-
-- (NSString *) getSourceName
-{
-    return [[input getTokenSource] getSourceName];
-}
-
-- (NSString *) description
-{
-    return [input toString];
-}
-
-- (NSString *) toString
-{
-    return [input toString];
-}
-
-- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex
-{
-    return [input toStringFromStart:startIndex ToEnd:stopIndex];
-}
-
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken
-{
-    return [input toStringFromStart:[startToken getStart] ToEnd:[stopToken getStopToken]];
-}
-
-@end
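
Two details above are easy to miss: consume compares the stream index before and after the wrapped consume and reports every skipped index as a hidden token, and forwardInvocation: makes the wrapper a transparent proxy, so any ANTLRTokenStream message that has no debug event still reaches the wrapped stream. Wrapping an existing stream is a one-liner; this sketch assumes you already have a stream and a listener such as the ANTLRDebugEventProxy elsewhere in this framework:

    id<ANTLRTokenStream> raw = /* your existing token stream */;
    id<ANTLRDebugEventListener> listener = /* e.g. an ANTLRDebugEventProxy */;
    ANTLRDebugTokenStream *debugStream =
        [[ANTLRDebugTokenStream alloc] initWithTokenStream:raw debugListener:listener];
    // debugStream now emits consumeToken/consumeHiddenToken/LT/mark/rewind events
    // and behaves like the wrapped stream for everything else.
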
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeAdaptor.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeAdaptor.h
deleted file mode 100644
index f8dd07e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeAdaptor.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRParser.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRDebugEventListener.h"
-
-@interface ANTLRDebugTreeAdaptor : ANTLRBaseTreeAdaptor {
-	id<ANTLRDebugEventListener> debugListener;
-	ANTLRCommonTreeAdaptor *treeAdaptor;
-}
-
-- (id) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)aTreeAdaptor debugListener:(id<ANTLRDebugEventListener>)aDebugListener;
-
-- (id<ANTLRDebugEventListener>)debugListener;
-- (void) setDebugListener:(id<ANTLRDebugEventListener>)aDebugListener;
-
-- (ANTLRCommonTreeAdaptor *) getTreeAdaptor;
-- (void) setTreeAdaptor:(ANTLRCommonTreeAdaptor *)aTreeAdaptor;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeAdaptor.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeAdaptor.m
deleted file mode 100644
index 01c4c6e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeAdaptor.m
+++ /dev/null
@@ -1,229 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDebugTreeAdaptor.h"
-
-
-@implementation ANTLRDebugTreeAdaptor
-
-
-- (id) initWithTreeAdaptor:(ANTLRCommonTreeAdaptor *)aTreeAdaptor debugListener:(id<ANTLRDebugEventListener>)aDebugListener
-{
-	self = [super init];
-	if (self) {
-		[self setDebugListener:aDebugListener];
-		[self setTreeAdaptor:aTreeAdaptor];
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-    [self setDebugListener: nil];
-    [self setTreeAdaptor: nil];
-    [super dealloc];
-}
-
-- (id<ANTLRDebugEventListener>) debugListener
-{
-    return debugListener; 
-}
-
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener
-{
-    if (debugListener != aDebugListener) {
-        [(id<ANTLRTreeAdaptor,NSObject>)aDebugListener retain];
-        [(id<ANTLRTreeAdaptor,NSObject>)debugListener release];
-        debugListener = aDebugListener;
-    }
-}
-
-- (ANTLRCommonTreeAdaptor *) getTreeAdaptor
-{
-    return treeAdaptor; 
-}
-
-- (void) setTreeAdaptor: (ANTLRCommonTreeAdaptor *) aTreeAdaptor
-{
-    if (treeAdaptor != aTreeAdaptor) {
-        [aTreeAdaptor retain];
-        [treeAdaptor release];
-        treeAdaptor = aTreeAdaptor;
-    }
-}
-
-#pragma mark -
-#pragma mark Proxy implementation
-
-// anything else that doesn't have a debugger event associated with it is simply
-// forwarded to the actual tree adaptor
-- (void) forwardInvocation:(NSInvocation *)anInvocation
-{
-	[anInvocation invokeWithTarget:[self getTreeAdaptor]];
-}
-
-#pragma mark -
-
-#pragma mark Construction
-
-- (id<ANTLRBaseTree>) newANTLRTreeWithToken:(id<ANTLRToken>) payload
-{
-	id<ANTLRBaseTree> newTree = [ANTLRCommonTree newTreeWithToken:payload];
-	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] fromTokenAtIndex:[payload getTokenIndex]];
-	return newTree;
-}
-
-- (id<ANTLRBaseTree>) emptyTree
-{
-	id<ANTLRBaseTree> newTree = [treeAdaptor newEmptyTree];
-	[debugListener createNilNode:[treeAdaptor uniqueIdForTree:newTree]];
-	return newTree;
-}
-
-/*	We don't have debug events for those:
- - (id) copyNode:(id<ANTLRBaseTree>)aNode
-{
-}
-- (id) copyTree:(id<ANTLRBaseTree>)aTree
-{
-}
-*/
-
-- (void) addChild:(id<ANTLRBaseTree>)child toTree:(id<ANTLRBaseTree>)aTree
-{
-	[treeAdaptor addChild:child toTree:aTree];
-	[debugListener addChild:[treeAdaptor uniqueIdForTree:child] toTree:[self uniqueIdForTree:aTree]];
-}
-
-- (id<ANTLRBaseTree>) becomeRoot:(id<ANTLRBaseTree>)newRoot old:(id<ANTLRBaseTree>)oldRoot
-{
-	id<ANTLRBaseTree> newTree = [treeAdaptor becomeRoot:newRoot old:oldRoot];
-	[debugListener becomeRoot:[treeAdaptor uniqueIdForTree:newTree] old:[self uniqueIdForTree:oldRoot]];
-	return newTree;
-}
-
-/* handle by forwardInvocation: 
-- (NSUInteger) uniqueIdForTree:(id<ANTLRBaseTree>)aNode
-{
-}
-*/
-
-#pragma mark Rewrite Rules
-
- - (void) addTokenAsChild:(id<ANTLRToken>)child toTree:(id<ANTLRBaseTree>)aTree
-{
-	id<ANTLRBaseTree> newChild = [self newANTLRTreeWithToken:child];
-	[self addChild:newChild toTree:aTree];
-}
-
-- (id<ANTLRBaseTree>) makeToken:(id<ANTLRToken>)newRoot parentOf:(id<ANTLRBaseTree>)oldRoot
-{
-	id<ANTLRBaseTree> newNode = [self newANTLRTreeWithToken:newRoot];
-	return [self becomeRoot:newNode old:oldRoot];
-}
-
-- (id<ANTLRBaseTree>) newANTLRTreeWithTokenType:(NSInteger)tokenType
-{
-	id<ANTLRBaseTree> newTree = [treeAdaptor newANTLRTreeWithTokenType:tokenType];
-	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] text:nil type:tokenType];
-	return newTree;
-}
-
-- (id<ANTLRBaseTree>) newANTLRTreeWithTokenType:(NSInteger)tokenType text:(NSString *)tokenText
-{
-	id<ANTLRBaseTree> newTree = [treeAdaptor newANTLRTreeWithTokenType:tokenType text:tokenText];
-	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] text:tokenText type:tokenType];
-	return newTree;
-}
-- (id<ANTLRBaseTree>) newANTLRTreeWithToken:(id<ANTLRToken>)fromToken tokenType:(NSInteger)tokenType
-{
-	id<ANTLRBaseTree> newTree = [treeAdaptor newANTLRTreeWithToken:fromToken tokenType:tokenType];
-	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] text:fromToken.text type:tokenType];
-	return newTree;
-}
-
-- (id<ANTLRBaseTree>) newANTLRTreeWithToken:(id<ANTLRToken>)fromToken tokenType:(NSInteger)tokenType text:(NSString *)tokenText
-{
-	id<ANTLRBaseTree> newTree = [treeAdaptor newANTLRTreeWithToken:fromToken tokenType:tokenType text:tokenText];
-	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] text:tokenText type:tokenType];
-	return newTree;
-}
-
-- (id<ANTLRBaseTree>) newANTLRTreeWithToken:(id<ANTLRToken>)fromToken text:(NSString *)tokenText
-{
-	id<ANTLRBaseTree> newTree = [treeAdaptor newANTLRTreeWithToken:fromToken text:tokenText];
-	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] text:tokenText type:fromToken.type];
-	return newTree;
-}
-
-#pragma mark Content
-
-/* handled by forwardInvocation:
-- (NSInteger) tokenTypeForNode:(id<ANTLRBaseTree>)aNode
-{
-}
- 
-- (void) setTokenType:(NSInteger)tokenType forNode:(id)aNode
-{
-}
-
-- (NSString *) textForNode:(id<ANTLRBaseTree>)aNode
-{
-}
- 
-- (void) setText:(NSString *)tokenText forNode:(id<ANTLRBaseTree>)aNode
-{
-}
-*/
-- (void) setBoundariesForTree:(id<ANTLRBaseTree>)aTree fromToken:(id<ANTLRToken>)startToken toToken:(id<ANTLRToken>)stopToken
-{
-	[treeAdaptor setBoundariesForTree:aTree fromToken:startToken toToken:stopToken];
-	if (aTree && startToken && stopToken) {
-		[debugListener setTokenBoundariesForTree:[aTree hash] From:[startToken getTokenIndex] To:[stopToken getTokenIndex]];
-	}
-}
-/* handled by forwardInvocation:
-- (NSInteger) tokenStartIndexForTree:(id<ANTLRBaseTree>)aTree
-{
-}
- 
-- (NSInteger) tokenStopIndexForTree:(id<ANTLRBaseTree>)aTree
-{
-}
-*/
-
-#pragma mark Navigation / Tree Parsing
-/* handled by forwardInvocation:
-- (id<ANTLRBaseTree>) childForNode:(id<ANTLRBaseTree>) aNode atIndex:(NSInteger) i
-{
-}
- 
-- (NSInteger) childCountForTree:(id<ANTLRBaseTree>) aTree
-{
-}
-*/
-
-@end
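
Every factory method above delegates node construction to the wrapped ANTLRCommonTreeAdaptor and then reports the result keyed by uniqueIdForTree:, so ANTLRWorks can rebuild the AST incrementally from nilNode/createNode/becomeRoot/addChild/setTokenBoundaries events. A sketch of installing the wrapper on a parser; newTreeAdaptor and the parser's setTreeAdaptor: accessor are assumed from elsewhere in the runtime and the generated code:

    ANTLRCommonTreeAdaptor *plain = [ANTLRCommonTreeAdaptor newTreeAdaptor];
    ANTLRDebugTreeAdaptor *debugAdaptor =
        [[ANTLRDebugTreeAdaptor alloc] initWithTreeAdaptor:plain debugListener:listener];
    [parser setTreeAdaptor:debugAdaptor];  // AST construction is now mirrored to the debugger
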
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeNodeStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeNodeStream.h
deleted file mode 100644
index 733f6fd..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeNodeStream.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRDebugEventListener.h"
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTreeNodeStream.h"
-
-@interface ANTLRDebugTreeNodeStream : NSObject <ANTLRTreeNodeStream> {
-	id<ANTLRDebugEventListener> debugListener;
-	id<ANTLRTreeAdaptor> treeAdaptor;
-	id<ANTLRTreeNodeStream> input;
-	BOOL initialStreamState;
-}
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream debugListener:(id<ANTLRDebugEventListener>)debugger;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (id<ANTLRTreeNodeStream>) input;
-- (void) setInput: (id<ANTLRTreeNodeStream>) aTreeNodeStream;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor: (id<ANTLRTreeAdaptor>) aTreeAdaptor;
-
-#pragma mark ANTLRTreeNodeStream conformance
-
-- (id) LT:(NSInteger)k;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setUniqueNavigationNodes:(BOOL)flag;
-
-#pragma mark ANTLRIntStream conformance
-- (void) consume;
-- (NSInteger) LA:(NSUInteger) i;
-- (NSUInteger) mark;
-- (NSUInteger) getIndex;
-- (void) rewind:(NSUInteger) marker;
-- (void) rewind;
-- (void) release:(NSUInteger) marker;
-- (void) seek:(NSUInteger) index;
-- (NSUInteger) size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeNodeStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeNodeStream.m
deleted file mode 100644
index 37f55de..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeNodeStream.m
+++ /dev/null
@@ -1,175 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDebugTreeNodeStream.h"
-
-
-@implementation ANTLRDebugTreeNodeStream
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream debugListener:(id<ANTLRDebugEventListener>)debugger
-{
-	self = [super init];
-	if (self) {
-		[self setDebugListener:debugger];
-		[self setTreeAdaptor:[theStream treeAdaptor]];
-		[self setInput:theStream];
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-    [self setDebugListener: nil];
-    [self setTreeAdaptor: nil];
-    input = nil;
-    [super dealloc];
-}
-
-- (id<ANTLRDebugEventListener>) debugListener
-{
-    return debugListener; 
-}
-
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener
-{
-    if (debugListener != aDebugListener) {
-        [(id<ANTLRDebugEventListener,NSObject>)aDebugListener retain];
-        [(id<ANTLRDebugEventListener,NSObject>)debugListener release];
-        debugListener = aDebugListener;
-    }
-}
-
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor
-{
-    return treeAdaptor; 
-}
-
-- (void) setTreeAdaptor: (id<ANTLRTreeAdaptor>) aTreeAdaptor
-{
-    if (treeAdaptor != aTreeAdaptor) {
-        [(id<ANTLRTreeAdaptor,NSObject>)aTreeAdaptor retain];
-        [(id<ANTLRTreeAdaptor,NSObject>)treeAdaptor release];
-        treeAdaptor = aTreeAdaptor;
-    }
-}
-
-
-- (id<ANTLRTreeNodeStream>) input
-{
-    return input; 
-}
-
-- (void) setInput:(id<ANTLRTreeNodeStream>) aTreeNodeStream
-{
-    if (input != aTreeNodeStream) {
-        [input release];
-        [(id<ANTLRTreeNodeStream,NSObject>)aTreeNodeStream retain];
-    }
-    input = aTreeNodeStream;
-}
-
-
-#pragma mark ANTLRTreeNodeStream conformance
-
-- (id) LT:(NSInteger)k
-{
-	id node = [input LT:k];
-	unsigned hash = [treeAdaptor uniqueIdForTree:node];
-	NSString *text = [treeAdaptor textForNode:node];
-	int type = [treeAdaptor tokenTypeForNode:node];
-	[debugListener LT:k foundNode:hash ofType:type text:text];
-	return node;
-}
-
-- (void) setUniqueNavigationNodes:(BOOL)flag
-{
-	[input setUniqueNavigationNodes:flag];
-}
-
-#pragma mark ANTLRIntStream conformance
-- (void) consume
-{
-	id node = [input LT:1];
-	[input consume];
-	unsigned hash = [treeAdaptor uniqueIdForTree:node];
-	NSString *theText = [treeAdaptor textForNode:node];
-	int aType = [treeAdaptor tokenTypeForNode:node];
-	[debugListener consumeNode:hash ofType:aType text:theText];
-}
-
-- (NSInteger) LA:(NSUInteger) i
-{
-	id<ANTLRBaseTree> node = [self LT:i];
-	return node.type;
-}
-
-- (NSUInteger) mark
-{
-	unsigned lastMarker = [input mark];
-	[debugListener mark:lastMarker];
-	return lastMarker;
-}
-
-- (NSUInteger) getIndex
-{
-	return input.index;
-}
-
-- (void) rewind:(NSUInteger) marker
-{
-	[input rewind:marker];
-	[debugListener rewind:marker];
-}
-
-- (void) rewind
-{
-	[input rewind];
-	[debugListener rewind];
-}
-
-- (void) release:(NSUInteger) marker
-{
-	[input release:marker];
-}
-
-- (void) seek:(NSUInteger) index
-{
-	[input seek:index];
-	// todo: seek missing in debug protocol
-}
-
-- (NSUInteger) size
-{
-	return [input size];
-}
-
-- (NSString *) toStringFromToken:(id)startNode ToToken:(id)stopNode
-{
-    return [input toStringFromToken:(id<ANTLRToken>)startNode ToToken:(id<ANTLRToken>)stopNode];
-}
-
-@end
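
This wrapper plays the same role for tree parsers that ANTLRDebugTokenStream plays for token parsers: LT: and consume look up the node's unique id, type, and text through the tree adaptor and report them, so the debugger can highlight the node currently being walked. A sketch, assuming an existing node stream such as ANTLRCommonTreeNodeStream and a listener:

    id<ANTLRTreeNodeStream> nodes = /* e.g. an ANTLRCommonTreeNodeStream over your AST */;
    ANTLRDebugTreeNodeStream *debugNodes =
        [[ANTLRDebugTreeNodeStream alloc] initWithTreeNodeStream:nodes debugListener:listener];
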
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeParser.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeParser.h
deleted file mode 100644
index 171c1e7..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeParser.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeParser.h"
-#import "ANTLRDebugEventProxy.h"
-#import "ANTLRDebugTreeNodeStream.h"
-
-@interface ANTLRDebugTreeParser : ANTLRTreeParser {
-	id<ANTLRDebugEventListener> debugListener;
-}
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream;
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-				 debuggerPort:(NSInteger)portNumber;
-	// designated initializer
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-				debugListener:(id<ANTLRDebugEventListener>)theDebugListener
-				 debuggerPort:(NSInteger)portNumber;
-
-- (id<ANTLRDebugEventListener>) debugListener;
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener;
-
-- (void) recoverFromMismatchedToken:(id<ANTLRIntStream>)inputStream 
-						  exception:(NSException *)e 
-						  tokenType:(ANTLRTokenType)ttype 
-							 follow:(ANTLRBitSet *)follow;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeParser.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeParser.m
deleted file mode 100644
index 5692993..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDebugTreeParser.m
+++ /dev/null
@@ -1,128 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRDebugTreeParser.h"
-
-
-@implementation ANTLRDebugTreeParser
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-{
-	return [self initWithTreeNodeStream:theStream debugListener:nil debuggerPort:-1];
-}
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-				 debuggerPort:(NSInteger)portNumber
-{
-	return [self initWithTreeNodeStream:theStream debugListener:nil debuggerPort:portNumber];
-}
-
-- (id) initWithTreeNodeStream:(id<ANTLRTreeNodeStream>)theStream
-				debugListener:(id<ANTLRDebugEventListener>)theDebugListener
-				 debuggerPort:(NSInteger)portNumber
-{
-	id<ANTLRDebugEventListener,NSObject> debugger = nil;
-	id<ANTLRTreeNodeStream> treeNodeStream = nil;
-	if (theDebugListener) {
-		debugger = (id<ANTLRDebugEventListener>)theDebugListener;
-	} else {
-		debugger = [[ANTLRDebugEventProxy alloc] initWithGrammarName:[self grammarFileName] debuggerPort:portNumber];
-	}
-	if (theStream && ![theStream isKindOfClass:[ANTLRDebugTreeNodeStream class]]) {
-		treeNodeStream = [[ANTLRDebugTreeNodeStream alloc] initWithTreeNodeStream:theStream debugListener:debugger];
-	} else {
-		treeNodeStream = theStream;
-	}
-	self = [super initWithStream:treeNodeStream];
-	if ( self ) {
-		[self setDebugListener:debugger];
-		//[debugger release];
-		//[treeNodeStream release];
-		[debugListener waitForDebuggerConnection];
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-    [self setDebugListener: nil];
-    [super dealloc];
-}
-
-- (id<ANTLRDebugEventListener>) debugListener
-{
-    return debugListener; 
-}
-
-- (void) setDebugListener: (id<ANTLRDebugEventListener>) aDebugListener
-{
-    if (debugListener != aDebugListener) {
-        [(id<ANTLRDebugEventListener,NSObject>)aDebugListener retain];
-        [(id<ANTLRDebugEventListener,NSObject>)debugListener release];
-        debugListener = aDebugListener;
-    }
-}
-
-#pragma mark -
-#pragma mark Overrides
-
-- (void) beginResync
-{
-	[debugListener beginResync];
-}
-
-- (void) endResync
-{
-	[debugListener endResync];
-}
-- (void)beginBacktracking:(NSInteger)level
-{
-	[debugListener beginBacktrack:level];
-}
-
-- (void)endBacktracking:(NSInteger)level wasSuccessful:(BOOL)successful
-{
-	[debugListener endBacktrack:level wasSuccessful:successful];
-}
-
-- (void) recoverFromMismatchedToken:(id<ANTLRIntStream>)inputStream 
-						  exception:(NSException *)e 
-						  tokenType:(ANTLRTokenType)ttype 
-							 follow:(ANTLRBitSet *)follow
-{
-#warning TODO: recoverFromMismatchedToken in debugger
-	[self recoverFromMismatchedToken:inputStream exception:e follow:follow];
-}
-
-- (void) recoverFromMismatchedSet:(id<ANTLRIntStream>)inputStream
-						exception:(NSException *)e
-						   follow:(ANTLRBitSet *)follow
-{
-#warning TODO: recoverFromMismatchedSet in debugger
-	[super recoverFromMismatchedSet:inputStream];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDoubleKeyMap.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRDoubleKeyMap.h
deleted file mode 100644
index 7885879..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDoubleKeyMap.h
+++ /dev/null
@@ -1,28 +0,0 @@
-
-#import "AMutableArray.h"
-#import "AMutableDictionary.h"
-#import "ANTLRLinkBase.h"
-/**
- * Sometimes we need to map a key to a value but key is two pieces of data.
- * This nested hash table saves creating a single key each time we access
- * map; avoids mem creation.
- */
-
-@class AMutableArray;
-
-@interface ANTLRDoubleKeyMap : ANTLRLinkBase {
-    AMutableDictionary *data;
-}
-
-- (id) init;
-- (id) setObject:(id)v forKey1:(id)k1 forKey2:(NSString *)k2;
-- (id) objectForKey1:(id)k1 forKey2:(id)k2;
-- (AMutableDictionary *) objectForKey:(id)k1;
-- (NSArray *) valuesForKey:(id)k1;
-- (NSArray *) allKeys1;
-- (AMutableArray *) allKeys2:(id)k1;
-- (NSArray *) values;
-
-@property (retain) AMutableDictionary *data;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRDoubleKeyMap.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRDoubleKeyMap.m
deleted file mode 100644
index 3642ef0..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRDoubleKeyMap.m
+++ /dev/null
@@ -1,101 +0,0 @@
-#import "ANTLRDoubleKeyMap.h"
-
-@implementation ANTLRDoubleKeyMap
-
-- (id) init
-{
-    self = [super init];
-    if ( self  != nil ) {
-        data = [[AMutableDictionary dictionaryWithCapacity:30] retain];
-    }
-    return self;
-}
-
-- (id) setObject:(id)v forKey1:(id)k1 forKey2:(id)k2
-{
-    AMutableDictionary *data2 = [data objectForKey:k1];
-    id prev = nil;
-    if ( data2 == nil ) {
-        data2 = [AMutableDictionary dictionaryWithCapacity:30];
-        [data setObject:data2 forKey:k1];
-    }
-    else {
-        prev = [data2 objectForKey:k2];
-    }
-    [data2 setObject:v forKey:k2];
-    return prev;
-}
-
-- (id) objectForKey1:(id)k1 forKey2:(id)k2
-{
-    AMutableDictionary *data2 = [data objectForKey:k1];
-    if ( data2 == nil )
-        return nil;
-    return [data2 objectForKey:k2];
-}
-
-- (AMutableDictionary *) objectForKey:(id)k1
-{
-    return [data objectForKey:k1];
-}
-
-
-/**
- * Get all values associated with primary key
- */
-- (NSArray *) valuesForKey:(id)k1
-{
-    AMutableDictionary *data2 = [data objectForKey:k1];
-    if ( data2 == nil )
-        return nil;
-    return [data2 allValues];
-}
-
-
-/**
- * get all primary keys
- */
-- (NSArray *) allKeys1
-{
-    return [data allKeys];
-}
-
-
-/**
- * get all secondary keys associated with a primary key
- */
-- (NSArray *) allKeys2:(id)k1
-{
-    AMutableDictionary * data2 = [data objectForKey:k1];
-    if ( data2 == nil )
-        return nil;
-    return [data2 allKeys];
-}
-
-- (AMutableArray *) values
-{
-//    ANTLRHashMap *s = [[ANTLRHashMap newANTLRHashMapWithLen:30];
-    AMutableArray *s = [AMutableArray arrayWithCapacity:30];
-    
-    for (AMutableDictionary *k2 in [data allValues]) {
-        
-        for ( NSString *v in [k2 allValues]) {
-            [s addObject:v];
-        }
-        
-    }
-    
-    return s;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRCommonToken" );
-#endif
-    [data release];
-    [super dealloc];
-}
-
-@synthesize data;
-@end
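
ANTLRDoubleKeyMap is a two-level dictionary: the primary key selects an inner AMutableDictionary and the secondary key selects the value, so no composite key object ever has to be built. setObject:forKey1:forKey2: returns the previous value for that key pair, if any. A usage sketch based on the methods above (the string keys and values are illustrative):

    ANTLRDoubleKeyMap *map = [[ANTLRDoubleKeyMap alloc] init];
    [map setObject:@"value" forKey1:@"outer" forKey2:@"inner"];
    id v        = [map objectForKey1:@"outer" forKey2:@"inner"];  // @"value"
    NSArray *vs = [map valuesForKey:@"outer"];                    // all values under @"outer"
    NSArray *ks = [map allKeys1];                                 // all primary keys
    [map release];
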
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLREarlyExitException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLREarlyExitException.h
deleted file mode 100644
index 07e840d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLREarlyExitException.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLREarlyExitException : ANTLRRecognitionException {
-	int decisionNumber;
-}
-
-+ (ANTLREarlyExitException *) newException:(id<ANTLRIntStream>)anInputStream decisionNumber:(NSInteger)aDecisionNumber;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream decisionNumber:(NSInteger) aDecisionNumber;
-
-@property int decisionNumber;
-@end
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLREarlyExitException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLREarlyExitException.m
deleted file mode 100644
index 81ffb26..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLREarlyExitException.m
+++ /dev/null
@@ -1,54 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLREarlyExitException.h"
-
-
-@implementation ANTLREarlyExitException
-
-+ (ANTLREarlyExitException *) newException:(id<ANTLRIntStream>) anInputStream decisionNumber:(NSInteger) aDecisionNumber
-{
-	return [[self alloc] initWithStream:anInputStream decisionNumber:aDecisionNumber];
-}
-
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream decisionNumber:(NSInteger) aDecisionNumber
-{
-	if ((self = [super initWithStream:anInputStream]) != nil) {
-		decisionNumber = aDecisionNumber;
-	}
-	return self;
-}
-
-- (NSString *) description
-{
-	NSMutableString *desc = (NSMutableString *)[super description];
-	[desc appendFormat:@" decision:%d", decisionNumber];
-	return desc;
-}
-
-@synthesize decisionNumber;
-@end
-
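
Generated code raises this exception when a (...)+ subrule fails to match at least one iteration; the decision number identifies the offending loop, and description appends it to the base message. A sketch using the factory above (the decision number is illustrative, and this assumes ANTLRRecognitionException ultimately derives from NSException, as elsewhere in this runtime):

    @throw [ANTLREarlyExitException newException:input decisionNumber:7];
    // the exception's description then ends with " decision:7"
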
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRError.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRError.h
deleted file mode 100644
index 9a15702..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRError.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//
-//  ANTLRError.h
-//  ANTLR
-//
-//  Created by Ian Michell on 30/03/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-// [The "BSD licence"]
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-#define ANTLRErrorDomain @"ANTLRError"
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRFailedPredicateException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRFailedPredicateException.h
deleted file mode 100644
index afbb653..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRFailedPredicateException.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-
-@interface ANTLRFailedPredicateException : ANTLRRecognitionException
-{
-	NSString *predicate;
-	NSString *ruleName;
-}
-
-@property (retain) NSString *predicate;
-@property (retain) NSString *ruleName;
-
-+ (ANTLRFailedPredicateException *) newException:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<ANTLRIntStream>)theStream;
-- (ANTLRFailedPredicateException *) initWithRuleName:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<ANTLRIntStream>)theStream;
-
-#ifdef DONTUSEYET
-- (NSString *) getPredicate;
-- (void) setPredicate:(NSString *)thePredicate;
-- (NSString *) getRuleName;
-- (void) setRuleName:(NSString *)theRuleName;
-#endif
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRFailedPredicateException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRFailedPredicateException.m
deleted file mode 100644
index caf7716..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRFailedPredicateException.m
+++ /dev/null
@@ -1,96 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRFailedPredicateException.h"
-
-
-@implementation ANTLRFailedPredicateException
-
-@synthesize predicate;
-@synthesize ruleName;
-
-+ (ANTLRFailedPredicateException *) newException:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<ANTLRIntStream>)theStream
-{
-	return [[ANTLRFailedPredicateException alloc] initWithRuleName:theRuleName predicate:thePredicate stream:theStream];
-}
-
-- (ANTLRFailedPredicateException *) initWithRuleName:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<ANTLRIntStream>)theStream
-{
-	if ((self = [super initWithStream:theStream])) {
-		[self setPredicate:thePredicate];
-		[self setRuleName:theRuleName];
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRFailedPredicateException" );
-#endif
-	[self setPredicate:nil];
-	[self setRuleName:nil];
-	[super dealloc];
-}
-
-- (NSString *) description
-{
-	NSMutableString *desc = (NSMutableString *)[super description];
-	[desc appendFormat:@" rule: %@ predicate failed: %@", ruleName, predicate];
-	return desc;
-}
-
-#ifdef DONTUSEYET
-- (NSString *) getPredicate
-{
-	return predicate;
-}
-
-- (void) setPredicate:(NSString *)thePredicate
-{
-	if (thePredicate != predicate) {
-		[thePredicate retain];
-		if ( predicate ) [predicate release];
-		predicate = thePredicate;
-	}
-}
-
-- (NSString *) getRuleName
-{
-	return ruleName;
-}
-
-- (void) setRuleName:(NSString *)theRuleName
-{
-	if (theRuleName != ruleName) {
-		[theRuleName retain];
-		if ( ruleName ) [ruleName release];
-		ruleName = theRuleName;
-	}
-}
-#endif
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRFastQueue.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRFastQueue.h
deleted file mode 100644
index f5ba582..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRFastQueue.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//
-//  ANTLRFastQueue.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "AMutableArray.h"
-
-@interface ANTLRFastQueue : NSObject <NSCopying>
-{
-    __strong AMutableArray *data;
-    NSUInteger p;
-    NSUInteger range;
-}
-
-@property (retain) AMutableArray *data;
-@property (assign) NSUInteger p;
-@property (assign) NSUInteger range;
-
-+ (id) newANTLRFastQueue;
-
-- (id) init;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (void) reset;
-- (id) remove;
-- (void) addObject:(id) obj;
-- (NSUInteger) count;
-- (NSUInteger) size;
-- (NSUInteger) range;
-- (id) head;
-- (id) objectAtIndex:(NSUInteger) i;
-- (void) clear;
-- (NSString *) toString;
-- (NSString *) description;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRFastQueue.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRFastQueue.m
deleted file mode 100644
index 6944b6d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRFastQueue.m
+++ /dev/null
@@ -1,174 +0,0 @@
-//
-//  ANTLRFastQueue.m
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRFastQueue.h"
-#import "ANTLRError.h"
-#import "ANTLRRuntimeException.h"
-
-@implementation ANTLRFastQueue
-
-//@synthesize pool;
-@synthesize data;
-@synthesize p;
-@synthesize range;
-
-+ (id) newANTLRFastQueue
-{
-    return [[ANTLRFastQueue alloc] init];
-}
-
-- (id) init
-{
-	self = [super init];
-	if ( self != nil ) {
-		data = [[AMutableArray arrayWithCapacity:100] retain];
-		p = 0;
-		range = -1;
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRFastQueue" );
-#endif
-	if ( data ) [data release];
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRFastQueue *copy;
-    
-    copy = [[[self class] allocWithZone:aZone] init];
-    copy.data = [data copyWithZone:nil];
-    copy.p = p;
-    copy.range = range;
-    return copy;
-}
-
-// FIXME: Java code has this, it doesn't seem like it needs to be there... Then again a lot of the code in the java runtime is not great...
-- (void) reset
-{
-	[self clear];
-}
-
-- (void) clear
-{
-	p = 0;
-    if ( [data count] )
-        [data removeAllObjects];
-}
-
-- (id) remove
-{
-	id obj = [self objectAtIndex:0];
-	p++;
-	// check to see if we have hit the end of the buffer
-	if ( p == [data count] ) {
-		// if we have, then we need to clear it out
-		[self clear];
-	}
-	return obj;
-}
-
-- (void) addObject:(id) obj
-{
-    [data addObject:obj];
-}
-
-- (NSUInteger) count
-{
-	return [data count];
-}
-
-- (NSUInteger) size
-{
-	return [data count] - p;
-}
-
-- (NSUInteger) range
-{
-    return range;
-}
-
-- (id) head
-{
-	return [self objectAtIndex:0];
-}
-
-- (id) objectAtIndex:(NSUInteger) i
-{
-    NSUInteger absIndex;
-
-    absIndex = p + i;
-	if ( absIndex >= [data count] ) {
-		@throw [ANTLRNoSuchElementException newException:[NSString stringWithFormat:@"queue index %d > last index %d", absIndex, [data count]-1]];
-	}
-	if ( absIndex < 0 ) {
-	    @throw [ANTLRNoSuchElementException newException:[NSString stringWithFormat:@"queue index %d < 0", absIndex]];
-	}
-	if ( absIndex > range ) range = absIndex;
-	return [data objectAtIndex:absIndex];
-}
-
-- (NSString *) toString
-{
-    return [self description];
-}
-
-- (NSString *) description
-{
-	NSMutableString *buf = [NSMutableString stringWithCapacity:30];
-	NSInteger n = [self size];
-	for (NSInteger i = 0; i < n; i++) {
-		[buf appendString:[[self objectAtIndex:i] description]];
-		if ((i + 1) < n) {
-			[buf appendString:@" "];
-		}
-	}
-	return buf;
-}
-
-#ifdef DONTUSENOMO
-- (NSAutoreleasePool *)getPool
-{
-    return pool;
-}
-
-- (void)setPool:(NSAutoreleasePool *)aPool
-{
-    pool = aPool;
-}
-#endif
-
-@end
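
ANTLRFastQueue is a plain FIFO over an AMutableArray with a moving head pointer p: remove advances p and clears the backing array once every buffered element has been consumed, while size reports only the elements not yet removed. A minimal usage sketch under the runtime's manual retain/release rules, using only the methods declared in the header; the string payloads are placeholders:

ANTLRFastQueue *q = [ANTLRFastQueue newANTLRFastQueue];
[q addObject:@"a"];
[q addObject:@"b"];
id peeked  = [q head];          // looks at "a" without consuming it
id removed = [q remove];        // returns "a" and advances the head pointer p
NSUInteger pending = [q size];  // 1 -- only "b" remains unconsumed
NSLog(@"peeked=%@ removed=%@ pending=%lu", peeked, removed, (unsigned long)pending);
[q clear];                      // resets p and empties the backing array
[q release];
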
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRFileStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRFileStream.h
deleted file mode 100644
index 6c9a881..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRFileStream.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-     notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-     notice, this list of conditions and the following disclaimer in the
-     documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-     derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#import "ANTLRStringStream.h"
-
-/** This is a char buffer stream that is loaded from a file
- *  all at once when you construct the object.  This looks very
- *  much like an ANTLReader or ANTLRInputStream, but it's a special case
- *  since we know the exact size of the object to load.  We can avoid lots
- *  of data copying. 
- */
-@interface ANTLRFileStream : ANTLRStringStream {
-	__strong NSString *fileName;
-}
-
-
-+ (id) newANTLRFileStream:(NSString*) fileName;
-+ (id) newANTLRFileStream:(NSString *)aFileName encoding:(NSStringEncoding)encoding;
-- (id) init:(NSString *) aFileName;
-- (id) init:(NSString *) aFileName encoding:(NSStringEncoding)encoding;
-- (void) load:(NSString *)fileName encoding:(NSStringEncoding)encoding;
-- (NSString *) getSourceName;
-
-@property (retain) NSString *fileName;
-
-@end
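
As the doc comment above says, ANTLRFileStream subclasses ANTLRStringStream and reads the whole file into memory when the object is constructed, so no further copying happens while lexing. A short sketch of the two constructors declared here; the file names and the Latin-1 choice are placeholders, not values taken from this runtime:

// Default path: contents are loaded immediately, assuming UTF-8.
ANTLRFileStream *input = [ANTLRFileStream newANTLRFileStream:@"input.txt"];

// Explicit-encoding variant for files that are not UTF-8.
ANTLRFileStream *legacy = [ANTLRFileStream newANTLRFileStream:@"legacy.txt"
                                                     encoding:NSISOLatin1StringEncoding];
NSLog(@"reading from %@", [input getSourceName]);
[legacy release];
[input release];
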
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRFileStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRFileStream.m
deleted file mode 100644
index f0e5417..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRFileStream.m
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Terence Parr
- All rights reserved.
- 
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
- derived from this software without specific prior written permission.
- 
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** This is a char buffer stream that is loaded from a file
- *  all at once when you construct the object.  This looks very
- *  much like an ANTLReader or ANTLRInputStream, but it's a special case
- *  since we know the exact size of the object to load.  We can avoid lots
- *  of data copying. 
- */
-
-#import "ANTLRFileStream.h"
-
-@implementation ANTLRFileStream
-
-@synthesize fileName;
-
-+ (id) newANTLRFileStream:(NSString*)fileName
-{
-    return [[ANTLRFileStream alloc] init:fileName];
-}
-
-+ (id) newANTLRFileStream:(NSString *)aFileName encoding:(NSStringEncoding)encoding
-{
-    return [[ANTLRFileStream alloc] init:aFileName encoding:encoding];
-}
-
-- (id) init:(NSString *)aFileName
-{
-    self = [super init];
-    if ( self != nil ) {
-        fileName = aFileName;
-        [self load:aFileName encoding:NSUTF8StringEncoding];
-    }
-    return self;
-}
-
-- (id) init:(NSString *) aFileName encoding:(NSStringEncoding)encoding
-{
-    self = [super init];
-    if ( self != nil ) {
-        fileName = aFileName;
-        [self load:aFileName encoding:encoding];
-    }
-    return self;
-}
-
-- (NSString *) getSourceName
-{
-    return fileName;
-}
-
-- (void) load:(NSString *)aFileName encoding:(NSStringEncoding)encoding
-{
-    if ( aFileName==nil ) {
-        return;
-    }
-    NSError *error;
-    NSData *retData = nil;
-    NSFileHandle *fh;
-    @try {
-        NSString *fn = [aFileName stringByStandardizingPath];
-        NSURL *f = [NSURL fileURLWithPath:fn];
-        fh = [NSFileHandle fileHandleForReadingFromURL:f error:&error];
-        if ( fh==nil ) {
-            return;
-        }
-        int numRead=0;
-        int p1 = 0;
-        retData = [fh readDataToEndOfFile];
-        numRead = [retData length];
-#pragma mark fix these NSLog calls
-        NSLog( @"read %d chars; p was %d is now %d", n, p1, (p1+numRead) );
-        p1 += numRead;
-        n = p1;
-        data = [[NSString alloc] initWithData:retData encoding:NSASCIIStringEncoding];
-#pragma mark fix these NSLog calls
-        NSLog( @"n=%d", n );
-    }
-    @finally {
-        [fh closeFile];
-    }
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRHashMap.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRHashMap.h
deleted file mode 100644
index 113cb65..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRHashMap.h
+++ /dev/null
@@ -1,110 +0,0 @@
-//
-//  ANTLRHashMap.h
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-#import "ANTLRMapElement.h"
-
-#define GLOBAL_SCOPE       0
-#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRHashMap : ANTLRLinkBase {
-    //    TStringPool *fPool;
-    NSInteger Scope;
-    NSInteger LastHash;
-    NSInteger BuffSize;
-    NSUInteger count;
-    NSUInteger ptr;
-    __strong NSMutableData *buffer;
-    __strong ANTLRMapElement **ptrBuffer;
-    NSInteger mode;
-}
-
-// Construction/Destruction
-+ (id)newANTLRHashMap;
-+ (id)newANTLRHashMapWithLen:(NSInteger)aBuffSize;
-- (id)init;
-- (id)initWithLen:(NSInteger)aBuffSize;
-- (void)dealloc;
-- (ANTLRHashMap *)PushScope:( ANTLRHashMap **)map;
-- (ANTLRHashMap *)PopScope:( ANTLRHashMap **)map;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-// Instance Methods
-/*    form hash value for string s */
-- (NSInteger)hash:(NSString *)s;
-/*   look for s in ptrBuffer  */
-- (ANTLRHashMap *)findscope:(int)level;
-/*   look for s in ptrBuffer  */
-- (id)lookup:(NSString *)s Scope:(int)scope;
-/*   look for s in ptrBuffer  */
-- (id)install:(ANTLRMapElement *)sym Scope:(int)scope;
-/*   look for s in ptrBuffer  */
-- (void)deleteANTLRHashMap:(ANTLRMapElement *)np;
-- (int)RemoveSym:(NSString *)s;
-- (void)delete_chain:(ANTLRMapElement *)np;
-#ifdef DONTUSEYET
-- (int)bld_symtab:(KW_TABLE *)toknams;
-#endif
-- (ANTLRMapElement **)getptrBuffer;
-- (ANTLRMapElement *)getptrBufferEntry:(int)idx;
-- (void)setptrBuffer:(ANTLRMapElement *)np Index:(int)idx;
-- (NSInteger)getScope;
-- (void)setScope:(NSInteger)i;
-- (ANTLRMapElement *)getTType:(NSString *)name;
-- (ANTLRMapElement *)getNameInList:(NSInteger)ttype;
-- (void)putNode:(NSString *)name TokenType:(NSInteger)ttype;
-- (NSInteger)getMode;
-- (void)setMode:(NSInteger)aMode;
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx;
-- (id) objectAtIndex:(NSInteger)idx;
-- (void) setObject:(id)aRule atIndex:(NSInteger)idx;
-- (void)addObject:(id)anObject;
-- (ANTLRMapElement *) getName:(NSString *)aName;
-- (void) putName:(NSString *)name Node:(id)aNode;
-
-- (NSEnumerator *)objectEnumerator;
-- (BOOL) hasNext;
-- (ANTLRMapElement *)nextObject;
-
-//@property (copy) TStringPool *fPool;
-@property (getter=getScope, setter=setScope:) NSInteger Scope;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
-
-@property (getter=getMode,setter=setMode:) NSInteger mode;
-@property NSInteger BuffSize;
-@property (getter=getCount, setter=setCount:) NSUInteger count;
-@property (assign) NSUInteger ptr;
-@property (retain, getter=getBuffer, setter=setBuffer:) NSMutableData *buffer;
-@property (assign, getter=getPtrBuffer, setter=setPtrBuffer:) ANTLRMapElement **ptrBuffer;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRHashMap.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRHashMap.m
deleted file mode 100644
index ab025a9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRHashMap.m
+++ /dev/null
@@ -1,529 +0,0 @@
-//
-//  ANTLRHashMap.m
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRHashMap.h"
-
-static NSInteger itIndex;
-
-/*
- * Start of ANTLRHashMap
- */
-@implementation ANTLRHashMap
-
-@synthesize Scope;
-@synthesize LastHash;
-
-+(id)newANTLRHashMap
-{
-    return [[ANTLRHashMap alloc] init];
-}
-
-+(id)newANTLRHashMapWithLen:(NSInteger)aBuffSize
-{
-    return [[ANTLRHashMap alloc] initWithLen:aBuffSize];
-}
-
--(id)init
-{
-    NSInteger idx;
-    
-    if ((self = [super init]) != nil) {
-        fNext = nil;
-        Scope = 0;
-        ptr = 0;
-        BuffSize = HASHSIZE;
-        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
-        ptrBuffer = (ANTLRMapElement **) [buffer mutableBytes];
-        if ( fNext != nil ) {
-            Scope = ((ANTLRHashMap *)fNext)->Scope+1;
-            for( idx = 0; idx < BuffSize; idx++ ) {
-                ptrBuffer[idx] = ((ANTLRHashMap *)fNext)->ptrBuffer[idx];
-            }
-        }
-        mode = 0;
-    }
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)aBuffSize
-{
-    NSInteger idx;
-    
-    if ((self = [super init]) != nil) {
-        fNext = nil;
-        BuffSize = aBuffSize;
-        Scope = 0;
-        ptr = 0;
-        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
-        ptrBuffer = (ANTLRMapElement **) [buffer mutableBytes];
-        if ( fNext != nil ) {
-            Scope = ((ANTLRHashMap *)fNext)->Scope+1;
-            for( idx = 0; idx < BuffSize; idx++ ) {
-                ptrBuffer[idx] = ((ANTLRHashMap *)fNext)->ptrBuffer[idx];
-            }
-        }
-        mode = 0;
-    }
-    return( self );
-}
-
--(void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRHashMap" );
-#endif
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp && tmp != [((ANTLRHashMap *)fNext) getptrBufferEntry:idx] ) {
-                rtmp = tmp;
-                // tmp = [tmp getfNext];
-                tmp = (ANTLRMapElement *)tmp.fNext;
-                [rtmp release];
-            }
-        }
-    }
-    if ( buffer ) [buffer release];
-    [super dealloc];
-}
-
-- (NSInteger)count
-{
-    NSInteger aCnt = 0;
-    
-    for (NSInteger i = 0; i < BuffSize; i++) {
-        if ( ptrBuffer[i] != nil ) {
-            aCnt++;
-        }
-    }
-    return aCnt;
-}
-                          
-- (NSInteger) size
-{
-    NSInteger aSize = 0;
-    
-    for (NSInteger i = 0; i < BuffSize; i++) {
-        if ( ptrBuffer[i] != nil ) {
-            aSize += sizeof(id);
-        }
-    }
-    return aSize;
-}
-                                  
-                                  
--(void)deleteANTLRHashMap:(ANTLRMapElement *)np
-{
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-    
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp && tmp != (ANTLRLinkBase *)[((ANTLRHashMap *)fNext) getptrBufferEntry:idx] ) {
-                rtmp = tmp;
-                tmp = [tmp getfNext];
-                [rtmp release];
-            }
-        }
-    }
-}
-
--(ANTLRHashMap *)PushScope:(ANTLRHashMap **)map
-{
-    NSInteger idx;
-    ANTLRHashMap *htmp;
-    
-    htmp = [ANTLRHashMap newANTLRHashMap];
-    if ( *map != nil ) {
-        ((ANTLRHashMap *)htmp)->fNext = *map;
-        [htmp setScope:[((ANTLRHashMap *)htmp->fNext) getScope]+1];
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            htmp->ptrBuffer[idx] = ((ANTLRHashMap *)htmp->fNext)->ptrBuffer[idx];
-        }
-    }
-    //    gScopeLevel++;
-    *map = htmp;
-    return( htmp );
-}
-
--(ANTLRHashMap *)PopScope:(ANTLRHashMap **)map
-{
-    NSInteger idx;
-    ANTLRMapElement *tmp;
-    ANTLRHashMap *htmp;
-    
-    htmp = *map;
-    if ( (*map)->fNext != nil ) {
-        *map = (ANTLRHashMap *)htmp->fNext;
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            if ( htmp->ptrBuffer[idx] == nil ||
-                htmp->ptrBuffer[idx] == (*map)->ptrBuffer[idx] ) {
-                break;
-            }
-            tmp = htmp->ptrBuffer[idx];
-            /*
-             * must deal with parms, locals and labels at some point
-             * can not forget the debuggers
-             */
-            htmp->ptrBuffer[idx] = [tmp getfNext];
-            [tmp release];
-        }
-        *map = (ANTLRHashMap *)htmp->fNext;
-        //        gScopeLevel--;
-    }
-    return( htmp );
-}
-
-#ifdef USERDOC
-/*
- *  HASH        hash entry to get index to table
- *  NSInteger hash( ANTLRHashMap *self, char *s );
- *
- *     Inputs:  char *s             string to find
- *
- *     Returns: NSInteger                 hashed value
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)hash:(NSString *)s       /*    form hash value for string s */
-{
-    NSInteger hashval;
-    const char *tmp;
-    
-    tmp = [s cStringUsingEncoding:NSASCIIStringEncoding];
-    for( hashval = 0; *tmp != '\0'; )
-        hashval += *tmp++;
-    self->LastHash = hashval % BuffSize;
-    return( self->LastHash );
-}
-
-#ifdef USERDOC
-/*
- *  FINDSCOPE  search hashed list for entry
- *  ANTLRHashMap *findscope( ANTLRHashMap *self, NSInteger scope );
- *
- *     Inputs:  NSInteger       scope -- scope level to find
- *
- *     Returns: ANTLRHashMap   pointer to ptrBuffer of proper scope level
- *
- *  Last Revision 9/03/90
- */
-#endif
--(ANTLRHashMap *)findscope:(NSInteger)scope
-{
-    if ( self->Scope == scope ) {
-        return( self );
-    }
-    else if ( fNext ) {
-        return( [((ANTLRHashMap *)fNext) findscope:scope] );
-    }
-    return( nil );              /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  LOOKUP  search hashed list for entry
- *  ANTLRMapElement *lookup( ANTLRHashMap *self, char *s, NSInteger scope );
- *
- *     Inputs:  char     *s          string to find
- *
- *     Returns: ANTLRMapElement  *           pointer to entry
- *
- *  Last Revision 9/03/90
- */
-#endif
--(id)lookup:(NSString *)s Scope:(NSInteger)scope
-{
-    ANTLRMapElement *np;
-    
-    for( np = self->ptrBuffer[[self hash:s]]; np != nil; np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  INSTALL search hashed list for entry
- *  NSInteger install( ANTLRHashMap *self, ANTLRMapElement *sym, NSInteger scope );
- *
- *     Inputs:  ANTLRMapElement    *sym   -- symbol ptr to install
- *              NSInteger         scope -- level to find
- *
- *     Returns: Boolean     TRUE   if installed
- *                          FALSE  if already in table
- *
- *  Last Revision 9/03/90
- */
-#endif
--(ANTLRMapElement *)install:(ANTLRMapElement *)sym Scope:(NSInteger)scope
-{
-    ANTLRMapElement *np;
-    
-    np = [self lookup:[sym getName] Scope:scope ];
-    if ( np == nil ) {
-        [sym retain];
-        [sym setFNext:self->ptrBuffer[ self->LastHash ]];
-        self->ptrBuffer[ self->LastHash ] = sym;
-        return( self->ptrBuffer[ self->LastHash ] );
-    }
-    return( nil );            /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  RemoveSym  search hashed list for entry
- *  NSInteger RemoveSym( ANTLRHashMap *self, char *s );
- *
- *     Inputs:  char     *s          string to find
- *
- *     Returns: NSInteger      indicator of SUCCESS OR FAILURE
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)RemoveSym:(NSString *)s
-{
-    ANTLRMapElement *np, *prev;
-    NSInteger idx;
-    
-    idx = [self hash:s];
-    for ( prev = nil, np = self->ptrBuffer[idx]; np != nil; prev = np, np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            if ( prev == nil ) self->ptrBuffer[idx] = [np getfNext]; /* unlink chain head */
-            else [prev setFNext:[np getfNext]];                      /* unlink mid-chain  */
-            [np release];
-            return( SUCCESS );            /* report SUCCESS     */
-        }
-    }
-    return( FAILURE );                    /*   not found      */
-}
-
--(void)delete_chain:(ANTLRMapElement *)np
-{
-    if ( [np getfNext] != nil )
-        [self delete_chain:[np getfNext]];
-    [np dealloc];
-}
-
-#ifdef DONTUSEYET
--(NSInteger)bld_symtab:(KW_TABLE *)toknams
-{
-    NSInteger i;
-    ANTLRMapElement *np;
-    
-    for( i = 0; *(toknams[i].name) != '\0'; i++ ) {
-        // install symbol in ptrBuffer
-        np = [ANTLRMapElement newANTLRMapElement:[NSString stringWithFormat:@"%s", toknams[i].name]];
-        //        np->fType = toknams[i].toknum;
-        [self install:np Scope:0];
-    }
-    return( SUCCESS );
-}
-#endif
-
--(ANTLRMapElement *)getptrBufferEntry:(NSInteger)idx
-{
-    return( ptrBuffer[idx] );
-}
-
--(ANTLRMapElement **)getptrBuffer
-{
-    return( ptrBuffer );
-}
-
--(void)setptrBuffer:(ANTLRMapElement *)np Index:(NSInteger)idx
-{
-    if ( idx < BuffSize ) {
-        [np retain];
-        ptrBuffer[idx] = np;
-    }
-}
-
--(NSInteger)getScope
-{
-    return( Scope );
-}
-
--(void)setScope:(NSInteger)i
-{
-    Scope = i;
-}
-
-- (ANTLRMapElement *)getTType:(NSString *)name
-{
-    return [self lookup:name Scope:0];
-}
-
-/*
- * works only for maplist indexed not by name but by TokenNumber
- */
-- (ANTLRMapElement *)getNameInList:(NSInteger)ttype
-{
-    ANTLRMapElement *np;
-    NSInteger aTType;
-
-    aTType = ttype % BuffSize;
-    for( np = self->ptrBuffer[aTType]; np != nil; np = [np getfNext] ) {
-        if ( [(NSNumber *)np.node integerValue] == ttype ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-- (ANTLRMapElement *)getName:(NSString *)name
-{
-    return [self lookup:name Scope:0]; /*  nil if not found      */    
-}
-
-- (void)putNode:(NSString *)name TokenType:(NSInteger)ttype
-{
-    ANTLRMapElement *np;
-    
-    // install symbol in ptrBuffer
-    np = [ANTLRMapElement newANTLRMapElementWithName:[NSString stringWithString:name] Type:ttype];
-    //        np->fType = toknams[i].toknum;
-    [self install:np Scope:0];
-}
-
-- (NSInteger)getMode
-{
-    return mode;
-}
-
-- (void)setMode:(NSInteger)aMode
-{
-    mode = aMode;
-}
-
-- (void) addObject:(id)aRule
-{
-    NSInteger idx;
-
-    idx = [self count];
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-/* this may have to handle linking into the chain
- */
-- (void) insertObject:(id)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    if ( aRule != ptrBuffer[idx] ) {
-        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (id)objectAtIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    return ptrBuffer[idx];
-}
-
-/* this will never link into the chain
- */
-- (void) setObject:(id)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        idx %= BuffSize;
-    }
-    if ( aRule != ptrBuffer[idx] ) {
-        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (void)putName:(NSString *)name Node:(id)aNode
-{
-    ANTLRMapElement *np;
-    
-    np = [self lookup:name Scope:0 ];
-    if ( np == nil ) {
-        np = [ANTLRMapElement newANTLRMapElementWithName:name Node:aNode];
-        if ( ptrBuffer[LastHash] )
-            [ptrBuffer[LastHash] release];
-        [np retain];
-        np.fNext = ptrBuffer[ LastHash ];
-        ptrBuffer[ LastHash ] = np;
-    }
-    return;    
-}
-
-- (NSEnumerator *)objectEnumerator
-{
-#pragma mark fix this, it's broken
-    NSEnumerator *anEnumerator = nil;
-
-    itIndex = 0;
-    return anEnumerator;
-}
-
-- (BOOL)hasNext
-{
-    if (self && [self count] < BuffSize-1) {
-        return YES;
-    }
-    return NO;
-}
-
-- (ANTLRMapElement *)nextObject
-{
-    if (self && itIndex < BuffSize-1) {
-        return ptrBuffer[itIndex];
-    }
-    return nil;
-}
-
-@synthesize BuffSize;
-@synthesize count;
-@synthesize ptr;
-@synthesize ptrBuffer;
-@synthesize buffer;
-@end
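
The USERDOC blocks above describe a chained hash table: hash: folds the key's characters into a bucket index, lookup:Scope: walks that bucket's chain comparing names, and install:/putName:Node: link a new ANTLRMapElement at the head of the chain. A small sketch of the name/node interface from the header; the key and the NSNumber payload are placeholders:

ANTLRHashMap *symbols = [ANTLRHashMap newANTLRHashMap];

// putName:Node: wraps the pair in an ANTLRMapElement and links it into
// the bucket chain at ptrBuffer[LastHash].
[symbols putName:@"expr" Node:[NSNumber numberWithInteger:42]];

// lookup:Scope: re-hashes the key and walks the same chain by name.
ANTLRMapElement *hit = [symbols lookup:@"expr" Scope:0];
if ( hit != nil )
    NSLog(@"found %@", [hit getName]);
[symbols release];
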
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRHashRule.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRHashRule.h
deleted file mode 100644
index 75ef581..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRHashRule.h
+++ /dev/null
@@ -1,70 +0,0 @@
-//
-//  ANTLRHashRule.h
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuleMemo.h"
-#import "ANTLRPtrBuffer.h"
-
-#define GLOBAL_SCOPE       0
-#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRHashRule : ANTLRPtrBuffer {
-    //    TStringPool *fPool;
-    NSInteger LastHash;
-    NSInteger mode;
-}
-
-// Construction/Destruction
-+ (id)newANTLRHashRule;
-+ (id)newANTLRHashRuleWithLen:(NSInteger)aBuffSize;
-- (id)init;
-- (id)initWithLen:(NSInteger)aBuffSize;
-- (void)dealloc;
-
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-
-// Instance Methods
-- (void)deleteANTLRHashRule:(ANTLRRuleMemo *)np;
-- (void)delete_chain:(ANTLRRuleMemo *)np;
-- (ANTLRRuleMemo **)getPtrBuffer;
-- (void)setPtrBuffer:(ANTLRRuleMemo **)np;
-- (NSNumber *)getRuleMemoStopIndex:(NSInteger)aStartIndex;
-- (void)putRuleMemoAtStartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex;
-- (NSInteger)getMode;
-- (void)setMode:(NSInteger)aMode;
-- (void) insertObject:(ANTLRRuleMemo *)aRule atIndex:(NSInteger)Index;
-- (ANTLRRuleMemo *) objectAtIndex:(NSInteger)Index;
-
-@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
-@property (getter=getMode,setter=setMode:) NSInteger mode;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRHashRule.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRHashRule.m
deleted file mode 100644
index 3bca78f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRHashRule.m
+++ /dev/null
@@ -1,279 +0,0 @@
-//
-//  ANTLRHashRule.m
-//  ANTLR
-//
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-#define ANTLR_MEMO_RULE_UNKNOWN -1
-
-#import "ANTLRHashRule.h"
-
-/*
- * Start of ANTLRHashRule
- */
-@implementation ANTLRHashRule
-
-@synthesize LastHash;
-
-+(id)newANTLRHashRule
-{
-    return [[ANTLRHashRule alloc] init];
-}
-
-+(id)newANTLRHashRuleWithLen:(NSInteger)aBuffSize
-{
-    return [[ANTLRHashRule alloc] initWithLen:aBuffSize];
-}
-
--(id)init
-{
-    self = [super initWithLen:HASHSIZE];
-    if ( self != nil ) {
-    }
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)aBuffSize
-{
-    self = [super initWithLen:aBuffSize];
-    if ( self != nil ) {
-        mode = 0;
-    }
-    return( self );
-}
-
--(void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRHashRule" );
-#endif
-    ANTLRRuleMemo *tmp, *rtmp;
-    int Index;
-    
-    if ( self.fNext != nil ) {
-        for( Index = 0; Index < BuffSize; Index++ ) {
-            tmp = ptrBuffer[Index];
-            while ( tmp && tmp != ptrBuffer[Index] ) {
-                rtmp = tmp;
-                if ([tmp isKindOfClass:[ANTLRLinkBase class]])
-                    tmp = (ANTLRRuleMemo *)tmp.fNext;
-                else
-                    tmp = nil;
-                [rtmp dealloc];
-            }
-        }
-    }
-    [super dealloc];
-}
-
-- (NSInteger)count
-{
-    NSInteger aCnt = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        if ( ptrBuffer[i] != nil ) {
-            aCnt++;
-        }
-    }
-    return aCnt;
-}
-                          
-- (NSInteger) length
-{
-    return BuffSize;
-}
-
-- (NSInteger) size
-{
-    NSInteger aSize = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        if ( ptrBuffer[i] != nil ) {
-            aSize += sizeof(id);
-        }
-    }
-    return aSize;
-}
-                                  
-                                  
--(void)deleteANTLRHashRule:(ANTLRRuleMemo *)np
-{
-    ANTLRRuleMemo *tmp, *rtmp;
-    int Index;
-    
-    if ( self.fNext != nil ) {
-        for( Index = 0; Index < BuffSize; Index++ ) {
-            tmp = ptrBuffer[Index];
-            while ( tmp && tmp != ptrBuffer[Index ] ) {
-                rtmp = tmp;
-                if ([tmp isKindOfClass:[ANTLRLinkBase class]])
-                    tmp = (ANTLRRuleMemo *)tmp.fNext;
-                else
-                    tmp = nil;
-                [rtmp release];
-            }
-        }
-    }
-}
-
--(void)delete_chain:(ANTLRRuleMemo *)np
-{
-    if ( np.fNext != nil )
-        [self delete_chain:np.fNext];
-    [np dealloc];
-}
-
--(ANTLRRuleMemo **)getPtrBuffer
-{
-    return( ptrBuffer );
-}
-
--(void)setPtrBuffer:(ANTLRRuleMemo **)np
-{
-    ptrBuffer = np;
-}
-
-- (NSNumber *)getRuleMemoStopIndex:(NSInteger)aStartIndex
-{
-    ANTLRRuleMemo *aRule;
-    NSNumber *stopIndex;
-    NSInteger anIndex;
-    
-    anIndex = ( aStartIndex >= BuffSize ) ? aStartIndex % BuffSize : aStartIndex;
-    if ((aRule = ptrBuffer[anIndex]) == nil) {
-        return nil;
-    }
-    stopIndex = [aRule getStopIndex:aStartIndex];
-    return stopIndex;
-}
-
-- (void)putRuleMemo:(ANTLRRuleMemo *)aRule AtStartIndex:(NSInteger)aStartIndex
-{
-    NSInteger anIndex;
-    
-    anIndex = (aStartIndex >= BuffSize) ? aStartIndex %= BuffSize : aStartIndex;
-    if ( ptrBuffer[anIndex] == nil ) {
-        ptrBuffer[anIndex] = aRule;
-        [aRule retain];
-    }
-    else {
-        do {
-            if ( [aRule.startIndex integerValue] == aStartIndex ) {
-                [aRule setStartIndex:aRule.stopIndex];
-                return;
-            }
-            aRule = aRule.fNext;
-        } while ( aRule != nil );
-    }
-}
-
-- (void)putRuleMemoAtStartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex
-{
-    ANTLRRuleMemo *aRule, *newRule;
-    NSInteger anIndex;
-    NSInteger aMatchIndex;
-
-    anIndex = (aStartIndex >= BuffSize) ? aStartIndex % BuffSize : aStartIndex;
-    aRule = ptrBuffer[anIndex];
-    if ( aRule == nil ) {
-        aRule = [ANTLRRuleMemo newANTLRRuleMemoWithStartIndex:[NSNumber numberWithInteger:aStartIndex]
-                                                    StopIndex:[NSNumber numberWithInteger:aStopIndex]];
-        [aRule retain];
-        ptrBuffer[anIndex] = aRule;
-    }
-    else {
-        aMatchIndex = [aRule.startIndex integerValue];
-        if ( aStartIndex > aMatchIndex ) {
-            if ( aRule != ptrBuffer[anIndex] ) {
-                [aRule retain];
-            }
-            aRule.fNext = ptrBuffer[anIndex];
-            ptrBuffer[anIndex] = aRule;
-            return;
-        }
-        while (aRule.fNext != nil) {
-            aMatchIndex = [((ANTLRRuleMemo *)aRule.fNext).startIndex integerValue];
-            if ( aStartIndex > aMatchIndex ) {
-                newRule = [ANTLRRuleMemo newANTLRRuleMemoWithStartIndex:[NSNumber numberWithInteger:aStartIndex]
-                                                              StopIndex:[NSNumber numberWithInteger:aStopIndex]];
-                [newRule retain];
-                newRule.fNext = aRule.fNext;
-                aRule.fNext = newRule;
-                return;
-            }
-            if ( aMatchIndex == aStartIndex ) {
-                [aRule setStartIndex:aRule.stopIndex];
-                return;
-            }
-            aRule = aRule.fNext;
-        }
-    }
-}
-
-- (NSInteger)getLastHash
-{
-    return LastHash;
-}
-
-- (void)setLastHash:(NSInteger)aHash
-{
-    LastHash = aHash;
-}
-
-- (NSInteger)getMode
-{
-    return mode;
-}
-
-- (void)setMode:(NSInteger)aMode
-{
-    mode = aMode;
-}
-
-- (void) insertObject:(ANTLRRuleMemo *)aRule atIndex:(NSInteger)anIndex
-{
-    NSInteger Index;
-    
-    Index = ( anIndex >= BuffSize ) ? anIndex % BuffSize : anIndex;
-    if (aRule != ptrBuffer[Index]) {
-        if ( ptrBuffer[Index] ) [ptrBuffer[Index] release];
-        [aRule retain];
-    }
-    ptrBuffer[Index] = aRule;
-}
-
-- (ANTLRRuleMemo *)objectAtIndex:(NSInteger)anIndex
-{
-    NSInteger anIdx;
-
-    anIdx = ( anIndex >= BuffSize ) ? anIndex % BuffSize : anIndex;
-    return ptrBuffer[anIdx];
-}
-
-
-@end
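
ANTLRHashRule memoizes rule attempts by start index (taken modulo BuffSize), so the parser can later ask whether a given rule already covered a stretch of input. A hypothetical sketch of the two memo calls declared in its header; the index values are made up:

ANTLRHashRule *memo = [ANTLRHashRule newANTLRHashRule];

// Record that a rule starting at input index 10 stopped at index 17.
[memo putRuleMemoAtStartIndex:10 StopIndex:17];

// nil means nothing has been memoized for that start index yet.
NSNumber *stop = [memo getRuleMemoStopIndex:10];
if ( stop != nil )
    NSLog(@"memoized through index %ld", (long)[stop integerValue]);
[memo release];
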
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRInputStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRInputStream.h
deleted file mode 100644
index 7bbdd6f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRInputStream.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-//  ANTLRInputStream.h
-//  ANTLR
-//
-//  Created by Alan Condit on 2/21/11.
-//  Copyright 2011 Alan's MachineWorks. All rights reserved.
-//
-
-#import <Cocoa/Cocoa.h>
-#import "AntlrReaderStream.h"
-
-@interface ANTLRInputStream : ANTLRReaderStream {
-    NSStringEncoding encoding;
-}
-
-@property (assign) NSStringEncoding encoding;
-
-+ (id) newANTLRInputStream;
-+ (id) newANTLRInputStream:(NSFileHandle *)anInput;
-+ (id) newANTLRInputStream:(NSFileHandle *)anInput size:(NSInteger)theSize;
-+ (id) newANTLRInputStream:(NSFileHandle *)anInput encoding:(NSStringEncoding)theEncoding;
-+ (id) newANTLRInputStream:(NSFileHandle *)anInput
-                      size:(NSInteger)theSize
-            readBufferSize:(NSInteger)theRBSize
-                  encoding:(NSStringEncoding)theEncoding;
-- (id) init;
-- (id) initWithInput:(NSFileHandle *)anInput
-                size:(NSInteger)theSize
-      readBufferSize:(NSInteger)theRBSize
-            encoding:(NSStringEncoding)theEncoding;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRInputStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRInputStream.m
deleted file mode 100644
index 3d041c5..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRInputStream.m
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-//  ANTLRInputStream.m
-//  ANTLR
-//
-//  Created by Alan Condit on 2/21/11.
-//  Copyright 2011 Alan's MachineWorks. All rights reserved.
-//
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRInputStream.h"
-
-
-@implementation ANTLRInputStream
-
-@synthesize encoding;
-
-+ (id) newANTLRInputStream
-{
-    return [[ANTLRInputStream alloc] init];
-}
-
-+ (id) newANTLRInputStream:(NSFileHandle *)anInput
-{
-    return [[ANTLRInputStream alloc] initWithInput:anInput size:ANTLRReaderStream.INITIAL_BUFFER_SIZE readBufferSize:ANTLRReaderStream.READ_BUFFER_SIZE encoding:NSASCIIStringEncoding];
-}
-
-+ (id) newANTLRInputStream:(NSFileHandle *)anInput size:(NSInteger)theSize
-{
-    return [[ANTLRInputStream alloc] initWithInput:anInput size:theSize readBufferSize:ANTLRReaderStream.READ_BUFFER_SIZE encoding:NSASCIIStringEncoding];
-}
-
-+ (id) newANTLRInputStream:(NSFileHandle *)anInput encoding:(NSStringEncoding)theEncoding
-{
-    return [[ANTLRInputStream alloc] initWithInput:anInput size:ANTLRReaderStream.INITIAL_BUFFER_SIZE readBufferSize:ANTLRReaderStream.READ_BUFFER_SIZE encoding:theEncoding];
-}
-
-+ (id) newANTLRInputStream:(NSFileHandle *)anInput
-                      size:(NSInteger)theSize
-            readBufferSize:(NSInteger)theRBSize
-                  encoding:(NSStringEncoding)theEncoding
-{
-    return [[ANTLRInputStream alloc] initWithInput:anInput size:theSize readBufferSize:theRBSize encoding:theEncoding];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-- (id) initWithInput:(NSFileHandle *)anInput
-                size:(NSInteger)theSize
-      readBufferSize:(NSInteger)theRBSize
-            encoding:(NSStringEncoding)theEncoding
-{
-    self = [super initWithReader:anInput size:theSize readBufferSize:theRBSize];
-    if ( self != nil ) {
-        //[self load:theSize readBufferSize:theRBSize];
-    }
-    return self;
-}
-
-@end
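
ANTLRInputStream is a thin wrapper over ANTLRReaderStream that pairs an NSFileHandle source with an NSStringEncoding. A sketch of the convenience constructors shown above; the path and buffer sizes are placeholders:

NSFileHandle *fh = [NSFileHandle fileHandleForReadingAtPath:@"input.txt"];

// Default buffer sizes and ASCII encoding...
ANTLRInputStream *simple = [ANTLRInputStream newANTLRInputStream:fh];

// ...or spell out the sizes and encoding explicitly.
ANTLRInputStream *tuned = [ANTLRInputStream newANTLRInputStream:fh
                                                           size:4096
                                                 readBufferSize:1024
                                                       encoding:NSUTF8StringEncoding];
[simple release];
[tuned release];
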
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRIntArray.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRIntArray.h
deleted file mode 100644
index 9182377..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRIntArray.h
+++ /dev/null
@@ -1,74 +0,0 @@
-//
-//  ANTLRIntArray.h
-//  ANTLR
-//
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-#define ANTLR_INT_ARRAY_INITIAL_SIZE 10
-
-@interface ANTLRIntArray : NSObject 
-{
-    NSUInteger BuffSize;
-    NSUInteger count;
-    NSInteger idx;
-    NSMutableData *buffer;
-    __strong NSInteger *intBuffer;
-    BOOL SPARSE;
-}
-
-+ (ANTLRIntArray *)newArray;
-+ (ANTLRIntArray *)newArrayWithLen:(NSUInteger)aLen;
-
-- (id) init;
-- (id) initWithLen:(NSUInteger)aLen;
-
-- (void) dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (void) addInteger:(NSInteger) value;
-- (NSInteger) pop;
-- (void) push:(NSInteger) value;
-- (NSInteger) integerAtIndex:(NSUInteger) index;
-- (void) insertInteger:(NSInteger)anInteger AtIndex:(NSUInteger) anIndex;
-- (NSInteger)removeIntegerAtIndex:(NSUInteger) anIndex;
-- (void)replaceInteger:(NSInteger)aValue AtIndex:(NSUInteger)anIndex;
-- (void) reset;
-
-- (NSUInteger) count;
-- (NSUInteger) size;
-- (void) ensureCapacity:(NSUInteger) anIndex;
-
-@property (assign) NSUInteger BuffSize;
-@property (assign) NSUInteger count;
-@property (assign) NSInteger idx;
-@property (retain) NSMutableData *buffer;
-@property (assign) NSInteger *intBuffer;
-@property (assign) BOOL SPARSE;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRIntArray.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRIntArray.m
deleted file mode 100644
index f715372..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRIntArray.m
+++ /dev/null
@@ -1,199 +0,0 @@
-//
-//  ANTLRIntArray.m
-//  ANTLR
-//
-//  Created by Ian Michell on 27/04/2010.
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRIntArray.h"
-#import "ANTLRRuntimeException.h"
-
-@implementation ANTLRIntArray
-
-@synthesize BuffSize;
-@synthesize count;
-@synthesize idx;
-@synthesize buffer;
-@synthesize intBuffer;
-@synthesize SPARSE;
-
-+ (ANTLRIntArray *)newArray
-{
-    return [[ANTLRIntArray alloc] init];
-}
-
-+ (ANTLRIntArray *)newArrayWithLen:(NSUInteger)aLen
-{
-    return [[ANTLRIntArray alloc] initWithLen:aLen];
-}
-
-- (id)init
-{
-    self = [super init];
-    if ( self != nil ) {
-        BuffSize  = (ANTLR_INT_ARRAY_INITIAL_SIZE * (sizeof(NSInteger)/sizeof(id)));
-        count = 0;
-        idx = -1;
-        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
-        intBuffer = (NSInteger *)[buffer mutableBytes];
-        SPARSE = NO;
-    }
-    return self;
-}
-
-- (id)initWithLen:(NSUInteger)aLen
-{
-    self = [super init];
-    if ( self != nil ) {
-        BuffSize  = (ANTLR_INT_ARRAY_INITIAL_SIZE * (sizeof(NSInteger)/sizeof(id)));
-        count = 0;
-        idx = -1;
-        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
-        intBuffer = (NSInteger *)[buffer mutableBytes];
-        SPARSE = NO;
-    }
-    return self;
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRIntArray" );
-#endif
-    if ( buffer ) [buffer release];
-    [super dealloc];
-}
-
-- (id)copyWithZone:(NSZone *)aZone
-{
-    ANTLRIntArray *copy;
-    
-    copy = [[[self class] alloc] initWithLen:BuffSize];
-    copy.idx = self.idx;
-    NSInteger anIndex;
-    for ( anIndex = 0; anIndex < BuffSize; anIndex++ ) {
-        [copy addInteger:intBuffer[anIndex]];
-    }
-    return copy;
-}
-
-- (NSUInteger)count
-{
-    return count;
-}
-
-// FIXME: Java runtime returns p, I'm not so sure it's right so have added p + 1 to show true size!
-- (NSUInteger)size
-{
-    if ( count > 0 )
-        return ( count * sizeof(NSInteger));
-    return 0;
-}
-
-- (void)addInteger:(NSInteger) value
-{
-    [self ensureCapacity:idx+1];
-    intBuffer[++idx] = (NSInteger) value;
-    count++;
-}
-
-- (NSInteger)pop
-{
-    if ( idx < 0 ) {
-        @throw [ANTLRIllegalArgumentException newException:[NSString stringWithFormat:@"Nothing to pop, count = %d", count]];
-    }
-    NSInteger value = (NSInteger) intBuffer[idx--];
-    count--;
-    return value;
-}
-
-- (void)push:(NSInteger)aValue
-{
-    [self addInteger:aValue];
-}
-
-- (NSInteger)integerAtIndex:(NSUInteger) anIndex
-{
-    if ( SPARSE==NO  && anIndex > idx ) {
-        @throw [ANTLRIllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than count %d", anIndex, count]];
-    }
-    else if ( SPARSE == YES && anIndex >= BuffSize ) {
-        @throw [ANTLRIllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than BuffSize %d", anIndex, BuffSize]];
-    }
-    return intBuffer[anIndex];
-}
-
-- (void)insertInteger:(NSInteger)aValue AtIndex:(NSUInteger)anIndex
-{
-    [self replaceInteger:aValue AtIndex:anIndex];
-    count++;
-}
-
-- (NSInteger)removeIntegerAtIndex:(NSUInteger) anIndex
-{
-    if ( SPARSE==NO && anIndex > idx ) {
-        @throw [ANTLRIllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than count %d", anIndex, count]];
-        return (NSInteger)-1;
-    } else if ( SPARSE==YES && anIndex >= BuffSize ) {
-        @throw [ANTLRIllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than BuffSize %d", anIndex, BuffSize]];
-    }
-    count--;
-    return intBuffer[anIndex];
-}
-
-- (void)replaceInteger:(NSInteger)aValue AtIndex:(NSUInteger)anIndex
-{
-    if ( SPARSE == NO && anIndex > idx ) {
-        @throw [ANTLRIllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than count %d", anIndex, count]];
-    }
-    else if ( SPARSE == YES && anIndex >= BuffSize ) {
-        @throw [ANTLRIllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than BuffSize %d", anIndex, BuffSize]];
-    }
-    intBuffer[anIndex] = aValue;
-}
-
--(void) reset
-{
-    count = 0;
-    idx = -1;
-}
-
-- (void) ensureCapacity:(NSUInteger) anIndex
-{
-    if ( (anIndex * sizeof(NSUInteger)) >= [buffer length] )
-    {
-        NSUInteger newSize = ([buffer length] / sizeof(NSInteger)) * 2;
-        if (anIndex > newSize) {
-            newSize = anIndex + 1;
-        }
-        BuffSize = newSize;
-        [buffer setLength:(BuffSize * sizeof(NSUInteger))];
-        intBuffer = (NSInteger *)[buffer mutableBytes];
-    }
-}
-
-@end
-
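
ANTLRIntArray is a growable NSInteger buffer that doubles as a stack: push is just addInteger under another name, pop hands back the most recently added value, and integerAtIndex: gives random access into the same storage. A brief sketch using only the methods from its header:

ANTLRIntArray *marks = [ANTLRIntArray newArray];

[marks addInteger:3];                         // appends and bumps count
[marks push:7];                               // same effect as addInteger:
NSInteger top   = [marks pop];                // 7 -- removes the newest value
NSInteger first = [marks integerAtIndex:0];   // 3 -- still reachable by index
NSLog(@"top=%ld first=%ld count=%lu", (long)top, (long)first, (unsigned long)[marks count]);
[marks release];
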
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRIntStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRIntStream.h
deleted file mode 100644
index e37f907..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRIntStream.h
+++ /dev/null
@@ -1,106 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef DEBUG_DEALLOC
-#define DEBUG_DEALLOC
-#endif
-
-@protocol ANTLRIntStream < NSObject, NSCopying >
-
-- (void) consume;
-
-// Get the character i symbols ahead of the current input pointer (i=1 is the next character),
-// returned as an int so that ANTLRCharStreamEOF (-1) fits in the data range.
-- (NSInteger) LA:(NSInteger) i;
-
-// Tell the stream to start buffering if it hasn't already.  Return
-// current input position, index(), or some other marker so that
-// when passed to rewind() you get back to the same spot.
-// rewind(mark()) should not affect the input cursor.
-// TODO: problem in that lexer stream returns not index but some marker 
-
-- (NSInteger) mark;
-
-// Return the current input symbol index 0..n where n indicates the
-// last symbol has been read.
-
-- (NSInteger) index;
-
-- (NSUInteger) line;
-
-- (NSUInteger) charPositionInLine;
-
-// Reset the stream so that next call to index would return marker.
-// The marker will usually be -index but it doesn't have to be.  It's
-// just a marker to indicate what state the stream was in.  This is
-// essentially calling -release: and -seek:.  If there are markers
-// created after this marker argument, this routine must unroll them
-// like a stack.  Assume the state the stream was in when this marker
-// was created.
-
-- (void) rewind;
-- (void) rewind:(NSInteger) marker;
-
-// You may want to commit to a backtrack but don't want to force the
-// stream to keep bookkeeping objects around for a marker that is
-// no longer necessary.  This will have the same behavior as
-// rewind() except it releases resources without the backward seek.
-
-- (void) release:(NSInteger) marker;
-
-// Set the input cursor to the position indicated by index.  This is
-// normally used to seek ahead in the input stream.  No buffering is
-// required to do this unless you know your stream will use seek to
-// move backwards such as when backtracking.
-// This is different from rewind in its multi-directional
-// requirement and in that its argument is strictly an input cursor (index).
-//
-// For char streams, seeking forward must update the stream state such
-// as line number.  For seeking backwards, you will be presumably
-// backtracking using the mark/rewind mechanism that restores state and
-// so this method does not need to update state when seeking backwards.
-//
-// Currently, this method is only used for efficient backtracking, but
-// in the future it may be used for incremental parsing.
-
-- (void) seek:(NSInteger) anIndex;
-
-/** Only makes sense for streams that buffer everything up, but it can be
- *  useful for displaying the entire stream or for testing.  This value
- *  includes a single EOF.
- */
-- (NSUInteger) size;
-/** Where are you getting symbols from?  Normally, implementations will
- *  pass the buck all the way to the lexer who can ask its input stream
- *  for the file name or whatever.
- */
-- (NSString *)getSourceName;
-
-//@property (assign) NSInteger index;
-//@property (assign) NSUInteger line;
-//@property (assign) NSUInteger charPositionInLine;
-
-
-@end
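
The mark/rewind/seek comments above describe a speculative-consumption contract: mark where you are, consume tentatively, then either release the marker (commit) or rewind to it (backtrack). A minimal sketch of that contract, not taken from the original sources and assuming only that `stream` is some object conforming to ANTLRIntStream:

    // Hedged illustration of the mark/rewind contract; `stream` is a placeholder.
    NSInteger marker = [stream mark];           // remember the current position
    @try {
        while ( [stream LA:1] == ' ' )          // speculatively consume blanks
            [stream consume];
        [stream release:marker];                // commit: drop the bookkeeping
    }
    @catch (ANTLRRecognitionException *e) {
        [stream rewind:marker];                 // backtrack to the marked position
    }
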
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexer.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRLexer.h
deleted file mode 100644
index cd985cc..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTokenSource.h"
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRRecognizerSharedState.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRRecognitionException.h"
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRMismatchedRangeException.h"
-
-@interface ANTLRLexer : ANTLRBaseRecognizer <ANTLRTokenSource> {
-	id<ANTLRCharStream> input;      ///< The character stream we pull tokens out of.
-	NSUInteger ruleNestingLevel;
-}
-
-@property (retain, getter=input, setter=setInput:) id<ANTLRCharStream> input;
-@property (getter=getRuleNestingLevel, setter=setRuleNestingLevel:) NSUInteger ruleNestingLevel;
-
-#pragma mark Initializer
-- (id) initWithCharStream:(id<ANTLRCharStream>) anInput;
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput State:(ANTLRRecognizerSharedState *)state;
-
-- (id) copyWithZone:(NSZone *)zone;
-
-- (void) reset;
-
-// - (ANTLRRecognizerSharedState *) state;
-
-#pragma mark Tokens
-- (id<ANTLRToken>)getToken;
-- (void) setToken: (id<ANTLRToken>) aToken;
-- (id<ANTLRToken>) nextToken;
-- (void) mTokens;		// abstract, defined in generated sources
-- (void) skip;
-- (id<ANTLRCharStream>) input;
-- (void) setInput:(id<ANTLRCharStream>)aCharStream;
-
-- (void) emit;
-- (void) emit:(id<ANTLRToken>)aToken;
-
-#pragma mark Matching
-- (void) matchString:(NSString *)aString;
-- (void) matchAny;
-- (void) matchChar:(unichar) aChar;
-- (void) matchRangeFromChar:(unichar)fromChar to:(unichar)toChar;
-
-#pragma mark Informational
-- (NSUInteger) line;
-- (NSUInteger) charPositionInLine;
-- (NSInteger) index;
-- (NSString *) text;
-- (void) setText:(NSString *) theText;
-
-// error handling
-- (void) reportError:(ANTLRRecognitionException *)e;
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(AMutableArray *)tokenNames;
-- (NSString *)getCharErrorDisplay:(NSInteger)c;
-- (void) recover:(ANTLRRecognitionException *)e;
-- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexer.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRLexer.m
deleted file mode 100644
index de1a0a3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexer.m
+++ /dev/null
@@ -1,428 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <ANTLR/antlr.h>
-#import "ANTLRLexer.h"
-
-@implementation ANTLRLexer
-
-@synthesize input;
-@synthesize ruleNestingLevel;
-#pragma mark Initializer
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-	self = [super initWithState:[[ANTLRRecognizerSharedState alloc] init]];
-	if ( self != nil ) {
-        input = [anInput retain];
-        if (state.token != nil)
-            [((ANTLRCommonToken *)state.token) setInput:anInput];
-		ruleNestingLevel = 0;
-	}
-	return self;
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput State:(ANTLRRecognizerSharedState *)aState
-{
-	self = [super initWithState:aState];
-	if ( self != nil ) {
-        input = [anInput retain];
-        if (state.token != nil)
-            [((ANTLRCommonToken *)state.token) setInput:anInput];
-		ruleNestingLevel = 0;
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-    if ( input ) [input release];
-    [super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRLexer *copy;
-	
-    copy = [[[self class] allocWithZone:aZone] init];
-    //    copy = [super copyWithZone:aZone]; // allocation occurs here
-    if ( input != nil )
-        copy.input = input;
-    copy.ruleNestingLevel = ruleNestingLevel;
-    return copy;
-}
-
-- (void) reset
-{
-    [super reset]; // reset all recognizer state variables
-                   // wack Lexer state variables
-    if ( input != nil ) {
-        [input seek:0]; // rewind the input
-    }
-    if ( state == nil ) {
-        return; // no shared state work to do
-    }
-    state.token = nil;
-    state.type = ANTLRCommonToken.INVALID_TOKEN_TYPE;
-    state.channel = ANTLRCommonToken.DEFAULT_CHANNEL;
-    state.tokenStartCharIndex = -1;
-    state.tokenStartCharPositionInLine = -1;
-    state.tokenStartLine = -1;
-    state.text = nil;
-}
-
-// token stuff
-#pragma mark Tokens
-
-- (id<ANTLRToken>)getToken
-{
-    return [state getToken]; 
-}
-
-- (void) setToken: (id<ANTLRToken>) aToken
-{
-    if (state.token != aToken) {
-        [aToken retain];
-        state.token = aToken;
-    }
-}
-
-
-// this method may be overridden in the generated lexer if we generate a filtering lexer.
-- (id<ANTLRToken>) nextToken
-{
-	while (YES) {
-        [self setToken:nil];
-        state.channel = ANTLRCommonToken.DEFAULT_CHANNEL;
-        state.tokenStartCharIndex = input.index;
-        state.tokenStartCharPositionInLine = input.charPositionInLine;
-        state.tokenStartLine = input.line;
-        state.text = nil;
-        
-        // [self setText:[self text]];
-		if ([input LA:1] == ANTLRCharStreamEOF) {
-            ANTLRCommonToken *eof = [ANTLRCommonToken newToken:input
-                                                          Type:ANTLRTokenTypeEOF
-                                                       Channel:ANTLRCommonToken.DEFAULT_CHANNEL
-                                                         Start:input.index
-                                                          Stop:input.index];
-            [eof setLine:input.line];
-            [eof setCharPositionInLine:input.charPositionInLine];
-			return eof;
-		}
-		@try {
-			[self mTokens];
-            // SEL aMethod = @selector(mTokens);
-            // [[self class] instancesRespondToSelector:aMethod];
-            if ( state.token == nil)
-                [self emit];
-            else if ( state.token == [ANTLRCommonToken skipToken] ) {
-                continue;
-            }
-			return state.token;
-		}
-		@catch (ANTLRNoViableAltException *nva) {
-			[self reportError:nva];
-			[self recover:nva];
-		}
-		@catch (ANTLRRecognitionException *e) {
-			[self reportError:e];
-		}
-	}
-}
-
-- (void) mTokens
-{   // abstract, defined in generated source as a starting point for matching
-    [self doesNotRecognizeSelector:_cmd];
-}
-
-- (void) skip
-{
-    state.token = [ANTLRCommonToken skipToken];
-}
-
-- (id<ANTLRCharStream>) input
-{
-    return input; 
-}
-
-- (void) setInput:(id<ANTLRCharStream>) anInput
-{
-    if ( anInput != input ) {
-        if ( input ) [input release];
-    }
-    input = nil;
-    [self reset];
-    input = anInput;
-    [input retain];
-}
-
-/** Currently does not support multiple emits per nextToken invocation
- *  for efficiency reasons.  Subclass and override this method and
- *  nextToken (to push tokens into a list and pull from that list rather
- *  than a single variable as this implementation does).
- */
-- (void) emit:(id<ANTLRToken>)aToken
-{
-	state.token = aToken;
-}
-
-/** The standard method called to automatically emit a token at the
- *  outermost lexical rule.  The token object should point into the
- *  char buffer start..stop.  If there is a text override in 'text',
- *  use that to set the token's text.  Override this method to emit
- *  custom Token objects.
- *
- *  If you are building trees, then you should also override
- *  Parser or TreeParser.getMissingSymbol().
- */
-- (void) emit
-{
-	id<ANTLRToken> aToken = [ANTLRCommonToken newToken:input
-                                                  Type:state.type
-                                               Channel:state.channel
-                                                 Start:state.tokenStartCharIndex
-                                                  Stop:input.index-1];
-	[aToken setLine:state.tokenStartLine];
-    aToken.text = [self text];
-	[aToken setCharPositionInLine:state.tokenStartCharPositionInLine];
-    [aToken retain];
-	[self emit:aToken];
-	// [aToken release];
-}
-
-// matching
-#pragma mark Matching
-- (void) matchString:(NSString *)aString
-{
-    unichar c;
-	unsigned int i = 0;
-	unsigned int stringLength = [aString length];
-	while ( i < stringLength ) {
-		c = [input LA:1];
-        if ( c != [aString characterAtIndex:i] ) {
-			if ([state getBacktracking] > 0) {
-				state.failed = YES;
-				return;
-			}
-			ANTLRMismatchedTokenException *mte = [ANTLRMismatchedTokenException newExceptionChar:[aString characterAtIndex:i] Stream:input];
-            mte.c = c;
-			[self recover:mte];
-			@throw mte;
-		}
-		i++;
-		[input consume];
-		state.failed = NO;
-	}
-}
-
-- (void) matchAny
-{
-	[input consume];
-}
-
-- (void) matchChar:(unichar) aChar
-{
-	// TODO: -LA: is returning an int because it sometimes is used in the generated parser to compare lookahead with a tokentype.
-	//		 try to change all those occurrences to -LT: if possible (i.e. if ANTLR can be made to generate LA only for lexer code)
-    unichar charLA;
-	charLA = [input LA:1];
-	if ( charLA != aChar) {
-		if ([state getBacktracking] > 0) {
-			state.failed = YES;
-			return;
-		}
-		ANTLRMismatchedTokenException  *mte = [ANTLRMismatchedTokenException newExceptionChar:aChar Stream:input];
-        mte.c = charLA;
-		[self recover:mte];
-		@throw mte;
-	}
-	[input consume];
-	state.failed = NO;
-}
-
-- (void) matchRangeFromChar:(unichar)fromChar to:(unichar)toChar
-{
-	unichar charLA = (unichar)[input LA:1];
-	if ( charLA < fromChar || charLA > toChar ) {
-		if ([state getBacktracking] > 0) {
-			state.failed = YES;
-			return;
-		}
-		ANTLRMismatchedRangeException  *mre = [ANTLRMismatchedRangeException
-					newException:NSMakeRange((NSUInteger)fromChar,(NSUInteger)toChar)
-							   stream:input];
-        mre.c = charLA;
-		[self recover:mre];
-		@throw mre;
-	}		
-	[input consume];
-	state.failed = NO;
-}
-
-	// info
-#pragma mark Informational
-
-- (NSUInteger) line
-{
-	return input.line;
-}
-
-- (NSUInteger) charPositionInLine
-{
-	return input.charPositionInLine;
-}
-
-- (NSInteger) index
-{
-    return 0;
-}
-
-- (NSString *) text
-{
-    if (state.text != nil) {
-        return state.text;
-    }
-	return [input substringWithRange:NSMakeRange(state.tokenStartCharIndex, input.index-state.tokenStartCharIndex)];
-}
-
-- (void) setText:(NSString *) theText
-{
-    state.text = theText;
-}
-
-	// error handling
-- (void) reportError:(ANTLRRecognitionException *)e
-{
-    /** TODO: not thought about recovery in lexer yet.
-     *
-     // if we've already reported an error and have not matched a token
-     // yet successfully, don't report any errors.
-     if ( errorRecovery ) {
-     //System.err.print("[SPURIOUS] ");
-     return;
-     }
-     errorRecovery = true;
-     */
-    
-    [self displayRecognitionError:[self getTokenNames] Exception:e];
-}
-
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(AMutableArray *)tokenNames
-{
-/*    NSString *msg = [NSString stringWithFormat:@"Gotta fix getErrorMessage in ANTLRLexer.m--%@\n",
-                     e.name];
- */
-    NSString *msg = nil;
-    if ( [e isKindOfClass:[ANTLRMismatchedTokenException class]] ) {
-        ANTLRMismatchedTokenException *mte = (ANTLRMismatchedTokenException *)e;
-        msg = [NSString stringWithFormat:@"mismatched character \"%@\" expecting \"%@\"",
-               [self getCharErrorDisplay:mte.c], [self getCharErrorDisplay:mte.expecting]];
-    }
-    else if ( [e isKindOfClass:[ANTLRNoViableAltException class]] ) {
-        ANTLRNoViableAltException *nvae = (ANTLRNoViableAltException *)e;
-        // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
-        // and "(decision="+nvae.decisionNumber+") and
-        // "state "+nvae.stateNumber
-        msg = [NSString stringWithFormat:@"no viable alternative at character \"%@\"",
-               [self getCharErrorDisplay:(nvae.c)]];
-    }
-    else if ( [e isKindOfClass:[ANTLREarlyExitException class]] ) {
-        ANTLREarlyExitException *eee = (ANTLREarlyExitException *)e;
-        // for development, can add "(decision="+eee.decisionNumber+")"
-        msg = [NSString stringWithFormat:@"required (...)+ loop did not match anything at character \"%@\"",
-               [self getCharErrorDisplay:(eee.c)]];
-    }
-    else if ( [e isKindOfClass:[ANTLRMismatchedNotSetException class]] ) {
-        ANTLRMismatchedNotSetException *mse = (ANTLRMismatchedNotSetException *)e;
-        msg = [NSString stringWithFormat:@"mismatched character \"%@\"  expecting set \"%@\"",
-               [self getCharErrorDisplay:(mse.c)], mse.expecting];
-    }
-    else if ( [e isKindOfClass:[ANTLRMismatchedSetException class]] ) {
-        ANTLRMismatchedSetException *mse = (ANTLRMismatchedSetException *)e;
-        msg = [NSString stringWithFormat:@"mismatched character \"%@\" expecting set \"%@\"",
-               [self getCharErrorDisplay:(mse.c)], mse.expecting];
-    }
-    else if ( [e isKindOfClass:[ANTLRMismatchedRangeException class]] ) {
-        ANTLRMismatchedRangeException *mre = (ANTLRMismatchedRangeException *)e;
-        msg = [NSString stringWithFormat:@"mismatched character \"%@\" \"%@..%@\"",
-               [self getCharErrorDisplay:(mre.c)], [self getCharErrorDisplay:(mre.range.location)],
-               [self getCharErrorDisplay:(mre.range.location+mre.range.length-1)]];
-    }
-    else {
-        msg = [super getErrorMessage:e TokenNames:[self getTokenNames]];
-    }
-    return msg;
-}
-
-- (NSString *)getCharErrorDisplay:(NSInteger)c
-{
-    NSString *s;
-    switch ( c ) {
-        case ANTLRTokenTypeEOF :
-            s = @"<EOF>";
-            break;
-        case '\n' :
-            s = @"\\n";
-            break;
-        case '\t' :
-            s = @"\\t";
-            break;
-        case '\r' :
-            s = @"\\r";
-            break;
-        default:
-            s = [NSString stringWithFormat:@"%c", (char)c];
-            break;
-    }
-    return s;
-}
-
-/** A lexer can normally match any char in its vocabulary after matching
- *  a token, so do the easy thing and just kill a character and hope
- *  it all works out.  You can instead use the rule invocation stack
- *  to do sophisticated error recovery if you are in a fragment rule.
- */
-- (void)recover:(ANTLRRecognitionException *)re
-{
-    //System.out.println("consuming char "+(char)input.LA(1)+" during recovery");
-    //re.printStackTrace();
-    [input consume];
-}
-
-- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex
-{
-    NSString *inputSymbol = [NSString stringWithFormat:@"%c line=%d:%d\n", [input LT:1], input.line, input.charPositionInLine];
-    [super traceIn:ruleName Index:ruleIndex Object:inputSymbol];
-}
-
-- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex
-{
-    NSString *inputSymbol = [NSString stringWithFormat:@"%c line=%d:%d\n", [input LT:1], input.line, input.charPositionInLine];
-    [super traceOut:ruleName Index:ruleIndex Object:inputSymbol];
-}
-
-@end
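
The -emit comment above names the intended customization point: override -emit in a generated-lexer subclass to produce custom token objects. A hedged sketch of what such an override could look like; MyLexer is an illustrative class name, and the sketch assumes the input and state ivars are visible to subclasses, as generated lexers rely on:

    @interface MyLexer : ANTLRLexer
    @end

    @implementation MyLexer

    // Build the token the same way the base -emit does, then attach any
    // extra per-token information before handing it to -emit:.
    - (void) emit
    {
        ANTLRCommonToken *t = [ANTLRCommonToken newToken:input
                                                    Type:state.type
                                                 Channel:state.channel
                                                   Start:state.tokenStartCharIndex
                                                    Stop:input.index-1];
        [t setLine:state.tokenStartLine];
        [t setCharPositionInLine:state.tokenStartCharPositionInLine];
        t.text = [self text];
        // ... customize t here before it becomes the lexer's current token ...
        [t retain];                      // mirror the base implementation (MRC)
        [self emit:t];
    }

    @end
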
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.h
deleted file mode 100644
index 27b8abc..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@interface ANTLRLexerRuleReturnScope : NSObject {
-	NSInteger start;
-	NSInteger stopToken;
-}
-
-- (NSInteger) getStart;
-- (void) setStart: (NSInteger) aStart;
-
-- (NSInteger) getStop;
-- (void) setStop: (NSInteger) aStop;
-
-@property (assign, getter=getStart, setter=setStart:) NSInteger start;
-@property (getter=getStop,setter=setStop:) NSInteger stopToken;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.m
deleted file mode 100644
index ac69380..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerRuleReturnScope.m
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRLexerRuleReturnScope.h"
-
-
-@implementation ANTLRLexerRuleReturnScope
-
-@synthesize start;
-
-//---------------------------------------------------------- 
-//  start 
-//---------------------------------------------------------- 
-- (NSInteger) getStart
-{
-    return start;
-}
-
-- (void) setStart: (NSInteger) aStart
-{
-    start = aStart;
-}
-
-//---------------------------------------------------------- 
-//  stop 
-//---------------------------------------------------------- 
-- (NSInteger) getStop
-{
-    return stopToken;
-}
-
-- (void) setStop: (NSInteger) aStop
-{
-    stopToken = aStop;
-}
-
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerState.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerState.h
deleted file mode 100644
index 7132a48..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerState.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRRecognizerSharedState.h"
-
-@interface ANTLRLexerState : ANTLRRecognizerSharedState {
-}
-
-- (void) reset;
-
-- (id<ANTLRToken>) getToken;
-- (void) setToken:(id<ANTLRToken>) theToken;
-
-- (NSUInteger) getTokenType;
-- (void) setTokenType:(unsigned int) theTokenType;
-
-- (NSUInteger) channel;
-- (void) setChannel:(unsigned int) theChannel;
-
-- (NSUInteger) getTokenStartLine;
-- (void) setTokenStartLine:(unsigned int) theTokenStartLine;
-
-- (NSUInteger) getTokenCharPositionInLine;
-- (void) setTokenCharPositionInLine:(unsigned int) theCharPosition;
-
-- (NSInteger) getTokenStartCharIndex;
-- (void) setTokenStartCharIndex:(int) theTokenStartCharIndex;
-
-- (NSString *) text;
-- (void) setText:(NSString *) theText;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerState.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerState.m
deleted file mode 100644
index 88284b7..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRLexerState.m
+++ /dev/null
@@ -1,139 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRLexerState.h"
-
-
-@implementation ANTLRLexerState
-
-- (id) init
-{
-	self = [super init];
-	if (self) {
-		[self reset];
-	}
-	return self;
-}
-
-- (void) reset
-{
-	[self setToken:nil];
-	type = 0;				
-	channel = 0;				
-	tokenStartLine = 0;		
-	tokenStartCharPositionInLine = 0;
-	tokenStartCharIndex = -1;    
-	[self setText:nil];
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRLexerState" );
-#endif
-	[self setText:nil];
-	[self setToken:nil];
-	[super dealloc];
-}
-
-- (id<ANTLRToken>) getToken
-{
-	return token;
-}
-
-- (void) setToken:(id<ANTLRToken>) theToken
-{
-	if (theToken != token) {
-		if ( token ) [token release];
-		token = [theToken retain];
-	}
-}
-
-
-- (NSUInteger) getTokenType
-{
-	return type;
-}
-
-- (void) setTokenType:(NSUInteger) theTokenType
-{
-	type = theTokenType;
-}
-
-- (NSUInteger)channel
-{
-	return channel;
-}
-
-- (void) setChannel:(NSUInteger) theChannel
-{
-	channel = theChannel;
-}
-
-- (NSUInteger) getTokenStartLine
-{
-	return tokenStartLine;
-}
-
-- (void) setTokenStartLine:(NSUInteger) theTokenStartLine
-{
-	tokenStartLine = theTokenStartLine;
-}
-
-- (unsigned int) getTokenCharPositionInLine
-{
-	return tokenStartCharPositionInLine;
-}
-
-- (void) setTokenCharPositionInLine:(unsigned int) theCharPosition
-{
-	tokenStartCharPositionInLine = theCharPosition;
-}
-
-- (int) getTokenStartCharIndex
-{
-	return tokenStartCharIndex;
-}
-
-- (void) setTokenStartCharIndex:(int) theTokenStartCharIndex
-{
-	tokenStartCharIndex = theTokenStartCharIndex;
-}
-
-- (NSString *) text
-{
-	return text;
-}
-
-- (void) setText:(NSString *) theText
-{
-	if (text != theText) {
-		if ( text ) [text release];
-		text = [theText retain];
-	}
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRLinkBase.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRLinkBase.h
deleted file mode 100644
index f4c337e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRLinkBase.h
+++ /dev/null
@@ -1,80 +0,0 @@
-//
-//  ANTLRLinkBase.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/14/10.
-//  [The "BSD licence"]
-//  Copyright (c) 2010 Alan Condit
-//  All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-#ifndef DEBUG_DEALLOC
-#define DEBUG_DEALLOC
-#endif
-
-@protocol ANTLRLinkList <NSObject>
-
-+ (id<ANTLRLinkList>)newANTLRLinkBase;
-+ (id<ANTLRLinkList>)newANTLRLinkBase:(id<ANTLRLinkList>)np Prev:(id<ANTLRLinkList>)pp;
-
-- (void) dealloc;
-
-- (id<ANTLRLinkList>) append:(id<ANTLRLinkList>)node;
-- (id<ANTLRLinkList>) insert:(id<ANTLRLinkList>)node;
-
-- (id<ANTLRLinkList>) getfNext;
-- (void) setFNext:(id<ANTLRLinkList>)np;
-- (id<ANTLRLinkList>)getfPrev;
-- (void) setFPrev:(id<ANTLRLinkList>)pp;
-
-@property (retain) id<ANTLRLinkList> fPrev;
-@property (retain) id<ANTLRLinkList> fNext;
-@end
-
-@interface ANTLRLinkBase : NSObject <ANTLRLinkList> {
-	id<ANTLRLinkList> fPrev;
-	id<ANTLRLinkList> fNext;
-}
-
-@property (retain) id<ANTLRLinkList> fPrev;
-@property (retain) id<ANTLRLinkList> fNext;
-
-+ (id<ANTLRLinkList>)newANTLRLinkBase;
-+ (id<ANTLRLinkList>)newANTLRLinkBase:(id<ANTLRLinkList>)np Prev:(id<ANTLRLinkList>)pp;
-- (id<ANTLRLinkList>)init;
-- (id<ANTLRLinkList>)initWithPtr:(id)np Prev:(id)pp;
-- (void)dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id<ANTLRLinkList>)append:(id<ANTLRLinkList>)node;
-- (id<ANTLRLinkList>)insert:(id<ANTLRLinkList>)node;
-
-- (id<ANTLRLinkList>)getfNext;
-- (void)setfNext:(id<ANTLRLinkList>) np;
-- (id<ANTLRLinkList>)getfPrev;
-- (void)setfPrev:(id<ANTLRLinkList>) pp;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRLinkBase.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRLinkBase.m
deleted file mode 100644
index d352993..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRLinkBase.m
+++ /dev/null
@@ -1,127 +0,0 @@
-//
-//  ANTLRLinkBase.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/14/10.
-//  [The "BSD licence"]
-//  Copyright (c) 2010 Alan Condit
-//  All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRLinkBase.h"
-
-@implementation ANTLRLinkBase
-
-@synthesize fPrev;
-@synthesize fNext;
-
-+(id<ANTLRLinkList>)newANTLRLinkBase
-{
-	return [[ANTLRLinkBase alloc] init];
-}
-
-+(id<ANTLRLinkList>)newANTLRLinkBase:(id<ANTLRLinkList>)np Prev:(id<ANTLRLinkList>)pp
-{
-	return [[ANTLRLinkBase alloc] initWithPtr:np Prev:pp];
-}
-
--(id<ANTLRLinkList>)init
-{
-	if ((self = [super init]) != nil) {
-		fNext = nil;
-		fPrev = nil;
-	}
-	return(self);
-}
-
--(id<ANTLRLinkList>)initWithPtr:(id<ANTLRLinkList>)np Prev:(id<ANTLRLinkList>)pp
-{
-	if ((self = [super init]) != nil) {
-		fNext = np;
-		fPrev = pp;
-	}
-	return(self);
-}
-
--(void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRLinkBase" );
-#endif
-	if (fNext) [fNext dealloc];
-	if (fPrev) [fPrev dealloc];
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRLinkBase *copy;
-    
-    copy = [[self class] allocWithZone:aZone];
-    copy.fPrev = fPrev;
-    copy.fNext = fNext;
-    return( copy );
-}
-
--(id<ANTLRLinkList>)append:(id<ANTLRLinkList>)node
-{
-	node.fPrev = (id<ANTLRLinkList>)self;
-	node.fNext = (id<ANTLRLinkList>)self.fNext;
-	if (node.fNext != nil)
-        node.fNext.fPrev = node;
-    self.fNext = node;
-    return( node );
-}
-
--(id<ANTLRLinkList>)insert:(id<ANTLRLinkList>)node
-{
-	node.fNext = self;
-	node.fPrev = self.fPrev;
-    if (node.fPrev != nil) 
-        node.fPrev.fNext = node;
-	self.fPrev = node;
-	return( node );
-}
-
--(id<ANTLRLinkList>)getfNext
-{
-	return(fNext);
-}
-
--(void)setfNext:(id<ANTLRLinkList>)np
-{
-	fNext = np;
-}
-
--(id<ANTLRLinkList>)getfPrev
-{
-	return(fPrev);
-}
-
--(void)setfPrev:(id<ANTLRLinkList>)pp
-{
-	fPrev = pp;
-}
-
-@end
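
ANTLRLinkBase is a plain doubly-linked list node: -append: splices the argument in after the receiver, -insert: splices it in before, and both patch the neighbouring fPrev/fNext pointers. A short usage sketch, not from the original sources; a, b, c are illustrative names:

    // Each call returns the node it spliced in.
    id<ANTLRLinkList> a = [ANTLRLinkBase newANTLRLinkBase];
    id<ANTLRLinkList> b = [a append:[ANTLRLinkBase newANTLRLinkBase]];  // a <-> b
    id<ANTLRLinkList> c = [b insert:[ANTLRLinkBase newANTLRLinkBase]];  // a <-> c <-> b
    // Now a.fNext == c, c.fPrev == a, c.fNext == b, and b.fPrev == c.
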
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRLookaheadStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRLookaheadStream.h
deleted file mode 100644
index 3ec121e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRLookaheadStream.h
+++ /dev/null
@@ -1,77 +0,0 @@
-//
-//  ANTLRLookaheadStream.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-//  [The "BSD licence"]
-//  Copyright (c) 2010 Ian Michell 2010 Alan Condit
-//  All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRFastQueue.h"
-
-#define UNITIALIZED_EOF_ELEMENT_INDEX NSIntegerMax
-
-@interface ANTLRLookaheadStream : ANTLRFastQueue
-{
-    NSInteger index;
-	NSInteger eofElementIndex;
-	NSInteger lastMarker;
-	NSInteger markDepth;
-	id prevElement;
-	id eof;
-}
-
-@property (readwrite, retain, getter=getEof, setter=setEof:) id eof;
-@property (assign) NSInteger index;
-@property (assign, getter=getEofElementIndex, setter=setEofElementIndex:) NSInteger eofElementIndex;
-@property (assign, getter=getLastMarker, setter=setLastMarker:) NSInteger lastMarker;
-@property (assign, getter=getMarkDepth, setter=setMarkDepth:) NSInteger markDepth;
-@property (retain) id prevElement;
-
-- (id) initWithEOF:(id) obj;
-- (id) nextElement;
-- (id) remove;
-- (void) consume;
-- (void) sync:(NSInteger) need;
-- (void) fill:(NSInteger) n;
-- (id) LT:(NSInteger) i;
-- (id) LB:(NSInteger) i;
-- (id) getCurrentSymbol;
-- (NSInteger) mark;
-- (void) release:(NSInteger) marker;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) seek:(NSInteger) i;
-- (id) getEof;
-- (void) setEof:(id) anID;
-- (NSInteger) getEofElementIndex;
-- (void) setEofElementIndex:(NSInteger) anInt;
-- (NSInteger) getLastMarker;
-- (void) setLastMarker:(NSInteger) anInt;
-- (NSInteger) getMarkDepth;
-- (void) setMarkDepth:(NSInteger) anInt;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRLookaheadStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRLookaheadStream.m
deleted file mode 100644
index 57e489e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRLookaheadStream.m
+++ /dev/null
@@ -1,229 +0,0 @@
-//
-//  ANTLRLookaheadStream.m
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRError.h"
-#import "ANTLRRecognitionException.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRRuntimeException.h"
-
-@implementation ANTLRLookaheadStream
-
-@synthesize eof;
-@synthesize index;
-@synthesize eofElementIndex;
-@synthesize lastMarker;
-@synthesize markDepth;
-@synthesize prevElement;
-
--(id) init
-{
-	self = [super init];
-	if ( self != nil ) {
-        eof = [[ANTLRCommonToken eofToken] retain];
-		eofElementIndex = UNITIALIZED_EOF_ELEMENT_INDEX;
-		markDepth = 0;
-        index = 0;
-	}
-	return self;
-}
-
--(id) initWithEOF:(id)obj
-{
-	if ((self = [super init]) != nil) {
-		self.eof = obj;
-        if ( self.eof ) [self.eof retain];
-	}
-	return self;
-}
-
-- (void) reset
-{
-	[super reset];
-    index = 0;
-    p = 0;
-    prevElement = nil;
-	eofElementIndex = UNITIALIZED_EOF_ELEMENT_INDEX;
-}
-
--(id) nextElement
-{
-//	[self doesNotRecognizeSelector:_cmd];
-	return nil;
-}
-
-- (id) remove
-{
-    id obj = [self objectAtIndex:0];
-    p++;
-    // have we hit end of buffer and not backtracking?
-    if ( p == [data count] && markDepth==0 ) {
-        // if so, it's an opportunity to start filling at index 0 again
-        [self clear]; // size goes to 0, but retains memory
-    }
-    [obj release];
-    return obj;
-}
-
--(void) consume
-{
-	[self sync:1];
-	prevElement = [self remove];
-    index++;
-}
-
--(void) sync:(NSInteger) need
-{
-	NSInteger n = (p + need - 1) - [data count] + 1;
-	if ( n > 0 ) {
-		[self fill:n];
-	}
-}
-
--(void) fill:(NSInteger) n
-{
-    id obj;
-	for (NSInteger i = 1; i <= n; i++) {
-		obj = [self nextElement];
-		if ( obj == eof ) {
-			[data addObject:self.eof];
-			eofElementIndex = [data count] - 1;
-		}
-		else {
-			[data addObject:obj];
-		}
-	}
-}
-
--(NSUInteger) count
-{
-	@throw [NSException exceptionWithName:@"ANTLRUnsupportedOperationException" reason:@"Streams have no defined size" userInfo:nil];
-}
-
--(id) LT:(NSInteger) k
-{
-	if (k == 0) {
-		return nil;
-	}
-	if (k < 0) {
-		return [self LB:-k];
-	}
-	if ((p + k - 1) >= eofElementIndex) {
-		return self.eof;
-	}
-	[self sync:k];
-	return [self objectAtIndex:(k - 1)];
-}
-
--(id) LB:(NSInteger) k
-{
-	if (k == 1) {
-		return prevElement;
-	}
-	@throw [ANTLRNoSuchElementException newException:@"can't look backwards more than one token in this stream"];
-}
-
--(id) getCurrentSymbol
-{
-	return [self LT:1];
-}
-
--(NSInteger) mark
-{
-	markDepth++;
-	lastMarker = p;
-	return lastMarker;
-}
-
--(void) release:(NSInteger) marker
-{
-	// no resources to release
-}
-
--(void) rewind:(NSInteger) marker
-{
-	markDepth--;
-	[self seek:marker];
-//    if (marker == 0) [self reset];
-}
-
--(void) rewind
-{
-	[self seek:lastMarker];
-//    if (lastMarker == 0) [self reset];
-}
-
--(void) seek:(NSInteger) anIndex
-{
-	p = anIndex;
-}
-
-- (id) getEof
-{
-    return eof;
-}
-
-- (void) setEof:(id) anID
-{
-    eof = anID;
-}
-
-- (NSInteger) getEofElementIndex
-{
-    return eofElementIndex;
-}
-
-- (void) setEofElementIndex:(NSInteger) anInt
-{
-    eofElementIndex = anInt;
-}
-
-- (NSInteger) getLastMarker
-{
-    return lastMarker;
-}
-
-- (void) setLastMarker:(NSInteger) anInt
-{
-    lastMarker = anInt;
-}
-
-- (NSInteger) getMarkDepth
-{
-    return markDepth;
-}
-
-- (void) setMarkDepth:(NSInteger) anInt
-{
-    markDepth = anInt;
-}
-
-@end
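
The -sync:/-fill: pair above buffers just enough elements for the requested lookahead: -sync: computes how many elements are missing and -fill: pulls exactly that many from -nextElement. A small worked example of the arithmetic, assuming (as in the Java FastQueue this mirrors) that p is the read offset into data and -objectAtIndex: is relative to p:

    // Suppose p == 2 and [data count] == 3, so only data[2] is still unread.
    // [self sync:2] computes n = (p + need - 1) - [data count] + 1
    //                          = (2 + 2 - 1) - 3 + 1 = 1,
    // so -fill:1 pulls one more element from -nextElement.  After that,
    // LT:1 answers data[2] and LT:2 answers the newly buffered data[3].
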
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMap.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRMap.h
deleted file mode 100644
index 80ad486..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMap.h
+++ /dev/null
@@ -1,82 +0,0 @@
-//
-//  ANTLRMap.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-#import "ANTLRMapElement.h"
-
-//#define GLOBAL_SCOPE      0
-//#define LOCAL_SCOPE       1
-#define HASHSIZE            101
-#define HBUFSIZE            0x2000
-
-@interface ANTLRMap : ANTLRPtrBuffer {
-	//ANTLRMap *fNext; // found in superclass
-    // TStringPool *fPool;
-    NSInteger lastHash;
-}
-
-//@property (copy) ANTLRMap *fNext;
-@property (getter=getLastHash, setter=setLastHash:) NSInteger lastHash;
-
-// Contruction/Destruction
-+ (id)newANTLRMap;
-+ (id)newANTLRMapWithLen:(NSInteger)aHashSize;
-
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-// Instance Methods
-- (NSInteger)count;
-- (NSInteger)length;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-/* form hash value for string s */
--(NSInteger)hash:(NSString *)s;
-/*   look for s in ptrBuffer  */
--(id)lookup:(NSString *)s;
-/* look for s in ptrBuffer  */
--(id)install:(ANTLRMapElement *)sym;
-/*
- * delete entry from list
- */
-- (void)deleteANTLRMap:(ANTLRMapElement *)np;
-- (NSInteger)RemoveSym:(NSString *)s;
-- (void)delete_chain:(ANTLRMapElement *)np;
-- (ANTLRMapElement *)getTType:(NSString *)name;
-- (ANTLRMapElement *)getName:(NSInteger)ttype;
-- (NSInteger)getNode:(ANTLRMapElement *)aNode;
-- (void)putNode:(NSInteger)aTType Node:(id)aNode;
-- (void)putName:(NSString *)name TType:(NSInteger)ttype;
-- (void)putName:(NSString *)name Node:(id)aNode;
-
-@end
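
ANTLRMap is a fixed-size (HASHSIZE buckets) chained hash table keyed by token name, built on top of ANTLRPtrBuffer. A hedged round-trip sketch, not from the original sources; the token name and type are illustrative, and it assumes (as -getName: in the implementation does) that the element's node holds the type as an NSNumber:

    ANTLRMap *tokenMap = [ANTLRMap newANTLRMap];
    [tokenMap putName:@"IDENTIFIER" TType:4];          // hash the name, chain the element
    ANTLRMapElement *e = [tokenMap getTType:@"IDENTIFIER"];
    if ( e != nil ) {
        NSLog( @"IDENTIFIER => %ld", (long)[(NSNumber *)e.node integerValue] );
    }
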
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMap.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRMap.m
deleted file mode 100644
index 6bfb088..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMap.m
+++ /dev/null
@@ -1,362 +0,0 @@
-//
-//  ANTLRMap.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRMap.h"
-#import "ANTLRBaseTree.h"
-
-/*
- * Start of ANTLRMap
- */
-@implementation ANTLRMap
-
-@synthesize lastHash;
-
-+(id)newANTLRMap
-{
-    return [[ANTLRMap alloc] init];
-}
-
-+(id)newANTLRMapWithLen:(NSInteger)aBuffSize
-{
-    return [[ANTLRMap alloc] initWithLen:aBuffSize];
-}
-
--(id)init
-{
-    NSInteger idx;
-    
-	self = [super initWithLen:HASHSIZE];
-    if ( self != nil ) {
-		fNext = nil;
-        for( idx = 0; idx < HASHSIZE; idx++ ) {
-            ptrBuffer[idx] = nil;
-        }
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)aBuffSize
-{
-	self = [super initWithLen:aBuffSize];
-    if ( self != nil ) {
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRMMap" );
-#endif
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-	
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp ) {
-                rtmp = tmp;
-                tmp = (ANTLRMapElement *)tmp.fNext;
-                [rtmp release];
-            }
-        }
-    }
-	[super dealloc];
-}
-
--(void)deleteANTLRMap:(ANTLRMapElement *)np
-{
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-    
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp ) {
-                rtmp = tmp;
-                tmp = [tmp getfNext];
-                [rtmp release];
-            }
-        }
-    }
-}
-
-- (void)clear
-{
-    ANTLRMapElement *tmp, *rtmp;
-    NSInteger idx;
-
-    for( idx = 0; idx < BuffSize; idx++ ) {
-        tmp = ptrBuffer[idx];
-        while ( tmp ) {
-            rtmp = tmp;
-            tmp = [tmp getfNext];
-            [rtmp dealloc];
-        }
-        ptrBuffer[idx] = nil;
-    }
-}
-
-- (NSInteger)count
-{
-    NSInteger aCnt = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        if (ptrBuffer[i] != nil) {
-            aCnt++;
-        }
-    }
-    return aCnt;
-}
-
-- (NSInteger)length
-{
-    return BuffSize;
-}
-
-- (NSInteger)size
-{
-    ANTLRMapElement *anElement;
-    NSInteger aSize = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize += (NSInteger)[anElement size];
-        }
-    }
-    return aSize;
-}
-                          
-#ifdef USERDOC
-/*
- *  HASH        hash entry to get index to table
- *  NSInteger hash( ANTLRMap *self, char *s );
- *
- *     Inputs:  NSString *s         string to find
- *
- *     Returns: NSInteger                 hashed value
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)hash:(NSString *)s       /*    form hash value for string s */
-{
-	NSInteger hashval;
-	const char *tmp;
-    
-	tmp = [s cStringUsingEncoding:NSASCIIStringEncoding];
-	for( hashval = 0; *tmp != '\0'; )
-        hashval += *tmp++;
-	self->lastHash = hashval % HASHSIZE;
-	return( self->lastHash );
-}
-
-#ifdef USERDOC
-/*
- *  LOOKUP  search hashed list for entry
- *  ANTLRMapElement *lookup:(NSString *)s;
- *
- *     Inputs:  NSString  *s       string to find
- *
- *     Returns: ANTLRMapElement  *        pointer to entry
- *
- *  Last Revision 9/03/90
- */
-#endif
--(id)lookup:(NSString *)s
-{
-    ANTLRMapElement *np;
-    
-    for( np = self->ptrBuffer[[self hash:s]]; np != nil; np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  INSTALL search hashed list for entry
- *  NSInteger install( ANTLRMap *self, ANTLRMapElement *sym );
- *
- *     Inputs:  ANTLRMapElement    *sym   -- symbol ptr to install
- *              NSInteger         scope -- level to find
- *
- *     Returns: Boolean     TRUE   if installed
- *                          FALSE  if already in table
- *
- *  Last Revision 9/03/90
- */
-#endif
--(ANTLRMapElement *)install:(ANTLRMapElement *)sym
-{
-    ANTLRMapElement *np;
-    
-    np = [self lookup:[sym getName]];
-    if ( np == nil ) {
-        [sym setFNext:ptrBuffer[ lastHash ]];
-        ptrBuffer[ lastHash ] = sym;
-        [sym retain];
-        return( ptrBuffer[ lastHash ] );
-    }
-    return( nil );            /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  RemoveSym  search hashed list for entry
- *  NSInteger RemoveSym( ANTLRMap *self, char *s );
- *
- *     Inputs:  char     *s          string to find
- *
- *     Returns: NSInteger      indicator of SUCCESS OR FAILURE
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)RemoveSym:(NSString *)s
-{
-    ANTLRMapElement *np, *prev;
-    NSInteger idx;
-    
-    idx = [self hash:s];
-    for ( prev = nil, np = self->ptrBuffer[idx]; np != nil; prev = np, np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            if ( prev == nil ) self->ptrBuffer[idx] = [np getfNext]; /* unlink head of chain    */
-            else [prev setFNext:[np getfNext]];                      /* unlink interior element */
-            [np release];
-            return( SUCCESS );            /* report SUCCESS     */
-        }
-    }
-    return( FAILURE );                    /*   not found      */
-}
-
--(void)delete_chain:(ANTLRMapElement *)np
-{
-    if ( [np getfNext] != nil )
-		[self delete_chain:[np getfNext]];
-	[np release];
-}
-
-#ifdef DONTUSEYET
--(NSInteger)bld_symtab:(KW_TABLE *)toknams
-{
-    NSInteger i;
-    ANTLRMapElement *np;
-    
-    for( i = 0; *(toknams[i].name) != '\0'; i++ ) {
-        // install symbol in ptrBuffer
-        np = [ANTLRMapElement newANTLRMapElement:[NSString stringWithFormat:@"%s", toknams[i].name]];
-        //        np->fType = toknams[i].toknum;
-        [self install:np Scope:0];
-    }
-    return( SUCCESS );
-}
-#endif
-
-/*
- * works only for maplist indexed not by name but by TokenNumber
- */
-- (ANTLRMapElement *)getName:(NSInteger)ttype
-{
-    ANTLRMapElement *np;
-    NSInteger aTType;
-
-    aTType = ttype % HASHSIZE;
-    for( np = self->ptrBuffer[aTType]; np != nil; np = [np getfNext] ) {
-        if ( [(NSNumber *)np.node integerValue] == ttype ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-- (NSInteger)getNode:(id<ANTLRBaseTree>)aNode
-{
-    ANTLRMapElement *np;
-    NSInteger idx;
-
-    idx = [(id<ANTLRBaseTree>)aNode type];
-    idx %= HASHSIZE;
-    np = ptrBuffer[idx];
-    return( [(NSNumber *)np.node integerValue] );
-}
-
-- (ANTLRMapElement *)getTType:(NSString *)name
-{
-    return [self lookup:name];
-}
-
-// create node and install node in ptrBuffer
-- (void)putName:(NSString *)name TType:(NSInteger)ttype
-{
-    ANTLRMapElement *np;
-    
-    np = [ANTLRMapElement newANTLRMapElementWithName:[NSString stringWithString:name] Type:ttype];
-    [self install:np];
-}
-
-// create node and install node in ptrBuffer
-- (void)putName:(NSString *)name Node:(id)aNode
-{
-    ANTLRMapElement *np, *np1;
-    NSInteger idx;
-    
-    idx = [self hash:name];
-    np1 = [ANTLRMapElement newANTLRMapElementWithName:[NSString stringWithString:name] Type:idx];
-    np = [self lookup:name];
-    if ( np == nil ) {
-        [np1 setFNext:self->ptrBuffer[ self->lastHash ]];
-        self->ptrBuffer[ self->lastHash ] = np1;
-        [np1 retain];
-    }
-    else {
-        // ptrBuffer[idx] = np;
-    }
-    return;
-}
-
-// create node and install node in ptrBuffer
-- (void)putNode:(NSInteger)aTType Node:(id)aNode
-{
-    ANTLRMapElement *np;
-    NSInteger ttype;
-    
-    ttype = aTType % HASHSIZE;
-    np = [ANTLRMapElement newANTLRMapElementWithNode:ttype Node:(id)aNode];
-    ptrBuffer[ttype] = np;
-}
-
-@end
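
The ANTLRMap removed above is a small chained hash table: hash: folds the characters of a name into a bucket index, install: prepends an ANTLRMapElement to that bucket's linked list, and lookup: walks the list comparing names. As a hedged illustration only -- the classes and selectors come from the deleted sources, the framework itself is being removed by this change, and the values are made up -- a caller of this pre-ARC API would look roughly like this:

    #import "ANTLRMap.h"
    #import "ANTLRMapElement.h"

    static void demoSymbolMap(void)
    {
        ANTLRMap *symbols = [ANTLRMap newANTLRMap];
        ANTLRMapElement *sym = [ANTLRMapElement newANTLRMapElementWithName:@"ID" Type:4];
        [symbols install:sym];                    // hashes @"ID" and chains sym into ptrBuffer
        ANTLRMapElement *found = [symbols lookup:@"ID"];
        if ( found != nil )
            NSLog(@"found %@", [found getName]);
        [sym release];                            // install: retained it; drop the +alloc reference
        [symbols release];                        // manual retain/release, as everywhere in this runtime
    }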
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMapElement.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRMapElement.h
deleted file mode 100644
index e20d01c..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMapElement.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//
-//  ANTLRMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-
-@interface ANTLRMapElement : ANTLRBaseMapElement {
-    NSString *name;
-    id        node;
-}
-@property (retain, getter=getName, setter=setName:) NSString *name;
-@property (retain, getter=getNode, setter=setNode:) id node;
-
-+ (id) newANTLRMapElement;
-+ (id) newANTLRMapElementWithName:(NSString *)aName Type:(NSInteger)aTType;
-+ (id) newANTLRMapElementWithNode:(NSInteger)aTType Node:(id)aNode;
-+ (id) newANTLRMapElementWithName:(NSString *)aName Node:(id)aNode;
-+ (id) newANTLRMapElementWithObj1:(id)anObj1 Obj2:(id)anObj2;
-- (id) init;
-- (id) initWithName:(NSString *)aName Type:(NSInteger)aTType;
-- (id) initWithNode:(NSInteger)aTType Node:(id)aNode;
-- (id) initWithName:(NSString *)aName Node:(id)aNode;
-- (id) initWithObj1:(id)anObj1 Obj2:(id)anObj2;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSInteger) count;
-- (NSInteger) size;
-- (NSString *)getName;
-- (void)setName:(NSString *)aName;
-- (id)getNode;
-- (void)setNode:(id)aNode;
-- (void)putNode:(id)aNode;
-- (void)putNode:(id)aNode With:(NSInteger)uniqueID;
-//- (void)setObject:(id)aNode atIndex:anIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMapElement.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRMapElement.m
deleted file mode 100644
index bce1c9f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMapElement.m
+++ /dev/null
@@ -1,207 +0,0 @@
-//
-//  ANTLRMapElement.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRMapElement.h"
-
-
-@implementation ANTLRMapElement
-
-@synthesize name;
-@synthesize node;
-
-+ (id) newANTLRMapElement
-{
-    return [[ANTLRMapElement alloc] init];
-}
-
-+ (id) newANTLRMapElementWithName:(NSString *)aName Type:(NSInteger)aTType
-{
-    return [[ANTLRMapElement alloc] initWithName:aName Type:aTType];
-}
-
-+ (id) newANTLRMapElementWithNode:(NSInteger)aTType Node:(id)aNode
-{
-    return [[ANTLRMapElement alloc] initWithNode:aTType Node:aNode];
-}
-
-+ (id) newANTLRMapElementWithName:(NSString *)aName Node:(id)aNode
-{
-    return [[ANTLRMapElement alloc] initWithName:aName Node:aNode];
-}
-
-+ (id) newANTLRMapElementWithObj1:(id)anObj1 Obj2:(id)anObj2
-{
-    return [[ANTLRMapElement alloc] initWithObj1:anObj1 Obj2:anObj2];
-}
-
-- (id) init
-{
-    self = [super init];
-    if ( self != nil ) {
-        index = nil;
-        name  = nil;
-    }
-    return self;
-}
-
-- (id) initWithName:(NSString *)aName Type:(NSInteger)aTType
-{
-    self = [super init];
-    if ( self != nil ) {
-        index = [[NSNumber numberWithInteger: aTType] retain];
-        name  = [[NSString stringWithString:aName] retain];
-    }
-    return self;
-}
-
-- (id) initWithNode:(NSInteger)aTType Node:(id)aNode
-{
-    self = [super initWithAnIndex:[NSNumber numberWithInteger:aTType]];
-    if ( self != nil ) {
-        node  = aNode;
-        if ( node ) [node retain];
-    }
-    return self;
-}
-
-- (id) initWithName:(NSString *)aName Node:(id)aNode
-{
-    self = [super init];
-    if ( self != nil ) {
-        name  = [[NSString stringWithString:aName] retain];
-        node = aNode;
-        if ( node ) [node retain];
-    }
-    return self;
-}
-
-- (id) initWithObj1:(id)anIndex Obj2:(id)aNode
-{
-    self = [super initWithAnIndex:anIndex];
-    if ( self != nil ) {
-        node = aNode;
-        if ( node ) [node retain];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRMapElement" );
-#endif
-    if ( name ) [name release];
-    if ( node ) [node release];
-    [super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRMapElement *copy;
-
-    copy = [super copyWithZone:aZone];
-    if (name) copy.name = name;
-    if (node) copy.node = node;
-    return( copy );
-}
-
-- (NSInteger) count
-{
-    NSInteger aCnt = 0;
-    if (name != nil) aCnt++;
-    if (node != nil) aCnt++;
-    return aCnt;
-}
-
-- (NSInteger)size
-{
-    NSInteger aSize = 0;
-    if ( name ) aSize += sizeof(id);
-    if ( node ) aSize += sizeof(id);
-    return aSize;
-}
-
-
-- (NSString *)getName
-{
-    return name;
-}
-
-- (void)setName:(NSString *)aName
-{
-    if ( aName != name ) {
-        if ( name ) [name release];
-        [aName retain];
-    }
-    name = aName;
-}
-
-- (id)getNode
-{
-    return node;
-}
-
-- (void)setNode:(id)aNode
-{   if ( aNode != node ) {
-        if ( node ) [node release];
-        [aNode retain];
-    }
-    node = aNode;
-}
-
-- (void)putNode:(id)aNode
-{
-    index = ((ANTLRMapElement *)aNode).index;
-    if (((ANTLRMapElement *)aNode).name) {
-        name = [((ANTLRMapElement *)aNode).name retain];
-        node = nil;
-    }
-    if (((ANTLRMapElement *)aNode).node) {
-        name = nil;
-        node = [((ANTLRMapElement *)aNode).node retain];
-    }
-}
-
-- (void)putNode:(id)aNode With:(NSInteger)uniqueID
-{
-    index = ((ANTLRMapElement *)aNode).index;
-    if (((ANTLRMapElement *)aNode).name) {
-        name = [((ANTLRMapElement *)aNode).name retain];
-        node = nil;
-    }
-    if (((ANTLRMapElement *)aNode).node) {
-        name = nil;
-        node = [((ANTLRMapElement *)aNode).node retain];
-    }
-}
-
-@end
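
ANTLRMapElement, deleted above, is the bucket entry those maps chain together: it inherits an NSNumber index from ANTLRBaseMapElement and carries either a name or an arbitrary node payload, retained manually. A minimal sketch of the two construction paths the removed initializers support; the payload values are illustrative, and the string standing in for a node is just a placeholder for whatever id the caller stores:

    #import "ANTLRMapElement.h"

    static void demoMapElements(void)
    {
        // name + token type: index <- @4, name <- @"ID", node stays nil
        ANTLRMapElement *byName = [ANTLRMapElement newANTLRMapElementWithName:@"ID" Type:4];
        // token type + node: index <- @4, node retained, name stays nil
        ANTLRMapElement *byNode = [ANTLRMapElement newANTLRMapElementWithNode:4 Node:@"placeholder payload"];
        NSLog(@"%@ -> %@", [byName getName], [byNode getNode]);
        [byName release];
        [byNode release];
    }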
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedNotSetException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedNotSetException.h
deleted file mode 100644
index 95d191d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedNotSetException.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-//  ANTLRMismatchedNotSetException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/13/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRBitSet.h"
-
-@interface ANTLRMismatchedNotSetException : ANTLRRecognitionException
-{
-    NSString *expecting;
-}
-@property (retain, getter=getExpecting, setter=setExpecting:) NSString *expecting;
-
-+ (ANTLRMismatchedNotSetException *)newException;
-+ (ANTLRMismatchedNotSetException *)newException:(id<ANTLRIntStream>)anInput
-                                                               Follow:(NSString *)expecting;
-
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInput Follow:(NSString *)expecting;
-
-- (NSString *)toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedNotSetException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedNotSetException.m
deleted file mode 100644
index e43e1b1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedNotSetException.m
+++ /dev/null
@@ -1,69 +0,0 @@
-//
-//  ANTLRMismatchedNotSetException.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/13/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRMismatchedNotSetException.h"
-
-@implementation ANTLRMismatchedNotSetException
-
-@synthesize expecting;
-
-+ (ANTLRMismatchedNotSetException *)newException
-{
-    return [[ANTLRMismatchedNotSetException alloc] init];
-}
-
-+ (ANTLRMismatchedNotSetException *)newException:(id<ANTLRIntStream>)anInput
-                                                               Follow:(NSString *)expected
-{
-    return [[ANTLRMismatchedNotSetException alloc] initWithStream:anInput Follow:(NSString *)expected];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil ) {
-    }
-    return(self);
-}
-
-- (id) initWithStream:(id<ANTLRIntStream>)anInput Follow:(NSString *)expected
-{
-    if ((self = [super initWithStream:anInput]) != nil ) {
-        expecting = expected;
-    }
-    return(self);
-}
-
-- (NSString *)toString
-{
-    return [NSString stringWithFormat:@"MismatchedNotSetException( %d != %@ )", [self unexpectedType], expecting];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedRangeException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedRangeException.h
deleted file mode 100644
index 678af61..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedRangeException.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRIntStream;
-
-
-@interface ANTLRMismatchedRangeException : ANTLRRecognitionException {
-	NSRange range;
-}
-
-@property (assign) NSRange range;
-
-+ (id) newException:(NSRange) aRange stream:(id<ANTLRIntStream>) theInput;
-- (id) initWithRange:(NSRange) aRange stream:(id<ANTLRIntStream>) theInput;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedRangeException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedRangeException.m
deleted file mode 100644
index 0647254..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedRangeException.m
+++ /dev/null
@@ -1,55 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRMismatchedRangeException.h"
-
-
-@implementation ANTLRMismatchedRangeException
-
-@synthesize range;
-
-+ (id) newException:(NSRange) aRange stream:(id<ANTLRIntStream>) theInput
-{
-	return [[ANTLRMismatchedRangeException alloc] initWithRange:aRange stream:theInput];
-}
-
-- (id) initWithRange:(NSRange) aRange stream:(id<ANTLRIntStream>) theInput
-{
-	if ((self = [super initWithStream:theInput]) != nil) {
-		range = aRange;
-	}
-	return self;
-}
-
-- (NSString *) description
-{
-	NSMutableString *desc = (NSMutableString *)[super description];
-	[desc appendFormat:@" range:%@", NSStringFromRange(range)];
-	return desc;
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedSetException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedSetException.h
deleted file mode 100644
index 0610973..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedSetException.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLRMismatchedSetException : ANTLRRecognitionException {
-	NSString *expecting;
-}
-
-@property (retain, getter=getExpecting, setter=setExpecting:) NSString *expecting;
-
-+ (id) newException:(NSString *) theExpectedSet stream:(id<ANTLRIntStream>) theStream;
-- (id) initWithSet:(NSString *) theExpectedSet stream:(id<ANTLRIntStream>) theStream;
-
-- (NSString *) getExpecting;
-- (void) setExpecting: (NSString *) anExpectedSet;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedSetException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedSetException.m
deleted file mode 100644
index b5248d2..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedSetException.m
+++ /dev/null
@@ -1,79 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRMismatchedSetException.h"
-
-
-@implementation ANTLRMismatchedSetException
-
-@synthesize expecting;
-
-+ (id) newException:(NSString *) theExpectedSet stream:(id<ANTLRIntStream>) theStream
-{
-	return [[ANTLRMismatchedSetException alloc] initWithSet:theExpectedSet stream:theStream];
-}
-
-- (id) initWithSet:(NSString *) theExpectedSet stream:(id<ANTLRIntStream>) theStream
-{
-	if ((self = [super initWithStream:theStream]) != nil) {
-		[self setExpecting:theExpectedSet];
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-	[self setExpecting:nil];
-	[super dealloc];
-}
-
-- (NSString *) description
-{
-	NSMutableString *desc =(NSMutableString *)[super description];
-	[desc appendFormat:@" set:%@", expecting];
-	return desc;
-}
-
-
-//---------------------------------------------------------- 
-//  expectedSet 
-//---------------------------------------------------------- 
-- (NSString *) getExpecting
-{
-    return expecting; 
-}
-
-- (void) setExpecting: (NSString *) anExpectedSet
-{
-    if ( expecting != anExpectedSet ) {
-        if ( expecting ) [expecting release];
-        [anExpectedSet retain];
-        expecting = anExpectedSet;
-    }
-}
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTokenException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTokenException.h
deleted file mode 100644
index 8e28dcc..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTokenException.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRBitSet.h"
-
-@protocol ANTLRIntStream;
-
-@interface ANTLRMismatchedTokenException : ANTLRRecognitionException {
-	NSInteger expecting;
-	unichar expectingChar;
-	BOOL isTokenType;
-}
-
-@property (assign, getter=getExpecting, setter=setExpecting:) NSInteger expecting;
-@property (assign, getter=getExpectingChar, setter=setExpectingChar:) unichar expectingChar;
-@property (assign, getter=getIsTokenType, setter=setIsTokenType:) BOOL isTokenType;
-
-+ (id) newException:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-+ (id) newExceptionMissing:(NSInteger)expectedTokenType
-                                        Stream:(id<ANTLRIntStream>)anInput
-                                         Token:(id<ANTLRToken>)inserted;
-+ (id) newExceptionChar:(unichar)expectedCharacter Stream:(id<ANTLRIntStream>)anInput;
-+ (id) newExceptionStream:(id<ANTLRIntStream>)anInput
-                                    Exception:(NSException *)e
-                                       Follow:(ANTLRBitSet *)follow;
-- (id) initWithTokenType:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
--(id) initWithTokenType:(NSInteger)expectedTokenType
-                 Stream:(id<ANTLRIntStream>)anInput
-                  Token:(id<ANTLRToken>)inserted;
-- (id) initWithCharacter:(unichar)expectedCharacter Stream:(id<ANTLRIntStream>)anInput;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTokenException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTokenException.m
deleted file mode 100644
index a8807fa..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTokenException.m
+++ /dev/null
@@ -1,99 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRMismatchedTokenException.h"
-
-
-@implementation ANTLRMismatchedTokenException
-
-@synthesize expecting;
-@synthesize expectingChar;
-@synthesize isTokenType;
-
-
-+ (id) newException:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput
-{
-	return [[ANTLRMismatchedTokenException alloc] initWithTokenType:expectedTokenType Stream:anInput];
-}
-
-+ (id) newExceptionMissing:(NSInteger)expectedTokenType
-                                        Stream:(id<ANTLRIntStream>)anInput
-                                         Token:(id<ANTLRToken>)inserted
-{
-	return [[ANTLRMismatchedTokenException alloc] initWithTokenType:expectedTokenType Stream:anInput Token:inserted];
-}
-
-+ (id) newExceptionChar:(unichar) expectedCharacter Stream:(id<ANTLRIntStream>)anInput
-{
-	return [[ANTLRMismatchedTokenException alloc] initWithCharacter:expectedCharacter Stream:anInput];
-}
-
-+ (id) newExceptionStream:(id<ANTLRIntStream>)anInput Exception:(NSException *)e Follow:(ANTLRBitSet *) follow
-{
-	return [[ANTLRMismatchedTokenException alloc] initWithStream:anInput];
-}
-
--(id) initWithTokenType:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput
-{
-	if ((self = [super initWithStream:anInput]) != nil) {
-		expecting = expectedTokenType;
-		isTokenType = YES;
-	}
-	return self;
-}
-
--(id) initWithTokenType:(NSInteger)expectedTokenType
-                 Stream:(id<ANTLRIntStream>)anInput
-                  Token:(id<ANTLRToken>)inserted
-{
-	if ((self = [super initWithStream:anInput]) != nil) {
-		expecting = expectedTokenType;
-		isTokenType = YES;
-	}
-	return self;
-}
-
-- (id) initWithCharacter:(unichar) expectedCharacter Stream:(id<ANTLRIntStream>)anInput
-{
-	if ((self = [super initWithStream:anInput]) != nil) {
-		expectingChar = expectedCharacter;
-		isTokenType = NO;
-	}
-	return self;
-}
-
-- (NSString *) description
-{
-	NSMutableString *desc = (NSMutableString *)[super description];
-	if (isTokenType) {
-		[desc appendFormat:@" expected:%d got:%d", expecting, [self unexpectedType]];
-	} else {
-		[desc appendFormat:@" expected:%c got:%c", expectingChar, (unichar)[self unexpectedType]];
-	}
-	return desc;
-}
-
-@end
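
ANTLRMismatchedTokenException, removed just above, carries either an expected token type (parsers) or an expected character (lexers), and its description appends " expected:X got:Y" using the unexpectedType inherited from ANTLRRecognitionException. A hedged sketch of both factory forms; the input parameter stands in for whatever id<ANTLRIntStream> a generated recognizer would hold, and the token type 4 is arbitrary:

    #import "ANTLRMismatchedTokenException.h"

    static void demoMismatch(id<ANTLRIntStream> input)
    {
        // token-type form: description reads "... expected:4 got:<actual type>"
        ANTLRMismatchedTokenException *byType =
            [ANTLRMismatchedTokenException newException:4 Stream:input];
        // character form: description prints the expected and actual characters instead
        ANTLRMismatchedTokenException *byChar =
            [ANTLRMismatchedTokenException newExceptionChar:';' Stream:input];
        NSLog(@"%@\n%@", [byType description], [byChar description]);
        [byType release];
        [byChar release];
    }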
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTreeNodeException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTreeNodeException.h
deleted file mode 100644
index 3528cba..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTreeNodeException.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-
-@protocol ANTLRIntStream;
-
-@interface ANTLRMismatchedTreeNodeException : ANTLRRecognitionException {
-	NSInteger expecting;
-}
-
-@property (getter=getExpecting, setter=setExpecting:) NSInteger expecting;
-
-+ (id) newException:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-- (id) initWithTokenType:(NSInteger) expectedTokenType Stream:(id<ANTLRIntStream>)anInput;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTreeNodeException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTreeNodeException.m
deleted file mode 100644
index 60d5184..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMismatchedTreeNodeException.m
+++ /dev/null
@@ -1,54 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRMismatchedTreeNodeException.h"
-
-
-@implementation ANTLRMismatchedTreeNodeException
-
-@synthesize expecting;
-
-+ (id) newException:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput
-{
-	return [[ANTLRMismatchedTreeNodeException alloc] initWithTokenType:expectedTokenType Stream:anInput];
-}
-
--(id) initWithTokenType:(NSInteger)expectedTokenType Stream:(id<ANTLRIntStream>)anInput
-{
-	if ((self = [super initWithStream:anInput]) != nil) {
-		expecting = expectedTokenType;
-	}
-	return self;
-}
-
-- (NSString *) description
-{
-	NSMutableString *desc = (NSMutableString *)[super description];
-	[desc appendFormat:@" expected:%d got:%d", expecting, [self unexpectedType]];
-	return desc;
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMissingTokenException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRMissingTokenException.h
deleted file mode 100644
index 1ae8103..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMissingTokenException.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//
-//  ANTLRMissingTokenException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRToken.h"
-
-@interface ANTLRMissingTokenException : ANTLRMismatchedTokenException {
-    id<ANTLRToken> inserted;
-}
-/** Used for remote debugger deserialization */
-+ (id) newException;
-+ (id) newException:(NSInteger)expected
-             Stream:(id<ANTLRIntStream>)anInput
-               With:(id<ANTLRToken>)insertedToken;
-- (id) init;
-- (id) init:(NSInteger)expected Stream:(id<ANTLRIntStream>)anInput With:(id<ANTLRToken>)insertedToken;
-
-- (NSInteger) getMissingType;
-
-- (NSString *)toString;
-
-@property (retain) id<ANTLRToken> inserted;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRMissingTokenException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRMissingTokenException.m
deleted file mode 100644
index 35bd130..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRMissingTokenException.m
+++ /dev/null
@@ -1,83 +0,0 @@
-//
-//  ANTLRMissingTokenException.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRMissingTokenException.h"
-
-
-@implementation ANTLRMissingTokenException
-/** Used for remote debugger deserialization */
-+ (id) newException
-{
-    return [[ANTLRMissingTokenException alloc] init];
-}
-
-+ (id) newException:(NSInteger)expected
-             Stream:(id<ANTLRIntStream>)anInput
-               With:(id<ANTLRToken>)insertedToken
-{
-    return [[ANTLRMissingTokenException alloc] init:expected Stream:anInput With:insertedToken];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil) {
-    }
-    return self;
-}
-
-- (id) init:(NSInteger)expected Stream:(id<ANTLRIntStream>)anInput With:(id<ANTLRToken>)insertedToken
-{
-    if ((self = [super initWithStream:anInput]) != nil) {
-        expecting = expected;
-        input = anInput;
-        inserted = insertedToken;
-    }
-    return self;
-}
-
-- (NSInteger) getMissingType
-{
-    return expecting;
-}
-
-- (NSString *)toString
-{
-    if ( inserted != nil && token != nil ) {
-        return [NSString stringWithFormat:@"MissingTokenException(inserted %@ at %@)", inserted, token.text];
-    }
-    if ( token!=nil ) {
-        return [NSString stringWithFormat:@"MissingTokenException(at %@)", token.text ];
-    }
-    return @"MissingTokenException";
-}
-
-@synthesize inserted;
-@end
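
ANTLRMissingTokenException, deleted above, covers single-token-insertion recovery: the recognizer conjures up the token it expected, records it in inserted, and toString reports what was inserted and where. A sketch under the assumption that the caller already has the stream and the conjured token, both passed in here as hypothetical parameters:

    #import "ANTLRMissingTokenException.h"

    static void demoMissingToken(id<ANTLRIntStream> input, id<ANTLRToken> conjured, NSInteger expectedType)
    {
        ANTLRMissingTokenException *ex =
            [ANTLRMissingTokenException newException:expectedType Stream:input With:conjured];
        // prints e.g. "MissingTokenException(inserted <conjured> at <current token text>)"
        NSLog(@"%@", [ex toString]);
        [ex release];
    }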
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRNoViableAltException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRNoViableAltException.h
deleted file mode 100644
index 9b2e521..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRNoViableAltException.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRecognitionException.h"
-#import "ANTLRIntStream.h"
-
-@interface ANTLRNoViableAltException : ANTLRRecognitionException {
-	NSInteger decisionNumber;
-	NSInteger stateNumber;
-}
-
-+ (ANTLRNoViableAltException *) newException:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<ANTLRIntStream>)theStream;
-- (ANTLRNoViableAltException *) initWithDecision:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<ANTLRIntStream>)theStream;
-
-- (void)setDecisionNumber:(NSInteger)decisionNumber;
-- (void)setStateNumber:(NSInteger)stateNumber;
-
-
-@property (getter=decisionNumber,setter=setDecisionNumber:) NSInteger decisionNumber;
-@property (getter=stateNumber,setter=setStateNumber:) NSInteger stateNumber;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRNoViableAltException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRNoViableAltException.m
deleted file mode 100644
index e519581..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRNoViableAltException.m
+++ /dev/null
@@ -1,83 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRNoViableAltException.h"
-
-
-@implementation ANTLRNoViableAltException
-
-
-+ (ANTLRNoViableAltException *) newException:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<ANTLRIntStream>)theStream
-{
-	return [[self alloc] initWithDecision:theDecisionNumber state:theStateNumber stream:theStream];
-}
-
-
-- (ANTLRNoViableAltException *) initWithDecision:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<ANTLRIntStream>)theStream
-{
-	if ((self = [super initWithStream:theStream]) != nil) {
-		decisionNumber = theDecisionNumber;
-		stateNumber = theStateNumber;
-	}
-	return self;
-}
-
-- (NSString *) description
-{
-	NSMutableString *desc = (NSMutableString *)[super description];
-	[desc appendFormat:@" decision:%d state:%d", decisionNumber, stateNumber];
-	return desc;
-}
-
-//---------------------------------------------------------- 
-//  decisionNumber 
-//---------------------------------------------------------- 
-- (NSInteger) decisionNumber
-{
-    return decisionNumber;
-}
-
-- (void) setDecisionNumber: (NSInteger) aDecisionNumber
-{
-    decisionNumber = aDecisionNumber;
-}
-
-//---------------------------------------------------------- 
-//  stateNumber 
-//---------------------------------------------------------- 
-- (NSInteger) stateNumber
-{
-    return stateNumber;
-}
-
-- (void) setStateNumber: (NSInteger) aStateNumber
-{
-    stateNumber = aStateNumber;
-}
-
-
-@end
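
ANTLRNoViableAltException, removed just above, records which DFA decision and which state failed so the error can name the decision that had no viable alternative. A short sketch; the decision and state numbers are made-up values a generated recognizer would normally supply, and input again stands in for an id<ANTLRIntStream>:

    #import "ANTLRNoViableAltException.h"

    static void demoNoViableAlt(id<ANTLRIntStream> input)
    {
        ANTLRNoViableAltException *ex =
            [ANTLRNoViableAltException newException:3 state:7 stream:input];
        // description appends " decision:3 state:7" to the base recognition-exception text
        NSLog(@"%@ (decision %ld)", [ex description], (long)[ex decisionNumber]);
        [ex release];
    }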
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRNodeMapElement.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRNodeMapElement.h
deleted file mode 100644
index 3bbf7b7..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRNodeMapElement.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-//  ANTLRRuleMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-#import "ANTLRBaseTree.h"
-
-@interface ANTLRNodeMapElement : ANTLRBaseMapElement {
-    id<ANTLRBaseTree> node;
-}
-
-@property (retain, getter=getNode, setter=setNode:) id<ANTLRBaseTree> node;
-
-+ (void)initialize;
-
-+ (id) newANTLRNodeMapElement;
-+ (id) newANTLRNodeMapElementWithIndex:(id)anIndex Node:(id<ANTLRBaseTree>)aNode;
-- (id) init;
-- (id) initWithAnIndex:(id)anIndex Node:(id)aNode;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (id<ANTLRBaseTree>)getNode;
-- (void)setNode:(id<ANTLRBaseTree>)aNode;
-
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRNodeMapElement.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRNodeMapElement.m
deleted file mode 100644
index 06f35cc..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRNodeMapElement.m
+++ /dev/null
@@ -1,108 +0,0 @@
-//
-//  ANTLRNodeMapElement.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRNodeMapElement.h"
-
-static NSInteger _aUniqueID;
-
-@implementation ANTLRNodeMapElement
-
-@synthesize node;
-
-+ (void)initialize
-{
-    _aUniqueID = 0;
-}
-
-+ (ANTLRNodeMapElement *)newANTLRNodeMapElement
-{
-    return [[ANTLRNodeMapElement alloc] init];
-}
-
-+ (ANTLRNodeMapElement *)newANTLRNodeMapElementWithIndex:(id)anIndex Node:(id<ANTLRBaseTree>)aNode
-{
-    return [[ANTLRNodeMapElement alloc] initWithAnIndex:anIndex Node:aNode];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil ) {
-        index = nil;
-        node = nil;
-    }
-    return (self);
-}
-
-- (id) initWithAnIndex:(id)anIndex Node:(id)aNode
-{
-    self = [super initWithAnIndex:anIndex];
-    if ( self ) {
-        if ( aNode != node ) {
-            if ( node ) [node release];
-            [aNode retain];
-        }
-        node = aNode;
-    }
-    return (self);
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRNodeMapElement *copy;
-    
-    copy = [super copyWithZone:aZone];
-    copy.node = node;
-    return( copy );
-}
-
-- (id<ANTLRBaseTree>)getNode
-{
-    return node;
-}
-
-- (void)setNode:(id<ANTLRBaseTree>)aNode
-{
-    if ( aNode != node ) {
-        if ( node ) [node release];
-        [aNode retain];
-    }
-    node = aNode;
-}
-
-- (NSInteger)size
-{
-    NSInteger aSize = 0;
-    if (node != nil) aSize += sizeof(id);
-    if (index != nil) aSize += sizeof(id);
-    return( aSize );
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRParseTree.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRParseTree.h
deleted file mode 100644
index 5331005..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRParseTree.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//
-//  ANTLRParseTree.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/12/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseTree.h"
-#import "ANTLRCommonToken.h"
-#import "AMutableArray.h"
-
-@interface ANTLRParseTree : ANTLRBaseTree <ANTLRBaseTree> {
-	__strong id<ANTLRToken> payload;
-	__strong AMutableArray *hiddenTokens;
-}
-/** A record of the rules used to match a token sequence.  The tokens
- *  end up as the leaves of this tree and rule nodes are the interior nodes.
- *  This really adds no functionality, it is just an alias for CommonTree
- *  that is more meaningful (specific) and holds a String to display for a node.
- */
-+ (id<ANTLRBaseTree>)newANTLRParseTree:(id<ANTLRToken>)label;
-- (id)initWithLabel:(id<ANTLRToken>)label;
-
-- (id<ANTLRBaseTree>)dupNode;
-- (NSInteger)type;
-- (NSString *)text;
-- (NSInteger)getTokenStartIndex;
-- (void)setTokenStartIndex:(NSInteger)index;
-- (NSInteger)getTokenStopIndex;
-- (void)setTokenStopIndex:(NSInteger)index;
-- (NSString *)description;
-- (NSString *)toString;
-- (NSString *)toStringWithHiddenTokens;
-- (NSString *)toInputString;
-- (void)_toStringLeaves:(NSMutableString *)buf;
-
-@property (retain) id<ANTLRToken> payload;
-@property (retain) AMutableArray *hiddenTokens;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRParseTree.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRParseTree.m
deleted file mode 100644
index 8339640..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRParseTree.m
+++ /dev/null
@@ -1,149 +0,0 @@
-//
-//  ANTLRParseTree.m
-//  ANTLR
-//
-//  Created by Alan Condit on 7/12/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRParseTree.h"
-
-/** A record of the rules used to match a token sequence.  The tokens
- *  end up as the leaves of this tree and rule nodes are the interior nodes.
- *  This really adds no functionality, it is just an alias for CommonTree
- *  that is more meaningful (specific) and holds a String to display for a node.
- */
-@implementation ANTLRParseTree
-+ (ANTLRParseTree *)newANTLRParseTree:(id<ANTLRToken>)label
-{
-    return [[ANTLRParseTree alloc] initWithLabel:label];
-}
-    
-- (id)initWithLabel:(id<ANTLRToken>)label
-{
-    self = [super init];
-    if ( self != nil) {
-        payload = [label retain];
-    }
-    return self;
-}
-
-- (id<ANTLRBaseTree>)dupNode
-{
-    return nil;
-}
-    
-- (NSInteger)type
-{
-    return 0;
-}
-    
-- (NSString *)text
-{
-    return [self toString];
-}
-    
-- (NSInteger)getTokenStartIndex
-{
-    return 0;
-}
-    
-- (void)setTokenStartIndex:(NSInteger)anIndex
-{
-}
-    
-- (NSInteger)getTokenStopIndex
-{
-    return 0;
-}
-    
-- (void)setTokenStopIndex:(NSInteger)anIndex
-{
-}
-
-- (NSString *)description
-{
-    if ( [payload isKindOfClass:[ANTLRCommonToken class]] ) {
-        id<ANTLRToken> t = (id<ANTLRToken>)payload;
-        if ( t.type == ANTLRTokenTypeEOF ) {
-            return @"<EOF>";
-        }
-        return [t text];
-    }
-    return [payload description];
-}
-    
-- (NSString *)toString
-{
-    return [self description];
-}
-    
-/** Emit a token and all hidden nodes before.  EOF node holds all
- *  hidden tokens after last real token.
- */
-- (NSString *)toStringWithHiddenTokens
-{
-    NSMutableString *buf = [NSMutableString stringWithCapacity:25];
-    if ( hiddenTokens!=nil ) {
-        for (NSUInteger i = 0; i < [hiddenTokens count]; i++) {
-            id<ANTLRToken>  hidden = (id<ANTLRToken> ) [hiddenTokens objectAtIndex:i];
-            [buf appendString:[hidden text]];
-        }
-    }
-    NSString *nodeText = [self toString];
-    if ( ![nodeText isEqualTo:@"<EOF>"] )
-        [buf appendString:nodeText];
-    return buf;
-}
-    
-/** Print out the leaves of this tree, which means printing original
- *  input back out.
- */
-- (NSString *)toInputString
-{
-    NSMutableString *buf = [NSMutableString stringWithCapacity:25];
-    [self _toStringLeaves:buf];
-    return buf;
-}
-    
-- (void)_toStringLeaves:(NSMutableString *)buf
-{
-    if ( [payload isKindOfClass:[ANTLRCommonToken class]] ) { // leaf node token?
-        [buf appendString:[self toStringWithHiddenTokens]];
-        return;
-    }
-    for (int i = 0; children!=nil && i < [children count]; i++) {
-        ANTLRParseTree *t = (ANTLRParseTree *) [children objectAtIndex:i];
-        [t _toStringLeaves:buf];
-    }
-}
-    
-@synthesize payload;
-@synthesize hiddenTokens;
-@synthesize children;
-@synthesize anException;
-
-@end
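
For reference, a minimal sketch of the payload convention described in the ANTLRParseTree comment above (manual retain/release style; the token type 5 and the text "x" are arbitrary placeholder values):

    #import "ANTLRParseTree.h"
    #import "ANTLRCommonToken.h"

    // A ParseTree leaf simply wraps the matched token as its payload; its
    // description prints the token text, or "<EOF>" for the end-of-file token.
    ANTLRParseTree *leaf = (ANTLRParseTree *)
        [ANTLRParseTree newANTLRParseTree:[ANTLRCommonToken newToken:5 Text:@"x"]];
    NSLog(@"%@", leaf);     // logs "x"
    [leaf release];
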
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRParser.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRParser.h
deleted file mode 100644
index 067b313..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRParser.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRTokenStream.h"
-
-@interface ANTLRParser : ANTLRBaseRecognizer {
-	id<ANTLRTokenStream> input;
-}
-+ (ANTLRParser *)newANTLRParser:(id<ANTLRTokenStream>)anInput;
-+ (ANTLRParser *)newANTLRParser:(id<ANTLRTokenStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream;
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream State:(ANTLRRecognizerSharedState *)aState;
-
-- (id<ANTLRTokenStream>) input;
-- (void) setInput: (id<ANTLRTokenStream>) anInput;
-
-- (void) reset;
-
-- (id) getCurrentInputSymbol:(id<ANTLRTokenStream>)anInput;
-- (ANTLRCommonToken *)getMissingSymbol:(id<ANTLRTokenStream>)input
-                             Exception:(ANTLRRecognitionException *)e
-                                 TType:(NSInteger)expectedTokenType
-                                BitSet:(ANTLRBitSet *)follow;
-- (void) setTokenStream:(id<ANTLRTokenStream>)anInput;
-- (id<ANTLRTokenStream>)getTokenStream;
-- (NSString *)getSourceName;
-
-- (void) traceIn:(NSString *)ruleName Index:(int)ruleIndex;
-- (void) traceOut:(NSString *)ruleName Index:(NSInteger) ruleIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRParser.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRParser.m
deleted file mode 100644
index df559b2..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRParser.m
+++ /dev/null
@@ -1,147 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRParser.h"
-
-
-@implementation ANTLRParser
-
-+ (ANTLRParser *)newANTLRParser:(id<ANTLRTokenStream>)anInput
-{
-    return [[ANTLRParser alloc] initWithTokenStream:anInput];
-}
-
-+ (ANTLRParser *)newANTLRParser:(id<ANTLRTokenStream>)anInput State:(ANTLRRecognizerSharedState *)aState
-{
-    return [[ANTLRParser alloc] initWithTokenStream:anInput State:aState];
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream
-{
-	if ((self = [super init]) != nil) {
-		input = theStream;
-	}
-	return self;
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)theStream State:(ANTLRRecognizerSharedState *)aState
-{
-	if ((self = [super initWithState:aState]) != nil) {
-        input = theStream;
-	}
-	return self;
-}
-
-- (void) reset
-{
-    [super reset]; // reset all recognizer state variables
-    if ( input!=nil ) {
-        [input seek:0]; // rewind the input
-    }
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRParser" );
-#endif
-	[self setInput:nil];
-	[super dealloc];
-}
-
-//---------------------------------------------------------- 
-//  input 
-//---------------------------------------------------------- 
-- (id<ANTLRTokenStream>) input
-{
-    return input; 
-}
-
-- (void) setInput: (id<ANTLRTokenStream>) anInput
-{
-    if (input != anInput) {
-        if ( input ) [input release];
-        [anInput retain];
-    }
-    input = anInput;
-}
-
-- (id) getCurrentInputSymbol:(id<ANTLRTokenStream>)anInput
-{
-    state.token = [input LT:1];
-    return state.token;
-}
-
-- (ANTLRCommonToken *)getMissingSymbol:(id<ANTLRTokenStream>)anInput
-                             Exception:(ANTLRRecognitionException *)e
-                                 TType:(NSInteger)expectedTokenType
-                                BitSet:(ANTLRBitSet *)follow
-{
-    NSString *tokenText = nil;
-    if ( expectedTokenType == ANTLRTokenTypeEOF )
-        tokenText = @"<missing EOF>";
-    else
-        tokenText = [NSString stringWithFormat:@"<missing %@>\n",[[ANTLRBaseRecognizer getTokenNames] objectAtIndex:expectedTokenType]];
-    ANTLRCommonToken *t = [[ANTLRCommonToken newToken:expectedTokenType Text:tokenText] retain];
-    ANTLRCommonToken *current = [anInput LT:1];
-    if ( current.type == ANTLRTokenTypeEOF ) {
-        current = [anInput LT:-1];
-    }
-    t.line = current.line;
-    t.charPositionInLine = current.charPositionInLine;
-    t.channel = ANTLRTokenChannelDefault;
-    return t;
-}
-
-/** Set the token stream and reset the parser */
-- (void) setTokenStream:(id<ANTLRTokenStream>)anInput
-{
-    input = nil;
-    [self reset];
-    input = anInput;
-}
-
-- (id<ANTLRTokenStream>)getTokenStream
-{
-    return input;
-}
-
-- (NSString *)getSourceName
-{
-    return [input getSourceName];
-}
-
-- (void) traceIn:(NSString *)ruleName Index:(int)ruleIndex
-{
-    [super traceIn:ruleName Index:ruleIndex Object:[input LT:1]];
-}
-
-- (void) traceOut:(NSString *)ruleName Index:(NSInteger) ruleIndex
-{
-    [super traceOut:ruleName Index:ruleIndex Object:[input LT:1]];
-}
-
-@end
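
Generated parsers subclass ANTLRParser, so the base class is rarely used directly; a small sketch of the two ways to restart parsing that the methods above provide (the parser instance is assumed to come from generated code):

    #import "ANTLRParser.h"

    static void restartParsing(ANTLRParser *parser, id<ANTLRTokenStream> newTokens)
    {
        [parser reset];                      // keep the current stream, just seek it back to 0
        [parser setTokenStream:newTokens];   // clear recognizer state, then install a new stream
    }
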
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRParserRuleReturnScope.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRParserRuleReturnScope.h
deleted file mode 100644
index d788c21..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRParserRuleReturnScope.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRRuleReturnScope.h"
-
-@interface ANTLRParserRuleReturnScope : ANTLRRuleReturnScope {
-	id<ANTLRToken> start;
-	id<ANTLRToken> stopToken;
-}
-@property (retain, getter=getStart, setter=setStart:) id<ANTLRToken> start;
-@property (retain, getter=getStop, setter=setStop:)   id<ANTLRToken> stopToken;
-
-- (id<ANTLRToken>) getStart;
-- (void) setStart: (id<ANTLRToken>) aStart;
-
-- (id<ANTLRToken>) getStop;
-- (void) setStop: (id<ANTLRToken>) aStop;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRParserRuleReturnScope.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRParserRuleReturnScope.m
deleted file mode 100644
index 2bc2392..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRParserRuleReturnScope.m
+++ /dev/null
@@ -1,80 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRParserRuleReturnScope.h"
-
-
-@implementation ANTLRParserRuleReturnScope
-
-@synthesize start;
-@synthesize stopToken;
-
-- (void) dealloc
-{
-    [self setStart:nil];
-    [self setStop:nil];
-    [super dealloc];
-}
-
-- (id<ANTLRToken>) getStart
-{
-    return start; 
-}
-
-- (void) setStart: (id<ANTLRToken>) aStart
-{
-    if (start != aStart) {
-        [aStart retain];
-        if ( start ) [start release];
-        start = aStart;
-    }
-}
-
-- (id<ANTLRToken>) getStop
-{
-    return stopToken; 
-}
-
-- (void) setStop: (id<ANTLRToken>) aStop
-{
-    if (stopToken != aStop) {
-        [aStop retain];
-        if ( stopToken ) [stopToken release];
-        stopToken = aStop;
-    }
-}
-
-// create a copy, including the text if available
-// the input stream is *not* copied!
-- (id) copyWithZone:(NSZone *)theZone
-{
-    ANTLRParserRuleReturnScope *copy = [super copyWithZone:theZone];
-    copy.start = start;
-    copy.stopToken = stopToken;
-    return copy;
-}
-
-@end
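
A short sketch of how a return scope brackets a rule match (manual retain/release; the token types 4 and 5 are placeholders, and the superclass is assumed to implement copyWithZone: as the override above requires):

    #import "ANTLRParserRuleReturnScope.h"
    #import "ANTLRCommonToken.h"

    ANTLRParserRuleReturnScope *ret = [[ANTLRParserRuleReturnScope alloc] init];
    ANTLRCommonToken *lp = [ANTLRCommonToken newToken:4 Text:@"("];
    ANTLRCommonToken *rp = [ANTLRCommonToken newToken:5 Text:@")"];
    [ret setStart:lp];      // first token matched by the rule
    [ret setStop:rp];       // last token matched by the rule
    [lp release];
    [rp release];
    // The copy duplicates start/stop but, as the comment above notes, not the input stream.
    ANTLRParserRuleReturnScope *snapshot = [ret copy];
    [ret release];
    [snapshot release];
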
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrBuffer.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrBuffer.h
deleted file mode 100644
index baf929b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrBuffer.h
+++ /dev/null
@@ -1,93 +0,0 @@
-//
-//  ANTLRPtrBuffer.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define BUFFSIZE         101
-
-@interface ANTLRPtrBuffer : ANTLRLinkBase {
-    NSUInteger BuffSize;
-    NSUInteger count;
-    NSUInteger ptr;
-    __strong NSMutableData *buffer;
-    __strong id *ptrBuffer;
-}
-
-@property (getter=getBuffSize, setter=setBuffSize:) NSUInteger BuffSize;
-@property (getter=getCount, setter=setCount:) NSUInteger count;
-@property (getter=getPtr, setter=setPtr:) NSUInteger ptr;
-@property (retain, getter=getBuffer, setter=setBuffer:) NSMutableData *buffer;
-@property (assign, getter=getPtrBuffer, setter=setPtrBuffer:) id *ptrBuffer;
-
-// Construction/Destruction
-+(ANTLRPtrBuffer *)newANTLRPtrBuffer;
-+(ANTLRPtrBuffer *)newANTLRPtrBufferWithLen:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSUInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-
-- (NSUInteger)count;
-- (NSUInteger)length;
-- (NSUInteger)size;
-
-- (NSMutableData *)getBuffer;
-- (void)setBuffer:(NSMutableData *)np;
-- (NSUInteger)getCount;
-- (void)setCount:(NSUInteger)aCount;
-- (id *)getPtrBuffer;
-- (void)setPtrBuffer:(id *)np;
-- (NSUInteger)getPtr;
-- (void)setPtr:(NSUInteger)np;
-
-- (void) push:(id) v;
-- (id) pop;
-- (id) peek;
-
-- (void) addObject:(id) v;
-- (void) addObjectsFromArray:(ANTLRPtrBuffer *)anArray;
-- (void) insertObject:(id)aRule atIndex:(NSUInteger)idx;
-- (id)   objectAtIndex:(NSUInteger)idx;
-- (void) removeAllObjects;
-- (void)removeObjectAtIndex:(NSInteger)idx;
-
-- (void) ensureCapacity:(NSUInteger) index;
-- (NSString *) description;
-- (NSString *) toString;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrBuffer.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrBuffer.m
deleted file mode 100644
index 392a7b7..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrBuffer.m
+++ /dev/null
@@ -1,353 +0,0 @@
-//
-//  ANTLRPtrBuffer.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRPtrBuffer.h"
-#import "ANTLRTree.h"
-
-/*
- * Start of ANTLRPtrBuffer
- */
-@implementation ANTLRPtrBuffer
-
-@synthesize BuffSize;
-@synthesize buffer;
-@synthesize ptrBuffer;
-@synthesize count;
-@synthesize ptr;
-
-+(ANTLRPtrBuffer *)newANTLRPtrBuffer
-{
-    return [[ANTLRPtrBuffer alloc] init];
-}
-
-+(ANTLRPtrBuffer *)newANTLRPtrBufferWithLen:(NSInteger)cnt
-{
-    return [[ANTLRPtrBuffer alloc] initWithLen:cnt];
-}
-
--(id)init
-{
-    NSUInteger idx;
-    
-    self = [super init];
-    if ( self != nil ) {
-        BuffSize  = BUFFSIZE;
-        ptr = 0;
-        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
-        ptrBuffer = (id *) [buffer mutableBytes];
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            ptrBuffer[idx] = nil;
-        }
-        count = 0;
-    }
-    return( self );
-}
-
--(id)initWithLen:(NSUInteger)cnt
-{
-    NSUInteger idx;
-    
-    self = [super init];
-    if ( self != nil ) {
-        BuffSize  = cnt;
-        ptr = 0;
-        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
-        ptrBuffer = (id *)[buffer mutableBytes];
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            ptrBuffer[idx] = nil;
-        }
-        count = 0;
-    }
-    return( self );
-}
-
--(void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRPtrBuffer" );
-#endif
-    ANTLRLinkBase *tmp, *rtmp;
-    NSInteger idx;
-    
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp ) {
-                rtmp = tmp;
-                if ([tmp isKindOfClass:[ANTLRLinkBase class]])
-                    tmp = (id)tmp.fNext;
-                else
-                    tmp = nil;
-                [rtmp release];
-            }
-        }
-    }
-    [buffer release];
-    [super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRPtrBuffer *copy;
-    
-    copy = [[[self class] allocWithZone:aZone] init];
-    if ( buffer )
-        copy.buffer = [buffer copyWithZone:aZone];
-    copy.ptrBuffer = ptrBuffer;
-    copy.ptr = ptr;
-    return copy;
-}
-
-- (void)clear
-{
-    ANTLRLinkBase *tmp, *rtmp;
-    NSInteger idx;
-
-    for( idx = 0; idx < BuffSize; idx++ ) {
-        tmp = ptrBuffer[idx];
-        while ( tmp ) {
-            rtmp = tmp;
-            if ([tmp isKindOfClass:[ANTLRLinkBase class]])
-                tmp = (id)tmp.fNext;
-            else
-                tmp = nil;
-            [rtmp release];
-        }
-        ptrBuffer[idx] = nil;
-    }
-    count = 0;
-}
-
-- (NSMutableData *)getBuffer
-{
-    return( buffer );
-}
-
-- (void)setBuffer:(NSMutableData *)np
-{
-    buffer = np;
-}
-
-- (NSUInteger)getCount
-{
-    return( count );
-}
-
-- (void)setCount:(NSUInteger)aCount
-{
-    count = aCount;
-}
-
-- (id *)getPtrBuffer
-{
-    return( ptrBuffer );
-}
-
-- (void)setPtrBuffer:(id *)np
-{
-    ptrBuffer = np;
-}
-
-- (NSUInteger)getPtr
-{
-    return( ptr );
-}
-
-- (void)setPtr:(NSUInteger)aPtr
-{
-    ptr = aPtr;
-}
-
-- (void) addObject:(id) v
-{
-    [self ensureCapacity:ptr];
-    if ( v ) [v retain];
-    ptrBuffer[ptr++] = v;
-    count++;
-}
-
-- (void) push:(id) v
-{
-    if ( ptr >= BuffSize - 1 ) {
-        [self ensureCapacity:ptr];
-    }
-    if ( v ) [v retain];
-    ptrBuffer[ptr++] = v;
-    count++;
-}
-
-- (id) pop
-{
-    id v = nil;
-    if ( ptr > 0 ) {
-        v = ptrBuffer[--ptr];
-        ptrBuffer[ptr] = nil;
-    }
-    count--;
-    if ( v ) [v release];
-    return v;
-}
-
-- (id) peek
-{
-    id v = nil;
-    if ( ptr > 0 ) {
-        v = ptrBuffer[ptr-1];
-    }
-    return v;
-}
-
-- (NSUInteger)count
-{
-#ifdef DONTUSENOMO
-    int cnt = 0;
-    
-    for (NSInteger i = 0; i < BuffSize; i++ ) {
-        if ( ptrBuffer[i] != nil ) {
-            cnt++;
-        }
-    }
-    if ( cnt != count ) count = cnt;
-#endif
-    return count;
-}
-
-- (NSUInteger)length
-{
-    return BuffSize;
-}
-
-- (NSUInteger)size
-{
-    NSUInteger aSize = 0;
-    for (int i = 0; i < BuffSize; i++ ) {
-        if (ptrBuffer[i] != nil) {
-            aSize += sizeof(id);
-        }
-    }
-    return aSize;
-}
-
-- (void) insertObject:(id)aRule atIndex:(NSUInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        [self ensureCapacity:idx];
-    }
-    if ( aRule != ptrBuffer[idx] ) {
-        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
-        if ( aRule ) [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-    count++;
-}
-
-- (id)objectAtIndex:(NSUInteger)idx
-{
-    if ( idx < BuffSize ) {
-        return ptrBuffer[idx];
-    }
-    return nil;
-}
-
-- (void)addObjectsFromArray:(ANTLRPtrBuffer *)anArray
-{
-    NSInteger cnt, i;
-    cnt = [anArray count];
-    for( i = 0; i < cnt; i++) {
-        id tmp = [anArray objectAtIndex:i];
-        if ( tmp ) [tmp retain];
-        [self insertObject:tmp atIndex:i];
-    }
-    count += cnt;
-    return;
-}
-
-- (void)removeAllObjects
-{
-    int i;
-    for ( i = 0; i < BuffSize; i++ ) {
-        if ( ptrBuffer[i] ) [ptrBuffer[i] release];
-        ptrBuffer[i] = nil;
-    }
-    count = 0;
-    ptr = 0;
-}
-
-- (void)removeObjectAtIndex:(NSInteger)idx
-{
-    int i;
-    if ( idx >= 0 && idx < count ) {
-        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
-        for ( i = idx; i < count-1; i++ ) {
-            ptrBuffer[i] = ptrBuffer[i+1];
-        }
-        ptrBuffer[i] = nil;
-        count--;
-    }
-}
-
-- (void) ensureCapacity:(NSUInteger) anIndex
-{
-    if ((anIndex * sizeof(id)) >= [buffer length])
-    {
-        NSInteger newSize = ([buffer length] / sizeof(id)) * 2;
-        if (anIndex > newSize) {
-            newSize = anIndex + 1;
-        }
-        BuffSize = newSize;
-        [buffer setLength:(BuffSize * sizeof(id))];
-        ptrBuffer = [buffer mutableBytes];
-    }
-}
-
-- (NSString *) description
-{
-    NSMutableString *str;
-    NSInteger idx, cnt;
-    cnt = [self count];
-    str = [NSMutableString stringWithCapacity:30];
-    [str appendString:@"["];
-    for (idx = 0; idx < cnt; idx++ ) {
-        [str appendString:[[self objectAtIndex:idx] description]];
-    }
-    [str appendString:@"]"];
-    return str;
-}
-
-- (NSString *) toString
-{
-    return [self description];
-}
-
-@end
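
A minimal usage sketch of the buffer's growth behaviour (pre-ARC retain/release; the item strings are arbitrary):

    #import "ANTLRPtrBuffer.h"

    // Starts with 4 slots; ensureCapacity: doubles the slot count whenever the
    // write index reaches the end of the backing NSMutableData, so ten adds
    // grow the capacity 4 -> 8 -> 16.
    ANTLRPtrBuffer *buf = [ANTLRPtrBuffer newANTLRPtrBufferWithLen:4];
    for (int i = 0; i < 10; i++) {
        [buf addObject:[NSString stringWithFormat:@"item %d", i]];
    }
    NSLog(@"count=%lu capacity=%lu",
          (unsigned long)[buf count], (unsigned long)[buf length]);
    [buf release];
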
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrStack.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrStack.h
deleted file mode 100644
index 7aa65a9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrStack.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-//  ANTLRPtrStack.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-//  Copyright 2010 Alan's MachineWorks. All rights reserved.
-//ptrBuffer
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseStack.h"
-#import "ANTLRRuleMemo.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRPtrStack : ANTLRBaseStack {
-	//ANTLRPtrStack *fNext;
-    // TStringPool *fPool;
-}
-
-//@property (copy) ANTLRPtrStack *fNext;
-//@property (copy) TStringPool *fPool;
-
-// Construction/Destruction
-+ (ANTLRPtrStack *)newANTLRPtrStack;
-+ (ANTLRPtrStack *)newANTLRPtrStack:(NSInteger)cnt;
-- (id)init;
-- (id)initWithLen:(NSInteger)aLen;
-- (void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-
-#ifdef DONTUSENOMO
-/* form hash value for string s */
-- (NSInteger)hash:(NSString *)s;
-/*   look for s in ptrBuffer  */
-- (id)lookup:(NSString *)s;
-/* look for s in ptrBuffer  */
-- (id)install:(id)sym;
-#endif
-
-#ifdef DONTUSENOMO
-- (id)getTType:(NSString *)name;
-- (id)getName:(NSInteger)ttype;
-#endif
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrStack.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrStack.m
deleted file mode 100644
index 5b180f2..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRPtrStack.m
+++ /dev/null
@@ -1,191 +0,0 @@
-//
-//  ANTLRPtrStack.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-//  Copyright 2010 Alan's MachineWorks. All rights reserved.
-//
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRPtrStack.h"
-#import "ANTLRTree.h"
-
-/*
- * Start of ANTLRPtrStack
- */
-@implementation ANTLRPtrStack
-
-+(ANTLRPtrStack *)newANTLRPtrStack
-{
-    return [[ANTLRPtrStack alloc] init];
-}
-
-+(ANTLRPtrStack *)newANTLRPtrStack:(NSInteger)cnt
-{
-    return [[ANTLRPtrStack alloc] initWithLen:cnt];
-}
-
--(id)init
-{
-	self = [super initWithLen:HASHSIZE];
-	if ( self != nil ) {
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)cnt
-{
-	self = [super initWithLen:cnt];
-	if ( self != nil ) {
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRPtrStack" );
-#endif
-	[super dealloc];
-}
-
--(void)deleteANTLRPtrStack:(ANTLRPtrStack *)np
-{
-    ANTLRLinkBase *tmp, *rtmp;
-    NSInteger idx;
-    
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < BuffSize; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp ) {
-                rtmp = tmp;
-                tmp = [tmp getfNext];
-                [rtmp release];
-            }
-        }
-    }
-}
-
-#ifdef DONTUSENOMO
-#ifdef USERDOC
-/*
- *  HASH        hash entry to get index to table
- *  NSInteger hash( ANTLRPtrStack *self, char *s );
- *
- *     Inputs:  NSString *s         string to find
- *
- *     Returns: NSInteger                 hashed value
- *
- *  Last Revision 9/03/90
- */
-#endif
--(NSInteger)hash:(NSString *)s       /*    form hash value for string s */
-{
-	NSInteger hashval;
-	const char *tmp;
-    
-	tmp = [s cStringUsingEncoding:NSASCIIStringEncoding];
-	for( hashval = 0; *tmp != '\0'; )
-        hashval += *tmp++;
-	LastHash = hashval % HashSize;
-	return( LastHash );
-}
-
-#ifdef USERDOC
-/*
- *  LOOKUP  search hashed list for entry
- *  id lookup:(NSString *)s;
- *
- *     Inputs:  NSString  *s       string to find
- *
- *     Returns: ANTLRRuleMemo  *        pointer to entry
- *
- *  Last Revision 9/03/90
- */
-#endif
--(id)lookup:(NSString *)s
-{
-    ANTLRLinkBase *np;
-    
-    for( np = ptrBuffer[[self hash:s]]; np != nil; np = [np getfNext] ) {
-        if ( [s isEqualToString:[np getName]] ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-#ifdef USERDOC
-/*
- *  INSTALL search hashed list for entry
- *  NSInteger install( ANTLRPtrStack *self, id sym );
- *
- *     Inputs:  ANTLRRuleMemo    *sym   -- symbol ptr to install
- *              NSInteger         scope -- level to find
- *
- *     Returns: Boolean     TRUE   if installed
- *                          FALSE  if already in table
- *
- *  Last Revision 9/03/90
- */
-#endif
--(id)install:(id)sym
-{
-    ANTLRLinkBase *np;
-    
-    np = [self lookup:[sym getName]];
-    if ( np == nil ) {
-        [sym setFNext:ptrBuffer[ LastHash ]];
-        ptrBuffer[ LastHash ] = [sym retain];
-        return( ptrBuffer[ LastHash ] );
-    }
-    return( nil );            /*   not found      */
-}
-#endif
-
--(id)getptrBufferEntry:(NSInteger)idx
-{
-	return( ptrBuffer[idx] );
-}
-
--(id *)getptrBuffer
-{
-	return( ptrBuffer );
-}
-
--(void)setptrBuffer:(id *)np
-{
-    ptrBuffer = np;
-}
-
-#ifdef DONTUSENOMO
-/*
- * works only for maplist indexed not by name but by TokenNumber
- */
-- (id)getName:(NSInteger)ttype
-{
-    id np;
-    NSInteger aTType;
-
-    aTType = ttype % HashSize;
-    for( np = ptrBuffer[ttype]; np != nil; np = [np getfNext] ) {
-        if ( np.index == ttype ) {
-            return( np );        /*   found it       */
-        }
-    }
-    return( nil );              /*   not found      */
-}
-
-- (id)getTType:(NSString *)name
-{
-    return [self lookup:name];
-}
-#endif
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    return [super copyWithZone:aZone];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRReaderStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRReaderStream.h
deleted file mode 100644
index 1dd19d1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRReaderStream.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//
-//  AntlrReaderStream.h
-//  ANTLR
-//
-//  Created by Alan Condit on 2/21/11.
-//  Copyright 2011 Alan's MachineWorks. All rights reserved.
-//
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRStringStream.h"
-
-@interface ANTLRReaderStream : ANTLRStringStream {
-    NSFileHandle *fh;
-    NSInteger size;
-    NSInteger rbSize;
-    //NSData *data; /* ANTLRStringStream has NSString *data */
-}
-
-@property (retain) NSFileHandle *fh;
-@property (assign) NSInteger size;
-@property (assign) NSInteger rbSize;
-//@property (retain) NSData *data;
-
-+ (NSInteger) READ_BUFFER_SIZE;
-+ (NSInteger) INITIAL_BUFFER_SIZE;
-
-+ (id) newANTLRReaderStream;
-+ (id) newANTLRReaderStream:(NSFileHandle *)r;
-+ (id) newANTLRReaderStream:(NSFileHandle *)r size:(NSInteger)aSize;
-+ (id) newANTLRReaderStream:(NSFileHandle *)r size:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize;
-- (id) initWithReader:(NSFileHandle *)r size:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize;
-- (void) load:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize;
-- (void) close;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRReaderStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRReaderStream.m
deleted file mode 100644
index a71b827..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRReaderStream.m
+++ /dev/null
@@ -1,153 +0,0 @@
-//
-//  ANTLRReaderStream.m
-//  ANTLR
-//
-//  Created by Alan Condit on 2/21/11.
-//  Copyright 2011 Alan's MachineWorks. All rights reserved.
-//
-
-#import "ANTLRReaderStream.h"
-
-
-@implementation ANTLRReaderStream
-
-@synthesize fh;
-@synthesize size;
-@synthesize rbSize;
-
-static NSInteger READ_BUFFER_SIZE = 1024;
-static NSInteger INITIAL_BUFFER_SIZE = 1024;
-
-+ (NSInteger) READ_BUFFER_SIZE
-{
-    return READ_BUFFER_SIZE;
-}
-
-+ (NSInteger) INITIAL_BUFFER_SIZE
-{
-    return INITIAL_BUFFER_SIZE;
-}
-
-+ (id) newANTLRReaderStream
-{
-    return [[ANTLRReaderStream alloc] init];
-}
-
-+ (id) newANTLRReaderStream:(NSFileHandle *)r
-{
-    return [[ANTLRReaderStream alloc] initWithReader:r size:INITIAL_BUFFER_SIZE readBufferSize:READ_BUFFER_SIZE];
-}
-
-+ (id) newANTLRReaderStream:(NSFileHandle *)r size:(NSInteger)aSize
-{
-    return [[ANTLRReaderStream alloc] initWithReader:r size:aSize readBufferSize:READ_BUFFER_SIZE];
-}
-
-+ (id) newANTLRReaderStream:(NSFileHandle *)r size:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize
-{
-//    load(r, aSize, aReadChunkSize);
-    return [[ANTLRReaderStream alloc] initWithReader:r size:aSize readBufferSize:aReadChunkSize];
-}
-
-- (id) init
-{
-	self = [super init];
-	if ( self != nil ) {
-        fh = nil;
-        rbSize = READ_BUFFER_SIZE;
-        size = INITIAL_BUFFER_SIZE;
-    }
-    return self;
-}
-
-- (id) initWithReader:(NSFileHandle *)r size:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize
-{
-	self = [super init];
-	if ( self != nil ) {
-        fh = r;
-        rbSize = aSize;
-        size = aReadChunkSize;
-        [self load:aSize readBufferSize:aReadChunkSize];
-    }
-    return self;
-}
-
-- (void) load:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize
-{
-    NSData *retData = nil;
-    if ( fh==nil ) {
-        return;
-    }
-    if ( aSize<=0 ) {
-        aSize = INITIAL_BUFFER_SIZE;
-    }
-    if ( aReadChunkSize<=0 ) {
-        aReadChunkSize = READ_BUFFER_SIZE;
-    }
-#pragma mark fix these NSLog calls
-    @try {
-        int numRead=0;
-        int p1 = 0;
-        retData = [fh readDataToEndOfFile];
-        numRead = [retData length];
-        NSLog( @"read %d chars; p was %d is now %d", numRead, p1, (p1+numRead) );
-        p1 += numRead;
-        n = p1;
-        data = [[NSString alloc] initWithData:retData encoding:NSASCIIStringEncoding];
-        NSLog( @"n=%d", n );
-    }
-    @finally {
-        [fh closeFile];
-    }
-}
-
-- (void)setUpStreamForFile:(NSString *)path {
-    // iStream is NSInputStream instance variable
-    NSInputStream *iStream = [[NSInputStream alloc] initWithFileAtPath:path];
-//    [iStream setDelegate:self];
-    [iStream scheduleInRunLoop:[NSRunLoop currentRunLoop]
-                       forMode:NSDefaultRunLoopMode];
-    [iStream open];
-}
-
-- (void)stream:(NSStream *)stream handleEvent:(NSStreamEvent)eventCode
-{
-    NSMutableData *myData = nil;
-    NSNumber *bytesRead = [NSNumber numberWithInteger:0];
-    switch(eventCode) {
-        case NSStreamEventHasBytesAvailable:
-        {
-            if(!myData) {
-                myData = [[NSMutableData data] retain];
-            }
-            uint8_t buf[1024];
-            unsigned int len = 0;
-            len = [(NSInputStream *)stream read:buf maxLength:1024];
-            if(len) {
-                [myData appendBytes:(const void *)buf length:len];
-                // bytesRead is an instance variable of type NSNumber.
-                bytesRead = [NSNumber numberWithInteger:[bytesRead intValue]+len];
-            } else {
-                NSLog(@"no buffer!");
-            }
-            break;
-        }
-        case NSStreamEventEndEncountered:
-        {
-            [stream close];
-            [stream removeFromRunLoop:[NSRunLoop currentRunLoop]
-                              forMode:NSDefaultRunLoopMode];
-            [stream release];
-            stream = nil; // stream is ivar, so reinit it
-            break;
-        }
-            // continued
-    }
-}
-
-- (void) close
-{
-    [fh closeFile];
-}
-
-@end
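
A usage sketch (the path is hypothetical): the stream reads the whole file up front in -load:readBufferSize: and then behaves like the ANTLRStringStream it subclasses.

    #import "ANTLRReaderStream.h"

    NSFileHandle *fh = [NSFileHandle fileHandleForReadingAtPath:@"/tmp/input.txt"];
    ANTLRReaderStream *input = [ANTLRReaderStream newANTLRReaderStream:fh];
    // `input` can now be handed to a generated lexer as its character stream.
    [input close];
    [input release];
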
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognitionException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognitionException.h
deleted file mode 100644
index 9bd799d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognitionException.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuntimeException.h"
-#import "ANTLRToken.h"
-#import "ANTLRIntStream.h"
-#import "ANTLRBaseTree.h"
-
-@interface ANTLRRecognitionException : ANTLRRuntimeException {
-	id<ANTLRIntStream> input;
-	NSInteger index;
-	id<ANTLRToken> token;
-	id<ANTLRBaseTree> node;
-	unichar c;
-	NSUInteger line;
-	NSUInteger charPositionInLine;
-}
-
-@property (retain, getter=getStream, setter=setStream:) id<ANTLRIntStream> input;
-@property (assign) NSInteger index;
-@property (retain, getter=getToken, setter=setToken:) id<ANTLRToken>token;
-@property (retain, getter=getNode, setter=setNode:) id<ANTLRBaseTree>node;
-@property (assign) unichar c;
-@property (assign) NSUInteger line;
-@property (assign) NSUInteger charPositionInLine;
-
-+ (id) newException;
-+ (id) newException:(id<ANTLRIntStream>) anInputStream; 
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream;
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream reason:(NSString *)aReason;
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-- (NSInteger) unexpectedType;
-- (id<ANTLRToken>)getUnexpectedToken;
-
-- (id<ANTLRIntStream>) getStream;
-- (void) setStream: (id<ANTLRIntStream>) aStream;
-
-- (id<ANTLRToken>) getToken;
-- (void) setToken: (id<ANTLRToken>) aToken;
-
-- (id<ANTLRBaseTree>) getNode;
-- (void) setNode: (id<ANTLRBaseTree>) aNode;
-
-- (NSString *)getMessage;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognitionException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognitionException.m
deleted file mode 100644
index 0d0e11f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognitionException.m
+++ /dev/null
@@ -1,215 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRRecognitionException.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRTreeNodeStream.h"
-
-@implementation ANTLRRecognitionException
-
-@synthesize input;
-@synthesize token;
-@synthesize node;
-@synthesize line;
-@synthesize charPositionInLine;
-
-+ (id) newException
-{
-	return [[ANTLRRecognitionException alloc] init];
-}
-
-+ (id) newException:(id<ANTLRIntStream>) anInputStream
-{
-	return [[ANTLRRecognitionException alloc] initWithStream:anInputStream];
-}
-
-+ (id) newException:(id<ANTLRIntStream>) anInputStream reason:(NSString *)aReason
-{
-	return [[ANTLRRecognitionException alloc] initWithStream:anInputStream reason:aReason];
-}
-
-- (id) init
-{
-	self = [super initWithName:@"Recognition Exception" reason:@"Recognition Exception" userInfo:nil];
-	if ( self != nil ) {
-	}
-	return self;
-}
-
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream reason:(NSString *)aReason
-{
-	self = [super initWithName:NSStringFromClass([self class]) reason:aReason userInfo:nil];
-	if ( self != nil ) {
-		[self setStream:anInputStream];
-		index = input.index;
-		
-		Class inputClass = [input class];
-		if ([inputClass conformsToProtocol:@protocol(ANTLRTokenStream)]) {
-			[self setToken:[(id<ANTLRTokenStream>)input LT:1]];
-			line = token.line;
-			charPositionInLine = token.charPositionInLine;
-		} else if ([inputClass conformsToProtocol:@protocol(ANTLRCharStream)]) {
-			c = (unichar)[input LA:1];
-			line = ((id<ANTLRCharStream>)input).line;
-			charPositionInLine = ((id<ANTLRCharStream>)input).charPositionInLine;
-		} else if ([inputClass conformsToProtocol:@protocol(ANTLRTreeNodeStream)]) {
-			[self setNode:[(id<ANTLRTreeNodeStream>)input LT:1]];
-			line = [node line];
-			charPositionInLine = [node charPositionInLine];
-		} else {
-			c = (unichar)[input LA:1];
-		}
-	}
-	return self;
-}
-
-- (id) initWithStream:(id<ANTLRIntStream>)anInputStream
-{
-	self = [super initWithName:NSStringFromClass([self class]) reason:@"Runtime Exception" userInfo:nil];
-	if ( self != nil ) {
-	}
-	return self;
-}
-
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-	self = [super initWithName:aName reason:aReason userInfo:aUserInfo];
-	if ( self != nil ) {
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRRecognitionException" );
-#endif
-	if ( input ) [input release];
-	if ( token ) [token release];
-	if ( node ) [node release];
-	[super dealloc];
-}
-
-- (NSInteger) unexpectedType
-{
-	if (token) {
-		return token.type;
-    } else if (node) {
-        return [node type];
-	} else {
-		return c;
-	}
-}
-
-- (id<ANTLRToken>)getUnexpectedToken
-{
-    return token;
-}
-
-- (NSString *) description
-{
-	//NSMutableString *desc = [[NSMutableString alloc] initWithString:NSStringFromClass([self class])];
-	NSMutableString *desc = [NSMutableString stringWithString:[self className]];
-	if (token) {
-		[desc appendFormat:@" token:%@", token];
-	} else if (node) {
-		[desc appendFormat:@" node:%@", node];
-	} else {
-		[desc appendFormat:@" char:%c", c];
-	}
-	[desc appendFormat:@" line:%d position:%d", line, charPositionInLine];
-	return desc;
-}
-
-//---------------------------------------------------------- 
-//  input 
-//---------------------------------------------------------- 
-- (id<ANTLRIntStream>) getStream
-{
-    return input; 
-}
-
-- (void) setStream: (id<ANTLRIntStream>) aStream
-{
-    if ( input != aStream ) {
-        if ( input ) [input release];
-        if ( aStream ) [aStream retain];
-        input = aStream;
-    }
-}
-
-//---------------------------------------------------------- 
-//  token 
-//---------------------------------------------------------- 
-- (id<ANTLRToken>) getToken
-{
-    return token; 
-}
-
-- (void) setToken: (id<ANTLRToken>) aToken
-{
-    if (token != aToken) {
-        if ( token ) [token release];
-        if ( aToken ) [aToken retain];
-        token = aToken;
-    }
-}
-
-//---------------------------------------------------------- 
-//  node 
-//---------------------------------------------------------- 
-- (id<ANTLRBaseTree>) getNode
-{
-    return node; 
-}
-
-- (void) setNode: (id<ANTLRBaseTree>) aNode
-{
-    if (node != aNode) {
-        if ( node ) [node release];
-        if ( aNode ) [aNode retain];
-        node = aNode;
-    }
-}
-
-- (NSString *)getMessage
-{
-    return @"Fix getMessage in ANTLRRecognitionException";
-}
-
-- (NSUInteger)charPositionInLine
-{
-    return charPositionInLine;
-}
-
-- (void)setCharPositionInLine:(NSUInteger)aPos
-{
-    charPositionInLine = aPos;
-}
-
-@synthesize index;
-@synthesize c;
-@end
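
A sketch of how generated code typically raises this exception from a live token stream (the reason text is arbitrary; ANTLRTokenStream is assumed to extend ANTLRIntStream as elsewhere in the runtime):

    #import "ANTLRRecognitionException.h"
    #import "ANTLRTokenStream.h"

    static void reportMismatch(id<ANTLRTokenStream> tokens)
    {
        // The initializer captures LT:1, line and charPositionInLine from the stream.
        ANTLRRecognitionException *re =
            [ANTLRRecognitionException newException:tokens reason:@"mismatched input"];
        NSLog(@"%@", re);   // " token:... line:... position:..."
        [re release];
    }
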
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognizerSharedState.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognizerSharedState.h
deleted file mode 100755
index 0878dba..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognizerSharedState.h
+++ /dev/null
@@ -1,117 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRBitSet.h"
-#import "ANTLRRuleStack.h"
-#import "AMutableArray.h"
-
-@interface ANTLRRecognizerSharedState : NSObject {
-	__strong AMutableArray *following;  // a stack of FOLLOW bitsets used for context sensitive prediction and recovery
-    NSInteger _fsp;                     // Follow stack pointer
-	BOOL errorRecovery;                 // are we recovering?
-	NSInteger lastErrorIndex;
-	BOOL failed;                        // indicate that some match failed
-    NSInteger syntaxErrors;
-	NSInteger backtracking;             // the level of backtracking
-	__strong ANTLRRuleStack *ruleMemo;	// store previous results of matching rules so we don't have to do it again. Hook in incremental stuff here, too.
-
-	__strong id<ANTLRToken> token;
-	NSInteger  tokenStartCharIndex;
-	NSUInteger tokenStartLine;
-	NSUInteger tokenStartCharPositionInLine;
-	NSUInteger channel;
-	NSUInteger type;
-	NSString   *text;
-}
-
-@property (retain, getter=getFollowing, setter=setFollowing:) AMutableArray *following;
-@property (assign) NSInteger _fsp;
-@property (assign) BOOL errorRecovery;
-@property (assign) NSInteger lastErrorIndex;
-@property (assign, getter=getFailed, setter=setFailed:) BOOL failed;
-@property (assign) NSInteger syntaxErrors;
-@property (assign, getter=getBacktracking, setter=setBacktracking:) NSInteger backtracking;
-@property (retain, getter=getRuleMemo, setter=setRuleMemo:) ANTLRRuleStack *ruleMemo;
-@property (copy, getter=getToken, setter=setToken:) id<ANTLRToken> token;
-@property (getter=type,setter=setType:) NSUInteger type;
-@property (getter=channel,setter=setChannel:) NSUInteger channel;
-@property (getter=getTokenStartLine,setter=setTokenStartLine:) NSUInteger tokenStartLine;
-@property (getter=charPositionInLine,setter=setCharPositionInLine:) NSUInteger tokenStartCharPositionInLine;
-@property (getter=getTokenStartCharIndex,setter=setTokenStartCharIndex:) NSInteger tokenStartCharIndex;
-@property (retain, getter=text, setter=setText:) NSString *text;
-
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedState;
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedStateWithRuleLen:(NSInteger)aLen;
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedState:(ANTLRRecognizerSharedState *)aState;
-
-- (id) init;
-- (id) initWithRuleLen:(NSInteger)aLen;
-- (id) initWithState:(ANTLRRecognizerSharedState *)state;
-
-- (id<ANTLRToken>) getToken;
-- (void) setToken:(id<ANTLRToken>) theToken;
-
-- (NSUInteger)type;
-- (void) setType:(NSUInteger) theTokenType;
-
-- (NSUInteger)channel;
-- (void) setChannel:(NSUInteger) theChannel;
-
-- (NSUInteger) getTokenStartLine;
-- (void) setTokenStartLine:(NSUInteger) theTokenStartLine;
-
-- (NSUInteger) charPositionInLine;
-- (void) setCharPositionInLine:(NSUInteger) theCharPosition;
-
-- (NSInteger) getTokenStartCharIndex;
-- (void) setTokenStartCharIndex:(NSInteger) theTokenStartCharIndex;
-
-- (NSString *)text;
-- (void) setText:(NSString *) theText;
-
-
-- (AMutableArray *) getFollowing;
-- (void)setFollowing:(AMutableArray *)aFollow;
-- (ANTLRRuleStack *) getRuleMemo;
-- (void)setRuleMemo:(ANTLRRuleStack *)aRuleMemo;
-- (BOOL) isErrorRecovery;
-- (void) setIsErrorRecovery: (BOOL) flag;
-
-- (BOOL) getFailed;
-- (void) setFailed: (BOOL) flag;
-
-- (NSInteger)  getBacktracking;
-- (void) setBacktracking:(NSInteger) value;
-- (void) increaseBacktracking;
-- (void) decreaseBacktracking;
-- (BOOL) isBacktracking;
-
-- (NSInteger) lastErrorIndex;
-- (void) setLastErrorIndex:(NSInteger) value;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognizerSharedState.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognizerSharedState.m
deleted file mode 100755
index 79dda2d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRecognizerSharedState.m
+++ /dev/null
@@ -1,331 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRRecognizerSharedState.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRMismatchedTokenException.h"
-#import "ANTLRMismatchedRangeException.h"
-
-@implementation ANTLRRecognizerSharedState
-
-@synthesize following;
-@synthesize _fsp;
-@synthesize errorRecovery;
-@synthesize lastErrorIndex;
-@synthesize failed;
-@synthesize syntaxErrors;
-@synthesize backtracking;
-@synthesize ruleMemo;
-@synthesize token;
-@synthesize type;
-@synthesize channel;
-@synthesize tokenStartLine;
-@synthesize tokenStartCharPositionInLine;
-@synthesize tokenStartCharIndex;
-@synthesize text;
-
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedState
-{
-    return [[[ANTLRRecognizerSharedState alloc] init] retain];
-}
-
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedStateWithRuleLen:(NSInteger)aLen
-{
-    return [[[ANTLRRecognizerSharedState alloc] initWithRuleLen:aLen] retain];
-}
-
-+ (ANTLRRecognizerSharedState *) newANTLRRecognizerSharedState:(ANTLRRecognizerSharedState *)aState
-{
-    return [[[ANTLRRecognizerSharedState alloc] initWithState:aState] retain];
-}
-
-- (id) init
-{
-    ANTLRHashRule *aHashRule;
-	if ((self = [super init]) != nil ) {
-        following = [[AMutableArray arrayWithCapacity:10] retain];
-        _fsp = -1;
-        errorRecovery = NO;			// are we recovering?
-        lastErrorIndex = -1;
-        failed = NO;				// indicate that some match failed
-        syntaxErrors = 0;
-        backtracking = 0;			// the level of backtracking
-        tokenStartCharIndex = -1;
-        tokenStartLine = 0;
-        int cnt = 200;
-		ruleMemo = [[ANTLRRuleStack newANTLRRuleStack:cnt] retain];
-        for (int i = 0; i < cnt; i++ ) {
-            aHashRule = [[ANTLRHashRule newANTLRHashRuleWithLen:17] retain];
-            [ruleMemo addObject:aHashRule];
-        }
-#ifdef DONTUSEYET
-        token = state.token;
-        tokenStartCharIndex = state.tokenStartCharIndex;
-        tokenStartCharPositionInLine = state.tokenStartCharPositionInLine;
-        channel = state.channel;
-        type = state.type;
-        text = state.text;
-#endif
-	}
-	return self;
-}
-
-- (id) initWithRuleLen:(NSInteger)aLen
-{
-    ANTLRHashRule *aHashRule;
-	if ((self = [super init]) != nil ) {
-        following = [[AMutableArray arrayWithCapacity:10] retain];
-        _fsp = -1;
-        errorRecovery = NO;			// are we recovering?
-        lastErrorIndex = -1;
-        failed = NO;				// indicate that some match failed
-        syntaxErrors = 0;
-        backtracking = 0;			// the level of backtracking
-        tokenStartCharIndex = -1;
-        tokenStartLine = 0;
-		ruleMemo = [[ANTLRRuleStack newANTLRRuleStack:aLen] retain];
-        for (int i = 0; i < aLen; i++ ) {
-            aHashRule = [[ANTLRHashRule newANTLRHashRuleWithLen:17] retain];
-            [ruleMemo addObject:aHashRule];
-        }
-#ifdef DONTUSEYET
-        token = state.token;
-        tokenStartCharIndex = state.tokenStartCharIndex;
-        tokenStartCharPositionInLine = state.tokenStartCharPositionInLine;
-        channel = state.channel;
-        type = state.type;
-        text = state.text;
-#endif
-	}
-	return self;
-}
-
-- (id) initWithState:(ANTLRRecognizerSharedState *)aState
-{
-    ANTLRHashRule *aHashRule;
-    if ( [following count] < [aState.following count] ) {
-        //        following = new BitSet[state.following.size];
-    }
-    [following setArray:aState.following];
-    _fsp = aState._fsp;
-    errorRecovery = aState.errorRecovery;
-    lastErrorIndex = aState.lastErrorIndex;
-    failed = aState.failed;
-    syntaxErrors = aState.syntaxErrors;
-    backtracking = aState.backtracking;
-    if ( aState.ruleMemo == nil ) {
-        int cnt = 200;
-        ruleMemo = [[ANTLRRuleStack newANTLRRuleStack:cnt] retain];
-        for (int i = 0; i < cnt; i++ ) {
-            aHashRule = [[ANTLRHashRule newANTLRHashRuleWithLen:17] retain];
-            [ruleMemo addObject:aHashRule];
-        }
-    }
-    else {
-        ruleMemo = aState.ruleMemo;
-        if ( [ruleMemo count] == 0 ) {
-            int cnt = [ruleMemo length];
-            for (int i = 0; i < cnt; i++ ) {
-                [ruleMemo addObject:[[ANTLRHashRule newANTLRHashRuleWithLen:17] retain]];
-            }
-        }
-        else {
-            [ruleMemo addObjectsFromArray:aState.ruleMemo];
-        }
-    }
-    token = aState.token;
-    tokenStartCharIndex = aState.tokenStartCharIndex;
-    tokenStartCharPositionInLine = aState.tokenStartCharPositionInLine;
-    tokenStartLine = aState.tokenStartLine;
-    channel = aState.channel;
-    type = aState.type;
-    text = aState.text;
-    return( self );
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRRecognizerSharedState" );
-#endif
-    if ( token ) [token release];
-	if ( following ) [following release];
-	if ( ruleMemo ) [ruleMemo release];
-	[super dealloc];
-}
-
-// token stuff
-#pragma mark Tokens
-
-- (id<ANTLRToken>)getToken
-{
-    return token; 
-}
-
-- (void) setToken: (id<ANTLRToken>) aToken
-{
-    if (token != aToken) {
-        [aToken retain];
-        if ( token ) [token release];
-        token = aToken;
-    }
-}
-
-- (NSUInteger)channel
-{
-    return channel;
-}
-
-- (void) setChannel:(NSUInteger) theChannel
-{
-    channel = theChannel;
-}
-
-- (NSUInteger) getTokenStartLine
-{
-    return tokenStartLine;
-}
-
-- (void) setTokenStartLine:(NSUInteger) theTokenStartLine
-{
-    tokenStartLine = theTokenStartLine;
-}
-
-- (NSUInteger) charPositionInLine
-{
-    return tokenStartCharPositionInLine;
-}
-
-- (void) setCharPositionInLine:(NSUInteger) theCharPosition
-{
-    tokenStartCharPositionInLine = theCharPosition;
-}
-
-- (NSInteger) getTokenStartCharIndex;
-{
-    return tokenStartCharIndex;
-}
-
-- (void) setTokenStartCharIndex:(NSInteger) theTokenStartCharIndex
-{
-    tokenStartCharIndex = theTokenStartCharIndex;
-}
-
-// error handling
-- (void) reportError:(ANTLRRecognitionException *)e
-{
-	NSLog(@"%@", e.name);
-}
-
-- (AMutableArray *) getFollowing
-{
-	return following;
-}
-
-- (void)setFollowing:(AMutableArray *)aFollow
-{
-    if ( following != aFollow ) {
-        if ( following ) [following release];
-        [aFollow retain];
-    }
-    following = aFollow;
-}
-
-- (ANTLRRuleStack *) getRuleMemo
-{
-	return ruleMemo;
-}
-
-- (void)setRuleMemo:(ANTLRRuleStack *)aRuleMemo
-{
-    if ( ruleMemo != aRuleMemo ) {
-        if ( ruleMemo ) [ruleMemo release];
-        [aRuleMemo retain];
-    }
-    ruleMemo = aRuleMemo;
-}
-
-- (BOOL) isErrorRecovery
-{
-	return errorRecovery;
-}
-
-- (void) setIsErrorRecovery: (BOOL) flag
-{
-	errorRecovery = flag;
-}
-
-
-- (BOOL) getFailed
-{
-	return failed;
-}
-
-- (void) setFailed:(BOOL)flag
-{
-	failed = flag;
-}
-
-
-- (NSInteger) backtracking
-{
-	return backtracking;
-}
-
-- (void) setBacktracking:(NSInteger) value
-{
-	backtracking = value;
-}
-
-- (void) increaseBacktracking
-{
-	backtracking++;
-}
-
-- (void) decreaseBacktracking
-{
-	backtracking--;
-}
-
-- (BOOL) isBacktracking
-{
-	return backtracking > 0;
-}
-
-
-- (NSInteger) lastErrorIndex
-{
-    return lastErrorIndex;
-}
-
-- (void) setLastErrorIndex:(NSInteger) value
-{
-	lastErrorIndex = value;
-}
-
-
-@end
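
ANTLRRecognizerSharedState bundles the mutable bookkeeping a generated recognizer threads through its rule methods: the FOLLOW-set stack, the backtracking depth, error-recovery flags, and the rule-memoization stack built in the initializers above. A minimal sketch of driving that removed API, assuming the deleted header is still on the include path and manual reference counting; only methods declared above are used:

    #import "ANTLRRecognizerSharedState.h"

    static void sketchSharedStateUsage(void)
    {
        // The factory both allocs and retains, so the caller owns a reference.
        ANTLRRecognizerSharedState *state =
            [ANTLRRecognizerSharedState newANTLRRecognizerSharedState];

        // Speculative parsing brackets an alternative with backtracking bumps.
        [state increaseBacktracking];
        if ( [state isBacktracking] ) {
            // While backtracking, a failed match is recorded rather than reported.
            [state setFailed:YES];
        }
        [state decreaseBacktracking];

        // Error bookkeeping consulted by the recognizer's recovery code.
        [state setLastErrorIndex:-1];
        NSLog(@"failed=%d backtracking=%d",
              (int)[state getFailed], (int)[state isBacktracking]);

        [state release];
    }
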
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleElementStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleElementStream.h
deleted file mode 100644
index 46e0190..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleElementStream.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-
-// TODO: this should be separated into stream and enumerator classes
-@interface ANTLRRewriteRuleElementStream : NSObject {
-    NSInteger cursor;
-    BOOL dirty;        ///< indicates whether the stream should return copies of its elements, set to true after a call to -reset
-    BOOL isSingleElement;
-    id singleElement;
-    __strong AMutableArray *elements;
-    
-    __strong NSString *elementDescription;
-    __strong id<ANTLRTreeAdaptor> treeAdaptor;
-}
-
-@property (assign) NSInteger cursor;
-@property (assign) BOOL dirty;
-@property (assign) BOOL isSingleElement;
-@property (assign) id singleElement;
-@property (assign) AMutableArray *elements;
-@property (assign) NSString *elementDescription;
-@property (retain) id<ANTLRTreeAdaptor> treeAdaptor;
-
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription;
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription
-                                                            element:(id)anElement;
-+ (ANTLRRewriteRuleElementStream*) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription
-                                                           elements:(NSArray *)theElements;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-
-- (void)reset;
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor;
-
-- (void) addElement:(id)anElement;
-- (NSInteger) size;
- 
-- (BOOL) hasNext;
-- (id<ANTLRBaseTree>) nextTree;
-- (id<ANTLRBaseTree>) _next;       // internal: TODO: redesign if necessary. maybe delegate
-
-- (id) copyElement:(id)element;
-- (id) toTree:(id)element;
-
-- (NSString *) getDescription;
-- (void) setDescription:(NSString *)description;
-
-@end
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleElementStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleElementStream.m
deleted file mode 100644
index 138163c..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleElementStream.m
+++ /dev/null
@@ -1,258 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRRewriteRuleElementStream.h"
-
-@implementation ANTLRRewriteRuleElementStream
-
-@synthesize cursor;
-@synthesize dirty;
-@synthesize isSingleElement;
-@synthesize singleElement;
-@synthesize elements;
-@synthesize elementDescription;
-@synthesize treeAdaptor;
-
-+ (ANTLRRewriteRuleElementStream *) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-{
-    return [[ANTLRRewriteRuleElementStream alloc] initWithTreeAdaptor:aTreeAdaptor
-                                                          description:anElementDescription];
-}
-
-+ (ANTLRRewriteRuleElementStream *) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-                                                             element:(id)anElement
-{
-    return [[ANTLRRewriteRuleElementStream alloc] initWithTreeAdaptor:aTreeAdaptor
-                                                          description:anElementDescription
-                                                              element:anElement];
-}
-
-+ (ANTLRRewriteRuleElementStream *) newANTLRRewriteRuleElementStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-                                                            elements:(NSArray *)theElements;
-{
-    return [[ANTLRRewriteRuleElementStream alloc] initWithTreeAdaptor:aTreeAdaptor
-                                                          description:anElementDescription
-                                                             elements:theElements];
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription
-{
-    if ((self = [super init]) != nil) {
-        cursor = 0;
-        dirty = NO;
-        [self setDescription:anElementDescription];
-        [self setTreeAdaptor:aTreeAdaptor];
-        dirty = NO;
-        isSingleElement = YES;
-        singleElement = nil;
-        elements = nil;
-    }
-    return self;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement
-{
-    if ((self = [super init]) != nil) {
-        cursor = 0;
-        dirty = NO;
-        [self setDescription:anElementDescription];
-        [self setTreeAdaptor:aTreeAdaptor];
-        dirty = NO;
-        isSingleElement = YES;
-        singleElement = nil;
-        elements = nil;
-        [self addElement:anElement];
-    }
-    return self;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements
-{
-    self = [super init];
-    if (self) {
-        cursor = 0;
-        dirty = NO;
-        [self setDescription:anElementDescription];
-        [self setTreeAdaptor:aTreeAdaptor];
-        dirty = NO;
-        singleElement = nil;
-        isSingleElement = NO;
-        elements = [[AMutableArray arrayWithArray:theElements] retain];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRRewriteRuleElementStream" );
-#endif
-    if ( singleElement && isSingleElement ) [singleElement release];
-    else if ( elements && !isSingleElement ) [elements release];
-    [self setDescription:nil];
-    [self setTreeAdaptor:nil];
-    [super dealloc];
-}
-
-- (void)reset
-{
-    cursor = 0;
-    dirty = YES;
-}
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor
-{
-    return treeAdaptor;
-}
-
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-{
-    if (treeAdaptor != aTreeAdaptor) {
-        if ( treeAdaptor ) [treeAdaptor release];
-        treeAdaptor = aTreeAdaptor;
-        [treeAdaptor retain];
-    }
-}
-
-- (void) addElement: (id)anElement
-{
-    if (anElement == nil)
-        return;
-    if (elements != nil) {
-        [elements addObject:anElement];
-        return;
-        }
-    if (singleElement == nil) {
-        singleElement = anElement;
-        singleElement = [anElement retain];
-        return;
-    }
-    isSingleElement = NO;
-    elements = [[AMutableArray arrayWithCapacity:5] retain];
-    [elements addObject:singleElement];
-    singleElement = nil;  // balance previous retain in initializer/addElement
-    [elements addObject:anElement];
-}
-
-- (void) setElement: (id)anElement
-{
-    if (anElement == nil)
-        return;
-    if (elements != nil) {
-        [elements addObject:anElement];
-        return;
-        }
-    if (singleElement == nil) {
-        singleElement = anElement;
-        singleElement = [anElement retain];
-        return;
-    }
-    isSingleElement = NO;
-    elements = [[AMutableArray arrayWithCapacity:5] retain];
-    [elements addObject:singleElement];
-    singleElement = nil;  // balance previous retain in initializer/addElement
-    [elements addObject:anElement];
-}
-
-- (id<ANTLRBaseTree>) nextTree
-{
-    NSInteger n = [self size];
-    if ( dirty && (cursor >= 0 && n == 1)) {
-        // if out of elements and size is 1, dup
-        id element = [self _next];
-        return [self copyElement:element];
-    }
-    // test size above then fetch
-    id element = [self _next];
-    return element;
-}
-
-- (id) _next       // internal: TODO: redesign if necessary. maybe delegate
-{
-    NSInteger n = [self size];
-    if (n == 0) {
-        @throw [NSException exceptionWithName:@"RewriteEmptyStreamException" reason:nil userInfo:nil];// TODO: fill in real exception
-    }
-    if ( cursor >= n ) {
-        if ( n == 1 ) {
-            return [self toTree:singleElement]; // will be dup'ed in -next
-        }
-        @throw [NSException exceptionWithName:@"RewriteCardinalityException" reason:nil userInfo:nil];// TODO: fill in real exception
-    }
-    if (singleElement != nil) {
-        cursor++;
-        return [self toTree:singleElement];
-    }
-    id el = [elements objectAtIndex:cursor];
-    cursor++;
-    return [self toTree:el];
-}
-
-- (BOOL) hasNext
-{
-    return (singleElement != nil && cursor < 1) ||
-            (elements != nil && cursor < [elements count]);
-}
-
-- (NSInteger) size
-{
-    NSInteger n = 0;
-    if (singleElement != nil)
-        n = 1;
-    if (elements != nil)
-        return [elements count];
-    return n;
-}
-
-- (id) copyElement:(id)element
-{
-    [self doesNotRecognizeSelector:_cmd];   // subclass responsibility
-    return nil;
-}
-
-- (id<ANTLRBaseTree>) toTree:(id)element
-{
-    return element;
-}
-
-- (NSString *) getDescription
-{
-    return elementDescription;
-}
-
-- (void) setDescription:(NSString *) description
-{
-    if ( description != nil && description != elementDescription ) {
-        if (elementDescription != nil) [elementDescription release];
-        elementDescription = [NSString stringWithString:description];
-        [elementDescription retain];
-    }
-}
-
-@end
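
ANTLRRewriteRuleElementStream is the common base for the rewrite streams that follow. It starts in single-element mode and silently promotes itself to an AMutableArray-backed list on the second addElement:, and _next enforces the rewrite cardinality rules: exhausting a multi-element stream raises RewriteCardinalityException, while a single-element stream keeps handing back its element (the subclasses duplicate it through the tree adaptor). A minimal sketch of the add/iterate contract, assuming the deleted headers are available; the nil adaptor is tolerable here only because the base-class toTree: returns its argument unchanged:

    #import "ANTLRRewriteRuleElementStream.h"

    static void sketchElementStream(void)
    {
        // The adaptor is only consulted when elements must be duplicated,
        // which this sketch avoids, so nil is acceptable for illustration.
        ANTLRRewriteRuleElementStream *stream =
            [ANTLRRewriteRuleElementStream newANTLRRewriteRuleElementStream:nil
                                                                description:@"rule expr"];

        [stream addElement:@"first"];    // stored as the single element
        [stream addElement:@"second"];   // promotes the stream to list mode

        while ( [stream hasNext] ) {
            id element = [stream nextTree];   // base class hands elements back untouched
            NSLog(@"element %@ of %ld", element, (long)[stream size]);
        }
    }
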
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleNodeStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleNodeStream.h
deleted file mode 100755
index 2789e45..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleNodeStream.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRewriteRuleElementStream.h"
-
-@interface ANTLRRewriteRuleNodeStream : ANTLRRewriteRuleElementStream {
-
-}
-
-+ (ANTLRRewriteRuleNodeStream *) newANTLRRewriteRuleNodeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-+ (ANTLRRewriteRuleNodeStream *) newANTLRRewriteRuleNodeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-+ (ANTLRRewriteRuleNodeStream *) newANTLRRewriteRuleNode:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-
-- (id) nextNode;
-- (id) toTree:(id<ANTLRBaseTree>)element;
-- (id) dup:(id)element;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleNodeStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleNodeStream.m
deleted file mode 100755
index 8dfedd7..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleNodeStream.m
+++ /dev/null
@@ -1,74 +0,0 @@
-//
-//  ANTLRRewriteRuleNodeStream.m
-//  ANTLR
-//
-//  Created by Kay Röpke on 7/16/07.
-//  Copyright 2007 classDump. All rights reserved.
-//
-
-#import "ANTLRRewriteRuleNodeStream.h"
-#import "ANTLRRuntimeException.h"
-
-@implementation ANTLRRewriteRuleNodeStream
-
-+ (ANTLRRewriteRuleNodeStream*) newANTLRRewriteRuleNodeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-{
-    return [[ANTLRRewriteRuleNodeStream alloc] initWithTreeAdaptor:aTreeAdaptor description:anElementDescription];
-}
-
-+ (ANTLRRewriteRuleNodeStream*) newANTLRRewriteRuleNodeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-{
-    return [[ANTLRRewriteRuleNodeStream alloc] initWithTreeAdaptor:aTreeAdaptor description:anElementDescription element:anElement];
-}
-
-+ (ANTLRRewriteRuleNodeStream*) newANTLRRewriteRuleNode:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-{
-    return [[ANTLRRewriteRuleNodeStream alloc] initWithTreeAdaptor:aTreeAdaptor description:anElementDescription elements:theElements];
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription
-{
-    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription]) != nil) {
-        dirty = NO;
-        isSingleElement = YES;
-    }
-    return self;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement
-{
-    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription element:anElement]) != nil) {
-        dirty = NO;
-    }
-    return self;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements
-{
-    if ((self = [super init]) != nil) {
-        dirty = NO;
-    }
-    return self;
-}
-
-
-- (id) nextNode
-{
-    if (dirty || (cursor >= [self size] && [self size] == 1))
-        return [treeAdaptor dupNode:[self _next]];
-    else 
-        return [self _next];
-}
-
-- (id<ANTLRBaseTree>) toTree:(id<ANTLRBaseTree>)element
-{
-    return [treeAdaptor dupNode:element];
-}
-
-- (id) dup:(id)element
-{
-    return [treeAdaptor dupTree:element];
-    @throw [ANTLRRuntimeException newException:@"ANTLRUnsupportedOperationException" reason:@"dup can't be called for a node stream."];
-}
-
-@end
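
ANTLRRewriteRuleNodeStream specializes the base stream for single nodes: its toTree: routes every element through the adaptor's dupNode:, so a node pulled out of a rewrite never aliases the one the parser stored (the @throw in dup: above is unreachable because of the return before it). A sketch of the intended call pattern; adaptor and aNode stand in for objects the generated parser already owns:

    #import "ANTLRRewriteRuleNodeStream.h"

    static void sketchNodeStream(id<ANTLRTreeAdaptor> adaptor, id aNode)
    {
        // adaptor and aNode are supplied by the surrounding recognizer;
        // this sketch only exercises the removed stream API itself.
        ANTLRRewriteRuleNodeStream *stream =
            [ANTLRRewriteRuleNodeStream newANTLRRewriteRuleNodeStream:adaptor
                                                          description:@"token ID"
                                                              element:aNode];

        while ( [stream hasNext] ) {
            id node = [stream nextNode];   // handed back as [adaptor dupNode:...]
            NSLog(@"rewrote node %@", node);
        }
    }
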
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleSubtreeStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleSubtreeStream.h
deleted file mode 100644
index 1d18b24..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleSubtreeStream.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRewriteRuleElementStream.h"
-
-@interface ANTLRRewriteRuleSubtreeStream : ANTLRRewriteRuleElementStream {
-
-}
-
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription;
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-                                                             element:(id)anElement;
-+ (ANTLRRewriteRuleSubtreeStream *) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                         description:(NSString *)anElementDescription
-                                                            elements:(NSArray *)theElements;
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
-
-- (id) nextNode;
-- (id) dup:(id)element;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleSubtreeStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleSubtreeStream.m
deleted file mode 100644
index 48910bd..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleSubtreeStream.m
+++ /dev/null
@@ -1,101 +0,0 @@
-//
-//  ANTLRRewriteRuleSubtreeStream.m
-//  ANTLR
-//
-//  Created by Kay Röpke on 7/16/07.
-// [The "BSD licence"]
-// Copyright (c) 2007 Kay Röpke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRRewriteRuleSubtreeStream.h"
-
-
-@implementation ANTLRRewriteRuleSubtreeStream
-
-+ (ANTLRRewriteRuleSubtreeStream*) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription;
-{
-    return [[ANTLRRewriteRuleSubtreeStream alloc] initWithTreeAdaptor:aTreeAdaptor
-                                                          description:anElementDescription];
-}
-
-+ (ANTLRRewriteRuleSubtreeStream*) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription
-                                                            element:(id)anElement;
-{
-    return [[ANTLRRewriteRuleSubtreeStream alloc] initWithTreeAdaptor:aTreeAdaptor
-                                                          description:anElementDescription
-                                                              element:anElement];
-}
-
-+ (ANTLRRewriteRuleSubtreeStream*) newANTLRRewriteRuleSubtreeStream:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-                                                        description:(NSString *)anElementDescription
-                                                           elements:(NSArray *)theElements;
-{
-    return [[ANTLRRewriteRuleSubtreeStream alloc] initWithTreeAdaptor:aTreeAdaptor
-                                                          description:anElementDescription
-                                                             elements:theElements];
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription
-{
-    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription]) != nil) {
-        dirty = NO;
-        isSingleElement = YES;
-    }
-    return self;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement
-{
-    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription element:anElement]) != nil) {
-        dirty = NO;
-    }
-    return self;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements
-{
-    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription elements:theElements]) != nil) {
-        dirty = NO;
-    }
-    return self;
-}
-
-
-- (id) nextNode
-{
-    if (dirty || (cursor >= [self size] && [self size] == 1))
-        return [treeAdaptor dupNode:[self _next]];
-    else 
-        return [self _next];
-}
-
-- (id) dup:(id)element
-{
-    return [treeAdaptor dupTree:element];
-}
-
-@end
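
ANTLRRewriteRuleSubtreeStream handles whole rule results: while the stream is fresh, nextNode returns the stored subtree as-is, and only when a single-element stream is reused (or after reset marks it dirty) does it duplicate the root through the adaptor's dupNode:; dup: performs full copies via dupTree:. A sketch of typical use inside a rewrite, with adaptor and ruleTree assumed to come from the enclosing parser:

    #import "ANTLRRewriteRuleSubtreeStream.h"

    static void sketchSubtreeStream(id<ANTLRTreeAdaptor> adaptor, id ruleTree)
    {
        ANTLRRewriteRuleSubtreeStream *stream =
            [ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:adaptor
                                                                description:@"rule expr"
                                                                    element:ruleTree];

        // First request: the stored subtree comes back untouched.
        id first = [stream nextNode];
        // Reuse of the exhausted single-element stream: the root is duplicated
        // through the adaptor so the rewrite does not alias the original subtree.
        id again = [stream nextNode];
        NSLog(@"first=%@ again=%@", first, again);
    }
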
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleTokenStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleTokenStream.h
deleted file mode 100644
index 620a0bd..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleTokenStream.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRewriteRuleElementStream.h"
-
-
-@interface ANTLRRewriteRuleTokenStream : ANTLRRewriteRuleElementStream {
-
-}
-
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)anAdaptor
-                          description:(NSString *)elementDescription;
-/** Create a stream with one element */
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)adaptor
-                          description:(NSString *)elementDescription
-                              element:(id) oneElement;
-/** Create a stream, but feed off an existing list */
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)adaptor
-                          description:(NSString *)elementDescription
-                             elements:(AMutableArray *)elements;
-
-- (id) init;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor 
-               description:(NSString *)aDescription
-                   element:(id)element;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription
-                  elements:(AMutableArray *)elements;
-                               
-/** Get next token from stream and make a node for it */
-- (id) nextNode;
-
-- (id) nextToken;
-
-/** Don't convert to a tree unless they explicitly call nextTree.
- *  This way we can do hetero tree nodes in rewrite.
- */
-- (id<ANTLRBaseTree>) toTree:(id<ANTLRToken>)element;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleTokenStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleTokenStream.m
deleted file mode 100644
index 5aea3f8..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRewriteRuleTokenStream.m
+++ /dev/null
@@ -1,128 +0,0 @@
-//
-//  ANTLRRewriteRuleTokenStream.m
-//  ANTLR
-//
-//  Created by Kay Röpke on 7/16/07.
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRRewriteRuleTokenStream.h"
-#import "ANTLRRuntimeException.h"
-#import "ANTLRHashMap.h"
-#import "ANTLRMapElement.h"
-
-@implementation ANTLRRewriteRuleTokenStream
-
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)anAdaptor
-                          description:(NSString *)elementDescription
-{
-    return [[ANTLRRewriteRuleTokenStream alloc] initWithTreeAdaptor:anAdaptor
-                                                        description:elementDescription];
-}
-
-/** Create a stream with one element */
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)adaptor
-                          description:(NSString *)elementDescription
-                              element:(id) oneElement
-{
-    return [[ANTLRRewriteRuleTokenStream alloc] initWithTreeAdaptor:adaptor
-                                                        description:elementDescription
-                                                            element:oneElement];
-}
-
-/** Create a stream, but feed off an existing list */
-+ (id) newANTLRRewriteRuleTokenStream:(id<ANTLRTreeAdaptor>)adaptor
-                          description:(NSString *)elementDescription
-                             elements:(AMutableArray *)elements
-{
-    return [[ANTLRRewriteRuleTokenStream alloc] initWithTreeAdaptor:adaptor
-                                                        description:elementDescription
-                                                           elements:elements];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil ) {
-    }
-    return self;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription
-{
-    if ((self = [super initWithTreeAdaptor:anAdaptor
-                               description:aDescription]) != nil ) {
-    }
-    return self;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription
-                   element:(id)anElement
-{
-    if ((self = [super initWithTreeAdaptor:anAdaptor
-                               description:aDescription
-                                   element:anElement]) != nil ) {
-    }
-    return self;
-}
-
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-               description:(NSString *)aDescription
-                  elements:(AMutableArray *)elementList
-{
-    if ((self = [super initWithTreeAdaptor:anAdaptor
-                               description:aDescription
-                                  elements:elementList]) != nil ) {
-    }
-    return self;
-}
-
-- (id<ANTLRBaseTree>) nextNode
-{
-    id<ANTLRToken> t = [self _next];
-    return [treeAdaptor create:t];
-}
-
-- (id) nextToken
-{
-    return [self _next];
-}
-
-/** Don't convert to a tree unless they explicitly call nextTree.
- *  This way we can do hetero tree nodes in rewrite.
- */
-- (id<ANTLRBaseTree>) toTree:(id<ANTLRToken>)element
-{
-    return element;
-}
-
-- (id) copyElement:(id)element
-{
-    @throw [ANTLRRuntimeException newException:@"copy can't be called for a token stream."];
-}
-
-@end
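
ANTLRRewriteRuleTokenStream feeds tokens into rewrites: nextToken hands the stored ANTLRToken back unchanged, nextNode asks the adaptor to create a fresh tree node for it, and copyElement: deliberately throws because tokens are never deep-copied. A sketch of the token path, again assuming an adaptor and a token owned by the surrounding parser:

    #import "ANTLRRewriteRuleTokenStream.h"

    static void sketchTokenStream(id<ANTLRTreeAdaptor> adaptor, id<ANTLRToken> idToken)
    {
        ANTLRRewriteRuleTokenStream *stream =
            [ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:adaptor
                                                            description:@"token ID"
                                                                element:idToken];

        if ( [stream hasNext] ) {
            // Generated code calls nextToken when the token itself is wanted and
            // nextNode when a tree node should be built via [adaptor create:].
            id<ANTLRToken> t = [stream nextToken];
            NSLog(@"rewrote token %@", t);
        }
    }
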
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMapElement.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMapElement.h
deleted file mode 100644
index e040b18..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMapElement.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRRuleMapElement.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseMapElement.h"
-
-@interface ANTLRRuleMapElement : ANTLRBaseMapElement {
-    NSNumber *ruleNum;
-}
-
-@property (retain, getter=getRuleNum, setter=setRuleNum:) NSNumber *ruleNum;
-
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElement;
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElementWithIndex:(NSNumber *)anIdx;
-+ (ANTLRRuleMapElement *) newANTLRRuleMapElementWithIndex:(NSNumber *)anIdx RuleNum:(NSNumber *)aRuleNum;
-- (id) init;
-- (id) initWithAnIndex:(NSNumber *)anIdx;
-- (id) initWithAnIndex:(NSNumber *)anIdx RuleNum:(NSNumber *)aRuleNum;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-- (NSNumber *)getRuleNum;
-- (void)setRuleNum:(NSNumber *)aRuleNum;
-
-- (NSInteger)size;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMapElement.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMapElement.m
deleted file mode 100644
index 0e8c463..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMapElement.m
+++ /dev/null
@@ -1,111 +0,0 @@
-//
-//  ANTLRRuleMapElement.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRRuleMapElement.h"
-
-
-@implementation ANTLRRuleMapElement
-
-@synthesize ruleNum;
-
-+ (ANTLRRuleMapElement *)newANTLRRuleMapElement
-{
-    return [[ANTLRRuleMapElement alloc] init];
-}
-
-+ (ANTLRRuleMapElement *)newANTLRRuleMapElementWithIndex:(NSNumber *)aNumber
-{
-    return [[ANTLRRuleMapElement alloc] initWithAnIndex:(NSNumber *)aNumber];
-}
-
-+ (ANTLRRuleMapElement *)newANTLRRuleMapElementWithIndex:(NSNumber *)aNumber RuleNum:(NSNumber *)aRuleNum
-{
-    return [[ANTLRRuleMapElement alloc] initWithAnIndex:aNumber RuleNum:aRuleNum];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil ) {
-        index = nil;
-        ruleNum = nil;
-    }
-    return (self);
-}
-
-- (id) initWithAnIndex:(NSNumber *)aNumber
-{
-    if ((self = [super initWithAnIndex:aNumber]) != nil ) {
-        ruleNum = nil;
-    }
-    return (self);
-}
-
-- (id) initWithAnIndex:(NSNumber *)aNumber RuleNum:(NSNumber *)aRuleNum
-{
-    if ((self = [super initWithAnIndex:aNumber]) != nil ) {
-        [aRuleNum retain];
-        ruleNum = aRuleNum;
-    }
-    return (self);
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRRuleMapElement *copy;
-    
-    copy = [super copyWithZone:aZone];
-    copy.ruleNum = ruleNum;
-    return( copy );
-}
-
-- (id)getRuleNum
-{
-    return ruleNum;
-}
-
-- (void)setRuleNum:(id)aRuleNum
-{
-    if ( aRuleNum != ruleNum ) {
-        if ( ruleNum ) [ruleNum release];
-        [aRuleNum retain];
-    }
-    ruleNum = aRuleNum;
-}
-
-- (NSInteger)size
-{
-    NSInteger aSize = 0;
-    if (ruleNum != nil) aSize++;
-    if (index != nil) aSize++;
-    return( aSize );
-}
-
-@end
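
ANTLRRuleMapElement is a small map entry that pairs the index inherited from ANTLRBaseMapElement with a rule number; size reports how many of those two slots are populated. A minimal sketch of constructing and querying one, using only methods declared in the removed header:

    #import "ANTLRRuleMapElement.h"

    static void sketchRuleMapElement(void)
    {
        ANTLRRuleMapElement *entry =
            [ANTLRRuleMapElement newANTLRRuleMapElementWithIndex:[NSNumber numberWithInteger:42]
                                                         RuleNum:[NSNumber numberWithInteger:7]];

        // size counts how many of the two fields are non-nil.
        NSLog(@"ruleNum=%@ populatedFields=%ld",
              [entry getRuleNum], (long)[entry size]);
    }
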
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMemo.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMemo.h
deleted file mode 100644
index ed95af6..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMemo.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-//  ANTLRRuleMemo.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRLinkBase.h"
-
-@interface ANTLRRuleMemo : ANTLRLinkBase {
-    NSNumber *startIndex;
-    NSNumber *stopIndex;
-}
-
-@property (retain, getter=getStartIndex, setter=setStartIndex:) NSNumber *startIndex;
-@property (retain, getter=getStopIndex, setter=setStopIndex:) NSNumber *stopIndex;
-
-+ (ANTLRRuleMemo *)newANTLRRuleMemo;
-+ (ANTLRRuleMemo *)newANTLRRuleMemoWithStartIndex:(NSNumber *)aStartIndex StopIndex:(NSNumber *)aStopIndex;
-
-- (id) init;
-- (id) initWithStartIndex:(NSNumber *)aStartIndex StopIndex:(NSNumber *)aStopIndex;
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-- (ANTLRRuleMemo *)getRuleWithStartIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStartIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStopIndex:(NSInteger)aStartIndex;
-- (NSNumber *)getStartIndex;
-- (void)setStartIndex:(NSNumber *)aStartIndex;
-- (NSNumber *)getStopIndex;
-- (void)setStopIndex:(NSNumber *)aStopIndex;
-
-@end
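
ANTLRRuleMemo records one memoized rule invocation as a start/stop index pair, and instances chain through the fNext link inherited from ANTLRLinkBase so getStopIndex: can walk the chain looking for a matching start index. A sketch of the single-node case, using only the factory and accessors declared above:

    #import "ANTLRRuleMemo.h"

    static void sketchRuleMemo(void)
    {
        ANTLRRuleMemo *memo =
            [ANTLRRuleMemo newANTLRRuleMemoWithStartIndex:[NSNumber numberWithInteger:10]
                                                StopIndex:[NSNumber numberWithInteger:25]];

        // count reports how many of the two index fields are set (2 here);
        // getStopIndex: returns the stop index recorded for start index 10.
        NSLog(@"fields=%ld stop=%@", (long)[memo count], [memo getStopIndex:10]);
    }
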
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMemo.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMemo.m
deleted file mode 100644
index 836d355..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleMemo.m
+++ /dev/null
@@ -1,158 +0,0 @@
-//
-//  ANTLRRuleMemo.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/16/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRRuleMemo.h"
-
-
-@implementation ANTLRRuleMemo
-
-@synthesize startIndex;
-@synthesize stopIndex;
-
-+ (ANTLRRuleMemo *)newANTLRRuleMemo
-{
-    return [[ANTLRRuleMemo alloc] init];
-}
-
-+ (ANTLRRuleMemo *)newANTLRRuleMemoWithStartIndex:(NSNumber *)anIndex StopIndex:(NSNumber *)aStopIndex
-{
-    return [[ANTLRRuleMemo alloc] initWithStartIndex:anIndex StopIndex:aStopIndex];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil ) {
-        startIndex = nil;
-        stopIndex = nil;
-    }
-    return (self);
-}
-
-- (id) initWithStartIndex:(NSNumber *)aStartIndex StopIndex:(NSNumber *)aStopIndex
-{
-    if ((self = [super init]) != nil ) {
-        [aStartIndex retain];
-        startIndex = aStartIndex;
-        [aStopIndex retain];
-        stopIndex = aStopIndex;
-    }
-    return (self);
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRRuleMemo *copy;
-    
-    copy = [super copyWithZone:aZone];
-    copy.startIndex = startIndex;
-    copy.stopIndex = stopIndex;
-    return( copy );
-}
-
-- (NSInteger)count
-{
-    NSInteger aCnt = 0;
-    
-    if (startIndex != nil) aCnt++;
-    if (stopIndex != nil) aCnt++;
-    return aCnt;
-}
-
-- (NSInteger) size
-{
-    return (2 * sizeof(id));
-}
-
-- (ANTLRRuleMemo *)getRuleWithStartIndex:(NSInteger)aStartIndex
-{
-    ANTLRRuleMemo *aMatchMemo = self;
-    do {
-        if (aStartIndex == [aMatchMemo.startIndex integerValue] ) {
-            return aMatchMemo;
-        }
-        aMatchMemo = aMatchMemo.fNext;
-    } while ( aMatchMemo != nil );
-    return nil;
-}
-
-- (NSNumber *)getStartIndex:(NSInteger)aStartIndex
-{
-    ANTLRRuleMemo *aMatchMemo = self;
-    do {
-        if (aStartIndex == [aMatchMemo.startIndex integerValue] ) {
-            return aMatchMemo.stopIndex;
-        }
-        aMatchMemo = aMatchMemo.fNext;
-    } while ( aMatchMemo != nil );
-    return nil;
-}
-
-- (NSNumber *)getStopIndex:(NSInteger)aStartIndex
-{
-    ANTLRRuleMemo *aMatchMemo = self;
-    do {
-        if (aStartIndex == [aMatchMemo.startIndex integerValue] ) {
-            return aMatchMemo.stopIndex;
-        }
-        aMatchMemo = aMatchMemo.fNext;
-    } while ( aMatchMemo != nil );
-    return nil;
-}
-
-- (NSNumber *)getStartIndex;
-{
-    return startIndex;
-}
-
-- (void)setStartIndex:(NSNumber *)aStartIndex
-{
-    if ( aStartIndex != startIndex ) {
-        if ( startIndex ) [startIndex release];
-        [aStartIndex retain];
-    }
-    startIndex = aStartIndex;
-}
-
-- (NSNumber *)getStopIndex;
-{
-    return stopIndex;
-}
-
-- (void)setStopIndex:(NSNumber *)aStopIndex
-{
-    if ( aStopIndex != stopIndex ) {
-        if ( stopIndex ) [stopIndex release];
-        [aStopIndex retain];
-    }
-    stopIndex = aStopIndex;
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleReturnScope.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleReturnScope.h
deleted file mode 100644
index b9313ed..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleReturnScope.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//
-//  ANTLRRuleReturnScope.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-
-@interface ANTLRRuleReturnScope : NSObject <NSCopying> {
-
-}
-
-/** Return the start token or tree */
-- (id<ANTLRToken>) getStart;
-
-/** Return the stop token or tree */
-- (id<ANTLRToken>) getStop;
-
-/** Has a value potentially if output=AST; */
-- (id) getTree;
-
-/** Has a value potentially if output=template; Don't use StringTemplate
- *  type as it then causes a dependency with ST lib.
- */
-- (id) getTemplate;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleReturnScope.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleReturnScope.m
deleted file mode 100644
index 70878a3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleReturnScope.m
+++ /dev/null
@@ -1,71 +0,0 @@
-//
-//  ANTLRRuleReturnScope.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRRuleReturnScope.h"
-
-
-@implementation ANTLRRuleReturnScope
-
-/** Return the start token or tree */
-- (id) getStart
-{
-    return nil;
-}
-
-/** Return the stop token or tree */
-- (id) getStop
-{
-    return nil;
-}
-
-/** Has a value potentially if output=AST; */
-- (id) getTree
-{
-    return nil;
-}
-
-/** Has a value potentially if output=template; Don't use StringTemplate
- *  type as it then causes a dependency with ST lib.
- */
-- (id) getTemplate
-{
-    return nil;
-}
-
-// create a copy, including the text if available
-// the input stream is *not* copied!
-- (id) copyWithZone:(NSZone *)theZone
-{
-    ANTLRRuleReturnScope *copy = [[[self class] allocWithZone:theZone] init];
-    return copy;
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleStack.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleStack.h
deleted file mode 100644
index 12d450b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleStack.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-//  ANTLRRuleStack.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseStack.h"
-#import "ANTLRHashRule.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRRuleStack : ANTLRBaseStack {
-}
-
-// Contruction/Destruction
-+(ANTLRRuleStack *)newANTLRRuleStack;
-+(ANTLRRuleStack *)newANTLRRuleStack:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-
-- (NSInteger)count;
-- (NSInteger)size;
-
-- (ANTLRHashRule *) pop;
-
-- (void) insertObject:(ANTLRHashRule *)aHashRule atIndex:(NSInteger)idx;
-- (ANTLRHashRule *)objectAtIndex:(NSInteger)idx;
-- (void)putHashRuleAtRuleIndex:(NSInteger)aRuleIndex StartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleStack.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleStack.m
deleted file mode 100644
index 39e93b2..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuleStack.m
+++ /dev/null
@@ -1,152 +0,0 @@
-//
-//  ANTLRRuleStack.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-extern NSInteger debug;
-
-#import "ANTLRRuleStack.h"
-#import "ANTLRTree.h"
-
-/*
- * Start of ANTLRRuleStack
- */
-@implementation ANTLRRuleStack
-
-+ (ANTLRRuleStack *)newANTLRRuleStack
-{
-    return [[ANTLRRuleStack alloc] init];
-}
-
-+ (ANTLRRuleStack *)newANTLRRuleStack:(NSInteger)cnt
-{
-    return [[ANTLRRuleStack alloc] initWithLen:cnt];
-}
-
-- (id)init
-{
-	if ((self = [super init]) != nil) {
-	}
-    return( self );
-}
-
-- (id)initWithLen:(NSInteger)cnt
-{
-	if ((self = [super initWithLen:cnt]) != nil) {
-	}
-    return( self );
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRRuleStack" );
-#endif
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    return [super copyWithZone:aZone];
-}
-
-- (NSInteger)count
-{
-    ANTLRRuleMemo *anElement;
-    NSInteger aCnt = 0;
-    for( int i = 0; i < BuffSize; i++ ) {
-        if ((anElement = ptrBuffer[i]) != nil)
-            aCnt++;
-    }
-    return aCnt;
-}
-
-- (NSInteger)size
-{
-    ANTLRRuleMemo *anElement;
-    NSInteger aSize = 0;
-    for( int i = 0; i < BuffSize; i++ ) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aSize++;
-        }
-    }
-    return aSize;
-}
-
-- (ANTLRHashRule *)pop
-{
-    return (ANTLRHashRule *)[super pop];
-}
-
-- (void) insertObject:(ANTLRHashRule *)aRule atIndex:(NSInteger)idx
-{
-    if ( idx >= BuffSize ) {
-        if ( debug > 2 ) NSLog( @"In ANTLRRuleStack attempting to insert aRule at Index %d, but Buffer is only %d long\n", idx, BuffSize );
-        [self ensureCapacity:idx];
-    }
-    if ( aRule != ptrBuffer[idx] ) {
-        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (ANTLRHashRule *)objectAtIndex:(NSInteger)idx
-{
-    if (idx < BuffSize) {
-        return ptrBuffer[idx];
-    }
-    return nil;
-}
-
-- (void)putHashRuleAtRuleIndex:(NSInteger)aRuleIndex StartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex
-{
-    ANTLRHashRule *aHashRule;
-    ANTLRRuleMemo *aRuleMemo;
-
-    if (aRuleIndex >= BuffSize) {
-        if ( debug) NSLog( @"putHashRuleAtRuleIndex attempting to insert aRule at Index %d, but Buffer is only %d long\n", aRuleIndex, BuffSize );
-        [self ensureCapacity:aRuleIndex];
-    }
-    if ((aHashRule = ptrBuffer[aRuleIndex]) == nil) {
-        aHashRule = [[ANTLRHashRule newANTLRHashRuleWithLen:17] retain];
-        ptrBuffer[aRuleIndex] = aHashRule;
-    }
-    if (( aRuleMemo = [aHashRule objectAtIndex:aStartIndex] ) == nil ) {
-        aRuleMemo = [[ANTLRRuleMemo newANTLRRuleMemo] retain];
-        [aHashRule insertObject:aRuleMemo atIndex:aStartIndex];
-    }
-    [aRuleMemo setStartIndex:[NSNumber numberWithInteger:aStartIndex]];
-    [aRuleMemo setStopIndex:[NSNumber numberWithInteger:aStopIndex]];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuntimeException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRRuntimeException.h
deleted file mode 100644
index 47834d2..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuntimeException.h
+++ /dev/null
@@ -1,111 +0,0 @@
-//
-//  ANTLRRuntimeException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/5/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-@interface ANTLRRuntimeException : NSException {
-}
-
-+ (ANTLRRuntimeException *) newException;
-+ (ANTLRRuntimeException *) newException:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-+ (ANTLRRuntimeException *) newException:(NSString *)aName reason:(NSString *)aReason;
-+ (ANTLRRuntimeException *) newException:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-- (id) init;
-- (id) init:(NSString *)aReason;
-- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason;
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-- (NSString *) Description;
-- (id) stackTrace:(NSException *)e;
-
-@end
-
-@interface ANTLRIllegalArgumentException : ANTLRRuntimeException {
-}
-
-+ (id) newException;
-+ (id) newException:(NSString *)aReason;
-+ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-- (id) init;
-- (id)init:(NSString *)aReason;
-- (id)init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-@end
-
-@interface ANTLRIllegalStateException : ANTLRRuntimeException {
-}
-
-+ (id) newException;
-+ (id) newException:(NSString *)aReason;
-+ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-- (id) init;
-- (id)init:(NSString *)aReason;
-- (id)init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-@end
-
-@interface ANTLRNoSuchElementException : ANTLRRuntimeException {
-}
-
-+ (id) newException;
-+ (id) newException:(NSString *)aReason;
-+ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-- (id) init;
-- (id) init:(NSString *)aReason;
-- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-@end
-
-@interface ANTLRRewriteEarlyExitException : ANTLRRuntimeException {
-}
-
-+ (id) newException;
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-
-@end
-
-@interface ANTLRUnsupportedOperationException : ANTLRRuntimeException {
-}
-
-+ (id) newException:(NSString *)aReason;
-
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason;
-- (id) initWithName:(NSString *)aMsg reason:(NSString *)aCause userInfo:(NSDictionary *)userInfo;
-
-@end
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuntimeException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRRuntimeException.m
deleted file mode 100644
index 827fc43..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRRuntimeException.m
+++ /dev/null
@@ -1,279 +0,0 @@
-//
-//  ANTLRRuntimeException.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/5/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRRuntimeException.h"
-
-
-@implementation ANTLRRuntimeException
-
-+ (id) newException
-{
-    return [[ANTLRRuntimeException alloc] init];
-}
-
-+ (id) newException:(NSString *)aReason
-{
-    return [[ANTLRRuntimeException alloc] init:aReason];
-}
-
-+ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    return [[ANTLRRuntimeException alloc] init:aReason userInfo:aUserInfo];
-}
-
-+ (id) newException:(NSString *)aName reason:(NSString *)aReason;
-{
-    return [[ANTLRRuntimeException alloc] initWithName:aName reason:aReason];
-}
-
-+ (id) newException:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
-{
-    return [[ANTLRRuntimeException alloc] initWithName:aName reason:aReason userInfo:aUserInfo];
-}
-
-
-- (id) init
-{
-    self = [super initWithName:@"ANTLRRuntimeException" reason:@"UnknownException" userInfo:nil];
-    return(self);
-}
-
-- (id) init:(NSString *)aReason
-{
-    self = [super initWithName:(NSString *)@"ANTLRRuntimeException" reason:(NSString *)aReason userInfo:(NSDictionary *)nil];
-    return(self);
-}
-
-- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    self = [super initWithName:@"ANTLRRuntimeException" reason:aReason userInfo:aUserInfo];
-    return(self);
-}
-
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason
-{
-    self = [super initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)nil];
-    return(self);
-}
-
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    self = [super initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo];
-    return(self);
-}
-
-- (NSString *) Description
-{
-    return [super reason];
-}
-
-- (id) stackTrace:(NSException *)e
-{
-    NSArray *addrs = [e callStackReturnAddresses];
-    NSArray *trace = [e callStackSymbols];
-    
-    for (NSString *traceStr in trace) {
-        NSLog( @"%@", traceStr);
-        // TODO: remove special after testing
-        if ([traceStr hasPrefix:@"main("] > 0)
-            return traceStr;
-        if (![traceStr hasPrefix:@"org.stringtemplate"])
-            return traceStr;
-    }
-    return trace;    
-}
-
-@end
-
-@implementation ANTLRIllegalArgumentException
-
-+ (id) newException
-{
-    return [[ANTLRIllegalArgumentException alloc] init];
-}
-
-+ (id) newException:(NSString *)aReason
-{
-    return [[ANTLRIllegalArgumentException alloc] init:aReason];
-}
-
-+ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    return [[ANTLRIllegalArgumentException alloc] init:aReason userInfo:aUserInfo];
-}
-
-- (id) init
-{
-    self = [super initWithName:@"ANTLRIllegalArgumentException" reason:@"UnknownException" userInfo:nil];
-    return(self);
-}
-
-- (id) init:(NSString *)aReason
-{
-    self = [super initWithName:@"ANTLRIllegalArgumentException" reason:(NSString *)aReason userInfo:nil];
-    return(self);
-}
-
-- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    self = [super initWithName:@"ANTLRIllegalArgumentException" reason:aReason userInfo:aUserInfo];
-    return(self);
-}
-
-@end
-
-@implementation ANTLRIllegalStateException
-
-+ (id) newException
-{
-    return [[ANTLRIllegalStateException alloc] init];
-}
-
-+ (id) newException:(NSString *)aReason
-{
-    return [[ANTLRIllegalStateException alloc] init:aReason];
-}
-
-+ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    return [[ANTLRIllegalStateException alloc] init:aReason userInfo:aUserInfo];
-}
-
-- (id) init
-{
-    self = [super initWithName:@"ANTLRIllegalStateException" reason:@"UnknownException" userInfo:nil];
-    return(self);
-}
-
-- (id) init:(NSString *)aReason
-{
-    self = [super initWithName:@"ANTLRIllegalStateException" reason:(NSString *)aReason userInfo:nil];
-    return(self);
-}
-
-- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    self = [super initWithName:@"ANTLRIllegalStateException" reason:aReason userInfo:aUserInfo];
-    return(self);
-}
-
-@end
-
-@implementation ANTLRNoSuchElementException
-
-+ (id) newException
-{
-    return [[ANTLRNoSuchElementException alloc] init];
-}
-
-+ (id) newException:(NSString *)aReason
-{
-    return [[ANTLRNoSuchElementException alloc] init:aReason];
-}
-
-+ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    return [[ANTLRNoSuchElementException alloc] init:aReason userInfo:(NSDictionary *)aUserInfo];
-}
-
-- (id) init
-{
-    self = [super initWithName:@"ANTLRNoSuchElementException" reason:@"UnknownException" userInfo:nil];
-    return(self);
-}
-
-- (id) init:(NSString *)aReason
-{
-    self = [super initWithName:@"ANTLRNoSuchElementException" reason:(NSString *)aReason userInfo:(NSDictionary *)nil];
-    return(self);
-}
-
-- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    self = [super initWithName:@"ANTLRNoSuchElementException" reason:aReason userInfo:aUserInfo];
-    return(self);
-}
-
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    self = [super initWithName:aName reason:aReason userInfo:aUserInfo];
-    return(self);
-}
-
-@end
-
-@implementation ANTLRRewriteEarlyExitException
-
-+ (id) newException
-{
-	return [[self alloc] init];
-}
-
-- (id) init
-{
-	self = [super initWithName:@"RewriteEarlyExitException" reason:nil userInfo:nil];
-	return self;
-}
-
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
-{
-    self = [super initWithName:aName reason:aReason userInfo:aUserInfo];
-    return(self);
-}
-
-- (NSString *) description
-{
-	return [self name];
-}
-
-@end
-
-@implementation ANTLRUnsupportedOperationException
-
-+ (id) newException:(NSString *)aReason
-{
-    return [[ANTLRRuntimeException alloc] initWithName:@"Unsupported Operation Exception" reason:aReason userInfo:nil];
-}
-
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason
-{
-    self=[super initWithName:aName reason:aReason userInfo:nil];
-    return self;
-}
-
-- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)userInfo
-{
-    self=[super initWithName:aName reason:aReason userInfo:userInfo];
-    return self;
-}
-
-@end
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRStreamEnumerator.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRStreamEnumerator.h
deleted file mode 100644
index 9e102f2..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRStreamEnumerator.h
+++ /dev/null
@@ -1,48 +0,0 @@
-//
-//  ANTLRStreamEnumertor.h
-//  ANTLR
-//
-//  Created by Ian Michell on 29/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "AMutableArray.h"
-
-@interface ANTLRStreamEnumerator : NSEnumerator 
-{
-	NSInteger i;
-	id eof;
-	AMutableArray *nodes;
-}
-
--(id) initWithNodes:(AMutableArray *) n andEOF:(id) obj;
--(BOOL) hasNext;
-
-@property NSInteger i;
-@property (retain) id eof;
-@property (retain) AMutableArray *nodes;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRStreamEnumerator.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRStreamEnumerator.m
deleted file mode 100644
index 103646e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRStreamEnumerator.m
+++ /dev/null
@@ -1,77 +0,0 @@
-//
-//  ANTLRStreamEnumertor.m
-//  ANTLR
-//
-//  Created by Ian Michell on 29/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRStreamEnumerator.h"
-
-
-@implementation ANTLRStreamEnumerator
-
--(id) init
-{
-	self = [super init];
-	if (self)
-	{
-		i = 0;
-	}
-	return self;
-}
-
--(id) initWithNodes:(AMutableArray *) n andEOF:(id) obj
-{
-	self = [self init];
-	if (self)
-	{
-		nodes = n;
-		eof = obj;
-	}
-	return self;
-}
-
--(BOOL) hasNext
-{
-	return i < [nodes count];
-}
-
--(id) nextObject
-{
-	NSUInteger current = i;
-	i++;
-	if (current < [nodes count])
-	{
-		return [nodes objectAtIndex:current];
-	}
-	return eof;
-}
-
-@synthesize i;
-@synthesize eof;
-@synthesize nodes;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRStringStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRStringStream.h
deleted file mode 100644
index e004a15..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRStringStream.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCharStream.h"
-#import "ANTLRCharStreamState.h"
-#import "ANTLRPtrBuffer.h"
-
-@interface ANTLRStringStream : NSObject < ANTLRCharStream > {
-	NSString *data;
-	NSInteger n;
-	NSInteger index;
-	NSUInteger line;
-	NSUInteger charPositionInLine;
-	NSInteger markDepth;
-	ANTLRPtrBuffer *markers;
-	NSInteger lastMarker;
-	NSString *name;
-    ANTLRCharStreamState *charState;
-}
-
-+ newANTLRStringStream;
-
-+ newANTLRStringStream:(NSString *)aString;
-
-+ newANTLRStringStream:(char *)myData Count:(NSInteger)numBytes;
-
-- (id) init;
-
-// this initializer copies the string
-- (id) initWithString:(NSString *) theString;
-
-// This is the preferred constructor as no data is copied
-- (id) initWithStringNoCopy:(NSString *) theString;
-
-- (id) initWithData:(char *)myData Count:(NSInteger)numBytes;
-
-- (void) dealloc;
-
-- (id) copyWithZone:(NSZone *)aZone;
-
-// reset the stream's state, but keep the data to feed off
-- (void) reset;
-// consume one character from the stream
-- (void) consume;
-
-// look ahead i characters
-- (NSInteger) LA:(NSInteger) i;
-- (NSInteger) LT:(NSInteger) i;
-
-// total length of the input data
-- (NSInteger) size;
-
-// seek and rewind in the stream
-- (NSInteger) mark;
-- (void) rewind:(NSInteger) marker;
-- (void) rewind;
-- (void) release:(NSInteger) marker;
-- (void) seek:(NSInteger) index;
-
-// provide the streams data (e.g. for tokens using indices)
-- (NSString *) substring:(NSInteger)startIndex To:(NSInteger)stopIndex;
-- (NSString *) substringWithRange:(NSRange) theRange;
-
-- (ANTLRPtrBuffer *)getMarkers;
-- (void) setMarkers:(ANTLRPtrBuffer *)aMarkerList;
-
-- (NSString *)getSourceName;
-
-- (NSString *)toString;
-
-// accessors to the raw data of this stream
-
-@property (retain) NSString *data;
-@property (assign) NSInteger index;
-@property (assign) NSInteger n;
-@property (assign) NSUInteger line;
-@property (assign) NSUInteger charPositionInLine;
-@property (assign) NSInteger markDepth;
-@property (retain) ANTLRPtrBuffer *markers;
-@property (assign) NSInteger lastMarker;
-@property (retain) NSString *name;
-@property (retain) ANTLRCharStreamState *charState;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRStringStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRStringStream.m
deleted file mode 100644
index 1e1da4d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRStringStream.m
+++ /dev/null
@@ -1,387 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRStringStream.h"
-
-extern NSInteger debug;
-
-@implementation ANTLRStringStream
-
-@synthesize data;
-@synthesize n;
-@synthesize index;
-@synthesize line;
-@synthesize charPositionInLine;
-@synthesize markDepth;
-@synthesize markers;
-@synthesize lastMarker;
-@synthesize name;
-@synthesize charState;
-
-+ newANTLRStringStream
-{
-    return [[ANTLRStringStream alloc] init];
-}
-
-+ newANTLRStringStream:(NSString *)aString;
-{
-    return [[ANTLRStringStream alloc] initWithString:aString];
-}
-
-
-+ newANTLRStringStream:(char *)myData Count:(NSInteger)numBytes;
-{
-    return [[ANTLRStringStream alloc] initWithData:myData Count:numBytes];
-}
-
-
-- (id) init
-{
-	if ((self = [super init]) != nil) {
-        n = 0;
-        index = 0;
-        line = 1;
-        charPositionInLine = 0;
-        markDepth = 0;
-		markers = [ANTLRPtrBuffer newANTLRPtrBufferWithLen:10];
-        [markers retain];
-        [markers addObject:[NSNull null]]; // ANTLR generates code that assumes markers to be 1-based,
-        charState = [[ANTLRCharStreamState newANTLRCharStreamState] retain];
-	}
-	return self;
-}
-
-- (id) initWithString:(NSString *) theString
-{
-	if ((self = [super init]) != nil) {
-		//[self setData:[NSString stringWithString:theString]];
-        data = [theString retain];
-        n = [data length];
-        index = 0;
-        line = 1;
-        charPositionInLine = 0;
-        markDepth = 0;
-		markers = [[ANTLRPtrBuffer newANTLRPtrBufferWithLen:10] retain];
-        [markers addObject:[NSNull null]]; // ANTLR generates code that assumes markers to be 1-based,
-        charState = [[ANTLRCharStreamState newANTLRCharStreamState] retain];
-	}
-	return self;
-}
-
-- (id) initWithStringNoCopy:(NSString *) theString
-{
-	if ((self = [super init]) != nil) {
-		//[self setData:theString];
-        data = [theString retain];
-        n = [data length];
-        index = 0;
-        line = 1;
-        charPositionInLine = 0;
-        markDepth = 0;
-		markers = [ANTLRPtrBuffer newANTLRPtrBufferWithLen:100];
-        [markers retain];
-        [markers addObject:[NSNull null]]; // ANTLR generates code that assumes markers to be 1-based,
-        charState = [[ANTLRCharStreamState newANTLRCharStreamState] retain];
-	}
-	return self;
-}
-
-- (id) initWithData:(char *)myData Count:(NSInteger)numBytes
-{
-    if ((self = [super init]) != nil) {
-        data = [NSString stringWithCString:myData encoding:NSASCIIStringEncoding];
-        n = numBytes;
-        index = 0;
-        line = 1;
-        charPositionInLine = 0;
-        markDepth = 0;
-		markers = [ANTLRPtrBuffer newANTLRPtrBufferWithLen:100];
-        [markers retain];
-        [markers addObject:[NSNull null]]; // ANTLR generates code that assumes markers to be 1-based,
-        charState = [[ANTLRCharStreamState newANTLRCharStreamState] retain];
-    }
-    return( self );
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRStringStream" );
-#endif
-    if ( markers && [markers count] ) {
-        [markers removeAllObjects];
-        [markers release];
-        markers = nil;
-    }
-    if ( data ) {
-        [data release];
-        data = nil;
-    }
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    ANTLRStringStream *copy;
-	
-    copy = [[[self class] allocWithZone:aZone] init];
-    //    copy = [super copyWithZone:aZone]; // allocation occurs here
-    if ( data != nil )
-        copy.data = [self.data copyWithZone:aZone];
-    copy.n = n;
-    copy.index = index;
-    copy.line = line;
-    copy.charPositionInLine = charPositionInLine;
-    copy.markDepth = markDepth;
-    if ( markers != nil )
-        copy.markers = [markers copyWithZone:nil];
-    copy.lastMarker = lastMarker;
-    if ( name != nil )
-        copy.name = [self.name copyWithZone:aZone];
-    return copy;
-}
-
-// reset the streams charState
-// the streams content is not reset!
-- (void) reset
-{
-	index = 0;
-	line = 1;
-	charPositionInLine = 0;
-	markDepth = 0;
-    if ( markers && [markers count] )
-        [markers removeAllObjects];
-    [markers addObject:[NSNull null]];  // ANTLR generates code that assumes markers to be 1-based,
-                                        // thus the initial null in the array!
-}
-
-// read one character off the stream, tracking line numbers and character positions
-// automatically.
-// Override this in subclasses if you want to avoid the overhead of automatic line/pos
-// handling. Do not call super in that case.
-- (void) consume 
-{
-	if ( index < n ) {
-		charPositionInLine++;
-		if ( [data characterAtIndex:index] == '\n' ) {
-			line++;
-			charPositionInLine=0;
-		}
-		index++;
-	}
-}
-
-// implement the lookahead method used in lexers
-- (NSInteger) LA:(NSInteger) i 
-{
-    NSInteger c;
-    if ( i == 0 )
-        return 0; // undefined
-    if ( i < 0 ) {
-        i++;
-        if ( index+i-1 < 0 ) {
-		    return ANTLRCharStreamEOF;
-		}
-	}
-    if ( (index+i-1) >= n ) {
-		return ANTLRCharStreamEOF;
-	}
-    c = [data characterAtIndex:index+i-1];
-	return (NSInteger)c;
-}
-
-- (NSInteger) LT:(NSInteger)i
-{
-    return [self LA:i];
-}
-
-- (NSInteger) size 
-{
-	return n;
-}
-
-// push the current charState of the stream onto a stack
-// returns the depth of the stack, to be used as a marker to rewind the stream.
-// Note: markers are 1-based!
-- (NSInteger) mark 
-{
-    if (debug > 1) NSLog(@"mark entry -- markers=%x, markDepth=%d\n", (int)markers, markDepth);
-    if ( markers == nil ) {
-        markers = [ANTLRPtrBuffer newANTLRPtrBufferWithLen:100];
-		[markers addObject:[NSNull null]]; // ANTLR generates code that assumes markers to be 1-based,
-        markDepth = markers.ptr;
-    }
-    markDepth++;
-	ANTLRCharStreamState *State = nil;
-	if ( (markDepth) >= [markers count] ) {
-        if ( markDepth > 1 ) {
-            State = [ANTLRCharStreamState newANTLRCharStreamState];
-            [State retain];
-        }
-        if ( markDepth == 1 )
-            State = charState;
-		[markers insertObject:State atIndex:markDepth];
-        if (debug > 1) NSLog(@"mark save State %x at %d, index=%d, line=%d, charPositionInLine=%d\n", (NSUInteger)State, markDepth, State.index, State.line, State.charPositionInLine);
-	}
-	else {
-        if (debug > 1) NSLog(@"mark retrieve markers=%x markDepth=%d\n", (NSUInteger)markers, markDepth);
-        State = [markers objectAtIndex:markDepth];
-        [State retain];
-        State = (ANTLRCharStreamState *)[markers objectAtIndex:markDepth];
-        if (debug > 1) NSLog(@"mark retrieve charState %x from %d, index=%d, line=%d, charPositionInLine=%d\n", (NSUInteger)State, markDepth, State.index, State.line, State.charPositionInLine);
-	}
-    State.index = index;
-	State.line = line;
-	State.charPositionInLine = charPositionInLine;
-	lastMarker = markDepth;
-    if (debug > 1) NSLog(@"mark exit -- markers=%x, charState=%x, index=%d, line=%d, charPositionInLine=%d\n", (NSUInteger)markers, (NSUInteger)State, State.index, State.line, State.charPositionInLine);
-	return markDepth;
-}
-
-- (void) rewind:(NSInteger) marker 
-{
-    ANTLRCharStreamState *State;
-    if (debug > 1) NSLog(@"rewind entry -- markers=%x marker=%d\n", (NSUInteger)markers, marker);
-    if ( marker == 1 )
-        State = charState;
-    else
-        State = (ANTLRCharStreamState *)[markers objectAtIndex:marker];
-    if (debug > 1) NSLog(@"rewind entry -- marker=%d charState=%x, index=%d, line=%d, charPositionInLine=%d\n", marker, (NSUInteger)charState, charState.index, charState.line, charState.charPositionInLine);
-	// restore stream charState
-	[self seek:State.index];
-	line = State.line;
-	charPositionInLine = charState.charPositionInLine;
-	[self release:marker];
-    if (debug > 1) NSLog(@"rewind exit -- marker=%d charState=%x, index=%d, line=%d, charPositionInLine=%d\n", marker, (NSUInteger)charState, charState.index, charState.line, charState.charPositionInLine);
-}
-
-- (void) rewind
-{
-	[self rewind:lastMarker];
-}
-
-// remove stream states on top of 'marker' from the marker stack
-// returns the new markDepth of the stack.
-// Note: unfortunate naming for Objective-C, but to keep close to the Java target this is named release:
-- (void) release:(NSInteger) marker 
-{
-	// unwind any other markers made after marker and release marker
-	markDepth = marker;
-	markDepth--;
-    if (debug > 1) NSLog(@"release:marker= %d, markDepth = %d\n", marker, markDepth);
-}
-
-// when seeking forward we must handle character position and line numbers.
-// seeking backward already has the correct line information on the markers stack, 
-// so we just take it from there.
-- (void) seek:(NSInteger) anIndex 
-{
-    if (debug > 1) NSLog(@"seek entry -- seekIndex=%d index=%d\n", anIndex, index);
-	if ( anIndex <= index ) {
-		index = anIndex; // just jump; don't update stream charState (line, ...)
-        if (debug > 1) NSLog(@"seek exit return -- index=%d index=%d\n", anIndex, index);
-		return;
-	}
-	// seek forward, consume until index hits anIndex
-	while ( index < anIndex ) {
-		[self consume];
-	}
-    if (debug > 1) NSLog(@"seek exit end -- index=%d index=%d\n", anIndex, index);
-}
-
-// get a substring from our raw data.
-- (NSString *) substring:(NSInteger)startIndex To:(NSInteger)stopIndex 
-{
-    NSRange theRange = NSMakeRange(startIndex, stopIndex-startIndex);
-	return [data substringWithRange:theRange];
-}
-
-// get a substring from our raw data.
-- (NSString *) substringWithRange:(NSRange) theRange 
-{
-	return [data substringWithRange:theRange];
-}
-
-
-- (ANTLRPtrBuffer *)getMarkers
-{
-    return markers;
-}
-
-- (void) setMarkers:(ANTLRPtrBuffer *)aMarkerList
-{
-    markers = aMarkerList;
-}
-
-- (NSString *)getSourceName
-{
-    return name;
-}
-
-- (void) setSourceName:(NSString *)aName
-{
-    if ( name != aName ) {
-        if ( name ) [name release];
-        if ( aName ) [aName retain];
-        name = aName;
-    }
-}
-
-
-- (ANTLRCharStreamState *)getCharState
-{
-    return charState;
-}
-
-- (void) setCharState:(ANTLRCharStreamState *)aCharState
-{
-    charState = aCharState;
-}
-
-- (NSString *)toString
-{
-    return [NSString stringWithString:data];
-}
-
-//---------------------------------------------------------- 
-//  data 
-//---------------------------------------------------------- 
-- (NSString *) getData
-{
-    return data; 
-}
-
-- (void) setData: (NSString *) aData
-{
-    if (data != aData) {
-        if ( data ) [data release];
-        data = [NSString stringWithString:aData];
-        [data retain];
-    }
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRSymbolStack.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRSymbolStack.h
deleted file mode 100644
index 169df9f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRSymbolStack.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//
-//  ANTLRSymbolStack.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseStack.h"
-// #import "ANTLRSymbolScope.h"
-
-//#define GLOBAL_SCOPE       0
-//#define LOCAL_SCOPE        1
-#define HASHSIZE         101
-#define HBUFSIZE      0x2000
-
-@interface ANTLRSymbolsScope : NSObject
-{
-    
-}
-
-+ (ANTLRSymbolsScope *)newANTLRSymbolsScope;
-
-- (id)init;
-@end
-
-
-@interface ANTLRSymbolStack : ANTLRBaseStack {
-}
-
-// Contruction/Destruction
-+(ANTLRSymbolStack *)newANTLRSymbolStack;
-+(ANTLRSymbolStack *)newANTLRSymbolStackWithLen:(NSInteger)cnt;
--(id)init;
--(id)initWithLen:(NSInteger)cnt;
--(void)dealloc;
-
-// Instance Methods
-- (id) copyWithZone:(NSZone *)aZone;
-/* clear -- reinitialize the maplist array */
-
--(ANTLRSymbolsScope *)getHashMapEntry:(NSInteger)idx;
-
--(ANTLRSymbolsScope **)getHashMap;
-
--(ANTLRSymbolsScope *) pop;
-
-- (void) insertObject:(ANTLRSymbolsScope *)aScope atIndex:(NSInteger)idx;
-- (ANTLRSymbolsScope *)objectAtIndex:(NSInteger)idx;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRSymbolStack.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRSymbolStack.m
deleted file mode 100644
index 3c43ebb..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRSymbolStack.m
+++ /dev/null
@@ -1,126 +0,0 @@
-//
-//  ANTLRSymbolStack.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/9/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#define SUCCESS (0)
-#define FAILURE (-1)
-
-#import "ANTLRSymbolStack.h"
-#import "ANTLRTree.h"
-
-
-@implementation ANTLRSymbolsScope
-
-+ (ANTLRSymbolsScope *)newANTLRSymbolsScope
-{
-    return( [[ANTLRSymbolsScope alloc] init] );
-}
-
-- (id)init
-{
-    if ((self = [super init]) != nil) {
-    }
-    return (self);
-}
-
-@end
-
-/*
- * Start of ANTLRSymbolStack
- */
-@implementation ANTLRSymbolStack
-
-+(ANTLRSymbolStack *)newANTLRSymbolStack
-{
-    return [[ANTLRSymbolStack alloc] initWithLen:30];
-}
-
-+(ANTLRSymbolStack *)newANTLRSymbolStackWithLen:(NSInteger)cnt
-{
-    return [[ANTLRSymbolStack alloc] initWithLen:cnt];
-}
-
--(id)init
-{
-	if ((self = [super init]) != nil) {
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)cnt
-{
-	if ((self = [super initWithLen:cnt]) != nil) {
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRSymbolStack" );
-#endif
-	[super dealloc];
-}
-
-- (id) copyWithZone:(NSZone *)aZone
-{
-    return [super copyWithZone:aZone];
-}
-
--(ANTLRSymbolsScope *)getHashMapEntry:(NSInteger)idx
-{
-	return( (ANTLRSymbolsScope *)[super objectAtIndex:idx] );
-}
-
--(ANTLRSymbolsScope **)getHashMap
-{
-	return( (ANTLRSymbolsScope **)ptrBuffer );
-}
-
--(ANTLRSymbolsScope *) pop
-{
-    return (ANTLRSymbolsScope *)[super pop];
-}
-
-- (void) insertObject:(ANTLRSymbolsScope *)aRule atIndex:(NSInteger)idx
-{
-    if ( aRule != ptrBuffer[idx] ) {
-        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
-        [aRule retain];
-    }
-    ptrBuffer[idx] = aRule;
-}
-
-- (ANTLRSymbolsScope *)objectAtIndex:(NSInteger)idx
-{
-    return (ANTLRSymbolsScope *)[super objectAtIndex:idx];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRToken+DebuggerSupport.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRToken+DebuggerSupport.h
deleted file mode 100644
index 659e763..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRToken+DebuggerSupport.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-//  ANTLRToken+DebuggerSupport.h
-//  ANTLR
-//
-//  Created by Kay Röpke on 03.12.2006.
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRToken.h"
-#import "ANTLRCommonToken.h"
-
-@interface ANTLRCommonToken(DebuggerSupport)
-
-- (NSString *)debuggerDescription;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRToken+DebuggerSupport.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRToken+DebuggerSupport.m
deleted file mode 100644
index 114b236..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRToken+DebuggerSupport.m
+++ /dev/null
@@ -1,61 +0,0 @@
-//
-//  ANTLRToken+DebuggerSupport.m
-//  ANTLR
-//
-//  Created by Kay Röpke on 03.12.2006.
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRToken+DebuggerSupport.h"
-
-
-@implementation ANTLRCommonToken(DebuggerSupport)
-
-- (NSString *)debuggerDescription
-{
-	NSString *_text = self.text;
-	NSMutableString *escapedText;
-	if (_text) {
-		escapedText = [_text mutableCopy];
-		NSRange wholeString = NSMakeRange(0,[escapedText length]);
-		[escapedText replaceOccurrencesOfString:@"%" withString:@"%25" options:0 range:wholeString];
-		[escapedText replaceOccurrencesOfString:@"\n" withString:@"%0A" options:0 range:wholeString];
-		[escapedText replaceOccurrencesOfString:@"\r" withString:@"%0D" options:0 range:wholeString];
-	} else {
-		escapedText = [NSMutableString stringWithString:@""];
-	}
-	// format is tokenIndex, type, channel, line, col, (escaped)text
-	return [NSString stringWithFormat:@"%u %d %u %u %u \"%@", 
-		[self getTokenIndex],
-		self.type,
-		self.channel,
-		self.line,
-		self.charPositionInLine,
-		escapedText
-		];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRToken.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRToken.h
deleted file mode 100644
index 04724bd..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRToken.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-
-#ifndef DEBUG_DEALLOC
-#define DEBUG_DEALLOC
-#endif
-
-typedef enum {
-    ANTLRTokenTypeEOF = -1,
-    ANTLRTokenTypeInvalid,
-    ANTLRTokenTypeEOR,
-    ANTLRTokenTypeDOWN,
-    ANTLRTokenTypeUP,
-    ANTLRTokenTypeMIN
-} ANTLRTokenType;
-
-typedef enum {
-    ANTLRTokenChannelDefault = 0,
-    ANTLRTokenChannelHidden = 99
-} ANTLRTokenChannel;
-
-#define HIDDEN 99
-
-@protocol ANTLRToken < NSObject, NSCopying >
-
-@property (retain, getter = text, setter = setText:) NSString *text;
-@property (assign) NSInteger type;
-@property (assign) NSUInteger line;
-@property (assign) NSUInteger charPositionInLine;
-
-// The singleton eofToken instance.
-+ (id<ANTLRToken>) eofToken;
-// The default channel for this class of Tokens
-+ (ANTLRTokenChannel) defaultChannel;
-
-// provide hooks to explicitly set the text, as opposed to using the indices into the CharStream
-- (NSString *) text;
-- (void) setText:(NSString *)theText;
-
-- (NSInteger)type;
-- (void) setType: (NSInteger) aType;
-
-// ANTLR v3 provides automatic line and position tracking. Subclasses do not need to
-// override these, if they do not want to store line/pos tracking information
-- (NSUInteger)line;
-- (void) setLine: (NSUInteger) aLine;
-
-- (NSUInteger)charPositionInLine;
-- (void) setCharPositionInLine:(NSUInteger)aCharPositionInLine;
-
-// explicitly change the channel this Token is on. The default parser implementation
-// just sees the defaultChannel.
-// A common idiom is to put whitespace tokens on channel 99.
-- (NSUInteger)channel;
-- (void) setChannel: (NSUInteger) aChannel;
-
-// the index of this Token into the TokenStream
-- (NSInteger) getTokenIndex;
-- (void) setTokenIndex: (NSInteger) aTokenIndex;
-- (NSString *)toString;
-
-@end
-
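The ANTLRToken protocol above is the minimal surface a recognizer needs from a token: text, type, line, column, channel, and the index into the token stream. A small sketch of how those accessors are typically consumed; the helper below is illustrative only, not part of the framework:

    // Illustrative helper; assumes only the ANTLRToken.h declarations above.
    static NSString *describeToken(id<ANTLRToken> t)
    {
        if ( t.type == ANTLRTokenTypeEOF )
            return @"<EOF>";
        if ( [t channel] == ANTLRTokenChannelHidden )   // e.g. whitespace parked on channel 99
            return [NSString stringWithFormat:@"(hidden) %@", t.text];
        return [NSString stringWithFormat:@"#%ld %lu:%lu %@",
                (long)[t getTokenIndex], (unsigned long)t.line,
                (unsigned long)t.charPositionInLine, t.text];
    }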
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenRewriteStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenRewriteStream.h
deleted file mode 100644
index 7b97168..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenRewriteStream.h
+++ /dev/null
@@ -1,170 +0,0 @@
-//
-//  ANTLRTokenRewriteStream.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/19/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTokenStream.h"
-#import "ANTLRLinkBase.h"
-#import "ANTLRHashMap.h"
-#import "ANTLRMapElement.h"
-#import "ANTLRTokenSource.h"
-
-// Define the rewrite operation hierarchy
-
-@interface ANTLRRewriteOperation : ANTLRCommonTokenStream
-{
-/** What rwIndex into rewrites List are we? */
-NSInteger instructionIndex;
-/** Token buffer rwIndex. */
-NSInteger rwIndex;
-NSString *text;
-}
-
-@property (getter=getInstructionIndex, setter=setInstructionIndex:) NSInteger instructionIndex;
-@property (assign) NSInteger rwIndex;
-@property (retain, getter=text, setter=setText:) NSString *text;
-
-+ (ANTLRRewriteOperation *) newANTLRRewriteOperation:(NSInteger)anIndex Text:(NSString *)text;
-
-- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText;
-
-/** Execute the rewrite operation by possibly adding to the buffer.
- *  Return the rwIndex of the next token to operate on.
- */
-- (NSInteger) execute:(NSString *)buf;
-
-- (NSString *)toString;
-- (NSInteger) indexOf:(char)aChar inString:(NSString *)aString;
-@end
-
-@interface ANTLRInsertBeforeOp : ANTLRRewriteOperation {
-}
-
-+ (ANTLRInsertBeforeOp *) newANTLRInsertBeforeOp:(NSInteger)anIndex Text:(NSString *)theText;
-- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText;
-
-@end
-
-/** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
- *  instructions.
- */
-@interface ANTLRReplaceOp : ANTLRRewriteOperation {
-    NSInteger lastIndex;
-}
-
-@property (assign) NSInteger lastIndex;
-
-+ (ANTLRReplaceOp *) newANTLRReplaceOp:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString*)theText;
-- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-
-- (NSInteger) execute:(NSString *)buf;
-- (NSString *)toString;
-
-@end
-
-@interface ANTLRDeleteOp : ANTLRReplaceOp {
-}
-+ (ANTLRDeleteOp *) newANTLRDeleteOp:(NSInteger)from ToIndex:(NSInteger)to;
-
-- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to;
-
-- (NSString *)toString;
-
-@end
-
-
-@interface ANTLRTokenRewriteStream : ANTLRCommonTokenStream {
-/** You may have multiple, named streams of rewrite operations.
- *  I'm calling these things "programs."
- *  Maps String (name) -> rewrite (List)
- */
-ANTLRHashMap *programs;
-
-/** Map String (program name) -> Integer rwIndex */
-ANTLRHashMap *lastRewriteTokenIndexes;
-}
-
-@property (retain, getter=getPrograms, setter=setPrograms:) ANTLRHashMap *programs;
-@property (retain, getter=getLastRewriteTokenIndexes, setter=setLastRewriteTokenIndexes:) ANTLRHashMap *lastRewriteTokenIndexes;
-
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream;
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream:(id<ANTLRTokenSource>) aTokenSource;
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream:(id<ANTLRTokenSource>) aTokenSource Channel:(NSInteger)aChannel;
-
-- (id) init;
-- (id)initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource;
-- (id)initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource Channel:(NSInteger)aChannel;
-
-- (ANTLRHashMap *)getPrograms;
-- (void)setPrograms:(ANTLRHashMap *)aProgList;
-
-- (void) rollback:(NSInteger)instructionIndex;
-- (void) rollback:(NSString *)programName Index:(NSInteger)anInstructionIndex;
-- (void) deleteProgram;
-- (void) deleteProgram:(NSString *)programName;
-- (void) insertAfterToken:(id<ANTLRToken>)t Text:(NSString *)theText;
-- (void) insertAfterIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) insertAfterProgNam:(NSString *)programName Index:(NSInteger)anIndex Text:(NSString *)theText;
-
-
-- (void) insertBeforeToken:(id<ANTLRToken>)t Text:(NSString *)theText;
-- (void) insertBeforeIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) insertBeforeProgName:(NSString *)programName Index:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) replaceFromIndex:(NSInteger)anIndex Text:(NSString *)theText;
-- (void) replaceFromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-- (void) replaceFromToken:(id<ANTLRToken>)indexT Text:(NSString *)theText;
-- (void) replaceFromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to Text:(NSString *)theText;
-- (void) replaceProgNam:(NSString *)programName Token:(id<ANTLRToken>)from Token:(id<ANTLRToken>)to Text:(NSString *)theText;
-- (void) replaceProgNam:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
-- (void) delete:(NSInteger)anIndex;
-- (void) delete:(NSInteger)from ToIndex:(NSInteger)to;
-- (void) deleteToken:(id<ANTLRToken>)indexT;
-- (void) deleteFromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to;
-- (void) delete:(NSString *)programName FromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to;
-- (void) delete:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to;
-- (NSInteger)getLastRewriteTokenIndex;
-- (NSInteger)getLastRewriteTokenIndex:(NSString *)programName;
-- (void)setLastRewriteTokenIndex:(NSString *)programName Index:(NSInteger)anInt;
-- (ANTLRHashMap *) getProgram:(NSString *)name;
-- (ANTLRHashMap *) initializeProgram:(NSString *)name;
-- (NSString *)toOriginalString;
-- (NSString *)toOriginalString:(NSInteger)start End:(NSInteger)end;
-- (NSString *)toString;
-- (NSString *)toString:(NSString *)programName;
-- (NSString *)toStringFromStart:(NSInteger)start ToEnd:(NSInteger)end;
-- (NSString *)toString:(NSString *)programName FromStart:(NSInteger)start ToEnd:(NSInteger)end;
-- (ANTLRHashMap *)reduceToSingleOperationPerIndex:(ANTLRHashMap *)rewrites;
-- (ANTLRHashMap *)getKindOfOps:(ANTLRHashMap *)rewrites KindOfClass:(Class)kind;
-- (ANTLRHashMap *)getKindOfOps:(ANTLRHashMap *)rewrites KindOfClass:(Class)kind Index:(NSInteger)before;
-- (NSString *)catOpText:(id)a PrevText:(id)b;
-- (NSMutableString *)toDebugString;
-- (NSMutableString *)toDebugStringFromStart:(NSInteger)start ToEnd:(NSInteger)end;
-                    
-@end
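Like the Java runtime's TokenRewriteStream, the class declared above queues named lists of insert/replace/delete operations ("programs") and only applies them when one of the toString variants is called; the underlying token buffer is never modified. A minimal usage sketch, where aLexer stands for any id<ANTLRTokenSource> and the indexes and text are illustrative:

    ANTLRTokenRewriteStream *tokens =
        [ANTLRTokenRewriteStream newANTLRTokenRewriteStream:aLexer];

    // Queue edits against token indexes in the default program.
    [tokens insertBeforeIndex:0 Text:@"header\n"];
    [tokens replaceFromIndex:5 ToIndex:7 Text:@"replacement"];
    [tokens delete:9 ToIndex:9];

    NSString *rewritten = [tokens toString];          // queued edits applied here
    NSString *untouched = [tokens toOriginalString];  // ignores the queued edits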
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenRewriteStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenRewriteStream.m
deleted file mode 100644
index e94f885..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenRewriteStream.m
+++ /dev/null
@@ -1,692 +0,0 @@
-//
-//  ANTLRTokenRewriteStream.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/19/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTokenRewriteStream.h"
-#import "ANTLRRuntimeException.h"
-
-static NSString *DEFAULT_PROGRAM_NAME = @"default";
-static NSInteger PROGRAM_INIT_SIZE = 100;
-static NSInteger MIN_TOKEN_INDEX = 0;
-
-extern NSInteger debug;
-
-// Define the rewrite operation hierarchy
-
-@implementation ANTLRRewriteOperation
-
-@synthesize instructionIndex;
-@synthesize rwIndex;
-@synthesize text;
-
-+ (ANTLRRewriteOperation *) newANTLRRewriteOperation:(NSInteger)anIndex Text:(NSString *)theText
-{
-    return [[ANTLRRewriteOperation alloc] initWithIndex:anIndex Text:theText];
-}
-    
-- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText
-{
-    if ((self = [super init]) != nil) {
-        rwIndex = anIndex;
-        text = theText;
-    }
-    return self;
-}
-
-/** Execute the rewrite operation by possibly adding to the buffer.
- *  Return the rwIndex of the next token to operate on.
- */
-- (NSInteger) execute:(NSString *)buf
-{
-    return rwIndex;
-}
-    
-- (NSString *)toString
-{
-    NSString *opName = [self className];
-    int $index = [self indexOf:'$' inString:opName];
-    opName = [opName substringWithRange:NSMakeRange($index+1, [opName length]-($index+1))];
-    return [NSString stringWithFormat:@"<%@%d:\"%@\">", opName, rwIndex, text];
-}
-
-- (NSInteger) indexOf:(char)aChar inString:(NSString *)aString
-{
-    char indexedChar;
-
-    for( int i = 0; i < [aString length]; i++ ) {
-        indexedChar = [aString characterAtIndex:i];
-        if (indexedChar == aChar) {
-            return i;
-        }
-    }
-    return -1;
-}
-                                                    
-@end
-
-@implementation ANTLRInsertBeforeOp
-
-+ (ANTLRInsertBeforeOp *) newANTLRInsertBeforeOp:(NSInteger) anIndex Text:(NSString *)theText
-{
-    return [[ANTLRInsertBeforeOp alloc] initWithIndex:anIndex Text:theText];
-}
-
-- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText
-{
-    if ((self = [super initWithIndex:anIndex Text:theText]) != nil) {
-        rwIndex = anIndex;
-        text = theText;
-    }
-    return self;
-}
-
-
-- (NSInteger) execute:(NSMutableString *)buf
-{
-    [buf appendString:text];
-    if ( ((ANTLRCommonToken *)[tokens objectAtIndex:rwIndex]).type != ANTLRTokenTypeEOF ) {
-        [buf appendString:[[tokens objectAtIndex:rwIndex] text]];
-    }
-    return rwIndex+1;
-}
-
-@end
-     
-/** I'm going to try replacing range from x..y with (y-x)+1 ANTLRReplaceOp
- *  instructions.
- */
-@implementation ANTLRReplaceOp
-
-@synthesize lastIndex;
-
-+ (ANTLRReplaceOp *) newANTLRReplaceOp:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString*)theText
-{
-    return [[ANTLRReplaceOp alloc] initWithIndex:from ToIndex:to Text:theText];
-}
-
-- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText
-{
-    if ((self = [super initWithIndex:from Text:theText]) != nil) {
-        lastIndex = to;
-    }
-    return self;
-}
- 
- 
-- (NSInteger) execute:(NSMutableString *)buf
-{
-    if ( text!=nil ) {
-        [buf appendString:text];
-    }
-        return lastIndex+1;
-}
-
-- (NSString *)toString
-{
-    return [NSString stringWithFormat:@"<ANTLRReplaceOp@ %d..%d :>%@\n", rwIndex, lastIndex, text];
-}
-
-@end
-
-@implementation ANTLRDeleteOp
-
-+ (ANTLRDeleteOp *) newANTLRDeleteOp:(NSInteger)from ToIndex:(NSInteger)to
-{
-    // super(from To:to, null);
-    return [[ANTLRDeleteOp alloc] initWithIndex:from ToIndex:to];
-}
-
- - (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to
-{
-    if ((self = [super initWithIndex:from ToIndex:to Text:nil]) != nil) {
-        lastIndex = to;
-    }
-    return self;
-}
-     
-- (NSString *)toString
-{
-    return [NSString stringWithFormat:@"<DeleteOp@ %d..%d\n",  rwIndex, lastIndex];
-}
-
-@end
-
-
-@implementation ANTLRTokenRewriteStream
-
-@synthesize programs;
-@synthesize lastRewriteTokenIndexes;
-
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream
-{
-    return [[ANTLRTokenRewriteStream alloc] init];
-}
-
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream:(id<ANTLRTokenSource>) aTokenSource
-{
-    return [[ANTLRTokenRewriteStream alloc] initWithTokenSource:aTokenSource];
-}
-
-+ (ANTLRTokenRewriteStream *)newANTLRTokenRewriteStream:(id<ANTLRTokenSource>) aTokenSource Channel:(NSInteger)aChannel
-{
-    return [[ANTLRTokenRewriteStream alloc] initWithTokenSource:aTokenSource Channel:aChannel];
-}
- 
-- (id) init
-{
-    if ((self = [super init]) != nil) {
-        programs = [ANTLRHashMap newANTLRHashMap];
-        [programs addObject:[ANTLRMapElement newANTLRMapElementWithName:DEFAULT_PROGRAM_NAME Node:[ANTLRHashMap newANTLRHashMapWithLen:PROGRAM_INIT_SIZE]]];
-        lastRewriteTokenIndexes = [ANTLRHashMap newANTLRHashMap];
-    }
-    return self;
-}
- 
-- (id)initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource
-{
-    if ((self = [super init]) != nil) {
-        programs = [ANTLRHashMap newANTLRHashMap];
-        [programs addObject:[ANTLRMapElement newANTLRMapElementWithName:DEFAULT_PROGRAM_NAME Node:[ANTLRHashMap newANTLRHashMapWithLen:PROGRAM_INIT_SIZE]]];
-        lastRewriteTokenIndexes = [ANTLRHashMap newANTLRHashMap];
-        tokenSource = aTokenSource;
-    }
-    return self;
-}
-
-- (id)initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource Channel:(NSInteger)aChannel
-{
-    if ((self = [super init]) != nil) {
-        programs = [ANTLRHashMap newANTLRHashMap];
-        [programs addObject:[ANTLRMapElement newANTLRMapElementWithName:DEFAULT_PROGRAM_NAME Node:[ANTLRHashMap newANTLRHashMapWithLen:PROGRAM_INIT_SIZE]]];
-        lastRewriteTokenIndexes = [ANTLRHashMap newANTLRHashMap];
-        tokenSource = aTokenSource;
-        channel = aChannel;
-    }
-    return self;
-}
- 
-- (ANTLRHashMap *)getPrograms
-{
-    return programs;
-}
- 
-- (void)setPrograms:(ANTLRHashMap *)aProgList
-{
-    programs = aProgList;
-}
-
-- (void) rollback:(NSInteger)instructionIndex
-{
-    [self rollback:DEFAULT_PROGRAM_NAME Index:instructionIndex];
-}
-
-/** Rollback the instruction stream for a program so that
- *  the indicated instruction (via instructionIndex) is no
- *  longer in the stream.  UNTESTED!
- */
-- (void) rollback:(NSString *)programName Index:(NSInteger)anInstructionIndex
-{
-    id object;
-    ANTLRHashMap *is;
-
-    //    AMutableArray *is = [programs get(programName)];
-    is = [self getPrograms];
-    object = [is getName:programName];
-    if ( is != nil ) {
-#pragma warning this has to be fixed
-        [programs insertObject:programName  atIndex:anInstructionIndex];
-    }
-}
-
-- (void) deleteProgram
-{
-    [self deleteProgram:DEFAULT_PROGRAM_NAME];
-}
-
-/** Reset the program so that no instructions exist */
-- (void) deleteProgram:(NSString *)programName
-{
-    [self rollback:programName Index:MIN_TOKEN_INDEX];
-}
-
-- (void) insertAfterToken:(id<ANTLRToken>)t Text:(NSString *)theText
-{
-    [self insertAfterProgNam:DEFAULT_PROGRAM_NAME Index:[t getTokenIndex] Text:theText];
-}
-
-- (void) insertAfterIndex:(NSInteger)anIndex Text:(NSString *)theText
-{
-    [self insertAfterProgNam:DEFAULT_PROGRAM_NAME Index:(NSInteger)anIndex Text:(NSString *)theText];
-}
-
-- (void) insertAfterProgNam:(NSString *)programName Index:(NSInteger)anIndex Text:(NSString *)theText
-{
-    // to insert after, just insert before next rwIndex (even if past end)
-    [self insertBeforeProgName:programName Index:anIndex+1 Text:theText];
-    //addToSortedRewriteList(programName, new InsertAfterOp(rwIndex,text));
-}
-
-
-
-
-
-
-
-
-
-- (void) insertBeforeToken:(id<ANTLRToken>)t Text:(NSString *)theText
-{
-    [self insertBeforeProgName:DEFAULT_PROGRAM_NAME Index:[t getTokenIndex] Text:theText];
-}
-
-- (void) insertBeforeIndex:(NSInteger)anIndex Text:(NSString *)theText
-{
-    [self insertBeforeProgName:DEFAULT_PROGRAM_NAME Index:anIndex Text:theText];
-}
-
-- (void) insertBeforeProgName:(NSString *)programName Index:(NSInteger)rwIndex Text:(NSString *)theText
-{
-    //addToSortedRewriteList(programName, new ANTLRInsertBeforeOp(rwIndex,text));
-    ANTLRRewriteOperation *op = [ANTLRInsertBeforeOp newANTLRInsertBeforeOp:rwIndex Text:theText];
-    ANTLRHashMap *rewrites = [self getProgram:programName];
-    op.instructionIndex = [rewrites count];
-    [rewrites addObject:op];		
-}
-
-- (void) replaceFromIndex:(NSInteger)anIndex Text:(NSString *)theText
-{
-    [self replaceProgNam:DEFAULT_PROGRAM_NAME FromIndex:anIndex ToIndex:anIndex Text:theText];
-}
-
-- (void) replaceFromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText
-{
-    [self replaceProgNam:DEFAULT_PROGRAM_NAME FromIndex:from ToIndex:to Text:theText];
-}
-
-- (void) replaceFromToken:(id<ANTLRToken>)anIndexT Text:(NSString *)theText
-{
-    [self replaceProgNam:DEFAULT_PROGRAM_NAME FromIndex:[anIndexT getTokenIndex] ToIndex:[anIndexT getTokenIndex] Text:theText];
-}
-
-- (void) replaceFromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to Text:(NSString *)theText
-{
-    [self replaceProgNam:DEFAULT_PROGRAM_NAME FromIndex:[from getTokenIndex] ToIndex:[to getTokenIndex] Text:theText];
-}
-
-- (void) replaceProgNam:(NSString *)programName Token:(id<ANTLRToken>)from Token:(id<ANTLRToken>)to Text:(NSString *)theText
-{
-    [self replaceProgNam:programName FromIndex:[from getTokenIndex] ToIndex:[to getTokenIndex] Text:theText];
-}
-                         
-- (void) replaceProgNam:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText
-{
-    if ( from > to || from < 0 || to < 0 || to >= [tokens count] ) {
-        @throw [ANTLRIllegalArgumentException newException:[NSString stringWithFormat:@"replace: range invalid: %d..%d size=%d\n", from, to, [tokens count]]];
-    }
-    ANTLRRewriteOperation *op = [ANTLRReplaceOp newANTLRReplaceOp:from ToIndex:to Text:theText];
-    ANTLRHashMap *rewrites = [self getProgram:programName];
-    op.instructionIndex = [rewrites count];
-    [rewrites addObject:op];
-}
-
-- (void) delete:(NSInteger)anIndex
-{
-    [self delete:DEFAULT_PROGRAM_NAME  FromIndex:(NSInteger)anIndex  ToIndex:(NSInteger)anIndex];
-}
-
-- (void) delete:(NSInteger)from ToIndex:(NSInteger)to
-{
-    [self delete:DEFAULT_PROGRAM_NAME FromIndex:from ToIndex:to];
-}
-
-- (void) deleteToken:(id<ANTLRToken>)anIndexT
-{
-    [self delete:DEFAULT_PROGRAM_NAME FromIndex:[anIndexT getTokenIndex] ToIndex:[anIndexT getTokenIndex]];
-}
-
-- (void) deleteFromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to
-{
-    [self delete:DEFAULT_PROGRAM_NAME FromIndex:[from getTokenIndex] ToIndex:[to getTokenIndex]];
-}
-
-- (void) delete:(NSString *)programName FromToken:(id<ANTLRToken>)from ToToken:(id<ANTLRToken>)to
-{
-    [self replaceProgNam:programName FromIndex:[from getTokenIndex] ToIndex:[to getTokenIndex] Text:nil];
-}
-
-- (void) delete:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to
-{
-    [self replaceProgNam:programName FromIndex:from ToIndex:to Text:nil];
-}
-
-- (NSInteger)getLastRewriteTokenIndex
-{
-    return [self getLastRewriteTokenIndex:DEFAULT_PROGRAM_NAME];
-}
-
-- (NSInteger)getLastRewriteTokenIndex:(NSString *)programName
-{
-#pragma warning fix this to look up the hashed name
-    NSInteger anInt = -1;
-    ANTLRMapElement *node = [lastRewriteTokenIndexes lookup:programName Scope:0];
-    if ( node != nil ) {
-        anInt = [lastRewriteTokenIndexes hash:programName];
-    }
-    return anInt;
-}
-
-- (void)setLastRewriteTokenIndex:(NSString *)programName Index:(NSInteger)anInt
-{
-    [lastRewriteTokenIndexes insertObject:programName atIndex:anInt];
-}
-
--(ANTLRHashMap *) getProgram:(NSString *)name
-{
-   ANTLRHashMap *is = (ANTLRHashMap *)[programs getName:name];
-    if ( is == nil ) {
-        is = [self initializeProgram:name];
-    }
-    return is;
-}
-
--(ANTLRHashMap *) initializeProgram:(NSString *)name
-{
-    ANTLRHashMap *is = [ANTLRHashMap newANTLRHashMapWithLen:PROGRAM_INIT_SIZE];
-    [is putName:name Node:nil];
-    return is;
-}
-
-- (NSString *)toOriginalString
-{
-    [super fill];
-    return [self toOriginalString:MIN_TOKEN_INDEX End:[tokens count]-1];
-}
-
-- (NSString *)toOriginalString:(NSInteger)start End:(NSInteger)end
-{
-    NSMutableString *buf = [NSMutableString stringWithCapacity:100];
-    for (int i = start; i >= MIN_TOKEN_INDEX && i <= end && i< [tokens count]; i++) {
-        if ( [[tokens objectAtIndex:i] type] != ANTLRTokenTypeEOF )
-            [buf appendString:[[tokens objectAtIndex:i] text]];
-    }
-    return [NSString stringWithString:buf];
-}
-
-- (NSString *)toString
-{
-    [super fill];
-    return [self toStringFromStart:MIN_TOKEN_INDEX ToEnd:[tokens count]-1];
-}
-
-- (NSString *)toString:(NSString *)programName
-{
-    [super fill];
-    return [self toString:programName FromStart:MIN_TOKEN_INDEX ToEnd:[[programs objectAtIndex:MIN_TOKEN_INDEX] count]-1];
-}
-
-- (NSString *)toStringFromStart:(NSInteger)start ToEnd:(NSInteger)end
-{
-    return [self toString:DEFAULT_PROGRAM_NAME FromStart:start ToEnd:end];
-}
-
-- (NSString *)toString:(NSString *)programName FromStart:(NSInteger)start ToEnd:(NSInteger)end
-{
-    ANTLRHashMap *rewrites = (ANTLRHashMap *)[programs getName:programName];
-    
-    // ensure start/end are in range
-    if ( end > [tokens count]-1 ) end = [tokens count]-1;
-    if ( start < 0 )
-        start = 0;
-    
-    if ( rewrites == nil || [rewrites count] == 0 ) {
-        return [self toOriginalString:start End:end]; // no instructions to execute
-    }
-    NSMutableString *buf = [NSMutableString stringWithCapacity:100];
-    
-    // First, optimize instruction stream
-    ANTLRHashMap *indexToOp = [self reduceToSingleOperationPerIndex:rewrites];
-    
-    // Walk buffer, executing instructions and emitting tokens
-    int i = start;
-    while ( i <= end && i < [tokens count] ) {
-        ANTLRRewriteOperation *op = (ANTLRRewriteOperation *)[indexToOp objectAtIndex:i];
-        [indexToOp setObject:nil atIndex:i]; // remove so any left have rwIndex size-1
-        id<ANTLRToken>t = (id<ANTLRToken>) [tokens objectAtIndex:i];
-        if ( op == nil ) {
-            // no operation at that rwIndex, just dump token
-            if ( t.type != ANTLRTokenTypeEOF )
-                [buf appendString:t.text];
-            i++; // move to next token
-        }
-        else {
-            i = [op execute:buf]; // execute operation and skip
-        }
-    }
-    
-    // include stuff after end if it's last rwIndex in buffer
-    // So, if they did an insertAfter(lastValidIndex, "foo"), include
-    // foo if end==lastValidIndex.
-    //if ( end == [tokens size]-1 ) {
-    if ( end == [tokens count]-1 ) {
-        // Scan any remaining operations after last token
-        // should be included (they will be inserts).
-        int i2 = 0;
-        while ( i2 < [indexToOp count] - 1 ) {
-            ANTLRRewriteOperation *op = [indexToOp objectAtIndex:i2];
-            if ( op.rwIndex >= [tokens count]-1 ) {
-                [buf appendString:op.text];
-            }
-            i2++; // advance; otherwise this scan never terminates
-        }
-    }
-    return [NSString stringWithString:buf];
-}
-
-/** We need to combine operations and report invalid operations (like
- *  overlapping replaces that are not completely nested).  Inserts to
- *  same rwIndex need to be combined etc...   Here are the cases:
- *
- *  I.i.u I.j.v								leave alone, nonoverlapping
- *  I.i.u I.i.v								combine: Iivu
- *
- *  R.i-j.u R.x-y.v	| i-j in x-y			delete first R
- *  R.i-j.u R.i-j.v							delete first R
- *  R.i-j.u R.x-y.v	| x-y in i-j			ERROR
- *  R.i-j.u R.x-y.v	| boundaries overlap	ERROR
- *
- *  I.i.u R.x-y.v | i in x-y				delete I
- *  I.i.u R.x-y.v | i not in x-y			leave alone, nonoverlapping
- *  R.x-y.v I.i.u | i in x-y				ERROR
- *  R.x-y.v I.x.u 							R.x-y.uv (combine, delete I)
- *  R.x-y.v I.i.u | i not in x-y			leave alone, nonoverlapping
- *
- *  I.i.u = insert u before op @ rwIndex i
- *  R.x-y.u = replace x-y indexed tokens with u
- *
- *  First we need to examine replaces.  For any replace op:
- *
- * 		1. wipe out any insertions before op within that range.
- *		2. Drop any replace op before that is contained completely within
- *         that range.
- *		3. Throw exception upon boundary overlap with any previous replace.
- *
- *  Then we can deal with inserts:
- *
- * 		1. for any inserts to same rwIndex, combine even if not adjacent.
- * 		2. for any prior replace with same left boundary, combine this
- *         insert with replace and delete this replace.
- * 		3. throw exception if rwIndex in same range as previous replace
- *
- *  Don't actually delete; make op null in list. Easier to walk list.
- *  Later we can throw as we add to rwIndex -> op map.
- *
- *  Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
- *  inserted stuff would be before the replace range.  But, if you
- *  add tokens in front of a method body '{' and then delete the method
- *  body, I think the stuff before the '{' you added should disappear too.
- *
- *  Return a map from token rwIndex to operation.  A short sketch of these
- *  rules follows this file's diff.
- */
-- (ANTLRHashMap *)reduceToSingleOperationPerIndex:(ANTLRHashMap *)rewrites
-{
-    //System.out.println("rewrites="+rewrites);
-    if (debug > 1) NSLog(@"rewrites=%@\n", [rewrites getName:DEFAULT_PROGRAM_NAME]);
-    // WALK REPLACES
-    for (int i = 0; i < [rewrites count]; i++) {
-        ANTLRRewriteOperation *op = (ANTLRRewriteOperation *)[rewrites objectAtIndex:i];
-        if ( op==nil )
-            continue;
-        if ( !([[op class] isKindOfClass:[ANTLRReplaceOp class]]) )
-            continue;
-        ANTLRReplaceOp *rop = (ANTLRReplaceOp *)[rewrites objectAtIndex:i];
-        // Wipe prior inserts within range
-        //List inserts = getKindOfOps(rewrites, ANTLRInsertBeforeOp.class, i);
-        ANTLRHashMap *inserts = [self getKindOfOps:rewrites KindOfClass:[ANTLRInsertBeforeOp class] Index:i];
-        for (int j = 0; j < [inserts count]; j++) {
-            ANTLRInsertBeforeOp *iop = (ANTLRInsertBeforeOp *)[inserts objectAtIndex:j];
-            if ( iop.rwIndex >= rop.rwIndex && iop.rwIndex <= rop.lastIndex ) {
-                // delete insert as it's a no-op.
-                [rewrites insertObject:nil atIndex:iop.instructionIndex];
-            }
-        }
-        // Drop any prior replaces contained within
-        ANTLRHashMap *prevReplaces = [self getKindOfOps:rewrites KindOfClass:[ANTLRReplaceOp class] Index:i];
-        for (int j = 0; j < [prevReplaces count]; j++) {
-            ANTLRReplaceOp *prevRop = (ANTLRReplaceOp *) [prevReplaces objectAtIndex:j];
-            if ( prevRop.rwIndex>=rop.rwIndex && prevRop.lastIndex <= rop.lastIndex ) {
-                // delete replace as it's a no-op.
-                [rewrites setObject:nil atIndex:prevRop.instructionIndex];
-                continue;
-            }
-            // throw exception unless disjoint or identical
-            BOOL disjoint = prevRop.lastIndex<rop.rwIndex || prevRop.rwIndex > rop.lastIndex;
-            BOOL same = prevRop.rwIndex==rop.rwIndex && prevRop.lastIndex==rop.lastIndex;
-            if ( !disjoint && !same ) {
-                @throw [ANTLRIllegalArgumentException newException:
-                        [NSString stringWithFormat:@"replace op boundaries of %@, overlap with previous %@\n", rop, prevRop]];
-            }
-        }
-    }
-    
-    // WALK INSERTS
-    for (int i = 0; i < [rewrites count]; i++) {
-        ANTLRRewriteOperation *op = (ANTLRRewriteOperation *)[rewrites objectAtIndex:i];
-        if ( op == nil )
-            continue;
-        if ( !([[op class] isKindOfClass:[ANTLRInsertBeforeOp class]]) )
-            continue;
-        ANTLRInsertBeforeOp *iop = (ANTLRInsertBeforeOp *)[rewrites objectAtIndex:i];
-        // combine current insert with prior if any at same rwIndex
-        ANTLRHashMap *prevInserts = (ANTLRHashMap *)[self getKindOfOps:rewrites KindOfClass:[ANTLRInsertBeforeOp class] Index:i];
-        for (int j = 0; j < [prevInserts count]; j++) {
-            ANTLRInsertBeforeOp *prevIop = (ANTLRInsertBeforeOp *) [prevInserts objectAtIndex:j];
-            if ( prevIop.rwIndex == iop.rwIndex ) { // combine objects
-                                                // convert to strings...we're in process of toString'ing
-                                                // whole token buffer so no lazy eval issue with any templates
-                iop.text = [self catOpText:iop.text PrevText:prevIop.text];
-                // delete redundant prior insert
-                [rewrites setObject:nil atIndex:prevIop.instructionIndex];
-            }
-        }
-        // look for replaces where iop.rwIndex is in range; error
-        ANTLRHashMap *prevReplaces = (ANTLRHashMap *)[self getKindOfOps:rewrites KindOfClass:[ANTLRReplaceOp class] Index:i];
-        for (int j = 0; j < [prevReplaces count]; j++) {
-            ANTLRReplaceOp *rop = (ANTLRReplaceOp *) [prevReplaces objectAtIndex:j];
-            if ( iop.rwIndex == rop.rwIndex ) {
-                rop.text = [self catOpText:iop.text PrevText:rop.text];
-                [rewrites setObject:nil atIndex:i];  // delete current insert
-                continue;
-            }
-            if ( iop.rwIndex >= rop.rwIndex && iop.rwIndex <= rop.lastIndex ) {
-                @throw [ANTLRIllegalArgumentException newException:[NSString stringWithFormat:@"insert op %@ within boundaries of previous %@", iop, rop]];
-            }
-        }
-    }
-    // System.out.println("rewrites after="+rewrites);
-    ANTLRHashMap *m = [ANTLRHashMap newANTLRHashMapWithLen:15];
-    for (int i = 0; i < [rewrites count]; i++) {
-        ANTLRRewriteOperation *op = (ANTLRRewriteOperation *)[rewrites objectAtIndex:i];
-        if ( op == nil )
-            continue; // ignore deleted ops
-        if ( [m objectAtIndex:op.rwIndex] != nil ) {
-            @throw [ANTLRRuntimeException newException:@"should only be one op per rwIndex\n"];
-        }
-        //[m put(new Integer(op.rwIndex), op);
-        [m setObject:op atIndex:op.rwIndex];
-    }
-    //System.out.println("rwIndex to op: "+m);
-    if (debug > 1) NSLog(@"rwIndex to op %@\n", m);
-    return m;
-}
-
-- (NSString *)catOpText:(id)a PrevText:(id)b
-{
-    NSString *x = @"";
-    NSString *y = @"";
-    if ( a != nil )
-        x = [a toString];
-    if ( b != nil )
-        y = [b toString];
-    return [NSString stringWithFormat:@"%@%@",x, y];
-}
-
-- (ANTLRHashMap *)getKindOfOps:(ANTLRHashMap *)rewrites KindOfClass:(Class)kind
-{
-    return [self getKindOfOps:rewrites KindOfClass:kind Index:[rewrites count]];
-}
-
-/** Get all operations before an rwIndex of a particular kind */
-- (ANTLRHashMap *)getKindOfOps:(ANTLRHashMap *)rewrites KindOfClass:(Class)kind Index:(NSInteger)before
-{
-    ANTLRHashMap *ops = [ANTLRHashMap newANTLRHashMapWithLen:15];
-    for (int i = 0; i < before && i < [rewrites count]; i++) {
-        ANTLRRewriteOperation *op = (ANTLRRewriteOperation *)[rewrites objectAtIndex:i];
-        if ( op == nil )
-            continue; // ignore deleted
-        if ( [op isKindOfClass:(Class)kind] )
-            [ops addObject:op];
-    }		
-    return ops;
-}
-
-- (NSMutableString *)toDebugString
-{
-    return [self toDebugStringFromStart:MIN_TOKEN_INDEX ToEnd:[tokens count]-1];
-}
-
-- (NSMutableString *)toDebugStringFromStart:(NSInteger)start ToEnd:(NSInteger)end
-{
-    NSMutableString *buf = [NSMutableString stringWithCapacity:100];
-    for (int i = start; i >= MIN_TOKEN_INDEX && i <= end && i < [tokens count]; i++) {
-        [buf appendString:[[tokens objectAtIndex:i] text]];
-    }
-    return [NSMutableString stringWithString:buf];
-}
-
-@end
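The reduceToSingleOperationPerIndex rules documented above are easiest to see with a couple of queued edits; the indexes are illustrative and the stream is assumed to be built as in the earlier sketch:

    // Two inserts at the same index are combined into a single InsertBeforeOp (I.i.u I.i.v).
    [tokens insertBeforeIndex:4 Text:@"foo"];
    [tokens insertBeforeIndex:4 Text:@"bar"];

    // An insert whose index falls inside a replace range is dropped (I.i.u R.x-y.v with i in x-y).
    [tokens insertBeforeIndex:6 Text:@"ignored"];
    [tokens replaceFromIndex:5 ToIndex:8 Text:@"R"];

    // Overlapping, non-nested replaces raise ANTLRIllegalArgumentException when
    // [tokens toString] runs the reduction.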
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenSource.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenSource.h
deleted file mode 100644
index ca1fa2b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenSource.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRToken.h"
-
-// Anything that responds to -nextToken can be treated as a lexer.
-// For instance, this can be a flex lexer, a handwritten one, or even
-// a proxy for a remotely running token source (database, lexer, whatever).
-@protocol ANTLRTokenSource <NSObject, NSCopying>
-
-- (id<ANTLRToken>) nextToken;
-- (NSString *)getSourceName;
-
-@end
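Because anything that answers -nextToken qualifies as a token source, a driver needs only the two methods declared above. The drain function below is a sketch, not framework code:

    static void drainTokenSource(id<ANTLRTokenSource> source)
    {
        id<ANTLRToken> t;
        while ( (t = [source nextToken]) != nil && t.type != ANTLRTokenTypeEOF ) {
            NSLog(@"%@: %@", [source getSourceName], t.text);
        }
    }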
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenStream.h
deleted file mode 100644
index c104578..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRIntStream.h"
-#import "ANTLRToken.h"
-
-@protocol ANTLRTokenStream < ANTLRIntStream >
-
-// Get Token at current input pointer + i ahead where i=1 is next Token.
-// i<0 indicates tokens in the past.  So -1 is previous token and -2 is
-// two tokens ago. LT:0 is undefined.  For i>=n, return Token.EOFToken.
-// Return null for LT:0 and any index that results in an absolute address
-// that is negative.
-
-- (id<ANTLRToken>) LT:(NSInteger) i;
-
-- (id<ANTLRToken>) getToken:(NSUInteger) i;
-
-- (id) getTokenSource;
-
-- (NSString *) toString;
-/** Return the text of all tokens from start to stop, inclusive.
- *  If the stream does not buffer all the tokens then it can just
- *  return "" or null; in that case, users should of course not access
- *  $ruleLabel.text in an action.
- */
-- (NSString *)toStringFromStart:(NSInteger)startIdx ToEnd:(NSInteger)stopIdx;
-
-/** Because the user is not required to use a token with an index stored
- *  in it, we must provide a means for two token objects themselves to
- *  indicate the start/end location.  Most often this will just delegate
- *  to the other toString(int,int).  This is also parallel with
- *  the TreeNodeStream.toString(Object,Object).
- */
-- (NSString *) toStringFromToken:(id<ANTLRToken>)startToken ToToken:(id<ANTLRToken>)stopToken;
-
-
-@end
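The LT: contract above (1 is the next token, negative values look backwards, 0 is undefined) is what drives parser lookahead. A brief sketch against any id<ANTLRTokenStream>; the stream variable is illustrative:

    id<ANTLRToken> next = [stream LT:1];    // token about to be consumed
    id<ANTLRToken> prev = [stream LT:-1];   // most recently consumed token; nil before any consume
    if ( next.type != ANTLRTokenTypeEOF ) {
        NSLog(@"next=%@ prev=%@", next.text, prev ? prev.text : @"<none>");
    }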
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTree.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTree.h
deleted file mode 100644
index 32f603a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTree.h
+++ /dev/null
@@ -1,129 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef DEBUG_DEALLOC
-#define DEBUG_DEALLOC
-#endif
-
-#import "AMutableArray.h"
-
-@protocol ANTLRTree < NSObject, NSCopying >
-
-//+ (id<ANTLRTree>) invalidNode;
-
-- (id<ANTLRTree>) getChild:(NSUInteger)index;
-- (NSUInteger) getChildCount;
-
-// Trees track their parent and child index as of ANTLR > 3.0
-
-- (id<ANTLRTree>)getParent;
-
-- (void) setParent:(id<ANTLRTree>)t;
-
-/** Is there a node above with token type ttype? */
-- (BOOL) hasAncestor:(NSInteger)ttype;
-
-/** Walk upwards and get first ancestor with this token type. */
-- (id<ANTLRTree>) getAncestor:(NSInteger) ttype;
-
-/** Return a list of all ancestors of this node.  The first node of
- *  list is the root and the last is the parent of this node.
- */
-- (AMutableArray *) getAncestors;
-
-/** This node is what child index? 0..n-1 */
-- (NSInteger) getChildIndex;
-
-- (void) setChildIndex:(NSInteger) index;
-
-/** Set the parent and child index values for all children */
-- (void) freshenParentAndChildIndexes;
-
-/** Add t as a child to this node.  If t is null, do nothing.  If t
- *  is nil, add all children of t to this node's children.
- */
-- (void) addChild:(id<ANTLRTree>) t;
-
-/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
-- (void) setChild:(NSInteger)i With:(id<ANTLRTree>) t;
-
-- (id) deleteChild:(NSInteger) i;
-
-/** Delete children from start to stop and replace with t even if t is
- *  a list (nil-root tree).  The number of children can increase or decrease.
- *  For huge child lists, inserting children can force walking the rest of the
- *  children to set their child index; this could be slow.
- */
-- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t;	
-
-- (NSArray *) children;
-// Add t as a child to this node.  If t is null, do nothing.  If t
-//  is nil, add all children of t to this node's children.
-
-- (void) addChildren:(NSArray *) theChildren;
-//- (void) removeAllChildren;
-
-// Indicates the node is a nil node but may still have children, meaning
-// the tree is a flat list.
-
-- (BOOL) isNil;
-
-/**  What is the smallest token index (indexing from 0) for this node
- *   and its children?
- */
-- (NSInteger) getTokenStartIndex;
-
-- (void) setTokenStartIndex:(NSInteger) index;
-
-/**  What is the largest token index (indexing from 0) for this node
- *   and its children?
- */
-- (NSInteger) getTokenStopIndex;
-- (void) setTokenStopIndex:(NSInteger) index;
-
-- (id<ANTLRTree>) dupNode;
-
-- (NSString *) toString;
-
-#pragma mark Copying
-- (id) copyWithZone:(NSZone *)aZone;	// the children themselves are not copied here!
-- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
-- (id) deepCopyWithZone:(NSZone *)aZone;
-
-#pragma mark Tree Parser support
-- (NSInteger)type;
-- (NSString *)text;
-// In case we don't have a token payload, what is the line for errors?
-- (NSUInteger)line;
-- (NSUInteger)charPositionInLine;
-- (void) setCharPositionInLine:(NSUInteger)pos;
-
-#pragma mark Informational
-- (NSString *) treeDescription;
-- (NSString *) description;
-
-@end
-
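Everything in the ANTLRTree protocol above reduces to a node with a child list plus token-derived type/text, so a generic walker needs only getChildCount and getChild:. The dump function below is a sketch written against those declarations only:

    static void dumpTree(id<ANTLRTree> t, NSUInteger depth)
    {
        if ( ![t isNil] )
            NSLog(@"%*s%@", (int)(depth * 2), "", [t text]);
        NSUInteger n = [t getChildCount];
        for (NSUInteger i = 0; i < n; i++)
            dumpTree([t getChild:i], depth + 1);
    }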
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTree.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTree.m
deleted file mode 100644
index 4d9edaa..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTree.m
+++ /dev/null
@@ -1,149 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTree.h"
-#import "ANTLRToken.h"
-// TODO: this shouldn't be here...but needed for invalidNode
-#import "ANTLRCommonTree.h"
-
-@implementation ANTLRTree
-
-@synthesize isEmpty;
-@synthesize isEmptyNode;
-@synthesize invalidNode;
-@synthesize children;
-
-#pragma mark ANTLRTree protocol conformance
-
-+ (id<ANTLRTree>) invalidNode
-{
-	static id<ANTLRTree> invalidNode = nil;
-	if (!invalidNode) {
-		invalidNode = [[ANTLRCommonTree alloc] initWithTokenType:ANTLRTokenTypeInvalid];
-	}
-	return invalidNode;
-}
-
-- (id<ANTLRTree>) init
-{
-	self = [super init];
-	if ( self != nil ) {
-		isEmptyNode = NO;
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-	[super dealloc];
-}
-
-- (id<ANTLRTree>) getChild:(NSUInteger) index
-{
-	return nil;
-}
-
-- (NSUInteger) getChildCount
-{
-	return 0;
-}
-
-- (NSArray *) getChildren
-{
-	return nil;
-}
-
-	// Add tree as a child to this node.  If tree is nil, do nothing.  If tree
-	// is an empty node, add all children of tree to our children.
-
-- (void) addChild:(id<ANTLRTree>) tree
-{
-}
-
-- (void) addChildren:(NSArray *) theChildren
-{
-}
-
-- (void) removeAllChildren
-{
-}
-
-	// Indicates the node is an empty node but may still have children, meaning
-	// the tree is a flat list.
-
-- (BOOL) isEmpty
-{
-	return isEmptyNode;
-}
-
-- (void) setIsEmpty:(BOOL)emptyFlag
-{
-	isEmptyNode = emptyFlag;
-}
-
-#pragma mark ANTLRTree abstract base class
-
-	// Return a token type; needed for tree parsing
-- (NSInteger) getType
-{
-	return 0;
-}
-
-- (NSString *) getText
-{
-	return [self description];
-}
-
-	// In case we don't have a token payload, what is the line for errors?
-- (NSInteger) getLine
-{
-	return 0;
-}
-
-- (NSInteger) getCharPositionInLine
-{
-	return 0;
-}
-
-- (NSString *) treeDescription
-{
-	return @"";
-}
-
-- (NSString *) description
-{
-	return @"";
-}
-
-- (void) _createChildrenList
-{
-	if ( children == nil )
-		children = [[NSMutableArray alloc] init];
-}
-
-@end
-
-@end
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeAdaptor.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeAdaptor.h
deleted file mode 100644
index e0d94e0..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeAdaptor.h
+++ /dev/null
@@ -1,157 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRToken.h"
-#import "ANTLRBaseTree.h"
-#import "ANTLRTokenStream.h"
-
-#pragma warning tree/node diction is broken.
-
-@protocol ANTLRTreeAdaptor <NSObject, NSCopying>
-
-#pragma mark Construction
-
-#pragma mark ANTLRTreeAdaptor implementation
-- (id)dupNode:(id)aNode;	// copies just the node
-- (id)dupTree:(id)aTree;	// copies the entire subtree, recursively
-
-/** Return a nil node (an empty but non-null node) that can hold
- *  a list of elements as the children.  If you want a flat tree (a list)
- *  use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
- */
-- (id) emptyNode;
-
-/** Return a tree node representing an error.  This node records the
- *  tokens consumed during error recovery.  The start token indicates the
- *  input symbol at which the error was detected.  The stop token indicates
- *  the last symbol consumed during recovery.
- *
- *  You must specify the input stream so that the erroneous text can
- *  be packaged up in the error node.  The exception could be useful
- *  to some applications; default implementation stores ptr to it in
- *  the CommonErrorNode.
- *
- *  This only makes sense during token parsing, not tree parsing.
- *  Tree parsing should happen only when parsing and tree construction
- *  succeed.
- */
-- (id) errorNode:(id<ANTLRTokenStream>)anInput
-            From:(id<ANTLRToken>)aStartToken
-              To:(id<ANTLRToken>)aStopToken
-       Exception:(NSException *) e;
-
-/** Is tree considered a nil node used to make lists of child nodes? */
-- (BOOL) isNil:(id)aTree;
-
-
-- (void) addChild:(id)child toTree:(id)aTree;
-
-/** If oldRoot is a nil root, just copy or move the children to newRoot.
- *  If not a nil root, make oldRoot a child of newRoot.
- *
- *    old=^(nil a b c), new=r yields ^(r a b c)
- *    old=^(a b c), new=r yields ^(r ^(a b c))
- *
- *  If newRoot is a nil-rooted single child tree, use the single
- *  child as the new root node.
- *
- *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
- *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
- *
- *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
- *
- *    old=null, new=r yields r
- *    old=null, new=^(nil r) yields ^(nil r)
- *
- *  Return newRoot.  Throw an exception if newRoot is not a
- *  simple node or nil root with a single child node--it must be a root
- *  node.  If newRoot is ^(nil x) return x as newRoot.
- *
- *  Be advised that it's ok for newRoot to point at oldRoot's
- *  children; i.e., you don't have to copy the list.  We are
- *  constructing these nodes so we should have this control for
- *  efficiency.
- */
-- (id) becomeRoot:(id)newRoot old:(id)oldRoot;
-
-- (id) rulePostProcessing:(id)root;
-
-#pragma mark Rewrite Rules
-                           
-- (NSUInteger) getUniqueID:(id)aNode;
-
-- (id) create:(id<ANTLRToken>)payload;
-- (id) createTree:(NSInteger)tokenType FromToken:(id<ANTLRToken>)fromToken;
-- (id) createTree:(NSInteger)tokenType FromToken:(id<ANTLRToken>)fromToken Text:(NSString *)text;
-- (id) createTree:(NSInteger)tokenType Text:(NSString *)text;
-
-#pragma mark Content
-
-- (id)dupNode:(id)aNode;
-- (id)dupTree:(id)aTree;
-
-- (NSInteger) getType:(id)aNode;
-- (void) setType:(id)aNode Type:(NSInteger)tokenType;
-
-- (NSString *) getText:(id)aNode;
-- (void) setText:(id)aNode Text:(NSString *)tokenText;
-
-//- (id<ANTLRToken>) getToken:(id)t;
-
-- (void) setTokenBoundaries:(id)aTree From:(id<ANTLRToken>)startToken To:(id<ANTLRToken>)stopToken;
-- (NSInteger) getTokenStartIndex:(id)aTree;
-- (NSInteger) getTokenStopIndex:(id)aTree;
-
-#pragma mark Navigation / Tree Parsing
-
-/** Get a child 0..n-1 node */
-- (id) getChild:(id)aNode At:(NSInteger) i;
-/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
-- (void) setChild:(id)aTree At:(NSInteger)index Child:(id)child;
-/** Remove ith child and shift children down from right. */
-- (id) deleteChild:(id)t Index:(NSInteger)index;
-
-/** How many children?  If 0, then this is a leaf node */
-- (NSInteger) getChildCount:(id) aTree;
-
-/** Who is the parent node of this node; if null, implies node is root.
- *  If your node type doesn't handle this, it's ok but the tree rewrites
- *  in tree parsers need this functionality.
- */
-- (id)getParent:(id)t;
-- (void) setParent:(id)t With:(id)parent;
-
-/** What index is this node in the child list? Range: 0..n-1
- *  If your node type doesn't handle this, it's ok but the tree rewrites
- *  in tree parsers need this functionality.
- */
-- (NSInteger) getChildIndex:(id)t;
-- (void) setChildIndex:(id)t With:(NSInteger)index;
-
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t;
-
-@end
-
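
The becomeRoot:old: contract documented above is easiest to see with the ^(...) shapes it lists. Building on the flat-list sketch after ANTLRTree.m, here is a hedged illustration; token types 4 and 7 are placeholders, and it assumes a concrete adaptor such as ANTLRCommonTreeAdaptor implements this protocol selector:

    #import "ANTLRCommonTreeAdaptor.h"

    static id<ANTLRBaseTree> hoistRoot(void)
    {
        id<ANTLRTreeAdaptor> adaptor = [ANTLRCommonTreeAdaptor newTreeAdaptor];
        id<ANTLRBaseTree> list = [adaptor emptyNode];                 // ^(nil a b c)
        [adaptor addChild:[adaptor createTree:4 Text:@"a"] toTree:list];
        [adaptor addChild:[adaptor createTree:4 Text:@"b"] toTree:list];
        [adaptor addChild:[adaptor createTree:4 Text:@"c"] toTree:list];
        id<ANTLRBaseTree> r = [adaptor createTree:7 Text:@"r"];
        // old=^(nil a b c), new=r  yields  ^(r a b c); the children are
        // re-parented, not copied, exactly as the comment permits for efficiency.
        return [adaptor becomeRoot:r old:list];
    }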
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeAdaptor.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeAdaptor.m
deleted file mode 100644
index ce64b13..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeAdaptor.m
+++ /dev/null
@@ -1,238 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTreeException.h"
-#import "ANTLRBaseTree.h"
-
-@implementation ANTLRTreeAdaptor
-
-
-+ (id<ANTLRBaseTree>) newEmptyTree
-{
-	return [ANTLRTreeAdaptor newTreeWithToken:nil];
-}
-
-+ (id) newAdaptor
-{
-    return [[ANTLRTreeAdaptor alloc] init];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-- (id) initWithPayload:(id<ANTLRToken>)payload
-{
-    self = [super init];
-    return self;
-}
-
-#pragma mark Rewrite Rules
-
-/** Create a tree node from a Token object; for CommonTree-type trees,
- *  the token just becomes the payload.  This is the most
- *  common create call.
- *
- *  Override if you want another kind of node to be built.
- */
-- (id<ANTLRBaseTree>) create:(id<ANTLRToken>) payload
-{
-    return nil;
-}
-
-/** Create a new node derived from a token, with a new token type.
- *  This is invoked from an imaginary node ref on right side of a
- *  rewrite rule as IMAG[$tokenLabel].
- *
- *  This should invoke createToken(Token).
- */
-- (id<ANTLRBaseTree>) createTree:(NSInteger)tokenType fromToken:(id<ANTLRToken>)fromToken
-{
-	id<ANTLRToken> newToken = [self createToken:fromToken];
-	[newToken setType:tokenType];
-    
-	id<ANTLRBaseTree> newTree = [self create:newToken];
-	[newToken release];
-	return newTree;
-}
-
-/** Create a new node derived from a token, with a new token type.
- *  This is invoked from an imaginary node ref on right side of a
- *  rewrite rule as IMAG[$tokenLabel].
- *
- *  This should invoke createToken(Token).
- */
-- (id<ANTLRBaseTree>) createTree:(NSInteger)tokenType fromToken:(id<ANTLRToken>)fromToken text:(NSString *)tokenText
-{
-	id<ANTLRToken> newToken = [self createToken:fromToken];
-	[newToken setText:tokenText];
-	
-	id<ANTLRBaseTree> newTree = [self create:newToken];
-	[newToken release];
-	return newTree;
-}
-
-/** Create a new node derived from a token, with a new token type.
- *  This is invoked from an imaginary node ref on right side of a
- *  rewrite rule as IMAG["IMAG"].
- *
- *  This should invoke createToken(int,String).
- */
-- (id<ANTLRBaseTree>) createTree:(NSInteger)tokenType text:(NSString *)tokenText
-{
-	id<ANTLRToken> newToken = [self createToken:tokenType text:tokenText];
-	
-	id<ANTLRBaseTree> newTree = [self create:newToken];
-	[newToken release];
-	return newTree;
-}
-
-- (id) copyNode:(id<ANTLRBaseTree>)aNode
-{
-	return [aNode copyWithZone:nil];	// not -copy: to silence warnings
-}
-
-- (id) copyTree:(id<ANTLRBaseTree>)aTree
-{
-	return [aTree deepCopy];
-}
-
-
-- (void) addChild:(id<ANTLRBaseTree>)child toTree:(id<ANTLRBaseTree>)aTree
-{
-	[aTree addChild:child];
-}
-
-- (id) makeNode:(id<ANTLRBaseTree>)newRoot parentOf:(id<ANTLRBaseTree>)oldRoot
-{
-	id<ANTLRBaseTree> newRootNode = newRoot;
-
-	if (oldRoot == nil)
-		return newRootNode;
-    // handles ^(nil real-node) case
-	if ([newRootNode isNil]) {
-		if ([newRootNode getChildCount] > 1) {
-#warning TODO: Find a way to the current input stream here!
-			@throw [ANTLRTreeException exceptionWithOldRoot:oldRoot newRoot:newRootNode stream:nil];
-		}
-#warning TODO: double check memory management with respect to code generation
-		// remove the empty node, placing its sole child in its role.
-		id<ANTLRBaseTree> tmpRootNode = [[newRootNode childAtIndex:0] retain];
-		[newRootNode release];
-		newRootNode = tmpRootNode;		
-	}
-	// the handling of an empty node at the root of oldRoot happens in addChild:
-	[newRootNode addChild:oldRoot];
-    // this release relies on the fact that the ANTLR code generator always assigns the return value of this method
-    // to the variable originally holding oldRoot. If we don't release we leak the reference.
-    // FIXME: this is totally non-obvious. maybe do it in calling code by comparing pointers and conditionally releasing
-    // the old object
-    [oldRoot release];
-    
-    // what happens to newRootNode's retain count? Should we be autoreleasing this one? Probably.
-	return [newRootNode retain];
-}
-
-
-- (id<ANTLRBaseTree>) postProcessTree:(id<ANTLRBaseTree>)aTree
-{
-	id<ANTLRBaseTree> processedNode = aTree;
-	if (aTree != nil && [aTree isNil] != NO && [aTree getChildCount] == 1) {
-		processedNode = [aTree childAtIndex:0];
-	}
-	return processedNode;
-}
-
-
-- (NSUInteger) uniqueIdForTree:(id<ANTLRBaseTree>)aNode
-{
-	// TODO: is hash appropriate here?
-	return [aNode hash];
-}
-
-
-#pragma mark Content
-
-- (NSInteger) tokenTypeForNode:(id<ANTLRBaseTree>)aNode
-{
-	return [aNode getType];
-}
-
-- (void) setTokenType:(NSInteger)tokenType forNode:(id)aNode
-{
-	// currently unimplemented
-}
-
-
-- (NSString *) textForNode:(id<ANTLRBaseTree>)aNode
-{
-	return [aNode getText];
-}
-
-- (void) setText:(NSString *)tokenText forNode:(id<ANTLRBaseTree>)aNode
-{
-	// currently unimplemented
-}
-
-
-#pragma mark Navigation / Tree Parsing
-
-- (id<ANTLRBaseTree>) childForNode:(id<ANTLRBaseTree>) aNode atIndex:(NSInteger) i
-{
-	// currently unimplemented
-	return nil;
-}
-
-- (NSInteger) childCountForTree:(id<ANTLRBaseTree>) aTree
-{
-	// currently unimplemented
-	return 0;
-}
-
-#pragma mark Subclass Responsibilities
-
-- (void) setBoundariesForTree:(id<ANTLRBaseTree>)aTree fromToken:(id<ANTLRToken>)startToken toToken:(id<ANTLRToken>)stopToken
-{
-	// subclass responsibility
-}
-
-- (NSInteger) tokenStartIndexForTree:(id<ANTLRBaseTree>)aTree
-{
-	// subclass responsibility
-	return 0;
-}
-
-- (NSInteger) tokenStopIndexForTree:(id<ANTLRBaseTree>)aTree
-{
-	// subclass responsibility
-	return 0;
-}
-
-
-@end
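
postProcessTree: above implements the rule post-processing step: a nil (empty) root with exactly one child collapses to that child. A small sketch of that behaviour, again assuming the common adaptor inherits it from this base class (token type 4 is a placeholder):

    #import "ANTLRCommonTreeAdaptor.h"

    static id<ANTLRBaseTree> collapseSingleChild(void)
    {
        ANTLRCommonTreeAdaptor *adaptor = [ANTLRCommonTreeAdaptor newTreeAdaptor];
        id<ANTLRBaseTree> list = [adaptor emptyNode];                     // ^(nil)
        [adaptor addChild:[adaptor createTree:4 Text:@"x"] toTree:list];  // ^(nil x)
        // One child under a nil root: the child itself becomes the rule result.
        return [adaptor postProcessTree:list];                            // x
    }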
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeException.h
deleted file mode 100644
index 28330af..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeException.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTree.h"
-#import "ANTLRRecognitionException.h"
-
-@interface ANTLRTreeException : ANTLRRecognitionException {
-	id<ANTLRBaseTree> oldRoot;
-	id<ANTLRBaseTree> newRoot;
-}
-
-+ (id) newException:(id<ANTLRBaseTree>)theOldRoot newRoot:(id<ANTLRBaseTree>)theNewRoot stream:(id<ANTLRIntStream>)aStream;
-- (id) initWithOldRoot:(id<ANTLRBaseTree>)theOldRoot newRoot:(id<ANTLRBaseTree>)theNewRoot stream:(id<ANTLRIntStream>)aStream;
-
-- (void) setOldRoot:(id<ANTLRBaseTree>)aTree;
-- (void) setNewRoot:(id<ANTLRBaseTree>)aTree;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeException.m
deleted file mode 100644
index b61d29c..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeException.m
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-//  ANTLRTreeException.m
-//  ANTLR
-//
-//  Created by Kay Röpke on 24.10.2006.
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRTreeException.h"
-
-
-@implementation ANTLRTreeException
-
-+ (id) newException:(id<ANTLRBaseTree>)theOldRoot newRoot:(id<ANTLRBaseTree>)theNewRoot stream:(id<ANTLRIntStream>)aStream;
-{
-	return [[ANTLRTreeException alloc] initWithOldRoot:theOldRoot newRoot:theNewRoot stream:aStream];
-}
-
-- (id) initWithOldRoot:(id<ANTLRBaseTree>)theOldRoot newRoot:(id<ANTLRBaseTree>)theNewRoot stream:(id<ANTLRIntStream>)aStream;
-{
-	if ((self = [super initWithStream:aStream reason:@"The new root has more than one child. Cannot make it the root node."]) != nil ) {
-		[self setOldRoot:theOldRoot];
-		[self setNewRoot:theNewRoot];
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRTreeException" );
-#endif
-	[self setOldRoot:nil];
-	[self setNewRoot:nil];
-	[super dealloc];
-}
-
-- (void) setNewRoot:(id<ANTLRBaseTree>)aTree
-{
-	if (newRoot != aTree) {
-		[aTree retain];
-		if ( newRoot ) [newRoot release];
-		newRoot = aTree;
-	}
-}
-
-- (void) setOldRoot:(id<ANTLRBaseTree>)aTree
-{
-	if (oldRoot != aTree) {
-		[aTree retain];
-		if ( oldRoot ) [oldRoot release];
-		oldRoot = aTree;
-	}
-}
-
-- (NSString *) description
-{
-	 return [NSMutableString stringWithFormat:@"%@ old root: <%@> new root: <%@>", [super description], [oldRoot treeDescription], [newRoot treeDescription]];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeIterator.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeIterator.h
deleted file mode 100644
index b5cf8c3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeIterator.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-//  ANTLRTreeIterator.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// [The "BSD licence"]
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRFastQueue.h"
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTree.h"
-
-@interface ANTLRTreeIterator : NSObject 
-{
-	BOOL firstTime;
-	__strong id<ANTLRTreeAdaptor> adaptor;
-	__strong id<ANTLRBaseTree> root;
-	__strong id<ANTLRBaseTree> tree;
-	
-	__strong ANTLRFastQueue *nodes;
-	__strong id<ANTLRBaseTree> up;
-	__strong id<ANTLRBaseTree> down;
-	__strong id<ANTLRBaseTree> eof;
-}
-
-@property BOOL firstTime;
-@property(retain) id<ANTLRTreeAdaptor> adaptor;
-@property(retain) id<ANTLRBaseTree> root;
-@property(retain) id<ANTLRBaseTree> tree;
-@property(retain) ANTLRFastQueue *nodes;
-@property(retain, readwrite) id<ANTLRBaseTree> up;
-@property(retain, readwrite) id<ANTLRBaseTree> down;
-@property(retain, readwrite) id<ANTLRBaseTree> eof;
-
-+ newANTRLTreeIterator;
-+ (ANTLRTreeIterator *) newANTRLTreeIteratorWithAdaptor:(ANTLRCommonTreeAdaptor *)adaptor
-                                                andTree:(id<ANTLRBaseTree>)tree;
-- (id) init;
-- (id) initWithTree:(id<ANTLRBaseTree>) t;
-- (id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>) a andTree:(id<ANTLRBaseTree>) t;
-
-- (void) reset;
-- (BOOL) hasNext;
-- (id) nextObject;
-- (NSArray *) allObjects;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeIterator.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeIterator.m
deleted file mode 100644
index b760f88..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeIterator.m
+++ /dev/null
@@ -1,202 +0,0 @@
-//
-//  ANTLRTreeIterator.m
-//  ANTLR
-//
-//  Created by Ian Michell on 26/04/2010.
-// Copyright (c) 2010 Ian Michell 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRTreeIterator.h"
-#import "ANTLRCommonTreeAdaptor.h"
-
-@implementation ANTLRTreeIterator
-
-+ (ANTLRTreeIterator *) newANTRLTreeIterator
-{
-    return [[ANTLRTreeIterator alloc] init];
-}
-
-+ (ANTLRTreeIterator *) newANTRLTreeIteratorWithAdaptor:(ANTLRCommonTreeAdaptor *)adaptor
-                                                andTree:(id<ANTLRBaseTree>)tree
-{
-    return [[ANTLRTreeIterator alloc] initWithTreeAdaptor:adaptor andTree:tree];
-}
-
-- (id) init
-{
-    self = [super init];
-    if ( self != nil ) {
-        firstTime = YES;
-        nodes = [[ANTLRFastQueue newANTLRFastQueue] retain];
-        down = [[adaptor createTree:ANTLRTokenTypeDOWN Text:@"DOWN"] retain];
-        up = [[adaptor createTree:ANTLRTokenTypeUP Text:@"UP"] retain];
-        eof = [[adaptor createTree:ANTLRTokenTypeEOF Text:@"EOF"] retain];
-        tree = eof;
-        root = eof;
-    }
-    return self;
-}
-
--(id) initWithTree:(id<ANTLRBaseTree>) t
-{
-    self = [super init];
-    if ( self != nil ) {
-        firstTime = YES;
-        adaptor = [[ANTLRCommonTreeAdaptor newTreeAdaptor] retain];
-        tree = [t retain];
-        root = t;
-        nodes = [[ANTLRFastQueue newANTLRFastQueue] retain];
-        down = [[adaptor createTree:ANTLRTokenTypeDOWN Text:@"DOWN"] retain];
-        up = [[adaptor createTree:ANTLRTokenTypeUP Text:@"UP"] retain];
-        eof = [[adaptor createTree:ANTLRTokenTypeEOF Text:@"EOF"] retain];
-    }
-    return self;
-}
-
--(id) initWithTreeAdaptor:(id<ANTLRTreeAdaptor>)a andTree:(id<ANTLRBaseTree>)t
-{
-    self = [super init];
-    if ( self != nil ) {
-        firstTime = YES;
-        adaptor = [a retain];
-        tree = [t retain];
-        root = t;
-        nodes = [[ANTLRFastQueue newANTLRFastQueue] retain];
-        down = [[adaptor createTree:ANTLRTokenTypeDOWN Text:@"DOWN"] retain];
-        up = [[adaptor createTree:ANTLRTokenTypeUP Text:@"UP"] retain];
-        eof = [[adaptor createTree:ANTLRTokenTypeEOF Text:@"EOF"] retain];
-    }
-    return self;
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRTreeIterator" );
-#endif
-    if ( adaptor ) [adaptor release];
-    if ( nodes ) [nodes release];
-    if ( tree && tree != eof ) [tree release];
-    if ( root && root != eof && root != tree ) [root release];
-    if ( down ) [down release];    
-    if ( up ) [up release];    
-    if ( eof ) [eof release];    
-    [super dealloc];
-}
-
-- (void)reset
-{
-    firstTime = YES;
-    tree = root;
-    [nodes clear];
-}
-
--(BOOL) hasNext
-{
-    if ( firstTime ) {
-        return root != nil;
-    }
-    if ( nodes && [nodes size] > 0) {
-        return YES;
-    }
-    if ( tree == nil ) {
-        return NO;
-    }
-    if ( [adaptor getChildCount:tree] > 0 ) {
-        return YES;
-    }
-    return [adaptor getParent:tree] != nil;
-}
-
--(id) nextObject
-{
-    // is this the first time we are using this method?
-    if ( firstTime ) {
-        firstTime = NO;
-        if ( [adaptor getChildCount:tree] == 0 ) {
-            [nodes addObject:eof];
-            return tree;
-        }
-        return tree;
-    }
-    // do we have any objects queued up?
-    if ( nodes && [nodes size] > 0 ) {
-        return [nodes remove];
-    }
-    // no nodes left?
-    if ( tree == nil ) {
-        return eof;
-    }
-    if ( [adaptor getChildCount:tree] > 0 ) {
-        tree = [adaptor getChild:tree At:0];
-        [nodes addObject:tree]; // real node is next after down
-        return self.down;
-    }
-    // if no children, look for next sibling of ancestor
-    id<ANTLRBaseTree> parent = [adaptor getParent:tree];
-    while (parent != nil && ([adaptor getChildIndex:tree] + 1) >= [adaptor getChildCount:parent]) {
-        [nodes addObject:up];
-        tree = parent;
-        parent = [adaptor getParent:tree];
-    }
-    if ( parent == nil ) {
-        tree = nil;
-        [nodes addObject:self.eof];
-        return [nodes remove];
-    }
-    // must have found a node with an unvisited sibling
-    // move to it and return it
-    NSInteger nextSiblingIndex = [adaptor getChildIndex:tree] + 1;
-    tree = [adaptor getChild:parent At:nextSiblingIndex];
-    [nodes addObject:tree];
-    return [nodes remove];
-}
-
--(NSArray *) allObjects
-{
-    AMutableArray *array = [AMutableArray arrayWithCapacity:10];
-    while ( [self hasNext] ) {
-        [array addObject:[self nextObject]];
-    }
-    return array;
-}
-
-- (void)remove
-{
-    @throw [ANTLRRuntimeException newException:@"ANTLRUnsupportedOperationException"];
-}
-
-@synthesize firstTime;
-@synthesize adaptor;
-@synthesize root;
-@synthesize tree;
-@synthesize nodes;
-
-@synthesize up;
-@synthesize down;
-@synthesize eof;
-
-@end
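
nextObject above linearizes a tree by emitting imaginary DOWN/UP navigation nodes around each subtree and a trailing EOF. A hedged usage sketch (placeholder token types; the factory keeps the source's newANTRLTreeIterator... spelling):

    #import "ANTLRTreeIterator.h"
    #import "ANTLRCommonTreeAdaptor.h"

    static void walkTree(void)
    {
        ANTLRCommonTreeAdaptor *adaptor = [ANTLRCommonTreeAdaptor newTreeAdaptor];
        id<ANTLRBaseTree> plus = [adaptor createTree:4 Text:@"+"];
        [adaptor addChild:[adaptor createTree:5 Text:@"1"] toTree:plus];
        [adaptor addChild:[adaptor createTree:5 Text:@"2"] toTree:plus];

        ANTLRTreeIterator *it =
            [ANTLRTreeIterator newANTRLTreeIteratorWithAdaptor:adaptor andTree:plus];
        while ( [it hasNext] ) {
            // Expected order for ^(+ 1 2): +  DOWN  1  2  UP  EOF
            NSLog(@"%@", [it nextObject]);
        }
    }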
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeNodeStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeNodeStream.h
deleted file mode 100644
index 8d10aaa..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeNodeStream.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRIntStream.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRTokenStream.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRCommonTreeAdaptor.h"
-
-@protocol ANTLRTreeNodeStream < ANTLRIntStream > 
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree;
-
-- (id) getTree:(NSInteger) idx;
-- (id) LT:(NSInteger)k;
-- (id) getTreeSource;
-- (id<ANTLRTokenStream>) getTokenStream; 
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setUniqueNavigationNodes:(BOOL)flag;
-- (void) reset;
-
-- (NSString *) toStringFromNode:(id)startNode ToNode:(id)stopNode;
-
-- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
-    
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeParser.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeParser.h
deleted file mode 100644
index 9d2d5c6..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeParser.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRBaseRecognizer.h"
-#import "ANTLRTreeNodeStream.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRMismatchedTreeNodeException.h"
-
-@interface ANTLRTreeParser : ANTLRBaseRecognizer {
-	id<ANTLRTreeNodeStream> input;
-}
-
-@property (retain, getter=input, setter=setInput:) id<ANTLRTreeNodeStream> input;
-
-+ (id) newANTLRTreeParser:(id<ANTLRTreeNodeStream>)anInput;
-+ (id) newANTLRTreeParser:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)state;
-
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)theInput;
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)theInput
-                State:(ANTLRRecognizerSharedState *)state;
-
-
-- (id<ANTLRTreeNodeStream>)input;
-- (void) setInput:(id<ANTLRTreeNodeStream>)anInput;
-
-- (void) setTreeNodeStream:(id<ANTLRTreeNodeStream>) anInput;
-- (id<ANTLRTreeNodeStream>) getTreeNodeStream;
-
-- (NSString *)getSourceName;
-
-- (id) getCurrentInputSymbol:(id<ANTLRIntStream>) anInput;
-
-- (id) getMissingSymbol:(id<ANTLRIntStream>)input
-              Exception:(ANTLRRecognitionException *) e
-          ExpectedToken:(NSInteger) expectedTokenType
-                 BitSet:(ANTLRBitSet *)follow;
-
-/** Matching '.' in a tree parser has special meaning: skip the node, or
- *  the entire tree if the node has children.  If it has children, scan
- *  until the corresponding UP node.
- */
-- (void) matchAny:(id<ANTLRIntStream>)ignore;
-
-/** We have DOWN/UP nodes in the stream that have no line info; override.
- *  plus we want to alter the exception type.  Don't try to recover
- *  from tree parser errors inline...
- */
-- (id) recoverFromMismatchedToken:(id<ANTLRIntStream>)anInput
-                             Type:(NSInteger)ttype
-                           Follow:(ANTLRBitSet *)follow;
-
-/** Prefix the error message with the grammar name, because the message is
- *  always intended for the programmer: the parser, not the user, built
- *  the input tree.
- */
-- (NSString *)getErrorHeader:(ANTLRRecognitionException *)e;
-
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e TokenNames:(AMutableArray *) tokenNames;
-
-- (void) traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-- (void) traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeParser.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeParser.m
deleted file mode 100644
index 599ec7c..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeParser.m
+++ /dev/null
@@ -1,192 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTreeParser.h"
-
-@implementation ANTLRTreeParser
-
-@synthesize input;
-
-+ (id) newANTLRTreeParser:(id<ANTLRTreeNodeStream>)anInput
-{
-    return [[ANTLRTreeParser alloc] initWithStream:anInput];
-}
-
-+ (id) newANTLRTreeParser:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)theState
-{
-    return [[ANTLRTreeParser alloc] initWithStream:anInput State:theState];
-}
-
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)theInput
-{
-	if ((self = [super init]) != nil) {
-		[self setInput:theInput];
-	}
-	return self;
-}
-
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)theInput State:(ANTLRRecognizerSharedState *)theState
-{
-	if ((self = [super init]) != nil) {
-		[self setInput:theInput];
-        state = theState;
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRTreeParser" );
-#endif
-	if ( input ) [input release];
-	[super dealloc];
-}
-
-- (void) reset
-{
-    [super reset]; // reset all recognizer state variables
-    if ( input != nil ) {
-        [input seek:0]; // rewind the input
-    }
-}
-
-- (void) mismatch:(id<ANTLRIntStream>)aStream tokenType:(ANTLRTokenType)aTType follow:(ANTLRBitSet *)aBitset
-{
-	ANTLRMismatchedTreeNodeException *mte = [ANTLRMismatchedTreeNodeException newException:aTType Stream:aStream];
-    [mte setNode:[((id<ANTLRTreeNodeStream>)aStream) LT:1]];
-	[self recoverFromMismatchedToken:aStream Type:aTType Follow:aBitset];
-}
-
-- (void) setTreeNodeStream:(id<ANTLRTreeNodeStream>) anInput
-{
-    input = anInput;
-}
-
-- (id<ANTLRTreeNodeStream>) getTreeNodeStream
-{
-    return input;
-}
-
-- (NSString *)getSourceName
-{
-    return [input getSourceName];
-}
-
-- (id) getCurrentInputSymbol:(id<ANTLRIntStream>) anInput
-{
-    return [(id<ANTLRTreeNodeStream>)anInput LT:1];
-}
-
-- (id) getMissingSymbol:(id<ANTLRIntStream>)anInput
-              Exception:(ANTLRRecognitionException *)e
-          ExpectedToken:(NSInteger)expectedTokenType
-                 BitSet:(ANTLRBitSet *)follow
-{
-    NSString *tokenText =[NSString stringWithFormat:@"<missing %@ %d>", [self getTokenNames], expectedTokenType];
-    //id<ANTLRTreeAdaptor> anAdaptor = (id<ANTLRTreeAdaptor>)[((id<ANTLRTreeNodeStream>)e.input) getTreeAdaptor];
-    //return [anAdaptor createToken:expectedTokenType Text:tokenText];
-    return [ANTLRCommonToken newToken:expectedTokenType Text:tokenText];
-}
-
-/** Matching '.' in a tree parser has special meaning: skip the node, or
- *  the entire tree if the node has children.  If it has children, scan
- *  until the corresponding UP node.
- */
-- (void) matchAny:(id<ANTLRIntStream>)ignore
-{ // ignore stream, copy of input
-    state.errorRecovery = NO;
-    state.failed = NO;
-    id look = [input LT:1];
-    if ( [((ANTLRCommonTreeAdaptor *)[input getTreeAdaptor]) getChildCount:look] == 0) {
-        [input consume]; // not subtree, consume 1 node and return
-        return;
-    }
-    // current node is a subtree, skip to corresponding UP.
-    // must count nesting level to get right UP
-    int level=0;
-    int tokenType = [((id<ANTLRTreeAdaptor>)[input getTreeAdaptor]) getType:look];
-    while ( tokenType != ANTLRTokenTypeEOF && !( tokenType == ANTLRTokenTypeUP && level == 0) ) {
-        [input consume];
-        look = [input LT:1];
-        tokenType = [((id<ANTLRTreeAdaptor>)[input getTreeAdaptor]) getType:look];
-        if ( tokenType == ANTLRTokenTypeDOWN ) {
-            level++;
-        }
-        else if ( tokenType == ANTLRTokenTypeUP ) {
-            level--;
-        }
-    }
-    [input consume]; // consume UP
-}
-
-/** We have DOWN/UP nodes in the stream that have no line info; override.
- *  plus we want to alter the exception type.  Don't try to recover
- *  from tree parser errors inline...
- */
-- (id) recoverFromMismatchedToken:(id<ANTLRIntStream>)anInput Type:(NSInteger)ttype Follow:(ANTLRBitSet *)follow
-{
-    @throw [ANTLRMismatchedTreeNodeException newException:ttype Stream:anInput];
-}
-
-/** Prefix the error message with the grammar name, because the message is
- *  always intended for the programmer: the parser, not the user, built
- *  the input tree.
- */
-- (NSString *)getErrorHeader:(ANTLRRecognitionException *)e
-{
-     return [NSString stringWithFormat:@"%@: node after line %@:%@",
-            [self getGrammarFileName], e.line, e.charPositionInLine];
-}
-
-/** Tree parsers parse nodes, which usually have a token object as
- *  payload. Set the exception token and do the default behavior.
- */
-- (NSString *)getErrorMessage:(ANTLRRecognitionException *)e  TokenNames:(AMutableArray *) theTokNams
-{
-    if ( [self isKindOfClass:[ANTLRTreeParser class]] ) {
-        ANTLRCommonTreeAdaptor *adaptor = (ANTLRCommonTreeAdaptor *)[((id<ANTLRTreeNodeStream>)e.input) getTreeAdaptor];
-        e.token = [adaptor getToken:((id<ANTLRBaseTree>)e.node)];
-        if ( e.token == nil ) { // could be an UP/DOWN node
-            e.token = [ANTLRCommonToken newToken:[adaptor getType:e.node]
-                                                        Text:[adaptor getText:e.node]];
-        }
-    }
-    return [super getErrorMessage:e TokenNames:theTokNams];
-}
-
-- (void) traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex
-{
-    [super traceIn:ruleName Index:ruleIndex Object:[input LT:1]];
-}
-
-- (void) traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex
-{
-    [super traceOut:ruleName Index:ruleIndex  Object:[input LT:1]];
-}
-
-
-@end
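
A generated tree grammar subclasses ANTLRTreeParser and is driven by something conforming to ANTLRTreeNodeStream (declared earlier in this diff). A minimal wiring sketch using only selectors defined above; the concrete node-stream class is assumed to come from elsewhere in the runtime:

    #import "ANTLRTreeParser.h"

    static void runWalker(id<ANTLRTreeNodeStream> nodes)
    {
        ANTLRTreeParser *walker = [ANTLRTreeParser newANTLRTreeParser:nodes];
        NSLog(@"walking %@", [walker getSourceName]);
        // A generated subclass would now call its start rule; this base class only
        // contributes matchAny's DOWN/UP skipping and the error reporting above.
        [walker reset];
    }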
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternLexer.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternLexer.h
deleted file mode 100644
index 430bc83..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternLexer.h
+++ /dev/null
@@ -1,89 +0,0 @@
-//
-//  ANTLRTreePatternLexer.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-typedef enum {
-	ANTLRLexerTokenTypeEOF = -1,
-	ANTLRLexerTokenTypeInvalid,
-	ANTLRLexerTokenTypeBEGIN,
-	ANTLRLexerTokenTypeEND,
-	ANTLRLexerTokenTypeID,
-	ANTLRLexerTokenTypeARG,
-	ANTLRLexerTokenTypePERCENT,
-	ANTLRLexerTokenTypeCOLON,
-	ANTLRLexerTokenTypeDOT,
-} ANTLRLexerTokenType;
-
-
-@interface ANTLRTreePatternLexer : NSObject {
-
-/** The tree pattern to lex like "(A B C)" */
-NSString *pattern;
-    
-/** Index into input string */
-NSInteger p;
-    
-/** Current char */
-NSInteger c;
-    
-/** How long is the pattern in char? */
-NSInteger n;
-    
-/** Set when token type is ID or ARG (name mimics Java's StreamTokenizer) */
-NSMutableData *sval;
-__strong char *data;
-    
-BOOL error;
-
-}
-
-@property (retain) NSString *pattern;
-@property (assign) NSInteger p;
-@property (assign) NSInteger c;
-@property (assign) NSInteger n;
-@property (retain, getter=getSval, setter=setSval:) NSMutableData *sval;
-@property (assign) char *data;
-@property (assign) BOOL error;
-
-+ (ANTLRTreePatternLexer *)newANTLRTreePatternLexer:(NSString *)aPattern;
-- (id) init;
-- (id) initWithPattern:(NSString *)aPattern;
-
-- (void) dealloc;
-- (NSInteger) nextToken;
-- (void) consume;
-- (NSString *)toString;
-
-- (NSMutableData *)getSval;
-- (void) setSval:(NSMutableData *)aSval;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternLexer.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternLexer.m
deleted file mode 100644
index cdf969f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternLexer.m
+++ /dev/null
@@ -1,191 +0,0 @@
-//
-//  ANTLRTreePatternLexer.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTreePatternLexer.h"
-
-@implementation ANTLRTreePatternLexer
-
-@synthesize pattern;
-@synthesize p;
-@synthesize c;
-@synthesize n;
-@synthesize sval;
-@synthesize data;
-@synthesize error;
-
-+ (ANTLRTreePatternLexer *)newANTLRTreePatternLexer:(NSString *)aPattern
-{
-    return [[ANTLRTreePatternLexer alloc] initWithPattern:aPattern];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil ) {
-        p = -1;
-        n = 0;
-        error = NO;
-        sval = [[NSMutableData dataWithLength:1000] retain];
-        data = [sval mutableBytes];
-        pattern = @"";
-        n = [pattern length];
-        if ( pattern ) [pattern retain];
-        [self consume];
-    }
-    return self;
-}
-
-- (id) initWithPattern:(NSString *)aPattern
-{
-    if ((self = [super init]) != nil ) {
-        p = -1;
-        n = 0;
-        error = NO;
-        sval = [[NSMutableData dataWithLength:1000] retain];
-        data = [sval mutableBytes];
-        pattern = [aPattern retain];
-        n = [pattern length];
-        [self consume];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRTreePatternLexer" );
-#endif
-	if ( pattern ) [pattern release];
-	if ( sval ) [sval release];
-	[super dealloc];
-}
-
-- (NSInteger) nextToken
-{
-    n = 0; // reset, but reuse buffer
-    while ( c != ANTLRLexerTokenTypeEOF ) {
-        if ( c==' ' || c=='\n' || c=='\r' || c=='\t' ) {
-            [self consume];
-            continue;
-        }
-        if ( (c>='a' && c<='z') || (c>='A' && c<='Z') || c=='_' ) {
-            data[n++] = (char)c;
-            [self consume];
-            while ( (c>='a' && c<='z') || (c>='A' && c<='Z') ||
-                   (c>='0' && c<='9') || c=='_' )
-            {
-                data[n++] = (char)c;
-                [self consume];
-            }
-            return ANTLRLexerTokenTypeID;
-        }
-        if ( c == '(' ) {
-            [self consume];
-            return ANTLRLexerTokenTypeBEGIN;
-        }
-        if ( c==')' ) {
-            [self consume];
-            return ANTLRLexerTokenTypeEND;
-        }
-        if ( c=='%' ) {
-            [self consume];
-            return ANTLRLexerTokenTypePERCENT;
-        }
-        if ( c==':' ) {
-            [self consume];
-            return ANTLRLexerTokenTypeCOLON;
-        }
-        if ( c=='.' ) {
-            [self consume];
-            return ANTLRLexerTokenTypeDOT;
-        }
-        if ( c=='[' ) { // grab [x] as a string, returning x
-            [self consume];
-            while ( c!=']' ) {
-                if ( c=='\\' ) {
-                    [self consume];
-                    if ( c!=']' ) {
-                        data[n++] = (char)'\\';
-                    }
-                    data[n++] = (char)c;
-                }
-                else {
-                    data[n++] = (char)c;
-                }
-                [self consume];
-            }
-            [self consume];
-            return ANTLRLexerTokenTypeARG;
-        }
-        [self consume];
-        error = true;
-        return ANTLRLexerTokenTypeEOF;
-    }
-    return ANTLRLexerTokenTypeEOF;
-}
-
-- (void) consume
-{
-    p++;
-    if ( p >= n ) {
-        c = ANTLRLexerTokenTypeEOF;
-    }
-    else {
-        c = [pattern characterAtIndex:p];
-    }
-}
-
-- (NSString *)toString
-{
-    char buf[100];
-
-    NSInteger idx = 0;
-    for( NSInteger i = p; i < n; i++ ){
-        buf[idx++] = data[i];
-    }
-    buf[idx] = '\0';
-    return [NSString stringWithFormat:@"%s", buf];
-}
-
-- (NSMutableData *)getSval
-{
-    return sval;
-}
-
-- (void)setSval:(NSMutableData *)aSval
-{
-    if ( sval != aSval ) {
-        if ( sval ) [sval release];
-        [aSval retain];
-    }
-    sval = aSval;
-}
-
-@end
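
A usage sketch for the lexer above, going by the contract documented in its header (patterns such as "(A B C)", with nextToken returning ANTLRLexerTokenType values until EOF); the token stream shown is the intended behaviour, not verified against this port:

    #import "ANTLRTreePatternLexer.h"

    static void scanPattern(void)
    {
        ANTLRTreePatternLexer *lexer =
            [ANTLRTreePatternLexer newANTLRTreePatternLexer:@"(A B[foo] C)"];
        NSInteger t;
        // Intended stream: BEGIN, ID(A), ID(B), ARG(foo), ID(C), END, then EOF.
        while ( (t = [lexer nextToken]) != ANTLRLexerTokenTypeEOF ) {
            NSLog(@"token type %ld", (long)t);
        }
    }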
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternParser.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternParser.h
deleted file mode 100644
index 2051b5e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternParser.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-//  ANTLRTreePatternParser.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreePatternLexer.h"
-#import "ANTLRTreeWizard.h"
-#import "ANTLRTreeAdaptor.h"
-
-@interface ANTLRTreePatternParser : NSObject {
-
-ANTLRTreePatternLexer *tokenizer;
-NSInteger ttype;
-ANTLRTreeWizard *wizard;
-id<ANTLRTreeAdaptor> adaptor;
-    
-}
-
-+ (ANTLRTreePatternParser *)newANTLRTreePatternParser:(ANTLRTreePatternLexer *)aTokenizer
-                                               Wizard:(ANTLRTreeWizard *)aWizard
-                                              Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (id) init;
-- (id) initWithTokenizer:(ANTLRTreePatternLexer *)tokenizer
-                  Wizard:(ANTLRTreeWizard *)aWizard
-                 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-
-- (void) dealloc;
-- (id<ANTLRBaseTree>) pattern;
-- (id<ANTLRBaseTree>) parseTree;
-- (id<ANTLRBaseTree>) parseNode;
-
-@property (retain) ANTLRTreePatternLexer *tokenizer;
-@property NSInteger ttype;
-@property (retain) ANTLRTreeWizard *wizard;
-@property (retain) id<ANTLRTreeAdaptor> adaptor;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternParser.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternParser.m
deleted file mode 100644
index c95d995..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreePatternParser.m
+++ /dev/null
@@ -1,197 +0,0 @@
-//
-//  ANTLRTreePatternParser.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTreePatternParser.h"
-#import "ANTLRTreePatternLexer.h"
-
-@implementation ANTLRTreePatternParser
-
-+ (ANTLRTreePatternParser *)newANTLRTreePatternParser:(ANTLRTreePatternLexer *)aTokenizer
-                                               Wizard:(ANTLRTreeWizard *)aWizard
-                                              Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-{
-    return [[ANTLRTreePatternParser alloc] initWithTokenizer:aTokenizer Wizard:aWizard Adaptor:anAdaptor];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil) {
-        //tokenizer = aTokenizer;
-        //wizard = aWizard;
-        //adaptor = anAdaptor;
-        //ttype = [tokenizer nextToken]; // kickstart
-    }
-    return self;
-}
-
-- (id) initWithTokenizer:(ANTLRTreePatternLexer *)aTokenizer
-                  Wizard:(ANTLRTreeWizard *)aWizard
-                 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-{
-    if ((self = [super init]) != nil) {
-        adaptor = anAdaptor;
-        if ( adaptor ) [adaptor retain];
-        tokenizer = aTokenizer;
-        if ( tokenizer ) [tokenizer retain];
-        wizard = aWizard;
-        if ( wizard ) [wizard retain];
-        ttype = [aTokenizer nextToken]; // kickstart
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRTreePatternParser" );
-#endif
-	if ( adaptor ) [adaptor release];
-	if ( tokenizer ) [tokenizer release];
-	if ( wizard ) [wizard release];
-	[super dealloc];
-}
-
-- (id<ANTLRBaseTree>)pattern
-{
-    if ( ttype==ANTLRLexerTokenTypeBEGIN ) {
-        return [self parseTree];
-    }
-    else if ( ttype==ANTLRLexerTokenTypeID ) {
-        id<ANTLRBaseTree> node = [self parseNode];
-        if ( ttype==ANTLRLexerTokenTypeEOF ) {
-            return node;
-        }
-        return nil; // extra junk on end
-    }
-    return nil;
-}
-
-- (id<ANTLRBaseTree>) parseTree
-{
-    if ( ttype != ANTLRLexerTokenTypeBEGIN ) {
-        @throw [ANTLRRuntimeException newException:@"no BEGIN"];
-    }
-    ttype = [tokenizer nextToken];
-    id<ANTLRBaseTree> root = [self parseNode];
-    if ( root==nil ) {
-        return nil;
-    }
-    while ( ttype==ANTLRLexerTokenTypeBEGIN  ||
-           ttype==ANTLRLexerTokenTypeID      ||
-           ttype==ANTLRLexerTokenTypePERCENT ||
-           ttype==ANTLRLexerTokenTypeDOT )
-    {
-        if ( ttype==ANTLRLexerTokenTypeBEGIN ) {
-            id<ANTLRBaseTree> subtree = [self parseTree];
-            [adaptor addChild:subtree toTree:root];
-        }
-        else {
-            id<ANTLRBaseTree> child = [self parseNode];
-            if ( child == nil ) {
-                return nil;
-            }
-            [adaptor addChild:child toTree:root];
-        }
-    }
-    if ( ttype != ANTLRLexerTokenTypeEND ) {
-        @throw [ANTLRRuntimeException newException:@"no END"];
-    }
-    ttype = [tokenizer nextToken];
-    return root;
-}
-
-- (id<ANTLRBaseTree>) parseNode
-{
-    // "%label:" prefix
-    NSString *label = nil;
-    ANTLRTreePattern *node;
-    if ( ttype == ANTLRLexerTokenTypePERCENT ) {
-        ttype = [tokenizer nextToken];
-        if ( ttype != ANTLRLexerTokenTypeID ) {
-            return nil;
-        }
-        label = [tokenizer toString];
-        ttype = [tokenizer nextToken];
-        if ( ttype != ANTLRLexerTokenTypeCOLON ) {
-            return nil;
-        }
-        ttype = [tokenizer nextToken]; // move to ID following colon
-    }
-    
-    // Wildcard?
-    if ( ttype == ANTLRLexerTokenTypeDOT ) {
-        ttype = [tokenizer nextToken];
-        id<ANTLRToken> wildcardPayload = [ANTLRCommonToken newToken:0 Text:@"."];
-        node = [ANTLRWildcardTreePattern newANTLRWildcardTreePattern:wildcardPayload];
-        if ( label != nil ) {
-            node.label = label;
-        }
-        return node;
-    }
-    
-    // "ID" or "ID[arg]"
-    if ( ttype != ANTLRLexerTokenTypeID ) {
-        return nil;
-    }
-    NSString *tokenName = [tokenizer toString];
-    ttype = [tokenizer nextToken];
-    if ( [tokenName isEqualToString:@"nil"] ) {
-        return [adaptor emptyNode];
-    }
-    NSString *text = tokenName;
-    // check for arg
-    NSString *arg = nil;
-    if ( ttype == ANTLRLexerTokenTypeARG ) {
-        arg = [tokenizer toString];
-        text = arg;
-        ttype = [tokenizer nextToken];
-    }
-    
-    // create node
-    int treeNodeType = [wizard getTokenType:tokenName];
-    if ( treeNodeType==ANTLRTokenTypeInvalid ) {
-        return nil;
-    }
-    node = [adaptor createTree:treeNodeType Text:text];
-    if ( label!=nil && [node class] == [ANTLRTreePattern class] ) {
-        ((ANTLRTreePattern *)node).label = label;
-    }
-    if ( arg!=nil && [node class] == [ANTLRTreePattern class] ) {
-        ((ANTLRTreePattern *)node).hasTextArg = YES;
-    }
-    return node;
-}
-
-@synthesize tokenizer;
-@synthesize ttype;
-@synthesize wizard;
-@synthesize adaptor;
-@end
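
Note on the pattern parser removed above: it accepts the TreeWizard pattern syntax, that is, a
parenthesized tree such as (PLUS INT INT), an optional %label: prefix on a node, '.' as a node
wildcard, and an optional text argument as in ID[text]. A minimal sketch of driving it directly
with the factory methods visible in this file (the ANTLRTreeWizard argument is assumed to be
configured elsewhere with the grammar's token names):

    #import "ANTLRTreePatternLexer.h"
    #import "ANTLRTreePatternParser.h"
    #import "ANTLRTreeWizard.h"

    // Lex a pattern string, then parse it into a pattern tree.  The wizard supplies
    // token-name -> token-type lookups; the pattern adaptor builds ANTLRTreePattern
    // nodes instead of plain ANTLRCommonTree nodes.
    static id<ANTLRBaseTree> parsePattern(NSString *pattern, ANTLRTreeWizard *wizard)
    {
        ANTLRTreePatternLexer *lexer =
            [ANTLRTreePatternLexer newANTLRTreePatternLexer:pattern];
        ANTLRTreePatternParser *parser =
            [ANTLRTreePatternParser newANTLRTreePatternParser:lexer
                                                       Wizard:wizard
                                                      Adaptor:[ANTLRTreePatternTreeAdaptor newTreeAdaptor]];
        return [parser pattern];   // nil if the pattern is malformed or has trailing junk
    }

The wizard's own find:Pattern: and parse:Pattern:Map: methods, later in this series of files,
wrap exactly this sequence.
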
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRewriter.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRewriter.h
deleted file mode 100644
index 5ab5700..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRewriter.h
+++ /dev/null
@@ -1,78 +0,0 @@
-//
-//  ANTLRTreeRewriter.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeParser.h"
-
-@interface ANTLRfptr : NSObject {
-    id  actor;
-    SEL ruleSEL;
-}
-
-+ (ANTLRfptr *)newANTLRfptrWithRule:(SEL)aRuleAction withObject:(id)anObject;
--initWithRule:(SEL)ruleAction withObject:(id)anObject;
-
-- (id)rule;
-
-@property (retain) id  actor;
-@property SEL ruleSEL;
-@end
-
-@interface ANTLRTreeRewriter : ANTLRTreeParser {
-    BOOL showTransformations;
-    id<ANTLRTokenStream> originalTokenStream;
-    id<ANTLRTreeAdaptor> originalAdaptor;
-    ANTLRfptr *rule;
-    ANTLRfptr *topdown_fptr;
-    ANTLRfptr *bottomup_ftpr;
-}
-
-+ (ANTLRTreeRewriter *) newANTLRTreeRewriter:(id<ANTLRTreeNodeStream>)anInput;
-+ (ANTLRTreeRewriter *) newANTLRTreeRewriter:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-- (id)initWithStream:(id<ANTLRTreeNodeStream>)anInput;
-- (id)initWithStream:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)aState;
-- (id) applyOnce:(ANTLRCommonTree *)t Rule:(ANTLRfptr *)whichRule;
-- (id) applyRepeatedly:(ANTLRCommonTree *)t Rule:(ANTLRfptr *)whichRule;
-- (id) downup:(ANTLRCommonTree *)t;
-- (id) pre:(ANTLRCommonTree *)t;
-- (id) post:(ANTLRCommonTree *)t;
-- (id) downup:(ANTLRCommonTree *)t XForm:(BOOL)aShowTransformations;
-- (void)reportTransformation:(ANTLRCommonTree *)oldTree Tree:(ANTLRCommonTree *)newTree;
-- (id) topdown_fptr;
-- (id) bottomup_ftpr;
-- (id) topdown;
-- (id) bottomup;
-
-@property BOOL showTransformations;
-@property (retain) id<ANTLRTokenStream> originalTokenStream;
-@property (retain) id<ANTLRTreeAdaptor> originalAdaptor;
-@property (retain) ANTLRfptr *rule;
-@end
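
The ANTLRfptr helper declared above is a small selector wrapper: the rewriter stores one per
rule and invokes it through -rule, which forwards to the stored selector on the stored target,
or throws if the target cannot perform it. A brief sketch under that assumption:

    #import "ANTLRTreeRewriter.h"

    // Invoke a rewriter rule indirectly through the selector wrapper, the same way
    // applyOnce:Rule: does.
    static id invokeTopdown(ANTLRTreeRewriter *aRewriter)
    {
        ANTLRfptr *fptr = [ANTLRfptr newANTLRfptrWithRule:@selector(topdown)
                                               withObject:aRewriter];
        return [fptr rule];   // throws an ANTLRRuntimeException if the target cannot perform the selector
    }
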
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRewriter.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRewriter.m
deleted file mode 100644
index 8495436..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRewriter.m
+++ /dev/null
@@ -1,250 +0,0 @@
-//
-//  ANTLRTreeRewriter.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTreeRewriter.h"
-#import "ANTLRCommonTreeNodeStream.h"
-#import "ANTLRTreeRuleReturnScope.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRTreeVisitor.h"
-
-@implementation ANTLRfptr
-
-+ (ANTLRfptr *)newANTLRfptrWithRule:(SEL)aRuleAction withObject:(id)anObject
-{
-    return [[ANTLRfptr alloc] initWithRule:aRuleAction withObject:(id)anObject];
-}
-
--initWithRule:(SEL)aRuleAction withObject:(id)anObject
-{
-    if ((self = [super init]) != nil) {
-        actor = anObject;
-        ruleSEL = aRuleAction;
-    }
-    return self;
-}
-
-- (id)rule
-{
-	if ( [actor respondsToSelector:ruleSEL] )
-		return [actor performSelector:ruleSEL];
-    else
-        @throw [ANTLRRuntimeException newException:@"Unknown Rewrite exception"];
-    return nil;
-}
-
-@synthesize actor;
-@synthesize ruleSEL;
-@end
-
-@implementation ANTLRTreeRewriter
-
-+ (ANTLRTreeRewriter *) newANTLRTreeRewriter:(id<ANTLRTreeNodeStream>)anInput
-{
-    return [[ANTLRTreeRewriter alloc] initWithStream:anInput State:[ANTLRRecognizerSharedState newANTLRRecognizerSharedState]];
-}
-
-+ (ANTLRTreeRewriter *) newANTLRTreeRewriter:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)aState
-{
-    return [[ANTLRTreeRewriter alloc] initWithStream:anInput State:aState];
-}
-
-- (id)initWithStream:(id<ANTLRTreeNodeStream>)anInput
-{
-    SEL aRuleSel;
-
-    if ((self = [super initWithStream:anInput]) != nil) {
-        showTransformations = NO;
-        state = [[ANTLRRecognizerSharedState newANTLRRecognizerSharedState] retain];
-        originalAdaptor = [input getTreeAdaptor];
-        if ( originalAdaptor ) [originalAdaptor retain];
-        originalTokenStream = [input getTokenStream];        
-        if ( originalTokenStream ) [originalTokenStream retain];
-        aRuleSel = @selector(topdown);
-        topdown_fptr = [ANTLRfptr newANTLRfptrWithRule:(SEL)aRuleSel withObject:self];
-        aRuleSel = @selector(bottomup);
-        bottomup_ftpr = [ANTLRfptr newANTLRfptrWithRule:(SEL)aRuleSel withObject:self];        
-    }
-    return self;
-}
-
-- (id)initWithStream:(id<ANTLRTreeNodeStream>)anInput State:(ANTLRRecognizerSharedState *)aState
-{
-    SEL aRuleSel;
-    
-    if ((self = [super initWithStream:anInput]) != nil) {
-        showTransformations = NO;
-        state = aState;
-        if ( state ) [state retain];
-        originalAdaptor = [input getTreeAdaptor];
-        if ( originalAdaptor ) [originalAdaptor retain];
-        originalTokenStream = [input getTokenStream];        
-        if ( originalTokenStream ) [originalTokenStream retain];
-        aRuleSel = @selector(topdown);
-        topdown_fptr = [ANTLRfptr newANTLRfptrWithRule:(SEL)aRuleSel withObject:self];
-        aRuleSel = @selector(bottomup);
-        bottomup_ftpr = [ANTLRfptr newANTLRfptrWithRule:(SEL)aRuleSel withObject:self];        
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRTreeRewriter" );
-#endif
-	if ( state ) [state release];
-	if ( originalAdaptor ) [originalAdaptor release];
-	if ( originalTokenStream ) [originalTokenStream release];
-	[super dealloc];
-}
-
-- (id) applyOnce:(ANTLRCommonTree *)t Rule:(ANTLRfptr *)whichRule
-{
-    if ( t == nil ) return nil;
-    @try {
-        // share TreeParser object but not parsing-related state
-        state = [ANTLRRecognizerSharedState newANTLRRecognizerSharedState];
-        input = [ANTLRCommonTreeNodeStream newANTLRCommonTreeNodeStream:(ANTLRCommonTreeAdaptor *)originalAdaptor Tree:t];
-        [(ANTLRCommonTreeNodeStream *)input setTokenStream:originalTokenStream];
-        [self setBacktrackingLevel:1];
-        ANTLRTreeRuleReturnScope *r = [(ANTLRfptr *)whichRule rule];
-        [self setBacktrackingLevel:0];
-        if ( [self getFailed] )
-            return t;
-        if ( showTransformations &&
-            r != nil && !(t == r.start) && r.start != nil ) {
-            [self reportTransformation:t Tree:r.start];
-        }
-        if ( r != nil && r.start != nil )
-            return r.start;
-        else
-            return t;
-    }
-    @catch (ANTLRRecognitionException *e) {
-        return t;
-    }
-    return t;
-}
-
-- (id) applyRepeatedly:(ANTLRCommonTree *)t Rule:(ANTLRfptr *)whichRule
-{
-    BOOL treeChanged = true;
-    while ( treeChanged ) {
-        ANTLRTreeRewriter *u = [self applyOnce:t Rule:whichRule];
-        treeChanged = !(t == u);
-        t = u;
-    }
-    return t;
-}
-
-- (id) downup:(ANTLRCommonTree *)t
-{
-    return [self downup:t XForm:NO];
-}
-
-- (id) pre:(ANTLRCommonTree *)t
-{
-    return [self applyOnce:t Rule:topdown_fptr];
-}
-
-- (id)post:(ANTLRCommonTree *)t
-{
-    return [self applyRepeatedly:t Rule:bottomup_ftpr];
-}
-
-#ifdef DONTUSENOMO
-public Object downup(Object t, boolean showTransformations) {
-    this.showTransformations = showTransformations;
-    TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor());
-    TreeVisitorAction actions = new TreeVisitorAction() {
-        public Object pre(Object t)  { return applyOnce(t, topdown_fptr); }
-        public Object post(Object t) { return applyRepeatedly(t, bottomup_ftpr); }
-    };
-    t = v.visit(t, actions);
-    return t;
-}
-#endif
-
-- (id) downup:(ANTLRCommonTree *)t XForm:(BOOL)aShowTransformations
-{
-    showTransformations = aShowTransformations;
-    ANTLRTreeVisitor *v = [ANTLRTreeVisitor newANTLRTreeVisitor:[[originalAdaptor class] newTreeAdaptor]];
-    ANTLRTreeVisitorAction *actions = [ANTLRTreeVisitorAction newANTLRTreeVisitorAction];
-    {
-        //public Object pre(Object t)  { return applyOnce(t, topdown_fptr); }
-        [self pre:t];
-        //public Object post(Object t) { return applyRepeatedly(t, bottomup_ftpr); }
-        [self post:t];
-    };
-    t = [v visit:t Action:actions];
-    return t;
-}
-
-/** Override this if you need transformation tracing to go somewhere
- *  other than stdout or if you're not using Tree-derived trees.
- */
-- (void)reportTransformation:(ANTLRCommonTree *)oldTree Tree:(ANTLRCommonTree *)newTree
-{
-    //System.out.println(((Tree)oldTree).toStringTree()+" -> "+ ((Tree)newTree).toStringTree());
-}
-
-- (id)topdown_fptr
-{
-    return [self topdown];
-}
-
-- (id)bottomup_ftpr
-{
-    return [self bottomup];
-}
-
-// methods the downup strategy uses to do the up and down rules.
-// to override, just define tree grammar rule topdown and turn on
-// filter=true.
-- (id) topdown
-// @throws RecognitionException
-{
-    @throw [ANTLRRecognitionException newException:@"TopDown exception"];
-    return nil;
-}
-
-- (id) bottomup
-//@throws RecognitionException
-{
-    @throw [ANTLRRecognitionException newException:@"BottomUp exception"];
-    return nil;
-}
-
-@synthesize showTransformations;
-@synthesize originalTokenStream;
-@synthesize originalAdaptor;
-@synthesize rule;
-@end
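
The base -topdown and -bottomup above only throw; they are intended to be overridden by a tree
grammar generated with filter=true, after which -downup: applies the top-down rule once per node
and the bottom-up rule repeatedly until the tree stops changing. A hedged sketch of driving such
a rewriter; MySimplifier is a hypothetical generated subclass and tree is an AST produced by an
earlier parse:

    #import "ANTLRTreeRewriter.h"
    #import "ANTLRCommonTreeNodeStream.h"
    #import "ANTLRCommonTreeAdaptor.h"

    // Wrap the AST in a tree-node stream and run the down/up rewrite passes (sketch).
    static ANTLRCommonTree *simplify(ANTLRCommonTree *tree)
    {
        ANTLRCommonTreeNodeStream *nodes =
            [ANTLRCommonTreeNodeStream newANTLRCommonTreeNodeStream:[ANTLRCommonTreeAdaptor newTreeAdaptor]
                                                               Tree:tree];
        MySimplifier *simplifier = [[MySimplifier alloc] initWithStream:nodes];  // hypothetical filter=true subclass
        return [simplifier downup:tree XForm:YES];   // YES: report each transformation via reportTransformation:Tree:
    }
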
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.h
deleted file mode 100644
index 9937052..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//
-//  ANTLRTreeRuleReturnScope.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuleReturnScope.h"
-#import "ANTLRCommonTree.h"
-
-@interface ANTLRTreeRuleReturnScope : ANTLRRuleReturnScope {
-    ANTLRCommonTree *start;
-}
-
-@property (retain, getter=getStart, setter=setStart:) ANTLRCommonTree *start;
-
-/** First node or root node of tree matched for this rule. */
-
-+ (id) newReturnScope;
-- (id) init;
-- (void) dealloc;
-- (ANTLRCommonTree *)getStart;
-- (void)setStart:(ANTLRCommonTree *)aStart;
-
-- (id) copyWithZone:(NSZone *)theZone;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.m
deleted file mode 100644
index 0043314..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeRuleReturnScope.m
+++ /dev/null
@@ -1,81 +0,0 @@
-//
-//  ANTLRTreeRuleReturnScope.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/17/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTreeRuleReturnScope.h"
-
-
-@implementation ANTLRTreeRuleReturnScope
-@synthesize start;
-
-+ (id) newReturnScope
-{
-    return [[ANTLRTreeRuleReturnScope alloc] init];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRTreeRuleReturnScope" );
-#endif
-	if ( start ) [start release];
-	[super dealloc];
-}
-
-- (ANTLRCommonTree *)getStart
-{
-    return start;
-}	
-
-- (void)setStart:(ANTLRCommonTree *)aStart
-{
-    if ( start != aStart ) {
-        if ( start ) [start release];
-        [aStart retain];
-    }
-    start = aStart;
-}	
-
-// create a copy, including the text if available
-// the input stream is *not* copied!
-- (id) copyWithZone:(NSZone *)theZone
-{
-    ANTLRTreeRuleReturnScope *copy = [super copyWithZone:theZone];
-    copy.start = start;
-    return copy;
-}
-
-@end
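
The return scope above only carries the first/root node matched by a tree-grammar rule;
applyOnce: in ANTLRTreeRewriter.m reads it back through r.start. A minimal sketch:

    #import "ANTLRTreeRuleReturnScope.h"

    // Store the matched root in a fresh return scope and read it back.
    static ANTLRCommonTree *rememberStart(ANTLRCommonTree *matchedRoot)
    {
        ANTLRTreeRuleReturnScope *r = [ANTLRTreeRuleReturnScope newReturnScope];
        [r setStart:matchedRoot];
        return r.start;   // same node, via the getStart accessor
    }
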
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitor.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitor.h
deleted file mode 100644
index e8af0d0..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitor.h
+++ /dev/null
@@ -1,47 +0,0 @@
-//
-//  ANTLRTreeVisitor.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeAdaptor.h"
-#import "ANTLRTreeVisitorAction.h"
-
-@interface ANTLRTreeVisitor : NSObject {
-   id<ANTLRTreeAdaptor> adaptor;
-}
-+ (ANTLRTreeVisitor *)newANTLRTreeVisitor:(id<ANTLRTreeAdaptor>) anAdaptor;
-+ (ANTLRTreeVisitor *)newANTLRTreeVisitor;
-- (id)init;
-- (id)initWithAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (void) dealloc;
-- (ANTLRTreeVisitor *)visit:(id<ANTLRBaseTree>)t Action:(ANTLRTreeVisitorAction *)action;
-
-@property (retain) id<ANTLRTreeAdaptor> adaptor;
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitor.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitor.m
deleted file mode 100644
index f500d6d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitor.m
+++ /dev/null
@@ -1,103 +0,0 @@
-//
-//  ANTLRTreeVisitor.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTreeVisitor.h"
-#import "ANTLRCommonTreeAdaptor.h"
-
-@implementation ANTLRTreeVisitor
-
-+ (ANTLRTreeVisitor *)newANTLRTreeVisitor:(id<ANTLRTreeAdaptor>)anAdaptor
-{
-    return [[ANTLRTreeVisitor alloc] initWithAdaptor:anAdaptor];
-}
-
-+ (ANTLRTreeVisitor *)newANTLRTreeVisitor
-{
-    return [[ANTLRTreeVisitor alloc] init];
-}
-
-
-- (id)init
-{
-    if ((self = [super init]) != nil) {
-        adaptor = [[ANTLRCommonTreeAdaptor newTreeAdaptor] retain];
-    }
-    return self;
-}
-
-- (id)initWithAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-{
-    if ((self = [super init]) != nil) {
-        adaptor = [anAdaptor retain];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRTreeVisitor" );
-#endif
-    if ( adaptor ) [adaptor release];
-    [super dealloc];
-}
-
-/** Visit every node in tree t and trigger an action for each node
- *  before/after having visited all of its children.
- *  Execute both actions even if t has no children.
- *  If a child visit yields a new child, it can update its
- *  parent's child list or just return the new child.  The
- *  child update code works even if the child visit alters its parent
- *  and returns the new tree.
- *
- *  Return result of applying post action to this node.
- */
-- (ANTLRTreeVisitor *)visit:(ANTLRCommonTree *)t Action:(ANTLRTreeVisitorAction *)action
-{
-    // System.out.println("visit "+((Tree)t).toStringTree());
-    BOOL isNil = [adaptor isNil:t];
-    if ( action != nil && !isNil ) {
-        t = [action pre:(ANTLRTreeVisitorAction *)t]; // if rewritten, walk children of new t
-    }
-    for (int i=0; i < [adaptor getChildCount:t]; i++) {
-        ANTLRCommonTree *child = [adaptor getChild:t At:i];
-        ANTLRCommonTree *visitResult = [self visit:child Action:action];
-        ANTLRCommonTree *childAfterVisit = [adaptor getChild:t At:i];
-        if ( visitResult !=  childAfterVisit ) { // result & child differ?
-            [adaptor setChild:t At:i Child:visitResult];
-        }
-    }
-    if ( action != nil && !isNil ) t = [action post:(ANTLRTreeVisitorAction *)t];
-    return t;
-}
-
-@synthesize adaptor;
-@end
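
One caveat worth noting: the stock ANTLRTreeVisitorAction (next two files) returns nil from
both -pre: and -post:, so a useful traversal needs an action subclass that returns the
(possibly rewritten) node. A sketch under that assumption; MyPrintAction is hypothetical:

    #import "ANTLRTreeVisitor.h"
    #import "ANTLRCommonTree.h"
    #import "ANTLRCommonTreeAdaptor.h"

    // Hypothetical action: log each node and return it unchanged so the visitor keeps
    // walking the original children.  The declared pre:/post: types are
    // ANTLRTreeVisitorAction *, but -visit:Action: above actually passes tree nodes.
    @interface MyPrintAction : ANTLRTreeVisitorAction
    @end

    @implementation MyPrintAction
    - (ANTLRTreeVisitorAction *)pre:(ANTLRTreeVisitorAction *)t  { NSLog(@"pre  %@", t); return t; }
    - (ANTLRTreeVisitorAction *)post:(ANTLRTreeVisitorAction *)t { NSLog(@"post %@", t); return t; }
    @end

    // Walk an existing AST with the action above (sketch).
    static id walk(ANTLRCommonTree *tree)
    {
        ANTLRTreeVisitor *visitor =
            [ANTLRTreeVisitor newANTLRTreeVisitor:[ANTLRCommonTreeAdaptor newTreeAdaptor]];
        // visit:Action: is declared to return ANTLRTreeVisitor *, but the implementation
        // returns the (possibly rewritten) tree, so keep the result as id here.
        return [visitor visit:tree Action:[[MyPrintAction alloc] init]];
    }
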
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitorAction.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitorAction.h
deleted file mode 100644
index c9c0856..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitorAction.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-//  ANTLRTreeVisitorAction.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-
-
-@interface ANTLRTreeVisitorAction : NSObject
-{
-
-}
-
-+ (ANTLRTreeVisitorAction *)newANTLRTreeVisitorAction;
-- (id) init;
-
-/** Execute an action before visiting children of t.  Return t or
- *  a rewritten t.  It is up to the visitor to decide what to do
- *  with the return value.  Children of returned value will be
- *  visited if using TreeVisitor.visit().
- */
-- (ANTLRTreeVisitorAction *)pre:(ANTLRTreeVisitorAction *) t;
-
-/** Execute an action after visiting children of t.  Return t or
- *  a rewritten t.  It is up to the visitor to decide what to do
- *  with the return value.
- */
-- (ANTLRTreeVisitorAction *)post:(ANTLRTreeVisitorAction *) t;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitorAction.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitorAction.m
deleted file mode 100644
index 09a5920..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeVisitorAction.m
+++ /dev/null
@@ -1,69 +0,0 @@
-//
-//  ANTLRTreeVisitorAction.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTreeVisitorAction.h"
-
-
-@implementation ANTLRTreeVisitorAction
-
-+ (ANTLRTreeVisitorAction *)newANTLRTreeVisitorAction
-{
-    return [[ANTLRTreeVisitorAction alloc] init];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil ) {
-    }
-    return self;
-}
-
-/** Execute an action before visiting children of t.  Return t or
- *  a rewritten t.  It is up to the visitor to decide what to do
- *  with the return value.  Children of returned value will be
- *  visited if using TreeVisitor.visit().
- */
-- (ANTLRTreeVisitorAction *)pre:(ANTLRTreeVisitorAction *) t
-{
-    return nil;
-}
-
-/** Execute an action after visiting children of t.  Return t or
- *  a rewritten t.  It is up to the visitor to decide what to do
- *  with the return value.
- */
-- (ANTLRTreeVisitorAction *)post:(ANTLRTreeVisitorAction *) t
-{
-    return nil;
-}
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeWizard.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeWizard.h
deleted file mode 100644
index 7a57c1e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeWizard.h
+++ /dev/null
@@ -1,136 +0,0 @@
-//
-//  ANTLRTreeWizard.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRMapElement.h"
-#import "ANTLRMap.h"
-#import "AMutableArray.h"
-
-@class ANTLRVisitor;
-
-@protocol ANTLRContextVisitor <NSObject>
-// TODO: should this be called visit or something else?
-- (void) visit:(ANTLRCommonTree *)t Parent:(ANTLRCommonTree *)parent ChildIndex:(NSInteger)childIndex Map:(ANTLRMap *)labels;
-
-@end
-
-@interface ANTLRVisitor : NSObject <ANTLRContextVisitor> {
-    NSInteger action;
-    id actor;
-    id object1;
-    id object2;
-}
-+ (ANTLRVisitor *)newANTLRVisitor:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2;
-- (id) initWithAction:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2;
-
-- (void) visit:(ANTLRCommonTree *)t;
-- (void) visit:(ANTLRCommonTree *)t Parent:(ANTLRCommonTree *)parent ChildIndex:(NSInteger)childIndex Map:(ANTLRMap *)labels;
-
-@property NSInteger action;
-@property (retain) id actor;
-@property (retain) id object1;
-@property (retain) id object2;
-@end
-
-/** When using %label:TOKENNAME in a tree for parse(), we must
- *  track the label.
- */
-@interface ANTLRTreePattern : ANTLRCommonTree {
-    NSString *label;
-    BOOL      hasTextArg;
-}
-@property (retain, getter=getLabel, setter=setLabel:) NSString *label;
-@property (assign, getter=getHasTextArg, setter=setHasTextArg:) BOOL hasTextArg;
-
-+ (ANTLRCommonTree *)newANTLRTreePattern:(id<ANTLRToken>)payload;
-
-- (id) initWithToken:(id<ANTLRToken>)payload;
-- (NSString *)toString;
-@end
-
-@interface ANTLRWildcardTreePattern : ANTLRTreePattern {
-}
-
-+ (ANTLRWildcardTreePattern *)newANTLRWildcardTreePattern:(id<ANTLRToken>)payload;
-- (id) initWithToken:(id<ANTLRToken>)payload;
-@end
-
-/** This adaptor creates TreePattern objects for use during scan() */
-@interface ANTLRTreePatternTreeAdaptor : ANTLRCommonTreeAdaptor {
-}
-+ (ANTLRTreePatternTreeAdaptor *)newTreeAdaptor;
-- (id) init;
-- (ANTLRCommonTree *)createTreePattern:(id<ANTLRToken>)payload;
-
-@end
-
-@interface ANTLRTreeWizard : NSObject {
-	id<ANTLRTreeAdaptor> adaptor;
-	ANTLRMap *tokenNameToTypeMap;
-}
-+ (ANTLRTreeWizard *) newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)anAdaptor;
-+ (ANTLRTreeWizard *)newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)adaptor Map:(ANTLRMap *)aTokenNameToTypeMap;
-+ (ANTLRTreeWizard *)newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)adaptor TokenNames:(NSArray *)theTokNams;
-+ (ANTLRTreeWizard *)newANTLRTreeWizardWithTokenNames:(NSArray *)theTokNams;
-- (id) init;
-- (id) initWithAdaptor:(id<ANTLRTreeAdaptor>)adaptor;
-- (id) initWithAdaptor:(id<ANTLRTreeAdaptor>)adaptor Map:(ANTLRMap *)tokenNameToTypeMap;
-- (id) initWithTokenNames:(NSArray *)theTokNams;
-- (id) initWithTokenNames:(id<ANTLRTreeAdaptor>)anAdaptor TokenNames:(NSArray *)theTokNams;
-- (void) dealloc;
-- (ANTLRMap *)computeTokenTypes:(NSArray *)theTokNams;
-- (NSInteger)getTokenType:(NSString *)tokenName;
-- (ANTLRMap *)index:(ANTLRCommonTree *)t;
-- (void) _index:(ANTLRCommonTree *)t Map:(ANTLRMap *)m;
-- (AMutableArray *)find:(ANTLRCommonTree *) t Pattern:(NSString *)pattern;
-- (ANTLRTreeWizard *)findFirst:(ANTLRCommonTree *) t Type:(NSInteger)ttype;
-- (ANTLRTreeWizard *)findFirst:(ANTLRCommonTree *) t Pattern:(NSString *)pattern;
-- (void) visit:(ANTLRCommonTree *)t Type:(NSInteger)ttype Visitor:(ANTLRVisitor *)visitor;
-- (void) _visit:(ANTLRCommonTree *)t
-         Parent:(ANTLRCommonTree *)parent
-     ChildIndex:(NSInteger)childIndex
-           Type:(NSInteger)ttype
-        Visitor:(ANTLRVisitor *)visitor;
-- (void)visit:(ANTLRCommonTree *)t Pattern:(NSString *)pattern Visitor:(ANTLRVisitor *)visitor;
-- (BOOL)parse:(ANTLRCommonTree *)t Pattern:(NSString *)pattern Map:(ANTLRMap *)labels;
-- (BOOL) parse:(ANTLRCommonTree *) t Pattern:(NSString *)pattern;
-- (BOOL) _parse:(ANTLRCommonTree *)t1 Pattern:(ANTLRCommonTree *)tpattern Map:(ANTLRMap *)labels;
-- (ANTLRCommonTree *) createTree:(NSString *)pattern;
-- (BOOL)equals:(id)t1 O2:(id)t2 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-- (BOOL)equals:(id)t1 O2:(id)t2;
-- (BOOL) _equals:(id)t1 O2:(id)t2 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor;
-
-@property (retain) id<ANTLRTreeAdaptor> adaptor;
-@property (retain) ANTLRMap *tokenNameToTypeMap;
-@end
-
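
For reference, the wizard interface above is the high-level entry point for the pattern
machinery in these files: it builds trees from pattern strings, finds matching subtrees, and
does structural matching with %label: bindings. A hedged usage sketch restricted to the methods
declared above; MyExprParser, its token-name table, and the INT/PLUS token types are
hypothetical examples:

    #import "ANTLRTreeWizard.h"
    #import "ANTLRCommonTreeAdaptor.h"

    // Build a wizard from a grammar's token-name table, construct a tree from a pattern,
    // then match and search it (sketch).
    static void wizardDemo(NSArray *tokenNames)   // e.g. the generated MyExprParser token names (hypothetical)
    {
        ANTLRTreeWizard *wizard =
            [ANTLRTreeWizard newANTLRTreeWizard:[ANTLRCommonTreeAdaptor newTreeAdaptor]
                                     TokenNames:tokenNames];

        // "(root child1 ... childN)" syntax; ID[text] attaches a text argument.
        ANTLRCommonTree *t = [wizard createTree:@"(PLUS INT[3] INT[4])"];

        // Structural match; on success, labels maps @"x" and @"y" to the INT nodes.
        ANTLRMap *labels = [ANTLRMap newANTLRMap];
        if ( [wizard parse:t Pattern:@"(PLUS %x:INT %y:INT)" Map:labels] ) {
            NSLog(@"pattern matched: %@", labels);
        }

        // Collect every subtree matching a pattern; '.' is the node wildcard.
        AMutableArray *sums = [wizard find:t Pattern:@"(PLUS . .)"];
        NSLog(@"PLUS subtrees: %@", sums);
    }
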
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeWizard.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeWizard.m
deleted file mode 100644
index 78131a8..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRTreeWizard.m
+++ /dev/null
@@ -1,735 +0,0 @@
-//
-//  ANTLRTreeWizard.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/18/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRTreeWizard.h"
-#import "ANTLRTreePatternLexer.h"
-#import "ANTLRTreePatternParser.h"
-#import "ANTLRIntArray.h"
-
-@implementation ANTLRVisitor
-
-+ (ANTLRVisitor *)newANTLRVisitor:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2
-{
-    return [[ANTLRVisitor alloc] initWithAction:anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2];
-}
-
-- (id) initWithAction:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2
-{
-    if ((self = [super init]) != nil) {
-        action = anAction;
-        actor = anActor;
-        if ( actor ) [actor retain];
-        object1 = anObject1;
-        if ( object1 ) [object1 retain];
-        object2 = anObject2;
-        if ( object2 ) [object2 retain];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRVisitor" );
-#endif
-    if ( actor ) [actor release];
-    if ( object1 ) [object1 release];
-    if ( object2 ) [object2 release];
-    [super dealloc];
-}
-
-- (void) visit:(ANTLRCommonTree *)t Parent:(ANTLRCommonTree *)parent ChildIndex:(NSInteger)childIndex Map:(ANTLRMap *)labels
-{
-    switch (action) {
-        case 0:
-            [(ANTLRMap *)object2 /* labels */ clear];
-            if ( [(ANTLRTreeWizard *)actor _parse:t Pattern:object1/* tpattern */ Map:object2 /* labels */] ) {
-                [self visit:t Parent:parent ChildIndex:childIndex Map:object2 /* labels */];
-            }
-            break;
-        case 1:
-            if ( [(ANTLRTreeWizard *)actor _parse:t Pattern:object1/* tpattern */ Map:nil] ) {
-                [(AMutableArray *)object2/* subtrees */ addObject:t];
-            }
-            break;
-    }
-    // [self visit:t];
-    return;
-}
-
-- (void) visit:(ANTLRCommonTree *)t
-{
-    [object1 addObject:t];
-    return;
-}
-
-@synthesize action;
-@synthesize actor;
-@synthesize object1;
-@synthesize object2;
-@end
-
-/** When using %label:TOKENNAME in a tree for parse(), we must
- *  track the label.
- */
-@implementation ANTLRTreePattern
-
-@synthesize label;
-@synthesize hasTextArg;
-
-+ (ANTLRCommonTree *)newANTLRTreePattern:(id<ANTLRToken>)payload
-{
-    return (ANTLRCommonTree *)[[ANTLRTreePattern alloc] initWithToken:payload];
-}
-
-- (id) initWithToken:(id<ANTLRToken>)payload
-{
-    self = [super initWithToken:payload];
-    if ( self != nil ) {
-    }
-    return (ANTLRCommonTree *)self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRTreePattern" );
-#endif
-    if ( label ) [label release];
-    [super dealloc];
-}
-
-- (NSString *)toString
-{
-    if ( label != nil ) {
-        return [NSString stringWithFormat:@"\% %@ : %@", label, [super toString]];
-    }
-    else {
-        return [super toString];				
-    }
-}
-
-@end
-
-@implementation ANTLRWildcardTreePattern
-
-+ (ANTLRWildcardTreePattern *)newANTLRWildcardTreePattern:(id<ANTLRToken>)payload
-{
-    return(ANTLRWildcardTreePattern *)[[ANTLRWildcardTreePattern alloc] initWithToken:(id<ANTLRToken>)payload];
-}
-
-- (id) initWithToken:(id<ANTLRToken>)payload
-{
-    self = [super initWithToken:payload];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-@end
-
-/** This adaptor creates TreePattern objects for use during scan() */
-@implementation ANTLRTreePatternTreeAdaptor
-
-+ (ANTLRTreePatternTreeAdaptor *)newTreeAdaptor
-{
-    return [[ANTLRTreePatternTreeAdaptor alloc] init];
-}
-
-- (id) init
-{
-    self = [super init];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-- (ANTLRCommonTree *)createTreePattern:(id<ANTLRToken>)payload
-{
-    return (ANTLRCommonTree *)[super create:payload];
-}
-          
-@end
-
-@implementation ANTLRTreeWizard
-
-// TODO: build indexes for the wizard
-
-/** During fillBuffer(), we can make a reverse index from a set
- *  of token types of interest to the list of indexes into the
- *  node stream.  This lets us convert a node pointer to a
- *  stream index semi-efficiently for a list of interesting
- *  nodes such as function definition nodes (you'll want to seek
- *  to their bodies for an interpreter).  Also useful for doing
- *  dynamic searches; i.e., go find me all PLUS nodes.
- protected Map tokenTypeToStreamIndexesMap;
- 
- ** If tokenTypesToReverseIndex set to INDEX_ALL then indexing
- *  occurs for all token types.
- public static final Set INDEX_ALL = new HashSet();
- 
- ** A set of token types user would like to index for faster lookup.
- *  If this is INDEX_ALL, then all token types are tracked.  If nil,
- *  then none are indexed.
- protected Set tokenTypesToReverseIndex = nil;
- */
-
-+ (ANTLRTreeWizard *) newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)anAdaptor
-{
-    return [[ANTLRTreeWizard alloc] initWithAdaptor:anAdaptor];
-}
-
-+ (ANTLRTreeWizard *)newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)anAdaptor Map:(ANTLRMap *)aTokenNameToTypeMap
-{
-    return [[ANTLRTreeWizard alloc] initWithAdaptor:anAdaptor Map:aTokenNameToTypeMap];
-}
-
-+ (ANTLRTreeWizard *)newANTLRTreeWizard:(id<ANTLRTreeAdaptor>)anAdaptor TokenNames:(NSArray *)theTokNams
-{
-    return [[ANTLRTreeWizard alloc] initWithTokenNames:anAdaptor TokenNames:theTokNams];
-}
-
-+ (ANTLRTreeWizard *)newANTLRTreeWizardWithTokenNames:(NSArray *)theTokNams
-{
-    return [[ANTLRTreeWizard alloc] initWithTokenNames:theTokNams];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil) {
-    }
-    return self;
-}
-
-- (id) initWithAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-{
-    if ((self = [super init]) != nil) {
-        adaptor = anAdaptor;
-        if ( adaptor ) [adaptor retain];
-    }
-    return self;
-}
-            
-- (id) initWithAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor Map:(ANTLRMap *)aTokenNameToTypeMap
-{
-    if ((self = [super init]) != nil) {
-        adaptor = anAdaptor;
-        if ( adaptor ) [adaptor retain];
-        tokenNameToTypeMap = aTokenNameToTypeMap;
-   }
-    return self;
-}
-
-- (id) initWithTokenNames:(NSArray *)theTokNams
-{
-    if ((self = [super init]) != nil) {
-#pragma warning Fix initWithTokenNames.
-        // adaptor = anAdaptor;
-        //tokenNameToTypeMap = aTokenNameToTypeMap;
-        tokenNameToTypeMap = [[self computeTokenTypes:theTokNams] retain];
-    }
-    return self;
-}
-             
-- (id) initWithTokenNames:(id<ANTLRTreeAdaptor>)anAdaptor TokenNames:(NSArray *)theTokNams
-{
-    if ((self = [super init]) != nil) {
-        adaptor = anAdaptor;
-        if ( adaptor ) [adaptor retain];
-        // tokenNameToTypeMap = aTokenNameToTypeMap;
-        tokenNameToTypeMap = [[self computeTokenTypes:theTokNams] retain];
-    }
-    return self;
-}
-            
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRTreePatternTreeAdaptor" );
-#endif
-    if ( adaptor ) [adaptor release];
-    if ( tokenNameToTypeMap ) [tokenNameToTypeMap release];
-    [super dealloc];
-}
-
-/** Compute a Map<String, Integer> that is an inverted index of
- *  tokenNames (which maps int token types to names).
- */
-- (ANTLRMap *)computeTokenTypes:(NSArray *)theTokNams
-{
-    ANTLRMap *m = [ANTLRMap newANTLRMap];
-    if ( theTokNams == nil ) {
-        return m;
-    }
-    for (int ttype = ANTLRTokenTypeMIN; ttype < [theTokNams count]; ttype++) {
-        NSString *name = (NSString *) [theTokNams objectAtIndex:ttype];
-        [m putName:name TType:ttype];
-    }
-    return m;
-}
-
-/** Using the map of token names to token types, return the type. */
-- (NSInteger)getTokenType:(NSString *)tokenName
-{
-    if ( tokenNameToTypeMap == nil ) {
-        return ANTLRTokenTypeInvalid;
-    }
-    NSInteger aTType = (NSInteger)[tokenNameToTypeMap getTType:tokenName];
-    if ( aTType != -1 ) {
-        return aTType;
-    }
-    return ANTLRTokenTypeInvalid;
-}
-
-/** Walk the entire tree and make a node name to nodes mapping.
- *  For now, use recursion but later nonrecursive version may be
- *  more efficient.  Returns Map<Integer, List> where the List is
- *  of your AST node type.  The Integer is the token type of the node.
- *
- *  TODO: save this index so that find and visit are faster
- */
-- (ANTLRMap *)index:(ANTLRCommonTree *)t
-{
-    ANTLRMap *m = [ANTLRMap newANTLRMap];
-    [self _index:t Map:m];
-    return m;
-}
-
-/** Do the work for index */
-- (void) _index:(ANTLRCommonTree *)t Map:(ANTLRMap *)m
-{
-    if ( t==nil ) {
-        return;
-    }
-#pragma warning Fix _index use of ANTLRMap.
-    NSInteger ttype = [adaptor getType:t];
-    ANTLRMap *elements = (ANTLRMap *)[m getName:ttype];
-    if ( elements == nil ) {
-        elements = [ANTLRMap newANTLRMapWithLen:100];
-        [m putNode:ttype Node:elements];
-    }
-    [elements addObject:t];
-    int n = [adaptor getChildCount:t];
-    for (int i=0; i<n; i++) {
-        ANTLRCommonTree * child = [adaptor getChild:t At:i];
-        [self _index:child Map:m];
-    }
-}
-
-/** Return a List of tree nodes with token type ttype */
-- (AMutableArray *)find:(ANTLRCommonTree *)t Type:(NSInteger)ttype
-{
-#ifdef DONTUSENOMO
-    final List nodes = new ArrayList();
-    visit(t, ttype, new TreeWizard.Visitor() {
-        public void visit(Object t) {
-            [nodes addObject t];
-        }
-    } );
-#endif
-    AMutableArray *nodes = [AMutableArray arrayWithCapacity:100];
-    ANTLRVisitor *contextVisitor = [ANTLRVisitor newANTLRVisitor:3 Actor:self Object:(id)nodes Object:nil];
-    [self visit:t Type:ttype Visitor:contextVisitor];
-    return nodes;
-}
-
-/** Return a List of subtrees matching pattern. */
-- (AMutableArray *)find:(ANTLRCommonTree *)t Pattern:(NSString *)pattern
-{
-    AMutableArray *subtrees = [AMutableArray arrayWithCapacity:100];
-    // Create a TreePattern from the pattern
-    ANTLRTreePatternLexer *tokenizer = [ANTLRTreePatternLexer newANTLRTreePatternLexer:pattern];
-    ANTLRTreePatternParser *parser = [ANTLRTreePatternParser newANTLRTreePatternParser:tokenizer
-                                                                                     Wizard:self
-                                                                                    Adaptor:[ANTLRTreePatternTreeAdaptor newTreeAdaptor]];
-    ANTLRCommonTree *tpattern = (ANTLRCommonTree *)[parser pattern];
-    // don't allow invalid patterns
-    if ( tpattern == nil ||
-        [tpattern isNil] ||
-        [tpattern class] == [ANTLRWildcardTreePattern class] )
-    {
-        return nil;
-    }
-    int rootTokenType = [tpattern type];
-#ifdef DONTUSENOMO
-    visit(t, rootTokenType, new TreeWizard.ContextVisitor() {
-        public void visit(Object t, Object parent, int childIndex, Map labels) {
-            if ( _parse(t, tpattern, null) ) {
-                subtrees.add(t);
-            }
-        }
-    } );
-#endif
-    ANTLRVisitor *contextVisitor = [ANTLRVisitor newANTLRVisitor:1 Actor:self Object:tpattern Object:subtrees];
-    [self visit:t Type:rootTokenType Visitor:contextVisitor];
-    return subtrees;
-}
-
-- (ANTLRTreeWizard *)findFirst:(ANTLRCommonTree *) t Type:(NSInteger)ttype
-{
-    return nil;
-}
-
-- (ANTLRTreeWizard *)findFirst:(ANTLRCommonTree *) t Pattern:(NSString *)pattern
-{
-    return nil;
-}
-
-/** Visit every ttype node in t, invoking the visitor.  This is a quicker
- *  version of the general visit(t, pattern) method.  The labels arg
- *  of the visitor action method is never set (it's nil) since using
- *  a token type rather than a pattern doesn't let us set a label.
- */
-- (void) visit:(ANTLRCommonTree *)t Type:(NSInteger)ttype Visitor:(ANTLRVisitor *)visitor
-{
-    [self _visit:t Parent:nil ChildIndex:0 Type:ttype Visitor:visitor];
-}
-
-/** Do the recursive work for visit */
-- (void) _visit:(ANTLRCommonTree *)t
-         Parent:(ANTLRCommonTree *)parent
-     ChildIndex:(NSInteger)childIndex
-           Type:(NSInteger)ttype
-        Visitor:(ANTLRVisitor *)visitor
-{
-    if ( t == nil ) {
-        return;
-    }
-    if ( [adaptor getType:t] == ttype ) {
-        [visitor visit:t Parent:parent ChildIndex:childIndex Map:nil];
-    }
-    int n = [adaptor getChildCount:t];
-    for (int i=0; i<n; i++) {
-        ANTLRCommonTree * child = [adaptor getChild:t At:i];
-        [self _visit:child Parent:t ChildIndex:i Type:ttype Visitor:visitor];
-    }
-}
-
-/** For all subtrees that match the pattern, execute the visit action.
- *  The implementation uses the root node of the pattern in combination
- *  with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
- *  Patterns with wildcard roots are also not allowed.
- */
-- (void)visit:(ANTLRCommonTree *)t Pattern:(NSString *)pattern Visitor:(ANTLRVisitor *)visitor
-{
-    // Create a TreePattern from the pattern
-    ANTLRTreePatternLexer *tokenizer = [ANTLRTreePatternLexer newANTLRTreePatternLexer:pattern];
-    ANTLRTreePatternParser *parser =
-    [ANTLRTreePatternParser newANTLRTreePatternParser:tokenizer Wizard:self Adaptor:[ANTLRTreePatternTreeAdaptor newTreeAdaptor]];
-    ANTLRCommonTree *tpattern = [parser pattern];
-    // don't allow invalid patterns
-    if ( tpattern == nil ||
-        [tpattern isNil] ||
-        [tpattern class] == [ANTLRWildcardTreePattern class] )
-    {
-        return;
-    }
-    ANTLRMapElement *labels = [ANTLRMap newANTLRMap]; // reused for each _parse
-    int rootTokenType = [tpattern type];
-#pragma warning This is another one of those screwy nested constructs that I have to figure out
-#ifdef DONTUSENOMO
-    visit(t, rootTokenType, new TreeWizard.ContextVisitor() {
-        public void visit(Object t, Object parent, int childIndex, Map unusedlabels) {
-            // the unusedlabels arg is null as visit on token type doesn't set.
-            labels.clear();
-            if ( _parse(t, tpattern, labels) ) {
-                visitor.visit(t, parent, childIndex, labels);
-            }
-        }
-    });
-#endif
-    ANTLRVisitor *contextVisitor = [ANTLRVisitor newANTLRVisitor:0 Actor:self Object:tpattern Object:labels];
-    [self visit:t Type:rootTokenType Visitor:contextVisitor];
-}
-
-/** Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
- *  on the various nodes and '.' (dot) as the node/subtree wildcard,
- *  return true if the pattern matches and fill the labels Map with
- *  the labels pointing at the appropriate nodes.  Return false if
- *  the pattern is malformed or the tree does not match.
- *
- *  If a node specifies a text arg in pattern, then that must match
- *  for that node in t.
- *
- *  TODO: what's a better way to indicate bad pattern? Exceptions are a hassle 
- */
-- (BOOL)parse:(ANTLRCommonTree *)t Pattern:(NSString *)pattern Map:(ANTLRMap *)labels
-{
-#ifdef DONTUSENOMO
-    TreePatternLexer tokenizer = new TreePatternLexer(pattern);
-    TreePatternParser parser =
-    new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
-    TreePattern tpattern = (TreePattern)parser.pattern();
-    /*
-     System.out.println("t="+((Tree)t).toStringTree());
-     System.out.println("scant="+tpattern.toStringTree());
-     */
-    boolean matched = _parse(t, tpattern, labels);
-    return matched;
-#endif
-    ANTLRTreePatternLexer *tokenizer = [ANTLRTreePatternLexer newANTLRTreePatternLexer:pattern];
-    ANTLRTreePatternParser *parser = [ANTLRTreePatternParser newANTLRTreePatternParser:tokenizer
-                                                                                Wizard:self
-                                                                               Adaptor:[ANTLRTreePatternTreeAdaptor newTreeAdaptor]];
-    ANTLRCommonTree *tpattern = [parser pattern];
-    /*
-     System.out.println("t="+((Tree)t).toStringTree());
-     System.out.println("scant="+tpattern.toStringTree());
-     */
-    //BOOL matched = [self _parse:t Pattern:tpattern Map:labels];
-    //return matched;
-    return [self _parse:t Pattern:tpattern Map:labels];
-}
-
-- (BOOL) parse:(ANTLRCommonTree *)t Pattern:(NSString *)pattern
-{
-    return [self parse:t Pattern:pattern Map:nil];
-}
-
-/** Do the work for parse. Check to see if the t2 pattern fits the
- *  structure and token types in t1.  Check text if the pattern has
- *  text arguments on nodes.  Fill labels map with pointers to nodes
- *  in tree matched against nodes in pattern with labels.
- */
-- (BOOL) _parse:(ANTLRCommonTree *)t1 Pattern:(ANTLRCommonTree *)aTPattern Map:(ANTLRMap *)labels
-{
-    ANTLRTreePattern *tpattern;
-    // make sure both are non-nil
-    if ( t1 == nil || aTPattern == nil ) {
-        return NO;
-    }
-    if ( [aTPattern isKindOfClass:[ANTLRWildcardTreePattern class]] ) {
-        tpattern = (ANTLRTreePattern *)aTPattern;
-    }
-    // check roots (wildcard matches anything)
-    if ( [tpattern class] != [ANTLRWildcardTreePattern class] ) {
-        if ( [adaptor getType:t1] != [tpattern type] )
-            return NO;
-        // if pattern has text, check node text
-        if ( tpattern.hasTextArg && ![[adaptor getText:t1] isEqualToString:[tpattern text]] ) {
-            return NO;
-        }
-    }
-    if ( tpattern.label != nil && labels!=nil ) {
-        // map label in pattern to node in t1
-        [labels putName:tpattern.label Node:t1];
-    }
-    // check children
-    int n1 = [adaptor getChildCount:t1];
-    int n2 = [tpattern getChildCount];
-    if ( n1 != n2 ) {
-        return NO;
-    }
-    for (int i=0; i<n1; i++) {
-        ANTLRCommonTree * child1 = [adaptor getChild:t1 At:i];
-        ANTLRCommonTree *child2 = (ANTLRCommonTree *)[tpattern getChild:i];
-        if ( ![self _parse:child1 Pattern:child2 Map:labels] ) {
-            return NO;
-        }
-    }
-    return YES;
-}
-
-/** Create a tree or node from the indicated tree pattern that closely
- *  follows ANTLR tree grammar tree element syntax:
- *
- * 		(root child1 ... childN).
- *
- *  You can also just pass in a node: ID
- * 
- *  Any node can have a text argument: ID[foo]
- *  (notice there are no quotes around foo--it's clear it's a string).
- *
- *  nil is a special name meaning "give me a nil node".  Useful for
- *  making lists: (nil A B C) is a list of A B C.
- */
-- (ANTLRCommonTree *) createTree:(NSString *)pattern
-{
-    ANTLRTreePatternLexer *tokenizer = [ANTLRTreePatternLexer newANTLRTreePatternLexer:pattern];
-    ANTLRTreePatternParser *parser = [ANTLRTreePatternParser newANTLRTreePatternParser:tokenizer Wizard:self Adaptor:adaptor];
-    ANTLRCommonTree * t = [parser pattern];
-    return t;
-}
-
-/** Compare t1 and t2; return true if token types/text, structure match exactly.
- *  The trees are examined in their entirety so that (A B) does not match
- *  (A B C) nor (A (B C)). 
- // TODO: allow them to pass in a comparator
- *  TODO: have a version that is nonstatic so it can use instance adaptor
- *
- *  I cannot rely on the tree node's equals() implementation as I make
- *  no constraints at all on the node types nor interface etc... 
- */
-- (BOOL)equals:(id)t1 O2:(id)t2 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-{
-    return [self _equals:t1 O2:t2 Adaptor:anAdaptor];
-}
-
-/** Compare type, structure, and text of two trees, assuming adaptor in
- *  this instance of a TreeWizard.
- */
-- (BOOL)equals:(id)t1 O2:(id)t2
-{
-    return [self _equals:t1 O2:t2 Adaptor:adaptor];
-}
-
-- (BOOL) _equals:(id)t1 O2:(id)t2 Adaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-{
-    // make sure both are non-nil
-    if ( t1==nil || t2==nil ) {
-        return NO;
-    }
-    // check roots
-    if ( [anAdaptor getType:t1] != [anAdaptor getType:t2] ) {
-        return NO;
-    }
-    if ( ![[anAdaptor getText:t1] isEqualTo:[anAdaptor getText:t2]] ) {
-        return NO;
-    }
-    // check children
-    NSInteger n1 = [anAdaptor getChildCount:t1];
-    NSInteger n2 = [anAdaptor getChildCount:t2];
-    if ( n1 != n2 ) {
-        return NO;
-    }
-    for (int i=0; i<n1; i++) {
-        ANTLRCommonTree * child1 = [anAdaptor getChild:t1 At:i];
-        ANTLRCommonTree * child2 = [anAdaptor getChild:t2 At:i];
-        if ( ![self _equals:child1 O2:child2 Adaptor:anAdaptor] ) {
-            return NO;
-        }
-    }
-    return YES;
-}
-
-// TODO: next stuff taken from CommonTreeNodeStream
-
-/** Given a node, add this to the reverse index tokenTypeToStreamIndexesMap.
- *  You can override this method to alter how indexing occurs.  The
- *  default is to create a
- *
- *    Map<Integer token type, ArrayList<Integer stream index>>
- *
- *  This data structure allows you to find all nodes with type INT in order.
- *
- *  If you really need to find a node of type, say, FUNC quickly then perhaps
- *
- *    Map<Integer token type, Map<Object tree node, Integer stream index>>
- *
- *  would be better for you.  The interior maps map a tree node to
- *  the index so you don't have to search linearly for a specific node.
- *
- *  If you change this method, you will likely need to change
- *  getNodeIndex(), which extracts information.
-- (void)fillReverseIndex:(ANTLRCommonTree *)node Index:(NSInteger)streamIndex
-{
-    //System.out.println("revIndex "+node+"@"+streamIndex);
-    if ( tokenTypesToReverseIndex == nil ) {
-        return; // no indexing if this is empty (nothing of interest)
-    }
-    if ( tokenTypeToStreamIndexesMap == nil ) {
-        tokenTypeToStreamIndexesMap = [ANTLRMap newANTLRMap]; // first indexing op
-    }
-    int tokenType = [adaptor getType:node];
-    Integer tokenTypeI = new Integer(tokenType);
-    if ( !(tokenTypesToReverseIndex == INDEX_ALL ||
-            [tokenTypesToReverseIndex contains:tokenTypeI]) ) {
-        return; // tokenType not of interest
-    }
-    NSInteger streamIndexI = streamIndex;
-    AMutableArray *indexes = (AMutableArray *)[tokenTypeToStreamIndexesMap objectAtIndex:tokenTypeI];
-    if ( indexes==nil ) {
-        indexes = [AMutableArray arrayWithCapacity:100]; // no list yet for this token type
-        indexes.add(streamIndexI); // not there yet, add
-        [tokenTypeToStreamIndexesMap put:tokenTypeI Idexes:indexes];
-    }
-    else {
-        if ( ![indexes contains:streamIndexI] ) {
-            [indexes add:streamIndexI]; // not there yet, add
-        }
-    }
-}
- 
- ** Track the indicated token type in the reverse index.  Call this
- *  repeatedly for each type or use variant with Set argument to
- *  set all at once.
- * @param tokenType
-public void reverseIndex:(NSInteger)tokenType
-{
-    if ( tokenTypesToReverseIndex == nil ) {
-        tokenTypesToReverseIndex = [ANTLRMap newANTLRMap];
-    }
-    else if ( tokenTypesToReverseIndex == INDEX_ALL ) {
-        return;
-    }
-    tokenTypesToReverseIndex.add(new Integer(tokenType));
-}
- 
-** Track the indicated token types in the reverse index. Set
- *  to INDEX_ALL to track all token types.
-public void reverseIndex(Set tokenTypes) {
-    tokenTypesToReverseIndex = tokenTypes;
-}
- 
- ** Given a node pointer, return its index into the node stream.
- *  This is not its Token stream index.  If there is no reverse map
- *  from node to stream index or the map does not contain entries
- *  for node's token type, a linear search of entire stream is used.
- *
- *  Return -1 if exact node pointer not in stream.
-public int getNodeIndex(Object node) {
-    //System.out.println("get "+node);
-    if ( tokenTypeToStreamIndexesMap==nil ) {
-        return getNodeIndexLinearly(node);
-    }
-    int tokenType = adaptor.getType(node);
-    Integer tokenTypeI = new Integer(tokenType);
-    ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
-    if ( indexes==nil ) {
-        //System.out.println("found linearly; stream index = "+getNodeIndexLinearly(node));
-        return getNodeIndexLinearly(node);
-    }
-    for (int i = 0; i < indexes.size(); i++) {
-        Integer streamIndexI = (Integer)indexes.get(i);
-        Object n = get(streamIndexI.intValue());
-        if ( n==node ) {
-            //System.out.println("found in index; stream index = "+streamIndexI);
-            return streamIndexI.intValue(); // found it!
-        }
-    }
-    return -1;
-}
- 
-*/
-
-@synthesize adaptor;
-@synthesize tokenNameToTypeMap;
-@end
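For context, a minimal usage sketch of the tree-pattern API removed above. `wizard` stands for an already-initialized instance of the wizard class implemented in this hunk (its constructor lies outside this hunk), and reading labels back out depends on ANTLRMap's lookup API, which is likewise not shown here.

    ANTLRCommonTree *assign = [wizard createTree:@"(ASSIGN ID[x] INT[3])"];

    ANTLRMap *labels = [ANTLRMap newANTLRMap];
    if ( [wizard parse:assign Pattern:@"(ASSIGN %lhs:ID %rhs:.)" Map:labels] ) {
        // On a match, _parse has filled `labels` via putName:Node:, so "lhs" now
        // refers to the ID[x] node and "rhs" to the INT[3] node.
    }

    // Structural comparison through the instance adaptor:
    BOOL same = [wizard equals:assign
                            O2:[wizard createTree:@"(ASSIGN ID[x] INT[3])"]];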
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.h
deleted file mode 100644
index aa1f9c6..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.h
+++ /dev/null
@@ -1,122 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRTreeNodeStream.h"
-#import "ANTLRCommonTokenStream.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRCommonTreeAdaptor.h"
-
-@interface ANTLRUnbufferedCommonTreeNodeStream : NSObject < ANTLRTreeNodeStream > {
-
-	BOOL shouldUseUniqueNavigationNodes;
-
-	ANTLRCommonTree *root;
-	ANTLRCommonTree *currentNode;
-	ANTLRCommonTree *previousNode;
-
-	id<ANTLRTreeAdaptor> treeAdaptor;
-	
-	id<ANTLRTokenStream> tokenStream;
-	
-	NSMutableArray *nodeStack;
-	NSMutableArray *indexStack;
-	ANTLRPtrBuffer *markers;
-	NSInteger lastMarker;
-	
-	NSInteger currentChildIndex;
-	NSInteger absoluteNodeIndex;
-	
-	NSMutableArray *lookahead;
-	NSUInteger head;
-	NSUInteger tail;
-}
-
-@property (retain, getter=getRoot, setter=setRoot:) ANTLRCommonTree *root;
-@property (retain, getter=getCurrentNode, setter=setCurrentNode:) ANTLRCommonTree *currentNode;
-@property (retain, getter=getPreviousNode, setter=setPreviousNode:) ANTLRCommonTree *previousNode;
-@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<ANTLRTreeAdaptor> treeAdaptor;
-@property (retain, getter=getTokenStream, setter=setTokenStream:) id<ANTLRTokenStream> tokenStream;
-@property (retain, getter=getNodeStack, setter=setNodeStack:) NSMutableArray *nodeStack;
-@property (retain, getter=getIndexStack, setter=setIndexStackStack:) NSMutableArray *indexStack;
-@property (retain, getter=getMarkers, setter=setMarkers:) ANTLRPtrBuffer *markers;
-@property (assign, getter=getLastMarker, setter=setLastMarker:) NSInteger lastMarker;
-@property (assign, getter=getCurrentChildIndex, setter=setCurrentChildIndex:) NSInteger currentChildIndex;
-@property (assign, getter=getAbsoluteNodeIndex, setter=setAbsoluteNodeIndex:) NSInteger absoluteNodeIndex;
-@property (retain, getter=getLookahead, setter=setLookahead:) NSMutableArray *lookahead;
-@property (assign, getter=getHead, setter=setHead:) NSUInteger head;
-@property (assign, getter=getTail, setter=setTail:) NSUInteger tail;
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree;
-- (id) initWithTree:(ANTLRCommonTree *)theTree treeAdaptor:(ANTLRCommonTreeAdaptor *)theAdaptor;
-
-- (void) reset;
-
-#pragma mark ANTLRTreeNodeStream conformance
-
-- (id) LT:(NSInteger)k;
-- (id) treeSource;
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void)setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor;
-- (id<ANTLRTokenStream>) getTokenStream;
-- (void) setTokenStream:(id<ANTLRTokenStream>)aTokenStream;	///< Added by subclass, not in protocol
-- (void) setUsesUniqueNavigationNodes:(BOOL)flag;
-
-- (id) nodeAtIndex:(NSUInteger) idx;
-
-- (NSString *) toString;
-- (NSString *) toStringWithRange:(NSRange) aRange;
-- (NSString *) toStringFromNode:(id)startNode toNode:(id)stopNode;
-
-#pragma mark ANTLRIntStream conformance
-- (void) consume;
-- (NSInteger) LA:(NSUInteger) i;
-- (NSUInteger) mark;
-- (NSUInteger) getIndex;
-- (void) rewind:(NSUInteger) marker;
-- (void) rewind;
-- (void) release:(NSUInteger) marker;
-- (void) seek:(NSUInteger) index;
-- (NSUInteger) size;
-
-#pragma mark Lookahead Handling
-- (void) addLookahead:(id<ANTLRBaseTree>)aNode;
-- (NSUInteger) lookaheadSize;
-- (void) fillBufferWithLookahead:(NSInteger)k;
-- (id) nextObject;
-
-#pragma mark Node visiting
-- (ANTLRCommonTree *) handleRootNode;
-- (ANTLRCommonTree *) visitChild:(NSInteger)childNumber;
-- (void) walkBackToMostRecentNodeWithUnvisitedChildren;
-- (void) addNavigationNodeWithType:(NSInteger)tokenType;
-
-#pragma mark Accessors
-- (ANTLRCommonTree *) root;
-- (void) setRoot: (ANTLRCommonTree *) aRoot;
-
-@end
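A sketch of how the mark/rewind pair declared above is meant to support speculative traversal; `tree` and `expectedType` are placeholders for an already-built ANTLRCommonTree and some token type constant.

    ANTLRUnbufferedCommonTreeNodeStream *nodes =
        [[ANTLRUnbufferedCommonTreeNodeStream alloc] initWithTree:tree];

    NSUInteger marker = [nodes mark];      // snapshot lookahead, node stack and index stack
    [nodes consume];                       // walk forward speculatively
    [nodes consume];
    if ( [nodes LA:1] != expectedType ) {  // token type of the next node
        [nodes rewind:marker];             // restore the state captured at `marker`
    }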
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.m
deleted file mode 100644
index 1ee1e4f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStream.m
+++ /dev/null
@@ -1,432 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#import "ANTLRUnbufferedCommonTreeNodeStream.h"
-#import "ANTLRUnbufferedCommonTreeNodeStreamState.h"
-#import "ANTLRBaseTree.h"
-#import "ANTLRToken.h"
-
-#define INITIAL_LOOKAHEAD_BUFFER_SIZE 5
-@implementation ANTLRUnbufferedCommonTreeNodeStream
-
-@synthesize root;
-@synthesize currentNode;
-@synthesize previousNode;
-@synthesize treeAdaptor;
-@synthesize tokenStream;
-@synthesize nodeStack;
-@synthesize indexStack;
-@synthesize markers;
-@synthesize lastMarker;
-@synthesize currentChildIndex;
-@synthesize absoluteNodeIndex;
-@synthesize lookahead;
-@synthesize head;
-@synthesize tail;
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree
-{
-	return [self initWithTree:theTree treeAdaptor:nil];
-}
-
-- (id) initWithTree:(ANTLRCommonTree *)theTree treeAdaptor:(ANTLRCommonTreeAdaptor *)theAdaptor
-{
-	if ((self = [super init]) != nil) {
-		[self setRoot:theTree];
-		if ( theAdaptor == nil ) 
-			[self setTreeAdaptor:[ANTLRCommonTreeAdaptor newTreeAdaptor]];
-		else
-			[self setTreeAdaptor:theAdaptor];
-		nodeStack = [[NSMutableArray arrayWithCapacity:5] retain];
-		indexStack = [[NSMutableArray arrayWithCapacity:5] retain];
-		markers = [[ANTLRPtrBuffer newANTLRPtrBufferWithLen:100] retain];
-        // [markers insertObject:[NSNull null] atIndex:0];	// markers is one based - maybe fix this later
-		lookahead = [NSMutableArray arrayWithCapacity:INITIAL_LOOKAHEAD_BUFFER_SIZE];	// lookahead is filled with [NSNull null] in -reset
-        [lookahead retain];
-		[self reset];
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-	[self setRoot:nil];
-	[self setTreeAdaptor:nil];
-	
-	[nodeStack release];	nodeStack = nil;
-	[indexStack release];	indexStack = nil;
-	[markers release];		markers = nil;
-	[lookahead release];	lookahead = nil;
-	
-	[super dealloc];
-}
-
-- (void) reset
-{
-	currentNode = root;
-	previousNode = nil;
-	currentChildIndex = -1;
-	absoluteNodeIndex = -1;
-	head = tail = 0;
-	[nodeStack removeAllObjects];
-	[indexStack removeAllObjects];
-	[markers removeAllObjects];
-    // [markers insertObject:[NSNull null] atIndex:0];	// markers is one based - maybe fix this later
-	[lookahead removeAllObjects];
-	// TODO: this is not ideal, but works for now. optimize later
-	int i;
-	for (i = 0; i < INITIAL_LOOKAHEAD_BUFFER_SIZE; i++)
-		[lookahead addObject:[NSNull null]];
-}
-
-
-#pragma mark ANTLRTreeNodeStream conformance
-
-- (id) LT:(NSInteger)k
-{
-	if (k == -1)
-		return previousNode;
-	if (k < 0)
-		@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-LT: looking back more than one node unsupported for unbuffered streams" userInfo:nil];
-	if (k == 0)
-		return ANTLRBaseTree.INVALID_NODE;
-	[self fillBufferWithLookahead:k];
-	return [lookahead objectAtIndex:(head+k-1) % [lookahead count]];
-}
-
-- (id) treeSource
-{
-	return [self root];
-}
-
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-{
-	return treeAdaptor;
-}
-
-- (void)setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-{
-    if (treeAdaptor != aTreeAdaptor) {
-        [aTreeAdaptor retain];
-        [treeAdaptor release];
-        treeAdaptor = aTreeAdaptor;
-    }
-}
-
-- (id<ANTLRTokenStream>) getTokenStream
-{
-	return tokenStream;
-}
-
-- (void) setTokenStream:(id<ANTLRTokenStream>)aTokenStream
-{
-	if (tokenStream != aTokenStream) {
-		[tokenStream release];
-		[aTokenStream retain];
-		tokenStream = aTokenStream;
-	}
-}
-
-- (void) setUsesUniqueNavigationNodes:(BOOL)flag
-{
-	shouldUseUniqueNavigationNodes = flag;
-}
-
-- (id) nodeAtIndex:(NSUInteger) idx
-{
-	@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-nodeAtIndex: unsupported for unbuffered streams" userInfo:nil];
-}
-
-- (NSString *) toString
-{
-	@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-toString unsupported for unbuffered streams" userInfo:nil];
-}
-
-- (NSString *) toStringWithRange:(NSRange) aRange
-{
-	@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-toString: unsupported for unbuffered streams" userInfo:nil];
-}
-
-- (NSString *) toStringFromNode:(id)startNode ToNode:(id)stopNode
-{
-	@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-toStringFromNode:toNode: unsupported for unbuffered streams" userInfo:nil];
-}
-
-#pragma mark ANTLRIntStream conformance
-
-- (void) consume
-{
-	[self fillBufferWithLookahead:1];
-	absoluteNodeIndex++;
-	previousNode = [lookahead objectAtIndex:head];
-	head = (head+1) % [lookahead count];
-}
-
-- (NSInteger) LA:(NSUInteger) i
-{
-	ANTLRCommonTree *node = [self LT:i];
-	if (!node) 
-		return ANTLRTokenTypeInvalid;
-	int ttype = [node getType];
-	return ttype;
-}
-
-- (NSUInteger) mark
-{
-	ANTLRUnbufferedCommonTreeNodeStreamState *state = [[[ANTLRUnbufferedCommonTreeNodeStreamState alloc] init] retain];
-	[state setCurrentNode:currentNode];
-	[state setPreviousNode:previousNode];
-	[state setIndexStackSize:[indexStack count]];
-	[state setNodeStackSize:[nodeStack count]];
-	[state setCurrentChildIndex:currentChildIndex];
-	[state setAbsoluteNodeIndex:absoluteNodeIndex];
-	unsigned int lookaheadSize = [self lookaheadSize];
-	unsigned int k;
-	for ( k = 0; k < lookaheadSize; k++) {
-		[state addToLookahead:[self LT:k+1]];
-	}
-	[markers addObject:state];
-	//[state release];
-	return [markers count];
-}
-
-- (NSUInteger) getIndex
-{
-	return absoluteNodeIndex + 1;
-}
-
-- (void) rewind:(NSUInteger) marker
-{
-	if ( [markers count] < marker ) {
-		return;
-	}
-	ANTLRUnbufferedCommonTreeNodeStreamState *state = [markers objectAtIndex:marker];
-	[markers removeObjectAtIndex:marker];
-
-	absoluteNodeIndex = [state absoluteNodeIndex];
-	currentChildIndex = [state currentChildIndex];
-	currentNode = [state currentNode];
-	previousNode = [state previousNode];
-	// drop node and index stacks back to old size
-	[nodeStack removeObjectsInRange:NSMakeRange([state nodeStackSize], [nodeStack count]-[state nodeStackSize])];
-	[indexStack removeObjectsInRange:NSMakeRange([state indexStackSize], [indexStack count]-[state indexStackSize])];
-	
-	head = tail = 0; // wack lookahead buffer and then refill
-	[lookahead release];
-	lookahead = [[NSMutableArray alloc] initWithArray:[state lookahead]];
-	tail = [lookahead count];
-	// make some room after the restored lookahead, so that the above line is not a bug ;)
-	// this also ensures that a subsequent -addLookahead: will not immediately need to resize the buffer
-	[lookahead addObjectsFromArray:[NSArray arrayWithObjects:[NSNull null], [NSNull null], [NSNull null], [NSNull null], [NSNull null], nil]];
-}
-
-- (void) rewind
-{
-	[self rewind:[markers count]];
-}
-
-- (void) release:(NSUInteger) marker
-{
-	@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-release: unsupported for unbuffered streams" userInfo:nil];
-}
-
-- (void) seek:(NSUInteger) anIndex
-{
-	if ( anIndex < (NSUInteger) index )
-		@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-seek: backwards unsupported for unbuffered streams" userInfo:nil];
-	while ( (NSUInteger) index < anIndex ) {
-		[self consume];
-	}
-}
-
-- (NSUInteger) size;
-{
-	return absoluteNodeIndex + 1;	// not entirely correct, but cheap.
-}
-
-
-#pragma mark Lookahead Handling
-- (void) addLookahead:(id<ANTLRBaseTree>)aNode
-{
-	[lookahead replaceObjectAtIndex:tail withObject:aNode];
-	tail = (tail+1) % [lookahead count];
-	
-	if ( tail == head ) {
-		NSMutableArray *newLookahead = [[[NSMutableArray alloc] initWithCapacity:[lookahead count]*2] retain];
-		
-		NSRange headRange = NSMakeRange(head, [lookahead count]-head);
-		NSRange tailRange = NSMakeRange(0, tail);
-		
-		[newLookahead addObjectsFromArray:[lookahead objectsAtIndexes:[NSIndexSet indexSetWithIndexesInRange:headRange]]];
-		[newLookahead addObjectsFromArray:[lookahead objectsAtIndexes:[NSIndexSet indexSetWithIndexesInRange:tailRange]]];
-		
-		unsigned int i;
-		unsigned int lookaheadCount = [newLookahead count];
-		for (i = 0; i < lookaheadCount; i++)
-			[newLookahead addObject:[NSNull null]];
-		[lookahead release];
-		lookahead = newLookahead;
-		
-		head = 0;
-		tail = lookaheadCount;	// tail is the location the _next_ lookahead node will end up in, not the last element's idx itself!
-	}
-	
-}
-
-- (NSUInteger) lookaheadSize
-{
-	return tail < head
-		? ([lookahead count] - head + tail) 
-		: (tail - head);
-}
-
-- (void) fillBufferWithLookahead:(NSInteger)k
-{
-	unsigned int n = [self lookaheadSize];
-	unsigned int i;
-	id lookaheadObject = self; // any valid object would do.
-	for (i=1; i <= k-n && lookaheadObject != nil; i++) {
-		lookaheadObject = [self nextObject];
-	}
-}
-
-- (id) nextObject
-{
-	// NOTE: this could/should go into an NSEnumerator subclass for treenode streams.
-	if (currentNode == nil) {
-        if ( navigationNodeEOF == nil ) {
-            navigationNodeEOF = [[ANTLRTreeNavigationNodeEOF alloc] init];
-        }
-		[self addLookahead:navigationNodeEOF];
-		return nil;
-	}
-	if (currentChildIndex == -1) {
-		return [self handleRootNode];
-	}
-	if (currentChildIndex < (NSInteger)[currentNode getChildCount]) {
-		return [self visitChild:currentChildIndex];
-	}
-	[self walkBackToMostRecentNodeWithUnvisitedChildren];
-	if (currentNode != nil) {
-		return [self visitChild:currentChildIndex];
-	}
-	
-	return nil;
-}	
-
-#pragma mark Node visiting
-- (ANTLRCommonTree *) handleRootNode
-{
-	ANTLRCommonTree *node = currentNode;
-	currentChildIndex = 0;
-	if ([node isNil]) {
-		node = [self visitChild:currentChildIndex];
-	} else {
-		[self addLookahead:node];
-		if ([currentNode getChildCount] == 0) {
-			currentNode = nil;
-		}
-	}
-	return node;
-}
-
-- (ANTLRCommonTree *) visitChild:(NSInteger)childNumber
-{
-	ANTLRCommonTree *node = nil;
-	
-	[nodeStack addObject:currentNode];
-	[indexStack addObject:[NSNumber numberWithInt:childNumber]];
-	if (childNumber == 0 && ![currentNode isNil])
-		[self addNavigationNodeWithType:ANTLRTokenTypeDOWN];
-
-	currentNode = [currentNode getChild:childNumber];
-	currentChildIndex = 0;
-	node = currentNode;  // record node to return
-	[self addLookahead:node];
-	[self walkBackToMostRecentNodeWithUnvisitedChildren];
-	return node;
-}
-
-- (void) walkBackToMostRecentNodeWithUnvisitedChildren
-{
-	while (currentNode != nil && currentChildIndex >= (NSInteger)[currentNode getChildCount])
-	{
-		currentNode = (ANTLRCommonTree *)[nodeStack lastObject];
-		[nodeStack removeLastObject];
-		currentChildIndex = [(NSNumber *)[indexStack lastObject] intValue];
-		[indexStack removeLastObject];
-		currentChildIndex++; // move to next child
-		if (currentChildIndex >= (NSInteger)[currentNode getChildCount]) {
-			if (![currentNode isNil]) {
-				[self addNavigationNodeWithType:ANTLRTokenTypeUP];
-			}
-			if (currentNode == root) { // we done yet?
-				currentNode = nil;
-			}
-		}
-	}
-	
-}
-
-- (void) addNavigationNodeWithType:(NSInteger)tokenType
-{
-	// TODO: this currently ignores shouldUseUniqueNavigationNodes.
-	switch (tokenType) {
-		case ANTLRTokenTypeDOWN: {
-            if (navigationNodeDown == nil) {
-                navigationNodeDown = [[ANTLRTreeNavigationNodeDown alloc] init];
-            }
-			[self addLookahead:navigationNodeDown];
-			break;
-		}
-		case ANTLRTokenTypeUP: {
-            if (navigationNodeUp == nil) {
-                navigationNodeUp = [[ANTLRTreeNavigationNodeUp alloc] init];
-            }
-			[self addLookahead:navigationNodeUp];
-			break;
-		}
-	}
-}
-
-#pragma mark Accessors
-- (ANTLRCommonTree *) root
-{
-    return root; 
-}
-
-- (void) setRoot: (ANTLRCommonTree *) aRoot
-{
-    if (root != aRoot) {
-        [aRoot retain];
-        [root release];
-        root = aRoot;
-    }
-}
-
-@end
-
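The lookahead buffer in the implementation above is a ring buffer: -addLookahead: advances tail modulo the array length and doubles the buffer when tail catches head, -lookaheadSize accounts for wrap-around, and -LT: indexes relative to head. Reduced to the index arithmetic alone (illustrative only, with `count` standing in for [lookahead count]):

    NSUInteger nextTail = (tail + 1) % count;                     // -addLookahead: advancing tail
    NSUInteger filled   = (tail < head) ? (count - head + tail)
                                        : (tail - head);          // -lookaheadSize
    id kAhead = [lookahead objectAtIndex:(head + k - 1) % count]; // -LT:k, k is 1-based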
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStreamState.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStreamState.h
deleted file mode 100644
index f728952..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStreamState.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRCommonTree.h"
-
-@interface ANTLRUnbufferedCommonTreeNodeStreamState : NSObject {
-	ANTLRCommonTree *currentNode;
-	ANTLRCommonTree *previousNode;
-
-	int currentChildIndex;
-	int absoluteNodeIndex;
-	unsigned int nodeStackSize;
-	unsigned int indexStackSize;
-	
-	NSMutableArray *lookahead;
-}
-
-- (ANTLRCommonTree *) currentNode;
-- (void) setCurrentNode: (ANTLRCommonTree *) aCurrentNode;
-
-- (ANTLRCommonTree *) previousNode;
-- (void) setPreviousNode: (ANTLRCommonTree *) aPreviousNode;
-
-- (NSInteger) currentChildIndex;
-- (void) setCurrentChildIndex: (NSInteger) aCurrentChildIndex;
-
-- (NSInteger) absoluteNodeIndex;
-- (void) setAbsoluteNodeIndex: (NSInteger) anAbsoluteNodeIndex;
-
-- (NSUInteger) nodeStackSize;
-- (void) setNodeStackSize: (NSUInteger) aNodeStackSize;
-
-- (NSUInteger) indexStackSize;
-- (void) setIndexStackSize: (NSUInteger) anIndexStackSize;
-
-- (NSMutableArray *) lookahead;
-- (void) setLookahead: (NSMutableArray *) aLookahead;
-
-- (void) addToLookahead: (id)lookaheadObject;
-- (void) removeFromLookahead: (id)lookaheadObject;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStreamState.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStreamState.m
deleted file mode 100644
index c46d28d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedCommonTreeNodeStreamState.m
+++ /dev/null
@@ -1,140 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRUnbufferedCommonTreeNodeStreamState.h"
-
-
-@implementation ANTLRUnbufferedCommonTreeNodeStreamState
-
-- (id) init
-{
-	if ((self = [super init]) != nil) {
-		lookahead = [[NSMutableArray alloc] init];
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-	[self setLookahead:nil];
-	[self setCurrentNode:nil];
-	[self setPreviousNode:nil];
-	[super dealloc];
-}
-
-- (ANTLRCommonTree *) currentNode
-{
-    return currentNode; 
-}
-
-- (void) setCurrentNode: (ANTLRCommonTree *) aCurrentNode
-{
-    if (currentNode != aCurrentNode) {
-        [aCurrentNode retain];
-        [currentNode release];
-        currentNode = aCurrentNode;
-    }
-}
-
-- (ANTLRCommonTree *) previousNode
-{
-    return previousNode; 
-}
-
-- (void) setPreviousNode: (ANTLRCommonTree *) aPreviousNode
-{
-    if (previousNode != aPreviousNode) {
-        [aPreviousNode retain];
-        [previousNode release];
-        previousNode = aPreviousNode;
-    }
-}
-
-- (NSInteger) currentChildIndex
-{
-    return currentChildIndex;
-}
-
-- (void) setCurrentChildIndex: (NSInteger) aCurrentChildIndex
-{
-    currentChildIndex = aCurrentChildIndex;
-}
-
-- (NSInteger) absoluteNodeIndex
-{
-    return absoluteNodeIndex;
-}
-
-- (void) setAbsoluteNodeIndex: (NSInteger) anAbsoluteNodeIndex
-{
-    absoluteNodeIndex = anAbsoluteNodeIndex;
-}
-
-- (NSUInteger) nodeStackSize
-{
-    return nodeStackSize;
-}
-
-- (void) setNodeStackSize: (NSUInteger) aNodeStackSize
-{
-    nodeStackSize = aNodeStackSize;
-}
-
-- (NSUInteger) indexStackSize
-{
-    return indexStackSize;
-}
-
-- (void) setIndexStackSize: (NSUInteger) anIndexStackSize
-{
-    indexStackSize = anIndexStackSize;
-}
-
-- (NSMutableArray *) lookahead
-{
-    return lookahead; 
-}
-
-- (void) setLookahead: (NSMutableArray *) aLookahead
-{
-    if (lookahead != aLookahead) {
-        [aLookahead retain];
-        [lookahead release];
-        lookahead = aLookahead;
-    }
-}
-
-- (void) addToLookahead: (id)lookaheadObject
-{
-    [[self lookahead] addObject: lookaheadObject];
-}
-- (void) removeFromLookahead: (id)lookaheadObject
-{
-    [[self lookahead] removeObject: lookaheadObject];
-}
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedTokenStream.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedTokenStream.h
deleted file mode 100644
index 84d8f43..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedTokenStream.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//
-//  ANTLRUnbufferedTokenStream.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/12/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRRuntimeException.h"
-#import "ANTLRTokenSource.h"
-#import "ANTLRLookaheadStream.h"
-#import "ANTLRToken.h"
-
-@interface ANTLRUnbufferedTokenStream : ANTLRLookaheadStream {
-	id<ANTLRTokenSource> tokenSource;
-    NSInteger tokenIndex; // simple counter to set token index in tokens
-    NSInteger channel;
-}
-
-@property (retain, getter=getTokenSource, setter=setTokenSource:) id<ANTLRTokenSource> tokenSource;
-@property (getter=getTokenIndex, setter=setTokenIndex:) NSInteger tokenIndex;
-@property (getter=channel, setter=setChannel:) NSInteger channel;
-
-+ (ANTLRUnbufferedTokenStream *)newANTLRUnbufferedTokenStream:(id<ANTLRTokenSource>)aTokenSource;
-- (id) init;
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource;
-
-- (id<ANTLRToken>)nextElement;
-- (BOOL)isEOF:(id<ANTLRToken>) aToken;
-- (id<ANTLRTokenSource>)getTokenSource;
-- (NSString *)toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop;
-- (NSString *)toStringFromToken:(id<ANTLRToken>)aStart ToEnd:(id<ANTLRToken>)aStop;
-- (NSInteger)LA:(NSInteger)anIdx;
-- (id<ANTLRToken>)objectAtIndex:(NSInteger)anIdx;
-- (NSString *)getSourceName;
-
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedTokenStream.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedTokenStream.m
deleted file mode 100644
index 3b74e92..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnbufferedTokenStream.m
+++ /dev/null
@@ -1,118 +0,0 @@
-//
-//  ANTLRUnbufferedTokenStream.m
-//  ANTLR
-//
-//  Created by Alan Condit on 7/12/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRUnbufferedTokenStream.h"
-
-@implementation ANTLRUnbufferedTokenStream
-
-@synthesize tokenSource;
-@synthesize tokenIndex;
-@synthesize channel;
-
-+ (ANTLRUnbufferedTokenStream *)newANTLRUnbufferedTokenStream:(id<ANTLRTokenSource>)aTokenSource
-{
-    return [[ANTLRUnbufferedTokenStream alloc] initWithTokenSource:aTokenSource];
-}
-
-- (id) init
-{
-    if ((self = [super init]) != nil) {
-        tokenSource = nil;
-        tokenIndex = 0;
-        channel = ANTLRTokenChannelDefault;
-    }
-    return self;
-}
-
-- (id) initWithTokenSource:(id<ANTLRTokenSource>)aTokenSource
-{
-    if ((self = [super init]) != nil) {
-        tokenSource = aTokenSource;
-        if ( tokenSource ) [tokenSource retain];
-        tokenIndex = 0;
-        channel = ANTLRTokenChannelDefault;
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRUnbufferedTokenStream" );
-#endif
-    if ( tokenSource ) [tokenSource release];
-    [super dealloc];
-}
-
-- (id<ANTLRToken>)nextElement
-{
-    id<ANTLRToken> t = [tokenSource nextToken];
-    [t setTokenIndex:tokenIndex++];
-    return t;
-}
-
-- (BOOL)isEOF:(id<ANTLRToken>)aToken
-{
-    return (aToken.type == ANTLRTokenTypeEOF);
-}    
-
-- (id<ANTLRTokenSource>)getTokenSource
-{
-    return tokenSource;
-}
-
-- (NSString *)toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop
-{
-    return @"n/a";
-}
-
-- (NSString *)toStringFromToken:(id<ANTLRToken>)aStart ToEnd:(id<ANTLRToken>)aStop
-{
-    return @"n/a";
-}
-
-- (NSInteger)LA:(NSInteger)anIdx
-{
-    return [[self LT:anIdx] type];
-}
-
-- (id<ANTLRToken>)objectAtIndex:(NSInteger)anIdx
-{
-    @throw [ANTLRRuntimeException newException:@"Absolute token indexes are meaningless in an unbuffered stream"];
-}
-
-- (NSString *)getSourceName
-{
-    return [tokenSource getSourceName];
-}
-
-
-@end
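A sketch of driving the stream removed above; `lexer` stands for any object conforming to ANTLRTokenSource (its construction is outside this hunk), and -consume is assumed to be inherited from the ANTLRLookaheadStream superclass, which is also not part of this hunk.

    ANTLRUnbufferedTokenStream *tokens =
        [ANTLRUnbufferedTokenStream newANTLRUnbufferedTokenStream:lexer];

    while ( [tokens LA:1] != ANTLRTokenTypeEOF ) { // LA: reads the type of LT:1
        id<ANTLRToken> t = [tokens LT:1];
        // ... inspect t here ...
        [tokens consume];                          // assumed inherited from ANTLRLookaheadStream
    }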
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRUniqueIDMap.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRUniqueIDMap.h
deleted file mode 100644
index 55c92d1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRUniqueIDMap.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//
-//  ANTLRUniqueIDMap.h
-//  ANTLR
-//
-//  Created by Alan Condit on 7/7/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRPtrBuffer.h"
-#import "ANTLRNodeMapElement.h"
-
-#define SUCCESS             0
-#define FAILURE             -1
-#define HASHSIZE            101
-#define HBUFSIZE            0x2000
-
-@interface ANTLRUniqueIDMap : ANTLRPtrBuffer {
-    NSInteger lastHash;
-}
-
-@property (getter=getLastHash, setter=setLastHash:) NSInteger lastHash;
-
-+ (id)newANTLRUniqueIDMap;
-+ (id)newANTLRUniqueIDMapWithLen:(NSInteger)aHashSize;
-
-- (id)init;
-- (id)initWithLen:(NSInteger)cnt;
-- (void)dealloc;
-// Instance Methods
-- (NSInteger)count;
-- (NSInteger)size;
-/* clear -- reinitialize the maplist array */
-- (void) clear;
-
-- (void)deleteANTLRUniqueIDMap:(ANTLRNodeMapElement *)np;
-- (void)delete_chain:(ANTLRNodeMapElement *)np;
-- (id)getNode:(id<ANTLRBaseTree>)aNode;
-- (void)putID:(id)anID Node:(id<ANTLRBaseTree>)aNode;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRUniqueIDMap.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRUniqueIDMap.m
deleted file mode 100644
index 0d52092..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRUniqueIDMap.m
+++ /dev/null
@@ -1,184 +0,0 @@
-//
-//  ANTLRUniqueIDMap.m
-//  ANTLR
-//
-//  Created by Alan Condit on 7/7/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRUniqueIDMap.h"
-#import "ANTLRTree.h"
-
-@implementation ANTLRUniqueIDMap
-@synthesize lastHash;
-
-+(id)newANTLRUniqueIDMap
-{
-    ANTLRUniqueIDMap *aNewANTLRUniqueIDMap;
-    
-    aNewANTLRUniqueIDMap = [[ANTLRUniqueIDMap alloc] init];
-	return( aNewANTLRUniqueIDMap );
-}
-
-+(id)newANTLRUniqueIDMapWithLen:(NSInteger)aBuffSize
-{
-    ANTLRUniqueIDMap *aNewANTLRUniqueIDMap;
-    
-    aNewANTLRUniqueIDMap = [[ANTLRUniqueIDMap alloc] initWithLen:aBuffSize];
-	return( aNewANTLRUniqueIDMap );
-}
-
--(id)init
-{
-    NSInteger idx;
-    
-	if ((self = [super initWithLen:HASHSIZE]) != nil) {
-		fNext = nil;
-        for( idx = 0; idx < HASHSIZE; idx++ ) {
-            ptrBuffer[idx] = nil;
-        }
-	}
-    return( self );
-}
-
--(id)initWithLen:(NSInteger)aBuffSize
-{
-	if ((self = [super initWithLen:aBuffSize]) != nil) {
-	}
-    return( self );
-}
-
--(void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ANTLRUniqueIDMap" );
-#endif
-    ANTLRNodeMapElement *tmp, *rtmp;
-    NSInteger idx;
-	
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < HASHSIZE; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp ) {
-                rtmp = tmp;
-                tmp = (ANTLRNodeMapElement *)tmp.fNext;
-                [rtmp release];
-            }
-        }
-    }
-	[super dealloc];
-}
-
--(void)deleteANTLRUniqueIDMap:(ANTLRNodeMapElement *)np
-{
-    ANTLRNodeMapElement *tmp, *rtmp;
-    NSInteger idx;
-    
-    if ( self.fNext != nil ) {
-        for( idx = 0; idx < HASHSIZE; idx++ ) {
-            tmp = ptrBuffer[idx];
-            while ( tmp ) {
-                rtmp = tmp;
-                tmp = tmp.fNext;
-                [rtmp release];
-            }
-        }
-    }
-}
-
-- (void)clear
-{
-    ANTLRNodeMapElement *tmp, *rtmp;
-    NSInteger idx;
-    
-    for( idx = 0; idx < HASHSIZE; idx++ ) {
-        tmp = ptrBuffer[idx];
-        while ( tmp ) {
-            rtmp = tmp;
-            tmp = [tmp getfNext];
-            [rtmp release];
-        }
-        ptrBuffer[idx] = nil;
-    }
-}
-
-- (NSInteger)count
-{
-    id anElement;
-    NSInteger aCnt = 0;
-    
-    for (int i = 0; i < BuffSize; i++) {
-        if ((anElement = ptrBuffer[i]) != nil) {
-            aCnt += (NSInteger)[anElement count];
-        }
-    }
-    return aCnt;
-}
-
-- (NSInteger)size
-{
-    return BuffSize;
-}
-
--(void)delete_chain:(ANTLRNodeMapElement *)np
-{
-    if ( np.fNext != nil )
-		[self delete_chain:np.fNext];
-	[np release];
-}
-
-- (id)getNode:(id<ANTLRBaseTree>)aNode
-{
-    ANTLRNodeMapElement *np;
-    NSInteger idx;
-    
-    idx = [(id<ANTLRBaseTree>)aNode type];
-    np = ptrBuffer[idx];
-    while ( np != nil ) {
-        if (np.node == aNode) {
-            return( np.index );
-        }
-        np = np.fNext;
-    }
-    return( nil );
-}
-
-- (void)putID:(id)anID Node:(id<ANTLRBaseTree>)aNode
-{
-    ANTLRNodeMapElement *np, *np1;
-    NSInteger idx;
-    
-    idx = [(id<ANTLRBaseTree>)aNode type];
-    idx %= HASHSIZE;
-    np = [[ANTLRNodeMapElement newANTLRNodeMapElementWithIndex:anID Node:aNode] retain];
-    np1 = ptrBuffer[idx];
-    np.fNext = np1;
-    ptrBuffer[idx] = np;
-    return;
-}
-
-
-@end
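A sketch of the map removed above: it hashes on the node's token type and chains entries within each bucket, associating a caller-chosen ID with each tree node. `node` stands for any id<ANTLRBaseTree>.

    ANTLRUniqueIDMap *ids = [ANTLRUniqueIDMap newANTLRUniqueIDMap];
    [ids putID:[NSNumber numberWithInt:42] Node:node]; // chained into the bucket for node's type
    id stored = [ids getNode:node];                    // walks the chain; returns the ID put above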
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnwantedTokenException.h b/antlr-3.4/runtime/ObjC/Framework/ANTLRUnwantedTokenException.h
deleted file mode 100644
index fbb60e3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnwantedTokenException.h
+++ /dev/null
@@ -1,47 +0,0 @@
-//
-//  ANTLRUnwantedTokenException.h
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <Cocoa/Cocoa.h>
-#import "ANTLRMismatchedTokenException.h"
-
-@interface ANTLRUnwantedTokenException : ANTLRMismatchedTokenException {
-
-}
-+ (ANTLRUnwantedTokenException *)newException;
-+ (ANTLRUnwantedTokenException *)newException:(NSInteger)expected Stream:(id<ANTLRIntStream>)anInput;
-
-- (id) init;
-- (id) initWithStream:(id<ANTLRIntStream>)anInput And:(NSInteger)expected;
-- (id<ANTLRToken>)getUnexpectedToken;
-- (NSString *)toString;
-                     
-    
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnwantedTokenException.m b/antlr-3.4/runtime/ObjC/Framework/ANTLRUnwantedTokenException.m
deleted file mode 100644
index b8b24f3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ANTLRUnwantedTokenException.m
+++ /dev/null
@@ -1,80 +0,0 @@
-//
-//  ANTLRUnwantedTokenException.m
-//  ANTLR
-//
-//  Created by Alan Condit on 6/8/10.
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "ANTLRUnwantedTokenException.h"
-
-@implementation ANTLRUnwantedTokenException : ANTLRMismatchedTokenException
-	/** Used for remote debugger deserialization */
-+ (ANTLRUnwantedTokenException *)newException
-{
-    return [[ANTLRUnwantedTokenException alloc] init];
-}
-    
-+ (ANTLRUnwantedTokenException *)newException:(NSInteger)expected Stream:(id<ANTLRIntStream>)anInput
-{
-    return [[ANTLRUnwantedTokenException alloc] initWithStream:anInput And:expected];
-}
-
-- (id) init
-{
-    self = [super initWithStream:input];
-    if (self) {
-    }
-    return self;
-}
-     
-- (id) initWithStream:(id<ANTLRIntStream>)anInput And:(NSInteger)expected
-{
-    self = [super initWithStream:anInput];
-    if (self) {
-        expecting = expected;
-    }
-    return self;
-}
-    
-- (id<ANTLRToken>)getUnexpectedToken
-{
-    return token;
-}
-    
-- (NSString *)toString
-{
-    NSString *exp1 = [NSString stringWithFormat:@", expected %d", expecting];
-    if ( expecting == ANTLRTokenTypeInvalid ) {
-        exp1 = @"";
-    }
-    if ( token==nil ) {
-        return [NSString stringWithFormat:@"UnwantedTokenException(found=%@)", exp1];
-    }
-    return [NSString stringWithFormat:@"UnwantedTokenException(found=%@ %@", token.text, exp1];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ArrayIterator.h b/antlr-3.4/runtime/ObjC/Framework/ArrayIterator.h
deleted file mode 100644
index 280f03f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ArrayIterator.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2011 Terence Parr and Alan Condit
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#import <Cocoa/Cocoa.h>
-#import <Foundation/Foundation.h>
-
-/**
- * Iterator for an array so I don't have to copy the array to a List
- * just to make it iteratable.
- */
-
-/*
- * this is the state structure for FastEnumeration
- typedef struct {
- unsigned long state;
- id *itemsPtr;
- unsigned long *mutationsPtr;
- unsigned long extra[5];
- } NSFastEnumerationState;
- */
-
-@interface ArrayIterator : NSObject {
-    
-    __strong id peekObj;
-    /**
-     * NSArrays are fixed size; precompute count.
-     */
-    NSInteger count;
-    NSInteger index;
-    __strong NSArray *anArray;
-    
-}
-
-+ (ArrayIterator *) newIterator:(NSArray *)array;
-+ (ArrayIterator *) newIteratorForDictKey:(NSDictionary *)dict;
-+ (ArrayIterator *) newIteratorForDictObj:(NSDictionary *)dict;
-
-- (id) initWithArray:(NSArray *)array;
-- (id) initWithDictKey:(NSDictionary *)dict;
-- (id) initWithDictObj:(NSDictionary *)dict;
-
-- (BOOL) hasNext;
-- (id) nextObject;
-- (NSArray *)allObjects;
-- (void) removeObjectAtIndex:(NSInteger)idx;
-- (NSInteger) count;
-- (void) setCount:(NSInteger)cnt;
-- (void) dealloc;
-
-@property (retain) id peekObj;
-@property (assign, getter=count, setter=setCount:) NSInteger count;
-@property (assign) NSInteger index;
-@property (retain) NSArray *anArray;
-
-@end
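A sketch of the iterator declared above, which walks an NSArray (or a dictionary's keys or objects) without copying it into another collection first:

    ArrayIterator *it =
        [ArrayIterator newIterator:[NSArray arrayWithObjects:@"a", @"b", @"c", nil]];
    while ( [it hasNext] ) {
        id obj = [it nextObject];
        // ... use obj ...
    }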
diff --git a/antlr-3.4/runtime/ObjC/Framework/ArrayIterator.m b/antlr-3.4/runtime/ObjC/Framework/ArrayIterator.m
deleted file mode 100644
index 45d1e6a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/ArrayIterator.m
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2011 Terence Parr and Alan Condit
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#import "AMutableArray.h"
-#import "ArrayIterator.h"
-#import "ANTLRRuntimeException.h"
-
-@class AMutableArray;
-
-@implementation ArrayIterator
-
-@synthesize peekObj;
-//@synthesize count;
-@synthesize index;
-@synthesize anArray;
-
-
-+ (ArrayIterator *) newIterator:(NSArray *)array
-{
-    return [[ArrayIterator alloc] initWithArray:array];
-}
-
-+ (ArrayIterator *) newIteratorForDictKey:(NSDictionary *)dict
-{
-    return [[ArrayIterator alloc] initWithDictKey:dict];
-}
-
-+ (ArrayIterator *) newIteratorForDictObj:(NSDictionary *)dict
-{
-    return [[ArrayIterator alloc] initWithDictObj:dict];
-}
-
-- (id) initWithArray:(NSArray *)array
-{
-    self=[super init];
-    if ( self != nil ) {
-        if (![array isKindOfClass:[NSArray class]]) {
-                @throw [NSException exceptionWithName:NSInvalidArgumentException
-                                               reason:[NSString stringWithFormat:@"ArrayIterator expecting NSArray class but got %@", [array className]]
-                                             userInfo:nil];
-        }
-        anArray = [array retain];
-#ifdef DONTUSENOMO
-        for (int i = 0; i < [array count]; i++) {
-            [anArray addObject:[array objectAtIndex:i]];
-            count++;
-        }
-#endif
-        peekObj = nil;
-        count = [anArray count];
-        index = 0;
-    }
-    return self;
-}
-
-- (id) initWithDictKey:(NSDictionary *)dict
-{
-    self=[super init];
-    if ( self != nil ) {
-        if (![dict isKindOfClass:[NSDictionary class]]) {
-            @throw [NSException exceptionWithName:NSInvalidArgumentException
-                                           reason:[NSString stringWithFormat:@"ArrayIterator expecting NSDictionary class but got %@", [dict className]]
-                                         userInfo:nil];
-        }
-        anArray = [[[dict keyEnumerator] allObjects] retain];
-        peekObj = nil;
-        count = [anArray count];
-        index = 0;
-    }
-    return self;
-}
-
-- (id) initWithDictObj:(NSDictionary *)dict
-{
-    self=[super init];
-    if ( self != nil ) {
-        if (![dict isKindOfClass:[NSDictionary class]]) {
-            @throw [NSException exceptionWithName:NSInvalidArgumentException
-                                           reason:[NSString stringWithFormat:@"ArrayIterator expecting NSDictionary class but got %@", [dict className]]
-                                         userInfo:nil];
-        }
-        anArray = [[[dict objectEnumerator] allObjects] retain];
-        peekObj = nil;
-        count = [anArray count];
-        index = 0;
-    }
-    return self;
-}
-
-- (void)dealloc
-{
-#ifdef DEBUG_DEALLOC
-    NSLog( @"called dealloc in ArrayIterator" );
-#endif
-    if ( anArray ) [anArray release];
-    [super dealloc];
-}
-
-- (BOOL) hasNext
-{
-    if ( peekObj == nil ) {
-        peekObj = [self nextObject];
-    }
-    return ((peekObj) ? YES : NO);
-}
-
-- (NSObject *) nextObject
-{
-    id obj = nil;
-    if ( peekObj ) {
-        obj = peekObj;
-        peekObj = nil;
-        return obj;
-    }
-    if ( index >= count ) {
-        return nil;
-    }
-    if ( anArray ) {
-        obj = [anArray objectAtIndex:index++];
-        if ( index >= count ) {
-            [anArray release];
-            anArray = nil;
-            index = 0;
-            count = 0;
-        }
-    }
-    return obj;
-}
-
-- (NSArray *) allObjects
-{
-    if ( (count <= 0 || index >= count) && peekObj == nil ) return nil;
-    AMutableArray *theArray = [AMutableArray arrayWithCapacity:count];
-    if (peekObj) {
-        [theArray addObject:peekObj];
-        peekObj = nil;
-    }
-    for (int i = index; i < count; i++) {
-        [theArray addObject:[anArray objectAtIndex:i]];
-    }
-    return [NSArray arrayWithArray:(NSArray *)theArray];
-}
-
-- (void) removeObjectAtIndex:(NSInteger)idx
-{
-    @throw [ANTLRUnsupportedOperationException newException:@"Cant remove object from ArrayIterator"];
-}
-
-- (NSInteger) count
-{
-    return (index - count);
-}
-
-- (void) setCount:(NSInteger)cnt
-{
-    count = cnt;
-}
-
-@end
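The deleted ArrayIterator gives NSArray and NSDictionary contents a Java-style hasNext/nextObject protocol: hasNext pre-fetches the next element into peekObj, and the following nextObject call hands it back. A minimal sketch of the same peek-ahead pattern in Java (illustrative only; the PeekAheadIterator name is invented for the example):

    import java.util.List;

    /** Peek-ahead iterator sketch mirroring the deleted ArrayIterator:
     *  hasNext() pre-fetches one element and caches it until next() is called.
     *  Illustrative only; not part of the ANTLR runtime. */
    final class PeekAheadIterator<T> {
        private final List<T> data;  // fixed backing list, like the retained NSArray
        private int index = 0;       // next position to read
        private T peek;              // element pre-fetched by hasNext(), if any

        PeekAheadIterator(List<T> data) { this.data = data; }

        boolean hasNext() {
            if (peek == null) peek = next();   // pre-fetch, as the ObjC hasNext does
            return peek != null;
        }

        T next() {
            if (peek != null) { T o = peek; peek = null; return o; }
            if (index >= data.size()) return null;
            return data.get(index++);
        }
    }

Iterating a dictionary's keys or values, as initWithDictKey/initWithDictObj do, reduces to the same sketch applied to a snapshot list of the keys or values.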
diff --git a/antlr-3.4/runtime/ObjC/Framework/FastQueue.h b/antlr-3.4/runtime/ObjC/Framework/FastQueue.h
deleted file mode 100644
index 25f34d6..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/FastQueue.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-[The "BSD licence"]
-Copyright (c) 2005-2008 Terence Parr
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-3. The name of the author may not be used to endorse or promote products
-derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.runtime.misc;
-
-import java.util.List;
-import java.util.ArrayList;
-import java.util.NoSuchElementException;
-
-/** A queue that can dequeue and get(i) in O(1) and grow arbitrarily large.
- *  A linked list is fast at dequeue but slow at get(i).  An array is
- *  the reverse.  This is O(1) for both operations.
- *
- *  List grows until you dequeue last element at end of buffer. Then
- *  it resets to start filling at 0 again.  If adds/removes are balanced, the
- *  buffer will not grow too large.
- *
- *  No iterator stuff as that's not how we'll use it.
- */
-public class FastQueue<T> {
-    /** dynamically-sized buffer of elements */
-    protected List<T> data = new ArrayList<T>();
-    /** index of next element to fill */
-    protected int p = 0;
-
-    public void reset() { p = 0; data.clear(); }
-
-    /** Get and remove first element in queue */
-    public T remove() {
-        T o = get(0);
-        p++;
-        // have we hit end of buffer?
-        if ( p == data.size() ) {
-            // if so, it's an opportunity to start filling at index 0 again
-            clear(); // size goes to 0, but retains memory
-        }
-        return o;
-    }
-
-    public void add(T o) { data.add(o); }
-
-    public int size() { return data.size() - p; }
-
-    public T head() { return get(0); }
-
-    /** Return element i elements ahead of current element.  i==0 gets
-     *  current element.  This is not an absolute index into the data list
-     *  since p defines the start of the real list.
-     */
-    public T get(int i) {
-        if ( p+i >= data.size() ) {
-            throw new NoSuchElementException("queue index "+(p+i)+" > size "+data.size());
-        }
-        return data.get(p+i);
-    }
-
-    public void clear() { p = 0; data.clear(); }
-
-    /** Return string of current buffer contents; non-destructive */
-    public String toString() {
-        StringBuffer buf = new StringBuffer();
-        int n = size();
-        for (int i=0; i<n; i++) {
-            buf.append(get(i));
-            if ( (i+1)<n ) buf.append(" ");
-        }
-        return buf.toString();
-    }
-}
\ No newline at end of file
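Note that this .h file (and the .m below) actually carries the Java source of org.antlr.runtime.misc.FastQueue rather than Objective-C. A short usage sketch of the behaviour it documents: remove() and get(i) are both O(1), and once the last buffered element has been dequeued the buffer resets and starts filling at index 0 again (API as in the deleted copy above):

    import org.antlr.runtime.misc.FastQueue;

    /** Usage sketch for FastQueue as shown above. */
    public class FastQueueDemo {
        public static void main(String[] args) {
            FastQueue<String> q = new FastQueue<String>();
            q.add("a");
            q.add("b");
            q.add("c");
            System.out.println(q.head());    // "a" -- same as get(0), non-destructive
            System.out.println(q.remove());  // "a" -- O(1) dequeue
            System.out.println(q.get(1));    // "c" -- index is relative to the current head
            System.out.println(q.size());    // 2
            q.remove();
            q.remove();                      // last buffered element removed -> buffer clears itself
            System.out.println(q.size());    // 0; subsequent add() calls start filling at index 0
        }
    }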
diff --git a/antlr-3.4/runtime/ObjC/Framework/FastQueue.m b/antlr-3.4/runtime/ObjC/Framework/FastQueue.m
deleted file mode 100644
index 25f34d6..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/FastQueue.m
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-[The "BSD licence"]
-Copyright (c) 2005-2008 Terence Parr
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-3. The name of the author may not be used to endorse or promote products
-derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.runtime.misc;
-
-import java.util.List;
-import java.util.ArrayList;
-import java.util.NoSuchElementException;
-
-/** A queue that can dequeue and get(i) in O(1) and grow arbitrarily large.
- *  A linked list is fast at dequeue but slow at get(i).  An array is
- *  the reverse.  This is O(1) for both operations.
- *
- *  List grows until you dequeue last element at end of buffer. Then
- *  it resets to start filling at 0 again.  If adds/removes are balanced, the
- *  buffer will not grow too large.
- *
- *  No iterator stuff as that's not how we'll use it.
- */
-public class FastQueue<T> {
-    /** dynamically-sized buffer of elements */
-    protected List<T> data = new ArrayList<T>();
-    /** index of next element to fill */
-    protected int p = 0;
-
-    public void reset() { p = 0; data.clear(); }
-
-    /** Get and remove first element in queue */
-    public T remove() {
-        T o = get(0);
-        p++;
-        // have we hit end of buffer?
-        if ( p == data.size() ) {
-            // if so, it's an opportunity to start filling at index 0 again
-            clear(); // size goes to 0, but retains memory
-        }
-        return o;
-    }
-
-    public void add(T o) { data.add(o); }
-
-    public int size() { return data.size() - p; }
-
-    public T head() { return get(0); }
-
-    /** Return element i elements ahead of current element.  i==0 gets
-     *  current element.  This is not an absolute index into the data list
-     *  since p defines the start of the real list.
-     */
-    public T get(int i) {
-        if ( p+i >= data.size() ) {
-            throw new NoSuchElementException("queue index "+(p+i)+" > size "+data.size());
-        }
-        return data.get(p+i);
-    }
-
-    public void clear() { p = 0; data.clear(); }
-
-    /** Return string of current buffer contents; non-destructive */
-    public String toString() {
-        StringBuffer buf = new StringBuffer();
-        int n = size();
-        for (int i=0; i<n; i++) {
-            buf.append(get(i));
-            if ( (i+1)<n ) buf.append(" ");
-        }
-        return buf.toString();
-    }
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/IntArray.h b/antlr-3.4/runtime/ObjC/Framework/IntArray.h
deleted file mode 100644
index a075770..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/IntArray.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2008 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.runtime.misc;
-
-/** A dynamic array that uses int not Integer objects. In principle this
- *  is more efficient in time, but certainly in space.
- *
- *  This is simple enough that you can access the data array directly,
- *  but make sure that you append elements only with add() so that you
- *  get dynamic sizing.  Make sure to call ensureCapacity() when you are
- *  manually adding new elements.
- *
- *  Doesn't impl List because it doesn't return objects and I mean this
- *  really as just an array not a List per se.  Manipulate the elements
- *  at will.  This has stack methods too.
- *
- *  When runtime can be 1.5, I'll make this generic.
- */
-public class IntArray {
-	public static final int INITIAL_SIZE = 10;
-	public int[] data;
-	protected int p = -1;
-
-	public void add(int v) {
-		ensureCapacity(p+1);
-		data[++p] = v;
-	}
-
-	public void push(int v) {
-		add(v);
-	}
-
-	public int pop() {
-		int v = data[p];
-		p--;
-		return v;
-	}
-
-	/** This only tracks elements added via push/add. */
-	public int size() {
-		return p;
-	}
-
-    public void clear() {
-        p = -1;
-    }
-
-    public void ensureCapacity(int index) {
-		if ( data==null ) {
-			data = new int[INITIAL_SIZE];
-		}
-		else if ( (index+1)>=data.length ) {
-			int newSize = data.length*2;
-			if ( index>newSize ) {
-				newSize = index+1;
-			}
-			int[] newData = new int[newSize];
-			System.arraycopy(data, 0, newData, 0, data.length);
-			data = newData;
-		}
-	}
-}
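As with FastQueue above, this file holds the Java source of org.antlr.runtime.misc.IntArray: a plain int buffer with stack-style push/pop that doubles its backing array as needed, avoiding Integer boxing. A brief usage sketch (API as in the deleted copy above):

    import org.antlr.runtime.misc.IntArray;

    /** Usage sketch for IntArray as shown above. */
    public class IntArrayDemo {
        public static void main(String[] args) {
            IntArray a = new IntArray();
            for (int i = 0; i < 25; i++) {
                a.push(i);                     // push() delegates to add(); ensureCapacity() doubles the array
            }
            System.out.println(a.data.length); // 40: capacity grew 10 -> 20 -> 40
            System.out.println(a.pop());       // 24, the last value pushed
            System.out.println(a.size());      // 23: size() returns the top index p, per the code above
            a.clear();                         // resets the top index; the backing array is kept
        }
    }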
diff --git a/antlr-3.4/runtime/ObjC/Framework/IntArray.m b/antlr-3.4/runtime/ObjC/Framework/IntArray.m
deleted file mode 100644
index a075770..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/IntArray.m
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- [The "BSD licence"]
- Copyright (c) 2005-2008 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.runtime.misc;
-
-/** A dynamic array that uses int not Integer objects. In principle this
- *  is more efficient in time, but certainly in space.
- *
- *  This is simple enough that you can access the data array directly,
- *  but make sure that you append elements only with add() so that you
- *  get dynamic sizing.  Make sure to call ensureCapacity() when you are
- *  manually adding new elements.
- *
- *  Doesn't impl List because it doesn't return objects and I mean this
- *  really as just an array not a List per se.  Manipulate the elements
- *  at will.  This has stack methods too.
- *
- *  When runtime can be 1.5, I'll make this generic.
- */
-public class IntArray {
-	public static final int INITIAL_SIZE = 10;
-	public int[] data;
-	protected int p = -1;
-
-	public void add(int v) {
-		ensureCapacity(p+1);
-		data[++p] = v;
-	}
-
-	public void push(int v) {
-		add(v);
-	}
-
-	public int pop() {
-		int v = data[p];
-		p--;
-		return v;
-	}
-
-	/** This only tracks elements added via push/add. */
-	public int size() {
-		return p;
-	}
-
-    public void clear() {
-        p = -1;
-    }
-
-    public void ensureCapacity(int index) {
-		if ( data==null ) {
-			data = new int[INITIAL_SIZE];
-		}
-		else if ( (index+1)>=data.length ) {
-			int newSize = data.length*2;
-			if ( index>newSize ) {
-				newSize = index+1;
-			}
-			int[] newData = new int[newSize];
-			System.arraycopy(data, 0, newData, 0, data.length);
-			data = newData;
-		}
-	}
-}
diff --git a/antlr-3.4/runtime/ObjC/Framework/LookaheadStream.h b/antlr-3.4/runtime/ObjC/Framework/LookaheadStream.h
deleted file mode 100644
index 097d7a9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/LookaheadStream.h
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
-[The "BSD licence"]
-Copyright (c) 2005-2008 Terence Parr
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-3. The name of the author may not be used to endorse or promote products
-derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.runtime.misc;
-
-import java.util.List;
-import java.util.ArrayList;
-
-/** A lookahead queue that knows how to mark/release locations
- *  in the buffer for backtracking purposes. Any markers force the FastQueue
- *  superclass to keep all tokens until no more markers; then can reset
- *  to avoid growing a huge buffer.
- */
-public abstract class LookaheadStream<T> extends FastQueue<T> {
-    public static final int UNINITIALIZED_EOF_ELEMENT_INDEX = Integer.MAX_VALUE;
-
-    /** Set to buffer index of eof when nextElement returns eof */
-    protected int eofElementIndex = UNINITIALIZED_EOF_ELEMENT_INDEX;
-
-    /** Returned by nextElement upon end of stream; we add to buffer also */
-    public T eof = null;
-
-    /** Track the last mark() call result value for use in rewind(). */
-    protected int lastMarker;
-
-    /** tracks how deep mark() calls are nested */
-    protected int markDepth = 0;    
-
-    public LookaheadStream(T eof) {
-        this.eof = eof;
-    }
-
-    public void reset() {
-        eofElementIndex = UNINITIALIZED_EOF_ELEMENT_INDEX;
-        super.reset();
-    }
-    
-    /** Implement nextElement to supply a stream of elements to this
-     *  lookahead buffer.  Return eof upon end of the stream we're pulling from.
-     */
-    public abstract T nextElement();
-
-    /** Get and remove first element in queue; override FastQueue.remove() */
-    public T remove() {
-        T o = get(0);
-        p++;
-        // have we hit end of buffer and not backtracking?
-        if ( p == data.size() && markDepth==0 ) {
-            // if so, it's an opportunity to start filling at index 0 again
-            clear(); // size goes to 0, but retains memory
-        }
-        return o;
-    }
-
-    /** Make sure we have at least one element to remove, even if EOF */
-    public void consume() { sync(1); remove(); }
-
-    /** Make sure we have 'need' elements from current position p. Last valid
-     *  p index is data.size()-1.  p+need-1 is the data index 'need' elements
-     *  ahead.  If we need 1 element, (p+1-1)==p must be < data.size().
-     */
-    public void sync(int need) {
-        int n = (p+need-1) - data.size() + 1; // how many more elements we need?
-        if ( n > 0 ) fill(n);                 // out of elements?
-    }
-
-    /** add n elements to buffer */
-    public void fill(int n) {
-        for (int i=1; i<=n; i++) {
-            T o = nextElement();
-            if ( o==eof ) {
-                data.add(eof);
-                eofElementIndex = data.size()-1;
-            }
-            else data.add(o);
-        }
-    }
-
-    //public boolean hasNext() { return eofElementIndex!=UNINITIALIZED_EOF_ELEMENT_INDEX; }
-    
-    /** Size of entire stream is unknown; we only know buffer size from FastQueue */
-    public int size() { throw new UnsupportedOperationException("streams are of unknown size"); }
-
-    public Object LT(int k) {
-		if ( k==0 ) {
-			return null;
-		}
-		if ( k<0 ) {
-			return LB(-k);
-		}
-		//System.out.print("LT(p="+p+","+k+")=");
-		if ( (p+k-1) >= eofElementIndex ) { // move to super.LT
-			return eof;
-		}
-        sync(k);
-        return get(k-1);
-	}
-
-	/** Look backwards k nodes */
-	protected Object LB(int k) {
-		if ( k==0 ) {
-			return null;
-		}
-		if ( (p-k)<0 ) {
-			return null;
-		}
-		return get(-k);
-	}
-
-    public Object getCurrentSymbol() { return LT(1); }
-
-    public int index() { return p; }
-
-	public int mark() {
-        markDepth++;
-        lastMarker = index();
-        return lastMarker;
-	}
-
-	public void release(int marker) {
-		// no resources to release
-	}
-
-	public void rewind(int marker) {
-        markDepth--;
-        seek(marker); // assume marker is top
-        // release(marker); // waste of call; it does nothing in this class
-    }
-
-	public void rewind() {
-        seek(lastMarker); // rewind but do not release marker
-    }
-
-    /** Seek to a 0-indexed position within data buffer.  Can't handle
-     *  case where you seek beyond end of existing buffer.  Normally used
-     *  to seek backwards in the buffer. Does not force loading of nodes.
-     */
-    public void seek(int index) { p = index; }
-}
\ No newline at end of file
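This file too contains Java: org.antlr.runtime.misc.LookaheadStream, the FastQueue subclass that adds LT(k) lookahead plus mark()/rewind() for backtracking. A minimal concrete subclass, sketched against the copy shown above (the IteratorLookahead name and the "<EOF>" sentinel are invented for the example):

    import java.util.Arrays;
    import java.util.Iterator;
    import org.antlr.runtime.misc.LookaheadStream;

    /** Minimal LookaheadStream subclass, written against the deleted copy above:
     *  the subclass only supplies nextElement(), returning the eof sentinel
     *  passed to the constructor once the underlying source runs dry. */
    public class IteratorLookahead extends LookaheadStream<String> {
        private final Iterator<String> source;

        public IteratorLookahead(Iterator<String> source, String eofSentinel) {
            super(eofSentinel);
            this.source = source;
        }

        @Override
        public String nextElement() {
            return source.hasNext() ? source.next() : eof;  // eof field inherited from LookaheadStream
        }

        public static void main(String[] args) {
            IteratorLookahead s =
                new IteratorLookahead(Arrays.asList("x", "y", "z").iterator(), "<EOF>");
            System.out.println(s.LT(1));  // "x" -- look ahead without consuming
            int m = s.mark();             // remember the current position
            s.consume();
            s.consume();
            System.out.println(s.LT(1));  // "z"
            s.rewind(m);                  // backtrack to the marked position
            System.out.println(s.LT(1));  // "x" again
        }
    }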
diff --git a/antlr-3.4/runtime/ObjC/Framework/LookaheadStream.m b/antlr-3.4/runtime/ObjC/Framework/LookaheadStream.m
deleted file mode 100644
index 097d7a9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/LookaheadStream.m
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
-[The "BSD licence"]
-Copyright (c) 2005-2008 Terence Parr
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-3. The name of the author may not be used to endorse or promote products
-derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package org.antlr.runtime.misc;
-
-import java.util.List;
-import java.util.ArrayList;
-
-/** A lookahead queue that knows how to mark/release locations
- *  in the buffer for backtracking purposes. Any markers force the FastQueue
- *  superclass to keep all tokens until no more markers; then can reset
- *  to avoid growing a huge buffer.
- */
-public abstract class LookaheadStream<T> extends FastQueue<T> {
-    public static final int UNINITIALIZED_EOF_ELEMENT_INDEX = Integer.MAX_VALUE;
-
-    /** Set to buffer index of eof when nextElement returns eof */
-    protected int eofElementIndex = UNINITIALIZED_EOF_ELEMENT_INDEX;
-
-    /** Returned by nextElement upon end of stream; we add to buffer also */
-    public T eof = null;
-
-    /** Track the last mark() call result value for use in rewind(). */
-    protected int lastMarker;
-
-    /** tracks how deep mark() calls are nested */
-    protected int markDepth = 0;    
-
-    public LookaheadStream(T eof) {
-        this.eof = eof;
-    }
-
-    public void reset() {
-        eofElementIndex = UNINITIALIZED_EOF_ELEMENT_INDEX;
-        super.reset();
-    }
-    
-    /** Implement nextElement to supply a stream of elements to this
-     *  lookahead buffer.  Return eof upon end of the stream we're pulling from.
-     */
-    public abstract T nextElement();
-
-    /** Get and remove first element in queue; override FastQueue.remove() */
-    public T remove() {
-        T o = get(0);
-        p++;
-        // have we hit end of buffer and not backtracking?
-        if ( p == data.size() && markDepth==0 ) {
-            // if so, it's an opportunity to start filling at index 0 again
-            clear(); // size goes to 0, but retains memory
-        }
-        return o;
-    }
-
-    /** Make sure we have at least one element to remove, even if EOF */
-    public void consume() { sync(1); remove(); }
-
-    /** Make sure we have 'need' elements from current position p. Last valid
-     *  p index is data.size()-1.  p+need-1 is the data index 'need' elements
-     *  ahead.  If we need 1 element, (p+1-1)==p must be < data.size().
-     */
-    public void sync(int need) {
-        int n = (p+need-1) - data.size() + 1; // how many more elements we need?
-        if ( n > 0 ) fill(n);                 // out of elements?
-    }
-
-    /** add n elements to buffer */
-    public void fill(int n) {
-        for (int i=1; i<=n; i++) {
-            T o = nextElement();
-            if ( o==eof ) {
-                data.add(eof);
-                eofElementIndex = data.size()-1;
-            }
-            else data.add(o);
-        }
-    }
-
-    //public boolean hasNext() { return eofElementIndex!=UNINITIALIZED_EOF_ELEMENT_INDEX; }
-    
-    /** Size of entire stream is unknown; we only know buffer size from FastQueue */
-    public int size() { throw new UnsupportedOperationException("streams are of unknown size"); }
-
-    public Object LT(int k) {
-		if ( k==0 ) {
-			return null;
-		}
-		if ( k<0 ) {
-			return LB(-k);
-		}
-		//System.out.print("LT(p="+p+","+k+")=");
-		if ( (p+k-1) >= eofElementIndex ) { // move to super.LT
-			return eof;
-		}
-        sync(k);
-        return get(k-1);
-	}
-
-	/** Look backwards k nodes */
-	protected Object LB(int k) {
-		if ( k==0 ) {
-			return null;
-		}
-		if ( (p-k)<0 ) {
-			return null;
-		}
-		return get(-k);
-	}
-
-    public Object getCurrentSymbol() { return LT(1); }
-
-    public int index() { return p; }
-
-	public int mark() {
-        markDepth++;
-        lastMarker = index();
-        return lastMarker;
-	}
-
-	public void release(int marker) {
-		// no resources to release
-	}
-
-	public void rewind(int marker) {
-        markDepth--;
-        seek(marker); // assume marker is top
-        // release(marker); // waste of call; it does nothing in this class
-    }
-
-	public void rewind() {
-        seek(lastMarker); // rewind but do not release marker
-    }
-
-    /** Seek to a 0-indexed position within data buffer.  Can't handle
-     *  case where you seek beyond end of existing buffer.  Normally used
-     *  to seek backwards in the buffer. Does not force loading of nodes.
-     */
-    public void seek(int index) { p = index; }
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/antlr3.h b/antlr-3.4/runtime/ObjC/Framework/antlr3.h
deleted file mode 100644
index 9c941dc..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/antlr3.h
+++ /dev/null
@@ -1,114 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <ANTLR/ACBTree.h>
-#import <ANTLR/AMutableArray.h>
-#import <ANTLR/AMutableDictionary.h>
-#import <ANTLR/ANTLRBaseMapElement.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRBaseStack.h>
-#import <ANTLR/ANTLRBaseTree.h>
-#import <ANTLR/ANTLRBaseTreeAdaptor.h>
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBufferedTokenStream.h>
-#import <ANTLR/ANTLRBufferedTreeNodeStream.h>
-#import <ANTLR/ANTLRCharStream.h>
-#import <ANTLR/ANTLRCharStreamState.h>
-#import <ANTLR/ANTLRCommonErrorNode.h>
-#import <ANTLR/ANTLRCommonToken.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRDebug.h>
-#import <ANTLR/ANTLRDebugEventProxy.h>
-#import <ANTLR/ANTLRDebugEventListener.h>
-#import <ANTLR/ANTLRDebugParser.h>
-#import <ANTLR/ANTLRDebugTokenStream.h>
-#import <ANTLR/ANTLRDebugTreeAdaptor.h>
-#import <ANTLR/ANTLRDebugTreeNodeStream.h>
-#import <ANTLR/ANTLRDebugTreeParser.h>
-#import <ANTLR/ANTLRDoubleKeyMap.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRError.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRFastQueue.h>
-#import <ANTLR/ANTLRFileStream.h>
-#import <ANTLR/ANTLRHashMap.h>
-#import <ANTLR/ANTLRHashRule.h>
-#import <ANTLR/ANTLRInputStream.h>
-#import <ANTLR/ANTLRIntArray.h>
-#import <ANTLR/ANTLRIntStream.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRLexerRuleReturnScope.h>
-#import <ANTLR/ANTLRLinkBase.h>
-#import <ANTLR/ANTLRLookaheadStream.h>
-#import <ANTLR/ANTLRMapElement.h>
-#import <ANTLR/ANTLRMap.h>
-#import <ANTLR/ANTLRMismatchedNotSetException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRMissingTokenException.h>
-#import <ANTLR/ANTLRNodeMapElement.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRPtrBuffer.h>
-#import <ANTLR/ANTLRReaderStream.h>
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLRRecognizerSharedState.h>
-#import <ANTLR/ANTLRRewriteRuleElementStream.h>
-#import <ANTLR/ANTLRRewriteRuleNodeStream.h>
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
-#import <ANTLR/ANTLRRuleMemo.h>
-#import <ANTLR/ANTLRRuleStack.h>
-#import <ANTLR/ANTLRRuleReturnScope.h>
-#import <ANTLR/ANTLRRuntimeException.h>
-#import <ANTLR/ANTLRStreamEnumerator.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRSymbolStack.h>
-#import <ANTLR/ANTLRToken+DebuggerSupport.h>
-#import <ANTLR/ANTLRToken.h>
-#import <ANTLR/ANTLRTokenRewriteStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRTokenStream.h>
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeException.h>
-#import <ANTLR/ANTLRTreeIterator.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-#import <ANTLR/ANTLRUnbufferedTokenStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-#import <ANTLR/ANTLRUniqueIDMap.h>
-#import <ANTLR/ANTLRUnwantedTokenException.h>
-#import <ANTLR/ArrayIterator.h>
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseMapElement.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseMapElement.o
deleted file mode 100644
index c03b784..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseMapElement.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseRecognizer.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseRecognizer.o
deleted file mode 100644
index cc80307..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseRecognizer.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseStack.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseStack.o
deleted file mode 100644
index ad357ae..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseStack.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseTree.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseTree.o
deleted file mode 100644
index fe78152..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseTree.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseTreeAdaptor.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseTreeAdaptor.o
deleted file mode 100644
index 323f2df..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBaseTreeAdaptor.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBitSet.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBitSet.o
deleted file mode 100644
index 9c6d822..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBitSet.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBufferedTokenStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBufferedTokenStream.o
deleted file mode 100644
index 1283058..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBufferedTokenStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBufferedTreeNodeStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBufferedTreeNodeStream.o
deleted file mode 100644
index d09dfa9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRBufferedTreeNodeStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCharStreamState.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCharStreamState.o
deleted file mode 100644
index 4880df9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCharStreamState.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonErrorNode.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonErrorNode.o
deleted file mode 100644
index 0ed5fc6..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonErrorNode.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonToken.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonToken.o
deleted file mode 100644
index 674805b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonToken.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTokenStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTokenStream.o
deleted file mode 100644
index 493eee9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTokenStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTree.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTree.o
deleted file mode 100644
index f9c1630..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTree.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTreeAdaptor.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTreeAdaptor.o
deleted file mode 100644
index 9f47f11..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTreeAdaptor.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTreeNodeStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTreeNodeStream.o
deleted file mode 100644
index 716eb02..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRCommonTreeNodeStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLREarlyExitException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLREarlyExitException.o
deleted file mode 100644
index 8f4970d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLREarlyExitException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRFailedPredicateException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRFailedPredicateException.o
deleted file mode 100644
index 1633312..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRFailedPredicateException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRFastQueue.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRFastQueue.o
deleted file mode 100644
index af58b4f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRFastQueue.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRHashMap.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRHashMap.o
deleted file mode 100644
index 7cf9eea..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRHashMap.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRHashRule.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRHashRule.o
deleted file mode 100644
index 19618bf..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRHashRule.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRIntArray.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRIntArray.o
deleted file mode 100644
index cf02b84..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRIntArray.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLexer.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLexer.o
deleted file mode 100644
index e242a4f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLexer.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLexerRuleReturnScope.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLexerRuleReturnScope.o
deleted file mode 100644
index aa72475..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLexerRuleReturnScope.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLinkBase.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLinkBase.o
deleted file mode 100644
index bae3968..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLinkBase.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLookaheadStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLookaheadStream.o
deleted file mode 100644
index 5b815d6..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRLookaheadStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMap.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMap.o
deleted file mode 100644
index 0e4a1e8..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMap.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMapElement.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMapElement.o
deleted file mode 100644
index 9ea4761..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMapElement.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedNotSetException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedNotSetException.o
deleted file mode 100644
index 4e5033a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedNotSetException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedRangeException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedRangeException.o
deleted file mode 100644
index 9816a80..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedRangeException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedSetException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedSetException.o
deleted file mode 100644
index c649efb..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedSetException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedTokenException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedTokenException.o
deleted file mode 100644
index 2639a95..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedTokenException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedTreeNodeException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedTreeNodeException.o
deleted file mode 100644
index b29985d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMismatchedTreeNodeException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMissingTokenException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMissingTokenException.o
deleted file mode 100644
index e096cc2..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRMissingTokenException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRNoViableAltException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRNoViableAltException.o
deleted file mode 100644
index d5c20d1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRNoViableAltException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRNodeMapElement.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRNodeMapElement.o
deleted file mode 100644
index 1b43a3b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRNodeMapElement.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRParseTree.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRParseTree.o
deleted file mode 100644
index f1eb79e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRParseTree.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRParser.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRParser.o
deleted file mode 100644
index 21fa14e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRParser.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRParserRuleReturnScope.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRParserRuleReturnScope.o
deleted file mode 100644
index 89c6559..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRParserRuleReturnScope.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRPtrBuffer.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRPtrBuffer.o
deleted file mode 100644
index 4ed966e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRPtrBuffer.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRecognitionException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRecognitionException.o
deleted file mode 100644
index 9b32869..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRecognitionException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRecognizerSharedState.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRecognizerSharedState.o
deleted file mode 100644
index cc5343a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRecognizerSharedState.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRewriteRuleElementStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRewriteRuleElementStream.o
deleted file mode 100644
index c80a790..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRewriteRuleElementStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRewriteRuleSubtreeStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRewriteRuleSubtreeStream.o
deleted file mode 100644
index 44d2d0c..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRewriteRuleSubtreeStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRewriteRuleTokenStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRewriteRuleTokenStream.o
deleted file mode 100644
index 49bf099..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRewriteRuleTokenStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleMapElement.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleMapElement.o
deleted file mode 100644
index 8a29d9e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleMapElement.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleMemo.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleMemo.o
deleted file mode 100644
index 6a40982..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleMemo.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleReturnScope.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleReturnScope.o
deleted file mode 100644
index 38da9bc..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleReturnScope.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleStack.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleStack.o
deleted file mode 100644
index b3a121f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuleStack.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuntimeException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuntimeException.o
deleted file mode 100644
index 16d065f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRRuntimeException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRStreamEnumerator.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRStreamEnumerator.o
deleted file mode 100644
index 9178e6b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRStreamEnumerator.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRStringStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRStringStream.o
deleted file mode 100644
index 666898d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRStringStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRSymbolStack.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRSymbolStack.o
deleted file mode 100644
index d6588de..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRSymbolStack.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRToken+DebuggerSupport.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRToken+DebuggerSupport.o
deleted file mode 100644
index 8e07fc1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRToken+DebuggerSupport.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTokenRewriteStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTokenRewriteStream.o
deleted file mode 100644
index b37d4e4..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTokenRewriteStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeException.o
deleted file mode 100644
index 9b82ada..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeIterator.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeIterator.o
deleted file mode 100644
index c8425c8..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeIterator.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeParser.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeParser.o
deleted file mode 100644
index 35d196e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeParser.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreePatternLexer.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreePatternLexer.o
deleted file mode 100644
index 30a3c96..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreePatternLexer.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreePatternParser.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreePatternParser.o
deleted file mode 100644
index e5856c3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreePatternParser.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeRewriter.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeRewriter.o
deleted file mode 100644
index f8c5129..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeRewriter.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeRuleReturnScope.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeRuleReturnScope.o
deleted file mode 100644
index 14048bc..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeRuleReturnScope.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeVisitor.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeVisitor.o
deleted file mode 100644
index 3ca88eb..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeVisitor.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeVisitorAction.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeVisitorAction.o
deleted file mode 100644
index 1846b5a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeVisitorAction.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeWizard.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeWizard.o
deleted file mode 100644
index 189f26e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRTreeWizard.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRUnbufferedTokenStream.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRUnbufferedTokenStream.o
deleted file mode 100644
index 77d7c65..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRUnbufferedTokenStream.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRUniqueIDMap.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRUniqueIDMap.o
deleted file mode 100644
index b191f3d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRUniqueIDMap.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRUnwantedTokenException.o b/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRUnwantedTokenException.o
deleted file mode 100644
index 1942134..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/build/ANTLR.build/Debug/ANTLR.build/Objects-normal/i386/ANTLRUnwantedTokenException.o
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleC.tokens b/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleC.tokens
deleted file mode 100644
index 635b4e1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleC.tokens
+++ /dev/null
@@ -1,31 +0,0 @@
-T__20=20
-INT=5
-ID=4
-T__9=9
-T__8=8
-T__7=7
-T__19=19
-WS=6
-T__16=16
-T__15=15
-T__18=18
-T__17=17
-T__12=12
-T__11=11
-T__14=14
-T__13=13
-T__10=10
-'char'=12
-'}'=15
-'=='=18
-'{'=14
-'void'=13
-';'=7
-'='=17
-'int'=11
-'<'=19
-'('=8
-'for'=16
-'+'=20
-','=9
-')'=10
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.h
deleted file mode 100644
index b640542..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} SimpleC.g 2011-05-06 13:53:12
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define T__7 7
-#define T__8 8
-#define T__9 9
-#define T__10 10
-#define T__11 11
-#define T__12 12
-#define T__13 13
-#define T__14 14
-#define T__15 15
-#define T__16 16
-#define T__17 17
-#define T__18 18
-#define T__19 19
-#define T__20 20
-#define ID 4
-#define INT 5
-#define WS 6
-/* interface lexer class */
-@interface SimpleCLexer : ANTLRLexer { // line 283
-/* ObjC start of actions.lexer.memVars */
-/* ObjC end of actions.lexer.memVars */
-}
-+ (void) initialize;
-+ (SimpleCLexer *)newSimpleCLexerWithCharStream:(id<ANTLRCharStream>)anInput;
-/* ObjC start actions.lexer.methodsDecl */
-/* ObjC end actions.lexer.methodsDecl */
-- (void) mT__7 ; 
-- (void) mT__8 ; 
-- (void) mT__9 ; 
-- (void) mT__10 ; 
-- (void) mT__11 ; 
-- (void) mT__12 ; 
-- (void) mT__13 ; 
-- (void) mT__14 ; 
-- (void) mT__15 ; 
-- (void) mT__16 ; 
-- (void) mT__17 ; 
-- (void) mT__18 ; 
-- (void) mT__19 ; 
-- (void) mT__20 ; 
-- (void) mID ; 
-- (void) mINT ; 
-- (void) mWS ; 
-- (void) mTokens ; 
-
-@end /* end of SimpleCLexer interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.m
deleted file mode 100644
index b4e53ae..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.m
+++ /dev/null
@@ -1,1160 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : SimpleC.g
- *     -                            On : 2011-05-06 13:53:12
- *     -                 for the lexer : SimpleCLexerLexer
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} SimpleC.g 2011-05-06 13:53:12
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SimpleCLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-/** As per Terence: No returns for lexer rules! */
-@implementation SimpleCLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"SimpleC.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (SimpleCLexer *)newSimpleCLexerWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    return [[SimpleCLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    self = [super initWithCharStream:anInput State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:18+1] retain]];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* ObjC Start of actions.lexer.methods */
-/* ObjC end of actions.lexer.methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-
-/* Start of Rules */
-// $ANTLR start "T__7"
-- (void) mT__7
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__7;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:7:6: ( '(' ) // ruleBlockSingleAlt
-        // SimpleC.g:7:8: '(' // alt
-        {
-        [self matchChar:'(']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__7" */
-
-// $ANTLR start "T__8"
-- (void) mT__8
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__8;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:8:6: ( ')' ) // ruleBlockSingleAlt
-        // SimpleC.g:8:8: ')' // alt
-        {
-        [self matchChar:')']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__8" */
-
-// $ANTLR start "T__9"
-- (void) mT__9
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__9;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:9:6: ( '+' ) // ruleBlockSingleAlt
-        // SimpleC.g:9:8: '+' // alt
-        {
-        [self matchChar:'+']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__9" */
-
-// $ANTLR start "T__10"
-- (void) mT__10
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__10;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:10:7: ( ',' ) // ruleBlockSingleAlt
-        // SimpleC.g:10:9: ',' // alt
-        {
-        [self matchChar:',']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__10" */
-
-// $ANTLR start "T__11"
-- (void) mT__11
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__11;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:11:7: ( ';' ) // ruleBlockSingleAlt
-        // SimpleC.g:11:9: ';' // alt
-        {
-        [self matchChar:';']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__11" */
-
-// $ANTLR start "T__12"
-- (void) mT__12
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__12;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:12:7: ( '<' ) // ruleBlockSingleAlt
-        // SimpleC.g:12:9: '<' // alt
-        {
-        [self matchChar:'<']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__12" */
-
-// $ANTLR start "T__13"
-- (void) mT__13
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__13;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:13:7: ( '=' ) // ruleBlockSingleAlt
-        // SimpleC.g:13:9: '=' // alt
-        {
-        [self matchChar:'=']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__13" */
-
-// $ANTLR start "T__14"
-- (void) mT__14
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__14;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:14:7: ( '==' ) // ruleBlockSingleAlt
-        // SimpleC.g:14:9: '==' // alt
-        {
-        [self matchString:@"=="]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__14" */
-
-// $ANTLR start "T__15"
-- (void) mT__15
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__15;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:15:7: ( 'char' ) // ruleBlockSingleAlt
-        // SimpleC.g:15:9: 'char' // alt
-        {
-        [self matchString:@"char"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__15" */
-
-// $ANTLR start "T__16"
-- (void) mT__16
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__16;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:16:7: ( 'for' ) // ruleBlockSingleAlt
-        // SimpleC.g:16:9: 'for' // alt
-        {
-        [self matchString:@"for"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__16" */
-
-// $ANTLR start "T__17"
-- (void) mT__17
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__17;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:17:7: ( 'int' ) // ruleBlockSingleAlt
-        // SimpleC.g:17:9: 'int' // alt
-        {
-        [self matchString:@"int"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__17" */
-
-// $ANTLR start "T__18"
-- (void) mT__18
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__18;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:18:7: ( 'void' ) // ruleBlockSingleAlt
-        // SimpleC.g:18:9: 'void' // alt
-        {
-        [self matchString:@"void"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__18" */
-
-// $ANTLR start "T__19"
-- (void) mT__19
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__19;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:19:7: ( '{' ) // ruleBlockSingleAlt
-        // SimpleC.g:19:9: '{' // alt
-        {
-        [self matchChar:'{']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__19" */
-
-// $ANTLR start "T__20"
-- (void) mT__20
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__20;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:20:7: ( '}' ) // ruleBlockSingleAlt
-        // SimpleC.g:20:9: '}' // alt
-        {
-        [self matchChar:'}']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__20" */
-
-// $ANTLR start "ID"
-- (void) mID
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = ID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:94:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* ) // ruleBlockSingleAlt
-        // SimpleC.g:94:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* // alt
-        {
-        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-            [input consume];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            [self recover:mse];
-            @throw mse;
-        }
-
-
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0 >= '0' && LA1_0 <= '9')||(LA1_0 >= 'A' && LA1_0 <= 'Z')||LA1_0=='_'||(LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // SimpleC.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop1;
-            }
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "ID" */
-
-// $ANTLR start "INT"
-- (void) mINT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = INT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:97:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
-        // SimpleC.g:97:7: ( '0' .. '9' )+ // alt
-        {
-        // SimpleC.g:97:7: ( '0' .. '9' )+ // positiveClosureBlock
-        NSInteger cnt2 = 0;
-        do {
-            NSInteger alt2 = 2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // SimpleC.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt2 >= 1 )
-                        goto loop2;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:2];
-                    @throw eee;
-            }
-            cnt2++;
-        } while (YES);
-        loop2: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "INT" */
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:100:5: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ ) // ruleBlockSingleAlt
-        // SimpleC.g:100:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // alt
-        {
-        // SimpleC.g:100:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // positiveClosureBlock
-        NSInteger cnt3 = 0;
-        do {
-            NSInteger alt3 = 2;
-            NSInteger LA3_0 = [input LA:1];
-            if ( ((LA3_0 >= '\t' && LA3_0 <= '\n')||LA3_0=='\r'||LA3_0==' ') ) {
-                alt3=1;
-            }
-
-
-            switch (alt3) {
-                case 1 : ;
-                    // SimpleC.g: // alt
-                    {
-                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == '\r'||[input LA:1] == ' ') {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt3 >= 1 )
-                        goto loop3;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:3];
-                    @throw eee;
-            }
-            cnt3++;
-        } while (YES);
-        loop3: ;
-
-
-         _channel=HIDDEN; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "WS" */
-
-- (void) mTokens
-{
-    // SimpleC.g:1:8: ( T__7 | T__8 | T__9 | T__10 | T__11 | T__12 | T__13 | T__14 | T__15 | T__16 | T__17 | T__18 | T__19 | T__20 | ID | INT | WS ) //ruleblock
-    NSInteger alt4=17;
-    unichar charLA4 = [input LA:1];
-    switch (charLA4) {
-        case '(': ;
-            {
-            alt4=1;
-            }
-            break;
-        case ')': ;
-            {
-            alt4=2;
-            }
-            break;
-        case '+': ;
-            {
-            alt4=3;
-            }
-            break;
-        case ',': ;
-            {
-            alt4=4;
-            }
-            break;
-        case ';': ;
-            {
-            alt4=5;
-            }
-            break;
-        case '<': ;
-            {
-            alt4=6;
-            }
-            break;
-        case '=': ;
-            {
-            NSInteger LA4_7 = [input LA:2];
-
-            if ( (LA4_7=='=') ) {
-                alt4=8;
-            }
-            else {
-                alt4 = 7;
-            }
-            }
-            break;
-        case 'c': ;
-            {
-            NSInteger LA4_8 = [input LA:2];
-
-            if ( (LA4_8=='h') ) {
-                NSInteger LA4_19 = [input LA:3];
-
-                if ( (LA4_19=='a') ) {
-                    NSInteger LA4_23 = [input LA:4];
-
-                    if ( (LA4_23=='r') ) {
-                        NSInteger LA4_27 = [input LA:5];
-
-                        if ( ((LA4_27 >= '0' && LA4_27 <= '9')||(LA4_27 >= 'A' && LA4_27 <= 'Z')||LA4_27=='_'||(LA4_27 >= 'a' && LA4_27 <= 'z')) ) {
-                            alt4=15;
-                        }
-                        else {
-                            alt4 = 9;
-                        }
-                    }
-                    else {
-                        alt4 = 15;
-                    }
-                }
-                else {
-                    alt4 = 15;
-                }
-            }
-            else {
-                alt4 = 15;
-            }
-            }
-            break;
-        case 'f': ;
-            {
-            NSInteger LA4_9 = [input LA:2];
-
-            if ( (LA4_9=='o') ) {
-                NSInteger LA4_20 = [input LA:3];
-
-                if ( (LA4_20=='r') ) {
-                    NSInteger LA4_24 = [input LA:4];
-
-                    if ( ((LA4_24 >= '0' && LA4_24 <= '9')||(LA4_24 >= 'A' && LA4_24 <= 'Z')||LA4_24=='_'||(LA4_24 >= 'a' && LA4_24 <= 'z')) ) {
-                        alt4=15;
-                    }
-                    else {
-                        alt4 = 10;
-                    }
-                }
-                else {
-                    alt4 = 15;
-                }
-            }
-            else {
-                alt4 = 15;
-            }
-            }
-            break;
-        case 'i': ;
-            {
-            NSInteger LA4_10 = [input LA:2];
-
-            if ( (LA4_10=='n') ) {
-                NSInteger LA4_21 = [input LA:3];
-
-                if ( (LA4_21=='t') ) {
-                    NSInteger LA4_25 = [input LA:4];
-
-                    if ( ((LA4_25 >= '0' && LA4_25 <= '9')||(LA4_25 >= 'A' && LA4_25 <= 'Z')||LA4_25=='_'||(LA4_25 >= 'a' && LA4_25 <= 'z')) ) {
-                        alt4=15;
-                    }
-                    else {
-                        alt4 = 11;
-                    }
-                }
-                else {
-                    alt4 = 15;
-                }
-            }
-            else {
-                alt4 = 15;
-            }
-            }
-            break;
-        case 'v': ;
-            {
-            NSInteger LA4_11 = [input LA:2];
-
-            if ( (LA4_11=='o') ) {
-                NSInteger LA4_22 = [input LA:3];
-
-                if ( (LA4_22=='i') ) {
-                    NSInteger LA4_26 = [input LA:4];
-
-                    if ( (LA4_26=='d') ) {
-                        NSInteger LA4_30 = [input LA:5];
-
-                        if ( ((LA4_30 >= '0' && LA4_30 <= '9')||(LA4_30 >= 'A' && LA4_30 <= 'Z')||LA4_30=='_'||(LA4_30 >= 'a' && LA4_30 <= 'z')) ) {
-                            alt4=15;
-                        }
-                        else {
-                            alt4 = 12;
-                        }
-                    }
-                    else {
-                        alt4 = 15;
-                    }
-                }
-                else {
-                    alt4 = 15;
-                }
-            }
-            else {
-                alt4 = 15;
-            }
-            }
-            break;
-        case '{': ;
-            {
-            alt4=13;
-            }
-            break;
-        case '}': ;
-            {
-            alt4=14;
-            }
-            break;
-        case 'A': ;
-        case 'B': ;
-        case 'C': ;
-        case 'D': ;
-        case 'E': ;
-        case 'F': ;
-        case 'G': ;
-        case 'H': ;
-        case 'I': ;
-        case 'J': ;
-        case 'K': ;
-        case 'L': ;
-        case 'M': ;
-        case 'N': ;
-        case 'O': ;
-        case 'P': ;
-        case 'Q': ;
-        case 'R': ;
-        case 'S': ;
-        case 'T': ;
-        case 'U': ;
-        case 'V': ;
-        case 'W': ;
-        case 'X': ;
-        case 'Y': ;
-        case 'Z': ;
-        case '_': ;
-        case 'a': ;
-        case 'b': ;
-        case 'd': ;
-        case 'e': ;
-        case 'g': ;
-        case 'h': ;
-        case 'j': ;
-        case 'k': ;
-        case 'l': ;
-        case 'm': ;
-        case 'n': ;
-        case 'o': ;
-        case 'p': ;
-        case 'q': ;
-        case 'r': ;
-        case 's': ;
-        case 't': ;
-        case 'u': ;
-        case 'w': ;
-        case 'x': ;
-        case 'y': ;
-        case 'z': ;
-            {
-            alt4=15;
-            }
-            break;
-        case '0': ;
-        case '1': ;
-        case '2': ;
-        case '3': ;
-        case '4': ;
-        case '5': ;
-        case '6': ;
-        case '7': ;
-        case '8': ;
-        case '9': ;
-            {
-            alt4=16;
-            }
-            break;
-        case '\t': ;
-        case '\n': ;
-        case '\r': ;
-        case ' ': ;
-            {
-            alt4=17;
-            }
-            break;
-
-    default: ;
-        ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:4 state:0 stream:input];
-        nvae.c = charLA4;
-        @throw nvae;
-
-    }
-
-    switch (alt4) {
-        case 1 : ;
-            // SimpleC.g:1:10: T__7 // alt
-            {
-            [self mT__7]; 
-
-
-
-            }
-            break;
-        case 2 : ;
-            // SimpleC.g:1:15: T__8 // alt
-            {
-            [self mT__8]; 
-
-
-
-            }
-            break;
-        case 3 : ;
-            // SimpleC.g:1:20: T__9 // alt
-            {
-            [self mT__9]; 
-
-
-
-            }
-            break;
-        case 4 : ;
-            // SimpleC.g:1:25: T__10 // alt
-            {
-            [self mT__10]; 
-
-
-
-            }
-            break;
-        case 5 : ;
-            // SimpleC.g:1:31: T__11 // alt
-            {
-            [self mT__11]; 
-
-
-
-            }
-            break;
-        case 6 : ;
-            // SimpleC.g:1:37: T__12 // alt
-            {
-            [self mT__12]; 
-
-
-
-            }
-            break;
-        case 7 : ;
-            // SimpleC.g:1:43: T__13 // alt
-            {
-            [self mT__13]; 
-
-
-
-            }
-            break;
-        case 8 : ;
-            // SimpleC.g:1:49: T__14 // alt
-            {
-            [self mT__14]; 
-
-
-
-            }
-            break;
-        case 9 : ;
-            // SimpleC.g:1:55: T__15 // alt
-            {
-            [self mT__15]; 
-
-
-
-            }
-            break;
-        case 10 : ;
-            // SimpleC.g:1:61: T__16 // alt
-            {
-            [self mT__16]; 
-
-
-
-            }
-            break;
-        case 11 : ;
-            // SimpleC.g:1:67: T__17 // alt
-            {
-            [self mT__17]; 
-
-
-
-            }
-            break;
-        case 12 : ;
-            // SimpleC.g:1:73: T__18 // alt
-            {
-            [self mT__18]; 
-
-
-
-            }
-            break;
-        case 13 : ;
-            // SimpleC.g:1:79: T__19 // alt
-            {
-            [self mT__19]; 
-
-
-
-            }
-            break;
-        case 14 : ;
-            // SimpleC.g:1:85: T__20 // alt
-            {
-            [self mT__20]; 
-
-
-
-            }
-            break;
-        case 15 : ;
-            // SimpleC.g:1:91: ID // alt
-            {
-            [self mID]; 
-
-
-
-            }
-            break;
-        case 16 : ;
-            // SimpleC.g:1:94: INT // alt
-            {
-            [self mINT]; 
-
-
-
-            }
-            break;
-        case 17 : ;
-            // SimpleC.g:1:98: WS // alt
-            {
-            [self mWS]; 
-
-
-
-            }
-            break;
-
-    }
-
-}
-
-@end /* end of SimpleCLexer implementation line 397 */
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.h b/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.h
deleted file mode 100644
index 6e3b2af..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} SimpleC.g 2011-05-06 13:53:12
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* parserHeaderFile */
-#ifndef ANTLR3TokenTypeAlreadyDefined
-#define ANTLR3TokenTypeAlreadyDefined
-typedef enum {
-    ANTLR_EOF = -1,
-    INVALID,
-    EOR,
-    DOWN,
-    UP,
-    MIN
-} ANTLR3TokenType;
-#endif
-
-#pragma mark Cyclic DFA interface start DFA2
-@interface DFA2 : ANTLRDFA {
-}
-+ newDFA2WithRecognizer:(ANTLRBaseRecognizer *)theRecognizer;
-- initWithRecognizer:(ANTLRBaseRecognizer *)recognizer;
-@end /* end of DFA2 interface  */
-
-#pragma mark Cyclic DFA interface end DFA2
-
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define T__7 7
-#define T__8 8
-#define T__9 9
-#define T__10 10
-#define T__11 11
-#define T__12 12
-#define T__13 13
-#define T__14 14
-#define T__15 15
-#define T__16 16
-#define T__17 17
-#define T__18 18
-#define T__19 19
-#define T__20 20
-#define ID 4
-#define INT 5
-#define WS 6
-#pragma mark Dynamic Global Scopes
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-
-/* Interface grammar class */
-@interface SimpleCParser : ANTLRParser { /* line 572 */
-/* ObjC start of ruleAttributeScopeMemVar */
-
-
-/* ObjC end of ruleAttributeScopeMemVar */
-/* ObjC start of globalAttributeScopeMemVar */
-
-
-/* ObjC end of globalAttributeScopeMemVar */
-/* ObjC start of actions.(actionScope).memVars */
-/* ObjC end of actions.(actionScope).memVars */
-/* ObjC start of memVars */
-/* ObjC end of memVars */
-
-DFA2 *dfa2;
- }
-
-/* ObjC start of actions.(actionScope).properties */
-/* ObjC end of actions.(actionScope).properties */
-/* ObjC start of properties */
-/* ObjC end of properties */
-
-+ (void) initialize;
-+ (id) newSimpleCParser:(id<ANTLRTokenStream>)aStream;
-/* ObjC start of actions.(actionScope).methodsDecl */
-/* ObjC end of actions.(actionScope).methodsDecl */
-
-/* ObjC start of methodsDecl */
-/* ObjC end of methodsDecl */
-
-- (void)program; 
-- (void)declaration; 
-- (void)variable; 
-- (void)declarator; 
-- (NSString *)functionHeader; 
-- (void)formalParameter; 
-- (void)type; 
-- (void)block; 
-- (void)stat; 
-- (void)forStat; 
-- (void)assignStat; 
-- (void)expr; 
-- (void)condExpr; 
-- (void)aexpr; 
-- (void)atom; 
-
-
-@end /* end of SimpleCParser interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.m b/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.m
deleted file mode 100644
index e65a149..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.m
+++ /dev/null
@@ -1,1438 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : SimpleC.g
- *     -                            On : 2011-05-06 13:53:12
- *     -                for the parser : SimpleCParserParser
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} SimpleC.g 2011-05-06 13:53:12
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SimpleCParser.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-#pragma mark Cyclic DFA implementation start DFA2
-@implementation DFA2
-const static NSInteger dfa2_eot[13] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static NSInteger dfa2_eof[13] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static unichar dfa2_min[13] =
-    {4,4,7,4,0,4,11,8,0,0,4,4,8};
-const static unichar dfa2_max[13] =
-    {18,4,11,18,0,4,19,10,0,0,18,4,10};
-const static NSInteger dfa2_accept[13] =
-    {-1,-1,-1,-1,1,-1,-1,-1,2,3,-1,-1,-1};
-const static NSInteger dfa2_special[13] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static NSInteger dfa2_transition[] = {};
-const static NSInteger dfa2_transition0[] = {3, -1, -1, -1, 4};
-const static NSInteger dfa2_transition1[] = {6, -1, 10};
-const static NSInteger dfa2_transition2[] = {8, -1, -1, -1, -1, -1, -1, 
- -1, 9};
-const static NSInteger dfa2_transition3[] = {2};
-const static NSInteger dfa2_transition4[] = {1, -1, -1, -1, -1, -1, -1, 
- -1, -1, -1, -1, 1, -1, 1, 1};
-const static NSInteger dfa2_transition5[] = {7};
-const static NSInteger dfa2_transition6[] = {11, -1, -1, -1, -1, -1, -1, 
- -1, -1, -1, -1, 11, -1, 11, 11};
-const static NSInteger dfa2_transition7[] = {12};
-const static NSInteger dfa2_transition8[] = {5, -1, -1, -1, 6, -1, -1, -1, 
- -1, -1, -1, 5, -1, 5, 5};
-
-
-+ (id) newDFA2WithRecognizer:(ANTLRBaseRecognizer *)aRecognizer
-{
-    return [[[DFA2 alloc] initWithRecognizer:aRecognizer] retain];
-}
-
-- (id) initWithRecognizer:(ANTLRBaseRecognizer *) theRecognizer
-{
-    self = [super initWithRecognizer:theRecognizer];
-    if ( self != nil ) {
-        decisionNumber = 2;
-        eot = dfa2_eot;
-        eof = dfa2_eof;
-        min = dfa2_min;
-        max = dfa2_max;
-        accept = dfa2_accept;
-        special = dfa2_special;
-        if (!(transition = calloc(13, sizeof(void*)))) {
-            [self release];
-            return nil;
-        }
-        len = 13;
-        transition[0] = dfa2_transition4;
-        transition[1] = dfa2_transition3;
-        transition[2] = dfa2_transition0;
-        transition[3] = dfa2_transition8;
-
-        transition[4] = dfa2_transition5;
-        transition[5] = dfa2_transition2;
-        transition[6] = dfa2_transition1;
-
-
-        transition[7] = dfa2_transition6;
-        transition[8] = dfa2_transition7;
-        transition[9] = dfa2_transition1;
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    free(transition);
-    [super dealloc];
-}
-
-- (NSString *) description
-{
-    return @"20:1: declaration : ( variable | functionHeader ';' | functionHeader block );";
-}
-
-
-@end /* end DFA2 implementation */
-
-#pragma mark Cyclic DFA implementation end DFA2
-
-
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_declaration_in_program28;
-static const unsigned long long FOLLOW_declaration_in_program28_data[] = { 0x0000000000068012LL};
-static ANTLRBitSet *FOLLOW_variable_in_declaration50;
-static const unsigned long long FOLLOW_variable_in_declaration50_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_functionHeader_in_declaration60;
-static const unsigned long long FOLLOW_functionHeader_in_declaration60_data[] = { 0x0000000000000800LL};
-static ANTLRBitSet *FOLLOW_11_in_declaration62;
-static const unsigned long long FOLLOW_11_in_declaration62_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_functionHeader_in_declaration75;
-static const unsigned long long FOLLOW_functionHeader_in_declaration75_data[] = { 0x0000000000080000LL};
-static ANTLRBitSet *FOLLOW_block_in_declaration77;
-static const unsigned long long FOLLOW_block_in_declaration77_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_type_in_variable99;
-static const unsigned long long FOLLOW_type_in_variable99_data[] = { 0x0000000000000010LL};
-static ANTLRBitSet *FOLLOW_declarator_in_variable101;
-static const unsigned long long FOLLOW_declarator_in_variable101_data[] = { 0x0000000000000800LL};
-static ANTLRBitSet *FOLLOW_11_in_variable103;
-static const unsigned long long FOLLOW_11_in_variable103_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_ID_in_declarator122;
-static const unsigned long long FOLLOW_ID_in_declarator122_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_type_in_functionHeader151;
-static const unsigned long long FOLLOW_type_in_functionHeader151_data[] = { 0x0000000000000010LL};
-static ANTLRBitSet *FOLLOW_ID_in_functionHeader153;
-static const unsigned long long FOLLOW_ID_in_functionHeader153_data[] = { 0x0000000000000080LL};
-static ANTLRBitSet *FOLLOW_7_in_functionHeader155;
-static const unsigned long long FOLLOW_7_in_functionHeader155_data[] = { 0x0000000000068110LL};
-static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader159;
-static const unsigned long long FOLLOW_formalParameter_in_functionHeader159_data[] = { 0x0000000000000500LL};
-static ANTLRBitSet *FOLLOW_10_in_functionHeader163;
-static const unsigned long long FOLLOW_10_in_functionHeader163_data[] = { 0x0000000000068010LL};
-static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader165;
-static const unsigned long long FOLLOW_formalParameter_in_functionHeader165_data[] = { 0x0000000000000500LL};
-static ANTLRBitSet *FOLLOW_8_in_functionHeader173;
-static const unsigned long long FOLLOW_8_in_functionHeader173_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_type_in_formalParameter195;
-static const unsigned long long FOLLOW_type_in_formalParameter195_data[] = { 0x0000000000000010LL};
-static ANTLRBitSet *FOLLOW_declarator_in_formalParameter197;
-static const unsigned long long FOLLOW_declarator_in_formalParameter197_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_19_in_block286;
-static const unsigned long long FOLLOW_19_in_block286_data[] = { 0x00000000001F88B0LL};
-static ANTLRBitSet *FOLLOW_variable_in_block300;
-static const unsigned long long FOLLOW_variable_in_block300_data[] = { 0x00000000001F88B0LL};
-static ANTLRBitSet *FOLLOW_stat_in_block315;
-static const unsigned long long FOLLOW_stat_in_block315_data[] = { 0x00000000001908B0LL};
-static ANTLRBitSet *FOLLOW_20_in_block326;
-static const unsigned long long FOLLOW_20_in_block326_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_forStat_in_stat338;
-static const unsigned long long FOLLOW_forStat_in_stat338_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_expr_in_stat346;
-static const unsigned long long FOLLOW_expr_in_stat346_data[] = { 0x0000000000000800LL};
-static ANTLRBitSet *FOLLOW_11_in_stat348;
-static const unsigned long long FOLLOW_11_in_stat348_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_block_in_stat362;
-static const unsigned long long FOLLOW_block_in_stat362_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_assignStat_in_stat370;
-static const unsigned long long FOLLOW_assignStat_in_stat370_data[] = { 0x0000000000000800LL};
-static ANTLRBitSet *FOLLOW_11_in_stat372;
-static const unsigned long long FOLLOW_11_in_stat372_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_11_in_stat380;
-static const unsigned long long FOLLOW_11_in_stat380_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_16_in_forStat399;
-static const unsigned long long FOLLOW_16_in_forStat399_data[] = { 0x0000000000000080LL};
-static ANTLRBitSet *FOLLOW_7_in_forStat401;
-static const unsigned long long FOLLOW_7_in_forStat401_data[] = { 0x0000000000000010LL};
-static ANTLRBitSet *FOLLOW_assignStat_in_forStat403;
-static const unsigned long long FOLLOW_assignStat_in_forStat403_data[] = { 0x0000000000000800LL};
-static ANTLRBitSet *FOLLOW_11_in_forStat405;
-static const unsigned long long FOLLOW_11_in_forStat405_data[] = { 0x00000000000000B0LL};
-static ANTLRBitSet *FOLLOW_expr_in_forStat407;
-static const unsigned long long FOLLOW_expr_in_forStat407_data[] = { 0x0000000000000800LL};
-static ANTLRBitSet *FOLLOW_11_in_forStat409;
-static const unsigned long long FOLLOW_11_in_forStat409_data[] = { 0x0000000000000010LL};
-static ANTLRBitSet *FOLLOW_assignStat_in_forStat411;
-static const unsigned long long FOLLOW_assignStat_in_forStat411_data[] = { 0x0000000000000100LL};
-static ANTLRBitSet *FOLLOW_8_in_forStat413;
-static const unsigned long long FOLLOW_8_in_forStat413_data[] = { 0x0000000000080000LL};
-static ANTLRBitSet *FOLLOW_block_in_forStat415;
-static const unsigned long long FOLLOW_block_in_forStat415_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_ID_in_assignStat442;
-static const unsigned long long FOLLOW_ID_in_assignStat442_data[] = { 0x0000000000002000LL};
-static ANTLRBitSet *FOLLOW_13_in_assignStat444;
-static const unsigned long long FOLLOW_13_in_assignStat444_data[] = { 0x00000000000000B0LL};
-static ANTLRBitSet *FOLLOW_expr_in_assignStat446;
-static const unsigned long long FOLLOW_expr_in_assignStat446_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_condExpr_in_expr468;
-static const unsigned long long FOLLOW_condExpr_in_expr468_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_aexpr_in_condExpr487;
-static const unsigned long long FOLLOW_aexpr_in_condExpr487_data[] = { 0x0000000000005002LL};
-static ANTLRBitSet *FOLLOW_set_in_condExpr491;
-static const unsigned long long FOLLOW_set_in_condExpr491_data[] = { 0x00000000000000B0LL};
-static ANTLRBitSet *FOLLOW_aexpr_in_condExpr499;
-static const unsigned long long FOLLOW_aexpr_in_condExpr499_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_atom_in_aexpr521;
-static const unsigned long long FOLLOW_atom_in_aexpr521_data[] = { 0x0000000000000202LL};
-static ANTLRBitSet *FOLLOW_9_in_aexpr525;
-static const unsigned long long FOLLOW_9_in_aexpr525_data[] = { 0x00000000000000B0LL};
-static ANTLRBitSet *FOLLOW_atom_in_aexpr527;
-static const unsigned long long FOLLOW_atom_in_aexpr527_data[] = { 0x0000000000000202LL};
-static ANTLRBitSet *FOLLOW_ID_in_atom547;
-static const unsigned long long FOLLOW_ID_in_atom547_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_INT_in_atom561;
-static const unsigned long long FOLLOW_INT_in_atom561_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_7_in_atom575;
-static const unsigned long long FOLLOW_7_in_atom575_data[] = { 0x00000000000000B0LL};
-static ANTLRBitSet *FOLLOW_expr_in_atom577;
-static const unsigned long long FOLLOW_expr_in_atom577_data[] = { 0x0000000000000100LL};
-static ANTLRBitSet *FOLLOW_8_in_atom579;
-static const unsigned long long FOLLOW_8_in_atom579_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule Return Scopes start
-//#pragma mark Rule return scopes start
-//
-
-#pragma mark Rule return scopes start
-
-@implementation SimpleCParser  // line 637
-
-/* ObjC start of ruleAttributeScope */
-#pragma mark Dynamic Rule Scopes
-/* ObjC end of ruleAttributeScope */
-#pragma mark global Attribute Scopes
-/* ObjC start globalAttributeScope */
-/* ObjC end globalAttributeScope */
-/* ObjC start actions.(actionScope).synthesize */
-/* ObjC end actions.(actionScope).synthesize */
-/* ObjC start synthesize() */
-/* ObjC end synthesize() */
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_declaration_in_program28 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declaration_in_program28_data Count:(NSUInteger)1] retain];
-    FOLLOW_variable_in_declaration50 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_declaration50_data Count:(NSUInteger)1] retain];
-    FOLLOW_functionHeader_in_declaration60 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration60_data Count:(NSUInteger)1] retain];
-    FOLLOW_11_in_declaration62 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_declaration62_data Count:(NSUInteger)1] retain];
-    FOLLOW_functionHeader_in_declaration75 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration75_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_declaration77 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_declaration77_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_variable99 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_variable99_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_variable101 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_variable101_data Count:(NSUInteger)1] retain];
-    FOLLOW_11_in_variable103 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_variable103_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_declarator122 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_declarator122_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_functionHeader151 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_functionHeader151_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_functionHeader153 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_functionHeader153_data Count:(NSUInteger)1] retain];
-    FOLLOW_7_in_functionHeader155 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_functionHeader155_data Count:(NSUInteger)1] retain];
-    FOLLOW_formalParameter_in_functionHeader159 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader159_data Count:(NSUInteger)1] retain];
-    FOLLOW_10_in_functionHeader163 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_10_in_functionHeader163_data Count:(NSUInteger)1] retain];
-    FOLLOW_formalParameter_in_functionHeader165 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader165_data Count:(NSUInteger)1] retain];
-    FOLLOW_8_in_functionHeader173 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_functionHeader173_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_formalParameter195 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_formalParameter195_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_formalParameter197 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_formalParameter197_data Count:(NSUInteger)1] retain];
-    FOLLOW_19_in_block286 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_19_in_block286_data Count:(NSUInteger)1] retain];
-    FOLLOW_variable_in_block300 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_block300_data Count:(NSUInteger)1] retain];
-    FOLLOW_stat_in_block315 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block315_data Count:(NSUInteger)1] retain];
-    FOLLOW_20_in_block326 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_20_in_block326_data Count:(NSUInteger)1] retain];
-    FOLLOW_forStat_in_stat338 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_forStat_in_stat338_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_stat346 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_stat346_data Count:(NSUInteger)1] retain];
-    FOLLOW_11_in_stat348 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_stat348_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_stat362 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat362_data Count:(NSUInteger)1] retain];
-    FOLLOW_assignStat_in_stat370 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_stat370_data Count:(NSUInteger)1] retain];
-    FOLLOW_11_in_stat372 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_stat372_data Count:(NSUInteger)1] retain];
-    FOLLOW_11_in_stat380 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_stat380_data Count:(NSUInteger)1] retain];
-    FOLLOW_16_in_forStat399 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_16_in_forStat399_data Count:(NSUInteger)1] retain];
-    FOLLOW_7_in_forStat401 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_forStat401_data Count:(NSUInteger)1] retain];
-    FOLLOW_assignStat_in_forStat403 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_forStat403_data Count:(NSUInteger)1] retain];
-    FOLLOW_11_in_forStat405 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_forStat405_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_forStat407 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat407_data Count:(NSUInteger)1] retain];
-    FOLLOW_11_in_forStat409 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_forStat409_data Count:(NSUInteger)1] retain];
-    FOLLOW_assignStat_in_forStat411 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_forStat411_data Count:(NSUInteger)1] retain];
-    FOLLOW_8_in_forStat413 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_forStat413_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_forStat415 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_forStat415_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_assignStat442 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_assignStat442_data Count:(NSUInteger)1] retain];
-    FOLLOW_13_in_assignStat444 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_13_in_assignStat444_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_assignStat446 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_assignStat446_data Count:(NSUInteger)1] retain];
-    FOLLOW_condExpr_in_expr468 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_condExpr_in_expr468_data Count:(NSUInteger)1] retain];
-    FOLLOW_aexpr_in_condExpr487 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_aexpr_in_condExpr487_data Count:(NSUInteger)1] retain];
-    FOLLOW_set_in_condExpr491 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_set_in_condExpr491_data Count:(NSUInteger)1] retain];
-    FOLLOW_aexpr_in_condExpr499 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_aexpr_in_condExpr499_data Count:(NSUInteger)1] retain];
-    FOLLOW_atom_in_aexpr521 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_aexpr521_data Count:(NSUInteger)1] retain];
-    FOLLOW_9_in_aexpr525 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_aexpr525_data Count:(NSUInteger)1] retain];
-    FOLLOW_atom_in_aexpr527 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_aexpr527_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_atom547 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_atom547_data Count:(NSUInteger)1] retain];
-    FOLLOW_INT_in_atom561 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_atom561_data Count:(NSUInteger)1] retain];
-    FOLLOW_7_in_atom575 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_atom575_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_atom577 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_atom577_data Count:(NSUInteger)1] retain];
-    FOLLOW_8_in_atom579 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_atom579_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"ID", @"INT", @"WS", @"'('", @"')'", @"'+'", @"','", @"';'", @"'<'", @"'='", 
- @"'=='", @"'char'", @"'for'", @"'int'", @"'void'", @"'{'", @"'}'", nil] retain]];
-    [ANTLRBaseRecognizer setGrammarFileName:@"SimpleC.g"];
-}
-
-+ (SimpleCParser *)newSimpleCParser:(id<ANTLRTokenStream>)aStream
-{
-    return [[SimpleCParser alloc] initWithTokenStream:aStream];
-
-
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)aStream
-{
-    self = [super initWithTokenStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:15+1] retain]];
-    if ( self != nil ) {
-
-
-        dfa2 = [DFA2 newDFA2WithRecognizer:self];
-        /* start of actions-actionScope-init */
-        /* start of init */
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [dfa2 release];
-    [super dealloc];
-}
-
-/* ObjC start members */
-/* ObjC end members */
-/* ObjC start actions.(actionScope).methods */
-/* ObjC end actions.(actionScope).methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-/* ObjC start rules */
-/*
- * $ANTLR start program
- * SimpleC.g:7:1: program : ( declaration )+ ;
- */
-- (void) program
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:8:5: ( ( declaration )+ ) // ruleBlockSingleAlt
-        // SimpleC.g:8:9: ( declaration )+ // alt
-        {
-        // SimpleC.g:8:9: ( declaration )+ // positiveClosureBlock
-        NSInteger cnt1 = 0;
-        do {
-            NSInteger alt1 = 2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( (LA1_0==ID||LA1_0==15||(LA1_0 >= 17 && LA1_0 <= 18)) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // SimpleC.g:8:9: declaration // alt
-                    {
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_declaration_in_program28];
-                    [self declaration];
-
-                    [self popFollow];
-
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end program */
-
-/*
- * $ANTLR start declaration
- * SimpleC.g:20:1: declaration : ( variable | functionHeader ';' | functionHeader block );
- */
-- (void) declaration
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-         NSString * functionHeader1 = nil ;
-         
-         NSString * functionHeader2 = nil ;
-         
-
-        // SimpleC.g:21:5: ( variable | functionHeader ';' | functionHeader block ) //ruleblock
-        NSInteger alt2=3;
-        alt2 = [dfa2 predict:input];
-        switch (alt2) {
-            case 1 : ;
-                // SimpleC.g:21:9: variable // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_variable_in_declaration50];
-                [self variable];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-            case 2 : ;
-                // SimpleC.g:22:9: functionHeader ';' // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_functionHeader_in_declaration60];
-                functionHeader1 = [self functionHeader];
-
-                [self popFollow];
-
-
-
-                [self match:input TokenType:11 Follow:FOLLOW_11_in_declaration62]; 
-
-                 NSLog(@"%@ is a declaration\n", functionHeader1
-                ); 
-
-
-                }
-                break;
-            case 3 : ;
-                // SimpleC.g:24:9: functionHeader block // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_functionHeader_in_declaration75];
-                functionHeader2 = [self functionHeader];
-
-                [self popFollow];
-
-
-
-                /* ruleRef */
-                [self pushFollow:FOLLOW_block_in_declaration77];
-                [self block];
-
-                [self popFollow];
-
-
-
-                 NSLog(@"%@ is a definition\n", functionHeader2
-                ); 
-
-
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end declaration */
-
-/*
- * $ANTLR start variable
- * SimpleC.g:28:1: variable : type declarator ';' ;
- */
-- (void) variable
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:29:5: ( type declarator ';' ) // ruleBlockSingleAlt
-        // SimpleC.g:29:9: type declarator ';' // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_type_in_variable99];
-        [self type];
-
-        [self popFollow];
-
-
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_declarator_in_variable101];
-        [self declarator];
-
-        [self popFollow];
-
-
-
-        [self match:input TokenType:11 Follow:FOLLOW_11_in_variable103]; 
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end variable */
-
-/*
- * $ANTLR start declarator
- * SimpleC.g:32:1: declarator : ID ;
- */
-- (void) declarator
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:33:5: ( ID ) // ruleBlockSingleAlt
-        // SimpleC.g:33:9: ID // alt
-        {
-        [self match:input TokenType:ID Follow:FOLLOW_ID_in_declarator122]; 
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end declarator */
-
-/*
- * $ANTLR start functionHeader
- * SimpleC.g:36:1: functionHeader returns [NSString *name] : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' ;
- */
-- (NSString *) functionHeader
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    NSString * name = nil ;
-
-
-
-        name=nil; // for now you must init here rather than in 'returns'
-
-    @try {
-        ANTLRCommonToken *ID3 = nil;
-
-        // SimpleC.g:40:5: ( type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' ) // ruleBlockSingleAlt
-        // SimpleC.g:40:9: type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_type_in_functionHeader151];
-        [self type];
-
-        [self popFollow];
-
-
-
-        ID3=(ANTLRCommonToken *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_functionHeader153]; 
-
-        [self match:input TokenType:7 Follow:FOLLOW_7_in_functionHeader155]; 
-
-        // SimpleC.g:40:21: ( formalParameter ( ',' formalParameter )* )? // block
-        NSInteger alt4=2;
-        NSInteger LA4_0 = [input LA:1];
-
-        if ( (LA4_0==ID||LA4_0==15||(LA4_0 >= 17 && LA4_0 <= 18)) ) {
-            alt4=1;
-        }
-        switch (alt4) {
-            case 1 : ;
-                // SimpleC.g:40:23: formalParameter ( ',' formalParameter )* // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_formalParameter_in_functionHeader159];
-                [self formalParameter];
-
-                [self popFollow];
-
-
-
-                do {
-                    NSInteger alt3=2;
-                    NSInteger LA3_0 = [input LA:1];
-                    if ( (LA3_0==10) ) {
-                        alt3=1;
-                    }
-
-
-                    switch (alt3) {
-                        case 1 : ;
-                            // SimpleC.g:40:41: ',' formalParameter // alt
-                            {
-                            [self match:input TokenType:10 Follow:FOLLOW_10_in_functionHeader163]; 
-
-                            /* ruleRef */
-                            [self pushFollow:FOLLOW_formalParameter_in_functionHeader165];
-                            [self formalParameter];
-
-                            [self popFollow];
-
-
-
-                            }
-                            break;
-
-                        default :
-                            goto loop3;
-                    }
-                } while (YES);
-                loop3: ;
-
-
-                }
-                break;
-
-        }
-
-
-        [self match:input TokenType:8 Follow:FOLLOW_8_in_functionHeader173]; 
-
-        name =  (ID3!=nil?ID3.text:nil);
-
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return name;
-}
-/* $ANTLR end functionHeader */
-
-/*
- * $ANTLR start formalParameter
- * SimpleC.g:44:1: formalParameter : type declarator ;
- */
-- (void) formalParameter
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:45:5: ( type declarator ) // ruleBlockSingleAlt
-        // SimpleC.g:45:9: type declarator // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_type_in_formalParameter195];
-        [self type];
-
-        [self popFollow];
-
-
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_declarator_in_formalParameter197];
-        [self declarator];
-
-        [self popFollow];
-
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end formalParameter */
-
-/*
- * $ANTLR start type
- * SimpleC.g:48:1: type : ( 'int' | 'char' | 'void' | ID );
- */
-- (void) type
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:49:5: ( 'int' | 'char' | 'void' | ID ) // ruleBlockSingleAlt
-        // SimpleC.g: // alt
-        {
-        if ([input LA:1] == ID||[input LA:1] == 15||(([input LA:1] >= 17) && ([input LA:1] <= 18))) {
-            [input consume];
-            [state setIsErrorRecovery:NO];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            @throw mse;
-        }
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end type */
-
-/*
- * $ANTLR start block
- * SimpleC.g:55:1: block : '{' ( variable )* ( stat )* '}' ;
- */
-- (void) block
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:56:5: ( '{' ( variable )* ( stat )* '}' ) // ruleBlockSingleAlt
-        // SimpleC.g:56:9: '{' ( variable )* ( stat )* '}' // alt
-        {
-        [self match:input TokenType:19 Follow:FOLLOW_19_in_block286]; 
-
-        do {
-            NSInteger alt5=2;
-            NSInteger LA5_0 = [input LA:1];
-            if ( (LA5_0==ID) ) {
-                NSInteger LA5_2 = [input LA:2];
-                if ( (LA5_2==ID) ) {
-                    alt5=1;
-                }
-
-
-            }
-            else if ( (LA5_0==15||(LA5_0 >= 17 && LA5_0 <= 18)) ) {
-                alt5=1;
-            }
-
-
-            switch (alt5) {
-                case 1 : ;
-                    // SimpleC.g:57:13: variable // alt
-                    {
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_variable_in_block300];
-                    [self variable];
-
-                    [self popFollow];
-
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop5;
-            }
-        } while (YES);
-        loop5: ;
-
-
-        do {
-            NSInteger alt6=2;
-            NSInteger LA6_0 = [input LA:1];
-            if ( ((LA6_0 >= ID && LA6_0 <= INT)||LA6_0==7||LA6_0==11||LA6_0==16||LA6_0==19) ) {
-                alt6=1;
-            }
-
-
-            switch (alt6) {
-                case 1 : ;
-                    // SimpleC.g:58:13: stat // alt
-                    {
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_stat_in_block315];
-                    [self stat];
-
-                    [self popFollow];
-
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop6;
-            }
-        } while (YES);
-        loop6: ;
-
-
-        [self match:input TokenType:20 Follow:FOLLOW_20_in_block326]; 
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end block */
-
-/*
- * $ANTLR start stat
- * SimpleC.g:62:1: stat : ( forStat | expr ';' | block | assignStat ';' | ';' );
- */
-- (void) stat
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:62:5: ( forStat | expr ';' | block | assignStat ';' | ';' ) //ruleblock
-        NSInteger alt7=5;
-        unichar charLA7 = [input LA:1];
-        switch (charLA7) {
-            case 16: ;
-                {
-                alt7=1;
-                }
-                break;
-            case ID: ;
-                {
-                NSInteger LA7_2 = [input LA:2];
-
-                if ( (LA7_2==13) ) {
-                    alt7=4;
-                }
-                else if ( (LA7_2==9||(LA7_2 >= 11 && LA7_2 <= 12)||LA7_2==14) ) {
-                    alt7=2;
-                }
-                else {
-                    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:7 state:2 stream:input];
-                    nvae.c = LA7_2;
-                    @throw nvae;
-
-                }
-                }
-                break;
-            case INT: ;
-            case 7: ;
-                {
-                alt7=2;
-                }
-                break;
-            case 19: ;
-                {
-                alt7=3;
-                }
-                break;
-            case 11: ;
-                {
-                alt7=5;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:7 state:0 stream:input];
-            nvae.c = charLA7;
-            @throw nvae;
-
-        }
-
-        switch (alt7) {
-            case 1 : ;
-                // SimpleC.g:62:7: forStat // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_forStat_in_stat338];
-                [self forStat];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-            case 2 : ;
-                // SimpleC.g:63:7: expr ';' // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_expr_in_stat346];
-                [self expr];
-
-                [self popFollow];
-
-
-
-                [self match:input TokenType:11 Follow:FOLLOW_11_in_stat348]; 
-
-                }
-                break;
-            case 3 : ;
-                // SimpleC.g:64:7: block // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_block_in_stat362];
-                [self block];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-            case 4 : ;
-                // SimpleC.g:65:7: assignStat ';' // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_assignStat_in_stat370];
-                [self assignStat];
-
-                [self popFollow];
-
-
-
-                [self match:input TokenType:11 Follow:FOLLOW_11_in_stat372]; 
-
-                }
-                break;
-            case 5 : ;
-                // SimpleC.g:66:7: ';' // alt
-                {
-                [self match:input TokenType:11 Follow:FOLLOW_11_in_stat380]; 
-
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end stat */
-
-/*
- * $ANTLR start forStat
- * SimpleC.g:69:1: forStat : 'for' '(' assignStat ';' expr ';' assignStat ')' block ;
- */
-- (void) forStat
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:70:5: ( 'for' '(' assignStat ';' expr ';' assignStat ')' block ) // ruleBlockSingleAlt
-        // SimpleC.g:70:9: 'for' '(' assignStat ';' expr ';' assignStat ')' block // alt
-        {
-        [self match:input TokenType:16 Follow:FOLLOW_16_in_forStat399]; 
-
-        [self match:input TokenType:7 Follow:FOLLOW_7_in_forStat401]; 
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_assignStat_in_forStat403];
-        [self assignStat];
-
-        [self popFollow];
-
-
-
-        [self match:input TokenType:11 Follow:FOLLOW_11_in_forStat405]; 
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_expr_in_forStat407];
-        [self expr];
-
-        [self popFollow];
-
-
-
-        [self match:input TokenType:11 Follow:FOLLOW_11_in_forStat409]; 
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_assignStat_in_forStat411];
-        [self assignStat];
-
-        [self popFollow];
-
-
-
-        [self match:input TokenType:8 Follow:FOLLOW_8_in_forStat413]; 
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_block_in_forStat415];
-        [self block];
-
-        [self popFollow];
-
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end forStat */
-
-/*
- * $ANTLR start assignStat
- * SimpleC.g:73:1: assignStat : ID '=' expr ;
- */
-- (void) assignStat
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:74:5: ( ID '=' expr ) // ruleBlockSingleAlt
-        // SimpleC.g:74:9: ID '=' expr // alt
-        {
-        [self match:input TokenType:ID Follow:FOLLOW_ID_in_assignStat442]; 
-
-        [self match:input TokenType:13 Follow:FOLLOW_13_in_assignStat444]; 
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_expr_in_assignStat446];
-        [self expr];
-
-        [self popFollow];
-
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end assignStat */
-
-/*
- * $ANTLR start expr
- * SimpleC.g:77:1: expr : condExpr ;
- */
-- (void) expr
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:77:5: ( condExpr ) // ruleBlockSingleAlt
-        // SimpleC.g:77:9: condExpr // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_condExpr_in_expr468];
-        [self condExpr];
-
-        [self popFollow];
-
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end expr */
-
-/*
- * $ANTLR start condExpr
- * SimpleC.g:80:1: condExpr : aexpr ( ( '==' | '<' ) aexpr )? ;
- */
-- (void) condExpr
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:81:5: ( aexpr ( ( '==' | '<' ) aexpr )? ) // ruleBlockSingleAlt
-        // SimpleC.g:81:9: aexpr ( ( '==' | '<' ) aexpr )? // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_aexpr_in_condExpr487];
-        [self aexpr];
-
-        [self popFollow];
-
-
-
-        // SimpleC.g:81:15: ( ( '==' | '<' ) aexpr )? // block
-        NSInteger alt8=2;
-        NSInteger LA8_0 = [input LA:1];
-
-        if ( (LA8_0==12||LA8_0==14) ) {
-            alt8=1;
-        }
-        switch (alt8) {
-            case 1 : ;
-                // SimpleC.g:81:17: ( '==' | '<' ) aexpr // alt
-                {
-                if ([input LA:1] == 12||[input LA:1] == 14) {
-                    [input consume];
-                    [state setIsErrorRecovery:NO];
-                } else {
-                    ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                    @throw mse;
-                }
-
-
-                /* ruleRef */
-                [self pushFollow:FOLLOW_aexpr_in_condExpr499];
-                [self aexpr];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-
-        }
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end condExpr */
-
-/*
- * $ANTLR start aexpr
- * SimpleC.g:84:1: aexpr : atom ( '+' atom )* ;
- */
-- (void) aexpr
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:85:5: ( atom ( '+' atom )* ) // ruleBlockSingleAlt
-        // SimpleC.g:85:9: atom ( '+' atom )* // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_atom_in_aexpr521];
-        [self atom];
-
-        [self popFollow];
-
-
-
-        do {
-            NSInteger alt9=2;
-            NSInteger LA9_0 = [input LA:1];
-            if ( (LA9_0==9) ) {
-                alt9=1;
-            }
-
-
-            switch (alt9) {
-                case 1 : ;
-                    // SimpleC.g:85:16: '+' atom // alt
-                    {
-                    [self match:input TokenType:9 Follow:FOLLOW_9_in_aexpr525]; 
-
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_atom_in_aexpr527];
-                    [self atom];
-
-                    [self popFollow];
-
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop9;
-            }
-        } while (YES);
-        loop9: ;
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end aexpr */
-
-/*
- * $ANTLR start atom
- * SimpleC.g:88:1: atom : ( ID | INT | '(' expr ')' );
- */
-- (void) atom
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleC.g:89:5: ( ID | INT | '(' expr ')' ) //ruleblock
-        NSInteger alt10=3;
-        unichar charLA10 = [input LA:1];
-        switch (charLA10) {
-            case ID: ;
-                {
-                alt10=1;
-                }
-                break;
-            case INT: ;
-                {
-                alt10=2;
-                }
-                break;
-            case 7: ;
-                {
-                alt10=3;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:10 state:0 stream:input];
-            nvae.c = charLA10;
-            @throw nvae;
-
-        }
-
-        switch (alt10) {
-            case 1 : ;
-                // SimpleC.g:89:7: ID // alt
-                {
-                [self match:input TokenType:ID Follow:FOLLOW_ID_in_atom547]; 
-
-                }
-                break;
-            case 2 : ;
-                // SimpleC.g:90:7: INT // alt
-                {
-                [self match:input TokenType:INT Follow:FOLLOW_INT_in_atom561]; 
-
-                }
-                break;
-            case 3 : ;
-                // SimpleC.g:91:7: '(' expr ')' // alt
-                {
-                [self match:input TokenType:7 Follow:FOLLOW_7_in_atom575]; 
-
-                /* ruleRef */
-                [self pushFollow:FOLLOW_expr_in_atom577];
-                [self expr];
-
-                [self popFollow];
-
-
-
-                [self match:input TokenType:8 Follow:FOLLOW_8_in_atom579]; 
-
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end atom */
-/* ObjC end rules */
-
-@end /* end of SimpleCParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/antlr3.h b/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/antlr3.h
deleted file mode 100644
index 4f16279..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/antlr3.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#import <ANTLR/ANTLRBaseMapElement.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRBaseStack.h>
-#import <ANTLR/ANTLRBaseTree.h>
-#import <ANTLR/ANTLRBaseTreeAdaptor.h>
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBufferedTokenStream.h>
-#import <ANTLR/ANTLRBufferedTreeNodeStream.h>
-#import <ANTLR/ANTLRCharStream.h>
-#import <ANTLR/ANTLRCharStreamState.h>
-#import <ANTLR/ANTLRCommonErrorNode.h>
-#import <ANTLR/ANTLRCommonToken.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRDebug.h>
-#import <ANTLR/ANTLRDebugEventProxy.h>
-#import <ANTLR/ANTLRDebugEventListener.h>
-#import <ANTLR/ANTLRDebugParser.h>
-#import <ANTLR/ANTLRDebugTokenStream.h>
-#import <ANTLR/ANTLRDebugTreeAdaptor.h>
-#import <ANTLR/ANTLRDebugTreeNodeStream.h>
-#import <ANTLR/ANTLRDebugTreeParser.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRError.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRFastQueue.h>
-#import <ANTLR/ANTLRHashMap.h>
-#import <ANTLR/ANTLRHashRule.h>
-#import <ANTLR/ANTLRIntArray.h>
-#import <ANTLR/ANTLRIntStream.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRLexerRuleReturnScope.h>
-#import <ANTLR/ANTLRLinkBase.h>
-#import <ANTLR/ANTLRLookaheadStream.h>
-#import <ANTLR/ANTLRMapElement.h>
-#import <ANTLR/ANTLRMap.h>
-#import <ANTLR/ANTLRMismatchedNotSetException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRMissingTokenException.h>
-#import <ANTLR/ANTLRNodeMapElement.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRPtrBuffer.h>
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLRRecognizerSharedState.h>
-#import <ANTLR/ANTLRRewriteRuleElementStream.h>
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
-#import <ANTLR/ANTLRRuleMemo.h>
-#import <ANTLR/ANTLRRuleStack.h>
-#import <ANTLR/ANTLRRuleReturnScope.h>
-#import <ANTLR/ANTLRRuntimeException.h>
-#import <ANTLR/ANTLRStreamEnumerator.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRSymbolStack.h>
-#import <ANTLR/ANTLRToken+DebuggerSupport.h>
-#import <ANTLR/ANTLRToken.h>
-#import <ANTLR/ANTLRTokenRewriteStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRTokenStream.h>
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeException.h>
-#import <ANTLR/ANTLRTreeIterator.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-#import <ANTLR/ANTLRUnbufferedTokenStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-#import <ANTLR/ANTLRUniqueIDMap.h>
-#import <ANTLR/ANTLRUnwantedTokenException.h>
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/main.m b/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/main.m
deleted file mode 100644
index 0a645be..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/main.m
+++ /dev/null
@@ -1,30 +0,0 @@
-#import <Cocoa/Cocoa.h>
-#import <antlr3.h>
-#import "SimpleCLexer.h"
-#import "SimpleCParser.h"
-
-int main() {
-	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
-
-	NSString *string = [NSString stringWithContentsOfFile:@"/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/examples/LL-star/input"];
-	NSLog(@"input is: %@", string);
-	ANTLRStringStream *stream = [[ANTLRStringStream alloc] initWithStringNoCopy:string];
-	SimpleCLexer *lexer = [[SimpleCLexer alloc] initWithCharStream:stream];
-
-//	ANTLRCommonToken *currentToken;
-//	while ((currentToken = [lexer nextToken]) && [currentToken getType] != ANTLRTokenTypeEOF) {
-//		NSLog(@"%@", [currentToken toString]);
-//	}
-	
-	ANTLRCommonTokenStream *tokens = [[ANTLRCommonTokenStream alloc] initWithTokenSource:lexer];
-	SimpleCParser *parser = [[SimpleCParser alloc] initWithTokenStream:tokens];
-	[parser program];
-
-	[lexer release];
-	[stream release];
-	[tokens release];
-	[parser release];
-
-	[pool release];
-	return 0;
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/combined/Combined.tokens b/antlr-3.4/runtime/ObjC/Framework/examples/combined/Combined.tokens
deleted file mode 100644
index b22f459..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/combined/Combined.tokens
+++ /dev/null
@@ -1,3 +0,0 @@
-WS=6
-INT=5
-ID=4
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedLexer.h
deleted file mode 100644
index b7faf3e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedLexer.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// $ANTLR 3.2 Aug 24, 2010 10:45:57 Combined.g 2010-08-24 13:53:42
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#define INT 5
-#define WS 6
-#define ID 4
-#define EOF -1
-@interface CombinedLexer : ANTLRLexer { // line 283
-// start of actions.lexer.memVars
-// start of action-actionScope-memVars
-}
-+ (CombinedLexer *)newCombinedLexerWithCharStream:(id<ANTLRCharStream>)anInput;
-
-- (void)mID; 
-- (void)mINT; 
-- (void)mWS; 
-- (void)mTokens; 
-
-@end /* end of CombinedLexer interface */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedLexer.m
deleted file mode 100644
index 44be164..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedLexer.m
+++ /dev/null
@@ -1,403 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 24, 2010 10:45:57
- *
- *     -  From the grammar source file : Combined.g
- *     -                            On : 2010-08-24 13:53:42
- *     -                 for the lexer : CombinedLexerLexer *
- * Editing it, at least manually, is not wise. 
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 24, 2010 10:45:57 Combined.g 2010-08-24 13:53:42
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "CombinedLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-
-/** As per Terence: No returns for lexer rules! */
-@implementation CombinedLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"Combined.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (CombinedLexer *)newCombinedLexerWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    return [[CombinedLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    if ((self = [super initWithCharStream:anInput State:[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:4+1]]) != nil) {
-
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* Start of actions.lexer.methods */
-/* start methods() */
-
-/* Start of Rules */
-// $ANTLR start "ID"
-- (void) mID
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = ID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Combined.g:14:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* ) // ruleBlockSingleAlt
-        // Combined.g:14:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* // alt
-        {
-        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-            [input consume];
-
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-            [self recover:mse];
-            @throw mse;}
-          /* element() */
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0>='0' && LA1_0<='9')||(LA1_0>='A' && LA1_0<='Z')||LA1_0=='_'||(LA1_0>='a' && LA1_0<='z')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // Combined.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-                        [input consume];
-
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;}
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop1;
-            }
-        } while (YES);
-        loop1: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "ID"
-
-// $ANTLR start "INT"
-- (void) mINT
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = INT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Combined.g:17:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
-        // Combined.g:17:9: ( '0' .. '9' )+ // alt
-        {
-        // Combined.g:17:9: ( '0' .. '9' )+ // positiveClosureBlock
-        NSInteger cnt2=0;
-        do {
-            NSInteger alt2=2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( ((LA2_0>='0' && LA2_0<='9')) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // Combined.g:17:10: '0' .. '9' // alt
-                    {
-                    [self matchRangeFromChar:'0' to:'9'];   /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt2 >= 1 )
-                        goto loop2;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:2];
-                    @throw eee;
-            }
-            cnt2++;
-        } while (YES);
-        loop2: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "INT"
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Combined.g:20:5: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ ) // ruleBlockSingleAlt
-        // Combined.g:20:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // alt
-        {
-        // Combined.g:20:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // positiveClosureBlock
-        NSInteger cnt3=0;
-        do {
-            NSInteger alt3=2;
-            NSInteger LA3_0 = [input LA:1];
-            if ( ((LA3_0>='\t' && LA3_0<='\n')||LA3_0=='\r'||LA3_0==' ') ) {
-                alt3=1;
-            }
-
-
-            switch (alt3) {
-                case 1 : ;
-                    // Combined.g: // alt
-                    {
-                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == '\r'||[input LA:1] == ' ') {
-                        [input consume];
-
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;}
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt3 >= 1 )
-                        goto loop3;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:3];
-                    @throw eee;
-            }
-            cnt3++;
-        } while (YES);
-        loop3: ;
-          /* element() */
-         _channel=99;   /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "WS"
-
-- (void) mTokens
-{
-    // Combined.g:1:8: ( ID | INT | WS ) //ruleblock
-    NSInteger alt4=3;
-    switch ([input LA:1]) {
-        case 'A': ;
-        case 'B': ;
-        case 'C': ;
-        case 'D': ;
-        case 'E': ;
-        case 'F': ;
-        case 'G': ;
-        case 'H': ;
-        case 'I': ;
-        case 'J': ;
-        case 'K': ;
-        case 'L': ;
-        case 'M': ;
-        case 'N': ;
-        case 'O': ;
-        case 'P': ;
-        case 'Q': ;
-        case 'R': ;
-        case 'S': ;
-        case 'T': ;
-        case 'U': ;
-        case 'V': ;
-        case 'W': ;
-        case 'X': ;
-        case 'Y': ;
-        case 'Z': ;
-        case '_': ;
-        case 'a': ;
-        case 'b': ;
-        case 'c': ;
-        case 'd': ;
-        case 'e': ;
-        case 'f': ;
-        case 'g': ;
-        case 'h': ;
-        case 'i': ;
-        case 'j': ;
-        case 'k': ;
-        case 'l': ;
-        case 'm': ;
-        case 'n': ;
-        case 'o': ;
-        case 'p': ;
-        case 'q': ;
-        case 'r': ;
-        case 's': ;
-        case 't': ;
-        case 'u': ;
-        case 'v': ;
-        case 'w': ;
-        case 'x': ;
-        case 'y': ;
-        case 'z': ;
-            {
-            alt4=1;
-            }
-            break;
-        case '0': ;
-        case '1': ;
-        case '2': ;
-        case '3': ;
-        case '4': ;
-        case '5': ;
-        case '6': ;
-        case '7': ;
-        case '8': ;
-        case '9': ;
-            {
-            alt4=2;
-            }
-            break;
-        case '\t': ;
-        case '\n': ;
-        case '\r': ;
-        case ' ': ;
-            {
-            alt4=3;
-            }
-            break;
-
-    default: ;
-        ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:4 state:0 stream:input];
-        @throw nvae;
-    }
-
-    switch (alt4) {
-        case 1 : ;
-            // Combined.g:1:10: ID // alt
-            {
-                [self mID]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 2 : ;
-            // Combined.g:1:13: INT // alt
-            {
-                [self mINT]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 3 : ;
-            // Combined.g:1:17: WS // alt
-            {
-                [self mWS]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-
-    }
-
-}
-
-@end /* end of CombinedLexer implementation line 397 */
-
-/* End of code
- * =============================================================================
- */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedParser.h b/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedParser.h
deleted file mode 100644
index 2d48ff4..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedParser.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// $ANTLR 3.2 Aug 24, 2010 10:45:57 Combined.g 2010-08-24 13:53:42
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* parserHeaderFile */
-#pragma mark Tokens
-#define WS 6
-#define INT 5
-#define ID 4
-#define EOF -1
-#pragma mark Dynamic Global Scopes
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-#pragma mark Rule return scopes end
-@interface CombinedParser : ANTLRParser { /* line 572 */
-// start of globalAttributeScopeMemVar
-
-
-// start of action-actionScope-memVars
-// start of ruleAttributeScopeMemVar
-
-
-// Start of memVars
-
- }
-
-// start of action-actionScope-methodsDecl
-
-
-- (void)stat; 
-- (void)identifier; 
-
-
-@end /* end of CombinedParser interface */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedParser.m b/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedParser.m
deleted file mode 100644
index cb5d6f0..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/combined/CombinedParser.m
+++ /dev/null
@@ -1,204 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 24, 2010 10:45:57
- *
- *     -  From the grammar source file : Combined.g
- *     -                            On : 2010-08-24 13:53:42
- *     -                for the parser : CombinedParserParser *
- * Editing it, at least manually, is not wise. 
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 24, 2010 10:45:57 Combined.g 2010-08-24 13:53:42
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "CombinedParser.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_identifier_in_stat20;
-static const unsigned long long FOLLOW_identifier_in_stat20_data[] = { 0x0000000000000012LL};
-static ANTLRBitSet *FOLLOW_ID_in_identifier35;
-static const unsigned long long FOLLOW_ID_in_identifier35_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule return scopes start
-/* returnScope */
-
-/* returnScope */
-
-
-
-@implementation CombinedParser  // line 637
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_identifier_in_stat20 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_identifier_in_stat20_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_identifier35 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_identifier35_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[[NSArray alloc] initWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"ID", @"INT", @"WS", nil] retain]];
-}
-
-+ (CombinedParser *)newCombinedParser:(id<ANTLRTokenStream>)aStream
-{
-    return [[CombinedParser alloc] initWithTokenStream:aStream];
-
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)aStream
-{
-    if ((self = [super initWithTokenStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:2+1] retain]]) != nil) {
-
-
-
-        /* start of actions-actionScope-init */
-        /* start of init */
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-// start actions.actionScope.methods
-// start methods()
-// start rules
-/*
- * $ANTLR start stat
- * Combined.g:7:1: stat : ( identifier )+ ;
- */
-- (void) stat
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // Combined.g:7:5: ( ( identifier )+ ) // ruleBlockSingleAlt
-        // Combined.g:7:7: ( identifier )+ // alt
-        {
-        // Combined.g:7:7: ( identifier )+ // positiveClosureBlock
-        NSInteger cnt1=0;
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( (LA1_0==ID) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // Combined.g:7:7: identifier // alt
-                    {
-                    [self pushFollow:FOLLOW_identifier_in_stat20];
-                    [self identifier];
-                    [self popFollow];
-
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end stat */
-/*
- * $ANTLR start identifier
- * Combined.g:9:1: identifier : ID ;
- */
-- (void) identifier
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // Combined.g:10:5: ( ID ) // ruleBlockSingleAlt
-        // Combined.g:10:7: ID // alt
-        {
-        [self match:input TokenType:ID Follow:FOLLOW_ID_in_identifier35];   /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end identifier */
-
-@end /* end of CombinedParser implementation line 692 */
-
-
-/* End of code
- * =============================================================================
- */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/combined/antlr3.h b/antlr-3.4/runtime/ObjC/Framework/examples/combined/antlr3.h
deleted file mode 100644
index 4f16279..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/combined/antlr3.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#import <ANTLR/ANTLRBaseMapElement.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRBaseStack.h>
-#import <ANTLR/ANTLRBaseTree.h>
-#import <ANTLR/ANTLRBaseTreeAdaptor.h>
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBufferedTokenStream.h>
-#import <ANTLR/ANTLRBufferedTreeNodeStream.h>
-#import <ANTLR/ANTLRCharStream.h>
-#import <ANTLR/ANTLRCharStreamState.h>
-#import <ANTLR/ANTLRCommonErrorNode.h>
-#import <ANTLR/ANTLRCommonToken.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRDebug.h>
-#import <ANTLR/ANTLRDebugEventProxy.h>
-#import <ANTLR/ANTLRDebugEventListener.h>
-#import <ANTLR/ANTLRDebugParser.h>
-#import <ANTLR/ANTLRDebugTokenStream.h>
-#import <ANTLR/ANTLRDebugTreeAdaptor.h>
-#import <ANTLR/ANTLRDebugTreeNodeStream.h>
-#import <ANTLR/ANTLRDebugTreeParser.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRError.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRFastQueue.h>
-#import <ANTLR/ANTLRHashMap.h>
-#import <ANTLR/ANTLRHashRule.h>
-#import <ANTLR/ANTLRIntArray.h>
-#import <ANTLR/ANTLRIntStream.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRLexerRuleReturnScope.h>
-#import <ANTLR/ANTLRLinkBase.h>
-#import <ANTLR/ANTLRLookaheadStream.h>
-#import <ANTLR/ANTLRMapElement.h>
-#import <ANTLR/ANTLRMap.h>
-#import <ANTLR/ANTLRMismatchedNotSetException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRMissingTokenException.h>
-#import <ANTLR/ANTLRNodeMapElement.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRPtrBuffer.h>
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLRRecognizerSharedState.h>
-#import <ANTLR/ANTLRRewriteRuleElementStream.h>
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
-#import <ANTLR/ANTLRRuleMemo.h>
-#import <ANTLR/ANTLRRuleStack.h>
-#import <ANTLR/ANTLRRuleReturnScope.h>
-#import <ANTLR/ANTLRRuntimeException.h>
-#import <ANTLR/ANTLRStreamEnumerator.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRSymbolStack.h>
-#import <ANTLR/ANTLRToken+DebuggerSupport.h>
-#import <ANTLR/ANTLRToken.h>
-#import <ANTLR/ANTLRTokenRewriteStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRTokenStream.h>
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeException.h>
-#import <ANTLR/ANTLRTreeIterator.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-#import <ANTLR/ANTLRUnbufferedTokenStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-#import <ANTLR/ANTLRUniqueIDMap.h>
-#import <ANTLR/ANTLRUnwantedTokenException.h>
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/combined/main.m b/antlr-3.4/runtime/ObjC/Framework/examples/combined/main.m
deleted file mode 100644
index 7fb5bd2..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/combined/main.m
+++ /dev/null
@@ -1,23 +0,0 @@
-#import <Cocoa/Cocoa.h>
-#import "CombinedLexer.h"
-#import "antlr3.h"
-
-int main(int argc, const char * argv[])
-{
-    NSLog(@"starting combined\n");
-	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
-	NSString *string = @"xyyyyaxyyyyb";
-	NSLog(@"%@", string);
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:string];
-	CombinedLexer *lexer = [CombinedLexer newCombinedLexerWithCharStream:stream];
-	id<ANTLRToken> currentToken;
-	while ((currentToken = [lexer nextToken]) && [currentToken getType] != ANTLRTokenTypeEOF) {
-		NSLog(@"%@", currentToken);
-	}
-	[lexer release];
-	[stream release];
-	
-	[pool release];
-    NSLog(@"exiting combined\n");
-	return 0;
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/combined/output1/Combined.tokens b/antlr-3.4/runtime/ObjC/Framework/examples/combined/output1/Combined.tokens
deleted file mode 100644
index b22f459..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/combined/output1/Combined.tokens
+++ /dev/null
@@ -1,3 +0,0 @@
-WS=6
-INT=5
-ID=4
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.h b/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.h
deleted file mode 100644
index 2b255a6..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} /Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g 2011-05-05 22:05:01
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define ARG 4
-#define CALL 5
-#define CHAR 6
-#define CLASS 7
-#define COMMENT 8
-#define ESC 9
-#define FIELD 10
-#define ID 11
-#define IMPORT 12
-#define METHOD 13
-#define QID 14
-#define QIDStar 15
-#define RETURN 16
-#define SL_COMMENT 17
-#define STAT 18
-#define STRING 19
-#define TYPE 20
-#define WS 21
-/* interface lexer class */
-@interface Fuzzy : ANTLRLexer { // line 283
-SEL synpred9_FuzzySelector;
-SEL synpred2_FuzzySelector;
-SEL synpred7_FuzzySelector;
-SEL synpred4_FuzzySelector;
-SEL synpred8_FuzzySelector;
-SEL synpred6_FuzzySelector;
-SEL synpred5_FuzzySelector;
-SEL synpred3_FuzzySelector;
-SEL synpred1_FuzzySelector;
-/* ObjC start of actions.lexer.memVars */
-/* ObjC end of actions.lexer.memVars */
-}
-+ (void) initialize;
-+ (Fuzzy *)newFuzzyWithCharStream:(id<ANTLRCharStream>)anInput;
-/* ObjC start actions.lexer.methodsDecl */
-/* ObjC end actions.lexer.methodsDecl */
-- (void) mIMPORT ; 
-- (void) mRETURN ; 
-- (void) mCLASS ; 
-- (void) mMETHOD ; 
-- (void) mFIELD ; 
-- (void) mSTAT ; 
-- (void) mCALL ; 
-- (void) mCOMMENT ; 
-- (void) mSL_COMMENT ; 
-- (void) mSTRING ; 
-- (void) mCHAR ; 
-- (void) mWS ; 
-- (void) mQID ; 
-- (void) mQIDStar ; 
-- (void) mTYPE ; 
-- (void) mARG ; 
-- (void) mID ; 
-- (void) mESC ; 
-- (void) mTokens ; 
-- (void) synpred1_Fuzzy_fragment ; 
-- (void) synpred2_Fuzzy_fragment ; 
-- (void) synpred3_Fuzzy_fragment ; 
-- (void) synpred4_Fuzzy_fragment ; 
-- (void) synpred5_Fuzzy_fragment ; 
-- (void) synpred6_Fuzzy_fragment ; 
-- (void) synpred7_Fuzzy_fragment ; 
-- (void) synpred8_Fuzzy_fragment ; 
-- (void) synpred9_Fuzzy_fragment ; 
-
-@end /* end of Fuzzy interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.m b/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.m
deleted file mode 100644
index 665f412..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.m
+++ /dev/null
@@ -1,2373 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : Fuzzy.g
- *     -                            On : 2011-05-06 11:47:46
- *     -                 for the lexer : FuzzyLexer
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} Fuzzy.g 2011-05-06 11:47:46
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "Fuzzy.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-/** As per Terence: No returns for lexer rules! */
-@implementation Fuzzy // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"Fuzzy.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (Fuzzy *)newFuzzyWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    return [[Fuzzy alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    self = [super initWithCharStream:anInput State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:30+1] retain]];
-    if ( self != nil ) {
-        SEL synpred9_FuzzySelector = @selector(synpred9_Fuzzy_fragment);
-
-        SEL synpred2_FuzzySelector = @selector(synpred2_Fuzzy_fragment);
-
-        SEL synpred7_FuzzySelector = @selector(synpred7_Fuzzy_fragment);
-
-        SEL synpred4_FuzzySelector = @selector(synpred4_Fuzzy_fragment);
-
-        SEL synpred8_FuzzySelector = @selector(synpred8_Fuzzy_fragment);
-
-        SEL synpred6_FuzzySelector = @selector(synpred6_Fuzzy_fragment);
-
-        SEL synpred5_FuzzySelector = @selector(synpred5_Fuzzy_fragment);
-
-        SEL synpred3_FuzzySelector = @selector(synpred3_Fuzzy_fragment);
-
-        SEL synpred1_FuzzySelector = @selector(synpred1_Fuzzy_fragment);
-
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* ObjC Start of actions.lexer.methods */
-/* ObjC end of actions.lexer.methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-
-- (id<ANTLRToken>) nextToken
-{
-    while (YES) {
-        if ( [input LA:1] == ANTLRCharStreamEOF ) {
-            return [ANTLRCommonToken eofToken];
-        }
-        state.token = nil;
-        state.channel = ANTLRTokenChannelDefault;
-        state.tokenStartCharIndex = input.index;
-        state.tokenStartCharPositionInLine = input.charPositionInLine;
-        state.tokenStartLine = input.line;
-        state.text = nil;
-        @try {
-            NSInteger m = [input mark];
-            state.backtracking = 1; /* means we won't throw slow exception */
-            state.failed = NO;
-            [self mTokens];
-            state.backtracking = 0;
-            /* mTokens backtracks with synpred at backtracking==2
-               and we set the synpredgate to allow actions at level 1. */
-            if ( state.failed ) {
-                [input rewind:m];
-                [input consume]; /* advance one char and try again */
-            } else {
-                [self emit];
-                return state.token;
-            }
-        }
-        @catch (ANTLRRecognitionException *re) {
-            // shouldn't happen in backtracking mode, but...
-            [self reportError:re];
-            [self recover:re];
-        }
-    }
-}
-
-- (void)memoize:(id<ANTLRIntStream>)anInput
-      RuleIndex:(NSInteger)ruleIndex
-     StartIndex:(NSInteger)ruleStartIndex
-{
-    if ( state.backtracking > 1 ) [super memoize:anInput RuleIndex:ruleIndex StartIndex:ruleStartIndex];
-}
-
-- (BOOL)alreadyParsedRule:(id<ANTLRIntStream>)anInput RuleIndex:(NSInteger)ruleIndex
-{
-    if ( state.backtracking > 1 ) return [super alreadyParsedRule:anInput RuleIndex:ruleIndex];
-    return NO;
-}
-/* Start of Rules */
-// $ANTLR start "IMPORT"
-- (void) mIMPORT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = IMPORT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        ANTLRCommonToken *name=nil;
-
-        // Fuzzy.g:5:2: ( 'import' WS name= QIDStar ( WS )? ';' ) // ruleBlockSingleAlt
-        // Fuzzy.g:5:4: 'import' WS name= QIDStar ( WS )? ';' // alt
-        {
-        [self matchString:@"import"]; if ( state.failed ) return ;
-
-
-
-        [self mWS]; if ( state.failed ) return ;
-
-
-
-        NSInteger nameStart31 = input.index;
-        [self mQIDStar]; if ( state.failed ) return ;
-
-        name = [[ANTLRCommonToken newToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:nameStart31 Stop:input.index-1] retain];
-        name.line = self.line;
-
-
-        // Fuzzy.g:5:29: ( WS )? // block
-        NSInteger alt1=2;
-        NSInteger LA1_0 = [input LA:1];
-
-        if ( ((LA1_0 >= '\t' && LA1_0 <= '\n')||LA1_0==' ') ) {
-            alt1=1;
-        }
-        switch (alt1) {
-            case 1 : ;
-                // Fuzzy.g:5:29: WS // alt
-                {
-                [self mWS]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        [self matchChar:';']; if ( state.failed ) return ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "IMPORT" */
-
-// $ANTLR start "RETURN"
-- (void) mRETURN
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = RETURN;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Fuzzy.g:10:2: ( 'return' ( options {greedy=false; } : . )* ';' ) // ruleBlockSingleAlt
-        // Fuzzy.g:10:4: 'return' ( options {greedy=false; } : . )* ';' // alt
-        {
-        [self matchString:@"return"]; if ( state.failed ) return ;
-
-
-
-        do {
-            NSInteger alt2=2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( (LA2_0==';') ) {
-                alt2=2;
-            }
-            else if ( ((LA2_0 >= 0x0000 && LA2_0 <= ':')||(LA2_0 >= '<' && LA2_0 <= 0xFFFF)) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // Fuzzy.g:10:38: . // alt
-                    {
-                    [self matchAny]; if ( state.failed ) return ;
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop2;
-            }
-        } while (YES);
-        loop2: ;
-
-
-        [self matchChar:';']; if ( state.failed ) return ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "RETURN" */
-
-// $ANTLR start "CLASS"
-- (void) mCLASS
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = CLASS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        ANTLRCommonToken *name=nil;
-
-        // Fuzzy.g:14:2: ( 'class' WS name= ID ( WS )? ( 'extends' WS QID ( WS )? )? ( 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' ) // ruleBlockSingleAlt
-        // Fuzzy.g:14:4: 'class' WS name= ID ( WS )? ( 'extends' WS QID ( WS )? )? ( 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' // alt
-        {
-        [self matchString:@"class"]; if ( state.failed ) return ;
-
-
-
-        [self mWS]; if ( state.failed ) return ;
-
-
-
-        NSInteger nameStart81 = input.index;
-        [self mID]; if ( state.failed ) return ;
-
-        name = [[ANTLRCommonToken newToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:nameStart81 Stop:input.index-1] retain];
-        name.line = self.line;
-
-
-        // Fuzzy.g:14:23: ( WS )? // block
-        NSInteger alt3=2;
-        NSInteger LA3_0 = [input LA:1];
-
-        if ( ((LA3_0 >= '\t' && LA3_0 <= '\n')||LA3_0==' ') ) {
-            alt3=1;
-        }
-        switch (alt3) {
-            case 1 : ;
-                // Fuzzy.g:14:23: WS // alt
-                {
-                [self mWS]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        // Fuzzy.g:14:27: ( 'extends' WS QID ( WS )? )? // block
-        NSInteger alt5=2;
-        NSInteger LA5_0 = [input LA:1];
-
-        if ( (LA5_0=='e') ) {
-            alt5=1;
-        }
-        switch (alt5) {
-            case 1 : ;
-                // Fuzzy.g:14:28: 'extends' WS QID ( WS )? // alt
-                {
-                [self matchString:@"extends"]; if ( state.failed ) return ;
-
-
-
-                [self mWS]; if ( state.failed ) return ;
-
-
-
-                [self mQID]; if ( state.failed ) return ;
-
-
-
-                // Fuzzy.g:14:45: ( WS )? // block
-                NSInteger alt4=2;
-                NSInteger LA4_0 = [input LA:1];
-
-                if ( ((LA4_0 >= '\t' && LA4_0 <= '\n')||LA4_0==' ') ) {
-                    alt4=1;
-                }
-                switch (alt4) {
-                    case 1 : ;
-                        // Fuzzy.g:14:45: WS // alt
-                        {
-                        [self mWS]; if ( state.failed ) return ;
-
-
-
-                        }
-                        break;
-
-                }
-
-
-                }
-                break;
-
-        }
-
-
-        // Fuzzy.g:15:3: ( 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? // block
-        NSInteger alt10=2;
-        NSInteger LA10_0 = [input LA:1];
-
-        if ( (LA10_0=='i') ) {
-            alt10=1;
-        }
-        switch (alt10) {
-            case 1 : ;
-                // Fuzzy.g:15:4: 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* // alt
-                {
-                [self matchString:@"implements"]; if ( state.failed ) return ;
-
-
-
-                [self mWS]; if ( state.failed ) return ;
-
-
-
-                [self mQID]; if ( state.failed ) return ;
-
-
-
-                // Fuzzy.g:15:24: ( WS )? // block
-                NSInteger alt6=2;
-                NSInteger LA6_0 = [input LA:1];
-
-                if ( ((LA6_0 >= '\t' && LA6_0 <= '\n')||LA6_0==' ') ) {
-                    alt6=1;
-                }
-                switch (alt6) {
-                    case 1 : ;
-                        // Fuzzy.g:15:24: WS // alt
-                        {
-                        [self mWS]; if ( state.failed ) return ;
-
-
-
-                        }
-                        break;
-
-                }
-
-
-                do {
-                    NSInteger alt9=2;
-                    NSInteger LA9_0 = [input LA:1];
-                    if ( (LA9_0==',') ) {
-                        alt9=1;
-                    }
-
-
-                    switch (alt9) {
-                        case 1 : ;
-                            // Fuzzy.g:15:29: ',' ( WS )? QID ( WS )? // alt
-                            {
-                            [self matchChar:',']; if ( state.failed ) return ;
-
-
-                            // Fuzzy.g:15:33: ( WS )? // block
-                            NSInteger alt7=2;
-                            NSInteger LA7_0 = [input LA:1];
-
-                            if ( ((LA7_0 >= '\t' && LA7_0 <= '\n')||LA7_0==' ') ) {
-                                alt7=1;
-                            }
-                            switch (alt7) {
-                                case 1 : ;
-                                    // Fuzzy.g:15:33: WS // alt
-                                    {
-                                    [self mWS]; if ( state.failed ) return ;
-
-
-
-                                    }
-                                    break;
-
-                            }
-
-
-                            [self mQID]; if ( state.failed ) return ;
-
-
-
-                            // Fuzzy.g:15:41: ( WS )? // block
-                            NSInteger alt8=2;
-                            NSInteger LA8_0 = [input LA:1];
-
-                            if ( ((LA8_0 >= '\t' && LA8_0 <= '\n')||LA8_0==' ') ) {
-                                alt8=1;
-                            }
-                            switch (alt8) {
-                                case 1 : ;
-                                    // Fuzzy.g:15:41: WS // alt
-                                    {
-                                    [self mWS]; if ( state.failed ) return ;
-
-
-
-                                    }
-                                    break;
-
-                            }
-
-
-                            }
-                            break;
-
-                        default :
-                            goto loop9;
-                    }
-                } while (YES);
-                loop9: ;
-
-
-                }
-                break;
-
-        }
-
-
-        [self matchChar:'{']; if ( state.failed ) return ;
-
-
-        if ( state.backtracking == 1 ) {
-            NSLog(@"found class %@", (name!=nil?name.text:nil));
-        }
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "CLASS" */
-
-// $ANTLR start "METHOD"
-- (void) mMETHOD
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = METHOD;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        ANTLRCommonToken *name=nil;
-
-        // Fuzzy.g:20:5: ( TYPE WS name= ID ( WS )? '(' ( ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* )? ')' ( WS )? ( 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' ) // ruleBlockSingleAlt
-        // Fuzzy.g:20:9: TYPE WS name= ID ( WS )? '(' ( ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* )? ')' ( WS )? ( 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' // alt
-        {
-        [self mTYPE]; if ( state.failed ) return ;
-
-
-
-        [self mWS]; if ( state.failed ) return ;
-
-
-
-        NSInteger nameStart158 = input.index;
-        [self mID]; if ( state.failed ) return ;
-
-        name = [[ANTLRCommonToken newToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:nameStart158 Stop:input.index-1] retain];
-        name.line = self.line;
-
-
-        // Fuzzy.g:20:25: ( WS )? // block
-        NSInteger alt11=2;
-        NSInteger LA11_0 = [input LA:1];
-
-        if ( ((LA11_0 >= '\t' && LA11_0 <= '\n')||LA11_0==' ') ) {
-            alt11=1;
-        }
-        switch (alt11) {
-            case 1 : ;
-                // Fuzzy.g:20:25: WS // alt
-                {
-                [self mWS]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        [self matchChar:'(']; if ( state.failed ) return ;
-
-
-        // Fuzzy.g:20:33: ( ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* )? // block
-        NSInteger alt16=2;
-        NSInteger LA16_0 = [input LA:1];
-
-        if ( ((LA16_0 >= 'A' && LA16_0 <= 'Z')||LA16_0=='_'||(LA16_0 >= 'a' && LA16_0 <= 'z')) ) {
-            alt16=1;
-        }
-        switch (alt16) {
-            case 1 : ;
-                // Fuzzy.g:20:35: ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* // alt
-                {
-                [self mARG]; if ( state.failed ) return ;
-
-
-
-                // Fuzzy.g:20:39: ( WS )? // block
-                NSInteger alt12=2;
-                NSInteger LA12_0 = [input LA:1];
-
-                if ( ((LA12_0 >= '\t' && LA12_0 <= '\n')||LA12_0==' ') ) {
-                    alt12=1;
-                }
-                switch (alt12) {
-                    case 1 : ;
-                        // Fuzzy.g:20:39: WS // alt
-                        {
-                        [self mWS]; if ( state.failed ) return ;
-
-
-
-                        }
-                        break;
-
-                }
-
-
-                do {
-                    NSInteger alt15=2;
-                    NSInteger LA15_0 = [input LA:1];
-                    if ( (LA15_0==',') ) {
-                        alt15=1;
-                    }
-
-
-                    switch (alt15) {
-                        case 1 : ;
-                            // Fuzzy.g:20:44: ',' ( WS )? ARG ( WS )? // alt
-                            {
-                            [self matchChar:',']; if ( state.failed ) return ;
-
-
-                            // Fuzzy.g:20:48: ( WS )? // block
-                            NSInteger alt13=2;
-                            NSInteger LA13_0 = [input LA:1];
-
-                            if ( ((LA13_0 >= '\t' && LA13_0 <= '\n')||LA13_0==' ') ) {
-                                alt13=1;
-                            }
-                            switch (alt13) {
-                                case 1 : ;
-                                    // Fuzzy.g:20:48: WS // alt
-                                    {
-                                    [self mWS]; if ( state.failed ) return ;
-
-
-
-                                    }
-                                    break;
-
-                            }
-
-
-                            [self mARG]; if ( state.failed ) return ;
-
-
-
-                            // Fuzzy.g:20:56: ( WS )? // block
-                            NSInteger alt14=2;
-                            NSInteger LA14_0 = [input LA:1];
-
-                            if ( ((LA14_0 >= '\t' && LA14_0 <= '\n')||LA14_0==' ') ) {
-                                alt14=1;
-                            }
-                            switch (alt14) {
-                                case 1 : ;
-                                    // Fuzzy.g:20:56: WS // alt
-                                    {
-                                    [self mWS]; if ( state.failed ) return ;
-
-
-
-                                    }
-                                    break;
-
-                            }
-
-
-                            }
-                            break;
-
-                        default :
-                            goto loop15;
-                    }
-                } while (YES);
-                loop15: ;
-
-
-                }
-                break;
-
-        }
-
-
-        [self matchChar:')']; if ( state.failed ) return ;
-
-
-        // Fuzzy.g:20:69: ( WS )? // block
-        NSInteger alt17=2;
-        NSInteger LA17_0 = [input LA:1];
-
-        if ( ((LA17_0 >= '\t' && LA17_0 <= '\n')||LA17_0==' ') ) {
-            alt17=1;
-        }
-        switch (alt17) {
-            case 1 : ;
-                // Fuzzy.g:20:69: WS // alt
-                {
-                [self mWS]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        // Fuzzy.g:21:8: ( 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? // block
-        NSInteger alt22=2;
-        NSInteger LA22_0 = [input LA:1];
-
-        if ( (LA22_0=='t') ) {
-            alt22=1;
-        }
-        switch (alt22) {
-            case 1 : ;
-                // Fuzzy.g:21:9: 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* // alt
-                {
-                [self matchString:@"throws"]; if ( state.failed ) return ;
-
-
-
-                [self mWS]; if ( state.failed ) return ;
-
-
-
-                [self mQID]; if ( state.failed ) return ;
-
-
-
-                // Fuzzy.g:21:25: ( WS )? // block
-                NSInteger alt18=2;
-                NSInteger LA18_0 = [input LA:1];
-
-                if ( ((LA18_0 >= '\t' && LA18_0 <= '\n')||LA18_0==' ') ) {
-                    alt18=1;
-                }
-                switch (alt18) {
-                    case 1 : ;
-                        // Fuzzy.g:21:25: WS // alt
-                        {
-                        [self mWS]; if ( state.failed ) return ;
-
-
-
-                        }
-                        break;
-
-                }
-
-
-                do {
-                    NSInteger alt21=2;
-                    NSInteger LA21_0 = [input LA:1];
-                    if ( (LA21_0==',') ) {
-                        alt21=1;
-                    }
-
-
-                    switch (alt21) {
-                        case 1 : ;
-                            // Fuzzy.g:21:30: ',' ( WS )? QID ( WS )? // alt
-                            {
-                            [self matchChar:',']; if ( state.failed ) return ;
-
-
-                            // Fuzzy.g:21:34: ( WS )? // block
-                            NSInteger alt19=2;
-                            NSInteger LA19_0 = [input LA:1];
-
-                            if ( ((LA19_0 >= '\t' && LA19_0 <= '\n')||LA19_0==' ') ) {
-                                alt19=1;
-                            }
-                            switch (alt19) {
-                                case 1 : ;
-                                    // Fuzzy.g:21:34: WS // alt
-                                    {
-                                    [self mWS]; if ( state.failed ) return ;
-
-
-
-                                    }
-                                    break;
-
-                            }
-
-
-                            [self mQID]; if ( state.failed ) return ;
-
-
-
-                            // Fuzzy.g:21:42: ( WS )? // block
-                            NSInteger alt20=2;
-                            NSInteger LA20_0 = [input LA:1];
-
-                            if ( ((LA20_0 >= '\t' && LA20_0 <= '\n')||LA20_0==' ') ) {
-                                alt20=1;
-                            }
-                            switch (alt20) {
-                                case 1 : ;
-                                    // Fuzzy.g:21:42: WS // alt
-                                    {
-                                    [self mWS]; if ( state.failed ) return ;
-
-
-
-                                    }
-                                    break;
-
-                            }
-
-
-                            }
-                            break;
-
-                        default :
-                            goto loop21;
-                    }
-                } while (YES);
-                loop21: ;
-
-
-                }
-                break;
-
-        }
-
-
-        [self matchChar:'{']; if ( state.failed ) return ;
-
-
-        if ( state.backtracking == 1 ) {
-            NSLog(@"found method %@", (name!=nil?name.text:nil));
-        }
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "METHOD" */
-
-// $ANTLR start "FIELD"
-- (void) mFIELD
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = FIELD;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        ANTLRCommonToken *name=nil;
-
-        // Fuzzy.g:26:5: ( TYPE WS name= ID ( '[]' )? ( WS )? ( ';' | '=' ) ) // ruleBlockSingleAlt
-        // Fuzzy.g:26:9: TYPE WS name= ID ( '[]' )? ( WS )? ( ';' | '=' ) // alt
-        {
-        [self mTYPE]; if ( state.failed ) return ;
-
-
-
-        [self mWS]; if ( state.failed ) return ;
-
-
-
-        NSInteger nameStart261 = input.index;
-        [self mID]; if ( state.failed ) return ;
-
-        name = [[ANTLRCommonToken newToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:nameStart261 Stop:input.index-1] retain];
-        name.line = self.line;
-
-
-        // Fuzzy.g:26:25: ( '[]' )? // block
-        NSInteger alt23=2;
-        NSInteger LA23_0 = [input LA:1];
-
-        if ( (LA23_0=='[') ) {
-            alt23=1;
-        }
-        switch (alt23) {
-            case 1 : ;
-                // Fuzzy.g:26:25: '[]' // alt
-                {
-                [self matchString:@"[]"]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        // Fuzzy.g:26:31: ( WS )? // block
-        NSInteger alt24=2;
-        NSInteger LA24_0 = [input LA:1];
-
-        if ( ((LA24_0 >= '\t' && LA24_0 <= '\n')||LA24_0==' ') ) {
-            alt24=1;
-        }
-        switch (alt24) {
-            case 1 : ;
-                // Fuzzy.g:26:31: WS // alt
-                {
-                [self mWS]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        if ([input LA:1] == ';'||[input LA:1] == '=') {
-            [input consume];
-            state.failed = NO;
-
-        } else {
-            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            [self recover:mse];
-            @throw mse;
-        }
-
-
-        if ( state.backtracking == 1 ) {
-            NSLog(@"found var %@", (name!=nil?name.text:nil));
-        }
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "FIELD" */
-
-// $ANTLR start "STAT"
-- (void) mSTAT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = STAT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Fuzzy.g:30:5: ( ( 'if' | 'while' | 'switch' | 'for' ) ( WS )? '(' ) // ruleBlockSingleAlt
-        // Fuzzy.g:30:7: ( 'if' | 'while' | 'switch' | 'for' ) ( WS )? '(' // alt
-        {
-        // Fuzzy.g:30:7: ( 'if' | 'while' | 'switch' | 'for' ) // block
-        NSInteger alt25=4;
-        unichar charLA25 = [input LA:1];
-        switch (charLA25) {
-            case 'i': ;
-                {
-                alt25=1;
-                }
-                break;
-            case 'w': ;
-                {
-                alt25=2;
-                }
-                break;
-            case 's': ;
-                {
-                alt25=3;
-                }
-                break;
-            case 'f': ;
-                {
-                alt25=4;
-                }
-                break;
-
-        default: ;
-            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:25 state:0 stream:input];
-            nvae.c = charLA25;
-            @throw nvae;
-
-        }
-
-        switch (alt25) {
-            case 1 : ;
-                // Fuzzy.g:30:8: 'if' // alt
-                {
-                [self matchString:@"if"]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-            case 2 : ;
-                // Fuzzy.g:30:13: 'while' // alt
-                {
-                [self matchString:@"while"]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-            case 3 : ;
-                // Fuzzy.g:30:21: 'switch' // alt
-                {
-                [self matchString:@"switch"]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-            case 4 : ;
-                // Fuzzy.g:30:30: 'for' // alt
-                {
-                [self matchString:@"for"]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        // Fuzzy.g:30:37: ( WS )? // block
-        NSInteger alt26=2;
-        NSInteger LA26_0 = [input LA:1];
-
-        if ( ((LA26_0 >= '\t' && LA26_0 <= '\n')||LA26_0==' ') ) {
-            alt26=1;
-        }
-        switch (alt26) {
-            case 1 : ;
-                // Fuzzy.g:30:37: WS // alt
-                {
-                [self mWS]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        [self matchChar:'(']; if ( state.failed ) return ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "STAT" */
-
-// $ANTLR start "CALL"
-- (void) mCALL
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = CALL;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        ANTLRCommonToken *name=nil;
-
-        // Fuzzy.g:33:5: (name= QID ( WS )? '(' ) // ruleBlockSingleAlt
-        // Fuzzy.g:33:9: name= QID ( WS )? '(' // alt
-        {
-        NSInteger nameStart326 = input.index;
-        [self mQID]; if ( state.failed ) return ;
-
-        name = [[ANTLRCommonToken newToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:nameStart326 Stop:input.index-1] retain];
-        name.line = self.line;
-
-
-        // Fuzzy.g:33:18: ( WS )? // block
-        NSInteger alt27=2;
-        NSInteger LA27_0 = [input LA:1];
-
-        if ( ((LA27_0 >= '\t' && LA27_0 <= '\n')||LA27_0==' ') ) {
-            alt27=1;
-        }
-        switch (alt27) {
-            case 1 : ;
-                // Fuzzy.g:33:18: WS // alt
-                {
-                [self mWS]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        [self matchChar:'(']; if ( state.failed ) return ;
-
-
-        if ( state.backtracking == 1 ) {
-            /*ignore if this/super */ NSLog(@"found call %@",(name!=nil?name.text:nil));
-        }
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "CALL" */
-
-// $ANTLR start "COMMENT"
-- (void) mCOMMENT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = COMMENT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Fuzzy.g:38:5: ( '/*' ( options {greedy=false; } : . )* '*/' ) // ruleBlockSingleAlt
-        // Fuzzy.g:38:9: '/*' ( options {greedy=false; } : . )* '*/' // alt
-        {
-        [self matchString:@"/*"]; if ( state.failed ) return ;
-
-
-
-        do {
-            NSInteger alt28=2;
-            NSInteger LA28_0 = [input LA:1];
-            if ( (LA28_0=='*') ) {
-                NSInteger LA28_1 = [input LA:2];
-                if ( (LA28_1=='/') ) {
-                    alt28=2;
-                }
-                else if ( ((LA28_1 >= 0x0000 && LA28_1 <= '.')||(LA28_1 >= '0' && LA28_1 <= 0xFFFF)) ) {
-                    alt28=1;
-                }
-
-
-            }
-            else if ( ((LA28_0 >= 0x0000 && LA28_0 <= ')')||(LA28_0 >= '+' && LA28_0 <= 0xFFFF)) ) {
-                alt28=1;
-            }
-
-
-            switch (alt28) {
-                case 1 : ;
-                    // Fuzzy.g:38:41: . // alt
-                    {
-                    [self matchAny]; if ( state.failed ) return ;
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop28;
-            }
-        } while (YES);
-        loop28: ;
-
-
-        [self matchString:@"*/"]; if ( state.failed ) return ;
-
-
-
-        if ( state.backtracking == 1 ) {
-            NSLog(@"found comment %@", [self text]);
-        }
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "COMMENT" */
-
-// $ANTLR start "SL_COMMENT"
-- (void) mSL_COMMENT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = SL_COMMENT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Fuzzy.g:43:5: ( '//' ( options {greedy=false; } : . )* '\\n' ) // ruleBlockSingleAlt
-        // Fuzzy.g:43:9: '//' ( options {greedy=false; } : . )* '\\n' // alt
-        {
-        [self matchString:@"//"]; if ( state.failed ) return ;
-
-
-
-        do {
-            NSInteger alt29=2;
-            NSInteger LA29_0 = [input LA:1];
-            if ( (LA29_0=='\n') ) {
-                alt29=2;
-            }
-            else if ( ((LA29_0 >= 0x0000 && LA29_0 <= '\t')||(LA29_0 >= 0x000B && LA29_0 <= 0xFFFF)) ) {
-                alt29=1;
-            }
-
-
-            switch (alt29) {
-                case 1 : ;
-                    // Fuzzy.g:43:41: . // alt
-                    {
-                    [self matchAny]; if ( state.failed ) return ;
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop29;
-            }
-        } while (YES);
-        loop29: ;
-
-
-        [self matchChar:'\n']; if ( state.failed ) return ;
-
-
-        if ( state.backtracking == 1 ) {
-            NSLog(@"found // comment %@", [self text]);
-        }
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "SL_COMMENT" */
-
-// $ANTLR start "STRING"
-- (void) mSTRING
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = STRING;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Fuzzy.g:48:2: ( '\"' ( options {greedy=false; } : ESC | . )* '\"' ) // ruleBlockSingleAlt
-        // Fuzzy.g:48:4: '\"' ( options {greedy=false; } : ESC | . )* '\"' // alt
-        {
-        [self matchChar:'"']; if ( state.failed ) return ;
-
-
-        do {
-            NSInteger alt30=3;
-            NSInteger LA30_0 = [input LA:1];
-            if ( (LA30_0=='"') ) {
-                alt30=3;
-            }
-            else if ( (LA30_0=='\\') ) {
-                NSInteger LA30_2 = [input LA:2];
-                if ( (LA30_2=='"') ) {
-                    alt30=1;
-                }
-                else if ( (LA30_2=='\\') ) {
-                    alt30=1;
-                }
-                else if ( (LA30_2=='\'') ) {
-                    alt30=1;
-                }
-                else if ( ((LA30_2 >= 0x0000 && LA30_2 <= '!')||(LA30_2 >= '#' && LA30_2 <= '&')||(LA30_2 >= '(' && LA30_2 <= '[')||(LA30_2 >= ']' && LA30_2 <= 0xFFFF)) ) {
-                    alt30=2;
-                }
-
-
-            }
-            else if ( ((LA30_0 >= 0x0000 && LA30_0 <= '!')||(LA30_0 >= '#' && LA30_0 <= '[')||(LA30_0 >= ']' && LA30_0 <= 0xFFFF)) ) {
-                alt30=2;
-            }
-
-
-            switch (alt30) {
-                case 1 : ;
-                    // Fuzzy.g:48:34: ESC // alt
-                    {
-                    [self mESC]; if ( state.failed ) return ;
-
-
-
-                    }
-                    break;
-                case 2 : ;
-                    // Fuzzy.g:48:40: . // alt
-                    {
-                    [self matchAny]; if ( state.failed ) return ;
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop30;
-            }
-        } while (YES);
-        loop30: ;
-
-
-        [self matchChar:'"']; if ( state.failed ) return ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "STRING" */
-
-// $ANTLR start "CHAR"
-- (void) mCHAR
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = CHAR;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Fuzzy.g:52:2: ( '\\'' ( options {greedy=false; } : ESC | . )* '\\'' ) // ruleBlockSingleAlt
-        // Fuzzy.g:52:4: '\\'' ( options {greedy=false; } : ESC | . )* '\\'' // alt
-        {
-        [self matchChar:'\'']; if ( state.failed ) return ;
-
-
-        do {
-            NSInteger alt31=3;
-            NSInteger LA31_0 = [input LA:1];
-            if ( (LA31_0=='\'') ) {
-                alt31=3;
-            }
-            else if ( (LA31_0=='\\') ) {
-                NSInteger LA31_2 = [input LA:2];
-                if ( (LA31_2=='\'') ) {
-                    alt31=1;
-                }
-                else if ( (LA31_2=='\\') ) {
-                    alt31=1;
-                }
-                else if ( (LA31_2=='"') ) {
-                    alt31=1;
-                }
-                else if ( ((LA31_2 >= 0x0000 && LA31_2 <= '!')||(LA31_2 >= '#' && LA31_2 <= '&')||(LA31_2 >= '(' && LA31_2 <= '[')||(LA31_2 >= ']' && LA31_2 <= 0xFFFF)) ) {
-                    alt31=2;
-                }
-
-
-            }
-            else if ( ((LA31_0 >= 0x0000 && LA31_0 <= '&')||(LA31_0 >= '(' && LA31_0 <= '[')||(LA31_0 >= ']' && LA31_0 <= 0xFFFF)) ) {
-                alt31=2;
-            }
-
-
-            switch (alt31) {
-                case 1 : ;
-                    // Fuzzy.g:52:35: ESC // alt
-                    {
-                    [self mESC]; if ( state.failed ) return ;
-
-
-
-                    }
-                    break;
-                case 2 : ;
-                    // Fuzzy.g:52:41: . // alt
-                    {
-                    [self matchAny]; if ( state.failed ) return ;
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop31;
-            }
-        } while (YES);
-        loop31: ;
-
-
-        [self matchChar:'\'']; if ( state.failed ) return ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "CHAR" */
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Fuzzy.g:55:5: ( ( ' ' | '\\t' | '\\n' )+ ) // ruleBlockSingleAlt
-        // Fuzzy.g:55:9: ( ' ' | '\\t' | '\\n' )+ // alt
-        {
-        // Fuzzy.g:55:9: ( ' ' | '\\t' | '\\n' )+ // positiveClosureBlock
-        NSInteger cnt32 = 0;
-        do {
-            NSInteger alt32 = 2;
-            NSInteger LA32_0 = [input LA:1];
-            if ( ((LA32_0 >= '\t' && LA32_0 <= '\n')||LA32_0==' ') ) {
-                alt32=1;
-            }
-
-
-            switch (alt32) {
-                case 1 : ;
-                    // Fuzzy.g: // alt
-                    {
-                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == ' ') {
-                        [input consume];
-                        state.failed = NO;
-
-                    } else {
-                        if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt32 >= 1 )
-                        goto loop32;
-                    if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:32];
-                    @throw eee;
-            }
-            cnt32++;
-        } while (YES);
-        loop32: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "WS" */
-
-// $ANTLR start "QID"
-- (void) mQID
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // Fuzzy.g:59:5: ( ID ( '.' ID )* ) // ruleBlockSingleAlt
-        // Fuzzy.g:59:7: ID ( '.' ID )* // alt
-        {
-        [self mID]; if ( state.failed ) return ;
-
-
-
-        do {
-            NSInteger alt33=2;
-            NSInteger LA33_0 = [input LA:1];
-            if ( (LA33_0=='.') ) {
-                alt33=1;
-            }
-
-
-            switch (alt33) {
-                case 1 : ;
-                    // Fuzzy.g:59:11: '.' ID // alt
-                    {
-                    [self matchChar:'.']; if ( state.failed ) return ;
-
-
-                    [self mID]; if ( state.failed ) return ;
-
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop33;
-            }
-        } while (YES);
-        loop33: ;
-
-
-        }
-
-
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "QID" */
-
-// $ANTLR start "QIDStar"
-- (void) mQIDStar
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // Fuzzy.g:68:2: ( ID ( '.' ID )* ( '.*' )? ) // ruleBlockSingleAlt
-        // Fuzzy.g:68:4: ID ( '.' ID )* ( '.*' )? // alt
-        {
-        [self mID]; if ( state.failed ) return ;
-
-
-
-        do {
-            NSInteger alt34=2;
-            NSInteger LA34_0 = [input LA:1];
-            if ( (LA34_0=='.') ) {
-                NSInteger LA34_1 = [input LA:2];
-                if ( ((LA34_1 >= 'A' && LA34_1 <= 'Z')||LA34_1=='_'||(LA34_1 >= 'a' && LA34_1 <= 'z')) ) {
-                    alt34=1;
-                }
-
-
-            }
-
-
-            switch (alt34) {
-                case 1 : ;
-                    // Fuzzy.g:68:8: '.' ID // alt
-                    {
-                    [self matchChar:'.']; if ( state.failed ) return ;
-
-
-                    [self mID]; if ( state.failed ) return ;
-
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop34;
-            }
-        } while (YES);
-        loop34: ;
-
-
-        // Fuzzy.g:68:17: ( '.*' )? // block
-        NSInteger alt35=2;
-        NSInteger LA35_0 = [input LA:1];
-
-        if ( (LA35_0=='.') ) {
-            alt35=1;
-        }
-        switch (alt35) {
-            case 1 : ;
-                // Fuzzy.g:68:17: '.*' // alt
-                {
-                [self matchString:@".*"]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        }
-
-
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "QIDStar" */
-
-// $ANTLR start "TYPE"
-- (void) mTYPE
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // Fuzzy.g:72:5: ( QID ( '[]' )? ) // ruleBlockSingleAlt
-        // Fuzzy.g:72:9: QID ( '[]' )? // alt
-        {
-        [self mQID]; if ( state.failed ) return ;
-
-
-
-        // Fuzzy.g:72:13: ( '[]' )? // block
-        NSInteger alt36=2;
-        NSInteger LA36_0 = [input LA:1];
-
-        if ( (LA36_0=='[') ) {
-            alt36=1;
-        }
-        switch (alt36) {
-            case 1 : ;
-                // Fuzzy.g:72:13: '[]' // alt
-                {
-                [self matchString:@"[]"]; if ( state.failed ) return ;
-
-
-
-                }
-                break;
-
-        }
-
-
-        }
-
-
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "TYPE" */
-
-// $ANTLR start "ARG"
-- (void) mARG
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // Fuzzy.g:76:5: ( TYPE WS ID ) // ruleBlockSingleAlt
-        // Fuzzy.g:76:9: TYPE WS ID // alt
-        {
-        [self mTYPE]; if ( state.failed ) return ;
-
-
-
-        [self mWS]; if ( state.failed ) return ;
-
-
-
-        [self mID]; if ( state.failed ) return ;
-
-
-
-        }
-
-
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "ARG" */
-
-// $ANTLR start "ID"
-- (void) mID
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // Fuzzy.g:80:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )* ) // ruleBlockSingleAlt
-        // Fuzzy.g:80:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )* // alt
-        {
-        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-            [input consume];
-            state.failed = NO;
-
-        } else {
-            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            [self recover:mse];
-            @throw mse;
-        }
-
-
-        do {
-            NSInteger alt37=2;
-            NSInteger LA37_0 = [input LA:1];
-            if ( ((LA37_0 >= '0' && LA37_0 <= '9')||(LA37_0 >= 'A' && LA37_0 <= 'Z')||LA37_0=='_'||(LA37_0 >= 'a' && LA37_0 <= 'z')) ) {
-                alt37=1;
-            }
-
-
-            switch (alt37) {
-                case 1 : ;
-                    // Fuzzy.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-                        [input consume];
-                        state.failed = NO;
-
-                    } else {
-                        if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop37;
-            }
-        } while (YES);
-        loop37: ;
-
-
-        }
-
-
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "ID" */
-
-// $ANTLR start "ESC"
-- (void) mESC
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // Fuzzy.g:84:5: ( '\\\\' ( '\"' | '\\'' | '\\\\' ) ) // ruleBlockSingleAlt
-        // Fuzzy.g:84:7: '\\\\' ( '\"' | '\\'' | '\\\\' ) // alt
-        {
-        [self matchChar:'\\']; if ( state.failed ) return ;
-
-
-        if ([input LA:1] == '"'||[input LA:1] == '\''||[input LA:1] == '\\') {
-            [input consume];
-            state.failed = NO;
-
-        } else {
-            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            [self recover:mse];
-            @throw mse;
-        }
-
-
-        }
-
-
-    }
-    @finally {
-        //
-        /* my stuff */
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "ESC" */
-
-- (void) mTokens
-{
-    // Fuzzy.g:1:39: ( IMPORT | RETURN | CLASS | METHOD | FIELD | STAT | CALL | COMMENT | SL_COMMENT | STRING | CHAR | WS ) //ruleblock
-    NSInteger alt38=12;
-    unichar charLA38 = [input LA:1];
-    switch (charLA38) {
-        case 'i': ;
-            {
-            NSInteger LA38_1 = [input LA:2];
-
-            if ( ([self evaluateSyntacticPredicate:@selector(synpred1_Fuzzy_fragment)]) ) {
-                alt38=1;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) ) {
-                alt38=4;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) ) {
-                alt38=5;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred6_Fuzzy_fragment)]) ) {
-                alt38=6;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) ) {
-                alt38=7;
-            }
-            else {
-                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-                ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:38 state:1 stream:input];
-                nvae.c = LA38_1;
-                @throw nvae;
-
-            }
-            }
-            break;
-        case 'r': ;
-            {
-            NSInteger LA38_7 = [input LA:2];
-
-            if ( ([self evaluateSyntacticPredicate:@selector(synpred2_Fuzzy_fragment)]) ) {
-                alt38=2;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) ) {
-                alt38=4;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) ) {
-                alt38=5;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) ) {
-                alt38=7;
-            }
-            else {
-                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-                ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:38 state:7 stream:input];
-                nvae.c = LA38_7;
-                @throw nvae;
-
-            }
-            }
-            break;
-        case 'c': ;
-            {
-            NSInteger LA38_9 = [input LA:2];
-
-            if ( ([self evaluateSyntacticPredicate:@selector(synpred3_Fuzzy_fragment)]) ) {
-                alt38=3;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) ) {
-                alt38=4;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) ) {
-                alt38=5;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) ) {
-                alt38=7;
-            }
-            else {
-                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-                ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:38 state:9 stream:input];
-                nvae.c = LA38_9;
-                @throw nvae;
-
-            }
-            }
-            break;
-        case 'f': ;
-        case 's': ;
-        case 'w': ;
-            {
-            NSInteger LA38_11 = [input LA:2];
-
-            if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) ) {
-                alt38=4;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) ) {
-                alt38=5;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred6_Fuzzy_fragment)]) ) {
-                alt38=6;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) ) {
-                alt38=7;
-            }
-            else {
-                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-                ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:38 state:11 stream:input];
-                nvae.c = LA38_11;
-                @throw nvae;
-
-            }
-            }
-            break;
-        case 'A': ;
-        case 'B': ;
-        case 'C': ;
-        case 'D': ;
-        case 'E': ;
-        case 'F': ;
-        case 'G': ;
-        case 'H': ;
-        case 'I': ;
-        case 'J': ;
-        case 'K': ;
-        case 'L': ;
-        case 'M': ;
-        case 'N': ;
-        case 'O': ;
-        case 'P': ;
-        case 'Q': ;
-        case 'R': ;
-        case 'S': ;
-        case 'T': ;
-        case 'U': ;
-        case 'V': ;
-        case 'W': ;
-        case 'X': ;
-        case 'Y': ;
-        case 'Z': ;
-        case '_': ;
-        case 'a': ;
-        case 'b': ;
-        case 'd': ;
-        case 'e': ;
-        case 'g': ;
-        case 'h': ;
-        case 'j': ;
-        case 'k': ;
-        case 'l': ;
-        case 'm': ;
-        case 'n': ;
-        case 'o': ;
-        case 'p': ;
-        case 'q': ;
-        case 't': ;
-        case 'u': ;
-        case 'v': ;
-        case 'x': ;
-        case 'y': ;
-        case 'z': ;
-            {
-            NSInteger LA38_12 = [input LA:2];
-
-            if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) ) {
-                alt38=4;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) ) {
-                alt38=5;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) ) {
-                alt38=7;
-            }
-            else {
-                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-                ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:38 state:12 stream:input];
-                nvae.c = LA38_12;
-                @throw nvae;
-
-            }
-            }
-            break;
-        case '/': ;
-            {
-            NSInteger LA38_13 = [input LA:2];
-
-            if ( ([self evaluateSyntacticPredicate:@selector(synpred8_Fuzzy_fragment)]) ) {
-                alt38=8;
-            }
-            else if ( ([self evaluateSyntacticPredicate:@selector(synpred9_Fuzzy_fragment)]) ) {
-                alt38=9;
-            }
-            else {
-                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-                ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:38 state:13 stream:input];
-                nvae.c = LA38_13;
-                @throw nvae;
-
-            }
-            }
-            break;
-        case '"': ;
-            {
-            alt38=10;
-            }
-            break;
-        case '\'': ;
-            {
-            alt38=11;
-            }
-            break;
-        case '\t': ;
-        case '\n': ;
-        case ' ': ;
-            {
-            alt38=12;
-            }
-            break;
-
-    default: ;
-        if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-
-        ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:38 state:0 stream:input];
-        nvae.c = charLA38;
-        @throw nvae;
-
-    }
-
-    switch (alt38) {
-        case 1 : ;
-            // Fuzzy.g:1:41: IMPORT // alt
-            {
-            [self mIMPORT]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 2 : ;
-            // Fuzzy.g:1:48: RETURN // alt
-            {
-            [self mRETURN]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 3 : ;
-            // Fuzzy.g:1:55: CLASS // alt
-            {
-            [self mCLASS]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 4 : ;
-            // Fuzzy.g:1:61: METHOD // alt
-            {
-            [self mMETHOD]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 5 : ;
-            // Fuzzy.g:1:68: FIELD // alt
-            {
-            [self mFIELD]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 6 : ;
-            // Fuzzy.g:1:74: STAT // alt
-            {
-            [self mSTAT]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 7 : ;
-            // Fuzzy.g:1:79: CALL // alt
-            {
-            [self mCALL]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 8 : ;
-            // Fuzzy.g:1:84: COMMENT // alt
-            {
-            [self mCOMMENT]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 9 : ;
-            // Fuzzy.g:1:92: SL_COMMENT // alt
-            {
-            [self mSL_COMMENT]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 10 : ;
-            // Fuzzy.g:1:103: STRING // alt
-            {
-            [self mSTRING]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 11 : ;
-            // Fuzzy.g:1:110: CHAR // alt
-            {
-            [self mCHAR]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-        case 12 : ;
-            // Fuzzy.g:1:115: WS // alt
-            {
-            [self mWS]; if ( state.failed ) return ;
-
-
-
-            }
-            break;
-
-    }
-
-}
-
-// $ANTLR start synpred1_Fuzzy_fragment
-- (void) synpred1_Fuzzy_fragment
-{
-    // Fuzzy.g:1:41: ( IMPORT ) // ruleBlockSingleAlt
-    // Fuzzy.g:1:41: IMPORT // alt
-    {
-    [self mIMPORT]; if ( state.failed ) return ;
-
-
-
-    }
-
-} // $ANTLR end synpred1_Fuzzy_fragment
-
-// $ANTLR start synpred2_Fuzzy_fragment
-- (void) synpred2_Fuzzy_fragment
-{
-    // Fuzzy.g:1:48: ( RETURN ) // ruleBlockSingleAlt
-    // Fuzzy.g:1:48: RETURN // alt
-    {
-    [self mRETURN]; if ( state.failed ) return ;
-
-
-
-    }
-
-} // $ANTLR end synpred2_Fuzzy_fragment
-
-// $ANTLR start synpred3_Fuzzy_fragment
-- (void) synpred3_Fuzzy_fragment
-{
-    // Fuzzy.g:1:55: ( CLASS ) // ruleBlockSingleAlt
-    // Fuzzy.g:1:55: CLASS // alt
-    {
-    [self mCLASS]; if ( state.failed ) return ;
-
-
-
-    }
-
-} // $ANTLR end synpred3_Fuzzy_fragment
-
-// $ANTLR start synpred4_Fuzzy_fragment
-- (void) synpred4_Fuzzy_fragment
-{
-    // Fuzzy.g:1:61: ( METHOD ) // ruleBlockSingleAlt
-    // Fuzzy.g:1:61: METHOD // alt
-    {
-    [self mMETHOD]; if ( state.failed ) return ;
-
-
-
-    }
-
-} // $ANTLR end synpred4_Fuzzy_fragment
-
-// $ANTLR start synpred5_Fuzzy_fragment
-- (void) synpred5_Fuzzy_fragment
-{
-    // Fuzzy.g:1:68: ( FIELD ) // ruleBlockSingleAlt
-    // Fuzzy.g:1:68: FIELD // alt
-    {
-    [self mFIELD]; if ( state.failed ) return ;
-
-
-
-    }
-
-} // $ANTLR end synpred5_Fuzzy_fragment
-
-// $ANTLR start synpred6_Fuzzy_fragment
-- (void) synpred6_Fuzzy_fragment
-{
-    // Fuzzy.g:1:74: ( STAT ) // ruleBlockSingleAlt
-    // Fuzzy.g:1:74: STAT // alt
-    {
-    [self mSTAT]; if ( state.failed ) return ;
-
-
-
-    }
-
-} // $ANTLR end synpred6_Fuzzy_fragment
-
-// $ANTLR start synpred7_Fuzzy_fragment
-- (void) synpred7_Fuzzy_fragment
-{
-    // Fuzzy.g:1:79: ( CALL ) // ruleBlockSingleAlt
-    // Fuzzy.g:1:79: CALL // alt
-    {
-    [self mCALL]; if ( state.failed ) return ;
-
-
-
-    }
-
-} // $ANTLR end synpred7_Fuzzy_fragment
-
-// $ANTLR start synpred8_Fuzzy_fragment
-- (void) synpred8_Fuzzy_fragment
-{
-    // Fuzzy.g:1:84: ( COMMENT ) // ruleBlockSingleAlt
-    // Fuzzy.g:1:84: COMMENT // alt
-    {
-    [self mCOMMENT]; if ( state.failed ) return ;
-
-
-
-    }
-
-} // $ANTLR end synpred8_Fuzzy_fragment
-
-// $ANTLR start synpred9_Fuzzy_fragment
-- (void) synpred9_Fuzzy_fragment
-{
-    // Fuzzy.g:1:92: ( SL_COMMENT ) // ruleBlockSingleAlt
-    // Fuzzy.g:1:92: SL_COMMENT // alt
-    {
-    [self mSL_COMMENT]; if ( state.failed ) return ;
-
-
-
-    }
-
-} // $ANTLR end synpred9_Fuzzy_fragment
-
-@end /* end of Fuzzy implementation line 397 */
\ No newline at end of file
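
Note: the mTokens dispatcher in the deleted Fuzzy.m above chooses among the twelve token rules by speculatively running the synpredN_Fuzzy_fragment rules against the same input position and committing to the first one that succeeds. The framework-free sketch below illustrates that try/rewind pattern in isolation; the helper and its names (TokenFragment, firstMatchingAlt, mark, rewindTo) are illustrative stand-ins for the lexer's mark/rewind machinery, not part of the ANTLR Objective-C runtime.

    #import <Foundation/Foundation.h>

    typedef BOOL (^TokenFragment)(void);   // returns YES if the fragment rule matched

    // Try each candidate fragment against the same start position and return the
    // index of the first one that matches, or NSNotFound if none do.  mark() and
    // rewindTo() stand in for the lexer's input mark/rewind calls.
    static NSUInteger firstMatchingAlt(NSArray *alts,
                                       NSUInteger (^mark)(void),
                                       void (^rewindTo)(NSUInteger)) {
        for (NSUInteger i = 0; i < [alts count]; i++) {
            NSUInteger m = mark();                 // remember the input position
            BOOL ok = ((TokenFragment)alts[i])();  // speculative match, like synpredN_Fuzzy_fragment
            rewindTo(m);                           // always rewind; the winning rule is re-run for real
            if (ok) {
                return i;
            }
        }
        return NSNotFound;
    }

In the generated code each candidate would wrap a call such as [self synpred1_Fuzzy_fragment] and report success via state.failed, which is the same first-match-wins decision shown here.
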
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/antlr3.h b/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/antlr3.h
deleted file mode 100644
index 4f16279..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/antlr3.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#import <ANTLR/ANTLRBaseMapElement.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRBaseStack.h>
-#import <ANTLR/ANTLRBaseTree.h>
-#import <ANTLR/ANTLRBaseTreeAdaptor.h>
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBufferedTokenStream.h>
-#import <ANTLR/ANTLRBufferedTreeNodeStream.h>
-#import <ANTLR/ANTLRCharStream.h>
-#import <ANTLR/ANTLRCharStreamState.h>
-#import <ANTLR/ANTLRCommonErrorNode.h>
-#import <ANTLR/ANTLRCommonToken.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRDebug.h>
-#import <ANTLR/ANTLRDebugEventProxy.h>
-#import <ANTLR/ANTLRDebugEventListener.h>
-#import <ANTLR/ANTLRDebugParser.h>
-#import <ANTLR/ANTLRDebugTokenStream.h>
-#import <ANTLR/ANTLRDebugTreeAdaptor.h>
-#import <ANTLR/ANTLRDebugTreeNodeStream.h>
-#import <ANTLR/ANTLRDebugTreeParser.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRError.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRFastQueue.h>
-#import <ANTLR/ANTLRHashMap.h>
-#import <ANTLR/ANTLRHashRule.h>
-#import <ANTLR/ANTLRIntArray.h>
-#import <ANTLR/ANTLRIntStream.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRLexerRuleReturnScope.h>
-#import <ANTLR/ANTLRLinkBase.h>
-#import <ANTLR/ANTLRLookaheadStream.h>
-#import <ANTLR/ANTLRMapElement.h>
-#import <ANTLR/ANTLRMap.h>
-#import <ANTLR/ANTLRMismatchedNotSetException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRMissingTokenException.h>
-#import <ANTLR/ANTLRNodeMapElement.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRPtrBuffer.h>
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLRRecognizerSharedState.h>
-#import <ANTLR/ANTLRRewriteRuleElementStream.h>
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
-#import <ANTLR/ANTLRRuleMemo.h>
-#import <ANTLR/ANTLRRuleStack.h>
-#import <ANTLR/ANTLRRuleReturnScope.h>
-#import <ANTLR/ANTLRRuntimeException.h>
-#import <ANTLR/ANTLRStreamEnumerator.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRSymbolStack.h>
-#import <ANTLR/ANTLRToken+DebuggerSupport.h>
-#import <ANTLR/ANTLRToken.h>
-#import <ANTLR/ANTLRTokenRewriteStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRTokenStream.h>
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeException.h>
-#import <ANTLR/ANTLRTreeIterator.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-#import <ANTLR/ANTLRUnbufferedTokenStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-#import <ANTLR/ANTLRUniqueIDMap.h>
-#import <ANTLR/ANTLRUnwantedTokenException.h>
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/main.m b/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/main.m
deleted file mode 100644
index 379a519..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/main.m
+++ /dev/null
@@ -1,26 +0,0 @@
-#import <Cocoa/Cocoa.h>
-#import "Fuzzy.h"
-#import "antlr3.h"
-
-int main(int argc, const char * argv[])
-{
-    NSError *error;
-	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
-	NSString *input = [NSString stringWithContentsOfFile:@"/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/examples/fuzzy/input"  encoding:NSASCIIStringEncoding error:&error];
-	NSLog(@"%@", input);
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:input];
-	Fuzzy *lex = [Fuzzy newFuzzyWithCharStream:stream];
-	ANTLRCommonTokenStream *tokens = [ANTLRCommonTokenStream newANTLRCommonTokenStreamWithTokenSource:lex];
-	NSLog( [tokens toString] );
-
-	id<ANTLRToken> currentToken;
-	while ((currentToken = [lex nextToken]) && [currentToken getType] != ANTLRTokenTypeEOF) {
-		NSLog(@"### %@", [currentToken toString]);
-	}
-
-	[lex release];
-	[stream release];
-	
-	[pool release];
-	return 0;
-}
\ No newline at end of file
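
Note: the deleted main.m above drives the generated lexer directly. For comparison, a minimal driver using only the ANTLR Objective-C calls that appear in that file could look like the sketch below; it differs only in checking the NSError returned by the file read and in giving every NSLog call an explicit format string (the original passed [tokens toString] straight in as the format). The input path here is a placeholder.

    #import <Cocoa/Cocoa.h>
    #import "Fuzzy.h"
    #import "antlr3.h"

    int main(int argc, const char *argv[]) {
        NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

        NSError *error = nil;
        NSString *input = [NSString stringWithContentsOfFile:@"input"
                                                    encoding:NSASCIIStringEncoding
                                                       error:&error];
        if (input == nil) {
            NSLog(@"could not read input: %@", error);
            [pool release];
            return 1;
        }

        ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:input];
        Fuzzy *lex = [Fuzzy newFuzzyWithCharStream:stream];

        // Pull tokens until EOF, logging each one with an explicit format string.
        id<ANTLRToken> token;
        while ((token = [lex nextToken]) && [token getType] != ANTLRTokenTypeEOF) {
            NSLog(@"### %@", [token toString]);
        }

        [lex release];
        [stream release];
        [pool release];
        return 0;
    }
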
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/output1/Fuzzy.m b/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/output1/Fuzzy.m
deleted file mode 100644
index 908a214..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/output1/Fuzzy.m
+++ /dev/null
@@ -1,2162 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 20, 2010 13:39:32
- *
- *     -  From the grammar source file : /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g
- *     -                            On : 2010-08-20 13:40:15
- *     -                 for the lexer : FuzzyLexer *
- * Editing it, at least manually, is not wise. 
- *
- * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 20, 2010 13:39:32 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g 2010-08-20 13:40:15
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "Fuzzy.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-#pragma mark Cyclic DFA implementation start DFA38
-@implementation DFA38
-const static NSInteger dfa38_eot[19] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static NSInteger dfa38_eof[19] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static unichar dfa38_min[19] =
-    {9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-const static unichar dfa38_max[19] =
-    {122,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-const static NSInteger dfa38_accept[19] =
-    {-1,-1,4,5,7,-1,2,-1,8,9,-1,3,-1,1,6,10,-1,12,11};
-const static NSInteger dfa38_special[19] =
-    {-1,0,-1,-1,-1,1,-1,2,-1,-1,3,-1,4,-1,-1,-1,5,-1,-1};
-const static NSInteger dfa38_transition[] = {};
-const static NSInteger dfa38_transition0[] = {17, 17, -1, -1, -1, -1, -1, 
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 17, -1, 
- 15, -1, -1, -1, -1, 18, -1, -1, -1, -1, -1, -1, -1, 7, -1, -1, -1, -1, 
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 
- 1, -1, 1, 1, 10, 1, 1, 16, 1, 1, 12, 1, 1, 1, 1, 1, 1, 1, 1, 5, 16, 1, 
- 1, 1, 16, 1, 1, 1};
-const static NSInteger dfa38_transition1[] = {-1};
-
-
-+ () newDFA38WithRecognizer:(ANTLRBaseRecognizer *)aRecognizer
-{
-    return [[[DFA38 alloc] initWithRecognizer:aRecognizer] retain];
-}
-
-- (id) initWithRecognizer:(ANTLRBaseRecognizer *) theRecognizer
-{
-    if ((self = [super initWithRecognizer:theRecognizer]) != nil) {
-        decisionNumber = 38;
-        eot = dfa38_eot;
-        eof = dfa38_eof;
-        min = dfa38_min;
-        max = dfa38_max;
-        accept = dfa38_accept;
-        special = dfa38_special;
-        if (!(transition = calloc(19, sizeof(void*)))) {
-            [self release];
-            return nil;
-        }
-        len = 19;
-        transition[0] = dfa38_transition0;
-        transition[1] = dfa38_transition1;
-        transition[2] = dfa38_transition;
-        transition[3] = dfa38_transition;
-        transition[4] = dfa38_transition;
-        transition[5] = dfa38_transition1;
-        transition[6] = dfa38_transition;
-        transition[7] = dfa38_transition1;
-        transition[8] = dfa38_transition;
-        transition[9] = dfa38_transition;
-        transition[10] = dfa38_transition1;
-        transition[11] = dfa38_transition;
-        transition[12] = dfa38_transition1;
-        transition[13] = dfa38_transition;
-        transition[14] = dfa38_transition;
-        transition[15] = dfa38_transition;
-        transition[16] = dfa38_transition1;
-        transition[17] = dfa38_transition;
-        transition[18] = dfa38_transition;
-    }
-    return self;
-}
-
-/* start dfa.specialStateSTs */
-- (NSInteger) specialStateTransition:(NSInteger)s Stream:(id<ANTLRIntStream>)anInput
-{
-    id<ANTLRIntStream> input = anInput;
-    switch (s) {
-                case 0 : ;
-                    /* cyclicDFAState */
-                    NSInteger LA38_1 = [input LA:1];
-
-                     
-                    NSInteger index38_1 = [input getIndex];
-                    [input rewind];
-                    s = -1;
-                    /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) { s = 2;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) { s = 3;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) { s = 4;}
-
-                     
-                    [input seek:index38_1];
-                    if ( s >= 0 )
-                        return s;
-                     break;
-                case 1 : ;
-                    /* cyclicDFAState */
-                    NSInteger LA38_5 = [input LA:1];
-
-                     
-                    NSInteger index38_5 = [input getIndex];
-                    [input rewind];
-                    s = -1;
-                    /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred2_Fuzzy_fragment)]) { s = 6;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) { s = 2;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) { s = 3;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) { s = 4;}
-
-                     
-                    [input seek:index38_5];
-                    if ( s >= 0 )
-                        return s;
-                     break;
-                case 2 : ;
-                    /* cyclicDFAState */
-                    NSInteger LA38_7 = [input LA:1];
-
-                     
-                    NSInteger index38_7 = [input getIndex];
-                    [input rewind];
-                    s = -1;
-                    /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred8_Fuzzy_fragment)]) { s = 8;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred9_Fuzzy_fragment)]) { s = 9;}
-
-                     
-                    [input seek:index38_7];
-                    if ( s >= 0 )
-                        return s;
-                     break;
-                case 3 : ;
-                    /* cyclicDFAState */
-                    NSInteger LA38_10 = [input LA:1];
-
-                     
-                    NSInteger index38_10 = [input getIndex];
-                    [input rewind];
-                    s = -1;
-                    /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred3_Fuzzy_fragment)]) { s = 11;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) { s = 2;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) { s = 3;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) { s = 4;}
-
-                     
-                    [input seek:index38_10];
-                    if ( s >= 0 )
-                        return s;
-                     break;
-                case 4 : ;
-                    /* cyclicDFAState */
-                    NSInteger LA38_12 = [input LA:1];
-
-                     
-                    NSInteger index38_12 = [input getIndex];
-                    [input rewind];
-                    s = -1;
-                    /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred1_Fuzzy_fragment)]) { s = 13;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) { s = 2;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) { s = 3;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred6_Fuzzy_fragment)]) { s = 14;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) { s = 4;}
-
-                     
-                    [input seek:index38_12];
-                    if ( s >= 0 )
-                        return s;
-                     break;
-                case 5 : ;
-                    /* cyclicDFAState */
-                    NSInteger LA38_16 = [input LA:1];
-
-                     
-                    NSInteger index38_16 = [input getIndex];
-                    [input rewind];
-                    s = -1;
-                    /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) { s = 2;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) { s = 3;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred6_Fuzzy_fragment)]) { s = 14;}
-
-                    else /* cyclicDFAEdge */
-                    if ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) { s = 4;}
-
-                     
-                    [input seek:index38_16];
-                    if ( s >= 0 )
-                        return s;
-                     break;
-    }
-    if ( [recognizer getBacktrackingLevel] > 0 ) { [recognizer setFailed:YES]; return -1; }
-    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:38 state:s stream:[recognizer getInput]];
-    /* [self error:nvae]; */ 
-    @throw nvae;
-}
-
-/* end dfa.specialStateSTs */
-- (void) dealloc
-{
-    free(transition);
-    [super dealloc];
-}
-
-- (NSString *) description
-{
-    return @"1:1: Tokens options {k=1; backtrack=true; } : ( IMPORT | RETURN | CLASS | METHOD | FIELD | STAT | CALL | COMMENT | SL_COMMENT | STRING | CHAR | WS );";
-}
-
-
-@end
-#pragma mark Cyclic DFA implementation end DFA38
-
-
-
-/** As per Terence: No returns for lexer rules!
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-*/
-@implementation Fuzzy // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"/usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (Fuzzy *)newFuzzy:(id<ANTLRCharStream>)anInput
-{
-    return [[Fuzzy alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    if ((self = [super initWithCharStream:anInput State:[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:30+1]]) != nil) {
-        SEL synpred9_FuzzySelector = @selector(synpred9_Fuzzy_fragment);
-
-        SEL synpred2_FuzzySelector = @selector(synpred2_Fuzzy_fragment);
-
-        SEL synpred7_FuzzySelector = @selector(synpred7_Fuzzy_fragment);
-
-        SEL synpred4_FuzzySelector = @selector(synpred4_Fuzzy_fragment);
-
-        SEL synpred8_FuzzySelector = @selector(synpred8_Fuzzy_fragment);
-
-        SEL synpred6_FuzzySelector = @selector(synpred6_Fuzzy_fragment);
-
-        SEL synpred5_FuzzySelector = @selector(synpred5_Fuzzy_fragment);
-
-        SEL synpred3_FuzzySelector = @selector(synpred3_Fuzzy_fragment);
-
-        SEL synpred1_FuzzySelector = @selector(synpred1_Fuzzy_fragment);
-
-        dfa38 = [DFA38 newDFA38WithRecognizer:self];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [dfa38 release];
-    [super dealloc];
-}
-
-// Start of actions.lexer.methods
-// start methods()
-
-- (id<ANTLRToken>) nextToken
-{
-    while (YES) {
-        if ( [input LA:1] == ANTLRCharStreamEOF ) {
-            return [ANTLRCommonToken eofToken]; // should really be a +eofToken call here -> go figure
-        }
-        state.token = nil;
-        state.channel = ANTLRTokenChannelDefault;
-        state.tokenStartCharIndex = [input getIndex];
-        state.tokenStartCharPositionInLine = [input getCharPositionInLine];
-        state.tokenStartLine = [input getLine];
-        state.text = nil;
-        @try {
-            NSInteger m = [input mark];
-            state.backtracking = 1; /* means we won't throw slow exception */
-            state.failed = NO;
-            [self mTokens];
-            state.backtracking = 0;
-            /* mTokens backtracks with synpred at backtracking==2
-               and we set the synpredgate to allow actions at level 1. */
-            if ( state.failed == YES ) {
-                [input rewind:m];
-                [input consume]; /* advance one char and try again */
-            } else {
-                [self emit];
-                return state.token;
-            }
-        }
-        @catch (ANTLRRecognitionException *re) {
-            // shouldn't happen in backtracking mode, but...
-            [self reportError:re];
-            [self recover:re];
-        }
-    }
-}
-
-- (void)memoize:(id<ANTLRIntStream>)anInput
-      RuleIndex:(NSInteger)ruleIndex
-     StartIndex:(NSInteger)ruleStartIndex
-{
-    if ( state.backtracking > 1 ) [super memoize:anInput RuleIndex:ruleIndex StartIndex:ruleStartIndex];
-}
-
-- (BOOL)alreadyParsedRule:(id<ANTLRIntStream>)anInput RuleIndex:(NSInteger)ruleIndex
-{
-    if ( state.backtracking > 1 ) return [super alreadyParsedRule:anInput RuleIndex:ruleIndex];
-    return NO;
-}
-// Start of Rules
-// $ANTLR start "IMPORT"
-- (void) mIMPORT
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = IMPORT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        id<ANTLRToken> name=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:5:2: ( 'import' WS name= QIDStar ( WS )? ';' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:5:4: 'import' WS name= QIDStar ( WS )? ';' // alt
-        {
-        [self matchString:@"import"]; if ( state.failed == YES ) return ;
-          /* element() */
-            [self mWS]; if ( state.failed == YES ) return ;
-          /* element() */
-        NSInteger nameStart31 = [self getIndex];
-        [self mQIDStar]; if ( state.failed == YES ) return ;
-
-        name = [[ANTLRCommonToken newANTLRCommonToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:nameStart31 Stop:[self getIndex]-1] retain];
-        [name setLine:[self getLine]];  /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:5:29: ( WS )? // block
-        NSInteger alt1=2;
-        NSInteger LA1_0 = [input LA:1];
-
-        if ( ((LA1_0>='\t' && LA1_0<='\n')||LA1_0==' ') ) {
-            alt1=1;
-        }
-        switch (alt1) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:5:29: WS // alt
-                {
-                    [self mWS]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        [self matchChar:';']; if ( state.failed == YES ) return ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "IMPORT"
-
-// $ANTLR start "RETURN"
-- (void) mRETURN
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = RETURN;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:10:2: ( 'return' ( options {greedy=false; } : . )* ';' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:10:4: 'return' ( options {greedy=false; } : . )* ';' // alt
-        {
-        [self matchString:@"return"]; if ( state.failed == YES ) return ;
-          /* element() */
-        do {
-            NSInteger alt2=2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( (LA2_0==';') ) {
-                alt2=2;
-            }
-            else if ( ((LA2_0>=0x0000 && LA2_0<=':')||(LA2_0>='<' && LA2_0<=0xFFFF)) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:10:38: . // alt
-                    {
-                    [self matchAny]; if ( state.failed == YES ) return ;
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop2;
-            }
-        } while (YES);
-        loop2: ;
-          /* element() */
-        [self matchChar:';']; if ( state.failed == YES ) return ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "RETURN"
-
-// $ANTLR start "CLASS"
-- (void) mCLASS
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = CLASS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        id<ANTLRToken> name=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:14:2: ( 'class' WS name= ID ( WS )? ( 'extends' WS QID ( WS )? )? ( 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:14:4: 'class' WS name= ID ( WS )? ( 'extends' WS QID ( WS )? )? ( 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' // alt
-        {
-        [self matchString:@"class"]; if ( state.failed == YES ) return ;
-          /* element() */
-            [self mWS]; if ( state.failed == YES ) return ;
-          /* element() */
-        NSInteger nameStart81 = [self getIndex];
-        [self mID]; if ( state.failed == YES ) return ;
-
-        name = [[ANTLRCommonToken newANTLRCommonToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:nameStart81 Stop:[self getIndex]-1] retain];
-        [name setLine:[self getLine]];  /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:14:23: ( WS )? // block
-        NSInteger alt3=2;
-        NSInteger LA3_0 = [input LA:1];
-
-        if ( ((LA3_0>='\t' && LA3_0<='\n')||LA3_0==' ') ) {
-            alt3=1;
-        }
-        switch (alt3) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:14:23: WS // alt
-                {
-                    [self mWS]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:14:27: ( 'extends' WS QID ( WS )? )? // block
-        NSInteger alt5=2;
-        NSInteger LA5_0 = [input LA:1];
-
-        if ( (LA5_0=='e') ) {
-            alt5=1;
-        }
-        switch (alt5) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:14:28: 'extends' WS QID ( WS )? // alt
-                {
-                [self matchString:@"extends"]; if ( state.failed == YES ) return ;
-                  /* element() */
-                    [self mWS]; if ( state.failed == YES ) return ;
-                  /* element() */
-                    [self mQID]; if ( state.failed == YES ) return ;
-                  /* element() */
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:14:45: ( WS )? // block
-                NSInteger alt4=2;
-                NSInteger LA4_0 = [input LA:1];
-
-                if ( ((LA4_0>='\t' && LA4_0<='\n')||LA4_0==' ') ) {
-                    alt4=1;
-                }
-                switch (alt4) {
-                    case 1 : ;
-                        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:14:45: WS // alt
-                        {
-                            [self mWS]; if ( state.failed == YES ) return ;
-                          /* element() */
-                         /* elements */
-                        }
-                        break;
-
-                }
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:15:3: ( 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? // block
-        NSInteger alt10=2;
-        NSInteger LA10_0 = [input LA:1];
-
-        if ( (LA10_0=='i') ) {
-            alt10=1;
-        }
-        switch (alt10) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:15:4: 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* // alt
-                {
-                [self matchString:@"implements"]; if ( state.failed == YES ) return ;
-                  /* element() */
-                    [self mWS]; if ( state.failed == YES ) return ;
-                  /* element() */
-                    [self mQID]; if ( state.failed == YES ) return ;
-                  /* element() */
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:15:24: ( WS )? // block
-                NSInteger alt6=2;
-                NSInteger LA6_0 = [input LA:1];
-
-                if ( ((LA6_0>='\t' && LA6_0<='\n')||LA6_0==' ') ) {
-                    alt6=1;
-                }
-                switch (alt6) {
-                    case 1 : ;
-                        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:15:24: WS // alt
-                        {
-                            [self mWS]; if ( state.failed == YES ) return ;
-                          /* element() */
-                         /* elements */
-                        }
-                        break;
-
-                }
-                  /* element() */
-                do {
-                    NSInteger alt9=2;
-                    NSInteger LA9_0 = [input LA:1];
-                    if ( (LA9_0==',') ) {
-                        alt9=1;
-                    }
-
-
-                    switch (alt9) {
-                        case 1 : ;
-                            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:15:29: ',' ( WS )? QID ( WS )? // alt
-                            {
-                            [self matchChar:',']; if ( state.failed == YES ) return ;
-                              /* element() */
-                            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:15:33: ( WS )? // block
-                            NSInteger alt7=2;
-                            NSInteger LA7_0 = [input LA:1];
-
-                            if ( ((LA7_0>='\t' && LA7_0<='\n')||LA7_0==' ') ) {
-                                alt7=1;
-                            }
-                            switch (alt7) {
-                                case 1 : ;
-                                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:15:33: WS // alt
-                                    {
-                                        [self mWS]; if ( state.failed == YES ) return ;
-                                      /* element() */
-                                     /* elements */
-                                    }
-                                    break;
-
-                            }
-                              /* element() */
-                                [self mQID]; if ( state.failed == YES ) return ;
-                              /* element() */
-                            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:15:41: ( WS )? // block
-                            NSInteger alt8=2;
-                            NSInteger LA8_0 = [input LA:1];
-
-                            if ( ((LA8_0>='\t' && LA8_0<='\n')||LA8_0==' ') ) {
-                                alt8=1;
-                            }
-                            switch (alt8) {
-                                case 1 : ;
-                                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:15:41: WS // alt
-                                    {
-                                        [self mWS]; if ( state.failed == YES ) return ;
-                                      /* element() */
-                                     /* elements */
-                                    }
-                                    break;
-
-                            }
-                              /* element() */
-                             /* elements */
-                            }
-                            break;
-
-                        default :
-                            goto loop9;
-                    }
-                } while (YES);
-                loop9: ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        [self matchChar:'{']; if ( state.failed == YES ) return ;
-          /* element() */
-        if ( state.backtracking == 1 ) {
-            NSLog(@"found class %@", (name!=nil?[name getText]:0));
-        }  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "CLASS"
-
-// $ANTLR start "METHOD"
-- (void) mMETHOD
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = METHOD;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        id<ANTLRToken> name=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:5: ( TYPE WS name= ID ( WS )? '(' ( ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* )? ')' ( WS )? ( 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:9: TYPE WS name= ID ( WS )? '(' ( ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* )? ')' ( WS )? ( 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' // alt
-        {
-            [self mTYPE]; if ( state.failed == YES ) return ;
-          /* element() */
-            [self mWS]; if ( state.failed == YES ) return ;
-          /* element() */
-        NSInteger nameStart158 = [self getIndex];
-        [self mID]; if ( state.failed == YES ) return ;
-
-        name = [[ANTLRCommonToken newANTLRCommonToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:nameStart158 Stop:[self getIndex]-1] retain];
-        [name setLine:[self getLine]];  /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:25: ( WS )? // block
-        NSInteger alt11=2;
-        NSInteger LA11_0 = [input LA:1];
-
-        if ( ((LA11_0>='\t' && LA11_0<='\n')||LA11_0==' ') ) {
-            alt11=1;
-        }
-        switch (alt11) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:25: WS // alt
-                {
-                    [self mWS]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        [self matchChar:'(']; if ( state.failed == YES ) return ;
-          /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:33: ( ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* )? // block
-        NSInteger alt16=2;
-        NSInteger LA16_0 = [input LA:1];
-
-        if ( ((LA16_0>='A' && LA16_0<='Z')||LA16_0=='_'||(LA16_0>='a' && LA16_0<='z')) ) {
-            alt16=1;
-        }
-        switch (alt16) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:35: ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* // alt
-                {
-                    [self mARG]; if ( state.failed == YES ) return ;
-                  /* element() */
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:39: ( WS )? // block
-                NSInteger alt12=2;
-                NSInteger LA12_0 = [input LA:1];
-
-                if ( ((LA12_0>='\t' && LA12_0<='\n')||LA12_0==' ') ) {
-                    alt12=1;
-                }
-                switch (alt12) {
-                    case 1 : ;
-                        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:39: WS // alt
-                        {
-                            [self mWS]; if ( state.failed == YES ) return ;
-                          /* element() */
-                         /* elements */
-                        }
-                        break;
-
-                }
-                  /* element() */
-                do {
-                    NSInteger alt15=2;
-                    NSInteger LA15_0 = [input LA:1];
-                    if ( (LA15_0==',') ) {
-                        alt15=1;
-                    }
-
-
-                    switch (alt15) {
-                        case 1 : ;
-                            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:44: ',' ( WS )? ARG ( WS )? // alt
-                            {
-                            [self matchChar:',']; if ( state.failed == YES ) return ;
-                              /* element() */
-                            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:48: ( WS )? // block
-                            NSInteger alt13=2;
-                            NSInteger LA13_0 = [input LA:1];
-
-                            if ( ((LA13_0>='\t' && LA13_0<='\n')||LA13_0==' ') ) {
-                                alt13=1;
-                            }
-                            switch (alt13) {
-                                case 1 : ;
-                                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:48: WS // alt
-                                    {
-                                        [self mWS]; if ( state.failed == YES ) return ;
-                                      /* element() */
-                                     /* elements */
-                                    }
-                                    break;
-
-                            }
-                              /* element() */
-                                [self mARG]; if ( state.failed == YES ) return ;
-                              /* element() */
-                            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:56: ( WS )? // block
-                            NSInteger alt14=2;
-                            NSInteger LA14_0 = [input LA:1];
-
-                            if ( ((LA14_0>='\t' && LA14_0<='\n')||LA14_0==' ') ) {
-                                alt14=1;
-                            }
-                            switch (alt14) {
-                                case 1 : ;
-                                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:56: WS // alt
-                                    {
-                                        [self mWS]; if ( state.failed == YES ) return ;
-                                      /* element() */
-                                     /* elements */
-                                    }
-                                    break;
-
-                            }
-                              /* element() */
-                             /* elements */
-                            }
-                            break;
-
-                        default :
-                            goto loop15;
-                    }
-                } while (YES);
-                loop15: ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        [self matchChar:')']; if ( state.failed == YES ) return ;
-          /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:69: ( WS )? // block
-        NSInteger alt17=2;
-        NSInteger LA17_0 = [input LA:1];
-
-        if ( ((LA17_0>='\t' && LA17_0<='\n')||LA17_0==' ') ) {
-            alt17=1;
-        }
-        switch (alt17) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:20:69: WS // alt
-                {
-                    [self mWS]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:21:8: ( 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? // block
-        NSInteger alt22=2;
-        NSInteger LA22_0 = [input LA:1];
-
-        if ( (LA22_0=='t') ) {
-            alt22=1;
-        }
-        switch (alt22) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:21:9: 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* // alt
-                {
-                [self matchString:@"throws"]; if ( state.failed == YES ) return ;
-                  /* element() */
-                    [self mWS]; if ( state.failed == YES ) return ;
-                  /* element() */
-                    [self mQID]; if ( state.failed == YES ) return ;
-                  /* element() */
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:21:25: ( WS )? // block
-                NSInteger alt18=2;
-                NSInteger LA18_0 = [input LA:1];
-
-                if ( ((LA18_0>='\t' && LA18_0<='\n')||LA18_0==' ') ) {
-                    alt18=1;
-                }
-                switch (alt18) {
-                    case 1 : ;
-                        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:21:25: WS // alt
-                        {
-                            [self mWS]; if ( state.failed == YES ) return ;
-                          /* element() */
-                         /* elements */
-                        }
-                        break;
-
-                }
-                  /* element() */
-                do {
-                    NSInteger alt21=2;
-                    NSInteger LA21_0 = [input LA:1];
-                    if ( (LA21_0==',') ) {
-                        alt21=1;
-                    }
-
-
-                    switch (alt21) {
-                        case 1 : ;
-                            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:21:30: ',' ( WS )? QID ( WS )? // alt
-                            {
-                            [self matchChar:',']; if ( state.failed == YES ) return ;
-                              /* element() */
-                            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:21:34: ( WS )? // block
-                            NSInteger alt19=2;
-                            NSInteger LA19_0 = [input LA:1];
-
-                            if ( ((LA19_0>='\t' && LA19_0<='\n')||LA19_0==' ') ) {
-                                alt19=1;
-                            }
-                            switch (alt19) {
-                                case 1 : ;
-                                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:21:34: WS // alt
-                                    {
-                                        [self mWS]; if ( state.failed == YES ) return ;
-                                      /* element() */
-                                     /* elements */
-                                    }
-                                    break;
-
-                            }
-                              /* element() */
-                                [self mQID]; if ( state.failed == YES ) return ;
-                              /* element() */
-                            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:21:42: ( WS )? // block
-                            NSInteger alt20=2;
-                            NSInteger LA20_0 = [input LA:1];
-
-                            if ( ((LA20_0>='\t' && LA20_0<='\n')||LA20_0==' ') ) {
-                                alt20=1;
-                            }
-                            switch (alt20) {
-                                case 1 : ;
-                                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:21:42: WS // alt
-                                    {
-                                        [self mWS]; if ( state.failed == YES ) return ;
-                                      /* element() */
-                                     /* elements */
-                                    }
-                                    break;
-
-                            }
-                              /* element() */
-                             /* elements */
-                            }
-                            break;
-
-                        default :
-                            goto loop21;
-                    }
-                } while (YES);
-                loop21: ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        [self matchChar:'{']; if ( state.failed == YES ) return ;
-          /* element() */
-        if ( state.backtracking == 1 ) {
-            NSLog(@"found method %@", (name!=nil?[name getText]:0));
-        }  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "METHOD"
-
-// $ANTLR start "FIELD"
-- (void) mFIELD
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = FIELD;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        id<ANTLRToken> name=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:26:5: ( TYPE WS name= ID ( '[]' )? ( WS )? ( ';' | '=' ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:26:9: TYPE WS name= ID ( '[]' )? ( WS )? ( ';' | '=' ) // alt
-        {
-            [self mTYPE]; if ( state.failed == YES ) return ;
-          /* element() */
-            [self mWS]; if ( state.failed == YES ) return ;
-          /* element() */
-        NSInteger nameStart261 = [self getIndex];
-        [self mID]; if ( state.failed == YES ) return ;
-
-        name = [[ANTLRCommonToken newANTLRCommonToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:nameStart261 Stop:[self getIndex]-1] retain];
-        [name setLine:[self getLine]];  /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:26:25: ( '[]' )? // block
-        NSInteger alt23=2;
-        NSInteger LA23_0 = [input LA:1];
-
-        if ( (LA23_0=='[') ) {
-            alt23=1;
-        }
-        switch (alt23) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:26:25: '[]' // alt
-                {
-                [self matchString:@"[]"]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:26:31: ( WS )? // block
-        NSInteger alt24=2;
-        NSInteger LA24_0 = [input LA:1];
-
-        if ( ((LA24_0>='\t' && LA24_0<='\n')||LA24_0==' ') ) {
-            alt24=1;
-        }
-        switch (alt24) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:26:31: WS // alt
-                {
-                    [self mWS]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        if ([input LA:1] == ';'||[input LA:1] == '=') {
-            [input consume];
-        state.failed = NO;
-
-        } else {
-            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-            [self recover:mse];
-            @throw mse;}
-          /* element() */
-        if ( state.backtracking == 1 ) {
-            NSLog(@"found var %@", (name!=nil?[name getText]:0));
-        }  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "FIELD"
-
-// $ANTLR start "STAT"
-- (void) mSTAT
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = STAT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:30:5: ( ( 'if' | 'while' | 'switch' | 'for' ) ( WS )? '(' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:30:7: ( 'if' | 'while' | 'switch' | 'for' ) ( WS )? '(' // alt
-        {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:30:7: ( 'if' | 'while' | 'switch' | 'for' ) // block
-        NSInteger alt25=4;
-        switch ([input LA:1]) {
-            case 'i': ;
-                {
-                alt25=1;
-                }
-                break;
-            case 'w': ;
-                {
-                alt25=2;
-                }
-                break;
-            case 's': ;
-                {
-                alt25=3;
-                }
-                break;
-            case 'f': ;
-                {
-                alt25=4;
-                }
-                break;
-
-        default: ;
-            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:25 state:0 stream:input];
-            @throw nvae;
-        }
-
-        switch (alt25) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:30:8: 'if' // alt
-                {
-                [self matchString:@"if"]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-            case 2 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:30:13: 'while' // alt
-                {
-                [self matchString:@"while"]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-            case 3 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:30:21: 'switch' // alt
-                {
-                [self matchString:@"switch"]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-            case 4 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:30:30: 'for' // alt
-                {
-                [self matchString:@"for"]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:30:37: ( WS )? // block
-        NSInteger alt26=2;
-        NSInteger LA26_0 = [input LA:1];
-
-        if ( ((LA26_0>='\t' && LA26_0<='\n')||LA26_0==' ') ) {
-            alt26=1;
-        }
-        switch (alt26) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:30:37: WS // alt
-                {
-                    [self mWS]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        [self matchChar:'(']; if ( state.failed == YES ) return ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "STAT"
-
-// $ANTLR start "CALL"
-- (void) mCALL
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = CALL;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        id<ANTLRToken> name=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:33:5: (name= QID ( WS )? '(' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:33:9: name= QID ( WS )? '(' // alt
-        {
-        NSInteger nameStart326 = [self getIndex];
-        [self mQID]; if ( state.failed == YES ) return ;
-
-        name = [[ANTLRCommonToken newANTLRCommonToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:nameStart326 Stop:[self getIndex]-1] retain];
-        [name setLine:[self getLine]];  /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:33:18: ( WS )? // block
-        NSInteger alt27=2;
-        NSInteger LA27_0 = [input LA:1];
-
-        if ( ((LA27_0>='\t' && LA27_0<='\n')||LA27_0==' ') ) {
-            alt27=1;
-        }
-        switch (alt27) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:33:18: WS // alt
-                {
-                    [self mWS]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        [self matchChar:'(']; if ( state.failed == YES ) return ;
-          /* element() */
-        if ( state.backtracking == 1 ) {
-            /*ignore if this/super */ NSLog(@"found call %@",(name!=nil?[name getText]:0));
-        }  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "CALL"
-
-// $ANTLR start "COMMENT"
-- (void) mCOMMENT
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = COMMENT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:38:5: ( '/*' ( options {greedy=false; } : . )* '*/' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:38:9: '/*' ( options {greedy=false; } : . )* '*/' // alt
-        {
-        [self matchString:@"/*"]; if ( state.failed == YES ) return ;
-          /* element() */
-        do {
-            NSInteger alt28=2;
-            NSInteger LA28_0 = [input LA:1];
-            if ( (LA28_0=='*') ) {
-                NSInteger LA28_1 = [input LA:2];
-                if ( (LA28_1=='/') ) {
-                    alt28=2;
-                }
-                else if ( ((LA28_1>=0x0000 && LA28_1<='.')||(LA28_1>='0' && LA28_1<=0xFFFF)) ) {
-                    alt28=1;
-                }
-
-
-            }
-            else if ( ((LA28_0>=0x0000 && LA28_0<=')')||(LA28_0>='+' && LA28_0<=0xFFFF)) ) {
-                alt28=1;
-            }
-
-
-            switch (alt28) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:38:41: . // alt
-                    {
-                    [self matchAny]; if ( state.failed == YES ) return ;
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop28;
-            }
-        } while (YES);
-        loop28: ;
-          /* element() */
-        [self matchString:@"*/"]; if ( state.failed == YES ) return ;
-          /* element() */
-        if ( state.backtracking == 1 ) {
-            NSLog(@"found comment %@", [self getText]);
-        }  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "COMMENT"
-
-// $ANTLR start "SL_COMMENT"
-- (void) mSL_COMMENT
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = SL_COMMENT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:43:5: ( '//' ( options {greedy=false; } : . )* '\\n' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:43:9: '//' ( options {greedy=false; } : . )* '\\n' // alt
-        {
-        [self matchString:@"//"]; if ( state.failed == YES ) return ;
-          /* element() */
-        do {
-            NSInteger alt29=2;
-            NSInteger LA29_0 = [input LA:1];
-            if ( (LA29_0=='\n') ) {
-                alt29=2;
-            }
-            else if ( ((LA29_0>=0x0000 && LA29_0<='\t')||(LA29_0>=0x000B && LA29_0<=0xFFFF)) ) {
-                alt29=1;
-            }
-
-
-            switch (alt29) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:43:41: . // alt
-                    {
-                    [self matchAny]; if ( state.failed == YES ) return ;
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop29;
-            }
-        } while (YES);
-        loop29: ;
-          /* element() */
-        [self matchChar:'\n']; if ( state.failed == YES ) return ;
-          /* element() */
-        if ( state.backtracking == 1 ) {
-            NSLog(@"found // comment %@", [self getText]);
-        }  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "SL_COMMENT"
-
-// $ANTLR start "STRING"
-- (void) mSTRING
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = STRING;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:48:2: ( '\"' ( options {greedy=false; } : ESC | . )* '\"' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:48:4: '\"' ( options {greedy=false; } : ESC | . )* '\"' // alt
-        {
-        [self matchChar:'"']; if ( state.failed == YES ) return ;
-          /* element() */
-        do {
-            NSInteger alt30=3;
-            NSInteger LA30_0 = [input LA:1];
-            if ( (LA30_0=='"') ) {
-                alt30=3;
-            }
-            else if ( (LA30_0=='\\') ) {
-                NSInteger LA30_2 = [input LA:2];
-                if ( (LA30_2=='"') ) {
-                    alt30=1;
-                }
-                else if ( (LA30_2=='\\') ) {
-                    alt30=1;
-                }
-                else if ( (LA30_2=='\'') ) {
-                    alt30=1;
-                }
-                else if ( ((LA30_2>=0x0000 && LA30_2<='!')||(LA30_2>='#' && LA30_2<='&')||(LA30_2>='(' && LA30_2<='[')||(LA30_2>=']' && LA30_2<=0xFFFF)) ) {
-                    alt30=2;
-                }
-
-
-            }
-            else if ( ((LA30_0>=0x0000 && LA30_0<='!')||(LA30_0>='#' && LA30_0<='[')||(LA30_0>=']' && LA30_0<=0xFFFF)) ) {
-                alt30=2;
-            }
-
-
-            switch (alt30) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:48:34: ESC // alt
-                    {
-                        [self mESC]; if ( state.failed == YES ) return ;
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-                case 2 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:48:40: . // alt
-                    {
-                    [self matchAny]; if ( state.failed == YES ) return ;
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop30;
-            }
-        } while (YES);
-        loop30: ;
-          /* element() */
-        [self matchChar:'"']; if ( state.failed == YES ) return ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "STRING"
-
-// $ANTLR start "CHAR"
-- (void) mCHAR
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = CHAR;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:52:2: ( '\\'' ( options {greedy=false; } : ESC | . )* '\\'' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:52:4: '\\'' ( options {greedy=false; } : ESC | . )* '\\'' // alt
-        {
-        [self matchChar:'\'']; if ( state.failed == YES ) return ;
-          /* element() */
-        do {
-            NSInteger alt31=3;
-            NSInteger LA31_0 = [input LA:1];
-            if ( (LA31_0=='\'') ) {
-                alt31=3;
-            }
-            else if ( (LA31_0=='\\') ) {
-                NSInteger LA31_2 = [input LA:2];
-                if ( (LA31_2=='\'') ) {
-                    alt31=1;
-                }
-                else if ( (LA31_2=='\\') ) {
-                    alt31=1;
-                }
-                else if ( (LA31_2=='"') ) {
-                    alt31=1;
-                }
-                else if ( ((LA31_2>=0x0000 && LA31_2<='!')||(LA31_2>='#' && LA31_2<='&')||(LA31_2>='(' && LA31_2<='[')||(LA31_2>=']' && LA31_2<=0xFFFF)) ) {
-                    alt31=2;
-                }
-
-
-            }
-            else if ( ((LA31_0>=0x0000 && LA31_0<='&')||(LA31_0>='(' && LA31_0<='[')||(LA31_0>=']' && LA31_0<=0xFFFF)) ) {
-                alt31=2;
-            }
-
-
-            switch (alt31) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:52:35: ESC // alt
-                    {
-                        [self mESC]; if ( state.failed == YES ) return ;
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-                case 2 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:52:41: . // alt
-                    {
-                    [self matchAny]; if ( state.failed == YES ) return ;
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop31;
-            }
-        } while (YES);
-        loop31: ;
-          /* element() */
-        [self matchChar:'\'']; if ( state.failed == YES ) return ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "CHAR"
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:55:5: ( ( ' ' | '\\t' | '\\n' )+ ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:55:9: ( ' ' | '\\t' | '\\n' )+ // alt
-        {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:55:9: ( ' ' | '\\t' | '\\n' )+ // positiveClosureBlock
-        NSInteger cnt32=0;
-        do {
-            NSInteger alt32=2;
-            NSInteger LA32_0 = [input LA:1];
-            if ( ((LA32_0>='\t' && LA32_0<='\n')||LA32_0==' ') ) {
-                alt32=1;
-            }
-
-
-            switch (alt32) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g: // alt
-                    {
-                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == ' ') {
-                        [input consume];
-                    state.failed = NO;
-
-                    } else {
-                        if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;}
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt32 >= 1 )
-                        goto loop32;
-                    if ( state.backtracking > 0 ) { state.failed = YES; return ; }            ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:32];
-                    @throw eee;
-            }
-            cnt32++;
-        } while (YES);
-        loop32: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "WS"
-
-// $ANTLR start "QID"
-- (void) mQID
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:59:5: ( ID ( '.' ID )* ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:59:7: ID ( '.' ID )* // alt
-        {
-            [self mID]; if ( state.failed == YES ) return ;
-          /* element() */
-        do {
-            NSInteger alt33=2;
-            NSInteger LA33_0 = [input LA:1];
-            if ( (LA33_0=='.') ) {
-                alt33=1;
-            }
-
-
-            switch (alt33) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:59:11: '.' ID // alt
-                    {
-                    [self matchChar:'.']; if ( state.failed == YES ) return ;
-                      /* element() */
-                        [self mID]; if ( state.failed == YES ) return ;
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop33;
-            }
-        } while (YES);
-        loop33: ;
-          /* element() */
-         /* elements */
-        }
-
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "QID"
-
-// $ANTLR start "QIDStar"
-- (void) mQIDStar
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:68:2: ( ID ( '.' ID )* ( '.*' )? ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:68:4: ID ( '.' ID )* ( '.*' )? // alt
-        {
-            [self mID]; if ( state.failed == YES ) return ;
-          /* element() */
-        do {
-            NSInteger alt34=2;
-            NSInteger LA34_0 = [input LA:1];
-            if ( (LA34_0=='.') ) {
-                NSInteger LA34_1 = [input LA:2];
-                if ( ((LA34_1>='A' && LA34_1<='Z')||LA34_1=='_'||(LA34_1>='a' && LA34_1<='z')) ) {
-                    alt34=1;
-                }
-
-
-            }
-
-
-            switch (alt34) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:68:8: '.' ID // alt
-                    {
-                    [self matchChar:'.']; if ( state.failed == YES ) return ;
-                      /* element() */
-                        [self mID]; if ( state.failed == YES ) return ;
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop34;
-            }
-        } while (YES);
-        loop34: ;
-          /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:68:17: ( '.*' )? // block
-        NSInteger alt35=2;
-        NSInteger LA35_0 = [input LA:1];
-
-        if ( (LA35_0=='.') ) {
-            alt35=1;
-        }
-        switch (alt35) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:68:17: '.*' // alt
-                {
-                [self matchString:@".*"]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-         /* elements */
-        }
-
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "QIDStar"
-
-// $ANTLR start "TYPE"
-- (void) mTYPE
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:72:5: ( QID ( '[]' )? ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:72:9: QID ( '[]' )? // alt
-        {
-            [self mQID]; if ( state.failed == YES ) return ;
-          /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:72:13: ( '[]' )? // block
-        NSInteger alt36=2;
-        NSInteger LA36_0 = [input LA:1];
-
-        if ( (LA36_0=='[') ) {
-            alt36=1;
-        }
-        switch (alt36) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:72:13: '[]' // alt
-                {
-                [self matchString:@"[]"]; if ( state.failed == YES ) return ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-         /* elements */
-        }
-
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "TYPE"
-
-// $ANTLR start "ARG"
-- (void) mARG
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:76:5: ( TYPE WS ID ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:76:9: TYPE WS ID // alt
-        {
-            [self mTYPE]; if ( state.failed == YES ) return ;
-          /* element() */
-            [self mWS]; if ( state.failed == YES ) return ;
-          /* element() */
-            [self mID]; if ( state.failed == YES ) return ;
-          /* element() */
-         /* elements */
-        }
-
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "ARG"
-
-// $ANTLR start "ID"
-- (void) mID
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:80:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )* ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:80:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )* // alt
-        {
-        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-            [input consume];
-        state.failed = NO;
-
-        } else {
-            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-            [self recover:mse];
-            @throw mse;}
-          /* element() */
-        do {
-            NSInteger alt37=2;
-            NSInteger LA37_0 = [input LA:1];
-            if ( ((LA37_0>='0' && LA37_0<='9')||(LA37_0>='A' && LA37_0<='Z')||LA37_0=='_'||(LA37_0>='a' && LA37_0<='z')) ) {
-                alt37=1;
-            }
-
-
-            switch (alt37) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-                        [input consume];
-                    state.failed = NO;
-
-                    } else {
-                        if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;}
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop37;
-            }
-        } while (YES);
-        loop37: ;
-          /* element() */
-         /* elements */
-        }
-
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "ID"
-
-// $ANTLR start "ESC"
-- (void) mESC
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:84:5: ( '\\\\' ( '\"' | '\\'' | '\\\\' ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:84:7: '\\\\' ( '\"' | '\\'' | '\\\\' ) // alt
-        {
-        [self matchChar:'\\']; if ( state.failed == YES ) return ;
-          /* element() */
-        if ([input LA:1] == '"'||[input LA:1] == '\''||[input LA:1] == '\\') {
-            [input consume];
-        state.failed = NO;
-
-        } else {
-            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-            [self recover:mse];
-            @throw mse;}
-          /* element() */
-         /* elements */
-        }
-
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "ESC"
-
-- (void) mTokens
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:39: ( IMPORT | RETURN | CLASS | METHOD | FIELD | STAT | CALL | COMMENT | SL_COMMENT | STRING | CHAR | WS ) //ruleblock
-    NSInteger alt38=12;
-    alt38 = [dfa38 predict:input];
-    switch (alt38) {
-        case 1 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:41: IMPORT // alt
-            {
-                [self mIMPORT]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 2 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:48: RETURN // alt
-            {
-                [self mRETURN]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 3 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:55: CLASS // alt
-            {
-                [self mCLASS]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 4 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:61: METHOD // alt
-            {
-                [self mMETHOD]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 5 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:68: FIELD // alt
-            {
-                [self mFIELD]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 6 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:74: STAT // alt
-            {
-                [self mSTAT]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 7 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:79: CALL // alt
-            {
-                [self mCALL]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 8 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:84: COMMENT // alt
-            {
-                [self mCOMMENT]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 9 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:92: SL_COMMENT // alt
-            {
-                [self mSL_COMMENT]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 10 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:103: STRING // alt
-            {
-                [self mSTRING]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 11 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:110: CHAR // alt
-            {
-                [self mCHAR]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 12 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:115: WS // alt
-            {
-                [self mWS]; if ( state.failed == YES ) return ;
-              /* element() */
-             /* elements */
-            }
-            break;
-
-    }
-
-}
-
-// $ANTLR start synpred1_Fuzzy_fragment
-- (void) synpred1_Fuzzy_fragment
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:41: ( IMPORT ) // ruleBlockSingleAlt
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:41: IMPORT // alt
-    {
-        [self mIMPORT]; if ( state.failed == YES ) return ;
-      /* element() */
-     /* elements */
-    }
-} // $ANTLR end synpred1_Fuzzy_fragment
-
-// $ANTLR start synpred2_Fuzzy_fragment
-- (void) synpred2_Fuzzy_fragment
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:48: ( RETURN ) // ruleBlockSingleAlt
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:48: RETURN // alt
-    {
-        [self mRETURN]; if ( state.failed == YES ) return ;
-      /* element() */
-     /* elements */
-    }
-} // $ANTLR end synpred2_Fuzzy_fragment
-
-// $ANTLR start synpred3_Fuzzy_fragment
-- (void) synpred3_Fuzzy_fragment
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:55: ( CLASS ) // ruleBlockSingleAlt
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:55: CLASS // alt
-    {
-        [self mCLASS]; if ( state.failed == YES ) return ;
-      /* element() */
-     /* elements */
-    }
-} // $ANTLR end synpred3_Fuzzy_fragment
-
-// $ANTLR start synpred4_Fuzzy_fragment
-- (void) synpred4_Fuzzy_fragment
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:61: ( METHOD ) // ruleBlockSingleAlt
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:61: METHOD // alt
-    {
-        [self mMETHOD]; if ( state.failed == YES ) return ;
-      /* element() */
-     /* elements */
-    }
-} // $ANTLR end synpred4_Fuzzy_fragment
-
-// $ANTLR start synpred5_Fuzzy_fragment
-- (void) synpred5_Fuzzy_fragment
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:68: ( FIELD ) // ruleBlockSingleAlt
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:68: FIELD // alt
-    {
-        [self mFIELD]; if ( state.failed == YES ) return ;
-      /* element() */
-     /* elements */
-    }
-} // $ANTLR end synpred5_Fuzzy_fragment
-
-// $ANTLR start synpred6_Fuzzy_fragment
-- (void) synpred6_Fuzzy_fragment
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:74: ( STAT ) // ruleBlockSingleAlt
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:74: STAT // alt
-    {
-        [self mSTAT]; if ( state.failed == YES ) return ;
-      /* element() */
-     /* elements */
-    }
-} // $ANTLR end synpred6_Fuzzy_fragment
-
-// $ANTLR start synpred7_Fuzzy_fragment
-- (void) synpred7_Fuzzy_fragment
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:79: ( CALL ) // ruleBlockSingleAlt
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:79: CALL // alt
-    {
-        [self mCALL]; if ( state.failed == YES ) return ;
-      /* element() */
-     /* elements */
-    }
-} // $ANTLR end synpred7_Fuzzy_fragment
-
-// $ANTLR start synpred8_Fuzzy_fragment
-- (void) synpred8_Fuzzy_fragment
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:84: ( COMMENT ) // ruleBlockSingleAlt
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:84: COMMENT // alt
-    {
-        [self mCOMMENT]; if ( state.failed == YES ) return ;
-      /* element() */
-     /* elements */
-    }
-} // $ANTLR end synpred8_Fuzzy_fragment
-
-// $ANTLR start synpred9_Fuzzy_fragment
-- (void) synpred9_Fuzzy_fragment
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:92: ( SL_COMMENT ) // ruleBlockSingleAlt
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g:1:92: SL_COMMENT // alt
-    {
-        [self mSL_COMMENT]; if ( state.failed == YES ) return ;
-      /* element() */
-     /* elements */
-    }
-} // $ANTLR end synpred9_Fuzzy_fragment
-
-@end // end of Fuzzy implementation // line 397
-
-/* End of code
- * =============================================================================
- */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/output1/Fuzzy.tokens b/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/output1/Fuzzy.tokens
deleted file mode 100644
index 3a7034c..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/output1/Fuzzy.tokens
+++ /dev/null
@@ -1,18 +0,0 @@
-STAT=15
-CLASS=10
-ESC=19
-CHAR=21
-ID=8
-QID=9
-TYPE=11
-IMPORT=6
-WS=4
-ARG=12
-QIDStar=5
-SL_COMMENT=18
-RETURN=7
-FIELD=14
-CALL=16
-COMMENT=17
-METHOD=13
-STRING=20
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/output1/FuzzyLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/output1/FuzzyLexer.h
deleted file mode 100644
index 9a4b194..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/output1/FuzzyLexer.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// $ANTLR 3.2 Aug 20, 2010 13:39:32 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/fuzzy/Fuzzy.g 2010-08-20 13:40:15
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-#pragma mark Cyclic DFA interface start DFA38
-@interface DFA38 : ANTLRDFA {
-}
-+ newDFA38WithRecognizer:(ANTLRBaseRecognizer *)theRecognizer;
-- initWithRecognizer:(ANTLRBaseRecognizer *)recognizer;
-@end
-
-#pragma mark Cyclic DFA interface end DFA38
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#define STAT 15
-#define CLASS 10
-#define ESC 19
-#define CHAR 21
-#define ID 8
-#define EOF -1
-#define QID 9
-#define TYPE 11
-#define IMPORT 6
-#define WS 4
-#define ARG 12
-#define QIDStar 5
-#define SL_COMMENT 18
-#define RETURN 7
-#define FIELD 14
-#define CALL 16
-#define COMMENT 17
-#define METHOD 13
-#define STRING 20
-@interface Fuzzy : ANTLRLexer { // line 283
-    DFA38 *dfa38;
-    SEL synpred9_FuzzySelector;
-    SEL synpred2_FuzzySelector;
-    SEL synpred7_FuzzySelector;
-    SEL synpred4_FuzzySelector;
-    SEL synpred8_FuzzySelector;
-    SEL synpred6_FuzzySelector;
-    SEL synpred5_FuzzySelector;
-    SEL synpred3_FuzzySelector;
-    SEL synpred1_FuzzySelector;
-}
-+ (Fuzzy *)newFuzzy:(id<ANTLRCharStream>)anInput;
-
-- (void)mIMPORT; 
-- (void)mRETURN; 
-- (void)mCLASS; 
-- (void)mMETHOD; 
-- (void)mFIELD; 
-- (void)mSTAT; 
-- (void)mCALL; 
-- (void)mCOMMENT; 
-- (void)mSL_COMMENT; 
-- (void)mSTRING; 
-- (void)mCHAR; 
-- (void)mWS; 
-- (void)mQID; 
-- (void)mQIDStar; 
-- (void)mTYPE; 
-- (void)mARG; 
-- (void)mID; 
-- (void)mESC; 
-- (void)mTokens; 
-- (void)synpred1_Fuzzy_fragment; 
-- (void)synpred2_Fuzzy_fragment; 
-- (void)synpred3_Fuzzy_fragment; 
-- (void)synpred4_Fuzzy_fragment; 
-- (void)synpred5_Fuzzy_fragment; 
-- (void)synpred6_Fuzzy_fragment; 
-- (void)synpred7_Fuzzy_fragment; 
-- (void)synpred8_Fuzzy_fragment; 
-- (void)synpred9_Fuzzy_fragment; 
-
-@end // end of Fuzzy interface
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.h
deleted file mode 100644
index 9f5067d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} T.g 2011-05-06 19:14:23
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define T__7 7
-#define ID 4
-#define INT 5
-#define WS 6
-/* interface lexer class */
-@interface TLexer : ANTLRLexer { // line 283
-/* ObjC start of actions.lexer.memVars */
-/* ObjC end of actions.lexer.memVars */
-}
-+ (void) initialize;
-+ (TLexer *)newTLexerWithCharStream:(id<ANTLRCharStream>)anInput;
-/* ObjC start actions.lexer.methodsDecl */
-/* ObjC end actions.lexer.methodsDecl */
-- (void) mT__7 ; 
-- (void) mID ; 
-- (void) mINT ; 
-- (void) mWS ; 
-- (void) mTokens ; 
-
-@end /* end of TLexer interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.m
deleted file mode 100644
index b55e539..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.m
+++ /dev/null
@@ -1,472 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : T.g
- *     -                            On : 2011-05-06 19:14:23
- *     -                 for the lexer : TLexerLexer
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} T.g 2011-05-06 19:14:23
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "TLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-/** As per Terence: No returns for lexer rules! */
-@implementation TLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"T.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (TLexer *)newTLexerWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    return [[TLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    self = [super initWithCharStream:anInput State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:5+1] retain]];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* ObjC Start of actions.lexer.methods */
-/* ObjC end of actions.lexer.methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-
-/* Start of Rules */
-// $ANTLR start "T__7"
-- (void) mT__7
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__7;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // T.g:7:6: ( 'enum' ) // ruleBlockSingleAlt
-        // T.g:7:8: 'enum' // alt
-        {
-        [self matchString:@"enum"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__7" */
-
-// $ANTLR start "ID"
-- (void) mID
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = ID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // T.g:37:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* ) // ruleBlockSingleAlt
-        // T.g:37:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* // alt
-        {
-        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-            [input consume];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            [self recover:mse];
-            @throw mse;
-        }
-
-
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0 >= '0' && LA1_0 <= '9')||(LA1_0 >= 'A' && LA1_0 <= 'Z')||LA1_0=='_'||(LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // T.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop1;
-            }
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "ID" */
-
-// $ANTLR start "INT"
-- (void) mINT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = INT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // T.g:40:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
-        // T.g:40:7: ( '0' .. '9' )+ // alt
-        {
-        // T.g:40:7: ( '0' .. '9' )+ // positiveClosureBlock
-        NSInteger cnt2 = 0;
-        do {
-            NSInteger alt2 = 2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // T.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt2 >= 1 )
-                        goto loop2;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:2];
-                    @throw eee;
-            }
-            cnt2++;
-        } while (YES);
-        loop2: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "INT" */
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // T.g:43:5: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ ) // ruleBlockSingleAlt
-        // T.g:43:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // alt
-        {
-        // T.g:43:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // positiveClosureBlock
-        NSInteger cnt3 = 0;
-        do {
-            NSInteger alt3 = 2;
-            NSInteger LA3_0 = [input LA:1];
-            if ( ((LA3_0 >= '\t' && LA3_0 <= '\n')||LA3_0=='\r'||LA3_0==' ') ) {
-                alt3=1;
-            }
-
-
-            switch (alt3) {
-                case 1 : ;
-                    // T.g: // alt
-                    {
-                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == '\r'||[input LA:1] == ' ') {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt3 >= 1 )
-                        goto loop3;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:3];
-                    @throw eee;
-            }
-            cnt3++;
-        } while (YES);
-        loop3: ;
-
-
-         _channel=99; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "WS" */
-
-- (void) mTokens
-{
-    // T.g:1:8: ( T__7 | ID | INT | WS ) //ruleblock
-    NSInteger alt4=4;
-    unichar charLA4 = [input LA:1];
-    switch (charLA4) {
-        case 'e': ;
-            {
-            NSInteger LA4_1 = [input LA:2];
-
-            if ( (LA4_1=='n') ) {
-                NSInteger LA4_5 = [input LA:3];
-
-                if ( (LA4_5=='u') ) {
-                    NSInteger LA4_6 = [input LA:4];
-
-                    if ( (LA4_6=='m') ) {
-                        NSInteger LA4_7 = [input LA:5];
-
-                        if ( ((LA4_7 >= '0' && LA4_7 <= '9')||(LA4_7 >= 'A' && LA4_7 <= 'Z')||LA4_7=='_'||(LA4_7 >= 'a' && LA4_7 <= 'z')) ) {
-                            alt4=2;
-                        }
-                        else {
-                            alt4 = 1;
-                        }
-                    }
-                    else {
-                        alt4 = 2;
-                    }
-                }
-                else {
-                    alt4 = 2;
-                }
-            }
-            else {
-                alt4 = 2;
-            }
-            }
-            break;
-        case 'A': ;
-        case 'B': ;
-        case 'C': ;
-        case 'D': ;
-        case 'E': ;
-        case 'F': ;
-        case 'G': ;
-        case 'H': ;
-        case 'I': ;
-        case 'J': ;
-        case 'K': ;
-        case 'L': ;
-        case 'M': ;
-        case 'N': ;
-        case 'O': ;
-        case 'P': ;
-        case 'Q': ;
-        case 'R': ;
-        case 'S': ;
-        case 'T': ;
-        case 'U': ;
-        case 'V': ;
-        case 'W': ;
-        case 'X': ;
-        case 'Y': ;
-        case 'Z': ;
-        case '_': ;
-        case 'a': ;
-        case 'b': ;
-        case 'c': ;
-        case 'd': ;
-        case 'f': ;
-        case 'g': ;
-        case 'h': ;
-        case 'i': ;
-        case 'j': ;
-        case 'k': ;
-        case 'l': ;
-        case 'm': ;
-        case 'n': ;
-        case 'o': ;
-        case 'p': ;
-        case 'q': ;
-        case 'r': ;
-        case 's': ;
-        case 't': ;
-        case 'u': ;
-        case 'v': ;
-        case 'w': ;
-        case 'x': ;
-        case 'y': ;
-        case 'z': ;
-            {
-            alt4=2;
-            }
-            break;
-        case '0': ;
-        case '1': ;
-        case '2': ;
-        case '3': ;
-        case '4': ;
-        case '5': ;
-        case '6': ;
-        case '7': ;
-        case '8': ;
-        case '9': ;
-            {
-            alt4=3;
-            }
-            break;
-        case '\t': ;
-        case '\n': ;
-        case '\r': ;
-        case ' ': ;
-            {
-            alt4=4;
-            }
-            break;
-
-    default: ;
-        ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:4 state:0 stream:input];
-        nvae.c = charLA4;
-        @throw nvae;
-
-    }
-
-    switch (alt4) {
-        case 1 : ;
-            // T.g:1:10: T__7 // alt
-            {
-            [self mT__7]; 
-
-
-
-            }
-            break;
-        case 2 : ;
-            // T.g:1:15: ID // alt
-            {
-            [self mID]; 
-
-
-
-            }
-            break;
-        case 3 : ;
-            // T.g:1:18: INT // alt
-            {
-            [self mINT]; 
-
-
-
-            }
-            break;
-        case 4 : ;
-            // T.g:1:22: WS // alt
-            {
-            [self mWS]; 
-
-
-
-            }
-            break;
-
-    }
-
-}
-
-@end /* end of TLexer implementation line 397 */
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.h b/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.h
deleted file mode 100644
index 898d0de..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} T.g 2011-05-06 19:14:23
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* parserHeaderFile */
-#ifndef ANTLR3TokenTypeAlreadyDefined
-#define ANTLR3TokenTypeAlreadyDefined
-typedef enum {
-    ANTLR_EOF = -1,
-    INVALID,
-    EOR,
-    DOWN,
-    UP,
-    MIN
-} ANTLR3TokenType;
-#endif
-
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define T__7 7
-#define ID 4
-#define INT 5
-#define WS 6
-#pragma mark Dynamic Global Scopes
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-
-/* Interface grammar class */
-@interface TParser : ANTLRParser { /* line 572 */
-/* ObjC start of ruleAttributeScopeMemVar */
-
-
-/* ObjC end of ruleAttributeScopeMemVar */
-/* ObjC start of globalAttributeScopeMemVar */
-
-
-/* ObjC end of globalAttributeScopeMemVar */
-/* ObjC start of actions.(actionScope).memVars */
-
-/* With this true, enum is seen as a keyword.  False, it's an identifier */
-BOOL enableEnum;
-
-/* ObjC end of actions.(actionScope).memVars */
-/* ObjC start of memVars */
-/* ObjC end of memVars */
-
- }
-
-/* ObjC start of actions.(actionScope).properties */
-/* ObjC end of actions.(actionScope).properties */
-/* ObjC start of properties */
-/* ObjC end of properties */
-
-+ (void) initialize;
-+ (id) newTParser:(id<ANTLRTokenStream>)aStream;
-/* ObjC start of actions.(actionScope).methodsDecl */
-/* ObjC end of actions.(actionScope).methodsDecl */
-
-/* ObjC start of methodsDecl */
-/* ObjC end of methodsDecl */
-
-- (void)stat; 
-- (void)identifier; 
-- (void)enumAsKeyword; 
-- (void)enumAsID; 
-
-
-@end /* end of TParser interface */
-
-/** Demonstrates how semantic predicates get hoisted out of the rule in 
- *  which they are found and used in other decisions.  This grammar illustrates
- *  how predicates can be used to distinguish between enum as a keyword and
- *  an ID *dynamically*. :)
-
- * Run "java org.antlr.Tool -dfa t.g" to generate DOT (graphviz) files.  See
- * the T_dec-1.dot file to see the predicates in action.
- */
\ No newline at end of file
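[Reviewer note, not part of the deleted sources] The hoistedPredicates example removed above gates the literal 'enum' on the runtime flag enableEnum: the generated decision takes the keyword alternative only when the flag is set, and otherwise falls through to the identifier path. A minimal plain-Java sketch of that gating idea follows; it is not the generated ObjC code, and the class and method names are illustrative only.

    // Minimal sketch of the gated-predicate idea from the hoistedPredicates example.
    // Assumption: this is plain Java, not ANTLR-generated code.
    public class EnumGateSketch {
        // Mirrors the example's enableEnum member: true => 'enum' is a keyword,
        // false => 'enum' is treated as an ordinary identifier.
        private boolean enableEnum = false;

        /** Classify a token the way the hoisted predicate does in rule 'stat'. */
        public String classify(String token) {
            if (!token.equals("enum")) {
                return "identifier";               // plain ID alternative
            }
            // The hoisted predicate: the same lookahead token resolves differently
            // depending on runtime state, not on the input alone.
            return enableEnum ? "keyword" : "identifier";
        }

        public static void main(String[] args) {
            EnumGateSketch g = new EnumGateSketch();
            System.out.println(g.classify("enum"));   // identifier (enableEnum == false)
            g.enableEnum = true;
            System.out.println(g.classify("enum"));   // keyword
        }
    }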
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.m b/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.m
deleted file mode 100644
index e440290..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.m
+++ /dev/null
@@ -1,354 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : T.g
- *     -                            On : 2011-05-06 19:14:23
- *     -                for the parser : TParserParser
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} T.g 2011-05-06 19:14:23
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "TParser.h"
-/* ----------------------------------------- */
-
-/** Demonstrates how semantic predicates get hoisted out of the rule in 
- *  which they are found and used in other decisions.  This grammar illustrates
- *  how predicates can be used to distinguish between enum as a keyword and
- *  an ID *dynamically*. :)
-
- * Run "java org.antlr.Tool -dfa t.g" to generate DOT (graphviz) files.  See
- * the T_dec-1.dot file to see the predicates in action.
- */
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_identifier_in_stat34;
-static const unsigned long long FOLLOW_identifier_in_stat34_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_enumAsKeyword_in_stat47;
-static const unsigned long long FOLLOW_enumAsKeyword_in_stat47_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_ID_in_identifier66;
-static const unsigned long long FOLLOW_ID_in_identifier66_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_enumAsID_in_identifier74;
-static const unsigned long long FOLLOW_enumAsID_in_identifier74_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_7_in_enumAsKeyword89;
-static const unsigned long long FOLLOW_7_in_enumAsKeyword89_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_7_in_enumAsID100;
-static const unsigned long long FOLLOW_7_in_enumAsID100_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule Return Scopes start
-//#pragma mark Rule return scopes start
-//
-
-#pragma mark Rule return scopes start
-
-@implementation TParser  // line 637
-
-/* ObjC start of ruleAttributeScope */
-#pragma mark Dynamic Rule Scopes
-/* ObjC end of ruleAttributeScope */
-#pragma mark global Attribute Scopes
-/* ObjC start globalAttributeScope */
-/* ObjC end globalAttributeScope */
-/* ObjC start actions.(actionScope).synthesize */
-/* ObjC end actions.(actionScope).synthesize */
-/* ObjC start synthesize() */
-/* ObjC end synthesize() */
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_identifier_in_stat34 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_identifier_in_stat34_data Count:(NSUInteger)1] retain];
-    FOLLOW_enumAsKeyword_in_stat47 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_enumAsKeyword_in_stat47_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_identifier66 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_identifier66_data Count:(NSUInteger)1] retain];
-    FOLLOW_enumAsID_in_identifier74 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_enumAsID_in_identifier74_data Count:(NSUInteger)1] retain];
-    FOLLOW_7_in_enumAsKeyword89 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_enumAsKeyword89_data Count:(NSUInteger)1] retain];
-    FOLLOW_7_in_enumAsID100 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_enumAsID100_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"ID", @"INT", @"WS", @"'enum'", nil] retain]];
-    [ANTLRBaseRecognizer setGrammarFileName:@"T.g"];
-}
-
-+ (TParser *)newTParser:(id<ANTLRTokenStream>)aStream
-{
-    return [[TParser alloc] initWithTokenStream:aStream];
-
-
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)aStream
-{
-    self = [super initWithTokenStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:4+1] retain]];
-    if ( self != nil ) {
-
-
-        /* start of actions-actionScope-init */
-
-        enableEnum = NO;
-
-        /* start of init */
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* ObjC start members */
-/* ObjC end members */
-/* ObjC start actions.(actionScope).methods */
-/* ObjC end actions.(actionScope).methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-/* ObjC start rules */
-/*
- * $ANTLR start stat
- * T.g:24:1: stat : ( identifier | enumAsKeyword );
- */
-- (void) stat
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // T.g:24:5: ( identifier | enumAsKeyword ) //ruleblock
-        NSInteger alt1=2;
-        NSInteger LA1_0 = [input LA:1];
-
-        if ( (LA1_0==ID) ) {
-            alt1=1;
-        }
-        else if ( (LA1_0==7) ) {
-            NSInteger LA1_2 = [input LA:2];
-
-            if ( ((!enableEnum)) ) {
-                alt1=1;
-            }
-            else if ( ((enableEnum)) ) {
-                alt1=2;
-            }
-            else {
-                ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:1 state:2 stream:input];
-                nvae.c = LA1_2;
-                @throw nvae;
-
-            }
-        }
-        else {
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:1 state:0 stream:input];
-            nvae.c = LA1_0;
-            @throw nvae;
-
-        }
-        switch (alt1) {
-            case 1 : ;
-                // T.g:24:7: identifier // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_identifier_in_stat34];
-                [self identifier];
-
-                [self popFollow];
-
-
-
-                NSLog(@"enum is an ID");
-
-
-                }
-                break;
-            case 2 : ;
-                // T.g:25:7: enumAsKeyword // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_enumAsKeyword_in_stat47];
-                [self enumAsKeyword];
-
-                [self popFollow];
-
-
-
-                NSLog(@"enum is a keyword");
-
-
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end stat */
-
-/*
- * $ANTLR start identifier
- * T.g:28:1: identifier : ( ID | enumAsID );
- */
-- (void) identifier
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // T.g:29:5: ( ID | enumAsID ) //ruleblock
-        NSInteger alt2=2;
-        NSInteger LA2_0 = [input LA:1];
-
-        if ( (LA2_0==ID) ) {
-            alt2=1;
-        }
-        else if ( (LA2_0==7) ) {
-            alt2=2;
-        }
-        else {
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:2 state:0 stream:input];
-            nvae.c = LA2_0;
-            @throw nvae;
-
-        }
-        switch (alt2) {
-            case 1 : ;
-                // T.g:29:7: ID // alt
-                {
-                [self match:input TokenType:ID Follow:FOLLOW_ID_in_identifier66]; 
-
-                }
-                break;
-            case 2 : ;
-                // T.g:30:7: enumAsID // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_enumAsID_in_identifier74];
-                [self enumAsID];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end identifier */
-
-/*
- * $ANTLR start enumAsKeyword
- * T.g:33:1: enumAsKeyword :{...}? 'enum' ;
- */
-- (void) enumAsKeyword
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // T.g:33:15: ({...}? 'enum' ) // ruleBlockSingleAlt
-        // T.g:33:17: {...}? 'enum' // alt
-        {
-        if ( !((enableEnum)) ) {
-            @throw [ANTLRFailedPredicateException newException:@"enumAsKeyword" predicate:@"enableEnum" stream:input];
-        }
-
-        [self match:input TokenType:7 Follow:FOLLOW_7_in_enumAsKeyword89]; 
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end enumAsKeyword */
-
-/*
- * $ANTLR start enumAsID
- * T.g:35:1: enumAsID :{...}? 'enum' ;
- */
-- (void) enumAsID
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // T.g:35:10: ({...}? 'enum' ) // ruleBlockSingleAlt
-        // T.g:35:12: {...}? 'enum' // alt
-        {
-        if ( !((!enableEnum)) ) {
-            @throw [ANTLRFailedPredicateException newException:@"enumAsID" predicate:@"!enableEnum" stream:input];
-        }
-
-        [self match:input TokenType:7 Follow:FOLLOW_7_in_enumAsID100]; 
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end enumAsID */
-/* ObjC end rules */
-
-@end /* end of TParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/antlr3.h b/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/antlr3.h
deleted file mode 100644
index 4f16279..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/antlr3.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#import <ANTLR/ANTLRBaseMapElement.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRBaseStack.h>
-#import <ANTLR/ANTLRBaseTree.h>
-#import <ANTLR/ANTLRBaseTreeAdaptor.h>
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBufferedTokenStream.h>
-#import <ANTLR/ANTLRBufferedTreeNodeStream.h>
-#import <ANTLR/ANTLRCharStream.h>
-#import <ANTLR/ANTLRCharStreamState.h>
-#import <ANTLR/ANTLRCommonErrorNode.h>
-#import <ANTLR/ANTLRCommonToken.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRDebug.h>
-#import <ANTLR/ANTLRDebugEventProxy.h>
-#import <ANTLR/ANTLRDebugEventListener.h>
-#import <ANTLR/ANTLRDebugParser.h>
-#import <ANTLR/ANTLRDebugTokenStream.h>
-#import <ANTLR/ANTLRDebugTreeAdaptor.h>
-#import <ANTLR/ANTLRDebugTreeNodeStream.h>
-#import <ANTLR/ANTLRDebugTreeParser.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRError.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRFastQueue.h>
-#import <ANTLR/ANTLRHashMap.h>
-#import <ANTLR/ANTLRHashRule.h>
-#import <ANTLR/ANTLRIntArray.h>
-#import <ANTLR/ANTLRIntStream.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRLexerRuleReturnScope.h>
-#import <ANTLR/ANTLRLinkBase.h>
-#import <ANTLR/ANTLRLookaheadStream.h>
-#import <ANTLR/ANTLRMapElement.h>
-#import <ANTLR/ANTLRMap.h>
-#import <ANTLR/ANTLRMismatchedNotSetException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRMissingTokenException.h>
-#import <ANTLR/ANTLRNodeMapElement.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRPtrBuffer.h>
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLRRecognizerSharedState.h>
-#import <ANTLR/ANTLRRewriteRuleElementStream.h>
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
-#import <ANTLR/ANTLRRuleMemo.h>
-#import <ANTLR/ANTLRRuleStack.h>
-#import <ANTLR/ANTLRRuleReturnScope.h>
-#import <ANTLR/ANTLRRuntimeException.h>
-#import <ANTLR/ANTLRStreamEnumerator.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRSymbolStack.h>
-#import <ANTLR/ANTLRToken+DebuggerSupport.h>
-#import <ANTLR/ANTLRToken.h>
-#import <ANTLR/ANTLRTokenRewriteStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRTokenStream.h>
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeException.h>
-#import <ANTLR/ANTLRTreeIterator.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-#import <ANTLR/ANTLRUnbufferedTokenStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-#import <ANTLR/ANTLRUniqueIDMap.h>
-#import <ANTLR/ANTLRUnwantedTokenException.h>
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/main.m b/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/main.m
deleted file mode 100644
index 747bdb8..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/main.m
+++ /dev/null
@@ -1,30 +0,0 @@
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-#import "TLexer.h"
-#import "TParser.h"
-
-int main() {
-    NSError *error;
-	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
-	
-	NSString *string = [NSString stringWithContentsOfFile:@"/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/examples/hoistedPredicates/input" encoding:NSASCIIStringEncoding error:&error];
-	NSLog(@"input is : %@", string);
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:string];
-	TLexer *lexer = [TLexer newTLexerWithCharStream:stream];
-	
-	//	ANTLRToken *currentToken;
-	//	while ((currentToken = [lexer nextToken]) && [currentToken type] != ANTLRTokenTypeEOF) {
-	//		NSLog(@"%@", currentToken);
-	//	}
-	
-	ANTLRCommonTokenStream *tokenStream = [ANTLRCommonTokenStream newANTLRCommonTokenStreamWithTokenSource:lexer];
-	TParser *parser = [[TParser alloc] initWithTokenStream:tokenStream];
-	[parser stat];
-	[lexer release];
-	[stream release];
-	[tokenStream release];
-	[parser release];
-	
-	[pool release];
-	return 0;
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.h
deleted file mode 100644
index 40c0fde..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// $ANTLR 3.2 Aug 07, 2010 22:08:38 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/Test.g 2010-08-11 13:24:39
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#define DIGIT 5
-#define ID 6
-#define EOF -1
-#define LETTER 4
-@interface TestLexer : ANTLRLexer {
-}
-- (void) mID; 
-- (void) mDIGIT; 
-- (void) mLETTER; 
-- (void) mTokens; 
-@end // end of Test interface
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.h.old b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.h.old
deleted file mode 100755
index f8252ca..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.h.old
+++ /dev/null
@@ -1,29 +0,0 @@
-// $ANTLR 3.0 Test.gl 2007-08-04 15:59:43
-
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-
-#pragma mark Tokens
-#define TestLexer_LETTER	4
-#define TestLexer_EOF	-1
-#define TestLexer_Tokens	7
-#define TestLexer_DIGIT	5
-#define TestLexer_ID	6
-
-@interface TestLexer : ANTLRLexer {
-    NSInteger _tokenType;
-}
-
-
-- (void) mID;
-- (void) mDIGIT;
-- (void) mLETTER;
-- (void) mTokens;
-
-
-
-@end
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.m
deleted file mode 100644
index bb95f66..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.m
+++ /dev/null
@@ -1,210 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : TestLexer.g
- *     -                            On : 2011-05-06 19:16:22
- *     -                 for the lexer : TestLexerLexer
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} TestLexer.g 2011-05-06 19:16:22
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "TestLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-/** As per Terence: No returns for lexer rules! */
-@implementation TestLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"TestLexer.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (TestLexer *)newTestLexerWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    return [[TestLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    self = [super initWithCharStream:anInput State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:4+1] retain]];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* ObjC Start of actions.lexer.methods */
-/* ObjC end of actions.lexer.methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-
-/* Start of Rules */
-// $ANTLR start "ID"
-- (void) mID
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = ID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // TestLexer.g:8:4: ( LETTER ( LETTER | DIGIT )* ) // ruleBlockSingleAlt
-        // TestLexer.g:8:6: LETTER ( LETTER | DIGIT )* // alt
-        {
-        [self mLETTER]; 
-
-
-
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0 >= '0' && LA1_0 <= '9')||(LA1_0 >= 'A' && LA1_0 <= 'Z')||(LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // TestLexer.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop1;
-            }
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "ID" */
-
-// $ANTLR start "DIGIT"
-- (void) mDIGIT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // TestLexer.g:11:16: ( '0' .. '9' ) // ruleBlockSingleAlt
-        // TestLexer.g: // alt
-        {
-        if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
-            [input consume];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            [self recover:mse];
-            @throw mse;
-        }
-
-
-        }
-
-
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "DIGIT" */
-
-// $ANTLR start "LETTER"
-- (void) mLETTER
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // TestLexer.g:15:2: ( 'a' .. 'z' | 'A' .. 'Z' ) // ruleBlockSingleAlt
-        // TestLexer.g: // alt
-        {
-        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-            [input consume];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            [self recover:mse];
-            @throw mse;
-        }
-
-
-        }
-
-
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "LETTER" */
-
-- (void) mTokens
-{
-    // TestLexer.g:1:8: ( ID ) // ruleBlockSingleAlt
-    // TestLexer.g:1:10: ID // alt
-    {
-    [self mID]; 
-
-
-
-    }
-
-
-}
-
-@end /* end of TestLexer implementation line 397 */
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.m.old b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.m.old
deleted file mode 100755
index a48de92..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.m.old
+++ /dev/null
@@ -1,157 +0,0 @@
-// $ANTLR 3.0 Test.gl 2007-08-04 15:59:43
-
-#import "TestLexer.h"
-
-/** As per Terence: No returns for lexer rules!
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-*/
-@implementation TestLexer
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-	if ((self = [super initWithCharStream:anInput]) != nil) {
-	}
-	return self;
-}
-
-- (void) dealloc
-{
-	[super dealloc];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return nil;
-}
-
-- (NSString *) grammarFileName
-{
-	return @"Test.gl";
-}
-
-
-- (void) mID
-{
-    @try {
-        ruleNestingLevel++;
-        int _type = TestLexer_ID;
-        // Test.gl:8:6: ( LETTER ( LETTER | DIGIT )* ) // ruleBlockSingleAlt
-        // Test.gl:8:6: LETTER ( LETTER | DIGIT )* // alt
-        {
-        [self mLETTER];
-
-
-        do {
-            int alt1=2;
-            {
-            	int LA1_0 = [input LA:1];
-            	if ( (LA1_0>='0' && LA1_0<='9')||(LA1_0>='A' && LA1_0<='Z')||(LA1_0>='a' && LA1_0<='z') ) {
-            		alt1 = 1;
-            	}
-
-            }
-            switch (alt1) {
-        	case 1 :
-        	    // Test.gl: // alt
-        	    {
-        	    if (([input LA:1]>='0' && [input LA:1]<='9')||([input LA:1]>='A' && [input LA:1]<='Z')||([input LA:1]>='a' && [input LA:1]<='z')) {
-        	    	[input consume];
-
-        	    } else {
-        	    	ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-        	    	[self recover:mse];	@throw mse;
-        	    }
-
-
-        	    }
-        	    break;
-
-        	default :
-        	    goto loop1;
-            }
-        } while (YES); loop1: ;
-
-
-        }
-
-        self->_tokenType = _type;
-    }
-    @finally {
-        ruleNestingLevel--;
-        // rule cleanup
-        // token+rule list labels
-
-    }
-    return;
-}
-// $ANTLR end ID
-
-
-- (void) mDIGIT
-{
-    @try {
-        ruleNestingLevel++;
-        // Test.gl:11:18: ( '0' .. '9' ) // ruleBlockSingleAlt
-        // Test.gl:11:18: '0' .. '9' // alt
-        {
-        [self matchRangeFromChar:'0' to:'9'];
-
-        }
-
-    }
-    @finally {
-        ruleNestingLevel--;
-        // rule cleanup
-        // token+rule list labels
-
-    }
-    return;
-}
-// $ANTLR end DIGIT
-
-
-- (void) mLETTER
-{
-    @try {
-        ruleNestingLevel++;
-        // Test.gl:15:4: ( 'a' .. 'z' | 'A' .. 'Z' ) // ruleBlockSingleAlt
-        // Test.gl: // alt
-        {
-        if (([input LA:1]>='A' && [input LA:1]<='Z')||([input LA:1]>='a' && [input LA:1]<='z')) {
-        	[input consume];
-
-        } else {
-        	ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-        	[self recover:mse];	@throw mse;
-        }
-
-
-        }
-
-    }
-    @finally {
-        ruleNestingLevel--;
-        // rule cleanup
-        // token+rule list labels
-
-    }
-    return;
-}
-// $ANTLR end LETTER
-
-- (void) mTokens
-{
-    // Test.gl:1:10: ( ID ) // ruleBlockSingleAlt
-    // Test.gl:1:10: ID // alt
-    {
-    [self mID];
-
-
-
-    }
-
-
-}
-
-@end
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexerLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexerLexer.h
deleted file mode 100644
index cdd1cee..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexerLexer.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} TestLexer.g 2011-05-06 19:16:22
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define DIGIT 4
-#define ID 5
-#define LETTER 6
-/* interface lexer class */
-@interface TestLexer : ANTLRLexer { // line 283
-/* ObjC start of actions.lexer.memVars */
-/* ObjC end of actions.lexer.memVars */
-}
-+ (void) initialize;
-+ (TestLexer *)newTestLexerWithCharStream:(id<ANTLRCharStream>)anInput;
-/* ObjC start actions.lexer.methodsDecl */
-/* ObjC end actions.lexer.methodsDecl */
-- (void) mID ; 
-- (void) mDIGIT ; 
-- (void) mLETTER ; 
-- (void) mTokens ; 
-
-@end /* end of TestLexer interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/antlr3.h b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/antlr3.h
deleted file mode 100644
index 4f16279..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/antlr3.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#import <ANTLR/ANTLRBaseMapElement.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRBaseStack.h>
-#import <ANTLR/ANTLRBaseTree.h>
-#import <ANTLR/ANTLRBaseTreeAdaptor.h>
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBufferedTokenStream.h>
-#import <ANTLR/ANTLRBufferedTreeNodeStream.h>
-#import <ANTLR/ANTLRCharStream.h>
-#import <ANTLR/ANTLRCharStreamState.h>
-#import <ANTLR/ANTLRCommonErrorNode.h>
-#import <ANTLR/ANTLRCommonToken.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRDebug.h>
-#import <ANTLR/ANTLRDebugEventProxy.h>
-#import <ANTLR/ANTLRDebugEventListener.h>
-#import <ANTLR/ANTLRDebugParser.h>
-#import <ANTLR/ANTLRDebugTokenStream.h>
-#import <ANTLR/ANTLRDebugTreeAdaptor.h>
-#import <ANTLR/ANTLRDebugTreeNodeStream.h>
-#import <ANTLR/ANTLRDebugTreeParser.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRError.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRFastQueue.h>
-#import <ANTLR/ANTLRHashMap.h>
-#import <ANTLR/ANTLRHashRule.h>
-#import <ANTLR/ANTLRIntArray.h>
-#import <ANTLR/ANTLRIntStream.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRLexerRuleReturnScope.h>
-#import <ANTLR/ANTLRLinkBase.h>
-#import <ANTLR/ANTLRLookaheadStream.h>
-#import <ANTLR/ANTLRMapElement.h>
-#import <ANTLR/ANTLRMap.h>
-#import <ANTLR/ANTLRMismatchedNotSetException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRMissingTokenException.h>
-#import <ANTLR/ANTLRNodeMapElement.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRPtrBuffer.h>
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLRRecognizerSharedState.h>
-#import <ANTLR/ANTLRRewriteRuleElementStream.h>
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
-#import <ANTLR/ANTLRRuleMemo.h>
-#import <ANTLR/ANTLRRuleStack.h>
-#import <ANTLR/ANTLRRuleReturnScope.h>
-#import <ANTLR/ANTLRRuntimeException.h>
-#import <ANTLR/ANTLRStreamEnumerator.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRSymbolStack.h>
-#import <ANTLR/ANTLRToken+DebuggerSupport.h>
-#import <ANTLR/ANTLRToken.h>
-#import <ANTLR/ANTLRTokenRewriteStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRTokenStream.h>
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeException.h>
-#import <ANTLR/ANTLRTreeIterator.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-#import <ANTLR/ANTLRUnbufferedTokenStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-#import <ANTLR/ANTLRUniqueIDMap.h>
-#import <ANTLR/ANTLRUnwantedTokenException.h>
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/main.m b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/main.m
deleted file mode 100644
index 464c319..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/main.m
+++ /dev/null
@@ -1,23 +0,0 @@
-#import <Cocoa/Cocoa.h>
-#import "TestLexer.h"
-#import "antlr3.h"
-#import <unistd.h>
-
-int main(int argc, const char * argv[])
-{
-	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
-	
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"abB9Cdd44"];
-	TestLexer *lexer = [[TestLexer alloc] initWithCharStream:stream];
-	id<ANTLRToken> currentToken;
-	while ((currentToken = [[lexer nextToken] retain]) && [currentToken getType] != ANTLRTokenTypeEOF) {
-		NSLog(@"%@", currentToken);
-	}
-	[lexer release];
-	[stream release];
-	
-	[pool release];
-    // sleep for objectalloc
-    // while (1) sleep(60);
-	return 0;
-}
\ No newline at end of file
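[Reviewer note, not part of the deleted sources] The deleted main.m above drives the ObjC TestLexer by hand: wrap the input string in a string stream, then pull tokens until EOF and print them. Against the ANTLR 3 Java runtime the same loop would look roughly like the sketch below, assuming Test.g were regenerated for the Java target so that a Java TestLexer class exists (that class is an assumption here).

    // Sketch only: TestLexer as a Java class is assumed; the file deleted above
    // is the ObjC-generated version of the same grammar.
    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.Token;

    public class TestLexerMain {
        public static void main(String[] args) {
            TestLexer lexer = new TestLexer(new ANTLRStringStream("abB9Cdd44"));
            // Same loop as the deleted main.m: pull tokens until EOF and print them.
            for (Token t = lexer.nextToken(); t.getType() != Token.EOF; t = lexer.nextToken()) {
                System.out.println(t.getType() + " " + t.getText());
            }
        }
    }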
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/Test.tokens b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/Test.tokens
deleted file mode 100644
index 2100fc5..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/Test.tokens
+++ /dev/null
@@ -1,3 +0,0 @@
-DIGIT=5
-ID=6
-LETTER=4
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/TestLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/TestLexer.h
deleted file mode 100644
index 61f1691..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/TestLexer.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// $ANTLR 3.2 Aug 07, 2010 22:08:38 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/Test.g 2010-08-11 13:24:39
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#define DIGIT 5
-#define ID 6
-#define EOF -1
-#define LETTER 4
-@interface Test : ANTLRLexer {
-}
-- (void) mID; 
-- (void) mDIGIT; 
-- (void) mLETTER; 
-- (void) mTokens; 
-@end // end of Test interface
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/TestLexer.tokens b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/TestLexer.tokens
deleted file mode 100644
index 2100fc5..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/TestLexer.tokens
+++ /dev/null
@@ -1,3 +0,0 @@
-DIGIT=5
-ID=6
-LETTER=4
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/TestLexerLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/TestLexerLexer.h
deleted file mode 100644
index 1170ab3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/TestLexerLexer.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// $ANTLR 3.2 Aug 07, 2010 22:08:38 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g 2010-08-11 13:41:44
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#define DIGIT 5
-#define ID 6
-#define EOF -1
-#define LETTER 4
-@interface TestLexer : ANTLRLexer {
-}
-- (void) mID; 
-- (void) mDIGIT; 
-- (void) mLETTER; 
-- (void) mTokens; 
-@end // end of TestLexer interface
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/Testlexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/Testlexer.m
deleted file mode 100644
index 3bb398b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/output1/Testlexer.m
+++ /dev/null
@@ -1,216 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 07, 2010 22:08:38
- *
- *     -  From the grammar source file : /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g
- *     -                            On : 2010-08-11 13:41:44
- *     -                 for the lexer : TestLexerLexer *
- * Editing it, at least manually, is not wise. 
- *
- * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 07, 2010 22:08:38 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g 2010-08-11 13:41:44
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "TestLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-
-/** As per Terence: No returns for lexer rules!
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-*/
-@implementation TestLexer
-
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"/usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g"];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    if ((self = [super initWithCharStream:anInput State:[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:4+1]]) != nil) {
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-// $ANTLR start "ID"
-- (void) mID
-{
-    //
-    // This is not in the Java.stg
-
-    @try {
-        NSInteger _type = ID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g:8:4: ( LETTER ( LETTER | DIGIT )* ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g:8:6: LETTER ( LETTER | DIGIT )* // alt
-        {
-            [self mLETTER];
-
-          /* element() */
-        do {
-            NSInteger alt1=2;
-            {
-                NSInteger LA1_0 = [input LA:1];
-                if ( (LA1_0>='0' && LA1_0<='9')||(LA1_0>='A' && LA1_0<='Z')||(LA1_0>='a' && LA1_0<='z') ) {
-                    alt1=1;
-                }
-
-            }
-            switch (alt1) {
-                case 1 :
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g: // alt
-                    {
-                    if (([input LA:1]>='0' && [input LA:1]<='9')||([input LA:1]>='A' && [input LA:1]<='Z')||([input LA:1]>='a' && [input LA:1]<='z')) {
-                        [input consume];
-
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;}
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop1;
-            }
-        } while (YES);
-        loop1: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end ID
-
-// $ANTLR start "DIGIT"
-- (void) mDIGIT
-{
-    //
-    // This is not in the Java.stg
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g:11:16: ( '0' .. '9' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g:11:18: '0' .. '9' // alt
-        {
-        [self matchRangeFromChar:'0' to:'9'];   /* element() */
-         /* elements */
-        }
-
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end DIGIT
-
-// $ANTLR start "LETTER"
-- (void) mLETTER
-{
-    //
-    // This is not in the Java.stg
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g:15:2: ( 'a' .. 'z' | 'A' .. 'Z' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g: // alt
-        {
-        if (([input LA:1]>='A' && [input LA:1]<='Z')||([input LA:1]>='a' && [input LA:1]<='z')) {
-            [input consume];
-
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-            [self recover:mse];
-            @throw mse;}
-          /* element() */
-         /* elements */
-        }
-
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end LETTER
-
-- (void) mTokens
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g:1:8: ( ID ) // ruleBlockSingleAlt
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/lexertest-simple/TestLexer.g:1:10: ID // alt
-    {
-        [self mID];
-
-      /* element() */
-     /* elements */
-    }
-
-
-}
-
-@end // end of TestLexer implementation
-
-/* End of code
- * =============================================================================
- */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTable.g b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTable.g
deleted file mode 100644
index 3001d02..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTable.g
+++ /dev/null
@@ -1,75 +0,0 @@
-grammar SymbolTable;
-
-/* Scope of symbol names.  Both globals and block rules need to push a new
- * symbol table upon entry and they must use the same stack.  So, I must
- * define a global scope and say that globals and block use this by saying
- * 'scope Symbols;' in those rule definitions.
- */
-
-options {
-	language=ObjC;
-}
-
-scope Symbols {
-  ANTLRPtrBuffer *names;
-}
-
-@memVars {
-int level;
-}
-
-@init {
-level = 0;
-}
-
-prog
-// scope Symbols;
-    :   globals (method)*
-    ;
-
-globals
-scope Symbols;
-@init {
-    level++;
-    $Symbols::names = [ANTLRPtrBuffer newANTLRPtrBufferWithLen:10];
-}
-    :   (decl)*
-        {
-            NSLog( @"globals: \%@", [$Symbols::names toString] );
-            level--;
-        }
-    ;
-
-method
-    :   'method' ID '(' ')' block
-    ;
-
-block
-scope Symbols;
-@init {
-    level++;
-    $Symbols::names = [ANTLRPtrBuffer newANTLRPtrBufferWithLen:10];
-}
-    :   '{' (decl)* (stat)* '}'
-        {
-            NSLog( @"level \%d symbols: \%@", level, [$Symbols::names toString] );
-            level--;
-        }
-    ;
-
-stat:   ID '=' INT ';'
-    |   block
-    ;
-
-decl:   'int' ID ';'
-        {[$Symbols::names addObject:$ID];} // add to current symbol table
-    ;
-
-ID  :   ('a'..'z')+
-    ;
-
-INT :   ('0'..'9')+
-    ;
-
-WS  :   (' '|'\n'|'\r')+ {$channel=HIDDEN;}
-    ;
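[Reviewer note, not part of the deleted sources] The Symbols dynamic scope in the SymbolTable.g grammar removed above is, conceptually, a shared stack of name lists: globals and block each push a fresh list on entry, decl adds the matched ID to the top list, and each rule reports and pops its list on exit. A rough plain-Java sketch of that bookkeeping follows; it is not ANTLR runtime API, and the names are illustrative.

    // Conceptual sketch of the Symbols dynamic scope: a shared stack of name lists.
    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;

    public class SymbolsScopeSketch {
        private final Deque<List<String>> symbols = new ArrayDeque<>();

        public void enterScope()         { symbols.push(new ArrayList<>()); } // globals/block entry
        public void declare(String name) { symbols.peek().add(name); }        // like 'decl' adding $ID
        public List<String> exitScope()  { return symbols.pop(); }            // names seen in that scope

        public static void main(String[] args) {
            SymbolsScopeSketch s = new SymbolsScopeSketch();
            s.enterScope();            // globals
            s.declare("i");
            s.enterScope();            // nested block
            s.declare("x");
            System.out.println("block symbols:   " + s.exitScope());   // [x]
            System.out.println("globals symbols: " + s.exitScope());   // [i]
        }
    }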
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.h
deleted file mode 100644
index 2689521..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} SymbolTable.g 2011-05-06 15:04:43
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define T__7 7
-#define T__8 8
-#define T__9 9
-#define T__10 10
-#define T__11 11
-#define T__12 12
-#define T__13 13
-#define T__14 14
-#define ID 4
-#define INT 5
-#define WS 6
-/* interface lexer class */
-@interface SymbolTableLexer : ANTLRLexer { // line 283
-/* ObjC start of actions.lexer.memVars */
-/* ObjC end of actions.lexer.memVars */
-}
-+ (void) initialize;
-+ (SymbolTableLexer *)newSymbolTableLexerWithCharStream:(id<ANTLRCharStream>)anInput;
-/* ObjC start actions.lexer.methodsDecl */
-/* ObjC end actions.lexer.methodsDecl */
-- (void) mT__7 ; 
-- (void) mT__8 ; 
-- (void) mT__9 ; 
-- (void) mT__10 ; 
-- (void) mT__11 ; 
-- (void) mT__12 ; 
-- (void) mT__13 ; 
-- (void) mT__14 ; 
-- (void) mID ; 
-- (void) mINT ; 
-- (void) mWS ; 
-- (void) mTokens ; 
-
-@end /* end of SymbolTableLexer interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.m
deleted file mode 100644
index 9daf547..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.m
+++ /dev/null
@@ -1,799 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : SymbolTable.g
- *     -                            On : 2011-05-06 15:04:43
- *     -                 for the lexer : SymbolTableLexerLexer
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} SymbolTable.g 2011-05-06 15:04:43
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SymbolTableLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-/** As per Terence: No returns for lexer rules! */
-@implementation SymbolTableLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"SymbolTable.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (SymbolTableLexer *)newSymbolTableLexerWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    return [[SymbolTableLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    self = [super initWithCharStream:anInput State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:12+1] retain]];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* ObjC Start of actions.lexer.methods */
-/* ObjC end of actions.lexer.methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-
-/* Start of Rules */
-// $ANTLR start "T__7"
-- (void) mT__7
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__7;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:7:6: ( '(' ) // ruleBlockSingleAlt
-        // SymbolTable.g:7:8: '(' // alt
-        {
-        [self matchChar:'(']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__7" */
-
-// $ANTLR start "T__8"
-- (void) mT__8
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__8;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:8:6: ( ')' ) // ruleBlockSingleAlt
-        // SymbolTable.g:8:8: ')' // alt
-        {
-        [self matchChar:')']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__8" */
-
-// $ANTLR start "T__9"
-- (void) mT__9
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__9;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:9:6: ( ';' ) // ruleBlockSingleAlt
-        // SymbolTable.g:9:8: ';' // alt
-        {
-        [self matchChar:';']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__9" */
-
-// $ANTLR start "T__10"
-- (void) mT__10
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__10;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:10:7: ( '=' ) // ruleBlockSingleAlt
-        // SymbolTable.g:10:9: '=' // alt
-        {
-        [self matchChar:'=']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__10" */
-
-// $ANTLR start "T__11"
-- (void) mT__11
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__11;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:11:7: ( 'int' ) // ruleBlockSingleAlt
-        // SymbolTable.g:11:9: 'int' // alt
-        {
-        [self matchString:@"int"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__11" */
-
-// $ANTLR start "T__12"
-- (void) mT__12
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__12;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:12:7: ( 'method' ) // ruleBlockSingleAlt
-        // SymbolTable.g:12:9: 'method' // alt
-        {
-        [self matchString:@"method"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__12" */
-
-// $ANTLR start "T__13"
-- (void) mT__13
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__13;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:13:7: ( '{' ) // ruleBlockSingleAlt
-        // SymbolTable.g:13:9: '{' // alt
-        {
-        [self matchChar:'{']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__13" */
-
-// $ANTLR start "T__14"
-- (void) mT__14
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__14;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:14:7: ( '}' ) // ruleBlockSingleAlt
-        // SymbolTable.g:14:9: '}' // alt
-        {
-        [self matchChar:'}']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__14" */
-
-// $ANTLR start "ID"
-- (void) mID
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = ID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:68:5: ( ( 'a' .. 'z' )+ ) // ruleBlockSingleAlt
-        // SymbolTable.g:68:9: ( 'a' .. 'z' )+ // alt
-        {
-        // SymbolTable.g:68:9: ( 'a' .. 'z' )+ // positiveClosureBlock
-        NSInteger cnt1 = 0;
-        do {
-            NSInteger alt1 = 2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // SymbolTable.g: // alt
-                    {
-                    if ((([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "ID" */
-
-// $ANTLR start "INT"
-- (void) mINT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = INT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:71:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
-        // SymbolTable.g:71:9: ( '0' .. '9' )+ // alt
-        {
-        // SymbolTable.g:71:9: ( '0' .. '9' )+ // positiveClosureBlock
-        NSInteger cnt2 = 0;
-        do {
-            NSInteger alt2 = 2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // SymbolTable.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt2 >= 1 )
-                        goto loop2;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:2];
-                    @throw eee;
-            }
-            cnt2++;
-        } while (YES);
-        loop2: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "INT" */
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SymbolTable.g:74:5: ( ( ' ' | '\\n' | '\\r' )+ ) // ruleBlockSingleAlt
-        // SymbolTable.g:74:9: ( ' ' | '\\n' | '\\r' )+ // alt
-        {
-        // SymbolTable.g:74:9: ( ' ' | '\\n' | '\\r' )+ // positiveClosureBlock
-        NSInteger cnt3 = 0;
-        do {
-            NSInteger alt3 = 2;
-            NSInteger LA3_0 = [input LA:1];
-            if ( (LA3_0=='\n'||LA3_0=='\r'||LA3_0==' ') ) {
-                alt3=1;
-            }
-
-
-            switch (alt3) {
-                case 1 : ;
-                    // SymbolTable.g: // alt
-                    {
-                    if ([input LA:1] == '\n'||[input LA:1] == '\r'||[input LA:1] == ' ') {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt3 >= 1 )
-                        goto loop3;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:3];
-                    @throw eee;
-            }
-            cnt3++;
-        } while (YES);
-        loop3: ;
-
-
-        _channel=HIDDEN;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "WS" */
-
-- (void) mTokens
-{
-    // SymbolTable.g:1:8: ( T__7 | T__8 | T__9 | T__10 | T__11 | T__12 | T__13 | T__14 | ID | INT | WS ) //ruleblock
-    NSInteger alt4=11;
-    unichar charLA4 = [input LA:1];
-    switch (charLA4) {
-        case '(': ;
-            {
-            alt4=1;
-            }
-            break;
-        case ')': ;
-            {
-            alt4=2;
-            }
-            break;
-        case ';': ;
-            {
-            alt4=3;
-            }
-            break;
-        case '=': ;
-            {
-            alt4=4;
-            }
-            break;
-        case 'i': ;
-            {
-            NSInteger LA4_5 = [input LA:2];
-
-            if ( (LA4_5=='n') ) {
-                NSInteger LA4_12 = [input LA:3];
-
-                if ( (LA4_12=='t') ) {
-                    NSInteger LA4_14 = [input LA:4];
-
-                    if ( ((LA4_14 >= 'a' && LA4_14 <= 'z')) ) {
-                        alt4=9;
-                    }
-                    else {
-                        alt4 = 5;
-                    }
-                }
-                else {
-                    alt4 = 9;
-                }
-            }
-            else {
-                alt4 = 9;
-            }
-            }
-            break;
-        case 'm': ;
-            {
-            NSInteger LA4_6 = [input LA:2];
-
-            if ( (LA4_6=='e') ) {
-                NSInteger LA4_13 = [input LA:3];
-
-                if ( (LA4_13=='t') ) {
-                    NSInteger LA4_15 = [input LA:4];
-
-                    if ( (LA4_15=='h') ) {
-                        NSInteger LA4_17 = [input LA:5];
-
-                        if ( (LA4_17=='o') ) {
-                            NSInteger LA4_18 = [input LA:6];
-
-                            if ( (LA4_18=='d') ) {
-                                NSInteger LA4_19 = [input LA:7];
-
-                                if ( ((LA4_19 >= 'a' && LA4_19 <= 'z')) ) {
-                                    alt4=9;
-                                }
-                                else {
-                                    alt4 = 6;
-                                }
-                            }
-                            else {
-                                alt4 = 9;
-                            }
-                        }
-                        else {
-                            alt4 = 9;
-                        }
-                    }
-                    else {
-                        alt4 = 9;
-                    }
-                }
-                else {
-                    alt4 = 9;
-                }
-            }
-            else {
-                alt4 = 9;
-            }
-            }
-            break;
-        case '{': ;
-            {
-            alt4=7;
-            }
-            break;
-        case '}': ;
-            {
-            alt4=8;
-            }
-            break;
-        case 'a': ;
-        case 'b': ;
-        case 'c': ;
-        case 'd': ;
-        case 'e': ;
-        case 'f': ;
-        case 'g': ;
-        case 'h': ;
-        case 'j': ;
-        case 'k': ;
-        case 'l': ;
-        case 'n': ;
-        case 'o': ;
-        case 'p': ;
-        case 'q': ;
-        case 'r': ;
-        case 's': ;
-        case 't': ;
-        case 'u': ;
-        case 'v': ;
-        case 'w': ;
-        case 'x': ;
-        case 'y': ;
-        case 'z': ;
-            {
-            alt4=9;
-            }
-            break;
-        case '0': ;
-        case '1': ;
-        case '2': ;
-        case '3': ;
-        case '4': ;
-        case '5': ;
-        case '6': ;
-        case '7': ;
-        case '8': ;
-        case '9': ;
-            {
-            alt4=10;
-            }
-            break;
-        case '\n': ;
-        case '\r': ;
-        case ' ': ;
-            {
-            alt4=11;
-            }
-            break;
-
-    default: ;
-        ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:4 state:0 stream:input];
-        nvae.c = charLA4;
-        @throw nvae;
-
-    }
-
-    switch (alt4) {
-        case 1 : ;
-            // SymbolTable.g:1:10: T__7 // alt
-            {
-            [self mT__7]; 
-
-
-
-            }
-            break;
-        case 2 : ;
-            // SymbolTable.g:1:15: T__8 // alt
-            {
-            [self mT__8]; 
-
-
-
-            }
-            break;
-        case 3 : ;
-            // SymbolTable.g:1:20: T__9 // alt
-            {
-            [self mT__9]; 
-
-
-
-            }
-            break;
-        case 4 : ;
-            // SymbolTable.g:1:25: T__10 // alt
-            {
-            [self mT__10]; 
-
-
-
-            }
-            break;
-        case 5 : ;
-            // SymbolTable.g:1:31: T__11 // alt
-            {
-            [self mT__11]; 
-
-
-
-            }
-            break;
-        case 6 : ;
-            // SymbolTable.g:1:37: T__12 // alt
-            {
-            [self mT__12]; 
-
-
-
-            }
-            break;
-        case 7 : ;
-            // SymbolTable.g:1:43: T__13 // alt
-            {
-            [self mT__13]; 
-
-
-
-            }
-            break;
-        case 8 : ;
-            // SymbolTable.g:1:49: T__14 // alt
-            {
-            [self mT__14]; 
-
-
-
-            }
-            break;
-        case 9 : ;
-            // SymbolTable.g:1:55: ID // alt
-            {
-            [self mID]; 
-
-
-
-            }
-            break;
-        case 10 : ;
-            // SymbolTable.g:1:58: INT // alt
-            {
-            [self mINT]; 
-
-
-
-            }
-            break;
-        case 11 : ;
-            // SymbolTable.g:1:62: WS // alt
-            {
-            [self mWS]; 
-
-
-
-            }
-            break;
-
-    }
-
-}
-
-@end /* end of SymbolTableLexer implementation line 397 */
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.h b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.h
deleted file mode 100644
index e67094b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.h
+++ /dev/null
@@ -1,113 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} SymbolTable.g 2011-05-06 15:04:42
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* parserHeaderFile */
-#ifndef ANTLR3TokenTypeAlreadyDefined
-#define ANTLR3TokenTypeAlreadyDefined
-typedef enum {
-    ANTLR_EOF = -1,
-    INVALID,
-    EOR,
-    DOWN,
-    UP,
-    MIN
-} ANTLR3TokenType;
-#endif
-
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define T__7 7
-#define T__8 8
-#define T__9 9
-#define T__10 10
-#define T__11 11
-#define T__12 12
-#define T__13 13
-#define T__14 14
-#define ID 4
-#define INT 5
-#define WS 6
-#pragma mark Dynamic Global Scopes
-/* globalAttributeScopeInterface */
-@interface Symbols_Scope : ANTLRSymbolsScope {
-ANTLRPtrBuffer * names;
-
-}
-/* start of globalAttributeScopeInterface properties */
-
-@property (assign, getter=getnames, setter=setnames:) ANTLRPtrBuffer * names;
-
-/* end globalAttributeScopeInterface properties */
-
-
-+ (Symbols_Scope *)newSymbols_Scope;
-- (id) init;
-/* start of globalAttributeScopeInterface methodsDecl */
-
-- (ANTLRPtrBuffer *)getnames;
-- (void)setnames:(ANTLRPtrBuffer *)aVal;
-
-/* End of globalAttributeScopeInterface methodsDecl */
-
-@end /* end of Symbols_Scope interface */
-
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-
-/* Interface grammar class */
-@interface SymbolTableParser : ANTLRParser { /* line 572 */
-/* ObjC start of ruleAttributeScopeMemVar */
-
-
-/* ObjC end of ruleAttributeScopeMemVar */
-/* ObjC start of globalAttributeScopeMemVar */
-/* globalAttributeScopeMemVar */
-//ANTLRSymbolStack *gStack;
-ANTLRSymbolStack *Symbols_stack;
-Symbols_Scope *Symbols_scope;
-
-/* ObjC end of globalAttributeScopeMemVar */
-/* ObjC start of actions.(actionScope).memVars */
-
-int level;
-
-/* ObjC end of actions.(actionScope).memVars */
-/* ObjC start of memVars */
-/* ObjC end of memVars */
-
- }
-
-/* ObjC start of actions.(actionScope).properties */
-/* ObjC end of actions.(actionScope).properties */
-/* ObjC start of properties */
-/* ObjC end of properties */
-
-+ (void) initialize;
-+ (id) newSymbolTableParser:(id<ANTLRTokenStream>)aStream;
-/* ObjC start of actions.(actionScope).methodsDecl */
-/* ObjC end of actions.(actionScope).methodsDecl */
-
-/* ObjC start of methodsDecl */
-/* ObjC end of methodsDecl */
-
-- (void)prog; 
-- (void)globals; 
-- (void)method; 
-- (void)block; 
-- (void)stat; 
-- (void)decl; 
-
-
-@end /* end of SymbolTableParser interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.m b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.m
deleted file mode 100644
index cc2b37e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.m
+++ /dev/null
@@ -1,601 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : SymbolTable.g
- *     -                            On : 2011-05-06 15:04:42
- *     -                for the parser : SymbolTableParserParser
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} SymbolTable.g 2011-05-06 15:04:42
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SymbolTableParser.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_globals_in_prog50;
-static const unsigned long long FOLLOW_globals_in_prog50_data[] = { 0x0000000000001002LL};
-static ANTLRBitSet *FOLLOW_method_in_prog53;
-static const unsigned long long FOLLOW_method_in_prog53_data[] = { 0x0000000000001002LL};
-static ANTLRBitSet *FOLLOW_decl_in_globals85;
-static const unsigned long long FOLLOW_decl_in_globals85_data[] = { 0x0000000000000802LL};
-static ANTLRBitSet *FOLLOW_12_in_method116;
-static const unsigned long long FOLLOW_12_in_method116_data[] = { 0x0000000000000010LL};
-static ANTLRBitSet *FOLLOW_ID_in_method118;
-static const unsigned long long FOLLOW_ID_in_method118_data[] = { 0x0000000000000080LL};
-static ANTLRBitSet *FOLLOW_7_in_method120;
-static const unsigned long long FOLLOW_7_in_method120_data[] = { 0x0000000000000100LL};
-static ANTLRBitSet *FOLLOW_8_in_method122;
-static const unsigned long long FOLLOW_8_in_method122_data[] = { 0x0000000000002000LL};
-static ANTLRBitSet *FOLLOW_block_in_method124;
-static const unsigned long long FOLLOW_block_in_method124_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_13_in_block153;
-static const unsigned long long FOLLOW_13_in_block153_data[] = { 0x0000000000006810LL};
-static ANTLRBitSet *FOLLOW_decl_in_block156;
-static const unsigned long long FOLLOW_decl_in_block156_data[] = { 0x0000000000006810LL};
-static ANTLRBitSet *FOLLOW_stat_in_block161;
-static const unsigned long long FOLLOW_stat_in_block161_data[] = { 0x0000000000006010LL};
-static ANTLRBitSet *FOLLOW_14_in_block165;
-static const unsigned long long FOLLOW_14_in_block165_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_ID_in_stat189;
-static const unsigned long long FOLLOW_ID_in_stat189_data[] = { 0x0000000000000400LL};
-static ANTLRBitSet *FOLLOW_10_in_stat191;
-static const unsigned long long FOLLOW_10_in_stat191_data[] = { 0x0000000000000020LL};
-static ANTLRBitSet *FOLLOW_INT_in_stat193;
-static const unsigned long long FOLLOW_INT_in_stat193_data[] = { 0x0000000000000200LL};
-static ANTLRBitSet *FOLLOW_9_in_stat195;
-static const unsigned long long FOLLOW_9_in_stat195_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_block_in_stat205;
-static const unsigned long long FOLLOW_block_in_stat205_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_11_in_decl219;
-static const unsigned long long FOLLOW_11_in_decl219_data[] = { 0x0000000000000010LL};
-static ANTLRBitSet *FOLLOW_ID_in_decl221;
-static const unsigned long long FOLLOW_ID_in_decl221_data[] = { 0x0000000000000200LL};
-static ANTLRBitSet *FOLLOW_9_in_decl223;
-static const unsigned long long FOLLOW_9_in_decl223_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-@implementation Symbols_Scope  /* globalAttributeScopeImplementation */
-/* start of synthesize -- OBJC-Line 1750 */
-
-@synthesize names;
-
-+ (Symbols_Scope *)newSymbols_Scope
-{
-    return [[[Symbols_Scope alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* start of iterate get and set functions */
-
-- (ANTLRPtrBuffer *)getnames { return( names ); }
-
-- (void)setnames:(ANTLRPtrBuffer *)aVal { names = aVal; }
-
-/* End of iterate get and set functions */
-
-@end /* end of Symbols_Scope implementation */
-
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule Return Scopes start
-//#pragma mark Rule return scopes start
-//
-
-#pragma mark Rule return scopes start
-
-@implementation SymbolTableParser  // line 637
-
-/* ObjC start of ruleAttributeScope */
-#pragma mark Dynamic Rule Scopes
-/* ObjC end of ruleAttributeScope */
-#pragma mark global Attribute Scopes
-/* ObjC start globalAttributeScope */
-static _stack;
-
-/* ObjC end globalAttributeScope */
-/* ObjC start actions.(actionScope).synthesize */
-/* ObjC end actions.(actionScope).synthesize */
-/* ObjC start synthesize() */
-/* ObjC end synthesize() */
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_globals_in_prog50 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_globals_in_prog50_data Count:(NSUInteger)1] retain];
-    FOLLOW_method_in_prog53 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_method_in_prog53_data Count:(NSUInteger)1] retain];
-    FOLLOW_decl_in_globals85 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_decl_in_globals85_data Count:(NSUInteger)1] retain];
-    FOLLOW_12_in_method116 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_12_in_method116_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_method118 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_method118_data Count:(NSUInteger)1] retain];
-    FOLLOW_7_in_method120 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_method120_data Count:(NSUInteger)1] retain];
-    FOLLOW_8_in_method122 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_method122_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_method124 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_method124_data Count:(NSUInteger)1] retain];
-    FOLLOW_13_in_block153 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_13_in_block153_data Count:(NSUInteger)1] retain];
-    FOLLOW_decl_in_block156 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_decl_in_block156_data Count:(NSUInteger)1] retain];
-    FOLLOW_stat_in_block161 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block161_data Count:(NSUInteger)1] retain];
-    FOLLOW_14_in_block165 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_14_in_block165_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_stat189 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_stat189_data Count:(NSUInteger)1] retain];
-    FOLLOW_10_in_stat191 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_10_in_stat191_data Count:(NSUInteger)1] retain];
-    FOLLOW_INT_in_stat193 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_stat193_data Count:(NSUInteger)1] retain];
-    FOLLOW_9_in_stat195 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_stat195_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_stat205 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat205_data Count:(NSUInteger)1] retain];
-    FOLLOW_11_in_decl219 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_decl219_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_decl221 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_decl221_data Count:(NSUInteger)1] retain];
-    FOLLOW_9_in_decl223 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_decl223_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"ID", @"INT", @"WS", @"'('", @"')'", @"';'", @"'='", @"'int'", @"'method'", 
- @"'{'", @"'}'", nil] retain]];
-    [ANTLRBaseRecognizer setGrammarFileName:@"SymbolTable.g"];
-}
-
-+ (SymbolTableParser *)newSymbolTableParser:(id<ANTLRTokenStream>)aStream
-{
-    return [[SymbolTableParser alloc] initWithTokenStream:aStream];
-
-
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)aStream
-{
-    self = [super initWithTokenStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:6+1] retain]];
-    if ( self != nil ) {
-
-
-        /* globalAttributeScopeInit */
-        Symbols_scope = [Symbols_Scope newSymbols_Scope];
-        Symbols_stack = [ANTLRSymbolStack newANTLRSymbolStackWithLen:30];
-        /* start of actions-actionScope-init */
-
-        level = 0;
-
-        /* start of init */
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [Symbols_stack release];
-    [super dealloc];
-}
-
-/* ObjC start members */
-/* ObjC end members */
-/* ObjC start actions.(actionScope).methods */
-/* ObjC end actions.(actionScope).methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-/* ObjC start rules */
-/*
- * $ANTLR start prog
- * SymbolTable.g:25:1: prog : globals ( method )* ;
- */
-- (void) prog
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SymbolTable.g:27:5: ( globals ( method )* ) // ruleBlockSingleAlt
-        // SymbolTable.g:27:9: globals ( method )* // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_globals_in_prog50];
-        [self globals];
-
-        [self popFollow];
-
-
-
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( (LA1_0==12) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // SymbolTable.g:27:18: method // alt
-                    {
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_method_in_prog53];
-                    [self method];
-
-                    [self popFollow];
-
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop1;
-            }
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end prog */
-
-/*
- * $ANTLR start globals
- * SymbolTable.g:30:1: globals : ( decl )* ;
- */
-- (void) globals
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-    [Symbols_stack push:[[Symbols_Scope newSymbols_Scope] retain]];
-
-
-        level++;
-        /* scopeSetAttributeRef */((Symbols_Scope *)[Symbols_stack peek]).names =  [ANTLRPtrBuffer newANTLRPtrBufferWithLen:10];
-
-    @try {
-        // SymbolTable.g:36:5: ( ( decl )* ) // ruleBlockSingleAlt
-        // SymbolTable.g:36:9: ( decl )* // alt
-        {
-        do {
-            NSInteger alt2=2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( (LA2_0==11) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // SymbolTable.g:36:10: decl // alt
-                    {
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_decl_in_globals85];
-                    [self decl];
-
-                    [self popFollow];
-
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop2;
-            }
-        } while (YES);
-        loop2: ;
-
-
-
-                    NSLog( @"globals: %@", [((Symbols_Scope *)[Symbols_stack peek]).names toString] );
-                    level--;
-                
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-        [Symbols_stack pop];
-
-    }
-    return ;
-}
-/* $ANTLR end globals */
-
-/*
- * $ANTLR start method
- * SymbolTable.g:43:1: method : 'method' ID '(' ')' block ;
- */
-- (void) method
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SymbolTable.g:44:5: ( 'method' ID '(' ')' block ) // ruleBlockSingleAlt
-        // SymbolTable.g:44:9: 'method' ID '(' ')' block // alt
-        {
-        [self match:input TokenType:12 Follow:FOLLOW_12_in_method116]; 
-
-        [self match:input TokenType:ID Follow:FOLLOW_ID_in_method118]; 
-
-        [self match:input TokenType:7 Follow:FOLLOW_7_in_method120]; 
-
-        [self match:input TokenType:8 Follow:FOLLOW_8_in_method122]; 
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_block_in_method124];
-        [self block];
-
-        [self popFollow];
-
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end method */
-
-/*
- * $ANTLR start block
- * SymbolTable.g:47:1: block : '{' ( decl )* ( stat )* '}' ;
- */
-- (void) block
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-    [Symbols_stack push:[[Symbols_Scope newSymbols_Scope] retain]];
-
-
-        level++;
-        /* scopeSetAttributeRef */((Symbols_Scope *)[Symbols_stack peek]).names =  [ANTLRPtrBuffer newANTLRPtrBufferWithLen:10];
-
-    @try {
-        // SymbolTable.g:53:5: ( '{' ( decl )* ( stat )* '}' ) // ruleBlockSingleAlt
-        // SymbolTable.g:53:9: '{' ( decl )* ( stat )* '}' // alt
-        {
-        [self match:input TokenType:13 Follow:FOLLOW_13_in_block153]; 
-
-        do {
-            NSInteger alt3=2;
-            NSInteger LA3_0 = [input LA:1];
-            if ( (LA3_0==11) ) {
-                alt3=1;
-            }
-
-
-            switch (alt3) {
-                case 1 : ;
-                    // SymbolTable.g:53:14: decl // alt
-                    {
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_decl_in_block156];
-                    [self decl];
-
-                    [self popFollow];
-
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop3;
-            }
-        } while (YES);
-        loop3: ;
-
-
-        do {
-            NSInteger alt4=2;
-            NSInteger LA4_0 = [input LA:1];
-            if ( (LA4_0==ID||LA4_0==13) ) {
-                alt4=1;
-            }
-
-
-            switch (alt4) {
-                case 1 : ;
-                    // SymbolTable.g:53:22: stat // alt
-                    {
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_stat_in_block161];
-                    [self stat];
-
-                    [self popFollow];
-
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop4;
-            }
-        } while (YES);
-        loop4: ;
-
-
-        [self match:input TokenType:14 Follow:FOLLOW_14_in_block165]; 
-
-
-                    NSLog( @"level %d symbols: %@", level, [((Symbols_Scope *)[Symbols_stack peek]).names toString] );
-                    level--;
-                
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-        [Symbols_stack pop];
-
-    }
-    return ;
-}
-/* $ANTLR end block */
-
-/*
- * $ANTLR start stat
- * SymbolTable.g:60:1: stat : ( ID '=' INT ';' | block );
- */
-- (void) stat
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SymbolTable.g:60:5: ( ID '=' INT ';' | block ) //ruleblock
-        NSInteger alt5=2;
-        NSInteger LA5_0 = [input LA:1];
-
-        if ( (LA5_0==ID) ) {
-            alt5=1;
-        }
-        else if ( (LA5_0==13) ) {
-            alt5=2;
-        }
-        else {
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:5 state:0 stream:input];
-            nvae.c = LA5_0;
-            @throw nvae;
-
-        }
-        switch (alt5) {
-            case 1 : ;
-                // SymbolTable.g:60:9: ID '=' INT ';' // alt
-                {
-                [self match:input TokenType:ID Follow:FOLLOW_ID_in_stat189]; 
-
-                [self match:input TokenType:10 Follow:FOLLOW_10_in_stat191]; 
-
-                [self match:input TokenType:INT Follow:FOLLOW_INT_in_stat193]; 
-
-                [self match:input TokenType:9 Follow:FOLLOW_9_in_stat195]; 
-
-                }
-                break;
-            case 2 : ;
-                // SymbolTable.g:61:9: block // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_block_in_stat205];
-                [self block];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end stat */
-
-/*
- * $ANTLR start decl
- * SymbolTable.g:64:1: decl : 'int' ID ';' ;
- */
-- (void) decl
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        ANTLRCommonToken *ID1 = nil;
-
-        // SymbolTable.g:64:5: ( 'int' ID ';' ) // ruleBlockSingleAlt
-        // SymbolTable.g:64:9: 'int' ID ';' // alt
-        {
-        [self match:input TokenType:11 Follow:FOLLOW_11_in_decl219]; 
-
-        ID1=(ANTLRCommonToken *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_decl221]; 
-
-        [self match:input TokenType:9 Follow:FOLLOW_9_in_decl223]; 
-
-        [((Symbols_Scope *)[Symbols_stack peek]).names addObject:ID1];
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end decl */
-/* ObjC end rules */
-
-@end /* end of SymbolTableParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/antlr3.h b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/antlr3.h
deleted file mode 100644
index 4f16279..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/antlr3.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#import <ANTLR/ANTLRBaseMapElement.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRBaseStack.h>
-#import <ANTLR/ANTLRBaseTree.h>
-#import <ANTLR/ANTLRBaseTreeAdaptor.h>
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBufferedTokenStream.h>
-#import <ANTLR/ANTLRBufferedTreeNodeStream.h>
-#import <ANTLR/ANTLRCharStream.h>
-#import <ANTLR/ANTLRCharStreamState.h>
-#import <ANTLR/ANTLRCommonErrorNode.h>
-#import <ANTLR/ANTLRCommonToken.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRDebug.h>
-#import <ANTLR/ANTLRDebugEventProxy.h>
-#import <ANTLR/ANTLRDebugEventListener.h>
-#import <ANTLR/ANTLRDebugParser.h>
-#import <ANTLR/ANTLRDebugTokenStream.h>
-#import <ANTLR/ANTLRDebugTreeAdaptor.h>
-#import <ANTLR/ANTLRDebugTreeNodeStream.h>
-#import <ANTLR/ANTLRDebugTreeParser.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRError.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRFastQueue.h>
-#import <ANTLR/ANTLRHashMap.h>
-#import <ANTLR/ANTLRHashRule.h>
-#import <ANTLR/ANTLRIntArray.h>
-#import <ANTLR/ANTLRIntStream.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRLexerRuleReturnScope.h>
-#import <ANTLR/ANTLRLinkBase.h>
-#import <ANTLR/ANTLRLookaheadStream.h>
-#import <ANTLR/ANTLRMapElement.h>
-#import <ANTLR/ANTLRMap.h>
-#import <ANTLR/ANTLRMismatchedNotSetException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRMissingTokenException.h>
-#import <ANTLR/ANTLRNodeMapElement.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRPtrBuffer.h>
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLRRecognizerSharedState.h>
-#import <ANTLR/ANTLRRewriteRuleElementStream.h>
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
-#import <ANTLR/ANTLRRuleMemo.h>
-#import <ANTLR/ANTLRRuleStack.h>
-#import <ANTLR/ANTLRRuleReturnScope.h>
-#import <ANTLR/ANTLRRuntimeException.h>
-#import <ANTLR/ANTLRStreamEnumerator.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRSymbolStack.h>
-#import <ANTLR/ANTLRToken+DebuggerSupport.h>
-#import <ANTLR/ANTLRToken.h>
-#import <ANTLR/ANTLRTokenRewriteStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRTokenStream.h>
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeException.h>
-#import <ANTLR/ANTLRTreeIterator.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-#import <ANTLR/ANTLRUnbufferedTokenStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-#import <ANTLR/ANTLRUniqueIDMap.h>
-#import <ANTLR/ANTLRUnwantedTokenException.h>
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/main.m b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/main.m
deleted file mode 100644
index edd9a23..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/main.m
+++ /dev/null
@@ -1,30 +0,0 @@
-#import <Cocoa/Cocoa.h>
-#import <antlr3.h>
-#import "SymbolTableLexer.h"
-#import "SymbolTableParser.h"
-
-int main() {
-	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
-	
-	NSString *string = [NSString stringWithContentsOfFile:@"/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/examples/scopes/input"];
-	NSLog(@"input is : %@", string);
-	ANTLRStringStream *stream = [[ANTLRStringStream alloc] initWithStringNoCopy:string];
-	SymbolTableLexer *lexer = [[SymbolTableLexer alloc] initWithCharStream:stream];
-	
-//	ANTLRCommonToken *currentToken;
-//	while ((currentToken = [lexer nextToken]) && [currentToken getType] != ANTLRTokenTypeEOF) {
-//		NSLog(@"%@", currentToken);
-//	}
-	
-	ANTLRCommonTokenStream *tokens = [[ANTLRCommonTokenStream alloc] initWithTokenSource:lexer];
-	SymbolTableParser *parser = [[SymbolTableParser alloc] initWithTokenStream:tokens];
-	[parser prog];
-
-	[lexer release];
-	[stream release];
-	[tokens release];
-	[parser release];
-	
-	[pool release];
-	return 0;
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTable.tokens b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTable.tokens
deleted file mode 100644
index 6740901..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTable.tokens
+++ /dev/null
@@ -1,19 +0,0 @@
-WS=6
-T__12=12
-T__11=11
-T__14=14
-T__13=13
-T__10=10
-INT=5
-ID=4
-T__9=9
-T__8=8
-T__7=7
-';'=13
-'}'=11
-'='=12
-'int'=14
-'('=8
-'method'=7
-')'=9
-'{'=10
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableLexer.h
deleted file mode 100644
index 47276a3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableLexer.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// $ANTLR 3.2 Aug 19, 2010 17:16:04 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g 2010-08-19 17:16:47
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-#pragma mark Cyclic DFA interface start DFA4
-@interface DFA4 : ANTLRDFA {
-}
-+ newDFA4WithRecognizer:(ANTLRBaseRecognizer *)theRecognizer;
-- initWithRecognizer:(ANTLRBaseRecognizer *)recognizer;
-@end
-
-#pragma mark Cyclic DFA interface end DFA4
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#define WS 6
-#define T__12 12
-#define T__11 11
-#define T__14 14
-#define T__13 13
-#define T__10 10
-#define INT 5
-#define ID 4
-#define EOF -1
-#define T__9 9
-#define T__8 8
-#define T__7 7
-@interface SymbolTableLexer : ANTLRLexer { // line 283
-    DFA4 *dfa4;
-}
-+ (SymbolTableLexer *)newSymbolTableLexer:(id<ANTLRCharStream>)anInput;
-
-- (void) mT__7; 
-- (void) mT__8; 
-- (void) mT__9; 
-- (void) mT__10; 
-- (void) mT__11; 
-- (void) mT__12; 
-- (void) mT__13; 
-- (void) mT__14; 
-- (void) mID; 
-- (void) mINT; 
-- (void) mWS; 
-- (void) mTokens; 
-@end // end of SymbolTableLexer interface
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableLexer.m
deleted file mode 100644
index baf9292..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableLexer.m
+++ /dev/null
@@ -1,712 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 19, 2010 17:16:04
- *
- *     -  From the grammar source file : /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g
- *     -                            On : 2010-08-19 17:16:47
- *     -                 for the lexer : SymbolTableLexerLexer *
- * Editing it, at least manually, is not wise. 
- *
- * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 19, 2010 17:16:04 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g 2010-08-19 17:16:47
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SymbolTableLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-#pragma mark Cyclic DFA implementation start DFA4
-@implementation DFA4
-const static NSInteger dfa4_eot[21] =
-    {-1,9,-1,-1,-1,-1,-1,-1,9,-1,-1,-1,9,9,9,17,9,-1,9,20,-1};
-const static NSInteger dfa4_eof[21] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static unichar dfa4_min[21] =
-    {10,101,0,0,0,0,0,0,110,0,0,0,116,116,104,97,111,0,100,97,0};
-const static unichar dfa4_max[21] =
-    {125,101,0,0,0,0,0,0,110,0,0,0,116,116,104,122,111,0,100,122,0};
-const static NSInteger dfa4_accept[21] =
-    {-1,-1,2,3,4,5,6,7,-1,9,10,11,-1,-1,-1,-1,-1,8,-1,-1,1};
-const static NSInteger dfa4_special[21] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static NSInteger dfa4_transition[] = {};
-const static NSInteger dfa4_transition0[] = {11, -1, -1, 11, -1, -1, -1, 
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 11, -1, -1, 
- -1, -1, -1, -1, -1, 2, 3, -1, -1, -1, -1, -1, -1, 10, 10, 10, 10, 10, 10, 
- 10, 10, 10, 10, -1, 7, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
- -1, -1, -1, -1, -1, -1, 9, 9, 9, 9, 9, 9, 9, 9, 8, 9, 9, 9, 1, 9, 9, 9, 
- 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 4, -1, 5};
-const static NSInteger dfa4_transition1[] = {19};
-const static NSInteger dfa4_transition2[] = {16};
-const static NSInteger dfa4_transition3[] = {18};
-const static NSInteger dfa4_transition4[] = {13};
-const static NSInteger dfa4_transition5[] = {15};
-const static NSInteger dfa4_transition6[] = {12};
-const static NSInteger dfa4_transition7[] = {14};
-const static NSInteger dfa4_transition8[] = {9, 9, 9, 9, 9, 9, 9, 9, 9, 
- 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9};
-
-
-+ () newDFA4WithRecognizer:(ANTLRBaseRecognizer *)aRecognizer
-{
-    return [[[DFA4 alloc] initWithRecognizer:aRecognizer] retain];
-}
-
-- (id) initWithRecognizer:(ANTLRBaseRecognizer *) theRecognizer
-{
-    if ((self = [super initWithRecognizer:theRecognizer]) != nil) {
-        decisionNumber = 4;
-        eot = dfa4_eot;
-        eof = dfa4_eof;
-        min = dfa4_min;
-        max = dfa4_max;
-        accept = dfa4_accept;
-        special = dfa4_special;
-        if (!(transition = calloc(21, sizeof(void*)))) {
-            [self release];
-            return nil;
-        }
-        len = 21;
-        transition[0] = dfa4_transition0;
-        transition[1] = dfa4_transition6;
-        transition[2] = dfa4_transition;
-        transition[3] = dfa4_transition;
-        transition[4] = dfa4_transition;
-        transition[5] = dfa4_transition;
-        transition[6] = dfa4_transition;
-        transition[7] = dfa4_transition;
-        transition[8] = dfa4_transition4;
-        transition[9] = dfa4_transition;
-        transition[10] = dfa4_transition;
-        transition[11] = dfa4_transition;
-        transition[12] = dfa4_transition7;
-        transition[13] = dfa4_transition5;
-        transition[14] = dfa4_transition2;
-        transition[15] = dfa4_transition8;
-        transition[16] = dfa4_transition3;
-        transition[17] = dfa4_transition;
-        transition[18] = dfa4_transition1;
-        transition[19] = dfa4_transition8;
-        transition[20] = dfa4_transition;
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    free(transition);
-    [super dealloc];
-}
-
-- (NSString *) description
-{
-    return @"1:1: Tokens : ( T__7 | T__8 | T__9 | T__10 | T__11 | T__12 | T__13 | T__14 | ID | INT | WS );";
-}
-
-
-@end
-#pragma mark Cyclic DFA implementation end DFA4
-
-
-
-/** As per Terence: No returns for lexer rules!
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-*/
-@implementation SymbolTableLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"/usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (SymbolTableLexer *)newSymbolTableLexer:(id<ANTLRCharStream>)anInput
-{
-    return [[SymbolTableLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    if ((self = [super initWithCharStream:anInput State:[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:12+1]]) != nil) {
-
-        dfa4 = [DFA4 newDFA4WithRecognizer:self];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [dfa4 release];
-    [super dealloc];
-}
-
-// Start of actions.lexer.methods
-// start methods()
-
-// Start of Rules
-// $ANTLR start "T__7"
-- (void) mT__7
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = T__7;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:7:6: ( 'method' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:7:8: 'method' // alt
-        {
-        [self matchString:@"method"]; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__7"
-
-// $ANTLR start "T__8"
-- (void) mT__8
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = T__8;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:8:6: ( '(' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:8:8: '(' // alt
-        {
-        [self matchChar:'(']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__8"
-
-// $ANTLR start "T__9"
-- (void) mT__9
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = T__9;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:9:6: ( ')' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:9:8: ')' // alt
-        {
-        [self matchChar:')']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__9"
-
-// $ANTLR start "T__10"
-- (void) mT__10
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = T__10;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:10:7: ( '{' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:10:9: '{' // alt
-        {
-        [self matchChar:'{']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__10"
-
-// $ANTLR start "T__11"
-- (void) mT__11
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = T__11;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:11:7: ( '}' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:11:9: '}' // alt
-        {
-        [self matchChar:'}']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__11"
-
-// $ANTLR start "T__12"
-- (void) mT__12
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = T__12;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:12:7: ( '=' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:12:9: '=' // alt
-        {
-        [self matchChar:'=']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__12"
-
-// $ANTLR start "T__13"
-- (void) mT__13
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = T__13;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:13:7: ( ';' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:13:9: ';' // alt
-        {
-        [self matchChar:';']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__13"
-
-// $ANTLR start "T__14"
-- (void) mT__14
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = T__14;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:14:7: ( 'int' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:14:9: 'int' // alt
-        {
-        [self matchString:@"int"]; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__14"
-
-// $ANTLR start "ID"
-- (void) mID
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = ID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:66:5: ( ( 'a' .. 'z' )+ ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:66:9: ( 'a' .. 'z' )+ // alt
-        {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:66:9: ( 'a' .. 'z' )+ // positiveClosureBlock
-        NSInteger cnt1=0;
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0>='a' && LA1_0<='z')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:66:10: 'a' .. 'z' // alt
-                    {
-                    [self matchRangeFromChar:'a' to:'z'];   /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "ID"
-
-// $ANTLR start "INT"
-- (void) mINT
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = INT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:69:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:69:9: ( '0' .. '9' )+ // alt
-        {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:69:9: ( '0' .. '9' )+ // positiveClosureBlock
-        NSInteger cnt2=0;
-        do {
-            NSInteger alt2=2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( ((LA2_0>='0' && LA2_0<='9')) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:69:10: '0' .. '9' // alt
-                    {
-                    [self matchRangeFromChar:'0' to:'9'];   /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt2 >= 1 )
-                        goto loop2;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:2];
-                    @throw eee;
-            }
-            cnt2++;
-        } while (YES);
-        loop2: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "INT"
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:72:5: ( ( ' ' | '\\n' | '\\r' )+ ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:72:9: ( ' ' | '\\n' | '\\r' )+ // alt
-        {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:72:9: ( ' ' | '\\n' | '\\r' )+ // positiveClosureBlock
-        NSInteger cnt3=0;
-        do {
-            NSInteger alt3=2;
-            NSInteger LA3_0 = [input LA:1];
-            if ( (LA3_0=='\n'||LA3_0=='\r'||LA3_0==' ') ) {
-                alt3=1;
-            }
-
-
-            switch (alt3) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g: // alt
-                    {
-                    if ([input LA:1] == '\n'||[input LA:1] == '\r'||[input LA:1] == ' ') {
-                        [input consume];
-
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;}
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt3 >= 1 )
-                        goto loop3;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:3];
-                    @throw eee;
-            }
-            cnt3++;
-        } while (YES);
-        loop3: ;
-          /* element() */
-        state.channel=99;  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "WS"
-
-- (void) mTokens
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:8: ( T__7 | T__8 | T__9 | T__10 | T__11 | T__12 | T__13 | T__14 | ID | INT | WS ) //ruleblock
-    NSInteger alt4=11;
-    alt4 = [dfa4 predict:input];
-    switch (alt4) {
-        case 1 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:10: T__7 // alt
-            {
-                [self mT__7]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 2 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:15: T__8 // alt
-            {
-                [self mT__8]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 3 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:20: T__9 // alt
-            {
-                [self mT__9]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 4 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:25: T__10 // alt
-            {
-                [self mT__10]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 5 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:31: T__11 // alt
-            {
-                [self mT__11]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 6 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:37: T__12 // alt
-            {
-                [self mT__12]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 7 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:43: T__13 // alt
-            {
-                [self mT__13]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 8 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:49: T__14 // alt
-            {
-                [self mT__14]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 9 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:55: ID // alt
-            {
-                [self mID]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 10 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:58: INT // alt
-            {
-                [self mINT]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 11 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:1:62: WS // alt
-            {
-                [self mWS]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-
-    }
-
-}
-
-@end // end of SymbolTableLexer implementation // line 397
-
-/* End of code
- * =============================================================================
- */
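
Note: the SymbolTableLexer/SymbolTableParser sources removed here are ANTLR-generated ObjC example output (from SymbolTable.g) that had been checked in under output1/, i.e. build artifacts rather than hand-written code. A minimal sketch of regenerating such output with the ANTLR 3 tool API that this project builds; the grammar location and -o directory below are illustrative only and are not taken from this change:

    import org.antlr.Tool;

    public class RegenerateSymbolTable {
        public static void main(String[] args) {
            // -o picks the directory the generated recognizer sources are written to.
            Tool antlr = new Tool(new String[] {
                "-o", "output1",       // illustrative output directory
                "SymbolTable.g"        // grammar the deleted lexer/parser were generated from
            });
            antlr.process();           // emits the *Lexer/*Parser sources
        }
    }
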
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableParser.h b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableParser.h
deleted file mode 100644
index 6f22956..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableParser.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// $ANTLR 3.2 Aug 19, 2010 17:16:04 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g 2010-08-19 17:16:47
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-#pragma mark Tokens
-#define WS 6
-#define T__12 12
-#define T__11 11
-#define T__14 14
-#define T__13 13
-#define T__10 10
-#define INT 5
-#define ID 4
-#define EOF -1
-#define T__9 9
-#define T__8 8
-#define T__7 7
-#pragma mark Dynamic Global Scopes
-@interface Symbols_Scope : ANTLRSymbolsScope {  /* globalAttributeScopeDecl */
-ANTLRHashMap * names;
-}
-/* start of properties */
-
-@property (retain, getter=getnames, setter=setnames:) ANTLRHashMap * names;
-
-/* end properties */
-
-+ (Symbols_Scope *)newSymbols_Scope;
-/* start of iterated get and set functions */
-
-- (ANTLRHashMap *)getnames;
-- (void)setnames:(ANTLRHashMap *)aVal;
-
-/* End of iterated get and set functions */
-
-@end
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-#pragma mark Rule return scopes end
-@interface SymbolTableParser : ANTLRParser { // line 529
-// start of globalAttributeScopeMemVar
-/* globalAttributeScopeMemVar */
-ANTLRSymbolStack *gStack;
-Symbols_Scope *Symbols_scope;
-
-// start of action-actionScope-memVars
-
-int level;
-
-// start of ruleAttributeScopeMemVar
-
-
-// Start of memVars
-
- }
-
-// start of action-actionScope-methodsDecl
-
-
-- (void)prog; 
-- (void)globals; 
-- (void)method; 
-- (void)block; 
-- (void)stat; 
-- (void)decl; 
-
-
-@end // end of SymbolTableParser
\ No newline at end of file
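
The deleted header above declares the parser entry rules (prog, globals, method, block, stat, decl) and the Symbols_Scope dynamic scope. For orientation, a minimal driver sketch for the same grammar if it were generated for the Java target; SymbolTableLexer/SymbolTableParser here are assumed Java-target counterparts of the deleted ObjC classes, and the input string is just a toy program accepted by the rules quoted in the generated comments of SymbolTableParser.m below:

    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.CommonTokenStream;
    import org.antlr.runtime.RecognitionException;

    public class SymbolTableDemo {
        public static void main(String[] args) throws RecognitionException {
            // Toy input: one global decl plus one method, per the grammar comments.
            String src = "int x; method f() { int y; y = 1; }";
            SymbolTableLexer lexer = new SymbolTableLexer(new ANTLRStringStream(src));
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            SymbolTableParser parser = new SymbolTableParser(tokens);
            parser.prog();   // entry rule; scope contents are logged by the rule actions
        }
    }
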
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableParser.m b/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableParser.m
deleted file mode 100644
index 100592a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output1/SymbolTableParser.m
+++ /dev/null
@@ -1,579 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 19, 2010 17:16:04
- *
- *     -  From the grammar source file : /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g
- *     -                            On : 2010-08-19 17:16:47
- *     -                for the parser : SymbolTableParserParser *
- * Editing it, at least manually, is not wise. 
- *
- * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 19, 2010 17:16:04 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g 2010-08-19 17:16:47
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SymbolTableParser.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_globals_in_prog44;
-
-const unsigned long long FOLLOW_globals_in_prog44_data[] = { 0x0000000000000082LL};
-
-static ANTLRBitSet *FOLLOW_method_in_prog47;
-
-const unsigned long long FOLLOW_method_in_prog47_data[] = { 0x0000000000000082LL};
-
-static ANTLRBitSet *FOLLOW_decl_in_globals79;
-
-const unsigned long long FOLLOW_decl_in_globals79_data[] = { 0x0000000000004002LL};
-
-static ANTLRBitSet *FOLLOW_7_in_method110;
-
-const unsigned long long FOLLOW_7_in_method110_data[] = { 0x0000000000000010LL};
-
-static ANTLRBitSet *FOLLOW_ID_in_method112;
-
-const unsigned long long FOLLOW_ID_in_method112_data[] = { 0x0000000000000100LL};
-
-static ANTLRBitSet *FOLLOW_8_in_method114;
-
-const unsigned long long FOLLOW_8_in_method114_data[] = { 0x0000000000000200LL};
-
-static ANTLRBitSet *FOLLOW_9_in_method116;
-
-const unsigned long long FOLLOW_9_in_method116_data[] = { 0x0000000000000400LL};
-
-static ANTLRBitSet *FOLLOW_block_in_method118;
-
-const unsigned long long FOLLOW_block_in_method118_data[] = { 0x0000000000000002LL};
-
-static ANTLRBitSet *FOLLOW_10_in_block147;
-
-const unsigned long long FOLLOW_10_in_block147_data[] = { 0x0000000000004C10LL};
-
-static ANTLRBitSet *FOLLOW_decl_in_block150;
-
-const unsigned long long FOLLOW_decl_in_block150_data[] = { 0x0000000000004C10LL};
-
-static ANTLRBitSet *FOLLOW_stat_in_block155;
-
-const unsigned long long FOLLOW_stat_in_block155_data[] = { 0x0000000000000C10LL};
-
-static ANTLRBitSet *FOLLOW_11_in_block159;
-
-const unsigned long long FOLLOW_11_in_block159_data[] = { 0x0000000000000002LL};
-
-static ANTLRBitSet *FOLLOW_ID_in_stat183;
-
-const unsigned long long FOLLOW_ID_in_stat183_data[] = { 0x0000000000001000LL};
-
-static ANTLRBitSet *FOLLOW_12_in_stat185;
-
-const unsigned long long FOLLOW_12_in_stat185_data[] = { 0x0000000000000020LL};
-
-static ANTLRBitSet *FOLLOW_INT_in_stat187;
-
-const unsigned long long FOLLOW_INT_in_stat187_data[] = { 0x0000000000002000LL};
-
-static ANTLRBitSet *FOLLOW_13_in_stat189;
-
-const unsigned long long FOLLOW_13_in_stat189_data[] = { 0x0000000000000002LL};
-
-static ANTLRBitSet *FOLLOW_block_in_stat199;
-
-const unsigned long long FOLLOW_block_in_stat199_data[] = { 0x0000000000000002LL};
-
-static ANTLRBitSet *FOLLOW_14_in_decl213;
-
-const unsigned long long FOLLOW_14_in_decl213_data[] = { 0x0000000000000010LL};
-
-static ANTLRBitSet *FOLLOW_ID_in_decl215;
-
-const unsigned long long FOLLOW_ID_in_decl215_data[] = { 0x0000000000002000LL};
-
-static ANTLRBitSet *FOLLOW_13_in_decl217;
-
-const unsigned long long FOLLOW_13_in_decl217_data[] = { 0x0000000000000002LL};
-
-
-
-#pragma mark Dynamic Global Scopes
-@implementation Symbols_Scope  /* globalAttributeScopeImpl */
-/* start of synthesize -- OBJC-Line 1750 */
-
-@synthesize names;
-+ (Symbols_Scope *)newSymbols_Scope
-{
-    return [[[Symbols_Scope alloc] init] retain];
-}
-/* start of iterate get and set functions */
-
-- (ANTLRHashMap *)getnames { return( names ); }
-
-- (void)setnames:(ANTLRHashMap *)aVal { names = aVal; }
-
-
-
-/* End of iterate get and set functions */
-
-@end
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule return scopes start
-
-@implementation SymbolTableParser  // line 610
-
-+ (void) initialize
-{
-    FOLLOW_globals_in_prog44 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_globals_in_prog44_data Count:(NSUInteger)1] retain];
-    FOLLOW_method_in_prog47 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_method_in_prog47_data Count:(NSUInteger)1] retain];
-    FOLLOW_decl_in_globals79 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_decl_in_globals79_data Count:(NSUInteger)1] retain];
-    FOLLOW_7_in_method110 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_method110_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_method112 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_method112_data Count:(NSUInteger)1] retain];
-    FOLLOW_8_in_method114 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_method114_data Count:(NSUInteger)1] retain];
-    FOLLOW_9_in_method116 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_method116_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_method118 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_method118_data Count:(NSUInteger)1] retain];
-    FOLLOW_10_in_block147 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_10_in_block147_data Count:(NSUInteger)1] retain];
-    FOLLOW_decl_in_block150 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_decl_in_block150_data Count:(NSUInteger)1] retain];
-    FOLLOW_stat_in_block155 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block155_data Count:(NSUInteger)1] retain];
-    FOLLOW_11_in_block159 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_block159_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_stat183 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_stat183_data Count:(NSUInteger)1] retain];
-    FOLLOW_12_in_stat185 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_12_in_stat185_data Count:(NSUInteger)1] retain];
-    FOLLOW_INT_in_stat187 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_stat187_data Count:(NSUInteger)1] retain];
-    FOLLOW_13_in_stat189 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_13_in_stat189_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_stat199 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat199_data Count:(NSUInteger)1] retain];
-    FOLLOW_14_in_decl213 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_14_in_decl213_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_decl215 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_decl215_data Count:(NSUInteger)1] retain];
-    FOLLOW_13_in_decl217 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_13_in_decl217_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[[NSArray alloc] initWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"ID", @"INT", @"WS", @"'method'", @"'('", @"')'", @"'{'", @"'}'", @"'='", 
- @"';'", @"'int'", nil] retain]];
-}
-
-+ (SymbolTableParser *)newSymbolTableParser:(id<ANTLRTokenStream>)aStream
-{
-    return [[SymbolTableParser alloc] initWithTokenStream:aStream];
-
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)aStream
-{
-    if ((self = [super initWithTokenStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:6+1] retain]]) != nil) {
-
-
-        (Symbols_Scope *)Symbols_scope = [Symbols_Scope newSymbols_Scope];
-                                                        
-        // start of actions-actionScope-init
-
-        level = 0;
-
-        // start of init
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [Symbols_scope release];
-    [super dealloc];
-}
-// start actions.actionScope.methods
-// start methods()
-// start rules
-/*
- * $ANTLR start prog
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:25:1: prog : globals ( method )* ;
- */
-- (void) prog
-{
-    // ruleScopeSetUp
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:25:5: ( globals ( method )* ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:25:9: globals ( method )* // alt
-        {
-        [self pushFollow:FOLLOW_globals_in_prog44];
-        [self globals];
-        [self popFollow];
-
-          /* element() */
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( (LA1_0==7) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:25:18: method // alt
-                    {
-                    [self pushFollow:FOLLOW_method_in_prog47];
-                    [self method];
-                    [self popFollow];
-
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop1;
-            }
-        } while (YES);
-        loop1: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end prog */
-/*
- * $ANTLR start globals
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:28:1: globals : ( decl )* ;
- */
-- (void) globals
-{
-    // ruleScopeSetUp
-    gStack = [ANTLRSymbolStack newANTLRSymbolStackWithLen:30];
-    Symbols_scope = [[Symbols_Scope newSymbols_Scope] retain];
-    [gStack push:Symbols_scope];
-
-
-        level++;
-        /* scopeSetAttributeRef */
-    ((Symbols_Scope *)[gStack peek]).names =  [ANTLRHashMap newANTLRHashMapWithLen:101];
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:34:5: ( ( decl )* ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:34:9: ( decl )* // alt
-        {
-        do {
-            NSInteger alt2=2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( (LA2_0==14) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:34:10: decl // alt
-                    {
-                    [self pushFollow:FOLLOW_decl_in_globals79];
-                    [self decl];
-                    [self popFollow];
-
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop2;
-            }
-        } while (YES);
-        loop2: ;
-          /* element() */
-
-                NSLog(@"globals: %@", /* scopeAttributeRef */
-        ((Symbols_Scope *)[gStack peek]).names);
-                level--;
-                  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-        [gStack pop];
-
-    }
-    return ;
-}
-/* $ANTLR end globals */
-/*
- * $ANTLR start method
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:41:1: method : 'method' ID '(' ')' block ;
- */
-- (void) method
-{
-    // ruleScopeSetUp
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:42:5: ( 'method' ID '(' ')' block ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:42:9: 'method' ID '(' ')' block // alt
-        {
-        [self match:input TokenType:7 Follow:FOLLOW_7_in_method110];   /* element() */
-        [self match:input TokenType:ID Follow:FOLLOW_ID_in_method112];   /* element() */
-        [self match:input TokenType:8 Follow:FOLLOW_8_in_method114];   /* element() */
-        [self match:input TokenType:9 Follow:FOLLOW_9_in_method116];   /* element() */
-        [self pushFollow:FOLLOW_block_in_method118];
-        [self block];
-        [self popFollow];
-
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end method */
-/*
- * $ANTLR start block
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:45:1: block : '{' ( decl )* ( stat )* '}' ;
- */
-- (void) block
-{
-    // ruleScopeSetUp
-    gStack = [ANTLRSymbolStack newANTLRSymbolStackWithLen:30];
-    Symbols_scope = [[Symbols_Scope newSymbols_Scope] retain];
-    [gStack push:Symbols_scope];
-
-
-        level++;
-        /* scopeSetAttributeRef */
-    ((Symbols_Scope *)[gStack peek]).names =  [ANTLRHashMap newANTLRHashMapWithLen:101];
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:51:5: ( '{' ( decl )* ( stat )* '}' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:51:9: '{' ( decl )* ( stat )* '}' // alt
-        {
-        [self match:input TokenType:10 Follow:FOLLOW_10_in_block147];   /* element() */
-        do {
-            NSInteger alt3=2;
-            NSInteger LA3_0 = [input LA:1];
-            if ( (LA3_0==14) ) {
-                alt3=1;
-            }
-
-
-            switch (alt3) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:51:14: decl // alt
-                    {
-                    [self pushFollow:FOLLOW_decl_in_block150];
-                    [self decl];
-                    [self popFollow];
-
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop3;
-            }
-        } while (YES);
-        loop3: ;
-          /* element() */
-        do {
-            NSInteger alt4=2;
-            NSInteger LA4_0 = [input LA:1];
-            if ( (LA4_0==ID||LA4_0==10) ) {
-                alt4=1;
-            }
-
-
-            switch (alt4) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:51:22: stat // alt
-                    {
-                    [self pushFollow:FOLLOW_stat_in_block155];
-                    [self stat];
-                    [self popFollow];
-
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop4;
-            }
-        } while (YES);
-        loop4: ;
-          /* element() */
-        [self match:input TokenType:11 Follow:FOLLOW_11_in_block159];   /* element() */
-
-                NSLog(@"level %d symbols: %@", level, /* scopeAttributeRef */
-        ((Symbols_Scope *)[gStack peek]).names);
-                level--;
-                  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-        [gStack pop];
-
-    }
-    return ;
-}
-/* $ANTLR end block */
-/*
- * $ANTLR start stat
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:58:1: stat : ( ID '=' INT ';' | block );
- */
-- (void) stat
-{
-    // ruleScopeSetUp
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:58:5: ( ID '=' INT ';' | block ) //ruleblock
-        NSInteger alt5=2;
-        NSInteger LA5_0 = [input LA:1];
-
-        if ( (LA5_0==ID) ) {
-            alt5=1;
-        }
-        else if ( (LA5_0==10) ) {
-            alt5=2;
-        }
-        else {
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:5 state:0 stream:input];
-            @throw nvae;
-        }
-        switch (alt5) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:58:9: ID '=' INT ';' // alt
-                {
-                [self match:input TokenType:ID Follow:FOLLOW_ID_in_stat183];   /* element() */
-                [self match:input TokenType:12 Follow:FOLLOW_12_in_stat185];   /* element() */
-                [self match:input TokenType:INT Follow:FOLLOW_INT_in_stat187];   /* element() */
-                [self match:input TokenType:13 Follow:FOLLOW_13_in_stat189];   /* element() */
-                 /* elements */
-                }
-                break;
-            case 2 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:59:9: block // alt
-                {
-                [self pushFollow:FOLLOW_block_in_stat199];
-                [self block];
-                [self popFollow];
-
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end stat */
-/*
- * $ANTLR start decl
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:62:1: decl : 'int' ID ';' ;
- */
-- (void) decl
-{
-    // ruleScopeSetUp
-
-    @try {
-        id<ANTLRToken> ID1 = nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:62:5: ( 'int' ID ';' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/scopes/SymbolTable.g:62:9: 'int' ID ';' // alt
-        {
-        [self match:input TokenType:14 Follow:FOLLOW_14_in_decl213];   /* element() */
-        ID1=(id<ANTLRToken>)[self match:input TokenType:ID Follow:FOLLOW_ID_in_decl215];   /* element() */
-        [self match:input TokenType:13 Follow:FOLLOW_13_in_decl217];   /* element() */
-        [/* scopeAttributeRef */
-        ((Symbols_Scope *)[gStack peek]).names addObject:ID1];  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end decl */
-
-@end // end of SymbolTableParser implementation line 669
-
-/* End of code
- * =============================================================================
- */
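
The implementation deleted above drives a dynamic Symbols scope: globals and block push a fresh scope and bump level, decl records the matched ID in the scope's names map, and the scope is logged and popped on rule exit. A plain-Java sketch of that same pattern, with names chosen only to mirror the generated code:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;

    public class ScopeStackSketch {
        static final class SymbolsScope {            // mirrors Symbols_Scope
            final List<String> names = new ArrayList<>();
        }

        private final Deque<SymbolsScope> gStack = new ArrayDeque<>();
        private int level = 0;

        void enterBlock() {                          // like the scope setup in globals/block
            gStack.push(new SymbolsScope());
            level++;
        }

        void decl(String id) {                       // like decl adding ID1 to the scope's names
            gStack.peek().names.add(id);
        }

        void exitBlock() {                           // like the trailing action plus the @finally pop
            System.out.println("level " + level + " symbols: " + gStack.peek().names);
            level--;
            gStack.pop();
        }
    }
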
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.h
deleted file mode 100644
index 6c33456..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} SimpleC.g 2011-05-06 15:09:17
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define ARG_DEF 4
-#define BLOCK 5
-#define FUNC_DECL 6
-#define FUNC_DEF 7
-#define FUNC_HDR 8
-#define K_CHAR 9
-#define K_COMMA 10
-#define K_EQ 11
-#define K_EQEQ 12
-#define K_FOR 13
-#define K_ID 14
-#define K_INT 15
-#define K_INT_TYPE 16
-#define K_LCURLY 17
-#define K_LCURVE 18
-#define K_LT 19
-#define K_PLUS 20
-#define K_RCURLY 21
-#define K_RCURVE 22
-#define K_SEMICOLON 23
-#define K_VOID 24
-#define VAR_DEF 25
-#define WS 26
-/* interface lexer class */
-@interface SimpleCLexer : ANTLRLexer { // line 283
-/* ObjC start of actions.lexer.memVars */
-/* ObjC end of actions.lexer.memVars */
-}
-+ (void) initialize;
-+ (SimpleCLexer *)newSimpleCLexerWithCharStream:(id<ANTLRCharStream>)anInput;
-/* ObjC start actions.lexer.methodsDecl */
-/* ObjC end actions.lexer.methodsDecl */
-- (void) mK_FOR ; 
-- (void) mK_CHAR ; 
-- (void) mK_INT_TYPE ; 
-- (void) mK_VOID ; 
-- (void) mK_ID ; 
-- (void) mK_INT ; 
-- (void) mK_LCURVE ; 
-- (void) mK_RCURVE ; 
-- (void) mK_PLUS ; 
-- (void) mK_COMMA ; 
-- (void) mK_SEMICOLON ; 
-- (void) mK_LT ; 
-- (void) mK_EQ ; 
-- (void) mK_EQEQ ; 
-- (void) mK_LCURLY ; 
-- (void) mK_RCURLY ; 
-- (void) mWS ; 
-- (void) mTokens ; 
-
-@end /* end of SimpleCLexer interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.java b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.java
deleted file mode 100644
index 58cff4e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.java
+++ /dev/null
@@ -1,730 +0,0 @@
-// $ANTLR 3.2 Aug 13, 2010 14:19:31 SimpleC.g 2010-08-13 14:29:19
-
-import org.antlr.runtime.*;
-import java.util.Stack;
-import java.util.List;
-import java.util.ArrayList;
-
-public class SimpleCLexer extends Lexer {
-    public static final int LT=18;
-    public static final int T__26=26;
-    public static final int T__25=25;
-    public static final int T__24=24;
-    public static final int T__23=23;
-    public static final int T__22=22;
-    public static final int T__21=21;
-    public static final int CHAR=15;
-    public static final int FOR=13;
-    public static final int FUNC_HDR=6;
-    public static final int INT=12;
-    public static final int FUNC_DEF=8;
-    public static final int INT_TYPE=14;
-    public static final int ID=10;
-    public static final int EOF=-1;
-    public static final int FUNC_DECL=7;
-    public static final int ARG_DEF=5;
-    public static final int WS=20;
-    public static final int BLOCK=9;
-    public static final int PLUS=19;
-    public static final int VOID=16;
-    public static final int EQ=11;
-    public static final int VAR_DEF=4;
-    public static final int EQEQ=17;
-
-    // delegates
-    // delegators
-
-    public SimpleCLexer() {;} 
-    public SimpleCLexer(CharStream input) {
-        this(input, new RecognizerSharedState());
-    }
-    public SimpleCLexer(CharStream input, RecognizerSharedState state) {
-        super(input,state);
-
-    }
-    public String getGrammarFileName() { return "SimpleC.g"; }
-
-    // $ANTLR start "T__21"
-    public final void mT__21() throws RecognitionException {
-        try {
-            int _type = T__21;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:7:7: ( ';' )
-            // SimpleC.g:7:9: ';'
-            {
-            match(';'); 
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "T__21"
-
-    // $ANTLR start "T__22"
-    public final void mT__22() throws RecognitionException {
-        try {
-            int _type = T__22;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:8:7: ( '(' )
-            // SimpleC.g:8:9: '('
-            {
-            match('('); 
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "T__22"
-
-    // $ANTLR start "T__23"
-    public final void mT__23() throws RecognitionException {
-        try {
-            int _type = T__23;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:9:7: ( ',' )
-            // SimpleC.g:9:9: ','
-            {
-            match(','); 
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "T__23"
-
-    // $ANTLR start "T__24"
-    public final void mT__24() throws RecognitionException {
-        try {
-            int _type = T__24;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:10:7: ( ')' )
-            // SimpleC.g:10:9: ')'
-            {
-            match(')'); 
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "T__24"
-
-    // $ANTLR start "T__25"
-    public final void mT__25() throws RecognitionException {
-        try {
-            int _type = T__25;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:11:7: ( '{' )
-            // SimpleC.g:11:9: '{'
-            {
-            match('{'); 
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "T__25"
-
-    // $ANTLR start "T__26"
-    public final void mT__26() throws RecognitionException {
-        try {
-            int _type = T__26;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:12:7: ( '}' )
-            // SimpleC.g:12:9: '}'
-            {
-            match('}'); 
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "T__26"
-
-    // $ANTLR start "FOR"
-    public final void mFOR() throws RecognitionException {
-        try {
-            int _type = FOR;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:91:5: ( 'for' )
-            // SimpleC.g:91:7: 'for'
-            {
-            match("for"); 
-
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "FOR"
-
-    // $ANTLR start "INT_TYPE"
-    public final void mINT_TYPE() throws RecognitionException {
-        try {
-            int _type = INT_TYPE;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:92:10: ( 'int' )
-            // SimpleC.g:92:12: 'int'
-            {
-            match("int"); 
-
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "INT_TYPE"
-
-    // $ANTLR start "CHAR"
-    public final void mCHAR() throws RecognitionException {
-        try {
-            int _type = CHAR;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:93:5: ( 'char' )
-            // SimpleC.g:93:7: 'char'
-            {
-            match("char"); 
-
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "CHAR"
-
-    // $ANTLR start "VOID"
-    public final void mVOID() throws RecognitionException {
-        try {
-            int _type = VOID;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:94:5: ( 'void' )
-            // SimpleC.g:94:7: 'void'
-            {
-            match("void"); 
-
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "VOID"
-
-    // $ANTLR start "ID"
-    public final void mID() throws RecognitionException {
-        try {
-            int _type = ID;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:96:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* )
-            // SimpleC.g:96:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )*
-            {
-            if ( (input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) {
-                input.consume();
-
-            }
-            else {
-                MismatchedSetException mse = new MismatchedSetException(null,input);
-                recover(mse);
-                throw mse;}
-
-            // SimpleC.g:96:33: ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )*
-            loop1:
-            do {
-                int alt1=2;
-                int LA1_0 = input.LA(1);
-
-                if ( ((LA1_0>='0' && LA1_0<='9')||(LA1_0>='A' && LA1_0<='Z')||LA1_0=='_'||(LA1_0>='a' && LA1_0<='z')) ) {
-                    alt1=1;
-                }
-
-
-                switch (alt1) {
-            	case 1 :
-            	    // SimpleC.g:
-            	    {
-            	    if ( (input.LA(1)>='0' && input.LA(1)<='9')||(input.LA(1)>='A' && input.LA(1)<='Z')||input.LA(1)=='_'||(input.LA(1)>='a' && input.LA(1)<='z') ) {
-            	        input.consume();
-
-            	    }
-            	    else {
-            	        MismatchedSetException mse = new MismatchedSetException(null,input);
-            	        recover(mse);
-            	        throw mse;}
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    break loop1;
-                }
-            } while (true);
-
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "ID"
-
-    // $ANTLR start "INT"
-    public final void mINT() throws RecognitionException {
-        try {
-            int _type = INT;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            CommonToken int=null;
-            List list_int=null;
-            // SimpleC.g:99:5: ( (int+= ( '0' .. '9' ) )+ )
-            // SimpleC.g:99:7: (int+= ( '0' .. '9' ) )+
-            {
-            // SimpleC.g:99:10: (int+= ( '0' .. '9' ) )+
-            int cnt2=0;
-            loop2:
-            do {
-                int alt2=2;
-                int LA2_0 = input.LA(1);
-
-                if ( ((LA2_0>='0' && LA2_0<='9')) ) {
-                    alt2=1;
-                }
-
-
-                switch (alt2) {
-            	case 1 :
-            	    // SimpleC.g:99:10: int+= ( '0' .. '9' )
-            	    {
-            	    // SimpleC.g:99:12: ( '0' .. '9' )
-            	    // SimpleC.g:99:13: '0' .. '9'
-            	    {
-            	    matchRange('0','9'); 
-
-            	    }
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    if ( cnt2 >= 1 ) break loop2;
-                        EarlyExitException eee =
-                            new EarlyExitException(2, input);
-                        throw eee;
-                }
-                cnt2++;
-            } while (true);
-
-            NSLog(@"%@", list_int);
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "INT"
-
-    // $ANTLR start "EQ"
-    public final void mEQ() throws RecognitionException {
-        try {
-            int _type = EQ;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:102:6: ( '=' )
-            // SimpleC.g:102:8: '='
-            {
-            match('='); 
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "EQ"
-
-    // $ANTLR start "EQEQ"
-    public final void mEQEQ() throws RecognitionException {
-        try {
-            int _type = EQEQ;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:103:6: ( '==' )
-            // SimpleC.g:103:8: '=='
-            {
-            match("=="); 
-
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "EQEQ"
-
-    // $ANTLR start "LT"
-    public final void mLT() throws RecognitionException {
-        try {
-            int _type = LT;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:104:6: ( '<' )
-            // SimpleC.g:104:8: '<'
-            {
-            match('<'); 
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "LT"
-
-    // $ANTLR start "PLUS"
-    public final void mPLUS() throws RecognitionException {
-        try {
-            int _type = PLUS;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:105:6: ( '+' )
-            // SimpleC.g:105:8: '+'
-            {
-            match('+'); 
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "PLUS"
-
-    // $ANTLR start "WS"
-    public final void mWS() throws RecognitionException {
-        try {
-            int _type = WS;
-            int _channel = DEFAULT_TOKEN_CHANNEL;
-            // SimpleC.g:107:5: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ )
-            // SimpleC.g:107:9: ( ' ' | '\\t' | '\\r' | '\\n' )+
-            {
-            // SimpleC.g:107:9: ( ' ' | '\\t' | '\\r' | '\\n' )+
-            int cnt3=0;
-            loop3:
-            do {
-                int alt3=2;
-                int LA3_0 = input.LA(1);
-
-                if ( ((LA3_0>='\t' && LA3_0<='\n')||LA3_0=='\r'||LA3_0==' ') ) {
-                    alt3=1;
-                }
-
-
-                switch (alt3) {
-            	case 1 :
-            	    // SimpleC.g:
-            	    {
-            	    if ( (input.LA(1)>='\t' && input.LA(1)<='\n')||input.LA(1)=='\r'||input.LA(1)==' ' ) {
-            	        input.consume();
-
-            	    }
-            	    else {
-            	        MismatchedSetException mse = new MismatchedSetException(null,input);
-            	        recover(mse);
-            	        throw mse;}
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    if ( cnt3 >= 1 ) break loop3;
-                        EarlyExitException eee =
-                            new EarlyExitException(3, input);
-                        throw eee;
-                }
-                cnt3++;
-            } while (true);
-
-             _channel=99; 
-
-            }
-
-            state.type = _type;
-            state.channel = _channel;
-        }
-        finally {
-        }
-    }
-    // $ANTLR end "WS"
-
-    public void mTokens() throws RecognitionException {
-        // SimpleC.g:1:8: ( T__21 | T__22 | T__23 | T__24 | T__25 | T__26 | FOR | INT_TYPE | CHAR | VOID | ID | INT | EQ | EQEQ | LT | PLUS | WS )
-        int alt4=17;
-        alt4 = dfa4.predict(input);
-        switch (alt4) {
-            case 1 :
-                // SimpleC.g:1:10: T__21
-                {
-                mT__21(); 
-
-                }
-                break;
-            case 2 :
-                // SimpleC.g:1:16: T__22
-                {
-                mT__22(); 
-
-                }
-                break;
-            case 3 :
-                // SimpleC.g:1:22: T__23
-                {
-                mT__23(); 
-
-                }
-                break;
-            case 4 :
-                // SimpleC.g:1:28: T__24
-                {
-                mT__24(); 
-
-                }
-                break;
-            case 5 :
-                // SimpleC.g:1:34: T__25
-                {
-                mT__25(); 
-
-                }
-                break;
-            case 6 :
-                // SimpleC.g:1:40: T__26
-                {
-                mT__26(); 
-
-                }
-                break;
-            case 7 :
-                // SimpleC.g:1:46: FOR
-                {
-                mFOR(); 
-
-                }
-                break;
-            case 8 :
-                // SimpleC.g:1:50: INT_TYPE
-                {
-                mINT_TYPE(); 
-
-                }
-                break;
-            case 9 :
-                // SimpleC.g:1:59: CHAR
-                {
-                mCHAR(); 
-
-                }
-                break;
-            case 10 :
-                // SimpleC.g:1:64: VOID
-                {
-                mVOID(); 
-
-                }
-                break;
-            case 11 :
-                // SimpleC.g:1:69: ID
-                {
-                mID(); 
-
-                }
-                break;
-            case 12 :
-                // SimpleC.g:1:72: INT
-                {
-                mINT(); 
-
-                }
-                break;
-            case 13 :
-                // SimpleC.g:1:76: EQ
-                {
-                mEQ(); 
-
-                }
-                break;
-            case 14 :
-                // SimpleC.g:1:79: EQEQ
-                {
-                mEQEQ(); 
-
-                }
-                break;
-            case 15 :
-                // SimpleC.g:1:84: LT
-                {
-                mLT(); 
-
-                }
-                break;
-            case 16 :
-                // SimpleC.g:1:87: PLUS
-                {
-                mPLUS(); 
-
-                }
-                break;
-            case 17 :
-                // SimpleC.g:1:92: WS
-                {
-                mWS(); 
-
-                }
-                break;
-
-        }
-
-    }
-
-
-    protected DFA4 dfa4 = new DFA4(this);
-    static final String DFA4_eotS =
-        "\7\uffff\4\13\2\uffff\1\26\3\uffff\4\13\2\uffff\1\33\1\34\2\13\2"+
-        "\uffff\1\37\1\40\2\uffff";
-    static final String DFA4_eofS =
-        "\41\uffff";
-    static final String DFA4_minS =
-        "\1\11\6\uffff\1\157\1\156\1\150\1\157\2\uffff\1\75\3\uffff\1\162"+
-        "\1\164\1\141\1\151\2\uffff\2\60\1\162\1\144\2\uffff\2\60\2\uffff";
-    static final String DFA4_maxS =
-        "\1\175\6\uffff\1\157\1\156\1\150\1\157\2\uffff\1\75\3\uffff\1\162"+
-        "\1\164\1\141\1\151\2\uffff\2\172\1\162\1\144\2\uffff\2\172\2\uffff";
-    static final String DFA4_acceptS =
-        "\1\uffff\1\1\1\2\1\3\1\4\1\5\1\6\4\uffff\1\13\1\14\1\uffff\1\17"+
-        "\1\20\1\21\4\uffff\1\16\1\15\4\uffff\1\7\1\10\2\uffff\1\11\1\12";
-    static final String DFA4_specialS =
-        "\41\uffff}>";
-    static final String[] DFA4_transitionS = {
-            "\2\20\2\uffff\1\20\22\uffff\1\20\7\uffff\1\2\1\4\1\uffff\1\17"+
-            "\1\3\3\uffff\12\14\1\uffff\1\1\1\16\1\15\3\uffff\32\13\4\uffff"+
-            "\1\13\1\uffff\2\13\1\11\2\13\1\7\2\13\1\10\14\13\1\12\4\13\1"+
-            "\5\1\uffff\1\6",
-            "",
-            "",
-            "",
-            "",
-            "",
-            "",
-            "\1\21",
-            "\1\22",
-            "\1\23",
-            "\1\24",
-            "",
-            "",
-            "\1\25",
-            "",
-            "",
-            "",
-            "\1\27",
-            "\1\30",
-            "\1\31",
-            "\1\32",
-            "",
-            "",
-            "\12\13\7\uffff\32\13\4\uffff\1\13\1\uffff\32\13",
-            "\12\13\7\uffff\32\13\4\uffff\1\13\1\uffff\32\13",
-            "\1\35",
-            "\1\36",
-            "",
-            "",
-            "\12\13\7\uffff\32\13\4\uffff\1\13\1\uffff\32\13",
-            "\12\13\7\uffff\32\13\4\uffff\1\13\1\uffff\32\13",
-            "",
-            ""
-    };
-
-    static final short[] DFA4_eot = DFA.unpackEncodedString(DFA4_eotS);
-    static final short[] DFA4_eof = DFA.unpackEncodedString(DFA4_eofS);
-    static final char[] DFA4_min = DFA.unpackEncodedStringToUnsignedChars(DFA4_minS);
-    static final char[] DFA4_max = DFA.unpackEncodedStringToUnsignedChars(DFA4_maxS);
-    static final short[] DFA4_accept = DFA.unpackEncodedString(DFA4_acceptS);
-    static final short[] DFA4_special = DFA.unpackEncodedString(DFA4_specialS);
-    static final short[][] DFA4_transition;
-
-    static {
-        int numStates = DFA4_transitionS.length;
-        DFA4_transition = new short[numStates][];
-        for (int i=0; i<numStates; i++) {
-            DFA4_transition[i] = DFA.unpackEncodedString(DFA4_transitionS[i]);
-        }
-    }
-
-    class DFA4 extends DFA {
-
-        public DFA4(BaseRecognizer recognizer) {
-            this.recognizer = recognizer;
-            this.decisionNumber = 4;
-            this.eot = DFA4_eot;
-            this.eof = DFA4_eof;
-            this.min = DFA4_min;
-            this.max = DFA4_max;
-            this.accept = DFA4_accept;
-            this.special = DFA4_special;
-            this.transition = DFA4_transition;
-        }
-        public String getDescription() {
-            return "1:1: Tokens : ( T__21 | T__22 | T__23 | T__24 | T__25 | T__26 | FOR | INT_TYPE | CHAR | VOID | ID | INT | EQ | EQEQ | LT | PLUS | WS );";
-        }
-    }
- 
-
-}
\ No newline at end of file
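
The Java lexer deleted above routes whitespace to channel 99 (ANTLR 3's hidden channel), so a CommonTokenStream, which serves only the default channel, never hands WS to a parser; it also shows why this particular .java could not compile as-is, since the grammar's ObjC actions (the NSLog call and the `int` list label) were emitted verbatim. A hedged token-dump sketch, assuming a Java-target regeneration of SimpleC.g that provides a compilable SimpleCLexer:

    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.Token;

    public class TokenDump {
        public static void main(String[] args) {
            SimpleCLexer lexer = new SimpleCLexer(new ANTLRStringStream("int x = 3;"));
            // Iterating nextToken() directly shows every channel, including hidden WS.
            for (Token t = lexer.nextToken(); t.getType() != Token.EOF; t = lexer.nextToken()) {
                System.out.println(t.getType() + " '" + t.getText() + "' channel=" + t.getChannel());
            }
        }
    }
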
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.m
deleted file mode 100644
index ce506e1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.m
+++ /dev/null
@@ -1,1152 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : SimpleC.g
- *     -                            On : 2011-05-06 15:09:17
- *     -                 for the lexer : SimpleCLexerLexer
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} SimpleC.g 2011-05-06 15:09:17
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SimpleCLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-/** As per Terence: No returns for lexer rules! */
-@implementation SimpleCLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"SimpleC.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (SimpleCLexer *)newSimpleCLexerWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    return [[SimpleCLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    self = [super initWithCharStream:anInput State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:18+1] retain]];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* ObjC Start of actions.lexer.methods */
-/* ObjC end of actions.lexer.methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-
-/* Start of Rules */
-// $ANTLR start "K_FOR"
-- (void) mK_FOR
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_FOR;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:91:7: ( 'for' ) // ruleBlockSingleAlt
-        // SimpleC.g:91:9: 'for' // alt
-        {
-        [self matchString:@"for"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_FOR" */
-
-// $ANTLR start "K_CHAR"
-- (void) mK_CHAR
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_CHAR;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:92:7: ( 'char' ) // ruleBlockSingleAlt
-        // SimpleC.g:92:9: 'char' // alt
-        {
-        [self matchString:@"char"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_CHAR" */
-
-// $ANTLR start "K_INT_TYPE"
-- (void) mK_INT_TYPE
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_INT_TYPE;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:93:12: ( 'int' ) // ruleBlockSingleAlt
-        // SimpleC.g:93:14: 'int' // alt
-        {
-        [self matchString:@"int"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_INT_TYPE" */
-
-// $ANTLR start "K_VOID"
-- (void) mK_VOID
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_VOID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:94:7: ( 'void' ) // ruleBlockSingleAlt
-        // SimpleC.g:94:9: 'void' // alt
-        {
-        [self matchString:@"void"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_VOID" */
-
-// $ANTLR start "K_ID"
-- (void) mK_ID
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_ID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:96:7: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* ) // ruleBlockSingleAlt
-        // SimpleC.g:96:11: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* // alt
-        {
-        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-            [input consume];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            [self recover:mse];
-            @throw mse;
-        }
-
-
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0 >= '0' && LA1_0 <= '9')||(LA1_0 >= 'A' && LA1_0 <= 'Z')||LA1_0=='_'||(LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // SimpleC.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    goto loop1;
-            }
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_ID" */
-
-// $ANTLR start "K_INT"
-- (void) mK_INT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_INT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        ANTLRCommonToken *anInt=nil;
-        AMutableArray *list_anInt=nil; 
-        // SimpleC.g:99:7: ( (anInt+= ( '0' .. '9' ) )+ ) // ruleBlockSingleAlt
-        // SimpleC.g:99:9: (anInt+= ( '0' .. '9' ) )+ // alt
-        {
-        // SimpleC.g:99:14: (anInt+= ( '0' .. '9' ) )+ // positiveClosureBlock
-        NSInteger cnt2 = 0;
-        do {
-            NSInteger alt2 = 2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // SimpleC.g:99:14: anInt+= ( '0' .. '9' ) // alt
-                    {
-                    anInt = [input LA:1];
-
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        mse.c = anInt;
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt2 >= 1 )
-                        goto loop2;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:2];
-                    @throw eee;
-            }
-            cnt2++;
-        } while (YES);
-        loop2: ;
-
-
-        NSLog(@"%@", list_anInt);
-
-
-        }
-
-        // token+rule list labels
-        [list_anInt release];
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_INT" */
-
-// $ANTLR start "K_LCURVE"
-- (void) mK_LCURVE
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_LCURVE;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:102:10: ( '(' ) // ruleBlockSingleAlt
-        // SimpleC.g:102:12: '(' // alt
-        {
-        [self matchChar:'(']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_LCURVE" */
-
-// $ANTLR start "K_RCURVE"
-- (void) mK_RCURVE
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_RCURVE;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:103:10: ( ')' ) // ruleBlockSingleAlt
-        // SimpleC.g:103:12: ')' // alt
-        {
-        [self matchChar:')']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_RCURVE" */
-
-// $ANTLR start "K_PLUS"
-- (void) mK_PLUS
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_PLUS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:104:8: ( '+' ) // ruleBlockSingleAlt
-        // SimpleC.g:104:10: '+' // alt
-        {
-        [self matchChar:'+']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_PLUS" */
-
-// $ANTLR start "K_COMMA"
-- (void) mK_COMMA
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_COMMA;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:105:9: ( ',' ) // ruleBlockSingleAlt
-        // SimpleC.g:105:11: ',' // alt
-        {
-        [self matchChar:',']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_COMMA" */
-
-// $ANTLR start "K_SEMICOLON"
-- (void) mK_SEMICOLON
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_SEMICOLON;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:106:13: ( ';' ) // ruleBlockSingleAlt
-        // SimpleC.g:106:15: ';' // alt
-        {
-        [self matchChar:';']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_SEMICOLON" */
-
-// $ANTLR start "K_LT"
-- (void) mK_LT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_LT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:107:8: ( '<' ) // ruleBlockSingleAlt
-        // SimpleC.g:107:10: '<' // alt
-        {
-        [self matchChar:'<']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_LT" */
-
-// $ANTLR start "K_EQ"
-- (void) mK_EQ
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_EQ;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:108:8: ( '=' ) // ruleBlockSingleAlt
-        // SimpleC.g:108:10: '=' // alt
-        {
-        [self matchChar:'=']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_EQ" */
-
-// $ANTLR start "K_EQEQ"
-- (void) mK_EQEQ
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_EQEQ;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:109:8: ( '==' ) // ruleBlockSingleAlt
-        // SimpleC.g:109:10: '==' // alt
-        {
-        [self matchString:@"=="]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_EQEQ" */
-
-// $ANTLR start "K_LCURLY"
-- (void) mK_LCURLY
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_LCURLY;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:110:10: ( '{' ) // ruleBlockSingleAlt
-        // SimpleC.g:110:12: '{' // alt
-        {
-        [self matchChar:'{']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_LCURLY" */
-
-// $ANTLR start "K_RCURLY"
-- (void) mK_RCURLY
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = K_RCURLY;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:111:10: ( '}' ) // ruleBlockSingleAlt
-        // SimpleC.g:111:12: '}' // alt
-        {
-        [self matchChar:'}']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "K_RCURLY" */
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // SimpleC.g:113:5: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ ) // ruleBlockSingleAlt
-        // SimpleC.g:113:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // alt
-        {
-        // SimpleC.g:113:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // positiveClosureBlock
-        NSInteger cnt3 = 0;
-        do {
-            NSInteger alt3 = 2;
-            NSInteger LA3_0 = [input LA:1];
-            if ( ((LA3_0 >= '\t' && LA3_0 <= '\n')||LA3_0=='\r'||LA3_0==' ') ) {
-                alt3=1;
-            }
-
-
-            switch (alt3) {
-                case 1 : ;
-                    // SimpleC.g: // alt
-                    {
-                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == '\r'||[input LA:1] == ' ') {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt3 >= 1 )
-                        goto loop3;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:3];
-                    @throw eee;
-            }
-            cnt3++;
-        } while (YES);
-        loop3: ;
-
-
-         _channel=HIDDEN; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "WS" */
-
-- (void) mTokens
-{
-    // SimpleC.g:1:8: ( K_FOR | K_CHAR | K_INT_TYPE | K_VOID | K_ID | K_INT | K_LCURVE | K_RCURVE | K_PLUS | K_COMMA | K_SEMICOLON | K_LT | K_EQ | K_EQEQ | K_LCURLY | K_RCURLY | WS ) //ruleblock
-    NSInteger alt4=17;
-    unichar charLA4 = [input LA:1];
-    switch (charLA4) {
-        case 'f': ;
-            {
-            NSInteger LA4_1 = [input LA:2];
-
-            if ( (LA4_1=='o') ) {
-                NSInteger LA4_17 = [input LA:3];
-
-                if ( (LA4_17=='r') ) {
-                    NSInteger LA4_23 = [input LA:4];
-
-                    if ( ((LA4_23 >= '0' && LA4_23 <= '9')||(LA4_23 >= 'A' && LA4_23 <= 'Z')||LA4_23=='_'||(LA4_23 >= 'a' && LA4_23 <= 'z')) ) {
-                        alt4=5;
-                    }
-                    else {
-                        alt4 = 1;
-                    }
-                }
-                else {
-                    alt4 = 5;
-                }
-            }
-            else {
-                alt4 = 5;
-            }
-            }
-            break;
-        case 'c': ;
-            {
-            NSInteger LA4_2 = [input LA:2];
-
-            if ( (LA4_2=='h') ) {
-                NSInteger LA4_18 = [input LA:3];
-
-                if ( (LA4_18=='a') ) {
-                    NSInteger LA4_24 = [input LA:4];
-
-                    if ( (LA4_24=='r') ) {
-                        NSInteger LA4_28 = [input LA:5];
-
-                        if ( ((LA4_28 >= '0' && LA4_28 <= '9')||(LA4_28 >= 'A' && LA4_28 <= 'Z')||LA4_28=='_'||(LA4_28 >= 'a' && LA4_28 <= 'z')) ) {
-                            alt4=5;
-                        }
-                        else {
-                            alt4 = 2;
-                        }
-                    }
-                    else {
-                        alt4 = 5;
-                    }
-                }
-                else {
-                    alt4 = 5;
-                }
-            }
-            else {
-                alt4 = 5;
-            }
-            }
-            break;
-        case 'i': ;
-            {
-            NSInteger LA4_3 = [input LA:2];
-
-            if ( (LA4_3=='n') ) {
-                NSInteger LA4_19 = [input LA:3];
-
-                if ( (LA4_19=='t') ) {
-                    NSInteger LA4_25 = [input LA:4];
-
-                    if ( ((LA4_25 >= '0' && LA4_25 <= '9')||(LA4_25 >= 'A' && LA4_25 <= 'Z')||LA4_25=='_'||(LA4_25 >= 'a' && LA4_25 <= 'z')) ) {
-                        alt4=5;
-                    }
-                    else {
-                        alt4 = 3;
-                    }
-                }
-                else {
-                    alt4 = 5;
-                }
-            }
-            else {
-                alt4 = 5;
-            }
-            }
-            break;
-        case 'v': ;
-            {
-            NSInteger LA4_4 = [input LA:2];
-
-            if ( (LA4_4=='o') ) {
-                NSInteger LA4_20 = [input LA:3];
-
-                if ( (LA4_20=='i') ) {
-                    NSInteger LA4_26 = [input LA:4];
-
-                    if ( (LA4_26=='d') ) {
-                        NSInteger LA4_30 = [input LA:5];
-
-                        if ( ((LA4_30 >= '0' && LA4_30 <= '9')||(LA4_30 >= 'A' && LA4_30 <= 'Z')||LA4_30=='_'||(LA4_30 >= 'a' && LA4_30 <= 'z')) ) {
-                            alt4=5;
-                        }
-                        else {
-                            alt4 = 4;
-                        }
-                    }
-                    else {
-                        alt4 = 5;
-                    }
-                }
-                else {
-                    alt4 = 5;
-                }
-            }
-            else {
-                alt4 = 5;
-            }
-            }
-            break;
-        case 'A': ;
-        case 'B': ;
-        case 'C': ;
-        case 'D': ;
-        case 'E': ;
-        case 'F': ;
-        case 'G': ;
-        case 'H': ;
-        case 'I': ;
-        case 'J': ;
-        case 'K': ;
-        case 'L': ;
-        case 'M': ;
-        case 'N': ;
-        case 'O': ;
-        case 'P': ;
-        case 'Q': ;
-        case 'R': ;
-        case 'S': ;
-        case 'T': ;
-        case 'U': ;
-        case 'V': ;
-        case 'W': ;
-        case 'X': ;
-        case 'Y': ;
-        case 'Z': ;
-        case '_': ;
-        case 'a': ;
-        case 'b': ;
-        case 'd': ;
-        case 'e': ;
-        case 'g': ;
-        case 'h': ;
-        case 'j': ;
-        case 'k': ;
-        case 'l': ;
-        case 'm': ;
-        case 'n': ;
-        case 'o': ;
-        case 'p': ;
-        case 'q': ;
-        case 'r': ;
-        case 's': ;
-        case 't': ;
-        case 'u': ;
-        case 'w': ;
-        case 'x': ;
-        case 'y': ;
-        case 'z': ;
-            {
-            alt4=5;
-            }
-            break;
-        case '0': ;
-        case '1': ;
-        case '2': ;
-        case '3': ;
-        case '4': ;
-        case '5': ;
-        case '6': ;
-        case '7': ;
-        case '8': ;
-        case '9': ;
-            {
-            alt4=6;
-            }
-            break;
-        case '(': ;
-            {
-            alt4=7;
-            }
-            break;
-        case ')': ;
-            {
-            alt4=8;
-            }
-            break;
-        case '+': ;
-            {
-            alt4=9;
-            }
-            break;
-        case ',': ;
-            {
-            alt4=10;
-            }
-            break;
-        case ';': ;
-            {
-            alt4=11;
-            }
-            break;
-        case '<': ;
-            {
-            alt4=12;
-            }
-            break;
-        case '=': ;
-            {
-            NSInteger LA4_13 = [input LA:2];
-
-            if ( (LA4_13=='=') ) {
-                alt4=14;
-            }
-            else {
-                alt4 = 13;
-            }
-            }
-            break;
-        case '{': ;
-            {
-            alt4=15;
-            }
-            break;
-        case '}': ;
-            {
-            alt4=16;
-            }
-            break;
-        case '\t': ;
-        case '\n': ;
-        case '\r': ;
-        case ' ': ;
-            {
-            alt4=17;
-            }
-            break;
-
-    default: ;
-        ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:4 state:0 stream:input];
-        nvae.c = charLA4;
-        @throw nvae;
-
-    }
-
-    switch (alt4) {
-        case 1 : ;
-            // SimpleC.g:1:10: K_FOR // alt
-            {
-            [self mK_FOR]; 
-
-
-
-            }
-            break;
-        case 2 : ;
-            // SimpleC.g:1:16: K_CHAR // alt
-            {
-            [self mK_CHAR]; 
-
-
-
-            }
-            break;
-        case 3 : ;
-            // SimpleC.g:1:23: K_INT_TYPE // alt
-            {
-            [self mK_INT_TYPE]; 
-
-
-
-            }
-            break;
-        case 4 : ;
-            // SimpleC.g:1:34: K_VOID // alt
-            {
-            [self mK_VOID]; 
-
-
-
-            }
-            break;
-        case 5 : ;
-            // SimpleC.g:1:41: K_ID // alt
-            {
-            [self mK_ID]; 
-
-
-
-            }
-            break;
-        case 6 : ;
-            // SimpleC.g:1:46: K_INT // alt
-            {
-            [self mK_INT]; 
-
-
-
-            }
-            break;
-        case 7 : ;
-            // SimpleC.g:1:52: K_LCURVE // alt
-            {
-            [self mK_LCURVE]; 
-
-
-
-            }
-            break;
-        case 8 : ;
-            // SimpleC.g:1:61: K_RCURVE // alt
-            {
-            [self mK_RCURVE]; 
-
-
-
-            }
-            break;
-        case 9 : ;
-            // SimpleC.g:1:70: K_PLUS // alt
-            {
-            [self mK_PLUS]; 
-
-
-
-            }
-            break;
-        case 10 : ;
-            // SimpleC.g:1:77: K_COMMA // alt
-            {
-            [self mK_COMMA]; 
-
-
-
-            }
-            break;
-        case 11 : ;
-            // SimpleC.g:1:85: K_SEMICOLON // alt
-            {
-            [self mK_SEMICOLON]; 
-
-
-
-            }
-            break;
-        case 12 : ;
-            // SimpleC.g:1:97: K_LT // alt
-            {
-            [self mK_LT]; 
-
-
-
-            }
-            break;
-        case 13 : ;
-            // SimpleC.g:1:102: K_EQ // alt
-            {
-            [self mK_EQ]; 
-
-
-
-            }
-            break;
-        case 14 : ;
-            // SimpleC.g:1:107: K_EQEQ // alt
-            {
-            [self mK_EQEQ]; 
-
-
-
-            }
-            break;
-        case 15 : ;
-            // SimpleC.g:1:114: K_LCURLY // alt
-            {
-            [self mK_LCURLY]; 
-
-
-
-            }
-            break;
-        case 16 : ;
-            // SimpleC.g:1:123: K_RCURLY // alt
-            {
-            [self mK_RCURLY]; 
-
-
-
-            }
-            break;
-        case 17 : ;
-            // SimpleC.g:1:132: WS // alt
-            {
-            [self mWS]; 
-
-
-
-            }
-            break;
-
-    }
-
-}
-
-@end /* end of SimpleCLexer implementation line 397 */
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.h b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.h
deleted file mode 100644
index a5420cc..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.h
+++ /dev/null
@@ -1,415 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} SimpleC.g 2011-05-06 15:09:17
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* parserHeaderFile */
-#ifndef ANTLR3TokenTypeAlreadyDefined
-#define ANTLR3TokenTypeAlreadyDefined
-typedef enum {
-    ANTLR_EOF = -1,
-    INVALID,
-    EOR,
-    DOWN,
-    UP,
-    MIN
-} ANTLR3TokenType;
-#endif
-
-#pragma mark Cyclic DFA interface start DFA2
-@interface DFA2 : ANTLRDFA {
-}
-+ newDFA2WithRecognizer:(ANTLRBaseRecognizer *)theRecognizer;
-- initWithRecognizer:(ANTLRBaseRecognizer *)recognizer;
-@end /* end of DFA2 interface  */
-
-#pragma mark Cyclic DFA interface end DFA2
-
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define ARG_DEF 4
-#define BLOCK 5
-#define FUNC_DECL 6
-#define FUNC_DEF 7
-#define FUNC_HDR 8
-#define K_CHAR 9
-#define K_COMMA 10
-#define K_EQ 11
-#define K_EQEQ 12
-#define K_FOR 13
-#define K_ID 14
-#define K_INT 15
-#define K_INT_TYPE 16
-#define K_LCURLY 17
-#define K_LCURVE 18
-#define K_LT 19
-#define K_PLUS 20
-#define K_RCURLY 21
-#define K_RCURVE 22
-#define K_SEMICOLON 23
-#define K_VOID 24
-#define VAR_DEF 25
-#define WS 26
-#pragma mark Dynamic Global Scopes
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-/* returnScopeInterface SimpleCParser_program_return */
-@interface SimpleCParser_program_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_program_return *)newSimpleCParser_program_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_declaration_return */
-@interface SimpleCParser_declaration_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_declaration_return *)newSimpleCParser_declaration_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_variable_return */
-@interface SimpleCParser_variable_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_variable_return *)newSimpleCParser_variable_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_declarator_return */
-@interface SimpleCParser_declarator_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_declarator_return *)newSimpleCParser_declarator_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_functionHeader_return */
-@interface SimpleCParser_functionHeader_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_functionHeader_return *)newSimpleCParser_functionHeader_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_formalParameter_return */
-@interface SimpleCParser_formalParameter_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_formalParameter_return *)newSimpleCParser_formalParameter_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_type_return */
-@interface SimpleCParser_type_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_type_return *)newSimpleCParser_type_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_block_return */
-@interface SimpleCParser_block_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_block_return *)newSimpleCParser_block_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_stat_return */
-@interface SimpleCParser_stat_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_stat_return *)newSimpleCParser_stat_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_forStat_return */
-@interface SimpleCParser_forStat_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_forStat_return *)newSimpleCParser_forStat_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_assignStat_return */
-@interface SimpleCParser_assignStat_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_assignStat_return *)newSimpleCParser_assignStat_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_expr_return */
-@interface SimpleCParser_expr_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_expr_return *)newSimpleCParser_expr_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_condExpr_return */
-@interface SimpleCParser_condExpr_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_condExpr_return *)newSimpleCParser_condExpr_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_aexpr_return */
-@interface SimpleCParser_aexpr_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_aexpr_return *)newSimpleCParser_aexpr_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface SimpleCParser_atom_return */
-@interface SimpleCParser_atom_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_atom_return *)newSimpleCParser_atom_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-
-/* Interface grammar class */
-@interface SimpleCParser : ANTLRParser { /* line 572 */
-/* ObjC start of ruleAttributeScopeMemVar */
-
-
-/* ObjC end of ruleAttributeScopeMemVar */
-/* ObjC start of globalAttributeScopeMemVar */
-
-
-/* ObjC end of globalAttributeScopeMemVar */
-/* ObjC start of actions.(actionScope).memVars */
-/* ObjC end of actions.(actionScope).memVars */
-/* ObjC start of memVars */
-/* AST parserHeaderFile.memVars */
-NSInteger ruleLevel;
-NSArray *ruleNames;
-  /* AST super.memVars */
-/* AST parserMemVars */
-id<ANTLRTreeAdaptor> treeAdaptor;   /* AST parserMemVars */
-/* ObjC end of memVars */
-
-DFA2 *dfa2;
- }
-
-/* ObjC start of actions.(actionScope).properties */
-/* ObjC end of actions.(actionScope).properties */
-/* ObjC start of properties */
-/* AST parserHeaderFile.properties */
-  /* AST super.properties */
-/* AST parserProperties */
-@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<ANTLRTreeAdaptor> treeAdaptor;   /* AST parserproperties */
-/* ObjC end of properties */
-
-+ (void) initialize;
-+ (id) newSimpleCParser:(id<ANTLRTokenStream>)aStream;
-/* ObjC start of actions.(actionScope).methodsDecl */
-/* ObjC end of actions.(actionScope).methodsDecl */
-
-/* ObjC start of methodsDecl */
-/* AST parserHeaderFile.methodsDecl */
-  /* AST super.methodsDecl */
-/* AST parserMethodsDecl */
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)theTreeAdaptor;   /* AST parsermethodsDecl */
-/* ObjC end of methodsDecl */
-
-- (SimpleCParser_program_return *)program; 
-- (SimpleCParser_declaration_return *)declaration; 
-- (SimpleCParser_variable_return *)variable; 
-- (SimpleCParser_declarator_return *)declarator; 
-- (SimpleCParser_functionHeader_return *)functionHeader; 
-- (SimpleCParser_formalParameter_return *)formalParameter; 
-- (SimpleCParser_type_return *)type; 
-- (SimpleCParser_block_return *)block; 
-- (SimpleCParser_stat_return *)stat; 
-- (SimpleCParser_forStat_return *)forStat; 
-- (SimpleCParser_assignStat_return *)assignStat; 
-- (SimpleCParser_expr_return *)expr; 
-- (SimpleCParser_condExpr_return *)condExpr; 
-- (SimpleCParser_aexpr_return *)aexpr; 
-- (SimpleCParser_atom_return *)atom; 
-
-
-@end /* end of SimpleCParser interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.java b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.java
deleted file mode 100644
index 4744ee1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.java
+++ /dev/null
@@ -1,1814 +0,0 @@
-// $ANTLR 3.2 Aug 13, 2010 14:19:31 SimpleC.g 2010-08-13 14:29:19
-
-import org.antlr.runtime.*;
-import java.util.Stack;
-import java.util.List;
-import java.util.ArrayList;
-
-
-import org.antlr.runtime.tree.*;
-
-public class SimpleCParser extends Parser {
-    public static final String[] tokenNames = new String[] {
-        "<invalid>", "<EOR>", "<DOWN>", "<UP>", "VAR_DEF", "ARG_DEF", "FUNC_HDR", "FUNC_DECL", "FUNC_DEF", "BLOCK", "ID", "EQ", "INT", "FOR", "INT_TYPE", "CHAR", "VOID", "EQEQ", "LT", "PLUS", "WS", "';'", "'('", "','", "')'", "'{'", "'}'"
-    };
-    public static final int LT=18;
-    public static final int T__26=26;
-    public static final int T__25=25;
-    public static final int T__24=24;
-    public static final int T__23=23;
-    public static final int T__22=22;
-    public static final int T__21=21;
-    public static final int CHAR=15;
-    public static final int FOR=13;
-    public static final int FUNC_HDR=6;
-    public static final int INT=12;
-    public static final int FUNC_DEF=8;
-    public static final int INT_TYPE=14;
-    public static final int ID=10;
-    public static final int EOF=-1;
-    public static final int FUNC_DECL=7;
-    public static final int ARG_DEF=5;
-    public static final int WS=20;
-    public static final int BLOCK=9;
-    public static final int PLUS=19;
-    public static final int VOID=16;
-    public static final int EQ=11;
-    public static final int VAR_DEF=4;
-    public static final int EQEQ=17;
-
-    // delegates
-    // delegators
-
-
-        public SimpleCParser(TokenStream input) {
-            this(input, new RecognizerSharedState());
-        }
-        public SimpleCParser(TokenStream input, RecognizerSharedState state) {
-            super(input, state);
-             
-        }
-        
-    protected TreeAdaptor adaptor = new CommonTreeAdaptor();
-
-    public void setTreeAdaptor(TreeAdaptor adaptor) {
-        this.adaptor = adaptor;
-    }
-    public TreeAdaptor getTreeAdaptor() {
-        return adaptor;
-    }
-
-    public String[] getTokenNames() { return SimpleCParser.tokenNames; }
-    public String getGrammarFileName() { return "SimpleC.g"; }
-
-
-    public static class program_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "program"
-    // SimpleC.g:16:1: program : ( declaration )+ ;
-    public final SimpleCParser.program_return program() throws RecognitionException {
-        SimpleCParser.program_return retval = new SimpleCParser.program_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        SimpleCParser.declaration_return declaration1 = null;
-
-
-
-        try {
-            // SimpleC.g:17:5: ( ( declaration )+ )
-            // SimpleC.g:17:9: ( declaration )+
-            {
-            root_0 = (Object)adaptor.nil();
-
-            // SimpleC.g:17:9: ( declaration )+
-            int cnt1=0;
-            loop1:
-            do {
-                int alt1=2;
-                int LA1_0 = input.LA(1);
-
-                if ( (LA1_0==ID||(LA1_0>=INT_TYPE && LA1_0<=VOID)) ) {
-                    alt1=1;
-                }
-
-
-                switch (alt1) {
-            	case 1 :
-            	    // SimpleC.g:17:9: declaration
-            	    {
-            	    pushFollow(FOLLOW_declaration_in_program85);
-            	    declaration1=declaration();
-
-            	    state._fsp--;
-
-            	    adaptor.addChild(root_0, declaration1.getTree());
-
-            	    }
-            	    break;
-
-            	default :
-            	    if ( cnt1 >= 1 ) break loop1;
-                        EarlyExitException eee =
-                            new EarlyExitException(1, input);
-                        throw eee;
-                }
-                cnt1++;
-            } while (true);
-
-
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "program"
-
-    public static class declaration_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "declaration"
-    // SimpleC.g:20:1: declaration : ( variable | functionHeader ';' -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) );
-    public final SimpleCParser.declaration_return declaration() throws RecognitionException {
-        SimpleCParser.declaration_return retval = new SimpleCParser.declaration_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token char_literal4=null;
-        SimpleCParser.variable_return variable2 = null;
-
-        SimpleCParser.functionHeader_return functionHeader3 = null;
-
-        SimpleCParser.functionHeader_return functionHeader5 = null;
-
-        SimpleCParser.block_return block6 = null;
-
-
-        Object char_literal4_tree=null;
-        RewriteRuleTokenStream stream_21=new RewriteRuleTokenStream(adaptor,"token 21");
-        RewriteRuleSubtreeStream stream_functionHeader=new RewriteRuleSubtreeStream(adaptor,"rule functionHeader");
-        RewriteRuleSubtreeStream stream_block=new RewriteRuleSubtreeStream(adaptor,"rule block");
-        try {
-            // SimpleC.g:21:5: ( variable | functionHeader ';' -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) )
-            int alt2=3;
-            alt2 = dfa2.predict(input);
-            switch (alt2) {
-                case 1 :
-                    // SimpleC.g:21:9: variable
-                    {
-                    root_0 = (Object)adaptor.nil();
-
-                    pushFollow(FOLLOW_variable_in_declaration105);
-                    variable2=variable();
-
-                    state._fsp--;
-
-                    adaptor.addChild(root_0, variable2.getTree());
-
-                    }
-                    break;
-                case 2 :
-                    // SimpleC.g:22:9: functionHeader ';'
-                    {
-                    pushFollow(FOLLOW_functionHeader_in_declaration115);
-                    functionHeader3=functionHeader();
-
-                    state._fsp--;
-
-                    stream_functionHeader.add(functionHeader3.getTree());
-                    char_literal4=(Token)match(input,21,FOLLOW_21_in_declaration117);  
-                    stream_21.add(char_literal4);
-
-
-
-                    // AST REWRITE
-                    // elements: functionHeader
-                    // token labels: 
-                    // rule labels: retval
-                    // token list labels: 
-                    // rule list labels: 
-                    // wildcard labels: 
-                    retval.tree = root_0;
-                    RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null);
-
-                    root_0 = (Object)adaptor.nil();
-                    // 22:28: -> ^( FUNC_DECL functionHeader )
-                    {
-                        // SimpleC.g:22:31: ^( FUNC_DECL functionHeader )
-                        {
-                        Object root_1 = (Object)adaptor.nil();
-                        root_1 = (Object)adaptor.becomeRoot((Object)adaptor.create(FUNC_DECL, "FUNC_DECL"), root_1);
-
-                        adaptor.addChild(root_1, stream_functionHeader.nextTree());
-
-                        adaptor.addChild(root_0, root_1);
-                        }
-
-                    }
-
-                    retval.tree = root_0;
-                    }
-                    break;
-                case 3 :
-                    // SimpleC.g:23:9: functionHeader block
-                    {
-                    pushFollow(FOLLOW_functionHeader_in_declaration135);
-                    functionHeader5=functionHeader();
-
-                    state._fsp--;
-
-                    stream_functionHeader.add(functionHeader5.getTree());
-                    pushFollow(FOLLOW_block_in_declaration137);
-                    block6=block();
-
-                    state._fsp--;
-
-                    stream_block.add(block6.getTree());
-
-
-                    // AST REWRITE
-                    // elements: block, functionHeader
-                    // token labels: 
-                    // rule labels: retval
-                    // token list labels: 
-                    // rule list labels: 
-                    // wildcard labels: 
-                    retval.tree = root_0;
-                    RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null);
-
-                    root_0 = (Object)adaptor.nil();
-                    // 23:30: -> ^( FUNC_DEF functionHeader block )
-                    {
-                        // SimpleC.g:23:33: ^( FUNC_DEF functionHeader block )
-                        {
-                        Object root_1 = (Object)adaptor.nil();
-                        root_1 = (Object)adaptor.becomeRoot((Object)adaptor.create(FUNC_DEF, "FUNC_DEF"), root_1);
-
-                        adaptor.addChild(root_1, stream_functionHeader.nextTree());
-                        adaptor.addChild(root_1, stream_block.nextTree());
-
-                        adaptor.addChild(root_0, root_1);
-                        }
-
-                    }
-
-                    retval.tree = root_0;
-                    }
-                    break;
-
-            }
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "declaration"
-
-    public static class variable_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "variable"
-    // SimpleC.g:26:1: variable : type declarator ';' -> ^( VAR_DEF type declarator ) ;
-    public final SimpleCParser.variable_return variable() throws RecognitionException {
-        SimpleCParser.variable_return retval = new SimpleCParser.variable_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token char_literal9=null;
-        SimpleCParser.type_return type7 = null;
-
-        SimpleCParser.declarator_return declarator8 = null;
-
-
-        Object char_literal9_tree=null;
-        RewriteRuleTokenStream stream_21=new RewriteRuleTokenStream(adaptor,"token 21");
-        RewriteRuleSubtreeStream stream_declarator=new RewriteRuleSubtreeStream(adaptor,"rule declarator");
-        RewriteRuleSubtreeStream stream_type=new RewriteRuleSubtreeStream(adaptor,"rule type");
-        try {
-            // SimpleC.g:27:5: ( type declarator ';' -> ^( VAR_DEF type declarator ) )
-            // SimpleC.g:27:9: type declarator ';'
-            {
-            pushFollow(FOLLOW_type_in_variable166);
-            type7=type();
-
-            state._fsp--;
-
-            stream_type.add(type7.getTree());
-            pushFollow(FOLLOW_declarator_in_variable168);
-            declarator8=declarator();
-
-            state._fsp--;
-
-            stream_declarator.add(declarator8.getTree());
-            char_literal9=(Token)match(input,21,FOLLOW_21_in_variable170);  
-            stream_21.add(char_literal9);
-
-
-
-            // AST REWRITE
-            // elements: declarator, type
-            // token labels: 
-            // rule labels: retval
-            // token list labels: 
-            // rule list labels: 
-            // wildcard labels: 
-            retval.tree = root_0;
-            RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null);
-
-            root_0 = (Object)adaptor.nil();
-            // 27:29: -> ^( VAR_DEF type declarator )
-            {
-                // SimpleC.g:27:32: ^( VAR_DEF type declarator )
-                {
-                Object root_1 = (Object)adaptor.nil();
-                root_1 = (Object)adaptor.becomeRoot((Object)adaptor.create(VAR_DEF, "VAR_DEF"), root_1);
-
-                adaptor.addChild(root_1, stream_type.nextTree());
-                adaptor.addChild(root_1, stream_declarator.nextTree());
-
-                adaptor.addChild(root_0, root_1);
-                }
-
-            }
-
-            retval.tree = root_0;
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "variable"
-
-    public static class declarator_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "declarator"
-    // SimpleC.g:30:1: declarator : ID ;
-    public final SimpleCParser.declarator_return declarator() throws RecognitionException {
-        SimpleCParser.declarator_return retval = new SimpleCParser.declarator_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token ID10=null;
-
-        Object ID10_tree=null;
-
-        try {
-            // SimpleC.g:31:5: ( ID )
-            // SimpleC.g:31:9: ID
-            {
-            root_0 = (Object)adaptor.nil();
-
-            ID10=(Token)match(input,ID,FOLLOW_ID_in_declarator199); 
-            ID10_tree = (Object)adaptor.create(ID10);
-            adaptor.addChild(root_0, ID10_tree);
-
-
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "declarator"
-
-    public static class functionHeader_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "functionHeader"
-    // SimpleC.g:34:1: functionHeader : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' -> ^( FUNC_HDR type ID ( formalParameter )+ ) ;
-    public final SimpleCParser.functionHeader_return functionHeader() throws RecognitionException {
-        SimpleCParser.functionHeader_return retval = new SimpleCParser.functionHeader_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token ID12=null;
-        Token char_literal13=null;
-        Token char_literal15=null;
-        Token char_literal17=null;
-        SimpleCParser.type_return type11 = null;
-
-        SimpleCParser.formalParameter_return formalParameter14 = null;
-
-        SimpleCParser.formalParameter_return formalParameter16 = null;
-
-
-        Object ID12_tree=null;
-        Object char_literal13_tree=null;
-        Object char_literal15_tree=null;
-        Object char_literal17_tree=null;
-        RewriteRuleTokenStream stream_ID=new RewriteRuleTokenStream(adaptor,"token ID");
-        RewriteRuleTokenStream stream_22=new RewriteRuleTokenStream(adaptor,"token 22");
-        RewriteRuleTokenStream stream_23=new RewriteRuleTokenStream(adaptor,"token 23");
-        RewriteRuleTokenStream stream_24=new RewriteRuleTokenStream(adaptor,"token 24");
-        RewriteRuleSubtreeStream stream_formalParameter=new RewriteRuleSubtreeStream(adaptor,"rule formalParameter");
-        RewriteRuleSubtreeStream stream_type=new RewriteRuleSubtreeStream(adaptor,"rule type");
-        try {
-            // SimpleC.g:35:5: ( type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' -> ^( FUNC_HDR type ID ( formalParameter )+ ) )
-            // SimpleC.g:35:9: type ID '(' ( formalParameter ( ',' formalParameter )* )? ')'
-            {
-            pushFollow(FOLLOW_type_in_functionHeader219);
-            type11=type();
-
-            state._fsp--;
-
-            stream_type.add(type11.getTree());
-            ID12=(Token)match(input,ID,FOLLOW_ID_in_functionHeader221);  
-            stream_ID.add(ID12);
-
-            char_literal13=(Token)match(input,22,FOLLOW_22_in_functionHeader223);  
-            stream_22.add(char_literal13);
-
-            // SimpleC.g:35:21: ( formalParameter ( ',' formalParameter )* )?
-            int alt4=2;
-            int LA4_0 = input.LA(1);
-
-            if ( (LA4_0==ID||(LA4_0>=INT_TYPE && LA4_0<=VOID)) ) {
-                alt4=1;
-            }
-            switch (alt4) {
-                case 1 :
-                    // SimpleC.g:35:23: formalParameter ( ',' formalParameter )*
-                    {
-                    pushFollow(FOLLOW_formalParameter_in_functionHeader227);
-                    formalParameter14=formalParameter();
-
-                    state._fsp--;
-
-                    stream_formalParameter.add(formalParameter14.getTree());
-                    // SimpleC.g:35:39: ( ',' formalParameter )*
-                    loop3:
-                    do {
-                        int alt3=2;
-                        int LA3_0 = input.LA(1);
-
-                        if ( (LA3_0==23) ) {
-                            alt3=1;
-                        }
-
-
-                        switch (alt3) {
-                    	case 1 :
-                    	    // SimpleC.g:35:41: ',' formalParameter
-                    	    {
-                    	    char_literal15=(Token)match(input,23,FOLLOW_23_in_functionHeader231);  
-                    	    stream_23.add(char_literal15);
-
-                    	    pushFollow(FOLLOW_formalParameter_in_functionHeader233);
-                    	    formalParameter16=formalParameter();
-
-                    	    state._fsp--;
-
-                    	    stream_formalParameter.add(formalParameter16.getTree());
-
-                    	    }
-                    	    break;
-
-                    	default :
-                    	    break loop3;
-                        }
-                    } while (true);
-
-
-                    }
-                    break;
-
-            }
-
-            char_literal17=(Token)match(input,24,FOLLOW_24_in_functionHeader241);  
-            stream_24.add(char_literal17);
-
-
-
-            // AST REWRITE
-            // elements: ID, formalParameter, type
-            // token labels: 
-            // rule labels: retval
-            // token list labels: 
-            // rule list labels: 
-            // wildcard labels: 
-            retval.tree = root_0;
-            RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null);
-
-            root_0 = (Object)adaptor.nil();
-            // 36:9: -> ^( FUNC_HDR type ID ( formalParameter )+ )
-            {
-                // SimpleC.g:36:12: ^( FUNC_HDR type ID ( formalParameter )+ )
-                {
-                Object root_1 = (Object)adaptor.nil();
-                root_1 = (Object)adaptor.becomeRoot((Object)adaptor.create(FUNC_HDR, "FUNC_HDR"), root_1);
-
-                adaptor.addChild(root_1, stream_type.nextTree());
-                adaptor.addChild(root_1, stream_ID.nextNode());
-                if ( !(stream_formalParameter.hasNext()) ) {
-                    throw new RewriteEarlyExitException();
-                }
-                while ( stream_formalParameter.hasNext() ) {
-                    adaptor.addChild(root_1, stream_formalParameter.nextTree());
-
-                }
-                stream_formalParameter.reset();
-
-                adaptor.addChild(root_0, root_1);
-                }
-
-            }
-
-            retval.tree = root_0;
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "functionHeader"
-
-    public static class formalParameter_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "formalParameter"
-    // SimpleC.g:39:1: formalParameter : type declarator -> ^( ARG_DEF type declarator ) ;
-    public final SimpleCParser.formalParameter_return formalParameter() throws RecognitionException {
-        SimpleCParser.formalParameter_return retval = new SimpleCParser.formalParameter_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        SimpleCParser.type_return type18 = null;
-
-        SimpleCParser.declarator_return declarator19 = null;
-
-
-        RewriteRuleSubtreeStream stream_declarator=new RewriteRuleSubtreeStream(adaptor,"rule declarator");
-        RewriteRuleSubtreeStream stream_type=new RewriteRuleSubtreeStream(adaptor,"rule type");
-        try {
-            // SimpleC.g:40:5: ( type declarator -> ^( ARG_DEF type declarator ) )
-            // SimpleC.g:40:9: type declarator
-            {
-            pushFollow(FOLLOW_type_in_formalParameter281);
-            type18=type();
-
-            state._fsp--;
-
-            stream_type.add(type18.getTree());
-            pushFollow(FOLLOW_declarator_in_formalParameter283);
-            declarator19=declarator();
-
-            state._fsp--;
-
-            stream_declarator.add(declarator19.getTree());
-
-
-            // AST REWRITE
-            // elements: declarator, type
-            // token labels: 
-            // rule labels: retval
-            // token list labels: 
-            // rule list labels: 
-            // wildcard labels: 
-            retval.tree = root_0;
-            RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null);
-
-            root_0 = (Object)adaptor.nil();
-            // 40:25: -> ^( ARG_DEF type declarator )
-            {
-                // SimpleC.g:40:28: ^( ARG_DEF type declarator )
-                {
-                Object root_1 = (Object)adaptor.nil();
-                root_1 = (Object)adaptor.becomeRoot((Object)adaptor.create(ARG_DEF, "ARG_DEF"), root_1);
-
-                adaptor.addChild(root_1, stream_type.nextTree());
-                adaptor.addChild(root_1, stream_declarator.nextTree());
-
-                adaptor.addChild(root_0, root_1);
-                }
-
-            }
-
-            retval.tree = root_0;
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "formalParameter"
-
-    public static class type_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "type"
-    // SimpleC.g:43:1: type : ( 'int' | 'char' | 'void' | ID );
-    public final SimpleCParser.type_return type() throws RecognitionException {
-        SimpleCParser.type_return retval = new SimpleCParser.type_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token set20=null;
-
-        Object set20_tree=null;
-
-        try {
-            // SimpleC.g:44:5: ( 'int' | 'char' | 'void' | ID )
-            // SimpleC.g:
-            {
-            root_0 = (Object)adaptor.nil();
-
-            set20=(Token)input.LT(1);
-            if ( input.LA(1)==ID||(input.LA(1)>=INT_TYPE && input.LA(1)<=VOID) ) {
-                input.consume();
-                adaptor.addChild(root_0, (Object)adaptor.create(set20));
-                state.errorRecovery=false;
-            }
-            else {
-                MismatchedSetException mse = new MismatchedSetException(null,input);
-                throw mse;
-            }
-
-
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "type"
-
-    public static class block_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "block"
-    // SimpleC.g:50:1: block : lc= '{' ( variable )* ( stat )* '}' -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* ) ;
-    public final SimpleCParser.block_return block() throws RecognitionException {
-        SimpleCParser.block_return retval = new SimpleCParser.block_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token lc=null;
-        Token char_literal23=null;
-        SimpleCParser.variable_return variable21 = null;
-
-        SimpleCParser.stat_return stat22 = null;
-
-
-        Object lc_tree=null;
-        Object char_literal23_tree=null;
-        RewriteRuleTokenStream stream_25=new RewriteRuleTokenStream(adaptor,"token 25");
-        RewriteRuleTokenStream stream_26=new RewriteRuleTokenStream(adaptor,"token 26");
-        RewriteRuleSubtreeStream stream_variable=new RewriteRuleSubtreeStream(adaptor,"rule variable");
-        RewriteRuleSubtreeStream stream_stat=new RewriteRuleSubtreeStream(adaptor,"rule stat");
-        try {
-            // SimpleC.g:51:5: (lc= '{' ( variable )* ( stat )* '}' -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* ) )
-            // SimpleC.g:51:9: lc= '{' ( variable )* ( stat )* '}'
-            {
-            lc=(Token)match(input,25,FOLLOW_25_in_block376);  
-            stream_25.add(lc);
-
-            // SimpleC.g:52:13: ( variable )*
-            loop5:
-            do {
-                int alt5=2;
-                int LA5_0 = input.LA(1);
-
-                if ( (LA5_0==ID) ) {
-                    int LA5_2 = input.LA(2);
-
-                    if ( (LA5_2==ID) ) {
-                        alt5=1;
-                    }
-
-
-                }
-                else if ( ((LA5_0>=INT_TYPE && LA5_0<=VOID)) ) {
-                    alt5=1;
-                }
-
-
-                switch (alt5) {
-            	case 1 :
-            	    // SimpleC.g:52:13: variable
-            	    {
-            	    pushFollow(FOLLOW_variable_in_block390);
-            	    variable21=variable();
-
-            	    state._fsp--;
-
-            	    stream_variable.add(variable21.getTree());
-
-            	    }
-            	    break;
-
-            	default :
-            	    break loop5;
-                }
-            } while (true);
-
-            // SimpleC.g:53:13: ( stat )*
-            loop6:
-            do {
-                int alt6=2;
-                int LA6_0 = input.LA(1);
-
-                if ( (LA6_0==ID||(LA6_0>=INT && LA6_0<=FOR)||(LA6_0>=21 && LA6_0<=22)||LA6_0==25) ) {
-                    alt6=1;
-                }
-
-
-                switch (alt6) {
-            	case 1 :
-            	    // SimpleC.g:53:13: stat
-            	    {
-            	    pushFollow(FOLLOW_stat_in_block405);
-            	    stat22=stat();
-
-            	    state._fsp--;
-
-            	    stream_stat.add(stat22.getTree());
-
-            	    }
-            	    break;
-
-            	default :
-            	    break loop6;
-                }
-            } while (true);
-
-            char_literal23=(Token)match(input,26,FOLLOW_26_in_block416);  
-            stream_26.add(char_literal23);
-
-
-
-            // AST REWRITE
-            // elements: stat, variable
-            // token labels: 
-            // rule labels: retval
-            // token list labels: 
-            // rule list labels: 
-            // wildcard labels: 
-            retval.tree = root_0;
-            RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null);
-
-            root_0 = (Object)adaptor.nil();
-            // 55:9: -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* )
-            {
-                // SimpleC.g:55:12: ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* )
-                {
-                Object root_1 = (Object)adaptor.nil();
-                root_1 = (Object)adaptor.becomeRoot((Object)adaptor.create(BLOCK, lc, @"BLOCK"), root_1);
-
-                // SimpleC.g:55:34: ( variable )*
-                while ( stream_variable.hasNext() ) {
-                    adaptor.addChild(root_1, stream_variable.nextTree());
-
-                }
-                stream_variable.reset();
-                // SimpleC.g:55:44: ( stat )*
-                while ( stream_stat.hasNext() ) {
-                    adaptor.addChild(root_1, stream_stat.nextTree());
-
-                }
-                stream_stat.reset();
-
-                adaptor.addChild(root_0, root_1);
-                }
-
-            }
-
-            retval.tree = root_0;
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "block"
-
-    public static class stat_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "stat"
-    // SimpleC.g:58:1: stat : ( forStat | expr ';' | block | assignStat ';' | ';' );
-    public final SimpleCParser.stat_return stat() throws RecognitionException {
-        SimpleCParser.stat_return retval = new SimpleCParser.stat_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token char_literal26=null;
-        Token char_literal29=null;
-        Token char_literal30=null;
-        SimpleCParser.forStat_return forStat24 = null;
-
-        SimpleCParser.expr_return expr25 = null;
-
-        SimpleCParser.block_return block27 = null;
-
-        SimpleCParser.assignStat_return assignStat28 = null;
-
-
-        Object char_literal26_tree=null;
-        Object char_literal29_tree=null;
-        Object char_literal30_tree=null;
-
-        try {
-            // SimpleC.g:58:5: ( forStat | expr ';' | block | assignStat ';' | ';' )
-            int alt7=5;
-            switch ( input.LA(1) ) {
-            case FOR:
-                {
-                alt7=1;
-                }
-                break;
-            case ID:
-                {
-                int LA7_2 = input.LA(2);
-
-                if ( (LA7_2==EQ) ) {
-                    alt7=4;
-                }
-                else if ( ((LA7_2>=EQEQ && LA7_2<=PLUS)||LA7_2==21) ) {
-                    alt7=2;
-                }
-                else {
-                    NoViableAltException nvae =
-                        new NoViableAltException("", 7, 2, input);
-
-                    throw nvae;
-                }
-                }
-                break;
-            case INT:
-            case 22:
-                {
-                alt7=2;
-                }
-                break;
-            case 25:
-                {
-                alt7=3;
-                }
-                break;
-            case 21:
-                {
-                alt7=5;
-                }
-                break;
-            default:
-                NoViableAltException nvae =
-                    new NoViableAltException("", 7, 0, input);
-
-                throw nvae;
-            }
-
-            switch (alt7) {
-                case 1 :
-                    // SimpleC.g:58:7: forStat
-                    {
-                    root_0 = (Object)adaptor.nil();
-
-                    pushFollow(FOLLOW_forStat_in_stat449);
-                    forStat24=forStat();
-
-                    state._fsp--;
-
-                    adaptor.addChild(root_0, forStat24.getTree());
-
-                    }
-                    break;
-                case 2 :
-                    // SimpleC.g:59:7: expr ';'
-                    {
-                    root_0 = (Object)adaptor.nil();
-
-                    pushFollow(FOLLOW_expr_in_stat457);
-                    expr25=expr();
-
-                    state._fsp--;
-
-                    adaptor.addChild(root_0, expr25.getTree());
-                    char_literal26=(Token)match(input,21,FOLLOW_21_in_stat459); 
-
-                    }
-                    break;
-                case 3 :
-                    // SimpleC.g:60:7: block
-                    {
-                    root_0 = (Object)adaptor.nil();
-
-                    pushFollow(FOLLOW_block_in_stat468);
-                    block27=block();
-
-                    state._fsp--;
-
-                    adaptor.addChild(root_0, block27.getTree());
-
-                    }
-                    break;
-                case 4 :
-                    // SimpleC.g:61:7: assignStat ';'
-                    {
-                    root_0 = (Object)adaptor.nil();
-
-                    pushFollow(FOLLOW_assignStat_in_stat476);
-                    assignStat28=assignStat();
-
-                    state._fsp--;
-
-                    adaptor.addChild(root_0, assignStat28.getTree());
-                    char_literal29=(Token)match(input,21,FOLLOW_21_in_stat478); 
-
-                    }
-                    break;
-                case 5 :
-                    // SimpleC.g:62:7: ';'
-                    {
-                    root_0 = (Object)adaptor.nil();
-
-                    char_literal30=(Token)match(input,21,FOLLOW_21_in_stat487); 
-
-                    }
-                    break;
-
-            }
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "stat"
-
-    public static class forStat_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "forStat"
-    // SimpleC.g:65:1: forStat : 'for' '(' start= assignStat ';' expr ';' next= assignStat ')' block -> ^( 'for' $start expr $next block ) ;
-    public final SimpleCParser.forStat_return forStat() throws RecognitionException {
-        SimpleCParser.forStat_return retval = new SimpleCParser.forStat_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token string_literal31=null;
-        Token char_literal32=null;
-        Token char_literal33=null;
-        Token char_literal35=null;
-        Token char_literal36=null;
-        SimpleCParser.assignStat_return start = null;
-
-        SimpleCParser.assignStat_return next = null;
-
-        SimpleCParser.expr_return expr34 = null;
-
-        SimpleCParser.block_return block37 = null;
-
-
-        Object string_literal31_tree=null;
-        Object char_literal32_tree=null;
-        Object char_literal33_tree=null;
-        Object char_literal35_tree=null;
-        Object char_literal36_tree=null;
-        RewriteRuleTokenStream stream_21=new RewriteRuleTokenStream(adaptor,"token 21");
-        RewriteRuleTokenStream stream_FOR=new RewriteRuleTokenStream(adaptor,"token FOR");
-        RewriteRuleTokenStream stream_22=new RewriteRuleTokenStream(adaptor,"token 22");
-        RewriteRuleTokenStream stream_24=new RewriteRuleTokenStream(adaptor,"token 24");
-        RewriteRuleSubtreeStream stream_assignStat=new RewriteRuleSubtreeStream(adaptor,"rule assignStat");
-        RewriteRuleSubtreeStream stream_block=new RewriteRuleSubtreeStream(adaptor,"rule block");
-        RewriteRuleSubtreeStream stream_expr=new RewriteRuleSubtreeStream(adaptor,"rule expr");
-        try {
-            // SimpleC.g:66:5: ( 'for' '(' start= assignStat ';' expr ';' next= assignStat ')' block -> ^( 'for' $start expr $next block ) )
-            // SimpleC.g:66:9: 'for' '(' start= assignStat ';' expr ';' next= assignStat ')' block
-            {
-            string_literal31=(Token)match(input,FOR,FOLLOW_FOR_in_forStat507);  
-            stream_FOR.add(string_literal31);
-
-            char_literal32=(Token)match(input,22,FOLLOW_22_in_forStat509);  
-            stream_22.add(char_literal32);
-
-            pushFollow(FOLLOW_assignStat_in_forStat513);
-            start=assignStat();
-
-            state._fsp--;
-
-            stream_assignStat.add(start.getTree());
-            char_literal33=(Token)match(input,21,FOLLOW_21_in_forStat515);  
-            stream_21.add(char_literal33);
-
-            pushFollow(FOLLOW_expr_in_forStat517);
-            expr34=expr();
-
-            state._fsp--;
-
-            stream_expr.add(expr34.getTree());
-            char_literal35=(Token)match(input,21,FOLLOW_21_in_forStat519);  
-            stream_21.add(char_literal35);
-
-            pushFollow(FOLLOW_assignStat_in_forStat523);
-            next=assignStat();
-
-            state._fsp--;
-
-            stream_assignStat.add(next.getTree());
-            char_literal36=(Token)match(input,24,FOLLOW_24_in_forStat525);  
-            stream_24.add(char_literal36);
-
-            pushFollow(FOLLOW_block_in_forStat527);
-            block37=block();
-
-            state._fsp--;
-
-            stream_block.add(block37.getTree());
-
-
-            // AST REWRITE
-            // elements: next, start, FOR, block, expr
-            // token labels: 
-            // rule labels: retval, start, next
-            // token list labels: 
-            // rule list labels: 
-            // wildcard labels: 
-            retval.tree = root_0;
-            RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null);
-            RewriteRuleSubtreeStream stream_start=new RewriteRuleSubtreeStream(adaptor,"rule start",start!=null?start.tree:null);
-            RewriteRuleSubtreeStream stream_next=new RewriteRuleSubtreeStream(adaptor,"rule next",next!=null?next.tree:null);
-
-            root_0 = (Object)adaptor.nil();
-            // 67:9: -> ^( 'for' $start expr $next block )
-            {
-                // SimpleC.g:67:12: ^( 'for' $start expr $next block )
-                {
-                Object root_1 = (Object)adaptor.nil();
-                root_1 = (Object)adaptor.becomeRoot(stream_FOR.nextNode(), root_1);
-
-                adaptor.addChild(root_1, stream_start.nextTree());
-                adaptor.addChild(root_1, stream_expr.nextTree());
-                adaptor.addChild(root_1, stream_next.nextTree());
-                adaptor.addChild(root_1, stream_block.nextTree());
-
-                adaptor.addChild(root_0, root_1);
-                }
-
-            }
-
-            retval.tree = root_0;
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "forStat"
-
-    public static class assignStat_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "assignStat"
-    // SimpleC.g:70:1: assignStat : ID EQ expr -> ^( EQ ID expr ) ;
-    public final SimpleCParser.assignStat_return assignStat() throws RecognitionException {
-        SimpleCParser.assignStat_return retval = new SimpleCParser.assignStat_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token ID38=null;
-        Token EQ39=null;
-        SimpleCParser.expr_return expr40 = null;
-
-
-        Object ID38_tree=null;
-        Object EQ39_tree=null;
-        RewriteRuleTokenStream stream_EQ=new RewriteRuleTokenStream(adaptor,"token EQ");
-        RewriteRuleTokenStream stream_ID=new RewriteRuleTokenStream(adaptor,"token ID");
-        RewriteRuleSubtreeStream stream_expr=new RewriteRuleSubtreeStream(adaptor,"rule expr");
-        try {
-            // SimpleC.g:71:5: ( ID EQ expr -> ^( EQ ID expr ) )
-            // SimpleC.g:71:9: ID EQ expr
-            {
-            ID38=(Token)match(input,ID,FOLLOW_ID_in_assignStat570);  
-            stream_ID.add(ID38);
-
-            EQ39=(Token)match(input,EQ,FOLLOW_EQ_in_assignStat572);  
-            stream_EQ.add(EQ39);
-
-            pushFollow(FOLLOW_expr_in_assignStat574);
-            expr40=expr();
-
-            state._fsp--;
-
-            stream_expr.add(expr40.getTree());
-
-
-            // AST REWRITE
-            // elements: EQ, ID, expr
-            // token labels: 
-            // rule labels: retval
-            // token list labels: 
-            // rule list labels: 
-            // wildcard labels: 
-            retval.tree = root_0;
-            RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null);
-
-            root_0 = (Object)adaptor.nil();
-            // 71:20: -> ^( EQ ID expr )
-            {
-                // SimpleC.g:71:23: ^( EQ ID expr )
-                {
-                Object root_1 = (Object)adaptor.nil();
-                root_1 = (Object)adaptor.becomeRoot(stream_EQ.nextNode(), root_1);
-
-                adaptor.addChild(root_1, stream_ID.nextNode());
-                adaptor.addChild(root_1, stream_expr.nextTree());
-
-                adaptor.addChild(root_0, root_1);
-                }
-
-            }
-
-            retval.tree = root_0;
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "assignStat"
-
-    public static class expr_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "expr"
-    // SimpleC.g:74:1: expr : condExpr ;
-    public final SimpleCParser.expr_return expr() throws RecognitionException {
-        SimpleCParser.expr_return retval = new SimpleCParser.expr_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        SimpleCParser.condExpr_return condExpr41 = null;
-
-
-
-        try {
-            // SimpleC.g:74:5: ( condExpr )
-            // SimpleC.g:74:9: condExpr
-            {
-            root_0 = (Object)adaptor.nil();
-
-            pushFollow(FOLLOW_condExpr_in_expr598);
-            condExpr41=condExpr();
-
-            state._fsp--;
-
-            adaptor.addChild(root_0, condExpr41.getTree());
-
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "expr"
-
-    public static class condExpr_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "condExpr"
-    // SimpleC.g:77:1: condExpr : aexpr ( ( '==' | '<' ) aexpr )? ;
-    public final SimpleCParser.condExpr_return condExpr() throws RecognitionException {
-        SimpleCParser.condExpr_return retval = new SimpleCParser.condExpr_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token string_literal43=null;
-        Token char_literal44=null;
-        SimpleCParser.aexpr_return aexpr42 = null;
-
-        SimpleCParser.aexpr_return aexpr45 = null;
-
-
-        Object string_literal43_tree=null;
-        Object char_literal44_tree=null;
-
-        try {
-            // SimpleC.g:78:5: ( aexpr ( ( '==' | '<' ) aexpr )? )
-            // SimpleC.g:78:9: aexpr ( ( '==' | '<' ) aexpr )?
-            {
-            root_0 = (Object)adaptor.nil();
-
-            pushFollow(FOLLOW_aexpr_in_condExpr617);
-            aexpr42=aexpr();
-
-            state._fsp--;
-
-            adaptor.addChild(root_0, aexpr42.getTree());
-            // SimpleC.g:78:15: ( ( '==' | '<' ) aexpr )?
-            int alt9=2;
-            int LA9_0 = input.LA(1);
-
-            if ( ((LA9_0>=EQEQ && LA9_0<=LT)) ) {
-                alt9=1;
-            }
-            switch (alt9) {
-                case 1 :
-                    // SimpleC.g:78:17: ( '==' | '<' ) aexpr
-                    {
-                    // SimpleC.g:78:17: ( '==' | '<' )
-                    int alt8=2;
-                    int LA8_0 = input.LA(1);
-
-                    if ( (LA8_0==EQEQ) ) {
-                        alt8=1;
-                    }
-                    else if ( (LA8_0==LT) ) {
-                        alt8=2;
-                    }
-                    else {
-                        NoViableAltException nvae =
-                            new NoViableAltException("", 8, 0, input);
-
-                        throw nvae;
-                    }
-                    switch (alt8) {
-                        case 1 :
-                            // SimpleC.g:78:18: '=='
-                            {
-                            string_literal43=(Token)match(input,EQEQ,FOLLOW_EQEQ_in_condExpr622); 
-                            string_literal43_tree = (Object)adaptor.create(string_literal43);
-                            root_0 = (Object)adaptor.becomeRoot(string_literal43_tree, root_0);
-
-
-                            }
-                            break;
-                        case 2 :
-                            // SimpleC.g:78:26: '<'
-                            {
-                            char_literal44=(Token)match(input,LT,FOLLOW_LT_in_condExpr627); 
-                            char_literal44_tree = (Object)adaptor.create(char_literal44);
-                            root_0 = (Object)adaptor.becomeRoot(char_literal44_tree, root_0);
-
-
-                            }
-                            break;
-
-                    }
-
-                    pushFollow(FOLLOW_aexpr_in_condExpr631);
-                    aexpr45=aexpr();
-
-                    state._fsp--;
-
-                    adaptor.addChild(root_0, aexpr45.getTree());
-
-                    }
-                    break;
-
-            }
-
-
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "condExpr"
-
-    public static class aexpr_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "aexpr"
-    // SimpleC.g:81:1: aexpr : atom ( '+' atom )* ;
-    public final SimpleCParser.aexpr_return aexpr() throws RecognitionException {
-        SimpleCParser.aexpr_return retval = new SimpleCParser.aexpr_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token char_literal47=null;
-        SimpleCParser.atom_return atom46 = null;
-
-        SimpleCParser.atom_return atom48 = null;
-
-
-        Object char_literal47_tree=null;
-
-        try {
-            // SimpleC.g:82:5: ( atom ( '+' atom )* )
-            // SimpleC.g:82:9: atom ( '+' atom )*
-            {
-            root_0 = (Object)adaptor.nil();
-
-            pushFollow(FOLLOW_atom_in_aexpr653);
-            atom46=atom();
-
-            state._fsp--;
-
-            adaptor.addChild(root_0, atom46.getTree());
-            // SimpleC.g:82:14: ( '+' atom )*
-            loop10:
-            do {
-                int alt10=2;
-                int LA10_0 = input.LA(1);
-
-                if ( (LA10_0==PLUS) ) {
-                    alt10=1;
-                }
-
-
-                switch (alt10) {
-            	case 1 :
-            	    // SimpleC.g:82:16: '+' atom
-            	    {
-            	    char_literal47=(Token)match(input,PLUS,FOLLOW_PLUS_in_aexpr657); 
-            	    char_literal47_tree = (Object)adaptor.create(char_literal47);
-            	    root_0 = (Object)adaptor.becomeRoot(char_literal47_tree, root_0);
-
-            	    pushFollow(FOLLOW_atom_in_aexpr660);
-            	    atom48=atom();
-
-            	    state._fsp--;
-
-            	    adaptor.addChild(root_0, atom48.getTree());
-
-            	    }
-            	    break;
-
-            	default :
-            	    break loop10;
-                }
-            } while (true);
-
-
-            }
-
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "aexpr"
-
-    public static class atom_return extends ParserRuleReturnScope {
-        Object tree;
-        public Object getTree() { return tree; }
-    };
-
-    // $ANTLR start "atom"
-    // SimpleC.g:85:1: atom : ( ID | INT | '(' expr ')' -> expr );
-    public final SimpleCParser.atom_return atom() throws RecognitionException {
-        SimpleCParser.atom_return retval = new SimpleCParser.atom_return();
-        retval.start = input.LT(1);
-
-        Object root_0 = null;
-
-        Token ID49=null;
-        Token INT50=null;
-        Token char_literal51=null;
-        Token char_literal53=null;
-        SimpleCParser.expr_return expr52 = null;
-
-
-        Object ID49_tree=null;
-        Object INT50_tree=null;
-        Object char_literal51_tree=null;
-        Object char_literal53_tree=null;
-        RewriteRuleTokenStream stream_22=new RewriteRuleTokenStream(adaptor,"token 22");
-        RewriteRuleTokenStream stream_24=new RewriteRuleTokenStream(adaptor,"token 24");
-        RewriteRuleSubtreeStream stream_expr=new RewriteRuleSubtreeStream(adaptor,"rule expr");
-        try {
-            // SimpleC.g:86:5: ( ID | INT | '(' expr ')' -> expr )
-            int alt11=3;
-            switch ( input.LA(1) ) {
-            case ID:
-                {
-                alt11=1;
-                }
-                break;
-            case INT:
-                {
-                alt11=2;
-                }
-                break;
-            case 22:
-                {
-                alt11=3;
-                }
-                break;
-            default:
-                NoViableAltException nvae =
-                    new NoViableAltException("", 11, 0, input);
-
-                throw nvae;
-            }
-
-            switch (alt11) {
-                case 1 :
-                    // SimpleC.g:86:7: ID
-                    {
-                    root_0 = (Object)adaptor.nil();
-
-                    ID49=(Token)match(input,ID,FOLLOW_ID_in_atom680); 
-                    ID49_tree = (Object)adaptor.create(ID49);
-                    adaptor.addChild(root_0, ID49_tree);
-
-
-                    }
-                    break;
-                case 2 :
-                    // SimpleC.g:87:7: INT
-                    {
-                    root_0 = (Object)adaptor.nil();
-
-                    INT50=(Token)match(input,INT,FOLLOW_INT_in_atom694); 
-                    INT50_tree = (Object)adaptor.create(INT50);
-                    adaptor.addChild(root_0, INT50_tree);
-
-
-                    }
-                    break;
-                case 3 :
-                    // SimpleC.g:88:7: '(' expr ')'
-                    {
-                    char_literal51=(Token)match(input,22,FOLLOW_22_in_atom708);  
-                    stream_22.add(char_literal51);
-
-                    pushFollow(FOLLOW_expr_in_atom710);
-                    expr52=expr();
-
-                    state._fsp--;
-
-                    stream_expr.add(expr52.getTree());
-                    char_literal53=(Token)match(input,24,FOLLOW_24_in_atom712);  
-                    stream_24.add(char_literal53);
-
-
-
-                    // AST REWRITE
-                    // elements: expr
-                    // token labels: 
-                    // rule labels: retval
-                    // token list labels: 
-                    // rule list labels: 
-                    // wildcard labels: 
-                    retval.tree = root_0;
-                    RewriteRuleSubtreeStream stream_retval=new RewriteRuleSubtreeStream(adaptor,"rule retval",retval!=null?retval.tree:null);
-
-                    root_0 = (Object)adaptor.nil();
-                    // 88:20: -> expr
-                    {
-                        adaptor.addChild(root_0, stream_expr.nextTree());
-
-                    }
-
-                    retval.tree = root_0;
-                    }
-                    break;
-
-            }
-            retval.stop = input.LT(-1);
-
-            retval.tree = (Object)adaptor.rulePostProcessing(root_0);
-            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-    	retval.tree = (Object)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "atom"
-
-    // Delegated rules
-
-
-    protected DFA2 dfa2 = new DFA2(this);
-    static final String DFA2_eotS =
-        "\15\uffff";
-    static final String DFA2_eofS =
-        "\15\uffff";
-    static final String DFA2_minS =
-        "\2\12\1\25\1\12\1\uffff\1\12\1\25\1\27\2\uffff\2\12\1\27";
-    static final String DFA2_maxS =
-        "\1\20\1\12\1\26\1\30\1\uffff\1\12\1\31\1\30\2\uffff\1\20\1\12\1"+
-        "\30";
-    static final String DFA2_acceptS =
-        "\4\uffff\1\1\3\uffff\1\3\1\2\3\uffff";
-    static final String DFA2_specialS =
-        "\15\uffff}>";
-    static final String[] DFA2_transitionS = {
-            "\1\1\3\uffff\3\1",
-            "\1\2",
-            "\1\4\1\3",
-            "\1\5\3\uffff\3\5\7\uffff\1\6",
-            "",
-            "\1\7",
-            "\1\11\3\uffff\1\10",
-            "\1\12\1\6",
-            "",
-            "",
-            "\1\13\3\uffff\3\13",
-            "\1\14",
-            "\1\12\1\6"
-    };
-
-    static final short[] DFA2_eot = DFA.unpackEncodedString(DFA2_eotS);
-    static final short[] DFA2_eof = DFA.unpackEncodedString(DFA2_eofS);
-    static final char[] DFA2_min = DFA.unpackEncodedStringToUnsignedChars(DFA2_minS);
-    static final char[] DFA2_max = DFA.unpackEncodedStringToUnsignedChars(DFA2_maxS);
-    static final short[] DFA2_accept = DFA.unpackEncodedString(DFA2_acceptS);
-    static final short[] DFA2_special = DFA.unpackEncodedString(DFA2_specialS);
-    static final short[][] DFA2_transition;
-
-    static {
-        int numStates = DFA2_transitionS.length;
-        DFA2_transition = new short[numStates][];
-        for (int i=0; i<numStates; i++) {
-            DFA2_transition[i] = DFA.unpackEncodedString(DFA2_transitionS[i]);
-        }
-    }
-
-    class DFA2 extends DFA {
-
-        public DFA2(BaseRecognizer recognizer) {
-            this.recognizer = recognizer;
-            this.decisionNumber = 2;
-            this.eot = DFA2_eot;
-            this.eof = DFA2_eof;
-            this.min = DFA2_min;
-            this.max = DFA2_max;
-            this.accept = DFA2_accept;
-            this.special = DFA2_special;
-            this.transition = DFA2_transition;
-        }
-        public String getDescription() {
-            return "20:1: declaration : ( variable | functionHeader ';' -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) );";
-        }
-    }
- 
-
-    public static final BitSet FOLLOW_declaration_in_program85 = new BitSet(new long[]{0x000000000001C402L});
-    public static final BitSet FOLLOW_variable_in_declaration105 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_functionHeader_in_declaration115 = new BitSet(new long[]{0x0000000000200000L});
-    public static final BitSet FOLLOW_21_in_declaration117 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_functionHeader_in_declaration135 = new BitSet(new long[]{0x0000000002000000L});
-    public static final BitSet FOLLOW_block_in_declaration137 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_type_in_variable166 = new BitSet(new long[]{0x0000000000000400L});
-    public static final BitSet FOLLOW_declarator_in_variable168 = new BitSet(new long[]{0x0000000000200000L});
-    public static final BitSet FOLLOW_21_in_variable170 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_ID_in_declarator199 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_type_in_functionHeader219 = new BitSet(new long[]{0x0000000000000400L});
-    public static final BitSet FOLLOW_ID_in_functionHeader221 = new BitSet(new long[]{0x0000000000400000L});
-    public static final BitSet FOLLOW_22_in_functionHeader223 = new BitSet(new long[]{0x000000000101C400L});
-    public static final BitSet FOLLOW_formalParameter_in_functionHeader227 = new BitSet(new long[]{0x0000000001800000L});
-    public static final BitSet FOLLOW_23_in_functionHeader231 = new BitSet(new long[]{0x000000000001C400L});
-    public static final BitSet FOLLOW_formalParameter_in_functionHeader233 = new BitSet(new long[]{0x0000000001800000L});
-    public static final BitSet FOLLOW_24_in_functionHeader241 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_type_in_formalParameter281 = new BitSet(new long[]{0x0000000000000400L});
-    public static final BitSet FOLLOW_declarator_in_formalParameter283 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_set_in_type0 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_25_in_block376 = new BitSet(new long[]{0x000000000661F400L});
-    public static final BitSet FOLLOW_variable_in_block390 = new BitSet(new long[]{0x000000000661F400L});
-    public static final BitSet FOLLOW_stat_in_block405 = new BitSet(new long[]{0x0000000006603400L});
-    public static final BitSet FOLLOW_26_in_block416 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_forStat_in_stat449 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_expr_in_stat457 = new BitSet(new long[]{0x0000000000200000L});
-    public static final BitSet FOLLOW_21_in_stat459 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_block_in_stat468 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_assignStat_in_stat476 = new BitSet(new long[]{0x0000000000200000L});
-    public static final BitSet FOLLOW_21_in_stat478 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_21_in_stat487 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_FOR_in_forStat507 = new BitSet(new long[]{0x0000000000400000L});
-    public static final BitSet FOLLOW_22_in_forStat509 = new BitSet(new long[]{0x0000000000000400L});
-    public static final BitSet FOLLOW_assignStat_in_forStat513 = new BitSet(new long[]{0x0000000000200000L});
-    public static final BitSet FOLLOW_21_in_forStat515 = new BitSet(new long[]{0x0000000000401400L});
-    public static final BitSet FOLLOW_expr_in_forStat517 = new BitSet(new long[]{0x0000000000200000L});
-    public static final BitSet FOLLOW_21_in_forStat519 = new BitSet(new long[]{0x0000000000000400L});
-    public static final BitSet FOLLOW_assignStat_in_forStat523 = new BitSet(new long[]{0x0000000001000000L});
-    public static final BitSet FOLLOW_24_in_forStat525 = new BitSet(new long[]{0x0000000002000000L});
-    public static final BitSet FOLLOW_block_in_forStat527 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_ID_in_assignStat570 = new BitSet(new long[]{0x0000000000000800L});
-    public static final BitSet FOLLOW_EQ_in_assignStat572 = new BitSet(new long[]{0x0000000000401400L});
-    public static final BitSet FOLLOW_expr_in_assignStat574 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_condExpr_in_expr598 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_aexpr_in_condExpr617 = new BitSet(new long[]{0x0000000000060002L});
-    public static final BitSet FOLLOW_EQEQ_in_condExpr622 = new BitSet(new long[]{0x0000000000401400L});
-    public static final BitSet FOLLOW_LT_in_condExpr627 = new BitSet(new long[]{0x0000000000401400L});
-    public static final BitSet FOLLOW_aexpr_in_condExpr631 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_atom_in_aexpr653 = new BitSet(new long[]{0x0000000000080002L});
-    public static final BitSet FOLLOW_PLUS_in_aexpr657 = new BitSet(new long[]{0x0000000000401400L});
-    public static final BitSet FOLLOW_atom_in_aexpr660 = new BitSet(new long[]{0x0000000000080002L});
-    public static final BitSet FOLLOW_ID_in_atom680 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_INT_in_atom694 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_22_in_atom708 = new BitSet(new long[]{0x0000000000401400L});
-    public static final BitSet FOLLOW_expr_in_atom710 = new BitSet(new long[]{0x0000000001000000L});
-    public static final BitSet FOLLOW_24_in_atom712 = new BitSet(new long[]{0x0000000000000002L});
-
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.m b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.m
deleted file mode 100644
index 882a065..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.m
+++ /dev/null
@@ -1,3106 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : SimpleC.g
- *     -                            On : 2011-05-06 15:09:17
- *     -                for the parser : SimpleCParserParser
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} SimpleC.g 2011-05-06 15:09:17
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SimpleCParser.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-#pragma mark Cyclic DFA implementation start DFA2
-@implementation DFA2
-const static NSInteger dfa2_eot[13] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static NSInteger dfa2_eof[13] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static unichar dfa2_min[13] =
-    {9,14,18,9,0,14,17,10,0,0,9,14,10};
-const static unichar dfa2_max[13] =
-    {24,14,23,24,0,14,23,22,0,0,24,14,22};
-const static NSInteger dfa2_accept[13] =
-    {-1,-1,-1,-1,1,-1,-1,-1,2,3,-1,-1,-1};
-const static NSInteger dfa2_special[13] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static NSInteger dfa2_transition[] = {};
-const static NSInteger dfa2_transition0[] = {9, -1, -1, -1, -1, -1, 8};
-const static NSInteger dfa2_transition1[] = {10, -1, -1, -1, -1, -1, -1, 
- -1, -1, -1, -1, -1, 6};
-const static NSInteger dfa2_transition2[] = {5, -1, -1, -1, -1, 5, -1, 5, 
- -1, -1, -1, -1, -1, 6, -1, 5};
-const static NSInteger dfa2_transition3[] = {11, -1, -1, -1, -1, 11, -1, 
- 11, -1, -1, -1, -1, -1, -1, -1, 11};
-const static NSInteger dfa2_transition4[] = {3, -1, -1, -1, -1, 4};
-const static NSInteger dfa2_transition5[] = {1, -1, -1, -1, -1, 1, -1, 1, 
- -1, -1, -1, -1, -1, -1, -1, 1};
-const static NSInteger dfa2_transition6[] = {7};
-const static NSInteger dfa2_transition7[] = {12};
-const static NSInteger dfa2_transition8[] = {2};
-
-
-+ (id) newDFA2WithRecognizer:(ANTLRBaseRecognizer *)aRecognizer
-{
-    return [[[DFA2 alloc] initWithRecognizer:aRecognizer] retain];
-}
-
-- (id) initWithRecognizer:(ANTLRBaseRecognizer *) theRecognizer
-{
-    self = [super initWithRecognizer:theRecognizer];
-    if ( self != nil ) {
-        decisionNumber = 2;
-        eot = dfa2_eot;
-        eof = dfa2_eof;
-        min = dfa2_min;
-        max = dfa2_max;
-        accept = dfa2_accept;
-        special = dfa2_special;
-        if (!(transition = calloc(13, sizeof(void*)))) {
-            [self release];
-            return nil;
-        }
-        len = 13;
-        transition[0] = dfa2_transition5;
-        transition[1] = dfa2_transition8;
-        transition[2] = dfa2_transition4;
-        transition[3] = dfa2_transition2;
-
-        transition[4] = dfa2_transition6;
-        transition[5] = dfa2_transition0;
-        transition[6] = dfa2_transition1;
-
-
-        transition[7] = dfa2_transition3;
-        transition[8] = dfa2_transition7;
-        transition[9] = dfa2_transition1;
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    free(transition);
-    [super dealloc];
-}
-
-- (NSString *) description
-{
-    return @"20:1: declaration : ( variable | functionHeader K_SEMICOLON -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) );";
-}
-
-
-@end /* end DFA2 implementation */
-
-#pragma mark Cyclic DFA implementation end DFA2
-
-
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_declaration_in_program85;
-static const unsigned long long FOLLOW_declaration_in_program85_data[] = { 0x0000000001014202LL};
-static ANTLRBitSet *FOLLOW_variable_in_declaration105;
-static const unsigned long long FOLLOW_variable_in_declaration105_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_functionHeader_in_declaration115;
-static const unsigned long long FOLLOW_functionHeader_in_declaration115_data[] = { 0x0000000000800000LL};
-static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_declaration117;
-static const unsigned long long FOLLOW_K_SEMICOLON_in_declaration117_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_functionHeader_in_declaration135;
-static const unsigned long long FOLLOW_functionHeader_in_declaration135_data[] = { 0x0000000000020000LL};
-static ANTLRBitSet *FOLLOW_block_in_declaration137;
-static const unsigned long long FOLLOW_block_in_declaration137_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_type_in_variable166;
-static const unsigned long long FOLLOW_type_in_variable166_data[] = { 0x0000000000004000LL};
-static ANTLRBitSet *FOLLOW_declarator_in_variable168;
-static const unsigned long long FOLLOW_declarator_in_variable168_data[] = { 0x0000000000800000LL};
-static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_variable170;
-static const unsigned long long FOLLOW_K_SEMICOLON_in_variable170_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_declarator199;
-static const unsigned long long FOLLOW_K_ID_in_declarator199_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_type_in_functionHeader219;
-static const unsigned long long FOLLOW_type_in_functionHeader219_data[] = { 0x0000000000004000LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_functionHeader221;
-static const unsigned long long FOLLOW_K_ID_in_functionHeader221_data[] = { 0x0000000000040000LL};
-static ANTLRBitSet *FOLLOW_K_LCURVE_in_functionHeader223;
-static const unsigned long long FOLLOW_K_LCURVE_in_functionHeader223_data[] = { 0x0000000001414200LL};
-static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader227;
-static const unsigned long long FOLLOW_formalParameter_in_functionHeader227_data[] = { 0x0000000000400400LL};
-static ANTLRBitSet *FOLLOW_K_COMMA_in_functionHeader231;
-static const unsigned long long FOLLOW_K_COMMA_in_functionHeader231_data[] = { 0x0000000001014200LL};
-static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader233;
-static const unsigned long long FOLLOW_formalParameter_in_functionHeader233_data[] = { 0x0000000000400400LL};
-static ANTLRBitSet *FOLLOW_K_RCURVE_in_functionHeader241;
-static const unsigned long long FOLLOW_K_RCURVE_in_functionHeader241_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_type_in_formalParameter281;
-static const unsigned long long FOLLOW_type_in_formalParameter281_data[] = { 0x0000000000004000LL};
-static ANTLRBitSet *FOLLOW_declarator_in_formalParameter283;
-static const unsigned long long FOLLOW_declarator_in_formalParameter283_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_LCURLY_in_block376;
-static const unsigned long long FOLLOW_K_LCURLY_in_block376_data[] = { 0x0000000001A7E200LL};
-static ANTLRBitSet *FOLLOW_variable_in_block390;
-static const unsigned long long FOLLOW_variable_in_block390_data[] = { 0x0000000001A7E200LL};
-static ANTLRBitSet *FOLLOW_stat_in_block405;
-static const unsigned long long FOLLOW_stat_in_block405_data[] = { 0x0000000000A6E000LL};
-static ANTLRBitSet *FOLLOW_K_RCURLY_in_block416;
-static const unsigned long long FOLLOW_K_RCURLY_in_block416_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_forStat_in_stat449;
-static const unsigned long long FOLLOW_forStat_in_stat449_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_expr_in_stat457;
-static const unsigned long long FOLLOW_expr_in_stat457_data[] = { 0x0000000000800000LL};
-static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_stat459;
-static const unsigned long long FOLLOW_K_SEMICOLON_in_stat459_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_block_in_stat468;
-static const unsigned long long FOLLOW_block_in_stat468_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_assignStat_in_stat476;
-static const unsigned long long FOLLOW_assignStat_in_stat476_data[] = { 0x0000000000800000LL};
-static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_stat478;
-static const unsigned long long FOLLOW_K_SEMICOLON_in_stat478_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_stat487;
-static const unsigned long long FOLLOW_K_SEMICOLON_in_stat487_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_FOR_in_forStat507;
-static const unsigned long long FOLLOW_K_FOR_in_forStat507_data[] = { 0x0000000000040000LL};
-static ANTLRBitSet *FOLLOW_K_LCURVE_in_forStat509;
-static const unsigned long long FOLLOW_K_LCURVE_in_forStat509_data[] = { 0x0000000000004000LL};
-static ANTLRBitSet *FOLLOW_assignStat_in_forStat513;
-static const unsigned long long FOLLOW_assignStat_in_forStat513_data[] = { 0x0000000000800000LL};
-static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_forStat515;
-static const unsigned long long FOLLOW_K_SEMICOLON_in_forStat515_data[] = { 0x000000000004C000LL};
-static ANTLRBitSet *FOLLOW_expr_in_forStat517;
-static const unsigned long long FOLLOW_expr_in_forStat517_data[] = { 0x0000000000800000LL};
-static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_forStat519;
-static const unsigned long long FOLLOW_K_SEMICOLON_in_forStat519_data[] = { 0x0000000000004000LL};
-static ANTLRBitSet *FOLLOW_assignStat_in_forStat523;
-static const unsigned long long FOLLOW_assignStat_in_forStat523_data[] = { 0x0000000000400000LL};
-static ANTLRBitSet *FOLLOW_K_RCURVE_in_forStat525;
-static const unsigned long long FOLLOW_K_RCURVE_in_forStat525_data[] = { 0x0000000000020000LL};
-static ANTLRBitSet *FOLLOW_block_in_forStat527;
-static const unsigned long long FOLLOW_block_in_forStat527_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_assignStat570;
-static const unsigned long long FOLLOW_K_ID_in_assignStat570_data[] = { 0x0000000000000800LL};
-static ANTLRBitSet *FOLLOW_K_EQ_in_assignStat572;
-static const unsigned long long FOLLOW_K_EQ_in_assignStat572_data[] = { 0x000000000004C000LL};
-static ANTLRBitSet *FOLLOW_expr_in_assignStat574;
-static const unsigned long long FOLLOW_expr_in_assignStat574_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_condExpr_in_expr598;
-static const unsigned long long FOLLOW_condExpr_in_expr598_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_aexpr_in_condExpr617;
-static const unsigned long long FOLLOW_aexpr_in_condExpr617_data[] = { 0x0000000000081002LL};
-static ANTLRBitSet *FOLLOW_K_EQEQ_in_condExpr622;
-static const unsigned long long FOLLOW_K_EQEQ_in_condExpr622_data[] = { 0x000000000004C000LL};
-static ANTLRBitSet *FOLLOW_K_LT_in_condExpr627;
-static const unsigned long long FOLLOW_K_LT_in_condExpr627_data[] = { 0x000000000004C000LL};
-static ANTLRBitSet *FOLLOW_aexpr_in_condExpr631;
-static const unsigned long long FOLLOW_aexpr_in_condExpr631_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_atom_in_aexpr653;
-static const unsigned long long FOLLOW_atom_in_aexpr653_data[] = { 0x0000000000100002LL};
-static ANTLRBitSet *FOLLOW_K_PLUS_in_aexpr657;
-static const unsigned long long FOLLOW_K_PLUS_in_aexpr657_data[] = { 0x000000000004C000LL};
-static ANTLRBitSet *FOLLOW_atom_in_aexpr660;
-static const unsigned long long FOLLOW_atom_in_aexpr660_data[] = { 0x0000000000100002LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_atom680;
-static const unsigned long long FOLLOW_K_ID_in_atom680_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_INT_in_atom694;
-static const unsigned long long FOLLOW_K_INT_in_atom694_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_LCURVE_in_atom708;
-static const unsigned long long FOLLOW_K_LCURVE_in_atom708_data[] = { 0x000000000004C000LL};
-static ANTLRBitSet *FOLLOW_expr_in_atom710;
-static const unsigned long long FOLLOW_expr_in_atom710_data[] = { 0x0000000000400000LL};
-static ANTLRBitSet *FOLLOW_K_RCURVE_in_atom712;
-static const unsigned long long FOLLOW_K_RCURVE_in_atom712_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule Return Scopes start
-@implementation SimpleCParser_program_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_program_return *)newSimpleCParser_program_return
-{
-    return [[[SimpleCParser_program_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_declaration_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_declaration_return *)newSimpleCParser_declaration_return
-{
-    return [[[SimpleCParser_declaration_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_variable_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_variable_return *)newSimpleCParser_variable_return
-{
-    return [[[SimpleCParser_variable_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_declarator_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_declarator_return *)newSimpleCParser_declarator_return
-{
-    return [[[SimpleCParser_declarator_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_functionHeader_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_functionHeader_return *)newSimpleCParser_functionHeader_return
-{
-    return [[[SimpleCParser_functionHeader_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_formalParameter_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_formalParameter_return *)newSimpleCParser_formalParameter_return
-{
-    return [[[SimpleCParser_formalParameter_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_type_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_type_return *)newSimpleCParser_type_return
-{
-    return [[[SimpleCParser_type_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_block_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_block_return *)newSimpleCParser_block_return
-{
-    return [[[SimpleCParser_block_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_stat_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_stat_return *)newSimpleCParser_stat_return
-{
-    return [[[SimpleCParser_stat_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_forStat_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_forStat_return *)newSimpleCParser_forStat_return
-{
-    return [[[SimpleCParser_forStat_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_assignStat_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_assignStat_return *)newSimpleCParser_assignStat_return
-{
-    return [[[SimpleCParser_assignStat_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_expr_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_expr_return *)newSimpleCParser_expr_return
-{
-    return [[[SimpleCParser_expr_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_condExpr_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_condExpr_return *)newSimpleCParser_condExpr_return
-{
-    return [[[SimpleCParser_condExpr_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_aexpr_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_aexpr_return *)newSimpleCParser_aexpr_return
-{
-    return [[[SimpleCParser_aexpr_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation SimpleCParser_atom_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCParser_atom_return *)newSimpleCParser_atom_return
-{
-    return [[[SimpleCParser_atom_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-//#pragma mark Rule return scopes start
-//
-
-#pragma mark Rule return scopes start
-
-@implementation SimpleCParser  // line 637
-
-/* ObjC start of ruleAttributeScope */
-#pragma mark Dynamic Rule Scopes
-/* ObjC end of ruleAttributeScope */
-#pragma mark global Attribute Scopes
-/* ObjC start globalAttributeScope */
-/* ObjC end globalAttributeScope */
-/* ObjC start actions.(actionScope).synthesize */
-/* ObjC end actions.(actionScope).synthesize */
-/* ObjC start synthesize() */
-/* AST genericParser.synthesize */
-/* AST parserProperties */
-@synthesize treeAdaptor;
-/* ObjC end synthesize() */
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_declaration_in_program85 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declaration_in_program85_data Count:(NSUInteger)1] retain];
-    FOLLOW_variable_in_declaration105 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_declaration105_data Count:(NSUInteger)1] retain];
-    FOLLOW_functionHeader_in_declaration115 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration115_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_SEMICOLON_in_declaration117 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_declaration117_data Count:(NSUInteger)1] retain];
-    FOLLOW_functionHeader_in_declaration135 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration135_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_declaration137 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_declaration137_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_variable166 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_variable166_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_variable168 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_variable168_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_SEMICOLON_in_variable170 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_variable170_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_declarator199 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_declarator199_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_functionHeader219 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_functionHeader219_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_functionHeader221 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_functionHeader221_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_LCURVE_in_functionHeader223 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_LCURVE_in_functionHeader223_data Count:(NSUInteger)1] retain];
-    FOLLOW_formalParameter_in_functionHeader227 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader227_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_COMMA_in_functionHeader231 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_COMMA_in_functionHeader231_data Count:(NSUInteger)1] retain];
-    FOLLOW_formalParameter_in_functionHeader233 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader233_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_RCURVE_in_functionHeader241 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_RCURVE_in_functionHeader241_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_formalParameter281 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_formalParameter281_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_formalParameter283 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_formalParameter283_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_LCURLY_in_block376 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_LCURLY_in_block376_data Count:(NSUInteger)1] retain];
-    FOLLOW_variable_in_block390 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_block390_data Count:(NSUInteger)1] retain];
-    FOLLOW_stat_in_block405 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block405_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_RCURLY_in_block416 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_RCURLY_in_block416_data Count:(NSUInteger)1] retain];
-    FOLLOW_forStat_in_stat449 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_forStat_in_stat449_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_stat457 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_stat457_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_SEMICOLON_in_stat459 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_stat459_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_stat468 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat468_data Count:(NSUInteger)1] retain];
-    FOLLOW_assignStat_in_stat476 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_stat476_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_SEMICOLON_in_stat478 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_stat478_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_SEMICOLON_in_stat487 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_stat487_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_FOR_in_forStat507 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_FOR_in_forStat507_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_LCURVE_in_forStat509 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_LCURVE_in_forStat509_data Count:(NSUInteger)1] retain];
-    FOLLOW_assignStat_in_forStat513 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_forStat513_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_SEMICOLON_in_forStat515 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_forStat515_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_forStat517 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat517_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_SEMICOLON_in_forStat519 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_forStat519_data Count:(NSUInteger)1] retain];
-    FOLLOW_assignStat_in_forStat523 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_forStat523_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_RCURVE_in_forStat525 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_RCURVE_in_forStat525_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_forStat527 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_forStat527_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_assignStat570 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_assignStat570_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_EQ_in_assignStat572 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQ_in_assignStat572_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_assignStat574 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_assignStat574_data Count:(NSUInteger)1] retain];
-    FOLLOW_condExpr_in_expr598 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_condExpr_in_expr598_data Count:(NSUInteger)1] retain];
-    FOLLOW_aexpr_in_condExpr617 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_aexpr_in_condExpr617_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_EQEQ_in_condExpr622 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQEQ_in_condExpr622_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_LT_in_condExpr627 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_LT_in_condExpr627_data Count:(NSUInteger)1] retain];
-    FOLLOW_aexpr_in_condExpr631 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_aexpr_in_condExpr631_data Count:(NSUInteger)1] retain];
-    FOLLOW_atom_in_aexpr653 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_aexpr653_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_PLUS_in_aexpr657 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_PLUS_in_aexpr657_data Count:(NSUInteger)1] retain];
-    FOLLOW_atom_in_aexpr660 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_aexpr660_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_atom680 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_atom680_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_INT_in_atom694 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_INT_in_atom694_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_LCURVE_in_atom708 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_LCURVE_in_atom708_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_atom710 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_atom710_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_RCURVE_in_atom712 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_RCURVE_in_atom712_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"ARG_DEF", @"BLOCK", @"FUNC_DECL", @"FUNC_DEF", @"FUNC_HDR", @"K_CHAR", 
- @"K_COMMA", @"K_EQ", @"K_EQEQ", @"K_FOR", @"K_ID", @"K_INT", @"K_INT_TYPE", 
- @"K_LCURLY", @"K_LCURVE", @"K_LT", @"K_PLUS", @"K_RCURLY", @"K_RCURVE", 
- @"K_SEMICOLON", @"K_VOID", @"VAR_DEF", @"WS", nil] retain]];
-    [ANTLRBaseRecognizer setGrammarFileName:@"SimpleC.g"];
-}
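A reading aid for the bitsets initialized above (annotation only, not part of the deleted file): each FOLLOW_<element>_in_<rule><offset> set encodes the tokens that may legally follow that element inside its rule; the rule methods below push the matching set before a sub-rule or token match and pop it afterwards so error recovery can resynchronize.

// e.g. inside -program further below:
//   [self pushFollow:FOLLOW_declaration_in_program85];   // tokens legal after 'declaration' in 'program'
//   declaration1 = [self declaration];
//   [self popFollow];
// The pushed set is what the recognizer consults if the nested match fails.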
-
-+ (SimpleCParser *)newSimpleCParser:(id<ANTLRTokenStream>)aStream
-{
-    return [[SimpleCParser alloc] initWithTokenStream:aStream];
-
-
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)aStream
-{
-    self = [super initWithTokenStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:15+1] retain]];
-    if ( self != nil ) {
-
-
-        dfa2 = [DFA2 newDFA2WithRecognizer:self];
-        /* start of actions-actionScope-init */
-        /* start of init */
-        /* AST genericParser.init */
-        [self setTreeAdaptor:[[ANTLRCommonTreeAdaptor newTreeAdaptor] retain]];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [dfa2 release];
-    /* AST genericParser.dealloc */
-    [self setTreeAdaptor:nil];
-
-    [super dealloc];
-}
-
-/* ObjC start members */
-/* ObjC end members */
-/* ObjC start actions.(actionScope).methods */
-/* ObjC end actions.(actionScope).methods */
-/* ObjC start methods() */
-/* AST genericParser.methods */
-/* AST parserMethods */
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor
-{
-	return treeAdaptor;
-}
-
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-{
-	if (aTreeAdaptor != treeAdaptor) {
-		treeAdaptor = aTreeAdaptor;
-	}
-}
-/* ObjC end methods() */
-/* ObjC start rules */
-/*
- * $ANTLR start program
- * SimpleC.g:16:1: program : ( declaration )+ ;
- */
-- (SimpleCParser_program_return *) program
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_program_return * retval = [SimpleCParser_program_return newSimpleCParser_program_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-         SimpleCParser_declaration_return * declaration1 = nil ;
-         
-
-
-        // SimpleC.g:17:5: ( ( declaration )+ ) // ruleBlockSingleAlt
-        // SimpleC.g:17:9: ( declaration )+ // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-        // SimpleC.g:17:9: ( declaration )+ // positiveClosureBlock
-        NSInteger cnt1 = 0;
-        do {
-            NSInteger alt1 = 2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( (LA1_0==K_CHAR||LA1_0==K_ID||LA1_0==K_INT_TYPE||LA1_0==K_VOID) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // SimpleC.g:17:9: declaration // alt
-                    {
-                    /* ASTParser ruleRef */
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_declaration_in_program85];
-                    declaration1 = [self declaration];
-
-                    [self popFollow];
-
-
-                    [treeAdaptor addChild:[declaration1 getTree] toTree:root_0];
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end program */
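For orientation, a minimal driver for this generated parser might look like the sketch below. Only newSimpleCParser:, the program rule method, and the returnScope getTree accessor are taken from the code above; the SimpleCLexer, ANTLRStringStream and ANTLRCommonTokenStream factory names are assumptions based on the runtime's new... naming convention and are not confirmed by this hunk.

// Illustrative sketch only; lexer/stream factory names are assumed, not taken from this diff.
#import <Foundation/Foundation.h>
#import "SimpleCLexer.h"
#import "SimpleCParser.h"

int main(int argc, const char *argv[])
{
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

    NSString *source = @"int f(int a) { int x; x = a + 1; }";
    ANTLRStringStream *chars = [ANTLRStringStream newANTLRStringStream:source];        // assumed factory
    SimpleCLexer *lexer = [SimpleCLexer newSimpleCLexerWithCharStream:chars];          // assumed factory
    ANTLRCommonTokenStream *tokens =
        [ANTLRCommonTokenStream newANTLRCommonTokenStreamWithTokenSource:lexer];       // assumed factory

    SimpleCParser *parser = [SimpleCParser newSimpleCParser:tokens];                   // defined above
    SimpleCParser_program_return *ret = [parser program];                              // entry rule above
    NSLog(@"AST: %@", [ret getTree]);  // returnScope accessor pattern shown earlier in this file

    [pool release];
    return 0;
}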
-
-/*
- * $ANTLR start declaration
- * SimpleC.g:20:1: declaration : ( variable | functionHeader K_SEMICOLON -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) );
- */
-- (SimpleCParser_declaration_return *) declaration
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_declaration_return * retval = [SimpleCParser_declaration_return newSimpleCParser_declaration_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_SEMICOLON4 = nil;
-         SimpleCParser_variable_return * variable2 = nil ;
-         
-         SimpleCParser_functionHeader_return * functionHeader3 = nil ;
-         
-         SimpleCParser_functionHeader_return * functionHeader5 = nil ;
-         
-         SimpleCParser_block_return * block6 = nil ;
-         
-
-        ANTLRCommonTree *K_SEMICOLON4_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_K_SEMICOLON =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_SEMICOLON"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_functionHeader =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule functionHeader"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_block =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule block"] retain];
-        // SimpleC.g:21:5: ( variable | functionHeader K_SEMICOLON -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) ) //ruleblock
-        NSInteger alt2=3;
-        alt2 = [dfa2 predict:input];
-        switch (alt2) {
-            case 1 : ;
-                // SimpleC.g:21:9: variable // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-                /* ASTParser ruleRef */
-                /* ruleRef */
-                [self pushFollow:FOLLOW_variable_in_declaration105];
-                variable2 = [self variable];
-
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[variable2 getTree] toTree:root_0];
-
-                }
-                break;
-            case 2 : ;
-                // SimpleC.g:22:9: functionHeader K_SEMICOLON // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_functionHeader_in_declaration115];
-                functionHeader3 = [self functionHeader];
-
-                [self popFollow];
-
-
-                [stream_functionHeader addElement:[functionHeader3 getTree]];
-
-                K_SEMICOLON4=(ANTLRCommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_declaration117];  
-                    [stream_K_SEMICOLON addElement:K_SEMICOLON4];
-
-
-                // AST REWRITE
-                // elements: functionHeader
-                // token labels: 
-                // rule labels: retval
-                // token list labels: 
-                // rule list labels: 
-                // wildcard labels: 
-                retval.tree = root_0;
-
-                ANTLRRewriteRuleSubtreeStream *stream_retval =
-                    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                // 22:36: -> ^( FUNC_DECL functionHeader )
-                {
-                    // SimpleC.g:22:39: ^( FUNC_DECL functionHeader )
-                    {
-                        ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                        root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:
-                                [treeAdaptor createTree:FUNC_DECL Text:@"FUNC_DECL"]
-                         old:root_1];
-
-                        [treeAdaptor addChild:[stream_functionHeader nextTree] toTree:root_1];
-
-                        [treeAdaptor addChild:root_1 toTree:root_0];
-                    }
-
-                }
-
-
-                retval.tree = root_0;
-
-
-                }
-                break;
-            case 3 : ;
-                // SimpleC.g:23:9: functionHeader block // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_functionHeader_in_declaration135];
-                functionHeader5 = [self functionHeader];
-
-                [self popFollow];
-
-
-                [stream_functionHeader addElement:[functionHeader5 getTree]];
-
-                /* ruleRef */
-                [self pushFollow:FOLLOW_block_in_declaration137];
-                block6 = [self block];
-
-                [self popFollow];
-
-
-                [stream_block addElement:[block6 getTree]];
-
-                // AST REWRITE
-                // elements: functionHeader, block
-                // token labels: 
-                // rule labels: retval
-                // token list labels: 
-                // rule list labels: 
-                // wildcard labels: 
-                retval.tree = root_0;
-
-                ANTLRRewriteRuleSubtreeStream *stream_retval =
-                    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                // 23:30: -> ^( FUNC_DEF functionHeader block )
-                {
-                    // SimpleC.g:23:33: ^( FUNC_DEF functionHeader block )
-                    {
-                        ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                        root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:
-                                [treeAdaptor createTree:FUNC_DEF Text:@"FUNC_DEF"]
-                         old:root_1];
-
-                        [treeAdaptor addChild:[stream_functionHeader nextTree] toTree:root_1];
-
-                        [treeAdaptor addChild:[stream_block nextTree] toTree:root_1];
-
-                        [treeAdaptor addChild:root_1 toTree:root_0];
-                    }
-
-                }
-
-
-                retval.tree = root_0;
-
-
-                }
-                break;
-
-        }
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        [stream_K_SEMICOLON release];
-        [stream_functionHeader release];
-        [stream_block release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end declaration */
-
-/*
- * $ANTLR start variable
- * SimpleC.g:26:1: variable : type declarator K_SEMICOLON -> ^( VAR_DEF type declarator ) ;
- */
-- (SimpleCParser_variable_return *) variable
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_variable_return * retval = [SimpleCParser_variable_return newSimpleCParser_variable_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_SEMICOLON9 = nil;
-         SimpleCParser_type_return * type7 = nil ;
-         
-         SimpleCParser_declarator_return * declarator8 = nil ;
-         
-
-        ANTLRCommonTree *K_SEMICOLON9_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_K_SEMICOLON =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_SEMICOLON"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_declarator =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule declarator"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_type =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule type"] retain];
-        // SimpleC.g:27:5: ( type declarator K_SEMICOLON -> ^( VAR_DEF type declarator ) ) // ruleBlockSingleAlt
-        // SimpleC.g:27:9: type declarator K_SEMICOLON // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_type_in_variable166];
-        type7 = [self type];
-
-        [self popFollow];
-
-
-        [stream_type addElement:[type7 getTree]];
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_declarator_in_variable168];
-        declarator8 = [self declarator];
-
-        [self popFollow];
-
-
-        [stream_declarator addElement:[declarator8 getTree]];
-
-        K_SEMICOLON9=(ANTLRCommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_variable170];  
-            [stream_K_SEMICOLON addElement:K_SEMICOLON9];
-
-
-        // AST REWRITE
-        // elements: declarator, type
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 27:37: -> ^( VAR_DEF type declarator )
-        {
-            // SimpleC.g:27:40: ^( VAR_DEF type declarator )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:
-                        [treeAdaptor createTree:VAR_DEF Text:@"VAR_DEF"]
-                 old:root_1];
-
-                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:[stream_declarator nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-
-        retval.tree = root_0;
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        [stream_K_SEMICOLON release];
-        [stream_declarator release];
-        [stream_type release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end variable */
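Reading the rewrite above: "-> ^( VAR_DEF type declarator )" makes the imaginary VAR_DEF token the root and attaches the matched type and declarator as its children, so for input "int x;" the resulting tree prints in LISP style as "(VAR_DEF int x)". A hypothetical call site, reusing the parser from the earlier sketch:

// Hypothetical usage; 'parser' as in the driver sketch above, positioned at a variable declaration.
SimpleCParser_variable_return *var = [parser variable];
ANTLRCommonTree *varTree = [var getTree];   // root node's token type is VAR_DEF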
-
-/*
- * $ANTLR start declarator
- * SimpleC.g:30:1: declarator : K_ID ;
- */
-- (SimpleCParser_declarator_return *) declarator
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_declarator_return * retval = [SimpleCParser_declarator_return newSimpleCParser_declarator_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_ID10 = nil;
-
-        ANTLRCommonTree *K_ID10_tree=nil;
-
-        // SimpleC.g:31:5: ( K_ID ) // ruleBlockSingleAlt
-        // SimpleC.g:31:9: K_ID // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-        /* ASTParser tokenRef */
-        K_ID10=(ANTLRCommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_declarator199]; 
-        K_ID10_tree = /* ASTParser createNodeFromToken */
-        (ANTLRCommonTree *)[[treeAdaptor create:K_ID10] retain]
-        ;
-        [treeAdaptor addChild:K_ID10_tree  toTree:root_0];
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end declarator */
-
-/*
- * $ANTLR start functionHeader
- * SimpleC.g:34:1: functionHeader : type K_ID K_LCURVE ( formalParameter ( K_COMMA formalParameter )* )? K_RCURVE -> ^( FUNC_HDR type K_ID ( formalParameter )+ ) ;
- */
-- (SimpleCParser_functionHeader_return *) functionHeader
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_functionHeader_return * retval = [SimpleCParser_functionHeader_return newSimpleCParser_functionHeader_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_ID12 = nil;
-        ANTLRCommonToken *K_LCURVE13 = nil;
-        ANTLRCommonToken *K_COMMA15 = nil;
-        ANTLRCommonToken *K_RCURVE17 = nil;
-         SimpleCParser_type_return * type11 = nil ;
-         
-         SimpleCParser_formalParameter_return * formalParameter14 = nil ;
-         
-         SimpleCParser_formalParameter_return * formalParameter16 = nil ;
-         
-
-        ANTLRCommonTree *K_ID12_tree=nil;
-        ANTLRCommonTree *K_LCURVE13_tree=nil;
-        ANTLRCommonTree *K_COMMA15_tree=nil;
-        ANTLRCommonTree *K_RCURVE17_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_K_ID =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_ID"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_LCURVE =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_LCURVE"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_RCURVE =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_RCURVE"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_COMMA =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_COMMA"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_formalParameter =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule formalParameter"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_type =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule type"] retain];
-        // SimpleC.g:35:5: ( type K_ID K_LCURVE ( formalParameter ( K_COMMA formalParameter )* )? K_RCURVE -> ^( FUNC_HDR type K_ID ( formalParameter )+ ) ) // ruleBlockSingleAlt
-        // SimpleC.g:35:9: type K_ID K_LCURVE ( formalParameter ( K_COMMA formalParameter )* )? K_RCURVE // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_type_in_functionHeader219];
-        type11 = [self type];
-
-        [self popFollow];
-
-
-        [stream_type addElement:[type11 getTree]];
-
-        K_ID12=(ANTLRCommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_functionHeader221];  
-            [stream_K_ID addElement:K_ID12];
-
-
-        K_LCURVE13=(ANTLRCommonToken *)[self match:input TokenType:K_LCURVE Follow:FOLLOW_K_LCURVE_in_functionHeader223];  
-            [stream_K_LCURVE addElement:K_LCURVE13];
-
-
-        // SimpleC.g:35:28: ( formalParameter ( K_COMMA formalParameter )* )? // block
-        NSInteger alt4=2;
-        NSInteger LA4_0 = [input LA:1];
-
-        if ( (LA4_0==K_CHAR||LA4_0==K_ID||LA4_0==K_INT_TYPE||LA4_0==K_VOID) ) {
-            alt4=1;
-        }
-        switch (alt4) {
-            case 1 : ;
-                // SimpleC.g:35:30: formalParameter ( K_COMMA formalParameter )* // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_formalParameter_in_functionHeader227];
-                formalParameter14 = [self formalParameter];
-
-                [self popFollow];
-
-
-                [stream_formalParameter addElement:[formalParameter14 getTree]];
-
-                do {
-                    NSInteger alt3=2;
-                    NSInteger LA3_0 = [input LA:1];
-                    if ( (LA3_0==K_COMMA) ) {
-                        alt3=1;
-                    }
-
-
-                    switch (alt3) {
-                        case 1 : ;
-                            // SimpleC.g:35:48: K_COMMA formalParameter // alt
-                            {
-                            K_COMMA15=(ANTLRCommonToken *)[self match:input TokenType:K_COMMA Follow:FOLLOW_K_COMMA_in_functionHeader231];  
-                                [stream_K_COMMA addElement:K_COMMA15];
-
-
-                            /* ruleRef */
-                            [self pushFollow:FOLLOW_formalParameter_in_functionHeader233];
-                            formalParameter16 = [self formalParameter];
-
-                            [self popFollow];
-
-
-                            [stream_formalParameter addElement:[formalParameter16 getTree]];
-
-                            }
-                            break;
-
-                        default :
-                            goto loop3;
-                    }
-                } while (YES);
-                loop3: ;
-
-
-                }
-                break;
-
-        }
-
-
-        K_RCURVE17=(ANTLRCommonToken *)[self match:input TokenType:K_RCURVE Follow:FOLLOW_K_RCURVE_in_functionHeader241];  
-            [stream_K_RCURVE addElement:K_RCURVE17];
-
-
-        // AST REWRITE
-        // elements: formalParameter, type, K_ID
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 36:9: -> ^( FUNC_HDR type K_ID ( formalParameter )+ )
-        {
-            // SimpleC.g:36:12: ^( FUNC_HDR type K_ID ( formalParameter )+ )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:
-                        [treeAdaptor createTree:FUNC_HDR Text:@"FUNC_HDR"]
-                 old:root_1];
-
-                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
-
-                 // TODO: args: 
-                [treeAdaptor addChild:
-                            [stream_K_ID nextNode]
-                 toTree:root_1];
-
-                // SimpleC.g:36:33: ( formalParameter )+
-                {
-                if ( !([stream_formalParameter hasNext]) ) {
-                    @throw [ANTLRRewriteEarlyExitException newException];
-                }
-                while ( [stream_formalParameter hasNext] ) {
-                    [treeAdaptor addChild:[stream_formalParameter nextTree] toTree:root_1];
-
-                }
-                [stream_formalParameter reset];
-
-                }
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-
-        retval.tree = root_0;
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        [stream_K_ID release];
-        [stream_K_LCURVE release];
-        [stream_K_RCURVE release];
-        [stream_K_COMMA release];
-        [stream_formalParameter release];
-        [stream_type release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end functionHeader */
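One detail worth noting in the rule above: the parse accepts an empty parameter list (the formalParameter group is optional), but the rewrite ^( FUNC_HDR type K_ID ( formalParameter )+ ) requires at least one formalParameter, so "void f()" parses and then throws ANTLRRewriteEarlyExitException while the AST is built, whereas "int f(int a)" rewrites to "(FUNC_HDR int f (ARG_DEF int a))". A hypothetical call site:

// Hypothetical usage; 'parser' as in the driver sketch above, positioned at a function header.
SimpleCParser_functionHeader_return *hdr = [parser functionHeader];
ANTLRCommonTree *hdrTree = [hdr getTree];   // root token type is FUNC_HDR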
-
-/*
- * $ANTLR start formalParameter
- * SimpleC.g:39:1: formalParameter : type declarator -> ^( ARG_DEF type declarator ) ;
- */
-- (SimpleCParser_formalParameter_return *) formalParameter
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_formalParameter_return * retval = [SimpleCParser_formalParameter_return newSimpleCParser_formalParameter_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-         SimpleCParser_type_return * type18 = nil ;
-         
-         SimpleCParser_declarator_return * declarator19 = nil ;
-         
-
-        ANTLRRewriteRuleSubtreeStream *stream_declarator =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule declarator"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_type =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule type"] retain];
-        // SimpleC.g:40:5: ( type declarator -> ^( ARG_DEF type declarator ) ) // ruleBlockSingleAlt
-        // SimpleC.g:40:9: type declarator // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_type_in_formalParameter281];
-        type18 = [self type];
-
-        [self popFollow];
-
-
-        [stream_type addElement:[type18 getTree]];
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_declarator_in_formalParameter283];
-        declarator19 = [self declarator];
-
-        [self popFollow];
-
-
-        [stream_declarator addElement:[declarator19 getTree]];
-
-        // AST REWRITE
-        // elements: declarator, type
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 40:25: -> ^( ARG_DEF type declarator )
-        {
-            // SimpleC.g:40:28: ^( ARG_DEF type declarator )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:
-                        [treeAdaptor createTree:ARG_DEF Text:@"ARG_DEF"]
-                 old:root_1];
-
-                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:[stream_declarator nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-
-        retval.tree = root_0;
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        [stream_declarator release];
-        [stream_type release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end formalParameter */
-
-/*
- * $ANTLR start type
- * SimpleC.g:43:1: type : ( K_INT_TYPE | K_CHAR | K_VOID | K_ID );
- */
-- (SimpleCParser_type_return *) type
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_type_return * retval = [SimpleCParser_type_return newSimpleCParser_type_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *set20 = nil;
-
-        ANTLRCommonTree *set20_tree=nil;
-
-        // SimpleC.g:44:5: ( K_INT_TYPE | K_CHAR | K_VOID | K_ID ) // ruleBlockSingleAlt
-        // SimpleC.g: // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-        /* ASTParser matchRuleBlockSet */
-        /* ASTParser matchSet */
-        set20 = (ANTLRCommonToken *)[input LT:1]; /* matchSet */
-
-        if ([input LA:1] == K_CHAR||[input LA:1] == K_ID||[input LA:1] == K_INT_TYPE||[input LA:1] == K_VOID) {
-            [input consume];
-            [treeAdaptor addChild:/* ASTParser createNodeFromToken */
-            (ANTLRCommonTree *)[[treeAdaptor create:set20] retain]
-             toTree:root_0 ];
-            [state setIsErrorRecovery:NO];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            @throw mse;
-        }
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end type */
-
-/*
- * $ANTLR start block
- * SimpleC.g:50:1: block : lc= K_LCURLY ( variable )* ( stat )* K_RCURLY -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* ) ;
- */
-- (SimpleCParser_block_return *) block
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_block_return * retval = [SimpleCParser_block_return newSimpleCParser_block_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *lc = nil;
-        ANTLRCommonToken *K_RCURLY23 = nil;
-         SimpleCParser_variable_return * variable21 = nil ;
-         
-         SimpleCParser_stat_return * stat22 = nil ;
-         
-
-        ANTLRCommonTree *lc_tree=nil;
-        ANTLRCommonTree *K_RCURLY23_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_K_LCURLY =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_LCURLY"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_RCURLY =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_RCURLY"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_variable =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule variable"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_stat =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule stat"] retain];
-        // SimpleC.g:51:5: (lc= K_LCURLY ( variable )* ( stat )* K_RCURLY -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* ) ) // ruleBlockSingleAlt
-        // SimpleC.g:51:9: lc= K_LCURLY ( variable )* ( stat )* K_RCURLY // alt
-        {
-        lc=(ANTLRCommonToken *)[self match:input TokenType:K_LCURLY Follow:FOLLOW_K_LCURLY_in_block376];  
-            [stream_K_LCURLY addElement:lc];
-
-
-        do {
-            NSInteger alt5=2;
-            NSInteger LA5_0 = [input LA:1];
-            if ( (LA5_0==K_ID) ) {
-                NSInteger LA5_2 = [input LA:2];
-                if ( (LA5_2==K_ID) ) {
-                    alt5=1;
-                }
-
-
-            }
-            else if ( (LA5_0==K_CHAR||LA5_0==K_INT_TYPE||LA5_0==K_VOID) ) {
-                alt5=1;
-            }
-
-
-            switch (alt5) {
-                case 1 : ;
-                    // SimpleC.g:52:13: variable // alt
-                    {
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_variable_in_block390];
-                    variable21 = [self variable];
-
-                    [self popFollow];
-
-
-                    [stream_variable addElement:[variable21 getTree]];
-
-                    }
-                    break;
-
-                default :
-                    goto loop5;
-            }
-        } while (YES);
-        loop5: ;
-
-
-        do {
-            NSInteger alt6=2;
-            NSInteger LA6_0 = [input LA:1];
-            if ( ((LA6_0 >= K_FOR && LA6_0 <= K_INT)||(LA6_0 >= K_LCURLY && LA6_0 <= K_LCURVE)||LA6_0==K_SEMICOLON) ) {
-                alt6=1;
-            }
-
-
-            switch (alt6) {
-                case 1 : ;
-                    // SimpleC.g:53:13: stat // alt
-                    {
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_stat_in_block405];
-                    stat22 = [self stat];
-
-                    [self popFollow];
-
-
-                    [stream_stat addElement:[stat22 getTree]];
-
-                    }
-                    break;
-
-                default :
-                    goto loop6;
-            }
-        } while (YES);
-        loop6: ;
-
-
-        K_RCURLY23=(ANTLRCommonToken *)[self match:input TokenType:K_RCURLY Follow:FOLLOW_K_RCURLY_in_block416];  
-            [stream_K_RCURLY addElement:K_RCURLY23];
-
-
-        // AST REWRITE
-        // elements: stat, variable
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 55:9: -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* )
-        {
-            // SimpleC.g:55:12: ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:
-                        [treeAdaptor createTree:BLOCK FromToken:lc Text:@"BLOCK"]
-                 old:root_1];
-
-                // SimpleC.g:55:34: ( variable )*
-                while ( [stream_variable hasNext] ) {
-                    [treeAdaptor addChild:[stream_variable nextTree] toTree:root_1];
-
-                }
-                [stream_variable reset];
-
-                // SimpleC.g:55:44: ( stat )*
-                while ( [stream_stat hasNext] ) {
-                    [treeAdaptor addChild:[stream_stat nextTree] toTree:root_1];
-
-                }
-                [stream_stat reset];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-
-        retval.tree = root_0;
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        [stream_K_LCURLY release];
-        [stream_K_RCURLY release];
-        [stream_variable release];
-        [stream_stat release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end block */
-
-/*
- * $ANTLR start stat
- * SimpleC.g:58:1: stat : ( forStat | expr K_SEMICOLON !| block | assignStat K_SEMICOLON !| K_SEMICOLON !);
- */
-- (SimpleCParser_stat_return *) stat
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_stat_return * retval = [SimpleCParser_stat_return newSimpleCParser_stat_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_SEMICOLON26 = nil;
-        ANTLRCommonToken *K_SEMICOLON29 = nil;
-        ANTLRCommonToken *K_SEMICOLON30 = nil;
-         SimpleCParser_forStat_return * forStat24 = nil ;
-         
-         SimpleCParser_expr_return * expr25 = nil ;
-         
-         SimpleCParser_block_return * block27 = nil ;
-         
-         SimpleCParser_assignStat_return * assignStat28 = nil ;
-         
-
-        ANTLRCommonTree *K_SEMICOLON26_tree=nil;
-        ANTLRCommonTree *K_SEMICOLON29_tree=nil;
-        ANTLRCommonTree *K_SEMICOLON30_tree=nil;
-
-        // SimpleC.g:58:5: ( forStat | expr K_SEMICOLON !| block | assignStat K_SEMICOLON !| K_SEMICOLON !) //ruleblock
-        NSInteger alt7=5;
-        unichar charLA7 = [input LA:1];
-        switch (charLA7) {
-            case K_FOR: ;
-                {
-                alt7=1;
-                }
-                break;
-            case K_ID: ;
-                {
-                NSInteger LA7_2 = [input LA:2];
-
-                if ( (LA7_2==K_EQ) ) {
-                    alt7=4;
-                }
-                else if ( (LA7_2==K_EQEQ||(LA7_2 >= K_LT && LA7_2 <= K_PLUS)||LA7_2==K_SEMICOLON) ) {
-                    alt7=2;
-                }
-                else {
-                    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:7 state:2 stream:input];
-                    nvae.c = LA7_2;
-                    @throw nvae;
-
-                }
-                }
-                break;
-            case K_INT: ;
-            case K_LCURVE: ;
-                {
-                alt7=2;
-                }
-                break;
-            case K_LCURLY: ;
-                {
-                alt7=3;
-                }
-                break;
-            case K_SEMICOLON: ;
-                {
-                alt7=5;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:7 state:0 stream:input];
-            nvae.c = charLA7;
-            @throw nvae;
-
-        }
-
-        switch (alt7) {
-            case 1 : ;
-                // SimpleC.g:58:7: forStat // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-                /* ASTParser ruleRef */
-                /* ruleRef */
-                [self pushFollow:FOLLOW_forStat_in_stat449];
-                forStat24 = [self forStat];
-
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[forStat24 getTree] toTree:root_0];
-
-                }
-                break;
-            case 2 : ;
-                // SimpleC.g:59:7: expr K_SEMICOLON ! // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-                /* ASTParser ruleRef */
-                /* ruleRef */
-                [self pushFollow:FOLLOW_expr_in_stat457];
-                expr25 = [self expr];
-
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[expr25 getTree] toTree:root_0];
-
-                K_SEMICOLON26=(ANTLRCommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_stat459]; 
-
-                }
-                break;
-            case 3 : ;
-                // SimpleC.g:60:7: block // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-                /* ASTParser ruleRef */
-                /* ruleRef */
-                [self pushFollow:FOLLOW_block_in_stat468];
-                block27 = [self block];
-
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[block27 getTree] toTree:root_0];
-
-                }
-                break;
-            case 4 : ;
-                // SimpleC.g:61:7: assignStat K_SEMICOLON ! // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-                /* ASTParser ruleRef */
-                /* ruleRef */
-                [self pushFollow:FOLLOW_assignStat_in_stat476];
-                assignStat28 = [self assignStat];
-
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[assignStat28 getTree] toTree:root_0];
-
-                K_SEMICOLON29=(ANTLRCommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_stat478]; 
-
-                }
-                break;
-            case 5 : ;
-                // SimpleC.g:62:7: K_SEMICOLON ! // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-                K_SEMICOLON30=(ANTLRCommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_stat487]; 
-
-                }
-                break;
-
-        }
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end stat */
-
-/*
- * $ANTLR start forStat
- * SimpleC.g:65:1: forStat : K_FOR K_LCURVE start= assignStat K_SEMICOLON expr K_SEMICOLON next= assignStat K_RCURVE block -> ^( K_FOR $start expr $next block ) ;
- */
-- (SimpleCParser_forStat_return *) forStat
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_forStat_return * retval = [SimpleCParser_forStat_return newSimpleCParser_forStat_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_FOR31 = nil;
-        ANTLRCommonToken *K_LCURVE32 = nil;
-        ANTLRCommonToken *K_SEMICOLON33 = nil;
-        ANTLRCommonToken *K_SEMICOLON35 = nil;
-        ANTLRCommonToken *K_RCURVE36 = nil;
-         SimpleCParser_assignStat_return * start = nil ;
-         
-         SimpleCParser_assignStat_return * next = nil ;
-         
-         SimpleCParser_expr_return * expr34 = nil ;
-         
-         SimpleCParser_block_return * block37 = nil ;
-         
-
-        ANTLRCommonTree *K_FOR31_tree=nil;
-        ANTLRCommonTree *K_LCURVE32_tree=nil;
-        ANTLRCommonTree *K_SEMICOLON33_tree=nil;
-        ANTLRCommonTree *K_SEMICOLON35_tree=nil;
-        ANTLRCommonTree *K_RCURVE36_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_K_LCURVE =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_LCURVE"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_RCURVE =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_RCURVE"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_SEMICOLON =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_SEMICOLON"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_FOR =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_FOR"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_assignStat =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule assignStat"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_block =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule block"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_expr =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule expr"] retain];
-        // SimpleC.g:66:5: ( K_FOR K_LCURVE start= assignStat K_SEMICOLON expr K_SEMICOLON next= assignStat K_RCURVE block -> ^( K_FOR $start expr $next block ) ) // ruleBlockSingleAlt
-        // SimpleC.g:66:9: K_FOR K_LCURVE start= assignStat K_SEMICOLON expr K_SEMICOLON next= assignStat K_RCURVE block // alt
-        {
-        K_FOR31=(ANTLRCommonToken *)[self match:input TokenType:K_FOR Follow:FOLLOW_K_FOR_in_forStat507];  
-            [stream_K_FOR addElement:K_FOR31];
-
-
-        K_LCURVE32=(ANTLRCommonToken *)[self match:input TokenType:K_LCURVE Follow:FOLLOW_K_LCURVE_in_forStat509];  
-            [stream_K_LCURVE addElement:K_LCURVE32];
-
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_assignStat_in_forStat513];
-        start = [self assignStat];
-
-        [self popFollow];
-
-
-        [stream_assignStat addElement:[start getTree]];
-
-        K_SEMICOLON33=(ANTLRCommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_forStat515];  
-            [stream_K_SEMICOLON addElement:K_SEMICOLON33];
-
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_expr_in_forStat517];
-        expr34 = [self expr];
-
-        [self popFollow];
-
-
-        [stream_expr addElement:[expr34 getTree]];
-
-        K_SEMICOLON35=(ANTLRCommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_forStat519];  
-            [stream_K_SEMICOLON addElement:K_SEMICOLON35];
-
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_assignStat_in_forStat523];
-        next = [self assignStat];
-
-        [self popFollow];
-
-
-        [stream_assignStat addElement:[next getTree]];
-
-        K_RCURVE36=(ANTLRCommonToken *)[self match:input TokenType:K_RCURVE Follow:FOLLOW_K_RCURVE_in_forStat525];  
-            [stream_K_RCURVE addElement:K_RCURVE36];
-
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_block_in_forStat527];
-        block37 = [self block];
-
-        [self popFollow];
-
-
-        [stream_block addElement:[block37 getTree]];
-
-        // AST REWRITE
-        // elements: start, next, expr, block, K_FOR
-        // token labels: 
-        // rule labels: retval, start, next
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_start =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                description:@"token start" element:start!=nil?[start getTree]:nil] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_next =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                description:@"token next" element:next!=nil?[next getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 67:9: -> ^( K_FOR $start expr $next block )
-        {
-            // SimpleC.g:67:12: ^( K_FOR $start expr $next block )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:
-                            [stream_K_FOR nextNode]
-                 old:root_1];
-
-                [treeAdaptor addChild:[stream_start nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:[stream_expr nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:[stream_next nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:[stream_block nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-
-        retval.tree = root_0;
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        [stream_K_LCURVE release];
-        [stream_K_RCURVE release];
-        [stream_K_SEMICOLON release];
-        [stream_K_FOR release];
-        [stream_assignStat release];
-        [stream_block release];
-        [stream_expr release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end forStat */
-
-/*
- * $ANTLR start assignStat
- * SimpleC.g:70:1: assignStat : K_ID K_EQ expr -> ^( K_EQ K_ID expr ) ;
- */
-- (SimpleCParser_assignStat_return *) assignStat
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_assignStat_return * retval = [SimpleCParser_assignStat_return newSimpleCParser_assignStat_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_ID38 = nil;
-        ANTLRCommonToken *K_EQ39 = nil;
-         SimpleCParser_expr_return * expr40 = nil ;
-         
-
-        ANTLRCommonTree *K_ID38_tree=nil;
-        ANTLRCommonTree *K_EQ39_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_K_ID =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_ID"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_EQ =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_EQ"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_expr =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule expr"] retain];
-        // SimpleC.g:71:5: ( K_ID K_EQ expr -> ^( K_EQ K_ID expr ) ) // ruleBlockSingleAlt
-        // SimpleC.g:71:9: K_ID K_EQ expr // alt
-        {
-        K_ID38=(ANTLRCommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_assignStat570];  
-            [stream_K_ID addElement:K_ID38];
-
-
-        K_EQ39=(ANTLRCommonToken *)[self match:input TokenType:K_EQ Follow:FOLLOW_K_EQ_in_assignStat572];  
-            [stream_K_EQ addElement:K_EQ39];
-
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_expr_in_assignStat574];
-        expr40 = [self expr];
-
-        [self popFollow];
-
-
-        [stream_expr addElement:[expr40 getTree]];
-
-        // AST REWRITE
-        // elements: K_ID, K_EQ, expr
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 71:24: -> ^( K_EQ K_ID expr )
-        {
-            // SimpleC.g:71:27: ^( K_EQ K_ID expr )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:
-                            [stream_K_EQ nextNode]
-                 old:root_1];
-
-                 // TODO: args: 
-                [treeAdaptor addChild:
-                            [stream_K_ID nextNode]
-                 toTree:root_1];
-
-                [treeAdaptor addChild:[stream_expr nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-
-        retval.tree = root_0;
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        [stream_K_ID release];
-        [stream_K_EQ release];
-        [stream_expr release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end assignStat */
-
-/*
- * $ANTLR start expr
- * SimpleC.g:74:1: expr : condExpr ;
- */
-- (SimpleCParser_expr_return *) expr
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_expr_return * retval = [SimpleCParser_expr_return newSimpleCParser_expr_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-         SimpleCParser_condExpr_return * condExpr41 = nil ;
-         
-
-
-        // SimpleC.g:74:5: ( condExpr ) // ruleBlockSingleAlt
-        // SimpleC.g:74:9: condExpr // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-        /* ASTParser ruleRef */
-        /* ruleRef */
-        [self pushFollow:FOLLOW_condExpr_in_expr598];
-        condExpr41 = [self condExpr];
-
-        [self popFollow];
-
-
-        [treeAdaptor addChild:[condExpr41 getTree] toTree:root_0];
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end expr */
-
-/*
- * $ANTLR start condExpr
- * SimpleC.g:77:1: condExpr : aexpr ( ( K_EQEQ ^| K_LT ^) aexpr )? ;
- */
-- (SimpleCParser_condExpr_return *) condExpr
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_condExpr_return * retval = [SimpleCParser_condExpr_return newSimpleCParser_condExpr_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_EQEQ43 = nil;
-        ANTLRCommonToken *K_LT44 = nil;
-         SimpleCParser_aexpr_return * aexpr42 = nil ;
-         
-         SimpleCParser_aexpr_return * aexpr45 = nil ;
-         
-
-        ANTLRCommonTree *K_EQEQ43_tree=nil;
-        ANTLRCommonTree *K_LT44_tree=nil;
-
-        // SimpleC.g:78:5: ( aexpr ( ( K_EQEQ ^| K_LT ^) aexpr )? ) // ruleBlockSingleAlt
-        // SimpleC.g:78:9: aexpr ( ( K_EQEQ ^| K_LT ^) aexpr )? // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-        /* ASTParser ruleRef */
-        /* ruleRef */
-        [self pushFollow:FOLLOW_aexpr_in_condExpr617];
-        aexpr42 = [self aexpr];
-
-        [self popFollow];
-
-
-        [treeAdaptor addChild:[aexpr42 getTree] toTree:root_0];
-
-        // SimpleC.g:78:15: ( ( K_EQEQ ^| K_LT ^) aexpr )? // block
-        NSInteger alt9=2;
-        NSInteger LA9_0 = [input LA:1];
-
-        if ( (LA9_0==K_EQEQ||LA9_0==K_LT) ) {
-            alt9=1;
-        }
-        switch (alt9) {
-            case 1 : ;
-                // SimpleC.g:78:17: ( K_EQEQ ^| K_LT ^) aexpr // alt
-                {
-                // SimpleC.g:78:17: ( K_EQEQ ^| K_LT ^) // block
-                NSInteger alt8=2;
-                NSInteger LA8_0 = [input LA:1];
-
-                if ( (LA8_0==K_EQEQ) ) {
-                    alt8=1;
-                }
-                else if ( (LA8_0==K_LT) ) {
-                    alt8=2;
-                }
-                else {
-                    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:8 state:0 stream:input];
-                    nvae.c = LA8_0;
-                    @throw nvae;
-
-                }
-                switch (alt8) {
-                    case 1 : ;
-                        // SimpleC.g:78:18: K_EQEQ ^ // alt
-                        {
-                        K_EQEQ43=(ANTLRCommonToken *)[self match:input TokenType:K_EQEQ Follow:FOLLOW_K_EQEQ_in_condExpr622]; 
-                        K_EQEQ43_tree = /* ASTParser createNodeFromToken */
-                        (ANTLRCommonTree *)[[treeAdaptor create:K_EQEQ43] retain]
-                        ;
-                        root_0 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:K_EQEQ43_tree old:root_0];
-
-
-                        }
-                        break;
-                    case 2 : ;
-                        // SimpleC.g:78:28: K_LT ^ // alt
-                        {
-                        K_LT44=(ANTLRCommonToken *)[self match:input TokenType:K_LT Follow:FOLLOW_K_LT_in_condExpr627]; 
-                        K_LT44_tree = /* ASTParser createNodeFromToken */
-                        (ANTLRCommonTree *)[[treeAdaptor create:K_LT44] retain]
-                        ;
-                        root_0 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:K_LT44_tree old:root_0];
-
-
-                        }
-                        break;
-
-                }
-
-
-                /* ASTParser ruleRef */
-                /* ruleRef */
-                [self pushFollow:FOLLOW_aexpr_in_condExpr631];
-                aexpr45 = [self aexpr];
-
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[aexpr45 getTree] toTree:root_0];
-
-                }
-                break;
-
-        }
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end condExpr */
-
-/*
- * $ANTLR start aexpr
- * SimpleC.g:81:1: aexpr : atom ( K_PLUS ^ atom )* ;
- */
-- (SimpleCParser_aexpr_return *) aexpr
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_aexpr_return * retval = [SimpleCParser_aexpr_return newSimpleCParser_aexpr_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_PLUS47 = nil;
-         SimpleCParser_atom_return * atom46 = nil ;
-         
-         SimpleCParser_atom_return * atom48 = nil ;
-         
-
-        ANTLRCommonTree *K_PLUS47_tree=nil;
-
-        // SimpleC.g:82:5: ( atom ( K_PLUS ^ atom )* ) // ruleBlockSingleAlt
-        // SimpleC.g:82:9: atom ( K_PLUS ^ atom )* // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-        /* ASTParser ruleRef */
-        /* ruleRef */
-        [self pushFollow:FOLLOW_atom_in_aexpr653];
-        atom46 = [self atom];
-
-        [self popFollow];
-
-
-        [treeAdaptor addChild:[atom46 getTree] toTree:root_0];
-
-        do {
-            NSInteger alt10=2;
-            NSInteger LA10_0 = [input LA:1];
-            if ( (LA10_0==K_PLUS) ) {
-                alt10=1;
-            }
-
-
-            switch (alt10) {
-                case 1 : ;
-                    // SimpleC.g:82:16: K_PLUS ^ atom // alt
-                    {
-                    K_PLUS47=(ANTLRCommonToken *)[self match:input TokenType:K_PLUS Follow:FOLLOW_K_PLUS_in_aexpr657]; 
-                    K_PLUS47_tree = /* ASTParser createNodeFromToken */
-                    (ANTLRCommonTree *)[[treeAdaptor create:K_PLUS47] retain]
-                    ;
-                    root_0 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:K_PLUS47_tree old:root_0];
-
-
-                    /* ASTParser ruleRef */
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_atom_in_aexpr660];
-                    atom48 = [self atom];
-
-                    [self popFollow];
-
-
-                    [treeAdaptor addChild:[atom48 getTree] toTree:root_0];
-
-                    }
-                    break;
-
-                default :
-                    goto loop10;
-            }
-        } while (YES);
-        loop10: ;
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end aexpr */
-
-/*
- * $ANTLR start atom
- * SimpleC.g:85:1: atom : ( K_ID | K_INT | K_LCURVE expr K_RCURVE -> expr );
- */
-- (SimpleCParser_atom_return *) atom
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_atom_return * retval = [SimpleCParser_atom_return newSimpleCParser_atom_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_ID49 = nil;
-        ANTLRCommonToken *K_INT50 = nil;
-        ANTLRCommonToken *K_LCURVE51 = nil;
-        ANTLRCommonToken *K_RCURVE53 = nil;
-         SimpleCParser_expr_return * expr52 = nil ;
-         
-
-        ANTLRCommonTree *K_ID49_tree=nil;
-        ANTLRCommonTree *K_INT50_tree=nil;
-        ANTLRCommonTree *K_LCURVE51_tree=nil;
-        ANTLRCommonTree *K_RCURVE53_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_K_LCURVE =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_LCURVE"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_RCURVE =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_RCURVE"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_expr =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule expr"] retain];
-        // SimpleC.g:86:5: ( K_ID | K_INT | K_LCURVE expr K_RCURVE -> expr ) //ruleblock
-        NSInteger alt11=3;
-        unichar charLA11 = [input LA:1];
-        switch (charLA11) {
-            case K_ID: ;
-                {
-                alt11=1;
-                }
-                break;
-            case K_INT: ;
-                {
-                alt11=2;
-                }
-                break;
-            case K_LCURVE: ;
-                {
-                alt11=3;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:11 state:0 stream:input];
-            nvae.c = charLA11;
-            @throw nvae;
-
-        }
-
-        switch (alt11) {
-            case 1 : ;
-                // SimpleC.g:86:7: K_ID // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-                /* ASTParser tokenRef */
-                K_ID49=(ANTLRCommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_atom680]; 
-                K_ID49_tree = /* ASTParser createNodeFromToken */
-                (ANTLRCommonTree *)[[treeAdaptor create:K_ID49] retain]
-                ;
-                [treeAdaptor addChild:K_ID49_tree  toTree:root_0];
-
-
-                }
-                break;
-            case 2 : ;
-                // SimpleC.g:87:7: K_INT // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-                /* ASTParser tokenRef */
-                K_INT50=(ANTLRCommonToken *)[self match:input TokenType:K_INT Follow:FOLLOW_K_INT_in_atom694]; 
-                K_INT50_tree = /* ASTParser createNodeFromToken */
-                (ANTLRCommonTree *)[[treeAdaptor create:K_INT50] retain]
-                ;
-                [treeAdaptor addChild:K_INT50_tree  toTree:root_0];
-
-
-                }
-                break;
-            case 3 : ;
-                // SimpleC.g:88:7: K_LCURVE expr K_RCURVE // alt
-                {
-                K_LCURVE51=(ANTLRCommonToken *)[self match:input TokenType:K_LCURVE Follow:FOLLOW_K_LCURVE_in_atom708];  
-                    [stream_K_LCURVE addElement:K_LCURVE51];
-
-
-                /* ruleRef */
-                [self pushFollow:FOLLOW_expr_in_atom710];
-                expr52 = [self expr];
-
-                [self popFollow];
-
-
-                [stream_expr addElement:[expr52 getTree]];
-
-                K_RCURVE53=(ANTLRCommonToken *)[self match:input TokenType:K_RCURVE Follow:FOLLOW_K_RCURVE_in_atom712];  
-                    [stream_K_RCURVE addElement:K_RCURVE53];
-
-
-                // AST REWRITE
-                // elements: expr
-                // token labels: 
-                // rule labels: retval
-                // token list labels: 
-                // rule list labels: 
-                // wildcard labels: 
-                retval.tree = root_0;
-
-                ANTLRRewriteRuleSubtreeStream *stream_retval =
-                    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                // 88:30: -> expr
-                {
-                    [treeAdaptor addChild:[stream_expr nextTree] toTree:root_0];
-
-                }
-
-
-                retval.tree = root_0;
-
-
-                }
-                break;
-
-        }
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        [stream_K_LCURVE release];
-        [stream_K_RCURVE release];
-        [stream_expr release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end atom */
-/* ObjC end rules */
-
-@end /* end of SimpleCParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g
deleted file mode 100644
index c1f89b8..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g
+++ /dev/null
@@ -1,69 +0,0 @@
-tree grammar SimpleCTP;
-options {
-    tokenVocab = SimpleC;
-	language = ObjC;
-	ASTLabelType = ANTLRCommonTree;
-}
-
-scope Symbols
-{
-ANTLRCommonTree *tree;
-}
-
-program
-    :   declaration+
-    ;
-
-declaration
-    :   variable
-    |   ^(FUNC_DECL functionHeader)
-    |   ^(FUNC_DEF functionHeader block)
-    ;
-
-variable
-    :   ^(VAR_DEF type declarator)
-    ;
-
-declarator
-    :   K_ID 
-    ;
-
-functionHeader
-    :   ^(FUNC_HDR type K_ID formalParameter+)
-    ;
-
-formalParameter
-    :   ^(ARG_DEF type declarator)
-    ;
-
-type
-    :   K_INT_TYPE
-    |   K_CHAR  
-    |   K_VOID
-    |   K_ID        
-    ;
-
-block
-    :   ^(BLOCK variable* stat*)
-    ;
-
-stat: forStat
-    | expr
-    | block
-    ;
-
-forStat
-    :   ^(K_FOR expr expr expr block)
-    ;
-
-expr:   ^(K_EQEQ expr expr)
-    |   ^(K_LT expr expr)
-    |   ^(K_PLUS expr expr)
-    |   ^(K_EQ K_ID e=expr) { NSLog(@"assigning \%@ to variable \%@", $e.text, $K_ID.text); }
-    |   atom
-    ;
-
-atom
-    : K_ID      
-    | K_INT      
-    ; 
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.h b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.h
deleted file mode 100644
index 2832b66..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.h
+++ /dev/null
@@ -1,140 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} SimpleCTP.g 2011-05-06 15:09:28
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* treeParserHeaderFile */
-#ifndef ANTLR3TokenTypeAlreadyDefined
-#define ANTLR3TokenTypeAlreadyDefined
-typedef enum {
-    ANTLR_EOF = -1,
-    INVALID,
-    EOR,
-    DOWN,
-    UP,
-    MIN
-} ANTLR3TokenType;
-#endif
-
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define ARG_DEF 4
-#define BLOCK 5
-#define FUNC_DECL 6
-#define FUNC_DEF 7
-#define FUNC_HDR 8
-#define K_CHAR 9
-#define K_COMMA 10
-#define K_EQ 11
-#define K_EQEQ 12
-#define K_FOR 13
-#define K_ID 14
-#define K_INT 15
-#define K_INT_TYPE 16
-#define K_LCURLY 17
-#define K_LCURVE 18
-#define K_LT 19
-#define K_PLUS 20
-#define K_RCURLY 21
-#define K_RCURVE 22
-#define K_SEMICOLON 23
-#define K_VOID 24
-#define VAR_DEF 25
-#define WS 26
-#pragma mark Dynamic Global Scopes
-/* globalAttributeScopeInterface */
-@interface Symbols_Scope : ANTLRSymbolsScope {
-ANTLRCommonTree * tree;
-
-}
-/* start of globalAttributeScopeInterface properties */
-
-@property (assign, getter=gettree, setter=settree:) ANTLRCommonTree * tree;
-
-/* end globalAttributeScopeInterface properties */
-
-
-+ (Symbols_Scope *)newSymbols_Scope;
-- (id) init;
-/* start of globalAttributeScopeInterface methodsDecl */
-
-- (ANTLRCommonTree *)gettree;
-- (void)settree:(ANTLRCommonTree *)aVal;
-
-/* End of globalAttributeScopeInterface methodsDecl */
-
-@end /* end of Symbols_Scope interface */
-
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-/* returnScopeInterface SimpleCTP_expr_return */
-@interface SimpleCTP_expr_return :ANTLRTreeRuleReturnScope { /* returnScopeInterface line 1838 */
- /* ObjC start of memVars() */
-}
-/* start properties */
-+ (SimpleCTP_expr_return *)newSimpleCTP_expr_return;
-/* this is start of set and get methods */
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-
-/* Interface grammar class */
-@interface SimpleCTP : ANTLRTreeParser { /* line 572 */
-/* ObjC start of ruleAttributeScopeMemVar */
-
-
-/* ObjC end of ruleAttributeScopeMemVar */
-/* ObjC start of globalAttributeScopeMemVar */
-/* globalAttributeScopeMemVar */
-//ANTLRSymbolStack *gStack;
-ANTLRSymbolStack *Symbols_stack;
-Symbols_Scope *Symbols_scope;
-
-/* ObjC end of globalAttributeScopeMemVar */
-/* ObjC start of actions.(actionScope).memVars */
-/* ObjC end of actions.(actionScope).memVars */
-/* ObjC start of memVars */
-/* ObjC end of memVars */
-
- }
-
-/* ObjC start of actions.(actionScope).properties */
-/* ObjC end of actions.(actionScope).properties */
-/* ObjC start of properties */
-/* ObjC end of properties */
-
-+ (void) initialize;
-+ (id) newSimpleCTP:(id<ANTLRTreeNodeStream>)aStream;
-/* ObjC start of actions.(actionScope).methodsDecl */
-/* ObjC end of actions.(actionScope).methodsDecl */
-
-/* ObjC start of methodsDecl */
-/* ObjC end of methodsDecl */
-
-- (void)program; 
-- (void)declaration; 
-- (void)variable; 
-- (void)declarator; 
-- (void)functionHeader; 
-- (void)formalParameter; 
-- (void)type; 
-- (void)block; 
-- (void)stat; 
-- (void)forStat; 
-- (SimpleCTP_expr_return *)expr; 
-- (void)atom; 
-
-
-@end /* end of SimpleCTP interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.java b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.java
deleted file mode 100644
index e2e0d1c..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.java
+++ /dev/null
@@ -1,852 +0,0 @@
-// $ANTLR 3.2 Aug 13, 2010 14:19:31 SimpleCTP.g 2010-08-13 14:29:19
-
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.*;import java.util.Stack;
-import java.util.List;
-import java.util.ArrayList;
-
-public class SimpleCTP extends TreeParser {
-    public static final String[] tokenNames = new String[] {
-        "<invalid>", "<EOR>", "<DOWN>", "<UP>", "VAR_DEF", "ARG_DEF", "FUNC_HDR", "FUNC_DECL", "FUNC_DEF", "BLOCK", "ID", "EQ", "INT", "FOR", "INT_TYPE", "CHAR", "VOID", "EQEQ", "LT", "PLUS", "WS", "';'", "'('", "','", "')'", "'{'", "'}'"
-    };
-    public static final int LT=18;
-    public static final int T__26=26;
-    public static final int T__25=25;
-    public static final int T__24=24;
-    public static final int T__23=23;
-    public static final int T__22=22;
-    public static final int T__21=21;
-    public static final int CHAR=15;
-    public static final int FOR=13;
-    public static final int FUNC_HDR=6;
-    public static final int INT=12;
-    public static final int FUNC_DEF=8;
-    public static final int INT_TYPE=14;
-    public static final int ID=10;
-    public static final int EOF=-1;
-    public static final int FUNC_DECL=7;
-    public static final int ARG_DEF=5;
-    public static final int WS=20;
-    public static final int BLOCK=9;
-    public static final int PLUS=19;
-    public static final int VOID=16;
-    public static final int EQ=11;
-    public static final int VAR_DEF=4;
-    public static final int EQEQ=17;
-
-    // delegates
-    // delegators
-
-
-        public SimpleCTP(TreeNodeStream input) {
-            this(input, new RecognizerSharedState());
-        }
-        public SimpleCTP(TreeNodeStream input, RecognizerSharedState state) {
-            super(input, state);
-             
-        }
-        
-
-    public String[] getTokenNames() { return SimpleCTP.tokenNames; }
-    public String getGrammarFileName() { return "SimpleCTP.g"; }
-
-
-
-    // $ANTLR start "program"
-    // SimpleCTP.g:8:1: program : ( declaration )+ ;
-    public final void program() throws RecognitionException {
-        try {
-            // SimpleCTP.g:9:5: ( ( declaration )+ )
-            // SimpleCTP.g:9:9: ( declaration )+
-            {
-            // SimpleCTP.g:9:9: ( declaration )+
-            int cnt1=0;
-            loop1:
-            do {
-                int alt1=2;
-                int LA1_0 = input.LA(1);
-
-                if ( (LA1_0==VAR_DEF||(LA1_0>=FUNC_DECL && LA1_0<=FUNC_DEF)) ) {
-                    alt1=1;
-                }
-
-
-                switch (alt1) {
-            	case 1 :
-            	    // SimpleCTP.g:9:9: declaration
-            	    {
-            	    pushFollow(FOLLOW_declaration_in_program43);
-            	    declaration();
-
-            	    state._fsp--;
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    if ( cnt1 >= 1 ) break loop1;
-                        EarlyExitException eee =
-                            new EarlyExitException(1, input);
-                        throw eee;
-                }
-                cnt1++;
-            } while (true);
-
-
-            }
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "program"
-
-
-    // $ANTLR start "declaration"
-    // SimpleCTP.g:12:1: declaration : ( variable | ^( FUNC_DECL functionHeader ) | ^( FUNC_DEF functionHeader block ) );
-    public final void declaration() throws RecognitionException {
-        try {
-            // SimpleCTP.g:13:5: ( variable | ^( FUNC_DECL functionHeader ) | ^( FUNC_DEF functionHeader block ) )
-            int alt2=3;
-            switch ( input.LA(1) ) {
-            case VAR_DEF:
-                {
-                alt2=1;
-                }
-                break;
-            case FUNC_DECL:
-                {
-                alt2=2;
-                }
-                break;
-            case FUNC_DEF:
-                {
-                alt2=3;
-                }
-                break;
-            default:
-                NoViableAltException nvae =
-                    new NoViableAltException("", 2, 0, input);
-
-                throw nvae;
-            }
-
-            switch (alt2) {
-                case 1 :
-                    // SimpleCTP.g:13:9: variable
-                    {
-                    pushFollow(FOLLOW_variable_in_declaration63);
-                    variable();
-
-                    state._fsp--;
-
-
-                    }
-                    break;
-                case 2 :
-                    // SimpleCTP.g:14:9: ^( FUNC_DECL functionHeader )
-                    {
-                    match(input,FUNC_DECL,FOLLOW_FUNC_DECL_in_declaration74); 
-
-                    match(input, Token.DOWN, null); 
-                    pushFollow(FOLLOW_functionHeader_in_declaration76);
-                    functionHeader();
-
-                    state._fsp--;
-
-
-                    match(input, Token.UP, null); 
-
-                    }
-                    break;
-                case 3 :
-                    // SimpleCTP.g:15:9: ^( FUNC_DEF functionHeader block )
-                    {
-                    match(input,FUNC_DEF,FOLLOW_FUNC_DEF_in_declaration88); 
-
-                    match(input, Token.DOWN, null); 
-                    pushFollow(FOLLOW_functionHeader_in_declaration90);
-                    functionHeader();
-
-                    state._fsp--;
-
-                    pushFollow(FOLLOW_block_in_declaration92);
-                    block();
-
-                    state._fsp--;
-
-
-                    match(input, Token.UP, null); 
-
-                    }
-                    break;
-
-            }
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "declaration"
-
-
-    // $ANTLR start "variable"
-    // SimpleCTP.g:18:1: variable : ^( VAR_DEF type declarator ) ;
-    public final void variable() throws RecognitionException {
-        try {
-            // SimpleCTP.g:19:5: ( ^( VAR_DEF type declarator ) )
-            // SimpleCTP.g:19:9: ^( VAR_DEF type declarator )
-            {
-            match(input,VAR_DEF,FOLLOW_VAR_DEF_in_variable113); 
-
-            match(input, Token.DOWN, null); 
-            pushFollow(FOLLOW_type_in_variable115);
-            type();
-
-            state._fsp--;
-
-            pushFollow(FOLLOW_declarator_in_variable117);
-            declarator();
-
-            state._fsp--;
-
-
-            match(input, Token.UP, null); 
-
-            }
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "variable"
-
-
-    // $ANTLR start "declarator"
-    // SimpleCTP.g:22:1: declarator : ID ;
-    public final void declarator() throws RecognitionException {
-        try {
-            // SimpleCTP.g:23:5: ( ID )
-            // SimpleCTP.g:23:9: ID
-            {
-            match(input,ID,FOLLOW_ID_in_declarator137); 
-
-            }
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "declarator"
-
-
-    // $ANTLR start "functionHeader"
-    // SimpleCTP.g:26:1: functionHeader : ^( FUNC_HDR type ID ( formalParameter )+ ) ;
-    public final void functionHeader() throws RecognitionException {
-        try {
-            // SimpleCTP.g:27:5: ( ^( FUNC_HDR type ID ( formalParameter )+ ) )
-            // SimpleCTP.g:27:9: ^( FUNC_HDR type ID ( formalParameter )+ )
-            {
-            match(input,FUNC_HDR,FOLLOW_FUNC_HDR_in_functionHeader158); 
-
-            match(input, Token.DOWN, null); 
-            pushFollow(FOLLOW_type_in_functionHeader160);
-            type();
-
-            state._fsp--;
-
-            match(input,ID,FOLLOW_ID_in_functionHeader162); 
-            // SimpleCTP.g:27:28: ( formalParameter )+
-            int cnt3=0;
-            loop3:
-            do {
-                int alt3=2;
-                int LA3_0 = input.LA(1);
-
-                if ( (LA3_0==ARG_DEF) ) {
-                    alt3=1;
-                }
-
-
-                switch (alt3) {
-            	case 1 :
-            	    // SimpleCTP.g:27:28: formalParameter
-            	    {
-            	    pushFollow(FOLLOW_formalParameter_in_functionHeader164);
-            	    formalParameter();
-
-            	    state._fsp--;
-
-
-            	    }
-            	    break;
-
-            	default :
-            	    if ( cnt3 >= 1 ) break loop3;
-                        EarlyExitException eee =
-                            new EarlyExitException(3, input);
-                        throw eee;
-                }
-                cnt3++;
-            } while (true);
-
-
-            match(input, Token.UP, null); 
-
-            }
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "functionHeader"
-
-
-    // $ANTLR start "formalParameter"
-    // SimpleCTP.g:30:1: formalParameter : ^( ARG_DEF type declarator ) ;
-    public final void formalParameter() throws RecognitionException {
-        try {
-            // SimpleCTP.g:31:5: ( ^( ARG_DEF type declarator ) )
-            // SimpleCTP.g:31:9: ^( ARG_DEF type declarator )
-            {
-            match(input,ARG_DEF,FOLLOW_ARG_DEF_in_formalParameter186); 
-
-            match(input, Token.DOWN, null); 
-            pushFollow(FOLLOW_type_in_formalParameter188);
-            type();
-
-            state._fsp--;
-
-            pushFollow(FOLLOW_declarator_in_formalParameter190);
-            declarator();
-
-            state._fsp--;
-
-
-            match(input, Token.UP, null); 
-
-            }
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "formalParameter"
-
-
-    // $ANTLR start "type"
-    // SimpleCTP.g:34:1: type : ( 'int' | 'char' | 'void' | ID );
-    public final void type() throws RecognitionException {
-        try {
-            // SimpleCTP.g:35:5: ( 'int' | 'char' | 'void' | ID )
-            // SimpleCTP.g:
-            {
-            if ( input.LA(1)==ID||(input.LA(1)>=INT_TYPE && input.LA(1)<=VOID) ) {
-                input.consume();
-                state.errorRecovery=false;
-            }
-            else {
-                MismatchedSetException mse = new MismatchedSetException(null,input);
-                throw mse;
-            }
-
-
-            }
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "type"
-
-
-    // $ANTLR start "block"
-    // SimpleCTP.g:41:1: block : ^( BLOCK ( variable )* ( stat )* ) ;
-    public final void block() throws RecognitionException {
-        try {
-            // SimpleCTP.g:42:5: ( ^( BLOCK ( variable )* ( stat )* ) )
-            // SimpleCTP.g:42:9: ^( BLOCK ( variable )* ( stat )* )
-            {
-            match(input,BLOCK,FOLLOW_BLOCK_in_block273); 
-
-            if ( input.LA(1)==Token.DOWN ) {
-                match(input, Token.DOWN, null); 
-                // SimpleCTP.g:42:17: ( variable )*
-                loop4:
-                do {
-                    int alt4=2;
-                    int LA4_0 = input.LA(1);
-
-                    if ( (LA4_0==VAR_DEF) ) {
-                        alt4=1;
-                    }
-
-
-                    switch (alt4) {
-                	case 1 :
-                	    // SimpleCTP.g:42:17: variable
-                	    {
-                	    pushFollow(FOLLOW_variable_in_block275);
-                	    variable();
-
-                	    state._fsp--;
-
-
-                	    }
-                	    break;
-
-                	default :
-                	    break loop4;
-                    }
-                } while (true);
-
-                // SimpleCTP.g:42:27: ( stat )*
-                loop5:
-                do {
-                    int alt5=2;
-                    int LA5_0 = input.LA(1);
-
-                    if ( ((LA5_0>=BLOCK && LA5_0<=FOR)||(LA5_0>=EQEQ && LA5_0<=PLUS)) ) {
-                        alt5=1;
-                    }
-
-
-                    switch (alt5) {
-                	case 1 :
-                	    // SimpleCTP.g:42:27: stat
-                	    {
-                	    pushFollow(FOLLOW_stat_in_block278);
-                	    stat();
-
-                	    state._fsp--;
-
-
-                	    }
-                	    break;
-
-                	default :
-                	    break loop5;
-                    }
-                } while (true);
-
-
-                match(input, Token.UP, null); 
-            }
-
-            }
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "block"
-
-
-    // $ANTLR start "stat"
-    // SimpleCTP.g:45:1: stat : ( forStat | expr | block );
-    public final void stat() throws RecognitionException {
-        try {
-            // SimpleCTP.g:45:5: ( forStat | expr | block )
-            int alt6=3;
-            switch ( input.LA(1) ) {
-            case FOR:
-                {
-                alt6=1;
-                }
-                break;
-            case ID:
-            case EQ:
-            case INT:
-            case EQEQ:
-            case LT:
-            case PLUS:
-                {
-                alt6=2;
-                }
-                break;
-            case BLOCK:
-                {
-                alt6=3;
-                }
-                break;
-            default:
-                NoViableAltException nvae =
-                    new NoViableAltException("", 6, 0, input);
-
-                throw nvae;
-            }
-
-            switch (alt6) {
-                case 1 :
-                    // SimpleCTP.g:45:7: forStat
-                    {
-                    pushFollow(FOLLOW_forStat_in_stat292);
-                    forStat();
-
-                    state._fsp--;
-
-
-                    }
-                    break;
-                case 2 :
-                    // SimpleCTP.g:46:7: expr
-                    {
-                    pushFollow(FOLLOW_expr_in_stat300);
-                    expr();
-
-                    state._fsp--;
-
-
-                    }
-                    break;
-                case 3 :
-                    // SimpleCTP.g:47:7: block
-                    {
-                    pushFollow(FOLLOW_block_in_stat308);
-                    block();
-
-                    state._fsp--;
-
-
-                    }
-                    break;
-
-            }
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "stat"
-
-
-    // $ANTLR start "forStat"
-    // SimpleCTP.g:50:1: forStat : ^( 'for' expr expr expr block ) ;
-    public final void forStat() throws RecognitionException {
-        try {
-            // SimpleCTP.g:51:5: ( ^( 'for' expr expr expr block ) )
-            // SimpleCTP.g:51:9: ^( 'for' expr expr expr block )
-            {
-            match(input,FOR,FOLLOW_FOR_in_forStat328); 
-
-            match(input, Token.DOWN, null); 
-            pushFollow(FOLLOW_expr_in_forStat330);
-            expr();
-
-            state._fsp--;
-
-            pushFollow(FOLLOW_expr_in_forStat332);
-            expr();
-
-            state._fsp--;
-
-            pushFollow(FOLLOW_expr_in_forStat334);
-            expr();
-
-            state._fsp--;
-
-            pushFollow(FOLLOW_block_in_forStat336);
-            block();
-
-            state._fsp--;
-
-
-            match(input, Token.UP, null); 
-
-            }
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "forStat"
-
-    public static class expr_return extends TreeRuleReturnScope {
-    };
-
-    // $ANTLR start "expr"
-    // SimpleCTP.g:54:1: expr : ( ^( EQEQ expr expr ) | ^( LT expr expr ) | ^( PLUS expr expr ) | ^( EQ ID e= expr ) | atom );
-    public final SimpleCTP.expr_return expr() throws RecognitionException {
-        SimpleCTP.expr_return retval = new SimpleCTP.expr_return();
-        retval.start = input.LT(1);
-
-        ANTLRCommonTree ID1=null;
-        SimpleCTP.expr_return e = null;
-
-
-        try {
-            // SimpleCTP.g:54:5: ( ^( EQEQ expr expr ) | ^( LT expr expr ) | ^( PLUS expr expr ) | ^( EQ ID e= expr ) | atom )
-            int alt7=5;
-            switch ( input.LA(1) ) {
-            case EQEQ:
-                {
-                alt7=1;
-                }
-                break;
-            case LT:
-                {
-                alt7=2;
-                }
-                break;
-            case PLUS:
-                {
-                alt7=3;
-                }
-                break;
-            case EQ:
-                {
-                alt7=4;
-                }
-                break;
-            case ID:
-            case INT:
-                {
-                alt7=5;
-                }
-                break;
-            default:
-                NoViableAltException nvae =
-                    new NoViableAltException("", 7, 0, input);
-
-                throw nvae;
-            }
-
-            switch (alt7) {
-                case 1 :
-                    // SimpleCTP.g:54:9: ^( EQEQ expr expr )
-                    {
-                    match(input,EQEQ,FOLLOW_EQEQ_in_expr352); 
-
-                    match(input, Token.DOWN, null); 
-                    pushFollow(FOLLOW_expr_in_expr354);
-                    expr();
-
-                    state._fsp--;
-
-                    pushFollow(FOLLOW_expr_in_expr356);
-                    expr();
-
-                    state._fsp--;
-
-
-                    match(input, Token.UP, null); 
-
-                    }
-                    break;
-                case 2 :
-                    // SimpleCTP.g:55:9: ^( LT expr expr )
-                    {
-                    match(input,LT,FOLLOW_LT_in_expr368); 
-
-                    match(input, Token.DOWN, null); 
-                    pushFollow(FOLLOW_expr_in_expr370);
-                    expr();
-
-                    state._fsp--;
-
-                    pushFollow(FOLLOW_expr_in_expr372);
-                    expr();
-
-                    state._fsp--;
-
-
-                    match(input, Token.UP, null); 
-
-                    }
-                    break;
-                case 3 :
-                    // SimpleCTP.g:56:9: ^( PLUS expr expr )
-                    {
-                    match(input,PLUS,FOLLOW_PLUS_in_expr384); 
-
-                    match(input, Token.DOWN, null); 
-                    pushFollow(FOLLOW_expr_in_expr386);
-                    expr();
-
-                    state._fsp--;
-
-                    pushFollow(FOLLOW_expr_in_expr388);
-                    expr();
-
-                    state._fsp--;
-
-
-                    match(input, Token.UP, null); 
-
-                    }
-                    break;
-                case 4 :
-                    // SimpleCTP.g:57:9: ^( EQ ID e= expr )
-                    {
-                    match(input,EQ,FOLLOW_EQ_in_expr400); 
-
-                    match(input, Token.DOWN, null); 
-                    ID1=(ANTLRCommonTree)match(input,ID,FOLLOW_ID_in_expr402); 
-                    pushFollow(FOLLOW_expr_in_expr406);
-                    e=expr();
-
-                    state._fsp--;
-
-
-                    match(input, Token.UP, null); 
-                     NSLog(@"assigning %@ to variable %@", (e!=null?(input.getTokenStream().toString(
-                      input.getTreeAdaptor().getTokenStartIndex(e.start),
-                      input.getTreeAdaptor().getTokenStopIndex(e.start))):null), (ID1!=null?ID1.getText():null)); 
-
-                    }
-                    break;
-                case 5 :
-                    // SimpleCTP.g:58:9: atom
-                    {
-                    pushFollow(FOLLOW_atom_in_expr419);
-                    atom();
-
-                    state._fsp--;
-
-
-                    }
-                    break;
-
-            }
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return retval;
-    }
-    // $ANTLR end "expr"
-
-
-    // $ANTLR start "atom"
-    // SimpleCTP.g:61:1: atom : ( ID | INT );
-    public final void atom() throws RecognitionException {
-        try {
-            // SimpleCTP.g:62:5: ( ID | INT )
-            // SimpleCTP.g:
-            {
-            if ( input.LA(1)==ID||input.LA(1)==INT ) {
-                input.consume();
-                state.errorRecovery=false;
-            }
-            else {
-                MismatchedSetException mse = new MismatchedSetException(null,input);
-                throw mse;
-            }
-
-
-            }
-
-        }
-        catch (RecognitionException re) {
-            reportError(re);
-            recover(input,re);
-        }
-        finally {
-        }
-        return ;
-    }
-    // $ANTLR end "atom"
-
-    // Delegated rules
-
-
- 
-
-    public static final BitSet FOLLOW_declaration_in_program43 = new BitSet(new long[]{0x0000000000000192L});
-    public static final BitSet FOLLOW_variable_in_declaration63 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_FUNC_DECL_in_declaration74 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_functionHeader_in_declaration76 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_FUNC_DEF_in_declaration88 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_functionHeader_in_declaration90 = new BitSet(new long[]{0x0000000000000200L});
-    public static final BitSet FOLLOW_block_in_declaration92 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_VAR_DEF_in_variable113 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_type_in_variable115 = new BitSet(new long[]{0x0000000000000400L});
-    public static final BitSet FOLLOW_declarator_in_variable117 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_ID_in_declarator137 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_FUNC_HDR_in_functionHeader158 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_type_in_functionHeader160 = new BitSet(new long[]{0x0000000000000400L});
-    public static final BitSet FOLLOW_ID_in_functionHeader162 = new BitSet(new long[]{0x0000000000000020L});
-    public static final BitSet FOLLOW_formalParameter_in_functionHeader164 = new BitSet(new long[]{0x0000000000000028L});
-    public static final BitSet FOLLOW_ARG_DEF_in_formalParameter186 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_type_in_formalParameter188 = new BitSet(new long[]{0x0000000000000400L});
-    public static final BitSet FOLLOW_declarator_in_formalParameter190 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_set_in_type0 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_BLOCK_in_block273 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_variable_in_block275 = new BitSet(new long[]{0x00000000000E3E18L});
-    public static final BitSet FOLLOW_stat_in_block278 = new BitSet(new long[]{0x00000000000E3E08L});
-    public static final BitSet FOLLOW_forStat_in_stat292 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_expr_in_stat300 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_block_in_stat308 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_FOR_in_forStat328 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_expr_in_forStat330 = new BitSet(new long[]{0x00000000000E1C00L});
-    public static final BitSet FOLLOW_expr_in_forStat332 = new BitSet(new long[]{0x00000000000E1C00L});
-    public static final BitSet FOLLOW_expr_in_forStat334 = new BitSet(new long[]{0x0000000000000200L});
-    public static final BitSet FOLLOW_block_in_forStat336 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_EQEQ_in_expr352 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_expr_in_expr354 = new BitSet(new long[]{0x00000000000E1C00L});
-    public static final BitSet FOLLOW_expr_in_expr356 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_LT_in_expr368 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_expr_in_expr370 = new BitSet(new long[]{0x00000000000E1C00L});
-    public static final BitSet FOLLOW_expr_in_expr372 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_PLUS_in_expr384 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_expr_in_expr386 = new BitSet(new long[]{0x00000000000E1C00L});
-    public static final BitSet FOLLOW_expr_in_expr388 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_EQ_in_expr400 = new BitSet(new long[]{0x0000000000000004L});
-    public static final BitSet FOLLOW_ID_in_expr402 = new BitSet(new long[]{0x00000000000E1C00L});
-    public static final BitSet FOLLOW_expr_in_expr406 = new BitSet(new long[]{0x0000000000000008L});
-    public static final BitSet FOLLOW_atom_in_expr419 = new BitSet(new long[]{0x0000000000000002L});
-    public static final BitSet FOLLOW_set_in_atom0 = new BitSet(new long[]{0x0000000000000002L});
-
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.m b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.m
deleted file mode 100644
index a9100eb..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.m
+++ /dev/null
@@ -1,1222 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : SimpleCTP.g
- *     -                            On : 2011-05-06 15:09:28
- *     -           for the tree parser : SimpleCTPTreeParser
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} SimpleCTP.g 2011-05-06 15:09:28
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SimpleCTP.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_declaration_in_program56;
-static const unsigned long long FOLLOW_declaration_in_program56_data[] = { 0x00000000020000C2LL};
-static ANTLRBitSet *FOLLOW_variable_in_declaration76;
-static const unsigned long long FOLLOW_variable_in_declaration76_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_FUNC_DECL_in_declaration87;
-static const unsigned long long FOLLOW_FUNC_DECL_in_declaration87_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_functionHeader_in_declaration89;
-static const unsigned long long FOLLOW_functionHeader_in_declaration89_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_FUNC_DEF_in_declaration101;
-static const unsigned long long FOLLOW_FUNC_DEF_in_declaration101_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_functionHeader_in_declaration103;
-static const unsigned long long FOLLOW_functionHeader_in_declaration103_data[] = { 0x0000000000000020LL};
-static ANTLRBitSet *FOLLOW_block_in_declaration105;
-static const unsigned long long FOLLOW_block_in_declaration105_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_VAR_DEF_in_variable126;
-static const unsigned long long FOLLOW_VAR_DEF_in_variable126_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_type_in_variable128;
-static const unsigned long long FOLLOW_type_in_variable128_data[] = { 0x0000000000004000LL};
-static ANTLRBitSet *FOLLOW_declarator_in_variable130;
-static const unsigned long long FOLLOW_declarator_in_variable130_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_declarator150;
-static const unsigned long long FOLLOW_K_ID_in_declarator150_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_FUNC_HDR_in_functionHeader171;
-static const unsigned long long FOLLOW_FUNC_HDR_in_functionHeader171_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_type_in_functionHeader173;
-static const unsigned long long FOLLOW_type_in_functionHeader173_data[] = { 0x0000000000004000LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_functionHeader175;
-static const unsigned long long FOLLOW_K_ID_in_functionHeader175_data[] = { 0x0000000000000010LL};
-static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader177;
-static const unsigned long long FOLLOW_formalParameter_in_functionHeader177_data[] = { 0x0000000000000018LL};
-static ANTLRBitSet *FOLLOW_ARG_DEF_in_formalParameter199;
-static const unsigned long long FOLLOW_ARG_DEF_in_formalParameter199_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_type_in_formalParameter201;
-static const unsigned long long FOLLOW_type_in_formalParameter201_data[] = { 0x0000000000004000LL};
-static ANTLRBitSet *FOLLOW_declarator_in_formalParameter203;
-static const unsigned long long FOLLOW_declarator_in_formalParameter203_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_BLOCK_in_block283;
-static const unsigned long long FOLLOW_BLOCK_in_block283_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_variable_in_block285;
-static const unsigned long long FOLLOW_variable_in_block285_data[] = { 0x000000000218F828LL};
-static ANTLRBitSet *FOLLOW_stat_in_block288;
-static const unsigned long long FOLLOW_stat_in_block288_data[] = { 0x000000000018F828LL};
-static ANTLRBitSet *FOLLOW_forStat_in_stat302;
-static const unsigned long long FOLLOW_forStat_in_stat302_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_expr_in_stat310;
-static const unsigned long long FOLLOW_expr_in_stat310_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_block_in_stat318;
-static const unsigned long long FOLLOW_block_in_stat318_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_FOR_in_forStat338;
-static const unsigned long long FOLLOW_K_FOR_in_forStat338_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_expr_in_forStat340;
-static const unsigned long long FOLLOW_expr_in_forStat340_data[] = { 0x000000000018D800LL};
-static ANTLRBitSet *FOLLOW_expr_in_forStat342;
-static const unsigned long long FOLLOW_expr_in_forStat342_data[] = { 0x000000000018D800LL};
-static ANTLRBitSet *FOLLOW_expr_in_forStat344;
-static const unsigned long long FOLLOW_expr_in_forStat344_data[] = { 0x0000000000000020LL};
-static ANTLRBitSet *FOLLOW_block_in_forStat346;
-static const unsigned long long FOLLOW_block_in_forStat346_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_K_EQEQ_in_expr362;
-static const unsigned long long FOLLOW_K_EQEQ_in_expr362_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr364;
-static const unsigned long long FOLLOW_expr_in_expr364_data[] = { 0x000000000018D800LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr366;
-static const unsigned long long FOLLOW_expr_in_expr366_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_K_LT_in_expr378;
-static const unsigned long long FOLLOW_K_LT_in_expr378_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr380;
-static const unsigned long long FOLLOW_expr_in_expr380_data[] = { 0x000000000018D800LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr382;
-static const unsigned long long FOLLOW_expr_in_expr382_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_K_PLUS_in_expr394;
-static const unsigned long long FOLLOW_K_PLUS_in_expr394_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr396;
-static const unsigned long long FOLLOW_expr_in_expr396_data[] = { 0x000000000018D800LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr398;
-static const unsigned long long FOLLOW_expr_in_expr398_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_K_EQ_in_expr410;
-static const unsigned long long FOLLOW_K_EQ_in_expr410_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_expr412;
-static const unsigned long long FOLLOW_K_ID_in_expr412_data[] = { 0x000000000018D800LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr416;
-static const unsigned long long FOLLOW_expr_in_expr416_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_atom_in_expr429;
-static const unsigned long long FOLLOW_atom_in_expr429_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-@implementation Symbols_Scope  /* globalAttributeScopeImplementation */
-/* start of synthesize -- OBJC-Line 1750 */
-
-@synthesize tree;
-
-+ (Symbols_Scope *)newSymbols_Scope
-{
-    return [[[Symbols_Scope alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* start of iterate get and set functions */
-
-- (ANTLRCommonTree *)gettree { return( tree ); }
-
-- (void)settree:(ANTLRCommonTree *)aVal { tree = aVal; }
-
-/* End of iterate get and set functions */
-
-@end /* end of Symbols_Scope implementation */
-
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule Return Scopes start
-@implementation SimpleCTP_expr_return /* returnScope */
- /* start of synthesize -- OBJC-Line 1837 */
-+ (SimpleCTP_expr_return *)newSimpleCTP_expr_return
-{
-    return [[[SimpleCTP_expr_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-//#pragma mark Rule return scopes start
-//
-
-#pragma mark Rule return scopes start
-
-@implementation SimpleCTP  // line 637
-
-/* ObjC start of ruleAttributeScope */
-#pragma mark Dynamic Rule Scopes
-/* ObjC end of ruleAttributeScope */
-#pragma mark global Attribute Scopes
-/* ObjC start globalAttributeScope */
-static _stack;
-
-/* ObjC end globalAttributeScope */
-/* ObjC start actions.(actionScope).synthesize */
-/* ObjC end actions.(actionScope).synthesize */
-/* ObjC start synthesize() */
-/* ObjC end synthesize() */
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_declaration_in_program56 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declaration_in_program56_data Count:(NSUInteger)1] retain];
-    FOLLOW_variable_in_declaration76 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_declaration76_data Count:(NSUInteger)1] retain];
-    FOLLOW_FUNC_DECL_in_declaration87 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_DECL_in_declaration87_data Count:(NSUInteger)1] retain];
-    FOLLOW_functionHeader_in_declaration89 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration89_data Count:(NSUInteger)1] retain];
-    FOLLOW_FUNC_DEF_in_declaration101 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_DEF_in_declaration101_data Count:(NSUInteger)1] retain];
-    FOLLOW_functionHeader_in_declaration103 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration103_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_declaration105 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_declaration105_data Count:(NSUInteger)1] retain];
-    FOLLOW_VAR_DEF_in_variable126 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_VAR_DEF_in_variable126_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_variable128 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_variable128_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_variable130 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_variable130_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_declarator150 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_declarator150_data Count:(NSUInteger)1] retain];
-    FOLLOW_FUNC_HDR_in_functionHeader171 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_HDR_in_functionHeader171_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_functionHeader173 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_functionHeader173_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_functionHeader175 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_functionHeader175_data Count:(NSUInteger)1] retain];
-    FOLLOW_formalParameter_in_functionHeader177 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader177_data Count:(NSUInteger)1] retain];
-    FOLLOW_ARG_DEF_in_formalParameter199 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ARG_DEF_in_formalParameter199_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_formalParameter201 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_formalParameter201_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_formalParameter203 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_formalParameter203_data Count:(NSUInteger)1] retain];
-    FOLLOW_BLOCK_in_block283 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_BLOCK_in_block283_data Count:(NSUInteger)1] retain];
-    FOLLOW_variable_in_block285 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_block285_data Count:(NSUInteger)1] retain];
-    FOLLOW_stat_in_block288 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block288_data Count:(NSUInteger)1] retain];
-    FOLLOW_forStat_in_stat302 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_forStat_in_stat302_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_stat310 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_stat310_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_stat318 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat318_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_FOR_in_forStat338 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_FOR_in_forStat338_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_forStat340 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat340_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_forStat342 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat342_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_forStat344 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat344_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_forStat346 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_forStat346_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_EQEQ_in_expr362 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQEQ_in_expr362_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr364 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr364_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr366 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr366_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_LT_in_expr378 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_LT_in_expr378_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr380 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr380_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr382 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr382_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_PLUS_in_expr394 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_PLUS_in_expr394_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr396 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr396_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr398 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr398_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_EQ_in_expr410 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQ_in_expr410_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_expr412 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_expr412_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr416 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr416_data Count:(NSUInteger)1] retain];
-    FOLLOW_atom_in_expr429 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_expr429_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"ARG_DEF", @"BLOCK", @"FUNC_DECL", @"FUNC_DEF", @"FUNC_HDR", @"K_CHAR", 
- @"K_COMMA", @"K_EQ", @"K_EQEQ", @"K_FOR", @"K_ID", @"K_INT", @"K_INT_TYPE", 
- @"K_LCURLY", @"K_LCURVE", @"K_LT", @"K_PLUS", @"K_RCURLY", @"K_RCURVE", 
- @"K_SEMICOLON", @"K_VOID", @"VAR_DEF", @"WS", nil] retain]];
-    [ANTLRBaseRecognizer setGrammarFileName:@"SimpleCTP.g"];
-}
-
-+ (SimpleCTP *)newSimpleCTP:(id<ANTLRTreeNodeStream>)aStream
-{
-    return [[SimpleCTP alloc] initWithStream:aStream];
-
-
-}
-
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)aStream
-{
-    self = [super initWithStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:12+1] retain]];
-    if ( self != nil ) {
-
-
-        /* globalAttributeScopeInit */
-        Symbols_scope = [Symbols_Scope newSymbols_Scope];
-        Symbols_stack = [ANTLRSymbolStack newANTLRSymbolStackWithLen:30];
-        /* start of actions-actionScope-init */
-        /* start of init */
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [Symbols_stack release];
-    [super dealloc];
-}
-
-/* ObjC start members */
-/* ObjC end members */
-/* ObjC start actions.(actionScope).methods */
-/* ObjC end actions.(actionScope).methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-/* ObjC start rules */
-/*
- * $ANTLR start program
- * SimpleCTP.g:13:1: program : ( declaration )+ ;
- */
-- (void) program
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:14:5: ( ( declaration )+ ) // ruleBlockSingleAlt
-        // SimpleCTP.g:14:9: ( declaration )+ // alt
-        {
-        // SimpleCTP.g:14:9: ( declaration )+ // positiveClosureBlock
-        NSInteger cnt1 = 0;
-        do {
-            NSInteger alt1 = 2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0 >= FUNC_DECL && LA1_0 <= FUNC_DEF)||LA1_0==VAR_DEF) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // SimpleCTP.g:14:9: declaration // alt
-                    {
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_declaration_in_program56];
-                    [self declaration];
-
-                    [self popFollow];
-
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end program */
-
-/*
- * $ANTLR start declaration
- * SimpleCTP.g:17:1: declaration : ( variable | ^( FUNC_DECL functionHeader ) | ^( FUNC_DEF functionHeader block ) );
- */
-- (void) declaration
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:18:5: ( variable | ^( FUNC_DECL functionHeader ) | ^( FUNC_DEF functionHeader block ) ) //ruleblock
-        NSInteger alt2=3;
-        unichar charLA2 = [input LA:1];
-        switch (charLA2) {
-            case VAR_DEF: ;
-                {
-                alt2=1;
-                }
-                break;
-            case FUNC_DECL: ;
-                {
-                alt2=2;
-                }
-                break;
-            case FUNC_DEF: ;
-                {
-                alt2=3;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:2 state:0 stream:input];
-            nvae.c = charLA2;
-            @throw nvae;
-
-        }
-
-        switch (alt2) {
-            case 1 : ;
-                // SimpleCTP.g:18:9: variable // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_variable_in_declaration76];
-                [self variable];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-            case 2 : ;
-                // SimpleCTP.g:19:9: ^( FUNC_DECL functionHeader ) // alt
-                {
-                [self match:input TokenType:FUNC_DECL Follow:FOLLOW_FUNC_DECL_in_declaration87]; 
-
-                    [self match:input TokenType:DOWN Follow:nil]; 
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_functionHeader_in_declaration89];
-                    [self functionHeader];
-
-                    [self popFollow];
-
-
-
-                    [self match:input TokenType:UP Follow:nil]; 
-
-
-                }
-                break;
-            case 3 : ;
-                // SimpleCTP.g:20:9: ^( FUNC_DEF functionHeader block ) // alt
-                {
-                [self match:input TokenType:FUNC_DEF Follow:FOLLOW_FUNC_DEF_in_declaration101]; 
-
-                    [self match:input TokenType:DOWN Follow:nil]; 
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_functionHeader_in_declaration103];
-                    [self functionHeader];
-
-                    [self popFollow];
-
-
-
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_block_in_declaration105];
-                    [self block];
-
-                    [self popFollow];
-
-
-
-                    [self match:input TokenType:UP Follow:nil]; 
-
-
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end declaration */
-
-/*
- * $ANTLR start variable
- * SimpleCTP.g:23:1: variable : ^( VAR_DEF type declarator ) ;
- */
-- (void) variable
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:24:5: ( ^( VAR_DEF type declarator ) ) // ruleBlockSingleAlt
-        // SimpleCTP.g:24:9: ^( VAR_DEF type declarator ) // alt
-        {
-        [self match:input TokenType:VAR_DEF Follow:FOLLOW_VAR_DEF_in_variable126]; 
-
-            [self match:input TokenType:DOWN Follow:nil]; 
-            /* ruleRef */
-            [self pushFollow:FOLLOW_type_in_variable128];
-            [self type];
-
-            [self popFollow];
-
-
-
-            /* ruleRef */
-            [self pushFollow:FOLLOW_declarator_in_variable130];
-            [self declarator];
-
-            [self popFollow];
-
-
-
-            [self match:input TokenType:UP Follow:nil]; 
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end variable */
-
-/*
- * $ANTLR start declarator
- * SimpleCTP.g:27:1: declarator : K_ID ;
- */
-- (void) declarator
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:28:5: ( K_ID ) // ruleBlockSingleAlt
-        // SimpleCTP.g:28:9: K_ID // alt
-        {
-        [self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_declarator150]; 
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end declarator */
-
-/*
- * $ANTLR start functionHeader
- * SimpleCTP.g:31:1: functionHeader : ^( FUNC_HDR type K_ID ( formalParameter )+ ) ;
- */
-- (void) functionHeader
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:32:5: ( ^( FUNC_HDR type K_ID ( formalParameter )+ ) ) // ruleBlockSingleAlt
-        // SimpleCTP.g:32:9: ^( FUNC_HDR type K_ID ( formalParameter )+ ) // alt
-        {
-        [self match:input TokenType:FUNC_HDR Follow:FOLLOW_FUNC_HDR_in_functionHeader171]; 
-
-            [self match:input TokenType:DOWN Follow:nil]; 
-            /* ruleRef */
-            [self pushFollow:FOLLOW_type_in_functionHeader173];
-            [self type];
-
-            [self popFollow];
-
-
-
-            [self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_functionHeader175]; 
-
-            // SimpleCTP.g:32:30: ( formalParameter )+ // positiveClosureBlock
-            NSInteger cnt3 = 0;
-            do {
-                NSInteger alt3 = 2;
-                NSInteger LA3_0 = [input LA:1];
-                if ( (LA3_0==ARG_DEF) ) {
-                    alt3=1;
-                }
-
-
-                switch (alt3) {
-                    case 1 : ;
-                        // SimpleCTP.g:32:30: formalParameter // alt
-                        {
-                        /* ruleRef */
-                        [self pushFollow:FOLLOW_formalParameter_in_functionHeader177];
-                        [self formalParameter];
-
-                        [self popFollow];
-
-
-
-                        }
-                        break;
-
-                    default :
-                        if ( cnt3 >= 1 )
-                            goto loop3;
-                        ANTLREarlyExitException *eee =
-                            [ANTLREarlyExitException newException:input decisionNumber:3];
-                        @throw eee;
-                }
-                cnt3++;
-            } while (YES);
-            loop3: ;
-
-
-            [self match:input TokenType:UP Follow:nil]; 
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end functionHeader */
-
-/*
- * $ANTLR start formalParameter
- * SimpleCTP.g:35:1: formalParameter : ^( ARG_DEF type declarator ) ;
- */
-- (void) formalParameter
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:36:5: ( ^( ARG_DEF type declarator ) ) // ruleBlockSingleAlt
-        // SimpleCTP.g:36:9: ^( ARG_DEF type declarator ) // alt
-        {
-        [self match:input TokenType:ARG_DEF Follow:FOLLOW_ARG_DEF_in_formalParameter199]; 
-
-            [self match:input TokenType:DOWN Follow:nil]; 
-            /* ruleRef */
-            [self pushFollow:FOLLOW_type_in_formalParameter201];
-            [self type];
-
-            [self popFollow];
-
-
-
-            /* ruleRef */
-            [self pushFollow:FOLLOW_declarator_in_formalParameter203];
-            [self declarator];
-
-            [self popFollow];
-
-
-
-            [self match:input TokenType:UP Follow:nil]; 
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end formalParameter */
-
-/*
- * $ANTLR start type
- * SimpleCTP.g:39:1: type : ( K_INT_TYPE | K_CHAR | K_VOID | K_ID );
- */
-- (void) type
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:40:5: ( K_INT_TYPE | K_CHAR | K_VOID | K_ID ) // ruleBlockSingleAlt
-        // SimpleCTP.g: // alt
-        {
-        if ([input LA:1] == K_CHAR||[input LA:1] == K_ID||[input LA:1] == K_INT_TYPE||[input LA:1] == K_VOID) {
-            [input consume];
-            [state setIsErrorRecovery:NO];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            @throw mse;
-        }
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end type */
-
-/*
- * $ANTLR start block
- * SimpleCTP.g:46:1: block : ^( BLOCK ( variable )* ( stat )* ) ;
- */
-- (void) block
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:47:5: ( ^( BLOCK ( variable )* ( stat )* ) ) // ruleBlockSingleAlt
-        // SimpleCTP.g:47:9: ^( BLOCK ( variable )* ( stat )* ) // alt
-        {
-        [self match:input TokenType:BLOCK Follow:FOLLOW_BLOCK_in_block283]; 
-
-        if ( [input LA:1] == DOWN ) {
-            [self match:input TokenType:DOWN Follow:nil]; 
-            do {
-                NSInteger alt4=2;
-                NSInteger LA4_0 = [input LA:1];
-                if ( (LA4_0==VAR_DEF) ) {
-                    alt4=1;
-                }
-
-
-                switch (alt4) {
-                    case 1 : ;
-                        // SimpleCTP.g:47:17: variable // alt
-                        {
-                        /* ruleRef */
-                        [self pushFollow:FOLLOW_variable_in_block285];
-                        [self variable];
-
-                        [self popFollow];
-
-
-
-                        }
-                        break;
-
-                    default :
-                        goto loop4;
-                }
-            } while (YES);
-            loop4: ;
-
-
-            do {
-                NSInteger alt5=2;
-                NSInteger LA5_0 = [input LA:1];
-                if ( (LA5_0==BLOCK||(LA5_0 >= K_EQ && LA5_0 <= K_INT)||(LA5_0 >= K_LT && LA5_0 <= K_PLUS)) ) {
-                    alt5=1;
-                }
-
-
-                switch (alt5) {
-                    case 1 : ;
-                        // SimpleCTP.g:47:27: stat // alt
-                        {
-                        /* ruleRef */
-                        [self pushFollow:FOLLOW_stat_in_block288];
-                        [self stat];
-
-                        [self popFollow];
-
-
-
-                        }
-                        break;
-
-                    default :
-                        goto loop5;
-                }
-            } while (YES);
-            loop5: ;
-
-
-            [self match:input TokenType:UP Follow:nil]; 
-        }
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end block */
-
-/*
- * $ANTLR start stat
- * SimpleCTP.g:50:1: stat : ( forStat | expr | block );
- */
-- (void) stat
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:50:5: ( forStat | expr | block ) //ruleblock
-        NSInteger alt6=3;
-        unichar charLA6 = [input LA:1];
-        switch (charLA6) {
-            case K_FOR: ;
-                {
-                alt6=1;
-                }
-                break;
-            case K_EQ: ;
-            case K_EQEQ: ;
-            case K_ID: ;
-            case K_INT: ;
-            case K_LT: ;
-            case K_PLUS: ;
-                {
-                alt6=2;
-                }
-                break;
-            case BLOCK: ;
-                {
-                alt6=3;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:6 state:0 stream:input];
-            nvae.c = charLA6;
-            @throw nvae;
-
-        }
-
-        switch (alt6) {
-            case 1 : ;
-                // SimpleCTP.g:50:7: forStat // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_forStat_in_stat302];
-                [self forStat];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-            case 2 : ;
-                // SimpleCTP.g:51:7: expr // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_expr_in_stat310];
-                [self expr];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-            case 3 : ;
-                // SimpleCTP.g:52:7: block // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_block_in_stat318];
-                [self block];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end stat */
-
-/*
- * $ANTLR start forStat
- * SimpleCTP.g:55:1: forStat : ^( K_FOR expr expr expr block ) ;
- */
-- (void) forStat
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:56:5: ( ^( K_FOR expr expr expr block ) ) // ruleBlockSingleAlt
-        // SimpleCTP.g:56:9: ^( K_FOR expr expr expr block ) // alt
-        {
-        [self match:input TokenType:K_FOR Follow:FOLLOW_K_FOR_in_forStat338]; 
-
-            [self match:input TokenType:DOWN Follow:nil]; 
-            /* ruleRef */
-            [self pushFollow:FOLLOW_expr_in_forStat340];
-            [self expr];
-
-            [self popFollow];
-
-
-
-            /* ruleRef */
-            [self pushFollow:FOLLOW_expr_in_forStat342];
-            [self expr];
-
-            [self popFollow];
-
-
-
-            /* ruleRef */
-            [self pushFollow:FOLLOW_expr_in_forStat344];
-            [self expr];
-
-            [self popFollow];
-
-
-
-            /* ruleRef */
-            [self pushFollow:FOLLOW_block_in_forStat346];
-            [self block];
-
-            [self popFollow];
-
-
-
-            [self match:input TokenType:UP Follow:nil]; 
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end forStat */
-
-/*
- * $ANTLR start expr
- * SimpleCTP.g:59:1: expr : ( ^( K_EQEQ expr expr ) | ^( K_LT expr expr ) | ^( K_PLUS expr expr ) | ^( K_EQ K_ID e= expr ) | atom );
- */
-- (SimpleCTP_expr_return *) expr
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    SimpleCTP_expr_return * retval = [SimpleCTP_expr_return newSimpleCTP_expr_return];
-    [retval setStart:[input LT:1]];
-
-
-    @try {
-        ANTLRCommonTree *K_ID1 = nil;
-         SimpleCTP_expr_return * e = nil ;
-         
-
-        // SimpleCTP.g:59:5: ( ^( K_EQEQ expr expr ) | ^( K_LT expr expr ) | ^( K_PLUS expr expr ) | ^( K_EQ K_ID e= expr ) | atom ) //ruleblock
-        NSInteger alt7=5;
-        unichar charLA7 = [input LA:1];
-        switch (charLA7) {
-            case K_EQEQ: ;
-                {
-                alt7=1;
-                }
-                break;
-            case K_LT: ;
-                {
-                alt7=2;
-                }
-                break;
-            case K_PLUS: ;
-                {
-                alt7=3;
-                }
-                break;
-            case K_EQ: ;
-                {
-                alt7=4;
-                }
-                break;
-            case K_ID: ;
-            case K_INT: ;
-                {
-                alt7=5;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:7 state:0 stream:input];
-            nvae.c = charLA7;
-            @throw nvae;
-
-        }
-
-        switch (alt7) {
-            case 1 : ;
-                // SimpleCTP.g:59:9: ^( K_EQEQ expr expr ) // alt
-                {
-                [self match:input TokenType:K_EQEQ Follow:FOLLOW_K_EQEQ_in_expr362]; 
-
-                    [self match:input TokenType:DOWN Follow:nil]; 
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_expr_in_expr364];
-                    [self expr];
-
-                    [self popFollow];
-
-
-
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_expr_in_expr366];
-                    [self expr];
-
-                    [self popFollow];
-
-
-
-                    [self match:input TokenType:UP Follow:nil]; 
-
-
-                }
-                break;
-            case 2 : ;
-                // SimpleCTP.g:60:9: ^( K_LT expr expr ) // alt
-                {
-                [self match:input TokenType:K_LT Follow:FOLLOW_K_LT_in_expr378]; 
-
-                    [self match:input TokenType:DOWN Follow:nil]; 
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_expr_in_expr380];
-                    [self expr];
-
-                    [self popFollow];
-
-
-
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_expr_in_expr382];
-                    [self expr];
-
-                    [self popFollow];
-
-
-
-                    [self match:input TokenType:UP Follow:nil]; 
-
-
-                }
-                break;
-            case 3 : ;
-                // SimpleCTP.g:61:9: ^( K_PLUS expr expr ) // alt
-                {
-                [self match:input TokenType:K_PLUS Follow:FOLLOW_K_PLUS_in_expr394]; 
-
-                    [self match:input TokenType:DOWN Follow:nil]; 
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_expr_in_expr396];
-                    [self expr];
-
-                    [self popFollow];
-
-
-
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_expr_in_expr398];
-                    [self expr];
-
-                    [self popFollow];
-
-
-
-                    [self match:input TokenType:UP Follow:nil]; 
-
-
-                }
-                break;
-            case 4 : ;
-                // SimpleCTP.g:62:9: ^( K_EQ K_ID e= expr ) // alt
-                {
-                [self match:input TokenType:K_EQ Follow:FOLLOW_K_EQ_in_expr410]; 
-
-                    [self match:input TokenType:DOWN Follow:nil]; 
-                    K_ID1=(ANTLRCommonTree *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_expr412]; 
-
-                    /* ruleRef */
-                    [self pushFollow:FOLLOW_expr_in_expr416];
-                    e = [self expr];
-
-                    [self popFollow];
-
-
-
-                    [self match:input TokenType:UP Follow:nil]; 
-
-
-                 NSLog(@"assigning %@ to variable %@", (e!=nil?[[input getTokenStream] toStringFromStart:[[input getTreeAdaptor] getTokenStartIndex:[e getStart]]ToEnd:[[input getTreeAdaptor] getTokenStopIndex:[e getStart]]]:0), (K_ID1!=nil?K_ID1.text:nil)); 
-
-
-                }
-                break;
-            case 5 : ;
-                // SimpleCTP.g:63:9: atom // alt
-                {
-                /* ruleRef */
-                [self pushFollow:FOLLOW_atom_in_expr429];
-                [self atom];
-
-                [self popFollow];
-
-
-
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end expr */
-
-/*
- * $ANTLR start atom
- * SimpleCTP.g:66:1: atom : ( K_ID | K_INT );
- */
-- (void) atom
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // SimpleCTP.g:67:5: ( K_ID | K_INT ) // ruleBlockSingleAlt
-        // SimpleCTP.g: // alt
-        {
-        if ((([input LA:1] >= K_ID) && ([input LA:1] <= K_INT))) {
-            [input consume];
-            [state setIsErrorRecovery:NO];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            @throw mse;
-        }
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end atom */
-/* ObjC end rules */
-
-@end /* end of SimpleCTP implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/antlr3.h b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/antlr3.h
deleted file mode 100644
index 4f16279..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/antlr3.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#import <ANTLR/ANTLRBaseMapElement.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRBaseStack.h>
-#import <ANTLR/ANTLRBaseTree.h>
-#import <ANTLR/ANTLRBaseTreeAdaptor.h>
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBufferedTokenStream.h>
-#import <ANTLR/ANTLRBufferedTreeNodeStream.h>
-#import <ANTLR/ANTLRCharStream.h>
-#import <ANTLR/ANTLRCharStreamState.h>
-#import <ANTLR/ANTLRCommonErrorNode.h>
-#import <ANTLR/ANTLRCommonToken.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRDebug.h>
-#import <ANTLR/ANTLRDebugEventProxy.h>
-#import <ANTLR/ANTLRDebugEventListener.h>
-#import <ANTLR/ANTLRDebugParser.h>
-#import <ANTLR/ANTLRDebugTokenStream.h>
-#import <ANTLR/ANTLRDebugTreeAdaptor.h>
-#import <ANTLR/ANTLRDebugTreeNodeStream.h>
-#import <ANTLR/ANTLRDebugTreeParser.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRError.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRFastQueue.h>
-#import <ANTLR/ANTLRHashMap.h>
-#import <ANTLR/ANTLRHashRule.h>
-#import <ANTLR/ANTLRIntArray.h>
-#import <ANTLR/ANTLRIntStream.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRLexerRuleReturnScope.h>
-#import <ANTLR/ANTLRLinkBase.h>
-#import <ANTLR/ANTLRLookaheadStream.h>
-#import <ANTLR/ANTLRMapElement.h>
-#import <ANTLR/ANTLRMap.h>
-#import <ANTLR/ANTLRMismatchedNotSetException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRMissingTokenException.h>
-#import <ANTLR/ANTLRNodeMapElement.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRPtrBuffer.h>
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLRRecognizerSharedState.h>
-#import <ANTLR/ANTLRRewriteRuleElementStream.h>
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
-#import <ANTLR/ANTLRRuleMemo.h>
-#import <ANTLR/ANTLRRuleStack.h>
-#import <ANTLR/ANTLRRuleReturnScope.h>
-#import <ANTLR/ANTLRRuntimeException.h>
-#import <ANTLR/ANTLRStreamEnumerator.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRSymbolStack.h>
-#import <ANTLR/ANTLRToken+DebuggerSupport.h>
-#import <ANTLR/ANTLRToken.h>
-#import <ANTLR/ANTLRTokenRewriteStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRTokenStream.h>
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeException.h>
-#import <ANTLR/ANTLRTreeIterator.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-#import <ANTLR/ANTLRUnbufferedTokenStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-#import <ANTLR/ANTLRUniqueIDMap.h>
-#import <ANTLR/ANTLRUnwantedTokenException.h>
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/main.m b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/main.m
deleted file mode 100644
index f735ac3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/main.m
+++ /dev/null
@@ -1,84 +0,0 @@
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-#import "SimpleCLexer.h"
-#import "SimpleCParser.h"
-#import "SimpleCWalker.h"
-#import "stdio.h"
-#include <unistd.h>
-
-int main(int argc, const char * argv[]) {
-    NSError *anError;
-	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
-    char *inp = "/Users/acondit/source/antlr3/acondit_localhost/code/antlr/antlr3-main/runtime/ObjC/Framework/examples/simplecTreeParser/input";
-    
-/*
-    if (argc < 2) {
-        NSLog(@"provide the input file, please");
-        return 1;
-    }
- */
-	
-	// simply read in the input file in one gulp
-	NSString *string = [NSString stringWithContentsOfFile:[NSString stringWithCString:inp encoding:NSASCIIStringEncoding] encoding:NSASCIIStringEncoding error:&anError];
-	NSLog(@"input is : %@", string);
-
-	// create a stream over the input, so the lexer can seek back and forth, but don't copy the string,
-	// as we make sure it will not go away.
-	// If the string would be coming from a volatile source, say a text field, we could opt to copy the string.
-	// That way we could do the parsing in a different thread, and still let the user edit the original string.
-	// But here we do it the simple way.
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:string];
-	
-	// Actually create the lexer feeding of the character stream.
-	SimpleCLexer *lexer = [SimpleCLexer newSimpleCLexerWithCharStream:stream];
-	
-	// For fun, you could print all tokens the lexer recognized, but we can only do it once. After that
-	// we would need to reset the lexer, and lex again.
-//    id<ANTLRToken> currentToken;
-//    while ((currentToken = [lexer nextToken]) && [currentToken type] != ANTLRTokenTypeEOF) {
-//        NSLog(@"%@", currentToken);
-//    }
-//	  [lexer reset];
-	
-	// Since the parser needs to scan back and forth over the tokens, we put them into a stream, too.
-	ANTLRCommonTokenStream *tokenStream = [ANTLRCommonTokenStream newANTLRCommonTokenStreamWithTokenSource:lexer];
-
-	// Construct a parser and feed it the token stream.
-	SimpleCParser *parser = [[SimpleCParser alloc] initWithTokenStream:tokenStream];
-	
-	// We start the parsing process by calling a parser rule. In theory you can call any parser rule here,
-	// but it obviously has to match the input token stream. Otherwise parsing would fail.
-	// Also watch out for internal dependencies in your grammar (e.g. you use a symbol table that's only
-	// initialized when you call a specific parser rule).
-	// This is a simple example, so we just call the top-most rule 'program'.
-	// Since we want to parse the AST the parser builds, we just ask the returned object for that.
-	ANTLRCommonTree *program_tree = [[parser program] getTree];
-
-    NSLog(@"Reached end of first parse\n");
-	// Print the matched tree as a Lisp-style string
-	NSLog(@"tree: %@", [program_tree treeDescription]);
-	
-	// Create a new tree node stream that's feeding off of the root node (thus seeing the whole tree)
-	ANTLRCommonTreeNodeStream *treeStream = [ANTLRCommonTreeNodeStream newANTLRCommonTreeNodeStream:program_tree];
-	// tell the TreeNodeStream where the tokens originally came from, so we can retrieve arbitrary tokens and their text.
-	[treeStream setTokenStream:tokenStream];
-	
-	// Create the treeparser instance, passing it the stream of nodes
-	SimpleCWalker *walker = [[SimpleCWalker alloc] initWithStream:treeStream];
-	// As with parsers, you can invoke any treeparser rule here.
-	[walker program];
-
-	// Whew, done. Release everything that we are responsible for.
-	[lexer release];
-	[stream release];
-	[tokenStream release];
-	[parser release];
-	[treeStream release];
-	[walker release];
-
-	[pool release];
-
-    // use this for ObjectAlloc on Tiger
-    //while(1) sleep(5);
-	return 0;
-}
\ No newline at end of file
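
The deleted main.m above walks the standard ANTLR 3 pipeline: character stream -> lexer -> buffered token stream -> parser -> AST -> tree node stream -> tree walker. Since this tree only keeps the Java runtime (see Android.bp), the same driver flow in Java looks roughly like the sketch below. This is a minimal sketch, not code from this change: SimpleCLexer, SimpleCParser and SimpleCWalker are assumed to have been generated from the example's SimpleC.g grammar, and the input string is illustrative.

    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.CommonTokenStream;
    import org.antlr.runtime.RecognitionException;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeNodeStream;

    public class SimpleCDriver {
        public static void main(String[] args) throws RecognitionException {
            // Wrap the input in a character stream so the lexer can seek back and forth.
            ANTLRStringStream input = new ANTLRStringStream("int x; int f(int a) { x = a + 1; }");

            // The lexer feeds off the character stream; CommonTokenStream buffers its
            // tokens so the parser can scan over them freely.
            SimpleCLexer lexer = new SimpleCLexer(input);          // assumed generated from SimpleC.g
            CommonTokenStream tokens = new CommonTokenStream(lexer);

            // Invoke the start rule and ask the returned scope for the AST it built.
            SimpleCParser parser = new SimpleCParser(tokens);      // assumed generated from SimpleC.g
            CommonTree tree = (CommonTree) parser.program().getTree();
            System.out.println(tree.toStringTree());               // Lisp-style dump of the tree

            // Walk the AST: a node stream over the root, with the token stream attached
            // so the walker can still retrieve the original token text.
            CommonTreeNodeStream nodes = new CommonTreeNodeStream(tree);
            nodes.setTokenStream(tokens);
            SimpleCWalker walker = new SimpleCWalker(nodes);       // assumed generated tree grammar
            walker.program();
        }
    }
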
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleC.tokens b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleC.tokens
deleted file mode 100644
index 6d06db9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleC.tokens
+++ /dev/null
@@ -1,29 +0,0 @@
-K_ID=10
-T__26=26
-T__25=25
-T__24=24
-T__23=23
-K_EQEQ=16
-T__22=22
-K_INT=11
-T__21=21
-K_FOR=14
-FUNC_HDR=6
-FUNC_DEF=8
-K_INT_TYPE=19
-FUNC_DECL=7
-ARG_DEF=5
-WS=20
-K_EQ=15
-BLOCK=9
-K_LT=17
-K_CHAR=12
-K_VOID=13
-VAR_DEF=4
-K_PLUS=18
-';'=21
-'}'=26
-'('=22
-','=23
-')'=24
-'{'=25
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCLexer.h
deleted file mode 100644
index 6972a44..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCLexer.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// $ANTLR 3.2 Aug 23, 2010 07:48:06 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g 2010-08-23 07:54:47
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-#pragma mark Cyclic DFA interface start DFA4
-@interface DFA4 : ANTLRDFA {
-}
-+ newDFA4WithRecognizer:(ANTLRBaseRecognizer *)theRecognizer;
-- initWithRecognizer:(ANTLRBaseRecognizer *)recognizer;
-@end /* end of DFA4 interface  */
-
-#pragma mark Cyclic DFA interface end DFA4
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#define K_ID 10
-#define T__26 26
-#define T__25 25
-#define T__24 24
-#define T__23 23
-#define K_EQEQ 16
-#define T__22 22
-#define K_INT 11
-#define T__21 21
-#define K_FOR 14
-#define FUNC_HDR 6
-#define FUNC_DEF 8
-#define EOF -1
-#define K_INT_TYPE 19
-#define FUNC_DECL 7
-#define ARG_DEF 5
-#define WS 20
-#define K_EQ 15
-#define BLOCK 9
-#define K_LT 17
-#define K_CHAR 12
-#define K_VOID 13
-#define VAR_DEF 4
-#define K_PLUS 18
-@interface SimpleCLexer : ANTLRLexer { // line 283
-DFA4 *dfa4;
-// start of actions.lexer.memVars
-// start of action-actionScope-memVars
-}
-+ (SimpleCLexer *)newSimpleCLexer:(id<ANTLRCharStream>)anInput;
-
-- (void)mT__21; 
-- (void)mT__22; 
-- (void)mT__23; 
-- (void)mT__24; 
-- (void)mT__25; 
-- (void)mT__26; 
-- (void)mK_FOR; 
-- (void)mK_INT_TYPE; 
-- (void)mK_CHAR; 
-- (void)mK_VOID; 
-- (void)mK_ID; 
-- (void)mK_INT; 
-- (void)mK_EQ; 
-- (void)mK_EQEQ; 
-- (void)mK_LT; 
-- (void)mK_PLUS; 
-- (void)mWS; 
-- (void)mTokens; 
-
-@end /* end of SimpleCLexer interface */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCLexer.m
deleted file mode 100644
index f8d4cfe..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCLexer.m
+++ /dev/null
@@ -1,985 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 23, 2010 07:48:06
- *
- *     -  From the grammar source file : /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g
- *     -                            On : 2010-08-23 07:54:47
- *     -                 for the lexer : SimpleCLexerLexer *
- * Editing it, at least manually, is not wise. 
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 23, 2010 07:48:06 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g 2010-08-23 07:54:47
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SimpleCLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-#pragma mark Cyclic DFA implementation start DFA4
-@implementation DFA4
-const static NSInteger dfa4_eot[33] =
-    {-1,-1,-1,-1,-1,-1,-1,11,11,11,11,-1,-1,22,-1,-1,-1,11,11,11,11,-1,-1,
-     27,28,11,11,-1,-1,31,32,-1,-1};
-const static NSInteger dfa4_eof[33] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-     -1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static unichar dfa4_min[33] =
-    {9,0,0,0,0,0,0,111,110,104,111,0,0,61,0,0,0,114,116,97,105,0,0,48,48,
-     114,100,0,0,48,48,0,0};
-const static unichar dfa4_max[33] =
-    {125,0,0,0,0,0,0,111,110,104,111,0,0,61,0,0,0,114,116,97,105,0,0,122,
-     122,114,100,0,0,122,122,0,0};
-const static NSInteger dfa4_accept[33] =
-    {-1,1,2,3,4,5,6,-1,-1,-1,-1,11,12,-1,15,16,17,-1,-1,-1,-1,14,13,-1,-1,
-     -1,-1,7,8,-1,-1,9,10};
-const static NSInteger dfa4_special[33] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-     -1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static NSInteger dfa4_transition[] = {};
-const static NSInteger dfa4_transition0[] = {24};
-const static NSInteger dfa4_transition1[] = {26};
-const static NSInteger dfa4_transition2[] = {29};
-const static NSInteger dfa4_transition3[] = {20};
-const static NSInteger dfa4_transition4[] = {25};
-const static NSInteger dfa4_transition5[] = {16, 16, -1, -1, 16, -1, -1, 
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 16, -1, 
- -1, -1, -1, -1, -1, -1, 2, 4, -1, 15, 3, -1, -1, -1, 12, 12, 12, 12, 12, 
- 12, 12, 12, 12, 12, -1, 1, 14, 13, -1, -1, -1, 11, 11, 11, 11, 11, 11, 
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 
- 11, 11, -1, -1, -1, -1, 11, -1, 11, 11, 9, 11, 11, 7, 11, 11, 8, 11, 11, 
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 11, 11, 11, 11, 5, -1, 6};
-const static NSInteger dfa4_transition6[] = {21};
-const static NSInteger dfa4_transition7[] = {11, 11, 11, 11, 11, 11, 11, 
- 11, 11, 11, -1, -1, -1, -1, -1, -1, -1, 11, 11, 11, 11, 11, 11, 11, 11, 
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 
- -1, -1, -1, -1, 11, -1, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11};
-const static NSInteger dfa4_transition8[] = {18};
-const static NSInteger dfa4_transition9[] = {19};
-const static NSInteger dfa4_transition10[] = {23};
-const static NSInteger dfa4_transition11[] = {17};
-const static NSInteger dfa4_transition12[] = {30};
-
-
-+ () newDFA4WithRecognizer:(ANTLRBaseRecognizer *)aRecognizer
-{
-    return [[[DFA4 alloc] initWithRecognizer:aRecognizer] retain];
-}
-
-- (id) initWithRecognizer:(ANTLRBaseRecognizer *) theRecognizer
-{
-    if ((self = [super initWithRecognizer:theRecognizer]) != nil) {
-        decisionNumber = 4;
-        eot = dfa4_eot;
-        eof = dfa4_eof;
-        min = dfa4_min;
-        max = dfa4_max;
-        accept = dfa4_accept;
-        special = dfa4_special;
-        if (!(transition = calloc(33, sizeof(void*)))) {
-            [self release];
-            return nil;
-        }
-        len = 33;
-        transition[0] = dfa4_transition5;
-        transition[1] = dfa4_transition;
-        transition[2] = dfa4_transition;
-        transition[3] = dfa4_transition;
-        transition[4] = dfa4_transition;
-        transition[5] = dfa4_transition;
-        transition[6] = dfa4_transition;
-        transition[7] = dfa4_transition11;
-        transition[8] = dfa4_transition8;
-        transition[9] = dfa4_transition9;
-        transition[10] = dfa4_transition3;
-        transition[11] = dfa4_transition;
-        transition[12] = dfa4_transition;
-        transition[13] = dfa4_transition6;
-        transition[14] = dfa4_transition;
-        transition[15] = dfa4_transition;
-        transition[16] = dfa4_transition;
-        transition[17] = dfa4_transition10;
-        transition[18] = dfa4_transition0;
-        transition[19] = dfa4_transition4;
-        transition[20] = dfa4_transition1;
-        transition[21] = dfa4_transition;
-        transition[22] = dfa4_transition;
-        transition[23] = dfa4_transition7;
-        transition[24] = dfa4_transition7;
-        transition[25] = dfa4_transition2;
-        transition[26] = dfa4_transition12;
-        transition[27] = dfa4_transition;
-        transition[28] = dfa4_transition;
-        transition[29] = dfa4_transition7;
-        transition[30] = dfa4_transition7;
-        transition[31] = dfa4_transition;
-        transition[32] = dfa4_transition;
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    free(transition);
-    [super dealloc];
-}
-
-- (NSString *) description
-{
-    return @"1:1: Tokens : ( T__21 | T__22 | T__23 | T__24 | T__25 | T__26 | K_FOR | K_INT_TYPE | K_CHAR | K_VOID | K_ID | K_INT | K_EQ | K_EQEQ | K_LT | K_PLUS | WS );";
-}
-
-
-@end /* end DFA4 implementation */
-
-#pragma mark Cyclic DFA implementation end DFA4
-
-
-
-/** As per Terence: No returns for lexer rules!
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-*/
-@implementation SimpleCLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"/usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (SimpleCLexer *)newSimpleCLexer:(id<ANTLRCharStream>)anInput
-{
-    return [[SimpleCLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    if ((self = [super initWithCharStream:anInput State:[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:18+1]]) != nil) {
-
-        dfa4 = [DFA4 newDFA4WithRecognizer:self];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [dfa4 release];
-    [super dealloc];
-}
-
-/* Start of actions.lexer.methods */
-/* start methods() */
-
-/* Start of Rules */
-// $ANTLR start "T__21"
-- (void) mT__21
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = T__21;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:7:7: ( ';' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:7:9: ';' // alt
-        {
-        [self matchChar:';']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__21"
-
-// $ANTLR start "T__22"
-- (void) mT__22
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = T__22;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:8:7: ( '(' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:8:9: '(' // alt
-        {
-        [self matchChar:'(']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__22"
-
-// $ANTLR start "T__23"
-- (void) mT__23
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = T__23;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:9:7: ( ',' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:9:9: ',' // alt
-        {
-        [self matchChar:',']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__23"
-
-// $ANTLR start "T__24"
-- (void) mT__24
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = T__24;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:10:7: ( ')' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:10:9: ')' // alt
-        {
-        [self matchChar:')']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__24"
-
-// $ANTLR start "T__25"
-- (void) mT__25
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = T__25;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:11:7: ( '{' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:11:9: '{' // alt
-        {
-        [self matchChar:'{']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__25"
-
-// $ANTLR start "T__26"
-- (void) mT__26
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = T__26;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:12:7: ( '}' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:12:9: '}' // alt
-        {
-        [self matchChar:'}']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "T__26"
-
-// $ANTLR start "K_FOR"
-- (void) mK_FOR
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = K_FOR;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:91:7: ( 'for' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:91:9: 'for' // alt
-        {
-        [self matchString:@"for"]; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "K_FOR"
-
-// $ANTLR start "K_INT_TYPE"
-- (void) mK_INT_TYPE
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = K_INT_TYPE;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:92:12: ( 'int' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:92:14: 'int' // alt
-        {
-        [self matchString:@"int"]; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "K_INT_TYPE"
-
-// $ANTLR start "K_CHAR"
-- (void) mK_CHAR
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = K_CHAR;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:93:7: ( 'char' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:93:9: 'char' // alt
-        {
-        [self matchString:@"char"]; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "K_CHAR"
-
-// $ANTLR start "K_VOID"
-- (void) mK_VOID
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = K_VOID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:94:7: ( 'void' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:94:9: 'void' // alt
-        {
-        [self matchString:@"void"]; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "K_VOID"
-
-// $ANTLR start "K_ID"
-- (void) mK_ID
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = K_ID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:96:7: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:96:11: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* // alt
-        {
-        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-            [input consume];
-
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-            [self recover:mse];
-            @throw mse;}
-          /* element() */
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0>='0' && LA1_0<='9')||(LA1_0>='A' && LA1_0<='Z')||LA1_0=='_'||(LA1_0>='a' && LA1_0<='z')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-                        [input consume];
-
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;}
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop1;
-            }
-        } while (YES);
-        loop1: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "K_ID"
-
-// $ANTLR start "K_INT"
-- (void) mK_INT
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = K_INT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        id<ANTLRToken> *int=nil;
-        NSMutableArray *list_int=nil; 
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:99:7: ( (int+= ( '0' .. '9' ) )+ ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:99:9: (int+= ( '0' .. '9' ) )+ // alt
-        {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:99:12: (int+= ( '0' .. '9' ) )+ // positiveClosureBlock
-        NSInteger cnt2=0;
-        do {
-            NSInteger alt2=2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( ((LA2_0>='0' && LA2_0<='9')) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:99:12: int+= ( '0' .. '9' ) // alt
-                    {
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:99:14: ( '0' .. '9' ) // blockSingleAlt
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:99:15: '0' .. '9' // alt
-                    {
-                    [self matchRangeFromChar:'0' to:'9'];   /* element() */
-                     /* elements */
-                    }
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt2 >= 1 )
-                        goto loop2;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:2];
-                    @throw eee;
-            }
-            cnt2++;
-        } while (YES);
-        loop2: ;
-          /* element() */
-        NSLog(@"%@", list_int);  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-        [list_int release];
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "K_INT"
-
-// $ANTLR start "K_EQ"
-- (void) mK_EQ
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = K_EQ;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:102:8: ( '=' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:102:10: '=' // alt
-        {
-        [self matchChar:'=']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "K_EQ"
-
-// $ANTLR start "K_EQEQ"
-- (void) mK_EQEQ
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = K_EQEQ;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:103:8: ( '==' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:103:10: '==' // alt
-        {
-        [self matchString:@"=="]; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "K_EQEQ"
-
-// $ANTLR start "K_LT"
-- (void) mK_LT
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = K_LT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:104:8: ( '<' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:104:10: '<' // alt
-        {
-        [self matchChar:'<']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "K_LT"
-
-// $ANTLR start "K_PLUS"
-- (void) mK_PLUS
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = K_PLUS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:105:8: ( '+' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:105:10: '+' // alt
-        {
-        [self matchChar:'+']; 
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "K_PLUS"
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    /* ruleScopeSetUp */
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:107:5: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:107:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // alt
-        {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:107:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // positiveClosureBlock
-        NSInteger cnt3=0;
-        do {
-            NSInteger alt3=2;
-            NSInteger LA3_0 = [input LA:1];
-            if ( ((LA3_0>='\t' && LA3_0<='\n')||LA3_0=='\r'||LA3_0==' ') ) {
-                alt3=1;
-            }
-
-
-            switch (alt3) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g: // alt
-                    {
-                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == '\r'||[input LA:1] == ' ') {
-                        [input consume];
-
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;}
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt3 >= 1 )
-                        goto loop3;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:3];
-                    @throw eee;
-            }
-            cnt3++;
-        } while (YES);
-        loop3: ;
-          /* element() */
-         state.channel=99;   /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "WS"
-
-- (void) mTokens
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:8: ( T__21 | T__22 | T__23 | T__24 | T__25 | T__26 | K_FOR | K_INT_TYPE | K_CHAR | K_VOID | K_ID | K_INT | K_EQ | K_EQEQ | K_LT | K_PLUS | WS ) //ruleblock
-    NSInteger alt4=17;
-    alt4 = [dfa4 predict:input];
-    switch (alt4) {
-        case 1 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:10: T__21 // alt
-            {
-                [self mT__21]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 2 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:16: T__22 // alt
-            {
-                [self mT__22]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 3 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:22: T__23 // alt
-            {
-                [self mT__23]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 4 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:28: T__24 // alt
-            {
-                [self mT__24]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 5 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:34: T__25 // alt
-            {
-                [self mT__25]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 6 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:40: T__26 // alt
-            {
-                [self mT__26]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 7 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:46: K_FOR // alt
-            {
-                [self mK_FOR]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 8 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:52: K_INT_TYPE // alt
-            {
-                [self mK_INT_TYPE]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 9 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:63: K_CHAR // alt
-            {
-                [self mK_CHAR]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 10 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:70: K_VOID // alt
-            {
-                [self mK_VOID]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 11 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:77: K_ID // alt
-            {
-                [self mK_ID]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 12 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:82: K_INT // alt
-            {
-                [self mK_INT]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 13 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:88: K_EQ // alt
-            {
-                [self mK_EQ]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 14 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:93: K_EQEQ // alt
-            {
-                [self mK_EQEQ]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 15 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:100: K_LT // alt
-            {
-                [self mK_LT]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 16 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:105: K_PLUS // alt
-            {
-                [self mK_PLUS]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 17 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:1:112: WS // alt
-            {
-                [self mWS]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-
-    }
-
-}
-
-@end /* end of SimpleCLexer implementation line 397 */
-
-/* End of code
- * =============================================================================
- */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCParser.h b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCParser.h
deleted file mode 100644
index 6d4f180..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCParser.h
+++ /dev/null
@@ -1,300 +0,0 @@
-// $ANTLR 3.2 Aug 23, 2010 07:48:06 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g 2010-08-23 07:54:46
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-#pragma mark Cyclic DFA interface start DFA2
-@interface DFA2 : ANTLRDFA {
-}
-+ newDFA2WithRecognizer:(ANTLRBaseRecognizer *)theRecognizer;
-- initWithRecognizer:(ANTLRBaseRecognizer *)recognizer;
-@end /* end of DFA2 interface  */
-
-#pragma mark Cyclic DFA interface end DFA2
-#pragma mark Tokens
-#define K_ID 10
-#define T__26 26
-#define T__25 25
-#define T__24 24
-#define T__23 23
-#define K_EQEQ 16
-#define T__22 22
-#define K_INT 11
-#define T__21 21
-#define K_FOR 14
-#define FUNC_HDR 6
-#define FUNC_DEF 8
-#define EOF -1
-#define K_INT_TYPE 19
-#define FUNC_DECL 7
-#define ARG_DEF 5
-#define WS 20
-#define K_EQ 15
-#define BLOCK 9
-#define K_LT 17
-#define K_CHAR 12
-#define K_VOID 13
-#define VAR_DEF 4
-#define K_PLUS 18
-#pragma mark Dynamic Global Scopes
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-@interface SimpleCParser_program_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_program_return *)newSimpleCParser_program_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_declaration_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_declaration_return *)newSimpleCParser_declaration_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_variable_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_variable_return *)newSimpleCParser_variable_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_declarator_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_declarator_return *)newSimpleCParser_declarator_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_functionHeader_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_functionHeader_return *)newSimpleCParser_functionHeader_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_formalParameter_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_formalParameter_return *)newSimpleCParser_formalParameter_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_type_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_type_return *)newSimpleCParser_type_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_block_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_block_return *)newSimpleCParser_block_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_stat_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_stat_return *)newSimpleCParser_stat_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_forStat_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_forStat_return *)newSimpleCParser_forStat_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_assignStat_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_assignStat_return *)newSimpleCParser_assignStat_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_expr_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_expr_return *)newSimpleCParser_expr_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_condExpr_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_condExpr_return *)newSimpleCParser_condExpr_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_aexpr_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_aexpr_return *)newSimpleCParser_aexpr_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-@interface SimpleCParser_atom_return :ANTLRParserRuleReturnScope { // line 1672
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCParser_atom_return *)newSimpleCParser_atom_return;
-// this is start of set and get methods
-/* AST returnScopeInterface.methodsdecl */
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-
-#pragma mark Rule return scopes end
-@interface SimpleCParser : ANTLRParser { // line 529
-// start of globalAttributeScopeMemVar
-
-
-// start of action-actionScope-memVars
-// start of ruleAttributeScopeMemVar
-
-
-// Start of memVars
-/* AST parserHeaderFile.memVars */
-/* AST parsermemVars */
-id<ANTLRTreeAdaptor> treeAdaptor;
-
-DFA2 *dfa2;
- }
-
-// start of action-actionScope-methodsDecl
-
-/* AST parserHeaderFile.methodsdecl */
-/* AST parserMethodsDecl */
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)theTreeAdaptor;
-
-- (SimpleCParser_program_return *)program; 
-- (SimpleCParser_declaration_return *)declaration; 
-- (SimpleCParser_variable_return *)variable; 
-- (SimpleCParser_declarator_return *)declarator; 
-- (SimpleCParser_functionHeader_return *)functionHeader; 
-- (SimpleCParser_formalParameter_return *)formalParameter; 
-- (SimpleCParser_type_return *)type; 
-- (SimpleCParser_block_return *)block; 
-- (SimpleCParser_stat_return *)stat; 
-- (SimpleCParser_forStat_return *)forStat; 
-- (SimpleCParser_assignStat_return *)assignStat; 
-- (SimpleCParser_expr_return *)expr; 
-- (SimpleCParser_condExpr_return *)condExpr; 
-- (SimpleCParser_aexpr_return *)aexpr; 
-- (SimpleCParser_atom_return *)atom; 
-
-
-@end /* end of SimpleCParser interface */
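
Every parser rule in the deleted header gets its own *_return scope whose payload is the AST subtree, plus the start/stop tokens inherited from the runtime's parser rule return scope. A short sketch of the same pattern against the Java runtime follows; it assumes a SimpleCParser generated from SimpleC.g with AST output and the stock ParserRuleReturnScope start/stop fields, and is meant as a helper next to the driver sketch earlier, not code from this change.

    import org.antlr.runtime.RecognitionException;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;

    // Assumes a SimpleCParser generated from SimpleC.g (hypothetical here).
    static void printOneDeclaration(SimpleCParser parser) throws RecognitionException {
        SimpleCParser.declaration_return ret = parser.declaration();
        CommonTree subtree = (CommonTree) ret.getTree();   // AST built for just this rule
        Token first = ret.start, last = ret.stop;          // token span the rule consumed
        System.out.println(subtree.toStringTree()
                + "  [tokens " + first.getTokenIndex() + ".." + last.getTokenIndex() + "]");
    }
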
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCParser.m b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCParser.m
deleted file mode 100644
index 386640a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCParser.m
+++ /dev/null
@@ -1,2763 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 23, 2010 07:48:06
- *
- *     -  From the grammar source file : /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g
- *     -                            On : 2010-08-23 07:54:46
- *     -                for the parser : SimpleCParserParser *
- * Editing it, at least manually, is not wise. 
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 23, 2010 07:48:06 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g 2010-08-23 07:54:46
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SimpleCParser.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-
-#pragma mark Cyclic DFA implementation start DFA2
-@implementation DFA2
-const static NSInteger dfa2_eot[13] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static NSInteger dfa2_eof[13] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static unichar dfa2_min[13] =
-    {10,10,21,10,0,10,21,23,0,0,10,10,23};
-const static unichar dfa2_max[13] =
-    {13,10,22,24,0,10,25,24,0,0,13,10,24};
-const static NSInteger dfa2_accept[13] =
-    {-1,-1,-1,-1,1,-1,-1,-1,3,2,-1,-1,-1};
-const static NSInteger dfa2_special[13] =
-    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
-const static NSInteger dfa2_transition[] = {};
-const static NSInteger dfa2_transition0[] = {9, -1, -1, -1, 8};
-const static NSInteger dfa2_transition1[] = {10, 6};
-const static NSInteger dfa2_transition2[] = {5, 5, 5, 5, -1, -1, -1, -1, 
- -1, -1, -1, -1, -1, -1, 6};
-const static NSInteger dfa2_transition3[] = {11, 11, 11, 11};
-const static NSInteger dfa2_transition4[] = {4, 3};
-const static NSInteger dfa2_transition5[] = {1, 1, 1, 1};
-const static NSInteger dfa2_transition6[] = {7};
-const static NSInteger dfa2_transition7[] = {12};
-const static NSInteger dfa2_transition8[] = {2};
-
-
-+ () newDFA2WithRecognizer:(ANTLRBaseRecognizer *)aRecognizer
-{
-    return [[[DFA2 alloc] initWithRecognizer:aRecognizer] retain];
-}
-
-- (id) initWithRecognizer:(ANTLRBaseRecognizer *) theRecognizer
-{
-    if ((self = [super initWithRecognizer:theRecognizer]) != nil) {
-        decisionNumber = 2;
-        eot = dfa2_eot;
-        eof = dfa2_eof;
-        min = dfa2_min;
-        max = dfa2_max;
-        accept = dfa2_accept;
-        special = dfa2_special;
-        if (!(transition = calloc(13, sizeof(void*)))) {
-            [self release];
-            return nil;
-        }
-        len = 13;
-        transition[0] = dfa2_transition5;
-        transition[1] = dfa2_transition8;
-        transition[2] = dfa2_transition4;
-        transition[3] = dfa2_transition2;
-        transition[4] = dfa2_transition;
-        transition[5] = dfa2_transition6;
-        transition[6] = dfa2_transition0;
-        transition[7] = dfa2_transition1;
-        transition[8] = dfa2_transition;
-        transition[9] = dfa2_transition;
-        transition[10] = dfa2_transition3;
-        transition[11] = dfa2_transition7;
-        transition[12] = dfa2_transition1;
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    free(transition);
-    [super dealloc];
-}
-
-- (NSString *) description
-{
-    return @"20:1: declaration : ( variable | functionHeader ';' -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) );";
-}
-
-
-@end /* end DFA2 implementation */
-
-#pragma mark Cyclic DFA implementation end DFA2
-
-
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_declaration_in_program85;
-static const unsigned long long FOLLOW_declaration_in_program85_data[] = { 0x0000000000003C02LL};
-static ANTLRBitSet *FOLLOW_variable_in_declaration105;
-static const unsigned long long FOLLOW_variable_in_declaration105_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_functionHeader_in_declaration115;
-static const unsigned long long FOLLOW_functionHeader_in_declaration115_data[] = { 0x0000000000200000LL};
-static ANTLRBitSet *FOLLOW_21_in_declaration117;
-static const unsigned long long FOLLOW_21_in_declaration117_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_functionHeader_in_declaration135;
-static const unsigned long long FOLLOW_functionHeader_in_declaration135_data[] = { 0x0000000002000000LL};
-static ANTLRBitSet *FOLLOW_block_in_declaration137;
-static const unsigned long long FOLLOW_block_in_declaration137_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_type_in_variable166;
-static const unsigned long long FOLLOW_type_in_variable166_data[] = { 0x0000000000000400LL};
-static ANTLRBitSet *FOLLOW_declarator_in_variable168;
-static const unsigned long long FOLLOW_declarator_in_variable168_data[] = { 0x0000000000200000LL};
-static ANTLRBitSet *FOLLOW_21_in_variable170;
-static const unsigned long long FOLLOW_21_in_variable170_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_declarator199;
-static const unsigned long long FOLLOW_K_ID_in_declarator199_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_type_in_functionHeader219;
-static const unsigned long long FOLLOW_type_in_functionHeader219_data[] = { 0x0000000000000400LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_functionHeader221;
-static const unsigned long long FOLLOW_K_ID_in_functionHeader221_data[] = { 0x0000000000400000LL};
-static ANTLRBitSet *FOLLOW_22_in_functionHeader223;
-static const unsigned long long FOLLOW_22_in_functionHeader223_data[] = { 0x0000000001003C00LL};
-static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader227;
-static const unsigned long long FOLLOW_formalParameter_in_functionHeader227_data[] = { 0x0000000001800000LL};
-static ANTLRBitSet *FOLLOW_23_in_functionHeader231;
-static const unsigned long long FOLLOW_23_in_functionHeader231_data[] = { 0x0000000000003C00LL};
-static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader233;
-static const unsigned long long FOLLOW_formalParameter_in_functionHeader233_data[] = { 0x0000000001800000LL};
-static ANTLRBitSet *FOLLOW_24_in_functionHeader241;
-static const unsigned long long FOLLOW_24_in_functionHeader241_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_type_in_formalParameter281;
-static const unsigned long long FOLLOW_type_in_formalParameter281_data[] = { 0x0000000000000400LL};
-static ANTLRBitSet *FOLLOW_declarator_in_formalParameter283;
-static const unsigned long long FOLLOW_declarator_in_formalParameter283_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_set_in_type0;
-static const unsigned long long FOLLOW_set_in_type0_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_25_in_block376;
-static const unsigned long long FOLLOW_25_in_block376_data[] = { 0x0000000006607C00LL};
-static ANTLRBitSet *FOLLOW_variable_in_block390;
-static const unsigned long long FOLLOW_variable_in_block390_data[] = { 0x0000000006607C00LL};
-static ANTLRBitSet *FOLLOW_stat_in_block405;
-static const unsigned long long FOLLOW_stat_in_block405_data[] = { 0x0000000006604C00LL};
-static ANTLRBitSet *FOLLOW_26_in_block416;
-static const unsigned long long FOLLOW_26_in_block416_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_forStat_in_stat449;
-static const unsigned long long FOLLOW_forStat_in_stat449_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_expr_in_stat457;
-static const unsigned long long FOLLOW_expr_in_stat457_data[] = { 0x0000000000200000LL};
-static ANTLRBitSet *FOLLOW_21_in_stat459;
-static const unsigned long long FOLLOW_21_in_stat459_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_block_in_stat468;
-static const unsigned long long FOLLOW_block_in_stat468_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_assignStat_in_stat476;
-static const unsigned long long FOLLOW_assignStat_in_stat476_data[] = { 0x0000000000200000LL};
-static ANTLRBitSet *FOLLOW_21_in_stat478;
-static const unsigned long long FOLLOW_21_in_stat478_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_21_in_stat487;
-static const unsigned long long FOLLOW_21_in_stat487_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_FOR_in_forStat507;
-static const unsigned long long FOLLOW_K_FOR_in_forStat507_data[] = { 0x0000000000400000LL};
-static ANTLRBitSet *FOLLOW_22_in_forStat509;
-static const unsigned long long FOLLOW_22_in_forStat509_data[] = { 0x0000000000000400LL};
-static ANTLRBitSet *FOLLOW_assignStat_in_forStat513;
-static const unsigned long long FOLLOW_assignStat_in_forStat513_data[] = { 0x0000000000200000LL};
-static ANTLRBitSet *FOLLOW_21_in_forStat515;
-static const unsigned long long FOLLOW_21_in_forStat515_data[] = { 0x0000000000400C00LL};
-static ANTLRBitSet *FOLLOW_expr_in_forStat517;
-static const unsigned long long FOLLOW_expr_in_forStat517_data[] = { 0x0000000000200000LL};
-static ANTLRBitSet *FOLLOW_21_in_forStat519;
-static const unsigned long long FOLLOW_21_in_forStat519_data[] = { 0x0000000000000400LL};
-static ANTLRBitSet *FOLLOW_assignStat_in_forStat523;
-static const unsigned long long FOLLOW_assignStat_in_forStat523_data[] = { 0x0000000001000000LL};
-static ANTLRBitSet *FOLLOW_24_in_forStat525;
-static const unsigned long long FOLLOW_24_in_forStat525_data[] = { 0x0000000002000000LL};
-static ANTLRBitSet *FOLLOW_block_in_forStat527;
-static const unsigned long long FOLLOW_block_in_forStat527_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_assignStat570;
-static const unsigned long long FOLLOW_K_ID_in_assignStat570_data[] = { 0x0000000000008000LL};
-static ANTLRBitSet *FOLLOW_K_EQ_in_assignStat572;
-static const unsigned long long FOLLOW_K_EQ_in_assignStat572_data[] = { 0x0000000000400C00LL};
-static ANTLRBitSet *FOLLOW_expr_in_assignStat574;
-static const unsigned long long FOLLOW_expr_in_assignStat574_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_condExpr_in_expr598;
-static const unsigned long long FOLLOW_condExpr_in_expr598_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_aexpr_in_condExpr617;
-static const unsigned long long FOLLOW_aexpr_in_condExpr617_data[] = { 0x0000000000030002LL};
-static ANTLRBitSet *FOLLOW_K_EQEQ_in_condExpr622;
-static const unsigned long long FOLLOW_K_EQEQ_in_condExpr622_data[] = { 0x0000000000400C00LL};
-static ANTLRBitSet *FOLLOW_K_LT_in_condExpr627;
-static const unsigned long long FOLLOW_K_LT_in_condExpr627_data[] = { 0x0000000000400C00LL};
-static ANTLRBitSet *FOLLOW_aexpr_in_condExpr631;
-static const unsigned long long FOLLOW_aexpr_in_condExpr631_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_atom_in_aexpr653;
-static const unsigned long long FOLLOW_atom_in_aexpr653_data[] = { 0x0000000000040002LL};
-static ANTLRBitSet *FOLLOW_K_PLUS_in_aexpr657;
-static const unsigned long long FOLLOW_K_PLUS_in_aexpr657_data[] = { 0x0000000000400C00LL};
-static ANTLRBitSet *FOLLOW_atom_in_aexpr660;
-static const unsigned long long FOLLOW_atom_in_aexpr660_data[] = { 0x0000000000040002LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_atom680;
-static const unsigned long long FOLLOW_K_ID_in_atom680_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_INT_in_atom694;
-static const unsigned long long FOLLOW_K_INT_in_atom694_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_22_in_atom708;
-static const unsigned long long FOLLOW_22_in_atom708_data[] = { 0x0000000000400C00LL};
-static ANTLRBitSet *FOLLOW_expr_in_atom710;
-static const unsigned long long FOLLOW_expr_in_atom710_data[] = { 0x0000000001000000LL};
-static ANTLRBitSet *FOLLOW_24_in_atom712;
-static const unsigned long long FOLLOW_24_in_atom712_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule return scopes start
-@implementation SimpleCParser_program_return
-+ (SimpleCParser_program_return *)newSimpleCParser_program_return
-{
-    return [[[SimpleCParser_program_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_declaration_return
-+ (SimpleCParser_declaration_return *)newSimpleCParser_declaration_return
-{
-    return [[[SimpleCParser_declaration_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_variable_return
-+ (SimpleCParser_variable_return *)newSimpleCParser_variable_return
-{
-    return [[[SimpleCParser_variable_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_declarator_return
-+ (SimpleCParser_declarator_return *)newSimpleCParser_declarator_return
-{
-    return [[[SimpleCParser_declarator_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_functionHeader_return
-+ (SimpleCParser_functionHeader_return *)newSimpleCParser_functionHeader_return
-{
-    return [[[SimpleCParser_functionHeader_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_formalParameter_return
-+ (SimpleCParser_formalParameter_return *)newSimpleCParser_formalParameter_return
-{
-    return [[[SimpleCParser_formalParameter_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_type_return
-+ (SimpleCParser_type_return *)newSimpleCParser_type_return
-{
-    return [[[SimpleCParser_type_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_block_return
-+ (SimpleCParser_block_return *)newSimpleCParser_block_return
-{
-    return [[[SimpleCParser_block_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_stat_return
-+ (SimpleCParser_stat_return *)newSimpleCParser_stat_return
-{
-    return [[[SimpleCParser_stat_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_forStat_return
-+ (SimpleCParser_forStat_return *)newSimpleCParser_forStat_return
-{
-    return [[[SimpleCParser_forStat_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_assignStat_return
-+ (SimpleCParser_assignStat_return *)newSimpleCParser_assignStat_return
-{
-    return [[[SimpleCParser_assignStat_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_expr_return
-+ (SimpleCParser_expr_return *)newSimpleCParser_expr_return
-{
-    return [[[SimpleCParser_expr_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_condExpr_return
-+ (SimpleCParser_condExpr_return *)newSimpleCParser_condExpr_return
-{
-    return [[[SimpleCParser_condExpr_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_aexpr_return
-+ (SimpleCParser_aexpr_return *)newSimpleCParser_aexpr_return
-{
-    return [[[SimpleCParser_aexpr_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-@implementation SimpleCParser_atom_return
-+ (SimpleCParser_atom_return *)newSimpleCParser_atom_return
-{
-    return [[[SimpleCParser_atom_return alloc] init] retain];
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-
-
-@implementation SimpleCParser  // line 610
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_declaration_in_program85 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declaration_in_program85_data Count:(NSUInteger)1] retain];
-    FOLLOW_variable_in_declaration105 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_declaration105_data Count:(NSUInteger)1] retain];
-    FOLLOW_functionHeader_in_declaration115 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration115_data Count:(NSUInteger)1] retain];
-    FOLLOW_21_in_declaration117 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_21_in_declaration117_data Count:(NSUInteger)1] retain];
-    FOLLOW_functionHeader_in_declaration135 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration135_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_declaration137 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_declaration137_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_variable166 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_variable166_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_variable168 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_variable168_data Count:(NSUInteger)1] retain];
-    FOLLOW_21_in_variable170 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_21_in_variable170_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_declarator199 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_declarator199_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_functionHeader219 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_functionHeader219_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_functionHeader221 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_functionHeader221_data Count:(NSUInteger)1] retain];
-    FOLLOW_22_in_functionHeader223 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_22_in_functionHeader223_data Count:(NSUInteger)1] retain];
-    FOLLOW_formalParameter_in_functionHeader227 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader227_data Count:(NSUInteger)1] retain];
-    FOLLOW_23_in_functionHeader231 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_23_in_functionHeader231_data Count:(NSUInteger)1] retain];
-    FOLLOW_formalParameter_in_functionHeader233 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader233_data Count:(NSUInteger)1] retain];
-    FOLLOW_24_in_functionHeader241 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_24_in_functionHeader241_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_formalParameter281 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_formalParameter281_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_formalParameter283 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_formalParameter283_data Count:(NSUInteger)1] retain];
-    FOLLOW_set_in_type0 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_set_in_type0_data Count:(NSUInteger)1] retain];
-    FOLLOW_25_in_block376 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_25_in_block376_data Count:(NSUInteger)1] retain];
-    FOLLOW_variable_in_block390 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_block390_data Count:(NSUInteger)1] retain];
-    FOLLOW_stat_in_block405 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block405_data Count:(NSUInteger)1] retain];
-    FOLLOW_26_in_block416 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_26_in_block416_data Count:(NSUInteger)1] retain];
-    FOLLOW_forStat_in_stat449 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_forStat_in_stat449_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_stat457 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_stat457_data Count:(NSUInteger)1] retain];
-    FOLLOW_21_in_stat459 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_21_in_stat459_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_stat468 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat468_data Count:(NSUInteger)1] retain];
-    FOLLOW_assignStat_in_stat476 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_stat476_data Count:(NSUInteger)1] retain];
-    FOLLOW_21_in_stat478 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_21_in_stat478_data Count:(NSUInteger)1] retain];
-    FOLLOW_21_in_stat487 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_21_in_stat487_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_FOR_in_forStat507 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_FOR_in_forStat507_data Count:(NSUInteger)1] retain];
-    FOLLOW_22_in_forStat509 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_22_in_forStat509_data Count:(NSUInteger)1] retain];
-    FOLLOW_assignStat_in_forStat513 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_forStat513_data Count:(NSUInteger)1] retain];
-    FOLLOW_21_in_forStat515 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_21_in_forStat515_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_forStat517 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat517_data Count:(NSUInteger)1] retain];
-    FOLLOW_21_in_forStat519 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_21_in_forStat519_data Count:(NSUInteger)1] retain];
-    FOLLOW_assignStat_in_forStat523 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_forStat523_data Count:(NSUInteger)1] retain];
-    FOLLOW_24_in_forStat525 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_24_in_forStat525_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_forStat527 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_forStat527_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_assignStat570 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_assignStat570_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_EQ_in_assignStat572 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQ_in_assignStat572_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_assignStat574 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_assignStat574_data Count:(NSUInteger)1] retain];
-    FOLLOW_condExpr_in_expr598 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_condExpr_in_expr598_data Count:(NSUInteger)1] retain];
-    FOLLOW_aexpr_in_condExpr617 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_aexpr_in_condExpr617_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_EQEQ_in_condExpr622 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQEQ_in_condExpr622_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_LT_in_condExpr627 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_LT_in_condExpr627_data Count:(NSUInteger)1] retain];
-    FOLLOW_aexpr_in_condExpr631 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_aexpr_in_condExpr631_data Count:(NSUInteger)1] retain];
-    FOLLOW_atom_in_aexpr653 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_aexpr653_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_PLUS_in_aexpr657 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_PLUS_in_aexpr657_data Count:(NSUInteger)1] retain];
-    FOLLOW_atom_in_aexpr660 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_aexpr660_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_atom680 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_atom680_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_INT_in_atom694 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_INT_in_atom694_data Count:(NSUInteger)1] retain];
-    FOLLOW_22_in_atom708 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_22_in_atom708_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_atom710 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_atom710_data Count:(NSUInteger)1] retain];
-    FOLLOW_24_in_atom712 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_24_in_atom712_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[[NSArray alloc] initWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"VAR_DEF", @"ARG_DEF", @"FUNC_HDR", @"FUNC_DECL", @"FUNC_DEF", @"BLOCK", 
- @"K_ID", @"K_INT", @"K_CHAR", @"K_VOID", @"K_FOR", @"K_EQ", @"K_EQEQ", 
- @"K_LT", @"K_PLUS", @"K_INT_TYPE", @"WS", @"';'", @"'('", @"','", @"')'", 
- @"'{'", @"'}'", nil] retain]];
-}
-
-+ (SimpleCParser *)newSimpleCParser:(id<ANTLRTokenStream>)aStream
-{
-    return [[SimpleCParser alloc] initWithTokenStream:aStream];
-
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)aStream
-{
-    if ((self = [super initWithTokenStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:15+1] retain]]) != nil) {
-
-
-        dfa2 = [DFA2 newDFA2WithRecognizer:self];
-
-        /* start of actions-actionScope-init */
-        /* start of init */
-        /* AST genericParser.init */
-        [self setTreeAdaptor:[[ANTLRCommonTreeAdaptor newANTLRCommonTreeAdaptor] retain]];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [dfa2 release];
-    /* AST genericParser.dealloc */
-    [self setTreeAdaptor:nil];
-
-    [super dealloc];
-}
-// start actions.actionScope.methods
-// start methods()
-/* AST genericParser.methods */
-/* AST parserMethods */
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor
-{
-	return treeAdaptor;
-}
-
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-{
-	if (aTreeAdaptor != treeAdaptor) {
-		treeAdaptor = aTreeAdaptor;
-	}
-}
-// start rules
-/*
- * $ANTLR start program
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:16:1: program : ( declaration )+ ;
- */
-- (SimpleCParser_program_return *) program
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_program_return * retval = [SimpleCParser_program_return newSimpleCParser_program_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        SimpleCParser_declaration_return * declaration1 = nil;
-
-
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:17:5: ( ( declaration )+ ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:17:9: ( declaration )+ // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:17:9: ( declaration )+ // positiveClosureBlock
-        NSInteger cnt1=0;
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0>=K_ID && LA1_0<=K_VOID)) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:17:9: declaration // alt
-                    {
-                    /* ASTParser ruleRef */
-                    [self pushFollow:FOLLOW_declaration_in_program85];
-                    declaration1 = [self declaration];
-                    [self popFollow];
-
-
-                    [treeAdaptor addChild:[declaration1 getTree] toTree:root_0];  /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-          /* element() */
-         /* elements */
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end program */
-/*
- * $ANTLR start declaration
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:20:1: declaration : ( variable | functionHeader ';' -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) );
- */
-- (SimpleCParser_declaration_return *) declaration
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_declaration_return * retval = [SimpleCParser_declaration_return newSimpleCParser_declaration_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *char_literal4 = nil;
-        SimpleCParser_variable_return * variable2 = nil;
-
-        SimpleCParser_functionHeader_return * functionHeader3 = nil;
-
-        SimpleCParser_functionHeader_return * functionHeader5 = nil;
-
-        SimpleCParser_block_return * block6 = nil;
-
-
-        ANTLRCommonTree *char_literal4_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_21 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 21"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_functionHeader = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule functionHeader"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_block = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule block"] retain];
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:21:5: ( variable | functionHeader ';' -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) ) //ruleblock
-        NSInteger alt2=3;
-        alt2 = [dfa2 predict:input];
-        switch (alt2) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:21:9: variable // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                /* ASTParser ruleRef */
-                [self pushFollow:FOLLOW_variable_in_declaration105];
-                variable2 = [self variable];
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[variable2 getTree] toTree:root_0];  /* element() */
-                 /* elements */
-                }
-                break;
-            case 2 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:22:9: functionHeader ';' // alt
-                {
-                [self pushFollow:FOLLOW_functionHeader_in_declaration115];
-                functionHeader3 = [self functionHeader];
-                [self popFollow];
-
-
-                [stream_functionHeader addElement:[functionHeader3 getTree]];  /* element() */
-                char_literal4=(ANTLRCommonToken *)[self match:input TokenType:21 Follow:FOLLOW_21_in_declaration117];  
-                    [stream_21 addElement:char_literal4];
-                  /* element() */
-                 /* elements */
-
-                // AST REWRITE
-                // elements: functionHeader
-                // token labels: 
-                // rule labels: retval
-                // token list labels: 
-                // rule list labels: 
-                // wildcard labels: 
-                 [retval setTree:root_0];
-
-                retval.tree = root_0;
-
-                ANTLRRewriteRuleSubtreeStream *stream_retval =
-                    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                        description:@"token retval"
-                                                                            element:retval!=nil?[retval getTree]:nil] retain];
-
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                // 22:28: -> ^( FUNC_DECL functionHeader )
-                {
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:22:31: ^( FUNC_DECL functionHeader )
-                    {
-                        ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                        root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:[[treeAdaptor createTree:(NSInteger)FUNC_DECL Text:[[ANTLRBaseRecognizer getTokenNames] objectAtIndex:(NSUInteger)FUNC_DECL]] retain]
-                                                                               old:root_1];
-
-                        [treeAdaptor addChild:[stream_functionHeader nextTree] toTree:root_1];
-
-                        [treeAdaptor addChild:root_1 toTree:root_0];
-                    }
-
-                }
-
-                retval.tree = root_0;
-
-                }
-                break;
-            case 3 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:23:9: functionHeader block // alt
-                {
-                [self pushFollow:FOLLOW_functionHeader_in_declaration135];
-                functionHeader5 = [self functionHeader];
-                [self popFollow];
-
-
-                [stream_functionHeader addElement:[functionHeader5 getTree]];  /* element() */
-                [self pushFollow:FOLLOW_block_in_declaration137];
-                block6 = [self block];
-                [self popFollow];
-
-
-                [stream_block addElement:[block6 getTree]];  /* element() */
-                 /* elements */
-
-                // AST REWRITE
-                // elements: functionHeader, block
-                // token labels: 
-                // rule labels: retval
-                // token list labels: 
-                // rule list labels: 
-                // wildcard labels: 
-                 [retval setTree:root_0];
-
-                retval.tree = root_0;
-
-                ANTLRRewriteRuleSubtreeStream *stream_retval =
-                    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                        description:@"token retval"
-                                                                            element:retval!=nil?[retval getTree]:nil] retain];
-
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                // 23:30: -> ^( FUNC_DEF functionHeader block )
-                {
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:23:33: ^( FUNC_DEF functionHeader block )
-                    {
-                        ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                        root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:[[treeAdaptor createTree:(NSInteger)FUNC_DEF Text:[[ANTLRBaseRecognizer getTokenNames] objectAtIndex:(NSUInteger)FUNC_DEF]] retain]
-                                                                               old:root_1];
-
-                        [treeAdaptor addChild:[stream_functionHeader nextTree] toTree:root_1];
-                        [treeAdaptor addChild:[stream_block nextTree] toTree:root_1];
-
-                        [treeAdaptor addChild:root_1 toTree:root_0];
-                    }
-
-                }
-
-                retval.tree = root_0;
-
-                }
-                break;
-
-        }
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-        [stream_21 release];
-        [stream_functionHeader release];
-        [stream_block release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end declaration */
-/*
- * $ANTLR start variable
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:26:1: variable : type declarator ';' -> ^( VAR_DEF type declarator ) ;
- */
-- (SimpleCParser_variable_return *) variable
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_variable_return * retval = [SimpleCParser_variable_return newSimpleCParser_variable_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *char_literal9 = nil;
-        SimpleCParser_type_return * type7 = nil;
-
-        SimpleCParser_declarator_return * declarator8 = nil;
-
-
-        ANTLRCommonTree *char_literal9_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_21 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 21"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_declarator = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule declarator"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_type = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule type"] retain];
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:27:5: ( type declarator ';' -> ^( VAR_DEF type declarator ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:27:9: type declarator ';' // alt
-        {
-        [self pushFollow:FOLLOW_type_in_variable166];
-        type7 = [self type];
-        [self popFollow];
-
-
-        [stream_type addElement:[type7 getTree]];  /* element() */
-        [self pushFollow:FOLLOW_declarator_in_variable168];
-        declarator8 = [self declarator];
-        [self popFollow];
-
-
-        [stream_declarator addElement:[declarator8 getTree]];  /* element() */
-        char_literal9=(ANTLRCommonToken *)[self match:input TokenType:21 Follow:FOLLOW_21_in_variable170];  
-            [stream_21 addElement:char_literal9];
-          /* element() */
-         /* elements */
-
-        // AST REWRITE
-        // elements: type, declarator
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-         [retval setTree:root_0];
-
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"token retval"
-                                                                    element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 27:29: -> ^( VAR_DEF type declarator )
-        {
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:27:32: ^( VAR_DEF type declarator )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:[[treeAdaptor createTree:(NSInteger)VAR_DEF Text:[[ANTLRBaseRecognizer getTokenNames] objectAtIndex:(NSUInteger)VAR_DEF]] retain]
-                                                                       old:root_1];
-
-                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
-                [treeAdaptor addChild:[stream_declarator nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-        retval.tree = root_0;
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-        [stream_21 release];
-        [stream_declarator release];
-        [stream_type release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end variable */
-/*
- * $ANTLR start declarator
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:30:1: declarator : K_ID ;
- */
-- (SimpleCParser_declarator_return *) declarator
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_declarator_return * retval = [SimpleCParser_declarator_return newSimpleCParser_declarator_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_ID10 = nil;
-
-        ANTLRCommonTree *K_ID10_tree=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:31:5: ( K_ID ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:31:9: K_ID // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        /* ASTParser tokenRef */
-        K_ID10=(ANTLRCommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_declarator199]; 
-        K_ID10_tree = /* ASTParser createNodeFromToken */
-        (ANTLRCommonTree *)[[treeAdaptor createTree:K_ID10] retain];
-        [treeAdaptor addChild:K_ID10_tree  toTree:root_0];
-          /* element() */
-         /* elements */
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end declarator */
-/*
- * $ANTLR start functionHeader
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:34:1: functionHeader : type K_ID '(' ( formalParameter ( ',' formalParameter )* )? ')' -> ^( FUNC_HDR type K_ID ( formalParameter )+ ) ;
- */
-- (SimpleCParser_functionHeader_return *) functionHeader
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_functionHeader_return * retval = [SimpleCParser_functionHeader_return newSimpleCParser_functionHeader_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_ID12 = nil;
-        ANTLRCommonToken *char_literal13 = nil;
-        ANTLRCommonToken *char_literal15 = nil;
-        ANTLRCommonToken *char_literal17 = nil;
-        SimpleCParser_type_return * type11 = nil;
-
-        SimpleCParser_formalParameter_return * formalParameter14 = nil;
-
-        SimpleCParser_formalParameter_return * formalParameter16 = nil;
-
-
-        ANTLRCommonTree *K_ID12_tree=nil;
-        ANTLRCommonTree *char_literal13_tree=nil;
-        ANTLRCommonTree *char_literal15_tree=nil;
-        ANTLRCommonTree *char_literal17_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_K_ID = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_ID"] retain];
-        ANTLRRewriteRuleTokenStream *stream_22 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 22"] retain];
-        ANTLRRewriteRuleTokenStream *stream_23 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 23"] retain];
-        ANTLRRewriteRuleTokenStream *stream_24 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 24"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_formalParameter = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule formalParameter"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_type = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule type"] retain];
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:35:5: ( type K_ID '(' ( formalParameter ( ',' formalParameter )* )? ')' -> ^( FUNC_HDR type K_ID ( formalParameter )+ ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:35:9: type K_ID '(' ( formalParameter ( ',' formalParameter )* )? ')' // alt
-        {
-        [self pushFollow:FOLLOW_type_in_functionHeader219];
-        type11 = [self type];
-        [self popFollow];
-
-
-        [stream_type addElement:[type11 getTree]];  /* element() */
-        K_ID12=(ANTLRCommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_functionHeader221];  
-            [stream_K_ID addElement:K_ID12];
-          /* element() */
-        char_literal13=(ANTLRCommonToken *)[self match:input TokenType:22 Follow:FOLLOW_22_in_functionHeader223];  
-            [stream_22 addElement:char_literal13];
-          /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:35:23: ( formalParameter ( ',' formalParameter )* )? // block
-        NSInteger alt4=2;
-        NSInteger LA4_0 = [input LA:1];
-
-        if ( ((LA4_0>=K_ID && LA4_0<=K_VOID)) ) {
-            alt4=1;
-        }
-        switch (alt4) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:35:25: formalParameter ( ',' formalParameter )* // alt
-                {
-                [self pushFollow:FOLLOW_formalParameter_in_functionHeader227];
-                formalParameter14 = [self formalParameter];
-                [self popFollow];
-
-
-                [stream_formalParameter addElement:[formalParameter14 getTree]];  /* element() */
-                do {
-                    NSInteger alt3=2;
-                    NSInteger LA3_0 = [input LA:1];
-                    if ( (LA3_0==23) ) {
-                        alt3=1;
-                    }
-
-
-                    switch (alt3) {
-                        case 1 : ;
-                            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:35:43: ',' formalParameter // alt
-                            {
-                            char_literal15=(ANTLRCommonToken *)[self match:input TokenType:23 Follow:FOLLOW_23_in_functionHeader231];  
-                                [stream_23 addElement:char_literal15];
-                              /* element() */
-                            [self pushFollow:FOLLOW_formalParameter_in_functionHeader233];
-                            formalParameter16 = [self formalParameter];
-                            [self popFollow];
-
-
-                            [stream_formalParameter addElement:[formalParameter16 getTree]];  /* element() */
-                             /* elements */
-                            }
-                            break;
-
-                        default :
-                            goto loop3;
-                    }
-                } while (YES);
-                loop3: ;
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-        char_literal17=(ANTLRCommonToken *)[self match:input TokenType:24 Follow:FOLLOW_24_in_functionHeader241];  
-            [stream_24 addElement:char_literal17];
-          /* element() */
-         /* elements */
-
-        // AST REWRITE
-        // elements: K_ID, formalParameter, type
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-         [retval setTree:root_0];
-
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"token retval"
-                                                                    element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 36:9: -> ^( FUNC_HDR type K_ID ( formalParameter )+ )
-        {
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:36:12: ^( FUNC_HDR type K_ID ( formalParameter )+ )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:[[treeAdaptor createTree:(NSInteger)FUNC_HDR Text:[[ANTLRBaseRecognizer getTokenNames] objectAtIndex:(NSUInteger)FUNC_HDR]] retain]
-                                                                       old:root_1];
-
-                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
-                 // TODO: args: 
-                [treeAdaptor addChild:[stream_K_ID nextNode] toTree:root_1];
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:36:33: ( formalParameter )+
-                {
-                if ( !([stream_formalParameter hasNext]) ) {
-                    @throw [NSException exceptionWithName:@"RewriteEarlyExitException" reason:nil userInfo:nil];
-                }
-                while ( [stream_formalParameter hasNext] ) {
-                    [treeAdaptor addChild:[stream_formalParameter nextTree] toTree:root_1];
-
-                }
-                [stream_formalParameter reset];
-
-                }
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-        retval.tree = root_0;
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-        [stream_K_ID release];
-        [stream_22 release];
-        [stream_23 release];
-        [stream_24 release];
-        [stream_formalParameter release];
-        [stream_type release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end functionHeader */
-/*
- * $ANTLR start formalParameter
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:39:1: formalParameter : type declarator -> ^( ARG_DEF type declarator ) ;
- */
-- (SimpleCParser_formalParameter_return *) formalParameter
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_formalParameter_return * retval = [SimpleCParser_formalParameter_return newSimpleCParser_formalParameter_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        SimpleCParser_type_return * type18 = nil;
-
-        SimpleCParser_declarator_return * declarator19 = nil;
-
-
-        ANTLRRewriteRuleSubtreeStream *stream_declarator = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule declarator"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_type = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule type"] retain];
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:40:5: ( type declarator -> ^( ARG_DEF type declarator ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:40:9: type declarator // alt
-        {
-        [self pushFollow:FOLLOW_type_in_formalParameter281];
-        type18 = [self type];
-        [self popFollow];
-
-
-        [stream_type addElement:[type18 getTree]];  /* element() */
-        [self pushFollow:FOLLOW_declarator_in_formalParameter283];
-        declarator19 = [self declarator];
-        [self popFollow];
-
-
-        [stream_declarator addElement:[declarator19 getTree]];  /* element() */
-         /* elements */
-
-        // AST REWRITE
-        // elements: declarator, type
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-         [retval setTree:root_0];
-
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"token retval"
-                                                                    element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 40:25: -> ^( ARG_DEF type declarator )
-        {
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:40:28: ^( ARG_DEF type declarator )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:[[treeAdaptor createTree:(NSInteger)ARG_DEF Text:[[ANTLRBaseRecognizer getTokenNames] objectAtIndex:(NSUInteger)ARG_DEF]] retain]
-                                                                       old:root_1];
-
-                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
-                [treeAdaptor addChild:[stream_declarator nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-        retval.tree = root_0;
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-        [stream_declarator release];
-        [stream_type release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end formalParameter */
-/*
- * $ANTLR start type
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:43:1: type : ( K_INT | K_CHAR | K_VOID | K_ID );
- */
-- (SimpleCParser_type_return *) type
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_type_return * retval = [SimpleCParser_type_return newSimpleCParser_type_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *set20 = nil;
-
-        ANTLRCommonTree *set20_tree=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:44:5: ( K_INT | K_CHAR | K_VOID | K_ID ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g: // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        /* ASTParser matchRuleBlockSet */
-        /* ASTParser matchSet */
-        set20 = (ANTLRCommonToken *)[input LT:1];
-        if ((([input LA:1] >= K_ID) && ([input LA:1] <= K_VOID))) {
-            [input consume];
-                [treeAdaptor addChild:/* ASTParser createNodeFromToken */
-            (ANTLRCommonTree *)[[treeAdaptor createTree:set20] retain] toTree:root_0 ];
-            [state setIsErrorRecovery:NO];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-            @throw mse;
-        }
-          /* element() */
-         /* elements */
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end type */
-/*
- * $ANTLR start block
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:50:1: block : lc= '{' ( variable )* ( stat )* '}' -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* ) ;
- */
-- (SimpleCParser_block_return *) block
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_block_return * retval = [SimpleCParser_block_return newSimpleCParser_block_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *lc = nil;
-        ANTLRCommonToken *char_literal23 = nil;
-        SimpleCParser_variable_return * variable21 = nil;
-
-        SimpleCParser_stat_return * stat22 = nil;
-
-
-        ANTLRCommonTree *lc_tree=nil;
-        ANTLRCommonTree *char_literal23_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_25 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 25"] retain];
-        ANTLRRewriteRuleTokenStream *stream_26 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 26"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_variable = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule variable"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_stat = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule stat"] retain];
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:51:5: (lc= '{' ( variable )* ( stat )* '}' -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:51:9: lc= '{' ( variable )* ( stat )* '}' // alt
-        {
-        lc=(ANTLRCommonToken *)[self match:input TokenType:25 Follow:FOLLOW_25_in_block376];  
-            [stream_25 addElement:lc];
-          /* element() */
-        do {
-            NSInteger alt5=2;
-            switch ([input LA:1]) {
-                case K_ID: ;
-                    {
-                    NSInteger LA5_2 = [input LA:2];
-                    if ( (LA5_2==K_ID) ) {
-                        alt5=1;
-                    }
-
-
-                    }
-                    break;
-                case K_INT: ;
-                    {
-                    NSInteger LA5_3 = [input LA:2];
-                    if ( (LA5_3==K_ID) ) {
-                        alt5=1;
-                    }
-
-
-                    }
-                    break;
-                case K_CHAR: ;
-                case K_VOID: ;
-                    {
-                    alt5=1;
-                    }
-                    break;
-
-            }
-
-            switch (alt5) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:52:13: variable // alt
-                    {
-                    [self pushFollow:FOLLOW_variable_in_block390];
-                    variable21 = [self variable];
-                    [self popFollow];
-
-
-                    [stream_variable addElement:[variable21 getTree]];  /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop5;
-            }
-        } while (YES);
-        loop5: ;
-          /* element() */
-        do {
-            NSInteger alt6=2;
-            NSInteger LA6_0 = [input LA:1];
-            if ( ((LA6_0>=K_ID && LA6_0<=K_INT)||LA6_0==K_FOR||(LA6_0>=21 && LA6_0<=22)||LA6_0==25) ) {
-                alt6=1;
-            }
-
-
-            switch (alt6) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:53:13: stat // alt
-                    {
-                    [self pushFollow:FOLLOW_stat_in_block405];
-                    stat22 = [self stat];
-                    [self popFollow];
-
-
-                    [stream_stat addElement:[stat22 getTree]];  /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop6;
-            }
-        } while (YES);
-        loop6: ;
-          /* element() */
-        char_literal23=(ANTLRCommonToken *)[self match:input TokenType:26 Follow:FOLLOW_26_in_block416];  
-            [stream_26 addElement:char_literal23];
-          /* element() */
-         /* elements */
-
-        // AST REWRITE
-        // elements: stat, variable
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-         [retval setTree:root_0];
-
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"token retval"
-                                                                    element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 55:9: -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* )
-        {
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:55:12: ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:[[treeAdaptor createTree:(NSInteger)BLOCK Text:[[ANTLRBaseRecognizer getTokenNames] objectAtIndex:(NSUInteger)BLOCK]] retain]
-                                                                       old:root_1];
-
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:55:34: ( variable )*
-                while ( [stream_variable hasNext] ) {
-                    [treeAdaptor addChild:[stream_variable nextTree] toTree:root_1];
-
-                }
-                [stream_variable reset];
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:55:44: ( stat )*
-                while ( [stream_stat hasNext] ) {
-                    [treeAdaptor addChild:[stream_stat nextTree] toTree:root_1];
-
-                }
-                [stream_stat reset];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-        retval.tree = root_0;
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-        [stream_25 release];
-        [stream_26 release];
-        [stream_variable release];
-        [stream_stat release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end block */
-/*
- * $ANTLR start stat
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:58:1: stat : ( forStat | expr ';' | block | assignStat ';' | ';' );
- */
-- (SimpleCParser_stat_return *) stat
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_stat_return * retval = [SimpleCParser_stat_return newSimpleCParser_stat_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *char_literal26 = nil;
-        ANTLRCommonToken *char_literal29 = nil;
-        ANTLRCommonToken *char_literal30 = nil;
-        SimpleCParser_forStat_return * forStat24 = nil;
-
-        SimpleCParser_expr_return * expr25 = nil;
-
-        SimpleCParser_block_return * block27 = nil;
-
-        SimpleCParser_assignStat_return * assignStat28 = nil;
-
-
-        ANTLRCommonTree *char_literal26_tree=nil;
-        ANTLRCommonTree *char_literal29_tree=nil;
-        ANTLRCommonTree *char_literal30_tree=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:58:5: ( forStat | expr ';' | block | assignStat ';' | ';' ) //ruleblock
-        NSInteger alt7=5;
-        switch ([input LA:1]) {
-            case K_FOR: ;
-                {
-                alt7=1;
-                }
-                break;
-            case K_ID: ;
-                {
-                NSInteger LA7_2 = [input LA:2];
-
-                if ( (LA7_2==K_EQ) ) {
-                    alt7=4;
-                }
-                else if ( ((LA7_2>=K_EQEQ && LA7_2<=K_PLUS)||LA7_2==21) ) {
-                    alt7=2;
-                }
-                else {
-                    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:7 state:2 stream:input];
-                    @throw nvae;
-                }
-                }
-                break;
-            case K_INT: ;
-            case 22: ;
-                {
-                alt7=2;
-                }
-                break;
-            case 25: ;
-                {
-                alt7=3;
-                }
-                break;
-            case 21: ;
-                {
-                alt7=5;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:7 state:0 stream:input];
-            @throw nvae;
-        }
-
-        switch (alt7) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:58:7: forStat // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                /* ASTParser ruleRef */
-                [self pushFollow:FOLLOW_forStat_in_stat449];
-                forStat24 = [self forStat];
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[forStat24 getTree] toTree:root_0];  /* element() */
-                 /* elements */
-                }
-                break;
-            case 2 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:59:7: expr ';' // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                /* ASTParser ruleRef */
-                [self pushFollow:FOLLOW_expr_in_stat457];
-                expr25 = [self expr];
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[expr25 getTree] toTree:root_0];  /* element() */
-                char_literal26=(ANTLRCommonToken *)[self match:input TokenType:21 Follow:FOLLOW_21_in_stat459];   /* element() */
-                 /* elements */
-                }
-                break;
-            case 3 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:60:7: block // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                /* ASTParser ruleRef */
-                [self pushFollow:FOLLOW_block_in_stat468];
-                block27 = [self block];
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[block27 getTree] toTree:root_0];  /* element() */
-                 /* elements */
-                }
-                break;
-            case 4 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:61:7: assignStat ';' // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                /* ASTParser ruleRef */
-                [self pushFollow:FOLLOW_assignStat_in_stat476];
-                assignStat28 = [self assignStat];
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[assignStat28 getTree] toTree:root_0];  /* element() */
-                char_literal29=(ANTLRCommonToken *)[self match:input TokenType:21 Follow:FOLLOW_21_in_stat478];   /* element() */
-                 /* elements */
-                }
-                break;
-            case 5 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:62:7: ';' // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                char_literal30=(ANTLRCommonToken *)[self match:input TokenType:21 Follow:FOLLOW_21_in_stat487];   /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end stat */
-/*
- * $ANTLR start forStat
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:65:1: forStat : K_FOR '(' start= assignStat ';' expr ';' next= assignStat ')' block -> ^( K_FOR $start expr $next block ) ;
- */
-- (SimpleCParser_forStat_return *) forStat
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_forStat_return * retval = [SimpleCParser_forStat_return newSimpleCParser_forStat_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_FOR31 = nil;
-        ANTLRCommonToken *char_literal32 = nil;
-        ANTLRCommonToken *char_literal33 = nil;
-        ANTLRCommonToken *char_literal35 = nil;
-        ANTLRCommonToken *char_literal36 = nil;
-        SimpleCParser_assignStat_return * start = nil;
-
-        SimpleCParser_assignStat_return * next = nil;
-
-        SimpleCParser_expr_return * expr34 = nil;
-
-        SimpleCParser_block_return * block37 = nil;
-
-
-        ANTLRCommonTree *K_FOR31_tree=nil;
-        ANTLRCommonTree *char_literal32_tree=nil;
-        ANTLRCommonTree *char_literal33_tree=nil;
-        ANTLRCommonTree *char_literal35_tree=nil;
-        ANTLRCommonTree *char_literal36_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_21 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 21"] retain];
-        ANTLRRewriteRuleTokenStream *stream_22 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 22"] retain];
-        ANTLRRewriteRuleTokenStream *stream_24 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 24"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_FOR = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_FOR"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_assignStat = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule assignStat"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_block = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule block"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_expr = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule expr"] retain];
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:66:5: ( K_FOR '(' start= assignStat ';' expr ';' next= assignStat ')' block -> ^( K_FOR $start expr $next block ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:66:9: K_FOR '(' start= assignStat ';' expr ';' next= assignStat ')' block // alt
-        {
-        K_FOR31=(ANTLRCommonToken *)[self match:input TokenType:K_FOR Follow:FOLLOW_K_FOR_in_forStat507];  
-            [stream_K_FOR addElement:K_FOR31];
-          /* element() */
-        char_literal32=(ANTLRCommonToken *)[self match:input TokenType:22 Follow:FOLLOW_22_in_forStat509];  
-            [stream_22 addElement:char_literal32];
-          /* element() */
-        [self pushFollow:FOLLOW_assignStat_in_forStat513];
-        start = [self assignStat];
-        [self popFollow];
-
-
-        [stream_assignStat addElement:[start getTree]];  /* element() */
-        char_literal33=(ANTLRCommonToken *)[self match:input TokenType:21 Follow:FOLLOW_21_in_forStat515];  
-            [stream_21 addElement:char_literal33];
-          /* element() */
-        [self pushFollow:FOLLOW_expr_in_forStat517];
-        expr34 = [self expr];
-        [self popFollow];
-
-
-        [stream_expr addElement:[expr34 getTree]];  /* element() */
-        char_literal35=(ANTLRCommonToken *)[self match:input TokenType:21 Follow:FOLLOW_21_in_forStat519];  
-            [stream_21 addElement:char_literal35];
-          /* element() */
-        [self pushFollow:FOLLOW_assignStat_in_forStat523];
-        next = [self assignStat];
-        [self popFollow];
-
-
-        [stream_assignStat addElement:[next getTree]];  /* element() */
-        char_literal36=(ANTLRCommonToken *)[self match:input TokenType:24 Follow:FOLLOW_24_in_forStat525];  
-            [stream_24 addElement:char_literal36];
-          /* element() */
-        [self pushFollow:FOLLOW_block_in_forStat527];
-        block37 = [self block];
-        [self popFollow];
-
-
-        [stream_block addElement:[block37 getTree]];  /* element() */
-         /* elements */
-
-        // AST REWRITE
-        // elements: K_FOR, expr, start, next, block
-        // token labels: 
-        // rule labels: retval, start, next
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-         [retval setTree:root_0];
-
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"token retval"
-                                                                    element:retval!=nil?[retval getTree]:nil] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_start =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"token start"
-                                                                    element:start!=nil?[start getTree]:nil] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_next =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"token next"
-                                                                    element:next!=nil?[next getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 67:9: -> ^( K_FOR $start expr $next block )
-        {
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:67:12: ^( K_FOR $start expr $next block )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:[stream_K_FOR nextNode] old:root_1];
-
-                [treeAdaptor addChild:[stream_start nextTree] toTree:root_1];
-                [treeAdaptor addChild:[stream_expr nextTree] toTree:root_1];
-                [treeAdaptor addChild:[stream_next nextTree] toTree:root_1];
-                [treeAdaptor addChild:[stream_block nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-        retval.tree = root_0;
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-        [stream_21 release];
-        [stream_22 release];
-        [stream_24 release];
-        [stream_K_FOR release];
-        [stream_assignStat release];
-        [stream_block release];
-        [stream_expr release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end forStat */
-/*
- * $ANTLR start assignStat
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:70:1: assignStat : K_ID K_EQ expr -> ^( K_EQ K_ID expr ) ;
- */
-- (SimpleCParser_assignStat_return *) assignStat
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_assignStat_return * retval = [SimpleCParser_assignStat_return newSimpleCParser_assignStat_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_ID38 = nil;
-        ANTLRCommonToken *K_EQ39 = nil;
-        SimpleCParser_expr_return * expr40 = nil;
-
-
-        ANTLRCommonTree *K_ID38_tree=nil;
-        ANTLRCommonTree *K_EQ39_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_K_ID = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_ID"] retain];
-        ANTLRRewriteRuleTokenStream *stream_K_EQ = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token K_EQ"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_expr = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule expr"] retain];
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:71:5: ( K_ID K_EQ expr -> ^( K_EQ K_ID expr ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:71:9: K_ID K_EQ expr // alt
-        {
-        K_ID38=(ANTLRCommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_assignStat570];  
-            [stream_K_ID addElement:K_ID38];
-          /* element() */
-        K_EQ39=(ANTLRCommonToken *)[self match:input TokenType:K_EQ Follow:FOLLOW_K_EQ_in_assignStat572];  
-            [stream_K_EQ addElement:K_EQ39];
-          /* element() */
-        [self pushFollow:FOLLOW_expr_in_assignStat574];
-        expr40 = [self expr];
-        [self popFollow];
-
-
-        [stream_expr addElement:[expr40 getTree]];  /* element() */
-         /* elements */
-
-        // AST REWRITE
-        // elements: K_EQ, K_ID, expr
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-         [retval setTree:root_0];
-
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"token retval"
-                                                                    element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 71:24: -> ^( K_EQ K_ID expr )
-        {
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:71:27: ^( K_EQ K_ID expr )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:[stream_K_EQ nextNode] old:root_1];
-
-                 // TODO: args: 
-                [treeAdaptor addChild:[stream_K_ID nextNode] toTree:root_1];
-                [treeAdaptor addChild:[stream_expr nextTree] toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-        retval.tree = root_0;
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-        [stream_K_ID release];
-        [stream_K_EQ release];
-        [stream_expr release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end assignStat */
-/*
- * $ANTLR start expr
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:74:1: expr : condExpr ;
- */
-- (SimpleCParser_expr_return *) expr
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_expr_return * retval = [SimpleCParser_expr_return newSimpleCParser_expr_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        SimpleCParser_condExpr_return * condExpr41 = nil;
-
-
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:74:5: ( condExpr ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:74:9: condExpr // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        /* ASTParser ruleRef */
-        [self pushFollow:FOLLOW_condExpr_in_expr598];
-        condExpr41 = [self condExpr];
-        [self popFollow];
-
-
-        [treeAdaptor addChild:[condExpr41 getTree] toTree:root_0];  /* element() */
-         /* elements */
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end expr */
-/*
- * $ANTLR start condExpr
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:77:1: condExpr : aexpr ( ( K_EQEQ | K_LT ) aexpr )? ;
- */
-- (SimpleCParser_condExpr_return *) condExpr
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_condExpr_return * retval = [SimpleCParser_condExpr_return newSimpleCParser_condExpr_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_EQEQ43 = nil;
-        ANTLRCommonToken *K_LT44 = nil;
-        SimpleCParser_aexpr_return * aexpr42 = nil;
-
-        SimpleCParser_aexpr_return * aexpr45 = nil;
-
-
-        ANTLRCommonTree *K_EQEQ43_tree=nil;
-        ANTLRCommonTree *K_LT44_tree=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:78:5: ( aexpr ( ( K_EQEQ | K_LT ) aexpr )? ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:78:9: aexpr ( ( K_EQEQ | K_LT ) aexpr )? // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        /* ASTParser ruleRef */
-        [self pushFollow:FOLLOW_aexpr_in_condExpr617];
-        aexpr42 = [self aexpr];
-        [self popFollow];
-
-
-        [treeAdaptor addChild:[aexpr42 getTree] toTree:root_0];  /* element() */
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:78:15: ( ( K_EQEQ | K_LT ) aexpr )? // block
-        NSInteger alt9=2;
-        NSInteger LA9_0 = [input LA:1];
-
-        if ( ((LA9_0>=K_EQEQ && LA9_0<=K_LT)) ) {
-            alt9=1;
-        }
-        switch (alt9) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:78:17: ( K_EQEQ | K_LT ) aexpr // alt
-                {
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:78:17: ( K_EQEQ | K_LT ) // block
-                NSInteger alt8=2;
-                NSInteger LA8_0 = [input LA:1];
-
-                if ( (LA8_0==K_EQEQ) ) {
-                    alt8=1;
-                }
-                else if ( (LA8_0==K_LT) ) {
-                    alt8=2;
-                }
-                else {
-                    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:8 state:0 stream:input];
-                    @throw nvae;
-                }
-                switch (alt8) {
-                    case 1 : ;
-                        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:78:18: K_EQEQ // alt
-                        {
-                        K_EQEQ43=(ANTLRCommonToken *)[self match:input TokenType:K_EQEQ Follow:FOLLOW_K_EQEQ_in_condExpr622]; 
-                        K_EQEQ43_tree = /* ASTParser createNodeFromToken */
-                        (ANTLRCommonTree *)[[treeAdaptor createTree:K_EQEQ43] retain];
-                        root_0 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:K_EQEQ43_tree old:root_0];
-                          /* element() */
-                         /* elements */
-                        }
-                        break;
-                    case 2 : ;
-                        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:78:28: K_LT // alt
-                        {
-                        K_LT44=(ANTLRCommonToken *)[self match:input TokenType:K_LT Follow:FOLLOW_K_LT_in_condExpr627]; 
-                        K_LT44_tree = /* ASTParser createNodeFromToken */
-                        (ANTLRCommonTree *)[[treeAdaptor createTree:K_LT44] retain];
-                        root_0 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:K_LT44_tree old:root_0];
-                          /* element() */
-                         /* elements */
-                        }
-                        break;
-
-                }
-                  /* element() */
-                /* ASTParser ruleRef */
-                [self pushFollow:FOLLOW_aexpr_in_condExpr631];
-                aexpr45 = [self aexpr];
-                [self popFollow];
-
-
-                [treeAdaptor addChild:[aexpr45 getTree] toTree:root_0];  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-          /* element() */
-         /* elements */
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end condExpr */
-/*
- * $ANTLR start aexpr
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:81:1: aexpr : atom ( K_PLUS atom )* ;
- */
-- (SimpleCParser_aexpr_return *) aexpr
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_aexpr_return * retval = [SimpleCParser_aexpr_return newSimpleCParser_aexpr_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_PLUS47 = nil;
-        SimpleCParser_atom_return * atom46 = nil;
-
-        SimpleCParser_atom_return * atom48 = nil;
-
-
-        ANTLRCommonTree *K_PLUS47_tree=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:82:5: ( atom ( K_PLUS atom )* ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:82:9: atom ( K_PLUS atom )* // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        /* ASTParser ruleRef */
-        [self pushFollow:FOLLOW_atom_in_aexpr653];
-        atom46 = [self atom];
-        [self popFollow];
-
-
-        [treeAdaptor addChild:[atom46 getTree] toTree:root_0];  /* element() */
-        do {
-            NSInteger alt10=2;
-            NSInteger LA10_0 = [input LA:1];
-            if ( (LA10_0==K_PLUS) ) {
-                alt10=1;
-            }
-
-
-            switch (alt10) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:82:16: K_PLUS atom // alt
-                    {
-                    K_PLUS47=(ANTLRCommonToken *)[self match:input TokenType:K_PLUS Follow:FOLLOW_K_PLUS_in_aexpr657]; 
-                    K_PLUS47_tree = /* ASTParser createNodeFromToken */
-                    (ANTLRCommonTree *)[[treeAdaptor createTree:K_PLUS47] retain];
-                    root_0 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:K_PLUS47_tree old:root_0];
-                      /* element() */
-                    /* ASTParser ruleRef */
-                    [self pushFollow:FOLLOW_atom_in_aexpr660];
-                    atom48 = [self atom];
-                    [self popFollow];
-
-
-                    [treeAdaptor addChild:[atom48 getTree] toTree:root_0];  /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    goto loop10;
-            }
-        } while (YES);
-        loop10: ;
-          /* element() */
-         /* elements */
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end aexpr */
-/*
- * $ANTLR start atom
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:85:1: atom : ( K_ID | K_INT | '(' expr ')' -> expr );
- */
-- (SimpleCParser_atom_return *) atom
-{
-    /* ruleScopeSetUp */
-
-    /* AST ruleDeclarations */
-    SimpleCParser_atom_return * retval = [SimpleCParser_atom_return newSimpleCParser_atom_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *K_ID49 = nil;
-        ANTLRCommonToken *K_INT50 = nil;
-        ANTLRCommonToken *char_literal51 = nil;
-        ANTLRCommonToken *char_literal53 = nil;
-        SimpleCParser_expr_return * expr52 = nil;
-
-
-        ANTLRCommonTree *K_ID49_tree=nil;
-        ANTLRCommonTree *K_INT50_tree=nil;
-        ANTLRCommonTree *char_literal51_tree=nil;
-        ANTLRCommonTree *char_literal53_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_22 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 22"] retain];
-        ANTLRRewriteRuleTokenStream *stream_24 = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 24"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_expr = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule expr"] retain];
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:86:5: ( K_ID | K_INT | '(' expr ')' -> expr ) //ruleblock
-        NSInteger alt11=3;
-        switch ([input LA:1]) {
-            case K_ID: ;
-                {
-                alt11=1;
-                }
-                break;
-            case K_INT: ;
-                {
-                alt11=2;
-                }
-                break;
-            case 22: ;
-                {
-                alt11=3;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:11 state:0 stream:input];
-            @throw nvae;
-        }
-
-        switch (alt11) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:86:7: K_ID // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                /* ASTParser tokenRef */
-                K_ID49=(ANTLRCommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_atom680]; 
-                K_ID49_tree = /* ASTParser createNodeFromToken */
-                (ANTLRCommonTree *)[[treeAdaptor createTree:K_ID49] retain];
-                [treeAdaptor addChild:K_ID49_tree  toTree:root_0];
-                  /* element() */
-                 /* elements */
-                }
-                break;
-            case 2 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:87:7: K_INT // alt
-                {
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                /* ASTParser tokenRef */
-                K_INT50=(ANTLRCommonToken *)[self match:input TokenType:K_INT Follow:FOLLOW_K_INT_in_atom694]; 
-                K_INT50_tree = /* ASTParser createNodeFromToken */
-                (ANTLRCommonTree *)[[treeAdaptor createTree:K_INT50] retain];
-                [treeAdaptor addChild:K_INT50_tree  toTree:root_0];
-                  /* element() */
-                 /* elements */
-                }
-                break;
-            case 3 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleC.g:88:7: '(' expr ')' // alt
-                {
-                char_literal51=(ANTLRCommonToken *)[self match:input TokenType:22 Follow:FOLLOW_22_in_atom708];  
-                    [stream_22 addElement:char_literal51];
-                  /* element() */
-                [self pushFollow:FOLLOW_expr_in_atom710];
-                expr52 = [self expr];
-                [self popFollow];
-
-
-                [stream_expr addElement:[expr52 getTree]];  /* element() */
-                char_literal53=(ANTLRCommonToken *)[self match:input TokenType:24 Follow:FOLLOW_24_in_atom712];  
-                    [stream_24 addElement:char_literal53];
-                  /* element() */
-                 /* elements */
-
-                // AST REWRITE
-                // elements: expr
-                // token labels: 
-                // rule labels: retval
-                // token list labels: 
-                // rule list labels: 
-                // wildcard labels: 
-                 [retval setTree:root_0];
-
-                retval.tree = root_0;
-
-                ANTLRRewriteRuleSubtreeStream *stream_retval =
-                    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                        description:@"token retval"
-                                                                            element:retval!=nil?[retval getTree]:nil] retain];
-
-                root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-                // 88:20: -> expr
-                {
-                    [treeAdaptor addChild:[stream_expr nextTree] toTree:root_0];
-
-                }
-
-                retval.tree = root_0;
-
-                }
-                break;
-
-        }
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-        [stream_22 release];
-        [stream_24 release];
-        [stream_expr release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end atom */
-
-@end /* end of SimpleCParser implementation line 669 */
-
-
-/* End of code
- * =============================================================================
- */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.h b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.h
deleted file mode 100644
index fd59407..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// $ANTLR 3.2 Aug 23, 2010 07:48:06 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g 2010-08-23 07:55:04
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-#pragma mark Tokens
-#define K_ID 10
-#define T__26 26
-#define T__25 25
-#define T__24 24
-#define T__23 23
-#define K_EQEQ 16
-#define T__22 22
-#define K_INT 11
-#define T__21 21
-#define K_FOR 14
-#define FUNC_HDR 6
-#define FUNC_DEF 8
-#define EOF -1
-#define K_INT_TYPE 19
-#define FUNC_DECL 7
-#define ARG_DEF 5
-#define WS 20
-#define K_EQ 15
-#define BLOCK 9
-#define K_LT 17
-#define K_CHAR 12
-#define K_VOID 13
-#define VAR_DEF 4
-#define K_PLUS 18
-#pragma mark Dynamic Global Scopes
-@interface Symbols_Scope : ANTLRSymbolsScope {  /* globalAttributeScopeDecl */
-ANTLRCommonTree * tree;
-}
-/* start of properties */
-
-@property (retain, getter=gettree, setter=settree:) ANTLRCommonTree * tree;
-
-/* end properties */
-
-+ (Symbols_Scope *)newSymbols_Scope;
-/* start of iterated get and set functions */
-
-- (ANTLRCommonTree *)gettree;
-- (void)settree:(ANTLRCommonTree *)aVal;
-
-/* End of iterated get and set functions */
-
-@end /* end of Symbols_Scope interface */
-
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-@interface SimpleCTP_expr_return :ANTLRTreeRuleReturnScope { // line 1672
- // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (SimpleCTP_expr_return *)newSimpleCTP_expr_return;
-// this is start of set and get methods
-  // methodsDecl
-@end /* end of returnScopeInterface interface */
-
-#pragma mark Rule return scopes end
-@interface SimpleCTP : ANTLRTreeParser { // line 529
-// start of globalAttributeScopeMemVar
-/* globalAttributeScopeMemVar */
-ANTLRSymbolStack *gStack;
-Symbols_Scope *Symbols_scope;
-
-// start of action-actionScope-memVars
-// start of ruleAttributeScopeMemVar
-
-
-// Start of memVars
-
- }
-
-// start of action-actionScope-methodsDecl
-
-
-- (void)program; 
-- (void)declaration; 
-- (void)variable; 
-- (void)declarator; 
-- (void)functionHeader; 
-- (void)formalParameter; 
-- (void)type; 
-- (void)block; 
-- (void)stat; 
-- (void)forStat; 
-- (SimpleCTP_expr_return *)expr; 
-- (void)atom; 
-
-
-@end /* end of SimpleCTP interface */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.m b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.m
deleted file mode 100644
index 1ac0952..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.m
+++ /dev/null
@@ -1,1059 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 23, 2010 07:48:06
- *
- *     -  From the grammar source file : /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g
- *     -                            On : 2010-08-23 07:55:04
- *     -           for the tree parser : SimpleCTPTreeParser *
- * Editing it, at least manually, is not wise. 
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 23, 2010 07:48:06 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g 2010-08-23 07:55:04
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "SimpleCTP.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_declaration_in_program56;
-static const unsigned long long FOLLOW_declaration_in_program56_data[] = { 0x0000000000000192LL};
-static ANTLRBitSet *FOLLOW_variable_in_declaration76;
-static const unsigned long long FOLLOW_variable_in_declaration76_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_FUNC_DECL_in_declaration87;
-static const unsigned long long FOLLOW_FUNC_DECL_in_declaration87_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_functionHeader_in_declaration89;
-static const unsigned long long FOLLOW_functionHeader_in_declaration89_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_FUNC_DEF_in_declaration101;
-static const unsigned long long FOLLOW_FUNC_DEF_in_declaration101_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_functionHeader_in_declaration103;
-static const unsigned long long FOLLOW_functionHeader_in_declaration103_data[] = { 0x0000000000000200LL};
-static ANTLRBitSet *FOLLOW_block_in_declaration105;
-static const unsigned long long FOLLOW_block_in_declaration105_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_VAR_DEF_in_variable126;
-static const unsigned long long FOLLOW_VAR_DEF_in_variable126_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_type_in_variable128;
-static const unsigned long long FOLLOW_type_in_variable128_data[] = { 0x0000000000000400LL};
-static ANTLRBitSet *FOLLOW_declarator_in_variable130;
-static const unsigned long long FOLLOW_declarator_in_variable130_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_declarator150;
-static const unsigned long long FOLLOW_K_ID_in_declarator150_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_FUNC_HDR_in_functionHeader171;
-static const unsigned long long FOLLOW_FUNC_HDR_in_functionHeader171_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_type_in_functionHeader173;
-static const unsigned long long FOLLOW_type_in_functionHeader173_data[] = { 0x0000000000000400LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_functionHeader175;
-static const unsigned long long FOLLOW_K_ID_in_functionHeader175_data[] = { 0x0000000000000020LL};
-static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader177;
-static const unsigned long long FOLLOW_formalParameter_in_functionHeader177_data[] = { 0x0000000000000028LL};
-static ANTLRBitSet *FOLLOW_ARG_DEF_in_formalParameter199;
-static const unsigned long long FOLLOW_ARG_DEF_in_formalParameter199_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_type_in_formalParameter201;
-static const unsigned long long FOLLOW_type_in_formalParameter201_data[] = { 0x0000000000000400LL};
-static ANTLRBitSet *FOLLOW_declarator_in_formalParameter203;
-static const unsigned long long FOLLOW_declarator_in_formalParameter203_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_set_in_type0;
-static const unsigned long long FOLLOW_set_in_type0_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_BLOCK_in_block286;
-static const unsigned long long FOLLOW_BLOCK_in_block286_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_variable_in_block288;
-static const unsigned long long FOLLOW_variable_in_block288_data[] = { 0x000000000007CE18LL};
-static ANTLRBitSet *FOLLOW_stat_in_block291;
-static const unsigned long long FOLLOW_stat_in_block291_data[] = { 0x000000000007CE08LL};
-static ANTLRBitSet *FOLLOW_forStat_in_stat305;
-static const unsigned long long FOLLOW_forStat_in_stat305_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_expr_in_stat313;
-static const unsigned long long FOLLOW_expr_in_stat313_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_block_in_stat321;
-static const unsigned long long FOLLOW_block_in_stat321_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_K_FOR_in_forStat341;
-static const unsigned long long FOLLOW_K_FOR_in_forStat341_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_expr_in_forStat343;
-static const unsigned long long FOLLOW_expr_in_forStat343_data[] = { 0x0000000000078C00LL};
-static ANTLRBitSet *FOLLOW_expr_in_forStat345;
-static const unsigned long long FOLLOW_expr_in_forStat345_data[] = { 0x0000000000078C00LL};
-static ANTLRBitSet *FOLLOW_expr_in_forStat347;
-static const unsigned long long FOLLOW_expr_in_forStat347_data[] = { 0x0000000000000200LL};
-static ANTLRBitSet *FOLLOW_block_in_forStat349;
-static const unsigned long long FOLLOW_block_in_forStat349_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_K_EQEQ_in_expr365;
-static const unsigned long long FOLLOW_K_EQEQ_in_expr365_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr367;
-static const unsigned long long FOLLOW_expr_in_expr367_data[] = { 0x0000000000078C00LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr369;
-static const unsigned long long FOLLOW_expr_in_expr369_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_K_LT_in_expr381;
-static const unsigned long long FOLLOW_K_LT_in_expr381_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr383;
-static const unsigned long long FOLLOW_expr_in_expr383_data[] = { 0x0000000000078C00LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr385;
-static const unsigned long long FOLLOW_expr_in_expr385_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_K_PLUS_in_expr397;
-static const unsigned long long FOLLOW_K_PLUS_in_expr397_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr399;
-static const unsigned long long FOLLOW_expr_in_expr399_data[] = { 0x0000000000078C00LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr401;
-static const unsigned long long FOLLOW_expr_in_expr401_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_K_EQ_in_expr413;
-static const unsigned long long FOLLOW_K_EQ_in_expr413_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_K_ID_in_expr415;
-static const unsigned long long FOLLOW_K_ID_in_expr415_data[] = { 0x0000000000078C00LL};
-static ANTLRBitSet *FOLLOW_expr_in_expr419;
-static const unsigned long long FOLLOW_expr_in_expr419_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_atom_in_expr432;
-static const unsigned long long FOLLOW_atom_in_expr432_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_set_in_atom0;
-static const unsigned long long FOLLOW_set_in_atom0_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-@implementation Symbols_Scope  /* globalAttributeScopeImpl */
-/* start of synthesize -- OBJC-Line 1750 */
-
-@synthesize tree;
-+ (Symbols_Scope *)newSymbols_Scope
-{
-    return [[[Symbols_Scope alloc] init] retain];
-}
-/* start of iterate get and set functions */
-
-- (ANTLRCommonTree *)gettree { return( tree ); }
-
-- (void)settree:(ANTLRCommonTree *)aVal { tree = aVal; }
-
-
-
-/* End of iterate get and set functions */
-
-@end /* end of Symbols_Scope implementation */
-
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule return scopes start
-@implementation SimpleCTP_expr_return
-+ (SimpleCTP_expr_return *)newSimpleCTP_expr_return
-{
-    return [[[SimpleCTP_expr_return alloc] init] retain];
-}
-
-
-
-
-@end /* end of returnScope implementation */
-
-
-
-@implementation SimpleCTP  // line 610
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_declaration_in_program56 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declaration_in_program56_data Count:(NSUInteger)1] retain];
-    FOLLOW_variable_in_declaration76 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_declaration76_data Count:(NSUInteger)1] retain];
-    FOLLOW_FUNC_DECL_in_declaration87 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_DECL_in_declaration87_data Count:(NSUInteger)1] retain];
-    FOLLOW_functionHeader_in_declaration89 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration89_data Count:(NSUInteger)1] retain];
-    FOLLOW_FUNC_DEF_in_declaration101 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_DEF_in_declaration101_data Count:(NSUInteger)1] retain];
-    FOLLOW_functionHeader_in_declaration103 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration103_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_declaration105 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_declaration105_data Count:(NSUInteger)1] retain];
-    FOLLOW_VAR_DEF_in_variable126 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_VAR_DEF_in_variable126_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_variable128 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_variable128_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_variable130 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_variable130_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_declarator150 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_declarator150_data Count:(NSUInteger)1] retain];
-    FOLLOW_FUNC_HDR_in_functionHeader171 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_HDR_in_functionHeader171_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_functionHeader173 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_functionHeader173_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_functionHeader175 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_functionHeader175_data Count:(NSUInteger)1] retain];
-    FOLLOW_formalParameter_in_functionHeader177 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader177_data Count:(NSUInteger)1] retain];
-    FOLLOW_ARG_DEF_in_formalParameter199 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ARG_DEF_in_formalParameter199_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_formalParameter201 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_formalParameter201_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_formalParameter203 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_formalParameter203_data Count:(NSUInteger)1] retain];
-    FOLLOW_set_in_type0 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_set_in_type0_data Count:(NSUInteger)1] retain];
-    FOLLOW_BLOCK_in_block286 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_BLOCK_in_block286_data Count:(NSUInteger)1] retain];
-    FOLLOW_variable_in_block288 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_block288_data Count:(NSUInteger)1] retain];
-    FOLLOW_stat_in_block291 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block291_data Count:(NSUInteger)1] retain];
-    FOLLOW_forStat_in_stat305 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_forStat_in_stat305_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_stat313 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_stat313_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_stat321 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat321_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_FOR_in_forStat341 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_FOR_in_forStat341_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_forStat343 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat343_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_forStat345 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat345_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_forStat347 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat347_data Count:(NSUInteger)1] retain];
-    FOLLOW_block_in_forStat349 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_forStat349_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_EQEQ_in_expr365 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQEQ_in_expr365_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr367 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr367_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr369 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr369_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_LT_in_expr381 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_LT_in_expr381_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr383 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr383_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr385 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr385_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_PLUS_in_expr397 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_PLUS_in_expr397_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr399 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr399_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr401 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr401_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_EQ_in_expr413 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQ_in_expr413_data Count:(NSUInteger)1] retain];
-    FOLLOW_K_ID_in_expr415 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_expr415_data Count:(NSUInteger)1] retain];
-    FOLLOW_expr_in_expr419 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr419_data Count:(NSUInteger)1] retain];
-    FOLLOW_atom_in_expr432 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_expr432_data Count:(NSUInteger)1] retain];
-    FOLLOW_set_in_atom0 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_set_in_atom0_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[[NSArray alloc] initWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"VAR_DEF", @"ARG_DEF", @"FUNC_HDR", @"FUNC_DECL", @"FUNC_DEF", @"BLOCK", 
- @"K_ID", @"K_INT", @"K_CHAR", @"K_VOID", @"K_FOR", @"K_EQ", @"K_EQEQ", 
- @"K_LT", @"K_PLUS", @"K_INT_TYPE", @"WS", @"';'", @"'('", @"','", @"')'", 
- @"'{'", @"'}'", nil] retain]];
-}
-
-+ (SimpleCTP *)newSimpleCTP:(id<ANTLRTreeNodeStream>)aStream
-{
-
-    return [[SimpleCTP alloc] initWithStream:aStream];
-
-}
-
-
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)aStream
-{
-    if ((self = [super initWithStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:12+1] retain]]) != nil) {
-
-
-
-        /* start of actions-actionScope-init */
-        /* start of init */
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [Symbols_scope release];
-    [super dealloc];
-}
-// start actions.actionScope.methods
-// start methods()
-// start rules
-/*
- * $ANTLR start program
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:13:1: program : ( declaration )+ ;
- */
-- (void) program
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:14:5: ( ( declaration )+ ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:14:9: ( declaration )+ // alt
-        {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:14:9: ( declaration )+ // positiveClosureBlock
-        NSInteger cnt1=0;
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( (LA1_0==VAR_DEF||(LA1_0>=FUNC_DECL && LA1_0<=FUNC_DEF)) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:14:9: declaration // alt
-                    {
-                    [self pushFollow:FOLLOW_declaration_in_program56];
-                    [self declaration];
-                    [self popFollow];
-
-                      /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end program */
-/*
- * $ANTLR start declaration
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:17:1: declaration : ( variable | ^( FUNC_DECL functionHeader ) | ^( FUNC_DEF functionHeader block ) );
- */
-- (void) declaration
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:18:5: ( variable | ^( FUNC_DECL functionHeader ) | ^( FUNC_DEF functionHeader block ) ) //ruleblock
-        NSInteger alt2=3;
-        switch ([input LA:1]) {
-            case VAR_DEF: ;
-                {
-                alt2=1;
-                }
-                break;
-            case FUNC_DECL: ;
-                {
-                alt2=2;
-                }
-                break;
-            case FUNC_DEF: ;
-                {
-                alt2=3;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:2 state:0 stream:input];
-            @throw nvae;
-        }
-
-        switch (alt2) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:18:9: variable // alt
-                {
-                [self pushFollow:FOLLOW_variable_in_declaration76];
-                [self variable];
-                [self popFollow];
-
-                  /* element() */
-                 /* elements */
-                }
-                break;
-            case 2 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:19:9: ^( FUNC_DECL functionHeader ) // alt
-                {
-                [self match:input TokenType:FUNC_DECL Follow:FOLLOW_FUNC_DECL_in_declaration87];   /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-                    [self pushFollow:FOLLOW_functionHeader_in_declaration89];
-                    [self functionHeader];
-                    [self popFollow];
-
-                      /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeUP Follow:nil];   /* element() */
-                 /* elements */
-                }
-                break;
-            case 3 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:20:9: ^( FUNC_DEF functionHeader block ) // alt
-                {
-                [self match:input TokenType:FUNC_DEF Follow:FOLLOW_FUNC_DEF_in_declaration101];   /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-                    [self pushFollow:FOLLOW_functionHeader_in_declaration103];
-                    [self functionHeader];
-                    [self popFollow];
-
-                      /* element() */
-                    [self pushFollow:FOLLOW_block_in_declaration105];
-                    [self block];
-                    [self popFollow];
-
-                      /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeUP Follow:nil];   /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end declaration */
-/*
- * $ANTLR start variable
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:23:1: variable : ^( VAR_DEF type declarator ) ;
- */
-- (void) variable
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:24:5: ( ^( VAR_DEF type declarator ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:24:9: ^( VAR_DEF type declarator ) // alt
-        {
-        [self match:input TokenType:VAR_DEF Follow:FOLLOW_VAR_DEF_in_variable126];   /* element() */
-
-            [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-            [self pushFollow:FOLLOW_type_in_variable128];
-            [self type];
-            [self popFollow];
-
-              /* element() */
-            [self pushFollow:FOLLOW_declarator_in_variable130];
-            [self declarator];
-            [self popFollow];
-
-              /* element() */
-
-            [self match:input TokenType:ANTLRTokenTypeUP Follow:nil];   /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end variable */
-/*
- * $ANTLR start declarator
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:27:1: declarator : K_ID ;
- */
-- (void) declarator
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:28:5: ( K_ID ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:28:9: K_ID // alt
-        {
-        [self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_declarator150];   /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end declarator */
-/*
- * $ANTLR start functionHeader
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:31:1: functionHeader : ^( FUNC_HDR type K_ID ( formalParameter )+ ) ;
- */
-- (void) functionHeader
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:32:5: ( ^( FUNC_HDR type K_ID ( formalParameter )+ ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:32:9: ^( FUNC_HDR type K_ID ( formalParameter )+ ) // alt
-        {
-        [self match:input TokenType:FUNC_HDR Follow:FOLLOW_FUNC_HDR_in_functionHeader171];   /* element() */
-
-            [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-            [self pushFollow:FOLLOW_type_in_functionHeader173];
-            [self type];
-            [self popFollow];
-
-              /* element() */
-            [self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_functionHeader175];   /* element() */
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:32:30: ( formalParameter )+ // positiveClosureBlock
-            NSInteger cnt3=0;
-            do {
-                NSInteger alt3=2;
-                NSInteger LA3_0 = [input LA:1];
-                if ( (LA3_0==ARG_DEF) ) {
-                    alt3=1;
-                }
-
-
-                switch (alt3) {
-                    case 1 : ;
-                        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:32:30: formalParameter // alt
-                        {
-                        [self pushFollow:FOLLOW_formalParameter_in_functionHeader177];
-                        [self formalParameter];
-                        [self popFollow];
-
-                          /* element() */
-                         /* elements */
-                        }
-                        break;
-
-                    default :
-                        if ( cnt3 >= 1 )
-                            goto loop3;
-                        ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:3];
-                        @throw eee;
-                }
-                cnt3++;
-            } while (YES);
-            loop3: ;
-              /* element() */
-
-            [self match:input TokenType:ANTLRTokenTypeUP Follow:nil];   /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end functionHeader */
-/*
- * $ANTLR start formalParameter
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:35:1: formalParameter : ^( ARG_DEF type declarator ) ;
- */
-- (void) formalParameter
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:36:5: ( ^( ARG_DEF type declarator ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:36:9: ^( ARG_DEF type declarator ) // alt
-        {
-        [self match:input TokenType:ARG_DEF Follow:FOLLOW_ARG_DEF_in_formalParameter199];   /* element() */
-
-            [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-            [self pushFollow:FOLLOW_type_in_formalParameter201];
-            [self type];
-            [self popFollow];
-
-              /* element() */
-            [self pushFollow:FOLLOW_declarator_in_formalParameter203];
-            [self declarator];
-            [self popFollow];
-
-              /* element() */
-
-            [self match:input TokenType:ANTLRTokenTypeUP Follow:nil];   /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end formalParameter */
-/*
- * $ANTLR start type
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:39:1: type : ( K_INT | K_CHAR | K_VOID | K_ID );
- */
-- (void) type
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:40:5: ( K_INT | K_CHAR | K_VOID | K_ID ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g: // alt
-        {
-        if ((([input LA:1] >= K_ID) && ([input LA:1] <= K_VOID))) {
-            [input consume];
-            [state setIsErrorRecovery:NO];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-            @throw mse;
-        }
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end type */
-/*
- * $ANTLR start block
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:46:1: block : ^( BLOCK ( variable )* ( stat )* ) ;
- */
-- (void) block
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:47:5: ( ^( BLOCK ( variable )* ( stat )* ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:47:9: ^( BLOCK ( variable )* ( stat )* ) // alt
-        {
-        [self match:input TokenType:BLOCK Follow:FOLLOW_BLOCK_in_block286];   /* element() */
-
-        if ( [input LA:1] == ANTLRTokenTypeDOWN ) {
-            [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-            do {
-                NSInteger alt4=2;
-                NSInteger LA4_0 = [input LA:1];
-                if ( (LA4_0==VAR_DEF) ) {
-                    alt4=1;
-                }
-
-
-                switch (alt4) {
-                    case 1 : ;
-                        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:47:17: variable // alt
-                        {
-                        [self pushFollow:FOLLOW_variable_in_block288];
-                        [self variable];
-                        [self popFollow];
-
-                          /* element() */
-                         /* elements */
-                        }
-                        break;
-
-                    default :
-                        goto loop4;
-                }
-            } while (YES);
-            loop4: ;
-              /* element() */
-            do {
-                NSInteger alt5=2;
-                NSInteger LA5_0 = [input LA:1];
-                if ( ((LA5_0>=BLOCK && LA5_0<=K_INT)||(LA5_0>=K_FOR && LA5_0<=K_PLUS)) ) {
-                    alt5=1;
-                }
-
-
-                switch (alt5) {
-                    case 1 : ;
-                        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:47:27: stat // alt
-                        {
-                        [self pushFollow:FOLLOW_stat_in_block291];
-                        [self stat];
-                        [self popFollow];
-
-                          /* element() */
-                         /* elements */
-                        }
-                        break;
-
-                    default :
-                        goto loop5;
-                }
-            } while (YES);
-            loop5: ;
-              /* element() */
-
-            [self match:input TokenType:ANTLRTokenTypeUP Follow:nil]; 
-        }  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end block */
-/*
- * $ANTLR start stat
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:50:1: stat : ( forStat | expr | block );
- */
-- (void) stat
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:50:5: ( forStat | expr | block ) //ruleblock
-        NSInteger alt6=3;
-        switch ([input LA:1]) {
-            case K_FOR: ;
-                {
-                alt6=1;
-                }
-                break;
-            case K_ID: ;
-            case K_INT: ;
-            case K_EQ: ;
-            case K_EQEQ: ;
-            case K_LT: ;
-            case K_PLUS: ;
-                {
-                alt6=2;
-                }
-                break;
-            case BLOCK: ;
-                {
-                alt6=3;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:6 state:0 stream:input];
-            @throw nvae;
-        }
-
-        switch (alt6) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:50:7: forStat // alt
-                {
-                [self pushFollow:FOLLOW_forStat_in_stat305];
-                [self forStat];
-                [self popFollow];
-
-                  /* element() */
-                 /* elements */
-                }
-                break;
-            case 2 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:51:7: expr // alt
-                {
-                [self pushFollow:FOLLOW_expr_in_stat313];
-                [self expr];
-                [self popFollow];
-
-                  /* element() */
-                 /* elements */
-                }
-                break;
-            case 3 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:52:7: block // alt
-                {
-                [self pushFollow:FOLLOW_block_in_stat321];
-                [self block];
-                [self popFollow];
-
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end stat */
-/*
- * $ANTLR start forStat
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:55:1: forStat : ^( K_FOR expr expr expr block ) ;
- */
-- (void) forStat
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:56:5: ( ^( K_FOR expr expr expr block ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:56:9: ^( K_FOR expr expr expr block ) // alt
-        {
-        [self match:input TokenType:K_FOR Follow:FOLLOW_K_FOR_in_forStat341];   /* element() */
-
-            [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-            [self pushFollow:FOLLOW_expr_in_forStat343];
-            [self expr];
-            [self popFollow];
-
-              /* element() */
-            [self pushFollow:FOLLOW_expr_in_forStat345];
-            [self expr];
-            [self popFollow];
-
-              /* element() */
-            [self pushFollow:FOLLOW_expr_in_forStat347];
-            [self expr];
-            [self popFollow];
-
-              /* element() */
-            [self pushFollow:FOLLOW_block_in_forStat349];
-            [self block];
-            [self popFollow];
-
-              /* element() */
-
-            [self match:input TokenType:ANTLRTokenTypeUP Follow:nil];   /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end forStat */
-/*
- * $ANTLR start expr
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:59:1: expr : ( ^( K_EQEQ expr expr ) | ^( K_LT expr expr ) | ^( K_PLUS expr expr ) | ^( K_EQ K_ID e= expr ) | atom );
- */
-- (SimpleCTP_expr_return *) expr
-{
-    /* ruleScopeSetUp */
-
-    SimpleCTP_expr_return * retval = [SimpleCTP_expr_return newSimpleCTP_expr_return];
-    [retval setStart:[input LT:1]];
-
-    @try {
-        ANTLRCommonTree *K_ID1 = nil;
-        SimpleCTP_expr_return * e = nil;
-
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:59:5: ( ^( K_EQEQ expr expr ) | ^( K_LT expr expr ) | ^( K_PLUS expr expr ) | ^( K_EQ K_ID e= expr ) | atom ) //ruleblock
-        NSInteger alt7=5;
-        switch ([input LA:1]) {
-            case K_EQEQ: ;
-                {
-                alt7=1;
-                }
-                break;
-            case K_LT: ;
-                {
-                alt7=2;
-                }
-                break;
-            case K_PLUS: ;
-                {
-                alt7=3;
-                }
-                break;
-            case K_EQ: ;
-                {
-                alt7=4;
-                }
-                break;
-            case K_ID: ;
-            case K_INT: ;
-                {
-                alt7=5;
-                }
-                break;
-
-        default: ;
-            ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:7 state:0 stream:input];
-            @throw nvae;
-        }
-
-        switch (alt7) {
-            case 1 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:59:9: ^( K_EQEQ expr expr ) // alt
-                {
-                [self match:input TokenType:K_EQEQ Follow:FOLLOW_K_EQEQ_in_expr365];   /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-                    [self pushFollow:FOLLOW_expr_in_expr367];
-                    [self expr];
-                    [self popFollow];
-
-                      /* element() */
-                    [self pushFollow:FOLLOW_expr_in_expr369];
-                    [self expr];
-                    [self popFollow];
-
-                      /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeUP Follow:nil];   /* element() */
-                 /* elements */
-                }
-                break;
-            case 2 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:60:9: ^( K_LT expr expr ) // alt
-                {
-                [self match:input TokenType:K_LT Follow:FOLLOW_K_LT_in_expr381];   /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-                    [self pushFollow:FOLLOW_expr_in_expr383];
-                    [self expr];
-                    [self popFollow];
-
-                      /* element() */
-                    [self pushFollow:FOLLOW_expr_in_expr385];
-                    [self expr];
-                    [self popFollow];
-
-                      /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeUP Follow:nil];   /* element() */
-                 /* elements */
-                }
-                break;
-            case 3 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:61:9: ^( K_PLUS expr expr ) // alt
-                {
-                [self match:input TokenType:K_PLUS Follow:FOLLOW_K_PLUS_in_expr397];   /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-                    [self pushFollow:FOLLOW_expr_in_expr399];
-                    [self expr];
-                    [self popFollow];
-
-                      /* element() */
-                    [self pushFollow:FOLLOW_expr_in_expr401];
-                    [self expr];
-                    [self popFollow];
-
-                      /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeUP Follow:nil];   /* element() */
-                 /* elements */
-                }
-                break;
-            case 4 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:62:9: ^( K_EQ K_ID e= expr ) // alt
-                {
-                [self match:input TokenType:K_EQ Follow:FOLLOW_K_EQ_in_expr413];   /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; 
-                    K_ID1=(ANTLRCommonTree *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_expr415];   /* element() */
-                    [self pushFollow:FOLLOW_expr_in_expr419];
-                    e = [self expr];
-                    [self popFollow];
-
-                      /* element() */
-
-                    [self match:input TokenType:ANTLRTokenTypeUP Follow:nil];   /* element() */
-                 NSLog(@"assigning %@ to variable %@", (e!=nil?[[input getTokenStream] toStringFromStart:[[input getTreeAdaptor] getTokenStartIndex:[e getStart]]
-                         ToEnd:[[input getTreeAdaptor] getTokenStopIndex:[e getStart]]]:nil), (K_ID1!=nil?[K_ID1 getText]:0));   /* element() */
-                 /* elements */
-                }
-                break;
-            case 5 : ;
-                // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:63:9: atom // alt
-                {
-                [self pushFollow:FOLLOW_atom_in_expr432];
-                [self atom];
-                [self popFollow];
-
-                  /* element() */
-                 /* elements */
-                }
-                break;
-
-        }
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end expr */
-/*
- * $ANTLR start atom
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:66:1: atom : ( K_ID | K_INT );
- */
-- (void) atom
-{
-    /* ruleScopeSetUp */
-
-    @try {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g:67:5: ( K_ID | K_INT ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/simplecTreeParser/SimpleCTP.g: // alt
-        {
-        if ((([input LA:1] >= K_ID) && ([input LA:1] <= K_INT))) {
-            [input consume];
-            [state setIsErrorRecovery:NO];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException exceptionWithSet:nil stream:input];
-            @throw mse;
-        }
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }    @finally {
-    }
-    return ;
-}
-/* $ANTLR end atom */
-
-@end /* end of SimpleCTP implementation line 669 */
-
-
-/* End of code
- * =============================================================================
- */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.tokens b/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.tokens
deleted file mode 100644
index 6d06db9..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.tokens
+++ /dev/null
@@ -1,29 +0,0 @@
-K_ID=10
-T__26=26
-T__25=25
-T__24=24
-T__23=23
-K_EQEQ=16
-T__22=22
-K_INT=11
-T__21=21
-K_FOR=14
-FUNC_HDR=6
-FUNC_DEF=8
-K_INT_TYPE=19
-FUNC_DECL=7
-ARG_DEF=5
-WS=20
-K_EQ=15
-BLOCK=9
-K_LT=17
-K_CHAR=12
-K_VOID=13
-VAR_DEF=4
-K_PLUS=18
-';'=21
-'}'=26
-'('=22
-','=23
-')'=24
-'{'=25
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/Lang.g b/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/Lang.g
deleted file mode 100755
index 90a2512..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/Lang.g
+++ /dev/null
@@ -1,22 +0,0 @@
-grammar Lang;
-options {
-	output=AST;
-	language = ObjC;
-	ASTLabelType=ANTLRCommonTree;
-}
-
-tokens {DECL;} // an imaginary node
-
-start : decl ;
-
-decl : type ID ';' -> ^(DECL type ID)
-     ;
-type : INTTYPE  // automatic tree construction builds a node for this rule
-     | FLOATTYPE
-     ;
-
-INTTYPE : 'int' ;
-FLOATTYPE : 'float' ;
-ID : 'a'..'z'+ ;
-INT : '0'..'9'+ ;
-WS : (' '|'\n') {$channel=HIDDEN;} ;
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g b/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g
deleted file mode 100755
index 07d3e39..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g
+++ /dev/null
@@ -1,17 +0,0 @@
-tree grammar LangDumpDecl;
-options {
-    tokenVocab=Lang;
-	language = ObjC;
-    ASTLabelType = ANTLRCommonTree;
-}
-
-decl : ^(DECL type declarator)
-       // label.start, label.start, label.text
-       { NSLog(@"int \%@", $declarator.text);}
-     ;
-
-type : INTTYPE ;
-
-declarator
-     : ID
-     ;
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.h b/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.h
deleted file mode 100644
index daaa14d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} LangDumpDecl.g 2011-05-06 17:39:09
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* treeParserHeaderFile */
-#ifndef ANTLR3TokenTypeAlreadyDefined
-#define ANTLR3TokenTypeAlreadyDefined
-typedef enum {
-    ANTLR_EOF = -1,
-    INVALID,
-    EOR,
-    DOWN,
-    UP,
-    MIN
-} ANTLR3TokenType;
-#endif
-
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define T__10 10
-#define DECL 4
-#define FLOATTYPE 5
-#define ID 6
-#define INT 7
-#define INTTYPE 8
-#define WS 9
-#pragma mark Dynamic Global Scopes
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-/* returnScopeInterface LangDumpDecl_declarator_return */
-@interface LangDumpDecl_declarator_return :ANTLRTreeRuleReturnScope { /* returnScopeInterface line 1838 */
- /* ObjC start of memVars() */
-}
-/* start properties */
-+ (LangDumpDecl_declarator_return *)newLangDumpDecl_declarator_return;
-/* this is start of set and get methods */
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-
-/* Interface grammar class */
-@interface LangDumpDecl : ANTLRTreeParser { /* line 572 */
-/* ObjC start of ruleAttributeScopeMemVar */
-
-
-/* ObjC end of ruleAttributeScopeMemVar */
-/* ObjC start of globalAttributeScopeMemVar */
-
-
-/* ObjC end of globalAttributeScopeMemVar */
-/* ObjC start of actions.(actionScope).memVars */
-/* ObjC end of actions.(actionScope).memVars */
-/* ObjC start of memVars */
-/* ObjC end of memVars */
-
- }
-
-/* ObjC start of actions.(actionScope).properties */
-/* ObjC end of actions.(actionScope).properties */
-/* ObjC start of properties */
-/* ObjC end of properties */
-
-+ (void) initialize;
-+ (id) newLangDumpDecl:(id<ANTLRTreeNodeStream>)aStream;
-/* ObjC start of actions.(actionScope).methodsDecl */
-/* ObjC end of actions.(actionScope).methodsDecl */
-
-/* ObjC start of methodsDecl */
-/* ObjC end of methodsDecl */
-
-- (void)decl; 
-- (void)type; 
-- (LangDumpDecl_declarator_return *)declarator; 
-
-
-@end /* end of LangDumpDecl interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.m b/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.m
deleted file mode 100644
index 72bd695..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.m
+++ /dev/null
@@ -1,258 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : LangDumpDecl.g
- *     -                            On : 2011-05-06 17:39:09
- *     -           for the tree parser : LangDumpDeclTreeParser
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} LangDumpDecl.g 2011-05-06 17:39:09
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "LangDumpDecl.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_DECL_in_decl45;
-static const unsigned long long FOLLOW_DECL_in_decl45_data[] = { 0x0000000000000004LL};
-static ANTLRBitSet *FOLLOW_type_in_decl47;
-static const unsigned long long FOLLOW_type_in_decl47_data[] = { 0x0000000000000040LL};
-static ANTLRBitSet *FOLLOW_declarator_in_decl49;
-static const unsigned long long FOLLOW_declarator_in_decl49_data[] = { 0x0000000000000008LL};
-static ANTLRBitSet *FOLLOW_INTTYPE_in_type81;
-static const unsigned long long FOLLOW_INTTYPE_in_type81_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_ID_in_declarator95;
-static const unsigned long long FOLLOW_ID_in_declarator95_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule Return Scopes start
-@implementation LangDumpDecl_declarator_return /* returnScope */
- /* start of synthesize -- OBJC-Line 1837 */
-+ (LangDumpDecl_declarator_return *)newLangDumpDecl_declarator_return
-{
-    return [[[LangDumpDecl_declarator_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-//#pragma mark Rule return scopes start
-//
-
-#pragma mark Rule return scopes start
-
-@implementation LangDumpDecl  // line 637
-
-/* ObjC start of ruleAttributeScope */
-#pragma mark Dynamic Rule Scopes
-/* ObjC end of ruleAttributeScope */
-#pragma mark global Attribute Scopes
-/* ObjC start globalAttributeScope */
-/* ObjC end globalAttributeScope */
-/* ObjC start actions.(actionScope).synthesize */
-/* ObjC end actions.(actionScope).synthesize */
-/* ObjC start synthesize() */
-/* ObjC end synthesize() */
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_DECL_in_decl45 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_DECL_in_decl45_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_decl47 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_decl47_data Count:(NSUInteger)1] retain];
-    FOLLOW_declarator_in_decl49 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_decl49_data Count:(NSUInteger)1] retain];
-    FOLLOW_INTTYPE_in_type81 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_INTTYPE_in_type81_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_declarator95 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_declarator95_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"DECL", @"FLOATTYPE", @"ID", @"INT", @"INTTYPE", @"WS", @"';'", nil] retain]];
-    [ANTLRBaseRecognizer setGrammarFileName:@"LangDumpDecl.g"];
-}
-
-+ (LangDumpDecl *)newLangDumpDecl:(id<ANTLRTreeNodeStream>)aStream
-{
-    return [[LangDumpDecl alloc] initWithStream:aStream];
-
-
-}
-
-- (id) initWithStream:(id<ANTLRTreeNodeStream>)aStream
-{
-    self = [super initWithStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:3+1] retain]];
-    if ( self != nil ) {
-
-
-        /* start of actions-actionScope-init */
-        /* start of init */
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* ObjC start members */
-/* ObjC end members */
-/* ObjC start actions.(actionScope).methods */
-/* ObjC end actions.(actionScope).methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-/* ObjC start rules */
-/*
- * $ANTLR start decl
- * LangDumpDecl.g:8:1: decl : ^( DECL type declarator ) ;
- */
-- (void) decl
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-         LangDumpDecl_declarator_return * declarator1 = nil ;
-         
-
-        // LangDumpDecl.g:8:6: ( ^( DECL type declarator ) ) // ruleBlockSingleAlt
-        // LangDumpDecl.g:8:8: ^( DECL type declarator ) // alt
-        {
-        [self match:input TokenType:DECL Follow:FOLLOW_DECL_in_decl45]; 
-
-            [self match:input TokenType:DOWN Follow:nil]; 
-            /* ruleRef */
-            [self pushFollow:FOLLOW_type_in_decl47];
-            [self type];
-
-            [self popFollow];
-
-
-
-            /* ruleRef */
-            [self pushFollow:FOLLOW_declarator_in_decl49];
-            declarator1 = [self declarator];
-
-            [self popFollow];
-
-
-
-            [self match:input TokenType:UP Follow:nil]; 
-
-
-         NSLog(@"int %@", (declarator1!=nil?[[input getTokenStream] toStringFromStart:[[input getTreeAdaptor] getTokenStartIndex:[declarator1 getStart]]ToEnd:[[input getTreeAdaptor] getTokenStopIndex:[declarator1 getStart]]]:0));
-
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end decl */
-
-/*
- * $ANTLR start type
- * LangDumpDecl.g:13:1: type : INTTYPE ;
- */
-- (void) type
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        // LangDumpDecl.g:13:6: ( INTTYPE ) // ruleBlockSingleAlt
-        // LangDumpDecl.g:13:8: INTTYPE // alt
-        {
-        [self match:input TokenType:INTTYPE Follow:FOLLOW_INTTYPE_in_type81]; 
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return ;
-}
-/* $ANTLR end type */
-
-/*
- * $ANTLR start declarator
- * LangDumpDecl.g:15:1: declarator : ID ;
- */
-- (LangDumpDecl_declarator_return *) declarator
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    LangDumpDecl_declarator_return * retval = [LangDumpDecl_declarator_return newLangDumpDecl_declarator_return];
-    [retval setStart:[input LT:1]];
-
-
-    @try {
-        // LangDumpDecl.g:16:6: ( ID ) // ruleBlockSingleAlt
-        // LangDumpDecl.g:16:8: ID // alt
-        {
-        [self match:input TokenType:ID Follow:FOLLOW_ID_in_declarator95]; 
-
-        }
-
-        // token+rule list labels
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end declarator */
-/* ObjC end rules */
-
-@end /* end of LangDumpDecl implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangLexer.h
deleted file mode 100644
index d647d13..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangLexer.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} Lang.g 2011-05-06 17:38:52
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define T__10 10
-#define DECL 4
-#define FLOATTYPE 5
-#define ID 6
-#define INT 7
-#define INTTYPE 8
-#define WS 9
-/* interface lexer class */
-@interface LangLexer : ANTLRLexer { // line 283
-/* ObjC start of actions.lexer.memVars */
-/* ObjC end of actions.lexer.memVars */
-}
-+ (void) initialize;
-+ (LangLexer *)newLangLexerWithCharStream:(id<ANTLRCharStream>)anInput;
-/* ObjC start actions.lexer.methodsDecl */
-/* ObjC end actions.lexer.methodsDecl */
-- (void) mT__10 ; 
-- (void) mINTTYPE ; 
-- (void) mFLOATTYPE ; 
-- (void) mID ; 
-- (void) mINT ; 
-- (void) mWS ; 
-- (void) mTokens ; 
-
-@end /* end of LangLexer interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangLexer.m
deleted file mode 100644
index 12cf90c..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangLexer.m
+++ /dev/null
@@ -1,532 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : Lang.g
- *     -                            On : 2011-05-06 17:38:52
- *     -                 for the lexer : LangLexerLexer
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} Lang.g 2011-05-06 17:38:52
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "LangLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-/** As per Terence: No returns for lexer rules! */
-@implementation LangLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"Lang.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (LangLexer *)newLangLexerWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    return [[LangLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    self = [super initWithCharStream:anInput State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:7+1] retain]];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* ObjC Start of actions.lexer.methods */
-/* ObjC end of actions.lexer.methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-
-/* Start of Rules */
-// $ANTLR start "T__10"
-- (void) mT__10
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = T__10;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Lang.g:7:7: ( ';' ) // ruleBlockSingleAlt
-        // Lang.g:7:9: ';' // alt
-        {
-        [self matchChar:';']; 
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "T__10" */
-
-// $ANTLR start "INTTYPE"
-- (void) mINTTYPE
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = INTTYPE;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Lang.g:18:9: ( 'int' ) // ruleBlockSingleAlt
-        // Lang.g:18:11: 'int' // alt
-        {
-        [self matchString:@"int"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "INTTYPE" */
-
-// $ANTLR start "FLOATTYPE"
-- (void) mFLOATTYPE
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = FLOATTYPE;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Lang.g:19:11: ( 'float' ) // ruleBlockSingleAlt
-        // Lang.g:19:13: 'float' // alt
-        {
-        [self matchString:@"float"]; 
-
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "FLOATTYPE" */
-
-// $ANTLR start "ID"
-- (void) mID
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = ID;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Lang.g:20:4: ( ( 'a' .. 'z' )+ ) // ruleBlockSingleAlt
-        // Lang.g:20:6: ( 'a' .. 'z' )+ // alt
-        {
-        // Lang.g:20:6: ( 'a' .. 'z' )+ // positiveClosureBlock
-        NSInteger cnt1 = 0;
-        do {
-            NSInteger alt1 = 2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // Lang.g: // alt
-                    {
-                    if ((([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "ID" */
-
-// $ANTLR start "INT"
-- (void) mINT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = INT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Lang.g:21:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
-        // Lang.g:21:7: ( '0' .. '9' )+ // alt
-        {
-        // Lang.g:21:7: ( '0' .. '9' )+ // positiveClosureBlock
-        NSInteger cnt2 = 0;
-        do {
-            NSInteger alt2 = 2;
-            NSInteger LA2_0 = [input LA:1];
-            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
-                alt2=1;
-            }
-
-
-            switch (alt2) {
-                case 1 : ;
-                    // Lang.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt2 >= 1 )
-                        goto loop2;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:2];
-                    @throw eee;
-            }
-            cnt2++;
-        } while (YES);
-        loop2: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "INT" */
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // Lang.g:22:4: ( ( ' ' | '\\n' ) ) // ruleBlockSingleAlt
-        // Lang.g:22:6: ( ' ' | '\\n' ) // alt
-        {
-        if ([input LA:1] == '\n'||[input LA:1] == ' ') {
-            [input consume];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            [self recover:mse];
-            @throw mse;
-        }
-
-
-        _channel=HIDDEN;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "WS" */
-
-- (void) mTokens
-{
-    // Lang.g:1:8: ( T__10 | INTTYPE | FLOATTYPE | ID | INT | WS ) //ruleblock
-    NSInteger alt3=6;
-    unichar charLA3 = [input LA:1];
-    switch (charLA3) {
-        case ';': ;
-            {
-            alt3=1;
-            }
-            break;
-        case 'i': ;
-            {
-            NSInteger LA3_2 = [input LA:2];
-
-            if ( (LA3_2=='n') ) {
-                NSInteger LA3_7 = [input LA:3];
-
-                if ( (LA3_7=='t') ) {
-                    NSInteger LA3_9 = [input LA:4];
-
-                    if ( ((LA3_9 >= 'a' && LA3_9 <= 'z')) ) {
-                        alt3=4;
-                    }
-                    else {
-                        alt3 = 2;
-                    }
-                }
-                else {
-                    alt3 = 4;
-                }
-            }
-            else {
-                alt3 = 4;
-            }
-            }
-            break;
-        case 'f': ;
-            {
-            NSInteger LA3_3 = [input LA:2];
-
-            if ( (LA3_3=='l') ) {
-                NSInteger LA3_8 = [input LA:3];
-
-                if ( (LA3_8=='o') ) {
-                    NSInteger LA3_10 = [input LA:4];
-
-                    if ( (LA3_10=='a') ) {
-                        NSInteger LA3_12 = [input LA:5];
-
-                        if ( (LA3_12=='t') ) {
-                            NSInteger LA3_13 = [input LA:6];
-
-                            if ( ((LA3_13 >= 'a' && LA3_13 <= 'z')) ) {
-                                alt3=4;
-                            }
-                            else {
-                                alt3 = 3;
-                            }
-                        }
-                        else {
-                            alt3 = 4;
-                        }
-                    }
-                    else {
-                        alt3 = 4;
-                    }
-                }
-                else {
-                    alt3 = 4;
-                }
-            }
-            else {
-                alt3 = 4;
-            }
-            }
-            break;
-        case 'a': ;
-        case 'b': ;
-        case 'c': ;
-        case 'd': ;
-        case 'e': ;
-        case 'g': ;
-        case 'h': ;
-        case 'j': ;
-        case 'k': ;
-        case 'l': ;
-        case 'm': ;
-        case 'n': ;
-        case 'o': ;
-        case 'p': ;
-        case 'q': ;
-        case 'r': ;
-        case 's': ;
-        case 't': ;
-        case 'u': ;
-        case 'v': ;
-        case 'w': ;
-        case 'x': ;
-        case 'y': ;
-        case 'z': ;
-            {
-            alt3=4;
-            }
-            break;
-        case '0': ;
-        case '1': ;
-        case '2': ;
-        case '3': ;
-        case '4': ;
-        case '5': ;
-        case '6': ;
-        case '7': ;
-        case '8': ;
-        case '9': ;
-            {
-            alt3=5;
-            }
-            break;
-        case '\n': ;
-        case ' ': ;
-            {
-            alt3=6;
-            }
-            break;
-
-    default: ;
-        ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:3 state:0 stream:input];
-        nvae.c = charLA3;
-        @throw nvae;
-
-    }
-
-    switch (alt3) {
-        case 1 : ;
-            // Lang.g:1:10: T__10 // alt
-            {
-            [self mT__10]; 
-
-
-
-            }
-            break;
-        case 2 : ;
-            // Lang.g:1:16: INTTYPE // alt
-            {
-            [self mINTTYPE]; 
-
-
-
-            }
-            break;
-        case 3 : ;
-            // Lang.g:1:24: FLOATTYPE // alt
-            {
-            [self mFLOATTYPE]; 
-
-
-
-            }
-            break;
-        case 4 : ;
-            // Lang.g:1:34: ID // alt
-            {
-            [self mID]; 
-
-
-
-            }
-            break;
-        case 5 : ;
-            // Lang.g:1:37: INT // alt
-            {
-            [self mINT]; 
-
-
-
-            }
-            break;
-        case 6 : ;
-            // Lang.g:1:41: WS // alt
-            {
-            [self mWS]; 
-
-
-
-            }
-            break;
-
-    }
-
-}
-
-@end /* end of LangLexer implementation line 397 */
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangParser.h b/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangParser.h
deleted file mode 100644
index ba51fba..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangParser.h
+++ /dev/null
@@ -1,149 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} Lang.g 2011-05-06 17:38:52
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* parserHeaderFile */
-#ifndef ANTLR3TokenTypeAlreadyDefined
-#define ANTLR3TokenTypeAlreadyDefined
-typedef enum {
-    ANTLR_EOF = -1,
-    INVALID,
-    EOR,
-    DOWN,
-    UP,
-    MIN
-} ANTLR3TokenType;
-#endif
-
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define T__10 10
-#define DECL 4
-#define FLOATTYPE 5
-#define ID 6
-#define INT 7
-#define INTTYPE 8
-#define WS 9
-#pragma mark Dynamic Global Scopes
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-/* returnScopeInterface LangParser_start_return */
-@interface LangParser_start_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (LangParser_start_return *)newLangParser_start_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface LangParser_decl_return */
-@interface LangParser_decl_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (LangParser_decl_return *)newLangParser_decl_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface LangParser_type_return */
-@interface LangParser_type_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (LangParser_type_return *)newLangParser_type_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-
-/* Interface grammar class */
-@interface LangParser : ANTLRParser { /* line 572 */
-/* ObjC start of ruleAttributeScopeMemVar */
-
-
-/* ObjC end of ruleAttributeScopeMemVar */
-/* ObjC start of globalAttributeScopeMemVar */
-
-
-/* ObjC end of globalAttributeScopeMemVar */
-/* ObjC start of actions.(actionScope).memVars */
-/* ObjC end of actions.(actionScope).memVars */
-/* ObjC start of memVars */
-/* AST parserHeaderFile.memVars */
-NSInteger ruleLevel;
-NSArray *ruleNames;
-  /* AST super.memVars */
-/* AST parserMemVars */
-id<ANTLRTreeAdaptor> treeAdaptor;   /* AST parserMemVars */
-/* ObjC end of memVars */
-
- }
-
-/* ObjC start of actions.(actionScope).properties */
-/* ObjC end of actions.(actionScope).properties */
-/* ObjC start of properties */
-/* AST parserHeaderFile.properties */
-  /* AST super.properties */
-/* AST parserProperties */
-@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<ANTLRTreeAdaptor> treeAdaptor;   /* AST parserproperties */
-/* ObjC end of properties */
-
-+ (void) initialize;
-+ (id) newLangParser:(id<ANTLRTokenStream>)aStream;
-/* ObjC start of actions.(actionScope).methodsDecl */
-/* ObjC end of actions.(actionScope).methodsDecl */
-
-/* ObjC start of methodsDecl */
-/* AST parserHeaderFile.methodsDecl */
-  /* AST super.methodsDecl */
-/* AST parserMethodsDecl */
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)theTreeAdaptor;   /* AST parsermethodsDecl */
-/* ObjC end of methodsDecl */
-
-- (LangParser_start_return *)start; 
-- (LangParser_decl_return *)decl; 
-- (LangParser_type_return *)type; 
-
-
-@end /* end of LangParser interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangParser.m b/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangParser.m
deleted file mode 100644
index 6c49d09..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangParser.m
+++ /dev/null
@@ -1,515 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : Lang.g
- *     -                            On : 2011-05-06 17:38:52
- *     -                for the parser : LangParserParser
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} Lang.g 2011-05-06 17:38:52
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "LangParser.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_decl_in_start41;
-static const unsigned long long FOLLOW_decl_in_start41_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_type_in_decl50;
-static const unsigned long long FOLLOW_type_in_decl50_data[] = { 0x0000000000000040LL};
-static ANTLRBitSet *FOLLOW_ID_in_decl52;
-static const unsigned long long FOLLOW_ID_in_decl52_data[] = { 0x0000000000000400LL};
-static ANTLRBitSet *FOLLOW_10_in_decl54;
-static const unsigned long long FOLLOW_10_in_decl54_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule Return Scopes start
-@implementation LangParser_start_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (LangParser_start_return *)newLangParser_start_return
-{
-    return [[[LangParser_start_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation LangParser_decl_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (LangParser_decl_return *)newLangParser_decl_return
-{
-    return [[[LangParser_decl_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation LangParser_type_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (LangParser_type_return *)newLangParser_type_return
-{
-    return [[[LangParser_type_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-//#pragma mark Rule return scopes start
-//
-
-#pragma mark Rule return scopes start
-
-@implementation LangParser  // line 637
-
-/* ObjC start of ruleAttributeScope */
-#pragma mark Dynamic Rule Scopes
-/* ObjC end of ruleAttributeScope */
-#pragma mark global Attribute Scopes
-/* ObjC start globalAttributeScope */
-/* ObjC end globalAttributeScope */
-/* ObjC start actions.(actionScope).synthesize */
-/* ObjC end actions.(actionScope).synthesize */
-/* ObjC start synthesize() */
-/* AST genericParser.synthesize */
-/* AST parserProperties */
-@synthesize treeAdaptor;
-/* ObjC end synthesize() */
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_decl_in_start41 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_decl_in_start41_data Count:(NSUInteger)1] retain];
-    FOLLOW_type_in_decl50 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_decl50_data Count:(NSUInteger)1] retain];
-    FOLLOW_ID_in_decl52 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_decl52_data Count:(NSUInteger)1] retain];
-    FOLLOW_10_in_decl54 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_10_in_decl54_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"DECL", @"FLOATTYPE", @"ID", @"INT", @"INTTYPE", @"WS", @"';'", nil] retain]];
-    [ANTLRBaseRecognizer setGrammarFileName:@"Lang.g"];
-}
-
-+ (LangParser *)newLangParser:(id<ANTLRTokenStream>)aStream
-{
-    return [[LangParser alloc] initWithTokenStream:aStream];
-
-
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)aStream
-{
-    self = [super initWithTokenStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:3+1] retain]];
-    if ( self != nil ) {
-
-
-        /* start of actions-actionScope-init */
-        /* start of init */
-        /* AST genericParser.init */
-        [self setTreeAdaptor:[[ANTLRCommonTreeAdaptor newTreeAdaptor] retain]];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    /* AST genericParser.dealloc */
-    [self setTreeAdaptor:nil];
-
-    [super dealloc];
-}
-
-/* ObjC start members */
-/* ObjC end members */
-/* ObjC start actions.(actionScope).methods */
-/* ObjC end actions.(actionScope).methods */
-/* ObjC start methods() */
-/* AST genericParser.methods */
-/* AST parserMethods */
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor
-{
-	return treeAdaptor;
-}
-
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-{
-	if (aTreeAdaptor != treeAdaptor) {
-		treeAdaptor = aTreeAdaptor;
-	}
-}
-/* ObjC end methods() */
-/* ObjC start rules */
-/*
- * $ANTLR start start
- * Lang.g:10:1: start : decl ;
- */
-- (LangParser_start_return *) start
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    LangParser_start_return * retval = [LangParser_start_return newLangParser_start_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-         LangParser_decl_return * decl1 = nil ;
-         
-
-
-        // Lang.g:10:7: ( decl ) // ruleBlockSingleAlt
-        // Lang.g:10:9: decl // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-        /* ASTParser ruleRef */
-        /* ruleRef */
-        [self pushFollow:FOLLOW_decl_in_start41];
-        decl1 = [self decl];
-
-        [self popFollow];
-
-
-        [treeAdaptor addChild:[decl1 getTree] toTree:root_0];
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end start */
-
-/*
- * $ANTLR start decl
- * Lang.g:12:1: decl : type ID ';' -> ^( DECL type ID ) ;
- */
-- (LangParser_decl_return *) decl
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    LangParser_decl_return * retval = [LangParser_decl_return newLangParser_decl_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *ID3 = nil;
-        ANTLRCommonToken *char_literal4 = nil;
-         LangParser_type_return * type2 = nil ;
-         
-
-        ANTLRCommonTree *ID3_tree=nil;
-        ANTLRCommonTree *char_literal4_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_10 =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token 10"] retain];
-        ANTLRRewriteRuleTokenStream *stream_ID =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token ID"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_type =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule type"] retain];
-        // Lang.g:12:6: ( type ID ';' -> ^( DECL type ID ) ) // ruleBlockSingleAlt
-        // Lang.g:12:8: type ID ';' // alt
-        {
-        /* ruleRef */
-        [self pushFollow:FOLLOW_type_in_decl50];
-        type2 = [self type];
-
-        [self popFollow];
-
-
-        [stream_type addElement:[type2 getTree]];
-
-        ID3=(ANTLRCommonToken *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_decl52];  
-            [stream_ID addElement:ID3];
-
-
-        char_literal4=(ANTLRCommonToken *)[self match:input TokenType:10 Follow:FOLLOW_10_in_decl54];  
-            [stream_10 addElement:char_literal4];
-
-
-        // AST REWRITE
-        // elements: ID, type
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 12:20: -> ^( DECL type ID )
-        {
-            // Lang.g:12:23: ^( DECL type ID )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:
-                        [treeAdaptor createTree:DECL Text:@"DECL"]
-                 old:root_1];
-
-                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
-
-                 // TODO: args: 
-                [treeAdaptor addChild:
-                            [stream_ID nextNode]
-                 toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-
-        retval.tree = root_0;
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        [stream_10 release];
-        [stream_ID release];
-        [stream_type release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end decl */
-
-/*
- * $ANTLR start type
- * Lang.g:14:1: type : ( INTTYPE | FLOATTYPE );
- */
-- (LangParser_type_return *) type
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    LangParser_type_return * retval = [LangParser_type_return newLangParser_type_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *set5 = nil;
-
-        ANTLRCommonTree *set5_tree=nil;
-
-        // Lang.g:14:6: ( INTTYPE | FLOATTYPE ) // ruleBlockSingleAlt
-        // Lang.g: // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-        /* ASTParser matchRuleBlockSet */
-        /* ASTParser matchSet */
-        set5 = (ANTLRCommonToken *)[input LT:1]; /* matchSet */
-
-        if ([input LA:1] == FLOATTYPE||[input LA:1] == INTTYPE) {
-            [input consume];
-            [treeAdaptor addChild:/* ASTParser createNodeFromToken */
-            (ANTLRCommonTree *)[[treeAdaptor create:set5] retain]
-             toTree:root_0 ];
-            [state setIsErrorRecovery:NO];
-        } else {
-            ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-            @throw mse;
-        }
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end type */
-/* ObjC end rules */
-
-@end /* end of LangParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/main.m b/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/main.m
deleted file mode 100644
index d1eeaf1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/main.m
+++ /dev/null
@@ -1,55 +0,0 @@
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-#import "LangLexer.h"
-#import "LangParser.h"
-#import "LangDumpDecl.h"
-#import "stdio.h"
-#include <unistd.h>
-
-/*
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.*;
-
-public class Main {
-	public static void main(String[] args) throws Exception {
-		CharStream input = new ANTLRFileStream(args[0]);
-		LangLexer lex = new LangLexer(input);
-		CommonTokenStream tokens = new CommonTokenStream(lex);
-		LangParser parser = new LangParser(tokens);
-		//LangParser.decl_return r = parser.decl();
-		LangParser.start_return r = parser.start();
-		System.out.println("tree: "+((Tree)r.tree).toStringTree());
-		CommonTree r0 = ((CommonTree)r.tree);
-        
-		CommonTreeNodeStream nodes = new CommonTreeNodeStream(r0);
-		nodes.setTokenStream(tokens);
-		LangDumpDecl walker = new LangDumpDecl(nodes);
-		walker.decl();
-	}
-}
-*/
-
-int main(int argc, const char * argv[])
-{
-    NSError *error;
-    NSLog(@"starting treeparser\n");
-    NSString *dir = @"/Users/acondit/source/antlr3/acondit_localhost/code/antlr/main/runtime/ObjC/Framework/examples/treeparser/input";
-	NSString *string = [NSString stringWithContentsOfFile:dir  encoding:NSASCIIStringEncoding error:&error];
-	NSLog(@"input = %@", string);
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:string];
-	LangLexer *lex = [LangLexer newLangLexerWithCharStream:stream];
-    ANTLRCommonTokenStream *tokens = [ANTLRCommonTokenStream newANTLRCommonTokenStreamWithTokenSource:lex];
-    LangParser *parser = [LangParser newLangParser:tokens];
-//    LangParser_decl_return *r = [parser decl];
-    LangParser_start_return *r = [parser start];
-    NSLog( @"tree: %@", [r.tree toStringTree]);
-    ANTLRCommonTree *r0 = [r getTree];
-    
-    ANTLRCommonTreeNodeStream *nodes = [ANTLRCommonTreeNodeStream newANTLRCommonTreeNodeStream:r0];
-    [nodes setTokenStream:tokens];
-    LangDumpDecl *walker = [LangDumpDecl newLangDumpDecl:nodes];
-    [walker decl];
-
-    NSLog(@"exiting treeparser\n");
-	return 0;
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.h
deleted file mode 100644
index f51686c..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} TreeRewrite.g 2011-05-06 18:56:28
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define INT 4
-#define WS 5
-/* interface lexer class */
-@interface TreeRewriteLexer : ANTLRLexer { // line 283
-/* ObjC start of actions.lexer.memVars */
-/* ObjC end of actions.lexer.memVars */
-}
-+ (void) initialize;
-+ (TreeRewriteLexer *)newTreeRewriteLexerWithCharStream:(id<ANTLRCharStream>)anInput;
-/* ObjC start actions.lexer.methodsDecl */
-/* ObjC end actions.lexer.methodsDecl */
-- (void) mINT ; 
-- (void) mWS ; 
-- (void) mTokens ; 
-
-@end /* end of TreeRewriteLexer interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.m
deleted file mode 100644
index 0d9d31a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.m
+++ /dev/null
@@ -1,210 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : TreeRewrite.g
- *     -                            On : 2011-05-06 18:56:28
- *     -                 for the lexer : TreeRewriteLexerLexer
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} TreeRewrite.g 2011-05-06 18:56:28
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "TreeRewriteLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-/** As per Terence: No returns for lexer rules! */
-@implementation TreeRewriteLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"TreeRewrite.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (TreeRewriteLexer *)newTreeRewriteLexerWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    return [[TreeRewriteLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    self = [super initWithCharStream:anInput State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:3+1] retain]];
-    if ( self != nil ) {
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-/* ObjC Start of actions.lexer.methods */
-/* ObjC end of actions.lexer.methods */
-/* ObjC start methods() */
-/* ObjC end methods() */
-
-/* Start of Rules */
-// $ANTLR start "INT"
-- (void) mINT
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = INT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // TreeRewrite.g:15:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
-        // TreeRewrite.g:15:7: ( '0' .. '9' )+ // alt
-        {
-        // TreeRewrite.g:15:7: ( '0' .. '9' )+ // positiveClosureBlock
-        NSInteger cnt1 = 0;
-        do {
-            NSInteger alt1 = 2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0 >= '0' && LA1_0 <= '9')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // TreeRewrite.g: // alt
-                    {
-                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
-                        [input consume];
-                    } else {
-                        ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-                        [self recover:mse];
-                        @throw mse;
-                    }
-
-
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee =
-                        [ANTLREarlyExitException newException:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "INT" */
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // TreeRewrite.g:18:5: ( ' ' ) // ruleBlockSingleAlt
-        // TreeRewrite.g:18:9: ' ' // alt
-        {
-        [self matchChar:' ']; 
-
-
-        _channel=HIDDEN;
-
-
-        }
-
-        // token+rule list labels
-
-        state.type = _type;
-        state.channel = _channel;
-    }
-    @finally {
-        //
-        /* Terence's stuff */
-
-    }
-    return;
-}
-/* $ANTLR end "WS" */
-
-- (void) mTokens
-{
-    // TreeRewrite.g:1:8: ( INT | WS ) //ruleblock
-    NSInteger alt2=2;
-    NSInteger LA2_0 = [input LA:1];
-
-    if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
-        alt2=1;
-    }
-    else if ( (LA2_0==' ') ) {
-        alt2=2;
-    }
-    else {
-        ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:2 state:0 stream:input];
-        nvae.c = LA2_0;
-        @throw nvae;
-
-    }
-    switch (alt2) {
-        case 1 : ;
-            // TreeRewrite.g:1:10: INT // alt
-            {
-            [self mINT]; 
-
-
-
-            }
-            break;
-        case 2 : ;
-            // TreeRewrite.g:1:14: WS // alt
-            {
-            [self mWS]; 
-
-
-
-            }
-            break;
-
-    }
-
-}
-
-@end /* end of TreeRewriteLexer implementation line 397 */
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.h b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.h
deleted file mode 100644
index f587cdd..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.h
+++ /dev/null
@@ -1,124 +0,0 @@
-// $ANTLR ${project.version} ${buildNumber} TreeRewrite.g 2011-05-06 18:56:28
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import <ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* parserHeaderFile */
-#ifndef ANTLR3TokenTypeAlreadyDefined
-#define ANTLR3TokenTypeAlreadyDefined
-typedef enum {
-    ANTLR_EOF = -1,
-    INVALID,
-    EOR,
-    DOWN,
-    UP,
-    MIN
-} ANTLR3TokenType;
-#endif
-
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-#define EOF -1
-#define INT 4
-#define WS 5
-#pragma mark Dynamic Global Scopes
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-/* returnScopeInterface TreeRewriteParser_rule_return */
-@interface TreeRewriteParser_rule_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (TreeRewriteParser_rule_return *)newTreeRewriteParser_rule_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-/* returnScopeInterface TreeRewriteParser_subrule_return */
-@interface TreeRewriteParser_subrule_return :ANTLRParserRuleReturnScope { /* returnScopeInterface line 1838 */
-/* AST returnScopeInterface.memVars */
-ANTLRCommonTree *tree; /* ObjC start of memVars() */
-}
-/* start properties */
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (TreeRewriteParser_subrule_return *)newTreeRewriteParser_subrule_return;
-/* this is start of set and get methods */
-/* AST returnScopeInterface.methodsDecl */
-- (ANTLRCommonTree *)getTree;
-
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  /* methodsDecl */
-@end /* end of returnScopeInterface interface */
-
-
-
-
-/* Interface grammar class */
-@interface TreeRewriteParser : ANTLRParser { /* line 572 */
-/* ObjC start of ruleAttributeScopeMemVar */
-
-
-/* ObjC end of ruleAttributeScopeMemVar */
-/* ObjC start of globalAttributeScopeMemVar */
-
-
-/* ObjC end of globalAttributeScopeMemVar */
-/* ObjC start of actions.(actionScope).memVars */
-/* ObjC end of actions.(actionScope).memVars */
-/* ObjC start of memVars */
-/* AST parserHeaderFile.memVars */
-NSInteger ruleLevel;
-NSArray *ruleNames;
-  /* AST super.memVars */
-/* AST parserMemVars */
-id<ANTLRTreeAdaptor> treeAdaptor;   /* AST parserMemVars */
-/* ObjC end of memVars */
-
- }
-
-/* ObjC start of actions.(actionScope).properties */
-/* ObjC end of actions.(actionScope).properties */
-/* ObjC start of properties */
-/* AST parserHeaderFile.properties */
-  /* AST super.properties */
-/* AST parserProperties */
-@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<ANTLRTreeAdaptor> treeAdaptor;   /* AST parserproperties */
-/* ObjC end of properties */
-
-+ (void) initialize;
-+ (id) newTreeRewriteParser:(id<ANTLRTokenStream>)aStream;
-/* ObjC start of actions.(actionScope).methodsDecl */
-/* ObjC end of actions.(actionScope).methodsDecl */
-
-/* ObjC start of methodsDecl */
-/* AST parserHeaderFile.methodsDecl */
-  /* AST super.methodsDecl */
-/* AST parserMethodsDecl */
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)theTreeAdaptor;   /* AST parsermethodsDecl */
-/* ObjC end of methodsDecl */
-
-- (TreeRewriteParser_rule_return *)rule; 
-- (TreeRewriteParser_subrule_return *)subrule; 
-
-
-@end /* end of TreeRewriteParser interface */
-
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.m b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.m
deleted file mode 100644
index 9340e6e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.m
+++ /dev/null
@@ -1,383 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version ${project.version} ${buildNumber}
- *
- *     -  From the grammar source file : TreeRewrite.g
- *     -                            On : 2011-05-06 18:56:28
- *     -                for the parser : TreeRewriteParserParser
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
-*/
-// $ANTLR ${project.version} ${buildNumber} TreeRewrite.g 2011-05-06 18:56:28
-
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "TreeRewriteParser.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_INT_in_rule26;
-static const unsigned long long FOLLOW_INT_in_rule26_data[] = { 0x0000000000000010LL};
-static ANTLRBitSet *FOLLOW_subrule_in_rule28;
-static const unsigned long long FOLLOW_subrule_in_rule28_data[] = { 0x0000000000000002LL};
-static ANTLRBitSet *FOLLOW_INT_in_subrule53;
-static const unsigned long long FOLLOW_INT_in_subrule53_data[] = { 0x0000000000000002LL};
-
-
-#pragma mark Dynamic Global Scopes
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule Return Scopes start
-@implementation TreeRewriteParser_rule_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (TreeRewriteParser_rule_return *)newTreeRewriteParser_rule_return
-{
-    return [[[TreeRewriteParser_rule_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-@implementation TreeRewriteParser_subrule_return /* returnScope */
-/* AST returnScope.synthesize */
-@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
-+ (TreeRewriteParser_subrule_return *)newTreeRewriteParser_subrule_return
-{
-    return [[[TreeRewriteParser_subrule_return alloc] init] retain];
-}
-
-- (id) init
-{
-    self = [super init];
-    return self;
-}
-
-/* AST returnScope.methods */
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
-
-
-@end /* end of returnScope implementation */
-
-
-//#pragma mark Rule return scopes start
-//
-
-#pragma mark Rule return scopes start
-
-@implementation TreeRewriteParser  // line 637
-
-/* ObjC start of ruleAttributeScope */
-#pragma mark Dynamic Rule Scopes
-/* ObjC end of ruleAttributeScope */
-#pragma mark global Attribute Scopes
-/* ObjC start globalAttributeScope */
-/* ObjC end globalAttributeScope */
-/* ObjC start actions.(actionScope).synthesize */
-/* ObjC end actions.(actionScope).synthesize */
-/* ObjC start synthesize() */
-/* AST genericParser.synthesize */
-/* AST parserProperties */
-@synthesize treeAdaptor;
-/* ObjC end synthesize() */
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    FOLLOW_INT_in_rule26 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_rule26_data Count:(NSUInteger)1] retain];
-    FOLLOW_subrule_in_rule28 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_subrule_in_rule28_data Count:(NSUInteger)1] retain];
-    FOLLOW_INT_in_subrule53 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_subrule53_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"INT", @"WS", nil] retain]];
-    [ANTLRBaseRecognizer setGrammarFileName:@"TreeRewrite.g"];
-}
-
-+ (TreeRewriteParser *)newTreeRewriteParser:(id<ANTLRTokenStream>)aStream
-{
-    return [[TreeRewriteParser alloc] initWithTokenStream:aStream];
-
-
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)aStream
-{
-    self = [super initWithTokenStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:2+1] retain]];
-    if ( self != nil ) {
-
-
-        /* start of actions-actionScope-init */
-        /* start of init */
-        /* AST genericParser.init */
-        [self setTreeAdaptor:[[ANTLRCommonTreeAdaptor newTreeAdaptor] retain]];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    /* AST genericParser.dealloc */
-    [self setTreeAdaptor:nil];
-
-    [super dealloc];
-}
-
-/* ObjC start members */
-/* ObjC end members */
-/* ObjC start actions.(actionScope).methods */
-/* ObjC end actions.(actionScope).methods */
-/* ObjC start methods() */
-/* AST genericParser.methods */
-/* AST parserMethods */
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor
-{
-	return treeAdaptor;
-}
-
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-{
-	if (aTreeAdaptor != treeAdaptor) {
-		treeAdaptor = aTreeAdaptor;
-	}
-}
-/* ObjC end methods() */
-/* ObjC start rules */
-/*
- * $ANTLR start rule
- * TreeRewrite.g:8:1: rule : INT subrule -> ^( subrule INT ) ;
- */
-- (TreeRewriteParser_rule_return *) rule
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    TreeRewriteParser_rule_return * retval = [TreeRewriteParser_rule_return newTreeRewriteParser_rule_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *INT1 = nil;
-         TreeRewriteParser_subrule_return * subrule2 = nil ;
-         
-
-        ANTLRCommonTree *INT1_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_INT =
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token INT"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_subrule =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule subrule"] retain];
-        // TreeRewrite.g:8:5: ( INT subrule -> ^( subrule INT ) ) // ruleBlockSingleAlt
-        // TreeRewrite.g:8:7: INT subrule // alt
-        {
-        INT1=(ANTLRCommonToken *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_rule26];  
-            [stream_INT addElement:INT1];
-
-
-        /* ruleRef */
-        [self pushFollow:FOLLOW_subrule_in_rule28];
-        subrule2 = [self subrule];
-
-        [self popFollow];
-
-
-        [stream_subrule addElement:[subrule2 getTree]];
-
-        // AST REWRITE
-        // elements: subrule, INT
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 8:19: -> ^( subrule INT )
-        {
-            // TreeRewrite.g:8:22: ^( subrule INT )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:(id<ANTLRTree>)[stream_subrule nextNode] old:root_1];
-
-                 // TODO: args: 
-                [treeAdaptor addChild:
-                            [stream_INT nextNode]
-                 toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-
-        retval.tree = root_0;
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        [stream_INT release];
-        [stream_subrule release];
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end rule */
-
-/*
- * $ANTLR start subrule
- * TreeRewrite.g:11:1: subrule : INT ;
- */
-- (TreeRewriteParser_subrule_return *) subrule
-{
-    /* my ruleScopeSetUp */
-    /* Terence's stuff */
-
-    /* AST ruleDeclarations */
-    TreeRewriteParser_subrule_return * retval = [TreeRewriteParser_subrule_return newTreeRewriteParser_subrule_return];
-    [retval setStart:[input LT:1]];
-
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        /* AST ruleLabelDefs */
-        ANTLRCommonToken *INT3 = nil;
-
-        ANTLRCommonTree *INT3_tree=nil;
-
-        // TreeRewrite.g:12:5: ( INT ) // ruleBlockSingleAlt
-        // TreeRewrite.g:12:9: INT // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-
-        /* ASTParser tokenRef */
-        INT3=(ANTLRCommonToken *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_subrule53]; 
-        INT3_tree = /* ASTParser createNodeFromToken */
-        (ANTLRCommonTree *)[[treeAdaptor create:INT3] retain]
-        ;
-        [treeAdaptor addChild:INT3_tree  toTree:root_0];
-
-
-        }
-
-        /* ASTParser ruleCleanUp */
-        /* AST ruleCleanUp */
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-
-            retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        /* ASTParser rule.setErrorReturnValue */
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-
-    }
-
-    @finally {
-        /* Terence's stuff */
-
-    }
-    return retval;
-}
-/* $ANTLR end subrule */
-/* ObjC end rules */
-
-@end /* end of TreeRewriteParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/antlr3.h b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/antlr3.h
deleted file mode 100644
index 4f16279..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/antlr3.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#import <ANTLR/ANTLRBaseMapElement.h>
-#import <ANTLR/ANTLRBaseRecognizer.h>
-#import <ANTLR/ANTLRBaseStack.h>
-#import <ANTLR/ANTLRBaseTree.h>
-#import <ANTLR/ANTLRBaseTreeAdaptor.h>
-#import <ANTLR/ANTLRBitSet.h>
-#import <ANTLR/ANTLRBufferedTokenStream.h>
-#import <ANTLR/ANTLRBufferedTreeNodeStream.h>
-#import <ANTLR/ANTLRCharStream.h>
-#import <ANTLR/ANTLRCharStreamState.h>
-#import <ANTLR/ANTLRCommonErrorNode.h>
-#import <ANTLR/ANTLRCommonToken.h>
-#import <ANTLR/ANTLRCommonTokenStream.h>
-#import <ANTLR/ANTLRCommonTree.h>
-#import <ANTLR/ANTLRCommonTreeAdaptor.h>
-#import <ANTLR/ANTLRCommonTreeNodeStream.h>
-#import <ANTLR/ANTLRDFA.h>
-#import <ANTLR/ANTLRDebug.h>
-#import <ANTLR/ANTLRDebugEventProxy.h>
-#import <ANTLR/ANTLRDebugEventListener.h>
-#import <ANTLR/ANTLRDebugParser.h>
-#import <ANTLR/ANTLRDebugTokenStream.h>
-#import <ANTLR/ANTLRDebugTreeAdaptor.h>
-#import <ANTLR/ANTLRDebugTreeNodeStream.h>
-#import <ANTLR/ANTLRDebugTreeParser.h>
-#import <ANTLR/ANTLREarlyExitException.h>
-#import <ANTLR/ANTLRError.h>
-#import <ANTLR/ANTLRFailedPredicateException.h>
-#import <ANTLR/ANTLRFastQueue.h>
-#import <ANTLR/ANTLRHashMap.h>
-#import <ANTLR/ANTLRHashRule.h>
-#import <ANTLR/ANTLRIntArray.h>
-#import <ANTLR/ANTLRIntStream.h>
-#import <ANTLR/ANTLRLexer.h>
-#import <ANTLR/ANTLRLexerRuleReturnScope.h>
-#import <ANTLR/ANTLRLinkBase.h>
-#import <ANTLR/ANTLRLookaheadStream.h>
-#import <ANTLR/ANTLRMapElement.h>
-#import <ANTLR/ANTLRMap.h>
-#import <ANTLR/ANTLRMismatchedNotSetException.h>
-#import <ANTLR/ANTLRMismatchedRangeException.h>
-#import <ANTLR/ANTLRMismatchedSetException.h>
-#import <ANTLR/ANTLRMismatchedTokenException.h>
-#import <ANTLR/ANTLRMismatchedTreeNodeException.h>
-#import <ANTLR/ANTLRMissingTokenException.h>
-#import <ANTLR/ANTLRNodeMapElement.h>
-#import <ANTLR/ANTLRNoViableAltException.h>
-#import <ANTLR/ANTLRParser.h>
-#import <ANTLR/ANTLRParserRuleReturnScope.h>
-#import <ANTLR/ANTLRPtrBuffer.h>
-#import <ANTLR/ANTLRRecognitionException.h>
-#import <ANTLR/ANTLRRecognizerSharedState.h>
-#import <ANTLR/ANTLRRewriteRuleElementStream.h>
-#import <ANTLR/ANTLRRewriteRuleSubtreeStream.h>
-#import <ANTLR/ANTLRRewriteRuleTokenStream.h>
-#import <ANTLR/ANTLRRuleMemo.h>
-#import <ANTLR/ANTLRRuleStack.h>
-#import <ANTLR/ANTLRRuleReturnScope.h>
-#import <ANTLR/ANTLRRuntimeException.h>
-#import <ANTLR/ANTLRStreamEnumerator.h>
-#import <ANTLR/ANTLRStringStream.h>
-#import <ANTLR/ANTLRSymbolStack.h>
-#import <ANTLR/ANTLRToken+DebuggerSupport.h>
-#import <ANTLR/ANTLRToken.h>
-#import <ANTLR/ANTLRTokenRewriteStream.h>
-#import <ANTLR/ANTLRTokenSource.h>
-#import <ANTLR/ANTLRTokenStream.h>
-#import <ANTLR/ANTLRTree.h>
-#import <ANTLR/ANTLRTreeAdaptor.h>
-#import <ANTLR/ANTLRTreeException.h>
-#import <ANTLR/ANTLRTreeIterator.h>
-#import <ANTLR/ANTLRTreeNodeStream.h>
-#import <ANTLR/ANTLRTreeParser.h>
-#import <ANTLR/ANTLRTreeRuleReturnScope.h>
-#import <ANTLR/ANTLRUnbufferedTokenStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStream.h>
-//#import <ANTLR/ANTLRUnbufferedCommonTreeNodeStreamState.h>
-#import <ANTLR/ANTLRUniqueIDMap.h>
-#import <ANTLR/ANTLRUnwantedTokenException.h>
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/main.m b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/main.m
deleted file mode 100644
index cd8f9e1..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/main.m
+++ /dev/null
@@ -1,38 +0,0 @@
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-#import "TreeRewriteLexer.h"
-#import "TreeRewriteParser.h"
-//#import "stdio.h"
-//#include <unistd.h>
-
-int main() {
-	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
-
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"23 42"];
-	TreeRewriteLexer *lexer = [TreeRewriteLexer newTreeRewriteLexerWithCharStream:stream];
-	
-//    id<ANTLRToken> currentToken;
-//    while ((currentToken = [lexer nextToken]) && [currentToken type] != ANTLRTokenTypeEOF) {
-//        NSLog(@"%@", currentToken);
-//    }
-	
-	ANTLRCommonTokenStream *tokenStream = [ANTLRCommonTokenStream newANTLRCommonTokenStreamWithTokenSource:lexer];
-	TreeRewriteParser *parser = [[TreeRewriteParser alloc] initWithTokenStream:tokenStream];
-	ANTLRCommonTree *rule_tree = [[parser rule] getTree];
-	NSLog(@"tree: %@", [rule_tree treeDescription]);
-//	ANTLRCommonTreeNodeStream *treeStream = [[ANTLRCommonTreeNodeStream alloc] initWithTree:program_tree];
-//	SimpleCTP *walker = [[SimpleCTP alloc] initWithTreeNodeStream:treeStream];
-//	[walker program];
-
-	[lexer release];
-	[stream release];
-	[tokenStream release];
-	[parser release];
-//	[treeStream release];
-//	[walker release];
-
-	[pool release];
-    // sleep for objectalloc
-    // while(1) sleep(60);
-	return 0;
-}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewrite.tokens b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewrite.tokens
deleted file mode 100644
index eb18cc6..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewrite.tokens
+++ /dev/null
@@ -1,2 +0,0 @@
-WS=5
-INT=4
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteLexer.h b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteLexer.h
deleted file mode 100644
index c97f099..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteLexer.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// $ANTLR 3.2 Aug 20, 2010 15:00:19 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g 2010-08-20 15:03:14
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-/* Start cyclicDFAInterface */
-
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-#pragma mark Tokens
-#define INT 4
-#define WS 5
-#define EOF -1
-@interface TreeRewriteLexer : ANTLRLexer { // line 283
-// start of actions.lexer.memVars
-// start of action-actionScope-memVars
-}
-+ (TreeRewriteLexer *)newTreeRewriteLexer:(id<ANTLRCharStream>)anInput;
-
-- (void)mINT; 
-- (void)mWS; 
-- (void)mTokens; 
-
-@end /* end of TreeRewriteLexer interface */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteLexer.m b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteLexer.m
deleted file mode 100644
index 3b52b07..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteLexer.m
+++ /dev/null
@@ -1,224 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 20, 2010 15:00:19
- *
- *     -  From the grammar source file : /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g
- *     -                            On : 2010-08-20 15:03:14
- *     -                 for the lexer : TreeRewriteLexerLexer *
- * Editing it, at least manually, is not wise. 
- *
- * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 20, 2010 15:00:19 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g 2010-08-20 15:03:14
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "TreeRewriteLexer.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-
-/** As per Terence: No returns for lexer rules!
-#pragma mark Rule return scopes start
-#pragma mark Rule return scopes end
-*/
-@implementation TreeRewriteLexer // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"/usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (TreeRewriteLexer *)newTreeRewriteLexer:(id<ANTLRCharStream>)anInput
-{
-    return [[TreeRewriteLexer alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id<ANTLRCharStream>)anInput
-{
-    if ((self = [super initWithCharStream:anInput State:[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:3+1]]) != nil) {
-
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [super dealloc];
-}
-
-// Start of actions.lexer.methods
-// start methods()
-
-// Start of Rules
-// $ANTLR start "INT"
-- (void) mINT
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = INT;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:15:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:15:7: ( '0' .. '9' )+ // alt
-        {
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:15:7: ( '0' .. '9' )+ // positiveClosureBlock
-        NSInteger cnt1=0;
-        do {
-            NSInteger alt1=2;
-            NSInteger LA1_0 = [input LA:1];
-            if ( ((LA1_0>='0' && LA1_0<='9')) ) {
-                alt1=1;
-            }
-
-
-            switch (alt1) {
-                case 1 : ;
-                    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:15:8: '0' .. '9' // alt
-                    {
-                    [self matchRangeFromChar:'0' to:'9'];   /* element() */
-                     /* elements */
-                    }
-                    break;
-
-                default :
-                    if ( cnt1 >= 1 )
-                        goto loop1;
-                    ANTLREarlyExitException *eee = [ANTLREarlyExitException exceptionWithStream:input decisionNumber:1];
-                    @throw eee;
-            }
-            cnt1++;
-        } while (YES);
-        loop1: ;
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "INT"
-
-// $ANTLR start "WS"
-- (void) mWS
-{
-    //
-    // ruleScopeSetUp
-
-    @try {
-        NSInteger _type = WS;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:18:5: ( ' ' ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:18:9: ' ' // alt
-        {
-        [self matchChar:' ']; 
-          /* element() */
-        state.channel=99;  /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-
-        [state setType:_type];
-
-        state.channel = _channel;
-    }
-    @finally {
-        //
-    }
-    return;
-}
-// $ANTLR end "WS"
-
-- (void) mTokens
-{
-    // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:1:8: ( INT | WS ) //ruleblock
-    NSInteger alt2=2;
-    NSInteger LA2_0 = [input LA:1];
-
-    if ( ((LA2_0>='0' && LA2_0<='9')) ) {
-        alt2=1;
-    }
-    else if ( (LA2_0==' ') ) {
-        alt2=2;
-    }
-    else {
-        ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newANTLRNoViableAltException:2 state:0 stream:input];
-        @throw nvae;
-    }
-    switch (alt2) {
-        case 1 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:1:10: INT // alt
-            {
-                [self mINT]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-        case 2 : ;
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:1:14: WS // alt
-            {
-                [self mWS]; 
-              /* element() */
-             /* elements */
-            }
-            break;
-
-    }
-
-}
-
-@end // end of TreeRewriteLexer implementation // line 397
-
-/* End of code
- * =============================================================================
- */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteParser.h b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteParser.h
deleted file mode 100644
index 058e7af..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteParser.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// $ANTLR 3.2 Aug 20, 2010 15:00:19 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g 2010-08-20 15:03:14
-
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import <Cocoa/Cocoa.h>
-#import "antlr3.h"
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-
-#pragma mark Tokens
-#define WS 5
-#define INT 4
-#define EOF -1
-#pragma mark Dynamic Global Scopes
-#pragma mark Dynamic Rule Scopes
-#pragma mark Rule Return Scopes start
-@interface TreeRewriteParser_rule_return :ANTLRParserRuleReturnScope { // line 1672
-// returnScopeInterface.memVars
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (TreeRewriteParser_rule_return *)newTreeRewriteParser_rule_return;
-// this is start of set and get methods
-// returnScopeInterface.methodsdecl
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end 
-@interface TreeRewriteParser_subrule_return :ANTLRParserRuleReturnScope { // line 1672
-// returnScopeInterface.memVars
-ANTLRCommonTree *tree; // start of memVars()
-}
-
-// start properties
-@property (retain, getter=getTree, setter=setTree:) ANTLRCommonTree *tree;
-+ (TreeRewriteParser_subrule_return *)newTreeRewriteParser_subrule_return;
-// this is start of set and get methods
-// returnScopeInterface.methodsdecl
-- (ANTLRCommonTree *)getTree;
-- (void) setTree:(ANTLRCommonTree *)aTree;
-  // methodsDecl
-@end 
-
-#pragma mark Rule return scopes end
-@interface TreeRewriteParser : ANTLRParser { // line 529
-// start of globalAttributeScopeMemVar
-
-
-// start of action-actionScope-memVars
-// start of ruleAttributeScopeMemVar
-
-
-// Start of memVars
-// parserHeaderFile.memVars
-// parsermemVars
-id<ANTLRTreeAdaptor> treeAdaptor;
-
- }
-
-// start of action-actionScope-methodsDecl
-
-// parserHeaderFile.methodsdecl
-// parserMethodsDecl
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)theTreeAdaptor;
-
-- (TreeRewriteParser_rule_return *)mrule; 
-- (TreeRewriteParser_subrule_return *)msubrule; 
-
-
-@end /* end of TreeRewriteParser interface */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteParser.m b/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteParser.m
deleted file mode 100644
index efd085f..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/output1/TreeRewriteParser.m
+++ /dev/null
@@ -1,354 +0,0 @@
-/** \file
- *  This OBJC source file was generated by $ANTLR version 3.2 Aug 20, 2010 15:00:19
- *
- *     -  From the grammar source file : /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g
- *     -                            On : 2010-08-20 15:03:14
- *     -                for the parser : TreeRewriteParserParser *
- * Editing it, at least manually, is not wise. 
- *
- * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws.
- *
- *
-*/
-// [The "BSD licence"]
-// Copyright (c) 2010 Alan Condit
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// $ANTLR 3.2 Aug 20, 2010 15:00:19 /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g 2010-08-20 15:03:14
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "TreeRewriteParser.h"
-/* ----------------------------------------- */
-
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-
-
-#pragma mark Bitsets
-static ANTLRBitSet *FOLLOW_INT_in_rule26;
-
-const unsigned long long FOLLOW_INT_in_rule26_data[] = { 0x0000000000000010LL};
-
-static ANTLRBitSet *FOLLOW_subrule_in_rule28;
-
-const unsigned long long FOLLOW_subrule_in_rule28_data[] = { 0x0000000000000002LL};
-
-static ANTLRBitSet *FOLLOW_INT_in_subrule53;
-
-const unsigned long long FOLLOW_INT_in_subrule53_data[] = { 0x0000000000000002LL};
-
-
-
-#pragma mark Dynamic Global Scopes
-
-#pragma mark Dynamic Rule Scopes
-
-#pragma mark Rule return scopes start
-@implementation TreeRewriteParser_rule_return
-@synthesize tree;
-+ (TreeRewriteParser_rule_return *)newTreeRewriteParser_rule_return
-{
-    return [[[TreeRewriteParser_rule_return alloc] init] retain];
-}
-
-// returnScope.methods
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end 
-
-@implementation TreeRewriteParser_subrule_return
-@synthesize tree;
-+ (TreeRewriteParser_subrule_return *)newTreeRewriteParser_subrule_return
-{
-    return [[[TreeRewriteParser_subrule_return alloc] init] retain];
-}
-
-// returnScope.methods
-- (ANTLRCommonTree *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(ANTLRCommonTree *)aTree
-{
-    if (tree != aTree) {
-        if (tree != nil) [tree release];
-        if (aTree != nil) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-
-
-
-@end 
-
-
-
-@implementation TreeRewriteParser  // line 610
-
-+ (void) initialize
-{
-    FOLLOW_INT_in_rule26 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_rule26_data Count:(NSUInteger)1] retain];
-    FOLLOW_subrule_in_rule28 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_subrule_in_rule28_data Count:(NSUInteger)1] retain];
-    FOLLOW_INT_in_subrule53 = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_subrule53_data Count:(NSUInteger)1] retain];
-
-    [ANTLRBaseRecognizer setTokenNames:[[[NSArray alloc] initWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
- @"INT", @"WS", nil] retain]];
-}
-
-+ (TreeRewriteParser *)newTreeRewriteParser:(id<ANTLRTokenStream>)aStream
-{
-    return [[TreeRewriteParser alloc] initWithTokenStream:aStream];
-
-}
-
-- (id) initWithTokenStream:(id<ANTLRTokenStream>)aStream
-{
-    if ((self = [super initWithTokenStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:2+1] retain]]) != nil) {
-
-
-                        
-        // start of actions-actionScope-init
-        // start of init
-        // genericParser.init
-        [self setTreeAdaptor:[[ANTLRCommonTreeAdaptor newANTLRCommonTreeAdaptor] retain]];
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    [self setTreeAdaptor:nil];
-
-    [super dealloc];
-}
-// start actions.actionScope.methods
-// start methods()
-// genericParser.methods
-// parserMethods
-- (id<ANTLRTreeAdaptor>) getTreeAdaptor
-{
-	return treeAdaptor;
-}
-
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)aTreeAdaptor
-{
-	if (aTreeAdaptor != treeAdaptor) {
-		treeAdaptor = aTreeAdaptor;
-	}
-}
-// start rules
-/*
- * $ANTLR start rule
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:8:1: rule : INT subrule -> ^( subrule INT ) ;
- */
-- (TreeRewriteParser_rule_return *) rule
-{
-    // ruleScopeSetUp
-
-    // ruleDeclarations
-    TreeRewriteParser_rule_return * retval = [TreeRewriteParser_rule_return newTreeRewriteParser_rule_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        // ruleLabelDefs
-        id<ANTLRToken> INT1 = nil;
-        TreeRewriteParser_subrule_return * subrule2 = nil;
-
-
-        ANTLRCommonTree *INT1_tree=nil;
-        ANTLRRewriteRuleTokenStream *stream_INT = 
-            [[ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"token INT"] retain];
-        ANTLRRewriteRuleSubtreeStream *stream_subrule = 
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"rule subrule"] retain];
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:8:5: ( INT subrule -> ^( subrule INT ) ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:8:7: INT subrule // alt
-        {
-        INT1=(id<ANTLRToken>)[self match:input TokenType:INT Follow:FOLLOW_INT_in_rule26];  
-            [stream_INT addElement:INT1];
-          /* element() */
-        [self pushFollow:FOLLOW_subrule_in_rule28];
-        subrule2 = [self subrule];
-        [self popFollow];
-
-
-        [stream_subrule addElement:[subrule2 getTree]];  /* element() */
-         /* elements */
-
-        // AST REWRITE
-        // elements: INT, subrule
-        // token labels: 
-        // rule labels: retval
-        // token list labels: 
-        // rule list labels: 
-        // wildcard labels: 
-         [retval setTree:root_0];
-
-        retval.tree = root_0;
-
-        ANTLRRewriteRuleSubtreeStream *stream_retval =
-            [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                                description:@"token retval"
-                                                                    element:retval!=nil?[retval getTree]:nil] retain];
-
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        // 8:19: -> ^( subrule INT )
-        {
-            // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:8:22: ^( subrule INT )
-            {
-                ANTLRCommonTree *root_1 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-                root_1 = (ANTLRCommonTree *)[treeAdaptor becomeRoot:(id<ANTLRTree>)[stream_subrule nextNode]
-                                                                         old:root_1];
-
-                 // TODO: args: 
-                [treeAdaptor addChild:[stream_INT nextNode] toTree:root_1];
-
-                [treeAdaptor addChild:root_1 toTree:root_0];
-            }
-
-        }
-
-        retval.tree = root_0;
-
-        }
-
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-        [stream_INT release];
-        [stream_subrule release];
-
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-        [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end rule */
-/*
- * $ANTLR start subrule
- * /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:11:1: subrule : INT ;
- */
-- (TreeRewriteParser_subrule_return *) subrule
-{
-    // ruleScopeSetUp
-
-    // ruleDeclarations
-    TreeRewriteParser_subrule_return * retval = [TreeRewriteParser_subrule_return newTreeRewriteParser_subrule_return];
-    [retval setStart:[input LT:1]];
-
-    ANTLRCommonTree *root_0 = nil;
-
-    @try {
-        // ruleLabelDefs
-        id<ANTLRToken> INT3 = nil;
-
-        ANTLRCommonTree *INT3_tree=nil;
-
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:12:5: ( INT ) // ruleBlockSingleAlt
-        // /usr/local/ANTLR3-ObjC2.0-Runtime/Framework/examples/treerewrite/TreeRewrite.g:12:9: INT // alt
-        {
-        root_0 = (ANTLRCommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
-
-        INT3=(id<ANTLRToken>)[self match:input TokenType:INT Follow:FOLLOW_INT_in_subrule53]; 
-        INT3_tree = (ANTLRCommonTree *)[[treeAdaptor createTree:INT3] retain];
-        [treeAdaptor addChild:INT3_tree  toTree:root_0];
-          /* element() */
-         /* elements */
-        }
-
-        // token+rule list labels
-        [retval setStop:[input LT:-1]];
-
-
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor rulePostProcessing:root_0];
-        [treeAdaptor setTokenBoundaries:retval.tree From:retval.startToken To:retval.stopToken];
-
-    }
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        retval.tree = (ANTLRCommonTree *)[treeAdaptor errorNode:input From:retval.startToken To:[input LT:-1] Exception:re];
-
-    }    @finally {
-    }
-    return retval;
-}
-/* $ANTLR end subrule */
-
-@end /* end of TreeRewriteParser implementation line 669 */
-
-
-/* End of code
- * =============================================================================
- */
diff --git a/antlr-3.4/runtime/ObjC/Framework/java src b/antlr-3.4/runtime/ObjC/Framework/java src
deleted file mode 100644
index 7012ce8..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/java src
+++ /dev/null
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.h b/antlr-3.4/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.h
deleted file mode 100755
index 06e128a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import <SenTestingKit/SenTestingKit.h>
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRRewriteRuleTokenStream.h"
-#import "ANTLRCommonToken.h"
-
-@interface TestRewriteRuleTokenStream : SenTestCase {
-    ANTLRCommonTreeAdaptor *treeAdaptor;
-    ANTLRRewriteRuleTokenStream *stream;
-    
-    ANTLRCommonToken *token1;
-    ANTLRCommonToken *token2;
-    ANTLRCommonToken *token3;
-    ANTLRCommonToken *token4;
-}
-
-- (void) setUp;
-- (void) tearDown;
-//- (void) test01EmptyRewriteStream;
-- (void) test02RewriteStreamCount;
-- (void) test03SingleElement;
-- (void) test04SingleElementDup;
-- (void) test05MultipleElements;
-- (void) test06MultipleElementsAfterReset;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.m
deleted file mode 100755
index ef84361..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.m
+++ /dev/null
@@ -1,201 +0,0 @@
-// [The "BSD licence"]
-// Copyright (c) 2007 Kay Roepke
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#import "TestRewriteRuleTokenStream.h"
-#import "ANTLRRewriteRuleTokenStream.h"
-#import "ANTLRCommonTreeAdaptor.h"
-#import "ANTLRCommonToken.h"
-
-@implementation TestRewriteRuleTokenStream
-
-- (void) setUp
-{
-    treeAdaptor = [ANTLRCommonTreeAdaptor newTreeAdaptor];
-    stream = [ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                          description:@"rewrite rule token stream"];
-    token1 = [ANTLRCommonToken newToken:5];
-    token2 = [ANTLRCommonToken newToken:6];
-    token3 = [ANTLRCommonToken newToken:7];
-    token4 = [ANTLRCommonToken newToken:8];
-    [token1 setText:@"token 1"];
-    [token2 setText:@"token 2"];
-    [token3 setText:@"token 3"];
-    [token4 setText:@"token 4"];
-}
-
-- (void) tearDown
-{
-    [token1 release]; token1 = nil;
-    [token2 release]; token2 = nil;
-    [token3 release]; token3 = nil;
-    [token4 release]; token4 = nil;
-    
-    [treeAdaptor release]; treeAdaptor = nil;
-    [stream release]; stream = nil;
-}
-
-- (void) test01EmptyRewriteStream
-{
-    treeAdaptor = [ANTLRCommonTreeAdaptor newTreeAdaptor];
-    stream = [ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"rewrite rule token stream"];
-    STAssertFalse([stream hasNext], @"-(BOOL)hasNext should be NO, but isn't");
-    STAssertThrows([stream nextToken], @"-next on empty stream should throw exception, but doesn't");
-}
-
-- (void) test02RewriteStreamCount
-{
-    treeAdaptor = [ANTLRCommonTreeAdaptor newTreeAdaptor];
-    stream = [ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"rewrite rule token stream"];
-    token1 = [ANTLRCommonToken newToken:5];
-    token2 = [ANTLRCommonToken newToken:6];
-    [token1 setText:@"token 1"];
-    [token2 setText:@"token 2"];
-    STAssertTrue([stream size] == 0,
-                 @"empty stream should have count==0");
-    [stream addElement:token1];
-    STAssertTrue([stream size] == 1,
-                 @"single element stream should have count==1");
-    [stream addElement:token2];
-    STAssertTrue([stream size] == 2,
-                 @"multiple stream should have count==2");
-
-}
-
-- (void) test03SingleElement
-{
-    treeAdaptor = [ANTLRCommonTreeAdaptor newTreeAdaptor];
-    stream = [ANTLRRewriteRuleTokenStream newANTLRRewriteRuleTokenStream:treeAdaptor
-                                                             description:@"rewrite rule token stream"];
-    token1 = [ANTLRCommonToken newToken:5];
-    token2 = [ANTLRCommonToken newToken:6];
-    token3 = [ANTLRCommonToken newToken:7];
-    token4 = [ANTLRCommonToken newToken:8];
-    [token1 setText:@"token 1"];
-    [token2 setText:@"token 2"];
-    [token3 setText:@"token 3"];
-    [token4 setText:@"token 4"];
-    [stream addElement:token1];
-    STAssertTrue([stream hasNext], @"-hasNext should be YES, but isn't");
-    ANTLRCommonTree *tree = [stream nextNode];
-    STAssertEqualObjects([tree getToken], token1, @"return token from stream should be token1, but isn't");
-}
-
-- (void) test04SingleElementDup
-{
-    [stream addElement:token1];
-    ANTLRCommonTree *tree1, *tree2;
-    STAssertNoThrow(tree1 = [stream nextNode],
-                    @"stream iteration should not throw exception"
-                    );
-    STAssertNoThrow(tree2 = [stream nextNode],
-                    @"stream iteration past element count (single element) should not throw exception"
-                    );
-    STAssertEqualObjects([tree1 getToken], [tree2 getToken],
-                         @"tokens should be the same");
-    STAssertFalse(tree1 == tree2, 
-                         @"trees should be different, but aren't");
-}
-
-- (void) test05MultipleElements
-{
-    [stream addElement:token1];
-    [stream addElement:token2];
-    [stream addElement:token3];
-    ANTLRCommonTree *tree1, *tree2, *tree3, *tree4;
-    STAssertNoThrow(tree1 = [stream nextNode],
-                    @"stream iteration should not throw exception"
-                    );
-    STAssertEqualObjects([tree1 getToken], token1,
-                         @"[tree1 token] should be equal to token1"
-                         );
-    STAssertNoThrow(tree2 = [stream nextNode],
-                    @"stream iteration should not throw exception"
-                    );
-    STAssertEqualObjects([tree2 getToken], token2,
-                         @"[tree2 token] should be equal to token2"
-                         );
-    STAssertNoThrow(tree3 = [stream nextNode],
-                    @"stream iteration should not throw exception"
-                    );
-    STAssertEqualObjects([tree3 getToken], token3,
-                         @"[tree3 token] should be equal to token3"
-                         );
-    STAssertThrows(tree4 = [stream nextNode],
-                    @"iterating beyond end of stream should throw an exception"
-                    );
-}
-
-- (void) test06MultipleElementsAfterReset
-{
-    [stream addElement:token1];
-    [stream addElement:token2];
-    [stream addElement:token3];
-    ANTLRCommonTree *tree1, *tree2, *tree3;
-    
-    // consume the stream completely
-    STAssertNoThrow(tree1 = [stream nextNode],
-                    @"stream iteration should not throw exception"
-                    );
-    STAssertEqualObjects([tree1 getToken], token1,
-                         @"[tree1 token] should be equal to token1"
-                         );
-    STAssertNoThrow(tree2 = [stream nextNode],
-                    @"stream iteration should not throw exception"
-                    );
-    STAssertEqualObjects([tree2 getToken], token2,
-                         @"[tree2 token] should be equal to token2"
-                         );
-    STAssertNoThrow(tree3 = [stream nextNode],
-                    @"stream iteration should not throw exception"
-                    );
-    
-    [stream reset]; // after resetting the stream it should dup
-    
-    ANTLRCommonTree *tree1Dup, *tree2Dup, *tree3Dup;
-
-    STAssertNoThrow(tree1Dup = [stream nextNode],
-                    @"stream iteration should not throw exception"
-                    );
-    STAssertTrue(tree1 != tree1Dup,
-                 @"[tree1 token] should be equal to token1"
-                 );
-    STAssertNoThrow(tree2Dup = [stream nextNode],
-                    @"stream iteration should not throw exception"
-                    );
-    STAssertTrue(tree2 != tree2Dup,
-                 @"[tree2 token] should be equal to token2"
-                 );
-    STAssertNoThrow(tree3Dup = [stream nextNode],
-                    @"stream iteration should not throw exception"
-                    );
-    STAssertTrue(tree3 != tree3Dup,
-                 @"[tree3 token] should be equal to token3"
-                 );
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRFastQueueTest.h b/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRFastQueueTest.h
deleted file mode 100644
index eb85a2d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRFastQueueTest.h
+++ /dev/null
@@ -1,24 +0,0 @@
-//
-//  ANTLRFastQueueTest.h
-//  ANTLR
-//
-//  Created by Ian Michell on 13/05/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import <SenTestingKit/SenTestingKit.h>
-
-
-@interface ANTLRFastQueueTest : SenTestCase {
-
-}
-
--(void) testInit;
--(void) testAddAndGet;
--(void) testInvalidElementIndex;
--(void) testHead;
--(void) testClear;
--(void) testDescription;
--(void) testRemove;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRFastQueueTest.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRFastQueueTest.m
deleted file mode 100644
index 74508a7..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRFastQueueTest.m
+++ /dev/null
@@ -1,103 +0,0 @@
-//
-//  ANTLRFastQueueTest.m
-//  ANTLR
-//
-//  Created by Ian Michell on 13/05/2010.
-//  Copyright 2010 Ian Michell and Alan Condit. All rights reserved.
-//
-
-#import "ANTLRFastQueueTest.h"
-#import "ANTLRFastQueue.h"
-#import "ANTLRError.h"
-#import "ANTLRRuntimeException.h"
-
-@implementation ANTLRFastQueueTest
-
--(void) testInit
-{
-	ANTLRFastQueue *queue = [[ANTLRFastQueue newANTLRFastQueue] retain];
-	STAssertNotNil(queue, @"Queue was not created and was nil");
-	[queue release];
-}
-
--(void) testAddAndGet
-{
-	ANTLRFastQueue *queue = [[ANTLRFastQueue newANTLRFastQueue] retain];
-	STAssertNotNil(queue, @"Queue was not created and was nil");
-	[queue addObject:@"My String"];
-	STAssertTrue([[queue objectAtIndex:0] isKindOfClass:[NSString class]], @"First object is not a NSString");
-	STAssertEquals([queue objectAtIndex:0], @"My String", @"Object at index zero is invalid");
-	STAssertTrue([queue size] == 1, @"Queue is the wrong size: %d", [queue size]);
-	[queue release];
-}
-
--(void) testInvalidElementIndex
-{
-    //ANTLRRuntimeException *ANTLRNoSuchElementException = [ANTLRNoSuchElementException newException:@"No such element exception"];
-    id retVal;
-	ANTLRFastQueue *queue = [[ANTLRFastQueue newANTLRFastQueue] retain];
-	STAssertNotNil(queue, @"Queue was not created and was nil");
-	@try 
-	{
-		retVal = [queue objectAtIndex:100];
-	}
-	@catch (ANTLRNoSuchElementException *e) 
-	{
-		STAssertTrue([[e name] isEqualTo:@"ANTLRNoSuchElementException"], @"Exception was not type: ANTLRNoSuchElementException -- %@", [e name]);
-		return;
-	}
-	STFail(@"Exception ANTLRNoSuchElementException was not thrown -- %@", [retVal name]);
-    [queue release];
-}
-
--(void) testHead
-{
-	ANTLRFastQueue *queue = [[ANTLRFastQueue newANTLRFastQueue] retain];
-	STAssertNotNil(queue, @"Queue was not created and was nil");
-	[queue addObject:@"Item 1"];
-	[queue addObject:@"Item 2"];
-	[queue addObject:@"Item 3"];
-	id head = [queue head];
-	STAssertNotNil(head, @"Object returned from head is nil");
-	STAssertEquals(head, @"Item 1", @"Object returned was not first item in");
-	[queue release];
-}
-
--(void) testClear
-{
-	ANTLRFastQueue *queue = [[ANTLRFastQueue newANTLRFastQueue] retain];
-	STAssertNotNil(queue, @"Queue was not created and was nil");
-	[queue addObject:@"Item 1"];
-	[queue addObject:@"Item 2"];
-	[queue addObject:@"Item 3"];
-	STAssertTrue([queue size] == 3, @"Queue was too small, was: %d expected 3", [queue size]);
-	[queue reset];
-	STAssertTrue([queue size] == 0, @"Queue is not empty, it's still %d", [queue size]);
-	[queue release];
-}
-
--(void) testDescription
-{
-	ANTLRFastQueue *queue = [[ANTLRFastQueue newANTLRFastQueue] retain];
-	STAssertNotNil(queue, @"Queue was not created and was nil");
-	[queue addObject:@"My"];
-	[queue addObject:@"String"];
-	STAssertTrue([[queue description] isEqualToString:@"My String"], @"Queue description was not right, got: \"%@\" expected: \"My String\"", [queue description]);
-	[queue release];
-}
-
--(void) testRemove
-{
-	ANTLRFastQueue *queue = [[ANTLRFastQueue newANTLRFastQueue] retain];
-	STAssertNotNil(queue, @"Queue was not created and was nil");
-	[queue addObject:@"My"];
-	[queue addObject:@"String"];
-	STAssertTrue([queue size] == 2, @"Queue not the correct size, was: %d expected 2", [queue size]);
-	[queue remove];
-	STAssertTrue([queue size] == 1, @"Queue not the correct size, was %d expected 1", [queue size]);
-	[queue remove]; // test that the queue is reset when we remove the last object...
-	STAssertTrue([queue size] == 0, @"Queue was not reset, when we hit the buffer, was still %d", [queue size]);
-	[queue release];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRIntArrayTest.h b/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRIntArrayTest.h
deleted file mode 100644
index 1dffb23..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRIntArrayTest.h
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-//  ANTLRIntArrayTest.h
-//  ANTLR
-//
-//  Created by Ian Michell on 13/05/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import <SenTestingKit/SenTestingKit.h>
-
-
-@interface ANTLRIntArrayTest : SenTestCase 
-{
-
-}
-
--(void) testAdd;
--(void) testPushPop;
--(void) testClearAndAdd;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRIntArrayTest.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRIntArrayTest.m
deleted file mode 100644
index a3edc20..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/ANTLRIntArrayTest.m
+++ /dev/null
@@ -1,47 +0,0 @@
-//
-//  ANTLRIntArrayTest.m
-//  ANTLR
-//
-//  Created by Ian Michell on 13/05/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import "ANTLRIntArrayTest.h"
-#import "ANTLRIntArray.h"
-
-@implementation ANTLRIntArrayTest
-
--(void) testAdd
-{
-	ANTLRIntArray *intArray = [ANTLRIntArray newArrayWithLen:10];
-	[intArray addInteger:1];
-	STAssertTrue([intArray count] == 1, @"Int array should be of size 1");
-	STAssertTrue([intArray integerAtIndex:0] == 1, @"First item in int array should be 1");
-	[intArray release];
-}
-
--(void) testPushPop
-{
-	ANTLRIntArray *intArray = [ANTLRIntArray newArrayWithLen:10];
-	for (NSInteger i = 0; i < 10; i++)
-	{
-		[intArray push:i + 1];
-	}
-	NSInteger popped = [intArray pop];
-	NSLog(@"Popped value: %d", popped);
-	STAssertTrue(popped == 10, @"Pop should pull the last element out, which should be 10 was: %d", popped);
-	[intArray release];
-}
-
--(void) testClearAndAdd
-{
-	ANTLRIntArray *intArray = [ANTLRIntArray newArrayWithLen:10];
-	[intArray addInteger:1];
-	STAssertTrue([intArray count] == 1, @"Int array should be of size 1");
-	STAssertTrue([intArray integerAtIndex:0] == 1, @"First item in int array should be 1");
-	[intArray reset];
-	STAssertTrue([intArray count] == 0, @"Array size should be 0");
-	[intArray release];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/IntArrayTest.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/IntArrayTest.m
deleted file mode 100644
index 2b3f3f0..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/IntArrayTest.m
+++ /dev/null
@@ -1,47 +0,0 @@
-//
-//  IntArrayTest.m
-//  ANTLR
-//
-//  Created by Ian Michell on 13/05/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import "IntArrayTest.h"
-#import "IntArray.h"
-
-@implementation IntArrayTest
-
--(void) testAdd
-{
-	IntArray *intArray = [IntArray newIntArrayWithLen:10];
-	[intArray addInteger:1];
-	STAssertTrue([intArray count] == 1, @"Int array should be of size 1");
-	STAssertTrue([intArray integerAtIndex:0] == 1, @"First item in int array should be 1");
-	[intArray release];
-}
-
--(void) testPushPop
-{
-	IntArray *intArray = [IntArray newIntArrayWithLen:10];
-	for (NSInteger i = 0; i < 10; i++)
-	{
-		[intArray push:i + 1];
-	}
-	NSInteger popped = [intArray pop];
-	NSLog(@"Popped value: %d", popped);
-	STAssertTrue(popped == 10, @"Pop should pull the last element out, which should be 10 was: %d", popped);
-	[intArray release];
-}
-
--(void) testClearAndAdd
-{
-	IntArray *intArray = [IntArray newIntArrayWithLen:10];
-	[intArray addInteger:1];
-	STAssertTrue([intArray count] == 1, @"Int array should be of size 1");
-	STAssertTrue([intArray integerAtIndex:0] == 1, @"First item in int array should be 1");
-	[intArray reset];
-	STAssertTrue([intArray count] == 0, @"Array size should be 0");
-	[intArray release];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/recognizer/ANTLRRecognizerTest.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/recognizer/ANTLRRecognizerTest.m
deleted file mode 100755
index f857107..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/recognizer/ANTLRRecognizerTest.m
+++ /dev/null
@@ -1,14 +0,0 @@
-//
-//  ANTLRRecognizerTest.m
-//  ANTLR
-//
-//  Created by Ian Michell on 02/07/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import "ANTLRRecognizerTest.h"
-
-
-@implementation ANTLRRecognizerTest
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.h b/antlr-3.4/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.h
deleted file mode 100644
index debf650..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.h
+++ /dev/null
@@ -1,24 +0,0 @@
-//
-//  ANTLRBitSetTest.h
-//  ANTLR
-//
-//  Created by Ian Michell on 13/05/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import <SenTestingKit/SenTestingKit.h>
-
-@interface ANTLRBitSetTest : SenTestCase 
-{
-	
-}
-
--(void) testWithBitData;
--(void) testWithBitArray;
--(void) testAdd;
--(void) testRemove;
--(void) testCopyBitSet;
--(void) testOr;
--(void) testDescription;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.m
deleted file mode 100644
index 70fd894..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.m
+++ /dev/null
@@ -1,108 +0,0 @@
-//
-//  ANTLRBitSetTest.m
-//  ANTLR
-//
-//  Created by Ian Michell on 13/05/2010.
-//  Copyright 2010 Ian Michell and Alan Condit. All rights reserved.
-//
-
-#import "ANTLRBitSetTest.h"
-#import "ANTLRBitSet.h"
-#import <CoreFoundation/CoreFoundation.h>
-#import <CoreFoundation/CFBitVector.h>
-
-@implementation ANTLRBitSetTest
-
--(void) testWithBitData
-{
-	static const unsigned long long bitData[] = {3LL, 1LL};
-	ANTLRBitSet *bitSet = [ANTLRBitSet newANTLRBitSetWithBits:bitData Count:2];
-    CFIndex actual = (CFIndex)[bitSet numBits];
-    CFIndex expected = 3;
-	
-    STAssertEquals(actual, expected, @"There should be three bits set in bitvector. But I have %d", actual);
-	[bitSet release];
-}
-
--(void) testWithBitArray
-{
-	AMutableArray *bits = [AMutableArray arrayWithCapacity:10];
-	[bits addObject:[NSNumber numberWithBool:YES]];
-	[bits addObject:[NSNumber numberWithBool:YES]];
-	[bits addObject:[NSNumber numberWithBool:NO]];
-	[bits addObject:[NSNumber numberWithBool:YES]];
-	[bits addObject:[NSNumber numberWithBool:NO]];
-	[bits addObject:[NSNumber numberWithBool:YES]];
-	STAssertTrue([[bits objectAtIndex:0] boolValue], @"Value at index 0 was not true");
-	STAssertTrue([[bits objectAtIndex:1] boolValue], @"Value at index 1 was not true");
-	STAssertFalse([[bits objectAtIndex:2] boolValue], @"Value at index 2 was not false");
-	STAssertTrue([[bits objectAtIndex:3] boolValue], @"Value at index 3 was not true");
-	STAssertFalse([[bits objectAtIndex:4] boolValue], @"Value at index 4 was not false");
-	STAssertTrue([[bits objectAtIndex:5] boolValue], @"Value at index 5 was not true");
-	ANTLRBitSet *bitSet = [ANTLRBitSet newANTLRBitSetWithArray:bits];
-	CFIndex actual = (CFIndex)[bitSet numBits];
-	CFIndex expected = 4;
-	STAssertEquals(actual, expected, @"There should be four bits set in bitvector. But I have %d", actual);
-	[bitSet release];
-}
-
--(void) testAdd
-{
-
-	ANTLRBitSet *bitSet = [ANTLRBitSet newANTLRBitSet];
-	[bitSet add:1];
-	[bitSet add:2];
-	[bitSet add:3];
-	CFIndex actual = (CFIndex)[bitSet numBits];
-	CFIndex expected = 3;
-	STAssertEquals(actual, expected, @"There should be three bits set in bitvector. But I have %d", actual);
-	[bitSet release];
-}
-
--(void) testRemove
-{
-	ANTLRBitSet *bitSet = [ANTLRBitSet newANTLRBitSet];
-	[bitSet add:1];
-	CFIndex actual = (CFIndex)[bitSet numBits];
-	CFIndex expected = 1;
-	STAssertTrue(actual == expected, @"Bitset was not of size 1");
-	STAssertTrue([bitSet member:1], @"Bit at index 1 is not a member...");
-	[bitSet remove:1];
-	actual = [bitSet numBits];
-	STAssertTrue(actual == 0, @"Bitset was not empty");
-	STAssertFalse([bitSet member:1], @"Bit at index 1 is a member...");
-	STAssertTrue([bitSet isNil], @"There was at least one bit on...");
-}
-
--(void) testCopyBitSet
-{
-	static const unsigned long long bitData[] = {3LL, 1LL};
-	ANTLRBitSet *bitSet = [ANTLRBitSet newANTLRBitSetWithBits:bitData Count:2];
-	ANTLRBitSet *copy = [bitSet mutableCopyWithZone:nil];
-	CFIndex actual = (CFIndex)[copy numBits];
-	STAssertEquals(actual, (CFIndex)[bitSet numBits], @"There should be three bits set in bitvector. But I have %d", [copy numBits]);
-	[bitSet release];
-}
-
--(void) testOr
-{
-	static const unsigned long long bitData[] = {3LL, 1LL};
-	ANTLRBitSet *bitSet = [ANTLRBitSet newANTLRBitSetWithBits:bitData Count:2];
-	
-	static const unsigned long long otherData[] = {5LL, 3LL, 1LL};
-	ANTLRBitSet *otherBitSet = [ANTLRBitSet newANTLRBitSetWithBits:otherData Count:3];
-	
-	ANTLRBitSet *c = [bitSet or:otherBitSet];
-	STAssertTrue([c size] == [otherBitSet size], @"c should be the same as otherBitSet");
-}
-
--(void) testDescription
-{
-	ANTLRBitSet *bitSet = [ANTLRBitSet newANTLRBitSet];
-	[bitSet add:1];
-	[bitSet add:2];
-	NSMutableString *aDescription = (NSMutableString *)[bitSet description];
-	STAssertTrue([aDescription isEqualToString:@"{1,2}"], @"Description was not right, expected '{1,2}' got: %@", aDescription);
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/stream/ANTLRStringStreamTest.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/stream/ANTLRStringStreamTest.m
deleted file mode 100644
index 7b6b66e..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/stream/ANTLRStringStreamTest.m
+++ /dev/null
@@ -1,108 +0,0 @@
-//
-//  ANTLRStringStreamTest.m
-//  ANTLR
-//
-//  Created by Ian Michell on 12/05/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import "ANTLRStringStreamTest.h"
-#import "ANTLRCharStream.h"
-#import "ANTLRStringStream.h"
-#import "ANTLRError.h"
-
-@implementation ANTLRStringStreamTest
-
--(void) testInitWithInput
-{
-	NSString *input = @"This is a string used for ANTLRStringStream input ;)";
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:input];
-	NSString *subString = [stream substring:0 To:10];
-	NSLog(@"The first ten chars are '%@'", subString);
-	STAssertTrue([@"This is a " isEqualToString:subString], @"The strings do not match");
-	[stream release];
-}
-
--(void) testConsumeAndReset
-{
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"This is a string used for input"];
-	[stream consume];
-	STAssertTrue(stream.index > 0, @"Index should be greater than 0 after consume");
-	[stream reset];
-	STAssertTrue(stream.index == 0, @"Index should be 0 after reset");
-	[stream release];
-}
-
--(void) testConsumeWithNewLine
-{
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"This is a string\nused for input"];
-	while (stream.index < [stream size] && stream.line == 1)
-	{
-		[stream consume];
-	}
-	STAssertTrue(stream.line == 2, @"Line number is incorrect, should be 2, was %d!", stream.line);
-	STAssertTrue(stream.charPositionInLine == 0, @"Char position in line should be 0, it was: %d!", stream.charPositionInLine);
-	[stream release];
-}
-
--(void) testLAEOF
-{
-    NSInteger i;
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"This is a string\nused for input"];
-	BOOL eofFound = NO;
-	for (i = 1; i <= [stream size]+1; i++) {
-		NSInteger r = [stream LA:i];
-		if (r == (NSInteger)ANTLRCharStreamEOF) {
-			eofFound = YES;
-            break;
-		}
-	}
-	STAssertTrue(eofFound, @"EOF Was not found in stream, Length =%d, index = %d, i = %d", [stream size], stream.index, i);
-	[stream release];
-}
-
--(void) testLTEOF
-{
-    NSInteger i;
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"This is a string\nused for input"];
-	BOOL eofFound = NO;
-	for ( i = 1; i <= [stream size]+1; i++) {
-		NSInteger r = [stream LT:i];
-		if (r == (NSInteger)ANTLRCharStreamEOF) {
-			eofFound = YES;
-            break;
-		}
-	}
-	STAssertTrue(eofFound, @"EOF Was not found in stream, Length =%d, index = %d, i = %d", [stream size], stream.index, i);
-	[stream release];
-}
-
--(void) testSeek
-{
-	ANTLRStringStream *stream =[ANTLRStringStream newANTLRStringStream:@"This is a string used for input"];
-	[stream seek:10];
-	STAssertTrue(stream.index == 10, @"Index should be 10");
-	// Get char 10 which is s (with 0 being T)
-	STAssertTrue([stream LA:1] > -1 && (char)[stream LA:1] == 's', @"Char returned should be s");
-	[stream release];
-}
-
--(void) testSeekMarkAndRewind
-{
-	ANTLRStringStream *stream =[ANTLRStringStream newANTLRStringStream:@"This is a string used for input"];
-	[stream mark];
-	[stream seek:10];
-	STAssertTrue(stream.index == 10, @"Index should be 10");
-	[stream rewind];
-	STAssertTrue(stream.index == 0, @"Index should be 0");
-	[stream seek:5];
-	STAssertTrue(stream.index == 5, @"Index should be 5");
-	[stream mark]; // make a new marker to test a branch.
-	[stream seek:10];
-	STAssertTrue(stream.index == 10, @"Index should be 10");
-	[stream rewind]; // should be marked to 5.
-	STAssertTrue(stream.index == 5, @"Index should be 5");
-	[stream release];
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/token/ANTLRCommonTokenTest.h b/antlr-3.4/runtime/ObjC/Framework/test/runtime/token/ANTLRCommonTokenTest.h
deleted file mode 100644
index 77f028b..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/token/ANTLRCommonTokenTest.h
+++ /dev/null
@@ -1,25 +0,0 @@
-//
-//  ANTLRCommonTokenTest.h
-//  ANTLR
-//
-//  Created by Ian Michell on 25/05/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import <SenTestingKit/SenTestingKit.h>
-
-
-@interface ANTLRCommonTokenTest : SenTestCase 
-{
-
-}
-
--(void) test01InitAndRelease;
--(void) test02GetEOFToken;
--(void) test03InitWithTokenType;
--(void) test04InitWithTokenTypeAndText;
--(void) test05InitWithCharStream;
--(void) test06InitWithToken;
--(void) test07TokenDescription;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/token/ANTLRCommonTokenTest.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/token/ANTLRCommonTokenTest.m
deleted file mode 100644
index e945c5d..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/token/ANTLRCommonTokenTest.m
+++ /dev/null
@@ -1,98 +0,0 @@
-//
-//  ANTLRCommonTokenTest.m
-//  ANTLR
-//
-//  Created by Ian Michell on 25/05/2010.
-//  Copyright 2010 Ian Michell and Alan Condit. All rights reserved.
-//
-
-#import "ANTLRCommonTokenTest.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRStringStream.h"
-
-@implementation ANTLRCommonTokenTest
-
--(void) test01InitAndRelease
-{
-	ANTLRCommonToken *token = [[ANTLRCommonToken newToken] retain];
-	STAssertNotNil(token, @"Token was nil");
-	[token release];
-}
-
--(void) test02GetEOFToken
-{
-	ANTLRCommonToken *token = [[ANTLRCommonToken eofToken] retain];
-	STAssertNotNil(token, @"Token was nil");
-	STAssertEquals(token.type, (NSInteger)ANTLRTokenTypeEOF, @"Token was not of type ANTLRTokenTypeEOF");
-	[token release];
-}
-
--(void) test03InitWithTokenType
-{
-	ANTLRCommonToken *token = [[ANTLRCommonToken newToken:ANTLRTokenTypeUP] retain];
-	token.text = @"<UP>";
-	STAssertNotNil(token, @"Token was nil");
-	STAssertEquals(token.type, (NSInteger)ANTLRTokenTypeUP, @"Token was not of type ANTLRTokenTypeUP");
-	STAssertNotNil(token.text, @"Token text was nil, was expecting <UP>");
-	STAssertTrue([token.text isEqualToString:@"<UP>"], @"Token text was not <UP> was instead: %@", token.text);
-	[token release];
-}
-
--(void) test04InitWithTokenTypeAndText
-{
-	ANTLRCommonToken *token = [[ANTLRCommonToken newToken:ANTLRTokenTypeUP Text:@"<UP>"] retain];
-	STAssertNotNil(token, @"Token was nil");
-	STAssertEquals(token.type, (NSInteger)ANTLRTokenTypeUP, @"Token was not of type ANTLRTokenTypeUP");
-	STAssertNotNil(token.text, @"Token text was nil, was expecting <UP>");
-	STAssertTrue([token.text isEqualToString:@"<UP>"], @"Token text was not <UP> was instead: %@", token.text);
-	[token release];
-}
-
--(void) test05InitWithCharStream
-{
-	ANTLRStringStream *stream = [[ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"] retain];
-	ANTLRCommonToken *token = [[ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5] retain];
-	STAssertNotNil(token, @"Token was nil");
-	STAssertEquals(token.type, (NSInteger)555, @"Token was not of type 555"); // Nice random type number
-	STAssertNotNil(token.text, @"Token text was nil, was expecting ||");
-	STAssertTrue([token.text isEqualToString:@"||"], @"Token text was not || was instead: %@", token.text);
-	[token release];
-    [stream release];
-}
-
--(void) test06InitWithToken
-{
-	ANTLRStringStream *stream = [[ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"] retain];
-	ANTLRCommonToken *token = [[ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5] retain];
-	STAssertNotNil(token, @"Token was nil");
-	STAssertEquals(token.type, (NSInteger)555, @"Token was not of type 555"); // Nice random type number
-	STAssertNotNil(token.text, @"Token text was nil, was expecting ||");
-	STAssertTrue([token.text isEqualToString:@"||"], @"Token text was not || was instead: %@", token.text);
-	
-	ANTLRCommonToken *newToken = [[ANTLRCommonToken newTokenWithToken:token] retain];
-	STAssertNotNil(newToken, @"New token is nil!");
-	STAssertEquals(newToken.type, token.type, @"Token types do not match %d:%d!", newToken.type, token.type);
-	STAssertEquals(newToken.line, token.line, @"Token lines do not match!");
-	STAssertEquals(newToken.index, token.index, @"Token indexes do not match");
-	STAssertEquals(newToken.channel, token.channel, @"Token channels are not the same");
-	STAssertEquals(newToken.charPositionInLine, token.charPositionInLine, @"Token char positions in lines do not match");
-	STAssertEquals(newToken.startIndex, token.startIndex, @"Token start positions do not match");
-	STAssertEquals(newToken.stopIndex, token.stopIndex, @"Token stop positions do not match");
-	STAssertTrue([newToken.text isEqualToString:token.text], @"Token text does not match!");
-	[token release];
-	[newToken release];
-    [stream release];
-}
-
--(void) test07TokenDescription
-{
-    NSString *aDescription;
-	ANTLRStringStream *stream = [[ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"] retain];
-	ANTLRCommonToken *token = [[ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5] retain];
-    aDescription = [token description];
-	STAssertTrue([aDescription isEqualToString:@"[@0, 4:5='||',<555>,0:0]"], @"String description for token is not correct! got %@", aDescription);
-    [token release];
-    [stream release];
-}
-
-@end
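
For comparison, the Java runtime's CommonToken offers the same constructors the Objective-C token tests above exercise: type only, type plus text, a slice of a char stream, and a copy constructor. A hedged sketch, with an illustrative class name and the same arbitrary token type 555 used in the tests:

    // CommonTokenSketch.java -- illustrative only; the class name is made up.
    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.CharStream;
    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;

    public class CommonTokenSketch {
        public static void main(String[] args) {
            CharStream input =
                    new ANTLRStringStream("this||is||a||double||piped||separated||csv");

            // A token covering characters 4..5 of the input ("||") on the default channel;
            // its text is pulled lazily from the underlying stream.
            CommonToken token = new CommonToken(input, 555, Token.DEFAULT_CHANNEL, 4, 5);
            System.out.println(token.getText());   // ||
            System.out.println(token);             // e.g. [@-1,4:5='||',<555>,0:0]

            // The copy constructor carries type, text, channel and positions over.
            CommonToken copy = new CommonToken(token);
            System.out.println(copy.getType() == token.getType());        // true
            System.out.println(copy.getText().equals(token.getText()));   // true

            // EOF and the imaginary UP navigation type are plain int constants.
            CommonToken eof = new CommonToken(Token.EOF);
            CommonToken up  = new CommonToken(Token.UP, "<UP>");
            System.out.println(eof.getType() + " / " + up.getText());     // -1 / <UP>
        }
    }
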
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonErrorNodeTest.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonErrorNodeTest.m
deleted file mode 100755
index b5d1c8a..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonErrorNodeTest.m
+++ /dev/null
@@ -1,14 +0,0 @@
-//
-//  ANTLRCommonErrorNodeTest.m
-//  ANTLR
-//
-//  Created by Ian Michell on 10/06/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import "ANTLRCommonErrorNodeTest.h"
-
-
-@implementation ANTLRCommonErrorNodeTest
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeAdaptorTest.h b/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeAdaptorTest.h
deleted file mode 100755
index 7326675..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeAdaptorTest.h
+++ /dev/null
@@ -1,16 +0,0 @@
-//
-//  ANTLRCommonTreeAdaptorTest.h
-//  ANTLR
-//
-//  Created by Ian Michell on 10/06/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import <SenTestingKit/SenTestingKit.h>
-
-
-@interface ANTLRCommonTreeAdaptorTest : SenTestCase {
-
-}
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeAdaptorTest.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeAdaptorTest.m
deleted file mode 100755
index 9c9a7bb..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeAdaptorTest.m
+++ /dev/null
@@ -1,14 +0,0 @@
-//
-//  ANTLRCommonTreeAdaptorTest.m
-//  ANTLR
-//
-//  Created by Ian Michell on 10/06/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import "ANTLRCommonTreeAdaptorTest.h"
-
-
-@implementation ANTLRCommonTreeAdaptorTest
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeTest.h b/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeTest.h
deleted file mode 100644
index 36d23b3..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeTest.h
+++ /dev/null
@@ -1,42 +0,0 @@
-//
-//  ANTLRCommonTreeTest.h
-//  ANTLR
-//
-//  Created by Ian Michell on 26/05/2010.
-//  Copyright 2010 Ian Michell and Alan Condit. All rights reserved.
-//
-
-#import <SenTestingKit/SenTestingKit.h>
-
-
-@interface ANTLRCommonTreeTest : SenTestCase 
-{
-}
-
--(void) test01InitAndRelease;
--(void) test02InitWithTree;
--(void) test03WithToken;
--(void) test04InvalidTreeNode;
--(void) test05InitWithCommonTreeNode;
--(void) test06CopyTree;
--(void) test07Description;
--(void) test08Text;
--(void) test09AddChild;
--(void) test10AddChildren;
--(void) test11AddSelfAsChild;
--(void) test12AddEmptyChildWithNoChildren;
--(void) test13AddEmptyChildWithChildren;
--(void) test14ChildAtIndex;
--(void) test15SetChildAtIndex;
--(void) test16GetAncestor;
--(void) test17FirstChildWithType;
--(void) test18SanityCheckParentAndChildIndexesForParentTree;
--(void) test19DeleteChild;
--(void) test20TreeDescriptions;
--(void) test21ReplaceChildrenAtIndexWithNoChildren;
--(void) test22ReplaceChildrenAtIndex;
--(void) test23ReplaceChildrenAtIndexWithChild;
--(void) test24ReplacechildrenAtIndexWithLessChildren;
--(void) test25ReplacechildrenAtIndexWithMoreChildren;
-
-@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeTest.m b/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeTest.m
deleted file mode 100644
index b944721..0000000
--- a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonTreeTest.m
+++ /dev/null
@@ -1,555 +0,0 @@
-//
-//  ANTLRCommonTreeTest.m
-//  ANTLR
-//
-//  Created by Ian Michell on 26/05/2010.
-//  Copyright 2010 Ian Michell. All rights reserved.
-//
-
-#import "ANTLRBaseTree.h"
-#import "ANTLRCommonTreeTest.h"
-#import "ANTLRStringStream.h"
-#import "ANTLRCommonTree.h"
-#import "ANTLRCommonToken.h"
-#import "ANTLRError.h"
-#import "ANTLRRuntimeException.h"
-
-@implementation ANTLRCommonTreeTest
-
--(void) test01InitAndRelease
-{
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTree];
-	STAssertNotNil(tree, @"Tree was nil");
-	// FIXME: It doesn't do anything else, perhaps initWithTree should set something somewhere, java says no though...
-    return;
-}
-
--(void) test02InitWithTree
-{
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTree];
-	STAssertNotNil(tree, @"Tree was nil");
-    if (tree != nil)
-        STAssertEquals([tree getType], (NSInteger)ANTLRTokenTypeInvalid, @"Tree should have an invalid token type, because it has no token");
-    // [tree release];
-    return;
-}
-
--(void) test03WithToken
-{
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	token.line = 1;
-	token.charPositionInLine = 4;
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	STAssertNotNil(tree, @"Tree was nil");
-    if (tree != nil)
-        STAssertNotNil(tree.token, @"Tree with token was nil");
-    if (tree != nil && tree.token != nil) {
-        STAssertEquals((NSUInteger) tree.token.line, (NSUInteger)1, [NSString stringWithFormat:@"Tree should be at line 1, but was at %d", tree.token.line] );
-        STAssertEquals((NSUInteger) tree.token.charPositionInLine, (NSUInteger)4, [NSString stringWithFormat:@"Char position should be 4, but was at %d", tree.token.charPositionInLine]);
-        STAssertNotNil(((ANTLRCommonToken *)tree.token).text, @"Tree with token with text was nil");
-    }
-    if (tree != nil && tree.token != nil && tree.token.text != nil)
-        STAssertTrue([tree.token.text isEqualToString:@"||"], @"Text was not ||");
-	//[tree release];
-    return;
-}
-
--(void) test04InvalidTreeNode
-{
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:[ANTLRCommonToken invalidToken]];
-	STAssertNotNil(tree, @"Tree was nil");
-	STAssertEquals(tree.token.type, (NSInteger)ANTLRTokenTypeInvalid, @"Tree Token type was not ANTLRTokenTypeInvalid");
-	//[tree release];
-    return;
-}
-
--(void) test05InitWithCommonTreeNode
-{
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	STAssertNotNil(tree, @"Tree was nil");
-	STAssertNotNil(tree.token, @"Tree token was nil");
-	ANTLRCommonTree *newTree = [ANTLRCommonTree newTreeWithTree:tree];
-	STAssertNotNil(newTree, @"New tree was nil");
-	STAssertNotNil(newTree.token, @"New tree token was nil");
-	STAssertEquals(newTree.token, tree.token, @"Tokens did not match");
-	STAssertEquals(newTree.startIndex, tree.startIndex, @"Token start index did not match %d:%d", newTree.startIndex, tree.startIndex);
-	STAssertEquals(newTree.stopIndex, tree.stopIndex, @"Token stop index did not match %d:%d", newTree.stopIndex, tree.stopIndex);
-	//[stream release];
-	//[tree release];
-	//[newTree release];
-	//[token release];
-    return;
-}
-
--(void) test06CopyTree
-{
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	STAssertNotNil(tree, @"Tree was nil");
-	ANTLRCommonTree *newTree = (ANTLRCommonTree *)[tree copyWithZone:nil];
-	STAssertTrue([newTree isKindOfClass:[ANTLRCommonTree class]], @"Copied tree was not an ANTLRCommonTree");
-	STAssertNotNil(newTree, @"New tree was nil");
-	// STAssertEquals(newTree.token, tree.token, @"Tokens did not match");
-	STAssertEquals(newTree.stopIndex, tree.stopIndex, @"Token stop index did not match");
-	STAssertEquals(newTree.startIndex, tree.startIndex, @"Token start index did not match");
-	//[stream release];
-	//[tree release];
-	//[newTree release];
-	// [token release];
-    return;
-}
-
--(void) test07Description
-{
-    NSString *aString;
-	ANTLRCommonTree *errorTree = [ANTLRCommonTree invalidNode];
-	STAssertNotNil(errorTree, @"Error tree node is nil");
-    if (errorTree != nil) {
-        aString = [errorTree description];
-        STAssertNotNil( aString, @"errorTree description returned nil");
-        if (aString != nil)
-            STAssertTrue([aString isEqualToString:@"<errornode>"], @"Not a valid error node description %@", aString);
-    }
-	//[errorTree release];
-	
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeUP];
-	STAssertNotNil(tree, @"Tree is nil");
-    if (tree != nil)
-        STAssertNil([tree description], @"Tree description was not nil, was: %@", [tree description]);
-	//[tree release];
-	
-	tree = [ANTLRCommonTree newTree];
-	STAssertNotNil(tree, @"Tree is nil");
-    if (tree != nil) {
-        aString = [tree description];
-        STAssertNotNil(aString, @"tree description returned nil");
-        if (aString != nil)
-            STAssertTrue([aString isEqualToString:@"nil"], @"Tree description was not empty", [tree description]);
-    }
-	//[tree release];
-	
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	tree = [ANTLRCommonTree newTreeWithToken:token];
-	STAssertNotNil(tree, @"Tree node is nil");
-    aString = [tree description];
-    STAssertNotNil(aString, @"tree description returned nil");
-    if (aString != nil)
-        STAssertTrue([aString isEqualToString:@"||"], @"description was not || was instead %@", [tree description]);
-	//[tree release];
-    return;
-}
-
--(void) test08Text
-{
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	STAssertNotNil(tree, @"Tree was nil");
-	STAssertTrue([tree.token.text isEqualToString:@"||"], @"Tree text was not valid, should have been || was %@", tree.token.text);
-	//[tree release];
-	
-	// test nil (for line coverage)
-	tree = [ANTLRCommonTree newTree];
-	STAssertNotNil(tree, @"Tree was nil");
-	STAssertNil(tree.token.text, @"Tree text was not nil: %@", tree.token.text);
-    return;
-}
-
--(void) test09AddChild
-{
-	// Create a new tree
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTreeWithTokenType:555];
-    parent.token.line = 1;
-	parent.token.charPositionInLine = 1;
-	
-	// Child tree
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	token.line = 1;
-	token.charPositionInLine = 4;
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	
-	// Add a child to the parent tree
-	[parent addChild:tree];
-
-
-	STAssertNotNil(parent, @"parent was nil");
-    if (parent != nil)
-        STAssertNotNil(parent.token, @"parent was nil");
-	STAssertEquals((NSInteger)parent.token.line, (NSInteger)1, @"Tree should be at line 1 but is %d", parent.token.line);
-	STAssertEquals((NSInteger)parent.token.charPositionInLine, (NSInteger)1, @"Char position should be 1 but is %d", parent.token.charPositionInLine);
-	
-	STAssertEquals((NSInteger)[parent getChildCount], (NSInteger)1, @"There should be 1 child but there were %d", [parent getChildCount]);
-	STAssertEquals((NSInteger)[[parent getChild:0] getChildIndex], (NSInteger)0, @"Child index should be 0 was : %d", [[parent getChild:0] getChildIndex]);
-	STAssertEquals([[parent getChild:0] getParent], parent, @"Parent not set for child");
-	
-	//[parent release];
-    return;
-}
-
--(void) test10AddChildren
-{
-	// Create a new tree
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	
-	// Child tree
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	token.line = 1;
-	token.charPositionInLine = 4;
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	
-	// Add a child to the parent tree
-	[parent addChild: tree];
-	
-	ANTLRCommonTree *newParent = [ANTLRCommonTree newTree];
-	[newParent addChildren:parent.children];
-	
-	STAssertEquals([newParent getChild:0], [parent getChild:0], @"Children did not match");
-    return;
-}
-
--(void) test11AddSelfAsChild
-{
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	@try 
-	{
-		[parent addChild:parent];
-	}
-	@catch (NSException *e) 
-	{
-		STAssertTrue([[e name] isEqualToString:@"ANTLRIllegalArgumentException"], @"Got wrong kind of exception! %@", [e name]);
-		//[parent release];
-		return;
-	}
-	STFail(@"Did not get an exception when adding an empty child!");
-    return;
-}
-
--(void) test12AddEmptyChildWithNoChildren
-{
-	ANTLRCommonTree *emptyChild = [ANTLRCommonTree newTree];
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	[parent addChild:emptyChild];
-	STAssertEquals((NSInteger)[parent getChildCount], (NSInteger)0, @"There were supposed to be no children!");
-	//[parent release];
-	//[emptyChild release];
-    return;
-}
-
--(void) test13AddEmptyChildWithChildren
-{
-	// Create a new tree
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	
-	// Child tree
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	token.line = 1;
-	token.charPositionInLine = 4;
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	
-	// Add a child to the parent tree
-	[parent addChild: tree];
-	
-	ANTLRCommonTree *newParent = [ANTLRCommonTree newTree];
-	[newParent addChild:parent];
-	
-	STAssertEquals((NSInteger)[newParent getChildCount], (NSInteger)1, @"Parent should only have 1 child: %d", [newParent getChildCount]);
-	STAssertEquals([newParent getChild:0], tree, @"Child was not the correct object.");
-	//[parent release];
-	//[newParent release];
-	//[tree release];
-    return;
-}
-
--(void) test14ChildAtIndex
-{
-	// Create a new tree
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	
-	// Child tree
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	
-	// Add a child to the parent tree
-	[parent addChild: tree];
-	
-	STAssertEquals((NSInteger)[parent getChildCount], (NSInteger)1, @"There were either no children or more than 1: %d", [parent getChildCount]);
-	
-	ANTLRCommonTree *child = [parent getChild:0];
-	STAssertNotNil(child, @"Child at index 0 should not be nil");
-	STAssertEquals(child, tree, @"Child and Original tree were not the same");
-	//[parent release];
-    return;
-}
-
--(void) test15SetChildAtIndex
-{
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	
-	// Child tree
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	
-	
-	tree = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeUP];
-	tree.token.text = @"<UP>";
-	[parent addChild:tree];
-	
-	STAssertTrue([parent getChild:0] == tree, @"Trees don't match");
-	[parent setChild:0 With:tree];
-	
-	ANTLRCommonTree *child = [parent getChild:0];
-	STAssertTrue([parent getChildCount] == 1, @"There were either no children or more than 1: %d", [parent getChildCount]);
-	STAssertNotNil(child, @"Child at index 0 should not be nil");
-	STAssertEquals(child, tree, @"Child and Original tree were not the same");
-	//[parent release];
-    return;
-}
-
--(void) test16GetAncestor
-{
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeUP];
-	parent.token.text = @"<UP>";
-	
-	ANTLRCommonTree *down = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeDOWN];
-	down.token.text = @"<DOWN>";
-	
-	[parent addChild:down];
-	
-	// Child tree
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	
-	[down addChild:tree];
-	STAssertTrue([tree hasAncestor:ANTLRTokenTypeUP], @"Should have an ancestor of type ANTLRTokenTypeUP");
-	
-	ANTLRCommonTree *ancestor = [tree getAncestor:ANTLRTokenTypeUP];
-	STAssertNotNil(ancestor, @"Ancestor should not be nil");
-	STAssertEquals(ancestor, parent, @"Ancestors do not match");
-	//[parent release];
-    return;
-}
-
--(void) test17FirstChildWithType
-{
-	// Create a new tree
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	
-	ANTLRCommonTree *up = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeUP];
-	ANTLRCommonTree *down = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeDOWN];
-	
-	[parent addChild:up];
-	[parent addChild:down];
-	
-	ANTLRCommonTree *found = (ANTLRCommonTree *)[parent getFirstChildWithType:ANTLRTokenTypeDOWN];
-	STAssertNotNil(found, @"Child with type DOWN should not be nil");
-    if (found != nil) {
-        STAssertNotNil(found.token, @"Child token with type DOWN should not be nil");
-        if (found.token != nil)
-            STAssertEquals((NSInteger)found.token.type, (NSInteger)ANTLRTokenTypeDOWN, @"Token type was not correct, should be down!");
-    }
-	found = (ANTLRCommonTree *)[parent getFirstChildWithType:ANTLRTokenTypeUP];
-	STAssertNotNil(found, @"Child with type UP should not be nil");
-    if (found != nil) {
-        STAssertNotNil(found.token, @"Child token with type UP should not be nil");
-        if (found.token != nil)
-            STAssertEquals((NSInteger)found.token.type, (NSInteger)ANTLRTokenTypeUP, @"Token type was not correct, should be up!");
-    }
-	//[parent release];
-    return;
-}
-
--(void) test18SanityCheckParentAndChildIndexesForParentTree
-{
-	// Child tree
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTreeWithTokenType:555];
-	STAssertNotNil(tree, @"tree should not be nil");
-	@try 
-	{
-		[tree sanityCheckParentAndChildIndexes];
-	}
-	@catch (NSException * e) 
-	{
-		STFail(@"Exception was thrown and this is not what's right...");
-	}
-	
-	BOOL passed = NO;
-	@try 
-	{
-		[tree sanityCheckParentAndChildIndexes:parent At:0];
-	}
-	@catch (NSException * e) 
-	{
-		STAssertTrue([[e name] isEqualToString:@"ANTLRIllegalStateException"], @"Exception was not an ANTLRIllegalStateException but was %@", [e name]);
-		passed = YES;
-	}
-	if (!passed)
-	{
-		STFail(@"An exception should have been thrown");
-	}
-	
-	STAssertNotNil(parent, @"parent should not be nil");
-	[parent addChild:tree];
-	@try 
-	{
-		[tree sanityCheckParentAndChildIndexes:parent At:0];
-	}
-	@catch (NSException * e) 
-	{
-		STFail(@"No exception should have been thrown!");
-	}
-    return;
-}
-
--(void) test19DeleteChild
-{
-	// Child tree
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	[parent addChild:tree];
-	
-	ANTLRCommonTree *deletedChild = [parent deleteChild:0];
-	STAssertEquals(deletedChild, tree, @"Children do not match!");
-	STAssertEquals((NSInteger)[parent getChildCount], (NSInteger)0, @"Child count should be zero!");
-    return;
-}
-
--(void) test20TreeDescriptions
-{
-	// Child tree
-	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
-	ANTLRCommonToken *token = [ANTLRCommonToken newToken:stream Type:555 Channel:ANTLRTokenChannelDefault Start:4 Stop:5];
-	ANTLRCommonTree *tree = [ANTLRCommonTree newTreeWithToken:token];
-	
-	// Description for tree
-	NSString *treeDesc = [tree treeDescription];
-    STAssertNotNil(treeDesc, @"Tree description should not be nil");
-    STAssertTrue([treeDesc isEqualToString:@"||"], @"Tree description was not || but rather %@", treeDesc);
-	
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	STAssertTrue([[parent treeDescription] isEqualToString:@"nil"], @"Tree description was not nil was %@", [parent treeDescription]);
-	[parent addChild:tree];
-	treeDesc = [parent treeDescription];
-	STAssertTrue([treeDesc isEqualToString:@"||"], @"Tree description was not || but was: %@", treeDesc);
-	
-	// Test non empty parent
-	ANTLRCommonTree *down = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeDOWN];
-	down.token.text = @"<DOWN>";
-	
-	[tree addChild:down];
-	treeDesc = [parent treeDescription];
-	STAssertTrue([treeDesc isEqualToString:@"(|| <DOWN>)"], @"Tree description was wrong expected (|| <DOWN>) but got: %@", treeDesc);
-    return;
-}
-
--(void) test21ReplaceChildrenAtIndexWithNoChildren
-{
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	ANTLRCommonTree *parent2 = [ANTLRCommonTree newTree];
-	ANTLRCommonTree *child = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeDOWN];
-	child.token.text = @"<DOWN>";
-	[parent2 addChild:child];
-	@try 
-	{
-		[parent replaceChildrenFrom:1 To:2 With:parent2];
-	}
-	@catch (NSException *ex)
-	{
-		STAssertTrue([[ex name] isEqualToString:@"ANTLRIllegalArgumentException"], @"Expected an illegal argument exception... Got instead: %@", [ex name]);
-		return;
-	}
-	STFail(@"Exception was not thrown when I tried to replace a child on a parent with no children");
-    return;
-}
-
--(void) test22ReplaceChildrenAtIndex
-{
-	ANTLRCommonTree *parent1 = [ANTLRCommonTree newTree];
-	ANTLRCommonTree *child1 = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeUP];
-	[parent1 addChild:child1];
-	ANTLRCommonTree *parent2 = [ANTLRCommonTree newTree];
-	ANTLRCommonTree *child2 = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeDOWN];
-	child2.token.text = @"<DOWN>";
-	[parent2 addChild:child2];
-	
-	[parent2 replaceChildrenFrom:0 To:0 With:parent1];
-	
-	STAssertEquals([parent2 getChild:0], child1, @"Child for parent 2 should have been from parent 1");
-    return;
-}
-
--(void) test23ReplaceChildrenAtIndexWithChild
-{
-	ANTLRCommonTree *replacement = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeUP];
-	replacement.token.text = @"<UP>";
-	ANTLRCommonTree *parent = [ANTLRCommonTree newTree];
-	ANTLRCommonTree *child = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeDOWN];
-	child.token.text = @"<DOWN>";
-	[parent addChild:child];
-	
-	[parent replaceChildrenFrom:0 To:0 With:replacement];
-	
-	STAssertTrue([parent getChild:0] == replacement, @"Children do not match");
-    return;
-}
-
--(void) test24ReplacechildrenAtIndexWithLessChildren
-{
-	ANTLRCommonTree *parent1 = [ANTLRCommonTree newTree];
-	ANTLRCommonTree *child1 = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeUP];
-	[parent1 addChild:child1];
-	
-	ANTLRCommonTree *parent2 = [ANTLRCommonTree newTree];
-	
-	ANTLRCommonTree *child2 = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeEOF];
-	[parent2 addChild:child2];
-	
-	ANTLRCommonTree *child3 = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeDOWN];
-	child2.token.text = @"<DOWN>";
-	[parent2 addChild:child3];
-	
-	[parent2 replaceChildrenFrom:0 To:1 With:parent1];
-	STAssertEquals((NSInteger)[parent2 getChildCount], (NSInteger)1, @"Should have one child but has %d", [parent2 getChildCount]);
-	STAssertEquals([parent2 getChild:0], child1, @"Child for parent 2 should have been from parent 1");
-    return;
-}
-
--(void) test25ReplacechildrenAtIndexWithMoreChildren
-{
-	ANTLRCommonTree *parent1 = [ANTLRCommonTree newTree];
-	ANTLRCommonTree *child1 = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeUP];
-	[parent1 addChild:child1];
-	ANTLRCommonTree *child2 = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeEOF];
-	[parent1 addChild:child2];
-	
-	ANTLRCommonTree *parent2 = [ANTLRCommonTree newTree];
-	
-	ANTLRCommonTree *child3 = [ANTLRCommonTree newTreeWithTokenType:ANTLRTokenTypeDOWN];
-	child2.token.text = @"<DOWN>";
-	[parent2 addChild:child3];
-	
-	[parent2 replaceChildrenFrom:0 To:0 With:parent1];
-	STAssertEquals((NSInteger)[parent2 getChildCount], (NSInteger)2, @"Should have two children but has %d", [parent2 getChildCount]);
-	STAssertEquals([parent2 getChild:0], child1, @"Child for parent 2 should have been from parent 1");
-	STAssertEquals([parent2 getChild:1], child2, @"An extra child (child2) should be in the children collection");
-    return;
-}
-
-@end
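
The tree operations covered by the deleted Objective-C tests above (addChild, ancestor lookup, child replacement, child deletion) map directly onto the Java runtime's CommonTree. A small illustrative sketch, not part of this change; the class name is made up and 555 is again an arbitrary token type:

    // CommonTreeSketch.java -- illustrative only; the class name is made up.
    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;

    public class CommonTreeSketch {
        public static void main(String[] args) {
            CommonTree root = new CommonTree(new CommonToken(Token.UP, "<UP>"));
            CommonTree down = new CommonTree(new CommonToken(Token.DOWN, "<DOWN>"));
            CommonTree leaf = new CommonTree(new CommonToken(555, "||"));

            root.addChild(down);
            down.addChild(leaf);

            // Parent/child bookkeeping and ancestor queries.
            System.out.println(root.getChildCount());         // 1
            System.out.println(leaf.hasAncestor(Token.UP));    // true
            System.out.println(root.toStringTree());           // (<UP> (<DOWN> ||))

            // Replace the child range 0..0 of "down" with another subtree.
            CommonTree replacement = new CommonTree(new CommonToken(Token.EOF, "<EOF>"));
            down.replaceChildren(0, 0, replacement);
            System.out.println(down.getChild(0).getText());    // <EOF>

            // deleteChild() hands the removed subtree back and re-numbers the rest.
            CommonTree removed = (CommonTree) root.deleteChild(0);
            System.out.println(removed.getText());              // <DOWN>
            System.out.println(root.getChildCount());           // 0
        }
    }
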
diff --git a/antlr-3.4/runtime/ObjC/README b/antlr-3.4/runtime/ObjC/README
deleted file mode 100644
index 702bf9c..0000000
--- a/antlr-3.4/runtime/ObjC/README
+++ /dev/null
@@ -1,26 +0,0 @@
-ANTLR version 3 supports target language generation for the lexical
-analyzer and parsers. Objective C was supported previously but had not
-been brought up to date for some time. This release is built on the work
-by Kay Roepke, Ian Michell and Alan Condit.
-
-The project is currently working sufficiently for me to use it in compiling
-my grammar and tree walker. I am sure that it still has some bugs but I have
-fixed all of the bugs that I have found so far.
-
-The project consists of an Objective-C runtime framework that must be
-installed in /Library/Frameworks.
-
-It also requires the installation of the String Template files to
-support the target language code generation. Hopefully, at some point
-they will be incorporated into the ANTLR release code, so that the
-individual user doesn't have to do anything but load the framework into
-the proper location. However, for now you need to create an ObjC
-directory in antlr-3.2/tool/src/main/resources/org/antlr/codegen/templates
-and then copy the ObjC ".stg" files to 
-antlr-3.2/tool/src/main/resources/org/antlr/codegen/templates/ObjC/*.
-
-There is also a java file ObjCTarget.java that goes in <
-antlr-3.2/tool/src/main/java/org/antlr/codegen/ObjCTarget/Java>.
-
-If you are using Antlr3.3 the code from here is included with the Antlr tarball. You just need
-to copy the ANTLR.framework to /Library/Frameworks.
\ No newline at end of file
diff --git a/antlr-3.4/runtime/Python/antlr3/dottreegen.py b/antlr-3.4/runtime/Python/antlr3/dottreegen.py
deleted file mode 100644
index 827d4ec..0000000
--- a/antlr-3.4/runtime/Python/antlr3/dottreegen.py
+++ /dev/null
@@ -1,210 +0,0 @@
-""" @package antlr3.dottreegenerator
-@brief ANTLR3 runtime package, tree module
-
-This module contains all support classes for AST construction and tree parsers.
-
-"""
-
-# begin[licence]
-#
-# [The "BSD licence"]
-# Copyright (c) 2005-2008 Terence Parr
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-# 3. The name of the author may not be used to endorse or promote products
-#    derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# end[licence]
-
-# lots of docstrings are missing, don't complain for now...
-# pylint: disable-msg=C0111
-
-from antlr3.tree import CommonTreeAdaptor
-import stringtemplate3
-
-class DOTTreeGenerator(object):
-    """
-    A utility class to generate DOT diagrams (graphviz) from
-    arbitrary trees.  You can pass in your own templates and
-    can pass in any kind of tree or use Tree interface method.
-    """
-
-    _treeST = stringtemplate3.StringTemplate(
-        template=(
-        "digraph {\n" +
-        "  ordering=out;\n" +
-        "  ranksep=.4;\n" +
-        "  node [shape=plaintext, fixedsize=true, fontsize=11, fontname=\"Courier\",\n" +
-        "        width=.25, height=.25];\n" +
-        "  edge [arrowsize=.5]\n" +
-        "  $nodes$\n" +
-        "  $edges$\n" +
-        "}\n")
-        )
-
-    _nodeST = stringtemplate3.StringTemplate(
-        template="$name$ [label=\"$text$\"];\n"
-        )
-
-    _edgeST = stringtemplate3.StringTemplate(
-        template="$parent$ -> $child$ // \"$parentText$\" -> \"$childText$\"\n"
-        )
-
-    def __init__(self):
-        ## Track node to number mapping so we can get proper node name back
-        self.nodeToNumberMap = {}
-
-        ## Track node number so we can get unique node names
-        self.nodeNumber = 0
-
-
-    def toDOT(self, tree, adaptor=None, treeST=_treeST, edgeST=_edgeST):
-        if adaptor is None:
-            adaptor = CommonTreeAdaptor()
-
-        treeST = treeST.getInstanceOf()
-
-        self.nodeNumber = 0
-        self.toDOTDefineNodes(tree, adaptor, treeST)
-
-        self.nodeNumber = 0
-        self.toDOTDefineEdges(tree, adaptor, treeST, edgeST)
-        return treeST
-
-
-    def toDOTDefineNodes(self, tree, adaptor, treeST, knownNodes=None):
-        if knownNodes is None:
-            knownNodes = set()
-
-        if tree is None:
-            return
-
-        n = adaptor.getChildCount(tree)
-        if n == 0:
-            # must have already dumped as child from previous
-            # invocation; do nothing
-            return
-
-        # define parent node
-        number = self.getNodeNumber(tree)
-        if number not in knownNodes:
-            parentNodeST = self.getNodeST(adaptor, tree)
-            treeST.setAttribute("nodes", parentNodeST)
-            knownNodes.add(number)
-
-        # for each child, do a "<unique-name> [label=text]" node def
-        for i in range(n):
-            child = adaptor.getChild(tree, i)
-            
-            number = self.getNodeNumber(child)
-            if number not in knownNodes:
-                nodeST = self.getNodeST(adaptor, child)
-                treeST.setAttribute("nodes", nodeST)
-                knownNodes.add(number)
-
-            self.toDOTDefineNodes(child, adaptor, treeST, knownNodes)
-
-
-    def toDOTDefineEdges(self, tree, adaptor, treeST, edgeST):
-        if tree is None:
-            return
-
-        n = adaptor.getChildCount(tree)
-        if n == 0:
-            # must have already dumped as child from previous
-            # invocation; do nothing
-            return
-
-        parentName = "n%d" % self.getNodeNumber(tree)
-
-        # for each child, do a parent -> child edge using unique node names
-        parentText = adaptor.getText(tree)
-        for i in range(n):
-            child = adaptor.getChild(tree, i)
-            childText = adaptor.getText(child)
-            childName = "n%d" % self.getNodeNumber(child)
-            edgeST = edgeST.getInstanceOf()
-            edgeST.setAttribute("parent", parentName)
-            edgeST.setAttribute("child", childName)
-            edgeST.setAttribute("parentText", parentText)
-            edgeST.setAttribute("childText", childText)
-            treeST.setAttribute("edges", edgeST)
-            self.toDOTDefineEdges(child, adaptor, treeST, edgeST)
-
-
-    def getNodeST(self, adaptor, t):
-        text = adaptor.getText(t)
-        nodeST = self._nodeST.getInstanceOf()
-        uniqueName = "n%d" % self.getNodeNumber(t)
-        nodeST.setAttribute("name", uniqueName)
-        if text is not None:
-            text = text.replace('"', r'\\"')
-        nodeST.setAttribute("text", text)
-        return nodeST
-
-
-    def getNodeNumber(self, t):
-        try:
-            return self.nodeToNumberMap[t]
-        except KeyError:
-            self.nodeToNumberMap[t] = self.nodeNumber
-            self.nodeNumber += 1
-            return self.nodeNumber - 1
-
-
-def toDOT(tree, adaptor=None, treeST=DOTTreeGenerator._treeST, edgeST=DOTTreeGenerator._edgeST):
-    """
-    Generate DOT (graphviz) for a whole tree not just a node.
-    For example, 3+4*5 should generate:
-
-    digraph {
-        node [shape=plaintext, fixedsize=true, fontsize=11, fontname="Courier",
-            width=.4, height=.2];
-        edge [arrowsize=.7]
-        "+"->3
-        "+"->"*"
-        "*"->4
-        "*"->5
-    }
-
-    Return the ST not a string in case people want to alter.
-
-    Takes a Tree interface object.
-
-    Example of invocation:
-
-        import antlr3
-        import antlr3.extras
-
-        input = antlr3.ANTLRInputStream(sys.stdin)
-        lex = TLexer(input)
-        tokens = antlr3.CommonTokenStream(lex)
-        parser = TParser(tokens)
-        tree = parser.e().tree
-        print tree.toStringTree()
-        st = antlr3.extras.toDOT(t)
-        print st
-        
-    """
-
-    gen = DOTTreeGenerator()
-    return gen.toDOT(tree, adaptor, treeST, edgeST)
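
The deleted Python dottreegen module mirrors the Java runtime's org.antlr.runtime.tree.DOTTreeGenerator. A hedged Java sketch of the same 3+4*5 example shown in the docstring above; the class name is made up, the token types are arbitrary, and the StringTemplate v3 library is assumed to be on the classpath:

    // DotTreeSketch.java -- illustrative only.
    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.DOTTreeGenerator;
    import org.antlr.stringtemplate.StringTemplate;

    public class DotTreeSketch {
        public static void main(String[] args) {
            // Hand-build the 3+4*5 example tree: (+ 3 (* 4 5)).
            CommonTree plus = new CommonTree(new CommonToken(1, "+"));
            CommonTree star = new CommonTree(new CommonToken(1, "*"));
            plus.addChild(new CommonTree(new CommonToken(2, "3")));
            plus.addChild(star);
            star.addChild(new CommonTree(new CommonToken(2, "4")));
            star.addChild(new CommonTree(new CommonToken(2, "5")));

            // Render the whole tree, not just one node, as a DOT (graphviz) description.
            DOTTreeGenerator gen = new DOTTreeGenerator();
            StringTemplate st = gen.toDOT(plus);
            System.out.println(st.toString());
        }
    }
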
diff --git a/antlr-3.4/runtime/Python/antlr3/streams.py b/antlr-3.4/runtime/Python/antlr3/streams.py
deleted file mode 100644
index c9ba7ca..0000000
--- a/antlr-3.4/runtime/Python/antlr3/streams.py
+++ /dev/null
@@ -1,1522 +0,0 @@
-"""ANTLR3 runtime package"""
-
-# begin[licence]
-#
-# [The "BSD licence"]
-# Copyright (c) 2005-2008 Terence Parr
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-# 3. The name of the author may not be used to endorse or promote products
-#    derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# end[licence]
-
-import codecs
-from StringIO import StringIO
-
-from antlr3.constants import DEFAULT_CHANNEL, EOF
-from antlr3.tokens import Token, CommonToken
-
-
-############################################################################
-#
-# basic interfaces
-#   IntStream
-#    +- CharStream
-#    \- TokenStream
-#
-# subclasses must implement all methods
-#
-############################################################################
-
-class IntStream(object):
-    """
-    @brief Base interface for streams of integer values.
-
-    A simple stream of integers used when all I care about is the char
-    or token type sequence (such as interpretation).
-    """
-
-    def consume(self):
-        raise NotImplementedError
-
-
-    def LA(self, i):
-        """Get int at current input pointer + i ahead where i=1 is next int.
-
-        Negative indexes are allowed.  LA(-1) is previous token (token
-	just matched).  LA(-i) where i is before first token should
-	yield -1, invalid char / EOF.
-	"""
-
-        raise NotImplementedError
-
-
-    def mark(self):
-        """
-        Tell the stream to start buffering if it hasn't already.  Return
-        current input position, index(), or some other marker so that
-        when passed to rewind() you get back to the same spot.
-        rewind(mark()) should not affect the input cursor.  The Lexer
-        tracks line/col info as well as input index so its markers are
-        not pure input indexes.  Same for tree node streams.
-        """
-
-        raise NotImplementedError
-
-
-    def index(self):
-        """
-        Return the current input symbol index 0..n where n indicates the
-        last symbol has been read.  The index is the symbol about to be
-        read not the most recently read symbol.
-        """
-
-        raise NotImplementedError
-
-
-    def rewind(self, marker=None):
-        """
-        Reset the stream so that next call to index would return marker.
-        The marker will usually be index() but it doesn't have to be.  It's
-        just a marker to indicate what state the stream was in.  This is
-        essentially calling release() and seek().  If there are markers
-        created after this marker argument, this routine must unroll them
-        like a stack.  Assume the state the stream was in when this marker
-        was created.
-
-        If marker is None:
-        Rewind to the input position of the last marker.
-        Used currently only after a cyclic DFA and just
-        before starting a sem/syn predicate to get the
-        input position back to the start of the decision.
-        Do not "pop" the marker off the state.  mark(i)
-        and rewind(i) should balance still. It is
-        like invoking rewind(last marker) but it should not "pop"
-        the marker off.  It's like seek(last marker's input position).
-	"""
-
-        raise NotImplementedError
-
-
-    def release(self, marker=None):
-        """
-        You may want to commit to a backtrack but don't want to force the
-        stream to keep bookkeeping objects around for a marker that is
-        no longer necessary.  This will have the same behavior as
-        rewind() except it releases resources without the backward seek.
-        This must throw away resources for all markers back to the marker
-        argument.  So if you're nested 5 levels of mark(), and then release(2)
-        you have to release resources for depths 2..5.
-	"""
-
-        raise NotImplementedError
-
-
-    def seek(self, index):
-        """
-        Set the input cursor to the position indicated by index.  This is
-        normally used to seek ahead in the input stream.  No buffering is
-        required to do this unless you know your stream will use seek to
-        move backwards such as when backtracking.
-
-        This is different from rewind in its multi-directional
-        requirement and in that its argument is strictly an input cursor
-        (index).
-
-        For char streams, seeking forward must update the stream state such
-        as line number.  For seeking backwards, you will be presumably
-        backtracking using the mark/rewind mechanism that restores state and
-        so this method does not need to update state when seeking backwards.
-
-        Currently, this method is only used for efficient backtracking using
-        memoization, but in the future it may be used for incremental parsing.
-
-        The index is 0..n-1.  A seek to position i means that LA(1) will
-        return the ith symbol.  So, seeking to 0 means LA(1) will return the
-        first element in the stream.
-        """
-
-        raise NotImplementedError
-
-
-    def size(self):
-        """
-        Only makes sense for streams that buffer everything up probably, but
-        might be useful to display the entire stream or for testing.  This
-        value includes a single EOF.
-	"""
-
-        raise NotImplementedError
-
-
-    def getSourceName(self):
-        """
-        Where are you getting symbols from?  Normally, implementations will
-        pass the buck all the way to the lexer who can ask its input stream
-        for the file name or whatever.
-        """
-
-        raise NotImplementedError
-
-
-class CharStream(IntStream):
-    """
-    @brief A source of characters for an ANTLR lexer.
-
-    This is an abstract class that must be implemented by a subclass.
-
-    """
-
-    # pylint does not realize that this is an interface, too
-    #pylint: disable-msg=W0223
-
-    EOF = -1
-
-
-    def substring(self, start, stop):
-        """
-        For infinite streams, you don't need this; primarily I'm providing
-        a useful interface for action code.  Just make sure actions don't
-        use this on streams that don't support it.
-        """
-
-        raise NotImplementedError
-
-
-    def LT(self, i):
-        """
-        Get the ith character of lookahead.  This is the same usually as
-        LA(i).  This will be used for labels in the generated
-        lexer code.  I'd prefer to return a char here type-wise, but it's
-        probably better to be 32-bit clean and be consistent with LA.
-        """
-
-        raise NotImplementedError
-
-
-    def getLine(self):
-        """ANTLR tracks the line information automatically"""
-
-        raise NotImplementedError
-
-
-    def setLine(self, line):
-        """
-        Because this stream can rewind, we need to be able to reset the line
-        """
-
-        raise NotImplementedError
-
-
-    def getCharPositionInLine(self):
-        """
-        The index of the character relative to the beginning of the line 0..n-1
-        """
-
-        raise NotImplementedError
-
-
-    def setCharPositionInLine(self, pos):
-        raise NotImplementedError
-
-
-class TokenStream(IntStream):
-    """
-
-    @brief A stream of tokens accessing tokens from a TokenSource
-
-    This is an abstract class that must be implemented by a subclass.
-
-    """
-
-    # pylint does not realize that this is an interface, too
-    #pylint: disable-msg=W0223
-
-    def LT(self, k):
-        """
-        Get Token at current input pointer + i ahead where i=1 is next Token.
-        i<0 indicates tokens in the past.  So -1 is previous token and -2 is
-        two tokens ago. LT(0) is undefined.  For i>=n, return Token.EOFToken.
-        Return null for LT(0) and any index that results in an absolute address
-        that is negative.
-	"""
-
-        raise NotImplementedError
-
-
-    def range(self):
-        """
-        How far ahead has the stream been asked to look?  The return
-        value is a valid index from 0..n-1.
-        """
-
-        raise NotImplementedError
-
-
-    def get(self, i):
-        """
-        Get a token at an absolute index i; 0..n-1.  This is really only
-        needed for profiling and debugging and token stream rewriting.
-        If you don't want to buffer up tokens, then this method makes no
-        sense for you.  Naturally you can't use the rewrite stream feature.
-        I believe DebugTokenStream can easily be altered to not use
-        this method, removing the dependency.
-        """
-
-        raise NotImplementedError
-
-
-    def getTokenSource(self):
-        """
-        Where is this stream pulling tokens from?  This is not the name, but
-        the object that provides Token objects.
-	"""
-
-        raise NotImplementedError
-
-
-    def toString(self, start=None, stop=None):
-        """
-        Return the text of all tokens from start to stop, inclusive.
-        If the stream does not buffer all the tokens then it can just
-        return "" or null;  Users should not access $ruleLabel.text in
-        an action of course in that case.
-
-        Because the user is not required to use a token with an index stored
-        in it, we must provide a means for two token objects themselves to
-        indicate the start/end location.  Most often this will just delegate
-        to the other toString(int,int).  This is also parallel with
-        the TreeNodeStream.toString(Object,Object).
-	"""
-
-        raise NotImplementedError
-
-
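
The lookahead contract spelled out in the IntStream/CharStream docstrings above (LA(1) is the next symbol, LA(-1) the previous one, EOF is -1, and consume/seek keep the cursor consistent) can be exercised directly against the Java runtime's ANTLRStringStream. An illustrative sketch, not part of this change:

    // LookaheadSketch.java -- illustrative only; the class name is made up.
    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.CharStream;

    public class LookaheadSketch {
        public static void main(String[] args) {
            ANTLRStringStream input = new ANTLRStringStream("abc");

            System.out.println((char) input.LA(1));             // a  (next symbol, not consumed yet)
            System.out.println(input.LA(-1));                    // -1 (nothing before the start)

            input.consume();                                     // cursor now sits after 'a'
            System.out.println(input.index());                   // 1
            System.out.println((char) input.LA(-1));             // a  (previous symbol)
            System.out.println((char) input.LA(2));              // c  (two symbols ahead)

            input.seek(3);                                       // jump past the last character
            System.out.println(input.LA(1) == CharStream.EOF);   // true

            // substring() is inclusive on both ends.
            System.out.println(input.substring(0, 2));           // abc
        }
    }
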
-############################################################################
-#
-# character streams for use in lexers
-#   CharStream
-#   \- ANTLRStringStream
-#
-############################################################################
-
-
-class ANTLRStringStream(CharStream):
-    """
-    @brief CharStream that pulls data from a unicode string.
-
-    A pretty quick CharStream that pulls all data from an array
-    directly.  Every method call counts in the lexer.
-
-    """
-
-
-    def __init__(self, data):
-        """
-        @param data This should be a unicode string holding the data you want
-           to parse. If you pass in a byte string, the Lexer will choke on
-           non-ascii data.
-
-        """
-
-        CharStream.__init__(self)
-
-  	# The data being scanned
-        self.strdata = unicode(data)
-        self.data = [ord(c) for c in self.strdata]
-
-	# How many characters are actually in the buffer
-        self.n = len(data)
-
- 	# 0..n-1 index into string of next char
-        self.p = 0
-
-	# line number 1..n within the input
-        self.line = 1
-
- 	# The index of the character relative to the beginning of the
-        # line 0..n-1
-        self.charPositionInLine = 0
-
-	# A list of CharStreamState objects that tracks the stream state
-        # values line, charPositionInLine, and p that can change as you
-        # move through the input stream.  Indexed from 0..markDepth-1.
-        self._markers = [ ]
-        self.lastMarker = None
-        self.markDepth = 0
-
-        # What is name or source of this char stream?
-        self.name = None
-
-
-    def reset(self):
-        """
-        Reset the stream so that it's in the same state it was
-        when the object was created *except* the data array is not
-        touched.
-        """
-
-        self.p = 0
-        self.line = 1
-        self.charPositionInLine = 0
-        self._markers = [ ]
-
-
-    def consume(self):
-        try:
-            if self.data[self.p] == 10: # \n
-                self.line += 1
-                self.charPositionInLine = 0
-            else:
-                self.charPositionInLine += 1
-
-            self.p += 1
-
-        except IndexError:
-            # happens when we reach EOF and self.data[self.p] fails
-            # just do nothing
-            pass
-
-
-
-    def LA(self, i):
-        if i == 0:
-            return 0 # undefined
-
-        if i < 0:
-            i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
-
-        try:
-            return self.data[self.p+i-1]
-        except IndexError:
-            return EOF
-
-
-
-    def LT(self, i):
-        if i == 0:
-            return 0 # undefined
-
-        if i < 0:
-            i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
-
-        try:
-            return self.strdata[self.p+i-1]
-        except IndexError:
-            return EOF
-
-
-    def index(self):
-        """
-        Return the current input symbol index 0..n where n indicates the
-        last symbol has been read.  The index is the index of char to
-        be returned from LA(1).
-        """
-
-        return self.p
-
-
-    def size(self):
-        return self.n
-
-
-    def mark(self):
-        state = (self.p, self.line, self.charPositionInLine)
-        try:
-            self._markers[self.markDepth] = state
-        except IndexError:
-            self._markers.append(state)
-        self.markDepth += 1
-
-        self.lastMarker = self.markDepth
-
-        return self.lastMarker
-
-
-    def rewind(self, marker=None):
-        if marker is None:
-            marker = self.lastMarker
-
-        p, line, charPositionInLine = self._markers[marker-1]
-
-        self.seek(p)
-        self.line = line
-        self.charPositionInLine = charPositionInLine
-        self.release(marker)
-
-
-    def release(self, marker=None):
-        if marker is None:
-            marker = self.lastMarker
-
-        self.markDepth = marker-1
-
-
-    def seek(self, index):
-        """
-        consume() ahead until p==index; can't just set p=index as we must
-        update line and charPositionInLine.
-        """
-
-        if index <= self.p:
-            self.p = index # just jump; don't update stream state (line, ...)
-            return
-
-        # seek forward, consume until p hits index
-        while self.p < index:
-            self.consume()
-
-
-    def substring(self, start, stop):
-        return self.strdata[start:stop+1]
-
-
-    def getLine(self):
-        """Using setter/getter methods is deprecated. Use o.line instead."""
-        return self.line
-
-
-    def getCharPositionInLine(self):
-        """
-        Using setter/getter methods is deprecated. Use o.charPositionInLine
-        instead.
-        """
-        return self.charPositionInLine
-
-
-    def setLine(self, line):
-        """Using setter/getter methods is deprecated. Use o.line instead."""
-        self.line = line
-
-
-    def setCharPositionInLine(self, pos):
-        """
-        Using setter/getter methods is deprecated. Use o.charPositionInLine
-        instead.
-        """
-        self.charPositionInLine = pos
-
-
-    def getSourceName(self):
-        return self.name
-
-
-class ANTLRFileStream(ANTLRStringStream):
-    """
-    @brief CharStream that opens a file to read the data.
-
-    This is a char buffer stream that is loaded from a file
-    all at once when you construct the object.
-    """
-
-    def __init__(self, fileName, encoding=None):
-        """
-        @param fileName The path to the file to be opened. The file will be
-           opened with mode 'rb'.
-
-        @param encoding If you set the optional encoding argument, then the
-           data will be decoded on the fly.
-
-        """
-
-        self.fileName = fileName
-
-        fp = codecs.open(fileName, 'rb', encoding)
-        try:
-            data = fp.read()
-        finally:
-            fp.close()
-
-        ANTLRStringStream.__init__(self, data)
-
-
-    def getSourceName(self):
-        """Deprecated, access o.fileName directly."""
-
-        return self.fileName
-
-
-class ANTLRInputStream(ANTLRStringStream):
-    """
-    @brief CharStream that reads data from a file-like object.
-
-    This is a char buffer stream that is loaded from a file like object
-    all at once when you construct the object.
-
-    All input is consumed from the file, but it is not closed.
-    """
-
-    def __init__(self, file, encoding=None):
-        """
-        @param file A file-like object holding your input. Only the read()
-           method must be implemented.
-
-        @param encoding If you set the optional encoding argument, then the
-           data will be decoded on the fly.
-
-        """
-
-        if encoding is not None:
-            # wrap input in a decoding reader
-            reader = codecs.lookup(encoding)[2]
-            file = reader(file)
-
-        data = file.read()
-
-        ANTLRStringStream.__init__(self, data)
-
-
-# I guess the ANTLR prefix exists only to avoid a name clash with some Java
-# mumbojumbo. A plain "StringStream" looks better to me, which should be
-# the preferred name in Python.
-StringStream = ANTLRStringStream
-FileStream = ANTLRFileStream
-InputStream = ANTLRInputStream
-
-
-############################################################################
-#
-# Token streams
-#   TokenStream
-#   +- CommonTokenStream
-#   \- TokenRewriteStream
-#
-############################################################################
-
-
-class CommonTokenStream(TokenStream):
-    """
-    @brief The most common stream of tokens
-
-    The most common stream of tokens is one where every token is buffered up
-    and tokens are prefiltered for a certain channel (the parser will only
-    see these tokens and cannot change the filter channel number during the
-    parse).
-    """
-
-    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
-        """
-        @param tokenSource A TokenSource instance (usually a Lexer) to pull
-            the tokens from.
-
-        @param channel Skip tokens on any channel but this one; this is how we
-            skip whitespace...
-
-        """
-
-        TokenStream.__init__(self)
-
-        self.tokenSource = tokenSource
-
-        # Record every single token pulled from the source so we can reproduce
-        # chunks of it later.
-        self.tokens = []
-
-        # Map<tokentype, channel> to override some Tokens' channel numbers
-        self.channelOverrideMap = {}
-
-        # Set<tokentype>; discard any tokens with this type
-        self.discardSet = set()
-
-        # Skip tokens on any channel but this one; this is how we skip
-        # whitespace...
-        self.channel = channel
-
-        # By default, track all incoming tokens
-        self.discardOffChannelTokens = False
-
-        # The index into the tokens list of the current token (next token
-        # to consume).  p==-1 indicates that the tokens list is empty
-        self.p = -1
-
-        # Remember last marked position
-        self.lastMarker = None
-
-        # how deep have we gone?
-        self._range = -1
-
-
-    def makeEOFToken(self):
-        return self.tokenSource.makeEOFToken()
-
-
-    def setTokenSource(self, tokenSource):
-        """Reset this token stream by setting its token source."""
-
-        self.tokenSource = tokenSource
-        self.tokens = []
-        self.p = -1
-        self.channel = DEFAULT_CHANNEL
-
-
-    def reset(self):
-        self.p = 0
-        self.lastMarker = None
-
-
-    def fillBuffer(self):
-        """
-        Load all tokens from the token source and put them in self.tokens.
-        This is done upon the first LT request because you might want to
-        set some token type / channel overrides before filling the buffer.
-        """
-
-        index = 0
-        t = self.tokenSource.nextToken()
-        while t is not None and t.type != EOF:
-            discard = False
-
-            if self.discardSet is not None and t.type in self.discardSet:
-                discard = True
-
-            elif self.discardOffChannelTokens and t.channel != self.channel:
-                discard = True
-
-            # is there a channel override for token type?
-            try:
-                overrideChannel = self.channelOverrideMap[t.type]
-
-            except KeyError:
-                # no override for this type
-                pass
-
-            else:
-                if overrideChannel == self.channel:
-                    t.channel = overrideChannel
-                else:
-                    discard = True
-
-            if not discard:
-                t.index = index
-                self.tokens.append(t)
-                index += 1
-
-            t = self.tokenSource.nextToken()
-
-        # leave p pointing at first token on channel
-        self.p = 0
-        self.p = self.skipOffTokenChannels(self.p)
-
-
-    def consume(self):
-        """
-        Move the input pointer to the next incoming token.  The stream
-        must become active with LT(1) available.  consume() simply
-        moves the input pointer so that LT(1) points at the next
-        input symbol. Consume at least one token.
-
-        Walk past any token not on the channel the parser is listening to.
-        """
-
-        if self.p < len(self.tokens):
-            self.p += 1
-
-            self.p = self.skipOffTokenChannels(self.p) # leave p on valid token
-
-
-    def skipOffTokenChannels(self, i):
-        """
-        Given a starting index, return the index of the first on-channel
-        token.
-        """
-
-        try:
-            while self.tokens[i].channel != self.channel:
-                i += 1
-        except IndexError:
-            # hit the end of token stream
-            pass
-
-        return i
-
-
-    def skipOffTokenChannelsReverse(self, i):
-        while i >= 0 and self.tokens[i].channel != self.channel:
-            i -= 1
-
-        return i
-
-
-    def setTokenTypeChannel(self, ttype, channel):
-        """
-        A simple filter mechanism whereby you can tell this token stream
-        to force all tokens of type ttype to be on channel.  For example,
-        when interpreting, we cannot exec actions so we need to tell
-        the stream to force all WS and NEWLINE to be a different, ignored
-        channel.
-        """
-
-        self.channelOverrideMap[ttype] = channel
-
-
-    def discardTokenType(self, ttype):
-        self.discardSet.add(ttype)
-
-
-    def getTokens(self, start=None, stop=None, types=None):
-        """
-        Given a start and stop index, return a list of all tokens in
-        the token type set.  Return None if no tokens were found.  This
-        method looks at both on and off channel tokens.
-        """
-
-        if self.p == -1:
-            self.fillBuffer()
-
-        if stop is None or stop >= len(self.tokens):
-            stop = len(self.tokens) - 1
-
-        if start is None or start < 0:
-            start = 0
-
-        if start > stop:
-            return None
-
-        if isinstance(types, (int, long)):
-            # called with a single type, wrap into set
-            types = set([types])
-
-        filteredTokens = [
-            token for token in self.tokens[start:stop]
-            if types is None or token.type in types
-            ]
-
-        if len(filteredTokens) == 0:
-            return None
-
-        return filteredTokens
-
-
-    def LT(self, k):
-        """
-        Get the kth token from the current position, where k=1 is the
-        first symbol of lookahead.
-        """
-
-        if self.p == -1:
-            self.fillBuffer()
-
-        if k == 0:
-            return None
-
-        if k < 0:
-            return self.LB(-k)
-
-        i = self.p
-        n = 1
-        # find k good tokens
-        while n < k:
-            # skip off-channel tokens
-            i = self.skipOffTokenChannels(i+1) # leave p on valid token
-            n += 1
-
-        if i > self._range:
-            self._range = i
-
-        try:
-            return self.tokens[i]
-        except IndexError:
-            return self.makeEOFToken()
-
-
-    def LB(self, k):
-        """Look backwards k on-channel tokens."""
-
-        if self.p == -1:
-            self.fillBuffer()
-
-        if k == 0:
-            return None
-
-        if self.p - k < 0:
-            return None
-
-        i = self.p
-        n = 1
-        # find k good tokens looking backwards
-        while n <= k:
-            # skip off-channel tokens
-            i = self.skipOffTokenChannelsReverse(i-1) # leave p on valid token
-            n += 1
-
-        if i < 0:
-            return None
-
-        return self.tokens[i]
-
-
-    def get(self, i):
-        """
-        Return absolute token i; ignore which channel the tokens are on;
-        that is, count all tokens not just on-channel tokens.
-        """
-
-        return self.tokens[i]
-
-
-    def slice(self, start, stop):
-        if self.p == -1:
-            self.fillBuffer()
-
-        if start < 0 or stop < 0:
-            return None
-
-        return self.tokens[start:stop+1]
-
-
-    def LA(self, i):
-        return self.LT(i).type
-
-
-    def mark(self):
-        self.lastMarker = self.index()
-        return self.lastMarker
-
-
-    def release(self, marker=None):
-        # no resources to release
-        pass
-
-
-    def size(self):
-        return len(self.tokens)
-
-
-    def range(self):
-        return self._range
-
-
-    def index(self):
-        return self.p
-
-
-    def rewind(self, marker=None):
-        if marker is None:
-            marker = self.lastMarker
-
-        self.seek(marker)
-
-
-    def seek(self, index):
-        self.p = index
-
-
-    def getTokenSource(self):
-        return self.tokenSource
-
-
-    def getSourceName(self):
-        return self.tokenSource.getSourceName()
-
-
-    def toString(self, start=None, stop=None):
-        if self.p == -1:
-            self.fillBuffer()
-
-        if start is None:
-            start = 0
-        elif not isinstance(start, int):
-            start = start.index
-
-        if stop is None:
-            stop = len(self.tokens) - 1
-        elif not isinstance(stop, int):
-            stop = stop.index
-
-        if stop >= len(self.tokens):
-            stop = len(self.tokens) - 1
-
-        return ''.join([t.text for t in self.tokens[start:stop+1]])
-
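-
-# A usage sketch for illustration only; it is not part of the original
-# runtime.  "lexer" stands for any TokenSource, e.g. an instance of a
-# generated antlr3 Lexer (an assumption of this example).
-def _example_common_token_stream(lexer):
-    """Show the typical life cycle of a CommonTokenStream."""
-
-    tokens = CommonTokenStream(lexer)
-
-    # Channel overrides and discards must be registered before the first
-    # LT() call, because fillBuffer() runs lazily on that first lookahead;
-    # WS and COMMENT would be token types from the generated lexer:
-    #   tokens.setTokenTypeChannel(WS, 99)
-    #   tokens.discardTokenType(COMMENT)
-
-    first = tokens.LT(1)   # triggers fillBuffer(); first on-channel token
-    ttype = tokens.LA(1)   # type of that token
-    tokens.consume()       # advance, skipping off-channel tokens
-    return first, ttype
-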
-
-class RewriteOperation(object):
-    """@brief Internal helper class."""
-
-    def __init__(self, stream, index, text):
-        self.stream = stream
-
-        # What index into rewrites List are we?
-        self.instructionIndex = None
-
-        # Token buffer index.
-        self.index = index
-        self.text = text
-
-    def execute(self, buf):
-        """Execute the rewrite operation by possibly adding to the buffer.
-        Return the index of the next token to operate on.
-        """
-
-        return self.index
-
-    def toString(self):
-        opName = self.__class__.__name__
-        return '<%s@%d:"%s">' % (
-            opName, self.index, self.text)
-
-    __str__ = toString
-    __repr__ = toString
-
-
-class InsertBeforeOp(RewriteOperation):
-    """@brief Internal helper class."""
-
-    def execute(self, buf):
-        buf.write(self.text)
-        if self.stream.tokens[self.index].type != EOF:
-            buf.write(self.stream.tokens[self.index].text)
-        return self.index + 1
-
-
-class ReplaceOp(RewriteOperation):
-    """
-    @brief Internal helper class.
-
-    I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
-    instructions.
-    """
-
-    def __init__(self, stream, first, last, text):
-        RewriteOperation.__init__(self, stream, first, text)
-        self.lastIndex = last
-
-
-    def execute(self, buf):
-        if self.text is not None:
-            buf.write(self.text)
-
-        return self.lastIndex + 1
-
-
-    def toString(self):
-        if self.text is None:
-            return '<DeleteOp@%d..%d>' % (self.index, self.lastIndex)
-
-        return '<ReplaceOp@%d..%d:"%s">' % (
-            self.index, self.lastIndex, self.text)
-
-    __str__ = toString
-    __repr__ = toString
-
-
-class TokenRewriteStream(CommonTokenStream):
-    """@brief CommonTokenStream that can be modified.
-
-    Useful for dumping out the input stream after doing some
-    augmentation or other manipulations.
-
-    You can insert stuff, replace, and delete chunks.  Note that the
-    operations are done lazily--only if you convert the buffer to a
-    String.  This is very efficient because you are not moving data around
-    all the time.  As the buffer of tokens is converted to strings, the
-    toString() method(s) check to see if there is an operation at the
-    current index.  If so, the operation is done and then normal String
-    rendering continues on the buffer.  This is like having multiple Turing
-    machine instruction streams (programs) operating on a single input tape. :)
-
-    Since the operations are done lazily at toString-time, operations do not
-    screw up the token index values.  That is, an insert operation at token
-    index i does not change the index values for tokens i+1..n-1.
-
-    Because operations never actually alter the buffer, you may always get
-    the original token stream back without undoing anything.  Since
-    the instructions are queued up, you can easily simulate transactions and
-    roll back any changes if there is an error just by removing instructions.
-    For example,
-
-     CharStream input = new ANTLRFileStream("input");
-     TLexer lex = new TLexer(input);
-     TokenRewriteStream tokens = new TokenRewriteStream(lex);
-     T parser = new T(tokens);
-     parser.startRule();
-
-     Then in the rules, you can execute
-        Token t,u;
-        ...
-        input.insertAfter(t, "text to put after t");
-        input.insertAfter(u, "text after u");
-        System.out.println(tokens.toString());
-
-    Actually, you have to cast the 'input' to a TokenRewriteStream. :(
-
-    You can also have multiple "instruction streams" and get multiple
-    rewrites from a single pass over the input.  Just name the instruction
-    streams and use that name again when printing the buffer.  This could be
-    useful for generating a C file and also its header file--all from the
-    same buffer:
-
-        tokens.insertAfter("pass1", t, "text to put after t");
-        tokens.insertAfter("pass2", u, "text after u");
-        System.out.println(tokens.toString("pass1"));
-        System.out.println(tokens.toString("pass2"));
-
-    If you don't use named rewrite streams, a "default" stream is used as
-    the first example shows.
-    """
-
-    DEFAULT_PROGRAM_NAME = "default"
-    MIN_TOKEN_INDEX = 0
-
-    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
-        CommonTokenStream.__init__(self, tokenSource, channel)
-
-        # You may have multiple, named streams of rewrite operations.
-        # I'm calling these things "programs."
-        #  Maps String (name) -> rewrite (List)
-        self.programs = {}
-        self.programs[self.DEFAULT_PROGRAM_NAME] = []
-
-        # Map String (program name) -> Integer index
-        self.lastRewriteTokenIndexes = {}
-
-
-    def rollback(self, *args):
-        """
-        Rollback the instruction stream for a program so that
-        the indicated instruction (via instructionIndex) is no
-        longer in the stream.  UNTESTED!
-        """
-
-        if len(args) == 2:
-            programName = args[0]
-            instructionIndex = args[1]
-        elif len(args) == 1:
-            programName = self.DEFAULT_PROGRAM_NAME
-            instructionIndex = args[0]
-        else:
-            raise TypeError("Invalid arguments")
-
-        p = self.programs.get(programName, None)
-        if p is not None:
-            self.programs[programName] = (
-                p[self.MIN_TOKEN_INDEX:instructionIndex])
-
-
-    def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME):
-        """Reset the program so that no instructions exist"""
-
-        self.rollback(programName, self.MIN_TOKEN_INDEX)
-
-
-    def insertAfter(self, *args):
-        if len(args) == 2:
-            programName = self.DEFAULT_PROGRAM_NAME
-            index = args[0]
-            text = args[1]
-
-        elif len(args) == 3:
-            programName = args[0]
-            index = args[1]
-            text = args[2]
-
-        else:
-            raise TypeError("Invalid arguments")
-
-        if isinstance(index, Token):
-            # index is a Token, grab the stream index from it
-            index = index.index
-
-        # to insert after, just insert before next index (even if past end)
-        self.insertBefore(programName, index+1, text)
-
-
-    def insertBefore(self, *args):
-        if len(args) == 2:
-            programName = self.DEFAULT_PROGRAM_NAME
-            index = args[0]
-            text = args[1]
-
-        elif len(args) == 3:
-            programName = args[0]
-            index = args[1]
-            text = args[2]
-
-        else:
-            raise TypeError("Invalid arguments")
-
-        if isinstance(index, Token):
-            # index is a Token, grab the stream index from it
-            index = index.index
-
-        op = InsertBeforeOp(self, index, text)
-        rewrites = self.getProgram(programName)
-        op.instructionIndex = len(rewrites)
-        rewrites.append(op)
-
-
-    def replace(self, *args):
-        if len(args) == 2:
-            programName = self.DEFAULT_PROGRAM_NAME
-            first = args[0]
-            last = args[0]
-            text = args[1]
-
-        elif len(args) == 3:
-            programName = self.DEFAULT_PROGRAM_NAME
-            first = args[0]
-            last = args[1]
-            text = args[2]
-
-        elif len(args) == 4:
-            programName = args[0]
-            first = args[1]
-            last = args[2]
-            text = args[3]
-
-        else:
-            raise TypeError("Invalid arguments")
-
-        if isinstance(first, Token):
-            # first is a Token, grab the stream index from it
-            first = first.index
-
-        if isinstance(last, Token):
-            # last is a Token, grab the stream index from it
-            last = last.index
-
-        if first > last or first < 0 or last < 0 or last >= len(self.tokens):
-            raise ValueError(
-                "replace: range invalid: %d..%d (size=%d)"
-                % (first, last, len(self.tokens)))
-
-        op = ReplaceOp(self, first, last, text)
-        rewrites = self.getProgram(programName)
-        op.instructionIndex = len(rewrites)
-        rewrites.append(op)
-
-
-    def delete(self, *args):
-        self.replace(*(list(args) + [None]))
-
-
-    def getLastRewriteTokenIndex(self, programName=DEFAULT_PROGRAM_NAME):
-        return self.lastRewriteTokenIndexes.get(programName, -1)
-
-
-    def setLastRewriteTokenIndex(self, programName, i):
-        self.lastRewriteTokenIndexes[programName] = i
-
-
-    def getProgram(self, name):
-        p = self.programs.get(name, None)
-        if p is None:
-            p = self.initializeProgram(name)
-
-        return p
-
-
-    def initializeProgram(self, name):
-        p = []
-        self.programs[name] = p
-        return p
-
-
-    def toOriginalString(self, start=None, end=None):
-        if self.p == -1:
-            self.fillBuffer()
-
-        if start is None:
-            start = self.MIN_TOKEN_INDEX
-        if end is None:
-            end = self.size() - 1
-
-        buf = StringIO()
-        i = start
-        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
-            if self.get(i).type != EOF:
-                buf.write(self.get(i).text)
-            i += 1
-
-        return buf.getvalue()
-
-
-    def toString(self, *args):
-        if self.p == -1:
-            self.fillBuffer()
-
-        if len(args) == 0:
-            programName = self.DEFAULT_PROGRAM_NAME
-            start = self.MIN_TOKEN_INDEX
-            end = self.size() - 1
-
-        elif len(args) == 1:
-            programName = args[0]
-            start = self.MIN_TOKEN_INDEX
-            end = self.size() - 1
-
-        elif len(args) == 2:
-            programName = self.DEFAULT_PROGRAM_NAME
-            start = args[0]
-            end = args[1]
-
-        if start is None:
-            start = self.MIN_TOKEN_INDEX
-        elif not isinstance(start, int):
-            start = start.index
-
-        if end is None:
-            end = len(self.tokens) - 1
-        elif not isinstance(end, int):
-            end = end.index
-
-        # ensure start/end are in range
-        if end >= len(self.tokens):
-            end = len(self.tokens) - 1
-
-        if start < 0:
-            start = 0
-
-        rewrites = self.programs.get(programName)
-        if rewrites is None or len(rewrites) == 0:
-            # no instructions to execute
-            return self.toOriginalString(start, end)
-
-        buf = StringIO()
-
-        # First, optimize instruction stream
-        indexToOp = self.reduceToSingleOperationPerIndex(rewrites)
-
-        # Walk buffer, executing instructions and emitting tokens
-        i = start
-        while i <= end and i < len(self.tokens):
-            op = indexToOp.get(i)
-            # remove so any left have index size-1
-            try:
-                del indexToOp[i]
-            except KeyError:
-                pass
-
-            t = self.tokens[i]
-            if op is None:
-                # no operation at that index, just dump token
-                if t.type != EOF:
-                    buf.write(t.text)
-                i += 1 # move to next token
-
-            else:
-                i = op.execute(buf) # execute operation and skip
-
-        # include stuff after end if it's last index in buffer
-        # So, if they did an insertAfter(lastValidIndex, "foo"), include
-        # foo if end==lastValidIndex.
-        if end == len(self.tokens) - 1:
-            # Scan any remaining operations after the last token; they
-            # should be included (they will be inserts).
-            for i in sorted(indexToOp.keys()):
-                op = indexToOp[i]
-                if op.index >= len(self.tokens)-1:
-                    buf.write(op.text)
-
-        return buf.getvalue()
-
-    __str__ = toString
-
-
-    def reduceToSingleOperationPerIndex(self, rewrites):
-        """
-        We need to combine operations and report invalid operations (like
-        overlapping replaces that are not completely nested).  Inserts to
-        the same index need to be combined, etc...  Here are the cases:
-
-        I.i.u I.j.v                           leave alone, nonoverlapping
-        I.i.u I.i.v                           combine: Iivu
-
-        R.i-j.u R.x-y.v | i-j in x-y          delete first R
-        R.i-j.u R.i-j.v                       delete first R
-        R.i-j.u R.x-y.v | x-y in i-j          ERROR
-        R.i-j.u R.x-y.v | boundaries overlap  ERROR
-
-        Delete special case of replace (text==null):
-        D.i-j.u D.x-y.v | boundaries overlap  combine to max(min)..max(right)
-
-        I.i.u R.x-y.v   | i in (x+1)-y        delete I (since insert before,
-                                              we're not deleting i)
-        I.i.u R.x-y.v   | i not in (x+1)-y    leave alone, nonoverlapping
-
-        R.x-y.v I.i.u   | i in x-y            ERROR
-        R.x-y.v I.x.u                         R.x-y.uv (combine, delete I)
-        R.x-y.v I.i.u   | i not in x-y        leave alone, nonoverlapping
-
-        I.i.u = insert u before op @ index i
-        R.x-y.u = replace x-y indexed tokens with u
-
-        First we need to examine replaces.  For any replace op:
-
-          1. wipe out any insertions before op within that range.
-          2. Drop any replace op before that is contained completely within
-             that range.
-          3. Throw exception upon boundary overlap with any previous replace.
-
-        Then we can deal with inserts:
-
-          1. for any inserts to same index, combine even if not adjacent.
-          2. for any prior replace with same left boundary, combine this
-             insert with replace and delete this replace.
-          3. throw exception if index in same range as previous replace
-
-        Don't actually delete; make op null in list. Easier to walk list.
-        Later we can throw as we add to index -> op map.
-
-        Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
-        inserted stuff would be before the replace range.  But, if you
-        add tokens in front of a method body '{' and then delete the method
-        body, I think the stuff before the '{' you added should disappear too.
-
-        Return a map from token index to operation.
-        """
-
-        # WALK REPLACES
-        for i, rop in enumerate(rewrites):
-            if rop is None:
-                continue
-
-            if not isinstance(rop, ReplaceOp):
-                continue
-
-            # Wipe prior inserts within range
-            for j, iop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
-                if iop.index == rop.index:
-                    # E.g., insert before 2, delete 2..2; update replace
-                    # text to include insert before, kill insert
-                    rewrites[iop.instructionIndex] = None
-                    rop.text = self.catOpText(iop.text, rop.text)
-
-                elif iop.index > rop.index and iop.index <= rop.lastIndex:
-                    # delete insert as it's a no-op.
-                    rewrites[j] = None
-
-            # Drop any prior replaces contained within
-            for j, prevRop in self.getKindOfOps(rewrites, ReplaceOp, i):
-                if (prevRop.index >= rop.index
-                    and prevRop.lastIndex <= rop.lastIndex):
-                    # delete replace as it's a no-op.
-                    rewrites[j] = None
-                    continue
-
-                # throw exception unless disjoint or identical
-                disjoint = (prevRop.lastIndex < rop.index
-                            or prevRop.index > rop.lastIndex)
-                same = (prevRop.index == rop.index
-                        and prevRop.lastIndex == rop.lastIndex)
-
-                # Delete special case of replace (text==null):
-                # D.i-j.u D.x-y.v | boundaries overlap => combine to
-                # max(min)..max(right)
-                if prevRop.text is None and rop.text is None and not disjoint:
-                    # kill first delete
-                    rewrites[prevRop.instructionIndex] = None
-
-                    rop.index = min(prevRop.index, rop.index)
-                    rop.lastIndex = max(prevRop.lastIndex, rop.lastIndex)
-
-                elif not disjoint and not same:
-                    raise ValueError(
-                        "replace op boundaries of %s overlap with previous %s"
-                        % (rop, prevRop))
-
-        # WALK INSERTS
-        for i, iop in enumerate(rewrites):
-            if iop is None:
-                continue
-
-            if not isinstance(iop, InsertBeforeOp):
-                continue
-
-            # combine current insert with prior if any at same index
-            for j, prevIop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
-                if prevIop.index == iop.index: # combine objects
-                    # convert to strings...we're in process of toString'ing
-                    # whole token buffer so no lazy eval issue with any
-                    # templates
-                    iop.text = self.catOpText(iop.text, prevIop.text)
-                    # delete redundant prior insert
-                    rewrites[j] = None
-
-            # look for replaces where iop.index is in range; error
-            for j, rop in self.getKindOfOps(rewrites, ReplaceOp, i):
-                if iop.index == rop.index:
-                    rop.text = self.catOpText(iop.text, rop.text)
-                    # delete current insert
-                    rewrites[i] = None
-                    continue
-
-                if iop.index >= rop.index and iop.index <= rop.lastIndex:
-                    raise ValueError(
-                        "insert op %s within boundaries of previous %s"
-                        % (iop, rop))
-
-        m = {}
-        for i, op in enumerate(rewrites):
-            if op is None:
-                # ignore deleted ops
-                continue
-
-            assert op.index not in m, "should only be one op per index"
-            m[op.index] = op
-
-        return m
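-
-    # Worked example (for illustration; not in the original source), using
-    # the I/R notation from the docstring above: given the rewrite program
-    #     [InsertBeforeOp@2:"x", ReplaceOp@2..4:"y"]
-    # the replace walk folds the insert into the replace, leaving
-    #     {2: ReplaceOp@2..4:"xy"}
-    # so toString() emits "xy" in place of tokens 2..4 and nothing else
-    # for that range.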
-
-
-    def catOpText(self, a, b):
-        x = ""
-        y = ""
-        if a is not None:
-            x = a
-        if b is not None:
-            y = b
-        return x + y
-
-
-    def getKindOfOps(self, rewrites, kind, before=None):
-        """Get all operations before an index of a particular kind."""
-
-        if before is None:
-            before = len(rewrites)
-        elif before > len(rewrites):
-            before = len(rewrites)
-
-        for i, op in enumerate(rewrites[:before]):
-            if op is None:
-                # ignore deleted
-                continue
-            if op.__class__ == kind:
-                yield i, op
-
-
-    def toDebugString(self, start=None, end=None):
-        if start is None:
-            start = self.MIN_TOKEN_INDEX
-        if end is None:
-            end = self.size() - 1
-
-        buf = StringIO()
-        i = start
-        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
-            buf.write(self.get(i))
-            i += 1
-
-        return buf.getvalue()
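-
-
-# A usage sketch for illustration only; it is not part of the original
-# runtime.  It is a Python rendering of the Java example in the
-# TokenRewriteStream docstring; "lexer" stands for a generated antlr3 Lexer,
-# and t/u stand for tokens you would normally capture while parsing.
-def _example_token_rewrite_stream(lexer):
-    """Show lazy insert/replace rewriting over a buffered token stream."""
-
-    tokens = TokenRewriteStream(lexer)
-
-    t = tokens.LT(1)   # stand-ins for tokens captured during a real parse
-    u = tokens.LT(2)
-
-    # Default (unnamed) rewrite program, as in the first docstring example.
-    tokens.insertAfter(t, "text to put after t")
-    tokens.insertAfter(u, "text after u")
-    print tokens.toString()
-
-    # Named programs give independent rewrites over the same token buffer,
-    # e.g. one pass for a C file and another for its header.
-    tokens.insertAfter("pass1", t, "text to put after t")
-    tokens.insertAfter("pass2", u, "text after u")
-    print tokens.toString("pass1")
-    print tokens.toString("pass2")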
diff --git a/antlr-3.4/tool/CHANGES.txt b/antlr-3.4/tool/CHANGES.txt
deleted file mode 100644
index f7415f0..0000000
--- a/antlr-3.4/tool/CHANGES.txt
+++ /dev/null
@@ -1,3493 +0,0 @@
-ANTLR 3.4 Release
-July 18, 2011
-
-Terence Parr, parrt at cs usfca edu
-ANTLR project lead and supreme dictator for life
-University of San Francisco
-
-CHANGES
-
-July 18, 2011 -- release 3.4
-
-* Added tree method insertChild(int i, Object t).
-
-July 14, 2011
-
-* Added BaseTree.freshenParentAndChildIndexesDeeply() to recursively
-  walk tree and set ptrs.
-
-July 6, 2011
-
-* reset() for token stream didn't skip initial off-channel tokens.
-
-July 5, 2011
-
-* Sam fixes rare infinite loop upon error in nextToken().
-* rewrites weren't pulled from syntactic predicates.
-
-June 29, 2011
-
-* Sam noticed CommonToken.getText() cached substring pulled from input, which
-  defeated purpose of pointing into input char array.  Altered to not cache.
-  Should reduce memory footprint.
-
-June 24, 2011
-
-* empty alts with actions didn't have EPSILON dummy alt node.
-
-June 19, 2011
-
-* Udo noticed that Parser.getMissingSymbol() didn't set invented token's input
-
-June 8, 2011
-
-* Added inContext(String context) back into TreeParser.
-
-April 21, 2011
-
-* Updated for ST v4.0.2 (setting iterateAcrossValues = true as instance var)
-* Needed throws condition for delegatedRules.
-
-April 20, 2011 (Sam Harwell)
-
-* Implement the 'throwsSpec' feature of parser rules for the Java target
-* Improve algorithm for SemanticContext Boolean predicate reduction
-
-April 13, 2011
-
-* Unmangled region names in STViz hierarchy tree display.
-* Removed conversion timeout thing again
-
-April 11, 2011
-
-* Added option -Xconversiontimeout back in.  Turns out we hit NFA conversion
-  time landmine occasionally with huge grammars; fails over to backtracking
-  (if turned on) if it can't make DFA.
-
-March 29 - April 10, 2011
-
-* Uses ST v4 now!!!  Massive change.  Only updated Java target so far.
-  Ripped out ST v3 usage to remove indirect dependency on ANTLR v2.
-
-March 28, 2011
-
-* Sam Harwell ported all v2 grammars to v3!
-
-March 3, 2011
-
-* left-recursion pattern off until all targets catch up
-
-* ANTLRCore.sti no longer used; removed from all targets.
-
-* Adding type, text terminal options
-
-* Replaced hetero arg with terminalOptions arg in all templates that reference hetero
-  (this is the class name / node type used for TOKEN<NODETYPE> references
-  in grammar).  Widespread but trivial changes to all targets.  hetero is
-  now terminalOptions.node.  Can also get terminalOptions.type and
-  terminalOptions.text from dictionary of options.
-
-* Fixed misspelling of license in BSD license headers
-
-March 3, 2011
-
-* Add tree, getTree() to ParserRuleReturnScope to do away with specific ones like:
-    public static class rewrite_template_args_return extends ParserRuleReturnScope {
-        CommonTree tree;
-        public Object getTree() { return tree; }
-    };
-  Removed these special classes if it's just AST; keep if they have defined "returns"
-  values
-
-February 26, 2011
-
-* All finally {} have comment now to suppress warning.
-
-* removed ; from {;} blank method
-
-* Added @SuppressWarnings({"all"}) to front of each
-  generated class.
-
-* -print wasn't always showing ^ and ! in grammar
-
-* Added java-left-recur/Java.g example.
-
-* left-recursion pattern detection handles backtracking mode now
-
-February 25, 2011
-
-* -Xmaxinlinedfastates went to 60 from 10 for forcing prediction in left-
-  recursive expression rules to stay in rule; preds use a parameter.
-
-* trees now know which start..stop tokens they came from; todo: use for better err handling.
-
-* Got immediate left-recursion working for rules. Added TestLeftRecursion.java
-
-February 21, 2011
-
-* Fixed http://www.antlr.org/jira/browse/ANTLR-437 thanks to Vladislav Kuzkokov.
-  added unit test.
-
-February 17, 2011
-
-* Add -language L option to override language=L option in grammar.  Same
-  grammar can yield multiple parsers in different languages now.
-
-February 10, 2011
-
-* Added method to generated Java code to return the array of delegates; e.g.,
-    import Sub1, Sub2;
-  yields:
-
-    public Parser[] getDelegates() {
-        return new Parser[] {gSub1, gSub2};
-    }
-
-January 25, 2011
-
-* Improve error messages for no viable alt parse exceptions
-
-January 20, 2011
-
-* TokenRewriteStream had dead code; also updated insertBefore followed by
-  replace or delete.  If input is abc and I did insertBefore(2,"y"), where
-  'c' is index 2, then did a delete of 2, the previously defined functionality
-  was to ignore the insert. That's weird; fixed to keep the insert.  Also
-  Delete special case of replace (text==null):
-  	  D.i-j.u D.x-y.v	| boundaries overlap => combine to max(min)..max(right)
-
-December 12, 2010
-
-* Send couldBacktrack now to enterDecision in debug protocol
-
-December 4, 2010
-
-* TreeWizard ctor needed a default tree adaptor.
-
-November 29, 2010 -- ANTLR v3.3
-
-November 23, 2010
-
-* CodeGenerator.loadLanguageTarget is now static and available to load
-  targets so we can ask them questions during analysis.
-
-* Fixed and added unit test
-    http://www.antlr.org/jira/browse/ANTLR-370
-    http://www.antlr.org/jira/browse/ANTLR-375
-
-November 23, 2010
-
-* Added source name to syntax error msgs
-
-October 20, 2010
-
-Added boolean couldBacktrack to enterDecision in dbg interface. Breaks AW
-interface and other tools! [BREAKS BACKWARD COMPATIBILITY]
-
-October 17, 2010
-
-* Missing -trace in help msg
-
-November 22, 2010
-
-* Added GrammarAST: public int getCharPositionInLine() { return getColumn()-1; }
-  and Grammar.getHasDelegates() for C# guys
-
-October 16, 2010
-
-* Doesn't write profile data to file anymore; emits decision data to stderr
-
-October 14, 2010
-
-* Make OrderedHashSet have deterministic iteration
-
-July 20, 2010
-
-* greedy=true option shuts off nondeterminism warning.
-
-* code gen for AST and -profile didn't compile. had useless line:
-
-             proxy.setTreeAdaptor(adap);
-
-
-July 17, 2010
-
-* Removed conversion timeout failsafe; no longer needed.
-
-* Stats updated to be correct for -report.
-
-June 10, 2010
-
-* added toArray in OrderedHashSet to make addAll calls get same order for DFA edges and possibly code gen in some areas.
-
-June 5, 2010
-
-* Added -Xsavelexer
-
-May 24, 2010
-
-* lexerStringRef was missing elementIndex attribute. i='import' didn't work
-  in lexer.  Altered all target stg files.  Set in codegen.g
-
-* output=AST, rewrite=true for tree rewriters broken. nextNode for subtree
-  streams didn't dup node, it gave whole tree back.
-
-March 17, 2010
-
-* Added MachineProbe class to make it easier to highlight ambig paths in
-  grammar.  More accurate than DecisionProbe; retrofitted from v4.
-
-February 20, 2010
-
-* added range to TokenStream and implementors:
-    /** How far ahead has the stream been asked to look?  The return
-     *  value is a valid index from 0..n-1.
-     */
-    int range();
-
-* added new method to get subset of tokens to buffered token streams:
-	public List get(int start, int stop);
-
-February 15, 2010
-
-* Refs to other tokens in a lexer rule didn't get their line/charpos right.
-  altered Java.stg.
-
-January 31, 2010
-
-* Creating token from another token didn't copy input stream in CommonToken.
-  Makes sense to copy too; I don't think anybody relies on it being null after
-  a copy. We might want to know where token came from.
-
-January 26, 2010
-
-* TreeParser.getMissingSymbol() use CommonTree instead of using
-  adaptor.create()
-
-December 8, 2009
-
-* Instead of sharing Token.EOF_TOKEN, I'm now creating EOF tokens so I can set the char position for better error messages.
-
-December 5, 2009
-
-* Fixed bug in TreeVisitor when rewrites altered number of children. Thanks to Chris DiGiano.
-
-* added new buffered on-demand streams: BufferedTokenStream. Renamed CommonTokenStream to LegacyCommonTokenStream and made new one as subclass of BufferedTokenStream.
-
-November 3, 2009
-
-* Added org.antlr.runtime.UnbufferedTokenStream. Was trivial and works!
-
-November 1, 2009
-
-* Couldn't properly reuse parser state; ctor reset the state; fixed.
-	Parser(TokenStream input, RecognizerSharedState state)
-
-* LookaheadStream<T> used some hardcoded Object return types for LT, etc...
-  uses T now.
-
-September 23, 2009 -- ANTLR v3.2
-
-September 21, 2009 [Jim Idle]
-
-* Added new options for tool invocation to control the points at which the code
-  generator tells the target code to use its equivalent of switch() instead of
-  inline ifs.
-      -Xmaxswitchcaselabels m don't generate switch() statements for dfas
-                              bigger  than m [300]
-      -Xminswitchalts m       don't generate switch() statements for dfas smaller
-                              than m [3]
-* Upgraded -X help output to include new options and provide the default
-  settings, as well as provide units for those settings that need them.
-
-* Change the C Target to override the defaults for the new settings to
-  generate the most optimizable C code from the modern C compiler point of
-  view. This is essentially to always use switch statements unless there
-  is absolutely no other option. C defaults are to use 1 for minimum and
-  3000 for maximum number of alts that trigger switch(). This results in
-  object code that is 30% smaller and up to 20% faster.
-
-April 23, 2009
-
-* Added reset to TreeNodeStream interface.
-
-April 22, 2009
-
-* Fixed ANTLR-374.  Was caused by a move of grammars. %foo() stuff didn't work
-
-April 9, 2009
-
-* .g and .g3 file extensions work again.
-* introduced bug in 3.1.3: gives exception not error msg upon
-  missing .g file
-
-March 26, 2009
-
-* Made ctor in TreeRewriter and TreeFilter call this not super.
-
-March 21, 2009
-
-* Added ctor to RecognizerSharedState to allow cloning it.
-
-March 17, 2009 -- ANTLR v3.1.3
-
-* improved ANTLRv3.g to handle <...> element options
-
-March 15, 2009
-
-* Fixed ANTLR-389. Strip didn't ignore options in subrules; also seemed
-  to demand stdin.
-
-March 15, 2009
-
-* ANTLR always sorts incoming grammar list by dependency.  For example,
-  If W.g depends on tokens from P.g then P.g is done first even if
-  W.g is mentioned first on the command line.  It does not ignore any files
-  you specify on the command line.  If you do *.g and that includes some
-  imported grammars, it will run antlr on them.
-
-* -make option prevents ANTLR from running on P.g if P older than
-  generated files.
-
-* Added org.antlr.tool.GrammarSpelunker to build a faster dependency
-  checker (what grammars depend on etc...).  Totally independent of any
-  ANTLR code; easy to pull into other tools.
-
-* Added org.antlr.misc.Graph, a general graph with nodes
-  containing an Object payload. It knows how to do a topological sort
-  on the nodes.
-
-March 10, 2009
-
-* Added associativity token option to support upcoming special expression
-  parsing. Added rule option strategy=precedence also
-
-March 1, 2009
-
-* Changed ANTLRWorks debug port from 49153 to 49100.  Apparently we changed the port in
-  ANTLRWorks to 49100 in 1.2 but forgot to do so in the ANTLR targets.
-
-START CHANGES FOR TREE FILTER MODE (pulled from dev branch)
-
-This feature will be announced in 3.2, but I am integrating from my development branch now into the mainline so target developers have a chance to implement. We might release a 3.1.3 bug fix release before 3.2.
-
-* CommonTreeNodeStream -> BufferedTreeNodeStream.  Now,
-  CommonTreeNodeStream is completely unbuffered unless you are
-  backtracking.  No longer making a list of all nodes before tree parsing.
-
-* Added tree grammar filter=true mode.
-
-  Altered templates:
-	Java.stg: added filterMode to genericParser and treeParser.
-	This required a change to ANTLRCore.sti
-	Defined a default parameter in treeParser to set the superclass
-	to TreeFilter for tree grammar with filter=true. It sets
-	superclass to TreeRewriter if filter=true and output=AST.
-  Other than that, I only had to change ANTLR itself a little bit.
-  Made filter mode valid for tree grammars and have it automatically set
-  the necessary elements: @synpredgate, backtrack=true, rewrite=true
-  (if output=AST).  Added error message for detecting conflicting
-  options.
-
-* Added misc.FastQueue and TestFastQueue:
-  A queue that can dequeue and get(i) in O(1) and grow arbitrarily large.
-  A linked list is fast at dequeue but slow at get(i).  An array is
-  the reverse.  This is O(1) for both operations.
-
-* Added tree.TreeIterator, a generator that walks a doubly linked tree.
-  The nodes must know what index they are. It's an Iterator but
-  remove() is not supported. Returns navigation nodes always:
-  UP, DOWN, EOF.
-
-* Added misc.LookaheadStream: A lookahead queue that knows how
-  to mark/release locations in the buffer for backtracking purposes.
-  I hope to use for both tree nodes and tokens.  Just implement
-  nextElement() to say how to get next node or token.
-
-END CHANGES FOR TREE FILTER MODE
-
-February 23, 2009 -- ANTLR v3.1.2
-
-February 18, 2009
-
-* Added org.antlr.tool.Strip (reads from file arg or stdin, emits to stdout)
-  to strip actions from a grammar.
-
-February 4, 2009
-
-* Added CommonTree.setUnknownTokenBoundaries().  Sometimes we build trees
-  in a grammar and some of the token boundaries are not set properly.
-  This only matters if you want to print out the original text associated
-  with a subtree.  Check out this rule:
-
-	postfixExpression
-	    :   primary ('.'^ ID)*
-	    ;
-
-  For a.b.c, we get a '.' that does not have the token boundaries set.
-  ANTLR only sets token boundaries for subtrees returned from a rule.
-  SO, the overall '.' operator has the token boundaries set from 'a'
-  to 'c' tokens, but the lower '.' subtree does not get the boundaries
-  set (they are -1,-1).  Calling setUnknownTokenBoundaries() on
-  the returned tree sets the boundaries appropriately according to the
-  children's token boundaries.
-
-January 22, 2009
-
-* fixed to be listeners.add(listener); in addListener() of DebugEventHub.java
-
-January 20, 2009
-
-* Removed runtime method: mismatch in BaseRecognizer and TreeParser.  Seems
-  to be unused.  Had to override method recoverFromMismatchedToken() in
-  TreeParser to get rid of single token insertion and deletion for
-  tree parsing because it makes no sense with all of the up-and-down nodes.
-
-* Changed JIRA port number from 8888 to no port spec (aka port 80) and all
-  refs to it in this file.
-
-* Changed BaseTree to Tree typecase in getChild and toStringTree() and
-  deleteChild() to make more generic.
-
-December 16, 2008
-
-* Added -verbose cmd-line option and turned off standard header
-  and list of read files.  Silent now without -verbose.
-
-November 24, 2008
-
-* null-ptr protected getParent and a few others.
-
-* Added new ctor to CommonTreeNodeStream for walking subtrees.  Avoids
-  having to make new serialized stream as it can reuse overall node stream
-  buffer.
-
-November 20, 2008
-
-* Updated BaseTest to isolate tests better.
-
-November 17, 2008
-
-* BaseTreeAdaptor.getType() was hosed; always gave 0.  Thanks to Sam Harwell.
-
-November 8, 2008
-
-* Added methods to BaseRecognizer:
-  public void setBacktrackingLevel(int n) { state.backtracking = n; }
-  /** Return whether or not a backtracking attempt failed. */
-  public boolean failed() { return state.failed; }
-
-November 5, 2008
-
-* Tweaked traceIn/Out to say "fail/succeeded"
-
-* Bug in code gen for tree grammar wildcard list label x+=.
-
-* Use of backtrack=true anywhere in grammar causes backtracking sensitive
-  code to be generated.  Actions are gated etc...  Previously, that only
-  happened when a syntactic predicate appeared in a DFA.  But, we need
-  to gate actions when backtracking option is set even if no decision
-  is generated to support filtering of trees.
-
-October 25, 2008
-
-* Fixed debug event socket protocol to allow spaces in filenames.
-
-* Added TreeVisitor and TreeVisitorAction to org.antlr.runtime.tree.
-
-October 22, 2008
-
-* Added inContext() to TreeParser.  Very useful for predicating
-  tree grammar productions according to context (their parent list).
-  Added new TestTreeContext unit tests (15).
-
-    /** Check if current node in input has a context.  Context means sequence
-     *  of nodes towards root of tree.  For example, you might say context
-     *  is "MULT" which means my parent must be MULT.  "CLASS VARDEF" says
-     *  current node must be child of a VARDEF and whose parent is a CLASS node.
-     *  You can use "..." to mean zero-or-more nodes.  "METHOD ... VARDEF"
-     *  means my parent is VARDEF and somewhere above that is a METHOD node.
-     *  The first node in the context is not necessarily the root.  The context
-     *  matcher stops matching and returns true when it runs out of context.
-     *  There is no way to force the first node to be the root.
-     */
-    public boolean inContext(String context) {...}
-
-* Added 3 methods to Tree interface [BREAKS BACKWARD COMPATIBILITY]
-
-    /** Is there a node above with token type ttype? */
-    public boolean hasAncestor(int ttype);
-
-    /** Walk upwards and get first ancestor with this token type. */
-    public Tree getAncestor(int ttype);
-
-    /** Return a list of all ancestors of this node.  The first node of
-     *  list is the root and the last is the parent of this node.
-     */
-    public List getAncestors();
-
-October 21, 2008
-
-* Updated unit tests to be correct for \uFFFE->\uFFFF change
-
-* Made . in tree grammar look like ^(. .*) to analysis, though ^(. foo)
-  is illegal (can't have . at root). Wildcard is subtree or node.
-  Fixed bugs:
-    http://www.antlr.org/browse/ANTLR-248
-    http://www.antlr.org/browse/ANTLR-344
-
-October 1, 2008 -- ANTLR v3.1.1
-
-September 8, 2008
-
-* Labels on tokens, rules carry into synpreds now so semantic predicates work.
-  This didn't work since labels were stripped in the synpred and they weren't
-  defined in the generated method.
-
-  a : x=A z=a {$x.text.equals($z.text)}? A
-    | y=A a A A
-    ;
-
-September 3, 2008
-
-* Made a REV static variable in Tool so that we can change the rev for
-  daily builds.
-
-* Made \uFFFF a valid character. Token types are 32-bit clean using -1
-  not 0x0000FFFF as -1 so it should be okay.  Label.java:
-    public static final int MIN_CHAR_VALUE = '\u0000';
-    public static final int MAX_CHAR_VALUE = '\uFFFF';
-
-August 30, 2008
-
-* Changed messages in en.stg so that TOKEN_NONDETERMINISM correctly
-  indicates when actions hid semantic predicates.
-
-August 15, 2008
-
-* Tweaked build properties and build.xml
-
-August 13, 2008
-
-* Fixed ANTLR-314; 3.1 introduced a problem with list labels +=
-
-August 12, 2008 -- ANTLR v3.1
-
-* Added JavaScript target
-
-August 7, 2008
-
-* an NFA target of EOF predicate transition in DFA caused an exception in
-  getPredicatesPerNonDeterministicAlt().
-
-* Kay Roepke found a nasty bug when debugging AST-constructing
-  composite recognizers.  If the input state was null to the constructor,
-  super class constructor created a new parser state object.
-  Later, though, we passed the argument state, not this.state,
-  to the delegate constructors, forcing them to use different
-  state objects!  Changed state to this.state in Dbg.stg constructors.
-
-* Ack. messed up debug/AST.  Have to set proxy's tree adaptor; it's
-  a circular ref.  Just an ASTDbg.stg change.
-
-August 4, 2008
-
-* superClass works now for lexers
-
-* Made Grammar.defineNamedAction propagate header actions down to all
-  delegates if root grammar; regardless of lexer/parser scope.
-
-* Rejiggered AST templates to propagate changes to tree adaptor
-  for delegate grammars. Fixes ANTLR-302
-
-August 4, 2008
-
-* FOLLOW set computations altered constant FOLLOW bit sets.
-
-* Added (...) around all predicate evaluations.
-
-* Extra init code for tree parser nonrewrite mode removed.
-
-* Added empty child list check in becomeRoot
-
-August 3, 2008
-
-* Was using RuleReturnScope not Rulename_return for list labels in tree
-  parser.
-
-* Didn't set _last in tree parser for rule ref track stuff (rewrite=true)
-
-August 2, 2008
-
-* Benjamin found another rewrite engine bug.
-
-July 30, 2008
-
-* CommonTreeNodeStream / CommonTokenStream did not reset properly.
-
-July 29, 2008
-
-* Fixed another bug in TokenRewriteStream; didn't like inserts after end.
-
-July 28, 2008
-
-* Fixed bug in TokenRewriteStream.toString(start,stop); it ignored
-  parameters. ;)
-
-July 17, 2008
-
-* allow qualified type names in hetero <...> options like T<a.b.c.Node>
-
-July 5, 2008
-
-* treeLevel not set for setBlock alts; added unit test
-
-July 3, 2008
-
-* Fixed ANTLR-267. parse tree added nodes during backtracking and
-  cyclic DFAs.  tracks hidden tokens too now. Added toInputString() to
-  get text back including hidden tokens.  Shows <epsilon> for rules
-  that match nothing.
-
-June 26, 2008
-
-* Added gParent ptr that points to immediate parent grammar. E.g.,
-    // delegators
-    public MParser gM;
-    public M_S gS;
-    public M_S gParent = gS; // NEW
-
-* Grammar imports didn't set all the delegate pointers...lots of imported
-  grammars would cause a null ptr exception.  Fixes ANTLR-292.
-
-June 25, 2008
-
-* List labels in tree construction didn't always track the tree; sometimes
-  had a rule result structure.
-
-June 4, 2008
-
-* Improved unit testing so that each test suite executes and builds grammars
-  in a separate temporary directory. This means they can execute concurrently.
-  There also seemed to be a problem with my class path during execution.
-  Moved tmpdir ahead of the standard CLASSPATH.
-
-* By virtue of an improvement to StringTemplate, output newlines
-  in generated files should be normalized to whatever your host uses.
-
-June 3, 2008
-
-* Restrict legality of grammar options; for example you cannot use output option
-  in lexer anymore.
-
-June 2, 2008
-
-* Throw illegal arg exception upon invalid TokenRewriteStream ops. Rewrote
-  core of engine.  Slightly different operation.  Added many more unit tests.
-
-3.1b1 - May 20, 2008
-
-May 11, 2008
-
-* rewrite=true, output=AST for tree grammar was not working.  Altered trees were not
-  propagated back up the rule reference chain.  Required a number of mods to
-  ASTTreeParser.stg.  Added unit tests.
-
-May 10, 2008
-
-* [BACKWARD INCOMPATIBLE if you override match()]
-  I had turned off single token insertion and deletion because I could not figure
-  out how to work with trees and actions. I figured that out, so I turned it back on.
-  match() returns Object matched now (parser, tree parser) so we can set labels
-  on token refs properly after single token ins/del error recovery.  Allows actions
-  and tree construction to proceed normally even though we recover in the middle of
-  an alternative.  Added methods for conjuring up missing symbols: getMissingSymbol().
-
-* refactored BaseRecognizer error handling routines
-
-* Single token error recovery was not properly taking into consideration EOF.
-
-* ANTLR no longer tries to recover in tree parsers inline using single node deletion or insertion; throw exception.  Trees should be well formed as they are not created by users.
-
-* Added empty constructors to the exception classes that did not have them so that ANTLRWorks can create the exceptions.
-
-* Made debug tree adaptor deal with tokens conjured up during error recovery.
-
-* Removed extra location() debug element that was emitted.
-
-May 8, 2008
-
-* ANTLR didn't update line/col to DFA map for AW.
-
-May 6-7, 2008
-
-* Insufficiently covered (with semantic predicates) alt warnings are now emitted before
-  nondeterminisms so it's clear the nondeterminism is a result of insufficient preds.
-
-* Improved insufficiently covered alt warnings from:
-    warning(203): T.g:2:3: The following alternatives are insufficiently covered with predicates: 1
-  to:
-    warning(203): T.g:2:3: Input B is insufficiently covered with predicates
-    at locations: alt 1: line 3:15, alt 2: line 2:9
-
-* Improved nondeterminism warning to have:
-  Semantic predicates were present but were hidden by actions.
-parser grammar U;
-a : (A B)? ;
-b : X a {p1}? A B | Y a {a1} {p2}? A B | Z a ;
-
-To create the prediction DFA for the optional sub rule in 'a', ANTLR must
-find all references to 'a' to determine what can follow. A B can follow 'a'
-in the first two alts of rule 'b'.  To resolve the conflict between matching
-A B immediately in the sub rule and exiting rule 'a' to match it in 'b',
-ANTLR looks for predicates. In this case, there are two predicates that
-indicate the semantic context in which the surrounding alternatives are
-valid. The problem is that one of the predicates is hidden by an action. It
-took me 1.5 days, but I've finally gotten ANTLR to properly track the
-insufficiently covered alternatives. Further, I have gotten it to tell you
-precisely where the uncovered predicates are even if they are simply hidden
-by actions. I have also updated all of the nondeterminism warnings so that
-they tell you if there was a predicate but one hidden by an action (this
-could be a separate condition from insufficiently covered predicates). Here
-are the messages from ANTLR:
-
-ANTLR Parser Generator  Version 3.1b1 (??)  1989-2007
-warning(203): U.g:2:5: Input such as "A B" is insufficiently covered with predicates at locations: alt 2: line 3:38 at B
-Semantic predicates were present but were hidden by actions.
-warning(200): U.g:2:5: Decision can match input such as "A B" using multiple alternatives: 1, 2
-As a result, alternative(s) 2 were disabled for that input
-Semantic predicates were present but were hidden by actions.
-
-* Fixed issue where
-r41
-   : (INT -> INT) ( ('+' i=INT) -> ^($i $r41) )* ';'
-   ;
-still warned about $r41 being ambig.
-
-* actions are now added to the NFA.
-
-* Fixed ANTLR-222.  ANTLR now ignores preds after actions.
-
-May 5, 2008
-
-* Fixed ANTLR-235 by backing out a change from 12-31-07.
-
-* Fixed ANTLR-249; I include semantic context again in closure busy signal.
-
-May 3, 2008
-
-* Fixed ANTLR-208.  Looks in library or in -o output path.  antlr -o foo T.g U.g where U needs T.tokens won't work unless we look in foo too.  fixed.
-
-* Refactored assign.types.g to move methods to a class called AssignTokenTypesBehavior.
-
-* Fixed ANTLR-207.  Lexers importing vocabs didn't see ';'=4 type aliases in .tokens.
-
-* Fixed ANTLR-228.  Couldn't use wildcard in alts with AST rewrites.
-
-May 2, 2008
-
-* Fixed ANTLR-230; can use \' now in action.
-
-* Scope attributes no longer have a stack depth check on front.  If you ref $r::a when r has not invoked you, then you get an exception not a default value.  Back to the way 3.0.1 worked.
-
-* $channel was a global variable in 3.0.1, unlike $type, which did not affect an invoking lexer rule.  Now it's local too.  Only $type and $channel are ever set with regularity.  Setting those should not affect an invoking lexer rule, so the following should work:
-
-  X : ID WS? '=' ID ;  // result is X on normal channel
-  WS : ' '+ {$channel = HIDDEN; } ;
-
-  STRING : '"' (ESC|.)* '"' ;  // result is STRING not ESC
-
-  FLOAT : INT '.' INT? ; // should be FLOAT
-  INT : Digit+ ;
-  fragment
-  Digit : '0'..'9' ;
-
-* Fixed bug in interpreter regarding (...)* loops
-
-May 1, 2008
-
-* Fixed ANTLR-202.  These now give warnings about ambig ref to $a.
-    a : ID a -> $a | INT ;
-  and
-    a : A a {$a.text} | B ;
-
-April 30, 2008
-
-* Fixed ANTLR-237. updated -depend to know about imported grammars.
-$ java org.antlr.Tool -depend -lib foo T.g
-  ANTLR Parser Generator  Version 3.1b1 (??)  1989-2007
-  T.g: foo/Java.g
-  TParser.java : T.g
-  T.tokens : T.g
-  TLexer.java : T.g
-  T_Java : T.g
-
-April 29, 2008
-
-* Fixed ANTLR-217; scope A,B,C; didn't work
-
-* Fixed ANTLR-224; ! or ^ on item in alt with rewrite gave exception
-
-* Added token options to terminals: ID<node=V; foo="Big bob"> etc...
-  node is default so you can do ID<V> for hetero tree types. most common.
-
-April 17, 2008
-
-* Use default msg if unknown recog type in getErrorMessage():
-	String msg = e.getMessage();
-
-April 14, 2008
-
-* %x.y = foo; was not working in @members section
-
-March 29, 2008
-
-* Import couldn't handle A imports B imports C.
-
-March 27, 2008
-
-* Added get/setInputStream to Token interface and affected classes.
-
-February 26, 2008
-
-* made fillBuffer public in CommonTreeNodeStream so we can add trees
-  to stream for interactive interpreters.
-
-February 14, 2008
-
-* Fixed a bug in the code generation where tree level 0 was used
-  no matter what to rewrite trees in tree grammars. added unit test
-
-* Fixed ANTLR-221. exceptions were generated when using
-  AST construction operators and no output=AST option.
-
-February 13, 2008
-
-* Improved error msgs for unreachable alts and tokens.
-
-February 11-12, 2008
-
-* Fixed ANTLR-219.
-  It looks like the AST construction code for sets was totally messed up.
-  This was for not only the new tree parser AST construction, but also
-  the regular tree construction for parsers. I had to introduce templates
-  in the ASTTreeParser.stg file to deal with this. added unit tests:
-  TestTreeGrammarRewriteAST.testSetMatchNoRewrite(),
-  testSetMatchNoRewriteLevel2(), testSetMatchNoRewriteLevel2Root().
-  Had to add template matchRuleBlockSet()
-  to differentiate between a regular set in one that is an entire rule.
-  If it is an entire rule, it has to set the return value, retval.tree.
-
-* Fixed ANTLR-220.
-  Made TreeAdaptor dupNode and dupTree events emit debugging events
-  so AW could see tree node duplications.
-
-February 4, 2008
-
-* BACKWARD INCOMPATIBILITY
-  Added getSourceName to IntStream and TokenSource interfaces and also the
-  BaseRecognizer.  Have to know where chars come from for error messages.
-  Widespread change, but a trivial one.
-
-January 17, 2008
-
-* Interpreter throws FailedPredicateException now when it sees a predicate;
-  before it was silently failing.  I'll make it work one of these days. ;)
-
-January 12, 2008
-
-* Copy ctor not copying start and stop for common token. Fixes ANTLR-212
-
-* Removed single token insertion and deletion for tokens, sets.
-  Required a change to the code generation for matchSet() template
-  and a tweak inside the BaseRecognizer.  To engage this again is easy,
-  just override mismatch() to call mismatchRecover(). I changed it to simply
-  throw an exception.
-
-* Added syntaxError recognizer state var so you can easily tell if
-  a recognizer failed.  Added getNumberOfSyntaxErrors() to recognizers.
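-  For example, a driver can now check the count after parsing instead of
-  relying on exceptions.  A minimal sketch; TLexer, TParser and the start
-  rule prog() are hypothetical generated artifacts, not part of this change:
-
-      import org.antlr.runtime.ANTLRStringStream;
-      import org.antlr.runtime.CommonTokenStream;
-      import org.antlr.runtime.RecognitionException;
-
-      public class CheckErrors {
-          public static void main(String[] args) throws RecognitionException {
-              // TLexer, TParser and prog() are hypothetical generated artifacts.
-              TLexer lexer = new TLexer(new ANTLRStringStream("x=9 9;"));
-              TParser parser = new TParser(new CommonTokenStream(lexer));
-              parser.prog();
-              if (parser.getNumberOfSyntaxErrors() > 0) {
-                  System.err.println(parser.getNumberOfSyntaxErrors() + " syntax error(s)");
-              }
-          }
-      }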
-
-* Added doc for the error node stuff:
-  http://www.antlr.org/wiki/display/ANTLR3/Tree+construction
-
-* Fixed ANTLR-193
-
-* Added recognizer methods to answer questions about current mismatched
-  token error.  Useful now since I don't automatically recover inline
-  to such errors (I throw exception):
-	mismatchIsUnwantedToken(IntStream input, int ttype)
-	mismatchIsMissingToken(IntStream input, BitSet follow)
-
-* Added UnwantedTokenException and MissingTokenException to make
-  match() problems more precise in case you want to catch differently.
-  Updated getErrorMessage() to be more precise.  Says:
-
-	line 2:9 missing EQ at '0'
-
-  now instead of
-
-	line 2:9 mismatched input '0' expecting EQ
-
-  Input "x=9 9;" gives
-
-	line 3:8 extraneous input '9' expecting ';'
-
-  When very confused, "x=9 for;", you still get old mismatched message:
-
-	line 3:8 extraneous input 'for' expecting ';'
-	line 3:11 mismatched input ';' expecting '('
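-  Since the new exception types are ordinary RecognitionException subclasses,
-  a subclass of a generated parser can report them differently through its
-  reporting hook.  A sketch only; TParser stands in for a hypothetical
-  generated parser:
-
-      import org.antlr.runtime.MissingTokenException;
-      import org.antlr.runtime.RecognitionException;
-      import org.antlr.runtime.TokenStream;
-      import org.antlr.runtime.UnwantedTokenException;
-
-      // Hypothetical subclass of a generated parser; only reporting changes.
-      public class ReportingParser extends TParser {
-          public ReportingParser(TokenStream input) { super(input); }
-
-          @Override
-          public void displayRecognitionError(String[] tokenNames, RecognitionException e) {
-              if (e instanceof UnwantedTokenException) {
-                  emitErrorMessage(getErrorHeader(e) + " extraneous token");
-              } else if (e instanceof MissingTokenException) {
-                  emitErrorMessage(getErrorHeader(e) + " missing token");
-              } else {
-                  super.displayRecognitionError(tokenNames, e);
-              }
-          }
-      }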
-
-* Added unit tests to TestAutoAST and copied to TestRewriteAST with
-  suitable rewrites to ensure basic error node insertion works.
-
-January 11, 2008
-
-* Adding errorNode to TreeAdaptor and various debug
-  events/listeners.  Had to add new class runtime.tree.CommonErrorNode
-  to hold all the goodies: input stream, start/stop objects.
-
-* Tweaked CommonTree.getType() to return INVALID_TOKEN_TYPE
-  instead of literal 0 (same thing at moment though).
-
-* Updated ANTLRWorks to show error nodes in tree as much as I could; Jean
-  will get to rest of it.
-
-January 9-10, 2008
-
-* Continued work on debugging/profiling composite grammars.
-
-* Updated debug protocol for debugging composite grammars.  enter/exit
-  rule needs grammar to know when to flip display in AW.
-
-* Fixed ANTLR-209.  ANTLR consumed 2 not 1 char to recover in lexer.
-
-* Added two faqs instead of making changes to antlr runtime about
-  lexer error handling:
-  http://www.antlr.org/wiki/pages/viewpage.action?pageId=5341230
-  http://www.antlr.org/wiki/pages/viewpage.action?pageId=5341217
-
-January 1-8, 2008
-
-* Making debugging/profiling work with composite grammars.
-
-* Updated ANTLRWorks so it still works for noncomposite grammars.
-
-* two new examples: import and composite-java (the java example grammar
-  broken up into multiple pieces using import).
-
-* Worked on composite grammars.  Had to refactor a lot of code to make
-  ANTLR deal with one meta grammar made up of multiple grammars.  I
-  thought I had it sort of working back in August.  Yes, but barely. Lots
-  of work to do it seemed.  Lots of clean up work.  Many new unit tests
-  in TestCompositeGrammars.  Had to add new error messages warning about
-  conflicting tokens inherited from multiple grammars etc...
-
-    TOKEN_ALIAS_CONFLICT(arg,arg2) ::=
-      "cannot alias <arg>; string already assigned to <arg2>"
-    TOKEN_ALIAS_REASSIGNMENT(arg,arg2) ::=
-      "cannot alias <arg>; token name already assigned to <arg2>"
-    TOKEN_VOCAB_IN_DELEGATE(arg,arg2) ::=
-      "tokenVocab option ignored in imported grammar <arg>"
-    INVALID_IMPORT(arg,arg2) ::=
-      "<arg.grammarTypeString> grammar <arg.name> cannot import <arg2.grammarTypeString> grammar <arg2.name>"
-    IMPORTED_TOKENS_RULE_EMPTY(arg,arg2) ::=
-      "no lexer rules contributed to <arg> from imported grammar <arg2>"
-    IMPORT_NAME_CLASH(arg,arg2) ::=
-      "combined grammar <arg.name> and imported <arg2.grammarTypeString> grammar <arg2.name> both generate <arg2.recognizerName>; import ignored"
-
-  This stuff got really really complicated.  Syntactic predicate names even
-  had to be scoped per grammar so they don't conflict.
-
-* When using subrules like (atom->atom) to set result tree, it was not
-  properly setting result (early enough).  Future code got null for
-  $rule.tree.
-
-December 31, 2007
-
-* Added the start of a semantic predicate computation for LL(1) to
-  solve a problem with slow grammar analysis even with k=1 due to
-  predicates.  Then I realized the problem with that grammar was
-  elsewhere.  Semantic context really shouldn't be used when
-  preventing closure recomputation (May 2008 I discovered I was
-  wrong--you do need it).  The predicates became huge even though the
-  reduced value would be no different.  The analyzer seems faster now
-  that I am not testing predicate values all the time.  Further it may
-  terminate sooner just due to reduced closure recursion.
-
-* Moved FIRST/FOLLOW computations to a separate class LL1Analyzer to
-  tidy up.
-
-* ANTLR lexer allowed octal escapes, but they didn't work. ;)  Rather than
-  fix, I'm removing.  Use '\uxxxx' to get even 8 bit char values: \u00xx.
-
-December 29, 2007
-
-* Fixed ANTLR-206. I wasn't avoiding analyzing decisions in
-  left-recursive rules.
-
-* Had to add hetero arg to all tokenRef*() templates.  Added _last
-  local var to track last child so we can do replaceChildren() during
-  AST rewrite mode for tree grammars.  Should be useful later for .text
-  property.  Ack, hetero arg is on lots of templates. :(  Moved
-  ruleCleanUp() template into ASTTreeParser and ASTParser groups.
-
-* added noRewrite() template (to Java.stg) so we can insert code during
-  rewrite mode to return original tree if no rewrite.  Might be useful
-  for token rewrites later.  For templates too?
-
-* Had to add if !rewriteMode around tree construction in tree parser
-  templates.
-
-* Harald Muller pointed out that we need to use <initValue(attr.type)>
-  in our tests for null token/rule property references. For int types
-  we need 0 not null. (p!=null?p.line:0).  Changed scopeAttributeRef,
-  ruleLabelRef.  Also changed the known typed attributes like
-  lexerRuleLabelPropertyRef_line to yield 0 upon null rule ref to
-  be consistent with case when we don't know the type.  Fixes ANTLR-195.
-  Added testTypeOfGuardedAttributeRefIsCorrect test and reset expected
-  output for 13 tests that now "fail".
-
-December 28, 2007
-
-* added polydiff example (Java target)
-
-* added "int" property for token and lexer rule refs.  super convenient. E.g.,
-  a : b=INT {int x = $b.int;} ;
-
-December 27, 2007
-
-* Changed -Xnoinlinedfa to -Xmaxinlinedfastates m where m is
-  maximum number of states a DFA can have before ANTLR avoids
-  inlining it.  Instead, you get a table-based DFA.  This
-  effectively avoids some acyclic DFAs that still have many states
-  with multiple incident edges.  The combinatorial explosion smacks
-  of infinite loop.  Fixes ANTLR-130.
-
-* [...] are allowed in args now but ] must be escaped as \]. E.g.,
-  a[String[\] ick, int i] : ... ;
-  And calling a rule: foo[x[i\], 34]
-  Fixes ANTLR-140.
-
-* Fixed ANTLR-105.  Target.getTargetStringLiteralFromANTLRStringLiteral()
-  escaped " that were already escaped.
-
-* Targets can now specify how to encode an int as a char escape.  Moved
-  DFA.encodeIntAsCharEscape to Target.
-
-* Bug in runtime.DFA.  If a special state (one with predicate) failed, it
-  tried to continue (causing out of range exception due to state = -1)
-  instead of reporting error.
-
-* With -dfa and combined grammar T.g, ANTLR builds T.dec-*.dot and TLexer.dec-*.dot
-
-* Fix ANTLR-165.
-  Generate TParser.java and TLexer.java from T.g if combined, else
-  use T.java as output regardless of type.
-  BACKWARD INCOMPATIBILITY since file names change.
-  I changed the examples-v3/java to be consistent.  Required XML.g ->
-  XMLLexer.java and fuzzy/Main.java change.
-
-* Fix ANTLR-169.  Deletes tmp lexer grammar file.
-
-December 25, 2007
-
-* Fixed ANTLR-111.  More unit tests in TestAttributes.
-
-December 25, 2007
-
-* Dangling states ("decision cannot distinguish between alternatives
-  for at least one input sequence") is now an error not a warning.
-
-* Added sample input sequence that leads to dangling DFA state, one
-  that cannot reach an accept state.  ANTLR ran into a case where
-  the same input sequence reaches multiple locations in the NFA
-  (and so not nondeterministic), but analysis ran out of further
-  NFA states to look for more input.  Commonly at EOF target states.
-  Now says:
-
-  error(202): CS.g:248:95: the decision cannot distinguish between alternative(s) 1,2 for input such as "DOT IDENTIFIER EOF"
-
-  Also fixed bug where dangling states did not resolve to stop states.
-
-* Fixed ANTLR-123
-
-December 17-21, 2007
-
-* k=1 doesn't prevent backtracking anymore as in
-  (options {k=1;}:'else' statement)?
-  if backtrack=true for overall grammar.  Set to false in subrule.
-
-* Optimized the analysis engine for LL(1).  Doesn't attempt LL(*) unless
-  LL(1) fails.  If not LL(1) but autobacktracking but no other kind of
-  predicate, it also avoids LL(*).  This is only important for really
-  big 4000 line grammars etc...
-
-* Lots of code clean up
-
-December 16, 2007
-
-* Yet more Kay pair programming.  Saved yet more RAM; 15% by
-  whacking NFA configurations etc. in each DFA state after DFA construction.
-
-* Overall we drop from 2m49s to 1m11s for a huge 4000 line TSQL grammar
-  with k=*.  Only needs -Xconversiontimeout 2000 now not
-  -Xconversiontimeout 5000 too.  With k=1, it's 1m30s down to 40s.
-
-December 15, 2007
-
-* Working with Kay Roepke, we got about 15% speed improvement in
-  overall ANTLR exec time.  Memory footprint seems to be about 50%
-  smaller.
-
-December 13-14, 2007
-
-* I abort entire DFA construction now when I see recursion in > 1 alt.
-  Decision is non-LL(*) even if some pieces are LL(*).  Safer to bail
-  out and try with fixed k.  If user set fixed k then it continues because
-  analysis will eventually terminate for sure.  If a pred is encountered
-  and k=* and it's non-LL(*), it aborts and retries at k=1 but does NOT
-  emit an error.
-
-* Decided that recursion overflow while computing a lookahead DFA is
-  serious enough that I should bail out of entire DFA computation.
-  Previously analysis tried to keep going and made the rules about
-  how analysis worked more complicated.  Better to simply abort when
-  decision can't be computed with current max stack (-Xm option).
-  User can adjust or add predicate etc...  This is now an error
-  not a warning.
-
-* Recursion overflow and unreachable alt is now a fatal error; no code gen.
-  The decision will literally not work.
-
-* Cleaned up how DFA construction/analysis aborts due to non-LL(*) and
-  overflow etc...  Throws exceptions now, which cleans up a bunch of IF
-  checks etc...  Very nice now. Exceptions:
-	analysis/AnalysisRecursionOverflowException.java
-	analysis/AnalysisTimeoutException.java
-	analysis/NonLLStarDecisionException.java
-
-* ErrorManager.grammarWarning() counted them as errors not warnings.
-
-* Unreachable alt warnings are now errors.
-
-* The upshot of these changes is that I fixed ANTLR-178 and did
-  lots of refactoring of code handling analysis failure.
-
-December 11, 2007
-
-* Could not deal with spaces, oddly enough, in arg lists:
-	grammar Bad;
-	a : A b["foo", $A.text] ;
-	b[String x, String y] : C ;
-
-October 28, 2007
-
-* Made ANTLR emit a better error message when it cannot write the
-  implicit lexer file from a combined grammar. Used to say "cannot open
-  file", now says "cannot write file" and gives backtrace.
-
-September 15, 2007
-
-* Added getCharStream to Lexer.
-
-September 10, 2007
-
-* Added {{...}} forced action executed even during backtracking.
-
-September 9, 2007
-
-* r='string' in lexer got a duplicate label definition.
-
-August 21, 2007
-
-* $scope::variable refs now check for empty stack so that expr == null if
-  $scope has an empty stack. Works for $scope[...]::variable too.  Nice!
-
-August 20, 2007
-
-* Added reset() to CommonTreeNodeStream, token stream too
-
-* Made refs to rule/token properties use ?: to avoid null ptr exception.
-  $label.st now is label!=null?label.st:null.  Updated TestAttributes.
-  This is useful not only for optional rule/token refs, but also during
-  error recovery.  If ID is not matched, $ID.text won't cause a null ptr.
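-  The same guard is easy to mirror in hand-written actions or driver code;
-  a trivial illustration (the helper class and its name are made up):
-
-      import org.antlr.runtime.Token;
-
-      // Made-up helper mirroring the generated label!=null?label.getText():null guard.
-      public final class Labels {
-          private Labels() {}
-
-          public static String textOrNull(Token label) {
-              return label != null ? label.getText() : null;
-          }
-      }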
-
-August 20, 2007
-*	Fixed ANTLR-177: hashCode/equals not consistent for label
-	Fixed bug where Rule was compared to string; introduced from dev branch
-
-August 15, 2007 -- Got rough draft of the grammar import working.
-                   Submit to dev and then integrate into mainline.
-
-	All file changes/additions:
-
-	README.txt	# edit
-	CHANGES.txt	# add
-	  Factored out the changes from the readme.
-
-	runtime/Java/src/org/antlr/runtime/BaseRecognizer.java	# edit
-	runtime/Java/src/org/antlr/runtime/DFA.java	# edit
-	runtime/Java/src/org/antlr/runtime/Lexer.java	# edit
-	runtime/Java/src/org/antlr/runtime/Parser.java	# edit
-	runtime/Java/src/org/antlr/runtime/debug/DebugParser.java	# edit
-	runtime/Java/src/org/antlr/runtime/tree/TreeParser.java	# edit
-	  Factored state fields into RecognizerSharedState
-	  object. You will see a lot of things like
-            state.errorRecovery = false;
-	runtime/Java/src/org/antlr/runtime/RecognizerSharedState.java	# add
-          Shares all recognizer state variables including lexer even though
-	  these are superfluous to parsers and tree parsers.  There
-	  was a casting issue that I could not resolve.
-
-	src/org/antlr/Tool.java	# edit
-	  Broke apart Grammar.setGrammarContent() into
-	  parseAndBuildAST() and analyzeGrammar() to make the grammar
-	  import work. I needed to be able to look at the trees for
-	  imported grammars before analyzing them and building DFA. Added
-	  use of the CompositeGrammar object and handling of multiple
-	  delegate grammars. Changed decision DFA DOT file names to
-	  include the grammar name.
-
-	src/org/antlr/analysis/DFA.java	# edit
-	  Just tweaked to use generics, updated a comment.
-
-	src/org/antlr/analysis/DecisionProbe.java	# edit
-	  Just tweaked to use generics.
-
-	src/org/antlr/analysis/NFA.java	# edit
-	  NFAs now span multiple grammars, so I moved the NFA state
-	  tracking to the composite grammar object.
-
-	src/org/antlr/analysis/NFAState.java	# edit
-	  Added some null checking and made a field public.
-
-	src/org/antlr/analysis/NFAToDFAConverter.java	# edit
-	  Changed a method call to directly access a field.
-
-	src/org/antlr/analysis/RuleClosureTransition.java	# edit
-	  Instead of using a rule index, which does not span multiple
-	  grammars, the transition object now tracks a pointer to
-	  the actual Rule definition object.
-
-	src/org/antlr/analysis/SemanticContext.java	# edit
-	  Tweaked to use a field instead of a method
-
-	src/org/antlr/codegen/ActionTranslator.g	# edit
-	src/org/antlr/codegen/ActionTranslatorLexer.java	# edit
-	  Tweaked to use new runtime and they changed method name.
-
-	src/org/antlr/codegen/CodeGenerator.java	# edit
-	  Tweaked comments.
-
-	src/org/antlr/codegen/codegen.g	# edit
-	  Added import grammar syntax and altered rule atom to pass a
-	  scope around so that grammar.rule works.  Caution: this
-	  feature is used internally by ANTLR and is not meant to be
-	  used by users at this point.
-
-	src/org/antlr/codegen/templates/ANTLRCore.sti	# edit
-	  Added scope to all ruleref template interfaces.
-
-	src/org/antlr/codegen/templates/Java/Java.stg	# edit
-	  Grammars can now import other grammars, which I implemented
-	  using a delegation pointer to the other grammar(s). So if
-	  grammar A imports grammars B and C, then the generated
-	  recognizer for A must have delegation pointers to BParser
-	  and CParser objects. These are now fields:
-
-	    // delegates
-	    <grammar.delegates:
-	     {g|public <g.name>Lexer <g:delegateName()>;}; separator="\n">
-
-          Also, B and C must have back pointers to the delegator so
-          that they can refer to rules that have been overridden.
-          This is a mechanism akin to static inheritance:
-
-	    // delegators
-	    <grammar.delegators:
-	     {g|public <g.name>Lexer <g:delegateName()>;}; separator="\n">
-
-	  This file also has a lot of changes so that state variables
-	  now are state.backtracking instead of the implied
-	  this.backtracking.
-
-	  The file also refers to grammar.delegatedRules attribute
-	  which is the list of Rule objects for which you must
-	  generate manual delegation.  This amounts to a stub whereby
-	  rule foo's method foo() simply calls X.foo() if foo is not
-	  defined inside the delegator.
-
-	  You will notice that the ruleref templates now take a scope
-	  so that the implicit Tokens rule can refer to the
-	  delegate.Tokens rule in a delegate grammar.  This is the way
-	  I do lexer grammar imports.
-
-	  I added a template called delegateName which uses the
-	  grammar name to compute a delegate name if the user does not
-	  specify a label in the import statement such as:
-
-	  import x=X;
-
-	  Oh, note that rule reference templates all receive a Rule
-	  object now instead of the simple rule name as the 'rule'
-	  attribute.  You will see me doing <rule.name> instead of
-	  <name> now.
-
-	src/org/antlr/codegen/templates/Java/Dbg.stg	# edit
-	  Changes mirroring the constructor and field stuff from
-	  Java.stg. Part of this is a cut and paste because of a bug
-	  in ST.
-
-	src/org/antlr/codegen/templates/Java/AST.stg	# edit
-	src/org/antlr/codegen/templates/Java/ASTParser.stg	# edit
-	src/org/antlr/codegen/templates/Java/ASTTreeParser.stg	# edit
-	  Just added the scope attribute.
-
-	src/org/antlr/test/BaseTest.java	# edit
-	  Added functionality to support testing composite grammars.
-	    execLexer()
-
-	src/org/antlr/test/TestAttributes.java	# edit
-	  Tweak to deal with shared recognizer state.
-
-	src/org/antlr/test/TestCompositeGrammars.java	# add
-	  Start of my unit tests.
-
-	src/org/antlr/tool/CompositeGrammar.java	# add
-	src/org/antlr/tool/CompositeGrammarTree.java	# add
-	  Tracks main grammar and all delegate grammars. Tracks unique
-	  NFA state numbers and unique token types. This keeps a tree
-	  of grammars computed from the import/delegation chain. When
-	  you want to look up a rule, it starts at the root of the
-	  tree and does a pre-order search to find the rule.
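-	  The lookup is just a pre-order walk over that tree of grammars.  An
-	  illustrative sketch only (not the actual CompositeGrammarTree code),
-	  with made-up node and rule representations:
-
-	      import java.util.ArrayList;
-	      import java.util.List;
-
-	      // Illustrative only: pre-order rule lookup over a tree of grammars.
-	      class GrammarNode {
-	          final String name;
-	          final List<String> rules = new ArrayList<String>();
-	          final List<GrammarNode> delegates = new ArrayList<GrammarNode>();
-
-	          GrammarNode(String name) { this.name = name; }
-
-	          /** Return the first grammar defining ruleName: this one, then delegates. */
-	          GrammarNode findRule(String ruleName) {
-	              if (rules.contains(ruleName)) return this;
-	              for (GrammarNode d : delegates) {
-	                  GrammarNode hit = d.findRule(ruleName);
-	                  if (hit != null) return hit;
-	              }
-	              return null;
-	          }
-	      }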
-
-	src/org/antlr/tool/ActionAnalysis.g	# edit
-	src/org/antlr/tool/ActionAnalysisLexer.java	# edit
-
-	src/org/antlr/tool/AttributeScope.java	# edit
-	  Updated to use generics in one place.
-
-	src/org/antlr/tool/DOTGenerator.java	# edit
-	  Updated to indicate when nonlocal rules are referenced.
-
-	src/org/antlr/tool/ErrorManager.java	# edit
-	  Added some error messages for import grammars; I need more.
-
-	src/org/antlr/tool/FASerializer.java	# edit
-	  Tweaked to use a field not method.
-
-	src/org/antlr/tool/Grammar.java	# edit
-	  This is where most of the meat is for the grammar import
-	  stuff as you can imagine.  I factored out the token type
-	  tracking into the CompositeGrammar object. I added code to
-	  the addArtificialMatchTokensRule method so that it includes
-	  references to all delegate lexer Tokens rules. Altered the
-	  rule lookup stuff so that it knows about delegate grammars.
-
-	src/org/antlr/tool/GrammarAST.java	# edit
-	src/org/antlr/tool/GrammarAnalysisAbortedMessage.java	# edit
-	src/org/antlr/tool/GrammarReport.java	# edit
-	src/org/antlr/tool/NonRegularDecisionMessage.java	# edit
-	  Made enclosing rule visible as field.
-
-	src/org/antlr/tool/GrammarSanity.java	# edit
-	  General cleanup and addition of generics.
-
-	src/org/antlr/tool/Interpreter.java	# edit
-	  Reference fields instead of methods.
-
-	src/org/antlr/tool/NFAFactory.java	# edit
-	  General cleanup and use of Rule object instead of rule
-	  index.
-
-	src/org/antlr/tool/NameSpaceChecker.java	# edit
-	  A little bit of cleanup and changes to use either the local
-	  or globally visible rule. Added code to check that scopes
-	  are valid on scoped rule references. again this is an
-	  internal feature, not to be used by users.
-
-	src/org/antlr/tool/RandomPhrase.java	# edit
-	  Tweaked.
-
-	src/org/antlr/tool/Rule.java	# edit
-	  Added field imported. Removed some unused methods by
-	  commenting them out. Made toString() more expressive.
-
-	src/org/antlr/tool/antlr.g	# edit
-	src/org/antlr/tool/antlr.print.g	# edit
-	src/org/antlr/tool/assign.types.g	# edit
-	src/org/antlr/tool/buildnfa.g	# edit
-	src/org/antlr/tool/define.g	# edit
-	  Added syntax for import statement.  assign.types.g is the
-	  grammar that invokes Grammar.importGrammar().
-
-	src/org/antlr/tool/templates/messages/languages/en.stg	# edit
-	  Added error messages.
-
-	Added
-
-	CHANGES.txt
-	runtime/Java/src/org/antlr/runtime/RecognizerSharedState.java
-	src/org/antlr/test/TestCompositeGrammars.java
-	src/org/antlr/tool/CompositeGrammar.java
-	src/org/antlr/tool/CompositeGrammarTree.java
-
-3.0.1 - August 13, 2007
-
-[See target pages on the wiki for more information on the non-Java targets]
-
-August 7, 2007
-
-* added escaping of double quotes in DOTTreeGenerator
-
-July 22, 2007
-
-* fixed dynamic scope implementation in lexers. They were not creating new scope
-  entries on the stack.  Unsupported feature!
-
-July 30, 2007
-
-* float return values were initialized to 0.0 not 0.0f in Java.
-
-July 28, 2007
-
-* Sam Ellis points out an init var bug in ANTLRReaderStream.
-
-July 27, 2007 (done in dev branch)
-
-* Moved token type index stuff from CommonTreeNodeStream to TreeWizard
-
-* Added getChildren to BaseTree.
-
-* Added heterogeneous tree functionality; rewrite for parser/tree parser
-  and auto AST constr. for parser.
-
-	org/antlr/runtime/tree/RewriteRuleElementStream.java
-	org/antlr/runtime/tree/RewriteRuleNodeStream.java
-	org/antlr/runtime/tree/RewriteRuleTokenStream.java
-		Renamed method next() and refactored things to have more
-		specific methods: nextToken, nextNode, nextTree.
-
-	codegen/codegen.g
-		Updated to include new <NodeType> AST structure for
-		token references.  Pushed hereto attribute into
-		token references.  Pushed the hetero attribute into
-	codegen/templates/Java/AST.stg
-		Factored out a few templates:
-			createImaginaryNode(tokenType,hetero,args)
-			createRewriteNodeFromElement(token,hetero,args)
-		Converted a lot of stream next() calls to more specific
-			nextToken, nextNode, nextTree per above.
-	codegen/templates/Java/ASTParser.stg
-		Added createNodeFromToken template and re-factored creation
-		sites to use that template.  Added hetero attribute.
-	codegen/templates/Java/ASTTreeParser.stg
-		Added createRewriteNodeFromElement template and re-factored.
-
-	test/TestHeteroAST.java
-		New file. Unit tests to test new hetero tree construction.
-	test/TestRewriteAST.java
-		Fixed test.  Nil single-node trees no longer return nil;
-		They return null.
-
-	tool/ErrorManager.java
-	tool/templates/messages/languages/en.stg
-		Added error message:
-		HETERO_ILLEGAL_IN_REWRITE_ALT(arg) ::=
-		  "alts with rewrites can't use heterogeneous types left of ->"
-
-	tool/antlr.g
-	tool/antlr.print.g
-	tool/assign.types.g
-	tool/buildnfa.g
-	tool/define.g
-		Added syntax for <NodeType> to token references.
-		Altered AST structure rippled through different phases.
-
-July 24, 2007
-
-* Deleted DoubleLinkTree.java; CommonTree does that now.
-
-July 23, 2007
-
-* template group outputFile; changed rewrite arg to rewriteMode.
-
-* added rewrite mode for tree parser build AST.
-
-July 22, 2007
-
-* Kay fixed dynamic scope implementation in lexers. They were not
-  creating new scope entries on the stack.  This is an UNSUPPORTED feature.
-
-* added getParent and getChildIndex to TreeAdaptor.  Added
-  implementation to CommonTree.  It's just too useful having those
-  parent and child indexes available for rewriting etc...  I tried 2x
-  to make an implementation of tree rewriting w/o this and the
-  constraints just made it too expensive and complicated.  Have to
-  update adaptors to set parent, child index values.  Updated Tree
-  interface and BaseTree also.  Should only affect target developers
-  not users.  Well, unless they impl Tree.
-
-* dupNode (via ctor) of CommonTree didn't copy start/stop token indexes.
-
-TARGET DEVELOPERS WARNING -- AST.stg split with some functionality
-                             going into ASTParser.stg then I added
-                             ASTTreeParser.stg.  CodeGenerator
-                             assumes new subgroups exist.
-
-July 20, 2007
-
-* Added AST construction for tree parsers including -> rewrite rules.
-  Rewrite mode (rewrite=true) alters the tree in place rather than
-  constructing a whole new tree.  Implementation notes:
-
-  org/antlr/runtime/tree/Tree.java
-	Add methods for parent and child index functionality.
-	Also added freshenParentAndChildIndexes() which you can use
-	to ensure that all double linking is set up right after you
-	manipulate the tree manually.  The setChild and related methods do
-	the proper thing so you shouldn't need this.
-	Added replaceChildren() to support tree rewrite mode in tree parsers
-  org/antlr/runtime/tree/BaseTree.java
-	Updated to set parent and child index stuff.  Added replaceChildren
-	method etc...  It still only has a list of children as sole field
-     	but calls methods that subclasses can choose to implement such as
-	CommonTree.
-  org/antlr/runtime/tree/CommonTree.java
-	Added parent and childIndex fields to doubly link.
-  org/antlr/runtime/tree/TreeAdaptor.java
-	Added methods for new parent and child index functionality.
-	Also added method for rewrite mode in tree parsers:
-	replaceChildren(Object parent, int startChildIndex,
-                        int stopChildIndex, Object t);
-	Added setChild and deleteChild methods
-  org/antlr/runtime/tree/BaseTreeAdaptor.java
-	Moved dupTree here from BaseTree.
-	Updated rulePostProcessing to deal with parent and child index.
-	Added setChild and deleteChild implementations
-  org/antlr/runtime/tree/CommonTreeAdaptor.java
-	Added methods to deal with the parent and child index for a node.
-
-  org/antlr/runtime/tree/CommonTreeNodeStream.java
-	Removed token type index and method fillReverseIndex etc...
-	Probably will move into the tree wizard in the future.
-	Changed call/seek stack implementation to use IntArray
-	Added replaceChildren interface.
-  org/antlr/runtime/tree/TreeNodeStream.java
-	Added replaceChildren.
-  org/antlr/runtime/tree/UnBufferedTreeNodeStream.java
-	Added replaceChildren method but no implementation
-
-  codegen/templates/ANTLRCore.sti
-	Changed rewrite to a better name: rewriteMode
-	Added tree level argument to alt, tree so that auto AST
-        construction can occur while recognizing in tree parsers.
-
-  codegen/templates/Java/AST.stg
-	Split template group: added two subclasses to handle different
-	functionality for normal parsing and tree parsing + AST
-	construction.  Tree parsers default behavior is to dup tree
-	not construct another.  Added ASTParser.stg and
-	ASTTreeParser.stg to handle auto AST construction during
-	recognition for the two different parser types.  I just copied
-	the token, rule, set, wildcard templates to the subclasses.
-	The rewrite templates are still in AST.stg. I factored out the
-	node creation so that the same rewrite templates can be used
-	for both parsing and tree parsing.
-
-  codegen/templates/Java/ASTParser.stg
-	The templates needed to build trees with auto construction
-	during parsing.
-  codegen/templates/Java/ASTTreeParser.stg
-	The templates needed to build trees with auto construction
-	during tree parsing.
-  codegen/templates/Java/Java.stg
-	genericParser now has rewriteElementType (Node or Token) so
-	that the rewrite streams know what kind of elements are inside
-	during rewrite rule tree construction.
-  codegen/templates/Java/ST.stg
-	rewrite attribute name change to rewriteMode
-
-  org/antlr/runtime/debug/DebugTreeAdaptor.java
-  org/antlr/runtime/debug/DebugTreeNodeStream.java
-	Updated to handle new interfaces
-
-  test/BaseTest.java
-	Added test rig update to handle AST construction by tree parsers.
-	All tree construction runs automatically test sanity of parent
-	and child indexes.
-  test/TestTreeGrammarRewriteAST.java
-  test/TestTreeNodeStream.java
-  test/TestTrees.java
-	new file; tests the new parent and child index stuff in trees.
-
-July 19, 2007
-
-* implemented new unique ID; GC was causing non unique hash codes.  Debugging
-  tree grammars was messing up.
-
-* got tree rewrites working in tree grammars.  It builds a completely new
-  tree from old tree; i.e., you get two trees in memory.  W/o a rewrite
-  rule, the input for that rule is duplicated and returned. -> w/o elements
-  to the right means don't return anything; i.e., delete.  Ooops...way
-  harder than I thought.  Real implementation notes above.
-
-INCOMPATIBILITY WARNING -- templates have changed; must regen output from
-                           grammars.  Runtime libraries have also changed.
-                           Debug event listener interface has changed also.
-
-July 17, 2007
-
-* Added line/charposition to node socket events and event dump so
-  we have more info during tree parsing.  Only works if your
-  tree adaptor returns a valid Token object from getToken(treenode)
-  with line/col set.  Refactored consumeNode/LN to use deserializeNode().
-
-* Fixed mismatched tree node exceptions; for imaginary nodes, it said
-  "missing null".  Now prints the token type we found.
-
-* Cleaned up exception stuff. MismatchedTreeNodeException was setting
-  line/col, but only RecognitionException should do that.
-
-* If imaginary token gets a mismatch, there is no line info.  Search
-  backwards in the stream, if the input node stream supports it, to find
-  the last node with good line/col info. E.g.,
-
-ANTLRv3Tree.g: node from after line 156:72 mismatched tree node: EOA expecting <UP>
-
-  which used to be:
-
-ANTLRv3Tree.g: node from line 0:0 mismatched tree node: null expecting <UP>
-
-* mismatched tree node exceptions were not sent to the debug event stream.
-  Due to a type being slightly different on recoverFromMismatchedToken()
-  in DebugTreeParser.  Was calling BaseRecognizer version not subclass.
-  Now we get:
-
-  9459:   Recognition exception MismatchedTreeNodeException(0!=0)
-
-* List labels were not allowed as root nodes in tree rewrites like
-  ^($listlabel ...).  Had to add a template to AST.stg:
-
-  /** Gen ^($label ...) where label+=... */
-  rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
-
-
-July 16, 2007
-
-* fixed nextNode in RewriteRuleSubtreeStream was dup'ing too much,
-  screwing up debug event stream.  Also there was a bug in how
-  the rewrite tree stream stuff decided to dup nodes.
-
-* fixed bug in LT for tree parsing; text was not transmitted properly;
-  only single words worked.
-
-* made the decision for a rule put its line/col on the colon, not on the first token of the first alt.
-
-* remote ProxyToken now emits token index for easier debugging when looking
-  at AW's event stream.  For example, the @5 here is the token index:
-
-  31	Consume hidden [ /<64>,channel=99,30:7, @5]
-
-* same is true for consume nodes now:
-
-  25586	Consume node [')'/, <44>, 4712040,@1749]	25
-
-  When debugging tree parsers, it helps to track errors when you know
-  what corresponding input symbol created this tree node.
-
-* Changed debug events associated with trees quite a bit.  Passes nodes around
-  now rather than text, type, unique IDs etc...  Mostly affects internal stuff.
-  Target developers will have some work in their runtime to do to match
-  this change. :(  BUT, there is only a slight tweak in the Dbg.stg
-  and ASTDbg.stg templates.
-  Interface just didn't make sense as is.  If you turn on debugging, and
-  want to track a node creation, you want the node pointer not its ID,
-  text, etc...
-  Added ProxyTree for passing across socket.  Has line/charpos and tokenIndex
-
-July 15, 2007
-
-* added null ptr protection in CommonTreeAdaptor.
-
-July 14, 2007
-
-* null child in TreeAdaptor does nothing now.  Changed interface and
-  implementation.  Changed DebugTreeAdaptor to not fire events on null add
-  as well.
-
-July 12, 2007
-
-* added get method for the line/col to DFA map in Grammar.java
-
-July 7, 2007
-
-* fixed wrong order of test for exceptions in Lexer.getErrorMessage()
-
-June 28, 2007
-
-* Added ability to set the port number in the constructor for the debug parser.
-
-June 5, 2007
-
-* Changed (hidden) option -verbose to -Xnfastates; this just prints out the NFA states along each nondeterministic path for nondeterminism warnings.
-
-May 18, 2007
-
-* there were some dependencies with org.antlr.* that I removed from
-  org.antlr.runtime.*
-
-3.0 final - May 17, 2007
-
-May 14, 2007
-
-* Auto backtracking didn't work with ! and ^ suffixes on first element
-  of an alt.
-
-* Auto backtracking didn't work with an action as first element.
-
-May 10, 2007
-
-* turn off the warning about no local messages:
- no such locale file org/antlr/tool/templates/messages/languages/ru.stg retrying with English locale
-
-May 5, 2007
-
-* moving org.antlr.runtime to runtime/Java/src/org/... Other target
-  source / libs are under runtime/targetname.
-
-May 4, 2007
-
-* You could not use arguments on a token reference that was a root in a
-  tree rewrite rule like -> ^(ID[args] ...).
-
-May 3, 2007
-
-* Fixed ANTLR-82.  Actions after the root were considered part of
-  an optional child.  They were not always executed.  Required a change
-  to the ANTLRCore.sti interface for tree() template.
-
-May 2, 2007
-
-* Fixed ANTLR-117. Wasn't building decisions properly for subrules in
-  syntactic predicates.
-
-April 22, 2007
-
-* Made build.xml ref all jars in antlr lib.  Thanks to Miguel Ping.
-
-* Fixed ANTLR-11
-
-* Now labels on ranges and such in lexer work properly.
-
-* ActionAnalysisLexer was in wrong package.
-
-April 21, 2007
-
-* Pushing a huge update that fixes:
-	http://www.antlr.org/browse/ANTLR-112
-	http://www.antlr.org/browse/ANTLR-110
-	http://www.antlr.org/browse/ANTLR-109
-	http://www.antlr.org/browse/ANTLR-103
-	http://www.antlr.org/browse/ANTLR-97
-	http://www.antlr.org/browse/ANTLR-113
-	http://www.antlr.org/browse/ANTLR-66
-	http://www.antlr.org/browse/ANTLR-98
-	http://www.antlr.org/browse/ANTLR-24
-	http://www.antlr.org/browse/ANTLR-114
-	http://www.antlr.org/browse/ANTLR-5
-	http://www.antlr.org/browse/ANTLR-6
-
-  Basically, I gutted the way AST rewrites work.  MUCH better.
-
-* Fixed lots of little label issues in the lexer.  Couldn't do x+=ID
-  in lexer, for example.  Fixed ANTLR-114, ANTLR-112
-
-* Isolated EOT transition in lexer generated dangling else clause.
-  Fixed ANTLR-113.
-
-April 17, 2007
-
-* Fixed a major problem with gated semantic predicates.  Added more
-  unit tests.
-
-* Fixed bug in cyclic DFA with syntactic predicates.  Wasn't rewinding
-  properly.  Further, mark() in token stream did not fill buffer so
-  when you rewound back to last marker index was -1 not 0.  At same time
-  I fixed ANTLR-103.  Syn preds evaluated only once now.
-
-* Altered code gen file writing so it writes directly to a file
-  instead of building a big string and then writing that out.  Should
-  be faster and much less memory intensive.
-
-* Fixed so antlr writes files to correct location again.  See:
-
-http://www.antlr.org/wiki/pages/viewpage.action?pageId=1862
-
-3.0b7 - April 12, 2007
-
-April 10, 2007
-
-* Allows -> {...} actions now when building ASTs.  Fixed ANTLR-14.
-
-* Allows ! on sets and wildcard now during output=AST option. Fixed ANTLR-17.
-
-* Fixed ANTLR-92 bug.  Couldn't use sets with -> tree construction.
-
-* A token type with no corresponding lexer rule is now a warning.
-
-* Fixed set labels in lexer; ANTLR-60 bug
-
-* Fixed problem with duplicate state variable definitions in switch-case
-
-April 9, 2007
-
-* Gated predicates didn't work properly in cyclic DFA.
-
-April 7, 2007
-
-* Couldn't have more than one set per rule it seems.  Fixed.
-
-April 3, 2007
-
-* Fix a problem in my unused label optimization.  Added new
-  pass over actions to examine them.
-
-* RuleReturnScope has method back:
-  /** Has a value potentially if output=template; Don't use StringTemplate
-   *  type as it then causes a dependency with ST lib.
-   */
-  public Object getTemplate() { return null; }
-
-March 30, 2007
-
-* Fixed ANTLR-8.  Labels to rules w/o return values caused compile errors.
-
-* Fixed ANTLR-89; semantic predicates in lexer sometimes
-  caused exception in code gen.
-
-* Fixed ANTLR-36; remove runtime dependency with ST
-
-March 29, 2007
-
-* Over last few days, I've gutted how ANTLR handles sets of chars or
-  tokens.  I cleaned up a lot of stuff in the grammars and added lots
-  of unit tests.
-
-March 26, 2007
-
-* CommonTreeNodeStream didn't push correctly; couldn't handle very
-  deeply nested trees.
-
-* Fixed bug that E : 'a' 'b' ; made E be seen as an alias of 'a'.
-
-March 22, 2007
-
-* Working with Egor Ushakov from Sun Optimization / NetBeans team I
-  made all the Java lexer transition tables static w/o screwing up
-  ability to reference semantic predicates etc...  Only changed Java.stg
-
-* cached text string in CommonToken.getText(); saves on repeated calls;
-  Java mode.
-
-* made all generated methods final; saves a few percent speed according to
-  Egor Ushakov (Java only).
-
-* removed most assignments from each lexer rule and even the Lexer.emit()
-  call!  All done in nextToken now.  Saves on code gen size and a wee bit of
-  execution speed probably.  Variables became fields: type, channel, line,
-  etc... Now emit() needs no args even.  Again, Egor helped on this.
-
-March 17, 2007
-
-* Jonathan DeKlotz updated C# templates to be 3.0b6 current
-
-March 14, 2007
-
-* Manually-specified (...)=> force backtracking eval of that predicate.
-  backtracking=true mode does not however.  Added unit test.
-
-March 14, 2007
-
-* Fixed bug in lexer where ~T didn't compute the set from rule T.
-
-* Added -Xnoinlinedfa make all DFA with tables; no inline prediction with IFs
-
-* Fixed http://www.antlr.org/browse/ANTLR-80.
-  Sem pred states didn't define lookahead vars.
-
-* Fixed http://www.antlr.org/browse/ANTLR-91.
-  When forcing some acyclic DFA to be state tables, they broke.
-  Forcing all DFA to be state tables should give same results.
-
-March 12, 2007
-
-* setTokenSource in CommonTokenStream didn't clear tokens list.
-  setCharStream calls reset in Lexer.
-
-* Altered -depend.  No longer printing grammar files for multiple input
-  files with -depend.  Doesn't show T__.g temp file anymore. Added
-  TLexer.tokens.  Added .h files if defined.
-
-February 11, 2007
-
-* Added -depend command-line option that, instead of processing files,
-  it shows you what files the input grammar(s) depend on and what files
-  they generate. For combined grammar T.g:
-
-  $ java org.antlr.Tool -depend T.g
-
-  You get:
-
-  TParser.java : T.g
-  T.tokens : T.g
-  T__.g : T.g
-
-  Now, assuming U.g is a tree grammar ref'd T's tokens:
-
-  $ java org.antlr.Tool -depend T.g U.g
-
-  TParser.java : T.g
-  T.tokens : T.g
-  T__.g : T.g
-  U.g: T.tokens
-  U.java : U.g
-  U.tokens : U.g
-
-  Handles spaces by escaping them.  Pays attention to -o, -fo and -lib.
-  Dir 'x y' is a valid dir in current dir.
-
-  $ java org.antlr.Tool -depend -lib /usr/local/lib -o 'x y' T.g U.g
-  x\ y/TParser.java : T.g
-  x\ y/T.tokens : T.g
-  x\ y/T__.g : T.g
-  U.g: /usr/local/lib/T.tokens
-  x\ y/U.java : U.g
-  x\ y/U.tokens : U.g
-
-  You have API access via org.antlr.tool.BuildDependencyGenerator class:
-  getGeneratedFileList(), getDependenciesFileList().  You can also access
-  the output template: getDependencies().  The file
-  org/antlr/tool/templates/depend.stg contains the template.  You can
-  modify as you want.  File objects go in so you can play with path etc...
-
-February 10, 2007
-
-* no more .gl files generated.  All .g all the time.
-
-* changed @finally to be @after and added a finally clause to the
-  exception stuff.  I also removed the superfluous "exception"
-  keyword.  Here's what the new syntax looks like:
-
-  a
-  @after { System.out.println("ick"); }
-    : 'a'
-    ;
-    catch[RecognitionException e] { System.out.println("foo"); }
-    catch[IOException e] { System.out.println("io"); }
-    finally { System.out.println("foobar"); }
-
-  @after executes after bookkeeping to set $rule.stop, $rule.tree but
-  before scopes pop and any memoization happens.  Dynamic scopes and
-  memoization are still in generated finally block because they must
-  exec even if error in rule.  The @after action and tree setting
-  stuff can technically be skipped upon syntax error in rule.  [Later
-  we might add something to finally to stick an ERROR token in the
-  tree and set the return value.]  Sequence goes: set $stop, $tree (if
-  any), @after (if any), pop scopes (if any), memoize (if needed),
-  grammar finally clause.  Last 3 are in generated code's finally
-  clause.
-
-3.0b6 - January 31, 2007
-
-January 30, 2007
-
-* Fixed bug in IntervalSet.and: it returned the same empty set all the time
-  rather than new empty set.  Code altered the same empty set.
-
-* Made analysis terminate faster upon a decision that takes too long;
-  it seemed to keep doing work for a while.  Refactored some names
-  and updated comments.  Also made it terminate when it realizes it's
-  non-LL(*) due to recursion.  just added terminate conditions to loop
-  in convert().
-
-* Sometimes fatal non-LL(*) messages didn't appear; instead you got
-  "antlr couldn't analyze", which is actually untrue.  I had the
-  order of some prints wrong in the DecisionProbe.
-
-* The code generator incorrectly detected when it could use a fixed,
-  acyclic inline DFA (i.e., using an IF).  Upon non-LL(*) decisions
-  with predicates, analysis made cyclic DFAs.  But this broke
-  the computation that detects whether they are cyclic.  I just added
-  a protection in front of the acyclic DFA generator to avoid if
-  non-LL(*).  Updated comments.
-
-January 23, 2007
-
-* Made tree node streams use adaptor to create navigation nodes.
-  Thanks to Emond Papegaaij.
-
-January 22, 2007
-
-* Added lexer rule properties: start, stop
-
-January 1, 2007
-
-* analysis failsafe is back on; if a decision takes too long, it bails out
-  and uses k=1
-
-January 1, 2007
-
-* += labels for rules only work for output option; previously elements
-  of list were the return value structs, but are now either the tree or
-  StringTemplate return value.  You can label different rules now
-  x+=a x+=b.
-
-December 30, 2006
-
-* Allow \" to work correctly in "..." template.
-
-December 28, 2006
-
-* errors that are now warnings: missing AST label type in trees.
-  Also "no start rule detected" is warning.
-
-* tree grammars also can do rewrite=true for output=template.
-  Only works for alts with single node or tree as alt elements.
-  If you are going to use $text in a tree grammar or do rewrite=true
-  for templates, you must use in your main:
-
-  nodes.setTokenStream(tokens);
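-  A minimal driver sketch of that wiring; TLexer, TParser (whose start rule
-  prog returns a tree) and the tree walker TWalker are hypothetical generated
-  artifacts:
-
-      import org.antlr.runtime.ANTLRStringStream;
-      import org.antlr.runtime.CommonTokenStream;
-      import org.antlr.runtime.RecognitionException;
-      import org.antlr.runtime.tree.CommonTree;
-      import org.antlr.runtime.tree.CommonTreeNodeStream;
-
-      public class WalkWithText {
-          public static void main(String[] args) throws RecognitionException {
-              // TLexer, TParser, TWalker and prog() are hypothetical generated artifacts.
-              TLexer lexer = new TLexer(new ANTLRStringStream("x = 1;"));
-              CommonTokenStream tokens = new CommonTokenStream(lexer);
-              TParser parser = new TParser(tokens);
-              CommonTree tree = (CommonTree)parser.prog().getTree();
-
-              CommonTreeNodeStream nodes = new CommonTreeNodeStream(tree);
-              nodes.setTokenStream(tokens);  // needed for $text / rewrite=true
-              new TWalker(nodes).prog();
-          }
-      }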
-
-* You get a warning for tree grammars that do rewrite=true and
-  output=template and have -> for alts that are not simple nodes
-  or simple trees.  new unit tests in TestRewriteTemplates at end.
-
-December 27, 2006
-
-* Error message appears when you use -> in tree grammar with
-  output=template and rewrite=true for alt that is not simple
-  node or tree ref.
-
-* no more $stop attribute for tree parsers; meaningless/useless.
-  Removed from TreeRuleReturnScope also.
-
-* rule text attribute in tree parser must pull from token buffer.
-  Makes no sense otherwise.  added getTokenStream to TreeNodeStream
-  so rule $text attr works.  CommonTreeNodeStream etc... now let
-  you set the token stream so you can access later from tree parser.
-  $text is not well-defined for rules like
-
-     slist : stat+ ;
-
-  because stat is not a single node nor rooted with a single node.
-  $slist.text will get only first stat.  I need to add a warning about
-  this...
-
-* Fixed http://www.antlr.org/browse/ANTLR-76 for Java.
-  Enhanced TokenRewriteStream so it accepts any object; converts
-  to string at last second.  Allows you to rewrite with StringTemplate
-  templates now :)
-
-* added rewrite option that makes -> template rewrites do replace ops for
-  TokenRewriteStream input stream.  In output=template and rewrite=true mode
-  same as before 'cept that the parser does
-
-    ((TokenRewriteStream)input).replace(
-	      ((Token)retval.start).getTokenIndex(),
-	      input.LT(-1).getTokenIndex(),
-	      retval.st);
-
-  after each rewrite so that the input stream is altered.  Later refs to
-  $text will have rewrites.  Here's a sample test program for grammar Rew.
-
-        FileReader groupFileR = new FileReader("Rew.stg");
-        StringTemplateGroup templates = new StringTemplateGroup(groupFileR);
-        ANTLRInputStream input = new ANTLRInputStream(System.in);
-        RewLexer lexer = new RewLexer(input);
-        TokenRewriteStream tokens = new TokenRewriteStream(lexer);
-        RewParser parser = new RewParser(tokens);
-        parser.setTemplateLib(templates);
-        parser.program();
-        System.out.println(tokens.toString());
-        groupFileR.close();
-
-December 26, 2006
-
-* BaseTree.dupTree didn't dup recursively.
-
-December 24, 2006
-
-* Cleaned up some comments and removed field treeNode
-  from MismatchedTreeNodeException class.  It is "node" in
-  RecognitionException.
-
-* Changed type from Object to BitSet for expecting fields in
-  MismatchedSetException and MismatchedNotSetException
-
-* Cleaned up error printing in lexers and the messages that it creates.
-
-* Added this to TreeAdaptor:
-	/** Return the token object from which this node was created.
-	 *  Currently used only for printing an error message.
-	 *  The error display routine in BaseRecognizer needs to
-	 *  display where in the input the error occurred. If your
-	 *  tree implementation does not store information that can
-	 *  lead you to the token, you can create a token filled with
-	 *  the appropriate information and pass that back.  See
-	 *  BaseRecognizer.getErrorMessage().
-	 */
-	public Token getToken(Object t);
-
-December 23, 2006
-
-* made BaseRecognizer.displayRecognitionError nonstatic so people can
-  override it. Not sure why it was static before.
-
-* Removed state/decision message that comes out of no
-  viable alternative exceptions, as that was too much.
-  removed the decision number from the early exit exception
-  also.  During development, you can simply override
-  displayRecognitionError from BaseRecognizer to add the stuff
-  back in if you want.
-
-* made output go to an output method you can override: emitErrorMessage()
-
-* general cleanup of the error emitting code in BaseRecognizer.  Lots
-  more stuff you can override: getErrorHeader, getTokenErrorDisplay,
-  emitErrorMessage, getErrorMessage.
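-  With those hooks, collecting messages instead of printing them is a small
-  subclass away.  A sketch; TParser is a hypothetical generated parser:
-
-      import java.util.ArrayList;
-      import java.util.List;
-      import org.antlr.runtime.TokenStream;
-
-      // Hypothetical subclass that gathers errors via the emitErrorMessage() hook.
-      public class CollectingParser extends TParser {
-          private final List<String> errors = new ArrayList<String>();
-
-          public CollectingParser(TokenStream input) { super(input); }
-
-          @Override
-          public void emitErrorMessage(String msg) {
-              errors.add(msg);  // default implementation prints to stderr
-          }
-
-          public List<String> getErrors() { return errors; }
-      }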
-
-December 22, 2006
-
-* Altered TreeParser.matchAny() so that it skips entire trees if
-  node has children otherwise skips one node.  Now this works to
-  skip entire body of function if single-rooted subtree:
-  ^(FUNC name=ID arg=ID .)
-
-* Added "reverse index" from node to stream index.  Override
-  fillReverseIndex() in CommonTreeNodeStream if you want to change.
-  Use getNodeIndex(node) to find stream index for a specific tree node.
-  See getNodeIndex(), reverseIndex(Set tokenTypes),
-  reverseIndex(int tokenType), fillReverseIndex().  The indexing
-  costs time and memory to fill, but pulling stuff out will be lots
-  faster as it can jump from a node ptr straight to a stream index.
-
-* Added TreeNodeStream.get(index) to make it easier for interpreters to
-  jump around in tree node stream.
-
-* New CommonTreeNodeStream buffers all nodes in stream for fast jumping
-  around.  It now has push/pop methods to invoke other locations in
-  the stream for building interpreters.
-
-* Moved CommonTreeNodeStream to UnBufferedTreeNodeStream and removed
-  Iterator implementation.  moved toNodesOnlyString() to TestTreeNodeStream
-
-* [BREAKS ANY TREE IMPLEMENTATION]
-  made CommonTreeNodeStream work with any tree node type.  TreeAdaptor
-  now includes isNil, so implementations must add it; trivial, but it does
-  break backward compatibility.
-
-December 17, 2006
-
-* Added traceIn/Out methods to recognizers so that you can override them;
-  previously they were in-line print statements. The message has also
-  been slightly improved.
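-  Because traceIn/traceOut are ordinary instance methods, a subclass can
-  redirect the trace.  A sketch; TParser is a hypothetical generated parser:
-
-      import org.antlr.runtime.TokenStream;
-
-      // Hypothetical subclass routing rule tracing through its own sink.
-      public class TracingParser extends TParser {
-          public TracingParser(TokenStream input) { super(input); }
-
-          @Override
-          public void traceIn(String ruleName, int ruleIndex) {
-              System.err.println("enter " + ruleName);
-          }
-
-          @Override
-          public void traceOut(String ruleName, int ruleIndex) {
-              System.err.println("exit  " + ruleName);
-          }
-      }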
-
-* Factored BuildParseTree into debug package; cleaned stuff up. Fixed
-  unit tests.
-
-December 15, 2006
-
-* [BREAKS ANY TREE IMPLEMENTATION]
-  org.antlr.runtime.tree.Tree; needed to add get/set for token start/stop
-  index so CommonTreeAdaptor can assume Tree interface not CommonTree
-  implementation.  Otherwise, no way to create your own nodes that satisfy
-  Tree because CommonTreeAdaptor was doing
-
-	public int getTokenStartIndex(Object t) {
-		return ((CommonTree)t).startIndex;
-	}
-
-  Added to Tree:
-
-	/**  What is the smallest token index (indexing from 0) for this node
-	 *   and its children?
-	 */
-	int getTokenStartIndex();
-
-	void setTokenStartIndex(int index);
-
-	/**  What is the largest token index (indexing from 0) for this node
-	 *   and its children?
-	 */
-	int getTokenStopIndex();
-
-	void setTokenStopIndex(int index);
-
-December 13, 2006
-
-* Added org.antlr.runtime.tree.DOTTreeGenerator so you can generate DOT
-  diagrams easily from trees.
-
-	CharStream input = new ANTLRInputStream(System.in);
-	TLexer lex = new TLexer(input);
-	CommonTokenStream tokens = new CommonTokenStream(lex);
-	TParser parser = new TParser(tokens);
-	TParser.e_return r = parser.e();
-	Tree t = (Tree)r.tree;
-	System.out.println(t.toStringTree());
-	DOTTreeGenerator gen = new DOTTreeGenerator();
-	StringTemplate st = gen.toDOT(t);
-	System.out.println(st);
-
-* Changed the way mark()/rewind() work in CommonTreeNode stream to mirror
-  more flexible solution in ANTLRStringStream.  Forgot to set lastMarker
-  anyway.  Now you can rewind to non-most-recent marker.
-
-December 12, 2006
-
-* Temp lexers now end in .gl (T__.gl, for example)
-
-* TreeParser suffix no longer generated for tree grammars
-
-* Defined reset for lexer, parser, tree parser; rewinds the input stream also
-
-December 10, 2006
-
-* Made Grammar.abortNFAToDFAConversion() abort in middle of a DFA.
-
-December 9, 2006
-
-* fixed bug in OrderedHashSet.add().  It didn't track elements correctly.
-
-December 6, 2006
-
-* updated build.xml for future Ant compatibility, thanks to Matt Benson.
-
-* various tests in TestRewriteTemplate and TestSyntacticPredicateEvaluation
-  were using the old 'channel' vs. new '$channel' notation.
-  TestInterpretedParsing didn't pick up an earlier change to CommonToken.
-  Reported by Matt Benson.
-
-* fixed platform dependent test failures in TestTemplates, supplied by Matt
-  Benson.
-
-November 29, 2006
-
-*  optimized semantic predicate evaluation so that p||!p yields true.
-
-November 22, 2006
-
-* fixed bug that prevented var = $rule.some_retval from working in anything
-  but the first alternative of a rule or subrule.
-
-* attribute names containing digits were not allowed, this is now fixed,
-  allowing attributes like 'name1' but not '1name1'.
-
-November 19, 2006
-
-* Removed LeftRecursionMessage and apparatus because it seems that I check
-  for left recursion upfront before analysis and everything gets specified as
-  recursion cycles at this point.
-
-November 16, 2006
-
-* TokenRewriteStream.replace was not passing programName to next method.
-
-November 15, 2006
-
-* updated DOT files for DFA generation to make smaller circles.
-
-* made epsilon edges italics in the NFA diagrams.
-
-3.0b5 - November 15, 2006
-
-The biggest thing is that your grammar file names must match the grammar name
-inside (your generated class names will also be different) and we use
-$channel=HIDDEN now instead of channel=99 inside lexer actions.
-Should be compatible other than that.   Please look at complete list of
-changes.
-
-November 14, 2006
-
-* Force token index to be -1 for CommonToken in case not set.
-
-November 11, 2006
-
-* getUniqueID for TreeAdaptor now uses identityHashCode instead of hashCode.
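-  The point is to key node identity off the object itself rather than
-  whatever hashCode() returns.  An illustrative sketch of the same idea using
-  an identity map (not the actual TreeAdaptor code):
-
-      import java.util.IdentityHashMap;
-      import java.util.Map;
-
-      // Illustrative only: stable per-object IDs that ignore hashCode().
-      class NodeIds {
-          private final Map<Object, Integer> ids = new IdentityHashMap<Object, Integer>();
-          private int next = 1;
-
-          synchronized int idFor(Object node) {
-              Integer id = ids.get(node);
-              if (id == null) {
-                  id = next++;
-                  ids.put(node, id);
-              }
-              return id;
-          }
-      }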
-
-November 10, 2006
-
-* No grammar nondeterminism warning now when wildcard '.' is final alt.
-  Examples:
-
-	a : A | B | . ;
-
-	A : 'a'
-	  | .
-	  ;
-
-	SL_COMMENT
-	    : '//' (options {greedy=false;} : .)* '\r'? '\n'
-	    ;
-
-	SL_COMMENT2
-	    : '//' (options {greedy=false;} : 'x'|.)* '\r'? '\n'
-	    ;
-
-
-November 8, 2006
-
-* Syntactic predicates were not hoisted properly upon non-LL(*) decisions.  Other hoisting issues fixed.  Cleaned up code.
-
-* Removed the failsafe that checks whether I'm spending too much time on a single DFA; I don't think we need it anymore.
-
-November 3, 2006
-
-* $text, $line, etc... were not working in assignments. Fixed and added
-  test case.
-
-* $label.text translated to label.getText in lexer even if label was on a char
-
-November 2, 2006
-
-* Added error if you don't specify what the AST type is; actions in tree
-  grammar won't work without it.
-
-  $ cat x.g
-  tree grammar x;
-  a : ID {String s = $ID.text;} ;
-
-  ANTLR Parser Generator   Early Access Version 3.0b5 (??, 2006)  1989-2006
-  error: x.g:0:0: (152) tree grammar x has no ASTLabelType option
-
-November 1, 2006
-
-* $text, $line, etc... were not working properly within lexer rule.
-
-October 32, 2006
-
-* Finally actions now execute before dynamic scopes are popped in the
-  rule.  Previously it was not possible to access the rule's scoped variables
-  in a finally action.
-
-October 29, 2006
-
-* Altered ActionTranslator to emit errors on setting read-only attributes
-  such as $start, $stop, $text in a rule. Also forbid setting any attributes
-  in rules/tokens referenced by a label or name.
-  Setting dynamic scopes's attributes and your own parameter attributes
-  is legal.
-
-October 27, 2006
-
-* Altered how ANTLR figures out what decision is associated with which
-  block of grammar.  Makes ANTLRWorks correctly find DFA for a block.
-
-October 26, 2006
-
-* Fixed bug where EOT transitions led to no NFA configs in a DFA state,
-  yielding an error in DFA table generation.
-
-* renamed action.g to ActionTranslator.g;
-  the ActionTranslator class is now called ActionTranslatorLexer, as ANTLR
-  generates this classname now. Fixed rest of codebase accordingly.
-
-* added rules recognizing setting of scopes' attributes to ActionTranslator.g;
-  the Objective C target needed access to the right-hand side of the assignment
-  in order to generate correct code.
-
-* changed ANTLRCore.sti to reflect the new mandatory templates to support the
-  above, namely scopeSetAttributeRef, returnSetAttributeRef and the
-  ruleSetPropertyRef_* templates, with the exception of ruleSetPropertyRef_text
-  (we cannot set this attribute).
-
-October 19, 2006
-
-* Fixed 2 bugs in DFA conversion that caused exceptions.
-  altered functionality of getMinElement so it ignores elements<0.
-
-October 18, 2006
-
-* moved resetStateNumbersToBeContiguous() to after issuing of warnings;
-  an internal error in that routine should make more sense as issues
-  with decision will appear first.
-
-* fixed a cut/paste bug I introduced when I fixed the EOF-in-min/max
-  bug.  It briefly prevented the C grammar from working.
-
-October 17, 2006
-
-* Removed a seemingly unnecessary failsafe that ensured the DFA didn't
-  get too big.  It was resulting in some failures in code generation that
-  led me on quite a strange debugging trip.
-
-October 16, 2006
-
-* Use channel=HIDDEN not channel=99 to put tokens on hidden channel.
-
-October 12, 2006
-
-* ANTLR now has a customizable message format for errors and warnings,
-  to make it easier to fulfill requirements by IDEs and such.
-  The format to be used can be specified via the '-message-format name'
-  command line switch. The default for name is 'antlr', also available
-  at the moment is 'gnu'. This is done via StringTemplate, for details
-  on the requirements look in org/antlr/tool/templates/messages/formats/
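-
-  For example, an illustrative invocation (the grammar name T.g is made up):
-
-    $ java org.antlr.Tool -message-format gnu T.g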
-
-* line numbers for lexers in combined grammars are now reported correctly.
-
-September 29, 2006
-
-* ANTLRReaderStream improperly checked for end of input.
-
-September 28, 2006
-
-* For ANTLRStringStream, LA(-1) was off by one...gave you LA(-2).
-
-3.0b4 - August 24, 2006
-
-* error when no rules in grammar.  doesn't crash now.
-
-* Token is now an interface.
-
-* remove dependence on non runtime classes in runtime package.
-
-* filename and grammar name must be the same: Foo in Foo.g.  Generates FooParser,
-  FooLexer, ...  Combined grammar Foo generates Foo$Lexer.g, which generates
-  FooLexer.java.  Tree grammars generate FooTreeParser.java.
-
-August 24, 2006
-
-* added C# target to lib, codegen, templates
-
-August 11, 2006
-
-* added tree arg to navigation methods in treeadaptor
-
-August 07, 2006
-
-* fixed bug related to (a|)+ on end of lexer rules.  crashed instead
-  of warning.
-
-* added warning that interpreter doesn't do synpreds yet
-
-* allow different source of classloader:
-ClassLoader cl = Thread.currentThread().getContextClassLoader();
-if ( cl==null ) {
-    cl = this.getClass().getClassLoader();
-}
-
-
-July 26, 2006
-
-* compressed DFA edge tables significantly.  All edge tables are
-  unique. The transition table can reuse arrays.  They look like this now:
-
-     public static readonly DFA30_transition0 =
-         new short[] { 46, 46, -1, 46, 46, -1, -1, -1, -1, -1, -1, -1,...};
-     public static readonly DFA30_transition1 =
-         new short[] { 21 };
-     public static readonly short[][] DFA30_transition = {
-         DFA30_transition0,
-         DFA30_transition0,
-         DFA30_transition1,
-         ...
-     };
-
-* If you defined both a label like EQ and '=', sometimes the '=' was
-  used instead of the EQ label.
-
-* made headerFile template have same arg list as outputFile for consistency
-
-* outputFile, lexer, genericParser, parser, treeParser templates
-  reference cyclicDFAs attribute which was no longer used after I
-  started the new table-based DFA.  I made cyclicDFADescriptors
-  argument to outputFile and headerFile (only).  I think this is
-  correct as only OO languages will want the DFA in the recognizer.
-  At the top level, C and friends can use it.  Changed name to use
-  cyclicDFAs again as it's a better name probably.  Removed parameter
-  from the lexer, ...  For example, my parser template says this now:
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-* made all token ref token types go thru code gen's
-  getTokenTypeAsTargetLabel()
-
-* no more computing DFA transition tables for acyclic DFA.
-
-July 25, 2006
-
-* fixed a place where I was adding syn predicates into rewrite stuff.
-
-* turned off invalid token index warning in AW support; had a problem.
-
-* bad location event generated with -debug for synpreds in autobacktrack mode.
-
-July 24, 2006
-
-* changed runtime.DFA so that it treats all chars and token types as
-  char (unsigned 16 bit int).  -1 becomes '\uFFFF' then or 65535.
-
-* changed MAX_STATE_TRANSITIONS_FOR_TABLE to be 65534 by default
-  now. This means that all states can use a table to do transitions.
-
-* was not making synpreds on (C)* type loops with backtrack=true
-
-* was copying tree stuff and actions into synpreds with backtrack=true
-
-* was making synpreds on even single alt rules / blocks with backtrack=true
-
-3.0b3 - July 21, 2006
-
-* ANTLR fails to analyze complex decisions much less frequently.  It
-  turns out that the set of decisions for which ANTLR fails (times
-  out) is the same set (so far) of non-LL(*) decisions.  Moreover, I'm
-  able to detect this situation quickly and report rather than timing
-  out. Errors look like:
-
-  java.g:468:23: [fatal] rule concreteDimensions has non-LL(*)
-    decision due to recursive rule invocations in alts 1,2.  Resolve
-    by left-factoring or using syntactic predicates with fixed k
-    lookahead or use backtrack=true option.
-
-  This message only appears when k=*.
-
-* Shortened no viable alt messages to not include decision
-  description:
-
-[compilationUnit, declaration]: line 8:8 decision=<<67:1: declaration
-: ( ( fieldDeclaration )=> fieldDeclaration | ( methodDeclaration )=>
-methodDeclaration | ( constructorDeclaration )=>
-constructorDeclaration | ( classDeclaration )=> classDeclaration | (
-interfaceDeclaration )=> interfaceDeclaration | ( blockDeclaration )=>
-blockDeclaration | emptyDeclaration );>> state 3 (decision=14) no
-viable alt; token=[@1,184:187='java',<122>,8:8]
-
-  too long and hard to read.
-
-July 19, 2006
-
-* Code gen bug: states with no emanating edges were ignored by ST.
-  Now an empty list is used.
-
-* Added grammar parameter to recognizer templates so they can access
-  properties like getName(), ...
-
-July 10, 2006
-
-* Fixed the gated pred merged state bug.  Added unit test.
-
-* added new method to Target: getTokenTypeAsTargetLabel()
-
-July 7, 2006
-
-* I was doing an AND instead of OR in the gated predicate stuff.
-  Thanks to Stephen Kou!
-
-* Reduce op for combining predicates was insanely slow sometimes and
-  didn't actually work well.  Now it's fast and works.
-
-* There is a bug in merging of DFA stop states related to gated
-  preds...turned it off for now.
-
-3.0b2 - July 5, 2006
-
-July 5, 2006
-
-* token emission not properly protected in lexer filter mode.
-
-* EOT, EOT DFA state transition tables should be init'd to -1 (only
-  was doing this for compressed tables).  Fixed.
-
-* in trace mode, exit method not shown for memoized rules
-
-* added -Xmaxdfaedges to allow you to increase number of edges allowed
-  for a single DFA state before it becomes "special" and can't fit in
-  a simple table.
-
-* Bug in tables.  Short are signed so min/max tables for DFA are now
-  char[].  Bizarre.
-
-July 3, 2006
-
-* Added a method to reset the tool error state for current thread.
-  See ErrorManager.java
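-
-  Illustrative call (see ErrorManager.java for the exact method name):
-
-    ErrorManager.resetErrorState();  // clears error/warning counts for this thread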
-
-* [Got this working properly today] backtrack mode that lets you type
-  in any old crap and ANTLR will backtrack if it can't figure out what
-  you meant.  No errors are reported by antlr during analysis.  It
-  implicitly adds a syn pred in front of every production, using them
-  only if static grammar LL(*) analysis fails.  Syn pred code is not
-  generated if the pred is not used in a decision.
-
-  This is essentially a rapid prototyping mode.
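-
-  Typically enabled grammar-wide with options (illustrative):
-
-  grammar T;
-  options { backtrack=true; memoize=true; }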
-
-* Added backtracking report to the -report option
-
-* Added NFA->DFA conversion early termination report to the -report option
-
-* Added grammar level k and backtrack options to -report
-
-* Added a dozen unit tests to test autobacktrack NFA construction.
-
-* If you are using filter mode, you must manually use option
-  memoize=true now.
-
-July 2, 2006
-
-* Added k=* option so you can set k=2, for example, on whole grammar,
-  but an individual decision can be LL(*).
-
-* memoize option for grammars, rules, blocks.  Remove -nomemo cmd-line option
-
-* bug in DOT generator for DFA; fixed.
-
-* runtime.DFA reported errors even when backtracking
-
-July 1, 2006
-
-* Added -X option list to help
-
-* Syn preds were being hoisted into other rules, causing lots of extra
-  backtracking.
-
-June 29, 2006
-
-* unnecessary files removed during build.
-
-* Matt Benson updated build.xml
-
-* Detecting use of synpreds in analysis now instead of codegen.  In
-  this way, I can avoid analyzing decisions in synpreds for synpreds
-  not used in a DFA for a real rule.  This is used to optimize things
-  for backtrack option.
-
-* Code gen must add _fragment or whatever to end of pred name in
-  template synpredRule to avoid having ANTLR know anything about
-  method names.
-
-* Added -IdbgST option to emit ST delimiters at start/stop of all
-  templates spit out.
-
-June 28, 2006
-
-* Tweaked message when ANTLR cannot handle analysis.
-
-3.0b1 - June 27, 2006
-
-June 24, 2006
-
-* syn preds no longer generate little static classes; they also don't
-  generate a whole bunch of extra crap in the rules built to test syn
-  preds.  Removed GrammarFragmentPointer class from runtime.
-
-June 23-24, 2006
-
-* added output option to -report output.
-
-* added profiling info:
-  Number of rule invocations in "guessing" mode
-  number of rule memoization cache hits
-  number of rule memoization cache misses
-
-* made DFA DOT diagrams go left to right not top to bottom
-
-* I try to resolve recursive overflow states now by resolving these states
-  with semantic/syntactic predicates if they exist.  The DFA is then
-  deterministic rather than simply resolving by choosing the first
-  nondeterministic alt.  I used to generate errors:
-
-~/tmp $ java org.antlr.Tool -dfa t.g
-ANTLR Parser Generator   Early Access Version 3.0b2 (July 5, 2006)  1989-2006
-t.g:2:5: Alternative 1: after matching input such as A A A A A decision cannot predict what comes next due to recursion overflow to b from b
-t.g:2:5: Alternative 2: after matching input such as A A A A A decision cannot predict what comes next due to recursion overflow to b from b
-
-  Now, I use predicates if available and emit no warnings.
-
-* made sem preds share accept states.  Previously, multiple preds in a
-decision forked new accepts each time for each nondet state.
-
-June 19, 2006
-
-* Need parens around the prediction expressions in templates.
-
-* Referencing $ID.text in an action forced bad code gen in lexer rule ID.
-
-* Fixed a bug in how predicates are collected.  The definition of
-  "last predicated alternative" was incorrect in the analysis.  Further,
-  gated predicates incorrectly missed a case where an edge should become
-  true (a tautology).
-
-* Removed an unnecessary input.consume() reference in the runtime/DFA class.
-
-June 14, 2006
-
-* -> ($rulelabel)? didn't generate proper code for ASTs.
-
-* bug in code gen (did not compile)
-a : ID -> ID
-  | ID -> ID
-  ;
-Problem is repeated ref to ID from left side.  Juergen pointed this out.
-
-* use of tokenVocab with missing file yielded exception
-
-* (A|B)=> foo yielded an exception as (A|B) is a set not a block. Fixed.
-
-* Didn't set ID1= and INT1= for this alt:
-  | ^(ID INT+ {System.out.print(\"^(\"+$ID+\" \"+$INT+\")\");})
-
-* Fixed so repeated dangling state errors only occur once like:
-t.g:4:17: the decision cannot distinguish between alternative(s) 2,1 for at least one input sequence
-
-* tracking of rule elements was on (making list defs at start of
-  method) with templates instead of just with ASTs.  Turned off.
-
-* Doesn't crash when you give it a missing file now.
-
-* -report: add output info: how many LL(1) decisions.
-
-June 13, 2006
-
-* ^(ROOT ID?) Didn't work; nor did any other nullable child list such as
-  ^(ROOT ID* INT?).  Now, I check to see if child list is nullable using
-  Grammar.LOOK() and, if so, I generate an "IF lookahead is DOWN" gate
-  around the child list so the whole thing is optional.
-
-* Fixed a bug in LOOK that made it not look through nullable rules.
-
-* Using AST suffixes or -> rewrite syntax now gives an error w/o a grammar
-  output option.  Used to crash ;)
-
-* References to EOF ended up with improper -1 refs instead of EOF in output.
-
-* didn't warn of ambig ref to $expr in rewrite; fixed.
-list
-     :	'[' expr 'for' type ID 'in' expr ']'
-	-> comprehension(expr={$expr.st},type={},list={},i={})
-	;
-
-June 12, 2006
-
-* EOF works in the parser as a token name.
-
-* Rule b:(A B?)*; didn't display properly in AW due to the way ANTLR
-  generated NFA.
-
-* "scope x;" in a rule for unknown x gives no error.  Fixed.  Added unit test.
-
-* Label type for refs to start/stop in tree parser and other parsers were
-  not used.  Lots of casting.  Ick. Fixed.
-
-* couldn't refer to $tokenlabel in isolation, but we need that so we can test if
-  something was matched.  Fixed.
-
-* Lots of little bugs fixed in $x.y, %... translation due to new
-  action translator.
-
-* Improperly tracking block nesting level; result was that you couldn't
-  see $ID in action of rule "a : A+ | ID {Token t = $ID;} | C ;"
-
-* a : ID ID {$ID.text;} ; did not get a warning about ambiguous $ID ref.
-
-* No error was found on $COMMENT.text:
-
-COMMENT
-    :   '/*' (options {greedy=false;} : . )* '*/'
-        {System.out.println("found method "+$COMMENT.text);}
-    ;
-
-  $enclosinglexerrule scope does not exist.  Use text or setText() here.
-
-June 11, 2006
-
-* Single return values are initialized now to default or to your spec.
-
-* cleaned up input stream stuff.  Added ANTLRReaderStream, ANTLRInputStream
-  and refactored.  You can now specify encodings on ANTLRFileStream (and
-  ANTLRInputStream).
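-
-  Illustrative use (FooLexer is a made-up generated lexer; the two-argument
-  ANTLRFileStream constructor takes the encoding name):
-
-    CharStream input = new ANTLRFileStream("input.txt", "UTF-8");
-    FooLexer lexer = new FooLexer(input);
-    CommonTokenStream tokens = new CommonTokenStream(lexer);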
-
-* You can set text local var now in a lexer rule and token gets that text.
-  start/stop indexes are still set for the token.
-
-* Changed lexer slightly.  Calling a nonfragment rule from a
-  nonfragment rule does not set the overall token.
-
-June 10, 2006
-
-* Fixed bug where unnecessary escapes yield char==0 like '\{'.
-
-* Fixed analysis bug.  This grammar didn't report a recursion warning:
-x   : y X
-    | y Y
-    ;
-y   : L y R
-    | B
-    ;
-  The DFAState.equals() method was messed up.
-
-* Added @synpredgate {...} action so you can tell ANTLR how to gate actions
-  in/out during syntactic predicate evaluation.
-
-* Fuzzy parsing should be more efficient.  It should backtrack over a rule
-  and then rewind and do it again "with feeling" to exec actions.  It was
-  actually doing it 3x not 2x.
-
-June 9, 2006
-
-* Gutted and rebuilt the action translator for $x.y, $x::y, ...
-  Uses ANTLR v3 now for the first time inside v3 source. :)
-  ActionTranslator.java
-
-* Fixed a bug where referencing a return value on a rule didn't work
-  because later a ref to that rule's predefined properties didn't
-  properly force a return value struct to be built.  Added unit test.
-
-June 6, 2006
-
-* New DFA mechanisms.  Cyclic DFA are implemented as state tables,
-  encoded via strings as java cannot handle large static arrays :(
-  States with edges emanating that have predicates are specially
-  treated.  A method is generated to do these states.  The DFA
-  simulation routine uses the "special" array to figure out if the
-  state is special.  See March 25, 2006 entry for description:
-  http://www.antlr.org/blog/antlr3/codegen.tml.  analysis.DFA now has
-  all the state tables generated for code gen.  CyclicCodeGenerator.java
-  disappeared as it's unneeded code. :)
-
-* Internal general clean up of the DFA.states vs uniqueStates thing.
-  Fixed lookahead decisions no longer fill uniqueStates.  Waste of
-  time.  Also noted that when adding sem pred edges, I didn't check
-  for state reuse.  Fixed.
-
-June 4, 2006
-
-* When resolving ambig DFA states predicates, I did not add the new states
-  to the list of unique DFA states.  No observable effect on output except
-  that DFA state numbers were not always contiguous for predicated decisions.
-  I needed this fix for new DFA tables.
-
-3.0ea10 - June 2, 2006
-
-June 2, 2006
-
-* Improved grammar stats and added syntactic pred tracking.
-
-June 1, 2006
-
-* Due to a type mismatch, the DebugParser.recoverFromMismatchedToken()
-  method was not called.  Debug events for mismatched token error
-  notification were not sent to ANTLRWorks properly.
-
-* Added getBacktrackingLevel() for any recognizer; needed for profiler.
-
-* Only writes profiling data for antlr grammar analysis with -profile set
-
-* Major update and bug fix to (runtime) Profiler.
-
-May 27, 2006
-
-* Added Lexer.skip() to force lexer to ignore current token and look for
-  another; no token is created for current rule and is not passed on to
-  parser (or other consumer of the lexer).
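-
-  Typical use in a whitespace rule (illustrative):
-
-  WS : (' '|'\t'|'\r'|'\n')+ {skip();} ;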
-
-* Parsers are much faster now.  I removed use of java.util.Stack for pushing
-  follow sets and use a hardcoded array stack instead.  Dropped from
-  5900ms to 3900ms for parse+lex time parsing entire java 1.4.2 source.  Lex
-  time alone was about 1500ms.  Just looking at parse time, we get about 2x
-  speed improvement. :)
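-
-  The idea, roughly (field and method names here are illustrative, not
-  necessarily what BaseRecognizer uses):
-
-    BitSet[] following = new BitSet[100]; // follow-set stack as a plain array
-    int fsp = -1;                         // stack pointer
-
-    void pushFollow(BitSet fset) {
-        if ( fsp+1 >= following.length ) { // grow on demand
-            BitSet[] f = new BitSet[following.length*2];
-            System.arraycopy(following, 0, f, 0, following.length);
-            following = f;
-        }
-        following[++fsp] = fset;
-    }
-    // popping is just fsp--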
-
-May 26, 2006
-
-* Fixed NFA construction so it generates NFA for (A*)* such that ANTLRWorks
-  can display it properly.
-
-May 25, 2006
-
-* added abort method to Grammar so AW can terminate the conversion if it's
-  taking too long.
-
-May 24, 2006
-
-* added method to get left recursive rules from grammar without doing full
-  grammar analysis.
-
-* analysis, code gen not attempted if serious error (like
-  left-recursion or missing rule definition) occurred while reading
-  the grammar in and defining symbols.
-
-* added amazing optimization; reduces analysis time by 90% for java
-  grammar; simple IF statement addition!
-
-3.0ea9 - May 20, 2006
-
-* added global k value for grammar to limit lookahead for all decisions unless
-overridden in a particular decision.
-
-* added failsafe so that any decision taking longer than 2 seconds to create
-the DFA will fall back on k=1.  Use -ImaxtimeforDFA n (in ms) to set the time.
-
-* added an option (turned off for now) to use multiple threads to
-perform grammar analysis.  Not much help on a 2-CPU computer as
-garbage collection seems to peg the 2nd CPU already. :( Gotta wait for
-a 4 CPU box ;)
-
-* switched from #src to // $ANTLR src directive.
-
-* CommonTokenStream.getTokens() looked past end of buffer sometimes. fixed.
-
-* unicode literals didn't really work in DOT output and generated code. fixed.
-
-* fixed the unit test rig so it compiles nicely with Java 1.5
-
-* Added ant build.xml file (reads build.properties file)
-
-* predicates sometimes failed to compile/eval properly due to missing (...)
-  in IF expressions.  Forced (..)
-
-* (...)? with only one alt were not optimized.  Was:
-
-        // t.g:4:7: ( B )?
-        int alt1=2;
-        int LA1_0 = input.LA(1);
-        if ( LA1_0==B ) {
-            alt1=1;
-        }
-        else if ( LA1_0==-1 ) {
-            alt1=2;
-        }
-        else {
-            NoViableAltException nvae =
-                new NoViableAltException("4:7: ( B )?", 1, 0, input);
-            throw nvae;
-        }
-
-is now:
-
-        // t.g:4:7: ( B )?
-        int alt1=2;
-        int LA1_0 = input.LA(1);
-        if ( LA1_0==B ) {
-            alt1=1;
-        }
-
-  Smaller, faster and more readable.
-
-* Allow manual init of return values now:
-  functionHeader returns [int x=3*4, char (*f)()=null] : ... ;
-
-* Added optimization for DFAs that fixed a codegen bug with rules in lexer:
-   EQ			 : '=' ;
-   ASSIGNOP		 : '=' | '+=' ;
-  EQ is a subset of the other rule.  It did not give an error, which is
-  correct, but it generated bad code.
-
-* ANTLR was sending column not char position to ANTLRWorks.
-
-* Bug fix: location 0, 0 emitted for synpreds and empty alts.
-
-* debugging event handshake now sends grammar file name.  Added getGrammarFileName() to recognizers.  Java.stg generates it:
-
-    public String getGrammarFileName() { return "<fileName>"; }
-
-* tree parsers can do arbitrary lookahead now including backtracking.  I
-  updated CommonTreeNodeStream.
-
-* added events for debugging tree parsers:
-
-	/** Input for a tree parser is an AST, but we know nothing for sure
-	 *  about a node except its type and text (obtained from the adaptor).
-	 *  This is the analog of the consumeToken method.  Again, the ID is
-	 *  the hashCode usually of the node so it only works if hashCode is
-	 *  not implemented.
-	 */
-	public void consumeNode(int ID, String text, int type);
-
-	/** The tree parser looked ahead */
-	public void LT(int i, int ID, String text, int type);
-
-	/** The tree parser has popped back up from the child list to the
-	 *  root node.
-	 */
-	public void goUp();
-
-	/** The tree parser has descended to the first child of a the current
-	 *  root node.
-	 */
-	public void goDown();
-
-* Added DebugTreeNodeStream and DebugTreeParser classes
-
-* Added ctor because the debug tree node stream will need to ask questions about nodes and since nodes are just Object, it needs an adaptor to decode the nodes and get text/type info for the debugger.
-
-public CommonTreeNodeStream(TreeAdaptor adaptor, Tree tree);
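-
-Illustrative hookup (FooTreeParser and the rule name prog are placeholders for a
-generated tree parser; assumes the parser was built with output=AST):
-
-	Tree t = (Tree)parser.prog().getTree();
-	CommonTreeNodeStream nodes = new CommonTreeNodeStream(new CommonTreeAdaptor(), t);
-	FooTreeParser walker = new FooTreeParser(nodes);
-	walker.prog();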
-
-* added getter to TreeNodeStream:
-	public TreeAdaptor getTreeAdaptor();
-
-* Implemented getText/getType in CommonTreeAdaptor.
-
-* Added TraceDebugEventListener that can dump all events to stdout.
-
-* I broke down and made Tree implement getText
-
-* tree rewrites now gen location debug events.
-
-* added AST debug events to listener; added blank listener for convenience
-
-* updated debug events to send begin/end backtrack events for debugging
-
-* with a : (b->b) ('+' b -> ^(PLUS $a b))* ; you get b[0] each time as
-  there is no loop in rewrite rule itself.  Need to know context that
-  the -> is inside the rule and hence b means last value of b not all
-  values.
-
-* Bug in TokenRewriteStream; ops at indexes < start index blocked proper op.
-
-* Actions in ST rewrites "-> ({$op})()" were not translated
-
-* Added new action name:
-
-@rulecatch {
-catch (RecognitionException re) {
-    reportError(re);
-    recover(input,re);
-}
-catch (Throwable t) {
-    System.err.println(t);
-}
-}
-Overrides rule catch stuff.
-
-* Isolated $ refs caused exception
-
-3.0ea8 - March 11, 2006
-
-* added @finally {...} action like @init for rules.  Executes in
-  finally block (java target) after all other stuff like rule memoization.
-  No code changes needed; ST just refs a new action:
-      <ruleDescriptor.actions.finally>
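-
-  For example (rule body and actions are illustrative):
-
-  r
-  @init    { System.out.println("enter r"); }
-  @finally { System.out.println("exit r"); }
-      : ID+ ;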
-
-* hideous bug fixed: PLUS='+' didn't result in '+' rule in lexer
-
-* TokenRewriteStream didn't do toString() right when no rewrites had been done.
-
-* lexer errors in interpreter were not printed properly
-
-* bitsets are dumped in hex not decimal now for FOLLOW sets
-
-* /* epsilon */ is not printed now when printing out grammars with empty alts
-
-* Fixed another bug in tree rewrite stuff where it was checking that elements
-  had at least one element.  Strange...commented out for now to see if I can remember what's up.
-
-* Tree rewrites had problems when you didn't have x+=FOO variables.  Rules
-  like this work now:
-
-  a : (x=ID)? y=ID -> ($x $y)?;
-
-* filter=true for lexers turns on k=1 and backtracking for every token
-  alternative.  Put the rules in priority order.
-
-* added getLine() etc... to Tree to support better error reporting for
-  trees.  Added MismatchedTreeNodeException.
-
-* $templates::foo() is gone.  added % as special template symbol.
-  %foo(a={},b={},...) ctor (even shorter than $templates::foo(...))
-  %({name-expr})(a={},...) indirect template ctor reference
-
-  The above are parsed by antlr.g and translated by codegen.g
-  The following are parsed manually here:
-
-  %{string-expr} anonymous template from string expr
-  %{expr}.y = z; template attribute y of StringTemplate-typed expr to z
-  %x.y = z; set template attribute y of x (always set never get attr)
-            to z [languages like python without ';' must still use the
-            ';' which the code generator is free to remove during code gen]
-
-* -> ({expr})(a={},...) notation for indirect template rewrite.
-  expr is the name of the template.
-
-* $x[i]::y and $x[-i]::y notation for accessing absolute scope stack
-  indexes and relative negative scopes.  $x[-1]::y is the y attribute
-  of the previous scope (stack top - 1).
-
-* filter=true mode for lexers; can do this now...upon mismatch, just
-  consumes a char and tries again:
-lexer grammar FuzzyJava;
-options {filter=true;}
-
-FIELD
-    :   TYPE WS? name=ID WS? (';'|'=')
-        {System.out.println("found var "+$name.text);}
-    ;
-
-* refactored char streams so ANTLRFileStream is now a subclass of
-  ANTLRStringStream.
-
-* char streams for lexer now allowed nested backtracking in lexer.
-
-* added TokenLabelType for lexer/parser for all token labels
-
-* line numbers for error messages were not updated properly in antlr.g
-  for strings, char literals and <<...>>
-
-* init action in lexer rules was before the type,start,line,... decls.
-
-* Tree grammars can now specify output; I've only tested output=template
-  though.
-
-* You can reference EOF now in the parser and lexer.  It's just token type
-  or char value -1.
-
-* Bug fix: $ID refs in the *lexer* were all messed up.  Cleaned up the
-  set of properties available...
-
-* Bug fix: .st not found in rule ref when rule has scope:
-field
-scope {
-	StringTemplate funcDef;
-}
-    :   ...
-	{$field::funcDef = $field.st;}
-    ;
-it gets field_stack.st instead
-
-* return in backtracking must return retval or null if return value.
-
-* $property within a rule now works like $text, $st, ...
-
-* AST/Template Rewrites were not gated by backtracking==0 so they
-  executed even when guessing.  Auto AST construction is now gated also.
-
-* CommonTokenStream was somehow returning tokens not text in toString()
-
-* added useful methods to runtime.BitSet and also to CommonToken so you can
-  update the text.  Added nice Token stream method:
-
-  /** Given a start and stop index, return a List of all tokens in
-   *  the token type BitSet.  Return null if no tokens were found.  This
-   *  method looks at both on and off channel tokens.
-   */
-  public List getTokens(int start, int stop, BitSet types);
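-
-  Illustrative use (ID is a placeholder token type from a generated recognizer;
-  this BitSet is org.antlr.runtime.BitSet, not java.util.BitSet):
-
-  CommonTokenStream tokens = new CommonTokenStream(lexer);
-  tokens.getTokens();                          // force the buffer to fill
-  List ids = tokens.getTokens(0, tokens.size()-1, BitSet.of(FooParser.ID));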
-
-* literals are now passed in the .tokens files so you can ref them in
-  tree parsers, for example.
-
-* added basic exception handling; no labels, just general catches:
-
-a : {;}A | B ;
-        exception
-                catch[RecognitionException re] {
-                        System.out.println("recog error");
-                }
-                catch[Exception e] {
-                        System.out.println("error");
-                }
-
-* Added method to TokenStream:
-  public String toString(Token start, Token stop);
-
-* antlr generates #src lines in lexer grammars generated from combined grammars
-  so error messages refer to original file.
-
-* lexers generated from combined grammars now use the original formatting.
-
-* predicates have $x.y stuff translated now.  Warning: predicates might be
-  hoisted out of context.
-
-* return values in return val structs are now public.
-
-* output=template with return values on rules was broken.  I assume return values with ASTs was broken too.  Fixed.
-
-3.0ea7 - December 14, 2005
-
-* Added -print option to print out grammar w/o actions
-
-* Renamed BaseParser to be BaseRecognizer and even made Lexer derive from
-  this; nice as it now shares backtracking support code.
-
-* Added syntactic predicates (...)=>.  See December 4, 2005 entry:
-
-  http://www.antlr.org/blog/antlr3/lookahead.tml
-
-  Note that we have a new option for turning off rule memoization during
-  backtracking:
-
-  -nomemo        when backtracking don't generate memoization code
-
-* Predicates are now tested in order that you specify the alts.  If you
-  leave the last alt "naked" (w/o pred), it will assume a true pred rather
-  than union of other preds.
-
-* Added gated predicates "{p}?=>" that literally turn off a production whereas
-disambiguating predicates are only hoisted into the predictor when syntax alone
-is not sufficient to uniquely predict alternatives.
-
-A : {p}?  => "a" ;
-B : {!p}? => ("a"|"b")+ ;
-
-* bug fixed related to predicates in predictor
-lexer grammar w;
-A : {p}? "a" ;
-B : {!p}? ("a"|"b")+ ;
-DFA is correct.  A state splits for input "a" on the pred.
-Generated code though was hosed.  No pred tests in prediction code!
-I added testLexerPreds() and others in TestSemanticPredicateEvaluation.java
-
-* added execAction template in case we want to do something in front of
-  each action execution or something.
-
-* left-recursive cycles from rules w/o decisions were not detected.
-
-* undefined lexer rules were not announced! fixed.
-
-* unreachable messages for Tokens rule now indicate rule name not alt. E.g.,
-
-  Ruby.lexer.g:24:1: The following token definitions are unreachable: IVAR
-
-* nondeterminism warnings improved for Tokens rule:
-
-Ruby.lexer.g:10:1: Multiple token rules can match input such as ""0".."9"": INT, FLOAT
-As a result, tokens(s) FLOAT were disabled for that input
-
-
-* DOT diagrams didn't show escaped char properly.
-
-* Char/string literals are now all 'abc' not "abc".
-
-* action syntax changed "@scope::actionname {action}" where scope defaults
-  to "parser" if parser grammar or combined grammar, "lexer" if lexer grammar,
-  and "treeparser" if tree grammar.  The code generation targets decide
-  what scopes are available.  Each "scope" yields a hashtable for use in
-  the output templates.  The scopes full of actions are sent to all output
-  file templates (currently headerFile and outputFile) as attribute actions.
-  Then you can reference <actions.scope> to get the map of actions associated
-  with scope and <actions.parser.header> to get the parser's header action
-  for example.  This should be very flexible.  The target should only have
-  to define which scopes are valid, but the action names should be variable
-  so we don't have to recompile ANTLR to add actions to code gen templates.
-
-  grammar T;
-  options {language=Java;}
-  @header { package foo; }
-  @parser::stuff { int i; } // names within scope not checked; target dependent
-  @members { int i; }
-  @lexer::header {head}
-  @lexer::members { int j; }
-  @headerfile::blort {...} // error: this target doesn't have headerfile
-  @treeparser::members {...} // error: this is not a tree parser
-  a
-  @init {int i;}
-    : ID
-    ;
-  ID : 'a'..'z';
-
-  For now, the Java target uses members and header as valid names.  Within a
-  rule, the init action name is valid.
-
-* changed $dynamicscope.value to $dynamicscope::value even if value is defined
-  in same rule such as $function::name where rule function defines name.
-
-* $dynamicscope gets you the stack
-
-* rule scopes go like this now:
-
-  rule
-  scope {...}
-  scope slist,Symbols;
-  	: ...
-	;
-
-* Created RuleReturnScope as a generic rule return value.  Makes it easier
-  to do this:
-    RuleReturnScope r = parser.program();
-    System.out.println(r.getTemplate().toString());
-
-* $template, $tree, $start, etc...
-
-* $r.x in current rule.  $r is ignored as fully-qualified name. $r.start works too
-
-* added warning about $r referring to both return value of rule and dynamic scope of rule
-
-* integrated StringTemplate in a very simple manner
-
-Syntax:
--> template(arglist) "..."
--> template(arglist) <<...>>
--> namedTemplate(arglist)
--> {free expression}
--> // empty
-
-Predicate syntax:
-a : A B -> {p1}? foo(a={$A.text})
-        -> {p2}? foo(a={$B.text})
-        -> // return nothing
-
-An arg list is just a list of template attribute assignments to actions in curlies.
-
-There is a setTemplateLib() method for you to use with named template rewrites.
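-
-A sketch of supplying a group of named templates (the group file name and the
-use of AngleBracketTemplateLexer are illustrative):
-
-    StringTemplateGroup templates =
-        new StringTemplateGroup(new FileReader("templates.stg"),
-                                AngleBracketTemplateLexer.class);
-    parser.setTemplateLib(templates);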
-
-Use a new option:
-
-grammar t;
-options {output=template;}
-...
-
-This all should work for tree grammars too, but I'm still testing.
-
-* fixed bugs where strings were improperly escaped in exceptions, comments, etc.  For example, newlines came out as real newlines rather than the escaped version.
-
-3.0ea6 - November 13, 2005
-
-* turned off -debug/-profile, which was on by default
-
-* completely refactored the output templates; added some missing templates.
-
-* dramatically improved infinite recursion error messages (actually
-  left-recursion never even was printed out before).
-
-* wasn't printing dangling state messages when it reanalyzes with k=1.
-
-* fixed a nasty bug in the analysis engine dealing with infinite recursion.
-  Spent all day thinking about it and cleaned up the code dramatically.
-  Bug fixed and software is more powerful and I understand it better! :)
-
-* improved verbose DFA nodes; organized by alt
-
-* got much better random phrase generation.  For example:
-
- $ java org.antlr.tool.RandomPhrase simple.g program
- int Ktcdn ';' method wh '(' ')' '{' return 5 ';' '}'
-
-* empty rules like "a : ;" generated code that didn't compile due to
-  try/catch for RecognitionException.  Generated code couldn't possibly
-  throw that exception.
-
-* when printing out a grammar, such as in comments in generated code,
-  ANTLR didn't print ast suffix stuff back out for literals.
-
-* This never exited loop:
-  DATA : (options {greedy=false;}: .* '\n' )* '\n' '.' ;
-  and now it works due to new default nongreedy .*  Also this works:
-  DATA : (options {greedy=false;}: .* '\n' )* '.' ;
-
-* Dot star ".*" syntax didn't work; in lexer it is nongreedy by
-  default.  In parser it is greedy but also k=1 by default.  Added
-  unit tests.  Added blog entry to describe.
-
-* ~T where T is the only token yielded an empty set but no error
-
-* Used to generate unreachable message here:
-
-  parser grammar t;
-  a : ID a
-    | ID
-    ;
-
-  z.g:3:11: The following alternatives are unreachable: 2
-
-  In fact it should really be an error; now it generates:
-
-  no start rule in grammar t (no rule can obviously be followed by EOF)
-
-  Per next change item, ANTLR cannot know that EOF follows rule 'a'.
-
-* added error message indicating that ANTLR can't figure out what your
-  start rule is.  Required to properly generate code in some cases.
-
-* validating semantic predicates now work (if they are false, they
-  throw a new FailedPredicateException).
-
-* two hideous bug fixes in the IntervalSet, which made analysis go wrong
-  in a few cases.  Thanks to Oliver Zeigermann for finding lots of bugs
-  and making suggested fixes (including the next two items)!
-
-* cyclic DFAs are now nonstatic and hence can access instance variables
-
-* labels are now allowed on lexical elements (in the lexer)
-
-* added some internal debugging options
-
-* ~'a'* and ~('a')* were not working properly; refactored antlr.g grammar
-
-3.0ea5 - July 5, 2005
-
-* Using '\n' in a parser grammar resulted in a nonescaped version of '\n' in the token names table making compilation fail.  I fixed this by reorganizing/cleaning up portion of ANTLR that deals with literals.  See comment org.antlr.codegen.Target.
-
-* Target.getMaxCharValue() did not use the appropriate max value constant.
-
-* ALLCHAR was a constant when it should use the Target max value def.  set complement for wildcard also didn't use the Target def.  Generally cleaned up the max char value stuff.
-
-* Code gen didn't deal with ASTLabelType properly...I think even the 3.0ea7 example tree parser was broken! :(
-
-* Added a few more unit tests dealing with escaped literals
-
-3.0ea4 - June 29, 2005
-
-* tree parsers work; added CommonTreeNodeStream.  See simplecTreeParser
-  example in examples-v3 tarball.
-
-* added superClass and ASTLabelType options
-
-* refactored Parser to have a BaseParser and added TreeParser
-
-* bug fix: actions being dumped in description strings; compile errors
-  resulted
-
-3.0ea3 - June 23, 2005
-
-Enhancements
-
-* Automatic tree construction operators are in: ! ^ ^^
-
-* Tree construction rewrite rules are in
-	-> {pred1}? rewrite1
-	-> {pred2}? rewrite2
-	...
-	-> rewriteN
-
-  The rewrite rules may be elements like ID, expr, $label, {node expr}
-  and trees ^( <root> <children> ).  You can have (...)?, (...)*, (...)+
-  subrules as well.
-
-  You may have rewrites in subrules not just at outer level of rule, but
-  any -> rewrite forces auto AST construction off for that alternative
-  of that rule.
-
-  To avoid cycles, copy semantics are used:
-
-  r : INT -> INT INT ;
-
-  means make two new nodes from the same INT token.
-
-  Repeated references to a rule element imply a copy for at least one
-  tree:
-
-  a : atom -> ^(atom atom) ; // NOT CYCLE! (dup atom tree)
-
-* $ruleLabel.tree refers to tree created by matching the labeled element.
-
-* A description of the blocks/alts is generated as a comment in output code
-
-* A timestamp / signature is put at top of each generated code file
-
-3.0ea2 - June 12, 2005
-
-Bug fixes
-
-* Some error messages were missing the stackTrace parameter
-
-* Removed the file locking mechanism as it's not cross platform
-
-* Some absolute vs relative path name problems with writing output
-  files.  Rules are now more concrete.  -o option takes precedence
-  // -o /tmp /var/lib/t.g => /tmp/T.java
-  // -o subdir/output /usr/lib/t.g => subdir/output/T.java
-  // -o . /usr/lib/t.g => ./T.java
-  // -o /tmp subdir/t.g => /tmp/subdir/t.g
-  // If they didn't specify a -o dir so just write to location
-  // where grammar is, absolute or relative
-
-* does error checking on unknown option names now
-
-* Using just language code not locale name for error message file.  I.e.,
-  the default (and for any English speaking locale) is en.stg not en_US.stg
-  anymore.
-
-* The error manager now asks the Tool to panic rather than simply doing
-  a System.exit().
-
-* Lots of refactoring concerning grammar, rule, subrule options.  Now
-  detects invalid options.
-
-3.0ea1 - June 1, 2005
-
-Initial early access release
diff --git a/antlr-3.4/tool/LICENSE.txt b/antlr-3.4/tool/LICENSE.txt
deleted file mode 100644
index e1cc972..0000000
--- a/antlr-3.4/tool/LICENSE.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-[The "BSD license"]
-Copyright (c) 201 Terence Parr
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/antlr-3.4/tool/README.txt b/antlr-3.4/tool/README.txt
deleted file mode 100644
index ca5e198..0000000
--- a/antlr-3.4/tool/README.txt
+++ /dev/null
@@ -1,139 +0,0 @@
-ANTLR v3.4
-July 18, 2011
-
-Terence Parr, parrt at cs usfca edu
-ANTLR project lead and supreme dictator for life
-University of San Francisco
-
-INTRODUCTION
-
-Welcome to ANTLR v3!  ANTLR (ANother Tool for Language Recognition) is
-a language tool that provides a framework for constructing
-recognizers, interpreters, compilers, and translators from grammatical
-descriptions containing actions in a variety of target
-languages. ANTLR provides excellent support for tree construction,
-tree walking, translation, error recovery, and error reporting. I've
-been working on parser generators for 20 years and on this particular
-version of ANTLR for 7 years.
-
-You should use v3 in conjunction with ANTLRWorks:
-
-    http://www.antlr.org/works/index.html
-
-and gUnit (grammar unit testing tool included in distribution):
-
-    http://www.antlr.org/wiki/display/ANTLR3/gUnit+-+Grammar+Unit+Testing
-
-The book will also help you a great deal (printed May 15, 2007); you
-can also buy the PDF:
-
-    http://www.pragmaticprogrammer.com/titles/tpantlr/index.html
-
-2nd book, Language Implementation Patterns:
-
-    http://pragprog.com/titles/tpdsl/language-implementation-patterns
-
-See the getting started document:
-
-    http://www.antlr.org/wiki/display/ANTLR3/FAQ+-+Getting+Started
-
-You also have the examples plus the source to guide you.
-
-See the wiki FAQ:
-
-    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
-
-and general doc root:
-
-    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+3+Wiki+Home
-
-Please help add/update FAQ entries.
-
-If all else fails, you can buy support or ask the antlr-interest list:
-
-    http://www.antlr.org/support.html
-
-Per the license in LICENSE.txt, this software is not guaranteed to
-work and might even destroy all life on this planet:
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-----------------------------------------------------------------------
-
-EXAMPLES
-
-ANTLR v3 sample grammars:
-
-    http://www.antlr.org/download/examples-v3.tar.gz
-
-Examples from Language Implementation Patterns:
-
-    http://www.pragprog.com/titles/tpdsl/source_code
-
-Also check out Mantra Programming Language for a prototype (work in
-progress) using v3:
-
-    http://www.linguamantra.org/
-
-----------------------------------------------------------------------
-
-What is ANTLR?
-
-ANTLR stands for (AN)other (T)ool for (L)anguage (R)ecognition
-and generates LL(*) recursive-descent parsers. ANTLR is a language tool
-that provides a framework for constructing recognizers, compilers, and
-translators from grammatical descriptions containing actions.
-Target language list:
-
-http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
-
-----------------------------------------------------------------------
-
-How is ANTLR v3 different than ANTLR v2?
-
-See "What is the difference between ANTLR v2 and v3?"
-
-    http://www.antlr.org/wiki/pages/viewpage.action?pageId=719
-
-See migration guide:
-
-    http://www.antlr.org/wiki/display/ANTLR3/Migrating+from+ANTLR+2+to+ANTLR+3
-
-----------------------------------------------------------------------
-
-How do I install this damn thing?
-
-Just untar antlr-3.4.tar.gz and you'll get:
-
-antlr-3.4/BUILD.txt
-antlr-3.4/antlr3-maven-plugin
-antlr-3.4/antlrjar.xml
-antlr-3.4/antlrsources.xml
-antlr-3.4/gunit
-antlr-3.4/gunit-maven-plugin
-antlr-3.4/pom.xml
-antlr-3.4/runtime
-antlr-3.4/tool
-antlr-3.4/lib
-
-This is the source and java binaries.  You could grab the
-antlr-3.4-complete.jar file from the website, but it's already in the lib dir.
-It has all of the jars you need combined into one. Then you need to
-add antlr-3.4-complete.jar to your CLASSPATH or add it
-to the java arg list; e.g., on unix:
-
-$ java -cp "/usr/local/lib/antlr-3.4-complete.jar:$CLASSPATH" org.antlr.Tool Test.g
-
-Please see the FAQ
-
-    http://www.antlr.org/wiki/display/ANTLR3/ANTLR+v3+FAQ
diff --git a/antlr-3.4/tool/pom.xml b/antlr-3.4/tool/pom.xml
deleted file mode 100644
index 60c3871..0000000
--- a/antlr-3.4/tool/pom.xml
+++ /dev/null
@@ -1,118 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>org.antlr</groupId>
-    <artifactId>antlr</artifactId>
-    <packaging>jar</packaging>
-    <name>ANTLR Grammar Tool v 3.4</name>
-    <url>http://antlr.org</url>
-
-
-  <!--
-
-    Inherit from the ANTLR master pom, which tells us what
-    version we are and allows us to inherit dependencies
-    and so on.
-
-    -->
-    <parent>
-        <groupId>org.antlr</groupId>
-        <artifactId>antlr-master</artifactId>
-        <version>3.4</version>
-    </parent>
-    
-    <profiles>
-        <profile>
-            <id>uber</id>
-            <activation>
-                <property>
-                    <name>uber</name>
-                    <value>true</value>
-                </property>
-            </activation>
-            <dependencies>
-                <dependency>
-                    <groupId>org.antlr</groupId>
-                    <artifactId>gunit</artifactId>
-                    <version>${project.version}</version>
-                    <scope>runtime</scope>
-                </dependency>
-            </dependencies>
-        </profile>
-    </profiles>
-    
-    <dependencies>
-        
-        <dependency>
-            <groupId>org.antlr</groupId>
-            <artifactId>antlr-runtime</artifactId>
-            <version>${project.version}</version>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <version>4.8.2</version>
-            <scope>test</scope>
-        </dependency>
-        
-        <dependency>
-            <groupId>org.antlr</groupId>
-            <artifactId>ST4</artifactId>
-            <version>4.0.4</version>
-            <scope>compile</scope>
-        </dependency>
-            
-    </dependencies>
-  <!--
-
-    Tell Maven which other artifacts we need in order to
-    build, run and test the ANTLR Tool. The ANTLR Tool uses earlier versions
-    of ANTLR at runtime (for the moment), uses the current
-    released version of ANTLR String template, but obviously is
-    reliant on the latest snapshot of the runtime, which will either be
-    taken from the antlr-snapshot repository, or your local .m2
-    repository if you built and installed that locally.
-
-    -->
-
-
-    <build>
-
-        <defaultGoal>install</defaultGoal>
-
-        <plugins>
-
-            <plugin>
-                <groupId>org.antlr</groupId>
-                <artifactId>antlr3-maven-plugin</artifactId>
-                <version>3.3</version>
-                <configuration>
-                    <libDirectory>target/generated-sources/antlr/org/antlr/grammar/v3</libDirectory>
-                </configuration>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>antlr</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-
-            <plugin>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>2.3.2</version>
-                <configuration>
-                    <source>1.6</source>
-                    <target>jsr14</target>
-                    <sourceDirectory>src</sourceDirectory>
-                </configuration>
-            </plugin>
-
-
-            
-        </plugins>
-
-    </build>
-</project>
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLR.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLR.g
deleted file mode 100644
index 6b4e60b..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLR.g
+++ /dev/null
@@ -1,1353 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2011 Terence Parr
- All rights reserved.
-
- Grammar conversion to ANTLR v3:
- Copyright (c) 2011 Sam Harwell
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-	notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-	notice, this list of conditions and the following disclaimer in the
-	documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-	derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Read in an ANTLR grammar and build an AST.  Try not to do
- *  any actions, just build the tree.
- *
- *  The phases are:
- *
- *		antlr.g (this file)
- *		assign.types.g
- *		define.g
- *		buildnfa.g
- *		antlr.print.g (optional)
- *		codegen.g
- *
- *  Terence Parr
- *  University of San Francisco
- *  2005
- */
-
-grammar ANTLR;
-
-options
-{
-	output=AST;
-	ASTLabelType=GrammarAST;
-}
-
-tokens
-{
-	//OPTIONS='options';
-	//TOKENS='tokens';
-	LEXER='lexer';
-	PARSER='parser';
-	CATCH='catch';
-	FINALLY='finally';
-	GRAMMAR='grammar';
-	PRIVATE='private';
-	PROTECTED='protected';
-	PUBLIC='public';
-	RETURNS='returns';
-	THROWS='throws';
-	TREE='tree';
-
-	RULE;
-	PREC_RULE;
-	RECURSIVE_RULE_REF; // flip recursive RULE_REF to RECURSIVE_RULE_REF in prec rules
-	BLOCK;
-	OPTIONAL;
-	CLOSURE;
-	POSITIVE_CLOSURE;
-	SYNPRED;
-	RANGE;
-	CHAR_RANGE;
-	EPSILON;
-	ALT;
-	EOR;
-	EOB;
-	EOA; // end of alt
-	ID;
-	ARG;
-	ARGLIST;
-	RET;
-	LEXER_GRAMMAR;
-	PARSER_GRAMMAR;
-	TREE_GRAMMAR;
-	COMBINED_GRAMMAR;
-	INITACTION;
-	FORCED_ACTION; // {{...}} always exec even during syn preds
-	LABEL; // $x used in rewrite rules
-	TEMPLATE;
-	SCOPE='scope';
-	IMPORT='import';
-	GATED_SEMPRED; // {p}? =>
-	SYN_SEMPRED; // (...) =>   it's a manually-specified synpred converted to sempred
-	BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
-	FRAGMENT='fragment';
-	DOT;
-	REWRITES;
-}
-
-@lexer::header {
-package org.antlr.grammar.v3;
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.Grammar;
-}
-
-@parser::header {
-package org.antlr.grammar.v3;
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.GrammarAST;
-import org.antlr.misc.IntSet;
-import org.antlr.tool.Rule;
-}
-
-@lexer::members {
-public boolean hasASTOperator = false;
-private String fileName;
-
-public String getFileName() {
-    return fileName;
-}
-
-public void setFileName(String value) {
-    fileName = value;
-}
-}
-
-@parser::members {
-protected String currentRuleName = null;
-protected GrammarAST currentBlockAST = null;
-protected boolean atTreeRoot; // are we matching a tree root in tree grammar?
-
-public static ANTLRParser createParser(TokenStream input) {
-    ANTLRParser parser = new ANTLRParser(input);
-    parser.adaptor = new grammar_Adaptor(parser);
-    return parser;
-}
-
-private static class GrammarASTErrorNode extends GrammarAST {
-    public IntStream input;
-    public Token start;
-    public Token stop;
-    public RecognitionException trappedException;
-
-    public GrammarASTErrorNode(TokenStream input, Token start, Token stop, RecognitionException e) {
-        super(stop);
-        //Console.Out.WriteLine( "start: " + start + ", stop: " + stop );
-        if ( stop == null ||
-             ( stop.getTokenIndex() < start.getTokenIndex() &&
-              stop.getType() != Token.EOF) ) {
-            // sometimes resync does not consume a token (when LT(1) is
-            // in the follow set), so stop will be one to the left of start; adjust.
-            // Also handle case where start is the first token and no token
-            // is consumed during recovery; LT(-1) will return null.
-            stop = start;
-        }
-        this.input = input;
-        this.start = start;
-        this.stop = stop;
-        this.trappedException = e;
-    }
-
-    @Override
-    public boolean isNil() { return false; }
-
-    @Override
-    public String getText()
-    {
-        String badText = null;
-        if (start instanceof Token) {
-            int i = ((Token)start).getTokenIndex();
-            int j = ((Token)stop).getTokenIndex();
-            if (((Token)stop).getType() == Token.EOF) {
-                j = ((TokenStream)input).size();
-            }
-            badText = ((TokenStream)input).toString(i, j);
-        } else if (start instanceof Tree) {
-            badText = ((TreeNodeStream)input).toString(start, stop);
-        } else {
-            // people should subclass if they alter the tree type so this
-            // next one is for sure correct.
-            badText = "<unknown>";
-        }
-        return badText;
-    }
-
-    @Override
-    public void setText(String value) { }
-
-    @Override
-    public int getType() { return Token.INVALID_TOKEN_TYPE; }
-
-    @Override
-    public void setType(int value) { }
-
-    @Override
-    public String toString()
-    {
-        if (trappedException instanceof MissingTokenException)
-        {
-            return "<missing type: " +
-                   ( (MissingTokenException)trappedException ).getMissingType() +
-                   ">";
-        } else if (trappedException instanceof UnwantedTokenException) {
-            return "<extraneous: " +
-                   ( (UnwantedTokenException)trappedException ).getUnexpectedToken() +
-                   ", resync=" + getText() + ">";
-        } else if (trappedException instanceof MismatchedTokenException) {
-            return "<mismatched token: " + trappedException.token + ", resync=" + getText() + ">";
-        } else if (trappedException instanceof NoViableAltException) {
-            return "<unexpected: " + trappedException.token +
-                   ", resync=" + getText() + ">";
-        }
-        return "<error: " + getText() + ">";
-    }
-}
-
-static class grammar_Adaptor extends CommonTreeAdaptor {
-    ANTLRParser _outer;
-
-    public grammar_Adaptor(ANTLRParser outer) {
-        _outer = outer;
-    }
-
-    @Override
-    public Object create(Token payload) {
-        GrammarAST t = new GrammarAST( payload );
-        if (_outer != null)
-            t.enclosingRuleName = _outer.currentRuleName;
-        return t;
-    }
-
-    @Override
-    public Object errorNode(TokenStream input, Token start, Token stop, RecognitionException e) {
-        GrammarAST t = new GrammarASTErrorNode(input, start, stop, e);
-        if (_outer != null)
-            t.enclosingRuleName = _outer.currentRuleName;
-        return t;
-    }
-}
-
-private Grammar grammar;
-private int grammarType;
-private String fileName;
-
-public Grammar getGrammar() {
-    return grammar;
-}
-
-public void setGrammar(Grammar value) {
-    grammar = value;
-}
-
-public int getGrammarType() {
-    return grammarType;
-}
-
-public void setGrammarType(int value) {
-    grammarType = value;
-}
-
-public String getFileName() {
-    return fileName;
-}
-
-public void setFileName(String value) {
-    fileName = value;
-}
-
-private final int LA(int i) { return input.LA( i ); }
-
-private final Token LT(int k) { return input.LT( k ); }
-
-/*partial void createTreeAdaptor(ref ITreeAdaptor adaptor)
-{
-    adaptor = new grammar_Adaptor(this);
-}*/
-
-protected GrammarAST setToBlockWithSet(GrammarAST b) {
-    /*
-     * alt = ^(ALT["ALT"] {b} EOA["EOA"])
-     * prefixWithSynpred( alt )
-     * return ^(BLOCK["BLOCK"] {alt} EOB["<end-of-block>"])
-     */
-    GrammarAST alt = (GrammarAST)adaptor.create(ALT, "ALT");
-    adaptor.addChild(alt, b);
-    adaptor.addChild(alt, adaptor.create(EOA, "<end-of-alt>"));
-
-    prefixWithSynPred(alt);
-
-    GrammarAST block = (GrammarAST)adaptor.create(BLOCK, b.getToken(), "BLOCK");
-    adaptor.addChild(block, alt);
-    adaptor.addChild(alt, adaptor.create(EOB, "<end-of-block>"));
-
-    return block;
-}
-
-/** Create a copy of the alt and make it into a BLOCK; all actions,
- *  labels, tree operators, and rewrites are removed.
- */
-protected GrammarAST createBlockFromDupAlt(GrammarAST alt) {
-    /*
-     * ^(BLOCK["BLOCK"] {GrammarAST.dupTreeNoActions(alt)} EOB["<end-of-block>"])
-     */
-    GrammarAST nalt = GrammarAST.dupTreeNoActions(alt, null);
-
-    GrammarAST block = (GrammarAST)adaptor.create(BLOCK, alt.getToken(), "BLOCK");
-    adaptor.addChild( block, nalt );
-    adaptor.addChild( block, adaptor.create( EOB, "<end-of-block>" ) );
-
-    return block;
-}
-
-/** Rewrite alt to have a synpred as first element;
- *  (xxx)=>xxx
- *  but only if they didn't specify one manually.
- */
-protected void prefixWithSynPred( GrammarAST alt ) {
-    // if they want backtracking and it's not a lexer rule in combined grammar
-    String autoBacktrack = (String)grammar.getBlockOption( currentBlockAST, "backtrack" );
-    if ( autoBacktrack == null )
-    {
-        autoBacktrack = (String)grammar.getOption( "backtrack" );
-    }
-    if ( autoBacktrack != null && autoBacktrack.equals( "true" ) &&
-         !( grammarType == Grammar.COMBINED &&
-         Rule.getRuleType(currentRuleName) == Grammar.LEXER) &&
-         alt.getChild( 0 ).getType() != SYN_SEMPRED )
-    {
-        // duplicate alt and make a synpred block around that dup'd alt
-        GrammarAST synpredBlockAST = createBlockFromDupAlt( alt );
-
-        // Create a BACKTRACK_SEMPRED node as if the user had typed it in
-        // Effectively we replace (xxx)=>xxx with {synpredxxx}? xxx
-        GrammarAST synpredAST = createSynSemPredFromBlock( synpredBlockAST,
-                                                          BACKTRACK_SEMPRED );
-
-        // insert BACKTRACK_SEMPRED as first element of alt
-        //synpredAST.getLastSibling().setNextSibling( alt.getFirstChild() );
-        //synpredAST.addChild( alt.getFirstChild() );
-        //alt.setFirstChild( synpredAST );
-        GrammarAST[] children = alt.getChildrenAsArray();
-        adaptor.setChild( alt, 0, synpredAST );
-        for ( int i = 0; i < children.length; i++ )
-        {
-            if ( i < children.length - 1 )
-                adaptor.setChild( alt, i + 1, children[i] );
-            else
-                adaptor.addChild( alt, children[i] );
-        }
-    }
-}
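
The child-shifting loop above is how a node is prepended through a TreeAdaptor: overwrite child 0, shift the saved children right by one, and append the last one. A small self-contained illustration of the same pattern, using the generic runtime CommonTree/CommonTreeAdaptor classes rather than the tool's GrammarAST (token types and the class name are arbitrary here):

    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class PrependChildDemo {
        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            // Toy ALT node with two children: A B (token types are arbitrary)
            Object alt = adaptor.create(5, "ALT");
            adaptor.addChild(alt, adaptor.create(6, "A"));
            adaptor.addChild(alt, adaptor.create(6, "B"));

            // Prepend a synthetic predicate node, mirroring the loop in
            // prefixWithSynPred above.
            Object pred = adaptor.create(7, "BACKTRACK_SEMPRED");
            int n = adaptor.getChildCount(alt);
            Object[] saved = new Object[n];
            for (int i = 0; i < n; i++) saved[i] = adaptor.getChild(alt, i);

            adaptor.setChild(alt, 0, pred);
            for (int i = 0; i < saved.length; i++) {
                if (i < saved.length - 1) adaptor.setChild(alt, i + 1, saved[i]);
                else                      adaptor.addChild(alt, saved[i]);
            }

            // prints: (ALT BACKTRACK_SEMPRED A B)
            System.out.println(((CommonTree) alt).toStringTree());
        }
    }
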
-
-protected GrammarAST createSynSemPredFromBlock( GrammarAST synpredBlockAST, int synpredTokenType ) {
-    // add the grammar fragment to a list so we can make fake rules for the predicates later.
-    String predName = grammar.defineSyntacticPredicate( synpredBlockAST, currentRuleName );
-    // convert (alpha)=> into {synpredN}? where N is some pred count
-    // during code gen we convert to function call with templates
-    String synpredinvoke = predName;
-    GrammarAST p = (GrammarAST)adaptor.create( synpredTokenType, synpredinvoke );
-    // track how many decisions have synpreds
-    grammar.blocksWithSynPreds.add( currentBlockAST );
-    return p;
-}
-
-public static GrammarAST createSimpleRuleAST( String name, GrammarAST block, boolean fragment ) {
-    TreeAdaptor adaptor = new grammar_Adaptor(null);
-
-    GrammarAST modifier = null;
-    if ( fragment )
-    {
-        modifier = (GrammarAST)adaptor.create( FRAGMENT, "fragment" );
-    }
-
-    /*
-     * EOBAST = block.getLastChild()
-     * ^(RULE[block,"rule"] ID["name"] {modifier} ARG["ARG"] RET["RET"] SCOPE["scope"] {block} EOR[EOBAST,"<end-of-rule>"])
-     */
-    GrammarAST rule = (GrammarAST)adaptor.create( RULE, block.getToken(), "rule" );
-
-    adaptor.addChild( rule, adaptor.create( ID, name ) );
-    if ( modifier != null )
-        adaptor.addChild( rule, modifier );
-    adaptor.addChild( rule, adaptor.create( ARG, "ARG" ) );
-    adaptor.addChild( rule, adaptor.create( RET, "RET" ) );
-    adaptor.addChild( rule, adaptor.create( SCOPE, "scope" ) );
-    adaptor.addChild( rule, block );
-    adaptor.addChild( rule, adaptor.create( EOR, block.getLastChild().getToken(), "<end-of-rule>" ) );
-
-    return rule;
-}
-
-@Override
-public void reportError(RecognitionException ex)
-{
-    //Token token = null;
-    //try
-    //{
-    //    token = LT( 1 );
-    //}
-    //catch ( TokenStreamException tse )
-    //{
-    //    ErrorManager.internalError( "can't get token???", tse );
-    //}
-    Token token = ex.token;
-    ErrorManager.syntaxError(
-        ErrorManager.MSG_SYNTAX_ERROR,
-        grammar,
-        token,
-        "antlr: " + ex.toString(),
-        ex );
-}
-
-public void cleanup( GrammarAST root )
-{
-    if ( grammarType == Grammar.LEXER )
-    {
-        String filter = (String)grammar.getOption( "filter" );
-        GrammarAST tokensRuleAST =
-            grammar.addArtificialMatchTokensRule(
-                root,
-                grammar.lexerRuleNamesInCombined,
-                grammar.getDelegateNames(),
-                filter != null && filter.equals( "true" ) );
-    }
-}
-}
-
-public
-grammar_![Grammar g]
-@init
-{
-	this.grammar = g;
-	Map<String, Object> opts;
-}
-@after
-{
-	cleanup( $tree );
-}
-	:	//hdr:headerSpec
-		( ACTION )?
-		( cmt=DOC_COMMENT  )?
-		gr=grammarType gid=id {grammar.setName($gid.text);} SEMI
-		(	optionsSpec {opts = $optionsSpec.opts; grammar.setOptions(opts, $optionsSpec.start);}
-		)?
-		(ig=delegateGrammars)?
-		(ts=tokensSpec)?
-		scopes=attrScopes
-		(a=actions)?
-		r=rules
-		EOF
-		-> ^($gr $gid $cmt? optionsSpec? $ig? $ts? $scopes? $a? $r)
-	;
-
-grammarType
-	:	(	'lexer'  gr='grammar' {grammarType=Grammar.LEXER; grammar.type = Grammar.LEXER;}       // pure lexer
-			-> LEXER_GRAMMAR[$gr]
-		|	'parser' gr='grammar' {grammarType=Grammar.PARSER; grammar.type = Grammar.PARSER;}     // pure parser
-			-> PARSER_GRAMMAR[$gr]
-		|	'tree'   gr='grammar' {grammarType=Grammar.TREE_PARSER; grammar.type = Grammar.TREE_PARSER;}  // a tree parser
-			-> TREE_GRAMMAR[$gr]
-		|			 gr='grammar' {grammarType=Grammar.COMBINED; grammar.type = Grammar.COMBINED;} // merged parser/lexer
-			-> COMBINED_GRAMMAR[$gr]
-		)
-	;
-
-actions
-	:	(action)+
-	;
-
-/** Match stuff like @parser::members {int i;} */
-action
-	:	AMPERSAND^ (actionScopeName COLON! COLON!)? id ACTION
-	;
-
-/** Sometimes the scope names will collide with keywords; allow them as
- *  ids for action scopes.
- */
-actionScopeName
-	:	id
-	|	l='lexer'
-		-> ID[$l]
-	|	p='parser'
-		-> ID[$p]
-	;
-
-optionsSpec returns [Map<String, Object> opts=new HashMap<String, Object>()]
-	:	OPTIONS^ (option[$opts] SEMI!)+ RCURLY!
-	;
-
-option[Map<String, Object> opts]
-	:	id ASSIGN^ optionValue
-		{
-			$opts.put($id.text, $optionValue.value);
-		}
-	;
-
-optionValue returns [Object value = null]
-	:	x=id			 {$value = $x.text;}
-	|	s=STRING_LITERAL {String vs = $s.text;
-						  // remove the quotes:
-						  $value=vs.substring(1,vs.length()-1);}
-	|	c=CHAR_LITERAL   {String vs = $c.text;
-						  // remove the quotes:
-						  $value=vs.substring(1,vs.length()-1);}
-	|	i=INT            {$value = Integer.parseInt($i.text);}
-	|	ss=STAR			 {$value = "*";} // used for k=*
-		-> STRING_LITERAL[$ss]
-//	|	cs:charSet       {value = #cs;} // return set AST in this case
-	;
-
-delegateGrammars
-	:	'import'^ delegateGrammar (COMMA! delegateGrammar)* SEMI!
-	;
-
-delegateGrammar
-	:	lab=id ASSIGN^ g=id {grammar.importGrammar($g.tree, $lab.text);}
-	|	g2=id               {grammar.importGrammar($g2.tree,null);}
-	;
-
-tokensSpec
-	:	TOKENS^
-			tokenSpec*
-		RCURLY!
-	;
-
-tokenSpec
-	:	TOKEN_REF ( ASSIGN^ (STRING_LITERAL|CHAR_LITERAL) )? SEMI!
-	;
-
-attrScopes
-	:	(attrScope)*
-	;
-
-attrScope
-	:	'scope'^ id ruleActions? ACTION
-	;
-
-rules
-	:	(	rule
-		)+
-	;
-
-public
-rule
-@init
-{
-	GrammarAST eob=null;
-	CommonToken start = (CommonToken)LT(1);
-	int startLine = LT(1).getLine();
-}
-	:
-	(	(	d=DOC_COMMENT
-		)?
-		(	p1='protected'	//{modifier=$p1.tree;}
-		|	p2='public'		//{modifier=$p2.tree;}
-		|	p3='private'	//{modifier=$p3.tree;}
-		|	p4='fragment'	//{modifier=$p4.tree;}
-		)?
-		ruleName=id
-		{
-			currentRuleName=$ruleName.text;
-			if ( grammarType==Grammar.LEXER && $p4==null )
-				grammar.lexerRuleNamesInCombined.add(currentRuleName);
-		}
-		( BANG )?
-		( aa=ARG_ACTION )?
-		( 'returns' rt=ARG_ACTION  )?
-		( throwsSpec )?
-		( optionsSpec )?
-		scopes=ruleScopeSpec
-		(ruleActions)?
-		COLON
-		ruleAltList[$optionsSpec.opts]
-		SEMI
-		( ex=exceptionGroup )?
-		->	^(	RULE[$ruleName.start, "rule"]
-				$ruleName
-				// the modifier will be 0 or one of the modifiers:
-				$p1? $p2? $p3? $p4?
-				^(ARG["ARG"] $aa?)
-				^(RET["RET"] $rt?)
-				throwsSpec?
-				optionsSpec?
-				$scopes
-				ruleActions?
-				ruleAltList
-				$ex?
-				EOR[$SEMI,"<end-of-rule>"])
-	)
-	{
-		$tree.setTreeEnclosingRuleNameDeeply(currentRuleName);
-		((GrammarAST)$tree.getChild(0)).setBlockOptions($optionsSpec.opts);
-	}
-	;
-
-ruleActions
-	:	(ruleAction)+
-	;
-
-/** Match stuff like @init {int i;} */
-ruleAction
-	:	AMPERSAND^ id ACTION
-	;
-
-throwsSpec
-	:	'throws'^ id ( COMMA! id )*
-	;
-
-ruleScopeSpec
-	:	( 'scope' ruleActions? ACTION )?
-		( 'scope' idList SEMI )*
-		-> ^(SCOPE[$start,"scope"] ruleActions? ACTION? idList*)
-	;
-
-ruleAltList[Map<String, Object> opts]
-@init
-{
-	GrammarAST blkRoot = null;
-	GrammarAST save = currentBlockAST;
-}
-	:	( -> BLOCK[input.LT(-1),"BLOCK"] )
-		{
-			blkRoot = (GrammarAST)$tree.getChild(0);
-			blkRoot.setBlockOptions($opts);
-			currentBlockAST = blkRoot;
-		}
-		(	a1=alternative r1=rewrite
-			{if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred($a1.tree);}
-			-> $a1 $r1?
-		)
-		(	(	OR a2=alternative r2=rewrite
-				{if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred($a2.tree);}
-				-> $ruleAltList $a2 $r2?
-			)+
-		|
-		)
-		-> ^({blkRoot} $ruleAltList EOB["<end-of-block>"])
-	;
-finally { currentBlockAST = save; }
-
-/** Build #(BLOCK ( #(ALT ...) EOB )+ ) */
-block
-@init
-{
-	GrammarAST save = currentBlockAST;
-}
-	:	(	lp=LPAREN
-			-> BLOCK[$lp,"BLOCK"]
-		)
-		{currentBlockAST = (GrammarAST)$tree.getChild(0);}
-		(
-			// 2nd alt and optional branch ambig due to
-			// linear approx LL(2) issue.  COLON ACTION
-			// matched correctly in 2nd alt.
-			(optionsSpec {((GrammarAST)$tree.getChild(0)).setOptions(grammar,$optionsSpec.opts);})?
-			( ruleActions )?
-			COLON
-		|	ACTION COLON
-		)?
-
-		a=alternative r=rewrite
-		{
-			stream_alternative.add( $r.tree );
-			if ( LA(1)==OR || (LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR) )
-				prefixWithSynPred($a.tree);
-		}
-		(	OR a=alternative r=rewrite
-			{
-				stream_alternative.add( $r.tree );
-				if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR))
-					prefixWithSynPred($a.tree);
-			}
-		)*
-
-		rp=RPAREN
-		-> ^($block optionsSpec? ruleActions? ACTION? alternative+ EOB[$rp,"<end-of-block>"])
-	;
-finally { currentBlockAST = save; }
-
-// ALT and EOA have indexes tracking start/stop of entire alt
-alternative
-	:	element+
-		-> ^(ALT[$start,"ALT"] element+ EOA[input.LT(-1),"<end-of-alt>"])
-	|	// epsilon alt
-		-> ^(ALT[$start,"ALT"] EPSILON[input.LT(-1),"epsilon"] EOA[input.LT(-1),"<end-of-alt>"])
-	;
-
-exceptionGroup
-	:	exceptionHandler+ finallyClause?
-	|	finallyClause
-	;
-
-exceptionHandler
-	:	'catch'^ ARG_ACTION ACTION
-	;
-
-finallyClause
-	:	'finally'^ ACTION
-	;
-
-element
-	:	elementNoOptionSpec
-	;
-
-elementNoOptionSpec
-@init
-{
-	IntSet elements=null;
-}
-	:	(	(	id (ASSIGN^|PLUS_ASSIGN^) (atom|block)
-			)
-			(	sub=ebnfSuffix[root_0,false]! {root_0 = $sub.tree;}
-			)?
-		|	a=atom
-			(	sub2=ebnfSuffix[$a.tree,false]! {root_0=$sub2.tree;}
-			)?
-		|	ebnf
-		|	FORCED_ACTION
-		|	ACTION
-		|	p=SEMPRED ( IMPLIES! {$p.setType(GATED_SEMPRED);} )?
-			{
-			grammar.blocksWithSemPreds.add(currentBlockAST);
-			}
-		|	t3=tree_
-		)
-	;
-
-atom
-	:	range (ROOT^|BANG^)?
-	|	(
-			// grammar.rule but ensure no spaces. "A . B" is not a qualified ref
-			// We do this here rather than in the lexer so we can build a tree
-			({LT(1).getCharPositionInLine()+LT(1).getText().length()==LT(2).getCharPositionInLine()&&
-			 LT(2).getCharPositionInLine()+1==LT(3).getCharPositionInLine()}? id WILDCARD (terminal|ruleref)) =>
-			id w=WILDCARD^ (terminal|ruleref) {$w.setType(DOT);}
-		|	terminal
-		|	ruleref
-		)
-	|	notSet (ROOT^|BANG^)?
-	;
-
-ruleref
-	:	RULE_REF^ ARG_ACTION? (ROOT^|BANG^)?
-	;
-
-notSet
-	:	NOT^
-		(	notTerminal
-		|	block
-		)
-	;
-
-treeRoot
-@init{atTreeRoot=true;}
-@after{atTreeRoot=false;}
-	:	id (ASSIGN^|PLUS_ASSIGN^) (atom|block)
-	|	atom
-	|	block
-	;
-
-tree_
-	:	TREE_BEGIN^
-		treeRoot element+
-		RPAREN!
-	;
-
-/** matches EBNF blocks (and sets via block rule) */
-ebnf
-	:	block
-		(	QUESTION
-			-> ^(OPTIONAL[$start,"?"] block)
-		|	STAR
-			-> ^(CLOSURE[$start,"*"] block)
-		|	PLUS
-			-> ^(POSITIVE_CLOSURE[$start,"+"] block)
-		|	IMPLIES // syntactic predicate
-			// ignore for lexer rules in combined
-			-> {grammarType == Grammar.COMBINED && Rule.getRuleType(currentRuleName) == Grammar.LEXER}? ^(SYNPRED[$start,"=>"] block)
-			// create manually specified (...)=> predicate; convert to sempred
-			-> {createSynSemPredFromBlock($block.tree, SYN_SEMPRED)}
-		|	ROOT
-			-> ^(ROOT block)
-		|	BANG
-			-> ^(BANG block)
-		|
-			-> block
-		)
-	;
-
-range
-	:	c1=CHAR_LITERAL RANGE c2=CHAR_LITERAL
-		-> ^(CHAR_RANGE[$c1,".."] $c1 $c2)
-	;
-
-terminal
-	:	cl=CHAR_LITERAL^ ( elementOptions[$cl.tree]! )? (ROOT^|BANG^)?
-
-	|	tr=TOKEN_REF^
-		( elementOptions[$tr.tree]! )?
-		( ARG_ACTION )? // Args are only valid for lexer rules
-		(ROOT^|BANG^)?
-
-	|	sl=STRING_LITERAL^ ( elementOptions[$sl.tree]! )? (ROOT^|BANG^)?
-
-	|	wi=WILDCARD (ROOT^|BANG^)?
-		{
-			if ( atTreeRoot )
-			{
-				ErrorManager.syntaxError(
-					ErrorManager.MSG_WILDCARD_AS_ROOT,grammar,$wi,null,null);
-			}
-		}
-	;
-
-elementOptions[GrammarAST terminalAST]
-	:	OPEN_ELEMENT_OPTION^ defaultNodeOption[terminalAST] CLOSE_ELEMENT_OPTION!
-	|	OPEN_ELEMENT_OPTION^ elementOption[terminalAST] (SEMI! elementOption[terminalAST])* CLOSE_ELEMENT_OPTION!
-	;
-
-defaultNodeOption[GrammarAST terminalAST]
-	:	elementOptionId
-		{terminalAST.setTerminalOption(grammar,Grammar.defaultTokenOption,$elementOptionId.qid);}
-	;
-
-elementOption[GrammarAST terminalAST]
-	:	id ASSIGN^
-		(	elementOptionId
-			{terminalAST.setTerminalOption(grammar,$id.text,$elementOptionId.qid);}
-		|	(t=STRING_LITERAL|t=DOUBLE_QUOTE_STRING_LITERAL|t=DOUBLE_ANGLE_STRING_LITERAL)
-			{terminalAST.setTerminalOption(grammar,$id.text,$t.text);}
-		)
-	;
-
-elementOptionId returns [String qid]
-@init{StringBuffer buf = new StringBuffer();}
-	:	i=id {buf.append($i.text);} ('.' i=id {buf.append("." + $i.text);})*
-		{$qid = buf.toString();}
-	;
-
-ebnfSuffix[GrammarAST elemAST, boolean inRewrite]
-@init
-{
-GrammarAST blkRoot=null;
-GrammarAST alt=null;
-GrammarAST save = currentBlockAST;
-}
-@after
-{
-currentBlockAST = save;
-}
-	:	(	-> BLOCK[$elemAST.getToken(), "BLOCK"]
-		)
-		{ blkRoot = (GrammarAST)$tree.getChild(0); currentBlockAST = blkRoot; }
-		(	// create alt
-			-> ^(ALT[$elemAST.getToken(), "ALT"] {$elemAST} EOA["<end-of-alt>"])
-		)
-		{
-			alt = (GrammarAST)$tree.getChild(0);
-			if ( !inRewrite )
-				prefixWithSynPred(alt);
-		}
-		(	QUESTION
-			-> OPTIONAL[$elemAST.getToken(),"?"]
-		|	STAR
-			-> CLOSURE[$elemAST.getToken(),"*"]
-		|	PLUS
-			-> POSITIVE_CLOSURE[$elemAST.getToken(),"+"]
-		)
-		-> ^($ebnfSuffix ^({blkRoot} {alt} EOB[$elemAST.getToken(), "<end-of-block>"]))
-	;
-
-notTerminal
-	:	CHAR_LITERAL
-	|	TOKEN_REF
-	|	STRING_LITERAL
-	;
-
-idList
-	:	id (COMMA! id)*
-	;
-
-id
-	:	TOKEN_REF
-		-> ID[$TOKEN_REF]
-	|	RULE_REF
-		-> ID[$RULE_REF]
-	;
-
-// R E W R I T E  S Y N T A X
-
-rewrite
-	:	rewrite_with_sempred*
-		REWRITE rewrite_alternative
-		-> ^(REWRITES rewrite_with_sempred* ^(REWRITE rewrite_alternative))
-	|
-	;
-
-rewrite_with_sempred
-	:	REWRITE^ SEMPRED rewrite_alternative
-	;
-
-rewrite_block
-	:	LPAREN
-		rewrite_alternative
-		RPAREN
-		-> ^(BLOCK[$LPAREN,"BLOCK"] rewrite_alternative EOB[$RPAREN,"<end-of-block>"])
-	;
-
-rewrite_alternative
-options{k=1;}
-	:	{grammar.buildTemplate()}? => rewrite_template
-
-	|	{grammar.buildAST()}? => ( rewrite_element )+
-		-> {!stream_rewrite_element.hasNext()}? ^(ALT[LT(1),"ALT"] EPSILON["epsilon"] EOA["<end-of-alt>"])
-		-> ^(ALT[LT(1),"ALT"] rewrite_element+ EOA["<end-of-alt>"])
-
-	|
-		-> ^(ALT[LT(1),"ALT"] EPSILON["epsilon"] EOA["<end-of-alt>"])
-	|	{grammar.buildAST()}? ETC
-	;
-
-rewrite_element
-	:	(	t=rewrite_atom
-			-> $t
-		)
-		(	subrule=ebnfSuffix[$t.tree,true]
-			-> $subrule
-		)?
-	|	rewrite_ebnf
-	|	(	tr=rewrite_tree
-			-> $tr
-		)
-		(	subrule=ebnfSuffix[$tr.tree,true]
-			-> $subrule
-		)?
-	;
-
-rewrite_atom
-	:	tr=TOKEN_REF^ elementOptions[$tr.tree]!? ARG_ACTION? // for imaginary nodes
-	|	RULE_REF
-	|	cl=CHAR_LITERAL elementOptions[$cl.tree]!?
-	|	sl=STRING_LITERAL elementOptions[$sl.tree]!?
-	|	DOLLAR! label // reference to a label in a rewrite rule
-	|	ACTION
-	;
-
-label
-	:	TOKEN_REF -> LABEL[$TOKEN_REF]
-	|	RULE_REF -> LABEL[$RULE_REF]
-	;
-
-rewrite_ebnf
-	:	b=rewrite_block
-		(	QUESTION
-			-> ^(OPTIONAL[$b.start,"?"] $b)
-		|	STAR
-			-> ^(CLOSURE[$b.start,"*"] $b)
-		|	PLUS
-			-> ^(POSITIVE_CLOSURE[$b.start,"+"] $b)
-		)
-	;
-
-rewrite_tree
-	:	TREE_BEGIN^
-			rewrite_atom rewrite_element*
-		RPAREN!
-	;
-
-/** Build a tree for a template rewrite:
-	  ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
-	where ARGLIST is always there even if no args exist.
-	ID can be "template" keyword.  If first child is ACTION then it's
-	an indirect template ref
-
-	-> foo(a={...}, b={...})
-	-> ({string-e})(a={...}, b={...})  // e evaluates to template name
-	-> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
-	-> {st-expr} // st-expr evaluates to ST
- */
-public
-rewrite_template
-options{k=1;}
-	:	// -> template(a={...},...) "..."
-		{LT(1).getText().equals("template")}? => // inline
-		(	rewrite_template_head
-			-> rewrite_template_head
-		)
-		( st=DOUBLE_QUOTE_STRING_LITERAL | st=DOUBLE_ANGLE_STRING_LITERAL )
-		{ adaptor.addChild( $tree.getChild(0), adaptor.create($st) ); }
-
-	|	// -> foo(a={...}, ...)
-		rewrite_template_head
-
-	|	// -> ({expr})(a={...}, ...)
-		rewrite_indirect_template_head
-
-	|	// -> {...}
-		ACTION
-	;
-
-/** -> foo(a={...}, ...) */
-rewrite_template_head
-	:	id lp=LPAREN
-		rewrite_template_args
-		RPAREN
-		-> ^(TEMPLATE[$lp,"TEMPLATE"] id rewrite_template_args)
-	;
-
-/** -> ({expr})(a={...}, ...) */
-rewrite_indirect_template_head
-	:	lp=LPAREN
-		ACTION
-		RPAREN
-		LPAREN rewrite_template_args RPAREN
-		-> ^(TEMPLATE[$lp,"TEMPLATE"] ACTION rewrite_template_args)
-	;
-
-rewrite_template_args
-	:	rewrite_template_arg (COMMA rewrite_template_arg)*
-		-> ^(ARGLIST["ARGLIST"] rewrite_template_arg+)
-	|
-		-> ARGLIST["ARGLIST"]
-	;
-
-rewrite_template_arg
-	:	id a=ASSIGN ACTION
-		-> ^(ARG[$a,"ARG"] id ACTION)
-	;
-
-//////////////////////////////////////////////////////////////////////////////
-//////////////////////////////////////////////////////////////////////////////
-//////////////////////////////////////////////////////////////////////////////
-// L E X E R
-
-// get rid of warnings:
-fragment STRING_LITERAL : ;
-fragment FORCED_ACTION : ;
-fragment DOC_COMMENT : ;
-fragment SEMPRED : ;
-
-WS
-	:	(	' '
-		|	'\t'
-		|	('\r')? '\n'
-		)
-		{ $channel = HIDDEN; }
-	;
-
-COMMENT
-@init{List<Integer> type = new ArrayList<Integer>() {{ add(0); }};}
-	:	( SL_COMMENT | ML_COMMENT[type] {$type = type.get(0);} )
-		{
-			if ( $type != DOC_COMMENT )
-				$channel = HIDDEN;
-		}
-	;
-
-fragment
-SL_COMMENT
-	:	'//'
-		(	(' $ANTLR') => ' $ANTLR ' SRC (('\r')? '\n')? // src directive
-		|	~('\r'|'\n')* (('\r')? '\n')?
-		)
-	;
-
-fragment
-ML_COMMENT[List<Integer> type]
-	:	'/*'
-		{$type.set(0, (input.LA(1) == '*' && input.LA(2) != '/') ? DOC_COMMENT : ML_COMMENT);}
-		.*
-		'*/'
-	;
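
A lexer fragment rule cannot set $type for its caller directly, so COMMENT hands ML_COMMENT a one-element List<Integer> and reads the classification back out of it. The same out-parameter-via-mutable-list idiom in plain Java, with illustrative constants standing in for the generated token types:

    import java.util.ArrayList;
    import java.util.List;

    public class OutParamDemo {
        static final int ML_COMMENT = 0, DOC_COMMENT = 1; // illustrative stand-ins

        // Mirrors the check in ML_COMMENT above: after "/*", a '*' that is not
        // immediately followed by '/' marks a doc comment.
        static void classify(String comment, List<Integer> typeOut) {
            boolean doc = comment.startsWith("/**") && !comment.startsWith("/**/");
            typeOut.set(0, doc ? DOC_COMMENT : ML_COMMENT);
        }

        public static void main(String[] args) {
            List<Integer> type = new ArrayList<Integer>() {{ add(0); }};
            classify("/** docs */", type);
            System.out.println(type.get(0) == DOC_COMMENT); // true
        }
    }
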
-
-OPEN_ELEMENT_OPTION
-	:	'<'
-	;
-
-CLOSE_ELEMENT_OPTION
-	:	'>'
-	;
-
-AMPERSAND : '@';
-
-COMMA : ',';
-
-QUESTION :	'?' ;
-
-TREE_BEGIN : '^(' ;
-
-LPAREN:	'(' ;
-
-RPAREN:	')' ;
-
-COLON :	':' ;
-
-STAR:	'*' ;
-
-PLUS:	'+' ;
-
-ASSIGN : '=' ;
-
-PLUS_ASSIGN : '+=' ;
-
-IMPLIES : '=>' ;
-
-REWRITE : '->' ;
-
-SEMI:	';' ;
-
-ROOT : '^' {hasASTOperator=true;} ;
-
-BANG : '!' {hasASTOperator=true;} ;
-
-OR	:	'|' ;
-
-WILDCARD : '.' ;
-
-ETC : '...' ;
-
-RANGE : '..' ;
-
-NOT :	'~' ;
-
-RCURLY:	'}'	;
-
-DOLLAR : '$' ;
-
-STRAY_BRACKET
-	:	']'
-		{
-			ErrorManager.syntaxError(
-				ErrorManager.MSG_SYNTAX_ERROR,
-				null,
-				state.token,
-				"antlr: dangling ']'? make sure to escape with \\]",
-				null);
-		}
-	;
-
-CHAR_LITERAL
-	:	'\''
-		(	ESC
-		|	~('\\'|'\'')
-		)*
-		'\''
-		{
-			StringBuffer s = Grammar.getUnescapedStringFromGrammarStringLiteral($text);
-			if ( s.length() > 1 )
-			{
-				$type = STRING_LITERAL;
-			}
-		}
-	;
-
-DOUBLE_QUOTE_STRING_LITERAL
-@init
-{
-	StringBuilder builder = new StringBuilder();
-}
-	:	'"'							{builder.append('"');}
-		(	('\\\"') => '\\' '"'	{builder.append('"');}
-		|	'\\' c=~'"'				{builder.append("\\" + (char)$c);}
-		|	c=~('\\'|'"')			{builder.append((char)$c);}
-		)*
-		'"'							{builder.append('"');}
-		{
-			setText(builder.toString());
-		}
-	;
-
-DOUBLE_ANGLE_STRING_LITERAL
-	:	'<<' .* '>>'
-	;
-
-fragment
-ESC
-	:	'\\'
-		(	// due to the way ESC is used, we don't need to handle the following character in different ways
-			/*'n'
-		|	'r'
-		|	't'
-		|	'b'
-		|	'f'
-		|	'"'
-		|	'\''
-		|	'\\'
-		|	'>'
-		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
-		|*/	. // unknown, leave as it is
-		)
-	;
-
-fragment
-DIGIT
-	:	'0'..'9'
-	;
-
-fragment
-XDIGIT
-	:	'0' .. '9'
-	|	'a' .. 'f'
-	|	'A' .. 'F'
-	;
-
-INT
-	:	('0'..'9')+
-	;
-
-ARG_ACTION
-@init {
-	List<String> text = new ArrayList<String>() {{ add(null); }};
-}
-	:	'['
-		NESTED_ARG_ACTION[text]
-		']'
-		{setText(text.get(0));}
-	;
-
-fragment
-NESTED_ARG_ACTION[List<String> text]
-@init {
-	$text.set(0, "");
-	StringBuilder builder = new StringBuilder();
-}
-	:	(	('\\]') => '\\' ']'		{builder.append("]");}
-		|	'\\' c=~(']')			{builder.append("\\" + (char)$c);}
-		|	ACTION_STRING_LITERAL	{builder.append($ACTION_STRING_LITERAL.text);}
-		|	ACTION_CHAR_LITERAL		{builder.append($ACTION_CHAR_LITERAL.text);}
-		|	c=~('\\'|'"'|'\''|']')	{builder.append((char)$c);}
-		)*
-		{
-			$text.set(0, builder.toString());
-		}
-	;
-
-ACTION
-@init
-{
-	int actionLine = getLine();
-	int actionColumn = getCharPositionInLine();
-}
-	:	NESTED_ACTION
-		('?' {$type = SEMPRED;})?
-		{
-			String action = $text;
-			int n = 1; // num delimiter chars
-			if ( action.startsWith("{{") && action.endsWith("}}") )
-			{
-				$type = FORCED_ACTION;
-				n = 2;
-			}
-			action = action.substring(n,action.length()-n - ($type==SEMPRED ? 1 : 0));
-			setText(action);
-		}
-	;
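
The delimiter arithmetic in the ACTION rule is easy to misread: strip one brace on each side for {...}, two for a forced {{...}} action, and one extra trailing character when the token was retyped to SEMPRED, because the matched text then ends in '?'. The same computation in isolation, as a stand-alone check:

    public class ActionTextDemo {
        // Same substring arithmetic as the ACTION rule above.
        static String strip(String action, boolean isSempred) {
            int n = 1; // number of delimiter chars on each side
            if (action.startsWith("{{") && action.endsWith("}}")) n = 2;
            return action.substring(n, action.length() - n - (isSempred ? 1 : 0));
        }

        public static void main(String[] args) {
            System.out.println(strip("{x = 3;}", false));   // x = 3;
            System.out.println(strip("{{x = 3;}}", false)); // x = 3;
            System.out.println(strip("{p > 0}?", true));    // p > 0
        }
    }
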
-
-fragment
-NESTED_ACTION
-	:	'{'
-		(	NESTED_ACTION
-		|	ACTION_CHAR_LITERAL
-		|	('//' | '/*') => COMMENT
-		|	ACTION_STRING_LITERAL
-		|	ACTION_ESC
-		|	~('{'|'\''|'"'|'\\'|'}')
-		)*
-		'}'
-	;
-
-fragment
-ACTION_CHAR_LITERAL
-	:	'\''
-		(	ACTION_ESC
-		|	~('\\'|'\'')
-		)*
-		'\''
-	;
-
-fragment
-ACTION_STRING_LITERAL
-	:	'"'
-		(	ACTION_ESC
-		|	~('\\'|'"')
-		)*
-		'"'
-	;
-
-fragment
-ACTION_ESC
-	:	'\\\''
-	|	'\\\"'
-	|	'\\' ~('\''|'"')
-	;
-
-TOKEN_REF
-	:	'A'..'Z'
-		(	'a'..'z'|'A'..'Z'|'_'|'0'..'9'
-		)*
-	;
-
-TOKENS
-	:	'tokens' WS_LOOP '{'
-	;
-
-OPTIONS
-	:	'options' WS_LOOP '{'
-	;
-
-// we get a warning here when looking for options '{', but it works right
-RULE_REF
-@init
-{
-	int t=0;
-}
-	:	'a'..'z' ('a'..'z' | 'A'..'Z' | '_' | '0'..'9')*
-	;
-
-fragment
-WS_LOOP
-	:	(	WS
-		|	COMMENT
-		)*
-	;
-
-fragment
-WS_OPT
-	:	(WS)?
-	;
-
-/** Reset the file and line information; useful when the grammar
- *  has been generated so that errors are shown relative to the
- *  original file like the old C preprocessor used to do.
- */
-fragment
-SRC
-	:	'src' ' ' file=ACTION_STRING_LITERAL ' ' line=INT
-		{
-			setFileName($file.text.substring(1,$file.text.length()-1));
-			input.setLine(Integer.parseInt($line.text) - 1);  // -1 because SL_COMMENT will increment the line no. KR
-		}
-	;
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRTreePrinter.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRTreePrinter.g
deleted file mode 100644
index 0fbbfa0..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRTreePrinter.g
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2011 Terence Parr
- All rights reserved.
-
- Grammar conversion to ANTLR v3:
- Copyright (c) 2011 Sam Harwell
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-	notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-	notice, this list of conditions and the following disclaimer in the
-	documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-	derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Print out a grammar (no pretty printing).
- *
- *  Terence Parr
- *  University of San Francisco
- *  August 19, 2003
- */
-tree grammar ANTLRTreePrinter;
-
-options
-{
-	tokenVocab = ANTLR;
-	ASTLabelType = GrammarAST;
-}
-
-@header {
-package org.antlr.grammar.v3;
-import org.antlr.tool.*;
-import java.util.StringTokenizer;
-}
-
-@members {
-protected Grammar grammar;
-protected boolean showActions;
-protected StringBuilder buf = new StringBuilder(300);
-
-private ANTLRTreePrinter.block_return block(GrammarAST t, boolean forceParens) throws RecognitionException {
-    ANTLRTreePrinter other = new ANTLRTreePrinter(new CommonTreeNodeStream(t));
-    other.buf = buf;
-    return other.block(forceParens);
-}
-
-public final int countAltsForBlock(GrammarAST t) {
-    int n = 0;
-    for ( int i = 0; i < t.getChildCount(); i++ )
-    {
-        if ( t.getChild(i).getType() == ALT )
-            n++;
-    }
-
-    return n;
-}
-
-public void out(String s) {
-    buf.append(s);
-}
-
-@Override
-public void reportError(RecognitionException ex) {
-    Token token = null;
-    if (ex instanceof MismatchedTokenException) {
-        token = ((MismatchedTokenException)ex).token;
-    } else if (ex instanceof NoViableAltException) {
-        token = ((NoViableAltException)ex).token;
-    }
-
-    ErrorManager.syntaxError(
-        ErrorManager.MSG_SYNTAX_ERROR,
-        grammar,
-        token,
-        "antlr.print: " + ex.toString(),
-        ex );
-}
-
-/** Normalize a grammar printout by collapsing runs of spaces
- *  and trimming leading/trailing whitespace.  For example, convert
- *
- *  ( A  |  B  |  C )*
- *
- *  to
- *
- *  ( A | B | C )*
- */
-public static String normalize(String g) {
-    StringTokenizer st = new StringTokenizer(g, " ", false);
-    StringBuffer buf = new StringBuffer();
-    while ( st.hasMoreTokens() ) {
-        String w = st.nextToken();
-        buf.append(w);
-        buf.append(" ");
-    }
-    return buf.toString().trim();
-}
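
Because normalize() is a public static member, it ends up as a static method on the generated ANTLRTreePrinter class and can be exercised directly; a quick illustration of the collapsing it performs (the demo class name is arbitrary):

    import org.antlr.grammar.v3.ANTLRTreePrinter;

    public class NormalizeDemo {
        public static void main(String[] args) {
            // Runs of spaces collapse to single spaces and the result is trimmed.
            System.out.println(ANTLRTreePrinter.normalize("( A  |  B  |  C )*"));
            // prints: ( A | B | C )*
        }
    }
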
-}
-
-/** Call this to figure out how to print */
-public
-toString[Grammar g, boolean showActions] returns [String s=null]
-@init {
-	grammar = g;
-	this.showActions = showActions;
-}
-	:	(	grammar_
-		|	rule
-		|	alternative
-		|	element
-		|	single_rewrite
-		|	rewrite
-		|	EOR //{s="EOR";}
-		)
-		{return normalize(buf.toString());}
-	;
-
-// --------------
-
-grammar_
-	:	^( LEXER_GRAMMAR grammarSpec["lexer " ] )
-	|	^( PARSER_GRAMMAR grammarSpec["parser "] )
-	|	^( TREE_GRAMMAR grammarSpec["tree "] )
-	|	^( COMBINED_GRAMMAR grammarSpec[""] )
-	;
-
-attrScope
-	:	^( 'scope' ID ruleAction* ACTION )
-	;
-
-grammarSpec[String gtype]
-	:	id=ID {out(gtype+"grammar "+$id.text);}
-		(cmt=DOC_COMMENT {out($cmt.text+"\n");} )?
-		(optionsSpec)? {out(";\n");}
-		(delegateGrammars)?
-		(tokensSpec)?
-		(attrScope)*
-		(actions)?
-		rules
-	;
-
-actions
-	:	( action )+
-	;
-
-action
-@init {
-	String scope=null, name=null;
-	String action=null;
-}
-	:	^(	AMPERSAND id1=ID
-			(	id2=ID a1=ACTION
-				{scope=$id1.text; name=$a1.text; action=$a1.text;}
-			|	a2=ACTION
-				{scope=null; name=$id1.text; action=$a2.text;}
-			)
-		)
-		{
-			if ( showActions )
-			{
-				out("@"+(scope!=null?scope+"::":"")+name+action);
-			}
-		}
-	;
-
-optionsSpec
-	:	^(	OPTIONS {out(" options {");}
-			(option {out("; ");})+
-			{out("} ");}
-		)
-	;
-
-option
-	:	^( ASSIGN id=ID {out($id.text+"=");} optionValue )
-	;
-
-optionValue
-	:	id=ID            {out($id.text);}
-	|	s=STRING_LITERAL {out($s.text);}
-	|	c=CHAR_LITERAL   {out($c.text);}
-	|	i=INT            {out($i.text);}
-//	|   charSet
-	;
-
-/*
-charSet
-	:   #( CHARSET charSetElement )
-	;
-
-charSetElement
-	:   c:CHAR_LITERAL {out(#c.getText());}
-	|   #( OR c1:CHAR_LITERAL c2:CHAR_LITERAL )
-	|   #( RANGE c3:CHAR_LITERAL c4:CHAR_LITERAL )
-	;
-*/
-
-delegateGrammars
-	:	^( 'import' ( ^(ASSIGN ID ID) | ID )+ )
-	;
-
-tokensSpec
-	:	^(TOKENS tokenSpec*)
-	;
-
-tokenSpec
-	:	TOKEN_REF
-	|	^( ASSIGN TOKEN_REF (STRING_LITERAL|CHAR_LITERAL) )
-	;
-
-rules
-	:	( rule | precRule )+
-	;
-
-rule
-	:	^(	RULE id=ID
-			(modifier)?
-			{out($id.text);}
-			^(ARG (arg=ARG_ACTION {out("["+$arg.text+"]");} )? )
-			^(RET (ret=ARG_ACTION {out(" returns ["+$ret.text+"]");} )? )
-			(throwsSpec)?
-			(optionsSpec)?
-			(ruleScopeSpec)?
-			(ruleAction)*
-			{out(" :");}
-			{
-				if ( input.LA(5) == NOT || input.LA(5) == ASSIGN )
-					out(" ");
-			}
-			b=block[false]
-			(exceptionGroup)?
-			EOR {out(";\n");}
-		)
-	;
-
-precRule
-	:	^(	PREC_RULE id=ID
-			(modifier)?
-			{out($id.text);}
-			^(ARG (arg=ARG_ACTION {out("["+$arg.text+"]");} )? )
-			^(RET (ret=ARG_ACTION {out(" returns ["+$ret.text+"]");} )? )
-			(throwsSpec)?
-			(optionsSpec)?
-			(ruleScopeSpec)?
-			(ruleAction)*
-			{out(" :");}
-			{
-				if ( input.LA(5) == NOT || input.LA(5) == ASSIGN )
-					out(" ");
-			}
-			b=block[false]
-			(exceptionGroup)?
-			EOR {out(";\n");}
-		)
-	;
-
-ruleAction
-	:	^(AMPERSAND id=ID a=ACTION )
-		{if ( showActions ) out("@"+$id.text+"{"+$a.text+"}");}
-	;
-
-modifier
-@init
-{out($modifier.start.getText()); out(" ");}
-	:	'protected'
-	|	'public'
-	|	'private'
-	|	'fragment'
-	;
-
-throwsSpec
-	:	^('throws' ID+)
-	;
-
-ruleScopeSpec
-	:	^( 'scope' ruleAction* (ACTION)? ( ID )* )
-	;
-
-block[boolean forceParens]
-@init
-{
-int numAlts = countAltsForBlock($start);
-}
-	:	^(	BLOCK
-			{
-				if ( forceParens||numAlts>1 )
-				{
-					//for ( Antlr.Runtime.Tree.Tree parent = $start.getParent(); parent != null && parent.getType() != RULE; parent = parent.getParent() )
-					//{
-					//	if ( parent.getType() == BLOCK && countAltsForBlock((GrammarAST)parent) > 1 )
-					//	{
-					//		out(" ");
-					//		break;
-					//	}
-					//}
-					out(" (");
-				}
-			}
-			(optionsSpec {out(" :");} )?
-			alternative rewrite ( {out("|");} alternative rewrite )*
-			EOB   {if ( forceParens||numAlts>1 ) out(")");}
-		 )
-	;
-
-alternative
-	:	^( ALT element* EOA )
-	;
-
-exceptionGroup
-	:	( exceptionHandler )+ (finallyClause)?
-	|	finallyClause
-	;
-
-exceptionHandler
-	:	^('catch' ARG_ACTION ACTION)
-	;
-
-finallyClause
-	:	^('finally' ACTION)
-	;
-
-rewrite
-	:	^(REWRITES single_rewrite+)
-	|	REWRITES
-	|
-	;
-
-single_rewrite
-	:	^(	REWRITE {out(" ->");}
-			(	SEMPRED {out(" {"+$SEMPRED.text+"}?");}
-			)?
-			(	alternative
-			|	rewrite_template
-			|	ETC {out("...");}
-			|	ACTION {out(" {"+$ACTION.text+"}");}
-			)
-		)
-	;
-
-rewrite_template
-	:	^(	TEMPLATE
-			(	id=ID {out(" "+$id.text);}
-			|	ind=ACTION {out(" ({"+$ind.text+"})");}
-			)
-			^(	ARGLIST
-				{out("(");}
-				(	^(	ARG arg=ID {out($arg.text+"=");}
-						a=ACTION   {out($a.text);}
-					)
-				)*
-				{out(")");}
-			)
-			(	DOUBLE_QUOTE_STRING_LITERAL {out(" "+$DOUBLE_QUOTE_STRING_LITERAL.text);}
-			|	DOUBLE_ANGLE_STRING_LITERAL {out(" "+$DOUBLE_ANGLE_STRING_LITERAL.text);}
-			)?
-		)
-	;
-
-element
-	:	^(ROOT element) {out("^");}
-	|	^(BANG element) {out("!");}
-	|	atom
-	|	^(NOT {out("~");} element)
-	|	^(RANGE atom {out("..");} atom)
-	|	^(CHAR_RANGE atom {out("..");} atom)
-	|	^(ASSIGN id=ID {out($id.text+"=");} element)
-	|	^(PLUS_ASSIGN id2=ID {out($id2.text+"+=");} element)
-	|	ebnf
-	|	tree_
-	|	^( SYNPRED block[true] ) {out("=>");}
-	|	a=ACTION  {if ( showActions ) {out("{"); out($a.text); out("}");}}
-	|	a2=FORCED_ACTION  {if ( showActions ) {out("{{"); out($a2.text); out("}}");}}
-	|	pred=SEMPRED
-		{
-			if ( showActions )
-			{
-				out("{");
-				out($pred.text);
-				out("}?");
-			}
-			else
-			{
-				out("{...}?");
-			}
-		}
-	|	spred=SYN_SEMPRED
-		{
-			String name = $spred.text;
-			GrammarAST predAST=grammar.getSyntacticPredicate(name);
-			block(predAST, true);
-			out("=>");
-		}
-	|	^(BACKTRACK_SEMPRED .*) // don't print anything (auto backtrack stuff)
-	|	gpred=GATED_SEMPRED
-		{
-		if ( showActions ) {out("{"); out($gpred.text); out("}? =>");}
-		else {out("{...}? =>");}
-		}
-	|	EPSILON
-	;
-
-ebnf
-	:	block[true] {out(" ");}
-	|	^( OPTIONAL block[true] ) {out("? ");}
-	|	^( CLOSURE block[true] )  {out("* ");}
-	|	^( POSITIVE_CLOSURE block[true] ) {out("+ ");}
-	;
-
-tree_
-	:	^(TREE_BEGIN {out(" ^(");} element (element)* {out(") ");} )
-	;
-
-atom
-@init
-{out(" ");}
-	:	(	^(	RULE_REF		{out($start.toString());}
-				(rarg=ARG_ACTION	{out("["+$rarg.toString()+"]");})?
-				(ast_suffix)?
-			)
-		|	^(	TOKEN_REF		{out($start.toString());}
-				(targ=ARG_ACTION	{out("["+$targ.toString()+"]");} )?
-				(ast_suffix)?
-			)
-		|	^(	CHAR_LITERAL	{out($start.toString());}
-				(ast_suffix)?
-			)
-		|	^(	STRING_LITERAL	{out($start.toString());}
-				(ast_suffix)?
-			)
-		|	^(	WILDCARD		{out($start.toString());}
-				(ast_suffix)?
-			)
-		)
-		{out(" ");}
-	|	LABEL {out(" $"+$LABEL.text);} // used in -> rewrites
-	|	^(DOT ID {out($ID.text+".");} atom) // scope override on rule
-	;
-
-ast_suffix
-	:	ROOT {out("^");}
-	|	BANG  {out("!");}
-	;
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3.g
deleted file mode 100644
index ff6cfa0..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3.g
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2010 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/** ANTLR v3 grammar written in ANTLR v3 with AST construction */
-grammar ANTLRv3;
-
-options {
-	output=AST;
-	ASTLabelType=CommonTree;
-}
-
-tokens {
-	DOC_COMMENT;
-	PARSER;	
-    LEXER;
-    RULE;
-    BLOCK;
-    OPTIONAL;
-    CLOSURE;
-    POSITIVE_CLOSURE;
-    SYNPRED;
-    RANGE;
-    CHAR_RANGE;
-    EPSILON;
-    ALT;
-    EOR;
-    EOB;
-    EOA; // end of alt
-    ID;
-    ARG;
-    ARGLIST;
-    RET='returns';
-    LEXER_GRAMMAR;
-    PARSER_GRAMMAR;
-    TREE_GRAMMAR;
-    COMBINED_GRAMMAR;
-    LABEL; // $x used in rewrite rules
-    TEMPLATE;
-    SCOPE='scope';
-    SEMPRED;
-    GATED_SEMPRED; // {p}? =>
-    SYN_SEMPRED; // (...) =>   it's a manually-specified synpred converted to sempred
-    BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
-    FRAGMENT='fragment';
-    TREE_BEGIN='^(';
-    ROOT='^';
-    BANG='!';
-    RANGE='..';
-    REWRITE='->';
-    AT='@';
-    LABEL_ASSIGN='=';
-    LIST_LABEL_ASSIGN='+=';
-}
-
-@parser::header
-{
-    package org.antlr.grammar.v3;
-}
-@lexer::header
-{
-    package org.antlr.grammar.v3;
-}
-
-@members {
-	int gtype;
-}
-
-grammarDef
-    :   DOC_COMMENT?
-    	(	'lexer'  {gtype=LEXER_GRAMMAR;}    // pure lexer
-    	|   'parser' {gtype=PARSER_GRAMMAR;}   // pure parser
-    	|   'tree'   {gtype=TREE_GRAMMAR;}     // a tree parser
-    	|		     {gtype=COMBINED_GRAMMAR;} // merged parser/lexer
-    	)
-    	g='grammar' id ';' optionsSpec? tokensSpec? attrScope* action*
-    	rule+
-    	EOF
-    	-> ^( {adaptor.create(gtype,$g)}
-    		  id DOC_COMMENT? optionsSpec? tokensSpec? attrScope* action* rule+
-    		)
-    ;
-
-tokensSpec
-	:	TOKENS tokenSpec+ '}' -> ^(TOKENS tokenSpec+)
-	;
-
-tokenSpec
-	:	TOKEN_REF
-		(	'=' (lit=STRING_LITERAL|lit=CHAR_LITERAL)	-> ^('=' TOKEN_REF $lit)
-		|												-> TOKEN_REF
-		)
-		';'
-	;
-
-attrScope
-	:	'scope' id ACTION -> ^('scope' id ACTION)
-	;
-
-/** Match stuff like @parser::members {int i;} */
-action
-	:	'@' (actionScopeName '::')? id ACTION -> ^('@' actionScopeName? id ACTION)
-	;
-
-/** Sometimes the scope names will collide with keywords; allow them as
- *  ids for action scopes.
- */
-actionScopeName
-	:	id
-	|	l='lexer'	-> ID[$l]
-    |   p='parser'	-> ID[$p]
-	;
-
-optionsSpec
-	:	OPTIONS (option ';')+ '}' -> ^(OPTIONS option+)
-	;
-
-option
-    :   id '=' optionValue -> ^('=' id optionValue)
- 	;
- 	
-optionValue
-    :   qid
-    |   STRING_LITERAL
-    |   CHAR_LITERAL
-    |   INT
-    |	s='*' -> STRING_LITERAL[$s]  // used for k=*
-    ;
-
-rule
-scope {
-	String name;
-}
-	:	DOC_COMMENT?
-		( modifier=('protected'|'public'|'private'|'fragment') )?
-		id {$rule::name = $id.text;}
-		'!'?
-		( arg=ARG_ACTION )?
-		( 'returns' rt=ARG_ACTION  )?
-		throwsSpec? optionsSpec? ruleScopeSpec? ruleAction*
-		':'	altList	';'
-		exceptionGroup?
-	    -> ^( RULE id {modifier!=null?adaptor.create(modifier):null} ^(ARG[$arg] $arg)? ^('returns' $rt)?
-	    	  throwsSpec? optionsSpec? ruleScopeSpec? ruleAction*
-	    	  altList
-	    	  exceptionGroup?
-	    	  EOR["EOR"]
-	    	)
-	;
-
-/** Match stuff like @init {int i;} */
-ruleAction
-	:	'@' id ACTION -> ^('@' id ACTION)
-	;
-
-throwsSpec
-	:	'throws' id ( ',' id )* -> ^('throws' id+)
-	;
-
-ruleScopeSpec
-	:	'scope' ACTION -> ^('scope' ACTION)
-	|	'scope' id (',' id)* ';' -> ^('scope' id+)
-	|	'scope' ACTION
-		'scope' id (',' id)* ';'
-		-> ^('scope' ACTION id+ )
-	;
-
-block
-    :   lp='('
-		( (opts=optionsSpec)? ':' )?
-		altpair ( '|' altpair )*
-        rp=')'
-        -> ^( BLOCK[$lp,"BLOCK"] optionsSpec? altpair+ EOB[$rp,"EOB"] )
-    ;
-
-altpair : alternative rewrite ;
-
-altList
-@init {
-	// must create root manually as it's used by invoked rules in real antlr tool.
-	// leave here to demonstrate use of {...} in rewrite rule
-	// it's really BLOCK[firstToken,"BLOCK"]; set line/col to previous ( or : token.
-    CommonTree blkRoot = (CommonTree)adaptor.create(BLOCK,input.LT(-1),"BLOCK");
-}
-    :   altpair ( '|' altpair )* -> ^( {blkRoot} altpair+ EOB["EOB"] )
-    ;
-
-alternative
-@init {
-	Token firstToken = input.LT(1);
-	Token prevToken = input.LT(-1); // either : or | I think
-}
-    :   element+ -> ^(ALT[firstToken,"ALT"] element+ EOA["EOA"])
-    |   -> ^(ALT[prevToken,"ALT"] EPSILON[prevToken,"EPSILON"] EOA["EOA"])
-    ;
-
-exceptionGroup
-	:	( exceptionHandler )+ ( finallyClause )?
-	|	finallyClause
-    ;
-
-exceptionHandler
-    :    'catch' ARG_ACTION ACTION -> ^('catch' ARG_ACTION ACTION)
-    ;
-
-finallyClause
-    :    'finally' ACTION -> ^('finally' ACTION)
-    ;
-
-element
-	:	id (labelOp='='|labelOp='+=') atom
-		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] ^($labelOp id atom) EOA["EOA"]) EOB["EOB"]))
-		|				-> ^($labelOp id atom)
-		)
-	|	id (labelOp='='|labelOp='+=') block
-		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] ^($labelOp id block) EOA["EOA"]) EOB["EOB"]))
-		|				-> ^($labelOp id block)
-		)
-	|	atom
-		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] atom EOA["EOA"]) EOB["EOB"]) )
-		|				-> atom
-		)
-	|	ebnf
-	|   ACTION
-	|   SEMPRED ( g='=>' -> GATED_SEMPRED[$g] | -> SEMPRED )
-	|   treeSpec
-		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] treeSpec EOA["EOA"]) EOB["EOB"]) )
-		|				-> treeSpec
-		)
-	;
-
-atom:   terminal
-	|	range 
-		(	(op='^'|op='!')	-> ^($op range)
-		|					-> range
-		)
-    |	notSet
-		(	(op='^'|op='!')	-> ^($op notSet)
-		|					-> notSet
-		)
-    |   RULE_REF ARG_ACTION?
-		(	(op='^'|op='!')	-> ^($op RULE_REF ARG_ACTION?)
-		|					-> ^(RULE_REF ARG_ACTION?)
-		)
-    ;
-
-notSet
-	:	'~'
-		(	notTerminal elementOptions?	-> ^('~' notTerminal elementOptions?)
-		|	block elementOptions?		-> ^('~' block elementOptions?)
-		)
-	;
-
-notTerminal
-	:   CHAR_LITERAL
-	|	TOKEN_REF
-	|	STRING_LITERAL
-	;
-	
-elementOptions
-	:	'<' qid '>'					 -> ^(OPTIONS qid)
-	|	'<' option (';' option)* '>' -> ^(OPTIONS option+)
-	;
-
-elementOption
-	:	id '=' optionValue -> ^('=' id optionValue)
-	;
-	
-treeSpec
-	:	'^(' element ( element )+ ')' -> ^(TREE_BEGIN element+)
-	;
-
-range!
-	:	c1=CHAR_LITERAL RANGE c2=CHAR_LITERAL elementOptions?
-		-> ^(CHAR_RANGE[$c1,".."] $c1 $c2 elementOptions?)
-	;
-
-terminal
-    :   (	CHAR_LITERAL elementOptions?    	  -> ^(CHAR_LITERAL elementOptions?)
-	    	// Args are only valid for lexer rules
-		|   TOKEN_REF ARG_ACTION? elementOptions? -> ^(TOKEN_REF ARG_ACTION? elementOptions?)
-		|   STRING_LITERAL elementOptions?		  -> ^(STRING_LITERAL elementOptions?)
-		|   '.' elementOptions?		 			  -> ^('.' elementOptions?)
-		)
-		(	'^'							-> ^('^' $terminal)
-		|	'!' 						-> ^('!' $terminal)
-		)?
-	;
-
-/** Matches EBNF blocks (and token sets via block rule) */
-ebnf
-@init {
-    Token firstToken = input.LT(1);
-}
-@after {
-	$ebnf.tree.getToken().setLine(firstToken.getLine());
-	$ebnf.tree.getToken().setCharPositionInLine(firstToken.getCharPositionInLine());
-}
-	:	block
-		(	op='?'	-> ^(OPTIONAL[op] block)
-		|	op='*'	-> ^(CLOSURE[op] block)
-		|	op='+'	-> ^(POSITIVE_CLOSURE[op] block)
-		|   '=>'	// syntactic predicate
-					-> {gtype==COMBINED_GRAMMAR &&
-					    Character.isUpperCase($rule::name.charAt(0))}?
-					   // if lexer rule in combined, leave as pred for lexer
-					   ^(SYNPRED["=>"] block)
-					// in real antlr tool, text for SYN_SEMPRED is predname
-					-> SYN_SEMPRED
-        |			-> block
-		)
-	;
-
-ebnfSuffix
-@init {
-	Token op = input.LT(1);
-}
-	:	'?'	-> OPTIONAL[op]
-  	|	'*' -> CLOSURE[op]
-   	|	'+' -> POSITIVE_CLOSURE[op]
-	;
-	
-
-
-// R E W R I T E  S Y N T A X
-
-rewrite
-@init {
-	Token firstToken = input.LT(1);
-}
-	:	(rew+='->' preds+=SEMPRED predicated+=rewrite_alternative)*
-		rew2='->' last=rewrite_alternative
-        -> ^($rew $preds $predicated)* ^($rew2 $last)
-	|
-	;
-
-rewrite_alternative
-options {backtrack=true;}
-	:	rewrite_template
-	|	rewrite_tree_alternative
-   	|   /* empty rewrite */ -> ^(ALT["ALT"] EPSILON["EPSILON"] EOA["EOA"])
-	;
-	
-rewrite_tree_block
-    :   lp='(' rewrite_tree_alternative ')'
-    	-> ^(BLOCK[$lp,"BLOCK"] rewrite_tree_alternative EOB[$lp,"EOB"])
-    ;
-
-rewrite_tree_alternative
-    :	rewrite_tree_element+ -> ^(ALT["ALT"] rewrite_tree_element+ EOA["EOA"])
-    ;
-
-rewrite_tree_element
-	:	rewrite_tree_atom
-	|	rewrite_tree_atom ebnfSuffix
-		-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] rewrite_tree_atom EOA["EOA"]) EOB["EOB"]))
-	|   rewrite_tree
-		(	ebnfSuffix
-			-> ^(ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] rewrite_tree EOA["EOA"]) EOB["EOB"]))
-		|	-> rewrite_tree
-		)
-	|   rewrite_tree_ebnf
-	;
-
-rewrite_tree_atom
-    :   CHAR_LITERAL
-	|   TOKEN_REF ARG_ACTION? -> ^(TOKEN_REF ARG_ACTION?) // for imaginary nodes
-    |   RULE_REF
-	|   STRING_LITERAL
-	|   d='$' id -> LABEL[$d,$id.text] // reference to a label in a rewrite rule
-	|	ACTION
-	;
-
-rewrite_tree_ebnf
-@init {
-    Token firstToken = input.LT(1);
-}
-@after {
-	$rewrite_tree_ebnf.tree.getToken().setLine(firstToken.getLine());
-	$rewrite_tree_ebnf.tree.getToken().setCharPositionInLine(firstToken.getCharPositionInLine());
-}
-	:	rewrite_tree_block ebnfSuffix -> ^(ebnfSuffix rewrite_tree_block)
-	;
-	
-rewrite_tree
-	:	'^(' rewrite_tree_atom rewrite_tree_element* ')'
-		-> ^(TREE_BEGIN rewrite_tree_atom rewrite_tree_element* )
-	;
-
-/** Build a tree for a template rewrite:
-      ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
-    where ARGLIST is always there even if no args exist.
-    ID can be "template" keyword.  If first child is ACTION then it's
-    an indirect template ref
-
-    -> foo(a={...}, b={...})
-    -> ({string-e})(a={...}, b={...})  // e evaluates to template name
-    -> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
-	-> {st-expr} // st-expr evaluates to ST
- */
-rewrite_template
-	:   // -> template(a={...},...) "..."    inline template
-		id lp='(' rewrite_template_args	')'
-		( str=DOUBLE_QUOTE_STRING_LITERAL | str=DOUBLE_ANGLE_STRING_LITERAL )
-		-> ^(TEMPLATE[$lp,"TEMPLATE"] id rewrite_template_args $str)
-
-	|	// -> foo(a={...}, ...)
-		rewrite_template_ref
-
-	|	// -> ({expr})(a={...}, ...)
-		rewrite_indirect_template_head
-
-	|	// -> {...}
-		ACTION
-	;
-
-/** -> foo(a={...}, ...) */
-rewrite_template_ref
-	:	id lp='(' rewrite_template_args	')'
-		-> ^(TEMPLATE[$lp,"TEMPLATE"] id rewrite_template_args)
-	;
-
-/** -> ({expr})(a={...}, ...) */
-rewrite_indirect_template_head
-	:	lp='(' ACTION ')' '(' rewrite_template_args ')'
-		-> ^(TEMPLATE[$lp,"TEMPLATE"] ACTION rewrite_template_args)
-	;
-
-rewrite_template_args
-	:	rewrite_template_arg (',' rewrite_template_arg)*
-		-> ^(ARGLIST rewrite_template_arg+)
-	|	-> ARGLIST
-	;
-
-rewrite_template_arg
-	:   id '=' ACTION -> ^(ARG[$id.start] id ACTION)
-	;
-
-qid :	id ('.' id)* ;
-	
-id	:	TOKEN_REF -> ID[$TOKEN_REF]
-	|	RULE_REF  -> ID[$RULE_REF]
-	;
-
-// L E X I C A L   R U L E S
-
-SL_COMMENT
- 	:	'//'
- 	 	(	' $ANTLR ' SRC // src directive
- 		|	~('\r'|'\n')*
-		)
-		'\r'? '\n'
-		{$channel=HIDDEN;}
-	;
-
-ML_COMMENT
-	:	'/*' {if (input.LA(1)=='*') $type=DOC_COMMENT; else $channel=HIDDEN;} .* '*/'
-	;
-
-CHAR_LITERAL
-	:	'\'' LITERAL_CHAR '\''
-	;
-
-STRING_LITERAL
-	:	'\'' LITERAL_CHAR LITERAL_CHAR* '\''
-	;
-
-fragment
-LITERAL_CHAR
-	:	ESC
-	|	~('\''|'\\')
-	;
-
-DOUBLE_QUOTE_STRING_LITERAL
-	:	'"' (ESC | ~('\\'|'"'))* '"'
-	;
-
-DOUBLE_ANGLE_STRING_LITERAL
-	:	'<<' .* '>>'
-	;
-
-fragment
-ESC	:	'\\'
-		(	'n'
-		|	'r'
-		|	't'
-		|	'b'
-		|	'f'
-		|	'"'
-		|	'\''
-		|	'\\'
-		|	'>'
-		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
-		|	. // unknown, leave as it is
-		)
-	;
-
-fragment
-XDIGIT :
-		'0' .. '9'
-	|	'a' .. 'f'
-	|	'A' .. 'F'
-	;
-
-INT	:	'0'..'9'+
-	;
-
-ARG_ACTION
-	:	NESTED_ARG_ACTION
-	;
-
-fragment
-NESTED_ARG_ACTION :
-	'['
-	(	options {greedy=false; k=1;}
-	:	NESTED_ARG_ACTION
-	|	ACTION_STRING_LITERAL
-	|	ACTION_CHAR_LITERAL
-	|	.
-	)*
-	']'
-	//{setText(getText().substring(1, getText().length()-1));}
-	;
-
-ACTION
-	:	NESTED_ACTION ( '?' {$type = SEMPRED;} )?
-	;
-
-fragment
-NESTED_ACTION :
-	'{'
-	(	options {greedy=false; k=2;}
-	:	NESTED_ACTION
-	|	SL_COMMENT
-	|	ML_COMMENT
-	|	ACTION_STRING_LITERAL
-	|	ACTION_CHAR_LITERAL
-	|	.
-	)*
-	'}'
-   ;
-
-fragment
-ACTION_CHAR_LITERAL
-	:	'\'' (ACTION_ESC|~('\\'|'\'')) '\''
-	;
-
-fragment
-ACTION_STRING_LITERAL
-	:	'"' (ACTION_ESC|~('\\'|'"'))* '"'
-	;
-
-fragment
-ACTION_ESC
-	:	'\\\''
-	|	'\\' '"' // ANTLR doesn't like: '\\"'
-	|	'\\' ~('\''|'"')
-	;
-
-TOKEN_REF
-	:	'A'..'Z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
-	;
-
-RULE_REF
-	:	'a'..'z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
-	;
-
-/** Match the start of an options section.  Don't allow normal
 *  action processing on the {...} as it's not an action.
- */
-OPTIONS
-	:	'options' WS_LOOP '{'
-	;
-	
-TOKENS
-	:	'tokens' WS_LOOP '{'
-	;
-
-/** Reset the file and line information; useful when the grammar
- *  has been generated so that errors are shown relative to the
- *  original file like the old C preprocessor used to do.
- */
-fragment
-SRC	:	'src' ' ' file=ACTION_STRING_LITERAL ' ' line=INT
-	;
-
-WS	:	(	' '
-		|	'\t'
-		|	'\r'? '\n'
-		)+
-		{$channel=HIDDEN;}
-	;
-
-fragment
-WS_LOOP
-	:	(	WS
-		|	SL_COMMENT
-		|	ML_COMMENT
-		)*
-	;
-
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3Tree.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3Tree.g
deleted file mode 100644
index f6b03d3..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3Tree.g
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2010 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** ANTLR v3 tree grammar to walk trees created by ANTLRv3.g */
-tree grammar ANTLRv3Tree;
-
-options {
-	tokenVocab = ANTLRv3;
-	ASTLabelType = CommonTree;
-}
-
-@header {
-package org.antlr.grammar.v3;
-}
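
For context, a tree grammar like this one is driven by wrapping an already-built AST in a CommonTreeNodeStream and invoking its top-level rule, the same pattern the ANTLRTreePrinter members earlier in this change use internally. A minimal sketch, assuming the AST comes from the parser generated from ANTLRv3.g (the wrapper class name is illustrative):

    import org.antlr.grammar.v3.ANTLRv3Tree;
    import org.antlr.runtime.RecognitionException;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeNodeStream;

    public class WalkAntlrV3Ast {
        // 'ast' is the tree produced by the ANTLRv3 parser's grammarDef rule.
        static void walk(CommonTree ast) throws RecognitionException {
            CommonTreeNodeStream nodes = new CommonTreeNodeStream(ast);
            ANTLRv3Tree walker = new ANTLRv3Tree(nodes);
            walker.grammarDef(); // walks/validates the tree against the rules below
        }
    }
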
-
-grammarDef
-    :   ^( grammarType ID DOC_COMMENT? optionsSpec? tokensSpec? attrScope* action* rule+ )
-    ;
-
-grammarType
-	:	LEXER_GRAMMAR
-    |	PARSER_GRAMMAR
-    |	TREE_GRAMMAR
-    |	COMBINED_GRAMMAR
-    ;
-
-tokensSpec
-	:	^(TOKENS tokenSpec+)
-	;
-
-tokenSpec
-	:	^('=' TOKEN_REF STRING_LITERAL)
-	|	^('=' TOKEN_REF CHAR_LITERAL)
-	|	TOKEN_REF
-	;
-
-attrScope
-	:	^('scope' ID ACTION)
-	;
-
-action
-	:	^('@' ID ID ACTION)
-	|	^('@' ID ACTION)
-	;
-
-optionsSpec
-	:	^(OPTIONS option+)
-	;
-
-option
-    :   qid // only allowed in element options
-    |	^('=' ID optionValue)
- 	;
- 	
-optionValue
-    :   ID
-    |   STRING_LITERAL
-    |   CHAR_LITERAL
-    |   INT
-    ;
-
-rule
-	:	^( RULE ID modifier? (^(ARG ARG_ACTION))? (^(RET ARG_ACTION))?
-	       throwsSpec? optionsSpec? ruleScopeSpec? ruleAction*
-	       altList
-	       exceptionGroup? EOR
-	     )
-	;
-
-modifier
-	:	'protected'|'public'|'private'|'fragment'
-	;
-
-/** Match stuff like @init {int i;} */
-ruleAction
-	:	^('@' ID ACTION)
-	;
-
-throwsSpec
-	:	^('throws' ID+)
-	;
-
-ruleScopeSpec
-	:	^('scope' ACTION)
-	|	^('scope' ACTION ID+)
-	|	^('scope' ID+)
-	;
-
-block
-    :   ^( BLOCK optionsSpec? (alternative rewrite)+ EOB )
-    ;
-
-altList
-    :   ^( BLOCK (alternative rewrite)+ EOB )
-    ;
-
-alternative
-    :   ^(ALT element+ EOA)
-    |   ^(ALT EPSILON EOA)
-    ;
-
-exceptionGroup
-	:	exceptionHandler+ finallyClause?
-	|	finallyClause
-    ;
-
-exceptionHandler
-    :    ^('catch' ARG_ACTION ACTION)
-    ;
-
-finallyClause
-    :    ^('finally' ACTION)
-    ;
-
-element
-	:	^(('='|'+=') ID block)
-	|	^(('='|'+=') ID atom)
-	|	atom
-	|	ebnf
-	|   ACTION
-	|   SEMPRED
-	|	GATED_SEMPRED
-	|   ^(TREE_BEGIN element+)
-	;
-
-atom:   ^(('^'|'!') atom)
-	|	^(CHAR_RANGE CHAR_LITERAL CHAR_LITERAL optionsSpec?)
-	|	^('~' notTerminal optionsSpec?)
-	|	^('~' block optionsSpec?)
-    |	^(RULE_REF ARG_ACTION)
-    |	RULE_REF
-    |   CHAR_LITERAL
-    |   ^(CHAR_LITERAL optionsSpec)
-    |	TOKEN_REF
-    |	^(TOKEN_REF optionsSpec)
-    |	^(TOKEN_REF ARG_ACTION optionsSpec)
-    |	^(TOKEN_REF ARG_ACTION)
-    |	STRING_LITERAL
-    |	^(STRING_LITERAL optionsSpec)
-    |	'.'
-    |	^('.' optionsSpec?)
-    ;
-
-/** Matches EBNF blocks (and token sets via block rule) */
-ebnf
-	:	^(SYNPRED block)
-	|	^(OPTIONAL block)
-  	|	^(CLOSURE block)
-   	|	^(POSITIVE_CLOSURE block)
-	|	SYN_SEMPRED
-	|	block
-	;
-
-notTerminal
-	:   CHAR_LITERAL
-	|	TOKEN_REF
-	|	STRING_LITERAL
-	;
-		
-// R E W R I T E  S Y N T A X
-
-rewrite
-	:	(^('->' SEMPRED rewrite_alternative))* ^('->' rewrite_alternative)
-	|
-	;
-
-rewrite_alternative
-	:	rewrite_template
-	|	rewrite_tree_alternative
-   	|   ^(ALT EPSILON EOA)
-	;
-	
-rewrite_tree_block
-    :   ^(BLOCK rewrite_tree_alternative EOB)
-    ;
-
-rewrite_tree_alternative
-    :	^(ALT rewrite_tree_element+ EOA)
-    ;
-
-rewrite_tree_element
-	:	rewrite_tree_atom
-	|	rewrite_tree
-	|   rewrite_tree_block
-	|   rewrite_tree_ebnf
-	;
-
-rewrite_tree_atom
-    :   CHAR_LITERAL
-	|   TOKEN_REF
-	|   ^(TOKEN_REF ARG_ACTION) // for imaginary nodes
-    |   RULE_REF
-	|   STRING_LITERAL
-	|   LABEL
-	|	ACTION
-	;
-
-rewrite_tree_ebnf
-	:	^(OPTIONAL rewrite_tree_block)
-  	|	^(CLOSURE rewrite_tree_block)
-   	|	^(POSITIVE_CLOSURE rewrite_tree_block)
-	;
-	
-rewrite_tree
-	:	^(TREE_BEGIN rewrite_tree_atom rewrite_tree_element* )
-	;
-
-rewrite_template
-	:   ^( TEMPLATE ID rewrite_template_args
-		   (DOUBLE_QUOTE_STRING_LITERAL | DOUBLE_ANGLE_STRING_LITERAL)
-		 )
-	|	rewrite_template_ref
-	|	rewrite_indirect_template_head
-	|	ACTION
-	;
-
-/** foo(a={...}, ...) */
-rewrite_template_ref
-	:	^(TEMPLATE ID rewrite_template_args)
-	;
-
-/** ({expr})(a={...}, ...) */
-rewrite_indirect_template_head
-	:	^(TEMPLATE ACTION rewrite_template_args)
-	;
-
-rewrite_template_args
-	:	^(ARGLIST rewrite_template_arg+)
-	|	ARGLIST
-	;
-
-rewrite_template_arg
-	:   ^(ARG ID ACTION)
-	;
-
-qid	:	ID ('.' ID)* ;
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ActionAnalysis.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ActionAnalysis.g
deleted file mode 100644
index ed8bb21..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ActionAnalysis.g
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2010 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** We need to set Rule.referencedPredefinedRuleAttributes before
- *  code generation.  This filter looks at an action in the context of
- *  its rule and outer alternative number and figures out which
- *  rules have predefined properties referenced.  I need this so I can
- *  remove unused labels.  It also tracks which labeled rules are
- *  referenced by actions.
- */
-lexer grammar ActionAnalysis;
-options {
-  filter=true;  // try all non-fragment rules in order specified
-}
-
-@header {
-package org.antlr.grammar.v3;
-import org.antlr.runtime.*;
-import org.antlr.tool.*;
-}
-
-@members {
-Rule enclosingRule;
-Grammar grammar;
-Token actionToken;
-int outerAltNum = 0;
-
-	public ActionAnalysis(Grammar grammar, String ruleName, GrammarAST actionAST)
-	{
-		this(new ANTLRStringStream(actionAST.token.getText()));
-		this.grammar = grammar;
-	    this.enclosingRule = grammar.getLocallyDefinedRule(ruleName);
-	    this.actionToken = actionAST.token;
-	    this.outerAltNum = actionAST.outerAltNum;
-	}
-
-public void analyze() {
-	// System.out.println("###\naction="+actionToken);
-	Token t;
-	do {
-		t = nextToken();
-	} while ( t.getType()!= Token.EOF );
-}
-}
-
-/**	$x.y	x is enclosing rule or rule ref or rule label
- *			y is a return value, parameter, or predefined property.
- */
-X_Y :	'$' x=ID '.' y=ID {enclosingRule!=null}?
-		{
-		AttributeScope scope = null;
-		String refdRuleName = null;
-		if ( $x.text.equals(enclosingRule.name) ) {
-			// ref to enclosing rule.
-			refdRuleName = $x.text;
-			scope = enclosingRule.getLocalAttributeScope($y.text);
-		}
-		else if ( enclosingRule.getRuleLabel($x.text)!=null ) {
-			// ref to rule label
-			Grammar.LabelElementPair pair = enclosingRule.getRuleLabel($x.text);
-			pair.actionReferencesLabel = true;
-			refdRuleName = pair.referencedRuleName;
-			Rule refdRule = grammar.getRule(refdRuleName);
-			if ( refdRule!=null ) {
-				scope = refdRule.getLocalAttributeScope($y.text);
-			}
-		}
-		else if ( enclosingRule.getRuleRefsInAlt(x.getText(), outerAltNum)!=null ) {
-			// ref to rule referenced in this alt
-			refdRuleName = $x.text;
-			Rule refdRule = grammar.getRule(refdRuleName);
-			if ( refdRule!=null ) {
-				scope = refdRule.getLocalAttributeScope($y.text);
-			}
-		}
-		if ( scope!=null &&
-			 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
-		{
-			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
-			//System.out.println("referenceRuleLabelPredefinedAttribute for "+refdRuleName);
-		}
-		}
-	;
-
-/** $x	x is an isolated rule label.  Just record that the label was referenced */
-X	:	'$' x=ID {enclosingRule!=null && enclosingRule.getRuleLabel($x.text)!=null}?
-		{
-			Grammar.LabelElementPair pair = enclosingRule.getRuleLabel($x.text);
-			pair.actionReferencesLabel = true;
-		}
-	;
-	
-/** $y	y is a return value, parameter, or predefined property of current rule */
-Y	:	'$' ID {enclosingRule!=null && enclosingRule.getLocalAttributeScope($ID.text)!=null}?
-		{
-			AttributeScope scope = enclosingRule.getLocalAttributeScope($ID.text);
-			if ( scope!=null &&
-				 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
-			{
-				grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
-				//System.out.println("referenceRuleLabelPredefinedAttribute for "+$ID.text);
-			}
-		}
-	;
-	
-fragment
-ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
-    ;
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ActionTranslator.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ActionTranslator.g
deleted file mode 100644
index 0aca8b8..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/ActionTranslator.g
+++ /dev/null
@@ -1,809 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2010 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-lexer grammar ActionTranslator;
-options {
-  filter=true;  // try all non-fragment rules in order specified
-  // output=template;  TODO: can we make tokens return templates somehow?
-}
-
-@header {
-package org.antlr.grammar.v3;
-import org.stringtemplate.v4.ST;
-import org.antlr.runtime.*;
-import org.antlr.tool.*;
-import org.antlr.codegen.*;
-
-import org.antlr.runtime.*;
-import java.util.List;
-import java.util.ArrayList;
-import org.antlr.grammar.v3.ANTLRParser;
-
-}
-
-@members {
-public List chunks = new ArrayList();
-Rule enclosingRule;
-int outerAltNum;
-Grammar grammar;
-CodeGenerator generator;
-Token actionToken;
-
-	public ActionTranslator(CodeGenerator generator,
-								 String ruleName,
-								 GrammarAST actionAST)
-	{
-		this(new ANTLRStringStream(actionAST.token.getText()));
-		this.generator = generator;
-		this.grammar = generator.grammar;
-	    this.enclosingRule = grammar.getLocallyDefinedRule(ruleName);
-	    this.actionToken = actionAST.token;
-	    this.outerAltNum = actionAST.outerAltNum;
-	}
-
-	public ActionTranslator(CodeGenerator generator,
-								 String ruleName,
-								 Token actionToken,
-								 int outerAltNum)
-	{
-		this(new ANTLRStringStream(actionToken.getText()));
-		this.generator = generator;
-		grammar = generator.grammar;
-	    this.enclosingRule = grammar.getRule(ruleName);
-	    this.actionToken = actionToken;
-		this.outerAltNum = outerAltNum;
-	}
-
-/** Return a list of strings and ST objects that
- *  represent the translated action.
- */
-public List translateToChunks() {
-	// System.out.println("###\naction="+action);
-	Token t;
-	do {
-		t = nextToken();
-	} while ( t.getType()!= Token.EOF );
-	return chunks;
-}
-
-public String translate() {
-	List theChunks = translateToChunks();
-	//System.out.println("chunks="+a.chunks);
-	StringBuffer buf = new StringBuffer();
-	for (int i = 0; i < theChunks.size(); i++) {
-		Object o = (Object) theChunks.get(i);
-		if ( o instanceof ST ) buf.append(((ST)o).render());
-		else buf.append(o);
-	}
-	//System.out.println("translated: "+buf.toString());
-	return buf.toString();
-}
-
-public List translateAction(String action) {
-	String rname = null;
-	if ( enclosingRule!=null ) {
-		rname = enclosingRule.name;
-	}
-	ActionTranslator translator =
-		new ActionTranslator(generator,
-								  rname,
-								  new CommonToken(ANTLRParser.ACTION,action),outerAltNum);
-    return translator.translateToChunks();
-}
-
-public boolean isTokenRefInAlt(String id) {
-    return enclosingRule.getTokenRefsInAlt(id, outerAltNum)!=null;
-}
-public boolean isRuleRefInAlt(String id) {
-    return enclosingRule.getRuleRefsInAlt(id, outerAltNum)!=null;
-}
-public Grammar.LabelElementPair getElementLabel(String id) {
-    return enclosingRule.getLabel(id);
-}
-
-public void checkElementRefUniqueness(String ref, boolean isToken) {
-		List refs = null;
-		if ( isToken ) {
-		    refs = enclosingRule.getTokenRefsInAlt(ref, outerAltNum);
-		}
-		else {
-		    refs = enclosingRule.getRuleRefsInAlt(ref, outerAltNum);
-		}
-		if ( refs!=null && refs.size()>1 ) {
-			ErrorManager.grammarError(ErrorManager.MSG_NONUNIQUE_REF,
-									  grammar,
-									  actionToken,
-									  ref);
-		}
-}
-
-/** For \$rulelabel.name, return the Attribute found for name.  It
- *  will be a predefined property or a return value.
- */
-public Attribute getRuleLabelAttribute(String ruleName, String attrName) {
-	Rule r = grammar.getRule(ruleName);
-	AttributeScope scope = r.getLocalAttributeScope(attrName);
-	if ( scope!=null && !scope.isParameterScope ) {
-		return scope.getAttribute(attrName);
-	}
-	return null;
-}
-
-AttributeScope resolveDynamicScope(String scopeName) {
-	if ( grammar.getGlobalScope(scopeName)!=null ) {
-		return grammar.getGlobalScope(scopeName);
-	}
-	Rule scopeRule = grammar.getRule(scopeName);
-	if ( scopeRule!=null ) {
-		return scopeRule.ruleScope;
-	}
-	return null; // not a valid dynamic scope
-}
-
-protected ST template(String name) {
-	ST st = generator.getTemplates().getInstanceOf(name);
-	chunks.add(st);
-	return st;
-}
-
-
-}
-
-/**	$x.y	x is enclosing rule, y is a return value, parameter, or
- * 			predefined property.
- *
- * 			r[int i] returns [int j]
- * 				:	{$r.i, $r.j, $r.start, $r.stop, $r.st, $r.tree}
- * 				;
- */
-SET_ENCLOSING_RULE_SCOPE_ATTR
-	:	'$' x=ID '.' y=ID WS? '=' expr=ATTR_VALUE_EXPR ';'
-							{enclosingRule!=null &&
-	                         $x.text.equals(enclosingRule.name) &&
-	                         enclosingRule.getLocalAttributeScope($y.text)!=null}?
-		//{System.out.println("found \$rule.attr");}
-		{
-		ST st = null;
-		AttributeScope scope = enclosingRule.getLocalAttributeScope($y.text);
-		if ( scope.isPredefinedRuleScope ) {
-			if ( $y.text.equals("st") || $y.text.equals("tree") ) {
-				st = template("ruleSetPropertyRef_"+$y.text);
-				grammar.referenceRuleLabelPredefinedAttribute($x.text);
-				st.add("scope", $x.text);
-				st.add("attr", $y.text);
-				st.add("expr", translateAction($expr.text));
-			} else {
-				ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
-										  grammar,
-										  actionToken,
-										  $x.text,
-										  $y.text);
-			}
-		}
-	    else if ( scope.isPredefinedLexerRuleScope ) {
-	    	// this is a better message to emit than the previous one...
-			ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
-									  grammar,
-									  actionToken,
-									  $x.text,
-									  $y.text);
-	    }
-		else if ( scope.isParameterScope ) {
-			st = template("parameterSetAttributeRef");
-			st.add("attr", scope.getAttribute($y.text));
-			st.add("expr", translateAction($expr.text));
-		}
-		else { // must be return value
-			st = template("returnSetAttributeRef");
-			st.add("ruleDescriptor", enclosingRule);
-			st.add("attr", scope.getAttribute($y.text));
-			st.add("expr", translateAction($expr.text));
-		}
-		}
-	;
-ENCLOSING_RULE_SCOPE_ATTR
-	:	'$' x=ID '.' y=ID	{enclosingRule!=null &&
-	                         $x.text.equals(enclosingRule.name) &&
-	                         enclosingRule.getLocalAttributeScope($y.text)!=null}?
-		//{System.out.println("found \$rule.attr");}
-		{
-		if ( isRuleRefInAlt($x.text)  ) {
-			ErrorManager.grammarError(ErrorManager.MSG_RULE_REF_AMBIG_WITH_RULE_IN_ALT,
-									  grammar,
-									  actionToken,
-									  $x.text);
-		}
-		ST st = null;
-		AttributeScope scope = enclosingRule.getLocalAttributeScope($y.text);
-		if ( scope.isPredefinedRuleScope ) {
-			st = template("rulePropertyRef_"+$y.text);
-			grammar.referenceRuleLabelPredefinedAttribute($x.text);
-			st.add("scope", $x.text);
-			st.add("attr", $y.text);
-		}
-	    else if ( scope.isPredefinedLexerRuleScope ) {
-	    	// perhaps not the most precise error message to use, but...
-			ErrorManager.grammarError(ErrorManager.MSG_RULE_HAS_NO_ARGS,
-									  grammar,
-									  actionToken,
-									  $x.text);
-	    }
-		else if ( scope.isParameterScope ) {
-			st = template("parameterAttributeRef");
-			st.add("attr", scope.getAttribute($y.text));
-		}
-		else { // must be return value
-			st = template("returnAttributeRef");
-			st.add("ruleDescriptor", enclosingRule);
-			st.add("attr", scope.getAttribute($y.text));
-		}
-		}
-	;
-
-/** Setting $tokenlabel.attr or $tokenref.attr where attr is a predefined property of a token is an error. */
-SET_TOKEN_SCOPE_ATTR
-	:	'$' x=ID '.' y=ID WS? '='
-							 {enclosingRule!=null && input.LA(1)!='=' &&
-	                         (enclosingRule.getTokenLabel($x.text)!=null||
-	                          isTokenRefInAlt($x.text)) &&
-	                         AttributeScope.tokenScope.getAttribute($y.text)!=null}?
-		//{System.out.println("found \$tokenlabel.attr or \$tokenref.attr");}
-		{
-		ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
-								  grammar,
-								  actionToken,
-								  $x.text,
-								  $y.text);
-		}
-	;
-
-/** $tokenlabel.attr or $tokenref.attr where attr is a predefined property of a token.
- *  If in lexer grammar, only translate for strings and tokens (rule refs)
- */
-TOKEN_SCOPE_ATTR
-	:	'$' x=ID '.' y=ID	{enclosingRule!=null &&
-	                         (enclosingRule.getTokenLabel($x.text)!=null||
-	                          isTokenRefInAlt($x.text)) &&
-	                         AttributeScope.tokenScope.getAttribute($y.text)!=null &&
-	                         (grammar.type!=Grammar.LEXER ||
-	                         getElementLabel($x.text).elementRef.token.getType()==ANTLRParser.TOKEN_REF ||
-	                         getElementLabel($x.text).elementRef.token.getType()==ANTLRParser.STRING_LITERAL)}?
-		// {System.out.println("found \$tokenlabel.attr or \$tokenref.attr");}
-		{
-		String label = $x.text;
-		if ( enclosingRule.getTokenLabel($x.text)==null ) {
-			// \$tokenref.attr  gotta get old label or compute new one
-			checkElementRefUniqueness($x.text, true);
-			label = enclosingRule.getElementLabel($x.text, outerAltNum, generator);
-			if ( label==null ) {
-				ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
-										  grammar,
-										  actionToken,
-										  "\$"+$x.text+"."+$y.text);
-				label = $x.text;
-			}
-		}
-		ST st = template("tokenLabelPropertyRef_"+$y.text);
-		st.add("scope", label);
-		st.add("attr", AttributeScope.tokenScope.getAttribute($y.text));
-		}
-	;
-
-/** Setting $rulelabel.attr or $ruleref.attr where attr is a predefined property is an error.
- *  This must also fail if we try to access a local attribute's field, like $tree.scope = localObject.
- *  That must be handled by LOCAL_ATTR below. ANTLR only concerns itself with the top-level scope
- *  attributes declared in scope {} or parameters, return values and the like.
- */
-SET_RULE_SCOPE_ATTR
-@init {
-Grammar.LabelElementPair pair=null;
-String refdRuleName=null;
-}
-	:	'$' x=ID '.' y=ID WS? '=' {enclosingRule!=null && input.LA(1)!='='}?
-		{
-		pair = enclosingRule.getRuleLabel($x.text);
-		refdRuleName = $x.text;
-		if ( pair!=null ) {
-			refdRuleName = pair.referencedRuleName;
-		}
-		}
-		// supercomplicated because I can't exec the above action.
-		// This asserts that it's a label or a ref to a rule, and proceeds only if the attribute
-		// is valid for that rule's scope
-		{(enclosingRule.getRuleLabel($x.text)!=null || isRuleRefInAlt($x.text)) &&
-	      getRuleLabelAttribute(enclosingRule.getRuleLabel($x.text)!=null?enclosingRule.getRuleLabel($x.text).referencedRuleName:$x.text,$y.text)!=null}?
-		//{System.out.println("found set \$rulelabel.attr or \$ruleref.attr: "+$x.text+"."+$y.text);}
-		{
-		ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
-								  grammar,
-								  actionToken,
-								  $x.text,
-								  $y.text);
-		}
-	;
-
-/** $rulelabel.attr or $ruleref.attr where attr is a predefined property */
-RULE_SCOPE_ATTR
-@init {
-Grammar.LabelElementPair pair=null;
-String refdRuleName=null;
-}
-	:	'$' x=ID '.' y=ID {enclosingRule!=null}?
-		{
-		pair = enclosingRule.getRuleLabel($x.text);
-		refdRuleName = $x.text;
-		if ( pair!=null ) {
-			refdRuleName = pair.referencedRuleName;
-		}
-		}
-		// supercomplicated because I can't exec the above action.
-		// This asserts that it's a label or a ref to a rule, and proceeds only if the attribute
-		// is valid for that rule's scope
-		{(enclosingRule.getRuleLabel($x.text)!=null || isRuleRefInAlt($x.text)) &&
-	      getRuleLabelAttribute(enclosingRule.getRuleLabel($x.text)!=null?enclosingRule.getRuleLabel($x.text).referencedRuleName:$x.text,$y.text)!=null}?
-		//{System.out.println("found \$rulelabel.attr or \$ruleref.attr: "+$x.text+"."+$y.text);}
-		{
-		String label = $x.text;
-		if ( pair==null ) {
-			// \$ruleref.attr  gotta get old label or compute new one
-			checkElementRefUniqueness($x.text, false);
-			label = enclosingRule.getElementLabel($x.text, outerAltNum, generator);
-			if ( label==null ) {
-				ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
-										  grammar,
-										  actionToken,
-										  "\$"+$x.text+"."+$y.text);
-				label = $x.text;
-			}
-		}
-		ST st;
-		Rule refdRule = grammar.getRule(refdRuleName);
-		AttributeScope scope = refdRule.getLocalAttributeScope($y.text);
-		if ( scope.isPredefinedRuleScope ) {
-			st = template("ruleLabelPropertyRef_"+$y.text);
-			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
-			st.add("scope", label);
-			st.add("attr", $y.text);
-		}
-		else if ( scope.isPredefinedLexerRuleScope ) {
-			st = template("lexerRuleLabelPropertyRef_"+$y.text);
-			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
-			st.add("scope", label);
-			st.add("attr", $y.text);
-		}
-		else if ( scope.isParameterScope ) {
-			// TODO: error!
-		}
-		else {
-			st = template("ruleLabelRef");
-			st.add("referencedRule", refdRule);
-			st.add("scope", label);
-			st.add("attr", scope.getAttribute($y.text));
-		}
-		}
-	;
-
-
-/** $label	either a token label or token/rule list label like label+=expr */
-LABEL_REF
-	:	'$' ID {enclosingRule!=null &&
-	            getElementLabel($ID.text)!=null &&
-		        enclosingRule.getRuleLabel($ID.text)==null}?
-		// {System.out.println("found \$label");}
-		{
-		ST st;
-		Grammar.LabelElementPair pair = getElementLabel($ID.text);
-		if ( pair.type==Grammar.RULE_LIST_LABEL ||
-             pair.type==Grammar.TOKEN_LIST_LABEL ||
-             pair.type==Grammar.WILDCARD_TREE_LIST_LABEL )
-        {
-			st = template("listLabelRef");
-		}
-		else {
-			st = template("tokenLabelRef");
-		}
-		st.add("label", $ID.text);
-		}
-	;
-
-/** $tokenref in a non-lexer grammar */
-ISOLATED_TOKEN_REF
-	:	'$' ID	{grammar.type!=Grammar.LEXER && enclosingRule!=null && isTokenRefInAlt($ID.text)}?
-		//{System.out.println("found \$tokenref");}
-		{
-		String label = enclosingRule.getElementLabel($ID.text, outerAltNum, generator);
-		checkElementRefUniqueness($ID.text, true);
-		if ( label==null ) {
-			ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
-									  grammar,
-									  actionToken,
-									  $ID.text);
-		}
-		else {
-			ST st = template("tokenLabelRef");
-			st.add("label", label);
-		}
-		}
-	;
-
-/** $lexerruleref from within the lexer */
-ISOLATED_LEXER_RULE_REF
-	:	'$' ID	{grammar.type==Grammar.LEXER &&
-	             enclosingRule!=null &&
-	             isRuleRefInAlt($ID.text)}?
-		//{System.out.println("found \$lexerruleref");}
-		{
-		String label = enclosingRule.getElementLabel($ID.text, outerAltNum, generator);
-		checkElementRefUniqueness($ID.text, false);
-		if ( label==null ) {
-			ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
-									  grammar,
-									  actionToken,
-									  $ID.text);
-		}
-		else {
-			ST st = template("lexerRuleLabel");
-			st.add("label", label);
-		}
-		}
-	;
-
-/**  $y 	return value, parameter, predefined rule property, or token/rule
- *          reference within enclosing rule's outermost alt.
- *          y must be a "local" reference; i.e., it must be referring to
- *          something defined within the enclosing rule.
- *
- * 			r[int i] returns [int j]
- * 				:	{$i, $j, $start, $stop, $st, $tree}
- *              ;
- *
- *	TODO: this might get the dynamic scope's elements too.!!!!!!!!!
- */
-SET_LOCAL_ATTR
-	:	'$' ID WS? '=' expr=ATTR_VALUE_EXPR ';' {enclosingRule!=null
-													&& enclosingRule.getLocalAttributeScope($ID.text)!=null
-													&& !enclosingRule.getLocalAttributeScope($ID.text).isPredefinedLexerRuleScope}?
-		//{System.out.println("found set \$localattr");}
-		{
-		ST st;
-		AttributeScope scope = enclosingRule.getLocalAttributeScope($ID.text);
-		if ( scope.isPredefinedRuleScope ) {
-			if ($ID.text.equals("tree") || $ID.text.equals("st")) {
-				st = template("ruleSetPropertyRef_"+$ID.text);
-				grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
-				st.add("scope", enclosingRule.name);
-				st.add("attr", $ID.text);
-				st.add("expr", translateAction($expr.text));
-			} else {
-				ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
-										 grammar,
-										 actionToken,
-										 $ID.text,
-										 "");
-			}
-		}
-		else if ( scope.isParameterScope ) {
-			st = template("parameterSetAttributeRef");
-			st.add("attr", scope.getAttribute($ID.text));
-			st.add("expr", translateAction($expr.text));
-		}
-		else {
-			st = template("returnSetAttributeRef");
-			st.add("ruleDescriptor", enclosingRule);
-			st.add("attr", scope.getAttribute($ID.text));
-			st.add("expr", translateAction($expr.text));
-			}
-		}
-	;
-LOCAL_ATTR
-	:	'$' ID {enclosingRule!=null && enclosingRule.getLocalAttributeScope($ID.text)!=null}?
-		//{System.out.println("found \$localattr");}
-		{
-		ST st;
-		AttributeScope scope = enclosingRule.getLocalAttributeScope($ID.text);
-		if ( scope.isPredefinedRuleScope ) {
-			st = template("rulePropertyRef_"+$ID.text);
-			grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
-			st.add("scope", enclosingRule.name);
-			st.add("attr", $ID.text);
-		}
-		else if ( scope.isPredefinedLexerRuleScope ) {
-			st = template("lexerRulePropertyRef_"+$ID.text);
-			st.add("scope", enclosingRule.name);
-			st.add("attr", $ID.text);
-		}
-		else if ( scope.isParameterScope ) {
-			st = template("parameterAttributeRef");
-			st.add("attr", scope.getAttribute($ID.text));
-		}
-		else {
-			st = template("returnAttributeRef");
-			st.add("ruleDescriptor", enclosingRule);
-			st.add("attr", scope.getAttribute($ID.text));
-		}
-		}
-	;
-
-/**	$x::y	the only way to access the attributes within a dynamic scope
- * 			regardless of whether or not you are in the defining rule.
- *
- * 			scope Symbols { List names; }
- * 			r
- * 			scope {int i;}
- * 			scope Symbols;
- * 				:	{$r::i=3;} s {$Symbols::names;}
- * 				;
- * 			s	:	{$r::i; $Symbols::names;}
- * 				;
- */
-SET_DYNAMIC_SCOPE_ATTR
-	:	'$' x=ID '::' y=ID WS? '=' expr=ATTR_VALUE_EXPR ';'
-						   {resolveDynamicScope($x.text)!=null &&
-						     resolveDynamicScope($x.text).getAttribute($y.text)!=null}?
-		//{System.out.println("found set \$scope::attr "+ $x.text + "::" + $y.text + " to " + $expr.text);}
-		{
-		AttributeScope scope = resolveDynamicScope($x.text);
-		if ( scope!=null ) {
-			ST st = template("scopeSetAttributeRef");
-			st.add("scope", $x.text);
-			st.add("attr",  scope.getAttribute($y.text));
-			st.add("expr",  translateAction($expr.text));
-		}
-		else {
-			// error: invalid dynamic attribute
-		}
-		}
-	;
-
-DYNAMIC_SCOPE_ATTR
-	:	'$' x=ID '::' y=ID
-						   {resolveDynamicScope($x.text)!=null &&
-						     resolveDynamicScope($x.text).getAttribute($y.text)!=null}?
-		//{System.out.println("found \$scope::attr "+ $x.text + "::" + $y.text);}
-		{
-		AttributeScope scope = resolveDynamicScope($x.text);
-		if ( scope!=null ) {
-			ST st = template("scopeAttributeRef");
-			st.add("scope", $x.text);
-			st.add("attr",  scope.getAttribute($y.text));
-		}
-		else {
-			// error: invalid dynamic attribute
-		}
-		}
-	;
-
-
-ERROR_SCOPED_XY
-	:	'$' x=ID '::' y=ID
-		{
-		chunks.add(getText());
-		generator.issueInvalidScopeError($x.text,$y.text,
-		                                 enclosingRule,actionToken,
-		                                 outerAltNum);		
-		}
-	;
-	
-/**		To access deeper (than top of stack) scopes, use the notation:
- *
- * 		$x[-1]::y previous (just under top of stack)
- * 		$x[-i]::y top of stack - i where the '-' MUST BE PRESENT;
- * 				  i.e., i cannot simply be negative without the '-' sign!
- * 		$x[i]::y  absolute index i (0..size-1)
- * 		$x[0]::y  is the absolute 0 indexed element (bottom of the stack)
- */
-DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
-	:	'$' x=ID '[' '-' expr=SCOPE_INDEX_EXPR ']' '::' y=ID
-		// {System.out.println("found \$scope[-...]::attr");}
-		{
-		ST st = template("scopeAttributeRef");
-		st.add("scope",    $x.text);
-		st.add("attr",     resolveDynamicScope($x.text).getAttribute($y.text));
-		st.add("negIndex", $expr.text);
-		}		
-	;
-
-DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
-	:	'$' x=ID '[' expr=SCOPE_INDEX_EXPR ']' '::' y=ID 
-		// {System.out.println("found \$scope[...]::attr");}
-		{
-		ST st = template("scopeAttributeRef");
-		st.add("scope", $x.text);
-		st.add("attr",  resolveDynamicScope($x.text).getAttribute($y.text));
-		st.add("index", $expr.text);
-		}		
-	;
-
-fragment
-SCOPE_INDEX_EXPR
-	:	(~']')+
-	;
-	
-/** $r		y is a rule's dynamic scope or a global shared scope.
- * 			Isolated $rulename is not allowed unless it has a dynamic scope *and*
- * 			there is no reference to rulename in the enclosing alternative,
- * 			which would be ambiguous.  See TestAttributes.testAmbiguousRuleRef()
- */
-ISOLATED_DYNAMIC_SCOPE
-	:	'$' ID {resolveDynamicScope($ID.text)!=null}?
-		// {System.out.println("found isolated \$scope where scope is a dynamic scope");}
-		{
-		ST st = template("isolatedDynamicScopeRef");
-		st.add("scope", $ID.text);
-		}		
-	;
-	
-// antlr.g then codegen.g does these first two currently.
-// don't want to duplicate that code.
-
-/** %foo(a={},b={},...) ctor */
-TEMPLATE_INSTANCE
-	:	'%' ID '(' ( WS? ARG (',' WS? ARG)* WS? )? ')'
-		// {System.out.println("found \%foo(args)");}
-		{
-		String action = getText().substring(1,getText().length());
-		String ruleName = "<outside-of-rule>";
-		if ( enclosingRule!=null ) {
-			ruleName = enclosingRule.name;
-		}
-		ST st =
-			generator.translateTemplateConstructor(ruleName,
-												   outerAltNum,
-												   actionToken,
-												   action);
-		if ( st!=null ) {
-			chunks.add(st);
-		}
-		}
-	;
-
-/** %({name-expr})(a={},...) indirect template ctor reference */
-INDIRECT_TEMPLATE_INSTANCE
-	:	'%' '(' ACTION ')' '(' ( WS? ARG (',' WS? ARG)* WS? )? ')'
-		// {System.out.println("found \%({...})(args)");}
-		{
-		String action = getText().substring(1,getText().length());
-		ST st =
-			generator.translateTemplateConstructor(enclosingRule.name,
-												   outerAltNum,
-												   actionToken,
-												   action);
-		chunks.add(st);
-		}
-	;
-
-fragment
-ARG	:	ID '=' ACTION
-	;
-
-/**	%{expr}.y = z; set template attribute y of ST-typed expr to z */
-SET_EXPR_ATTRIBUTE
-	:	'%' a=ACTION '.' ID WS? '=' expr=ATTR_VALUE_EXPR ';'
-		// {System.out.println("found \%{expr}.y = z;");}
-		{
-		ST st = template("actionSetAttribute");
-		String action = $a.text;
-		action = action.substring(1,action.length()-1); // stuff inside {...}
-		st.add("st", translateAction(action));
-		st.add("attrName", $ID.text);
-		st.add("expr", translateAction($expr.text));
-		}
-	;
-	
-/*    %x.y = z; set template attribute y of x (always set never get attr)
- *              to z [languages like python without ';' must still use the
- *              ';' which the code generator is free to remove during code gen]
- */
-SET_ATTRIBUTE
-	:	'%' x=ID '.' y=ID WS? '=' expr=ATTR_VALUE_EXPR ';'
-		// {System.out.println("found \%x.y = z;");}
-		{
-		ST st = template("actionSetAttribute");
-		st.add("st", $x.text);
-		st.add("attrName", $y.text);
-		st.add("expr", translateAction($expr.text));
-		}
-	;
-
-/** Don't allow an = as first char to prevent $x == 3; kind of stuff. */
-fragment
-ATTR_VALUE_EXPR
-	:	~'=' (~';')*
-	;
-	
-/** %{string-expr} anonymous template from string expr */
-TEMPLATE_EXPR
-	:	'%' a=ACTION
-		// {System.out.println("found \%{expr}");}
-		{
-		ST st = template("actionStringConstructor");
-		String action = $a.text;
-		action = action.substring(1,action.length()-1); // stuff inside {...}
-		st.add("stringExpr", translateAction(action));
-		}
-	;
-	
-fragment
-ACTION
-	:	'{' (options {greedy=false;}:.)* '}'
-	;
-	
-ESC :   '\\' '$' {chunks.add("\$");}
-	|	'\\' '%' {chunks.add("\%");}
-	|	'\\' ~('$'|'%') {chunks.add(getText());}
-    ;       
-
-ERROR_XY
-	:	'$' x=ID '.' y=ID
-		{
-		chunks.add(getText());
-		generator.issueInvalidAttributeError($x.text,$y.text,
-		                                     enclosingRule,actionToken,
-		                                     outerAltNum);
-		}
-	;
-	
-ERROR_X
-	:	'$' x=ID
-		{
-		chunks.add(getText());
-		generator.issueInvalidAttributeError($x.text,
-		                                     enclosingRule,actionToken,
-		                                     outerAltNum);
-		}
-	;
-	
-UNKNOWN_SYNTAX
-	:	'$'
-		{
-		chunks.add(getText());
-		// shouldn't need an error here.  Just accept \$ if it doesn't look like anything
-		}
-	|	'%' (ID|'.'|'('|')'|','|'{'|'}'|'"')*
-		{
-		chunks.add(getText());
-		ErrorManager.grammarError(ErrorManager.MSG_INVALID_TEMPLATE_ACTION,
-								  grammar,
-								  actionToken,
-								  getText());
-		}
-	;
-
-TEXT:	~('$'|'%'|'\\')+ {chunks.add(getText());}
-	;
-	
-fragment
-ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
-    ;
-
-fragment
-INT :	'0'..'9'+
-	;
-
-fragment
-WS	:	(' '|'\t'|'\n'|'\r')+
-	;
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/AssignTokenTypesWalker.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/AssignTokenTypesWalker.g
deleted file mode 100644
index 4d35c64..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/AssignTokenTypesWalker.g
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2011 Terence Parr
- All rights reserved.
-
- Grammar conversion to ANTLR v3:
- Copyright (c) 2011 Sam Harwell
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-	notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-	notice, this list of conditions and the following disclaimer in the
-	documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-	derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** [Warning: TJP says that this is probably out of date as of 11/19/2005,
- *   but since it's probably still useful, I'll leave it in.  I don't have the energy
- *   to update at the moment.]
- *
- *  Compute the token types for all literals and rules, etc.  There are
- *  a few different cases to consider for grammar types and a few situations
- *  within.
- *
- *  CASE 1 : pure parser grammar
- *	a) Any reference to a token gets a token type.
- *  b) The tokens section may alias a token name to a string or char
- *
- *  CASE 2 : pure lexer grammar
- *  a) Import token vocabulary if available. Set token types for any new tokens
- *     to values above last imported token type
- *  b) token rule definitions get token types if not already defined
- *  c) literals do NOT get token types
- *
- *  CASE 3 : merged parser / lexer grammar
- *	a) Any char or string literal gets a token type in a parser rule
- *  b) Any reference to a token gets a token type if not referencing
- *     a fragment lexer rule
- *  c) The tokens section may alias a token name to a string or char
- *     which must add a rule to the lexer
- *  d) token rule definitions get token types if not already defined
- *  e) token rule definitions may also alias a token name to a literal.
- *     E.g., Rule 'FOR : "for";' will alias FOR to "for" in the sense that
- *     references to either in the parser grammar will yield the token type
- *
- *  What this pass does:
- *
- *  0. Collects basic info about the grammar like grammar name and type;
- *     Oh, I have to go get the options in case they affect the token types.
- *     E.g., tokenVocab option.
- *     Imports any token vocab name/type pairs into a local hashtable.
- *  1. Finds a list of all literals and token names.
- *  2. Finds a list of all token name rule definitions;
- *     no token rules implies pure parser.
- *  3. Finds a list of all simple token rule defs of form "<NAME> : <literal>;"
- *     and aliases them.
- *  4. Walks the token names table and assigns types to any unassigned
- *  5. Walks aliases and assigns types to referenced literals
- *  6. Walks literals, assigning types if untyped
- *  7. Informs the Grammar object of the type definitions such as:
- *     g.defineToken(<charliteral>, ttype);
- *     g.defineToken(<stringliteral>, ttype);
- *     g.defineToken(<tokenID>, ttype);
- *     where some of the ttype values will be the same for aliased tokens.
- */
-tree grammar AssignTokenTypesWalker;
-
-options
-{
-	tokenVocab = ANTLR;
-	ASTLabelType = GrammarAST;
-}
-
-@header {
-package org.antlr.grammar.v3;
-
-import java.util.*;
-import org.antlr.analysis.*;
-import org.antlr.misc.*;
-import org.antlr.tool.*;
-
-import org.antlr.runtime.BitSet;
-}
-
-@members {
-protected Grammar grammar;
-protected String currentRuleName;
-
-protected static GrammarAST stringAlias;
-protected static GrammarAST charAlias;
-protected static GrammarAST stringAlias2;
-protected static GrammarAST charAlias2;
-
-@Override
-public void reportError(RecognitionException ex)
-{
-    Token token = null;
-    if (ex instanceof MismatchedTokenException) {
-        token = ((MismatchedTokenException)ex).token;
-    } else if (ex instanceof NoViableAltException) {
-        token = ((NoViableAltException)ex).token;
-    }
-
-    ErrorManager.syntaxError(
-        ErrorManager.MSG_SYNTAX_ERROR,
-        grammar,
-        token,
-        "assign.types: " + ex.toString(),
-        ex);
-}
-
-protected void initASTPatterns()
-{
-    TreeAdaptor adaptor = new ANTLRParser.grammar_Adaptor(null);
-
-    /*
-     * stringAlias = ^(BLOCK[] ^(ALT[] STRING_LITERAL[] EOA[]) EOB[])
-     */
-    stringAlias = (GrammarAST)adaptor.create( BLOCK, "BLOCK" );
-    {
-        GrammarAST alt = (GrammarAST)adaptor.create( ALT, "ALT" );
-        adaptor.addChild( alt, adaptor.create( STRING_LITERAL, "STRING_LITERAL" ) );
-        adaptor.addChild( alt, adaptor.create( EOA, "EOA" ) );
-        adaptor.addChild( stringAlias, alt );
-    }
-    adaptor.addChild( stringAlias, adaptor.create( EOB, "EOB" ) );
-
-    /*
-     * charAlias = ^(BLOCK[] ^(ALT[] CHAR_LITERAL[] EOA[]) EOB[])
-     */
-    charAlias = (GrammarAST)adaptor.create( BLOCK, "BLOCK" );
-    {
-        GrammarAST alt = (GrammarAST)adaptor.create( ALT, "ALT" );
-        adaptor.addChild( alt, adaptor.create( CHAR_LITERAL, "CHAR_LITERAL" ) );
-        adaptor.addChild( alt, adaptor.create( EOA, "EOA" ) );
-        adaptor.addChild( charAlias, alt );
-    }
-    adaptor.addChild( charAlias, adaptor.create( EOB, "EOB" ) );
-
-    /*
-     * stringAlias2 = ^(BLOCK[] ^(ALT[] STRING_LITERAL[] ACTION[] EOA[]) EOB[])
-     */
-    stringAlias2 = (GrammarAST)adaptor.create( BLOCK, "BLOCK" );
-    {
-        GrammarAST alt = (GrammarAST)adaptor.create( ALT, "ALT" );
-        adaptor.addChild( alt, adaptor.create( STRING_LITERAL, "STRING_LITERAL" ) );
-        adaptor.addChild( alt, adaptor.create( ACTION, "ACTION" ) );
-        adaptor.addChild( alt, adaptor.create( EOA, "EOA" ) );
-        adaptor.addChild( stringAlias2, alt );
-    }
-    adaptor.addChild( stringAlias2, adaptor.create( EOB, "EOB" ) );
-
-    /*
-     * charAlias2 = ^(BLOCK[] ^(ALT[] CHAR_LITERAL[] ACTION[] EOA[]) EOB[])
-     */
-    charAlias2 = (GrammarAST)adaptor.create( BLOCK, "BLOCK" );
-    {
-        GrammarAST alt = (GrammarAST)adaptor.create( ALT, "ALT" );
-        adaptor.addChild( alt, adaptor.create( CHAR_LITERAL, "CHAR_LITERAL" ) );
-        adaptor.addChild( alt, adaptor.create( ACTION, "ACTION" ) );
-        adaptor.addChild( alt, adaptor.create( EOA, "EOA" ) );
-        adaptor.addChild( charAlias2, alt );
-    }
-    adaptor.addChild( charAlias2, adaptor.create( EOB, "EOB" ) );
-}
-
-// Behavior moved to AssignTokenTypesBehavior
-protected void trackString(GrammarAST t) {}
-protected void trackToken( GrammarAST t ) {}
-protected void trackTokenRule( GrammarAST t, GrammarAST modifier, GrammarAST block ) {}
-protected void alias( GrammarAST t, GrammarAST s ) {}
-public void defineTokens( Grammar root ) {}
-protected void defineStringLiteralsFromDelegates() {}
-protected void assignStringTypes( Grammar root ) {}
-protected void aliasTokenIDsAndLiterals( Grammar root ) {}
-protected void assignTokenIDTypes( Grammar root ) {}
-protected void defineTokenNamesAndLiteralsInGrammar( Grammar root ) {}
-protected void init( Grammar root ) {}
-}
-
-public
-grammar_[Grammar g]
-@init
-{
-	if ( state.backtracking == 0 )
-		init($g);
-}
-	:	(	^( LEXER_GRAMMAR 	  grammarSpec )
-		|	^( PARSER_GRAMMAR   grammarSpec )
-		|	^( TREE_GRAMMAR     grammarSpec )
-		|	^( COMBINED_GRAMMAR grammarSpec )
-		)
-	;
-
-grammarSpec
-	:	id=ID
-		(cmt=DOC_COMMENT)?
-		(optionsSpec)?
-		(delegateGrammars)?
-		(tokensSpec)?
-		(attrScope)*
-		( ^(AMPERSAND .*) )* // skip actions
-		rules
-	;
-
-attrScope
-	:	^( 'scope' ID ( ^(AMPERSAND .*) )* ACTION )
-	;
-
-optionsSpec returns [Map<Object, Object> opts = new HashMap<Object, Object>()]
-	:	^( OPTIONS (option[$opts])+ )
-	;
-
-option[Map<Object, Object> opts]
-	:	^( ASSIGN ID optionValue )
-		{
-			String key = $ID.text;
-			$opts.put(key, $optionValue.value);
-			// check for grammar-level option to import vocabulary
-			if ( currentRuleName==null && key.equals("tokenVocab") )
-			{
-				grammar.importTokenVocabulary($ID,(String)$optionValue.value);
-			}
-		}
-	;
-
-optionValue returns [Object value=null]
-@init
-{
-	if ( state.backtracking == 0 )
-		$value = $start.getText();
-}
-	:	ID
-	|	STRING_LITERAL
-	|	CHAR_LITERAL
-	|	INT
-		{$value = Integer.parseInt($INT.text);}
-//  |   cs=charSet       {$value = $cs;} // return set AST in this case
-	;
-
-charSet
-	:	^( CHARSET charSetElement )
-	;
-
-charSetElement
-	:	CHAR_LITERAL
-	|	^( OR CHAR_LITERAL CHAR_LITERAL )
-	|	^( RANGE CHAR_LITERAL CHAR_LITERAL )
-	;
-
-delegateGrammars
-	:	^(	'import'
-			(	^(ASSIGN ID ID)
-			|	ID
-			)+
-		)
-	;
-
-tokensSpec
-	:	^(TOKENS tokenSpec*)
-	;
-
-tokenSpec
-	:	t=TOKEN_REF            {trackToken($t);}
-	|	^(	ASSIGN
-			t2=TOKEN_REF       {trackToken($t2);}
-			( s=STRING_LITERAL {trackString($s); alias($t2,$s);}
-			| c=CHAR_LITERAL   {trackString($c); alias($t2,$c);}
-			)
-		)
-	;
-
-rules
-	:	rule+
-	;
-
-rule
-	:	^(RULE ruleBody)
-	|	^(PREC_RULE ruleBody)
-	;
-
-ruleBody
-	:	id=ID {currentRuleName=$id.text;}
-		(m=modifier)?
-		^(ARG (ARG_ACTION)?)
-		^(RET (ARG_ACTION)?)
-		(throwsSpec)?
-		(optionsSpec)?
-		(ruleScopeSpec)?
-		( ^(AMPERSAND .*) )*
-		b=block
-		(exceptionGroup)?
-		EOR
-		{trackTokenRule($id,$m.start,$b.start);}
-	;
-
-modifier
-	:	'protected'
-	|	'public'
-	|	'private'
-	|	'fragment'
-	;
-
-throwsSpec
-	:	^('throws' ID+)
-	;
-
-ruleScopeSpec
-	:	^( 'scope' ( ^(AMPERSAND .*) )* (ACTION)? ( ID )* )
-	;
-
-block
-	:	^(	BLOCK
-			(optionsSpec)?
-			( alternative rewrite )+
-			EOB
-		)
-	;
-
-alternative
-	:	^( ALT (element)+ EOA )
-	;
-
-exceptionGroup
-	:	( exceptionHandler )+ (finallyClause)?
-	|	finallyClause
-	;
-
-exceptionHandler
-	:	^('catch' ARG_ACTION ACTION)
-	;
-
-finallyClause
-	:	^('finally' ACTION)
-	;
-
-rewrite
-	:	^(REWRITES ( ^(REWRITE .*) )* )
-	|
-	;
-
-element
-	:	^(ROOT element)
-	|	^(BANG element)
-	|	atom
-	|	^(NOT element)
-	|	^(RANGE atom atom)
-	|	^(CHAR_RANGE atom atom)
-	|	^(ASSIGN ID element)
-	|	^(PLUS_ASSIGN ID element)
-	|	ebnf
-	|	tree_
-	|	^( SYNPRED block )
-	|	FORCED_ACTION
-	|	ACTION
-	|	SEMPRED
-	|	SYN_SEMPRED
-	|	^(BACKTRACK_SEMPRED .*)
-	|	GATED_SEMPRED
-	|	EPSILON
-	;
-
-ebnf
-	:	block
-	|	^( OPTIONAL block )
-	|	^( CLOSURE block )
-	|	^( POSITIVE_CLOSURE block )
-	;
-
-tree_
-	:	^(TREE_BEGIN element+)
-	;
-
-atom
-	:	^( RULE_REF (ARG_ACTION)? )
-	|	^( t=TOKEN_REF (ARG_ACTION )? ) {trackToken($t);}
-	|	c=CHAR_LITERAL   {trackString($c);}
-	|	s=STRING_LITERAL {trackString($s);}
-	|	WILDCARD
-	|	^(DOT ID atom) // scope override on rule
-	;
-
-ast_suffix
-	:	ROOT
-	|	BANG
-	;
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/CodeGenTreeWalker.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/CodeGenTreeWalker.g
deleted file mode 100644
index 7615833..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/CodeGenTreeWalker.g
+++ /dev/null
@@ -1,1608 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2011 Terence Parr
- All rights reserved.
-
- Grammar conversion to ANTLR v3:
- Copyright (c) 2011 Sam Harwell
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-	notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-	notice, this list of conditions and the following disclaimer in the
-	documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-	derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Walk a grammar and generate code by gradually building up
- *  a bigger and bigger ST.
- *
- *  Terence Parr
- *  University of San Francisco
- *  June 15, 2004
- */
-tree grammar CodeGenTreeWalker;
-
-options {
-	tokenVocab = ANTLR;
-	ASTLabelType=GrammarAST;
-}
-
-@header {
-package org.antlr.grammar.v3;
-
-import org.antlr.analysis.*;
-import org.antlr.misc.*;
-import org.antlr.tool.*;
-import org.antlr.codegen.*;
-
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Collection;
-import org.antlr.runtime.BitSet;
-import org.antlr.runtime.DFA;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-}
-
-@members {
-protected static final int RULE_BLOCK_NESTING_LEVEL = 0;
-protected static final int OUTER_REWRITE_NESTING_LEVEL = 0;
-
-private String currentRuleName = null;
-protected int blockNestingLevel = 0;
-protected int rewriteBlockNestingLevel = 0;
-private int outerAltNum = 0;
-protected ST currentBlockST = null;
-protected boolean currentAltHasASTRewrite = false;
-protected int rewriteTreeNestingLevel = 0;
-protected HashSet<Object> rewriteRuleRefs = null;
-
-public String getCurrentRuleName() {
-    return currentRuleName;
-}
-
-public void setCurrentRuleName(String value) {
-    currentRuleName = value;
-}
-
-public int getOuterAltNum() {
-    return outerAltNum;
-}
-
-public void setOuterAltNum(int value) {
-    outerAltNum = value;
-}
-
-@Override
-public void reportError(RecognitionException ex) {
-    Token token = null;
-    if (ex instanceof MismatchedTokenException) {
-        token = ((MismatchedTokenException)ex).token;
-    } else if (ex instanceof NoViableAltException) {
-        token = ((NoViableAltException)ex).token;
-    }
-
-    ErrorManager.syntaxError(
-        ErrorManager.MSG_SYNTAX_ERROR,
-        grammar,
-        token,
-        "codegen: " + ex.toString(),
-        ex );
-}
-
-public final void reportError(String s) {
-    System.out.println("codegen: error: " + s);
-}
-
-protected CodeGenerator generator;
-protected Grammar grammar;
-protected STGroup templates;
-
-/** The overall lexer/parser template; simulate dynamically scoped
- *  attributes by making this an instance var of the walker.
- */
-protected ST recognizerST;
-
-protected ST outputFileST;
-protected ST headerFileST;
-
-protected String outputOption = "";
-
-protected final ST getWildcardST(GrammarAST elementAST, GrammarAST ast_suffix, String label) {
-    String name = "wildcard";
-    if (grammar.type == Grammar.LEXER) {
-        name = "wildcardChar";
-    }
-    return getTokenElementST(name, name, elementAST, ast_suffix, label);
-}
-
-protected final ST getRuleElementST( String name,
-                                          String ruleTargetName,
-                                          GrammarAST elementAST,
-                                          GrammarAST ast_suffix,
-                                          String label ) {
-	Rule r = grammar.getRule( currentRuleName );
-	String suffix = getSTSuffix(elementAST, ast_suffix, label);
-	if ( !r.isSynPred ) {
-		name += suffix;
-	}
-	// if we're building trees and there is no label, gen a label
-	// unless we're in a synpred rule.
-	if ( ( grammar.buildAST() || suffix.length() > 0 ) && label == null &&
-		 ( r == null || !r.isSynPred ) ) {
-		// we will need a label to do the AST or tracking, make one
-		label = generator.createUniqueLabel( ruleTargetName );
-		CommonToken labelTok = new CommonToken( ANTLRParser.ID, label );
-		grammar.defineRuleRefLabel( currentRuleName, labelTok, elementAST );
-	}
-
-	ST elementST = templates.getInstanceOf( name );
-	if ( label != null ) {
-		elementST.add( "label", label );
-	}
-
-
-	return elementST;
-}
-
-protected final ST getTokenElementST( String name,
-                                           String elementName,
-                                           GrammarAST elementAST,
-                                           GrammarAST ast_suffix,
-                                           String label ) {
-    boolean tryUnchecked = false;
-    if (name == "matchSet" && elementAST.enclosingRuleName != null && elementAST.enclosingRuleName.length() > 0 && Rule.getRuleType(elementAST.enclosingRuleName) == Grammar.LEXER)
-    {
-        if ( ( elementAST.getParent().getType() == ANTLRLexer.ALT && elementAST.getParent().getParent().getParent().getType() == RULE && elementAST.getParent().getParent().getChildCount() == 2 )
-            || ( elementAST.getParent().getType() == ANTLRLexer.NOT && elementAST.getParent().getParent().getParent().getParent().getType() == RULE && elementAST.getParent().getParent().getParent().getChildCount() == 2 ) ) {
-            // single alt at the start of the rule needs to be checked
-        } else {
-            tryUnchecked = true;
-        }
-    }
-
-    String suffix = getSTSuffix( elementAST, ast_suffix, label );
-    // if we're building trees and there is no label, gen a label
-    // unless we're in a synpred rule.
-    Rule r = grammar.getRule( currentRuleName );
-    if ( ( grammar.buildAST() || suffix.length() > 0 ) && label == null &&
-         ( r == null || !r.isSynPred ) )
-    {
-        label = generator.createUniqueLabel( elementName );
-        CommonToken labelTok = new CommonToken( ANTLRParser.ID, label );
-        grammar.defineTokenRefLabel( currentRuleName, labelTok, elementAST );
-    }
-
-    ST elementST = null;
-    if ( tryUnchecked && templates.isDefined( name + "Unchecked" + suffix ) )
-        elementST = templates.getInstanceOf( name + "Unchecked" + suffix );
-    if ( elementST == null )
-        elementST = templates.getInstanceOf( name + suffix );
-
-    if ( label != null )
-    {
-        elementST.add( "label", label );
-    }
-    return elementST;
-}
-
-public final boolean isListLabel(String label) {
-    boolean hasListLabel = false;
-    if ( label != null ) {
-        Rule r = grammar.getRule( currentRuleName );
-        //String stName = null;
-        if ( r != null )
-        {
-            Grammar.LabelElementPair pair = r.getLabel( label );
-            if ( pair != null &&
-                 ( pair.type == Grammar.TOKEN_LIST_LABEL ||
-                  pair.type == Grammar.RULE_LIST_LABEL ||
-                  pair.type == Grammar.WILDCARD_TREE_LIST_LABEL ) )
-            {
-                hasListLabel = true;
-            }
-        }
-    }
-    return hasListLabel;
-}
-
-/** Return a non-empty template name suffix if the token is to be
- *  tracked, added to a tree, or both.
- */
-protected final String getSTSuffix(GrammarAST elementAST, GrammarAST ast_suffix, String label) {
-    if ( grammar.type == Grammar.LEXER )
-    {
-        return "";
-    }
-    // handle list label stuff; make element use "Track"
-
-    String operatorPart = "";
-    String rewritePart = "";
-    String listLabelPart = "";
-    Rule ruleDescr = grammar.getRule( currentRuleName );
-    if ( ast_suffix != null && !ruleDescr.isSynPred )
-    {
-        if ( ast_suffix.getType() == ANTLRParser.ROOT )
-        {
-            operatorPart = "RuleRoot";
-        }
-        else if ( ast_suffix.getType() == ANTLRParser.BANG )
-        {
-            operatorPart = "Bang";
-        }
-    }
-    if ( currentAltHasASTRewrite && elementAST.getType() != WILDCARD )
-    {
-        rewritePart = "Track";
-    }
-    if ( isListLabel( label ) )
-    {
-        listLabelPart = "AndListLabel";
-    }
-    String STsuffix = operatorPart + rewritePart + listLabelPart;
-    //JSystem.@out.println("suffix = "+STsuffix);
-
-    return STsuffix;
-}
-
-/** Convert rewrite AST lists to target labels list */
-protected final List<String> getTokenTypesAsTargetLabels(Collection<GrammarAST> refs)
-{
-    if ( refs == null || refs.size() == 0 )
-        return null;
-
-    List<String> labels = new ArrayList<String>( refs.size() );
-    for ( GrammarAST t : refs )
-    {
-        String label;
-        if ( t.getType() == ANTLRParser.RULE_REF || t.getType() == ANTLRParser.TOKEN_REF || t.getType() == ANTLRParser.LABEL)
-        {
-            label = t.getText();
-        }
-        else
-        {
-            // must be char or String literal
-            label = generator.getTokenTypeAsTargetLabel(grammar.getTokenType(t.getText()));
-        }
-        labels.add( label );
-    }
-    return labels;
-}
-
-public final void init( Grammar g ) {
-    this.grammar = g;
-    this.generator = grammar.getCodeGenerator();
-    this.templates = generator.getTemplates();
-}
-}
-
-public
-grammar_[Grammar g,
-		ST recognizerST,
-		ST outputFileST,
-		ST headerFileST]
-@init
-{
-	if ( state.backtracking == 0 )
-	{
-		init(g);
-		this.recognizerST = recognizerST;
-		this.outputFileST = outputFileST;
-		this.headerFileST = headerFileST;
-		String superClass = (String)g.getOption("superClass");
-		outputOption = (String)g.getOption("output");
-		if ( superClass!=null ) recognizerST.add("superClass", superClass);
-		if ( g.type!=Grammar.LEXER ) {
-		    Object lt = g.getOption("ASTLabelType");
-			if ( lt!=null ) recognizerST.add("ASTLabelType", lt);
-		}
-		if ( g.type==Grammar.TREE_PARSER && g.getOption("ASTLabelType")==null ) {
-			ErrorManager.grammarWarning(ErrorManager.MSG_MISSING_AST_TYPE_IN_TREE_GRAMMAR,
-									   g,
-									   null,
-									   g.name);
-		}
-		if ( g.type!=Grammar.TREE_PARSER ) {
-		    Object lt = g.getOption("TokenLabelType");
-			if ( lt!=null ) recognizerST.add("labelType", lt);
-		}
-		$recognizerST.add("numRules", grammar.getRules().size());
-		$outputFileST.add("numRules", grammar.getRules().size());
-		$headerFileST.add("numRules", grammar.getRules().size());
-	}
-}
-	:	(	^( LEXER_GRAMMAR grammarSpec )
-		|	^( PARSER_GRAMMAR grammarSpec )
-		|	^( TREE_GRAMMAR grammarSpec )
-		|	^( COMBINED_GRAMMAR grammarSpec )
-		)
-	;
-
-attrScope
-	:	^( 'scope' ID ( ^(AMPERSAND .*) )* ACTION )
-	;
-
-grammarSpec
-	:   name=ID
-		(	cmt=DOC_COMMENT
-			{
-				outputFileST.add("docComment", $cmt.text);
-				headerFileST.add("docComment", $cmt.text);
-			}
-		)?
-		{
-			recognizerST.add("name", grammar.getRecognizerName());
-			outputFileST.add("name", grammar.getRecognizerName());
-			headerFileST.add("name", grammar.getRecognizerName());
-			recognizerST.add("scopes", grammar.getGlobalScopes());
-			headerFileST.add("scopes", grammar.getGlobalScopes());
-		}
-		( ^(OPTIONS .*) )?
-		( ^(IMPORT .*) )?
-		( ^(TOKENS .*) )?
-		(attrScope)*
-		( ^(AMPERSAND .*) )*
-		rules[recognizerST]
-	;
-
-rules[ST recognizerST]
-@init
-{
-	String ruleName = ((GrammarAST)input.LT(1)).getChild(0).getText();
-	boolean generated = grammar.generateMethodForRule(ruleName);
-}
-	:	(	(	options {k=1;} :
-				{generated}? =>
-				rST=rule
-				{
-					if ( $rST.code != null )
-					{
-						recognizerST.add("rules", $rST.code);
-						outputFileST.add("rules", $rST.code);
-						headerFileST.add("rules", $rST.code);
-					}
-				}
-			|	^(RULE .*)
-			|	^(PREC_RULE .*) // ignore
-			)
-			{{
-				if ( input.LA(1) == RULE )
-				{
-					ruleName = ((GrammarAST)input.LT(1)).getChild(0).getText();
-					//System.Diagnostics.Debug.Assert( ruleName == ((GrammarAST)input.LT(1)).enclosingRuleName );
-					generated = grammar.generateMethodForRule(ruleName);
-				}
-			}}
-		)+
-	;
-
-rule returns [ST code=null]
-@init
-{
-	String initAction = null;
-	// get the dfa for the BLOCK
-	GrammarAST block2=(GrammarAST)$start.getFirstChildWithType(BLOCK);
-	org.antlr.analysis.DFA dfa = block2.getLookaheadDFA();
-	// init blockNestingLevel so it's block level RULE_BLOCK_NESTING_LEVEL
-	// for alts of rule
-	blockNestingLevel = RULE_BLOCK_NESTING_LEVEL-1;
-	Rule ruleDescr = grammar.getRule($start.getChild(0).getText());
-	currentRuleName = $start.getChild(0).getText();
-
-	// For syn preds, we don't want any AST code etc... in there.
-	// Save old templates ptr and restore later.  Base templates include Dbg.
-	STGroup saveGroup = templates;
-	if ( ruleDescr.isSynPred )
-	{
-		templates = generator.getBaseTemplates();
-	}
-
-	String description = "";
-}
-	:	^(	RULE id=ID
-			{assert currentRuleName == $id.text;}
-			(mod=modifier)?
-			^(ARG (ARG_ACTION)?)
-			^(RET (ARG_ACTION)?)
-			(throwsSpec)?
-			( ^(OPTIONS .*) )?
-			(ruleScopeSpec)?
-			( ^(AMPERSAND .*) )*
-			b=block["ruleBlock", dfa]
-			{
-				description =
-					grammar.grammarTreeToString((GrammarAST)$start.getFirstChildWithType(BLOCK),
-												false);
-				description =
-					generator.target.getTargetStringLiteralFromString(description);
-				$b.code.add("description", description);
-				// do not generate lexer rules in combined grammar
-				String stName = null;
-				if ( ruleDescr.isSynPred )
-				{
-					stName = "synpredRule";
-				}
-				else if ( grammar.type==Grammar.LEXER )
-				{
-					if ( currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) )
-					{
-						stName = "tokensRule";
-					}
-					else
-					{
-						stName = "lexerRule";
-					}
-				}
-				else
-				{
-					if ( !(grammar.type==Grammar.COMBINED &&
-						 Rule.getRuleType(currentRuleName) == Grammar.LEXER) )
-					{
-						stName = "rule";
-					}
-				}
-				$code = templates.getInstanceOf(stName);
-				if ( $code.getName().equals("/rule") )
-				{
-					$code.add("emptyRule", grammar.isEmptyRule(block2));
-				}
-				$code.add("ruleDescriptor", ruleDescr);
-				String memo = (String)grammar.getBlockOption($start,"memoize");
-				if ( memo==null )
-				{
-					memo = (String)grammar.getOption("memoize");
-				}
-				if ( memo!=null && memo.equals("true") &&
-					 (stName.equals("rule")||stName.equals("lexerRule")) )
-				{
-					$code.add("memoize", memo!=null && memo.equals("true"));
-				}
-			}
-
-			(exceptionGroup[$code])?
-			EOR
-		)
-		{
-			if ( $code!=null )
-			{
-				if ( grammar.type==Grammar.LEXER )
-				{
-					boolean naked =
-						currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ||
-						($mod.start!=null&&$mod.start.getText().equals(Grammar.FRAGMENT_RULE_MODIFIER));
-					$code.add("nakedBlock", naked);
-				}
-				else
-				{
-					description = grammar.grammarTreeToString($start,false);
-					description = generator.target.getTargetStringLiteralFromString(description);
-					$code.add("description", description);
-				}
-				Rule theRule = grammar.getRule(currentRuleName);
-				generator.translateActionAttributeReferencesForSingleScope(
-					theRule,
-					theRule.getActions()
-				);
-				$code.add("ruleName", currentRuleName);
-				$code.add("block", $b.code);
-				if ( initAction!=null )
-				{
-					$code.add("initAction", initAction);
-				}
-			}
-		}
-	;
-finally { templates = saveGroup; }
-
-modifier
-	:	'protected'
-	|	'public'
-	|	'private'
-	|	'fragment'
-	;
-
-throwsSpec
-	:	^('throws' ID+)
-	;
-
-ruleScopeSpec
-	:	^( 'scope' ( ^(AMPERSAND .*) )* (ACTION)? ( ID )* )
-	;
-
-block[String blockTemplateName, org.antlr.analysis.DFA dfa]
-	 returns [ST code=null]
-options { k=1; }
-@init
-{
-	int altNum = 0;
-
-	blockNestingLevel++;
-	if ( state.backtracking == 0 )
-	{
-		ST decision = null;
-		if ( $dfa != null )
-		{
-			$code = templates.getInstanceOf($blockTemplateName);
-			decision = generator.genLookaheadDecision(recognizerST,$dfa);
-			$code.add("decision", decision);
-			$code.add("decisionNumber", $dfa.getDecisionNumber());
-			$code.add("maxK",$dfa.getMaxLookaheadDepth());
-			$code.add("maxAlt",$dfa.getNumberOfAlts());
-		}
-		else
-		{
-			$code = templates.getInstanceOf($blockTemplateName+"SingleAlt");
-		}
-		$code.add("blockLevel", blockNestingLevel);
-		$code.add("enclosingBlockLevel", blockNestingLevel-1);
-		altNum = 1;
-		if ( this.blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
-			this.outerAltNum=1;
-		}
-	}
-}
-	:	{$start.getSetValue()!=null}? => setBlock
-		{
-			$code.add("alts",$setBlock.code);
-		}
-
-	|	^(  BLOCK
-			( ^(OPTIONS .*) )? // ignore
-			( alt=alternative rew=rewrite
-				{
-					if ( this.blockNestingLevel==RULE_BLOCK_NESTING_LEVEL )
-					{
-						this.outerAltNum++;
-					}
-					// add the rewrite code as just another element in the alt :)
-					// (unless it's a " -> ..." rewrite
-					// ( -> ... )
-					GrammarAST firstRewriteAST = (GrammarAST)$rew.start.findFirstType(REWRITE);
-					boolean etc =
-						$rew.start.getType()==REWRITES &&
-						firstRewriteAST.getChild(0)!=null &&
-						firstRewriteAST.getChild(0).getType()==ETC;
-					if ( $rew.code!=null && !etc )
-					{
-						$alt.code.add("rew", $rew.code);
-					}
-					// add this alt to the list of alts for this block
-					$code.add("alts",$alt.code);
-					$alt.code.add("altNum", altNum);
-					$alt.code.add("outerAlt", blockNestingLevel==RULE_BLOCK_NESTING_LEVEL);
-					altNum++;
-				}
-			)+
-			EOB
-		 )
-	;
-finally { blockNestingLevel--; }
-
-setBlock returns [ST code=null]
-@init
-{
-	ST setcode = null;
-	if ( state.backtracking == 0 )
-	{
-		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() )
-		{
-			Rule r = grammar.getRule(currentRuleName);
-			currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
-			if ( currentAltHasASTRewrite )
-			{
-				r.trackTokenReferenceInAlt($start, outerAltNum);
-			}
-		}
-	}
-}
-	:	^(s=BLOCK .*)
-		{
-			int i = ((CommonToken)$s.getToken()).getTokenIndex();
-			if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL )
-			{
-				setcode = getTokenElementST("matchRuleBlockSet", "set", $s, null, null);
-			}
-			else
-			{
-				setcode = getTokenElementST("matchSet", "set", $s, null, null);
-			}
-			setcode.add("elementIndex", i);
-			//if ( grammar.type!=Grammar.LEXER )
-			//{
-			//	generator.generateLocalFOLLOW($s,"set",currentRuleName,i);
-			//}
-			setcode.add("s",
-				generator.genSetExpr(templates,$s.getSetValue(),1,false));
-			ST altcode=templates.getInstanceOf("alt");
-			altcode.addAggr("elements.{el,line,pos}",
-								 setcode,
-								 $s.getLine(),
-								 $s.getCharPositionInLine() + 1
-								);
-			altcode.add("altNum", 1);
-			altcode.add("outerAlt", blockNestingLevel==RULE_BLOCK_NESTING_LEVEL);
-			if ( !currentAltHasASTRewrite && grammar.buildAST() )
-			{
-				altcode.add("autoAST", true);
-			}
-			altcode.add("treeLevel", rewriteTreeNestingLevel);
-			$code = altcode;
-		}
-	;
-
-setAlternative
-	:	^(ALT setElement+ EOA)
-	;
-
-exceptionGroup[ST ruleST]
-	:	( exceptionHandler[$ruleST] )+ (finallyClause[$ruleST])?
-	|	finallyClause[$ruleST]
-	;
-
-exceptionHandler[ST ruleST]
-	:	^('catch' ARG_ACTION ACTION)
-		{
-			List chunks = generator.translateAction(currentRuleName,$ACTION);
-			$ruleST.addAggr("exceptions.{decl,action}",$ARG_ACTION.text,chunks);
-		}
-	;
-
-finallyClause[ST ruleST]
-	:	^('finally' ACTION)
-		{
-			List chunks = generator.translateAction(currentRuleName,$ACTION);
-			$ruleST.add("finally",chunks);
-		}
-	;
-
-alternative returns [ST code]
-@init
-{
-	if ( state.backtracking == 0 )
-	{
-		$code = templates.getInstanceOf("alt");
-		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() )
-		{
-			Rule r = grammar.getRule(currentRuleName);
-			currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
-		}
-		String description = grammar.grammarTreeToString($start, false);
-		description = generator.target.getTargetStringLiteralFromString(description);
-		$code.add("description", description);
-		$code.add("treeLevel", rewriteTreeNestingLevel);
-		if ( !currentAltHasASTRewrite && grammar.buildAST() )
-		{
-			$code.add("autoAST", true);
-		}
-	}
-}
-	:	^(	a=ALT
-			(
-				e=element[null,null]
-				{
-					if (e != null && e.code != null)
-					{
-						$code.addAggr("elements.{el,line,pos}",
-										  $e.code,
-										  $e.start.getLine(),
-										  $e.start.getCharPositionInLine() + 1
-										 );
-					}
-				}
-			)+
-			EOA
-		)
-	;
-
-element[GrammarAST label, GrammarAST astSuffix] returns [ST code=null]
-options { k=1; }
-@init
-{
-	IntSet elements=null;
-	GrammarAST ast = null;
-}
-	:	^(ROOT e=element[label,$ROOT])
-		{ $code = $e.code; }
-
-	|	^(BANG e=element[label,$BANG])
-		{ $code = $e.code; }
-
-	|	^( n=NOT notElement[$n, $label, $astSuffix] )
-		{ $code = $notElement.code; }
-
-	|	^( ASSIGN alabel=ID e=element[$alabel,$astSuffix] )
-		{ $code = $e.code; }
-
-	|	^( PLUS_ASSIGN label2=ID e=element[$label2,$astSuffix] )
-		{ $code = $e.code; }
-
-	|	^(CHAR_RANGE a=CHAR_LITERAL b=CHAR_LITERAL)
-		{
-			$code = templates.getInstanceOf("charRangeRef");
-			String low = generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,$a.text);
-			String high = generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,$b.text);
-			$code.add("a", low);
-			$code.add("b", high);
-			if ( label!=null )
-			{
-				$code.add("label", $label.getText());
-			}
-		}
-
-	|	({((GrammarAST)input.LT(1)).getSetValue()==null}? (BLOCK|OPTIONAL|CLOSURE|POSITIVE_CLOSURE)) => /*{$start.getSetValue()==null}?*/ ebnf
-		{ $code = $ebnf.code; }
-
-	|	atom[null, $label, $astSuffix]
-		{ $code = $atom.code; }
-
-	|	tree_
-		{ $code = $tree_.code; }
-
-	|	element_action
-		{ $code = $element_action.code; }
-
-	|   (sp=SEMPRED|sp=GATED_SEMPRED)
-		{
-			$code = templates.getInstanceOf("validateSemanticPredicate");
-			$code.add("pred", generator.translateAction(currentRuleName,$sp));
-			String description = generator.target.getTargetStringLiteralFromString($sp.text);
-			$code.add("description", description);
-		}
-
-	|	SYN_SEMPRED // used only in lookahead; don't generate validating pred
-
-	|	^(SYNPRED .*)
-
-	|	^(BACKTRACK_SEMPRED .*)
-
-	|   EPSILON
-	;
-
-element_action returns [ST code=null]
-	:	act=ACTION
-		{
-			$code = templates.getInstanceOf("execAction");
-			$code.add("action", generator.translateAction(currentRuleName,$act));
-		}
-	|	act2=FORCED_ACTION
-		{
-			$code = templates.getInstanceOf("execForcedAction");
-			$code.add("action", generator.translateAction(currentRuleName,$act2));
-		}
-	;
-
-notElement[GrammarAST n, GrammarAST label, GrammarAST astSuffix] returns [ST code=null]
-@init
-{
-	IntSet elements=null;
-	String labelText = null;
-	if ( label!=null )
-	{
-		labelText = label.getText();
-	}
-}
-	:	(	assign_c=CHAR_LITERAL
-			{
-				int ttype=0;
-				if ( grammar.type==Grammar.LEXER )
-				{
-					ttype = Grammar.getCharValueFromGrammarCharLiteral($assign_c.text);
-				}
-				else
-				{
-					ttype = grammar.getTokenType($assign_c.text);
-				}
-				elements = grammar.complement(ttype);
-			}
-		|	assign_s=STRING_LITERAL
-			{
-				int ttype=0;
-				if ( grammar.type==Grammar.LEXER )
-				{
-					// TODO: error!
-				}
-				else
-				{
-					ttype = grammar.getTokenType($assign_s.text);
-				}
-				elements = grammar.complement(ttype);
-			}
-		|	assign_t=TOKEN_REF
-			{
-				int ttype = grammar.getTokenType($assign_t.text);
-				elements = grammar.complement(ttype);
-			}
-		|	^(assign_st=BLOCK .*)
-			{
-				elements = $assign_st.getSetValue();
-				elements = grammar.complement(elements);
-			}
-		)
-		{
-			$code = getTokenElementST("matchSet",
-									 "set",
-									 (GrammarAST)$n.getChild(0),
-									 astSuffix,
-									 labelText);
-			$code.add("s",generator.genSetExpr(templates,elements,1,false));
-			int i = ((CommonToken)n.getToken()).getTokenIndex();
-			$code.add("elementIndex", i);
-			if ( grammar.type!=Grammar.LEXER )
-			{
-				generator.generateLocalFOLLOW(n,"set",currentRuleName,i);
-			}
-		}
-	;
-
-ebnf returns [ST code=null]
-@init
-{
-	org.antlr.analysis.DFA dfa=null;
-	GrammarAST b = (GrammarAST)$start.getChild(0);
-	GrammarAST eob = (GrammarAST)b.getLastChild(); // loops will use EOB DFA
-}
-	:	(	{ dfa = $start.getLookaheadDFA(); }
-			blk=block["block", dfa]
-			{ $code = $blk.code; }
-		|	{ dfa = $start.getLookaheadDFA(); }
-			^( OPTIONAL blk=block["optionalBlock", dfa] )
-			{ $code = $blk.code; }
-		|	{ dfa = eob.getLookaheadDFA(); }
-			^( CLOSURE blk=block["closureBlock", dfa] )
-			{ $code = $blk.code; }
-		|	{ dfa = eob.getLookaheadDFA(); }
-			^( POSITIVE_CLOSURE blk=block["positiveClosureBlock", dfa] )
-			{ $code = $blk.code; }
-		)
-		{
-			String description = grammar.grammarTreeToString($start, false);
-			description = generator.target.getTargetStringLiteralFromString(description);
-			$code.add("description", description);
-		}
-	;
-
-tree_ returns [ST code]
-@init
-{
-	rewriteTreeNestingLevel++;
-	GrammarAST rootSuffix = null;
-	if ( state.backtracking == 0 )
-	{
-		$code = templates.getInstanceOf("tree");
-		NFAState afterDOWN = (NFAState)$start.NFATreeDownState.transition(0).target;
-		LookaheadSet s = grammar.LOOK(afterDOWN);
-		if ( s.member(Label.UP) ) {
-			// nullable child list if we can see the UP as the next token
-			// we need an "if ( input.LA(1)==Token.DOWN )" gate around
-			// the child list.
-			$code.add("nullableChildList", "true");
-		}
-		$code.add("enclosingTreeLevel", rewriteTreeNestingLevel-1);
-		$code.add("treeLevel", rewriteTreeNestingLevel);
-		Rule r = grammar.getRule(currentRuleName);
-		if ( grammar.buildAST() && !r.hasRewrite(outerAltNum) ) {
-			rootSuffix = new GrammarAST(ROOT,"ROOT");
-		}
-	}
-}
-	:	^(	TREE_BEGIN
-			el=element[null,rootSuffix]
-			{
-				$code.addAggr("root.{el,line,pos}",
-								  $el.code,
-								  $el.start.getLine(),
-								  $el.start.getCharPositionInLine() + 1
-								  );
-			}
-			// push all the immediately-following actions out before children
-			// so actions aren't guarded by the "if (input.LA(1)==Token.DOWN)"
-			// guard in generated code.
-			(	(element_action) =>
-				act=element_action
-				{
-					$code.addAggr("actionsAfterRoot.{el,line,pos}",
-									  $act.code,
-									  $act.start.getLine(),
-									  $act.start.getCharPositionInLine() + 1
-									);
-				}
-			)*
-			(	 el=element[null,null]
-				 {
-				 $code.addAggr("children.{el,line,pos}",
-								  $el.code,
-								  $el.start.getLine(),
-								  $el.start.getCharPositionInLine() + 1
-								  );
-				 }
-			)*
-		)
-	;
-finally { rewriteTreeNestingLevel--; }
-
-atom[GrammarAST scope, GrammarAST label, GrammarAST astSuffix]
-	returns [ST code=null]
-@init
-{
-	String labelText=null;
-	if ( state.backtracking == 0 )
-	{
-		if ( label!=null )
-		{
-			labelText = label.getText();
-		}
-		if ( grammar.type!=Grammar.LEXER &&
-			 ($start.getType()==RULE_REF||$start.getType()==TOKEN_REF||
-			  $start.getType()==CHAR_LITERAL||$start.getType()==STRING_LITERAL) )
-		{
-			Rule encRule = grammar.getRule(((GrammarAST)$start).enclosingRuleName);
-			if ( encRule!=null && encRule.hasRewrite(outerAltNum) && astSuffix!=null )
-			{
-				ErrorManager.grammarError(ErrorManager.MSG_AST_OP_IN_ALT_WITH_REWRITE,
-										  grammar,
-										  ((GrammarAST)$start).getToken(),
-										  ((GrammarAST)$start).enclosingRuleName,
-										  outerAltNum);
-				astSuffix = null;
-			}
-		}
-	}
-}
-	:   ^( r=RULE_REF (rarg=ARG_ACTION)? )
-		{
-			grammar.checkRuleReference(scope, $r, $rarg, currentRuleName);
-			String scopeName = null;
-			if ( scope!=null ) {
-				scopeName = scope.getText();
-			}
-			Rule rdef = grammar.getRule(scopeName, $r.text);
-			// don't insert label=r() if $label.attr not used, no ret value, ...
-			if ( !rdef.getHasReturnValue() ) {
-				labelText = null;
-			}
-			$code = getRuleElementST("ruleRef", $r.text, $r, astSuffix, labelText);
-			$code.add("rule", rdef);
-			if ( scope!=null ) { // scoped rule ref
-				Grammar scopeG = grammar.composite.getGrammar(scope.getText());
-				$code.add("scope", scopeG);
-			}
-			else if ( rdef.grammar != this.grammar ) { // nonlocal
-				// if rule definition is not in this grammar, it's nonlocal
-				List<Grammar> rdefDelegates = rdef.grammar.getDelegates();
-				if ( rdefDelegates.contains(this.grammar) ) {
-					$code.add("scope", rdef.grammar);
-				}
-				else {
-					// defining grammar is not a delegate, scope all the
-					// back to root, which has delegate methods for all
-					// rules.  Don't use scope if we are root.
-					if ( this.grammar != rdef.grammar.composite.delegateGrammarTreeRoot.grammar ) {
-						$code.add("scope",
-										  rdef.grammar.composite.delegateGrammarTreeRoot.grammar);
-					}
-				}
-			}
-
-			if ( $rarg!=null ) {
-				List args = generator.translateAction(currentRuleName,$rarg);
-				$code.add("args", args);
-			}
-			int i = ((CommonToken)r.getToken()).getTokenIndex();
-			$code.add("elementIndex", i);
-			generator.generateLocalFOLLOW($r,$r.text,currentRuleName,i);
-			$r.code = $code;
-		}
-
-	|	^( t=TOKEN_REF (targ=ARG_ACTION)? )
-		{
-			if ( currentAltHasASTRewrite && $t.terminalOptions!=null &&
-				$t.terminalOptions.get(Grammar.defaultTokenOption)!=null )
-			{
-				ErrorManager.grammarError(ErrorManager.MSG_HETERO_ILLEGAL_IN_REWRITE_ALT,
-										grammar,
-										((GrammarAST)($t)).getToken(),
-										$t.text);
-			}
-			grammar.checkRuleReference(scope, $t, $targ, currentRuleName);
-			if ( grammar.type==Grammar.LEXER )
-			{
-				if ( grammar.getTokenType($t.text)==Label.EOF )
-				{
-					$code = templates.getInstanceOf("lexerMatchEOF");
-				}
-				else
-				{
-					$code = templates.getInstanceOf("lexerRuleRef");
-					if ( isListLabel(labelText) )
-					{
-						$code = templates.getInstanceOf("lexerRuleRefAndListLabel");
-					}
-					String scopeName = null;
-					if ( scope!=null )
-					{
-						scopeName = scope.getText();
-					}
-					Rule rdef2 = grammar.getRule(scopeName, $t.text);
-					$code.add("rule", rdef2);
-					if ( scope!=null )
-					{ // scoped rule ref
-						Grammar scopeG = grammar.composite.getGrammar(scope.getText());
-						$code.add("scope", scopeG);
-					}
-					else if ( rdef2.grammar != this.grammar )
-					{ // nonlocal
-						// if rule definition is not in this grammar, it's nonlocal
-						$code.add("scope", rdef2.grammar);
-					}
-					if ( $targ!=null )
-					{
-						List args = generator.translateAction(currentRuleName,$targ);
-						$code.add("args", args);
-					}
-				}
-				int i = ((CommonToken)$t.getToken()).getTokenIndex();
-				$code.add("elementIndex", i);
-				if ( label!=null )
-					$code.add("label", labelText);
-			}
-			else
-			{
-				$code = getTokenElementST("tokenRef", $t.text, $t, astSuffix, labelText);
-				String tokenLabel =
-					generator.getTokenTypeAsTargetLabel(grammar.getTokenType(t.getText()));
-				$code.add("token",tokenLabel);
-				if ( !currentAltHasASTRewrite && $t.terminalOptions!=null )
-				{
-					$code.add("terminalOptions", $t.terminalOptions);
-				}
-				int i = ((CommonToken)$t.getToken()).getTokenIndex();
-				$code.add("elementIndex", i);
-				generator.generateLocalFOLLOW($t,tokenLabel,currentRuleName,i);
-			}
-			$t.code = $code;
-		}
-
-	|	c=CHAR_LITERAL
-		{
-			if ( grammar.type==Grammar.LEXER )
-			{
-				$code = templates.getInstanceOf("charRef");
-				$code.add("char",
-				   generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,$c.text));
-				if ( label!=null )
-				{
-					$code.add("label", labelText);
-				}
-			}
-			else { // else it's a token type reference
-				$code = getTokenElementST("tokenRef", "char_literal", $c, astSuffix, labelText);
-				String tokenLabel = generator.getTokenTypeAsTargetLabel(grammar.getTokenType($c.text));
-				$code.add("token",tokenLabel);
-				if ( $c.terminalOptions!=null ) {
-					$code.add("terminalOptions",$c.terminalOptions);
-				}
-				int i = ((CommonToken)$c.getToken()).getTokenIndex();
-				$code.add("elementIndex", i);
-				generator.generateLocalFOLLOW($c,tokenLabel,currentRuleName,i);
-			}
-		}
-
-	|	s=STRING_LITERAL
-		{
-			int i = ((CommonToken)$s.getToken()).getTokenIndex();
-			if ( grammar.type==Grammar.LEXER )
-			{
-				$code = templates.getInstanceOf("lexerStringRef");
-				$code.add("string",
-					generator.target.getTargetStringLiteralFromANTLRStringLiteral(generator,$s.text));
-				$code.add("elementIndex", i);
-				if ( label!=null )
-				{
-					$code.add("label", labelText);
-				}
-			}
-			else
-			{
-				// else it's a token type reference
-				$code = getTokenElementST("tokenRef", "string_literal", $s, astSuffix, labelText);
-				String tokenLabel =
-					generator.getTokenTypeAsTargetLabel(grammar.getTokenType($s.text));
-				$code.add("token",tokenLabel);
-				if ( $s.terminalOptions!=null )
-				{
-					$code.add("terminalOptions",$s.terminalOptions);
-				}
-				$code.add("elementIndex", i);
-				generator.generateLocalFOLLOW($s,tokenLabel,currentRuleName,i);
-			}
-		}
-
-	|	w=WILDCARD
-		{
-			$code = getWildcardST($w,astSuffix,labelText);
-			$code.add("elementIndex", ((CommonToken)$w.getToken()).getTokenIndex());
-		}
-
-	|	^(DOT ID a=atom[$ID, label, astSuffix]) // scope override on rule or token
-		{ $code = $a.code; }
-
-	|	set[label,astSuffix]
-		{ $code = $set.code; }
-	;
-
-ast_suffix
-	:	ROOT
-	|	BANG
-	;
-
-set[GrammarAST label, GrammarAST astSuffix] returns [ST code=null]
-@init
-{
-	String labelText=null;
-	if ( $label!=null )
-	{
-		labelText = $label.getText();
-	}
-}
-	:	^(s=BLOCK .*) // only care that it's a BLOCK with setValue!=null
-		{
-			$code = getTokenElementST("matchSet", "set", $s, astSuffix, labelText);
-			int i = ((CommonToken)$s.getToken()).getTokenIndex();
-			$code.add("elementIndex", i);
-			if ( grammar.type!=Grammar.LEXER )
-			{
-				generator.generateLocalFOLLOW($s,"set",currentRuleName,i);
-			}
-			$code.add("s", generator.genSetExpr(templates,$s.getSetValue(),1,false));
-		}
-	;
-
-setElement
-	:	CHAR_LITERAL
-	|	TOKEN_REF
-	|	STRING_LITERAL
-	|	^(CHAR_RANGE CHAR_LITERAL CHAR_LITERAL)
-	;
-
-// REWRITE stuff
-
-rewrite returns [ST code=null]
-@init
-{
-	if ( state.backtracking == 0 )
-	{
-		if ( $start.getType()==REWRITES )
-		{
-			if ( generator.grammar.buildTemplate() )
-			{
-				$code = templates.getInstanceOf("rewriteTemplate");
-			}
-			else
-			{
-				$code = templates.getInstanceOf("rewriteCode");
-				$code.add("treeLevel", OUTER_REWRITE_NESTING_LEVEL);
-				$code.add("rewriteBlockLevel", OUTER_REWRITE_NESTING_LEVEL);
-				$code.add("referencedElementsDeep",
-								  getTokenTypesAsTargetLabels($start.rewriteRefsDeep));
-				Set<String> tokenLabels =
-					grammar.getLabels($start.rewriteRefsDeep, Grammar.TOKEN_LABEL);
-				Set<String> tokenListLabels =
-					grammar.getLabels($start.rewriteRefsDeep, Grammar.TOKEN_LIST_LABEL);
-				Set<String> ruleLabels =
-					grammar.getLabels($start.rewriteRefsDeep, Grammar.RULE_LABEL);
-				Set<String> ruleListLabels =
-					grammar.getLabels($start.rewriteRefsDeep, Grammar.RULE_LIST_LABEL);
-				Set<String> wildcardLabels =
-					grammar.getLabels($start.rewriteRefsDeep, Grammar.WILDCARD_TREE_LABEL);
-				Set<String> wildcardListLabels =
-					grammar.getLabels($start.rewriteRefsDeep, Grammar.WILDCARD_TREE_LIST_LABEL);
-				// just in case they ref $r for "previous value", make a stream
-				// from retval.tree
-				ST retvalST = templates.getInstanceOf("prevRuleRootRef");
-				ruleLabels.add(retvalST.render());
-				$code.add("referencedTokenLabels", tokenLabels);
-				$code.add("referencedTokenListLabels", tokenListLabels);
-				$code.add("referencedRuleLabels", ruleLabels);
-				$code.add("referencedRuleListLabels", ruleListLabels);
-				$code.add("referencedWildcardLabels", wildcardLabels);
-				$code.add("referencedWildcardListLabels", wildcardListLabels);
-			}
-		}
-		else
-		{
-				$code = templates.getInstanceOf("noRewrite");
-				$code.add("treeLevel", OUTER_REWRITE_NESTING_LEVEL);
-				$code.add("rewriteBlockLevel", OUTER_REWRITE_NESTING_LEVEL);
-		}
-	}
-}
-	:	^(	REWRITES
-			(
-				{rewriteRuleRefs = new HashSet<Object>();}
-				^( r=REWRITE (pred=SEMPRED)? alt=rewrite_alternative)
-				{
-					rewriteBlockNestingLevel = OUTER_REWRITE_NESTING_LEVEL;
-					List predChunks = null;
-					if ( $pred!=null )
-					{
-						//predText = #pred.getText();
-						predChunks = generator.translateAction(currentRuleName,$pred);
-					}
-					String description =
-						grammar.grammarTreeToString($r,false);
-					description = generator.target.getTargetStringLiteralFromString(description);
-					$code.addAggr("alts.{pred,alt,description}",
-									  predChunks,
-									  alt,
-									  description);
-					pred=null;
-				}
-			)*
-		)
-	|
-	;
-
-rewrite_block[String blockTemplateName] returns [ST code=null]
-@init
-{
-	rewriteBlockNestingLevel++;
-	ST save_currentBlockST = currentBlockST;
-	if ( state.backtracking == 0 )
-	{
-		$code = templates.getInstanceOf(blockTemplateName);
-		currentBlockST = $code;
-		$code.add("rewriteBlockLevel", rewriteBlockNestingLevel);
-	}
-}
-	:	^(	BLOCK
-			{
-				currentBlockST.add("referencedElementsDeep",
-					getTokenTypesAsTargetLabels($BLOCK.rewriteRefsDeep));
-				currentBlockST.add("referencedElements",
-					getTokenTypesAsTargetLabels($BLOCK.rewriteRefsShallow));
-			}
-			alt=rewrite_alternative
-			EOB
-		)
-		{
-			$code.add("alt", $alt.code);
-		}
-	;
-finally { rewriteBlockNestingLevel--; currentBlockST = save_currentBlockST; }
-
-rewrite_alternative returns [ST code=null]
-	:	{generator.grammar.buildAST()}?
-		^(	a=ALT {$code=templates.getInstanceOf("rewriteElementList");}
-			(	(
-					el=rewrite_element
-					{$code.addAggr("elements.{el,line,pos}",
-										$el.code,
-										$el.start.getLine(),
-										$el.start.getCharPositionInLine() + 1
-										);
-					}
-				)+
-			|	EPSILON
-				{$code.addAggr("elements.{el,line,pos}",
-								   templates.getInstanceOf("rewriteEmptyAlt"),
-								   $a.getLine(),
-								   $a.getCharPositionInLine() + 1
-								   );
-				}
-			)
-			EOA
-		 )
-
-	|	{generator.grammar.buildTemplate()}? rewrite_template
-		{ $code = $rewrite_template.code; }
-
-	|	// reproduce same input (only AST at moment)
-		ETC
-	;
-
-rewrite_element returns [ST code=null]
-@init
-{
-	IntSet elements=null;
-	GrammarAST ast = null;
-}
-	:	rewrite_atom[false]
-		{ $code = $rewrite_atom.code; }
-	|	rewrite_ebnf
-		{ $code = $rewrite_ebnf.code; }
-	|	rewrite_tree
-		{ $code = $rewrite_tree.code; }
-	;
-
-rewrite_ebnf returns [ST code=null]
-	:	^( OPTIONAL rewrite_block["rewriteOptionalBlock"] )
-		{ $code = $rewrite_block.code; }
-		{
-			String description = grammar.grammarTreeToString($start, false);
-			description = generator.target.getTargetStringLiteralFromString(description);
-			$code.add("description", description);
-		}
-	|	^( CLOSURE rewrite_block["rewriteClosureBlock"] )
-		{ $code = $rewrite_block.code; }
-		{
-			String description = grammar.grammarTreeToString($start, false);
-			description = generator.target.getTargetStringLiteralFromString(description);
-			$code.add("description", description);
-		}
-	|	^( POSITIVE_CLOSURE rewrite_block["rewritePositiveClosureBlock"] )
-		{ $code = $rewrite_block.code; }
-		{
-			String description = grammar.grammarTreeToString($start, false);
-			description = generator.target.getTargetStringLiteralFromString(description);
-			$code.add("description", description);
-		}
-	;
-
-rewrite_tree returns [ST code]
-@init
-{
-	rewriteTreeNestingLevel++;
-	if ( state.backtracking == 0 )
-	{
-		$code = templates.getInstanceOf("rewriteTree");
-		$code.add("treeLevel", rewriteTreeNestingLevel);
-		$code.add("enclosingTreeLevel", rewriteTreeNestingLevel-1);
-	}
-}
-	:	^(	TREE_BEGIN
-			r=rewrite_atom[true]
-			{
-				$code.addAggr("root.{el,line,pos}",
-								   $r.code,
-								   $r.start.getLine(),
-								   $r.start.getCharPositionInLine() + 1
-								  );
-			}
-			(
-			  el=rewrite_element
-			  {
-				$code.addAggr("children.{el,line,pos}",
-									$el.code,
-									$el.start.getLine(),
-									$el.start.getCharPositionInLine() + 1
-									);
-			  }
-			)*
-		)
-		{
-			String description = grammar.grammarTreeToString($start, false);
-			description = generator.target.getTargetStringLiteralFromString(description);
-			$code.add("description", description);
-		}
-	;
-finally { rewriteTreeNestingLevel--; }
-
-rewrite_atom[boolean isRoot] returns [ST code=null]
-	:   r=RULE_REF
-		{
-			String ruleRefName = $r.text;
-			String stName = "rewriteRuleRef";
-			if ( isRoot )
-			{
-				stName += "Root";
-			}
-			$code = templates.getInstanceOf(stName);
-			$code.add("rule", ruleRefName);
-			if ( grammar.getRule(ruleRefName)==null )
-			{
-				ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_RULE_REF,
-										  grammar,
-										  ((GrammarAST)($r)).getToken(),
-										  ruleRefName);
-				$code = new ST(""); // blank; no code gen
-			}
-			else if ( grammar.getRule(currentRuleName)
-						 .getRuleRefsInAlt(ruleRefName,outerAltNum)==null )
-			{
-				ErrorManager.grammarError(ErrorManager.MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS,
-										  grammar,
-										  ((GrammarAST)($r)).getToken(),
-										  ruleRefName);
-				$code = new ST(""); // blank; no code gen
-			}
-			else
-			{
-				// track all rule refs as we must copy 2nd ref to rule and beyond
-				if ( !rewriteRuleRefs.contains(ruleRefName) )
-				{
-					rewriteRuleRefs.add(ruleRefName);
-				}
-			}
-		}
-
-	|
-		(	^(tk=TOKEN_REF (arg=ARG_ACTION)?)
-		|	cl=CHAR_LITERAL
-		|	sl=STRING_LITERAL
-		)
-		{
-			GrammarAST term = $tk;
-			if (term == null) term = $cl;
-			if (term == null) term = $sl;
-			String tokenName = $start.getToken().getText();
-			String stName = "rewriteTokenRef";
-			Rule rule = grammar.getRule(currentRuleName);
-			Collection<String> tokenRefsInAlt = rule.getTokenRefsInAlt(outerAltNum);
-			boolean createNewNode = !tokenRefsInAlt.contains(tokenName) || $arg!=null;
-			if ( createNewNode )
-			{
-				stName = "rewriteImaginaryTokenRef";
-			}
-			if ( isRoot )
-			{
-				stName += "Root";
-			}
-			$code = templates.getInstanceOf(stName);
-			$code.add("terminalOptions",term.terminalOptions);
-			if ( $arg!=null )
-			{
-				List args = generator.translateAction(currentRuleName,$arg);
-				$code.add("args", args);
-			}
-			$code.add("elementIndex", ((CommonToken)$start.getToken()).getTokenIndex());
-			int ttype = grammar.getTokenType(tokenName);
-			String tok = generator.getTokenTypeAsTargetLabel(ttype);
-			$code.add("token", tok);
-			if ( grammar.getTokenType(tokenName)==Label.INVALID )
-			{
-				ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE,
-										  grammar,
-										  ((GrammarAST)($start)).getToken(),
-										  tokenName);
-				$code = new ST(""); // blank; no code gen
-			}
-		}
-
-	|	LABEL
-		{
-			String labelName = $LABEL.text;
-			Rule rule = grammar.getRule(currentRuleName);
-			Grammar.LabelElementPair pair = rule.getLabel(labelName);
-			if ( labelName.equals(currentRuleName) )
-			{
-				// special case; ref to old value via $ rule
-				if ( rule.hasRewrite(outerAltNum) &&
-					 rule.getRuleRefsInAlt(outerAltNum).contains(labelName) )
-				{
-					ErrorManager.grammarError(ErrorManager.MSG_RULE_REF_AMBIG_WITH_RULE_IN_ALT,
-											  grammar,
-											  ((GrammarAST)($LABEL)).getToken(),
-											  labelName);
-				}
-				ST labelST = templates.getInstanceOf("prevRuleRootRef");
-				$code = templates.getInstanceOf("rewriteRuleLabelRef"+(isRoot?"Root":""));
-				$code.add("label", labelST);
-			}
-			else if ( pair==null )
-			{
-				ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_LABEL_REF_IN_REWRITE,
-										  grammar,
-										  ((GrammarAST)($LABEL)).getToken(),
-										  labelName);
-				$code = new ST("");
-			}
-			else
-			{
-				String stName = null;
-				switch ( pair.type )
-				{
-				case Grammar.TOKEN_LABEL :
-					stName = "rewriteTokenLabelRef";
-					break;
-				case Grammar.WILDCARD_TREE_LABEL :
-					stName = "rewriteWildcardLabelRef";
-					break;
-				case Grammar.WILDCARD_TREE_LIST_LABEL:
-					stName = "rewriteRuleListLabelRef"; // acts like rule ref list for ref
-					break;
-				case Grammar.RULE_LABEL :
-					stName = "rewriteRuleLabelRef";
-					break;
-				case Grammar.TOKEN_LIST_LABEL :
-					stName = "rewriteTokenListLabelRef";
-					break;
-				case Grammar.RULE_LIST_LABEL :
-					stName = "rewriteRuleListLabelRef";
-					break;
-				}
-				if ( isRoot )
-				{
-					stName += "Root";
-				}
-				$code = templates.getInstanceOf(stName);
-				$code.add("label", labelName);
-			}
-		}
-
-	|	ACTION
-		{
-			// actions in rewrite rules yield a tree object
-			String actText = $ACTION.text;
-			List chunks = generator.translateAction(currentRuleName,$ACTION);
-			$code = templates.getInstanceOf("rewriteNodeAction"+(isRoot?"Root":""));
-			$code.add("action", chunks);
-		}
-	;
-
-public
-rewrite_template returns [ST code=null]
-	:	^( ALT EPSILON EOA ) {$code=templates.getInstanceOf("rewriteEmptyTemplate");}
-	|	^(	TEMPLATE (id=ID|ind=ACTION)
-			{
-				if ( $id!=null && $id.text.equals("template") )
-				{
-						$code = templates.getInstanceOf("rewriteInlineTemplate");
-				}
-				else if ( $id!=null )
-				{
-						$code = templates.getInstanceOf("rewriteExternalTemplate");
-						$code.add("name", $id.text);
-				}
-				else if ( $ind!=null )
-				{ // must be \%({expr})(args)
-					$code = templates.getInstanceOf("rewriteIndirectTemplate");
-					List chunks=generator.translateAction(currentRuleName,$ind);
-					$code.add("expr", chunks);
-				}
-			}
-			^(	ARGLIST
-				(	^( ARG arg=ID a=ACTION
-					{
-						// must set alt num here rather than in define.g
-						// because actions like \%foo(name={\$ID.text}) aren't
-						// broken up yet into trees.
-						$a.outerAltNum = this.outerAltNum;
-						List chunks = generator.translateAction(currentRuleName,$a);
-						$code.addAggr("args.{name,value}", $arg.text, chunks);
-					}
-					)
-				)*
-			)
-			(	DOUBLE_QUOTE_STRING_LITERAL
-				{
-					String sl = $DOUBLE_QUOTE_STRING_LITERAL.text;
-					String t = sl.substring( 1, sl.length() - 1 ); // strip quotes
-					t = generator.target.getTargetStringLiteralFromString(t);
-					$code.add("template",t);
-				}
-			|	DOUBLE_ANGLE_STRING_LITERAL
-				{
-					String sl = $DOUBLE_ANGLE_STRING_LITERAL.text;
-					String t = sl.substring( 2, sl.length() - 2 ); // strip double angle quotes
-					t = generator.target.getTargetStringLiteralFromString(t);
-					$code.add("template",t);
-				}
-			)?
-		)
-
-	|	act=ACTION
-		{
-			// set alt num for same reason as ARGLIST above
-			$act.outerAltNum = this.outerAltNum;
-			$code=templates.getInstanceOf("rewriteAction");
-			$code.add("action",
-							  generator.translateAction(currentRuleName,$act));
-		}
-	;
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/DefineGrammarItemsWalker.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/DefineGrammarItemsWalker.g
deleted file mode 100644
index a47ba8b..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/DefineGrammarItemsWalker.g
+++ /dev/null
@@ -1,700 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2011 Terence Parr
- All rights reserved.
-
- Grammar conversion to ANTLR v3:
- Copyright (c) 2011 Sam Harwell
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-	notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-	notice, this list of conditions and the following disclaimer in the
-	documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-	derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-tree grammar DefineGrammarItemsWalker;
-
-options {
-	tokenVocab = ANTLR;
-	ASTLabelType = GrammarAST;
-}
-
-scope AttributeScopeActions {
-	HashMap<GrammarAST, GrammarAST> actions;
-}
-
-@header {
-package org.antlr.grammar.v3;
-import org.antlr.tool.*;
-import java.util.HashSet;
-import java.util.Set;
-}
-
-@members {
-protected Grammar grammar;
-protected GrammarAST root;
-protected String currentRuleName;
-protected GrammarAST currentRewriteBlock;
-protected GrammarAST currentRewriteRule;
-protected int outerAltNum = 0;
-protected int blockLevel = 0;
-
-public final int countAltsForRule( CommonTree t ) {
-    CommonTree block = (CommonTree)t.getFirstChildWithType(BLOCK);
-    int altCount = 0;
-    for (int i = 0; i < block.getChildCount(); i++) {
-        if (block.getChild(i).getType() == ALT)
-            altCount++;
-    }
-    return altCount;
-}
-
-protected final void finish() {
-    trimGrammar();
-}
-
-/** Remove any lexer rules from a COMBINED; already passed to lexer */
-protected final void trimGrammar() {
-    if ( grammar.type != Grammar.COMBINED ) {
-        return;
-    }
-    // form is (header ... ) ( grammar ID (scope ...) ... ( rule ... ) ( rule ... ) ... )
-    GrammarAST p = root;
-    // find the grammar spec
-    while ( !p.getText().equals( "grammar" ) ) {
-        p = (GrammarAST)p.getNextSibling();
-    }
-    for ( int i = 0; i < p.getChildCount(); i++ ) {
-        if ( p.getChild( i ).getType() != RULE )
-            continue;
-
-        String ruleName = p.getChild(i).getChild(0).getText();
-        //Console.Out.WriteLine( "rule " + ruleName + " prev=" + prev.getText() );
-        if (Rule.getRuleType(ruleName) == Grammar.LEXER) {
-            // remove lexer rule
-            p.deleteChild( i );
-            i--;
-        }
-    }
-    //Console.Out.WriteLine( "root after removal is: " + root.ToStringList() );
-}
-
-protected final void trackInlineAction( GrammarAST actionAST ) {
-    Rule r = grammar.getRule( currentRuleName );
-    if ( r != null ) {
-        r.trackInlineAction( actionAST );
-    }
-}
-}
-
-public
-grammar_[Grammar g]
-@init
-{
-grammar = $g;
-root = $start;
-}
-@after
-{
-finish();
-}
-	:	^( LEXER_GRAMMAR	{grammar.type = Grammar.LEXER;} 		grammarSpec )
-	|	^( PARSER_GRAMMAR	{grammar.type = Grammar.PARSER;}		grammarSpec )
-	|	^( TREE_GRAMMAR		{grammar.type = Grammar.TREE_PARSER;}	grammarSpec )
-	|	^( COMBINED_GRAMMAR	{grammar.type = Grammar.COMBINED;}		grammarSpec )
-	;
-
-attrScope
-scope AttributeScopeActions;
-@init
-{
-	$AttributeScopeActions::actions = new HashMap<GrammarAST, GrammarAST>();
-}
-	:	^( 'scope' name=ID attrScopeAction* attrs=ACTION )
-		{
-			AttributeScope scope = grammar.defineGlobalScope($name.text,$attrs.getToken());
-			scope.isDynamicGlobalScope = true;
-			scope.addAttributes($attrs.text, ';');
-			for (GrammarAST action : $AttributeScopeActions::actions.keySet())
-				scope.defineNamedAction(action, $AttributeScopeActions::actions.get(action));
-		}
-	;
-
-attrScopeAction
-	:	^(AMPERSAND ID ACTION)
-		{
-			$AttributeScopeActions::actions.put( $ID, $ACTION );
-		}
-	;
-
-grammarSpec
-	:	id=ID
-		(cmt=DOC_COMMENT)?
-		( optionsSpec )?
-		(delegateGrammars)?
-		(tokensSpec)?
-		(attrScope)*
-		(actions)?
-		rules
-	;
-
-actions
-	:	( action )+
-	;
-
-action
-@init
-{
-	String scope=null;
-	GrammarAST nameAST=null, actionAST=null;
-}
-	:	^(amp=AMPERSAND id1=ID
-			( id2=ID a1=ACTION
-			  {scope=$id1.text; nameAST=$id2; actionAST=$a1;}
-			| a2=ACTION
-			  {scope=null; nameAST=$id1; actionAST=$a2;}
-			)
-		 )
-		 {
-		 grammar.defineNamedAction($amp,scope,nameAST,actionAST);
-		 }
-	;
-
-optionsSpec
-	:	^(OPTIONS .*)
-	;
-
-delegateGrammars
-	:	^( 'import' ( ^(ASSIGN ID ID) | ID )+ )
-	;
-
-tokensSpec
-	:	^(TOKENS tokenSpec*)
-	;
-
-tokenSpec
-	:	t=TOKEN_REF
-	|	^(	ASSIGN
-			TOKEN_REF
-			(	STRING_LITERAL
-			|	CHAR_LITERAL
-			)
-		 )
-	;
-
-rules
-	:	(rule | ^(PREC_RULE .*))+
-	;
-
-rule
-@init
-{
-	String name=null;
-	Map<String, Object> opts=null;
-	Rule r = null;
-}
-	:		^( RULE id=ID {opts = $RULE.getBlockOptions();}
-			(modifier)?
-			^( ARG (args=ARG_ACTION)? )
-			^( RET (ret=ARG_ACTION)? )
-			(throwsSpec)?
-			(optionsSpec)?
-			{
-				name = $id.text;
-				currentRuleName = name;
-				if ( Rule.getRuleType(name) == Grammar.LEXER && grammar.type==Grammar.COMBINED )
-				{
-					// a merged grammar spec, track lexer rules and send to another grammar
-					grammar.defineLexerRuleFoundInParser($id.getToken(), $start);
-				}
-				else
-				{
-					int numAlts = countAltsForRule($start);
-					grammar.defineRule($id.getToken(), $modifier.mod, opts, $start, $args, numAlts);
-					r = grammar.getRule(name);
-					if ( $args!=null )
-					{
-						r.parameterScope = grammar.createParameterScope(name,$args.getToken());
-						r.parameterScope.addAttributes($args.text, ',');
-					}
-					if ( $ret!=null )
-					{
-						r.returnScope = grammar.createReturnScope(name,$ret.getToken());
-						r.returnScope.addAttributes($ret.text, ',');
-					}
-					if ( $throwsSpec.exceptions != null )
-					{
-						for (String exception : $throwsSpec.exceptions)
-							r.throwsSpec.add( exception );
-					}
-				}
-			}
-			(ruleScopeSpec[r])?
-			(ruleAction[r])*
-			{ this.blockLevel=0; }
-			b=block
-			(exceptionGroup)?
-			EOR
-			{
-				// copy rule options into the block AST, which is where
-				// the analysis will look for k option etc...
-				$b.start.setBlockOptions(opts);
-			}
-		)
-	;
-
-ruleAction[Rule r]
-	:	^(amp=AMPERSAND id=ID a=ACTION ) {if (r!=null) r.defineNamedAction($amp,$id,$a);}
-	;
-
-modifier returns [String mod]
-@init
-{
-	$mod = $start.getToken().getText();
-}
-	:	'protected'
-	|	'public'
-	|	'private'
-	|	'fragment'
-	;
-
-throwsSpec returns [HashSet<String> exceptions]
-@init
-{
-	$exceptions = new HashSet<String>();
-}
-	:	^('throws' (ID {$exceptions.add($ID.text);})+ )
-	;
-
-ruleScopeSpec[Rule r]
-scope AttributeScopeActions;
-@init
-{
-	$AttributeScopeActions::actions = new HashMap<GrammarAST, GrammarAST>();
-}
-	:	^(	'scope'
-			(	attrScopeAction* attrs=ACTION
-				{
-					r.ruleScope = grammar.createRuleScope(r.name,$attrs.getToken());
-					r.ruleScope.isDynamicRuleScope = true;
-					r.ruleScope.addAttributes($attrs.text, ';');
-					for (GrammarAST action : $AttributeScopeActions::actions.keySet())
-						r.ruleScope.defineNamedAction(action, $AttributeScopeActions::actions.get(action));
-				}
-			)?
-			(	uses=ID
-				{
-					if ( grammar.getGlobalScope($uses.text)==null ) {
-					ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE,
-					grammar,
-					$uses.getToken(),
-					$uses.text);
-					}
-					else {
-					if ( r.useScopes==null ) {r.useScopes=new ArrayList<String>();}
-					r.useScopes.add($uses.text);
-					}
-				}
-			)*
-		)
-	;
-
-block
-@init
-{
-	// must run during backtracking
-	this.blockLevel++;
-	if ( blockLevel == 1 )
-		this.outerAltNum=1;
-}
-	:	^(	BLOCK
-			(optionsSpec)?
-			(blockAction)*
-			(	alternative rewrite
-				{{
-					if ( this.blockLevel == 1 )
-						this.outerAltNum++;
-				}}
-			)+
-			EOB
-		 )
-	;
-finally { blockLevel--; }
-
-// TODO: this does nothing now! subrules cannot have init actions. :(
-blockAction
-	:	^(amp=AMPERSAND id=ID a=ACTION ) // {r.defineAction(#amp,#id,#a);}
-	;
-
-alternative
-//@init
-//{
-//	if ( state.backtracking == 0 )
-//	{
-//		if ( grammar.type!=Grammar.LEXER && grammar.GetOption("output")!=null && blockLevel==1 )
-//		{
-//			GrammarAST aRewriteNode = $start.FindFirstType(REWRITE); // alt itself has rewrite?
-//			GrammarAST rewriteAST = (GrammarAST)$start.Parent.getChild($start.ChildIndex + 1);
-//			// we have a rewrite if alt uses it inside subrule or this alt has one
-//			// but don't count -> ... rewrites, which mean "do default auto construction"
-//			if ( aRewriteNode!=null||
-//				 (firstRewriteAST!=null &&
-//				  firstRewriteAST.getType()==REWRITE &&
-//				  firstRewriteAST.getChild(0)!=null &&
-//				  firstRewriteAST.getChild(0).getType()!=ETC) )
-//			{
-//				Rule r = grammar.getRule(currentRuleName);
-//				r.TrackAltsWithRewrites($start,this.outerAltNum);
-//			}
-//		}
-//	}
-//}
-	:	^( ALT (element)+ EOA )
-	;
-
-exceptionGroup
-	:	( exceptionHandler )+ (finallyClause)?
-	|	finallyClause
-	;
-
-exceptionHandler
-	:   ^('catch' ARG_ACTION ACTION) {trackInlineAction($ACTION);}
-	;
-
-finallyClause
-	:    ^('finally' ACTION) {trackInlineAction($ACTION);}
-	;
-
-element
-	:   ^(ROOT element)
-	|   ^(BANG element)
-	|   atom[null]
-	|   ^(NOT element)
-	|   ^(RANGE atom[null] atom[null])
-	|   ^(CHAR_RANGE atom[null] atom[null])
-	|	^(	ASSIGN id=ID el=element)
-			{
-				GrammarAST e = $el.start;
-				if ( e.getType()==ANTLRParser.ROOT || e.getType()==ANTLRParser.BANG )
-				{
-					e = (GrammarAST)e.getChild(0);
-				}
-				if ( e.getType()==RULE_REF)
-				{
-					grammar.defineRuleRefLabel(currentRuleName,$id.getToken(),e);
-				}
-				else if ( e.getType()==WILDCARD && grammar.type==Grammar.TREE_PARSER )
-				{
-					grammar.defineWildcardTreeLabel(currentRuleName,$id.getToken(),e);
-				}
-				else
-				{
-					grammar.defineTokenRefLabel(currentRuleName,$id.getToken(),e);
-				}
-			}
-	|	^(	PLUS_ASSIGN id2=ID a2=element
-			{
-				GrammarAST a = $a2.start;
-				if ( a.getType()==ANTLRParser.ROOT || a.getType()==ANTLRParser.BANG )
-				{
-					a = (GrammarAST)a.getChild(0);
-				}
-				if ( a.getType()==RULE_REF )
-				{
-					grammar.defineRuleListLabel(currentRuleName,$id2.getToken(),a);
-				}
-				else if ( a.getType() == WILDCARD && grammar.type == Grammar.TREE_PARSER )
-				{
-					grammar.defineWildcardTreeListLabel( currentRuleName, $id2.getToken(), a );
-				}
-				else
-				{
-					grammar.defineTokenListLabel(currentRuleName,$id2.getToken(),a);
-				}
-			}
-		 )
-	|   ebnf
-	|   tree_
-	|   ^( SYNPRED block )
-	|   act=ACTION
-		{
-			$act.outerAltNum = this.outerAltNum;
-			trackInlineAction($act);
-		}
-	|   act2=FORCED_ACTION
-		{
-			$act2.outerAltNum = this.outerAltNum;
-			trackInlineAction($act2);
-		}
-	|   SEMPRED
-		{
-			$SEMPRED.outerAltNum = this.outerAltNum;
-			trackInlineAction($SEMPRED);
-		}
-	|   SYN_SEMPRED
-	|   ^(BACKTRACK_SEMPRED .*)
-	|   GATED_SEMPRED
-		{
-			$GATED_SEMPRED.outerAltNum = this.outerAltNum;
-			trackInlineAction($GATED_SEMPRED);
-		}
-	|   EPSILON 
-	;
-
-ebnf
-	:	(dotLoop) => dotLoop // .* or .+
-	|	block
-	|	^( OPTIONAL block )
-	|	^( CLOSURE block )
-	|	^( POSITIVE_CLOSURE block )
-	;
-
-/** Track the .* and .+ idioms and make them nongreedy by default.
- */
-dotLoop
-	:	(	^( CLOSURE dotBlock )
-		|	^( POSITIVE_CLOSURE dotBlock )
-		)
-		{
-			GrammarAST block = (GrammarAST)$start.getChild(0);
-			Map<String, Object> opts = new HashMap<String, Object>();
-			opts.put("greedy", "false");
-			if ( grammar.type!=Grammar.LEXER )
-			{
-				// parser grammars assume k=1 for .* loops
-				// otherwise they (analysis?) look til EOF!
-				opts.put("k", 1);
-			}
-			block.setOptions(grammar,opts);
-		}
-	;
-
-dotBlock
-	:	^( BLOCK ^( ALT WILDCARD EOA ) EOB )
-	;
-
-tree_
-	:	^(TREE_BEGIN element+)
-	;
-
-atom[GrammarAST scope_]
-	:	^( rr=RULE_REF (rarg=ARG_ACTION)? )
-		{
-			grammar.altReferencesRule( currentRuleName, $scope_, $rr, this.outerAltNum );
-			if ( $rarg != null )
-			{
-				$rarg.outerAltNum = this.outerAltNum;
-				trackInlineAction($rarg);
-			}
-		}
-	|	^( t=TOKEN_REF (targ=ARG_ACTION )? )
-		{
-			if ( $targ != null )
-			{
-				$targ.outerAltNum = this.outerAltNum;
-				trackInlineAction($targ);
-			}
-			if ( grammar.type == Grammar.LEXER )
-			{
-				grammar.altReferencesRule( currentRuleName, $scope_, $t, this.outerAltNum );
-			}
-			else
-			{
-				grammar.altReferencesTokenID( currentRuleName, $t, this.outerAltNum );
-			}
-		}
-	|	c=CHAR_LITERAL
-		{
-			if ( grammar.type != Grammar.LEXER )
-			{
-				Rule rule = grammar.getRule(currentRuleName);
-				if ( rule != null )
-					rule.trackTokenReferenceInAlt($c, outerAltNum);
-			}
-		}
-	|	s=STRING_LITERAL 
-		{
-			if ( grammar.type != Grammar.LEXER )
-			{
-				Rule rule = grammar.getRule(currentRuleName);
-				if ( rule!=null )
-					rule.trackTokenReferenceInAlt($s, outerAltNum);
-			}
-		}
-	|	WILDCARD
-	|	^(DOT ID atom[$ID]) // scope override on rule
-	;
-
-ast_suffix
-	:	ROOT
-	|	BANG
-	;
-
-rewrite
-@init
-{
-	// track top level REWRITES node, store stuff there
-	currentRewriteRule = $start; // has to execute during backtracking
-	if ( state.backtracking == 0 )
-	{
-		if ( grammar.buildAST() )
-			currentRewriteRule.rewriteRefsDeep = new HashSet<GrammarAST>();
-	}
-}
-	:	^(	REWRITES
-			(	^( REWRITE (pred=SEMPRED)? rewrite_alternative )
-				{
-					if ( $pred != null )
-					{
-						$pred.outerAltNum = this.outerAltNum;
-						trackInlineAction($pred);
-					}
-				}
-			)*
-		)
-		//{System.out.println("-> refs = "+currentRewriteRule.rewriteRefsDeep);}
-	|
-	;
-
-rewrite_block
-@init
-{
-	GrammarAST enclosingBlock = currentRewriteBlock;
-	if ( state.backtracking == 0 )
-	{
-		// don't do if guessing
-		currentRewriteBlock=$start; // pts to BLOCK node
-		currentRewriteBlock.rewriteRefsShallow = new HashSet<GrammarAST>();
-		currentRewriteBlock.rewriteRefsDeep = new HashSet<GrammarAST>();
-	}
-}
-	:   ^( BLOCK rewrite_alternative EOB )
-		//{System.out.println("atoms="+currentRewriteBlock.rewriteRefs);}
-		{
-			// copy the element refs in this block to the surrounding block
-			if ( enclosingBlock != null )
-			{
-				for (GrammarAST item : currentRewriteBlock.rewriteRefsShallow)
-					enclosingBlock.rewriteRefsDeep.add( item );
-			}
-			//currentRewriteBlock = enclosingBlock; // restore old BLOCK ptr
-		}
-	;
-finally { currentRewriteBlock = enclosingBlock; }
-
-rewrite_alternative
-	:	{grammar.buildAST()}? => ^( a=ALT ( ( rewrite_element )+ | EPSILON ) EOA )
-	|	{grammar.buildTemplate()}? => rewrite_template
-	|	ETC {this.blockLevel==1}? // only valid as outermost rewrite
-	;
-
-rewrite_element
-	:	rewrite_atom
-	|	rewrite_ebnf
-	|	rewrite_tree
-	;
-
-rewrite_ebnf
-	:	^( OPTIONAL rewrite_block )
-	|	^( CLOSURE rewrite_block )
-	|	^( POSITIVE_CLOSURE rewrite_block )
-	;
-
-rewrite_tree
-	:   ^(	TREE_BEGIN rewrite_atom ( rewrite_element )* )
-	;
-
-rewrite_atom
-@init
-{
-	if ( state.backtracking == 0 )
-	{
-		Rule r = grammar.getRule(currentRuleName);
-		Set tokenRefsInAlt = r.getTokenRefsInAlt(outerAltNum);
-		boolean imaginary =
-			$start.getType()==TOKEN_REF &&
-			!tokenRefsInAlt.contains($start.getText());
-		if ( !imaginary && grammar.buildAST() &&
-			 ($start.getType()==RULE_REF ||
-			  $start.getType()==LABEL ||
-			  $start.getType()==TOKEN_REF ||
-			  $start.getType()==CHAR_LITERAL ||
-			  $start.getType()==STRING_LITERAL) )
-		{
-			// track per block and for entire rewrite rule
-			if ( currentRewriteBlock!=null )
-			{
-				currentRewriteBlock.rewriteRefsShallow.add($start);
-				currentRewriteBlock.rewriteRefsDeep.add($start);
-			}
-
-			//System.out.println("adding "+$start.getText()+" to "+currentRewriteRule.getText());
-			currentRewriteRule.rewriteRefsDeep.add($start);
-		}
-	}
-}
-	:	RULE_REF 
-	|	(	^(	TOKEN_REF
-				(	ARG_ACTION
-					{
-						$ARG_ACTION.outerAltNum = this.outerAltNum;
-						trackInlineAction($ARG_ACTION);
-					}
-				)?
-			)
-		|	CHAR_LITERAL
-		|	STRING_LITERAL
-		)
-	|	LABEL
-	|	ACTION
-		{
-			$ACTION.outerAltNum = this.outerAltNum;
-			trackInlineAction($ACTION);
-		}
-	;
-
-rewrite_template
-	:	^(	ALT EPSILON EOA )
-	|	^(	TEMPLATE (id=ID|ind=ACTION)
-			^( ARGLIST
-				(	^( ARG arg=ID a=ACTION )
-					{
-						$a.outerAltNum = this.outerAltNum;
-						trackInlineAction($a);
-					}
-				)*
-			)
-			{
-				if ( $ind!=null )
-				{
-					$ind.outerAltNum = this.outerAltNum;
-					trackInlineAction($ind);
-				}
-			}
-			(	DOUBLE_QUOTE_STRING_LITERAL
-			|	DOUBLE_ANGLE_STRING_LITERAL
-			)?
-		)
-	|	act=ACTION
-		{
-			$act.outerAltNum = this.outerAltNum;
-			trackInlineAction($act);
-		}
-	;
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/LeftRecursiveRuleWalker.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/LeftRecursiveRuleWalker.g
deleted file mode 100644
index 537bd12..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/LeftRecursiveRuleWalker.g
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Grammar conversion to ANTLR v3:
- * Copyright (c) 2011 Sam Harwell
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** Find left-recursive rules */
-tree grammar LeftRecursiveRuleWalker;
-
-options {
-	tokenVocab=ANTLR;
-    ASTLabelType=GrammarAST;
-}
-
-@header {
-package org.antlr.grammar.v3;
-
-import org.antlr.analysis.*;
-import org.antlr.misc.*;
-import org.antlr.tool.*;
-
-import org.antlr.runtime.BitSet;
-import org.antlr.runtime.DFA;
-}
-
-@members {
-protected Grammar grammar;
-private String ruleName;
-private int outerAlt; // which outer alt of rule?
-public int numAlts;  // how many alts for this rule total?
-
-@Override
-public void reportError(RecognitionException ex)
-{
-    Token token = null;
-    if (ex instanceof MismatchedTokenException)
-    {
-        token = ((MismatchedTokenException)ex).token;
-    }
-    else if (ex instanceof NoViableAltException)
-    {
-        token = ((NoViableAltException)ex).token;
-    }
-
-    ErrorManager.syntaxError(
-        ErrorManager.MSG_SYNTAX_ERROR,
-        grammar,
-        token,
-        "assign.types: " + ex.toString(),
-        ex);
-}
-
-public void setTokenPrec(GrammarAST t, int alt) {}
-public void binaryAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {}
-public void ternaryAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {}
-public void prefixAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {}
-public void suffixAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {}
-public void otherAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {}
-public void setReturnValues(GrammarAST t) {}
-}
-
-optionsSpec
-	:	^(OPTIONS option+)
-	;
-
-option
-	:	^(ASSIGN ID optionValue)
-	;
-
-optionValue
-	:	ID
-	|	STRING_LITERAL
-	|	CHAR_LITERAL
-	|	INT
-	;
-
-charSetElement
-	:	CHAR_LITERAL
-	|	^(OR CHAR_LITERAL CHAR_LITERAL)
-	|	^(RANGE CHAR_LITERAL CHAR_LITERAL)
-	;
-
-public
-rec_rule[Grammar g] returns [boolean isLeftRec]
-@init
-{
-	grammar = g;
-	outerAlt = 1;
-}
-	:	^(	r=RULE id=ID {ruleName=$id.getText();}
-			modifier?
-			^(ARG ARG_ACTION?)
-			^(RET ARG_ACTION?)
-			optionsSpec?
-			ruleScopeSpec?
-			(^(AMPERSAND .*))*
-			ruleBlock {$isLeftRec = $ruleBlock.isLeftRec;}
-			exceptionGroup?
-			EOR
-		)
-		{if ($ruleBlock.isLeftRec) $r.setType(PREC_RULE);}
-	;
-
-modifier
-	:	'protected'
-	|	'public'
-	|	'private'
-	|	'fragment'
-	;
-
-ruleScopeSpec
- 	:	^('scope' ACTION? ID*)
- 	;
-
-ruleBlock returns [boolean isLeftRec]
-@init{boolean lr=false; this.numAlts = $start.getChildCount();}
-	:	^(	BLOCK
-			optionsSpec?
-			(	outerAlternative
-				{if ($outerAlternative.isLeftRec) $isLeftRec = true;}
-				rewrite?
-				{outerAlt++;}
-			)+
-			EOB
-		)
-	;
-
-block
-    :   ^(  BLOCK
-            optionsSpec?
-            ( ^(ALT element+ EOA) rewrite? )+
-            EOB   
-         )
-    ;
-
-/** An alt is either prefix, suffix, binary, or ternary operation or "other" */
-outerAlternative returns [boolean isLeftRec]
-@init
-{
-GrammarAST rew=(GrammarAST)$start.getNextSibling();
-if (rew.getType() != REWRITES)
-	rew = null;
-}
-    :   (binaryMultipleOp)=> binaryMultipleOp
-                             {binaryAlt($start, rew, outerAlt); $isLeftRec=true;}
-    |   (binary)=>           binary       
-                             {binaryAlt($start, rew, outerAlt); $isLeftRec=true;}
-    |   (ternary)=>          ternary
-                             {ternaryAlt($start, rew, outerAlt); $isLeftRec=true;}
-    |   (prefix)=>           prefix
-                             {prefixAlt($start, rew, outerAlt);}
-    |   (suffix)=>           suffix
-                             {suffixAlt($start, rew, outerAlt); $isLeftRec=true;}
-    |   ^(ALT element+ EOA) // "other" case
-                             {otherAlt($start, rew, outerAlt);}
-    ;
-
-binary
-	:	^( ALT (^(BACKTRACK_SEMPRED .*))? recurseNoLabel op=token recurse EOA ) {setTokenPrec($op.t, outerAlt);}
-	;
-
-binaryMultipleOp
-	:	^( ALT (^(BACKTRACK_SEMPRED .*))? recurseNoLabel ^( BLOCK ( ^( ALT op=token EOA {setTokenPrec($op.t, outerAlt);} ) )+ EOB ) recurse EOA )
-	;
-
-ternary
-	:	^( ALT (^(BACKTRACK_SEMPRED .*))? recurseNoLabel op=token recurse token recurse EOA ) {setTokenPrec($op.t, outerAlt);}
-	;
-
-prefix : ^( ALT (^(BACKTRACK_SEMPRED .*))? {setTokenPrec((GrammarAST)input.LT(1), outerAlt);} ({!((CommonTree)input.LT(1)).getText().equals(ruleName)}? element)+ recurse EOA ) ;
-
-suffix : ^( ALT (^(BACKTRACK_SEMPRED .*))? recurseNoLabel {setTokenPrec((GrammarAST)input.LT(1), outerAlt);} element+  EOA ) ;
-
-recurse
-	:	^(ASSIGN ID recurseNoLabel)
-	|	^(PLUS_ASSIGN ID recurseNoLabel)
-	|	recurseNoLabel
-	;
-
-recurseNoLabel : {((CommonTree)input.LT(1)).getText().equals(ruleName)}? RULE_REF;
-
-/*
-elementNotRecursiveRule
-    :   {_t.findFirstType(RULE_REF)!=null && _t.findFirstType(RULE_REF).getText().equals(ruleName)}?
-        e:element
-    ;
-*/
-
-token returns [GrammarAST t=null]
-	:	^(ASSIGN ID s=token {$t = $s.t;})
-	|	^(PLUS_ASSIGN ID s=token {$t = $s.t;})
-	|	^(ROOT s=token {$t = $s.t;})
-	|	^(BANG s=token {$t = $s.t;})
-	|	a=CHAR_LITERAL      {$t = $a;}
-	|	b=STRING_LITERAL    {$t = $b;}
-	|	c=TOKEN_REF         {$t = $c;}
-	;
-
-exceptionGroup
-	:	exceptionHandler+ finallyClause?
-	|	finallyClause
-    ;
-
-exceptionHandler
-	:	^('catch' ARG_ACTION ACTION)
-	;
-
-finallyClause
-	:	^('finally' ACTION)
-	;
-
-rewrite
-	:	^(REWRITES ( ^( REWRITE SEMPRED? (^(ALT .*)|^(TEMPLATE .*)|ACTION|ETC) ) )* )
-	;
-
-element
-	:	^(ROOT element)
-	|	^(BANG element)
-	|	atom
-	|	^(NOT element)
-	|	^(RANGE atom atom)
-	|	^(ASSIGN ID element)
-	|	^(PLUS_ASSIGN ID element)
-	|	ebnf
-	|	tree_
-	|	^(SYNPRED block) 
-	|	FORCED_ACTION
-	|	ACTION
-	|	SEMPRED
-	|	SYN_SEMPRED
-	|	BACKTRACK_SEMPRED
-	|	GATED_SEMPRED
-	|	EPSILON 
-	;
-
-ebnf:   block
-    |   ^( OPTIONAL block ) 
-    |   ^( CLOSURE block )  
-    |   ^( POSITIVE_CLOSURE block ) 
-    ;
-
-tree_
-	:	^(TREE_BEGIN element+)
-	;
-
-atom
-	:	^(RULE_REF ARG_ACTION?)
-	|	^(TOKEN_REF ARG_ACTION?)
-	|	CHAR_LITERAL
-	|	STRING_LITERAL
-	|	WILDCARD
-	|	^(DOT ID atom) // scope override on rule
-	;
-
-ast_suffix
-	:	ROOT
-	|	BANG
-	;
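The tree grammar removed above classifies each outer alternative of a rule as binary, ternary, prefix, suffix, or "other" and reports what it finds through the empty hook methods declared in @members (binaryAlt, prefixAlt, otherAlt, and so on). As a rough illustration of how those hooks are meant to be consumed, the sketch below subclasses the generated walker and simply logs each classification; the walker's class name, package, and constructor are assumptions inferred from the surrounding files, not part of this change.

    import org.antlr.grammar.v3.LeftRecursiveRuleWalker; // assumed name/location of the generated walker
    import org.antlr.runtime.tree.TreeNodeStream;
    import org.antlr.tool.GrammarAST;

    // Hypothetical subclass: records which outer alternatives the walker
    // classified as left-recursive operator alternatives.
    class AltKindLogger extends LeftRecursiveRuleWalker {
        AltKindLogger(TreeNodeStream input) { super(input); }

        @Override
        public void binaryAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
            System.out.println("alt " + alt + ": binary (left-recursive)");
        }

        @Override
        public void prefixAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
            System.out.println("alt " + alt + ": prefix");
        }

        @Override
        public void otherAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
            System.out.println("alt " + alt + ": ordinary alternative");
        }
    }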
diff --git a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/TreeToNFAConverter.g b/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/TreeToNFAConverter.g
deleted file mode 100644
index ab206ef..0000000
--- a/antlr-3.4/tool/src/main/antlr3/org/antlr/grammar/v3/TreeToNFAConverter.g
+++ /dev/null
@@ -1,855 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2011 Terence Parr
- All rights reserved.
-
- Grammar conversion to ANTLR v3:
- Copyright (c) 2011 Sam Harwell
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-	notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-	notice, this list of conditions and the following disclaimer in the
-	documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-	derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Build an NFA from a tree representing an ANTLR grammar. */
-tree grammar TreeToNFAConverter;
-
-options {
-	tokenVocab = ANTLR;
-	ASTLabelType = GrammarAST;
-}
-
-@header {
-package org.antlr.grammar.v3;
-
-import org.antlr.analysis.*;
-import org.antlr.misc.*;
-import org.antlr.tool.*;
-
-import org.antlr.runtime.BitSet;
-import org.antlr.runtime.DFA;
-}
-
-@members {
-/** Factory used to create nodes and submachines */
-protected NFAFactory factory = null;
-
-/** Which NFA object are we filling in? */
-protected NFA nfa = null;
-
-/** Which grammar are we converting an NFA for? */
-protected Grammar grammar = null;
-
-protected String currentRuleName = null;
-
-protected int outerAltNum = 0;
-protected int blockLevel = 0;
-
-protected int inTest = 0;
-
-public TreeToNFAConverter(TreeNodeStream input, Grammar g, NFA nfa, NFAFactory factory) {
-    this(input);
-    this.grammar = g;
-    this.nfa = nfa;
-    this.factory = factory;
-}
-
-public final IntSet setRule(GrammarAST t) throws RecognitionException {
-    TreeToNFAConverter other = new TreeToNFAConverter( new CommonTreeNodeStream( t ), grammar, nfa, factory );
-
-    other.currentRuleName = currentRuleName;
-    other.outerAltNum = outerAltNum;
-    other.blockLevel = blockLevel;
-
-    return other.setRule();
-}
-
-public final int testBlockAsSet( GrammarAST t ) throws RecognitionException {
-    Rule r = grammar.getLocallyDefinedRule( currentRuleName );
-    if ( r.hasRewrite( outerAltNum ) )
-        return -1;
-
-    TreeToNFAConverter other = new TreeToNFAConverter( new CommonTreeNodeStream( t ), grammar, nfa, factory );
-
-    other.state.backtracking++;
-    other.currentRuleName = currentRuleName;
-    other.outerAltNum = outerAltNum;
-    other.blockLevel = blockLevel;
-
-    int result = other.testBlockAsSet();
-    if ( other.state.failed )
-        return -1;
-
-    return result;
-}
-
-public final int testSetRule( GrammarAST t ) throws RecognitionException {
-    TreeToNFAConverter other = new TreeToNFAConverter( new CommonTreeNodeStream( t ), grammar, nfa, factory );
-
-    other.state.backtracking++;
-    other.currentRuleName = currentRuleName;
-    other.outerAltNum = outerAltNum;
-    other.blockLevel = blockLevel;
-
-    int result = other.testSetRule();
-    if ( other.state.failed )
-        state.failed = true;
-
-    return result;
-}
-
-protected void addFollowTransition( String ruleName, NFAState following ) {
-    //System.Console.Out.WriteLine( "adding follow link to rule " + ruleName );
-    // find last link in FOLLOW chain emanating from rule
-    Rule r = grammar.getRule( ruleName );
-    NFAState end = r.stopState;
-    while ( end.transition( 1 ) != null )
-    {
-        end = (NFAState)end.transition( 1 ).target;
-    }
-    if ( end.transition( 0 ) != null )
-    {
-        // already points to a following node
-        // gotta add another node to keep edges to a max of 2
-        NFAState n = factory.newState();
-        Transition e = new Transition( Label.EPSILON, n );
-        end.addTransition( e );
-        end = n;
-    }
-    Transition followEdge = new Transition( Label.EPSILON, following );
-    end.addTransition( followEdge );
-}
-
-protected void finish() {
-    int numEntryPoints = factory.build_EOFStates( grammar.getRules() );
-    if ( numEntryPoints == 0 )
-    {
-        ErrorManager.grammarWarning( ErrorManager.MSG_NO_GRAMMAR_START_RULE,
-                                   grammar,
-                                   null,
-                                   grammar.name );
-    }
-}
-
-@Override
-public void reportError(RecognitionException ex) {
-    if ( inTest > 0 )
-        throw new IllegalStateException(ex);
-
-    Token token = null;
-    if ( ex instanceof MismatchedTokenException )
-    {
-        token = ( (MismatchedTokenException)ex ).token;
-    }
-    else if ( ex instanceof NoViableAltException )
-    {
-        token = ( (NoViableAltException)ex ).token;
-    }
-
-    ErrorManager.syntaxError(
-        ErrorManager.MSG_SYNTAX_ERROR,
-        grammar,
-        token,
-        "buildnfa: " + ex.toString(),
-        ex );
-}
-
-private boolean hasElementOptions(GrammarAST node) {
-    if (node == null)
-        throw new NullPointerException("node");
-    return node.terminalOptions != null && node.terminalOptions.size() > 0;
-}
-}
-
-public
-grammar_
-@after
-{
-	finish();
-}
-	:	(	^( LEXER_GRAMMAR grammarSpec )
-		|	^( PARSER_GRAMMAR grammarSpec )
-		|	^( TREE_GRAMMAR grammarSpec )
-		|	^( COMBINED_GRAMMAR grammarSpec )
-		)
-	;
-
-attrScope
-	:	^( 'scope' ID ( ^(AMPERSAND .*) )* ACTION )
-	;
-
-grammarSpec
-	:	ID
-		(cmt=DOC_COMMENT)?
-		( ^(OPTIONS .*) )?
-		( ^(IMPORT .*) )?
-		( ^(TOKENS .*) )?
-		(attrScope)*
-		( ^(AMPERSAND .*) )* // skip actions
-		rules
-	;
-
-rules
-	:	(rule | ^(PREC_RULE .*))+
-	;
-
-rule
-	:	^(	RULE id=ID
-			{
-				currentRuleName = $id.text;
-				factory.setCurrentRule(grammar.getLocallyDefinedRule(currentRuleName));
-			}
-			(modifier)?
-			^(ARG (ARG_ACTION)?)
-			^(RET (ARG_ACTION)?)
-			(throwsSpec)?
-			( ^(OPTIONS .*) )?
-			( ruleScopeSpec )?
-			( ^(AMPERSAND .*) )*
-			b=block
-			(exceptionGroup)?
-			EOR
-			{
-				StateCluster g = $b.g;
-				if ($b.start.getSetValue() != null)
-				{
-					// if block comes back as a set not BLOCK, make it
-					// a single ALT block
-					g = factory.build_AlternativeBlockFromSet(g);
-				}
-				if (Rule.getRuleType(currentRuleName) == Grammar.PARSER || grammar.type==Grammar.LEXER)
-				{
-					// attach start node to block for this rule
-					Rule thisR = grammar.getLocallyDefinedRule(currentRuleName);
-					NFAState start = thisR.startState;
-					start.associatedASTNode = $id;
-					start.addTransition(new Transition(Label.EPSILON, g.left));
-
-					// track decision if > 1 alts
-					if ( grammar.getNumberOfAltsForDecisionNFA(g.left)>1 )
-					{
-						g.left.setDescription(grammar.grammarTreeToString($start, false));
-						g.left.setDecisionASTNode($b.start);
-						int d = grammar.assignDecisionNumber( g.left );
-						grammar.setDecisionNFA( d, g.left );
-						grammar.setDecisionBlockAST(d, $b.start);
-					}
-
-					// hook to end of rule node
-					NFAState end = thisR.stopState;
-					g.right.addTransition(new Transition(Label.EPSILON,end));
-				}
-			}
-		)
-	;
-
-modifier
-	:	'protected'
-	|	'public'
-	|	'private'
-	|	'fragment'
-	;
-
-throwsSpec
-	:	^('throws' ID+)
-	;
-
-ruleScopeSpec
-	:	^( 'scope' ( ^(AMPERSAND .*) )* (ACTION)? ( ID )* )
-	;
-
-block returns [StateCluster g = null]
-@init
-{
-	List<StateCluster> alts = new ArrayList<StateCluster>();
-	this.blockLevel++;
-	if ( this.blockLevel==1 )
-		this.outerAltNum=1;
-}
-	:	{grammar.isValidSet(this,$start) &&
-		 !currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME)}? =>
-		set {$g = $set.g;}
-
-	|	^(	BLOCK ( ^(OPTIONS .*) )?
-			(	a=alternative rewrite
-				{
-					alts.add($a.g);
-				}
-				{{
-					if ( blockLevel == 1 )
-						outerAltNum++;
-				}}
-			)+
-			EOB
-		)
-		{$g = factory.build_AlternativeBlock(alts);}
-	;
-finally { blockLevel--; }
-
-alternative returns [StateCluster g=null]
-	:	^( ALT (e=element {$g = factory.build_AB($g,$e.g);} )+ EOA )
-		{
-			if ($g==null) { // if alt was a list of actions or whatever
-				$g = factory.build_Epsilon();
-			}
-			else {
-				factory.optimizeAlternative($g);
-			}
-		}
-	;
-
-exceptionGroup
-	:	( exceptionHandler )+ (finallyClause)?
-	|	finallyClause
-	;
-
-exceptionHandler
-	:    ^('catch' ARG_ACTION ACTION)
-	;
-
-finallyClause
-	:    ^('finally' ACTION)
-	;
-
-rewrite
-	:	^(	REWRITES
-			(
-				{
-					if ( grammar.getOption("output")==null )
-					{
-						ErrorManager.grammarError(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
-												  grammar, $start.getToken(), currentRuleName);
-					}
-				}
-				^(REWRITE .*)
-			)*
-		)
-	|
-	;
-
-element returns [StateCluster g=null]
-	:   ^(ROOT e=element {$g = $e.g;})
-	|   ^(BANG e=element {$g = $e.g;})
-	|	^(ASSIGN ID e=element {$g = $e.g;})
-	|	^(PLUS_ASSIGN ID e=element {$g = $e.g;})
-	|   ^(RANGE a=atom[null] b=atom[null])
-		{$g = factory.build_Range(grammar.getTokenType($a.text),
-								 grammar.getTokenType($b.text));}
-	|   ^(CHAR_RANGE c1=CHAR_LITERAL c2=CHAR_LITERAL)
-		{
-		if ( grammar.type==Grammar.LEXER ) {
-			$g = factory.build_CharRange($c1.text, $c2.text);
-		}
-		}
-	|   atom_or_notatom {$g = $atom_or_notatom.g;}
-	|   ebnf {$g = $ebnf.g;}
-	|   tree_ {$g = $tree_.g;}
-	|   ^( SYNPRED block )
-	|   ACTION {$g = factory.build_Action($ACTION);}
-	|   FORCED_ACTION {$g = factory.build_Action($FORCED_ACTION);}
-	|   pred=SEMPRED {$g = factory.build_SemanticPredicate($pred);}
-	|   spred=SYN_SEMPRED {$g = factory.build_SemanticPredicate($spred);}
-	|   ^(bpred=BACKTRACK_SEMPRED .*) {$g = factory.build_SemanticPredicate($bpred);}
-	|   gpred=GATED_SEMPRED {$g = factory.build_SemanticPredicate($gpred);}
-	|   EPSILON {$g = factory.build_Epsilon();}
-	;
-
-ebnf returns [StateCluster g=null]
-@init
-{
-	GrammarAST blk = $start;
-	if (blk.getType() != BLOCK) {
-		blk = (GrammarAST)blk.getChild(0);
-	}
-	GrammarAST eob = blk.getLastChild();
-}
-	:	{grammar.isValidSet(this,$start)}? => set {$g = $set.g;}
-
-	|	b=block
-		{
-			// track decision if > 1 alts
-			if ( grammar.getNumberOfAltsForDecisionNFA($b.g.left)>1 )
-			{
-				$b.g.left.setDescription(grammar.grammarTreeToString(blk, false));
-				$b.g.left.setDecisionASTNode(blk);
-				int d = grammar.assignDecisionNumber( $b.g.left );
-				grammar.setDecisionNFA( d, $b.g.left );
-				grammar.setDecisionBlockAST(d, blk);
-			}
-			$g = $b.g;
-		}
-	|	^( OPTIONAL b=block )
-		{
-			StateCluster bg = $b.g;
-			if ( blk.getSetValue()!=null )
-			{
-				// if block comes back SET not BLOCK, make it
-				// a single ALT block
-				bg = factory.build_AlternativeBlockFromSet(bg);
-			}
-			$g = factory.build_Aoptional(bg);
-			$g.left.setDescription(grammar.grammarTreeToString($start, false));
-			// there is always at least one alt even if block has just 1 alt
-			int d = grammar.assignDecisionNumber( $g.left );
-			grammar.setDecisionNFA(d, $g.left);
-			grammar.setDecisionBlockAST(d, blk);
-			$g.left.setDecisionASTNode($start);
-		}
-	|	^( CLOSURE b=block )
-		{
-			StateCluster bg = $b.g;
-			if ( blk.getSetValue()!=null )
-			{
-				bg = factory.build_AlternativeBlockFromSet(bg);
-			}
-			$g = factory.build_Astar(bg);
-			// track the loop back / exit decision point
-			bg.right.setDescription("()* loopback of "+grammar.grammarTreeToString($start, false));
-			int d = grammar.assignDecisionNumber( bg.right );
-			grammar.setDecisionNFA(d, bg.right);
-			grammar.setDecisionBlockAST(d, blk);
-			bg.right.setDecisionASTNode(eob);
-			// make block entry state also have same decision for interpreting grammar
-			NFAState altBlockState = (NFAState)$g.left.transition(0).target;
-			altBlockState.setDecisionASTNode($start);
-			altBlockState.setDecisionNumber(d);
-			$g.left.setDecisionNumber(d); // this is the bypass decision (2 alts)
-			$g.left.setDecisionASTNode($start);
-		}
-	|	^( POSITIVE_CLOSURE b=block )
-		{
-			StateCluster bg = $b.g;
-			if ( blk.getSetValue()!=null )
-			{
-				bg = factory.build_AlternativeBlockFromSet(bg);
-			}
-			$g = factory.build_Aplus(bg);
-			// don't make a decision on left edge, can reuse loop end decision
-			// track the loop back / exit decision point
-			bg.right.setDescription("()+ loopback of "+grammar.grammarTreeToString($start, false));
-			int d = grammar.assignDecisionNumber( bg.right );
-			grammar.setDecisionNFA(d, bg.right);
-			grammar.setDecisionBlockAST(d, blk);
-			bg.right.setDecisionASTNode(eob);
-			// make block entry state also have same decision for interpreting grammar
-			NFAState altBlockState = (NFAState)$g.left.transition(0).target;
-			altBlockState.setDecisionASTNode($start);
-			altBlockState.setDecisionNumber(d);
-		}
-	;
-
-tree_ returns [StateCluster g=null]
-@init
-{
-	StateCluster down=null, up=null;
-}
-	:	^(	TREE_BEGIN
-			e=element { $g = $e.g; }
-			{
-				down = factory.build_Atom(Label.DOWN, $e.start);
-				// TODO set following states for imaginary nodes?
-				//el.followingNFAState = down.right;
-				$g = factory.build_AB($g,down);
-			}
-			( e=element {$g = factory.build_AB($g,$e.g);} )*
-			{
-				up = factory.build_Atom(Label.UP, $e.start);
-				//el.followingNFAState = up.right;
-				$g = factory.build_AB($g,up);
-				// tree roots point at right edge of DOWN for LOOK computation later
-				$start.NFATreeDownState = down.left;
-			}
-		)
-	;
-
-atom_or_notatom returns [StateCluster g=null]
-	:	atom[null] {$g = $atom.g;}
-	|	^(	n=NOT
-			(	c=CHAR_LITERAL (ast1=ast_suffix)?
-				{
-					int ttype=0;
-					if ( grammar.type==Grammar.LEXER )
-					{
-						ttype = Grammar.getCharValueFromGrammarCharLiteral($c.text);
-					}
-					else
-					{
-						ttype = grammar.getTokenType($c.text);
-					}
-					IntSet notAtom = grammar.complement(ttype);
-					if ( notAtom.isNil() )
-					{
-						ErrorManager.grammarError(
-							ErrorManager.MSG_EMPTY_COMPLEMENT,
-							grammar,
-							$c.getToken(),
-							$c.text);
-					}
-					$g=factory.build_Set(notAtom,$n);
-				}
-			|	t=TOKEN_REF (ast3=ast_suffix)?
-				{
-					int ttype=0;
-					IntSet notAtom = null;
-					if ( grammar.type==Grammar.LEXER )
-					{
-						notAtom = grammar.getSetFromRule(this,$t.text);
-						if ( notAtom==null )
-						{
-							ErrorManager.grammarError(
-								ErrorManager.MSG_RULE_INVALID_SET,
-								grammar,
-								$t.getToken(),
-								$t.text);
-						}
-						else
-						{
-							notAtom = grammar.complement(notAtom);
-						}
-					}
-					else
-					{
-						ttype = grammar.getTokenType($t.text);
-						notAtom = grammar.complement(ttype);
-					}
-					if ( notAtom==null || notAtom.isNil() )
-					{
-						ErrorManager.grammarError(
-							ErrorManager.MSG_EMPTY_COMPLEMENT,
-							grammar,
-							$t.getToken(),
-							$t.text);
-					}
-					$g=factory.build_Set(notAtom,$n);
-				}
-			|	set {$g = $set.g;}
-				{
-					GrammarAST stNode = (GrammarAST)$n.getChild(0);
-					//IntSet notSet = grammar.complement(stNode.getSetValue());
-					// let code generator complement the sets
-					IntSet s = stNode.getSetValue();
-					stNode.setSetValue(s);
-					// let code gen do the complement again; here we compute
-					// for NFA construction
-					s = grammar.complement(s);
-					if ( s.isNil() )
-					{
-						ErrorManager.grammarError(
-							ErrorManager.MSG_EMPTY_COMPLEMENT,
-							grammar,
-							$n.getToken());
-					}
-					$g=factory.build_Set(s,$n);
-				}
-			)
-			{$n.followingNFAState = $g.right;}
-		)
-	;
-
-atom[String scopeName] returns [StateCluster g=null]
-	:	^( r=RULE_REF (rarg=ARG_ACTION)? (as1=ast_suffix)? )
-		{
-			NFAState start = grammar.getRuleStartState(scopeName,$r.text);
-			if ( start!=null )
-			{
-				Rule rr = grammar.getRule(scopeName,$r.text);
-				$g = factory.build_RuleRef(rr, start);
-				r.followingNFAState = $g.right;
-				r.NFAStartState = $g.left;
-				if ( $g.left.transition(0) instanceof RuleClosureTransition
-					&& grammar.type!=Grammar.LEXER )
-				{
-					addFollowTransition($r.text, $g.right);
-				}
-				// else rule ref got inlined to a set
-			}
-		}
-
-	|	^( t=TOKEN_REF  (targ=ARG_ACTION)? (as2=ast_suffix)? )
-		{
-			if ( grammar.type==Grammar.LEXER )
-			{
-				NFAState start = grammar.getRuleStartState(scopeName,$t.text);
-				if ( start!=null )
-				{
-					Rule rr = grammar.getRule(scopeName,t.getText());
-					$g = factory.build_RuleRef(rr, start);
-					t.NFAStartState = $g.left;
-					// don't add FOLLOW transitions in the lexer;
-					// only exact context should be used.
-				}
-			}
-			else
-			{
-				$g = factory.build_Atom(t);
-				t.followingNFAState = $g.right;
-			}
-		}
-
-	|	^( c=CHAR_LITERAL  (as3=ast_suffix)? )
-		{
-			if ( grammar.type==Grammar.LEXER )
-			{
-				$g = factory.build_CharLiteralAtom(c);
-			}
-			else
-			{
-				$g = factory.build_Atom(c);
-				c.followingNFAState = $g.right;
-			}
-		}
-
-	|	^( s=STRING_LITERAL  (as4=ast_suffix)? )
-		{
-			if ( grammar.type==Grammar.LEXER )
-			{
-				$g = factory.build_StringLiteralAtom(s);
-			}
-			else
-			{
-				$g = factory.build_Atom(s);
-				s.followingNFAState = $g.right;
-			}
-		}
-
-	|	^(	w=WILDCARD (as5=ast_suffix)? )
-			{
-				if ( nfa.grammar.type == Grammar.TREE_PARSER
-					&& (w.getChildIndex() > 0 || w.getParent().getChild(1).getType() == EOA) )
-				{
-					$g = factory.build_WildcardTree( $w );
-				}
-				else
-				{
-					$g = factory.build_Wildcard( $w );
-				}
-			}
-
-	|	^( DOT scope_=ID a=atom[$scope_.text] {$g = $a.g;} ) // scope override
-	;
-
-ast_suffix
-	:	ROOT
-	|	BANG
-	;
-
-set returns [StateCluster g=null]
-@init
-{
-	IntSet elements=new IntervalSet();
-	if ( state.backtracking == 0 )
-		$start.setSetValue(elements); // track set for use by code gen
-}
-	:	^( b=BLOCK
-		   (^(ALT ( ^(BACKTRACK_SEMPRED .*) )? setElement[elements] EOA))+
-		   EOB
-		 )
-		{
-		$g = factory.build_Set(elements,$b);
-		$b.followingNFAState = $g.right;
-		$b.setSetValue(elements); // track set value of this block
-		}
-		//{System.out.println("set elements="+elements.toString(grammar));}
-	;
-
-setRule returns [IntSet elements=new IntervalSet()]
-@init
-{
-	IntSet s=null;
-}
-	:	^( RULE id=ID (modifier)? ARG RET ( ^(OPTIONS .*) )? ( ruleScopeSpec )?
-			( ^(AMPERSAND .*) )*
-			^( BLOCK ( ^(OPTIONS .*) )?
-			   ( ^(ALT (BACKTRACK_SEMPRED)? setElement[elements] EOA) )+
-			   EOB
-			 )
-			(exceptionGroup)?
-			EOR
-		 )
-	;
-catch[RecognitionException re] { throw re; }
-
-setElement[IntSet elements]
-@init
-{
-	int ttype;
-	IntSet ns=null;
-}
-	:	c=CHAR_LITERAL
-		{
-			if ( grammar.type==Grammar.LEXER )
-			{
-				ttype = Grammar.getCharValueFromGrammarCharLiteral($c.text);
-			}
-			else
-			{
-				ttype = grammar.getTokenType($c.text);
-			}
-			if ( elements.member(ttype) )
-			{
-				ErrorManager.grammarError(
-					ErrorManager.MSG_DUPLICATE_SET_ENTRY,
-					grammar,
-					$c.getToken(),
-					$c.text);
-			}
-			elements.add(ttype);
-		}
-	|	t=TOKEN_REF
-		{
-			if ( grammar.type==Grammar.LEXER )
-			{
-				// recursively will invoke this rule to match elements in target rule ref
-				IntSet ruleSet = grammar.getSetFromRule(this,$t.text);
-				if ( ruleSet==null )
-				{
-					ErrorManager.grammarError(
-						ErrorManager.MSG_RULE_INVALID_SET,
-						grammar,
-						$t.getToken(),
-						$t.text);
-				}
-				else
-				{
-					elements.addAll(ruleSet);
-				}
-			}
-			else
-			{
-				ttype = grammar.getTokenType($t.text);
-				if ( elements.member(ttype) )
-				{
-					ErrorManager.grammarError(
-						ErrorManager.MSG_DUPLICATE_SET_ENTRY,
-						grammar,
-						$t.getToken(),
-						$t.text);
-				}
-				elements.add(ttype);
-			}
-		}
-
-	|	s=STRING_LITERAL
-		{
-			ttype = grammar.getTokenType($s.text);
-			if ( elements.member(ttype) )
-			{
-				ErrorManager.grammarError(
-					ErrorManager.MSG_DUPLICATE_SET_ENTRY,
-					grammar,
-					$s.getToken(),
-					$s.text);
-			}
-			elements.add(ttype);
-		}
-	|	^(CHAR_RANGE c1=CHAR_LITERAL c2=CHAR_LITERAL)
-		{
-			if ( grammar.type==Grammar.LEXER )
-			{
-				int a = Grammar.getCharValueFromGrammarCharLiteral($c1.text);
-				int b = Grammar.getCharValueFromGrammarCharLiteral($c2.text);
-				elements.addAll(IntervalSet.of(a,b));
-			}
-		}
-
-	|	gset=set
-		{
-			Transition setTrans = $gset.g.left.transition(0);
-			elements.addAll(setTrans.label.getSet());
-		}
-
-	|	^(	NOT {ns=new IntervalSet();}
-			setElement[ns]
-			{
-				IntSet not = grammar.complement(ns);
-				elements.addAll(not);
-			}
-		)
-	;
-
-/** Check to see if this block can be a set.  Can't have actions
- *  etc...  Also can't be in a rule with a rewrite as we need
- *  to track what's inside set for use in rewrite.
- *
- *  This should only be called from the helper function in TreeToNFAConverterHelper.cs
- *  and from the rule testSetElement below.
- */
-testBlockAsSet returns [int alts=0]
-options { backtrack = true; }
-@init
-{
-	inTest++;
-}
-	:	^(	BLOCK
-			(	^(ALT (BACKTRACK_SEMPRED)? testSetElement {{$alts += $testSetElement.alts;}} EOA)
-			)+
-			EOB
-		)
-	;
-catch[RecognitionException re] { throw re; }
-finally { inTest--; }
-
-testSetRule returns [int alts=0]
-@init
-{
-	inTest++;
-}
-	:	^(	RULE id=ID (modifier)? ARG RET ( ^(OPTIONS .*) )? ( ruleScopeSpec )?
-			( ^(AMPERSAND .*) )*
-			^(	BLOCK
-				(	^(ALT (BACKTRACK_SEMPRED)? testSetElement {{$alts += $testSetElement.alts;}} EOA)
-				)+
-				EOB
-			)
-			(exceptionGroup)?
-			EOR
-		)
-	;
-catch[RecognitionException re] { throw re; }
-finally { inTest--; }
-
-/** Match just an element; no ast suffix etc.. */
-testSetElement returns [int alts=1]
-	:	c=CHAR_LITERAL {!hasElementOptions($c)}?
-	|	t=TOKEN_REF {!hasElementOptions($t)}?
-		{{
-			if ( grammar.type==Grammar.LEXER )
-			{
-				Rule rule = grammar.getRule($t.text);
-				if ( rule==null )
-				{
-					//throw new RecognitionException("invalid rule");
-					throw new RecognitionException();
-				}
-				// recursively will invoke this rule to match elements in target rule ref
-				$alts += testSetRule(rule.tree);
-			}
-		}}
-	|   {grammar.type!=Grammar.LEXER}? => s=STRING_LITERAL
-	|	^(CHAR_RANGE c1=CHAR_LITERAL c2=CHAR_LITERAL)
-		{{ $alts = IntervalSet.of( Grammar.getCharValueFromGrammarCharLiteral($c1.text), Grammar.getCharValueFromGrammarCharLiteral($c2.text) ).size(); }}
-	|   testBlockAsSet
-		{{ $alts = $testBlockAsSet.alts; }}
-	|   ^( NOT tse=testSetElement )
-		{{ $alts = grammar.getTokenTypes().size() - $tse.alts; }}
-	;
-catch[RecognitionException re] { throw re; }
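The removed tree grammar above exposes a constructor taking the tree stream plus the Grammar, NFA, and NFAFactory it should populate, and a public entry rule grammar_ whose @after action calls finish(). The driver below is a minimal sketch of how those pieces fit together, assuming the caller has already built the grammar AST and the NFA/factory objects elsewhere; it is an illustration, not code from this commit.

    import org.antlr.analysis.NFA;
    import org.antlr.analysis.NFAFactory;
    import org.antlr.grammar.v3.TreeToNFAConverter;
    import org.antlr.runtime.RecognitionException;
    import org.antlr.runtime.tree.CommonTreeNodeStream;
    import org.antlr.tool.Grammar;
    import org.antlr.tool.GrammarAST;

    class NFABuildSketch {
        // Walk the grammar AST; the walker fills in the NFA via the factory,
        // and finish() warns if no start rule was found.
        static void buildNFA(Grammar grammar, NFA nfa, NFAFactory factory, GrammarAST grammarTree)
                throws RecognitionException {
            CommonTreeNodeStream input = new CommonTreeNodeStream(grammarTree);
            TreeToNFAConverter converter = new TreeToNFAConverter(input, grammar, nfa, factory);
            converter.grammar_();
        }
    }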
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/Tool.java b/antlr-3.4/tool/src/main/java/org/antlr/Tool.java
deleted file mode 100644
index b336baf..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/Tool.java
+++ /dev/null
@@ -1,1390 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr;
-
-import org.antlr.analysis.*;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.misc.Graph;
-import org.antlr.runtime.misc.Stats;
-import org.antlr.tool.*;
-import org.stringtemplate.v4.STGroup;
-
-import java.io.*;
-import java.util.*;
-
-/** The main ANTLR entry point.  Read a grammar and generate a parser. */
-public class Tool {
-
-    public final Properties antlrSettings = new Properties();
-    public String VERSION = "3.4";
-    //public static final String VERSION = "${project.version}";
-    public static final String UNINITIALIZED_DIR = "<unset-dir>";
-    private List<String> grammarFileNames = new ArrayList<String>();
-    private boolean generate_NFA_dot = false;
-    private boolean generate_DFA_dot = false;
-    private String outputDirectory = ".";
-    private boolean haveOutputDir = false;
-    private String inputDirectory = null;
-    private String parentGrammarDirectory;
-    private String grammarOutputDirectory;
-    private boolean haveInputDir = false;
-    private String libDirectory = ".";
-    private boolean debug = false;
-    private boolean trace = false;
-    private boolean profile = false;
-    private boolean report = false;
-    private boolean printGrammar = false;
-    private boolean depend = false;
-    private boolean forceAllFilesToOutputDir = false;
-    private boolean forceRelativeOutput = false;
-    protected boolean deleteTempLexer = true;
-    private boolean verbose = false;
-    /** Don't process grammar file if generated files are newer than grammar */
-    private boolean make = false;
-    private boolean showBanner = true;
-	private static boolean exitNow = false;
-	private static boolean return_dont_exit = false;
-
-
-	public String forcedLanguageOption; // -language L on command line
-
-    // The internal options are for my use on the command line during dev
-    //
-    public static boolean internalOption_PrintGrammarTree = false;
-    public static boolean internalOption_PrintDFA = false;
-    public static boolean internalOption_ShowNFAConfigsInDFA = false;
-    public static boolean internalOption_watchNFAConversion = false;
-
-    /**
-     * A list of dependency generators that are accumulated as (and if) the
-     * tool is required to sort the provided grammars into build dependency order.
-    protected Map<String, BuildDependencyGenerator> buildDependencyGenerators;
-     */
-
-    public static void main(String[] args) {
-        Tool antlr = new Tool(args);
-
-        if (!exitNow) {
-            antlr.process();
-			if ( return_dont_exit ) return;
-            if (ErrorManager.getNumErrors() > 0) {
-                System.exit(1);
-            }
-            System.exit(0);
-        }
-    }
-
-    /**
-     * Load the properties file org/antlr/antlr.properties and populate any
-     * variables that must be initialized from it, such as the version of ANTLR.
-     */
-    private void loadResources() {
-        InputStream in = null;
-        in = this.getClass().getResourceAsStream("antlr.properties");
-
-        // If we found the resource, then load it, otherwise revert to the
-        // defaults.
-        //
-        if (in != null) {
-            try {
-                // Load the resources into the map
-                //
-                antlrSettings.load(in);
-
-                // Set any variables that we need to populate from the resources
-                //
-//                VERSION = antlrSettings.getProperty("antlr.version");
-            } catch (Exception e) {
-                // Do nothing, just leave the defaults in place
-            }
-        }
-    }
-
-    public Tool() {
-        loadResources();
-    }
-
-    public Tool(String[] args) {
-        loadResources();
-
-        // Set all the options and pick up all the named grammar files
-        processArgs(args);
-    }
-
-    public void processArgs(String[] args) {
-
-        if (isVerbose()) {
-            ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
-            showBanner = false;
-        }
-
-        if (args == null || args.length == 0) {
-            help();
-            return;
-        }
-        for (int i = 0; i < args.length; i++) {
-            if (args[i].equals("-o") || args[i].equals("-fo")) {
-                if (i + 1 >= args.length) {
-                    System.err.println("missing output directory with -fo/-o option; ignoring");
-                }
-                else {
-                    if (args[i].equals("-fo")) { // force output into dir
-                        setForceAllFilesToOutputDir(true);
-                    }
-                    i++;
-                    outputDirectory = args[i];
-                    if (outputDirectory.endsWith("/") ||
-                        outputDirectory.endsWith("\\")) {
-                        outputDirectory =
-                            outputDirectory.substring(0, getOutputDirectory().length() - 1);
-                    }
-                    File outDir = new File(outputDirectory);
-                    haveOutputDir = true;
-                    if (outDir.exists() && !outDir.isDirectory()) {
-                        ErrorManager.error(ErrorManager.MSG_OUTPUT_DIR_IS_FILE, outputDirectory);
-                        setLibDirectory(".");
-                    }
-                }
-            }
-			else if (args[i].equals("-lib")) {
-				if (i + 1 >= args.length) {
-					System.err.println("missing library directory with -lib option; ignoring");
-				}
-				else {
-					i++;
-					setLibDirectory(args[i]);
-					if (getLibraryDirectory().endsWith("/") ||
-						getLibraryDirectory().endsWith("\\")) {
-						setLibDirectory(getLibraryDirectory().substring(0, getLibraryDirectory().length() - 1));
-					}
-					File outDir = new File(getLibraryDirectory());
-					if (!outDir.exists()) {
-						ErrorManager.error(ErrorManager.MSG_DIR_NOT_FOUND, getLibraryDirectory());
-						setLibDirectory(".");
-					}
-				}
-			}
-			else if (args[i].equals("-language")) {
-				if (i + 1 >= args.length) {
-					System.err.println("missing language name; ignoring");
-				}
-				else {
-					i++;
-					forcedLanguageOption = args[i];
-				}
-			}
-            else if (args[i].equals("-nfa")) {
-                setGenerate_NFA_dot(true);
-            }
-            else if (args[i].equals("-dfa")) {
-                setGenerate_DFA_dot(true);
-            }
-            else if (args[i].equals("-debug")) {
-                setDebug(true);
-            }
-            else if (args[i].equals("-trace")) {
-                setTrace(true);
-            }
-            else if (args[i].equals("-report")) {
-                setReport(true);
-            }
-            else if (args[i].equals("-profile")) {
-                setProfile(true);
-            }
-            else if (args[i].equals("-print")) {
-                setPrintGrammar(true);
-            }
-            else if (args[i].equals("-depend")) {
-                setDepend(true);
-            }
-            else if (args[i].equals("-verbose")) {
-                setVerbose(true);
-            }
-            else if (args[i].equals("-version")) {
-                version();
-                exitNow = true;
-            }
-            else if (args[i].equals("-make")) {
-                setMake(true);
-            }
-            else if (args[i].equals("-message-format")) {
-                if (i + 1 >= args.length) {
-                    System.err.println("missing output format with -message-format option; using default");
-                }
-                else {
-                    i++;
-                    ErrorManager.setFormat(args[i]);
-                }
-            }
-            else if (args[i].equals("-Xgrtree")) {
-                internalOption_PrintGrammarTree = true; // print grammar tree
-            }
-            else if (args[i].equals("-Xdfa")) {
-                internalOption_PrintDFA = true;
-            }
-            else if (args[i].equals("-Xnoprune")) {
-                DFAOptimizer.PRUNE_EBNF_EXIT_BRANCHES = false;
-            }
-            else if (args[i].equals("-Xnocollapse")) {
-                DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES = false;
-            }
-            else if (args[i].equals("-Xdbgconversion")) {
-                NFAToDFAConverter.debug = true;
-            }
-            else if (args[i].equals("-Xmultithreaded")) {
-                NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION = false;
-            }
-            else if (args[i].equals("-Xnomergestopstates")) {
-                DFAOptimizer.MERGE_STOP_STATES = false;
-            }
-            else if (args[i].equals("-Xdfaverbose")) {
-                internalOption_ShowNFAConfigsInDFA = true;
-            }
-            else if (args[i].equals("-Xwatchconversion")) {
-                internalOption_watchNFAConversion = true;
-            }
-            else if (args[i].equals("-XdbgST")) {
-                CodeGenerator.LAUNCH_ST_INSPECTOR = true;
-				STGroup.trackCreationEvents = true;
-				return_dont_exit = true;
-            }
-            else if (args[i].equals("-Xmaxinlinedfastates")) {
-                if (i + 1 >= args.length) {
-                    System.err.println("missing max inline dfa states -Xmaxinlinedfastates option; ignoring");
-                }
-                else {
-                    i++;
-                    CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = Integer.parseInt(args[i]);
-                }
-            }
-            else if (args[i].equals("-Xmaxswitchcaselabels")) {
-                if (i + 1 >= args.length) {
-                    System.err.println("missing max switch case labels -Xmaxswitchcaselabels option; ignoring");
-                }
-                else {
-                    i++;
-                    CodeGenerator.MAX_SWITCH_CASE_LABELS = Integer.parseInt(args[i]);
-                }
-            }
-            else if (args[i].equals("-Xminswitchalts")) {
-                if (i + 1 >= args.length) {
-                    System.err.println("missing min switch alternatives -Xminswitchalts option; ignoring");
-                }
-                else {
-                    i++;
-                    CodeGenerator.MIN_SWITCH_ALTS = Integer.parseInt(args[i]);
-                }
-            }
-            else if (args[i].equals("-Xm")) {
-                if (i + 1 >= args.length) {
-                    System.err.println("missing max recursion with -Xm option; ignoring");
-                }
-                else {
-                    i++;
-                    NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = Integer.parseInt(args[i]);
-                }
-            }
-            else if (args[i].equals("-Xmaxdfaedges")) {
-                if (i + 1 >= args.length) {
-                    System.err.println("missing max number of edges with -Xmaxdfaedges option; ignoring");
-                }
-                else {
-                    i++;
-                    DFA.MAX_STATE_TRANSITIONS_FOR_TABLE = Integer.parseInt(args[i]);
-                }
-            }
-            else if (args[i].equals("-Xconversiontimeout")) {
-                if (i + 1 >= args.length) {
-                    System.err.println("missing max time in ms -Xconversiontimeout option; ignoring");
-                }
-                else {
-                    i++;
-                    DFA.MAX_TIME_PER_DFA_CREATION = Integer.parseInt(args[i]);
-                }
-            }
-			else if (args[i].equals("-Xnfastates")) {
-				DecisionProbe.verbose = true;
-			}
-			else if (args[i].equals("-Xsavelexer")) {
-				deleteTempLexer = false;
-			}
-            else if (args[i].equals("-X")) {
-                Xhelp();
-            }
-            else {
-                if (args[i].charAt(0) != '-') {
-                    // Must be the grammar file
-                    addGrammarFile(args[i]);
-                }
-            }
-        }
-    }
-
-    /*
-    protected void checkForInvalidArguments(String[] args, BitSet cmdLineArgValid) {
-    // check for invalid command line args
-    for (int a = 0; a < args.length; a++) {
-    if (!cmdLineArgValid.member(a)) {
-    System.err.println("invalid command-line argument: " + args[a] + "; ignored");
-    }
-    }
-    }
-     */
-
-    /**
-     * Checks to see if the list of outputFiles all exist, and have
-     * last-modified timestamps which are later than the last-modified
-     * timestamp of all the grammar files involved in building the output
-     * (imports must be checked). If these conditions hold, the method
-     * returns false, otherwise, it returns true.
-     *
-     * @param grammarFileName The grammar file we are checking
-     */
-    public boolean buildRequired(String grammarFileName)
-        throws IOException
-    {
-        BuildDependencyGenerator bd =
-            new BuildDependencyGenerator(this, grammarFileName);
-
-        List<File> outputFiles = bd.getGeneratedFileList();
-        List<File> inputFiles = bd.getDependenciesFileList();
-        // Note that input directory must be set to use buildRequired
-        File grammarFile;
-        if (haveInputDir) {
-            grammarFile = new File(inputDirectory, grammarFileName);
-        }
-        else {
-            grammarFile = new File(grammarFileName);
-        }
-        long grammarLastModified = grammarFile.lastModified();
-        for (File outputFile : outputFiles) {
-            if (!outputFile.exists() || grammarLastModified > outputFile.lastModified()) {
-                // One of the output files does not exist or is out of date, so we must build it
-                return true;
-            }
-            // Check all of the imported grammars and see if any of these are younger
-            // than any of the output files.
-            if (inputFiles != null) {
-                for (File inputFile : inputFiles) {
-
-                    if (inputFile.lastModified() > outputFile.lastModified()) {
-                        // One of the imported grammar files has been updated so we must build
-                        return true;
-                    }
-                }
-            }
-        }
-        if (isVerbose()) {
-            System.out.println("Grammar " + grammarFile + " is up to date - build skipped");
-        }
-        return false;
-    }
-
-    public void process() {
-        boolean exceptionWhenWritingLexerFile = false;
-        String lexerGrammarFileName = null;		// necessary at this scope to have access in the catch below
-
-        // Have to be tricky here when Maven or build tools call in and must new Tool()
-        // before setting options. The banner won't display that way!
-        if (isVerbose() && showBanner) {
-            ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
-            showBanner = false;
-        }
-
-        try {
-            sortGrammarFiles(); // update grammarFileNames
-        }
-        catch (Exception e) {
-            ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR,e);
-        }
-        catch (Error e) {
-            ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, e);
-        }
-
-        for (String grammarFileName : grammarFileNames) {
-            // If we are in make mode (to support build tools like Maven) and the
-            // file is already up to date, then we do not build it (and in verbose mode
-            // we will say so).
-            if (make) {
-                try {
-                    if ( !buildRequired(grammarFileName) ) continue;
-                }
-                catch (Exception e) {
-                    ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR,e);
-                }
-            }
-
-            if (isVerbose() && !isDepend()) {
-                System.out.println(grammarFileName);
-            }
-            try {
-                if (isDepend()) {
-                    BuildDependencyGenerator dep =
-                        new BuildDependencyGenerator(this, grammarFileName);
-                    /*
-                    List outputFiles = dep.getGeneratedFileList();
-                    List dependents = dep.getDependenciesFileList();
-                    System.out.println("output: "+outputFiles);
-                    System.out.println("dependents: "+dependents);
-                     */
-                    System.out.println(dep.getDependencies().render());
-                    continue;
-                }
-
-                Grammar rootGrammar = getRootGrammar(grammarFileName);
-                // we now have all grammars read in as ASTs
-                // (i.e., root and all delegates)
-				rootGrammar.composite.assignTokenTypes();
-				//rootGrammar.composite.translateLeftRecursiveRules();
-				rootGrammar.addRulesForSyntacticPredicates();
-				rootGrammar.composite.defineGrammarSymbols();
-                rootGrammar.composite.createNFAs();
-
-                generateRecognizer(rootGrammar);
-
-                if (isPrintGrammar()) {
-                    rootGrammar.printGrammar(System.out);
-                }
-
-                if (isReport()) {
-					GrammarReport2 greport = new GrammarReport2(rootGrammar);
-					System.out.print(greport.toString());
-//                    GrammarReport greport = new GrammarReport(rootGrammar);
-//                    System.out.println(greport.toString());
-//                    // print out a backtracking report too (that is not encoded into log)
-//                    System.out.println(greport.getBacktrackingReport());
-                }
-                if (isProfile()) {
-                    GrammarReport greport = new GrammarReport(rootGrammar);
-                    Stats.writeReport(GrammarReport.GRAMMAR_STATS_FILENAME,
-                                      greport.toNotifyString());
-                }
-
-                // now handle the lexer if one was created for a merged spec
-                String lexerGrammarStr = rootGrammar.getLexerGrammar();
-                //System.out.println("lexer rootGrammar:\n"+lexerGrammarStr);
-                if (rootGrammar.type == Grammar.COMBINED && lexerGrammarStr != null) {
-                    lexerGrammarFileName = rootGrammar.getImplicitlyGeneratedLexerFileName();
-                    try {
-                        Writer w = getOutputFile(rootGrammar, lexerGrammarFileName);
-                        w.write(lexerGrammarStr);
-                        w.close();
-                    }
-                    catch (IOException e) {
-                        // emit different error message when creating the implicit lexer fails
-                        // due to write permission error
-                        exceptionWhenWritingLexerFile = true;
-                        throw e;
-                    }
-                    try {
-                        StringReader sr = new StringReader(lexerGrammarStr);
-                        Grammar lexerGrammar = new Grammar(this);
-                        lexerGrammar.composite.watchNFAConversion = internalOption_watchNFAConversion;
-                        lexerGrammar.implicitLexer = true;
-                        //lexerGrammar.setTool(this);
-                        File lexerGrammarFullFile =
-                            new File(getFileDirectory(lexerGrammarFileName), lexerGrammarFileName);
-                        lexerGrammar.setFileName(lexerGrammarFullFile.toString());
-
-                        lexerGrammar.importTokenVocabulary(rootGrammar);
-                        lexerGrammar.parseAndBuildAST(sr);
-
-                        sr.close();
-
-                        lexerGrammar.composite.assignTokenTypes();
-						lexerGrammar.addRulesForSyntacticPredicates();
-                        lexerGrammar.composite.defineGrammarSymbols();
-                        lexerGrammar.composite.createNFAs();
-
-                        generateRecognizer(lexerGrammar);
-                    }
-                    finally {
-                        // make sure we clean up
-                        if (deleteTempLexer) {
-                            File outputDir = getOutputDirectory(lexerGrammarFileName);
-                            File outputFile = new File(outputDir, lexerGrammarFileName);
-                            outputFile.delete();
-                        }
-                    }
-                }
-            }
-            catch (IOException e) {
-                if (exceptionWhenWritingLexerFile) {
-                    ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, e);
-                }
-                else {
-                    ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE,
-                                       grammarFileName);
-                }
-            }
-            catch (Exception e) {
-                ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, grammarFileName, e);
-            }
-            /*
-           finally {
-           System.out.println("creates="+ Interval.creates);
-           System.out.println("hits="+ Interval.hits);
-           System.out.println("misses="+ Interval.misses);
-           System.out.println("outOfRange="+ Interval.outOfRange);
-           }
-            */
-        }
-    }
-
-    public void sortGrammarFiles() throws IOException {
-        //System.out.println("Grammar names "+getGrammarFileNames());
-        Graph g = new Graph();
-        List<String> missingFiles = new ArrayList<String>();
-        for (String gfile : grammarFileNames) {
-            try {
-                GrammarSpelunker grammar = new GrammarSpelunker(inputDirectory, gfile);
-                grammar.parse();
-                String vocabName = grammar.getTokenVocab();
-                String grammarName = grammar.getGrammarName();
-                // Make all grammars depend on any tokenVocab options
-                if ( vocabName!=null ) g.addEdge(gfile, vocabName+CodeGenerator.VOCAB_FILE_EXTENSION);
-                // Make all generated tokens files depend on their grammars
-                g.addEdge(grammarName+CodeGenerator.VOCAB_FILE_EXTENSION, gfile);
-            }
-            catch (FileNotFoundException fnfe) {
-                ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE, gfile);
-                missingFiles.add(gfile);
-            }
-        }
-        List<Object> sorted = g.sort();
-        //System.out.println("sorted="+sorted);
-        grammarFileNames.clear(); // wipe so we can give new ordered list
-        for (int i = 0; i < sorted.size(); i++) {
-            String f = (String)sorted.get(i);
-            if ( missingFiles.contains(f) ) continue;
-            if ( !(f.endsWith(".g") || f.endsWith(".g3")) ) continue;
-            grammarFileNames.add(f);
-        }
-        //System.out.println("new grammars="+grammarFileNames);
-    }
-
-    /** Get a grammar mentioned on the command-line and any delegates */
-    public Grammar getRootGrammar(String grammarFileName)
-        throws IOException
-    {
-        //ST.setLintMode(true);
-        // grammars mentioned on command line are either roots or single grammars.
-        // create the necessary composite in case it's got delegates; even
-        // single grammar needs it to get token types.
-        CompositeGrammar composite = new CompositeGrammar();
-        Grammar grammar = new Grammar(this, grammarFileName, composite);
-        composite.setDelegationRoot(grammar);
-        FileReader fr = null;
-        File f = null;
-
-        if (haveInputDir) {
-            f = new File(inputDirectory, grammarFileName);
-        }
-        else {
-            f = new File(grammarFileName);
-        }
-
-        // Store the location of this grammar as if we import files, we can then
-        // search for imports in the same location as the original grammar as well as in
-        // the lib directory.
-        //
-        parentGrammarDirectory = f.getParent();
-
-        if (grammarFileName.lastIndexOf(File.separatorChar) == -1) {
-            grammarOutputDirectory = ".";
-        }
-        else {
-            grammarOutputDirectory = grammarFileName.substring(0, grammarFileName.lastIndexOf(File.separatorChar));
-        }
-        fr = new FileReader(f);
-        BufferedReader br = new BufferedReader(fr);
-        grammar.parseAndBuildAST(br);
-        composite.watchNFAConversion = internalOption_watchNFAConversion;
-        br.close();
-        fr.close();
-        return grammar;
-    }
-
-    /** Create NFA, DFA and generate code for grammar.
-     *  Create NFA for any delegates first.  Once all NFA are created,
-     *  it's ok to create DFA, which must check for left-recursion.  That check
-     *  is done by walking the full NFA, which therefore must be complete.
-     *  After all NFA, comes DFA conversion for root grammar then code gen for
-     *  root grammar.  DFA and code gen for delegates comes next.
-     */
-    protected void generateRecognizer(Grammar grammar) {
-        String language = (String) grammar.getOption("language");
-        if (language != null) {
-            CodeGenerator generator = new CodeGenerator(this, grammar, language);
-            grammar.setCodeGenerator(generator);
-            generator.setDebug(isDebug());
-            generator.setProfile(isProfile());
-            generator.setTrace(isTrace());
-
-            // generate NFA early in case of crash later (for debugging)
-            if (isGenerate_NFA_dot()) {
-                generateNFAs(grammar);
-            }
-
-            // GENERATE CODE
-            generator.genRecognizer();
-
-            if (isGenerate_DFA_dot()) {
-                generateDFAs(grammar);
-            }
-
-            List<Grammar> delegates = grammar.getDirectDelegates();
-            for (int i = 0; delegates != null && i < delegates.size(); i++) {
-                Grammar delegate = (Grammar) delegates.get(i);
-                if (delegate != grammar) { // already processing this one
-                    generateRecognizer(delegate);
-                }
-            }
-        }
-    }
-
-    public void generateDFAs(Grammar g) {
-        for (int d = 1; d <= g.getNumberOfDecisions(); d++) {
-            DFA dfa = g.getLookaheadDFA(d);
-            if (dfa == null) {
-                continue; // not there for some reason, ignore
-            }
-            DOTGenerator dotGenerator = new DOTGenerator(g);
-            String dot = dotGenerator.getDOT(dfa.startState);
-            String dotFileName = g.name + "." + "dec-" + d;
-            if (g.implicitLexer) {
-                dotFileName = g.name + Grammar.grammarTypeToFileNameSuffix[g.type] + "." + "dec-" + d;
-            }
-            try {
-                writeDOTFile(g, dotFileName, dot);
-            } catch (IOException ioe) {
-                ErrorManager.error(ErrorManager.MSG_CANNOT_GEN_DOT_FILE,
-                                   dotFileName,
-                                   ioe);
-            }
-        }
-    }
-
-    protected void generateNFAs(Grammar g) {
-        DOTGenerator dotGenerator = new DOTGenerator(g);
-        Collection rules = g.getAllImportedRules();
-        rules.addAll(g.getRules());
-
-        for (Iterator itr = rules.iterator(); itr.hasNext();) {
-            Rule r = (Rule) itr.next();
-            try {
-                String dot = dotGenerator.getDOT(r.startState);
-                if (dot != null) {
-                    writeDOTFile(g, r, dot);
-                }
-            } catch (IOException ioe) {
-                ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, ioe);
-            }
-        }
-    }
-
-    protected void writeDOTFile(Grammar g, Rule r, String dot) throws IOException {
-        writeDOTFile(g, r.grammar.name + "." + r.name, dot);
-    }
-
-    protected void writeDOTFile(Grammar g, String name, String dot) throws IOException {
-        Writer fw = getOutputFile(g, name + ".dot");
-        fw.write(dot);
-        fw.close();
-    }
-
-    private static void version() {
-        ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
-    }
-
-    private static void help() {
-        ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
-        System.err.println("usage: java org.antlr.Tool [args] file.g [file2.g file3.g ...]");
-        System.err.println("  -o outputDir          specify output directory where all output is generated");
-        System.err.println("  -fo outputDir         same as -o but force even files with relative paths to dir");
-        System.err.println("  -lib dir              specify location of token files");
-        System.err.println("  -depend               generate file dependencies");
-        System.err.println("  -report               print out a report about the grammar(s) processed");
-        System.err.println("  -print                print out the grammar without actions");
-        System.err.println("  -debug                generate a parser that emits debugging events");
-		System.err.println("  -profile              generate a parser that computes profiling information");
-		System.err.println("  -trace                generate a recognizer that traces rule entry/exit");
-        System.err.println("  -nfa                  generate an NFA for each rule");
-        System.err.println("  -dfa                  generate a DFA for each decision point");
-        System.err.println("  -message-format name  specify output style for messages");
-        System.err.println("  -verbose              generate ANTLR version and other information");
-        System.err.println("  -make                 only build if generated files older than grammar");
-        System.err.println("  -version              print the version of ANTLR and exit.");
-        System.err.println("  -language L           override language grammar option; generate L");
-        System.err.println("  -X                    display extended argument list");
-    }
-
-    private static void Xhelp() {
-        ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
-        System.err.println("  -Xgrtree                print the grammar AST");
-        System.err.println("  -Xdfa                   print DFA as text ");
-        System.err.println("  -Xnoprune               test lookahead against EBNF block exit branches");
-        System.err.println("  -Xnocollapse            collapse incident edges into DFA states");
-        System.err.println("  -Xdbgconversion         dump lots of info during NFA conversion");
-        System.err.println("  -Xconversiontimeout     use to restrict NFA conversion exponentiality");
-        System.err.println("  -Xmultithreaded         run the analysis in 2 threads");
-        System.err.println("  -Xnomergestopstates     do not merge stop states");
-        System.err.println("  -Xdfaverbose            generate DFA states in DOT with NFA configs");
-        System.err.println("  -Xwatchconversion       print a message for each NFA before converting");
-        System.err.println("  -XdbgST                 put tags at start/stop of all templates in output");
-        System.err.println("  -Xnfastates             for nondeterminisms, list NFA states for each path");
-        System.err.println("  -Xm m                   max number of rule invocations during conversion           [" + NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK + "]");
-        System.err.println("  -Xmaxdfaedges m         max \"comfortable\" number of edges for single DFA state     [" + DFA.MAX_STATE_TRANSITIONS_FOR_TABLE + "]");
-        System.err.println("  -Xmaxinlinedfastates m  max DFA states before table used rather than inlining      [" + CodeGenerator.MADSI_DEFAULT +"]");
-        System.err.println("  -Xmaxswitchcaselabels m don't generate switch() statements for dfas bigger  than m [" + CodeGenerator.MSCL_DEFAULT +"]");
-        System.err.println("  -Xminswitchalts m       don't generate switch() statements for dfas smaller than m [" + CodeGenerator.MSA_DEFAULT + "]");
-        System.err.println("  -Xsavelexer             don't delete temporary lexers generated from combined grammars");
-    }
-
-    /**
-     * Set the threshold of case labels beyond which ANTLR will not instruct the target template
-     * to generate switch() { case xxx: ...
-     *
-     * @param maxSwitchCaseLabels Maximum number of case labels that ANTLR should allow in the generated code
-     */
-    public void setMaxSwitchCaseLabels(int maxSwitchCaseLabels) {
-        CodeGenerator.MAX_SWITCH_CASE_LABELS = maxSwitchCaseLabels;
-    }
-
-    /**
-     * Set the threshold of the number of alts, below which ANTLR will not instruct the target
-     * template to use a switch statement.
-     *
-     * @param minSwitchAlts the minimum number of alts required to use a switch statement
-     */
-    public void setMinSwitchAlts(int minSwitchAlts) {
-        CodeGenerator.MIN_SWITCH_ALTS = minSwitchAlts;
-    }
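    // Illustrative sketch, not part of the original source: how an embedding build
    // tool might tune these thresholds before running the tool. The grammar file
    // name is hypothetical; Tool(), addGrammarFile() and process() are assumed to
    // be the public entry points of this class.
    //
    //     Tool antlr = new Tool();
    //     antlr.setMaxSwitchCaseLabels(300);
    //     antlr.setMinSwitchAlts(3);
    //     antlr.addGrammarFile("Expr.g");
    //     antlr.process();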
-
-    /**
-     * Set the location (base directory) where output files should be produced
-     * by the ANTLR tool.
-     * @param outputDirectory
-     */
-    public void setOutputDirectory(String outputDirectory) {
-        haveOutputDir = true;
-        this.outputDirectory = outputDirectory;
-    }
-
-    /**
-     * Used by build tools to force the output files to always be
-     * relative to the base output directory, even though the tool
-     * had to set the output directory to an absolute path as it
-     * cannot rely on the working directory like command line invocation
-     * can.
-     *
-     * @param forceRelativeOutput true if output files should always be relative to the base output directory
-     */
-    public void setForceRelativeOutput(boolean forceRelativeOutput) {
-        this.forceRelativeOutput = forceRelativeOutput;
-    }
-
-    /**
-     * Set the base location of input files. Normally (when the tool is
-     * invoked from the command line), the inputDirectory is not set, but
-     * for build tools such as Maven, we need to be able to locate the input
-     * files relative to the base, as the working directory could be anywhere and
-     * changing working directories is not a valid concept for JVMs because of threading and
-     * so on. Setting the directory just means that the getFileDirectory() method will
-     * try to open files relative to this input directory.
-     *
-     * @param inputDirectory Input source base directory
-     */
-    public void setInputDirectory(String inputDirectory) {
-        this.inputDirectory = inputDirectory;
-        haveInputDir = true;
-    }
-
-    /** This method is used by all code generators to create new output
-     *  files. If the outputDir set by -o is not present it will be created.
-     *  The final filename is sensitive to the output directory and
-     *  the directory where the grammar file was found.  If -o is /tmp
-     *  and the original grammar file was foo/t.g then output files
-     *  go in /tmp/foo.
-     *
-     *  The output dir -o spec takes precedence if it's absolute.
-     *  E.g., if the grammar file dir is absolute the output dir is given
-     *  precedence. "-o /tmp /usr/lib/t.g" results in "/tmp/T.java" as
-     *  output (assuming t.g holds T.java).
-     *
-     *  If no -o is specified, then just write to the directory where the
-     *  grammar file was found.
-     *
-     *  If outputDirectory==null then write a String.
-     */
-    public Writer getOutputFile(Grammar g, String fileName) throws IOException {
-        if (getOutputDirectory() == null) {
-            return new StringWriter();
-        }
-        // output directory is a function of where the grammar file lives
-        // for subdir/T.g, you get subdir here.  Well, depends on -o etc...
-        // But, if this is a .tokens file, then we force the output to
-        // be the base output directory (or the current directory if there is no -o)
-        //
-        File outputDir;
-        if (fileName.endsWith(CodeGenerator.VOCAB_FILE_EXTENSION)) {
-            if (haveOutputDir) {
-                outputDir = new File(getOutputDirectory());
-            }
-            else {
-                outputDir = new File(".");
-            }
-        }
-        else {
-            outputDir = getOutputDirectory(g.getFileName());
-        }
-        File outputFile = new File(outputDir, fileName);
-
-        if (!outputDir.exists()) {
-            outputDir.mkdirs();
-        }
-        FileWriter fw = new FileWriter(outputFile);
-        return new BufferedWriter(fw);
-    }
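    // Editorial worked example, not in the original file, restating the Javadoc
    // above: with "-o /tmp" and grammar foo/t.g, getOutputFile(g, "T.java") opens
    // /tmp/foo/T.java; with an absolute grammar path such as /usr/lib/t.g the -o
    // directory wins and the file goes to /tmp/T.java. A ".tokens" vocab file
    // always lands in the base output directory (or "." when no -o was given).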
-
-    /**
-     * Return the location where ANTLR will generate output files for a given file. This is a
-     * base directory and output files will be relative to here in some cases
-     * such as when the -o option is used and input files are given relative
-     * to the input directory.
-     *
-     * @param fileNameWithPath path to input source
-     * @return
-     */
-    public File getOutputDirectory(String fileNameWithPath) {
-
-        File outputDir = new File(getOutputDirectory());
-        String fileDirectory;
-
-        // Some files are given to us without a path but should
-        // still be written to the output directory, in a path relative to
-        // the output directory. The file directory is either the set of sub-directories
-        // or just the relative path recorded for the parent grammar. This means
-        // that when we write the tokens files, or the .java files for imported grammars,
-        // we will write them in the correct place.
-        //
-        if (fileNameWithPath.lastIndexOf(File.separatorChar) == -1) {
-
-            // No path is included in the file name, so make the file
-            // directory the same as the parent grammar (which might still be just ""
-            // but when it is not, we will write the file in the correct place).
-            //
-            fileDirectory = grammarOutputDirectory;
-
-        }
-        else {
-            fileDirectory = fileNameWithPath.substring(0, fileNameWithPath.lastIndexOf(File.separatorChar));
-        }
-        if (haveOutputDir) {
-            // -o /tmp /var/lib/t.g => /tmp/T.java
-            // -o subdir/output /usr/lib/t.g => subdir/output/T.java
-            // -o . /usr/lib/t.g => ./T.java
-            if ((fileDirectory != null && !forceRelativeOutput) &&
-                (new File(fileDirectory).isAbsolute() ||
-                 fileDirectory.startsWith("~")) || // isAbsolute doesn't count this :(
-                isForceAllFilesToOutputDir()) {
-                // somebody set the dir, it takes precedence; write new file there
-                outputDir = new File(getOutputDirectory());
-            }
-            else {
-                // -o /tmp subdir/t.g => /tmp/subdir/T.java
-                if (fileDirectory != null) {
-                    outputDir = new File(getOutputDirectory(), fileDirectory);
-                }
-                else {
-                    outputDir = new File(getOutputDirectory());
-                }
-            }
-        }
-        else {
-            // they didn't specify a -o dir so just write to location
-            // where the grammar is, absolute or relative; this will only happen
-            // with command line invocation as build tools will always
-            // supply an output directory.
-            //
-            outputDir = new File(fileDirectory);
-        }
-        return outputDir;
-    }
-
-    /**
-     * Name a file from the -lib dir.  Imported grammars and .tokens files
-     *
-     * If we do not locate the file in the library directory, then we try
-     * the location of the originating grammar.
-     *
-     * @param fileName input name we are looking for
-     * @return Path to file that we think should be the import file
-     *
-     * @throws java.io.IOException
-     */
-    public String getLibraryFile(String fileName) throws IOException {
-
-        // First, see if we can find the file in the library directory
-        //
-        File f = new File(getLibraryDirectory() + File.separator + fileName);
-
-        if (f.exists()) {
-
-            // Found in the library directory
-            //
-            return f.getAbsolutePath();
-        }
-
-        // Need to assume it is in the same location as the input file. Note that
-        // this is only relevant for external build tools and when the input grammar
-        // was specified relative to the source directory (working directory if using
-        // the command line).
-        //
-        return parentGrammarDirectory + File.separator + fileName;
-    }
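    // Editorial example, not in the original file (directory names are hypothetical):
    // with "-lib /usr/local/share/grammars", getLibraryFile("T.tokens") first checks
    // /usr/local/share/grammars/T.tokens and, if that does not exist, falls back to
    // <parentGrammarDirectory>/T.tokens.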
-
-    /** Return the directory containing the grammar file for this grammar.
-     *  Normally this is a relative path from the current directory.  People will
-     *  often do "java org.antlr.Tool grammars/*.g3"  So the file will be
-     *  "grammars/foo.g3" etc...  This method returns "grammars".
-     *
-     *  If we have been given a specific input directory as a base, then
-     *  we must find the directory relative to this directory, unless the
-     *  file name is given to us in absolute terms.
-     */
-    public String getFileDirectory(String fileName) {
-
-        File f;
-        if (haveInputDir && !fileName.startsWith(File.separator)) {
-            f = new File(inputDirectory, fileName);
-        }
-        else {
-            f = new File(fileName);
-        }
-        // And ask Java what the base directory of this location is
-        //
-        return f.getParent();
-    }
-
-    /** Return a File descriptor for vocab file.  Look in library or
-     *  in -o output path.  antlr -o foo T.g U.g where U needs T.tokens
-     *  won't work unless we look in foo too. If we do not find the
-     *  file in the lib directory then we must assume that the .tokens file
-     *  is going to be generated as part of this build and we have defined
-     *  .tokens files so that they ALWAYS are generated in the base output
-     *  directory, which means the current directory for the command line tool if there
-     *  was no output directory specified.
-     */
-    public File getImportedVocabFile(String vocabName) {
-
-        File f = new File(getLibraryDirectory(),
-                          File.separator +
-                          vocabName +
-                          CodeGenerator.VOCAB_FILE_EXTENSION);
-        if (f.exists()) {
-            return f;
-        }
-
-        // We did not find the vocab file in the lib directory, so we need
-        // to look for it in the output directory which is where .tokens
-        // files are generated (in the base, not relative to the input
-        // location.)
-        //
-        if (haveOutputDir) {
-            f = new File(getOutputDirectory(), vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
-        }
-        else {
-            f = new File(vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
-        }
-        return f;
-    }
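    // Editorial example, not in the original file: for "antlr -o build -lib lib T.g U.g"
    // where U imports T's vocabulary, the lookup order is lib/T.tokens first, then
    // build/T.tokens, because .tokens files are always emitted into the base output
    // directory. The paths are hypothetical.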
-
-    /** If the tool needs to panic/exit, how do we do that?
-     */
-    public void panic() {
-        throw new Error("ANTLR panic");
-    }
-
-    /** Return a time stamp string accurate to sec: yyyy-mm-dd hh:mm:ss
-     */
-    public static String getCurrentTimeStamp() {
-        GregorianCalendar calendar = new java.util.GregorianCalendar();
-        int y = calendar.get(Calendar.YEAR);
-        int m = calendar.get(Calendar.MONTH) + 1; // zero-based for months
-        int d = calendar.get(Calendar.DAY_OF_MONTH);
-        int h = calendar.get(Calendar.HOUR_OF_DAY);
-        int min = calendar.get(Calendar.MINUTE);
-        int sec = calendar.get(Calendar.SECOND);
-        String sy = String.valueOf(y);
-        String sm = m < 10 ? "0" + m : String.valueOf(m);
-        String sd = d < 10 ? "0" + d : String.valueOf(d);
-        String sh = h < 10 ? "0" + h : String.valueOf(h);
-        String smin = min < 10 ? "0" + min : String.valueOf(min);
-        String ssec = sec < 10 ? "0" + sec : String.valueOf(sec);
-        return new StringBuffer().append(sy).append("-").append(sm).append("-").append(sd).append(" ").append(sh).append(":").append(smin).append(":").append(ssec).toString();
-    }
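    // Editorial example, not in the original file: for 4 January 2013, 09:05:07
    // local time, this returns "2013-01-04 09:05:07"; single-digit month, day and
    // time fields are zero-padded by the ternaries above.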
-
-    /**
-     * Provide the List of all grammar file names that the ANTLR tool will
-     * process or has processed.
-     *
-     * @return the grammarFileNames
-     */
-    public List<String> getGrammarFileNames() {
-        return grammarFileNames;
-    }
-
-    /**
-     * Indicates whether ANTLR has generated or will generate a description of
-     * all the NFAs in <a href="http://www.graphviz.org">Dot format</a>
-     *
-     * @return the generate_NFA_dot
-     */
-    public boolean isGenerate_NFA_dot() {
-        return generate_NFA_dot;
-    }
-
-    /**
-     * Indicates whether ANTLR has generated or will generate a description of
-     * all the DFAs in <a href="http://www.graphviz.org">Dot format</a>
-     *
-     * @return the generate_DFA_dot
-     */
-    public boolean isGenerate_DFA_dot() {
-        return generate_DFA_dot;
-    }
-
-    /**
-     * Return the Path to the base output directory, where ANTLR
-     * will generate all the output files for the current language target as
-     * well as any ancillary files such as .tokens vocab files.
-     *
-     * @return the output Directory
-     */
-    public String getOutputDirectory() {
-        return outputDirectory;
-    }
-
-    /**
-     * Return the Path to the directory in which ANTLR will search for ancillary
-     * files such as .tokens vocab files and imported grammar files.
-     *
-     * @return the lib Directory
-     */
-    public String getLibraryDirectory() {
-        return libDirectory;
-    }
-
-    /**
-     * Indicate if ANTLR has generated, or will generate a debug version of the
-     * recognizer. Debug versions of a parser communicate with a debugger such
-     * as that contained in ANTLRWorks and at start up will 'hang' waiting for
-     * a connection on an IP port (49100 by default).
-     *
-     * @return the debug flag
-     */
-    public boolean isDebug() {
-        return debug;
-    }
-
-    /**
-     * Indicate whether ANTLR has generated, or will generate a version of the
-     * recognizer that prints trace messages on entry and exit of each rule.
-     *
-     * @return the trace flag
-     */
-    public boolean isTrace() {
-        return trace;
-    }
-
-    /**
-     * Indicates whether ANTLR has generated or will generate a version of the
-     * recognizer that gathers statistics about its execution, which it prints when
-     * it terminates.
-     *
-     * @return the profile
-     */
-    public boolean isProfile() {
-        return profile;
-    }
-
-    /**
-     * Indicates whether ANTLR has generated or will generate a report of various
-     * elements of the grammar analysis, once it has finished analyzing a grammar
-     * file.
-     *
-     * @return the report flag
-     */
-    public boolean isReport() {
-        return report;
-    }
-
-    /**
-     * Indicates whether ANTLR has printed, or will print, a version of the input grammar
-     * file(s) that is stripped of any action code embedded within.
-     *
-     * @return the printGrammar flag
-     */
-    public boolean isPrintGrammar() {
-        return printGrammar;
-    }
-
-    /**
-     * Indicates whether ANTLR has supplied, or will supply, a list of all the things
-     * that the input grammar depends upon and all the things that will be generated
-     * when that grammar is successfully analyzed.
-     *
-     * @return the depend flag
-     */
-    public boolean isDepend() {
-        return depend;
-    }
-
-    /**
-     * Indicates whether ANTLR will force all files to the output directory, even
-     * if the input files have relative paths from the input directory.
-     *
-     * @return the forceAllFilesToOutputDir flag
-     */
-    public boolean isForceAllFilesToOutputDir() {
-        return forceAllFilesToOutputDir;
-    }
-
-    /**
-     * Indicates whether ANTLR will be verbose when analyzing grammar files, such as
-     * displaying the names of the files it is generating and similar information.
-     *
-     * @return the verbose flag
-     */
-    public boolean isVerbose() {
-        return verbose;
-    }
-
-    /**
-     * Provide the current setting of the conversion timeout on DFA creation.
-     *
-     * @return DFA creation timeout value in milliseconds
-     */
-    public int getConversionTimeout() {
-        return DFA.MAX_TIME_PER_DFA_CREATION;
-    }
-
-    /**
-     * Returns the current setting of the message format descriptor
-     * @return Current message format
-     */
-    public String getMessageFormat() {
-        return ErrorManager.getMessageFormat().toString();
-    }
-
-    /**
-     * Returns the number of errors that the analysis/processing produced.
-     * @return Error count
-     */
-    public int getNumErrors() {
-        return ErrorManager.getNumErrors();
-    }
-
-    /**
-     * Indicate whether the tool will analyze the dependencies of the provided grammar
-     * file list and ensure that grammars with dependencies are built
-     * after any of the other grammars in the list that they are dependent on. Setting
-     * this option also has the side effect that any grammars that are includes for other
-     * grammars in the list are excluded from individual analysis, which allows the caller
-     * to invoke the tool via org.antlr.Tool -make *.g and not worry about the inclusion
-     * of grammars that are just includes for other grammars or what order the grammars
-     * appear on the command line.
-     *
-     * This option was coded to make life easier for tool integration (such as Maven) but
-     * may also be useful at the command line.
-     *
-     * @return true if the tool is currently configured to analyze and sort grammar files.
-     */
-    public boolean getMake() {
-        return make;
-    }
-
-    /**
-     * Set the message format to one of ANTLR, gnu, vs2005
-     *
-     * @param format
-     */
-    public void setMessageFormat(String format) {
-        ErrorManager.setFormat(format);
-    }
-
-    /** Provide the List of all grammar file names that the ANTLR tool should process.
-     *
-     * @param grammarFileNames The list of grammar files to process
-     */
-    public void setGrammarFileNames(List<String> grammarFileNames) {
-        this.grammarFileNames = grammarFileNames;
-    }
-
-    public void addGrammarFile(String grammarFileName) {
-        if (!grammarFileNames.contains(grammarFileName)) {
-            grammarFileNames.add(grammarFileName);
-        }
-    }
-
-    /**
-     * Indicate whether ANTLR should generate a description of
-     * all the NFAs in <a href="http://www.graphviz.org">Dot format</a>
-     *
-     * @param generate_NFA_dot True to generate dot descriptions
-     */
-    public void setGenerate_NFA_dot(boolean generate_NFA_dot) {
-        this.generate_NFA_dot = generate_NFA_dot;
-    }
-
-    /**
-     * Indicate whether ANTLR should generate a description of
-     * all the DFAs in <a href="http://www.graphviz.org">Dot format</a>
-     *
-     * @param generate_DFA_dot True to generate dot descriptions
-     */
-    public void setGenerate_DFA_dot(boolean generate_DFA_dot) {
-        this.generate_DFA_dot = generate_DFA_dot;
-    }
-
-    /**
-     * Set the Path to the directory in which ANTLR will search for ancillary
-     * files such as .tokens vocab files and imported grammar files.
-     *
-     * @param libDirectory the libDirectory to set
-     */
-    public void setLibDirectory(String libDirectory) {
-        this.libDirectory = libDirectory;
-    }
-
-    /**
-     * Indicate whether ANTLR should generate a debug version of the
-     * recognizer. Debug versions of a parser communicate with a debugger such
-     * as that contained in ANTLRWorks and at start up will 'hang' waiting for
-     * a connection on an IP port (49100 by default).
-     *
-     * @param debug true to generate a debug mode parser
-     */
-    public void setDebug(boolean debug) {
-        this.debug = debug;
-    }
-
-    /**
-     * Indicate whether ANTLR should generate a version of the
-     * recognizer that prints trace messages on entry and exit of each rule
-     *
-     * @param trace true to generate a tracing parser
-     */
-    public void setTrace(boolean trace) {
-        this.trace = trace;
-    }
-
-    /**
-     * Indicate whether ANTLR should generate a version of the
-     * recognizer that gathers statistics about its execution, which it prints when
-     * it terminates.
-     *
-     * @param profile true to generate a profiling parser
-     */
-    public void setProfile(boolean profile) {
-        this.profile = profile;
-    }
-
-    /**
-     * Indicate whether ANTLR should generate a report of various
-     * elements of the grammar analysis, once it has finished analyzing a grammar
-     * file.
-     *
-     * @param report true to generate the analysis report
-     */
-    public void setReport(boolean report) {
-        this.report = report;
-    }
-
-    /**
-     * Indicate whether ANTLR should print a version of the input grammar
-     * file(s) that is stripped of any action code embedded within.
-     *
-     * @param printGrammar true to generate a stripped file
-     */
-    public void setPrintGrammar(boolean printGrammar) {
-        this.printGrammar = printGrammar;
-    }
-
-    /**
-     * Indicate whether ANTLR should supply a list of all the things
-     * that the input grammar depends upon and all the things that will be generated
-     * when that grammar is successfully analyzed.
-     *
-     * @param depend true to get depends set rather than process the grammar
-     */
-    public void setDepend(boolean depend) {
-        this.depend = depend;
-    }
-
-    /**
-     * Indicate whether ANTLR should force all files to the output directory, even
-     * if the input files have relative paths from the input directory.
-     *
-     * @param forceAllFilesToOutputDir true to force files to output directory
-     */
-    public void setForceAllFilesToOutputDir(boolean forceAllFilesToOutputDir) {
-        this.forceAllFilesToOutputDir = forceAllFilesToOutputDir;
-    }
-
-    /**
-     * Indicate whether ANTLR should be verbose when analyzing grammar files, such as
-     * displaying the names of the files it is generating and similar information.
-     *
-     * @param verbose true to be verbose
-     */
-    public void setVerbose(boolean verbose) {
-        this.verbose = verbose;
-    }
-
-    /**
-     * Indicate whether the tool should analyze the dependencies of the provided grammar
-     * file list and ensure that the grammars with dependencies are built
-     * after any of the other grammars in the list that they are dependent on. Setting
-     * this option also has the side effect that any grammars that are includes for other
-     * grammars in the list are excluded from individual analysis, which allows the caller
-     * to invoke the tool via org.antlr.Tool -make *.g and not worry about the inclusion
-     * of grammars that are just includes for other grammars or what order the grammars
-     * appear on the command line.
-     *
-     * This option was coded to make life easier for tool integration (such as Maven) but
-     * may also be useful at the command line.
-     *
-     * @param make
-     */
-    public void setMake(boolean make) {
-        this.make = make;
-    }
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/ActionLabel.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/ActionLabel.java
deleted file mode 100644
index e39024c..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/ActionLabel.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.tool.Grammar;
-import org.antlr.tool.GrammarAST;
-
-public class ActionLabel extends Label {
-	public GrammarAST actionAST;
-	
-	public ActionLabel(GrammarAST actionAST) {
-		super(ACTION);
-		this.actionAST = actionAST;
-	}
-
-	public boolean isEpsilon() {
-		return true; // we are to be ignored by analysis 'cept for predicates
-	}
-
-	public boolean isAction() {
-		return true;
-	}
-
-	public String toString() {
-		return "{"+actionAST+"}";
-	}
-
-	public String toString(Grammar g) {
-		return toString();
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/DFA.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/DFA.java
deleted file mode 100644
index 896acc7..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/DFA.java
+++ /dev/null
@@ -1,1167 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.misc.IntSet;
-import org.antlr.misc.IntervalSet;
-import org.antlr.misc.Utils;
-import org.antlr.runtime.IntStream;
-import org.stringtemplate.v4.ST;
-import org.antlr.tool.*;
-
-import java.util.*;
-
-/** A DFA (converted from a grammar's NFA).
- *  DFAs are used as prediction machine for alternative blocks in all kinds
- *  of recognizers (lexers, parsers, tree walkers).
- */
-public class DFA {
-	public static final int REACHABLE_UNKNOWN = -2;
-	public static final int REACHABLE_BUSY = -1; // in process of computing
-	public static final int REACHABLE_NO = 0;
-	public static final int REACHABLE_YES = 1;
-
-	public static final int CYCLIC_UNKNOWN = -2;
-	public static final int CYCLIC_BUSY = -1; // in process of computing
-	public static final int CYCLIC_DONE = 0;
-	
-	/** Prevent explosion of DFA states during conversion. The max number
-	 *  of states per alt in a single decision's DFA.
-	public static final int MAX_STATES_PER_ALT_IN_DFA = 450;
-	 */
-
-	/** Set to 0 to not terminate early (time in ms) */
-	public static int MAX_TIME_PER_DFA_CREATION = 1*1000;
-
-	/** How many edges can each DFA state have before a "special" state
-	 *  is created that uses IF expressions instead of a table?
-	 */
-	public static int MAX_STATE_TRANSITIONS_FOR_TABLE = 65534;
-
-	/** What's the start state for this DFA? */
-    public DFAState startState;
-
-	/** This DFA is being built for which decision? */
-	public int decisionNumber = 0;
-
-    /** From what NFAState did we create the DFA? */
-    public NFAState decisionNFAStartState;
-
-	/** The printable grammar fragment associated with this DFA */
-	public String description;
-
-	/** A set of all uniquely-numbered DFA states.  Maps hash of DFAState
-     *  to the actual DFAState object.  We use this to detect
-     *  existing DFA states.  Map<DFAState,DFAState>.  Use Map so
-	 *  we can get old state back (Set only allows you to see if it's there).
-	 *  Not used during fixed k lookahead as it's a waste to fill it with
-	 *  a dup of states array.
-     */
-    protected Map<DFAState, DFAState> uniqueStates = new HashMap<DFAState, DFAState>();
-
-	/** Maps the state number to the actual DFAState.  Use a Vector as it
-	 *  grows automatically when I set the ith element.  This contains all
-	 *  states, but the states are not unique.  s3 might be same as s1 so
-	 *  s3 -> s1 in this table.  This is how cycles occur.  If fixed k,
-	 *  then these states will all be unique as states[i] always points
-	 *  at state i when no cycles exist.
-	 *
-	 *  This is managed in parallel with uniqueStates and simply provides
-	 *  a way to go from state number to DFAState rather than via a
-	 *  hash lookup.
-	 */
-	protected Vector<DFAState> states = new Vector<DFAState>();
-
-	/** Unique state numbers per DFA */
-	protected int stateCounter = 0;
-
-	/** count only new states not states that were rejected as already present */
-	protected int numberOfStates = 0;
-
-	/** User specified max fixed lookahead.  If 0, nothing specified.  -1
-	 *  implies we have not looked at the options table yet to set k.
-	 */
-	protected int user_k = -1;
-
-	/** While building the DFA, track max lookahead depth if not cyclic */
-	protected int max_k = -1;
-
-    /** Is this DFA reduced?  I.e., can all states lead to an accept state? */
-    protected boolean reduced = true;
-
-    /** Are there any loops in this DFA?
-	 *  Computed by doesStateReachAcceptState()
-	 */
-    protected boolean cyclic = false;
-
-	/** Track whether this DFA has at least one sem/syn pred encountered
-	 *  during a closure operation.  This is useful for deciding whether
-	 *  to retry a non-LL(*) with k=1.  If no pred, it will not work w/o
-	 *  a pred so don't bother.  It would just give another error message.
-	 */
-	public boolean predicateVisible = false;
-
-	public boolean hasPredicateBlockedByAction = false;
-
-	/** Each alt in an NFA derived from a grammar must have a DFA state that
-     *  predicts it lest the parser not know what to do.  Nondeterminisms can
-     *  lead to this situation (assuming no semantic predicates can resolve
-     *  the problem) and when for some reason, I cannot compute the lookahead
-     *  (which might arise from an error in the algorithm or from
-     *  left-recursion etc...).  This list starts out with all alts contained
-     *  and then in method doesStateReachAcceptState() I remove the alts I
-     *  know to be uniquely predicted.
-     */
-    protected List<Integer> unreachableAlts;
-
-	protected int nAlts = 0;
-
-	/** We only want one accept state per predicted alt; track here */
-	protected DFAState[] altToAcceptState;
-
-	/** Track whether an alt discovers recursion for each alt during
-	 *  NFA to DFA conversion; >1 alt with recursion implies nonregular.
-	 */
-	public IntSet recursiveAltSet = new IntervalSet();
-
-	/** Which NFA are we converting (well, which piece of the NFA)? */
-    public NFA nfa;
-
-	protected NFAToDFAConverter nfaConverter;
-
-	/** This probe tells you a lot about a decision and is useful even
-	 *  when there is no error such as when a syntactic nondeterminism
-	 *  is solved via semantic predicates.  Perhaps a GUI would want
-	 *  the ability to show that.
-	 */
-	public DecisionProbe probe = new DecisionProbe(this);
-
-	/** Track absolute time of the conversion so we can have a failsafe:
-	 *  if it takes too long, then terminate.  Assume bugs are in the
-	 *  analysis engine.
-	 */
-	//protected long conversionStartTime;
-
-	/** Map an edge transition table to a unique set number; ordered so
-	 *  we can push into the output template as an ordered list of sets
-	 *  and then ref them from within the transition[][] table.  Like this
-	 *  for C# target:
-	 *     public static readonly DFA30_transition0 =
-	 *     	new short[] { 46, 46, -1, 46, 46, -1, -1, -1, -1, -1, -1, -1,...};
-	 *         public static readonly DFA30_transition1 =
-	 *     	new short[] { 21 };
-	 *      public static readonly short[][] DFA30_transition = {
-	 *     	  DFA30_transition0,
-	 *     	  DFA30_transition0,
-	 *     	  DFA30_transition1,
-	 *     	  ...
-	 *      };
-	 */
-	public Map edgeTransitionClassMap = new LinkedHashMap();
-
-	/** The unique edge transition class number; every time we see a new
-	 *  set of edges emanating from a state, we number it so we can reuse
-	 *  if it's ever seen again for another state.  For Java grammar,
-	 *  some of the big edge transition tables are seen about 57 times.
-	 */
-	protected int edgeTransitionClass = 0;
-
-	/* This DFA can be converted to a transition[state][char] table and
-	 * the following tables are filled by createStateTables upon request.
-	 * These are injected into the templates for code generation.
-	 * See March 25, 2006 entry for description:
-	 *   http://www.antlr.org/blog/antlr3/codegen.tml
-	 * Often using Vector as can't set ith position in a List and have
-	 * it extend list size; bizarre.
-	 */
-
-	/** List of special DFAState objects */
-	public List specialStates;
-	/** List of ST for special states. */
-	public List specialStateSTs;
-	public Vector accept;
-	public Vector eot;
-	public Vector eof;
-	public Vector min;
-	public Vector max;
-	public Vector special;
-	public Vector transition;
-	/** just the Vector<Integer> indicating which unique edge table is at
-	 *  position i.
-	 */
-	public Vector transitionEdgeTables; // not used by java yet
-	protected int uniqueCompressedSpecialStateNum = 0;
-
-	/** Which generator to use if we're building state tables */
-	protected CodeGenerator generator = null;
-
-	protected DFA() {;}
-
-	public DFA(int decisionNumber, NFAState decisionStartState) {
-		this.decisionNumber = decisionNumber;
-        this.decisionNFAStartState = decisionStartState;
-        nfa = decisionStartState.nfa;
-        nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
-        //setOptions( nfa.grammar.getDecisionOptions(getDecisionNumber()) );
-        initAltRelatedInfo();
-
-		//long start = System.currentTimeMillis();
-        nfaConverter = new NFAToDFAConverter(this);
-		try {
-			nfaConverter.convert();
-
-			// figure out if there are problems with decision
-			verify();
-
-			if ( !probe.isDeterministic() || probe.analysisOverflowed() ) {
-				probe.issueWarnings();
-			}
-
-			// must be after verify as it computes cyclic, needed by this routine
-			// should be after warnings because early termination or something
-			// will not allow the reset to operate properly in some cases.
-			resetStateNumbersToBeContiguous();
-
-			//long stop = System.currentTimeMillis();
-			//System.out.println("verify cost: "+(int)(stop-start)+" ms");
-		}
-//		catch (AnalysisTimeoutException at) {
-//			probe.reportAnalysisTimeout();
-//			if ( !okToRetryDFAWithK1() ) {
-//				probe.issueWarnings();
-//			}
-//		}
-		catch (NonLLStarDecisionException nonLL) {
-			probe.reportNonLLStarDecision(this);
-			// >1 alt recurses, k=* and no auto backtrack nor manual sem/syn
-			if ( !okToRetryDFAWithK1() ) {
-				probe.issueWarnings();
-			}
-		}
-    }
-
-	/** Walk all states and reset their numbers to be a contiguous sequence
-	 *  of integers starting from 0.  Only cyclic DFA can have unused positions
-	 *  in states list.  State i might be identical to a previous state j and
-	 *  will result in states[i] == states[j].  We don't want to waste a state
-	 *  number on this.  Useful mostly for code generation in tables.
-	 *
-	 *  At the start of this routine, states[i].stateNumber <= i by definition.
-	 *  If states[50].stateNumber is 50 then a cycle during conversion may
-	 *  try to add state 103, but we find that an identical DFA state, named
-	 *  50, already exists, hence, states[103]==states[50] and both have
-	 *  stateNumber 50 as they point at same object.  Afterwards, the set
-	 *  of state numbers from all states should represent a contiguous range
-	 *  from 0..n-1 where n is the number of unique states.
-	 */
-	public void resetStateNumbersToBeContiguous() {
-		if ( getUserMaxLookahead()>0 ) {
-			// all numbers are unique already; no states are thrown out.
-			return;
-		}
-
-        // walk list of DFAState objects by state number,
-		// setting state numbers to 0..n-1
-		int snum=0;
-		for (int i = 0; i <= getMaxStateNumber(); i++) {
-			DFAState s = getState(i);
-            // some states are unused after creation most commonly due to cycles
-            // or conflict resolution.
-            if ( s==null ) {
-                continue;
-            }
-			// state i is mapped to DFAState with state number set to i originally
-			// so if it's less than i, then we renumbered it already; that
-			// happens when states have been merged or cycles occurred I think.
-			// states[50] will point to DFAState with s50 in it but
-			// states[103] might also point at this same DFAState.  Since
-			// 50 < 103 then it's already been renumbered as it points downwards.
-			boolean alreadyRenumbered = s.stateNumber<i;
-			if ( !alreadyRenumbered ) {
-				// state i is a valid state, reset its state number
-				s.stateNumber = snum; // rewrite state numbers to be 0..n-1
-				snum++;
-			}
-		}
-        if ( snum!=getNumberOfStates() ) {
-			ErrorManager.internalError("DFA "+decisionNumber+": "+
-				decisionNFAStartState.getDescription()+" num unique states "+getNumberOfStates()+
-				"!= num renumbered states "+snum);
-		}
-	}
-
-	// JAVA-SPECIFIC Accessors!!!!!  It is so impossible to get arrays
-	// or even consistently formatted strings acceptable to java that
-	// I am forced to build the individual char elements here
-
-	public List getJavaCompressedAccept() { return getRunLengthEncoding(accept); }
-	public List getJavaCompressedEOT() { return getRunLengthEncoding(eot); }
-	public List getJavaCompressedEOF() { return getRunLengthEncoding(eof); }
-	public List getJavaCompressedMin() { return getRunLengthEncoding(min); }
-	public List getJavaCompressedMax() { return getRunLengthEncoding(max); }
-	public List getJavaCompressedSpecial() { return getRunLengthEncoding(special); }
-	public List getJavaCompressedTransition() {
-		if ( transition==null || transition.size()==0 ) {
-			return null;
-		}
-		List encoded = new ArrayList(transition.size());
-		// walk Vector<Vector<FormattedInteger>> which is the transition[][] table
-		for (int i = 0; i < transition.size(); i++) {
-			Vector transitionsForState = (Vector) transition.elementAt(i);
-			encoded.add(getRunLengthEncoding(transitionsForState));
-		}
-		return encoded;
-	}
-
-	/** Compress the incoming data list so that runs of same number are
-	 *  encoded as number,value pair sequences.  3 -1 -1 -1 28 is encoded
-	 *  as 1 3 3 -1 1 28.  I am pretty sure this is the lossless compression
-	 *  that GIF files use.  Transition tables are heavily compressed by
-	 *  this technique.  I got the idea from JFlex http://jflex.de/
-	 *
-	 *  Return List<String> where each string is either \xyz for 8bit char
-	 *  and \uFFFF for 16bit.  Hideous and specific to Java, but it is the
-	 *  only target bad enough to need it.
-	 */
-	public List getRunLengthEncoding(List data) {
-		if ( data==null || data.size()==0 ) {
-			// for states with no transitions we want an empty string ""
-			// to hold its place in the transitions array.
-			List empty = new ArrayList();
-			empty.add("");
-			return empty;
-		}
-		int size = Math.max(2,data.size()/2);
-		List encoded = new ArrayList(size); // guess at size
-		// scan values looking for runs
-		int i = 0;
-		Integer emptyValue = Utils.integer(-1);
-		while ( i < data.size() ) {
-			Integer I = (Integer)data.get(i);
-			if ( I==null ) {
-				I = emptyValue;
-			}
-			// count the length of the run of values equal to I
-			int n = 0;
-			for (int j = i; j < data.size(); j++) {
-				Integer v = (Integer)data.get(j);
-				if ( v==null ) {
-					v = emptyValue;
-				}
-				if ( I.equals(v) ) {
-					n++;
-				}
-				else {
-					break;
-				}
-			}
-			encoded.add(generator.target.encodeIntAsCharEscape((char)n));
-			encoded.add(generator.target.encodeIntAsCharEscape((char)I.intValue()));
-			i+=n;
-		}
-		return encoded;
-	}
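	// Editorial worked example, not in the original file, matching the Javadoc above:
	// data = [3, -1, -1, -1, 28] scans as one 3, three -1s and one 28, giving the
	// (count, value) pairs (1,3) (3,-1) (1,28), i.e. 1 3 3 -1 1 28; each number is
	// then emitted through the target's encodeIntAsCharEscape() as a char escape.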
-
-	public void createStateTables(CodeGenerator generator) {
-		//System.out.println("createTables:\n"+this);
-		this.generator = generator;
-		description = getNFADecisionStartState().getDescription();
-		description =
-			generator.target.getTargetStringLiteralFromString(description);
-
-		// create all the tables
-		special = new Vector(this.getNumberOfStates()); // Vector<short>
-		special.setSize(this.getNumberOfStates());
-		specialStates = new ArrayList();				// List<DFAState>
-		specialStateSTs = new ArrayList();				// List<ST>
-		accept = new Vector(this.getNumberOfStates()); // Vector<int>
-		accept.setSize(this.getNumberOfStates());
-		eot = new Vector(this.getNumberOfStates()); // Vector<int>
-		eot.setSize(this.getNumberOfStates());
-		eof = new Vector(this.getNumberOfStates()); // Vector<int>
-		eof.setSize(this.getNumberOfStates());
-		min = new Vector(this.getNumberOfStates()); // Vector<int>
-		min.setSize(this.getNumberOfStates());
-		max = new Vector(this.getNumberOfStates()); // Vector<int>
-		max.setSize(this.getNumberOfStates());
-		transition = new Vector(this.getNumberOfStates()); // Vector<Vector<int>>
-		transition.setSize(this.getNumberOfStates());
-		transitionEdgeTables = new Vector(this.getNumberOfStates()); // Vector<Vector<int>>
-		transitionEdgeTables.setSize(this.getNumberOfStates());
-
-		// for each state in the DFA, fill relevant tables.
-		Iterator it = null;
-		if ( getUserMaxLookahead()>0 ) {
-			it = states.iterator();
-		}
-		else {
-			it = getUniqueStates().values().iterator();
-		}
-		while ( it.hasNext() ) {
-			DFAState s = (DFAState)it.next();
-			if ( s==null ) {
-				// ignore null states; some acyclic DFAs see this condition
-				// when inlining DFA (due to lack of exit branch pruning?)
-				continue;
-			}
-			if ( s.isAcceptState() ) {
-				// can't compute min,max,special,transition on accepts
-				accept.set(s.stateNumber,
-						   Utils.integer(s.getUniquelyPredictedAlt()));
-			}
-			else {
-				createMinMaxTables(s);
-				createTransitionTableEntryForState(s);
-				createSpecialTable(s);
-				createEOTAndEOFTables(s);
-			}
-		}
-
-		// now that we have computed list of specialStates, gen code for 'em
-		for (int i = 0; i < specialStates.size(); i++) {
-			DFAState ss = (DFAState) specialStates.get(i);
-			ST stateST =
-				generator.generateSpecialState(ss);
-			specialStateSTs.add(stateST);
-		}
-
-		// check that the tables are not messed up by encode/decode
-		/*
-		testEncodeDecode(min);
-		testEncodeDecode(max);
-		testEncodeDecode(accept);
-		testEncodeDecode(special);
-		System.out.println("min="+min);
-		System.out.println("max="+max);
-		System.out.println("eot="+eot);
-		System.out.println("eof="+eof);
-		System.out.println("accept="+accept);
-		System.out.println("special="+special);
-		System.out.println("transition="+transition);
-		*/
-	}
-
-	/*
-	private void testEncodeDecode(List data) {
-		System.out.println("data="+data);
-		List encoded = getRunLengthEncoding(data);
-		StringBuffer buf = new StringBuffer();
-		for (int i = 0; i < encoded.size(); i++) {
-			String I = (String)encoded.get(i);
-			int v = 0;
-			if ( I.startsWith("\\u") ) {
-				v = Integer.parseInt(I.substring(2,I.length()), 16);
-			}
-			else {
-				v = Integer.parseInt(I.substring(1,I.length()), 8);
-			}
-			buf.append((char)v);
-		}
-		String encodedS = buf.toString();
-		short[] decoded = org.antlr.runtime.DFA.unpackEncodedString(encodedS);
-		//System.out.println("decoded:");
-		for (int i = 0; i < decoded.length; i++) {
-			short x = decoded[i];
-			if ( x!=((Integer)data.get(i)).intValue() ) {
-				System.err.println("problem with encoding");
-			}
-			//System.out.print(", "+x);
-		}
-		//System.out.println();
-	}
-	*/
-
-	protected void createMinMaxTables(DFAState s) {
-		int smin = Label.MAX_CHAR_VALUE + 1;
-		int smax = Label.MIN_ATOM_VALUE - 1;
-		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
-			Transition edge = (Transition) s.transition(j);
-			Label label = edge.label;
-			if ( label.isAtom() ) {
-				if ( label.getAtom()>=Label.MIN_CHAR_VALUE ) {
-					if ( label.getAtom()<smin ) {
-						smin = label.getAtom();
-					}
-					if ( label.getAtom()>smax ) {
-						smax = label.getAtom();
-					}
-				}
-			}
-			else if ( label.isSet() ) {
-				IntervalSet labels = (IntervalSet)label.getSet();
-				int lmin = labels.getMinElement();
-				// if valid char (don't do EOF) and less than current min
-				if ( lmin<smin && lmin>=Label.MIN_CHAR_VALUE ) {
-					smin = labels.getMinElement();
-				}
-				if ( labels.getMaxElement()>smax ) {
-					smax = labels.getMaxElement();
-				}
-			}
-		}
-
-		if ( smax<0 ) {
-			// must be predicates or pure EOT transition; just zero out min, max
-			smin = Label.MIN_CHAR_VALUE;
-			smax = Label.MIN_CHAR_VALUE;
-		}
-
-		min.set(s.stateNumber, Utils.integer((char)smin));
-		max.set(s.stateNumber, Utils.integer((char)smax));
-
-		if ( smax<0 || smin>Label.MAX_CHAR_VALUE || smin<0 ) {
-			ErrorManager.internalError("messed up: min="+min+", max="+max);
-		}
-	}
-
-	protected void createTransitionTableEntryForState(DFAState s) {
-		/*
-		System.out.println("createTransitionTableEntryForState s"+s.stateNumber+
-			" dec "+s.dfa.decisionNumber+" cyclic="+s.dfa.isCyclic());
-			*/
-		int smax = ((Integer)max.get(s.stateNumber)).intValue();
-		int smin = ((Integer)min.get(s.stateNumber)).intValue();
-
-		Vector stateTransitions = new Vector(smax-smin+1);
-		stateTransitions.setSize(smax-smin+1);
-		transition.set(s.stateNumber, stateTransitions);
-		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
-			Transition edge = (Transition) s.transition(j);
-			Label label = edge.label;
-			if ( label.isAtom() && label.getAtom()>=Label.MIN_CHAR_VALUE ) {
-				int labelIndex = label.getAtom()-smin; // offset from 0
-				stateTransitions.set(labelIndex,
-									 Utils.integer(edge.target.stateNumber));
-			}
-			else if ( label.isSet() ) {
-				IntervalSet labels = (IntervalSet)label.getSet();
-				int[] atoms = labels.toArray();
-				for (int a = 0; a < atoms.length; a++) {
-					// set the transition if the label is valid (don't do EOF)
-					if ( atoms[a]>=Label.MIN_CHAR_VALUE ) {
-						int labelIndex = atoms[a]-smin; // offset from 0
-						stateTransitions.set(labelIndex,
-											 Utils.integer(edge.target.stateNumber));
-					}
-				}
-			}
-		}
-		// track unique state transition tables so we can reuse
-		Integer edgeClass = (Integer)edgeTransitionClassMap.get(stateTransitions);
-		if ( edgeClass!=null ) {
-			//System.out.println("we've seen this array before; size="+stateTransitions.size());
-			transitionEdgeTables.set(s.stateNumber, edgeClass);
-		}
-		else {
-			edgeClass = Utils.integer(edgeTransitionClass);
-			transitionEdgeTables.set(s.stateNumber, edgeClass);
-			edgeTransitionClassMap.put(stateTransitions, edgeClass);
-			edgeTransitionClass++;
-		}
-	}
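	// Editorial note, not part of the original file: identical transition rows are
	// shared between states. If, say, s1 and s4 produce the same Vector of target
	// state numbers, both entries in transitionEdgeTables point at the same edge
	// class index and the row is emitted only once (the state names are hypothetical).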
-
-	/** Set up the EOT and EOF tables; we cannot put -1 min/max values so
-	 *  we need another way to test that in the DFA transition function.
-	 */
-	protected void createEOTAndEOFTables(DFAState s) {
-		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
-			Transition edge = (Transition) s.transition(j);
-			Label label = edge.label;
-			if ( label.isAtom() ) {
-				if ( label.getAtom()==Label.EOT ) {
-					// eot[s] points to accept state
-					eot.set(s.stateNumber, Utils.integer(edge.target.stateNumber));
-				}
-				else if ( label.getAtom()==Label.EOF ) {
-					// eof[s] points to accept state
-					eof.set(s.stateNumber, Utils.integer(edge.target.stateNumber));
-				}
-			}
-			else if ( label.isSet() ) {
-				IntervalSet labels = (IntervalSet)label.getSet();
-				int[] atoms = labels.toArray();
-				for (int a = 0; a < atoms.length; a++) {
-					if ( atoms[a]==Label.EOT ) {
-						// eot[s] points to accept state
-						eot.set(s.stateNumber, Utils.integer(edge.target.stateNumber));
-					}
-					else if ( atoms[a]==Label.EOF ) {
-						eof.set(s.stateNumber, Utils.integer(edge.target.stateNumber));
-					}
-				}
-			}
-		}
-	}
-
-	protected void createSpecialTable(DFAState s) {
-		// number all special states from 0...n-1 instead of their usual numbers
-		boolean hasSemPred = false;
-
-		// TODO this code is very similar to canGenerateSwitch.  Refactor to share
-		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
-			Transition edge = (Transition) s.transition(j);
-			Label label = edge.label;
-			// can't do a switch if the edges have preds or are going to
-			// require gated predicates
-			if ( label.isSemanticPredicate() ||
-				 ((DFAState)edge.target).getGatedPredicatesInNFAConfigurations()!=null)
-			{
-				hasSemPred = true;
-				break;
-			}
-		}
-		// if has pred or too big for table, make it special
-		int smax = ((Integer)max.get(s.stateNumber)).intValue();
-		int smin = ((Integer)min.get(s.stateNumber)).intValue();
-		if ( hasSemPred || smax-smin>MAX_STATE_TRANSITIONS_FOR_TABLE ) {
-			special.set(s.stateNumber,
-						Utils.integer(uniqueCompressedSpecialStateNum));
-			uniqueCompressedSpecialStateNum++;
-			specialStates.add(s);
-		}
-		else {
-			special.set(s.stateNumber, Utils.integer(-1)); // not special
-		}
-	}
-
-	public int predict(IntStream input) {
-		Interpreter interp = new Interpreter(nfa.grammar, input);
-		return interp.predict(this);
-	}
-
-	/** Add a new DFA state to this DFA if not already present.
-     *  To force an acyclic, fixed maximum depth DFA, just always
-	 *  return the incoming state.  By not reusing old states,
-	 *  no cycles can be created.  If we're doing fixed k lookahead
-	 *  don't update uniqueStates; just return the incoming state, which
-	 *  indicates it's a new state.
-     */
-    protected DFAState addState(DFAState d) {
-		if ( getUserMaxLookahead()>0 ) {
-			return d;
-		}
-		// does a DFA state exist already with everything the same
-		// except its state number?
-		DFAState existing = (DFAState)uniqueStates.get(d);
-		if ( existing != null ) {
-            /*
-            System.out.println("state "+d.stateNumber+" exists as state "+
-                existing.stateNumber);
-                */
-            // already there...get the existing DFA state
-			return existing;
-		}
-
-		// if not there, then add new state.
-		uniqueStates.put(d,d);
-        numberOfStates++;
-		return d;
-	}
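	// Editorial note, not part of the original file: when the user fixes k
	// (getUserMaxLookahead() > 0) every proposed state is returned unchanged, so
	// states are never merged and the resulting DFA is guaranteed acyclic; only
	// in the k=* case does the uniqueStates map deduplicate and allow cycles.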
-
-	public void removeState(DFAState d) {
-		DFAState it = (DFAState)uniqueStates.remove(d);
-		if ( it!=null ) {
-			numberOfStates--;
-		}
-	}
-
-	public Map<DFAState, DFAState> getUniqueStates() {
-		return uniqueStates;
-	}
-
-	/** What is the max state number ever created?  This may be beyond
-	 *  getNumberOfStates().
-	 */
-	public int getMaxStateNumber() {
-		return states.size()-1;
-	}
-
-	public DFAState getState(int stateNumber) {
-		return (DFAState)states.get(stateNumber);
-	}
-
-	public void setState(int stateNumber, DFAState d) {
-		states.set(stateNumber, d);
-	}
-
-	/** Is the DFA reduced?  I.e., does every state have a path to an accept
-     *  state?  If not, don't delete as we need to generate an error indicating
-     *  which paths are "dead ends".  Also tracks list of alts with no accept
-     *  state in the DFA.  Must call verify() first before this makes sense.
-     */
-    public boolean isReduced() {
-        return reduced;
-    }
-
-    /** Is this DFA cyclic?  That is, are there any loops?  If not, then
-     *  the DFA is essentially an LL(k) predictor for some fixed, max k value.
-     *  We can build a series of nested IF statements to match this.  In the
-     *  presence of cycles, we need to build a general DFA and interpret it
-     *  to distinguish between alternatives.
-     */
-    public boolean isCyclic() {
-        return cyclic && getUserMaxLookahead()==0;
-    }
-
-	public boolean isClassicDFA() {
-		return !isCyclic() &&
-			   !nfa.grammar.decisionsWhoseDFAsUsesSemPreds.contains(this) &&
-			   !nfa.grammar.decisionsWhoseDFAsUsesSynPreds.contains(this);
-	}
-
-	public boolean canInlineDecision() {
-		return !isCyclic() &&
-		    !probe.isNonLLStarDecision() &&
-			getNumberOfStates() < CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE;
-	}
-
-	/** Is this DFA derived from the NFA for the Tokens rule? */
-	public boolean isTokensRuleDecision() {
-		if ( nfa.grammar.type!=Grammar.LEXER ) {
-			return false;
-		}
-		NFAState nfaStart = getNFADecisionStartState();
-		Rule r = nfa.grammar.getLocallyDefinedRule(Grammar.ARTIFICIAL_TOKENS_RULENAME);
-		NFAState TokensRuleStart = r.startState;
-		NFAState TokensDecisionStart =
-			(NFAState)TokensRuleStart.transition[0].target;
-		return nfaStart == TokensDecisionStart;
-	}
-
-	/** The user may specify a max, acyclic lookahead for any decision.  No
-	 *  DFA cycles are created when this value, k, is greater than 0.
-	 *  If this decision has no k lookahead specified, then try the grammar.
-	 */
-	public int getUserMaxLookahead() {
-		if ( user_k>=0 ) { // cache for speed
-			return user_k;
-		}
-		user_k = nfa.grammar.getUserMaxLookahead(decisionNumber);
-		return user_k;
-	}
-
-	public boolean getAutoBacktrackMode() {
-		return nfa.grammar.getAutoBacktrackMode(decisionNumber);
-	}
-
-	public void setUserMaxLookahead(int k) {
-		this.user_k = k;
-	}
-
-	/** Return k if decision is LL(k) for some k else return max int
-     */
-	public int getMaxLookaheadDepth() {
-		if ( hasCycle() ) return Integer.MAX_VALUE;
-		// compute to be sure
-		return _getMaxLookaheadDepth(startState, 0);
-	}
-
-	int _getMaxLookaheadDepth(DFAState d, int depth) {
-		// not cyclic; don't worry about termination
-		// fail if pred edge.
-		int max = depth;
-		for (int i=0; i<d.getNumberOfTransitions(); i++) {
-			Transition t = d.transition(i);
-//			if ( t.isSemanticPredicate() ) return Integer.MAX_VALUE;
-			if ( !t.isSemanticPredicate() ) {
-				// if pure pred not gated, it must target stop state; don't count
-				DFAState edgeTarget = (DFAState)t.target;
-				int m = _getMaxLookaheadDepth(edgeTarget, depth+1);
-				max = Math.max(max, m);
-			}
-		}
-		return max;
-	}
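
Since the DFA here is known to be acyclic, _getMaxLookaheadDepth() is essentially a longest-path walk. Below is a self-contained sketch of the same recursion over a plain adjacency list, ignoring the predicate-edge special case; the integer graph encoding is an assumption for illustration only.

import java.util.List;

final class DepthSketch {
    // Longest path length (in edges) from node d in an acyclic graph given as
    // an adjacency list; same shape as _getMaxLookaheadDepth().
    static int maxDepth(List<List<Integer>> edges, int d, int depth) {
        int max = depth;
        for (int target : edges.get(d)) {
            max = Math.max(max, maxDepth(edges, target, depth + 1));
        }
        return max;
    }

    public static void main(String[] args) {
        // 0 -> 1 -> 2 and 0 -> 2: the longest path from 0 has 2 edges.
        List<List<Integer>> edges = List.of(List.of(1, 2), List.of(2), List.of());
        System.out.println(maxDepth(edges, 0, 0)); // prints 2
    }
}
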
-
-	/** Count all disambiguating syn preds (ignore synpred tests
-	 *  for gated edges, which occur for nonambig input sequences).
-	 *  E.g.,
-	 *  x  : (X)=> (X|Y)
-	 *     | X
-	 *     ;
-	 *
-	 *  gives
-	 * 
-	 * .s0-X->.s1
-	 * .s0-Y&&{synpred1_t}?->:s2=>1
-	 * .s1-{synpred1_t}?->:s2=>1
-	 * .s1-{true}?->:s3=>2
-	 */
-	public boolean hasSynPred() {
-		boolean has = _hasSynPred(startState, new HashSet<DFAState>());
-//		if ( !has ) {
-//			System.out.println("no synpred in dec "+decisionNumber);
-//			FASerializer serializer = new FASerializer(nfa.grammar);
-//			String result = serializer.serialize(startState);
-//			System.out.println(result);
-//		}
-		return has;
-	}
-
-	public boolean getHasSynPred() { return hasSynPred(); } // for ST	
-
-	boolean _hasSynPred(DFAState d, Set<DFAState> busy) {
-		busy.add(d);
-		for (int i=0; i<d.getNumberOfTransitions(); i++) {
-			Transition t = d.transition(i);
-			if ( t.isSemanticPredicate() ) {
-				SemanticContext ctx = t.label.getSemanticContext();
-//				if ( ctx.toString().indexOf("synpred")>=0 ) {
-//					System.out.println("has pred "+ctx.toString()+" "+ctx.isSyntacticPredicate());
-//					System.out.println(((SemanticContext.Predicate)ctx).predicateAST.token);
-//				}
-				if ( ctx.isSyntacticPredicate() ) return true;
-			}
-			DFAState edgeTarget = (DFAState)t.target;
-			if ( !busy.contains(edgeTarget) && _hasSynPred(edgeTarget, busy) ) return true;
-		}
-
-		return false;
-	}
-
-	public boolean hasSemPred() { // has user-defined sempred
-		boolean has = _hasSemPred(startState, new HashSet<DFAState>());
-		return has;
-	}
-
-	boolean _hasSemPred(DFAState d, Set<DFAState> busy) {
-		busy.add(d);
-		for (int i=0; i<d.getNumberOfTransitions(); i++) {
-			Transition t = d.transition(i);
-			if ( t.isSemanticPredicate() ) {
-				SemanticContext ctx = t.label.getSemanticContext();
-				if ( ctx.hasUserSemanticPredicate() ) return true;
-			}
-			DFAState edgeTarget = (DFAState)t.target;
-			if ( !busy.contains(edgeTarget) && _hasSemPred(edgeTarget, busy) ) return true;
-		}
-
-		return false;
-	}
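
_hasSynPred() and _hasSemPred() above are the same traversal with a different test on the edge: a depth-first walk guarded by a "busy" set so cyclic DFAs terminate. A generic sketch of that shape follows; the Edge type and boolean flag are hypothetical stand-ins for ANTLR's Transition and predicate check.

import java.util.List;
import java.util.Set;

final class PredicateSearchSketch {
    // Hypothetical edge: a target node plus a flag standing in for
    // "this transition carries the kind of predicate we are looking for".
    static final class Edge {
        final int target;
        final boolean hasPredicate;
        Edge(int target, boolean hasPredicate) {
            this.target = target;
            this.hasPredicate = hasPredicate;
        }
    }

    // Same shape as _hasSynPred()/_hasSemPred(): inspect each outgoing edge,
    // report true on a predicate edge, otherwise recurse into unvisited targets.
    static boolean hasPredicateEdge(List<List<Edge>> edges, int d, Set<Integer> busy) {
        busy.add(d);
        for (Edge e : edges.get(d)) {
            if (e.hasPredicate) return true;
            if (!busy.contains(e.target) && hasPredicateEdge(edges, e.target, busy)) {
                return true;
            }
        }
        return false;
    }
}
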
-
-	/** Compute cyclic w/o relying on state computed during analysis; just check. */
-	public boolean hasCycle() {
-		boolean cyclic = _hasCycle(startState, new HashMap<DFAState, Integer>());
-		return cyclic;
-	}
-
-	boolean _hasCycle(DFAState d, Map<DFAState, Integer> busy) {
-		busy.put(d, CYCLIC_BUSY);
-		for (int i=0; i<d.getNumberOfTransitions(); i++) {
-			Transition t = d.transition(i);
-			DFAState target = (DFAState)t.target;
-			int cond = CYCLIC_UNKNOWN;
-			if ( busy.get(target)!=null ) cond = busy.get(target);
-			if ( cond==CYCLIC_BUSY ) return true;
-			if ( cond!=CYCLIC_DONE && _hasCycle(target, busy) ) return true;
-		}
-		busy.put(d, CYCLIC_DONE);
-		return false;
-	}
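
hasCycle()/_hasCycle() above is the standard three-state depth-first cycle check (unknown / on-stack / done). A self-contained sketch follows, under the assumption that the graph is just an adjacency list of ints.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class CycleSketch {
    private static final int UNKNOWN = 0, BUSY = 1, DONE = 2;

    // Classic "gray/black" DFS cycle check, same scheme as _hasCycle(): a node
    // currently on the recursion stack is BUSY; reaching a BUSY node means a cycle.
    static boolean hasCycle(List<List<Integer>> edges, int d, Map<Integer, Integer> color) {
        color.put(d, BUSY);
        for (int target : edges.get(d)) {
            int c = color.getOrDefault(target, UNKNOWN);
            if (c == BUSY) return true;
            if (c != DONE && hasCycle(edges, target, color)) return true;
        }
        color.put(d, DONE);
        return false;
    }

    public static void main(String[] args) {
        List<List<Integer>> acyclic = List.of(List.of(1), List.of(2), List.of());
        List<List<Integer>> cyclic  = List.of(List.of(1), List.of(2), List.of(0));
        System.out.println(hasCycle(acyclic, 0, new HashMap<>())); // false
        System.out.println(hasCycle(cyclic, 0, new HashMap<>()));  // true
    }
}
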
-
-
-    /** Return a list of Integer alt numbers for which no lookahead could
-     *  be computed or for which no single DFA accept state predicts those
-     *  alts.  Must call verify() first before this makes sense.
-     */
-    public List<Integer> getUnreachableAlts() {
-        return unreachableAlts;
-    }
-
-	/** Once this DFA has been built, need to verify that:
-	 *
-	 *  1. it's reduced
-	 *  2. all alts have an accept state
-	 *
-	 *  Elsewhere, in the NFA converter, we need to verify that:
-	 *
-	 *  3. alts i and j have disjoint lookahead if no sem preds
-	 *  4. if sem preds, nondeterministic alts must be sufficiently covered
-	 *
-	 *  This is avoided if analysis bails out for any reason.
-	 */
-	public void verify() {
-		doesStateReachAcceptState(startState);
-	}
-
-    /** figure out if this state eventually reaches an accept state and
-     *  modify the instance variable 'reduced' to indicate if we find
-     *  at least one state that cannot reach an accept state.  This implies
-     *  that the overall DFA is not reduced.  This algorithm should be
-     *  linear in the number of DFA states.
-     *
-     *  The algorithm also tracks which alternatives have no accept state,
-     *  indicating a nondeterminism.
-	 *
-	 *  Also computes whether the DFA is cyclic.
-	 *
-     *  TODO: I call getUniquelyPredicatedAlt too much; cache predicted alt
-     */
-    protected boolean doesStateReachAcceptState(DFAState d) {
-		if ( d.isAcceptState() ) {
-            // accept states have no edges emanating from them so we can return
-            d.setAcceptStateReachable(REACHABLE_YES);
-            // this alt is uniquely predicted, remove from nondeterministic list
-            int predicts = d.getUniquelyPredictedAlt();
-            unreachableAlts.remove(Utils.integer(predicts));
-            return true;
-        }
-
-        // avoid infinite loops
-        d.setAcceptStateReachable(REACHABLE_BUSY);
-
-        boolean anEdgeReachesAcceptState = false;
-        // Visit every transition, track if at least one edge reaches stop state
-		// Cannot terminate when we know this state reaches stop state since
-		// all transitions must be traversed to set status of each DFA state.
-		for (int i=0; i<d.getNumberOfTransitions(); i++) {
-            Transition t = d.transition(i);
-            DFAState edgeTarget = (DFAState)t.target;
-            int targetStatus = edgeTarget.getAcceptStateReachable();
-            if ( targetStatus==REACHABLE_BUSY ) { // avoid cycles; they say nothing
-                cyclic = true;
-                continue;
-            }
-            if ( targetStatus==REACHABLE_YES ) { // avoid unnecessary work
-                anEdgeReachesAcceptState = true;
-                continue;
-            }
-            if ( targetStatus==REACHABLE_NO ) {  // avoid unnecessary work
-                continue;
-            }
-			// target must be REACHABLE_UNKNOWN (i.e., unvisited)
-            if ( doesStateReachAcceptState(edgeTarget) ) {
-                anEdgeReachesAcceptState = true;
-                // have to keep looking so don't break loop
-                // must cover all states even if we find a path for this state
-            }
-        }
-        if ( anEdgeReachesAcceptState ) {
-            d.setAcceptStateReachable(REACHABLE_YES);
-        }
-        else {
-            d.setAcceptStateReachable(REACHABLE_NO);
-			reduced = false;
-        }
-        return anEdgeReachesAcceptState;
-    }
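
doesStateReachAcceptState() is a memoized reachability pass: states on the current recursion path are marked busy so cycles contribute nothing, and every edge is still visited so each state ends up labeled. A standalone sketch of the same scheme follows; the int status codes and array-based graph are assumptions for illustration, and the cyclic/reduced bookkeeping is omitted.

import java.util.List;

final class ReachabilitySketch {
    static final int UNKNOWN = 0, BUSY = 1, YES = 2, NO = 3;

    // Same shape as doesStateReachAcceptState(): mark BUSY while on the stack so
    // cycles say nothing, memoize YES/NO per node, and still visit every edge so
    // every node gets labeled even after an answer for this node is known.
    static boolean reachesAccept(List<List<Integer>> edges, boolean[] accept,
                                 int[] status, int d) {
        if (accept[d]) { status[d] = YES; return true; }
        status[d] = BUSY;
        boolean anEdgeReaches = false;
        for (int target : edges.get(d)) {
            if (status[target] == BUSY) continue;                       // cycle
            if (status[target] == YES) { anEdgeReaches = true; continue; }
            if (status[target] == NO) continue;
            if (reachesAccept(edges, accept, status, target)) anEdgeReaches = true;
        }
        status[d] = anEdgeReaches ? YES : NO;
        return anEdgeReaches;
    }
}
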
-
-	/** Walk all accept states and find the manually-specified synpreds.
-	 *  Gated preds are not always hoisted.
-	 *  I used to do this in the code generator, but that is too late.
-	 *  This converter tries to avoid computing DFA for decisions in
-	 *  syntactic predicates that are not ever used such as those
-	 *  created by autobacktrack mode.
-	 */
-	public void findAllGatedSynPredsUsedInDFAAcceptStates() {
-		int nAlts = getNumberOfAlts();
-		for (int i=1; i<=nAlts; i++) {
-			DFAState a = getAcceptState(i);
-			//System.out.println("alt "+i+": "+a);
-			if ( a!=null ) {
-				Set synpreds = a.getGatedSyntacticPredicatesInNFAConfigurations();
-				if ( synpreds!=null ) {
-					// add all the predicates we find (should be just one, right?)
-					for (Iterator it = synpreds.iterator(); it.hasNext();) {
-						SemanticContext semctx = (SemanticContext) it.next();
-						// System.out.println("synpreds: "+semctx);
-						nfa.grammar.synPredUsedInDFA(this, semctx);
-					}
-				}
-			}
-		}
-	}
-
-	public NFAState getNFADecisionStartState() {
-        return decisionNFAStartState;
-    }
-
-	public DFAState getAcceptState(int alt) {
-		return altToAcceptState[alt];
-	}
-
-	public void setAcceptState(int alt, DFAState acceptState) {
-		altToAcceptState[alt] = acceptState;
-	}
-
-	public String getDescription() {
-		return description;
-	}
-
-	public int getDecisionNumber() {
-        return decisionNFAStartState.getDecisionNumber();
-    }
-
-	/** If this DFA failed to finish during construction, we might be
-	 *  able to retry with k=1 but we need to know whether it will
-	 *  potentially succeed.  Can only succeed if there is a predicate
-	 *  to resolve the issue.  Don't try if k=1 already, as it would
-	 *  cycle forever.  A timeout can retry with k=1, even without a
-	 *  predicate, as long as k!=1.
-	 */
-	public boolean okToRetryDFAWithK1() {
-		boolean nonLLStarOrOverflowAndPredicateVisible =
-			(probe.isNonLLStarDecision()||probe.analysisOverflowed()) &&
-		    predicateVisible; // auto backtrack or manual sem/syn
-		return getUserMaxLookahead()!=1 &&
-			 nonLLStarOrOverflowAndPredicateVisible;
-	}
-
-	public String getReasonForFailure() {
-		StringBuffer buf = new StringBuffer();
-		if ( probe.isNonLLStarDecision() ) {
-			buf.append("non-LL(*)");
-			if ( predicateVisible ) {
-				buf.append(" && predicate visible");
-			}
-		}
-		if ( probe.analysisOverflowed() ) {
-			buf.append("recursion overflow");
-			if ( predicateVisible ) {
-				buf.append(" && predicate visible");
-			}
-		}
-		buf.append("\n");
-		return buf.toString();
-	}
-
-	/** What GrammarAST node (derived from the grammar) is this DFA
-     *  associated with?  It will point to the start of a block or
-     *  the loop back of a (...)+ block etc...
-     */
-    public GrammarAST getDecisionASTNode() {
-        return decisionNFAStartState.associatedASTNode;
-    }
-
-    public boolean isGreedy() {
-		GrammarAST blockAST = nfa.grammar.getDecisionBlockAST(decisionNumber);
-		Object v = nfa.grammar.getBlockOption(blockAST,"greedy");
-		if ( v!=null && v.equals("false") ) {
-			return false;
-		}
-        return true;
-
-	}
-
-    public DFAState newState() {
-        DFAState n = new DFAState(this);
-        n.stateNumber = stateCounter;
-        stateCounter++;
-		states.setSize(n.stateNumber+1);
-		states.set(n.stateNumber, n); // track state num to state
-        return n;
-    }
-
-	public int getNumberOfStates() {
-		if ( getUserMaxLookahead()>0 ) {
-			// if using fixed lookahead then uniqueSets not set
-			return states.size();
-		}
-		return numberOfStates;
-	}
-
-	public int getNumberOfAlts() {
-		return nAlts;
-	}
-
-//	public boolean analysisTimedOut() {
-//		return probe.analysisTimedOut();
-//	}
-
-    protected void initAltRelatedInfo() {
-        unreachableAlts = new LinkedList();
-        for (int i = 1; i <= nAlts; i++) {
-            unreachableAlts.add(Utils.integer(i));
-        }
-		altToAcceptState = new DFAState[nAlts+1];
-    }
-
-	public String toString() {
-		FASerializer serializer = new FASerializer(nfa.grammar);
-		if ( startState==null ) {
-			return "";
-		}
-		return serializer.serialize(startState, false);
-	}
-
-	/** EOT (end of token) is a label that indicates when the DFA conversion
-	 *  algorithm would "fall off the end of a lexer rule".  It normally
-	 *  means the default clause.  So for ('a'..'z')+ you would see a DFA
-	 *  with a state that has a..z and EOT emanating from it.  a..z would
-	 *  jump to a state predicting alt 1 and EOT would jump to a state
-	 *  predicting alt 2 (the exit loop branch).  EOT implies anything other
-	 *  than a..z.  If for some reason, the set is "all char" such as with
-	 *  the wildcard '.', then EOT cannot match anything.  For example,
-	 *
-	 *     BLOCK : '{' (.)* '}'
-	 *
-	 *  consumes all char until EOF when greedy=true.  When all edges are
-	 *  combined for the DFA state after matching '}', you will find that
-	 *  it is all char.  The EOT transition has nothing to match and is
-	 *  unreachable.  The findNewDFAStatesAndAddDFATransitions() method
-	 *  must know to ignore the EOT, so we simply remove it from the
-	 *  reachable labels.  Later analysis will find that the exit branch
-	 *  is not predicted by anything.  For greedy=false, we leave only
-	 *  the EOT label indicating that the DFA should stop immediately
-	 *  and predict the exit branch. The reachable labels are often a
-	 *  set of disjoint values like: [<EOT>, 42, {0..41, 43..65534}]
-	 *  due to DFA conversion, so we must construct a pure set to see if
-	 *  it is the same as Label.ALLCHAR.
-	 *
-	 *  Only do this for Lexers.
-	 *
-	 *  If EOT coexists with ALLCHAR:
-	 *  1. If not greedy, modify the labels parameter to be EOT
-	 *  2. If greedy, remove EOT from the labels set
-	protected boolean reachableLabelsEOTCoexistsWithAllChar(OrderedHashSet labels)
-	{
-		Label eot = new Label(Label.EOT);
-		if ( !labels.containsKey(eot) ) {
-			return false;
-		}
-		System.out.println("### contains EOT");
-		boolean containsAllChar = false;
-		IntervalSet completeVocab = new IntervalSet();
-		int n = labels.size();
-		for (int i=0; i<n; i++) {
-			Label rl = (Label)labels.get(i);
-			if ( !rl.equals(eot) ) {
-				completeVocab.addAll(rl.getSet());
-			}
-		}
-		System.out.println("completeVocab="+completeVocab);
-		if ( completeVocab.equals(Label.ALLCHAR) ) {
-			System.out.println("all char");
-			containsAllChar = true;
-		}
-		return containsAllChar;
-	}
-	 */
-}
-
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/DFAOptimizer.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/DFAOptimizer.java
deleted file mode 100644
index d8e8291..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/DFAOptimizer.java
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.misc.Utils;
-import org.antlr.tool.Grammar;
-
-import java.util.HashSet;
-import java.util.Set;
-
-/** A module to perform optimizations on DFAs.
- *
- *  I could more easily (and more quickly) do some optimizations (such as
- *  PRUNE_EBNF_EXIT_BRANCHES) during DFA construction, but then it
- *  messes up the determinism checking.  For example, it looks like
- *  loop exit branches are unreachable if you prune exit branches
- *  during DFA construction and before determinism checks.
- *
- *  In general, ANTLR's NFA->DFA->codegen pipeline seems very robust
- *  to me which I attribute to a uniform and consistent set of data
- *  structures.  Regardless of what I want to "say"/implement, I do so
- *  within the confines of, for example, a DFA.  The code generator
- *  can then just generate code--it doesn't have to do much thinking.
- *  Putting optimizations in the code gen code really starts to make
- *  it a spaghetti factory (uh oh, now I'm hungry!).  The pipeline is
- *  very testable; each stage has well defined input/output pairs.
- *
- *  ### Optimization: PRUNE_EBNF_EXIT_BRANCHES
- *
- *  There is no need to test EBNF block exit branches.  Not only is it
- *  an unneeded computation, but counter-intuitively, you actually get
- *  better errors. You can report an error at the missing or extra
- *  token rather than as soon as you've figured out you will fail.
- *
- *  Imagine optional block "( DOT CLASS )? SEMI".  ANTLR generates:
- *
- *  int alt=0;
- *  if ( input.LA(1)==DOT ) {
- *      alt=1;
- *  }
- *  else if ( input.LA(1)==SEMI ) {
- *      alt=2;
- *  }
- *
- *  Clearly, since Parser.match() will ultimately find the error, we
- *  do not want to report an error nor do we want to bother testing
- *  lookahead against what follows the (...)?  We want to generate
- *  simply "should I enter the subrule?":
- *
- *  int alt=2;
- *  if ( input.LA(1)==DOT ) {
- *      alt=1;
- *  }
- *
- *  NOTE 1. Greedy loops cannot be optimized in this way.  For example,
- *  "(greedy=false:'x'|.)* '\n'".  You specifically need the exit branch
- *  to tell you when to terminate the loop as the same input actually
- *  predicts one of the alts (i.e., staying in the loop).
- *
- *  NOTE 2.  I do not optimize cyclic DFAs at the moment as it doesn't
- *  seem to work. ;)  I'll have to investigate later to see what work I
- *  can do on cyclic DFAs to make them have fewer edges.  Might have
- *  something to do with the EOT token.
- *
- *  ### PRUNE_SUPERFLUOUS_EOT_EDGES
- *
- *  When a token is a subset of another such as the following rules, ANTLR
- *  quietly assumes the first token to resolve the ambiguity.
- *
- *  EQ			: '=' ;
- *  ASSIGNOP	: '=' | '+=' ;
- *
- *  It can yield states that have only a single edge on EOT to an accept
- *  state.  This is a waste and messes up my code generation. ;)  If
- *  Tokens rule DFA goes
- *
- * 		s0 -'='-> s3 -EOT-> s5 (accept)
- *
- *  then s5 should be pruned and s3 should be made an accept.  Do NOT do this
- *  for keyword versus ID as the state with EOT edge emanating from it will
- *  also have another edge.
- *
- *  ### Optimization: COLLAPSE_ALL_INCIDENT_EDGES
- *
- *  Done during DFA construction.  See method addTransition() in
- *  NFAToDFAConverter.
- *
- *  ### Optimization: MERGE_STOP_STATES
- *
- *  Done during DFA construction.  See addDFAState() in NFAToDFAConverter.
- */
-public class DFAOptimizer {
-	public static boolean PRUNE_EBNF_EXIT_BRANCHES = true;
-	public static boolean PRUNE_TOKENS_RULE_SUPERFLUOUS_EOT_EDGES = true;
-	public static boolean COLLAPSE_ALL_PARALLEL_EDGES = true;
-	public static boolean MERGE_STOP_STATES = true;
-
-	/** Used by DFA state machine generator to avoid infinite recursion
-	 *  resulting from cycles in the DFA.  This is a set of int state #s.
-	 *  This is a side-effect of calling optimize; can't clear after use
-	 *  because code gen needs it.
-	 */
-	protected Set visited = new HashSet();
-
-    protected Grammar grammar;
-
-    public DFAOptimizer(Grammar grammar) {
-		this.grammar = grammar;
-    }
-
-	public void optimize() {
-		// optimize each DFA in this grammar
-		for (int decisionNumber=1;
-			 decisionNumber<=grammar.getNumberOfDecisions();
-			 decisionNumber++)
-		{
-			DFA dfa = grammar.getLookaheadDFA(decisionNumber);
-			optimize(dfa);
-		}
-	}
-
-	protected void optimize(DFA dfa) {
-		if ( dfa==null ) {
-			return; // nothing to do
-		}
-		/*
-		System.out.println("Optimize DFA "+dfa.decisionNFAStartState.decisionNumber+
-						   " num states="+dfa.getNumberOfStates());
-		*/
-		//long start = System.currentTimeMillis();
-		if ( PRUNE_EBNF_EXIT_BRANCHES && dfa.canInlineDecision() ) {
-			visited.clear();
-			int decisionType =
-				dfa.getNFADecisionStartState().decisionStateType;
-			if ( dfa.isGreedy() &&
-				 (decisionType==NFAState.OPTIONAL_BLOCK_START ||
-				 decisionType==NFAState.LOOPBACK) )
-			{
-				optimizeExitBranches(dfa.startState);
-			}
-		}
-		// If the Tokens rule has syntactically ambiguous rules, try to prune
-		if ( PRUNE_TOKENS_RULE_SUPERFLUOUS_EOT_EDGES &&
-			 dfa.isTokensRuleDecision() &&
-			 dfa.probe.stateToSyntacticallyAmbiguousTokensRuleAltsMap.size()>0 )
-		{
-			visited.clear();
-			optimizeEOTBranches(dfa.startState);
-		}
-
-		/* ack...code gen needs this, cannot optimize
-		visited.clear();
-		unlinkUnneededStateData(dfa.startState);
-		*/
-		//long stop = System.currentTimeMillis();
-		//System.out.println("minimized in "+(int)(stop-start)+" ms");
-    }
-
-	protected void optimizeExitBranches(DFAState d) {
-		Integer sI = Utils.integer(d.stateNumber);
-		if ( visited.contains(sI) ) {
-			return; // already visited
-		}
-		visited.add(sI);
-		int nAlts = d.dfa.getNumberOfAlts();
-		for (int i = 0; i < d.getNumberOfTransitions(); i++) {
-			Transition edge = (Transition) d.transition(i);
-			DFAState edgeTarget = ((DFAState)edge.target);
-			/*
-			System.out.println(d.stateNumber+"-"+
-							   edge.label.toString(d.dfa.nfa.grammar)+"->"+
-							   edgeTarget.stateNumber);
-			*/
-			// if target is an accept state and that alt is the exit alt
-			if ( edgeTarget.isAcceptState() &&
-				edgeTarget.getUniquelyPredictedAlt()==nAlts)
-			{
-				/*
-				System.out.println("ignoring transition "+i+" to max alt "+
-					d.dfa.getNumberOfAlts());
-				*/
-				d.removeTransition(i);
-				i--; // back up one so that i++ of loop iteration stays within bounds
-			}
-			optimizeExitBranches(edgeTarget);
-		}
-	}
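
Both pruning passes in this class delete transitions while iterating by index and then back the index up so the element that slides into the vacated slot is not skipped. A tiny generic sketch of that remove-and-back-up idiom; removeMatching is illustrative, not an ANTLR API.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

final class PruneSketch {
    // The idiom used by optimizeExitBranches()/optimizeEOTBranches(): after
    // removing element i, decrement i so the loop's i++ re-examines that index.
    static <T> void removeMatching(List<T> items, Predicate<T> shouldRemove) {
        for (int i = 0; i < items.size(); i++) {
            if (shouldRemove.test(items.get(i))) {
                items.remove(i);
                i--; // back up so the next iteration re-examines this index
            }
        }
    }

    public static void main(String[] args) {
        List<Integer> xs = new ArrayList<>(List.of(1, 2, 3, 4, 5));
        removeMatching(xs, x -> x % 2 == 0);
        System.out.println(xs); // [1, 3, 5]
    }
}
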
-
-	protected void optimizeEOTBranches(DFAState d) {
-		Integer sI = Utils.integer(d.stateNumber);
-		if ( visited.contains(sI) ) {
-			return; // already visited
-		}
-		visited.add(sI);
-		for (int i = 0; i < d.getNumberOfTransitions(); i++) {
-			Transition edge = (Transition) d.transition(i);
-			DFAState edgeTarget = ((DFAState)edge.target);
-			/*
-			System.out.println(d.stateNumber+"-"+
-							   edge.label.toString(d.dfa.nfa.grammar)+"->"+
-							   edgeTarget.stateNumber);
-			*/
-			// if only one edge coming out, it is EOT, and target is an accept state, prune
-			if ( PRUNE_TOKENS_RULE_SUPERFLUOUS_EOT_EDGES &&
-				edgeTarget.isAcceptState() &&
-				d.getNumberOfTransitions()==1 &&
-				edge.label.isAtom() &&
-				edge.label.getAtom()==Label.EOT )
-			{
-				//System.out.println("state "+d+" can be pruned");
-				// remove the superfluous EOT edge
-				d.removeTransition(i);
-				d.setAcceptState(true); // make it an accept state
-				// force it to uniquely predict the originally predicted state
-				d.cachedUniquelyPredicatedAlt =
-					edgeTarget.getUniquelyPredictedAlt();
-				i--; // back up one so that i++ of loop iteration stays within bounds
-			}
-			optimizeEOTBranches(edgeTarget);
-		}
-	}
-
-	/** Walk DFA states, unlinking the nfa configs and whatever else I
-	 *  can to reduce memory footprint.
-	protected void unlinkUnneededStateData(DFAState d) {
-		Integer sI = Utils.integer(d.stateNumber);
-		if ( visited.contains(sI) ) {
-			return; // already visited
-		}
-		visited.add(sI);
-		d.nfaConfigurations = null;
-		for (int i = 0; i < d.getNumberOfTransitions(); i++) {
-			Transition edge = (Transition) d.transition(i);
-			DFAState edgeTarget = ((DFAState)edge.target);
-			unlinkUnneededStateData(edgeTarget);
-		}
-	}
-	 */
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/DFAState.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/DFAState.java
deleted file mode 100644
index ee1a703..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/DFAState.java
+++ /dev/null
@@ -1,776 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.misc.IntSet;
-import org.antlr.misc.MultiMap;
-import org.antlr.misc.OrderedHashSet;
-import org.antlr.misc.Utils;
-import org.antlr.tool.Grammar;
-
-import java.util.*;
-
-/** A DFA state represents a set of possible NFA configurations.
- *  As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
- *  to keep track of all possible states the NFA can be in after
- *  reading each input symbol.  That is to say, after reading
- *  input a1a2..an, the DFA is in a state that represents the
- *  subset T of the states of the NFA that are reachable from the
- *  NFA's start state along some path labeled a1a2..an."
- *  In conventional NFA->DFA conversion, therefore, the subset T
- *  would be a bitset representing the set of states the
- *  NFA could be in.  We need to track the alt predicted by each
- *  state as well, however.  More importantly, we need to maintain
- *  a stack of states, tracking the closure operations as they
- *  jump from rule to rule, emulating rule invocations (method calls).
- *  Recall that NFAs do not normally have a stack like a pushdown-machine
- *  so I have to add one to simulate the proper lookahead sequences for
- *  the underlying LL grammar from which the NFA was derived.
- *
- *  I use a list of NFAConfiguration objects.  An NFAConfiguration
- *  is both a state (ala normal conversion) and an NFAContext describing
- *  the chain of rules (if any) followed to arrive at that state.  There
- *  is also the semantic context, which is the "set" of predicates found
- *  on the path to this configuration.
- *
- *  A DFA state may have multiple references to a particular state,
- *  but with different NFAContexts (with same or different alts)
- *  meaning that state was reached via a different set of rule invocations.
- */
-public class DFAState extends State {
-    public static final int INITIAL_NUM_TRANSITIONS = 4;
-	public static final int PREDICTED_ALT_UNSET = NFA.INVALID_ALT_NUMBER-1;
-
-    /** We are part of what DFA?  Use this ref to get access to the
-     *  context trees for an alt.
-     */
-    public DFA dfa;
-
-    /** Track the transitions emanating from this DFA state.  The List
-     *  elements are Transition objects.
-     */
-    protected List<Transition> transitions =
-		new ArrayList<Transition>(INITIAL_NUM_TRANSITIONS);
-
-	/** When doing an acyclic DFA, this is the number of lookahead symbols
-	 *  consumed to reach this state.  This value may be nonzero for most
-	 *  DFA states, but it is only a valid value if the user has specified
-	 *  a max fixed lookahead.
-	 */
-    protected int k;
-
-    /** The NFA->DFA algorithm may terminate leaving some states
-     *  without a path to an accept state, implying that upon certain
-     *  input, the decision is not deterministic--no decision about
-     *  predicting a unique alternative can be made.  Recall that an
-     *  accept state is one in which a unique alternative is predicted.
-     */
-    protected int acceptStateReachable = DFA.REACHABLE_UNKNOWN;
-
-    /** Rather than recheck every NFA configuration in a DFA state (after
-     *  resolving) in findNewDFAStatesAndAddDFATransitions, just check
-     *  this boolean.  Saves a linear walk per DFA state creation.
-     *  Every little bit helps.
-     */
-    protected boolean resolvedWithPredicates = false;
-
-	/** If a closure operation finds that we tried to invoke the same
-	 *  rule too many times (stack would grow beyond a threshold), it
-	 *  marks the state as aborted and notifies the DecisionProbe.
-	 */
-	public boolean abortedDueToRecursionOverflow = false;
-
-	/** If we detect recursion on more than one alt, decision is non-LL(*),
-	 *  but try to isolate it to only those states whose closure operations
-	 *  detect recursion.  There may be other alts that are cool:
-	 *
-	 *  a : recur '.'
-	 *    | recur ';'
-	 *    | X Y  // LL(2) decision; don't abort and use k=1 plus backtracking
-	 *    | X Z
-	 *    ;
-	 *
-	 *  12/13/2007: Actually this has caused problems.  If k=*, must terminate
-	 *  and throw out entire DFA; retry with k=1.  Since recursive, do not
-	 *  attempt more closure ops as it may take forever.  Exception thrown
-	 *  now and we simply report the problem.  If synpreds exist, I'll retry
-	 *  with k=1.
-	 */
-	protected boolean abortedDueToMultipleRecursiveAlts = false;
-
-	/** Build up the hash code for this state as NFA configurations
-     *  are added, since its list of configurations grows monotonically.
-     */
-    protected int cachedHashCode;
-
-	protected int cachedUniquelyPredicatedAlt = PREDICTED_ALT_UNSET;
-
-	public int minAltInConfigurations=Integer.MAX_VALUE;
-
-	public boolean atLeastOneConfigurationHasAPredicate = false;
-
-	/** The set of NFA configurations (state,alt,context) for this DFA state */
-    public OrderedHashSet<NFAConfiguration> nfaConfigurations =
-		new OrderedHashSet<NFAConfiguration>();
-
-	public List<NFAConfiguration> configurationsWithLabeledEdges =
-		new ArrayList<NFAConfiguration>();
-
-	/** Used to prevent the closure operation from looping to itself and
-     *  hence looping forever.  Sensitive to the NFA state, the alt, and
-     *  the stack context.  This is just the NFA config set because we want to
-	 *  prevent closures only on states contributed by closure, not reach,
-	 *  operations.
-	 *
-	 *  Two configurations identical including semantic context are
-	 *  considered the same closure computation.  @see NFAToDFAConverter.closureBusy().
-     */
-	protected Set<NFAConfiguration> closureBusy = new HashSet<NFAConfiguration>();
-
-	/** As this state is constructed (i.e., as NFA states are added), we
-     *  can easily check for non-epsilon transitions because the only
-     *  transition that could be a valid label is transition(0).  When we
-     *  process this node eventually, we'll have to walk all states looking
-     *  for all possible transitions.  That is of the order: size(label space)
-     *  times size(nfa states), which can be pretty damn big.  It's better
-     *  to simply track possible labels.
-     */
-    protected OrderedHashSet<Label> reachableLabels;
-
-    public DFAState(DFA dfa) {
-        this.dfa = dfa;
-    }
-
-	public void reset() {
-		//nfaConfigurations = null; // getGatedPredicatesInNFAConfigurations needs
-		configurationsWithLabeledEdges = null;
-		closureBusy = null;
-		reachableLabels = null;
-	}
-
-	public Transition transition(int i) {
-        return (Transition)transitions.get(i);
-    }
-
-    public int getNumberOfTransitions() {
-        return transitions.size();
-    }
-
-    public void addTransition(Transition t) {
-        transitions.add(t);
-    }
-
-	/** Add a transition from this state to target with label.  Return
-	 *  the transition number from 0..n-1.
-	 */
-    public int addTransition(DFAState target, Label label) {
-		transitions.add( new Transition(label, target) );
-		return transitions.size()-1;
-    }
-
-    public Transition getTransition(int trans) {
-        return transitions.get(trans);
-    }
-
-	public void removeTransition(int trans) {
-		transitions.remove(trans);
-	}
-
-    /** Add an NFA configuration to this DFA node.  Add uniquely
-     *  an NFA state/alt/syntactic&semantic context (chain of invoking state(s)
-     *  and semantic predicate contexts).
-     *
-     *  I don't see how there could be two configurations with same
-     *  state|alt|synCtx and different semantic contexts because the
-     *  semantic contexts are computed along the path to a particular state
-     *  so those two configurations would have to have the same predicate.
-     *  Nonetheless, the addition of configurations is unique on all
-     *  configuration info.  I guess I'm saying that syntactic context
-     *  implies semantic context as the latter is computed according to the
-     *  former.
-     *
-     *  As we add configurations to this DFA state, track the set of all possible
-     *  transition labels so we can simply walk it later rather than doing a
-     *  loop over all possible labels in the NFA.
-     */
-    public void addNFAConfiguration(NFAState state, NFAConfiguration c) {
-		if ( nfaConfigurations.contains(c) ) {
-            return;
-        }
-
-        nfaConfigurations.add(c);
-
-		// track min alt rather than compute later
-		if ( c.alt < minAltInConfigurations ) {
-			minAltInConfigurations = c.alt;
-		}
-
-		if ( c.semanticContext!=SemanticContext.EMPTY_SEMANTIC_CONTEXT ) {
-			atLeastOneConfigurationHasAPredicate = true;
-		}
-
-		// update hashCode; for some reason using context.hashCode() also
-        // makes the GC take like 70% of the CPU and is slow!
-        cachedHashCode += c.state + c.alt;
-
-		// update reachableLabels
-		// We're adding an NFA state; check to see if it has a non-epsilon edge
-		if ( state.transition[0] != null ) {
-			Label label = state.transition[0].label;
-			if ( !(label.isEpsilon()||label.isSemanticPredicate()) ) {
-				// this NFA state has a non-epsilon edge, track for fast
-				// walking later when we do reach on this DFA state we're
-				// building.
-				configurationsWithLabeledEdges.add(c);
-				if ( state.transition[1] ==null ) {
-					// later we can check this to ignore o-A->o states in closure
-					c.singleAtomTransitionEmanating = true;
-				}
-				addReachableLabel(label);
-			}
-		}
-    }
-
-	public NFAConfiguration addNFAConfiguration(NFAState state,
-												int alt,
-												NFAContext context,
-												SemanticContext semanticContext)
-	{
-		NFAConfiguration c = new NFAConfiguration(state.stateNumber,
-												  alt,
-												  context,
-												  semanticContext);
-		addNFAConfiguration(state, c);
-		return c;
-	}
-
-	/** Add label uniquely and disjointly; intersection with
-     *  another set or int/char forces breaking up the set(s).
-     *
-     *  Example, if reachable list of labels is [a..z, {k,9}, 0..9],
-     *  the disjoint list will be [{a..j,l..z}, k, 9, 0..8].
-     *
-     *  As we add NFA configurations to a DFA state, we might as well track
-     *  the set of all possible transition labels to make the DFA conversion
-     *  more efficient.  W/o the reachable labels, we'd need to check the
-     *  whole vocabulary space (could be 0..\uFFFF)!  The problem is that
-     *  labels can be sets, which may overlap with int labels or other sets.
-     *  As we need a deterministic set of transitions from any
-     *  state in the DFA, we must make the reachable labels set disjoint.
-     *  This operation amounts to finding the character classes for this
-     *  DFA state, whereas tools like flex, which need to generate a
-     *  homogeneous DFA, must compute char classes across all states.
-     *  We are going to generate DFAs with heterogeneous states so we
-     *  only care that the set of transitions out of a single state are
-     *  unique. :)
-     *
-     *  The idea for adding a new set, t, is to look for overlap with the
-     *  elements of existing list s.  Upon overlap, replace
-     *  existing set s[i] with two new disjoint sets, s[i]-t and s[i]&t.
-     *  (if s[i]-t is nil, don't add).  The remainder is t-s[i], which is
-     *  what you want to add to the set minus what was already there.  The
-     *  remainder must then be compared against the i+1..n elements in s
-     *  looking for another collision.  Each collision results in a smaller
-     *  and smaller remainder.  Stop when you run out of s elements or
-     *  remainder goes to nil.  If remainder is non nil when you run out of
-     *  s elements, then add remainder to the end.
-     *
-     *  Single element labels are treated as sets to make the code uniform.
-     */
-    protected void addReachableLabel(Label label) {
-		if ( reachableLabels==null ) {
-			reachableLabels = new OrderedHashSet<Label>();
-		}
-		/*
-		System.out.println("addReachableLabel to state "+dfa.decisionNumber+"."+stateNumber+": "+label.getSet().toString(dfa.nfa.grammar));
-		System.out.println("start of add to state "+dfa.decisionNumber+"."+stateNumber+": " +
-				"reachableLabels="+reachableLabels.toString());
-				*/
-		if ( reachableLabels.contains(label) ) { // exact label present
-            return;
-        }
-        IntSet t = label.getSet();
-        IntSet remainder = t; // remainder starts out as whole set to add
-        int n = reachableLabels.size(); // only look at initial elements
-        // walk the existing list looking for the collision
-        for (int i=0; i<n; i++) {
-			Label rl = reachableLabels.get(i);
-            /*
-			System.out.println("comparing ["+i+"]: "+label.toString(dfa.nfa.grammar)+" & "+
-                    rl.toString(dfa.nfa.grammar)+"="+
-                    intersection.toString(dfa.nfa.grammar));
-            */
-			if ( !Label.intersect(label, rl) ) {
-                continue;
-            }
-			//System.out.println(label+" collides with "+rl);
-
-			// For any (s_i, t) with s_i&t!=nil replace with (s_i-t, s_i&t)
-            // (ignoring s_i-t if nil; don't put in list)
-
-            // Replace existing s_i with intersection since we
-            // know that will always be a non nil character class
-			IntSet s_i = rl.getSet();
-			IntSet intersection = s_i.and(t);
-            reachableLabels.set(i, new Label(intersection));
-
-            // Compute s_i-t to see what is in current set and not in incoming
-            IntSet existingMinusNewElements = s_i.subtract(t);
-			//System.out.println(s_i+"-"+t+"="+existingMinusNewElements);
-            if ( !existingMinusNewElements.isNil() ) {
-                // found a new character class, add to the end (doesn't affect
-                // outer loop duration due to n computation a priori).
-                Label newLabel = new Label(existingMinusNewElements);
-                reachableLabels.add(newLabel);
-            }
-
-			/*
-            System.out.println("after collision, " +
-                    "reachableLabels="+reachableLabels.toString());
-					*/
-
-            // anything left to add to the reachableLabels?
-            remainder = t.subtract(s_i);
-            if ( remainder.isNil() ) {
-                break; // nothing left to add to set.  done!
-            }
-
-            t = remainder;
-        }
-        if ( !remainder.isNil() ) {
-			/*
-			System.out.println("before add remainder to state "+dfa.decisionNumber+"."+stateNumber+": " +
-					"reachableLabels="+reachableLabels.toString());
-			System.out.println("remainder state "+dfa.decisionNumber+"."+stateNumber+": "+remainder.toString(dfa.nfa.grammar));
-            */
-			Label newLabel = new Label(remainder);
-            reachableLabels.add(newLabel);
-        }
-		/*
-		System.out.println("#END of add to state "+dfa.decisionNumber+"."+stateNumber+": " +
-				"reachableLabels="+reachableLabels.toString());
-				*/
-    }
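
The long comment above describes keeping the reachable-label list pairwise disjoint: each incoming set splits any overlapping member into "intersection" and "existing minus new", and whatever is left of the newcomer is appended at the end. A self-contained sketch of that algorithm using plain TreeSet<Integer> in place of ANTLR's Label/IntSet types:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;

final class DisjointLabelsSketch {
    // Keep a list of pairwise-disjoint sets.  When adding t, split any overlapping
    // member s into (s & t) and (s - t), and keep chasing the remainder t - s,
    // as described in the addReachableLabel() comment.
    static void addDisjoint(List<TreeSet<Integer>> labels, TreeSet<Integer> t) {
        TreeSet<Integer> remainder = new TreeSet<>(t);
        int n = labels.size();                    // only examine pre-existing entries
        for (int i = 0; i < n && !remainder.isEmpty(); i++) {
            TreeSet<Integer> s = labels.get(i);
            TreeSet<Integer> intersection = new TreeSet<>(s);
            intersection.retainAll(remainder);
            if (intersection.isEmpty()) continue; // no collision with this member
            TreeSet<Integer> sMinusT = new TreeSet<>(s);
            sMinusT.removeAll(remainder);
            labels.set(i, intersection);          // s_i becomes s_i & t
            if (!sMinusT.isEmpty()) labels.add(sMinusT);
            remainder.removeAll(intersection);    // only the still-new part keeps going
        }
        if (!remainder.isEmpty()) labels.add(remainder);
    }

    public static void main(String[] args) {
        List<TreeSet<Integer>> labels = new ArrayList<>();
        addDisjoint(labels, new TreeSet<>(List.of(1, 2, 3, 4, 5)));
        addDisjoint(labels, new TreeSet<>(List.of(3, 4, 5, 6, 7, 8)));
        System.out.println(labels); // [[3, 4, 5], [1, 2], [6, 7, 8]]
    }
}
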
-
-    public OrderedHashSet getReachableLabels() {
-        return reachableLabels;
-    }
-
-	public void setNFAConfigurations(OrderedHashSet<NFAConfiguration> configs) {
-		this.nfaConfigurations = configs;
-	}
-
-    /** A decent hash for a DFA state is the sum of the NFA state/alt pairs.
-     *  This is used when we add DFAState objects to the DFA.states Map and
-     *  when we compare DFA states.  Computed in addNFAConfiguration()
-     */
-    public int hashCode() {
-		if ( cachedHashCode==0 ) {
-			// LL(1) algorithm doesn't use NFA configurations, which
-			// dynamically compute hashcode; must have something; use super
-			return super.hashCode();
-		}
-		return cachedHashCode;
-    }
-
-    /** Two DFAStates are equal if their NFA configuration sets are the
-	 *  same. This method is used to see if a DFA state already exists.
-	 *
-     *  Because the number of alternatives and number of NFA configurations are
-     *  finite, there is a finite number of DFA states that can be processed.
-     *  This is necessary to show that the algorithm terminates.
-	 *
-	 *  Cannot test the DFA state numbers here because in DFA.addState we need
-	 *  to know if any other state exists that has this exact set of NFA
-	 *  configurations.  The DFAState state number is irrelevant.
-     */
-    public boolean equals(Object o) {
-		// compare set of NFA configurations in this set with other
-        DFAState other = (DFAState)o;
-		return this.nfaConfigurations.equals(other.nfaConfigurations);
-	}
-
-    /** Walk each configuration and if they are all the same alt, return
-     *  that alt else return NFA.INVALID_ALT_NUMBER.  Ignore resolved
-     *  configurations, but don't ignore resolveWithPredicate configs
-     *  because this state should not be an accept state.  We need to add
-     *  this to the work list and then have semantic predicate edges
-     *  emanating from it.
-     */
-    public int getUniquelyPredictedAlt() {
-		if ( cachedUniquelyPredicatedAlt!=PREDICTED_ALT_UNSET ) {
-			return cachedUniquelyPredicatedAlt;
-		}
-        int alt = NFA.INVALID_ALT_NUMBER;
-		int numConfigs = nfaConfigurations.size();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
-			// ignore anything we resolved; predicates will still result
-			// in transitions out of this state, so must count those
-			// configurations; i.e., don't ignore resolveWithPredicate configs
-			if ( configuration.resolved ) {
-				continue;
-			}
-			if ( alt==NFA.INVALID_ALT_NUMBER ) {
-				alt = configuration.alt; // found first nonresolved alt
-			}
-			else if ( configuration.alt!=alt ) {
-				return NFA.INVALID_ALT_NUMBER;
-			}
-		}
-		this.cachedUniquelyPredicatedAlt = alt;
-        return alt;
-    }
-
-	/** Return the uniquely mentioned alt from the NFA configurations;
-	 *  Ignore the resolved bit etc...  Return INVALID_ALT_NUMBER
-	 *  if there is more than one alt mentioned.
-	 */ 
-	public int getUniqueAlt() {
-		int alt = NFA.INVALID_ALT_NUMBER;
-		int numConfigs = nfaConfigurations.size();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
-			if ( alt==NFA.INVALID_ALT_NUMBER ) {
-				alt = configuration.alt; // found first alt
-			}
-			else if ( configuration.alt!=alt ) {
-				return NFA.INVALID_ALT_NUMBER;
-			}
-		}
-		return alt;
-	}
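
getUniquelyPredictedAlt() and getUniqueAlt() share one scan: remember the first alt seen and fall back to the invalid-alt sentinel as soon as a different one appears. A minimal sketch of that scan over a plain list of alt numbers; INVALID_ALT is a stand-in for NFA.INVALID_ALT_NUMBER, and the resolved-configuration filtering is omitted.

import java.util.List;

final class UniqueAltSketch {
    static final int INVALID_ALT = 0; // stand-in for NFA.INVALID_ALT_NUMBER

    // Return the single alt mentioned by the configurations, or INVALID_ALT
    // if more than one distinct alt appears.
    static int uniqueAlt(List<Integer> altsOfConfigs) {
        int alt = INVALID_ALT;
        for (int a : altsOfConfigs) {
            if (alt == INVALID_ALT) alt = a;
            else if (a != alt) return INVALID_ALT;
        }
        return alt;
    }
}
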
-
-	/** When more than one alternative can match the same input, the first
-	 *  alternative is chosen to resolve the conflict.  The other alts
-	 *  are "turned off" by setting the "resolved" flag in the NFA
-	 *  configurations.  Return the set of disabled alternatives.  For
-	 *
-	 *  a : A | A | A ;
-	 *
-	 *  this method returns {2,3} as disabled.  This does not mean that
-	 *  the alternative is totally unreachable, it just means that for this
-	 *  DFA state, that alt is disabled.  There may be other accept states
-	 *  for that alt.
-	 */
-	public Set getDisabledAlternatives() {
-		Set disabled = new LinkedHashSet();
-		int numConfigs = nfaConfigurations.size();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
-			if ( configuration.resolved ) {
-				disabled.add(Utils.integer(configuration.alt));
-			}
-		}
-		return disabled;
-	}
-
-	protected Set getNonDeterministicAlts() {
-		int user_k = dfa.getUserMaxLookahead();
-		if ( user_k>0 && user_k==k ) {
-			// if fixed lookahead, then more than 1 alt is a nondeterminism
-			// if we have hit the max lookahead
-			return getAltSet();
-		}
-		else if ( abortedDueToMultipleRecursiveAlts || abortedDueToRecursionOverflow ) {
-			// if we had to abort for non-LL(*) state assume all alts are a problem
-			return getAltSet();
-		}
-		else {
-			return getConflictingAlts();
-		}
-	}
-
-    /** Walk each NFA configuration in this DFA state looking for a conflict
-     *  where (s|i|ctx) and (s|j|ctx) exist, indicating that state s with
-     *  context conflicting ctx predicts alts i and j.  Return an Integer set
-	 *  of the alternative numbers that conflict.  Two contexts conflict if
-	 *  they are equal or one is a stack suffix of the other or one is
-	 *  the empty context.
-	 *
-     *  Use a hash table to record the lists of configs for each state
-	 *  as they are encountered.  We need only consider states for which
-	 *  there is more than one configuration.  The configurations' predicted
-	 *  alt must be different or must have different contexts to avoid a
-	 *  conflict.
-	 *
-	 *  Don't report conflicts for DFA states that have conflicting Tokens
-	 *  rule NFA states; they will be resolved in favor of the first rule.
-     */
-    protected Set<Integer> getConflictingAlts() {
-		// TODO this is called multiple times: cache result?
-		//System.out.println("getNondetAlts for DFA state "+stateNumber);
- 		Set<Integer> nondeterministicAlts = new HashSet<Integer>();
-
-		// If only 1 NFA conf then no way it can be nondeterministic;
-		// save the overhead.  There are many o-a->o NFA transitions
-		// and so we save a hash map and iterator creation for each
-		// state.
-		int numConfigs = nfaConfigurations.size();
-		if ( numConfigs <=1 ) {
-			return null;
-		}
-
-		// First get a list of configurations for each state.
-		// Most of the time, each state will have one associated configuration.
-		MultiMap<Integer, NFAConfiguration> stateToConfigListMap =
-			new MultiMap<Integer, NFAConfiguration>();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
-			Integer stateI = Utils.integer(configuration.state);
-			stateToConfigListMap.map(stateI, configuration);
-		}
-		// potential conflicts are states with > 1 configuration and diff alts
-		Set states = stateToConfigListMap.keySet();
-		int numPotentialConflicts = 0;
-		for (Iterator it = states.iterator(); it.hasNext();) {
-			Integer stateI = (Integer) it.next();
-			boolean thisStateHasPotentialProblem = false;
-			List configsForState = (List)stateToConfigListMap.get(stateI);
-			int alt=0;
-			int numConfigsForState = configsForState.size();
-			for (int i = 0; i < numConfigsForState && numConfigsForState>1 ; i++) {
-				NFAConfiguration c = (NFAConfiguration) configsForState.get(i);
-				if ( alt==0 ) {
-					alt = c.alt;
-				}
-				else if ( c.alt!=alt ) {
-					/*
-					System.out.println("potential conflict in state "+stateI+
-									   " configs: "+configsForState);
-					*/
-					// 11/28/2005: don't report closures that pinch back
-					// together in Tokens rule.  We want to silently resolve
-					// to the first token definition ala lex/flex by ignoring
-					// these conflicts.
-					// Also this ensures that lexers look for more and more
-					// characters (longest match) before resorting to predicates.
-					// TestSemanticPredicates.testLexerMatchesLongestThenTestPred()
-					// for example would terminate at state s1 and test predicate
-					// meaning input "ab" would test preds to decide what to
-					// do but it should match rule C w/o testing preds.
-					if ( dfa.nfa.grammar.type!=Grammar.LEXER ||
-						 !dfa.decisionNFAStartState.enclosingRule.name.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) )
-					{
-						numPotentialConflicts++;
-						thisStateHasPotentialProblem = true;
-					}
-				}
-			}
-			if ( !thisStateHasPotentialProblem ) {
-				// remove NFA state's configurations from
-				// further checking; no issues with it
-				// (can't remove as that would be a concurrent modification; set to null)
-				stateToConfigListMap.put(stateI, null);
-			}
-		}
-
-		// a fast check for potential issues; most states have none
-		if ( numPotentialConflicts==0 ) {
-			return null;
-		}
-
-		// we have a potential problem, so now go through config lists again
-		// looking for different alts (only states with potential issues
-		// are left in the states set).  Now we will check context.
-		// For example, the list of configs for NFA state 3 in some DFA
-		// state might be:
-		//   [3|2|[28 18 $], 3|1|[28 $], 3|1, 3|2]
-		// I want to create a map from context to alts looking for overlap:
-		//   [28 18 $] -> 2
-		//   [28 $] -> 1
-		//   [$] -> 1,2
-		// Indeed a conflict exists as same state 3, same context [$], predicts
-		// alts 1 and 2.
-		// walk each state with potential conflicting configurations
-		for (Iterator it = states.iterator(); it.hasNext();) {
-			Integer stateI = (Integer) it.next();
-			List configsForState = (List)stateToConfigListMap.get(stateI);
-			// compare each configuration pair s, t to ensure:
-			// s.ctx different than t.ctx if s.alt != t.alt
-			int numConfigsForState = 0;
-			if ( configsForState!=null ) {
-				numConfigsForState = configsForState.size();
-			}
-			for (int i = 0; i < numConfigsForState; i++) {
-				NFAConfiguration s = (NFAConfiguration) configsForState.get(i);
-				for (int j = i+1; j < numConfigsForState; j++) {
-					NFAConfiguration t = (NFAConfiguration)configsForState.get(j);
-					// conflicts means s.ctx==t.ctx or s.ctx is a stack
-					// suffix of t.ctx or vice versa (if alts differ).
-					// Also a conflict if s.ctx or t.ctx is empty
-					if ( s.alt != t.alt && s.context.conflictsWith(t.context) ) {
-						nondeterministicAlts.add(Utils.integer(s.alt));
-						nondeterministicAlts.add(Utils.integer(t.alt));
-					}
-				}
-			}
-		}
-
-		if ( nondeterministicAlts.size()==0 ) {
-			return null;
-		}
-        return nondeterministicAlts;
-    }
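
getConflictingAlts() above groups configurations by NFA state and only reports a nondeterminism when two configurations of the same state predict different alts under conflicting contexts (equal, one a stack suffix of the other, or empty). A simplified, self-contained sketch follows; Config and the list-based context model are assumptions for illustration, not ANTLR's NFAConfiguration/NFAContext, and the Tokens-rule and two-pass filtering are omitted.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class ConflictSketch {
    // Hypothetical simplified configuration: NFA state, predicted alt, and a
    // call-stack context modeled as a list of rule-invocation state numbers.
    static final class Config {
        final int state, alt;
        final List<Integer> context;
        Config(int state, int alt, List<Integer> context) {
            this.state = state; this.alt = alt; this.context = context;
        }
    }

    // Two contexts conflict if either is empty, or one is a suffix of the other.
    static boolean conflicts(List<Integer> a, List<Integer> b) {
        if (a.isEmpty() || b.isEmpty()) return true;
        List<Integer> longer = a.size() >= b.size() ? a : b;
        List<Integer> shorter = a.size() >= b.size() ? b : a;
        return longer.subList(longer.size() - shorter.size(), longer.size()).equals(shorter);
    }

    // Bucket configs by NFA state; for any state whose configs predict different
    // alts under conflicting contexts, report both alts as nondeterministic.
    static Set<Integer> conflictingAlts(List<Config> configs) {
        Map<Integer, List<Config>> byState = new HashMap<>();
        for (Config c : configs) byState.computeIfAbsent(c.state, k -> new ArrayList<>()).add(c);
        Set<Integer> alts = new HashSet<>();
        for (List<Config> group : byState.values()) {
            for (int i = 0; i < group.size(); i++) {
                for (int j = i + 1; j < group.size(); j++) {
                    Config s = group.get(i), t = group.get(j);
                    if (s.alt != t.alt && conflicts(s.context, t.context)) {
                        alts.add(s.alt);
                        alts.add(t.alt);
                    }
                }
            }
        }
        return alts;
    }
}
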
-
-	/** Get the set of all alts mentioned by all NFA configurations in this
-	 *  DFA state.
-	 */
-	public Set getAltSet() {
-		int numConfigs = nfaConfigurations.size();
-		Set alts = new HashSet();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
-			alts.add(Utils.integer(configuration.alt));
-		}
-		if ( alts.size()==0 ) {
-			return null;
-		}
-		return alts;
-	}
-
-	public Set getGatedSyntacticPredicatesInNFAConfigurations() {
-		int numConfigs = nfaConfigurations.size();
-		Set<SemanticContext> synpreds = new HashSet<SemanticContext>();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
-			SemanticContext gatedPredExpr =
-				configuration.semanticContext.getGatedPredicateContext();
-			// if this is a manual syn pred (gated and syn pred), add
-			if ( gatedPredExpr!=null &&
-				 configuration.semanticContext.isSyntacticPredicate() )
-			{
-				synpreds.add(configuration.semanticContext);
-			}
-		}
-		if ( synpreds.size()==0 ) {
-			return null;
-		}
-		return synpreds;
-	}
-
-	/** For gated productions, we need an OR'd list of all predicates for the
-	 *  target of an edge so we can gate the edge based upon the predicates
-	 *  associated with taking that path (if any).
-	 *
-	 *  For syntactic predicates, we only want to generate predicate
-	 *  evaluations as it transitions to an accept state; waste to
-	 *  do it earlier.  So, only add gated preds derived from manually-
-	 *  specified syntactic predicates if this is an accept state.
-	 *
-	 *  Also, since configurations w/o gated predicates are like true
-	 *  gated predicates, finding a configuration whose alt has no gated
-	 *  predicate implies we should evaluate the predicate to true. This
-	 *  means the whole edge has to be ungated. Consider:
-	 *
-	 *	 X : ('a' | {p}?=> 'a')
-	 *	   | 'a' 'b'
-	 *	   ;
-	 *
-	 *  Here, 'a' gets you from s0 to s1 but you can't test p because
-	 *  plain 'a' is ok.  It's also ok for starting alt 2.  Hence, you can't
-	 *  test p.  Even on the edge going to accept state for alt 1 of X, you
-	 *  can't test p.  You can get to the same place with and w/o the context.
-	 *  Therefore, it is never ok to test p in this situation. 
-	 *
-	 *  TODO: cache this as it's called a lot; or at least set bit if >1 present in state
-	 */
-	public SemanticContext getGatedPredicatesInNFAConfigurations() {
-		SemanticContext unionOfPredicatesFromAllAlts = null;
-		int numConfigs = nfaConfigurations.size();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
-			SemanticContext gatedPredExpr =
-				configuration.semanticContext.getGatedPredicateContext();
-			if ( gatedPredExpr==null ) {
-				// if we ever find a configuration w/o a gated predicate
-				// (even if it's a nongated predicate), we cannot gate
-				// the incident edges.
-				return null;
-			}
-			else if ( acceptState || !configuration.semanticContext.isSyntacticPredicate() ) {
-				// at this point we have a gated predicate and, due to elseif,
-				// we know it's an accept or not a syn pred.  In this case,
-				// it's safe to add the gated predicate to the union.  We
-				// only want to add syn preds if it's an accept state.  Other
-				// gated preds can be used with edges leading to accept states.
-				if ( unionOfPredicatesFromAllAlts==null ) {
-					unionOfPredicatesFromAllAlts = gatedPredExpr;
-				}
-				else {
-					unionOfPredicatesFromAllAlts =
-						SemanticContext.or(unionOfPredicatesFromAllAlts,gatedPredExpr);
-				}
-			}
-		}
-		if ( unionOfPredicatesFromAllAlts instanceof SemanticContext.TruePredicate ) {
-			return null;
-		}
-		return unionOfPredicatesFromAllAlts;
-	}
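
getGatedPredicatesInNFAConfigurations() above ORs together the gated predicates contributed by each configuration, but gives up (returns null) the moment any configuration has no gated predicate, since that path must remain open. A small sketch of just that combining rule, using java.util.function.Predicate as a stand-in for SemanticContext; the accept-state/syn-pred filtering is omitted.

import java.util.List;
import java.util.Optional;
import java.util.function.Predicate;

final class GatedPredicateSketch {
    // If every configuration contributes a gated predicate, the edge is gated by
    // their OR; if any configuration has none, the whole edge stays ungated (null).
    static <T> Predicate<T> union(List<Optional<Predicate<T>>> gatedPreds) {
        Predicate<T> result = null;
        for (Optional<Predicate<T>> p : gatedPreds) {
            if (p.isEmpty()) return null;   // one ungated config ungates the edge
            result = (result == null) ? p.get() : result.or(p.get());
        }
        return result;
    }
}
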
-
-    /** Is an accept state reachable from this state? */
-    public int getAcceptStateReachable() {
-        return acceptStateReachable;
-    }
-
-    public void setAcceptStateReachable(int acceptStateReachable) {
-        this.acceptStateReachable = acceptStateReachable;
-    }
-
-    public boolean isResolvedWithPredicates() {
-        return resolvedWithPredicates;
-    }
-
-    /** Print all NFA states plus what alts they predict */
-    public String toString() {
-        StringBuffer buf = new StringBuffer();
-        buf.append(stateNumber+":{");
-		for (int i = 0; i < nfaConfigurations.size(); i++) {
-			NFAConfiguration configuration = (NFAConfiguration) nfaConfigurations.get(i);
-			if ( i>0 ) {
-				buf.append(", ");
-			}
-			buf.append(configuration);
-		}
-        buf.append("}");
-        return buf.toString();
-    }
-
-	public int getLookaheadDepth() {
-		return k;
-	}
-
-	public void setLookaheadDepth(int k) {
-		this.k = k;
-		if ( k > dfa.max_k ) { // track max k for entire DFA
-			dfa.max_k = k;
-		}
-	}
-
-}
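
For reference, the gated-predicate union rule documented in getGatedPredicatesInNFAConfigurations() above is easier to see in isolation. The following is a minimal sketch under simplifying assumptions: Config and the String predicate expressions are hypothetical stand-ins for ANTLR's NFAConfiguration and SemanticContext, and OR is modeled as string concatenation; only the short-circuit and union behavior mirror the method above.

import java.util.List;

final class GatedPredUnionSketch {
    /** Stand-in for NFAConfiguration: an optional gated predicate expression plus
     *  a flag saying whether that predicate came from a syntactic predicate. */
    static final class Config {
        final String gatedPred;      // null means "no gated predicate on this alt"
        final boolean isSynPred;
        Config(String gatedPred, boolean isSynPred) {
            this.gatedPred = gatedPred;
            this.isSynPred = isSynPred;
        }
    }

    /** OR together the gated predicates of all configurations, or return null if any
     *  configuration is ungated (an ungated alt ungates the whole edge). */
    static String gatedPredicateUnion(List<Config> configs, boolean acceptState) {
        String union = null;
        for (Config c : configs) {
            if (c.gatedPred == null) return null;          // one ungated alt => edge is ungated
            if (acceptState || !c.isSynPred) {             // add syn preds only at accept states
                union = (union == null) ? c.gatedPred
                                        : "(" + union + ")||(" + c.gatedPred + ")";
            }
        }
        return "true".equals(union) ? null : union;        // a trivially-true gate is no gate
    }

    public static void main(String[] args) {
        List<Config> configs = List.of(new Config("p1", false), new Config("p2", false));
        System.out.println(gatedPredicateUnion(configs, true));   // prints (p1)||(p2)
    }
}

Running it prints (p1)||(p2); if any Config in the list had a null gatedPred, the result would be null, matching the "whole edge has to be ungated" rule described above.
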
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/DecisionProbe.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/DecisionProbe.java
deleted file mode 100644
index 0e4d393..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/DecisionProbe.java
+++ /dev/null
@@ -1,915 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.misc.MultiMap;
-import org.antlr.misc.Utils;
-import org.antlr.runtime.Token;
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.GrammarAST;
-
-import java.util.*;
-
-/** Collection of information about what is wrong with a decision as
- *  discovered while building the DFA predictor.
- *
- *  The information is collected during NFA->DFA conversion and, while
- *  some of this is available elsewhere, it is nice to have it all tracked
- *  in one spot so a great error message can be easily had.  I also like
- *  the fact that this object tracks it all for later perusing to make an
- *  excellent error message instead of lots of imprecise on-the-fly warnings
- *  (during conversion).
- *
- *  A decision normally only has one problem; e.g., some input sequence
- *  can be matched by multiple alternatives.  Unfortunately, some decisions
- *  such as
- *
- *  a : ( A | B ) | ( A | B ) | A ;
- *
- *  have multiple problems.  So in general, you should approach a decision
- *  as having multiple flaws each one uniquely identified by a DFAState.
- *  For example, statesWithSyntacticallyAmbiguousAltsSet tracks the set of
- *  all DFAStates where ANTLR has discovered a problem.  Recall that a decision
- *  is represented internally with a DFA comprised of multiple states, each of
- *  which could potentially have problems.
- *
- *  Because of this, you need to iterate over this list of DFA states.  You'll
- *  note that most of the informational methods like
- *  getSampleNonDeterministicInputSequence() require a DFAState.  This state
- *  will be one of the iterated states from stateToSyntacticallyAmbiguousAltsSet.
- *
- *  This class is not thread safe due to shared use of visited maps etc...
- *  Only one thread should really need to access one DecisionProbe anyway.
- */
-public class DecisionProbe {
-	public DFA dfa;
-
-	/** Track all DFA states with nondeterministic alternatives.
-	 *  By reaching the same DFA state, a path through the NFA for some input
-	 *  is able to reach the same NFA state by starting at more than one
-	 *  alternative's left edge.  Later we may find that predicates
-	 *  resolve the issue, but we track the info anyway.
-	 *  Note that from the DFA state, you can ask for
-	 *  which alts are nondeterministic.
-	 */
-	protected Set<DFAState> statesWithSyntacticallyAmbiguousAltsSet = new HashSet<DFAState>();
-
-	/** Track just like stateToSyntacticallyAmbiguousAltsMap, but only
-	 *  for nondeterminisms that arise in the Tokens rule such as keyword vs
-	 *  ID rule.  The state maps to the list of Tokens rule alts that are
-	 *  in conflict.
-	 */
-	protected Map<DFAState, Set<Integer>> stateToSyntacticallyAmbiguousTokensRuleAltsMap =
-		new HashMap<DFAState, Set<Integer>>();
-
-	/** Was a syntactic ambiguity resolved with predicates?  Any DFA
-	 *  state that predicts more than one alternative, must be resolved
-	 *  with predicates or it should be reported to the user.
-	 */
-	protected Set<DFAState> statesResolvedWithSemanticPredicatesSet = new HashSet<DFAState>();
-
-	/** Track the predicates for each alt per DFA state;
-	 *  more than one DFA state might have syntactically ambig alt prediction.
-	 *  Maps DFA state to another map, mapping alt number to a
-	 *  SemanticContext (pred(s) to execute to resolve syntactic ambiguity).
-	 */
-	protected Map<DFAState, Map<Integer,SemanticContext>> stateToAltSetWithSemanticPredicatesMap =
-		new HashMap<DFAState, Map<Integer,SemanticContext>>();
-
-	/** Tracks alts insufficiently covered.
-	 *  For example, p1||true gets reduced to true and so leaves
-	 *  whole alt uncovered.  This maps DFA state to the set of alts
-	 */
-	protected Map<DFAState,Map<Integer, Set<Token>>> stateToIncompletelyCoveredAltsMap =
-		new HashMap<DFAState,Map<Integer, Set<Token>>>();
-
-	/** The set of states w/o emanating edges and w/o resolving sem preds. */
-	protected Set<DFAState> danglingStates = new HashSet<DFAState>();
-
-	/** The overall list of alts within the decision that have at least one
-	 *  conflicting input sequence.
-	 */
-	protected Set<Integer> altsWithProblem = new HashSet<Integer>();
-
-	/** If a decision with > 1 alt has recursion in > 1 alt, it (likely) requires
-	 *  nonregular lookahead.  The decision cannot be made with a DFA.
-	 *  The alts are stored in altsWithProblem.
-	 */
-	public boolean nonLLStarDecision = false;
-
-	/** Recursion is limited to a particular depth.  If that limit is exceeded
-	 *  the proposed new NFAConfiguration is recorded for the associated DFA state.
-	 */
-	protected MultiMap<Integer, NFAConfiguration> stateToRecursionOverflowConfigurationsMap =
-		new MultiMap<Integer, NFAConfiguration>();
-	/*
-	protected Map<Integer, List<NFAConfiguration>> stateToRecursionOverflowConfigurationsMap =
-		new HashMap<Integer, List<NFAConfiguration>>();
-		*/
-
-	/** Left recursion discovered.  The proposed new NFAConfiguration
-	 *  is recorded for the associated DFA state.
-	protected Map<Integer,List<NFAConfiguration>> stateToLeftRecursiveConfigurationsMap =
-		new HashMap<Integer,List<NFAConfiguration>>();
-	 */
-
-	/** Did ANTLR have to terminate early on the analysis of this decision? */
-	protected boolean timedOut = false;
-
-	/** Used to find paths through syntactically ambiguous DFA. If we've
-	 *  seen a state number before, what did we learn?
-	 */
-	protected Map<Integer, Integer> stateReachable;
-
-	public static final Integer REACHABLE_BUSY = Utils.integer(-1);
-	public static final Integer REACHABLE_NO = Utils.integer(0);
-	public static final Integer REACHABLE_YES = Utils.integer(1);
-
-	/** Used while finding a path through an NFA whose edge labels match
-	 *  an input sequence.  Tracks the input position
-	 *  we were at the last time at this node.  If same input position, then
-	 *  we'd have reached same state without consuming input...probably an
-	 *  infinite loop.  Stop.  Set<String>.  The strings look like
-	 *  stateNumber_labelIndex.
-	 */
-	protected Set<String> statesVisitedAtInputDepth;
-
-	protected Set<Integer> statesVisitedDuringSampleSequence;
-
-	public static boolean verbose = false;
-
-	public DecisionProbe(DFA dfa) {
-		this.dfa = dfa;
-	}
-
-	// I N F O R M A T I O N  A B O U T  D E C I S I O N
-
-	/** Return a string like "3:22: ( A {;} | B )" that describes this
-	 *  decision.
-	 */
-	public String getDescription() {
-		return dfa.getNFADecisionStartState().getDescription();
-	}
-
-	public boolean isReduced() {
-		return dfa.isReduced();
-	}
-
-	public boolean isCyclic() {
-		return dfa.isCyclic();
-	}
-
-	/** If no states are dead-ends, no alts are unreachable, there are
-	 *  no nondeterminisms unresolved by syn preds, all is ok with decision.
-	 */
-	public boolean isDeterministic() {
-		if ( danglingStates.size()==0 &&
-			 statesWithSyntacticallyAmbiguousAltsSet.size()==0 &&
-			 dfa.getUnreachableAlts().size()==0 )
-		{
-			return true;
-		}
-
-		if ( statesWithSyntacticallyAmbiguousAltsSet.size()>0 ) {
-			Iterator it =
-				statesWithSyntacticallyAmbiguousAltsSet.iterator();
-			while (	it.hasNext() ) {
-				DFAState d = (DFAState) it.next();
-				if ( !statesResolvedWithSemanticPredicatesSet.contains(d) ) {
-					return false;
-				}
-			}
-			// no syntactically ambig alts were left unresolved by predicates
-			return true;
-		}
-		return false;
-	}
-
-	/** Did the analysis complete its work? */
-//	public boolean analysisTimedOut() {
-//		return timedOut;
-//	}
-
-	/** Did the analysis hit the recursion-depth limit for a DFA? */
-	public boolean analysisOverflowed() {
-		return stateToRecursionOverflowConfigurationsMap.size()>0;
-	}
-
-	/** Found recursion in > 1 alt */
-	public boolean isNonLLStarDecision() {
-		return nonLLStarDecision;
-	}
-
-	/** How many states does the DFA predictor have? */
-	public int getNumberOfStates() {
-		return dfa.getNumberOfStates();
-	}
-
-	/** Get a list of all unreachable alternatives for this decision.  There
-	 *  may be multiple alternatives with ambiguous input sequences, but this
-	 *  is the overall list of unreachable alternatives (either due to
-	 *  conflict resolution or alts w/o accept states).
-	 */
-	public List<Integer> getUnreachableAlts() {
-		return dfa.getUnreachableAlts();
-	}
-
-	/** return set of states w/o emanating edges and w/o resolving sem preds.
-	 *  These states come about because the analysis algorithm had to
-	 *  terminate early to avoid infinite recursion for example (due to
-	 *  left recursion perhaps).
-	 */
-	public Set getDanglingStates() {
-		return danglingStates;
-	}
-
-    public Set getNonDeterministicAlts() {
-        return altsWithProblem;
-	}
-
-	/** Return the sorted list of alts that conflict within a single state.
-	 *  Note that predicates may resolve the conflict.
-	 */
-	public List getNonDeterministicAltsForState(DFAState targetState) {
-		Set nondetAlts = targetState.getNonDeterministicAlts();
-		if ( nondetAlts==null ) {
-			return null;
-		}
-		List sorted = new LinkedList();
-		sorted.addAll(nondetAlts);
-		Collections.sort(sorted); // make sure it's 1, 2, ...
-		return sorted;
-	}
-
-	/** Return all DFA states in this DFA that have NFA configurations that
-	 *  conflict.  You must report a problem for each state in this set
-	 *  because each state represents a different input sequence.
-	 */
-	public Set getDFAStatesWithSyntacticallyAmbiguousAlts() {
-		return statesWithSyntacticallyAmbiguousAltsSet;
-	}
-
-	/** Which alts were specifically turned off to resolve nondeterminisms?
-	 *  This is different from the unreachable alts.  Disabled doesn't necessarily
-	 *  mean that the alternative is totally unreachable; it just means
-	 *  that for this DFA state, that alt is disabled.  There may be other
-	 *  accept states for that alt that make an alt reachable.
-	 */
-	public Set getDisabledAlternatives(DFAState d) {
-		return d.getDisabledAlternatives();
-	}
-
-	/** If a recursion overflow is resolved with predicates, then we need
-	 *  to shut off the warning that would be generated.
-	 */
-	public void removeRecursiveOverflowState(DFAState d) {
-		Integer stateI = Utils.integer(d.stateNumber);
-		stateToRecursionOverflowConfigurationsMap.remove(stateI);
-	}
-
-	/** Return a List<Label> indicating an input sequence that can be matched
-	 *  from the start state of the DFA to the targetState (which is known
-	 *  to have a problem).
-	 */
-	public List<Label> getSampleNonDeterministicInputSequence(DFAState targetState) {
-		Set dfaStates = getDFAPathStatesToTarget(targetState);
-		statesVisitedDuringSampleSequence = new HashSet<Integer>();
-		List<Label> labels = new ArrayList<Label>(); // may access ith element; use array
-		if ( dfa==null || dfa.startState==null ) {
-			return labels;
-		}
-		getSampleInputSequenceUsingStateSet(dfa.startState,
-											targetState,
-											dfaStates,
-											labels);
-		return labels;
-	}
-
-	/** Given List<Label>, return a String with a useful representation
-	 *  of the associated input string.  One could show something different
-	 *  for lexers and parsers, for example.
-	 */
-	public String getInputSequenceDisplay(List labels) {
-        Grammar g = dfa.nfa.grammar;
-		StringBuffer buf = new StringBuffer();
-		for (Iterator it = labels.iterator(); it.hasNext();) {
-			Label label = (Label) it.next();
-			buf.append(label.toString(g));
-			if ( it.hasNext() && g.type!=Grammar.LEXER ) {
-				buf.append(' ');
-			}
-		}
-		return buf.toString();
-	}
-
-    /** Given an alternative associated with a nondeterministic DFA state,
-	 *  find the path of NFA states associated with the labels sequence.
-	 *  Useful for tracing where in the NFA a single input sequence can be
-	 *  matched.  For different alts, you should get different NFA paths.
-	 *
-	 *  The first NFA state for all NFA paths will be the same: the starting
-	 *  NFA state of the first nondeterministic alt.  Imagine (A|B|A|A):
-	 *
-	 * 	5->9-A->o
-	 *  |
-	 *  6->10-B->o
-	 *  |
-	 *  7->11-A->o
-	 *  |
-	 *  8->12-A->o
-	 *
-	 *  There are 3 nondeterministic alts.  The paths should be:
-	 *  5 9 ...
-	 *  5 6 7 11 ...
-	 *  5 6 7 8 12 ...
-	 *
-	 *  The NFA path matching the sample input sequence (labels) is computed
-	 *  using states 9, 11, and 12 rather than 5, 7, 8 because state 5, for
-	 *  example, can reach all ambiguous paths.  We must isolate each alt (hence,
-	 *  the extra state beginning each alt in my NFA structures).  Here,
-	 *  firstAlt=1.
-	 */
-	public List getNFAPathStatesForAlt(int firstAlt,
-									   int alt,
-									   List labels)
-	{
-		NFAState nfaStart = dfa.getNFADecisionStartState();
-		List path = new LinkedList();
-		// first add all NFA states leading up to altStart state
-		for (int a=firstAlt; a<=alt; a++) {
-			NFAState s =
-				dfa.nfa.grammar.getNFAStateForAltOfDecision(nfaStart,a);
-			path.add(s);
-		}
-
-		// add first state of actual alt
-		NFAState altStart = dfa.nfa.grammar.getNFAStateForAltOfDecision(nfaStart,alt);
-		NFAState isolatedAltStart = (NFAState)altStart.transition[0].target;
-		path.add(isolatedAltStart);
-
-		// add the actual path now
-		statesVisitedAtInputDepth = new HashSet();
-		getNFAPath(isolatedAltStart,
-				   0,
-				   labels,
-				   path);
-        return path;
-	}
-
-	/** Each state in the DFA represents a different input sequence for an
-	 *  alt of the decision.  Given a DFA state, what is the semantic
-	 *  predicate context for a particular alt?
-	 */
-    public SemanticContext getSemanticContextForAlt(DFAState d, int alt) {
-		Map altToPredMap = (Map)stateToAltSetWithSemanticPredicatesMap.get(d);
-		if ( altToPredMap==null ) {
-			return null;
-		}
-		return (SemanticContext)altToPredMap.get(Utils.integer(alt));
-	}
-
-	/** At least one alt refs a sem or syn pred */
-	public boolean hasPredicate() {
-		return stateToAltSetWithSemanticPredicatesMap.size()>0;
-	}
-
-	public Set getNondeterministicStatesResolvedWithSemanticPredicate() {
-		return statesResolvedWithSemanticPredicatesSet;
-	}
-
-	/** Return a list of alts whose predicate context was insufficient to
-	 *  resolve a nondeterminism for state d.
-	 */
-	public Map<Integer, Set<Token>> getIncompletelyCoveredAlts(DFAState d) {
-		return stateToIncompletelyCoveredAltsMap.get(d);
-	}
-
-	public void issueWarnings() {
-		// NONREGULAR DUE TO RECURSION > 1 ALTS
-		// Issue this before aborted analysis, which might also occur
-		// if we take too long to terminate
-		if ( nonLLStarDecision && !dfa.getAutoBacktrackMode() ) {
-			ErrorManager.nonLLStarDecision(this);
-		}
-
-		issueRecursionWarnings();
-
-		// generate a separate message for each problem state in DFA
-		Set resolvedStates = getNondeterministicStatesResolvedWithSemanticPredicate();
-		Set problemStates = getDFAStatesWithSyntacticallyAmbiguousAlts();
-		if ( problemStates.size()>0 ) {
-			Iterator it =
-				problemStates.iterator();
-			while (	it.hasNext() && !dfa.nfa.grammar.NFAToDFAConversionExternallyAborted() ) {
-				DFAState d = (DFAState) it.next();
-				Map<Integer, Set<Token>> insufficientAltToLocations = getIncompletelyCoveredAlts(d);
-				if ( insufficientAltToLocations!=null && insufficientAltToLocations.size()>0 ) {
-					ErrorManager.insufficientPredicates(this,d,insufficientAltToLocations);
-				}
-				// don't report problem if resolved
-				if ( resolvedStates==null || !resolvedStates.contains(d) ) {
-					// first strip last alt from disableAlts if it's wildcard
-					// then don't print error if no more disable alts
-					Set disabledAlts = getDisabledAlternatives(d);
-					stripWildCardAlts(disabledAlts);
-					if ( disabledAlts.size()>0 ) {
-						// nondeterminism; same input predicts multiple alts.
-						// but don't emit error if greedy=true explicitly set
-						boolean explicitlyGreedy = false;
-						GrammarAST blockAST =
-							d.dfa.nfa.grammar.getDecisionBlockAST(d.dfa.decisionNumber);
-						if ( blockAST!=null ) {
-							String greedyS = (String)blockAST.getBlockOption("greedy");
-							if ( greedyS!=null && greedyS.equals("true") ) explicitlyGreedy = true;
-						}
-						if ( !explicitlyGreedy) ErrorManager.nondeterminism(this,d);
-					}
-				}
-			}
-		}
-
-		Set danglingStates = getDanglingStates();
-		if ( danglingStates.size()>0 ) {
-			//System.err.println("no emanating edges for states: "+danglingStates);
-			for (Iterator it = danglingStates.iterator(); it.hasNext();) {
-				DFAState d = (DFAState) it.next();
-				ErrorManager.danglingState(this,d);
-			}
-		}
-
-		if ( !nonLLStarDecision ) {
-			List<Integer> unreachableAlts = dfa.getUnreachableAlts();
-			if ( unreachableAlts!=null && unreachableAlts.size()>0 ) {
-				// give different msg if it's an empty Tokens rule from delegate
-				boolean isInheritedTokensRule = false;
-				if ( dfa.isTokensRuleDecision() ) {
-					for (Integer altI : unreachableAlts) {
-						GrammarAST decAST = dfa.getDecisionASTNode();
-						GrammarAST altAST = (GrammarAST)decAST.getChild(altI-1);
-						GrammarAST delegatedTokensAlt =
-							(GrammarAST)altAST.getFirstChildWithType(ANTLRParser.DOT);
-						if ( delegatedTokensAlt !=null ) {
-							isInheritedTokensRule = true;
-							ErrorManager.grammarWarning(ErrorManager.MSG_IMPORTED_TOKENS_RULE_EMPTY,
-														dfa.nfa.grammar,
-														null,
-														dfa.nfa.grammar.name,
-														delegatedTokensAlt.getChild(0).getText());
-						}
-					}
-				}
-				if ( isInheritedTokensRule ) {
-				}
-				else {
-					ErrorManager.unreachableAlts(this,unreachableAlts);
-				}
-			}
-		}
-	}
-
-	/** Get the last disabled alt number and check in the grammar to see
-	 *  if that alt is a simple wildcard.  If so, treat like an else clause
-	 *  and don't emit the error.  Strip out the last alt if it's wildcard.
-	 */
-	protected void stripWildCardAlts(Set disabledAlts) {
-		List sortedDisableAlts = new ArrayList(disabledAlts);
-		Collections.sort(sortedDisableAlts);
-		Integer lastAlt =
-			(Integer)sortedDisableAlts.get(sortedDisableAlts.size()-1);
-		GrammarAST blockAST =
-			dfa.nfa.grammar.getDecisionBlockAST(dfa.decisionNumber);
-		//System.out.println("block with error = "+blockAST.toStringTree());
-		GrammarAST lastAltAST = null;
-		if ( blockAST.getChild(0).getType()==ANTLRParser.OPTIONS ) {
-			// if options, skip first child: ( options { ( = greedy false ) )
-			lastAltAST = (GrammarAST)blockAST.getChild(lastAlt.intValue());
-		}
-		else {
-			lastAltAST = (GrammarAST)blockAST.getChild(lastAlt.intValue()-1);
-		}
-		//System.out.println("last alt is "+lastAltAST.toStringTree());
-		// if last alt looks like ( ALT . <end-of-alt> ) then wildcard
-		// Avoid looking at optional blocks etc... that have last alt
-		// as the EOB:
-		// ( BLOCK ( ALT 'else' statement <end-of-alt> ) <end-of-block> )
-		if ( lastAltAST.getType()!=ANTLRParser.EOB &&
-			 lastAltAST.getChild(0).getType()== ANTLRParser.WILDCARD &&
-			 lastAltAST.getChild(1).getType()== ANTLRParser.EOA )
-		{
-			//System.out.println("wildcard");
-			disabledAlts.remove(lastAlt);
-		}
-	}
-
-	protected void issueRecursionWarnings() {
-		// RECURSION OVERFLOW
-		Set dfaStatesWithRecursionProblems =
-			stateToRecursionOverflowConfigurationsMap.keySet();
-		// now walk truly unique (unaliased) list of dfa states with inf recur
-		// Goal: create a map from alt to map<target,List<callsites>>
-		// Map<Integer alt, Map<String target, List<NFAState callSite>>>
-		Map altToTargetToCallSitesMap = new HashMap();
-		// track a single problem DFA state for each alt
-		Map altToDFAState = new HashMap();
-		computeAltToProblemMaps(dfaStatesWithRecursionProblems,
-								stateToRecursionOverflowConfigurationsMap,
-								altToTargetToCallSitesMap, // output param
-								altToDFAState);            // output param
-
-		// walk each alt with recursion overflow problems and generate error
-		Set alts = altToTargetToCallSitesMap.keySet();
-		List sortedAlts = new ArrayList(alts);
-		Collections.sort(sortedAlts);
-		for (Iterator altsIt = sortedAlts.iterator(); altsIt.hasNext();) {
-			Integer altI = (Integer) altsIt.next();
-			Map targetToCallSiteMap =
-				(Map)altToTargetToCallSitesMap.get(altI);
-			Set targetRules = targetToCallSiteMap.keySet();
-			Collection callSiteStates = targetToCallSiteMap.values();
-			DFAState sampleBadState = (DFAState)altToDFAState.get(altI);
-			ErrorManager.recursionOverflow(this,
-										   sampleBadState,
-										   altI.intValue(),
-										   targetRules,
-										   callSiteStates);
-		}
-	}
-
-	private void computeAltToProblemMaps(Set dfaStatesUnaliased,
-										 Map configurationsMap,
-										 Map altToTargetToCallSitesMap,
-										 Map altToDFAState)
-	{
-		for (Iterator it = dfaStatesUnaliased.iterator(); it.hasNext();) {
-			Integer stateI = (Integer) it.next();
-			// walk this DFA's config list
-			List configs = (List)configurationsMap.get(stateI);
-			for (int i = 0; i < configs.size(); i++) {
-				NFAConfiguration c = (NFAConfiguration) configs.get(i);
-				NFAState ruleInvocationState = dfa.nfa.getState(c.state);
-				Transition transition0 = ruleInvocationState.transition[0];
-				RuleClosureTransition ref = (RuleClosureTransition)transition0;
-				String targetRule = ((NFAState) ref.target).enclosingRule.name;
-				Integer altI = Utils.integer(c.alt);
-				Map targetToCallSiteMap =
-					(Map)altToTargetToCallSitesMap.get(altI);
-				if ( targetToCallSiteMap==null ) {
-					targetToCallSiteMap = new HashMap();
-					altToTargetToCallSitesMap.put(altI, targetToCallSiteMap);
-				}
-				Set callSites =
-					(HashSet)targetToCallSiteMap.get(targetRule);
-				if ( callSites==null ) {
-					callSites = new HashSet();
-					targetToCallSiteMap.put(targetRule, callSites);
-				}
-				callSites.add(ruleInvocationState);
-				// track one problem DFA state per alt
-				if ( altToDFAState.get(altI)==null ) {
-					DFAState sampleBadState = dfa.getState(stateI.intValue());
-					altToDFAState.put(altI, sampleBadState);
-				}
-			}
-		}
-	}
-
-	private Set getUnaliasedDFAStateSet(Set dfaStatesWithRecursionProblems) {
-		Set dfaStatesUnaliased = new HashSet();
-		for (Iterator it = dfaStatesWithRecursionProblems.iterator(); it.hasNext();) {
-			Integer stateI = (Integer) it.next();
-			DFAState d = dfa.getState(stateI.intValue());
-			dfaStatesUnaliased.add(Utils.integer(d.stateNumber));
-		}
-		return dfaStatesUnaliased;
-	}
-
-
-	// T R A C K I N G  M E T H O D S
-
-    /** Report the fact that DFA state d is not a state resolved with
-     *  predicates and yet it has no emanating edges.  Usually this
-     *  is a result of the closure/reach operations being unable to proceed
-     *  is a result of the closure/reach operations being unable to proceed.
-	public void reportDanglingState(DFAState d) {
-		danglingStates.add(d);
-	}
-
-//	public void reportAnalysisTimeout() {
-//		timedOut = true;
-//		dfa.nfa.grammar.setOfDFAWhoseAnalysisTimedOut.add(dfa);
-//	}
-
-	/** Report that at least 2 alts have recursive constructs.  There is
-	 *  no way to build a DFA so we terminated.
-	 */
-	public void reportNonLLStarDecision(DFA dfa) {
-		/*
-		System.out.println("non-LL(*) DFA "+dfa.decisionNumber+", alts: "+
-						   dfa.recursiveAltSet.toList());
-						   */
-		nonLLStarDecision = true;
-		dfa.nfa.grammar.numNonLLStar++;
-		altsWithProblem.addAll(dfa.recursiveAltSet.toList());
-	}
-
-	public void reportRecursionOverflow(DFAState d,
-										NFAConfiguration recursionNFAConfiguration)
-	{
-		// track the state number rather than the state as d will change
-		// out from underneath us; hash wouldn't return any value
-
-		// left-recursion is detected in start state.  Since we can't
-		// call resolveNondeterminism() on the start state (it would
-		// not look k=1 to get min single token lookahead), we must
-		// prevent errors derived from this state.  Avoid start state
-		if ( d.stateNumber > 0 ) {
-			Integer stateI = Utils.integer(d.stateNumber);
-			stateToRecursionOverflowConfigurationsMap.map(stateI, recursionNFAConfiguration);
-		}
-	}
-
-	public void reportNondeterminism(DFAState d, Set<Integer> nondeterministicAlts) {
-		altsWithProblem.addAll(nondeterministicAlts); // track overall list
-		statesWithSyntacticallyAmbiguousAltsSet.add(d);
-		dfa.nfa.grammar.setOfNondeterministicDecisionNumbers.add(
-			Utils.integer(dfa.getDecisionNumber())
-		);
-	}
-
-	/** Currently the analysis reports issues between token definitions, but
-	 *  we don't print out warnings in favor of just picking the first token
-	 *  definition found in the grammar, a la lex/flex.
-	 */
-	public void reportLexerRuleNondeterminism(DFAState d, Set<Integer> nondeterministicAlts) {
-		stateToSyntacticallyAmbiguousTokensRuleAltsMap.put(d,nondeterministicAlts);
-	}
-
-	public void reportNondeterminismResolvedWithSemanticPredicate(DFAState d) {
-		// First, prevent a recursion warning on this state due to
-		// pred resolution
-		if ( d.abortedDueToRecursionOverflow ) {
-			d.dfa.probe.removeRecursiveOverflowState(d);
-		}
-		statesResolvedWithSemanticPredicatesSet.add(d);
-		//System.out.println("resolved with pred: "+d);
-		dfa.nfa.grammar.setOfNondeterministicDecisionNumbersResolvedWithPredicates.add(
-			Utils.integer(dfa.getDecisionNumber())
-		);
-	}
-
-	/** Report the list of predicates found for each alternative; copy
-	 *  the list because this set gets altered later by the method
-	 *  tryToResolveWithSemanticPredicates() while flagging NFA configurations
-	 *  in d as resolved.
-	 */
-	public void reportAltPredicateContext(DFAState d, Map altPredicateContext) {
-		Map copy = new HashMap();
-		copy.putAll(altPredicateContext);
-		stateToAltSetWithSemanticPredicatesMap.put(d,copy);
-	}
-
-	public void reportIncompletelyCoveredAlts(DFAState d,
-											  Map<Integer, Set<Token>> altToLocationsReachableWithoutPredicate)
-	{
-		stateToIncompletelyCoveredAltsMap.put(d, altToLocationsReachableWithoutPredicate);
-	}
-
-	// S U P P O R T
-
-	/** Given a start state and a target state, return true if start can reach
-	 *  target state.  Also, compute the set of DFA states
-	 *  that are on a path from start to target; return in states parameter.
-	 */
-	protected boolean reachesState(DFAState startState,
-								   DFAState targetState,
-								   Set states) {
-		if ( startState==targetState ) {
-			states.add(targetState);
-			//System.out.println("found target DFA state "+targetState.getStateNumber());
-			stateReachable.put(startState.stateNumber, REACHABLE_YES);
-			return true;
-		}
-
-		DFAState s = startState;
-		// avoid infinite loops
-		stateReachable.put(s.stateNumber, REACHABLE_BUSY);
-
-		// look for a path to targetState among transitions for this state
-		// stop when you find the first one; I'm pretty sure there is
-		// at most one path to any DFA state with conflicting predictions
-		for (int i=0; i<s.getNumberOfTransitions(); i++) {
-			Transition t = s.transition(i);
-			DFAState edgeTarget = (DFAState)t.target;
-			Integer targetStatus = stateReachable.get(edgeTarget.stateNumber);
-			if ( targetStatus==REACHABLE_BUSY ) { // avoid cycles; they say nothing
-				continue;
-			}
-			if ( targetStatus==REACHABLE_YES ) { // return success!
-				stateReachable.put(s.stateNumber, REACHABLE_YES);
-				return true;
-			}
-			if ( targetStatus==REACHABLE_NO ) { // try another transition
-				continue;
-			}
-			// if null, target must be REACHABLE_UNKNOWN (i.e., unvisited)
-			if ( reachesState(edgeTarget, targetState, states) ) {
-				states.add(s);
-				stateReachable.put(s.stateNumber, REACHABLE_YES);
-				return true;
-			}
-		}
-
-		stateReachable.put(s.stateNumber, REACHABLE_NO);
-		return false; // no path to targetState found.
-	}
-
-	protected Set getDFAPathStatesToTarget(DFAState targetState) {
-		Set dfaStates = new HashSet();
-		stateReachable = new HashMap();
-		if ( dfa==null || dfa.startState==null ) {
-			return dfaStates;
-		}
-		boolean reaches = reachesState(dfa.startState, targetState, dfaStates);
-		return dfaStates;
-	}
-
-	/** Given a start state and a final state, find a list of edge labels
-	 *  between the two ignoring epsilon.  Limit your scan to a set of states
-	 *  passed in.  This is used to show a sample input sequence that is
-	 *  nondeterministic with respect to this decision.  Return List<Label> as
-	 *  a parameter.  The incoming states set must be all states that lead
-	 *  from startState to targetState and no others so this algorithm doesn't
-	 *  take a path that eventually leads to a state other than targetState.
-	 *  Don't follow loops, which yields a short (possibly shortest) path.
-	 */
-	protected void getSampleInputSequenceUsingStateSet(State startState,
-													   State targetState,
-													   Set states,
-													   List<Label> labels)
-	{
-		statesVisitedDuringSampleSequence.add(startState.stateNumber);
-
-		// pick the first edge in states as the one to traverse
-		for (int i=0; i<startState.getNumberOfTransitions(); i++) {
-			Transition t = startState.transition(i);
-			DFAState edgeTarget = (DFAState)t.target;
-			if ( states.contains(edgeTarget) &&
-				 !statesVisitedDuringSampleSequence.contains(edgeTarget.stateNumber) )
-			{
-				labels.add(t.label); // traverse edge and track label
-				if ( edgeTarget!=targetState ) {
-					// get more labels if not at target
-					getSampleInputSequenceUsingStateSet(edgeTarget,
-														targetState,
-														states,
-														labels);
-				}
-				// done with this DFA state as we've found a good path to target
-				return;
-			}
-		}
-		labels.add(new Label(Label.EPSILON)); // indicate no input found
-		// this happens on a : {p1}? a | A ;
-		//ErrorManager.error(ErrorManager.MSG_CANNOT_COMPUTE_SAMPLE_INPUT_SEQ);
-	}
-
-	/** Given a sample input sequence, you usually would like to know the
-	 *  path taken through the NFA.  Return the list of NFA states visited
-	 *  while matching a list of labels.  This cannot use the usual
-	 *  interpreter, which does a deterministic walk.  We need to be able to
-	 *  take paths that are turned off during nondeterminism resolution. So,
-	 *  just do a depth-first walk traversing edges labeled with the current
-	 *  label.  Return true if a path was found emanating from state s.
-	 */
-	protected boolean getNFAPath(NFAState s,     // starting where?
-								 int labelIndex, // 0..labels.size()-1
-								 List labels,    // input sequence
-								 List path)      // output list of NFA states
-	{
-		// track a visit to state s at input index labelIndex if not seen
-		String thisStateKey = getStateLabelIndexKey(s.stateNumber,labelIndex);
-		if ( statesVisitedAtInputDepth.contains(thisStateKey) ) {
-			/*
-			System.out.println("### already visited "+s.stateNumber+" previously at index "+
-						   labelIndex);
-			*/
-			return false;
-		}
-		statesVisitedAtInputDepth.add(thisStateKey);
-
-		/*
-		System.out.println("enter state "+s.stateNumber+" visited states: "+
-						   statesVisitedAtInputDepth);
-        */
-
-		// pick the first edge whose target is in states and whose
-		// label is labels[labelIndex]
-		for (int i=0; i<s.getNumberOfTransitions(); i++) {
-			Transition t = s.transition[i];
-			NFAState edgeTarget = (NFAState)t.target;
-			Label label = (Label)labels.get(labelIndex);
-			/*
-			System.out.println(s.stateNumber+"-"+
-							   t.label.toString(dfa.nfa.grammar)+"->"+
-							   edgeTarget.stateNumber+" =="+
-							   label.toString(dfa.nfa.grammar)+"?");
-			*/
-			if ( t.label.isEpsilon() || t.label.isSemanticPredicate() ) {
-				// nondeterministically backtrack down epsilon edges
-				path.add(edgeTarget);
-				boolean found =
-					getNFAPath(edgeTarget, labelIndex, labels, path);
-				if ( found ) {
-					statesVisitedAtInputDepth.remove(thisStateKey);
-					return true; // return to "calling" state
-				}
-				path.remove(path.size()-1); // remove; didn't work out
-				continue; // look at the next edge
-			}
-			if ( t.label.matches(label) ) {
-				path.add(edgeTarget);
-				/*
-				System.out.println("found label "+
-								   t.label.toString(dfa.nfa.grammar)+
-								   " at state "+s.stateNumber+"; labelIndex="+labelIndex);
-				*/
-				if ( labelIndex==labels.size()-1 ) {
-					// found last label; done!
-					statesVisitedAtInputDepth.remove(thisStateKey);
-					return true;
-				}
-				// otherwise try to match remaining input
-				boolean found =
-					getNFAPath(edgeTarget, labelIndex+1, labels, path);
-				if ( found ) {
-					statesVisitedAtInputDepth.remove(thisStateKey);
-					return true;
-				}
-				/*
-				System.out.println("backtrack; path from "+s.stateNumber+"->"+
-								   t.label.toString(dfa.nfa.grammar)+" didn't work");
-				*/
-				path.remove(path.size()-1); // remove; didn't work out
-				continue; // keep looking for a path for labels
-			}
-		}
-		//System.out.println("no epsilon or matching edge; removing "+thisStateKey);
-		// no edge was found matching label; that's ok, some state will have it
-		statesVisitedAtInputDepth.remove(thisStateKey);
-		return false;
-	}
-
-	protected String getStateLabelIndexKey(int s, int i) {
-		StringBuffer buf = new StringBuffer();
-		buf.append(s);
-		buf.append('_');
-		buf.append(i);
-		return buf.toString();
-	}
-
-	/** From an alt number associated with artificial Tokens rule, return
-	 *  the name of the token that is associated with that alt.
-	 */ 
-	public String getTokenNameForTokensRuleAlt(int alt) {
-		NFAState decisionState = dfa.getNFADecisionStartState();
-		NFAState altState =
-			dfa.nfa.grammar.getNFAStateForAltOfDecision(decisionState,alt);
-		NFAState decisionLeft = (NFAState)altState.transition[0].target;
-		RuleClosureTransition ruleCallEdge =
-			(RuleClosureTransition)decisionLeft.transition[0];
-		NFAState ruleStartState = (NFAState)ruleCallEdge.target;
-		//System.out.println("alt = "+decisionLeft.getEnclosingRule());
-		return ruleStartState.enclosingRule.name;
-	}
-
-	public void reset() {
-		stateToRecursionOverflowConfigurationsMap.clear();
-	}
-}
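
The reachesState() method above is a memoized depth-first search: REACHABLE_BUSY marks states on the current path so cycles contribute nothing, while REACHABLE_NO and REACHABLE_YES cache dead ends and successes. A minimal sketch of that pattern, assuming a plain adjacency-list graph of Integer state numbers rather than ANTLR's DFAState/Transition classes:

import java.util.*;

final class ReachabilitySketch {
    static final int BUSY = -1, NO = 0, YES = 1;

    /** Can we reach target from start?  Also collects the states on some path in onPath. */
    static boolean reaches(Map<Integer, List<Integer>> edges, int start, int target,
                           Map<Integer, Integer> status, Set<Integer> onPath) {
        if (start == target) { onPath.add(target); status.put(start, YES); return true; }
        status.put(start, BUSY);                         // mark in-progress to break cycles
        for (int next : edges.getOrDefault(start, List.of())) {
            Integer s = status.get(next);
            if (s != null && s == BUSY) continue;        // cycle back into the path; says nothing
            if (s != null && s == YES) { status.put(start, YES); return true; }
            if (s != null && s == NO) continue;          // known dead end
            if (reaches(edges, next, target, status, onPath)) {
                onPath.add(start);
                status.put(start, YES);
                return true;
            }
        }
        status.put(start, NO);
        return false;                                    // no path to target found
    }

    public static void main(String[] args) {
        Map<Integer, List<Integer>> edges = Map.of(0, List.of(1, 2), 1, List.of(0), 2, List.of(3));
        Set<Integer> path = new HashSet<>();
        System.out.println(reaches(edges, 0, 3, new HashMap<>(), path) + " via " + path);
    }
}

The BUSY marker plays the same role as REACHABLE_BUSY above: a cycle back into an in-progress state is skipped, while NO/YES memoize the outcome for each state.
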
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/LL1Analyzer.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/LL1Analyzer.java
deleted file mode 100644
index c3f4432..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/LL1Analyzer.java
+++ /dev/null
@@ -1,449 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.misc.IntSet;
-import org.antlr.misc.IntervalSet;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.Rule;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Created by IntelliJ IDEA.
- * User: parrt
- * Date: Dec 31, 2007
- * Time: 1:31:16 PM
- * To change this template use File | Settings | File Templates.
- */
-public class LL1Analyzer {
-	/**	0	if we hit end of rule and invoker should keep going (epsilon) */
-	public static final int DETECT_PRED_EOR = 0;
-	/**	1	if we found a nonautobacktracking pred */
-	public static final int DETECT_PRED_FOUND = 1;
-	/**	2	if we didn't find such a pred */
-	public static final int DETECT_PRED_NOT_FOUND = 2;
-
-	public Grammar grammar;
-
-	/** Used during LOOK to detect computation cycles */
-	protected Set<NFAState> lookBusy = new HashSet<NFAState>();
-
-	public Map<NFAState, LookaheadSet> FIRSTCache = new HashMap<NFAState, LookaheadSet>();
-	public Map<Rule, LookaheadSet> FOLLOWCache = new HashMap<Rule, LookaheadSet>();
-
-	public LL1Analyzer(Grammar grammar) {
-		this.grammar = grammar;
-	}
-
-	/*
-	public void computeRuleFIRSTSets() {
-		if ( getNumberOfDecisions()==0 ) {
-			createNFAs();
-		}
-		for (Iterator it = getRules().iterator(); it.hasNext();) {
-			Rule r = (Rule)it.next();
-			if ( r.isSynPred ) {
-				continue;
-			}
-			LookaheadSet s = FIRST(r);
-			System.out.println("FIRST("+r.name+")="+s);
-		}
-	}
-	*/
-
-	/*
-	public Set<String> getOverriddenRulesWithDifferentFIRST() {
-		// walk every rule in this grammar and compare FIRST set with
-		// those in imported grammars.
-		Set<String> rules = new HashSet();
-		for (Iterator it = getRules().iterator(); it.hasNext();) {
-			Rule r = (Rule)it.next();
-			//System.out.println(r.name+" FIRST="+r.FIRST);
-			for (int i = 0; i < delegates.size(); i++) {
-				Grammar g = delegates.get(i);
-				Rule importedRule = g.getRule(r.name);
-				if ( importedRule != null ) { // exists in imported grammar
-					// System.out.println(r.name+" exists in imported grammar: FIRST="+importedRule.FIRST);
-					if ( !r.FIRST.equals(importedRule.FIRST) ) {
-						rules.add(r.name);
-					}
-				}
-			}
-		}
-		return rules;
-	}
-
-	public Set<Rule> getImportedRulesSensitiveToOverriddenRulesDueToLOOK() {
-		Set<String> diffFIRSTs = getOverriddenRulesWithDifferentFIRST();
-		Set<Rule> rules = new HashSet();
-		for (Iterator it = diffFIRSTs.iterator(); it.hasNext();) {
-			String r = (String) it.next();
-			for (int i = 0; i < delegates.size(); i++) {
-				Grammar g = delegates.get(i);
-				Set<Rule> callers = g.ruleSensitivity.get(r);
-				// somebody invokes rule whose FIRST changed in subgrammar?
-				if ( callers!=null ) {
-					rules.addAll(callers);
-					//System.out.println(g.name+" rules "+callers+" sensitive to "+r+"; dup 'em");
-				}
-			}
-		}
-		return rules;
-	}
-*/
-
-	/*
-	public LookaheadSet LOOK(Rule r) {
-		if ( r.FIRST==null ) {
-			r.FIRST = FIRST(r.startState);
-		}
-		return r.FIRST;
-	}
-*/
-
-	/** From an NFA state, s, find the set of all labels reachable from s.
-	 *  Used to compute follow sets for error recovery.  Never computes
-	 *  a FOLLOW operation.  FIRST stops at end of rules, returning EOR, unless
-	 *  invoked from another rule.  I.e., routine properly handles
-	 *
-	 *     a : b A ;
-	 *
-	 *  where b is nullable.
-	 *
-	 *  We record with EOR_TOKEN_TYPE if we hit the end of a rule so we can
-	 *  know at runtime (when these sets are used) to start walking up the
-	 *  follow chain to compute the real, correct follow set (as opposed to
-	 *  the FOLLOW, which is a superset).
-	 *
-	 *  This routine will only be used on parser and tree parser grammars.
-	 */
-	public LookaheadSet FIRST(NFAState s) {
-		//System.out.println("> FIRST("+s.enclosingRule.name+") in rule "+s.enclosingRule);
-		lookBusy.clear();
-		LookaheadSet look = _FIRST(s, false);
-		//System.out.println("< FIRST("+s.enclosingRule.name+") in rule "+s.enclosingRule+"="+look.toString(this.grammar));
-		return look;
-	}
-
-	public LookaheadSet FOLLOW(Rule r) {
-        //System.out.println("> FOLLOW("+r.name+") in rule "+r.startState.enclosingRule);
-		LookaheadSet f = FOLLOWCache.get(r);
-		if ( f!=null ) {
-			return f;
-		}
-		f = _FIRST(r.stopState, true);
-		FOLLOWCache.put(r, f);
-        //System.out.println("< FOLLOW("+r+") in rule "+r.startState.enclosingRule+"="+f.toString(this.grammar));
-		return f;
-	}
-
-	public LookaheadSet LOOK(NFAState s) {
-		if ( NFAToDFAConverter.debug ) {
-			System.out.println("> LOOK("+s+")");
-		}
-		lookBusy.clear();
-		LookaheadSet look = _FIRST(s, true);
-		// FOLLOW makes no sense (at the moment!) for lexical rules.
-		if ( grammar.type!=Grammar.LEXER && look.member(Label.EOR_TOKEN_TYPE) ) {
-			// avoid altering FIRST result as it is cached
-			LookaheadSet f = FOLLOW(s.enclosingRule);
-			f.orInPlace(look);
-			f.remove(Label.EOR_TOKEN_TYPE);
-			look = f;
-			//look.orInPlace(FOLLOW(s.enclosingRule));
-		}
-		else if ( grammar.type==Grammar.LEXER && look.member(Label.EOT) ) {
-			// if this has EOT, lookahead is all char (all char can follow rule)
-			//look = new LookaheadSet(Label.EOT);
-			look = new LookaheadSet(IntervalSet.COMPLETE_SET);
-		}
-		if ( NFAToDFAConverter.debug ) {
-			System.out.println("< LOOK("+s+")="+look.toString(grammar));
-		}
-		return look;
-	}
-
-	protected LookaheadSet _FIRST(NFAState s, boolean chaseFollowTransitions) {
-		/*
-		System.out.println("_LOOK("+s+") in rule "+s.enclosingRule);
-		if ( s.transition[0] instanceof RuleClosureTransition ) {
-			System.out.println("go to rule "+((NFAState)s.transition[0].target).enclosingRule);
-		}
-		*/
-		if ( !chaseFollowTransitions && s.isAcceptState() ) {
-			if ( grammar.type==Grammar.LEXER ) {
-				// FOLLOW makes no sense (at the moment!) for lexical rules.
-				// assume all char can follow
-				return new LookaheadSet(IntervalSet.COMPLETE_SET);
-			}
-			return new LookaheadSet(Label.EOR_TOKEN_TYPE);
-		}
-
-		if ( lookBusy.contains(s) ) {
-			// return a copy of an empty set; we may modify set inline
-			return new LookaheadSet();
-		}
-		lookBusy.add(s);
-
-		Transition transition0 = s.transition[0];
-		if ( transition0==null ) {
-			return null;
-		}
-
-		if ( transition0.label.isAtom() ) {
-			int atom = transition0.label.getAtom();
-			return new LookaheadSet(atom);
-		}
-		if ( transition0.label.isSet() ) {
-			IntSet sl = transition0.label.getSet();
-			return new LookaheadSet(sl);
-		}
-
-		// compute FIRST of transition 0
-		LookaheadSet tset = null;
-		// if transition 0 is a rule call and we don't want FOLLOW, check cache
-		if ( !chaseFollowTransitions && transition0 instanceof RuleClosureTransition ) {
-			tset = FIRSTCache.get((NFAState)transition0.target);
-		}
-
-		// if not in cache, must compute
-		if ( tset==null ) {
-			tset = _FIRST((NFAState)transition0.target, chaseFollowTransitions);
-			// save FIRST cache for transition 0 if rule call
-			if ( !chaseFollowTransitions && transition0 instanceof RuleClosureTransition ) {
-				FIRSTCache.put((NFAState)transition0.target, tset);
-			}
-		}
-
-        LookaheadSet tsetCached = tset; // tset is stored in cache. We can't return the same instance
-
-		// did we fall off the end?
-		if ( grammar.type!=Grammar.LEXER && tset.member(Label.EOR_TOKEN_TYPE) ) {
-			if ( transition0 instanceof RuleClosureTransition ) {
-				// we called a rule that found the end of the rule.
-				// That means the rule is nullable and we need to
-				// keep looking at what follows the rule ref.  E.g.,
-				// a : b A ; where b is nullable means that LOOK(a)
-				// should include A.
-				RuleClosureTransition ruleInvocationTrans =
-					(RuleClosureTransition)transition0;
-				// remove the EOR and get what follows
-				//tset.remove(Label.EOR_TOKEN_TYPE);
-				NFAState following = (NFAState) ruleInvocationTrans.followState;
-				LookaheadSet fset =	_FIRST(following, chaseFollowTransitions);
-				fset.orInPlace(tset); // tset cached; or into new set
-				fset.remove(Label.EOR_TOKEN_TYPE);
-				tset = fset;
-			}
-		}
-
-		Transition transition1 = s.transition[1];
-		if ( transition1!=null ) {
-			LookaheadSet tset1 =
-				_FIRST((NFAState)transition1.target, chaseFollowTransitions);
-			tset1.orInPlace(tset);
-			tset = tset1;
-		}
-
-		// never return a cached set; clone
-		return tset==tsetCached ? new LookaheadSet(tset) : tset;
-	}
-
-	/** Is there a non-syn-pred predicate visible from s that is not in
-	 *  the rule enclosing s?  This accounts for most predicate situations
-	 *  and lets ANTLR do a simple LL(1)+pred computation.
-	 *
-	 *  TODO: what about gated vs regular preds?
-	 */
-	public boolean detectConfoundingPredicates(NFAState s) {
-		lookBusy.clear();
-		Rule r = s.enclosingRule;
-		return _detectConfoundingPredicates(s, r, false) == DETECT_PRED_FOUND;
-	}
-
-	protected int _detectConfoundingPredicates(NFAState s,
-											   Rule enclosingRule,
-											   boolean chaseFollowTransitions)
-	{
-		//System.out.println("_detectNonAutobacktrackPredicates("+s+")");
-		if ( !chaseFollowTransitions && s.isAcceptState() ) {
-			if ( grammar.type==Grammar.LEXER ) {
-				// FOLLOW makes no sense (at the moment!) for lexical rules.
-				// assume all char can follow
-				return DETECT_PRED_NOT_FOUND;
-			}
-			return DETECT_PRED_EOR;
-		}
-
-		if ( lookBusy.contains(s) ) {
-			// return a copy of an empty set; we may modify set inline
-			return DETECT_PRED_NOT_FOUND;
-		}
-		lookBusy.add(s);
-
-		Transition transition0 = s.transition[0];
-		if ( transition0==null ) {
-			return DETECT_PRED_NOT_FOUND;
-		}
-
-		if ( !(transition0.label.isSemanticPredicate()||
-			   transition0.label.isEpsilon()) ) {
-			return DETECT_PRED_NOT_FOUND;
-		}
-
-		if ( transition0.label.isSemanticPredicate() ) {
-			//System.out.println("pred "+transition0.label);
-			SemanticContext ctx = transition0.label.getSemanticContext();
-			SemanticContext.Predicate p = (SemanticContext.Predicate)ctx;
-			if ( p.predicateAST.getType() != ANTLRParser.BACKTRACK_SEMPRED ) {
-				return DETECT_PRED_FOUND;
-			}
-		}
-		
-		/*
-		if ( transition0.label.isSemanticPredicate() ) {
-			System.out.println("pred "+transition0.label);
-			SemanticContext ctx = transition0.label.getSemanticContext();
-			SemanticContext.Predicate p = (SemanticContext.Predicate)ctx;
-			// if a non-syn-pred found not in enclosingRule, say we found one
-			if ( p.predicateAST.getType() != ANTLRParser.BACKTRACK_SEMPRED &&
-				 !p.predicateAST.enclosingRuleName.equals(enclosingRule.name) )
-			{
-				System.out.println("found pred "+p+" not in "+enclosingRule.name);
-				return DETECT_PRED_FOUND;
-			}
-		}
-		*/
-
-		int result = _detectConfoundingPredicates((NFAState)transition0.target,
-												  enclosingRule,
-												  chaseFollowTransitions);
-		if ( result == DETECT_PRED_FOUND ) {
-			return DETECT_PRED_FOUND;
-		}
-
-		if ( result == DETECT_PRED_EOR ) {
-			if ( transition0 instanceof RuleClosureTransition ) {
-				// we called a rule that found the end of the rule.
-				// That means the rule is nullable and we need to
-				// keep looking at what follows the rule ref.  E.g.,
-				// a : b A ; where b is nullable means that LOOK(a)
-				// should include A.
-				RuleClosureTransition ruleInvocationTrans =
-					(RuleClosureTransition)transition0;
-				NFAState following = (NFAState) ruleInvocationTrans.followState;
-				int afterRuleResult =
-					_detectConfoundingPredicates(following,
-												 enclosingRule,
-												 chaseFollowTransitions);
-				if ( afterRuleResult == DETECT_PRED_FOUND ) {
-					return DETECT_PRED_FOUND;
-				}
-			}
-		}
-
-		Transition transition1 = s.transition[1];
-		if ( transition1!=null ) {
-			int t1Result =
-				_detectConfoundingPredicates((NFAState)transition1.target,
-											 enclosingRule,
-											 chaseFollowTransitions);
-			if ( t1Result == DETECT_PRED_FOUND ) {
-				return DETECT_PRED_FOUND;
-			}
-		}
-
-		return DETECT_PRED_NOT_FOUND;
-	}
-
-	/** Return predicate expression found via epsilon edges from s.  Do
-	 *  not look into other rules for now.  Do something simple.  Include
-	 *  backtracking synpreds.
-	 */
-	public SemanticContext getPredicates(NFAState altStartState) {
-		lookBusy.clear();
-		return _getPredicates(altStartState, altStartState);
-	}
-
-	protected SemanticContext _getPredicates(NFAState s, NFAState altStartState) {
-		//System.out.println("_getPredicates("+s+")");
-		if ( s.isAcceptState() ) {
-			return null;
-		}
-
-		// avoid infinite loops from (..)* etc...
-		if ( lookBusy.contains(s) ) {
-			return null;
-		}
-		lookBusy.add(s);
-
-		Transition transition0 = s.transition[0];
-		// no transitions
-		if ( transition0==null ) {
-			return null;
-		}
-
-		// not a predicate and not even an epsilon
-		if ( !(transition0.label.isSemanticPredicate()||
-			   transition0.label.isEpsilon()) ) {
-			return null;
-		}
-
-		SemanticContext p = null;
-		SemanticContext p0 = null;
-		SemanticContext p1 = null;
-		if ( transition0.label.isSemanticPredicate() ) {
-			//System.out.println("pred "+transition0.label);
-			p = transition0.label.getSemanticContext();
-			// ignore backtracking preds not on left edge for this decision
-			if ( ((SemanticContext.Predicate)p).predicateAST.getType() ==
-				  ANTLRParser.BACKTRACK_SEMPRED  &&
-				 s == altStartState.transition[0].target )
-			{
-				p = null; // don't count
-			}
-		}
-
-		// get preds from beyond this state
-		p0 = _getPredicates((NFAState)transition0.target, altStartState);
-
-		// get preds from other transition
-		Transition transition1 = s.transition[1];
-		if ( transition1!=null ) {
-			p1 = _getPredicates((NFAState)transition1.target, altStartState);
-		}
-
-		// join this&following-right|following-down
-		return SemanticContext.and(p,SemanticContext.or(p0,p1));
-	}
-}
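
FIRST/_FIRST above walk NFA transitions with a lookBusy set to break computation cycles, and treat end-of-rule specially so that nullable rules pull in what follows (the a : b A ; example in the comment). A minimal sketch of the same FIRST-set idea over a hypothetical rule-to-alternatives grammar map, not ANTLR's NFA representation; the busy set plays the role of lookBusy:

import java.util.*;

final class FirstSetSketch {
    static final String EPSILON = "<eps>";

    /** FIRST of a symbol: tokens that can begin it; EPSILON if it can derive the empty string. */
    static Set<String> first(String symbol, Map<String, List<List<String>>> grammar,
                             Set<String> busy) {
        if (!grammar.containsKey(symbol)) return Set.of(symbol);   // terminal: FIRST is itself
        if (!busy.add(symbol)) return Set.of();                    // cycle: contribute nothing
        Set<String> result = new HashSet<>();
        for (List<String> alt : grammar.get(symbol)) {
            boolean allNullable = true;
            for (String s : alt) {
                Set<String> f = first(s, grammar, busy);
                for (String t : f) if (!t.equals(EPSILON)) result.add(t);
                if (!f.contains(EPSILON)) { allNullable = false; break; }
            }
            if (allNullable) result.add(EPSILON);                  // this alt can derive epsilon
        }
        busy.remove(symbol);
        return result;
    }

    public static void main(String[] args) {
        // a : b 'A' ;   b : 'B' | /* epsilon */ ;
        Map<String, List<List<String>>> g = Map.of(
            "a", List.of(List.of("b", "A")),
            "b", List.of(List.of("B"), List.<String>of()));
        System.out.println(first("a", g, new HashSet<>()));  // prints A and B (order may vary)
    }
}

Here FIRST(a) comes out as {A, B} because b is nullable, which is exactly the situation the EOR_TOKEN_TYPE handling above exists to cover.
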
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/LL1DFA.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/LL1DFA.java
deleted file mode 100644
index d8f6759..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/LL1DFA.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.misc.IntervalSet;
-import org.antlr.misc.MultiMap;
-
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-
-/** A special DFA that is exactly LL(1) or LL(1) with backtracking mode
- *  predicates to resolve edge set collisions.
- */
-public class LL1DFA extends DFA {
-	/** From list of lookahead sets (one per alt in decision), create
-	 *  an LL(1) DFA.  One edge per set.
-	 *
-	 *  s0-{alt1}->:o=>1
-	 *  | \
-	 *  |  -{alt2}->:o=>2
-	 *  |
-	 *  ...
-	 */
-	public LL1DFA(int decisionNumber, NFAState decisionStartState, LookaheadSet[] altLook) {
-		DFAState s0 = newState();
-		startState = s0;
-		nfa = decisionStartState.nfa;
-		nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
-		this.decisionNumber = decisionNumber;
-		this.decisionNFAStartState = decisionStartState;
-		initAltRelatedInfo();
-		unreachableAlts = null;
-		for (int alt=1; alt<altLook.length; alt++) {
-			DFAState acceptAltState = newState();
-			acceptAltState.acceptState = true;
-			setAcceptState(alt, acceptAltState);
-			acceptAltState.k = 1;
-			acceptAltState.cachedUniquelyPredicatedAlt = alt;
-			Label e = getLabelForSet(altLook[alt].tokenTypeSet);
-			s0.addTransition(acceptAltState, e);
-		}
-	}
-
-	/** From a set of edgeset->list-of-alts mappings, create a DFA
-	 *  that uses syn preds for all |list-of-alts|>1.
-	 */
-	public LL1DFA(int decisionNumber,
-				  NFAState decisionStartState,
-				  MultiMap<IntervalSet, Integer> edgeMap)
-	{
-		DFAState s0 = newState();
-		startState = s0;
-		nfa = decisionStartState.nfa;
-		nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
-		this.decisionNumber = decisionNumber;
-		this.decisionNFAStartState = decisionStartState;
-		initAltRelatedInfo();
-		unreachableAlts = null;
-		for (Iterator it = edgeMap.keySet().iterator(); it.hasNext();) {
-			IntervalSet edge = (IntervalSet)it.next();
-			List<Integer> alts = edgeMap.get(edge);
-			Collections.sort(alts); // make sure alts are attempted in order
-			//System.out.println(edge+" -> "+alts);
-			DFAState s = newState();
-			s.k = 1;
-			Label e = getLabelForSet(edge);
-			s0.addTransition(s, e);
-			if ( alts.size()==1 ) {
-				s.acceptState = true;
-				int alt = alts.get(0);
-				setAcceptState(alt, s);
-				s.cachedUniquelyPredicatedAlt = alt;
-			}
-			else {
-				// resolve with syntactic predicates.  Add edges from
-				// state s that test predicates.
-				s.resolvedWithPredicates = true;
-				for (int i = 0; i < alts.size(); i++) {
-					int alt = (int)alts.get(i);
-					s.cachedUniquelyPredicatedAlt =	NFA.INVALID_ALT_NUMBER;
-					DFAState predDFATarget = getAcceptState(alt);
-					if ( predDFATarget==null ) {
-						predDFATarget = newState(); // create if not there.
-						predDFATarget.acceptState = true;
-						predDFATarget.cachedUniquelyPredicatedAlt =	alt;
-						setAcceptState(alt, predDFATarget);
-					}
-					// add a transition to pred target from d
-					/*
-					int walkAlt =
-						decisionStartState.translateDisplayAltToWalkAlt(alt);
-					NFAState altLeftEdge = nfa.grammar.getNFAStateForAltOfDecision(decisionStartState, walkAlt);
-					NFAState altStartState = (NFAState)altLeftEdge.transition[0].target;
-					SemanticContext ctx = nfa.grammar.ll1Analyzer.getPredicates(altStartState);
-					System.out.println("sem ctx = "+ctx);
-					if ( ctx == null ) {
-						ctx = new SemanticContext.TruePredicate();
-					}
-					s.addTransition(predDFATarget, new Label(ctx));
-					*/
-					SemanticContext.Predicate synpred =
-						getSynPredForAlt(decisionStartState, alt);
-					if ( synpred == null ) {
-						synpred = new SemanticContext.TruePredicate();
-					}
-					s.addTransition(predDFATarget, new PredicateLabel(synpred));
-				}
-			}
-		}
-		//System.out.println("dfa for preds=\n"+this);
-	}
-
-	protected Label getLabelForSet(IntervalSet edgeSet) {
-		Label e = null;
-		int atom = edgeSet.getSingleElement();
-		if ( atom != Label.INVALID ) {
-			e = new Label(atom);
-		}
-		else {
-			e = new Label(edgeSet);
-		}
-		return e;
-	}
-
-	protected SemanticContext.Predicate getSynPredForAlt(NFAState decisionStartState,
-														 int alt)
-	{
-		int walkAlt =
-			decisionStartState.translateDisplayAltToWalkAlt(alt);
-		NFAState altLeftEdge =
-			nfa.grammar.getNFAStateForAltOfDecision(decisionStartState, walkAlt);
-		NFAState altStartState = (NFAState)altLeftEdge.transition[0].target;
-		//System.out.println("alt "+alt+" start state = "+altStartState.stateNumber);
-		if ( altStartState.transition[0].isSemanticPredicate() ) {
-			SemanticContext ctx = altStartState.transition[0].label.getSemanticContext();
-			if ( ctx.isSyntacticPredicate() ) {
-				SemanticContext.Predicate p = (SemanticContext.Predicate)ctx;
-				if ( p.predicateAST.getType() == ANTLRParser.BACKTRACK_SEMPRED ) {
-					/*
-					System.out.println("syn pred for alt "+walkAlt+" "+
-									   ((SemanticContext.Predicate)altStartState.transition[0].label.getSemanticContext()).predicateAST);
-					*/
-					if ( ctx.isSyntacticPredicate() ) {
-						nfa.grammar.synPredUsedInDFA(this, ctx);
-					}
-					return (SemanticContext.Predicate)altStartState.transition[0].label.getSemanticContext();
-				}
-			}
-		}
-		return null;
-	}
-}
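
Note: the constructor comments above describe the core LL(1) construction: one accept state per alternative, one edge per lookahead set, and syntactic predicates only where edge sets collide. The following standalone Java sketch illustrates just the edge-set bookkeeping; the names (Ll1Sketch, buildDecisionTable) are hypothetical and not part of ANTLR's API.

import java.util.*;

public class Ll1Sketch {
    /** Map each token type to the 1-based alt it predicts; a collision means LL(1) fails. */
    static Map<Integer, Integer> buildDecisionTable(List<Set<Integer>> altLook) {
        Map<Integer, Integer> edges = new HashMap<>();
        for (int alt = 1; alt <= altLook.size(); alt++) {
            for (int tokenType : altLook.get(alt - 1)) {
                Integer prev = edges.putIfAbsent(tokenType, alt);
                if (prev != null && prev != alt) {
                    // In the real tool this is where backtracking/synpreds take over.
                    throw new IllegalStateException("edge-set collision on token "
                            + tokenType + ": alts " + prev + " and " + alt);
                }
            }
        }
        return edges;
    }

    public static void main(String[] args) {
        // Two alternatives: alt 1 starts with token 4 or 5, alt 2 with token 6.
        List<Set<Integer>> altLook = List.of(Set.of(4, 5), Set.of(6));
        System.out.println(buildDecisionTable(altLook)); // e.g. {4=1, 5=1, 6=2}
    }
}
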
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/Label.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/Label.java
deleted file mode 100644
index 3a8a976..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/Label.java
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.misc.IntSet;
-import org.antlr.misc.IntervalSet;
-import org.antlr.tool.Grammar;
-
-/** A state machine transition label.  A label can be a simple label,
- *  such as a token or character, or a set of chars or tokens.  It can be
- *  an epsilon transition.  It can be a semantic predicate (which assumes
- *  an epsilon transition) or a tree of predicates (in a DFA).
- *  Special label types have to be < 0 to avoid conflict with char values.
- */
-public class Label implements Comparable, Cloneable {
-    public static final int INVALID = -7;
-
-	public static final int ACTION = -6;
-	
-	public static final int EPSILON = -5;
-
-    public static final String EPSILON_STR = "<EPSILON>";
-
-    /** label is a semantic predicate; implies label is epsilon also */
-    public static final int SEMPRED = -4;
-
-    /** label is a set of tokens or char */
-    public static final int SET = -3;
-
-    /** End of Token is like EOF for lexer rules.  It implies that no more
-     *  characters are available and that NFA conversion should terminate
-     *  for this path.  For example
-     *
-     *  A : 'a' 'b' | 'a' ;
-     *
-     *  yields a DFA predictor:
-     *
-     *  o-a->o-b->1   predict alt 1
-     *       |
-     *       |-EOT->o predict alt 2
-     *
-     *  To generate code for EOT, treat it as the "default" path, which
-     *  implies there is no way to mismatch a char for the state from
-     *  which the EOT emanates.
-     */
-    public static final int EOT = -2;
-
-    public static final int EOF = -1;
-
-	/** We have labels like EPSILON that are below 0; it's hard to
-	 *  store them in an array with negative index so use this
-	 *  constant as an index shift when accessing arrays based upon
-	 *  token type.  If real token type is i, then array index would be
-	 *  NUM_FAUX_LABELS + i.
-	 */
-	public static final int NUM_FAUX_LABELS = -INVALID;
-
-    /** Anything at this value or larger can be considered a simple atom int
-     *  for easy comparison during analysis only; faux labels are not used
-	 *  during parse time for real token types or char values.
-     */
-    public static final int MIN_ATOM_VALUE = EOT;
-
-    // TODO: is 0 a valid unicode char? max is FFFF -1, right?
-    public static final int MIN_CHAR_VALUE = '\u0000';
-    public static final int MAX_CHAR_VALUE = '\uFFFF';
-
-	/** End of rule token type; imaginary token type used only for
-	 *  local, partial FOLLOW sets to indicate that the local FOLLOW
-	 *  hit the end of rule.  During error recovery, the local FOLLOW
-	 *  of a token reference may go beyond the end of the rule and have
-	 *  to use FOLLOW(rule).  I have to just shift the token types to 2..n
-	 *  rather than 1..n to accommodate this imaginary token in my bitsets.
-	 *  If I didn't use a bitset implementation for runtime sets, I wouldn't
-	 *  need this.  EOF is another candidate for a run time token type for
-	 *  parsers.  Follow sets are not computed for lexers so we do not have
-	 *  this issue.
-	 */
-	public static final int EOR_TOKEN_TYPE =
-		org.antlr.runtime.Token.EOR_TOKEN_TYPE;
-
-	public static final int DOWN = org.antlr.runtime.Token.DOWN;
-	public static final int UP = org.antlr.runtime.Token.UP;
-
-    /** tokens and char range overlap; tokens are MIN_TOKEN_TYPE..n */
-	public static final int MIN_TOKEN_TYPE =
-		org.antlr.runtime.Token.MIN_TOKEN_TYPE;
-
-    /** The wildcard '.' char atom implies all valid characters==UNICODE */
-    //public static final IntSet ALLCHAR = IntervalSet.of(MIN_CHAR_VALUE,MAX_CHAR_VALUE);
-
-    /** The token type or character value, or a value signifying a special label. */
-    protected int label;
-
-    /** A set of token types or character codes if label==SET */
-	// TODO: try IntervalSet for everything
-    protected IntSet labelSet;
-
-    public Label(int label) {
-        this.label = label;
-    }
-
-    /** Make a set label */
-    public Label(IntSet labelSet) {
-		if ( labelSet==null ) {
-			this.label = SET;
-			this.labelSet = IntervalSet.of(INVALID);
-			return;
-		}
-		int singleAtom = labelSet.getSingleElement();
-        if ( singleAtom!=INVALID ) {
-            // convert back to a single atomic element if |labelSet|==1
-            label = singleAtom;
-            return;
-        }
-        this.label = SET;
-        this.labelSet = labelSet;
-    }
-
-	public Object clone() {
-		Label l;
-		try {
-			l = (Label)super.clone();
-			l.label = this.label;
-            l.labelSet = new IntervalSet();
-			l.labelSet.addAll(this.labelSet);
-		}
-		catch (CloneNotSupportedException e) {
-			throw new InternalError();
-		}
-		return l;
-	}
-
-	public void add(Label a) {
-		if ( isAtom() ) {
-			labelSet = IntervalSet.of(label);
-			label=SET;
-			if ( a.isAtom() ) {
-				labelSet.add(a.getAtom());
-			}
-			else if ( a.isSet() ) {
-				labelSet.addAll(a.getSet());
-			}
-			else {
-				throw new IllegalStateException("can't add element to Label of type "+label);
-			}
-			return;
-		}
-		if ( isSet() ) {
-			if ( a.isAtom() ) {
-				labelSet.add(a.getAtom());
-			}
-			else if ( a.isSet() ) {
-				labelSet.addAll(a.getSet());
-			}
-			else {
-				throw new IllegalStateException("can't add element to Label of type "+label);
-			}
-			return;
-		}
-		throw new IllegalStateException("can't add element to Label of type "+label);
-	}
-
-    public boolean isAtom() {
-        return label>=MIN_ATOM_VALUE;
-    }
-
-    public boolean isEpsilon() {
-        return label==EPSILON;
-    }
-
-	public boolean isSemanticPredicate() {
-		return false;
-	}
-
-	public boolean isAction() {
-		return false;
-	}
-
-    public boolean isSet() {
-        return label==SET;
-    }
-
-    /** return the single atom label or INVALID if not a single atom */
-    public int getAtom() {
-        if ( isAtom() ) {
-            return label;
-        }
-        return INVALID;
-    }
-
-    public IntSet getSet() {
-        if ( label!=SET ) {
-            // convert single element to a set if they ask for it.
-            return IntervalSet.of(label);
-        }
-        return labelSet;
-    }
-
-    public void setSet(IntSet set) {
-        label=SET;
-        labelSet = set;
-    }
-
-    public SemanticContext getSemanticContext() {
-        return null;
-    }
-
-	public boolean matches(int atom) {
-		if ( label==atom ) {
-			return true; // handle the single atom case efficiently
-		}
-		if ( isSet() ) {
-			return labelSet.member(atom);
-		}
-		return false;
-	}
-
-	public boolean matches(IntSet set) {
-		if ( isAtom() ) {
-			return set.member(getAtom());
-		}
-		if ( isSet() ) {
-			// matches if intersection non-nil
-			return !getSet().and(set).isNil();
-		}
-		return false;
-	}
-
-
-	public boolean matches(Label other) {
-		if ( other.isSet() ) {
-			return matches(other.getSet());
-		}
-		if ( other.isAtom() ) {
-			return matches(other.getAtom());
-		}
-		return false;
-	}
-
-    public int hashCode() {
-        if (label==SET) {
-            return labelSet.hashCode();
-		}
-		else {
-			return label;
-		}
-	}
-
-	// TODO: do we care about comparing set {A} with atom A? It doesn't now.
-	public boolean equals(Object o) {
-		if ( o==null ) {
-			return false;
-		}
-		if ( this == o ) {
-			return true; // equals if same object
-		}
-		// labels must be the same even if epsilon or set or sempred etc...
-        if ( label!=((Label)o).label ) {
-            return false;
-        }
-		if ( label==SET ) {
-			return this.labelSet.equals(((Label)o).labelSet);
-		}
-		return true;  // label values are same, so true
-    }
-
-    public int compareTo(Object o) {
-        return this.label-((Label)o).label;
-    }
-
-    /** Predicates are lists of AST nodes from the NFA created from the
-     *  grammar, but the same predicate could be cut/paste into multiple
-     *  places in the grammar.  I must compare the text of all the
-     *  predicates to truly answer whether {p1,p2} .equals {p1,p2}.
-     *  Unfortunately, I cannot rely on the AST.equals() to work properly
-     *  so I must do a brute force O(n^2) nested traversal of the Set
-     *  doing a String compare.
-     *
-     *  At this point, Labels are not compared for equals when they are
-     *  predicates, but here's the code for future use.
-     */
-    /*
-    protected boolean predicatesEquals(Set others) {
-        Iterator iter = semanticContext.iterator();
-        while (iter.hasNext()) {
-            AST predAST = (AST) iter.next();
-            Iterator inner = semanticContext.iterator();
-            while (inner.hasNext()) {
-                AST otherPredAST = (AST) inner.next();
-                if ( !predAST.getText().equals(otherPredAST.getText()) ) {
-                    return false;
-                }
-            }
-        }
-        return true;
-    }
-      */
-
-    public String toString() {
-        switch (label) {
-            case SET :
-                return labelSet.toString();
-            default :
-                return String.valueOf(label);
-        }
-    }
-
-    public String toString(Grammar g) {
-        switch (label) {
-            case SET :
-                return labelSet.toString(g);
-            default :
-                return g.getTokenDisplayName(label);
-        }
-    }
-
-    /*
-    public String predicatesToString() {
-        if ( semanticContext==NFAConfiguration.DEFAULT_CLAUSE_SEMANTIC_CONTEXT ) {
-            return "!other preds";
-        }
-        StringBuffer buf = new StringBuffer();
-        Iterator iter = semanticContext.iterator();
-        while (iter.hasNext()) {
-            AST predAST = (AST) iter.next();
-            buf.append(predAST.getText());
-            if ( iter.hasNext() ) {
-                buf.append("&");
-            }
-        }
-        return buf.toString();
-    }
-    */
-
-	public static boolean intersect(Label label, Label edgeLabel) {
-		boolean hasIntersection = false;
-		boolean labelIsSet = label.isSet();
-		boolean edgeIsSet = edgeLabel.isSet();
-		if ( !labelIsSet && !edgeIsSet && edgeLabel.label==label.label ) {
-			hasIntersection = true;
-		}
-		else if ( labelIsSet && edgeIsSet &&
-				  !edgeLabel.getSet().and(label.getSet()).isNil() ) {
-			hasIntersection = true;
-		}
-		else if ( labelIsSet && !edgeIsSet &&
-				  label.getSet().member(edgeLabel.label) ) {
-			hasIntersection = true;
-		}
-		else if ( !labelIsSet && edgeIsSet &&
-				  edgeLabel.getSet().member(label.label) ) {
-			hasIntersection = true;
-		}
-		return hasIntersection;
-	}
-}
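
Note: the Label class above distinguishes single-atom labels from set labels and answers matches()/intersect() queries over them. The sketch below is a hypothetical, self-contained illustration of that idea using java.util.BitSet in place of IntervalSet; it models only non-negative token types and omits the faux labels (< 0), epsilon, and predicates.

import java.util.BitSet;

public class LabelSketch {
    final BitSet set = new BitSet();   // a single atom is just a one-element set here

    LabelSketch(int atom)  { set.set(atom); }
    LabelSketch(BitSet s)  { set.or(s); }

    boolean matches(int atom)              { return set.get(atom); }
    boolean intersects(LabelSketch other)  { return set.intersects(other.set); }

    public static void main(String[] args) {
        LabelSketch atom = new LabelSketch(5);
        BitSet s = new BitSet();
        s.set(5);
        s.set(9);
        LabelSketch range = new LabelSketch(s);
        System.out.println(atom.matches(5));         // true
        System.out.println(atom.intersects(range));  // true: non-empty intersection {5}
    }
}
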
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/LookaheadSet.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/LookaheadSet.java
deleted file mode 100644
index 7325cc8..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/LookaheadSet.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.misc.IntSet;
-import org.antlr.misc.IntervalSet;
-import org.antlr.tool.Grammar;
-
-/** An LL(1) lookahead set; contains a set of token types and a "hasEOF"
- *  condition when the set contains EOF.  Since EOF is -1 everywhere and -1
- *  cannot be stored in my BitSet, I set a condition here.  There may be other
- *  reasons in the future to abstract a LookaheadSet over a raw BitSet.
- */
-public class LookaheadSet {
-	public IntervalSet tokenTypeSet;
-
-	public LookaheadSet() {
-		tokenTypeSet = new IntervalSet();
-	}
-
-	public LookaheadSet(IntSet s) {
-		this();
-		tokenTypeSet.addAll(s);
-	}
-
-	public LookaheadSet(int atom) {
-		tokenTypeSet = IntervalSet.of(atom);
-	}
-
-    public LookaheadSet(LookaheadSet other) {
-        this();
-        this.tokenTypeSet.addAll(other.tokenTypeSet);
-    }
-
-    public void orInPlace(LookaheadSet other) {
-		this.tokenTypeSet.addAll(other.tokenTypeSet);
-	}
-
-	public LookaheadSet or(LookaheadSet other) {
-		return new LookaheadSet(tokenTypeSet.or(other.tokenTypeSet));
-	}
-
-	public LookaheadSet subtract(LookaheadSet other) {
-		return new LookaheadSet(this.tokenTypeSet.subtract(other.tokenTypeSet));
-	}
-
-	public boolean member(int a) {
-		return tokenTypeSet.member(a);
-	}
-
-	public LookaheadSet intersection(LookaheadSet s) {
-		IntSet i = this.tokenTypeSet.and(s.tokenTypeSet);
-		LookaheadSet intersection = new LookaheadSet(i);
-		return intersection;
-	}
-
-	public boolean isNil() {
-		return tokenTypeSet.isNil();
-	}
-
-	public void remove(int a) {
-		tokenTypeSet = (IntervalSet)tokenTypeSet.subtract(IntervalSet.of(a));
-	}
-
-	public int hashCode() {
-		return tokenTypeSet.hashCode();
-	}
-
-	public boolean equals(Object other) {
-		return tokenTypeSet.equals(((LookaheadSet)other).tokenTypeSet);
-	}
-
-	public String toString(Grammar g) {
-		if ( tokenTypeSet==null ) {
-			return "";
-		}
-		String r = tokenTypeSet.toString(g);
-		return r;
-	}
-
-	public String toString() {
-		return toString(null);
-	}
-}
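
Note: LookaheadSet above is a thin wrapper over IntervalSet offering union, subtraction, and membership. As a hypothetical sketch only, the class below shows the same operations over java.util.BitSet; because a BitSet cannot hold EOF (-1), the sketch shifts every value by +1, whereas the real code's IntervalSet stores negative values directly.

import java.util.BitSet;

public class LookaheadSketch {
    private final BitSet bits = new BitSet();

    void add(int tokenType)           { bits.set(tokenType + 1); }     // +1 shift so EOF (-1) fits
    boolean member(int tokenType)     { return bits.get(tokenType + 1); }
    void orInPlace(LookaheadSketch o) { bits.or(o.bits); }
    void subtract(LookaheadSketch o)  { bits.andNot(o.bits); }
    boolean isNil()                   { return bits.isEmpty(); }

    public static void main(String[] args) {
        LookaheadSketch a = new LookaheadSketch();
        a.add(-1);                        // EOF
        a.add(4);
        LookaheadSketch b = new LookaheadSketch();
        b.add(4);
        a.subtract(b);
        System.out.println(a.member(-1)); // true
        System.out.println(a.member(4));  // false: removed by subtract
    }
}
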
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/MachineProbe.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/MachineProbe.java
deleted file mode 100644
index e5da266..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/MachineProbe.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.analysis;
-
-import org.antlr.misc.IntSet;
-import org.antlr.runtime.CommonToken;
-import org.antlr.runtime.Token;
-import org.antlr.tool.Grammar;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-public class MachineProbe {
-	DFA dfa;
-
-	public MachineProbe(DFA dfa) {
-		this.dfa = dfa;
-	}
-
-	List<DFAState> getAnyDFAPathToTarget(DFAState targetState) {
-		Set<DFAState> visited = new HashSet<DFAState>();
-		return getAnyDFAPathToTarget(dfa.startState, targetState, visited);
-	}
-
-	public List<DFAState> getAnyDFAPathToTarget(DFAState startState,
-			DFAState targetState, Set<DFAState> visited) {
-		List<DFAState> dfaStates = new ArrayList<DFAState>();
-		visited.add(startState);
-		if (startState.equals(targetState)) {
-			dfaStates.add(targetState);
-			return dfaStates;
-		}
-		// for (Edge e : startState.edges) { // walk edges looking for valid
-		// path
-		for (int i = 0; i < startState.getNumberOfTransitions(); i++) {
-			Transition e = startState.getTransition(i);
-			if (!visited.contains(e.target)) {
-				List<DFAState> path = getAnyDFAPathToTarget(
-						(DFAState) e.target, targetState, visited);
-				if (path != null) { // found path, we're done
-					dfaStates.add(startState);
-					dfaStates.addAll(path);
-					return dfaStates;
-				}
-			}
-		}
-		return null;
-	}
-
-	/** Return a list of edge labels from start state to targetState. */
-	public List<IntSet> getEdgeLabels(DFAState targetState) {
-		List<DFAState> dfaStates = getAnyDFAPathToTarget(targetState);
-		List<IntSet> labels = new ArrayList<IntSet>();
-		for (int i = 0; i < dfaStates.size() - 1; i++) {
-			DFAState d = dfaStates.get(i);
-			DFAState nextState = dfaStates.get(i + 1);
-			// walk looking for edge whose target is next dfa state
-			for (int j = 0; j < d.getNumberOfTransitions(); j++) {
-				Transition e = d.getTransition(j);
-				if (e.target.stateNumber == nextState.stateNumber) {
-					labels.add(e.label.getSet());
-				}
-			}
-		}
-		return labels;
-	}
-
-	/**
-	 * Given List<IntSet>, return a String with a useful representation of the
-	 * associated input string. One could show something different for lexers
-	 * and parsers, for example.
-	 */
-	public String getInputSequenceDisplay(Grammar g, List<IntSet> labels) {
-		List<String> tokens = new ArrayList<String>();
-		for (IntSet label : labels)
-			tokens.add(label.toString(g));
-		return tokens.toString();
-	}
-
-	/**
-	 * Given an alternative associated with a DFA state, return the list of
-	 * tokens (from grammar) associated with the path through the NFA following the
-	 * labels sequence. The nfaStates gives the set of NFA states associated
-	 * with alt that take us from start to stop. One of the NFA states in
-	 * nfaStates[i] will have an edge intersecting with labels[i].
-	 */
-	public List<Token> getGrammarLocationsForInputSequence(
-			List<Set<NFAState>> nfaStates, List<IntSet> labels) {
-		List<Token> tokens = new ArrayList<Token>();
-		for (int i = 0; i < nfaStates.size() - 1; i++) {
-			Set<NFAState> cur = nfaStates.get(i);
-			Set<NFAState> next = nfaStates.get(i + 1);
-			IntSet label = labels.get(i);
-			// find NFA state with edge whose label matches labels[i]
-			nfaConfigLoop: 
-			
-			for (NFAState p : cur) {
-				// walk p's transitions, looking for label
-				for (int j = 0; j < p.getNumberOfTransitions(); j++) {
-					Transition t = p.transition(j);
-					if (!t.isEpsilon() && !t.label.getSet().and(label).isNil()
-							&& next.contains(t.target)) {
-						if (p.associatedASTNode != null) {
-							Token oldtoken = p.associatedASTNode.token;
-							CommonToken token = new CommonToken(oldtoken
-									.getType(), oldtoken.getText());
-							token.setLine(oldtoken.getLine());
-							token.setCharPositionInLine(oldtoken.getCharPositionInLine());
-							tokens.add(token);
-							break nfaConfigLoop; // found path, move to next
-													// NFAState set
-						}
-					}
-				}
-			}
-		}
-		return tokens;
-	}
-
-	// /** Used to find paths through syntactically ambiguous DFA. If we've
-	// * seen statement number before, what did we learn?
-	// */
-	// protected Map<Integer, Integer> stateReachable;
-	//
-	// public Map<DFAState, Set<DFAState>> getReachSets(Collection<DFAState>
-	// targets) {
-	// Map<DFAState, Set<DFAState>> reaches = new HashMap<DFAState,
-	// Set<DFAState>>();
-	// // targets can reach themselves
-	// for (final DFAState d : targets) {
-	// reaches.put(d,new HashSet<DFAState>() {{add(d);}});
-	// }
-	//
-	// boolean changed = true;
-	// while ( changed ) {
-	// changed = false;
-	// for (DFAState d : dfa.states.values()) {
-	// if ( d.getNumberOfEdges()==0 ) continue;
-	// Set<DFAState> r = reaches.get(d);
-	// if ( r==null ) {
-	// r = new HashSet<DFAState>();
-	// reaches.put(d, r);
-	// }
-	// int before = r.size();
-	// // add all reaches from all edge targets
-	// for (Edge e : d.edges) {
-	// //if ( targets.contains(e.target) ) r.add(e.target);
-	// r.addAll( reaches.get(e.target) );
-	// }
-	// int after = r.size();
-	// if ( after>before) changed = true;
-	// }
-	// }
-	// return reaches;
-	// }
-
-}
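
Note: getAnyDFAPathToTarget() above is a depth-first search that records the state sequence from the start state to a target state. The following is an illustrative, self-contained version of that search over a plain adjacency map of integer state numbers; the names are hypothetical, and the DFA-specific details (transitions, labels) are deliberately left out.

import java.util.*;

public class PathProbeSketch {
    static List<Integer> anyPathToTarget(Map<Integer, List<Integer>> edges,
                                         int start, int target, Set<Integer> visited) {
        visited.add(start);
        if (start == target) return new ArrayList<>(List.of(target));
        for (int next : edges.getOrDefault(start, List.of())) {
            if (visited.contains(next)) continue;
            List<Integer> path = anyPathToTarget(edges, next, target, visited);
            if (path != null) {            // found a path below; prepend the current state
                path.add(0, start);
                return path;
            }
        }
        return null;                       // target unreachable from this state
    }

    public static void main(String[] args) {
        Map<Integer, List<Integer>> edges =
                Map.of(0, List.of(1, 2), 1, List.of(3), 2, List.of(3));
        System.out.println(anyPathToTarget(edges, 0, 3, new HashSet<>())); // [0, 1, 3]
    }
}
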
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAConfiguration.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAConfiguration.java
deleted file mode 100644
index 37f3d38..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAConfiguration.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.misc.Utils;
-
-/** An NFA state, predicted alt, and syntactic/semantic context.
- *  The syntactic context is a pointer into the rule invocation
- *  chain used to arrive at the state.  The semantic context is
- *  the unordered set of semantic predicates encountered before reaching
- *  an NFA state.
- */
-public class NFAConfiguration {
-    /** The NFA state associated with this configuration */
-    public int state;
-
-    /** What alt is predicted by this configuration */
-    public int alt;
-
-    /** What is the stack of rule invocations that got us to state? */
-    public NFAContext context;
-
-    /** The set of semantic predicates associated with this NFA
-     *  configuration.  The predicates were found on the way to
-     *  the associated NFA state in this syntactic context.
-     *  Set<AST>: track nodes in grammar containing the predicate
-     *  for error messages and such (nice to know where the predicate
-     *  came from in case of duplicates etc...).  By using a set,
-     *  the equals() method will correctly show {pred1,pred2} as equals()
-     *  to {pred2,pred1}.
-     */
-    public SemanticContext semanticContext = SemanticContext.EMPTY_SEMANTIC_CONTEXT;
-
-    /** Indicate that this configuration has been resolved and no further
-     *  DFA processing should occur with it.  Essentially, this is used
-     *  as an "ignore" bit so that upon a set of nondeterministic configurations
-     *  such as (s|2) and (s|3), I can set (s|3) to resolved=true (and any
-     *  other configuration associated with alt 3).
-     */
-    protected boolean resolved;
-
-    /** This bit is used to indicate a semantic predicate will be
-     *  used to resolve the conflict.  Method
-     *  DFA.findNewDFAStatesAndAddDFATransitions will add edges for
-     *  the predicates after it performs the reach operation.  The
-     *  nondeterminism resolver sets this when it finds a set of
-     *  nondeterministic configurations (as it does for the "resolved" field)
-     *  that have enough predicates to resolve the conflict.
-     */
-    protected boolean resolveWithPredicate;
-
-    /** Lots of NFA states have only epsilon edges (1 or 2).  We can
-     *  safely consider only n>0 during closure.
-     */
-    protected int numberEpsilonTransitionsEmanatingFromState;
-
-    /** Indicates that the NFA state associated with this configuration
-     *  has exactly one transition and it's an atom (not epsilon etc...).
-     */
-    protected boolean singleAtomTransitionEmanating;
-
-	//protected boolean addedDuringClosure = true;
-
-	public NFAConfiguration(int state,
-                            int alt,
-                            NFAContext context,
-                            SemanticContext semanticContext)
-    {
-        this.state = state;
-        this.alt = alt;
-        this.context = context;
-        this.semanticContext = semanticContext;
-    }
-
-    /** An NFA configuration is equal to another if both have
-     *  the same state, they predict the same alternative, and
-     *  their syntactic/semantic contexts are the same.  I don't think
-     *  the state|alt|ctx could be the same and have two different
-     *  semantic contexts, but might as well define equals to be
-     *  everything.
-     */
-    public boolean equals(Object o) {
-		if ( o==null ) {
-			return false;
-		}
-        NFAConfiguration other = (NFAConfiguration)o;
-        return this.state==other.state &&
-               this.alt==other.alt &&
-               this.context.equals(other.context)&&
-               this.semanticContext.equals(other.semanticContext);
-    }
-
-    public int hashCode() {
-        int h = state + alt + context.hashCode();
-        return h;
-    }
-
-	public String toString() {
-		return toString(true);
-	}
-
-	public String toString(boolean showAlt) {
-		StringBuffer buf = new StringBuffer();
-		buf.append(state);
-		if ( showAlt ) {
-			buf.append("|");
-			buf.append(alt);
-		}
-		if ( context.parent!=null ) {
-            buf.append("|");
-            buf.append(context);
-        }
-        if ( semanticContext!=null &&
-             semanticContext!=SemanticContext.EMPTY_SEMANTIC_CONTEXT ) {
-            buf.append("|");
-			String escQuote = Utils.replace(semanticContext.toString(), "\"", "\\\"");
-			buf.append(escQuote);
-        }
-        if ( resolved ) {
-            buf.append("|resolved");
-        }
-		if ( resolveWithPredicate ) {
-			buf.append("|resolveWithPredicate");
-		}
-		return buf.toString();
-    }
-}
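
Note: the comments above define configuration equality as "same state, same predicted alt, same syntactic and semantic context." A Java record (Java 16+) gives exactly that value-based equals()/hashCode(); the sketch below is only an analogy, with plain strings standing in for NFAContext and SemanticContext.

public class ConfigSketch {
    record Config(int state, int alt, String context, String semanticContext) {}

    public static void main(String[] args) {
        Config a = new Config(7, 2, "[21 $]", "true");
        Config b = new Config(7, 2, "[21 $]", "true");
        System.out.println(a.equals(b));                   // true: identical tuples
        System.out.println(a.hashCode() == b.hashCode());  // true: hashes agree
    }
}
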
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAContext.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAContext.java
deleted file mode 100644
index f56ec42..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAContext.java
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-/** A tree node for tracking the call chains for NFAs that invoke
- *  other NFAs.  These trees only have to point upwards to their parents
- *  so we can walk back up the tree (i.e., pop stuff off the stack).  We
- *  never walk down from the stack through the children.
- *
- *  Each alt predicted in a decision has its own context tree,
- *  representing all possible return nodes.  The initial stack has
- *  EOF ("$") in it.  So, for m alternative productions, the lookahead
- *  DFA will have m NFAContext trees.
- *
- *  To "push" a new context, just do "new NFAContext(context-parent, state)"
- *  which will add itself to the parent.  The root is NFAContext(null, null).
- *
- *  The complete context for an NFA configuration is the set of invoking states
- *  on the path from this node thru the parent pointers to the root.
- */
-public class NFAContext {
-	/** This is similar to Bermudez's m constant in his LAR(m) where
-	 *  you bound the stack so your states don't explode.  The main difference
-	 *  is that I bound only recursion on the stack, not the simple stack size.
-	 *  This looser constraint will let the conversion roam further to find
-	 *  lookahead to resolve a decision.
-	 *
- *  Bermudez's m operates differently as it is his LR stack depth;
-	 *  I'm pretty sure it therefore includes all stack symbols.  Here I
-	 *  restrict the size of an NFA configuration to be finite because a
-	 *  stack component may mention the same NFA invocation state at
-	 *  most m times.  Hence, the number of DFA states will not grow forever.
-	 *  With recursive rules like
-	 *
-	 *    e : '(' e ')' | INT ;
-	 *
-	 *  you could chase your tail forever if somebody said "s : e '.' | e ';' ;"
-	 *  This constant prevents new states from being created after a stack gets
-	 *  "too big".  Actually (12/14/2007) I realize that this example is
-	 *  trapped by the non-LL(*) detector for recursion in > 1 alt.  Here is
-	 *  an example that trips stack overflow:
-	 *
-	 *	  s : a Y | A A A A A X ; // force recursion past m=4
-	 *	  a : A a | Q;
-	 *
-	 *  If that were:
-	 *
-	 *	  s : a Y | A+ X ;
-	 *
-	 *  it could loop forever.
-	 *
-	 *  Imagine doing a depth-first search on the e DFA...as you chase an input
- *  sequence you can recurse to the same rule, such as e above.  You'd have a
- *  chain of ((((.  When you get to some point, you have to give up.  The
-	 *  states in the chain will have longer and longer NFA config stacks.
-	 *  Must limit size.
-	 *
-	 *  max=0 implies you cannot ever jump to another rule during closure.
-	 *  max=1 implies you can make as many calls as you want--you just
-	 *        can't ever visit a state that is on your rule invocation stack.
-	 * 		  I.e., you cannot ever recurse.
-	 *  max=2 implies you are able to recurse once (i.e., call a rule twice
-	 *  	  from the same place).
-	 *
-	 *  This tracks recursion to a rule specific to an invocation site!
-	 *  It does not detect multiple calls to a rule from different rule
-	 *  invocation states.  We are guaranteed to terminate because the
-	 *  stack can only grow as big as the number of NFA states * max.
-	 *
-	 *  I noticed that the Java grammar didn't work with max=1, but did with
-	 *  max=4.  Let's set to 4. Recursion is sometimes needed to resolve some
-	 *  fixed lookahead decisions.
-	 */
-	public static int MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = 4;
-
-    public NFAContext parent;
-
-    /** The NFA state that invoked another rule's start state is recorded
-     *  on the rule invocation context stack.
-     */
-    public NFAState invokingState;
-
-    /** Computing the hashCode is very expensive and closureBusy()
-     *  uses it to track when it's seen a state|ctx before to avoid
-     *  infinite loops.  As we add new contexts, record the hash code
-     *  as this.invokingState + parent.cachedHashCode.  Avoids walking
-     *  up the tree for every hashCode().  Note that this caching works
-     *  because a context is a monotonically growing tree of context nodes
-     *  and nothing on the stack is ever modified...ctx just grows
-     *  or shrinks.
-     */
-    protected int cachedHashCode;
-
-    public NFAContext(NFAContext parent, NFAState invokingState) {
-        this.parent = parent;
-        this.invokingState = invokingState;
-        if ( invokingState!=null ) {
-            this.cachedHashCode = invokingState.stateNumber;
-        }
-        if ( parent!=null ) {
-            this.cachedHashCode += parent.cachedHashCode;
-        }
-    }
-
-	/** Two contexts are equals() if both have
-	 *  the same call stack; walk upwards to the root.
-	 *  Recall that the root sentinel node has no invokingState and no parent.
-	 *  Note that you may be comparing contexts in different alt trees.
-	 *
-	 *  The hashCode is now cheap as it's computed once upon each context
-	 *  push on the stack.  Use it to make equals() more efficient.
-	 */
-	public boolean equals(Object o) {
-		NFAContext other = ((NFAContext)o);
-		if ( this.cachedHashCode != other.cachedHashCode ) {
-			return false; // can't be same if hash is different
-		}
-		if ( this==other ) {
-			return true;
-		}
-		// System.out.println("comparing "+this+" with "+other);
-		NFAContext sp = this;
-		while ( sp.parent!=null && other.parent!=null ) {
-			if ( sp.invokingState != other.invokingState ) {
-				return false;
-			}
-			sp = sp.parent;
-			other = other.parent;
-		}
-		if ( !(sp.parent==null && other.parent==null) ) {
-			return false; // both pointers must be at their roots after walk
-		}
-		return true;
-	}
-
-	/** Two contexts conflict() if they are equals() or one is a stack suffix
-	 *  of the other.  For example, contexts [21 12 $] and [21 9 $] do not
-	 *  conflict, but [21 $] and [21 12 $] do conflict.  Note that I should
-	 *  probably not show the $ in this case.  There is a dummy node for each
-	 *  stack that just means empty; $ is a marker that's all.
-	 *
-	 *  This is used in relation to checking conflicts associated with a
-	 *  single NFA state's configurations within a single DFA state.
-	 *  If there are configurations s and t within a DFA state such that
-	 *  s.state=t.state && s.alt != t.alt && s.ctx conflicts t.ctx then
-	 *  the DFA state predicts more than a single alt--it's nondeterministic.
-	 *  Two contexts conflict if they are the same or if one is a suffix
-	 *  of the other.
-	 *
-	 *  When comparing contexts, if one context has a stack and the other
-	 *  does not then they should be considered the same context.  The only
-	 *  way for an NFA state p to have an empty context and a nonempty context
-	 *  is the case when closure falls off end of rule without a call stack
-	 *  and re-enters the rule with a context.  This resolves the issue I
-	 *  discussed with Sriram Srinivasan Feb 28, 2005 about not terminating
-	 *  fast enough upon nondeterminism.
-	 */
-	public boolean conflictsWith(NFAContext other) {
-		return this.suffix(other); // || this.equals(other);
-	}
-
-	/** [$] suffix any context
-	 *  [21 $] suffix [21 12 $]
-	 *  [21 12 $] suffix [21 $]
-	 *  [21 18 $] suffix [21 18 12 9 $]
-	 *  [21 18 12 9 $] suffix [21 18 $]
-	 *  [21 12 $] not suffix [21 9 $]
-	 *
-	 *  Example "[21 $] suffix [21 12 $]" means: rule r invoked current rule
-	 *  from state 21.  Rule s invoked rule r from state 12 which then invoked
-	 *  current rule also via state 21.  While the context prior to state 21
-	 *  is different, the fact that both contexts emanate from state 21 implies
-	 *  that they are now going to track perfectly together.  Once they
-	 *  converged on state 21, there is no way they can separate.  In other
-	 *  words, the prior stack state is not consulted when computing where to
-	 *  go in the closure operation.  α$ and αβ$ are considered the same stack.
-	 *  If α is popped off then $ and β$ remain; they are now an empty and
-	 *  nonempty context comparison.  So, if one stack is a suffix of
-	 *  another, then it will still degenerate to the simple empty stack
-	 *  comparison case.
-	 */
-	protected boolean suffix(NFAContext other) {
-		NFAContext sp = this;
-		// if one of the contexts is empty, it never enters loop and returns true
-		while ( sp.parent!=null && other.parent!=null ) {
-			if ( sp.invokingState != other.invokingState ) {
-				return false;
-			}
-			sp = sp.parent;
-			other = other.parent;
-		}
-		//System.out.println("suffix");
-		return true;
-	}
-
-    /** Walk upwards to the root of the call stack context looking
-     *  for a particular invoking state.
-	public boolean contains(int state) {
-        NFAContext sp = this;
-		int n = 0; // track recursive invocations of state
-		System.out.println("this.context is "+sp);
-		while ( sp.parent!=null ) {
-            if ( sp.invokingState.stateNumber == state ) {
-				return true;
-            }
-            sp = sp.parent;
-        }
-        return false;
-    }
-	 */
-
-	/** Given an NFA state number, how many times has the NFA-to-DFA
-	 *  conversion pushed that state on the stack?  In other words,
-	 *  the NFA state must be a rule invocation state and this method
-	 *  tells you how many times you've been to this state.  If none,
-	 *  then you have not called the target rule from this state before
-	 *  (though another NFA state could have called that target rule).
-	 *  If n=1, then you've been to this state before during this
-	 *  DFA construction and are going to invoke that rule again.
-	 *
-	 *  Note that many NFA states can invoke rule r, but we ignore recursion
-	 *  unless you hit the same rule invocation state again.
-	 */
-	public int recursionDepthEmanatingFromState(int state) {
-		NFAContext sp = this;
-		int n = 0; // track recursive invocations of target from this state
-		//System.out.println("this.context is "+sp);
-		while ( sp.parent!=null ) {
-			if ( sp.invokingState.stateNumber == state ) {
-				n++;
-			}
-			sp = sp.parent;
-		}
-		return n;
-	}
-
-    public int hashCode() {
-        return cachedHashCode;
-        /*
-        int h = 0;
-        NFAContext sp = this;
-        while ( sp.parent!=null ) {
-            h += sp.invokingState.getStateNumber();
-            sp = sp.parent;
-        }
-        return h;
-        */
-    }
-
-	/** A context is empty if there is no parent; meaning nobody pushed
-	 *  anything on the call stack.
-	 */
-	public boolean isEmpty() {
-		return parent==null;
-	}
-
-    public String toString() {
-        StringBuffer buf = new StringBuffer();
-        NFAContext sp = this;
-        buf.append("[");
-        while ( sp.parent!=null ) {
-            buf.append(sp.invokingState.stateNumber);
-            buf.append(" ");
-            sp = sp.parent;
-        }
-        buf.append("$]");
-        return buf.toString();
-    }
-}
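
Note: NFAContext above is a parent-linked stack: suffix() walks two stacks from the top and treats an exhausted (empty) stack as matching anything, and recursionDepthEmanatingFromState() counts how often one invoking state appears. The standalone sketch below reproduces those two walks with a plain int for the invoking state; it is illustrative only, not ANTLR's API.

public class ContextSketch {
    final ContextSketch parent;
    final int invokingState;              // unused on the root sentinel

    ContextSketch(ContextSketch parent, int invokingState) {
        this.parent = parent;
        this.invokingState = invokingState;
    }

    /** True if one stack is a suffix of the other (walking from the top). */
    boolean suffix(ContextSketch other) {
        ContextSketch sp = this;
        while (sp.parent != null && other.parent != null) {
            if (sp.invokingState != other.invokingState) return false;
            sp = sp.parent;
            other = other.parent;
        }
        return true;                      // one side hit its root: suffix relationship
    }

    /** How many times does the given invoking state appear on this stack? */
    int recursionDepth(int state) {
        int n = 0;
        for (ContextSketch sp = this; sp.parent != null; sp = sp.parent) {
            if (sp.invokingState == state) n++;
        }
        return n;
    }

    public static void main(String[] args) {
        ContextSketch root = new ContextSketch(null, -1);
        ContextSketch s21 = new ContextSketch(root, 21);                           // [21 $]
        ContextSketch s21_12 = new ContextSketch(new ContextSketch(root, 12), 21); // [21 12 $]
        System.out.println(s21.suffix(s21_12));        // true: [21 $] is a suffix of [21 12 $]
        System.out.println(s21_12.recursionDepth(21)); // 1
    }
}
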
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAConversionThread.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAConversionThread.java
deleted file mode 100644
index 0966fda..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAConversionThread.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.misc.Barrier;
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.Grammar;
-
-/** Convert all decisions i..j inclusive in a thread */
-public class NFAConversionThread implements Runnable {
-	Grammar grammar;
-	int i, j;
-	Barrier barrier;
-	public NFAConversionThread(Grammar grammar,
-							   Barrier barrier,
-							   int i,
-							   int j)
-	{
-		this.grammar = grammar;
-		this.barrier = barrier;
-		this.i = i;
-		this.j = j;
-	}
-	public void run() {
-		for (int decision=i; decision<=j; decision++) {
-			NFAState decisionStartState = grammar.getDecisionNFAStartState(decision);
-			if ( decisionStartState.getNumberOfTransitions()>1 ) {
-				grammar.createLookaheadDFA(decision,true);
-			}
-		}
-		// now wait for others to finish
-		try {
-			barrier.waitForRelease();
-		}
-		catch(InterruptedException e) {
-			ErrorManager.internalError("what the hell? DFA interruptus", e);
-		}
-	}
-}
-
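
Note: NFAConversionThread above converts a contiguous range of decisions and then waits on a shared Barrier. The sketch below shows the same work-splitting shape with standard-library pieces only: a CountDownLatch stands in for org.antlr.misc.Barrier, and convertDecision() is a placeholder for the real per-decision DFA construction.

import java.util.concurrent.CountDownLatch;

public class ConversionSketch {
    static void convertDecision(int decision) {
        System.out.println(Thread.currentThread().getName() + " converted decision " + decision);
    }

    public static void main(String[] args) throws InterruptedException {
        int nDecisions = 8, nThreads = 2;
        CountDownLatch done = new CountDownLatch(nThreads);
        int chunk = nDecisions / nThreads;
        for (int t = 0; t < nThreads; t++) {
            final int i = t * chunk + 1;
            final int j = (t == nThreads - 1) ? nDecisions : (t + 1) * chunk;
            new Thread(() -> {
                for (int d = i; d <= j; d++) convertDecision(d);   // decisions i..j inclusive
                done.countDown();                                  // this worker is finished
            }, "worker-" + t).start();
        }
        done.await();              // analogous to barrier.waitForRelease() in the deleted code
        System.out.println("all decisions converted");
    }
}
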
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAState.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAState.java
deleted file mode 100644
index 84e1365..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAState.java
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.GrammarAST;
-import org.antlr.tool.Rule;
-
-/** A state within an NFA. At most 2 transitions emanate from any NFA state. */
-public class NFAState extends State {
-	// I need to distinguish between NFA decision states for (...)* and (...)+
-	// during NFA interpretation.
-	public static final int LOOPBACK = 1;
-	public static final int BLOCK_START = 2;
-	public static final int OPTIONAL_BLOCK_START = 3;
-	public static final int BYPASS = 4;
-	public static final int RIGHT_EDGE_OF_BLOCK = 5;
-
-	public static final int MAX_TRANSITIONS = 2;
-
-	/** How many transitions; 0, 1, or 2 transitions */
-	int numTransitions = 0;
-	public Transition[] transition = new Transition[MAX_TRANSITIONS];
-
-	/** For o-A->o type NFA transitions, record the label that leads to this
-	 *  state.  Useful for creating rich error messages when we find
-	 *  states that are insufficiently covered (with preds).
-	 */
-	public Label incidentEdgeLabel;
-
-	/** Which NFA are we in? */
-	public NFA nfa = null;
-
-	/** What's its decision number from 1..n? */
-	protected int decisionNumber = 0;
-
-	/** Subrules (...)* and (...)+ have more than one decision point in
-	 *  the NFA created for them.  They both have a loop-exit-or-stay-in
-	 *  decision node (the loop back node).  They both have a normal
-	 *  alternative block decision node at the left edge.  The (...)* is
-	 *  worse as it even has a bypass decision (2 alts: stay in or bypass)
-	 *  node at the extreme left edge.  This is not how they get generated
-	 *  in code; a while-loop or whatever deals nicely with either.  For
-	 *  error messages (where I need to print the nondeterministic alts)
-	 *  and for interpretation, I need to use the single DFA that is created
-	 *  (for efficiency) but interpret the results differently depending
-	 *  on which of the 2 or 3 decision states uses the DFA.  For example,
-	 *  the DFA will always report alt n+1 as the exit branch for n real
-	 *  alts, so I need to translate that depending on the decision state.
-	 *
-	 *  If decisionNumber>0 then this var tells you what kind of decision
-	 *  state it is.
-	 */
-	public int decisionStateType;
-
-	/** What rule do we live in? */
-	public Rule enclosingRule;
-
-	/** During debugging and for nondeterminism warnings, it's useful
-	 *  to know what relationship this node has to the original grammar.
-	 *  For example, "start of alt 1 of rule a".
-	 */
-	protected String description;
-
-	/** Associate this NFAState with the corresponding GrammarAST node
-	 *  from which this node was created.  This is useful not only for
-	 *  associating the eventual lookahead DFA with the associated
-	 *  Grammar position, but also for providing users with
-	 *  nondeterminism warnings.  Mainly used by decision states to
-	 *  report line:col info.  Could also be used to track line:col
-	 *  for elements such as token refs.
-	 */
-	public GrammarAST associatedASTNode;
-
-	/** Is this state the sole target of an EOT transition? */
-	protected boolean EOTTargetState = false;
-
-	/** Jean Bovet needs to know in the GUI which state pairs correspond
-	 *  to the start/stop of a block.
-	  */
-	public int endOfBlockStateNumber = State.INVALID_STATE_NUMBER;
-
-	public NFAState(NFA nfa) {
-		this.nfa = nfa;
-	}
-
-	public int getNumberOfTransitions() {
-		return numTransitions;
-	}
-
-	public void addTransition(Transition e) {
-		if ( e==null ) {
-			throw new IllegalArgumentException("You can't add a null transition");			
-		}
-		if ( numTransitions>=transition.length ) {
-			throw new IllegalArgumentException("You can only have "+transition.length+" transitions");
-		}
-		if ( e!=null ) {
-			transition[numTransitions] = e;
-			numTransitions++;
-			// Set the "back pointer" of the target state so that it
-			// knows about the label of the incoming edge.
-			Label label = e.label;
-			if ( label.isAtom() || label.isSet() ) {
-				if ( ((NFAState)e.target).incidentEdgeLabel!=null ) {
-					ErrorManager.internalError("Clobbered incident edge");
-				}
-				((NFAState)e.target).incidentEdgeLabel = e.label;
-			}
-		}
-	}
-
-	/** Used during optimization to reset a state to have the (single)
-	 *  transition another state has.
-	 */
-	public void setTransition0(Transition e) {
-		if ( e==null ) {
-			throw new IllegalArgumentException("You can't use a solitary null transition");
-		}
-		transition[0] = e;
-		transition[1] = null;
-		numTransitions = 1;
-	}
-
-	public Transition transition(int i) {
-		return transition[i];
-	}
-
-	/** The DFA decision for this NFA decision state always has
-	 *  an exit path for loops as n+1 for n alts in the loop.
-	 *  That is really useful for displaying nondeterministic alts
-	 *  and so on, but for walking the NFA to get a sequence of edge
-	 *  labels or for actually parsing, we need to get the real alt
-	 *  number.  The real alt number for exiting a loop is always 1
-	 *  as transition 0 points at the exit branch (we compute DFAs
-	 *  always for loops at the loopback state).
-	 *
-	 *  For walking/parsing the loopback state:
-	 * 		1 2 3 display alt (for human consumption)
-	 * 		2 3 1 walk alt
-	 *
-	 *  For walking the block start:
-	 * 		1 2 3 display alt
-	 * 		1 2 3
-	 *
-	 *  For walking the bypass state of a (...)* loop:
-	 * 		1 2 3 display alt
-	 * 		1 1 2 all block alts map to entering the loop; exit means take the bypass
-	 *
-	 *  Non loop EBNF do not need to be translated; they are ignored by
-	 *  this method as decisionStateType==0.
-	 *
-	 *  Return same alt if we can't translate.
-	 */
-	public int translateDisplayAltToWalkAlt(int displayAlt) {
-		NFAState nfaStart = this;
-		if ( decisionNumber==0 || decisionStateType==0 ) {
-			return displayAlt;
-		}
-		int walkAlt = 0;
-		// find the NFA loopback state associated with this DFA
-		// and count number of alts (all alt numbers are computed
-		// based upon the loopback's NFA state).
-		/*
-		DFA dfa = nfa.grammar.getLookaheadDFA(decisionNumber);
-		if ( dfa==null ) {
-			ErrorManager.internalError("can't get DFA for decision "+decisionNumber);
-		}
-		*/
-		int nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(nfaStart);
-		switch ( nfaStart.decisionStateType ) {
-			case LOOPBACK :
-				walkAlt = displayAlt % nAlts + 1; // rotate right mod 1..3
-				break;
-			case BLOCK_START :
-			case OPTIONAL_BLOCK_START :
-				walkAlt = displayAlt; // identity transformation
-				break;
-			case BYPASS :
-				if ( displayAlt == nAlts ) {
-					walkAlt = 2; // bypass
-				}
-				else {
-					walkAlt = 1; // any non exit branch alt predicts entering
-				}
-				break;
-		}
-		return walkAlt;
-	}
-
-	// Setter/Getters
-
-	/** What AST node is associated with this NFAState?  When you
-	 *  set the AST node, I set the node to point back to this NFA state.
-	 */
-	public void setDecisionASTNode(GrammarAST decisionASTNode) {
-		decisionASTNode.setNFAStartState(this);
-		this.associatedASTNode = decisionASTNode;
-	}
-
-	public String getDescription() {
-		return description;
-	}
-
-	public void setDescription(String description) {
-		this.description = description;
-	}
-
-	public int getDecisionNumber() {
-		return decisionNumber;
-	}
-
-	public void setDecisionNumber(int decisionNumber) {
-		this.decisionNumber = decisionNumber;
-	}
-
-	public boolean isEOTTargetState() {
-		return EOTTargetState;
-	}
-
-	public void setEOTTargetState(boolean eot) {
-		EOTTargetState = eot;
-	}
-
-	public boolean isDecisionState() {
-		return decisionStateType>0;
-	}
-
-	public String toString() {
-		return String.valueOf(stateNumber);
-	}
-
-}
-
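
Note: translateDisplayAltToWalkAlt() above maps the "display" alternative numbers reported by the DFA onto the alternative numbers actually used when walking a loop decision. The table in its comment (loopback: 1 2 3 becomes 2 3 1; bypass: last alt becomes 2, everything else 1) is small enough to restate as a standalone function; the sketch below is illustrative only and not ANTLR's API.

public class AltMappingSketch {
    static final int LOOPBACK = 1, BLOCK_START = 2, OPTIONAL_BLOCK_START = 3, BYPASS = 4;

    static int translate(int decisionStateType, int nAlts, int displayAlt) {
        switch (decisionStateType) {
            case LOOPBACK:
                return displayAlt % nAlts + 1;        // rotate: exit alt (nAlts) becomes 1
            case BLOCK_START:
            case OPTIONAL_BLOCK_START:
                return displayAlt;                    // identity transformation
            case BYPASS:
                return displayAlt == nAlts ? 2 : 1;   // last alt = bypass, others = enter loop
            default:
                return displayAlt;                    // not a loop decision: no translation
        }
    }

    public static void main(String[] args) {
        // Loopback with 3 alts: display alts 1 2 3 walk as 2 3 1.
        for (int a = 1; a <= 3; a++) System.out.print(translate(LOOPBACK, 3, a) + " ");
        System.out.println();
    }
}
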
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAToDFAConverter.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAToDFAConverter.java
deleted file mode 100644
index 543ab2b..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFAToDFAConverter.java
+++ /dev/null
@@ -1,1739 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.misc.OrderedHashSet;
-import org.antlr.misc.Utils;
-import org.antlr.runtime.Token;
-import org.antlr.tool.ErrorManager;
-
-import java.util.*;
-
-/** Code that embodies the NFA conversion to DFA. A new object is needed
- *  per DFA (one is also required for thread safety if multiple conversions
- *  are launched).
- */
-public class NFAToDFAConverter {
-	/** A list of DFA states we still need to process during NFA conversion */
-	protected List work = new LinkedList();
-
-	/** While converting NFA, we must track states that
-	 *  reference other rules' NFAs so we know what to do
-	 *  at the end of a rule.  We need to know what context invoked
-	 *  this rule so we can know where to continue looking for NFA
-	 *  states.  I'm tracking a context tree (record of rule invocation
-	 *  stack trace) for each alternative that could be predicted.
-	 */
-	protected NFAContext[] contextTrees;
-
-	/** We are converting which DFA? */
-	protected DFA dfa;
-
-	public static boolean debug = false;
-
-	/** Should ANTLR launch multiple threads to convert NFAs to DFAs?
-	 *  With a 2-CPU box, I note that it's about the same speed single- or
-	 *  multi-threaded.  Both CPU meters are going even when single-threaded,
-	 *  so I assume the GC is killing us.  Could be the compiler.  When I
-	 *  run java in -Xint mode, I get about a 15% speed improvement with multiple
-	 *  threads.
-	 */
-	public static boolean SINGLE_THREADED_NFA_CONVERSION = true;
-
-	protected boolean computingStartState = false;
-
-	public NFAToDFAConverter(DFA dfa) {
-		this.dfa = dfa;
-		int nAlts = dfa.getNumberOfAlts();
-		initContextTrees(nAlts);
-	}
-
-	public void convert() {
-		//dfa.conversionStartTime = System.currentTimeMillis();
-
-		// create the DFA start state
-		dfa.startState = computeStartState();
-
-		// while more DFA states to check, process them
-		while ( work.size()>0 &&
-				!dfa.nfa.grammar.NFAToDFAConversionExternallyAborted() )
-		{
-			DFAState d = (DFAState) work.get(0);
-			if ( dfa.nfa.grammar.composite.watchNFAConversion ) {
-				System.out.println("convert DFA state "+d.stateNumber+
-								   " ("+d.nfaConfigurations.size()+" nfa states)");
-			}
-			int k = dfa.getUserMaxLookahead();
-			if ( k>0 && k==d.getLookaheadDepth() ) {
-				// we've hit max lookahead, make this a stop state
-				//System.out.println("stop state @k="+k+" (terminated early)");
-				/*
-				List<Label> sampleInputLabels = d.dfa.probe.getSampleNonDeterministicInputSequence(d);
-				String input = d.dfa.probe.getInputSequenceDisplay(sampleInputLabels);
-				System.out.println("sample input: "+input);
-				 */
-				resolveNonDeterminisms(d);
-				// Check to see if we need to add any semantic predicate transitions
-				if ( d.isResolvedWithPredicates() ) {
-					addPredicateTransitions(d);
-				}
-				else {
-					d.setAcceptState(true); // must convert to accept state at k
-				}
-			}
-			else {
-				findNewDFAStatesAndAddDFATransitions(d);
-			}
-			work.remove(0); // done with it; remove from work list
-		}
-
-		// Find all manual syn preds (gated).  These are not discovered
-		// in tryToResolveWithSemanticPredicates because they are implicitly
-		// added to every edge by code gen, DOT generation etc...
-		dfa.findAllGatedSynPredsUsedInDFAAcceptStates();
-	}
-
-	/** From this first NFA state of a decision, create a DFA.
-	 *  Walk each alt in decision and compute closure from the start of that
-	 *  rule, making sure that the closure does not include other alts within
-	 *  that same decision.  The idea is to associate a specific alt number
-	 *  with the starting closure so we can trace the alt number for all states
-	 *  derived from this.  At a stop state in the DFA, we can return this alt
-	 *  number, indicating which alt is predicted.
-	 *
-	 *  If this DFA is derived from a loopback NFA state, then the first
-	 *  transition is actually the exit branch of the loop.  Rather than make
-	 *  this alternative one, let's make this alt n+1 where n is the number of
-	 *  alts in this block.  This is nice to keep the alts of the block 1..n;
-	 *  helps with error messages.
-	 *
-	 *  I handle nongreedy decisions in findNewDFAStatesAndAddDFATransitions
-	 *  when a nongreedy decision sees an EOT transition: the state with EOT
-	 *  emanating from it becomes the accept state.
-	 */
-	protected DFAState computeStartState() {
-		NFAState alt = dfa.decisionNFAStartState;
-		DFAState startState = dfa.newState();
-		computingStartState = true;
-		int i = 0;
-		int altNum = 1;
-		while ( alt!=null ) {
-			// find the set of NFA states reachable without consuming
-			// any input symbols for each alt.  Keep adding to same
-			// overall closure that will represent the DFA start state,
-			// but track the alt number
-			NFAContext initialContext = contextTrees[i];
-			// if first alt is derived from loopback/exit branch of loop,
-			// make alt=n+1 for n alts instead of 1
-			if ( i==0 &&
-				 dfa.getNFADecisionStartState().decisionStateType==NFAState.LOOPBACK )
-			{
-				int numAltsIncludingExitBranch = dfa.nfa.grammar
-					.getNumberOfAltsForDecisionNFA(dfa.decisionNFAStartState);
-				altNum = numAltsIncludingExitBranch;
-				closure((NFAState)alt.transition[0].target,
-						altNum,
-						initialContext,
-						SemanticContext.EMPTY_SEMANTIC_CONTEXT,
-						startState,
-						true
-				);
-				altNum = 1; // make next alt the first
-			}
-			else {
-				closure((NFAState)alt.transition[0].target,
-						altNum,
-						initialContext,
-						SemanticContext.EMPTY_SEMANTIC_CONTEXT,
-						startState,
-						true
-				);
-				altNum++;
-			}
-			i++;
-
-			// move to next alternative
-			if ( alt.transition[1] ==null ) {
-				break;
-			}
-			alt = (NFAState)alt.transition[1].target;
-		}
-
-		// now DFA start state has the complete closure for the decision
-		// but we have tracked which alt is associated with which
-		// NFA states.
-		dfa.addState(startState); // make sure dfa knows about this state
-		work.add(startState);
-		computingStartState = false;
-		return startState;
-	}
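-
-	/** Illustration only (invented names, not ANTLR code): the alt-numbering
-	 *  convention used above.  For a LOOPBACK decision the first branch walked is
-	 *  the loop-exit branch and gets alt n+1, keeping the block alts numbered 1..n;
-	 *  ordinary blocks are simply numbered in order. */
-	static class AltNumberingDemo {
-		static int[] assignAltNumbers(int nBranches, boolean loopbackDecision) {
-			int[] altForBranch = new int[nBranches];
-			int altNum = 1;
-			for (int branch = 0; branch < nBranches; branch++) {
-				if ( branch == 0 && loopbackDecision ) {
-					altForBranch[branch] = nBranches; // exit branch: alt n+1 (nBranches includes it)
-				}
-				else {
-					altForBranch[branch] = altNum++;
-				}
-			}
-			return altForBranch;
-		}
-		public static void main(String[] args) {
-			// (...)* loop with 3 block alts: 4 branches, branch 0 is the exit branch
-			System.out.println(java.util.Arrays.toString(assignAltNumbers(4, true)));  // [4, 1, 2, 3]
-			// plain 3-alt block: no exit branch
-			System.out.println(java.util.Arrays.toString(assignAltNumbers(3, false))); // [1, 2, 3]
-		}
-	}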
-
-	/** From this node, add a d--a-->t transition for all
-	 *  labels 'a' where t is a DFA node created
-	 *  from the set of NFA states reachable from any NFA
-	 *  state in DFA state d.
-	 */
-	protected void findNewDFAStatesAndAddDFATransitions(DFAState d) {
-		//System.out.println("work on DFA state "+d);
-		OrderedHashSet labels = d.getReachableLabels();
-		//System.out.println("reachable labels="+labels);
-
-		/*
-		System.out.println("|reachable|/|nfaconfigs|="+
-				labels.size()+"/"+d.getNFAConfigurations().size()+"="+
-				labels.size()/(float)d.getNFAConfigurations().size());
-		*/
-
-		// normally EOT is the "default" clause and decisions just
-		// choose that last clause when nothing else matches.  DFA conversion
-		// continues searching for a unique sequence that predicts the
-		// various alts or until it finds EOT.  So this rule
-		//
-		// DUH : ('x'|'y')* "xy!";
-		//
-		// does not need a greedy indicator.  The following rule works fine too
-		//
-		// A : ('x')+ ;
-		//
-		// When the follow branch could match what is in the loop, by default,
-		// the nondeterminism is resolved in favor of the loop.  You don't
-		// get a warning because the only way to get this condition is if
-		// the DFA conversion hits the end of the token.  In that case,
-		// we're not *sure* what will happen next, but it could be anything.
-		// Anyway, EOT is the default case which means it will never be matched
-		// as resolution goes to the lowest alt number.  Exit branches are
-		// always alt n+1 for n alts in a block.
-		//
-		// When a loop is nongreedy and we find an EOT transition, the DFA
-		// state should become an accept state, predicting exit of loop.  It's
-		// just reversing the resolution of ambiguity.
-		// TODO: should this be done in the resolveAmbig method?
-		Label EOTLabel = new Label(Label.EOT);
-		boolean containsEOT = labels!=null && labels.contains(EOTLabel);
-		if ( !dfa.isGreedy() && containsEOT ) {
-			convertToEOTAcceptState(d);
-			return; // no more work to do on this accept state
-		}
-
-		// if in filter mode for lexer, want to match shortest not longest
-		// string so if we see an EOT edge emanating from this state, then
-		// convert this state to an accept state.  This only counts for
-		// The Tokens rule as all other decisions must continue to look for
-		// longest match.
-		// [Taking back out a few days later on Jan 17, 2006.  This could
-		//  be an option for the future, but this was the wrong solution for
-		//  filtering.]
-		/*
-		if ( dfa.nfa.grammar.type==Grammar.LEXER && containsEOT ) {
-			String filterOption = (String)dfa.nfa.grammar.getOption("filter");
-			boolean filterMode = filterOption!=null && filterOption.equals("true");
-			if ( filterMode && d.dfa.isTokensRuleDecision() ) {
-				DFAState t = reach(d, EOTLabel);
-				if ( t.getNFAConfigurations().size()>0 ) {
-					convertToEOTAcceptState(d);
-					//System.out.println("state "+d+" has EOT target "+t.stateNumber);
-					return;
-				}
-			}
-		}
-		*/
-
-		int numberOfEdgesEmanating = 0;
-		Map targetToLabelMap = new HashMap();
-		// for each label that could possibly emanate from NFAStates of d
-		int numLabels = 0;
-		if ( labels!=null ) {
-			numLabels = labels.size();
-		}
-		for (int i=0; i<numLabels; i++) {
-			Label label = (Label)labels.get(i);
-			DFAState t = reach(d, label);
-			if ( debug ) {
-				System.out.println("DFA state after reach "+label+" "+d+"-" +
-								   label.toString(dfa.nfa.grammar)+"->"+t);
-			}
-			if ( t==null ) {
-				// nothing was reached by label due to conflict resolution.
-				// EOT also seems to be in here occasionally, probably due
-				// to an end-of-rule state seeing it even though we'll pop
-				// an invoking state off the stack; don't bother with the conflict
-				// as this label set is a covering approximation only.
-				continue;
-			}
-			//System.out.println("dfa.k="+dfa.getUserMaxLookahead());
-			if ( t.getUniqueAlt()==NFA.INVALID_ALT_NUMBER ) {
-				// Only compute closure if a unique alt number is not known.
-				// If a unique alternative is mentioned among all NFA
-				// configurations then there is no possibility of needing to look
-				// beyond this state; also no possibility of a nondeterminism.
-				// This optimization May 22, 2006 just dropped -Xint time
-				// for analysis of Java grammar from 11.5s to 2s!  Wow.
-				closure(t);  // add any NFA states reachable via epsilon
-			}
-
-			/*
-			System.out.println("DFA state after closure "+d+"-"+
-							   label.toString(dfa.nfa.grammar)+
-							   "->"+t);
-							   */
-
-			// add if not in DFA yet and then make d-label->t
-			DFAState targetState = addDFAStateToWorkList(t);
-
-			numberOfEdgesEmanating +=
-				addTransition(d, label, targetState, targetToLabelMap);
-
-			// lookahead of target must be one larger than d's k
-			// We are possibly setting the depth of a pre-existing state
-			// that is equal to one we just computed...not sure if that's
-			// ok.
-			targetState.setLookaheadDepth(d.getLookaheadDepth() + 1);
-		}
-
-		//System.out.println("DFA after reach / closures:\n"+dfa);
-		if ( !d.isResolvedWithPredicates() && numberOfEdgesEmanating==0 ) {
-			//System.out.println("dangling DFA state "+d+"\nAfter reach / closures:\n"+dfa);
-			// TODO: can fixed lookahead hit a dangling state case?
-			// TODO: yes, with left recursion
-			//System.err.println("dangling state alts: "+d.getAltSet());
-			dfa.probe.reportDanglingState(d);
-			// turn off all configurations except for those associated with
-			// min alt number; somebody has to win else some input will not
-			// predict any alt.
-			int minAlt = resolveByPickingMinAlt(d, null);
-			// force it to be an accept state
-			// don't call convertToAcceptState() which merges stop states.
-			// other states point at us; don't want them pointing to dead states
-			d.setAcceptState(true); // might be adding new accept state for alt
-			dfa.setAcceptState(minAlt, d);
-			//convertToAcceptState(d, minAlt); // force it to be an accept state
-		}
-
-		// Check to see if we need to add any semantic predicate transitions
-		// might have both token and predicated edges from d
-		if ( d.isResolvedWithPredicates() ) {
-			addPredicateTransitions(d);
-		}
-	}
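-
-	/** Illustration only (invented names, not ANTLR code): the bare reach-then-closure
-	 *  worklist loop that the method above performs, without alt tracking,
-	 *  rule-invocation contexts, predicates, or conflict resolution.  nfa.get(s) is
-	 *  the edge list of state s and the label '\0' stands for epsilon. */
-	static class SubsetConstructionSketch {
-		record Edge(char label, int target) {}
-
-		/** Returns DFA transitions keyed by "stateSet,label" -> target state set. */
-		static java.util.Map<String, String> convert(java.util.List<java.util.List<Edge>> nfa, int start) {
-			java.util.Map<String, String> dfaEdges = new java.util.LinkedHashMap<>();
-			java.util.Deque<java.util.Set<Integer>> work = new java.util.ArrayDeque<>();
-			java.util.Set<String> seen = new java.util.HashSet<>();
-			java.util.Set<Integer> startState = closure(nfa, java.util.Set.of(start));
-			work.add(startState);
-			seen.add(startState.toString());
-			while ( !work.isEmpty() ) {
-				java.util.Set<Integer> d = work.remove();
-				for (char label : labels(nfa, d)) {
-					java.util.Set<Integer> t = closure(nfa, move(nfa, d, label)); // reach, then closure
-					if ( t.isEmpty() ) continue;
-					if ( seen.add(t.toString()) ) work.add(t);                    // new DFA state
-					dfaEdges.put(d + "," + label, t.toString());                  // add d -label-> t
-				}
-			}
-			return dfaEdges;
-		}
-		static java.util.Set<Integer> move(java.util.List<java.util.List<Edge>> nfa, java.util.Set<Integer> d, char label) {
-			java.util.Set<Integer> out = new java.util.TreeSet<>();
-			for (int s : d) for (Edge e : nfa.get(s)) if ( e.label() == label ) out.add(e.target());
-			return out;
-		}
-		static java.util.Set<Integer> closure(java.util.List<java.util.List<Edge>> nfa, java.util.Set<Integer> states) {
-			java.util.Set<Integer> out = new java.util.TreeSet<>(states);
-			java.util.Deque<Integer> todo = new java.util.ArrayDeque<>(states);
-			while ( !todo.isEmpty() )
-				for (Edge e : nfa.get(todo.remove()))
-					if ( e.label() == '\0' && out.add(e.target()) ) todo.add(e.target());
-			return out;
-		}
-		static java.util.Set<Character> labels(java.util.List<java.util.List<Edge>> nfa, java.util.Set<Integer> d) {
-			java.util.Set<Character> out = new java.util.TreeSet<>();
-			for (int s : d) for (Edge e : nfa.get(s)) if ( e.label() != '\0' ) out.add(e.label());
-			return out;
-		}
-		public static void main(String[] args) {
-			// (a|b)a over states 0..4: 0-eps->1, 0-eps->2, 1-a->3, 2-b->3, 3-a->4
-			java.util.List<java.util.List<Edge>> nfa = java.util.List.of(
-				java.util.List.of(new Edge('\0', 1), new Edge('\0', 2)),
-				java.util.List.of(new Edge('a', 3)),
-				java.util.List.of(new Edge('b', 3)),
-				java.util.List.of(new Edge('a', 4)),
-				java.util.List.of());
-			System.out.println(convert(nfa, 0)); // {[0, 1, 2],a=[3], [0, 1, 2],b=[3], [3],a=[4]}
-		}
-	}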
-
-	/** Add a transition from state d to targetState with label in normal case.
-	 *  if COLLAPSE_ALL_PARALLEL_EDGES is set, however, try to merge all edges from
-	 *  d to targetState; this means merging their labels.  Another optimization
-	 *  is to reduce to a single EOT edge any set of edges from d to targetState
-	 *  where there exists an EOT state.  EOT is like the wildcard so don't
-	 *  bother to test any other edges.  Example:
-	 *
-	 *  NUM_INT
-	 *    : '1'..'9' ('0'..'9')* ('l'|'L')?
-     *    | '0' ('x'|'X') ('0'..'9'|'a'..'f'|'A'..'F')+ ('l'|'L')?
-     *    | '0' ('0'..'7')* ('l'|'L')?
-	 *    ;
-	 *
-	 *  The normal decision to predict alts 1, 2, 3 is:
-	 *
-	 *  if ( (input.LA(1)>='1' && input.LA(1)<='9') ) {
-     *       alt7=1;
-     *  }
-     *  else if ( input.LA(1)=='0' ) {
-     *      if ( input.LA(2)=='X'||input.LA(2)=='x' ) {
-     *          alt7=2;
-     *      }
-     *      else if ( (input.LA(2)>='0' && input.LA(2)<='7') ) {
-     *           alt7=3;
-     *      }
-     *      else if ( input.LA(2)=='L'||input.LA(2)=='l' ) {
-     *           alt7=3;
-     *      }
-     *      else {
-     *           alt7=3;
-     *      }
-     *  }
-     *  else error
-	 *
-     *  Clearly, alt 3 is predicted with extra work since it tests 0..7
-	 *  and [lL] before finally realizing that any character is actually
-	 *  ok at k=2.
-	 *
-	 *  A better decision is as follows:
-     *
-	 *  if ( (input.LA(1)>='1' && input.LA(1)<='9') ) {
-	 *      alt7=1;
-	 *  }
-	 *  else if ( input.LA(1)=='0' ) {
-	 *      if ( input.LA(2)=='X'||input.LA(2)=='x' ) {
-	 *          alt7=2;
-	 *      }
-	 *      else {
-	 *          alt7=3;
-	 *      }
-	 *  }
-	 *
-	 *  The DFA originally has 3 edges going to the state the predicts alt 3,
-	 *  but upon seeing the EOT edge (the "else"-clause), this method
-	 *  replaces the old merged label (which would have (0..7|l|L)) with EOT.
-	 *  The code generator then leaves alt 3 predicted with a simple else-
-	 *  clause. :)
-	 *
-	 *  The only time the EOT optimization makes no sense is in the Tokens
-	 *  rule.  We want EOT to truly mean you have matched an entire token
-	 *  so don't bother actually rewinding to execute that rule unless there
-	 *  are actions in that rule.  For now, since I am not preventing
-	 *  backtracking from Tokens rule, I will simply allow the optimization.
-	 */
-	protected static int addTransition(DFAState d,
-									   Label label,
-									   DFAState targetState,
-									   Map targetToLabelMap)
-	{
-		//System.out.println(d.stateNumber+"-"+label.toString(dfa.nfa.grammar)+"->"+targetState.stateNumber);
-		int n = 0;
-		if ( DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES ) {
-			// track which targets we've hit
-			Integer tI = Utils.integer(targetState.stateNumber);
-			Transition oldTransition = (Transition)targetToLabelMap.get(tI);
-			if ( oldTransition!=null ) {
-				//System.out.println("extra transition to "+tI+" upon "+label.toString(dfa.nfa.grammar));
-				// already seen state d to target transition, just add label
-				// to old label unless EOT
-				if ( label.getAtom()==Label.EOT ) {
-					// merge with EOT means old edge can go away
-					oldTransition.label = new Label(Label.EOT);
-				}
-				else {
-					// don't add anything to EOT, it's essentially the wildcard
-					if ( oldTransition.label.getAtom()!=Label.EOT ) {
-						// ok, not EOT, add in this label to old label
-						oldTransition.label.add(label);
-					}
-					//System.out.println("label updated to be "+oldTransition.label.toString(dfa.nfa.grammar));
-				}
-			}
-			else {
-				// make a transition from d to t upon 'a'
-				n = 1;
-				label = (Label)label.clone(); // clone in case we alter later
-				int transitionIndex = d.addTransition(targetState, label);
-				Transition trans = d.getTransition(transitionIndex);
-				// track target/transition pairs
-				targetToLabelMap.put(tI, trans);
-			}
-		}
-		else {
-			n = 1;
-			d.addTransition(targetState, label);
-		}
-		return n;
-	}
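-
-	/** Illustration only (invented names, plain strings instead of Label objects):
-	 *  the parallel-edge merge described above.  Labels going to the same target
-	 *  are unioned, and once an EOT ("anything else") edge is seen the merged
-	 *  label collapses to just EOT. */
-	static class EdgeMergeSketch {
-		static final String EOT = "<EOT>";
-		/** labelsByTarget: target DFA state number -> merged label set for that edge */
-		static void addEdge(java.util.Map<Integer, java.util.Set<String>> labelsByTarget,
-							int target, String label)
-		{
-			java.util.Set<String> merged =
-				labelsByTarget.computeIfAbsent(target, t -> new java.util.TreeSet<String>());
-			if ( label.equals(EOT) ) {			// merge with EOT means the old labels can go away
-				merged.clear();
-				merged.add(EOT);
-			}
-			else if ( !merged.contains(EOT) ) {	// don't add anything to EOT; it's the wildcard
-				merged.add(label);
-			}
-		}
-		public static void main(String[] args) {
-			java.util.Map<Integer, java.util.Set<String>> edges = new java.util.HashMap<>();
-			addEdge(edges, 3, "'0'..'7'");
-			addEdge(edges, 3, "'l'|'L'");
-			addEdge(edges, 3, EOT);				// the "else" clause subsumes the earlier labels
-			System.out.println(edges);			// {3=[<EOT>]}
-		}
-	}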
-
-	/** For all NFA states (configurations) merged in d,
-	 *  compute the epsilon closure; that is, find all NFA states reachable
-	 *  from the NFA states in d via purely epsilon transitions.
-	 */
-	public void closure(DFAState d) {
-		if ( debug ) {
-			System.out.println("closure("+d+")");
-		}
-
-		List<NFAConfiguration> configs = new ArrayList<NFAConfiguration>();
-		// Because we are adding to the configurations during closure,
-		// we must clone the initial list so we know when to stop doing closure
-		configs.addAll(d.nfaConfigurations);
-		// for each NFA configuration in d (abort if we detect non-LL(*) state)
-		int numConfigs = configs.size();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration c = (NFAConfiguration)configs.get(i);
-			if ( c.singleAtomTransitionEmanating ) {
-				continue; // ignore NFA states w/o epsilon transitions
-			}
-			//System.out.println("go do reach for NFA state "+c.state);
-			// figure out reachable NFA states from each of d's nfa states
-			// via epsilon transitions.
-			// Fill configsInClosure rather than altering d configs inline
-			closure(dfa.nfa.getState(c.state),
-					c.alt,
-					c.context,
-					c.semanticContext,
-					d,
-					false);
-		}
-		//System.out.println("after closure d="+d);
-		d.closureBusy = null; // whack all that memory used during closure
-	}
-
-	/** Where can we get from NFA state p traversing only epsilon transitions?
-	 *  Add new NFA states + context to DFA state d.  Also add semantic
-	 *  predicates to semantic context if collectPredicates is set.  We only
-	 *  collect predicates at hoisting depth 0, meaning before any token/char
-	 *  have been recognized.  This corresponds, during analysis, to the
-	 *  initial DFA start state construction closure() invocation.
-	 *
-	 *  There are four cases of interest (the last being the usual transition):
-	 *
-	 *   1. Traverse an edge that takes us to the start state of another
-	 *      rule, r.  We must push this state so that if the DFA
-	 *      conversion hits the end of rule r, then it knows to continue
-	 *      the conversion at the state following the state that "invoked" r.  By
-	 *      construction, there is a single transition emanating from a rule
-	 *      ref node.
-	 *
-	 *   2. Reach an NFA state associated with the end of a rule, r, in the
-	 *      grammar from which it was built.  We must add an implicit (i.e.,
-	 *      don't actually add an epsilon transition) epsilon transition
-	 *      from r's end state to the NFA state following the NFA state
-	 *      that transitioned to rule r's start state.  Because there are
-	 *      many states that could reach r, the context for a rule invocation
-	 *      is part of a call tree not a simple stack.  When we fall off end
-	 *      of rule, "pop" a state off the call tree and add that state's
-	 *      "following" node to d's NFA configuration list.  The context
-	 *      for this new addition will be the new "stack top" in the call tree.
-	 *
-	 *   3. Like case 2, we reach an NFA state associated with the end of a
-	 *      rule, r, in the grammar from which NFA was built.  In this case,
-	 *      however, we realize that during this NFA->DFA conversion, no state
-	 *      invoked the current rule's NFA.  There is no choice but to add
-	 *      all NFA states that follow references to r's start state.  This is
-	 *      analogous to computing the FOLLOW(r) in the LL(k) world.  By
-	 *      construction, even the rule stop state has a chain of nodes emanating
-	 *      from it that points to every possible following node.  This case
-	 *      is then conveniently handled by the 4th case.
-	 *
-	 *   4. Normal case.  If p can reach another NFA state q, then add
-	 *      q to d's configuration list, copying p's context for q's context.
-	 *      If there is a semantic predicate on the transition, then AND it
-	 *      with any existing semantic context.
-	 *
-	 *   Current state p is always added to d's configuration list as it's part
-	 *   of the closure as well.
-	 *
-	 *  When is a closure operation in a cycle condition?  While it is
-	 *  very possible to have the same NFA state mentioned twice
-	 *  within the same DFA state, there are two situations that
-	 *  would lead to nontermination of closure operation:
-	 *
-	 *  o   Whenever closure reaches a configuration where the same state
-	 *      with the same context or a suffix context already exists.  This catches
-	 *      the IF-THEN-ELSE tail recursion cycle and things like
-	 *
-	 *      a : A a | B ;
-	 *
-	 *      the context will be $ (empty stack).
-	 *
-	 *      We have to check
-	 *      larger context stacks because of (...)+ loops.  For
-	 *      example, the context of a (...)+ can be nonempty if the
-	 *      surrounding rule is invoked by another rule:
-	 *
-	 *      a : b A | X ;
-	 *      b : (B|)+ ;  // nondeterministic by the way
-	 *
-	 *      The context of the (B|)+ loop is "invoked from item
-	 *      a : . b A ;" and then the empty alt of the loop can reach back
-	 *      to itself.  The context stack will have one "return
-	 *      address" element and so we must check for same state, same
-	 *      context for arbitrary context stacks.
-	 *
-	 *      Idea: If we've seen this configuration before during closure, stop.
-	 *      We also need to avoid reaching same state with conflicting context.
-	 *      Ultimately analysis would stop and we'd find the conflict, but we
-	 *      should stop the computation.  Previously I only checked for
-	 *      exact config.  Need to check for same state, suffix context
-	 * 		not just exact context.
-	 *
-	 *  o   Whenever closure reaches a configuration where state p
-	 *      is present in its own context stack.  This means that
-	 *      p is a rule invocation state and the target rule has
-	 *      been called before.  NFAContext.MAX_RECURSIVE_INVOCATIONS
-	 *      (See the comment there also) determines how many times
-	 *      it's possible to recurse; clearly we cannot recurse forever.
-	 *      Some grammars such as the following actually require at
-	 *      least one recursive call to correctly compute the lookahead:
-	 *
-	 *      a : L ID R
-	 *        | b
-	 *        ;
-	 *      b : ID
-	 *        | L a R
-	 *        ;
-	 *
-	 *      Input L ID R is ambiguous but to figure this out, ANTLR
-	 *      needs to go a->b->a->b to find the L ID sequence.
-	 *
-	 *      Do not allow closure to add a configuration that would
-	 *      allow too much recursion.
-	 *
-	 *      This case also catches infinite left recursion.
-	 */
-	public void closure(NFAState p,
-						int alt,
-						NFAContext context,
-						SemanticContext semanticContext,
-						DFAState d,
-						boolean collectPredicates)
-	{
-		if ( debug ){
-			System.out.println("closure at "+p.enclosingRule.name+" state "+p.stateNumber+"|"+
-							   alt+" filling DFA state "+d.stateNumber+" with context "+context
-							   );
-		}
-
-//		if ( DFA.MAX_TIME_PER_DFA_CREATION>0 &&
-//			 System.currentTimeMillis() - d.dfa.conversionStartTime >=
-//			 DFA.MAX_TIME_PER_DFA_CREATION )
-//		{
-//			// bail way out; we've blown up somehow
-//			throw new AnalysisTimeoutException(d.dfa);
-//		}
-
-		NFAConfiguration proposedNFAConfiguration =
-				new NFAConfiguration(p.stateNumber,
-						alt,
-						context,
-						semanticContext);
-
-		// Avoid infinite recursion
-		if ( closureIsBusy(d, proposedNFAConfiguration) ) {
-			if ( debug ) {
-				System.out.println("avoid visiting exact closure computation NFA config: "+
-								   proposedNFAConfiguration+" in "+p.enclosingRule.name);
-				System.out.println("state is "+d.dfa.decisionNumber+"."+d.stateNumber);
-			}
-			return;
-		}
-
-		// set closure to be busy for this NFA configuration
-		d.closureBusy.add(proposedNFAConfiguration);
-
-		// p itself is always in closure
-		d.addNFAConfiguration(p, proposedNFAConfiguration);
-
-		// Case 1: are we a reference to another rule?
-		Transition transition0 = p.transition[0];
-		if ( transition0 instanceof RuleClosureTransition ) {
-			int depth = context.recursionDepthEmanatingFromState(p.stateNumber);
-			// Detect recursion by more than a single alt, which indicates
-			// that the decision's lookahead language is potentially non-regular; terminate
-			if ( depth == 1 && d.dfa.getUserMaxLookahead()==0 ) { // k=* only
-				d.dfa.recursiveAltSet.add(alt); // indicate that this alt is recursive
-				if ( d.dfa.recursiveAltSet.size()>1 ) {
-					//System.out.println("recursive alts: "+d.dfa.recursiveAltSet.toString());
-					d.abortedDueToMultipleRecursiveAlts = true;
-					throw new NonLLStarDecisionException(d.dfa);
-				}
-				/*
-				System.out.println("alt "+alt+" in rule "+p.enclosingRule+" dec "+d.dfa.decisionNumber+
-					" ctx: "+context);
-				System.out.println("d="+d);
-				*/
-			}
-			// Detect an attempt to recurse too high
-			// if this context has hit the max recursions for p.stateNumber,
-			// don't allow it to enter p.stateNumber again
-			if ( depth >= NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK ) {
-				/*
-				System.out.println("OVF state "+d);
-				System.out.println("proposed "+proposedNFAConfiguration);
-				*/
-				d.abortedDueToRecursionOverflow = true;
-				d.dfa.probe.reportRecursionOverflow(d, proposedNFAConfiguration);
-				if ( debug ) {
-					System.out.println("analysis overflow in closure("+d.stateNumber+")");
-				}
-				return;
-			}
-
-			// otherwise, it's cool to (re)enter target of this rule ref
-			RuleClosureTransition ref = (RuleClosureTransition)transition0;
-			// first create a new context and push onto call tree,
-			// recording the fact that we are invoking a rule and
-			// from which state (case 2 below will get the following state
-			// via the RuleClosureTransition emanating from the invoking state
-			// pushed on the stack).
-			// Reset the context to reflect the fact we invoked rule
-			NFAContext newContext = new NFAContext(context, p);
-			//System.out.println("invoking rule "+ref.rule.name);
-			// System.out.println(" context="+context);
-			// traverse epsilon edge to new rule
-			NFAState ruleTarget = (NFAState)ref.target;
-			closure(ruleTarget, alt, newContext, semanticContext, d, collectPredicates);
-		}
-		// Case 2: end of rule state, context (i.e., an invoker) exists
-		else if ( p.isAcceptState() && context.parent!=null ) {
-			NFAState whichStateInvokedRule = context.invokingState;
-			RuleClosureTransition edgeToRule =
-				(RuleClosureTransition)whichStateInvokedRule.transition[0];
-			NFAState continueState = edgeToRule.followState;
-			NFAContext newContext = context.parent; // "pop" invoking state
-			closure(continueState, alt, newContext, semanticContext, d, collectPredicates);
-		}
-		// Case 3: end of rule state, nobody invoked this rule (no context)
-		//    Fall thru to be handled by case 4 automagically.
-		// Case 4: ordinary NFA->DFA conversion case: simple epsilon transition
-		else {
-			// recurse down any epsilon transitions
-			if ( transition0!=null && transition0.isEpsilon() ) {
-				boolean collectPredicatesAfterAction = collectPredicates;
-				if ( transition0.isAction() && collectPredicates ) {
-					collectPredicatesAfterAction = false;
-					/*
-					if ( computingStartState ) {
-						System.out.println("found action during prediction closure "+((ActionLabel)transition0.label).actionAST.token);
-					}
-					 */
-				}
-				closure((NFAState)transition0.target,
-						alt,
-						context,
-						semanticContext,
-						d,
-						collectPredicatesAfterAction
-				);
-			}
-			else if ( transition0!=null && transition0.isSemanticPredicate() ) {
-                SemanticContext labelContext = transition0.label.getSemanticContext();
-                if ( computingStartState ) {
-                    if ( collectPredicates ) {
-                        // only indicate we can see a predicate if we're collecting preds
-                        // Could be computing start state & seen an action before this.
-                        dfa.predicateVisible = true;
-                    }
-                    else {
-                        // this state has a pred, but we can't see it.
-                        dfa.hasPredicateBlockedByAction = true;
-                        // System.out.println("found pred during prediction but blocked by action found previously");
-                    }
-                }
-                // continue closure here too, but add the sem pred to ctx
-                SemanticContext newSemanticContext = semanticContext;
-                if ( collectPredicates ) {
-                    // AND the previous semantic context with new pred
-                    // do not hoist syn preds from other rules; only collect them
-                    // if we're in the starting state's rule (i.e., context is empty)
-                    int walkAlt =
-						dfa.decisionNFAStartState.translateDisplayAltToWalkAlt(alt);
-					NFAState altLeftEdge =
-						dfa.nfa.grammar.getNFAStateForAltOfDecision(dfa.decisionNFAStartState,walkAlt);
-					/*
-					System.out.println("state "+p.stateNumber+" alt "+alt+" walkAlt "+walkAlt+" trans to "+transition0.target);
-					System.out.println("DFA start state "+dfa.decisionNFAStartState.stateNumber);
-					System.out.println("alt left edge "+altLeftEdge.stateNumber+
-						", epsilon target "+
-						altLeftEdge.transition(0).target.stateNumber);
-					*/
-					if ( !labelContext.isSyntacticPredicate() ||
-						 p==altLeftEdge.transition[0].target )
-					{
-						//System.out.println("&"+labelContext+" enclosingRule="+p.enclosingRule);
-						newSemanticContext =
-							SemanticContext.and(semanticContext, labelContext);
-					}
-				}
-				closure((NFAState)transition0.target,
-						alt,
-						context,
-						newSemanticContext,
-						d,
-						collectPredicates);
-			}
-			Transition transition1 = p.transition[1];
-			if ( transition1!=null && transition1.isEpsilon() ) {
-				closure((NFAState)transition1.target,
-						alt,
-						context,
-						semanticContext,
-						d,
-						collectPredicates);
-			}
-		}
-
-		// don't remove "busy" flag as we want to prevent all
-		// references to same config of state|alt|ctx|semCtx even
-		// if resulting from another NFA state
-	}
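-
-	/** Illustration only (invented types, not ANTLR's): the four cases above in
-	 *  miniature.  A rule-reference edge pushes the follow state onto a linked
-	 *  context (case 1), an end-of-rule state with a context pops and continues at
-	 *  the recorded follow state (case 2), and plain epsilon edges are chased
-	 *  directly (cases 3/4); a busy set of state|context pairs stops the cycles
-	 *  discussed above.  Alts, predicates, and overflow checks are omitted. */
-	static class ClosureCasesSketch {
-		interface Edge {}
-		record Epsilon(int target) implements Edge {}
-		record RuleRef(int ruleStart, int followState) implements Edge {}	// case 1
-		record Ctx(int followState, Ctx parent) {}							// call "stack"
-
-		static void closure(java.util.Map<Integer, java.util.List<Edge>> edges,
-							java.util.Set<Integer> endOfRuleStates,
-							int p, Ctx ctx,
-							java.util.Set<String> busy,
-							java.util.Set<Integer> result)
-		{
-			if ( !busy.add(p + "|" + ctx) ) return;	// same state, same context: already done
-			result.add(p);							// p itself is always in the closure
-			if ( endOfRuleStates.contains(p) && ctx != null ) {				// case 2: pop
-				closure(edges, endOfRuleStates, ctx.followState(), ctx.parent(), busy, result);
-				return;
-			}
-			for (Edge e : edges.getOrDefault(p, java.util.List.of())) {
-				if ( e instanceof RuleRef r ) {								// case 1: push
-					closure(edges, endOfRuleStates, r.ruleStart(),
-							new Ctx(r.followState(), ctx), busy, result);
-				}
-				else if ( e instanceof Epsilon eps ) {						// cases 3/4
-					closure(edges, endOfRuleStates, eps.target(), ctx, busy, result);
-				}
-			}
-		}
-		public static void main(String[] args) {
-			// a : . b X ;  state 0 invokes rule b and resumes at state 1
-			// b : . ;      state 10 is b's start, state 11 is b's stop state
-			java.util.Map<Integer, java.util.List<Edge>> edges = java.util.Map.of(
-				0, java.util.List.of(new RuleRef(10, 1)),
-				10, java.util.List.of(new Epsilon(11)));
-			java.util.Set<Integer> result = new java.util.TreeSet<Integer>();
-			closure(edges, java.util.Set.of(11), 0, null, new java.util.HashSet<String>(), result);
-			System.out.println(result);	// [0, 1, 10, 11]
-		}
-	}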
-
-	/** A closure operation should abort if that computation has already
-	 *  been done or a computation with a conflicting context has already
-	 *  been done.  If proposed NFA config's state and alt are the same
-	 *  there is potentially a problem.  If the stack context is identical
-	 *  then clearly the exact same computation is proposed.  If a context
-	 *  is a suffix of the other, then again the computation is in an
-	 *  identical context.  ?$ and ??$ are considered the same stack.
-	 *  We could walk configurations linearly doing the comparison instead
-	 *  of using a set for exact matches, but it's much slower because you can't
-	 *  do a Set lookup.  I use exact match as ANTLR
-	 *  always detects the conflict later when checking for context suffixes...
-	 *  I check for left-recursive stuff and terminate before analysis to
-	 *  avoid need to do this more expensive computation.
-	 *
-	 *  12-31-2007: I had to use the loop again rather than simple
-	 *  closureBusy.contains(proposedNFAConfiguration) lookup.  The
-	 *  semantic context should not be considered when determining if
-	 *  a closure operation is busy.  I saw a FOLLOW closure operation
-	 *  spin until time out because the predicate context kept increasing
-	 *  in size even though it's the same boolean value.  This also seems faster
-	 *  because I'm not doing String.equals on the preds all the time.
-	 *
-	 *  05-05-2008: Hmm...well, I think it was a mistake to remove the sem
-	 *  ctx check below...adding back in.  Coincides with report of ANTLR
-	 *  getting super slow: http://www.antlr.org:8888/browse/ANTLR-235
-	 *  This could be because it doesn't properly compute then resolve
-	 *  a predicate expression.  Seems to fix unit test:
-	 *  TestSemanticPredicates.testSemanticContextPreventsEarlyTerminationOfClosure()
-	 *  Changing back to Set from List.  Changed a large grammar from 8 minutes
-	 *  to 11 seconds.  Cool.  Closing ANTLR-235.
-	 */
-	public static boolean closureIsBusy(DFAState d,
-										NFAConfiguration proposedNFAConfiguration)
-	{
-		return d.closureBusy.contains(proposedNFAConfiguration);
-/*
-		int numConfigs = d.closureBusy.size();
-		// Check epsilon cycle (same state, same alt, same context)
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration c = (NFAConfiguration) d.closureBusy.get(i);
-			if ( proposedNFAConfiguration.state==c.state &&
-				 proposedNFAConfiguration.alt==c.alt &&
-				 proposedNFAConfiguration.semanticContext.equals(c.semanticContext) &&
-				 proposedNFAConfiguration.context.suffix(c.context) )
-			{
-				return true;
-			}
-		}
-		return false;
-		*/
-	}
-
-	/** Given the set of NFA states in DFA state d, find all NFA states
-	 *  reachable traversing label arcs.  By definition, there can be
-	 *  only one DFA state reachable by an atom from DFA state d so we must
-	 *  find and merge all NFA states reachable via label.  Return a new
-	 *  DFAState that has all of those NFA states with their context (i.e.,
-	 *  which alt do they predict and where to return to if they fall off
-	 *  end of a rule).
-	 *
-	 *  Because we cannot jump to another rule nor fall off the end of a rule
-	 *  via a non-epsilon transition, NFA states reachable from d have the
-	 *  same configuration as the NFA state in d.  So if NFA state 7 in d's
-	 *  configurations can reach NFA state 13 then 13 will be added to the
-	 *  new DFAState (labelDFATarget) with the same configuration as state
-	 *  7 had.
-	 *
-	 *  This method does not see EOT transitions off the end of token rule
-	 *  accept states if the rule was invoked by somebody.
-	 */
-	public DFAState reach(DFAState d, Label label) {
-		//System.out.println("reach "+label.toString(dfa.nfa.grammar)+" from "+d.stateNumber);
-		DFAState labelDFATarget = dfa.newState();
-
-		// for each NFA state in d with a labeled edge,
-		// add in target states for label
-		//System.out.println("size(d.state="+d.stateNumber+")="+d.nfaConfigurations.size());
-		//System.out.println("size(labeled edge states)="+d.configurationsWithLabeledEdges.size());
-		List<NFAConfiguration> configs = d.configurationsWithLabeledEdges;
-		int numConfigs = configs.size();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration c = configs.get(i);
-			if ( c.resolved || c.resolveWithPredicate ) {
-				continue; // the conflict resolver indicates we must leave alone
-			}
-			NFAState p = dfa.nfa.getState(c.state);
-			// by design of the grammar->NFA conversion, only transition 0
-			// may have a non-epsilon edge.
-			Transition edge = p.transition[0];
-			if ( edge==null || !c.singleAtomTransitionEmanating ) {
-				continue;
-			}
-			Label edgeLabel = edge.label;
-
-			// SPECIAL CASE
-			// if it's an EOT transition on end of lexer rule, but context
-			// stack is not empty, then don't see the EOT; the closure
-			// will have added in the proper states following the reference
-			// to this rule in the invoking rule.  In other words, if
-			// somebody called this rule, don't see the EOT emanating from
-			// this accept state.
-			if ( c.context.parent!=null && edgeLabel.label==Label.EOT )	{
-				continue;
-			}
-
-			// Labels not unique at this point (not until addReachableLabels)
-			// so try simple int label match before general set intersection
-			//System.out.println("comparing "+edgeLabel+" with "+label);
-			if ( Label.intersect(label, edgeLabel) ) {
-				// found a transition with label;
-				// add NFA target to (potentially) new DFA state
-				NFAConfiguration newC = labelDFATarget.addNFAConfiguration(
-					(NFAState)edge.target,
-					c.alt,
-					c.context,
-					c.semanticContext);
-			}
-		}
-		if ( labelDFATarget.nfaConfigurations.size()==0 ) {
-			// kill; it's empty
-			dfa.setState(labelDFATarget.stateNumber, null);
-			labelDFATarget = null;
-		}
-        return labelDFATarget;
-	}
-
-	/** Walk the configurations of this DFA state d looking for the
-	 *  configuration, c, that has a transition on EOT.  State d should
-	 *  be converted to an accept state predicting the c.alt.  Blast
-	 *  d's current configuration set and make it just have config c.
-	 *
-	 *  TODO: can there be more than one config with EOT transition?
-	 *  That would mean that two NFA configurations could reach the
-	 *  end of the token with possibly different predicted alts.
-	 *  Seems like that would be rare or impossible.  Perhaps convert
-	 *  this routine to find all such configs and give error if >1.
-	 */
-	protected void convertToEOTAcceptState(DFAState d) {
-		Label eot = new Label(Label.EOT);
-		int numConfigs = d.nfaConfigurations.size();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration c = (NFAConfiguration)d.nfaConfigurations.get(i);
-			if ( c.resolved || c.resolveWithPredicate ) {
-				continue; // the conflict resolver indicates we must leave alone
-			}
-			NFAState p = dfa.nfa.getState(c.state);
-			Transition edge = p.transition[0];
-			Label edgeLabel = edge.label;
-			if ( edgeLabel.equals(eot) ) {
-				//System.out.println("config with EOT: "+c);
-				d.setAcceptState(true);
-				//System.out.println("d goes from "+d);
-				d.nfaConfigurations.clear();
-				d.addNFAConfiguration(p,c.alt,c.context,c.semanticContext);
-				//System.out.println("to "+d);
-				return; // assume only one EOT transition
-			}
-		}
-	}
-
-	/** Add a new DFA state to the DFA if not already present.
-     *  If the DFA state uniquely predicts a single alternative, it
-     *  becomes a stop state; don't add to work list.  Further, if
-     *  there exists an NFA state predicted by > 1 different alternatives
-     *  and with the same syn and sem context, the DFA is nondeterministic for
-     *  at least one input sequence reaching that NFA state.
-     */
-    protected DFAState addDFAStateToWorkList(DFAState d) {
-        DFAState existingState = dfa.addState(d);
-		if ( d != existingState ) {
-			// already there...use/return the existing DFA state.
-			// But also set the states[d.stateNumber] to the existing
-			// DFA state because the closureIsBusy must report
-			// infinite recursion on a state before it knows
-			// whether or not the state will already be
-			// found after closure on it finishes.  It could be
-			// referring to a state that will ultimately not make it
-			// into the reachable state space and the error
-			// reporting must be able to compute the path from
-			// start to the error state with infinite recursion
-			dfa.setState(d.stateNumber, existingState);
-			return existingState;
-		}
-
-		// if not there, then examine new state.
-
-		// resolve syntactic conflicts by choosing a single alt or
-        // by using semantic predicates if present.
-        resolveNonDeterminisms(d);
-
-        // If deterministic, don't add this state; it's an accept state
-        // Just return as a valid DFA state
-		int alt = d.getUniquelyPredictedAlt();
-		if ( alt!=NFA.INVALID_ALT_NUMBER ) { // uniquely predicts an alt?
-			d = convertToAcceptState(d, alt);
-			/*
-			System.out.println("convert to accept; DFA "+d.dfa.decisionNumber+" state "+d.stateNumber+" uniquely predicts alt "+
-				d.getUniquelyPredictedAlt());
-				*/
-		}
-		else {
-            // unresolved, add to work list to continue NFA conversion
-            work.add(d);
-        }
-        return d;
-    }
-
-	protected DFAState convertToAcceptState(DFAState d, int alt) {
-		// only merge stop states if they are deterministic, have no
-		// recursion problems, and only if they have the same gated pred
-		// context!
-		// Later, the error reporting may want to trace the path from
-		// the start state to the nondet state
-		if ( DFAOptimizer.MERGE_STOP_STATES &&
-			d.getNonDeterministicAlts()==null &&
-			!d.abortedDueToRecursionOverflow &&
-			!d.abortedDueToMultipleRecursiveAlts )
-		{
-			// check to see if we already have an accept state for this alt
-			// [must do this after we resolve nondeterminisms in general]
-			DFAState acceptStateForAlt = dfa.getAcceptState(alt);
-			if ( acceptStateForAlt!=null ) {
-				// we already have an accept state for alt;
-				// Are their gated sem pred contexts the same?
-				// For now we assume a braindead version: both must not
-				// have gated preds or share exactly same single gated pred.
-				// The equals() method is only defined on Predicate contexts not
-				// OR etc...
-				SemanticContext gatedPreds = d.getGatedPredicatesInNFAConfigurations();
-				SemanticContext existingStateGatedPreds =
-					acceptStateForAlt.getGatedPredicatesInNFAConfigurations();
-				if ( (gatedPreds==null && existingStateGatedPreds==null) ||
-				     ((gatedPreds!=null && existingStateGatedPreds!=null) &&
-					  gatedPreds.equals(existingStateGatedPreds)) )
-				{
-					// make this d.statenumber point at old DFA state
-					dfa.setState(d.stateNumber, acceptStateForAlt);
-					dfa.removeState(d);    // remove this state from unique DFA state set
-					d = acceptStateForAlt; // use old accept state; throw this one out
-					return d;
-				}
-				// else consider it a new accept state; fall through.
-			}
-		}
-		d.setAcceptState(true); // new accept state for alt
-		dfa.setAcceptState(alt, d);
-		return d;
-	}
-
-	/** If > 1 NFA configurations within this DFA state have identical
-	 *  NFA state and context, but differ in their predicted
-	 *  alternative, then a single input sequence predicts multiple alts.
-	 *  (TODO: update for new context suffix stuff 3-9-2005)
-	 *  The NFA decision is therefore syntactically indistinguishable
-	 *  from the left edge upon at least one input sequence.  We may
-	 *  terminate the NFA to DFA conversion for these paths since no
-	 *  paths emanating from those NFA states can possibly separate
-	 *  these conjoined twins once intertwined to make things
-	 *  deterministic (unless there are semantic predicates; see below).
-	 *
-	 *  Upon a nondeterministic set of NFA configurations, we should
-	 *  report a problem to the grammar designer and resolve the issue
-	 *  by arbitrarily picking the first alternative (this usually
-	 *  ends up producing the most natural behavior).  Pick the lowest
-	 *  alt number and just turn off all NFA configurations
-	 *  associated with the other alts. Rather than remove conflicting
-	 *  NFA configurations, I set the "resolved" bit so that future
-	 *  computations will ignore them.  In this way, we maintain the
-	 *  complete DFA state with all its configurations, but prevent
-	 *  future DFA conversion operations from pursuing undesirable
-	 *  paths.  Remember that we want to terminate DFA conversion as
-	 *  soon as we know the decision is deterministic *or*
-	 *  nondeterministic.
-	 *
-	 *  [BTW, I have convinced myself that there can be at most one
-	 *  set of nondeterministic configurations in a DFA state.  Only NFA
-	 *  configurations arising from the same input sequence can appear
-	 *  in a DFA state.  There is no way to have another complete set
-	 *  of nondeterministic NFA configurations without another input
-	 *  sequence, which would reach a different DFA state.  Therefore,
-	 *  the two nondeterministic NFA configuration sets cannot collide
-	 *  in the same DFA state.]
-	 *
-	 *  Consider DFA state {(s|1),(s|2),(s|3),(t|3),(v|4)} where (s|a)
-	 *  is state 's' and alternative 'a'.  Here, configuration set
-	 *  {(s|1),(s|2),(s|3)} predicts 3 different alts.  Configurations
-	 *  (s|2) and (s|3) are "resolved", leaving {(s|1),(t|3),(v|4)} as
-	 *  items that must still be considered by the DFA conversion
-	 *  algorithm in DFA.findNewDFAStatesAndAddDFATransitions().
-	 *
-	 *  Consider the following grammar where alts 1 and 2 are no
-	 *  problem because of the 2nd lookahead symbol.  Alts 3 and 4 are
-	 *  identical and will therefore reach the rule end NFA state but
-	 *  predicting 2 different alts (no amount of future lookahead
-	 *  will render them deterministic/separable):
-	 *
-	 *  a : A B
-	 *    | A C
-	 *    | A
-	 *    | A
-	 *    ;
-	 *
-	 *  Here is a (slightly reduced) NFA of this grammar:
-	 *
-	 *  (1)-A->(2)-B->(end)-EOF->(8)
-	 *   |              ^
-	 *  (2)-A->(3)-C----|
-	 *   |              ^
-	 *  (4)-A->(5)------|
-	 *   |              ^
-	 *  (6)-A->(7)------|
-	 *
-	 *  where (n) is NFA state n.  To begin DFA conversion, the start
-	 *  state is created:
-	 *
-	 *  {(1|1),(2|2),(4|3),(6|4)}
-	 *
-	 *  Upon A, all NFA configurations lead to new NFA states yielding
-	 *  new DFA state:
-	 *
-	 *  {(2|1),(3|2),(5|3),(7|4),(end|3),(end|4)}
-	 *
-	 *  where the configurations with state end in them are added
-	 *  during the epsilon closure operation.  State end predicts both
-	 *  alts 3 and 4.  An error is reported, the latter configuration is
-	 *  flagged as resolved leaving the DFA state as:
-	 *
-	 *  {(2|1),(3|2),(5|3),(7|4|resolved),(end|3),(end|4|resolved)}
-	 *
-	 *  As NFA configurations are added to a DFA state during its
-	 *  construction, the reachable set of labels is computed.  Here
-	 *  reachable is {B,C,EOF} because there is at least one NFA state
-	 *  in the DFA state that can transition upon those symbols.
-	 *
-	 *  The final DFA looks like:
-	 *
-	 *  {(1|1),(2|2),(4|3),(6|4)}
-	 *              |
-	 *              v
-	 *  {(2|1),(3|2),(5|3),(7|4),(end|3),(end|4)} -B-> (end|1)
-	 *              |                        |
-	 *              C                        ----EOF-> (8,3)
-	 *              |
-	 *              v
-	 *           (end|2)
-	 *
-	 *  Upon AB, alt 1 is predicted.  Upon AC, alt 2 is predicted.
-	 *  Upon A EOF, alt 3 is predicted.  Alt 4 is not a viable
-	 *  alternative.
-	 *
-	 *  The algorithm is essentially to walk all the configurations
-	 *  looking for a conflict of the form (s|i) and (s|j) for i!=j.
-	 *  Use a hash table to track state+context pairs for collisions
-	 *  so that we have O(n) to walk the n configurations looking for
-	 *  a conflict.  Upon every conflict, track the alt number so
-	 *  we have a list of all nondeterministically predicted alts. Also
-	 *  track the minimum alt.  Next go back over the configurations, setting
-	 *  the "resolved" bit for any that have an alt that is a member of
-	 *  the nondeterministic set.  This will effectively remove any alts
-	 *  but the one we want from future consideration.
-	 *
-	 *  See resolveWithSemanticPredicates()
-	 *
-	 *  AMBIGUOUS TOKENS
-	 *
-	 *  With keywords and ID tokens, there is an inherent ambiguity in that
-	 *  "int" can be matched by ID also.  Each lexer rule has an EOT
-	 *  transition emanating from it which is used whenever the end of
-	 *  a rule is reached and another token rule did not invoke it.  EOT
-	 *  is the only thing that can be seen next.  If two rules are identical
-	 *  like "int" and "int" then the 2nd def is unreachable and you'll get
-	 *  a warning.  We prevent a warning though for the keyword/ID issue as
-	 *  ID is still reachable.  This can be a bit weird: a '+' rule followed by a
-	 *  '+'|'+=' rule will fail to match '+' for the 2nd rule.
-	 *
-	 *  If all NFA states in this DFA state are targets of EOT transitions,
-	 *  (and there is more than one state plus no unique alt is predicted)
-	 *  then DFA conversion will leave this state as a dead state as nothing
-	 *  can be reached from this state.  To resolve the ambiguity, just do
-	 *  what flex and friends do: pick the first rule (alt in this case) to
-	 *  win.  This means you should put keywords before the ID rule.
-	 *  If the DFA state has only one NFA state then there is no issue:
-	 *  it uniquely predicts one alt. :)  Problem
-	 *  states will look like this during conversion:
-	 *
-	 *  DFA 1:{9|1, 19|2, 14|3, 20|2, 23|2, 24|2, ...}-<EOT>->5:{41|3, 42|2}
-	 *
-	 *  Worse, when you have two identical literal rules, you will see 3 alts
-	 *  in the EOT state (one for ID and one each for the identical rules).
-	 */
-	public void resolveNonDeterminisms(DFAState d) {
-		if ( debug ) {
-			System.out.println("resolveNonDeterminisms "+d.toString());
-		}
-		boolean conflictingLexerRules = false;
-		Set nondeterministicAlts = d.getNonDeterministicAlts();
-		if ( debug && nondeterministicAlts!=null ) {
-			System.out.println("nondet alts="+nondeterministicAlts);
-		}
-
-		// CHECK FOR AMBIGUOUS EOT (if |allAlts|>1 and EOT state, resolve)
-		// grab any config to see if EOT state; any other configs must
-		// transition on EOT to get to this DFA state as well so all
-		// states in d must be targets of EOT.  These are the end states
-		// created in NFAFactory.build_EOFState
-		NFAConfiguration anyConfig = d.nfaConfigurations.get(0);
-		NFAState anyState = dfa.nfa.getState(anyConfig.state);
-
-		// if d is target of EOT and more than one predicted alt
-		// indicate that d is nondeterministic on all alts otherwise
-		// it looks like state has no problem
-		if ( anyState.isEOTTargetState() ) {
-			Set allAlts = d.getAltSet();
-			// is more than 1 alt predicted?
-			if ( allAlts!=null && allAlts.size()>1 ) {
-				nondeterministicAlts = allAlts;
-				// track Tokens rule issues differently than other decisions
-				if ( d.dfa.isTokensRuleDecision() ) {
-					dfa.probe.reportLexerRuleNondeterminism(d,allAlts);
-					//System.out.println("Tokens rule DFA state "+d+" nondeterministic");
-					conflictingLexerRules = true;
-				}
-			}
-		}
-
-		// if no problems return unless we aborted work on d to avoid inf recursion
-		if ( !d.abortedDueToRecursionOverflow && nondeterministicAlts==null ) {
-			return; // no problems, return
-		}
-
-		// if we're not a conflicting lexer rule and we didn't abort, report ambig
-		// We should get a report for abort so don't give another
-		if ( !d.abortedDueToRecursionOverflow && !conflictingLexerRules ) {
-			// TODO: with k=x option set, this is called twice for same state
-			dfa.probe.reportNondeterminism(d, nondeterministicAlts);
-			// TODO: how to turn off when it's only the FOLLOW that is
-			// conflicting.  This used to shut off even alts i,j < n
-			// conflict warnings. :(
-		}
-
-		// ATTEMPT TO RESOLVE WITH SEMANTIC PREDICATES
-		boolean resolved =
-			tryToResolveWithSemanticPredicates(d, nondeterministicAlts);
-		if ( resolved ) {
-			if ( debug ) {
-				System.out.println("resolved DFA state "+d.stateNumber+" with pred");
-			}
-			d.resolvedWithPredicates = true;
-			dfa.probe.reportNondeterminismResolvedWithSemanticPredicate(d);
-			return;
-		}
-
-		// RESOLVE SYNTACTIC CONFLICT BY REMOVING ALL BUT ONE ALT
-		resolveByChoosingFirstAlt(d, nondeterministicAlts);
-
-		//System.out.println("state "+d.stateNumber+" resolved to alt "+winningAlt);
-	}
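-
-	/** Illustration only (invented record type, not ANTLR's NFAConfiguration): the
-	 *  conflict walk described above.  Hash each configuration by state+context,
-	 *  collect the alts of any (s|i),(s|j) i!=j collisions, then mark every
-	 *  configuration whose alt is in the conflicting set, except the minimum alt,
-	 *  as resolved. */
-	static class ConflictResolutionSketch {
-		record Config(int state, String context, int alt, boolean resolved) {}
-
-		static java.util.List<Config> resolveByMinAlt(java.util.List<Config> configs) {
-			java.util.Map<String, Integer> altForKey = new java.util.HashMap<>();
-			java.util.Set<Integer> conflictingAlts = new java.util.TreeSet<>();
-			for (Config c : configs) {				// O(n) collision walk
-				Integer prevAlt = altForKey.putIfAbsent(c.state() + "|" + c.context(), c.alt());
-				if ( prevAlt != null && prevAlt != c.alt() ) {
-					conflictingAlts.add(prevAlt);
-					conflictingAlts.add(c.alt());
-				}
-			}
-			if ( conflictingAlts.isEmpty() ) return configs;	// deterministic state
-			int min = conflictingAlts.iterator().next();		// lowest conflicting alt wins
-			java.util.List<Config> out = new java.util.ArrayList<>();
-			for (Config c : configs) {
-				out.add(new Config(c.state(), c.context(), c.alt(),
-								   c.alt() != min && conflictingAlts.contains(c.alt())));
-			}
-			return out;
-		}
-		public static void main(String[] args) {
-			java.util.List<Config> d = java.util.List.of(
-				new Config(5, "$", 3, false), new Config(5, "$", 4, false),
-				new Config(7, "$", 4, false), new Config(2, "$", 1, false));
-			resolveByMinAlt(d).forEach(System.out::println);
-			// the alt-4 configs come back resolved=true; alts 3 (min) and 1 stay active
-		}
-	}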
-
-	protected int resolveByChoosingFirstAlt(DFAState d, Set nondeterministicAlts) {
-		int winningAlt = 0;
-		if ( dfa.isGreedy() ) {
-			winningAlt = resolveByPickingMinAlt(d,nondeterministicAlts);
-		}
-		else {
-			// If nongreedy, the exit alt should win, but only if it's
-			// involved in the nondeterminism!
-			/*
-			System.out.println("resolving exit alt for decision="+
-				dfa.decisionNumber+" state="+d);
-			System.out.println("nondet="+nondeterministicAlts);
-			System.out.println("exit alt "+exitAlt);
-			*/
-			int exitAlt = dfa.getNumberOfAlts();
-			if ( nondeterministicAlts.contains(Utils.integer(exitAlt)) ) {
-				// if nongreedy and exit alt is one of those nondeterministic alts
-				// predicted, resolve in favor of what follows block
-				winningAlt = resolveByPickingExitAlt(d,nondeterministicAlts);
-			}
-			else {
-				winningAlt = resolveByPickingMinAlt(d,nondeterministicAlts);
-			}
-		}
-		return winningAlt;
-	}
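-
-	/** Illustration only (plain ints, no ANTLR types): the greedy/nongreedy choice
-	 *  made above.  Greedy decisions keep the lowest conflicting alt; nongreedy
-	 *  decisions prefer the exit alt (numbered nAlts, the highest) when it is
-	 *  actually part of the conflict, otherwise they too fall back to the minimum. */
-	static class WinningAltSketch {
-		static int winningAlt(boolean greedy, int nAlts, java.util.SortedSet<Integer> conflictingAlts) {
-			if ( !greedy && conflictingAlts.contains(nAlts) ) {
-				return nAlts;					// exit branch wins for nongreedy loops
-			}
-			return conflictingAlts.first();		// otherwise the lowest alt wins
-		}
-		public static void main(String[] args) {
-			java.util.SortedSet<Integer> conflict =
-				new java.util.TreeSet<>(java.util.List.of(1, 3));
-			System.out.println(winningAlt(true, 3, conflict));		// 1 (greedy: min alt)
-			System.out.println(winningAlt(false, 3, conflict));	// 3 (nongreedy: exit alt involved)
-		}
-	}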
-
-	/** Turn off all configurations associated with the
-	 *  set of incoming nondeterministic alts except the min alt number.
-	 *  There may be many alts among the configurations but only turn off
-	 *  the ones with problems (other than the min alt of course).
-	 *
-	 *  If nondeterministicAlts is null then turn off all configs except those
-	 *  associated with the minimum alt.
-	 *
-	 *  Return the min alt found.
-	 */
-	protected int resolveByPickingMinAlt(DFAState d, Set nondeterministicAlts) {
-		int min = Integer.MAX_VALUE;
-		if ( nondeterministicAlts!=null ) {
-			min = getMinAlt(nondeterministicAlts);
-		}
-		else {
-			min = d.minAltInConfigurations;
-		}
-
-		turnOffOtherAlts(d, min, nondeterministicAlts);
-
-		return min;
-	}
-
-	/** Resolve state d by choosing the exit alt, which is the same value as the
-	 *  number of alternatives.  Return that exit alt.
-	 */
-	protected int resolveByPickingExitAlt(DFAState d, Set nondeterministicAlts) {
-		int exitAlt = dfa.getNumberOfAlts();
-		turnOffOtherAlts(d, exitAlt, nondeterministicAlts);
-		return exitAlt;
-	}
-
-	/** turn off all states associated with alts other than the good one
-	 *  (as long as they are one of the nondeterministic ones)
-	 */
-	protected static void turnOffOtherAlts(DFAState d, int min, Set<Integer> nondeterministicAlts) {
-		int numConfigs = d.nfaConfigurations.size();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration configuration = (NFAConfiguration)d.nfaConfigurations.get(i);
-			if ( configuration.alt!=min ) {
-				if ( nondeterministicAlts==null ||
-					 nondeterministicAlts.contains(Utils.integer(configuration.alt)) )
-				{
-					configuration.resolved = true;
-				}
-			}
-		}
-	}
-
-	protected static int getMinAlt(Set<Integer> nondeterministicAlts) {
-		int min = Integer.MAX_VALUE;
-		for (Integer altI : nondeterministicAlts) {
-			int alt = altI.intValue();
-			if ( alt < min ) {
-				min = alt;
-			}
-		}
-		return min;
-	}
-
-	/** See if a set of nondeterministic alternatives can be disambiguated
-	 *  with the semantic predicate contexts of the alternatives.
-	 *
-	 *  Without semantic predicates, syntactic conflicts are resolved
-	 *  by simply choosing the first viable alternative.  In the
-	 *  presence of semantic predicates, you can resolve the issue by
-	 *  evaluating boolean expressions at run time.  During analysis,
-	 *  this amounts to suppressing grammar error messages to the
-	 *  developer.  NFA configurations are always marked as "to be
-	 *  resolved with predicates" so that
-	 *  DFA.findNewDFAStatesAndAddDFATransitions() will know to ignore
-	 *  these configurations and add predicate transitions to the DFA
-	 *  after adding token/char labels.
-	 *
-	 *  During analysis, we can simply make sure that for n
-	 *  ambiguously predicted alternatives there are at least n-1
-	 *  unique predicate sets.  The nth alternative can be predicted
-	 *  with "not" the "or" of all other predicates.  NFA configurations without
-	 *  predicates are assumed to have the default predicate of
-	 *  "true" from a user point of view.  When true is combined via || with
-	 *  another predicate, the predicate is a tautology and must be removed
-	 *  from consideration for disambiguation:
-	 *
-	 *  a : b | B ; // hoisting p1||true out of rule b, yields no predicate
-	 *  b : {p1}? B | B ;
-	 *
-	 *  This is done down in getPredicatesPerNonDeterministicAlt().
-	 */
-	protected boolean tryToResolveWithSemanticPredicates(DFAState d,
-														 Set nondeterministicAlts)
-	{
-		Map<Integer, SemanticContext> altToPredMap =
-				getPredicatesPerNonDeterministicAlt(d, nondeterministicAlts);
-
-		if ( altToPredMap.size()==0 ) {
-			return false;
-		}
-
-		//System.out.println("nondeterministic alts with predicates: "+altToPredMap);
-		dfa.probe.reportAltPredicateContext(d, altToPredMap);
-
-		if ( nondeterministicAlts.size()-altToPredMap.size()>1 ) {
-			// too few predicates to resolve; just return
-			return false;
-		}
-
-		// Handle case where 1 predicate is missing
-		// Case 1. Semantic predicates
-		// If the missing pred is on nth alt, !(union of other preds)==true
-		// so we can avoid that computation.  If the naked alt is the ith, then we must
-		// test it with !(union) since semantically predicated alts are order
-		// independent
-		// Case 2: Syntactic predicates
-		// The naked alt is always assumed to be true as the order of
-		// alts is the order of precedence.  The naked alt will be a tautology
-		// anyway as it's !(union of other preds).  This implies
-		// that there is no such thing as noviable alt for synpred edges
-		// emanating from a DFA state.
-		if ( altToPredMap.size()==nondeterministicAlts.size()-1 ) {
-			// if there are n-1 predicates for n nondeterministic alts, can fix
-			org.antlr.misc.BitSet ndSet = org.antlr.misc.BitSet.of(nondeterministicAlts);
-			org.antlr.misc.BitSet predSet = org.antlr.misc.BitSet.of(altToPredMap);
-			int nakedAlt = ndSet.subtract(predSet).getSingleElement();
-			SemanticContext nakedAltPred = null;
-			if ( nakedAlt == max(nondeterministicAlts) ) {
-				// the naked alt is the last nondet alt and will be the default clause
-				nakedAltPred = new SemanticContext.TruePredicate();
-			}
-			else {
-				// pretend naked alternative is covered with !(union other preds)
-				// unless one of preds from other alts is a manually specified synpred
-				// since those have precedence same as alt order.  Missing synpred
-				// is true so that alt wins (or is at least attempted).
-				// Note: can't miss any preds on alts (can't be here) if auto backtrack
-				// since it prefixes all.
-				// In LL(*) paper, i'll just have algorithm emit warning about uncovered
-				// pred
-				SemanticContext unionOfPredicatesFromAllAlts =
-					getUnionOfPredicates(altToPredMap);
-				//System.out.println("all predicates "+unionOfPredicatesFromAllAlts);
-				if ( unionOfPredicatesFromAllAlts.isSyntacticPredicate() ) {
-					nakedAltPred = new SemanticContext.TruePredicate();
-				}
-				else {
-					nakedAltPred =
-						SemanticContext.not(unionOfPredicatesFromAllAlts);
-				}
-			}
-
-			//System.out.println("covering naked alt="+nakedAlt+" with "+nakedAltPred);
-
-			altToPredMap.put(Utils.integer(nakedAlt), nakedAltPred);
-			// set all config with alt=nakedAlt to have the computed predicate
-			int numConfigs = d.nfaConfigurations.size();
-			for (int i = 0; i < numConfigs; i++) { // TODO: I don't think we need to do this; altToPredMap has it
-			 //7/27/10  theok, I removed it and it still seems to work with everything; leave in anyway just in case
-				NFAConfiguration configuration = (NFAConfiguration)d.nfaConfigurations.get(i);
-				if ( configuration.alt == nakedAlt ) {
-					configuration.semanticContext = nakedAltPred;
-				}
-			}
-		}
-
-		if ( altToPredMap.size()==nondeterministicAlts.size() ) {
-			// RESOLVE CONFLICT by picking one NFA configuration for each alt
-			// and setting its resolvedWithPredicate flag
-			// First, prevent a recursion warning on this state due to
-			// pred resolution
-			if ( d.abortedDueToRecursionOverflow ) {
-				d.dfa.probe.removeRecursiveOverflowState(d);
-			}
-			int numConfigs = d.nfaConfigurations.size();
-			//System.out.println("pred map="+altToPredMap);
-			for (int i = 0; i < numConfigs; i++) {
-				NFAConfiguration configuration = (NFAConfiguration)d.nfaConfigurations.get(i);
-				SemanticContext semCtx = (SemanticContext)
-						altToPredMap.get(Utils.integer(configuration.alt));
-				if ( semCtx!=null ) {
-					// resolve (first found) with pred
-					// and remove alt from problem list
-					//System.out.println("c="+configuration);
-					configuration.resolveWithPredicate = true;
-					// altToPredMap has preds from all alts; store into "anointed" config
-					configuration.semanticContext = semCtx; // reset to combined
-					altToPredMap.remove(Utils.integer(configuration.alt));
-
-					// notify grammar that we've used the preds contained in semCtx
-					if ( semCtx.isSyntacticPredicate() ) {
-						dfa.nfa.grammar.synPredUsedInDFA(dfa, semCtx);
-					}
-				}
-				else if ( nondeterministicAlts.contains(Utils.integer(configuration.alt)) ) {
-					// resolve all configurations for nondeterministic alts
-					// for which there is no predicate context by turning it off
-					configuration.resolved = true;
-				}
-			}
-			return true;
-		}
-
-		return false;  // couldn't fix the problem with predicates
-	}
-
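As a quick illustration of the "n-1 predicates for n alts" rule described above (a sketch only, not code from this change): pred(...) below is a hypothetical stand-in for a Predicate hoisted from a {p}? grammar node, and the combinators used are the SemanticContext.or/not methods that appear further down in this diff.

    // Hypothetical conflict: alts {1,2,3}; alt 1 carries {p1}?, alt 3 carries {p3}?, alt 2 is naked.
    SemanticContext p1 = pred("p1");   // pred(...) is an assumed helper, not part of this file
    SemanticContext p3 = pred("p3");

    SemanticContext union = SemanticContext.or(p1, p3);        // p1||p3
    SemanticContext nakedAltPred = SemanticContext.not(union); // alt 2 is covered with !(p1||p3)

    // Had the naked alt been alt 3 (the max of the conflicting alts), it would instead
    // become the default clause and be covered with a TruePredicate, as in the code above.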
-	/** Return a mapping from nondeterministic alt to combined list of predicates.
-	 *  If both (s|i|semCtx1) and (t|i|semCtx2) exist, then the proper predicate
-	 *  for alt i is semCtx1||semCtx2 because you have arrived at this single
-	 *  DFA state via two NFA paths, both of which have semantic predicates.
-	 *  We ignore deterministic alts because syntax alone is sufficient
-	 *  to predict those.  Do not include their predicates.
-	 *
-	 *  Alts with no predicate are assumed to have {true}? pred.
-	 *
-	 *  When combining via || with "true", all predicates are removed from
-	 *  consideration since the expression will always be true and hence
-	 *  not tell us how to resolve anything.  So, if any NFA configuration
-	 *  in this DFA state does not have a semantic context, the alt cannot
-	 *  be resolved with a predicate.
-	 *
-	 *  If nonnull, incidentEdgeLabel tells us what NFA transition label
-	 *  we did a reach on to compute state d.  d may have insufficient
-	 *  preds, so we really want this for the error message.
-	 */
-	protected Map<Integer, SemanticContext> getPredicatesPerNonDeterministicAlt(DFAState d,
-																				Set nondeterministicAlts)
-	{
-		// map alt to combined SemanticContext
-		Map<Integer, SemanticContext> altToPredicateContextMap =
-			new HashMap<Integer, SemanticContext>();
-		// init the alt to predicate set map
-		Map<Integer, OrderedHashSet<SemanticContext>> altToSetOfContextsMap =
-			new HashMap<Integer, OrderedHashSet<SemanticContext>>();
-		for (Iterator it = nondeterministicAlts.iterator(); it.hasNext();) {
-			Integer altI = (Integer) it.next();
-			altToSetOfContextsMap.put(altI, new OrderedHashSet<SemanticContext>());
-		}
-
-		/*
-		List<Label> sampleInputLabels = d.dfa.probe.getSampleNonDeterministicInputSequence(d);
-		String input = d.dfa.probe.getInputSequenceDisplay(sampleInputLabels);
-		System.out.println("sample input: "+input);
-		*/
-
-		// for each configuration, create a unique set of predicates
-		// Also, track the alts with at least one uncovered configuration
-		// (one w/o a predicate); tracks tautologies like p1||true
-		Map<Integer, Set<Token>> altToLocationsReachableWithoutPredicate = new HashMap<Integer, Set<Token>>();
-		Set<Integer> nondetAltsWithUncoveredConfiguration = new HashSet<Integer>();
-		//System.out.println("configs="+d.nfaConfigurations);
-		//System.out.println("configs with preds?"+d.atLeastOneConfigurationHasAPredicate);
-		//System.out.println("configs with preds="+d.configurationsWithPredicateEdges);
-		int numConfigs = d.nfaConfigurations.size();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration configuration = (NFAConfiguration)d.nfaConfigurations.get(i);
-			Integer altI = Utils.integer(configuration.alt);
-			// if alt is nondeterministic, combine its predicates
-			if ( nondeterministicAlts.contains(altI) ) {
-				// if there is a predicate for this NFA configuration, OR in
-				if ( configuration.semanticContext !=
-					 SemanticContext.EMPTY_SEMANTIC_CONTEXT )
-				{
-					Set<SemanticContext> predSet = altToSetOfContextsMap.get(altI);
-					predSet.add(configuration.semanticContext);
-				}
-				else {
-					// if no predicate, but it's part of nondeterministic alt
-					// then at least one path exists not covered by a predicate.
-					// must remove predicate for this alt; track incomplete alts
-					nondetAltsWithUncoveredConfiguration.add(altI);
-					/*
-					NFAState s = dfa.nfa.getState(configuration.state);
-					System.out.println("###\ndec "+dfa.decisionNumber+" alt "+configuration.alt+
-									   " enclosing rule for nfa state not covered "+
-									   s.enclosingRule);
-					if ( s.associatedASTNode!=null ) {
-						System.out.println("token="+s.associatedASTNode.token);
-					}
-					System.out.println("nfa state="+s);
-
-					if ( s.incidentEdgeLabel!=null && Label.intersect(incidentEdgeLabel, s.incidentEdgeLabel) ) {
-						Set<Token> locations = altToLocationsReachableWithoutPredicate.get(altI);
-						if ( locations==null ) {
-							locations = new HashSet<Token>();
-							altToLocationsReachableWithoutPredicate.put(altI, locations);
-						}
-						locations.add(s.associatedASTNode.token);
-					}
-					*/
-				}
-			}
-		}
-
-		// For each alt, OR together all unique predicates associated with
-		// all configurations
-		// Also, track the list of incompletely covered alts: those alts
-		// with at least 1 predicate and at least one configuration w/o a
-		// predicate. We want this in order to report to the decision probe.
-		List<Integer> incompletelyCoveredAlts = new ArrayList<Integer>();
-		for (Iterator it = nondeterministicAlts.iterator(); it.hasNext();) {
-			Integer altI = (Integer) it.next();
-			Set<SemanticContext> contextsForThisAlt = altToSetOfContextsMap.get(altI);
-			if ( nondetAltsWithUncoveredConfiguration.contains(altI) ) { // >= 1 config has no ctx
-				if ( contextsForThisAlt.size()>0 ) {    // && at least one pred
-					incompletelyCoveredAlts.add(altI);  // this alt is incompletely covered
-				}
-				continue; // don't include it; at least 1 config has no ctx
-			}
-			SemanticContext combinedContext = null;
-			for (Iterator itrSet = contextsForThisAlt.iterator(); itrSet.hasNext();) {
-				SemanticContext ctx = (SemanticContext) itrSet.next();
-				combinedContext =
-						SemanticContext.or(combinedContext,ctx);
-			}
-			altToPredicateContextMap.put(altI, combinedContext);
-		}
-
-		if ( incompletelyCoveredAlts.size()>0 ) {
-			/*
-			System.out.println("prob in dec "+dfa.decisionNumber+" state="+d);
-			FASerializer serializer = new FASerializer(dfa.nfa.grammar);
-			String result = serializer.serialize(dfa.startState);
-			System.out.println("dfa: "+result);
-			System.out.println("incomplete alts: "+incompletelyCoveredAlts);
-			System.out.println("nondet="+nondeterministicAlts);
-			System.out.println("nondetAltsWithUncoveredConfiguration="+ nondetAltsWithUncoveredConfiguration);
-			System.out.println("altToCtxMap="+altToSetOfContextsMap);
-			System.out.println("altToPredicateContextMap="+altToPredicateContextMap);
-			*/
-			for (int i = 0; i < numConfigs; i++) {
-				NFAConfiguration configuration = (NFAConfiguration)d.nfaConfigurations.get(i);
-				Integer altI = Utils.integer(configuration.alt);
-				if ( incompletelyCoveredAlts.contains(altI) &&
-					 configuration.semanticContext == SemanticContext.EMPTY_SEMANTIC_CONTEXT )
-				{
-					NFAState s = dfa.nfa.getState(configuration.state);
-					/*
-					System.out.print("nondet config w/o context "+configuration+
-									 " incident "+(s.incidentEdgeLabel!=null?s.incidentEdgeLabel.toString(dfa.nfa.grammar):null));
-					if ( s.associatedASTNode!=null ) {
-						System.out.print(" token="+s.associatedASTNode.token);
-					}
-					else System.out.println();
-					*/
-                    // We want to report getting to an NFA state with an
-                    // incoming label, unless it's EOF, w/o a predicate.
-                    if ( s.incidentEdgeLabel!=null && s.incidentEdgeLabel.label != Label.EOF ) {
-                        if ( s.associatedASTNode==null || s.associatedASTNode.token==null ) {
-							ErrorManager.internalError("no AST/token for nonepsilon target w/o predicate");
-						}
-						else {
-							Set<Token> locations = altToLocationsReachableWithoutPredicate.get(altI);
-							if ( locations==null ) {
-								locations = new HashSet<Token>();
-								altToLocationsReachableWithoutPredicate.put(altI, locations);
-							}
-							locations.add(s.associatedASTNode.token);
-						}
-					}
-				}
-			}
-			dfa.probe.reportIncompletelyCoveredAlts(d,
-													altToLocationsReachableWithoutPredicate);
-		}
-
-		return altToPredicateContextMap;
-	}
-
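To make the per-alt combination concrete, a small hedged sketch (pred(...) is again a hypothetical stand-in for a hoisted Predicate; java.util.HashMap/Map are assumed to be imported as in this file):

    Map<Integer, SemanticContext> altToPred = new HashMap<Integer, SemanticContext>();

    // alt 1 is reached along two NFA paths carrying {p1}? and {p2}?: OR them together
    altToPred.put(1, SemanticContext.or(pred("p1"), pred("p2")));   // p1||p2

    // alt 2 is reached along one path with {q}? and another with no predicate at all;
    // q||true is a tautology, so alt 2 gets no entry and is reported as incompletely covered.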
-	/** OR together all predicates from the alts.  Note that the predicate
-	 *  for an alt could itself be a combination of predicates.
-	 */
-	protected static SemanticContext getUnionOfPredicates(Map altToPredMap) {
-		Iterator iter;
-		SemanticContext unionOfPredicatesFromAllAlts = null;
-		iter = altToPredMap.values().iterator();
-		while ( iter.hasNext() ) {
-			SemanticContext semCtx = (SemanticContext)iter.next();
-			if ( unionOfPredicatesFromAllAlts==null ) {
-				unionOfPredicatesFromAllAlts = semCtx;
-			}
-			else {
-				unionOfPredicatesFromAllAlts =
-						SemanticContext.or(unionOfPredicatesFromAllAlts,semCtx);
-			}
-		}
-		return unionOfPredicatesFromAllAlts;
-	}
-
-	/** for each NFA config in d, look for "predicate required" sign set
-	 *  during nondeterminism resolution.
-	 *
-	 *  Add the predicate edges sorted by the alternative number; I'm fairly
-	 *  sure that I could walk the configs backwards so they are added to
-	 *  the predDFATarget in the right order, but it's best to make sure.
-	 *  Predicates succeed in the order they are specified.  Alt i wins
-	 *  over alt i+1 if both predicates are true.
-	 */
-	protected void addPredicateTransitions(DFAState d) {
-		List configsWithPreds = new ArrayList();
-		// get a list of all configs with predicates
-		int numConfigs = d.nfaConfigurations.size();
-		for (int i = 0; i < numConfigs; i++) {
-			NFAConfiguration c = (NFAConfiguration)d.nfaConfigurations.get(i);
-			if ( c.resolveWithPredicate ) {
-				configsWithPreds.add(c);
-			}
-		}
-		// Sort ascending according to alt; alt i has higher precedence than i+1
-		Collections.sort(configsWithPreds,
-			 new Comparator() {
-				 public int compare(Object a, Object b) {
-					 NFAConfiguration ca = (NFAConfiguration)a;
-					 NFAConfiguration cb = (NFAConfiguration)b;
-					 if ( ca.alt < cb.alt ) return -1;
-					 else if ( ca.alt > cb.alt ) return 1;
-					 return 0;
-				 }
-			 });
-		List predConfigsSortedByAlt = configsWithPreds;
-		// Now, we can add edges emanating from d for these preds in right order
-		for (int i = 0; i < predConfigsSortedByAlt.size(); i++) {
-			NFAConfiguration c = (NFAConfiguration)predConfigsSortedByAlt.get(i);
-			DFAState predDFATarget = d.dfa.getAcceptState(c.alt);
-			if ( predDFATarget==null ) {
-				predDFATarget = dfa.newState(); // create if not there.
-				// create a new DFA state that is a target of the predicate from d
-				predDFATarget.addNFAConfiguration(dfa.nfa.getState(c.state),
-												  c.alt,
-												  c.context,
-												  c.semanticContext);
-				predDFATarget.setAcceptState(true);
-				dfa.setAcceptState(c.alt, predDFATarget);
-				DFAState existingState = dfa.addState(predDFATarget);
-				if ( predDFATarget != existingState ) {
-					// already there...use/return the existing DFA state that
-					// is a target of this predicate.  Make this state number
-					// point at the existing state
-					dfa.setState(predDFATarget.stateNumber, existingState);
-					predDFATarget = existingState;
-				}
-			}
-			// add a transition to pred target from d
-			d.addTransition(predDFATarget, new PredicateLabel(c.semanticContext));
-		}
-	}
-
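The anonymous Comparator above only orders configurations by alt number, so alt i is tried before alt i+1. Purely as an illustration (not part of this change, and assuming a Java 8+ runtime and java.util.Comparator), the same ordering could be expressed as:

    Collections.sort(configsWithPreds,
                     Comparator.comparingInt((NFAConfiguration c) -> c.alt));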
-	protected void initContextTrees(int numberOfAlts) {
-        contextTrees = new NFAContext[numberOfAlts];
-        for (int i = 0; i < contextTrees.length; i++) {
-            int alt = i+1;
-            // add a dummy root node so that an NFA configuration can
-            // always point at an NFAContext.  If a context refers to this
-            // node then it implies there is no call stack for
-            // that configuration
-            contextTrees[i] = new NFAContext(null, null);
-        }
-    }
-
-	public static int max(Set s) {
-		if ( s==null ) {
-			return Integer.MIN_VALUE;
-		}
-		int i = 0;
-		int m = 0;
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			i++;
-			Integer I = (Integer) it.next();
-			if ( i==1 ) { // init m with first value
-				m = I.intValue();
-				continue;
-			}
-			if ( I.intValue()>m ) {
-				m = I.intValue();
-			}
-		}
-		return m;
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/PredicateLabel.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/PredicateLabel.java
deleted file mode 100644
index 7252f4f..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/PredicateLabel.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.tool.Grammar;
-import org.antlr.tool.GrammarAST;
-
-public class PredicateLabel extends Label {
-	/** A tree of semantic predicates from the grammar AST if label==SEMPRED.
-	 *  In the NFA, labels will always be exactly one predicate, but the DFA
-	 *  may have to combine a bunch of them as it collects predicates from
-	 *  multiple NFA configurations into a single DFA state.
-	 */
-	protected SemanticContext semanticContext;
-	
-	/** Make a semantic predicate label */
-	public PredicateLabel(GrammarAST predicateASTNode) {
-		super(SEMPRED);
-		this.semanticContext = new SemanticContext.Predicate(predicateASTNode);
-	}
-
-	/** Make a semantic predicate label from an existing semantic context */
-	public PredicateLabel(SemanticContext semCtx) {
-		super(SEMPRED);
-		this.semanticContext = semCtx;
-	}
-
-	public int hashCode() {
-		return semanticContext.hashCode();
-	}
-
-	public boolean equals(Object o) {
-		if ( o==null ) {
-			return false;
-		}
-		if ( this == o ) {
-			return true; // equals if same object
-		}
-		if ( !(o instanceof PredicateLabel) ) {
-			return false;
-		}
-		return semanticContext.equals(((PredicateLabel)o).semanticContext);
-	}
-
-	public boolean isSemanticPredicate() {
-		return true;
-	}
-
-	public SemanticContext getSemanticContext() {
-		return semanticContext;
-	}
-
-	public String toString() {
-		return "{"+semanticContext+"}?";
-	}
-
-	public String toString(Grammar g) {
-		return toString();
-	}
-}
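Because equality and hashing are delegated entirely to the wrapped SemanticContext, two labels built from equal contexts behave as the same DFA predicate edge. A small illustrative sketch (not code from this change):

    PredicateLabel a = new PredicateLabel(new SemanticContext.TruePredicate());
    PredicateLabel b = new PredicateLabel(new SemanticContext.TruePredicate());

    boolean interchangeable = a.equals(b) && a.hashCode() == b.hashCode();  // true
    System.out.println(a);  // prints {true}?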
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/SemanticContext.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/SemanticContext.java
deleted file mode 100644
index 682de32..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/SemanticContext.java
+++ /dev/null
@@ -1,820 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.GrammarAST;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-
-import java.util.*;
-
-/** A binary tree structure used to record the semantic context in which
- *  an NFA configuration is valid.  It's either a single predicate or
- *  a tree representing an operation tree such as: p1&&p2 or p1||p2.
- *
- *  For NFA o-p1->o-p2->o, create tree AND(p1,p2).
- *  For NFA (1)-p1->(2)
- *           |       ^
- *           |       |
- *          (3)-p2----
- *  we will have to combine p1 and p2 into DFA state as we will be
- *  adding NFA configurations for state 2 with two predicates p1,p2.
- *  So, set context for combined NFA config for state 2: OR(p1,p2).
- *
- *  I have scoped the AND, NOT, OR, and Predicate subclasses of
- *  SemanticContext within the scope of this outer class.
- *
- *  July 7, 2006: TJP altered OR to be a set of operands. The binary tree
- *  made it really hard to reduce complicated || sequences to their minimum;
- *  we got huge repeated || conditions.
- */
-public abstract class SemanticContext {
-	/** Create a default value for the semantic context shared among all
-	 *  NFAConfigurations that do not have an actual semantic context.
-	 *  This prevents lots of if!=null type checks all over; it represents
-	 *  just an empty set of predicates.
-	 */
-	public static final SemanticContext EMPTY_SEMANTIC_CONTEXT = new Predicate(Predicate.INVALID_PRED_VALUE);
-
-	/** Given a semantic context expression tree, return a tree with all
-	 *  nongated predicates set to true and then reduced.  So p&&(q||r) would
-	 *  return p&&r if q is nongated but p and r are gated.
-	 */
-	public abstract SemanticContext getGatedPredicateContext();
-
-	/** Generate an expression that will evaluate the semantic context,
-	 *  given a set of output templates.
-	 */
-	public abstract ST genExpr(CodeGenerator generator,
-										   STGroup templates,
-										   DFA dfa);
-
-	public abstract boolean hasUserSemanticPredicate(); // user-specified sempred {}? or {}?=>
-	public abstract boolean isSyntacticPredicate();
-
-	/** Notify the indicated grammar of any syn preds used within this context */
-	public void trackUseOfSyntacticPredicates(Grammar g) {
-	}
-
-	public static class Predicate extends SemanticContext {
-		/** The AST node in tree created from the grammar holding the predicate */
-		public GrammarAST predicateAST;
-
-		/** Is this a {...}?=> gating predicate or a normal disambiguating {..}?
-		 *  If any predicate in expression is gated, then expression is considered
-		 *  gated.
-		 *
-		 *  The simple Predicate object's predicate AST's type is used to set
-		 *  gated to true if type==GATED_SEMPRED.
-		 */
-		protected boolean gated = false;
-
-		/** syntactic predicates are converted to semantic predicates
-		 *  but synpreds are generated slightly differently.
-		 */
-		protected boolean synpred = false;
-
-		public static final int INVALID_PRED_VALUE = -2;
-		public static final int FALSE_PRED = 0;
-		public static final int TRUE_PRED = ~0;
-
-		/** sometimes predicates are known to be true or false; we need
-		 *  a way to represent this without resorting to a target language
-		 *  value like true or TRUE.
-		 */
-		protected int constantValue = INVALID_PRED_VALUE;
-
-		public Predicate(int constantValue) {
-			predicateAST = new GrammarAST();
-			this.constantValue=constantValue;
-		}
-
-		public Predicate(GrammarAST predicate) {
-			this.predicateAST = predicate;
-			this.gated =
-				predicate.getType()==ANTLRParser.GATED_SEMPRED ||
-				predicate.getType()==ANTLRParser.SYN_SEMPRED ;
-			this.synpred =
-				predicate.getType()==ANTLRParser.SYN_SEMPRED ||
-				predicate.getType()==ANTLRParser.BACKTRACK_SEMPRED;
-		}
-
-		public Predicate(Predicate p) {
-			this.predicateAST = p.predicateAST;
-			this.gated = p.gated;
-			this.synpred = p.synpred;
-			this.constantValue = p.constantValue;
-		}
-
-		/** Two predicates are the same if they are literally the same
-		 *  text rather than same node in the grammar's AST.
-		 *  Or, if they have the same constant value, return equal.
-		 *  As of July 2006 I'm not sure these are needed.
-		 */
-		public boolean equals(Object o) {
-			if ( !(o instanceof Predicate) ) {
-				return false;
-			}
-
-			Predicate other = (Predicate)o;
-			if (this.constantValue != other.constantValue){
-				return false;
-			}
-
-			if (this.constantValue != INVALID_PRED_VALUE){
-				return true;
-			}
-
-			return predicateAST.getText().equals(other.predicateAST.getText());
-		}
-
-		public int hashCode() {
-			if (constantValue != INVALID_PRED_VALUE){
-				return constantValue;
-			}
-
-			if ( predicateAST ==null ) {
-				return 0;
-			}
-
-			return predicateAST.getText().hashCode();
-		}
-
-		public ST genExpr(CodeGenerator generator,
-									  STGroup templates,
-									  DFA dfa)
-		{
-			ST eST = null;
-			if ( templates!=null ) {
-				if ( synpred ) {
-					eST = templates.getInstanceOf("evalSynPredicate");
-				}
-				else {
-					eST = templates.getInstanceOf("evalPredicate");
-					generator.grammar.decisionsWhoseDFAsUsesSemPreds.add(dfa);
-				}
-				String predEnclosingRuleName = predicateAST.enclosingRuleName;
-				/*
-				String decisionEnclosingRuleName =
-					dfa.getNFADecisionStartState().getEnclosingRule();
-				// if these rulenames are diff, then pred was hoisted out of rule
-				// Currently I don't warn you about this as it could be annoying.
-				// I do the translation anyway.
-				*/
-				//eST.add("pred", this.toString());
-				if ( generator!=null ) {
-					eST.add("pred",
-									 generator.translateAction(predEnclosingRuleName,predicateAST));
-				}
-			}
-			else {
-				eST = new ST("<pred>");
-				eST.add("pred", this.toString());
-				return eST;
-			}
-			if ( generator!=null ) {
-				String description =
-					generator.target.getTargetStringLiteralFromString(this.toString());
-				eST.add("description", description);
-			}
-			return eST;
-		}
-
-		@Override
-		public SemanticContext getGatedPredicateContext() {
-			if ( gated ) {
-				return this;
-			}
-			return null;
-		}
-
-		@Override
-		public boolean hasUserSemanticPredicate() { // user-specified sempred
-			return predicateAST !=null &&
-				   ( predicateAST.getType()==ANTLRParser.GATED_SEMPRED ||
-					 predicateAST.getType()==ANTLRParser.SEMPRED );
-		}
-
-		@Override
-		public boolean isSyntacticPredicate() {
-			return predicateAST !=null &&
-				( predicateAST.getType()==ANTLRParser.SYN_SEMPRED ||
-				  predicateAST.getType()==ANTLRParser.BACKTRACK_SEMPRED );
-		}
-
-		@Override
-		public void trackUseOfSyntacticPredicates(Grammar g) {
-			if ( synpred ) {
-				g.synPredNamesUsedInDFA.add(predicateAST.getText());
-			}
-		}
-
-		@Override
-		public String toString() {
-			if ( predicateAST ==null ) {
-				return "<nopred>";
-			}
-			return predicateAST.getText();
-		}
-	}
-
-	public static class TruePredicate extends Predicate {
-		public TruePredicate() {
-			super(TRUE_PRED);
-		}
-
-		@Override
-		public ST genExpr(CodeGenerator generator,
-									  STGroup templates,
-									  DFA dfa)
-		{
-			if ( templates!=null ) {
-				return templates.getInstanceOf("true_value");
-			}
-			return new ST("true");
-		}
-
-		@Override
-		public boolean hasUserSemanticPredicate() {
-			return false; // not user specified.
-		}
-
-		@Override
-		public String toString() {
-			return "true"; // not used for code gen, just DOT and print outs
-		}
-	}
-
-	public static class FalsePredicate extends Predicate {
-		public FalsePredicate() {
-			super(FALSE_PRED);
-		}
-
-		@Override
-		public ST genExpr(CodeGenerator generator,
-									  STGroup templates,
-									  DFA dfa)
-		{
-			if ( templates!=null ) {
-				return templates.getInstanceOf("false");
-			}
-			return new ST("false");
-		}
-
-		@Override
-		public boolean hasUserSemanticPredicate() {
-			return false; // not user specified.
-		}
-
-		@Override
-		public String toString() {
-			return "false"; // not used for code gen, just DOT and print outs
-		}
-	}
-
-	public static abstract class CommutativePredicate extends SemanticContext {
-		protected final Set<SemanticContext> operands = new HashSet<SemanticContext>();
-		protected int hashcode;
-
-		public CommutativePredicate(SemanticContext a, SemanticContext b) {
-			if (a.getClass() == this.getClass()){
-				CommutativePredicate predicate = (CommutativePredicate)a;
-				operands.addAll(predicate.operands);
-			} else {
-				operands.add(a);
-			}
-
-			if (b.getClass() == this.getClass()){
-				CommutativePredicate predicate = (CommutativePredicate)b;
-				operands.addAll(predicate.operands);
-			} else {
-				operands.add(b);
-			}
-
-			hashcode = calculateHashCode();
-		}
-
-		public CommutativePredicate(HashSet<SemanticContext> contexts){
-			for (SemanticContext context : contexts){
-				if (context.getClass() == this.getClass()){
-					CommutativePredicate predicate = (CommutativePredicate)context;
-					operands.addAll(predicate.operands);
-				} else {
-					operands.add(context);
-				}
-			}
-
-			hashcode = calculateHashCode();
-		}
-
-		@Override
-		public SemanticContext getGatedPredicateContext() {
-			SemanticContext result = null;
-			for (SemanticContext semctx : operands) {
-				SemanticContext gatedPred = semctx.getGatedPredicateContext();
-				if ( gatedPred!=null ) {
-					result = combinePredicates(result, gatedPred);
-				}
-			}
-			return result;
-		}
-
-		@Override
-		public boolean hasUserSemanticPredicate() {
-			for (SemanticContext semctx : operands) {
-				if ( semctx.hasUserSemanticPredicate() ) {
-					return true;
-				}
-			}
-			return false;
-		}
-
-		@Override
-		public boolean isSyntacticPredicate() {
-			for (SemanticContext semctx : operands) {
-				if ( semctx.isSyntacticPredicate() ) {
-					return true;
-				}
-			}
-			return false;
-		}
-
-		@Override
-		public void trackUseOfSyntacticPredicates(Grammar g) {
-			for (SemanticContext semctx : operands) {
-				semctx.trackUseOfSyntacticPredicates(g);
-			}
-		}
-
-		@Override
-		public boolean equals(Object obj) {
-			if (this == obj)
-				return true;
-
-			if (obj.getClass() == this.getClass()) {
-				CommutativePredicate commutative = (CommutativePredicate)obj;
-				Set<SemanticContext> otherOperands = commutative.operands;
-				if (operands.size() != otherOperands.size())
-					return false;
-
-				return operands.containsAll(otherOperands);
-			}
-
-			if (obj instanceof NOT)
-			{
-				NOT not = (NOT)obj;
-				if (not.ctx instanceof CommutativePredicate && not.ctx.getClass() != this.getClass()) {
-					Set<SemanticContext> otherOperands = ((CommutativePredicate)not.ctx).operands;
-					if (operands.size() != otherOperands.size())
-						return false;
-
-					ArrayList<SemanticContext> temp = new ArrayList<SemanticContext>(operands.size());
-					for (SemanticContext context : otherOperands) {
-						temp.add(not(context));
-					}
-
-					return operands.containsAll(temp);
-				}
-			}
-
-			return false;
-		}
-
-		@Override
-		public int hashCode(){
-			return hashcode;
-		}
-
-		@Override
-		public String toString() {
-			StringBuffer buf = new StringBuffer();
-			buf.append("(");
-			int i = 0;
-			for (SemanticContext semctx : operands) {
-				if ( i>0 ) {
-					buf.append(getOperandString());
-				}
-				buf.append(semctx.toString());
-				i++;
-			}
-			buf.append(")");
-			return buf.toString();
-		}
-
-		public abstract String getOperandString();
-
-		public abstract SemanticContext combinePredicates(SemanticContext left, SemanticContext right);
-
-		public abstract int calculateHashCode();
-	}
-
-	public static class AND extends CommutativePredicate {
-		public AND(SemanticContext a, SemanticContext b) {
-			super(a,b);
-		}
-
-		public AND(HashSet<SemanticContext> contexts) {
-			super(contexts);
-		}
-
-		@Override
-		public ST genExpr(CodeGenerator generator,
-									  STGroup templates,
-									  DFA dfa)
-		{
-			ST result = null;
-			for (SemanticContext operand : operands) {
-				if (result == null)
-					result = operand.genExpr(generator, templates, dfa);
-
-				ST eST = null;
-				if ( templates!=null ) {
-					eST = templates.getInstanceOf("andPredicates");
-				}
-				else {
-					eST = new ST("(<left>&&<right>)");
-				}
-				eST.add("left", result);
-				eST.add("right", operand.genExpr(generator,templates,dfa));
-				result = eST;
-			}
-
-			return result;
-		}
-
-		@Override
-		public String getOperandString() {
-			return "&&";
-		}
-
-		@Override
-		public SemanticContext combinePredicates(SemanticContext left, SemanticContext right) {
-			return SemanticContext.and(left, right);
-		}
-
-		@Override
-		public int calculateHashCode() {
-			int hashcode = 0;
-			for (SemanticContext context : operands) {
-				hashcode = hashcode ^ context.hashCode();
-			}
-
-			return hashcode;
-		}
-	}
-
-	public static class OR extends CommutativePredicate {
-		public OR(SemanticContext a, SemanticContext b) {
-			super(a,b);
-		}
-
-		public OR(HashSet<SemanticContext> contexts) {
-			super(contexts);
-		}
-
-		@Override
-		public ST genExpr(CodeGenerator generator,
-									  STGroup templates,
-									  DFA dfa)
-		{
-			ST eST = null;
-			if ( templates!=null ) {
-				eST = templates.getInstanceOf("orPredicates");
-			}
-			else {
-				eST = new ST("(<first(operands)><rest(operands):{o | ||<o>}>)");
-			}
-			for (SemanticContext semctx : operands) {
-				eST.add("operands", semctx.genExpr(generator,templates,dfa));
-			}
-			return eST;
-		}
-
-		@Override
-		public String getOperandString() {
-			return "||";
-		}
-
-		@Override
-		public SemanticContext combinePredicates(SemanticContext left, SemanticContext right) {
-			return SemanticContext.or(left, right);
-		}
-
-		@Override
-		public int calculateHashCode() {
-			int hashcode = 0;
-			for (SemanticContext context : operands) {
-				hashcode = ~hashcode ^ context.hashCode();
-			}
-
-			return hashcode;
-		}
-	}
-
-	public static class NOT extends SemanticContext {
-		protected SemanticContext ctx;
-		public NOT(SemanticContext ctx) {
-			this.ctx = ctx;
-		}
-
-		@Override
-		public ST genExpr(CodeGenerator generator,
-									  STGroup templates,
-									  DFA dfa)
-		{
-			ST eST = null;
-			if ( templates!=null ) {
-				eST = templates.getInstanceOf("notPredicate");
-			}
-			else {
-				eST = new ST("!(<pred>)");
-			}
-			eST.add("pred", ctx.genExpr(generator,templates,dfa));
-			return eST;
-		}
-
-		@Override
-		public SemanticContext getGatedPredicateContext() {
-			SemanticContext p = ctx.getGatedPredicateContext();
-			if ( p==null ) {
-				return null;
-			}
-			return new NOT(p);
-		}
-
-		@Override
-		public boolean hasUserSemanticPredicate() {
-			return ctx.hasUserSemanticPredicate();
-		}
-
-		@Override
-		public boolean isSyntacticPredicate() {
-			return ctx.isSyntacticPredicate();
-		}
-
-		@Override
-		public void trackUseOfSyntacticPredicates(Grammar g) {
-			ctx.trackUseOfSyntacticPredicates(g);
-		}
-
-		@Override
-		public boolean equals(Object object) {
-			if ( !(object instanceof NOT) ) {
-				return false;
-			}
-			return this.ctx.equals(((NOT)object).ctx);
-		}
-
-		@Override
-		public int hashCode() {
-			return ~ctx.hashCode();
-		}
-
-		@Override
-		public String toString() {
-			return "!("+ctx+")";
-		}
-	}
-
-	public static SemanticContext and(SemanticContext a, SemanticContext b) {
-		//System.out.println("AND: "+a+"&&"+b);
-		SemanticContext[] terms = factorOr(a, b);
-		SemanticContext commonTerms = terms[0];
-		a = terms[1];
-		b = terms[2];
-
-		boolean factored = commonTerms != null && commonTerms != EMPTY_SEMANTIC_CONTEXT && !(commonTerms instanceof TruePredicate);
-		if (factored) {
-			return or(commonTerms, and(a, b));
-		}
-		
-		//System.Console.Out.WriteLine( "AND: " + a + "&&" + b );
-		if (a instanceof FalsePredicate || b instanceof FalsePredicate)
-			return new FalsePredicate();
-
-		if ( a==EMPTY_SEMANTIC_CONTEXT || a==null ) {
-			return b;
-		}
-		if ( b==EMPTY_SEMANTIC_CONTEXT || b==null ) {
-			return a;
-		}
-
-		if (a instanceof TruePredicate)
-			return b;
-
-		if (b instanceof TruePredicate)
-			return a;
-
-		//// Factoring takes care of this case
-		//if (a.Equals(b))
-		//    return a;
-
-		//System.out.println("## have to AND");
-		return new AND(a,b);
-	}
-
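A few identities that fall out of the and() code above, written as a sketch only (pred(...) is a hypothetical stand-in for a grammar-derived Predicate with the given text):

    SemanticContext p1 = pred("p1"), p2 = pred("p2");

    SemanticContext.and(new SemanticContext.TruePredicate(), p1);   // simplifies to p1
    SemanticContext.and(new SemanticContext.FalsePredicate(), p1);  // collapses to a FalsePredicate
    SemanticContext.and(null, p1);                                  // null/EMPTY contributes nothing: p1
    SemanticContext.and(p1, p2);                                    // an AND node; prints as (p1&&p2), operand order may vary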
-	public static SemanticContext or(SemanticContext a, SemanticContext b) {
-		//System.out.println("OR: "+a+"||"+b);
-		SemanticContext[] terms = factorAnd(a, b);
-		SemanticContext commonTerms = terms[0];
-		a = terms[1];
-		b = terms[2];
-		boolean factored = commonTerms != null && commonTerms != EMPTY_SEMANTIC_CONTEXT && !(commonTerms instanceof FalsePredicate);
-		if (factored) {
-			return and(commonTerms, or(a, b));
-		}
-
-		if ( a==EMPTY_SEMANTIC_CONTEXT || a==null || a instanceof FalsePredicate ) {
-			return b;
-		}
-
-		if ( b==EMPTY_SEMANTIC_CONTEXT || b==null || b instanceof FalsePredicate ) {
-			return a;
-		}
-
-		if ( a instanceof TruePredicate || b instanceof TruePredicate || commonTerms instanceof TruePredicate ) {
-			return new TruePredicate();
-		}
-
-		//// Factoring takes care of this case
-		//if (a.equals(b))
-		//    return a;
-
-		if ( a instanceof NOT ) {
-			NOT n = (NOT)a;
-			// check for !p||p
-			if ( n.ctx.equals(b) ) {
-				return new TruePredicate();
-			}
-		}
-		else if ( b instanceof NOT ) {
-			NOT n = (NOT)b;
-			// check for p||!p
-			if ( n.ctx.equals(a) ) {
-				return new TruePredicate();
-			}
-		}
-
-		//System.out.println("## have to OR");
-		OR result = new OR(a,b);
-		if (result.operands.size() == 1)
-			return result.operands.iterator().next();
-
-		return result;
-	}
-
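And the corresponding behavior of or(), again as a hedged sketch with pred(...) as a hypothetical stand-in:

    SemanticContext p1 = pred("p1");

    SemanticContext.or(new SemanticContext.FalsePredicate(), p1);  // false is the identity: p1
    SemanticContext.or(p1, SemanticContext.not(p1));               // p||!p cancels to a TruePredicate
    SemanticContext.or(p1, p1);                                    // factoring collapses the duplicate to p1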
-	public static SemanticContext not(SemanticContext a) {
-		if (a instanceof NOT) {
-			return ((NOT)a).ctx;
-		}
-
-		if (a instanceof TruePredicate)
-			return new FalsePredicate();
-		else if (a instanceof FalsePredicate)
-			return new TruePredicate();
-
-		return new NOT(a);
-	}
-
-	// Factor so (a && b) == (result && a && b)
-	public static SemanticContext[] factorAnd(SemanticContext a, SemanticContext b)
-	{
-		if (a == EMPTY_SEMANTIC_CONTEXT || a == null || a instanceof FalsePredicate)
-			return new SemanticContext[] { EMPTY_SEMANTIC_CONTEXT, a, b };
-		if (b == EMPTY_SEMANTIC_CONTEXT || b == null || b instanceof FalsePredicate)
-			return new SemanticContext[] { EMPTY_SEMANTIC_CONTEXT, a, b };
-
-		if (a instanceof TruePredicate || b instanceof TruePredicate)
-		{
-			return new SemanticContext[] { new TruePredicate(), EMPTY_SEMANTIC_CONTEXT, EMPTY_SEMANTIC_CONTEXT };
-		}
-
-		HashSet<SemanticContext> opsA = new HashSet<SemanticContext>(getAndOperands(a));
-		HashSet<SemanticContext> opsB = new HashSet<SemanticContext>(getAndOperands(b));
-
-		HashSet<SemanticContext> result = new HashSet<SemanticContext>(opsA);
-		result.retainAll(opsB);
-		if (result.size() == 0)
-			return new SemanticContext[] { EMPTY_SEMANTIC_CONTEXT, a, b };
-
-		opsA.removeAll(result);
-		if (opsA.size() == 0)
-			a = new TruePredicate();
-		else if (opsA.size() == 1)
-			a = opsA.iterator().next();
-		else
-			a = new AND(opsA);
-
-		opsB.removeAll(result);
-		if (opsB.size() == 0)
-			b = new TruePredicate();
-		else if (opsB.size() == 1)
-			b = opsB.iterator().next();
-		else
-			b = new AND(opsB);
-
-		if (result.size() == 1)
-			return new SemanticContext[] { result.iterator().next(), a, b };
-
-		return new SemanticContext[] { new AND(result), a, b };
-	}
-
-	// Factor so (a || b) == (result || a || b)
-	public static SemanticContext[] factorOr(SemanticContext a, SemanticContext b)
-	{
-		HashSet<SemanticContext> opsA = new HashSet<SemanticContext>(getOrOperands(a));
-		HashSet<SemanticContext> opsB = new HashSet<SemanticContext>(getOrOperands(b));
-
-		HashSet<SemanticContext> result = new HashSet<SemanticContext>(opsA);
-		result.retainAll(opsB);
-		if (result.size() == 0)
-			return new SemanticContext[] { EMPTY_SEMANTIC_CONTEXT, a, b };
-
-		opsA.removeAll(result);
-		if (opsA.size() == 0)
-			a = new FalsePredicate();
-		else if (opsA.size() == 1)
-			a = opsA.iterator().next();
-		else
-			a = new OR(opsA);
-
-		opsB.removeAll(result);
-		if (opsB.size() == 0)
-			b = new FalsePredicate();
-		else if (opsB.size() == 1)
-			b = opsB.iterator().next();
-		else
-			b = new OR(opsB);
-
-		if (result.size() == 1)
-			return new SemanticContext[] { result.iterator().next(), a, b };
-
-		return new SemanticContext[] { new OR(result), a, b };
-	}
-
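For orientation, what the factoring helpers above hand back when a conjunct is shared (a sketch; pred(...) is hypothetical):

    // factorAnd(p1&&p2, p1&&p3) returns { common, restA, restB } == { p1, p2, p3 },
    // which is how or(p1&&p2, p1&&p3) ends up as p1 && (p2||p3):
    SemanticContext left  = SemanticContext.and(pred("p1"), pred("p2"));
    SemanticContext right = SemanticContext.and(pred("p1"), pred("p3"));
    SemanticContext factored = SemanticContext.or(left, right);    // prints as (p1&&(p2||p3)), operand order may vary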
-	public static Collection<SemanticContext> getAndOperands(SemanticContext context)
-	{
-		if (context instanceof AND)
-			return ((AND)context).operands;
-
-		if (context instanceof NOT) {
-			Collection<SemanticContext> operands = getOrOperands(((NOT)context).ctx);
-			List<SemanticContext> result = new ArrayList<SemanticContext>(operands.size());
-			for (SemanticContext operand : operands) {
-				result.add(not(operand));
-			}
-			return result;
-		}
-
-		ArrayList<SemanticContext> result = new ArrayList<SemanticContext>();
-		result.add(context);
-		return result;
-	}
-
-	public static Collection<SemanticContext> getOrOperands(SemanticContext context)
-	{
-		if (context instanceof OR)
-			return ((OR)context).operands;
-
-		if (context instanceof NOT) {
-			Collection<SemanticContext> operands = getAndOperands(((NOT)context).ctx);
-			List<SemanticContext> result = new ArrayList<SemanticContext>(operands.size());
-			for (SemanticContext operand : operands) {
-				result.add(not(operand));
-			}
-			return result;
-		}
-
-		ArrayList<SemanticContext> result = new ArrayList<SemanticContext>();
-		result.add(context);
-		return result;
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/Transition.java b/antlr-3.4/tool/src/main/java/org/antlr/analysis/Transition.java
deleted file mode 100644
index 44df53e..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/analysis/Transition.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.analysis;
-
-/** A generic transition between any two state machine states.  It defines
- *  some special labels that indicate things like epsilon transitions and
- *  that the label is actually a set of labels or a semantic predicate.
- *  This is a one-way link.  It emanates from a state (usually via a list of
- *  transitions) and has a label/target pair.  I have abstracted the notion
- *  of a Label to handle the various kinds of things it can be.
- */
-public class Transition implements Comparable {
-    /** What label must be consumed to transition to target */
-    public Label label;
-
-    /** The target of this transition */
-    public State target;
-
-    public Transition(Label label, State target) {
-        this.label = label;
-        this.target = target;
-    }
-
-    public Transition(int label, State target) {
-        this.label = new Label(label);
-        this.target = target;
-    }
-
-	public boolean isEpsilon() {
-		return label.isEpsilon();
-	}
-
-	public boolean isAction() {
-		return label.isAction();
-	}
-
-    public boolean isSemanticPredicate() {
-        return label.isSemanticPredicate();
-    }
-
-    public int hashCode() {
-        return label.hashCode() + target.stateNumber;
-    }
-
-    public boolean equals(Object o) {
-        Transition other = (Transition)o;
-        return this.label.equals(other.label) &&
-               this.target.equals(other.target);
-    }
-
-    public int compareTo(Object o) {
-        Transition other = (Transition)o;
-        return this.label.compareTo(other.label);
-    }
-
-    public String toString() {
-        return label+"->"+target.stateNumber;
-    }
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/ACyclicDFACodeGenerator.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/ACyclicDFACodeGenerator.java
deleted file mode 100644
index 6bc5fd3..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/ACyclicDFACodeGenerator.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.codegen;
-
-import org.antlr.analysis.*;
-import org.antlr.misc.Utils;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-
-import java.util.List;
-
-public class ACyclicDFACodeGenerator {
-	protected CodeGenerator parentGenerator;
-
-	public ACyclicDFACodeGenerator(CodeGenerator parent) {
-		this.parentGenerator = parent;
-	}
-
-	public ST genFixedLookaheadDecision(STGroup templates,
-													DFA dfa)
-	{
-		return walkFixedDFAGeneratingStateMachine(templates, dfa, dfa.startState, 1);
-	}
-
-	protected ST walkFixedDFAGeneratingStateMachine(
-			STGroup templates,
-			DFA dfa,
-			DFAState s,
-			int k)
-	{
-		//System.out.println("walk "+s.stateNumber+" in dfa for decision "+dfa.decisionNumber);
-		if ( s.isAcceptState() ) {
-			ST dfaST = templates.getInstanceOf("dfaAcceptState");
-			dfaST.add("alt", Utils.integer(s.getUniquelyPredictedAlt()));
-			return dfaST;
-		}
-
-		// the default templates for generating a state and its edges
-		// can be an if-then-else structure or a switch
-		String dfaStateName = "dfaState";
-		String dfaLoopbackStateName = "dfaLoopbackState";
-		String dfaOptionalBlockStateName = "dfaOptionalBlockState";
-		String dfaEdgeName = "dfaEdge";
-		if ( parentGenerator.canGenerateSwitch(s) ) {
-			dfaStateName = "dfaStateSwitch";
-			dfaLoopbackStateName = "dfaLoopbackStateSwitch";
-			dfaOptionalBlockStateName = "dfaOptionalBlockStateSwitch";
-			dfaEdgeName = "dfaEdgeSwitch";
-		}
-
-		ST dfaST = templates.getInstanceOf(dfaStateName);
-		if ( dfa.getNFADecisionStartState().decisionStateType==NFAState.LOOPBACK ) {
-			dfaST = templates.getInstanceOf(dfaLoopbackStateName);
-		}
-		else if ( dfa.getNFADecisionStartState().decisionStateType==NFAState.OPTIONAL_BLOCK_START ) {
-			dfaST = templates.getInstanceOf(dfaOptionalBlockStateName);
-		}
-		dfaST.add("k", Utils.integer(k));
-		dfaST.add("stateNumber", Utils.integer(s.stateNumber));
-		dfaST.add("semPredState",
-						   Boolean.valueOf(s.isResolvedWithPredicates()));
-		/*
-		String description = dfa.getNFADecisionStartState().getDescription();
-		description = parentGenerator.target.getTargetStringLiteralFromString(description);
-		//System.out.println("DFA: "+description+" associated with AST "+dfa.getNFADecisionStartState());
-		if ( description!=null ) {
-			dfaST.add("description", description);
-		}
-		*/
-		int EOTPredicts = NFA.INVALID_ALT_NUMBER;
-		DFAState EOTTarget = null;
-		//System.out.println("DFA state "+s.stateNumber);
-		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
-			Transition edge = (Transition) s.transition(i);
-			//System.out.println("edge "+s.stateNumber+"-"+edge.label.toString()+"->"+edge.target.stateNumber);
-			if ( edge.label.getAtom()==Label.EOT ) {
-				// don't generate a real edge for EOT; track alt EOT predicts
-				// generate that prediction in the else clause as default case
-				EOTTarget = (DFAState)edge.target;
-				EOTPredicts = EOTTarget.getUniquelyPredictedAlt();
-				/*
-				System.out.println("DFA s"+s.stateNumber+" EOT goes to s"+
-								   edge.target.stateNumber+" predicates alt "+
-								   EOTPredicts);
-				*/
-				continue;
-			}
-			ST edgeST = templates.getInstanceOf(dfaEdgeName);
-			// If the template wants all the label values delineated, do that
-			if ( edgeST.impl.formalArguments.get("labels")!=null ) {
-				List labels = edge.label.getSet().toList();
-				for (int j = 0; j < labels.size(); j++) {
-					Integer vI = (Integer) labels.get(j);
-					String label =
-						parentGenerator.getTokenTypeAsTargetLabel(vI.intValue());
-					labels.set(j, label); // rewrite List element to be name
-				}
-				edgeST.add("labels", labels);
-			}
-			else { // else create an expression to evaluate (the general case)
-				edgeST.add("labelExpr",
-									parentGenerator.genLabelExpr(templates,edge,k));
-			}
-
-			// stick in any gated predicates for any edge if not already a pred
-			if ( !edge.label.isSemanticPredicate() ) {
-				DFAState target = (DFAState)edge.target;
-				SemanticContext preds =
-					target.getGatedPredicatesInNFAConfigurations();
-				if ( preds!=null ) {
-					//System.out.println("preds="+target.getGatedPredicatesInNFAConfigurations());
-					ST predST = preds.genExpr(parentGenerator,
-														  parentGenerator.getTemplates(),
-														  dfa);
-					edgeST.add("predicates", predST);
-				}
-			}
-
-			ST targetST =
-				walkFixedDFAGeneratingStateMachine(templates,
-												   dfa,
-												   (DFAState)edge.target,
-												   k+1);
-			edgeST.add("targetState", targetST);
-			dfaST.add("edges", edgeST);
-			/*
-			System.out.println("back to DFA "+
-							   dfa.decisionNumber+"."+s.stateNumber);
-							   */
-		}
-
-		// HANDLE EOT EDGE
-		if ( EOTPredicts!=NFA.INVALID_ALT_NUMBER ) {
-			// EOT unique predicts an alt
-			dfaST.add("eotPredictsAlt", Utils.integer(EOTPredicts));
-		}
-		else if ( EOTTarget!=null && EOTTarget.getNumberOfTransitions()>0 ) {
-			// EOT state has transitions so must split on predicates.
-			// Generate predicate else-if clauses and then generate
-			// NoViableAlt exception as else clause.
-			// Note: these predicates emanate from the EOT target state
-			// rather than the current DFAState s so the error message
-			// might be slightly misleading if you are looking at the
-			// state number.  Predicates emanating from EOT targets are
-			// hoisted up to the state that has the EOT edge.
-			for (int i = 0; i < EOTTarget.getNumberOfTransitions(); i++) {
-				Transition predEdge = (Transition)EOTTarget.transition(i);
-				ST edgeST = templates.getInstanceOf(dfaEdgeName);
-				edgeST.add("labelExpr",
-									parentGenerator.genSemanticPredicateExpr(templates,predEdge));
-				// the target must be an accept state
-				//System.out.println("EOT edge");
-				ST targetST =
-					walkFixedDFAGeneratingStateMachine(templates,
-													   dfa,
-													   (DFAState)predEdge.target,
-													   k+1);
-				edgeST.add("targetState", targetST);
-				dfaST.add("edges", edgeST);
-			}
-		}
-		return dfaST;
-	}
-}
-
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/ActionScriptTarget.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/ActionScriptTarget.java
deleted file mode 100644
index 4e2943b..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/ActionScriptTarget.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.stringtemplate.v4.ST;
-import org.antlr.tool.Grammar;
-
-public class ActionScriptTarget extends Target {
-
-    public String getTargetCharLiteralFromANTLRCharLiteral(
-            CodeGenerator generator,
-            String literal) {
-
-        int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
-        return String.valueOf(c);
-    }
-
-    public String getTokenTypeAsTargetLabel(CodeGenerator generator,
-                                            int ttype) {
-        // use ints for predefined types;
-        // <invalid> <EOR> <DOWN> <UP>
-        if (ttype >= 0 && ttype <= 3) {
-            return String.valueOf(ttype);
-        }
-
-        String name = generator.grammar.getTokenDisplayName(ttype);
-
-        // If name is a literal, return the token type instead
-        if (name.charAt(0) == '\'') {
-            return String.valueOf(ttype);
-        }
-
-        return name;
-    }
-
-    /**
-     * ActionScript doesn't support Unicode String literals that are considered "illegal"
-     * or are in the surrogate pair ranges.  For example "/uffff" will not encode properly
-     * nor will "/ud800".  To keep things as compact as possible we use the following encoding:
-     * if the int is 255 or below, we encode it as a hex escape;
-     * if the int is between 256 and 0x7fff, we use a single unicode literal with that value;
-     * if the int is between 0x8000 and 0xffff, we use a unicode literal of the form 0x80hh, where hh
-     * is the high-order byte, followed by \xll, where ll is the low-order byte of the 16-bit number.
-     *
-     * Ideally this should be improved at a future date.  The optimal way to encode this
-     * may be a compressed AMF encoding that is embedded using an Embed tag in ActionScript.
-     *
-     * @param v
-     * @return
-     */
-    public String encodeIntAsCharEscape(int v) {
-        // encode as hex
-        if ( v<=255 ) {
-			return "\\x"+ Integer.toHexString(v|0x100).substring(1,3);
-		}
-        if (v <= 0x7fff) {
-            String hex = Integer.toHexString(v|0x10000).substring(1,5);
-		    return "\\u"+hex;
-        }
-        if (v > 0xffff) {
-            System.err.println("Warning: character literal out of range for ActionScript target " + v);
-            return "";
-        }
-        StringBuffer buf = new StringBuffer("\\u80");
-        buf.append(Integer.toHexString((v >> 8) | 0x100).substring(1, 3)); // high - order bits
-        buf.append("\\x");
-        buf.append(Integer.toHexString((v & 0xff) | 0x100).substring(1, 3)); // low -order bits
-        return buf.toString();
-    }
-
-    /** Convert a long to two 32-bit numbers separated by a comma.
-     *  ActionScript does not support 64-bit numbers, so we need to break
-     *  the number into two 32-bit literals to give to the BitSet.  A number like
-     *  0xHHHHHHHHLLLLLLLL is broken into the following string:
-     *  "0xLLLLLLLL, 0xHHHHHHHH"
-     *  Note that the low-order bits come first, followed by the high-order bits.
-     *  This is to match how the BitSet constructor works, where the bits are
-     *  passed in as 32-bit chunks with low-order bits coming first.
-	 */
-	public String getTarget64BitStringFromValue(long word) {
-		StringBuffer buf = new StringBuffer(22); // enough for the two "0x", "," and " "
-		buf.append("0x");
-        writeHexWithPadding(buf, Integer.toHexString((int)(word & 0x00000000ffffffffL)));
-        buf.append(", 0x");
-        writeHexWithPadding(buf, Integer.toHexString((int)(word >> 32)));
-
-        return buf.toString();
-	}
-
-    private void writeHexWithPadding(StringBuffer buf, String digits) {
-       digits = digits.toUpperCase();
-		int padding = 8 - digits.length();
-		// pad left with zeros
-		for (int i=1; i<=padding; i++) {
-			buf.append('0');
-		}
-		buf.append(digits);
-    }
-
-    protected ST chooseWhereCyclicDFAsGo(Tool tool,
-                                                     CodeGenerator generator,
-                                                     Grammar grammar,
-                                                     ST recognizerST,
-                                                     ST cyclicDFAST) {
-        return recognizerST;
-    }
-}
-
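For reference, the escape scheme documented on encodeIntAsCharEscape above can be exercised on its own. The following is a minimal standalone sketch (hypothetical class name, plain Java, outside the ANTLR Target hierarchy) that mirrors that logic:

// Minimal sketch (hypothetical class) of the escape scheme described above:
// hex escapes for values up to 255, 16-bit unicode escapes up to 0x7fff, and
// a split high/low escape for 0x8000..0xffff; larger values are unsupported.
public class ActionScriptEscapeSketch {
    static String encode(int v) {
        if (v <= 255) {
            return "\\x" + Integer.toHexString(v | 0x100).substring(1, 3);
        }
        if (v <= 0x7fff) {
            return "\\u" + Integer.toHexString(v | 0x10000).substring(1, 5);
        }
        if (v > 0xffff) {
            return ""; // out of range for this target
        }
        return "\\u80" + Integer.toHexString((v >> 8) | 0x100).substring(1, 3)
             + "\\x" + Integer.toHexString((v & 0xff) | 0x100).substring(1, 3);
    }

    public static void main(String[] args) {
        // Prints the three escape forms: a hex escape, a plain unicode escape,
        // and the split high/low form used for values of 0x8000 and above.
        System.out.println(encode(0x41));
        System.out.println(encode(0x1234));
        System.out.println(encode(0x8abc));
    }
}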
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/CPPTarget.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/CPPTarget.java
deleted file mode 100644
index 22962e0..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/CPPTarget.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-import org.antlr.tool.Grammar;
-
-import java.io.IOException;
-
-public class CPPTarget extends Target {
-	
-	public String escapeChar( int c ) {
-		// System.out.println("CPPTarget.escapeChar("+c+")");
-		switch (c) {
-		case '\n' : return "\\n";
-		case '\t' : return "\\t";
-		case '\r' : return "\\r";
-		case '\\' : return "\\\\";
-		case '\'' : return "\\'";
-		case '"' :  return "\\\"";
-		default :
-			if ( c < ' ' || c > 126 )
-			{
-				if (c > 255)
-				{
-					String s = Integer.toString(c,16);
-					// put leading zeroes in front of the thing..
-					while( s.length() < 4 )
-						s = '0' + s;
-					return "\\u" + s;
-				}
-				else {
-					return "\\" + Integer.toString(c,8);
-				}
-			}
-			else {
-				return String.valueOf((char)c);
-			}
-		}
-	}
-
-	/** Converts a String into a representation that can be used as a literal
-	 * when surrounded by double-quotes.
-	 *
-	 * Used for escaping semantic predicate strings for exceptions.
-	 *
-	 * @param s The String to be changed into a literal
-	 */
-	public String escapeString(String s)
-	{
-		StringBuffer retval = new StringBuffer();
-		for (int i = 0; i < s.length(); i++) {
-			retval.append(escapeChar(s.charAt(i)));
-		}
-
-		return retval.toString();
-	}
-
-	protected void genRecognizerHeaderFile(Tool tool,
-										   CodeGenerator generator,
-										   Grammar grammar,
-										   ST headerFileST,
-										   String extName)
-		throws IOException
-	{
-		generator.write(headerFileST, grammar.name+extName);
-	}
-
-	/** Convert from an ANTLR char literal found in a grammar file to
-	 *  an equivalent char literal in the target language.  For Java, this
-	 *  is the identity translation; i.e., '\n' -> '\n'.  Most languages
-	 *  will be able to use this 1-to-1 mapping.  Expect single quotes
-	 *  around the incoming literal.
-	 *  Depending on the char vocabulary, the char literal should be prefixed with an 'L'.
-	 */
-	public String getTargetCharLiteralFromANTLRCharLiteral( CodeGenerator codegen, String literal) {
-		int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
-		String prefix = "'";
-		if( codegen.grammar.getMaxCharValue() > 255 )
-			prefix = "L'";
-		else if( (c & 0x80) != 0 )	// if in char mode prevent sign extensions
-			return ""+c;
-		return prefix+escapeChar(c)+"'";
-	}
-
-	/** Convert from an ANTLR string literal found in a grammar file to
-	 *  an equivalent string literal in the target language.  For Java, this
-	 *  is the identity translation; i.e., "\"\n" -> "\"\n".  Most languages
-	 *  will be able to use this 1-to-1 mapping.  Expect double quotes 
-	 *  around the incoming literal.
-	 *  Depending on the char vocabulary, the string should be prefixed with an 'L'.
-	 */
-	public String getTargetStringLiteralFromANTLRStringLiteral( CodeGenerator codegen, String literal) {
-		StringBuffer buf = Grammar.getUnescapedStringFromGrammarStringLiteral(literal);
-		String prefix = "\"";
-		if( codegen.grammar.getMaxCharValue() > 255 )
-			prefix = "L\"";
-		return prefix+escapeString(buf.toString())+"\"";
-	}
-	/** Character constants get truncated to this value.
-	 * TODO: This should be derived from the charVocabulary. Depending on it
-	 * being 255 or 0xFFFF the templates should generate normal character
-	 * constants or multibyte ones.
-	 */
-	public int getMaxCharValue( CodeGenerator codegen ) {
-		int maxval = 255; // codegen.grammar.get????();
-		if ( maxval <= 255 )
-			return 255;
-		else
-			return maxval;
-	}
-}
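The prefix selection in getTargetCharLiteralFromANTLRCharLiteral above reduces to a small decision. Here is a minimal sketch (hypothetical class and method names, assuming the character value and the grammar's max char value are already known) of that rule:

// Minimal sketch (hypothetical class) of the C++ char-literal prefix rule
// described above: wide literals ("L'...'") when the vocabulary exceeds
// 8 bits, a bare integer when the high bit is set in 8-bit mode (to avoid
// sign extension), and an ordinary narrow literal otherwise.
public class CppCharLiteralSketch {
    static String charLiteral(int c, int maxCharValue) {
        String escaped = escape(c);
        if (maxCharValue > 255) {
            return "L'" + escaped + "'";
        }
        if ((c & 0x80) != 0) {
            return String.valueOf(c); // prevent sign extension in char mode
        }
        return "'" + escaped + "'";
    }

    // Same escaping idea as CPPTarget.escapeChar, reduced to the common cases.
    static String escape(int c) {
        switch (c) {
            case '\n': return "\\n";
            case '\t': return "\\t";
            case '\\': return "\\\\";
            case '\'': return "\\'";
            default:
                if (c < ' ' || c > 126) {
                    return "\\" + Integer.toString(c, 8); // octal for non-printables
                }
                return String.valueOf((char) c);
        }
    }

    public static void main(String[] args) {
        System.out.println(charLiteral('a', 255));     // 'a'
        System.out.println(charLiteral('a', 0xFFFF));  // L'a'
        System.out.println(charLiteral(0xE9, 255));    // 233
    }
}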
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/CSharp3Target.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/CSharp3Target.java
deleted file mode 100644
index f5fef65..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/CSharp3Target.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2010 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.antlr.tool.Grammar;
-import org.stringtemplate.v4.AttributeRenderer;
-import org.stringtemplate.v4.ST;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Locale;
-import java.util.Map;
-
-public class CSharp3Target extends Target {
-    private static final HashSet<String> _languageKeywords = new HashSet<String>()
-        {{
-            add("abstract"); add("event"); add("new"); add("struct");
-            add("as"); add("explicit"); add("null"); add("switch");
-            add("base"); add("extern"); add("object"); add("this");
-            add("bool"); add("false"); add("operator"); add("throw");
-            add("break"); add("finally"); add("out"); add("true");
-            add("byte"); add("fixed"); add("override"); add("try");
-            add("case"); add("float"); add("params"); add("typeof");
-            add("catch"); add("for"); add("private"); add("uint");
-            add("char"); add("foreach"); add("protected"); add("ulong");
-            add("checked"); add("goto"); add("public"); add("unchecked");
-            add("class"); add("if"); add("readonly"); add("unsafe");
-            add("const"); add("implicit"); add("ref"); add("ushort");
-            add("continue"); add("in"); add("return"); add("using");
-            add("decimal"); add("int"); add("sbyte"); add("virtual");
-            add("default"); add("interface"); add("sealed"); add("volatile");
-            add("delegate"); add("internal"); add("short"); add("void");
-            add("do"); add("is"); add("sizeof"); add("while");
-            add("double"); add("lock"); add("stackalloc");
-            add("else"); add("long"); add("static");
-            add("enum"); add("namespace"); add("string");
-        }};
-
-    @Override
-    public String encodeIntAsCharEscape(int v) {
-        return "\\x" + Integer.toHexString(v).toUpperCase();
-    }
-
-    @Override
-    public String getTarget64BitStringFromValue(long word) {
-        return "0x" + Long.toHexString(word).toUpperCase();
-    }
-
-    @Override
-    protected void genRecognizerFile(Tool tool, CodeGenerator generator, Grammar grammar, ST outputFileST) throws IOException
-    {
-        if (!grammar.getGrammarIsRoot())
-        {
-            Grammar rootGrammar = grammar.composite.getRootGrammar();
-            String actionScope = grammar.getDefaultActionScope(grammar.type);
-            Map<String, Object> actions = rootGrammar.getActions().get(actionScope);
-            Object rootNamespace = actions != null ? actions.get("namespace") : null;
-            if (actions != null && rootNamespace != null)
-            {
-                actions = grammar.getActions().get(actionScope);
-                if (actions == null)
-                {
-                    actions = new HashMap<String, Object>();
-                    grammar.getActions().put(actionScope, actions);
-                }
-
-                actions.put("namespace", rootNamespace);
-            }
-        }
-
-        generator.getTemplates().registerRenderer(String.class, new StringRenderer(generator, this));
-        super.genRecognizerFile(tool, generator, grammar, outputFileST);
-    }
-
-    public static class StringRenderer implements AttributeRenderer
-    {
-        private final CodeGenerator _generator;
-        private final CSharp3Target _target;
-
-        public StringRenderer(CodeGenerator generator, CSharp3Target target)
-        {
-            _generator = generator;
-            _target = target;
-        }
-
-        public String toString(Object obj, String formatName, Locale locale)
-        {
-            String value = (String)obj;
-            if (value == null || formatName == null)
-                return value;
-
-            if (formatName.equals("id")) {
-                if (_languageKeywords.contains(value))
-                    return "@" + value;
-
-                return value;
-            } else if (formatName.equals("cap")) {
-                return Character.toUpperCase(value.charAt(0)) + value.substring(1);
-            } else if (formatName.equals("string")) {
-                return _target.getTargetStringLiteralFromString(value, true);
-            } else {
-                throw new IllegalArgumentException("Unsupported format name: '" + formatName + "'");
-            }
-        }
-    }
-}
-
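The "id" format handled by StringRenderer above amounts to prefixing C# keywords with the verbatim-identifier marker "@". A minimal sketch (hypothetical class name, deliberately abbreviated keyword set) of that behavior:

// Minimal sketch (hypothetical class) of the identifier rendering described
// above: names that collide with C# keywords are emitted with a leading "@",
// all other names pass through unchanged.
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class CSharpIdSketch {
    // Abbreviated keyword set for illustration; the real target lists them all.
    private static final Set<String> KEYWORDS =
        new HashSet<String>(Arrays.asList("class", "event", "object", "string", "int"));

    static String renderId(String value) {
        return KEYWORDS.contains(value) ? "@" + value : value;
    }

    public static void main(String[] args) {
        System.out.println(renderId("event")); // @event
        System.out.println(renderId("expr"));  // expr
    }
}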
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/CTarget.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/CTarget.java
deleted file mode 100644
index 51911c3..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/CTarget.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.stringtemplate.v4.ST;
-import org.antlr.tool.Grammar;
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-public class CTarget extends Target {
-
-    ArrayList strings = new ArrayList();
-
-    @Override
-    protected void genRecognizerFile(Tool tool,
-            CodeGenerator generator,
-            Grammar grammar,
-            ST outputFileST)
-            throws IOException {
-
-        // Before we write this, and cause it to generate its string,
-        // we need to add all the string literals that we are going to match
-        //
-        outputFileST.add("literals", strings);
-        String fileName = generator.getRecognizerFileName(grammar.name, grammar.type);
-        generator.write(outputFileST, fileName);
-    }
-
-    @Override
-    protected void genRecognizerHeaderFile(Tool tool,
-            CodeGenerator generator,
-            Grammar grammar,
-            ST headerFileST,
-            String extName)
-            throws IOException {
-        // Pick up the file name we are generating. This method will return a
-        // file name suffixed with .c, so we must substring it and add the extName,
-        // as we cannot assign into strings in Java.
-        ///
-        String fileName = generator.getRecognizerFileName(grammar.name, grammar.type);
-        fileName = fileName.substring(0, fileName.length() - 2) + extName;
-
-        generator.write(headerFileST, fileName);
-    }
-
-    protected ST chooseWhereCyclicDFAsGo(Tool tool,
-            CodeGenerator generator,
-            Grammar grammar,
-            ST recognizerST,
-            ST cyclicDFAST) {
-        return recognizerST;
-    }
-
-    /** Is scope in @scope::name {action} valid for this kind of grammar?
-     *  Targets like C++ may want to allow new scopes like headerfile or
-     *  some such.  The action names themselves are not policed at the
-     *  moment so targets can add template actions w/o having to recompile
-     *  ANTLR.
-     */
-    @Override
-    public boolean isValidActionScope(int grammarType, String scope) {
-        switch (grammarType) {
-            case Grammar.LEXER:
-                if (scope.equals("lexer")) {
-                    return true;
-                }
-                if (scope.equals("header")) {
-                    return true;
-                }
-                if (scope.equals("includes")) {
-                    return true;
-                }
-                if (scope.equals("preincludes")) {
-                    return true;
-                }
-                if (scope.equals("overrides")) {
-                    return true;
-                }
-                break;
-            case Grammar.PARSER:
-                if (scope.equals("parser")) {
-                    return true;
-                }
-                if (scope.equals("header")) {
-                    return true;
-                }
-                if (scope.equals("includes")) {
-                    return true;
-                }
-                if (scope.equals("preincludes")) {
-                    return true;
-                }
-                if (scope.equals("overrides")) {
-                    return true;
-                }
-                break;
-            case Grammar.COMBINED:
-                if (scope.equals("parser")) {
-                    return true;
-                }
-                if (scope.equals("lexer")) {
-                    return true;
-                }
-                if (scope.equals("header")) {
-                    return true;
-                }
-                if (scope.equals("includes")) {
-                    return true;
-                }
-                if (scope.equals("preincludes")) {
-                    return true;
-                }
-                if (scope.equals("overrides")) {
-                    return true;
-                }
-                break;
-            case Grammar.TREE_PARSER:
-                if (scope.equals("treeparser")) {
-                    return true;
-                }
-                if (scope.equals("header")) {
-                    return true;
-                }
-                if (scope.equals("includes")) {
-                    return true;
-                }
-                if (scope.equals("preincludes")) {
-                    return true;
-                }
-                if (scope.equals("overrides")) {
-                    return true;
-                }
-                break;
-        }
-        return false;
-    }
-
-    @Override
-    public String getTargetCharLiteralFromANTLRCharLiteral(
-            CodeGenerator generator,
-            String literal) {
-
-        if (literal.startsWith("'\\u")) {
-            literal = "0x" + literal.substring(3, 7);
-        } else {
-            int c = literal.charAt(1);
-
-            if (c < 32 || c > 127) {
-                literal = "0x" + Integer.toHexString(c);
-            }
-        }
-
-        return literal;
-    }
-
-    /** Convert from an ANTLR string literal found in a grammar file to
-     *  an equivalent string literal in the C target.
-     *  Because we must support Unicode character sets and have chosen
-     *  to have the lexer match UTF32 characters, we must encode
-     *  string matches as 32-bit character arrays. Here we
-     *  must produce the C array and cater for the case where the
-     *  lexer string contains escapes such as 'xyz\n'.
-     */
-    @Override
-    public String getTargetStringLiteralFromANTLRStringLiteral(
-            CodeGenerator generator,
-            String literal) {
-        int index;
-        String bytes;
-        StringBuffer buf = new StringBuffer();
-
-        buf.append("{ ");
-
-        // We need to lose any escaped characters of the form \x and just
-        // replace them with their actual values, as well as lose the surrounding
-        // quote marks.
-        //
-        for (int i = 1; i < literal.length() - 1; i++) {
-            buf.append("0x");
-
-            if (literal.charAt(i) == '\\') {
-                i++; // Assume that there is a next character; if there isn't, this just yields
-                // an invalid string, which is what the input was in the first place.
-                switch (literal.charAt(i)) {
-                    case 'u':
-                    case 'U':
-                        buf.append(literal.substring(i + 1, i + 5));  // Already a hex string
-                        i = i + 5;                                // Move to next string/char/escape
-                        break;
-
-                    case 'n':
-                    case 'N':
-
-                        buf.append("0A");
-                        break;
-
-                    case 'r':
-                    case 'R':
-
-                        buf.append("0D");
-                        break;
-
-                    case 't':
-                    case 'T':
-
-                        buf.append("09");
-                        break;
-
-                    case 'b':
-                    case 'B':
-
-                        buf.append("08");
-                        break;
-
-                    case 'f':
-                    case 'F':
-
-                        buf.append("0C");
-                        break;
-
-                    default:
-
-                        // Anything else is what it is!
-                        //
-                        buf.append(Integer.toHexString((int) literal.charAt(i)).toUpperCase());
-                        break;
-                }
-            } else {
-                buf.append(Integer.toHexString((int) literal.charAt(i)).toUpperCase());
-            }
-            buf.append(", ");
-        }
-        buf.append(" ANTLR3_STRING_TERMINATOR}");
-
-        bytes = buf.toString();
-        index = strings.indexOf(bytes);
-
-        if (index == -1) {
-            strings.add(bytes);
-            index = strings.indexOf(bytes);
-        }
-
-        String strref = "lit_" + String.valueOf(index + 1);
-
-        return strref;
-    }
-
-    /**
-     * Overrides the standard grammar analysis so we can prepare the analyser
-     * a little differently from the other targets.
-     *
-     * In particular we want to influence the way the code generator makes assumptions about
-     * switches vs. ifs vs. table-driven DFAs. In general, C code should be generated that
-     * has the minimum use of tables and the maximum use of large switch statements. This
-     * allows the optimizers to generate very efficient code; it can reduce object code size
-     * by about 30% and give about a 20% performance improvement over not doing this. Hence,
-     * for the C target only, we change the defaults here, but only if they are still set to the
-     * defaults.
-     *
-     * @param generator An instance of the generic code generator class.
-     * @param grammar The grammar that we are currently analyzing
-     */
-    @Override
-    protected void performGrammarAnalysis(CodeGenerator generator, Grammar grammar) {
-
-        // Check to see if the maximum inline DFA states is still set to
-        // the default size. If it is then whack it all the way up to the maximum that
-        // we can sensibly get away with.
-        //
-        if (CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE == CodeGenerator.MADSI_DEFAULT) {
-
-            CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = 65535;
-        }
-
-        // Check to see if the maximum switch size is still set to the default
-        // and bring it up much higher if it is. Modern C compilers can handle
-        // much bigger switch statements than, say, Java can, and if anyone finds a compiler
-        // that cannot deal with such big switches, all they need do is generate the
-        // code with a reduced -Xmaxswitchcaselabels nnn.
-        //
-        if  (CodeGenerator.MAX_SWITCH_CASE_LABELS == CodeGenerator.MSCL_DEFAULT) {
-
-            CodeGenerator.MAX_SWITCH_CASE_LABELS = 3000;
-        }
-
-        // Check to see if the number of transitions considered a minimum for using
-        // a switch is still at the default. Because a switch is still generally faster than
-        // an if even with small sets, and given that the optimizer will do the best thing with it
-        // anyway, then we simply want to generate a switch for any number of states.
-        //
-        if (CodeGenerator.MIN_SWITCH_ALTS == CodeGenerator.MSA_DEFAULT) {
-
-            CodeGenerator.MIN_SWITCH_ALTS = 1;
-        }
-
-        // Now we allow the superclass implementation to do whatever it feels it
-        // must do.
-        //
-        super.performGrammarAnalysis(generator, grammar);
-    }
-}
-
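The array encoding performed by getTargetStringLiteralFromANTLRStringLiteral above can be shown in isolation. A minimal sketch (hypothetical class name, handling only a few of the escapes) follows; the real method also interns each array in a literal table and returns a lit_N reference:

// Minimal sketch (hypothetical class) of the conversion described above: an
// ANTLR string literal becomes a brace-initialized array of hex character
// codes terminated by ANTLR3_STRING_TERMINATOR, with common backslash escapes
// folded into their character values.
public class CStringLiteralSketch {
    static String toCArray(String literal) {
        StringBuilder buf = new StringBuilder("{ ");
        for (int i = 1; i < literal.length() - 1; i++) { // skip surrounding quotes
            char ch = literal.charAt(i);
            if (ch == '\\') {
                i++; // fold the escape into its character value
                switch (literal.charAt(i)) {
                    case 'n': buf.append("0x0A"); break;
                    case 'r': buf.append("0x0D"); break;
                    case 't': buf.append("0x09"); break;
                    default:
                        buf.append("0x").append(
                            Integer.toHexString(literal.charAt(i)).toUpperCase());
                        break;
                }
            } else {
                buf.append("0x").append(Integer.toHexString(ch).toUpperCase());
            }
            buf.append(", ");
        }
        return buf.append(" ANTLR3_STRING_TERMINATOR}").toString();
    }

    public static void main(String[] args) {
        // -> { 0x61, 0x62, 0x0A,  ANTLR3_STRING_TERMINATOR}
        System.out.println(toCArray("'ab\\n'"));
    }
}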
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/CodeGenerator.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/CodeGenerator.java
deleted file mode 100644
index 916b1fa..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/CodeGenerator.java
+++ /dev/null
@@ -1,1320 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.codegen;
-
-
-import org.antlr.Tool;
-import org.antlr.analysis.DFA;
-import org.antlr.analysis.*;
-import org.antlr.grammar.v3.ANTLRLexer;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.grammar.v3.ActionTranslator;
-import org.antlr.grammar.v3.CodeGenTreeWalker;
-import org.antlr.misc.BitSet;
-import org.antlr.misc.*;
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.CommonTreeNodeStream;
-import org.antlr.tool.*;
-import org.stringtemplate.v4.*;
-
-import java.io.IOException;
-import java.io.Writer;
-import java.util.*;
-
-/** ANTLR's code generator.
- *
- *  Generate recognizers derived from grammars.  Language independence is
- *  achieved through the use of STGroup objects.  All output
- *  strings are completely encapsulated in the group files such as Java.stg.
- *  Some computations are done that are unused by a particular language.
- *  This generator just computes and sets the values into the templates;
- *  the templates are free to use or not use the information.
- *
- *  To make a new code generation target, define X.stg for language X
-	 *  by copying from existing Y.stg most closely related to your language;
- *  e.g., to do CSharp.stg copy Java.stg.  The template group file has a
- *  bunch of templates that are needed by the code generator.  You can add
- *  a new target w/o even recompiling ANTLR itself.  The language=X option
- *  in a grammar file dictates which templates get loaded/used.
- *
-	 *  Some languages like C need both parser files and header files.  Java needs
-	 *  to have a separate file for the cyclic DFA as ANTLR generates bytecodes
-	 *  directly (which cannot be in the generated parser Java file).  To facilitate
-	 *  this, the cyclic DFA can be in the same file, but the header and output
-	 *  files must be separate; the recognizer goes in the output file.
- */
-public class CodeGenerator {
-	/** When generating SWITCH statements, some targets might need to limit
-	 *  the size (based upon the number of case labels).  Generally, this
-	 *  limit will be hit only for lexers where a wildcard in a UNICODE
-	 *  vocabulary environment would generate a SWITCH with 65000 labels.
-	 */
-	public final static int MSCL_DEFAULT = 300;
-	public static int MAX_SWITCH_CASE_LABELS = MSCL_DEFAULT;
-	public final static int MSA_DEFAULT = 3;
-	public static int MIN_SWITCH_ALTS = MSA_DEFAULT;
-	public boolean GENERATE_SWITCHES_WHEN_POSSIBLE = true;
-	public static boolean LAUNCH_ST_INSPECTOR = false;
-	public final static int MADSI_DEFAULT = 60; // do lots of states inline (needed for expression rules)
-	public static int MAX_ACYCLIC_DFA_STATES_INLINE = MADSI_DEFAULT;
-
-	public static String classpathTemplateRootDirectoryName =
-		"org/antlr/codegen/templates";
-
-	/** Which grammar are we generating code for?  Each generator
-	 *  is attached to a specific grammar.
-	 */
-	public Grammar grammar;
-
-	/** What language are we generating? */
-	protected String language;
-
-	/** The target specifies how to write out files and do other language
-	 *  specific actions.
-	 */
-	public Target target = null;
-
-	/** Where are the templates this generator should use to generate code? */
-	protected STGroup templates;
-
-	/** The basic output templates without AST or templates stuff; this will be
-	 *  the templates loaded for the language such as Java.stg *and* the Dbg
-	 *  stuff if turned on.  This is used for generating syntactic predicates.
-	 */
-	protected STGroup baseTemplates;
-
-	protected ST recognizerST;
-	protected ST outputFileST;
-	protected ST headerFileST;
-
-	/** Used to create unique labels */
-	protected int uniqueLabelNumber = 1;
-
-	/** A reference to the ANTLR tool so we can learn about output directories
-	 *  and such.
-	 */
-	protected Tool tool;
-
-	/** Generate debugging event method calls */
-	protected boolean debug;
-
-	/** Create a Tracer object and make the recognizer invoke this. */
-	protected boolean trace;
-
-	/** Track runtime parsing information about decisions etc...
-	 *  This requires the debugging event mechanism to work.
-	 */
-	protected boolean profile;
-
-	protected int lineWidth = 72;
-
-	/** I have factored out the generation of acyclic DFAs to a separate class */
-	public ACyclicDFACodeGenerator acyclicDFAGenerator =
-		new ACyclicDFACodeGenerator(this);
-
-	/** I have factored out the generation of cyclic DFAs to a separate class */
-	/*
-	public CyclicDFACodeGenerator cyclicDFAGenerator =
-		new CyclicDFACodeGenerator(this);
-		*/
-
-	public static final String VOCAB_FILE_EXTENSION = ".tokens";
-	protected final static String vocabFilePattern =
-		"<tokens:{it|<it.name>=<it.type>\n}>" +
-		"<literals:{it|<it.name>=<it.type>\n}>";
-
-	public CodeGenerator(Tool tool, Grammar grammar, String language) {
-		this.tool = tool;
-		this.grammar = grammar;
-		this.language = language;
-		target = loadLanguageTarget(language);
-	}
-
-	public static Target loadLanguageTarget(String language) {
-		Target target = null;
-		String targetName = "org.antlr.codegen."+language+"Target";
-		try {
-			Class c = Class.forName(targetName);
-			target = (Target)c.newInstance();
-		}
-		catch (ClassNotFoundException cnfe) {
-			target = new Target(); // use default
-		}
-		catch (InstantiationException ie) {
-			ErrorManager.error(ErrorManager.MSG_CANNOT_CREATE_TARGET_GENERATOR,
-							   targetName,
-							   ie);
-		}
-		catch (IllegalAccessException cnfe) {
-			ErrorManager.error(ErrorManager.MSG_CANNOT_CREATE_TARGET_GENERATOR,
-							   targetName,
-							   cnfe);
-		}
-		return target;
-	}
-
-	/** load the main language.stg template group file */
-	public void loadTemplates(String language) {
-		String langDir = classpathTemplateRootDirectoryName+"/"+language;
-		STGroup coreTemplates = new STGroupFile(langDir+"/"+language+".stg");
-
-		baseTemplates = coreTemplates;
-		if ( coreTemplates ==null ) {
-			ErrorManager.error(ErrorManager.MSG_MISSING_CODE_GEN_TEMPLATES,
-							   language);
-			return;
-		}
-
-		// dynamically add subgroups that act like filters to apply to
-		// their supergroup.  E.g., Java:Dbg:AST:ASTParser::ASTDbg.
-		String outputOption = (String)grammar.getOption("output");
-		if ( outputOption!=null && outputOption.equals("AST") ) {
-			if ( debug && grammar.type!=Grammar.LEXER ) {
-				STGroup dbgTemplates = new STGroupFile(langDir+"/Dbg.stg");
-				dbgTemplates.importTemplates(coreTemplates);
-				baseTemplates = dbgTemplates;
-				STGroup astTemplates = new STGroupFile(langDir+"/AST.stg");
-				astTemplates.importTemplates(dbgTemplates);
-				STGroup astParserTemplates = astTemplates;
-				if ( grammar.type==Grammar.TREE_PARSER ) {
-					astParserTemplates = new STGroupFile(langDir+"/ASTTreeParser.stg");
-					astParserTemplates.importTemplates(astTemplates);
-				}
-				else {
-					astParserTemplates = new STGroupFile(langDir+"/ASTParser.stg");
-					astParserTemplates.importTemplates(astTemplates);
-				}
-				STGroup astDbgTemplates = new STGroupFile(langDir+"/ASTDbg.stg");
-				astDbgTemplates.importTemplates(astParserTemplates);
-				templates = astDbgTemplates;
-				dbgTemplates.iterateAcrossValues = true; // ST v3 compatibility with Maps
-				astDbgTemplates.iterateAcrossValues = true;
-				astParserTemplates.iterateAcrossValues = true;
-			}
-			else {
-				STGroup astTemplates = new STGroupFile(langDir+"/AST.stg");
-				astTemplates.importTemplates(coreTemplates);
-				STGroup astParserTemplates = astTemplates;
-				if ( grammar.type==Grammar.TREE_PARSER ) {
-					astParserTemplates = new STGroupFile(langDir+"/ASTTreeParser.stg");
-					astParserTemplates.importTemplates(astTemplates);
-				}
-				else {
-					astParserTemplates = new STGroupFile(langDir+"/ASTParser.stg");
-					astParserTemplates.importTemplates(astTemplates);
-				}
-				templates = astParserTemplates;
-				astTemplates.iterateAcrossValues = true; // ST v3 compatibility with Maps
-				astParserTemplates.iterateAcrossValues = true;
-			}
-		}
-		else if ( outputOption!=null && outputOption.equals("template") ) {
-			if ( debug && grammar.type!=Grammar.LEXER ) {
-				STGroup dbgTemplates = new STGroupFile(langDir+"/Dbg.stg");
-				dbgTemplates.importTemplates(coreTemplates);
-				baseTemplates = dbgTemplates;
-				STGroup stTemplates = new STGroupFile(langDir+"/ST.stg");
-				stTemplates.importTemplates(dbgTemplates);
-				templates = stTemplates;
-				dbgTemplates.iterateAcrossValues = true;
-			}
-			else {
-				STGroup stTemplates = new STGroupFile(langDir+"/ST.stg");
-				stTemplates.importTemplates(coreTemplates);
-				templates = stTemplates;
-			}
-			templates.iterateAcrossValues = true; // ST v3 compatibility with Maps
-		}
-		else if ( debug && grammar.type!=Grammar.LEXER ) {
-			STGroup dbgTemplates = new STGroupFile(langDir+"/Dbg.stg");
-			dbgTemplates.importTemplates(coreTemplates);
-			templates = dbgTemplates;
-			baseTemplates = templates;
-			baseTemplates.iterateAcrossValues = true; // ST v3 compatibility with Maps
-		}
-		else {
-			templates = coreTemplates;
-			coreTemplates.iterateAcrossValues = true; // ST v3 compatibility with Maps
-		}
-	}
-
-	/** Given the grammar to which we are attached, walk the AST associated
-	 *  with that grammar to create NFAs.  Then create the DFAs for all
-	 *  decision points in the grammar by converting the NFAs to DFAs.
-	 *  Finally, walk the AST again to generate code.
-	 *
-	 *  Either 1 or 2 files are written:
-	 *
-	 * 		recognizer: the main parser/lexer/treewalker item
-	 * 		header file: language like C/C++ need extern definitions
-	 *
-	 *  The target, such as JavaTarget, dictates which files get written.
-	 */
-	public ST genRecognizer() {
-		//System.out.println("### generate "+grammar.name+" recognizer");
-		// LOAD OUTPUT TEMPLATES
-		loadTemplates(language);
-		if ( templates==null ) {
-			return null;
-		}
-
-		// CREATE NFA FROM GRAMMAR, CREATE DFA FROM NFA
-		if ( ErrorManager.doNotAttemptAnalysis() ) {
-			return null;
-		}
-		target.performGrammarAnalysis(this, grammar);
-
-
-		// some grammar analysis errors will not yield reliable DFA
-		if ( ErrorManager.doNotAttemptCodeGen() ) {
-			return null;
-		}
-
-		// OPTIMIZE DFA
-		DFAOptimizer optimizer = new DFAOptimizer(grammar);
-		optimizer.optimize();
-
-		// OUTPUT FILE (contains recognizerST)
-		outputFileST = templates.getInstanceOf("outputFile");
-
-		// HEADER FILE
-		if ( templates.isDefined("headerFile") ) {
-			headerFileST = templates.getInstanceOf("headerFile");
-		}
-		else {
-			// create a dummy to avoid null-checks all over code generator
-			headerFileST = new ST(templates,"xyz");
-			headerFileST.add("cyclicDFAs", (Object)null); // it normally sees this from outputFile
-			//headerFileST.impl.name = "dummy-header-file";
-		}
-
-		boolean filterMode = grammar.getOption("filter")!=null &&
-							  grammar.getOption("filter").equals("true");
-        boolean canBacktrack = grammar.getSyntacticPredicates()!=null ||
-                               grammar.composite.getRootGrammar().atLeastOneBacktrackOption ||
-                               filterMode;
-
-        // TODO: move this down further because generating the recognizer
-		// alters the model with info on who uses predefined properties etc...
-		// The actions here might refer to something.
-
-		// The only two possible output files are available at this point.
-		// Verify action scopes are ok for target and dump actions into output
-		// Templates can say <actions.parser.header> for example.
-		Map<String, Map<String, Object>> actions = grammar.getActions();
-		verifyActionScopesOkForTarget(actions);
-		// translate $x::y references
-		translateActionAttributeReferences(actions);
-
-        ST gateST = templates.getInstanceOf("actionGate");
-        if ( filterMode ) {
-            // if filtering, we need to set actions to execute at backtracking
-            // level 1 not 0.
-            gateST = templates.getInstanceOf("filteringActionGate");
-        }
-        grammar.setSynPredGateIfNotAlready(gateST);
-
-        headerFileST.add("actions", actions);
-		outputFileST.add("actions", actions);
-
-		headerFileST.add("buildTemplate", new Boolean(grammar.buildTemplate()));
-		outputFileST.add("buildTemplate", new Boolean(grammar.buildTemplate()));
-		headerFileST.add("buildAST", new Boolean(grammar.buildAST()));
-		outputFileST.add("buildAST", new Boolean(grammar.buildAST()));
-
-		outputFileST.add("rewriteMode", Boolean.valueOf(grammar.rewriteMode()));
-		headerFileST.add("rewriteMode", Boolean.valueOf(grammar.rewriteMode()));
-
-		outputFileST.add("backtracking", Boolean.valueOf(canBacktrack));
-		headerFileST.add("backtracking", Boolean.valueOf(canBacktrack));
-		// turn on memoize attribute at grammar level so we can create ruleMemo.
-		// each rule has memoize attr that hides this one, indicating whether
-		// it needs to save results
-		String memoize = (String)grammar.getOption("memoize");
-		outputFileST.add("memoize",
-						 (grammar.atLeastOneRuleMemoizes ||
-						  Boolean.valueOf(memoize != null && memoize.equals("true")) &&
-						  canBacktrack));
-		headerFileST.add("memoize",
-						 (grammar.atLeastOneRuleMemoizes ||
-						  Boolean.valueOf(memoize != null && memoize.equals("true")) &&
-						  canBacktrack));
-
-
-		outputFileST.add("trace", Boolean.valueOf(trace));
-		headerFileST.add("trace", Boolean.valueOf(trace));
-
-		outputFileST.add("profile", Boolean.valueOf(profile));
-		headerFileST.add("profile", Boolean.valueOf(profile));
-
-		// RECOGNIZER
-		if ( grammar.type==Grammar.LEXER ) {
-			recognizerST = templates.getInstanceOf("lexer");
-			outputFileST.add("LEXER", Boolean.valueOf(true));
-			headerFileST.add("LEXER", Boolean.valueOf(true));
-			recognizerST.add("filterMode",
-							 Boolean.valueOf(filterMode));
-		}
-		else if ( grammar.type==Grammar.PARSER ||
-			grammar.type==Grammar.COMBINED )
-		{
-			recognizerST = templates.getInstanceOf("parser");
-			outputFileST.add("PARSER", Boolean.valueOf(true));
-			headerFileST.add("PARSER", Boolean.valueOf(true));
-		}
-		else {
-			recognizerST = templates.getInstanceOf("treeParser");
-			outputFileST.add("TREE_PARSER", Boolean.valueOf(true));
-			headerFileST.add("TREE_PARSER", Boolean.valueOf(true));
-            recognizerST.add("filterMode",
-							 Boolean.valueOf(filterMode));
-		}
-		outputFileST.add("recognizer", recognizerST);
-		headerFileST.add("recognizer", recognizerST);
-		outputFileST.add("actionScope",
-						 grammar.getDefaultActionScope(grammar.type));
-		headerFileST.add("actionScope",
-						 grammar.getDefaultActionScope(grammar.type));
-
-		String targetAppropriateFileNameString =
-			target.getTargetStringLiteralFromString(grammar.getFileName());
-		outputFileST.add("fileName", targetAppropriateFileNameString);
-		headerFileST.add("fileName", targetAppropriateFileNameString);
-		outputFileST.add("ANTLRVersion", tool.VERSION);
-		headerFileST.add("ANTLRVersion", tool.VERSION);
-		outputFileST.add("generatedTimestamp", Tool.getCurrentTimeStamp());
-		headerFileST.add("generatedTimestamp", Tool.getCurrentTimeStamp());
-
-		// GENERATE RECOGNIZER
-		// Walk the AST holding the input grammar, this time generating code
-		// Decisions are generated by using the precomputed DFAs
-		// Fill in the various templates with data
-		CodeGenTreeWalker gen = new CodeGenTreeWalker(new CommonTreeNodeStream(grammar.getGrammarTree()));
-		try {
-			gen.grammar_(
-						grammar,
-						recognizerST,
-						outputFileST,
-						headerFileST);
-		}
-		catch (RecognitionException re) {
-			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
-							   re);
-		}
-
-		genTokenTypeConstants(recognizerST);
-		genTokenTypeConstants(outputFileST);
-		genTokenTypeConstants(headerFileST);
-
-		if ( grammar.type!=Grammar.LEXER ) {
-			genTokenTypeNames(recognizerST);
-			genTokenTypeNames(outputFileST);
-			genTokenTypeNames(headerFileST);
-		}
-
-		// Now that we know what synpreds are used, we can set into template
-		Set synpredNames = null;
-		if ( grammar.synPredNamesUsedInDFA.size()>0 ) {
-			synpredNames = grammar.synPredNamesUsedInDFA;
-		}
-		outputFileST.add("synpreds", synpredNames);
-		headerFileST.add("synpreds", synpredNames);
-
-		// all recognizers can see Grammar object
-		recognizerST.add("grammar", grammar);
-
-		if (LAUNCH_ST_INSPECTOR) {
-			outputFileST.inspect();
-			if ( templates.isDefined("headerFile") ) headerFileST.inspect();
-		}
-
-		// WRITE FILES
-		try {
-			target.genRecognizerFile(tool,this,grammar,outputFileST);
-			if ( templates.isDefined("headerFile") ) {
-				ST extST = templates.getInstanceOf("headerFileExtension");
-				target.genRecognizerHeaderFile(tool,this,grammar,headerFileST,extST.render());
-			}
-			// write out the vocab interchange file; used by antlr,
-			// does not change per target
-			ST tokenVocabSerialization = genTokenVocabOutput();
-			String vocabFileName = getVocabFileName();
-			if ( vocabFileName!=null ) {
-				write(tokenVocabSerialization, vocabFileName);
-			}
-			//System.out.println(outputFileST.getDOTForDependencyGraph(false));
-		}
-		catch (IOException ioe) {
-			ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, ioe);
-		}
-		/*
-		System.out.println("num obj.prop refs: "+ ASTExpr.totalObjPropRefs);
-		System.out.println("num reflection lookups: "+ ASTExpr.totalReflectionLookups);
-		*/
-
-		return outputFileST;
-	}
-
-	/** Some targets will have some extra scopes; C++, for example, may have
-	 *  '@headerfile:name {action}' or something similar.  Make sure the
-	 *  target accepts the scopes in the action table.
-	 */
-	protected void verifyActionScopesOkForTarget(Map actions) {
-		Set actionScopeKeySet = actions.keySet();
-		for (Iterator it = actionScopeKeySet.iterator(); it.hasNext();) {
-			String scope = (String)it.next();
-			if ( !target.isValidActionScope(grammar.type, scope) ) {
-				// get any action from the scope to get error location
-				Map scopeActions = (Map)actions.get(scope);
-				GrammarAST actionAST =
-					(GrammarAST)scopeActions.values().iterator().next();
-				ErrorManager.grammarError(
-					ErrorManager.MSG_INVALID_ACTION_SCOPE,grammar,
-					actionAST.getToken(),scope,
-					grammar.getGrammarTypeString());
-			}
-		}
-	}
-
-	/** Actions may reference $x::y attributes; call translateAction on
-	 *  each action and replace that action in the Map.
-	 */
-	protected void translateActionAttributeReferences(Map actions) {
-		Set actionScopeKeySet = actions.keySet();
-		for (Iterator it = actionScopeKeySet.iterator(); it.hasNext();) {
-			String scope = (String)it.next();
-			Map scopeActions = (Map)actions.get(scope);
-			translateActionAttributeReferencesForSingleScope(null,scopeActions);
-		}
-	}
-
-	/** Use for translating rule @init{...} actions that have no scope */
-	public void translateActionAttributeReferencesForSingleScope(
-		Rule r,
-		Map scopeActions)
-	{
-		String ruleName=null;
-		if ( r!=null ) {
-			ruleName = r.name;
-		}
-		Set actionNameSet = scopeActions.keySet();
-		for (Iterator nameIT = actionNameSet.iterator(); nameIT.hasNext();) {
-			String name = (String) nameIT.next();
-			GrammarAST actionAST = (GrammarAST)scopeActions.get(name);
-			List chunks = translateAction(ruleName,actionAST);
-			scopeActions.put(name, chunks); // replace with translation
-		}
-	}
-
-	/** Error recovery in ANTLR recognizers.
-	 *
-	 *  Based upon original ideas:
-	 *
-	 *  Algorithms + Data Structures = Programs by Niklaus Wirth
-	 *
-	 *  and
-	 *
-	 *  A note on error recovery in recursive descent parsers:
-	 *  http://portal.acm.org/citation.cfm?id=947902.947905
-	 *
-	 *  Later, Josef Grosch had some good ideas:
-	 *  Efficient and Comfortable Error Recovery in Recursive Descent Parsers:
-	 *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
-	 *
-	 *  Like Grosch I implemented local FOLLOW sets that are combined at run-time
-	 *  upon error to avoid parsing overhead.
-	 */
-	public void generateLocalFOLLOW(GrammarAST referencedElementNode,
-									String referencedElementName,
-									String enclosingRuleName,
-									int elementIndex)
-	{
-		/*
-		System.out.println("compute FOLLOW "+grammar.name+"."+referencedElementNode.toString()+
-						 " for "+referencedElementName+"#"+elementIndex +" in "+
-						 enclosingRuleName+
-						 " line="+referencedElementNode.getLine());
-						 */
-		NFAState followingNFAState = referencedElementNode.followingNFAState;
-		LookaheadSet follow = null;
-		if ( followingNFAState!=null ) {
-			// compute follow for this element and, as side-effect, track
-			// the rule LOOK sensitivity.
-			follow = grammar.FIRST(followingNFAState);
-		}
-
-		if ( follow==null ) {
-			ErrorManager.internalError("no follow state or cannot compute follow");
-			follow = new LookaheadSet();
-		}
-		if ( follow.member(Label.EOF) ) {
-			// TODO: can we just remove?  Seems needed here:
-			// compilation_unit : global_statement* EOF
-			// Actually i guess we resync to EOF regardless
-			follow.remove(Label.EOF);
-		}
-		//System.out.println(" "+follow);
-
-        List tokenTypeList = null;
-        long[] words = null;
-		if ( follow.tokenTypeSet==null ) {
-			words = new long[1];
-            tokenTypeList = new ArrayList();
-        }
-		else {
-			BitSet bits = BitSet.of(follow.tokenTypeSet);
-			words = bits.toPackedArray();
-            tokenTypeList = follow.tokenTypeSet.toList();
-        }
-		// use the target to convert to hex strings (typically)
-		String[] wordStrings = new String[words.length];
-		for (int j = 0; j < words.length; j++) {
-			long w = words[j];
-			wordStrings[j] = target.getTarget64BitStringFromValue(w);
-		}
-		recognizerST.addAggr("bitsets.{name,inName,bits,tokenTypes,tokenIndex}",
-							 referencedElementName,
-							 enclosingRuleName,
-							 wordStrings,
-							 tokenTypeList,
-							 Utils.integer(elementIndex));
-		outputFileST.addAggr("bitsets.{name,inName,bits,tokenTypes,tokenIndex}",
-							 referencedElementName,
-							 enclosingRuleName,
-							 wordStrings,
-							 tokenTypeList,
-							 Utils.integer(elementIndex));
-		headerFileST.addAggr("bitsets.{name,inName,bits,tokenTypes,tokenIndex}",
-							 referencedElementName,
-							 enclosingRuleName,
-							 wordStrings,
-							 tokenTypeList,
-							 Utils.integer(elementIndex));
-	}
-
-	// L O O K A H E A D  D E C I S I O N  G E N E R A T I O N
-
-	/** Generate code that computes the predicted alt given a DFA.  The
-	 *  recognizerST can be either the main generated recognizerTemplate
-	 *  for storage in the main parser file or a separate file.  It's up to
-	 *  the code that ultimately invokes the codegen.g grammar rule.
-	 *
-	 *  Regardless, the output file and header file get a copy of the DFAs.
-	 */
-	public ST genLookaheadDecision(ST recognizerST,
-								   DFA dfa)
-	{
-		ST decisionST;
-		// If we are doing inline DFA and this one is acyclic and LL(*)
-		// I have to check for is-non-LL(*) because if non-LL(*) the cyclic
-		// check is not done by DFA.verify(); that is, verify() avoids
-		// doesStateReachAcceptState() if non-LL(*)
-		if ( dfa.canInlineDecision() ) {
-			decisionST =
-				acyclicDFAGenerator.genFixedLookaheadDecision(getTemplates(), dfa);
-		}
-		else {
-			// generate any kind of DFA here (cyclic or acyclic)
-			dfa.createStateTables(this);
-			outputFileST.add("cyclicDFAs", dfa);
-			headerFileST.add("cyclicDFAs", dfa);
-			decisionST = templates.getInstanceOf("dfaDecision");
-			String description = dfa.getNFADecisionStartState().getDescription();
-			description = target.getTargetStringLiteralFromString(description);
-			if ( description!=null ) {
-				decisionST.add("description", description);
-			}
-			decisionST.add("decisionNumber",
-						   Utils.integer(dfa.getDecisionNumber()));
-		}
-		return decisionST;
-	}
-
-	/** A special state is huge (too big for state tables) or has a predicated
-	 *  edge.  Generate a simple if-then-else.  It cannot be an accept state, as
-	 *  accept states have no emanating edges.  Don't worry about switch vs if-then-else
-	 *  because if you get here, the state is super complicated and needs an
-	 *  if-then-else.  This is used by the new DFA scheme created June 2006.
-	 */
-	public ST generateSpecialState(DFAState s) {
-		ST stateST;
-		stateST = templates.getInstanceOf("cyclicDFAState");
-		stateST.add("needErrorClause", Boolean.valueOf(true));
-		stateST.add("semPredState",
-					Boolean.valueOf(s.isResolvedWithPredicates()));
-		stateST.add("stateNumber", s.stateNumber);
-		stateST.add("decisionNumber", s.dfa.decisionNumber);
-
-		boolean foundGatedPred = false;
-		ST eotST = null;
-		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
-			Transition edge = (Transition) s.transition(i);
-			ST edgeST;
-			if ( edge.label.getAtom()==Label.EOT ) {
-				// this is the default clause; has to be held until last
-				edgeST = templates.getInstanceOf("eotDFAEdge");
-				stateST.remove("needErrorClause");
-				eotST = edgeST;
-			}
-			else {
-				edgeST = templates.getInstanceOf("cyclicDFAEdge");
-				ST exprST =
-					genLabelExpr(templates,edge,1);
-				edgeST.add("labelExpr", exprST);
-			}
-			edgeST.add("edgeNumber", Utils.integer(i + 1));
-			edgeST.add("targetStateNumber",
-					   Utils.integer(edge.target.stateNumber));
-			// stick in any gated predicates for any edge if not already a pred
-			if ( !edge.label.isSemanticPredicate() ) {
-				DFAState t = (DFAState)edge.target;
-				SemanticContext preds =	t.getGatedPredicatesInNFAConfigurations();
-				if ( preds!=null ) {
-					foundGatedPred = true;
-					ST predST = preds.genExpr(this,
-														  getTemplates(),
-														  t.dfa);
-					edgeST.add("predicates", predST.render());
-				}
-			}
-			if ( edge.label.getAtom()!=Label.EOT ) {
-				stateST.add("edges", edgeST);
-			}
-		}
-		if ( foundGatedPred ) {
-			// state has >= 1 edge with a gated pred (syn or sem)
-			// must rewind input first, set flag.
-			stateST.add("semPredState", new Boolean(foundGatedPred));
-		}
-		if ( eotST!=null ) {
-			stateST.add("edges", eotST);
-		}
-		return stateST;
-	}
-
-	/** Generate an expression for traversing an edge. */
-	protected ST genLabelExpr(STGroup templates,
-										  Transition edge,
-										  int k)
-	{
-		Label label = edge.label;
-		if ( label.isSemanticPredicate() ) {
-			return genSemanticPredicateExpr(templates, edge);
-		}
-		if ( label.isSet() ) {
-			return genSetExpr(templates, label.getSet(), k, true);
-		}
-		// must be simple label
-		ST eST = templates.getInstanceOf("lookaheadTest");
-		eST.add("atom", getTokenTypeAsTargetLabel(label.getAtom()));
-		eST.add("atomAsInt", Utils.integer(label.getAtom()));
-		eST.add("k", Utils.integer(k));
-		return eST;
-	}
-
-	protected ST genSemanticPredicateExpr(STGroup templates,
-													  Transition edge)
-	{
-		DFA dfa = ((DFAState)edge.target).dfa; // which DFA are we in
-		Label label = edge.label;
-		SemanticContext semCtx = label.getSemanticContext();
-		return semCtx.genExpr(this,templates,dfa);
-	}
-
-	/** For intervals such as [3..3, 30..35], generate an expression that
-	 *  tests the lookahead similar to LA(1)==3 || (LA(1)>=30&&LA(1)<=35)
-	 */
-	public ST genSetExpr(STGroup templates,
-									 IntSet set,
-									 int k,
-									 boolean partOfDFA)
-	{
-		if ( !(set instanceof IntervalSet) ) {
-			throw new IllegalArgumentException("unable to generate expressions for non IntervalSet objects");
-		}
-		IntervalSet iset = (IntervalSet)set;
-		if ( iset.getIntervals()==null || iset.getIntervals().size()==0 ) {
-			ST emptyST = new ST(templates, "");
-			emptyST.impl.name = "empty-set-expr";
-			return emptyST;
-		}
-		String testSTName = "lookaheadTest";
-		String testRangeSTName = "lookaheadRangeTest";
-		if ( !partOfDFA ) {
-			testSTName = "isolatedLookaheadTest";
-			testRangeSTName = "isolatedLookaheadRangeTest";
-		}
-		ST setST = templates.getInstanceOf("setTest");
-		Iterator iter = iset.getIntervals().iterator();
-		int rangeNumber = 1;
-		while (iter.hasNext()) {
-			Interval I = (Interval) iter.next();
-			int a = I.a;
-			int b = I.b;
-			ST eST;
-			if ( a==b ) {
-				eST = templates.getInstanceOf(testSTName);
-				eST.add("atom", getTokenTypeAsTargetLabel(a));
-				eST.add("atomAsInt", Utils.integer(a));
-				//eST.add("k",Utils.integer(k));
-			}
-			else {
-				eST = templates.getInstanceOf(testRangeSTName);
-				eST.add("lower", getTokenTypeAsTargetLabel(a));
-				eST.add("lowerAsInt", Utils.integer(a));
-				eST.add("upper", getTokenTypeAsTargetLabel(b));
-				eST.add("upperAsInt", Utils.integer(b));
-				eST.add("rangeNumber", Utils.integer(rangeNumber));
-			}
-			eST.add("k", Utils.integer(k));
-			setST.add("ranges", eST);
-			rangeNumber++;
-		}
-		return setST;
-	}
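-
-	// For example, for an IntervalSet such as [3..3, 30..35] with k=1 and
-	// partOfDFA=true, the lookaheadTest/lookaheadRangeTest templates typically
-	// render something along the lines of
-	//     LA(1)==3 || (LA(1)>=30 && LA(1)<=35)
-	// though the exact text depends on the active target's template group.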
-
-	// T O K E N  D E F I N I T I O N  G E N E R A T I O N
-
-	/** Set the tokens and literals attributes in the incoming
-	 *  code template.  This is not the token vocab interchange file, but
-	 *  rather a list of token type IDs needed by the recognizer.
-	 */
-	protected void genTokenTypeConstants(ST code) {
-		// make constants for the token types
-		Iterator tokenIDs = grammar.getTokenIDs().iterator();
-		while (tokenIDs.hasNext()) {
-			String tokenID = (String) tokenIDs.next();
-			int tokenType = grammar.getTokenType(tokenID);
-			if ( tokenType==Label.EOF ||
-				 tokenType>=Label.MIN_TOKEN_TYPE )
-			{
-				// don't do FAUX labels except EOF
-				code.addAggr("tokens.{name,type}", tokenID, Utils.integer(tokenType));
-			}
-		}
-	}
-
-	/** Generate a token names table that maps token type to a printable
-	 *  name: either the label like INT or the literal like "begin".
-	 */
-	protected void genTokenTypeNames(ST code) {
-		for (int t=Label.MIN_TOKEN_TYPE; t<=grammar.getMaxTokenType(); t++) {
-			String tokenName = grammar.getTokenDisplayName(t);
-			if ( tokenName!=null ) {
-				tokenName=target.getTargetStringLiteralFromString(tokenName, true);
-				code.add("tokenNames", tokenName);
-			}
-		}
-	}
-
-	/** Get a meaningful name for a token type useful during code generation.
-	 *  Literals without associated names are converted to the string equivalent
-	 *  of their integer values. Used to generate x==ID and x==34 type comparisons
-	 *  etc...  Essentially we are looking for the most obvious way to refer
-	 *  to a token type in the generated code.  If in the lexer, return the
-	 *  char literal translated to the target language.  For example, ttype=10
-	 *  will yield '\n' from the getTokenDisplayName method.  That must
-	 *  be converted to the target language's literal form.  For most C-derived
-	 *  languages, no translation is needed.
-	 */
-	public String getTokenTypeAsTargetLabel(int ttype) {
-		if ( grammar.type==Grammar.LEXER ) {
-			String name = grammar.getTokenDisplayName(ttype);
-			return target.getTargetCharLiteralFromANTLRCharLiteral(this,name);
-		}
-		return target.getTokenTypeAsTargetLabel(this,ttype);
-	}
-
-	/** Generate a token vocab file with all the token names/types.  For example:
-	 *  ID=7
-	 *  FOR=8
-	 *  'for'=8
-	 *
-	 *  This is independent of the target language; used by antlr internally
-	 */
-	protected ST genTokenVocabOutput() {
-		ST vocabFileST = new ST(vocabFilePattern);
-		vocabFileST.add("literals",(Object)null); // "define" literals arg
-		vocabFileST.add("tokens",(Object)null);
-		vocabFileST.impl.name = "vocab-file";
-		// make constants for the token names
-		Iterator tokenIDs = grammar.getTokenIDs().iterator();
-		while (tokenIDs.hasNext()) {
-			String tokenID = (String) tokenIDs.next();
-			int tokenType = grammar.getTokenType(tokenID);
-			if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
-				vocabFileST.addAggr("tokens.{name,type}", tokenID, Utils.integer(tokenType));
-			}
-		}
-
-		// now dump the strings
-		Iterator literals = grammar.getStringLiterals().iterator();
-		while (literals.hasNext()) {
-			String literal = (String) literals.next();
-			int tokenType = grammar.getTokenType(literal);
-			if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
-				vocabFileST.addAggr("tokens.{name,type}", literal, Utils.integer(tokenType));
-			}
-		}
-
-		return vocabFileST;
-	}
-
-	public List translateAction(String ruleName,
-								GrammarAST actionTree)
-	{
-		if ( actionTree.getType()==ANTLRParser.ARG_ACTION ) {
-			return translateArgAction(ruleName, actionTree);
-		}
-		ActionTranslator translator = new ActionTranslator(this,ruleName,actionTree);
-		List chunks = translator.translateToChunks();
-		chunks = target.postProcessAction(chunks, actionTree.token);
-		return chunks;
-	}
-
-	/** Translate an action like [3,"foo",a[3]] and return a List of the
-	 *  translated actions.  Because actions are themselves translated to a list
-	 *  of chunks, each argument's chunks must be concatenated together into an ST.
-	 *  Don't translate to strings early as we need to eval templates in context.
-	 */
-	public List<ST> translateArgAction(String ruleName,
-										   GrammarAST actionTree)
-	{
-		String actionText = actionTree.token.getText();
-		List<String> args = getListOfArgumentsFromAction(actionText,',');
-		List<ST> translatedArgs = new ArrayList<ST>();
-		for (String arg : args) {
-			if ( arg!=null ) {
-				Token actionToken =
-					new CommonToken(ANTLRParser.ACTION,arg);
-				ActionTranslator translator =
-					new ActionTranslator(this,ruleName,
-											  actionToken,
-											  actionTree.outerAltNum);
-				List chunks = translator.translateToChunks();
-				chunks = target.postProcessAction(chunks, actionToken);
-				ST catST = new ST(templates, "<chunks>");
-				catST.add("chunks", chunks);
-				translatedArgs.add(catST);
-			}
-		}
-		if ( translatedArgs.size()==0 ) {
-			return null;
-		}
-		return translatedArgs;
-	}
-
-	public static List<String> getListOfArgumentsFromAction(String actionText,
-															int separatorChar)
-	{
-		List<String> args = new ArrayList<String>();
-		getListOfArgumentsFromAction(actionText, 0, -1, separatorChar, args);
-		return args;
-	}
-
-	/** Given an arg action like
-	 *
-	 *  [x, (*a).foo(21,33), 3.2+1, '\n',
-	 *  "a,oo\nick", {bl, "fdkj"eck}, ["cat\n,", x, 43]]
-	 *
-	 *  convert to a list of arguments.  Allow nested square brackets etc...
-	 *  Set separatorChar to ';' or ',' or whatever you want.
-	 */
-	public static int getListOfArgumentsFromAction(String actionText,
-												   int start,
-												   int targetChar,
-												   int separatorChar,
-												   List<String> args)
-	{
-		if ( actionText==null ) {
-			return -1;
-		}
-		actionText = actionText.replaceAll("//.*\n", "");
-		int n = actionText.length();
-		//System.out.println("actionText@"+start+"->"+(char)targetChar+"="+actionText.substring(start,n));
-		int p = start;
-		int last = p;
-		while ( p<n && actionText.charAt(p)!=targetChar ) {
-			int c = actionText.charAt(p);
-			switch ( c ) {
-				case '\'' :
-					p++;
-					while ( p<n && actionText.charAt(p)!='\'' ) {
-						if ( actionText.charAt(p)=='\\' && (p+1)<n &&
-							 actionText.charAt(p+1)=='\'' )
-						{
-							p++; // skip escaped quote
-						}
-						p++;
-					}
-					p++;
-					break;
-				case '"' :
-					p++;
-					while ( p<n && actionText.charAt(p)!='\"' ) {
-						if ( actionText.charAt(p)=='\\' && (p+1)<n &&
-							 actionText.charAt(p+1)=='\"' )
-						{
-							p++; // skip escaped quote
-						}
-						p++;
-					}
-					p++;
-					break;
-				case '(' :
-					p = getListOfArgumentsFromAction(actionText,p+1,')',separatorChar,args);
-					break;
-				case '{' :
-					p = getListOfArgumentsFromAction(actionText,p+1,'}',separatorChar,args);
-					break;
-				case '<' :
-					if ( actionText.indexOf('>',p+1)>=p ) {
-						// do we see a matching '>' ahead?  if so, hope it's a generic
-						// and not a less-than followed by an expression with a greater-than
-						p = getListOfArgumentsFromAction(actionText,p+1,'>',separatorChar,args);
-					}
-					else {
-						p++; // treat as normal char
-					}
-					break;
-				case '[' :
-					p = getListOfArgumentsFromAction(actionText,p+1,']',separatorChar,args);
-					break;
-				default :
-					if ( c==separatorChar && targetChar==-1 ) {
-						String arg = actionText.substring(last, p);
-						//System.out.println("arg="+arg);
-						args.add(arg.trim());
-						last = p+1;
-					}
-					p++;
-					break;
-			}
-		}
-		if ( targetChar==-1 && p<=n ) {
-			String arg = actionText.substring(last, p).trim();
-			//System.out.println("arg="+arg);
-			if ( arg.length()>0 ) {
-				args.add(arg.trim());
-			}
-		}
-		p++;
-		return p;
-	}
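-
-	// For example, splitting the arg action text 3,"foo",a[3] at top-level
-	// commas yields three arguments; quoted strings and nested brackets are
-	// not split:
-	//
-	//     List<String> args = getListOfArgumentsFromAction("3,\"foo\",a[3]", ',');
-	//     // args is ["3", "\"foo\"", "a[3]"]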
-
-	/** Given a template constructor action like %foo(a={...}) in
-	 *  an action, translate it to the appropriate template constructor
-	 *  from the templateLib. This translates a *piece* of the action.
-	 */
-	public ST translateTemplateConstructor(String ruleName,
-													   int outerAltNum,
-													   Token actionToken,
-													   String templateActionText)
-	{
-		// first, parse with antlr.g
-		//System.out.println("translate template: "+templateActionText);
-		ANTLRLexer lexer = new ANTLRLexer(new ANTLRStringStream(templateActionText));
-		lexer.setFileName(grammar.getFileName());
-		ANTLRParser parser = ANTLRParser.createParser(new CommonTokenStream(lexer));
-		parser.setFileName(grammar.getFileName());
-		ANTLRParser.rewrite_template_return parseResult = null;
-		try {
-			parseResult = parser.rewrite_template();
-		}
-		catch (RecognitionException re) {
-			ErrorManager.grammarError(ErrorManager.MSG_INVALID_TEMPLATE_ACTION,
-										  grammar,
-										  actionToken,
-										  templateActionText);
-		}
-		catch (Exception tse) {
-			ErrorManager.internalError("can't parse template action",tse);
-		}
-		GrammarAST rewriteTree = (GrammarAST)parseResult.getTree();
-
-		// then translate via codegen.g
-		CodeGenTreeWalker gen = new CodeGenTreeWalker(new CommonTreeNodeStream(rewriteTree));
-		gen.init(grammar);
-		gen.setCurrentRuleName(ruleName);
-		gen.setOuterAltNum(outerAltNum);
-		ST st = null;
-		try {
-			st = gen.rewrite_template();
-		}
-		catch (RecognitionException re) {
-			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
-							   re);
-		}
-		return st;
-	}
-
-
-	public void issueInvalidScopeError(String x,
-									   String y,
-									   Rule enclosingRule,
-									   Token actionToken,
-									   int outerAltNum)
-	{
-		//System.out.println("error $"+x+"::"+y);
-		Rule r = grammar.getRule(x);
-		AttributeScope scope = grammar.getGlobalScope(x);
-		if ( scope==null ) {
-			if ( r!=null ) {
-				scope = r.ruleScope; // if not global, might be rule scope
-			}
-		}
-		if ( scope==null ) {
-			ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE,
-										  grammar,
-										  actionToken,
-										  x);
-		}
-		else if ( scope.getAttribute(y)==null ) {
-			ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE,
-										  grammar,
-										  actionToken,
-										  x,
-										  y);
-		}
-	}
-
-	public void issueInvalidAttributeError(String x,
-										   String y,
-										   Rule enclosingRule,
-										   Token actionToken,
-										   int outerAltNum)
-	{
-		//System.out.println("error $"+x+"."+y);
-		if ( enclosingRule==null ) {
-			// action not in a rule
-			ErrorManager.grammarError(ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE,
-										  grammar,
-										  actionToken,
-										  x,
-										  y);
-			return;
-		}
-
-		// action is in a rule
-		Grammar.LabelElementPair label = enclosingRule.getRuleLabel(x);
-
-		if ( label!=null || enclosingRule.getRuleRefsInAlt(x, outerAltNum)!=null ) {
-			// $rulelabel.attr or $ruleref.attr; must be unknown attr
-			String refdRuleName = x;
-			if ( label!=null ) {
-				refdRuleName = enclosingRule.getRuleLabel(x).referencedRuleName;
-			}
-			Rule refdRule = grammar.getRule(refdRuleName);
-			AttributeScope scope = refdRule.getAttributeScope(y);
-			if ( scope==null ) {
-				ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_RULE_ATTRIBUTE,
-										  grammar,
-										  actionToken,
-										  refdRuleName,
-										  y);
-			}
-			else if ( scope.isParameterScope ) {
-				ErrorManager.grammarError(ErrorManager.MSG_INVALID_RULE_PARAMETER_REF,
-										  grammar,
-										  actionToken,
-										  refdRuleName,
-										  y);
-			}
-			else if ( scope.isDynamicRuleScope ) {
-				ErrorManager.grammarError(ErrorManager.MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF,
-										  grammar,
-										  actionToken,
-										  refdRuleName,
-										  y);
-			}
-		}
-
-	}
-
-	public void issueInvalidAttributeError(String x,
-										   Rule enclosingRule,
-										   Token actionToken,
-										   int outerAltNum)
-	{
-		//System.out.println("error $"+x);
-		if ( enclosingRule==null ) {
-			// action not in a rule
-			ErrorManager.grammarError(ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE,
-										  grammar,
-										  actionToken,
-										  x);
-			return;
-		}
-
-		// action is in a rule
-		Grammar.LabelElementPair label = enclosingRule.getRuleLabel(x);
-		AttributeScope scope = enclosingRule.getAttributeScope(x);
-
-		if ( label!=null ||
-			 enclosingRule.getRuleRefsInAlt(x, outerAltNum)!=null ||
-			 enclosingRule.name.equals(x) )
-		{
-			ErrorManager.grammarError(ErrorManager.MSG_ISOLATED_RULE_SCOPE,
-										  grammar,
-										  actionToken,
-										  x);
-		}
-		else if ( scope!=null && scope.isDynamicRuleScope ) {
-			ErrorManager.grammarError(ErrorManager.MSG_ISOLATED_RULE_ATTRIBUTE,
-										  grammar,
-										  actionToken,
-										  x);
-		}
-		else {
-			ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE,
-									  grammar,
-									  actionToken,
-									  x);
-		}
-	}
-
-	// M I S C
-
-	public STGroup getTemplates() {
-		return templates;
-	}
-
-	public STGroup getBaseTemplates() {
-		return baseTemplates;
-	}
-
-	public void setDebug(boolean debug) {
-		this.debug = debug;
-	}
-
-	public void setTrace(boolean trace) {
-		this.trace = trace;
-	}
-
-	public void setProfile(boolean profile) {
-		this.profile = profile;
-		if ( profile ) {
-			setDebug(true); // requires debug events
-		}
-	}
-
-	public ST getRecognizerST() {
-		return outputFileST;
-	}
-
-	/** Generate TParser.java and TLexer.java from T.g if combined, else
-	 *  just use T.java as output regardless of type.
-	 */
-	public String getRecognizerFileName(String name, int type) {
-		ST extST = templates.getInstanceOf("codeFileExtension");
-		String recognizerName = grammar.getRecognizerName();
-		return recognizerName+extST.render();
-		/*
-		String suffix = "";
-		if ( type==Grammar.COMBINED ||
-			 (type==Grammar.LEXER && !grammar.implicitLexer) )
-		{
-			suffix = Grammar.grammarTypeToFileNameSuffix[type];
-		}
-		return name+suffix+extST.toString();
-		*/
-	}
-
-	/** What is the name of the vocab file generated for this grammar?
-	 *  Returns null if no .tokens file should be generated.
-	 */
-	public String getVocabFileName() {
-		if ( grammar.isBuiltFromString() ) {
-			return null;
-		}
-		return grammar.name+VOCAB_FILE_EXTENSION;
-	}
-
-	public void write(ST code, String fileName) throws IOException {
-		//long start = System.currentTimeMillis();
-		Writer w = tool.getOutputFile(grammar, fileName);
-		// write the output through an auto-indenting writer
-		STWriter wr = new AutoIndentWriter(w);
-		wr.setLineWidth(lineWidth);
-		code.write(wr);
-		w.close();
-		//long stop = System.currentTimeMillis();
-		//System.out.println("render time for "+fileName+": "+(int)(stop-start)+"ms");
-	}
-
-	/** You can generate a switch rather than if-then-else for a DFA state
-	 *  if there are no semantic predicates and the number of edge label
-	 *  values is small enough; e.g., don't generate a switch for a state
-	 *  containing an edge label such as 20..52330 (the resulting bytecode
-	 *  would probably overflow the 65k method size limit).
-	 */
-	protected boolean canGenerateSwitch(DFAState s) {
-		if ( !GENERATE_SWITCHES_WHEN_POSSIBLE ) {
-			return false;
-		}
-		int size = 0;
-		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
-			Transition edge = (Transition) s.transition(i);
-			if ( edge.label.isSemanticPredicate() ) {
-				return false;
-			}
-			// can't do a switch if the edges are going to require predicates
-			if ( edge.label.getAtom()==Label.EOT ) {
-				int EOTPredicts = ((DFAState)edge.target).getUniquelyPredictedAlt();
-				if ( EOTPredicts==NFA.INVALID_ALT_NUMBER ) {
-					// EOT target has to be a predicate then; no unique alt
-					return false;
-				}
-			}
-			// if target is a state with gated preds, we need to use preds on
-			// this edge then to reach it.
-			if ( ((DFAState)edge.target).getGatedPredicatesInNFAConfigurations()!=null ) {
-				return false;
-			}
-			size += edge.label.getSet().size();
-		}
-		if ( s.getNumberOfTransitions()<MIN_SWITCH_ALTS ||
-			 size>MAX_SWITCH_CASE_LABELS ) {
-			return false;
-		}
-		return true;
-	}
-
-	/** Create a label to track a token / rule reference's result.
-	 *  Technically, this is a place where I break model-view separation
-	 *  as I am creating a variable name that could be invalid in a
-	 *  target language; however, label ::= <ID><INT> is probably OK in
-	 *  all languages we care about.
-	 */
-	public String createUniqueLabel(String name) {
-		return new StringBuffer()
-			.append(name).append(uniqueLabelNumber++).toString();
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/DelphiTarget.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/DelphiTarget.java
deleted file mode 100644
index 24bb819..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/DelphiTarget.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.antlr.analysis.Label;
-import org.antlr.misc.Utils;
-import org.stringtemplate.v4.ST;
-import org.antlr.tool.Grammar;
-
-public class DelphiTarget extends Target 
-{
-  public DelphiTarget() { 
-    targetCharValueEscape['\n'] = "'#10'";    
-    targetCharValueEscape['\r'] = "'#13'";    
-    targetCharValueEscape['\t'] = "'#9'";   
-    targetCharValueEscape['\b'] = "\\b";    
-    targetCharValueEscape['\f'] = "\\f";    
-    targetCharValueEscape['\\'] = "\\";   
-    targetCharValueEscape['\''] = "''";   
-    targetCharValueEscape['"'] = "'";
-  } 
-
-  protected ST chooseWhereCyclicDFAsGo(Tool tool,
-                           CodeGenerator generator,
-                           Grammar grammar,
-                           ST recognizerST,
-                           ST cyclicDFAST)
-  {
-    return recognizerST;
-  }
-
-  public String encodeIntAsCharEscape(int v)
-  {
-    if (v <= 127)
-    {
-      String hex1 = Integer.toHexString(v | 0x10000).substring(3, 5);
-      return "'#$" + hex1 + "'";
-    }
-    String hex = Integer.toHexString(v | 0x10000).substring(1, 5);
-    return "'#$" + hex + "'";
-  }
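-
-  // For example, encodeIntAsCharEscape(10) returns "'#$0a'" (two hex digits
-  // for values <= 127) and encodeIntAsCharEscape(955) returns "'#$03bb'"
-  // (four hex digits otherwise).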
-  
-  public String getTargetCharLiteralFromANTLRCharLiteral(
-    CodeGenerator generator,
-    String literal)
-  {
-    StringBuffer buf = new StringBuffer();
-    int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
-    if ( c<Label.MIN_CHAR_VALUE ) {
-      return "0";
-    }
-    // normal char
-    buf.append(c);
-
-    return buf.toString();
-  } 
-
-  public String getTargetStringLiteralFromString(String s, boolean quoted) {
-    if ( s==null ) {
-      return null;
-    }
-    StringBuffer buf = new StringBuffer();
-    if ( quoted ) {
-      buf.append('\'');
-    }
-    for (int i=0; i<s.length(); i++) {
-      int c = s.charAt(i);
-      if ( c!='"' && // don't escape double quotes in strings for Delphi
-         c<targetCharValueEscape.length &&
-         targetCharValueEscape[c]!=null )
-      {
-        buf.append(targetCharValueEscape[c]);
-      }
-      else {
-        buf.append((char)c);
-      }
-      if ((i & 127) == 127)
-      {
-        // Concatenate string literals because Delphi doesn't support literals over 255 characters,
-        // and the code editor doesn't support lines over 1023 characters
-        buf.append("\' + \r\n  \'");
-      }
-    }
-    if ( quoted ) {
-      buf.append('\'');
-    }
-    return buf.toString();
-  }
-
-  public String getTargetStringLiteralFromANTLRStringLiteral(
-    CodeGenerator generator,
-    String literal)
-  {
-    literal = Utils.replace(literal,"\\\'","''"); // \' to ' to normalize
-    literal = Utils.replace(literal,"\\r\\n","'#13#10'"); 
-    literal = Utils.replace(literal,"\\r","'#13'"); 
-    literal = Utils.replace(literal,"\\n","'#10'"); 
-    StringBuffer buf = new StringBuffer(literal);
-    buf.setCharAt(0,'\'');
-    buf.setCharAt(literal.length()-1,'\'');
-    return buf.toString();
-  }
-   
-  public String getTarget64BitStringFromValue(long word) {
-    int numHexDigits = 8*2;
-    StringBuffer buf = new StringBuffer(numHexDigits+2);
-    buf.append("$");
-    String digits = Long.toHexString(word);
-    digits = digits.toUpperCase();
-    int padding = numHexDigits - digits.length();
-    // pad left with zeros
-    for (int i=1; i<=padding; i++) {
-      buf.append('0');
-    }
-    buf.append(digits);
-    return buf.toString();
-  }
-
-}
\ No newline at end of file
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/JavaScriptTarget.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/JavaScriptTarget.java
deleted file mode 100755
index 31e4ea5..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/JavaScriptTarget.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.codegen;
-
-public class JavaScriptTarget extends Target {
-    /** Convert an int to a JavaScript Unicode character literal.
-     *
-     *  The current JavaScript spec (ECMA-262) doesn't provide for octal
-     *  notation in String literals, although some implementations support it.
-     *  This method overrides the parent class so that characters will always
-     *  be encoded as Unicode literals (e.g. \u0011).
-     */
-    public String encodeIntAsCharEscape(int v) {
-        String hex = Integer.toHexString(v|0x10000).substring(1,5);
-        return "\\u"+hex;
-    }
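-
-    // For example, encodeIntAsCharEscape(17) returns the string \u0011 --
-    // always four hex digits, never octal, per the comment above.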
-
-    /** Convert a long to two 32-bit numbers separated by a comma.
-     *  JavaScript does not support 64-bit numbers, so we need to break
-     *  the number into two 32-bit literals to give to the BitSet.  A number like
-     *  0xHHHHHHHHLLLLLLLL is broken into the following string:
-     *  "0xLLLLLLLL, 0xHHHHHHHH"
-     *  Note that the low order bits are first, followed by the high order bits.
-     *  This is to match how the BitSet constructor works, where the bits are
-     *  passed in 32-bit chunks with the low-order bits coming first.
-     *
-     *  Note: stole the following two methods from the ActionScript target.
-     */
-    public String getTarget64BitStringFromValue(long word) {
-        StringBuffer buf = new StringBuffer(22); // enough for the two "0x", "," and " "
-        buf.append("0x");
-        writeHexWithPadding(buf, Integer.toHexString((int)(word & 0x00000000ffffffffL)));
-        buf.append(", 0x");
-        writeHexWithPadding(buf, Integer.toHexString((int)(word >> 32)));
-
-        return buf.toString();
-    }
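-
-    // For example, getTarget64BitStringFromValue(0x1122334455667788L) returns
-    // "0x55667788, 0x11223344" -- the low-order word first, as described above.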
-
-    private void writeHexWithPadding(StringBuffer buf, String digits) {
-        digits = digits.toUpperCase();
-        int padding = 8 - digits.length();
-        // pad left with zeros
-        for (int i=1; i<=padding; i++) {
-            buf.append('0');
-        }
-        buf.append(digits);
-    }
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/JavaTarget.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/JavaTarget.java
deleted file mode 100644
index 3ec7a86..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/JavaTarget.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.Rule;
-import org.stringtemplate.v4.ST;
-
-import java.util.Set;
-
-public class JavaTarget extends Target {
-	protected ST chooseWhereCyclicDFAsGo(Tool tool,
-										 CodeGenerator generator,
-										 Grammar grammar,
-										 ST recognizerST,
-										 ST cyclicDFAST)
-	{
-		return recognizerST;
-	}
-
-	@Override
-	protected void performGrammarAnalysis(CodeGenerator generator, Grammar grammar) {
-		super.performGrammarAnalysis(generator, grammar);
-		for (Rule rule : grammar.getRules()) {
-			rule.throwsSpec.add("RecognitionException");
-		}
-		Set<Rule> delegatedRules = grammar.getDelegatedRules();
-		if ( delegatedRules!=null ) {
-			for (Rule rule : delegatedRules) {
-				rule.throwsSpec.add("RecognitionException");
-			}
-		}
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/ObjCTarget.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/ObjCTarget.java
deleted file mode 100644
index e4554e9..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/ObjCTarget.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr and Alan Condit
- *  Copyright (c) 2006 Kay Roepke (Objective-C runtime)
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.antlr.misc.Utils;
-import org.stringtemplate.v4.ST;
-import org.antlr.tool.Grammar;
-
-import java.io.IOException;
-
-public class ObjCTarget extends Target {
-	protected void genRecognizerHeaderFile(Tool tool,
-										   CodeGenerator generator,
-										   Grammar grammar,
-										   ST headerFileST,
-										   String extName)
-	throws IOException
-	{
-		generator.write(headerFileST, grammar.name + Grammar.grammarTypeToFileNameSuffix[grammar.type] + extName);
-	}
-
-	public String getTargetCharLiteralFromANTLRCharLiteral(CodeGenerator generator,
-														   String literal)
-	{
-		if  (literal.startsWith("'\\u") ) {
-			literal = "0x" +literal.substring(3, 7);
-		} else	{
-			int c = literal.charAt(1); // TJP
-			if  (c < 32 || c > 127) {
-				literal  =  "0x" + Integer.toHexString(c);
-			}
-		}
-
-		return literal;
-	}
-
-	/** Convert from an ANTLR string literal found in a grammar file to
-	*  an equivalent string literal in the target language.  For Objective-C,
-	*  this is the translation 'a\n"' -> @"a\n\"".  Expect single quotes
-	*  around the incoming literal.  Just flip the quotes, replace double
-	*  quotes with \", and prefix the literal with @.
-	*/
-	public String getTargetStringLiteralFromANTLRStringLiteral(CodeGenerator generator,
-															   String literal)
-	{
-		literal = Utils.replace(literal,"\"","\\\"");
-		StringBuffer buf = new StringBuffer(literal);
-		buf.setCharAt(0,'"');
-		buf.setCharAt(literal.length()-1,'"');
-		buf.insert(0,'@');
-		return buf.toString();
-	}
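-
-	// For example, the grammar literal 'hi"there' is translated to the
-	// NSString literal @"hi\"there".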
-
-	/** If we have a label, prefix it with the recognizer's name */
-	public String getTokenTypeAsTargetLabel(CodeGenerator generator, int ttype) {
-		String name = generator.grammar.getTokenDisplayName(ttype);
-		// If name is a literal, return the token type instead
-		if ( name.charAt(0)=='\'' ) {
-			return String.valueOf(ttype);
-		}
-		return name;
-		//return generator.grammar.name + Grammar.grammarTypeToFileNameSuffix[generator.grammar.type] + "_" + name;
-		//return super.getTokenTypeAsTargetLabel(generator, ttype);
-		//return this.getTokenTextAndTypeAsTargetLabel(generator, null, ttype);
-	}
-
-	/** Targets must be able to override the labels used for token types. Sometimes the label also depends on the token text. */
-	public String getTokenTextAndTypeAsTargetLabel(CodeGenerator generator, String text, int tokenType) {
-		String name = generator.grammar.getTokenDisplayName(tokenType);
-		// If name is a literal, return the token type instead
-		if ( name.charAt(0)=='\'' ) {
-			return String.valueOf(tokenType);
-		}
-		String textEquivalent = text == null ? name : text;
-		if (textEquivalent.charAt(0) >= '0' && textEquivalent.charAt(0) <= '9') {
-			return textEquivalent;
-		} else {
-			return generator.grammar.name + Grammar.grammarTypeToFileNameSuffix[generator.grammar.type] + "_" + textEquivalent;
-		}
-	}
-
-}
-
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/Perl5Target.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/Perl5Target.java
deleted file mode 100644
index dc16e39..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/Perl5Target.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.codegen;
-
-import org.antlr.analysis.Label;
-import org.antlr.tool.AttributeScope;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.RuleLabelScope;
-
-public class Perl5Target extends Target {
-    public Perl5Target() {
-        targetCharValueEscape['$'] = "\\$";
-        targetCharValueEscape['@'] = "\\@";
-        targetCharValueEscape['%'] = "\\%";
-        AttributeScope.tokenScope.addAttribute("self", null);
-        RuleLabelScope.predefinedLexerRulePropertiesScope.addAttribute("self", null);
-    }
-
-    public String getTargetCharLiteralFromANTLRCharLiteral(final CodeGenerator generator,
-                                                           final String literal) {
-        final StringBuffer buf = new StringBuffer(10);
-
-        final int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
-        if (c < Label.MIN_CHAR_VALUE) {
-            buf.append("\\x{0000}");
-        } else if (c < targetCharValueEscape.length &&
-                targetCharValueEscape[c] != null) {
-            buf.append(targetCharValueEscape[c]);
-        } else if (Character.UnicodeBlock.of((char) c) ==
-                Character.UnicodeBlock.BASIC_LATIN &&
-                !Character.isISOControl((char) c)) {
-            // normal char
-            buf.append((char) c);
-        } else {
-            // must be something unprintable...use \\uXXXX
-            // turn on the bit above max "\\uFFFF" value so that we pad with zeros
-            // then only take last 4 digits
-            String hex = Integer.toHexString(c | 0x10000).toUpperCase().substring(1, 5);
-            buf.append("\\x{");
-            buf.append(hex);
-            buf.append("}");
-        }
-
-        if (buf.indexOf("\\") == -1) {
-            // no need for interpolation, use single quotes
-            buf.insert(0, '\'');
-            buf.append('\'');
-        } else {
-            // need string interpolation
-            buf.insert(0, '\"');
-            buf.append('\"');
-        }
-
-        return buf.toString();
-    }
-
-    public String encodeIntAsCharEscape(final int v) {
-        final int intValue;
-        if ((v & 0x8000) == 0) {
-            intValue = v;
-        } else {
-            intValue = -(0x10000 - v);
-        }
-
-        return String.valueOf(intValue);
-    }
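-
-    // The 16-bit value is reinterpreted as signed: for example,
-    // encodeIntAsCharEscape(0x0041) returns "65" while
-    // encodeIntAsCharEscape(0xFFFF) returns "-1".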
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/PythonTarget.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/PythonTarget.java
deleted file mode 100644
index d087f4c..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/PythonTarget.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
-
-Please excuse my obvious lack of Java experience. The code here is probably
-full of WTFs - though IMHO Java is the Real WTF(TM) here...
-
- */
-
-package org.antlr.codegen;
-
-import org.antlr.runtime.Token;
-import org.antlr.tool.Grammar;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class PythonTarget extends Target {
-    /** Target must be able to override the labels used for token types */
-    public String getTokenTypeAsTargetLabel(CodeGenerator generator,
-					    int ttype) {
-	// use ints for predefined types;
-	// <invalid> <EOR> <DOWN> <UP>
-	if ( ttype >= 0 && ttype <= 3 ) {
-	    return String.valueOf(ttype);
-	}
-
-	String name = generator.grammar.getTokenDisplayName(ttype);
-
-	// If name is a literal, return the token type instead
-	if ( name.charAt(0)=='\'' ) {
-	    return String.valueOf(ttype);
-	}
-
-	return name;
-    }
-
-    public String getTargetCharLiteralFromANTLRCharLiteral(
-            CodeGenerator generator,
-            String literal) {
-	int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
-	return String.valueOf(c);
-    }
-
-    private List splitLines(String text) {
-		ArrayList l = new ArrayList();
-		int idx = 0;
-
-		while ( true ) {
-			int eol = text.indexOf("\n", idx);
-			if ( eol == -1 ) {
-				l.add(text.substring(idx));
-				break;
-			}
-			else {
-				l.add(text.substring(idx, eol+1));
-				idx = eol+1;
-			}
-		}
-
-		return l;
-    }
-
-    public List postProcessAction(List chunks, Token actionToken) {
-		/* TODO
-		   - check for and report TAB usage
-		 */
-
-		//System.out.println("\n*** Action at " + actionToken.getLine() + ":" + actionToken.getColumn());
-
-		/* First I create a new list of chunks. String chunks are split into
-		   lines and some whitespace may be added at the beginning.
-
-		   As a result I get a list of chunks
-		   - where the first line starts at column 0
-		   - where every LF is at the end of a string chunk
-		*/
-
-		List nChunks = new ArrayList();
-		for (int i = 0; i < chunks.size(); i++) {
-			Object chunk = chunks.get(i);
-
-			if ( chunk instanceof String ) {
-				String text = (String)chunks.get(i);
-				if ( nChunks.size() == 0 && actionToken.getCharPositionInLine() >= 0 ) {
-					// first chunk and some 'virtual' WS at beginning
-					// prepend to this chunk
-
-					String ws = "";
-					for ( int j = 0 ; j < actionToken.getCharPositionInLine() ; j++ ) {
-						ws += " ";
-					}
-					text = ws + text;
-				}
-
-				List parts = splitLines(text);
-				for ( int j = 0 ; j < parts.size() ; j++ ) {
-					chunk = parts.get(j);
-					nChunks.add(chunk);
-				}
-			}
-			else {
-				if ( nChunks.size() == 0 && actionToken.getCharPositionInLine() >= 0 ) {
-					// first chunk and some 'virtual' WS at beginning
-					// add as a chunk of its own
-
-					String ws = "";
-					for ( int j = 0 ; j <= actionToken.getCharPositionInLine() ; j++ ) {
-						ws += " ";
-					}
-					nChunks.add(ws);
-				}
-
-				nChunks.add(chunk);
-			}
-		}
-
-		int lineNo = actionToken.getLine();
-		int col = 0;
-
-		// strip trailing empty lines
-		int lastChunk = nChunks.size() - 1;
-		while ( lastChunk > 0
-				&& nChunks.get(lastChunk) instanceof String
-				&& ((String)nChunks.get(lastChunk)).trim().length() == 0 )
-			lastChunk--;
-
-		// strip leading empty lines
-		int firstChunk = 0;
-		while ( firstChunk <= lastChunk
-				&& nChunks.get(firstChunk) instanceof String
-				&& ((String)nChunks.get(firstChunk)).trim().length() == 0
-				&& ((String)nChunks.get(firstChunk)).endsWith("\n") ) {
-			lineNo++;
-			firstChunk++;
-		}
-
-		int indent = -1;
-		for ( int i = firstChunk ; i <= lastChunk ; i++ ) {
-			Object chunk = nChunks.get(i);
-
-			//System.out.println(lineNo + ":" + col + " " + quote(chunk.toString()));
-
-			if ( chunk instanceof String ) {
-				String text = (String)chunk;
-
-				if ( col == 0 ) {
-					if ( indent == -1 ) {
-						// first non-blank line
-						// count number of leading whitespaces
-
-						indent = 0;
-						for ( int j = 0; j < text.length(); j++ ) {
-							if ( !Character.isWhitespace(text.charAt(j)) )
-								break;
-			
-							indent++;
-						}
-					}
-
-					if ( text.length() >= indent ) {
-						int j;
-						for ( j = 0; j < indent ; j++ ) {
-							if ( !Character.isWhitespace(text.charAt(j)) ) {
-								// should do real error reporting here...
-								System.err.println("Warning: badly indented line " + lineNo + " in action:");
-								System.err.println(text);
-								break;
-							}
-						}
-
-						nChunks.set(i, text.substring(j));
-					}
-					else if ( text.trim().length() > 0 ) {
-						// should do real error reporting here...
-						System.err.println("Warning: badly indented line " + lineNo + " in action:");
-						System.err.println(text);
-					}
-				}
-
-				if ( text.endsWith("\n") ) {
-					lineNo++;
-					col = 0;
-				}
-				else {
-					col += text.length();
-				}
-			}
-			else {
-				// not really correct, but all I need is col to increment...
-				col += 1;
-			}
-		}
-
-		return nChunks;
-    }
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/RubyTarget.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/RubyTarget.java
deleted file mode 100644
index 4506d01..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/RubyTarget.java
+++ /dev/null
@@ -1,482 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2010 Kyle Yetter
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.antlr.tool.Grammar;
-import org.stringtemplate.v4.AttributeRenderer;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-
-import java.io.IOException;
-import java.util.*;
-
-public class RubyTarget extends Target
-{
-    /** A set of ruby keywords which are used to escape labels and method names
-     *  which will cause parse errors in the ruby source
-     */
-    public static final Set rubyKeywords =
-    new HashSet() {
-        {
-        	add( "alias" );     add( "END" );     add( "retry" );
-        	add( "and" );       add( "ensure" );  add( "return" );
-        	add( "BEGIN" );     add( "false" );   add( "self" );
-        	add( "begin" );     add( "for" );     add( "super" );
-        	add( "break" );     add( "if" );      add( "then" );
-        	add( "case" );      add( "in" );      add( "true" );
-        	add( "class" );     add( "module" );  add( "undef" );
-        	add( "def" );       add( "next" );    add( "unless" );
-        	add( "defined?" );  add( "nil" );     add( "until" );
-        	add( "do" );        add( "not" );     add( "when" );
-        	add( "else" );      add( "or" );      add( "while" );
-        	add( "elsif" );     add( "redo" );    add( "yield" );
-        	add( "end" );       add( "rescue" );
-        }
-    };
-
-    public static Map<String, Map<String, Object>> sharedActionBlocks = new HashMap<String, Map<String, Object>>();
-
-    public class RubyRenderer implements AttributeRenderer
-    {
-    	protected String[] rubyCharValueEscape = new String[256];
-
-    	public RubyRenderer() {
-    		for ( int i = 0; i < 16; i++ ) {
-    			rubyCharValueEscape[ i ] = "\\x0" + Integer.toHexString( i );
-    		}
-    		for ( int i = 16; i < 32; i++ ) {
-    			rubyCharValueEscape[ i ] = "\\x" + Integer.toHexString( i );
-    		}
-    		for ( char i = 32; i < 127; i++ ) {
-    			rubyCharValueEscape[ i ] = Character.toString( i );
-    		}
-    		for ( int i = 127; i < 256; i++ ) {
-    			rubyCharValueEscape[ i ] = "\\x" + Integer.toHexString( i );
-    		}
-
-    		rubyCharValueEscape['\n'] = "\\n";
-    		rubyCharValueEscape['\r'] = "\\r";
-    		rubyCharValueEscape['\t'] = "\\t";
-    		rubyCharValueEscape['\b'] = "\\b";
-    		rubyCharValueEscape['\f'] = "\\f";
-    		rubyCharValueEscape['\\'] = "\\\\";
-    		rubyCharValueEscape['"'] = "\\\"";
-    	}
-
-        public String toString( Object o, String formatName, Locale locale ) {
-			if ( formatName==null ) {
-				return o.toString();
-			}
-			
-            String idString = o.toString();
-
-            if ( idString.isEmpty() ) return idString;
-
-            if ( formatName.equals( "snakecase" ) ) {
-                return snakecase( idString );
-            } else if ( formatName.equals( "camelcase" ) ) {
-                return camelcase( idString );
-            } else if ( formatName.equals( "subcamelcase" ) ) {
-                return subcamelcase( idString );
-            } else if ( formatName.equals( "constant" ) ) {
-                return constantcase( idString );
-            } else if ( formatName.equals( "platform" ) ) {
-                return platform( idString );
-            } else if ( formatName.equals( "lexerRule" ) ) {
-                return lexerRule( idString );
-            } else if ( formatName.equals( "constantPath" ) ) {
-            	return constantPath( idString );
-            } else if ( formatName.equals( "rubyString" ) ) {
-                return rubyString( idString );
-            } else if ( formatName.equals( "label" ) ) {
-                return label( idString );
-            } else if ( formatName.equals( "symbol" ) ) {
-                return symbol( idString );
-            } else {
-                throw new IllegalArgumentException( "Unsupported format name" );
-            }
-        }
-
-        /** Given an input string, which is presumed
-         * to contain a word that may be camelcased,
-         * convert it to snake_case underscore style.
-         *
-         * algorithm --
-         *   iterate through the string with a sliding window 3 chars wide
-         *
-         * example -- aGUIWhatNot
-         *   c   c+1 c+2  action
-         *   a   G        << 'a' << '_'  // a lower-upper word edge
-         *   G   U   I    << 'g'
-         *   U   I   W    << 'u'
-         *   I   W   h    << 'i' << '_'  // the last character in an acronym run of uppers
-         *   W   h        << 'w'
-         *   ... and so on
-         */
-        private String snakecase( String value ) {
-            StringBuilder output_buffer = new StringBuilder();
-            int l = value.length();
-            int cliff = l - 1;
-            char cur;
-            char next;
-            char peek;
-
-            if ( value.isEmpty() ) return value;
-            if ( l == 1 ) return value.toLowerCase();
-
-            for ( int i = 0; i < cliff; i++ ) {
-                cur  = value.charAt( i );
-                next = value.charAt( i + 1 );
-
-                if ( Character.isLetter( cur ) ) {
-                    output_buffer.append( Character.toLowerCase( cur ) );
-
-                    if ( Character.isDigit( next ) || Character.isWhitespace( next ) ) {
-                        output_buffer.append( '_' );
-                    } else if ( Character.isLowerCase( cur ) && Character.isUpperCase( next ) ) {
-                        // at camelcase word edge
-                        output_buffer.append( '_' );
-                    } else if ( ( i < cliff - 1 ) && Character.isUpperCase( cur ) && Character.isUpperCase( next ) ) {
-                        // cur is part of an acronym
-
-                        peek = value.charAt( i + 2 );
-                        if ( Character.isLowerCase( peek ) ) {
-                            /* if next is the start of word (indicated when peek is lowercase)
-                                         then the acronym must be completed by appending an underscore */
-                            output_buffer.append( '_' );
-                        }
-                    }
-                } else if ( Character.isDigit( cur ) ) {
-                    output_buffer.append( cur );
-                    if ( Character.isLetter( next ) ) {
-                        output_buffer.append( '_' );
-                    }
-                } else if ( Character.isWhitespace( cur ) ) {
-                    // do nothing
-                } else {
-                    output_buffer.append( cur );
-                }
-
-            }
-
-            cur  = value.charAt( cliff );
-            if ( ! Character.isWhitespace( cur ) ) {
-                output_buffer.append( Character.toLowerCase( cur ) );
-            }
-
-            return output_buffer.toString();
-        }
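-
-        // Per the walkthrough above, for example,
-        // snakecase( "aGUIWhatNot" ) returns "a_gui_what_not".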
-
-        private String constantcase( String value ) {
-            return snakecase( value ).toUpperCase();
-        }
-
-        private String platform( String value ) {
-            return ( "__" + value + "__" );
-        }
-
-        private String symbol( String value ) {
-            if ( value.matches( "[a-zA-Z_]\\w*[\\?\\!\\=]?" ) ) {
-                return ( ":" + value );
-            } else {
-                return ( "%s(" + value + ")" );
-            }
-        }
-
-        private String lexerRule( String value ) {
-					  // System.out.print( "lexerRule( \"" + value + "\") => " );
-            if ( value.equals( "Tokens" ) ) {
-							  // System.out.println( "\"token!\"" );
-                return "token!";
-            } else {
-							  // String result = snakecase( value ) + "!";
-								// System.out.println( "\"" + result + "\"" );
-                return ( snakecase( value ) + "!" );
-            }
-        }
-
-        private String constantPath( String value ) {
-            return value.replaceAll( "\\.", "::" );
-        }
-
-        private String rubyString( String value ) {
-            StringBuilder output_buffer = new StringBuilder();
-            int len = value.length();
-
-            output_buffer.append( '"' );
-            for ( int i = 0; i < len; i++ ) {
-                output_buffer.append( rubyCharValueEscape[ value.charAt( i ) ] );
-            }
-            output_buffer.append( '"' );
-            return output_buffer.toString();
-        }
-
-        private String camelcase( String value ) {
-            StringBuilder output_buffer = new StringBuilder();
-            int cliff = value.length();
-            char cur;
-            char next;
-            boolean at_edge = true;
-
-            if ( value.isEmpty() ) return value;
-            if ( cliff == 1 ) return value.toUpperCase();
-
-            for ( int i = 0; i < cliff; i++ ) {
-                cur  = value.charAt( i );
-
-                if ( Character.isWhitespace( cur ) ) {
-                    at_edge = true;
-                    continue;
-                } else if ( cur == '_' ) {
-                    at_edge = true;
-                    continue;
-                } else if ( Character.isDigit( cur ) ) {
-                    output_buffer.append( cur );
-                    at_edge = true;
-                    continue;
-                }
-
-                if ( at_edge ) {
-                    output_buffer.append( Character.toUpperCase( cur ) );
-                    if ( Character.isLetter( cur ) ) at_edge = false;
-                } else {
-                    output_buffer.append( cur );
-                }
-            }
-
-            return output_buffer.toString();
-        }
-
-        private String label( String value ) {
-            if ( rubyKeywords.contains( value ) ) {
-                return platform( value );
-            } else if ( Character.isUpperCase( value.charAt( 0 ) ) &&
-                        ( !value.equals( "FILE" ) ) &&
-                        ( !value.equals( "LINE" ) ) ) {
-                return platform( value );
-            } else if ( value.equals( "FILE" ) ) {
-                return "_FILE_";
-            } else if ( value.equals( "LINE" ) ) {
-                return "_LINE_";
-            } else {
-                return value;
-            }
-        }
-
-        private String subcamelcase( String value ) {
-            value = camelcase( value );
-            if ( value.isEmpty() )
-                return value;
-            Character head = Character.toLowerCase( value.charAt( 0 ) );
-            String tail = value.substring( 1 );
-            return head.toString().concat( tail );
-        }
-    }
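The case conversions above are easiest to see with concrete inputs. The following is a minimal standalone sketch, not the RubyRenderer code itself (class and method names are hypothetical); a compact regex version is enough to reproduce the lower-to-upper edge and acronym handling that snakecase() implements character by character:

    // SnakeCaseSketch.java -- illustrative only; approximates RubyRenderer.snakecase()
    public final class SnakeCaseSketch {
        static String snakeCase(String value) {
            return value
                .replaceAll("([a-z0-9])([A-Z])", "$1_$2")     // fooBar    -> foo_Bar
                .replaceAll("([A-Z]+)([A-Z][a-z])", "$1_$2")  // XMLParser -> XML_Parser
                .toLowerCase();
        }

        public static void main(String[] args) {
            System.out.println(snakeCase("fooBar"));     // foo_bar
            System.out.println(snakeCase("XMLParser"));  // xml_parser
            System.out.println(snakeCase("Token"));      // token
        }
    }

The character-by-character version above additionally separates digits from letters with underscores and drops whitespace, which this regex sketch ignores.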
-
-    protected void genRecognizerFile(
-    		Tool tool,
-    		CodeGenerator generator,
-    		Grammar grammar,
-    		ST outputFileST
-    ) throws IOException
-    {
-        /*
-            Below is an experimental attempt at providing a few named action blocks
-            that are printed in both lexer and parser files from combined grammars.
-            ANTLR appears to first generate a parser, then generate an independent lexer,
-            and then generate code from that. It keeps the combo/parser grammar object
-            and the lexer grammar object, as well as their respective code generator and
-            target instances, completely independent. So, while a bit hack-ish, this is
-            a solution that should work without having to modify Terence Parr's
-            core tool code.
-
-            - sharedActionBlocks is a class variable containing a hash map
-            - if this method is called with a combo grammar, and the action map
-              in the grammar contains an entry for the named scope "all",
-              add an entry to sharedActionBlocks mapping the grammar name to
-              the "all" action map.
-            - if this method is called with an `implicit lexer'
-              (one that's extracted from a combo grammar), check to see if
-              there's an entry in sharedActionBlocks for the lexer's grammar name.
-            - if there is an action map entry, place it in the lexer's action map
-            - the recognizerFile template has code to place the
-              "all" actions appropriately
-
-            problems:
-              - This solution assumes that the parser will be generated
-                before the lexer. If that changes at some point, this will
-                not work.
-              - I have not investigated how this works with delegation yet
-
-            Kyle Yetter - March 25, 2010
-        */
-
-        if ( grammar.type == Grammar.COMBINED ) {
-            Map<String, Map<String, Object>> actions = grammar.getActions();
-            if ( actions.containsKey( "all" ) ) {
-                sharedActionBlocks.put( grammar.name, actions.get( "all" ) );
-            }
-        } else if ( grammar.implicitLexer ) {
-            if ( sharedActionBlocks.containsKey( grammar.name ) ) {
-                Map<String, Map<String, Object>> actions = grammar.getActions();
-                actions.put( "all", sharedActionBlocks.get( grammar.name ) );
-            }
-        }
-
-        STGroup group = generator.getTemplates();
-        RubyRenderer renderer = new RubyRenderer();
-        try {
-            group.registerRenderer( Class.forName( "java.lang.String" ), renderer );
-        } catch ( ClassNotFoundException e ) {
-            // this shouldn't happen
-            System.err.println( "ClassNotFoundException: " + e.getMessage() );
-            e.printStackTrace( System.err );
-        }
-        String fileName =
-            generator.getRecognizerFileName( grammar.name, grammar.type );
-        generator.write( outputFileST, fileName );
-    }
-
-    public String getTargetCharLiteralFromANTLRCharLiteral(
-        CodeGenerator generator,
-        String literal
-    )
-    {
-        int code_point = 0;
-        literal = literal.substring( 1, literal.length() - 1 );
-
-        if ( literal.charAt( 0 ) == '\\' ) {
-            switch ( literal.charAt( 1 ) ) {
-                case    '\\':
-                case    '"':
-                case    '\'':
-                    code_point = literal.codePointAt( 1 );
-                    break;
-                case    'n':
-                    code_point = 10;
-                    break;
-                case    'r':
-                    code_point = 13;
-                    break;
-                case    't':
-                    code_point = 9;
-                    break;
-                case    'b':
-                    code_point = 8;
-                    break;
-                case    'f':
-                    code_point = 12;
-                    break;
-                case    'u':    // Assume unnnn
-                    code_point = Integer.parseInt( literal.substring( 2 ), 16 );
-                    break;
-                default:
-                    System.out.println( "1: hey you didn't account for this: \"" + literal + "\"" );
-                    break;
-            }
-        } else if ( literal.length() == 1 ) {
-            code_point = literal.codePointAt( 0 );
-        } else {
-            System.out.println( "2: hey you didn't account for this: \"" + literal + "\"" );
-        }
-
-        return ( "0x" + Integer.toHexString( code_point ) );
-    }
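As a quick, hypothetical illustration of the mapping above: the Ruby target emits a character's integer code point in hex rather than a quoted character, so an ANTLR literal '\n' becomes 0xa and 'A' becomes 0x41.

    // CharLiteralSketch.java -- illustrative only
    public final class CharLiteralSketch {
        public static void main(String[] args) {
            System.out.println("0x" + Integer.toHexString('\n'));  // 0xa
            System.out.println("0x" + Integer.toHexString('A'));   // 0x41
        }
    }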
-
-    public int getMaxCharValue( CodeGenerator generator )
-    {
-        // Versions before 1.9 do not support unicode
-        return 0xFF;
-    }
-
-    public String getTokenTypeAsTargetLabel( CodeGenerator generator, int ttype )
-    {
-        String name = generator.grammar.getTokenDisplayName( ttype );
-        // If name is a literal, return the token type instead
-        if ( name.charAt( 0 )=='\'' ) {
-            return generator.grammar.computeTokenNameFromLiteral( ttype, name );
-        }
-        return name;
-    }
-
-    public boolean isValidActionScope( int grammarType, String scope ) {
-        if ( scope.equals( "all" ) )       {
-            return true;
-        }
-        if ( scope.equals( "token" ) )     {
-            return true;
-        }
-        if ( scope.equals( "module" ) )    {
-            return true;
-        }
-        if ( scope.equals( "overrides" ) ) {
-            return true;
-        }
-
-        switch ( grammarType ) {
-        case Grammar.LEXER:
-            if ( scope.equals( "lexer" ) ) {
-                return true;
-            }
-            break;
-        case Grammar.PARSER:
-            if ( scope.equals( "parser" ) ) {
-                return true;
-            }
-            break;
-        case Grammar.COMBINED:
-            if ( scope.equals( "parser" ) ) {
-                return true;
-            }
-            if ( scope.equals( "lexer" ) ) {
-                return true;
-            }
-            break;
-        case Grammar.TREE_PARSER:
-            if ( scope.equals( "treeparser" ) ) {
-                return true;
-            }
-            break;
-        }
-        return false;
-    }
-
-    public String encodeIntAsCharEscape( final int v ) {
-        final int intValue;
-
-        if ( v == 65535 ) {
-            intValue = -1;
-        } else {
-            intValue = v;
-        }
-
-        return String.valueOf( intValue );
-    }
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/Target.java b/antlr-3.4/tool/src/main/java/org/antlr/codegen/Target.java
deleted file mode 100644
index 848dd7f..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/codegen/Target.java
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.codegen;
-
-import org.antlr.Tool;
-import org.antlr.analysis.Label;
-import org.antlr.runtime.Token;
-import org.stringtemplate.v4.ST;
-import org.antlr.tool.Grammar;
-
-import java.io.IOException;
-import java.util.List;
-
-/** The code generator for ANTLR can usually be retargeted just by providing
- *  a new X.stg file for language X, however, sometimes the files that must
- *  be generated vary enough that some X-specific functionality is required.
- *  For example, in C, you must generate header files whereas in Java you do not.
- *  Other languages may want to keep DFA separate from the main
- *  generated recognizer file.
- *
- *  The notion of a Code Generator target abstracts out the creation
- *  of the various files.  As new language targets get added to the ANTLR
- *  system, this target class may have to be altered to handle more
- *  functionality.  Eventually, just about all language generation issues
- *  will be expressible in terms of these methods.
- *
- *  If an org.antlr.codegen.XTarget class exists, it is used; otherwise the
- *  Target base class is used.  I am using a superclass rather than an
- *  interface for this target concept because I can add functionality
- *  later without breaking previously written targets (extra interface
- *  methods would force adding dummy functions to all code generator
- *  target classes).
- *
- */
-public class Target {
-
-	/** For pure strings of Java 16-bit unicode chars, how do we display
-	 *  them in the target language as a literal?  Useful for dumping
-	 *  predicates and such that may refer to chars that need to be escaped
-	 *  when represented as strings.  Also, templates need to be escaped so
-	 *  that the target language can hold them as a string.
-	 *
-	 *  I have defined (via the constructor) the set of typical escapes,
-	 *  but your Target subclass is free to alter the translated chars or
-	 *  add more definitions.  This is nonstatic so each target can have
-	 *  a different set in memory at the same time.
-	 */
-	protected String[] targetCharValueEscape = new String[255];
-
-	public Target() {
-		targetCharValueEscape['\n'] = "\\n";
-		targetCharValueEscape['\r'] = "\\r";
-		targetCharValueEscape['\t'] = "\\t";
-		targetCharValueEscape['\b'] = "\\b";
-		targetCharValueEscape['\f'] = "\\f";
-		targetCharValueEscape['\\'] = "\\\\";
-		targetCharValueEscape['\''] = "\\'";
-		targetCharValueEscape['"'] = "\\\"";
-	}
-
-	protected void genRecognizerFile(Tool tool,
-									 CodeGenerator generator,
-									 Grammar grammar,
-									 ST outputFileST)
-		throws IOException
-	{
-		String fileName =
-			generator.getRecognizerFileName(grammar.name, grammar.type);
-		generator.write(outputFileST, fileName);
-	}
-
-	protected void genRecognizerHeaderFile(Tool tool,
-										   CodeGenerator generator,
-										   Grammar grammar,
-										   ST headerFileST,
-										   String extName) // e.g., ".h"
-		throws IOException
-	{
-		// no header file by default
-	}
-
-	protected void performGrammarAnalysis(CodeGenerator generator,
-										  Grammar grammar)
-	{
-		// Build NFAs from the grammar AST
-		grammar.buildNFA();
-
-		// Create the DFA predictors for each decision
-		grammar.createLookaheadDFAs();
-	}
-
-	/** Is scope in @scope::name {action} valid for this kind of grammar?
-	 *  Targets like C++ may want to allow new scopes like headerfile or
-	 *  some such.  The action names themselves are not policed at the
-	 *  moment so targets can add template actions w/o having to recompile
-	 *  ANTLR.
-	 */
-	public boolean isValidActionScope(int grammarType, String scope) {
-		switch (grammarType) {
-			case Grammar.LEXER :
-				if ( scope.equals("lexer") ) {return true;}
-				break;
-			case Grammar.PARSER :
-				if ( scope.equals("parser") ) {return true;}
-				break;
-			case Grammar.COMBINED :
-				if ( scope.equals("parser") ) {return true;}
-				if ( scope.equals("lexer") ) {return true;}
-				break;
-			case Grammar.TREE_PARSER :
-				if ( scope.equals("treeparser") ) {return true;}
-				break;
-		}
-		return false;
-	}
-
-	/** Target must be able to override the labels used for token types */
-	public String getTokenTypeAsTargetLabel(CodeGenerator generator, int ttype) {
-		String name = generator.grammar.getTokenDisplayName(ttype);
-		// If name is a literal, return the token type instead
-		if ( name.charAt(0)=='\'' ) {
-			return String.valueOf(ttype);
-		}
-		return name;
-	}
-
-	/** Convert from an ANTLR char literal found in a grammar file to
-	 *  an equivalent char literal in the target language.  For most
-	 *  languages, this means leaving 'x' as 'x'.  Actually, we need
-	 *  to escape '\u000A' so that it doesn't get converted to \n by
-	 *  the compiler.  Convert the literal to the char value and then
-	 *  to an appropriate target char literal.
-	 *
-	 *  Expect single quotes around the incoming literal.
-	 */
-	public String getTargetCharLiteralFromANTLRCharLiteral(
-		CodeGenerator generator,
-		String literal)
-	{
-		StringBuffer buf = new StringBuffer();
-		buf.append('\'');
-		int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
-		if ( c<Label.MIN_CHAR_VALUE ) {
-			return "'\u0000'";
-		}
-		if ( c<targetCharValueEscape.length &&
-			 targetCharValueEscape[c]!=null )
-		{
-			buf.append(targetCharValueEscape[c]);
-		}
-		else if ( Character.UnicodeBlock.of((char)c)==
-				  Character.UnicodeBlock.BASIC_LATIN &&
-				  !Character.isISOControl((char)c) )
-		{
-			// normal char
-			buf.append((char)c);
-		}
-		else {
-			// must be something unprintable...use \\uXXXX
-			// turn on the bit above max "\\uFFFF" value so that we pad with zeros
-			// then only take last 4 digits
-			String hex = Integer.toHexString(c|0x10000).toUpperCase().substring(1,5);
-			buf.append("\\u");
-			buf.append(hex);
-		}
-
-		buf.append('\'');
-		return buf.toString();
-	}
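The OR-with-0x10000 idiom above is worth a one-line check. A minimal sketch with an assumed code point, showing why dropping the first hex digit leaves exactly four zero-padded digits (String.format gives the same result):

    // HexPadSketch.java -- illustrative only
    public final class HexPadSketch {
        public static void main(String[] args) {
            int c = 0x3B1;  // arbitrary non-ASCII code point, for illustration
            // OR-ing with 0x10000 forces a 5-digit hex string; substring(1, 5)
            // drops the leading 1 and keeps four zero-padded digits.
            String hex = Integer.toHexString(c | 0x10000).toUpperCase().substring(1, 5);
            System.out.println("\\u" + hex);                  // \u03B1
            System.out.println(String.format("\\u%04X", c));  // same output
        }
    }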
-
-	/** Convert from an ANTLR string literal found in a grammar file to
-	 *  an equivalent string literal in the target language.  For Java, this
-	 *  is the translation 'a\n"' -> "a\n\"".  Expect single quotes
-	 *  around the incoming literal.  Just flip the quotes and replace
-	 *  double quotes with \"
-     * 
-     *  Note that we have decided to allow people to use '\"' without
-     *  penalty, so we must build the target string in a loop, as Utils.replace
-     *  cannot handle both \" and " without a lot of messing around.
-     * 
-	 */
-	public String getTargetStringLiteralFromANTLRStringLiteral(
-		CodeGenerator generator,
-		String literal)
-	{
-        StringBuilder sb = new StringBuilder();
-        StringBuffer is = new StringBuffer(literal);
-        
-        // Opening quote
-        //
-        sb.append('"');
-        
-        for (int i = 1; i < is.length() -1; i++) {
-            if  (is.charAt(i) == '\\') {
-                // Anything escaped is what it is! We assume that
-                // people know how to escape characters correctly. However,
-                // we catch anything that does not need an escape in Java (which
-                // is what the default implementation is dealing with) and remove
-                // the escape. The C target does this, for instance.
-                //
-                switch (is.charAt(i+1)) {
-                    // Pass through any escapes that Java also needs
-                    //
-                    case    '"':
-                    case    'n':
-                    case    'r':
-                    case    't':
-                    case    'b':
-                    case    'f':
-                    case    '\\':
-                    case    'u':    // Assume unnnn
-                        sb.append('\\');    // Pass the escape through
-                        break;
-                    default:
-                        // Remove the escape by virtue of not adding it here
-                        // Thus \' becomes ' and so on
-                        //
-                        break;
-                }
-                
-                // Go past the \ character
-                //
-                i++;
-            } else {
-                // Characters that don't need \ in ANTLR 'strings' but do in Java
-                //
-                if (is.charAt(i) == '"') {
-                    // We need to escape " in Java
-                    //
-                    sb.append('\\');
-                }
-            }
-            // Add in the next character, which may have been escaped
-            //
-            sb.append(is.charAt(i));   
-        }
-        
-        // Append closing " and return
-        //
-        sb.append('"');
-        
-		return sb.toString();
-	}
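A simplified standalone sketch of the quote flip described above (hypothetical helper name; it only re-escapes bare double quotes, whereas the real method also strips escapes that Java does not need):

    // StringLiteralSketch.java -- illustrative only
    public final class StringLiteralSketch {
        static String flipQuotes(String antlrLiteral) {
            // Drop the surrounding single quotes, escape any bare double quote,
            // and wrap the result in double quotes.
            String body = antlrLiteral.substring(1, antlrLiteral.length() - 1);
            return "\"" + body.replace("\"", "\\\"") + "\"";
        }

        public static void main(String[] args) {
            System.out.println(flipQuotes("'hi \"there\"'"));  // "hi \"there\""
        }
    }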
-
-	/** Given a random string of Java unicode chars, return a new string with
-	 *  optionally appropriate quote characters for target language and possibly
-	 *  with some escaped characters.  For example, if the incoming string has
-	 *  actual newline characters, the output of this method would convert them
-	 *  to the two char sequence \n for Java, C, C++, ...  The new string has
-	 *  double-quotes around it as well.  Example String in memory:
-	 *
-	 *     a"[newlinechar]b'c[carriagereturnchar]d[tab]e\f
-	 *
-	 *  would be converted to the valid Java string:
-	 *
-	 *     "a\"\nb'c\rd\te\\f"
-	 *
-	 *  or
-	 *
-	 *     a\"\nb'c\rd\te\\f
-	 *
-	 *  depending on the quoted arg.
-	 */
-	public String getTargetStringLiteralFromString(String s, boolean quoted) {
-		if ( s==null ) {
-			return null;
-		}
-
-		StringBuffer buf = new StringBuffer();
-		if ( quoted ) {
-			buf.append('"');
-		}
-		for (int i=0; i<s.length(); i++) {
-			int c = s.charAt(i);
-			if ( c!='\'' && // don't escape single quotes in strings for java
-				 c<targetCharValueEscape.length &&
-				 targetCharValueEscape[c]!=null )
-			{
-				buf.append(targetCharValueEscape[c]);
-			}
-			else {
-				buf.append((char)c);
-			}
-		}
-		if ( quoted ) {
-			buf.append('"');
-		}
-		return buf.toString();
-	}
-
-	public String getTargetStringLiteralFromString(String s) {
-		return getTargetStringLiteralFromString(s, false);
-	}
-
-	/** Convert long to 0xNNNNNNNNNNNNNNNN by default for spitting out
-	 *  with bitsets.  I.e., convert bytes to hex string.
-	 */
-	public String getTarget64BitStringFromValue(long word) {
-		int numHexDigits = 8*2;
-		StringBuffer buf = new StringBuffer(numHexDigits+2);
-		buf.append("0x");
-		String digits = Long.toHexString(word);
-		digits = digits.toUpperCase();
-		int padding = numHexDigits - digits.length();
-		// pad left with zeros
-		for (int i=1; i<=padding; i++) {
-			buf.append('0');
-		}
-		buf.append(digits);
-		return buf.toString();
-	}
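For reference, the zero-padding loop above produces the same text as a width-16, zero-padded, upper-case hex format; a tiny check with an assumed value:

    // HexWordSketch.java -- illustrative only
    public final class HexWordSketch {
        public static void main(String[] args) {
            long word = 0x1234L;
            System.out.println(String.format("0x%016X", word));  // 0x0000000000001234
        }
    }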
-
-	public String encodeIntAsCharEscape(int v) {
-		if ( v<=127 ) {
-			return "\\"+Integer.toOctalString(v);
-		}
-		String hex = Integer.toHexString(v|0x10000).substring(1,5);
-		return "\\u"+hex;
-	}
-
-	/** Some targets only support ASCII or 8-bit chars/strings.  For example,
-	 *  C++ will probably want to return 0xFF here.
-	 */
-	public int getMaxCharValue(CodeGenerator generator) {
-		return Label.MAX_CHAR_VALUE;
-	}
-
-	/** Give target a chance to do some postprocessing on actions.
-	 *  Python, for example, will have to fix the indentation.
-	 */
-	public List postProcessAction(List chunks, Token actionToken) {
-		return chunks;
-	}
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/BitSet.java b/antlr-3.4/tool/src/main/java/org/antlr/misc/BitSet.java
deleted file mode 100644
index ab2928e..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/misc/BitSet.java
+++ /dev/null
@@ -1,574 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.misc;
-
-import org.antlr.analysis.Label;
-import org.antlr.tool.Grammar;
-
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-/**A BitSet to replace java.util.BitSet.
- *
- * Primary differences are that most set operators return new sets
- * as opposed to OR-ing and AND-ing "in place".  Further, a number of
- * operations were added.  I cannot wrap a java.util.BitSet because there
- * is no way to access the internal bits (which I need for speed)
- * and, because it is final, I cannot subclass it to add functionality.
- * Consider defining set degree.  Without access to the bits, I must
- * call a method n times to test the ith bit...ack!
- *
- * It also seems like or() from java.util.BitSet is wrong when the size of the
- * incoming set is bigger than this.bits.length.
- *
- * @author Terence Parr
- */
-public class BitSet implements IntSet, Cloneable {
-    protected final static int BITS = 64;    // number of bits / long
-    protected final static int LOG_BITS = 6; // 2^6 == 64
-
-    /* We will often need to do a mod operation (i mod nbits).  It
-     * turns out that, for powers of two, this mod operation is the
-     * same as (i & (nbits-1)).  Since mod is slow, we use a
-     * precomputed mod mask to do the mod instead.
-     */
-    protected final static int MOD_MASK = BITS - 1;
-
-    /** The actual data bits */
-    protected long bits[];
-
-    /** Construct a bitset of size one word (64 bits) */
-    public BitSet() {
-        this(BITS);
-    }
-
-    /** Construction from a static array of longs */
-    public BitSet(long[] bits_) {
-        bits = bits_;
-    }
-
-    /** Construct a bitset given the size
-     * @param nbits The size of the bitset in bits
-     */
-    public BitSet(int nbits) {
-        bits = new long[((nbits - 1) >> LOG_BITS) + 1];
-    }
-
-    /** or this element into this set (grow as necessary to accommodate) */
-    public void add(int el) {
-        //System.out.println("add("+el+")");
-        int n = wordNumber(el);
-        //System.out.println("word number is "+n);
-        //System.out.println("bits.length "+bits.length);
-        if (n >= bits.length) {
-            growToInclude(el);
-        }
-        bits[n] |= bitMask(el);
-    }
-
-    public void addAll(IntSet set) {
-        if ( set instanceof BitSet ) {
-            this.orInPlace((BitSet)set);
-        }
-		else if ( set instanceof IntervalSet ) {
-			IntervalSet other = (IntervalSet)set;
-			// walk set and add each interval
-			for (Iterator iter = other.intervals.iterator(); iter.hasNext();) {
-				Interval I = (Interval) iter.next();
-				this.orInPlace(BitSet.range(I.a,I.b));
-			}
-		}
-		else {
-			throw new IllegalArgumentException("can't add "+
-											   set.getClass().getName()+
-											   " to BitSet");
-		}
-    }
-
-	public void addAll(int[] elements) {
-		if ( elements==null ) {
-			return;
-		}
-		for (int i = 0; i < elements.length; i++) {
-			int e = elements[i];
-			add(e);
-		}
-	}
-
-	public void addAll(Iterable elements) {
-		if ( elements==null ) {
-			return;
-		}
-		Iterator it = elements.iterator();
-		while (it.hasNext()) {
-			Object o = (Object) it.next();
-			if ( !(o instanceof Integer) ) {
-				throw new IllegalArgumentException();
-			}
-			Integer eI = (Integer)o;
-			add(eI.intValue());
-		}
-		/*
-		int n = elements.size();
-		for (int i = 0; i < n; i++) {
-			Object o = elements.get(i);
-			if ( !(o instanceof Integer) ) {
-				throw new IllegalArgumentException();
-			}
-			Integer eI = (Integer)o;
-			add(eI.intValue());
-		}
-		 */
-	}
-
-    public IntSet and(IntSet a) {
-        BitSet s = (BitSet)this.clone();
-        s.andInPlace((BitSet)a);
-        return s;
-    }
-
-    public void andInPlace(BitSet a) {
-        int min = Math.min(bits.length, a.bits.length);
-        for (int i = min - 1; i >= 0; i--) {
-            bits[i] &= a.bits[i];
-        }
-        // clear all bits in this not present in a (if this bigger than a).
-        for (int i = min; i < bits.length; i++) {
-            bits[i] = 0;
-        }
-    }
-
-    private final static long bitMask(int bitNumber) {
-        int bitPosition = bitNumber & MOD_MASK; // bitNumber mod BITS
-        return 1L << bitPosition;
-    }
-
-    public void clear() {
-        for (int i = bits.length - 1; i >= 0; i--) {
-            bits[i] = 0;
-        }
-    }
-
-    public void clear(int el) {
-        int n = wordNumber(el);
-        if (n >= bits.length) {	// grow as necessary to accommodate
-            growToInclude(el);
-        }
-        bits[n] &= ~bitMask(el);
-    }
-
-    public Object clone() {
-        BitSet s;
-        try {
-            s = (BitSet)super.clone();
-            s.bits = new long[bits.length];
-            System.arraycopy(bits, 0, s.bits, 0, bits.length);
-        }
-        catch (CloneNotSupportedException e) {
-            throw new InternalError();
-        }
-        return s;
-    }
-
-    public int size() {
-        int deg = 0;
-        for (int i = bits.length - 1; i >= 0; i--) {
-            long word = bits[i];
-            if (word != 0L) {
-                for (int bit = BITS - 1; bit >= 0; bit--) {
-                    if ((word & (1L << bit)) != 0) {
-                        deg++;
-                    }
-                }
-            }
-        }
-        return deg;
-    }
-
-    public boolean equals(Object other) {
-        if ( other == null || !(other instanceof BitSet) ) {
-            return false;
-        }
-
-        BitSet otherSet = (BitSet)other;
-
-        int n = Math.min(this.bits.length, otherSet.bits.length);
-
-        // for any bits in common, compare
-        for (int i=0; i<n; i++) {
-            if (this.bits[i] != otherSet.bits[i]) {
-                return false;
-            }
-        }
-
-        // make sure any extra bits are off
-
-        if (this.bits.length > n) {
-            for (int i = n; i<this.bits.length; i++) { // start at n: first word past the common prefix
-                if (this.bits[i] != 0) {
-                    return false;
-                }
-            }
-        }
-        else if (otherSet.bits.length > n) {
-            for (int i = n; i<otherSet.bits.length; i++) { // start at n: first word past the common prefix
-                if (otherSet.bits[i] != 0) {
-                    return false;
-                }
-            }
-        }
-
-        return true;
-    }
-
-    /**
-     * Grows the set to a larger number of bits.
-     * @param bit element that must fit in set
-     */
-    public void growToInclude(int bit) {
-        int newSize = Math.max(bits.length << 1, numWordsToHold(bit));
-        long newbits[] = new long[newSize];
-        System.arraycopy(bits, 0, newbits, 0, bits.length);
-        bits = newbits;
-    }
-
-    public boolean member(int el) {
-        int n = wordNumber(el);
-        if (n >= bits.length) return false;
-        return (bits[n] & bitMask(el)) != 0;
-    }
-
-    /** Get the first element you find and return it.  Return Label.INVALID
-     *  otherwise.
-     */
-    public int getSingleElement() {
-        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
-            if (member(i)) {
-                return i;
-            }
-        }
-        return Label.INVALID;
-    }
-
-    public boolean isNil() {
-        for (int i = bits.length - 1; i >= 0; i--) {
-            if (bits[i] != 0) return false;
-        }
-        return true;
-    }
-
-    public IntSet complement() {
-        BitSet s = (BitSet)this.clone();
-        s.notInPlace();
-        return s;
-    }
-
-    public IntSet complement(IntSet set) {
-		if ( set==null ) {
-			return this.complement();
-		}
-        return set.subtract(this);
-    }
-
-    public void notInPlace() {
-        for (int i = bits.length - 1; i >= 0; i--) {
-            bits[i] = ~bits[i];
-        }
-    }
-
-    /** complement bits in the range 0..maxBit. */
-    public void notInPlace(int maxBit) {
-        notInPlace(0, maxBit);
-    }
-
-    /** complement bits in the range minBit..maxBit.*/
-    public void notInPlace(int minBit, int maxBit) {
-        // make sure that we have room for maxBit
-        growToInclude(maxBit);
-        for (int i = minBit; i <= maxBit; i++) {
-            int n = wordNumber(i);
-            bits[n] ^= bitMask(i);
-        }
-    }
-
-    private final int numWordsToHold(int el) {
-        return (el >> LOG_BITS) + 1;
-    }
-
-    public static BitSet of(int el) {
-        BitSet s = new BitSet(el + 1);
-        s.add(el);
-        return s;
-    }
-
-    public static BitSet of(Collection elements) {
-        BitSet s = new BitSet();
-        Iterator iter = elements.iterator();
-        while (iter.hasNext()) {
-            Integer el = (Integer) iter.next();
-            s.add(el.intValue());
-        }
-        return s;
-    }
-
-	public static BitSet of(IntSet set) {
-		if ( set==null ) {
-			return null;
-		}
-
-		if ( set instanceof BitSet ) {
-			return (BitSet)set;
-		}
-		if ( set instanceof IntervalSet ) {
-			BitSet s = new BitSet();
-			s.addAll(set);
-			return s;
-		}
-		throw new IllegalArgumentException("can't create BitSet from "+set.getClass().getName());
-	}
-
-    public static BitSet of(Map elements) {
-        return BitSet.of(elements.keySet());
-    }
-
-	public static BitSet range(int a, int b) {
-		BitSet s = new BitSet(b + 1);
-		for (int i = a; i <= b; i++) {
-			int n = wordNumber(i);
-			s.bits[n] |= bitMask(i);
-		}
-		return s;
-	}
-
-    /** return this | a in a new set */
-    public IntSet or(IntSet a) {
-		if ( a==null ) {
-			return this;
-		}
-        BitSet s = (BitSet)this.clone();
-        s.orInPlace((BitSet)a);
-        return s;
-    }
-
-    public void orInPlace(BitSet a) {
-		if ( a==null ) {
-			return;
-		}
-        // If this is smaller than a, grow this first
-        if (a.bits.length > bits.length) {
-            setSize(a.bits.length);
-        }
-        int min = Math.min(bits.length, a.bits.length);
-        for (int i = min - 1; i >= 0; i--) {
-            bits[i] |= a.bits[i];
-        }
-    }
-
-    // remove this element from this set
-    public void remove(int el) {
-        int n = wordNumber(el);
-        if (n >= bits.length) {
-            growToInclude(el);
-        }
-        bits[n] &= ~bitMask(el);
-    }
-
-    /**
-     * Sets the size of a set.
-     * @param nwords how many words the new set should be
-     */
-    private void setSize(int nwords) {
-        long newbits[] = new long[nwords];
-        int n = Math.min(nwords, bits.length);
-        System.arraycopy(bits, 0, newbits, 0, n);
-        bits = newbits;
-    }
-
-    public int numBits() {
-        return bits.length << LOG_BITS; // num words * bits per word
-    }
-
-    /** Return how much space is being used by the bits array, not
-     *  how many bits are actually set.
-     */
-    public int lengthInLongWords() {
-        return bits.length;
-    }
-
-    /**Is this contained within a? */
-    public boolean subset(BitSet a) {
-        if (a == null) return false;
-        return this.and(a).equals(this);
-    }
-
-    /**Subtract the elements of 'a' from 'this' in-place.
-     * Basically, just turn off all bits of 'this' that are in 'a'.
-     */
-    public void subtractInPlace(BitSet a) {
-        if (a == null) return;
-        // for all words of 'a', turn off corresponding bits of 'this'
-        for (int i = 0; i < bits.length && i < a.bits.length; i++) {
-            bits[i] &= ~a.bits[i];
-        }
-    }
-
-    public IntSet subtract(IntSet a) {
-        if (a == null || !(a instanceof BitSet)) return null;
-
-        BitSet s = (BitSet)this.clone();
-        s.subtractInPlace((BitSet)a);
-        return s;
-    }
-
-	public List toList() {
-		throw new NoSuchMethodError("BitSet.toList() unimplemented");
-	}
-
-    public int[] toArray() {
-        int[] elems = new int[size()];
-        int en = 0;
-        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
-            if (member(i)) {
-                elems[en++] = i;
-            }
-        }
-        return elems;
-    }
-
-    public long[] toPackedArray() {
-        return bits;
-    }
-
-    public String toString() {
-        return toString(null);
-    }
-
-    /** Transform a bit set into a string by formatting each element as an integer
-     *  (or as its token display name when a Grammar is supplied).
-     * @return A comma-separated list of values
-     */
-    public String toString(Grammar g) {
-        StringBuffer buf = new StringBuffer();
-        String separator = ",";
-		boolean havePrintedAnElement = false;
-		buf.append('{');
-
-        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
-            if (member(i)) {
-                if (i > 0 && havePrintedAnElement ) {
-                    buf.append(separator);
-                }
-                if ( g!=null ) {
-                    buf.append(g.getTokenDisplayName(i));
-                }
-                else {
-                    buf.append(i);
-                }
-				havePrintedAnElement = true;
-            }
-        }
-		buf.append('}');
-        return buf.toString();
-    }
-
-    /** Create a string representation where, instead of the integer elements, the
-     *  ith element of the vocabulary is displayed.  Vocabulary is a List
-     *  of Strings.
-     * @param separator The string to put in between elements
-     * @return A comma-separated list of character constants.
-     */
-    public String toString(String separator, List vocabulary) {
-        if (vocabulary == null) {
-            return toString(null);
-        }
-        String str = "";
-        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
-            if (member(i)) {
-                if (str.length() > 0) {
-                    str += separator;
-                }
-                if (i >= vocabulary.size()) {
-                    str += "'" + (char)i + "'";
-                }
-                else if (vocabulary.get(i) == null) {
-                    str += "'" + (char)i + "'";
-                }
-                else {
-                    str += (String)vocabulary.get(i);
-                }
-            }
-        }
-        return str;
-    }
-
-    /**
-     * Dump a comma-separated list of the words making up the bit set.
-     * Split each 64 bit number into two more manageable 32 bit numbers.
-     * This generates a comma-separated list of C++-like unsigned long constants.
-     */
-    public String toStringOfHalfWords() {
-        StringBuffer s = new StringBuffer();
-        for (int i = 0; i < bits.length; i++) {
-            if (i != 0) s.append(", ");
-            long tmp = bits[i];
-            tmp &= 0xFFFFFFFFL;
-            s.append(tmp);
-			s.append("UL");
-            s.append(", ");
-            tmp = bits[i] >>> 32;
-            tmp &= 0xFFFFFFFFL;
-			s.append(tmp);
-			s.append("UL");
-        }
-		return s.toString();
-    }
-
-    /**
-     * Dump a comma-separated list of the words making up the bit set.
-     * This generates a comma-separated list of Java-like long int constants.
-     */
-    public String toStringOfWords() {
-		StringBuffer s = new StringBuffer();
-        for (int i = 0; i < bits.length; i++) {
-            if (i != 0) s.append(", ");
-            s.append(bits[i]);
-			s.append("L");
-        }
-        return s.toString();
-    }
-
-    public String toStringWithRanges() {
-        return toString();
-    }
-
-    private final static int wordNumber(int bit) {
-        return bit >> LOG_BITS; // bit / BITS
-    }
-}
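To make the class's behavior concrete, here is a small usage sketch against the public API shown above, plus a check of the power-of-two mod trick from the MOD_MASK comment (the wrapper class and sample values are hypothetical):

    // BitSetSketch.java -- illustrative only
    import org.antlr.misc.BitSet;

    public final class BitSetSketch {
        public static void main(String[] args) {
            BitSet s = BitSet.of(3);           // {3}
            s.add(65);                         // grows past the first 64-bit word
            System.out.println(s.member(65));  // true
            System.out.println(s);             // {3,65}
            System.out.println(s.size());      // 2

            // i mod 64 == i & 63 whenever the modulus is a power of two.
            int i = 1000003;
            System.out.println((i % 64) == (i & 63));  // true
        }
    }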
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/Graph.java b/antlr-3.4/tool/src/main/java/org/antlr/misc/Graph.java
deleted file mode 100644
index 74962e5..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/misc/Graph.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.misc;
-
-import java.util.*;
-
-/** A generic graph with edges; each node has a single Object payload.
- *  This is only used to topologically sort a list of file dependencies
- *  at the moment.
- */
-public class Graph {
-
-    public static class Node {
-        Object payload;
-        List<Node> edges; // points at which nodes?
-
-        public Node(Object payload) { this.payload = payload; }
-
-        public void addEdge(Node n) {
-            if ( edges==null ) edges = new ArrayList<Node>();
-            if ( !edges.contains(n) ) edges.add(n);
-        }
-
-        public String toString() { return payload.toString(); }
-    }
-
-    /** Map from node payload to node containing it */
-    protected Map<Object,Node> nodes = new HashMap<Object,Node>();
-
-    public void addEdge(Object a, Object b) {
-        //System.out.println("add edge "+a+" to "+b);
-        Node a_node = getNode(a);
-        Node b_node = getNode(b);
-        a_node.addEdge(b_node);
-    }
-
-    protected Node getNode(Object a) {
-        Node existing = nodes.get(a);
-        if ( existing!=null ) return existing;
-        Node n = new Node(a);
-        nodes.put(a, n);
-        return n;
-    }
-
-    /** DFS-based topological sort.  A valid sort is the reverse of
-     *  the post-order DFS traversal.  Amazingly simple but true.
-     *  For sorting, I'm not following convention here since ANTLR
-     *  needs the opposite.  Here's what I assume for sorting:
-     *
-     *    If there exists an edge u -> v then u depends on v and v
-     *    must happen before u.
-     *
-     *  So if this gives nonreversed postorder traversal, I get the order
-     *  I want.
-     */
-    public List<Object> sort() {
-        Set<Node> visited = new OrderedHashSet<Node>();
-        ArrayList<Object> sorted = new ArrayList<Object>();
-        while ( visited.size() < nodes.size() ) {
-            // pick any unvisited node, n
-            Node n = null;
-            for (Iterator it = nodes.values().iterator(); it.hasNext();) {
-                n = (Node)it.next();
-                if ( !visited.contains(n) ) break;
-            }
-            DFS(n, visited, sorted);
-        }
-        return sorted;
-    }
-
-    public void DFS(Node n, Set<Node> visited, ArrayList<Object> sorted) {
-        if ( visited.contains(n) ) return;
-        visited.add(n);
-        if ( n.edges!=null ) {
-            for (Iterator it = n.edges.iterator(); it.hasNext();) {
-                Node target = (Node) it.next();
-                DFS(target, visited, sorted);
-            }
-        }
-        sorted.add(n.payload);
-    }
-}
\ No newline at end of file
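A brief usage sketch for the dependency graph above: an edge u -> v means "u depends on v", so sort() emits v before u. The grammar file names below are hypothetical:

    // GraphSketch.java -- illustrative only
    import java.util.List;
    import org.antlr.misc.Graph;

    public final class GraphSketch {
        public static void main(String[] args) {
            Graph g = new Graph();
            g.addEdge("Parser.g", "Lexer.g");   // Parser.g depends on Lexer.g
            g.addEdge("Walker.g", "Parser.g");  // Walker.g depends on Parser.g
            List<Object> order = g.sort();
            // Lexer.g always precedes Parser.g, which precedes Walker.g.
            System.out.println(order);
        }
    }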
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/IntArrayList.java b/antlr-3.4/tool/src/main/java/org/antlr/misc/IntArrayList.java
deleted file mode 100644
index ab319dd..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/misc/IntArrayList.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.misc;
-
-import java.util.AbstractList;
-
-/** An ArrayList based upon int members.  Not quite a real implementation of a
- *  modifiable list as I don't do, for example, add(index,element).
- *  TODO: unused?
- */
-public class IntArrayList extends AbstractList implements Cloneable {
-	private static final int DEFAULT_CAPACITY = 10;
-	protected int n = 0;
-	protected int[] elements = null;
-
-	public IntArrayList() {
-		this(DEFAULT_CAPACITY);
-	}
-
-	public IntArrayList(int initialCapacity) {
-		elements = new int[initialCapacity];
-	}
-
-	/** Set the ith element.  Like ArrayList, this does NOT affect size. */
-	public int set(int i, int newValue) {
-		if ( i>=n ) {
-			setSize(i); // unlike definition of set in ArrayList, set size
-		}
-		int v = elements[i];
-		elements[i] = newValue;
-		return v;
-	}
-
-	public boolean add(int o) {
-		if ( n>=elements.length ) {
-			grow();
-		}
-		elements[n] = o;
-		n++;
-		return true;
-	}
-
-	public void setSize(int newSize) {
-		if ( newSize>=elements.length ) {
-            ensureCapacity(newSize);
-		}
-		n = newSize;
-	}
-
-	protected void grow() {
-		ensureCapacity((elements.length * 3)/2 + 1);
-	}
-
-	public boolean contains(int v) {
-		for (int i = 0; i < n; i++) {
-			int element = elements[i];
-			if ( element == v ) {
-				return true;
-			}
-		}
-		return false;
-	}
-
-	public void ensureCapacity(int newCapacity) {
-		int oldCapacity = elements.length;
-		if (n>=oldCapacity) {
-			int oldData[] = elements;
-			elements = new int[newCapacity];
-			System.arraycopy(oldData, 0, elements, 0, n);
-		}
-	}
-
-	public Object get(int i) {
-		return Utils.integer(element(i));
-	}
-
-	public int element(int i) {
-		return elements[i];
-	}
-
-	public int[] elements() {
-		int[] a = new int[n];
-		System.arraycopy(elements, 0, a, 0, n);
-		return a;
-	}
-
-	public int size() {
-		return n;
-	}
-
-    public int capacity() {
-        return elements.length;
-    }
-
-	public boolean equals(Object o) {
-        if ( o==null ) {
-            return false;
-        }
-        IntArrayList other = (IntArrayList)o;
-        if ( this.size()!=other.size() ) {
-            return false;
-        }
-		for (int i = 0; i < n; i++) {
-			if ( elements[i] != other.elements[i] ) {
-				return false;
-			}
-		}
-		return true;
-	}
-
-    public Object clone() throws CloneNotSupportedException {
-		IntArrayList a = (IntArrayList)super.clone();
-        a.n = this.n;
-        System.arraycopy(this.elements, 0, a.elements, 0, this.elements.length);
-        return a;
-    }
-
-	public String toString() {
-		StringBuffer buf = new StringBuffer();
-		for (int i = 0; i < n; i++) {
-			if ( i>0 ) {
-				buf.append(", ");
-			}
-			buf.append(elements[i]);
-		}
-		return buf.toString();
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/IntSet.java b/antlr-3.4/tool/src/main/java/org/antlr/misc/IntSet.java
deleted file mode 100644
index 3526f26..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/misc/IntSet.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.misc;
-
-import org.antlr.tool.Grammar;
-
-import java.util.List;
-
-/** A generic set of ints that has an efficient implementation, BitSet,
- *  which is a compressed bitset and is useful for ints that
- *  are small, for example less than 500 or so, and w/o many ranges.  For
- *  ranges with large values like unicode char sets, this is not very efficient.
- *  Consider using IntervalSet.  Not all methods in IntervalSet are implemented.
- *
- *  @see org.antlr.misc.BitSet
- *  @see org.antlr.misc.IntervalSet
- */
-public interface IntSet {
-    /** Add an element to the set */
-    void add(int el);
-
-    /** Add all elements from incoming set to this set.  Can limit
-     *  to set of its own type.
-     */
-    void addAll(IntSet set);
-
-    /** Return the intersection of this set with the argument, creating
-     *  a new set.
-     */
-    IntSet and(IntSet a);
-
-    IntSet complement(IntSet elements);
-
-    IntSet or(IntSet a);
-
-    IntSet subtract(IntSet a);
-
-    /** Return the size of this set (not the underlying implementation's
-     *  allocated memory size, for example).
-     */
-    int size();
-
-    boolean isNil();
-
-    boolean equals(Object obj);
-
-    int getSingleElement();
-
-    boolean member(int el);
-
-    /** remove this element from this set */
-    void remove(int el);
-
-    List toList();
-
-    String toString();
-
-    String toString(Grammar g);
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/Interval.java b/antlr-3.4/tool/src/main/java/org/antlr/misc/Interval.java
deleted file mode 100644
index 5b2410b..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/misc/Interval.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.misc;
-
-/** An immutable inclusive interval a..b */
-public class Interval {
-	public static final int INTERVAL_POOL_MAX_VALUE = 1000;
-
-	static Interval[] cache = new Interval[INTERVAL_POOL_MAX_VALUE+1];
-
-	public int a;
-	public int b;
-
-	public static int creates = 0;
-	public static int misses = 0;
-	public static int hits = 0;
-	public static int outOfRange = 0;
-
-	public Interval(int a, int b) { this.a=a; this.b=b; }
-
-	/** Interval objects are used read-only, so share all Intervals with the
-	 *  same single value a==b up to some max size.  Use an array as a perfect hash.
-	 *  Return shared object for 0..INTERVAL_POOL_MAX_VALUE or a new
-	 *  Interval object with a..a in it.  On Java.g, 218623 IntervalSets
-	 *  have a..a (set with 1 element).
-	 */
-	public static Interval create(int a, int b) {
-		//return new Interval(a,b);
-		// cache just a..a
-		if ( a!=b || a<0 || a>INTERVAL_POOL_MAX_VALUE ) {
-			return new Interval(a,b);
-		}
-		if ( cache[a]==null ) {
-			cache[a] = new Interval(a,a);
-		}
-		return cache[a];
-	}
-
-	public boolean equals(Object o) {
-		if ( o==null ) {
-			return false;
-		}
-		Interval other = (Interval)o;
-		return this.a==other.a && this.b==other.b;
-	}
-
-	/** Does this start completely before other? Disjoint */
-	public boolean startsBeforeDisjoint(Interval other) {
-		return this.a<other.a && this.b<other.a;
-	}
-
-	/** Does this start at or before other? Nondisjoint */
-	public boolean startsBeforeNonDisjoint(Interval other) {
-		return this.a<=other.a && this.b>=other.a;
-	}
-
-	/** Does this.a start after other.b? May or may not be disjoint */
-	public boolean startsAfter(Interval other) { return this.a>other.a; }
-
-	/** Does this start completely after other? Disjoint */
-	public boolean startsAfterDisjoint(Interval other) {
-		return this.a>other.b;
-	}
-
-	/** Does this start after other? NonDisjoint */
-	public boolean startsAfterNonDisjoint(Interval other) {
-		return this.a>other.a && this.a<=other.b; // this.b>=other.b implied
-	}
-
-	/** Are both ranges disjoint? I.e., no overlap? */
-	public boolean disjoint(Interval other) {
-		return startsBeforeDisjoint(other) || startsAfterDisjoint(other);
-	}
-
-	/** Are two intervals adjacent such as 0..41 and 42..42? */
-	public boolean adjacent(Interval other) {
-		return this.a == other.b+1 || this.b == other.a-1;
-	}
-
-	public boolean properlyContains(Interval other) {
-		return other.a >= this.a && other.b <= this.b;
-	}
-
-	/** Return the interval computed from combining this and other */
-	public Interval union(Interval other) {
-		return Interval.create(Math.min(a,other.a), Math.max(b,other.b));
-	}
-
-	/** Return the interval in common between this and o */
-	public Interval intersection(Interval other) {
-		return Interval.create(Math.max(a,other.a), Math.min(b,other.b));
-	}
-
-	/** Return the interval with elements from this not in other;
-	 *  other must not be totally enclosed (properly contained)
-	 *  within this, which would result in two disjoint intervals
-	 *  instead of the single one returned by this method.
-	 */
-	public Interval differenceNotProperlyContained(Interval other) {
-		Interval diff = null;
-		// other.a to left of this.a (or same)
-		if ( other.startsBeforeNonDisjoint(this) ) {
-			diff = Interval.create(Math.max(this.a,other.b+1),
-								   this.b);
-		}
-
-		// other.a to right of this.a
-		else if ( other.startsAfterNonDisjoint(this) ) {
-			diff = Interval.create(this.a, other.a-1);
-		}
-		return diff;
-	}
-
-	public String toString() {
-		return a+".."+b;
-	}
-}
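For orientation, here is a brief usage sketch of the Interval class deleted above. It assumes the org.antlr.misc package from the ANTLR 3 tool jar is on the classpath; the class and method names come straight from the deleted source, and only the driver class is hypothetical.

import org.antlr.misc.Interval;

public class IntervalSketch {
    public static void main(String[] args) {
        // a..a intervals in 0..INTERVAL_POOL_MAX_VALUE come from the shared cache
        System.out.println(Interval.create(5, 5) == Interval.create(5, 5));   // true (pooled)
        System.out.println(Interval.create(5, 9) == Interval.create(5, 9));   // false (a!=b is never pooled)

        Interval x = Interval.create(3, 7);
        Interval y = Interval.create(6, 10);
        System.out.println(x.union(y));          // 3..10
        System.out.println(x.intersection(y));   // 6..7
        System.out.println(x.disjoint(y));       // false -- the ranges overlap
        System.out.println(Interval.create(0, 41).adjacent(Interval.create(42, 42))); // true
    }
}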
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/IntervalSet.java b/antlr-3.4/tool/src/main/java/org/antlr/misc/IntervalSet.java
deleted file mode 100644
index dee54fe..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/misc/IntervalSet.java
+++ /dev/null
@@ -1,692 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.misc;
-
-import org.antlr.analysis.Label;
-import org.antlr.tool.Grammar;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.ListIterator;
-
-/** A set of integers that relies on ranges being common to do
- *  "run-length-encoded" like compression (if you view an IntSet like
- *  a BitSet with runs of 0s and 1s).  Only ranges are recorded so that
- *  a few ints up near value 1000 don't cause massive bitsets, just two
- *  integer intervals.
- *
- *  Element values may be negative.  Useful for sets of EPSILON and EOF.
- *
- *  0..9 char range is index pair ['\u0030','\u0039'].
- *  Multiple ranges are encoded with multiple index pairs.  Isolated
- *  elements are encoded with an index pair where both intervals are the same.
- *
- *  The ranges are ordered and disjoint so that 2..6 appears before 101..103.
- */
-public class IntervalSet implements IntSet {
-	public static final IntervalSet COMPLETE_SET = IntervalSet.of(0,Label.MAX_CHAR_VALUE);
-
-	/** The list of sorted, disjoint intervals. */
-    protected List<Interval> intervals;
-
-	/** Create a set with no elements */
-    public IntervalSet() {
-        intervals = new ArrayList<Interval>(2); // most sets are 1 or 2 elements
-    }
-
-	public IntervalSet(List<Interval> intervals) {
-		this.intervals = intervals;
-	}
-
-	/** Create a set with a single element, el. */
-    public static IntervalSet of(int a) {
-		IntervalSet s = new IntervalSet();
-        s.add(a);
-        return s;
-    }
-
-    /** Create a set with all ints within range [a..b] (inclusive) */
-    public static IntervalSet of(int a, int b) {
-        IntervalSet s = new IntervalSet();
-        s.add(a,b);
-        return s;
-    }
-
-    /** Add a single element to the set.  An isolated element is stored
-     *  as a range el..el.
-     */
-    public void add(int el) {
-        add(el,el);
-    }
-
-    /** Add interval; i.e., add all integers from a to b to set.
-     *  If b<a, do nothing.
-     *  Keep list in sorted order (by left range value).
-     *  If the new range overlaps or is adjacent to an existing range,
-     *  combine them.  For example, if this is {1..5, 10..20}, adding 6..7
-     *  yields {1..7, 10..20} (6..7 adjoins 1..5).  Adding 4..8 yields {1..8, 10..20}.
-     */
-    public void add(int a, int b) {
-        add(Interval.create(a,b));
-    }
-
-	// copy on write so we can cache a..a intervals and sets of that
-	protected void add(Interval addition) {
-		//System.out.println("add "+addition+" to "+intervals.toString());
-		if ( addition.b<addition.a ) {
-			return;
-		}
-		// find position in list
-		// Use iterators as we modify list in place
-		for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
-			Interval r = (Interval) iter.next();
-			if ( addition.equals(r) ) {
-				return;
-			}
-			if ( addition.adjacent(r) || !addition.disjoint(r) ) {
-				// next to each other, make a single larger interval
-				Interval bigger = addition.union(r);
-				iter.set(bigger);
-				// make sure we didn't just create an interval that
-				// should be merged with next interval in list
-				if ( iter.hasNext() ) {
-					Interval next = (Interval) iter.next();
-					if ( bigger.adjacent(next)||!bigger.disjoint(next) ) {
-						// if we bump up against or overlap next, merge
-						iter.remove();   // remove this one
-						iter.previous(); // move backwards to what we just set
-						iter.set(bigger.union(next)); // set to 3 merged ones
-					}
-				}
-				return;
-			}
-			if ( addition.startsBeforeDisjoint(r) ) {
-				// insert before r
-				iter.previous();
-				iter.add(addition);
-				return;
-			}
-			// if disjoint and after r, a future iteration will handle it
-		}
-		// ok, must be after last interval (and disjoint from last interval)
-		// just add it
-		intervals.add(addition);
-	}
-
-	/*
-	protected void add(Interval addition) {
-        //System.out.println("add "+addition+" to "+intervals.toString());
-        if ( addition.b<addition.a ) {
-            return;
-        }
-        // find position in list
-        //for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
-		int n = intervals.size();
-		for (int i=0; i<n; i++) {
-			Interval r = (Interval)intervals.get(i);
-            if ( addition.equals(r) ) {
-                return;
-            }
-            if ( addition.adjacent(r) || !addition.disjoint(r) ) {
-                // next to each other, make a single larger interval
-                Interval bigger = addition.union(r);
-				intervals.set(i, bigger);
-                // make sure we didn't just create an interval that
-                // should be merged with next interval in list
-				if ( (i+1)<n ) {
-					i++;
-					Interval next = (Interval)intervals.get(i);
-                    if ( bigger.adjacent(next)||!bigger.disjoint(next) ) {
-                        // if we bump up against or overlap next, merge
-						intervals.remove(i); // remove next one
-						i--;
-						intervals.set(i, bigger.union(next)); // set to 3 merged ones
-                    }
-                }
-                return;
-            }
-            if ( addition.startsBeforeDisjoint(r) ) {
-                // insert before r
-				intervals.add(i, addition);
-                return;
-            }
-            // if disjoint and after r, a future iteration will handle it
-        }
-        // ok, must be after last interval (and disjoint from last interval)
-        // just add it
-        intervals.add(addition);
-    }
-*/
-
-	public void addAll(IntSet set) {
-		if ( set==null ) {
-			return;
-		}
-        if ( !(set instanceof IntervalSet) ) {
-            throw new IllegalArgumentException("can't add non IntSet ("+
-											   set.getClass().getName()+
-											   ") to IntervalSet");
-        }
-        IntervalSet other = (IntervalSet)set;
-        // walk set and add each interval
-		int n = other.intervals.size();
-		for (int i = 0; i < n; i++) {
-			Interval I = (Interval) other.intervals.get(i);
-			this.add(I.a,I.b);
-		}
-    }
-
-    public IntSet complement(int minElement, int maxElement) {
-        return this.complement(IntervalSet.of(minElement,maxElement));
-    }
-
-    /** Given the set of possible values (rather than, say UNICODE or MAXINT),
-     *  return a new set containing all elements in vocabulary, but not in
-     *  this.  The computation is (vocabulary - this).
-     *
-     *  'this' is assumed to be either a subset or equal to vocabulary.
-     */
-    public IntSet complement(IntSet vocabulary) {
-        if ( vocabulary==null ) {
-            return null; // nothing in common with null set
-        }
-		if ( !(vocabulary instanceof IntervalSet ) ) {
-			throw new IllegalArgumentException("can't complement with non IntervalSet ("+
-											   vocabulary.getClass().getName()+")");
-		}
-		IntervalSet vocabularyIS = ((IntervalSet)vocabulary);
-		int maxElement = vocabularyIS.getMaxElement();
-
-		IntervalSet compl = new IntervalSet();
-		int n = intervals.size();
-		if ( n ==0 ) {
-			return compl;
-		}
-		Interval first = (Interval)intervals.get(0);
-		// add a range from 0 to first.a constrained to vocab
-		if ( first.a > 0 ) {
-			IntervalSet s = IntervalSet.of(0, first.a-1);
-			IntervalSet a = (IntervalSet)s.and(vocabularyIS);
-			compl.addAll(a);
-		}
-		for (int i=1; i<n; i++) { // from 2nd interval .. nth
-			Interval previous = (Interval)intervals.get(i-1);
-			Interval current = (Interval)intervals.get(i);
-			IntervalSet s = IntervalSet.of(previous.b+1, current.a-1);
-			IntervalSet a = (IntervalSet)s.and(vocabularyIS);
-			compl.addAll(a);
-		}
-		Interval last = (Interval)intervals.get(n -1);
-		// add a range from last.b to maxElement constrained to vocab
-		if ( last.b < maxElement ) {
-			IntervalSet s = IntervalSet.of(last.b+1, maxElement);
-			IntervalSet a = (IntervalSet)s.and(vocabularyIS);
-			compl.addAll(a);
-		}
-		return compl;
-    }
-
-	/** Compute this-other via this&~other.
-	 *  Return a new set containing all elements in this but not in other.
-	 *  other is assumed to be a subset of this;
-     *  anything that is in other but not in this will be ignored.
-	 */
-	public IntSet subtract(IntSet other) {
-		// assume the whole unicode range here for the complement
-		// because it doesn't matter.  Anything beyond the max of this' set
-		// will be ignored since we are doing this & ~other.  The intersection
-		// will be empty.  The only problem would be when this' set max value
-		// goes beyond MAX_CHAR_VALUE, but hopefully the constant MAX_CHAR_VALUE
-		// will prevent this.
-		return this.and(((IntervalSet)other).complement(COMPLETE_SET));
-	}
-
-	/** return a new set containing all elements in this but not in other.
-     *  Intervals may have to be broken up when ranges in this overlap
-     *  with ranges in other.  other is assumed to be a subset of this;
-     *  anything that is in other but not in this will be ignored.
-	 *
-	 *  Keep around, but 10-20-2005, I decided to make complement work w/o
-	 *  subtract and so then subtract can simply be a&~b
-	 *
-    public IntSet subtract(IntSet other) {
-        if ( other==null || !(other instanceof IntervalSet) ) {
-            return null; // nothing in common with null set
-        }
-
-        IntervalSet diff = new IntervalSet();
-
-        // iterate down both interval lists
-        ListIterator thisIter = this.intervals.listIterator();
-        ListIterator otherIter = ((IntervalSet)other).intervals.listIterator();
-        Interval mine=null;
-        Interval theirs=null;
-        if ( thisIter.hasNext() ) {
-            mine = (Interval)thisIter.next();
-        }
-        if ( otherIter.hasNext() ) {
-            theirs = (Interval)otherIter.next();
-        }
-        while ( mine!=null ) {
-            //System.out.println("mine="+mine+", theirs="+theirs);
-            // CASE 1: nothing in theirs removes a chunk from mine
-            if ( theirs==null || mine.disjoint(theirs) ) {
-                // SUBCASE 1a: finished traversing theirs; keep adding mine now
-                if ( theirs==null ) {
-                    // add everything in mine to difference since theirs done
-                    diff.add(mine);
-                    mine = null;
-                    if ( thisIter.hasNext() ) {
-                        mine = (Interval)thisIter.next();
-                    }
-                }
-                else {
-                    // SUBCASE 1b: mine is completely to the left of theirs
-                    // so we can add to difference; move mine, but not theirs
-                    if ( mine.startsBeforeDisjoint(theirs) ) {
-                        diff.add(mine);
-                        mine = null;
-                        if ( thisIter.hasNext() ) {
-                            mine = (Interval)thisIter.next();
-                        }
-                    }
-                    // SUBCASE 1c: theirs is completely to the left of mine
-                    else {
-                        // keep looking in theirs
-                        theirs = null;
-                        if ( otherIter.hasNext() ) {
-                            theirs = (Interval)otherIter.next();
-                        }
-                    }
-                }
-            }
-            else {
-                // CASE 2: theirs breaks mine into two chunks
-                if ( mine.properlyContains(theirs) ) {
-                    // must add two intervals: stuff to left and stuff to right
-                    diff.add(mine.a, theirs.a-1);
-                    // don't actually add stuff to right yet as next 'theirs'
-                    // might overlap with it
-                    // The stuff to the right might overlap with next "theirs".
-                    // so it is considered next
-                    Interval right = new Interval(theirs.b+1, mine.b);
-                    mine = right;
-                    // move theirs forward
-                    theirs = null;
-                    if ( otherIter.hasNext() ) {
-                        theirs = (Interval)otherIter.next();
-                    }
-                }
-
-                // CASE 3: theirs covers mine; nothing to add to diff
-                else if ( theirs.properlyContains(mine) ) {
-                    // nothing to add, theirs forces removal totally of mine
-                    // just move mine looking for an overlapping interval
-                    mine = null;
-                    if ( thisIter.hasNext() ) {
-                        mine = (Interval)thisIter.next();
-                    }
-                }
-
-                // CASE 4: non proper overlap
-                else {
-                    // overlap, but not properly contained
-                    diff.add(mine.differenceNotProperlyContained(theirs));
-                    // update iterators
-                    boolean moveTheirs = true;
-                    if ( mine.startsBeforeNonDisjoint(theirs) ||
-                         theirs.b > mine.b )
-                    {
-                        // uh oh, right of theirs extends past right of mine
-                        // therefore could overlap with next of mine so don't
-                        // move theirs iterator yet
-                        moveTheirs = false;
-                    }
-                    // always move mine
-                    mine = null;
-                    if ( thisIter.hasNext() ) {
-                        mine = (Interval)thisIter.next();
-                    }
-                    if ( moveTheirs ) {
-                        theirs = null;
-                        if ( otherIter.hasNext() ) {
-                            theirs = (Interval)otherIter.next();
-                        }
-                    }
-                }
-            }
-        }
-        return diff;
-    }
-	 */
-
-    /** Return a new set containing the union of this set and a. */
-	public IntSet or(IntSet a) {
-		IntervalSet o = new IntervalSet();
-		o.addAll(this);
-		o.addAll(a);
-		//throw new NoSuchMethodError();
-		return o;
-	}
-
-    /** Return a new set with the intersection of this set with other.  Because
-     *  the intervals are sorted, we can walk both lists in a single pass,
-     *  one index per list.  This is roughly O(n+m) for interval
-     *  list lengths n and m.
-     */
-	public IntSet and(IntSet other) {
-		if ( other==null ) { //|| !(other instanceof IntervalSet) ) {
-			return null; // nothing in common with null set
-		}
-
-		ArrayList myIntervals = (ArrayList)this.intervals;
-		ArrayList theirIntervals = (ArrayList)((IntervalSet)other).intervals;
-		IntervalSet intersection = null;
-		int mySize = myIntervals.size();
-		int theirSize = theirIntervals.size();
-		int i = 0;
-		int j = 0;
-		// iterate down both interval lists looking for nondisjoint intervals
-		while ( i<mySize && j<theirSize ) {
-			Interval mine = (Interval)myIntervals.get(i);
-			Interval theirs = (Interval)theirIntervals.get(j);
-			//System.out.println("mine="+mine+" and theirs="+theirs);
-			if ( mine.startsBeforeDisjoint(theirs) ) {
-				// move this iterator looking for interval that might overlap
-				i++;
-			}
-			else if ( theirs.startsBeforeDisjoint(mine) ) {
-				// move other iterator looking for interval that might overlap
-				j++;
-			}
-			else if ( mine.properlyContains(theirs) ) {
-				// overlap, add intersection, get next theirs
-				if ( intersection==null ) {
-					intersection = new IntervalSet();
-				}
-				intersection.add(mine.intersection(theirs));
-				j++;
-			}
-			else if ( theirs.properlyContains(mine) ) {
-				// overlap, add intersection, get next mine
-				if ( intersection==null ) {
-					intersection = new IntervalSet();
-				}
-				intersection.add(mine.intersection(theirs));
-				i++;
-			}
-			else if ( !mine.disjoint(theirs) ) {
-				// overlap, add intersection
-				if ( intersection==null ) {
-					intersection = new IntervalSet();
-				}
-				intersection.add(mine.intersection(theirs));
-				// Move the index of the lower range [a..b], but not the
-				// upper range, as the upper range may still overlap with
-				// the next interval in the other list. E.g., if mine=[0..115]
-				// and theirs=[115..200], then the intersection is 115; move
-				// mine but not theirs, since theirs may overlap with the
-				// next range in this.intervals.
-				if ( mine.startsAfterNonDisjoint(theirs) ) {
-					j++;
-				}
-				else if ( theirs.startsAfterNonDisjoint(mine) ) {
-					i++;
-				}
-			}
-		}
-		if ( intersection==null ) {
-			return new IntervalSet();
-		}
-		return intersection;
-	}
-
-    /** Is el in any range of this set? */
-    public boolean member(int el) {
-		int n = intervals.size();
-		for (int i = 0; i < n; i++) {
-			Interval I = (Interval) intervals.get(i);
-			int a = I.a;
-			int b = I.b;
-			if ( el<a ) {
-				break; // list is sorted and el is before this interval; not here
-			}
-			if ( el>=a && el<=b ) {
-				return true; // found in this interval
-			}
-		}
-		return false;
-/*
-		for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
-            Interval I = (Interval) iter.next();
-            if ( el<I.a ) {
-                break; // list is sorted and el is before this interval; not here
-            }
-            if ( el>=I.a && el<=I.b ) {
-                return true; // found in this interval
-            }
-        }
-        return false;
-        */
-    }
-
-    /** return true if this set has no members */
-    public boolean isNil() {
-        return intervals==null || intervals.size()==0;
-    }
-
-    /** If this set is a single integer, return it; otherwise return Label.INVALID */
-    public int getSingleElement() {
-        if ( intervals!=null && intervals.size()==1 ) {
-            Interval I = (Interval)intervals.get(0);
-            if ( I.a == I.b ) {
-                return I.a;
-            }
-        }
-        return Label.INVALID;
-    }
-
-	public int getMaxElement() {
-		if ( isNil() ) {
-			return Label.INVALID;
-		}
-		Interval last = (Interval)intervals.get(intervals.size()-1);
-		return last.b;
-	}
-
-	/** Return minimum element >= 0 */
-	public int getMinElement() {
-		if ( isNil() ) {
-			return Label.INVALID;
-		}
-		int n = intervals.size();
-		for (int i = 0; i < n; i++) {
-			Interval I = (Interval) intervals.get(i);
-			int a = I.a;
-			int b = I.b;
-			for (int v=a; v<=b; v++) {
-				if ( v>=0 ) return v;
-			}
-		}
-		return Label.INVALID;
-	}
-
-    /** Return a list of Interval objects. */
-    public List<Interval> getIntervals() {
-        return intervals;
-    }
-
-    /** Are two IntervalSets equal?  Because all intervals are sorted
-     *  and disjoint, equals is a simple linear walk over both lists
-     *  to make sure they are the same.  Interval.equals() is used
-     *  by the List.equals() method to check the ranges.
-     */
-    public boolean equals(Object obj) {
-        if ( obj==null || !(obj instanceof IntervalSet) ) {
-            return false;
-        }
-        IntervalSet other = (IntervalSet)obj;
-        return this.intervals.equals(other.intervals);
-    }
-
-    public String toString() {
-        return toString(null);
-    }
-
-    public String toString(Grammar g) {
-        StringBuffer buf = new StringBuffer();
-		if ( this.intervals==null || this.intervals.size()==0 ) {
-			return "{}";
-		}
-        if ( this.intervals.size()>1 ) {
-            buf.append("{");
-        }
-        Iterator iter = this.intervals.iterator();
-        while (iter.hasNext()) {
-            Interval I = (Interval) iter.next();
-            int a = I.a;
-            int b = I.b;
-            if ( a==b ) {
-                if ( g!=null ) {
-                    buf.append(g.getTokenDisplayName(a));
-                }
-                else {
-                    buf.append(a);
-                }
-            }
-            else {
-                if ( g!=null ) {
-                    buf.append(g.getTokenDisplayName(a)+".."+g.getTokenDisplayName(b));
-                }
-                else {
-                    buf.append(a+".."+b);
-                }
-            }
-            if ( iter.hasNext() ) {
-                buf.append(", ");
-            }
-        }
-        if ( this.intervals.size()>1 ) {
-            buf.append("}");
-        }
-        return buf.toString();
-    }
-
-    public int size() {
-		int n = 0;
-		int numIntervals = intervals.size();
-		if ( numIntervals==1 ) {
-			Interval firstInterval = this.intervals.get(0);
-			return firstInterval.b-firstInterval.a+1;
-		}
-		for (int i = 0; i < numIntervals; i++) {
-			Interval I = (Interval) intervals.get(i);
-			n += (I.b-I.a+1);
-		}
-		return n;
-    }
-
-    public List toList() {
-		List values = new ArrayList();
-		int n = intervals.size();
-		for (int i = 0; i < n; i++) {
-			Interval I = (Interval) intervals.get(i);
-			int a = I.a;
-			int b = I.b;
-			for (int v=a; v<=b; v++) {
-				values.add(Utils.integer(v));
-			}
-		}
-		return values;
-    }
-
-	/** Get the ith element of ordered set.  Used only by RandomPhrase so
-	 *  don't bother to implement if you're not doing that for a new
-	 *  ANTLR code gen target.
-	 */
-	public int get(int i) {
-		int n = intervals.size();
-		int index = 0;
-		for (int j = 0; j < n; j++) {
-			Interval I = (Interval) intervals.get(j);
-			int a = I.a;
-			int b = I.b;
-			for (int v=a; v<=b; v++) {
-				if ( index==i ) {
-					return v;
-				}
-				index++;
-			}
-		}
-		return -1;
-	}
-
-	public int[] toArray() {
-		int[] values = new int[size()];
-		int n = intervals.size();
-		int j = 0;
-		for (int i = 0; i < n; i++) {
-			Interval I = (Interval) intervals.get(i);
-			int a = I.a;
-			int b = I.b;
-			for (int v=a; v<=b; v++) {
-				values[j] = v;
-				j++;
-			}
-		}
-		return values;
-	}
-
-	public org.antlr.runtime.BitSet toRuntimeBitSet() {
-		org.antlr.runtime.BitSet s =
-			new org.antlr.runtime.BitSet(getMaxElement()+1);
-		int n = intervals.size();
-		for (int i = 0; i < n; i++) {
-			Interval I = (Interval) intervals.get(i);
-			int a = I.a;
-			int b = I.b;
-			for (int v=a; v<=b; v++) {
-				s.add(v);
-			}
-		}
-		return s;
-	}
-
-	public void remove(int el) {
-        throw new NoSuchMethodError("IntervalSet.remove() unimplemented");
-    }
-
-	/*
-	protected void finalize() throws Throwable {
-		super.finalize();
-		System.out.println("size "+intervals.size()+" "+size());
-	}
-	*/
-}
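To make the documented behavior above concrete (adjacent/overlapping ranges merge on add, complement is taken against a finite vocabulary, and intersection walks both sorted lists in one pass), a small hedged sketch follows. It assumes org.antlr.misc.IntervalSet and its dependencies (e.g. org.antlr.analysis.Label) are available from the ANTLR 3 tool jar; only the driver class is hypothetical.

import org.antlr.misc.IntervalSet;

public class IntervalSetSketch {
    public static void main(String[] args) {
        IntervalSet s = IntervalSet.of(1, 5);
        s.add(10, 20);                  // {1..5, 10..20}
        s.add(4, 8);                    // overlaps 1..5, so the ranges merge
        System.out.println(s);          // {1..8, 10..20}

        // complement with respect to a finite vocabulary 0..30
        System.out.println(s.complement(IntervalSet.of(0, 30)));  // {0, 9, 21..30}

        // intersection: single pass over both sorted interval lists
        System.out.println(s.and(IntervalSet.of(5, 12)));         // {5..8, 10..12}
    }
}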
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/OrderedHashSet.java b/antlr-3.4/tool/src/main/java/org/antlr/misc/OrderedHashSet.java
deleted file mode 100644
index fa5e859..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/misc/OrderedHashSet.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.misc;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.List;
-
-/** A HashSet that remembers the order in which the elements were added.
- *  You can alter the ith element with set(i,value) too :)  Unique list.
- *  I need the replace/set-element-i functionality, so I subclass
- *  LinkedHashSet.
- */
-public class OrderedHashSet<T> extends LinkedHashSet {
-    /** Track the elements as they are added to the set */
-    protected List<T> elements = new ArrayList<T>();
-
-    public T get(int i) {
-        return elements.get(i);
-    }
-
-    /** Replace an existing value with a new value; updates the element
-     *  list and the hash table, but not the key as that has not changed.
-     */
-    public T set(int i, T value) {
-        T oldElement = elements.get(i);
-        elements.set(i,value); // update list
-        super.remove(oldElement); // now update the set: remove/add
-        super.add(value);
-        return oldElement;
-    }
-
-    /** Add a value to list; keep in hashtable for consistency also;
-     *  Key is object itself.  Good for say asking if a certain string is in
-     *  a list of strings.
-     */
-    public boolean add(Object value) {
-        boolean result = super.add(value);
-		if ( result ) {  // only track if new element not in set
-			elements.add((T)value);
-		}
-		return result;
-    }
-
-    public boolean remove(Object o) {
-		throw new UnsupportedOperationException();
-		/*
-		elements.remove(o);
-        return super.remove(o);
-        */
-    }
-
-    public void clear() {
-        elements.clear();
-        super.clear();
-    }
-
-    /** Return the List holding list of table elements.  Note that you are
-     *  NOT getting a copy so don't write to the list.
-     */
-    public List<T> elements() {
-        return elements;
-    }
-
-	public Iterator<T> iterator() {
-		return elements.iterator();
-	}
-
-	public Object[] toArray() {
-		return elements.toArray();
-	}
-	
-    public int size() {
-		/*
-		if ( elements.size()!=super.size() ) {
-			ErrorManager.internalError("OrderedHashSet: elements and set size differs; "+
-									   elements.size()+"!="+super.size());
-        }
-        */
-        return elements.size();
-    }
-
-    public String toString() {
-        return elements.toString();
-    }
-}
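A short usage sketch of the deleted OrderedHashSet (same assumption: org.antlr.misc from the tool jar on the classpath; the driver class is hypothetical). Iteration follows insertion order, duplicates are ignored, and set(i,value) swaps the ith element in both the list and the backing hash set.

import org.antlr.misc.OrderedHashSet;

public class OrderedHashSetSketch {
    public static void main(String[] args) {
        OrderedHashSet<String> names = new OrderedHashSet<String>();
        names.add("b");
        names.add("a");
        names.add("b");                    // duplicate; not tracked again
        System.out.println(names);         // [b, a] -- insertion order preserved
        String old = names.set(1, "c");    // replace element 1 ("a")
        System.out.println(old);           // a
        System.out.println(names);         // [b, c]
        System.out.println(names.size());  // 2
    }
}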
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/Utils.java b/antlr-3.4/tool/src/main/java/org/antlr/misc/Utils.java
deleted file mode 100644
index ca156d5..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/misc/Utils.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.misc;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class Utils {
-	public static final int INTEGER_POOL_MAX_VALUE = 1000;
-	static Integer[] ints = new Integer[INTEGER_POOL_MAX_VALUE+1];
-
-	/** Integer objects are immutable so share all Integers with the
-	 *  same value up to some max size.  Use an array as a perfect hash.
-	 *  Return shared object for 0..INTEGER_POOL_MAX_VALUE or a new
-	 *  Integer object with x in it.
-	 */
-	public static Integer integer(int x) {
-		if ( x<0 || x>INTEGER_POOL_MAX_VALUE ) {
-			return new Integer(x);
-		}
-		if ( ints[x]==null ) {
-			ints[x] = new Integer(x);
-		}
-		return ints[x];
-	}
-
-	/** Given a source string, src,
-		a string to replace, replacee,
-		and a string to replace with, replacer,
-		return a new string w/ the replacing done.
-		You can use replacer==null to remove replacee from the string.
-
-		This should be faster than Java's String.replaceAll as that one
-		uses regex (I only want to play with strings anyway).
-	*/
-	public static String replace(String src, String replacee, String replacer) {
-		StringBuffer result = new StringBuffer(src.length() + 50);
-		int startIndex = 0;
-		int endIndex = src.indexOf(replacee);
-		while(endIndex != -1) {
-			result.append(src.substring(startIndex,endIndex));
-			if ( replacer!=null ) {
-				result.append(replacer);
-			}
-			startIndex = endIndex + replacee.length();
-			endIndex = src.indexOf(replacee,startIndex);
-		}
-		result.append(src.substring(startIndex,src.length()));
-		return result.toString();
-	}
-
-//	/** mimic struct; like a non-iterable map. */
-//	public static class Struct {
-//		public Map<String,Object> fields = new HashMap<String,Object>();
-//
-//		@Override
-//		public String toString() { return fields.toString(); }
-//	}
-//
-//	public static Struct struct(String propNames, Object... values) {
-//		String[] props = propNames.split(",");
-//		int i=0;
-//		Struct s = new Struct();
-//		for (String p : props) s.fields.put(p, values[i++]);
-//		return s;
-//	}
-}
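The two Utils helpers deleted above are easy to demonstrate; this sketch assumes the same org.antlr.misc package on the classpath and a hypothetical driver class.

import org.antlr.misc.Utils;

public class UtilsSketch {
    public static void main(String[] args) {
        // values in 0..INTEGER_POOL_MAX_VALUE come back from the shared pool
        System.out.println(Utils.integer(42) == Utils.integer(42));      // true (pooled)
        System.out.println(Utils.integer(2000) == Utils.integer(2000));  // false (outside the pool)

        // regex-free replacement; a null replacer simply removes the replacee
        System.out.println(Utils.replace("a.b.c", ".", "/"));   // a/b/c
        System.out.println(Utils.replace("a.b.c", ".", null));  // abc
    }
}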
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/AssignTokenTypesBehavior.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/AssignTokenTypesBehavior.java
deleted file mode 100644
index fba4f0e..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/AssignTokenTypesBehavior.java
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.Label;
-import org.antlr.grammar.v3.AssignTokenTypesWalker;
-import org.antlr.misc.Utils;
-import org.antlr.runtime.tree.TreeNodeStream;
-
-import java.util.*;
-
-/** Holds the functionality moved here from the assign.types.g grammar file. */
-public class AssignTokenTypesBehavior extends AssignTokenTypesWalker {
-	protected static final Integer UNASSIGNED = Utils.integer(-1);
-	protected static final Integer UNASSIGNED_IN_PARSER_RULE = Utils.integer(-2);
-
-	protected Map<String,Integer> stringLiterals = new TreeMap<String, Integer>();
-	protected Map<String,Integer> tokens = new TreeMap<String, Integer>();
-	protected Map<String,String> aliases = new TreeMap<String, String>();
-	protected Map<String,String> aliasesReverseIndex = new HashMap<String,String>();
-
-	/** Track actual lexer rule defs so we don't get repeated token defs in
-	 *  generated lexer.
-	 */
-	protected Set<String> tokenRuleDefs = new HashSet();
-
-	public AssignTokenTypesBehavior() {
-		super(null);
-	}
-
-    @Override
-	protected void init(Grammar g) {
-		this.grammar = g;
-		currentRuleName = null;
-		if ( stringAlias==null ) {
-			// only init once; can't statically init since we need astFactory
-			initASTPatterns();
-		}
-	}
-
-	/** Track string literals (could be in tokens{} section) */
-    @Override
-	protected void trackString(GrammarAST t) {
-		// if lexer, don't allow aliasing in tokens section
-		if ( currentRuleName==null && grammar.type==Grammar.LEXER ) {
-			ErrorManager.grammarError(ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER,
-									  grammar,
-									  t.token,
-									  t.getText());
-			return;
-		}
-		// in a plain parser grammar rule, cannot reference literals
-		// (unless defined previously via tokenVocab option)
-		// don't warn until we hit root grammar as may be defined there.
-		if ( grammar.getGrammarIsRoot() &&
-			 grammar.type==Grammar.PARSER &&
-			 grammar.getTokenType(t.getText())== Label.INVALID )
-		{
-			ErrorManager.grammarError(ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE,
-									  grammar,
-									  t.token,
-									  t.getText());
-		}
-		// Don't record literals for lexers, they are things to match not tokens
-		if ( grammar.type==Grammar.LEXER ) {
-			return;
-		}
-		// otherwise add literal to token types if referenced from parser rule
-		// or in the tokens{} section
-		if ( (currentRuleName==null ||
-			  Character.isLowerCase(currentRuleName.charAt(0))) &&
-																grammar.getTokenType(t.getText())==Label.INVALID )
-		{
-			stringLiterals.put(t.getText(), UNASSIGNED_IN_PARSER_RULE);
-		}
-	}
-
-    @Override
-	protected void trackToken(GrammarAST t) {
-		// imported token names might exist, only add if new
-		// Might have ';'=4 in vocab import and SEMI=';'. Avoid
-		// setting to UNASSIGNED if we have loaded ';'/SEMI
-		if ( grammar.getTokenType(t.getText())==Label.INVALID &&
-			 tokens.get(t.getText())==null )
-		{
-			tokens.put(t.getText(), UNASSIGNED);
-		}
-	}
-
-    @Override
-	protected void trackTokenRule(GrammarAST t,
-								  GrammarAST modifier,
-								  GrammarAST block)
-	{
-		// imported token names might exist, only add if new
-		if ( grammar.type==Grammar.LEXER || grammar.type==Grammar.COMBINED ) {
-			if ( !Character.isUpperCase(t.getText().charAt(0)) ) {
-				return;
-			}
-			if ( t.getText().equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ) {
-				// don't add Tokens rule
-				return;
-			}
-
-			// track all lexer rules so we can look for token refs w/o
-			// associated lexer rules.
-			grammar.composite.lexerRules.add(t.getText());
-
-			int existing = grammar.getTokenType(t.getText());
-			if ( existing==Label.INVALID ) {
-				tokens.put(t.getText(), UNASSIGNED);
-			}
-			// look for "<TOKEN> : <literal> ;" pattern
-			// (can have optional action last)
-			if ( block.hasSameTreeStructure(charAlias) ||
-				 block.hasSameTreeStructure(stringAlias) ||
-				 block.hasSameTreeStructure(charAlias2) ||
-				 block.hasSameTreeStructure(stringAlias2) )
-			{
-				tokenRuleDefs.add(t.getText());
-				/*
-			Grammar parent = grammar.composite.getDelegator(grammar);
-			boolean importedByParserOrCombined =
-				parent!=null &&
-				(parent.type==Grammar.LEXER||parent.type==Grammar.PARSER);
-				*/
-				if ( grammar.type==Grammar.COMBINED || grammar.type==Grammar.LEXER ) {
-					// only call this rule an alias if combined or lexer
-					alias(t, (GrammarAST)block.getChild(0).getChild(0));
-				}
-			}
-		}
-		// else error
-	}
-
-    @Override
-	protected void alias(GrammarAST t, GrammarAST s) {
-		String tokenID = t.getText();
-		String literal = s.getText();
-		String prevAliasLiteralID = aliasesReverseIndex.get(literal);
-		if ( prevAliasLiteralID!=null ) { // we've seen this literal before
-			if ( tokenID.equals(prevAliasLiteralID) ) {
-				// duplicate but identical alias; might be tokens {A='a'} and
-				// lexer rule A : 'a' ;  Is ok, just return
-				return;
-			}
-
-			// give error unless both are rules (ok if one is in tokens section)
-			if ( !(tokenRuleDefs.contains(tokenID) && tokenRuleDefs.contains(prevAliasLiteralID)) )
-			{
-				// don't allow alias if A='a' in tokens section and B : 'a'; is rule.
-				// Allow if both are rules.  Will get DFA nondeterminism error later.
-				ErrorManager.grammarError(ErrorManager.MSG_TOKEN_ALIAS_CONFLICT,
-										  grammar,
-										  t.token,
-										  tokenID+"="+literal,
-										  prevAliasLiteralID);
-			}
-			return; // don't do the alias
-		}
-		int existingLiteralType = grammar.getTokenType(literal);
-		if ( existingLiteralType !=Label.INVALID ) {
-			// we've seen this before from a tokenVocab most likely
-			// don't assign a new token type; use existingLiteralType.
-			tokens.put(tokenID, existingLiteralType);
-		}
-		String prevAliasTokenID = aliases.get(tokenID);
-		if ( prevAliasTokenID!=null ) {
-			ErrorManager.grammarError(ErrorManager.MSG_TOKEN_ALIAS_REASSIGNMENT,
-									  grammar,
-									  t.token,
-									  tokenID+"="+literal,
-									  prevAliasTokenID);
-			return; // don't do the alias
-		}
-		aliases.put(tokenID, literal);
-		aliasesReverseIndex.put(literal, tokenID);
-	}
-
-    @Override
-	public void defineTokens(Grammar root) {
-/*
-	System.out.println("stringLiterals="+stringLiterals);
-	System.out.println("tokens="+tokens);
-	System.out.println("aliases="+aliases);
-	System.out.println("aliasesReverseIndex="+aliasesReverseIndex);
-*/
-
-		assignTokenIDTypes(root);
-
-		aliasTokenIDsAndLiterals(root);
-
-		assignStringTypes(root);
-
-/*
-	System.out.println("stringLiterals="+stringLiterals);
-	System.out.println("tokens="+tokens);
-	System.out.println("aliases="+aliases);
-*/
-		defineTokenNamesAndLiteralsInGrammar(root);
-	}
-
-/*
-protected void defineStringLiteralsFromDelegates() {
-	 if ( grammar.getGrammarIsMaster() && grammar.type==Grammar.COMBINED ) {
-		 List<Grammar> delegates = grammar.getDelegates();
-		 System.out.println("delegates in master combined: "+delegates);
-		 for (int i = 0; i < delegates.size(); i++) {
-			 Grammar d = (Grammar) delegates.get(i);
-			 Set<String> literals = d.getStringLiterals();
-			 for (Iterator it = literals.iterator(); it.hasNext();) {
-				 String literal = (String) it.next();
-				 System.out.println("literal "+literal);
-				 int ttype = grammar.getTokenType(literal);
-				 grammar.defineLexerRuleForStringLiteral(literal, ttype);
-			 }
-		 }
-	 }
-}
-*/
-
-    @Override
-	protected void assignStringTypes(Grammar root) {
-		// walk string literals assigning types to unassigned ones
-		Set s = stringLiterals.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String lit = (String) it.next();
-			Integer oldTypeI = (Integer)stringLiterals.get(lit);
-			int oldType = oldTypeI.intValue();
-			if ( oldType<Label.MIN_TOKEN_TYPE ) {
-				Integer typeI = Utils.integer(root.getNewTokenType());
-				stringLiterals.put(lit, typeI);
-				// if string referenced in combined grammar parser rule,
-				// automatically define in the generated lexer
-				root.defineLexerRuleForStringLiteral(lit, typeI.intValue());
-			}
-		}
-	}
-
-    @Override
-	protected void aliasTokenIDsAndLiterals(Grammar root) {
-		if ( root.type==Grammar.LEXER ) {
-			return; // strings/chars are never token types in LEXER
-		}
-		// walk aliases if any and assign types to aliased literals if literal
-		// was referenced
-		Set s = aliases.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String tokenID = (String) it.next();
-			String literal = (String)aliases.get(tokenID);
-			if ( literal.charAt(0)=='\'' && stringLiterals.get(literal)!=null ) {
-				stringLiterals.put(literal, tokens.get(tokenID));
-				// an alias still means you need a lexer rule for it
-				Integer typeI = (Integer)tokens.get(tokenID);
-				if ( !tokenRuleDefs.contains(tokenID) ) {
-					root.defineLexerRuleForAliasedStringLiteral(tokenID, literal, typeI.intValue());
-				}
-			}
-		}
-	}
-
-    @Override
-	protected void assignTokenIDTypes(Grammar root) {
-		// walk token names, assigning values if unassigned
-		Set s = tokens.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String tokenID = (String) it.next();
-			if ( tokens.get(tokenID)==UNASSIGNED ) {
-				tokens.put(tokenID, Utils.integer(root.getNewTokenType()));
-			}
-		}
-	}
-
-    @Override
-	protected void defineTokenNamesAndLiteralsInGrammar(Grammar root) {
-		Set s = tokens.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String tokenID = (String) it.next();
-			int ttype = ((Integer)tokens.get(tokenID)).intValue();
-			root.defineToken(tokenID, ttype);
-		}
-		s = stringLiterals.keySet();
-		for (Iterator it = s.iterator(); it.hasNext();) {
-			String lit = (String) it.next();
-			int ttype = ((Integer)stringLiterals.get(lit)).intValue();
-			root.defineToken(lit, ttype);
-		}
-	}
-
-}
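The aliasing rules enforced by alias() above are easier to follow in isolation. The following is a simplified, standalone sketch of just the two-map bookkeeping (a hypothetical class, not ANTLR code); it deliberately omits the lexer-rule-defined exception and the tokenVocab type reuse handled by the real method.

import java.util.HashMap;
import java.util.Map;

public class AliasBookkeepingSketch {
    private final Map<String,String> aliases = new HashMap<String,String>();             // tokenID -> literal
    private final Map<String,String> aliasesReverseIndex = new HashMap<String,String>(); // literal -> tokenID

    /** Return null if the alias is accepted (or is an exact duplicate), else a conflict message. */
    public String tryAlias(String tokenID, String literal) {
        String prevTokenForLiteral = aliasesReverseIndex.get(literal);
        if ( prevTokenForLiteral!=null ) {
            if ( tokenID.equals(prevTokenForLiteral) ) return null;     // duplicate but identical alias
            return "literal "+literal+" is already aliased to "+prevTokenForLiteral;
        }
        String prevLiteralForToken = aliases.get(tokenID);
        if ( prevLiteralForToken!=null ) {
            return tokenID+" is already aliased to "+prevLiteralForToken;
        }
        aliases.put(tokenID, literal);
        aliasesReverseIndex.put(literal, tokenID);
        return null;
    }

    public static void main(String[] args) {
        AliasBookkeepingSketch s = new AliasBookkeepingSketch();
        System.out.println(s.tryAlias("SEMI", "';'"));   // null: first alias accepted
        System.out.println(s.tryAlias("SEMI", "';'"));   // null: duplicate but identical
        System.out.println(s.tryAlias("SEMI2", "';'"));  // conflict: literal already aliased
        System.out.println(s.tryAlias("SEMI", "','"));   // conflict: token already aliased
    }
}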
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/Attribute.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/Attribute.java
deleted file mode 100644
index 9834ec8..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/Attribute.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.tool;
-
-/** Track the names of attributes defined in arg lists, return values,
- *  scope blocks, etc.
- */
-public class Attribute {
-	/** The entire declaration such as "String foo;" */
-	public String decl;
-
-	/** The type; might be empty such as for Python which has no static typing */
-	public String type;
-
-	/** The name of the attribute "foo" */
-	public String name;
-
-	/** The optional attribute initialization expression */
-	public String initValue;
-
-	public Attribute(String decl) {
-		extractAttribute(decl);
-	}
-
-	public Attribute(String name, String decl) {
-		this.name = name;
-		this.decl = decl;
-	}
-
-	/** For decls like "String foo" or "char *foo32[3]" compute the ID
-	 *  and type declarations.  Also handle "int x=3" and 'T t = new T("foo")'
-	 *  but if the separator is ',' you cannot use ',' in the initvalue.
-	 *  AttributeScope.addAttributes takes care of the separation so we are
-	 *  free here to use from '=' to end of string as the expression.
-	 *
-	 *  Set name, type, initvalue, and full decl instance vars.
-	 */
-	protected void extractAttribute(String decl) {
-		if ( decl==null ) {
-			return;
-		}
-		boolean inID = false;
-		int start = -1;
-		int rightEdgeOfDeclarator = decl.length()-1;
-		int equalsIndex = decl.indexOf('=');
-		if ( equalsIndex>0 ) {
-			// everything after the '=' is the init value
-			this.initValue = decl.substring(equalsIndex+1,decl.length());
-			rightEdgeOfDeclarator = equalsIndex-1;
-		}
-		// walk backwards looking for start of an ID
-		for (int i=rightEdgeOfDeclarator; i>=0; i--) {
-			// if we haven't found the end yet, keep going
-			if ( !inID && Character.isLetterOrDigit(decl.charAt(i)) ) {
-			    inID = true;
-			}
-			else if ( inID &&
-				      !(Character.isLetterOrDigit(decl.charAt(i))||
-				       decl.charAt(i)=='_') ) {
-				start = i+1;
-				break;
-			}
-		}
-		if ( start<0 && inID ) {
-			start = 0;
-		}
-		if ( start<0 ) {
-			ErrorManager.error(ErrorManager.MSG_CANNOT_FIND_ATTRIBUTE_NAME_IN_DECL,decl);
-		}
-		// walk forwards looking for end of an ID
-		int stop=-1;
-		for (int i=start; i<=rightEdgeOfDeclarator; i++) {
-			// if we haven't found the end yet, keep going
-			if ( !(Character.isLetterOrDigit(decl.charAt(i))||
-				decl.charAt(i)=='_') )
-			{
-				stop = i;
-				break;
-			}
-			if ( i==rightEdgeOfDeclarator ) {
-				stop = i+1;
-			}
-		}
-
-		// the name is the last ID
-		this.name = decl.substring(start,stop);
-
-		// the type is the decl minus the ID (could be empty)
-		this.type = decl.substring(0,start);
-		if ( stop<=rightEdgeOfDeclarator ) {
-			this.type += decl.substring(stop,rightEdgeOfDeclarator+1);
-		}
-		this.type = type.trim();
-		if ( this.type.length()==0 ) {
-			this.type = null;
-		}
-
-		this.decl = decl;
-	}
-
-	public String toString() {
-		if ( initValue!=null ) {
-			return type+" "+name+"="+initValue;
-		}
-		return type+" "+name;
-	}
-}
-
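The extractAttribute() parsing above is best shown with declarations taken from its own Javadoc. This sketch assumes org.antlr.tool.Attribute from the ANTLR 3 tool jar; the driver class is hypothetical. The name is the right-most identifier, anything after '=' becomes the init value, and the rest (trimmed) is the type.

import org.antlr.tool.Attribute;

public class AttributeSketch {
    public static void main(String[] args) {
        Attribute a = new Attribute("String foo");
        System.out.println(a.name + " / " + a.type);   // foo / String

        Attribute b = new Attribute("int x=3");
        System.out.println(b.name + " / " + b.type + " / " + b.initValue);  // x / int / 3
        System.out.println(b);                         // int x=3
    }
}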
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/AttributeScope.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/AttributeScope.java
deleted file mode 100644
index c2641da..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/AttributeScope.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.tool;
-
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.runtime.Token;
-
-import java.util.*;
-
-/** Track the attributes within a scope.  A named scope has just its list
- *  of attributes.  Each rule has potentially 3 scopes: return values,
- *  parameters, and an implicitly-named scope (i.e., a scope defined in a rule).
- *  Implicitly-defined scopes are named after the rule; rules and scopes then
- *  must live in the same name space--no collisions allowed.
- */
-public class AttributeScope {
-
-	/** All token scopes (token labels) share the same fixed scope
-	 *  of predefined attributes.  I keep this out of the runtime.Token
-	 *  object to avoid a runtime space burden.
-	 */
-	public static AttributeScope tokenScope = new AttributeScope("Token",null);
-	static {
-		tokenScope.addAttribute("text", null);
-		tokenScope.addAttribute("type", null);
-		tokenScope.addAttribute("line", null);
-		tokenScope.addAttribute("index", null);
-		tokenScope.addAttribute("pos", null);
-		tokenScope.addAttribute("channel", null);
-		tokenScope.addAttribute("tree", null);
-		tokenScope.addAttribute("int", null);
-	}
-
-	/** This scope is associated with which input token (for error handling)? */
-	public Token derivedFromToken;
-
-	public Grammar grammar;
-
-	/** The scope name */
-	private String name;
-
-	/** Not a rule scope, but visible to all rules "scope symbols { ...}" */
-	public boolean isDynamicGlobalScope;
-
-	/** Visible to all rules, but defined in rule "scope { int i; }" */
-	public boolean isDynamicRuleScope;
-
-	public boolean isParameterScope;
-
-	public boolean isReturnScope;
-
-	public boolean isPredefinedRuleScope;
-
-	public boolean isPredefinedLexerRuleScope;
-
-	/** The list of Attribute objects */
-	protected LinkedHashMap<String,Attribute> attributes = new LinkedHashMap();
-
-	/* Placeholder for compatibility with the CSharp3 target. */
-	public LinkedHashMap<String, GrammarAST> actions = new LinkedHashMap();
-
-	public AttributeScope(String name, Token derivedFromToken) {
-		this(null,name,derivedFromToken);
-	}
-
-	public AttributeScope(Grammar grammar, String name, Token derivedFromToken) {
-		this.grammar = grammar;
-		this.name = name;
-		this.derivedFromToken = derivedFromToken;
-	}
-
-	public String getName() {
-		if ( isParameterScope ) {
-			return name+"_parameter";
-		}
-		else if ( isReturnScope ) {
-			return name+"_return";
-		}
-		return name;
-	}
-
-	/** From a chunk of text holding the definitions of the attributes,
-	 *  pull them apart and create an Attribute for each one.  Add to
-	 *  the list of attributes for this scope.  Pass in the character
-	 *  that terminates a definition such as ',' or ';'.  For example,
-	 *
-	 *  scope symbols {
-	 *  	int n;
-	 *  	List names;
-	 *  }
-	 *
-	 *  would pass in definitions equal to the text in between {...} and
-	 *  separator=';'.  It results in two Attribute objects.
-	 */
-	public void addAttributes(String definitions, int separator) {
-		List<String> attrs = new ArrayList<String>();
-		CodeGenerator.getListOfArgumentsFromAction(definitions,0,-1,separator,attrs);
-		for (String a : attrs) {
-			Attribute attr = new Attribute(a);
-			if ( !isReturnScope && attr.initValue!=null ) {
-				ErrorManager.grammarError(ErrorManager.MSG_ARG_INIT_VALUES_ILLEGAL,
-										  grammar,
-										  derivedFromToken,
-										  attr.name);
-				attr.initValue=null; // wipe it out
-			}
-			attributes.put(attr.name, attr);
-		}
-	}
-
-	public void addAttribute(String name, String decl) {
-		attributes.put(name, new Attribute(name,decl));
-	}
-
-	/** Given @scope::name {action} define it for this attribute scope. Later,
-	 *  the code generator will ask for the actions table.
-	 */
-	public final void defineNamedAction(GrammarAST nameAST, GrammarAST actionAST)
-	{
-		String actionName = nameAST.getText();
-		GrammarAST a = actions.get(actionName);
-		if (a != null) {
-			ErrorManager.grammarError(ErrorManager.MSG_ACTION_REDEFINITION,
-									  grammar,
-									  nameAST.getToken(),
-									  nameAST.getText());
-		} else {
-			actions.put(actionName, actionAST);
-		}
-	}
-
-	public Attribute getAttribute(String name) {
-		return (Attribute)attributes.get(name);
-	}
-
-	/** Used by templates to get all attributes */
-	public List<Attribute> getAttributes() {
-		List<Attribute> a = new ArrayList<Attribute>();
-		a.addAll(attributes.values());
-		return a;
-	}
-
-	/** Return the set of keys that collide from
-	 *  this and other.
-	 */
-	public Set intersection(AttributeScope other) {
-		if ( other==null || other.size()==0 || size()==0 ) {
-			return null;
-		}
-		Set inter = new HashSet();
-		Set thisKeys = attributes.keySet();
-		for (Iterator it = thisKeys.iterator(); it.hasNext();) {
-			String key = (String) it.next();
-			if ( other.attributes.get(key)!=null ) {
-				inter.add(key);
-			}
-		}
-		if ( inter.size()==0 ) {
-			return null;
-		}
-		return inter;
-	}
-
-	public int size() {
-		return attributes==null?0:attributes.size();
-	}
-
-	public String toString() {
-		return (isDynamicGlobalScope?"global ":"")+getName()+":"+attributes;
-	}
-}
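A rough usage sketch of addAttributes(), mirroring the scope example in its Javadoc. It assumes org.antlr.tool.AttributeScope from the tool jar; the actual splitting is delegated to CodeGenerator.getListOfArgumentsFromAction, and per the Javadoc the body below yields two Attribute objects. The driver class is hypothetical.

import org.antlr.tool.AttributeScope;

public class AttributeScopeSketch {
    public static void main(String[] args) {
        // mirrors: scope symbols { int n; List names; }
        AttributeScope symbols = new AttributeScope("symbols", null);
        symbols.isDynamicGlobalScope = true;
        symbols.addAttributes("int n;\nList names;", ';');
        System.out.println(symbols.getAttributes().size());  // 2
        System.out.println(symbols.getAttribute("n").type);  // int
        System.out.println(symbols.getAttribute("names"));   // List names
    }
}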
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/BuildDependencyGenerator.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/BuildDependencyGenerator.java
deleted file mode 100644
index 844046e..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/BuildDependencyGenerator.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.Tool;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.misc.Utils;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-import org.stringtemplate.v4.STGroupFile;
-
-import java.io.*;
-import java.util.ArrayList;
-import java.util.List;
-
-/** Given a grammar file, show the dependencies on .tokens etc...
- *  Using ST, emit a simple "make compatible" list of dependencies.
- *  For example, combined grammar T.g (no token import) generates:
- *
- *		TParser.java : T.g
- * 		T.tokens : T.g
- * 		T__g : T.g
- *
- *  For tree grammar TP with import of T.tokens:
- *
- * 		TP.g : T.tokens
- * 		TP.java : TP.g
- *
- *  If "-lib libdir" is used on command-line with -depend, then include the
- *  path like
- *
- * 		TP.g : libdir/T.tokens
- *
- *  Pay attention to -o as well:
- *
- * 		outputdir/TParser.java : T.g
- *
- *  So this output shows what the grammar depends on *and* what it generates.
- *
- *  Operate on one grammar file at a time.  If given a list of .g on the
- *  command-line with -depend, just emit the dependencies.  The grammars
- *  may depend on each other, but the order doesn't matter.  Build tools,
- *  reading in this output, will know how to organize it.
- *
- *  This is a wee bit slow probably because the code generator has to load
- *  all of its template files in order to figure out the file extension
- *  for the generated recognizer.
- *
- *  This code was obvious until I removed redundant "./" on front of files
- *  and had to escape spaces in filenames :(
- */
-public class BuildDependencyGenerator {
-    protected String grammarFileName;
-    protected String tokenVocab;
-    protected Tool tool;
-    protected Grammar grammar;
-    protected CodeGenerator generator;
-    protected STGroup templates;
-
-    public BuildDependencyGenerator(Tool tool, String grammarFileName)
-            throws IOException {
-        this.tool = tool;
-        this.grammarFileName = grammarFileName;
-        grammar = tool.getRootGrammar(grammarFileName);
-        String language = (String) grammar.getOption("language");
-        generator = new CodeGenerator(tool, grammar, language);
-        generator.loadTemplates(language);
-    }
-
-    /** From T.g return a list of File objects that
-     *  name files ANTLR will emit from T.g.
-     */
-    public List<File> getGeneratedFileList() {
-        List<File> files = new ArrayList<File>();
-        File outputDir = tool.getOutputDirectory(grammarFileName);
-        if (outputDir.getName().equals(".")) {
-            outputDir = null;
-        } else if (outputDir.getName().indexOf(' ') >= 0) { // has spaces?
-            String escSpaces = Utils.replace(outputDir.toString(),
-                    " ",
-                    "\\ ");
-            outputDir = new File(escSpaces);
-        }
-        // add generated recognizer; e.g., TParser.java
-        String recognizer =
-                generator.getRecognizerFileName(grammar.name, grammar.type);
-        files.add(new File(outputDir, recognizer));
-        // add output vocab file; e.g., T.tokens. This is always generated to
-        // the base output directory, which will be just . if there is no -o option
-        //
-        files.add(new File(tool.getOutputDirectory(), generator.getVocabFileName()));
-        // are we generating a .h file?
-        ST headerExtST = null;
-        ST extST = generator.getTemplates().getInstanceOf("codeFileExtension");
-        if (generator.getTemplates().isDefined("headerFile")) {
-            headerExtST = generator.getTemplates().getInstanceOf("headerFileExtension");
-            String suffix = Grammar.grammarTypeToFileNameSuffix[grammar.type];
-            String fileName = grammar.name + suffix + headerExtST.render();
-            files.add(new File(outputDir, fileName));
-        }
-        if (grammar.type == Grammar.COMBINED) {
-            // add autogenerated lexer; e.g., TLexer.java TLexer.h TLexer.tokens
-            // don't add T__.g (just a temp file)
-            
-            String suffix = Grammar.grammarTypeToFileNameSuffix[Grammar.LEXER];
-            String lexer = grammar.name + suffix + extST.render();
-            files.add(new File(outputDir, lexer));
-
-            // TLexer.h
-            if (headerExtST != null) {
-                String header = grammar.name + suffix + headerExtST.render();
-                files.add(new File(outputDir, header));
-            }
-        // for combined, don't generate TLexer.tokens
-        }
-
-        // handle generated files for imported grammars
-        List<Grammar> imports =
-                grammar.composite.getDelegates(grammar.composite.getRootGrammar());
-        for (Grammar g : imports) {
-            outputDir = tool.getOutputDirectory(g.getFileName());
-            String fname = groomQualifiedFileName(outputDir.toString(), g.getRecognizerName() + extST.render());
-            files.add(new File(fname));
-        }
-
-        if (files.size() == 0) {
-            return null;
-        }
-        return files;
-    }
-
-    /**
-     * Return a list of File objects that name files ANTLR will read
-     * to process T.g. These can be .tokens files (if the grammar uses the
-     * tokenVocab option) as well as any imported grammar files.
-     */
-    public List<File> getDependenciesFileList() {
-        // Find all the things other than imported grammars
-        List<File> files = getNonImportDependenciesFileList();
-
-        // Handle imported grammars
-        List<Grammar> imports =
-                grammar.composite.getDelegates(grammar.composite.getRootGrammar());
-        for (Grammar g : imports) {
-            String libdir = tool.getLibraryDirectory();
-            String fileName = groomQualifiedFileName(libdir, g.fileName);
-            files.add(new File(fileName));
-        }
-
-        if (files.size() == 0) {
-            return null;
-        }
-        return files;
-    }
-
-    /**
-     * Return a list of File objects that name files ANTLR will read
-     * to process T.g. These can only be .tokens files, and only
-     * if they use the tokenVocab option.
-     *
-     * @return List of dependencies other than imported grammars
-     */
-    public List<File> getNonImportDependenciesFileList() {
-        List<File> files = new ArrayList<File>();
-
-        // handle token vocabulary loads
-        tokenVocab = (String) grammar.getOption("tokenVocab");
-        if (tokenVocab != null) {
-
-            File vocabFile = tool.getImportedVocabFile(tokenVocab);
-            files.add(vocabFile);
-        }
-
-        return files;
-    }
-
-    public ST getDependencies() {
-        loadDependencyTemplates();
-        ST dependenciesST = templates.getInstanceOf("dependencies");
-        dependenciesST.add("in", getDependenciesFileList());
-        dependenciesST.add("out", getGeneratedFileList());
-        dependenciesST.add("grammarFileName", grammar.fileName);
-        return dependenciesST;
-    }
-
-    public void loadDependencyTemplates() {
-        if (templates != null) return;
-        String fileName = "org/antlr/tool/templates/depend.stg";
-        templates = new STGroupFile(fileName);
-    }
-
-    public String getTokenVocab() {
-        return tokenVocab;
-    }
-
-    public CodeGenerator getGenerator() {
-        return generator;
-    }    
-
-    public String groomQualifiedFileName(String outputDir, String fileName) {
-        if (outputDir.equals(".")) {
-            return fileName;
-        } else if (outputDir.indexOf(' ') >= 0) { // has spaces?
-            String escSpaces = Utils.replace(outputDir.toString(),
-                    " ",
-                    "\\ ");
-            return escSpaces + File.separator + fileName;
-        } else {
-            return outputDir + File.separator + fileName;
-        }
-    }
-}
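A hedged sketch of driving the dependency generator deleted above from Java rather than via the -depend command-line flag; the Tool(String[]) constructor form and the grammar name T.g are illustrative assumptions:

import java.io.IOException;
import org.antlr.Tool;
import org.antlr.tool.BuildDependencyGenerator;
import org.stringtemplate.v4.ST;

public class DependDemo {
    public static void main(String[] args) throws IOException {
        Tool tool = new Tool(new String[] { "T.g" });      // assumed constructor form
        BuildDependencyGenerator dep = new BuildDependencyGenerator(tool, "T.g");
        ST st = dep.getDependencies();    // template filled with the "in" and "out" file lists
        System.out.println(st.render());  // make-style lines such as "TParser.java : T.g"
    }
}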
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/CompositeGrammar.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/CompositeGrammar.java
deleted file mode 100644
index b9ef82f..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/CompositeGrammar.java
+++ /dev/null
@@ -1,532 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.Label;
-import org.antlr.analysis.NFAState;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.grammar.v3.AssignTokenTypesWalker;
-import org.antlr.misc.Utils;
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.tree.CommonTreeNodeStream;
-
-import java.util.*;
-
-/** A tree of component (delegate) grammars.
- *
- *  Rules defined in delegates are "inherited" like multi-inheritance
- *  so you can override them.  All token types must be consistent across
- *  rules from all delegate grammars, so they must be stored here in one
- *  central place.
- *
- *  We have to start out assuming a composite grammar situation as we can't
- *  look into the grammar files a priori to see if there is a delegate
- *  statement.  Because of this, and to avoid duplicating token type tracking
- *  in each grammar, even single noncomposite grammars use one of these objects
- *  to track token types.
- */
-public class CompositeGrammar {
-	public static final int MIN_RULE_INDEX = 1;
-
-	public CompositeGrammarTree delegateGrammarTreeRoot;
-
-	/** Used during getRuleReferenceClosure to detect computation cycles */
-	protected Set<NFAState> refClosureBusy = new HashSet<NFAState>();
-
-	/** Used to assign state numbers; all grammars in composite share common
-	 *  NFA space.  This NFA tracks the state number to state mapping.
-	 */
-	public int stateCounter = 0;
-
-	/** The NFA states in the NFA built from rules across grammars in composite.
-	 *  Maps state number to NFAState object.
-	 *  This is a Vector instead of a List because I need to be able to grow
-	 *  this properly.  After talking to Josh Bloch, Collections guy at Sun,
-	 *  I decided this was the easiest solution.
-	 */
-	protected Vector<NFAState> numberToStateList = new Vector<NFAState>(1000);
-
-	/** Token names and literal tokens like "void" are uniquely indexed,
-	 *  with -1 implying EOF.  Characters are different; they go from
-	 *  -1 (EOF) to \uFFFE.  For example, 0 could be a binary byte you
-	 *  want to lex.  Labels of DFA/NFA transitions can be both tokens
-	 *  and characters.  I use negative numbers for bookkeeping labels
-	 *  like EPSILON. Char/String literals and token types overlap in the same
-	 *  space, however.
-	 */
-	protected int maxTokenType = Label.MIN_TOKEN_TYPE-1;
-
-	/** Map token like ID (but not literals like "while") to its token type */
-	public Map tokenIDToTypeMap = new LinkedHashMap();
-
-	/** Map token literals like "while" to their token types.  It may be that
-	 *  WHILE="while"=35, in which case both tokenIDToTypeMap and this
-	 *  field will have entries both mapped to 35.
-	 */
-	public Map<String, Integer> stringLiteralToTypeMap = new LinkedHashMap<String, Integer>();
-	/** Reverse index for stringLiteralToTypeMap */
-	public Vector<String> typeToStringLiteralList = new Vector<String>();
-
-	/** Map a token type to its token name.
-	 *  Must subtract MIN_TOKEN_TYPE from index.
-	 */
-	public Vector<String> typeToTokenList = new Vector<String>();
-
-	/** If combined or lexer grammar, track the rules.
-	 * 	Track lexer rules so we can warn about undefined tokens.
-	 *  This is combined set of lexer rules from all lexer grammars
-	 *  seen in all imports.
-	 */
-	protected Set<String> lexerRules = new HashSet<String>();
-
-	/** Rules are uniquely labeled from 1..n among all grammars */
-	protected int ruleIndex = MIN_RULE_INDEX;
-
-	/** Map a rule index to its Rule object; use a Vector on purpose as new
-	 *  collections stuff won't let me setSize and make it grow.  :(
-	 *  I need a specific guaranteed index, which the Collections stuff
-	 *  won't let me have.
-	 */
-	protected Vector<Rule> ruleIndexToRuleList = new Vector<Rule>();
-
-	public boolean watchNFAConversion = false;
-
-	protected void initTokenSymbolTables() {
-		// the faux token types take first NUM_FAUX_LABELS positions
-		// then we must have room for the predefined runtime token types
-		// like DOWN/UP used for tree parsing.
-		typeToTokenList.setSize(Label.NUM_FAUX_LABELS+Label.MIN_TOKEN_TYPE-1);
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.INVALID, "<INVALID>");
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOT, "<EOT>");
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SEMPRED, "<SEMPRED>");
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SET, "<SET>");
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EPSILON, Label.EPSILON_STR);
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOF, "EOF");
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOR_TOKEN_TYPE-1, "<EOR>");
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.DOWN-1, "DOWN");
-		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.UP-1, "UP");
-		tokenIDToTypeMap.put("<INVALID>", Utils.integer(Label.INVALID));
-		tokenIDToTypeMap.put("<EOT>", Utils.integer(Label.EOT));
-		tokenIDToTypeMap.put("<SEMPRED>", Utils.integer(Label.SEMPRED));
-		tokenIDToTypeMap.put("<SET>", Utils.integer(Label.SET));
-		tokenIDToTypeMap.put("<EPSILON>", Utils.integer(Label.EPSILON));
-		tokenIDToTypeMap.put("EOF", Utils.integer(Label.EOF));
-		tokenIDToTypeMap.put("<EOR>", Utils.integer(Label.EOR_TOKEN_TYPE));
-		tokenIDToTypeMap.put("DOWN", Utils.integer(Label.DOWN));
-		tokenIDToTypeMap.put("UP", Utils.integer(Label.UP));
-	}
-
-	public CompositeGrammar() {
-		initTokenSymbolTables();
-	}
-
-	public CompositeGrammar(Grammar g) {
-		this();
-		setDelegationRoot(g);
-	}
-
-	public void setDelegationRoot(Grammar root) {
-		delegateGrammarTreeRoot = new CompositeGrammarTree(root);
-		root.compositeTreeNode = delegateGrammarTreeRoot;
-	}
-
-	public Rule getRule(String ruleName) {
-		return delegateGrammarTreeRoot.getRule(ruleName);
-	}
-
-	public Object getOption(String key) {
-		return delegateGrammarTreeRoot.getOption(key);
-	}
-
-	/** Add delegate grammar as child of delegator */
-	public void addGrammar(Grammar delegator, Grammar delegate) {
-		if ( delegator.compositeTreeNode==null ) {
-			delegator.compositeTreeNode = new CompositeGrammarTree(delegator);
-		}
-		delegator.compositeTreeNode.addChild(new CompositeGrammarTree(delegate));
-
-		/*// find delegator in tree so we can add a child to it
-		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(delegator);
-		t.addChild();
-		*/
-		// make sure new grammar shares this composite
-		delegate.composite = this;
-	}
-
-	/** Get parent of this grammar */
-	public Grammar getDelegator(Grammar g) {
-		CompositeGrammarTree me = delegateGrammarTreeRoot.findNode(g);
-		if ( me==null ) {
-			return null; // not found
-		}
-		if ( me.parent!=null ) {
-			return me.parent.grammar;
-		}
-		return null;
-	}
-
-	/** Get list of all delegates from all grammars in the delegate subtree of g.
-	 *  The grammars are in delegation tree preorder.  Don't include g itself
-	 *  in list as it is not a delegate of itself.
-	 */
-	public List<Grammar> getDelegates(Grammar g) {
-		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(g);
-		if ( t==null ) {
-			return null; // no delegates
-		}
-		List<Grammar> grammars = t.getPostOrderedGrammarList();
-		grammars.remove(grammars.size()-1); // remove g (last one)
-		return grammars;
-	}
-
-	public List<Grammar> getDirectDelegates(Grammar g) {
-		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(g);
-		List<CompositeGrammarTree> children = t.children;
-		if ( children==null ) {
-			return null;
-		}
-		List<Grammar> grammars = new ArrayList();
-		for (int i = 0; children!=null && i < children.size(); i++) {
-			CompositeGrammarTree child = (CompositeGrammarTree) children.get(i);
-			grammars.add(child.grammar);
-		}
-		return grammars;
-	}
-
-	/** Get delegates below direct delegates of g */
-	public List<Grammar> getIndirectDelegates(Grammar g) {
-		List<Grammar> direct = getDirectDelegates(g);
-		List<Grammar> delegates = getDelegates(g);
-		delegates.removeAll(direct);
-		return delegates;
-	}
-
-	/** Return list of delegate grammars from root down to g.
-	 *  Order is root, ..., g.parent.  (g not included).
-	 */
-	public List<Grammar> getDelegators(Grammar g) {
-		if ( g==delegateGrammarTreeRoot.grammar ) {
-			return null;
-		}
-		List<Grammar> grammars = new ArrayList();
-		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(g);
-		// walk backwards to root, collecting grammars
-		CompositeGrammarTree p = t.parent;
-		while ( p!=null ) {
-			grammars.add(0, p.grammar); // add to head so in order later
-			p = p.parent;
-		}
-		return grammars;
-	}
-
-	/** Get set of rules for grammar g that need to have manual delegation
-	 *  methods.  This is the list of rules collected from all direct/indirect
-	 *  delegates minus rules overridden in grammar g.
-	 *
-	 *  This returns null except for the delegate root because it is the only
-	 *  one that has to have a complete grammar rule interface.  The delegates
-	 *  should not be instantiated directly for use as parsers (you can create
-	 *  them to pass to the root parser's ctor as arguments).
-	 */
-	public Set<Rule> getDelegatedRules(Grammar g) {
-		if ( g!=delegateGrammarTreeRoot.grammar ) {
-			return null;
-		}
-		Set<Rule> rules = getAllImportedRules(g);
-		for (Iterator it = rules.iterator(); it.hasNext();) {
-			Rule r = (Rule) it.next();
-			Rule localRule = g.getLocallyDefinedRule(r.name);
-			// if locally defined or it's not local but synpred, don't make
-			// a delegation method
-			if ( localRule!=null || r.isSynPred ) {
-				it.remove(); // kill overridden rules
-			}
-		}
-		return rules;
-	}
-
-	/** Get all rule definitions from all direct/indirect delegate grammars
-	 *  of g.
-	 */
-	public Set<Rule> getAllImportedRules(Grammar g) {
-		Set<String> ruleNames = new HashSet();
-		Set<Rule> rules = new HashSet();
-		CompositeGrammarTree subtreeRoot = delegateGrammarTreeRoot.findNode(g);
-
-		List<Grammar> grammars = subtreeRoot.getPreOrderedGrammarList();
-		// walk all grammars preorder, priority given to grammar listed first.
-		for (int i = 0; i < grammars.size(); i++) {
-			Grammar delegate = (org.antlr.tool.Grammar) grammars.get(i);
-			// for each rule in delegate, add to rules if no rule with that
-			// name has been seen.  (can't use removeAll; wrong hashcode/equals on Rule)
-			for (Iterator it = delegate.getRules().iterator(); it.hasNext();) {
-				Rule r = (Rule)it.next();
-				if ( !ruleNames.contains(r.name) ) {
-					ruleNames.add(r.name); // track that we've seen this
-					rules.add(r);
-				}
-			}
-		}
-		return rules;
-	}
-
-	public Grammar getRootGrammar() {
-		if ( delegateGrammarTreeRoot==null ) {
-			return null;
-		}
-		return delegateGrammarTreeRoot.grammar;
-	}
-
-	public Grammar getGrammar(String grammarName) {
-		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(grammarName);
-		if ( t!=null ) {
-			return t.grammar;
-		}
-		return null;
-	}
-
-	// NFA spans multiple grammars, must handle here
-
-	public int getNewNFAStateNumber() {
-		return stateCounter++;
-	}
-
-	public void addState(NFAState state) {
-		numberToStateList.setSize(state.stateNumber+1); // make sure we have room
-		numberToStateList.set(state.stateNumber, state);
-	}
-
-	public NFAState getState(int s) {
-		return (NFAState)numberToStateList.get(s);
-	}
-
-	public void assignTokenTypes() throws RecognitionException {
-		// ASSIGN TOKEN TYPES for all delegates (same walker)
-		//System.out.println("### assign types");
-		AssignTokenTypesWalker ttypesWalker = new AssignTokenTypesBehavior();
-		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
-		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
-			Grammar g = (Grammar)grammars.get(i);
-			ttypesWalker.setTreeNodeStream(new CommonTreeNodeStream(g.getGrammarTree()));
-			try {
-				//System.out.println("    walking "+g.name);
-				ttypesWalker.grammar_(g);
-			}
-			catch (RecognitionException re) {
-				ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
-								   re);
-			}
-		}
-		// the walker has filled literals, tokens, and alias tables.
-		// now tell it to define them in the root grammar
-		ttypesWalker.defineTokens(delegateGrammarTreeRoot.grammar);
-	}
-
-	public void translateLeftRecursiveRules() {
-		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
-		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
-			Grammar g = grammars.get(i);
-			if ( !(g.type==Grammar.PARSER || g.type==Grammar.COMBINED) ) continue;
-			for (GrammarAST r : g.grammarTree.findAllType(ANTLRParser.RULE)) {
-				if ( !Character.isUpperCase(r.getChild(0).getText().charAt(0)) ) {
-					if ( LeftRecursiveRuleAnalyzer.hasImmediateRecursiveRuleRefs(r, r.enclosingRuleName) ) {
-						g.translateLeftRecursiveRule(r);
-					}
-				}
-			}
-		}
-	}
-
-	public void defineGrammarSymbols() {
-		delegateGrammarTreeRoot.trimLexerImportsIntoCombined();
-		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
-		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
-			Grammar g = grammars.get(i);
-			g.defineGrammarSymbols();
-		}
-		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
-			Grammar g = grammars.get(i);
-			g.checkNameSpaceAndActions();
-		}
-		minimizeRuleSet();
-	}
-
-	public void createNFAs() {
-		if ( ErrorManager.doNotAttemptAnalysis() ) {
-			return;
-		}
-		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
-		//System.out.println("### createNFAs for composite; grammars: "+names);
-		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
-			Grammar g = (Grammar)grammars.get(i);
-			g.createRuleStartAndStopNFAStates();
-		}
-		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
-			Grammar g = (Grammar)grammars.get(i);
-			g.buildNFA();
-		}
-	}
-
-	public void minimizeRuleSet() {
-		Set<String> ruleDefs = new HashSet<String>();
-		_minimizeRuleSet(ruleDefs, delegateGrammarTreeRoot);
-	}
-
-	public void _minimizeRuleSet(Set<String> ruleDefs,
-								 CompositeGrammarTree p) {
-		Set<String> localRuleDefs = new HashSet<String>();
-		Set<String> overrides = new HashSet<String>();
-		// compute set of non-overridden rules for this delegate
-		for (Rule r : p.grammar.getRules()) {
-			if ( !ruleDefs.contains(r.name) ) {
-				localRuleDefs.add(r.name);
-			}
-			else if ( !r.name.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ) {
-				// record any overridden rule 'cept tokens rule
-				overrides.add(r.name);
-			}
-		}
-		//System.out.println("rule defs for "+p.grammar.name+": "+localRuleDefs);
-		//System.out.println("overridden rule for "+p.grammar.name+": "+overrides);
-		p.grammar.overriddenRules = overrides;
-
-		// make set of all rules defined thus far walking delegation tree.
-		// the same rule in two delegates resolves in favor of first found
-		// in tree therefore second must not be included
-		ruleDefs.addAll(localRuleDefs);
-
-		// pass larger set of defined rules to delegates
-		if ( p.children!=null ) {
-			for (CompositeGrammarTree delegate : p.children) {
-				_minimizeRuleSet(ruleDefs, delegate);
-			}
-		}
-	}
-
-	/*
-	public void minimizeRuleSet() {
-		Set<Rule> refs = _minimizeRuleSet(delegateGrammarTreeRoot);
-		System.out.println("all rule refs: "+refs);
-	}
-
-	public Set<Rule> _minimizeRuleSet(CompositeGrammarTree p) {
-		Set<Rule> refs = new HashSet<Rule>();
-		for (GrammarAST refAST : p.grammar.ruleRefs) {
-			System.out.println("ref "+refAST.getText()+": "+refAST.NFAStartState+
-							   " enclosing rule: "+refAST.NFAStartState.enclosingRule+
-							   " invoking rule: "+((NFAState)refAST.NFAStartState.transition[0].target).enclosingRule);
-			refs.add(((NFAState)refAST.NFAStartState.transition[0].target).enclosingRule);
-		}
-
-		if ( p.children!=null ) {
-			for (CompositeGrammarTree delegate : p.children) {
-				Set<Rule> delegateRuleRefs = _minimizeRuleSet(delegate);
-				refs.addAll(delegateRuleRefs);
-			}
-		}
-
-		return refs;
-	}
-	*/
-
-	/*
-	public void oldminimizeRuleSet() {
-		// first walk to remove all overridden rules
-		Set<String> ruleDefs = new HashSet<String>();
-		Set<String> ruleRefs = new HashSet<String>();
-		for (GrammarAST refAST : delegateGrammarTreeRoot.grammar.ruleRefs) {
-			String rname = refAST.getText();
-			ruleRefs.add(rname);
-		}
-		_minimizeRuleSet(ruleDefs,
-						 ruleRefs,
-						 delegateGrammarTreeRoot);
-		System.out.println("overall rule defs: "+ruleDefs);
-	}
-
-	public void _minimizeRuleSet(Set<String> ruleDefs,
-								 Set<String> ruleRefs,
-								 CompositeGrammarTree p) {
-		Set<String> localRuleDefs = new HashSet<String>();
-		for (Rule r : p.grammar.getRules()) {
-			if ( !ruleDefs.contains(r.name) ) {
-				localRuleDefs.add(r.name);
-				ruleDefs.add(r.name);
-			}
-		}
-		System.out.println("rule defs for "+p.grammar.name+": "+localRuleDefs);
-
-		// remove locally-defined rules not in ref set
-		// find intersection of local rules and references from delegator
-		// that is set of rules needed by delegator
-		Set<String> localRuleDefsSatisfyingRefsFromBelow = new HashSet<String>();
-		for (String r : ruleRefs) {
-			if ( localRuleDefs.contains(r) ) {
-				localRuleDefsSatisfyingRefsFromBelow.add(r);
-			}
-		}
-
-		// now get list of refs from localRuleDefsSatisfyingRefsFromBelow.
-		// Those rules are also allowed in this delegate
-		for (GrammarAST refAST : p.grammar.ruleRefs) {
-			if ( localRuleDefsSatisfyingRefsFromBelow.contains(refAST.enclosingRuleName) ) {
-				// found rule ref within needed rule
-			}
-		}
-
-		// remove rule refs not in the new rule def set
-
-		// walk all children, adding rules not already defined
-		if ( p.children!=null ) {
-			for (CompositeGrammarTree delegate : p.children) {
-				_minimizeRuleSet(ruleDefs, ruleRefs, delegate);
-			}
-		}
-	}
-	*/
-
-	/*
-	public void trackNFAStatesThatHaveLabeledEdge(Label label,
-												  NFAState stateWithLabeledEdge)
-	{
-		Set<NFAState> states = typeToNFAStatesWithEdgeOfTypeMap.get(label);
-		if ( states==null ) {
-			states = new HashSet<NFAState>();
-			typeToNFAStatesWithEdgeOfTypeMap.put(label, states);
-		}
-		states.add(stateWithLabeledEdge);
-	}
-
-	public Map<Label, Set<NFAState>> getTypeToNFAStatesWithEdgeOfTypeMap() {
-		return typeToNFAStatesWithEdgeOfTypeMap;
-	}
-
-	public Set<NFAState> getStatesWithEdge(Label label) {
-		return typeToNFAStatesWithEdgeOfTypeMap.get(label);
-	}
-*/
-}
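A minimal, hedged sketch of the delegation-tree queries deleted above; it is a fragment that assumes rootGrammar and delegateGrammar were already built by the normal Tool/Grammar pipeline, and "expr" is a hypothetical rule name:

// Wire a delegate under the root, then query the composite.
CompositeGrammar composite = new CompositeGrammar(rootGrammar);   // root becomes the delegation root
composite.addGrammar(rootGrammar, delegateGrammar);               // delegate shares the composite's token space
List<Grammar> delegates  = composite.getDelegates(rootGrammar);       // every delegate below the root
List<Grammar> delegators = composite.getDelegators(delegateGrammar);  // root, ..., parent of the delegate
Rule expr = composite.getRule("expr");                            // resolved across the whole delegation tree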
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/DOTGenerator.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/DOTGenerator.java
deleted file mode 100644
index 4e906a3..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/DOTGenerator.java
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.Tool;
-import org.antlr.analysis.*;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.misc.Utils;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-import org.stringtemplate.v4.STGroupDir;
-
-import java.util.*;
-
-/** The DOT (part of graphviz) generation aspect. */
-public class DOTGenerator {
-	public static final boolean STRIP_NONREDUCED_STATES = false;
-
-	protected String arrowhead="normal";
-	protected String rankdir="LR";
-
-	/** Library of output templates; use <attrname> format */
-    public static STGroup stlib = new STGroupDir("org/antlr/tool/templates/dot/dfa");
-
-    /** To prevent infinite recursion when walking state machines, record
-     *  which states we've visited.  Make a new set every time you start
-     *  walking in case you reuse this object.
-     */
-    protected Set markedStates = null;
-
-    protected Grammar grammar;
-
-    /** This aspect is associated with a grammar */
-	public DOTGenerator(Grammar grammar) {
-		this.grammar = grammar;
-	}
-
-    /** Return a String containing a DOT description that, when displayed,
-     *  will show the incoming state machine visually.  All nodes reachable
-     *  from startState will be included.
-     */
-    public String getDOT(State startState) {
-		if ( startState==null ) {
-			return null;
-		}
-		// The output DOT graph for visualization
-		ST dot = null;
-		markedStates = new HashSet();
-        if ( startState instanceof DFAState ) {
-            dot = stlib.getInstanceOf("dfa");
-			dot.add("startState",
-					Utils.integer(startState.stateNumber));
-			dot.add("useBox",
-					Boolean.valueOf(Tool.internalOption_ShowNFAConfigsInDFA));
-			walkCreatingDFADOT(dot, (DFAState)startState);
-        }
-        else {
-            dot = stlib.getInstanceOf("nfa");
-			dot.add("startState",
-					Utils.integer(startState.stateNumber));
-			walkRuleNFACreatingDOT(dot, startState);
-        }
-		dot.add("rankdir", rankdir);
-        return dot.toString();
-    }
-
-    /** Return a String containing a DOT description that, when displayed,
-     *  will show the incoming state machine visually.  All nodes reachable
-     *  from startState will be included.
-    public String getRuleNFADOT(State startState) {
-        // The output DOT graph for visualization
-        ST dot = stlib.getInstanceOf("nfa");
-
-        markedStates = new HashSet();
-        dot.add("startState",
-                Utils.integer(startState.stateNumber));
-        walkRuleNFACreatingDOT(dot, startState);
-        return dot.toString();
-    }
-	 */
-
-    /** Do a depth-first walk of the state machine graph and
-     *  fill a DOT description template.  Keep filling the
-     *  states and edges attributes.
-     */
-    protected void walkCreatingDFADOT(ST dot,
-									  DFAState s)
-    {
-		if ( markedStates.contains(Utils.integer(s.stateNumber)) ) {
-			return; // already visited this node
-        }
-
-		markedStates.add(Utils.integer(s.stateNumber)); // mark this node as completed.
-
-        // first add this node
-        ST st;
-        if ( s.isAcceptState() ) {
-            st = stlib.getInstanceOf("stopstate");
-        }
-        else {
-            st = stlib.getInstanceOf("state");
-        }
-        st.add("name", getStateLabel(s));
-        dot.add("states", st);
-
-        // make a DOT edge for each transition
-		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
-			Transition edge = (Transition) s.transition(i);
-			/*
-			System.out.println("dfa "+s.dfa.decisionNumber+
-				" edge from s"+s.stateNumber+" ["+i+"] of "+s.getNumberOfTransitions());
-			*/
-			if ( STRIP_NONREDUCED_STATES ) {
-				if ( edge.target instanceof DFAState &&
-					((DFAState)edge.target).getAcceptStateReachable()!=DFA.REACHABLE_YES )
-				{
-					continue; // don't generate nodes for terminal states
-				}
-			}
-			st = stlib.getInstanceOf("edge");
-			st.add("label", getEdgeLabel(edge));
-			st.add("src", getStateLabel(s));
-            st.add("target", getStateLabel(edge.target));
-			st.add("arrowhead", arrowhead);
-            dot.add("edges", st);
-            walkCreatingDFADOT(dot, (DFAState)edge.target); // keep walkin'
-        }
-    }
-
-    /** Do a depth-first walk of the state machine graph and
-     *  fill a DOT description template.  Keep filling the
-     *  states and edges attributes.  We know this is an NFA
-     *  for a rule so don't traverse edges to other rules and
-     *  don't go past rule end state.
-     */
-    protected void walkRuleNFACreatingDOT(ST dot,
-                                          State s)
-    {
-        if ( markedStates.contains(s) ) {
-            return; // already visited this node
-        }
-
-        markedStates.add(s); // mark this node as completed.
-
-        // first add this node
-        ST stateST;
-        if ( s.isAcceptState() ) {
-            stateST = stlib.getInstanceOf("stopstate");
-        }
-        else {
-            stateST = stlib.getInstanceOf("state");
-        }
-        stateST.add("name", getStateLabel(s));
-        dot.add("states", stateST);
-
-        if ( s.isAcceptState() )  {
-            return; // don't go past end of rule node to the follow states
-        }
-
-        // special case: if decision point, then line up the alt start states
-        // unless it's an end of block
-		if ( ((NFAState)s).isDecisionState() ) {
-			GrammarAST n = ((NFAState)s).associatedASTNode;
-			if ( n!=null && n.getType()!=ANTLRParser.EOB ) {
-				ST rankST = stlib.getInstanceOf("decision-rank");
-				NFAState alt = (NFAState)s;
-				while ( alt!=null ) {
-					rankST.add("states", getStateLabel(alt));
-					if ( alt.transition[1] !=null ) {
-						alt = (NFAState)alt.transition[1].target;
-					}
-					else {
-						alt=null;
-					}
-				}
-				dot.add("decisionRanks", rankST);
-			}
-		}
-
-        // make a DOT edge for each transition
-		ST edgeST = null;
-		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
-            Transition edge = (Transition) s.transition(i);
-            if ( edge instanceof RuleClosureTransition ) {
-                RuleClosureTransition rr = ((RuleClosureTransition)edge);
-                // don't jump to other rules, but display edge to follow node
-                edgeST = stlib.getInstanceOf("edge");
-				if ( rr.rule.grammar != grammar ) {
-					edgeST.add("label", "<" + rr.rule.grammar.name + "." + rr.rule.name + ">");
-				}
-				else {
-					edgeST.add("label", "<" + rr.rule.name + ">");
-				}
-				edgeST.add("src", getStateLabel(s));
-				edgeST.add("target", getStateLabel(rr.followState));
-				edgeST.add("arrowhead", arrowhead);
-                dot.add("edges", edgeST);
-				walkRuleNFACreatingDOT(dot, rr.followState);
-                continue;
-            }
-			if ( edge.isAction() ) {
-				edgeST = stlib.getInstanceOf("action-edge");
-			}
-			else if ( edge.isEpsilon() ) {
-				edgeST = stlib.getInstanceOf("epsilon-edge");
-			}
-			else {
-				edgeST = stlib.getInstanceOf("edge");
-			}
-			edgeST.add("label", getEdgeLabel(edge));
-            edgeST.add("src", getStateLabel(s));
-			edgeST.add("target", getStateLabel(edge.target));
-			edgeST.add("arrowhead", arrowhead);
-            dot.add("edges", edgeST);
-            walkRuleNFACreatingDOT(dot, edge.target); // keep walkin'
-        }
-    }
-
-    /*
-	public void writeDOTFilesForAllRuleNFAs() throws IOException {
-        Collection rules = grammar.getRules();
-        for (Iterator itr = rules.iterator(); itr.hasNext();) {
-			Grammar.Rule r = (Grammar.Rule) itr.next();
-            String ruleName = r.name;
-            writeDOTFile(
-                    ruleName,
-                    getRuleNFADOT(grammar.getRuleStartState(ruleName)));
-        }
-    }
-    */
-
-    /*
-	public void writeDOTFilesForAllDecisionDFAs() throws IOException {
-        // for debugging, create a DOT file for each decision in
-        // a directory named for the grammar.
-        File grammarDir = new File(grammar.name+"_DFAs");
-        grammarDir.mkdirs();
-        List decisionList = grammar.getDecisionNFAStartStateList();
-        if ( decisionList==null ) {
-            return;
-        }
-        int i = 1;
-        Iterator iter = decisionList.iterator();
-        while (iter.hasNext()) {
-            NFAState decisionState = (NFAState)iter.next();
-            DFA dfa = decisionState.getDecisionASTNode().getLookaheadDFA();
-            if ( dfa!=null ) {
-                String dot = getDOT( dfa.startState );
-                writeDOTFile(grammarDir+"/dec-"+i, dot);
-            }
-            i++;
-        }
-    }
-    */
-
-    /** Fix edge strings so they print out in DOT properly;
-	 *  generate any gated predicates on edge too.
-	 */
-    protected String getEdgeLabel(Transition edge) {
-		String label = edge.label.toString(grammar);
-		label = Utils.replace(label,"\\", "\\\\");
-		label = Utils.replace(label,"\"", "\\\"");
-		label = Utils.replace(label,"\n", "\\\\n");
-		label = Utils.replace(label,"\r", "");
-		if ( label.equals(Label.EPSILON_STR) ) {
-            label = "e";
-        }
-		State target = edge.target;
-		if ( !edge.isSemanticPredicate() && target instanceof DFAState ) {
-			// look for gated predicates; don't add gated to simple sempred edges
-			SemanticContext preds =
-				((DFAState)target).getGatedPredicatesInNFAConfigurations();
-			if ( preds!=null ) {
-				String predsStr = "";
-				predsStr = "&&{"+
-					preds.genExpr(grammar.generator,
-								  grammar.generator.getTemplates(), null).toString()
-					+"}?";
-				label += predsStr;
-			}
-		}
-        return label;
-    }
-
-    protected String getStateLabel(State s) {
-        if ( s==null ) {
-            return "null";
-        }
-        String stateLabel = String.valueOf(s.stateNumber);
-		if ( s instanceof DFAState ) {
-            StringBuffer buf = new StringBuffer(250);
-			buf.append('s');
-			buf.append(s.stateNumber);
-			if ( Tool.internalOption_ShowNFAConfigsInDFA ) {
-				if ( s instanceof DFAState ) {
-					if ( ((DFAState)s).abortedDueToRecursionOverflow ) {
-						buf.append("\\n");
-						buf.append("abortedDueToRecursionOverflow");
-					}
-				}
-				Set alts = ((DFAState)s).getAltSet();
-				if ( alts!=null ) {
-					buf.append("\\n");
-					// separate alts
-					List altList = new ArrayList();
-					altList.addAll(alts);
-					Collections.sort(altList);
-					Set configurations = ((DFAState) s).nfaConfigurations;
-					for (int altIndex = 0; altIndex < altList.size(); altIndex++) {
-						Integer altI = (Integer) altList.get(altIndex);
-						int alt = altI.intValue();
-						if ( altIndex>0 ) {
-							buf.append("\\n");
-						}
-						buf.append("alt");
-						buf.append(alt);
-						buf.append(':');
-						// get a list of configs for just this alt
-						// it will help us print better later
-						List configsInAlt = new ArrayList();
-						for (Iterator it = configurations.iterator(); it.hasNext();) {
-							NFAConfiguration c = (NFAConfiguration) it.next();
-							if ( c.alt!=alt ) continue;
-							configsInAlt.add(c);
-						}
-						int n = 0;
-						for (int cIndex = 0; cIndex < configsInAlt.size(); cIndex++) {
-							NFAConfiguration c =
-								(NFAConfiguration)configsInAlt.get(cIndex);
-							n++;
-							buf.append(c.toString(false));
-							if ( (cIndex+1)<configsInAlt.size() ) {
-								buf.append(", ");
-							}
-							if ( n%5==0 && (configsInAlt.size()-cIndex)>3 ) {
-								buf.append("\\n");
-							}
-						}
-					}
-				}
-			}
-            stateLabel = buf.toString();
-        }
-		if ( (s instanceof NFAState) && ((NFAState)s).isDecisionState() ) {
-			stateLabel = stateLabel+",d="+
-					((NFAState)s).getDecisionNumber();
-			if ( ((NFAState)s).endOfBlockStateNumber!=State.INVALID_STATE_NUMBER ) {
-				stateLabel += ",eob="+((NFAState)s).endOfBlockStateNumber;
-			}
-		}
-		else if ( (s instanceof NFAState) &&
-			((NFAState)s).endOfBlockStateNumber!=State.INVALID_STATE_NUMBER)
-		{
-			NFAState n = ((NFAState)s);
-			stateLabel = stateLabel+",eob="+n.endOfBlockStateNumber;
-		}
-        else if ( s instanceof DFAState && ((DFAState)s).isAcceptState() ) {
-            stateLabel = stateLabel+
-                    "=>"+((DFAState)s).getUniquelyPredictedAlt();
-        }
-        return '"'+stateLabel+'"';
-    }
-
-	public String getArrowheadType() {
-		return arrowhead;
-	}
-
-	public void setArrowheadType(String arrowhead) {
-		this.arrowhead = arrowhead;
-	}
-
-	public String getRankdir() {
-		return rankdir;
-	}
-
-	public void setRankdir(String rankdir) {
-		this.rankdir = rankdir;
-	}
-}
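A short, hedged fragment showing a typical use of the DOT generation deleted above: build a DOTGenerator for a grammar, render the state machine reachable from some start state, and write the result for graphviz. The grammar and startState objects are assumed to come from the usual analysis pipeline:

DOTGenerator dg = new DOTGenerator(grammar);
dg.setRankdir("TB");                     // top-to-bottom instead of the default "LR"
String dot = dg.getDOT(startState);      // returns null if startState is null
if (dot != null) {
    try (java.io.FileWriter w = new java.io.FileWriter("decision.dot")) {
        w.write(dot);                    // then e.g. "dot -Tpng decision.dot -o decision.png"
    } catch (java.io.IOException ioe) {
        ioe.printStackTrace();
    }
}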
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/ErrorManager.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/ErrorManager.java
deleted file mode 100644
index 7baaf68..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/ErrorManager.java
+++ /dev/null
@@ -1,924 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.Tool;
-import org.antlr.analysis.DFAState;
-import org.antlr.analysis.DecisionProbe;
-import org.antlr.misc.BitSet;
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.Token;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STErrorListener;
-import org.stringtemplate.v4.STGroup;
-import org.stringtemplate.v4.STGroupFile;
-import org.stringtemplate.v4.misc.STMessage;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.util.*;
-
-/** Defines all the errors ANTLR can generate for both the tool and for
- *  issues with a grammar.
- *
- *  Here is a list of language names:
- *
- *  http://ftp.ics.uci.edu/pub/ietf/http/related/iso639.txt
- *
- *  Here is a list of country names:
- *
- *  http://www.chemie.fu-berlin.de/diverse/doc/ISO_3166.html
- *
- *  I use constants not strings to identify messages as the compiler will
- *  find any errors/mismatches rather than leaving a mistyped string in
- *  the code to be found randomly in the future.  Further, Intellij can
- *  do field name expansion to save me some typing.  I have to map
- *  int constants to template names, however, which could introduce a mismatch.
- *  Someone could provide a .stg file that had a template name wrong.  When
- *  I load the group, then, I must verify that all messages are there.
- *
- *  This is essentially the functionality of the resource bundle stuff Java
- *  has, but I don't want to load a property file--I want to load a template
- *  group file and this is so simple, why mess with their junk.
- *
- *  I use the default Locale as defined by java to compute a group file name
- *  in the org/antlr/tool/templates/messages dir called en_US.stg and so on.
- *
- *  Normally we want to use the default locale, but often a message file will
- *  not exist for it so we must fall back on the US locale.
- *
- *  During initialization of this class, all errors go straight to System.err.
- *  There is no way around this.  If I have not set up the error system, how
- *  can I do errors properly?  For example, if the string template group file
- *  full of messages has an error, how could I print to anything but System.err?
- *
- *  TODO: how to map locale to a file encoding for the stringtemplate group file?
- *  ST knows how to pay attention to the default encoding so it
- *  should probably just work unless a GUI sets the locale to some Chinese
- *  variation but System.getProperty("file.encoding") is US.  Hmm...
- *
- *  TODO: get antlr.g etc.. parsing errors to come here.
- */
-public class ErrorManager {
-	// TOOL ERRORS
-	// file errors
-	public static final int MSG_CANNOT_WRITE_FILE = 1;
-	public static final int MSG_CANNOT_CLOSE_FILE = 2;
-	public static final int MSG_CANNOT_FIND_TOKENS_FILE = 3;
-	public static final int MSG_ERROR_READING_TOKENS_FILE = 4;
-	public static final int MSG_DIR_NOT_FOUND = 5;
-	public static final int MSG_OUTPUT_DIR_IS_FILE = 6;
-	public static final int MSG_CANNOT_OPEN_FILE = 7;
-	public static final int MSG_FILE_AND_GRAMMAR_NAME_DIFFER = 8;
-	public static final int MSG_FILENAME_EXTENSION_ERROR = 9;
-
-	public static final int MSG_INTERNAL_ERROR = 10;
-	public static final int MSG_INTERNAL_WARNING = 11;
-	public static final int MSG_ERROR_CREATING_ARTIFICIAL_RULE = 12;
-	public static final int MSG_TOKENS_FILE_SYNTAX_ERROR = 13;
-	public static final int MSG_CANNOT_GEN_DOT_FILE = 14;
-	public static final int MSG_BAD_AST_STRUCTURE = 15;
-	public static final int MSG_BAD_ACTION_AST_STRUCTURE = 16;
-
-	// code gen errors
-	public static final int MSG_MISSING_CODE_GEN_TEMPLATES = 20;
-	public static final int MSG_MISSING_CYCLIC_DFA_CODE_GEN_TEMPLATES = 21;
-	public static final int MSG_CODE_GEN_TEMPLATES_INCOMPLETE = 22;
-	public static final int MSG_CANNOT_CREATE_TARGET_GENERATOR = 23;
-	//public static final int MSG_CANNOT_COMPUTE_SAMPLE_INPUT_SEQ = 24;
-
-	// GRAMMAR ERRORS
-	public static final int MSG_SYNTAX_ERROR = 100;
-	public static final int MSG_RULE_REDEFINITION = 101;
-	public static final int MSG_LEXER_RULES_NOT_ALLOWED = 102;
-	public static final int MSG_PARSER_RULES_NOT_ALLOWED = 103;
-	public static final int MSG_CANNOT_FIND_ATTRIBUTE_NAME_IN_DECL = 104;
-	public static final int MSG_NO_TOKEN_DEFINITION = 105;
-	public static final int MSG_UNDEFINED_RULE_REF = 106;
-	public static final int MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE = 107;
-	public static final int MSG_CANNOT_ALIAS_TOKENS_IN_LEXER = 108;
-	public static final int MSG_ATTRIBUTE_REF_NOT_IN_RULE = 111;
-	public static final int MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF = 112;
-	public static final int MSG_UNKNOWN_ATTRIBUTE_IN_SCOPE = 113;
-	public static final int MSG_UNKNOWN_SIMPLE_ATTRIBUTE = 114;
-	public static final int MSG_INVALID_RULE_PARAMETER_REF = 115;
-	public static final int MSG_UNKNOWN_RULE_ATTRIBUTE = 116;
-	public static final int MSG_ISOLATED_RULE_SCOPE = 117;
-	public static final int MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE = 118;
-	public static final int MSG_LABEL_CONFLICTS_WITH_RULE = 119;
-	public static final int MSG_LABEL_CONFLICTS_WITH_TOKEN = 120;
-	public static final int MSG_LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE = 121;
-	public static final int MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL = 122;
-	public static final int MSG_ATTRIBUTE_CONFLICTS_WITH_RULE = 123;
-	public static final int MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL = 124;
-	public static final int MSG_LABEL_TYPE_CONFLICT = 125;
-	public static final int MSG_ARG_RETVAL_CONFLICT = 126;
-	public static final int MSG_NONUNIQUE_REF = 127;
-	public static final int MSG_FORWARD_ELEMENT_REF = 128;
-	public static final int MSG_MISSING_RULE_ARGS = 129;
-	public static final int MSG_RULE_HAS_NO_ARGS = 130;
-	public static final int MSG_ARGS_ON_TOKEN_REF = 131;
-	public static final int MSG_RULE_REF_AMBIG_WITH_RULE_IN_ALT = 132;
-	public static final int MSG_ILLEGAL_OPTION = 133;
-	public static final int MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT = 134;
-	public static final int MSG_UNDEFINED_TOKEN_REF_IN_REWRITE = 135;
-	public static final int MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS = 136;
-	public static final int MSG_UNDEFINED_LABEL_REF_IN_REWRITE = 137;
-	public static final int MSG_NO_GRAMMAR_START_RULE = 138;
-	public static final int MSG_EMPTY_COMPLEMENT = 139;
-	public static final int MSG_UNKNOWN_DYNAMIC_SCOPE = 140;
-	public static final int MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE = 141;
-	public static final int MSG_ISOLATED_RULE_ATTRIBUTE = 142;
-	public static final int MSG_INVALID_ACTION_SCOPE = 143;
-	public static final int MSG_ACTION_REDEFINITION = 144;
-	public static final int MSG_DOUBLE_QUOTES_ILLEGAL = 145;
-	public static final int MSG_INVALID_TEMPLATE_ACTION = 146;
-	public static final int MSG_MISSING_ATTRIBUTE_NAME = 147;
-	public static final int MSG_ARG_INIT_VALUES_ILLEGAL = 148;
-	public static final int MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION = 149;
-	public static final int MSG_NO_RULES = 150;
-	public static final int MSG_WRITE_TO_READONLY_ATTR = 151;
-	public static final int MSG_MISSING_AST_TYPE_IN_TREE_GRAMMAR = 152;
-	public static final int MSG_REWRITE_FOR_MULTI_ELEMENT_ALT = 153;
-	public static final int MSG_RULE_INVALID_SET = 154;
-	public static final int MSG_HETERO_ILLEGAL_IN_REWRITE_ALT = 155;
-	public static final int MSG_NO_SUCH_GRAMMAR_SCOPE = 156;
-	public static final int MSG_NO_SUCH_RULE_IN_SCOPE = 157;
-	public static final int MSG_TOKEN_ALIAS_CONFLICT = 158;
-	public static final int MSG_TOKEN_ALIAS_REASSIGNMENT = 159;
-	public static final int MSG_TOKEN_VOCAB_IN_DELEGATE = 160;
-	public static final int MSG_INVALID_IMPORT = 161;
-	public static final int MSG_IMPORTED_TOKENS_RULE_EMPTY = 162;
-	public static final int MSG_IMPORT_NAME_CLASH = 163;
-	public static final int MSG_AST_OP_WITH_NON_AST_OUTPUT_OPTION = 164;
-	public static final int MSG_AST_OP_IN_ALT_WITH_REWRITE = 165;
-    public static final int MSG_WILDCARD_AS_ROOT = 166;
-    public static final int MSG_CONFLICTING_OPTION_IN_TREE_FILTER = 167;
-	public static final int MSG_ILLEGAL_OPTION_VALUE = 168;
-	public static final int MSG_ALL_OPS_NEED_SAME_ASSOC = 169;
-
-	// GRAMMAR WARNINGS
-	public static final int MSG_GRAMMAR_NONDETERMINISM = 200; // A predicts alts 1,2
-	public static final int MSG_UNREACHABLE_ALTS = 201;       // nothing predicts alt i
-	public static final int MSG_DANGLING_STATE = 202;         // no edges out of state
-	public static final int MSG_INSUFFICIENT_PREDICATES = 203;
-	public static final int MSG_DUPLICATE_SET_ENTRY = 204;    // (A|A)
-	public static final int MSG_ANALYSIS_ABORTED = 205;
-	public static final int MSG_RECURSION_OVERLOW = 206;
-	public static final int MSG_LEFT_RECURSION = 207;
-	public static final int MSG_UNREACHABLE_TOKENS = 208; // nothing predicts token
-	public static final int MSG_TOKEN_NONDETERMINISM = 209; // alts of Tokens rule
-	public static final int MSG_LEFT_RECURSION_CYCLES = 210;
-	public static final int MSG_NONREGULAR_DECISION = 211;
-
-
-    // Dependency sorting errors
-    //
-    public static final int MSG_CIRCULAR_DEPENDENCY = 212; // t1.g -> t2.g -> t3.g ->t1.g
-
-	public static final int MAX_MESSAGE_NUMBER = 212;
-
-	/** Do not perform analysis if one of these happens */
-	public static final BitSet ERRORS_FORCING_NO_ANALYSIS = new BitSet() {
-		{
-			add(MSG_RULE_REDEFINITION);
-			add(MSG_UNDEFINED_RULE_REF);
-			add(MSG_LEFT_RECURSION_CYCLES);
-			add(MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION);
-			add(MSG_NO_RULES);
-			add(MSG_NO_SUCH_GRAMMAR_SCOPE);
-			add(MSG_NO_SUCH_RULE_IN_SCOPE);
-			add(MSG_LEXER_RULES_NOT_ALLOWED);
-            add(MSG_WILDCARD_AS_ROOT);
-            add(MSG_CIRCULAR_DEPENDENCY);
-            // TODO: ...
-		}
-	};
-
-	/** Do not do code gen if one of these happens */
-	public static final BitSet ERRORS_FORCING_NO_CODEGEN = new BitSet() {
-		{
-			add(MSG_NONREGULAR_DECISION);
-			add(MSG_RECURSION_OVERLOW);
-			add(MSG_UNREACHABLE_ALTS);
-			add(MSG_FILE_AND_GRAMMAR_NAME_DIFFER);
-			add(MSG_INVALID_IMPORT);
-			add(MSG_AST_OP_WITH_NON_AST_OUTPUT_OPTION);
-            add(MSG_CIRCULAR_DEPENDENCY);
-			// TODO: ...
-		}
-	};
-
-	/** Only one error can be emitted for any entry in this table.
-	 *  Map<String,Set> where the key is a method name like danglingState.
-	 *  The set is whatever that method accepts or derives like a DFA.
-	 */
-	public static final Map emitSingleError = new HashMap() {
-		{
-			put("danglingState", new HashSet());
-		}
-	};
-
-	/** Messages should be sensitive to the locale. */
-	private static Locale locale;
-	private static String formatName;
-
-	/** Each thread might need its own error listener; e.g., a GUI with
-	 *  multiple window frames holding multiple grammars.
-	 */
-	private static Map threadToListenerMap = new HashMap();
-
-	static class ErrorState {
-		public int errors;
-		public int warnings;
-		public int infos;
-		/** Track all msgIDs; we use them to abort later if necessary.
-		 *  Also used in Message to find out what type of message it is via getMessageType().
-		 */
-		public BitSet errorMsgIDs = new BitSet();
-		public BitSet warningMsgIDs = new BitSet();
-		// TODO: figure out how to do info messages. these do not have IDs...kr
-		//public BitSet infoMsgIDs = new BitSet();
-	}
-
-	/** Track the number of errors regardless of the listener, but track
-	 *  them per thread.
-	 */
-	private static Map threadToErrorStateMap = new HashMap();
-
-	/** Each thread has its own ptr to a Tool object, which knows how
-	 *  to panic, for example.  In a GUI, the thread might just throw an Error
-	 *  to exit rather than the suicide System.exit.
-	 */
-	private static Map threadToToolMap = new HashMap();
-
-	/** The group of templates that represent all possible ANTLR errors. */
-	private static STGroup messages;
-	/** The group of templates that represent the current message format. */
-	private static STGroup format;
-
-	/** From a msgID how can I get the name of the template that describes
-	 *  the error or warning?
-	 */
-	private static String[] idToMessageTemplateName = new String[MAX_MESSAGE_NUMBER+1];
-
-	static ANTLRErrorListener theDefaultErrorListener = new ANTLRErrorListener() {
-		public void info(String msg) {
-			if (formatWantsSingleLineMessage()) {
-				msg = msg.replaceAll("\n", " ");
-			}
-			System.err.println(msg);
-		}
-
-		public void error(Message msg) {
-			String outputMsg = msg.toString();
-			if (formatWantsSingleLineMessage()) {
-				outputMsg = outputMsg.replaceAll("\n", " ");
-			}
-			System.err.println(outputMsg);
-		}
-
-		public void warning(Message msg) {
-			String outputMsg = msg.toString();
-			if (formatWantsSingleLineMessage()) {
-				outputMsg = outputMsg.replaceAll("\n", " ");
-			}
-			System.err.println(outputMsg);
-		}
-
-		public void error(ToolMessage msg) {
-			String outputMsg = msg.toString();
-			if (formatWantsSingleLineMessage()) {
-				outputMsg = outputMsg.replaceAll("\n", " ");
-			}
-			System.err.println(outputMsg);
-		}
-	};
-
-	/** Handle all ST error listeners here (code gen, Grammar, and this class
-	 *  use templates).
-	 */
-	static STErrorListener initSTListener =
-		new STErrorListener() {
-			public void compileTimeError(STMessage msg) {
-				System.err.println("ErrorManager init error: "+msg);
-			}
-
-			public void runTimeError(STMessage msg) {
-				System.err.println("ErrorManager init error: "+msg);
-			}
-
-			public void IOError(STMessage msg) {
-				System.err.println("ErrorManager init error: "+msg);
-			}
-
-			public void internalError(STMessage msg) {
-				System.err.println("ErrorManager init error: "+msg);
-			}
-
-		};
-
-	/** During verification of the messages group file, don't gen errors.
-	 *  I'll handle them here.  This is used only after the file has loaded ok
-	 *  and only for the messages STG.
-	 */
-	static STErrorListener blankSTListener =
-		new STErrorListener() {
-			public void compileTimeError(STMessage msg) {			}
-			public void runTimeError(STMessage msg) {			}
-			public void IOError(STMessage msg) {			}
-			public void internalError(STMessage msg) {			}
-		};
-
-	/** Errors during initialization related to ST must all go to System.err.
-	 */
-	static STErrorListener theDefaultSTListener =
-		new STErrorListener() {
-			public void compileTimeError(STMessage msg) {
-				ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, msg.toString(), msg.cause);
-			}
-
-			public void runTimeError(STMessage msg) {
-				ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, msg.toString(), msg.cause);
-			}
-
-			public void IOError(STMessage msg) {
-				ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, msg.toString(), msg.cause);
-			}
-
-			public void internalError(STMessage msg) {
-				ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, msg.toString(), msg.cause);
-			}
-		};
-
-	// make sure that this class is ready to use after loading
-	static {
-		initIdToMessageNameMapping();
-		// it is inefficient to set the default locale here if another
-		// piece of code is going to set the locale, but that would
-		// require that a user call an init() function or something.  I prefer
-		// that this class be ready to go when loaded as I'm absentminded ;)
-		setLocale(Locale.getDefault());
-		// try to load the message format group
-		// the user might have specified one on the command line
-		// if not, or if the user has given an illegal value, we will fall back to "antlr"
-		setFormat("antlr");
-	}
-
-    public static STErrorListener getSTErrorListener() {
-		return theDefaultSTListener;
-	}
-
-	/** We really only need a single locale for the entire running ANTLR code
-	 *  in a single VM.  Only pay attention to the language, not the country,
-	 *  so that French Canadians and French Frenchies all get the same
-	 *  template file, fr.stg.  Just easier this way.
-	 */
-	public static void setLocale(Locale locale) {
-		ErrorManager.locale = locale;
-		String language = locale.getLanguage();
-		String fileName = "org/antlr/tool/templates/messages/languages/"+language+".stg";
-		try {
-			messages = new STGroupFile(fileName);
-		}
-		catch (IllegalArgumentException iae) {
-			if ( language.equals(Locale.US.getLanguage()) ) {
-				rawError("ANTLR installation corrupted; cannot find English messages file "+fileName);
-				panic();
-			}
-			else {
-				setLocale(Locale.US); // recurse on this rule, trying the US locale
-			}
-		}
-
-		messages.setListener(blankSTListener);
-		boolean messagesOK = verifyMessages();
-		if ( !messagesOK && language.equals(Locale.US.getLanguage()) ) {
-			rawError("ANTLR installation corrupted; English messages file "+language+".stg incomplete");
-			panic();
-		}
-		else if ( !messagesOK ) {
-			setLocale(Locale.US); // try US to see if that will work
-		}
-	}
-
-	/** The format gets reset from the Tool if the user supplied a command line option to that effect;
-	 *  otherwise we just use the default "antlr".
-	 */
-	public static void setFormat(String formatName) {
-		ErrorManager.formatName = formatName;
-		String fileName = "org/antlr/tool/templates/messages/formats/"+formatName+".stg";
-		format = new STGroupFile(fileName);
-		format.setListener(initSTListener);
-		if ( !format.isDefined("message") ) { // pick random msg to load
-			if ( formatName.equals("antlr") ) {
-				rawError("no such message format file "+fileName+" retrying with default ANTLR format");
-				setFormat("antlr"); // recurse on this rule, trying the default message format
-				return;
-			}
-			else {
-				setFormat("antlr"); // recurse on this rule, trying the default message format
-			}
-		}
-
-		format.setListener(blankSTListener);
-		boolean formatOK = verifyFormat();
-		if ( !formatOK && formatName.equals("antlr") ) {
-			rawError("ANTLR installation corrupted; ANTLR messages format file "+formatName+".stg incomplete");
-			panic();
-		}
-		else if ( !formatOK ) {
-			setFormat("antlr"); // recurse on this rule, trying the default message format
-		}
-	}
-
-	/** Encodes the error handling found in setLocale, but does not trigger
-	 *  panics, which would make GUI tools die if ANTLR's installation was
-	 *  a bit screwy.  Duplicated code...ick.
-	public static Locale getLocaleForValidMessages(Locale locale) {
-		ErrorManager.locale = locale;
-		String language = locale.getLanguage();
-		String fileName = "org/antlr/tool/templates/messages/"+language+".stg";
-		ClassLoader cl = Thread.currentThread().getContextClassLoader();
-		InputStream is = cl.getResourceAsStream(fileName);
-		if ( is==null && language.equals(Locale.US.getLanguage()) ) {
-			return null;
-		}
-		else if ( is==null ) {
-			return getLocaleForValidMessages(Locale.US); // recurse on this rule, trying the US locale
-		}
-
-		boolean messagesOK = verifyMessages();
-		if ( !messagesOK && language.equals(Locale.US.getLanguage()) ) {
-			return null;
-		}
-		else if ( !messagesOK ) {
-			return getLocaleForValidMessages(Locale.US); // try US to see if that will work
-		}
-		return true;
-	}
-	 */
-
-	/** In general, you'll want all errors to go to a single spot.
-	 *  However, in a GUI, you might have two frames up with two
-	 *  different grammars.  Two threads might launch to process the
-	 *  grammars--you would want errors to go to different objects
-	 *  depending on the thread.  I store a single listener per
-	 *  thread.
-	 */
-	public static void setErrorListener(ANTLRErrorListener listener) {
-		threadToListenerMap.put(Thread.currentThread(), listener);
-	}
-
-    public static void removeErrorListener() {
-        threadToListenerMap.remove(Thread.currentThread());
-    }
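-
-	/** Illustrative sketch, not part of the original source: one way a GUI
-	 *  thread could scope its own listener around a unit of work using the
-	 *  two methods above.  The Runnable-based helper is hypothetical.
-	 */
-	public static void runWithListener(ANTLRErrorListener listener, Runnable work) {
-		setErrorListener(listener);           // listener applies to this thread only
-		try { work.run(); }
-		finally { removeErrorListener(); }    // restore the default listener
-	}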
-
-	public static void setTool(Tool tool) {
-		threadToToolMap.put(Thread.currentThread(), tool);
-	}
-
-	/** Given a message ID, return an ST that somebody can fill
-	 *  with data.  We need to convert the int ID to the name of a template
-	 *  in the messages ST group.
-	 */
-	public static ST getMessage(int msgID) {
-        String msgName = idToMessageTemplateName[msgID];
-		return messages.getInstanceOf(msgName);
-	}
-	public static String getMessageType(int msgID) {
-		if (getErrorState().warningMsgIDs.member(msgID)) {
-			return messages.getInstanceOf("warning").render();
-		}
-		else if (getErrorState().errorMsgIDs.member(msgID)) {
-			return messages.getInstanceOf("error").render();
-		}
-		assertTrue(false, "Assertion failed! Message ID " + msgID + " created but is not present in errorMsgIDs or warningMsgIDs.");
-		return "";
-	}
-
-	/** Return an ST that refers to the current format used for
-	 * emitting messages.
-	 */
-	public static ST getLocationFormat() {
-		return format.getInstanceOf("location");
-	}
-	public static ST getReportFormat() {
-		return format.getInstanceOf("report");
-	}
-	public static ST getMessageFormat() {
-		return format.getInstanceOf("message");
-	}
-	public static boolean formatWantsSingleLineMessage() {
-		return format.getInstanceOf("wantsSingleLineMessage").render().equals("true");
-	}
-
-	public static ANTLRErrorListener getErrorListener() {
-		ANTLRErrorListener el =
-			(ANTLRErrorListener)threadToListenerMap.get(Thread.currentThread());
-		if ( el==null ) {
-			return theDefaultErrorListener;
-		}
-		return el;
-	}
-
-	public static ErrorState getErrorState() {
-		ErrorState ec =
-			(ErrorState)threadToErrorStateMap.get(Thread.currentThread());
-		if ( ec==null ) {
-			ec = new ErrorState();
-			threadToErrorStateMap.put(Thread.currentThread(), ec);
-		}
-		return ec;
-	}
-
-	public static int getNumErrors() {
-		return getErrorState().errors;
-	}
-
-	public static void resetErrorState() {
-        threadToListenerMap = new HashMap();
-        ErrorState ec = new ErrorState();
-		threadToErrorStateMap.put(Thread.currentThread(), ec);
-	}
-
-	public static void info(String msg) {
-		getErrorState().infos++;
-		getErrorListener().info(msg);
-	}
-
-	public static void error(int msgID) {
-		getErrorState().errors++;
-		getErrorState().errorMsgIDs.add(msgID);
-		getErrorListener().error(new ToolMessage(msgID));
-	}
-
-	public static void error(int msgID, Throwable e) {
-		getErrorState().errors++;
-		getErrorState().errorMsgIDs.add(msgID);
-		getErrorListener().error(new ToolMessage(msgID,e));
-	}
-
-	public static void error(int msgID, Object arg) {
-		getErrorState().errors++;
-		getErrorState().errorMsgIDs.add(msgID);
-		getErrorListener().error(new ToolMessage(msgID, arg));
-	}
-
-	public static void error(int msgID, Object arg, Object arg2) {
-		getErrorState().errors++;
-		getErrorState().errorMsgIDs.add(msgID);
-		getErrorListener().error(new ToolMessage(msgID, arg, arg2));
-	}
-
-	public static void error(int msgID, Object arg, Throwable e) {
-		getErrorState().errors++;
-		getErrorState().errorMsgIDs.add(msgID);
-		getErrorListener().error(new ToolMessage(msgID, arg, e));
-	}
-
-	public static void warning(int msgID, Object arg) {
-		getErrorState().warnings++;
-		getErrorState().warningMsgIDs.add(msgID);
-		getErrorListener().warning(new ToolMessage(msgID, arg));
-	}
-
-	public static void nondeterminism(DecisionProbe probe,
-									  DFAState d)
-	{
-		getErrorState().warnings++;
-		Message msg = new GrammarNonDeterminismMessage(probe,d);
-		getErrorState().warningMsgIDs.add(msg.msgID);
-		getErrorListener().warning(msg);
-	}
-
-	public static void danglingState(DecisionProbe probe,
-									 DFAState d)
-	{
-		getErrorState().errors++;
-		Message msg = new GrammarDanglingStateMessage(probe,d);
-		getErrorState().errorMsgIDs.add(msg.msgID);
-		Set seen = (Set)emitSingleError.get("danglingState");
-		if ( !seen.contains(d.dfa.decisionNumber+"|"+d.getAltSet()) ) {
-			getErrorListener().error(msg);
-			// we've seen this decision and this alt set; never again
-			seen.add(d.dfa.decisionNumber+"|"+d.getAltSet());
-		}
-	}
-
-	public static void analysisAborted(DecisionProbe probe)
-	{
-		getErrorState().warnings++;
-		Message msg = new GrammarAnalysisAbortedMessage(probe);
-		getErrorState().warningMsgIDs.add(msg.msgID);
-		getErrorListener().warning(msg);
-	}
-
-	public static void unreachableAlts(DecisionProbe probe,
-									   List alts)
-	{
-		getErrorState().errors++;
-		Message msg = new GrammarUnreachableAltsMessage(probe,alts);
-		getErrorState().errorMsgIDs.add(msg.msgID);
-		getErrorListener().error(msg);
-	}
-
-	public static void insufficientPredicates(DecisionProbe probe,
-											  DFAState d,
-											  Map<Integer, Set<Token>> altToUncoveredLocations)
-	{
-		getErrorState().warnings++;
-		Message msg = new GrammarInsufficientPredicatesMessage(probe,d,altToUncoveredLocations);
-		getErrorState().warningMsgIDs.add(msg.msgID);
-		getErrorListener().warning(msg);
-	}
-
-	public static void nonLLStarDecision(DecisionProbe probe) {
-		getErrorState().errors++;
-		Message msg = new NonRegularDecisionMessage(probe, probe.getNonDeterministicAlts());
-		getErrorState().errorMsgIDs.add(msg.msgID);
-		getErrorListener().error(msg);
-	}
-
-	public static void recursionOverflow(DecisionProbe probe,
-										 DFAState sampleBadState,
-										 int alt,
-										 Collection targetRules,
-										 Collection callSiteStates)
-	{
-		getErrorState().errors++;
-		Message msg = new RecursionOverflowMessage(probe,sampleBadState, alt,
-										 targetRules, callSiteStates);
-		getErrorState().errorMsgIDs.add(msg.msgID);
-		getErrorListener().error(msg);
-	}
-
-	/*
-	// TODO: we can remove I think.  All detected now with cycles check.
-	public static void leftRecursion(DecisionProbe probe,
-									 int alt,
-									 Collection targetRules,
-									 Collection callSiteStates)
-	{
-		getErrorState().warnings++;
-		Message msg = new LeftRecursionMessage(probe, alt, targetRules, callSiteStates);
-		getErrorState().warningMsgIDs.add(msg.msgID);
-		getErrorListener().warning(msg);
-	}
-	*/
-
-	public static void leftRecursionCycles(Collection cycles) {
-		getErrorState().errors++;
-		Message msg = new LeftRecursionCyclesMessage(cycles);
-		getErrorState().errorMsgIDs.add(msg.msgID);
-		getErrorListener().error(msg);
-	}
-
-	public static void grammarError(int msgID,
-									Grammar g,
-									Token token,
-									Object arg,
-									Object arg2)
-	{
-		getErrorState().errors++;
-		Message msg = new GrammarSemanticsMessage(msgID,g,token,arg,arg2);
-		getErrorState().errorMsgIDs.add(msgID);
-		getErrorListener().error(msg);
-	}
-
-	public static void grammarError(int msgID,
-									Grammar g,
-									Token token,
-									Object arg)
-	{
-		grammarError(msgID,g,token,arg,null);
-	}
-
-	public static void grammarError(int msgID,
-									Grammar g,
-									Token token)
-	{
-		grammarError(msgID,g,token,null,null);
-	}
-
-	public static void grammarWarning(int msgID,
-									  Grammar g,
-									  Token token,
-									  Object arg,
-									  Object arg2)
-	{
-		getErrorState().warnings++;
-		Message msg = new GrammarSemanticsMessage(msgID,g,token,arg,arg2);
-		getErrorState().warningMsgIDs.add(msgID);
-		getErrorListener().warning(msg);
-	}
-
-	public static void grammarWarning(int msgID,
-									  Grammar g,
-									  Token token,
-									  Object arg)
-	{
-		grammarWarning(msgID,g,token,arg,null);
-	}
-
-	public static void grammarWarning(int msgID,
-									  Grammar g,
-									  Token token)
-	{
-		grammarWarning(msgID,g,token,null,null);
-	}
-
-	public static void syntaxError(int msgID,
-								   Grammar grammar,
-								   Token token,
-								   Object arg,
-								   RecognitionException re)
-	{
-		getErrorState().errors++;
-		getErrorState().errorMsgIDs.add(msgID);
-		getErrorListener().error(
-			new GrammarSyntaxMessage(msgID,grammar,token,arg,re)
-		);
-	}
-
-	public static void internalError(Object error, Throwable e) {
-		StackTraceElement location = getLastNonErrorManagerCodeLocation(e);
-		String msg = "Exception "+e+"@"+location+": "+error;
-		error(MSG_INTERNAL_ERROR, msg);
-	}
-
-	public static void internalError(Object error) {
-		StackTraceElement location =
-			getLastNonErrorManagerCodeLocation(new Exception());
-		String msg = location+": "+error;
-		error(MSG_INTERNAL_ERROR, msg);
-	}
-
-	public static boolean doNotAttemptAnalysis() {
-		return !getErrorState().errorMsgIDs.and(ERRORS_FORCING_NO_ANALYSIS).isNil();
-	}
-
-	public static boolean doNotAttemptCodeGen() {
-		return doNotAttemptAnalysis() ||
-			   !getErrorState().errorMsgIDs.and(ERRORS_FORCING_NO_CODEGEN).isNil();
-	}
-
-	/** Return the first non-ErrorManager code location for generating messages */
-	private static StackTraceElement getLastNonErrorManagerCodeLocation(Throwable e) {
-		StackTraceElement[] stack = e.getStackTrace();
-		int i = 0;
-		for (; i < stack.length; i++) {
-			StackTraceElement t = stack[i];
-			if ( t.toString().indexOf("ErrorManager")<0 ) {
-				break;
-			}
-		}
-		StackTraceElement location = stack[i];
-		return location;
-	}
-
-	// A S S E R T I O N  C O D E
-
-	public static void assertTrue(boolean condition, String message) {
-		if ( !condition ) {
-			internalError(message);
-		}
-	}
-
-	// S U P P O R T  C O D E
-
-	protected static boolean initIdToMessageNameMapping() {
-		// make sure a message exists, even if it's just to indicate a problem
-		for (int i = 0; i < idToMessageTemplateName.length; i++) {
-			idToMessageTemplateName[i] = "INVALID MESSAGE ID: "+i;
-		}
-		// get list of fields and use it to fill in idToMessageTemplateName mapping
-		Field[] fields = ErrorManager.class.getFields();
-		for (int i = 0; i < fields.length; i++) {
-			Field f = fields[i];
-			String fieldName = f.getName();
-			if ( !fieldName.startsWith("MSG_") ) {
-				continue;
-			}
-			String templateName =
-				fieldName.substring("MSG_".length(),fieldName.length());
-			int msgID = 0;
-			try {
-				// get the constant value from this class object
-				msgID = f.getInt(ErrorManager.class);
-			}
-			catch (IllegalAccessException iae) {
-				System.err.println("cannot get const value for "+f.getName());
-				continue;
-			}
-			if ( fieldName.startsWith("MSG_") ) {
-                idToMessageTemplateName[msgID] = templateName;
-			}
-		}
-		return true;
-	}
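-
-	// Illustrative note, not part of the original source: after the mapping
-	// above runs, a message ID resolves to its template name by array lookup;
-	// e.g. idToMessageTemplateName[MSG_GRAMMAR_NONDETERMINISM] yields
-	// "GRAMMAR_NONDETERMINISM", which names a template in the locale's STG.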
-
-	/** Use reflection to find list of MSG_ fields and then verify a
-	 *  template exists for each one from the locale's group.
-	 */
-	protected static boolean verifyMessages() {
-		boolean ok = true;
-		Field[] fields = ErrorManager.class.getFields();
-		for (int i = 0; i < fields.length; i++) {
-			Field f = fields[i];
-			String fieldName = f.getName();
-			String templateName =
-				fieldName.substring("MSG_".length(),fieldName.length());
-			if ( fieldName.startsWith("MSG_") ) {
-				if ( !messages.isDefined(templateName) ) {
-					System.err.println("Message "+templateName+" in locale "+
-									   locale+" not found");
-					ok = false;
-				}
-			}
-		}
-		// check for special templates
-		if (!messages.isDefined("warning")) {
-			System.err.println("Message template 'warning' not found in locale "+ locale);
-			ok = false;
-		}
-		if (!messages.isDefined("error")) {
-			System.err.println("Message template 'error' not found in locale "+ locale);
-			ok = false;
-		}
-		return ok;
-	}
-
-	/** Verify the message format template group */
-	protected static boolean verifyFormat() {
-		boolean ok = true;
-		if (!format.isDefined("location")) {
-			System.err.println("Format template 'location' not found in " + formatName);
-			ok = false;
-		}
-		if (!format.isDefined("message")) {
-			System.err.println("Format template 'message' not found in " + formatName);
-			ok = false;
-		}
-		if (!format.isDefined("report")) {
-			System.err.println("Format template 'report' not found in " + formatName);
-			ok = false;
-		}
-		return ok;
-	}
-
-	/** If there are errors during ErrorManager init, we have no choice
-	 *  but to go to System.err.
-	 */
-	static void rawError(String msg) {
-		System.err.println(msg);
-	}
-
-	static void rawError(String msg, Throwable e) {
-		rawError(msg);
-		e.printStackTrace(System.err);
-	}
-
-	/** I *think* this will allow Tool subclasses to exit gracefully
-	 *  for GUIs etc...
-	 */
-	public static void panic() {
-		Tool tool = (Tool)threadToToolMap.get(Thread.currentThread());
-		if ( tool==null ) {
-			// no tool registered, exit
-			throw new Error("ANTLR ErrorManager panic");
-		}
-		else {
-			tool.panic();
-		}
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/FASerializer.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/FASerializer.java
deleted file mode 100644
index 401bbb3..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/FASerializer.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.*;
-import org.antlr.misc.Utils;
-
-import java.util.*;
-
-/** An aspect of FA (finite automata) that knows how to dump them to serialized
- *  strings.
- */
-public class FASerializer {
-    /** To prevent infinite recursion when walking state machines, record
-     *  which states we've visited.  Make a new set every time you start
-     *  walking in case you reuse this object.  Multiple threads will trash
-     *  this shared variable.  Use a different FASerializer per thread.
-     */
-    protected Set markedStates;
-
-    /** Each state we walk will get a new state number for serialization
-     *  purposes.  This is the variable that tracks state numbers.
-     */
-    protected int stateCounter = 0;
-
-    /** Rather than add a new instance variable to NFA and DFA just for
-     *  serializing machines, map old state numbers to new state numbers
-     *  by a State object -> Integer new state number HashMap.
-     */
-    protected Map stateNumberTranslator;
-
-    protected Grammar grammar;
-
-    /** This aspect is associated with a grammar; used to get token names */
-    public FASerializer(Grammar grammar) {
-        this.grammar = grammar;
-    }
-
-	public String serialize(State s) {
-		if ( s==null ) {
-			return "<no automaton>";
-		}
-		return serialize(s, true);
-	}
-
-	/** Return a string representation of a state machine.  Two identical
-     *  NFAs or DFAs will have identical serialized representations.  The
-     *  state numbers inside the state are not used; instead, a new number
-     *  is computed, and because the serialization walks the two
-     *  machines using the same specific algorithm, the state numbers
-     *  will be identical.  Accept states are distinguished from regular
-     *  states.
-     */
-    public String serialize(State s, boolean renumber) {
-        markedStates = new HashSet();
-        stateCounter = 0;
-		if ( renumber ) {
-			stateNumberTranslator = new HashMap();
-        	walkFANormalizingStateNumbers(s);
-		}
-		List lines = new ArrayList();
-        if ( s.getNumberOfTransitions()>0 ) {
-			walkSerializingFA(lines, s);
-		}
-		else {
-			// special case: s0 is an accept
-			String s0 = getStateString(0, s);
-			lines.add(s0+"\n");
-		}
-        StringBuffer buf = new StringBuffer(0);
-        // sort lines to normalize; makes states come out ordered
-        // and then ordered by edge labels then by target state number :)
-        Collections.sort(lines);
-        for (int i = 0; i < lines.size(); i++) {
-            String line = (String) lines.get(i);
-            buf.append(line);
-        }
-        return buf.toString();
-    }
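-
-    /** Illustrative usage sketch, not part of the original source; the
-     *  grammar and decisionStartState variables here are hypothetical.
-     *
-     *    FASerializer serializer = new FASerializer(grammar);
-     *    String text = serializer.serialize(decisionStartState);
-     *
-     *  Two structurally identical machines produce identical text, which is
-     *  what the renumbering above guarantees.
-     */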
-
-    /** In stateNumberTranslator, get a map from State to new, normalized
-     *  state number.  Used by walkSerializingFA to make sure any two
-     *  identical state machines will serialize the same way.
-     */
-    protected void walkFANormalizingStateNumbers(State s) {
-		if ( s==null ) {
-			ErrorManager.internalError("null state s");
-			return;
-		}
-        if ( stateNumberTranslator.get(s)!=null ) {
-            return; // already did this state
-        }
-        // assign a new state number for this node if there isn't one
-        stateNumberTranslator.put(s, Utils.integer(stateCounter));
-        stateCounter++;
-
-        // visit nodes pointed to by each transition;
-        for (int i = 0; i < s.getNumberOfTransitions(); i++) {
-            Transition edge = (Transition) s.transition(i);
-            walkFANormalizingStateNumbers(edge.target); // keep walkin'
-            // if this transition is a rule reference, the node "following" this state
-            // will not be found and will appear not to be in the graph.  Must explicitly jump
-            // to it, but don't "draw" an edge.
-            if ( edge instanceof RuleClosureTransition ) {
-				walkFANormalizingStateNumbers(((RuleClosureTransition) edge).followState);
-            }
-        }
-    }
-
-    protected void walkSerializingFA(List lines, State s) {
-        if ( markedStates.contains(s) ) {
-            return; // already visited this node
-        }
-
-        markedStates.add(s); // mark this node as completed.
-
-		int normalizedStateNumber = s.stateNumber;
-		if ( stateNumberTranslator!=null ) {
-	        Integer normalizedStateNumberI = (Integer)stateNumberTranslator.get(s);
-			normalizedStateNumber = normalizedStateNumberI.intValue();
-		}
-
-		String stateStr = getStateString(normalizedStateNumber, s);
-
-        // depth first walk each transition, printing its edge first
-        for (int i = 0; i < s.getNumberOfTransitions(); i++) {
-            Transition edge = (Transition) s.transition(i);
-            StringBuffer buf = new StringBuffer();
-            buf.append(stateStr);
-			if ( edge.isAction() ) {
-				buf.append("-{}->");
-			}
-			else if ( edge.isEpsilon() ) {
-				buf.append("->");
-			}
-			else if ( edge.isSemanticPredicate() ) {
-				buf.append("-{"+edge.label.getSemanticContext()+"}?->");
-			}
-			else {
-				String predsStr = "";
-				if ( edge.target instanceof DFAState ) {
-					// look for gated predicates; don't add gated to simple sempred edges
-					SemanticContext preds =
-						((DFAState)edge.target).getGatedPredicatesInNFAConfigurations();
-					if ( preds!=null ) {
-						predsStr = "&&{"+
-							preds.genExpr(grammar.generator,
-									   	  grammar.generator.getTemplates(), null).render()
-							+"}?";
-					}
-				}
-				buf.append("-"+edge.label.toString(grammar)+predsStr+"->");
-			}
-
-			int normalizedTargetStateNumber = edge.target.stateNumber;
-			if ( stateNumberTranslator!=null ) {
-				Integer normalizedTargetStateNumberI =
-					(Integer)stateNumberTranslator.get(edge.target);
-				normalizedTargetStateNumber = normalizedTargetStateNumberI.intValue();
-			}
-			buf.append(getStateString(normalizedTargetStateNumber, edge.target));
-            buf.append("\n");
-            lines.add(buf.toString());
-
-            // walk this transition
-            walkSerializingFA(lines, edge.target);
-
-            // if this transition is a rule reference, the node "following" this state
-            // will not be found and will appear not to be in the graph.  Must explicitly jump
-            // to it, but don't "draw" an edge.
-            if ( edge instanceof RuleClosureTransition ) {
-				walkSerializingFA(lines, ((RuleClosureTransition) edge).followState);
-            }
-        }
-
-    }
-
-    private String getStateString(int n, State s) {
-        String stateStr = ".s"+n;
-        if ( s.isAcceptState() ) {
-            if ( s instanceof DFAState ) {
-                stateStr = ":s"+n+"=>"+((DFAState)s).getUniquelyPredictedAlt();
-            }
-            else {
-                stateStr = ":s"+n;
-            }
-        }
-        return stateStr;
-    }
-
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/Grammar.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/Grammar.java
deleted file mode 100644
index a88314d..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/Grammar.java
+++ /dev/null
@@ -1,3171 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.Tool;
-import org.antlr.analysis.*;
-import org.antlr.analysis.DFA;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.codegen.*;
-import org.antlr.grammar.v3.*;
-import org.antlr.misc.*;
-import org.antlr.misc.Utils;
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.CommonTreeNodeStream;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-import org.stringtemplate.v4.STGroupString;
-
-import java.io.*;
-import java.util.*;
-
-/** Represents a grammar in memory. */
-public class Grammar {
-	public static final String SYNPRED_RULE_PREFIX = "synpred";
-
-	public static final String GRAMMAR_FILE_EXTENSION = ".g";
-
-	/** used for generating lexer temp files */
-	public static final String LEXER_GRAMMAR_FILE_EXTENSION = ".g";
-
-	public static final int INITIAL_DECISION_LIST_SIZE = 300;
-	public static final int INVALID_RULE_INDEX = -1;
-
-	// the various kinds of labels. t=type, id=ID, types+=type ids+=ID
-	public static final int RULE_LABEL = 1;
-	public static final int TOKEN_LABEL = 2;
-	public static final int RULE_LIST_LABEL = 3;
-	public static final int TOKEN_LIST_LABEL = 4;
-    public static final int CHAR_LABEL = 5; // used in lexer for x='a'
-    public static final int WILDCARD_TREE_LABEL = 6; // Used in tree grammar x=.
-    public static final int WILDCARD_TREE_LIST_LABEL = 7; // Used in tree grammar x+=.
-
-
-    public static String[] LabelTypeToString =
-		{"<invalid>", "rule", "token", "rule-list", "token-list", "wildcard-tree", "wildcard-tree-list"};
-
-	public static final String ARTIFICIAL_TOKENS_RULENAME = "Tokens";
-	public static final String FRAGMENT_RULE_MODIFIER = "fragment";
-
-	public static final String SYNPREDGATE_ACTION_NAME = "synpredgate";
-
-	/** When converting ANTLR char and string literals, here is the
-	 *  value set of escape chars.
-	 */
-	public static int ANTLRLiteralEscapedCharValue[] = new int[255];
-
-	/** Given a char, we need to be able to show it as an ANTLR literal.
-	 */
-	public static String ANTLRLiteralCharValueEscape[] = new String[255];
-
-	static {
-		ANTLRLiteralEscapedCharValue['n'] = '\n';
-		ANTLRLiteralEscapedCharValue['r'] = '\r';
-		ANTLRLiteralEscapedCharValue['t'] = '\t';
-		ANTLRLiteralEscapedCharValue['b'] = '\b';
-		ANTLRLiteralEscapedCharValue['f'] = '\f';
-		ANTLRLiteralEscapedCharValue['\\'] = '\\';
-		ANTLRLiteralEscapedCharValue['\''] = '\'';
-		ANTLRLiteralEscapedCharValue['"'] = '"';
-		ANTLRLiteralCharValueEscape['\n'] = "\\n";
-		ANTLRLiteralCharValueEscape['\r'] = "\\r";
-		ANTLRLiteralCharValueEscape['\t'] = "\\t";
-		ANTLRLiteralCharValueEscape['\b'] = "\\b";
-		ANTLRLiteralCharValueEscape['\f'] = "\\f";
-		ANTLRLiteralCharValueEscape['\\'] = "\\\\";
-		ANTLRLiteralCharValueEscape['\''] = "\\'";
-	}
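-
-	/** Illustrative sketch, not part of the original source: how the escape
-	 *  table above can be consulted.  The method name is made up for the example.
-	 */
-	public static String exampleEscapeCharForANTLR(int c) {
-		if ( c>=0 && c<ANTLRLiteralCharValueEscape.length &&
-			 ANTLRLiteralCharValueEscape[c]!=null ) {
-			return ANTLRLiteralCharValueEscape[c]; // e.g. '\n' becomes "\\n"
-		}
-		return String.valueOf((char)c);           // anything else is shown as-is
-	}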
-
-	public static final int LEXER = 1;
-	public static final int PARSER = 2;
-	public static final int TREE_PARSER = 3;
-	public static final int COMBINED = 4;
-	public static final String[] grammarTypeToString = new String[] {
-		"<invalid>",
-		"lexer",
-		"parser",
-		"tree",
-		"combined"
-	};
-
-	public static final String[] grammarTypeToFileNameSuffix = new String[] {
-		"<invalid>",
-		"Lexer",
-		"Parser",
-		"", // no suffix for tree grammars
-		"Parser" // if combined grammar, gen Parser and Lexer will be done later
-	};
-
-	/** Set of valid imports.  E.g., can only import a tree parser into
-	 *  another tree parser.  Maps delegate to set of delegator grammar types.
-	 *  validDelegations.get(LEXER) gives list of the kinds of delegators
-	 *  that can import lexers.
-	 */
-	public static MultiMap<Integer,Integer> validDelegations =
-		new MultiMap<Integer,Integer>() {
-			{
-				map(LEXER, LEXER);
-				map(LEXER, PARSER);
-				map(LEXER, COMBINED);
-
-				map(PARSER, PARSER);
-				map(PARSER, COMBINED);
-
-				map(TREE_PARSER, TREE_PARSER);
-
-				// TODO: allow COMBINED
-				// map(COMBINED, COMBINED);
-			}
-		};
-
-	/** This is the buffer of *all* tokens found in the grammar file
-	 *  including whitespace tokens etc...  I use this to extract
-	 *  lexer rules from combined grammars.
-	 */
-	public CommonTokenStream tokenBuffer;
-	public static final String IGNORE_STRING_IN_GRAMMAR_FILE_NAME = "__";
-	public static final String AUTO_GENERATED_TOKEN_NAME_PREFIX = "T__";
-
-	public static class Decision {
-		public Grammar grammar;
-		public int decision;
-		public NFAState startState;
-		public GrammarAST blockAST;
-		public DFA dfa;
-	}
-
-	public class LabelElementPair {
-		public Token label;
-		public GrammarAST elementRef;
-		public String referencedRuleName;
-		/** Has an action referenced the label?  Set by ActionAnalysis.g
-		 *  Currently only set for rule labels.
-		 */
-		public boolean actionReferencesLabel;
-		public int type; // in {RULE_LABEL,TOKEN_LABEL,RULE_LIST_LABEL,TOKEN_LIST_LABEL}
-		public LabelElementPair(Token label, GrammarAST elementRef) {
-			this.label = label;
-			this.elementRef = elementRef;
-			this.referencedRuleName = elementRef.getText();
-		}
-		public Rule getReferencedRule() {
-			return getRule(referencedRuleName);
-		}
-		public String toString() {
-			return elementRef.toString();
-		}
-	}
-
-	/** What name did the user provide for this grammar? */
-	public String name;
-
-	/** What type of grammar is this: lexer, parser, tree walker */
-	public int type;
-
-	/** A list of options specified at the grammar level such as language=Java.
-	 *  The value can be an AST for complicated values such as character sets.
-	 *  There may be code generator specific options in here.  I do no
-	 *  interpretation of the key/value pairs...they are simply available for
-	 *  whoever wants them.
-	 */
-	protected Map options;
-
-	public static final Set legalLexerOptions =
-			new HashSet() {
-				{
-				add("language"); add("tokenVocab");
-				add("TokenLabelType");
-				add("superClass");
-				add("filter");
-				add("k");
-				add("backtrack");
-				add("memoize");
-				}
-			};
-
-	public static final Set legalParserOptions =
-			new HashSet() {
-				{
-				add("language"); add("tokenVocab");
-				add("output"); add("rewrite"); add("ASTLabelType");
-				add("TokenLabelType");
-				add("superClass");
-				add("k");
-				add("backtrack");
-				add("memoize");
-				}
-			};
-
-    public static final Set legalTreeParserOptions =
-        new HashSet() {
-            {
-                add("language"); add("tokenVocab");
-                add("output"); add("rewrite"); add("ASTLabelType");
-                add("TokenLabelType");
-                add("superClass");
-                add("k");
-                add("backtrack");
-                add("memoize");
-                add("filter");
-            }
-        };
-
-	public static final Set doNotCopyOptionsToLexer =
-		new HashSet() {
-			{
-				add("output"); add("ASTLabelType"); add("superClass");
-				add("k"); add("backtrack"); add("memoize"); add("rewrite");
-			}
-		};
-
-	public static final Map defaultOptions =
-			new HashMap() {
-				{
-					put("language","Java");
-				}
-			};
-
-	public static final Set legalBlockOptions =
-			new HashSet() {{add("k"); add("greedy"); add("backtrack"); add("memoize");}};
-
-	/** What are the default options for a subrule? */
-	public static final Map defaultBlockOptions =
-			new HashMap() {{put("greedy","true");}};
-
-	public static final Map defaultLexerBlockOptions =
-			new HashMap() {{put("greedy","true");}};
-
-	// Token options are here to avoid contaminating Token object in runtime
-
-	/** Legal options for terminal refs like ID<node=MyVarNode> */
-	public static final Set legalTokenOptions =
-		new HashSet() {
-			{
-				add(defaultTokenOption);
-				add("type");
-				add("text");
-				add("assoc");
-			}
-		};
-
-	public static final String defaultTokenOption = "node";
-
-	/** Is there a global fixed lookahead set for this grammar?
-	 *  If 0, nothing specified.  -1 implies we have not looked at
-	 *  the options table yet to set k.
-	 */
-	protected int global_k = -1;
-
-	/** Map a scope to a map of name:action pairs.
-	 *  Map<String, Map<String,GrammarAST>>
-	 *  The code generator will use this to fill holes in the output files.
-	 *  I track the AST node for the action in case I need the line number
-	 *  for errors.
-	 */
-	private Map<String, Map<String, Object>> actions =
-		new HashMap<String, Map<String, Object>>();
-
-	/** The NFA that represents the grammar with edges labelled with tokens
-	 *  or epsilon.  It is more suitable for analysis than an AST representation.
-	 */
-	public NFA nfa;
-
-	protected NFAFactory factory;
-
-	/** If this grammar is part of a larger composite grammar via delegate
-	 *  statement, then this points at the composite.  The composite holds
-	 *  a global list of rules, token types, decision numbers, etc...
-	 */
-	public CompositeGrammar composite;
-
-	/** A pointer back into grammar tree.  Needed so we can add delegates. */
-	public CompositeGrammarTree compositeTreeNode;
-
-	/** If this is a delegate of another grammar, this is the label used
-	 *  as an instance var by that grammar to point at this grammar. null
-	 *  if no label was specified in the delegate statement.
-	 */
-	public String label;
-
-	/** TODO: hook this to the charVocabulary option */
-	protected IntSet charVocabulary = null;
-
-	/** For ANTLRWorks, we want to be able to map a line:col to a specific
-	 *  decision DFA so it can display the DFA.
-	 */
-	Map lineColumnToLookaheadDFAMap = new HashMap();
-
-	public Tool tool;
-
-	/** The unique set of all rule references in any rule; set of tree node
-	 *  objects so two refs to same rule can exist but at different line/position.
-	 */
-	protected Set<GrammarAST> ruleRefs = new HashSet<GrammarAST>();
-
-	protected Set<GrammarAST> scopedRuleRefs = new HashSet();
-
-	/** The unique set of all token ID references in any rule */
-	protected Set<Token> tokenIDRefs = new HashSet<Token>();
-
-	/** Be able to assign a number to every decision in grammar;
-	 *  decisions in 1..n
-	 */
-	protected int decisionCount = 0;
-
-	/** A list of all rules that are in any left-recursive cycle.  There
-	 *  could be multiple cycles, but this is a flat list of all problematic
-	 *  rules. This is stuff we couldn't refactor to a precedence rule.
-	 */
-	protected Set<Rule> leftRecursiveRules;
-
-	/** An external tool requests that DFA analysis abort prematurely.  Stops
-	 *  at DFA granularity; DFAs are also limited in size and computation time
-	 *  as a failsafe.
-	 */
-	protected boolean externalAnalysisAbort;
-
-	public int numNonLLStar = 0; // hack to track for -report
-
-	/** When we read in a grammar, we track the list of syntactic predicates
-	 *  and build faux rules for them later.  See my blog entry Dec 2, 2005:
-	 *  http://www.antlr.org/blog/antlr3/lookahead.tml
-	 *  This maps the name (we make up) for a pred to the AST grammar fragment.
-	 */
-	protected LinkedHashMap<String, GrammarAST> nameToSynpredASTMap;
-
-	/** Each left-recursive precedence rule must define precedence array
-	 *  for binary operators like:
-	 *
-	 *  	static int[] e_prec = new int[tokenNames.length];
-	 *  	static {
-   	 *  		e_prec[75] = 1;
-	 *  	}
-	 *  We track these and push them into the parser later; this is computed
-	 *  early when we look for prec rules.
-	 */
-	public List<String> precRuleInitCodeBlocks = new ArrayList<String>();
-
-    /** At least one rule has memoize=true */
-    public boolean atLeastOneRuleMemoizes;
-
-    /** At least one backtrack=true in rule or decision or grammar. */
-    public boolean atLeastOneBacktrackOption;
-
-	/** Was this created from a COMBINED grammar? */
-	public boolean implicitLexer;
-
-	/** Map a rule to its Rule object */
-	protected LinkedHashMap<String,Rule> nameToRuleMap = new LinkedHashMap<String,Rule>();
-
-	/** If this grammar is a delegate, some rules might be overridden; we don't
-	 *  want to gen code for them.
-	 */
-	public Set<String> overriddenRules = new HashSet<String>();
-
-	/** The list of all rules referenced in this grammar, not defined here,
-	 *  and defined in a delegate grammar.  Not all of these will be generated
-	 *  in the recognizer for this file; only those that are affected by rule
-	 *  definitions in this grammar.  I am not sure the Java target will need
-	 *  this but I'm leaving it in case other targets need it.
-	 *  see NameSpaceChecker.lookForReferencesToUndefinedSymbols()
-	 */
-	protected Set<Rule> delegatedRuleReferences = new HashSet();
-
-	/** The ANTLRParser tracks lexer rules when reading combined grammars
-	 *  so we can build the Tokens rule.
-	 */
-	public List<String> lexerRuleNamesInCombined = new ArrayList<String>();
-
-	/** Track the scopes defined outside of rules and the scopes associated
-	 *  with all rules (even if empty).
-	 */
-	protected Map scopes = new HashMap();
-
-	/** An AST that records entire input grammar with all rules.  A simple
-	 *  grammar with one rule, "grammar t; a : A | B ;", looks like:
-	 * ( grammar t ( rule a ( BLOCK ( ALT A ) ( ALT B ) ) <end-of-rule> ) )
-	 */
-	protected GrammarAST grammarTree = null;
-
-	/** Each subrule/rule is a decision point and we must track them so we
-	 *  can go back later and build DFA predictors for them.  This includes
-	 *  all the rules, subrules, optional blocks, ()+, ()* etc...
-	 */
-	protected Vector<Decision> indexToDecision =
-		new Vector<Decision>(INITIAL_DECISION_LIST_SIZE);
-
-	/** If non-null, this is the code generator we will use to generate
-	 *  recognizers in the target language.
-	 */
-	protected CodeGenerator generator;
-
-	public NameSpaceChecker nameSpaceChecker = new NameSpaceChecker(this);
-
-	public LL1Analyzer ll1Analyzer = new LL1Analyzer(this);
-
-	/** For merged lexer/parsers, we must construct a separate lexer spec.
-	 *  This is the template for lexer; put the literals first then the
-	 *  regular rules.  We don't need to specify a token vocab import as
-	 *  I make the new grammar import from the old all in memory; don't want
-	 *  to force it to read from the disk.  Lexer grammar will have same
-	 *  name as original grammar but will be in different filename.  Foo.g
-	 *  with combined grammar will have FooParser.java generated and
-	 *  Foo__.g again with Foo inside.  It will, however, generate FooLexer.java
-	 *  as it's a lexer grammar.  A bit odd, but autogenerated.  Can tweak
-	 *  later if we want.
-	 */
-	protected String lexerGrammarTemplate =
-			"grammar(name, options, imports, actionNames, actions, literals, rules) ::= <<\n" +
-			"lexer grammar <name>;\n" +
-			"<if(options)>" +
-			"options {\n" +
-			"  <options:{it | <it.name>=<it.value>;<\\n>}>\n" +
-			"}<\\n>\n" +
-			"<endif>\n" +
-			"<if(imports)>import <imports; separator=\", \">;<endif>\n" +
-			"<actionNames,actions:{n,a|@<n> {<a>\\}\n}>\n" +
-			"<literals:{it | <it.ruleName> : <it.literal> ;\n}>\n" +
-			"<rules>\n" +
-			">>\n";
-	protected ST lexerGrammarST;
-
-	/** What file name holds this grammar? */
-	protected String fileName;
-
-	/** How long in ms did it take to build DFAs for this grammar?
-	 *  If this grammar is a combined grammar, it only records time for
-	 *  the parser grammar component.  This only records the time to
-	 *  do the LL(*) work; NFA->DFA conversion.
-	 */
-	public long DFACreationWallClockTimeInMS;
-
-	public int numberOfSemanticPredicates = 0;
-	public int numberOfManualLookaheadOptions = 0;
-	public Set<Integer> setOfNondeterministicDecisionNumbers = new HashSet<Integer>();
-	public Set<Integer> setOfNondeterministicDecisionNumbersResolvedWithPredicates =
-		new HashSet<Integer>();
-
-	/** Track decisions with syn preds specified for reporting.
-	 *  This is a set of BLOCK type AST nodes.
-	 */
-	public Set<GrammarAST> blocksWithSynPreds = new HashSet();
-
-	/** Track decisions that actually use the syn preds in the DFA.
-	 *  Computed during NFA to DFA conversion.
-	 */
-	public Set<DFA> decisionsWhoseDFAsUsesSynPreds = new HashSet<DFA>();
-
-	/** Track names of preds so we can avoid generating preds that aren't used.
-	 *  Computed during NFA to DFA conversion.  Just walk accept states
-	 *  and look for synpreds because that is the only state target whose
-	 *  incident edges can have synpreds.  Same is true for
-	 *  decisionsWhoseDFAsUsesSynPreds.
-	 */
-	public Set<String> synPredNamesUsedInDFA = new HashSet();
-
-	/** Track decisions with sem preds specified for reporting.
-	 *  This is a set of BLOCK type AST nodes.
-	 */
-	public Set<GrammarAST> blocksWithSemPreds = new HashSet();
-
-	/** Track decisions that actually use the sem preds in the DFA. */
-	public Set<DFA> decisionsWhoseDFAsUsesSemPreds = new HashSet();
-
-	protected boolean allDecisionDFACreated = false;
-
-	/** We need a way to detect when a lexer grammar is autogenerated from
-	 *  another grammar or we are just sending in a string representing a
-	 *  grammar.  We don't want to generate a .tokens file, for example,
-	 *  in such cases.
-	 */
-	protected boolean builtFromString = false;
-
-	/** Factored out the sanity checking code; delegate to it. */
-	GrammarSanity sanity = new GrammarSanity(this);
-
-	/** Useful for asking questions about target during analysis */
-	Target target;
-
-	/** Create a grammar from file name.  */
-	public Grammar(Tool tool, String fileName, CompositeGrammar composite) {
-		this.composite = composite;
-		setTool(tool);
-		setFileName(fileName);
-		// ensure we have the composite set to something
-		if ( composite.delegateGrammarTreeRoot==null ) {
-			composite.setDelegationRoot(this);
-		}
-		STGroup lexerGrammarSTG = new STGroupString(lexerGrammarTemplate);
-		lexerGrammarST = lexerGrammarSTG.getInstanceOf("grammar");
-		target = CodeGenerator.loadLanguageTarget((String) getOption("language"));
-	}
-
-	/** Useful for when you are sure that you are not part of a composite
-	 *  already.  Used in Interp/RandomPhrase and testing.
-	 */
-	public Grammar() { this((Tool)null); }
-
-	public Grammar(Tool tool) {
-		setTool(tool);
-		builtFromString = true;
-		composite = new CompositeGrammar(this);
-		STGroup lexerGrammarSTG = new STGroupString(lexerGrammarTemplate);
-		lexerGrammarST = lexerGrammarSTG.getInstanceOf("grammar");
-		target = CodeGenerator.loadLanguageTarget((String)getOption("language"));
-	}
-
-	/** Used for testing; only useful on noncomposite grammars.*/
-	public Grammar(String grammarString)
-			throws RecognitionException
-	{
-		this(null, grammarString);
-	}
-
-	/** Used for testing and Interp/RandomPhrase.  Only useful on
-	 *  noncomposite grammars.
-	 */
-	public Grammar(Tool tool, String grammarString)
-		throws RecognitionException
-	{
-		this(tool);
-		setFileName("<string>");
-		StringReader r = new StringReader(grammarString);
-		parseAndBuildAST(r);
-		composite.assignTokenTypes();
-		//composite.translateLeftRecursiveRules();
-		addRulesForSyntacticPredicates();
-		composite.defineGrammarSymbols();
-		//composite.createNFAs();
-		checkNameSpaceAndActions();
-	}
-
-	public void setFileName(String fileName) {
-		this.fileName = fileName;
-	}
-
-	public String getFileName() {
-		return fileName;
-	}
-
-	public void setName(String name) {
-		if ( name==null ) {
-			return;
-		}
-		// don't error check autogenerated files (those with '__' in them)
-		String saneFile = fileName.replace('\\', '/');
-		int lastSlash = saneFile.lastIndexOf('/');
-		String onlyFileName = saneFile.substring(lastSlash+1, fileName.length());
-		if ( !builtFromString ) {
-			int lastDot = onlyFileName.lastIndexOf('.');
-			String onlyFileNameNoSuffix = null;
-			if ( lastDot < 0 ) {
-				ErrorManager.error(ErrorManager.MSG_FILENAME_EXTENSION_ERROR, fileName);
-				onlyFileNameNoSuffix = onlyFileName+GRAMMAR_FILE_EXTENSION;
-			}
-			else {
-				onlyFileNameNoSuffix = onlyFileName.substring(0,lastDot);
-			}
-			if ( !name.equals(onlyFileNameNoSuffix) ) {
-				ErrorManager.error(ErrorManager.MSG_FILE_AND_GRAMMAR_NAME_DIFFER,
-								   name,
-								   fileName);
-			}
-		}
-		this.name = name;
-	}
-
-	public void setGrammarContent(String grammarString) throws RecognitionException {
-		StringReader r = new StringReader(grammarString);
-		parseAndBuildAST(r);
-		composite.assignTokenTypes();
-		composite.defineGrammarSymbols();
-	}
-
-	public void parseAndBuildAST()
-		throws IOException
-	{
-		FileReader fr = null;
-		BufferedReader br = null;
-		try {
-			fr = new FileReader(fileName);
-			br = new BufferedReader(fr);
-			parseAndBuildAST(br);
-			br.close();
-			br = null;
-		}
-		finally {
-			if ( br!=null ) {
-				br.close();
-			}
-		}
-	}
-
-	public void parseAndBuildAST(Reader r) {
-		// BUILD AST FROM GRAMMAR
-		ANTLRLexer lexer;
-		try {
-			lexer = new ANTLRLexer(new ANTLRReaderStream(r));
-		} catch (IOException e) {
-			ErrorManager.internalError("unexpected stream error from parsing "+fileName, e);
-			return;
-		}
-
-		lexer.setFileName(this.getFileName());
-		tokenBuffer = new CommonTokenStream(lexer);
-		ANTLRParser parser = ANTLRParser.createParser(tokenBuffer);
-		parser.setFileName(this.getFileName());
-		ANTLRParser.grammar__return result = null;
-		try {
-			result = parser.grammar_(this);
-		}
-		catch (RecognitionException re) {
-			ErrorManager.internalError("unexpected parser recognition error from "+fileName, re);
-		}
-
-        dealWithTreeFilterMode(); // tree grammar and filter=true?
-
-        if ( lexer.hasASTOperator && !buildAST() ) {
-			Object value = getOption("output");
-			if ( value == null ) {
-				ErrorManager.grammarWarning(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
-										    this, null);
-				setOption("output", "AST", null);
-			}
-			else {
-				ErrorManager.grammarError(ErrorManager.MSG_AST_OP_WITH_NON_AST_OUTPUT_OPTION,
-										  this, null, value);
-			}
-		}
-
-		setGrammarTree((GrammarAST)result.getTree());
-
-		//if ( grammarTree!=null ) System.out.println("grammar tree: "+grammarTree.toStringTree());
-
-		grammarTree.setUnknownTokenBoundaries();
-
-		setFileName(lexer.getFileName()); // the lexer #src might change name
-		if ( grammarTree==null || grammarTree.findFirstType(ANTLRParser.RULE)==null ) {
-			ErrorManager.error(ErrorManager.MSG_NO_RULES, getFileName());
-			return;
-		}
-	}
-
-    protected void dealWithTreeFilterMode() {
-        Object filterMode = (String)getOption("filter");
-        if ( type==TREE_PARSER && filterMode!=null && filterMode.toString().equals("true") ) {
-            // check for conflicting options
-            // filter => backtrack=true
-            // filter&&output=AST => rewrite=true
-            // filter&&output!=AST => error
-            // any deviation from valid option set is an error
-            Object backtrack = (String)getOption("backtrack");
-            Object output = getOption("output");
-            Object rewrite = getOption("rewrite");
-            if ( backtrack!=null && !backtrack.toString().equals("true") ) {
-                ErrorManager.error(ErrorManager.MSG_CONFLICTING_OPTION_IN_TREE_FILTER,
-                                   "backtrack", backtrack);
-            }
-            if ( output!=null && !output.toString().equals("AST") ) {
-                ErrorManager.error(ErrorManager.MSG_CONFLICTING_OPTION_IN_TREE_FILTER,
-                                   "output", output);
-                setOption("output", "", null);
-            }
-            if ( rewrite!=null && !rewrite.toString().equals("true") ) {
-                ErrorManager.error(ErrorManager.MSG_CONFLICTING_OPTION_IN_TREE_FILTER,
-                                   "rewrite", rewrite);
-            }
-            // set options properly
-            setOption("backtrack", "true", null);
-            if ( output!=null && output.toString().equals("AST") ) {
-                setOption("rewrite", "true", null);
-            }
-            // @synpredgate set to state.backtracking==1 by code gen when filter=true
-            // superClass set in template target::treeParser
-        }
-    }
-
-	public void translateLeftRecursiveRule(GrammarAST ruleAST) {
-		//System.out.println(ruleAST.toStringTree());
-		CommonTreeNodeStream input = new CommonTreeNodeStream(ruleAST);
-		LeftRecursiveRuleAnalyzer leftRecursiveRuleWalker =
-			new LeftRecursiveRuleAnalyzer(input, this, ruleAST.enclosingRuleName);
-		boolean isLeftRec = false;
-		try {
-			//System.out.println("TESTING "+ruleAST.enclosingRuleName);
-			isLeftRec = leftRecursiveRuleWalker.rec_rule(this);
-		}
-		catch (RecognitionException re) {
-			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE, re);
-		}
-		if ( !isLeftRec ) return;
-		List<String> rules = new ArrayList<String>();
-		rules.add( leftRecursiveRuleWalker.getArtificialPrecStartRule() ) ;
-		rules.add( leftRecursiveRuleWalker.getArtificialOpPrecRule() );
-		rules.add( leftRecursiveRuleWalker.getArtificialPrimaryRule() );
-		for (String r : rules) {
-			GrammarAST t = parseArtificialRule(r);
-			addRule(grammarTree, t);
-			//System.out.println(t.toStringTree());
-		}
-
-		//precRuleInitCodeBlocks.add( precRuleWalker.getOpPrecJavaCode() );
-	}
-
-	public void defineGrammarSymbols() {
-		if ( Tool.internalOption_PrintGrammarTree ) {
-			System.out.println(grammarTree.toStringList());
-		}
-
-		// DEFINE RULES
-		//System.out.println("### define "+name+" rules");
-		DefineGrammarItemsWalker defineItemsWalker = new DefineGrammarItemsWalker(new CommonTreeNodeStream(getGrammarTree()));
-		try {
-			defineItemsWalker.grammar_(this);
-		}
-		catch (RecognitionException re) {
-			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
-							   re);
-		}
-	}
-
-	/** ANALYZE ACTIONS, LOOKING FOR LABEL AND ATTR REFS, sanity check */
-	public void checkNameSpaceAndActions() {
-		examineAllExecutableActions();
-		checkAllRulesForUselessLabels();
-
-		nameSpaceChecker.checkConflicts();
-	}
-
-	/** Many imports are illegal such as lexer into a tree grammar */
-	public boolean validImport(Grammar delegate) {
-		List<Integer> validDelegators = validDelegations.get(delegate.type);
-		return validDelegators!=null && validDelegators.contains(this.type);
-	}
-
-	/** If the grammar is a combined grammar, return the text of the implicit
-	 *  lexer grammar.
-	 */
-	public String getLexerGrammar() {
-		if ( lexerGrammarST.getAttribute("literals")==null &&
-			 lexerGrammarST.getAttribute("rules")==null )
-		{
-			// if no rules, return nothing
-			return null;
-		}
-		lexerGrammarST.add("name", name);
-		// if there are any actions set for lexer, pass them in
-		if ( getActions().get("lexer")!=null ) {
-			lexerGrammarST.add("actionNames",
-										getActions().get("lexer").keySet());
-			lexerGrammarST.add("actions",
-										getActions().get("lexer").values());
-		}
-		// make sure generated grammar has the same options
-		if ( options!=null ) {
-			Iterator optionNames = options.keySet().iterator();
-			while (optionNames.hasNext()) {
-				String optionName = (String) optionNames.next();
-				if ( !doNotCopyOptionsToLexer.contains(optionName) ) {
-					Object value = options.get(optionName);
-					lexerGrammarST.addAggr("options.{name,value}", optionName, value);
-				}
-			}
-		}
-		return lexerGrammarST.render();
-	}
-
-	public String getImplicitlyGeneratedLexerFileName() {
-		return name+
-			   IGNORE_STRING_IN_GRAMMAR_FILE_NAME +
-			   LEXER_GRAMMAR_FILE_EXTENSION;
-	}
-
-	/** Get the name of the generated recognizer; may or may not be same
-	 *  as grammar name.
-	 *  Recognizer is TParser and TLexer from T if combined, else
-	 *  just use T regardless of grammar type.
-	 */
-	public String getRecognizerName() {
-		String suffix = "";
-		List<Grammar> grammarsFromRootToMe = composite.getDelegators(this);
-		//System.out.println("grammarsFromRootToMe="+grammarsFromRootToMe);
-		String qualifiedName = name;
-		if ( grammarsFromRootToMe!=null ) {
-			StringBuffer buf = new StringBuffer();
-			for (Grammar g : grammarsFromRootToMe) {
-				buf.append(g.name);
-				buf.append('_');
-			}
-			buf.append(name);
-			qualifiedName = buf.toString();
-		}
-		if ( type==Grammar.COMBINED ||
-			 (type==Grammar.LEXER && implicitLexer) )
-		{
-			suffix = Grammar.grammarTypeToFileNameSuffix[type];
-		}
-		return qualifiedName+suffix;
-	}
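-	// Worked examples (hypothetical grammar names, for illustration only):
-	//   combined grammar T            -> "TParser", and "TLexer" for its implicit lexer
-	//   standalone tree grammar W     -> "W" (no suffix added)
-	//   delegate Sub imported by Root -> "Root_Sub" (delegator chain prepended)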
-
-	/** Parse a rule we add artificially that is a list of the other lexer
-	 *  rules like this: "Tokens : ID | INT | SEMI ;"  nextToken() will invoke
-	 *  this to set the current token.  Add char literals before
-	 *  the rule references.
-	 *
-	 *  If in filter mode, we want every alt to backtrack and we need to
-	 *  do k=1 to force the "first token def wins" rule.  Otherwise, the
-	 *  longest-match rule comes into play with LL(*).
-	 *
-	 *  The ANTLRParser antlr.g file now invokes this when parsing a lexer
-	 *  grammar, which I think is proper even though it peeks at the info
-	 *  that later phases will (re)compute.  It gets a list of lexer rules
-	 *  and builds a string representing the rule; then it creates a parser
-	 *  and adds the resulting tree to the grammar's tree.
-	 */
-	public GrammarAST addArtificialMatchTokensRule(GrammarAST grammarAST,
-												   List<String> ruleNames,
-												   List<String> delegateNames,
-												   boolean filterMode) {
-		ST matchTokenRuleST = null;
-		if ( filterMode ) {
-			matchTokenRuleST = new ST(
-					ARTIFICIAL_TOKENS_RULENAME+
-					" options {k=1; backtrack=true;} : <rules; separator=\"|\">;");
-		}
-		else {
-			matchTokenRuleST = new ST(
-					ARTIFICIAL_TOKENS_RULENAME+" : <rules; separator=\"|\">;");
-		}
-
-		// Now add token rule references
-		for (int i = 0; i < ruleNames.size(); i++) {
-			String rname = (String) ruleNames.get(i);
-			matchTokenRuleST.add("rules", rname);
-		}
-		for (int i = 0; i < delegateNames.size(); i++) {
-			String dname = (String) delegateNames.get(i);
-			matchTokenRuleST.add("rules", dname+".Tokens");
-		}
-		//System.out.println("tokens rule: "+matchTokenRuleST.toString());
-		GrammarAST r = parseArtificialRule(matchTokenRuleST.render());
-		addRule(grammarAST, r);
-		//addRule((GrammarAST)parser.getAST());
-		//return (GrammarAST)parser.getAST();
-		return r;
-	}
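-	// For illustration (hypothetical rule and delegate names): with
-	// ruleNames=[ID, INT], delegateNames=[Sub] and filterMode=true, the template
-	// above renders roughly as
-	//     Tokens options {k=1; backtrack=true;} : ID|INT|Sub.Tokens;
-	// which parseArtificialRule() then parses and addRule() grafts onto the tree.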
-
-	public GrammarAST parseArtificialRule(String ruleText) {
-		ANTLRLexer lexer = new ANTLRLexer(new ANTLRStringStream(ruleText));
-		ANTLRParser parser = ANTLRParser.createParser(new CommonTokenStream(lexer));
-		parser.setGrammar(this);
-		parser.setGrammarType(this.type);
-		try {
-			ANTLRParser.rule_return result = parser.rule();
-			return (GrammarAST)result.getTree();
-		}
-		catch (Exception e) {
-			ErrorManager.error(ErrorManager.MSG_ERROR_CREATING_ARTIFICIAL_RULE,
-							   e);
-			return null;
-		}
-	}
-
-	public void addRule(GrammarAST grammarTree, GrammarAST t) {
-		GrammarAST p = null;
-		for (int i = 0; i < grammarTree.getChildCount(); i++ ) {
-			p = (GrammarAST)grammarTree.getChild(i);
-			if (p == null || p.getType() == ANTLRParser.RULE || p.getType() == ANTLRParser.PREC_RULE) {
-				break;
-			}
-		}
-
-		if (p != null) {
-			grammarTree.addChild(t);
-		}
-	}
-
-	/** for any syntactic predicates, we need to define rules for them; they will get
-	 *  defined automatically like any other rule. :)
-	 */
-	protected List getArtificialRulesForSyntacticPredicates(LinkedHashMap<String,GrammarAST> nameToSynpredASTMap)
-	{
-		List<GrammarAST> rules = new ArrayList<GrammarAST>();
-		if ( nameToSynpredASTMap==null ) {
-			return rules;
-		}
-		boolean isLexer = grammarTree.getType()==ANTLRParser.LEXER_GRAMMAR;
-		for (String synpredName : nameToSynpredASTMap.keySet()) {
-			GrammarAST fragmentAST = nameToSynpredASTMap.get(synpredName);
-			GrammarAST ruleAST =
-				ANTLRParser.createSimpleRuleAST(synpredName,
-												fragmentAST,
-												isLexer);
-			rules.add(ruleAST);
-		}
-		return rules;
-	}
-
-	public void addRulesForSyntacticPredicates() {
-		// Get syn pred rules and add to existing tree
-		List synpredRules =
-			getArtificialRulesForSyntacticPredicates(nameToSynpredASTMap);
-		for (int i = 0; i < synpredRules.size(); i++) {
-			GrammarAST rAST = (GrammarAST) synpredRules.get(i);
-			grammarTree.addChild(rAST);
-		}
-	}
-
-	/** Walk the list of options, altering this Grammar object according
-	 *  to any I recognize.
-	protected void processOptions() {
-		Iterator optionNames = options.keySet().iterator();
-		while (optionNames.hasNext()) {
-			String optionName = (String) optionNames.next();
-			Object value = options.get(optionName);
-			if ( optionName.equals("tokenVocab") ) {
-
-			}
-		}
-	}
-	 */
-
-	/** Define all the rule begin/end NFAStates to solve forward reference
-	 *  issues.  Critical for composite grammars too.
-	 *  This is normally called on all root/delegates manually and then
-	 *  buildNFA() is called afterwards because the NFA construction needs
-	 *  to see rule start/stop states from potentially every grammar; these
-	 *  have to be created a priori.  Testing routines will often
-	 *  just call buildNFA(), which forces a call to this method if not
-	 *  done already. Works ONLY for single noncomposite grammars.
-	 */
-	public void createRuleStartAndStopNFAStates() {
-		//System.out.println("### createRuleStartAndStopNFAStates "+getGrammarTypeString()+" grammar "+name+" NFAs");
-		if ( nfa!=null ) {
-			return;
-		}
-		nfa = new NFA(this);
-		factory = new NFAFactory(nfa);
-
-		Collection rules = getRules();
-		for (Iterator itr = rules.iterator(); itr.hasNext();) {
-			Rule r = (Rule) itr.next();
-			String ruleName = r.name;
-			NFAState ruleBeginState = factory.newState();
-			ruleBeginState.setDescription("rule "+ruleName+" start");
-			ruleBeginState.enclosingRule = r;
-			r.startState = ruleBeginState;
-			NFAState ruleEndState = factory.newState();
-			ruleEndState.setDescription("rule "+ruleName+" end");
-			ruleEndState.setAcceptState(true);
-			ruleEndState.enclosingRule = r;
-			r.stopState = ruleEndState;
-		}
-	}
-
-	public void buildNFA() {
-		if ( nfa==null ) {
-			createRuleStartAndStopNFAStates();
-		}
-		if ( nfa.complete ) {
-			// don't let it create more than once; has side-effects
-			return;
-		}
-		//System.out.println("### build "+getGrammarTypeString()+" grammar "+name+" NFAs");
-		if ( getRules().size()==0 ) {
-			return;
-		}
-
-		CommonTreeNodeStream input = new CommonTreeNodeStream(getGrammarTree());
-		TreeToNFAConverter nfaBuilder = new TreeToNFAConverter(input, this, nfa, factory);
-		try {
-			nfaBuilder.grammar_();
-		}
-		catch (RecognitionException re) {
-			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
-							   name,
-							   re);
-		}
-		nfa.complete = true;
-	}
-
-	/** For each decision in this grammar, compute a single DFA using the
-	 *  NFA states associated with the decision.  The DFA construction
-	 *  determines whether or not the alternatives in the decision are
-	 *  separable using a regular lookahead language.
-	 *
-	 *  Store the lookahead DFAs in the AST created from the user's grammar
-	 *  so the code generator or whoever can easily access it.
-	 *
-	 *  This is a separate method because you might want to create a
-	 *  Grammar without doing the expensive analysis.
-	 */
-	public void createLookaheadDFAs() {
-		createLookaheadDFAs(true);
-	}
-
-	public void createLookaheadDFAs(boolean wackTempStructures) {
-		if ( nfa==null ) {
-			buildNFA();
-		}
-
-		// CHECK FOR LEFT RECURSION; Make sure we can actually do analysis
-		checkAllRulesForLeftRecursion();
-
-		/*
-		// was there a severe problem while sniffing the grammar?
-		if ( ErrorManager.doNotAttemptAnalysis() ) {
-			return;
-		}
-		*/
-
-		long start = System.currentTimeMillis();
-
-		//System.out.println("### create DFAs");
-		int numDecisions = getNumberOfDecisions();
-		if ( NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION ) {
-			for (int decision=1; decision<=numDecisions; decision++) {
-				NFAState decisionStartState = getDecisionNFAStartState(decision);
-				if ( leftRecursiveRules.contains(decisionStartState.enclosingRule) ) {
-					// don't bother to process decisions within left recursive rules.
-					if ( composite.watchNFAConversion ) {
-						System.out.println("ignoring decision "+decision+
-										   " within left-recursive rule "+decisionStartState.enclosingRule.name);
-					}
-					continue;
-				}
-				if ( !externalAnalysisAbort && decisionStartState.getNumberOfTransitions()>1 ) {
-					Rule r = decisionStartState.enclosingRule;
-					if ( r.isSynPred && !synPredNamesUsedInDFA.contains(r.name) ) {
-						continue;
-					}
-					DFA dfa = null;
-					// if k=* or k=1, try LL(1)
-					if ( getUserMaxLookahead(decision)==0 ||
-						 getUserMaxLookahead(decision)==1 )
-					{
-						dfa = createLL_1_LookaheadDFA(decision);
-					}
-					if ( dfa==null ) {
-						if ( composite.watchNFAConversion ) {
-							System.out.println("decision "+decision+
-											   " not suitable for LL(1)-optimized DFA analysis");
-						}
-						dfa = createLookaheadDFA(decision, wackTempStructures);
-					}
-					if ( dfa.startState==null ) {
-						// something went wrong; wipe out DFA
-						setLookaheadDFA(decision, null);
-					}
-					if ( Tool.internalOption_PrintDFA ) {
-						System.out.println("DFA d="+decision);
-						FASerializer serializer = new FASerializer(nfa.grammar);
-						String result = serializer.serialize(dfa.startState);
-						System.out.println(result);
-					}
-				}
-			}
-		}
-		else {
-			ErrorManager.info("two-threaded DFA conversion");
-			// create a barrier for the two DFA conversion threads plus this main creation thread
-			Barrier barrier = new Barrier(3);
-			// assume 2 CPU for now
-			int midpoint = numDecisions/2;
-			NFAConversionThread t1 =
-				new NFAConversionThread(this, barrier, 1, midpoint);
-			new Thread(t1).start();
-			if ( midpoint == (numDecisions/2) ) {
-				midpoint++;
-			}
-			NFAConversionThread t2 =
-				new NFAConversionThread(this, barrier, midpoint, numDecisions);
-			new Thread(t2).start();
-			// wait for these two threads to finish
-			try {
-				barrier.waitForRelease();
-			}
-			catch(InterruptedException e) {
-				ErrorManager.internalError("what the hell? DFA interruptus", e);
-			}
-		}
-
-		long stop = System.currentTimeMillis();
-		DFACreationWallClockTimeInMS = stop - start;
-
-		// indicate that we've finished building DFA (even if #decisions==0)
-		allDecisionDFACreated = true;
-	}
-
-	public DFA createLL_1_LookaheadDFA(int decision) {
-		Decision d = getDecision(decision);
-		String enclosingRule = d.startState.enclosingRule.name;
-		Rule r = d.startState.enclosingRule;
-		NFAState decisionStartState = getDecisionNFAStartState(decision);
-
-		if ( composite.watchNFAConversion ) {
-			System.out.println("--------------------\nattempting LL(1) DFA (d="
-							   +decisionStartState.getDecisionNumber()+") for "+
-							   decisionStartState.getDescription());
-		}
-
-		if ( r.isSynPred && !synPredNamesUsedInDFA.contains(enclosingRule) ) {
-			return null;
-		}
-
-		// compute lookahead for each alt
-		int numAlts = getNumberOfAltsForDecisionNFA(decisionStartState);
-		LookaheadSet[] altLook = new LookaheadSet[numAlts+1];
-		for (int alt = 1; alt <= numAlts; alt++) {
-			int walkAlt =
-				decisionStartState.translateDisplayAltToWalkAlt(alt);
-			NFAState altLeftEdge = getNFAStateForAltOfDecision(decisionStartState, walkAlt);
-			NFAState altStartState = (NFAState)altLeftEdge.transition[0].target;
-			//System.out.println("alt "+alt+" start state = "+altStartState.stateNumber);
-			altLook[alt] = ll1Analyzer.LOOK(altStartState);
-			//System.out.println("alt "+alt+": "+altLook[alt].toString(this));
-		}
-
-		// compare alt i with alt j for disjointness
-		boolean decisionIsLL_1 = true;
-outer:
-		for (int i = 1; i <= numAlts; i++) {
-			for (int j = i+1; j <= numAlts; j++) {
-				/*
-				System.out.println("compare "+i+", "+j+": "+
-								   altLook[i].toString(this)+" with "+
-								   altLook[j].toString(this));
-				*/
-				LookaheadSet collision = altLook[i].intersection(altLook[j]);
-				if ( !collision.isNil() ) {
-					//System.out.println("collision (non-LL(1)): "+collision.toString(this));
-					decisionIsLL_1 = false;
-					break outer;
-				}
-			}
-		}
-
-		boolean foundConfoundingPredicate =
-			ll1Analyzer.detectConfoundingPredicates(decisionStartState);
-		if ( decisionIsLL_1 && !foundConfoundingPredicate ) {
-			// build an LL(1) optimized DFA with edge for each altLook[i]
-			if ( NFAToDFAConverter.debug ) {
-				System.out.println("decision "+decision+" is simple LL(1)");
-			}
-			DFA lookaheadDFA = new LL1DFA(decision, decisionStartState, altLook);
-			setLookaheadDFA(decision, lookaheadDFA);
-			updateLineColumnToLookaheadDFAMap(lookaheadDFA);
-			return lookaheadDFA;
-		}
-
-		// not LL(1) but perhaps we can solve with simplified predicate search
-		// even if k=1 set manually, only resolve here if we have preds; i.e.,
-		// don't resolve etc...
-
-		/*
-		SemanticContext visiblePredicates =
-			ll1Analyzer.getPredicates(decisionStartState);
-		boolean foundConfoundingPredicate =
-			ll1Analyzer.detectConfoundingPredicates(decisionStartState);
-			*/
-
-		// exit if not forced k=1 or we found a predicate situation we
-		// can't handle: predicates in rules invoked from this decision.
-		if ( getUserMaxLookahead(decision)!=1 || // not manually set to k=1
-			 !getAutoBacktrackMode(decision) ||
-			 foundConfoundingPredicate )
-		{
-			//System.out.println("trying LL(*)");
-			return null;
-		}
-
-		List<IntervalSet> edges = new ArrayList<IntervalSet>();
-		for (int i = 1; i < altLook.length; i++) {
-			LookaheadSet s = altLook[i];
-			edges.add((IntervalSet)s.tokenTypeSet);
-		}
-		List<IntervalSet> disjoint = makeEdgeSetsDisjoint(edges);
-		//System.out.println("disjoint="+disjoint);
-
-		MultiMap<IntervalSet, Integer> edgeMap = new MultiMap<IntervalSet, Integer>();
-		for (int i = 0; i < disjoint.size(); i++) {
-			IntervalSet ds = (IntervalSet) disjoint.get(i);
-			for (int alt = 1; alt < altLook.length; alt++) {
-				LookaheadSet look = altLook[alt];
-				if ( !ds.and(look.tokenTypeSet).isNil() ) {
-					edgeMap.map(ds, alt);
-				}
-			}
-		}
-		//System.out.println("edge map: "+edgeMap);
-
-		// TODO: how do we know we covered stuff?
-
-		// build an LL(1) optimized DFA with edge for each altLook[i]
-		DFA lookaheadDFA = new LL1DFA(decision, decisionStartState, edgeMap);
-		setLookaheadDFA(decision, lookaheadDFA);
-
-		// create map from line:col to decision DFA (for ANTLRWorks)
-		updateLineColumnToLookaheadDFAMap(lookaheadDFA);
-
-		return lookaheadDFA;
-	}
-
-	private void updateLineColumnToLookaheadDFAMap(DFA lookaheadDFA) {
-		GrammarAST decisionAST = nfa.grammar.getDecisionBlockAST(lookaheadDFA.decisionNumber);
-		int line = decisionAST.getLine();
-		int col = decisionAST.getCharPositionInLine();
-		lineColumnToLookaheadDFAMap.put(new StringBuffer().append(line + ":")
-										.append(col).toString(), lookaheadDFA);
-	}
-
-	protected List<IntervalSet> makeEdgeSetsDisjoint(List<IntervalSet> edges) {
-		OrderedHashSet<IntervalSet> disjointSets = new OrderedHashSet<IntervalSet>();
-		// walk each incoming edge label/set and add to disjoint set
-		int numEdges = edges.size();
-		for (int e = 0; e < numEdges; e++) {
-			IntervalSet t = (IntervalSet) edges.get(e);
-			if ( disjointSets.contains(t) ) { // exact set present
-				continue;
-			}
-
-			// compare t with set i for disjointness
-			IntervalSet remainder = t; // remainder starts out as whole set to add
-			int numDisjointElements = disjointSets.size();
-			for (int i = 0; i < numDisjointElements; i++) {
-				IntervalSet s_i = (IntervalSet)disjointSets.get(i);
-
-				if ( t.and(s_i).isNil() ) { // nothing in common
-					continue;
-				}
-				//System.out.println(label+" collides with "+rl);
-
-				// For any (s_i, t) with s_i&t!=nil replace with (s_i-t, s_i&t)
-				// (ignoring s_i-t if nil; don't put in list)
-
-				// Replace existing s_i with intersection since we
-				// know that will always be a non nil character class
-				IntervalSet intersection = (IntervalSet)s_i.and(t);
-				disjointSets.set(i, intersection);
-
-				// Compute s_i-t to see what is in current set and not in incoming
-				IntSet existingMinusNewElements = s_i.subtract(t);
-				//System.out.println(s_i+"-"+t+"="+existingMinusNewElements);
-				if ( !existingMinusNewElements.isNil() ) {
-					// found a new character class; add to the end (doesn't affect
-					// the loop bound since numDisjointElements was computed a priori).
-					disjointSets.add(existingMinusNewElements);
-				}
-
-				// anything left to add to the reachableLabels?
-				remainder = (IntervalSet)t.subtract(s_i);
-				if ( remainder.isNil() ) {
-					break; // nothing left to add to set.  done!
-				}
-
-				t = remainder;
-			}
-			if ( !remainder.isNil() ) {
-				disjointSets.add(remainder);
-			}
-		}
-		return disjointSets.elements();
-	}
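-	// A small worked example (hypothetical interval sets): for incoming edges
-	// {1..5} and {3..8}, the loop above produces the disjoint sets
-	// {3..5} (overlap), {1..2} (old minus new) and {6..8} (new minus old), so
-	// every original edge is expressible as a union of the returned sets.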
-
-	public DFA createLookaheadDFA(int decision, boolean wackTempStructures) {
-		Decision d = getDecision(decision);
-		String enclosingRule = d.startState.enclosingRule.name;
-		Rule r = d.startState.enclosingRule;
-
-		//System.out.println("createLookaheadDFA(): "+enclosingRule+" dec "+decision+"; synprednames prev used "+synPredNamesUsedInDFA);
-		NFAState decisionStartState = getDecisionNFAStartState(decision);
-		long startDFA=0,stopDFA=0;
-		if ( composite.watchNFAConversion ) {
-			System.out.println("--------------------\nbuilding lookahead DFA (d="
-							   +decisionStartState.getDecisionNumber()+") for "+
-							   decisionStartState.getDescription());
-			startDFA = System.currentTimeMillis();
-		}
-
-		DFA lookaheadDFA = new DFA(decision, decisionStartState);
-		// Retry to create a simpler DFA if analysis failed (non-LL(*),
-		// recursion overflow, or time out).
-		boolean failed =
-			lookaheadDFA.probe.isNonLLStarDecision() ||
-			lookaheadDFA.probe.analysisOverflowed();
-		if ( failed && lookaheadDFA.okToRetryDFAWithK1() ) {
-			// set k=1 option and try again.
-			// First, clean up tracking stuff
-			decisionsWhoseDFAsUsesSynPreds.remove(lookaheadDFA);
-			// TODO: clean up synPredNamesUsedInDFA also (harder)
-			d.blockAST.setBlockOption(this, "k", Utils.integer(1));
-			if ( composite.watchNFAConversion ) {
-				System.out.print("trying decision "+decision+
-								 " again with k=1; reason: "+
-								 lookaheadDFA.getReasonForFailure());
-			}
-			lookaheadDFA = null; // make sure other memory is "free" before redoing
-			lookaheadDFA = new DFA(decision, decisionStartState);
-		}
-
-		setLookaheadDFA(decision, lookaheadDFA);
-
-		if ( wackTempStructures ) {
-			for (DFAState s : lookaheadDFA.getUniqueStates().values()) {
-				s.reset();
-			}
-		}
-
-		// create map from line:col to decision DFA (for ANTLRWorks)
-		updateLineColumnToLookaheadDFAMap(lookaheadDFA);
-
-		if ( composite.watchNFAConversion ) {
-			stopDFA = System.currentTimeMillis();
-			System.out.println("cost: "+lookaheadDFA.getNumberOfStates()+
-							   " states, "+(int)(stopDFA-startDFA)+" ms");
-		}
-		//System.out.println("after create DFA; synPredNamesUsedInDFA="+synPredNamesUsedInDFA);
-		return lookaheadDFA;
-	}
-
-	/** Terminate DFA creation (grammar analysis).
-	 */
-	public void externallyAbortNFAToDFAConversion() {
-		externalAnalysisAbort = true;
-	}
-
-	public boolean NFAToDFAConversionExternallyAborted() {
-		return externalAnalysisAbort;
-	}
-
-	/** Return a new unique integer in the token type space */
-	public int getNewTokenType() {
-		composite.maxTokenType++;
-		return composite.maxTokenType;
-	}
-
-	/** Define a token at a particular token type value.  Blast an
-	 *  old value with a new one.  This is called during normal grammar processing
-	 *  and during import vocab operations to set tokens with specific values.
-	 */
-	public void defineToken(String text, int tokenType) {
-		//System.out.println("defineToken("+text+", "+tokenType+")");
-		if ( composite.tokenIDToTypeMap.get(text)!=null ) {
-			// already defined?  Must be predefined one like EOF;
-			// do nothing
-			return;
-		}
-		// the index in the typeToTokenList table is actually shifted to
-		// hold faux labels as you cannot have negative indices.
-		if ( text.charAt(0)=='\'' ) {
-			composite.stringLiteralToTypeMap.put(text, Utils.integer(tokenType));
-			// track in reverse index too
-			if ( tokenType>=composite.typeToStringLiteralList.size() ) {
-				composite.typeToStringLiteralList.setSize(tokenType+1);
-			}
-			composite.typeToStringLiteralList.set(tokenType, text);
-		}
-		else { // must be a label like ID
-			composite.tokenIDToTypeMap.put(text, Utils.integer(tokenType));
-		}
-		int index = Label.NUM_FAUX_LABELS+tokenType-1;
-		//System.out.println("defining "+name+" token "+text+" at type="+tokenType+", index="+index);
-		composite.maxTokenType = Math.max(composite.maxTokenType, tokenType);
-		if ( index>=composite.typeToTokenList.size() ) {
-			composite.typeToTokenList.setSize(index+1);
-		}
-		String prevToken = (String)composite.typeToTokenList.get(index);
-		if ( prevToken==null || prevToken.charAt(0)=='\'' ) {
-			// only record if nothing there before or if thing before was a literal
-			composite.typeToTokenList.set(index, text);
-		}
-	}
-
-	/** Define a new rule.  A new rule index is created by incrementing
-	 *  ruleIndex.
-	 */
-	public void defineRule(Token ruleToken,
-						   String modifier,
-						   Map options,
-						   GrammarAST tree,
-						   GrammarAST argActionAST,
-						   int numAlts)
-	{
-		String ruleName = ruleToken.getText();
-		if ( getLocallyDefinedRule(ruleName)!=null ) {
-			ErrorManager.grammarError(ErrorManager.MSG_RULE_REDEFINITION,
-									  this, ruleToken, ruleName);
-			return;
-		}
-
-		if ( (type==Grammar.PARSER||type==Grammar.TREE_PARSER) &&
-			 Character.isUpperCase(ruleName.charAt(0)) )
-		{
-			ErrorManager.grammarError(ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED,
-									  this, ruleToken, ruleName);
-			return;
-		}
-
-		Rule r = new Rule(this, ruleName, composite.ruleIndex, numAlts);
-		/*
-		System.out.println("defineRule("+ruleName+",modifier="+modifier+
-						   "): index="+r.index+", nalts="+numAlts);
-		*/
-		r.modifier = modifier;
-		nameToRuleMap.put(ruleName, r);
-		setRuleAST(ruleName, tree);
-		r.setOptions(options, ruleToken);
-		r.argActionAST = argActionAST;
-		composite.ruleIndexToRuleList.setSize(composite.ruleIndex+1);
-		composite.ruleIndexToRuleList.set(composite.ruleIndex, r);
-		composite.ruleIndex++;
-		if ( ruleName.startsWith(SYNPRED_RULE_PREFIX) ) {
-			r.isSynPred = true;
-		}
-	}
-
-	/** Define a new predicate and get back its name for use in building
-	 *  a semantic predicate reference to the syn pred.
-	 */
-	public String defineSyntacticPredicate(GrammarAST blockAST,
-										   String currentRuleName)
-	{
-		if ( nameToSynpredASTMap==null ) {
-			nameToSynpredASTMap = new LinkedHashMap();
-		}
-		String predName =
-			SYNPRED_RULE_PREFIX+(nameToSynpredASTMap.size() + 1)+"_"+name;
-		blockAST.setTreeEnclosingRuleNameDeeply(predName);
-		nameToSynpredASTMap.put(predName, blockAST);
-		return predName;
-	}
-
-	public LinkedHashMap getSyntacticPredicates() {
-		return nameToSynpredASTMap;
-	}
-
-	public GrammarAST getSyntacticPredicate(String name) {
-		if ( nameToSynpredASTMap==null ) {
-			return null;
-		}
-		return (GrammarAST)nameToSynpredASTMap.get(name);
-	}
-
-	public void synPredUsedInDFA(DFA dfa, SemanticContext semCtx) {
-		decisionsWhoseDFAsUsesSynPreds.add(dfa);
-		semCtx.trackUseOfSyntacticPredicates(this); // walk ctx looking for preds
-	}
-
-	/*
-	public Set<Rule> getRuleNamesVisitedDuringLOOK() {
-		return rulesSensitiveToOtherRules;
-	}
-	*/
-
-	/** Given @scope::name {action} define it for this grammar.  Later,
-	 *  the code generator will ask for the actions table.  For composite
-     *  grammars, make sure header action propagates down to all delegates.
-	 */
-	public void defineNamedAction(GrammarAST ampersandAST,
-								  String scope,
-								  GrammarAST nameAST,
-								  GrammarAST actionAST)
-	{
-		if ( scope==null ) {
-			scope = getDefaultActionScope(type);
-		}
-		//System.out.println("Grammar "+name+" define @"+scope+"::"+nameAST.getText()+"{"+actionAST.getText()+"}");
-		String actionName = nameAST.getText();
-		Map<String, Object> scopeActions = getActions().get(scope);
-		if ( scopeActions==null ) {
-			scopeActions = new HashMap<String, Object>();
-			getActions().put(scope, scopeActions);
-		}
-		Object a = scopeActions.get(actionName);
-		if ( a!=null ) {
-			ErrorManager.grammarError(
-				ErrorManager.MSG_ACTION_REDEFINITION,this,
-				nameAST.getToken(),nameAST.getText());
-		}
-		else {
-			scopeActions.put(actionName,actionAST);
-		}
-        // propagate header regardless of scope (lexer, parser, ...)?
-        if ( this==composite.getRootGrammar() && actionName.equals("header") ) {
-            List<Grammar> allgrammars = composite.getRootGrammar().getDelegates();
-            for (Grammar delegate : allgrammars) {
-				if ( target.isValidActionScope(delegate.type, scope) ) {
-					//System.out.println("propogate to "+delegate.name);
-                	delegate.defineNamedAction(ampersandAST, scope, nameAST, actionAST);
-				}
-            }
-        }
-    }
-
-    public void setSynPredGateIfNotAlready(ST gateST) {
-        String scope = getDefaultActionScope(type);
-        Map<String, Object> actionsForGrammarScope = getActions().get(scope);
-        // if no synpredgate action set by user then set
-        if ( (actionsForGrammarScope==null ||
-             !actionsForGrammarScope.containsKey(Grammar.SYNPREDGATE_ACTION_NAME)) )
-        {
-            if ( actionsForGrammarScope==null ) {
-                actionsForGrammarScope=new HashMap<String, Object>();
-                getActions().put(scope, actionsForGrammarScope);
-            }
-            actionsForGrammarScope.put(Grammar.SYNPREDGATE_ACTION_NAME,
-                                       gateST);
-        }
-    }
-
-	public Map<String, Map<String, Object>> getActions() {
-		return actions;
-	}
-
-	/** Given a grammar type, what should be the default action scope?
-	 *  If I say @members in a COMBINED grammar, for example, the
-	 *  default scope should be "parser".
-	 */
-	public String getDefaultActionScope(int grammarType) {
-		switch (grammarType) {
-			case Grammar.LEXER :
-				return "lexer";
-			case Grammar.PARSER :
-			case Grammar.COMBINED :
-				return "parser";
-			case Grammar.TREE_PARSER :
-				return "treeparser";
-		}
-		return null;
-	}
-
-	public void defineLexerRuleFoundInParser(Token ruleToken,
-											 GrammarAST ruleAST)
-	{
-//		System.out.println("rule tree is:\n"+ruleAST.toStringTree());
-		/*
-		String ruleText = tokenBuffer.toOriginalString(ruleAST.ruleStartTokenIndex,
-											   ruleAST.ruleStopTokenIndex);
-		*/
-		// first, create the text of the rule
-		StringBuffer buf = new StringBuffer();
-		buf.append("// $ANTLR src \"");
-		buf.append(getFileName());
-		buf.append("\" ");
-		buf.append(ruleAST.getLine());
-		buf.append("\n");
-		for (int i=ruleAST.getTokenStartIndex();
-			 i<=ruleAST.getTokenStopIndex() && i<tokenBuffer.size();
-			 i++)
-		{
-			CommonToken t = (CommonToken)tokenBuffer.get(i);
-			// undo the text deletions done by the lexer (ugh)
-			if ( t.getType()==ANTLRParser.BLOCK ) {
-				buf.append("(");
-			}
-			else if ( t.getType()==ANTLRParser.ACTION ) {
-				buf.append("{");
-				buf.append(t.getText());
-				buf.append("}");
-			}
-			else if ( t.getType()==ANTLRParser.SEMPRED ||
-					  t.getType()==ANTLRParser.SYN_SEMPRED ||
-					  t.getType()==ANTLRParser.GATED_SEMPRED ||
-					  t.getType()==ANTLRParser.BACKTRACK_SEMPRED )
-			{
-				buf.append("{");
-				buf.append(t.getText());
-				buf.append("}?");
-			}
-			else if ( t.getType()==ANTLRParser.ARG_ACTION ) {
-				buf.append("[");
-				buf.append(t.getText());
-				buf.append("]");
-			}
-			else {
-				buf.append(t.getText());
-			}
-		}
-		String ruleText = buf.toString();
-		//System.out.println("[["+ruleText+"]]");
-		// now put the rule into the lexer grammar template
-		if ( getGrammarIsRoot() ) { // don't build lexers for delegates
-			lexerGrammarST.add("rules", ruleText);
-		}
-		// track this lexer rule's name
-		composite.lexerRules.add(ruleToken.getText());
-	}
-
-	/** If someone does PLUS='+' in the parser, must make sure we get
-	 *  "PLUS : '+' ;" in lexer not "T73 : '+';"
-	 */
-	public void defineLexerRuleForAliasedStringLiteral(String tokenID,
-													   String literal,
-													   int tokenType)
-	{
-		if ( getGrammarIsRoot() ) { // don't build lexers for delegates
-			//System.out.println("defineLexerRuleForAliasedStringLiteral: "+literal+" "+tokenType);
-			lexerGrammarST.addAggr("literals.{ruleName,type,literal}",
-										tokenID,
-										Utils.integer(tokenType),
-										literal);
-		}
-		// track this lexer rule's name
-		composite.lexerRules.add(tokenID);
-	}
-
-	public void defineLexerRuleForStringLiteral(String literal, int tokenType) {
-		//System.out.println("defineLexerRuleForStringLiteral: "+literal+" "+tokenType);
-		// compute new token name like T237 and define it as having tokenType
-		String tokenID = computeTokenNameFromLiteral(tokenType,literal);
-		defineToken(tokenID, tokenType);
-		// tell implicit lexer to define a rule to match the literal
-		if ( getGrammarIsRoot() ) { // don't build lexers for delegates
-			lexerGrammarST.addAggr("literals.{ruleName,type,literal}",
-										tokenID,
-										Utils.integer(tokenType),
-										literal);
-		}
-	}
-
-	public Rule getLocallyDefinedRule(String ruleName) {
-		Rule r = nameToRuleMap.get(ruleName);
-		return r;
-	}
-
-	public Rule getRule(String ruleName) {
-		Rule r = composite.getRule(ruleName);
-		/*
-		if ( r!=null && r.grammar != this ) {
-			System.out.println(name+".getRule("+ruleName+")="+r);
-		}
-		*/
-		return r;
-	}
-
-	public Rule getRule(String scopeName, String ruleName) {
-		if ( scopeName!=null ) { // scope override
-			Grammar scope = composite.getGrammar(scopeName);
-			if ( scope==null ) {
-				return null;
-			}
-			return scope.getLocallyDefinedRule(ruleName);
-		}
-		return getRule(ruleName);
-	}
-
-	public int getRuleIndex(String scopeName, String ruleName) {
-		Rule r = getRule(scopeName, ruleName);
-		if ( r!=null ) {
-			return r.index;
-		}
-		return INVALID_RULE_INDEX;
-	}
-
-	public int getRuleIndex(String ruleName) {
-		return getRuleIndex(null, ruleName);
-	}
-
-	public String getRuleName(int ruleIndex) {
-		Rule r = composite.ruleIndexToRuleList.get(ruleIndex);
-		if ( r!=null ) {
-			return r.name;
-		}
-		return null;
-	}
-
-	/** Should codegen.g gen rule for ruleName?
-	 * 	If synpred, only gen if used in a DFA.
-	 *  If regular rule, only gen if not overridden in delegator.
-	 *  Always gen Tokens rule though.
-	 */
-	public boolean generateMethodForRule(String ruleName) {
-		if ( ruleName.equals(ARTIFICIAL_TOKENS_RULENAME) ) {
-			// always generate Tokens rule to satisfy lexer interface
-			// but it may have no alternatives.
-			return true;
-		}
-		if ( overriddenRules.contains(ruleName) ) {
-			// don't generate any overridden rules
-			return false;
-		}
-		// generate if non-synpred or synpred used in a DFA
-		Rule r = getLocallyDefinedRule(ruleName);
-		return !r.isSynPred ||
-			   (r.isSynPred&&synPredNamesUsedInDFA.contains(ruleName));
-	}
-
-	public AttributeScope defineGlobalScope(String name, Token scopeAction) {
-		AttributeScope scope = new AttributeScope(this, name, scopeAction);
-		scopes.put(name,scope);
-		return scope;
-	}
-
-	public AttributeScope createReturnScope(String ruleName, Token retAction) {
-		AttributeScope scope = new AttributeScope(this, ruleName, retAction);
-		scope.isReturnScope = true;
-		return scope;
-	}
-
-	public AttributeScope createRuleScope(String ruleName, Token scopeAction) {
-		AttributeScope scope = new AttributeScope(this, ruleName, scopeAction);
-		scope.isDynamicRuleScope = true;
-		return scope;
-	}
-
-	public AttributeScope createParameterScope(String ruleName, Token argAction) {
-		AttributeScope scope = new AttributeScope(this, ruleName, argAction);
-		scope.isParameterScope = true;
-		return scope;
-	}
-
-	/** Get a global scope */
-	public AttributeScope getGlobalScope(String name) {
-		return (AttributeScope)scopes.get(name);
-	}
-
-	public Map getGlobalScopes() {
-		return scopes;
-	}
-
-	/** Define a label defined in a rule r; check the validity then ask the
-	 *  Rule object to actually define it.
-	 */
-	protected void defineLabel(Rule r, Token label, GrammarAST element, int type) {
-		boolean err = nameSpaceChecker.checkForLabelTypeMismatch(r, label, type);
-		if ( err ) {
-			return;
-		}
-		r.defineLabel(label, element, type);
-	}
-
-	public void defineTokenRefLabel(String ruleName,
-									Token label,
-									GrammarAST tokenRef)
-	{
-		Rule r = getLocallyDefinedRule(ruleName);
-		if ( r!=null ) {
-			if ( type==LEXER &&
-				 (tokenRef.getType()==ANTLRParser.CHAR_LITERAL||
-				  tokenRef.getType()==ANTLRParser.BLOCK||
-				  tokenRef.getType()==ANTLRParser.NOT||
-				  tokenRef.getType()==ANTLRParser.CHAR_RANGE||
-				  tokenRef.getType()==ANTLRParser.WILDCARD))
-			{
-				defineLabel(r, label, tokenRef, CHAR_LABEL);
-			}
-            else {
-				defineLabel(r, label, tokenRef, TOKEN_LABEL);
-			}
-		}
-	}
-
-    public void defineWildcardTreeLabel(String ruleName,
-                                           Token label,
-                                           GrammarAST tokenRef)
-    {
-        Rule r = getLocallyDefinedRule(ruleName);
-        if ( r!=null ) {
-            defineLabel(r, label, tokenRef, WILDCARD_TREE_LABEL);
-        }
-    }
-
-    public void defineWildcardTreeListLabel(String ruleName,
-                                           Token label,
-                                           GrammarAST tokenRef)
-    {
-        Rule r = getLocallyDefinedRule(ruleName);
-        if ( r!=null ) {
-            defineLabel(r, label, tokenRef, WILDCARD_TREE_LIST_LABEL);
-        }
-    }
-
-    public void defineRuleRefLabel(String ruleName,
-								   Token label,
-								   GrammarAST ruleRef)
-	{
-		Rule r = getLocallyDefinedRule(ruleName);
-		if ( r!=null ) {
-			defineLabel(r, label, ruleRef, RULE_LABEL);
-		}
-	}
-
-	public void defineTokenListLabel(String ruleName,
-									 Token label,
-									 GrammarAST element)
-	{
-		Rule r = getLocallyDefinedRule(ruleName);
-		if ( r!=null ) {
-			defineLabel(r, label, element, TOKEN_LIST_LABEL);
-		}
-	}
-
-	public void defineRuleListLabel(String ruleName,
-									Token label,
-									GrammarAST element)
-	{
-		Rule r = getLocallyDefinedRule(ruleName);
-		if ( r!=null ) {
-			if ( !r.getHasMultipleReturnValues() ) {
-				ErrorManager.grammarError(
-					ErrorManager.MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT,this,
-					label,label.getText());
-			}
-			defineLabel(r, label, element, RULE_LIST_LABEL);
-		}
-	}
-
-	/** Given a set of all rewrite elements on right of ->, filter for
-	 *  label types such as Grammar.TOKEN_LABEL, Grammar.TOKEN_LIST_LABEL, ...
-	 *  Return a displayable token type name computed from the GrammarAST.
-	 */
-	public Set<String> getLabels(Set<GrammarAST> rewriteElements, int labelType) {
-		Set<String> labels = new HashSet<String>();
-		for (GrammarAST el : rewriteElements) {
-			if ( el.getType()==ANTLRParser.LABEL ) {
-				String labelName = el.getText();
-				Rule enclosingRule = getLocallyDefinedRule(el.enclosingRuleName);
-				if ( enclosingRule==null ) continue;
-				LabelElementPair pair = enclosingRule.getLabel(labelName);
-                /*
-                // if tree grammar and we have a wildcard, only notice it
-                // when looking for rule labels not token label. x=. should
-                // look like a rule ref since could be subtree.
-                if ( type==TREE_PARSER && pair!=null &&
-                     pair.elementRef.getType()==ANTLRParser.WILDCARD )
-                {
-                    if ( labelType==WILDCARD_TREE_LABEL ) {
-                        labels.add(labelName);
-                        continue;
-                    }
-                    else continue;
-                }
-                 */
-                // if valid label and type is what we're looking for
-				// and not ref to old value val $rule, add to list
-				if ( pair!=null && pair.type==labelType &&
-					 !labelName.equals(el.enclosingRuleName) )
-				{
-					labels.add(labelName);
-				}
-			}
-		}
-		return labels;
-	}
-
-	/** Before generating code, we examine all actions that can have
-	 *  $x.y and $y stuff in them because some code generation depends on
-	 *  Rule.referencedPredefinedRuleAttributes.  I need to remove unused
-	 *  rule labels for example.
-	 */
-	protected void examineAllExecutableActions() {
-		Collection rules = getRules();
-		for (Iterator it = rules.iterator(); it.hasNext();) {
-			Rule r = (Rule) it.next();
-			// walk all actions within the rule elements, args, and exceptions
-			List<GrammarAST> actions = r.getInlineActions();
-			for (int i = 0; i < actions.size(); i++) {
-				GrammarAST actionAST = (GrammarAST) actions.get(i);
-				ActionAnalysis sniffer =
-					new ActionAnalysis(this, r.name, actionAST);
-				sniffer.analyze();
-			}
-			// walk any named actions like @init, @after
-			Collection<GrammarAST> namedActions = r.getActions().values();
-			for (Iterator it2 = namedActions.iterator(); it2.hasNext();) {
-				GrammarAST actionAST = (GrammarAST) it2.next();
-				ActionAnalysis sniffer =
-					new ActionAnalysis(this, r.name, actionAST);
-				sniffer.analyze();
-			}
-		}
-	}
-
-	/** Remove all labels on rule refs whose target rules have no return value.
-	 *  Do this for all rules in grammar.
-	 */
-	public void checkAllRulesForUselessLabels() {
-		if ( type==LEXER ) {
-			return;
-		}
-		Set rules = nameToRuleMap.keySet();
-		for (Iterator it = rules.iterator(); it.hasNext();) {
-			String ruleName = (String) it.next();
-			Rule r = getRule(ruleName);
-			removeUselessLabels(r.getRuleLabels());
-			removeUselessLabels(r.getRuleListLabels());
-		}
-	}
-
-	/** A label on a rule is useless if the rule has no return value, no
-	 *  tree or template output, and it is not referenced in an action.
-	 */
-	protected void removeUselessLabels(Map ruleToElementLabelPairMap) {
-		if ( ruleToElementLabelPairMap==null ) {
-			return;
-		}
-		Collection labels = ruleToElementLabelPairMap.values();
-		List kill = new ArrayList();
-		for (Iterator labelit = labels.iterator(); labelit.hasNext();) {
-			LabelElementPair pair = (LabelElementPair) labelit.next();
-			Rule refdRule = getRule(pair.elementRef.getText());
-			if ( refdRule!=null && !refdRule.getHasReturnValue() && !pair.actionReferencesLabel ) {
-				//System.out.println(pair.label.getText()+" is useless");
-				kill.add(pair.label.getText());
-			}
-		}
-		for (int i = 0; i < kill.size(); i++) {
-			String labelToKill = (String) kill.get(i);
-			// System.out.println("kill "+labelToKill);
-			ruleToElementLabelPairMap.remove(labelToKill);
-		}
-	}
-
-	/** Track a rule reference within an outermost alt of a rule.  Used
-	 *  at the moment to decide if $ruleref refers to a unique rule ref in
-	 *  the alt.  Rewrite rules force tracking of all rule AST results.
-	 *
-	 *  This data is also used to verify that all rules have been defined.
-	 */
-	public void altReferencesRule(String enclosingRuleName,
-								  GrammarAST refScopeAST,
-								  GrammarAST refAST,
-								  int outerAltNum)
-	{
-		/* Do nothing for now; not sure need; track S.x as x
-		String scope = null;
-		Grammar scopeG = null;
-		if ( refScopeAST!=null ) {
-			if ( !scopedRuleRefs.contains(refScopeAST) ) {
-				scopedRuleRefs.add(refScopeAST);
-			}
-			scope = refScopeAST.getText();
-		}
-		*/
-		Rule r = getRule(enclosingRuleName);
-		if ( r==null ) {
-			return; // no error here; see NameSpaceChecker
-		}
-		r.trackRuleReferenceInAlt(refAST, outerAltNum);
-		Token refToken = refAST.getToken();
-		if ( !ruleRefs.contains(refAST) ) {
-			ruleRefs.add(refAST);
-		}
-	}
-
-	/** Track a token reference within an outermost alt of a rule.  Used
-	 *  to decide if $tokenref refers to a unique token ref in
-	 *  the alt. Does not track literals!
-	 *
-	 *  Rewrite rules force tracking of all tokens.
-	 */
-	public void altReferencesTokenID(String ruleName, GrammarAST refAST, int outerAltNum) {
-		Rule r = getLocallyDefinedRule(ruleName);
-		if ( r==null ) {
-			return;
-		}
-		r.trackTokenReferenceInAlt(refAST, outerAltNum);
-		if ( !tokenIDRefs.contains(refAST.getToken()) ) {
-			tokenIDRefs.add(refAST.getToken());
-		}
-	}
-
-	/** To yield smaller, more readable code, track which rules have their
-	 *  predefined attributes accessed.  If the rule has no user-defined
-	 *  return values, then don't generate the return value scope classes
-	 *  etc...  Make the rule have void return value.  Don't track for lexer
-	 *  rules.
-	 */
-	public void referenceRuleLabelPredefinedAttribute(String ruleName) {
-		Rule r = getRule(ruleName);
-		if ( r!=null && type!=LEXER ) {
-			// indicate that an action ref'd an attr unless it's in a lexer
-			// so that $ID.text refs don't force lexer rules to define
-			// return values...Token objects are created by the caller instead.
-			r.referencedPredefinedRuleAttributes = true;
-		}
-	}
-
-	public List checkAllRulesForLeftRecursion() {
-		return sanity.checkAllRulesForLeftRecursion();
-	}
-
-	/** Return a list of left-recursive rules; no analysis can be done
-	 *  successfully on these.  Useful to skip these rules then and also
-	 *  for ANTLRWorks to highlight them.
-	 */
-	public Set<Rule> getLeftRecursiveRules() {
-		if ( nfa==null ) {
-			buildNFA();
-		}
-		if ( leftRecursiveRules!=null ) {
-			return leftRecursiveRules;
-		}
-		sanity.checkAllRulesForLeftRecursion();
-		return leftRecursiveRules;
-	}
-
-	public void checkRuleReference(GrammarAST scopeAST,
-								   GrammarAST refAST,
-								   GrammarAST argsAST,
-								   String currentRuleName)
-	{
-		sanity.checkRuleReference(scopeAST, refAST, argsAST, currentRuleName);
-	}
-
-	/** Rules like "a : ;" and "a : {...} ;" should not generate
-	 *  try/catch blocks for RecognitionException.  To detect this
-	 *  it's probably ok to just look for any reference to an atom
-	 *  that can match some input.  Without that, the rule is unlikely to have
-	 *  anything else.
-	 */
-	public boolean isEmptyRule(GrammarAST block) {
-		GrammarAST aTokenRefNode =
-			block.findFirstType(ANTLRParser.TOKEN_REF);
-		GrammarAST aStringLiteralRefNode =
-			block.findFirstType(ANTLRParser.STRING_LITERAL);
-		GrammarAST aCharLiteralRefNode =
-			block.findFirstType(ANTLRParser.CHAR_LITERAL);
-		GrammarAST aWildcardRefNode =
-			block.findFirstType(ANTLRParser.WILDCARD);
-		GrammarAST aRuleRefNode =
-			block.findFirstType(ANTLRParser.RULE_REF);
-		if ( aTokenRefNode==null&&
-			 aStringLiteralRefNode==null&&
-			 aCharLiteralRefNode==null&&
-			 aWildcardRefNode==null&&
-			 aRuleRefNode==null )
-		{
-			return true;
-		}
-		return false;
-	}
-
-	public boolean isAtomTokenType(int ttype) {
-		return ttype == ANTLRParser.WILDCARD||
-			   ttype == ANTLRParser.CHAR_LITERAL||
-			   ttype == ANTLRParser.CHAR_RANGE||
-			   ttype == ANTLRParser.STRING_LITERAL||
-			   ttype == ANTLRParser.NOT||
-			   (type != LEXER && ttype == ANTLRParser.TOKEN_REF);
-	}
-
-	public int getTokenType(String tokenName) {
-		Integer I = null;
-		if ( tokenName.charAt(0)=='\'') {
-			I = (Integer)composite.stringLiteralToTypeMap.get(tokenName);
-		}
-		else { // must be a label like ID
-			I = (Integer)composite.tokenIDToTypeMap.get(tokenName);
-		}
-		int i = (I!=null)?I.intValue():Label.INVALID;
-		//System.out.println("grammar type "+type+" "+tokenName+"->"+i);
-		return i;
-	}
-
-	/** Get the list of tokens that are IDs like BLOCK and LPAREN */
-	public Set getTokenIDs() {
-		return composite.tokenIDToTypeMap.keySet();
-	}
-
-	/** Return an ordered integer list of token types that have no
-	 *  corresponding token ID like INT or KEYWORD_BEGIN; for stuff
-	 *  like 'begin'.
-	 */
-	public Collection getTokenTypesWithoutID() {
-		List types = new ArrayList();
-		for (int t =Label.MIN_TOKEN_TYPE; t<=getMaxTokenType(); t++) {
-			String name = getTokenDisplayName(t);
-			if ( name.charAt(0)=='\'' ) {
-				types.add(Utils.integer(t));
-			}
-		}
-		return types;
-	}
-
-	/** Get a list of all token IDs and literals that have an associated
-	 *  token type.
-	 */
-	public Set<String> getTokenDisplayNames() {
-		Set<String> names = new HashSet<String>();
-		for (int t =Label.MIN_TOKEN_TYPE; t <=getMaxTokenType(); t++) {
-			names.add(getTokenDisplayName(t));
-		}
-		return names;
-	}
-
-	/** Given a literal like (the 3 char sequence with single quotes) 'a',
-	 *  return the int value of 'a'. Convert escape sequences here also.
-	 *  ANTLR's antlr.g parser does not convert escape sequences.
-	 *
-	 *  11/26/2005: I changed literals to always be '...' even for strings.
-	 *  This routine still works though.
-	 */
-	public static int getCharValueFromGrammarCharLiteral(String literal) {
-		switch ( literal.length() ) {
-			case 3 :
-				// 'x'
-				return literal.charAt(1); // no escape char
-			case 4 :
-				// '\x'  (antlr lexer will catch invalid char)
-				if ( Character.isDigit(literal.charAt(2)) ) {
-					ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,
-									   "invalid char literal: "+literal);
-					return -1;
-				}
-				int escChar = literal.charAt(2);
-				int charVal = ANTLRLiteralEscapedCharValue[escChar];
-				if ( charVal==0 ) {
-					// Unnecessary escapes like '\{' should just yield {
-					return escChar;
-				}
-				return charVal;
-			case 8 :
-				// '\u1234'
-				String unicodeChars = literal.substring(3,literal.length()-1);
-				return Integer.parseInt(unicodeChars, 16);
-			default :
-				ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,
-								   "invalid char literal: "+literal);
-				return -1;
-		}
-	}
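-	// Illustrative inputs and results (literals written as they appear in a
-	// grammar; hypothetical, not from any particular grammar file):
-	//   'a'      -> 97   (plain char, length-3 case)
-	//   '\n'     -> 10   (escape, length-4 case)
-	//   '\u0041' -> 65   (unicode escape, length-8 case)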
-
-	/** ANTLR does not convert escape sequences during the parse phase because
-	 *  it could not know how to print String/char literals back out when
-	 *  printing grammars etc...  Someone in China might use the real unicode
-	 *  char in a literal as it will display on their screen; when printing
-	 *  back out, I could not know whether to display or use a unicode escape.
-	 *
-	 *  This routine converts a string literal with possible escape sequences
-	 *  into a pure string of 16-bit char values.  Escapes and unicode \u0000
-	 *  specs are converted to pure chars.  return in a buffer; people may
-	 *  want to walk/manipulate further.
-	 *
-	 *  The NFA construction routine must know the actual char values.
-	 */
-	public static StringBuffer getUnescapedStringFromGrammarStringLiteral(String literal) {
-		//System.out.println("escape: ["+literal+"]");
-		StringBuffer buf = new StringBuffer();
-		int last = literal.length()-1; // skip quotes on outside
-		for (int i=1; i<last; i++) {
-			char c = literal.charAt(i);
-			if ( c=='\\' ) {
-				i++;
-				c = literal.charAt(i);
-				if ( Character.toUpperCase(c)=='U' ) {
-					// \u0000
-					i++;
-					String unicodeChars = literal.substring(i,i+4);
-					// parse the unicode 16 bit hex value
-					int val = Integer.parseInt(unicodeChars, 16);
-					i+=4-1; // loop will inc by 1; only jump 3 then
-					buf.append((char)val);
-				}
-				else if ( Character.isDigit(c) ) {
-					ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,
-									   "invalid char literal: "+literal);
-					buf.append("\\"+(char)c);
-				}
-				else {
-					buf.append((char)ANTLRLiteralEscapedCharValue[c]); // normal \x escape
-				}
-			}
-			else {
-				buf.append(c); // simple char x
-			}
-		}
-		//System.out.println("string: ["+buf.toString()+"]");
-		return buf;
-	}
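-	// For example (hypothetical literal): the grammar string 'a\u0041\nb' comes
-	// back as the four chars a, A, newline, b (escapes resolved, quotes dropped).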
-
-	/** Pull your token definitions from an existing grammar in memory.
-	 *  You must use Grammar() ctor then this method then setGrammarContent()
-	 *  to make this work.  This was useful primarily for testing and
-	 *  interpreting grammars until I added import grammar functionality.
-	 *  When you import a grammar you implicitly import its vocabulary as well
-	 *  and keep the same token type values.
-	 *
-	 *  Returns the max token type found.
-	 */
-	public int importTokenVocabulary(Grammar importFromGr) {
-		Set importedTokenIDs = importFromGr.getTokenIDs();
-		for (Iterator it = importedTokenIDs.iterator(); it.hasNext();) {
-			String tokenID = (String) it.next();
-			int tokenType = importFromGr.getTokenType(tokenID);
-			composite.maxTokenType = Math.max(composite.maxTokenType,tokenType);
-			if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
-				//System.out.println("import token from grammar "+tokenID+"="+tokenType);
-				defineToken(tokenID, tokenType);
-			}
-		}
-		return composite.maxTokenType; // return max found
-	}
-
-	/** Import the rules/tokens of a delegate grammar. All delegate grammars are
-	 *  read during the ctor of the first Grammar created.
-	 *
-	 *  Do not create NFA here because NFA construction needs to hook up with
-	 *  overridden rules in delegation root grammar.
-	 */
-	public void importGrammar(GrammarAST grammarNameAST, String label) {
-		String grammarName = grammarNameAST.getText();
-		//System.out.println("import "+gfile.getName());
-		String gname = grammarName + GRAMMAR_FILE_EXTENSION;
-		BufferedReader br = null;
-		try {
-			String fullName = tool.getLibraryFile(gname);
-			FileReader fr = new FileReader(fullName);
-			br = new BufferedReader(fr);
-			Grammar delegateGrammar = null;
-			delegateGrammar = new Grammar(tool, gname, composite);
-			delegateGrammar.label = label;
-
-			addDelegateGrammar(delegateGrammar);
-
-			delegateGrammar.parseAndBuildAST(br);
-			delegateGrammar.addRulesForSyntacticPredicates();
-			if ( !validImport(delegateGrammar) ) {
-				ErrorManager.grammarError(ErrorManager.MSG_INVALID_IMPORT,
-										  this,
-										  grammarNameAST.token,
-										  this,
-										  delegateGrammar);
-				return;
-			}
-			if ( this.type==COMBINED &&
-				 (delegateGrammar.name.equals(this.name+grammarTypeToFileNameSuffix[LEXER])||
-				  delegateGrammar.name.equals(this.name+grammarTypeToFileNameSuffix[PARSER])) )
-			{
-				ErrorManager.grammarError(ErrorManager.MSG_IMPORT_NAME_CLASH,
-										  this,
-										  grammarNameAST.token,
-										  this,
-										  delegateGrammar);
-				return;
-			}
-			if ( delegateGrammar.grammarTree!=null ) {
-				// we have a valid grammar
-				// deal with combined grammars
-				if ( delegateGrammar.type == LEXER && this.type == COMBINED ) {
-					// ooops, we wasted some effort; tell lexer to read it in
-					// later
-					lexerGrammarST.add("imports", grammarName);
-					// but, this parser grammar will need the vocab
-					// so add to composite anyway so we suck in the tokens later
-				}
-			}
-			//System.out.println("Got grammar:\n"+delegateGrammar);
-		}
-		catch (IOException ioe) {
-			ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE,
-							   gname,
-							   ioe);
-		}
-		finally {
-			if ( br!=null ) {
-				try {
-					br.close();
-				}
-				catch (IOException ioe) {
-					ErrorManager.error(ErrorManager.MSG_CANNOT_CLOSE_FILE,
-									   gname,
-									   ioe);
-				}
-			}
-		}
-	}
-
-	/** add new delegate to composite tree */
-	protected void addDelegateGrammar(Grammar delegateGrammar) {
-		CompositeGrammarTree t = composite.delegateGrammarTreeRoot.findNode(this);
-		t.addChild(new CompositeGrammarTree(delegateGrammar));
-		// make sure new grammar shares this composite
-		delegateGrammar.composite = this.composite;
-	}
-
-	/** Load a vocab file <vocabName>.tokens and return max token type found. */
-	public int importTokenVocabulary(GrammarAST tokenVocabOptionAST,
-									 String vocabName)
-	{
-		if ( !getGrammarIsRoot() ) {
-			ErrorManager.grammarWarning(ErrorManager.MSG_TOKEN_VOCAB_IN_DELEGATE,
-										this,
-										tokenVocabOptionAST.token,
-										name);
-			return composite.maxTokenType;
-		}
-
-		File fullFile = tool.getImportedVocabFile(vocabName);
-		try {
-			FileReader fr = new FileReader(fullFile);
-			BufferedReader br = new BufferedReader(fr);
-			StreamTokenizer tokenizer = new StreamTokenizer(br);
-			tokenizer.parseNumbers();
-			tokenizer.wordChars('_', '_');
-			tokenizer.eolIsSignificant(true);
-			tokenizer.slashSlashComments(true);
-			tokenizer.slashStarComments(true);
-			tokenizer.ordinaryChar('=');
-			tokenizer.quoteChar('\'');
-			tokenizer.whitespaceChars(' ',' ');
-			tokenizer.whitespaceChars('\t','\t');
-			int lineNum = 1;
-			int token = tokenizer.nextToken();
-			while (token != StreamTokenizer.TT_EOF) {
-				String tokenID;
-				if ( token == StreamTokenizer.TT_WORD ) {
-					tokenID = tokenizer.sval;
-				}
-				else if ( token == '\'' ) {
-					tokenID = "'"+tokenizer.sval+"'";
-				}
-				else {
-					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
-									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
-									   Utils.integer(lineNum));
-					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {;}
-					token = tokenizer.nextToken();
-					continue;
-				}
-				token = tokenizer.nextToken();
-				if ( token != '=' ) {
-					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
-									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
-									   Utils.integer(lineNum));
-					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {;}
-					token = tokenizer.nextToken();
-					continue;
-				}
-				token = tokenizer.nextToken(); // skip '='
-				if ( token != StreamTokenizer.TT_NUMBER ) {
-					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
-									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
-									   Utils.integer(lineNum));
-					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {;}
-					token = tokenizer.nextToken();
-					continue;
-				}
-				int tokenType = (int)tokenizer.nval;
-				token = tokenizer.nextToken();
-				//System.out.println("import "+tokenID+"="+tokenType);
-				composite.maxTokenType = Math.max(composite.maxTokenType,tokenType);
-				defineToken(tokenID, tokenType);
-				lineNum++;
-				if ( token != StreamTokenizer.TT_EOL ) {
-					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
-									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
-									   Utils.integer(lineNum));
-					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {;}
-					token = tokenizer.nextToken();
-					continue;
-				}
-				token = tokenizer.nextToken(); // skip newline
-			}
-			br.close();
-		}
-		catch (FileNotFoundException fnfe) {
-			ErrorManager.error(ErrorManager.MSG_CANNOT_FIND_TOKENS_FILE,
-							   fullFile);
-		}
-		catch (IOException ioe) {
-			ErrorManager.error(ErrorManager.MSG_ERROR_READING_TOKENS_FILE,
-							   fullFile,
-							   ioe);
-		}
-		catch (Exception e) {
-			ErrorManager.error(ErrorManager.MSG_ERROR_READING_TOKENS_FILE,
-							   fullFile,
-							   e);
-		}
-		return composite.maxTokenType;
-	}
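
The vocabulary file read by importTokenVocabulary above is plain text: one TOKENNAME=NUMBER or 'literal'=NUMBER assignment per line, which is what the StreamTokenizer loop parses (it additionally tolerates // and /* */ comments). As a rough, self-contained sketch of a reader for that format, not the tool's own importer, and ignoring the comment handling; the class name and the split-on-last-'=' parsing are illustrative assumptions:

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

public class TokensFileSketch {
    /** Reads NAME=NUMBER (or 'literal'=NUMBER) lines, e.g. "ID=4" or "'+'=5". */
    public static Map<String, Integer> load(String fileName) throws IOException {
        Map<String, Integer> vocab = new LinkedHashMap<>();
        try (BufferedReader br = new BufferedReader(new FileReader(fileName))) {
            String line;
            while ((line = br.readLine()) != null) {
                line = line.trim();
                if (line.isEmpty()) continue;
                int eq = line.lastIndexOf('=');   // literals such as '=' may themselves contain '='
                String name = line.substring(0, eq);
                int type = Integer.parseInt(line.substring(eq + 1).trim());
                vocab.put(name, type);
            }
        }
        return vocab;
    }

    public static void main(String[] args) throws IOException {
        // e.g. a vocabulary file containing the lines  ID=4  and  '+'=5
        System.out.println(load(args[0]));
    }
}

A file containing ID=4 and '+'=5 would load as {ID=4, '+'=5}, the same name-to-type mapping that defineToken() records above.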
-
-	/** Given a token type, get a meaningful name for it such as the ID
-	 *  or string literal.  If this is a lexer and the ttype is in the
-	 *  char vocabulary, compute an ANTLR-valid (possibly escaped) char literal.
-	 */
-	public String getTokenDisplayName(int ttype) {
-		String tokenName = null;
-		int index=0;
-		// inside any target's char range and is lexer grammar?
-		if ( this.type==LEXER &&
-			 ttype >= Label.MIN_CHAR_VALUE && ttype <= Label.MAX_CHAR_VALUE )
-		{
-			return getANTLRCharLiteralForChar(ttype);
-		}
-		// faux label?
-		else if ( ttype<0 ) {
-			tokenName = (String)composite.typeToTokenList.get(Label.NUM_FAUX_LABELS+ttype);
-		}
-		else {
-			// compute index in typeToTokenList for ttype
-			index = ttype-1; // normalize to 0..n-1
-			index += Label.NUM_FAUX_LABELS;     // jump over faux tokens
-
-			if ( index<composite.typeToTokenList.size() ) {
-				tokenName = (String)composite.typeToTokenList.get(index);
-				if ( tokenName!=null &&
-					 tokenName.startsWith(AUTO_GENERATED_TOKEN_NAME_PREFIX) )
-				{
-					tokenName = composite.typeToStringLiteralList.get(ttype);
-				}
-			}
-			else {
-				tokenName = String.valueOf(ttype);
-			}
-		}
-		//System.out.println("getTokenDisplayName ttype="+ttype+", index="+index+", name="+tokenName);
-		return tokenName;
-	}
-
-	/** Get the list of ANTLR String literals */
-	public Set<String> getStringLiterals() {
-		return composite.stringLiteralToTypeMap.keySet();
-	}
-
-	public String getGrammarTypeString() {
-		return grammarTypeToString[type];
-	}
-
-	public int getGrammarMaxLookahead() {
-		if ( global_k>=0 ) {
-			return global_k;
-		}
-		Object k = getOption("k");
-		if ( k==null ) {
-			global_k = 0;
-		}
-		else if (k instanceof Integer) {
-			Integer kI = (Integer)k;
-			global_k = kI.intValue();
-		}
-		else {
-			// must be String "*"
-			if ( k.equals("*") ) {  // this the default anyway
-				global_k = 0;
-			}
-		}
-		return global_k;
-	}
-
-	/** Save the option key/value pair and process it; return the key
-	 *  or null if invalid option.
-	 */
-	public String setOption(String key, Object value, Token optionsStartToken) {
-		if ( legalOption(key) ) {
-			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
-									  this,
-									  optionsStartToken,
-									  key);
-			return null;
-		}
-		if ( !optionIsValid(key, value) ) {
-			return null;
-		}
-        if ( key.equals("backtrack") && value.toString().equals("true") ) {
-            composite.getRootGrammar().atLeastOneBacktrackOption = true;
-        }
-        if ( options==null ) {
-			options = new HashMap();
-		}
-		options.put(key, value);
-		return key;
-	}
-
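-	/** Note: despite its name, this returns true when key is NOT a legal option
-	 *  for this grammar type; setOption() reports an illegal-option error when
-	 *  this returns true.
-	 */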
-	public boolean legalOption(String key) {
-		switch ( type ) {
-			case LEXER :
-				return !legalLexerOptions.contains(key);
-			case PARSER :
-				return !legalParserOptions.contains(key);
-			case TREE_PARSER :
-				return !legalTreeParserOptions.contains(key);
-			default :
-				return !legalParserOptions.contains(key);
-		}
-	}
-
-	public void setOptions(Map options, Token optionsStartToken) {
-		if ( options==null ) {
-			this.options = null;
-			return;
-		}
-		Set keys = options.keySet();
-		for (Iterator it = keys.iterator(); it.hasNext();) {
-			String optionName = (String) it.next();
-			Object optionValue = options.get(optionName);
-			String stored=setOption(optionName, optionValue, optionsStartToken);
-			if ( stored==null ) {
-				it.remove();
-			}
-		}
-	}
-
-	public Object getOption(String key) {
-		return composite.getOption(key);
-	}
-
-	public Object getLocallyDefinedOption(String key) {
-		Object value = null;
-		if ( options!=null ) {
-			value = options.get(key);
-		}
-		if ( value==null ) {
-			value = defaultOptions.get(key);
-		}
-		return value;
-	}
-
-	public Object getBlockOption(GrammarAST blockAST, String key) {
-		String v = (String)blockAST.getBlockOption(key);
-		if ( v!=null ) {
-			return v;
-		}
-		if ( type==Grammar.LEXER ) {
-			return defaultLexerBlockOptions.get(key);
-		}
-		return defaultBlockOptions.get(key);
-	}
-
-	public int getUserMaxLookahead(int decision) {
-		int user_k = 0;
-		GrammarAST blockAST = nfa.grammar.getDecisionBlockAST(decision);
-		Object k = blockAST.getBlockOption("k");
-		if ( k==null ) {
-			user_k = nfa.grammar.getGrammarMaxLookahead();
-			return user_k;
-		}
-		if (k instanceof Integer) {
-			Integer kI = (Integer)k;
-			user_k = kI.intValue();
-		}
-		else {
-			// must be String "*"
-			if ( k.equals("*") ) {
-				user_k = 0;
-			}
-		}
-		return user_k;
-	}
-
-	public boolean getAutoBacktrackMode(int decision) {
-		NFAState decisionNFAStartState = getDecisionNFAStartState(decision);
-		String autoBacktrack =
-			(String)getBlockOption(decisionNFAStartState.associatedASTNode, "backtrack");
-
-		if ( autoBacktrack==null ) {
-			autoBacktrack = (String)nfa.grammar.getOption("backtrack");
-		}
-		return autoBacktrack!=null&&autoBacktrack.equals("true");
-	}
-
-	public boolean optionIsValid(String key, Object value) {
-		return true;
-	}
-
-	public boolean buildAST() {
-		String outputType = (String)getOption("output");
-		if ( outputType!=null ) {
-			return outputType.toString().equals("AST");
-		}
-		return false;
-	}
-
-	public boolean rewriteMode() {
-		Object outputType = getOption("rewrite");
-		if ( outputType!=null ) {
-			return outputType.toString().equals("true");
-		}
-		return false;
-	}
-
-	public boolean isBuiltFromString() {
-		return builtFromString;
-	}
-
-	public boolean buildTemplate() {
-		String outputType = (String)getOption("output");
-		if ( outputType!=null ) {
-			return outputType.toString().equals("template");
-		}
-		return false;
-	}
-
-	public Collection<Rule> getRules() {
-		return nameToRuleMap.values();
-	}
-
-	/** Get the set of Rules that need to have manual delegations
-	 *  like "void rule() { importedGrammar.rule(); }"
-	 *
-	 *  If this grammar is master, get list of all rule definitions from all
-	 *  delegate grammars.  Only master has complete interface from combined
-	 *  grammars...we will generate delegates as helper objects.
-	 *
-	 *  Composite grammars that are not the root/master do not have complete
-	 *  interfaces.  It is not my intention that people use subcomposites.
-	 *  Only the outermost grammar should be used from outside code.  The
-	 *  other grammar components are specifically generated to work only
-	 *  with the master/root.
-	 *
-	 *  delegatedRules = imported - overridden
-	 */
-	public Set<Rule> getDelegatedRules() {
-		return composite.getDelegatedRules(this);
-	}
-
-	/** Get set of all rules imported from all delegate grammars even if
-	 *  indirectly delegated.
-	 */
-	public Set<Rule> getAllImportedRules() {
-		return composite.getAllImportedRules(this);
-	}
-
-	/** Get list of all delegates from all grammars directly or indirectly
-	 *  imported into this grammar.
-	 */
-	public List<Grammar> getDelegates() {
-		return composite.getDelegates(this);
-	}
-
-	public boolean getHasDelegates() {
-	   return !getDelegates().isEmpty();
-	}
-
-	public List<String> getDelegateNames() {
-		// compute delegates:{Grammar g | return g.name;}
-		List<String> names = new ArrayList<String>();
-		List<Grammar> delegates = composite.getDelegates(this);
-		if ( delegates!=null ) {
-			for (Grammar g : delegates) {
-				names.add(g.name);
-			}
-		}
-		return names;
-	}
-
-	public List<Grammar> getDirectDelegates() {
-		return composite.getDirectDelegates(this);
-	}
-
-	/** Get delegates below direct delegates */
-	public List<Grammar> getIndirectDelegates() {
-		return composite.getIndirectDelegates(this);
-	}
-
-	/** Get list of all delegators.  This amounts to the grammars on the path
-	 *  to the root of the delegation tree.
-	 */
-	public List<Grammar> getDelegators() {
-		return composite.getDelegators(this);
-	}
-
-	/** Who's my direct parent grammar? */
-	public Grammar getDelegator() {
-		return composite.getDelegator(this);
-	}
-
-	public Set<Rule> getDelegatedRuleReferences() {
-		return delegatedRuleReferences;
-	}
-
-	public boolean getGrammarIsRoot() {
-		return composite.delegateGrammarTreeRoot.grammar == this;
-	}
-
-	public void setRuleAST(String ruleName, GrammarAST t) {
-		Rule r = getLocallyDefinedRule(ruleName);
-		if ( r!=null ) {
-			r.tree = t;
-			r.EORNode = t.getLastChild();
-		}
-	}
-
-	public NFAState getRuleStartState(String ruleName) {
-		return getRuleStartState(null, ruleName);
-	}
-
-	public NFAState getRuleStartState(String scopeName, String ruleName) {
-		Rule r = getRule(scopeName, ruleName);
-		if ( r!=null ) {
-			//System.out.println("getRuleStartState("+scopeName+", "+ruleName+")="+r.startState);
-			return r.startState;
-		}
-		//System.out.println("getRuleStartState("+scopeName+", "+ruleName+")=null");
-		return null;
-	}
-
-	public String getRuleModifier(String ruleName) {
-		Rule r = getRule(ruleName);
-		if ( r!=null ) {
-			return r.modifier;
-		}
-		return null;
-	}
-
-	public NFAState getRuleStopState(String ruleName) {
-		Rule r = getRule(ruleName);
-		if ( r!=null ) {
-			return r.stopState;
-		}
-		return null;
-	}
-
-	public int assignDecisionNumber(NFAState state) {
-		decisionCount++;
-		state.setDecisionNumber(decisionCount);
-		return decisionCount;
-	}
-
-	protected Decision getDecision(int decision) {
-		int index = decision-1;
-		if ( index >= indexToDecision.size() ) {
-			return null;
-		}
-		Decision d = indexToDecision.get(index);
-		return d;
-	}
-
-	public List<Decision> getDecisions() {
-		return indexToDecision;
-	}
-
-	protected Decision createDecision(int decision) {
-		int index = decision-1;
-		if ( index < indexToDecision.size() ) {
-			return getDecision(decision); // don't recreate
-		}
-		Decision d = new Decision();
-		d.decision = decision;
-		d.grammar = this;
-		indexToDecision.setSize(getNumberOfDecisions());
-		indexToDecision.set(index, d);
-		return d;
-	}
-
-	public List getDecisionNFAStartStateList() {
-		List states = new ArrayList(100);
-		for (int d = 0; d < indexToDecision.size(); d++) {
-			Decision dec = (Decision) indexToDecision.get(d);
-			states.add(dec.startState);
-		}
-		return states;
-	}
-
-	public NFAState getDecisionNFAStartState(int decision) {
-		Decision d = getDecision(decision);
-		if ( d==null ) {
-			return null;
-		}
-		return d.startState;
-	}
-
-	public DFA getLookaheadDFA(int decision) {
-		Decision d = getDecision(decision);
-		if ( d==null ) {
-			return null;
-		}
-		return d.dfa;
-	}
-
-	public GrammarAST getDecisionBlockAST(int decision) {
-		Decision d = getDecision(decision);
-		if ( d==null ) {
-			return null;
-		}
-		return d.blockAST;
-	}
-
-	/** Returns a list of column numbers for all decisions
-	 *  on a particular line so that ANTLRWorks can choose the decision
-	 *  depending on the location of the cursor (otherwise,
-	 *  ANTLRWorks has to give the *exact* location, which
-	 *  is not easy from the user's point of view).
-	 *
-	 *  This is not particularly fast, as it walks the entire line:col->DFA map
-	 *  looking for a prefix of "line:".
-	 */
-	public List getLookaheadDFAColumnsForLineInFile(int line) {
-		String prefix = line+":";
-		List columns = new ArrayList();
-		for(Iterator iter = lineColumnToLookaheadDFAMap.keySet().iterator();
-			iter.hasNext(); ) {
-			String key = (String)iter.next();
-			if(key.startsWith(prefix)) {
-				columns.add(Integer.valueOf(key.substring(prefix.length())));
-			}
-		}
-		return columns;
-	}
-
-	/** Useful for ANTLRWorks to map position in file to the DFA for display */
-	public DFA getLookaheadDFAFromPositionInFile(int line, int col) {
-		return (DFA)lineColumnToLookaheadDFAMap.get(
-			new StringBuffer().append(line + ":").append(col).toString());
-	}
-
-	public Map getLineColumnToLookaheadDFAMap() {
-		return lineColumnToLookaheadDFAMap;
-	}
-
-	/*
-	public void setDecisionOptions(int decision, Map options) {
-		Decision d = createDecision(decision);
-		d.options = options;
-	}
-
-	public void setDecisionOption(int decision, String name, Object value) {
-		Decision d = getDecision(decision);
-		if ( d!=null ) {
-			if ( d.options==null ) {
-				d.options = new HashMap();
-			}
-			d.options.put(name,value);
-		}
-	}
-
-	public Map getDecisionOptions(int decision) {
-		Decision d = getDecision(decision);
-		if ( d==null ) {
-			return null;
-		}
-		return d.options;
-    }
-    */
-
-	public int getNumberOfDecisions() {
-		return decisionCount;
-	}
-
-	public int getNumberOfCyclicDecisions() {
-		int n = 0;
-		for (int i=1; i<=getNumberOfDecisions(); i++) {
-			Decision d = getDecision(i);
-			if ( d.dfa!=null && d.dfa.isCyclic() ) {
-				n++;
-			}
-		}
-		return n;
-	}
-
-	/** Set the lookahead DFA for a particular decision.  This means
-	 *  that the appropriate AST node must be updated to have the new lookahead
-	 *  DFA.  This method could be used to properly set the DFAs without
-	 *  using the createLookaheadDFAs() method.  You could do this
-	 *
-	 *    Grammar g = new Grammar("...");
-	 *    g.setLookahead(1, dfa1);
-	 *    g.setLookahead(2, dfa2);
-	 *    ...
-	 */
-	public void setLookaheadDFA(int decision, DFA lookaheadDFA) {
-		Decision d = createDecision(decision);
-		d.dfa = lookaheadDFA;
-		GrammarAST ast = d.startState.associatedASTNode;
-		ast.setLookaheadDFA(lookaheadDFA);
-	}
-
-	public void setDecisionNFA(int decision, NFAState state) {
-		Decision d = createDecision(decision);
-		d.startState = state;
-	}
-
-	public void setDecisionBlockAST(int decision, GrammarAST blockAST) {
-		//System.out.println("setDecisionBlockAST("+decision+", "+blockAST.token);
-		Decision d = createDecision(decision);
-		d.blockAST = blockAST;
-	}
-
-	public boolean allDecisionDFAHaveBeenCreated() {
-		return allDecisionDFACreated;
-	}
-
-	/** How many token types have been allocated so far? */
-	public int getMaxTokenType() {
-		return composite.maxTokenType;
-	}
-
-	/** What is the max char value possible for this grammar's target?  Use
-	 *  unicode max if no target defined.
-	 */
-	public int getMaxCharValue() {
-		if ( generator!=null ) {
-			return generator.target.getMaxCharValue(generator);
-		}
-		else {
-			return Label.MAX_CHAR_VALUE;
-		}
-	}
-
-	/** Return a set of all possible token or char types for this grammar */
-	public IntSet getTokenTypes() {
-		if ( type==LEXER ) {
-			return getAllCharValues();
-		}
-		return IntervalSet.of(Label.MIN_TOKEN_TYPE, getMaxTokenType());
-	}
-
-	/** If there is a char vocabulary, use it; else return min to max char
-	 *  as defined by the target.  If no target, use max unicode char value.
-	 */
-	public IntSet getAllCharValues() {
-		if ( charVocabulary!=null ) {
-			return charVocabulary;
-		}
-		IntSet allChar = IntervalSet.of(Label.MIN_CHAR_VALUE, getMaxCharValue());
-		return allChar;
-	}
-
-	/** Return a string representing the escaped char for code c.  E.g., If c
-	 *  has value 0x100, you will get "\u0100".  ASCII gets the usual
-	 *  char (non-hex) representation.  Control characters are spit out
-	 *  as unicode.  While this is specially set up for returning Java strings,
-	 *  it can be used by any language target that has the same syntax. :)
-	 *
-	 *  11/26/2005: I changed this to use double quotes, consistent with antlr.g
-	 *  12/09/2005: I changed so everything is single quotes
-	 */
-	public static String getANTLRCharLiteralForChar(int c) {
-		if ( c<Label.MIN_CHAR_VALUE ) {
-			ErrorManager.internalError("invalid char value "+c);
-			return "'<INVALID>'";
-		}
-		if ( c<ANTLRLiteralCharValueEscape.length && ANTLRLiteralCharValueEscape[c]!=null ) {
-			return '\''+ANTLRLiteralCharValueEscape[c]+'\'';
-		}
-		if ( Character.UnicodeBlock.of((char)c)==Character.UnicodeBlock.BASIC_LATIN &&
-			 !Character.isISOControl((char)c) ) {
-			if ( c=='\\' ) {
-				return "'\\\\'";
-			}
-			if ( c=='\'') {
-				return "'\\''";
-			}
-			return '\''+Character.toString((char)c)+'\'';
-		}
-		// turn on the bit above max "\uFFFF" value so that we pad with zeros
-		// then only take last 4 digits
-		String hex = Integer.toHexString(c|0x10000).toUpperCase().substring(1,5);
-		String unicodeStr = "'\\u"+hex+"'";
-		return unicodeStr;
-	}
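
The hex-padding trick at the end of getANTLRCharLiteralForChar is easy to miss. The following small standalone demo (illustrative class name; sample code point 0x100) walks the method's three cases and shows how the padding works:

public class CharLiteralDemo {
    public static void main(String[] args) {
        // 1. Known single-char escapes (e.g. newline) come from ANTLRLiteralCharValueEscape
        //    and are wrapped in single quotes.
        // 2. Printable BASIC_LATIN chars print as themselves, e.g. 'A', with backslash
        //    and single quote special-cased.
        // 3. Everything else is padded to exactly four hex digits via the c|0x10000 trick:
        int c = 0x100;
        String hex = Integer.toHexString(c | 0x10000).toUpperCase().substring(1, 5);
        System.out.println("'\\u" + hex + "'");   // prints the padded literal for U+0100
    }
}

Setting the bit above 0xFFFF guarantees a five-digit hex string, so dropping the first digit always leaves a zero-padded four-digit escape.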
-
-	/** For lexer grammars, return everything in unicode not in set.
-	 *  For parser and tree grammars, return everything in token space
-	 *  from MIN_TOKEN_TYPE to last valid token type or char value.
-	 */
-	public IntSet complement(IntSet set) {
-		//System.out.println("complement "+set.toString(this));
-		//System.out.println("vocabulary "+getTokenTypes().toString(this));
-		IntSet c = set.complement(getTokenTypes());
-		//System.out.println("result="+c.toString(this));
-		return c;
-	}
-
-	public IntSet complement(int atom) {
-		return complement(IntervalSet.of(atom));
-	}
-
-	/** Given a set tree like ( SET A B ), check that A and B
-	 *  are both valid sets themselves, else we must treat it like a BLOCK
-	 */
-	public boolean isValidSet(TreeToNFAConverter nfabuilder, GrammarAST t) {
-		boolean valid = true;
-		try {
-			//System.out.println("parse BLOCK as set tree: "+t.toStringTree());
-			int alts = nfabuilder.testBlockAsSet(t);
-			valid = alts > 1;
-		}
-		catch (RecognitionException re) {
-			// The rule did not parse as a set, return null; ignore exception
-			valid = false;
-		}
-		//System.out.println("valid? "+valid);
-		return valid;
-	}
-
-	/** Get the set equivalent (if any) of the indicated rule from this
-	 *  grammar.  Mostly used in the lexer to do ~T for some fragment rule
-	 *  T.  If the rule AST has a SET, use that.  If the rule is a single char,
-	 *  convert it to a set and return it.  If the rule is not a simple set
-	 *  (w/o actions), then return null.
-	 *  Rules have AST form:
-	 *
-	 *		^( RULE ID modifier ARG RET SCOPE block EOR )
-	 */
-	public IntSet getSetFromRule(TreeToNFAConverter nfabuilder, String ruleName)
-		throws RecognitionException
-	{
-		Rule r = getRule(ruleName);
-		if ( r==null ) {
-			return null;
-		}
-		IntSet elements = null;
-		//System.out.println("parsed tree: "+r.tree.toStringTree());
-		elements = nfabuilder.setRule(r.tree);
-		//System.out.println("elements="+elements);
-		return elements;
-	}
-
-	/** Decisions are linked together with transition(1).  Count how
-	 *  many there are.  This is here rather than in NFAState because
-	 *  a grammar decides how NFAs are put together to form a decision.
-	 */
-	public int getNumberOfAltsForDecisionNFA(NFAState decisionState) {
-		if ( decisionState==null ) {
-			return 0;
-		}
-		int n = 1;
-		NFAState p = decisionState;
-		while ( p.transition[1] !=null ) {
-			n++;
-			p = (NFAState)p.transition[1].target;
-		}
-		return n;
-	}
-
-	/** Get the ith alternative (1..n) from a decision; return null when
-	 *  an invalid alt is requested.  I must count in to find the right
-	 *  alternative number.  For (A|B), you get NFA structure (roughly):
-	 *
-	 *  o->o-A->o
-	 *  |
-	 *  o->o-B->o
-	 *
-	 *  This routine returns the leftmost state for each alt.  So alt=1 returns
-	 *  the upper-left-most state in this structure.
-	 */
-	public NFAState getNFAStateForAltOfDecision(NFAState decisionState, int alt) {
-		if ( decisionState==null || alt<=0 ) {
-			return null;
-		}
-		int n = 1;
-		NFAState p = decisionState;
-		while ( p!=null ) {
-			if ( n==alt ) {
-				return p;
-			}
-			n++;
-			Transition next = p.transition[1];
-			p = null;
-			if ( next!=null ) {
-				p = (NFAState)next.target;
-			}
-		}
-		return null;
-	}
-
-	/*
-	public void computeRuleFOLLOWSets() {
-		if ( getNumberOfDecisions()==0 ) {
-			createNFAs();
-		}
-		for (Iterator it = getRules().iterator(); it.hasNext();) {
-			Rule r = (Rule)it.next();
-			if ( r.isSynPred ) {
-				continue;
-			}
-			LookaheadSet s = ll1Analyzer.FOLLOW(r);
-			System.out.println("FOLLOW("+r.name+")="+s);
-		}
-	}
-	*/
-
-	public LookaheadSet FIRST(NFAState s) {
-		return ll1Analyzer.FIRST(s);
-	}
-
-	public LookaheadSet LOOK(NFAState s) {
-		return ll1Analyzer.LOOK(s);
-	}
-
-	public void setCodeGenerator(CodeGenerator generator) {
-		this.generator = generator;
-	}
-
-	public CodeGenerator getCodeGenerator() {
-		return generator;
-	}
-
-	public GrammarAST getGrammarTree() {
-		return grammarTree;
-	}
-
-	public void setGrammarTree(GrammarAST value) {
-		grammarTree = value;
-	}
-
-	public Tool getTool() {
-		return tool;
-	}
-
-	public void setTool(Tool tool) {
-		this.tool = tool;
-	}
-
-	/** given a token type and the text of the literal, come up with a
-	 *  decent token type label.  For now it's just T<type>.  Actually,
-	 *  if there is an aliased name from tokens like PLUS='+', use it.
-	 */
-	public String computeTokenNameFromLiteral(int tokenType, String literal) {
-		return AUTO_GENERATED_TOKEN_NAME_PREFIX +tokenType;
-	}
-
-	public String toString() {
-	//	return "FFFFFFFFFFFFFF";
-		return grammarTreeToString(grammarTree);
-	}
-
-	public String grammarTreeToString(GrammarAST t) {
-		return grammarTreeToString(t, true);
-	}
-
-	public String grammarTreeToString(GrammarAST t, boolean showActions) {
-		String s = null;
-		try {
-			s = t.getLine()+":"+(t.getCharPositionInLine()+1)+": ";
-			s += new ANTLRTreePrinter(new CommonTreeNodeStream(t)).toString(this, showActions);
-		}
-		catch (Exception e) {
-			s = "<invalid or missing tree structure>";
-		}
-		return s;
-	}
-
-	public void printGrammar(PrintStream output) {
-		ANTLRTreePrinter printer = new ANTLRTreePrinter(new CommonTreeNodeStream(getGrammarTree()));
-		try {
-			String g = printer.toString(this, false);
-			output.println(g);
-		}
-		catch (RecognitionException re) {
-			ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,re);
-		}
-	}
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarAST.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarAST.java
deleted file mode 100644
index 3d36cc8..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarAST.java
+++ /dev/null
@@ -1,566 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.DFA;
-import org.antlr.analysis.NFAState;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.misc.IntSet;
-import org.antlr.misc.Interval;
-import org.antlr.runtime.CommonToken;
-import org.antlr.runtime.Token;
-import org.antlr.runtime.TokenSource;
-import org.antlr.runtime.tree.CommonTree;
-import org.antlr.runtime.tree.Tree;
-import org.antlr.runtime.tree.TreeAdaptor;
-import org.stringtemplate.v4.ST;
-
-import java.util.*;
-
-/** Grammars are first converted to ASTs using this class and then are
- *  converted to NFAs via a tree walker.
- *
- *  The reader may notice that I have made a very non-OO decision in this
- *  class to track variables for many different kinds of nodes.  It wastes
- *  space for nodes that don't need the values and OO principles cry out
- *  for a new class type for each kind of node in my tree.  I am doing this
- *  on purpose for a variety of reasons.  I don't like using the type
- *  system for different node types; it yields too many damn class files
- *  which I hate.  Perhaps if I put them all in one file.  Most importantly
- *  though I hate all the type casting that would have to go on.  I would
- *  have all sorts of extra work to do.  Ick.  Anyway, I'm doing all this
- *  on purpose, not out of ignorance. ;)
- */
-public class GrammarAST extends CommonTree {
-	static int count = 0;
-
-	public int ID = ++count;
-
-	private String textOverride;
-
-    public String enclosingRuleName;
-
-    /** If this is a decision node, what is the lookahead DFA? */
-    public DFA lookaheadDFA = null;
-
-    /** What NFA start state was built from this node? */
-    public NFAState NFAStartState = null;
-
-	/** This is used for TREE_BEGIN nodes to point into
-	 *  the NFA.  TREE_BEGINs point at left edge of DOWN for LOOK computation
-     *  purposes (Nullable tree child list needs special code gen when matching).
-	 */
-	public NFAState NFATreeDownState = null;
-
-	/** Rule ref nodes, token refs, set, and NOT set refs need to track their
-	 *  location in the generated NFA so that local FOLLOW sets can be
-	 *  computed during code gen for automatic error recovery.
-	 */
-	public NFAState followingNFAState = null;
-
-	/** If this is a SET node, what are the elements? */
-    protected IntSet setValue = null;
-
-    /** If this is a BLOCK node, track options here */
-    protected Map<String,Object> blockOptions;
-
-	/** If this is a BLOCK node for a rewrite rule, track referenced
-	 *  elements here.  Don't track elements in nested subrules.
-	 */
-	public Set<GrammarAST> rewriteRefsShallow;
-
-	/*	If REWRITE node, track EVERY element and label ref to right of ->
-	 *  for this rewrite rule.  There could be multiple of these per
-	 *  rule:
-	 *
-	 *     a : ( ... -> ... | ... -> ... ) -> ... ;
-	 *
-	 *  We may need a list of all refs to do definitions for whole rewrite
-	 *  later.
-	 *
-	 *  If BLOCK then tracks every element at that level and below.
-	 */
-	public Set<GrammarAST> rewriteRefsDeep;
-
-	public Map<String,Object> terminalOptions;
-
-	/** if this is an ACTION node, this is the outermost enclosing
-	 *  alt num in rule.  For actions, define.g sets these (used to
-	 *  be codegen.g).  We need these set so we can examine actions
-	 *  early, before code gen, for refs to rule predefined properties
-	 *  and rule labels.  For most part define.g sets outerAltNum, but
-	 *  codegen.g does the ones for %foo(a={$ID.text}) type refs as
-	 *  the {$ID...} is not seen as an action until code gen pulls apart.
-	 */
-	public int outerAltNum;
-
-	/** if this is a TOKEN_REF or RULE_REF node, this is the code ST
-	 *  generated for this node.  We need to update it later to add
-	 *  a label if someone does $tokenref or $ruleref in an action.
-	 */
-	public ST code;
-
-    /**
-     * @return the options map set on this BLOCK node, or null if none set
-     */
-    public Map<String, Object> getBlockOptions() {
-        return blockOptions;
-    }
-
-    /**
-     * @param blockOptions the options map to associate with this BLOCK node
-     */
-    public void setBlockOptions(Map<String, Object> blockOptions) {
-        this.blockOptions = blockOptions;
-    }
-
-	public GrammarAST() {;}
-
-	public GrammarAST(int t, String txt) {
-		initialize(t,txt);
-	}
-
-	public GrammarAST(Token token) {
-		initialize(token);
-	}
-
-	public void initialize(int i, String s) {
-        token = new CommonToken(i,s);
-		token.setTokenIndex(-1);
-    }
-
-    public void initialize(Tree ast) {
-		GrammarAST t = ((GrammarAST)ast);
-		this.startIndex = t.startIndex;
-		this.stopIndex = t.stopIndex;
-		this.token = t.token;
-		this.enclosingRuleName = t.enclosingRuleName;
-		this.setValue = t.setValue;
-		this.blockOptions = t.blockOptions;
-		this.outerAltNum = t.outerAltNum;
-	}
-
-    public void initialize(Token token) {
-        this.token = token;
-		if ( token!=null ) {
-			startIndex = token.getTokenIndex();
-			stopIndex = startIndex;
-		}
-    }
-
-    public DFA getLookaheadDFA() {
-        return lookaheadDFA;
-    }
-
-    public void setLookaheadDFA(DFA lookaheadDFA) {
-        this.lookaheadDFA = lookaheadDFA;
-    }
-
-    public NFAState getNFAStartState() {
-        return NFAStartState;
-    }
-
-    public void setNFAStartState(NFAState nfaStartState) {
-		this.NFAStartState = nfaStartState;
-	}
-
-	/** Save the option key/value pair and process it; return the key
-	 *  or null if invalid option.
-	 */
-	public String setBlockOption(Grammar grammar, String key, Object value) {
-		if ( blockOptions == null ) {
-			blockOptions = new HashMap();
-		}
-		return setOption(blockOptions, Grammar.legalBlockOptions, grammar, key, value);
-	}
-
-	public String setTerminalOption(Grammar grammar, String key, Object value) {
-		if ( terminalOptions == null ) {
-			terminalOptions = new HashMap<String,Object>();
-		}
-		return setOption(terminalOptions, Grammar.legalTokenOptions, grammar, key, value);
-	}
-
-	public String setOption(Map options, Set legalOptions, Grammar grammar, String key, Object value) {
-		if ( !legalOptions.contains(key) ) {
-			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
-									  grammar,
-									  token,
-									  key);
-			return null;
-		}
-		if ( value instanceof String ) {
-			String vs = (String)value;
-			if ( vs.charAt(0)=='"' ) {
-				value = vs.substring(1,vs.length()-1); // strip quotes
-            }
-        }
-		if ( key.equals("k") ) {
-			grammar.numberOfManualLookaheadOptions++;
-		}
-        if ( key.equals("backtrack") && value.toString().equals("true") ) {
-            grammar.composite.getRootGrammar().atLeastOneBacktrackOption = true;
-        }
-        options.put(key, value);
-		return key;
-    }
-
-    public Object getBlockOption(String key) {
-		Object value = null;
-		if ( blockOptions != null ) {
-			value = blockOptions.get(key);
-		}
-		return value;
-	}
-
-    public void setOptions(Grammar grammar, Map options) {
-		if ( options==null ) {
-			this.blockOptions = null;
-			return;
-		}
-		String[] keys = (String[])options.keySet().toArray(new String[options.size()]);
-		for (String optionName : keys) {
-			String stored= setBlockOption(grammar, optionName, options.get(optionName));
-			if ( stored==null ) {
-				options.remove(optionName);
-			}
-		}
-    }
-
-    @Override
-    public String getText() {
-		if ( textOverride!=null ) return textOverride;
-        if ( token!=null ) {
-            return token.getText();
-        }
-        return "";
-    }
-
-	public void setType(int type) {
-		token.setType(type);
-	}
-
-	public void setText(String text) {
-		textOverride = text; // don't alter the token, as other nodes might share it
-	}
-
-    @Override
-    public int getType() {
-        if ( token!=null ) {
-            return token.getType();
-        }
-        return -1;
-    }
-
-    @Override
-    public int getLine() {
-		int line=0;
-        if ( token!=null ) {
-            line = token.getLine();
-        }
-		if ( line==0 ) {
-			Tree child = getChild(0);
-			if ( child!=null ) {
-				line = child.getLine();
-			}
-		}
-        return line;
-    }
-
-    @Override
-    public int getCharPositionInLine(){
-		int col=0;
-        if ( token!=null ) {
-            col = token.getCharPositionInLine();
-        }
-		if ( col==0 ) {
-			Tree child = getChild(0);
-			if ( child!=null ) {
-				col = child.getCharPositionInLine();
-			}
-		}
-        return col;
-    }
-
-    public void setLine(int line) {
-        token.setLine(line);
-    }
-
-    public void setCharPositionInLine(int value){
-        token.setCharPositionInLine(value);
-    }
-
- 	public IntSet getSetValue() {
-        return setValue;
-    }
-
-    public void setSetValue(IntSet setValue) {
-        this.setValue = setValue;
-    }
-
-    public GrammarAST getLastChild() {
-        if (getChildCount() == 0)
-            return null;
-        return (GrammarAST)getChild(getChildCount() - 1);
-    }
-
-    public GrammarAST getNextSibling() {
-        return (GrammarAST)getParent().getChild(getChildIndex() + 1);
-    }
-
-    public GrammarAST getLastSibling() {
-        Tree parent = getParent();
-        if ( parent==null ) {
-            return null;
-        }
-        return (GrammarAST)parent.getChild(parent.getChildCount() - 1);
-    }
-
-
-    public GrammarAST[] getChildrenAsArray() {
-        return (GrammarAST[])getChildren().toArray(new GrammarAST[getChildCount()]);
-    }
-
-    private static final GrammarAST DescendantDownNode = new GrammarAST(Token.DOWN, "DOWN");
-    private static final GrammarAST DescendantUpNode = new GrammarAST(Token.UP, "UP");
-
-    public static List<Tree> descendants(Tree root){
-        return descendants(root, false);
-    }
-
-    public static List<Tree> descendants(Tree root, boolean insertDownUpNodes){
-        List<Tree> result = new ArrayList<Tree>();
-        int count = root.getChildCount();
-
-        if (insertDownUpNodes){
-            result.add(root);
-            result.add(DescendantDownNode);
-
-            for (int i = 0 ; i < count ; i++){
-                Tree child = root.getChild(i);
-                for (Tree subchild : descendants(child, true))
-                    result.add(subchild);
-            }
-
-            result.add(DescendantUpNode);
-        } else {
-            result.add(root);
-            for (int i = 0 ; i < count ; i++){
-                Tree child = root.getChild(i);
-                for (Tree subchild : descendants(child, false))
-                    result.add(subchild);
-            }
-        }
-
-        return result;
-    }
-
-	public GrammarAST findFirstType(int ttype) {
-		// check this node (the root) first
-		if ( this.getType()==ttype ) {
-			return this;
-		}
-		// else check children
-		List<Tree> descendants = descendants(this);
-		for (Tree child : descendants) {
-			if ( child.getType()==ttype ) {
-				return (GrammarAST)child;
-			}
-		}
-		return null;
-	}
-
-	public List<GrammarAST> findAllType(int ttype) {
-		List<GrammarAST> nodes = new ArrayList<GrammarAST>();
-		_findAllType(ttype, nodes);
-		return nodes;
-	}
-
-	public void _findAllType(int ttype, List<GrammarAST> nodes) {
-		// check this node (the root) first
-		if ( this.getType()==ttype ) nodes.add(this);
-		// check children
-		for (int i = 0; i < getChildCount(); i++){
-			GrammarAST child = (GrammarAST)getChild(i);
-			child._findAllType(ttype, nodes);
-		}
-	}
-
-    /** Make nodes unique based upon Token so we can add them to a Set; if
-	 *  not a GrammarAST, check type.
-	 */
-	@Override
-	public boolean equals(Object ast) {
-		if ( this == ast ) {
-			return true;
-		}
-		if ( !(ast instanceof GrammarAST) ) {
-			return this.getType() == ((Tree)ast).getType();
-		}
-		GrammarAST t = (GrammarAST)ast;
-		return token.getLine() == t.getLine() &&
-			   token.getCharPositionInLine() == t.getCharPositionInLine();
-	}
-
-    /** Make nodes unique based upon Token so we can add them to a Set; if
-	 *  not a GrammarAST, check type.
-	 */
-    @Override
-    public int hashCode(){
-        if (token == null)
-            return 0;
-
-        return token.hashCode();
-    }
-
-	/** See if tree has exact token types and structure; no text */
-	public boolean hasSameTreeStructure(Tree other) {
-		// check roots first.
-		if (this.getType() != other.getType()) return false;
-		// if roots match, do full list match test on children.
-		Iterator<Tree> thisDescendants = descendants(this, true).iterator();
-		Iterator<Tree> otherDescendants = descendants(other, true).iterator();
-		while (thisDescendants.hasNext()) {
-			if (!otherDescendants.hasNext())
-				return false;
-			if (thisDescendants.next().getType() != otherDescendants.next().getType())
-				return false;
-		}
-		return !otherDescendants.hasNext();
-	}
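
descendants(root, true) and hasSameTreeStructure above compare trees by flattening them into a token-type sequence bracketed with DOWN/UP markers; two trees have the same structure exactly when the flattened sequences match, regardless of node text. The following self-contained sketch illustrates that idea with a toy Node class (all names and sentinel values here are illustrative, not ANTLR's API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class TreeShapeSketch {
    static final int DOWN = -2, UP = -3;   // sentinel "types", in the spirit of Token.DOWN/Token.UP

    static class Node {
        final int type;
        final List<Node> children = new ArrayList<>();
        Node(int type, Node... kids) { this.type = type; children.addAll(Arrays.asList(kids)); }
    }

    /** Flatten a tree into: type, DOWN, child types..., UP (recursively). */
    static List<Integer> flatten(Node root) {
        List<Integer> out = new ArrayList<>();
        out.add(root.type);
        if (!root.children.isEmpty()) {
            out.add(DOWN);
            for (Node c : root.children) out.addAll(flatten(c));
            out.add(UP);
        }
        return out;
    }

    /** Same token types and structure, text ignored -- the hasSameTreeStructure idea. */
    static boolean sameShape(Node a, Node b) {
        return flatten(a).equals(flatten(b));
    }

    public static void main(String[] args) {
        Node a = new Node(1, new Node(2), new Node(3, new Node(4)));
        Node b = new Node(1, new Node(2), new Node(3, new Node(4)));
        Node c = new Node(1, new Node(2, new Node(3)), new Node(4));
        System.out.println(sameShape(a, b)); // true
        System.out.println(sameShape(a, c)); // false
    }
}

Without the DOWN/UP brackets a flat list of types could not distinguish 1(2,3(4)) from 1(2(3),4); the markers are what make the flattened form an unambiguous encoding of tree shape.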
-
-	public static GrammarAST dup(Tree t) {
-		if ( t==null ) {
-			return null;
-		}
-		GrammarAST dup_t = new GrammarAST();
-		dup_t.initialize(t);
-		return dup_t;
-	}
-
-    @Override
-    public Tree dupNode(){
-        return dup(this);
-    }
-
-	/**Duplicate a tree, assuming this is a root node of a tree--
-	 * duplicate that node and what's below; ignore siblings of root node.
-	 */
-	public static GrammarAST dupTreeNoActions(GrammarAST t, GrammarAST parent) {
-		if ( t==null ) {
-			return null;
-		}
-		GrammarAST result = (GrammarAST)t.dupNode();
-		for (GrammarAST subchild : getChildrenForDupTree(t)) {
-			result.addChild(dupTreeNoActions(subchild, result));
-		}
-		return result;
-	}
-
-	private static List<GrammarAST> getChildrenForDupTree(GrammarAST t) {
-		List<GrammarAST> result = new ArrayList<GrammarAST>();
-		for (int i = 0; i < t.getChildCount(); i++){
-			GrammarAST child = (GrammarAST)t.getChild(i);
-			int ttype = child.getType();
-			if (ttype == ANTLRParser.REWRITES || ttype == ANTLRParser.REWRITE || ttype==ANTLRParser.ACTION) {
-				continue;
-			}
-
-			if (ttype == ANTLRParser.BANG || ttype == ANTLRParser.ROOT) {
-				for (GrammarAST subchild : getChildrenForDupTree(child))
-					result.add(subchild);
-			} else {
-				result.add(child);
-			}
-		}
-		if ( result.size()==1 && result.get(0).getType()==ANTLRParser.EOA &&
-			 t.getType()==ANTLRParser.ALT )
-		{
-			// can't have an empty alt, insert epsilon
-			result.add(0, new GrammarAST(ANTLRParser.EPSILON, "epsilon"));
-		}
-
-		return result;
-	}
-
-	public static GrammarAST dupTree(GrammarAST t) {
-		if ( t==null ) {
-			return null;
-		}
-		GrammarAST root = dup(t);		// make copy of root
-		// copy all children of root.
-		for (int i= 0; i < t.getChildCount(); i++) {
-			GrammarAST child = (GrammarAST)t.getChild(i);
-			root.addChild(dupTree(child));
-		}
-		return root;
-	}
-
-	public void setTreeEnclosingRuleNameDeeply(String rname) {
-		enclosingRuleName = rname;
-		if (getChildCount() == 0) return;
-		for (Object child : getChildren()) {
-			if (!(child instanceof GrammarAST)) {
-				continue;
-			}
-			GrammarAST grammarAST = (GrammarAST)child;
-			grammarAST.setTreeEnclosingRuleNameDeeply(rname);
-		}
-	}
-
-	String toStringList() {
-		return "";
-	}
-
-	/** Track start/stop token for subtree root created for a rule.
-	 *  Only works with Tree nodes.  For rules that match nothing,
-	 *  it seems like this will yield start=i and stop=i-1 in a nil node.
-	 *  Might be useful info, so I'll not force it to be i..i.
-	 */
-	public void setTokenBoundaries(Token startToken, Token stopToken) {
-		if ( startToken!=null ) startIndex = startToken.getTokenIndex();
-		if ( stopToken!=null ) stopIndex = stopToken.getTokenIndex();
-	}
-
-	public GrammarAST getBlockALT(int i) {
-		if ( this.getType()!=ANTLRParser.BLOCK ) return null;
-		int alts = 0;
-		for (int j =0 ; j < getChildCount(); j++) {
-			if (getChild(j).getType() == ANTLRParser.ALT) {
-				alts++;
-			}
-			if (alts == i) {
-				return (GrammarAST)getChild(j);
-			}
-		}
-		return null;
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarAnalysisAbortedMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarAnalysisAbortedMessage.java
deleted file mode 100644
index 95d1d17..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarAnalysisAbortedMessage.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.DecisionProbe;
-import org.stringtemplate.v4.ST;
-
-/** Reports the condition that ANTLR's LL(*) analysis engine terminated
- *  early.
- */
-public class GrammarAnalysisAbortedMessage extends Message {
-	public DecisionProbe probe;
-
-	public GrammarAnalysisAbortedMessage(DecisionProbe probe) {
-		super(ErrorManager.MSG_ANALYSIS_ABORTED);
-		this.probe = probe;
-	}
-
-	public String toString() {
-		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
-		line = decisionASTNode.getLine();
-		column = decisionASTNode.getCharPositionInLine();
-		String fileName = probe.dfa.nfa.grammar.getFileName();
-		if ( fileName!=null ) {
-			file = fileName;
-		}
-		ST st = getMessageTemplate();
-		st.add("enclosingRule",
-						probe.dfa.getNFADecisionStartState().enclosingRule.name);
-
-		return super.toString(st);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarDanglingStateMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarDanglingStateMessage.java
deleted file mode 100644
index e360dd7..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarDanglingStateMessage.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.DFAState;
-import org.antlr.analysis.DecisionProbe;
-import org.stringtemplate.v4.ST;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/** Reports a dangling DFA state in a decision: analysis reached a state with
- *  no emanating edges, so the decision cannot resolve some input sequences.
- */
-public class GrammarDanglingStateMessage extends Message {
-	public DecisionProbe probe;
-	public DFAState problemState;
-
-	public GrammarDanglingStateMessage(DecisionProbe probe,
-									   DFAState problemState)
-	{
-		super(ErrorManager.MSG_DANGLING_STATE);
-		this.probe = probe;
-		this.problemState = problemState;
-	}
-
-	public String toString() {
-		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
-		line = decisionASTNode.getLine();
-		column = decisionASTNode.getCharPositionInLine();
-		String fileName = probe.dfa.nfa.grammar.getFileName();
-		if ( fileName!=null ) {
-			file = fileName;
-		}
-		List labels = probe.getSampleNonDeterministicInputSequence(problemState);
-		String input = probe.getInputSequenceDisplay(labels);
-		ST st = getMessageTemplate();
-		List alts = new ArrayList();
-		alts.addAll(problemState.getAltSet());
-		Collections.sort(alts);
-		st.add("danglingAlts", alts);
-		st.add("input", input);
-
-		return super.toString(st);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarInsufficientPredicatesMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
deleted file mode 100644
index 156bdc1..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.DFAState;
-import org.antlr.analysis.DecisionProbe;
-import org.antlr.analysis.Label;
-import org.antlr.runtime.Token;
-import org.stringtemplate.v4.ST;
-
-import java.util.*;
-
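-/** Reports that the semantic predicates visible to a nondeterministic decision
- *  are insufficient to resolve it: they do not cover every conflicting
- *  alternative, or are hidden/blocked by actions.
- */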
-public class GrammarInsufficientPredicatesMessage extends Message {
-	public DecisionProbe probe;
-    public Map<Integer, Set<Token>> altToLocations;
-	public DFAState problemState;
-
-	public GrammarInsufficientPredicatesMessage(DecisionProbe probe,
-												DFAState problemState,
-												Map<Integer, Set<Token>> altToLocations)
-	{
-		super(ErrorManager.MSG_INSUFFICIENT_PREDICATES);
-		this.probe = probe;
-		this.problemState = problemState;
-		this.altToLocations = altToLocations;
-	}
-
-	public String toString() {
-		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
-		line = decisionASTNode.getLine();
-		column = decisionASTNode.getCharPositionInLine();
-		String fileName = probe.dfa.nfa.grammar.getFileName();
-		if ( fileName!=null ) {
-			file = fileName;
-		}
-		ST st = getMessageTemplate();
-		// convert to string key to avoid 3.1 ST bug
-		Map<String, Set<Token>> altToLocationsWithStringKey = new LinkedHashMap<String, Set<Token>>();
-		List<Integer> alts = new ArrayList<Integer>();
-		alts.addAll(altToLocations.keySet());
-		Collections.sort(alts);
-		for (Integer altI : alts) {
-			altToLocationsWithStringKey.put(altI.toString(), altToLocations.get(altI));
-			/*
-			List<String> tokens = new ArrayList<String>();
-			for (Token t : altToLocations.get(altI)) {
-				tokens.add(t.toString());
-			}
-			Collections.sort(tokens);
-			System.out.println("tokens=\n"+tokens);
-			*/
-		}
-		st.add("altToLocations", altToLocationsWithStringKey);
-
-		List<Label> sampleInputLabels = problemState.dfa.probe.getSampleNonDeterministicInputSequence(problemState);
-		String input = problemState.dfa.probe.getInputSequenceDisplay(sampleInputLabels);
-		st.add("upon", input);
-
-		st.add("hasPredicateBlockedByAction", problemState.dfa.hasPredicateBlockedByAction);
-
-		return super.toString(st);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarNonDeterminismMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarNonDeterminismMessage.java
deleted file mode 100644
index 8cd95ae..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarNonDeterminismMessage.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.DFAState;
-import org.antlr.analysis.DecisionProbe;
-import org.antlr.analysis.NFAState;
-import org.antlr.misc.Utils;
-import org.stringtemplate.v4.ST;
-
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
-/** Reports a potential parsing issue with a decision; the decision is
- *  nondeterministic in some way.
- */
-public class GrammarNonDeterminismMessage extends Message {
-	public DecisionProbe probe;
-    public DFAState problemState;
-
-	public GrammarNonDeterminismMessage(DecisionProbe probe,
-										DFAState problemState)
-	{
-		super(ErrorManager.MSG_GRAMMAR_NONDETERMINISM);
-		this.probe = probe;
-		this.problemState = problemState;
-		// flip msg ID if alts are actually token refs in Tokens rule
-		if ( probe.dfa.isTokensRuleDecision() ) {
-			setMessageID(ErrorManager.MSG_TOKEN_NONDETERMINISM);
-		}
-	}
-
-	public String toString() {
-		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
-		line = decisionASTNode.getLine();
-		column = decisionASTNode.getCharPositionInLine();
-		String fileName = probe.dfa.nfa.grammar.getFileName();
-		if ( fileName!=null ) {
-			file = fileName;
-		}
-
-		ST st = getMessageTemplate();
-		// Now fill template with information about problemState
-		List labels = probe.getSampleNonDeterministicInputSequence(problemState);
-		String input = probe.getInputSequenceDisplay(labels);
-		st.add("input", input);
-
-		if ( probe.dfa.isTokensRuleDecision() ) {
-			Set disabledAlts = probe.getDisabledAlternatives(problemState);
-			for (Iterator it = disabledAlts.iterator(); it.hasNext();) {
-				Integer altI = (Integer) it.next();
-				String tokenName =
-					probe.getTokenNameForTokensRuleAlt(altI.intValue());
-				// reset the line/col to the token definition (pick last one)
-				NFAState ruleStart =
-					probe.dfa.nfa.grammar.getRuleStartState(tokenName);
-				line = ruleStart.associatedASTNode.getLine();
-				column = ruleStart.associatedASTNode.getCharPositionInLine();
-				st.add("disabled", tokenName);
-			}
-		}
-		else {
-			st.add("disabled", probe.getDisabledAlternatives(problemState));
-		}
-
-		List nondetAlts = probe.getNonDeterministicAltsForState(problemState);
-		NFAState nfaStart = probe.dfa.getNFADecisionStartState();
-		// all state paths have to begin with same NFA state
-		int firstAlt = 0;
-		if ( nondetAlts!=null ) {
-			for (Iterator iter = nondetAlts.iterator(); iter.hasNext();) {
-				Integer displayAltI = (Integer) iter.next();
-				if ( DecisionProbe.verbose ) {
-					int tracePathAlt =
-						nfaStart.translateDisplayAltToWalkAlt(displayAltI.intValue());
-					if ( firstAlt == 0 ) {
-						firstAlt = tracePathAlt;
-					}
-					List path =
-						probe.getNFAPathStatesForAlt(firstAlt,
-													 tracePathAlt,
-													 labels);
-					st.addAggr("paths.{alt, states}", displayAltI, path);
-				}
-				else {
-					if ( probe.dfa.isTokensRuleDecision() ) {
-						// alts are token rules, convert to the names instead of numbers
-						String tokenName =
-							probe.getTokenNameForTokensRuleAlt(displayAltI.intValue());
-						st.add("conflictingTokens", tokenName);
-					}
-					else {
-						st.add("conflictingAlts", displayAltI);
-					}
-				}
-			}
-		}
-		st.add("hasPredicateBlockedByAction", problemState.dfa.hasPredicateBlockedByAction);
-		return super.toString(st);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarReport.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarReport.java
deleted file mode 100644
index e0462a3..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarReport.java
+++ /dev/null
@@ -1,485 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.DFA;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.misc.Utils;
-import org.antlr.runtime.misc.Stats;
-
-import java.lang.reflect.Field;
-import java.util.*;
-
-public class GrammarReport {
-	/** Because I may change the stats, I need to track version for later
-	 *  computations to be consistent.
-	 */
-	public static final String Version = "5";
-	public static final String GRAMMAR_STATS_FILENAME = "grammar.stats";
-
-	public static class ReportData {
-		String version;
-		String gname;
-		String gtype;
-		String language;
-		int numRules;
-		int numOuterProductions;
-		int numberOfDecisionsInRealRules;
-		int numberOfDecisions;
-		int numberOfCyclicDecisions;
-		int numberOfFixedKDecisions;
-		int numLL1;
-		int mink;
-		int maxk;
-		double avgk;
-		int numTokens;
-		long DFACreationWallClockTimeInMS;
-		int numberOfSemanticPredicates;
-		int numberOfManualLookaheadOptions; // TODO: verify
-		int numNonLLStarDecisions;
-		int numNondeterministicDecisions;
-		int numNondeterministicDecisionNumbersResolvedWithPredicates;
-		int errors;
-		int warnings;
-		int infos;
-		//int num_synpreds;
-		int blocksWithSynPreds;
-		int decisionsWhoseDFAsUsesSynPreds;
-		int blocksWithSemPreds;
-		int decisionsWhoseDFAsUsesSemPreds;
-		String output;
-		String grammarLevelk;
-		String grammarLevelBacktrack;
-	}
-
-	public static final String newline = System.getProperty("line.separator");
-
-	public Grammar grammar;
-
-	public GrammarReport(Grammar grammar) {
-		this.grammar = grammar;
-	}
-
-	public static ReportData getReportData(Grammar g) {
-		ReportData data = new ReportData();
-		data.version = Version;
-		data.gname = g.name;
-
-		data.gtype = g.getGrammarTypeString();
-
-		data.language = (String) g.getOption("language");
-		data.output = (String) g.getOption("output");
-		if ( data.output==null ) {
-			data.output = "none";
-		}
-
-		String k = (String) g.getOption("k");
-		if ( k==null ) {
-			k = "none";
-		}
-		data.grammarLevelk = k;
-
-		String backtrack = (String) g.getOption("backtrack");
-		if ( backtrack==null ) {
-			backtrack = "false";
-		}
-		data.grammarLevelBacktrack = backtrack;
-
-		int totalNonSynPredProductions = 0;
-		int totalNonSynPredRules = 0;
-		Collection rules = g.getRules();
-		for (Iterator it = rules.iterator(); it.hasNext();) {
-			Rule r = (Rule) it.next();
-			if ( !r.name.toUpperCase()
-				.startsWith(Grammar.SYNPRED_RULE_PREFIX.toUpperCase()) )
-			{
-				totalNonSynPredProductions += r.numberOfAlts;
-				totalNonSynPredRules++;
-			}
-		}
-
-		data.numRules = totalNonSynPredRules;
-		data.numOuterProductions = totalNonSynPredProductions;
-
-		int numACyclicDecisions =
-			g.getNumberOfDecisions()- g.getNumberOfCyclicDecisions();
-		List<Integer> depths = new ArrayList<Integer>();
-		int[] acyclicDFAStates = new int[numACyclicDecisions];
-		int[] cyclicDFAStates = new int[g.getNumberOfCyclicDecisions()];
-		int acyclicIndex = 0;
-		int cyclicIndex = 0;
-		int numLL1 = 0;
-		int blocksWithSynPreds = 0;
-		int dfaWithSynPred = 0;
-		int numDecisions = 0;
-		int numCyclicDecisions = 0;
-		for (int i=1; i<= g.getNumberOfDecisions(); i++) {
-			Grammar.Decision d = g.getDecision(i);
-			if( d.dfa==null ) {
-				//System.out.println("dec "+d.decision+" has no AST");
-				continue;
-			}
-			Rule r = d.dfa.decisionNFAStartState.enclosingRule;
-			if ( r.name.toUpperCase()
-				.startsWith(Grammar.SYNPRED_RULE_PREFIX.toUpperCase()) )
-			{
-				//System.out.println("dec "+d.decision+" is a synpred");
-				continue;
-			}
-
-			numDecisions++;
-			if ( blockHasSynPred(d.blockAST) ) blocksWithSynPreds++;
-			//if ( g.decisionsWhoseDFAsUsesSynPreds.contains(d.dfa) ) dfaWithSynPred++;
-			if ( d.dfa.hasSynPred() ) dfaWithSynPred++;
-			
-//			NFAState decisionStartState = grammar.getDecisionNFAStartState(d.decision);
-//			int nalts = grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
-//			for (int alt = 1; alt <= nalts; alt++) {
-//				int walkAlt =
-//					decisionStartState.translateDisplayAltToWalkAlt(alt);
-//				NFAState altLeftEdge = grammar.getNFAStateForAltOfDecision(decisionStartState, walkAlt);
-//			}
-//			int nalts = grammar.getNumberOfAltsForDecisionNFA(d.dfa.decisionNFAStartState);
-//			for (int a=1; a<nalts; a++) {
-//				NFAState altStart =
-//					grammar.getNFAStateForAltOfDecision(d.dfa.decisionNFAStartState, a);
-//			}
-			if ( !d.dfa.isCyclic() ) {
-				if ( d.dfa.isClassicDFA() ) {
-					int maxk = d.dfa.getMaxLookaheadDepth();
-					//System.out.println("decision "+d.dfa.decisionNumber+" k="+maxk);
-					if ( maxk==1 ) numLL1++;
-					depths.add( maxk );
-				}
-				else {
-					acyclicDFAStates[acyclicIndex] = d.dfa.getNumberOfStates();
-					acyclicIndex++;
-				}
-			}
-			else {
-				//System.out.println("CYCLIC decision "+d.dfa.decisionNumber);
-				numCyclicDecisions++;
-				cyclicDFAStates[cyclicIndex] = d.dfa.getNumberOfStates();
-				cyclicIndex++;
-			}
-		}
-
-		data.numLL1 = numLL1;
-		data.numberOfFixedKDecisions = depths.size();
-		data.mink = Stats.min(depths);
-		data.maxk = Stats.max(depths);
-		data.avgk = Stats.avg(depths);
-
-		data.numberOfDecisionsInRealRules = numDecisions;
-		data.numberOfDecisions = g.getNumberOfDecisions();
-		data.numberOfCyclicDecisions = numCyclicDecisions;
-
-//		Map synpreds = grammar.getSyntacticPredicates();
-//		int num_synpreds = synpreds!=null ? synpreds.size() : 0;
-//		data.num_synpreds = num_synpreds;
-		data.blocksWithSynPreds = blocksWithSynPreds;
-		data.decisionsWhoseDFAsUsesSynPreds = dfaWithSynPred;
-
-//
-//		data. = Stats.stddev(depths);
-//
-//		data. = Stats.min(acyclicDFAStates);
-//
-//		data. = Stats.max(acyclicDFAStates);
-//
-//		data. = Stats.avg(acyclicDFAStates);
-//
-//		data. = Stats.stddev(acyclicDFAStates);
-//
-//		data. = Stats.sum(acyclicDFAStates);
-//
-//		data. = Stats.min(cyclicDFAStates);
-//
-//		data. = Stats.max(cyclicDFAStates);
-//
-//		data. = Stats.avg(cyclicDFAStates);
-//
-//		data. = Stats.stddev(cyclicDFAStates);
-//
-//		data. = Stats.sum(cyclicDFAStates);
-
-		data.numTokens = g.getTokenTypes().size();
-
-		data.DFACreationWallClockTimeInMS = g.DFACreationWallClockTimeInMS;
-
-		// includes true ones and preds in synpreds I think; strip out. 
-		data.numberOfSemanticPredicates = g.numberOfSemanticPredicates;
-
-		data.numberOfManualLookaheadOptions = g.numberOfManualLookaheadOptions;
-
-		data.numNonLLStarDecisions = g.numNonLLStar;
-		data.numNondeterministicDecisions = g.setOfNondeterministicDecisionNumbers.size();
-		data.numNondeterministicDecisionNumbersResolvedWithPredicates =
-			g.setOfNondeterministicDecisionNumbersResolvedWithPredicates.size();
-
-		data.errors = ErrorManager.getErrorState().errors;
-		data.warnings = ErrorManager.getErrorState().warnings;
-		data.infos = ErrorManager.getErrorState().infos;
-
-		data.blocksWithSemPreds = g.blocksWithSemPreds.size();
-
-		data.decisionsWhoseDFAsUsesSemPreds = g.decisionsWhoseDFAsUsesSemPreds.size();
-
-		return data;
-	}
-	
-	/** Create a single-line stats report about this grammar suitable to
-	 *  send to the notify page at antlr.org
-	 */
-	public String toNotifyString() {
-		StringBuffer buf = new StringBuffer();
-		ReportData data = getReportData(grammar);
-		Field[] fields = ReportData.class.getDeclaredFields();
-		int i = 0;
-		for (Field f : fields) {
-			try {
-				Object v = f.get(data);
-				String s = v!=null ? v.toString() : "null";
-				if (i>0) buf.append('\t');
-				buf.append(s);
-			}
-			catch (Exception e) {
-				ErrorManager.internalError("Can't get data", e);
-			}
-			i++;
-		}
-		return buf.toString();
-	}
-
-	public String getBacktrackingReport() {
-		StringBuffer buf = new StringBuffer();
-		buf.append("Backtracking report:");
-		buf.append(newline);
-		buf.append("Number of decisions that backtrack: ");
-		buf.append(grammar.decisionsWhoseDFAsUsesSynPreds.size());
-		buf.append(newline);
-		buf.append(getDFALocations(grammar.decisionsWhoseDFAsUsesSynPreds));
-		return buf.toString();
-	}
-
-	protected String getDFALocations(Set dfas) {
-		Set decisions = new HashSet();
-		StringBuffer buf = new StringBuffer();
-		Iterator it = dfas.iterator();
-		while ( it.hasNext() ) {
-			DFA dfa = (DFA) it.next();
-			// if we aborted a DFA and redid with k=1, the backtracking DFA can appear twice; report each decision only once
-			if ( decisions.contains(Utils.integer(dfa.decisionNumber)) ) {
-				continue;
-			}
-			decisions.add(Utils.integer(dfa.decisionNumber));
-			buf.append("Rule ");
-			buf.append(dfa.decisionNFAStartState.enclosingRule.name);
-			buf.append(" decision ");
-			buf.append(dfa.decisionNumber);
-			buf.append(" location ");
-			GrammarAST decisionAST =
-				dfa.decisionNFAStartState.associatedASTNode;
-			buf.append(decisionAST.getLine());
-			buf.append(":");
-			buf.append(decisionAST.getCharPositionInLine());
-			buf.append(newline);
-		}
-		return buf.toString();
-	}
-
-	/** Given a stats line suitable for sending to the antlr.org site,
-	 *  return a human-readable version.  Return null if there is a
-	 *  problem with the data.
-	 */
-	public String toString() {
-		return toString(toNotifyString());
-	}
-
-	protected static ReportData decodeReportData(String dataS) {
-		ReportData data = new ReportData();
-		StringTokenizer st = new StringTokenizer(dataS, "\t");
-		Field[] fields = ReportData.class.getDeclaredFields();
-		for (Field f : fields) {
-			String v = st.nextToken();
-			try {
-				if ( f.getType() == String.class ) {
-					f.set(data, v);
-				}
-				else if ( f.getType() == double.class ) {
-					f.set(data, Double.valueOf(v));					
-				}
-				else {
-					f.set(data, Integer.valueOf(v));					
-				}
-			}
-			catch (Exception e) {
-				ErrorManager.internalError("Can't get data", e);
-			}
-		}
-		return data;
-	}
-
-	public static String toString(String notifyDataLine) {
-		ReportData data = decodeReportData(notifyDataLine);
-		if ( data ==null ) {
-			return null;
-		}
-		StringBuffer buf = new StringBuffer();
-		buf.append("ANTLR Grammar Report; Stats Version ");
-		buf.append(data.version);
-		buf.append('\n');
-		buf.append("Grammar: ");
-		buf.append(data.gname);
-		buf.append('\n');
-		buf.append("Type: ");
-		buf.append(data.gtype);
-		buf.append('\n');
-		buf.append("Target language: ");
-		buf.append(data.language);
-		buf.append('\n');
-		buf.append("Output: ");
-		buf.append(data.output);
-		buf.append('\n');
-		buf.append("Grammar option k: ");
-		buf.append(data.grammarLevelk);
-		buf.append('\n');
-		buf.append("Grammar option backtrack: ");
-		buf.append(data.grammarLevelBacktrack);
-		buf.append('\n');
-		buf.append("Rules: ");
-		buf.append(data.numRules);
-		buf.append('\n');
-		buf.append("Outer productions: ");
-		buf.append(data.numOuterProductions);
-		buf.append('\n');
-		buf.append("Decisions: ");
-		buf.append(data.numberOfDecisions);
-		buf.append('\n');
-		buf.append("Decisions (ignoring decisions in synpreds): ");
-		buf.append(data.numberOfDecisionsInRealRules);
-		buf.append('\n');
-		buf.append("Fixed k DFA decisions: ");
-		buf.append(data.numberOfFixedKDecisions);
-		buf.append('\n');
-		buf.append("Cyclic DFA decisions: ");
-		buf.append(data.numberOfCyclicDecisions);
-		buf.append('\n');
-		buf.append("LL(1) decisions: "); buf.append(data.numLL1);
-		buf.append('\n');
-		buf.append("Min fixed k: "); buf.append(data.mink);
-		buf.append('\n');
-		buf.append("Max fixed k: "); buf.append(data.maxk);
-		buf.append('\n');
-		buf.append("Average fixed k: "); buf.append(data.avgk);
-		buf.append('\n');
-//		buf.append("Standard deviation of fixed k: "); buf.append(fields[12]);
-//		buf.append('\n');
-//		buf.append("Min acyclic DFA states: "); buf.append(fields[13]);
-//		buf.append('\n');
-//		buf.append("Max acyclic DFA states: "); buf.append(fields[14]);
-//		buf.append('\n');
-//		buf.append("Average acyclic DFA states: "); buf.append(fields[15]);
-//		buf.append('\n');
-//		buf.append("Standard deviation of acyclic DFA states: "); buf.append(fields[16]);
-//		buf.append('\n');
-//		buf.append("Total acyclic DFA states: "); buf.append(fields[17]);
-//		buf.append('\n');
-//		buf.append("Min cyclic DFA states: "); buf.append(fields[18]);
-//		buf.append('\n');
-//		buf.append("Max cyclic DFA states: "); buf.append(fields[19]);
-//		buf.append('\n');
-//		buf.append("Average cyclic DFA states: "); buf.append(fields[20]);
-//		buf.append('\n');
-//		buf.append("Standard deviation of cyclic DFA states: "); buf.append(fields[21]);
-//		buf.append('\n');
-//		buf.append("Total cyclic DFA states: "); buf.append(fields[22]);
-//		buf.append('\n');
-		buf.append("DFA creation time in ms: ");
-		buf.append(data.DFACreationWallClockTimeInMS);
-		buf.append('\n');
-
-//		buf.append("Number of syntactic predicates available (including synpred rules): ");
-//		buf.append(data.num_synpreds);
-//		buf.append('\n');
-		buf.append("Decisions with available syntactic predicates (ignoring synpred rules): ");
-		buf.append(data.blocksWithSynPreds);
-		buf.append('\n');
-		buf.append("Decision DFAs using syntactic predicates (ignoring synpred rules): ");
-		buf.append(data.decisionsWhoseDFAsUsesSynPreds);
-		buf.append('\n');
-
-		buf.append("Number of semantic predicates found: ");
-		buf.append(data.numberOfSemanticPredicates);
-		buf.append('\n');
-		buf.append("Decisions with semantic predicates: ");
-		buf.append(data.blocksWithSemPreds);
-		buf.append('\n');
-		buf.append("Decision DFAs using semantic predicates: ");
-		buf.append(data.decisionsWhoseDFAsUsesSemPreds);
-		buf.append('\n');
-
-		buf.append("Number of (likely) non-LL(*) decisions: ");
-		buf.append(data.numNonLLStarDecisions);
-		buf.append('\n');
-		buf.append("Number of nondeterministic decisions: ");
-		buf.append(data.numNondeterministicDecisions);
-		buf.append('\n');
-		buf.append("Number of nondeterministic decisions resolved with predicates: ");
-		buf.append(data.numNondeterministicDecisionNumbersResolvedWithPredicates);
-		buf.append('\n');
-
-		buf.append("Number of manual or forced fixed lookahead k=value options: ");
-		buf.append(data.numberOfManualLookaheadOptions);
-		buf.append('\n');
-
-		buf.append("Vocabulary size: ");
-		buf.append(data.numTokens);
-		buf.append('\n');
-		buf.append("Number of errors: ");
-		buf.append(data.errors);
-		buf.append('\n');
-		buf.append("Number of warnings: ");
-		buf.append(data.warnings);
-		buf.append('\n');
-		buf.append("Number of infos: ");
-		buf.append(data.infos);
-		buf.append('\n');
-		return buf.toString();
-	}
-
-	public static boolean blockHasSynPred(GrammarAST blockAST) {
-		GrammarAST c1 = blockAST.findFirstType(ANTLRParser.SYN_SEMPRED);
-		GrammarAST c2 = blockAST.findFirstType(ANTLRParser.BACKTRACK_SEMPRED);
-		if ( c1!=null || c2!=null ) return true;
-//		System.out.println(blockAST.enclosingRuleName+
-//						   " "+blockAST.getLine()+":"+blockAST.getColumn()+" no preds AST="+blockAST.toStringTree());
-		return false;
-	}
-
-}
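GrammarReport's toNotifyString()/decodeReportData() above rely on reflection: the declared fields of ReportData are written out as one tab-separated line and read back in the same field order. A minimal, self-contained sketch of that round trip (hypothetical Data class and values, not ANTLR's ReportData) is:

import java.lang.reflect.Field;
import java.util.StringTokenizer;

// Hypothetical demo; shows the same reflection-based encode/decode round trip
// that GrammarReport uses for its ReportData fields.
public class ReportRoundTripDemo {
    public static class Data {
        String name = "T";
        int decisions = 7;
        double avgk = 1.5;
    }

    // Encode declared fields as one tab-separated line, in declaration order.
    static String encode(Data d) throws IllegalAccessException {
        StringBuilder buf = new StringBuilder();
        Field[] fields = Data.class.getDeclaredFields();
        for (int i = 0; i < fields.length; i++) {
            if (i > 0) buf.append('\t');
            Object v = fields[i].get(d);
            buf.append(v != null ? v.toString() : "null");
        }
        return buf.toString();
    }

    // Decode by walking the same field list and converting per field type.
    static Data decode(String line) throws IllegalAccessException {
        Data d = new Data();
        StringTokenizer st = new StringTokenizer(line, "\t");
        for (Field f : Data.class.getDeclaredFields()) {
            String v = st.nextToken();
            if (f.getType() == String.class) f.set(d, v);
            else if (f.getType() == double.class) f.set(d, Double.valueOf(v));
            else f.set(d, Integer.valueOf(v));
        }
        return d;
    }

    public static void main(String[] args) throws Exception {
        Data d = decode(encode(new Data()));
        System.out.println(d.name + " " + d.decisions + " " + d.avgk);
    }
}

As in the deleted code, this assumes getDeclaredFields() returns fields in declaration order, which the JVM does not strictly guarantee.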
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarReport2.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarReport2.java
deleted file mode 100644
index bc57891..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarReport2.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.tool;
-
-/** Simplifying report dramatically for LL(*) paper.  Old results were
- *  wrong anyway it seems.  We need:
- *
- * 		percent decisions that potentially backtrack
- *  	histogram of regular lookahead depth (int k or *)
- */
-public class GrammarReport2 {
-	public static final String newline = System.getProperty("line.separator");
-
-	public Grammar root;
-
-	public GrammarReport2(Grammar rootGrammar) {
-		this.root = rootGrammar;
-	}
-
-	public String toString() {
-		StringBuilder buf = new StringBuilder();
-		stats(root, buf);
-		CompositeGrammar composite = root.composite;
-		for (Grammar g : composite.getDelegates(root)) {
-			stats(g, buf);
-		}
-		return buf.toString();
-	}
-
-	void stats(Grammar g, StringBuilder buf) {
-		int numDec = g.getNumberOfDecisions();
-		for (int decision=1; decision<=numDec; decision++) {
-			Grammar.Decision d = g.getDecision(decision);
-			if ( d.dfa==null ) { // unused decisions in auto synpreds
-				//System.err.println("no decision "+decision+" dfa for "+d.blockAST.toStringTree());
-				continue;
-			}
-			int k = d.dfa.getMaxLookaheadDepth();
-			Rule enclosingRule = d.dfa.decisionNFAStartState.enclosingRule;
-			if ( enclosingRule.isSynPred ) continue; // don't count synpred rules
-			buf.append(g.name+"."+enclosingRule.name+":" +
-					   "");
-			GrammarAST decisionAST =
-				d.dfa.decisionNFAStartState.associatedASTNode;
-			buf.append(decisionAST.getLine());
-			buf.append(":");
-			buf.append(decisionAST.getCharPositionInLine());
-			buf.append(" decision "+decision+":");
-			
-			if ( d.dfa.isCyclic() ) buf.append(" cyclic");
-			if ( k!=Integer.MAX_VALUE ) buf.append(" k="+k); // fixed, no sempreds
-			if ( d.dfa.hasSynPred() ) buf.append(" backtracks"); // isolated synpred not gated
-			if ( d.dfa.hasSemPred() ) buf.append(" sempred"); // user-defined sempred
-//			else {
-//				buf.append("undefined");
-//				FASerializer serializer = new FASerializer(g);
-//				String result = serializer.serialize(d.dfa.startState);
-//				System.err.println(result);
-//			}
-			nl(buf);
-		}
-	}
-
-	void nl(StringBuilder buf) {
-		buf.append(newline);
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSanity.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSanity.java
deleted file mode 100644
index bcfabfb..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSanity.java
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.NFAState;
-import org.antlr.analysis.RuleClosureTransition;
-import org.antlr.analysis.Transition;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.runtime.tree.Tree;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/** Factor out routines that check the sanity of rules, alts, grammars, etc. */
-public class GrammarSanity {
-	/** The checkForLeftRecursion method needs to track what rules it has
-	 *  visited to track infinite recursion.
-	 */
-	protected Set<Rule> visitedDuringRecursionCheck = null;
-
-	protected Grammar grammar;
-	public GrammarSanity(Grammar grammar) {
-		this.grammar = grammar;
-	}
-
-	/** Check all rules for infinite left recursion before analysis. Return list
-	 *  of troublesome rule cycles.  This method has two side-effects: it notifies
-	 *  the error manager that we have problems and it sets the list of
-	 *  recursive rules that we should ignore during analysis.
-	 */
-	public List<Set<Rule>> checkAllRulesForLeftRecursion() {
-		grammar.buildNFA(); // make sure we have NFAs
-		grammar.leftRecursiveRules = new HashSet();
-		List<Set<Rule>> listOfRecursiveCycles = new ArrayList();
-		for (int i = 0; i < grammar.composite.ruleIndexToRuleList.size(); i++) {
-			Rule r = grammar.composite.ruleIndexToRuleList.elementAt(i);
-			if ( r!=null ) {
-				visitedDuringRecursionCheck = new HashSet();
-				visitedDuringRecursionCheck.add(r);
-				Set visitedStates = new HashSet();
-				traceStatesLookingForLeftRecursion(r.startState,
-												   visitedStates,
-												   listOfRecursiveCycles);
-			}
-		}
-		if ( listOfRecursiveCycles.size()>0 ) {
-			ErrorManager.leftRecursionCycles(listOfRecursiveCycles);
-		}
-		return listOfRecursiveCycles;
-	}
-
-	/** From state s, look for any transition to a rule that is currently
-	 *  being traced.  When tracing r, visitedDuringRecursionCheck has r
-	 *  initially.  If you reach an accept state, return but notify the
-	 *  invoking rule that it is nullable, which implies that invoking
-	 *  rule must look at follow transition for that invoking state.
-	 *  The visitedStates tracks visited states within a single rule so
-	 *  we can avoid epsilon-loop-induced infinite recursion here.  Keep
-	 *  filling the cycles in listOfRecursiveCycles and also, as a
-	 *  side-effect, set leftRecursiveRules.
-	 */
-	protected boolean traceStatesLookingForLeftRecursion(NFAState s,
-														 Set visitedStates,
-														 List<Set<Rule>> listOfRecursiveCycles)
-	{
-		if ( s.isAcceptState() ) {
-			// this rule must be nullable!
-			// At least one epsilon edge reached accept state
-			return true;
-		}
-		if ( visitedStates.contains(s) ) {
-			// within same rule, we've hit same state; quit looping
-			return false;
-		}
-		visitedStates.add(s);
-		boolean stateReachesAcceptState = false;
-		Transition t0 = s.transition[0];
-		if ( t0 instanceof RuleClosureTransition ) {
-			RuleClosureTransition refTrans = (RuleClosureTransition)t0;
-			Rule refRuleDef = refTrans.rule;
-			//String targetRuleName = ((NFAState)t0.target).getEnclosingRule();
-			if ( visitedDuringRecursionCheck.contains(refRuleDef) ) {
-				// record left-recursive rule, but don't go back in
-				grammar.leftRecursiveRules.add(refRuleDef);
-				/*
-				System.out.println("already visited "+refRuleDef+", calling from "+
-								   s.enclosingRule);
-								   */
-				addRulesToCycle(refRuleDef,
-								s.enclosingRule,
-								listOfRecursiveCycles);
-			}
-			else {
-				// must visit if not already visited; send new visitedStates set
-				visitedDuringRecursionCheck.add(refRuleDef);
-				boolean callReachedAcceptState =
-					traceStatesLookingForLeftRecursion((NFAState)t0.target,
-													   new HashSet(),
-													   listOfRecursiveCycles);
-				// we're back from visiting that rule
-				visitedDuringRecursionCheck.remove(refRuleDef);
-				// must keep going in this rule then
-				if ( callReachedAcceptState ) {
-					NFAState followingState =
-						((RuleClosureTransition) t0).followState;
-					stateReachesAcceptState |=
-						traceStatesLookingForLeftRecursion(followingState,
-														   visitedStates,
-														   listOfRecursiveCycles);
-				}
-			}
-		}
-		else if ( t0.label.isEpsilon() || t0.label.isSemanticPredicate() ) {
-			stateReachesAcceptState |=
-				traceStatesLookingForLeftRecursion((NFAState)t0.target, visitedStates, listOfRecursiveCycles);
-		}
-		// else it has a labeled edge
-
-		// now do the other transition if it exists
-		Transition t1 = s.transition[1];
-		if ( t1!=null ) {
-			stateReachesAcceptState |=
-				traceStatesLookingForLeftRecursion((NFAState)t1.target,
-												   visitedStates,
-												   listOfRecursiveCycles);
-		}
-		return stateReachesAcceptState;
-	}
-
-	/** enclosingRuleName calls targetRuleName, find the cycle containing
-	 *  the target and add the caller.  Find the cycle containing the caller
-	 *  and add the target.  If no cycles contain either, then create a new
-	 *  cycle.  listOfRecursiveCycles is List<Set<String>> that holds a list
-	 *  of cycles (sets of rule names).
-	 */
-	protected void addRulesToCycle(Rule targetRule,
-								   Rule enclosingRule,
-								   List<Set<Rule>> listOfRecursiveCycles)
-	{
-		boolean foundCycle = false;
-		for (int i = 0; i < listOfRecursiveCycles.size(); i++) {
-			Set<Rule> rulesInCycle = listOfRecursiveCycles.get(i);
-			// ensure both rules are in same cycle
-			if ( rulesInCycle.contains(targetRule) ) {
-				rulesInCycle.add(enclosingRule);
-				foundCycle = true;
-			}
-			if ( rulesInCycle.contains(enclosingRule) ) {
-				rulesInCycle.add(targetRule);
-				foundCycle = true;
-			}
-		}
-		if ( !foundCycle ) {
-			Set cycle = new HashSet();
-			cycle.add(targetRule);
-			cycle.add(enclosingRule);
-			listOfRecursiveCycles.add(cycle);
-		}
-	}
-
-	public void checkRuleReference(GrammarAST scopeAST,
-								   GrammarAST refAST,
-								   GrammarAST argsAST,
-								   String currentRuleName)
-	{
-		Rule r = grammar.getRule(refAST.getText());
-		if ( refAST.getType()==ANTLRParser.RULE_REF ) {
-			if ( argsAST!=null ) {
-				// rule[args]; ref has args
-                if ( r!=null && r.argActionAST==null ) {
-					// but rule def has no args
-					ErrorManager.grammarError(
-						ErrorManager.MSG_RULE_HAS_NO_ARGS,
-						grammar,
-						argsAST.getToken(),
-						r.name);
-				}
-			}
-			else {
-				// rule ref has no args
-				if ( r!=null && r.argActionAST!=null ) {
-					// but rule def has args
-					ErrorManager.grammarError(
-						ErrorManager.MSG_MISSING_RULE_ARGS,
-						grammar,
-						refAST.getToken(),
-						r.name);
-				}
-			}
-		}
-		else if ( refAST.getType()==ANTLRParser.TOKEN_REF ) {
-			if ( grammar.type!=Grammar.LEXER ) {
-				if ( argsAST!=null ) {
-					// args on a token ref not in a lexer rule
-					ErrorManager.grammarError(
-						ErrorManager.MSG_ARGS_ON_TOKEN_REF,
-						grammar,
-						refAST.getToken(),
-						refAST.getText());
-				}
-				return; // ignore token refs in nonlexers
-			}
-			if ( argsAST!=null ) {
-				// tokenRef[args]; ref has args
-				if ( r!=null && r.argActionAST==null ) {
-					// but token rule def has no args
-					ErrorManager.grammarError(
-						ErrorManager.MSG_RULE_HAS_NO_ARGS,
-						grammar,
-						argsAST.getToken(),
-						r.name);
-				}
-			}
-			else {
-				// token ref has no args
-				if ( r!=null && r.argActionAST!=null ) {
-					// but token rule def has args
-					ErrorManager.grammarError(
-						ErrorManager.MSG_MISSING_RULE_ARGS,
-						grammar,
-						refAST.getToken(),
-						r.name);
-				}
-			}
-		}
-	}
-
-	/** Rules in tree grammar that use -> rewrites and are spitting out
-	 *  templates via output=template and then use rewrite=true must only
-	 *  use -> on alts that are simple nodes or trees or single rule refs
-	 *  that match either nodes or trees.  The altAST is the ALT node
-	 *  for an ALT.  Verify that its first child is simple.  Must be either
-	 *  ( ALT ^( A B ) <end-of-alt> ) or ( ALT A <end-of-alt> ) or
-	 *  other element.
-	 *
-	 *  Ignore predicates in front and labels.
-	 */
-	public void ensureAltIsSimpleNodeOrTree(GrammarAST altAST,
-											GrammarAST elementAST,
-											int outerAltNum)
-	{
-		if ( isValidSimpleElementNode(elementAST) ) {
-			GrammarAST next = (GrammarAST)elementAST.getNextSibling();
-			if ( !isNextNonActionElementEOA(next)) {
-				ErrorManager.grammarWarning(ErrorManager.MSG_REWRITE_FOR_MULTI_ELEMENT_ALT,
-											grammar,
-											next.token,
-											new Integer(outerAltNum));
-			}
-			return;
-		}
-		switch ( elementAST.getType() ) {
-			case ANTLRParser.ASSIGN :		// labels ok on non-rule refs
-			case ANTLRParser.PLUS_ASSIGN :
-				if ( isValidSimpleElementNode(elementAST.getChild(1)) ) {
-					return;
-				}
-				break;
-			case ANTLRParser.ACTION :		// skip past actions
-			case ANTLRParser.SEMPRED :
-			case ANTLRParser.SYN_SEMPRED :
-			case ANTLRParser.BACKTRACK_SEMPRED :
-			case ANTLRParser.GATED_SEMPRED :
-				ensureAltIsSimpleNodeOrTree(altAST,
-											(GrammarAST)elementAST.getNextSibling(),
-											outerAltNum);
-				return;
-		}
-		ErrorManager.grammarWarning(ErrorManager.MSG_REWRITE_FOR_MULTI_ELEMENT_ALT,
-									grammar,
-									elementAST.token,
-									new Integer(outerAltNum));
-	}
-
-	protected boolean isValidSimpleElementNode(Tree t) {
-		switch ( t.getType() ) {
-			case ANTLRParser.TREE_BEGIN :
-			case ANTLRParser.TOKEN_REF :
-			case ANTLRParser.CHAR_LITERAL :
-			case ANTLRParser.STRING_LITERAL :
-			case ANTLRParser.WILDCARD :
-				return true;
-			default :
-				return false;
-		}
-	}
-
-	protected boolean isNextNonActionElementEOA(GrammarAST t) {
-		while ( t.getType()==ANTLRParser.ACTION ||
-				t.getType()==ANTLRParser.SEMPRED )
-		{
-			t = (GrammarAST)t.getNextSibling();
-		}
-		if ( t.getType()==ANTLRParser.EOA ) {
-			return true;
-		}
-		return false;
-	}
-}
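The left-recursion check above walks the NFA, keeping the set of rules on the current trace and recording a cycle whenever a rule invokes a rule that is already on that path. The same idea, sketched over a plain rule-call graph rather than ANTLR's NFA (hypothetical rule names and graph, not the ANTLR implementation):

import java.util.*;

// Sketch: detect left-recursive rules by DFS over a "rule -> rules reachable
// at its left edge" graph, mirroring checkAllRulesForLeftRecursion's idea.
public class LeftRecursionDemo {
    static Map<String, List<String>> leftEdge = new HashMap<>();
    static Set<String> leftRecursive = new HashSet<>();

    static void check(String rule, Set<String> visiting) {
        if (!visiting.add(rule)) {     // rule already on the current path: cycle
            leftRecursive.add(rule);
            return;
        }
        for (String callee : leftEdge.getOrDefault(rule, List.of())) {
            check(callee, visiting);
        }
        visiting.remove(rule);         // done exploring this path
    }

    public static void main(String[] args) {
        // a : a 'x' | 'y' ;   b : c ... ;   c : b ... ;
        leftEdge.put("a", List.of("a"));
        leftEdge.put("b", List.of("c"));
        leftEdge.put("c", List.of("b"));
        for (String r : leftEdge.keySet()) check(r, new HashSet<>());
        System.out.println("left-recursive rules: " + leftRecursive);
    }
}

Starting the walk from every rule, as checkAllRulesForLeftRecursion does, flags each member of a mutual-recursion cycle such as b/c above, not just the rule where the cycle was first noticed.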
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSemanticsMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSemanticsMessage.java
deleted file mode 100644
index 1027182..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSemanticsMessage.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.runtime.Token;
-import org.stringtemplate.v4.ST;
-
-/** A problem with the symbols and/or meaning of a grammar such as rule
- *  redefinition.
- */
-public class GrammarSemanticsMessage extends Message {
-	public Grammar g;
-	/** Most of the time, we'll have a token such as an undefined rule ref
-	 *  and so this will be set.
-	 */
-	public Token offendingToken;
-
-	public GrammarSemanticsMessage(int msgID,
-						  Grammar g,
-						  Token offendingToken)
-	{
-		this(msgID,g,offendingToken,null,null);
-	}
-
-	public GrammarSemanticsMessage(int msgID,
-						  Grammar g,
-						  Token offendingToken,
-						  Object arg)
-	{
-		this(msgID,g,offendingToken,arg,null);
-	}
-
-	public GrammarSemanticsMessage(int msgID,
-						  Grammar g,
-						  Token offendingToken,
-						  Object arg,
-						  Object arg2)
-	{
-		super(msgID,arg,arg2);
-		this.g = g;
-		this.offendingToken = offendingToken;
-	}
-
-	public String toString() {
-		line = 0;
-		column = 0;
-		if ( offendingToken!=null ) {
-			line = offendingToken.getLine();
-			column = offendingToken.getCharPositionInLine();
-		}
-		if ( g!=null ) {
-			file = g.getFileName();
-		}
-		ST st = getMessageTemplate();
-		if ( arg!=null ) {
-			st.add("arg", arg);
-		}
-		if ( arg2!=null ) {
-			st.add("arg2", arg2);
-		}
-		return super.toString(st);
-	}
-}
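GrammarSemanticsMessage, like the other Message subclasses in this change, only fills the "arg"/"arg2" attributes of a StringTemplate and lets Message.toString(ST) render it. A standalone sketch of that pattern with StringTemplate 4 (the template text here is made up; ANTLR's real templates come from getMessageTemplate()):

import org.stringtemplate.v4.ST;

// Hypothetical example template; ANTLR's real message templates are loaded
// from its message group files and fetched via getMessageTemplate().
public class MessageTemplateDemo {
    public static void main(String[] args) {
        ST st = new ST("error(<id>): rule <arg> redefined in <file>");
        st.add("id", 105);         // illustrative message id
        st.add("arg", "expr");     // what the Message subclasses put in "arg"
        st.add("file", "T.g");
        System.out.println(st.render());
    }
}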
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSerializerFoo.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSerializerFoo.java
deleted file mode 100644
index fc1afb8..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSerializerFoo.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.tool;
-
-import org.antlr.runtime.SerializedGrammar;
-
-import java.io.*;
-import java.util.Stack;
-
-/** Serialize a grammar into a highly compressed form with
- *  only the info needed to recognize sentences.
- *  FORMAT:
- *
- *  file ::= $ANTLR<version:byte><grammartype:byte><name:string>;<numRules:short><rules>
- *  rule ::= R<rulename:string>;B<nalts:short><alts>.
- *  alt  ::= A<elems>;
- *  elem ::= t<tokentype:short> | r<ruleIndex:short> | -<char:uchar><char:uchar> | ~<tokentype> | w
- */
-public class GrammarSerializerFoo {
-    protected DataOutputStream out;
-    protected String filename;
-    protected Grammar g;
-
-    protected Stack streams = new Stack();
-    protected ByteArrayOutputStream altBuf;
-    protected int numElementsInAlt = 0;
-
-    public GrammarSerializerFoo(Grammar g) {
-        this.g = g;
-    }
-
-    public void open(String filename) throws IOException {
-        this.filename = filename;
-        FileOutputStream fos = new FileOutputStream(filename);
-        BufferedOutputStream bos = new BufferedOutputStream(fos);
-        out = new DataOutputStream(bos);
-        writeString(out, SerializedGrammar.COOKIE);
-        out.writeByte(SerializedGrammar.FORMAT_VERSION);
-    }
-
-    public void close() throws IOException {
-        if ( out!=null ) out.close();
-        out = null;
-    }
-
-
-    // WRITE
-
-    public void grammar(int grammarTokenType, String name) {
-        try {
-            /*
-            switch ( grammarTokenType ) {
-                case ANTLRParser.LEXER_GRAMMAR : out.writeByte('l'); break;
-                case ANTLRParser.PARSER_GRAMMAR : out.writeByte('p'); break;
-                case ANTLRParser.TREE_GRAMMAR: out.writeByte('t'); break;
-                case ANTLRParser.COMBINED_GRAMMAR : out.writeByte('c'); break;
-            }
-            writeString(out, name);
-            */
-            out.writeShort(g.getRules().size());
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void rule(String name) {
-        try {
-            out.writeByte('R');
-            writeString(out, name);
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void endRule() {
-        try {
-            out.writeByte('.');
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void block(int nalts) {
-        try {
-            out.writeByte('B');
-            out.writeShort(nalts);
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void alt(GrammarAST alt) {
-        numElementsInAlt = 0;
-        try {
-            out.writeByte('A');
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-        //streams.push(out);
-        //altBuf = new ByteArrayOutputStream();
-        //out = new DataOutputStream(altBuf);
-    }
-
-    public void endAlt() {
-        try {
-            //out.flush();
-            //out = (DataOutputStream)streams.pop(); // restore previous stream
-            out.writeByte(';');
-            //out.writeShort(numElementsInAlt);
-            //out.write(altBuf.toByteArray());
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void ruleRef(GrammarAST t) {
-        numElementsInAlt++;
-        try {
-            out.writeByte('r');
-            out.writeShort(g.getRuleIndex(t.getText()));
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void token(GrammarAST t) {
-        numElementsInAlt++;
-        try {
-            out.writeByte('t');
-            int ttype = g.getTokenType(t.getText());
-            out.writeShort(ttype);
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void charLiteral(GrammarAST t) {
-        numElementsInAlt++;
-        try {
-            if ( g.type!=Grammar.LEXER ) {
-                out.writeByte('t');
-                int ttype = g.getTokenType(t.getText());
-                out.writeShort(ttype);
-            }
-            // else lexer???
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void wildcard(GrammarAST t) {
-        numElementsInAlt++;
-        try {
-            out.writeByte('w');
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void range() { // must be char range
-        numElementsInAlt++;
-        try {
-            out.writeByte('-');
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void not() {
-        try {
-            out.writeByte('~');
-        }
-        catch (IOException ioe) {
-            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
-        }
-    }
-
-    public void writeString(DataOutputStream out, String s) throws IOException {
-        out.writeBytes(s);
-        out.writeByte(';');
-    }
-}
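The header comment of GrammarSerializerFoo documents the byte layout: a "$ANTLR" cookie, a format-version byte, then 'R'/'B'/'A'-tagged records with ';'-terminated strings. A rough sketch of emitting one single-alt rule in that style (all values illustrative; the real cookie and version constants live in SerializedGrammar):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of the tag-byte plus ';'-terminated-string encoding described in
// GrammarSerializerFoo's header comment; values are placeholders.
public class SerializedGrammarSketch {
    static void writeString(DataOutputStream out, String s) throws IOException {
        out.writeBytes(s);
        out.writeByte(';');            // strings are ';'-terminated
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);

        writeString(out, "$ANTLR");    // cookie (SerializedGrammar.COOKIE in ANTLR)
        out.writeByte(1);              // format version (illustrative)
        out.writeShort(1);             // numRules

        out.writeByte('R');            // rule ::= R<rulename:string>;B<nalts:short><alts>.
        writeString(out, "r");
        out.writeByte('B');
        out.writeShort(1);             // one alternative

        out.writeByte('A');            // alt ::= A<elems>;
        out.writeByte('t');            // elem ::= t<tokentype:short>
        out.writeShort(4);             // illustrative token type
        out.writeByte(';');

        out.writeByte('.');            // end of rule
        out.flush();
        System.out.println(bytes.size() + " bytes written");
    }
}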
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSpelunker.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSpelunker.java
deleted file mode 100644
index d7986ec..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSpelunker.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import java.io.*;
-import java.util.ArrayList;
-import java.util.List;
-
-/** Load a grammar file and scan it just until we learn a few items
- *  of interest.  Currently: name, type, imports, tokenVocab, language option.
- *
- *  GrammarScanner (at bottom of this class) converts grammar to stuff like:
- *
- *   grammar Java ; options { backtrack true memoize true }
- *   import JavaDecl JavaAnnotations JavaExpr ;
- *   ... : ...
- *
- *  First ':' or '@' indicates we can stop looking for imports/options.
- *
- *  Then we just grab interesting grammar properties.
- */
-public class GrammarSpelunker {
-    protected String grammarFileName;
-    protected String token;
-    protected Scanner scanner;
-
-    // grammar info / properties
-    protected String grammarModifier;
-    protected String grammarName;
-    protected String tokenVocab;
-    protected String language = "Java"; // default
-    protected String inputDirectory;
-    protected List<String> importedGrammars;
-
-    public GrammarSpelunker(String inputDirectory, String grammarFileName) {
-        this.inputDirectory = inputDirectory;
-        this.grammarFileName = grammarFileName;
-    }
-
-    void consume() throws IOException { token = scanner.nextToken(); }
-
-    protected void match(String expecting) throws IOException {
-        //System.out.println("match "+expecting+"; is "+token);
-        if ( token.equals(expecting) ) consume();
-        else throw new Error("Error parsing "+grammarFileName+": '"+token+
-                             "' not expected '"+expecting+"'");
-    }
-
-    public void parse() throws IOException {
-        Reader r = new FileReader((inputDirectory != null ? inputDirectory + File.separator : "") + grammarFileName);
-        BufferedReader br = new BufferedReader(r);
-        try {
-            scanner = new Scanner(br);
-            consume();
-            grammarHeader();
-            // scan until imports or options
-            while ( token!=null && !token.equals("@") && !token.equals(":") &&
-                    !token.equals("import") && !token.equals("options") )
-            {
-                consume();
-            }
-            if ( token.equals("options") ) options();
-            // scan until options or first rule
-            while ( token!=null && !token.equals("@") && !token.equals(":") &&
-                    !token.equals("import") )
-            {
-                consume();
-            }
-            if ( token.equals("import") ) imports();
-            // ignore rest of input; close up shop
-        }
-        finally {
-            if ( br!=null ) br.close();
-        }
-    }
-
-    protected void grammarHeader() throws IOException {
-        if ( token==null ) return;
-        if ( token.equals("tree") || token.equals("parser") || token.equals("lexer") ) {
-            grammarModifier=token;
-            consume();
-        }
-        match("grammar");
-        grammarName = token;
-        consume(); // move beyond name
-    }
-
-    // looks like "options { backtrack true ; tokenVocab MyTokens ; }"
-    protected void options() throws IOException {
-        match("options");
-        match("{");
-        while ( token!=null && !token.equals("}") ) {
-            String name = token;
-            consume();
-            String value = token;
-            consume();
-            match(";");
-            if ( name.equals("tokenVocab") ) tokenVocab = value;
-            if ( name.equals("language") ) language = value;
-        }
-        match("}");
-    }
-
-    // looks like "import JavaDecl JavaAnnotations JavaExpr ;"
-    protected void imports() throws IOException {
-        match("import");
-        importedGrammars = new ArrayList<String>();
-        while ( token!=null && !token.equals(";") ) {
-            importedGrammars.add(token);
-            consume();
-        }
-        match(";");
-        if ( importedGrammars.size()==0 ) importedGrammars = null;
-    }
-
-    public String getGrammarModifier() { return grammarModifier; }
-
-    public String getGrammarName() { return grammarName; }
-
-    public String getTokenVocab() { return tokenVocab; }
-
-    public String getLanguage() { return language; }
-
-    public List<String> getImportedGrammars() { return importedGrammars; }
-
-    /** Strip comments and then return stream of words and
-     *  tokens {';', ':', '{', '}'}
-     */ 
-    public static class Scanner {
-        public static final int EOF = -1;
-        Reader input;
-        int c;
-
-        public Scanner(Reader input) throws IOException {
-            this.input = input;
-            consume();
-        }
-
-        boolean isDIGIT() { return c>='0'&&c<='9'; }
-        boolean isID_START() { return c>='a'&&c<='z' || c>='A'&&c<='Z'; }
-        boolean isID_LETTER() { return isID_START() || c>='0'&&c<='9' || c=='_'; }
-        
-        void consume() throws IOException { c = input.read(); }
-
-        public String nextToken() throws IOException {
-            while ( c!=EOF ) {
-                //System.out.println("check "+(char)c);
-                switch ( c ) {
-                    case ';' : consume(); return ";";
-                    case '{' : consume(); return "{";
-                    case '}' : consume(); return "}";
-                    case ':' : consume(); return ":";
-                    case '@' : consume(); return "@";
-                    case '/' : COMMENT(); break;
-                    case '\'': return STRING();
-                    default:
-                        if ( isID_START() ) return ID();
-                        else if ( isDIGIT() ) return INT();
-                        consume(); // ignore anything else
-                }
-            }
-            return null;
-        }
-
-        /** NAME : LETTER+ ; // NAME is sequence of >=1 letter */
-        String ID() throws IOException {
-            StringBuffer buf = new StringBuffer();
-            while ( c!=EOF && isID_LETTER() ) { buf.append((char)c); consume(); }
-            return buf.toString();
-        }
-
-        String INT() throws IOException {
-            StringBuffer buf = new StringBuffer();
-            while ( c!=EOF && isDIGIT() ) { buf.append((char)c); consume(); }
-            return buf.toString();
-        }
-
-        String STRING() throws IOException {
-            StringBuffer buf = new StringBuffer();
-            consume();
-            while ( c!=EOF && c!='\'' ) {
-                if ( c=='\\' ) {
-                    buf.append((char)c);
-                    consume();
-                }
-                buf.append((char)c);
-                consume();
-            }
-            consume(); // scan past '
-            return buf.toString();
-        }
-
-        void COMMENT() throws IOException {
-            if ( c=='/' ) {
-                consume();
-                if ( c=='*' ) {
-                    consume();
-        scarf:
-                    while ( true ) {
-                        if ( c=='*' ) {
-                            consume();
-                            if ( c=='/' ) { consume(); break scarf; }
-                        }
-                        else {
-                            while ( c!=EOF && c!='*' ) consume();
-                        }
-                    }
-                }
-                else if ( c=='/' ) {
-                    while ( c!=EOF && c!='\n' ) consume();
-                }
-            }
-        }
-    }
-
-    /** Tester; Give grammar filename as arg */
-    public static void main(String[] args) throws IOException {
-        GrammarSpelunker g = new GrammarSpelunker(".", args[0]);
-        g.parse();
-        System.out.println(g.grammarModifier+" grammar "+g.grammarName);
-        System.out.println("language="+g.language);
-        System.out.println("tokenVocab="+g.tokenVocab);
-        System.out.println("imports="+g.importedGrammars);
-    }
-}
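GrammarSpelunker's nested Scanner strips comments and returns a stream of words plus the punctuation tokens ';', ':', '{', '}', '@'. A small usage sketch, assuming the class above is still on the classpath (hypothetical input string):

import java.io.IOException;
import java.io.StringReader;
import org.antlr.tool.GrammarSpelunker;

// Feed a grammar header through the spelunker's lightweight scanner and
// print the token stream it produces.
public class SpelunkerScannerDemo {
    public static void main(String[] args) throws IOException {
        String header = "/* header */ grammar T; options { language Java ; } r : 'x' ;";
        GrammarSpelunker.Scanner s =
            new GrammarSpelunker.Scanner(new StringReader(header));
        for (String t = s.nextToken(); t != null; t = s.nextToken()) {
            System.out.print(t + " ");
        }
        System.out.println();
    }
}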
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSyntaxMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSyntaxMessage.java
deleted file mode 100644
index 290cb66..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarSyntaxMessage.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.runtime.RecognitionException;
-import org.antlr.runtime.Token;
-import org.stringtemplate.v4.ST;
-
-/** A problem with the syntax of your ANTLR grammar such as
- *  "The '{' came as a complete surprise to me at this point in your program"
- */
-public class GrammarSyntaxMessage extends Message {
-	public Grammar g;
-	/** Most of the time, we'll have a token and so this will be set. */
-	public Token offendingToken;
-	public RecognitionException exception;
-
-	public GrammarSyntaxMessage(int msgID,
-								Grammar grammar,
-								Token offendingToken,
-								RecognitionException exception)
-	{
-		this(msgID,grammar,offendingToken,null,exception);
-	}
-
-	public GrammarSyntaxMessage(int msgID,
-								Grammar grammar,
-								Token offendingToken,
-								Object arg,
-								RecognitionException exception)
-	{
-		super(msgID, arg, null);
-		this.offendingToken = offendingToken;
-		this.exception = exception;
-		this.g = grammar;
-	}
-
-	public String toString() {
-		line = 0;
-		column = 0;
-		if ( offendingToken!=null ) {
-			line = offendingToken.getLine();
-			column = offendingToken.getCharPositionInLine();
-		}
-		// TODO: actually set the right Grammar instance to get the filename
-		// TODO: have to update all v2 grammar files for this. or use errormanager and tool to get the current grammar
-		if (g != null) {
-			file = g.getFileName();
-		}
-		ST st = getMessageTemplate();
-		if ( arg!=null ) {
-			st.add("arg", arg);
-		}
-		return super.toString(st);
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarUnreachableAltsMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarUnreachableAltsMessage.java
deleted file mode 100644
index 1c18cd5..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/GrammarUnreachableAltsMessage.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.DecisionProbe;
-import org.antlr.analysis.NFAState;
-import org.stringtemplate.v4.ST;
-
-import java.util.List;
-
-/** Reports alternatives of a decision that can never be matched (unreachable
- *  alts), typically because earlier alternatives match the same input first.
- */
-public class GrammarUnreachableAltsMessage extends Message {
-	public DecisionProbe probe;
-    public List alts;
-
-	public GrammarUnreachableAltsMessage(DecisionProbe probe,
-										 List alts)
-	{
-		super(ErrorManager.MSG_UNREACHABLE_ALTS);
-		this.probe = probe;
-		this.alts = alts;
-		// flip msg ID if alts are actually token refs in Tokens rule
-		if ( probe.dfa.isTokensRuleDecision() ) {
-			setMessageID(ErrorManager.MSG_UNREACHABLE_TOKENS);
-		}
-	}
-
-	public String toString() {
-		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
-		line = decisionASTNode.getLine();
-		column = decisionASTNode.getCharPositionInLine();
-		String fileName = probe.dfa.nfa.grammar.getFileName();
-		if ( fileName!=null ) {
-			file = fileName;
-		}
-
-		ST st = getMessageTemplate();
-
-		if ( probe.dfa.isTokensRuleDecision() ) {
-			// alts are token rules, convert to the names instead of numbers
-			for (int i = 0; i < alts.size(); i++) {
-				Integer altI = (Integer) alts.get(i);
-				String tokenName =
-					probe.getTokenNameForTokensRuleAlt(altI.intValue());
-				// reset the line/col to the token definition
-				NFAState ruleStart =
-					probe.dfa.nfa.grammar.getRuleStartState(tokenName);
-				line = ruleStart.associatedASTNode.getLine();
-				column = ruleStart.associatedASTNode.getCharPositionInLine();
-				st.add("tokens", tokenName);
-			}
-		}
-		else {
-			// regular alt numbers, show the alts
-			st.add("alts", alts);
-		}
-
-		return super.toString(st);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/Interp.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/Interp.java
deleted file mode 100644
index 7ba49bd..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/Interp.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.Tool;
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.ParseTree;
-
-import java.io.BufferedReader;
-import java.io.FileReader;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.StringTokenizer;
-
-/** Interpret any ANTLR grammar:
- *
- *  java Interp file.g tokens-to-ignore start-rule input-file
- *
- *  java Interp C.g 'WS COMMENT' program t.c
- *
- *  where the WS and COMMENT are the names of tokens you want to have
- *  the parser ignore.
- */
-public class Interp {
-    public static class FilteringTokenStream extends CommonTokenStream {
-        public FilteringTokenStream(TokenSource src) { super(src); }
-        Set<Integer> hide = new HashSet<Integer>();
-        protected void sync(int i) {
-            super.sync(i);
-            if ( hide.contains(get(i).getType()) ) get(i).setChannel(Token.HIDDEN_CHANNEL);
-        }
-        public void setTokenTypeChannel(int ttype, int channel) {
-            hide.add(ttype);
-        }
-    }
-
-	// pass me a java file to parse
-	public static void main(String[] args) throws Exception {
-		if ( args.length!=4 ) {
-			System.err.println("java Interp file.g tokens-to-ignore start-rule input-file");
-			return;
-		}
-		String grammarFileName = args[0];
-		String ignoreTokens = args[1];
-		String startRule = args[2];
-		String inputFileName = args[3];
-
-		// TODO: using wrong constructor now
-		Tool tool = new Tool();
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar parser = new Grammar(tool, grammarFileName, composite);
-		composite.setDelegationRoot(parser);
-		FileReader fr = new FileReader(grammarFileName);
-		BufferedReader br = new BufferedReader(fr);
-		parser.parseAndBuildAST(br);
-		br.close();
-
-		parser.composite.assignTokenTypes();
-		parser.composite.defineGrammarSymbols();
-		parser.composite.createNFAs();
-
-		List leftRecursiveRules = parser.checkAllRulesForLeftRecursion();
-		if ( leftRecursiveRules.size()>0 ) {
-			return;
-		}
-
-		if ( parser.getRule(startRule)==null ) {
-			System.out.println("undefined start rule "+startRule);
-			return;
-		}
-
-		String lexerGrammarText = parser.getLexerGrammar();
-		Grammar lexer = new Grammar(tool);
-		lexer.importTokenVocabulary(parser);
-		lexer.fileName = grammarFileName;
-		lexer.setTool(tool);
-		if ( lexerGrammarText!=null ) {
-			lexer.setGrammarContent(lexerGrammarText);
-		}
-		else {
-			System.err.println("no lexer grammar found in "+grammarFileName);
-		}
-		lexer.composite.createNFAs();
-		
-		CharStream input =
-			new ANTLRFileStream(inputFileName);
-		Interpreter lexEngine = new Interpreter(lexer, input);
-		FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
-		StringTokenizer tk = new StringTokenizer(ignoreTokens, " ");
-		while ( tk.hasMoreTokens() ) {
-			String tokenName = tk.nextToken();
-			tokens.setTokenTypeChannel(lexer.getTokenType(tokenName), 99);
-		}
-
-		if ( parser.getRule(startRule)==null ) {
-			System.err.println("Rule "+startRule+" does not exist in "+grammarFileName);
-			return;
-		}
-		Interpreter parseEngine = new Interpreter(parser, tokens);
-		ParseTree t = parseEngine.parse(startRule);
-		System.out.println(t.toStringTree());
-	}
-}
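
As a rough illustration of how the deleted Interp driver wires the tool classes together, here is a minimal sketch in plain Java, assuming the ANTLR v3 tool classes removed in this patch are still on the classpath; the grammar file C.g, the input file t.c, and the start rule "program" are hypothetical placeholders.

    // Minimal sketch of the Interp flow; uses only calls visible in the deleted source.
    import org.antlr.Tool;
    import org.antlr.runtime.ANTLRFileStream;
    import org.antlr.runtime.CommonTokenStream;
    import org.antlr.runtime.tree.ParseTree;
    import org.antlr.tool.CompositeGrammar;
    import org.antlr.tool.Grammar;
    import org.antlr.tool.Interpreter;

    import java.io.BufferedReader;
    import java.io.FileReader;

    public class InterpSketch {
        public static void main(String[] args) throws Exception {
            Tool tool = new Tool();
            CompositeGrammar composite = new CompositeGrammar();
            Grammar parser = new Grammar(tool, "C.g", composite);   // hypothetical grammar file
            composite.setDelegationRoot(parser);
            BufferedReader br = new BufferedReader(new FileReader("C.g"));
            parser.parseAndBuildAST(br);                            // build the grammar AST
            br.close();
            parser.composite.assignTokenTypes();
            parser.composite.defineGrammarSymbols();
            parser.composite.createNFAs();

            Grammar lexer = new Grammar(tool);                      // derive the companion lexer grammar
            lexer.importTokenVocabulary(parser);
            lexer.fileName = "C.g";
            lexer.setTool(tool);
            lexer.setGrammarContent(parser.getLexerGrammar());
            lexer.composite.createNFAs();

            Interpreter lexEngine = new Interpreter(lexer, new ANTLRFileStream("t.c"));
            CommonTokenStream tokens = new CommonTokenStream(lexEngine);
            Interpreter parseEngine = new Interpreter(parser, tokens);
            ParseTree t = parseEngine.parse("program");             // hypothetical start rule
            System.out.println(t.toStringTree());
        }
    }
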
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/Interpreter.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/Interpreter.java
deleted file mode 100644
index fe4e95c..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/Interpreter.java
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.DFA;
-import org.antlr.analysis.*;
-import org.antlr.misc.IntervalSet;
-import org.antlr.runtime.*;
-import org.antlr.runtime.debug.BlankDebugEventListener;
-import org.antlr.runtime.debug.DebugEventListener;
-import org.antlr.runtime.debug.ParseTreeBuilder;
-import org.antlr.runtime.tree.ParseTree;
-
-import java.util.List;
-import java.util.Stack;
-
-/** The recognition interpreter/engine for grammars.  Separated
- *  out of Grammar as it's related, but technically not a Grammar function.
- *  You create an interpreter for a grammar and an input stream.  This object
- *  can act as a TokenSource so that you can hook up two grammars (via
- *  a CommonTokenStream) to lex/parse.  Being a token source only makes sense
- *  for a lexer grammar of course.
- */
-public class Interpreter implements TokenSource {
-	protected Grammar grammar;
-	protected IntStream input;
-
-	/** A lexer listener that just creates token objects as they
-	 *  are matched.  scan() uses this listener to get a single object.
-	 *  To get a stream of tokens, you must call scan() multiple times,
-	 *  recording the token object result after each call.
-	 */
-	class LexerActionGetTokenType extends BlankDebugEventListener {
-		public CommonToken token;
-		Grammar g;
-		public LexerActionGetTokenType(Grammar g) {
-			this.g = g;
-		}
-
-		public void exitRule(String grammarFileName, String ruleName) {
-			if ( !ruleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ){
-				int type = g.getTokenType(ruleName);
-				int channel = Token.DEFAULT_CHANNEL;
-				token = new CommonToken((CharStream)input,type,channel,0,0);
-			}
-		}
-	}
-
-	public Interpreter(Grammar grammar, IntStream input) {
-		this.grammar = grammar;
-		this.input = input;
-	}
-
-	public Token nextToken() {
-		if ( grammar.type!=Grammar.LEXER ) {
-			return null;
-		}
-		if ( input.LA(1)==CharStream.EOF ) {
-            return new CommonToken((CharStream)input,Token.EOF,Token.DEFAULT_CHANNEL,input.index(),input.index());
-		}
-		int start = input.index();
-		int charPos = ((CharStream)input).getCharPositionInLine();
-		CommonToken token = null;
-		loop:
-		while (input.LA(1)!=CharStream.EOF) {
-			try {
-				token = scan(Grammar.ARTIFICIAL_TOKENS_RULENAME, null);
-				break;
-			}
-			catch (RecognitionException re) {
-				// report a problem and try for another
-				reportScanError(re);
-				continue loop;
-			}
-		}
-		// the scan can only set type
-		// we must set the line, and other junk here to make it a complete token
-		int stop = input.index()-1;
-		if ( token==null ) {
-            return new CommonToken((CharStream)input,Token.EOF,Token.DEFAULT_CHANNEL,start,start);
-		}
-		token.setLine(((CharStream)input).getLine());
-		token.setStartIndex(start);
-		token.setStopIndex(stop);
-		token.setCharPositionInLine(charPos);
-		return token;
-	}
-
-	/** For a given input char stream, try to match against the NFA
-	 *  starting at startRule.  This is a deterministic parse even though
-	 *  it is using an NFA because it uses DFAs at each decision point to
-	 *  predict which alternative will succeed.  This is exactly what the
-	 *  generated parser will do.
-	 *
-	 *  This only does lexer grammars.
-	 *
-	 *  Return the token type associated with the final rule end state.
-	 */
-	public void scan(String startRule,
-					 DebugEventListener actions,
-					 List visitedStates)
-		throws RecognitionException
-	{
-		if ( grammar.type!=Grammar.LEXER ) {
-			return;
-		}
-		CharStream in = (CharStream)this.input;
-		//System.out.println("scan("+startRule+",'"+in.substring(in.index(),in.size()-1)+"')");
-		// Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet
-		if ( grammar.getRuleStartState(startRule)==null ) {
-			grammar.buildNFA();
-		}
-
-		if ( !grammar.allDecisionDFAHaveBeenCreated() ) {
-			// Create the DFA predictors for each decision
-			grammar.createLookaheadDFAs();
-		}
-
-		// do the parse
-		Stack ruleInvocationStack = new Stack();
-		NFAState start = grammar.getRuleStartState(startRule);
-		NFAState stop = grammar.getRuleStopState(startRule);
-		parseEngine(startRule, start, stop, in, ruleInvocationStack,
-					actions, visitedStates);
-	}
-
-	public CommonToken scan(String startRule)
-		throws RecognitionException
-	{
-		return scan(startRule, null);
-	}
-
-	public CommonToken scan(String startRule,
-							List visitedStates)
-		throws RecognitionException
-	{
-		LexerActionGetTokenType actions = new LexerActionGetTokenType(grammar);
-		scan(startRule, actions, visitedStates);
-		return actions.token;
-	}
-
-	public void parse(String startRule,
-					  DebugEventListener actions,
-					  List visitedStates)
-		throws RecognitionException
-	{
-		//System.out.println("parse("+startRule+")");
-		// Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet
-		if ( grammar.getRuleStartState(startRule)==null ) {
-			grammar.buildNFA();
-		}
-		if ( !grammar.allDecisionDFAHaveBeenCreated() ) {
-			// Create the DFA predictors for each decision
-			grammar.createLookaheadDFAs();
-		}
-		// do the parse
-		Stack ruleInvocationStack = new Stack();
-		NFAState start = grammar.getRuleStartState(startRule);
-		NFAState stop = grammar.getRuleStopState(startRule);
-		parseEngine(startRule, start, stop, input, ruleInvocationStack,
-					actions, visitedStates);
-	}
-
-	public ParseTree parse(String startRule)
-		throws RecognitionException
-	{
-		return parse(startRule, null);
-	}
-
-	public ParseTree parse(String startRule, List visitedStates)
-		throws RecognitionException
-	{
-		ParseTreeBuilder actions = new ParseTreeBuilder(grammar.name);
-		try {
-			parse(startRule, actions, visitedStates);
-		}
-		catch (RecognitionException re) {
-			// Errors are tracked via the ANTLRDebugInterface
-			// Exceptions are used just to blast out of the parse engine
-			// The error will be in the parse tree.
-		}
-		return actions.getTree();
-	}
-
-	/** Fill a list of all NFA states visited during the parse */
-	protected void parseEngine(String startRule,
-							   NFAState start,
-							   NFAState stop,
-							   IntStream input,
-							   Stack ruleInvocationStack,
-							   DebugEventListener actions,
-							   List visitedStates)
-		throws RecognitionException
-	{
-		NFAState s = start;
-		if ( actions!=null ) {
-			actions.enterRule(s.nfa.grammar.getFileName(), start.enclosingRule.name);
-		}
-		int t = input.LA(1);
-		while ( s!=stop ) {
-			if ( visitedStates!=null ) {
-				visitedStates.add(s);
-			}
-			/*
-			System.out.println("parse state "+s.stateNumber+" input="+
-				s.nfa.grammar.getTokenDisplayName(t));
-				*/
-			// CASE 1: decision state
-			if ( s.getDecisionNumber()>0 && s.nfa.grammar.getNumberOfAltsForDecisionNFA(s)>1 ) {
-				// decision point, must predict and jump to alt
-				DFA dfa = s.nfa.grammar.getLookaheadDFA(s.getDecisionNumber());
-				/*
-				if ( s.nfa.grammar.type!=Grammar.LEXER ) {
-					System.out.println("decision: "+
-								   dfa.getNFADecisionStartState().getDescription()+
-								   " input="+s.nfa.grammar.getTokenDisplayName(t));
-				}
-				*/
-				int m = input.mark();
-				int predictedAlt = predict(dfa);
-				if ( predictedAlt == NFA.INVALID_ALT_NUMBER ) {
-					String description = dfa.getNFADecisionStartState().getDescription();
-					NoViableAltException nvae =
-						new NoViableAltException(description,
-													  dfa.getDecisionNumber(),
-													  s.stateNumber,
-													  input);
-					if ( actions!=null ) {
-						actions.recognitionException(nvae);
-					}
-					input.consume(); // recover
-					throw nvae;
-				}
-				input.rewind(m);
-				int parseAlt =
-					s.translateDisplayAltToWalkAlt(predictedAlt);
-				/*
-				if ( s.nfa.grammar.type!=Grammar.LEXER ) {
-					System.out.println("predicted alt "+predictedAlt+", parseAlt "+
-									   parseAlt);
-				}
-				*/
-				NFAState alt;
-				if ( parseAlt > s.nfa.grammar.getNumberOfAltsForDecisionNFA(s) ) {
-					// implied branch of loop etc...
-					alt = s.nfa.grammar.nfa.getState( s.endOfBlockStateNumber );
-				}
-				else {
-					alt = s.nfa.grammar.getNFAStateForAltOfDecision(s, parseAlt);
-				}
-				s = (NFAState)alt.transition[0].target;
-				continue;
-			}
-
-			// CASE 2: finished matching a rule
-			if ( s.isAcceptState() ) { // end of rule node
-				if ( actions!=null ) {
-					actions.exitRule(s.nfa.grammar.getFileName(), s.enclosingRule.name);
-				}
-				if ( ruleInvocationStack.empty() ) {
-					// done parsing.  Hit the start state.
-					//System.out.println("stack empty in stop state for "+s.getEnclosingRule());
-					break;
-				}
-				// pop invoking state off the stack to know where to return to
-				NFAState invokingState = (NFAState)ruleInvocationStack.pop();
-				RuleClosureTransition invokingTransition =
-						(RuleClosureTransition)invokingState.transition[0];
-				// move to node after state that invoked this rule
-				s = invokingTransition.followState;
-				continue;
-			}
-
-			Transition trans = s.transition[0];
-			Label label = trans.label;
-			if ( label.isSemanticPredicate() ) {
-				FailedPredicateException fpe =
-					new FailedPredicateException(input,
-												 s.enclosingRule.name,
-												 "can't deal with predicates yet");
-				if ( actions!=null ) {
-					actions.recognitionException(fpe);
-				}
-			}
-
-			// CASE 3: epsilon transition
-			if ( label.isEpsilon() ) {
-				// CASE 3a: rule invocation state
-				if ( trans instanceof RuleClosureTransition ) {
-					ruleInvocationStack.push(s);
-					s = (NFAState)trans.target;
-					//System.out.println("call "+s.enclosingRule.name+" from "+s.nfa.grammar.getFileName());
-					if ( actions!=null ) {
-						actions.enterRule(s.nfa.grammar.getFileName(), s.enclosingRule.name);
-					}
-					// could be jumping to new grammar, make sure DFA created
-					if ( !s.nfa.grammar.allDecisionDFAHaveBeenCreated() ) {
-						s.nfa.grammar.createLookaheadDFAs();
-					}
-				}
-				// CASE 3b: plain old epsilon transition, just move
-				else {
-					s = (NFAState)trans.target;
-				}
-			}
-
-			// CASE 4: match label on transition
-			else if ( label.matches(t) ) {
-				if ( actions!=null ) {
-					if ( s.nfa.grammar.type == Grammar.PARSER ||
-						 s.nfa.grammar.type == Grammar.COMBINED )
-					{
-						actions.consumeToken(((TokenStream)input).LT(1));
-					}
-				}
-				s = (NFAState)s.transition[0].target;
-				input.consume();
-				t = input.LA(1);
-			}
-
-			// CASE 5: error condition; label is inconsistent with input
-			else {
-				if ( label.isAtom() ) {
-					MismatchedTokenException mte =
-						new MismatchedTokenException(label.getAtom(), input);
-					if ( actions!=null ) {
-						actions.recognitionException(mte);
-					}
-					input.consume(); // recover
-					throw mte;
-				}
-				else if ( label.isSet() ) {
-					MismatchedSetException mse =
-						new MismatchedSetException(((IntervalSet)label.getSet()).toRuntimeBitSet(),
-												   input);
-					if ( actions!=null ) {
-						actions.recognitionException(mse);
-					}
-					input.consume(); // recover
-					throw mse;
-				}
-				else if ( label.isSemanticPredicate() ) {
-					FailedPredicateException fpe =
-						new FailedPredicateException(input,
-													 s.enclosingRule.name,
-													 label.getSemanticContext().toString());
-					if ( actions!=null ) {
-						actions.recognitionException(fpe);
-					}
-					input.consume(); // recover
-					throw fpe;
-				}
-				else {
-					throw new RecognitionException(input); // unknown error
-				}
-			}
-		}
-		//System.out.println("hit stop state for "+stop.getEnclosingRule());
-		if ( actions!=null ) {
-			actions.exitRule(s.nfa.grammar.getFileName(), stop.enclosingRule.name);
-		}
-	}
-
-	/** Given an input stream, return the unique alternative predicted by
-	 *  matching the input.  Upon error, return NFA.INVALID_ALT_NUMBER
-	 *  The first symbol of lookahead is presumed to be primed; that is,
-	 *  input.lookahead(1) must point at the input symbol you want to start
-	 *  predicting with.
-	 */
-	public int predict(DFA dfa) {
-		DFAState s = dfa.startState;
-		int c = input.LA(1);
-		Transition eotTransition = null;
-	dfaLoop:
-		while ( !s.isAcceptState() ) {
-			/*
-			System.out.println("DFA.predict("+s.getStateNumber()+", "+
-					dfa.getNFA().getGrammar().getTokenName(c)+")");
-			*/
-			// for each edge of s, look for intersection with current char
-			for (int i=0; i<s.getNumberOfTransitions(); i++) {
-				Transition t = s.transition(i);
-				// special case: EOT matches any char
-				if ( t.label.matches(c) ) {
-					// take transition i
-					s = (DFAState)t.target;
-					input.consume();
-					c = input.LA(1);
-					continue dfaLoop;
-				}
-				if ( t.label.getAtom()==Label.EOT ) {
-					eotTransition = t;
-				}
-			}
-			if ( eotTransition!=null ) {
-				s = (DFAState)eotTransition.target;
-				continue dfaLoop;
-			}
-			/*
-			ErrorManager.error(ErrorManager.MSG_NO_VIABLE_DFA_ALT,
-							   s,
-							   dfa.nfa.grammar.getTokenName(c));
-			*/
-			return NFA.INVALID_ALT_NUMBER;
-		}
-		// woohoo!  We know which alt to predict
-		// nothing emanates from a stop state; must terminate anyway
-		/*
-		System.out.println("DFA stop state "+s.getStateNumber()+" predicts "+
-				s.getUniquelyPredictedAlt());
-		*/
-		return s.getUniquelyPredictedAlt();
-	}
-
-	public void reportScanError(RecognitionException re) {
-		CharStream cs = (CharStream)input;
-		// print as good of a message as we can, given that we do not have
-		// a Lexer object and, hence, cannot call the routine to get a
-		// decent error message.
-		System.err.println("problem matching token at "+
-			cs.getLine()+":"+cs.getCharPositionInLine()+" "+re);
-	}
-
-	public String getSourceName() {
-		return input.getSourceName();
-	}
-
-}
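
The class comment above points out that an Interpreter can itself act as a TokenSource, so a lexer grammar can feed a CommonTokenStream directly. A short hedged sketch of that usage, assuming "lexer" is a fully built lexer Grammar (as in the previous sketch); the input string is a hypothetical placeholder.

    // Tokenize a string by pulling tokens from an Interpreter over a lexer grammar.
    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.Token;
    import org.antlr.tool.Grammar;
    import org.antlr.tool.Interpreter;

    public class LexSketch {
        static void dumpTokens(Grammar lexer, String text) {
            Interpreter lexEngine = new Interpreter(lexer, new ANTLRStringStream(text));
            Token t = lexEngine.nextToken();
            while (t.getType() != Token.EOF) {   // nextToken() returns an EOF token at end of input
                System.out.println(lexer.getTokenDisplayName(t.getType()) + " \"" + t.getText() + "\"");
                t = lexEngine.nextToken();
            }
        }
    }
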
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/LeftRecursionCyclesMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/LeftRecursionCyclesMessage.java
deleted file mode 100644
index 412978e..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/LeftRecursionCyclesMessage.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.stringtemplate.v4.ST;
-
-import java.util.Collection;
-
-/** Similar to LeftRecursionMessage except this is used for announcing
- *  cycles found by walking rules without decisions; the other msg is
- *  invoked when a decision DFA construction finds a problem in closure.
- */
-public class LeftRecursionCyclesMessage extends Message {
-	public Collection cycles;
-
-	public LeftRecursionCyclesMessage(Collection cycles) {
-		super(ErrorManager.MSG_LEFT_RECURSION_CYCLES);
-		this.cycles = cycles;
-	}
-
-	public String toString() {
-		ST st = getMessageTemplate();
-		st.add("listOfCycles", cycles);
-		return super.toString(st);
-	}
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/LeftRecursiveRuleAnalyzer.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/LeftRecursiveRuleAnalyzer.java
deleted file mode 100644
index fcbf7db..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/LeftRecursiveRuleAnalyzer.java
+++ /dev/null
@@ -1,352 +0,0 @@
-package org.antlr.tool;
-
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.grammar.v3.*;
-import org.antlr.runtime.Token;
-import org.antlr.runtime.tree.CommonTreeNodeStream;
-import org.antlr.runtime.tree.TreeNodeStream;
-import org.stringtemplate.v4.*;
-
-import java.util.*;
-
-/** */
-public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
-	public static enum ASSOC { left, right };
-
-	public Grammar g;
-	public CodeGenerator generator;
-	public String ruleName;
-	Map<Integer, Integer> tokenToPrec = new HashMap<Integer, Integer>();
-	public LinkedHashMap<Integer, String> binaryAlts = new LinkedHashMap<Integer, String>();
-	public LinkedHashMap<Integer, String> ternaryAlts = new LinkedHashMap<Integer, String>();
-	public LinkedHashMap<Integer, String> suffixAlts = new LinkedHashMap<Integer, String>();
-	public List<String> prefixAlts = new ArrayList<String>();
-	public List<String> otherAlts = new ArrayList<String>();
-
-	public GrammarAST retvals;
-
-	public STGroup recRuleTemplates;
-	public String language;
-
-	public Map<Integer, ASSOC> altAssociativity = new HashMap<Integer, ASSOC>();
-
-	public LeftRecursiveRuleAnalyzer(TreeNodeStream input, Grammar g, String ruleName) {
-		super(input);
-		this.g = g;
-		this.ruleName = ruleName;
-		language = (String)g.getOption("language");
-		generator = new CodeGenerator(g.tool, g, language);
-		generator.loadTemplates(language);
-		loadPrecRuleTemplates();
-	}
-
-	public void loadPrecRuleTemplates() {
-		recRuleTemplates =
-			new STGroupFile(CodeGenerator.classpathTemplateRootDirectoryName+
-							"/LeftRecursiveRules.stg");
-		if ( !recRuleTemplates.isDefined("recRuleName") ) {
-			ErrorManager.error(ErrorManager.MSG_MISSING_CODE_GEN_TEMPLATES,
-							   "PrecRules");
-			return;
-		}
-	}
-
-	@Override
-	public void setReturnValues(GrammarAST t) {
-		System.out.println(t);
-		retvals = t;
-	}
-
-	@Override
-	public void setTokenPrec(GrammarAST t, int alt) {
-		int ttype = g.getTokenType(t.getText());
-		tokenToPrec.put(ttype, alt);
-		ASSOC assoc = ASSOC.left;
-		if ( t.terminalOptions!=null ) {
-			String a = (String)t.terminalOptions.get("assoc");
-			if ( a!=null ) {
-				if ( a.equals(ASSOC.right.toString()) ) {
-					assoc = ASSOC.right;
-				}
-				else {
-					ErrorManager.error(ErrorManager.MSG_ILLEGAL_OPTION_VALUE, "assoc", assoc);
-				}
-			}
-		}
-
-		if ( altAssociativity.get(alt)!=null && altAssociativity.get(alt)!=assoc ) {
-			ErrorManager.error(ErrorManager.MSG_ALL_OPS_NEED_SAME_ASSOC, alt);
-		}
-		altAssociativity.put(alt, assoc);
-
-		//System.out.println("op " + alt + ": " + t.getText()+", assoc="+assoc);
-	}
-
-	@Override
-	public void binaryAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
-		altTree = GrammarAST.dupTree(altTree);
-		rewriteTree = GrammarAST.dupTree(rewriteTree);
-
-		stripSynPred(altTree);
-		stripLeftRecursion(altTree);
-
-		// rewrite e to be e_[rec_arg]
-		int nextPrec = nextPrecedence(alt);
-		ST refST = recRuleTemplates.getInstanceOf("recRuleRef");
-		refST.add("ruleName", ruleName);
-		refST.add("arg", nextPrec);
-		altTree = replaceRuleRefs(altTree, refST.render());
-
-		String altText = text(altTree);
-		altText = altText.trim();
-		altText += "{}"; // add empty alt to prevent pred hoisting
-		ST nameST = recRuleTemplates.getInstanceOf("recRuleName");
-		nameST.add("ruleName", ruleName);
-		rewriteTree = replaceRuleRefs(rewriteTree, "$" + nameST.render());
-		String rewriteText = text(rewriteTree);
-		binaryAlts.put(alt, altText + (rewriteText != null ? " " + rewriteText : ""));
-		//System.out.println("binaryAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
-	}
-
-	/** Convert e ? e : e  ->  ? e : e_[nextPrec] */
-	@Override
-	public void ternaryAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
-		altTree = GrammarAST.dupTree(altTree);
-		rewriteTree = GrammarAST.dupTree(rewriteTree);
-
-		stripSynPred(altTree);
-		stripLeftRecursion(altTree);
-
-		int nextPrec = nextPrecedence(alt);
-		ST refST = recRuleTemplates.getInstanceOf("recRuleRef");
-		refST.add("ruleName", ruleName);
-		refST.add("arg", nextPrec);
-		altTree = replaceLastRuleRef(altTree, refST.render());
-
-		String altText = text(altTree);
-		altText = altText.trim();
-		altText += "{}"; // add empty alt to prevent pred hoisting
-		ST nameST = recRuleTemplates.getInstanceOf("recRuleName");
-		nameST.add("ruleName", ruleName);
-		rewriteTree = replaceRuleRefs(rewriteTree, "$" + nameST.render());
-		String rewriteText = text(rewriteTree);
-		ternaryAlts.put(alt, altText + (rewriteText != null ? " " + rewriteText : ""));
-		//System.out.println("ternaryAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
-	}
-
-	@Override
-	public void prefixAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
-		altTree = GrammarAST.dupTree(altTree);
-		rewriteTree = GrammarAST.dupTree(rewriteTree);
-
-		stripSynPred(altTree);
-
-		int nextPrec = precedence(alt);
-		// rewrite e to be e_[rec_arg]
-		ST refST = recRuleTemplates.getInstanceOf("recRuleRef");
-		refST.add("ruleName", ruleName);
-		refST.add("arg", nextPrec);
-		altTree = replaceRuleRefs(altTree, refST.render());
-		String altText = text(altTree);
-		altText = altText.trim();
-		altText += "{}"; // add empty alt to prevent pred hoisting
-
-		ST nameST = recRuleTemplates.getInstanceOf("recRuleName");
-		nameST.add("ruleName", ruleName);
-		rewriteTree = replaceRuleRefs(rewriteTree, nameST.render());
-		String rewriteText = text(rewriteTree);
-
-		prefixAlts.add(altText + (rewriteText != null ? " " + rewriteText : ""));
-		//System.out.println("prefixAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
-	}
-
-	@Override
-	public void suffixAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
-		altTree = GrammarAST.dupTree(altTree);
-		rewriteTree = GrammarAST.dupTree(rewriteTree);
-		stripSynPred(altTree);
-		stripLeftRecursion(altTree);
-		ST nameST = recRuleTemplates.getInstanceOf("recRuleName");
-		nameST.add("ruleName", ruleName);
-		rewriteTree = replaceRuleRefs(rewriteTree, "$" + nameST.render());
-		String rewriteText = text(rewriteTree);
-		String altText = text(altTree);
-		altText = altText.trim();
-		suffixAlts.put(alt, altText + (rewriteText != null ? " " + rewriteText : ""));
-//		System.out.println("suffixAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
-	}
-
-	@Override
-	public void otherAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
-		altTree = GrammarAST.dupTree(altTree);
-		rewriteTree = GrammarAST.dupTree(rewriteTree);
-		stripSynPred(altTree);
-		stripLeftRecursion(altTree);
-		String altText = text(altTree);
-
-		String rewriteText = text(rewriteTree);
-		otherAlts.add(altText + (rewriteText != null ? " " + rewriteText : ""));
-		//System.out.println("otherAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
-	}
-
-	// --------- get transformed rules ----------------
-
-	public String getArtificialPrecStartRule() {
-		ST ruleST = recRuleTemplates.getInstanceOf("recRuleStart");
-		ruleST.add("ruleName", ruleName);
-		ruleST.add("minPrec", 0);
-		ruleST.add("userRetvals", retvals);
-		fillRetValAssignments(ruleST, "recRuleName");
-
-		System.out.println("start: " + ruleST);
-		return ruleST.render();
-	}
-
-	public String getArtificialOpPrecRule() {
-		ST ruleST = recRuleTemplates.getInstanceOf("recRule");
-		ruleST.add("ruleName", ruleName);
-		ruleST.add("buildAST", grammar.buildAST());
-		ST argDefST =
-			generator.getTemplates().getInstanceOf("recRuleDefArg");
-		ruleST.add("precArgDef", argDefST);
-		ST ruleArgST =
-			generator.getTemplates().getInstanceOf("recRuleArg");
-		ruleST.add("argName", ruleArgST);
-		ST setResultST =
-			generator.getTemplates().getInstanceOf("recRuleSetResultAction");
-		ruleST.add("setResultAction", setResultST);
-		ruleST.add("userRetvals", retvals);
-		fillRetValAssignments(ruleST, "recPrimaryName");
-
-		LinkedHashMap<Integer, String> opPrecRuleAlts = new LinkedHashMap<Integer, String>();
-		opPrecRuleAlts.putAll(binaryAlts);
-		opPrecRuleAlts.putAll(ternaryAlts);
-		opPrecRuleAlts.putAll(suffixAlts);
-		for (int alt : opPrecRuleAlts.keySet()) {
-			String altText = opPrecRuleAlts.get(alt);
-			ST altST = recRuleTemplates.getInstanceOf("recRuleAlt");
-			ST predST =
-				generator.getTemplates().getInstanceOf("recRuleAltPredicate");
-			predST.add("opPrec", precedence(alt));
-			predST.add("ruleName", ruleName);
-			altST.add("pred", predST);
-			altST.add("alt", altText);
-			ruleST.add("alts", altST);
-		}
-
-		System.out.println(ruleST);
-
-		return ruleST.render();
-	}
-
-	public String getArtificialPrimaryRule() {
-		ST ruleST = recRuleTemplates.getInstanceOf("recPrimaryRule");
-		ruleST.add("ruleName", ruleName);
-		ruleST.add("alts", prefixAlts);
-		ruleST.add("alts", otherAlts);
-		ruleST.add("userRetvals", retvals);
-		System.out.println(ruleST);
-		return ruleST.render();
-	}
-
-	public GrammarAST replaceRuleRefs(GrammarAST t, String name) {
-		if ( t==null ) return null;
-		for (GrammarAST rref : t.findAllType(RULE_REF)) {
-			if ( rref.getText().equals(ruleName) ) rref.setText(name);
-		}
-		return t;
-	}
-
-	public static boolean hasImmediateRecursiveRuleRefs(GrammarAST t, String ruleName) {
-		if ( t==null ) return false;
-		for (GrammarAST rref : t.findAllType(RULE_REF)) {
-			if ( rref.getText().equals(ruleName) ) return true;
-		}
-		return false;
-	}
-
-	public GrammarAST replaceLastRuleRef(GrammarAST t, String name) {
-		if ( t==null ) return null;
-		GrammarAST last = null;
-		for (GrammarAST rref : t.findAllType(RULE_REF)) { last = rref; }
-		if ( last !=null && last.getText().equals(ruleName) ) last.setText(name);
-		return t;
-	}
-
-	public void stripSynPred(GrammarAST altAST) {
-		GrammarAST t = (GrammarAST)altAST.getChild(0);
-		if ( t.getType()==ANTLRParser.BACKTRACK_SEMPRED ||
-			 t.getType()==ANTLRParser.SYNPRED ||
-			 t.getType()==ANTLRParser.SYN_SEMPRED )
-		{
-			altAST.deleteChild(0); // kill it
-		}
-	}
-
-	public void stripLeftRecursion(GrammarAST altAST) {
-		GrammarAST rref = (GrammarAST)altAST.getChild(0);
-		if ( rref.getType()== ANTLRParser.RULE_REF &&
-			 rref.getText().equals(ruleName))
-		{
-			// remove rule ref
-			altAST.deleteChild(0);
-			// reset index so it prints properly
-			GrammarAST newFirstChild = (GrammarAST) altAST.getChild(0);
-			altAST.setTokenStartIndex(newFirstChild.getTokenStartIndex());
-		}
-	}
-
-	public String text(GrammarAST t) {
-		if ( t==null ) return null;
-		try {
-			return new ANTLRTreePrinter(new CommonTreeNodeStream(t)).toString(grammar, true);
-		}
-		catch (Exception e) {
-			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE, e);
-		}
-		return null;
-	}
-
-	public int precedence(int alt) {
-		return numAlts-alt+1;
-	}
-
-	public int nextPrecedence(int alt) {
-		int p = precedence(alt);
-		if ( altAssociativity.get(alt)==ASSOC.left ) p++;
-		return p;
-	}
-
-	public void fillRetValAssignments(ST ruleST, String srcName) {
-		if ( retvals==null ) return;
-
-		// complicated since we must be target-independent
-		for (String name : getNamesFromArgAction(retvals.token)) {
-			ST setRetValST =
-				generator.getTemplates().getInstanceOf("recRuleSetReturnAction");
-			ST ruleNameST = recRuleTemplates.getInstanceOf(srcName);
-			ruleNameST.add("ruleName", ruleName);
-			setRetValST.add("src", ruleNameST);
-			setRetValST.add("name", name);
-			ruleST.add("userRetvalAssignments",setRetValST);
-		}
-	}
-
-	public Collection<String> getNamesFromArgAction(Token t) {
-		AttributeScope returnScope = grammar.createReturnScope("",t);
-		returnScope.addAttributes(t.getText(), ',');
-		return returnScope.attributes.keySet();
-	}
-
-	@Override
-	public String toString() {
-		return "PrecRuleOperatorCollector{" +
-			   "binaryAlts=" + binaryAlts +
-			   ", rec=" + tokenToPrec +
-			   ", ternaryAlts=" + ternaryAlts +
-			   ", suffixAlts=" + suffixAlts +
-			   ", prefixAlts=" + prefixAlts +
-			   ", otherAlts=" + otherAlts +
-			   '}';
-	}
-}
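
The precedence(alt) and nextPrecedence(alt) helpers above carry the whole left-recursion rewrite: with the formula numAlts-alt+1, alternatives listed earlier get a higher precedence number, and a left-associative alternative recurses at the next-higher level for its rewritten rule references. A small sketch of that arithmetic, assuming a hypothetical rule with numAlts = 4.

    // Mirrors the precedence arithmetic of the deleted analyzer for a hypothetical 4-alt rule.
    public class PrecSketch {
        static final int numAlts = 4;                          // hypothetical alternative count

        static int precedence(int alt) { return numAlts - alt + 1; }

        // For a left-associative alt the recursive right-operand reference uses precedence+1,
        // so "a-b-c" groups as "(a-b)-c".
        static int nextPrecedence(int alt, boolean leftAssoc) {
            int p = precedence(alt);
            return leftAssoc ? p + 1 : p;
        }

        public static void main(String[] args) {
            System.out.println(precedence(1));                 // 4: first alt binds tightest
            System.out.println(precedence(4));                 // 1: last alt binds loosest
            System.out.println(nextPrecedence(2, true));       // 4: left-assoc alt 2 recurses at 3+1
        }
    }
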
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/NFAFactory.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/NFAFactory.java
deleted file mode 100644
index de52287..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/NFAFactory.java
+++ /dev/null
@@ -1,732 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.*;
-import org.antlr.misc.IntSet;
-import org.antlr.misc.IntervalSet;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-
-/** Routines to construct StateClusters from EBNF grammar constructs.
- *  No optimization is done to remove unnecessary epsilon edges.
- *
- *  TODO: add an optimization that reduces number of states and transitions
- *  will help with speed of conversion and make it easier to view NFA.  For
- *  example, o-A->o-->o-B->o should be o-A->o-B->o
- */
-public class NFAFactory {
-	/** This factory is attached to a specific NFA that it is building.
-     *  The NFA will be filled up with states and transitions.
-     */
-	NFA nfa = null;
-
-    public Rule getCurrentRule() {
-        return currentRule;
-    }
-
-    public void setCurrentRule(Rule currentRule) {
-        this.currentRule = currentRule;
-    }
-
-	Rule currentRule = null;
-
-	public NFAFactory(NFA nfa) {
-        nfa.setFactory(this);
-		this.nfa = nfa;
-	}
-
-    public NFAState newState() {
-        NFAState n = new NFAState(nfa);
-        int state = nfa.getNewNFAStateNumber();
-        n.stateNumber = state;
-        nfa.addState(n);
-		n.enclosingRule = currentRule;
-		return n;
-    }
-
-	/** Optimize an alternative (list of grammar elements).
-	 *
-	 *  Walk the chain of elements (which can be complicated loop blocks...)
-	 *  and throw away any epsilon transitions used to link up simple elements.
-	 *
-	 *  This only removes 195 states from the java.g's NFA, but every little
-	 *  bit helps.  Perhaps I can improve in the future.
-	 */
-	public void optimizeAlternative(StateCluster alt) {
-		NFAState s = alt.left;
-		while ( s!=alt.right ) {
-			// if it's a block element, jump over it and continue
-			if ( s.endOfBlockStateNumber!=State.INVALID_STATE_NUMBER ) {
-				s = nfa.getState(s.endOfBlockStateNumber);
-				continue;
-			}
-			Transition t = s.transition[0];
-			if ( t instanceof RuleClosureTransition ) {
-				s = ((RuleClosureTransition) t).followState;
-				continue;
-			}
-			if ( t.label.isEpsilon() && !t.label.isAction() && s.getNumberOfTransitions()==1 ) {
-				// bypass epsilon transition and point to what the epsilon's
-				// target points to unless that epsilon transition points to
-				// a block or loop etc..  Also don't collapse epsilons that
-				// point at the last node of the alt. Don't collapse action edges
-				NFAState epsilonTarget = (NFAState)t.target;
-				if ( epsilonTarget.endOfBlockStateNumber==State.INVALID_STATE_NUMBER &&
-					 epsilonTarget.transition[0] !=null )
-				{
-					s.setTransition0(epsilonTarget.transition[0]);
-					/*
-					System.out.println("### opt "+s.stateNumber+"->"+
-									   epsilonTarget.transition(0).target.stateNumber);
-					*/
-				}
-			}
-			s = (NFAState)t.target;
-		}
-	}
-
-	/** From label A build Graph o-A->o */
-	public StateCluster build_Atom(int label, GrammarAST associatedAST) {
-		NFAState left = newState();
-		NFAState right = newState();
-		left.associatedASTNode = associatedAST;
-		right.associatedASTNode = associatedAST;
-		transitionBetweenStates(left, right, label);
-		StateCluster g = new StateCluster(left, right);
-		return g;
-	}
-
-	public StateCluster build_Atom(GrammarAST atomAST) {
-		int tokenType = nfa.grammar.getTokenType(atomAST.getText());
-		return build_Atom(tokenType, atomAST);
-	}
-
-	/** From set build single edge graph o->o-set->o.  To conform to
-     *  what an alt block looks like, must have extra state on left.
-     */
-	public StateCluster build_Set(IntSet set, GrammarAST associatedAST) {
-        NFAState left = newState();
-        NFAState right = newState();
-		left.associatedASTNode = associatedAST;
-		right.associatedASTNode = associatedAST;
-		Label label = new Label(set);
-		Transition e = new Transition(label,right);
-        left.addTransition(e);
-		StateCluster g = new StateCluster(left, right);
-        return g;
-	}
-
-    /** Can only complement block of simple alts; can complement build_Set()
-     *  result, that is.  Get set and complement, replace old with complement.
-    public StateCluster build_AlternativeBlockComplement(StateCluster blk) {
-        State s0 = blk.left;
-        IntSet set = getCollapsedBlockAsSet(s0);
-        if ( set!=null ) {
-            // if set is available, then structure known and blk is a set
-            set = nfa.grammar.complement(set);
-            Label label = s0.transition(0).target.transition(0).label;
-            label.setSet(set);
-        }
-        return blk;
-    }
-	 */
-
-    public StateCluster build_Range(int a, int b) {
-        NFAState left = newState();
-        NFAState right = newState();
-		Label label = new Label(IntervalSet.of(a, b));
-		Transition e = new Transition(label,right);
-        left.addTransition(e);
-        StateCluster g = new StateCluster(left, right);
-        return g;
-    }
-
-	/** From char 'c' build StateCluster o-intValue(c)->o
-	 */
-	public StateCluster build_CharLiteralAtom(GrammarAST charLiteralAST) {
-        int c = Grammar.getCharValueFromGrammarCharLiteral(charLiteralAST.getText());
-		return build_Atom(c, charLiteralAST);
-	}
-
-	/** From char 'c' build StateCluster o-intValue(c)->o
-	 *  can include unicode spec likes '\u0024' later.  Accepts
-	 *  actual unicode 16-bit now, of course, by default.
-     *  TODO not supplemental char clean!
-	 */
-	public StateCluster build_CharRange(String a, String b) {
-		int from = Grammar.getCharValueFromGrammarCharLiteral(a);
-		int to = Grammar.getCharValueFromGrammarCharLiteral(b);
-		return build_Range(from, to);
-	}
-
-    /** For a non-lexer, just build a simple token reference atom.
-     *  For a lexer, a string is a sequence of char to match.  That is,
-     *  "fog" is treated as 'f' 'o' 'g' not as a single transition in
-     *  the DFA.  Machine== o-'f'->o-'o'->o-'g'->o and has n+1 states
-     *  for n characters.
-     */
-    public StateCluster build_StringLiteralAtom(GrammarAST stringLiteralAST) {
-        if ( nfa.grammar.type==Grammar.LEXER ) {
-			StringBuffer chars =
-				Grammar.getUnescapedStringFromGrammarStringLiteral(stringLiteralAST.getText());
-            NFAState first = newState();
-            NFAState last = null;
-            NFAState prev = first;
-            for (int i=0; i<chars.length(); i++) {
-                int c = chars.charAt(i);
-                NFAState next = newState();
-                transitionBetweenStates(prev, next, c);
-                prev = last = next;
-            }
-            return  new StateCluster(first, last);
-        }
-
-        // a simple token reference in non-Lexers
-        int tokenType = nfa.grammar.getTokenType(stringLiteralAST.getText());
-		return build_Atom(tokenType, stringLiteralAST);
-    }
-
-    /** For reference to rule r, build
-     *
-     *  o-e->(r)  o
-     *
-     *  where (r) is the start of rule r and the trailing o is not linked
-     *  to from rule ref state directly (it's done through the transition(0)
-     *  RuleClosureTransition).
-     *
-     *  If the rule r is just a list of tokens, its block will be just
-     *  a set on an edge o->o->o-set->o->o->o; we could inline it rather than doing
-     *  the rule reference, but I'm not doing this yet as I'm not sure
-     *  it would help much in the NFA->DFA construction.
-     *
-     *  TODO add to codegen: collapse alt blks that are sets into single matchSet
-     */
-    public StateCluster build_RuleRef(Rule refDef, NFAState ruleStart) {
-        //System.out.println("building ref to rule "+nfa.grammar.name+"."+refDef.name);
-        NFAState left = newState();
-        // left.setDescription("ref to "+ruleStart.getDescription());
-        NFAState right = newState();
-        // right.setDescription("NFAState following ref to "+ruleStart.getDescription());
-        Transition e = new RuleClosureTransition(refDef,ruleStart,right);
-        left.addTransition(e);
-        StateCluster g = new StateCluster(left, right);
-        return g;
-    }
-
-    /** From an empty alternative build StateCluster o-e->o */
-    public StateCluster build_Epsilon() {
-        NFAState left = newState();
-        NFAState right = newState();
-        transitionBetweenStates(left, right, Label.EPSILON);
-        StateCluster g = new StateCluster(left, right);
-        return g;
-    }
-
-	/** Build what amounts to an epsilon transition with a semantic
-	 *  predicate action.  The pred is a pointer into the AST of
-	 *  the SEMPRED token.
-	 */
-	public StateCluster build_SemanticPredicate(GrammarAST pred) {
-		// don't count syn preds
-		if ( !pred.getText().toUpperCase()
-				.startsWith(Grammar.SYNPRED_RULE_PREFIX.toUpperCase()) )
-		{
-			nfa.grammar.numberOfSemanticPredicates++;
-		}
-		NFAState left = newState();
-		NFAState right = newState();
-		Transition e = new Transition(new PredicateLabel(pred), right);
-		left.addTransition(e);
-		StateCluster g = new StateCluster(left, right);
-		return g;
-	}
-
-	/** Build what amounts to an epsilon transition with an action.
-	 *  The action goes into NFA though it is ignored during analysis.
-	 *  It slows things down a bit, but I must ignore predicates after
-	 *  having seen an action (5-5-2008).
-	 */
-	public StateCluster build_Action(GrammarAST action) {
-		NFAState left = newState();
-		NFAState right = newState();
-		Transition e = new Transition(new ActionLabel(action), right);
-		left.addTransition(e);
-		return new StateCluster(left, right);
-	}
-
-	/** add an EOF transition to any rule end NFAState that points to nothing
-     *  (i.e., for all those rules not invoked by another rule).  These
-     *  are start symbols then.
-	 *
-	 *  Return the number of grammar entry points; i.e., how many rules are
-	 *  not invoked by another rule (they can only be invoked from outside).
-	 *  These are the start rules.
-     */
-    public int build_EOFStates(Collection rules) {
-		int numberUnInvokedRules = 0;
-        for (Iterator iterator = rules.iterator(); iterator.hasNext();) {
-			Rule r = (Rule) iterator.next();
-			NFAState endNFAState = r.stopState;
-            // Is this rule a start symbol?  (no follow links)
-			if ( endNFAState.transition[0] ==null ) {
-				// if so, then don't let algorithm fall off the end of
-				// the rule, make it hit EOF/EOT.
-				build_EOFState(endNFAState);
-				// track how many rules have been invoked by another rule
-				numberUnInvokedRules++;
-			}
-        }
-		return numberUnInvokedRules;
-    }
-
-    /** set up an NFA NFAState that will yield eof tokens or,
-     *  in the case of a lexer grammar, an EOT token when the conversion
-     *  hits the end of a rule.
-     */
-    private void build_EOFState(NFAState endNFAState) {
-		NFAState end = newState();
-        int label = Label.EOF;
-        if ( nfa.grammar.type==Grammar.LEXER ) {
-            label = Label.EOT;
-			end.setEOTTargetState(true);
-        }
-		/*
-		System.out.println("build "+nfa.grammar.getTokenDisplayName(label)+
-						   " loop on end of state "+endNFAState.getDescription()+
-						   " to state "+end.stateNumber);
-		*/
-		Transition toEnd = new Transition(label, end);
-		endNFAState.addTransition(toEnd);
-	}
-
-    /** From A B build A-e->B (that is, build an epsilon arc from right
-     *  of A to left of B).
-     *
-     *  As a convenience, return B if A is null or return A if B is null.
-     */
-    public StateCluster build_AB(StateCluster A, StateCluster B) {
-        if ( A==null ) {
-            return B;
-        }
-        if ( B==null ) {
-            return A;
-        }
-		transitionBetweenStates(A.right, B.left, Label.EPSILON);
-		StateCluster g = new StateCluster(A.left, B.right);
-        return g;
-    }
-
-	/** From a set ('a'|'b') build
-     *
-     *  o->o-'a'..'b'->o->o (last NFAState is blockEndNFAState pointed to by all alts)
-	 */
-	public StateCluster build_AlternativeBlockFromSet(StateCluster set) {
-		if ( set==null ) {
-			return null;
-		}
-
-		// single alt, no decision, just return only alt state cluster
-		NFAState startOfAlt = newState(); // must have this no matter what
-		transitionBetweenStates(startOfAlt, set.left, Label.EPSILON);
-
-		return new StateCluster(startOfAlt,set.right);
-	}
-
-	/** From A|B|..|Z alternative block build
-     *
-     *  o->o-A->o->o (last NFAState is blockEndNFAState pointed to by all alts)
-     *  |          ^
-     *  o->o-B->o--|
-     *  |          |
-     *  ...        |
-     *  |          |
-     *  o->o-Z->o--|
-     *
-     *  So every alternative gets begin NFAState connected by epsilon
-     *  and every alt right side points at a block end NFAState.  There is a
-     *  new NFAState in the NFAState in the StateCluster for each alt plus one for the
-     *  end NFAState.
-     *
-     *  Special case: only one alternative: don't make a block with alt
-     *  begin/end.
-     *
-     *  Special case: if just a list of tokens/chars/sets, then collapse
-     *  to a single edge'd o-set->o graph.
-     *
-     *  Set alt number (1..n) in the left-Transition NFAState.
-     */
-    public StateCluster build_AlternativeBlock(List alternativeStateClusters)
-    {
-        StateCluster result = null;
-        if ( alternativeStateClusters==null || alternativeStateClusters.size()==0 ) {
-            return null;
-        }
-
-		// single alt case
-		if ( alternativeStateClusters.size()==1 ) {
-			// single alt, no decision, just return only alt state cluster
-			StateCluster g = (StateCluster)alternativeStateClusters.get(0);
-			NFAState startOfAlt = newState(); // must have this no matter what
-			transitionBetweenStates(startOfAlt, g.left, Label.EPSILON);
-
-			//System.out.println("### opt saved start/stop end in (...)");
-			return new StateCluster(startOfAlt,g.right);
-		}
-
-		// even if we can collapse for lookahead purposes, we will still
-        // need to predict the alts of this subrule in case there are actions
-        // etc...  This is the decision that is pointed to from the AST node
-        // (always)
-        NFAState prevAlternative = null; // tracks prev so we can link to next alt
-        NFAState firstAlt = null;
-        NFAState blockEndNFAState = newState();
-        blockEndNFAState.setDescription("end block");
-        int altNum = 1;
-        for (Iterator iter = alternativeStateClusters.iterator(); iter.hasNext();) {
-            StateCluster g = (StateCluster) iter.next();
-            // add begin NFAState for this alt connected by epsilon
-            NFAState left = newState();
-            left.setDescription("alt "+altNum+" of ()");
-			transitionBetweenStates(left, g.left, Label.EPSILON);
-			transitionBetweenStates(g.right, blockEndNFAState, Label.EPSILON);
-			// Are we the first alternative?
-			if ( firstAlt==null ) {
-				firstAlt = left; // track extreme left node of StateCluster
-			}
-			else {
-				// if not first alternative, must link to this alt from previous
-				transitionBetweenStates(prevAlternative, left, Label.EPSILON);
-			}
-			prevAlternative = left;
-			altNum++;
-		}
-
-		// return StateCluster representing entire block
-		// Points to first alt NFAState on left, block end on right
-		result = new StateCluster(firstAlt, blockEndNFAState);
-
-		firstAlt.decisionStateType = NFAState.BLOCK_START;
-
-		// set EOB markers for Jean
-		firstAlt.endOfBlockStateNumber = blockEndNFAState.stateNumber;
-
-		return result;
-    }
-
-    /** From (A)? build either:
-     *
-	 *  o--A->o
-	 *  |     ^
-	 *  o---->|
-     *
-     *  or, if A is a block, just add an empty alt to the end of the block
-     */
-    public StateCluster build_Aoptional(StateCluster A) {
-        StateCluster g = null;
-        int n = nfa.grammar.getNumberOfAltsForDecisionNFA(A.left);
-        if ( n==1 ) {
-            // no decision, just wrap in an optional path
-			//NFAState decisionState = newState();
-			NFAState decisionState = A.left; // reuse left edge
-			decisionState.setDescription("only alt of ()? block");
-			NFAState emptyAlt = newState();
-            emptyAlt.setDescription("epsilon path of ()? block");
-            NFAState blockEndNFAState = null;
-			blockEndNFAState = newState();
-			transitionBetweenStates(A.right, blockEndNFAState, Label.EPSILON);
-			blockEndNFAState.setDescription("end ()? block");
-            //transitionBetweenStates(decisionState, A.left, Label.EPSILON);
-            transitionBetweenStates(decisionState, emptyAlt, Label.EPSILON);
-            transitionBetweenStates(emptyAlt, blockEndNFAState, Label.EPSILON);
-
-			// set EOB markers for Jean
-			decisionState.endOfBlockStateNumber = blockEndNFAState.stateNumber;
-			blockEndNFAState.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
-
-            g = new StateCluster(decisionState, blockEndNFAState);
-        }
-        else {
-            // a decision block, add an empty alt
-            NFAState lastRealAlt =
-                    nfa.grammar.getNFAStateForAltOfDecision(A.left, n);
-            NFAState emptyAlt = newState();
-            emptyAlt.setDescription("epsilon path of ()? block");
-            transitionBetweenStates(lastRealAlt, emptyAlt, Label.EPSILON);
-            transitionBetweenStates(emptyAlt, A.right, Label.EPSILON);
-
-			// set EOB markers for Jean (I think this is redundant here)
-			A.left.endOfBlockStateNumber = A.right.stateNumber;
-			A.right.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
-
-            g = A; // return same block, but now with optional last path
-        }
-		g.left.decisionStateType = NFAState.OPTIONAL_BLOCK_START;
-
-        return g;
-    }
-
-    /** From (A)+ build
-	 *
-     *     |---|    (Transition 2 from A.right points at alt 1)
-	 *     v   |    (follow of loop is Transition 1)
-     *  o->o-A-o->o
-     *
-     *  Meaning that the last NFAState in A points back to A's left Transition NFAState
-     *  and we add new begin/end NFAStates.  A can be a single alternative or
-     *  multiple alternatives.
-	 *
-	 *  During analysis we'll call the follow link (transition 1) alt n+1 for
-	 *  an n-alt A block.
-     */
-    public StateCluster build_Aplus(StateCluster A) {
-        NFAState left = newState();
-        NFAState blockEndNFAState = newState();
-		blockEndNFAState.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
-
-		// don't reuse A.right as loopback if it's right edge of another block
-		if ( A.right.decisionStateType == NFAState.RIGHT_EDGE_OF_BLOCK ) {
-			// nested A* so make another tail node to be the loop back
-			// instead of the usual A.right which is the EOB for inner loop
-			NFAState extraRightEdge = newState();
-			transitionBetweenStates(A.right, extraRightEdge, Label.EPSILON);
-			A.right = extraRightEdge;
-		}
-
-        transitionBetweenStates(A.right, blockEndNFAState, Label.EPSILON); // follow is Transition 1
-		// turn A's block end into a loopback (acts like alt 2)
-		transitionBetweenStates(A.right, A.left, Label.EPSILON); // loop back Transition 2
-		transitionBetweenStates(left, A.left, Label.EPSILON);
-		
-		A.right.decisionStateType = NFAState.LOOPBACK;
-		A.left.decisionStateType = NFAState.BLOCK_START;
-
-		// set EOB markers for Jean
-		A.left.endOfBlockStateNumber = A.right.stateNumber;
-
-        StateCluster g = new StateCluster(left, blockEndNFAState);
-        return g;
-    }
-
-    /** From (A)* build
-     *
-	 *     |---|
-	 *     v   |
-	 *  o->o-A-o--o (Transition 2 from block end points at alt 1; follow is Transition 1)
-     *  |         ^
-     *  o---------| (optional branch is 2nd alt of optional block containing A+)
-     *
-     *  Meaning that the last (end) NFAState in A points back to A's
-     *  left side NFAState and we add 3 new NFAStates (the
-     *  optional branch is built just like an optional subrule).
-     *  See the Aplus() method for more on the loop back Transition.
-	 *  The new node on right edge is set to RIGHT_EDGE_OF_CLOSURE so we
-	 *  can detect nested (A*)* loops and insert an extra node.  Previously,
-	 *  two blocks shared same EOB node.
-     *
-     *  There are 2 or 3 decision points in a A*.  If A is not a block (i.e.,
-     *  it only has one alt), then there are two decisions: the optional bypass
-     *  and then loopback.  If A is a block of alts, then there are three
-     *  decisions: bypass, loopback, and A's decision point.
-     *
-     *  Note that the optional bypass must be outside the loop as (A|B)* is
-     *  not the same thing as (A|B|)+.
-     *
-     *  This is an accurate NFA representation of the meaning of (A)*, but
-     *  for generating code, I don't need a DFA for the optional branch by
-     *  virtue of how I generate code.  The exit-loopback-branch decision
-     *  is sufficient to let me make an appropriate enter, exit, loop
-     *  determination.  See codegen.g
-     */
-    public StateCluster build_Astar(StateCluster A) {
-		NFAState bypassDecisionState = newState();
-		bypassDecisionState.setDescription("enter loop path of ()* block");
-        NFAState optionalAlt = newState();
-        optionalAlt.setDescription("epsilon path of ()* block");
-        NFAState blockEndNFAState = newState();
-		blockEndNFAState.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
-
-		// don't reuse A.right as loopback if it's right edge of another block
-		if ( A.right.decisionStateType == NFAState.RIGHT_EDGE_OF_BLOCK ) {
-			// nested A* so make another tail node to be the loop back
-			// instead of the usual A.right which is the EOB for inner loop
-			NFAState extraRightEdge = newState();
-			transitionBetweenStates(A.right, extraRightEdge, Label.EPSILON);
-			A.right = extraRightEdge;
-		}
-
-		// convert A's end block to loopback
-		A.right.setDescription("()* loopback");
-		// Transition 1 to actual block of stuff
-        transitionBetweenStates(bypassDecisionState, A.left, Label.EPSILON);
-        // Transition 2 optional to bypass
-        transitionBetweenStates(bypassDecisionState, optionalAlt, Label.EPSILON);
-		transitionBetweenStates(optionalAlt, blockEndNFAState, Label.EPSILON);
-        // Transition 1 of end block exits
-        transitionBetweenStates(A.right, blockEndNFAState, Label.EPSILON);
-        // Transition 2 of end block loops
-        transitionBetweenStates(A.right, A.left, Label.EPSILON);
-
-		bypassDecisionState.decisionStateType = NFAState.BYPASS;
-		A.left.decisionStateType = NFAState.BLOCK_START;
-		A.right.decisionStateType = NFAState.LOOPBACK;
-
-		// set EOB markers for Jean
-		A.left.endOfBlockStateNumber = A.right.stateNumber;
-		bypassDecisionState.endOfBlockStateNumber = blockEndNFAState.stateNumber;
-
-        StateCluster g = new StateCluster(bypassDecisionState, blockEndNFAState);
-        return g;
-    }
-
-    /** Build an NFA predictor for special rule called Tokens manually that
-     *  predicts which token will succeed.  The refs to the rules are not
-     *  RuleRefTransitions as I want DFA conversion to stop at the EOT
-     *  transition on the end of each token, rather than return to Tokens rule.
-     *  If I used normal build_alternativeBlock for this, the RuleRefTransitions
-     *  would save return address when jumping away from Tokens rule.
-     *
-     *  All I do here is build n new states for n rules with an epsilon
-     *  edge to the rule start states and then to the next state in the
-     *  list:
-     *
-     *   o->(A)  (a state links to start of A and to next in list)
-     *   |
-     *   o->(B)
-     *   |
-     *   ...
-     *   |
-     *   o->(Z)
-	 *
-	 *  This is the NFA created for the artificial rule created in
-	 *  Grammar.addArtificialMatchTokensRule().
-	 *
-	 *  11/28/2005: removed so we can use normal rule construction for Tokens.
-    public NFAState build_ArtificialMatchTokensRuleNFA() {
-        int altNum = 1;
-        NFAState firstAlt = null; // the start state for the "rule"
-        NFAState prevAlternative = null;
-        Iterator iter = nfa.grammar.getRules().iterator();
-		// TODO: add a single decision node/state for good description
-        while (iter.hasNext()) {
-			Rule r = (Rule) iter.next();
-            String ruleName = r.name;
-			String modifier = nfa.grammar.getRuleModifier(ruleName);
-            if ( ruleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ||
-				 (modifier!=null &&
-				  modifier.equals(Grammar.FRAGMENT_RULE_MODIFIER)) )
-			{
-                continue; // don't loop to yourself or do nontoken rules
-            }
-            NFAState ruleStartState = nfa.grammar.getRuleStartState(ruleName);
-            NFAState left = newState();
-            left.setDescription("alt "+altNum+" of artificial rule "+Grammar.ARTIFICIAL_TOKENS_RULENAME);
-            transitionBetweenStates(left, ruleStartState, Label.EPSILON);
-            // Are we the first alternative?
-            if ( firstAlt==null ) {
-                firstAlt = left; // track extreme top left node as rule start
-            }
-            else {
-                // if not first alternative, must link to this alt from previous
-                transitionBetweenStates(prevAlternative, left, Label.EPSILON);
-            }
-            prevAlternative = left;
-            altNum++;
-        }
-		firstAlt.decisionStateType = NFAState.BLOCK_START;
-
-        return firstAlt;
-    }
-	 */
-
-    /** Build an atom with all possible values in its label */
-    public StateCluster build_Wildcard(GrammarAST associatedAST) {
-        NFAState left = newState();
-        NFAState right = newState();
-        left.associatedASTNode = associatedAST;
-        right.associatedASTNode = associatedAST;
-        Label label = new Label(nfa.grammar.getTokenTypes()); // char or tokens
-        Transition e = new Transition(label,right);
-        left.addTransition(e);
-        StateCluster g = new StateCluster(left, right);
-        return g;
-    }
-
-    /** Build a subrule matching ^(. .*) (any tree or node). Let's use
-     *  (^(. .+) | .) to be safe.
-     */
-    public StateCluster build_WildcardTree(GrammarAST associatedAST) {
-        StateCluster wildRoot = build_Wildcard(associatedAST);
-
-        StateCluster down = build_Atom(Label.DOWN, associatedAST);
-        wildRoot = build_AB(wildRoot,down); // hook in; . DOWN
-
-        // make .+
-        StateCluster wildChildren = build_Wildcard(associatedAST);
-        wildChildren = build_Aplus(wildChildren);
-        wildRoot = build_AB(wildRoot,wildChildren); // hook in; . DOWN .+
-
-        StateCluster up = build_Atom(Label.UP, associatedAST);
-        wildRoot = build_AB(wildRoot,up); // hook in; . DOWN .+ UP
-
-        // make optional . alt
-        StateCluster optionalNodeAlt = build_Wildcard(associatedAST);
-
-        List alts = new ArrayList();
-        alts.add(wildRoot);
-        alts.add(optionalNodeAlt);
-        StateCluster blk = build_AlternativeBlock(alts);
-
-        return blk;
-    }
-
-    /** Given a collapsed block of alts (a set of atoms), pull out
-     *  the set and return it.
-     */
-    protected IntSet getCollapsedBlockAsSet(State blk) {
-        State s0 = blk;
-        if ( s0!=null && s0.transition(0)!=null ) {
-            State s1 = s0.transition(0).target;
-            if ( s1!=null && s1.transition(0)!=null ) {
-                Label label = s1.transition(0).label;
-                if ( label.isSet() ) {
-                    return label.getSet();
-                }
-            }
-        }
-        return null;
-    }
-
-	private void transitionBetweenStates(NFAState a, NFAState b, int label) {
-		Transition e = new Transition(label,b);
-		a.addTransition(e);
-	}
-}
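
The factory deleted above derives every subrule shape from the same two primitives, newState() and an epsilon transitionBetweenStates() call, and its public build_* methods compose freely, as build_WildcardTree shows. A minimal sketch in that same style, assuming a hypothetical helper inside the factory (build_AnyChildren is not a method of the removed file):

    /** Hypothetical helper, sketched after build_WildcardTree; builds an NFA fragment
     *  for DOWN .* UP, i.e. descend into a tree node, skip any children, come back up.
     */
    public StateCluster build_AnyChildren(GrammarAST associatedAST) {
        StateCluster down = build_Atom(Label.DOWN, associatedAST);              // DOWN
        StateCluster anyChildren = build_Astar(build_Wildcard(associatedAST));  // .*
        StateCluster up = build_Atom(Label.UP, associatedAST);                  // UP
        // epsilon-join the three clusters left to right, as build_AB does for sequences
        return build_AB(build_AB(down, anyChildren), up);
    }
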
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/NameSpaceChecker.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/NameSpaceChecker.java
deleted file mode 100644
index 6c6dbb5..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/NameSpaceChecker.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.Label;
-import org.antlr.runtime.Token;
-
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
-public class NameSpaceChecker {
-	protected Grammar grammar;
-
-	public NameSpaceChecker(Grammar grammar) {
-		this.grammar = grammar;
-	}
-
-	public void checkConflicts() {
-		for (int i = CompositeGrammar.MIN_RULE_INDEX; i < grammar.composite.ruleIndexToRuleList.size(); i++) {
-			Rule r = grammar.composite.ruleIndexToRuleList.elementAt(i);
-			if ( r==null ) {
-				continue;
-			}
-			// walk all labels for Rule r
-			if ( r.labelNameSpace!=null ) {
-				Iterator it = r.labelNameSpace.values().iterator();
-				while ( it.hasNext() ) {
-					Grammar.LabelElementPair pair = (Grammar.LabelElementPair) it.next();
-					checkForLabelConflict(r, pair.label);
-				}
-			}
-			// walk rule scope attributes for Rule r
-			if ( r.ruleScope!=null ) {
-				List attributes = r.ruleScope.getAttributes();
-				for (int j = 0; j < attributes.size(); j++) {
-					Attribute attribute = (Attribute) attributes.get(j);
-					checkForRuleScopeAttributeConflict(r, attribute);
-				}
-			}
-			checkForRuleDefinitionProblems(r);
-			checkForRuleArgumentAndReturnValueConflicts(r);
-		}
-		// check all global scopes against tokens
-		Iterator it = grammar.getGlobalScopes().values().iterator();
-		while (it.hasNext()) {
-			AttributeScope scope = (AttributeScope) it.next();
-			checkForGlobalScopeTokenConflict(scope);
-		}
-		// check for missing rule, tokens
-		lookForReferencesToUndefinedSymbols();
-	}
-
-	protected void checkForRuleArgumentAndReturnValueConflicts(Rule r) {
-		if ( r.returnScope!=null ) {
-			Set conflictingKeys = r.returnScope.intersection(r.parameterScope);
-			if (conflictingKeys!=null) {
-				for (Iterator it = conflictingKeys.iterator(); it.hasNext();) {
-					String key = (String) it.next();
-					ErrorManager.grammarError(
-						ErrorManager.MSG_ARG_RETVAL_CONFLICT,
-						grammar,
-						r.tree.getToken(),
-						key,
-						r.name);
-				}
-			}
-		}
-	}
-
-	protected void checkForRuleDefinitionProblems(Rule r) {
-		String ruleName = r.name;
-		Token ruleToken = r.tree.getToken();
-		int msgID = 0;
-		if ( (grammar.type==Grammar.PARSER||grammar.type==Grammar.TREE_PARSER) &&
-			 Character.isUpperCase(ruleName.charAt(0)) )
-		{
-			msgID = ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED;
-        }
-        else if ( grammar.type==Grammar.LEXER &&
-			      Character.isLowerCase(ruleName.charAt(0)) &&
-			      !r.isSynPred )
-		{
-			msgID = ErrorManager.MSG_PARSER_RULES_NOT_ALLOWED;
-        }
-		else if ( grammar.getGlobalScope(ruleName)!=null ) {
-			msgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
-		}
-		if ( msgID!=0 ) {
-			ErrorManager.grammarError(msgID, grammar, ruleToken, ruleName);
-		}
-	}
-
-	/** If ref to undefined rule, give error at first occurrence.
-	 * 
-	 *  Give error if you cannot find the scope override on a rule reference.
-	 *
-	 *  If you ref ID in a combined grammar and don't define ID as a lexer rule
-	 *  it is an error.
-	 */
-	protected void lookForReferencesToUndefinedSymbols() {
-		// for each rule ref, ask if there is a rule definition
-		for (Iterator iter = grammar.ruleRefs.iterator(); iter.hasNext();) {
-			GrammarAST refAST = (GrammarAST)iter.next();
-			Token tok = refAST.token;
-			String ruleName = tok.getText();
-			Rule localRule = grammar.getLocallyDefinedRule(ruleName);
-			Rule rule = grammar.getRule(ruleName);
-			if ( localRule==null && rule!=null ) { // imported rule?
-				grammar.delegatedRuleReferences.add(rule);
-				rule.imported = true;
-			}
-			if ( rule==null && grammar.getTokenType(ruleName)!=Label.EOF ) {
-				ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_RULE_REF,
-										  grammar,
-										  tok,
-										  ruleName);
-			}
-        }
-		if ( grammar.type==Grammar.COMBINED ) {
-			// if we're a combined grammar, we know which token IDs have no
-			// associated lexer rule.
-			for (Iterator iter = grammar.tokenIDRefs.iterator(); iter.hasNext();) {
-				Token tok = (Token) iter.next();
-				String tokenID = tok.getText();
-				if ( !grammar.composite.lexerRules.contains(tokenID) &&
-					 grammar.getTokenType(tokenID)!=Label.EOF )
-				{
-					ErrorManager.grammarWarning(ErrorManager.MSG_NO_TOKEN_DEFINITION,
-												grammar,
-												tok,
-												tokenID);
-				}
-			}
-		}
-		// check scopes and scoped rule refs
-		for (Iterator it = grammar.scopedRuleRefs.iterator(); it.hasNext();) {
-			GrammarAST scopeAST = (GrammarAST)it.next(); // ^(DOT ID atom)
-			Grammar scopeG = grammar.composite.getGrammar(scopeAST.getText());
-			GrammarAST refAST = (GrammarAST)scopeAST.getChild(1);
-			String ruleName = refAST.getText();
-			if ( scopeG==null ) {
-				ErrorManager.grammarError(ErrorManager.MSG_NO_SUCH_GRAMMAR_SCOPE,
-										  grammar,
-										  scopeAST.getToken(),
-										  scopeAST.getText(),
-										  ruleName);
-			}
-			else {
-				Rule rule = grammar.getRule(scopeG.name, ruleName);
-				if ( rule==null ) {
-					ErrorManager.grammarError(ErrorManager.MSG_NO_SUCH_RULE_IN_SCOPE,
-											  grammar,
-											  scopeAST.getToken(),
-											  scopeAST.getText(),
-											  ruleName);
-				}
-			}
-		}
-	}
-
-	protected void checkForGlobalScopeTokenConflict(AttributeScope scope) {
-		if ( grammar.getTokenType(scope.getName())!=Label.INVALID ) {
-			ErrorManager.grammarError(ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE,
-									  grammar, null, scope.getName());
-		}
-	}
-
-	/** Check for collision of a rule-scope dynamic attribute with:
-	 *  arg, return value, rule name itself.  Labels are checked elsewhere.
-	 */
-	public void checkForRuleScopeAttributeConflict(Rule r, Attribute attribute) {
-		int msgID = 0;
-		Object arg2 = null;
-		String attrName = attribute.name;
-		if ( r.name.equals(attrName) ) {
-			msgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE;
-			arg2 = r.name;
-		}
-		else if ( (r.returnScope!=null&&r.returnScope.getAttribute(attrName)!=null) ||
-				  (r.parameterScope!=null&&r.parameterScope.getAttribute(attrName)!=null) )
-		{
-			msgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
-			arg2 = r.name;
-		}
-		if ( msgID!=0 ) {
-			ErrorManager.grammarError(msgID,grammar,r.tree.getToken(),attrName,arg2);
-		}
-	}
-
-	/** Make sure a label doesn't conflict with another symbol.
-	 *  Labels must not conflict with: rules, tokens, scope names,
-	 *  return values, parameters, and rule-scope dynamic attributes
-	 *  defined in surrounding rule.
-	 */
-	protected void checkForLabelConflict(Rule r, Token label) {
-		int msgID = 0;
-		Object arg2 = null;
-		if ( grammar.getGlobalScope(label.getText())!=null ) {
-			msgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
-		}
-		else if ( grammar.getRule(label.getText())!=null ) {
-			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE;
-		}
-		else if ( grammar.getTokenType(label.getText())!=Label.INVALID ) {
-			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_TOKEN;
-		}
-		else if ( r.ruleScope!=null && r.ruleScope.getAttribute(label.getText())!=null ) {
-			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE;
-			arg2 = r.name;
-		}
-		else if ( (r.returnScope!=null&&r.returnScope.getAttribute(label.getText())!=null) ||
-				  (r.parameterScope!=null&&r.parameterScope.getAttribute(label.getText())!=null) )
-		{
-			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
-			arg2 = r.name;
-		}
-		if ( msgID!=0 ) {
-			ErrorManager.grammarError(msgID,grammar,label,label.getText(),arg2);
-		}
-	}
-
-	/** If type of previous label differs from new label's type, that's an error.
-	 */
-	public boolean checkForLabelTypeMismatch(Rule r, Token label, int type) {
-		Grammar.LabelElementPair prevLabelPair =
-			(Grammar.LabelElementPair)r.labelNameSpace.get(label.getText());
-		if ( prevLabelPair!=null ) {
-			// label already defined; if same type, no problem
-			if ( prevLabelPair.type != type ) {
-				String typeMismatchExpr =
-					Grammar.LabelTypeToString[type]+"!="+
-					Grammar.LabelTypeToString[prevLabelPair.type];
-				ErrorManager.grammarError(
-					ErrorManager.MSG_LABEL_TYPE_CONFLICT,
-					grammar,
-					label,
-					label.getText(),
-					typeMismatchExpr);
-				return true;
-			}
-		}
-		return false;
-	}
-}
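
The checker above is constructed with a single Grammar and driven through checkConflicts(); its call site lives elsewhere in the tool and is not part of this diff. A hedged usage sketch, with grammar standing in for an already-parsed Grammar instance:

    // Hypothetical driver sketch; assumes a fully parsed Grammar named 'grammar'.
    NameSpaceChecker checker = new NameSpaceChecker(grammar);
    // Reports label/rule/token/scope collisions and references to undefined symbols
    // through ErrorManager, via the methods deleted above.
    checker.checkConflicts();
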
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/NonRegularDecisionMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/NonRegularDecisionMessage.java
deleted file mode 100644
index 169f99a..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/NonRegularDecisionMessage.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.DecisionProbe;
-import org.stringtemplate.v4.ST;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
-/** More than a single alternative recurses, so this decision is not regular. */
-public class NonRegularDecisionMessage extends Message {
-	public DecisionProbe probe;
-	public Set<Integer> altsWithRecursion;
-
-	public NonRegularDecisionMessage(DecisionProbe probe, Set<Integer> altsWithRecursion) {
-		super(ErrorManager.MSG_NONREGULAR_DECISION);
-		this.probe = probe;
-		this.altsWithRecursion = altsWithRecursion;
-	}
-
-	public String toString() {
-		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
-		line = decisionASTNode.getLine();
-		column = decisionASTNode.getCharPositionInLine();
-		String fileName = probe.dfa.nfa.grammar.getFileName();
-		if ( fileName!=null ) {
-			file = fileName;
-		}
-
-		ST st = getMessageTemplate();
-		String ruleName = probe.dfa.getNFADecisionStartState().enclosingRule.name;
-		st.add("ruleName", ruleName);
-		List sortedAlts = new ArrayList();
-		sortedAlts.addAll(altsWithRecursion);
-		Collections.sort(sortedAlts); // make sure it's 1, 2, ...
-		st.add("alts", sortedAlts);
-
-		return super.toString(st);
-	}
-
-}
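
NonRegularDecisionMessage is a plain data holder that formats itself lazily in toString(). A hedged construction sketch; probe and altsWithRecursion would come from DFA analysis code that is not part of this hunk:

    // Hypothetical sketch; in the tool this is presumably reported through ErrorManager.
    NonRegularDecisionMessage msg =
        new NonRegularDecisionMessage(probe, altsWithRecursion);
    System.err.println(msg);  // fills the template with the rule name and sorted alt numbers
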
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/RandomPhrase.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/RandomPhrase.java
deleted file mode 100644
index fff6086..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/RandomPhrase.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.Tool;
-import org.antlr.analysis.Label;
-import org.antlr.analysis.NFAState;
-import org.antlr.analysis.RuleClosureTransition;
-import org.antlr.analysis.Transition;
-import org.antlr.misc.IntervalSet;
-import org.antlr.misc.Utils;
-
-import java.io.BufferedReader;
-import java.io.FileReader;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.Stack;
-
-/** Generate a random phrase given a grammar.
- *  Usage:
- *     java org.antlr.tool.RandomPhrase grammarFile.g startRule [seed]
- *
- *  For example:
- *     java org.antlr.tool.RandomPhrase simple.g program 342
- *
- *  The seed acts like a unique identifier so you can get the same random
- *  phrase back during unit testing, for example.
- *
- *  If you do not specify a seed, then the current time in milliseconds is used,
- *  effectively guaranteeing that you'll never see that seed again.
- *
- *  NOTE: this does not work well for large grammars...it tends to recurse
- *  too much and build really long strings.  I need throttle control; later.
- */
-public class RandomPhrase {
-	public static final boolean debug = false;
-
-	protected static Random random;
-
-	/** An experimental method to generate random phrases for a given
-	 *  grammar and start rule.  Fills tokenTypes with the token types of the phrase.
-	 */
-	protected static void randomPhrase(Grammar g, List<Integer> tokenTypes, String startRule) {
-		NFAState state = g.getRuleStartState(startRule);
-		NFAState stopState = g.getRuleStopState(startRule);
-
-		Stack ruleInvocationStack = new Stack();
-		while ( true ) {
-			if ( state==stopState && ruleInvocationStack.size()==0 ) {
-				break;
-			}
-			if ( debug ) System.out.println("state "+state);
-			if ( state.getNumberOfTransitions()==0 ) {
-				if ( debug ) System.out.println("dangling state: "+state);
-				return;
-			}
-			// end of rule node
-			if ( state.isAcceptState() ) {
-				NFAState invokingState = (NFAState)ruleInvocationStack.pop();
-				if ( debug ) System.out.println("pop invoking state "+invokingState);
-				//System.out.println("leave "+state.enclosingRule.name);
-				RuleClosureTransition invokingTransition =
-					(RuleClosureTransition)invokingState.transition[0];
-				// move to node after state that invoked this rule
-				state = invokingTransition.followState;
-				continue;
-			}
-			if ( state.getNumberOfTransitions()==1 ) {
-				// no branching, just take this path
-				Transition t0 = state.transition[0];
-				if ( t0 instanceof RuleClosureTransition ) {
-					ruleInvocationStack.push(state);
-					if ( debug ) System.out.println("push state "+state);
-					//System.out.println("call "+((RuleClosureTransition)t0).rule.name);
-					//System.out.println("stack depth="+ruleInvocationStack.size());
-				}
-				else if ( t0.label.isSet() || t0.label.isAtom() ) {
-					tokenTypes.add( getTokenType(t0.label) );
-				}
-				state = (NFAState)t0.target;
-				continue;
-			}
-
-			int decisionNumber = state.getDecisionNumber();
-			if ( decisionNumber==0 ) {
-				System.out.println("weird: no decision number but a choice node");
-				continue;
-			}
-			// decision point, pick ith alternative randomly
-			int n = g.getNumberOfAltsForDecisionNFA(state);
-			int randomAlt = random.nextInt(n) + 1;
-			if ( debug ) System.out.println("randomAlt="+randomAlt);
-			NFAState altStartState =
-				g.getNFAStateForAltOfDecision(state, randomAlt);
-			Transition t = altStartState.transition[0];
-			state = (NFAState)t.target;
-		}
-	}
-
-	protected static Integer getTokenType(Label label) {
-		if ( label.isSet() ) {
-			// pick random element of set
-			IntervalSet typeSet = (IntervalSet)label.getSet();
-			int randomIndex = random.nextInt(typeSet.size());
-			return typeSet.get(randomIndex);
-		}
-		else {
-			return Utils.integer(label.getAtom());
-		}
-		//System.out.println(t0.label.toString(g));
-	}
-
-	/** Used to generate random strings */
-	public static void main(String[] args) {
-		if ( args.length < 2 ) {
-			System.err.println("usage: java org.antlr.tool.RandomPhrase grammarfile startrule");
-			return;
-		}
-		String grammarFileName = args[0];
-		String startRule = args[1];
-		long seed = System.currentTimeMillis(); // seed from current time unless specified
-		if ( args.length==3 ) {
-			String seedStr = args[2];
-			seed = Long.parseLong(seedStr);
-		}
-		try {
-			random = new Random(seed);
-
-			CompositeGrammar composite = new CompositeGrammar();
-			Tool tool = new Tool();
-			Grammar parser = new Grammar(tool, grammarFileName, composite);
-			composite.setDelegationRoot(parser);
-
-			FileReader fr = new FileReader(grammarFileName);
-			BufferedReader br = new BufferedReader(fr);
-			parser.parseAndBuildAST(br);
-			br.close();
-
-			parser.composite.assignTokenTypes();
-			parser.composite.defineGrammarSymbols();
-			parser.composite.createNFAs();
-
-			List leftRecursiveRules = parser.checkAllRulesForLeftRecursion();
-			if ( leftRecursiveRules.size()>0 ) {
-				return;
-			}
-
-			if ( parser.getRule(startRule)==null ) {
-				System.out.println("undefined start rule "+startRule);
-				return;
-			}
-
-			String lexerGrammarText = parser.getLexerGrammar();
-			Grammar lexer = new Grammar(tool);
-			lexer.importTokenVocabulary(parser);
-			lexer.fileName = grammarFileName;
-			if ( lexerGrammarText!=null ) {
-				lexer.setGrammarContent(lexerGrammarText);
-			}
-			else {
-				System.err.println("no lexer grammar found in "+grammarFileName);
-			}
-			lexer.buildNFA();
-			leftRecursiveRules = lexer.checkAllRulesForLeftRecursion();
-			if ( leftRecursiveRules.size()>0 ) {
-				return;
-			}
-			//System.out.println("lexer:\n"+lexer);
-
-			List<Integer> tokenTypes = new ArrayList<Integer>(100);
-			randomPhrase(parser, tokenTypes, startRule);
-			System.out.println("token types="+tokenTypes);
-			for (int i = 0; i < tokenTypes.size(); i++) {
-				Integer ttypeI = (Integer) tokenTypes.get(i);
-				int ttype = ttypeI.intValue();
-				String ttypeDisplayName = parser.getTokenDisplayName(ttype);
-				if ( Character.isUpperCase(ttypeDisplayName.charAt(0)) ) {
-					List<Integer> charsInToken = new ArrayList<Integer>(10);
-					randomPhrase(lexer, charsInToken, ttypeDisplayName);
-					System.out.print(" ");
-					for (int j = 0; j < charsInToken.size(); j++) {
-						java.lang.Integer cI = (java.lang.Integer) charsInToken.get(j);
-						System.out.print((char)cI.intValue());
-					}
-				}
-				else { // it's a literal
-					String literal =
-						ttypeDisplayName.substring(1,ttypeDisplayName.length()-1);
-					System.out.print(" "+literal);
-				}
-			}
-			System.out.println();
-		}
-		catch (Error er) {
-			System.err.println("Error walking "+grammarFileName+" rule "+startRule+" seed "+seed);
-			er.printStackTrace(System.err);
-		}
-		catch (Exception e) {
-			System.err.println("Exception walking "+grammarFileName+" rule "+startRule+" seed "+seed);
-			e.printStackTrace(System.err);
-		}
-	}
-}
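
main() can also be invoked programmatically; a sketch reusing the placeholder grammar, start rule, and seed from the class comment (passing the same seed reproduces the same phrase):

    // Hypothetical programmatic invocation, equivalent to the command line
    //   java org.antlr.tool.RandomPhrase simple.g program 342
    RandomPhrase.main(new String[] { "simple.g", "program", "342" });
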
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/RecursionOverflowMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/RecursionOverflowMessage.java
deleted file mode 100644
index 50e72c6..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/RecursionOverflowMessage.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.DFAState;
-import org.antlr.analysis.DecisionProbe;
-import org.stringtemplate.v4.ST;
-
-import java.util.Collection;
-import java.util.List;
-
-/** Indicates recursion overflow.  A DFA state tried to add an NFA configuration
- *  with NFA state p that was mentioned in its stack context too many times.
- */
-public class RecursionOverflowMessage extends Message {
-	public DecisionProbe probe;
-	public DFAState sampleBadState;
-	public int alt;
-	public Collection targetRules;
-	public Collection callSiteStates;
-
-	public RecursionOverflowMessage(DecisionProbe probe,
-									DFAState sampleBadState,
-									int alt,
-									Collection targetRules,
-									Collection callSiteStates)
-	{
-		super(ErrorManager.MSG_RECURSION_OVERLOW);
-		this.probe = probe;
-		this.sampleBadState = sampleBadState;
-		this.alt = alt;
-		this.targetRules = targetRules;
-		this.callSiteStates = callSiteStates;
-	}
-
-	public String toString() {
-		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
-		line = decisionASTNode.getLine();
-		column = decisionASTNode.getCharPositionInLine();
-		String fileName = probe.dfa.nfa.grammar.getFileName();
-		if ( fileName!=null ) {
-			file = fileName;
-		}
-
-		ST st = getMessageTemplate();
-		st.add("targetRules", targetRules);
-		st.add("alt", alt);
-		st.add("callSiteStates", callSiteStates);
-
-		List labels =
-			probe.getSampleNonDeterministicInputSequence(sampleBadState);
-		String input = probe.getInputSequenceDisplay(labels);
-		st.add("input", input);
-
-		return super.toString(st);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/Rule.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/Rule.java
deleted file mode 100644
index 0c141f3..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/Rule.java
+++ /dev/null
@@ -1,582 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.analysis.NFAState;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.runtime.CommonToken;
-import org.antlr.runtime.Token;
-
-import java.util.*;
-
-/** Combine the info associated with a rule. */
-public class Rule {
-	public String name;
-	public int index;
-	public String modifier;
-	public NFAState startState;
-	public NFAState stopState;
-
-	/** This rule's options */
-	protected Map options;
-
-	public static final Set legalOptions =
-			new HashSet() {
-                {
-                    add("k"); add("greedy"); add("memoize");
-                    add("backtrack");
-                }
-            };
-
-	/** The AST representing the whole rule */
-	public GrammarAST tree;
-
-	/** To which grammar does this belong? */
-	public Grammar grammar;
-
-	/** For convenience, track the argument def AST action node if any */
-	public GrammarAST argActionAST;
-
-	public GrammarAST EORNode;
-
-	/** The return values of a rule and predefined rule attributes */
-	public AttributeScope returnScope;
-
-	public AttributeScope parameterScope;
-
-	/** the attributes defined with "scope {...}" inside a rule */
-	public AttributeScope ruleScope;
-
-	/** A list of scope names (String) used by this rule */
-	public List useScopes;
-
-    /** Exceptions that this rule can throw */
-    public Set<String> throwsSpec;
-
-    /** A list of all LabelElementPair attached to tokens like id=ID */
-    public LinkedHashMap tokenLabels;
-
-    /** A list of all LabelElementPair attached to tokens like x=. in tree grammar */
-    public LinkedHashMap wildcardTreeLabels;
-
-    /** A list of all LabelElementPair attached to tokens like x+=. in tree grammar */
-    public LinkedHashMap wildcardTreeListLabels;
-
-	/** A list of all LabelElementPair attached to single char literals like x='a' */
-	public LinkedHashMap charLabels;
-
-	/** A list of all LabelElementPair attached to rule references like f=field */
-	public LinkedHashMap ruleLabels;
-
-	/** A list of all Token list LabelElementPair like ids+=ID */
-	public LinkedHashMap tokenListLabels;
-
-	/** A list of all rule ref list LabelElementPair like ids+=expr */
-	public LinkedHashMap ruleListLabels;
-
-	/** All labels go in here (plus being split per the above lists) to
-	 *  catch dup label and label type mismatches.
-	 */
-	protected Map<String, Grammar.LabelElementPair> labelNameSpace =
-		new HashMap<String, Grammar.LabelElementPair>();
-
-	/** Map a name to an action for this rule.  Currently init is only
-	 *  one we use, but we can add more in future.
-	 *  The code generator will use this to fill holes in the rule template.
-	 *  I track the AST node for the action in case I need the line number
-	 *  for errors.  A better name is probably namedActions, but I don't
-	 *  want everyone to have to change their code gen templates now.
-	 */
-	protected Map<String, GrammarAST> actions =
-		new HashMap<String, GrammarAST>();
-
-	/** Track all executable actions other than named actions like @init.
-	 *  Also tracks exception handlers, predicates, and rewrites.
-	 *  We need to examine these actions before code generation so
-	 *  that we can detect refs to $rule.attr etc...
-	 */
-	protected List<GrammarAST> inlineActions = new ArrayList<GrammarAST>();
-
-	public int numberOfAlts;
-
-	/** Each alt has a Map<tokenRefName,List<tokenRefAST>>; range 1..numberOfAlts.
-	 *  So, if there are 3 ID refs in a rule's alt number 2, you'll have
-	 *  altToTokenRef[2].get("ID").size()==3.  This is used to see if $ID is ok.
-	 *  There must be only one ID reference in the alt for $ID to be ok in
-	 *  an action--must be unique.
-	 *
-	 *  This also tracks '+' and "int" literal token references
-	 *  (if not in LEXER).
-	 *
-	 *  Rewrite rules force tracking of all tokens.
-	 */
-	protected Map<String, List<GrammarAST>>[] altToTokenRefMap;
-
-	/** Each alt has a Map<ruleRefName,List<ruleRefAST>>; range 1..numberOfAlts
-	 *  So, if there are 3 expr refs in a rule's alt number 2, you'll have
-	 *  altToRuleRef[2].get("expr").size()==3.  This is used to see if $expr is ok.
-	 *  There must be only one expr reference in the alt for $expr to be ok in
-	 *  an action--must be unique.
-	 *
-	 *  Rewrite rules force tracking of all rule result ASTs. 1..n
-	 */
-	protected Map<String, List<GrammarAST>>[] altToRuleRefMap;
-
-	/** Do not generate start, stop etc... in a return value struct unless
-	 *  somebody references $r.start somewhere.
-	 */
-	public boolean referencedPredefinedRuleAttributes = false;
-
-	public boolean isSynPred = false;
-
-	public boolean imported = false;
-
-	public Rule(Grammar grammar,
-				String ruleName,
-				int ruleIndex,
-				int numberOfAlts)
-	{
-		this.name = ruleName;
-		this.index = ruleIndex;
-		this.numberOfAlts = numberOfAlts;
-		this.grammar = grammar;
-		throwsSpec = new HashSet<String>();
-		altToTokenRefMap = new Map[numberOfAlts+1];
-		altToRuleRefMap = new Map[numberOfAlts+1];
-		for (int alt=1; alt<=numberOfAlts; alt++) {
-			altToTokenRefMap[alt] = new HashMap<String, List<GrammarAST>>();
-			altToRuleRefMap[alt] = new HashMap<String, List<GrammarAST>>();
-		}
-	}
-
-	public static int getRuleType(String ruleName){
-		if (ruleName == null || ruleName.length() == 0)
-			throw new IllegalArgumentException("The specified rule name is not valid.");
-		return Character.isUpperCase(ruleName.charAt(0)) ? Grammar.LEXER : Grammar.PARSER;
-	}
-
-	public void defineLabel(Token label, GrammarAST elementRef, int type) {
-		Grammar.LabelElementPair pair = grammar.new LabelElementPair(label,elementRef);
-		pair.type = type;
-		labelNameSpace.put(label.getText(), pair);
-		switch ( type ) {
-            case Grammar.TOKEN_LABEL :
-                if ( tokenLabels==null ) tokenLabels = new LinkedHashMap();
-                tokenLabels.put(label.getText(), pair);
-                break;
-            case Grammar.WILDCARD_TREE_LABEL :
-                if ( wildcardTreeLabels==null ) wildcardTreeLabels = new LinkedHashMap();
-                wildcardTreeLabels.put(label.getText(), pair);
-                break;
-            case Grammar.WILDCARD_TREE_LIST_LABEL :
-                if ( wildcardTreeListLabels==null ) wildcardTreeListLabels = new LinkedHashMap();
-                wildcardTreeListLabels.put(label.getText(), pair);
-                break;
-			case Grammar.RULE_LABEL :
-				if ( ruleLabels==null ) ruleLabels = new LinkedHashMap();
-				ruleLabels.put(label.getText(), pair);
-				break;
-			case Grammar.TOKEN_LIST_LABEL :
-				if ( tokenListLabels==null ) tokenListLabels = new LinkedHashMap();
-				tokenListLabels.put(label.getText(), pair);
-				break;
-			case Grammar.RULE_LIST_LABEL :
-				if ( ruleListLabels==null ) ruleListLabels = new LinkedHashMap();
-				ruleListLabels.put(label.getText(), pair);
-				break;
-			case Grammar.CHAR_LABEL :
-				if ( charLabels==null ) charLabels = new LinkedHashMap();
-				charLabels.put(label.getText(), pair);
-				break;
-		}
-	}
-
-	public Grammar.LabelElementPair getLabel(String name) {
-		return (Grammar.LabelElementPair)labelNameSpace.get(name);
-	}
-
-	public Grammar.LabelElementPair getTokenLabel(String name) {
-		Grammar.LabelElementPair pair = null;
-		if ( tokenLabels!=null ) {
-			return (Grammar.LabelElementPair)tokenLabels.get(name);
-		}
-		return pair;
-	}
-
-	public Map getRuleLabels() {
-		return ruleLabels;
-	}
-
-	public Map getRuleListLabels() {
-		return ruleListLabels;
-	}
-
-	public Grammar.LabelElementPair getRuleLabel(String name) {
-		Grammar.LabelElementPair pair = null;
-		if ( ruleLabels!=null ) {
-			return (Grammar.LabelElementPair)ruleLabels.get(name);
-		}
-		return pair;
-	}
-
-	public Grammar.LabelElementPair getTokenListLabel(String name) {
-		Grammar.LabelElementPair pair = null;
-		if ( tokenListLabels!=null ) {
-			return (Grammar.LabelElementPair)tokenListLabels.get(name);
-		}
-		return pair;
-	}
-
-	public Grammar.LabelElementPair getRuleListLabel(String name) {
-		Grammar.LabelElementPair pair = null;
-		if ( ruleListLabels!=null ) {
-			return (Grammar.LabelElementPair)ruleListLabels.get(name);
-		}
-		return pair;
-	}
-
-	/** Track a token ID or literal like '+' and "void" as having been referenced
-	 *  somewhere within the alts (not rewrite sections) of a rule.
-	 *
-	 *  This differs from Grammar.altReferencesTokenID(), which tracks all
-	 *  token IDs to check for token IDs without corresponding lexer rules.
-	 */
-	public void trackTokenReferenceInAlt(GrammarAST refAST, int outerAltNum) {
-		List refs = (List)altToTokenRefMap[outerAltNum].get(refAST.getText());
-		if ( refs==null ) {
-			refs = new ArrayList();
-			altToTokenRefMap[outerAltNum].put(refAST.getText(), refs);
-		}
-		refs.add(refAST);
-	}
-
-	public List getTokenRefsInAlt(String ref, int outerAltNum) {
-		if ( altToTokenRefMap[outerAltNum]!=null ) {
-			List tokenRefASTs = (List)altToTokenRefMap[outerAltNum].get(ref);
-			return tokenRefASTs;
-		}
-		return null;
-	}
-
-	public void trackRuleReferenceInAlt(GrammarAST refAST, int outerAltNum) {
-		List refs = (List)altToRuleRefMap[outerAltNum].get(refAST.getText());
-		if ( refs==null ) {
-			refs = new ArrayList();
-			altToRuleRefMap[outerAltNum].put(refAST.getText(), refs);
-		}
-		refs.add(refAST);
-	}
-
-	public List getRuleRefsInAlt(String ref, int outerAltNum) {
-		if ( altToRuleRefMap[outerAltNum]!=null ) {
-			List ruleRefASTs = (List)altToRuleRefMap[outerAltNum].get(ref);
-			return ruleRefASTs;
-		}
-		return null;
-	}
-
-	public Set getTokenRefsInAlt(int altNum) {
-		return altToTokenRefMap[altNum].keySet();
-	}
-
-	/** For use with rewrite rules, we must track all tokens matched on the
-	 *  left-hand-side; so we need Lists.  This is a unique list of all
-	 *  token types for which the rule needs a list of tokens.  This
-	 *  is called from the rule template not directly by the code generator.
-	 */
-	public Set getAllTokenRefsInAltsWithRewrites() {
-		String output = (String)grammar.getOption("output");
-		Set<String> tokens = new HashSet<String>();
-		if ( output==null || !output.equals("AST") ) {
-			// return nothing if not generating trees; i.e., don't do for templates
-			return tokens;
-		}
-		//System.out.println("blk "+tree.findFirstType(ANTLRParser.BLOCK).toStringTree());
-		for (int i = 1; i <= numberOfAlts; i++) {
-			if ( hasRewrite(i) ) {
-				Map<String, List<GrammarAST>> m = altToTokenRefMap[i];
-				for (String tokenName : m.keySet()) {
-					// convert token name like ID to ID, "void" to 31
-					int ttype = grammar.getTokenType(tokenName);
-					String label = grammar.generator.getTokenTypeAsTargetLabel(ttype);
-					tokens.add(label);
-				}
-			}
-		}
-		return tokens;
-	}
-
-	public Set getRuleRefsInAlt(int outerAltNum) {
-		return altToRuleRefMap[outerAltNum].keySet();
-	}
-
-	/** For use with rewrite rules, we must track all rule AST results on the
-	 *  left-hand-side; so we need Lists.  This is a unique list of all
-	 *  rule results for which the rule needs a list of results.
-	 */
-	public Set getAllRuleRefsInAltsWithRewrites() {
-		Set rules = new HashSet();
-		for (int i = 1; i <= numberOfAlts; i++) {
-			if ( hasRewrite(i) ) {
-				Map m = altToRuleRefMap[i];
-				rules.addAll(m.keySet());
-			}
-		}
-		return rules;
-	}
-
-	public List<GrammarAST> getInlineActions() {
-		return inlineActions;
-	}
-
-	public boolean hasRewrite(int i) {
-		GrammarAST blk = tree.findFirstType(ANTLRParser.BLOCK);
-		GrammarAST alt = blk.getBlockALT(i);
-		GrammarAST rew = (GrammarAST)alt.getNextSibling();
-		if ( rew!=null && rew.getType()==ANTLRParser.REWRITES ) return true;
-		if ( alt.findFirstType(ANTLRParser.REWRITES)!=null ) return true;
-		return false;
-	}
-
-	/** Return the scope containing name */
-	public AttributeScope getAttributeScope(String name) {
-		AttributeScope scope = getLocalAttributeScope(name);
-		if ( scope!=null ) {
-			return scope;
-		}
-		if ( ruleScope!=null && ruleScope.getAttribute(name)!=null ) {
-			scope = ruleScope;
-		}
-		return scope;
-	}
-
-	/** Get the arg, return value, or predefined property for this rule */
-	public AttributeScope getLocalAttributeScope(String name) {
-		AttributeScope scope = null;
-		if ( returnScope!=null && returnScope.getAttribute(name)!=null ) {
-			scope = returnScope;
-		}
-		else if ( parameterScope!=null && parameterScope.getAttribute(name)!=null ) {
-			scope = parameterScope;
-		}
-		else {
-			AttributeScope rulePropertiesScope =
-				RuleLabelScope.grammarTypeToRulePropertiesScope[grammar.type];
-			if ( rulePropertiesScope.getAttribute(name)!=null ) {
-				scope = rulePropertiesScope;
-			}
-		}
-		return scope;
-	}
-
-	/** For references to tokens by name rather than by label, such as $ID, we
-	 *  need to get the existing label for the ID ref or create a new
-	 *  one.
-	 */
-	public String getElementLabel(String refdSymbol,
-								  int outerAltNum,
-								  CodeGenerator generator)
-	{
-		GrammarAST uniqueRefAST;
-		if ( grammar.type != Grammar.LEXER &&
-			 Character.isUpperCase(refdSymbol.charAt(0)) )
-		{
-			// symbol is a token
-			List tokenRefs = getTokenRefsInAlt(refdSymbol, outerAltNum);
-			uniqueRefAST = (GrammarAST)tokenRefs.get(0);
-		}
-		else {
-			// symbol is a rule
-			List ruleRefs = getRuleRefsInAlt(refdSymbol, outerAltNum);
-			uniqueRefAST = (GrammarAST)ruleRefs.get(0);
-		}
-		if ( uniqueRefAST.code==null ) {
-			// no code?  must not have gen'd yet; forward ref
-			return null;
-		}
-		String labelName = null;
-		String existingLabelName =
-			(String)uniqueRefAST.code.getAttribute("label");
-		// reuse any label or list label if it exists
-		if ( existingLabelName!=null ) {
-			labelName = existingLabelName;
-		}
-		else {
-			// else create new label
-			labelName = generator.createUniqueLabel(refdSymbol);
-			CommonToken label = new CommonToken(ANTLRParser.ID, labelName);
-			if ( grammar.type != Grammar.LEXER &&
-				 Character.isUpperCase(refdSymbol.charAt(0)) )
-			{
-				grammar.defineTokenRefLabel(name, label, uniqueRefAST);
-			}
-			else {
-				grammar.defineRuleRefLabel(name, label, uniqueRefAST);
-			}
-			uniqueRefAST.code.add("label", labelName);
-		}
-		return labelName;
-	}
-
-	/** If a rule has no user-defined return values and nobody references
-	 *  its start/stop (predefined attributes), then there is no need to
-	 *  define a struct; otherwise for now we assume a struct.  A rule also
-	 *  has multiple return values if you are building trees or templates.
-	 */
-	public boolean getHasMultipleReturnValues() {
-		return
-			referencedPredefinedRuleAttributes || grammar.buildAST() ||
-			grammar.buildTemplate() ||
-			(returnScope!=null && returnScope.attributes.size()>1);
-	}
-
-	public boolean getHasSingleReturnValue() {
-		return
-			!(referencedPredefinedRuleAttributes || grammar.buildAST() ||
-			  grammar.buildTemplate()) &&
-									   (returnScope!=null && returnScope.attributes.size()==1);
-	}
-
-	public boolean getHasReturnValue() {
-		return
-			referencedPredefinedRuleAttributes || grammar.buildAST() ||
-			grammar.buildTemplate() ||
-			(returnScope!=null && returnScope.attributes.size()>0);
-	}
-
-	public String getSingleValueReturnType() {
-		if ( returnScope!=null && returnScope.attributes.size()==1 ) {
-			Collection retvalAttrs = returnScope.attributes.values();
-			Object[] javaSucks = retvalAttrs.toArray();
-			return ((Attribute)javaSucks[0]).type;
-		}
-		return null;
-	}
-
-	public String getSingleValueReturnName() {
-		if ( returnScope!=null && returnScope.attributes.size()==1 ) {
-			Collection retvalAttrs = returnScope.attributes.values();
-			Object[] javaSucks = retvalAttrs.toArray();
-			return ((Attribute)javaSucks[0]).name;
-		}
-		return null;
-	}
-
-	/** Given @scope::name {action} define it for this grammar.  Later,
-	 *  the code generator will ask for the actions table.
-	 */
-	public void defineNamedAction(GrammarAST ampersandAST,
-								  GrammarAST nameAST,
-								  GrammarAST actionAST)
-	{
-		//System.out.println("rule @"+nameAST.getText()+"{"+actionAST.getText()+"}");
-		String actionName = nameAST.getText();
-		GrammarAST a = (GrammarAST)actions.get(actionName);
-		if ( a!=null ) {
-			ErrorManager.grammarError(
-				ErrorManager.MSG_ACTION_REDEFINITION,grammar,
-				nameAST.getToken(),nameAST.getText());
-		}
-		else {
-			actions.put(actionName,actionAST);
-		}
-	}
-
-	public void trackInlineAction(GrammarAST actionAST) {
-		inlineActions.add(actionAST);
-	}
-
-	public Map<String, GrammarAST> getActions() {
-		return actions;
-	}
-
-	public void setActions(Map<String, GrammarAST> actions) {
-		this.actions = actions;
-	}
-
-	/** Save the option key/value pair and process it; return the key
-	 *  or null if invalid option.
-	 */
-	public String setOption(String key, Object value, Token optionsStartToken) {
-		if ( !legalOptions.contains(key) ) {
-			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
-									  grammar,
-									  optionsStartToken,
-									  key);
-			return null;
-		}
-		if ( options==null ) {
-			options = new HashMap();
-		}
-        if ( key.equals("memoize") && value.toString().equals("true") ) {
-            grammar.atLeastOneRuleMemoizes = true;
-        }
-        if ( key.equals("backtrack") && value.toString().equals("true") ) {
-            grammar.composite.getRootGrammar().atLeastOneBacktrackOption = true;
-        }
-		if ( key.equals("k") ) {
-			grammar.numberOfManualLookaheadOptions++;
-		}
-		 options.put(key, value);
-		return key;
-	}
-
-	public void setOptions(Map options, Token optionsStartToken) {
-		if ( options==null ) {
-			this.options = null;
-			return;
-		}
-		Set keys = options.keySet();
-		for (Iterator it = keys.iterator(); it.hasNext();) {
-			String optionName = (String) it.next();
-			Object optionValue = options.get(optionName);
-			String stored=setOption(optionName, optionValue, optionsStartToken);
-			if ( stored==null ) {
-				it.remove();
-			}
-		}
-	}
-
-	/** Used during grammar imports to see if sets of rules intersect... This
-	 *  method and hashCode use the String name as the key for Rule objects.
-	public boolean equals(Object other) {
-		return this.name.equals(((Rule)other).name);
-	}
-	 */
-
-	/** Used during grammar imports to see if sets of rules intersect...
-	public int hashCode() {
-		return name.hashCode();
-	}
-	 * */
-
-	public String toString() { // used for testing
-		return "["+grammar.name+"."+name+",index="+index+",line="+tree.getToken().getLine()+"]";
-	}
-}
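
A hedged sketch of how the Rule API above fits together; grammar, optionsToken, labelToken, and elementRefAST are placeholders rather than identifiers from the removed file:

    // Hypothetical sketch: create a rule, set a legal option, and register a token label.
    Rule r = new Rule(grammar, "expr", /*ruleIndex=*/5, /*numberOfAlts=*/2);
    r.setOption("memoize", "true", optionsToken);                  // key must be in legalOptions
    r.defineLabel(labelToken, elementRefAST, Grammar.TOKEN_LABEL); // e.g. id=ID in an alt
    Grammar.LabelElementPair pair = r.getLabel(labelToken.getText());
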
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/RuleLabelScope.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/RuleLabelScope.java
deleted file mode 100644
index 265d245..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/RuleLabelScope.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.antlr.runtime.Token;
-
-public class RuleLabelScope extends AttributeScope {
-	/** Rules have a predefined set of attributes as well as
-	 *  the return values.  'text' needs to be computed on the fly, though.
-	 */
-	public static AttributeScope predefinedRulePropertiesScope =
-		new AttributeScope("RulePredefined",null) {{
-			addAttribute("text", null);
-			addAttribute("start", null);
-			addAttribute("stop", null);
-			addAttribute("tree", null);
-			addAttribute("st", null);
-			isPredefinedRuleScope = true;
-		}};
-
-	public static AttributeScope predefinedTreeRulePropertiesScope =
-		new AttributeScope("RulePredefined",null) {{
-			addAttribute("text", null);
-			addAttribute("start", null); // note: no stop; not meaningful
-			addAttribute("tree", null);
-			addAttribute("st", null);
-			isPredefinedRuleScope = true;
-		}};
-
-	public static AttributeScope predefinedLexerRulePropertiesScope =
-		new AttributeScope("LexerRulePredefined",null) {{
-			addAttribute("text", null);
-			addAttribute("type", null);
-			addAttribute("line", null);
-			addAttribute("index", null);
-			addAttribute("pos", null);
-			addAttribute("channel", null);
-			addAttribute("start", null);
-			addAttribute("stop", null);
-			addAttribute("int", null);
-			isPredefinedLexerRuleScope = true;
-		}};
-
-	public static AttributeScope[] grammarTypeToRulePropertiesScope =
-		{
-			null,
-			predefinedLexerRulePropertiesScope,	// LEXER
-			predefinedRulePropertiesScope,		// PARSER
-			predefinedTreeRulePropertiesScope,	// TREE_PARSER
-			predefinedRulePropertiesScope,		// COMBINED
-		};
-
-	public Rule referencedRule;
-
-	public RuleLabelScope(Rule referencedRule, Token actionToken) {
-		super("ref_"+referencedRule.name,actionToken);
-		this.referencedRule = referencedRule;
-	}
-
-	/** If you label a rule reference, you can access that rule's
-	 *  return values as well as any predefined attributes.
-	 */
-	public Attribute getAttribute(String name) {
-		AttributeScope rulePropertiesScope =
-			RuleLabelScope.grammarTypeToRulePropertiesScope[grammar.type];
-		if ( rulePropertiesScope.getAttribute(name)!=null ) {
-			return rulePropertiesScope.getAttribute(name);
-		}
-
-		if ( referencedRule.returnScope!=null ) {
-			return referencedRule.returnScope.getAttribute(name);
-		}
-		return null;
-	}
-}
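The getAttribute() method removed above resolves a labeled rule reference's attributes in two steps: the grammar type's predefined rule properties first, then the referenced rule's declared return values. The following is a hypothetical, simplified model of that resolution order; the names used are invented for illustration.

    import java.util.*;

    // Simplified, stand-alone model of RuleLabelScope.getAttribute():
    // predefined properties win, otherwise fall back to declared returns.
    class LabelLookupDemo {
        static final Set<String> PREDEFINED =
            new HashSet<String>(Arrays.asList("text", "start", "stop", "tree", "st"));

        static String resolve(String name, Map<String, String> declaredReturns) {
            if (PREDEFINED.contains(name)) return "predefined:" + name;     // e.g. $e.text
            if (declaredReturns.containsKey(name)) return "return:" + name; // e.g. $e.value
            return null;                                                    // unknown attribute
        }

        public static void main(String[] args) {
            Map<String, String> returns = Collections.singletonMap("value", "int");
            System.out.println(resolve("text", returns));  // predefined:text
            System.out.println(resolve("value", returns)); // return:value
            System.out.println(resolve("bogus", returns)); // null
        }
    }
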
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/Strip.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/Strip.java
deleted file mode 100644
index 50152ad..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/Strip.java
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.tool;
-
-import org.antlr.grammar.v3.ANTLRv3Lexer;
-import org.antlr.grammar.v3.ANTLRv3Parser;
-import org.antlr.runtime.*;
-import org.antlr.runtime.tree.CommonTree;
-import org.antlr.runtime.tree.TreeAdaptor;
-import org.antlr.runtime.tree.TreeWizard;
-
-import java.util.List;
-
-/** A basic action stripper. */
-public class Strip {
-    protected String filename;
-    protected TokenRewriteStream tokens;
-    protected boolean tree_option = false;
-    protected String args[];
-
-    public static void main(String args[]) throws Exception {
-        Strip s = new Strip(args);
-        s.parseAndRewrite();
-        System.out.println(s.tokens);
-    }
-
-    public Strip(String[] args) { this.args = args; }
-
-    public TokenRewriteStream getTokenStream() { return tokens; }
-
-    public void parseAndRewrite() throws Exception {
-        processArgs(args);
-        CharStream input = null;
-        if ( filename!=null ) input = new ANTLRFileStream(filename);
-        else input = new ANTLRInputStream(System.in);
-        // BUILD AST
-        ANTLRv3Lexer lex = new ANTLRv3Lexer(input);
-        tokens = new TokenRewriteStream(lex);
-        ANTLRv3Parser g = new ANTLRv3Parser(tokens);
-        ANTLRv3Parser.grammarDef_return r = g.grammarDef();
-        CommonTree t = (CommonTree)r.getTree();
-        if (tree_option) System.out.println(t.toStringTree());
-        rewrite(g.getTreeAdaptor(),t,g.getTokenNames());
-    }
-
-    public void rewrite(TreeAdaptor adaptor, CommonTree t, String[] tokenNames) throws Exception {
-        TreeWizard wiz = new TreeWizard(adaptor, tokenNames);
-
-        // ACTIONS STUFF
-        wiz.visit(t, ANTLRv3Parser.ACTION,
-           new TreeWizard.Visitor() {
-               public void visit(Object t) { ACTION(tokens, (CommonTree)t); }
-           });
-
-        wiz.visit(t, ANTLRv3Parser.AT,  // ^('@' id ACTION) rule actions
-            new TreeWizard.Visitor() {
-              public void visit(Object t) {
-                  CommonTree a = (CommonTree)t;
-                  CommonTree action = null;
-                  if ( a.getChildCount()==2 ) action = (CommonTree)a.getChild(1);
-                  else if ( a.getChildCount()==3 ) action = (CommonTree)a.getChild(2);
-                  if ( action.getType()==ANTLRv3Parser.ACTION ) {
-                      tokens.delete(a.getTokenStartIndex(),
-                                    a.getTokenStopIndex());
-                      killTrailingNewline(tokens, action.getTokenStopIndex());
-                  }
-              }
-            });
-        wiz.visit(t, ANTLRv3Parser.ARG, // wipe rule arguments
-                  new TreeWizard.Visitor() {
-              public void visit(Object t) {
-                  CommonTree a = (CommonTree)t;
-                  a = (CommonTree)a.getChild(0);
-                  tokens.delete(a.token.getTokenIndex());
-                  killTrailingNewline(tokens, a.token.getTokenIndex());
-              }
-            });
-        wiz.visit(t, ANTLRv3Parser.RET, // wipe rule return declarations
-            new TreeWizard.Visitor() {
-                public void visit(Object t) {
-                    CommonTree a = (CommonTree)t;
-                    CommonTree ret = (CommonTree)a.getChild(0);
-                    tokens.delete(a.token.getTokenIndex(),
-                                  ret.token.getTokenIndex());
-                }
-            });
-        wiz.visit(t, ANTLRv3Parser.SEMPRED, // comment out semantic predicates
-            new TreeWizard.Visitor() {
-                public void visit(Object t) {
-                    CommonTree a = (CommonTree)t;
-                    tokens.replace(a.token.getTokenIndex(), "/*"+a.getText()+"*/");
-                }
-            });
-        wiz.visit(t, ANTLRv3Parser.GATED_SEMPRED, // comment out semantic predicates
-            new TreeWizard.Visitor() {
-                public void visit(Object t) {
-                    CommonTree a = (CommonTree)t;
-                    String text = tokens.toString(a.getTokenStartIndex(),
-                                                  a.getTokenStopIndex());
-                    tokens.replace(a.getTokenStartIndex(),
-                                   a.getTokenStopIndex(),
-                                   "/*"+text+"*/");
-                }
-            });
-        wiz.visit(t, ANTLRv3Parser.SCOPE, // comment scope specs
-            new TreeWizard.Visitor() {
-                public void visit(Object t) {
-                    CommonTree a = (CommonTree)t;
-                    tokens.delete(a.getTokenStartIndex(),
-                                  a.getTokenStopIndex());
-                    killTrailingNewline(tokens, a.getTokenStopIndex());
-                }
-            });        
-        wiz.visit(t, ANTLRv3Parser.ARG_ACTION, // args r[x,y] -> ^(r [x,y])
-            new TreeWizard.Visitor() {
-                public void visit(Object t) {
-                    CommonTree a = (CommonTree)t;
-                    if ( a.getParent().getType()==ANTLRv3Parser.RULE_REF ) {
-                        tokens.delete(a.getTokenStartIndex(),
-                                      a.getTokenStopIndex());
-                    }
-                }
-            });
-        wiz.visit(t, ANTLRv3Parser.LABEL_ASSIGN, // ^('=' id ^(RULE_REF [arg])), ...
-            new TreeWizard.Visitor() {
-                public void visit(Object t) {
-                    CommonTree a = (CommonTree)t;
-                    if ( !a.hasAncestor(ANTLRv3Parser.OPTIONS) ) { // avoid options
-                        CommonTree child = (CommonTree)a.getChild(0);
-                        tokens.delete(a.token.getTokenIndex());     // kill "id="
-                        tokens.delete(child.token.getTokenIndex());
-                    }
-                }
-            });
-        wiz.visit(t, ANTLRv3Parser.LIST_LABEL_ASSIGN, // ^('+=' id ^(RULE_REF [arg])), ...
-            new TreeWizard.Visitor() {
-              public void visit(Object t) {
-                  CommonTree a = (CommonTree)t;
-                  CommonTree child = (CommonTree)a.getChild(0);
-                  tokens.delete(a.token.getTokenIndex());     // kill "id+="
-                  tokens.delete(child.token.getTokenIndex());
-              }
-            });
-
-
-        // AST STUFF
-        wiz.visit(t, ANTLRv3Parser.REWRITE,
-            new TreeWizard.Visitor() {
-              public void visit(Object t) {
-                  CommonTree a = (CommonTree)t;
-                  CommonTree child = (CommonTree)a.getChild(0);
-                  int stop = child.getTokenStopIndex();
-                  if ( child.getType()==ANTLRv3Parser.SEMPRED ) {
-                      CommonTree rew = (CommonTree)a.getChild(1);
-                      stop = rew.getTokenStopIndex();
-                  }
-                  tokens.delete(a.token.getTokenIndex(), stop);
-                  killTrailingNewline(tokens, stop);
-              }
-            });
-        wiz.visit(t, ANTLRv3Parser.ROOT,
-           new TreeWizard.Visitor() {
-               public void visit(Object t) {
-                   tokens.delete(((CommonTree)t).token.getTokenIndex());
-               }
-           });
-        wiz.visit(t, ANTLRv3Parser.BANG,
-           new TreeWizard.Visitor() {
-               public void visit(Object t) {
-                   tokens.delete(((CommonTree)t).token.getTokenIndex());
-               }
-           });
-    }
-
-    public static void ACTION(TokenRewriteStream tokens, CommonTree t) {
-        CommonTree parent = (CommonTree)t.getParent();
-        int ptype = parent.getType();
-        if ( ptype==ANTLRv3Parser.SCOPE || // we have special rules for these
-             ptype==ANTLRv3Parser.AT )
-        {
-            return;
-        }
-        //System.out.println("ACTION: "+t.getText());
-        CommonTree root = (CommonTree)t.getAncestor(ANTLRv3Parser.RULE);
-        if ( root!=null ) {
-            CommonTree rule = (CommonTree)root.getChild(0);
-            //System.out.println("rule: "+rule);
-            if ( !Character.isUpperCase(rule.getText().charAt(0)) ) {
-                tokens.delete(t.getTokenStartIndex(),t.getTokenStopIndex());
-                killTrailingNewline(tokens, t.token.getTokenIndex());
-            }
-        }
-    }
-
-    private static void killTrailingNewline(TokenRewriteStream tokens, int index) {
-        List all = tokens.getTokens();
-        Token tok = (Token)all.get(index);
-        Token after = (Token)all.get(index+1);
-        String ws = after.getText();
-        if ( ws.startsWith("\n") ) {
-            //System.out.println("killing WS after action");
-            if ( ws.length()>1 ) {
-                int space = ws.indexOf(' ');
-                int tab = ws.indexOf('\t');
-                if ( ws.startsWith("\n") &&
-                     space>=0 || tab>=0 )
-                {
-                    return; // do nothing if \n + indent
-                }
-                // otherwise kill all \n
-                ws = ws.replaceAll("\n", "");
-                tokens.replace(after.getTokenIndex(), ws);
-            }
-            else {
-                tokens.delete(after.getTokenIndex());
-            }
-        }
-    }
-
-    public void processArgs(String[] args) {
-		if ( args==null || args.length==0 ) {
-			help();
-			return;
-		}
-		for (int i = 0; i < args.length; i++) {
-			if (args[i].equals("-tree")) tree_option = true;
-			else {
-				if (args[i].charAt(0) != '-') {
-					// Must be the grammar file
-                    filename = args[i];
-				}
-			}
-		}
-	}
-
-    private static void help() {
-        System.err.println("usage: java org.antlr.tool.Strip [args] file.g");
-        System.err.println("  -tree      print out ANTLR grammar AST");
-    }
-
-}
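Strip's own main() and help() above show how the stripper is meant to be driven. A minimal usage sketch, mirroring that main(); the grammar file name is a placeholder.

    // Command line, as printed by help():
    //   java org.antlr.tool.Strip -tree MyGrammar.g
    //
    // Programmatic use:
    import org.antlr.tool.Strip;

    public class StripDemo {
        public static void main(String[] args) throws Exception {
            Strip s = new Strip(new String[] { "MyGrammar.g" });
            s.parseAndRewrite();                    // parse the grammar and rewrite its token stream
            System.out.println(s.getTokenStream()); // grammar text with actions/predicates stripped
        }
    }
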
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/ToolMessage.java b/antlr-3.4/tool/src/main/java/org/antlr/tool/ToolMessage.java
deleted file mode 100644
index 6bbd5c0..0000000
--- a/antlr-3.4/tool/src/main/java/org/antlr/tool/ToolMessage.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.tool;
-
-import org.stringtemplate.v4.ST;
-
-/** A generic message from the tool such as "file not found" type errors; there
- *  is no reason to create a special object for each error unlike the grammar
- *  errors, which may be rather complex.
- *
- *  Sometimes you need to pass in a filename or something to say it is "bad".
- *  Allow a generic object to be passed in and the string template can deal
- *  with just printing it or pulling a property out of it.
- *
- *  TODO what to do with exceptions?  Want stack trace for internal errors?
- */
-public class ToolMessage extends Message {
-
-	public ToolMessage(int msgID) {
-		super(msgID, null, null);
-	}
-	public ToolMessage(int msgID, Object arg) {
-		super(msgID, arg, null);
-	}
-	public ToolMessage(int msgID, Throwable e) {
-		super(msgID);
-		this.e = e;
-	}
-	public ToolMessage(int msgID, Object arg, Object arg2) {
-		super(msgID, arg, arg2);
-	}
-	public ToolMessage(int msgID, Object arg, Throwable e) {
-		super(msgID,arg,null);
-		this.e = e;
-	}
-	public String toString() {
-		ST st = getMessageTemplate();
-		if ( arg!=null ) {
-			st.add("arg", arg);
-		}
-		if ( arg2!=null ) {
-			st.add("arg2", arg2);
-		}
-		if ( e!=null ) {
-			st.add("exception", e);
-			st.add("stackTrace", e.getStackTrace());
-		}
-		return super.toString(st);
-	}
-}
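ToolMessage.toString() above simply injects arg, arg2, and the exception into a StringTemplate and renders it. A minimal illustration of that StringTemplate v4 mechanism follows; the template text here is invented, since the real templates live in the tool's message group files.

    import org.stringtemplate.v4.ST;

    public class ToolMessageTemplateDemo {
        public static void main(String[] args) {
            ST st = new ST("error: cannot process <arg> (<arg2>)");
            st.add("arg", "T.g");
            st.add("arg2", "file not found");
            System.out.println(st.render()); // error: cannot process T.g (file not found)
        }
    }
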
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/AST.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/AST.stg
deleted file mode 100644
index 3a0eeda..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/AST.stg
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-group AST;
-
-@outputFile.imports() ::= <<
-<@super.imports()>
-<if(!TREE_PARSER)><! tree parser would already have imported !>
-import org.antlr.runtime.tree.*;<\n>
-<endif>
->>
-
-@genericParser.members() ::= <<
-<@super.members()>
-<parserMembers()>
->>
-
-/** Add an adaptor property that knows how to build trees */
-parserMembers() ::= <<
-protected var adaptor:TreeAdaptor = new CommonTreeAdaptor();<\n>
-override public function set treeAdaptor(adaptor:TreeAdaptor):void {
-    this.adaptor = adaptor;
-    <grammar.directDelegates:{g|<g:delegateName()>.treeAdaptor = this.adaptor;}>
-}
-override public function get treeAdaptor():TreeAdaptor {
-    return adaptor;
-}
->>
-
-@returnScope.ruleReturnMembers() ::= <<
-<ASTLabelType> tree;
-public function get tree():Object { return tree; }
->>
-
-/** Add a variable to track rule's return AST */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-var root_0:<ASTLabelType> = null;<\n>
->>
-
-ruleLabelDefs() ::= <<
-<super.ruleLabelDefs()>
-<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
-  ruleDescriptor.wildcardTreeListLabels]:{var <it.label.text>_tree:<ASTLabelType>=null;}; separator="\n">
-<ruleDescriptor.tokenListLabels:{var <it.label.text>_tree:<ASTLabelType>=null;}; separator="\n">
-<ruleDescriptor.allTokenRefsInAltsWithRewrites
-    :{var stream_<it>:RewriteRule<rewriteElementType>Stream=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
-<ruleDescriptor.allRuleRefsInAltsWithRewrites
-    :{var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
->>
-
-/** When doing auto AST construction, we must define some variables;
- *  these should be turned off when doing rewrites.  This must be a "mode",
- *  as a rule could have both rewrite and AST construction within the same
- *  alternative block.
- */
-@alt.declarations() ::= <<
-<if(autoAST)>
-<if(outerAlt)>
-<if(!rewriteMode)>
-root_0 = <ASTLabelType>(adaptor.nil());<\n>
-<endif>
-<endif>
-<endif>
->>
-
-// T r a c k i n g  R u l e  E l e m e n t s
-
-/** ID and track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)> <! Track implies no auto AST construction!>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
->>
-
-/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
- *  to the tracking list stream_ID for use in the rewrite.
- */
-tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-/** ^(ID ...) track for rewrite */
-tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
->>
-
-/** Match ^(label+=TOKEN ...) track for rewrite */
-tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefRuleRootTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule.name>.add(<label>.tree);
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefTrack(...)>
-<listLabel(elem=label+".tree",...)>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule>.add(<label>.tree);
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRootTrack(...)>
-<listLabel(elem=label+".tree",...)>
->>
-
-// R e w r i t e
-
-rewriteCode(
-	alts, description,
-	referencedElementsDeep, // ALL referenced elements to right of ->
-	referencedTokenLabels,
-	referencedTokenListLabels,
-	referencedRuleLabels,
-	referencedRuleListLabels,
-    referencedWildcardLabels,
-    referencedWildcardListLabels,
-	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
-<<
-
-// AST REWRITE
-// elements: <referencedElementsDeep; separator=", ">
-// token labels: <referencedTokenLabels; separator=", ">
-// rule labels: <referencedRuleLabels; separator=", ">
-// token list labels: <referencedTokenListLabels; separator=", ">
-// rule list labels: <referencedRuleListLabels; separator=", ">
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) {<\n>
-<endif>
-<prevRuleRootRef()>.tree = root_0;
-<rewriteCodeLabels()>
-root_0 = <ASTLabelType>(adaptor.nil());
-<alts:rewriteAlt(); separator="else ">
-<! if tree parser and rewrite=true !>
-<if(TREE_PARSER)>
-<if(rewriteMode)>
-<prevRuleRootRef()>.tree = <ASTLabelType>(adaptor.rulePostProcessing(root_0));
-input.replaceChildren(adaptor.getParent(retval.start),
-                      adaptor.getChildIndex(retval.start),
-                      adaptor.getChildIndex(_last),
-                      retval.tree);
-<endif>
-<endif>
-<! if parser or tree-parser && rewrite!=true, we need to set result !>
-<if(!TREE_PARSER)>
-<prevRuleRootRef()>.tree = root_0;
-<else>
-<if(!rewriteMode)>
-<prevRuleRootRef()>.tree = root_0;
-<endif>
-<endif>
-<if(backtracking)>
-}
-<endif>
->>
-
-rewriteCodeLabels() ::= <<
-<referencedTokenLabels
-    :{var stream_<it>:RewriteRule<rewriteElementType>Stream=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
-    separator="\n"
->
-<referencedTokenListLabels
-    :{var stream_<it>:RewriteRule<rewriteElementType>Stream=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
-    separator="\n"
->
-<referencedWildcardLabels
-    :{var  stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
-    separator="\n"
->
-<referencedWildcardListLabels
-    :{var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
-    separator="\n"
->
-<referencedRuleLabels
-    :{var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.tree:null);};
-    separator="\n"
->
-<referencedRuleListLabels
-    :{var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"rule <it>",list_<it>);};
-    separator="\n"
->
->>
-
-/** Generate code for an optional rewrite block; note it uses the deep referenced-element
-  *  list rather than the shallow list used by other blocks.
-  */
-rewriteOptionalBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in immediately block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-if ( <referencedElementsDeep:{el | stream_<el>.hasNext}; separator="||"> ) {
-    <alt>
-}
-<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
->>
-
-rewriteClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in immediately block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-while ( <referencedElements:{el | stream_<el>.hasNext}; separator="||"> ) {
-    <alt>
-}
-<referencedElements:{el | stream_<el>.reset();<\n>}>
->>
-
-rewritePositiveClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in immediately block; no nested blocks
-	description) ::=
-<<
-if ( !(<referencedElements:{el | stream_<el>.hasNext}; separator="||">) ) {
-    throw new RewriteEarlyExitException();
-}
-while ( <referencedElements:{el | stream_<el>.hasNext}; separator="||"> ) {
-    <alt>
-}
-<referencedElements:{el | stream_<el>.reset();<\n>}>
->>
-
-rewriteAlt(a) ::= <<
-// <a.description>
-<if(a.pred)>
-if (<a.pred>) {
-    <a.alt>
-}<\n>
-<else>
-{
-    <a.alt>
-}<\n>
-<endif>
->>
-
-/** For empty rewrites: "r : ... -> ;" */
-rewriteEmptyAlt() ::= "root_0 = null;"
-
-rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
-// <fileName>:<description>
-{
-var root_<treeLevel>:<ASTLabelType> = <ASTLabelType>(adaptor.nil());
-<root:rewriteElement()>
-<children:rewriteElement()>
-adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
-}<\n>
->>
-
-rewriteElementList(elements) ::= "<elements:rewriteElement()>"
-
-rewriteElement(e) ::= <<
-<@pregen()>
-<e.el>
->>
-
-/** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,terminalOptions,args) ::= <<
-adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
->>
-
-/** Gen $label ... where defined via label=ID */
-rewriteTokenLabelRef(label,elementIndex) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
->>
-
-/** Gen $label ... where defined via label+=ID */
-rewriteTokenListLabelRef(label,elementIndex) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
->>
-
-/** Gen ^($label ...) */
-rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>));<\n>
->>
-
-/** Gen ^($label ...) where label+=... */
-rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
-
-/** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,terminalOptions,args) ::= <<
-root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>));<\n>
->>
-
-rewriteImaginaryTokenRef(args,token,terminalOptions,elementIndex) ::= <<
-adaptor.addChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
->>
-
-rewriteImaginaryTokenRefRoot(args,token,terminalOptions,elementIndex) ::= <<
-root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>));<\n>
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-root_0 = <action>;<\n>
->>
-
-/** What is the name of the previous value of this rule's root tree?  This
- *  lets us refer to $rule to mean the previous value.  I am reusing the
- *  variable 'tree' sitting in the retval struct to hold the value of root_0 right
- *  before I set it during rewrites.  The assignment will be to retval.tree.
- */
-prevRuleRootRef() ::= "retval"
-
-rewriteRuleRef(rule) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree());<\n>
->>
-
-rewriteRuleRefRoot(rule) ::= <<
-root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>));<\n>
->>
-
-rewriteNodeAction(action) ::= <<
-adaptor.addChild(root_<treeLevel>, <action>);<\n>
->>
-
-rewriteNodeActionRoot(action) ::= <<
-root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<action>, root_<treeLevel>));<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel=rule */
-rewriteRuleLabelRef(label) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
-rewriteRuleListLabelRef(label) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel=rule */
-rewriteRuleLabelRefRoot(label) ::= <<
-root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>));<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
-rewriteRuleListLabelRefRoot(label) ::= <<
-root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>));<\n>
->>
-
-rewriteWildcardLabelRef(label) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
->>
-
-createImaginaryNode(tokenType,terminalOptions,args) ::= <<
-<if(terminalOptions.node)>
-<! new MethodNode(IDLabel, args) !>
-new <terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
-<else>
-<ASTLabelType>(adaptor.create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>))
-<endif>
->>
-
-createRewriteNodeFromElement(token,terminalOptions,args) ::= <<
-<if(terminalOptions.node)>
-new <terminalOptions.node>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
-<else>
-<if(args)> <! must create new node from old !>
-adaptor.create(<token>, <args; separator=", ">)
-<else>
-stream_<token>.nextNode()
-<endif>
-<endif>
->>
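The rewrite templates above drive the runtime's RewriteRule*Stream helpers: generated code builds one stream per referenced element, then pulls nodes or trees from it while hasNext. A rough sketch of what that generated code does, written against the Java runtime rather than the ActionScript target; the token type and text are arbitrary.

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.*;

    public class RewriteStreamDemo {
        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            Object id = adaptor.create(new CommonToken(4, "x"));        // 4 is an arbitrary token type
            RewriteRuleSubtreeStream stream =
                new RewriteRuleSubtreeStream(adaptor, "rule expr", id); // "stream_expr" in the templates
            Object root = adaptor.nil();
            while (stream.hasNext()) {
                adaptor.addChild(root, stream.nextTree());              // what rewriteRuleRef() emits
            }
            System.out.println(adaptor.getText(adaptor.getChild(root, 0))); // prints: x
        }
    }
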
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTParser.stg
deleted file mode 100644
index 1596c95..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTParser.stg
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Templates for building ASTs during normal parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  The situation is not too bad as rewrite (->) usage makes ^ and !
- *  invalid. There is no huge explosion of combinations.
- */
-group ASTParser;
-
-@rule.setErrorReturnValue() ::= <<
-retval.tree = <ASTLabelType>(adaptor.errorNode(input, Token(retval.start), input.LT(-1), re));
-<! trace("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
->>
-
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = <createNodeFromToken(...)>;
-adaptor.addChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = <createNodeFromToken(...)>;
-root_0 = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_0));
-<if(backtracking)>}<endif>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)>
-<listLabel(elem=label,...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,terminalOptions,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
-
-// SET AST
-
-// the match set stuff is interesting in that it uses an argument list
-// to pass code to the default matchSet; another possible way to alter
-// inherited code.  I don't use the region stuff because I need to pass
-// different chunks depending on the operator.  I don't like making
-// the template name have the operator as the number of templates gets
-// large but this is the most flexible--this is as opposed to having
-// the code generator call matchSet then add root code or ruleroot code
-// plus list label plus ...  The combinations might require more involved
-// changes rather than just added-on code.  Investigate that refactoring when
-// I have more time.
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <createNodeFromToken(...)>);})>
->>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-<matchSet(...)>
->>
-
-matchSetBang(s,label,elementIndex,postmatchCode) ::= "<super.matchSet(...)>"
-
-// note there is no matchSetTrack because -> rewrites force sets to be
-// plain old blocks of alts: (A|B|...|C)
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-<if(label)>
-<label>=<labelType>(input.LT(1));<\n>
-<endif>
-<super.matchSet(..., postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = <ASTLabelType>(adaptor.becomeRoot(<createNodeFromToken(...)>, root_0));})>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <label>.tree);
->>
-
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
-
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = <ASTLabelType>(adaptor.becomeRoot(<label>.tree, root_0));
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label+".tree",...)>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefBang(...)>
-<listLabel(elem=label+".tree",...)>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabel(elem=label+".tree",...)>
->>
-
-// WILDCARD AST
-
-wildcard(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = <ASTLabelType>(adaptor.create(<label>));
-adaptor.addChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(label,elementIndex) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = <ASTLabelType>(adaptor.create(<label>));
-root_0 = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_0));
-<if(backtracking)>}<endif>
->>
-
-createNodeFromToken(label,terminalOptions) ::= <<
-<if(terminalOptions.node)>
-new <terminalOptions.node>(<label>) <! new MethodNode(IDLabel) !>
-<else>
-<ASTLabelType>(adaptor.create(<label>))
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
-retval.tree = <ASTLabelType>(adaptor.rulePostProcessing(root_0));
-adaptor.setTokenBoundaries(retval.tree, Token(retval.start), Token(retval.stop));
-<if(backtracking)>}<endif>
->>
\ No newline at end of file
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTTreeParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTTreeParser.stg
deleted file mode 100644
index f598d4f..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTTreeParser.stg
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Templates for building ASTs during tree parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  Each combination has its own template except that label/no label
- *  is combined into tokenRef, ruleRef, ...
- */
-group ASTTreeParser;
-
-/** Add a variable to track last element matched */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-var _first_0:<ASTLabelType> = null;
-var _last:<ASTLabelType> = null;<\n>
->>
-
-/** What to emit when there is no rewrite rule.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= <<
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(rewriteMode)>
-retval.tree = <ASTLabelType>(_first_0);
-if ( adaptor.getParent(retval.tree)!=null && adaptor.isNil( adaptor.getParent(retval.tree) ) )
-    retval.tree = <ASTLabelType>(adaptor.getParent(retval.tree));
-<endif>
-<if(backtracking)>}<endif>
->>
-
-/** match ^(root children) in tree parser; override here to
- *  add tree construction actions.
- */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-{
-var _save_last_<treeLevel>:<ASTLabelType> = _last;
-var _first_<treeLevel>:<ASTLabelType> = null;
-<if(!rewriteMode)>
-var root_<treeLevel>:<ASTLabelType> = <ASTLabelType>(adaptor.nil());
-<endif>
-<root:element()>
-<if(rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-<if(root.el.rule)>
-if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
-<else>
-if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>;
-<endif>
-<endif>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( input.LA(1)==TokenConstants.DOWN ) {
-    matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
-    <children:element()>
-    matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
-}
-<else>
-matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
-<children:element()>
-matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
-<endif>
-<if(!rewriteMode)>
-adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
-<endif>
-_last = _save_last_<treeLevel>;
-}<\n>
->>
-
-// TOKEN AST STUFF
-
-/** ID! and output=AST (same as plain tokenRef) 'cept add
- *  setting of _last
- */
-tokenRefBang(token,label,elementIndex) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.tokenRef(...)>
->>
-
-/** ID auto construct */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<label>);
-<else>
-<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
-<endif><\n>
-adaptor.addChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>}<endif>
-<else> <! rewrite mode !>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
-<endif>
->>
-
-/** label+=TOKEN auto construct */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** ^(ID ...) auto construct */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<label>);
-<else>
-<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
-<endif><\n>
-root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_<treeLevel>));
-<if(backtracking)>}<endif>
-<endif>
->>
-
-/** Match ^(label+=TOKEN ...) auto construct */
-tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard and auto dup the node/subtree */
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.wildcard(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.dupTree(<label>);
-adaptor.addChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>}<endif>
-<else> <! rewrite mode !>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
-<endif>
->>
-
-// SET AST
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.matchSet(..., postmatchCode={
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<label>);
-<else>
-<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
-<endif><\n>
-adaptor.addChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>}<endif>
-<endif>
-}
-)>
->>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-<matchSet(...)>
-<noRewrite()> <! set return tree !>
->>
-
-matchSetBang(s,label,elementIndex,postmatchCode) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.matchSet(...)>
->>
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-<super.matchSet(..., postmatchCode={
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<label>);
-<else>
-<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
-<endif><\n>
-root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_<treeLevel>));
-<if(backtracking)>}<endif>
-<endif>
-}
-)>
->>
-
-// RULE REF AST
-
-/** rule auto construct */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
-<if(!rewriteMode)>
-adaptor.addChild(root_<treeLevel>, <label>.tree);
-<else> <! rewrite mode !>
-if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>.tree;
-<endif>
->>
-
-/** x+=rule auto construct */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label+".tree",...)>
->>
-
-/** ^(rule ...) auto construct */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.ruleRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<label>.tree, root_<treeLevel>));
-<endif>
->>
-
-/** ^(x+=rule ...) auto construct */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabel(elem=label+".tree",...)>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.ruleRefTrack(...)>
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.ruleRefTrackAndListLabel(...)>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.ruleRefRootTrack(...)>
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-_last = <ASTLabelType>(input.LT(1));
-<super.ruleRefRuleRootTrackAndListLabel(...)>
->>
-
-/** Streams for token refs are tree nodes now; override to
- *  change nextToken to nextNode.
- */
-createRewriteNodeFromElement(token,terminalOptions,scope) ::= <<
-<if(terminalOptions.node)>
-new <terminalOptions.node>(stream_<token>.nextNode())
-<else>
-stream_<token>.nextNode()
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
-retval.tree = <ASTLabelType>(adaptor.rulePostProcessing(root_0));
-<if(backtracking)>}<endif>
-<endif>
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ActionScript.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ActionScript.stg
deleted file mode 100644
index 8187ab7..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ActionScript.stg
+++ /dev/null
@@ -1,1289 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-group ActionScript;
-
-asTypeInitMap ::= [
-	"int":"0",
-	"uint":"0",
-	"Number":"0.0",
-	"Boolean":"false",
-	default:"null" // anything other than an atomic type
-]
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-           docComment, recognizer,
-           name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
-	   backtracking, synpreds, memoize, numRules,
-	   fileName, ANTLRVersion, generatedTimestamp, trace,
-	   scopes, superClass, literals) ::=
-<<
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-package<if(actions.(actionScope).package)> <actions.(actionScope).package><endif> {
-    <actions.(actionScope).header>
-    <@imports>
-import org.antlr.runtime.*;
-<if(TREE_PARSER)>
-    import org.antlr.runtime.tree.*;
-<endif>
-    <@end>
-
-    <docComment>
-    <recognizer>
-}
->>
-
-lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode, superClass="Lexer") ::= <<
-public class <grammar.recognizerName> extends <if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else><@superClassName><superClass><@end><endif> {
-    <tokens:{public static const <it.name>:int=<it.type>;}; separator="\n">
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-    <actions.lexer.members>
-
-    // delegates
-    <grammar.delegates:
-         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
-    // delegators
-    <grammar.delegators:
-         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
-    <last(grammar.delegators):{g|public var gParent:<g.recognizerName>;}>
-
-    public function <grammar.recognizerName>(<grammar.delegators:{g|<g:delegateName()>:<g.recognizerName>, }>input:CharStream = null, state:RecognizerSharedState = null) {
-        super(input, state);
-        <cyclicDFAs:cyclicDFACtor()>
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-        this.state.ruleMemo = new Array(<numRules>+1);<\n> <! index from 1..n !>
-<endif>
-<endif>
-        <grammar.directDelegates:
-         {g|<g:delegateName()> = new <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>this, input, this.state);}; separator="\n">
-        <grammar.delegators:
-         {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
-        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
-    }
-    public override function get grammarFileName():String { return "<fileName>"; }
-
-<if(filterMode)>
-    <filteringNextToken()>
-<endif>
-    <rules; separator="\n\n">
-
-    <synpreds:{p | <lexerSynpred(p)>}>
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-}
->>
-
-/** An override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error is reported upon a failed match; just rewind, consume
- *  a token and then try again.  backtracking needs to be set as well.
- *  Make rule memoization happen only at levels above 1, as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-public override function nextToken():Token {
-    while (true) {
-        if ( input.LA(1)==CharStreamConstants.EOF ) {
-            return TokenConstants.EOF_TOKEN;
-        }
-        this.state.token = null;
-	    this.state.channel = TokenConstants.DEFAULT_CHANNEL;
-        this.state.tokenStartCharIndex = input.index;
-        this.state.tokenStartCharPositionInLine = input.charPositionInLine;
-        this.state.tokenStartLine = input.line;
-	    this.state.text = null;
-        try {
-            var m:int = input.mark();
-            this.state.backtracking=1; <! means we won't throw slow exception !>
-            this.state.failed=false;
-            mTokens();
-            this.state.backtracking=0;
-            <! mTokens backtracks with synpred at backtracking==2
-               and we set the synpredgate to allow actions at level 1. !>
-            if ( this.state.failed ) {
-                input.rewindTo(m);
-                input.consume(); <! advance one char and try again !>
-            }
-            else {
-                emit();
-                return this.state.token;
-            }
-        }
-        catch (re:RecognitionException) {
-            // shouldn't happen in backtracking mode, but...
-            reportError(re);
-            recover(re);
-        }
-    }
-    // Not reached - For ActionScript compiler
-    throw new Error();
-}
-
-public override function memoize(input:IntStream,
-		ruleIndex:int,
-		ruleStartIndex:int):void
-{
-if ( this.state.backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
-}
-
-public override function alreadyParsedRule(input:IntStream, ruleIndex:int):Boolean {
-if ( this.state.backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
-return false;
-}
->>
-
-actionGate() ::= "this.state.backtracking==0"
-
-filteringActionGate() ::= "this.state.backtracking==1"
-
-/** How to generate a parser */
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass, filterMode,
-              ASTLabelType="Object", labelType, members, rewriteElementType) ::= <<
-public class <grammar.recognizerName> extends <if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else><@superClassName><superClass><@end><endif> {
-<if(grammar.grammarIsRoot)>
-    public static const tokenNames:Array = [
-        "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
-    ];<\n>
-<endif>
-    <tokens:{public static const <it.name>:int=<it.type>;}; separator="\n">
-
-    // delegates
-    <grammar.delegates:
-         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
-    // delegators
-    <grammar.delegators:
-         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
-    <last(grammar.delegators):{g|public var gParent:<g.recognizerName>;}>
-
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-    <@members>
-   <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-    public function <grammar.recognizerName>(<grammar.delegators:{g|<g:delegateName()>:<g.recognizerName>, }>input:<inputStreamType>, state:RecognizerSharedState = null) {
-        super(input, state);
-        <cyclicDFAs:cyclicDFACtor()>
-        <parserCtorBody()>
-        <grammar.directDelegates:
-         {g|<g:delegateName()> = new <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>this, input, this.state);}; separator="\n">
-        <grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
-        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
-    }
-    <@end>
-
-    public override function get tokenNames():Array { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; }
-    public override function get grammarFileName():String { return "<fileName>"; }
-
-    <members>
-
-    <rules; separator="\n\n">
-
-    <! generate rule/method definitions for imported rules so they
-       appear to be defined in this recognizer. !>
-       // Delegated rules
-    <grammar.delegatedRules:{ruleDescriptor|
-        public function <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>):<returnType()> \{ <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); \}}; separator="\n">
-
-    <synpreds:{p | <synpred(p)>}>
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-                    words64=it.bits)>
-}
->>
-
-parserCtorBody() ::= <<
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-this.state.ruleMemo = new Array(<length(grammar.allImportedRules)>+1);<\n> <! index from 1..n !>
-<endif>
-<endif>
-<grammar.delegators:
- {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType="Object", superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="TokenStream", rewriteElementType="Token", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
-<genericParser(inputStreamType="TreeNodeStream", rewriteElementType="Node", ...)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  or parameters, just generate the simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-// $ANTLR start <ruleName>
-public final function <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>):void {
-    <ruleLabelDefs()>
-<if(trace)>
-    traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
-    try {
-        <block>
-    }
-    finally {
-        traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
-    }
-<else>
-    <block>
-<endif>
-}
-// $ANTLR end <ruleName>
->>
-
-synpred(name) ::= <<
-public final function <name>():Boolean {
-    this.state.backtracking++;
-    <@start()>
-    var start:int = input.mark();
-    try {
-        <name>_fragment(); // can never throw exception
-    } catch (re:RecognitionException) {
-        trace("impossible: "+re);
-    }
-    var success:Boolean = !this.state.failed;
-    input.rewindTo(start);
-    <@stop()>
-    this.state.backtracking--;
-    this.state.failed=false;
-    return success;
-}<\n>
->>
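synpred() above is the generic shape of a syntactic predicate: bump the backtracking level, mark the input, run the predicate fragment (which records failure instead of throwing while guessing), then rewind unconditionally and report whether the speculative match succeeded. A small self-contained Java sketch of that shape follows; the token array, the la()/match() helpers, and the synpred1 rule are hypothetical, chosen only to make the mark/try/rewind pattern runnable.

    /** Sketch of a syntactic predicate: speculatively run a match, then
     *  rewind the input cursor regardless of the outcome. */
    public class SynpredSketch {
        private final String[] tokens;
        private int p = 0;           // input cursor
        private int backtracking = 0;
        private boolean failed = false;

        SynpredSketch(String... tokens) { this.tokens = tokens; }

        private String la(int i) {   // lookahead, 1-based
            int idx = p + i - 1;
            return idx < tokens.length ? tokens[idx] : "<EOF>";
        }

        private void match(String t) {
            if (la(1).equals(t)) { p++; return; }
            if (backtracking > 0) { failed = true; return; }  // no exception while guessing
            throw new RuntimeException("mismatched token: " + la(1) + ", expecting " + t);
        }

        /** fragment for the predicate: ID '=' */
        private void synpred1_fragment() {
            match("ID"); if (failed) return;
            match("=");
        }

        /** the predicate itself: mark, try, rewind, report */
        boolean synpred1() {
            backtracking++;
            int start = p;                 // mark()
            synpred1_fragment();           // never throws while backtracking
            boolean success = !failed;
            p = start;                     // rewindTo(start)
            backtracking--;
            failed = false;
            return success;
        }

        public static void main(String[] args) {
            System.out.println(new SynpredSketch("ID", "=", "INT").synpred1()); // true
            System.out.println(new SynpredSketch("ID", "+", "INT").synpred1()); // false
        }
    }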
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if ( this.state.backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (this.state.failed) return <ruleReturnValue()>;<endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (this.state.backtracking>0) {this.state.failed=true; return <ruleReturnValue()>;}<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-// $ANTLR start <ruleName>
-// <fileName>:<description>
-public final function <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>):<returnType()> {
-    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleLabelDefs()>
-    <ruleDescriptor.actions.init>
-    <@preamble()>
-    try {
-        <ruleMemoization(name=ruleName)>
-        <block>
-        <ruleCleanUp()>
-        <(ruleDescriptor.actions.after):execAction()>
-    }
-<if(exceptions)>
-    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-    <actions.(actionScope).rulecatch>
-<else>
-    catch (re:RecognitionException) {
-        reportError(re);
-        recoverStream(input,re);
-        <@setErrorReturnValue()>
-    }<\n>
-<endif>
-<endif>
-<endif>
-    finally {
-        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <memoize()>
-        <ruleScopeCleanUp()>
-        <finally>
-    }
-    <@postamble()>
-    return <ruleReturnValue()>;
-}
-// $ANTLR end <ruleName>
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>) {
-    <e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-var retval:<returnType()> = new <returnType()>();
-retval.start = input.LT(1);<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-var <a.name>:<a.type> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
-}>
-<endif>
-<if(memoize)>
-var <ruleDescriptor.name>_StartIndex:int = input.index;
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.push(new Object());}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.push(new Object());}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.pop();}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.pop();}; separator="\n">
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
-  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{var <it.label.text>:<labelType>=null;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{var list_<it.label.text>:Array=null;}; separator="\n"
->
-<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|var <ll.label.text>:RuleReturnScope = null;}; separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{var <it.label.text>:<labelType>=null;}; separator="\n"
->
-<ruleDescriptor.charLabels:{var <it.label.text>:int;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{var list_<it.label.text>:Array=null;}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <<
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-retval
-<endif>
-<endif>
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-retval.stop = input.LT(-1);<\n>
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if ( this.state.backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
-<endif>
-<endif>
->>
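ruleMemoization() and memoize() together implement packrat-style caching: before running a rule while backtracking, check whether this (rule, start index) pair was already attempted, and after running it, record where it stopped or that it failed. A minimal Java sketch of that bookkeeping is shown below; it uses a single HashMap keyed on both indices, which is an assumption made for brevity rather than the runtime's actual per-rule arrays.

    import java.util.HashMap;
    import java.util.Map;

    /** Sketch of rule memoization keyed by (rule index, start index). */
    public class MemoSketch {
        static final int MEMO_RULE_FAILED  = -2;
        static final int MEMO_RULE_UNKNOWN = -1;

        // value is the stop index of a successful parse, or MEMO_RULE_FAILED
        private final Map<Long, Integer> ruleMemo = new HashMap<>();

        private long key(int ruleIndex, int startIndex) {
            return ((long) ruleIndex << 32) | (startIndex & 0xffffffffL);
        }

        /** Record the outcome of a rule invocation that began at ruleStartIndex. */
        void memoize(int ruleIndex, int ruleStartIndex, int stopIndex, boolean failed) {
            ruleMemo.put(key(ruleIndex, ruleStartIndex),
                         failed ? MEMO_RULE_FAILED : stopIndex);
        }

        /** If this rule was already tried at this position, return the cached
         *  stop index (so the caller can just seek past it), or UNKNOWN. */
        int alreadyParsedRule(int ruleIndex, int startIndex) {
            Integer stop = ruleMemo.get(key(ruleIndex, startIndex));
            return stop == null ? MEMO_RULE_UNKNOWN : stop;
        }

        public static void main(String[] args) {
            MemoSketch m = new MemoSketch();
            m.memoize(/*rule*/ 3, /*start*/ 10, /*stop*/ 17, false);
            System.out.println(m.alreadyParsedRule(3, 10));   // 17
            System.out.println(m.alreadyParsedRule(3, 11));   // -1 (unknown)
        }
    }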
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-// $ANTLR start <ruleName>
-public final function m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>):void {
-    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    try {
-<if(nakedBlock)>
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block><\n>
-<else>
-        var _type:int = <ruleName>;
-        var _channel:int = DEFAULT_TOKEN_CHANNEL;
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block>
-        <ruleCleanUp()>
-        this.state.type = _type;
-        this.state.channel = _channel;
-        <(ruleDescriptor.actions.after):execAction()>
-<endif>
-    }
-    finally {
-        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <ruleScopeCleanUp()>
-        <memoize()>
-    }
-}
-// $ANTLR end <ruleName>
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-public override function mTokens():void {
-    <block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-var alt<decisionNumber>:int=<maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-<@prebranch()>
-switch (alt<decisionNumber>) {
-    <alts:altSwitchCase()>
-}
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-var alt<decisionNumber>:int=<maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-switch (alt<decisionNumber>) {
-    <alts:altSwitchCase()>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-var cnt<decisionNumber>:int=0;
-<decls>
-<@preloop()>
-loop<decisionNumber>:
-do {
-    var alt<decisionNumber>:int=<maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) {
-	<alts:altSwitchCase()>
-	default :
-	    if ( cnt<decisionNumber> >= 1 ) break loop<decisionNumber>;
-	    <ruleBacktrackFailure()>
-            throw new EarlyExitException(<decisionNumber>, input);
-            <! Need to add support for earlyExitException debug hook !>
-    }
-    cnt<decisionNumber>++;
-} while (true);
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@preloop()>
-loop<decisionNumber>:
-do {
-    var alt<decisionNumber>:int=<maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) {
-	<alts:altSwitchCase()>
-	default :
-	    break loop<decisionNumber>;
-    }
-} while (true);
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
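Both closure templates expand to the same runtime shape: a labeled loop whose body predicts an alternative on every iteration, dispatches on it with a switch, and leaves the loop through the default case (for (..)+ only after at least one successful iteration, enforced by the cnt counter). The Java fragment below is a rough analogue of the generated (..)* structure, with a trivial lookahead test standing in for the generated decision; it is an illustration of the loop shape, not generated output.

    /** Sketch of the loop structure generated for a (..)* subrule:
     *  predict an alternative, switch on it, exit via the default case. */
    public class ClosureLoopSketch {
        public static void main(String[] args) {
            String input = "aab!";
            int p = 0;

            loop1:
            while (true) {
                int alt1 = 2;                       // default: the exit alternative
                // decision: a trivial stand-in for the generated DFA/lookahead test
                if (p < input.length() && input.charAt(p) == 'a') alt1 = 1;

                switch (alt1) {
                    case 1:
                        System.out.println("matched 'a' at " + p);
                        p++;
                        break;
                    default:
                        break loop1;                // no more matches: leave the loop
                }
            }
            System.out.println("stopped at index " + p);  // 2
        }
    }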
-
-/** Optional blocks (x)? are translated to (x|) before code generation,
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase() ::= <<
-case <i> :
-    <@prealt()>
-    <it>
-    break;<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-// <fileName>:<description>
-{
-<@declarations()>
-<elements:element()>
-<rew>
-<@cleanup()>
-}
->>
-
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element() ::= <<
-<@prematch()>
-<it.el><\n>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label>=<labelType>(<endif>matchStream(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>)<if(label)>)<endif>; <checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-listLabel(label,elem) ::= <<
-if (list_<label>==null) list_<label>=new Array();
-list_<label>.push(<elem>);<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-match(<char>); <checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,postmatchCode="") ::= <<
-<if(label)>
-<if(LEXER)>
-<label>= input.LA(1);<\n>
-<else>
-<label>=<labelType>(input.LT(1));<\n>
-<endif>
-<endif>
-if ( <s> ) {
-    input.consume();
-    <postmatchCode>
-<if(!LEXER)>
-    this.state.errorRecovery=false;
-<endif>
-    <if(backtracking)>this.state.failed=false;<endif>
-}
-else {
-    <ruleBacktrackFailure()>
-    <@mismatchedSetException()>
-<if(LEXER)>
-    throw recover(new MismatchedSetException(null,input));<\n>
-<else>
-    throw new MismatchedSetException(null,input);
-    <! use following code to make it recover inline; remove throw mse;
-    recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
-    !>
-<endif>
-}<\n>
->>
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex) ::= <<
-<if(label)>
-var <label>Start:int = charIndex;
-matchString(<string>); <checkRuleBacktrackFailure()>
-<label> = CommonToken.createFromStream(input, TokenConstants.INVALID_TOKEN_TYPE, TokenConstants.DEFAULT_CHANNEL, <label>Start, charIndex-1);
-<else>
-matchString(<string>); <checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-wildcard(label,elementIndex) ::= <<
-<if(label)>
-<label>=<labelType>(input.LT(1));<\n>
-<endif>
-matchAny(input); <checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(label,elementIndex) ::= <<
-<wildcard(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-matchAny(); <checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.  The 'rule' argument used to be the
- *  target rule name, but is now of type Rule, whose toString() is
- *  the same (the rule name); it also gives access to the full rule
- *  descriptor.
- *
- * GMS: Note:  do not use post-decrement operator!  ASC produces bad code for exceptions in this case.
- *      See: https://bugs.adobe.com/jira/browse/ASC-3625
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-pushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
-<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
-state._fsp = state._fsp - 1;
-<checkRuleBacktrackFailure()>
->>
-
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** A lexer rule reference.
- *
- *  The 'rule' argument used to be the target rule name, but is now
- *  of type Rule, whose toString() is the same (the rule name);
- *  it also gives access to the full rule descriptor.
- */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
-<if(label)>
-var <label>Start<elementIndex>:int = charIndex;
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<label> = CommonToken.createFromStream(input, TokenConstants.INVALID_TOKEN_TYPE, TokenConstants.DEFAULT_CHANNEL, <label>Start<elementIndex>, charIndex-1);
-<else>
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-var <label>Start<elementIndex>:int = charIndex;
-match(EOF); <checkRuleBacktrackFailure()>
-var <label>:<labelType> = CommonToken.createFromStream(input, EOF, TokenConstants.DEFAULT_CHANNEL, <label>Start<elementIndex>, charIndex-1);
-<else>
-match(EOF); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( input.LA(1)==TokenConstants.DOWN ) {
-    matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
-    <children:element()>
-    matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
-}
-<else>
-matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
-<children:element()>
-matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
-<endif>
->>
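The tree template walks a flattened node stream in which every subtree ^(root children) is serialized as root DOWN children UP, so matching a tree means matching the root, then DOWN, the children, and UP, with the DOWN/UP pair skipped when a nullable child list is absent. The Java sketch below plays that traversal out over a plain list of strings; it is purely illustrative and uses hypothetical la()/match() helpers rather than the TreeNodeStream API.

    import java.util.Arrays;
    import java.util.List;

    /** Sketch of matching ^(root children) against a flattened node stream
     *  where subtrees are serialized as: root DOWN child* UP. */
    public class TreeMatchSketch {
        private final List<String> nodes;
        private int p = 0;

        TreeMatchSketch(List<String> nodes) { this.nodes = nodes; }

        private String la() { return p < nodes.size() ? nodes.get(p) : "<EOF>"; }

        private void match(String expected) {
            if (!la().equals(expected))
                throw new RuntimeException("expected " + expected + " but found " + la());
            p++;
        }

        /** matches ^(PLUS INT INT), e.g. the tree built for "1 + 2" */
        void plusTree() {
            match("PLUS");
            if (la().equals("DOWN")) {       // nullable child list: DOWN/UP only if present
                match("DOWN");
                match("INT");
                match("INT");
                match("UP");
            }
        }

        public static void main(String[] args) {
            new TreeMatchSketch(Arrays.asList("PLUS", "DOWN", "INT", "INT", "UP")).plusTree();
            System.out.println("matched ^(PLUS INT INT)");
        }
    }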
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if ( !(<evalPredicate(...)>) ) {
-    <ruleBacktrackFailure()>
-    throw new FailedPredicateException(input, "<ruleName>", "<description>");
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-var LA<decisionNumber>_<stateNumber>:int = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
-else {
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    throw new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-    <! Need to add hook for noViableAltException() !>
-<endif>
-}
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and closer to what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-var LA<decisionNumber>_<stateNumber>:int = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-var LA<decisionNumber>_<stateNumber>:int = input.LA(<k>);<\n>
-<edges; separator="\nelse "><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
-<else>
-else {
-    alt<decisionNumber>=<eotPredictsAlt>;
-}<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
-    <targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
-<edges; separator="\n">
-default:
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    throw new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-    <! Need to add hook for noViableAltException !>
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
-    <edges; separator="\n">
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
-<edges; separator="\n"><\n>
-<if(eotPredictsAlt)>
-default:
-    alt<decisionNumber>=<eotPredictsAlt>;
-    break;<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{case <it>:}; separator="\n">
-    {
-    <targetState>
-    }
-    break;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = dfa<decisionNumber>.predict(input);
->>
-
-cyclicDFACtor(dfa) ::= <<
-
-dfa<dfa.decisionNumber> = new DFA(this, <dfa.decisionNumber>,
-            "<dfa.description>",
-            DFA<dfa.decisionNumber>_eot, DFA<dfa.decisionNumber>_eof, DFA<dfa.decisionNumber>_min,
-            DFA<dfa.decisionNumber>_max, DFA<dfa.decisionNumber>_accept, DFA<dfa.decisionNumber>_special,
-            DFA<dfa.decisionNumber>_transition<if(dfa.specialStateSTs)>, DFA<dfa.decisionNumber>_specialStateTransition<endif>);
-
->>
-/* Dump DFA tables as run-length-encoded Strings of octal values.
- * Can't use hex as the compiler translates them before compilation.
- * These strings are split into multiple, concatenated strings.
- * The compiler puts them back together at compile time, thankfully.
- * Compilers cannot handle large static arrays, so we're stuck with this
- * encode/decode approach.  See analysis and runtime DFA for
- * the encoding methods.
- */
-cyclicDFA(dfa) ::= <<
-
-private const DFA<dfa.decisionNumber>_eot:Array =
-    DFA.unpackEncodedString("<dfa.javaCompressedEOT; wrap="\"+\n    \"">");
-private const DFA<dfa.decisionNumber>_eof:Array =
-    DFA.unpackEncodedString("<dfa.javaCompressedEOF; wrap="\"+\n    \"">");
-private const DFA<dfa.decisionNumber>_min:Array =
-    DFA.unpackEncodedString("<dfa.javaCompressedMin; wrap="\"+\n    \"">", true);
-private const DFA<dfa.decisionNumber>_max:Array =
-    DFA.unpackEncodedString("<dfa.javaCompressedMax; wrap="\"+\n    \"">", true);
-private const DFA<dfa.decisionNumber>_accept:Array =
-    DFA.unpackEncodedString("<dfa.javaCompressedAccept; wrap="\"+\n    \"">");
-private const DFA<dfa.decisionNumber>_special:Array =
-    DFA.unpackEncodedString("<dfa.javaCompressedSpecial; wrap="\"+\n    \"">");
-private const DFA<dfa.decisionNumber>_transition:Array = [
-        <dfa.javaCompressedTransition:{s|DFA.unpackEncodedString("<s; wrap="\"+\n\"">")}; separator=",\n">
-];
-<if(dfa.specialStateSTs)>
-    private function DFA<dfa.decisionNumber>_specialStateTransition(dfa:DFA, s:int, _input:IntStream):int {
-        <if(LEXER)>
-        var input:IntStream = _input;
-        <endif>
-        <if(PARSER)>
-        var input:TokenStream = TokenStream(_input);
-        <endif>
-        <if(TREE_PARSER)>
-        var input:TreeNodeStream = TreeNodeStream(_input);
-        <endif>
-    	var _s:int = s;
-        switch ( s ) {
-        <dfa.specialStateSTs:{state |
-        case <i0> : <! compressed special state numbers 0..n-1 !>
-            <state>}; separator="\n">
-        }
-<if(backtracking)>
-        if (this.state.backtracking>0) {this.state.failed=true; return -1;}<\n>
-<endif>
-        throw dfa.error(new NoViableAltException(dfa.description, <dfa.decisionNumber>, _s, input));
-    }<\n>
-<endif>
-
-protected var dfa<dfa.decisionNumber>:DFA;  // initialized in constructor
-
->>
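The tables above are emitted as run-length-encoded strings precisely so the target compiler never has to digest a huge literal array; the runtime DFA unpacks them back into numeric arrays before prediction runs. The encode/decode pair below is a Java illustration of the idea only; the actual wire format produced by the code generator and consumed by DFA.unpackEncodedString() differs in its details.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    /** Sketch of run-length encoding a DFA table into (count, value) char pairs. */
    public class RlePackSketch {
        /** Encode as pairs of chars: a repeat count followed by the value itself. */
        static String pack(short[] table) {
            StringBuilder sb = new StringBuilder();
            int i = 0;
            while (i < table.length) {
                int n = 1;
                while (i + n < table.length && table[i + n] == table[i]) n++;
                sb.append((char) n).append((char) table[i]);
                i += n;
            }
            return sb.toString();
        }

        /** Decode back into the full table. */
        static short[] unpack(String encoded) {
            List<Short> out = new ArrayList<>();
            for (int i = 0; i < encoded.length(); i += 2) {
                int n = encoded.charAt(i);
                short v = (short) encoded.charAt(i + 1);
                for (int k = 0; k < n; k++) out.add(v);
            }
            short[] r = new short[out.size()];
            for (int i = 0; i < r.length; i++) r[i] = out.get(i);
            return r;
        }

        public static void main(String[] args) {
            short[] accept = {-1, -1, -1, 2, 2, 2, 2, 1};
            String packed = pack(accept);
            System.out.println(Arrays.equals(accept, unpack(packed)));  // true
        }
    }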
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-var LA<decisionNumber>_<stateNumber>:int = input.LA(1);<\n>
-<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-var index<decisionNumber>_<stateNumber>:int = input.index;
-input.rewind();<\n>
-<endif>
-s = -1;
-<edges; separator="\nelse ">
-<if(semPredState)> <! return input cursor to state before we rewound !>
-input.seek(index<decisionNumber>_<stateNumber>);<\n>
-<endif>
-if ( s>=0 ) return s;
-break;
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-s = <targetStateNumber>;<\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left>&&<right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(...)>)"
-
-evalPredicate(pred,description) ::= "(<pred>)"
-
-evalSynPredicate(pred,description) ::= "<pred>()"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atomAsInt>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atomAsInt>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
-(LA<decisionNumber>_<stateNumber>\>=<lowerAsInt> && LA<decisionNumber>_<stateNumber>\<=<upperAsInt>)
->>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)\>=<lowerAsInt> && input.LA(<k>)\<=<upperAsInt>)"
-
-setTest(ranges) ::= "<ranges; separator=\"||\">"
-
-// A T T R I B U T E S
-
-globalAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected var <scope.name>_stack:Array = new Array();<\n>
-<endif>
->>
-
-ruleAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected var <scope.name>_stack:Array = new Array();<\n>
-<endif>
->>
-
-returnStructName() ::= "<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope"
-
-returnType() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<returnStructName()>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-/** Generate the ActionScript type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-<returnStructName()>
-<else>
-<if(referencedRule.hasSingleReturnValue)>
-<referencedRule.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-delegateName() ::= <<
-<if(it.label)><it.label><else>g<it.name><endif>
->>
-
-/** Using the type-to-init-value map, look up the default value for a type;
- *  if the type is not in the table it must be an object, so the default is "null".
- */
-initValue(typeName) ::= <<
-<asTypeInitMap.(typeName)>
->>
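initValue() is just a table lookup: the per-target map (asTypeInitMap here) takes a type name to its default value, and anything not in the table is treated as an object type whose default is "null". The Java fragment below mirrors that lookup; the specific entries in the map are illustrative assumptions, not the actual asTypeInitMap contents.

    import java.util.HashMap;
    import java.util.Map;

    /** Sketch of the type-name -> default-value lookup behind initValue(). */
    public class InitValueSketch {
        private static final Map<String, String> TYPE_INIT = new HashMap<>();
        static {
            // illustrative entries only
            TYPE_INIT.put("int", "0");
            TYPE_INIT.put("Number", "0.0");
            TYPE_INIT.put("Boolean", "false");
        }

        /** Any type not in the table is assumed to be an object type. */
        static String initValue(String typeName) {
            return TYPE_INIT.getOrDefault(typeName, "null");
        }

        public static void main(String[] args) {
            System.out.println(initValue("int"));      // 0
            System.out.println(initValue("Token"));    // null
        }
    }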
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <<
-var <label.label.text>:<ruleLabelType(referencedRule=label.referencedRule)> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
->>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-public static class <returnType()> extends <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope {
-    <scope.attributes:{public <it.decl>;}; separator="\n">
-    <@ruleReturnMembers()>
-};
-<endif>
->>
-
-parameterScope(scope) ::= <<
-<scope.attributes:{<it.name>:<it.type>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <<
- <if(negIndex)>
- <scope>_stack[<scope>_stack.length-<negIndex>-1].<attr.name>
- <else>
- <if(index)>
- <scope>_stack[<index>].<attr.name>
- <else>
- <scope>_stack[<scope>_stack.length-1].<attr.name>
- <endif>
- <endif>
->>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
-<if(negIndex)>
-<scope>_stack[<scope>_stack.length-<negIndex>-1].<attr.name> =<expr>;
-<else>
-<if(index)>
-<scope>_stack[<index>].<attr.name> =<expr>;
-<else>
-<scope>_stack[<scope>_stack.length-1].<attr.name> =<expr>;
-<endif>
-<endif>
->>
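The scope templates above reduce ANTLR's dynamic attribute scopes to a plain stack per scope name: a rule that declares or uses the scope pushes a fresh record on entry and pops it on exit, and $scope::attr reads the record on top (or at a requested depth). A compact Java sketch of that mechanism follows; it uses a Map per record instead of a generated attribute class, which is an assumption made to keep the example short.

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.HashMap;
    import java.util.Map;

    /** Sketch of a dynamic attribute scope: a stack of attribute records. */
    public class DynamicScopeSketch {
        // one stack per named scope; each entry is one rule invocation's attributes
        private final Deque<Map<String, Object>> functionStack = new ArrayDeque<>();

        void enterFunctionRule(String name) {
            Map<String, Object> rec = new HashMap<>();  // <scope>_stack.push(new Object())
            rec.put("name", name);
            functionStack.push(rec);
        }

        void exitFunctionRule() {
            functionStack.pop();                        // <scope>_stack.pop()
        }

        /** $function::name -- attribute of the innermost enclosing invocation. */
        Object top(String attr) {
            return functionStack.peek().get(attr);
        }

        public static void main(String[] args) {
            DynamicScopeSketch s = new DynamicScopeSketch();
            s.enterFunctionRule("outer");
            s.enterFunctionRule("inner");               // nested invocation shadows outer
            System.out.println(s.top("name"));          // inner
            s.exitFunctionRule();
            System.out.println(s.top("name"));          // outer
            s.exitFunctionRule();
        }
    }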
-
-/** $x is either a global scope or x is a rule with a dynamic scope; this refers
- *  to the stack itself, not the top of the stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-(<scope>!=null?<scope>.values.<attr.name>:<initValue(attr.type)>)
-<else>
-<scope>
-<endif>
->>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.values.<attr.name>
-<else>
-<attr.name>
-<endif>
->>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.values.<attr.name> =<expr>;
-<else>
-<attr.name> =<expr>;
-<endif>
->>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach
-
-tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.text:null)"
-tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.type:0)"
-tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.line:0)"
-tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.charPositionInLine:0)"
-tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.channel:0)"
-tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.tokenIndex:0)"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int(<scope>.text):0)"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?<labelType>(<scope>.start):null)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?<labelType>(<scope>.stop):null)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?<ASTLabelType>(<scope>.tree):null)"
-ruleLabelPropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-(<scope>!=null?(input.tokenStream.toStringWithRange(
-  input.treeAdaptor.getTokenStartIndex(<scope>.start),
-  input.treeAdaptor.getTokenStopIndex(<scope>.start))):null)
-<else>
-(<scope>!=null?input.toStringWithTokenRange(<scope>.start,<scope>.stop):null)
-<endif>
->>
-
-ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?<scope>.st:null)"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::=
-    "(<scope>!=null?<scope>.type:0)"
-lexerRuleLabelPropertyRef_line(scope,attr) ::=
-    "(<scope>!=null?<scope>.lien:0)"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::=
-    "(<scope>!=null?<scope>.charPositionInLine:0)"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::=
-    "(<scope>!=null?<scope>.channel:0)"
-lexerRuleLabelPropertyRef_index(scope,attr) ::=
-    "(<scope>!=null?<scope>.tokenIndex:0)"
-lexerRuleLabelPropertyRef_text(scope,attr) ::=
-    "(<scope>!=null?<scope>.text:null)"
-lexerRuleLabelPropertyRef_int(scope,attr) ::=
-    "(<scope>!=null?int(<scope>.text):0)"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "<labelType>(retval.start)"
-rulePropertyRef_stop(scope,attr) ::= "<labelType>(retval.stop)"
-rulePropertyRef_tree(scope,attr) ::= "<ASTLabelType>(retval.tree)"
-rulePropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-input.tokenStream.toStringWithRange(
-  input.treeAdaptor.getTokenStartIndex(retval.start),
-  input.treeAdaptor.getTokenStopIndex(retval.start))
-<else>
-input.toStringWithTokenRange(retval.start,input.LT(-1))
-<endif>
->>
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-lexerRulePropertyRef_text(scope,attr) ::= "text"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
-lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(charIndex-1)"
-lexerRulePropertyRef_int(scope,attr) ::= "int(<scope>.text)"
-
-// setting $st and $tree is allowed in the local rule; everything else
-// is flagged as an error
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
-
-/** How to execute an action (only when not backtracking) */
-execAction(action) ::= <<
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) {
-  <action>
-}
-<else>
-<action>
-<endif>
->>
-
-/** How to always execute an action even when backtracking */
-execForcedAction(action) ::= "<action>"
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-public static const <name>:BitSet = new BitSet([<words64:{<it>};separator=", ">]);<\n>
->>
-
-codeFileExtension() ::= ".as"
-
-true() ::= "true"
-false() ::= "false"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/C.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/C.stg
deleted file mode 100644
index eb3a7b6..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/C.stg
+++ /dev/null
@@ -1,3256 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
- http://www.temporal-wave.com
- http://www.linkedin.com/in/jimidle
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/*
- * This code generating template and the associated C runtime was produced by:
- * Jim Idle jimi|hereisanat|idle|dotgoeshere|ws.
- * If it causes the destruction of the Universe, it will be pretty cool so long as
- * I am in a different one at the time.
- */
-cTypeInitMap ::= [
-	"int"		    : "0",              // Integers     start out being 0
-	"long"		    : "0",              // Longs        start out being 0
-	"float"		    : "0.0",           // Floats       start out being 0
-	"double"	    : "0.0",           // Doubles      start out being 0
-	"ANTLR3_BOOLEAN"    : "ANTLR3_FALSE",   // Booleans     start out being Antlr C for false
-	"byte"		    : "0",              // Bytes        start out being 0
-	"short"		    : "0",              // Shorts       start out being 0
-	"char"		    : "0"              // Chars        start out being 0
-]
-
-leadIn(type) ::=
-<<
-/** \file
- *  This <type> file was generated by $ANTLR version <ANTLRVersion>
- *
- *     -  From the grammar source file : <fileName>
- *     -                            On : <generatedTimestamp>
-<if(LEXER)>
- *     -                 for the lexer : <name>Lexer
-<endif>
-<if(PARSER)>
- *     -                for the parser : <name>Parser
-<endif>
-<if(TREE_PARSER)>
- *     -           for the tree parser : <name>TreeParser
-<endif>
- *
- * Editing it, at least manually, is not wise.
- *
- * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws.
- *
- *
->>
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile( LEXER,
-            PARSER,
-            TREE_PARSER,
-            actionScope,
-            actions,
-            docComment,
-            recognizer,
-            name,
-            tokens,
-            tokenNames,
-            rules,
-            cyclicDFAs,
-            bitsets,
-            buildTemplate,
-            buildAST,
-            rewriteMode,
-            profile,
-            backtracking,
-            synpreds,
-            memoize,
-            numRules,
-            fileName,
-            ANTLRVersion,
-            generatedTimestamp,
-            trace,
-            scopes,
-            superClass,
-            literals
-            ) ::=
-<<
-<leadIn("C source")>
-*/
-// [The "BSD license"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-<if(actions.(actionScope).header)>
-
-/* =============================================================================
- * This is what the grammar programmer asked us to put at the top of every file.
- */
-<actions.(actionScope).header>
-/* End of Header action.
- * =============================================================================
- */
-<endif>
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#include    "<name>.h"
-<actions.(actionScope).postinclude>
-/* ----------------------------------------- */
-
-<docComment>
-
-<if(literals)>
-/** String literals used by <name> that we must do things like MATCHS() with.
- *  C will normally just lay down 8 bit characters, and you can use L"xxx" to
- *  get wchar_t, but wchar_t is 16 bits on Windows, which is not UTF32 and so
- *  we perform this little trick of defining the literals as arrays of UINT32
- *  and passing in the address of these.
- */
-<literals:{it | static ANTLR3_UCHAR	lit_<i>[]  = <it>;}; separator="\n">
-
-<endif>
-
-
-
-
-/* MACROS that hide the C interface implementations from the
- * generated code, which makes it a little more understandable to the human eye.
- * I am very much against using C pre-processor macros for function calls and bits
- * of code as you cannot see what is happening when single stepping in debuggers
- * and so on. The exception (in my book at least) is for generated code, where you are
- * not maintaining it, but may wish to read and understand it. If you single step it, you know that input()
- * hides some indirect calls, but is always referring to the input stream. This is
- * probably more readable than ctx->input->istream->input(snarfle0->blarg) and allows me to rejig
- * the runtime interfaces without changing the generated code too often, without
- * confusing the reader of the generated output, who may not wish to know the gory
- * details of the interface inheritance.
- */
-
-#define		CTX	ctx
-
-/* Aids in accessing scopes for grammar programmers
- */
-#undef	SCOPE_TYPE
-#undef	SCOPE_STACK
-#undef	SCOPE_TOP
-#define	SCOPE_TYPE(scope)   p<name>_##scope##_SCOPE
-#define SCOPE_STACK(scope)  p<name>_##scope##Stack
-#define	SCOPE_TOP(scope)    ctx->p<name>_##scope##Top
-#define	SCOPE_SIZE(scope)		ctx->p<name>_##scope##Stack_limit
-#define SCOPE_INSTANCE(scope, i)	(ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope),i))
-
-<if(LEXER)>
-
-/* Macros for accessing things in a lexer
- */
-#undef	    LEXER
-#undef	    RECOGNIZER
-#undef	    RULEMEMO
-#undef	    GETCHARINDEX
-#undef	    GETLINE
-#undef	    GETCHARPOSITIONINLINE
-#undef	    EMIT
-#undef	    EMITNEW
-#undef	    MATCHC
-#undef	    MATCHS
-#undef	    MATCHRANGE
-#undef	    LTOKEN
-#undef	    HASFAILED
-#undef	    FAILEDFLAG
-#undef	    INPUT
-#undef	    STRSTREAM
-#undef	    LA
-#undef	    HASEXCEPTION
-#undef	    EXCEPTION
-#undef	    CONSTRUCTEX
-#undef	    CONSUME
-#undef	    LRECOVER
-#undef	    MARK
-#undef	    REWIND
-#undef	    REWINDLAST
-#undef	    BACKTRACKING
-#undef		MATCHANY
-#undef		MEMOIZE
-#undef		HAVEPARSEDRULE
-#undef		GETTEXT
-#undef		INDEX
-#undef		SEEK
-#undef		PUSHSTREAM
-#undef		POPSTREAM
-#undef		SETTEXT
-#undef		SETTEXT8
-
-#define	    LEXER					ctx->pLexer
-#define	    RECOGNIZER			    LEXER->rec
-#define		LEXSTATE				RECOGNIZER->state
-#define		TOKSOURCE				LEXSTATE->tokSource
-#define	    GETCHARINDEX()			LEXER->getCharIndex(LEXER)
-#define	    GETLINE()				LEXER->getLine(LEXER)
-#define	    GETTEXT()				LEXER->getText(LEXER)
-#define	    GETCHARPOSITIONINLINE() LEXER->getCharPositionInLine(LEXER)
-#define	    EMIT()					LEXSTATE->type = _type; LEXER->emit(LEXER)
-#define	    EMITNEW(t)				LEXER->emitNew(LEXER, t)
-#define	    MATCHC(c)				LEXER->matchc(LEXER, c)
-#define	    MATCHS(s)				LEXER->matchs(LEXER, s)
-#define	    MATCHRANGE(c1,c2)	    LEXER->matchRange(LEXER, c1, c2)
-#define	    MATCHANY()				LEXER->matchAny(LEXER)
-#define	    LTOKEN  				LEXSTATE->token
-#define	    HASFAILED()				(LEXSTATE->failed == ANTLR3_TRUE)
-#define	    BACKTRACKING			LEXSTATE->backtracking
-#define	    FAILEDFLAG				LEXSTATE->failed
-#define	    INPUT					LEXER->input
-#define	    STRSTREAM				INPUT
-#define		ISTREAM					INPUT->istream
-#define		INDEX()					ISTREAM->index(ISTREAM)
-#define		SEEK(n)					ISTREAM->seek(ISTREAM, n)
-#define	    EOF_TOKEN				&(LEXSTATE->tokSource->eofToken)
-#define	    HASEXCEPTION()			(LEXSTATE->error == ANTLR3_TRUE)
-#define	    EXCEPTION				LEXSTATE->exception
-#define	    CONSTRUCTEX()			RECOGNIZER->exConstruct(RECOGNIZER)
-#define	    LRECOVER()				LEXER->recover(LEXER)
-#define	    MARK()					ISTREAM->mark(ISTREAM)
-#define	    REWIND(m)				ISTREAM->rewind(ISTREAM, m)
-#define	    REWINDLAST()			ISTREAM->rewindLast(ISTREAM)
-#define		MEMOIZE(ri,si)			RECOGNIZER->memoize(RECOGNIZER, ri, si)
-#define		HAVEPARSEDRULE(r)		RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
-#define		PUSHSTREAM(str)			LEXER->pushCharStream(LEXER, str)
-#define		POPSTREAM()				LEXER->popCharStream(LEXER)
-#define		SETTEXT(str)			LEXSTATE->text = str
-#define		SKIP()					LEXSTATE->token = &(TOKSOURCE->skipToken)
-#define		USER1					LEXSTATE->user1
-#define		USER2					LEXSTATE->user2
-#define		USER3					LEXSTATE->user3
-#define		CUSTOM					LEXSTATE->custom
-#define		RULEMEMO				LEXSTATE->ruleMemo
-#define		DBG						RECOGNIZER->debugger
-
-/* If we have been told we can rely on the standard 8 bit or UTF16 input
- * stream, then we can define our macros to use the direct pointers
- * in the input object, which is much faster than indirect calls. This
- * is really only significant to lexers with a lot of fragment rules (which
- * do not place LA(1) in a temporary at the moment) and even then
- * only if there is a lot of input (order of say 1M or so).
- */
-#if	defined(ANTLR3_INLINE_INPUT_8BIT) || defined(ANTLR3_INLINE_INPUT_UTF16)
-
-# ifdef	ANTLR3_INLINE_INPUT_8BIT
-
-/* 8 bit character set */
-
-#  define	    NEXTCHAR	((pANTLR3_UINT8)(INPUT->nextChar))
-#  define	    DATAP	((pANTLR3_UINT8)(INPUT->data))
-
-# else
-
-#  define	    NEXTCHAR	((pANTLR3_UINT16)(INPUT->nextChar))
-#  define	    DATAP	((pANTLR3_UINT16)(INPUT->data))
-
-# endif
-
-# define	    LA(n) ((NEXTCHAR + n) > (DATAP + INPUT->sizeBuf) ? ANTLR3_CHARSTREAM_EOF : (ANTLR3_UCHAR)(*(NEXTCHAR + n - 1)))
-# define            CONSUME()                                           \\
-{                                                                       \\
-    if        (NEXTCHAR \< (DATAP + INPUT->sizeBuf))                     \\
-    {                                                                   \\
-        INPUT->charPositionInLine++;                                    \\
-        if  ((ANTLR3_UCHAR)(*NEXTCHAR) == INPUT->newlineChar)           \\
-        {                                                               \\
-            INPUT->line++;                                              \\
-            INPUT->charPositionInLine        = 0;                       \\
-            INPUT->currentLine                = (void *)(NEXTCHAR + 1); \\
-        }                                                               \\
-        INPUT->nextChar = (void *)(NEXTCHAR + 1);                       \\
-    }                                                                   \\
-}
-
-#else
-
-// Pick up the input character by calling the input stream implementation.
-//
-#define	    CONSUME()   INPUT->istream->consume(INPUT->istream)
-#define	    LA(n)       INPUT->istream->_LA(INPUT->istream, n)
-
-#endif
-<endif>
-
-<if(PARSER)>
-/* Macros for accessing things in the parser
- */
-
-#undef	    PARSER
-#undef	    RECOGNIZER
-#undef	    HAVEPARSEDRULE
-#undef		MEMOIZE
-#undef	    INPUT
-#undef	    STRSTREAM
-#undef	    HASEXCEPTION
-#undef	    EXCEPTION
-#undef	    MATCHT
-#undef	    MATCHANYT
-#undef	    FOLLOWSTACK
-#undef	    FOLLOWPUSH
-#undef	    FOLLOWPOP
-#undef	    PRECOVER
-#undef	    PREPORTERROR
-#undef	    LA
-#undef	    LT
-#undef	    CONSTRUCTEX
-#undef	    CONSUME
-#undef	    MARK
-#undef	    REWIND
-#undef	    REWINDLAST
-#undef	    PERRORRECOVERY
-#undef	    HASFAILED
-#undef	    FAILEDFLAG
-#undef	    RECOVERFROMMISMATCHEDSET
-#undef	    RECOVERFROMMISMATCHEDELEMENT
-#undef		INDEX
-#undef      ADAPTOR
-#undef		SEEK
-#undef	    RULEMEMO
-#undef		DBG
-
-#define	    PARSER				ctx->pParser
-#define	    RECOGNIZER				PARSER->rec
-#define	    PSRSTATE				RECOGNIZER->state
-#define	    HAVEPARSEDRULE(r)			RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
-#define	    MEMOIZE(ri,si)			RECOGNIZER->memoize(RECOGNIZER, ri, si)
-#define	    INPUT				PARSER->tstream
-#define	    STRSTREAM				INPUT
-#define	    ISTREAM				INPUT->istream
-#define	    INDEX()				ISTREAM->index(INPUT->istream)
-#define	    HASEXCEPTION()			(PSRSTATE->error == ANTLR3_TRUE)
-#define	    EXCEPTION				PSRSTATE->exception
-#define	    MATCHT(t, fs)			RECOGNIZER->match(RECOGNIZER, t, fs)
-#define	    MATCHANYT()				RECOGNIZER->matchAny(RECOGNIZER)
-#define	    FOLLOWSTACK				PSRSTATE->following
-#ifdef  SKIP_FOLLOW_SETS
-#define	    FOLLOWPUSH(x)
-#define	    FOLLOWPOP()
-#else
-#define	    FOLLOWPUSH(x)			FOLLOWSTACK->push(FOLLOWSTACK, ((void *)(&(x))), NULL)
-#define	    FOLLOWPOP()				FOLLOWSTACK->pop(FOLLOWSTACK)
-#endif
-#define	    PRECOVER()				RECOGNIZER->recover(RECOGNIZER)
-#define	    PREPORTERROR()			RECOGNIZER->reportError(RECOGNIZER)
-#define	    LA(n)				INPUT->istream->_LA(ISTREAM, n)
-#define	    LT(n)				INPUT->_LT(INPUT, n)
-#define	    CONSTRUCTEX()			RECOGNIZER->exConstruct(RECOGNIZER)
-#define	    CONSUME()				ISTREAM->consume(ISTREAM)
-#define	    MARK()				ISTREAM->mark(ISTREAM)
-#define	    REWIND(m)				ISTREAM->rewind(ISTREAM, m)
-#define	    REWINDLAST()			ISTREAM->rewindLast(ISTREAM)
-#define	    SEEK(n)				ISTREAM->seek(ISTREAM, n)
-#define	    PERRORRECOVERY			PSRSTATE->errorRecovery
-#define	    FAILEDFLAG				PSRSTATE->failed
-#define	    HASFAILED()				(FAILEDFLAG == ANTLR3_TRUE)
-#define	    BACKTRACKING			PSRSTATE->backtracking
-#define	    RECOVERFROMMISMATCHEDSET(s)		RECOGNIZER->recoverFromMismatchedSet(RECOGNIZER, s)
-#define	    RECOVERFROMMISMATCHEDELEMENT(e)	RECOGNIZER->recoverFromMismatchedElement(RECOGNIZER, s)
-#define     ADAPTOR                         ctx->adaptor
-#define		RULEMEMO						PSRSTATE->ruleMemo
-#define		DBG								RECOGNIZER->debugger
-
-<endif>
-
-<if(TREE_PARSER)>
-/* Macros for accessing things in the parser
- */
-
-#undef	    PARSER
-#undef	    RECOGNIZER
-#undef	    HAVEPARSEDRULE
-#undef	    INPUT
-#undef	    STRSTREAM
-#undef	    HASEXCEPTION
-#undef	    EXCEPTION
-#undef	    MATCHT
-#undef	    MATCHANYT
-#undef	    FOLLOWSTACK
-#undef	    FOLLOWPUSH
-#undef	    FOLLOWPOP
-#undef	    PRECOVER
-#undef	    PREPORTERROR
-#undef	    LA
-#undef	    LT
-#undef	    CONSTRUCTEX
-#undef	    CONSUME
-#undef	    MARK
-#undef	    REWIND
-#undef	    REWINDLAST
-#undef	    PERRORRECOVERY
-#undef	    HASFAILED
-#undef	    FAILEDFLAG
-#undef	    RECOVERFROMMISMATCHEDSET
-#undef	    RECOVERFROMMISMATCHEDELEMENT
-#undef	    BACKTRACKING
-#undef      ADAPTOR
-#undef	    RULEMEMO
-#undef		SEEK
-#undef		INDEX
-#undef		DBG
-
-#define	    PARSER							ctx->pTreeParser
-#define	    RECOGNIZER						PARSER->rec
-#define		PSRSTATE						RECOGNIZER->state
-#define	    HAVEPARSEDRULE(r)				RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
-#define	    INPUT							PARSER->ctnstream
-#define		ISTREAM							INPUT->tnstream->istream
-#define	    STRSTREAM						INPUT->tnstream
-#define	    HASEXCEPTION()					(PSRSTATE->error == ANTLR3_TRUE)
-#define	    EXCEPTION						PSRSTATE->exception
-#define	    MATCHT(t, fs)					RECOGNIZER->match(RECOGNIZER, t, fs)
-#define	    MATCHANYT()						RECOGNIZER->matchAny(RECOGNIZER)
-#define	    FOLLOWSTACK					    PSRSTATE->following
-#define	    FOLLOWPUSH(x)					FOLLOWSTACK->push(FOLLOWSTACK, ((void *)(&(x))), NULL)
-#define	    FOLLOWPOP()						FOLLOWSTACK->pop(FOLLOWSTACK)
-#define	    PRECOVER()						RECOGNIZER->recover(RECOGNIZER)
-#define	    PREPORTERROR()					RECOGNIZER->reportError(RECOGNIZER)
-#define	    LA(n)							ISTREAM->_LA(ISTREAM, n)
-#define	    LT(n)							INPUT->tnstream->_LT(INPUT->tnstream, n)
-#define	    CONSTRUCTEX()					RECOGNIZER->exConstruct(RECOGNIZER)
-#define	    CONSUME()						ISTREAM->consume(ISTREAM)
-#define	    MARK()							ISTREAM->mark(ISTREAM)
-#define	    REWIND(m)						ISTREAM->rewind(ISTREAM, m)
-#define	    REWINDLAST()					ISTREAM->rewindLast(ISTREAM)
-#define	    PERRORRECOVERY					PSRSTATE->errorRecovery
-#define	    FAILEDFLAG						PSRSTATE->failed
-#define	    HASFAILED()						(FAILEDFLAG == ANTLR3_TRUE)
-#define	    BACKTRACKING					PSRSTATE->backtracking
-#define	    RECOVERFROMMISMATCHEDSET(s)		RECOGNIZER->recoverFromMismatchedSet(RECOGNIZER, s)
-#define	    RECOVERFROMMISMATCHEDELEMENT(e)	RECOGNIZER->recoverFromMismatchedElement(RECOGNIZER, s)
-#define     ADAPTOR                         INPUT->adaptor
-#define		RULEMEMO						PSRSTATE->ruleMemo
-#define		SEEK(n)							ISTREAM->seek(ISTREAM, n)
-#define		INDEX()							ISTREAM->index(ISTREAM)
-#define		DBG								RECOGNIZER->debugger
-
-
-<endif>
-
-#define		TOKTEXT(tok, txt)				tok, (pANTLR3_UINT8)txt
-
-/* The 4 tokens defined below may well clash with your own #defines or token types. If so
- * then for the present you must use different names for your defines as these are hard coded
- * in the code generator. It would be better not to use such names internally, and maybe
- * we can change this in a forthcoming release. I deliberately do not #undef these
- * here as this will at least give you a redefined error somewhere if they clash.
- */
-#define	    UP	    ANTLR3_TOKEN_UP
-#define	    DOWN    ANTLR3_TOKEN_DOWN
-#define	    EOR	    ANTLR3_TOKEN_EOR
-#define	    INVALID ANTLR3_TOKEN_INVALID
-
-
-/* =============================================================================
- * Functions to create and destroy scopes. First come the rule scopes, followed
- * by the global declared scopes.
- */
-
-<rules: {r |<if(r.ruleDescriptor.ruleScope)>
-<ruleAttributeScopeFuncDecl(scope=r.ruleDescriptor.ruleScope)>
-<ruleAttributeScopeFuncs(scope=r.ruleDescriptor.ruleScope)>
-<endif>}>
-
-<recognizer.scopes:{it | <if(it.isDynamicGlobalScope)>
-<globalAttributeScopeFuncDecl(it)>
-<globalAttributeScopeFuncs(it)>
-<endif>}>
-
-/* ============================================================================= */
-
-/* =============================================================================
- * Start of recognizer
- */
-
-<recognizer>
-
-/* End of code
- * =============================================================================
- */
-
->>
-headerFileExtension() ::= ".h"
-
-headerFile( LEXER,
-            PARSER,
-            TREE_PARSER,
-            actionScope,
-            actions,
-            docComment,
-            recognizer,
-            name,
-            tokens,
-            tokenNames,
-            rules,
-            cyclicDFAs,
-            bitsets,
-            buildTemplate,
-            buildAST,
-            rewriteMode,
-            profile,
-            backtracking,
-            synpreds,
-            memoize,
-            numRules,
-            fileName,
-            ANTLRVersion,
-            generatedTimestamp,
-            trace,
-            scopes,
-			superClass,
-            literals
-        ) ::=
-<<
-<leadIn("C header")>
-<if(PARSER)>
- * The parser <mainName()>
-<endif>
-<if(LEXER)>
- * The lexer <mainName()>
-<endif>
-<if(TREE_PARSER)>
- * The tree parser <mainName()>
-<endif>
- * has the callable functions (rules) shown below,
- * which will invoke the code for the associated rule in the source grammar
- * assuming that the input stream is pointing to a token/text stream that could begin
- * this rule.
- *
- * For instance if you call the first (topmost) rule in a parser grammar, you will
- * get the results of a full parse, but calling a rule half way through the grammar will
- * allow you to pass part of a full token stream to the parser, such as for syntax checking
- * in editors and so on.
- *
- * The parser entry points are called indirectly (by function pointer to function) via
- * a parser context typedef p<name>, which is returned from a call to <name>New().
- *
-<if(LEXER)>
- * As this is a generated lexer, it is unlikely you will call it 'manually'. However
- * the methods are provided anyway.
- *
-<endif>
- * The methods in p<name> are as follows:
- *
- * <rules:{r | <if(!r.ruleDescriptor.isSynPred)> - <headerReturnType(ruleDescriptor=r.ruleDescriptor,...)>      p<name>-><r.ruleDescriptor.name>(p<name>)<endif>}; separator="\n * ">
- *
- * The return type for any particular rule is of course determined by the source
- * grammar file.
- */
-// [The "BSD license"]
-// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
-// http://www.temporal-wave.com
-// http://www.linkedin.com/in/jimidle
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-//    notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-//    notice, this list of conditions and the following disclaimer in the
-//    documentation and/or other materials provided with the distribution.
-// 3. The name of the author may not be used to endorse or promote products
-//    derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef	_<name>_H
-#define _<name>_H
-<actions.(actionScope).preincludes>
-/* =============================================================================
- * Standard antlr3 C runtime definitions
- */
-#include    \<antlr3.h>
-
-/* End of standard antlr 3 runtime definitions
- * =============================================================================
- */
-<actions.(actionScope).includes>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Forward declare the context typedef so that we can use it before it is
-// properly defined. Delegators and delegates (from import statements) are
-// interdependent and their context structures contain pointers to each other.
-// C only allows such things to be declared if you pre-declare the typedef.
-//
-typedef struct <name>_Ctx_struct <name>, * p<name>;
-
-<if(recognizer.grammar.delegates)>
-// Include delegate definition header files
-//
-<recognizer.grammar.delegates: {g|#include	\<<g.recognizerName>.h>}; separator="\n">
-
-<endif>
-
-
-<actions.(actionScope).header>
-
-#ifdef	ANTLR3_WINDOWS
-// Disable: Unreferenced parameter,							- Rules with parameters that are not used
-//          constant conditional,							- ANTLR realizes that a prediction is always true (synpred usually)
-//          initialized but unused variable					- tree rewrite variables declared but not needed
-//          Unreferenced local variable						- lexer rule declares but does not always use _type
-//          potentially uninitialized variable used		- retval always returned from a rule
-//			unreferenced local function has been removed	- usually getTokenNames or freeScope; they can go without warnings
-//
-// These are only really displayed at warning level /W4, but that is the code ideal I am aiming at,
-// and the codegen must generate some of these warnings by necessity, apart from 4100, which is
-// usually generated when a parser rule is given a parameter that it does not use. Mostly, though,
-// this is a matter of orthogonality, hence I disable that one.
-//
-#pragma warning( disable : 4100 )
-#pragma warning( disable : 4101 )
-#pragma warning( disable : 4127 )
-#pragma warning( disable : 4189 )
-#pragma warning( disable : 4505 )
-#pragma warning( disable : 4701 )
-#endif
-<if(backtracking)>
-
-/* ========================
- * BACKTRACKING IS ENABLED
- * ========================
- */
-<endif>
-
-<rules:{r |<headerReturnScope(ruleDescriptor=r.ruleDescriptor,...)>}>
-
-<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeDecl(it)><endif>}>
-<rules:{r |<ruleAttributeScopeDecl(scope=r.ruleDescriptor.ruleScope)>}>
-<if(recognizer.grammar.delegators)>
-// Include delegator definition header files
-//
-<recognizer.grammar.delegators: {g|#include	\<<g.recognizerName>.h>}; separator="\n">
-
-<endif>
-
-/** Context tracking structure for <mainName()>
- */
-struct <name>_Ctx_struct
-{
-    /** Built in ANTLR3 context tracker contains all the generic elements
-     *  required for context tracking.
-     */
-<if(PARSER)>
-    pANTLR3_PARSER   pParser;
-<endif>
-<if(LEXER)>
-    pANTLR3_LEXER    pLexer;
-<endif>
-<if(TREE_PARSER)>
-    pANTLR3_TREE_PARSER	    pTreeParser;
-<endif>
-
-<if(recognizer.grammar.delegates)>
-	<recognizer.grammar.delegates:
-         {g|p<g.recognizerName>	<g:delegateName()>;}; separator="\n">
-<endif>
-<if(recognizer.grammar.delegators)>
-	<recognizer.grammar.delegators:
-         {g|p<g.recognizerName>	<g:delegateName()>;}; separator="\n">
-<endif>
-<scopes:{it | <if(it.isDynamicGlobalScope)>
-    <globalAttributeScopeDef(it)>
-<endif>}; separator="\n\n">
-<rules: {r |<if(r.ruleDescriptor.ruleScope)>
-    <ruleAttributeScopeDef(scope=r.ruleDescriptor.ruleScope)>
-<endif>}>
-
-<if(LEXER)>
-    <rules:{r | <if(!r.ruleDescriptor.isSynPred)><headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*m<r.ruleDescriptor.name>)	(struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);<endif>}; separator="\n">
-<endif>
-<if(!LEXER)>
-    <rules:{r | <headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*<r.ruleDescriptor.name>)	(struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
-<! generate rule/method definitions for imported rules so they
-   appear to be defined in this recognizer. !>
-    // Delegated rules
-<recognizer.grammar.delegatedRules:{ruleDescriptor|
-    <headerReturnType(ruleDescriptor)> (*<ruleDescriptor.name>)(struct <name>_Ctx_struct * ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
-<endif>
-
-    const char * (*getGrammarFileName)();
-    void            (*reset)  (struct <name>_Ctx_struct * ctx);
-    void	    (*free)   (struct <name>_Ctx_struct * ctx);
-    <@members>
-    <@end>
-    <actions.(actionScope).context>
-};
-
-// Function prototypes for the constructor functions that external translation units
-// such as delegators and delegates may wish to call.
-//
-ANTLR3_API p<name> <name>New         (<inputType()> instream<recognizer.grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>);
-ANTLR3_API p<name> <name>NewSSD      (<inputType()> instream, pANTLR3_RECOGNIZER_SHARED_STATE state<recognizer.grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>);
-<if(!recognizer.grammar.grammarIsRoot)>
-extern pANTLR3_UINT8   <recognizer.grammar.composite.rootGrammar.recognizerName>TokenNames[];
-<endif>
-
-
-/** Symbolic definitions of all the tokens that the <grammarType()> will work with.
- * \{
- *
- * ANTLR will define EOF, but we can't use that as it is too common
- * in C header files and that would be confusing. There is no way to filter this out at the moment
- * so we just undef it here for now. That isn't the value we get back from C recognizers
- * anyway. We are looking for ANTLR3_TOKEN_EOF.
- */
-#ifdef	EOF
-#undef	EOF
-#endif
-#ifdef	Tokens
-#undef	Tokens
-#endif
-<tokens:{it | #define <it.name>      <it.type>}; separator="\n">
-#ifdef	EOF
-#undef	EOF
-#define	EOF	ANTLR3_TOKEN_EOF
-#endif
-
-#ifndef TOKENSOURCE
-#define TOKENSOURCE(lxr) lxr->pLexer->rec->state->tokSource
-#endif
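-
-<! Illustrative note (this ANTLR comment is not emitted): for a grammar whose
-   tokens include, say, ID and INT, the token list expansion above produces
-   plain C macros such as
-       #define ID      4
-       #define INT     5
-   The token type numbers are assigned by the code generator; the names and
-   values shown here are hypothetical. !>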
-
-/* End of token definitions for <name>
- * =============================================================================
- */
-/** \} */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-/* END - Note: Keep extra line feed to satisfy UNIX systems */
-
->>
-
-inputType() ::=<<
-<if(LEXER)>
-pANTLR3_INPUT_STREAM
-<endif>
-<if(PARSER)>
-pANTLR3_COMMON_TOKEN_STREAM
-<endif>
-<if(TREE_PARSER)>
-pANTLR3_COMMON_TREE_NODE_STREAM
-<endif>
->>
-
-grammarType() ::= <<
-<if(PARSER)>
-parser
-<endif>
-<if(LEXER)>
-lexer
-<endif>
-<if(TREE_PARSER)>
-tree parser
-<endif>
->>
-
-mainName() ::= <<
-<if(PARSER)>
-<name>
-<endif>
-<if(LEXER)>
-<name>
-<endif>
-<if(TREE_PARSER)>
-<name>
-<endif>
->>
-
-headerReturnScope(ruleDescriptor) ::= "<returnScope(...)>"
-
-headerReturnType(ruleDescriptor) ::= <<
-<if(LEXER)>
-<if(!ruleDescriptor.isSynPred)>
- void
-<else>
- <returnType()>
-<endif>
-<else>
- <returnType()>
-<endif>
->>
-
-// Produce the lexer output
-//
-lexer(  grammar,
-		name,
-        tokens,
-        scopes,
-        rules,
-        numRules,
-        filterMode,
-        superClass,
-        labelType="pANTLR3_COMMON_TOKEN") ::= <<
-
-<if(filterMode)>
-/* Forward declare implementation function for ANTLR3_TOKEN_SOURCE interface when
- * this is a filter mode lexer.
- */
-static pANTLR3_COMMON_TOKEN <name>NextToken   (pANTLR3_TOKEN_SOURCE toksource);
-
-/* Override the normal MEMOIZE and HAVEPARSEDRULE macros as this is a filtering
- * lexer. In filter mode, the memoizing and backtracking are gated at BACKTRACKING > 1 rather
- * than just BACKTRACKING. In some cases this might generate code akin to:
- *   if (BACKTRACKING) if (BACKTRACKING > 1) memoize.
- * However, I assume that the C compilers/optimizers are smart enough to work this one out
- * these days - Jim
- */
-#undef		MEMOIZE
-#define		MEMOIZE(ri,si)			if (BACKTRACKING>1) { RECOGNIZER->memoize(RECOGNIZER, ri, si); }
-#undef		HAVEPARSEDRULE
-#define		HAVEPARSEDRULE(r)		if (BACKTRACKING>1) { RECOGNIZER->alreadyParsedRule(RECOGNIZER, r); }
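-
-<! Illustrative note (not emitted): with the overrides above, a generated call
-   such as MEMOIZE(ri, si); expands to roughly
-       if (BACKTRACKING > 1) { RECOGNIZER->memoize(RECOGNIZER, ri, si); }
-   so memoization only kicks in above backtracking level 1, which is the level
-   at which the filtering nextToken() below drives mTokens(). !>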
-<endif>
-
-/* Forward declare the locally static matching functions we have generated and any predicate functions.
- */
-<rules:{r | static ANTLR3_INLINE <headerReturnType(ruleDescriptor=r.ruleDescriptor)>	<if(!r.ruleDescriptor.isSynPred)>m<endif><r.ruleDescriptor.name>    (p<name> ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
-static void	<name>Free(p<name> ctx);
-
-/* =========================================================================
- * Lexer matching rules end.
- * =========================================================================
- */
-
-<scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}>
-
-<actions.lexer.members>
-
-static void
-<name>Free  (p<name> ctx)
-{
-<if(memoize)>
-	if	(RULEMEMO != NULL)
-	{
-		RULEMEMO->free(RULEMEMO);
-		RULEMEMO = NULL;
-	}
-<endif>
-<if(grammar.directDelegates)>
-	// Free the lexers that we delegated functions to.
-	// NULL the state so we only free it once.
-	//
-	<grammar.directDelegates:
-         {g|ctx-><g:delegateName()>->pLexer->rec->state = NULL;
-         ctx-><g:delegateName()>->free(ctx-><g:delegateName()>);}; separator="\n">
-<endif>
-    LEXER->free(LEXER);
-
-    ANTLR3_FREE(ctx);
-}
-
-static void
-<name>Reset (p<name> ctx)
-{
-    RECOGNIZER->reset(RECOGNIZER);
-}
-
-/** \brief Name of the grammar file that generated this code
- */
-static const char fileName[] = "<fileName>";
-
-/** \brief Return the name of the grammar file that generated this code.
- */
-static const char * getGrammarFileName()
-{
-	return fileName;
-}
-
-<if(filterMode)>
-    <filteringNextToken()>
-<endif>
-
-/** \brief Create a new lexer called <name>
- *
- * \param[in]    instream Pointer to an initialized input stream
- * \return
- *     - Success p<name> initialized for the lex start
- *     - Fail NULL
- */
-ANTLR3_API p<name> <name>New
-(<inputType()> instream<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
-{
-	// See if we can create a new lexer with the standard constructor
-	//
-	return <name>NewSSD(instream, NULL<grammar.delegators:{g|, <g:delegateName()>}>);
-}
-
-/** \brief Create a new lexer called <name>
- *
- * \param[in]    instream Pointer to an initialized input stream
- * \param[in]    state Previously created shared recognizer state
- * \return
- *     - Success p<name> initialized for the lex start
- *     - Fail NULL
- */
-ANTLR3_API p<name> <name>NewSSD
-(pANTLR3_INPUT_STREAM instream, pANTLR3_RECOGNIZER_SHARED_STATE state<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
-{
-    p<name> ctx; // Context structure we will build and return
-
-    ctx = (p<name>) ANTLR3_CALLOC(1, sizeof(<name>));
-
-    if  (ctx == NULL)
-    {
-        // Failed to allocate memory for lexer context
-        return  NULL;
-    }
-
-    /* -------------------------------------------------------------------
-     * Memory for basic structure is allocated, now to fill in
-     * the base ANTLR3 structures. We initialize the function pointers
-     * for the standard ANTLR3 lexer function set, but upon return
-     * from here, the programmer may set the pointers to provide custom
-     * implementations of each function.
-     *
-     * We don't use the macros defined in <name>.h here so you can get a sense
-     * of what goes where.
-     */
-
-    /* Create a base lexer, using the supplied input stream
-     */
-    ctx->pLexer	= antlr3LexerNewStream(ANTLR3_SIZE_HINT, instream, state);
-
-    /* Check that we allocated the memory correctly
-     */
-    if	(ctx->pLexer == NULL)
-    {
-		ANTLR3_FREE(ctx);
-		return  NULL;
-    }
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-    // Create a LIST for recording rule memos.
-    //
-    ctx->pLexer->rec->ruleMemo    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */
-<endif>
-<endif>
-
-    /* Install the implementation of our <name> interface
-     */
-    <rules:{r | <if(!r.ruleDescriptor.isSynPred)>ctx->m<r.ruleDescriptor.name>	= m<r.ruleDescriptor.name>;<endif>}; separator="\n">
-
-    /** When the nextToken() call is made to this lexer's pANTLR3_TOKEN_SOURCE
-     *  it will call mTokens() in this generated code, and will pass it the ctx
-     * pointer of this lexer, not the context of the base lexer, so store that now.
-     */
-    ctx->pLexer->ctx	    = ctx;
-
-    /** Install the token matching function
-     */
-    ctx->pLexer->mTokens = (void (*) (void *))(mTokens);
-
-    ctx->getGrammarFileName	= getGrammarFileName;
-    ctx->free		= <name>Free;
-    ctx->reset          = <name>Reset;
-
-<if(grammar.directDelegates)>
-	// Initialize the lexers that we are going to delegate some
-	// functions to.
-	//
-	<grammar.directDelegates:
-         {g|ctx-><g:delegateName()> = <g.recognizerName>NewSSD(instream, ctx->pLexer->rec->state, ctx<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
-<endif>
-<if(grammar.delegators)>
-	// Install the pointers back to lexers that will delegate us to perform certain functions
-	// for them.
-	//
-	<grammar.delegators:
-         {g|ctx-><g:delegateName()>			= <g:delegateName()>;}; separator="\n">
-<endif>
-<if(filterMode)>
-    /* We have filter mode turned on, so install the filtering nextToken function
-     */
-    ctx->pLexer->rec->state->tokSource->nextToken = <name>NextToken;
-<endif>
-	 <actions.lexer.apifuncs>
-
-    /* Return the newly built lexer to the caller
-     */
-    return  ctx;
-}
-<if(cyclicDFAs)>
-
-/* =========================================================================
- * DFA tables for the lexer
- */
-<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-/* =========================================================================
- * End of DFA tables for the lexer
- */
-<endif>
-
-/* =========================================================================
- * Functions to match the lexer grammar defined tokens from the input stream
- */
-
-<rules; separator="\n\n">
-
-/* =========================================================================
- * Lexer matching rules end.
- * =========================================================================
- */
-<if(synpreds)>
-
-/* =========================================================================
- * Lexer syntactic predicates
- */
-<synpreds:{p | <lexerSynpred(predname=p)>}>
-/* =========================================================================
- * Lexer syntactic predicates end.
- * =========================================================================
- */
-<endif>
-
-/* End of Lexer code
- * ================================================
- * ================================================
- */
-
->>
-
-
-filteringNextToken() ::= <<
-/** An override of the lexer's nextToken() method that backtracks over mTokens() looking
- *  for matches in lexer filterMode.  No error can be reported on a failed match; just rewind, consume
- *  a character and then try again.  BACKTRACKING needs to be set as well.
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at BACKTRACKING==1.
- */
-static pANTLR3_COMMON_TOKEN
-<name>NextToken(pANTLR3_TOKEN_SOURCE toksource)
-{
-    pANTLR3_LEXER   lexer;
-	pANTLR3_RECOGNIZER_SHARED_STATE state;
-
-    lexer   = (pANTLR3_LEXER)(toksource->super);
-    state	= lexer->rec->state;
-
-    /* Get rid of any previous token (the token factory takes care of
-     * any deallocation when this token is finally used up).
-     */
-    state		->token	    = NULL;
-    state		->error	    = ANTLR3_FALSE;	    /* Start out without an exception	*/
-    state		->failed    = ANTLR3_FALSE;
-
-    /* Record the start of the token in our input stream.
-     */
-    state->tokenStartCharIndex			= lexer->input->istream->index(lexer->input->istream);
-    state->tokenStartCharPositionInLine	= lexer->input->getCharPositionInLine(lexer->input);
-    state->tokenStartLine				= lexer->input->getLine(lexer->input);
-    state->text							= NULL;
-
-    /* Now call the matching rules and see if we can generate a new token
-     */
-    for	(;;)
-    {
-		if  (lexer->input->istream->_LA(lexer->input->istream, 1) == ANTLR3_CHARSTREAM_EOF)
-		{
-			/* Reached the end of the stream, nothing more to do.
-			 */
-			pANTLR3_COMMON_TOKEN    teof = &(toksource->eofToken);
-
-			teof->setStartIndex (teof, lexer->getCharIndex(lexer));
-			teof->setStopIndex  (teof, lexer->getCharIndex(lexer));
-			teof->setLine		(teof, lexer->getLine(lexer));
-			return  teof;
-		}
-
-		state->token		= NULL;
-		state->error		= ANTLR3_FALSE;	    /* Start out without an exception	*/
-
-		{
-			ANTLR3_MARKER   m;
-
-			m						= lexer->input->istream->mark(lexer->input->istream);
-			state->backtracking		= 1;				/* No exceptions */
-			state->failed			= ANTLR3_FALSE;
-
-			/* Call the generated lexer, see if it can get a new token together.
-			 */
-			lexer->mTokens(lexer->ctx);
-    		state->backtracking	= 0;
-
-    		<! mTokens backtracks with synpred at BACKTRACKING==2
-				and we set the synpredgate to allow actions at level 1. !>
-
-			if	(state->failed == ANTLR3_TRUE)
-			{
-				lexer->input->istream->rewind(lexer->input->istream, m);
-				lexer->input->istream->consume(lexer->input->istream); <! advance one char and try again !>
-			}
-			else
-			{
-				lexer->emit(lexer);					/* Assemble the token and emit it to the stream */
-				return	state->token;
-			}
-		}
-    }
-}
->>
-
-actionGate() ::= "BACKTRACKING==0"
-
-filteringActionGate() ::= "BACKTRACKING==1"
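-
-// Illustrative note: these gate templates supply the condition used to guard user
-// actions in the generated code, so an embedded action is typically emitted as
-// something like
-//     if ( BACKTRACKING == 0 ) { /* user action */ }
-// (or BACKTRACKING == 1 for a filter-mode lexer), which keeps actions from running
-// while a syntactic predicate is merely probing the input.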
-
-/** How to generate a parser */
-genericParser(  grammar,
-				name,
-                scopes,
-                tokens,
-                tokenNames,
-                rules,
-                numRules,
-                bitsets,
-                inputStreamType,
-                superClass,
-                labelType,
-				members,
-				rewriteElementType, filterMode,
-                ASTLabelType="pANTLR3_BASE_TREE"
-              ) ::= <<
-
-
-<if(grammar.grammarIsRoot)>
-/** \brief Table of all token names in symbolic order, mainly used for
- *         error reporting.
- */
-pANTLR3_UINT8   <name>TokenNames[<length(tokenNames)>+4]
-     = {
-        (pANTLR3_UINT8) "\<invalid>",       /* String to print to indicate an invalid token */
-        (pANTLR3_UINT8) "\<EOR>",
-        (pANTLR3_UINT8) "\<DOWN>",
-        (pANTLR3_UINT8) "\<UP>",
-        <tokenNames:{it |(pANTLR3_UINT8) <it>}; separator=",\n">
-       };
-<endif>
-
-    <@members>
-
-    <@end>
-<rules:{r |<ruleAttributeScopeFuncMacro(scope=r.ruleDescriptor.ruleScope)>}>
-<scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScopeFuncMacro(it)><endif>}>
-
-// Forward declare the locally static matching functions we have generated.
-//
-<rules:{r | static <headerReturnType(ruleDescriptor=r.ruleDescriptor)>	<r.ruleDescriptor.name>    (p<name> ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
-static void	<name>Free(p<name> ctx);
-static void     <name>Reset (p<name> ctx);
-
-<if(!LEXER)>
-<! generate rule/method definitions for imported rules so they
-   appear to be defined in this recognizer. !>
-<if(recognizer.grammar.delegatedRules)>
-// Delegated rules
-//
-<recognizer.grammar.delegatedRules:{ruleDescriptor|static <headerReturnType(ruleDescriptor)> <ruleDescriptor.name>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
-
-<endif>
-<endif>
-
-/* For use in tree output where we are accumulating rule labels via label += ruleRef
- * we need a function that knows how to free a return scope when the list is destroyed.
- * We cannot just use ANTLR3_FREE because in debug tracking mode, this is a macro.
- */
-static	void ANTLR3_CDECL freeScope(void * scope)
-{
-    ANTLR3_FREE(scope);
-}
-
-/** \brief Name of the grammar file that generated this code
- */
-static const char fileName[] = "<fileName>";
-
-/** \brief Return the name of the grammar file that generated this code.
- */
-static const char * getGrammarFileName()
-{
-	return fileName;
-}
-/** \brief Create a new <name> parser and return a context for it.
- *
- * \param[in] instream Pointer to an input stream interface.
- *
- * \return Pointer to new parser context upon success.
- */
-ANTLR3_API p<name>
-<name>New   (<inputStreamType> instream<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
-{
-	// See if we can create a new parser with the standard constructor
-	//
-	return <name>NewSSD(instream, NULL<grammar.delegators:{g|, <g:delegateName()>}>);
-}
-
-/** \brief Create a new <name> parser and return a context for it.
- *
- * \param[in] instream Pointer to an input stream interface.
- *
- * \return Pointer to new parser context upon success.
- */
-ANTLR3_API p<name>
-<name>NewSSD   (<inputStreamType> instream, pANTLR3_RECOGNIZER_SHARED_STATE state<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
-{
-    p<name> ctx;	    /* Context structure we will build and return   */
-
-    ctx	= (p<name>) ANTLR3_CALLOC(1, sizeof(<name>));
-
-    if	(ctx == NULL)
-    {
-		// Failed to allocate memory for parser context
-		//
-        return  NULL;
-    }
-
-    /* -------------------------------------------------------------------
-     * Memory for basic structure is allocated, now to fill in
-     * the base ANTLR3 structures. We initialize the function pointers
-     * for the standard ANTLR3 parser function set, but upon return
-     * from here, the programmer may set the pointers to provide custom
-     * implementations of each function.
-     *
-     * We don't use the macros defined in <name>.h here, in order that you can get a sense
-     * of what goes where.
-     */
-
-<if(PARSER)>
-    /* Create a base parser/recognizer, using the supplied token stream
-     */
-    ctx->pParser	    = antlr3ParserNewStream(ANTLR3_SIZE_HINT, instream->tstream, state);
-<endif>
-<if(TREE_PARSER)>
-    /* Create a base Tree parser/recognizer, using the supplied tree node stream
-     */
-    ctx->pTreeParser		= antlr3TreeParserNewStream(ANTLR3_SIZE_HINT, instream, state);
-<endif>
-
-    /* Install the implementation of our <name> interface
-     */
-    <rules:{r | ctx-><r.ruleDescriptor.name>	= <r.ruleDescriptor.name>;}; separator="\n">
-<if(grammar.delegatedRules)>
-	// Install the delegated methods so that they appear to be a part of this
-	// parser
-	//
-    <grammar.delegatedRules:{ruleDescriptor | ctx-><ruleDescriptor.name>	= <ruleDescriptor.name>;}; separator="\n">
-<endif>
-
-    ctx->free			= <name>Free;
-    ctx->reset			= <name>Reset;
-    ctx->getGrammarFileName	= getGrammarFileName;
-
-    /* Install the scope pushing methods.
-     */
-    <rules: {r |<if(r.ruleDescriptor.ruleScope)>
-<ruleAttributeScope(scope=r.ruleDescriptor.ruleScope)><\n>
-<endif>}>
-    <recognizer.scopes:{it |<if(it.isDynamicGlobalScope)>
-<globalAttributeScope(it)><\n>
-<endif>}>
-    <@apifuncs>
-
-    <@end>
-<if(grammar.directDelegates)>
-	// Initialize the parsers that we are going to delegate some
-	// functions to.
-	//
-	<grammar.directDelegates:
-         {g|ctx-><g:delegateName()> = <g.recognizerName>NewSSD(instream, PSRSTATE, ctx<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
-<endif>
-<if(grammar.delegators)>
-	// Install the pointers back to parsers that will delegate us to perform certain functions
-	// for them.
-	//
-	<grammar.delegators:
-         {g|ctx-><g:delegateName()>			= <g:delegateName()>;}; separator="\n">
-<endif>
-    <actions.parser.apifuncs>
-    <actions.treeparser.apifuncs>
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-    /* Create a LIST for recording rule memos.
-     */
-     RULEMEMO    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */<\n>
-<endif>
-<endif>
-    /* Install the token table
-     */
-    PSRSTATE->tokenNames   = <grammar.composite.rootGrammar.recognizerName>TokenNames;
-
-    <@debugStuff()>
-
-    /* Return the newly built parser to the caller
-     */
-    return  ctx;
-}
-
-static void
-<name>Reset (p<name> ctx)
-{
-    RECOGNIZER->reset(RECOGNIZER);
-}
-
-/** Free the parser resources
- */
- static void
- <name>Free(p<name> ctx)
- {
-    /* Free any scope memory
-     */
-    <rules: {r |<if(r.ruleDescriptor.ruleScope)><ruleAttributeScopeFree(scope=r.ruleDescriptor.ruleScope)><\n><endif>}>
-    <recognizer.scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScopeFree(it)><\n><endif>}>
-
-    <@cleanup>
-    <@end>
-<if(grammar.directDelegates)>
-	// Free the parsers that we delegated functions to.
-	// NULL the state so we only free it once.
-	//
-	<grammar.directDelegates:
-         {g| ctx-><g:delegateName()>-><if(TREE_PARSER)>pTreeParser<else>pParser<endif>->rec->state = NULL;
-         ctx-><g:delegateName()>->free(ctx-><g:delegateName()>);}; separator="\n">
-<endif>
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-	if	(RULEMEMO != NULL)
-	{
-		RULEMEMO->free(RULEMEMO);
-		RULEMEMO = NULL;
-	}
-<endif>
-<endif>
-	// Free this parser
-	//
-<if(TREE_PARSER)>
-    ctx->pTreeParser->free(ctx->pTreeParser);<\n>
-<else>
-    ctx->pParser->free(ctx->pParser);<\n>
-<endif>
-
-    ANTLR3_FREE(ctx);
-
-    /* Everything is released, so we can return
-     */
-    return;
- }
-
-/** Return token names used by this <grammarType()>
- *
- * The returned pointer points to the token names table (which is indexed
- * by token number).
- *
- * \return Pointer to first char * in the table.
- */
-static pANTLR3_UINT8    *getTokenNames()
-{
-        return <grammar.composite.rootGrammar.recognizerName>TokenNames;
-}
-
-    <members>
-
-/* Declare the bitsets
- */
-<bitsets:{it | <bitsetDeclare(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-                    words64=it.bits)>}>
-
-
-<if(cyclicDFAs)>
-
-/* =========================================================================
- * DFA tables for the parser
- */
-<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-/* =========================================================================
- * End of DFA tables for the parser
- */
-<endif>
-
-/* ==============================================
- * Parsing rules
- */
-<rules; separator="\n\n">
-<if(grammar.delegatedRules)>
-	// Delegated methods that appear to be a part of this
-	// parser
-	//
-<grammar.delegatedRules:{ruleDescriptor|
-    <returnType()> <ruleDescriptor.name>(p<name> ctx<if(ruleDescriptor.parameterScope.attributes)>, <endif><ruleDescriptor.parameterScope:parameterScope()>)
-    {
-        <if(ruleDescriptor.hasReturnValue)>return <endif>       ctx-><ruleDescriptor.grammar:delegateName()>-><ruleDescriptor.name>(ctx-><ruleDescriptor.grammar:delegateName()><if(ruleDescriptor.parameterScope.attributes)>, <endif><ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">);
-	\}}; separator="\n">
-
-<endif>
-/* End of parsing rules
- * ==============================================
- */
-
-/* ==============================================
- * Syntactic predicates
- */
-<synpreds:{p | <synpred(predname=p)>}>
-/* End of syntactic predicates
- * ==============================================
- */
-
-
-
-
-
->>
-
-parser(	grammar,
-		name,
-		scopes,
-		tokens,
-		tokenNames,
-		rules,
-		numRules,
-		bitsets,
-		ASTLabelType,
-		superClass="Parser",
-		labelType="pANTLR3_COMMON_TOKEN",
-		members={<actions.parser.members>}
-		) ::= <<
-<genericParser(inputStreamType="pANTLR3_COMMON_TOKEN_STREAM", rewriteElementType="TOKEN", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(	grammar,
-			name,
-			scopes,
-			tokens,
-			tokenNames,
-			globalAction,
-			rules,
-			numRules,
-			bitsets,
-			filterMode,
-			labelType={<ASTLabelType>},
-			ASTLabelType="pANTLR3_BASE_TREE",
-			superClass="TreeParser",
-			members={<actions.treeparser.members>}
-			) ::= <<
-<genericParser(inputStreamType="pANTLR3_COMMON_TREE_NODE_STREAM", rewriteElementType="NODE", ...)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  nor parameters etc., just give the simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-// $ANTLR start <ruleName>
-static void <ruleName>_fragment(p<name> ctx <ruleDescriptor.parameterScope:parameterScope()>)
-{
-	<ruleLabelDefs()>
-	<ruleLabelInitializations()>
-<if(trace)>
-    ANTLR3_PRINTF("enter <ruleName> %p failed = %d, backtracking = %d\\n", LT(1), FAILEDFLAG, BACKTRACKING);
-    <block>
-    ANTLR3_PRINTF("exit <ruleName> %p, failed = %d, backtracking = %d\\n", LT(1), FAILEDFLAG, BACKTRACKING);
-
-<else>
-    <block>
-<endif>
-<ruleCleanUp()>
-}
-// $ANTLR end <ruleName>
->>
-
-synpred(predname) ::= <<
-static ANTLR3_BOOLEAN <predname>(p<name> ctx)
-{
-    ANTLR3_MARKER   start;
-    ANTLR3_BOOLEAN  success;
-
-    BACKTRACKING++;
-    <@start()>
-    start	= MARK();
-    <predname>_fragment(ctx);	    // can never throw exception
-    success	= !(FAILEDFLAG);
-    REWIND(start);
-    <@stop()>
-    BACKTRACKING--;
-    FAILEDFLAG	= ANTLR3_FALSE;
-    return success;
-}<\n>
->>
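-
-// Illustrative note: a generated prediction such as synpred1_MyGrammar (the name is
-// hypothetical) is invoked from decision code roughly as
-//     if ( synpred1_MyGrammar(ctx) ) { alt1 = 1; }
-// The function marks the input, runs the _fragment rule with BACKTRACKING raised,
-// records whether FAILEDFLAG was set, rewinds, and returns success, leaving the
-// input stream exactly where it started.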
-
-lexerSynpred(predname) ::= <<
-<synpred(predname)>
->>
-
-ruleMemoization(rname) ::= <<
-<if(memoize)>
-if ( (BACKTRACKING>0) && (HAVEPARSEDRULE(<ruleDescriptor.index>)) )
-{
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!ruleDescriptor.isSynPred)>
-	retval.start = 0;<\n>
-<endif>
-<endif>
-    <(ruleDescriptor.actions.after):execAfter()>
-    <finalCode(finalBlock=finally)>
-<if(!ruleDescriptor.isSynPred)>
-    <scopeClean()><\n>
-<endif>
-    return <ruleReturnValue()>;
-}
-<endif>
->>
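-
-// Illustrative note: when memoize is on, the fragment above sits at the top of every
-// rule function, so re-entering a rule that has already been attempted at this input
-// position during backtracking returns immediately instead of re-parsing, roughly
-//     if ( (BACKTRACKING > 0) && (HAVEPARSEDRULE(5)) ) { ...; return retval; }
-// where 5 stands in for a hypothetical rule index.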
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-if  (HASEXCEPTION())
-{
-    goto rule<ruleDescriptor.name>Ex;
-}
-<if(backtracking)>
-if (HASFAILED())
-{
-    <scopeClean()>
-    <@debugClean()>
-    return <ruleReturnValue()>;
-}
-<endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>
-if (BACKTRACKING>0)
-{
-    FAILEDFLAG = <true_value()>;
-    <scopeClean()>
-    return <ruleReturnValue()>;
-}
-<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-/**
- * $ANTLR start <ruleName>
- * <fileName>:<description>
- */
-static <returnType()>
-<ruleName>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>)
-{
-    <if(trace)>ANTLR3_PRINTF("enter <ruleName> %p failed=%d, backtracking=%d\n", LT(1), FAILEDFLAG, BACKTRACKING);<endif>
-    <ruleDeclarations()>
-    <ruleDescriptor.actions.declarations>
-    <ruleLabelDefs()>
-    <ruleInitializations()>
-    <ruleDescriptor.actions.init>
-    <ruleMemoization(rname=ruleName)>
-    <ruleLabelInitializations()>
-    <@preamble()>
-    {
-        <block>
-    }
-
-    <ruleCleanUp()>
-<if(exceptions)>
-    if	(HASEXCEPTION())
-    {
-	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-    }
-    else
-    {
-	<(ruleDescriptor.actions.after):execAfter()>
-    }
-<else>
-    <if(!emptyRule)>
-        <if(actions.(actionScope).rulecatch)>
-            <actions.(actionScope).rulecatch>
-        <else>
-            if (HASEXCEPTION())
-            {
-                PREPORTERROR();
-                PRECOVER();
-                <@setErrorReturnValue()>
-            }
-            <if(ruleDescriptor.actions.after)>
-            else
-            {
-                <(ruleDescriptor.actions.after):execAfter()>
-            }<\n>
-            <endif>
-        <endif>
-    <endif>
-<endif>
-
-    <if(trace)>ANTLR3_PRINTF("exit <ruleName> %p failed=%d backtracking=%d\n", LT(1), FAILEDFLAG, BACKTRACKING);<endif>
-    <memoize()>
-<if(finally)>
-    <finalCode(finalBlock=finally)>
-<endif>
-    <scopeClean()>
-    <@postamble()>
-    return <ruleReturnValue()>;
-}
-/* $ANTLR end <ruleName> */
->>
-
-finalCode(finalBlock) ::= <<
-{
-    <finalBlock>
-}
-
->>
-
-catch(decl,action) ::= <<
-/* catch(decl,action)
- */
-{
-    <e.action>
-}
-
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<returnType()> retval;<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.type> <a.name>;
-}>
-<endif>
-<if(memoize)>
-ANTLR3_UINT32 <ruleDescriptor.name>_StartIndex;
-<endif>
->>
-
-ruleInitializations() ::= <<
-/* Initialize rule variables
- */
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor.returnScope.attributes:{ a |
-<if(a.initValue)>retval.<a.name> = <a.initValue>;<endif>
-}>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<if(a.initValue)><a.name> = <a.initValue>;<endif>
-}>
-<endif>
-<if(memoize)>
-<ruleDescriptor.name>_StartIndex = INDEX();<\n>
-<endif>
-<ruleDescriptor.useScopes:{it |<scopeTop(it)> = <scopePush(it)>;}; separator="\n">
-<ruleDescriptor.ruleScope:{it |<scopeTop(it.name)> = <scopePush(it.name)>;}; separator="\n">
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
-    :{it |<labelType>    <it.label.text>;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
-    :{it |pANTLR3_VECTOR    list_<it.label.text>;}; separator="\n"
->
-<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
-    :ruleLabelDef(); separator="\n"
->
->>
-
-ruleLabelInitializations() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
-    :{it |<it.label.text>       = NULL;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
-    :{it |list_<it.label.text>     = NULL;}; separator="\n"
->
-<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
-    :ruleLabelInitVal(); separator="\n"
->
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!ruleDescriptor.isSynPred)>
-retval.start = LT(1); retval.stop = retval.start;<\n>
-<endif>
-<endif>
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{it |<labelType> <it.label.text>;}; separator="\n"
->
-<ruleDescriptor.charLabels:{it |ANTLR3_UINT32 <it.label.text>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{it |pANTLR3_INT_TRIE list_<it.label.text>;}; separator="\n"
->
->>
-
-lexerRuleLabelInit() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{it |<it.label.text> = NULL;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{it |list_<it.label.text> = antlr3IntTrieNew(31);}; separator="\n"
->
->>
-
-lexerRuleLabelFree() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{it |<it.label.text> = NULL;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{it |list_<it.label.text>->free(list_<it.label.text>);}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <%
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-retval
-<endif>
-<endif>
-<endif>
-%>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if ( BACKTRACKING>0 ) { MEMOIZE(<ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
-<endif>
-<endif>
->>
-
-ruleCleanUp() ::= <<
-
-// This is where rules clean up and exit
-//
-goto rule<ruleDescriptor.name>Ex; /* Prevent compiler warnings */
-rule<ruleDescriptor.name>Ex: ;
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-<if(!ruleDescriptor.isSynPred)>
-retval.stop = LT(-1);<\n>
-<endif>
-<endif>
-<endif>
->>
-
-scopeClean() ::= <<
-<ruleDescriptor.useScopes:{it |<scopePop(it)>}; separator="\n">
-<ruleDescriptor.ruleScope:{it |<scopePop(it.name)>}; separator="\n">
-
->>
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules, which do not produce tokens.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-//   Comes from: <block.description>
-/** \brief Lexer rule generated by ANTLR3
- *
- * $ANTLR start <ruleName>
- *
- * Looks to match the characters that constitute the token <ruleName>
- * from the attached input stream.
- *
- *
- * \remark
- *  - lexer->error == ANTLR3_TRUE if an exception was thrown.
- */
-static ANTLR3_INLINE
-void m<ruleName>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>)
-{
-	ANTLR3_UINT32	_type;
-    <ruleDeclarations()>
-    <ruleDescriptor.actions.declarations>
-    <lexerRuleLabelDefs()>
-    <if(trace)>ANTLR3_FPRINTF(stderr, "enter <ruleName> '%c' line=%d:%d failed = %d, backtracking = %d\n", LA(1), GETLINE(), GETCHARPOSITIONINLINE(), failed, BACKTRACKING);<endif>
-
-<if(nakedBlock)>
-    <ruleMemoization(rname=ruleName)>
-    <lexerRuleLabelInit()>
-    <ruleDescriptor.actions.init>
-
-    <block><\n>
-<else>
-    <ruleMemoization(rname=ruleName)>
-    <lexerRuleLabelInit()>
-    _type	    = <ruleName>;
-
-    <ruleDescriptor.actions.init>
-
-    <block>
-	LEXSTATE->type = _type;
-<endif>
-    <if(trace)> ANTLR3_FPRINTF(stderr, "exit <ruleName> '%c' line=%d:%d failed = %d, backtracking =%d\n",LA(1),GETLINE(),GETCHARPOSITIONINLINE(),failed,BACKTRACKING);<endif>
-    <ruleCleanUp()>
-    <lexerRuleLabelFree()>
-    <(ruleDescriptor.actions.after):execAfter()>
-    <memoize>
-}
-// $ANTLR end <ruleName>
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-/** This is the entry point in to the lexer from an object that
- *  wants to generate the next token, such as a pANTLR3_COMMON_TOKEN_STREAM
- */
-static void
-mTokens(p<name> ctx)
-{
-    <block><\n>
-
-    goto ruleTokensEx; /* Prevent compiler warnings */
-ruleTokensEx: ;
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-
-// <fileName>:<description>
-{
-    int alt<decisionNumber>=<maxAlt>;
-    <decls>
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    <@prebranch()>
-    switch (alt<decisionNumber>)
-    {
-	<alts:{a | <altSwitchCase(i,a)>}>
-    }
-    <@postbranch()>
-}
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-{
-    //  <fileName>:<description>
-
-    ANTLR3_UINT32 alt<decisionNumber>;
-
-    alt<decisionNumber>=<maxAlt>;
-
-    <decls>
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>)
-    {
-	<alts:{a | <altSwitchCase(i,a)>}>
-    }
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-{
-    int cnt<decisionNumber>=0;
-    <decls>
-    <@preloop()>
-
-    for (;;)
-    {
-        int alt<decisionNumber>=<maxAlt>;
-	<@predecision()>
-	<decision>
-	<@postdecision()>
-	switch (alt<decisionNumber>)
-	{
-	    <alts:{a | <altSwitchCase(i,a)>}>
-	    default:
-
-		if ( cnt<decisionNumber> >= 1 )
-		{
-		    goto loop<decisionNumber>;
-		}
-		<ruleBacktrackFailure()>
-		<earlyExitEx()>
-		<@earlyExitException()>
-		goto rule<ruleDescriptor.name>Ex;
-	}
-	cnt<decisionNumber>++;
-    }
-    loop<decisionNumber>: ;	/* Jump to here if this rule does not match */
-    <@postloop()>
-}
->>
-
-earlyExitEx() ::= <<
-/* earlyExitEx()
- */
-CONSTRUCTEX();
-EXCEPTION->type = ANTLR3_EARLY_EXIT_EXCEPTION;
-EXCEPTION->name = (void *)ANTLR3_EARLY_EXIT_NAME;
-<\n>
->>
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-
-// <fileName>:<description>
-<decls>
-
-<@preloop()>
-for (;;)
-{
-    int alt<decisionNumber>=<maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>)
-    {
-	<alts:{a | <altSwitchCase(i,a)>}>
-	default:
-	    goto loop<decisionNumber>;	/* break out of the loop */
-	    break;
-    }
-}
-loop<decisionNumber>: ; /* Jump out to here if this rule does not match */
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) by antlr before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase(altNum,alt) ::= <<
-case <altNum>:
-    <@prealt()>
-    <alt>
-    break;<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-// <fileName>:<description>
-{
-    <@declarations()>
-    <@initializations()>
-    <elements:element()>
-    <rew>
-    <@cleanup()>
-}
->>
-
-// E L E M E N T S
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-/** Dump the elements one per line */
-element(e) ::= <<
-<@prematch()>
-<e.el><\n>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label> = (<labelType>)<endif> MATCHT(<token>, &FOLLOW_<token>_in_<ruleName><elementIndex>);
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-listLabel(label,elem) ::= <<
-if (list_<label> == NULL)
-{
-    list_<label>=ctx->vectors->newVector(ctx->vectors);
-}
-list_<label>->add(list_<label>, <elem>, NULL);
->>
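-
-// Illustrative note: for a list label such as ids+=ID, the template above lazily creates
-// a pANTLR3_VECTOR the first time the label is assigned and then appends each matched
-// element, roughly
-//     if (list_ids == NULL) { list_ids = ctx->vectors->newVector(ctx->vectors); }
-//     list_ids->add(list_ids, ids, NULL);
-// where ids is a hypothetical label name.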
-
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = LA(1);<\n>
-<endif>
-MATCHC(<char>);
-<checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = LA(1);<\n>
-<endif>
-MATCHRANGE(<a>, <b>);
-<checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
-<if(label)>
-<if(LEXER)>
-<label>= LA(1);<\n>
-<else>
-<label>=(<labelType>)LT(1);<\n>
-<endif>
-<endif>
-if ( <s> )
-{
-    CONSUME();
-    <postmatchCode>
-<if(!LEXER)>
-    PERRORRECOVERY=ANTLR3_FALSE;
-<endif>
-    <if(backtracking)>FAILEDFLAG=ANTLR3_FALSE;<\n><endif>
-}
-else
-{
-    <ruleBacktrackFailure()>
-    <mismatchedSetEx()>
-    <@mismatchedSetException()>
-<if(LEXER)>
-    LRECOVER();
-<else>
-<! use following code to make it recover inline;
-    RECOVERFROMMISMATCHEDSET(&FOLLOW_set_in_<ruleName><elementIndex>);
-!>
-<endif>
-    goto rule<ruleDescriptor.name>Ex;
-}<\n>
->>
-
-mismatchedSetEx() ::= <<
-CONSTRUCTEX();
-EXCEPTION->type         = ANTLR3_MISMATCHED_SET_EXCEPTION;
-EXCEPTION->name         = (void *)ANTLR3_MISMATCHED_SET_NAME;
-<if(PARSER)>
-EXCEPTION->expectingSet = NULL;
-<! use following code to make it recover inline;
-EXCEPTION->expectingSet = &FOLLOW_set_in_<ruleName><elementIndex>;
-!>
-<endif>
->>
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex) ::= <<
-<if(label)>
-<label>Start = GETCHARINDEX();
-MATCHS(<string>);
-<checkRuleBacktrackFailure()>
-<label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
-<label>->setType(<label>, ANTLR3_TOKEN_INVALID);
-<label>->setStartIndex(<label>, <label>Start);
-<label>->setStopIndex(<label>, GETCHARINDEX()-1);
-<label>->input = INPUT->tnstream->istream;
-<else>
-MATCHS(<string>);
-<checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)>
-<label>=(<labelType>)LT(1);<\n>
-<endif>
-MATCHANYT();
-<checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<wildcard(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = LA(1);<\n>
-<endif>
-MATCHANY();
-<checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values. The 'rule' argument was the
- *  target rule name, but now is of type Rule, whose toString is
- *  the same: the rule name.  Now, though, you can access the full rule
- *  descriptor stuff.
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-FOLLOWPUSH(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
-<if(label)><label>=<endif><if(scope)>ctx-><scope:delegateName()>-><endif><rule.name>(ctx<if(scope)>-><scope:delegateName()><endif><if(args)>, <args; separator=", "><endif>);<\n>
-FOLLOWPOP();
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** A lexer rule reference
- *  The 'rule' argument was the target rule name, but now
- *  is of type Rule, whose toString is the same: the rule name.
- *  Now, though, you can access the full rule descriptor stuff.
- */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
-/* <description> */
-<if(label)>
-{
-    ANTLR3_MARKER <label>Start<elementIndex> = GETCHARINDEX();
-    <if(scope)>ctx-><scope:delegateName()>-><endif>m<rule.name>(ctx<if(scope)>-><scope:delegateName()><endif> <if(args)>, <endif><args; separator=", ">);
-    <checkRuleBacktrackFailure()>
-    <label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
-    <label>->setType(<label>, ANTLR3_TOKEN_INVALID);
-    <label>->setStartIndex(<label>, <label>Start<elementIndex>);
-    <label>->setStopIndex(<label>, GETCHARINDEX()-1);
-    <label>->input = INPUT;
-}
-<else>
-<if(scope)>ctx-><scope:delegateName()>-><endif>m<rule.name>(ctx<if(scope)>-><scope:delegateName()><endif> <if(args)>, <endif><args; separator=", ">);
-<checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-{
-    ANTLR3_UINT32 <label>Start<elementIndex>;
-    <labelType> <label>;
-    <label>Start<elementIndex> = GETCHARINDEX();
-    MATCHC(ANTLR3_CHARSTREAM_EOF);
-    <checkRuleBacktrackFailure()>
-    <label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
-    <label>->setType(<label>, ANTLR3_TOKEN_EOF);
-    <label>->setStartIndex(<label>, <label>Start<elementIndex>);
-    <label>->setStopIndex(<label>, GETCHARINDEX()-1);
-    <label>->input = INPUT->tnstream->istream;
-}
-<else>
-    MATCHC(ANTLR3_CHARSTREAM_EOF);
-    <checkRuleBacktrackFailure()>
-    <endif>
->>
-
-// used for left-recursive rules
-recRuleDefArg()                       ::= "int <recRuleArg()>"
-recRuleArg()                          ::= "_p"
-recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
-recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
-recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( LA(1)==ANTLR3_TOKEN_DOWN ) {
-    MATCHT(ANTLR3_TOKEN_DOWN, NULL);
-    <checkRuleBacktrackFailure()>
-    <children:element()>
-    MATCHT(ANTLR3_TOKEN_UP, NULL);
-    <checkRuleBacktrackFailure()>
-}
-<else>
-MATCHT(ANTLR3_TOKEN_DOWN, NULL);
-<checkRuleBacktrackFailure()>
-<children:element()>
-MATCHT(ANTLR3_TOKEN_UP, NULL);
-<checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if ( !(<evalPredicate(...)>) )
-{
-    <ruleBacktrackFailure()>
-    <newFPE(...)>
-}
->>
-
-newFPE() ::= <<
-    CONSTRUCTEX();
-    EXCEPTION->type         = ANTLR3_FAILED_PREDICATE_EXCEPTION;
-    EXCEPTION->message      = (void *)"<description>";
-    EXCEPTION->ruleName	 = (void *)"<ruleName>";
-    <\n>
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-
-{
-    int LA<decisionNumber>_<stateNumber> = LA(<k>);
-    <edges; separator="\nelse ">
-    else
-    {
-<if(eotPredictsAlt)>
-        alt<decisionNumber>=<eotPredictsAlt>;
-<else>
-        <ruleBacktrackFailure()>
-
-        <newNVException()>
-        goto rule<ruleDescriptor.name>Ex;
-
-<endif>
-    }
-}
->>
-
-newNVException() ::= <<
-CONSTRUCTEX();
-EXCEPTION->type         = ANTLR3_NO_VIABLE_ALT_EXCEPTION;
-EXCEPTION->message      = (void *)"<description>";
-EXCEPTION->decisionNum  = <decisionNumber>;
-EXCEPTION->state        = <stateNumber>;
-<@noViableAltException()>
-<\n>
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and more what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-{
-    int LA<decisionNumber>_<stateNumber> = LA(<k>);
-    <edges; separator="\nelse ">
-}
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-
-dfaLoopbackStateDecls()::= <<
-ANTLR3_UINT32   LA<decisionNumber>_<stateNumber>;
->>
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-{
-   /* dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState)
-    */
-    int LA<decisionNumber>_<stateNumber> = LA(<k>);
-    <edges; separator="\nelse "><\n>
-    <if(eotPredictsAlt)>
-    <if(!edges)>
-	alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
-	<else>
-    else
-    {
-	alt<decisionNumber>=<eotPredictsAlt>;
-    }<\n>
-    <endif>
-    <endif>
-}
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( <if(predicates)>(<predicates>) && <endif>(<labelExpr>))
-{
-    <targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( LA(<k>) )
-{
-<edges; separator="\n">
-
-default:
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    <newNVException()>
-    goto rule<ruleDescriptor.name>Ex;<\n>
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( LA(<k>) )
-{
-    <edges; separator="\n">
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( LA(<k>) )
-{
-<edges; separator="\n"><\n>
-<if(eotPredictsAlt)>
-default:
-    alt<decisionNumber>=<eotPredictsAlt>;
-    break;<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{it |case <it>:}; separator="\n">
-	{
-		<targetState>
-	}
-    break;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = cdfa<decisionNumber>.predict(ctx, RECOGNIZER, ISTREAM, &cdfa<decisionNumber>);
-<checkRuleBacktrackFailure()>
->>
-
-/* Dump DFA tables as statically initialized arrays of shorts (16 bits)/characters (8 bits),
- * which are then used to statically initialize the dfa structure, which means that there
- * is no runtime initialization whatsoever, other than anything the C compiler might
- * need to generate. In general the C compiler will lay out memory such that there is no
- * runtime code required.
- */
-cyclicDFA(dfa) ::= <<
-/** Static dfa state tables for Cyclic dfa:
- *    <dfa.description>
- */
-static const ANTLR3_INT32 dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] =
-    {
-	<dfa.eot; wrap="\n", separator=", ", null="-1">
-    };
-static const ANTLR3_INT32 dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] =
-    {
-	<dfa.eof; wrap="\n", separator=", ", null="-1">
-    };
-static const ANTLR3_INT32 dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] =
-    {
-	<dfa.min; wrap="\n", separator=", ", null="-1">
-    };
-static const ANTLR3_INT32 dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] =
-    {
-	<dfa.max; wrap="\n", separator=", ", null="-1">
-    };
-static const ANTLR3_INT32 dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] =
-    {
-	<dfa.accept; wrap="\n", separator=", ", null="-1">
-    };
-static const ANTLR3_INT32 dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] =
-    {
-	<dfa.special; wrap="\n", separator=", ", null="-1">
-    };
-
-/** Used when there is no transition table entry for a particular state */
-#define dfa<dfa.decisionNumber>_T_empty	    NULL
-
-<dfa.edgeTransitionClassMap.keys:{ table |
-static const ANTLR3_INT32 dfa<dfa.decisionNumber>_T<i0>[] =
-    {
-	<table; separator=", ", wrap="\n", null="-1">
-    \};<\n>}; null = "">
-
-/* Transition tables are a table of sub tables, with some tables
- * reused for efficiency.
- */
-static const ANTLR3_INT32 * const dfa<dfa.decisionNumber>_transitions[] =
-{
-    <dfa.transitionEdgeTables:{xref|dfa<dfa.decisionNumber>_T<xref>}; separator=", ", wrap="\n", null="NULL">
-};
-
-<if(dfa.specialStateSTs)>
-static ANTLR3_INT32 dfa<dfa.decisionNumber>_sst(p<name> ctx, pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, pANTLR3_CYCLIC_DFA dfa, ANTLR3_INT32 s)
-{
-    ANTLR3_INT32    _s;
-
-    _s	    = s;
-    switch  (s)
-    {
-    <dfa.specialStateSTs:{state |
-    case <i0>:
-
-	<state>}; separator="\n">
-    }
-<if(backtracking)>
-    if (BACKTRACKING > 0)
-    {
-	FAILEDFLAG = ANTLR3_TRUE;
-	return	-1;
-    }
-<endif>
-
-    CONSTRUCTEX();
-    EXCEPTION->type         = ANTLR3_NO_VIABLE_ALT_EXCEPTION;
-    EXCEPTION->message      = (void *)"<dfa.description>";
-    EXCEPTION->decisionNum  = <dfa.decisionNumber>;
-    EXCEPTION->state        = _s;
-    <@noViableAltException()>
-    return -1;
-}
-<endif>
-
-<@errorMethod()>
-
-/* Declare tracking structure for Cyclic DFA <dfa.decisionNumber>
- */
-static
-ANTLR3_CYCLIC_DFA cdfa<dfa.decisionNumber>
-    =	{
-	    <dfa.decisionNumber>,		    /* Decision number of this dfa	    */
-	    /* Which decision this represents:   */
-	    (const pANTLR3_UCHAR)"<dfa.description>",
-<if(dfa.specialStateSTs)>
-	    (CDFA_SPECIAL_FUNC) dfa<dfa.decisionNumber>_sst,
-<else>
-	    (CDFA_SPECIAL_FUNC) antlr3dfaspecialStateTransition,	/* Default special state transition function	*/
-<endif>
-
-	    antlr3dfaspecialTransition,		/* DFA specialTransition is currently just a default function in the runtime */
-	    antlr3dfapredict,			/* DFA simulator function is in the runtime */
-	    dfa<dfa.decisionNumber>_eot,	    /* EOT table			    */
-	    dfa<dfa.decisionNumber>_eof,	    /* EOF table			    */
-	    dfa<dfa.decisionNumber>_min,	    /* Minimum tokens for each state    */
-	    dfa<dfa.decisionNumber>_max,	    /* Maximum tokens for each state    */
-	    dfa<dfa.decisionNumber>_accept,	/* Accept table			    */
-	    dfa<dfa.decisionNumber>_special,	/* Special transition states	    */
-	    dfa<dfa.decisionNumber>_transitions	/* Table of transition tables	    */
-
-	};
-/* End of Cyclic DFA <dfa.decisionNumber>
- * ---------------------
- */
->>
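
For orientation, here is a minimal sketch of the kind of code the cyclicDFA template above might expand to, for a hypothetical decision 2 with three states. It assumes the ANTLR3 C runtime umbrella header antlr3.h; the table values, the decision number, and the description string are invented, and the struct initializer simply mirrors the field order the template writes out.

#include <antlr3.h>

/* Illustrative static tables for a made-up cyclic DFA, decision 2 */
static const ANTLR3_INT32 dfa2_eot[3]     = { -1, -1, -1 };
static const ANTLR3_INT32 dfa2_eof[3]     = { -1, -1, -1 };
static const ANTLR3_INT32 dfa2_min[3]     = {  4,  4, -1 };
static const ANTLR3_INT32 dfa2_max[3]     = {  7,  7, -1 };
static const ANTLR3_INT32 dfa2_accept[3]  = { -1, -1,  1 };
static const ANTLR3_INT32 dfa2_special[3] = { -1, -1, -1 };

/* Used when there is no transition table entry for a particular state */
#define dfa2_T_empty    NULL

static const ANTLR3_INT32 dfa2_T0[] = { 1, -1, -1, 2 };   /* made-up edges for min..max */

/* Table of sub tables; state 2 is an accept state with no edges */
static const ANTLR3_INT32 * const dfa2_transitions[] = { dfa2_T0, dfa2_T0, dfa2_T_empty };

/* No special states in this sketch, so the runtime's default special-state
 * transition function is plugged in, as the <else> branch of the template does.
 */
static ANTLR3_CYCLIC_DFA cdfa2 =
    {
        2,                                      /* decision number of this dfa      */
        (const pANTLR3_UCHAR)"()* loopback",    /* description (made up)            */
        (CDFA_SPECIAL_FUNC) antlr3dfaspecialStateTransition,
        antlr3dfaspecialTransition,             /* default in the runtime           */
        antlr3dfapredict,                       /* DFA simulator from the runtime   */
        dfa2_eot, dfa2_eof, dfa2_min, dfa2_max,
        dfa2_accept, dfa2_special, dfa2_transitions
    };

Because everything is const static data, the only work left at runtime is the call to cdfa2's predict function from the dfaDecision template above.
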
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-{
-    ANTLR3_UINT32 LA<decisionNumber>_<stateNumber>;<\n>
-    ANTLR3_MARKER index<decisionNumber>_<stateNumber>;<\n>
-
-	LA<decisionNumber>_<stateNumber> = LA(1);<\n>
-    <if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-    index<decisionNumber>_<stateNumber> = INDEX();<\n>
-    REWINDLAST();<\n>
-    <endif>
-    s = -1;
-    <edges; separator="\nelse ">
-	<if(semPredState)> <! return input cursor to state before we rewound !>
-	SEEK(index<decisionNumber>_<stateNumber>);<\n>
-	<endif>
-    if ( s>=0 )
-    {
-	return s;
-    }
-}
-break;
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ( <if(predicates)>(<predicates>) && <endif>(<labelExpr>) )
-{
-    s = <targetStateNumber>;
-}<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
- s = <targetStateNumber>;<\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "( (<left>) && (<right>) )"
-
-orPredicates(operands) ::= "((<first(operands)>)<rest(operands):{o | ||(<o>)}>)"
-
-notPredicate(pred) ::= "!( <evalPredicate(pred,{})> )"
-
-evalPredicate(pred,description) ::= "(<pred>)"
-
-evalSynPredicate(pred,description) ::= "<pred>(ctx)"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "LA(<k>) == <atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
-((LA<decisionNumber>_<stateNumber> >= <lower>) && (LA<decisionNumber>_<stateNumber> \<= <upper>))
-%>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "((LA(<k>) >= <lower>) && (LA(<k>) \<= <upper>))"
-
-setTest(ranges) ::= "<ranges; separator=\" || \">"
-
-// A T T R I B U T E S
-
-makeScopeSet() ::= <<
-/* makeScopeSet()
- */
- /** Definition of the <scope.name> scope variable tracking
- *  structure. An instance of this structure is created by calling
- *  <name>_<scope.name>Push().
- */
-typedef struct  <scopeStruct(sname=scope.name,...)>_struct
-{
-    /** Function that the user may provide to be called when the
-     *  scope is destroyed (so you can free pANTLR3_HASH_TABLES and so on)
-     *
-     * \param Pointer to an instance of this typedef/struct
-     */
-    void    (ANTLR3_CDECL *free)	(struct <scopeStruct(sname=scope.name,...)>_struct * frame);
-
-    /* =============================================================================
-     * Programmer defined variables...
-     */
-    <scope.attributes:{it |<it.decl>;}; separator="\n">
-
-    /* End of programmer defined variables
-     * =============================================================================
-     */
-}
-    <scopeStruct(sname=scope.name,...)>, * <scopeType(sname=scope.name,...)>;
-
->>
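
As a concrete reading of makeScopeSet, this is roughly the struct it would emit for a hypothetical grammar MyParser with a dynamic scope Symbols holding a single pANTLR3_HASH_TABLE attribute; the names are illustrative only and antlr3.h is assumed to supply the runtime types.

#include <antlr3.h>

/* Hypothetical expansion of makeScopeSet for scope "Symbols" in grammar "MyParser" */
typedef struct MyParser_Symbols_SCOPE_struct
{
    /* Optional user-supplied cleanup hook, called when the scope entry is destroyed */
    void    (ANTLR3_CDECL *free)    (struct MyParser_Symbols_SCOPE_struct * frame);

    /* Programmer defined variables... */
    pANTLR3_HASH_TABLE  symtab;
}
    MyParser_Symbols_SCOPE, * pMyParser_Symbols_SCOPE;

The free hook sketched here is the pointer that the scope pop function generated by globalAttributeScopeFuncMacro/ruleAttributeScopeFuncMacro below checks before decrementing the stack limit.
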
-
-globalAttributeScopeDecl(scope) ::= <<
-<if(scope.attributes)>
-/* globalAttributeScopeDecl(scope)
- */
-<makeScopeSet(...)>
-<endif>
->>
-
-ruleAttributeScopeDecl(scope) ::= <<
-<if(scope.attributes)>
-/* ruleAttributeScopeDecl(scope)
- */
-<makeScopeSet(...)>
-<endif>
->>
-
-globalAttributeScopeFuncDecl(scope) ::=
-<<
-/* globalAttributeScopeFuncDecl(scope)
- */
-<if(scope.attributes)>
-/* -----------------------------------------------------------------------------
- * Function declaration for creating a <name>_<scope.name> scope set
- */
-static <scopeType(sname=scope.name,...)>   <scopePushName(sname=scope.name,...)>(p<name> ctx);
-static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope);
-/* ----------------------------------------------------------------------------- */
-
-<endif>
->>
-
-globalAttributeScopeFuncMacro(scope) ::= <<
-<if(scope.attributes)>
-/* globalAttributeScopeFuncMacro(scope)
- */
-/** Function  for popping the top value from a <scopeStack(sname=scope.name)>
- */
-void
-<scopePopName(sname=scope.name,...)>(p<name> ctx)
-{
-    // First see if the user defined a function they want to be called when a
-    // scope is popped/freed.
-    //
-	// If the user supplied the scope entries with a free function, then call it first
-	//
-    if	(SCOPE_TOP(<scope.name>)->free != NULL)
-	{
-        SCOPE_TOP(<scope.name>)->free(SCOPE_TOP(<scope.name>));
-	}
-
-    // Now we decrement the scope's upper limit bound. We do not actually pop the scope as
-    // we want to reuse scope entries if we do continuous push and pops. Most scopes don't
-    // nest too far, so we don't want to keep freeing and allocating them
-    //
-    ctx-><scopeStack(sname=scope.name,...)>_limit--;
-    SCOPE_TOP(<scope.name>) = (<scopeType(sname=scope.name)>)(ctx-><scopeStack(sname=scope.name,...)>->get(ctx-><scopeStack(sname=scope.name,...)>, ctx-><scopeStack(sname=scope.name,...)>_limit - 1));
-}
-<endif>
->>
-
-ruleAttributeScopeFuncDecl(scope) ::= <<
-<if(scope.attributes)>
-/* ruleAttributeScopeFuncDecl(scope)
- */
-/* -----------------------------------------------------------------------------
- * Function declarations for creating a <name>_<scope.name> scope set
- */
-static <scopeType(sname=scope.name,...)>   <scopePushName(sname=scope.name,...)>(p<name> ctx);
-static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope);
-/* ----------------------------------------------------------------------------- */
-
-<endif>
->>
-
-ruleAttributeScopeFuncMacro(scope) ::= <<
-<if(scope.attributes)>
-/* ruleAttributeScopeFuncMacro(scope)
- */
-/** Function for popping the top value from a <scopeStack(sname=scope.name,...)>
- */
-void
-<scopePopName(sname=scope.name,...)>(p<name> ctx)
-{
-    // First see if the user defined a function they want to be called when a
-    // scope is popped/freed.
-    //
-	// If the user supplied the scope entries with a free function, then call it first
-	//
-    if	(SCOPE_TOP(<scope.name>)->free != NULL)
-	{
-        SCOPE_TOP(<scope.name>)->free(SCOPE_TOP(<scope.name>));
-	}
-
-    // Now we decrement the scope's upper limit bound. We do not actually pop the scope as
-    // we want to reuse scope entries if we do continuous push and pops. Most scopes don't
-    // nest too far, so we don't want to keep freeing and allocating them
-    //
-    ctx-><scopeStack(sname=scope.name,...)>_limit--;
-    SCOPE_TOP(<scope.name>) = (<scopeType(sname=scope.name)>)(ctx-><scopeStack(sname=scope.name,...)>->get(ctx-><scopeStack(sname=scope.name,...)>, ctx-><scopeStack(sname=scope.name,...)>_limit - 1));
-}
-
-<endif>
->>
-
-globalAttributeScopeDef(scope) ::=
-<<
-/* globalAttributeScopeDef(scope)
- */
-<if(scope.attributes)>
-/** Pointer to the  <scope.name> stack for use by <scopePushName(sname=scope.name)>()
- *  and <scopePopName(sname=scope.name,...)>()
- */
-pANTLR3_STACK <scopeStack(sname=scope.name)>;
-ANTLR3_UINT32 <scopeStack(sname=scope.name)>_limit;
-/** Pointer to the top of the stack for the global scope <scopeStack(sname=scope.name)>
- */
-<scopeType(sname=scope.name,...)>    (*<scopePushName(sname=scope.name,...)>)(struct <name>_Ctx_struct * ctx);
-<scopeType(sname=scope.name,...)>    <scopeTopDecl(sname=scope.name,...)>;
-
-<endif>
->>
-
-ruleAttributeScopeDef(scope) ::= <<
-<if(scope.attributes)>
-/* ruleAttributeScopeDef(scope)
- */
-/** Pointer to the  <scope.name> stack for use by <scopePushName(sname=scope.name)>()
- *  and <scopePopName(sname=scope.name,...)>()
- */
-pANTLR3_STACK <scopeStack(sname=scope.name,...)>;
-ANTLR3_UINT32 <scopeStack(sname=scope.name,...)>_limit;
-<scopeType(sname=scope.name,...)>   (*<scopePushName(sname=scope.name,...)>)(struct <name>_Ctx_struct * ctx);
-<scopeType(sname=scope.name,...)>   <scopeTopDecl(sname=scope.name,...)>;
-
-<endif>
->>
-
-globalAttributeScopeFuncs(scope) ::= <<
-<if(scope.attributes)>
-/* globalAttributeScopeFuncs(scope)
- */
-<attributeFuncs(scope)>
-<endif>
->>
-
-ruleAttributeScopeFuncs(scope) ::= <<
-<if(scope.attributes)>
-/* ruleAttributeScopeFuncs(scope)
- */
-<attributeFuncs(scope)>
-<endif>
->>
-
-globalAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-/* globalAttributeScope(scope)
- */
-ctx-><scopePushName(sname=scope.name,...)>     = <scopePushName(sname=scope.name,...)>;
-ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(0);
-ctx-><scopeStack(sname=scope.name,...)>_limit    = 0;
-<scopeTop(sname=scope.name,...)>      = NULL;
-<endif>
->>
-
-ruleAttributeScope(scope) ::=
-<<
-<if(scope.attributes)>
-/* ruleAttributeScope(scope)
- */
-ctx-><scopePushName(sname=scope.name,...)>     = <scopePushName(sname=scope.name,...)>;
-ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(0);
-ctx-><scopeStack(sname=scope.name,...)>_limit    = 0;
-<scopeTop(sname=scope.name,...)>      = NULL;
-<endif>
->>
-globalAttributeScopeFree(scope) ::= <<
-<if(scope.attributes)>
-/* globalAttributeScopeFree(scope)
- */
-ctx-><scopeStack(sname=scope.name,...)>->free(ctx-><scopeStack(sname=scope.name,...)>);
-<endif>
->>
-
-ruleAttributeScopeFree(scope) ::=
-<<
-<if(scope.attributes)>
-/* ruleAttributeScopeFree(scope)
- */
-ctx-><scopeStack(sname=scope.name,...)>->free(ctx-><scopeStack(sname=scope.name,...)>);
-<endif>
->>
-
-scopeTopDecl(sname) ::= <<
-p<name>_<sname>Top
->>
-
-scopeTop(sname) ::= <<
-ctx-><scopeTopDecl(sname=sname,...)>
->>
-
-scopePop(sname) ::= <<
-<scopePopName(sname=sname,...)>(ctx);
->>
-
-scopePush(sname) ::= <<
-p<name>_<sname>Push(ctx)
->>
-
-scopePopName(sname) ::= <<
-p<name>_<sname>Pop
->>
-
-scopePushName(sname) ::= <<
-p<name>_<sname>Push
->>
-
-scopeType(sname) ::= <<
-p<name>_<sname>_SCOPE
->>
-
-scopeStruct(sname) ::= <<
-<name>_<sname>_SCOPE
->>
-
-scopeStack(sname) ::= <<
-p<name>_<sname>Stack
->>
-
-attributeFuncs(scope) ::= <<
-<if(scope.attributes)>
-/* attributeFuncs(scope)
- */
-
-static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope)
-{
-    ANTLR3_FREE(scope);
-}
-
-/** \brief Allocate initial memory for a <name> <scope.name> scope variable stack entry and
- *         add it to the top of the stack.
- *
- * \remark
- * By default the structure is freed with ANTLR3_FREE(), but you can use
- * the \@init action to install a pointer to a custom free() routine by
- * adding the code:
- * \code
- *   <scopeTop(sname=scope.name)>->free = myroutine;
- * \endcode
- *
- * With lots of comments of course! The routine should be declared in
- * \@members { } as:
- * \code
- *   void ANTLR3_CDECL myfunc( <scopeType(sname=scope.name)> ptr).
- * \endcode
- *
- * It should perform any custom freeing stuff that you need (call ANTLR3_FREE, not free()).
- * NB: It should not free the pointer it is given, which is the scope stack entry itself
- * and will be freed by the function that calls your custom free routine.
- *
- */
-static <scopeType(sname=scope.name)>
-<scopePushName(sname=scope.name)>(p<name> ctx)
-{
-    /* Pointer used to create a new set of attributes
-     */
-    <scopeType(sname=scope.name)>      newAttributes;
-
-    /* Allocate the memory for a new structure if we need one.
-     */
-    if (ctx-><scopeStack(sname=scope.name)>->size(ctx-><scopeStack(sname=scope.name)>) > ctx-><scopeStack(sname=scope.name)>_limit)
-    {
-        // The current limit value was less than the number of scopes available on the stack so
-        // we can just reuse one. Our limit tracks the stack count, so the index of the entry we want
-        // is one less than that, or conveniently, the current value of limit.
-        //
-        newAttributes = (<scopeType(sname=scope.name)>)ctx-><scopeStack(sname=scope.name)>->get(ctx-><scopeStack(sname=scope.name)>, ctx-><scopeStack(sname=scope.name)>_limit);
-    }
-    else
-    {
-        // Need a new allocation
-        //
-        newAttributes = (<scopeType(sname=scope.name)>) ANTLR3_MALLOC(sizeof(<scopeStruct(sname=scope.name)>));
-        if  (newAttributes != NULL)
-        {
-            /* Standard ANTLR3 library implementation
-             */
-            ctx-><scopeStack(sname=scope.name)>->push(ctx-><scopeStack(sname=scope.name)>, newAttributes, (void (*)(void *))<scope.name>Free);
-        }
-    }
-
-    // Blank out any previous free pointer; the user might or might not install a new one.
-    //
-    newAttributes->free = NULL;
-
-    // Indicate the position in the available stack that the current level is at
-    //
-    ctx-><scopeStack(sname=scope.name)>_limit++;
-
-	/* Return value is the pointer to the new entry, which may be used locally
-	 * without de-referencing via the context.
-     */
-    return  newAttributes;
-}<\n>
-
-<endif>
->>
-returnStructName(r) ::= "<r.name>_return"
-
-returnType() ::= <%
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
-<else>
-ANTLR3_BOOLEAN
-<endif>
-%>
-
-/** Generate the C type associated with a single or multiple return
- *  value(s).
- */
-ruleLabelType(referencedRule) ::= <%
-<if(referencedRule.hasMultipleReturnValues)>
-<referencedRule.grammar.recognizerName>_<referencedRule.name>_return
-<else>
-<if(referencedRule.hasSingleReturnValue)>
-<referencedRule.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
-%>
-
-delegateName(d) ::= <<
-<if(d.label)><d.label><else>g<d.name><endif>
->>
-
-/** Using a type-to-init-value map, try to init a type; if it is not in the
- *  table it must be an object, and the default value is "0".
- */
-initValue(typeName) ::= <<
- = <cTypeInitMap.(typeName)>
->>
-
-/** Define a rule label  */
-ruleLabelDef(label) ::= <<
-<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text>;
-#undef	RETURN_TYPE_<label.label.text>
-#define	RETURN_TYPE_<label.label.text> <ruleLabelType(referencedRule=label.referencedRule)><\n>
->>
-/**  Rule label default value */
-ruleLabelInitVal(label) ::= <<
->>
-
-ASTLabelType() ::= "<if(recognizer.ASTLabelType)><recognizer.ASTLabelType><else>pANTLR3_BASE_TREE<endif>"
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasMultipleReturnValues)>
-typedef struct <ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>_struct
-{
-<if(!TREE_PARSER)>
-    /** Generic return elements for ANTLR3 rules that are not in tree parsers or returning trees
-     */
-    pANTLR3_COMMON_TOKEN    start;
-    pANTLR3_COMMON_TOKEN    stop;
-<else>
-    <recognizer.ASTLabelType>       start;
-    <recognizer.ASTLabelType>       stop;
-<endif>
-    <@ruleReturnMembers()>
-    <ruleDescriptor.returnScope.attributes:{it |<it.type> <it.name>;}; separator="\n">
-}
-    <ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>;<\n><\n>
-<endif>
-<endif>
->>
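
To make the shape of that struct concrete, this is roughly what returnScope would produce for a hypothetical non-tree-parser rule expr in grammar MyParser declaring returns [int value]; the names are invented and antlr3.h supplies pANTLR3_COMMON_TOKEN.

#include <antlr3.h>

/* Hypothetical expansion of returnScope for rule "expr" in grammar "MyParser"
 * when the rule has multiple return values (start/stop tokens plus a user attribute).
 */
typedef struct MyParser_expr_return_struct
{
    /* Generic return elements for rules that are not in tree parsers or returning trees */
    pANTLR3_COMMON_TOKEN    start;
    pANTLR3_COMMON_TOKEN    stop;

    /* User-declared return attribute from "returns [int value]" */
    int                     value;
}
    MyParser_expr_return;

This is also the type that ruleLabelType above resolves to for labeled references to a multiple-return-value rule.
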
-
-parameterScope(scope) ::= <<
-<scope.attributes:{it |<it.decl>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name>=<expr>;"
-
-/** Note that the scopeAttributeRef does not have access to the
- * grammar name directly
- */
-scopeAttributeRef(scope,attr,index,negIndex) ::= <%
-<if(negIndex)>
-	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get( ctx->SCOPE_STACK(<scope>), ctx->SCOPE_STACK(<scope>)->size(ctx->SCOPE_STACK(<scope>)) - <negIndex> - 1) ))-><attr.name>
-<else>
-<if(index)>
-	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get(ctx->SCOPE_STACK(<scope>), (ANTLR3_UINT32)<index> ) ))-><attr.name>
-<else>
-	(SCOPE_TOP(<scope>))-><attr.name>
-<endif>
-<endif>
-%>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
-<if(negIndex)>
-	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get( ctx->SCOPE_STACK(<scope>), ctx->SCOPE_STACK(<scope>)->size(ctx->SCOPE_STACK(<scope>)) - <negIndex> - 1) ))-><attr.name> = <expr>;
-<else>
-<if(index)>
-	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get(ctx->SCOPE_STACK(<scope>), (ANTLR3_UINT32)<index> ) ))-><attr.name> = <expr>;
-<else>
-	(SCOPE_TOP(<scope>))-><attr.name>=<expr>;
-<endif>
-<endif>
-%>
-
-/** $x is either a global scope or x is a rule with a dynamic scope; refers
- *  to the stack itself, not the top of the stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "ctx->SCOPE_STACK(<scope>)"
-
-/** Reference an attribute of a rule; it might only have a single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-<scope>.<attr.name>
-<else>
-<scope>
-<endif>
->>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>
-<else>
-<attr.name>
-<endif>
->>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>=<expr>;
-<else>
-<attr.name>=<expr>;
-<endif>
->>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach
-//
-tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText(<scope>))"
-tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>->getType(<scope>))"
-tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>->getLine(<scope>))"
-tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>->getCharPositionInLine(<scope>))"
-tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>->getChannel(<scope>))"
-tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>->getTokenIndex(<scope>))"
-tokenLabelPropertyRef_tree(scope,attr) ::= "(<scope>->tree)"
-tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>->getText(<scope>)->toInt32(<scope>->getText(<scope>)))"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>.start)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>.stop)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>.tree)"
-ruleLabelPropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-(STRSTREAM->toStringSS(STRSTREAM, <scope>.start, <scope>.start))
-<else>
-(STRSTREAM->toStringTT(STRSTREAM, <scope>.start, <scope>.stop))
-<endif>
->>
-
-ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>->getType(<scope>))"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>->getLine(<scope>))"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>->getCharPositionInLine(<scope>))"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>->getChannel(<scope>))"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>->getTokenIndex(<scope>))"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText(<scope>))"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "retval.start"
-rulePropertyRef_stop(scope,attr) ::= "retval.stop"
-rulePropertyRef_tree(scope,attr) ::= "retval.tree"
-rulePropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-INPUT->toStringSS(INPUT, ADAPTOR->getTokenStartIndex(ADAPTOR, retval.start), ADAPTOR->getTokenStopIndex(ADAPTOR, retval.start))
-<else>
-STRSTREAM->toStringTT(STRSTREAM, retval.start, LT(-1))
-<endif>
->>
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-lexerRulePropertyRef_text(scope,attr) ::= "LEXER->getText(LEXER)"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "LEXSTATE->tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "LEXSTATE->tokenStartCharPositionInLine"
-lexerRulePropertyRef_channel(scope,attr) ::= "LEXSTATE->channel"
-lexerRulePropertyRef_start(scope,attr) ::= "LEXSTATE->tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(LEXER->getCharIndex(LEXER)-1)"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_int(scope,attr) ::= "LEXER->getText(LEXER)->toInt32(LEXER->getText(LEXER))"
-
-
-// Setting $st and $tree is allowed in a local rule; everything else is flagged as an error
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree=<expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st=<expr>;"
-
-
-/** How to deal with an @after for C targets. Because we cannot rely on
- *  any garbage collection, the @after code is executed even in backtracking
- *  mode. This must be documented clearly.
- */
-execAfter(action) ::= <<
-{
-    <action>
-}
->>
-
-/** How to execute an action (when not backtracking) */
-execAction(action) ::= <<
-<if(backtracking)>
-<if(actions.(actionScope).synpredgate)>
-if ( <actions.(actionScope).synpredgate> )
-{
-    <action>
-}
-<else>
-if ( BACKTRACKING == 0 )
-{
-    <action>
-}
-<endif>
-<else>
-{
-    <action>
-}
-<endif>
->>
-
-// M I S C (properties, etc...)
-
-bitsetDeclare(name, words64) ::= <<
-
-/** Bitset defining follow set for error recovery in rule state: <name>  */
-static	ANTLR3_BITWORD <name>_bits[]	= { <words64:{it |ANTLR3_UINT64_LIT(<it>)}; separator=", "> };
-static  ANTLR3_BITSET_LIST <name>	= { <name>_bits, <length(words64)>	};
->>
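
For reference, a plausible expansion of bitsetDeclare for a single 64-bit-word follow set; the symbol name and bit pattern are invented, and antlr3.h is assumed to provide ANTLR3_BITWORD, ANTLR3_UINT64_LIT and ANTLR3_BITSET_LIST as used above.

#include <antlr3.h>

/** Bitset defining follow set for error recovery in a made-up rule state: FOLLOW_ID_in_expr42 */
static	ANTLR3_BITWORD FOLLOW_ID_in_expr42_bits[]	= { ANTLR3_UINT64_LIT(0x0000000000000010) };
static  ANTLR3_BITSET_LIST FOLLOW_ID_in_expr42	= { FOLLOW_ID_in_expr42_bits, 1	};

The trailing 1 is the word count, i.e. what the template emits as <length(words64)>.
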
-
-bitset(name, words64) ::= <<
-antlr3BitsetSetAPI(&<name>);<\n>
->>
-
-codeFileExtension() ::= ".c"
-
-true_value() ::= "ANTLR3_TRUE"
-false_value() ::= "ANTLR3_FALSE"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CPP/CPP.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CPP/CPP.stg
deleted file mode 100644
index 4488b06..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CPP/CPP.stg
+++ /dev/null
@@ -1,1351 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-group Cpp;
-
-cppTypeInitMap ::= [
-	"int":"0",
-	"long":"0",
-	"float":"0.0",
-	"double":"0.0",
-	"bool":"false",
-	"byte":"0",
-	"short":"0",
-	"char":"0",
-	default:"0" // anything other than an atomic type
-]
-
-// What we generate (lexer/parser/treeparser); used as a suffix in a few places
-generatedType() ::= <<
-<if(LEXER)>Lexer<endif><if(PARSER)>Parser<endif><if(TREE_PARSER)>TreeParser<endif>
->>
-
-leadIn(type) ::=
-<<
-/** \file
- *
- *  This <type> file was generated by ANTLR version <ANTLRVersion>
- *
- *     -  From the grammar source file : <fileName>
- *     -                            On : <generatedTimestamp>
-<if(LEXER)>
- *     -                 for the lexer : <name><\n>
-<endif>
-<if(PARSER)>
- *     -                for the parser : <name><\n>
-<endif>
-<if(TREE_PARSER)>
- *     -           for the tree parser : <name><\n>
-<endif>
- *
- *  Edit at your own peril.
- */
->>
-
-standardHeaders() ::=
-<<
-#include \<antlr3/<generatedType()>.h>
-
-<if(profile)>
-#warning "No profiling support.."
-<endif>
-<if(TREE_PARSER)>
-#warning "No tree parsing yet..."
-<endif>
->>
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-			  docComment, recognizer,
-			  name, tokens, tokenNames, rules, cyclicDFAs,
-			  bitsets, buildTemplate, profile,
-			  backtracking, synpreds, memoize, numRules,
-			  fileName, ANTLRVersion, generatedTimestamp, trace,
-			  scopes, superClass) ::=
-<<
-<leadIn("C++ source")>
-<@includes>
-#include "<name><headerFileExtension()>"
-<@end>
-<if(actions.(actionScope).header)>
-// Header action start ========================================================
-<actions.(actionScope).header>
-// Header action end   ========================================================
-<endif>
-
-<headerAction>
-
-<standardHeaders()>
-
-<docComment>
-<recognizer>
->>
-parserHeaderFile() ::= <<
->>
-treeParserHeaderFile() ::= <<
->>
-lexerHeaderFile() ::= <<
-template\<typename StreamType, typename TokenType, typename TokenBuilder>
-class <name> : public antlr3::Lexer\<StreamType,TokenType,TokenBuilder> {
-	// carry over general types
-	typedef typename StreamType::position_type position_type;
-	typedef typename StreamType::char_type char_type;
-
-	typedef antlr3::tokenid_type               tokenid_type;
-	typedef antlr3::channel_type               channel_type;
-	typedef antlr3::decision_type              decision_type;
-	// exception shorthands
-	typedef antlr3::MismatchException\<position_type,char_type>        MismatchException;
-	typedef antlr3::MismatchedRangeException\<position_type,char_type> MismatchedRangeException;
-	typedef antlr3::MismatchedSetException\<position_type,char_type>   MismatchedSetException;
-	typedef antlr3::EarlyExitException\<position_type>       EarlyExitException;
-	typedef antlr3::NoViableAltException\<position_type>     NoViableAltException;
-<if(backtracking)>
-	// @TODO backtracking ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
-
-public:
-	<tokens:{static const tokenid_type <tokenPrefix()><it.name> = <it.type>;}; separator="\n">
-	<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-	<actions.lexer.members>
-
-	<name>(StreamType* input)
-	: antlr3::Lexer\<StreamType,TokenType,TokenBuilder>(input)
-	{
-	}
-
-<!if(filterMode)!>
-    <!filteringNextToken()!>
-<!endif!>
-	<rules; separator="\n\n">
-
-	// syn preds
-	<synpreds:{p | <lexerSynpred(p)>}>
-
-	// cyclic dfa's
-	<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
-	// dfa tables..
-}; // class <name><\n>
->>
-
-headerFile( LEXER,
-            PARSER,
-            TREE_PARSER,
-            actionScope,
-            actions,
-            docComment,
-            recognizer,
-            name,
-            tokens,
-            tokenNames,
-            rules,
-            cyclicDFAs,
-            bitsets,
-            buildTemplate,
-            profile,
-            backtracking,
-            synpreds,
-            memoize,
-            numRules,
-            fileName,
-            ANTLRVersion,
-            generatedTimestamp,
-            trace,
-            scopes,
-            superClass
-        ) ::=
-<<
-#ifndef	_<name>_H
-#define _<name>_H
-<leadIn("C++ header")>
-<actions.(actionScope).headerfile>
-
-<@includes>
-<standardHeaders()>
-<@end>
-
-<if(LEXER)>
-<lexerHeaderFile()>
-<endif>
-<if(PARSER)>
-<parserHeaderFile()>
-<endif>
-<if(TREE_PARSER)>
-<treeParserHeaderFile()>
-<endif>
-
-
-#endif	// _<name>_H<\n>
->>
-
-lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode) ::= <<
-
-<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
->>
-
-filteringNextToken() ::= <<
-/** An override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error can be generated upon error; just rewind, consume
- *  a token and then try again.  backtracking needs to be set as well.
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at backtracking==1.
- */
-public Token nextToken() {
-	while (true) {
-		if ( input.LA(1)==CharStream.EOF ) {
-			return Token.EOF_TOKEN;
-		}
-		this->token = 0;
-		tokenStartCharIndex = getCharIndex();
-		try {
-			int m = input.mark();
-			backtracking=1; <! means we won't throw slow exception !>
-			failed=false;
-			mTokens();
-			backtracking=0;
-			<! mTokens backtracks with synpred at backtracking==2
-				and we set the synpredgate to allow actions at level 1. !>
-			if ( failed ) {
-				input.rewind(m);
-				input.consume(); <! advance one char and try again !>
-			}
-			else {
-				return token;
-			}
-		}
-		catch (RecognitionException re) {
-			// shouldn't happen in backtracking mode, but...
-			reportError(re);
-			recover(re);
-		}
-	}
-}
-
-public void memoize(IntStream input, int ruleIndex, int ruleStartIndex)
-{
-	if ( backtracking > 1 )
-		super.memoize(input, ruleIndex, ruleStartIndex);
-}
-
-public boolean alreadyParsedRule(IntStream input, int ruleIndex)
-{
-	if ( backtracking > 1 )
-		return super.alreadyParsedRule(input, ruleIndex);
-	return false;
-}
->>
-
-filteringActionGate() ::= "backtracking == 1"
-
-/** How to generate a parser */
-genericParser(
-	grammar, name, scopes, tokens, tokenNames, rules, numRules, cyclicDFAs,
-	bitsets, inputStreamType, superClass, ASTLabelType="Object",
-	labelType, members, filterMode
-	) ::= <<
-// genericParser
-class <name> : public <@superClassName><superClass><@end> {
-public:
-	static const char* tokenNames[] = {
-		"\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
-	};
-	<tokens:{static tokenid_type <tokenPrefix()><it.name>=<it.type>;}; separator="\n">
-	<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-	<@members>
-
-	<name>(StreamType* input)
-	: <superClass>\<StreamType,TokenType>(input)
-	{
-<if(backtracking)>
-		ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
-	}
-	<@end>
-
-	//@TODO public String[] getTokenNames() { return tokenNames; }
-	//@TODO public String getGrammarFileName() { return "<fileName>"; }
-	<members>
-
-	<rules; separator="\n\n">
-
-	<synpreds:{p | <synpred(p)>}>
-
-	<cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
-	<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-	<bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-							words64=it.bits)>
-};
->>
-
-parser(
-	grammar, name, scopes, tokens, tokenNames,
-	rules, numRules, bitsets, ASTLabelType,
-	superClass="Parser", labelType="Token",
-	members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="TokenStream", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction,
-	rules, numRules,
-	bitsets,
-	labelType={<ASTLabelType>}, ASTLabelType="Object",
-	superClass="TreeParser", members={<actions.treeparser.members>}, filterMode
-	) ::= <<
-<genericParser(inputStreamType="TreeNodeStream", ...)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  nor parameters etc..., just give the simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-// $ANTLR start <ruleName>
-public void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throws RecognitionException {
-	<if(trace)>System.out.println("enter <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);<endif>
-<if(trace)>
-	try {
-		<block>
-	}
-	finally {
-		System.out.println("exit <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);
-	}
-<else>
-	<block>
-<endif>
-}
-// $ANTLR end <ruleName>
->>
-
-synpred(name) ::= <<
-public boolean <name>() {
-    this->backtracking++;
-    <@start()>
-    int start = input.mark();
-    try {
-        <name>_fragment(); // can never throw exception
-    } catch (RecognitionException re) {
-        System.err.println("impossible: "+re);
-    }
-    boolean success = ! this->failed;
-    input.rewind(start);
-    <@stop()>
-    this->backtracking--;
-    this->failed = false;
-    return success;
-}<\n>
->>
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if ( backtracking > 0 && alreadyParsedRule(input, <ruleDescriptor.index>) )
-	return <ruleReturnValue()>;
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>
-if (failed)
-	return <ruleReturnValue()>;
-<endif>
->>
-
-/** This rule has failed; exit indicating failure during backtracking */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>
-if (backtracking > 0)
-{
-	failed = true;
-	return <ruleReturnValue()>;
-}
-<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(scope=ruleDescriptor.returnScope)>
-
-// $ANTLR start <ruleName>
-// <fileName>:<description>
-public <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throw(antlr3::BaseRecognitionException)
-{
-<if(trace)>
-	antlr3::Tracer trace(this,"<ruleName>");
-	System.out.println("enter <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);
-<endif>
-	<ruleDeclarations()>
-	<ruleLabelDefs()>
-	<ruleDescriptor.actions.init>
-	<@preamble()>
-	try {
-		<ruleMemoization(name=ruleName)>
-		<block>
-	}
-<if(exceptions)>
-	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-	<actions.(actionScope).rulecatch>
-<else>
-	catch (RecognitionException re) {
-		reportError(re);
-		recover(input,re);
-	}<\n>
-<endif>
-<endif>
-<endif>
-	finally {
-		<if(trace)>System.out.println("exit <ruleName> "+input.LT(1)+" failed="+failed+" backtracking="+backtracking);<endif>
-		<ruleCleanUp()>
-		<(ruleDescriptor.actions.finally):execAction()>
-	}
-	<@postamble()>
-	return <ruleReturnValue()>;
-}
-// $ANTLR end <ruleName>
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>) {
-    <e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.push(new <it>_scope());}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.push(new <it.name>_scope());}; separator="\n">
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<returnType()> retval = new <returnType()>();
-retval.start = input.LT(1);<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
-}>
-<endif>
-<if(memoize)>
-int <ruleDescriptor.name>_StartIndex = input.index();
-<endif>
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
-    :{<labelType> <it.label.text>=null;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
-    :{List list_<it.label.text>=null;}; separator="\n"
->
-<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
-    :ruleLabelDef(label=it); separator="\n"
->
-<[ruleDescriptor.allRuleRefsInAltsWithRewrites,ruleDescriptor.allTokenRefsInAltsWithRewrites]
-    :{List list_<it>=new ArrayList();}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <<
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-retval
-<endif>
-<endif>
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.pop();}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.pop();}; separator="\n">
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.stop = input.LT(-1);<\n>
-<endif>
-<if(memoize)>
-<if(backtracking)>
-if ( backtracking > 0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
-<endif>
-<endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) throw(antlr3::BaseRecognitionException)
-{
-<if(trace)>
-	antlr3::Tracer trace(this,"<ruleName>");
-<endif>
-	antlr3::CountScope nestingTracker(this->ruleNestingLevel);
-	StreamType& input(this->getInput());
-<if(nakedBlock)>
-	<ruleDescriptor.actions.init>
-	<ruleMemoization(name=ruleName)>
-	<block><\n>
-<else>
-	tokenid_type type = <tokenPrefix()><ruleName>;
-	channel_type channel = antlr3::Token::DEFAULT_CHANNEL;
-	position_type start(input.getPosition());
-	<ruleDescriptor.actions.init>
-	<ruleMemoization(name=ruleName)>
-	<block>
-	<! create token if none exists *and* we are an outermost token rule !>
-	<execAction({if ( this->token == 0 && this->ruleNestingLevel == 1 ) {
-	TokenType *tt = TokenBuilder::build(type,start,input,channel);
-	std::cout \<\< (*tt) \<\< std::endl;
-	this->emit(tt);
-	}<\n>
-})>
-<endif>
-}
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-void mTokens() throw(antlr3::BaseRecognitionException)
-{
-	StreamType& input(this->getInput());
-	<block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,
-	maxK,maxAlt,description) ::= <<
-// block <fileName>:<description>
-decision_type alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-<@prebranch()>
-switch (alt<decisionNumber>) {
-    <alts:altSwitchCase()>
-}
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// ruleBlock <fileName>:<description>
-decision_type alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-switch (alt<decisionNumber>) {
-    <alts:altSwitchCase()>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// ruleBlockSingleAlt <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// positiveClosureBlock <fileName>:<description>
-decision_type cnt<decisionNumber>=0;
-<decls>
-<@preloop()>
-do {
-	decision_type alt<decisionNumber>=<maxAlt>;
-	<@predecision()>
-	<decision>
-	<@postdecision()>
-	switch (alt<decisionNumber>) {
-	<alts:altSwitchCase()>
-	default :
-		if ( cnt<decisionNumber> >= 1 )
-			goto loop<decisionNumber>;
-            EarlyExitException eee( input.getPosition(), <decisionNumber> );
-				<@earlyExitException()>
-            throw eee;
-	}
-	cnt<decisionNumber>++;
-} while (true);
-loop<decisionNumber>: ;
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// closureBlock <fileName>:<description>
-<decls>
-<@preloop()>
-do {
-	decision_type alt<decisionNumber>=<maxAlt>;
-	<@predecision()>
-	<decision>
-	<@postdecision()>
-	switch (alt<decisionNumber>) {
-	<alts:altSwitchCase()>
-	default :
-		goto loop<decisionNumber>;
-	}
-} while (true);
-loop<decisionNumber>: ;
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) by ANTLR before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase() ::= <<
-case <i> :
-	<@prealt()>
-	<it>
-	break;<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt) ::= <<
-// alt <fileName>:<description>
-{
-	<@declarations()>
-	<elements:element()>
-	<@cleanup()>
-}
->>
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element() ::= <<
-// element <fileName>:<description>
-<@prematch()>
-<it.el><\n>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex) ::= <<
-// tokenRef
-<if(label)>
-<label> = input.LT(1);<\n>
-<endif>
-this->match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>);
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID no AST building */
-tokenRefAndListLabel(token,label,elementIndex) ::= <<
-<tokenRef(...)>
-<listLabel(...)>
->>
-
-listLabel(label) ::= <<
-if (list_<label>==null) list_<label>=new ArrayList();
-list_<label>.add(<label>);<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-// charRef
-<if(label)>
-<tokenid_type()> <label> = input.LA(1);<\n>
-<endif>
-this->match(<char>);
-<checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b) ::= "this->matchRange(<a>,<b>); <checkRuleBacktrackFailure()>"
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,postmatchCode="") ::= <<
-// matchSet
-<if(label)>
-<label> = input.LT(1);<\n>
-<endif>
-if ( <s> )
-{
-	<postmatchCode>
-	input.consume();
-<if(!LEXER)>
-	errorRecovery=false;
-<endif>
-	<if(backtracking)>failed=false;<endif>
-}
-else
-{
-	<ruleBacktrackFailure()>
-	MismatchedSetException mse(input.getPosition(),input.LA(1));
-	<@mismatchedSetException()>
-<if(LEXER)>
-	this->recover(mse);
-<else>
-	this->recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
-<endif>
-	throw mse;
-}<\n>
->>
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex) ::= <<
-// lexerStringRef
-<if(label)>
-position_type <label>Start(input.getPosition());
-this->match( <string> );
-<checkRuleBacktrackFailure()>
-TokenType* <label> = TokenBuilder::build(Token.INVALID_TOKEN_TYPE,<label>Start,input,Token.DEFAULT_CHANNEL);
-<else>
-this->match( <string> );
-<checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-wildcard(label,elementIndex) ::= <<
-<if(label)>
-<label> = input.LT(1);<\n>
-<endif>
-this->matchAny( input );
-<checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(label,elementIndex) ::= <<
-<wildcard(...)>
-<listLabel(...)>
->>
-
-/** Match . wildcard */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<tokenid_type()> <label> = input.LA(1);<\n>
-<endif>
-this->matchAny();
-<checkRuleBacktrackFailure()>
->>
-
-tokenid_type() ::= "<if(LEXER)>char_type<else>tokenid_type<endif>"
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabel(...)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.
- */
-ruleRef(rule,label,elementIndex,args) ::= <<
-following.push(FOLLOW_<rule>_in_<ruleName><elementIndex>);
-<if(label)>
-<label>=<rule>(<args>);<\n>
-<else>
-<rule>(<args>);<\n>
-<endif>
-following.pop();
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-ruleRefAndListLabel(rule,label,elementIndex,args) ::= <<
-<ruleRef(...)>
-<listLabel(...)>
->>
-
-/** A lexer rule reference */
-lexerRuleRef(rule,label,args) ::= <<
-<if(label)>
-position_type <label>Start(input.getPosition());
-m<rule>(<args>);
-<checkRuleBacktrackFailure()>
-TokenType* <label> = TokenBuilder::build(Token.INVALID_TOKEN_TYPE,<label>Start,input,Token.DEFAULT_CHANNEL);
-<else>
-m<rule>(<args>);
-<checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label) ::= <<
-<if(label)>
-position_type <label>Start(input.getPosition());
-match(EOF);
-<checkRuleBacktrackFailure()>
-TokenType* <label> = TokenBuilder::build(Token.EOF,<label>Start,input,Token.DEFAULT_CHANNEL);
-<else>
-match(EOF);
-<checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** match ^(root children) in tree parser */
-tree(root, children, nullableChildList) ::= <<
-<root:element()>
-<if(nullableChildList)>
-if ( input.LA(1)==antlr3::Token::DOWN ) {
-    match(input, antlr3::Token::DOWN, null);
-    <checkRuleBacktrackFailure()>
-    <children:element()>
-    match(input, antlr3::Token::UP, null);
-    <checkRuleBacktrackFailure()>
-}
-<else>
-match(input, antlr3::Token::DOWN, null);
-<checkRuleBacktrackFailure()>
-<children:element()>
-match(input, antlr3::Token::UP, null);
-<checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if ( !(<evalPredicate(...)>) ) {
-	<ruleBacktrackFailure()>
-	throw new FailedPredicateException(input, "<ruleName>", "<description>");
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-<if(!semPredState)>
-<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<endif>
-<edges; separator="\nelse ">
-else
-{
-<if(eotPredictsAlt)>
-	alt<decisionNumber> = <eotPredictsAlt>;<\n>
-<else>
-	<ruleBacktrackFailure()>
-	NoViableAltException nvae(input.getPosition(), "<description>", <decisionNumber>, <stateNumber>);<\n>
-	<@noViableAltException()>
-	throw nvae;<\n>
-<endif>
-}
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and closer to what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-<if(!semPredState)>
-<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(<k>);
-<endif>
-<edges; separator="\nelse ">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-<if(!semPredState)>
-<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(<k>);
-<endif>
-<edges; separator="\nelse "><\n>
-<if(eotPredictsAlt)>
-else
-{
-	alt<decisionNumber> = <eotPredictsAlt>;
-}<\n>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>)
-{
-	<targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
-<edges; separator="\n">
-default:
-<if(eotPredictsAlt)>
-	alt<decisionNumber> = <eotPredictsAlt>;
-<else>
-	NoViableAltException nvae( input.getPosition(), "<description>", <decisionNumber>, <stateNumber> );<\n>
-	<@noViableAltException()>
-	throw nvae;<\n>
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
-	<edges; separator="\n">
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
-<edges; separator="\n"><\n>
-<if(eotPredictsAlt)>
-default:
-	alt<decisionNumber> = <eotPredictsAlt>;
-	break;<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{case <it>:}; separator="\n"> {
-	<targetState>
-} break;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-// dfaDecision
-alt<decisionNumber> = predictDFA<decisionNumber>(input);
->>
-
-/** The overall cyclic DFA chunk; contains all the DFA states */
-cyclicDFA(dfa) ::= <<
-/* cyclicDFA=<dfa>
-*/
-// cyclic    = <dfa.cyclic>
-// numstates = <dfa.numberOfStates>
-
-// startState = <dfa.startState>
-// startState.numberOfTransitions = <dfa.startState.NumberOfTransitions>
-// startState.lookaheadDepth = <dfa.startState.LookaheadDepth>
-
-const static short <name>dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] = {
-	<dfa.eot; wrap="\n     ", separator=",", null="-1">
-};
-const static short <name>dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] = {
-	<dfa.eof; wrap="\n     ", separator=",", null="-1">
-};
-const static unichar <name>dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] = {
-	<dfa.min; wrap="\n     ", separator=",", null="0">
-};
-const static unichar <name>dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] = {
-	<dfa.max; wrap="\n     ", separator=",", null="0">
-};
-const static short <name>dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] = {
-	<dfa.accept; wrap="\n     ", separator=",", null="-1">
-};
-const static short <name>dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] = {
-	<dfa.special; wrap="\n     ", separator=",", null="-1">
-};
-<dfa.edgeTransitionClassMap.keys:{ table |
-const static short <name>dfa<dfa.decisionNumber>_transition<i0>[] = {
-	<table; separator=", ", wrap="\n	", null="-1">
-};
-}; null="">
-const static short <name>dfa<dfa.decisionNumber>_transition[] = {
-	<dfa.transitionEdgeTables:{whichTable|<name>dfa<dfa.decisionNumber>_transition<whichTable>,}; separator="\n", null="0 /* fixme? */">
-};
-	<! add attribute for the DFA !>
-	DFA\<char_type> dfa<dfa.decisionNumber>;
-<! this should go in the initializer of the thing
-- (id) init
-{
-	if ((self = [super init]) != nil) {
-		eot = <name>dfa<dfa.decisionNumber>_eot;
-		eof = <name>dfa<dfa.decisionNumber>_eof;
-		min = <name>dfa<dfa.decisionNumber>_min;
-		max = <name>dfa<dfa.decisionNumber>_max;
-		accept = <name>dfa<dfa.decisionNumber>_accept;
-		special = <name>dfa<dfa.decisionNumber>_special;
-		if (!(transition = calloc(<dfa.numberOfStates>, sizeof(void*)))) {
-			[self release];
-			return nil;
-		}
-		<dfa.transitionEdgeTables:{whichTable|transition[<i0>] = <name>dfa<dfa.decisionNumber>_transition<whichTable>;}; separator="\n", null="">
-	}
-	return self;
-}
-!>
-
-<if(dfa.specialStateSTs)>
-int specialStateTransition( int state )
-{
-	int s = state;
-	switch ( s ) {
- 		<dfa.specialStateSTs:{state |
-		case <i0> : <! compressed special state numbers 0..n-1 !>
-		<state>}; separator="\n">
-	}
-<if(backtracking)>
-	if ( recognizer.isBacktracking() ) {
-		recognizer.setFailed();
-		return -1;
-	}<\n>
-<endif>
-	noViableAlt(s, input);
-}<\n>
-<endif>
-
-
-<\n>
-
-// <dfa.description>
-decision_type predictDFA<dfa.decisionNumber>( StreamType& input )
-{
-	/* mark current location (rewind automatically when the rewinder goes
-	 * out of scope) */
-	antlr3::Rewinder\<position_type> markPoint(input.getPosition());
-	goto s0;	// goto start...
-	// ...
-	throw NoViableAltException( input.getPosition(), "<dfa.description>", <dfa.decisionNumber>, 0 /* fixme */ );<\n>
-}<\n>
->>
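
The cyclicDFA template above emits the DFA as parallel tables. As a rough, simplified illustration (not part of the deleted file, and ignoring the eof and special-state handling the real runtime performs), the eot/min/max/accept/transition tables drive a prediction loop along these lines in Java; the tiny tables below are invented.

// Illustrative only -- not part of this diff.
public class CyclicDfaSketch {
    // state:            0   1   2
    static final short[] eot    = { -1, -1, -1 };
    static final short[] accept = { -1,  1,  2 };   // state 1 predicts alt 1, state 2 predicts alt 2
    static final char[]  min    = { 'a', 0, 0 };
    static final char[]  max    = { 'b', 0, 0 };
    // per-state edge tables, indexed by (symbol - min[state])
    static final short[][] transition = { { 1, 2 }, {}, {} };

    static int predict(String input) {
        int s = 0, i = 0;
        while (true) {
            if (accept[s] >= 1) return accept[s];          // accept state: alternative known
            char c = i < input.length() ? input.charAt(i) : (char) 0xFFFF;
            int next = (c >= min[s] && c <= max[s]) ? transition[s][c - min[s]] : eot[s];
            if (next < 0) throw new RuntimeException("no viable alt in DFA state " + s);
            s = next;
            i++;                                            // consume and keep looking ahead
        }
    }

    public static void main(String[] args) {
        System.out.println(predict("a"));  // 1
        System.out.println(predict("b"));  // 2
    }
}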
-
-/** A state in a cyclic DFA */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-// cyclicDFAState
-s<stateNumber>: {
-	<if(semPredState)>
-	input.rewind();<\n>
-	<else>
-	<tokenid_type()> LA<decisionNumber>_<stateNumber> = input.LA(1);
-	<endif>
-	<edges>
-	<if(needErrorClause)>
-	throw NoViableAltException( input.getPosition(), "<description>", <decisionNumber>, <stateNumber> );<\n>
-	<endif><\n>
-}<\n>
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-// cyclicDFAEdge
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>)
-{
-	input.consume();
-	goto s<targetStateNumber>;
-}<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= "goto s<targetStateNumber>;"
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left> && <right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
-
-notPredicate(pred) ::= "!(<pred>)"
-
-evalPredicate(pred,description) ::= "<pred>"
-
-evalSynPredicate(pred,description) ::= "<pred>()"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
-(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
->>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)\>=<lower> && input.LA(<k>)\<=<upper>)"
-
-setTest(ranges) ::= "<ranges; separator=\"||\">"
-
-// A T T R I B U T E S
-
-globalAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected static class <scope.name> {
-    <scope.attributes:{<it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
-<endif>
->>
-
-ruleAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected static class <scope.name>_scope {
-    <scope.attributes:{<it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
-<endif>
->>
-
-returnType() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor.name>_return
-<else>
-<if(ruleDescriptor.singleValueReturnType)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-ruleLabelType(referencedRule) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-<referencedRule.name>_return
-<else>
-<if(referencedRule.singleValueReturnType)>
-<referencedRule.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-/** Using a type-to-init-value map, try to init a type; if it is not in the
- *  table, it must be an object, so the default value is "null".
- */
-initValue(typeName) ::= <<
-<javaTypeInitMap.(typeName)>
->>
-
-ruleLabelDef(label) ::= <<
-<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
->>
-
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-public static class <returnType()> {
-    <labelType> start, stop;
-<if(buildAST)>
-    <ASTLabelType> tree;
-<else>
-<if(buildTemplate)>
-    StringTemplate st;
-<endif>
-<endif>
-    <scope.attributes:{<it.decl>;}; separator="\n">
-};
-<endif>
->>
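
For a rule with multiple return values, returnType()/returnScope() describe a small struct-like class carrying the start/stop boundaries, the optional tree, and any user-declared attributes. A hand-written Java sketch of that shape (the rule name "expr" and attribute "value" are hypothetical, not taken from this file):

// Illustrative only -- not part of this diff.
public class ReturnScopeSketch {
    static class expr_return {
        Object start, stop;   // first/last matched token (labelType in the template)
        Object tree;          // AST root when output=AST
        int value;            // user-declared attribute from "returns [int value]"
    }

    static expr_return expr() {
        expr_return retval = new expr_return();
        retval.start = "firstToken";   // placeholders, just for the sketch
        retval.value = 42;
        retval.stop = "lastToken";
        return retval;
    }

    public static void main(String[] args) {
        expr_return r = expr();
        // callers read attributes through the scope, as $expr.value does in actions
        System.out.println(r.value);
    }
}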
-
-parameterScope(scope) ::= <<
-<scope.attributes:{<it.decl>}; separator=", ">
->>
-
-/** Used in codegen.g to translate $x.y references.
- *  I could have left actions as StringTemplates to be inserted in
- *  the output (so they could use attributes inherited from surrounding
- *  templates), but really wanted to pass in AttributeScope and Attribute
- *  objects so this translation could query them.  So, translation of
- *  $x.y to executable code occurs before recognizerST.toString() occurs.
- *  I.e., actions are just text strings during final code generation.
- */
-globalAttributeRef(scope,attr) ::= <<
-((<scope>)<scope>_stack.peek()).<attr.name>
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <<
-<if(negIndex)>
-((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name>
-<else>
-<if(index)>
-((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name>
-<else>
-((<scope>_scope)<scope>_stack.peek()).<attr.name>
-<endif>
-<endif>
->>
-
-/** $x is either a global scope or x is a rule with a dynamic scope; this
- *  refers to the stack itself, not the top of the stack.  This is useful for
- *  predicates like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
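
The dynamic-scope templates above expand $scope::attr, $scope[i]::attr and $scope[-n]::attr into stack operations. A small self-contained Java sketch of that pattern (the scope name "function" and attribute "name" are invented for the example):

// Illustrative only -- not part of this diff.
import java.util.Stack;

public class DynamicScopeSketch {
    static class function_scope { String name; }
    static final Stack<function_scope> function_stack = new Stack<function_scope>();

    static void function(String name) {
        function_stack.push(new function_scope());        // rule entry
        function_stack.peek().name = name;                // $function::name = name;
        System.out.println("top of stack: " + function_stack.peek().name);
        // $function[-1]::name -> one below the top of the stack:
        if (function_stack.size() > 1) {
            System.out.println("caller: "
                + function_stack.elementAt(function_stack.size() - 1 - 1).name);
        }
        function_stack.pop();                              // rule exit
    }

    public static void main(String[] args) {
        function_stack.push(new function_scope());
        function_stack.peek().name = "outer";
        function("inner");                                 // prints "inner", then "outer"
        function_stack.pop();
    }
}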
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <<
-<if(referencedRule.singleValueReturnType)>
-<scope>
-<else>
-<scope>.<attr.name>
-<endif>
->>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <<
-<if(ruleDescriptor.singleValueReturnType)>
-<attr.name>
-<else>
-retval.<attr.name>
-<endif>
->>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-// not sure the next ones are the right approach; also, they are evaluated early,
-// so they cannot see TREE_PARSER or PARSER attributes, for example. :(
-
-tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
-tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
-tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
-tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
-tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
-tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "<scope>.start"
-ruleLabelPropertyRef_stop(scope,attr) ::= "<scope>.stop"
-ruleLabelPropertyRef_tree(scope,attr) ::= "<scope>.tree"
-ruleLabelPropertyRef_text(scope,attr) ::= "input.toString(<scope>.start,<scope>.stop)"
-ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
-rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
-rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
-rulePropertyRef_text(scope,attr) ::= "input.toString(retval.start,input.LT(-1))"
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-// A C T I O N S
-
-emit(type) ::= "emit(<type>);"
-
-setType(type) ::= "setType(<type>);"
-
-/** How to execute an action */
-execAction(action) ::= <<
-<if(backtracking)>
-<if(actions.(actionScope).synpredgate)>
-if ( <actions.(actionScope).synpredgate> )
-{
-	<action>
-}
-<else>
-if ( backtracking == 0 )
-{
-	<action>
-}
-<endif>
-<else>
-<action>
-<endif>
->>
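
execAction gates user actions on the backtracking state so they only run during the real parse, not while speculating. A minimal Java sketch of that gate (field and method names invented):

// Illustrative only -- not part of this diff.
public class ActionGateSketch {
    int backtracking = 0;   // >0 while inside a syntactic predicate / backtrack attempt

    void ruleWithAction() {
        // ... match some elements ...
        if (backtracking == 0) {
            System.out.println("user action runs only on the real parse");
        }
    }

    public static void main(String[] args) {
        ActionGateSketch p = new ActionGateSketch();
        p.backtracking = 1;
        p.ruleWithAction();   // silent: speculative parse
        p.backtracking = 0;
        p.ruleWithAction();   // action executes
    }
}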
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-public static final BitSet <name> = new BitSet(new long[]{<words64:{<it>L};separator=",">});<\n>
->>
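
The bitset template packs FOLLOW sets into 64-bit words. Assuming the ANTLR 3 Java runtime is on the classpath, such a set is built and queried roughly like this during error recovery (the token types are made up):

// Illustrative only -- not part of this diff.
import org.antlr.runtime.BitSet;

public class FollowSetSketch {
    public static void main(String[] args) {
        // bits 4 and 6 set: tokens ID=4 and RPAREN=6 can follow this rule reference
        BitSet FOLLOW_expr_in_stat = new BitSet(new long[] { 0x0000000000000050L });
        System.out.println(FOLLOW_expr_in_stat.member(4));   // true
        System.out.println(FOLLOW_expr_in_stat.member(5));   // false
    }
}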
-
-tokenPrefix() ::= "TOK_"
-codeFileExtension() ::= ".cpp"
-// used in CPPTarget.java to generate the headerfile extension
-headerFileExtension() ::= ".h"
-
-true() ::= "true"
-false() ::= "false"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/AST.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/AST.stg
deleted file mode 100644
index 35fdebd..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/AST.stg
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2007-2008 Johannes Luber
- * Copyright (c) 2005-2007 Kunle Odutola
- * Copyright (c) 2011 Sam Harwell
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-@outputFile.imports() ::= <<
-<@super.imports()>
-
-<if(!TREE_PARSER)>
-<! tree parser would already have imported !>
-using Antlr.Runtime.Tree;
-using RewriteRuleITokenStream = Antlr.Runtime.Tree.RewriteRuleTokenStream;
-<endif>
->>
-
-@genericParser.members() ::= <<
-<@super.members()>
-<parserMembers()>
->>
-
-parserCtorBody() ::= <%
-<super.parserCtorBody()><\n>
-TreeAdaptor = 
-<if(actions.(actionScope).treeAdaptorInitializer)>
-	<actions.(actionScope).treeAdaptorInitializer>
-<else>
-	new <actions.(actionScope).treeAdaptorType; null="CommonTreeAdaptor">()
-<endif>
-;
-%>
-
-/** Add an adaptor property that knows how to build trees */
-parserMembers() ::= <<
-private <treeAdaptorType()> adaptor;
-
-public <treeAdaptorType()> TreeAdaptor
-{
-	get
-	{
-		return adaptor;
-	}
-
-	set
-	{
-		this.adaptor = value;
-		<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
-	}
-}
->>
-
-treeAdaptorType() ::= <<
-<actions.(actionScope).treeAdaptorType; null="ITreeAdaptor">
->>
-
-ruleReturnBaseType() ::= <%
-Ast<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope\<<ASTLabelType>, <labelType>>
-%>
-
-/** Add a variable to track rule's return AST */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-<ASTLabelType> root_0 = default(<ASTLabelType>);<\n>
->>
-
-ruleLabelDefs() ::= <<
-<super.ruleLabelDefs()>
-<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
-	:{it|<ASTLabelType> <it.label.text>_tree = default(<ASTLabelType>);}; separator="\n">
-<ruleDescriptor.tokenListLabels:{it|<ASTLabelType> <it.label.text>_tree = default(<ASTLabelType>);}; separator="\n">
-<ruleDescriptor.allTokenRefsInAltsWithRewrites
-	:{it|RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
-<ruleDescriptor.allRuleRefsInAltsWithRewrites
-	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
->>
-
-/** When doing auto AST construction, we must define some variables;
- *  these should be turned off if doing rewrites.  This must be a "mode",
- *  as a rule could have both a rewrite and AST construction within the same
- *  alternative block.
- */
-@alt.declarations() ::= <<
-<if(autoAST)>
-<if(outerAlt)>
-<if(!rewriteMode)>
-root_0 = (<ASTLabelType>)adaptor.Nil();
-<endif>
-<endif>
-<endif>
->>
-
-// T r a c k i n g  R u l e  E l e m e n t s
-
-/** ID and track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)> <! Track implies no auto AST construction!>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<token>.Add(<label>);<\n>
->>
-
-/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
- *  to the tracking list stream_ID for use in the rewrite.
- */
-tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefTrack(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** ^(ID ...) track for rewrite */
-tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<token>.Add(<label>);
->>
-
-/** Match ^(label+=TOKEN ...) track for rewrite */
-tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefRuleRootTrack(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<rule.name>.Add(<label>.Tree);
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefTrack(...)>
-<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<rule>.Add(<label>.Tree);
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRootTrack(...)>
-<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
->>
-
-// R e w r i t e
-
-rewriteCode(
-	alts, description,
-	referencedElementsDeep, // ALL referenced elements to right of ->
-	referencedTokenLabels,
-	referencedTokenListLabels,
-	referencedRuleLabels,
-	referencedRuleListLabels,
-	referencedWildcardLabels,
-	referencedWildcardListLabels,
-	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::= <<
-<\n>{
-// AST REWRITE
-// elements: <referencedElementsDeep; separator=", ">
-// token labels: <referencedTokenLabels; separator=", ">
-// rule labels: <referencedRuleLabels; separator=", ">
-// token list labels: <referencedTokenListLabels; separator=", ">
-// rule list labels: <referencedRuleListLabels; separator=", ">
-// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
-<if(backtracking)>
-if (<actions.(actionScope).synpredgate>) {
-<endif>
-<prevRuleRootRef()>.Tree = root_0;
-<rewriteCodeLabels()>
-root_0 = (<ASTLabelType>)adaptor.Nil();
-<alts:rewriteAlt(); separator="else ">
-<! if tree parser and rewrite=true !>
-<if(TREE_PARSER&&rewriteMode)>
-<prevRuleRootRef()>.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
-input.ReplaceChildren(adaptor.GetParent(retval.Start),
-                      adaptor.GetChildIndex(retval.Start),
-                      adaptor.GetChildIndex(_last),
-                      retval.Tree);
-<endif>
-<! if parser or tree-parser && rewrite!=true, we need to set result !>
-<if(!TREE_PARSER||!rewriteMode)>
-<prevRuleRootRef()>.Tree = root_0;
-<endif>
-<if(backtracking)>
-}
-<endif>
-}
-
->>
-
-rewriteCodeLabels() ::= <<
-<referencedTokenLabels
-    :{it|RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
-    separator="\n"
->
-<referencedTokenListLabels
-    :{it|RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
-    separator="\n"
->
-<referencedWildcardLabels
-	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
-	separator="\n"
->
-<referencedWildcardListLabels
-	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
-	separator="\n"
->
-<referencedRuleLabels
-    :{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.Tree:null);};
-    separator="\n"
->
-<referencedRuleListLabels
-    :{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"token <it>",list_<it>);};
-    separator="\n"
->
->>
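
These labels become RewriteRule*Stream objects that buffer matched elements so the -> rewrite can pull them back out via nextNode()/nextTree(). A small Java sketch using the ANTLR 3 runtime (assumed on the classpath; the token type ID=4 and the labels are invented):

// Illustrative only -- not part of this diff.
import org.antlr.runtime.CommonToken;
import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.CommonTreeAdaptor;
import org.antlr.runtime.tree.RewriteRuleTokenStream;
import org.antlr.runtime.tree.TreeAdaptor;

public class RewriteStreamSketch {
    public static void main(String[] args) {
        TreeAdaptor adaptor = new CommonTreeAdaptor();
        RewriteRuleTokenStream stream_ID = new RewriteRuleTokenStream(adaptor, "token ID");

        // during matching: each matched ID token is tracked for the rewrite
        stream_ID.add(new CommonToken(4, "x"));
        stream_ID.add(new CommonToken(4, "y"));

        // during the rewrite: drain the stream into the new tree
        Object root_0 = adaptor.nil();
        while (stream_ID.hasNext()) {
            adaptor.addChild(root_0, stream_ID.nextNode());
        }
        System.out.println(((CommonTree) root_0).toStringTree());   // x y
    }
}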
-
-/** Generate code for an optional rewrite block; note it uses the deep ref'd element
-  *  list rather than the shallow list used by other blocks.
-  */
-rewriteOptionalBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in immediately block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-if (<referencedElementsDeep:{el | stream_<el>.HasNext}; separator="||">)
-{
-	<alt>
-}
-<referencedElementsDeep:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewriteClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in immediately block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-while ( <referencedElements:{el | stream_<el>.HasNext}; separator="||"> )
-{
-	<alt>
-}
-<referencedElements:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewritePositiveClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in immediately block; no nested blocks
-	description) ::=
-<<
-if (!(<referencedElements:{el | stream_<el>.HasNext}; separator="||">))
-{
-	throw new RewriteEarlyExitException();
-}
-while ( <referencedElements:{el | stream_<el>.HasNext}; separator="||"> )
-{
-	<alt>
-}
-<referencedElements:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewriteAlt(a) ::= <<
-// <a.description>
-<if(a.pred)>
-if (<a.pred>)
-{
-	<a.alt>
-}
-<else>
-{
-	<a.alt>
-}
-<endif>
->>
-
-/** For empty rewrites: "r : ... -> ;" */
-rewriteEmptyAlt() ::= "root_0 = null;"
-
-rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
-// <fileName>:<description>
-{
-<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.Nil();
-<root:rewriteElement()>
-<children:rewriteElement()>
-adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
-}<\n>
->>
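
rewriteTree nests one nil root per tree level and then hangs the finished level under its enclosing level. In Java, with the ANTLR 3 runtime's tree adaptor, the same dance looks roughly like this (token types PLUS=5 and INT=4 are invented):

// Illustrative only -- not part of this diff.
import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.CommonTreeAdaptor;
import org.antlr.runtime.tree.TreeAdaptor;

public class RewriteTreeSketch {
    public static void main(String[] args) {
        TreeAdaptor adaptor = new CommonTreeAdaptor();
        Object root_0 = adaptor.nil();                     // enclosing level

        Object root_1 = adaptor.nil();                     // ^(PLUS INT INT) level
        root_1 = adaptor.becomeRoot(adaptor.create(5, "+"), root_1);
        adaptor.addChild(root_1, adaptor.create(4, "1"));
        adaptor.addChild(root_1, adaptor.create(4, "2"));

        adaptor.addChild(root_0, root_1);                  // hang level 1 under level 0
        System.out.println(((CommonTree) root_0).toStringTree());   // (+ 1 2)
    }
}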
-
-rewriteElementList(elements) ::= "<elements:rewriteElement()>"
-
-rewriteElement(e) ::= <%
-<@pregen()>
-DebugLocation(<e.line>, <e.pos>);<\n>
-<e.el>
-%>
-
-/** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,terminalOptions,args) ::= <<
-adaptor.AddChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
->>
-
-/** Gen $label ... where defined via label=ID */
-rewriteTokenLabelRef(label,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
->>
-
-/** Gen $label ... where defined via label+=ID */
-rewriteTokenListLabelRef(label,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
->>
-
-/** Gen ^($label ...) */
-rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
->>
-
-/** Gen ^($label ...) where label+=... */
-rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
-
-/** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,terminalOptions,args) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);<\n>
->>
-
-rewriteImaginaryTokenRef(args,token,terminalOptions,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
->>
-
-rewriteImaginaryTokenRefRoot(args,token,terminalOptions,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>);<\n>
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-root_0 = <action>;<\n>
->>
-
-/** What is the name of the previous value of this rule's root tree?  This
- *  lets us refer to $rule to mean the previous value.  I am reusing the
- *  variable 'tree' sitting in the retval struct to hold the value of root_0
- *  right before I set it during rewrites.  The assignment will be to retval.tree.
- */
-prevRuleRootRef() ::= "retval"
-
-rewriteRuleRef(rule) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<rule>.NextTree());<\n>
->>
-
-rewriteRuleRefRoot(rule) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<rule>.NextNode(), root_<treeLevel>);<\n>
->>
-
-rewriteNodeAction(action) ::= <<
-adaptor.AddChild(root_<treeLevel>, <action>);<\n>
->>
-
-rewriteNodeActionRoot(action) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<action>, root_<treeLevel>);<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel=rule */
-rewriteRuleLabelRef(label) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
-rewriteRuleListLabelRef(label) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel=rule */
-rewriteRuleLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
-rewriteRuleListLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
->>
-
-rewriteWildcardLabelRef(label) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
->>
-
-createImaginaryNode(tokenType,terminalOptions,args) ::= <%
-<if(terminalOptions.node)>
-<! new MethodNode(IDLabel, args) !>
-new <terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
-<else>
-(<ASTLabelType>)adaptor.Create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
-<endif>
-%>
-
-createRewriteNodeFromElement(token,terminalOptions,args) ::= <%
-<if(terminalOptions.node)>
-new <terminalOptions.node>(stream_<token>.NextToken()<if(args)>, <args; separator=", "><endif>)
-<else>
-<if(args)> <! must create new node from old !>
-adaptor.Create(<token>, <args; separator=", ">)
-<else>
-stream_<token>.NextNode()
-<endif>
-<endif>
-%>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTDbg.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTDbg.stg
deleted file mode 100644
index 0d2a441..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTDbg.stg
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2007-2008 Johannes Luber
- * Copyright (c) 2005-2007 Kunle Odutola
- * Copyright (c) 2005 Terence Parr
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
- *  hierarchy is set up as ASTDbg : AST : Dbg : Java by code generator.
- */
-
-parserMembers() ::= <<
-protected DebugTreeAdaptor adaptor;
-
-public ITreeAdaptor TreeAdaptor
-{
-	get
-	{
-		return adaptor;
-	}
-	set
-	{
-<if(grammar.grammarIsRoot)>
-		this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
-<else>
-		this.adaptor = (DebugTreeAdaptor)adaptor; // delegator sends dbg adaptor
-<endif><\n>
-		<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
-	}
-}<\n>
->>
-
-parserCtorBody() ::= <<
-<super.parserCtorBody()>
->>
-
-createListenerAndHandshake() ::= <<
-DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, <if(TREE_PARSER)>input.TreeAdaptor<else>adaptor<endif> );
-DebugListener = proxy;
-<inputStreamType> = new Debug<inputStreamType>( input, proxy );
-try
-{
-	proxy.Handshake();
-}
-catch ( IOException ioe )
-{
-	ReportError( ioe );
-}
->>
-
-@ctorForRootGrammar.finally() ::= <<
-ITreeAdaptor adap = new CommonTreeAdaptor();
-TreeAdaptor = adap;
-proxy.TreeAdaptor = adap;
->>
-
-@ctorForProfilingRootGrammar.finally() ::=<<
-ITreeAdaptor adap = new CommonTreeAdaptor();
-TreeAdaptor = adap;
->>
-
-@ctorForPredefinedListener.superClassRef() ::= ": base( input, dbg )"
-
-@ctorForPredefinedListener.finally() ::=<<
-<if(grammar.grammarIsRoot)><! don't create new adaptor for delegates !>
-ITreeAdaptor adap = new CommonTreeAdaptor();
-TreeAdaptor = adap;<\n>
-<endif>
->>
-
-//@rewriteElement.pregen() ::= "dbg.Location( <e.line>, <e.pos> );"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTParser.stg
deleted file mode 100644
index 6413dfe..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTParser.stg
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2007-2008 Johannes Luber
- * Copyright (c) 2005-2007 Kunle Odutola
- * Copyright (c) 2011 Sam Harwell
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** Templates for building ASTs during normal parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  The situation is not too bad as rewrite (->) usage makes ^ and !
- *  invalid. There is no huge explosion of combinations.
- */
-
-@rule.setErrorReturnValue() ::= <<
-retval.Tree = (<ASTLabelType>)adaptor.ErrorNode(input, retval.Start, input.LT(-1), re);
-<! System.out.WriteLine("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
->>
-
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if (state.backtracking == 0) {<endif>
-<label>_tree = <createNodeFromToken(...)>;
-adaptor.AddChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<label>_tree = <createNodeFromToken(...)>;
-root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,terminalOptions,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-// SET AST
-
-// The match-set stuff is interesting in that it uses an argument list
-// to pass code to the default matchSet; another possible way to alter
-// inherited code.  I don't use the region stuff because I need to pass
-// different chunks depending on the operator.  I don't like encoding the
-// operator in the template name, as the number of templates gets large,
-// but this is the most flexible approach--as opposed to having the code
-// generator call matchSet and then add root code or rule-root code plus
-// list-label code plus ...  Those combinations might require complicated
-// code rather than just added-on code.  Investigate that refactoring when
-// I have more time.
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-<super.matchSet(postmatchCode={<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>adaptor.AddChild(root_0, <createNodeFromToken(...)>);}, ...)>
->>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-<matchSet(...)>
->>
-
-matchSetBang(s,label,elementIndex,terminalOptions,postmatchCode) ::= "<super.matchSet(...)>"
-
-// note there is no matchSetTrack because -> rewrites force sets to be
-// plain old blocks of alts: (A|B|...|C)
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-<if(label)>
-<label>=(<labelType>)input.LT(1);
-<endif>
-<super.matchSet(postmatchCode={<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<createNodeFromToken(...)>, root_0);}, ...)>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>adaptor.AddChild(root_0, <label>.Tree);
->>
-
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
-
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_0);
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefBang(...)>
-<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
->>
-
-// WILDCARD AST
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-adaptor.AddChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-createNodeFromToken(label,terminalOptions) ::= <%
-<if(terminalOptions.node)>
-new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>)
-<else>
-(<ASTLabelType>)adaptor.Create(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>)
-<endif>
-%>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
-adaptor.SetTokenBoundaries(retval.Tree, retval.Start, retval.Stop);
-<if(backtracking)>}<endif>
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTTreeParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTTreeParser.stg
deleted file mode 100644
index d749fa6..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTTreeParser.stg
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2007-2008 Johannes Luber
- * Copyright (c) 2005-2007 Kunle Odutola
- * Copyright (c) 2011 Sam Harwell
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** Templates for building ASTs during tree parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  Each combination has its own template except that label/no label
- *  is combined into tokenRef, ruleRef, ...
- */
-
-/** Add a variable to track last element matched */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-<ASTLabelType> _first_0 = default(<ASTLabelType>);
-<ASTLabelType> _last = default(<ASTLabelType>);<\n>
->>
-
-/** What to emit when there is no rewrite rule.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= <<
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<if(rewriteMode)>
-retval.Tree = (<ASTLabelType>)_first_0;
-if (adaptor.GetParent(retval.Tree)!=null && adaptor.IsNil(adaptor.GetParent(retval.Tree)))
-    retval.Tree = (<ASTLabelType>)adaptor.GetParent(retval.Tree);
-<endif>
-<if(backtracking)>}<endif>
->>
-
-/** match ^(root children) in tree parser; override here to
- *  add tree construction actions.
- */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-{
-<ASTLabelType> _save_last_<treeLevel> = _last;
-<ASTLabelType> _first_<treeLevel> = default(<ASTLabelType>);
-<if(!rewriteMode)>
-<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.Nil();
-<endif>
-<root:element()>
-<if(rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
-<if(root.el.rule)>
-if (_first_<enclosingTreeLevel> == null) _first_<enclosingTreeLevel> = <root.el.label>.Tree;
-<else>
-if (_first_<enclosingTreeLevel> == null) _first_<enclosingTreeLevel> = <root.el.label>;
-<endif>
-<endif>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if (input.LA(1) == TokenTypes.Down) {
-    Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
-    <children:element()>
-    Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
-}
-<else>
-Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
-<children:element()>
-Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
-<endif>
-<if(!rewriteMode)>
-adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
-<endif>
-_last = _save_last_<treeLevel>;
-}<\n>
->>
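
The tree() template matches ^(root children) against a node stream in which subtrees are flattened with imaginary DOWN/UP tokens. A self-contained Java sketch of that matching shape (all constants here are invented for the illustration):

// Illustrative only -- not part of this diff.
import java.util.Arrays;
import java.util.List;

public class TreeMatchSketch {
    static final int DOWN = 2, UP = 3, INT = 4, PLUS = 5;
    static List<Integer> stream = Arrays.asList(PLUS, DOWN, INT, INT, UP);
    static int p = 0;

    static void match(int type) {
        if (stream.get(p) != type) throw new RuntimeException("mismatch at " + p);
        p++;                                   // consume
    }

    /** Matches ^(PLUS INT INT), the shape the tree() template generates code for. */
    static void plusTree() {
        match(PLUS);                           // the root
        match(DOWN);                           // descend only if there are children
        match(INT);
        match(INT);
        match(UP);                             // pop back to the enclosing level
    }

    public static void main(String[] args) {
        plusTree();
        System.out.println("matched ^(PLUS INT INT)");
    }
}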
-
-// TOKEN AST STUFF
-
-/** ID! and output=AST (same as plain tokenRef) except that it also
- *  sets _last
- */
-tokenRefBang(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.tokenRef(...)>
->>
-
-/** ID auto construct */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<endif><\n>
-adaptor.AddChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>}<endif>
-<else> <! rewrite mode !>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
-if (_first_<treeLevel> == null) _first_<treeLevel> = <label>;
-<endif>
->>
-
-/** label+=TOKEN auto construct */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabelElem(elem=label,...)>
->>
-
-/** ^(ID ...) auto construct */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<endif><\n>
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
-<if(backtracking)>}<endif>
-<endif>
->>
-
-/** Match ^(label+=TOKEN ...) auto construct */
-tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabelElem(elem=label,...)>
->>
-
-/** Match . wildcard and auto dup the node/subtree */
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.wildcard(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.DupTree(<label>);
-adaptor.AddChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>}<endif>
-<else> <! rewrite mode !>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
-if (_first_<treeLevel> == null) _first_<treeLevel> = <label>;
-<endif>
->>
-
-// SET AST
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.matchSet(postmatchCode={
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<endif><\n>
-adaptor.AddChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>\}<endif>
-<endif>
-}, ...
-)>
->>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-<matchSet(...)>
-<noRewrite(...)> <! set return tree !>
->>
-
-matchSetBang(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.matchSet(...)>
->>
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-<super.matchSet(postmatchCode={
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<endif><\n>
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
-<if(backtracking)>\}<endif>
-<endif>
-}, ...
-)>
->>
-
-// RULE REF AST
-
-/** rule auto construct */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRef(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
-<if(!rewriteMode)>
-adaptor.AddChild(root_<treeLevel>, <label>.Tree);
-<else> <! rewrite mode !>
-if (_first_<treeLevel> == null) _first_<treeLevel> = <label>.Tree;
-<endif>
->>
-
-/** x+=rule auto construct */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabelElem(elem={<label>.Tree},...)>
->>
-
-/** ^(rule ...) auto construct */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_<treeLevel>);
-<endif>
->>
-
-/** ^(x+=rule ...) auto construct */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabelElem(elem={<label>.Tree},...)>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefTrack(...)>
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefTrackAndListLabel(...)>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefRuleRootTrack(...)>
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefRuleRootTrackAndListLabel(...)>
->>
-
-/** Streams for token refs are tree nodes now; override to
- *  change NextToken to NextNode.
- */
-createRewriteNodeFromElement(token,terminalOptions,args) ::= <%
-<if(terminalOptions.node)>
-new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif>stream_<token>.NextNode())
-<else>
-stream_<token>.NextNode()
-<endif>
-%>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
-<if(backtracking)>}<endif>
-<endif>
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/CSharp2.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/CSharp2.stg
deleted file mode 100644
index 8e5d603..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/CSharp2.stg
+++ /dev/null
@@ -1,1715 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2007-2008 Johannes Luber
- * Copyright (c) 2005-2007 Kunle Odutola
- * Copyright (c) 2011 Sam Harwell
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-csharpVisibilityMap ::= [
-	"private":"private",
-	"protected":"protected",
-	"public":"public",
-	"fragment":"private",
-	default:"private"
-]
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(	LEXER,PARSER,TREE_PARSER, actionScope, actions,
-			docComment, recognizer,
-			name, tokens, tokenNames, rules, cyclicDFAs,
-			bitsets, buildTemplate, buildAST, rewriteMode, profile,
-			backtracking, synpreds, memoize, numRules,
-			fileName, ANTLRVersion, generatedTimestamp, trace,
-			scopes, superClass, literals) ::=
-<<
-//------------------------------------------------------------------------------
-// \<auto-generated>
-//     This code was generated by a tool.
-//     ANTLR Version: <ANTLRVersion>
-//
-//     Changes to this file may cause incorrect behavior and will be lost if
-//     the code is regenerated.
-// \</auto-generated>
-//------------------------------------------------------------------------------
-
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-
-<if(trace)>
-#define ANTLR_TRACE
-<endif>
-<@debugPreprocessor()>
-// The variable 'variable' is assigned but its value is never used.
-#pragma warning disable 168, 219
-// Unreachable code detected.
-#pragma warning disable 162
-// Missing XML comment for publicly visible type or member 'Type_or_Member'
-#pragma warning disable 1591
-
-<actions.(actionScope).header>
-
-<@imports>
-using System.Collections.Generic;
-using Antlr.Runtime;
-using Antlr.Runtime.Misc;
-<if(TREE_PARSER)>
-using Antlr.Runtime.Tree;
-using RewriteRuleITokenStream = Antlr.Runtime.Tree.RewriteRuleTokenStream;
-<endif>
-using ConditionalAttribute = System.Diagnostics.ConditionalAttribute;
-<@end>
-<if(actions.(actionScope).namespace)>
-namespace <actions.(actionScope).namespace>
-{
-<endif>
-<docComment>
-<recognizer>
-<if(actions.(actionScope).namespace)>
-
-} // namespace <actions.(actionScope).namespace>
-<endif>
->>
-
-lexerInputStreamType() ::= <<
-<actions.(actionScope).inputStreamType; null="ICharStream">
->>
-
-lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="CommonToken",
-      superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Lexer<endif>}) ::= <<
-[System.CodeDom.Compiler.GeneratedCode("ANTLR", "<ANTLRVersion>")]
-[System.CLSCompliant(false)]
-<parserModifier(grammar=grammar, actions=actions)> partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
-{
-	<tokens:{it|public const int <it.name; format="id">=<it.type>;}; separator="\n">
-	<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-	<actions.lexer.members>
-
-    // delegates
-    <grammar.delegates:
-         {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
-    // delegators
-    <grammar.delegators:
-         {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
-    <last(grammar.delegators):{g|private <g.recognizerName> gParent;}>
-
-	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>()<! needed by subclasses !>
-	{
-		OnCreated();
-	}
-
-	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<lexerInputStreamType()> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
-		: this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
-	{
-	}
-
-	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<lexerInputStreamType()> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
-		: base(input, state)
-	{
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-		state.ruleMemo = new System.Collections.Generic.Dictionary\<int, int>[<numRules>+1];<\n><! index from 1..n !>
-<endif>
-<endif>
-		<grammar.directDelegates:
-		 {g|<g:delegateName()> = new <g.recognizerName>(input, this.state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
-		<grammar.delegators:
-		 {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
-		<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
-
-		OnCreated();
-	}
-	public override string GrammarFileName { get { return "<fileName>"; } }
-
-	private static readonly bool[] decisionCanBacktrack = new bool[0];
-
-<if(grammar.hasDelegates)>
-	public override <lexerInputStreamType()> CharStream
-	{
-		get
-		{
-			return base.CharStream;
-		}
-		set
-		{
-			base.CharStream = value;
-			<grammar.directDelegates:
-			 {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
-			<grammar.delegators:
-			 {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
-			<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
-		}
-	}
-
-<endif>
-<if(filterMode)>
-	<filteringNextToken()>
-<endif>
-
-	[Conditional("ANTLR_TRACE")]
-	protected virtual void OnCreated() {}
-	[Conditional("ANTLR_TRACE")]
-	protected virtual void EnterRule(string ruleName, int ruleIndex) {}
-	[Conditional("ANTLR_TRACE")]
-	protected virtual void LeaveRule(string ruleName, int ruleIndex) {}
-
-    <rules; separator="\n">
-
-	<insertLexerSynpreds(synpreds)>
-
-	#region DFA
-	<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
-
-	protected override void InitDFAs()
-	{
-		base.InitDFAs();
-		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this<if(dfa.specialStateSTs)>, SpecialStateTransition<dfa.decisionNumber><endif>);}; separator="\n">
-	}
-
-	<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-	#endregion
-
-}
->>
-
-/** An override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error is reported on failure; just rewind, consume
- *  a token, and then try again.  backtracking needs to be set as well.
- *  Make rule memoization happen only at levels above 1, as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-public override IToken NextToken()
-{
-	while (true)
-	{
-		if (input.LA(1) == CharStreamConstants.EndOfFile)
-		{
-			IToken eof = new CommonToken((ICharStream)input, CharStreamConstants.EndOfFile, TokenChannels.Default, input.Index, input.Index);
-			eof.Line = Line;
-			eof.CharPositionInLine = CharPositionInLine;
-			return eof;
-		}
-		state.token = null;
-		state.channel = TokenChannels.Default;
-		state.tokenStartCharIndex = input.Index;
-		state.tokenStartCharPositionInLine = input.CharPositionInLine;
-		state.tokenStartLine = input.Line;
-		state.text = null;
-		try
-		{
-			int m = input.Mark();
-			state.backtracking=1;<! means we won't throw slow exception !>
-			state.failed=false;
-			mTokens();
-			state.backtracking=0;
-			<! mTokens backtracks with synpred at backtracking==2
-			   and we set the synpredgate to allow actions at level 1. !>
-			if (state.failed)
-			{
-				input.Rewind(m);
-				input.Consume();<! advance one char and try again !>
-			}
-			else
-			{
-				Emit();
-				return state.token;
-			}
-		}
-		catch (RecognitionException re)
-		{
-			// shouldn't happen in backtracking mode, but...
-			ReportError(re);
-			Recover(re);
-		}
-	}
-}
-
-public override void Memoize(IIntStream input, int ruleIndex, int ruleStartIndex)
-{
-	if (state.backtracking > 1)
-		base.Memoize(input, ruleIndex, ruleStartIndex);
-}
-
-public override bool AlreadyParsedRule(IIntStream input, int ruleIndex)
-{
-	if (state.backtracking > 1)
-		return base.AlreadyParsedRule(input, ruleIndex);
-
-	return false;
-}
->>
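For context, a minimal sketch of how a filter-mode lexer produced by the template above is typically driven. FuzzyLexer is an assumed name for a lexer generated from a grammar with options { filter=true; }; as NextToken() above shows, unmatched input is rewound and consumed silently instead of being reported.

using Antlr.Runtime;

class FilterLexerDemo
{
	static void Main()
	{
		ICharStream input = new ANTLRStringStream("noise KEYWORD more noise");
		FuzzyLexer lexer = new FuzzyLexer(input);   // hypothetical generated lexer class
		for (IToken t = lexer.NextToken(); t.Type != CharStreamConstants.EndOfFile; t = lexer.NextToken())
		{
			// Only input that mTokens() matched is emitted; everything else was skipped.
			System.Console.WriteLine(t.Text);
		}
	}
}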
-
-actionGate() ::= "state.backtracking == 0"
-
-filteringActionGate() ::= "state.backtracking == 1"
-
-/** How to generate a parser */
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass,
-              labelType, members, rewriteElementType,
-              filterMode, ASTLabelType="object") ::= <<
-[System.CodeDom.Compiler.GeneratedCode("ANTLR", "<ANTLRVersion>")]
-[System.CLSCompliant(false)]
-<parserModifier(grammar=grammar, actions=actions)> partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
-{
-<if(grammar.grammarIsRoot)>
-	internal static readonly string[] tokenNames = new string[] {
-		"\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
-	};
-<endif>
-	<tokens:{it|public const int <it.name; format="id">=<it.type>;}; separator="\n">
-
-<if(grammar.delegates)>
-	// delegates
-	<grammar.delegates:
-		 {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
-<endif>
-<if(grammar.delegators)>
-	// delegators
-	<grammar.delegators:
-		 {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
-	<last(grammar.delegators):{g|private <g.recognizerName> gParent;}>
-<endif>
-
-	<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-	<@members()>
-
-	public override string[] TokenNames { get { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; } }
-	public override string GrammarFileName { get { return "<fileName>"; } }
-
-	<members>
-
-	[Conditional("ANTLR_TRACE")]
-	protected virtual void OnCreated() {}
-	[Conditional("ANTLR_TRACE")]
-	protected virtual void EnterRule(string ruleName, int ruleIndex) {}
-	[Conditional("ANTLR_TRACE")]
-	protected virtual void LeaveRule(string ruleName, int ruleIndex) {}
-
-	#region Rules
-	<rules; separator="\n">
-	#endregion Rules
-
-<if(grammar.delegatedRules)>
-<! generate rule/method definitions for imported rules so they
-   appear to be defined in this recognizer. !>
-	#region Delegated rules
-<grammar.delegatedRules:{ruleDescriptor|
-	<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> <returnType(ruleDescriptor)> <ruleDescriptor.name; format="id">(<ruleDescriptor.parameterScope:parameterScope()>) <!throws RecognitionException !>{ <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name; format="id">(<ruleDescriptor.parameterScope.attributes:{a|<a.name; format="id">}; separator=", ">); \}}; separator="\n">
-	#endregion Delegated rules
-<endif>
-
-	<insertSynpreds(synpreds)>
-
-<if(cyclicDFAs)>
-	#region DFA
-	<cyclicDFAs:{dfa | private DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
-
-	protected override void InitDFAs()
-	{
-		base.InitDFAs();
-		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>( this<if(dfa.specialStateSTs)>, SpecialStateTransition<dfa.decisionNumber><endif> );}; separator="\n">
-	}
-
-	<cyclicDFAs:cyclicDFA()><! dump tables for all DFA !>
-	#endregion DFA
-<endif>
-
-<if(bitsets)>
-	#region Follow sets
-	private static class Follow
-	{
-		<bitsets:{it|<bitset(name={_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>}; separator="\n">
-	}
-	#endregion Follow sets
-<endif>
-}
->>
-
-@genericParser.members() ::= <<
-#if ANTLR_DEBUG
-	private static readonly bool[] decisionCanBacktrack =
-		new bool[]
-		{
-			false, // invalid decision
-			<grammar.decisions:{d | <d.dfa.hasSynPred>}; wrap="\n", separator=", ">
-		};
-#else
-	private static readonly bool[] decisionCanBacktrack = new bool[0];
-#endif
-<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<inputStreamType> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
-	: this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
-{
-}
-<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<inputStreamType> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
-	: base(input, state)
-{
-	<parserCtorBody()>
-<if(grammar.directDelegates)>
-	<grammar.directDelegates:
-	 {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
-<endif>
-<if(grammar.indirectDelegates)>
-	<grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
-<endif>
-<if(grammar.delegators)>
-	<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
-<endif>
-	OnCreated();
-}
->>
-
-// imported grammars are 'public' (can't be internal because their return scope classes must be accessible)
-parserModifier(grammar, actions) ::= <<
-<if(grammar.grammarIsRoot)><actions.(actionScope).modifier; null="public"><else>public<endif>
->>
-
-parserCtorBody() ::= <<
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-this.state.ruleMemo = new System.Collections.Generic.Dictionary\<int, int>[<length(grammar.allImportedRules)>+1];<\n><! index from 1..n !>
-<endif>
-<endif>
-<grammar.delegators:
- {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
-       ASTLabelType="object", superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Parser<endif>}, labelType="IToken",
-       members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="ITokenStream", rewriteElementType="IToken", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
-           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="object",
-           superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Tree.<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif><endif>},
-           members={<actions.treeparser.members>}) ::= <<
-<genericParser(inputStreamType="ITreeNodeStream", rewriteElementType="Node", ...)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  or parameters, just generate the simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  Because predicates cannot be inlined into the invoking rule, they need
- *  to be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-[Conditional("ANTLR_TRACE")]
-protected virtual void EnterRule_<ruleName>_fragment() {}
-[Conditional("ANTLR_TRACE")]
-protected virtual void LeaveRule_<ruleName>_fragment() {}
-
-// $ANTLR start <ruleName>
-public <!final !>void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope()>)
-{
-	<ruleLabelDefs()>
-	EnterRule_<ruleName>_fragment();
-	EnterRule("<ruleName>_fragment", <ruleDescriptor.index>);
-	TraceIn("<ruleName>_fragment", <ruleDescriptor.index>);
-	try
-	{
-		<block>
-	}
-	finally
-	{
-		TraceOut("<ruleName>_fragment", <ruleDescriptor.index>);
-		LeaveRule("<ruleName>_fragment", <ruleDescriptor.index>);
-		LeaveRule_<ruleName>_fragment();
-	}
-}
-// $ANTLR end <ruleName>
->>
-
-insertLexerSynpreds(synpreds) ::= <<
-<insertSynpreds(synpreds)>
->>
-
-insertSynpreds(synpreds) ::= <<
-<if(synpreds)>
-#region Synpreds
-private bool EvaluatePredicate(System.Action fragment)
-{
-	bool success = false;
-	state.backtracking++;
-	<@start()>
-	try { DebugBeginBacktrack(state.backtracking);
-	int start = input.Mark();
-	try
-	{
-		fragment();
-	}
-	catch ( RecognitionException re )
-	{
-		System.Console.Error.WriteLine("impossible: "+re);
-	}
-	success = !state.failed;
-	input.Rewind(start);
-	} finally { DebugEndBacktrack(state.backtracking, success); }
-	<@stop()>
-	state.backtracking--;
-	state.failed=false;
-	return success;
-}
-#endregion Synpreds
-<endif>
->>
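To show how these pieces fit together, a hypothetical call site as it would appear in a generated decision (all names invented; the shape follows the dfaEdge, dfaAcceptState, and evalSynPredicate templates later in this file): the edge guards an alternative with a backtracking evaluation of the predicate fragment.

// Hypothetical generated decision code:
if ((LA3_1 == ID) && (EvaluatePredicate(synpred1_Expr_fragment)))
{
	alt3 = 1;
}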
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if (state.backtracking > 0 && AlreadyParsedRule(input, <ruleDescriptor.index>)) { <returnFromRule()> }
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (state.failed) <returnFromRule()><endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (state.backtracking>0) {state.failed=true; <returnFromRule()>}<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(ruleDescriptor.returnScope)>
-
-[Conditional("ANTLR_TRACE")]
-protected virtual void EnterRule_<ruleName>() {}
-[Conditional("ANTLR_TRACE")]
-protected virtual void LeaveRule_<ruleName>() {}
-
-// $ANTLR start "<ruleName>"
-// <fileName>:<description>
-[GrammarRule("<ruleName>")]
-<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> <returnType(ruleDescriptor)> <ruleName; format="id">(<ruleDescriptor.parameterScope:parameterScope()>)
-{
-	EnterRule_<ruleName>();
-	EnterRule("<ruleName>", <ruleDescriptor.index>);
-	TraceIn("<ruleName>", <ruleDescriptor.index>);
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleLabelDefs()>
-    <ruleDescriptor.actions.init>
-	try { DebugEnterRule(GrammarFileName, "<ruleName>");
-	DebugLocation(<ruleDescriptor.tree.line>, <ruleDescriptor.EORNode.charPositionInLine>);
-	<@preamble()>
-	try
-	{
-		<ruleMemoization(name=ruleName)>
-		<block>
-		<ruleCleanUp()>
-		<(ruleDescriptor.actions.after):execAction()>
-	}
-<if(exceptions)>
-	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-	<actions.(actionScope).rulecatch>
-<else>
-	catch (RecognitionException re)
-	{
-		ReportError(re);
-		Recover(input,re);
-	<@setErrorReturnValue()>
-	}
-<endif>
-<endif>
-<endif>
-	finally
-	{
-		TraceOut("<ruleName>", <ruleDescriptor.index>);
-		LeaveRule("<ruleName>", <ruleDescriptor.index>);
-		LeaveRule_<ruleName>();
-        <memoize()>
-        <ruleScopeCleanUp()>
-        <finally>
-    }
- 	DebugLocation(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);
-	} finally { DebugExitRule(GrammarFileName, "<ruleName>"); }
-	<@postamble()>
-	<returnFromRule()><\n>
-}
-// $ANTLR end "<ruleName>"
->>
-
-// imported grammars need to have internal rules
-ruleModifier(grammar,ruleDescriptor) ::= <<
-<if(grammar.grammarIsRoot)><csharpVisibilityMap.(ruleDescriptor.modifier); null="private"><else>internal<endif>
->>
-
-// imported grammars need to have public return scopes
-returnScopeModifier(grammar,ruleDescriptor) ::= <<
-<if(grammar.grammarIsRoot)><csharpVisibilityMap.(ruleDescriptor.modifier); null="private"><else>public<endif>
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>)
-{
-	<e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<returnType(ruleDescriptor)> retval = new <returnType(ruleDescriptor)>();
-retval.Start = (<labelType>)input.LT(1);
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.type> <a.name; format="id"> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
-}>
-<endif>
-<if(memoize)>
-int <ruleDescriptor.name>_StartIndex = input.Index;
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{it|<it>_stack.Push(new <it>_scope());<it>_scopeInit(<it>_stack.Peek());}; separator="\n">
-<ruleDescriptor.ruleScope:{it|<it.name>_stack.Push(new <it.name>_scope());<it.name>_scopeInit(<it.name>_stack.Peek());}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{it|<it>_scopeAfter(<it>_stack.Peek());<it>_stack.Pop();}; separator="\n">
-<ruleDescriptor.ruleScope:{it|<it.name>_scopeAfter(<it.name>_stack.Peek());<it.name>_stack.Pop();}; separator="\n">
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it|<labelType> <it.label.text> = default(<labelType>);}; separator="\n"
->
-<ruleDescriptor.tokenListLabels
-    :{it|List\<<labelType>\> list_<it.label.text> = null;}; separator="\n"
->
-<[ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it|List\<<ASTLabelType>\> list_<it.label.text> = null;}; separator="\n"
->
-<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
-<ruleDescriptor.ruleListLabels:ruleLabelDef(); separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{it|<labelType> <it.label.text> = default(<labelType>);}; separator="\n"
->
-<[ruleDescriptor.charListLabels,
-  ruleDescriptor.charLabels]
-	:{it|int <it.label.text> = 0;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{it|List\<<labelType>\> list_<it.label.text> = null;}; separator="\n"
->
-<ruleDescriptor.charListLabels:{it|List\<int\> list_<it.label.text> = null;}; separator="\n"
->
->>
-
-returnFromRule() ::= <%
-return
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<! This comment is a hack to make sure the following
-   single space appears in the output. !> <ruleDescriptor.singleValueReturnName>
-<else>
-<!!> retval
-<endif>
-<endif>
-<endif>
-;
-%>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-retval.Stop = (<labelType>)input.LT(-1);
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if (state.backtracking > 0) { Memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
-<endif>
-<endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-
-[Conditional("ANTLR_TRACE")]
-protected virtual void EnterRule_<ruleName>() {}
-[Conditional("ANTLR_TRACE")]
-protected virtual void LeaveRule_<ruleName>() {}
-
-// $ANTLR start "<ruleName>"
-[GrammarRule("<ruleName>")]
-<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>)
-{
-	EnterRule_<ruleName>();
-	EnterRule("<ruleName>", <ruleDescriptor.index>);
-	TraceIn("<ruleName>", <ruleDescriptor.index>);
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-		try
-		{
-<if(nakedBlock)>
-		<ruleMemoization(name=ruleName)>
-		<lexerRuleLabelDefs()>
-		<ruleDescriptor.actions.init>
-		<block>
-<else>
-		int _type = <ruleName>;
-		int _channel = DefaultTokenChannel;
-		<ruleMemoization(name=ruleName)>
-		<lexerRuleLabelDefs()>
-		<ruleDescriptor.actions.init>
-		<block>
-		<ruleCleanUp()>
-		state.type = _type;
-		state.channel = _channel;
-		<(ruleDescriptor.actions.after):execAction()>
-<endif>
-	}
-	finally
-	{
-		TraceOut("<ruleName>", <ruleDescriptor.index>);
-		LeaveRule("<ruleName>", <ruleDescriptor.index>);
-		LeaveRule_<ruleName>();
-        <ruleScopeCleanUp()>
-        <memoize()>
-    }
-}
-// $ANTLR end "<ruleName>"
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-
-public override void mTokens()
-{
-	<block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-try { DebugEnterSubRule(<decisionNumber>);
-try { DebugEnterDecision(<decisionNumber>, decisionCanBacktrack[<decisionNumber>]);
-<decision>
-} finally { DebugExitDecision(<decisionNumber>); }
-<@postdecision()>
-<@prebranch()>
-switch (alt<decisionNumber>)
-{
-<alts:{a|<altSwitchCase(i,a)>}>
-}
-} finally { DebugExitSubRule(<decisionNumber>); }
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-try { DebugEnterDecision(<decisionNumber>, decisionCanBacktrack[<decisionNumber>]);
-<decision>
-} finally { DebugExitDecision(<decisionNumber>); }
-<@postdecision()>
-switch (alt<decisionNumber>)
-{
-<alts:{a|<altSwitchCase(i,a)>}>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-DebugEnterAlt(1);
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-DebugEnterAlt(1);
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int cnt<decisionNumber>=0;
-<decls>
-<@preloop()>
-try { DebugEnterSubRule(<decisionNumber>);
-while (true)
-{
-	int alt<decisionNumber>=<maxAlt>;
-	<@predecision()>
-	try { DebugEnterDecision(<decisionNumber>, decisionCanBacktrack[<decisionNumber>]);
-	<decision>
-	} finally { DebugExitDecision(<decisionNumber>); }
-	<@postdecision()>
-	switch (alt<decisionNumber>)
-	{
-	<alts:{a|<altSwitchCase(i,a)>}>
-	default:
-		if (cnt<decisionNumber> >= 1)
-			goto loop<decisionNumber>;
-
-		<ruleBacktrackFailure()>
-		EarlyExitException eee<decisionNumber> = new EarlyExitException( <decisionNumber>, input );
-		DebugRecognitionException(eee<decisionNumber>);
-		<@earlyExitException()>
-		throw eee<decisionNumber>;
-	}
-	cnt<decisionNumber>++;
-}
-loop<decisionNumber>:
-	;
-
-} finally { DebugExitSubRule(<decisionNumber>); }
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@preloop()>
-try { DebugEnterSubRule(<decisionNumber>);
-while (true)
-{
-	int alt<decisionNumber>=<maxAlt>;
-	<@predecision()>
-	try { DebugEnterDecision(<decisionNumber>, decisionCanBacktrack[<decisionNumber>]);
-	<decision>
-	} finally { DebugExitDecision(<decisionNumber>); }
-	<@postdecision()>
-	switch ( alt<decisionNumber> )
-	{
-	<alts:{a|<altSwitchCase(i,a)>}>
-	default:
-		goto loop<decisionNumber>;
-	}
-}
-
-loop<decisionNumber>:
-	;
-
-} finally { DebugExitSubRule(<decisionNumber>); }
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) before code generation,
- *  so we can just use the normal block template.
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase(altNum,alt) ::= <<
-case <altNum>:
-	<@prealt()>
-	DebugEnterAlt(<altNum>);
-	<alt>
-	break;<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-// <fileName>:<description>
-{
-<@declarations()>
-<elements:element()>
-<rew>
-<@cleanup()>
-}
->>
-
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element(it) ::= <%
-<@prematch()>
-DebugLocation(<it.line>, <it.pos>);<\n>
-<it.el><\n>
-%>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label>=(<labelType>)<endif>Match(input,<token>,Follow._<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-listLabelElem(label,elem,elemType) ::= <<
-if (list_<label>==null) list_<label>=new List\<<elemType; null={<labelType>}>\>();
-list_<label>.Add(<elem>);<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-Match(<char>); <checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-MatchRange(<a>,<b>); <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode="") ::= <<
-<if(label)>
-<matchSetLabel()>
-<endif>
-if (<s>)
-{
-	input.Consume();
-	<postmatchCode>
-	<if(!LEXER)>state.errorRecovery=false;<endif><if(backtracking)>state.failed=false;<endif>
-}
-else
-{
-	<ruleBacktrackFailure()>
-	MismatchedSetException mse = new MismatchedSetException(null,input);
-	DebugRecognitionException(mse);
-	<@mismatchedSetException()>
-<if(LEXER)>
-	Recover(mse);
-	throw mse;
-<else>
-	throw mse;
-	<! use following code to make it recover inline; remove throw mse;
-	recoverFromMismatchedSet(input,mse,Follow._set_in_<ruleName><elementIndex>);
-	!>
-<endif>
-}<\n>
->>
-
-matchSetUnchecked(s,label,elementIndex,postmatchCode=false) ::= <%
-<if(label)>
-<matchSetLabel()><\n>
-<endif>
-input.Consume();<\n>
-<if(postmatchCode)>
-<postmatchCode><\n>
-<endif>
-<if(!LEXER)>state.errorRecovery=false;<endif><if(backtracking)>state.failed=false;<endif>
-%>
-
-matchSetLabel() ::= <%
-<if(LEXER)>
-<label>= input.LA(1);
-<else>
-<label>=(<labelType>)input.LT(1);
-<endif>
-%>
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex) ::= <%
-<if(label)>
-int <label>Start = CharIndex;<\n>
-Match(<string>); <checkRuleBacktrackFailure()><\n>
-int <label>StartLine<elementIndex> = Line;<\n>
-int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
-<label> = new <labelType>(input, TokenTypes.Invalid, TokenChannels.Default, <label>Start, CharIndex-1);<\n>
-<label>.Line = <label>StartLine<elementIndex>;<\n>
-<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
-<else>
-Match(<string>); <checkRuleBacktrackFailure()><\n>
-<endif>
-%>
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)>
-<label>=(<labelType>)input.LT(1);<\n>
-<endif>
-MatchAny(input); <checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<wildcard(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-MatchAny(); <checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** Match a rule reference by invoking it, possibly with arguments
- *  and one or more return values.  The 'rule' argument used to be the
- *  target rule name, but is now of type Rule, whose toString is the
- *  same: the rule name.  The full rule descriptor is now accessible
- *  through it.
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-PushFollow(Follow._<rule.name>_in_<ruleName><elementIndex>);
-<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name; format="id">(<args; separator=", ">);
-PopFollow();
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabelElem(elem=label,elemType={<ASTLabelType>},...)>
->>
-
-/** A lexer rule reference.
- *
- *  The 'rule' argument used to be the target rule name, but is now
- *  of type Rule, whose toString is the same: the rule name.
- *  The full rule descriptor is now accessible through it.
- */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <%
-<if(label)>
-int <label>Start<elementIndex> = CharIndex;<\n>
-int <label>StartLine<elementIndex> = Line;<\n>
-int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()><\n>
-<label> = new <labelType>(input, TokenTypes.Invalid, TokenChannels.Default, <label>Start<elementIndex>, CharIndex-1);<\n>
-<label>.Line = <label>StartLine<elementIndex>;<\n>
-<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
-<else>
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<endif>
-%>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <%
-<if(label)>
-int <label>Start<elementIndex> = CharIndex;<\n>
-int <label>StartLine<elementIndex> = Line;<\n>
-int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
-Match(EOF); <checkRuleBacktrackFailure()><\n>
-<labelType> <label> = new <labelType>(input, EOF, TokenChannels.Default, <label>Start<elementIndex>, CharIndex-1);<\n>
-<label>.Line = <label>StartLine<elementIndex>;<\n>
-<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
-<else>
-Match(EOF); <checkRuleBacktrackFailure()>
-<endif>
-%>
-
-// used for left-recursive rules
-recRuleDefArg()                       ::= "int <recRuleArg()>"
-recRuleArg()                          ::= "_p"
-recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
-recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
-recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if (input.LA(1) == TokenTypes.Down)
-{
-	Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
-	<children:element()>
-	Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
-}
-<else>
-Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
-<children:element()>
-Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if (!(<evalPredicate(...)>))
-{
-	<ruleBacktrackFailure()>
-	throw new FailedPredicateException(input, "<ruleName>", "<description>");
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
-else
-{
-<if(eotPredictsAlt)>
-	alt<decisionNumber> = <eotPredictsAlt>;
-<else>
-	<ruleBacktrackFailure()>
-	NoViableAltException nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);
-	DebugRecognitionException(nvae);
-	<@noViableAltException()>
-	throw nvae;
-<endif>
-}
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection, but this
- *  is faster, smaller, and closer to what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets, then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that, for ('a')* at the end of a rule,
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse "><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-alt<decisionNumber> = <eotPredictsAlt>;<! if no edges, don't gen ELSE !>
-<else>
-else
-{
-	alt<decisionNumber> = <eotPredictsAlt>;
-}<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ((<labelExpr>)<if(predicates)> && (<predicates>)<endif>)
-{
-	<targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch (input.LA(<k>))
-{
-<edges; separator="\n">
-default:
-<if(eotPredictsAlt)>
-	alt<decisionNumber>=<eotPredictsAlt>;
-	break;<\n>
-<else>
-	{
-		<ruleBacktrackFailure()>
-		NoViableAltException nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);
-		DebugRecognitionException(nvae);
-		<@noViableAltException()>
-		throw nvae;
-	}
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch (input.LA(<k>))
-{
-<edges; separator="\n">
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch (input.LA(<k>))
-{
-<edges; separator="\n">
-<if(eotPredictsAlt)>
-default:
-	alt<decisionNumber>=<eotPredictsAlt>;
-	break;<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{it|case <it>:}; separator="\n">
-	{
-	<targetState>
-	}
-	break;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-try
-{
-	alt<decisionNumber> = dfa<decisionNumber>.Predict(input);
-}
-catch (NoViableAltException nvae)
-{
-	DebugRecognitionException(nvae);
-	throw;
-}
->>
-
-/* Dump DFA tables as run-length-encoded strings of octal values.
- * Hex can't be used because the compiler translates the escapes before compilation.
- * These strings are split into multiple, concatenated strings, which the
- * compiler puts back together at compile time, thankfully.
- * Java cannot handle large static arrays, so we're stuck with this
- * encode/decode approach.  See the analysis and runtime DFA classes for
- * the encoding methods.
- */
-cyclicDFA(dfa) ::= <<
-private class DFA<dfa.decisionNumber> : DFA
-{
-	private const string DFA<dfa.decisionNumber>_eotS =
-		"<dfa.javaCompressedEOT; wrap="\"+\n\t\t\"">";
-	private const string DFA<dfa.decisionNumber>_eofS =
-		"<dfa.javaCompressedEOF; wrap="\"+\n\t\t\"">";
-	private const string DFA<dfa.decisionNumber>_minS =
-		"<dfa.javaCompressedMin; wrap="\"+\n\t\t\"">";
-	private const string DFA<dfa.decisionNumber>_maxS =
-		"<dfa.javaCompressedMax; wrap="\"+\n\t\t\"">";
-	private const string DFA<dfa.decisionNumber>_acceptS =
-		"<dfa.javaCompressedAccept; wrap="\"+\n\t\t\"">";
-	private const string DFA<dfa.decisionNumber>_specialS =
-		"<dfa.javaCompressedSpecial; wrap="\"+\n\t\t\"">}>";
-	private static readonly string[] DFA<dfa.decisionNumber>_transitionS =
-		{
-			<dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
-		};
-
-	private static readonly short[] DFA<dfa.decisionNumber>_eot = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eotS);
-	private static readonly short[] DFA<dfa.decisionNumber>_eof = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eofS);
-	private static readonly char[] DFA<dfa.decisionNumber>_min = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
-	private static readonly char[] DFA<dfa.decisionNumber>_max = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
-	private static readonly short[] DFA<dfa.decisionNumber>_accept = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
-	private static readonly short[] DFA<dfa.decisionNumber>_special = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_specialS);
-	private static readonly short[][] DFA<dfa.decisionNumber>_transition;
-
-	static DFA<dfa.decisionNumber>()
-	{
-		int numStates = DFA<dfa.decisionNumber>_transitionS.Length;
-		DFA<dfa.decisionNumber>_transition = new short[numStates][];
-		for ( int i=0; i \< numStates; i++ )
-		{
-			DFA<dfa.decisionNumber>_transition[i] = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_transitionS[i]);
-		}
-	}
-
-	public DFA<dfa.decisionNumber>( BaseRecognizer recognizer<if(dfa.specialStateSTs)>, SpecialStateTransitionHandler specialStateTransition<endif> )
-<if(dfa.specialStateSTs)>
-		: base(specialStateTransition)
-<endif>
-	{
-		this.recognizer = recognizer;
-		this.decisionNumber = <dfa.decisionNumber>;
-		this.eot = DFA<dfa.decisionNumber>_eot;
-		this.eof = DFA<dfa.decisionNumber>_eof;
-		this.min = DFA<dfa.decisionNumber>_min;
-		this.max = DFA<dfa.decisionNumber>_max;
-		this.accept = DFA<dfa.decisionNumber>_accept;
-		this.special = DFA<dfa.decisionNumber>_special;
-		this.transition = DFA<dfa.decisionNumber>_transition;
-	}
-
-	public override string Description { get { return "<dfa.description>"; } }
-
-	public override void Error(NoViableAltException nvae)
-	{
-		DebugRecognitionException(nvae);
-	}
-}<\n>
-<if(dfa.specialStateSTs)>
-private int SpecialStateTransition<dfa.decisionNumber>(DFA dfa, int s, IIntStream _input)<! throws NoViableAltException!>
-{
-	<if(LEXER)>
-	IIntStream input = _input;
-	<endif>
-	<if(PARSER)>
-	ITokenStream input = (ITokenStream)_input;
-	<endif>
-	<if(TREE_PARSER)>
-	ITreeNodeStream input = (ITreeNodeStream)_input;
-	<endif>
-	int _s = s;
-	switch (s)
-	{
-	<dfa.specialStateSTs:{state |
-	case <i0>:<! compressed special state numbers 0..n-1 !>
-		<state>}; separator="\n">
-	}
-<if(backtracking)>
-	if (state.backtracking > 0) {state.failed=true; return -1;}
-<endif>
-	NoViableAltException nvae = new NoViableAltException(dfa.Description, <dfa.decisionNumber>, _s, input);
-	dfa.Error(nvae);
-	throw nvae;
-}
-<endif>
->>
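For readers unfamiliar with the encoding described in the comment above, a simplified, self-contained sketch of the pair-wise run-length decoding; the runtime's DFA.UnpackEncodedString is the authoritative implementation, and this is only an illustration of the idea.

static class RleSketch
{
	// Each char pair in the encoded string is (count, value); expand to count copies of value.
	public static short[] Unpack(string encoded)
	{
		int size = 0;
		for (int i = 0; i < encoded.Length; i += 2)
			size += encoded[i];

		short[] data = new short[size];
		int di = 0;
		for (int i = 0; i < encoded.Length; i += 2)
		{
			char count = encoded[i];
			char value = encoded[i + 1];
			for (int j = 0; j < count; j++)
				data[di++] = (short)value;   // '\uffff' becomes -1 after the cast
		}
		return data;
	}
}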
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
-<if(semPredState)>
-<! get next lookahead symbol to test edges, then rewind !>
-<\n>int index<decisionNumber>_<stateNumber> = input.Index;
-input.Rewind();
-<endif>
-s = -1;
-<edges; separator="\nelse ">
-<if(semPredState)>
-<! return input cursor to state before we rewound !>
-<\n>input.Seek(index<decisionNumber>_<stateNumber>);
-<endif>
-if (s >= 0) return s;
-break;
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ((<labelExpr>)<if(predicates)> && (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-s = <targetStateNumber>;<\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left>&&<right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(...)>)"
-
-evalPredicate(pred,description) ::= "(<pred>)"
-
-evalSynPredicate(pred,description) ::= "EvaluatePredicate(<pred>_fragment)"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
-(LA<decisionNumber>_<stateNumber><ge()><lower> && LA<decisionNumber>_<stateNumber><le()><upper>)
-%>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)<ge()><lower> && input.LA(<k>)<le()><upper>)"
-
-le() ::= "\<="
-ge() ::= ">="
-
-setTest(ranges) ::= <<
-<ranges; separator="||">
->>
-
-// A T T R I B U T E S
-
-attributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected sealed partial class <scope.name>_scope
-{
-	<scope.attributes:{it|public <it.decl>;}; separator="\n">
-}
-<if(scope.actions.scopeinit)>
-protected void <scope.name>_scopeInit( <scope.name>_scope scope )
-{
-	<scope.actions.scopeinit>
-}
-<else>
-protected virtual void <scope.name>_scopeInit( <scope.name>_scope scope ) {}
-<endif>
-<if(scope.actions.scopeafter)>
-protected void <scope.name>_scopeAfter( <scope.name>_scope scope )
-{
-	<scope.actions.scopeafter>
-}
-<else>
-protected virtual void <scope.name>_scopeAfter( <scope.name>_scope scope ) {}
-<endif>
-protected readonly ListStack\<<scope.name>_scope\> <scope.name>_stack = new ListStack\<<scope.name>_scope\>();
-<endif>
->>
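As a concrete illustration, roughly what attributeScope expands to for a hypothetical dynamic scope declared as "scope Symbols { int depth; }" with no scopeinit/scopeafter actions; these members are emitted into the generated recognizer class, and ListStack comes from the C# runtime's support classes.

protected sealed partial class Symbols_scope
{
	public int depth;
}
protected virtual void Symbols_scopeInit( Symbols_scope scope ) {}
protected virtual void Symbols_scopeAfter( Symbols_scope scope ) {}
protected readonly ListStack<Symbols_scope> Symbols_stack = new ListStack<Symbols_scope>();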
-
-globalAttributeScope(scope) ::= <<
-<attributeScope(...)>
->>
-
-ruleAttributeScope(scope) ::= <<
-<attributeScope(...)>
->>
-
-returnStructName(it) ::= "<it.name>_return"
-
-returnType(ruleDescriptor) ::= <%
-<if(ruleDescriptor.returnScope.attributes && ruleDescriptor.hasMultipleReturnValues)>
-	<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
-<elseif(ruleDescriptor.hasMultipleReturnValues)>
-	<ruleReturnBaseType()>
-<elseif(ruleDescriptor.hasSingleReturnValue)>
-	<ruleDescriptor.singleValueReturnType>
-<else>
-	void
-<endif>
-%>
-
-/** Generate the C# type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <%
-<if(referencedRule.returnScope.attributes&&referencedRule.hasMultipleReturnValues)>
-	<referencedRule.grammar.recognizerName>.<referencedRule:returnStructName()>
-<elseif(referencedRule.hasMultipleReturnValues)>
-	<ruleReturnBaseType()>
-<elseif(referencedRule.hasSingleReturnValue)>
-	<referencedRule.singleValueReturnType>
-<else>
-	void
-<endif>
-%>
-
-delegateName(it) ::= <<
-<if(it.label)><it.label><else>g<it.name><endif>
->>
-
-/** Using a type-to-init-value map, try to initialize a type; if the type is
- *  not in the table, it must be an object, whose default value is "null".
- */
-initValue(typeName) ::= <<
-default(<typeName>)
->>
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <%
-<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;
-%>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(scope.attributes && ruleDescriptor.hasMultipleReturnValues)>
-<returnScopeModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> sealed partial class <ruleDescriptor:returnStructName()> : <ruleReturnBaseType()><@ruleReturnInterfaces()>
-{
-	<scope.attributes:{it|public <it.decl>;}; separator="\n">
-	<@ruleReturnMembers()>
-}
-<endif>
->>
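Similarly, a sketch of what returnScope emits for a hypothetical root-grammar parser rule pos declared with returns [int line, int column]; the actual access modifier comes from returnScopeModifier and csharpVisibilityMap, and is shown here as public only for readability.

public sealed partial class pos_return : ParserRuleReturnScope<IToken>
{
	public int line;
	public int column;
}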
-
-ruleReturnBaseType() ::= <%
-<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope\<<labelType>>
-%>
-
-@returnScope.ruleReturnMembers() ::= <<
->>
-
-parameterScope(scope) ::= <<
-<scope.attributes:{it|<it.decl>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= <<
-<attr.name; format="id">
->>
-
-parameterSetAttributeRef(attr,expr) ::= <<
-<attr.name; format="id"> =<expr>;
->>
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <%
-<if(negIndex)>
-<scope>_stack[<scope>_stack.Count - <negIndex> - 1].<attr.name; format="id">
-<else>
-<if(index)>
-<scope>_stack[<index>].<attr.name; format="id">
-<else>
-<scope>_stack.Peek().<attr.name; format="id">
-<endif>
-<endif>
-%>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
-<if(negIndex)>
-<scope>_stack[<scope>_stack.Count - <negIndex> - 1].<attr.name; format="id"> = <expr>;
-<else>
-<if(index)>
-<scope>_stack[<index>].<attr.name; format="id"> = <expr>;
-<else>
-<scope>_stack.Peek().<attr.name; format="id"> = <expr>;
-<endif>
-<endif>
-%>
-
-/** $x is either a global scope or x is a rule with a dynamic scope; this refers
- *  to the stack itself, not the top of the stack.  It is useful for predicates
- *  like {$function.Count>0 && $function::name.Equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
-
-/** Reference an attribute of a rule; the rule might only have a single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <%
-<if(referencedRule.hasMultipleReturnValues)>
-(<scope>!=null?<scope>.<attr.name; format="id">:<initValue(attr.type)>)
-<else>
-<scope>
-<endif>
-%>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name; format="id">
-<else>
-<attr.name; format="id">
-<endif>
-%>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name; format="id"> =<expr>;
-<else>
-<attr.name; format="id"> =<expr>;
-<endif>
-%>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach
-
-tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.Text:null)"
-tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.Type:0)"
-tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.Line:0)"
-tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.CharPositionInLine:0)"
-tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.Channel:0)"
-tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.TokenIndex:0)"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int.Parse(<scope>.Text):0)"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.Start):default(<labelType>))"
-ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.Stop):default(<labelType>))"
-ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?((<ASTLabelType>)<scope>.Tree):default(<ASTLabelType>))"
-ruleLabelPropertyRef_text(scope,attr) ::= <%
-<if(TREE_PARSER)>
-(<scope>!=null?(input.TokenStream.ToString(
-  input.TreeAdaptor.GetTokenStartIndex(<scope>.Start),
-  input.TreeAdaptor.GetTokenStopIndex(<scope>.Start))):null)
-<else>
-(<scope>!=null?input.ToString(<scope>.Start,<scope>.Stop):null)
-<endif>
-%>
-
-ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?<scope>.Template:null)"
-
-/** An isolated $RULE ref is ok in the lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::=
-    "(<scope>!=null?<scope>.Type:0)"
-
-lexerRuleLabelPropertyRef_line(scope,attr) ::=
-    "(<scope>!=null?<scope>.Line:0)"
-
-lexerRuleLabelPropertyRef_pos(scope,attr) ::=
-    "(<scope>!=null?<scope>.CharPositionInLine:-1)"
-
-lexerRuleLabelPropertyRef_channel(scope,attr) ::=
-    "(<scope>!=null?<scope>.Channel:0)"
-
-lexerRuleLabelPropertyRef_index(scope,attr) ::=
-    "(<scope>!=null?<scope>.TokenIndex:0)"
-
-lexerRuleLabelPropertyRef_text(scope,attr) ::=
-    "(<scope>!=null?<scope>.Text:null)"
-
-lexerRuleLabelPropertyRef_int(scope,attr) ::=
-    "(<scope>!=null?int.Parse(<scope>.Text):0)"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "retval.Start"
-rulePropertyRef_stop(scope,attr) ::= "retval.Stop"
-rulePropertyRef_tree(scope,attr) ::= "retval.Tree"
-rulePropertyRef_text(scope,attr) ::= <%
-<if(TREE_PARSER)>
-input.TokenStream.ToString(
-  input.TreeAdaptor.GetTokenStartIndex(retval.Start),
-  input.TreeAdaptor.GetTokenStopIndex(retval.Start))
-<else>
-input.ToString(retval.Start,input.LT(-1))
-<endif>
-%>
-rulePropertyRef_st(scope,attr) ::= "retval.Template"
-
-lexerRulePropertyRef_text(scope,attr) ::= "Text"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
-lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
-lexerRulePropertyRef_int(scope,attr) ::= "int.Parse(<scope>.Text)"
-
-// Setting $st and $tree is allowed in a local rule; everything else
-// is flagged as an error.
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.Tree = <expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.Template =<expr>;"
-
-/** How to execute an action (only when not backtracking) */
-execAction(action) ::= <%
-<if(backtracking)>
-if (<actions.(actionScope).synpredgate>)<\n>
-{<\n>
-<@indentedAction()><\n>
-}
-<else>
-<action>
-<endif>
-%>
-
-@execAction.indentedAction() ::= <<
-	<action>
->>
-
-/** How to always execute an action even when backtracking */
-execForcedAction(action) ::= "<action>"
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-public static readonly BitSet <name> = new BitSet(new ulong[]{<words64:{it|<it>UL};separator=",">});
->>
-
-codeFileExtension() ::= ".cs"
-
-true_value() ::= "true"
-false_value() ::= "false"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/Dbg.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/Dbg.stg
deleted file mode 100644
index efad5b5..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/Dbg.stg
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2007-2008 Johannes Luber
- * Copyright (c) 2005-2007 Kunle Odutola
- * Copyright (c) 2011 Sam Harwell
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-/** Template overrides to add debugging to normal C# output.
- *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
- */
-
-@outputFile.debugPreprocessor() ::= "#define ANTLR_DEBUG"
-
-@outputFile.imports() ::= <<
-<@super.imports()>
-using Antlr.Runtime.Debug;
-using IOException = System.IO.IOException;
->>
-
-@genericParser.members() ::= <<
-<if(grammar.grammarIsRoot)>
-public static readonly string[] ruleNames =
-	new string[]
-	{
-		"invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n	", separator=", ">
-	};<\n>
-<endif>
-<if(grammar.grammarIsRoot)><! grammar imports other grammar(s) !>
-	int ruleLevel = 0;
-	public virtual int RuleLevel { get { return ruleLevel; } }
-	public virtual void IncRuleLevel() { ruleLevel++; }
-	public virtual void DecRuleLevel() { ruleLevel--; }
-<if(profile)>
-	<ctorForProfilingRootGrammar()>
-<else>
-	<ctorForRootGrammar()>
-<endif>
-<ctorForPredefinedListener()>
-<else><! imported grammar !>
-	public int RuleLevel { get { return <grammar.delegators:{g| <g:delegateName()>}>.RuleLevel; } }
-	public void IncRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.IncRuleLevel(); }
-	public void DecRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.DecRuleLevel(); }
-	<ctorForDelegateGrammar()>
-<endif>
-<if(profile)>
-public override bool AlreadyParsedRule( IIntStream input, int ruleIndex )
-{
-	int stopIndex = GetRuleMemoization(ruleIndex, input.Index);
-	((Profiler)dbg).ExamineRuleMemoization(input, ruleIndex, stopIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
-	return base.AlreadyParsedRule(input, ruleIndex);
-}<\n>
-public override void Memoize( IIntStream input, int ruleIndex, int ruleStartIndex )
-{
-	((Profiler)dbg).Memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
-	base.Memoize(input, ruleIndex, ruleStartIndex);
-}<\n>
-<endif>
-protected virtual bool EvalPredicate( bool result, string predicate )
-{
-	dbg.SemanticPredicate( result, predicate );
-	return result;
-}<\n>
->>
-
-ctorForRootGrammar() ::= <<
-<! bug: can't use <@super.members()>, so cut-n-paste instead !>
-<! Same except we add port number and profile stuff if root grammar !>
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input )
-	: this( input, DebugEventSocketProxy.DefaultDebuggerPort, new RecognizerSharedState() )
-{
-}
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, int port, RecognizerSharedState state )
-	: base( input, state )
-{
-	<parserCtorBody()>
-	<createListenerAndHandshake()>
-	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>( input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
-	<@finally()>
-}<\n>
->>
-
-ctorForProfilingRootGrammar() ::= <<
-<! bug: can't use <@super.members()>, so cut-n-paste instead !>
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input )
-	: this( input, new Profiler(null), new RecognizerSharedState() )
-{
-}
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state )
-	: base( input, dbg, state )
-{
-	Profiler p = (Profiler)dbg;
-	p.setParser(this);
-	<parserCtorBody()>
-	<grammar.directDelegates:
-	 {g|<g:delegateName()> = new <g.recognizerName>( input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
-	<@finally()>
-}
-<\n>
->>
-
-/** Basically we don't want to set any dbg listeners, as the root will have it. */
-ctorForDelegateGrammar() ::= <<
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
-	: base( input, dbg, state )
-{
-	<parserCtorBody()>
-	<grammar.directDelegates:
-	 {g|<g:delegateName()> = new <g.recognizerName>( input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
-}<\n>
->>
-
-ctorForPredefinedListener() ::= <<
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg )
-	<@superClassRef>: base( input, dbg, new RecognizerSharedState() )<@end>
-{
-<if(profile)>
-	Profiler p = (Profiler)dbg;
-	p.setParser(this);
-<endif>
-	<parserCtorBody()>
-	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
-	<@finally()>
-}<\n>
->>
-
-createListenerAndHandshake() ::= <<
-<if(TREE_PARSER)>
-DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, input.TreeAdaptor );<\n>
-<else>
-DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, null );<\n>
-<endif>
-DebugListener = proxy;
-try
-{
-	proxy.Handshake();
-}
-catch ( IOException ioe )
-{
-	ReportError( ioe );
-}
->>
-
-@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
-
-/*
- * Many of the following rules were merged into CSharp2.stg.
- */
-
-@rule.preamble() ::= <<
-if (RuleLevel == 0)
-	DebugListener.Commence();
-IncRuleLevel();
->>
-//@rule.preamble() ::= <<
-//try
-//{
-//	dbg.EnterRule( GrammarFileName, "<ruleName>" );
-//	if ( RuleLevel == 0 )
-//	{
-//		dbg.Commence();
-//	}
-//	IncRuleLevel();
-//	dbg.Location( <ruleDescriptor.tree.line>, <ruleDescriptor.tree.charPositionInLine> );<\n>
-//>>
-
-@rule.postamble() ::= <<
-DecRuleLevel();
-if (RuleLevel == 0)
-	DebugListener.Terminate();
->>
-//@rule.postamble() ::= <<
-//dbg.Location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);<\n>
-//}
-//finally
-//{
-//	dbg.ExitRule( GrammarFileName, "<ruleName>" );
-//	DecRuleLevel();
-//	if ( RuleLevel == 0 )
-//	{
-//		dbg.Terminate();
-//	}
-//}<\n>
-//>>
-
-//@insertSynpreds.start() ::= "dbg.BeginBacktrack( state.backtracking );"
-//@insertSynpreds.stop() ::= "dbg.EndBacktrack( state.backtracking, success );"
-
-// Common debug event triggers used by region overrides below
-
-//enterSubRule() ::= <<
-//try
-//{
-//	dbg.EnterSubRule( <decisionNumber> );<\n>
-//>>
-
-//exitSubRule() ::= <<
-//}
-//finally
-//{
-//	dbg.ExitSubRule( <decisionNumber> );
-//}<\n>
-//>>
-
-//enterDecision() ::= <<
-//try
-//{
-//	dbg.EnterDecision( <decisionNumber> );<\n>
-//>>
-
-//exitDecision() ::= <<
-//}
-//finally
-//{
-//	dbg.ExitDecision( <decisionNumber> );
-//}<\n>
-//>>
-
-//enterAlt(n) ::= "dbg.EnterAlt( <n> );<\n>"
-
-// Region overrides that tell various constructs to add debugging triggers
-
-//@block.predecision() ::= "<enterSubRule()><enterDecision()>"
-
-//@block.postdecision() ::= "<exitDecision()>"
-
-//@block.postbranch() ::= "<exitSubRule()>"
-
-//@ruleBlock.predecision() ::= "<enterDecision()>"
-
-//@ruleBlock.postdecision() ::= "<exitDecision()>"
-
-//@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
-//@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
-//@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
-
-//@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
-
-//@positiveClosureBlock.predecision() ::= "<enterDecision()>"
-
-//@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
-
-//@positiveClosureBlock.earlyExitException() ::=
-//	"dbg.RecognitionException( eee<decisionNumber> );<\n>"
-
-//@closureBlock.preloop() ::= "<enterSubRule()>"
-
-//@closureBlock.postloop() ::= "<exitSubRule()>"
-
-//@closureBlock.predecision() ::= "<enterDecision()>"
-
-//@closureBlock.postdecision() ::= "<exitDecision()>"
-
-//@altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
-
-//@element.prematch() ::=
-//	"dbg.Location( <it.line>, <it.pos> );"
-
-//@matchSet.mismatchedSetException() ::=
-//	"dbg.RecognitionException( mse );"
-
-//@dfaState.noViableAltException() ::= "dbg.RecognitionException( nvae );"
-
-//@dfaStateSwitch.noViableAltException() ::= "dbg.RecognitionException( nvae );"
-
-//dfaDecision(decisionNumber,description) ::= <<
-//try
-//{
-//	isCyclicDecision = true;
-//	<super.dfaDecision(...)>
-//}
-//catch ( NoViableAltException nvae )
-//{
-//	dbg.RecognitionException( nvae );
-//	throw nvae;
-//}
-//>>
-
-//@cyclicDFA.errorMethod() ::= <<
-//public override void Error( NoViableAltException nvae )
-//{
-//	((DebugParser)recognizer).dbg.RecognitionException( nvae );
-//}
-//>>
-
-/** Force predicate validation to trigger an event */
-evalPredicate(pred,description) ::= <<
-EvalPredicate(<pred>, "<description>")
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/AST.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/AST.stg
deleted file mode 100644
index 0243429..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/AST.stg
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-@outputFile.imports() ::= <<
-<@super.imports()>
-
-<if(!TREE_PARSER)>
-<! tree parser would already have imported !>
-using Antlr.Runtime.Tree;
-using RewriteRuleITokenStream = Antlr.Runtime.Tree.RewriteRuleTokenStream;
-<endif>
->>
-
-@genericParser.members() ::= <<
-<@super.members()>
-<parserMembers()>
->>
-
-parserCtorBody() ::= <<
-<super.parserCtorBody()>
-<treeAdaptorType()> treeAdaptor = default(<treeAdaptorType()>);
-CreateTreeAdaptor(ref treeAdaptor);
-TreeAdaptor = treeAdaptor<if(!actions.(actionScope).treeAdaptorType)> ?? new CommonTreeAdaptor()<endif>;
->>
-
-/** Add an adaptor property that knows how to build trees */
-parserMembers() ::= <<
-// Implement this function in your helper file to use a custom tree adaptor
-partial void CreateTreeAdaptor(ref <treeAdaptorType()> adaptor);
-
-private <treeAdaptorType()> adaptor;
-
-public <treeAdaptorType()> TreeAdaptor
-{
-	get
-	{
-		return adaptor;
-	}
-
-	set
-	{
-		this.adaptor = value;
-		<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
-	}
-}
->>
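
The TreeAdaptor property generated above is the hook through which user code substitutes its own node factory; the C# ITreeAdaptor mirrors the Java runtime's TreeAdaptor. Below is a rough, illustrative Java sketch of the same idea (MyNode, MyNodeAdaptor, and the commented-out parser reference are hypothetical names, not anything this template emits):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class CustomAdaptorSketch {
        // Hypothetical node type the adaptor should produce.
        static class MyNode extends CommonTree {
            MyNode(Token t) { super(t); }
        }

        // Custom factory: create(Token) is called for every node the parser builds.
        static class MyNodeAdaptor extends CommonTreeAdaptor {
            @Override
            public Object create(Token payload) {
                return new MyNode(payload);
            }
        }

        public static void main(String[] args) {
            TreeAdaptor adaptor = new MyNodeAdaptor();
            Object n = adaptor.create(new CommonToken(4, "x"));  // token type 4 is arbitrary
            System.out.println(n.getClass().getSimpleName());    // prints MyNode
            // With a generated parser (name hypothetical):
            //   parser.setTreeAdaptor(adaptor);
        }
    }
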
-
-treeAdaptorType() ::= <<
-<actions.(actionScope).treeAdaptorType; null="ITreeAdaptor">
->>
-
-ruleReturnBaseType() ::= <%
-Ast<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope\<<ASTLabelType>, <labelType>>
-%>
-
-/** Add a variable to track rule's return AST */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-<ASTLabelType> root_0 = default(<ASTLabelType>);<\n>
->>
-
-ruleLabelDefs(ruleDescriptor, labelType, ASTLabelType, rewriteElementType) ::= <%
-<super.ruleLabelDefs(...)>
-<if(!ruleDescriptor.isSynPred)>
-<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
-	:{it|<\n><ASTLabelType> <it.label.text>_tree = default(<ASTLabelType>);}>
-<ruleDescriptor.tokenListLabels:{it|<\n><ASTLabelType> <it.label.text>_tree = default(<ASTLabelType>);}>
-<ruleDescriptor.allTokenRefsInAltsWithRewrites
-	:{it|<\n>RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}>
-<ruleDescriptor.allRuleRefsInAltsWithRewrites
-	:{it|<\n>RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}>
-<endif>
-%>
-
-/** When doing auto AST construction, we must define some variables;
- *  these should be turned off if doing rewrites.  This must be a "mode"
- *  as a rule could have both rewrite and AST within the same alternative
- *  block.
- */
-@alt.declarations() ::= <<
-<if(autoAST && outerAlt && !rewriteMode && !ruleDescriptor.isSynPred)>
-root_0 = (<ASTLabelType>)adaptor.Nil();
-<endif>
->>
-
-// T r a c k i n g  R u l e  E l e m e n t s
-
-/** ID and track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)> <! Track implies no auto AST construction!>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<token>.Add(<label>);<\n>
->>
-
-/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
- *  to the tracking list stream_ID for use in the rewrite.
- */
-tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefTrack(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** ^(ID ...) track for rewrite */
-tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<token>.Add(<label>);
->>
-
-/** Match ^(label+=TOKEN ...) track for rewrite */
-tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefRuleRootTrack(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<rule.name>.Add(<label>.Tree);
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefTrack(...)>
-<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<rule>.Add(<label>.Tree);
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRootTrack(...)>
-<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
->>
-
-// R e w r i t e
-
-rewriteCode(
-	alts, description,
-	referencedElementsDeep, // ALL referenced elements to right of ->
-	referencedTokenLabels,
-	referencedTokenListLabels,
-	referencedRuleLabels,
-	referencedRuleListLabels,
-	referencedWildcardLabels,
-	referencedWildcardListLabels,
-	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::= <<
-<\n>{
-// AST REWRITE
-// elements: <referencedElementsDeep; separator=", ">
-// token labels: <referencedTokenLabels; separator=", ">
-// rule labels: <referencedRuleLabels; separator=", ">
-// token list labels: <referencedTokenListLabels; separator=", ">
-// rule list labels: <referencedRuleListLabels; separator=", ">
-// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
-<if(backtracking)>
-if (<actions.(actionScope).synpredgate>) {
-<endif>
-<prevRuleRootRef()>.Tree = root_0;
-<rewriteCodeLabels()>
-root_0 = (<ASTLabelType>)adaptor.Nil();
-<alts:rewriteAlt(); separator="else ">
-<! if tree parser and rewrite=true !>
-<if(TREE_PARSER&&rewriteMode)>
-<prevRuleRootRef()>.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
-input.ReplaceChildren(adaptor.GetParent(retval.Start),
-                      adaptor.GetChildIndex(retval.Start),
-                      adaptor.GetChildIndex(_last),
-                      retval.Tree);
-<endif>
-<! if parser or tree-parser && rewrite!=true, we need to set result !>
-<if(!TREE_PARSER||!rewriteMode)>
-<prevRuleRootRef()>.Tree = root_0;
-<endif>
-<if(backtracking)>
-}
-<endif>
-}
-
->>
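
The rewriteCode template above is what a `->` rewrite expands to: the matched elements are snapshotted into RewriteRule*Stream objects, a fresh nil root is created, and the streams are replayed through the adaptor. As a rough illustration only (hand-written, not generated output), the Java-runtime equivalent of a rule such as `r : ID INT -> ^(ID INT)` reduces to the calls below; the token types 4 and 5 are arbitrary stand-ins:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.RewriteRuleTokenStream;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class RewriteSketch {
        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();

            // stream_ID / stream_INT play the role of the generated stream_<it> locals.
            RewriteRuleTokenStream streamID  = new RewriteRuleTokenStream(adaptor, "token ID");
            RewriteRuleTokenStream streamINT = new RewriteRuleTokenStream(adaptor, "token INT");
            streamID.add(new CommonToken(4, "x"));    // what tokenRefTrack's stream_<token>.Add does
            streamINT.add(new CommonToken(5, "42"));

            // root_0 = adaptor.Nil(); then ^(ID INT) becomes BecomeRoot + AddChild.
            Object root0 = adaptor.nil();
            Object root1 = adaptor.nil();
            root1 = adaptor.becomeRoot(streamID.nextNode(), root1);
            adaptor.addChild(root1, streamINT.nextNode());
            adaptor.addChild(root0, root1);

            root0 = adaptor.rulePostProcessing(root0);
            System.out.println(((CommonTree) root0).toStringTree());  // prints (x 42)
        }
    }
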
-
-rewriteCodeLabels() ::= <<
-<referencedTokenLabels
-    :{it|RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
-    separator="\n"
->
-<referencedTokenListLabels
-    :{it|RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
-    separator="\n"
->
-<referencedWildcardLabels
-	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
-	separator="\n"
->
-<referencedWildcardListLabels
-	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
-	separator="\n"
->
-<referencedRuleLabels
-    :{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.Tree:null);};
-    separator="\n"
->
-<referencedRuleListLabels
-    :{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"token <it>",list_<it>);};
-    separator="\n"
->
->>
-
-/** Generate code for an optional rewrite block; note it uses the deep referenced-element
-  *  list rather than the shallow list used by other blocks.
-  */
-rewriteOptionalBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in the immediate block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-if (<referencedElementsDeep:{el | stream_<el>.HasNext}; separator="||">)
-{
-	<alt>
-}
-<referencedElementsDeep:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewriteClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in the immediate block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-while ( <referencedElements:{el | stream_<el>.HasNext}; separator="||"> )
-{
-	<alt>
-}
-<referencedElements:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewritePositiveClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in the immediate block; no nested blocks
-	description) ::=
-<<
-if (!(<referencedElements:{el | stream_<el>.HasNext}; separator="||">))
-{
-	throw new RewriteEarlyExitException();
-}
-while ( <referencedElements:{el | stream_<el>.HasNext}; separator="||"> )
-{
-	<alt>
-}
-<referencedElements:{el | stream_<el>.Reset();<\n>}>
->>
-
-rewriteAlt(a) ::= <<
-// <a.description>
-<if(a.pred)>
-if (<a.pred>)
-{
-	<a.alt>
-}
-<else>
-{
-	<a.alt>
-}
-<endif>
->>
-
-/** For empty rewrites: "r : ... -> ;" */
-rewriteEmptyAlt() ::= "root_0 = null;"
-
-rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
-// <fileName>:<description>
-{
-<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.Nil();
-<root:rewriteElement()>
-<children:rewriteElement()>
-adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
-}<\n>
->>
-
-rewriteElementList(elements) ::= "<elements:rewriteElement()>"
-
-rewriteElement(e) ::= <%
-<@pregen()>
-DebugLocation(<e.line>, <e.pos>);<\n>
-<e.el>
-%>
-
-/** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,terminalOptions,args) ::= <<
-adaptor.AddChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
->>
-
-/** Gen $label ... where defined via label=ID */
-rewriteTokenLabelRef(label,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
->>
-
-/** Gen $label ... where defined via label+=ID */
-rewriteTokenListLabelRef(label,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
->>
-
-/** Gen ^($label ...) */
-rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
->>
-
-/** Gen ^($label ...) where label+=... */
-rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
-
-/** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,terminalOptions,args) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);<\n>
->>
-
-rewriteImaginaryTokenRef(args,token,terminalOptions,elementIndex) ::= <<
-adaptor.AddChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
->>
-
-rewriteImaginaryTokenRefRoot(args,token,terminalOptions,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>);<\n>
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-root_0 = <action>;<\n>
->>
-
-/** What is the name of the previous value of this rule's root tree?  This
- *  lets us refer to $rule to mean the previous value.  I am reusing the
- *  variable 'tree' sitting in the retval struct to hold the value of root_0 right
- *  before I set it during rewrites.  The assignment will be to retval.tree.
- */
-prevRuleRootRef() ::= "retval"
-
-rewriteRuleRef(rule) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<rule>.NextTree());<\n>
->>
-
-rewriteRuleRefRoot(rule) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<rule>.NextNode(), root_<treeLevel>);<\n>
->>
-
-rewriteNodeAction(action) ::= <<
-adaptor.AddChild(root_<treeLevel>, <action>);<\n>
->>
-
-rewriteNodeActionRoot(action) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<action>, root_<treeLevel>);<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel=rule */
-rewriteRuleLabelRef(label) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
-rewriteRuleListLabelRef(label) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel=rule */
-rewriteRuleLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
-rewriteRuleListLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
->>
-
-rewriteWildcardLabelRef(label) ::= <<
-adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
->>
-
-createImaginaryNode(tokenType,terminalOptions,args) ::= <%
-<if(terminalOptions.node)>
-<! new MethodNode(IDLabel, args) !>
-new <terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
-<else>
-(<ASTLabelType>)adaptor.Create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
-<endif>
-%>
-
-createRewriteNodeFromElement(token,terminalOptions,args) ::= <%
-<if(terminalOptions.node)>
-new <terminalOptions.node>(stream_<token>.NextToken()<if(args)>, <args; separator=", "><endif>)
-<else>
-<if(args)> <! must create new node from old !>
-adaptor.Create(<token>, <args; separator=", ">)
-<else>
-stream_<token>.NextNode()
-<endif>
-<endif>
-%>
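
createImaginaryNode and createRewriteNodeFromElement above differ only in where the node payload comes from: an imaginary token is synthesized from a bare type/text pair, while a real token reference is pulled back out of its rewrite stream. A minimal Java-runtime sketch of the imaginary case (the FUNC name and its token type are invented for the example):

    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class ImaginaryNodeSketch {
        static final int FUNC = 4;  // arbitrary token type for the example

        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            // Equivalent of adaptor.Create(<tokenType>, "<tokenType>") in the template:
            Object func = adaptor.create(FUNC, "FUNC");
            Object root = adaptor.nil();
            adaptor.addChild(root, func);
            System.out.println(adaptor.getText(func));  // prints FUNC
        }
    }
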
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTDbg.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTDbg.stg
deleted file mode 100644
index 35d1629..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTDbg.stg
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
- *  hierarchy is set up as ASTDbg : AST : Dbg : Java by the code generator.
- */
-
-parserMembers() ::= <<
-// Implement this function in your helper file to use a custom tree adaptor
-partial void InitializeTreeAdaptor();
-protected DebugTreeAdaptor adaptor;
-
-public ITreeAdaptor TreeAdaptor
-{
-	get
-	{
-		return adaptor;
-	}
-	set
-	{
-<if(grammar.grammarIsRoot)>
-		this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
-<else>
-		this.adaptor = (DebugTreeAdaptor)adaptor; // delegator sends dbg adaptor
-<endif><\n>
-		<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
-	}
-}<\n>
->>
-
-parserCtorBody() ::= <<
-<super.parserCtorBody()>
->>
-
-createListenerAndHandshake() ::= <<
-DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, <if(TREE_PARSER)>input.TreeAdaptor<else>adaptor<endif> );
-DebugListener = proxy;
-<inputStreamType> = new Debug<inputStreamType>( input, proxy );
-try
-{
-	proxy.Handshake();
-}
-catch ( IOException ioe )
-{
-	ReportError( ioe );
-}
->>
-
-@ctorForRootGrammar.finally() ::= <<
-ITreeAdaptor adap = new CommonTreeAdaptor();
-TreeAdaptor = adap;
-proxy.TreeAdaptor = adap;
->>
-
-@ctorForProfilingRootGrammar.finally() ::=<<
-ITreeAdaptor adap = new CommonTreeAdaptor();
-TreeAdaptor = adap;
->>
-
-@ctorForPredefinedListener.superClassRef() ::= ": base( input, dbg )"
-
-@ctorForPredefinedListener.finally() ::=<<
-<if(grammar.grammarIsRoot)><! don't create new adaptor for delegates !>
-ITreeAdaptor adap = new CommonTreeAdaptor();
-TreeAdaptor = adap;<\n>
-<endif>
->>
-
-//@rewriteElement.pregen() ::= "dbg.Location( <e.line>, <e.pos> );"
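
These ASTDbg overrides wrap the parser's adaptor in a DebugTreeAdaptor so that every node creation and child attachment is reported to the debug listener. A small Java-runtime sketch of that wrapping, using a do-nothing BlankDebugEventListener with two callbacks overridden for logging instead of the socket proxy the template wires up (the logged output is illustrative only):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.debug.BlankDebugEventListener;
    import org.antlr.runtime.debug.DebugTreeAdaptor;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class DebugAdaptorSketch {
        public static void main(String[] args) {
            BlankDebugEventListener dbg = new BlankDebugEventListener() {
                @Override public void createNode(Object t) {
                    System.out.println("createNode " + t);
                }
                @Override public void addChild(Object root, Object child) {
                    System.out.println("addChild " + root + " <- " + child);
                }
            };
            // Same shape as "this.adaptor = new DebugTreeAdaptor(dbg, adaptor)" above.
            TreeAdaptor adaptor = new DebugTreeAdaptor(dbg, new CommonTreeAdaptor());
            Object root = adaptor.nil();
            adaptor.addChild(root, adaptor.create(new CommonToken(4, "x")));  // type 4 is arbitrary
        }
    }
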
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTParser.stg
deleted file mode 100644
index 8b507c7..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTParser.stg
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** Templates for building ASTs during normal parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  The situation is not too bad as rewrite (->) usage makes ^ and !
- *  invalid. There is no huge explosion of combinations.
- */
-
-@rule.setErrorReturnValue() ::= <<
-retval.Tree = (<ASTLabelType>)adaptor.ErrorNode(input, retval.Start, input.LT(-1), re);
-<! System.out.WriteLine("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
->>
-
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <%
-<super.tokenRef(...)>
-<if(!ruleDescriptor.isSynPred)>
-<if(backtracking)><\n>if (state.backtracking == 0) {<endif>
-<\n><label>_tree = <createNodeFromToken(...)>;
-<\n>adaptor.AddChild(root_0, <label>_tree);
-<if(backtracking)><\n>}<endif>
-<endif>
-%>
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex,terminalOptions) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <%
-<super.tokenRef(...)>
-<if(!ruleDescriptor.isSynPred)>
-<if(backtracking)><\n>if (<actions.(actionScope).synpredgate>) {<endif>
-<\n><label>_tree = <createNodeFromToken(...)>;
-<\n>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
-<if(backtracking)><\n>}<endif>
-<endif>
-%>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,terminalOptions,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-// SET AST
-
-// The match-set templates are interesting in that they use an argument list
-// to pass code to the default matchSet; that is another possible way to alter
-// inherited code.  I don't use the region mechanism because I need to pass
-// different code chunks depending on the operator.  I don't like putting the
-// operator into the template name, since the number of templates gets large,
-// but this is the most flexible approach--as opposed to having the code
-// generator call matchSet and then tack on root code, rule-root code, list
-// label code, and so on.  Those combinations might require more involved code
-// rather than code that is simply appended.  Investigate that refactoring when
-// I have more time.
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-<super.matchSet(postmatchCode={<if(!ruleDescriptor.isSynPred)><if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>adaptor.AddChild(root_0, <createNodeFromToken(...)>);<endif>}, ...)>
->>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-<matchSet(...)>
->>
-
-matchSetBang(s,label,elementIndex,terminalOptions,postmatchCode) ::= "<super.matchSet(...)>"
-
-// note there is no matchSetTrack because -> rewrites force sets to be
-// plain old blocks of alts: (A|B|...|C)
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-<if(label)>
-<label>=(<labelType>)input.LT(1);
-<endif>
-<super.matchSet(postmatchCode={<if(!ruleDescriptor.isSynPred)><if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<createNodeFromToken(...)>, root_0);<endif>}, ...)>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args,scope) ::= <%
-<super.ruleRef(...)>
-<if(!ruleDescriptor.isSynPred)>
-<\n><if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>adaptor.AddChild(root_0, <label>.Tree);
-<endif>
-%>
-
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
-
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_0);
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefBang(...)>
-<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
->>
-
-// WILDCARD AST
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<super.wildcard(...)>
-<if(!ruleDescriptor.isSynPred)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-adaptor.AddChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
-<endif>
->>
-
-wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-<super.wildcard(...)>
-<if(!ruleDescriptor.isSynPred)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
-root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
-<endif>
->>
-
-createNodeFromToken(label,terminalOptions) ::= <%
-<if(terminalOptions.node)>
-new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>)
-<else>
-(<ASTLabelType>)adaptor.Create(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>)
-<endif>
-%>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
-adaptor.SetTokenBoundaries(retval.Tree, retval.Start, retval.Stop);
-<if(backtracking)>}<endif>
->>
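
The auto-construction templates above boil down to a handful of adaptor calls: Nil() for the alternative's root, AddChild() for plain references, BecomeRoot() for `^`, then RulePostProcessing() and SetTokenBoundaries() in ruleCleanUp. An illustrative Java-runtime sketch (hand-written, not generated output) of what an alternative like `INT '+'^ INT` builds; token types are arbitrary:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class AutoAstSketch {
        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            Token one  = new CommonToken(4, "1");
            Token plus = new CommonToken(5, "+");
            Token two  = new CommonToken(4, "2");

            Object root0 = adaptor.nil();                             // @alt.declarations
            adaptor.addChild(root0, adaptor.create(one));             // INT   (tokenRef)
            root0 = adaptor.becomeRoot(adaptor.create(plus), root0);  // '+'^  (tokenRefRuleRoot)
            adaptor.addChild(root0, adaptor.create(two));             // INT   (tokenRef)

            Object tree = adaptor.rulePostProcessing(root0);          // ruleCleanUp
            adaptor.setTokenBoundaries(tree, one, two);
            System.out.println(((CommonTree) tree).toStringTree());   // prints (+ 1 2)
        }
    }
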
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTTreeParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTTreeParser.stg
deleted file mode 100644
index 3be82ed..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTTreeParser.stg
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** Templates for building ASTs during tree parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  Each combination has its own template except that label/no label
- *  is combined into tokenRef, ruleRef, ...
- */
-
-/** Add a variable to track last element matched */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-<ASTLabelType> _first_0 = default(<ASTLabelType>);
-<ASTLabelType> _last = default(<ASTLabelType>);<\n>
->>
-
-/** What to emit when there is no rewrite rule.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= <<
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<if(rewriteMode)>
-retval.Tree = (<ASTLabelType>)_first_0;
-if (adaptor.GetParent(retval.Tree)!=null && adaptor.IsNil(adaptor.GetParent(retval.Tree)))
-    retval.Tree = (<ASTLabelType>)adaptor.GetParent(retval.Tree);
-<endif>
-<if(backtracking)>}<endif>
->>
-
-/** match ^(root children) in tree parser; override here to
- *  add tree construction actions.
- */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-{
-<ASTLabelType> _save_last_<treeLevel> = _last;
-<ASTLabelType> _first_<treeLevel> = default(<ASTLabelType>);
-<if(!rewriteMode)>
-<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.Nil();
-<endif>
-<root:element()>
-<if(rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
-<if(root.el.rule)>
-if (_first_<enclosingTreeLevel> == null) _first_<enclosingTreeLevel> = <root.el.label>.Tree;
-<else>
-if (_first_<enclosingTreeLevel> == null) _first_<enclosingTreeLevel> = <root.el.label>;
-<endif>
-<endif>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if (input.LA(1) == TokenTypes.Down) {
-    Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
-    <children:element()>
-    Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
-}
-<else>
-Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
-<children:element()>
-Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
-<endif>
-<if(!rewriteMode)>
-adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
-<endif>
-_last = _save_last_<treeLevel>;
-}<\n>
->>
-
-// TOKEN AST STUFF
-
-/** ID! and output=AST (same as plain tokenRef) except that it also
- *  sets _last
- */
-tokenRefBang(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.tokenRef(...)>
->>
-
-/** ID auto construct */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<endif><\n>
-adaptor.AddChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>}<endif>
-<else> <! rewrite mode !>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
-if (_first_<treeLevel> == null) _first_<treeLevel> = <label>;
-<endif>
->>
-
-/** label+=TOKEN auto construct */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabelElem(elem=label,...)>
->>
-
-/** ^(ID ...) auto construct */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<endif><\n>
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
-<if(backtracking)>}<endif>
-<endif>
->>
-
-/** Match ^(label+=TOKEN ...) auto construct */
-tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabelElem(elem=label,...)>
->>
-
-/** Match . wildcard and auto dup the node/subtree */
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.wildcard(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.DupTree(<label>);
-adaptor.AddChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>}<endif>
-<else> <! rewrite mode !>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
-if (_first_<treeLevel> == null) _first_<treeLevel> = <label>;
-<endif>
->>
-
-// SET AST
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.matchSet(postmatchCode={
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<endif><\n>
-adaptor.AddChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>\}<endif>
-<endif>
-}, ...
-)>
->>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-<matchSet(...)>
-<noRewrite(...)> <! set return tree !>
->>
-
-matchSetBang(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.matchSet(...)>
->>
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-<super.matchSet(postmatchCode={
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
-<endif><\n>
-root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
-<if(backtracking)>\}<endif>
-<endif>
-}, ...
-)>
->>
-
-// RULE REF AST
-
-/** rule auto construct */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRef(...)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
-<if(!rewriteMode)>
-adaptor.AddChild(root_<treeLevel>, <label>.Tree);
-<else> <! rewrite mode !>
-if (_first_<treeLevel> == null) _first_<treeLevel> = <label>.Tree;
-<endif>
->>
-
-/** x+=rule auto construct */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabelElem(elem={<label>.Tree},...)>
->>
-
-/** ^(rule ...) auto construct */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_<treeLevel>);
-<endif>
->>
-
-/** ^(x+=rule ...) auto construct */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabelElem(elem={<label>.Tree},...)>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefTrack(...)>
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefTrackAndListLabel(...)>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefRootTrack(...)>
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefRuleRootTrackAndListLabel(...)>
->>
-
-/** Streams for token refs are tree nodes now; override to
- *  change NextToken to NextNode.
- */
-createRewriteNodeFromElement(token,terminalOptions,args) ::= <%
-<if(terminalOptions.node)>
-new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif>stream_<token>.NextNode())
-<else>
-stream_<token>.NextNode()
-<endif>
-%>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(!rewriteMode)>
-<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
-retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
-<if(backtracking)>}<endif>
-<endif>
->>
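
The tree() template above works because the tree node stream serializes a subtree as root, DOWN, children, UP, which is exactly what the Match(input, TokenTypes.Down/Up, null) calls consume. A small Java-runtime sketch showing that serialization for a hand-built (+ 1 2) tree (token types are arbitrary):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.CommonTreeNodeStream;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class TreeStreamSketch {
        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            Object plus = adaptor.create(new CommonToken(5, "+"));
            adaptor.addChild(plus, adaptor.create(new CommonToken(4, "1")));
            adaptor.addChild(plus, adaptor.create(new CommonToken(4, "2")));

            // A tree parser sees the flattened form: +  DOWN  1  2  UP
            CommonTreeNodeStream nodes = new CommonTreeNodeStream(plus);
            for (int i = 1; nodes.LA(i) != Token.EOF; i++) {
                System.out.println(((CommonTree) nodes.LT(i)).getText());
            }
        }
    }
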
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/CSharp3.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/CSharp3.stg
deleted file mode 100644
index 29c8fae..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/CSharp3.stg
+++ /dev/null
@@ -1,1699 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-csharpVisibilityMap ::= [
-	"private":"private",
-	"protected":"protected",
-	"public":"public",
-	"fragment":"private",
-	default:"private"
-]
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(	LEXER,PARSER,TREE_PARSER, actionScope, actions,
-			docComment, recognizer,
-			name, tokens, tokenNames, rules, cyclicDFAs,
-			bitsets, buildTemplate, buildAST, rewriteMode, profile,
-			backtracking, synpreds, memoize, numRules,
-			fileName, ANTLRVersion, generatedTimestamp, trace,
-			scopes, superClass, literals) ::=
-<<
-//------------------------------------------------------------------------------
-// \<auto-generated>
-//     This code was generated by a tool.
-//     ANTLR Version: <ANTLRVersion>
-//
-//     Changes to this file may cause incorrect behavior and will be lost if
-//     the code is regenerated.
-// \</auto-generated>
-//------------------------------------------------------------------------------
-
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-
-// The variable 'variable' is assigned but its value is never used.
-#pragma warning disable 219
-// Unreachable code detected.
-#pragma warning disable 162
-// Missing XML comment for publicly visible type or member 'Type_or_Member'
-#pragma warning disable 1591
-// CLS compliance checking will not be performed on 'type' because it is not visible from outside this assembly.
-#pragma warning disable 3019
-
-<actions.(actionScope).header>
-
-<@imports>
-using System.Collections.Generic;
-using Antlr.Runtime;
-using Antlr.Runtime.Misc;
-<if(TREE_PARSER)>
-using Antlr.Runtime.Tree;
-using RewriteRuleITokenStream = Antlr.Runtime.Tree.RewriteRuleTokenStream;
-<endif>
-<@end>
-<if(actions.(actionScope).namespace)>
-namespace <actions.(actionScope).namespace>
-{
-<endif>
-<docComment>
-<recognizer>
-<if(actions.(actionScope).namespace)>
-
-} // namespace <actions.(actionScope).namespace>
-<endif>
->>
-
-lexerInputStreamType() ::= <<
-<actions.(actionScope).inputStreamType; null="ICharStream">
->>
-
-lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="CommonToken",
-      superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Lexer<endif>},
-	  rewriteElementType={}, ASTLabelType={}) ::= <<
-[System.CodeDom.Compiler.GeneratedCode("ANTLR", "<ANTLRVersion>")]
-[System.CLSCompliant(false)]
-<parserModifier(grammar=grammar, actions=actions)> partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
-{
-	<tokens:{it|public const int <it.name; format="id">=<it.type>;}; separator="\n">
-	<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-	<actions.lexer.members>
-
-    // delegates
-    <grammar.delegates:
-         {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
-    // delegators
-    <grammar.delegators:
-         {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
-    <last(grammar.delegators):{g|private <g.recognizerName> gParent;}>
-
-	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>()<! needed by subclasses !>
-	{
-		OnCreated();
-	}
-
-	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<lexerInputStreamType()> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
-		: this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
-	{
-	}
-
-	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<lexerInputStreamType()> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
-		: base(input, state)
-	{
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-		state.ruleMemo = new System.Collections.Generic.Dictionary\<int, int>[<numRules>+1];<\n><! index from 1..n !>
-<endif>
-<endif>
-		<grammar.directDelegates:
-		 {g|<g:delegateName()> = new <g.recognizerName>(input, this.state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
-		<grammar.delegators:
-		 {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
-		<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
-
-		OnCreated();
-	}
-	public override string GrammarFileName { get { return "<fileName>"; } }
-
-<if(grammar.hasDelegates)>
-	public override <lexerInputStreamType()> CharStream
-	{
-		get
-		{
-			return base.CharStream;
-		}
-		set
-		{
-			base.CharStream = value;
-			<grammar.directDelegates:
-			 {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
-			<grammar.delegators:
-			 {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
-			<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
-		}
-	}
-
-<endif>
-<if(filterMode)>
-	<filteringNextToken()>
-<endif>
-
-
-	partial void OnCreated();
-	partial void EnterRule(string ruleName, int ruleIndex);
-	partial void LeaveRule(string ruleName, int ruleIndex);
-
-	<rules; separator="\n">
-
-	<insertLexerSynpreds(synpreds)>
-
-	#region DFA
-	<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
-
-	protected override void InitDFAs()
-	{
-		base.InitDFAs();
-		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this<if(dfa.specialStateSTs)>, SpecialStateTransition<dfa.decisionNumber><endif>);}; separator="\n">
-	}
-
-	<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-	#endregion
-
-}
->>
-
-/** An override of Lexer.NextToken() that backtracks over mTokens() looking
- *  for matches.  No error can be reported on failure; just rewind, consume
- *  a character and then try again.  backtracking needs to be set as well.
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-public override IToken NextToken()
-{
-	while (true)
-	{
-		if (input.LA(1) == CharStreamConstants.EndOfFile)
-		{
-			IToken eof = new CommonToken((ICharStream)input, CharStreamConstants.EndOfFile, TokenChannels.Default, input.Index, input.Index);
-			eof.Line = Line;
-			eof.CharPositionInLine = CharPositionInLine;
-			return eof;
-		}
-		state.token = null;
-		state.channel = TokenChannels.Default;
-		state.tokenStartCharIndex = input.Index;
-		state.tokenStartCharPositionInLine = input.CharPositionInLine;
-		state.tokenStartLine = input.Line;
-		state.text = null;
-		try
-		{
-			int m = input.Mark();
-			state.backtracking=1;<! means we won't throw slow exception !>
-			state.failed=false;
-			mTokens();
-			state.backtracking=0;
-			<! mTokens backtracks with synpred at backtracking==2
-			   and we set the synpredgate to allow actions at level 1. !>
-			if (state.failed)
-			{
-				input.Rewind(m);
-				input.Consume();<! advance one char and try again !>
-			}
-			else
-			{
-				Emit();
-				return state.token;
-			}
-		}
-		catch (RecognitionException re)
-		{
-			// shouldn't happen in backtracking mode, but...
-			ReportError(re);
-			Recover(re);
-		}
-	}
-}
-
-public override void Memoize(IIntStream input, int ruleIndex, int ruleStartIndex)
-{
-	if (state.backtracking > 1)
-		base.Memoize(input, ruleIndex, ruleStartIndex);
-}
-
-public override bool AlreadyParsedRule(IIntStream input, int ruleIndex)
-{
-	if (state.backtracking > 1)
-		return base.AlreadyParsedRule(input, ruleIndex);
-
-	return false;
-}
->>
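
The filtering NextToken() above is a speculate-and-retry loop: mark the input, try mTokens() with backtracking turned on, and on failure rewind and skip a single character before trying again. The Java sketch below imitates only that loop shape against a raw CharStream; speculate() is a stand-in for the generated mTokens() and is not part of any runtime API:

    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.CharStream;

    public class FilterLoopSketch {
        // Stand-in for mTokens(): succeeds (and consumes) only on the text "ab".
        static boolean speculate(CharStream input) {
            if (input.LA(1) == 'a' && input.LA(2) == 'b') {
                input.consume();
                input.consume();
                return true;
            }
            return false;
        }

        public static void main(String[] args) {
            CharStream input = new ANTLRStringStream("xxabyyab");
            while (input.LA(1) != CharStream.EOF) {
                int m = input.mark();              // remember where this attempt started
                if (speculate(input)) {
                    System.out.println("matched ab ending at index " + input.index());
                } else {
                    input.rewind(m);               // undo any speculative consumption
                    input.consume();               // skip one character and try again
                }
            }
        }
    }
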
-
-actionGate() ::= "state.backtracking == 0"
-
-filteringActionGate() ::= "state.backtracking == 1"
-
-/** How to generate a parser */
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass,
-              labelType, members, rewriteElementType,
-              filterMode, ASTLabelType="object") ::= <<
-[System.CodeDom.Compiler.GeneratedCode("ANTLR", "<ANTLRVersion>")]
-[System.CLSCompliant(false)]
-<parserModifier(grammar=grammar, actions=actions)> partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
-{
-<if(grammar.grammarIsRoot)>
-	internal static readonly string[] tokenNames = new string[] {
-		"\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
-	};
-<endif>
-	<tokens:{it|public const int <it.name; format="id">=<it.type>;}; separator="\n">
-
-<if(grammar.delegates)>
-	// delegates
-	<grammar.delegates:
-		 {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
-<endif>
-<if(grammar.delegators)>
-	// delegators
-	<grammar.delegators:
-		 {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
-	<last(grammar.delegators):{g|private <g.recognizerName> gParent;}>
-<endif>
-
-	<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-	<@members()>
-
-	public override string[] TokenNames { get { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; } }
-	public override string GrammarFileName { get { return "<fileName>"; } }
-
-	<members>
-
-	partial void OnCreated();
-	partial void EnterRule(string ruleName, int ruleIndex);
-	partial void LeaveRule(string ruleName, int ruleIndex);
-
-	#region Rules
-	<rules; separator="\n">
-	#endregion Rules
-
-<if(grammar.delegatedRules)>
-<! generate rule/method definitions for imported rules so they
-   appear to be defined in this recognizer. !>
-	#region Delegated rules
-<grammar.delegatedRules:{ruleDescriptor|
-	<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> <returnType(ruleDescriptor)> <ruleDescriptor.name; format="id">(<ruleDescriptor.parameterScope:parameterScope()>) <!throws RecognitionException !>{ <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name; format="id">(<ruleDescriptor.parameterScope.attributes:{a|<a.name; format="id">}; separator=", ">); \}}; separator="\n">
-	#endregion Delegated rules
-<endif>
-
-	<insertSynpreds(synpreds)>
-
-<if(cyclicDFAs)>
-	#region DFA
-	<cyclicDFAs:{dfa | private DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
-
-	protected override void InitDFAs()
-	{
-		base.InitDFAs();
-		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>( this<if(dfa.specialStateSTs)>, SpecialStateTransition<dfa.decisionNumber><endif> );}; separator="\n">
-	}
-
-	<cyclicDFAs:cyclicDFA()><! dump tables for all DFA !>
-	#endregion DFA
-<endif>
-
-<if(bitsets)>
-	#region Follow sets
-	private static class Follow
-	{
-		<bitsets:{it|<bitset(name={_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>}; separator="\n">
-	}
-	#endregion Follow sets
-<endif>
-}
->>
-
-@genericParser.members() ::= <<
-<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<inputStreamType> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
-	: this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
-{
-}
-<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<inputStreamType> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
-	: base(input, state)
-{
-	<parserCtorBody()>
-<if(grammar.directDelegates)>
-	<grammar.directDelegates:
-		{g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
-<endif>
-<if(grammar.indirectDelegates)>
-	<grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
-<endif>
-<if(grammar.delegators)>
-	<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
-<endif>
-	OnCreated();
-}
->>
-
-// imported grammars are 'public' (can't be internal because their return scope classes must be accessible)
-parserModifier(grammar, actions) ::= <<
-<if(grammar.grammarIsRoot)><actions.(actionScope).modifier; null="public"><else>public<endif>
->>
-
-parserCtorBody() ::= <<
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-this.state.ruleMemo = new System.Collections.Generic.Dictionary\<int, int>[<length(grammar.allImportedRules)>+1];<\n><! index from 1..n !>
-<endif>
-<endif>
-<grammar.delegators:
- {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
-       ASTLabelType="object", superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Parser<endif>}, labelType="IToken",
-       members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="ITokenStream", rewriteElementType="IToken", filterMode=false, ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
-           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="object",
-           superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Tree.<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif><endif>},
-           members={<actions.treeparser.members>}) ::= <<
-<genericParser(inputStreamType="ITreeNodeStream", rewriteElementType="Node", ...)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  or parameters, just generate the simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-
-partial void EnterRule_<ruleName>_fragment();
-partial void LeaveRule_<ruleName>_fragment();
-
-// $ANTLR start <ruleName>
-public <!final !>void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope()>)
-{
-	<ruleLabelDefs(...)>
-	EnterRule_<ruleName>_fragment();
-	EnterRule("<ruleName>_fragment", <ruleDescriptor.index>);
-	TraceIn("<ruleName>_fragment", <ruleDescriptor.index>);
-	try
-	{
-		<block>
-	}
-	finally
-	{
-		TraceOut("<ruleName>_fragment", <ruleDescriptor.index>);
-		LeaveRule("<ruleName>_fragment", <ruleDescriptor.index>);
-		LeaveRule_<ruleName>_fragment();
-	}
-}
-// $ANTLR end <ruleName>
->>
-
-insertLexerSynpreds(synpreds) ::= <<
-<insertSynpreds(synpreds)>
->>
-
-insertSynpreds(synpreds) ::= <<
-<if(synpreds)>
-#region Synpreds
-private bool EvaluatePredicate(System.Action fragment)
-{
-	bool success = false;
-	state.backtracking++;
-	<@start()>
-	try { DebugBeginBacktrack(state.backtracking);
-	int start = input.Mark();
-	try
-	{
-		fragment();
-	}
-	catch ( RecognitionException re )
-	{
-		System.Console.Error.WriteLine("impossible: "+re);
-	}
-	success = !state.failed;
-	input.Rewind(start);
-	} finally { DebugEndBacktrack(state.backtracking, success); }
-	<@stop()>
-	state.backtracking--;
-	state.failed=false;
-	return success;
-}
-#endregion Synpreds
-<endif>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if (state.backtracking > 0 && AlreadyParsedRule(input, <ruleDescriptor.index>)) { <returnFromRule()> }
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (state.failed) <returnFromRule()><endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (state.backtracking>0) {state.failed=true; <returnFromRule()>}<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(ruleDescriptor.returnScope)>
-partial void EnterRule_<ruleName>();
-partial void LeaveRule_<ruleName>();
-
-// $ANTLR start "<ruleName>"
-// <fileName>:<description>
-[GrammarRule("<ruleName>")]
-<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> <returnType(ruleDescriptor)> <ruleName; format="id">(<ruleDescriptor.parameterScope:parameterScope()>)
-{
-	EnterRule_<ruleName>();
-	EnterRule("<ruleName>", <ruleDescriptor.index>);
-	TraceIn("<ruleName>", <ruleDescriptor.index>);
-	<ruleScopeSetUp()>
-	<ruleDeclarations()>
-	<ruleLabelDefs(...)>
-	<ruleDescriptor.actions.init>
-	try { DebugEnterRule(GrammarFileName, "<ruleName>");
-	DebugLocation(<ruleDescriptor.tree.line>, <ruleDescriptor.EORNode.charPositionInLine>);
-	<@preamble()>
-	try
-	{
-		<ruleMemoization(name=ruleName)>
-		<block>
-		<ruleCleanUp()>
-		<(ruleDescriptor.actions.after):execAction()>
-	}
-<if(exceptions)>
-	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-	<actions.(actionScope).rulecatch>
-<else>
-	catch (RecognitionException re)
-	{
-		ReportError(re);
-		Recover(input,re);
-	<@setErrorReturnValue()>
-	}
-<endif>
-<endif>
-<endif>
-	finally
-	{
-		TraceOut("<ruleName>", <ruleDescriptor.index>);
-		LeaveRule("<ruleName>", <ruleDescriptor.index>);
-		LeaveRule_<ruleName>();
-		<memoize()>
-		<ruleScopeCleanUp()>
-		<finally>
-	}
-	DebugLocation(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);
-	} finally { DebugExitRule(GrammarFileName, "<ruleName>"); }
-	<@postamble()>
-	<returnFromRule()><\n>
-}
-// $ANTLR end "<ruleName>"
->>
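Read end to end, the rule template expands to one method per grammar rule with a fixed enter/trace/try/catch/finally skeleton around the alternative-matching <block>. A compilable C# sketch of that shape for a hypothetical parameterless rule named expr, with the runtime calls stubbed out so only the skeleton is visible:

using System;

partial class RuleShapeSketch
{
    // Hook points the template declares so user partial classes can observe rule entry/exit.
    partial void EnterRule_expr();
    partial void LeaveRule_expr();

    // Stubs for the runtime calls the generated code would make.
    void EnterRule(string name, int index) { }
    void LeaveRule(string name, int index) { }
    void TraceIn(string name, int index) { }
    void TraceOut(string name, int index) { }
    void ReportError(Exception re) { Console.Error.WriteLine(re); }

    public void expr()                     // <ruleName>, rule index 1 assumed
    {
        EnterRule_expr();
        EnterRule("expr", 1);
        TraceIn("expr", 1);
        try
        {
            // <ruleMemoization>, <block>, <ruleCleanUp> and after-actions land here.
        }
        catch (Exception re)               // RecognitionException in real generated code
        {
            ReportError(re);               // followed by Recover(input, re)
        }
        finally
        {
            TraceOut("expr", 1);
            LeaveRule("expr", 1);
            LeaveRule_expr();
        }
    }
}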
-
-// imported grammars need to have internal rules
-ruleModifier(grammar,ruleDescriptor) ::= <<
-<if(grammar.grammarIsRoot)><csharpVisibilityMap.(ruleDescriptor.modifier); null="private"><else>internal<endif>
->>
-
-// imported grammars need to have public return scopes
-returnScopeModifier(grammar,ruleDescriptor) ::= <<
-<if(grammar.grammarIsRoot)><csharpVisibilityMap.(ruleDescriptor.modifier); null="private"><else>public<endif>
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>)
-{
-	<e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<returnType(ruleDescriptor)> retval = new <returnType(ruleDescriptor)>(<if(ruleDescriptor.returnScope.attributes)>this<endif>);
-retval.Start = (<labelType>)input.LT(1);
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.type> <a.name; format="id"> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
-}>
-<endif>
-<if(memoize)>
-int <ruleDescriptor.name>_StartIndex = input.Index;
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{it|<it>_stack.Push(new <it>_scope(this));<it>_scopeInit(<it>_stack.Peek());}; separator="\n">
-<ruleDescriptor.ruleScope:{it|<it.name>_stack.Push(new <it.name>_scope(this));<it.name>_scopeInit(<it.name>_stack.Peek());}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{it|<it>_scopeAfter(<it>_stack.Peek());<it>_stack.Pop();}; separator="\n">
-<ruleDescriptor.ruleScope:{it|<it.name>_scopeAfter(<it.name>_stack.Peek());<it.name>_stack.Pop();}; separator="\n">
->>
-
-ruleLabelDefs(ruleDescriptor, labelType, ASTLabelType, rewriteElementType) ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it|<labelType> <it.label.text> = default(<labelType>);}; separator="\n"
->
-<ruleDescriptor.tokenListLabels
-    :{it|List\<<labelType>\> list_<it.label.text> = null;}; separator="\n"
->
-<[ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it|List\<<ASTLabelType>\> list_<it.label.text> = null;}; separator="\n"
->
-<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
-<ruleDescriptor.ruleListLabels:ruleLabelDef(); separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{it|<labelType> <it.label.text> = default(<labelType>);}; separator="\n"
->
-<[ruleDescriptor.charListLabels,
-  ruleDescriptor.charLabels]
-	:{it|int <it.label.text> = 0;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{it|List\<<labelType>\> list_<it.label.text> = null;}; separator="\n"
->
-<ruleDescriptor.charListLabels:{it|List\<int\> list_<it.label.text> = null;}; separator="\n"
->
->>
-
-returnFromRule() ::= <%
-return
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<! This comment is a hack to make sure the following
-   single space appears in the output. !> <ruleDescriptor.singleValueReturnName>
-<else>
-<!!> retval
-<endif>
-<endif>
-<endif>
-;
-%>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-retval.Stop = (<labelType>)input.LT(-1);
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if (state.backtracking > 0) { Memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
-<endif>
-<endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-
-partial void EnterRule_<ruleName>();
-partial void LeaveRule_<ruleName>();
-
-// $ANTLR start "<ruleName>"
-[GrammarRule("<ruleName>")]
-<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>)
-{
-	EnterRule_<ruleName>();
-	EnterRule("<ruleName>", <ruleDescriptor.index>);
-	TraceIn("<ruleName>", <ruleDescriptor.index>);
-	<ruleScopeSetUp()>
-	<ruleDeclarations()>
-	try
-	{
-<if(nakedBlock)>
-		<ruleMemoization(name=ruleName)>
-		<lexerRuleLabelDefs()>
-		<ruleDescriptor.actions.init>
-		<block>
-<else>
-		int _type = <ruleName>;
-		int _channel = DefaultTokenChannel;
-		<ruleMemoization(name=ruleName)>
-		<lexerRuleLabelDefs()>
-		<ruleDescriptor.actions.init>
-		<block>
-		<ruleCleanUp()>
-		state.type = _type;
-		state.channel = _channel;
-		<(ruleDescriptor.actions.after):execAction()>
-<endif>
-	}
-	finally
-	{
-		TraceOut("<ruleName>", <ruleDescriptor.index>);
-		LeaveRule("<ruleName>", <ruleDescriptor.index>);
-		LeaveRule_<ruleName>();
-		<ruleScopeCleanUp()>
-		<memoize()>
-	}
-}
-// $ANTLR end "<ruleName>"
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-
-public override void mTokens()
-{
-	<block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-try { DebugEnterSubRule(<decisionNumber>);
-try { DebugEnterDecision(<decisionNumber>, false<!<decision.dfa.hasSynPred>!>);
-<decision>
-} finally { DebugExitDecision(<decisionNumber>); }
-<@postdecision()>
-<@prebranch()>
-switch (alt<decisionNumber>)
-{
-<alts:{a|<altSwitchCase(i,a)>}>
-}
-} finally { DebugExitSubRule(<decisionNumber>); }
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-try { DebugEnterDecision(<decisionNumber>, false<!<decision.dfa.hasSynPred>!>);
-<decision>
-} finally { DebugExitDecision(<decisionNumber>); }
-<@postdecision()>
-switch (alt<decisionNumber>)
-{
-<alts:{a|<altSwitchCase(i,a)>}>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-DebugEnterAlt(1);
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-DebugEnterAlt(1);
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int cnt<decisionNumber>=0;
-<decls>
-<@preloop()>
-try { DebugEnterSubRule(<decisionNumber>);
-while (true)
-{
-	int alt<decisionNumber>=<maxAlt>;
-	<@predecision()>
-	try { DebugEnterDecision(<decisionNumber>, false<!<decision.dfa.hasSynPred>!>);
-	<decision>
-	} finally { DebugExitDecision(<decisionNumber>); }
-	<@postdecision()>
-	switch (alt<decisionNumber>)
-	{
-	<alts:{a|<altSwitchCase(i,a)>}>
-	default:
-		if (cnt<decisionNumber> >= 1)
-			goto loop<decisionNumber>;
-
-		<ruleBacktrackFailure()>
-		EarlyExitException eee<decisionNumber> = new EarlyExitException( <decisionNumber>, input );
-		DebugRecognitionException(eee<decisionNumber>);
-		<@earlyExitException()>
-		throw eee<decisionNumber>;
-	}
-	cnt<decisionNumber>++;
-}
-loop<decisionNumber>:
-	;
-
-} finally { DebugExitSubRule(<decisionNumber>); }
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@preloop()>
-try { DebugEnterSubRule(<decisionNumber>);
-while (true)
-{
-	int alt<decisionNumber>=<maxAlt>;
-	<@predecision()>
-	try { DebugEnterDecision(<decisionNumber>, false<!<decision.dfa.hasSynPred>!>);
-	<decision>
-	} finally { DebugExitDecision(<decisionNumber>); }
-	<@postdecision()>
-	switch ( alt<decisionNumber> )
-	{
-	<alts:{a|<altSwitchCase(i,a)>}>
-	default:
-		goto loop<decisionNumber>;
-	}
-}
-
-loop<decisionNumber>:
-	;
-
-} finally { DebugExitSubRule(<decisionNumber>); }
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase(altNum,alt) ::= <<
-case <altNum>:
-	<@prealt()>
-	DebugEnterAlt(<altNum>);
-	<alt>
-	break;<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-// <fileName>:<description>
-{
-<@declarations()>
-<elements:element()>
-<rew>
-<@cleanup()>
-}
->>
-
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element(it) ::= <%
-<@prematch()>
-DebugLocation(<it.line>, <it.pos>);<\n>
-<it.el><\n>
-%>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label>=(<labelType>)<endif>Match(input,<token>,Follow._<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-listLabel(label,elem) ::= <<
-#error The listLabel template should not be used with this target.<\n>
->>
-
-listLabelElem(label,elem,elemType) ::= <<
-if (list_<label>==null) list_<label>=new List\<<elemType; null={<labelType>}>\>();
-list_<label>.Add(<elem>);<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-Match(<char>); <checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-MatchRange(<a>,<b>); <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode="") ::= <<
-<if(label)>
-<matchSetLabel()>
-<endif>
-if (<s>)
-{
-	input.Consume();
-	<postmatchCode>
-	<if(!LEXER)>state.errorRecovery=false;<endif><if(backtracking)>state.failed=false;<endif>
-}
-else
-{
-	<ruleBacktrackFailure()>
-	MismatchedSetException mse = new MismatchedSetException(null,input);
-	DebugRecognitionException(mse);
-	<@mismatchedSetException()>
-<if(LEXER)>
-	Recover(mse);
-	throw mse;
-<else>
-	throw mse;
-	<! use following code to make it recover inline; remove throw mse;
-	recoverFromMismatchedSet(input,mse,Follow._set_in_<ruleName><elementIndex>);
-	!>
-<endif>
-}<\n>
->>
-
-matchSetUnchecked(s,label,elementIndex,postmatchCode=false) ::= <%
-<if(label)>
-<matchSetLabel()><\n>
-<endif>
-input.Consume();<\n>
-<if(postmatchCode)>
-<postmatchCode><\n>
-<endif>
-<if(!LEXER)>state.errorRecovery=false;<endif><if(backtracking)>state.failed=false;<endif>
-%>
-
-matchSetLabel() ::= <%
-<if(LEXER)>
-<label>= input.LA(1);
-<else>
-<label>=(<labelType>)input.LT(1);
-<endif>
-%>
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex) ::= <%
-<if(label)>
-int <label>Start = CharIndex;<\n>
-Match(<string>); <checkRuleBacktrackFailure()><\n>
-int <label>StartLine<elementIndex> = Line;<\n>
-int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
-<label> = new <labelType>(input, TokenTypes.Invalid, TokenChannels.Default, <label>Start, CharIndex-1);<\n>
-<label>.Line = <label>StartLine<elementIndex>;<\n>
-<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
-<else>
-Match(<string>); <checkRuleBacktrackFailure()><\n>
-<endif>
-%>
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)>
-<label>=(<labelType>)input.LT(1);<\n>
-<endif>
-MatchAny(input); <checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<wildcard(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-MatchAny(); <checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.  The 'rule' argument used to be the
- *  target rule name, but is now of type Rule, whose toString is the
- *  same: the rule name.  Now, though, you can access the full rule
- *  descriptor.
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-PushFollow(Follow._<rule.name>_in_<ruleName><elementIndex>);
-<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name; format="id">(<args; separator=", ">);
-PopFollow();
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabelElem(elem=label,elemType={<ASTLabelType>},...)>
->>
-
-/** A lexer rule reference.
- *
- *  The 'rule' argument used to be the target rule name, but is
- *  now of type Rule, whose toString is the same: the rule name.
- *  Now, though, you can access the full rule descriptor.
- */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <%
-<if(label)>
-int <label>Start<elementIndex> = CharIndex;<\n>
-int <label>StartLine<elementIndex> = Line;<\n>
-int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()><\n>
-<label> = new <labelType>(input, TokenTypes.Invalid, TokenChannels.Default, <label>Start<elementIndex>, CharIndex-1);<\n>
-<label>.Line = <label>StartLine<elementIndex>;<\n>
-<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
-<else>
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<endif>
-%>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(...)>
-<listLabelElem(elem=label,elemType=labelType,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <%
-<if(label)>
-int <label>Start<elementIndex> = CharIndex;<\n>
-int <label>StartLine<elementIndex> = Line;<\n>
-int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
-Match(EOF); <checkRuleBacktrackFailure()><\n>
-<labelType> <label> = new <labelType>(input, EOF, TokenChannels.Default, <label>Start<elementIndex>, CharIndex-1);<\n>
-<label>.Line = <label>StartLine<elementIndex>;<\n>
-<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
-<else>
-Match(EOF); <checkRuleBacktrackFailure()>
-<endif>
-%>
-
-// used for left-recursive rules
-recRuleDefArg()                       ::= "int <recRuleArg()>"
-recRuleArg()                          ::= "_p"
-recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
-recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if (input.LA(1) == TokenTypes.Down)
-{
-	Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
-	<children:element()>
-	Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
-}
-<else>
-Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
-<children:element()>
-Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if (!(<evalPredicate(...)>))
-{
-	<ruleBacktrackFailure()>
-	throw new FailedPredicateException(input, "<ruleName>", "<description>");
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
-else
-{
-<if(eotPredictsAlt)>
-	alt<decisionNumber> = <eotPredictsAlt>;
-<else>
-	<ruleBacktrackFailure()>
-	NoViableAltException nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);
-	DebugRecognitionException(nvae);
-	<@noViableAltException()>
-	throw nvae;
-<endif>
-}
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and more like what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
->>
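As the comment above says, for (X)? the generated decision degenerates into a single lookahead test with no error branch. An illustrative, self-contained C# sketch of that expansion; the token type, lookahead buffer, and decision number are all invented for the example:

// Hypothetical, simplified expansion for an optional subrule (ID)? at decision 3.
class OptionalDecisionSketch
{
    const int ID = 4;                          // made-up token type
    readonly int[] lookahead = { 4, 9 };       // made-up token buffer
    int LA(int k) { return lookahead[k - 1]; } // stand-in for input.LA(k)

    public int Predict()
    {
        int alt3 = 2;                          // default: the implicit empty alternative
        int LA3_1 = LA(1);
        if (LA3_1 == ID)                       // dfaEdge: the only real edge
            alt3 = 1;                          // dfaAcceptState: take the (ID) branch
        return alt3;                           // no else clause, so no error is possible
    }
}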
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse "><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-alt<decisionNumber> = <eotPredictsAlt>;<! if no edges, don't gen ELSE !>
-<else>
-else
-{
-	alt<decisionNumber> = <eotPredictsAlt>;
-}<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ((<labelExpr>)<if(predicates)> && (<predicates>)<endif>)
-{
-	<targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch (input.LA(<k>))
-{
-<edges; separator="\n">
-default:
-<if(eotPredictsAlt)>
-	alt<decisionNumber>=<eotPredictsAlt>;
-	break;<\n>
-<else>
-	{
-		<ruleBacktrackFailure()>
-		NoViableAltException nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);
-		DebugRecognitionException(nvae);
-		<@noViableAltException()>
-		throw nvae;
-	}
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch (input.LA(<k>))
-{
-<edges; separator="\n">
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch (input.LA(<k>))
-{
-<edges; separator="\n">
-<if(eotPredictsAlt)>
-default:
-	alt<decisionNumber>=<eotPredictsAlt>;
-	break;<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{it|case <it>:}; separator="\n">
-	{
-	<targetState>
-	}
-	break;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-try
-{
-	alt<decisionNumber> = dfa<decisionNumber>.Predict(input);
-}
-catch (NoViableAltException nvae)
-{
-	DebugRecognitionException(nvae);
-	throw;
-}
->>
-
-/* Dump DFA tables as run-length-encoded Strings of octal values.
- * Can't use hex as compiler translates them before compilation.
- * These strings are split into multiple, concatenated strings.
- * Java puts them back together at compile time thankfully.
- * Java cannot handle large static arrays, so we're stuck with this
- * encode/decode approach.  See analysis and runtime DFA for
- * the encoding methods.
- */
-cyclicDFA(dfa) ::= <<
-private class DFA<dfa.decisionNumber> : DFA
-{
-	private const string DFA<dfa.decisionNumber>_eotS =
-		"<dfa.javaCompressedEOT; wrap="\"+\n\t\t\"">";
-	private const string DFA<dfa.decisionNumber>_eofS =
-		"<dfa.javaCompressedEOF; wrap="\"+\n\t\t\"">";
-	private const string DFA<dfa.decisionNumber>_minS =
-		"<dfa.javaCompressedMin; wrap="\"+\n\t\t\"">";
-	private const string DFA<dfa.decisionNumber>_maxS =
-		"<dfa.javaCompressedMax; wrap="\"+\n\t\t\"">";
-	private const string DFA<dfa.decisionNumber>_acceptS =
-		"<dfa.javaCompressedAccept; wrap="\"+\n\t\t\"">";
-	private const string DFA<dfa.decisionNumber>_specialS =
-		"<dfa.javaCompressedSpecial; wrap="\"+\n\t\t\"">}>";
-	private static readonly string[] DFA<dfa.decisionNumber>_transitionS =
-		{
-			<dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
-		};
-
-	private static readonly short[] DFA<dfa.decisionNumber>_eot = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eotS);
-	private static readonly short[] DFA<dfa.decisionNumber>_eof = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eofS);
-	private static readonly char[] DFA<dfa.decisionNumber>_min = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
-	private static readonly char[] DFA<dfa.decisionNumber>_max = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
-	private static readonly short[] DFA<dfa.decisionNumber>_accept = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
-	private static readonly short[] DFA<dfa.decisionNumber>_special = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_specialS);
-	private static readonly short[][] DFA<dfa.decisionNumber>_transition;
-
-	static DFA<dfa.decisionNumber>()
-	{
-		int numStates = DFA<dfa.decisionNumber>_transitionS.Length;
-		DFA<dfa.decisionNumber>_transition = new short[numStates][];
-		for ( int i=0; i \< numStates; i++ )
-		{
-			DFA<dfa.decisionNumber>_transition[i] = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_transitionS[i]);
-		}
-	}
-
-	public DFA<dfa.decisionNumber>( BaseRecognizer recognizer<if(dfa.specialStateSTs)>, SpecialStateTransitionHandler specialStateTransition<endif> )
-<if(dfa.specialStateSTs)>
-		: base(specialStateTransition)
-<endif>
-	{
-		this.recognizer = recognizer;
-		this.decisionNumber = <dfa.decisionNumber>;
-		this.eot = DFA<dfa.decisionNumber>_eot;
-		this.eof = DFA<dfa.decisionNumber>_eof;
-		this.min = DFA<dfa.decisionNumber>_min;
-		this.max = DFA<dfa.decisionNumber>_max;
-		this.accept = DFA<dfa.decisionNumber>_accept;
-		this.special = DFA<dfa.decisionNumber>_special;
-		this.transition = DFA<dfa.decisionNumber>_transition;
-	}
-
-	public override string Description { get { return "<dfa.description>"; } }
-
-	public override void Error(NoViableAltException nvae)
-	{
-		DebugRecognitionException(nvae);
-	}
-}<\n>
-<if(dfa.specialStateSTs)>
-private int SpecialStateTransition<dfa.decisionNumber>(DFA dfa, int s, IIntStream _input)<! throws NoViableAltException!>
-{
-	<if(LEXER)>
-	IIntStream input = _input;
-	<endif>
-	<if(PARSER)>
-	ITokenStream input = (ITokenStream)_input;
-	<endif>
-	<if(TREE_PARSER)>
-	ITreeNodeStream input = (ITreeNodeStream)_input;
-	<endif>
-	int _s = s;
-	switch (s)
-	{
-	<dfa.specialStateSTs:{state |
-	case <i0>:<! compressed special state numbers 0..n-1 !>
-		<state>}; separator="\n">
-	}
-<if(backtracking)>
-	if (state.backtracking > 0) {state.failed=true; return -1;}
-<endif>
-	NoViableAltException nvae = new NoViableAltException(dfa.Description, <dfa.decisionNumber>, _s, input);
-	dfa.Error(nvae);
-	throw nvae;
-}
-<endif>
->>
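The DFA<n>_*S strings above are assumed to be run-length encoded as (count, value) character pairs that DFA.UnpackEncodedString expands back into short[] tables in the static constructor. A simplified C# sketch of that decoding idea, not the actual Antlr3.Runtime method:

using System.Collections.Generic;

static class RunLengthSketch
{
    // Each pair of characters is interpreted as (run length, value).
    public static short[] Unpack(string encoded)
    {
        var data = new List<short>();
        for (int i = 0; i + 1 < encoded.Length; i += 2)
        {
            int count = encoded[i];
            short value = (short)encoded[i + 1];
            for (int j = 0; j < count; j++)
                data.Add(value);
        }
        return data.ToArray();
    }
    // Example: "\u0003\u0001\u0002\u0005" unpacks to { 1, 1, 1, 5, 5 }.
}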
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
-<if(semPredState)>
-<! get next lookahead symbol to test edges, then rewind !>
-<\n>int index<decisionNumber>_<stateNumber> = input.Index;
-input.Rewind();
-<endif>
-s = -1;
-<edges; separator="\nelse ">
-<if(semPredState)>
-<! return input cursor to state before we rewound !>
-<\n>input.Seek(index<decisionNumber>_<stateNumber>);
-<endif>
-if (s >= 0) return s;
-break;
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ((<labelExpr>)<if(predicates)> && (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-s = <targetStateNumber>;<\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left>&&<right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(...)>)"
-
-evalPredicate(pred,description) ::= "(<pred>)"
-
-evalSynPredicate(pred,description) ::= "EvaluatePredicate(<pred>_fragment)"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
-(LA<decisionNumber>_<stateNumber><ge()><lower> && LA<decisionNumber>_<stateNumber><le()><upper>)
-%>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)<ge()><lower> && input.LA(<k>)<le()><upper>)"
-
-le() ::= "\<="
-ge() ::= ">="
-
-setTest(ranges) ::= <<
-<ranges; separator="||">
->>
-
-// A T T R I B U T E S
-
-attributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected sealed partial class <scope.name>_scope
-{
-	<scope.attributes:{it|public <it.decl>;}; separator="\n">
-
-	public <scope.name>_scope(<grammar.recognizerName> grammar) { OnCreated(grammar); }
-	partial void OnCreated(<grammar.recognizerName> grammar);
-}
-<if(scope.actions.scopeinit)>
-protected void <scope.name>_scopeInit( <scope.name>_scope scope )
-{
-	<scope.actions.scopeinit>
-}
-<else>
-partial void <scope.name>_scopeInit( <scope.name>_scope scope );
-<endif>
-<if(scope.actions.scopeafter)>
-protected void <scope.name>_scopeAfter( <scope.name>_scope scope )
-{
-	<scope.actions.scopeafter>
-}
-<else>
-partial void <scope.name>_scopeAfter( <scope.name>_scope scope );
-<endif>
-protected readonly ListStack\<<scope.name>_scope\> <scope.name>_stack = new ListStack\<<scope.name>_scope\>();
-<endif>
->>
-
-globalAttributeScope(scope) ::= <<
-<attributeScope(...)>
->>
-
-ruleAttributeScope(scope) ::= <<
-<attributeScope(...)>
->>
-
-returnStructName(it) ::= "<it.name>_return"
-
-returnType(ruleDescriptor) ::= <%
-<if(ruleDescriptor.returnScope.attributes && ruleDescriptor.hasMultipleReturnValues)>
-	<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
-<elseif(ruleDescriptor.hasMultipleReturnValues)>
-	<ruleReturnBaseType()>
-<elseif(ruleDescriptor.hasSingleReturnValue)>
-	<ruleDescriptor.singleValueReturnType>
-<else>
-	void
-<endif>
-%>
-
-/** Generate the C# type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <%
-<if(referencedRule.returnScope.attributes&&referencedRule.hasMultipleReturnValues)>
-	<referencedRule.grammar.recognizerName>.<referencedRule:returnStructName()>
-<elseif(referencedRule.hasMultipleReturnValues)>
-	<ruleReturnBaseType()>
-<elseif(referencedRule.hasSingleReturnValue)>
-	<referencedRule.singleValueReturnType>
-<else>
-	void
-<endif>
-%>
-
-delegateName(it) ::= <<
-<if(it.label)><it.label><else>g<it.name><endif>
->>
-
-/** Using a type-to-init-value map, try to init a type; if it is not in the
- *  table it must be an object, so the default value is "null".
- */
-initValue(typeName) ::= <<
-default(<typeName>)
->>
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <%
-<ruleLabelType(label.referencedRule)> <label.label.text> = <initValue(ruleLabelType(label.referencedRule))>;
-%>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(scope.attributes && ruleDescriptor.hasMultipleReturnValues)>
-<returnScopeModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> sealed partial class <ruleDescriptor:returnStructName()> : <ruleReturnBaseType()><@ruleReturnInterfaces()>
-{
-	<scope.attributes:{it|public <it.decl>;}; separator="\n">
-	<@ruleReturnMembers()>
-}
-<endif>
->>
-
-ruleReturnBaseType() ::= <%
-<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope\<<labelType>>
-%>
-
-@returnScope.ruleReturnMembers() ::= <<
-public <ruleDescriptor:returnStructName()>(<grammar.recognizerName> grammar) {OnCreated(grammar);}
-partial void OnCreated(<grammar.recognizerName> grammar);
->>
-
-parameterScope(scope) ::= <<
-<scope.attributes:{it|<it.decl>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= <<
-<attr.name; format="id">
->>
-
-parameterSetAttributeRef(attr,expr) ::= <<
-<attr.name; format="id"> =<expr>;
->>
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <%
-<if(negIndex)>
-<scope>_stack[<scope>_stack.Count - <negIndex> - 1].<attr.name; format="id">
-<else>
-<if(index)>
-<scope>_stack[<index>].<attr.name; format="id">
-<else>
-<scope>_stack.Peek().<attr.name; format="id">
-<endif>
-<endif>
-%>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
-<if(negIndex)>
-<scope>_stack[<scope>_stack.Count - <negIndex> - 1].<attr.name; format="id"> = <expr>;
-<else>
-<if(index)>
-<scope>_stack[<index>].<attr.name; format="id"> = <expr>;
-<else>
-<scope>_stack.Peek().<attr.name; format="id"> = <expr>;
-<endif>
-<endif>
-%>
-
-/** $x is either a global scope or x is a rule with a dynamic scope; refers
- *  to the stack itself, not the top of the stack.  This is useful for predicates
- *  like {$function.Count>0 && $function::name.Equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <%
-<if(referencedRule.hasMultipleReturnValues)>
-(<scope>!=null?<scope>.<attr.name; format="id">:<initValue(attr.type)>)
-<else>
-<scope>
-<endif>
-%>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name; format="id">
-<else>
-<attr.name; format="id">
-<endif>
-%>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name; format="id"> =<expr>;
-<else>
-<attr.name; format="id"> =<expr>;
-<endif>
-%>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next ones are the right approach
-
-tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.Text:null)"
-tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.Type:0)"
-tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.Line:0)"
-tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.CharPositionInLine:0)"
-tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.Channel:0)"
-tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.TokenIndex:0)"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int.Parse(<scope>.Text):0)"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.Start):default(<labelType>))"
-ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.Stop):default(<labelType>))"
-ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?((<ASTLabelType>)<scope>.Tree):default(<ASTLabelType>))"
-ruleLabelPropertyRef_text(scope,attr) ::= <%
-<if(TREE_PARSER)>
-(<scope>!=null?(input.TokenStream.ToString(
-  input.TreeAdaptor.GetTokenStartIndex(<scope>.Start),
-  input.TreeAdaptor.GetTokenStopIndex(<scope>.Start))):null)
-<else>
-(<scope>!=null?input.ToString(<scope>.Start,<scope>.Stop):null)
-<endif>
-%>
-
-ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?<scope>.Template:null)"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::=
-    "(<scope>!=null?<scope>.Type:0)"
-
-lexerRuleLabelPropertyRef_line(scope,attr) ::=
-    "(<scope>!=null?<scope>.Line:0)"
-
-lexerRuleLabelPropertyRef_pos(scope,attr) ::=
-    "(<scope>!=null?<scope>.CharPositionInLine:-1)"
-
-lexerRuleLabelPropertyRef_channel(scope,attr) ::=
-    "(<scope>!=null?<scope>.Channel:0)"
-
-lexerRuleLabelPropertyRef_index(scope,attr) ::=
-    "(<scope>!=null?<scope>.TokenIndex:0)"
-
-lexerRuleLabelPropertyRef_text(scope,attr) ::=
-    "(<scope>!=null?<scope>.Text:null)"
-
-lexerRuleLabelPropertyRef_int(scope,attr) ::=
-    "(<scope>!=null?int.Parse(<scope>.Text):0)"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "retval.Start"
-rulePropertyRef_stop(scope,attr) ::= "retval.Stop"
-rulePropertyRef_tree(scope,attr) ::= "retval.Tree"
-rulePropertyRef_text(scope,attr) ::= <%
-<if(TREE_PARSER)>
-input.TokenStream.ToString(
-  input.TreeAdaptor.GetTokenStartIndex(retval.Start),
-  input.TreeAdaptor.GetTokenStopIndex(retval.Start))
-<else>
-input.ToString(retval.Start,input.LT(-1))
-<endif>
-%>
-rulePropertyRef_st(scope,attr) ::= "retval.Template"
-
-lexerRulePropertyRef_text(scope,attr) ::= "Text"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
-lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
-lexerRulePropertyRef_int(scope,attr) ::= "int.Parse(<scope>.Text)"
-
-// setting $st and $tree is allowed in local rule. everything else
-// is flagged as error
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.Tree = <expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.Template =<expr>;"
-
-/** How to execute an action (only when not backtracking) */
-execAction(action) ::= <%
-<if(backtracking)>
-if (<actions.(actionScope).synpredgate>)<\n>
-{<\n>
-<@indentedAction()><\n>
-}
-<else>
-<action>
-<endif>
-%>
-
-@execAction.indentedAction() ::= <<
-	<action>
->>
-
-/** How to always execute an action even when backtracking */
-execForcedAction(action) ::= "<action>"
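The gate in execAction is why user actions stay silent while the parser is speculating: when the grammar uses backtracking, each action is wrapped in a check of the synpred gate (conventionally state.backtracking == 0), whereas execForcedAction emits the action bare. A hypothetical C# sketch of what that wrapping looks like around a simple user action:

class ActionGateSketch
{
    class RecognizerStateSketch { public int backtracking; }
    readonly RecognizerStateSketch state = new RecognizerStateSketch();

    // Hypothetical spot in a generated rule where the grammar author wrote
    // an action such as { System.Console.WriteLine("matched x"); }.
    void AfterMatchingX()
    {
        if (state.backtracking == 0)                   // <actions.(actionScope).synpredgate>
        {
            System.Console.WriteLine("matched x");     // <action>, indented by the region override
        }
    }
}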
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-public static readonly BitSet <name> = new BitSet(new ulong[]{<words64:{it|<it>UL};separator=",">});
->>
-
-codeFileExtension() ::= ".cs"
-
-true_value() ::= "true"
-false_value() ::= "false"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/Dbg.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/Dbg.stg
deleted file mode 100644
index 3841a8f..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/Dbg.stg
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2005-2008 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-/** Template overrides to add debugging to normal Java output;
- *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
- */
-
-@outputFile.imports() ::= <<
-<@super.imports()>
-using Antlr.Runtime.Debug;
-using IOException = System.IO.IOException;
->>
-
-@genericParser.members() ::= <<
-<if(grammar.grammarIsRoot)>
-public static readonly string[] ruleNames =
-	new string[]
-	{
-		"invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n	", separator=", ">
-	};<\n>
-<endif>
-<if(grammar.grammarIsRoot)><! grammar imports other grammar(s) !>
-	int ruleLevel = 0;
-	public virtual int RuleLevel { get { return ruleLevel; } }
-	public virtual void IncRuleLevel() { ruleLevel++; }
-	public virtual void DecRuleLevel() { ruleLevel--; }
-<if(profile)>
-	<ctorForProfilingRootGrammar()>
-<else>
-	<ctorForRootGrammar()>
-<endif>
-<ctorForPredefinedListener()>
-<else><! imported grammar !>
-	public int RuleLevel { get { return <grammar.delegators:{g| <g:delegateName()>}>.RuleLevel; } }
-	public void IncRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.IncRuleLevel(); }
-	public void DecRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.DecRuleLevel(); }
-	<ctorForDelegateGrammar()>
-<endif>
-<if(profile)>
-public override bool AlreadyParsedRule( IIntStream input, int ruleIndex )
-{
-	int stopIndex = GetRuleMemoization(ruleIndex, input.Index);
-	((Profiler)dbg).ExamineRuleMemoization(input, ruleIndex, stopIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
-	return base.AlreadyParsedRule(input, ruleIndex);
-}<\n>
-public override void Memoize( IIntStream input, int ruleIndex, int ruleStartIndex )
-{
-	((Profiler)dbg).Memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
-	base.Memoize(input, ruleIndex, ruleStartIndex);
-}<\n>
-<endif>
-protected virtual bool EvalPredicate( bool result, string predicate )
-{
-	dbg.SemanticPredicate( result, predicate );
-	return result;
-}<\n>
->>
-
-ctorForRootGrammar() ::= <<
-<! bug: can't use <@super.members()> cut-n-paste instead !>
-<! Same except we add port number and profile stuff if root grammar !>
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input )
-	: this( input, DebugEventSocketProxy.DefaultDebuggerPort, new RecognizerSharedState() )
-{
-}
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, int port, RecognizerSharedState state )
-	: base( input, state )
-{
-	<parserCtorBody()>
-	<createListenerAndHandshake()>
-	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>( input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
-	<@finally()>
-}<\n>
->>
-
-ctorForProfilingRootGrammar() ::= <<
-<! bug: can't use <@super.members()> cut-n-paste instead !>
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input )
-	: this( input, new Profiler(null), new RecognizerSharedState() )
-{
-}
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state )
-	: base( input, dbg, state )
-{
-	Profiler p = (Profiler)dbg;
-	p.setParser(this);
-	<parserCtorBody()>
-	<grammar.directDelegates:
-	 {g|<g:delegateName()> = new <g.recognizerName>( input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
-	<@finally()>
-}
-<\n>
->>
-
-/** Basically we don't want to set any dbg listeners as the root will have it. */
-ctorForDelegateGrammar() ::= <<
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
-	: base( input, dbg, state )
-{
-	<parserCtorBody()>
-	<grammar.directDelegates:
-	 {g|<g:delegateName()> = new <g.recognizerName>( input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
-}<\n>
->>
-
-ctorForPredefinedListener() ::= <<
-<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg )
-	<@superClassRef>: base( input, dbg, new RecognizerSharedState() )<@end>
-{
-<if(profile)>
-	Profiler p = (Profiler)dbg;
-	p.setParser(this);
-<endif>
-	<parserCtorBody()>
-	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
-	<@finally()>
-}<\n>
->>
-
-createListenerAndHandshake() ::= <<
-<if(TREE_PARSER)>
-DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, input.TreeAdaptor );<\n>
-<else>
-DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, null );<\n>
-<endif>
-DebugListener = proxy;
-try
-{
-	proxy.Handshake();
-}
-catch ( IOException ioe )
-{
-	ReportError( ioe );
-}
->>
-
-@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
-
-/*
- * Many of the following rules were merged into CSharp3.stg.
- */
-
-@rule.preamble() ::= <<
-if (RuleLevel == 0)
-	DebugListener.Commence();
-IncRuleLevel();
->>
-//@rule.preamble() ::= <<
-//try
-//{
-//	dbg.EnterRule( GrammarFileName, "<ruleName>" );
-//	if ( RuleLevel == 0 )
-//	{
-//		dbg.Commence();
-//	}
-//	IncRuleLevel();
-//	dbg.Location( <ruleDescriptor.tree.line>, <ruleDescriptor.tree.charPositionInLine> );<\n>
-//>>
-
-@rule.postamble() ::= <<
-DecRuleLevel();
-if (RuleLevel == 0)
-	DebugListener.Terminate();
->>
-//@rule.postamble() ::= <<
-//dbg.Location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);<\n>
-//}
-//finally
-//{
-//	dbg.ExitRule( GrammarFileName, "<ruleName>" );
-//	DecRuleLevel();
-//	if ( RuleLevel == 0 )
-//	{
-//		dbg.Terminate();
-//	}
-//}<\n>
-//>>
-
-//@insertSynpreds.start() ::= "dbg.BeginBacktrack( state.backtracking );"
-//@insertSynpreds.stop() ::= "dbg.EndBacktrack( state.backtracking, success );"
-
-// Common debug event triggers used by region overrides below
-
-//enterSubRule() ::= <<
-//try
-//{
-//	dbg.EnterSubRule( <decisionNumber> );<\n>
-//>>
-
-//exitSubRule() ::= <<
-//}
-//finally
-//{
-//	dbg.ExitSubRule( <decisionNumber> );
-//}<\n>
-//>>
-
-//enterDecision() ::= <<
-//try
-//{
-//	dbg.EnterDecision( <decisionNumber> );<\n>
-//>>
-
-//exitDecision() ::= <<
-//}
-//finally
-//{
-//	dbg.ExitDecision( <decisionNumber> );
-//}<\n>
-//>>
-
-//enterAlt(n) ::= "dbg.EnterAlt( <n> );<\n>"
-
-// Region overrides that tell various constructs to add debugging triggers
-
-//@block.predecision() ::= "<enterSubRule()><enterDecision()>"
-
-//@block.postdecision() ::= "<exitDecision()>"
-
-//@block.postbranch() ::= "<exitSubRule()>"
-
-//@ruleBlock.predecision() ::= "<enterDecision()>"
-
-//@ruleBlock.postdecision() ::= "<exitDecision()>"
-
-//@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
-//@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
-//@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
-
-//@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
-
-//@positiveClosureBlock.predecision() ::= "<enterDecision()>"
-
-//@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
-
-//@positiveClosureBlock.earlyExitException() ::=
-//	"dbg.RecognitionException( eee<decisionNumber> );<\n>"
-
-//@closureBlock.preloop() ::= "<enterSubRule()>"
-
-//@closureBlock.postloop() ::= "<exitSubRule()>"
-
-//@closureBlock.predecision() ::= "<enterDecision()>"
-
-//@closureBlock.postdecision() ::= "<exitDecision()>"
-
-//@altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
-
-//@element.prematch() ::=
-//	"dbg.Location( <it.line>, <it.pos> );"
-
-//@matchSet.mismatchedSetException() ::=
-//	"dbg.RecognitionException( mse );"
-
-//@dfaState.noViableAltException() ::= "dbg.RecognitionException( nvae );"
-
-//@dfaStateSwitch.noViableAltException() ::= "dbg.RecognitionException( nvae );"
-
-//dfaDecision(decisionNumber,description) ::= <<
-//try
-//{
-//	isCyclicDecision = true;
-//	<super.dfaDecision(...)>
-//}
-//catch ( NoViableAltException nvae )
-//{
-//	dbg.RecognitionException( nvae );
-//	throw nvae;
-//}
-//>>
-
-//@cyclicDFA.errorMethod() ::= <<
-//public override void Error( NoViableAltException nvae )
-//{
-//	((DebugParser)recognizer).dbg.RecognitionException( nvae );
-//}
-//>>
-
-/** Force predicate validation to trigger an event */
-evalPredicate(pred,description) ::= <<
-EvalPredicate(<pred>, "<description>")
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Delphi/Delphi.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Delphi/Delphi.stg
deleted file mode 100644
index bbf3dfb..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Delphi/Delphi.stg
+++ /dev/null
@@ -1,1805 +0,0 @@
-/* [The "BSD license"]
- Copyright (c) 2008 Erik van Bilsen
- Copyright (c) 2007-2008 Johannes Luber
- Copyright (c) 2005-2007 Kunle Odutola
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-group Delphi;
-
-csharpTypeInitMap ::= [
-  "int":"0",
-  "uint":"0",
-  "long":"0",
-  "ulong":"0",
-  "float":"0.0",
-  "double":"0.0",
-  "bool":"False",
-  "byte":"0",
-  "sbyte":"0",
-  "short":"0",
-  "ushort":"0",
-  "char":"#0",
-  "string":"''",
-  "String":"''",
-  default:"nil" // anything other than an atomic type
-]
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- *  LEXER (Boolean): should we generate lexer code?
- *  PARSER (Boolean): should we generate parser code?
- *  TREE_PARSER (Boolean): should we generate tree parser code?
- *  actionScope (String): 'lexer', 'parser', 'tree_parser' or custom scope
- *  actions (HashMap):
- *  docComment (String): document comment
- *  recognizer (Object): recognizer class generator
- *  name (String): name of grammar
- *  tokens (HashMap<name: String, type: Integer>):
- *  tokenNames:
- *  rules:
- *  cyclicDFAs:
- *  bitsets:
- *  buildTemplate (Boolean): should we generate a string template?
- *  buildAST (Boolean): should we generate an AST?
- *  rewriteMode (Boolean): are we rewriting nodes?
- *  profile (Boolean):
- *  backtracking (Boolean): backtracking mode?
- *  synpreds (): syntactic predicates
- *  memoize (Boolean): should we memoize?
- *  numRules (Integer): number of rules
- *  fileName (String): fully qualified name of original .g file
- *  ANTLRVersion (String): ANTLR version in Major.Minor.Build format
- *  generatedTimestamp (String): date/time when the file is generated
- *  trace (Boolean): should we trace input/output?
- *  scopes:
- *  superClass (String): name of base class, or empty string
- *  literals:
- */
-outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-           docComment, recognizer,
-           name, tokens, tokenNames, rules, cyclicDFAs,
-     bitsets, buildTemplate, buildAST, rewriteMode, profile,
-     backtracking, synpreds, memoize, numRules,
-     fileName, ANTLRVersion, generatedTimestamp, trace,
-     scopes, superClass, literals) ::=
-<<
-unit <name>;
-
-{$HINTS OFF}
-
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-
-<actions.(actionScope).header>
-
-interface
-
-<@imports>
-uses<\n>
-<@end>
-  <actions.(actionScope).usesInterface>
-<if(TREE_PARSER)>
-  Antlr.Runtime.Tree,<\n>
-<endif>
-  Antlr.Runtime,
-  Antlr.Runtime.Collections,
-  Antlr.Runtime.Tools;
-
-<docComment>
-<recognizer>
->>
-
-/** Generates source code for the lexer class
- * grammar (Grammar object)
- */
-lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode, superClass="Lexer") ::= <<
-type
-  I<grammar.recognizerName> = interface(I<@superClassName><superClass><@end>)
-  end;
-
-  T<grammar.recognizerName> = class(T<@superClassName><superClass><@end>, I<grammar.recognizerName>)
-  strict private
-    FCnt: array [0..<grammar.numberOfDecisions>] of Byte;
-    FLA: array [0..<grammar.numberOfDecisions>, 0..255] of Integer;
-    FException: ERecognitionException;
-    procedure InitializeCyclicDFAs;
-  <cyclicDFAs:cyclicDFADeclaration()>
-  public
-    const
-      <tokens:{<it.name> = <it.type>;}; separator="\n">
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-  strict private
-    <actions.(actionScope).memberDeclarations>
-  public
-    // delegates
-    <grammar.delegates: {g|<g:delegateName()>: I<superClass>; {<g.recognizerName>}}; separator="\n">
-  public
-    // delegators
-    <grammar.delegators: {g|<g:delegateName()>: Pointer; {<g.recognizerName>}}; separator="\n">
-    <last(grammar.delegators):{g|gParent: Pointer; {<g.recognizerName>}}>
-  protected
-    { IBaseRecognizer }
-    function GetGrammarFileName: String; override;
-<if(filterMode)>
-    function AlreadyParsedRule(const Input: IIntStream;
-      const RuleIndex: Integer): Boolean; override;
-    procedure Memoize(const Input: IIntStream; const RuleIndex,
-      RuleStartIndex: Integer); override;
-  protected
-    { ILexer }
-    function NextToken: IToken; override;<\n>
-<endif>
-  protected
-    { ILexer }
-    procedure DoTokens; override;
-  public
-    constructor Create; overload;
-    constructor Create(const AInput: ICharStream<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
-    constructor Create(const AInput: ICharStream; const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
-
-    <rules: {r | <if(!r.ruleDescriptor.isSynPred)><lexerRuleDeclaration(r)><endif>}>
-    <synpreds:{p | <lexerSynpredDeclaration(p)>}; separator="\n">
-  end;
-
-implementation
-
-uses
-  <grammar.delegates: {g|<g.recognizerName>,}; separator="\n">
-  <grammar.delegators: {g|<g.recognizerName>,}; separator="\n">
-  <actions.(actionScope).usesImplementation>
-  SysUtils,
-  StrUtils,
-  Math;
-
-{ T<grammar.recognizerName> }
-
-constructor T<grammar.recognizerName>.Create;
-begin
-  InitializeCyclicDFAs;
-end;
-
-constructor T<grammar.recognizerName>.Create(const AInput: ICharStream<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
-begin
-  Create(AInput, nil<grammar.delegators:{g|, A<g:delegateName()>}>);
-end;
-
-constructor T<grammar.recognizerName>.Create(const AInput: ICharStream; const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
-begin
-  inherited Create(AInput, AState);
-  InitializeCyclicDFAs; { TODO: Necessary in Delphi??? Not removed yet. }
-  <if(memoize)>
-  <if(grammar.grammarIsRoot)>
-  State.RuleMemoCount := <numRules>+1;<\n> <! index from 1..n !>
-  <endif>
-  <endif>
-  <grammar.directDelegates:
-   {g|<g:delegateName()> := T<g.recognizerName>.Create(AInput, State<trunc(g.delegators):{p|, <p:delegateName()>}>, Self);}; separator="\n">
-  <grammar.delegators:
-   {g|<g:delegateName()> := Pointer(A<g:delegateName()>);}; separator="\n">
-  <last(grammar.delegators):{g|gParent := Pointer(A<g:delegateName()>);}>
-  <actions.(actionScope).memberInitializations>
-end;
-<actions.(actionScope).memberImplementations>
-function T<grammar.recognizerName>.GetGrammarFileName: String;
-begin
-  Result := '<fileName>';
-end;
-
-<if(filterMode)>
-<filteringNextToken()>
-<endif>
-
-<rules; separator="\n\n">
-<synpreds:{p | <lexerSynpred(p)>}>
-
-procedure T<grammar.recognizerName>.InitializeCyclicDFAs;
-begin
-  <cyclicDFAs:{dfa | FDFA<dfa.decisionNumber> := TDFA<dfa.decisionNumber>.Create(Self<@debugAddition()>);}; separator="\n">
-  <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>FDFA<dfa.decisionNumber>.SpecialStateTransitionHandler := DFA<dfa.decisionNumber>_SpecialStateTransition;<endif>}; separator="\n">
-end;
-
-<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-end.>>
-
-lexerRuleDeclaration(rule) ::= <<
-procedure m<rule.ruleName>(<rule.ruleDescriptor.parameterScope:parameterScope(scope=rule)>);<\n>
->>
-
-/** An override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error is reported on a failed match; just rewind, consume
- *  a token and then try again.  Backtracking needs to be set as well.
- *
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-function T<grammar.recognizerName>.NextToken: IToken;
-var
-  M: Integer;
-begin
-  while (True) do
-  begin
-    if (Input.LA(1) = Integer(cscEOF)) then
-      Exit(TToken.EOF_TOKEN);
-
-    State.Token := nil;
-    State.Channel := TToken.DEFAULT_CHANNEL;
-    State.TokenStartCharIndex := Input.Index;
-    State.TokenStartCharPositionInLine := Input.CharPositionInLine;
-    State.TokenStartLine := Input.Line;
-    State.Text := '';
-    try
-      M := Input.Mark();
-      State.Backtracking := 1; <! means we won't throw slow exception !>
-      State.Failed := False;
-      mTokens();
-      State.Backtracking := 0;
-<!
-      mTokens backtracks with synpred at backtracking==2
-            and we set the synpredgate to allow actions at level 1.
-!>
-      if (State.Failed) then
-      begin
-        Input.Rewind(M);
-        Input.Consume; <! // advance one char and try again !>
-      end
-      else
-      begin
-        Emit;
-        Exit(State.Token);
-      end;
-    except
-      on RE: ERecognitionException do
-      begin
-        // shouldn't happen in backtracking mode, but...
-        ReportError(RE);
-        Recover(RE);
-      end;
-    end;
-  end;
-end;
-
-function T<grammar.recognizerName>.AlreadyParsedRule(const Input: IIntStream;
-  const RuleIndex: Integer): Boolean;
-begin
-  if (State.Backtracking > 1) then
-    Result := inherited AlreadyParsedRule(Input, RuleIndex)
-  else
-    Result := False;
-end;
-
-procedure T<grammar.recognizerName>.Memoize(const Input: IIntStream; const RuleIndex,
-  RuleStartIndex: Integer);
-begin
-  if (State.Backtracking > 1) then
-    inherited Memoize(Input, RuleIndex, RuleStartIndex);
-end;
-
->>
-
-filteringActionGate() ::= "(State.Backtracking = 1)"
-
-/** How to generate a parser */
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass, filterMode,
-              ASTLabelType="ANTLRInterface", labelType, members, rewriteElementType) ::= <<
-type
-  <rules: {r | <genericParserRuleReturnType(rule=r, ruleDescriptor=r.ruleDescriptor)>}>
-  I<grammar.recognizerName> = interface(I<@superClassName><superClass><@end>)
-    <rules: {r | <genericParserRuleInterface(rule=r, ruleDescriptor=r.ruleDescriptor)>}>
-  end;
-
-  T<grammar.recognizerName> = class(T<@superClassName><superClass><@end>, I<grammar.recognizerName>)
-<if(grammar.grammarIsRoot)>
-  public
-    const
-      TOKEN_NAMES: array [0..<length(tokenNames)>+3] of String = (
-        '\<invalid>',
-        '\<EOR>',
-        '\<DOWN>',
-        '\<UP>',
-        <tokenNames; separator=",\n">);<\n>
-<endif>
-  public
-    const
-      <tokens:{<it.name> = <it.type>;}; separator="\n">
-  public
-    // delegates
-    <grammar.delegates: {g|<g:delegateName()>: I<superClass>; {<g.recognizerName>}}; separator="\n">
-  public
-    // delegators
-    <grammar.delegators: {g|<g:delegateName()>: Pointer; {<g.recognizerName>}}; separator="\n">
-    <last(grammar.delegators):{g|gParent: Pointer; {<g.recognizerName>}}>
-
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeDeclaration(scope=it)><endif>}>
-<@members>
-    <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-  public
-    constructor Create(const AInput: <inputStreamType><grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
-    constructor Create(const AInput: <inputStreamType>; const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
-<@end>
-  protected
-    { IBaseRecognizer }
-    function GetTokenNames: TStringArray; override;
-    function GetGrammarFileName: String; override;
-  strict private
-    <actions.(actionScope).memberDeclarations>
-  <rules: {r | <genericParserRuleDeclaration(rule=r, ruleDescriptor=r.ruleDescriptor)>}>
-
-<! generate rule/method definitions for imported rules so they
-   appear to be defined in this recognizer. !>
-    // Delegated rules
-    <grammar.delegatedRules:{ruleDescriptor| <delegatedRuleDeclaration(ruleDescriptor)>}>
-
-    <synpreds:{p | <synpredDeclaration(p)>}; separator="\n">
-  <cyclicDFAs:cyclicDFADeclaration()>
-  strict private
-    FException: ERecognitionException;
-    FLA: array [0..<grammar.numberOfDecisions>, 0..255] of Integer;
-    FCnt: array [0..<grammar.numberOfDecisions>] of Byte;
-    procedure InitializeCyclicDFAs;
-<if(bitsets)>
-  public
-    class var
-      <bitsets:bitsetDecl(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>})>
-  public
-    class procedure InitializeBitsets; static;<\n>
-<endif>
-  end;
-
-implementation
-
-uses
-  <grammar.delegates: {g|<g.recognizerName>,}; separator="\n">
-  <grammar.delegators: {g|<g.recognizerName>,}; separator="\n">
-  <actions.(actionScope).usesImplementation>
-  SysUtils,
-  StrUtils,
-  Math;
-
-{ T<grammar.recognizerName> }
-
-constructor T<grammar.recognizerName>.Create(const AInput: <inputStreamType><grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
-begin
-  Create(AInput, TRecognizerSharedState.Create<grammar.delegators:{g|, A<g:delegateName()>}>);
-end;
-
-constructor T<grammar.recognizerName>.Create(const AInput: <inputStreamType>;
-  const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
-begin
-  inherited Create(AInput, AState);
-  <@membersConstructor>
-  <@end>
-  <parserCtorBody()>
-  <grammar.directDelegates:{g|<g:delegateName()> := T<g.recognizerName>.Create(Input, State<trunc(g.delegators):{p|, <p:delegateName()>}>, Self);}; separator="\n">
-  <grammar.indirectDelegates:{g | <g:delegateName()> := <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
-  <last(grammar.delegators):{g|gParent := Pointer(A<g:delegateName()>);}>
-  <rules: {r | <ruleAttributeScopeInit(scope=r.ruleDescriptor.ruleScope)>}>
-  <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-  <actions.(actionScope).memberInitializations>
-end;
-<actions.(actionScope).memberImplementations>
-
-<grammar.delegatedRules:{ruleDescriptor| <delegatedRuleImplementation(ruleDescriptor)>}; separator="\n">
-procedure T<grammar.recognizerName>.InitializeCyclicDFAs;
-begin
-  <cyclicDFAs:{dfa | FDFA<dfa.decisionNumber> := TDFA<dfa.decisionNumber>.Create(Self);}; separator="\n">
-  <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>FDFA<dfa.decisionNumber>.SpecialStateTransitionHandler := DFA<dfa.decisionNumber>_SpecialStateTransition;<endif>}; separator="\n">
-end;
-
-<if(bitsets)>
-class procedure T<grammar.recognizerName>.InitializeBitsets;
-begin
-  <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>
-end;
-<endif>
-
-<@membersImplementation>
- <@end>
-
-function T<grammar.recognizerName>.GetTokenNames: TStringArray;
-var
-  I: Integer;
-begin
-  SetLength(Result,Length(T<grammar.composite.rootGrammar.recognizerName>.TOKEN_NAMES));
-  for I := 0 to Length(T<grammar.composite.rootGrammar.recognizerName>.TOKEN_NAMES) - 1 do
-    Result[I] := T<grammar.composite.rootGrammar.recognizerName>.TOKEN_NAMES[I];
-end;
-
-function T<grammar.recognizerName>.GetGrammarFileName: String;
-begin
-  Result := '<fileName>';
-end;
-
-<rules; separator="\n\n">
-<synpreds:{p | <synpred(p)>}>
-
-<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-<if(bitsets)>
-initialization
-  T<grammar.recognizerName>.InitializeBitsets;<\n>
-<endif>
-end.>>
-
-delegatedRuleDeclaration(ruleDescriptor) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-function <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): I<returnType()>;<\n>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-function <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): <returnType()>;<\n>
-<else>
-procedure <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);<\n>
-<endif>
-<endif>
->>
-
-delegatedRuleImplementation(ruleDescriptor) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-function T<grammar.recognizerName>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): I<returnType()>;<\n>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-function T<grammar.recognizerName>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): <returnType()>;<\n>
-<else>
-procedure T<grammar.recognizerName>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);<\n>
-<endif>
-<endif>
-begin
-  <if(ruleDescriptor.hasReturnValue)>Result :=<endif> T<ruleDescriptor.grammar.recognizerName>(<ruleDescriptor.grammar:delegateName()>.Implementor).<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">);
-end;
-
->>
-
-parserCtorBody() ::= <<
-InitializeCyclicDFAs;
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-State.RuleMemoCount := <length(grammar.allImportedRules)>+1;<\n> <! index from 1..n !>
-<endif>
-<endif>
-<grammar.delegators: {g|<g:delegateName()> := Pointer(A<g:delegateName()>);}; separator="\n">
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="ITokenStream", rewriteElementType="Token", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="object", superClass="TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
-<genericParser(inputStreamType="ITreeNodeStream", rewriteElementType="Node", ...)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  nor parameters, etc., just give the simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-// $ANTLR start "<ruleName>"
-procedure T<grammar.recognizerName>.<ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);
-var
-  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
-  <ruleLabelDefVars()>
-begin
-  <ruleLabelDefs()>
-<if(trace)>
-  TraceIn('<ruleName>_fragment', <ruleDescriptor.index>);
-  try
-    <block>
-  finally
-    TraceOut('<ruleName>_fragment', <ruleDescriptor.index>);
-  end;
-<else>
-  <block>
-<endif>
-end;
-// $ANTLR end "<ruleName>"
->>
-
-synpredDecls(name) ::= <<
-SynPredPointer <name>;<\n>
->>
-
-synpred(name) ::= <<
-
-function T<grammar.recognizerName>.<name>: Boolean;
-var
-  Start: Integer;
-  Success: Boolean;
-begin
-  State.Backtracking := State.Backtracking + 1;
-  <@start()>
-  Start := Input.Mark;
-  try
-    <name>_fragment(); // can never throw exception
-  except
-    on RE: ERecognitionException do
-      WriteLn('Impossible: ' + RE.ToString);
-  end;
-  Success := not State.Failed;
-  Input.Rewind(Start);
-  <@stop()>
-  State.Backtracking := State.Backtracking - 1;
-  State.Failed := False;
-  Result := Success;
-end;<\n>
->>
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-lexerSynpredDeclaration(name) ::= <<
-function <name>: Boolean;
-procedure <name>_fragment;
->>
-
-synpredDeclaration(name) ::= <<
-function <name>: Boolean;
-procedure <name>_fragment;
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if ((State.Backtracking > 0) and AlreadyParsedRule(Input, <ruleDescriptor.index>)) then
-  Exit(<ruleReturnValue()>);
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)><\n>if (State.Failed) then Exit(<ruleReturnValue()>);<\n><endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (State.Backtracking > 0) then
-begin
-  State.Failed := True;
-  Exit(<ruleReturnValue()>);
-end;<endif>
->>
-
-genericParserRuleDeclaration(rule, ruleDescriptor) ::= <<
-<if(ruleDescriptor.isSynPred)>
-<else>
-<ruleAttributeScopeDeclaration(scope=ruleDescriptor.ruleScope)>
-<returnScopeDeclaration(scope=ruleDescriptor.returnScope)>
-public
-<if(ruleDescriptor.hasMultipleReturnValues)>
-  function <rule.ruleName>: I<returnType()>;<\n>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-  function <rule.ruleName>: <returnType()>;<\n>
-<else>
-  procedure <rule.ruleName>;<\n>
-<endif>
-<endif>
-<endif>
->>
-
-genericParserRuleInterface(rule, ruleDescriptor) ::= <<
-<if(ruleDescriptor.isSynPred)>
-<else>
-<if(ruleDescriptor.hasMultipleReturnValues)>
-function <rule.ruleName>: I<returnType()>;<\n>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-function <rule.ruleName>: <returnType()>;<\n>
-<else>
-procedure <rule.ruleName>;<\n>
-<endif>
-<endif>
-<endif>
->>
-
-genericParserRuleReturnType(rule, ruleDescriptor) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(ruleDescriptor.isSynPred)>
-<else>
-I<returnType()> = interface(I<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope)
-end;<\n>
-<endif>
-<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(scope=ruleDescriptor.returnScope)>
-
-// $ANTLR start "<ruleName>"
-(* <fileName>:<description> *)
-<if(ruleDescriptor.hasMultipleReturnValues)>
-function T<grammar.recognizerName>.<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): I<returnType()>;
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-function T<grammar.recognizerName>.<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): <returnType()>;
-<else>
-procedure T<grammar.recognizerName>.<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);
-<endif>
-<endif>
-
-var
-<ruleDescriptor.actions.vars>
-  Locals: TLocalStorage;
-<if(ruleDescriptor.hasMultipleReturnValues)>
-  RetVal: I<returnType()>;<\n>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-  RetVal: <returnType()>;<\n>
-<else>
-<endif>
-<endif>
-  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
-  <ruleDeclarationVars()>
-  <ruleLabelDefVars()>
-begin
-  Locals.Initialize;
-  try
-    <if(trace)>TraceIn('<ruleName>', <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleLabelDefs()>
-    <ruleDescriptor.actions.init>
-    <@preamble()>
-    try
-      try
-        <ruleMemoization(name=ruleName)>
-        <block>
-        <ruleCleanUp()>
-        <(ruleDescriptor.actions.after):execAction()>
-<if(exceptions)>
-        <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-        <actions.(actionScope).rulecatch>
-<else>
-      except
-        on RE: ERecognitionException do
-        begin
-          ReportError(RE);
-          Recover(Input,RE);
-          <@setErrorReturnValue()>
-        end;<\n>
-<endif>
-<endif>
-<endif>
-      end;
-    finally
-      <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-      <memoize()>
-      <ruleScopeCleanUp()>
-      <finally>
-    end;
-    <@postamble()>
-  finally
-    Locals.Finalize;
-  end;
-  Exit(<ruleReturnValue()>);
-end;
-// $ANTLR end "<ruleName>"
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>)
-{
-    <e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-RetVal := T<returnType()>.Create;
-RetVal.Start := Input.LT(1);<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.name> := <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
-}>
-<endif>
-<if(memoize)>
-<ruleDescriptor.name>_StartIndex := Input.Index();
-<endif>
->>
-
-ruleDeclarationVars() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.name>: <a.type>;
-}>
-<endif>
-<if(memoize)>
-<ruleDescriptor.name>_StartIndex: Integer;
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{<it>Stack.Push(T<it>Scope.Create);}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>Stack.Push(T<it.name>Scope.Create);}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{<it>Stack.Pop();}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>Stack.Pop;}; separator="\n">
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]:{<it.label.text> := nil;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{list_<it.label.text> := nil;}; separator="\n">
-<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|<ll.label.text> := nil;}; separator="\n">
->>
-
-ruleLabelDefVars() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]:{<it.label.text>: I<labelType>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{list_<it.label.text>: IList\<IANTLRInterface\>;}; separator="\n">
-<ruleDescriptor.ruleLabels:ruleLabelDefVar(label=it); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|<ll.label.text>: <ruleLabelType(referencedRule=ll.referencedRule)>;}; separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{<it.label.text> := nil;}; separator="\n"
->
-<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{List_<it.label.text> := nil;}; separator="\n"
->
->>
-
-lexerRuleLabelDefDeclarations() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{<it.label.text>: I<labelType>;}; separator="\n"
->
-<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{List_<it.label.text>: IList;}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <<
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-RetVal
-<endif>
-<else>
-<! nil !>
-<endif>
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-RetVal.Stop := Input.LT(-1);
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if (State.Backtracking > 0) then
-  Memoize(Input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex);
-<endif>
-<endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-// $ANTLR start "<ruleName>"
-<ruleDescriptor.parameterScope>
-procedure T<grammar.recognizerName>.m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);
-var
-  <ruleDescriptor.actions.vars>
-  Locals: TLocalStorage;
-  TokenType, Channel: Integer;
-  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
-  <lexerRuleLabelDefDeclarations()>
-begin
-  Locals.Initialize;
-  try
-    <ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-    <if(trace)>TraceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    try
-<if(nakedBlock)>
-      <ruleMemoization(name=ruleName)>
-      <lexerRuleLabelDefs()>
-      <ruleDescriptor.actions.init>
-      <block><\n>
-<else>
-      TokenType := <ruleName>;
-      Channel := DEFAULT_TOKEN_CHANNEL;
-      <ruleMemoization(name=ruleName)>
-      <lexerRuleLabelDefs()>
-      <ruleDescriptor.actions.init>
-      <block>
-      <ruleCleanUp()>
-      State.TokenType := TokenType;
-      State.Channel := Channel;
-      <(ruleDescriptor.actions.after):execAction()>
-<endif>
-    finally
-      <if(trace)>TraceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-      <ruleScopeCleanUp()>
-      <memoize()>
-    end;
-  finally
-    Locals.Finalize;
-  end;
-end;
-// $ANTLR end "<ruleName>"
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-procedure T<grammar.recognizerName>.mTokens;
-var
-  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
-begin
-  <block>
-end;
-
-procedure T<grammar.recognizerName>.DoTokens;
-begin
-  mTokens;
-end;
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-(* <fileName>:<description> *)
-Alt[<decisionNumber>] := <maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-<@prebranch()>
-case Alt[<decisionNumber>] of
-  <alts:altSwitchCase()>
-end;
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-(* <fileName>:<description> *)
-Alt[<decisionNumber>] := <maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-case Alt[<decisionNumber>] of
-  <alts:altSwitchCase()>
-end;
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-(* <fileName>:<description> *)
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-(* <fileName>:<description> *)
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-(* <fileName>:<description> *)
-FCnt[<decisionNumber>] := 0;
-<decls>
-<@preloop()>
-while (True) do
-begin
-  Alt[<decisionNumber>] := <maxAlt>;
-  <@predecision()>
-  <decision>
-  <@postdecision()>
-  case Alt[<decisionNumber>] of
-    <alts:altSwitchCase()>
-  else
-    begin
-      if (FCnt[<decisionNumber>] >= 1) then
-        Break;
-      <ruleBacktrackFailure()>
-      raise EEarlyExitException.Create(<decisionNumber>, Input);
-      <@earlyExitException()>
-    end;
-  end;
-  Inc(FCnt[<decisionNumber>]);
-end;
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-(* <fileName>:<description> *)
-<decls>
-<@preloop()>
-while (True) do
-begin
-  Alt[<decisionNumber>] := <maxAlt>;
-  <@predecision()>
-  <decision>
-  <@postdecision()>
-  case Alt[<decisionNumber>] of
-    <alts:altSwitchCase()>
-  else
-    Break;
-  end;
-end;
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) before code generation,
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase() ::= <<
-<i>:
-  <@prealt()>
-  <it><\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-(* <fileName>:<description> *)
-begin
-  <@declarations()>
-  <elements:element()>
-  <rew>
-  <@cleanup()>
-end;
->>
-
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element() ::= <<
-<@prematch()>
-<it.el>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label> := <endif>Match(Input, <token>, FOLLOW_<token>_in_<ruleName><elementIndex>)<if(label)> as I<labelType><endif>;<\n><checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-listLabel(label,elem) ::= <<
-if (list_<label> = nil) then list_<label> := TList\<IANTLRInterface\>.Create;
-list_<label>.Add(<elem>);<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> := Input.LA(1);<\n>
-<endif>
-Match(<char>); <checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> := Input.LA(1);<\n>
-<endif>
-MatchRange(<a>, <b>); <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,postmatchCode="") ::= <<
-<if(label)>
-<if(LEXER)>
-<label> := Input.LA(1);<\n>
-<else>
-<label> := Input.LT(1) as I<labelType>;<\n>
-<endif>
-<endif>
-if (<s>) then
-begin
-  Input.Consume;
-  <postmatchCode>
-  <if(!LEXER)>
-  State.ErrorRecovery := False;<endif>
-  <if(backtracking)>State.Failed := False;<endif>
-end
-else
-begin
-  <ruleBacktrackFailure()>
-  FException := EMismatchedSetException.Create(nil, Input);
-  <@mismatchedSetException()>
-<if(LEXER)>
-  Recover(FException);
-  raise FException;<\n>
-<else>
-  raise FException;
-  <! use following code to make it recover inline; remove throw mse;
-  RecoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
-  !>
-<endif>
-end;<\n>
->>
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex) ::= <<
-<if(label)>
-Locals.AsInteger['<label>Start'] := CharIndex;
-Match(<string>); <checkRuleBacktrackFailure()>
-<label> := TCommonToken.Create(Input, TToken.INVALID_TOKEN_TYPE, TToken.DEFAULT_CHANNEL, Locals.AsInteger['<label>Start'], CharIndex-1);
-<else>
-Match(<string>); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-wildcard(label,elementIndex) ::= <<
-<if(label)>
-<label> := Input.LT(1) as I<labelType>;<\n>
-<endif>
-MatchAny(input); <checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(label,elementIndex) ::= <<
-<wildcard(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> := Input.LA(1);<\n>
-<endif>
-MatchAny(); <checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.  The 'rule' argument was the
- *  target rule name, but is now of type Rule, whose toString is the
- *  same: the rule name.  Now, though, you can access the full rule
- *  descriptor.
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-PushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
-<if(label)>
-<label> := <if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
-<else>
-<if(scope)>T<scope.recognizerName>(IANTLRObject(<scope:delegateName()>).Implementor).<endif><rule.name>(<args; separator=", ">);<\n>
-<endif>
-State.FollowingStackPointer := State.FollowingStackPointer - 1;
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** A lexer rule reference.
- *
- *  The 'rule' argument was the target rule name, but is now
- *  of type Rule, whose toString is the same: the rule name.
- *  Now, though, you can access the full rule descriptor.
- */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
-<if(label)>
-Locals.AsInteger['<label>Start<elementIndex>'] := CharIndex;
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<label> := TCommonToken.Create(Input, TToken.INVALID_TOKEN_TYPE, TToken.DEFAULT_CHANNEL,
-  Locals.AsInteger['<label>Start<elementIndex>'], CharIndex - 1);
-<else>
-<if(scope)>(<scope:delegateName()>.Implementor as T<scope.recognizerName>).<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-Locals.AsInteger['<label>Start<elementIndex>'] := CharIndex;
-Match(EOF); <checkRuleBacktrackFailure()>
-Locals['<label>'] := TCommonToken.Create(Input, EOF, TToken.DEFAULT_CHANNEL, Locals.AsInteger['<label>Start<elementIndex>'], CharIndex-1);
-<else>
-Match(EOF); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if (Input.LA(1) = TToken.DOWN) then
-begin
-  Match(Input, TToken.DOWN, nil); <checkRuleBacktrackFailure()>
-  <children:element()>
-  Match(Input, TToken.UP, nil); <checkRuleBacktrackFailure()>
-end;
-<else>
-Match(Input, TToken.DOWN, nil); <checkRuleBacktrackFailure()>
-<children:element()>
-Match(Input, TToken.UP, nil);<\n><checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if (not (<evalPredicate(...)>)) then
-begin
-  <ruleBacktrackFailure()>
-  raise EFailedPredicateException.Create(Input, '<ruleName>', '<description>');
-end;<\n>
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-FLA[<decisionNumber>,<stateNumber>] := Input.LA(<k>);<\n>
-<edges; separator="\nelse ">
-else
-begin
-<if(eotPredictsAlt)>
-  Alt[<decisionNumber>] := <eotPredictsAlt>;<\n>
-<else>
-  <ruleBacktrackFailure()>
-  raise ENoViableAltException.Create('<description>', <decisionNumber>, <stateNumber>, Input);<\n>
-<endif>
-end;
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and more like what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-FLA[<decisionNumber>,<stateNumber>] := Input.LA(<k>);<\n>
-<edges; separator="\nelse ">;
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* at the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-FLA[<decisionNumber>,<stateNumber>] := Input.LA(<k>);
-<edges; separator="\nelse ">;<\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-Alt[<decisionNumber>] := <eotPredictsAlt>; <! if no edges, don't gen ELSE !>
-<else>
-else
-begin
-  Alt[<decisionNumber>] := <eotPredictsAlt>;
-end;<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "Alt[<decisionNumber>] := <alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ((<labelExpr>)<if(predicates)> and (<predicates>)<endif>) then
-begin
-  <targetState>
-end <! no ; here !>
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-case Input.LA(<k>) of
-  <edges; separator="\n">
-else
-  begin
-<if(eotPredictsAlt)>
-    Alt[<decisionNumber>] := <eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    <@noViableAltException()>
-    raise ENoViableAltException.Create('<description>', <decisionNumber>, <stateNumber>, Input);<\n>
-<endif>
-  end;
-end;<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-case Input.LA(<k>) of
-  <edges; separator="\n">
-end;<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-case Input.LA(<k>) of
-  <edges; separator="\n"><\n>
-<if(eotPredictsAlt)>
-else
-  Alt[<decisionNumber>] := <eotPredictsAlt>;<\n>
-<endif>
-end;<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{<it>}; separator=",\n">:
-  begin
-    <targetState>
-  end;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-Alt[<decisionNumber>] := FDFA<decisionNumber>.Predict(Input);
->>
-
-/* Dump DFA tables.
- */
-cyclicDFADeclaration(dfa) ::= <<
-strict protected
-  type
-    TDFA<dfa.decisionNumber> = class(TDFA)
-    protected
-      { IDFA }
-      function Description: String; override;
-    public
-      constructor Create(const ARecognizer: IBaseRecognizer);
-    end;
-  var
-    FDFA<dfa.decisionNumber>: IDFA;
-<if(dfa.specialStateSTs)>
-strict protected
-  function DFA<dfa.decisionNumber>_SpecialStateTransition(const DFA: IDFA; S: Integer;
-    const AInput: IIntStream): Integer;<endif>
->>
-
-cyclicDFA(dfa) ::= <<
-{ T<grammar.recognizerName>.TDFA<dfa.decisionNumber> }
-
-constructor T<grammar.recognizerName>.TDFA<dfa.decisionNumber>.Create(const ARecognizer: IBaseRecognizer);
-const
-  DFA<dfa.decisionNumber>_EOT = '<dfa.javaCompressedEOT; wrap="'+\n    '">';
-  DFA<dfa.decisionNumber>_EOF = '<dfa.javaCompressedEOF; wrap="'+\n    '">';
-  DFA<dfa.decisionNumber>_MIN = '<dfa.javaCompressedMin; wrap="'+\n    '">';
-  DFA<dfa.decisionNumber>_MAX = '<dfa.javaCompressedMax; wrap="'+\n    '">';
-  DFA<dfa.decisionNumber>_ACCEPT = '<dfa.javaCompressedAccept; wrap="'+\n    '">';
-  DFA<dfa.decisionNumber>_SPECIAL = '<dfa.javaCompressedSpecial; wrap="'+\n    '">';
-  DFA<dfa.decisionNumber>_TRANSITION: array [0..<length(dfa.javaCompressedTransition)>-1] of String = (
-    <dfa.javaCompressedTransition:{s|'<s; wrap="'+\n'">'}; separator=",\n">);
-begin
-  inherited Create;
-  Recognizer := ARecognizer;
-  DecisionNumber := <dfa.decisionNumber>;
-  EOT := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_EOT);
-  EOF := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_EOF);
-  Min := TDFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_MIN);
-  Max := TDFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_MAX);
-  Accept := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_ACCEPT);
-  Special := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_SPECIAL);
-  Transition := TDFA.UnpackEncodedStringArray(DFA<dfa.decisionNumber>_TRANSITION);
-end;
-
-function T<grammar.recognizerName>.TDFA<dfa.decisionNumber>.Description: String;
-begin
-  Result := '<dfa.description>';
-end;<\n>
-<if(dfa.specialStateSTs)>
-function T<grammar.recognizerName>.DFA<dfa.decisionNumber>_SpecialStateTransition(const DFA: IDFA; S: Integer;
-  const AInput: IIntStream): Integer;
-var
-  Locals: TLocalStorage;
-  <if(LEXER)>
-  Input: IIntStream;
-  <endif>
-  <if(PARSER)>
-  Input: ITokenStream;
-  <endif>
-  <if(TREE_PARSER)>
-  Input: ITreeNodeStream;
-  <endif>
-  _S: Integer;
-  NVAE: ENoViableAltException;
-begin
-  Result := -1;
-  Locals.Initialize;
-  try
-    <if(LEXER)>
-    Input := AInput;
-    <endif>
-    <if(PARSER)>
-    Input := AInput as ITokenStream;
-    <endif>
-    <if(TREE_PARSER)>
-    Input := AInput as ITreeNodeStream;
-    <endif>
-    _S := S;
-    case S of
-      <dfa.specialStateSTs:{state | <i0>: begin<! compressed special state numbers 0..n-1 !>
-     <state> <\n>   end;}; separator="\n">
-    end;
-<if(backtracking)>
-    if (State.Backtracking > 0) then
-    begin
-      State.Failed := True;
-      Exit(-1);
-    end;<\n>
-<endif>
-    NVAE := ENoViableAltException.Create(DFA.Description, <dfa.decisionNumber>, _S, Input);
-    DFA.Error(NVAE);
-    raise NVAE;
-  finally
-    Locals.Finalize;
-  end;
-end;<\n>
-<endif>
->>
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-FLA[<decisionNumber>,<stateNumber>] := Input.LA(1);<\n>
-<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-Locals.AsInteger['index<decisionNumber>_<stateNumber>'] := Input.Index;
-Input.Rewind;<\n>
-<endif>
-S := -1;
-<edges; separator="\nelse ">;
-<if(semPredState)> <! return input cursor to state before we rewound !>
-Input.Seek(Locals.AsInteger['index<decisionNumber>_<stateNumber>']);<\n>
-<endif>
-if (S >= 0) then
-  Exit(S);
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ((<labelExpr>)<if(predicates)> and (<predicates>)<endif>) then
-  S := <targetStateNumber>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-S := <targetStateNumber>;<\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "((<left>) and (<right>))"
-
-orPredicates(operands) ::= "((<first(operands)>)<rest(operands):{o | or (<o>)}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(...)>)"
-
-evalPredicate(pred,description) ::= "(<pred>)"
-
-evalSynPredicate(pred,description) ::= "<pred>()"
-
-lookaheadTest(atom,k,atomAsInt) ::= "FLA[<decisionNumber>,<stateNumber>] = <atomAsInt>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "Input.LA(<k>) = <atomAsInt>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
-((FLA[<decisionNumber>,<stateNumber>] \>= <lowerAsInt>) and (FLA[<decisionNumber>,<stateNumber>] \<= <upperAsInt>))
->>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(Input.LA(<k>) \>= <lowerAsInt>) and (Input.LA(<k>) \<= <upperAsInt>)"
-
-setTest(ranges) ::= "<ranges; separator=\") or (\">"
-
-// A T T R I B U T E S
-
-globalAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-<scope.name>Stack := TStackList\<I<scope.name>Scope\>.Create;<\n>
-<endif>
->>
-
-globalAttributeScopeDeclaration(scope) ::= <<
-<if(scope.attributes)>
-strict protected
-  type
-    I<scope.name>Scope = interface(IANTLRObject)
-    end;
-    T<scope.name>Scope = class(TANTLRObject, I<scope.name>Scope)
-    protected
-      <scope.attributes:{<it.name>: <it.type>;}; separator="\n">
-    end;
-strict protected
-  <scope.name>Stack: IStackList\<I<scope.name>Scope\>;
-<endif>
->>
-
-ruleAttributeScopeDeclaration(scope) ::= <<
-<if(scope.attributes)>
-strict protected
-  type
-    I<scope.name>Scope = interface(IANTLRObject)
-    end;
-    T<scope.name>Scope = class(TANTLRObject, I<scope.name>Scope)
-    protected
-      <scope.attributes:{<it.name>: <it.type>;}; separator="\n">
-    end;
-strict protected
-  <scope.name>Stack: IStackList\<I<scope.name>Scope\>;
-<endif>
->>
-
-ruleAttributeScope(scope) ::= <<
-<! protected Stack <scope.name>Stack = new Stack();<\n> !>
->>
-
-ruleAttributeScopeInit(scope) ::= <<
-<if(scope)>
-<scope.name>Stack := TStackList\<I<scope.name>Scope\>.Create;<\n>
-<endif>
->>
-
-returnStructName() ::= "<it.name>_return"
-
-returnType() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor:returnStructName()>
-<! I<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope !>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-<! Pointer/void !>
-<endif>
-<endif>
->>
-
-/** Generate the Delphi type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-I<referencedRule.name>_return
-<else>
-<if(referencedRule.hasSingleReturnValue)>
-<referencedRule.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-delegateName() ::= <<
-<if(it.label)><it.label><else>g<it.name><endif>
->>
-
-/** Using a type-to-init-value map, try to init a type; if the type is not in
- *  the table it must be an object, so the default value is "null".
- */
-initValue(typeName) ::= <<
-<csharpTypeInitMap.(typeName)>
->>
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <<
-<label.label.text> := <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
->>
-
-ruleLabelDefVar(label) ::= <<
-<label.label.text>: <ruleLabelType(referencedRule=label.referencedRule)>;
->>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-{ T<ruleDescriptor:returnStructName()> }
-
-<scope.attributes:{public <it.decl>;}; separator="\n">
-<@ruleReturnMembers()>
-<endif>
->>
-
-returnScopeDeclaration(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-public
-  type
-    T<ruleDescriptor:returnStructName()> = class(T<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope, I<ruleDescriptor:returnStructName()>)
-    <scope.attributes:{public <it.decl>;}; separator="\n">
-    <@ruleReturnMembers()>
-    end;
-<endif>
->>
-
-parameterScope(scope) ::= <<
-<scope.attributes:{<it.decl>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> := <expr>;"
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <<
-<if(negIndex)>
-(<scope>Stack[<scope>Stack.Count-<negIndex>-1] as T<scope>Scope).<attr.name>
-<else>
-<if(index)>
-(<scope>Stack[<index>] as T<scope>Scope).<attr.name>
-((<scope>_scope)<scope>_stack[<index>]).<attr.name>
-<else>
-(<scope>Stack.Peek.Implementor as T<scope>Scope).<attr.name>
-<endif>
-<endif>
->>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
-<if(negIndex)>
-(<scope>Stack[<scope>Stack.Count-<negIndex>-1] as T<scope>Scope).<attr.name> := <expr>;<\n>
-<else>
-<if(index)>
-(<scope>Stack[<index>] as T<scope>Scope).<attr.name> := <expr>;<\n>
-<else>
-(<scope>Stack.Peek.Implementor as T<scope>Scope).<attr.name> := <expr>;<\n>
-<endif>
-<endif>
->>
-
-/** $x is either global scope or x is rule with dynamic scope; refers
- *  to stack itself not top of stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>Stack"
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-(IfThen(Assigned(<scope>),Def(<scope>).<attr.name>,<initValue(attr.type)>))
-<else>
-<scope>
-<endif>
->>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-RetVal.<attr.name>
-<else>
-<attr.name>
-<endif>
->>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-RetVal.<attr.name> := <expr>;
-<else>
-<attr.name> := <expr>;
-<endif>
->>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach
-
-tokenLabelPropertyRef_text(scope,attr) ::= "(Def(<scope>).Text)"
-tokenLabelPropertyRef_type(scope,attr) ::= "(Def(<scope>).TokenType)"
-tokenLabelPropertyRef_line(scope,attr) ::= "(Def(<scope>).Line)"
-tokenLabelPropertyRef_pos(scope,attr) ::= "(Def(<scope>).CharPositionInLine)"
-tokenLabelPropertyRef_channel(scope,attr) ::= "(Def(<scope>).Channel)"
-tokenLabelPropertyRef_index(scope,attr) ::= "(Def(<scope>).TokenIndex)"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-tokenLabelPropertyRef_int(scope,attr) ::= "(StrToIntDef(Def(<scope>).Text,0))"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "(IfThen(Assigned(<scope>), Def(<scope>).Start, nil) as I<labelType>)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "(Def(<scope>).Stop as I<labelType>)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "(Def(Def(<scope>).Tree as I<ASTLabelType>))"
-ruleLabelPropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-IfThen(Assigned(<scope>), Input.TokenStream.ToString(
-  Input.TreeAdaptor.GetTokenStartIndex(Def(<scope>).Start),
-  Input.TreeAdaptor.GetTokenStopIndex(Def(<scope>).Start)), '')
-<else>
-IfThen(Assigned(<scope>), Input.ToString(
-  (Def(<scope>).Start) as IToken,(Def(<scope>).Stop) as IToken), '')
-<endif>
->>
-ruleLabelPropertyRef_st(scope,attr) ::= "((<scope> != null) ? <scope>.ST : null)"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "(Def(<scope>).TokenType)"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "(Def(<scope>).Line)"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(IfThen(Assigned(<scope>),Def(<scope>).CharPositionInLine,-1))"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(Def(<scope>).Channel)"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "(Def(<scope>).TokenIndex)"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "(Def(<scope>).Text)"
-lexerRuleLabelPropertyRef_int(scope,attr) ::= "(StrToIntDef(Def(<scope>).Text,0))"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "(RetVal.Start as I<labelType>)"
-rulePropertyRef_stop(scope,attr) ::= "(RetVal.Stop as I<labelType>)"
-rulePropertyRef_tree(scope,attr) ::= "(RetVal.Tree as I<ASTLabelType>)"
-rulePropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-Input.TokenStream.ToString(
-  Input.TreeAdaptor.GetTokenStartIndex(RetVal.Start),
-  Input.TreeAdaptor.GetTokenStopIndex(RetVal.Start))
-<else>
-Input.ToString(RetVal.Start as IToken,Input.LT(-1))
-<endif>
->>
-rulePropertyRef_st(scope,attr) ::= "RetVal.ST"
-
-lexerRulePropertyRef_text(scope,attr) ::= "Text"
-lexerRulePropertyRef_type(scope,attr) ::= "TokenType"
-lexerRulePropertyRef_line(scope,attr) ::= "State.TokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "State.TokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "Channel"
-lexerRulePropertyRef_start(scope,attr) ::= "State.TokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
-lexerRulePropertyRef_int(scope,attr) ::= "StrToInt(<scope>.Text)"
-
-// setting $st and $tree is allowed in local rule. everything else
-// is flagged as error
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "RetVal.Tree := <expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "RetVal.ST := <expr>;"
-
-
-/** How to execute an action (only when not backtracking) */
-execAction(action) ::= <<
-<if(backtracking)>
-<if(actions.(actionScope).synpredgate)>
-if (<actions.(actionScope).synpredgate>) then
-begin
-  <action>
-end;
-<else>
-if (State.Backtracking = 0) then
-begin
-  <action>
-end;<\n>
-<endif>
-<else>
-<action>
-<endif>
->>
-
-
-/** How to always execute an action even when backtracking */
-execForcedAction(action) ::= "<action>"
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-<name> := TBitSet.Create([<words64:{<it>};separator=",">]);<\n>
->>
-
-bitsetDecl(name) ::= <<
-<name>: IBitSet;<\n>
->>
-
-codeFileExtension() ::= ".pas"
-
-true() ::= "True"
-false() ::= "False"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/AST.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/AST.stg
deleted file mode 100644
index 2f6671a..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/AST.stg
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-@outputFile.imports() ::= <<
-<@super.imports()>
-<if(!TREE_PARSER)><! tree parser would already have imported !>
-import org.antlr.runtime.tree.*;<\n>
-<endif>
->>
-
-@genericParser.members() ::= <<
-<@super.members()>
-<parserMembers()>
->>
-
-/** Add an adaptor property that knows how to build trees */
-parserMembers() ::= <<
-protected TreeAdaptor adaptor = new CommonTreeAdaptor();<\n>
-public void setTreeAdaptor(TreeAdaptor adaptor) {
-    this.adaptor = adaptor;
-    <grammar.directDelegates:{g|<g:delegateName()>.setTreeAdaptor(this.adaptor);}>
-}
-public TreeAdaptor getTreeAdaptor() {
-    return adaptor;
-}
->>
-
-@returnScope.ruleReturnMembers() ::= <<
-<ASTLabelType> tree;
-public Object getTree() { return tree; }
->>
-
-/** Add a variable to track rule's return AST */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-<ASTLabelType> root_0 = null;<\n>
->>
-
-ruleLabelDefs() ::= <<
-<super.ruleLabelDefs()>
-<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
-  ruleDescriptor.wildcardTreeListLabels]:{it | <ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
-<ruleDescriptor.tokenListLabels:{it | <ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
-<ruleDescriptor.allTokenRefsInAltsWithRewrites
-    :{it | RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
-<ruleDescriptor.allRuleRefsInAltsWithRewrites
-    :{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
->>
-
-/** When doing auto AST construction, we must define some variables;
- *  these should be turned off if doing rewrites.  This must be a "mode"
- *  as a rule could have both rewrite and AST within the same alternative
- *  block.
- */
-@alt.declarations() ::= <<
-<if(autoAST)>
-<if(outerAlt)>
-<if(!rewriteMode)>
-root_0 = (<ASTLabelType>)adaptor.nil();<\n>
-<endif>
-<endif>
-<endif>
->>
-
-// T r a c k i n g  R u l e  E l e m e n t s
-
-/** ID and track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)> <! Track implies no auto AST construction!>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
->>
-
-/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
- *  to the tracking list stream_ID for use in the rewrite.
- */
-tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefTrack(...)>
-<listLabel(elem=label, ...)>
->>
-
-/** ^(ID ...) track for rewrite */
-tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
->>
-
-/** Match ^(label+=TOKEN ...) track for rewrite */
-tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefRuleRootTrack(...)>
-<listLabel(elem=label, ...)>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule.name>.add(<label>.getTree());
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefTrack(...)>
-<listLabel(label, {<label>.getTree()})>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule>.add(<label>.getTree());
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRootTrack(...)>
-<listLabel(label, {<label>.getTree()})>
->>
-
-// R e w r i t e
-
-rewriteCode(
-	alts, description,
-	referencedElementsDeep, // ALL referenced elements to right of ->
-	referencedTokenLabels,
-	referencedTokenListLabels,
-	referencedRuleLabels,
-	referencedRuleListLabels,
-	referencedWildcardLabels,
-	referencedWildcardListLabels,
-	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
-<<
-
-// AST REWRITE
-// elements: <referencedElementsDeep; separator=", ">
-// token labels: <referencedTokenLabels; separator=", ">
-// rule labels: <referencedRuleLabels; separator=", ">
-// token list labels: <referencedTokenListLabels; separator=", ">
-// rule list labels: <referencedRuleListLabels; separator=", ">
-// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) {<\n>
-<endif>
-<prevRuleRootRef()>.tree = root_0;
-<rewriteCodeLabels()>
-root_0 = (<ASTLabelType>)adaptor.nil();
-<alts:rewriteAlt(); separator="else ">
-<! if tree parser and rewrite=true !>
-<if(TREE_PARSER)>
-<if(rewriteMode)>
-<prevRuleRootRef()>.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
-input.replaceChildren(adaptor.getParent(retval.start),
-                      adaptor.getChildIndex(retval.start),
-                      adaptor.getChildIndex(_last),
-                      retval.tree);
-<endif>
-<endif>
-<! if parser or tree-parser && rewrite!=true, we need to set result !>
-<if(!TREE_PARSER)>
-<prevRuleRootRef()>.tree = root_0;
-<else>
-<if(!rewriteMode)>
-<prevRuleRootRef()>.tree = root_0;
-<endif>
-<endif>
-<if(backtracking)>
-}
-<endif>
->>
-
-rewriteCodeLabels() ::= <<
-<referencedTokenLabels
-    :{it | RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
-    separator="\n"
->
-<referencedTokenListLabels
-    :{it | RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
-    separator="\n"
->
-<referencedWildcardLabels
-    :{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
-    separator="\n"
->
-<referencedWildcardListLabels
-    :{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
-    separator="\n"
->
-<referencedRuleLabels
-    :{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.tree:null);};
-    separator="\n"
->
-<referencedRuleListLabels
-    :{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>",list_<it>);};
-    separator="\n"
->
->>
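
Put together, rewriteCode plus rewriteCodeLabels expand to roughly the following shape for a `-> ^(VARDEF ID)` rewrite. This is a sketch with made-up names, assuming ASTLabelType=CommonTree and backtracking off:

    // AST REWRITE (sketch of a generated rule method fragment)
    RewriteRuleTokenStream stream_ID = new RewriteRuleTokenStream(adaptor, "token ID", ID1);
    retval.tree = root_0;
    root_0 = (CommonTree) adaptor.nil();
    {
        CommonTree root_1 = (CommonTree) adaptor.nil();
        root_1 = (CommonTree) adaptor.becomeRoot(
                     (CommonTree) adaptor.create(VARDEF, "VARDEF"), root_1);
        adaptor.addChild(root_1, stream_ID.nextNode());
        adaptor.addChild(root_0, root_1);
    }
    retval.tree = root_0;
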
-
-/** Generate code for an optional rewrite block; note it uses the deep ref'd element
-  *  list rather than the shallow list like other blocks.
-  */
-rewriteOptionalBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in the immediately enclosing block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-if ( <referencedElementsDeep:{el | stream_<el>.hasNext()}; separator="||"> ) {
-    <alt>
-}
-<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
->>
-
-rewriteClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in the immediately enclosing block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-while ( <referencedElements:{el | stream_<el>.hasNext()}; separator="||"> ) {
-    <alt>
-}
-<referencedElements:{el | stream_<el>.reset();<\n>}>
->>
-
-rewritePositiveClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in the immediately enclosing block; no nested blocks
-	description) ::=
-<<
-if ( !(<referencedElements:{el | stream_<el>.hasNext()}; separator="||">) ) {
-    throw new RewriteEarlyExitException();
-}
-while ( <referencedElements:{el | stream_<el>.hasNext()}; separator="||"> ) {
-    <alt>
-}
-<referencedElements:{el | stream_<el>.reset();<\n>}>
->>
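
For a list rewrite such as `-> ^(LIST ID+)` the closure templates drain a stream in a loop. A sketch of the generated shape for the positive-closure case (stream and root names are illustrative):

    // rewritePositiveClosureBlock sketch
    if ( !(stream_ID.hasNext()) ) {
        throw new RewriteEarlyExitException();
    }
    while ( stream_ID.hasNext() ) {
        adaptor.addChild(root_1, stream_ID.nextNode());
    }
    stream_ID.reset();
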
-
-rewriteAlt(a) ::= <<
-// <a.description>
-<if(a.pred)>
-if (<a.pred>) {
-    <a.alt>
-}<\n>
-<else>
-{
-    <a.alt>
-}<\n>
-<endif>
->>
-
-/** For empty rewrites: "r : ... -> ;" */
-rewriteEmptyAlt() ::= "root_0 = null;"
-
-rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
-// <fileName>:<description>
-{
-<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.nil();
-<root:rewriteElement()>
-<children:rewriteElement()>
-adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
-}<\n>
->>
-
-rewriteElementList(elements) ::= "<elements:rewriteElement()>"
-
-rewriteElement(e) ::= <<
-<@pregen()>
-<e.el>
->>
-
-/** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,terminalOptions,args) ::= <<
-adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
->>
-
-/** Gen $label ... where defined via label=ID */
-rewriteTokenLabelRef(label,elementIndex) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
->>
-
-/** Gen $label ... where defined via label+=ID */
-rewriteTokenListLabelRef(label,elementIndex) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
->>
-
-/** Gen ^($label ...) */
-rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);<\n>
->>
-
-/** Gen ^($label ...) where label+=... */
-rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
-
-/** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,terminalOptions,args) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);<\n>
->>
-
-rewriteImaginaryTokenRef(args,token,terminalOptions,elementIndex) ::= <<
-adaptor.addChild(root_<treeLevel>, <createImaginaryNode(token,terminalOptions,args)>);<\n>
->>
-
-rewriteImaginaryTokenRefRoot(args,token,terminalOptions,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<createImaginaryNode(token,terminalOptions,args)>, root_<treeLevel>);<\n>
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-root_0 = <action>;<\n>
->>
-
-/** What is the name of the previous value of this rule's root tree?  This
- *  lets us refer to $rule to mean the previous value.  I am reusing the
- *  variable 'tree' sitting in the retval struct to hold the value of root_0 right
- *  before I set it during rewrites.  The assignment will be to retval.tree.
- */
-prevRuleRootRef() ::= "retval"
-
-rewriteRuleRef(rule) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree());<\n>
->>
-
-rewriteRuleRefRoot(rule) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>);<\n>
->>
-
-rewriteNodeAction(action) ::= <<
-adaptor.addChild(root_<treeLevel>, <action>);<\n>
->>
-
-rewriteNodeActionRoot(action) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<action>, root_<treeLevel>);<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel=rule */
-rewriteRuleLabelRef(label) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
-rewriteRuleListLabelRef(label) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel=rule */
-rewriteRuleLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
-rewriteRuleListLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);<\n>
->>
-
-rewriteWildcardLabelRef(label) ::= <<
-adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
->>
-
-
-createImaginaryNode(tokenType,terminalOptions,args) ::= <<
-<if(terminalOptions.node)>
-<! new MethodNode(IDLabel, args) !>
-new <terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
-<else>
-(<ASTLabelType>)adaptor.create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
-<endif>
->>
-
-createRewriteNodeFromElement(token,terminalOptions,args) ::= <<
-<if(terminalOptions.node)>
-new <terminalOptions.node>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
-<else>
-<if(args)> <! must create new node from old !>
-adaptor.create(<token>, <args; separator=", ">)
-<else>
-stream_<token>.nextNode()
-<endif>
-<endif>
->>
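
createImaginaryNode and createRewriteNodeFromElement differ mainly in where the payload comes from: a token type that never appeared in the input versus a token captured into a stream during parsing. A short sketch of the two calls (type names and streams are illustrative):

    // imaginary node: build a node directly from a token type
    CommonTree varDef = (CommonTree) adaptor.create(VARDEF, "VARDEF");
    // rewrite node from a tracked element: pull the matched token back out of its stream
    CommonTree idNode = (CommonTree) stream_ID.nextNode();
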
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTDbg.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTDbg.stg
deleted file mode 100644
index 886e198..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTDbg.stg
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
- *  hierarchy is set up as ASTDbg : AST : Dbg : Java by the code generator.
- */
-
-parserMembers() ::= <<
-protected DebugTreeAdaptor adaptor;
-public void setTreeAdaptor(TreeAdaptor adaptor) {
-<if(grammar.grammarIsRoot)>
-    this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
-<else>
-    this.adaptor = (DebugTreeAdaptor)adaptor; // delegator sends dbg adaptor
-<endif><\n>
-    <grammar.directDelegates:{g|<g:delegateName()>.setTreeAdaptor(this.adaptor);}>
-}
-public TreeAdaptor getTreeAdaptor() {
-    return adaptor;
-}<\n>
->>
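
The net effect is that the user-visible adaptor gets wrapped so every tree operation also fires a debug event. A hedged sketch of the equivalent hand-written wiring, where parser and port are placeholders for the generated recognizer and the debugger port:

    // sketch: wrapping a CommonTreeAdaptor for remote debugging
    TreeAdaptor base = new CommonTreeAdaptor();
    DebugEventSocketProxy dbg = new DebugEventSocketProxy(parser, port, base);
    parser.setDebugListener(dbg);
    parser.setTreeAdaptor(new DebugTreeAdaptor(dbg, base));
    dbg.handshake();   // throws IOException if no debugger (e.g. ANTLRWorks) is listening
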
-
-parserCtorBody() ::= <<
-<super.parserCtorBody()>
->>
-
-createListenerAndHandshake() ::= <<
-DebugEventSocketProxy proxy =
-    new DebugEventSocketProxy(this,port,<if(TREE_PARSER)>input.getTreeAdaptor()<else>adaptor<endif>);
-setDebugListener(proxy);
-set<inputStreamType>(new Debug<inputStreamType>(input,proxy));
-try {
-    proxy.handshake();
-}
-catch (IOException ioe) {
-    reportError(ioe);
-}
->>
-
-@ctorForRootGrammar.finally() ::= <<
-TreeAdaptor adap = new CommonTreeAdaptor();
-setTreeAdaptor(adap);
-proxy.setTreeAdaptor(adap);
->>
-
-@ctorForProfilingRootGrammar.finally() ::=<<
-TreeAdaptor adap = new CommonTreeAdaptor();
-setTreeAdaptor(adap);
->>
-
-@ctorForPredefinedListener.superClassRef() ::= "super(input, dbg);"
-
-@ctorForPredefinedListener.finally() ::=<<
-<if(grammar.grammarIsRoot)> <! don't create new adaptor for delegates !>
-TreeAdaptor adap = new CommonTreeAdaptor();
-setTreeAdaptor(adap);<\n>
-<endif>
->>
-
-@rewriteElement.pregen() ::= "dbg.location(<e.line>,<e.pos>);"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTParser.stg
deleted file mode 100644
index fd12288..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTParser.stg
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Templates for building ASTs during normal parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  The situation is not too bad as rewrite (->) usage makes ^ and !
- *  invalid. There is no huge explosion of combinations.
- */
-
-@rule.setErrorReturnValue() ::= <<
-retval.tree = (<ASTLabelType>)adaptor.errorNode(input, retval.start, input.LT(-1), re);
-<! System.out.println("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
->>
-
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = <createNodeFromToken(...)>;
-adaptor.addChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
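
For reference, a sketch of what this expands to for a token `ID` when backtracking is off; the synthetic labels and FOLLOW-set name are illustrative:

    // tokenRef sketch: match ID, wrap it in a node, hang it under the current root
    Token ID1 = (Token) match(input, ID, FOLLOW_ID_in_stat12);
    CommonTree ID1_tree = (CommonTree) adaptor.create(ID1);
    adaptor.addChild(root_0, ID1_tree);
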
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex,terminalOptions) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = <createNodeFromToken(...)>;
-root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)>
-<listLabel(elem=label, ...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label, ...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,terminalOptions,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label, ...)>
->>
-
-// SET AST
-
-// The match-set templates are interesting in that they use an argument list
-// to pass code to the default matchSet; this is another possible way to alter
-// inherited code.  I don't use the region mechanism because I need to pass
-// different code chunks depending on the operator.  I don't like encoding the
-// operator in the template name, as the number of templates gets large, but
-// this is the most flexible approach--as opposed to having the code generator
-// call matchSet and then add root code or rule-root code plus list-label code
-// plus ...  The combinations might require complicated rather than merely
-// appended code.  Investigate that refactoring when I have more time.
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <%
-<super.matchSet(postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <createNodeFromToken(...)>);}, ...)>
-%>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-<matchSet(...)>
->>
-
-matchSetBang(s,label,elementIndex,terminalOptions,postmatchCode) ::= "<super.matchSet(...)>"
-
-// note there is no matchSetTrack because -> rewrites force sets to be
-// plain old blocks of alts: (A|B|...|C)
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-<if(label)>
-<label>=(<labelType>)input.LT(1);<\n>
-<endif>
-<super.matchSet(postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = (<ASTLabelType>)adaptor.becomeRoot(<createNodeFromToken(...)>, root_0);},...)>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <label>.getTree());
->>
-
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
-
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>.getTree(), root_0);
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(label, {<label>.getTree()})>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefBang(...)>
-<listLabel(label, {<label>.getTree()})>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabel(label, {<label>.getTree()})>
->>
-
-// WILDCARD AST
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
-adaptor.addChild(root_0, <label>_tree);
-<if(backtracking)>}<endif>
->>
-
-wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-<super.wildcard(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
-root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_0);
-<if(backtracking)>}<endif>
->>
-
-createNodeFromToken(label,terminalOptions) ::= <<
-<if(terminalOptions.node)>
-new <terminalOptions.node>(<label>) <! new MethodNode(IDLabel) !>
-<else>
-(<ASTLabelType>)adaptor.create(<label>)
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
-retval.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
-adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
-<if(backtracking)>}<endif>
->>
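
rulePostProcessing is what turns the flat nil root into the rule's result: a nil node with several children stays as a list, a nil node with exactly one child collapses to that child, and an empty nil becomes null. A conceptual sketch of the cleanup step for a rule that collected two subtrees (labels are illustrative):

    // fragment of a generated rule method
    Object root_0 = adaptor.nil();
    adaptor.addChild(root_0, a.getTree());
    adaptor.addChild(root_0, b.getTree());
    retval.tree = (CommonTree) adaptor.rulePostProcessing(root_0);  // a single child would be hoisted
    adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
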
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTTreeParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTTreeParser.stg
deleted file mode 100644
index 1f86683..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTTreeParser.stg
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Templates for building ASTs during tree parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  Each combination has its own template except that label/no label
- *  is combined into tokenRef, ruleRef, ...
- */
-
-/** Add a variable to track last element matched */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-<ASTLabelType> _first_0 = null;
-<ASTLabelType> _last = null;<\n>
->>
-
-/** What to emit when there is no rewrite rule.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= <<
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(rewriteMode)>
-retval.tree = (<ASTLabelType>)_first_0;
-if ( adaptor.getParent(retval.tree)!=null && adaptor.isNil( adaptor.getParent(retval.tree) ) )
-    retval.tree = (<ASTLabelType>)adaptor.getParent(retval.tree);
-<endif>
-<if(backtracking)>}<endif>
->>
-
-/** match ^(root children) in tree parser; override here to
- *  add tree construction actions.
- */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-{
-<ASTLabelType> _save_last_<treeLevel> = _last;
-<ASTLabelType> _first_<treeLevel> = null;
-<if(!rewriteMode)>
-<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.nil();
-<endif>
-<root:element()>
-<if(rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-<if(root.el.rule)>
-if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
-<else>
-if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>;
-<endif>
-<endif>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( input.LA(1)==Token.DOWN ) {
-    match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
-    <children:element()>
-    match(input, Token.UP, null); <checkRuleBacktrackFailure()>
-}
-<else>
-match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
-<children:element()>
-match(input, Token.UP, null); <checkRuleBacktrackFailure()>
-<endif>
-<if(!rewriteMode)>
-adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
-<endif>
-_last = _save_last_<treeLevel>;
-}<\n>
->>
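
Concretely, matching `^(PLUS e1 e2)` in a tree grammar with output=AST and rewriteMode off expands to roughly this shape; labels and FOLLOW-set names are illustrative, and the child matches are elided:

    // tree() sketch: duplicate the root, descend with DOWN/UP, rebuild under root_1
    CommonTree _last = (CommonTree) input.LT(1);
    CommonTree root_1 = (CommonTree) adaptor.nil();
    CommonTree PLUS1 = (CommonTree) match(input, PLUS, FOLLOW_PLUS_in_expr31);
    root_1 = (CommonTree) adaptor.becomeRoot((CommonTree) adaptor.dupNode(PLUS1), root_1);
    match(input, Token.DOWN, null);
    // ... children matched here, each added under root_1 ...
    match(input, Token.UP, null);
    adaptor.addChild(root_0, root_1);
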
-
-// TOKEN AST STUFF
-
-/** ID! and output=AST (same as plain tokenRef) except that it adds
- *  the setting of _last
- */
-tokenRefBang(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.tokenRef(...)>
->>
-
-/** ID auto construct */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<label>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
-<endif><\n>
-adaptor.addChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>}<endif>
-<else> <! rewrite mode !>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
-<endif>
->>
-
-/** label+=TOKEN auto construct */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** ^(ID ...) auto construct */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<label>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
-<endif><\n>
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
-<if(backtracking)>}<endif>
-<endif>
->>
-
-/** Match ^(label+=TOKEN ...) auto construct */
-tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard and auto dup the node/subtree */
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.wildcard(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = (<ASTLabelType>)adaptor.dupTree(<label>);
-adaptor.addChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>}<endif>
-<else> <! rewrite mode !>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
-<endif>
->>
-
-// SET AST
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.matchSet(postmatchCode={
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<label>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
-<endif><\n>
-adaptor.addChild(root_<treeLevel>, <label>_tree);
-<if(backtracking)>\}<endif>
-<endif>
-}, ...
-)>
->>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-<matchSet(...)>
-<noRewrite(...)> <! set return tree !>
->>
-
-matchSetBang(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.matchSet(...)>
->>
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-<super.matchSet(postmatchCode={
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = new <terminalOptions.node>(<label>);
-<else>
-<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
-<endif><\n>
-root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
-<if(backtracking)>\}<endif>
-<endif>
-}, ...
-)>
->>
-
-// RULE REF AST
-
-/** rule auto construct */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
-<if(!rewriteMode)>
-adaptor.addChild(root_<treeLevel>, <label>.getTree());
-<else> <! rewrite mode !>
-if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>.tree;
-<endif>
->>
-
-/** x+=rule auto construct */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(label, {<label>.getTree()})>
->>
-
-/** ^(rule ...) auto construct */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>.getTree(), root_<treeLevel>);
-<endif>
->>
-
-/** ^(x+=rule ...) auto construct */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabel(label, {<label>.getTree()})>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefTrack(...)>
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefTrackAndListLabel(...)>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefRuleRootTrack(...)>
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-_last = (<ASTLabelType>)input.LT(1);
-<super.ruleRefRuleRootTrackAndListLabel(...)>
->>
-
-/** Streams for token refs are tree nodes now; override to
- *  change nextToken to nextNode.
- */
-createRewriteNodeFromElement(token,terminalOptions,scope) ::= <<
-<if(terminalOptions.node)>
-new <terminalOptions.node>(stream_<token>.nextNode())
-<else>
-stream_<token>.nextNode()
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
-retval.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
-<if(backtracking)>}<endif>
-<endif>
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/Dbg.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/Dbg.stg
deleted file mode 100644
index b512b2b..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/Dbg.stg
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/** Template overrides to add debugging to normal Java output;
- *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
- */
-@outputFile.imports() ::= <<
-<@super.imports()>
-import org.antlr.runtime.debug.*;
-import java.io.IOException;
->>
-
-@genericParser.members() ::= <<
-<if(grammar.grammarIsRoot)>
-public static final String[] ruleNames = new String[] {
-    "invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n    ", separator=", ">
-};<\n>
-<endif>
-public static final boolean[] decisionCanBacktrack = new boolean[] {
-    false, // invalid decision
-    <grammar.decisions:{d | <d.dfa.hasSynPred; null="false">}; wrap="\n    ", separator=", ">
-};<\n>
-<if(grammar.grammarIsRoot)> <! grammar imports other grammar(s) !>
-    public int ruleLevel = 0;
-    public int getRuleLevel() { return ruleLevel; }
-    public void incRuleLevel() { ruleLevel++; }
-    public void decRuleLevel() { ruleLevel--; }
-<if(profile)>
-    <ctorForProfilingRootGrammar()>
-<else>
-    <ctorForRootGrammar()>
-<endif>
-<ctorForPredefinedListener()>
-<else> <! imported grammar !>
-    public int getRuleLevel() { return <grammar.delegators:{g| <g:delegateName()>}>.getRuleLevel(); }
-    public void incRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.incRuleLevel(); }
-    public void decRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.decRuleLevel(); }
-    <ctorForDelegateGrammar()>
-<endif>
-<if(profile)>
-public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
-	int stopIndex = getRuleMemoization(ruleIndex, input.index());
-    ((Profiler)dbg).examineRuleMemoization(input, ruleIndex, stopIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
-    return super.alreadyParsedRule(input, ruleIndex);
-}<\n>
-public void memoize(IntStream input,
-                    int ruleIndex,
-                    int ruleStartIndex)
-{
-    ((Profiler)dbg).memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
-    super.memoize(input, ruleIndex, ruleStartIndex);
-}<\n>
-<endif>
-protected boolean evalPredicate(boolean result, String predicate) {
-    dbg.semanticPredicate(result, predicate);
-    return result;
-}<\n>
->>
-
-ctorForRootGrammar() ::= <<
-<! bug: can't use <@super.members()> cut-n-paste instead !>
-<! Same except we add port number and profile stuff if root grammar !>
-public <name>(<inputStreamType> input) {
-    this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT, new RecognizerSharedState());
-}
-public <name>(<inputStreamType> input, int port, RecognizerSharedState state) {
-    super(input, state);
-    <parserCtorBody()>
-    <createListenerAndHandshake()>
-    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
-    <@finally()>
-}<\n>
->>
-
-ctorForProfilingRootGrammar() ::= <<
-<! bug: can't use <@super.members()> cut-n-paste instead !>
-public <name>(<inputStreamType> input) {
-    this(input, new Profiler(null), new RecognizerSharedState());
-}
-public <name>(<inputStreamType> input, DebugEventListener dbg, RecognizerSharedState state) {
-    super(input, dbg, state);
-    Profiler p = (Profiler)dbg;
-    p.setParser(this);
-    <parserCtorBody()>
-    <grammar.directDelegates:
-     {g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
-    <@finally()>
-}
-<\n>
->>
-
-/** Basically we don't want to set any dbg listeners, as the root will have it. */
-ctorForDelegateGrammar() ::= <<
-public <name>(<inputStreamType> input, DebugEventListener dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
-    super(input, dbg, state);
-    <parserCtorBody()>
-    <grammar.directDelegates:
-     {g|<g:delegateName()> = new <g.recognizerName>(input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
-}<\n>
->>
-
-ctorForPredefinedListener() ::= <<
-public <name>(<inputStreamType> input, DebugEventListener dbg) {
-    <@superClassRef>super(input, dbg, new RecognizerSharedState());<@end>
-<if(profile)>
-    Profiler p = (Profiler)dbg;
-    p.setParser(this);
-<endif>
-    <parserCtorBody()>
-    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
-    <@finally()>
-}<\n>
->>
-
-createListenerAndHandshake() ::= <<
-<if(TREE_PARSER)>
-DebugEventSocketProxy proxy =
-    new DebugEventSocketProxy(this, port, input.getTreeAdaptor());<\n>
-<else>
-DebugEventSocketProxy proxy =
-    new DebugEventSocketProxy(this, port, null);<\n>
-<endif>
-setDebugListener(proxy);
-try {
-    proxy.handshake();
-}
-catch (IOException ioe) {
-    reportError(ioe);
-}
->>
-
-@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
-
-@rule.preamble() ::= <<
-try { dbg.enterRule(getGrammarFileName(), "<ruleName>");
-if ( getRuleLevel()==0 ) {dbg.commence();}
-incRuleLevel();
-dbg.location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.charPositionInLine>);<\n>
->>
-
-@rule.postamble() ::= <<
-dbg.location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);<\n>
-}
-finally {
-    dbg.exitRule(getGrammarFileName(), "<ruleName>");
-    decRuleLevel();
-    if ( getRuleLevel()==0 ) {dbg.terminate();}
-}<\n>
->>
-
-@synpred.start() ::= "dbg.beginBacktrack(state.backtracking);"
-
-@synpred.stop() ::= "dbg.endBacktrack(state.backtracking, success);"
-
-// Common debug event triggers used by region overrides below
-
-enterSubRule() ::=
-    "try { dbg.enterSubRule(<decisionNumber>);<\n>"
-
-exitSubRule() ::=
-    "} finally {dbg.exitSubRule(<decisionNumber>);}<\n>"
-
-enterDecision() ::=
-    "try { dbg.enterDecision(<decisionNumber>, decisionCanBacktrack[<decisionNumber>]);<\n>"
-
-exitDecision() ::=
-    "} finally {dbg.exitDecision(<decisionNumber>);}<\n>"
-
-enterAlt(n) ::= "dbg.enterAlt(<n>);<\n>"
-
-// Region overrides that tell various constructs to add debugging triggers
-
-@block.predecision() ::= "<enterSubRule()><enterDecision()>"
-
-@block.postdecision() ::= "<exitDecision()>"
-
-@block.postbranch() ::= "<exitSubRule()>"
-
-@ruleBlock.predecision() ::= "<enterDecision()>"
-
-@ruleBlock.postdecision() ::= "<exitDecision()>"
-
-@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
-@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
-@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
-
-@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
-
-@positiveClosureBlock.predecision() ::= "<enterDecision()>"
-
-@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
-
-@positiveClosureBlock.earlyExitException() ::=
-    "dbg.recognitionException(eee);<\n>"
-
-@closureBlock.preloop() ::= "<enterSubRule()>"
-
-@closureBlock.postloop() ::= "<exitSubRule()>"
-
-@closureBlock.predecision() ::= "<enterDecision()>"
-
-@closureBlock.postdecision() ::= "<exitDecision()>"
-
-@altSwitchCase.prealt() ::= "<enterAlt(altNum)>" // altNum is arg of altSwitchCase
-
-@element.prematch() ::=
-    "dbg.location(<e.line>,<e.pos>);" // e is arg of element
-
-@matchSet.mismatchedSetException() ::=
-    "dbg.recognitionException(mse);"
-
-@dfaState.noViableAltException() ::= "dbg.recognitionException(nvae);"
-
-@dfaStateSwitch.noViableAltException() ::= "dbg.recognitionException(nvae);"
-
-dfaDecision(decisionNumber,description) ::= <<
-try {
-    isCyclicDecision = true;
-    <super.dfaDecision(...)>
-}
-catch (NoViableAltException nvae) {
-    dbg.recognitionException(nvae);
-    throw nvae;
-}
->>
-
-@cyclicDFA.errorMethod() ::= <<
-public void error(NoViableAltException nvae) {
-    dbg.recognitionException(nvae);
-}
->>
-
-/** Force predicate validation to trigger an event */
-evalPredicate(pred,description) ::= <<
-evalPredicate(<pred>,"<description>")
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/Java.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/Java.stg
deleted file mode 100644
index c8ef58c..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/Java.stg
+++ /dev/null
@@ -1,1401 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2010 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-javaTypeInitMap ::= [
-	"int":"0",
-	"long":"0",
-	"float":"0.0f",
-	"double":"0.0",
-	"boolean":"false",
-	"byte":"0",
-	"short":"0",
-	"char":"0",
-	default:"null" // anything other than an atomic type
-]
-
-// C#'s System.Boolean.ToString() returns "True" and "False", but the proper literals are
-// "true" and "false"; the Java version of Boolean returns "true" and "false", so the
-// values map to themselves here.
-booleanLiteral ::= [
-	"True":"true",
-	"False":"false",
-	"true":"true",
-	"false":"false",
-	default:"false"
-]
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-           docComment, recognizer,
-           name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
-	   backtracking, synpreds, memoize, numRules,
-	   fileName, ANTLRVersion, generatedTimestamp, trace,
-	   scopes, superClass, literals) ::=
-<<
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-<actions.(actionScope).header>
-
-<@imports>
-import org.antlr.runtime.*;
-<if(TREE_PARSER)>
-import org.antlr.runtime.tree.*;
-<endif>
-import java.util.Stack;
-import java.util.List;
-import java.util.ArrayList;
-<if(backtracking)>
-import java.util.Map;
-import java.util.HashMap;
-<endif>
-<@end>
-
-<docComment>
-@SuppressWarnings({"all", "warnings", "unchecked"})
-<recognizer>
->>
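
A recognizer generated from this skeleton is driven in the usual ANTLR 3 way. A hedged sketch, assuming a grammar named T with output=AST and a start rule prog (TLexer, TParser, and prog are hypothetical names):

    import org.antlr.runtime.*;
    import org.antlr.runtime.tree.*;

    public class Demo {
        public static void main(String[] args) throws RecognitionException {
            ANTLRStringStream in = new ANTLRStringStream("x = 1;");
            TLexer lexer = new TLexer(in);
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            TParser parser = new TParser(tokens);
            TParser.prog_return r = parser.prog();       // return scope generated by rule()
            CommonTree tree = (CommonTree) r.getTree();  // AST built by the AST templates
            System.out.println(tree.toStringTree());
        }
    }
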
-
-lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="CommonToken",
-      superClass="Lexer") ::= <<
-public class <grammar.recognizerName> extends <@superClassName><superClass><@end> {
-    <tokens:{it | public static final int <it.name>=<it.type>;}; separator="\n">
-    <scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}>
-    <actions.lexer.members>
-
-    // delegates
-    <grammar.delegates:
-         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
-    // delegators
-    <grammar.delegators:
-         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
-    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
-    public <superClass>[] getDelegates() {
-        return new <superClass>[] {<grammar.delegates: {g|<g:delegateName()>}; separator = ", ">};
-    }
-
-    public <grammar.recognizerName>() {} <! needed by subclasses !>
-    public <grammar.recognizerName>(CharStream input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
-        this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>);
-    }
-    public <grammar.recognizerName>(CharStream input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
-        super(input,state);
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-        state.ruleMemo = new HashMap[<numRules>+1];<\n> <! index from 1..n !>
-<endif>
-<endif>
-        <grammar.directDelegates:
-         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
-        <grammar.delegators:
-         {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
-        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
-    }
-    public String getGrammarFileName() { return "<fileName>"; }
-
-<if(filterMode)>
-    <filteringNextToken()>
-<endif>
-    <rules; separator="\n\n">
-
-    <synpreds:{p | <lexerSynpred(p)>}>
-
-    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-}
->>
-
-/** An override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error can be reported on failure; just rewind, consume
- *  a character and then try again.  backtracking needs to be set as well.
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-public Token nextToken() {
-    while (true) {
-        if ( input.LA(1)==CharStream.EOF ) {
-            Token eof = new CommonToken((CharStream)input,Token.EOF,
-                                        Token.DEFAULT_CHANNEL,
-                                        input.index(),input.index());
-            eof.setLine(getLine());
-            eof.setCharPositionInLine(getCharPositionInLine());
-            return eof;
-        }
-        state.token = null;
-	state.channel = Token.DEFAULT_CHANNEL;
-        state.tokenStartCharIndex = input.index();
-        state.tokenStartCharPositionInLine = input.getCharPositionInLine();
-        state.tokenStartLine = input.getLine();
-	state.text = null;
-        try {
-            int m = input.mark();
-            state.backtracking=1; <! means we won't throw slow exception !>
-            state.failed=false;
-            mTokens();
-            state.backtracking=0;
-            <! mTokens backtracks with synpred at backtracking==2
-               and we set the synpredgate to allow actions at level 1. !>
-            if ( state.failed ) {
-                input.rewind(m);
-                input.consume(); <! advance one char and try again !>
-            }
-            else {
-                emit();
-                return state.token;
-            }
-        }
-        catch (RecognitionException re) {
-            // shouldn't happen in backtracking mode, but...
-            reportError(re);
-            recover(re);
-        }
-    }
-}
-
-public void memoize(IntStream input,
-		int ruleIndex,
-		int ruleStartIndex)
-{
-if ( state.backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
-}
-
-public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
-if ( state.backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
-return false;
-}
->>
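
In filter mode the lexer is typically driven token by token rather than through a parser. A sketch, assuming a generated filter-mode lexer class named FuzzyLexer is on the classpath:

    // pull tokens until EOF; unmatched input is silently skipped by nextToken() above
    FuzzyLexer lex = new FuzzyLexer(new ANTLRStringStream(source));
    for (Token t = lex.nextToken(); t.getType() != Token.EOF; t = lex.nextToken()) {
        System.out.println(t);
    }
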
-
-actionGate() ::= "state.backtracking==0"
-
-filteringActionGate() ::= "state.backtracking==1"
-
-/** How to generate a parser */
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass,
-              labelType, members, rewriteElementType,
-              filterMode, ASTLabelType="Object") ::= <<
-public class <grammar.recognizerName> extends <@superClassName><superClass><@end> {
-<if(grammar.grammarIsRoot)>
-    public static final String[] tokenNames = new String[] {
-        "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
-    };<\n>
-<endif>
-    <tokens:{it |public static final int <it.name>=<it.type>;}; separator="\n">
-
-    // delegates
-    <grammar.delegates: {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
-    public <superClass>[] getDelegates() {
-        return new <superClass>[] {<grammar.delegates: {g|<g:delegateName()>}; separator = ", ">};
-    }
-
-    // delegators
-    <grammar.delegators:
-         {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
-    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
-
-    <scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}>
-
-    <@members>
-    <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-    public <grammar.recognizerName>(<inputStreamType> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
-        this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>);
-    }
-    public <grammar.recognizerName>(<inputStreamType> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
-        super(input, state);
-        <parserCtorBody()>
-        <grammar.directDelegates:
-         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
-        <grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
-        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
-    }
-    <@end>
-
-    public String[] getTokenNames() { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; }
-    public String getGrammarFileName() { return "<fileName>"; }
-
-    <members>
-
-    <rules; separator="\n\n">
-
-<! generate rule/method definitions for imported rules so they
-   appear to be defined in this recognizer. !>
-    // Delegated rules
-<grammar.delegatedRules:{ruleDescriptor|
-    public <returnType()> <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope()>) throws <ruleDescriptor.throwsSpec; separator=", "> { <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); \}}; separator="\n">
-
-    <synpreds:{p | <synpred(p)>}>
-
-    <cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-    <bitsets:{it | <bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-                          words64=it.bits)>}>
-}
->>
-
-parserCtorBody() ::= <<
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-this.state.ruleMemo = new HashMap[<length(grammar.allImportedRules)>+1];<\n> <! index from 1..n !>
-<endif>
-<endif>
-<grammar.delegators:
- {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
-       ASTLabelType="Object", superClass="Parser", labelType="Token",
-       members={<actions.parser.members>}) ::= <<
-<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, "TokenStream", superClass,
-              labelType, members, "Token",
-              false, ASTLabelType)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
-           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object",
-           superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
-           members={<actions.treeparser.members>}
-           ) ::= <<
-<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, "TreeNodeStream", superClass,
-              labelType, members, "Node",
-              filterMode, ASTLabelType)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  nor parameters etc..., just give the simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-// $ANTLR start <ruleName>
-public final void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope()>) throws <ruleDescriptor.throwsSpec:{x|<x>}; separator=", "> {
-    <ruleLabelDefs()>
-<if(trace)>
-    traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
-    try {
-        <block>
-    }
-    finally {
-        traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
-    }
-<else>
-    <block>
-<endif>
-}
-// $ANTLR end <ruleName>
->>
-
-synpred(name) ::= <<
-public final boolean <name>() {
-    state.backtracking++;
-    <@start()>
-    int start = input.mark();
-    try {
-        <name>_fragment(); // can never throw exception
-    } catch (RecognitionException re) {
-        System.err.println("impossible: "+re);
-    }
-    boolean success = !state.failed;
-    input.rewind(start);
-    <@stop()>
-    state.backtracking--;
-    state.failed=false;
-    return success;
-}<\n>
->>
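
The generated boolean synpredN_GrammarName() methods are then consulted inside decisions. A simplified sketch of how a decision typically gates an alternative on a predicate (decision number, token type, and predicate name are illustrative):

    // pick alt 1 only if the lookahead matches and the syntactic predicate succeeds
    int LA2_0 = input.LA(1);
    if ( LA2_0 == LPAREN && synpred1_T() ) {
        alt2 = 1;
    }
    else {
        alt2 = 2;
    }
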
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if ( state.backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (state.failed) return <ruleReturnValue()>;<endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (state.backtracking>0) {state.failed=true; return <ruleReturnValue()>;}<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(scope=ruleDescriptor.returnScope)>
-
-// $ANTLR start "<ruleName>"
-// <fileName>:<description>
-public final <returnType()> <ruleName>(<ruleDescriptor.parameterScope:parameterScope()>) throws <ruleDescriptor.throwsSpec:{x|<x>}; separator=", "> {
-    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleLabelDefs()>
-    <ruleDescriptor.actions.init>
-    <@preamble()>
-    try {
-        <ruleMemoization(name=ruleName)>
-        <block>
-        <ruleCleanUp()>
-        <(ruleDescriptor.actions.after):execAction()>
-    }
-<if(exceptions)>
-    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-    <actions.(actionScope).rulecatch>
-<else>
-    catch (RecognitionException re) {
-        reportError(re);
-        recover(input,re);
-	<@setErrorReturnValue()>
-    }<\n>
-<endif>
-<endif>
-<endif>
-    finally {
-    	// do for sure before leaving
-        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <memoize()>
-        <ruleScopeCleanUp()>
-        <finally>
-    }
-    <@postamble()>
-    return <ruleReturnValue()>;
-}
-// $ANTLR end "<ruleName>"
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>) {
-    <e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<returnType()> retval = new <returnType()>();
-retval.start = input.LT(1);<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
-}>
-<endif>
-<if(memoize)>
-int <ruleDescriptor.name>_StartIndex = input.index();
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{it |<it>_stack.push(new <it>_scope());}; separator="\n">
-<ruleDescriptor.ruleScope:{it |<it.name>_stack.push(new <it.name>_scope());}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{it |<it>_stack.pop();}; separator="\n">
-<ruleDescriptor.ruleScope:{it |<it.name>_stack.pop();}; separator="\n">
->>
-
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
-  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it |<labelType> <it.label.text>=null;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it |List list_<it.label.text>=null;}; separator="\n"
->
-<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|RuleReturnScope <ll.label.text> = null;}; separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{it |<labelType> <it.label.text>=null;}; separator="\n"
->
-<ruleDescriptor.charLabels:{it |int <it.label.text>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{it |List list_<it.label.text>=null;}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <%
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-retval
-<endif>
-<endif>
-<endif>
-%>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-retval.stop = input.LT(-1);<\n>
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if ( state.backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
-<endif>
-<endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-// $ANTLR start "<ruleName>"
-public final void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>) throws RecognitionException {
-    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    try {
-<if(nakedBlock)>
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block><\n>
-<else>
-        int _type = <ruleName>;
-        int _channel = DEFAULT_TOKEN_CHANNEL;
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block>
-        <ruleCleanUp()>
-        state.type = _type;
-        state.channel = _channel;
-        <(ruleDescriptor.actions.after):execAction()>
-<endif>
-    }
-    finally {
-    	// do for sure before leaving
-        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <ruleScopeCleanUp()>
-        <memoize()>
-    }
-}
-// $ANTLR end "<ruleName>"
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-public void mTokens() throws RecognitionException {
-    <block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-<@prebranch()>
-switch (alt<decisionNumber>) {
-    <alts:{a | <altSwitchCase(i,a)>}>
-}
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-switch (alt<decisionNumber>) {
-    <alts:{a | <altSwitchCase(i,a)>}>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-int cnt<decisionNumber>=0;
-<decls>
-<@preloop()>
-loop<decisionNumber>:
-do {
-    int alt<decisionNumber>=<maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) {
-	<alts:{a | <altSwitchCase(i,a)>}>
-	default :
-	    if ( cnt<decisionNumber> >= 1 ) break loop<decisionNumber>;
-	    <ruleBacktrackFailure()>
-            EarlyExitException eee =
-                new EarlyExitException(<decisionNumber>, input);
-            <@earlyExitException()>
-            throw eee;
-    }
-    cnt<decisionNumber>++;
-} while (true);
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@preloop()>
-loop<decisionNumber>:
-do {
-    int alt<decisionNumber>=<maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) {
-	<alts:{a | <altSwitchCase(i,a)>}>
-	default :
-	    break loop<decisionNumber>;
-    }
-} while (true);
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase(altNum,alt) ::= <<
-case <altNum> :
-    <@prealt()>
-    <alt>
-    break;<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-// <fileName>:<description>
-{
-<@declarations()>
-<elements:element()>
-<rew>
-<@cleanup()>
-}
->>
-
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element(e) ::= <<
-<@prematch()>
-<e.el><\n>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label>=(<labelType>)<endif>match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(token,label,elementIndex,terminalOptions)>
-<listLabel(label, label)>
->>
-
-listLabel(label,elem) ::= <<
-if (list_<label>==null) list_<label>=new ArrayList();
-list_<label>.add(<elem>);<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-match(<char>); <checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
-<if(label)>
-<if(LEXER)>
-<label>= input.LA(1);<\n>
-<else>
-<label>=(<labelType>)input.LT(1);<\n>
-<endif>
-<endif>
-if ( <s> ) {
-    input.consume();
-    <postmatchCode>
-<if(!LEXER)>
-    state.errorRecovery=false;
-<endif>
-    <if(backtracking)>state.failed=false;<endif>
-}
-else {
-    <ruleBacktrackFailure()>
-    MismatchedSetException mse = new MismatchedSetException(null,input);
-    <@mismatchedSetException()>
-<if(LEXER)>
-    recover(mse);
-    throw mse;
-<else>
-    throw mse;
-    <! use following code to make it recover inline; remove throw mse;
-    recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
-    !>
-<endif>
-}<\n>
->>
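Because sets are emitted as inline interval tests, the <s> expression above expands to a chain of lookahead comparisons. A hedged illustration for a lexer set such as ('0'..'9'|'_') — the characters and recovery details are invented and backtracking is assumed off:

    // Purely illustrative expansion of matchSet in a generated lexer.
    if ( (input.LA(1) >= '0' && input.LA(1) <= '9') || input.LA(1) == '_' ) {
        input.consume();
    }
    else {
        MismatchedSetException mse = new MismatchedSetException(null, input);
        recover(mse);
        throw mse;
    }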
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(label, label)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex="0") ::= <<
-<if(label)>
-int <label>Start = getCharIndex();
-match(<string>); <checkRuleBacktrackFailure()>
-int <label>StartLine<elementIndex> = getLine();
-int <label>StartCharPos<elementIndex> = getCharPositionInLine();
-<label> = new <labelType>(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1);
-<label>.setLine(<label>StartLine<elementIndex>);
-<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>);
-<else>
-match(<string>); <checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)>
-<label>=(<labelType>)input.LT(1);<\n>
-<endif>
-matchAny(input); <checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<wildcard(...)>
-<listLabel(label, label)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = input.LA(1);<\n>
-<endif>
-matchAny(); <checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(label, elementIndex)>
-<listLabel(label, label)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.  The 'rule' argument used to be the
- *  target rule name but is now of type Rule, whose toString() is still
- *  the rule name; the full rule descriptor is also accessible.
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-pushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
-<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
-state._fsp--;
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(rule,label,elementIndex,args,scope)>
-<listLabel(label, label)>
->>
-
-/** A lexer rule reference.
- *
- *  The 'rule' argument used to be the target rule name but is now of
- *  type Rule, whose toString() is still the rule name; the full rule
- *  descriptor is also accessible.
- */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
-<if(label)>
-int <label>Start<elementIndex> = getCharIndex();
-int <label>StartLine<elementIndex> = getLine();
-int <label>StartCharPos<elementIndex> = getCharPositionInLine();
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<label> = new <labelType>(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
-<label>.setLine(<label>StartLine<elementIndex>);
-<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>);
-<else>
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(rule,label,args,elementIndex,scope)>
-<listLabel(label, label)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-int <label>Start<elementIndex> = getCharIndex();
-int <label>StartLine<elementIndex> = getLine();
-int <label>StartCharPos<elementIndex> = getCharPositionInLine();
-match(EOF); <checkRuleBacktrackFailure()>
-<labelType> <label> = new <labelType>(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
-<label>.setLine(<label>StartLine<elementIndex>);
-<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>);
-<else>
-match(EOF); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-// used for left-recursive rules
-recRuleDefArg()                       ::= "int <recRuleArg()>"
-recRuleArg()                          ::= "_p"
-recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
-recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
-recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( input.LA(1)==Token.DOWN ) {
-    match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
-    <children:element()>
-    match(input, Token.UP, null); <checkRuleBacktrackFailure()>
-}
-<else>
-match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
-<children:element()>
-match(input, Token.UP, null); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if ( !(<evalPredicate(pred,description)>) ) {
-    <ruleBacktrackFailure()>
-    throw new FailedPredicateException(input, "<ruleName>", "<description>");
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
-else {
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    NoViableAltException nvae =
-        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-    <@noViableAltException()>
-    throw nvae;<\n>
-<endif>
-}
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and closer to what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse ">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(<k>);<\n>
-<edges; separator="\nelse "><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
-<else>
-else {
-    alt<decisionNumber>=<eotPredictsAlt>;
-}<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
-    <targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
-<edges; separator="\n">
-default:
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    NoViableAltException nvae =
-        new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
-    <@noViableAltException()>
-    throw nvae;<\n>
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
-    <edges; separator="\n">
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( input.LA(<k>) ) {
-<edges; separator="\n"><\n>
-<if(eotPredictsAlt)>
-default:
-    alt<decisionNumber>=<eotPredictsAlt>;
-    break;<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{it |case <it>:}; separator="\n">
-    {
-    <targetState>
-    }
-    break;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = dfa<decisionNumber>.predict(input);
->>
-
-/* Dump DFA tables as run-length-encoded Strings of octal values.
- * We can't use hex because the compiler translates it before compilation.
- * Each table is split into multiple concatenated string literals, which
- * Java joins back together at compile time.
- * Java cannot handle large static arrays, so we're stuck with this
- * encode/decode approach.  See analysis and runtime DFA for
- * the encoding methods.
- */
-cyclicDFA(dfa) ::= <<
-static final String DFA<dfa.decisionNumber>_eotS =
-    "<dfa.javaCompressedEOT; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_eofS =
-    "<dfa.javaCompressedEOF; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_minS =
-    "<dfa.javaCompressedMin; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_maxS =
-    "<dfa.javaCompressedMax; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_acceptS =
-    "<dfa.javaCompressedAccept; wrap="\"+\n    \"">";
-static final String DFA<dfa.decisionNumber>_specialS =
-    "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>";
-static final String[] DFA<dfa.decisionNumber>_transitionS = {
-        <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
-};
-
-static final short[] DFA<dfa.decisionNumber>_eot = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eotS);
-static final short[] DFA<dfa.decisionNumber>_eof = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eofS);
-static final char[] DFA<dfa.decisionNumber>_min = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
-static final char[] DFA<dfa.decisionNumber>_max = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
-static final short[] DFA<dfa.decisionNumber>_accept = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
-static final short[] DFA<dfa.decisionNumber>_special = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_specialS);
-static final short[][] DFA<dfa.decisionNumber>_transition;
-
-static {
-    int numStates = DFA<dfa.decisionNumber>_transitionS.length;
-    DFA<dfa.decisionNumber>_transition = new short[numStates][];
-    for (int i=0; i\<numStates; i++) {
-        DFA<dfa.decisionNumber>_transition[i] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_transitionS[i]);
-    }
-}
-
-class DFA<dfa.decisionNumber> extends DFA {
-
-    public DFA<dfa.decisionNumber>(BaseRecognizer recognizer) {
-        this.recognizer = recognizer;
-        this.decisionNumber = <dfa.decisionNumber>;
-        this.eot = DFA<dfa.decisionNumber>_eot;
-        this.eof = DFA<dfa.decisionNumber>_eof;
-        this.min = DFA<dfa.decisionNumber>_min;
-        this.max = DFA<dfa.decisionNumber>_max;
-        this.accept = DFA<dfa.decisionNumber>_accept;
-        this.special = DFA<dfa.decisionNumber>_special;
-        this.transition = DFA<dfa.decisionNumber>_transition;
-    }
-    public String getDescription() {
-        return "<dfa.description>";
-    }
-    <@errorMethod()>
-<if(dfa.specialStateSTs)>
-    public int specialStateTransition(int s, IntStream _input) throws NoViableAltException {
-        <if(LEXER)>
-        IntStream input = _input;
-        <endif>
-        <if(PARSER)>
-        TokenStream input = (TokenStream)_input;
-        <endif>
-        <if(TREE_PARSER)>
-        TreeNodeStream input = (TreeNodeStream)_input;
-        <endif>
-    	int _s = s;
-        switch ( s ) {
-        <dfa.specialStateSTs:{state |
-        case <i0> : <! compressed special state numbers 0..n-1 !>
-            <state>}; separator="\n">
-        }
-<if(backtracking)>
-        if (state.backtracking>0) {state.failed=true; return -1;}<\n>
-<endif>
-        NoViableAltException nvae =
-            new NoViableAltException(getDescription(), <dfa.decisionNumber>, _s, input);
-        error(nvae);
-        throw nvae;
-    }<\n>
-<endif>
-}<\n>
->>
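The encode/decode approach referenced above is implemented by DFA.unpackEncodedString() in the runtime; the sketch below is only a hedged approximation of the idea — pairs of (count, value) characters expanded into a short[] — not the actual runtime code:

    // Rough sketch of run-length decoding, assuming the packed string is a
    // sequence of (count, value) character pairs.
    static short[] unpackSketch(String packed) {
        int n = 0;
        for (int i = 0; i < packed.length(); i += 2) n += packed.charAt(i);
        short[] out = new short[n];
        int k = 0;
        for (int i = 0; i < packed.length(); i += 2) {
            char count = packed.charAt(i);
            short value = (short) packed.charAt(i + 1);
            for (int j = 0; j < count; j++) out[k++] = value;
        }
        return out;
    }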
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-int LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
-<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-int index<decisionNumber>_<stateNumber> = input.index();
-input.rewind();<\n>
-<endif>
-s = -1;
-<edges; separator="\nelse ">
-<if(semPredState)> <! return input cursor to state before we rewound !>
-input.seek(index<decisionNumber>_<stateNumber>);<\n>
-<endif>
-if ( s>=0 ) return s;
-break;
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-s = <targetStateNumber>;<\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left>&&<right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(pred,{})>)"
-
-evalPredicate(pred,description) ::= "(<pred>)"
-
-evalSynPredicate(pred,description) ::= "<pred>()"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
-(LA<decisionNumber>_<stateNumber> >= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
-%>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>) >= <lower> && input.LA(<k>) \<= <upper>)"
-
-setTest(ranges) ::= <<
-<ranges; separator="||">
->>
-
-// A T T R I B U T E S
-
-globalAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected static class <scope.name>_scope {
-    <scope.attributes:{it |<it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
-<endif>
->>
-
-ruleAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected static class <scope.name>_scope {
-    <scope.attributes:{it |<it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
-<endif>
->>
-
-returnStructName(r) ::= "<r.name>_return"
-
-returnType() ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
-%>
-
-/** Generate the Java type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <%
-<if(referencedRule.hasMultipleReturnValues)>
-<referencedRule.grammar.recognizerName>.<referencedRule.name>_return
-<else>
-<if(referencedRule.hasSingleReturnValue)>
-<referencedRule.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
-%>
-
-delegateName(d) ::= <<
-<if(d.label)><d.label><else>g<d.name><endif>
->>
-
-/** Look the type up in the type-to-init-value map to get its default; a type
- *  not in the table must be an object type, so the default value is "null".
- */
-initValue(typeName) ::= <<
-<javaTypeInitMap.(typeName)>
->>
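A hedged illustration of what this lookup yields in generated declarations, assuming typical map entries such as "int" -> "0" (the actual map is defined elsewhere in this group file):

    // Purely illustrative declarations a generated parser might contain:
    int count = 0;     // primitive type found in javaTypeInitMap
    Token id = null;   // object type falls back to the "null" default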
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <%
-<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> =
- <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
-%>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-public static class <ruleDescriptor:returnStructName()> extends <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope {
-    <scope.attributes:{it |public <it.decl>;}; separator="\n">
-    <@ruleReturnMembers()>
-};
-<endif>
->>
-
-parameterScope(scope) ::= <<
-<scope.attributes:{it |<it.decl>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <%
-<if(negIndex)>
-((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name>
-<else>
-<if(index)>
-((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name>
-<else>
-((<scope>_scope)<scope>_stack.peek()).<attr.name>
-<endif>
-<endif>
-%>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
-<if(negIndex)>
-((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name> =<expr>;
-<else>
-<if(index)>
-((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name> =<expr>;
-<else>
-((<scope>_scope)<scope>_stack.peek()).<attr.name> =<expr>;
-<endif>
-<endif>
-%>
-
-/** $x is either a global scope or a rule x with a dynamic scope; this refers
- *  to the stack itself, not the top of the stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <%
-<if(referencedRule.hasMultipleReturnValues)>
-(<scope>!=null?<scope>.<attr.name>:<initValue(attr.type)>)
-<else>
-<scope>
-<endif>
-%>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>
-<else>
-<attr.name>
-<endif>
-%>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name> =<expr>;
-<else>
-<attr.name> =<expr>;
-<endif>
-%>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach
-
-tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.getText():null)"
-tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.getType():0)"
-tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.getLine():0)"
-tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.getCharPositionInLine():0)"
-tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.getChannel():0)"
-tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.getTokenIndex():0)"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?Integer.valueOf(<scope>.getText()):0)"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.start):null)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.stop):null)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?((<ASTLabelType>)<scope>.tree):null)"
-ruleLabelPropertyRef_text(scope,attr) ::= <%
-<if(TREE_PARSER)>
-(<scope>!=null?(input.getTokenStream().toString(
-  input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
-  input.getTreeAdaptor().getTokenStopIndex(<scope>.start))):null)
-<else>
-(<scope>!=null?input.toString(<scope>.start,<scope>.stop):null)
-<endif>
-%>
-
-ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?<scope>.st:null)"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::=
-    "(<scope>!=null?<scope>.getType():0)"
-lexerRuleLabelPropertyRef_line(scope,attr) ::=
-    "(<scope>!=null?<scope>.getLine():0)"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::=
-    "(<scope>!=null?<scope>.getCharPositionInLine():-1)"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::=
-    "(<scope>!=null?<scope>.getChannel():0)"
-lexerRuleLabelPropertyRef_index(scope,attr) ::=
-    "(<scope>!=null?<scope>.getTokenIndex():0)"
-lexerRuleLabelPropertyRef_text(scope,attr) ::=
-    "(<scope>!=null?<scope>.getText():null)"
-lexerRuleLabelPropertyRef_int(scope,attr) ::=
-    "(<scope>!=null?Integer.valueOf(<scope>.getText()):0)"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
-rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
-rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
-rulePropertyRef_text(scope,attr) ::= <%
-<if(TREE_PARSER)>
-input.getTokenStream().toString(
-  input.getTreeAdaptor().getTokenStartIndex(retval.start),
-  input.getTreeAdaptor().getTokenStopIndex(retval.start))
-<else>
-input.toString(retval.start,input.LT(-1))
-<endif>
-%>
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-lexerRulePropertyRef_text(scope,attr) ::= "getText()"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
-lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(getCharIndex()-1)"
-lexerRulePropertyRef_int(scope,attr) ::= "Integer.valueOf(<scope>.getText())"
-
-// Setting $st and $tree is allowed in a local rule; everything else
-// is flagged as an error.
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
-
-/** How to execute an action (only when not backtracking) */
-execAction(action) ::= <%
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) {
-  <action>
-}
-<else>
-<action>
-<endif>
-%>
-
-/** How to always execute an action even when backtracking */
-execForcedAction(action) ::= "<action>"
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-public static final BitSet <name> = new BitSet(new long[]{<words64:{it |<it>L};separator=",">});<\n>
->>
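As a hedged example — the name and bit words below are invented — the bitset template above expands to FOLLOW-set constants of this shape in the generated recognizer:

    // Hypothetical expansion; the 64-bit words encode which token types may follow.
    public static final BitSet FOLLOW_ID_in_expr42 =
        new BitSet(new long[]{0x0000000000000010L});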
-
-codeFileExtension() ::= ".java"
-
-true_value() ::= "true"
-false_value() ::= "false"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ST.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ST.stg
deleted file mode 100644
index a898e4d..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Java/ST.stg
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/** Template subgroup to add template rewrite output
- *  If debugging, then you'll also get STDbg.stg loaded.
- */
-
-@outputFile.imports() ::= <<
-<@super.imports()>
-import org.antlr.stringtemplate.*;
-import org.antlr.stringtemplate.language.*;
-import java.util.HashMap;
->>
-
-/** Add this to each rule's return value struct */
-@returnScope.ruleReturnMembers() ::= <<
-public StringTemplate st;
-public Object getTemplate() { return st; }
-public String toString() { return st==null?null:st.toString(); }
->>
-
-@genericParser.members() ::= <<
-<@super.members()>
-protected StringTemplateGroup templateLib =
-  new StringTemplateGroup("<name>Templates", AngleBracketTemplateLexer.class);
-
-public void setTemplateLib(StringTemplateGroup templateLib) {
-  this.templateLib = templateLib;
-}
-public StringTemplateGroup getTemplateLib() {
-  return templateLib;
-}
-/** allows convenient multi-value initialization:
- *  "new STAttrMap().put(...).put(...)"
- */
-public static class STAttrMap extends HashMap {
-  public STAttrMap put(String attrName, Object value) {
-    super.put(attrName, value);
-    return this;
-  }
-  public STAttrMap put(String attrName, int value) {
-    super.put(attrName, new Integer(value));
-    return this;
-  }
-}
->>
-
-/** x+=rule when output=template */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(rule,label,elementIndex,args,scope)>
-<listLabel(label, {<label>.getTemplate()})>
->>
-
-rewriteTemplate(alts) ::= <<
-
-// TEMPLATE REWRITE
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) {
-  <alts:rewriteTemplateAlt(); separator="else ">
-  <if(rewriteMode)><replaceTextInLine()><endif>
-}
-<else>
-<alts:rewriteTemplateAlt(); separator="else ">
-<if(rewriteMode)><replaceTextInLine()><endif>
-<endif>
->>
-
-replaceTextInLine() ::= <<
-<if(TREE_PARSER)>
-((TokenRewriteStream)input.getTokenStream()).replace(
-  input.getTreeAdaptor().getTokenStartIndex(retval.start),
-  input.getTreeAdaptor().getTokenStopIndex(retval.start),
-  retval.st);
-<else>
-((TokenRewriteStream)input).replace(
-  ((Token)retval.start).getTokenIndex(),
-  input.LT(-1).getTokenIndex(),
-  retval.st);
-<endif>
->>
-
-rewriteTemplateAlt(alt) ::= <<
-// <alt.description>
-<if(alt.pred)>
-if (<alt.pred>) {
-    retval.st = <alt.alt>;
-}<\n>
-<else>
-{
-    retval.st = <alt.alt>;
-}<\n>
-<endif>
->>
-
-rewriteEmptyTemplate(alts) ::= <<
-null;
->>
-
-/** Invoke a template with a set of attribute name/value pairs.
- *  Set the value of the rule's template *after* having set
- *  the attributes because the rule's template might be used as
- *  an attribute to build a bigger template; you get a self-embedded
- *  template.
- */
-rewriteExternalTemplate(name,args) ::= <%
-templateLib.getInstanceOf("<name>"<if(args)>,
-  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
-  <endif>)
-%>
-
-/** expr is a string expression that says what template to load */
-rewriteIndirectTemplate(expr,args) ::= <%
-templateLib.getInstanceOf(<expr><if(args)>,
-  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
-  <endif>)
-%>
-
-/** Invoke an inline template with a set of attribute name/value pairs */
-rewriteInlineTemplate(args, template) ::= <%
-new StringTemplate(templateLib, "<template>"<if(args)>,
-  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
-  <endif>)
-%>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-<action>
->>
-
-/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
-actionSetAttribute(st,attrName,expr) ::= <<
-(<st>).setAttribute("<attrName>",<expr>);
->>
-
-/** Translate %{stringExpr} */
-actionStringConstructor(stringExpr) ::= <<
-new StringTemplate(templateLib,<stringExpr>)
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/JavaScript.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/JavaScript.stg
deleted file mode 100755
index e0adab4..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/JavaScript.stg
+++ /dev/null
@@ -1,1333 +0,0 @@
-group JavaScript;
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-           docComment, recognizer,
-           name, tokens, tokenNames, rules, cyclicDFAs,
-       bitsets, buildTemplate, buildAST, rewriteMode, profile,
-       backtracking, synpreds, memoize, numRules,
-       fileName, ANTLRVersion, generatedTimestamp, trace,
-       scopes, superClass, literals) ::=
-<<
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-<actions.(actionScope).header>
-
-<@imports>
-<if(TREE_PARSER)>
-<endif>
-<@end>
-
-<docComment>
-<recognizer>
->>
-
-lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode, superClass="org.antlr.runtime.Lexer") ::= <<
-var <grammar.recognizerName> = function(input, state<grammar.delegators:{g|, <g:delegateName()>}>) {
-// alternate constructor @todo
-// public <grammar.recognizerName>(CharStream input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
-// public <grammar.recognizerName>(CharStream input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
-    if (!state) {
-        state = new org.antlr.runtime.RecognizerSharedState();
-    }
-
-    (function(){
-        <actions.lexer.members>
-    }).call(this);
-
-    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this);}; separator="\n">
-    <grammar.recognizerName>.superclass.constructor.call(this, input, state);
-    <if(memoize)>
-    <if(grammar.grammarIsRoot)>
-    this.state.ruleMemo = {};
-    <endif>
-    <endif>
-
-    <grammar.directDelegates:
-       {g|this.<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
-    <grammar.delegators:
-       {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
-    <last(grammar.delegators):{g|this.gParent = this.<g:delegateName()>;}>
-
-    <actions.lexer.init>
-};
-
-org.antlr.lang.augmentObject(<grammar.recognizerName>, {
-    <tokens:{<it.name>: <it.type>}; separator=",\n">
-});
-
-(function(){
-var HIDDEN = org.antlr.runtime.Token.HIDDEN_CHANNEL,
-    EOF = org.antlr.runtime.Token.EOF;
-org.antlr.lang.extend(<grammar.recognizerName>, <@superClassName><superClass><@end>, {
-    <tokens:{<it.name> : <it.type>,}; separator="\n">
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-    getGrammarFileName: function() { return "<fileName>"; }
-});
-org.antlr.lang.augmentObject(<grammar.recognizerName>.prototype, {
-<if(filterMode)>
-    <filteringNextToken()>
-<endif>
-    <rules; separator=",\n\n">
-
-    <synpreds:{p | <lexerSynpred(p)>}; separator=",\n">
-}, true); // important to pass true to overwrite default implementations
-
-<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-})();
->>
-
-/** An override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error can be generated upon failure; just rewind, consume
- *  a char, and then try again.  backtracking needs to be set as well.
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-nextToken: function() {
-    while (true) {
-        if ( this.input.LA(1)==org.antlr.runtime.CharStream.EOF ) {
-            return org.antlr.runtime.Token.EOF_TOKEN;
-        }
-        this.state.token = null;
-        this.state.channel = org.antlr.runtime.Token.DEFAULT_CHANNEL;
-        this.state.tokenStartCharIndex = this.input.index();
-        this.state.tokenStartCharPositionInLine = this.input.getCharPositionInLine();
-        this.state.tokenStartLine = this.input.getLine();
-        this.state.text = null;
-        try {
-            var m = this.input.mark();
-            this.state.backtracking=1; <! means we won't throw slow exception !>
-            this.state.failed=false;
-            this.mTokens();
-            this.state.backtracking=0;
-            <! mTokens backtracks with synpred at backtracking==2
-               and we set the synpredgate to allow actions at level 1. !>
-            if ( this.state.failed ) {
-                this.input.rewind(m);
-                this.input.consume(); <! advance one char and try again !>
-            }
-            else {
-                this.emit();
-                return this.state.token;
-            }
-        }
-        catch (re) {
-            // shouldn't happen in backtracking mode, but...
-            if (re instanceof org.antlr.runtime.RecognitionException) {
-                this.reportError(re);
-                this.recover(re);
-            } else {
-                throw re;
-            }
-        }
-    }
-},
-
-memoize: function(input, ruleIndex, ruleStartIndex) {
-    if (this.state.backtracking>1) {
-        <grammar.recognizerName>.superclass.prototype.memoize.call(this, input, ruleIndex, ruleStartIndex);
-    }
-},
-
-alreadyParsedRule: function(input, ruleIndex) {
-    if (this.state.backtracking>1) {
-        return <grammar.recognizerName>.superclass.prototype.alreadyParsedRule.call(this, input, ruleIndex);
-    }
-    return false;
-},
-
-
->>
-
-actionGate() ::= "this.state.backtracking===0"
-
-filteringActionGate() ::= "this.state.backtracking===1"
-
-/** How to generate a parser */
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass,
-              ASTLabelType="Object", labelType, members, rewriteElementType) ::= <<
-<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-var <grammar.recognizerName> = function(input, state<grammar.delegators:{g|, <g:delegateName()>}>) {
-    if (!state) {
-        state = new org.antlr.runtime.RecognizerSharedState();
-    }
-
-    (function(){
-        <members>
-    }).call(this);
-
-    <grammar.recognizerName>.superclass.constructor.call(this, input, state);
-
-    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this);}; separator="\n">
-
-        <parserCtorBody()>
-        <grammar.directDelegates:
-         {g|this.<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
-         <grammar.indirectDelegates:{g | this.<g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
-         <last(grammar.delegators):{g|this.gParent = this.<g:delegateName()>;}>
-
-    /* @todo only create adaptor if output=AST */
-    this.adaptor = new org.antlr.runtime.tree.CommonTreeAdaptor();<\n>
-};
-
-org.antlr.lang.augmentObject(<grammar.recognizerName>, {
-    <tokens:{<it.name>: <it.type>}; separator=",\n">
-});
-
-(function(){
-// public class variables
-var <tokens:{<it.name>= <it.type>}; separator=",\n    ">;
-<if(TREE_PARSER)>
-var UP = org.antlr.runtime.Token.UP,
-    DOWN = org.antlr.runtime.Token.DOWN;
-<endif>
-
-
-// public instance methods/vars
-org.antlr.lang.extend(<grammar.recognizerName>, org.antlr.runtime.<@superClassName><superClass><@end>, {
-    <@members>
-    <@end>
-    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-
-    getTokenNames: function() { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; },
-    getGrammarFileName: function() { return "<fileName>"; }
-});
-org.antlr.lang.augmentObject(<grammar.recognizerName>.prototype, {
-
-    <rules; separator=",\n\n">
-
-<! generate rule/method definitions for imported rules so they
-   appear to be defined in this recognizer. !>
-    // Delegated rules
-<grammar.delegatedRules:{ruleDescriptor|
-    , <ruleDescriptor.name>: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) \{ <if(ruleDescriptor.hasReturnValue)>return <endif>this.<ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); \}}>
-
-
-
-    <synpreds:{p | <synpred(p)>}; separator=",\n">
-
-}, true); // important to pass true to overwrite default implementations
-
-<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-// public class variables
-org.antlr.lang.augmentObject(<grammar.recognizerName>, {
-<if(grammar.grammarIsRoot)>
-    tokenNames: ["\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">],<\n>
-<endif>
-    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-                    words64=it.bits); separator=",\n">
-});
-
-})();
->>
-
-parserCtorBody() ::= <<
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-this.state.ruleMemo = {};<\n> <! index from 1..n !>
-<endif>
-<endif>
-<grammar.delegators:
- {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType="Object", superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="TokenStream", rewriteElementType="Token", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="var", superClass="tree.TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
-<genericParser(inputStreamType="TreeNodeStream", rewriteElementType="Node", ...)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  or parameters, we just generate the simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-// $ANTLR start "<ruleName>"
-<ruleName>_fragment: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) {
-<if(trace)>
-    this.traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
-    try {
-        <block>
-    }
-    finally {
-        this.traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
-    }
-<else>
-    <block>
-<endif>
-},
-// $ANTLR end "<ruleName>"
->>
-
-synpred(name) ::= <<
-<name>: function() {
-    this.state.backtracking++;
-    <@start()>
-    var start = this.input.mark();
-    try {
-        this.<name>_fragment(); // can never throw exception
-    } catch (re) {
-        alert("impossible: "+re.toString());
-    }
-    var success = !this.state.failed;
-    this.input.rewind(start);
-    <@stop()>
-    this.state.backtracking--;
-    this.state.failed=false;
-    return success;
-}
->>
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if ( this.state.backtracking>0 && this.alreadyParsedRule(this.input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (this.state.failed) return <ruleReturnValue()>;<endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (this.state.backtracking>0) {this.state.failed=true; return <ruleReturnValue()>;}<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(scope=ruleDescriptor.returnScope)>
-
-// <fileName>:<description>
-// $ANTLR start "<ruleName>"
-<ruleDescriptor.actions.decorate>
-<ruleName>: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) {
-    <if(trace)>this.traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleLabelDefs()>
-    <ruleDescriptor.actions.init>
-    <@preamble()>
-    try {
-        <ruleMemoization(name=ruleName)>
-        <block>
-        <ruleCleanUp()>
-        <(ruleDescriptor.actions.after):execAction()>
-    }
-<if(exceptions)>
-    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-    <actions.(actionScope).rulecatch>
-<else>
-    catch (re) {
-        if (re instanceof org.antlr.runtime.RecognitionException) {
-            this.reportError(re);
-            this.recover(this.input,re);
-            <@setErrorReturnValue()>
-        } else {
-            throw re;
-        }
-    }<\n>
-<endif>
-<endif>
-<endif>
-    finally {
-        <if(trace)>this.traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <memoize()>
-        <ruleScopeCleanUp()>
-        <finally>
-    }
-    <@postamble()>
-    return <ruleReturnValue()>;
-}
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>) {
-    <e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-var retval = new <returnType()>();
-retval.start = this.input.LT(1);<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-var <a.name> = <if(a.initValue)><a.initValue><else>null<endif>;
-}>
-<endif>
-<if(memoize)>
-var <ruleDescriptor.name>_StartIndex = this.input.index();
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{this.<it>_stack.push({});}; separator="\n">
-<ruleDescriptor.ruleScope:{this.<it.name>_stack.push({});}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{this.<it>_stack.pop();}; separator="\n">
-<ruleDescriptor.ruleScope:{this.<it.name>_stack.pop();}; separator="\n">
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
-    :{var <it.label.text> = null;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
-    :{var list_<it.label.text>=null;}; separator="\n"
->
-<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|var <ll.label.text> = null;}; separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{var <it.label.text>=null;}; separator="\n"
->
-<ruleDescriptor.charLabels:{var <it.label.text>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{var list_<it.label.text>=null;}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <<
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-retval
-<endif>
-<endif>
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-retval.stop = this.input.LT(-1);<\n>
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if ( this.state.backtracking>0 ) { this.memoize(this.input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
-<endif>
-<endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-// $ANTLR start <ruleName>
-m<ruleName>: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>)  {
-    <if(trace)>this.traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    try {
-<if(nakedBlock)>
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block><\n>
-<else>
-        var _type = this.<ruleName>;
-        var _channel = org.antlr.runtime.BaseRecognizer.DEFAULT_TOKEN_CHANNEL;
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block>
-        <ruleCleanUp()>
-        this.state.type = _type;
-        this.state.channel = _channel;
-        <(ruleDescriptor.actions.after):execAction()>
-<endif>
-    }
-    finally {
-        <if(trace)>this.traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <ruleScopeCleanUp()>
-        <memoize()>
-    }
-},
-// $ANTLR end "<ruleName>"
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-mTokens: function() {
-    <block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-var alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-<@prebranch()>
-switch (alt<decisionNumber>) {
-    <alts:altSwitchCase()>
-}
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-var alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-switch (alt<decisionNumber>) {
-    <alts:altSwitchCase()>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-var cnt<decisionNumber>=0;
-<decls>
-<@preloop()>
-loop<decisionNumber>:
-do {
-    var alt<decisionNumber>=<maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) {
-    <alts:altSwitchCase()>
-    default :
-        if ( cnt<decisionNumber> >= 1 ) {
-            break loop<decisionNumber>;
-        }
-        <ruleBacktrackFailure()>
-            var eee = new org.antlr.runtime.EarlyExitException(<decisionNumber>, this.input);
-            <@earlyExitException()>
-            throw eee;
-    }
-    cnt<decisionNumber>++;
-} while (true);
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@preloop()>
-loop<decisionNumber>:
-do {
-    var alt<decisionNumber>=<maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) {
-    <alts:altSwitchCase()>
-    default :
-        break loop<decisionNumber>;
-    }
-} while (true);
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase() ::= <<
-case <i> :
-    <@prealt()>
-    <it>
-    break;<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-// <fileName>:<description>
-<! (function() { /* @todo4 (do we really need a new scope?) */ !>
-<@declarations()>
-<elements:element()>
-<rew>
-<@cleanup()>
-<! }).call(this); !>
->>
-
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element() ::= <<
-<@prematch()>
-<it.el><\n>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label>=<endif>this.match(this.input,<token>,<grammar.recognizerName>.FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-listLabel(label,elem) ::= <<
-if (org.antlr.lang.isNull(list_<label>)) list_<label> = [];
-list_<label>.push(<elem>);<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = this.input.LA(1);<\n>
-<endif>
-this.match(<char>); <checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = this.input.LA(1);<\n>
-<endif>
-this.matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,postmatchCode="") ::= <<
-<if(label)>
-<if(LEXER)>
-<label>= this.input.LA(1);<\n>
-<else>
-<label>=this.input.LT(1);<\n>
-<endif>
-<endif>
-if ( <s> ) {
-    this.input.consume();
-    <postmatchCode>
-<if(!LEXER)>
-    this.state.errorRecovery=false;
-<endif>
-    <if(backtracking)>this.state.failed=false;<endif>
-}
-else {
-    <ruleBacktrackFailure()>
-    var mse = new org.antlr.runtime.MismatchedSetException(null,this.input);
-    <@mismatchedSetException()>
-<if(LEXER)>
-    this.recover(mse);
-    throw mse;
-<else>
-    throw mse;
-    <! use following code to make it recover inline; remove throw mse;
-    this.recoverFromMismatchedSet(this.input,mse,<grammar.recognizerName>.FOLLOW_set_in_<ruleName><elementIndex>);
-    !>
-<endif>
-}<\n>
->>
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex) ::= <<
-<if(label)>
-var <label>Start = this.getCharIndex();
-this.match(<string>); <checkRuleBacktrackFailure()>
-var <label> = new org.antlr.runtime.CommonToken(this.input, org.antlr.runtime.Token.INVALID_TOKEN_TYPE, org.antlr.runtime.Token.DEFAULT_CHANNEL, <label>Start, this.getCharIndex()-1);
-<else>
-this.match(<string>); <checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-wildcard(label,elementIndex) ::= <<
-<if(label)>
-<label>=this.input.LT(1);<\n>
-<endif>
-this.matchAny(this.input); <checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(label,elementIndex) ::= <<
-<wildcard(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = this.input.LA(1);<\n>
-<endif>
-this.matchAny(); <checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabel(elem=label,...)>
->>
-
-// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-this.pushFollow(<grammar.recognizerName>.FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
-<if(label)><label>=<endif>this.<if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
-this.state._fsp--;
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
-<if(label)>
-var <label>Start<elementIndex> = this.getCharIndex();
-this.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<label> = new org.antlr.runtime.CommonToken(this.input, org.antlr.runtime.Token.INVALID_TOKEN_TYPE, org.antlr.runtime.Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, this.getCharIndex()-1);
-<else>
-this.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-var <label>Start<elementIndex> = this.getCharIndex();
-this.match(EOF); <checkRuleBacktrackFailure()>
-var <label> = new org.antlr.runtime.CommonToken(this.input, this.EOF, org.antlr.runtime.Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, this.getCharIndex()-1);
-<else>
-this.match(this.EOF); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-// used for left-recursive rules
-recRuleDefArg()                       ::= "int <recRuleArg()>"
-recRuleArg()                          ::= "_p"
-recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
-recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
-recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList,enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( this.input.LA(1)==org.antlr.runtime.Token.DOWN ) {
-    this.match(this.input, org.antlr.runtime.Token.DOWN, null); <checkRuleBacktrackFailure()>
-    <children:element()>
-    this.match(this.input, org.antlr.runtime.Token.UP, null); <checkRuleBacktrackFailure()>
-}
-<else>
-this.match(this.input, org.antlr.runtime.Token.DOWN, null); <checkRuleBacktrackFailure()>
-<children:element()>
-this.match(this.input, org.antlr.runtime.Token.UP, null); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if ( !(<evalPredicate(...)>) ) {
-    <ruleBacktrackFailure()>
-    throw new org.antlr.runtime.FailedPredicateException(this.input, "<ruleName>", "<description>");
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-var LA<decisionNumber>_<stateNumber> = this.input.LA(<k>);<\n>
-<edges; separator="\nelse ">
-else {
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    var nvae =
-        new org.antlr.runtime.NoViableAltException("<description>", <decisionNumber>, <stateNumber>, this.input);<\n>
-    <@noViableAltException()>
-    throw nvae;<\n>
-<endif>
-}
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and closer to what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-var LA<decisionNumber>_<stateNumber> = this.input.LA(<k>);<\n>
-<edges; separator="\nelse ">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-var LA<decisionNumber>_<stateNumber> = this.input.LA(<k>);<\n>
-<edges; separator="\nelse "><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
-<else>
-else {
-    alt<decisionNumber>=<eotPredictsAlt>;
-}<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
-    <targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( this.input.LA(<k>) ) {
-<edges; separator="\n">
-default:
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    var nvae =
-        new org.antlr.runtime.NoViableAltException("<description>", <decisionNumber>, <stateNumber>, this.input);<\n>
-    <@noViableAltException()>
-    throw nvae;<\n>
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( this.input.LA(<k>) ) {
-    <edges; separator="\n">
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( this.input.LA(<k>) ) {
-<edges; separator="\n"><\n>
-<if(eotPredictsAlt)>
-default:
-    alt<decisionNumber>=<eotPredictsAlt>;
-    break;<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{case <it>:}; separator="\n">
-    <targetState>
-    break;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = this.dfa<decisionNumber>.predict(this.input);
->>
-
-/* Dump DFA tables as run-length-encoded Strings of octal values.
- * Can't use hex as compiler translates them before compilation.
- * These strings are split into multiple, concatenated strings.
- * Java puts them back together at compile time thankfully.
- * Java cannot handle large static arrays, so we're stuck with this
- * encode/decode approach.  See analysis and runtime DFA for
- * the encoding methods.
- */
-cyclicDFA(dfa) ::= <<
-org.antlr.lang.augmentObject(<grammar.recognizerName>, {
-    DFA<dfa.decisionNumber>_eotS:
-        "<dfa.javaCompressedEOT; wrap="\"+\n    \"">",
-    DFA<dfa.decisionNumber>_eofS:
-        "<dfa.javaCompressedEOF; wrap="\"+\n    \"">",
-    DFA<dfa.decisionNumber>_minS:
-        "<dfa.javaCompressedMin; wrap="\"+\n    \"">",
-    DFA<dfa.decisionNumber>_maxS:
-        "<dfa.javaCompressedMax; wrap="\"+\n    \"">",
-    DFA<dfa.decisionNumber>_acceptS:
-        "<dfa.javaCompressedAccept; wrap="\"+\n    \"">",
-    DFA<dfa.decisionNumber>_specialS:
-        "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>",
-    DFA<dfa.decisionNumber>_transitionS: [
-            <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
-    ]
-});
-
-org.antlr.lang.augmentObject(<grammar.recognizerName>, {
-    DFA<dfa.decisionNumber>_eot:
-        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_eotS),
-    DFA<dfa.decisionNumber>_eof:
-        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_eofS),
-    DFA<dfa.decisionNumber>_min:
-        org.antlr.runtime.DFA.unpackEncodedStringToUnsignedChars(<grammar.recognizerName>.DFA<dfa.decisionNumber>_minS),
-    DFA<dfa.decisionNumber>_max:
-        org.antlr.runtime.DFA.unpackEncodedStringToUnsignedChars(<grammar.recognizerName>.DFA<dfa.decisionNumber>_maxS),
-    DFA<dfa.decisionNumber>_accept:
-        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_acceptS),
-    DFA<dfa.decisionNumber>_special:
-        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_specialS),
-    DFA<dfa.decisionNumber>_transition: (function() {
-        var a = [],
-            i,
-            numStates = <grammar.recognizerName>.DFA<dfa.decisionNumber>_transitionS.length;
-        for (i=0; i\<numStates; i++) {
-            a.push(org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_transitionS[i]));
-        }
-        return a;
-    })()
-});
-
-<grammar.recognizerName>.DFA<dfa.decisionNumber> = function(recognizer) {
-    this.recognizer = recognizer;
-    this.decisionNumber = <dfa.decisionNumber>;
-    this.eot = <grammar.recognizerName>.DFA<dfa.decisionNumber>_eot;
-    this.eof = <grammar.recognizerName>.DFA<dfa.decisionNumber>_eof;
-    this.min = <grammar.recognizerName>.DFA<dfa.decisionNumber>_min;
-    this.max = <grammar.recognizerName>.DFA<dfa.decisionNumber>_max;
-    this.accept = <grammar.recognizerName>.DFA<dfa.decisionNumber>_accept;
-    this.special = <grammar.recognizerName>.DFA<dfa.decisionNumber>_special;
-    this.transition = <grammar.recognizerName>.DFA<dfa.decisionNumber>_transition;
-};
-
-org.antlr.lang.extend(<grammar.recognizerName>.DFA<dfa.decisionNumber>, org.antlr.runtime.DFA, {
-    getDescription: function() {
-        return "<dfa.description>";
-    },
-    <@errorMethod()>
-<if(dfa.specialStateSTs)>
-    specialStateTransition: function(s, input) {
-        var _s = s;
-        /* bind to recognizer so semantic predicates can be evaluated */
-        var retval = (function(s, input) {
-            switch ( s ) {
-            <dfa.specialStateSTs:{state |
-            case <i0> : <! compressed special state numbers 0..n-1 !>
-                <state>}; separator="\n">
-            }
-        }).call(this.recognizer, s, input);
-        if (!org.antlr.lang.isUndefined(retval)) {
-            return retval;
-        }
-<if(backtracking)>
-        if (this.recognizer.state.backtracking>0) {this.recognizer.state.failed=true; return -1;}<\n>
-<endif>
-        var nvae =
-            new org.antlr.runtime.NoViableAltException(this.getDescription(), <dfa.decisionNumber>, _s, input);
-        this.error(nvae);
-        throw nvae;
-    },<\n>
-<endif>
-    dummy: null
-});<\n>
->>
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-var LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
-<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-var index<decisionNumber>_<stateNumber> = input.index();
-input.rewind();<\n>
-<endif>
-s = -1;
-<edges; separator="\nelse ">
-<if(semPredState)> <! return input cursor to state before we rewound !>
-input.seek(index<decisionNumber>_<stateNumber>);<\n>
-<endif>
-if ( s>=0 ) return s;
-break;
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-s = <targetStateNumber>;<\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left>&&<right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(...)>)"
-
-evalPredicate(pred,description) ::= "(<pred>)"
-
-evalSynPredicate(pred,description) ::= "this.<pred>()"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "this.input.LA(<k>)==<atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
-(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
->>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(this.input.LA(<k>)\>=<lower> && this.input.LA(<k>)\<=<upper>)"
-
-setTest(ranges) ::= "<ranges; separator=\"||\">"
-
-// A T T R I B U T E S
-
-globalAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-<scope.name>_stack: [],<\n>
-<endif>
->>
-
-ruleAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-<scope.name>_stack: [],<\n>
-<endif>
->>
-
-returnStructName() ::= "<it.name>_return"
-
-returnType() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-/** Generate the type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-<referencedRule.grammar.recognizerName>.<referencedRule.name>_return
-<else>
-<if(referencedRule.hasSingleReturnValue)>
-<referencedRule.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-delegateName() ::= <<
-<if(it.label)><it.label><else>g<it.name><endif>
->>
-
-/** Using a type-to-init-value map, try to init a type; if it is not in the
- *  table it must be an object, so the default value is "null".
- */
-initValue(typeName) ::= <<
-null
->>
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <<
-<!<ruleLabelType(referencedRule=label.referencedRule)>!> var <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;
->>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-// inline static return class
-<ruleDescriptor:returnStructName()>: (function() {
-    <returnType()> = function(){};
-    org.antlr.lang.extend(<returnType()>,
-                      org.antlr.runtime.<if(TREE_PARSER)>tree.Tree<else>Parser<endif>RuleReturnScope,
-    {
-        <@ruleReturnMembers()>
-    });
-    return;
-})(),
-<endif>
->>
-
-parameterScope(scope) ::= <<
-<scope.attributes:{<it.decl>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <<
-<if(negIndex)>
-(this.<scope>_stack[this.<scope>_stack.length-<negIndex>-1]).<attr.name>
-<else>
-<if(index)>
-(this.<scope>_stack[<index>]).<attr.name>
-<else>
-org.antlr.lang.array.peek(this.<scope>_stack).<attr.name>
-<endif>
-<endif>
->>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
-<if(negIndex)>
-(this.<scope>_stack[this.<scope>_stack.length-<negIndex>-1]).<attr.name> =<expr>;
-<else>
-<if(index)>
-(this.<scope>_stack[<index>]).<attr.name> =<expr>;
-<else>
-org.antlr.lang.array.peek(this.<scope>_stack).<attr.name> =<expr>;
-<endif>
-<endif>
->>
-
-/** $x is either global scope or x is rule with dynamic scope; refers
- *  to stack itself not top of stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "this.<scope>_stack"
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-(<scope>!==null?<scope>.<attr.name>:<initValue(attr.type)>)
-<else>
-<scope>
-<endif>
->>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>
-<else>
-<attr.name>
-<endif>
->>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name> =<expr>;
-<else>
-<attr.name> =<expr>;
-<endif>
->>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach
-
-tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>?<scope>.getText():null)"
-tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>?<scope>.getType():0)"
-tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>?<scope>.getLine():0)"
-tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>?<scope>.getCharPositionInLine():0)"
-tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>?<scope>.getChannel():0)"
-tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>?<scope>.getTokenIndex():0)"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>?parseInt(<scope>.getText(), 10):0)"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>?<scope>.start:null)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>?<scope>.stop:null)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>?<scope>.tree:null)"
-ruleLabelPropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-(<scope>?(this.input.getTokenStream().toString(
-  this.input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
-  this.input.getTreeAdaptor().getTokenStopIndex(<scope>.start))):null)
-<else>
-(<scope>?this.input.toString(<scope>.start,<scope>.stop):null)
-<endif>
->>
-
-ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>?<scope>.getType():0)"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>?<scope>.getLine():0)"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>?<scope>.getCharPositionInLine():-1)"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>?<scope>.getChannel():0)"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>?<scope>.getTokenIndex():0)"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>?<scope>.getText():0)"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "(retval.start)"
-rulePropertyRef_stop(scope,attr) ::= "(retval.stop)"
-rulePropertyRef_tree(scope,attr) ::= "(retval.tree)"
-rulePropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-this.input.getTokenStream().toString(
-  this.input.getTreeAdaptor().getTokenStartIndex(retval.start),
-  this.input.getTreeAdaptor().getTokenStopIndex(retval.start))
-<else>
-this.input.toString(retval.start,this.input.LT(-1))
-<endif>
->>
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-lexerRulePropertyRef_text(scope,attr) ::= "this.getText()"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "this.state.tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "this.state.tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
-lexerRulePropertyRef_start(scope,attr) ::= "this.state.tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(this.getCharIndex()-1)"
-lexerRulePropertyRef_int(scope,attr) ::= "parseInt(<scope>.getText(),10)"
-
-// setting $st and $tree is allowed in local rule. everything else
-// is flagged as error
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
-
-
-/** How to execute an action */
-execAction(action) ::= <<
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) {
-  <action>
-}
-<else>
-<action>
-<endif>
->>
-
-/** How to always execute an action even when backtracking */
-execForcedAction(action) ::= "<action>"
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-<! @todo overflow issue !>
-<name>: new org.antlr.runtime.BitSet([<words64:{<it>};separator=",">])
->>
-
-codeFileExtension() ::= ".js"
-
-true() ::= "true"
-false() ::= "false"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/AST.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/AST.stg
deleted file mode 100644
index 4e9215e..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/AST.stg
+++ /dev/null
@@ -1,563 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2006, 2007 Kay Roepke 2010 Alan Condit
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-@genericParserHeaderFile.memVars() ::= <<
-/* AST parserHeaderFile.memVars */
-NSInteger ruleLevel;
-NSArray *ruleNames;
-<@super.memVars()>  /* AST super.memVars */
-<parserMemVars()>   /* AST parserMemVars */
->>
-
-@genericParserHeaderFile.properties() ::= <<
-/* AST parserHeaderFile.properties */
-<@super.properties()>  /* AST super.properties */
-<parserProperties()>   /* AST parserproperties */
->>
-
-@genericParserHeaderFile.methodsDecl() ::= <<
-/* AST parserHeaderFile.methodsDecl */
-<@super.methodsDecl()>  /* AST super.methodsDecl */
-<parserMethodsDecl()>   /* AST parsermethodsDecl */
->>
-
-@genericParser.synthesize() ::= <<
-/* AST genericParser.synthesize */
-<@super.synthesize()>
-<parserSynthesize()>
->>
-
-@genericParser.methods() ::= <<
-/* AST genericParser.methods */
-<@super.methods()>
-<parserMethods()>
->>
-
-/* additional init code for tree support */
-@genericParser.init() ::= <<
-/* AST genericParser.init */
-<@super.init()>
-[self setTreeAdaptor:[[ANTLRCommonTreeAdaptor newTreeAdaptor] retain]];
->>
-
-@genericParser.dealloc() ::= <<
-/* AST genericParser.dealloc */
-[self setTreeAdaptor:nil];
-<@super.dealloc()>
->>
-
-/* Add an adaptor property that knows how to build trees */
-parserMemVars() ::= <<
-/* AST parserMemVars */
-id\<ANTLRTreeAdaptor> treeAdaptor;
->>
-
-/* Add an adaptor property that knows how to build trees */
-parserProperties() ::= <<
-/* AST parserProperties */
-@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id\<ANTLRTreeAdaptor> treeAdaptor;
->>
-
-/** Declaration of additional tree support methods - go in interface of parserHeaderFile() */
-parserMethodsDecl() ::= <<
-/* AST parserMethodsDecl */
-- (id\<ANTLRTreeAdaptor>) getTreeAdaptor;
-- (void) setTreeAdaptor:(id\<ANTLRTreeAdaptor>)theTreeAdaptor;
->>
-
-/* Add an adaptor property that knows how to build trees */
-parserSynthesize() ::= <<
-/* AST parserProperties */
-@synthesize treeAdaptor;
->>
-
-/** Definition of additional tree support methods - go in implementation of genericParser() */
-parserMethods() ::= <<
-/* AST parserMethods */
-- (id\<ANTLRTreeAdaptor>) getTreeAdaptor
-{
-	return treeAdaptor;
-}
-
-- (void) setTreeAdaptor:(id\<ANTLRTreeAdaptor>)aTreeAdaptor
-{
-	if (aTreeAdaptor != treeAdaptor) {
-		treeAdaptor = aTreeAdaptor;
-	}
-}
->>
-
-/** additional memVars for returnScopes */
-@returnScopeInterface.memVars() ::= <<
-/* AST returnScopeInterface.memVars */
-<recognizer.ASTLabelType; null="ANTLRCommonTree"> *tree;
->>
-
-/** the interface of returnScope properties */
-@returnScopeInterface.properties() ::= <<
-/* AST returnScopeInterface.properties */
-@property (retain, getter=getTree, setter=setTree:) <recognizer.ASTLabelType; null="ANTLRCommonTree"> *tree;
->>
-
-/** the interface of returnScope methodsDecl */
-@returnScopeInterface.methodsDecl() ::= <<
-/* AST returnScopeInterface.methodsDecl */
-- (<recognizer.ASTLabelType; null="ANTLRCommonTree"> *)getTree;<\n>
-- (void) setTree:(<recognizer.ASTLabelType; null="ANTLRCommonTree"> *)aTree;<\n>
->>
-
-/** the implementation of returnScope synthesize */
-@returnScopeImplementation.synthesize() ::= <<
-/* AST returnScope.synthesize */
-@synthesize tree;
->>
-
-/** the implementation of returnScope methods */
-@returnScopeImplementation.methods() ::= <<
-/* AST returnScope.methods */
-- (<ASTLabelType> *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(<ASTLabelType> *)aTree
-{
-    if (tree != aTree) {
-        if ( tree ) [tree release];
-        if ( aTree ) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    self.tree = nil;
-    [super dealloc];
-}
-
->>
-
-/** Add a variable to track rule's return AST */
-ruleDeclarations() ::= <<
-/* AST ruleDeclarations */
-<super.ruleDeclarations()>
-<ASTLabelType> *root_0 = nil;<\n>
->>
-
-ruleLabelDefs() ::= <<
-/* AST ruleLabelDefs */
-<super.ruleLabelDefs()>
-<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
-  ruleDescriptor.wildcardTreeListLabels]:{it | <ASTLabelType> *<it.label.text>_tree=nil;}; separator="\n">
-<ruleDescriptor.tokenListLabels:{it | <ASTLabelType> *<it.label.text>_tree = nil;}; separator="\n">
-<ruleDescriptor.allTokenRefsInAltsWithRewrites:{it | ANTLRRewriteRuleTokenStream *stream_<it> =
-    [[ANTLRRewriteRule<rewriteElementType>Stream newANTLRRewriteRule<rewriteElementType>Stream:treeAdaptor
-                                                     description:@"token <it>"] retain];}; separator="\n">
-<ruleDescriptor.allRuleRefsInAltsWithRewrites:{it | ANTLRRewriteRuleSubtreeStream *stream_<it> =
-    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-                                                        description:@"rule <it>"] retain];}; separator="\n">
->>
-
-ruleCleanUp() ::= <<
-/* AST ruleCleanUp */
-<super.ruleCleanUp()>
-<[ruleDescriptor.allTokenRefsInAltsWithRewrites,ruleDescriptor.allRuleRefsInAltsWithRewrites]:{it | [stream_<it> release];}; separator="\n">
-<!
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(backtracking)>if ( state.backtracking == 0 ) {<\n>
-<endif>
-    [<prevRuleRootRef()> setTree:(<ASTLabelType> *)[treeAdaptor rulePostProcessing:root_0]];<\n>
-    [treeAdaptor setTokenBoundaries:[<prevRuleRootRef()> getTree]
-                               From:[<prevRuleRootRef()> getStart]
-                                 To:[<prevRuleRootRef()> getStop]];<\n>
-<if(backtracking)>}<\n>
-<endif>
-<endif>
-[root_0 release];
-!>
->>
-
-rewriteCodeLabelsCleanUp() ::= <<
-/* AST rewriteCodeLabelsCleanUp */
- <referencedTokenLabels:{it | [stream_<it> release];}; separator="\n">
- <referencedTokenListLabels:{it | [stream_<it> release];}; separator="\n">
- <referencedRuleLabels:{it | [stream_<it> release];}; separator="\n">
- <referencedRuleListLabels:{it | [stream_<it> release];}; separator="\n">
->>
-
-/** When doing auto AST construction, we must define some variables;
- *  These should be turned off if doing rewrites.  This must be a "mode"
- *  as a rule could have both rewrite and AST within the same alternative
- *  block.
- */
-@alt.declarations() ::= <<
-<if(autoAST)>
-<if(outerAlt)>
-<if(!rewriteMode)>
-root_0 = (<ASTLabelType> *)[[[treeAdaptor class] newEmptyTree] retain];<\n>
-<endif>
-<endif>
-<endif>
->>
-
-// T r a c k i n g  R u l e  E l e m e n t s
-
-/** ID and track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex) ::= <<
-<! <super.tokenRef(...)> !>
-<tokenRefBang(...)> <! Track implies no auto AST construction!>
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) <endif>
-    [stream_<token> addElement:<label>];<\n>
->>
-
-/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
- *  to the tracking list stream_ID for use in the rewrite.
- */
-tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-/** ^(ID ...) track for rewrite */
-tokenRefRuleRootTrack(token,label,elementIndex) ::= <<
-<! <super.tokenRef(...)> !>
-<tokenRefBang(...)>
-<if(backtracking)>
-if ( !<actions.(actionScope).synpredgate> ) <endif>
-    [stream_<token> addElement:<label>];<\n>
->>
-
-/** Match ^(label+=TOKEN ...) track for rewrite */
-tokenRefRuleRootTrackAndListLabel(token,label,elementIndex) ::= <<
-<tokenRefRuleRootTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
-[stream_<rule.name> addElement:[<label> getTree]];
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefTrack(...)>
-<listLabel(elem={[<label> getTree]},...)>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-<! <super.ruleRefRuleRoot(...)> !>
-<ruleRefRuleRoot(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
-    [stream_<rule.name> addElement:[<label> getTree]];<\n>
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRootTrack(...)>
-<listLabel(elem={[<label> getTree]},...)>
->>
-
-// R e w r i t e
-
-rewriteCode(
-	alts, description,
-	referencedElementsDeep, // ALL referenced elements to right of ->
-	referencedTokenLabels,
-	referencedTokenListLabels,
-	referencedRuleLabels,
-	referencedRuleListLabels,
-	referencedWildcardLabels,
-	referencedWildcardListLabels,
-	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
-<<
-
-// AST REWRITE
-// elements: <referencedElementsDeep; separator=", ">
-// token labels: <referencedTokenLabels; separator=", ">
-// rule labels: <referencedRuleLabels; separator=", ">
-// token list labels: <referencedTokenListLabels; separator=", ">
-// rule list labels: <referencedRuleListLabels; separator=", ">
-// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) {<\n>
-<endif>
-<prevRuleRootRef()>.tree = root_0;<\n>
-<rewriteCodeLabels()>
-root_0 = (<ASTLabelType> *)[[[treeAdaptor class] newEmptyTree] retain];<\n>
-<alts:rewriteAlt(); separator="else ">
-<! if tree parser and rewrite=true !>
-<if(TREE_PARSER)>
-<if(rewriteMode)>
-<prevRuleRootRef()>.tree = (<ASTLabelType>)[treeAdaptor rulePostProcessing:root_0];
-[input replaceChildren:[treeAdaptor getParent:retval.start]
-                  From:[treeAdaptor getChildIndex:retval.start]
-                    To:[treeAdaptor getChildIndex:_last]
-                  With:retval.tree];
-<endif>
-<endif>
-<! if parser or tree-parser && rewrite!=true, we need to set result !>
-<if(!TREE_PARSER)>
-<prevRuleRootRef()>.tree = root_0;<\n>
-<else>
-<if(!rewriteMode)>
-<prevRuleRootRef()>.tree = root_0;<\n>
-<endif>
-<endif>
-<if(backtracking)>
-}
-<endif>
->>
-
-rewriteCodeLabels() ::= <<
-<referencedTokenLabels
-    :{it | ANTLRRewriteRule<rewriteElementType>Stream *stream_<it> =
-    [[ANTLRRewriteRule<rewriteElementType>Stream newANTLRRewriteRule<rewriteElementType>Stream:treeAdaptor description:@"token <it>" element:<it>] retain];};
-    separator="\n"
->
-<referencedTokenListLabels:{it | ANTLRRewriteRule<rewriteElementType>Stream *stream_<it> =
-    [[ANTLRRewriteRule<rewriteElementType>Stream newANTLRRewriteRule<rewriteElementType>Stream:treeAdaptor
-        description:@"token <it>" elements:list_<it>] retain];};
-    separator="\n"
->
-<referencedWildcardLabels:{it | ANTLRRewriteRuleSubtreeStream *stream_<it> =
-    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-        description:@"wildcard <it>" element:<it>] retain];};
-    separator="\n"
->
-<referencedWildcardListLabels:{it | ANTLRRewriteRuleSubtreeStream *stream_<it> =
-    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-        description:@"wildcard <it>" elements:list_<it>] retain];};
-    separator="\n"
->
-<referencedRuleLabels:{it | ANTLRRewriteRuleSubtreeStream *stream_<it> =
-    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-        description:@"token <it>" element:<it>!=nil?[<it> getTree]:nil] retain];};
-    separator="\n"
->
-<referencedRuleListLabels:{it | ANTLRRewriteRuleSubtreeStream *stream_<it> =
-    [[ANTLRRewriteRuleSubtreeStream newANTLRRewriteRuleSubtreeStream:treeAdaptor
-        description:@"token <it>" elements:list_<it>] retain];};
-    separator="\n"
->
->>
-
-/** Generate code for an optional rewrite block; note it uses the deep ref'd element
-  *  list rather than the shallow list used by other blocks.
-  */
-rewriteOptionalBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in the immediate block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-if ( <referencedElementsDeep:{el | [stream_<el> hasNext]}; separator="||"> ) {
-	<alt>
-}
-<referencedElementsDeep:{el | [stream_<el> reset];<\n>}>
->>
-
-rewriteClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in the immediate block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-while ( <referencedElements:{el | [stream_<el> hasNext]}; separator="||"> ) {
-    <alt>
-}
-<referencedElements:{el | [stream_<el> reset];<\n>}>
->>
-
-rewritePositiveClosureBlock(
-	alt,rewriteBlockLevel,
-	referencedElementsDeep, // all nested refs
-	referencedElements, // elements in the immediate block; no nested blocks
-	description) ::=
-<<
-// <fileName>:<description>
-{
-if ( !(<referencedElements:{el | [stream_<el> hasNext]}; separator=" || ">) ) {
-    @throw [ANTLRRewriteEarlyExitException newException];
-}
-while ( <referencedElements:{el | [stream_<el> hasNext]}; separator=" || "> ) {
-    <alt>
-}
-<referencedElements:{el | [stream_<el> reset];<\n>}>
-}
->>
-
-rewriteAlt(a) ::= <<
-// <a.description>
-<if(a.pred)>
-if (<a.pred>) {
-    <a.alt>
-}<\n>
-<else>
-{
-    <a.alt>
-}<\n>
-<endif>
->>
-
-/** For empty rewrites: "r : ... -> ;" */
-rewriteEmptyAlt() ::= "root_0 = nil;"
-
-rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
-// <fileName>:<description>
-{
-    <ASTLabelType> *root_<treeLevel> = (<ASTLabelType> *)[[[treeAdaptor class] newEmptyTree] retain];
-    <root:rewriteElement()>
-    <children:rewriteElement()>
-    [treeAdaptor addChild:root_<treeLevel> toTree:root_<enclosingTreeLevel>];
-}<\n>
->>
-
-rewriteElementList(elements) ::= "<elements:rewriteElement()>"
-
-rewriteElement(e) ::= <<
-<@pregen()>
-<e.el>
->>
-
-/** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,terminalOptions,args) ::= <<
- // TODO: args: <args; separator=", ">
-[treeAdaptor addChild:<createRewriteNodeFromElement(...)> toTree:root_<treeLevel>];<\n>
->>
-
-/** Gen $label ... where defined via label=ID */
-rewriteTokenLabelRef(label,elementIndex) ::= <<
-[treeAdaptor addChild:[stream_<label> nextNode] toTree:root_<treeLevel>];<\n>
->>
-
-/** Gen $label ... where defined via label+=ID */
-rewriteTokenListLabelRef(label,elementIndex) ::= <<
-[treeAdaptor addChild:[stream_<label> nextNode] toTree:root_<treeLevel>];<\n>
->>
-
-/** Gen ^($label ...) */
-rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:[stream_<label> nextNode] old:root_<treeLevel>];<\n>
->>
-
-/** Gen ^($label ...) where label+=... */
-rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
-
-/** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,terminalOptions,args) ::= <<
-root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<createRewriteNodeFromElement(...)> old:root_<treeLevel>];<\n>
->>
-
-rewriteImaginaryTokenRef(args,token,terminalOptions,elementIndex) ::= <<
-[treeAdaptor addChild:<createImaginaryNode(tokenType=token, ...)> toTree:root_<treeLevel>];<\n>
->>
-
-rewriteImaginaryTokenRefRoot(args,token,terminalOptions,elementIndex) ::= <<
-root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<createImaginaryNode(tokenType=token, ...)> old:root_<treeLevel>];<\n>
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-root_0 = <action>;<\n>
->>
-
-/** What is the name of the previous value of this rule's root tree?  This
- *  lets us refer to $rule to mean the previous value.  I am reusing the
- *  variable 'tree' sitting in the retval struct to hold the value of root_0 right
- *  before I set it during rewrites.  The assignment will be to retval.tree.
- */
-prevRuleRootRef() ::= "retval"
-
-rewriteRuleRef(rule) ::= <<
-[treeAdaptor addChild:[stream_<rule> nextTree] toTree:root_<treeLevel>];<\n>
->>
-
-rewriteRuleRefRoot(rule) ::= <<
-root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:(id\<ANTLRTree>)[stream_<rule> nextNode] old:root_<treeLevel>];<\n>
->>
-
-rewriteNodeAction(action) ::= <<
-[treeAdaptor addChild:<action> toTree:root_<treeLevel>];<\n>
->>
-
-rewriteNodeActionRoot(action) ::= <<
-root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<action> old:root_<treeLevel>];<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel=rule */
-rewriteRuleLabelRef(label) ::= <<
-[treeAdaptor addChild:[stream_<label> nextTree] toTree:root_<treeLevel>];<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
-rewriteRuleListLabelRef(label) ::= <<
-[treeAdaptor addChild:[stream_<label> nextTree] toTree:root_<treeLevel>];<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel=rule */
-rewriteRuleLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:[stream_<label> nextNode] old:root_<treeLevel>];<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
-rewriteRuleListLabelRefRoot(label) ::= <<
-root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:[stream_<label> nextNode] old:root_<treeLevel>];<\n>
->>
-
-rewriteWildcardLabelRef(label) ::= <<
-[treeAdaptor addChild:[stream_<label> nextTree] toTree:root_<treeLevel>];<\n>
->>
-
-createImaginaryNode(tokenType,terminalOptions,args) ::= <<
-<if(terminalOptions.node)>
-    [<terminalOptions.node> new<terminalOptions.node>:<tokenType> <if(args)>, <args; separator=", "><endif>]
-<else>
-    <if(args)>
-        [[treeAdaptor createTree:<tokenType> <if(first(args))>FromToken:<first(args)><endif> <if(first(rest(args)))>Text:<first(rest(args))><else>Text:@"<tokenType>"<endif>] retain]
-    <else>
-        [[treeAdaptor createTree:<tokenType> Text:@"<tokenType>"] retain]
-    <endif>
-<endif>
->>
-
-createRewriteNodeFromElement(token,terminalOptions,args) ::= <<
-<if(terminalOptions.node)>
-    [<terminalOptions.node> new<terminalOptions.node>:[stream_<token> nextToken]<if(args)>, <args; separator=", "><endif>]
-<else>
-    <if(args)> <! must create new node from old !>
-        [[treeAdaptor createTree:<token> Text:<first(rest(args))> <args; separator=", ">] retain]
-    <else>
-        [stream_<token> nextNode]
-    <endif>
-<endif>
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTDbg.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTDbg.stg
deleted file mode 100644
index 8756fa5..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTDbg.stg
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2006 Kay Roepke 2010 Alan Condit
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
- *  hierarchy is set up as ASTDbg : AST : Dbg : Java by code generator.
- */
-parserMembers() ::= <<
-ANTLRDebugTreeAdaptor *adaptor = [ANTLRDebugTreeAdaptor newTreeAdaptor:(id)dbg Adaptor:[ANTLRCommonTreeAdaptor newTreeAdaptor]];
-
-// fix this
-- (void) setTreeAdaptor:(id<ANTLRTreeAdaptor>)anAdaptor
-{
-    adaptor = [ANTLRDebugTreeAdaptor newTreeAdaptor:dbg Adaptor:anAdaptor];
-<if(grammar.grammarIsRoot)>
-    adaptor = [ANTLRDebugTreeAdaptor newTreeAdaptor:adaptor withDBG:dbg];
-<else>
-    adaptor = (ANTLRDebugTreeAdaptor *)adaptor; // delegator sends dbg adaptor
-<endif><\n>
-    <grammar.directDelegates:{g|[<g:delegateName()> setTreeAdaptor:adaptor];}>
-}
-
-- (id<ANTLRTreeAdaptor>)getTreeAdaptor
-{
-    return adaptor;
-}<\n>
->>
-
-parserCtorBody() ::= <<
-<super.parserCtorBody()>
->>
-
-createListenerAndHandshake() ::= <<
-ANTLRDebugEventSocketProxy proxy =
-    [ANTLRDebugEventSocketProxy newANTLRDebugEventSocketProxy:self, port, <if(TREE_PARSER)>[input getTreeAdaptor]<else>adaptor<endif>];
-[self setDebugListener:proxy];
-[self set<inputStreamType>:[ANTLRANTLRDebug<inputStreamType> newANTLRDebug<inputStreamType>:input with:proxy]];
-try {
-    [proxy handshake];
-}
-@catch (IOException *ioe) {
-    [self reportError:ioe];
-}
->>
-
-@ctorForRootGrammar.finally() ::= <<
-ANTLRCommonTreeAdaptor *adap = [ANTLRCommonTreeAdaptor newTreeAdaptor];
-[self setTreeAdaptor:adap];
-[proxy setTreeAdaptor:adap];
->>
-
-@ctorForProfilingRootGrammar.finally() ::=<<
-ANTLRCommonTreeAdaptor *adap = [ANTLRCommonTreeAdaptor newTreeAdaptor];
-[self setTreeAdaptor:adap];
-[proxy setTreeAdaptor:adap];
->>
-
-@ctorForPredefinedListener.superClassRef() ::= @"super(input, dbg);"
-
-@ctorForPredefinedListener.finally() ::=<<
-<if(grammar.grammarIsRoot)> <! don't create new adaptor for delegates !>
-ANTLRCommonTreeAdaptor *adap = [ANTLRCommonTreeAdaptor newTreeAdaptor];
-[self setTreeAdaptor:adap];<\n>
-<endif>
->>
-
-@treeParserHeaderFile.superClassName ::= "ANTLRDebugTreeParser"
-
-@rewriteElement.pregen() ::= "[debugListener locationLine:<e.line> column:<e.pos>];"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTParser.stg
deleted file mode 100644
index 26dbe6a..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTParser.stg
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2007 Kay Roepke 2010 Alan Condit
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Templates for building ASTs during normal parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  The situation is not too bad as rewrite (->) usage makes ^ and !
- *  invalid. There is no huge explosion of combinations.
- */
-@rule.setErrorReturnValue() ::= <<
-/* ASTParser rule.setErrorReturnValue */
-retval.tree = (<ASTLabelType> *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
-<! System.out.println("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
->>
-
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token, label, elementIndex, terminalOptions) ::= <<
-/* ASTParser tokenRef */
-<super.tokenRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = <createNodeFromToken(...)>;
-[treeAdaptor addChild:<label>_tree  toTree:root_0];
-<if(backtracking)>}<endif>
->>
-
-/* ID! and output=AST (same as plain tokenRef) */
-/* ASTParser tokenRefBang */
-tokenRefBang(token,label,elementIndex,terminalOptions) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-<super.tokenRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = <createNodeFromToken(...)>;
-root_0 = (<ASTLabelType> *)[treeAdaptor becomeRoot:<label>_tree old:root_0];
-<if(backtracking)>}<endif>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-/* ASTParser tokenRefBangAndListLabel */
-<tokenRefBang(...)>
-<listLabel(elem=label,...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-/* ASTParser tokenRefAndListLabel */
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,terminalOptions,elementIndex) ::= <<
-/* ASTParser tokenRefRuleRootAndListLabel */
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
-
-// SET AST
-
-// the match set stuff is interesting in that it uses an argument list
-// to pass code to the default matchSet; another possible way to alter
-// inherited code.  I don't use the region stuff because I need to pass
-// different chunks depending on the operator.  I don't like making
-// the template name have the operator as the number of templates gets
-// large, but this is the most flexible--as opposed to having
-// the code generator call matchSet then add root code or ruleroot code
-// plus list label plus ...  The combinations might require complicated code
-// rather than just added-on code.  Investigate that refactoring when
-// I have more time.
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <%
-/* ASTParser matchSet */
-<super.matchSet(postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-    [treeAdaptor addChild:<createNodeFromToken(...)> toTree:root_0 ];}, ...)>
-%>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-/* ASTParser matchRuleBlockSet */
-<matchSet(...)>
->>
-
-matchSetBang(s,label,elementIndex,terminalOptions, postmatchCode) ::= "<super.matchSet(...)>"
-
-// note there is no matchSetTrack because -> rewrites force sets to be
-// plain old blocks of alts: (A|B|...|C)
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-/* ASTParser matchSetRuleRoot */
-<if(label)>
-<label>=(<labelType> *)[input LT:1]; /* matchSetRuleRoot */<\n>
-<endif>
-<super.matchSet(postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-root_0 = (<ASTLabelType> *)[treeAdaptor becomeRoot:<createNodeFromToken(...)> old:root_0];}, ...)>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-/* ASTParser ruleRef */
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-[treeAdaptor addChild:[<label> getTree] toTree:root_0];
->>
-
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
-
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-/* ASTParser ruleRefRuleRoot */
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-root_0 = (<ASTLabelType> *)[treeAdaptor becomeRoot:[<label> getTree] old:root_0];
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-/* ASTParser ruleRefAndListLabel */
-<ruleRef(...)>
-<listLabel(elem = {[<label> getTree]},...)>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-/* ASTParser ruleRefBangAndListLabel */
-<ruleRefBang(...)>
-<listLabel(elem = {[<label> getTree]},...)>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-/* ASTParser ruleRefRuleRootAndListLabel */
-<ruleRefRuleRoot(...)>
-<listLabel(elem = {[<label> getTree]},...)>
->>
-
-// WILDCARD AST
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-/* ASTParser wildcard */
-<super.wildcard(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-    [treeAdaptor addChild:[[treeAdaptor createTree:<label>] retain] toTree:root_0];
-<if(backtracking)>}<endif>
->>
-
-wildcardBang(token,label,elementIndex,terminalOptions) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-/* ASTParser wildcardRuleRoot */
-<super.wildcard(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-    <label>_tree = [[treeAdaptor createTree:<label>] retain];
-    root_0 = (<ASTLabelType> *)[treeAdaptor becomeRoot:<label>_tree old:root_0];
-<if(backtracking)>}<endif>
->>
-
-createNodeFromToken(label,terminalOptions) ::= <<
-/* ASTParser createNodeFromToken */
-<if(terminalOptions.node)>
-[ANTLR<terminalOptions.node> newANTLR<terminalOptions.node>:<label>] <! new MethodNode(IDLabel) !>
-<else>
-(<ASTLabelType> *)[[treeAdaptor createTree:<label>] retain]
-<endif>
->>
-
-// straight from java cleanup ///
-ruleCleanUp() ::= <<
-/* ASTParser ruleCleanUp */
-<super.ruleCleanUp()>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
-    retval.tree = (<ASTLabelType> *)[treeAdaptor rulePostProcessing:root_0];
-    [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
-<if(backtracking)>}<endif>
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTTreeParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTTreeParser.stg
deleted file mode 100644
index 56959a3..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTTreeParser.stg
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2007 Kay Roepke 2010 Alan Condit
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Templates for building ASTs during tree parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  Each combination has its own template except that label/no label
- *  is combined into tokenRef, ruleRef, ...
- */
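The comment above enumerates the dimensions these templates handle; the child-versus-root dimension reduces to two adaptor operations that the templates below emit as ObjC messages, addChild and becomeRoot. They mirror the same-named TreeAdaptor methods in the ANTLR v3 Java runtime, so here is a minimal Java sketch, for illustration only (token types 5 and 6 are arbitrary), that builds ^(PLUS a b):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;

    public class ChildVersusRootSketch {
        public static void main(String[] args) {
            CommonTreeAdaptor adaptor = new CommonTreeAdaptor();
            Object root = adaptor.nil();   // root_<treeLevel> starts out as a nil node

            // "child" position: [treeAdaptor addChild:... toTree:root_<treeLevel>]
            adaptor.addChild(root, adaptor.create(new CommonToken(5, "a")));
            adaptor.addChild(root, adaptor.create(new CommonToken(5, "b")));

            // "root" position (the ^ operator): becomeRoot hoists the new node above
            // everything collected so far: [treeAdaptor becomeRoot:... old:root_<treeLevel>]
            Object plus = adaptor.create(new CommonToken(6, "PLUS"));
            root = adaptor.becomeRoot(plus, root);

            System.out.println(((CommonTree) root).toStringTree());   // (PLUS a b)
        }
    }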
-
-/* addition memVars for returnscopes */
-@returnScopeInterface.memVars() ::= <<
-/* ASTTreeParser returnScopeInterface.memVars */
-<recognizer.ASTLabelType; null="ANTLRCommonTree"> *tree;
->>
-
-/** the interface of returnScope methodsDecl */
-@returnScopeInterface.methodsDecl() ::= <<
-/* ASTTreeParser returnScopeInterface.methodsDecl */
-- (<recognizer.ASTLabelType; null="ANTLRCommonTree"> *)getTree;
-- (void) setTree:(<recognizer.ASTLabelType; null="ANTLRCommonTree"> *)aTree;<\n>
->>
-
-/** the implementation of returnScope methods */
-@returnScope.methods() ::= <<
-/* ASTTreeParser returnScope.methods */
-- (<ASTLabelType> *)getTree
-{
-    return tree;
-}
-
-- (void) setTree:(<ASTLabelType> *)aTree
-{
-    if (tree != aTree) {
-        if ( tree ) [tree release];
-        if ( aTree ) [aTree retain];
-        tree = aTree;
-    }
-}
-
-- (void) dealloc
-{
-    [self setTree:nil];
-    [super dealloc];
-}
-
-@synthesize tree;
->>
-
-@returnScopeProperties() ::= <<
-@property (retain) <recognizer.ASTLabelType; null="ANTLRCommonTree"> *tree;
->>
-
-/** Add a variable to track last element matched */
-ruleDeclarations() ::= <<
-/* ASTTreeParser ruleDeclarations */
-<super.ruleDeclarations()>
-<ASTLabelType> *_first_0 = nil;
-<ASTLabelType> *_last = nil;<\n>
->>
-
-/** What to emit when there is no rewrite rule.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= <<
-/* ASTTreeParser noRewrite */
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(rewriteMode)>
-retval.tree = (<ASTLabelType> *)_first_0;
-if ( [treeAdaptor getParent:retval.tree] != nil && [treeAdaptor isNil:[treeAdaptor getParent:retval.tree]] )
-    retval.tree = (<ASTLabelType> *)[treeAdaptor getParent:retval.tree];
-<endif>
-<if(backtracking)>}<endif>
->>
-
-/** match ^(root children) in tree parser; override here to
- *  add tree construction actions.
- */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-/* ASTTreeParser tree */
-_last = (<ASTLabelType> *)[input LT:1];
-{
-<ASTLabelType> *_save_last_<treeLevel> = _last;
-<ASTLabelType> *_first_<treeLevel> = nil;
-<if(!rewriteMode)>
-<ASTLabelType> *root_<treeLevel> = [[[treeAdaptor class] newEmptyTree] retain];
-<endif>
-<root:element()>
-<if(rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-<if(root.el.rule)>
-if ( _first_<enclosingTreeLevel>==nil ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
-<else>
-if ( _first_<enclosingTreeLevel>==nil ) _first_<enclosingTreeLevel> = <root.el.label>;
-<endif>
-<endif>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( [input LA:1] == ANTLRTokenTypeDOWN ) {
-    [self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; <checkRuleBacktrackFailure()>
-    <children:element()>
-    [self match:input TokenType:ANTLRTokenTypeUP Follow:nil]; <checkRuleBacktrackFailure()>
-}
-<else>
-[self match:input TokenType:ANTLRTokenTypeDOWN Follow:nil]; <checkRuleBacktrackFailure()>
-<children:element()>
-[self match:input TokenType:ANTLRTokenTypeUP Follow:nil]; <checkRuleBacktrackFailure()>
-<endif>
-<if(!rewriteMode)>
-[treeAdaptor addChild:root_<treeLevel> toTree:root_<enclosingTreeLevel>];
-<endif>
-_last = _save_last_<treeLevel>;
-}<\n>
->>
-
-// TOKEN AST STUFF
-
-/** ID! and output=AST (same as plain tokenRef) 'cept add
- *  setting of _last
- */
-tokenRefBang(token,label,elementIndex,terminalOptions) ::= <<
-/* ASTTreeParser tokenRefBang */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.tokenRef(...)>
->>
-
-/** ID auto construct */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-/* ASTTreeParser tokenRef */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-    <label>_tree = [ANTLR<terminalOptions.node> newANTLR<terminalOptions.node>:<label>];
-<else>
-    <label>_tree = (<ASTLabelType> *)[treeAdaptor dupNode:<label>];
-<endif><\n>
-    [treeAdaptor addChild:<label>_tree toTree:root_<treeLevel>];
-<if(backtracking)>}<endif>
-<else> <! rewrite mode !>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-if ( _first_<treeLevel>==nil ) _first_<treeLevel> = <label>;
-<endif>
->>
-
-/** label+=TOKEN auto construct */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-/* ASTTreeParser tokenRefAndListLabel */
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** ^(ID ...) auto construct */
-tokenRefRuleRoot(token,label,elementIndex) ::= <<
-/* ASTTreeParser tokenRefRuleRoot */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = [ANTLR<terminalOptions.node> newANTLR<terminalOptions.node>:<label>];
-<else>
-<label>_tree = (<ASTLabelType> *)[treeAdaptor dupNode:<label>];
-<endif><\n>
-root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<label>_tree old:root_<treeLevel>];
-<if(backtracking)>}<endif>
-<endif>
->>
-
-/** Match ^(label+=TOKEN ...) auto construct */
-tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-/* ASTTreeParser tokenRefRuleRootAndListLabel */
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard and auto dup the node/subtree */
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-/* ASTTreeParser wildcard */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.wildcard(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<label>_tree = (<ASTLabelType> *)[treeAdaptor dupTree:<label>];
-[treeAdaptor addChild:<label>_tree toTree:root_<treeLevel>];
-<if(backtracking)>}<endif>
-<else> <! rewrite mode !>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
-if ( _first_<treeLevel> == nil ) _first_<treeLevel> = <label>;
-<endif>
->>
-
-// SET AST
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-/* ASTTreeParser matchSet */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.matchSet(postmatchCode={
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = [ANTLR<terminalOptions.node> newANTLR<terminalOptions.node>:<label>];
-<else>
-<label>_tree = (<ASTLabelType> *)[treeAdaptor dupNode:<label>];
-<endif><\n>
-[treeAdaptor addChild:<label>_tree toTree:root_<treeLevel>];
-<if(backtracking)>\}<endif>
-<endif>
-}, ...
-)>
->>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-/* ASTTreeParser matchRuleBlockSet */
-<matchSet(...)>
-<noRewrite(...)> <! set return tree !>
->>
-
-matchSetBang(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-/* ASTTreeParser matchSetBang */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.matchSet(...)>
->>
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-/* ASTTreeParser matchSetRuleRoot */
-<super.matchSet(postmatchCode={
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
-<if(terminalOptions.node)>
-<label>_tree = [ANTLR<terminalOptions.node> newANTLR<terminalOptions.node>:<label>];
-<else>
-<label>_tree = (<ASTLabelType> *)[treeAdaptor dupNode:<label>];
-<endif><\n>
-root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<label>_tree old:root_<treeLevel>];
-<if(backtracking)>\}<endif>
-<endif>
-}, ...
-)>
->>
-
-// RULE REF AST
-
-/** rule auto construct */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-/* ASTTreeParser ruleRef */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.ruleRef(...)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
-<if(!rewriteMode)>
-    [treeAdaptor addChild:<label>.tree toTree:root_<treeLevel>];
-<else> <! rewrite mode !>
-if ( _first_<treeLevel> == nil ) _first_<treeLevel> = <label>.tree;
-<endif>
->>
-
-/** x+=rule auto construct */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-/* ASTTreeParser ruleRefAndListLabel */
-<ruleRef(...)>
-<! <listLabel(elem = "["+label+" getTree]",...)> !>
-<listLabel(elem = {[<label> getTree]},...)>
->>
-
-/** ^(rule ...) auto construct */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-/* ASTTreeParser ruleRefRuleRoot */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.ruleRef(...)>
-<if(!rewriteMode)>
-<if(backtracking)>if ( state.backtracking == 0 ) <endif>
-root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<label>.tree old:root_<treeLevel>];
-<endif>
->>
-
-/** ^(x+=rule ...) auto construct */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-/* ASTTreeParser ruleRefRuleRootAndListLabel */
-<ruleRefRuleRoot(...)>
-<listLabel(elem = {[<label> getTree]},...)>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-/* ASTTreeParser ruleRefTrack */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.ruleRefTrack(...)>
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-/* ASTTreeParser ruleRefTrackAndListLabel */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.ruleRefTrackAndListLabel(...)>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-/* ASTTreeParser ruleRefRuleRootTrack */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.ruleRefRuleRootTrack(...)>
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-/* ASTTreeParser ruleRefRuleRootTrackAndListLabel */
-_last = (<ASTLabelType> *)[input LT:1];
-<super.ruleRefRuleRootTrackAndListLabel(...)>
->>
-
-/** Streams for token refs are tree nodes now; override to
- *  change nextToken to nextNode.
- */
-createRewriteNodeFromElement(token,terminalOptions,scope) ::= <<
-/* ASTTreeParser createRewriteNodeFromElement */
-<if(terminalOptions.node)>
-<! new <terminalOptions.node>(stream_<token>.nextNode()) !>
-[ANTLR<terminalOptions.node> newANTLR<terminalOptions.node>:[stream_<token> nextNode]]
-<else>
-<! stream_<token>.nextNode() !>
-[stream_<token> nextNode]
-<endif>
->>
-
-ruleCleanUp() ::= <<
-/* ASTTreeParser ruleCleanUp */
-<super.ruleCleanUp()>
-<if(!rewriteMode)>
-<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
-retval.tree = (<ASTLabelType> *)[treeAdaptor rulePostProcessing:root_0];
-<if(backtracking)>}<endif>
-<endif>
->>
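In the tree-parser templates above, auto-build mode does not create nodes from tokens; it copies them out of the input tree (dupNode in tokenRef/tokenRefRuleRoot, dupTree in wildcard). The calls correspond to the same-named TreeAdaptor methods of the ANTLR v3 Java runtime; a short hedged Java sketch of the difference (token types are arbitrary):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;

    public class DupNodeSketch {
        public static void main(String[] args) {
            CommonTreeAdaptor adaptor = new CommonTreeAdaptor();

            // Stand-in for a subtree coming off the input tree node stream: ^(ID a)
            Object subtree = adaptor.create(new CommonToken(4, "ID"));
            adaptor.addChild(subtree, adaptor.create(new CommonToken(5, "a")));

            Object node = adaptor.dupNode(subtree);   // copies just the ID node (tokenRef path)
            Object copy = adaptor.dupTree(subtree);   // deep-copies the subtree (wildcard path)

            System.out.println(((CommonTree) node).toStringTree());   // ID
            System.out.println(((CommonTree) copy).toStringTree());   // (ID a)
        }
    }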
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/Dbg.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/Dbg.stg
deleted file mode 100644
index 6c8b42d..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/Dbg.stg
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2006 Kay Roepke 2010 Alan Condit
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/** Template overrides to add debugging to normal Objective-C output;
- *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
- */
-@headerFile.imports() ::= <<
-<@super.imports()>
-#import \<ANTLR/ANTLRDebug.h>
->>
-
-@parserHeaderFile.memVars() ::= <<
-NSInteger ruleLevel;
-NSArray *ruleNames;
->>
-
-@parserHeaderFile.methodsDecl() ::= <<
--(BOOL) evalPredicate:(NSString *)predicate matched:(BOOL)result;<\n>
->>
-
-@genericParser.methods() ::= <<
-<if(grammar.grammarIsRoot)>
-AMutableArray *ruleNames = [AMutableArray arrayWithObjects:
-    @"invalidRule", <grammar.allImportedRules:{rST | @"<rST.name>"}; wrap="\n    ", separator=", ">,
-    nil];<\n>
-<endif>
-<if(grammar.grammarIsRoot)> <! grammar imports other grammar(s) !>
-    ruleLevel = 0;
-- (NSInteger) getRuleLevel { return ruleLevel; }
-- (void) incRuleLevel { ruleLevel++; }
-- (void) decRuleLevel { ruleLevel--; }
-<if(profile)>
-    <ctorForProfilingRootGrammar()>
-<else>
-    <ctorForRootGrammar()>
-<endif>
-<ctorForPredefinedListener()>
-<else> <! imported grammar !>
-- (NSInteger) getRuleLevel
-{
-    return <grammar.delegators:{g| <g:delegateName()>}>.getRuleLevel();
-}<\n>
-
-- (void) incRuleLevel
-{
-    <grammar.delegators:{g| <g:delegateName()>}>.incRuleLevel();
-}<\n>
-- (void) decRuleLevel
-{
-    <grammar.delegators:{g| <g:delegateName()>}>.decRuleLevel();
-}<\n>
-    <ctorForDelegateGrammar()>
-<endif>
-<if(profile)>
-- (BOOL) alreadyParsedRule:(id<IntStream>) input Index:(NSInteger) ruleIndex
-{
-    [(Profiler)dbg examineRuleMemoization:input, ruleIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames objectAtIndex:ruleIndex];
-    return super.alreadyParsedRule(input, ruleIndex);
-}<\n>
-- (void) memoize:(id<ANTLRIntStream>)input RuleIndex:(NSInteger)ruleIndex StartIndex:(NSInteger)ruleStartIndex
-{
-    [((Profiler)dbg) memoize:input RuleIndex:ruleIndex StartIndex:ruleStartIndex [<grammar.composite.rootGrammar.recognizerName> ruleNames[ruleIndex]];
-    [super memoize:input RuleIndex:ruleIndex StartIndex:ruleStartIndex];
-}<\n>
-<endif>
-- (BOOL) evalPredicate:(BOOL)result Pred:(NSString *)predicate
-{
-    [dbg semanticPredicate:result Pred:predicate];
-    return result;
-}<\n>
->>
-
-@genericParser.init() ::= <<
-ruleNames = [NSArray arrayWithObjects:<rules:{rST | @"<rST.ruleName>"}; separator=", ", wrap="\n	">, nil];<\n>
->>
-
-@genericParser.dealloc() ::= <<
-[ruleNames release];<\n>
->>
-
-@genericParser.methods() ::= <<
--(BOOL) evalPredicate:(NSString *)predicate matched:(BOOL)result
-{
-	[debugListener semanticPredicate:predicate matched:result];
-	return result;
-}<\n>
->>
-
-/* bug: can't use @super.superClassName()> */
-@parserHeaderFile.superClassName() ::= "ANTLRDebug<if(TREE_PARSER)>Tree<endif>Parser"
-
-@rule.preamble() ::= <<
-@try { [debugListener enterRule:@"<ruleName>"];
-if ( ruleLevel==0 ) [debugListener commence];
-ruleLevel++;
-[debugListener locationLine:<ruleDescriptor.tree.line> column:<ruleDescriptor.tree.column>];<\n>
->>
-
-@rule.postamble() ::= <<
-[debugListener locationLine:<ruleDescriptor.EORNode.line> column:<ruleDescriptor.EORNode.column>];<\n>
-}
-@finally {
-    [debugListener exitRule:@"<ruleName>"];
-    ruleLevel--;
-    if ( ruleLevel==0 ) [debugListener terminate];
-}<\n>
->>
-
-/* these are handled in the runtime for now.
- * stinks, but that's the easiest way to avoid having to generate two
- * methods for each synpred
-
-@synpred.start() ::= "[debugListener beginBacktrack:state.backtracking];"
-
-@synpred.stop() ::= "[debugListener endBacktrack:state.backtracking wasSuccessful:success];"
-
- */
-
-// Common debug event triggers used by region overrides below
-
-enterSubRule() ::=
-    "@try { [debugListener enterSubRule:<decisionNumber>];<\n>"
-
-exitSubRule() ::=
-    "} @finally { [debugListener exitSubRule:<decisionNumber>]; }<\n>"
-
-enterDecision() ::=
-    "@try { [debugListener enterDecision:<decisionNumber>];<\n>"
-
-exitDecision() ::=
-    "} @finally { [debugListener exitDecision:<decisionNumber>]; }<\n>"
-
-enterAlt(n) ::= "[debugListener enterAlt:<n>];<\n>"
-
-// Region overrides that tell various constructs to add debugging triggers
-
-@block.predecision() ::= "<enterSubRule()><enterDecision()>"
-
-@block.postdecision() ::= "<exitDecision()>"
-
-@block.postbranch() ::= "<exitSubRule()>"
-
-@ruleBlock.predecision() ::= "<enterDecision()>"
-
-@ruleBlock.postdecision() ::= "<exitDecision()>"
-
-@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
-@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
-
-@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
-
-@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
-
-@positiveClosureBlock.predecision() ::= "<enterDecision()>"
-
-@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
-
-@positiveClosureBlock.earlyExitException() ::=
-    "[debugListener recognitionException:eee];<\n>"
-
-@closureBlock.preloop() ::= "<enterSubRule()>"
-
-@closureBlock.postloop() ::= "<exitSubRule()>"
-
-@closureBlock.predecision() ::= "<enterDecision()>"
-
-@closureBlock.postdecision() ::= "<exitDecision()>"
-
-@altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
-
-@element.prematch() ::=
-    "[debugListener locationLine:<it.line> column:<it.pos>];"
-
-@matchSet.mismatchedSetException() ::=
-    "[debugListener recognitionException:mse];"
-
-@dfaState.noViableAltException() ::= "[debugListener recognitionException:nvae];"
-
-@dfaStateSwitch.noViableAltException() ::= "[debugListener recognitionException:nvae];"
-
-dfaDecision(decisionNumber,description) ::= <<
-@try {
-    // isCyclicDecision is only necessary for the Profiler. Which I didn't do, yet.
-    // isCyclicDecision = YES;
-    <super.dfaDecision(...)>
-}
-@catch (ANTLRNoViableAltException *nvae) {
-    [debugListener recognitionException:nvae];
-    @throw nvae;
-}
->>
-
-@cyclicDFA.errorMethod() ::= <<
--(void) error:(ANTLRNoViableAltException *)nvae
-{
-    [[recognizer debugListener] recognitionException:nvae];
-}
->>
-
-/** Force predicate validation to trigger an event */
-evalPredicate(pred,description) ::= <<
-[self evalPredicate:@"<description>" matched:<pred>];
->>
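Dbg.stg above routes every rule entry/exit, decision, alternative and semantic predicate through a debugListener. The hook pattern itself is simple; the sketch below is a self-contained Java illustration of it with an invented TraceListener interface (hypothetical names throughout, not the ANTLR runtime's debug listener API).

    // TraceListener and DebugHookSketch are invented for illustration only.
    interface TraceListener {
        void enterRule(String name);
        void exitRule(String name);
        void semanticPredicate(boolean result, String predicate);
    }

    public class DebugHookSketch {
        private final TraceListener listener;
        private int ruleLevel = 0;   // mirrors the ruleLevel memVar in the template

        DebugHookSketch(TraceListener listener) { this.listener = listener; }

        // Same shape as the @rule.preamble()/@rule.postamble() overrides: the
        // try/finally guarantees the exit event fires even if the body throws.
        void rule(String name, Runnable body) {
            try {
                listener.enterRule(name);
                if (ruleLevel == 0) System.out.println("commence");   // [debugListener commence]
                ruleLevel++;
                body.run();
            } finally {
                listener.exitRule(name);
                ruleLevel--;
                if (ruleLevel == 0) System.out.println("terminate");  // [debugListener terminate]
            }
        }

        // Mirrors evalPredicate: report the result to the listener, then pass it through.
        boolean evalPredicate(boolean result, String predicate) {
            listener.semanticPredicate(result, predicate);
            return result;
        }

        public static void main(String[] args) {
            TraceListener console = new TraceListener() {
                public void enterRule(String name) { System.out.println("enter " + name); }
                public void exitRule(String name) { System.out.println("exit " + name); }
                public void semanticPredicate(boolean r, String p) { System.out.println(p + " -> " + r); }
            };
            DebugHookSketch dbg = new DebugHookSketch(console);
            dbg.rule("expr", () -> dbg.evalPredicate(1 + 1 == 2, "1+1==2"));
        }
    }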
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ObjC.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ObjC.stg
deleted file mode 100644
index dbba0ff..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ObjC.stg
+++ /dev/null
@@ -1,2108 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2006, 2007 Kay Roepke 2010 Alan Condit
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/*
- *  Template group file for the Objective C code generator.
- *  Heavily based on Java.stg
- *
- *  Written by Kay Roepke <kroepke(at)classdump.org>
- *  Modified by Alan Condit <acondit(at)ipns.com>
- *
- *  This file is part of ANTLR and subject to the same license as ANTLR itself.
- */
-
-objcTypeInitMap ::= [
-    "int"           : "0",              // Integers     start out being 0
-    "long"          : "0",              // Longs        start out being 0
-    "float"         : "0.0",            // Floats       start out being 0
-    "double"        : "0.0",            // Doubles      start out being 0
-    "BOOL"          : "NO",             // Booleans     start out being Antlr ObjC for false
-    "byte"          : "0",              // Bytes        start out being 0
-    "short"         : "0",              // Shorts       start out being 0
-    "char"          : "0",              // Chars        start out being 0
-    "id"            : "nil",            // ids          start out being nil
-    default         : "nil"             // anything other than an atomic type
-]
-
-className() ::= "<name><!<if(LEXER)>Lexer<else><if(TREE_PARSER)>Tree<endif>Parser<endif>!>"
-leadIn(type) ::=
-<<
-/** \file
- *  This <type> file was generated by $ANTLR version <ANTLRVersion>
- *
- *     -  From the grammar source file : <fileName>
- *     -                            On : <generatedTimestamp>
-<if(LEXER)>
- *     -                 for the lexer : <name>Lexer
-<endif>
-<if(PARSER)>
- *     -                for the parser : <name>Parser
-<endif>
-<if(TREE_PARSER)>
- *     -           for the tree parser : <name>TreeParser
-<endif>
- *
- * Editing it, at least manually, is not wise.
- *
- * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
- *
- *
->>
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile( LEXER,
-            PARSER,
-            TREE_PARSER,
-            actionScope,
-            actions,
-            docComment,
-            recognizer,
-            name,
-            tokens,
-            tokenNames,
-            rules,
-            cyclicDFAs,
-            bitsets,
-            buildTemplate,
-            buildAST,
-            rewriteMode,
-            profile,
-            backtracking,
-            synpreds,
-            memoize,
-            numRules,
-            fileName,
-            ANTLRVersion,
-            generatedTimestamp,
-            trace,
-            scopes,
-            superClass,
-            literals
-            ) ::=
-<<
-<leadIn("OBJC source")>
-*/
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-
-<! <if(actions.(actionScope).header)>
-/* =============================================================================
- * This is what the grammar programmer asked us to put at the top of every file.
- */
-<actions.(actionScope).header>
-/* End of Header action.
- * =============================================================================
- */
-<endif> !>
-
-/* -----------------------------------------
- * Include the ANTLR3 generated header file.
- */
-#import "<name><!<if(LEXER)>Lexer<else><if(TREE_PARSER)>Tree<endif>Parser<endif>!>.h"
-<actions.(actionScope).postinclude>
-/* ----------------------------------------- */
-
-<docComment>
-
-<if(literals)>
-/** String literals used by <name> that we must do things like MATCHS() with.
- *  C will normally just lay down 8 bit characters, and you can use L"xxx" to
- *  get wchar_t, but wchar_t is 16 bits on Windows, which is not UTF32 and so
- *  we perform this little trick of defining the literals as arrays of UINT32
- *  and passing in the address of these.
- */
-<literals:{it | static ANTLR3_UCHAR  lit_<i>[]  = <it>;}; separator="\n">
-
-<endif>
-
-/* ============================================================================= */
-/* =============================================================================
- * Start of recognizer
- */
-<recognizer>
->>
-headerFileExtension() ::= ".h"
-
-headerFile( LEXER,
-            PARSER,
-            TREE_PARSER,
-            actionScope,
-            actions,
-            docComment,
-            recognizer,
-            name,
-            tokens,
-            tokenNames,
-            rules,
-            cyclicDFAs,
-            bitsets,
-            buildTemplate,
-            buildAST,
-            rewriteMode,
-            profile,
-            backtracking,
-            synpreds,
-            memoize,
-            numRules,
-            fileName,
-            ANTLRVersion,
-            generatedTimestamp,
-            trace,
-            scopes,
-            superClass,
-            literals
-          ) ::=
-<<
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-
-<@imports>
-<actions.(actionScope).preincludes>
-/* =============================================================================
- * Standard antlr3 OBJC runtime definitions
- */
-#import \<Foundation/Foundation.h>
-#import \<ANTLR/ANTLR.h>
-/* End of standard antlr3 runtime definitions
- * =============================================================================
- */
-<actions.(actionScope).includes>
-<@end>
-
-<if(LEXER)>
-<lexerHeaderFile(...)>
-<endif>
-<if(PARSER)>
-<parserHeaderFile(...)>
-<endif>
-<if(TREE_PARSER)>
-<treeParserHeaderFile(...)>
-<endif>
-<docComment>
->>
-
-lexerHeaderFile( LEXER,
-            PARSER,
-            TREE_PARSER,
-            actionScope,
-            actions,
-            docComment,
-            recognizer,
-            name,
-            tokens,
-            tokenNames,
-            rules,
-            cyclicDFAs,
-            bitsets,
-            buildTemplate,
-            profile,
-            backtracking,
-            synpreds,
-            memoize,
-            numRules,
-            fileName,
-            ANTLRVersion,
-            generatedTimestamp,
-            trace,
-            scopes,
-            superClass="ANTLRLexer"
-            ) ::=
-<<
-
-<if(actions.(actionScope).header)>
-/* =============================================================================
- * This is what the grammar programmer asked us to put at the top of every file.
- */
-<actions.(actionScope).header>
-/* End of Header action.
- * =============================================================================
- */
-<endif>
-
-/* Start cyclicDFAInterface */
-<cyclicDFAs:cyclicDFAInterface()>
-
-#pragma mark Rule return scopes Interface start
-<rules:{rule |
-<rule.ruleDescriptor:{ruleDescriptor | <returnScopeInterface(scope=ruleDescriptor.returnScope)>}>}>
-#pragma mark Rule return scopes Interface end
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-<tokens:{it | #define <it.name> <it.type>}; separator="\n">
-/* interface lexer class */
-@interface <className()> <@superClassName>: <superClass><@end> { // line 283
-<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> *dfa<dfa.decisionNumber>;}; separator="\n">
-<synpreds:{pred | SEL <pred>Selector;}; separator="\n">
-/* ObjC start of actions.lexer.memVars */
-<actions.lexer.memVars>
-/* ObjC end of actions.lexer.memVars */
-}
-+ (void) initialize;
-+ (<className()> *)new<className()>WithCharStream:(id\<ANTLRCharStream>)anInput;
-/* ObjC start actions.lexer.methodsDecl */
-<actions.lexer.methodsDecl>
-/* ObjC end actions.lexer.methodsDecl */
-<rules:{rule |
-- (<rule.ruleDescriptor:{ruleDescriptor|<returnType()>}>) <if(!rule.ruleDescriptor.isSynPred)>m<rule.ruleName><else><rule.ruleName>_fragment<endif> <if(rule.ruleDescriptor.parameterScope)><rule.ruleDescriptor.parameterScope:parameterScope()><endif>; }; separator="\n"><\n>
-@end /* end of <className()> interface */<\n>
->>
-
-headerReturnScope(ruleDescriptor) ::= "<returnScopeInterface(...)>"
-headerReturnType(ruleDescriptor) ::= <<
-<if(LEXER)>
-<if(!r.ruleDescriptor.isSynPred)>
- void
-<else>
- <ruleDescriptor:returnType()>
-<endif>
-<else>
- <ruleDescriptor:returnType()>
-<endif>
->>
-// Produce the lexer output
-lexer(  grammar,
-        name,
-        tokens,
-        scopes,
-        rules,
-        numRules,
-        filterMode,
-        labelType="ANTLRCommonToken",
-        superClass="ANTLRLexer"
-        ) ::= <<
-<cyclicDFAs:cyclicDFA()>
-
-/** As per Terence: No returns for lexer rules! */
-<!
-#pragma mark Rule return scopes start
-<rules:{rule | <rule.ruleDescriptor:{ruleDescriptor | 
-<returnScopeImplementation(scope=ruleDescriptor.returnScope)>}>
-}>
-#pragma mark Rule return scopes end
-!>
-@implementation <grammar.recognizerName> // line 330
-
-+ (void) initialize
-{
-    [ANTLRBaseRecognizer setGrammarFileName:@"<fileName>"];
-}
-
-+ (NSString *) tokenNameForType:(NSInteger)aTokenType
-{
-    return [[self getTokenNames] objectAtIndex:aTokenType];
-}
-
-+ (<grammar.recognizerName> *)new<grammar.recognizerName>WithCharStream:(id\<ANTLRCharStream>)anInput
-{
-    return [[<grammar.recognizerName> alloc] initWithCharStream:anInput];
-}
-
-- (id) initWithCharStream:(id\<ANTLRCharStream>)anInput
-{
-    self = [super initWithCharStream:anInput State:[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:<numRules>+1]];
-    if ( self != nil ) {
-<if(memoize)>
-        if ( state.ruleMemo == nil ) {
-            state.ruleMemo = [[ANTLRRuleStack newANTLRRuleStackWithSize:<numRules>+1] retain];
-        }
-        if ( [state.ruleMemo count] == 0 ) {
-            // initialize the memoization cache - the indices are 1-based in the runtime code!
-            <! [state.ruleMemo addObject:[NSNull null]];     /* dummy entry to ensure 1-basedness. */ !>
-            for (NSInteger i = 0; i \< <numRules>; i++) {
-                [state.ruleMemo addObject:[ANTLRHashRule newANTLRHashRuleWithLen:17]];
-            }
-        }
-<endif>
-        <synpreds:{pred | <lexerSynpred(name=pred)>};separator="\n">
-        <cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = [DFA<dfa.decisionNumber> newDFA<dfa.decisionNumber>WithRecognizer:self];}; separator="\n">
-        <actions.lexer.init>
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    <cyclicDFAs:{dfa | [dfa<dfa.decisionNumber> release];}; separator="\n">
-<actions.lexer.dealloc>
-    [super dealloc];
-}
-
-/* ObjC Start of actions.lexer.methods */
-<actions.lexer.methods>
-/* ObjC end of actions.lexer.methods */
-/* ObjC start methods() */
-<@methods()>
-/* ObjC end methods() */
-
-<if(actions.lexer.reset)>
-- (void) reset
-{
-    <actions.lexer.reset>
-    [super reset];
-}
-<endif>
-
-<if(filterMode)>
-<filteringNextToken()>
-<endif>
-/* Start of Rules */
-<rules; separator="\n">
-
-@end /* end of <grammar.recognizerName> implementation line 397 */
->>
-
-/** An override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error can be generated upon error; just rewind, consume
- *  a token and then try again.  backtracking needs to be set as well.
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-- (id\<ANTLRToken>) nextToken
-{
-    while (YES) {
-        if ( [input LA:1] == ANTLRCharStreamEOF ) {
-            return [<labelType> eofToken];
-        }
-        state.token = nil;
-        state.channel = ANTLRTokenChannelDefault;
-        state.tokenStartCharIndex = input.index;
-        state.tokenStartCharPositionInLine = input.charPositionInLine;
-        state.tokenStartLine = input.line;
-        state.text = nil;
-        @try {
-            NSInteger m = [input mark];
-            state.backtracking = 1; /* means we won't throw slow exception */
-            state.failed = NO;
-            [self mTokens];
-            state.backtracking = 0;
-            /* mTokens backtracks with synpred at backtracking==2
-               and we set the synpredgate to allow actions at level 1. */
-            if ( state.failed ) {
-                [input rewind:m];
-                [input consume]; /* advance one char and try again */
-            } else {
-                [self emit];
-                return state.token;
-            }
-        }
-        @catch (ANTLRRecognitionException *re) {
-            // shouldn't happen in backtracking mode, but...
-            [self reportError:re];
-            [self recover:re];
-        }
-    }
-}
-
-- (void)memoize:(id\<ANTLRIntStream\>)anInput
-      RuleIndex:(NSInteger)ruleIndex
-     StartIndex:(NSInteger)ruleStartIndex
-{
-    if ( state.backtracking > 1 ) [super memoize:anInput RuleIndex:ruleIndex StartIndex:ruleStartIndex];
-}
-
-- (BOOL)alreadyParsedRule:(id\<ANTLRIntStream\>)anInput RuleIndex:(NSInteger)ruleIndex
-{
-    if ( state.backtracking > 1 ) return [super alreadyParsedRule:anInput RuleIndex:ruleIndex];
-    return NO;
-}
->>
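filteringNextToken above implements filter-mode lexing: mark the input, speculatively run mTokens with backtracking set to 1, and on failure rewind and consume a single character before trying again, so unmatched input is silently skipped. A self-contained Java sketch of that retry loop over a plain string (the helper names are invented; this is not the runtime's Lexer API):

    public class FilterLexerSketch {
        // Emits every run of letters and silently skips anything that does not match,
        // the same rewind-and-consume-one-char recovery as the template above.
        public static void main(String[] args) {
            String input = "ab1!cd";
            int index = 0;
            while (index < input.length()) {
                int marked = index;                        // NSInteger m = [input mark];
                StringBuilder token = new StringBuilder();
                while (index < input.length() && Character.isLetter(input.charAt(index))) {
                    token.append(input.charAt(index++));
                }
                boolean failed = token.length() == 0;      // state.failed
                if (failed) {
                    index = marked + 1;                    // [input rewind:m]; [input consume];
                } else {
                    System.out.println("token: " + token); // [self emit];
                }
            }
        }
    }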
-
-actionGate() ::= "state.backtracking == 0"
-
-filteringActionGate() ::= "state.backtracking == 1"
-
-parserHeaderFile( LEXER,
-            PARSER,
-            TREE_PARSER,
-            actionScope,
-            actions,
-            docComment,
-            recognizer,
-            name,
-            tokens,
-            tokenNames,
-            rules,
-            cyclicDFAs,
-            bitsets,
-            buildTemplate,
-            profile,
-            backtracking,
-            synpreds,
-            memoize,
-            numRules,
-            fileName,
-            ANTLRVersion,
-            generatedTimestamp,
-            trace,
-            scopes,
-            literals,
-            superClass="ANTLRParser"
-            ) ::= <<
-/* parserHeaderFile */
-<genericParserHeaderFile(inputStreamType="id\<ANTLRTokenStream>",...)>
->>
-
-treeParserHeaderFile( LEXER,
-            PARSER,
-            TREE_PARSER,
-            actionScope,
-            actions,
-            docComment,
-            recognizer,
-            name,
-            tokens,
-            tokenNames,
-            rules,
-            cyclicDFAs,
-            bitsets,
-            buildTemplate,
-            profile,
-            backtracking,
-            synpreds,
-            memoize,
-            numRules,
-            fileName,
-            ANTLRVersion,
-            generatedTimestamp,
-            trace,
-            scopes,
-            literals,
-            superClass="ANTLRTreeParser"
-            ) ::= <<
-/* treeParserHeaderFile */
-<genericParserHeaderFile(inputStreamType="id\<ANTLRTreeNodeStream>",...)>
->>
-
-genericParserHeaderFile( LEXER,
-            PARSER,
-            TREE_PARSER,
-            actionScope,
-            actions,
-            docComment,
-            recognizer,
-            name,
-            tokens,
-            tokenNames,
-            rules,
-            cyclicDFAs,
-            bitsets,
-            buildTemplate,
-            profile,
-            backtracking,
-            synpreds,
-            memoize,
-            numRules,
-            fileName,
-            ANTLRVersion,
-            generatedTimestamp,
-            trace,
-            scopes,
-            superClass,
-            literals,
-            inputStreamType
-            ) ::=
-<<
-<if(actions.(actionScope).header)>
-/* =============================================================================
- * This is what the grammar programmer asked us to put at the top of every file.
- */
-<actions.(actionScope).header>
-/* End of Header action.
- * =============================================================================
- */
-<endif>
-
-#ifndef ANTLR3TokenTypeAlreadyDefined
-#define ANTLR3TokenTypeAlreadyDefined
-typedef enum {
-    ANTLR_EOF = -1,
-    INVALID,
-    EOR,
-    DOWN,
-    UP,
-    MIN
-} ANTLR3TokenType;
-#endif
-
-<cyclicDFAs:cyclicDFAInterface()>
-#pragma mark Tokens
-#ifdef EOF
-#undef EOF
-#endif
-<tokens:{it | #define <it.name> <it.type>}; separator="\n">
-#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
-<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeInterface(scope=it)><endif>}>
-#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
-<rules:{rule |
-<rule.ruleDescriptor:{ ruleDescriptor | <ruleAttributeScopeInterface(scope=ruleDescriptor.ruleScope)>}>}>
-#pragma mark Rule Return Scopes returnScopeInterface
-<rules:{rule |<rule.ruleDescriptor:{ ruleDescriptor | <returnScopeInterface(scope=ruleDescriptor.returnScope)>}>}>
-
-/* Interface grammar class */
-@interface <className()> <@superClassName> : <superClass><@end> { /* line 572 */
-#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
-<rules:{rule | <rule.ruleDescriptor.ruleScope:ruleAttributeScopeDecl(scope=rule.ruleDescriptor.ruleScope)>}>
-#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
-<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeMemVar(scope=it)><endif>}><\n>
-/* ObjC start of actions.(actionScope).memVars */
-<actions.(actionScope).memVars>
-/* ObjC end of actions.(actionScope).memVars */
-/* ObjC start of memVars */
-<@memVars()>
-/* ObjC end of memVars */
-
-<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> *dfa<dfa.decisionNumber>;}; separator="\n">
-<synpreds:{pred | SEL <pred>Selector;}; separator="\n">
- }
-
-/* ObjC start of actions.(actionScope).properties */
-<actions.(actionScope).properties>
-/* ObjC end of actions.(actionScope).properties */
-/* ObjC start of properties */
-<@properties()>
-/* ObjC end of properties */
-
-+ (void) initialize;
-+ (id) new<className()>:(<inputStreamType>)aStream;
-/* ObjC start of actions.(actionScope).methodsDecl */
-<actions.(actionScope).methodsDecl>
-/* ObjC end of actions.(actionScope).methodsDecl */
-
-/* ObjC start of methodsDecl */
-<@methodsDecl()>
-/* ObjC end of methodsDecl */
-
-<rules:{rule |
-- (<rule.ruleDescriptor:{ruleDescriptor|<returnType()>}>)<if(!rule.ruleDescriptor.isSynPred)><rule.ruleName><else><rule.ruleName>_fragment<endif><if(rule.ruleDescriptor.parameterScope)><rule.ruleDescriptor.parameterScope:parameterScope()><endif>; }; separator="\n"><\n>
-
-@end /* end of <className()> interface */<\n>
->>
-
-parser( grammar,
-        name,
-        scopes,
-        tokens,
-        tokenNames,
-        rules,
-        numRules,
-        bitsets,
-        ASTLabelType="ANTLRCommonTree",
-        superClass="ANTLRParser",
-        labelType="ANTLRCommonToken",
-        members={<actions.parser.members>}
-        ) ::= <<
-<genericParser(inputStreamType="id\<ANTLRTokenStream>", rewriteElementType="Token", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser( grammar,
-        name,
-        scopes,
-        tokens,
-        tokenNames,
-        globalAction,
-        rules,
-        numRules,
-        bitsets,
-        filterMode,
-        labelType={<ASTLabelType>},
-        ASTLabelType="ANTLRCommonTree",
-        superClass={<if(filterMode)><if(buildAST)>ANTLRTreeRewriter<else>ANTLRTreeFilter<endif><else>ANTLRTreeParser<endif>},
-        members={<actions.treeparser.members>}
-        ) ::= <<
-<genericParser(inputStreamType="id\<ANTLRTreeNodeStream>", rewriteElementType="Node", ...)>
->>
-
-/** How to generate a parser */
-genericParser(  grammar,
-        name,
-        scopes,
-        tokens,
-        tokenNames,
-        rules,
-        numRules,
-        cyclicDFAs,          // parser init -- initializes the DFAs
-        bitsets,
-        labelType,
-        ASTLabelType,
-        superClass,
-        members,
-        filterMode,
-        rewriteElementType,
-        inputStreamType
-        ) ::= <<
-<cyclicDFAs:cyclicDFA()>
-
-#pragma mark Bitsets
-<bitsets:{it | <bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>}>
-
-#pragma mark Dynamic Global globalAttributeScopeImplementation
-<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeImplementation(scope=it)><endif>}>
-
-#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
-<rules:{rule |
-<rule.ruleDescriptor:{ ruleDescriptor | <ruleAttributeScopeImplementation(scope=ruleDescriptor.ruleScope)>}>}>
-
-#pragma mark Rule Return Scopes returnScopeImplementation
-<rules:{rule | <rule.ruleDescriptor:{ ruleDescriptor | <returnScopeImplementation(scope=ruleDescriptor.returnScope)>}>}>
-
-@implementation <grammar.recognizerName>  // line 637
-
-#pragma mark Dynamic Rule Scopes ruleAttributeScope
-<rules:{rule | <rule.ruleDescriptor.ruleScope:ruleAttributeScope()>}>
-#pragma mark global Attribute Scopes globalAttributeScope
-<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScope()><endif>}>
-/* ObjC start actions.(actionScope).synthesize */
-<actions.(actionScope).synthesize>
-/* ObjC start synthesize() */
-<@synthesize()>
-
-+ (void) initialize
-{
-    #pragma mark Bitsets
-    <bitsets:{it | <bitsetInit(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>}>
-    [ANTLRBaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"\<invalid>", @"\<EOR>", @"\<DOWN>", @"\<UP>", <tokenNames:{it | @<it>}; separator=", ", wrap="\n ">, nil] retain]];
-    [ANTLRBaseRecognizer setGrammarFileName:@"<fileName>"];
-    <synpreds:{pred | <synpred(pred)>}>
-}
-
-+ (<grammar.recognizerName> *)new<grammar.recognizerName>:(<inputStreamType>)aStream
-{
-<if(PARSER)>
-    return [[<grammar.recognizerName> alloc] initWithTokenStream:aStream];
-<else><! TREE_PARSER !>
-    return [[<grammar.recognizerName> alloc] initWithStream:aStream];
-<endif>
-}
-
-<if(PARSER)>
-- (id) initWithTokenStream:(<inputStreamType>)aStream
-{
-    self = [super initWithTokenStream:aStream State:[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:<numRules>+1]];
-    if ( self != nil ) {
-<else><! TREE_PARSER !>
-- (id) initWithStream:(<inputStreamType>)aStream
-{
-    self = [super initWithStream:aStream State:[[ANTLRRecognizerSharedState newANTLRRecognizerSharedStateWithRuleLen:<numRules>+1] retain]];
-    if ( self != nil ) {
-<endif>
-        <! <parserCtorBody()> !>
-        <cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = [DFA<dfa.decisionNumber> newDFA<dfa.decisionNumber>WithRecognizer:self];}; separator="\n">
-        <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeInit(scope=it)><endif>}>
-        <rules:{rule | <rule.ruleDescriptor.ruleScope:ruleAttributeScopeInit()>}>
-        /* start of actions-actionScope-init */
-        <actions.(actionScope).init>
-        /* start of init */
-        <@init()>
-    }
-    return self;
-}
-
-- (void) dealloc
-{
-    <cyclicDFAs:{dfa | [dfa<dfa.decisionNumber> release];}; separator="\n">
-    <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeDealloc(scope=it)><endif>}>
-    <actions.(actionScope).dealloc>
-    <@dealloc()>
-    [super dealloc];
-}
-
-/* ObjC start actions.(actionScope).methods */
-<actions.(actionScope).methods>
-/* ObjC end actions.(actionScope).methods */
-/* ObjC start methods() */
-<@methods()>
-/* ObjC end methods() */
-/* ObjC start rules */
-<rules; separator="\n">
-/* ObjC end rules */
-
-@end /* end of <grammar.recognizerName> implementation line 692 */<\n>
->>
-
-parserCtorBody() ::= <<
-<if(memoize)> /* parserCtorBody */
-<if(grammar.grammarIsRoot)>
-state.ruleMemo = [[ANTLRRuleStack newANTLRRuleStack:<numRules>+1] retain];<\n> <! index from 1..n !>
-<endif>
-<endif>
-<grammar.delegators:
- {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  nor parameters etc..., just give simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-// $ANTLR start <ruleName>_fragment
-- (void) <ruleName>_fragment
-{
-    <ruleLabelDefs()>
-    <if(trace)>
-        [self traceIn:\@"<ruleName>_fragment" Index:<ruleDescriptor.index>];
-    @try {
-        <block>
-    }
-    @finally {
-        [self traceOut:\@"<ruleName>_fragment" Index:<ruleDescriptor.index>];
-    }
-<else>
-    <block>
-<endif>
-} // $ANTLR end <ruleName>_fragment
->>
-
-synpred(name) ::= <<
-SEL <name>Selector = @selector(<name>_fragment);
-<! // $ANTLR start <name>
-- (BOOL) <name>
-{
-    state.backtracking++;
-    <@start()>
-    NSInteger start = [input mark];
-    @try {
-        [self <name>_fragment]; // can never throw exception
-    }
-    @catch (ANTLRRecognitionException *re) {
-        NSLog(@"impossible: %@\n", re.name);
-    }
-    BOOL success = (state.failed == NO);
-    [input rewind:start];
-    <@stop()>
-    state.backtracking--;
-    state.failed=NO;
-    return success;
-} // $ANTLR end <name> <\n> !>
->>
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if ( state.backtracking > 0 && [self alreadyParsedRule:input RuleIndex:<ruleDescriptor.index>] ) { return <ruleReturnValue()>; }
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if (backtracking)>if ( state.failed ) return <ruleReturnValue()>;<endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if ( state.backtracking > 0 ) { state.failed = YES; return <ruleReturnValue()>; }<\n><endif>
->>
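ruleMemoization, checkRuleBacktrackFailure and ruleBacktrackFailure above cooperate: while backtracking a rule never throws, it just sets state.failed and returns; callers test the flag after every match; and the memo table caches the outcome per (rule, start index) so a failed speculation is not re-parsed. A self-contained Java sketch of that failed-flag-plus-memo pattern (hypothetical names, not BaseRecognizer's API):

    import java.util.HashMap;
    import java.util.Map;

    public class BacktrackMemoSketch {
        private final String input;
        private int pos;
        private boolean failed;
        private int backtracking;                                 // > 0 while speculating
        private final Map<Long, Boolean> memo = new HashMap<>();  // (ruleIndex, start) -> success
        // Simplification: real memoization also records the stop index so the input
        // can be fast-forwarded on a hit; this sketch only caches success/failure.

        BacktrackMemoSketch(String input) { this.input = input; }

        // Rule #1: match the literal "ab".
        void ab() {
            long key = key(1, pos);
            Boolean known = memo.get(key);
            if (backtracking > 0 && known != null) {              // "alreadyParsedRule"
                failed = !known;
                return;
            }
            match('a'); if (failed) { memo.put(key, false); return; }   // checkRuleBacktrackFailure
            match('b'); if (failed) { memo.put(key, false); return; }
            if (backtracking > 0) memo.put(key, true);            // "memoize"
        }

        void match(char c) {
            if (pos < input.length() && input.charAt(pos) == c) { pos++; return; }
            if (backtracking > 0) { failed = true; return; }      // ruleBacktrackFailure: no throw
            throw new RuntimeException("mismatch at " + pos);
        }

        private static long key(int ruleIndex, int start) { return ((long) ruleIndex << 32) | start; }

        public static void main(String[] args) {
            BacktrackMemoSketch r = new BacktrackMemoSketch("ab");
            r.backtracking = 1;                                   // speculative pass
            int marked = r.pos;
            r.ab();
            System.out.println("speculation " + (r.failed ? "failed" : "succeeded"));
            r.pos = marked; r.failed = false; r.backtracking = 0;
            r.ab();                                               // the real parse
            System.out.println("consumed " + r.pos + " chars");
        }
    }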
-
-/** How to generate code for a rule.
- *  The return type aggregates are declared in the header file (headerFile template)
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-
-/*
- * $ANTLR start <ruleName>
- * <fileName>:<description>
- */
-- (<returnType()>) <ruleName><ruleDescriptor.parameterScope:parameterScope()>
-{
-    <if(trace)>[self traceIn:\@"<ruleName>" Index:<ruleDescriptor.index>];<endif>
-    <if(trace)>NSLog(@"enter <ruleName> %@ failed=%@ backtracking=%d", [input LT:1], (state.failed==YES)?@"YES":@"NO", state.backtracking);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleDescriptor.actions.init>
-    <@preamble()>
-    @try {
-        <ruleMemoization(name=ruleName)>
-        <ruleLabelDefs()>
-        <block>
-        <ruleCleanUp()>
-        <(ruleDescriptor.actions.after):execAction()>
-    }
-<if(exceptions)>
-    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else><if(!emptyRule)><if(actions.(actionScope).rulecatch)>
-    <actions.(actionScope).rulecatch>
-<else>
-    @catch (ANTLRRecognitionException *re) {
-        [self reportError:re];
-        [self recover:input Exception:re];
-        <@setErrorReturnValue()>
-    }<\n>
-<endif><endif><endif>
-    @finally {
-        <if(trace)>[self traceOut:@"<ruleName>" Index:<ruleDescriptor.index>];<endif>
-        <memoize()>
-        <ruleScopeCleanUp()>
-        <finally>
-    }
-    <@postamble()>
-    return <ruleReturnValue()>;
-}
-/* $ANTLR end <ruleName> */
->>
-
-finalCode(finalBlock) ::= <<
-{
-    <finalBlock>
-}
->>
-
-catch(decl,action) ::= <<
-@catch (<e.decl>) {
-    <e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-/* ruleDeclarations */
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<returnType()> retval = [<ruleDescriptor:returnStructName()> new<ruleDescriptor:returnStructName()>];
-[retval setStart:[input LT:1]];<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
-}>
-<endif>
-<if(memoize)>
-NSInteger <ruleDescriptor.name>_StartIndex = input.index;
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-/* ruleScopeSetUp */
-<ruleDescriptor.useScopes:{it | [<it>_stack push:[<it>_Scope new<it>_Scope]];}>
-<ruleDescriptor.ruleScope:{it | [<it.name>_stack push:[<it.name>_Scope new<it.name>_Scope]];}>
->>
-
-ruleScopeCleanUp() ::= <<
-/* ruleScopeCleanUp */
-<ruleDescriptor.useScopes:{it | [<it>_stack pop];}; separator="\n">
-<ruleDescriptor.ruleScope:{it | [<it.name>_stack pop];}; separator="\n">
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels, ruleDescriptor.tokenListLabels,
-  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it | <labelType> *<it.label.text> = nil;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it | AMutableArray *list_<it.label.text> = nil;}; separator="\n"
->
-<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|ANTLRParserRuleReturnScope *<ll.label.text> = nil;}; separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{it | <labelType> *<it.label.text>=nil;}; separator="\n"
->
-<ruleDescriptor.charLabels:{it | NSInteger <it.label.text>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels]:{it | AMutableArray *list_<it.label.text>=nil; }; separator="\n">
->>
-
-ruleReturnValue() ::= <%
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-retval
-<endif>
-<endif>
-<endif>
-%>
-
-ruleCleanUp() ::= <<
-/* token+rule list labels */
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{it | [list_<it.label.text> release];}; separator="\n">
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-[retval setStop:[input LT:-1]];<\n>
-<endif><endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if (state.backtracking > 0) [self memoize:input RuleIndex:<ruleDescriptor.index> StartIndex:<ruleDescriptor.name>_StartIndex];
-<endif><endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName, nakedBlock, ruleDescriptor, block, memoize) ::= <<
-// $ANTLR start "<ruleName>"
-- (void) m<ruleName><if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope:parameterScope(scope=it)><endif>
-{
-    //<if(trace)>[self traceIn:\@"<ruleName>" Index:<ruleDescriptor.index>];<endif>
-    <if(trace)>NSLog(@"enter <ruleName> %C line=%d:%d failed=%@ backtracking=%d",
-        [input LA:1],
-        self.line,
-        self.charPositionInLine,
-        (state.failed==YES) ? @"YES" : @"NO",
-        state.backtracking);
-    <endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    @try {
-<if(nakedBlock)>
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block><\n>
-<else>
-        NSInteger _type = <ruleName>;
-        NSInteger _channel = ANTLRTokenChannelDefault;
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block>
-        <ruleCleanUp()>
-        state.type = _type;
-        state.channel = _channel;
-        <(ruleDescriptor.actions.after):execAction()>
-<endif>
-    }
-    @finally {
-        //<if(trace)>[self traceOut:[NSString stringWithFormat:@"<ruleName> %d\n", <ruleDescriptor.index>]];<endif>
-        <if(trace)>NSLog(@"exit <ruleName> %C line=%d:%d failed=%@ backtracking=%d",
-                    [input LA:1], self.line, self.charPositionInLine,
-                    (state.failed==YES) ? @"YES" : @"NO", state.backtracking);<endif>
-        <ruleScopeCleanUp()>
-        <memoize()>
-    }
-    return;
-}
-/* $ANTLR end "<ruleName>" */
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-- (void) mTokens
-{
-    <block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description> // block
-NSInteger alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-<@prebranch()>
-switch (alt<decisionNumber>) {
-    <alts:{a | <altSwitchCase(i, a)>}>
-}
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description> //ruleblock
-NSInteger alt<decisionNumber>=<maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-switch (alt<decisionNumber>) {
-    <alts:{a | <altSwitchCase(i, a)>}>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description> // ruleBlockSingleAlt
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description> // blockSingleAlt
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description> // positiveClosureBlock
-NSInteger cnt<decisionNumber> = 0;
-<decls>
-<@preloop()>
-do {
-    NSInteger alt<decisionNumber> = <maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) {
-        <alts:{a | <altSwitchCase(i, a)>}>
-        default :
-            if ( cnt<decisionNumber> >= 1 )
-                goto loop<decisionNumber>;
-            <ruleBacktrackFailure()>
-            ANTLREarlyExitException *eee =
-                [ANTLREarlyExitException newException:input decisionNumber:<decisionNumber>];
-            <@earlyExitException()>
-            @throw eee;
-    }
-    cnt<decisionNumber>++;
-} while (YES);
-loop<decisionNumber>: ;
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
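positiveClosureBlock above generates the (..)+ loop: keep re-running the decision, and only break out once at least one alternative has matched (cnt >= 1); otherwise raise an early-exit exception. A tiny self-contained Java sketch of the same loop shape (invented names, not generated code):

    public class PositiveClosureSketch {
        // (LETTER)+ over a string: at least one iteration is required, as in the
        // positiveClosureBlock template (cnt >= 1 before breaking out of the loop).
        public static void main(String[] args) {
            String input = "abc1";
            int pos = 0;
            int cnt = 0;
            while (true) {
                if (pos < input.length() && Character.isLetter(input.charAt(pos))) {
                    pos++;                   // one more alternative matched
                } else {
                    if (cnt >= 1) break;     // goto loop<decisionNumber>;
                    throw new RuntimeException("early exit: expected at least one letter");
                }
                cnt++;
            }
            System.out.println("matched " + cnt + " letters");
        }
    }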
-
-/** A (..)* block with 0 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-<decls>
-<@preloop()>
-do {
-    NSInteger alt<decisionNumber>=<maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch (alt<decisionNumber>) {
-        <alts:{a | <altSwitchCase(i, a)>}>
-        default :
-            goto loop<decisionNumber>;
-    }
-} while (YES);
-loop<decisionNumber>: ;
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase(altNum, alt) ::= <<
-case <altNum> : ;
-    <@prealt()>
-    <alt>
-    break;<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-// <fileName>:<description> // alt
-{
-<@declarations()>
-<elements:element()>
-<rew>
-<@cleanup()>
-}
->>
-
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element(e) ::= << <@prematch()><\n><e.el><\n> >>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label>=(<labelType> *)<endif>[self match:input TokenType:<token> Follow:FOLLOW_<token>_in_<ruleName><elementIndex>]; <checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-listLabel(label,elem) ::= <<
-if (list_<label> == nil) list_<label> = [[AMutableArray arrayWithCapacity:5] retain];
-[list_<label> addObject:<elem>];<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>NSInteger <label> = [input LA:1];<\n><endif>
-[self matchChar:<char>]; <checkRuleBacktrackFailure()><\n>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)><label> = [input LA:1];<\n><endif>
-[self matchRangeFromChar:<a> to:<b>]; <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
-<if(label)>
-<if(LEXER)>
-<label> = [input LA:1];<\n>
-<else>
-<label> = (<labelType> *)[input LT:1]; /* matchSet */<\n>
-<endif><endif>
-if (<s>) {
-    [input consume];
-    <postmatchCode>
-<if(!LEXER)>
-    [state setIsErrorRecovery:NO];
-<endif>
-    <if(backtracking)>state.failed = NO;<\n><endif>
-} else {
-    <ruleBacktrackFailure()>
-    ANTLRMismatchedSetException *mse = [ANTLRMismatchedSetException newException:nil stream:input];
-    <@mismatchedSetException()>
-<if(LEXER)>
-<if(label)>
-    mse.c = <label>;
-<endif>
-    [self recover:mse];
-    @throw mse;
-<else>
-    @throw mse;
-    <! use following code to make it recover inline; remove throw mse;
-    [self recoverFromMismatchedSet:input exception:mse follow:FOLLOW_set_in_<ruleName><elementIndex>]; !>
-<endif>
-}<\n>
->>
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex="0") ::= <<
-<if(label)>
-NSInteger <label>Start = input.index;
-[self matchString:<string>]; <checkRuleBacktrackFailure()>
-NSInteger StartLine<elementIndex> = self.line;
-NSInteger <label>StartCharPos<elementIndex> = self.charPositionInLine;
-<label> = [[<labelType> newToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:<label>Start Stop:input.index] retain];
-[self setLine:<label>StartLine<elementIndex>];
-[self setCharPositionInLine:<label>StartCharPos<elementIndex>];
-<else>
-[self matchString:<string>]; <checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)>
-<label> = (<labelType> *)[input LT:1];<\n>
-<endif>
-[self matchAny:input]; <checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<wildcard(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-NSInteger <label> = [input LA:1];<\n>
-<endif>
-[self matchAny]; <checkRuleBacktrackFailure()><\n>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.  The 'rule' argument was the
- *  target rule name, but now is type Rule, whose toString is
- *  same: the rule name.  Now though you can access full rule
- *  descriptor stuff.
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-/* ruleRef */
-[self pushFollow:FOLLOW_<rule.name>_in_<ruleName><elementIndex>];
-<if(label)><label> = <endif>[self <if(scope)><scope:delegateName()>.<endif><rule.name><if(args)>:<first(args)> <rest(args):{ a | arg<i>:<rest(args)>}; separator=" "><endif>];<\n>
-[self popFollow];
-<checkRuleBacktrackFailure()><\n>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** A lexer rule reference.
- *
- *  The 'rule' argument was the target rule name, but now
- *  is type Rule, whose toString is same: the rule name.
- *  Now though you can access full rule descriptor stuff.
- */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
-<if(label)>
-NSInteger <label>Start<elementIndex> = input.index;
-[self m<rule.name><if(args)>:<args; separator=" :"><endif>]; <checkRuleBacktrackFailure()><\n>
-<label> = [[<labelType> newToken:input Type:ANTLRTokenTypeInvalid Channel:ANTLRTokenChannelDefault Start:<label>Start<elementIndex> Stop:input.index-1] retain];
-<label>.line = self.line;
-<else>
-[self <if(scope)><scope:delegateName()>.<endif>m<rule.name><if(args)>:<args; separator=" :"><endif>]; <checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-NSInteger <label>Start<elementIndex> = input.index;
-[self matchChar:ANTLRCharStreamEOF]; <checkRuleBacktrackFailure()><\n>
-<labelType> <label> = [[<labelType> newToken:input Type:ANTLRTokenTypeEOF Channel:ANTLRTokenChannelDefault Start:<label>Start<elementIndex> Stop:input.index-1] retain];
-<label>.line = self.line;
-<else>
-[self matchChar:ANTLRCharStreamEOF]; <checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-// used for left-recursive rules
-recRuleDefArg()                       ::= "int <recRuleArg()>"
-recRuleArg()                          ::= "_p"
-recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
-recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
-recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( [input LA:1] == DOWN ) {
-    [self match:input TokenType:DOWN Follow:nil]; <checkRuleBacktrackFailure()>
-    <children:element()>
-    [self match:input TokenType:UP Follow:nil]; <checkRuleBacktrackFailure()>
-}
-<else>
-    [self match:input TokenType:DOWN Follow:nil]; <checkRuleBacktrackFailure()>
-    <children:element()>
-    [self match:input TokenType:UP Follow:nil]; <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if ( !(<evalPredicate(...)>) ) {
-    <ruleBacktrackFailure()>
-    @throw [ANTLRFailedPredicateException newException:@"<ruleName>" predicate:@"<description>" stream:input];
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-NSInteger LA<decisionNumber>_<stateNumber> = [input LA:<k>];<\n>
-<edges; separator="\nelse ">
-else {
-<if(eotPredictsAlt)>
-    alt<decisionNumber> = <eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:<decisionNumber> state:<stateNumber> stream:input];
-    nvae.c = LA<decisionNumber>_<stateNumber>;
-    <@noViableAltException()>
-    @throw nvae;<\n>
-<endif>
-}
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and more what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-NSInteger LA<decisionNumber>_<stateNumber> = [input LA:<k>];<\n>
-<edges; separator="\nelse ">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-NSInteger LA<decisionNumber>_<stateNumber> = [input LA:<k>];
-<edges; separator="\nelse "><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
-<else>
-else {
-    alt<decisionNumber> = <eotPredictsAlt>;
-}<\n>
-<endif><endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter to the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
-    <targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-unichar charLA<decisionNumber> = [input LA:<k>];
-switch (charLA<decisionNumber>) {
-    <edges; separator="\n"><\n>
-default: ;
-<if(eotPredictsAlt)>
-    alt<decisionNumber> = <eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:<decisionNumber> state:<stateNumber> stream:input];
-    nvae.c = charLA<decisionNumber>;
-    <@noViableAltException()>
-    @throw nvae;<\n>
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ([input LA:<k>]) { // dfaOptionalBlockStateSwitch
-    <edges; separator="\n"><\n>
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ([input LA:<k>]) { // dfaLoopbackStateSwitch
-    <edges; separator="\n"><\n>
-<if(eotPredictsAlt)>
-default:
-    alt<decisionNumber> = <eotPredictsAlt>;
-    break;<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-<labels:{it | case <it>: ;}; separator="\n">
-    {
-    <targetState>
-    }
-    break;
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = [dfa<decisionNumber> predict:input];
->>
-
-/** Used in headerFile */
-cyclicDFAInterface(dfa) ::= <<
-#pragma mark Cyclic DFA interface start DFA<dfa.decisionNumber>
-@interface DFA<dfa.decisionNumber> : ANTLRDFA {
-}
-+ newDFA<dfa.decisionNumber>WithRecognizer:(ANTLRBaseRecognizer *)theRecognizer;
-- initWithRecognizer:(ANTLRBaseRecognizer *)recognizer;
-@end /* end of DFA<dfa.decisionNumber> interface  */<\n>
-#pragma mark Cyclic DFA interface end DFA<dfa.decisionNumber><\n>
->>
-
-/** Used in lexer/parser implementation files */
-/* Dump DFA tables as run-length-encoded Strings of octal values.
- * Can't use hex as compiler translates them before compilation.
- * These strings are split into multiple, concatenated strings.
- * Java puts them back together at compile time thankfully.
- * Java cannot handle large static arrays, so we're stuck with this
- * encode/decode approach.  See analysis and runtime DFA for
- * the encoding methods.
- */
-cyclicDFA(dfa) ::= <<
-#pragma mark Cyclic DFA implementation start DFA<dfa.decisionNumber>
-@implementation DFA<dfa.decisionNumber>
-const static NSInteger dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] =
-    {<dfa.eot; wrap="\n     ", separator=",", null="-1">};
-const static NSInteger dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] =
-    {<dfa.eof; wrap="\n     ", separator=",", null="-1">};
-const static unichar dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] =
-    {<dfa.min; wrap="\n     ", separator=",", null="0">};
-const static unichar dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] =
-    {<dfa.max; wrap="\n     ", separator=",", null="0">};
-const static NSInteger dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] =
-    {<dfa.accept; wrap="\n     ", separator=",", null="-1">};
-const static NSInteger dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] =
-    {<dfa.special; wrap="\n     ", separator=",", null="-1">};
-const static NSInteger dfa<dfa.decisionNumber>_transition[] = {};
-<dfa.edgeTransitionClassMap.keys:{ table |
-const static NSInteger dfa<dfa.decisionNumber>_transition<i0>[] = {<table; separator=", ", wrap="\n ", null="-1">\};
-}; null="">
-
-+ (id) newDFA<dfa.decisionNumber>WithRecognizer:(ANTLRBaseRecognizer *)aRecognizer
-{
-    return [[[DFA<dfa.decisionNumber> alloc] initWithRecognizer:aRecognizer] retain];
-}
-
-- (id) initWithRecognizer:(ANTLRBaseRecognizer *) theRecognizer
-{
-    self = [super initWithRecognizer:theRecognizer];
-    if ( self != nil ) {
-        decisionNumber = <dfa.decisionNumber>;
-        eot = dfa<dfa.decisionNumber>_eot;
-        eof = dfa<dfa.decisionNumber>_eof;
-        min = dfa<dfa.decisionNumber>_min;
-        max = dfa<dfa.decisionNumber>_max;
-        accept = dfa<dfa.decisionNumber>_accept;
-        special = dfa<dfa.decisionNumber>_special;
-        if (!(transition = calloc(<dfa.numberOfStates>, sizeof(void*)))) {
-            [self release];
-            return nil;
-        }
-        len = <dfa.numberOfStates>;
-        <dfa.transitionEdgeTables:{whichTable|transition[<i0>] = dfa<dfa.decisionNumber>_transition<whichTable>;}; separator="\n", null="">
-    }
-    return self;
-}
-
-<if(dfa.specialStateSTs)>
-/* start dfa.specialStateSTs */
-- (NSInteger) specialStateTransition:(NSInteger)s Stream:(id\<ANTLRIntStream\>)anInput
-{
-<if(LEXER)>
-    id\<ANTLRIntStream\> input = anInput;<\n>
-<endif>
-<if(PARSER)>
-    id\<ANTLRTokenStream\> input = (id\<ANTLRTokenStream\>)anInput;<\n>
-<endif>
-<if(TREE_PARSER)>
-    id\<ANTLRTreeNodeStream\> input = (id\<ANTLRTreeNodeStream\>)anInput;<\n>
-<endif>
-    switch (s) {
-        <dfa.specialStateSTs:{state |
-        case <i0> : ;<! compressed special state numbers 0..n-1 !>
-            <state>}; separator="\n">
-    }
-<if(backtracking)>
-    if ( [recognizer getBacktrackingLevel] > 0 ) { [recognizer setFailed:YES]; return -1; }<\n>
-<endif>
-    ANTLRNoViableAltException *nvae = [ANTLRNoViableAltException newException:<dfa.decisionNumber> state:s stream:recognizer.input];
-    // nvae.c = s;
-    /* [self error:nvae]; */ <! for debugger - do later !>
-    @throw nvae;
-}<\n>
-/* end dfa.specialStateSTs */
-<endif>
-
-- (void) dealloc
-{
-    free(transition);
-    [super dealloc];
-}
-
-- (NSString *) description
-{
-    return @"<dfa.description>";
-}
-
-<@errorMethod()>
-
-@end /* end DFA<dfa.decisionNumber> implementation */<\n>
-#pragma mark Cyclic DFA implementation end DFA<dfa.decisionNumber>
-<\n>
->>
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber, stateNumber, edges, needErrorClause, semPredState) ::= <<
-/* cyclicDFAState */
-NSInteger LA<decisionNumber>_<stateNumber> = [input LA:1];<\n>
-<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-NSInteger index<decisionNumber>_<stateNumber> = input.index;
-[input rewind];<\n>
-<endif>
-s = -1;
-<edges; separator="\nelse ">
-<if(semPredState)> <! return input cursor to state before we rewound !>
-[input seek:index<decisionNumber>_<stateNumber>];<\n>
-<endif>
-if ( s >= 0 )
-    return s;
- break;
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-/* cyclicDFAEdge */
-if (<labelExpr><if(predicates)> && (<predicates>)<endif>) { s = <targetStateNumber>;}<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-s = <targetStateNumber>;<\n> /* eotDFAEdge */
->>
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left>&&<right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(pred, \"\")>)"
-
-evalPredicate(pred,description) ::= "(<pred>)"
-
-/*
- * evalSynPredicate(pred,description) ::= "<pred>()"
- *
- * synpreds are broken in cyclic DFA special states
- *  Damn! For now, work around this by using the selectors directly and by providing a trampoline evalSynPred method in
- *  ANTLRDFA
- */
-/* evalSynPredicate(pred,description) ::= "[self evaluateSyntacticPredicate:<pred>Selector stream:input]" */
-evalSynPredicate(pred,description) ::= "[self evaluateSyntacticPredicate:@selector(<pred>_fragment)]"
-/* evalSynPredicate(pred,description) ::= "[recognizer <pred>]" */
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "[input LA:<k>] == <atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
-(LA<decisionNumber>_<stateNumber> >= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
-%>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(([input LA:<k>] >= <lower>) && ([input LA:<k>] \<= <upper>))"
-
-setTest(ranges) ::= "<ranges; separator=\"||\">"
-
-// A T T R I B U T E S
-
-memVars(scope) ::= << <scope.attributes:{a|<a.type> <a.name>;<\n>}; separator="\n"> >>
-
-properties(scope) ::= <<
-<scope.attributes:{a|@property (assign, getter=get<a.name>, setter=set<a.name>:) <a.type> <a.name>;<\n>}; separator="\n">
->>
-
-methodsDecl(scope) ::= <<
-<scope.attributes:{a|- (<a.type>)get<a.name>;<\n>- (void)set<a.name>:(<a.type>)aVal;<\n>}; separator="\n">
->>
-
-synthesize(scope) ::= << <scope.attributes:{a|@synthesize <a.name>;}; separator="\n"> >>
-
-methods(scope) ::= <%
-<scope.attributes:{a|
-- (<a.type>)get<a.name> { return( <a.name> ); \}<\n>
-- (void)set<a.name>:(<a.type>)aVal { <a.name> = aVal; \}<\n>}; separator="\n">
-%>
-
-globalAttributeScopeInterface(scope) ::= <%
-/* globalAttributeScopeInterface */<\n>
-@interface <scope.name>_Scope : ANTLRSymbolsScope {<\n>
-<if(scope.attributes)>
-<memVars(scope)>
-<endif>
-}<\n>
-<if(scope.attributes)>
-/* start of globalAttributeScopeInterface properties */<\n>
-<properties(scope)>
-/* end globalAttributeScopeInterface properties */<\n>
-<endif>
-
-+ (<scope.name>_Scope *)new<scope.name>_Scope;<\n>
-- (id) init;<\n>
-<if(scope.attributes)>
-/* start of globalAttributeScopeInterface methodsDecl */<\n>
-<methodsDecl(scope)>
-/* End of globalAttributeScopeInterface methodsDecl */<\n>
-<endif>
-@end /* end of <scope.name>_Scope interface */<\n>
-%>
-
-globalAttributeScopeMemVar(scope) ::= <%
-/* globalAttributeScopeMemVar */<\n>
-ANTLRSymbolStack *<scope.name>_stack;<\n>
-<scope.name>_Scope *<scope.name>_scope;<\n>
-%>
-
-globalAttributeScopeImplementation(scope) ::= <%
-@implementation <scope.name>_Scope  /* globalAttributeScopeImplementation */<\n>
-<if(scope.attributes)>
-/* start of synthesize -- OBJC-Line 1750 */<\n>
-<synthesize(scope)><\n>
-<endif>
-<\n>
-+ (<scope.name>_Scope *)new<scope.name>_Scope<\n>
-{<\n>
-    return [[<scope.name>_Scope alloc] init];<\n>
-}<\n>
-<\n>
-- (id) init<\n>
-{<\n>
-    self = [super init];<\n>
-    return self;<\n>
-}<\n>
-<\n>
-<if(scope.attributes)>
-/* start of iterate get and set functions */<\n>
-<methods(scope)><\n>
-/* End of iterate get and set functions */<\n>
-<endif>
-@end /* end of <scope.name>_Scope implementation */<\n><\n>
-%>
-
-globalAttributeScopeInit(scope) ::= <<
-/* globalAttributeScopeInit */<\n>
-<scope.name>_scope = [<scope.name>_Scope new<scope.name>_Scope];<\n>
-<scope.name>_stack = [ANTLRSymbolStack newANTLRSymbolStackWithLen:30];<\n>
->>
-
-globalAttributeScopeDealloc(scope) ::= << [<scope.name>_stack release];<\n> >>
-
-globalAttributeScope(scope) ::= << static <scope.name>_stack;<\n> >>
-
-ruleAttributeScopeMemVar(scope) ::= <%
-/* ObjC ruleAttributeScopeMemVar */<\n>
-<if(scope.attributes)>
-<scope.name>_Scope *<scope.name>_scope; /* ObjC ruleAttributeScopeMemVar */<\n>
-<endif>
-%>
-
-ruleAttributeScopeInterface(scope) ::= <%
-<if(scope.attributes)>
-/* start of ruleAttributeScopeInterface */<\n>
-@interface <scope.name>_Scope : ANTLRSymbolsScope {<\n>
-    <memVars(scope)><\n>
-}<\n>
-<\n>
-/* start property declarations */<\n>
-<properties(scope)><\n>
-/* start method declarations */<\n>
-+ (<scope.name>_Scope *)new<scope.name>_Scope;<\n>
-- (id) init;<\n>
-<methodsDecl(scope)><\n>
-@end /* end of ruleAttributeScopeInterface */<\n><\n>
-<endif>
-%>
-
-ruleAttributeScopeImplementation(scope) ::= <%
-<if(scope.attributes)>
-@implementation <scope.name>_Scope  /* start of ruleAttributeScopeImplementation */<\n>
-<synthesize(scope)><\n>
-<\n>
-+ (<scope.name>_Scope *)new<scope.name>_Scope<\n>
-{<\n>
-    return [[<scope.name>_Scope alloc] init];<\n>
-}<\n>
-<\n>
-- (id) init<\n>
-{<\n>
-    self = [super init];<\n>
-    return self;<\n>
-}<\n>
-<\n>
-/* start of <scope.name>_Scope get and set functions */<\n>
-<methods(scope)><\n>
-/* End of <scope.name>_Scope get and set functions */<\n>
-@end /* end of ruleAttributeScopeImplementation */<\n><\n>
-<endif>
-%>
-
-ruleAttributeScopeInit(scope) ::= <%
-/* ruleAttributeScopeInit */<\n>
-<scope.name>_scope = [<scope.name>_Scope new<scope.name>_Scope];<\n>
-<scope.name>_stack = [ANTLRSymbolStack newANTLRSymbolStackWithLen:30];<\n>
-%>
-
-ruleAttributeScopeDealloc(scope) ::= <% [<scope.name>_Scope release];<\n> %>
-
-ruleAttributeScope(scope) ::= <%
-<if(scope.attributes)>
-/* ruleAttributeScope */<\n>
-static ANTLRSymbolStack *<scope.name>_stack;<\n>
-<endif>
-%>
-
-ruleAttributeScopeDecl(scope) ::= <%
-/* ruleAttributeScopeDecl */<\n>
-<if(scope.attributes)>
-<scope.name>_Scope *<scope.name>_scope;<\n>
-<endif>
-%>
-
-returnStructName(r) ::= "<className()>_<r.name>_return"
-
-returnType() ::= <%
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor:returnStructName()> *
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
-<else>
-void
-<endif>
-%>
-
-/** Generate the Objective-C type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <%
-<if(referencedRule.hasMultipleReturnValues)>
-<className()>_<referencedRule.name>_return *<else>
-<if(referencedRule.hasSingleReturnValue)><referencedRule.singleValueReturnType><else>
-void<endif>
-<endif>
-%>
-
-delegateName(d) ::= << <if(d.label)><d.label><else>g<d.name><endif> >>
-
-/** Using the type-to-init-value map, try to init a type; if the type is not in
- *  the table it must be an object, and the default value is "null".
- */
-initValue(typeName) ::= <% <objcTypeInitMap.(typeName)> %>
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= << <ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n> >>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScopeInterface(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-/* returnScopeInterface <ruleDescriptor:returnStructName()> */
-@interface <ruleDescriptor:returnStructName()> : ANTLR<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope { /* returnScopeInterface line 1838 */
-<@memVars()> /* ObjC start of memVars() */<\n>
-<if(scope.attributes)>
-<memVars(scope)><\n>
-<endif>
-}
-/* start property declarations */
-<@properties()><\n>
-<if(scope.attributes)>
-<properties(scope)><\n>
-<endif>
-/* start of method declarations */<\n>
-+ (<ruleDescriptor:returnStructName()> *)new<ruleDescriptor:returnStructName()>;
-/* this is start of set and get methods */
-<@methodsDecl()>  /* methodsDecl */<\n>
-<if(scope.attributes)>
-/* start of iterated get and set functions */<\n>
-<methodsDecl(scope)><\n>
-<endif>
-@end /* end of returnScopeInterface interface */<\n>
-<endif>
->>
-
-returnScopeImplementation(scope) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-@implementation <ruleDescriptor:returnStructName()> /* returnScopeImplementation */<\n>
-<@synthesize()> /* start of synthesize -- OBJC-Line 1837 */<\n>
-<if(scope.attributes)>
-    <synthesize(scope)><\n>
-<endif>
-+ (<ruleDescriptor:returnStructName()> *)new<ruleDescriptor:returnStructName()><\n>
-{<\n>
-    return [[[<ruleDescriptor:returnStructName()> alloc] init] retain];<\n>
-}<\n>
-<\n>
-- (id) init<\n>
-{<\n>
-    self = [super init];<\n>
-    return self;<\n>
-}<\n>
-<\n>
-<@methods()><\n>
-<if(scope.attributes)>
-/* start of iterate get and set functions */<\n>
-<methods(scope)><\n>
-/* End of iterate get and set functions */<\n>
-<endif>
-<actions.(actionScope).ruleReturnMethods>
-<@ruleReturnMembers()><\n>
-@end /* end of returnScope implementation */<\n><\n>
-<endif>
-%>
-
-parameterScope(scope) ::= <<
-<! <scope.attributes:{it | :(<it.type>)<it.name>}; separator=" "> !>
-<first(scope.attributes):{ a | :(<a.type>)<a.name>}> <rest(scope.attributes):{ a | arg<i>:(<a.type>)<a.name> }; separator=" ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>;"
-
-/** Note that the scopeAttributeRef does not have access to the
- * grammar name directly
- */
-scopeAttributeRef(scope,attr,index,negIndex) ::= <%
-<if(negIndex)>
-((<scope>_Scope *)[<scope>_stack objectAtIndex:([<scope>_stack size]-<negIndex>-1)]).<attr.name>
-<else>
-<if(index)>
-((<scope>_Scope *)[<scope>_stack objectAtIndex:<index>]).<attr.name>
-<else>
-((<scope>_Scope *)[<scope>_stack peek]).<attr.name>
-<endif>
-<endif>
-%>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
-/* scopeSetAttributeRef */
-<if(negIndex)>
-((<scope>_Scope *)[<scope>_stack objectAtIndex:([<scope>_stack size]-<negIndex>-1)]).<attr.name> = <expr>;
-<else>
-<if(index)>
-((<scope>_Scope *)[<scope>_stack objectAtIndex:<index>]).<attr.name> = <expr>;
-<else>
-((<scope>_Scope *)[<scope>_stack peek]).<attr.name> = <expr>;
-<endif>
-<endif>
-%>
-
-scopeAttributeRefStack() ::= <<
-/* scopeAttributeRefStack */
-<if(negIndex)>
-((<scope>_Scope *)[<scope>_stack objectAtIndex:[<scope>_stack count]-<negIndex>-1]).<attr.name> = <expr>;
-<else>
-<if(index)>
-((<scope>_Scope *)[<scope>_stack objectAtIndex:<index>]).<attr.name> = <expr>;
-<else>
-((<scope>_Scope *)[<scope>_stack peek]).<attr.name> = <expr>;
-<endif>
-<endif>
->>
-
-/** $x is either global scope or x is rule with dynamic scope; refers
- *  to stack itself not top of stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-(<scope>!=nil?<scope>.<attr.name>:<initValue(attr.type)>)
-<else>
-<scope>
-<endif>
->>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>  /* added to returnAttributeRef */<\n>
-<else>
-<attr.name><\n>
-<endif>
-%>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
- retval.<attr.name> =<expr>; /* added to returnSetAttributeRef */<\n>
-<else>
-<attr.name> = <expr>;<\n>
-<endif>
-%>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-/* not sure the next are the right approach; and they are evaluated early; */
-/* they cannot see TREE_PARSER or PARSER attributes for example. :( */
-
-tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=nil?<scope>.text:nil)"
-tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=nil?<scope>.type:0)"
-tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=nil?<scope>.line:0)"
-tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=nil?<scope>.charPositionInLine:0)"
-tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=nil?<scope>.channel:0)"
-tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=nil?[<scope> getTokenIndex]:0)"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=nil?[<scope>.text integerValue]:0)"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=nil?((<labelType> *)<scope>.start):nil)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=nil?((<labelType> *)<scope>.stopToken):nil)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=nil?((<ASTLabelType> *)<scope>.tree):nil)"
-ruleLabelPropertyRef_text(scope,attr) ::= <%
-<if(TREE_PARSER)>
-(<scope>!=nil?[[input getTokenStream] toStringFromStart:[[input getTreeAdaptor] getTokenStartIndex:[<scope> getStart]]
-         ToEnd:[[input getTreeAdaptor] getTokenStopIndex:[<scope> getStart]]]:0)
-<else>
-(<scope>!=nil?[input toStringFromStart:[<scope> getStart] ToEnd:[<scope> getStop]]:0)
-<endif>
-%>
-ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=nil?[<scope> st]:nil)"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>!=nil?<scope>.type:0)"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>!=nil?<scope>.line:0)"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=nil?<scope>.charPositionInLine:-1)"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=nil?<scope>.channel:0)"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>!=nil?[<scope> getTokenIndex]:0)"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>!=nil?<scope>.text:nil)"
-lexerRuleLabelPropertyRef_int(scope,attr) ::="(<scope>!=nil?[<scope>.text integerValue]:0)"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "((<labelType> *)retval.start)"
-rulePropertyRef_stop(scope,attr) ::= "((<labelType> *)retval.stopToken)"
-rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType> *)retval.tree)"
-rulePropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-[[input getTokenStream] toStringFromStart:[[input getTreeAdaptor] getTokenStartIndex:retval.start.token.startIndex]
-                                    ToEnd:[[input getTreeAdaptor] getTokenStopIndex:retval.start.token.stopIndex]]
-<else>
-[input toStringFromToken:retval.start ToToken:[input LT:-1]]
-<endif>
->>
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-/* hideous: find a way to cut down on the number of templates to support read/write access */
-/* TODO: also, which ones are valid to write to? ask Ter */
-lexerRuleSetPropertyRef_text(scope,attr,expr) ::= "state.text = <expr>;"
-lexerRuleSetPropertyRef_type(scope,attr,expr) ::= "_type"
-lexerRuleSetPropertyRef_line(scope,attr,expr) ::= "state.tokenStartLine"
-lexerRuleSetPropertyRef_pos(scope,attr,expr) ::= "state.tokenStartCharPositionInLine"
-lexerRuleSetPropertyRef_index(scope,attr,expr) ::= "-1" /* undefined token index in lexer */
-lexerRuleSetPropertyRef_channel(scope,attr,expr) ::= "state.channel=<expr>;"
-lexerRuleSetPropertyRef_start(scope,attr,expr) ::= "state.tokenStartCharIndex"
-lexerRuleSetPropertyRef_stop(scope,attr,expr) ::= "(input.index-1)"
-
-
-lexerRulePropertyRef_text(scope,attr) ::= "self.text"
-lexerRulePropertyRef_type(scope,attr) ::= "state.type"
-lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
-lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(input.index-1)"
-lexerRulePropertyRef_int(scope,attr) ::= "[<scope>.text integerValue]"
-
-// setting $st and $tree is allowed in local rule. everything else
-// is flagged as error
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.start =<expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;" /* "<\n>#error StringTemplates are unsupported<\n>" */
-
-
-/** How to execute an action */
-execAction(action) ::= <<
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) {
-    <action>
-}
-<else>
-<action>
-<endif>
->>
-
-/** How to always execute an action even when backtracking */
-execForcedAction(action) ::= "<action>"
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-static ANTLRBitSet *<name>;
-static const unsigned long long <name>_data[] = { <words64:{it | <it>LL};separator=", ">};<\n>
->>
-
-bitsetInit(name, words64) ::= <<
-<name> = [[ANTLRBitSet newANTLRBitSetWithBits:(const unsigned long long *)<name>_data Count:(NSUInteger)<length(words64)>] retain];<\n>
->>
-
-codeFileExtension() ::= ".m"
-
-true_value() ::= "YES"
-false_value() ::= "NO"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ST.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ST.stg
deleted file mode 100644
index 8350477..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ST.stg
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/** Template subgroup to add template rewrite output
- *  If debugging, then you'll also get STDbg.stg loaded.
- */
-
-@returnScopeInterface.memVars() ::= <<
-<@super.memVars()>
-/* ST returnInterface.memVars */
-ST *st;
->>
-
-@returnScopeInterface.properties() ::= <<
-<@super.properties()>
-/* ST returnScope.properties */
-@property (retain, getter=getST, setter=setST:) ST *st;
->>
-
-@returnScopeInterface.methodsDecl() ::= <<
-<@super.methodsDecl()>
-/* ST AST returnScopeInterface.methodsDecl */
-- (id) getTemplate;
-- (NSString *) toString;
-- (NSString *) description;
->>
-
-@returnScopeInterface() ::= <<
-/* ST returnScopeInterface */
-@interface <returnScopeInterface.name> : ANTLRReturnScope {
-<returnScopeInterface.memVars()>
-}
-<returnScopeInterface.properties()>
-
-<returnScopeInterface.methods()>
-@end
->>
-
-@returnScopeImplementation.synthesize() ::= <<
-<@super.synthesize()>
-/* ST returnScope.synthesize */
-@synthesize st;
->>
-
-@returnScopeImplementation.methods() ::= <<
-<@super.methods()>
-/* ST returnScope.methods */
-- (id) getTemplate { return st; }
-- (NSString *) toString { return st==nil?nil:[st toString]; }
-- (NSString *) description { return [self toString]; }
->>
-
-@returnScopeImplementation() ::= <<
-@implementation <returnScopeImplementation.name>
-<returnScopeImplementation.synthesize()>
-
-<returnScopeImplementation.methods()>
-@end
->>
-
-/** Add this to each rule's return value struct */
-@returnScope.ruleReturnMembers() ::= <<
-<@super.ruleReturnMembers()>
-/* ST returnScope.ruleReturnMembers -- empty */
->>
-
-@genericParserHeaderFile.memVars() ::= <<
-<@super.memVars()>
-/* ST genericParserHeaderFile.memVars -- empty now */
-STGroup *templateLib; /* ST -- really a part of STAttrMap */
->>
-
-@genericParserHeaderFile.properties() ::= <<
-<@super.properties()>
-/* ST genericParser.properties */
-@property (retain, getter=getTemplateLib, setter=setTemplateLib:) STGroup *templateLib;
->>
-
-@genericParserHeaderFile.methodsDecl() ::= <<
-<@super.methodsDecl()>
-/* ST genericParser.methodsDecl */
-- init;
-- (STGroup *) getTemplateLib;
-- (void) setTemplateLib:(STGroup *)aTemplateLib;
-@end
->>
-
-@genericParser.synthesize() ::= <<
-<@super.synthesize()>
-/* ST genericParserImplementation.synthesize */
-@synthesize templateLib;
->>
-
-@genericParser.methods() ::= <<
-<@super.methods()>
-/* ST genericParser.methods */
-
-- (STGroup *)getTemplateLib
-{
-    return templateLib;
-}
-
-- (void) setTemplateLib:(STGroup *)aTemplateLib
-{
-    templateLib = aTemplateLib;
-}
-
->>
-
-@genericParser.members() ::= <<
-<@super.members()>
-STGroup *templateLib = [STGroup  newSTGroup:@"<name>Templates"];
-
-- (STGroup *) getTemplateLib
-{
-  return templateLib;
-}
-
-- (void) setTemplateLib:(STGroup *) aTemplateLib
-{
-  templateLib = aTemplateLib;
-}
-
-/** allows convenient multi-value initialization:
- *  "new STAttrMap().put(...).put(...)"
- */
-/* REPLACE THIS STATIC CLASS
-static class STAttrMap extends HashMap {
-- (STAttrMap *) setObject:(id)aValue forKey:(NSString *)attrName
-{
-    [super setObject:aValue forKey:attrName];
-    return self;
-}
-- (STAttrMap *) setObjectWithInt:(NSInteger)value forKey:(NSString *)attrName
-{
-    [super setObject:[NSNumber numberWithInteger:value] forKey:attrName];
-    return self;
-  }
-}
-*/
->>
-
-@STAttrMap() ::= <<
-/* -------- ST start STAttrMap ------------ */
-<@STAttrMap.interface()>
-<@STAttrMap.implementation()>
-/* -------- ST end STAttrMap ------------ */
->>
-
-@STAttrMap.interface() ::= <<
-/* -------- ST start STAttrMap.interface ------------ */
-@interface STAttrMap : ANTLRHashMap {
-/*    <@STAttrMap.memVars()> */
-    STGroup *templateLib;
-}
-
-/*    <@STAttrMap.properties()> */
-@property (retain, getter=getTemplateLib, setter=setTemplateLib:) STGroup *templateLib;
-/* <@STAttrMap.methodsDecl()> */
-- (id) init;
-- (STAttrMap *) setObject:(id)value forKey:(NSString *)attrName;
-- (STAttrMap *) setObjectWithInt:(NSInteger)value forKey:(NSString *)attrName;
-- (void) setTemplateLib:(STGroup *)aTemplateLib;
-- (STGroup *) getTemplateLib;
-@end
-/* -------- ST end STAttrMap.interface ------------ */
->>
-
-@STAttrMap.implementation() ::= <<
-/* -------- ST start STAttrMap.implementation ------------ */
-/** allows convenient multi-value initialization:
- *  "new STAttrMap().put(...).put(...)"
- */
-@implementation STAttrMap
-@synthesize templateLib;
-
-<@STAttrMap.methods()>
-@end
-/* -------- ST end STAttrMap.implementation ------------ */
->>
-
-@STAttrMap.memVars() ::= <<
-/* -------- ST start STAttrMap.memVars ------------ */
-    STGroup *templateLib;
-/* -------- ST end STAttrMap.memVars ------------ */
->>
-
-@STAttrMap.properties() ::= <<
-/* -------- ST start STAttrMap.properties ------------ */
-@property (retain, getter=getTemplateLib, setter=setTemplateLib:) STGroup *templateLib;
-/* -------- ST end STAttrMap.properties ------------ */
->>
-
-@STAttrMap.methodsDecl() ::= <<
-/* -------- ST start STAttrMap.methodsDecl ------------ */
-- (id) init;
-- (STAttrMap *) setObject:(id)value forKey:(NSString *)attrName;
-- (STAttrMap *) setObjectWithInt:(NSInteger)value forKey:(NSString *)attrName;
-- (void) setTemplateLib:(STGroup *)aTemplateLib;
-- (STGroup *) getTemplateLib;
-/* -------- ST end STAttrMap.methodsDecl ------------ */
->>
-
-@STAttrMap.methods() ::= <<
-/* -------- ST start STAttrMap.methods ------------ */
-- (id) init
-{
-    self = [super initWithLen:16];
-    if ( self != nil ) {
-        templateLib = [STGroup newSTGroup:@"<name>Templates"];
-    }
-    return self;
-}
-
-- (STAttrMap *) setObject:(id)aValue forKey:(NSString *)aAttrName
-{
-    [super setObject:aValue forKey:aAttrName];
-    return self;
-}
-
-- (STAttrMap *) setObjectWithInt:(NSInteger)aValue forKey:(NSString *)aAttrName
-{
-    [super setObject:[NSNumber numberWithInteger:aValue] forKey:aAttrName];
-    return self;
-}
-- (void) setTemplateLib:(STGroup *)aTemplateLib
-{
-    templateLib = aTemplateLib;
-}
-
-- (STGroup *)getTemplateLib
-{
-    return templateLib;
-}
-/* -------- ST end STAttrMap.methods ------------ */
->>
-
-/** x+=rule when output=template */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-/* ST ruleRefAndListLabel */
-<ruleRef(...)>
-<listLabel(elem={[<label> getTemplate]},...)>
->>
-
-rewriteTemplate(alts) ::= <<
-/* -------- ST start rewriteTemplate ------------ */
-// TEMPLATE REWRITE
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) {
-    <alts:rewriteTemplateAlt(); separator="else ">
-    <if(rewriteMode)><replaceTextInLine()><endif>
-}
-<else>
-<alts:rewriteTemplateAlt(); separator="else ">
-<if(rewriteMode)><replaceTextInLine()><endif>
-<endif>
-/* -------- ST end rewriteTemplate ------------ */
->>
-
-replaceTextInLine() ::= <<
-/* -------- ST start replaceTextInLine ------------ */
-<if(TREE_PARSER)>
-[[(ANTLRTokenRewriteStream *)input getTokenStream]
-    replaceFromIndex:[[input getTreeAdaptor] getTokenStartIndex:retval.start]
-             ToIndex:[[input getTreeAdaptor] getTokenStopIndex:retval.start]
-                Text:retval.st];
-<else>
-[(ANTLRTokenRewriteStream *)input
-        replaceFromIndex:[((ANTLRCommonToken *)retval.start) getTokenIndex]
-                 ToIndex:[[input LT:-1] getTokenIndex]
-                    Text:retval.st];
-<endif>
-/* -------- ST end replaceTextInLine ------------ */
->>
-
-rewriteTemplateAlt() ::= <<
-/* -------- ST start rewriteTemplateAlt ------------ */
-/* ST <it.description> */
-<if(it.pred)>
-if (<it.pred>) {
-    retval.st = <it.alt>;
-}<\n>
-<else>
-{
-    retval.st = <it.alt>;
-}<\n>
-<endif>
-/* -------- ST end rewriteTemplateAlt ------------ */
->>
-
-rewriteEmptyTemplate(alts) ::= <<
-nil;
->>
-
-/** Invoke a template with a set of attribute name/value pairs.
- *  Set the value of the rule's template *after* having set
- *  the attributes because the rule's template might be used as
- *  an attribute to build a bigger template; you get a self-embedded
- *  template.
- */
-rewriteExternalTemplate(name,args) ::= <<
-/* -------- ST start rewriteExternalTemplate ------------ */
-[templateLib getInstanceOf:@"<name>"
-<if(args)>[[STAttrMap newSTAttrMap] <args:{a | setObject:<a.value> forKey:@"<a.name>"]}><endif>]
-/* -------- ST end rewriteExternalTemplate ------------ */
->>
-
-/** expr is a string expression that says what template to load */
-rewriteIndirectTemplate(expr,args) ::= <<
-/* -------- ST start rewriteIndirectTemplate ------------ */
-[templateLib getInstanceOf:<expr>
-<if(args)> [[STAttrMap newSTAttrMap]<args:{a | setObject:<a.value> forKey:@"<a.name>"]}>]
-<else>]<endif>
-/* -------- ST end rewriteIndirectTemplate ------------ */
->>
-
-/** Invoke an inline template with a set of attribute name/value pairs */
-rewriteInlineTemplate(args, template) ::= <<
-/* -------- ST start rewriteInlineTemplate ------------ */
-STGroup *templateLib;
-templateLib.templates = [STAttrMap newSTAttrMap];
-<if(args)> [templateLib.templates <args:{a | setObject:<a.value> forKey:@"<a.name>"];}><endif>
-[ST newST:templateLib template:@"<template>"];
-/* -------- ST end rewriteInlineTemplate ------------ */
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-/* -------- ST start rewriteAction ------------ */
-<action>
-/* -------- ST end rewriteAction ------------ */
->>
-
-/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
-actionSetAttribute(st,attrName,expr) ::= <<
-/* -------- ST start actionSetAttribute ------------ */
-[[ST attributes] setObject:<expr> forKey:@"<attrName>"];
-<![<st> setAttribute:<expr> name:@"<attrName>"];!>
-/* -------- ST end actionSetAttribute ------------ */
->>
-
-/** Translate %{stringExpr} */
-actionStringConstructor(stringExpr) ::= <<
-/* -------- ST start actionStringConstructor ------------ */
-[ST newSTWithTemplate:<stringExpr>]
-/* -------- ST end actionStringConstructor ------------ */
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Perl5/Perl5.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Perl5/Perl5.stg
deleted file mode 100644
index fa875d1..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Perl5/Perl5.stg
+++ /dev/null
@@ -1,1373 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- Copyright (c) 2007-2008 Ronald Blaschke
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-group Perl5;
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-           docComment, recognizer,
-           name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
-	   backtracking, synpreds, memoize, numRules,
-	   fileName, ANTLRVersion, generatedTimestamp, trace,
-	   scopes, superClass, literals) ::=
-<<
-# $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-<actions.(actionScope).header>
-
-<@imports>
-<if(TREE_PARSER)>
-<endif>
-<if(backtracking)>
-<endif>
-<@end>
-
-<docComment>
-<recognizer>
->>
-
-lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
-      filterMode, superClass="ANTLR::Runtime::Lexer")  ::= <<
-package <name>;
-
-use Carp;
-use English qw( -no_match_vars ) ;
-use Readonly;
-use Switch;
-
-use ANTLR::Runtime::BaseRecognizer;
-use ANTLR::Runtime::DFA;
-use ANTLR::Runtime::NoViableAltException;
-
-use Moose;
-
-extends 'ANTLR::Runtime::Lexer';
-
-Readonly my $HIDDEN => ANTLR::Runtime::BaseRecognizer->HIDDEN;
-sub HIDDEN { $HIDDEN }
-
-use constant {
-    <tokens:{ <it.name> => <it.type>, }; separator="\n">
-};
-<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-<actions.lexer.members>
-
-sub BUILD {
-    my ($self, $arg_ref) = @_;
-
-    $self->init_dfas();
-}
-
-sub get_grammar_file_name {
-    return "<fileName>";
-}
-
-<if(filterMode)>
-<filteringNextToken()>
-<endif>
-<rules; separator="\n\n">
-
-<synpreds:{p | <lexerSynpred(p)>}>
-
-<cyclicDFAs:{dfa | has 'dfa<dfa.decisionNumber>';}; separator="\n">
-
-sub init_dfas {
-    my ($self) = @_;
-
-    <cyclicDFAs:{dfa |
-    $self->dfa<dfa.decisionNumber>(<name>::DFA<dfa.decisionNumber>->new({ recognizer => $self }));
-    }; separator="\n">
-
-    return;
-}
-
-<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-no Moose;
-__PACKAGE__->meta->make_immutable();
-1;
-
->>
-
-perlTypeInitMap ::= [
-	"$":"undef",
-	"@":"()",
-	"%":"()",
-	default:"undef"
-]
-
-/** An override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error can be generated upon error; just rewind, consume
- *  a token and then try again.  backtracking needs to be set as well.
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-public Token nextToken() {
-    while (true) {
-        if ( input.LA(1)==CharStream.EOF ) {
-            return Token.EOF_TOKEN;
-        }
-        token = null;
-	channel = Token.DEFAULT_CHANNEL;
-        tokenStartCharIndex = input.index();
-        tokenStartCharPositionInLine = input.getCharPositionInLine();
-        tokenStartLine = input.getLine();
-	text = null;
-        try {
-            int m = input.mark();
-            backtracking=1; <! means we won't throw slow exception !>
-            failed=false;
-            mTokens();
-            backtracking=0;
-            <! mTokens backtracks with synpred at backtracking==2
-               and we set the synpredgate to allow actions at level 1. !>
-            if ( failed ) {
-                input.rewind(m);
-                input.consume(); <! advance one char and try again !>
-            }
-            else {
-                emit();
-                return token;
-            }
-        }
-        catch (RecognitionException re) {
-            // shouldn't happen in backtracking mode, but...
-            reportError(re);
-            recover(re);
-        }
-    }
-}
-
-public void memoize(IntStream input,
-		int ruleIndex,
-		int ruleStartIndex)
-{
-if ( backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
-}
-
-public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
-if ( backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
-return false;
-}
->>
-
-actionGate() ::= "$self->state->backtracking==0"
-
-filteringActionGate() ::= "backtracking==1"
-
-/** How to generate a parser */
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass, filterMode,
-              ASTLabelType="Object", labelType, members) ::= <<
-package <name>;
-
-use English qw( -no_match_vars ) ;
-use Readonly;
-use Switch;
-use Carp;
-use ANTLR::Runtime::BitSet;
-
-use Moose;
-
-extends '<@superClassName><superClass><@end>';
-
-Readonly my $token_names => [
-    "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
-];
-
-use constant {
-<tokens:{ <it.name> => <it.type>, }; separator="\n">
-};
-
-<bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-                    words64=it.bits)>
-
-<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-<@members>
-<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-
-sub BUILD {
-    my ($self, $arg_ref) = @_;
-
-<if(backtracking)>
-    $self->state->rule_memo({});<\n>
-<endif>
-}
-<@end>
-
-sub get_token_names {
-    return $token_names;
-}
-
-sub get_grammar_file_name {
-    return "<fileName>";
-}
-
-<members>
-
-<rules; separator="\n\n">
-
-<synpreds:{p | <synpred(p)>}>
-
-<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = __PACKAGE__::DFA<dfa.decisionNumber>->new($self);}; separator="\n">
-<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-no Moose;
-__PACKAGE__->meta->make_immutable();
-1;
-__END__
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="ANTLR::Runtime::Parser", labelType="ANTLR::Runtime::Token", members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="ANTLR::Runtime::TokenStream", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="ANTLR::Runtime::TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
-<genericParser(inputStreamType="TreeNodeStream", ...)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values
- *  nor parameters etc., just give the simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-# $ANTLR start <ruleName>
-sub <ruleName>_fragment {
-# <ruleDescriptor.parameterScope:parameterScope(scope=it)>
-
-<if(trace)>
-    $self->traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
-    eval {
-        <block>
-    };
-    $self->traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
-    if ($EVAL_ERROR) {
-        croak $EVAL_ERROR;
-    }
-<else>
-    <block>
-<endif>
-}
-# $ANTLR end <ruleName>
->>
-
-synpred(name) ::= <<
-public final boolean <name>() {
-    backtracking++;
-    <@start()>
-    int start = input.mark();
-    try {
-        <name>_fragment(); // can never throw exception
-    } catch (RecognitionException re) {
-        System.err.println("impossible: "+re);
-    }
-    boolean success = !failed;
-    input.rewind(start);
-    <@stop()>
-    backtracking--;
-    failed=false;
-    return success;
-}<\n>
->>
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if ( backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>
-if ($self->state->failed) {
-    return <ruleReturnValue()>;
-}
-<endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (backtracking>0) {failed=true; return <ruleReturnValue()>;}<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(scope=ruleDescriptor.returnScope)>
-
-# $ANTLR start <ruleName>
-# <fileName>:<description>
-sub <ruleName>() {
-    my ($self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>) = @_;
-    <if(trace)>$self->traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleLabelDefs()>
-    <ruleDescriptor.actions.init>
-    <@preamble()>
-    eval {
-        <ruleMemoization(name=ruleName)>
-        <block>
-        <ruleCleanUp()>
-        <(ruleDescriptor.actions.after):execAction()>
-    };
-<if(exceptions)>
-    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-    <actions.(actionScope).rulecatch>
-<else>
-    my $exception = $EVAL_ERROR;
-    if (ref $exception && $exception->isa('ANTLR::Runtime::RecognitionException')) {
-        $self->report_error($exception);
-        $self->recover($self->input, $exception);
-        $exception = undef;
-    }<\n>
-<endif>
-<endif>
-<endif>
-    <if(trace)>$self->traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-    <memoize()>
-    <ruleScopeCleanUp()>
-    <finally>
-    if ($exception) {
-        croak $exception;
-        #$exception->rethrow();
-    }
-    <@postamble()>
-    return <ruleReturnValue()>;
-}
-# $ANTLR end <ruleName>
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>) {
-    <e.action>
-}
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-my $retval = <returnType()>->new();
-$retval->set_start($self->input->LT(1));<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-my $<a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
-}>
-<endif>
-<if(memoize)>
-my $<ruleDescriptor.name>_start_index = $self->input->index();
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.push(new <it>_scope());}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.push(new <it.name>_scope());}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{<it>_stack.pop();}; separator="\n">
-<ruleDescriptor.ruleScope:{<it.name>_stack.pop();}; separator="\n">
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
-    :{my $<it.label.text> = undef;}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
-    :{List list_<it.label.text>=null;}; separator="\n"
->
-<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|RuleReturnScope <ll.label.text> = null;}; separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{<labelType> <it.label.text>=null;}; separator="\n"
->
-<ruleDescriptor.charLabels:{my $<it.label.text>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{List list_<it.label.text>=null;}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <<
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-$<ruleDescriptor.singleValueReturnName>
-<else>
-$retval
-<endif>
-<endif>
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-$retval->set_stop($self->input->LT(-1));<\n>
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if ( backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
-<endif>
-<endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-# $ANTLR start <ruleName>
-sub m_<ruleName> {
-# <ruleDescriptor.parameterScope:parameterScope(scope=it)>
-    my ($self) = @_;
-    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
-    <ruleDeclarations()>
-    eval {
-<if(nakedBlock)>
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block><\n>
-<else>
-        my $_type = <ruleName>;
-        my $_channel = $self->DEFAULT_TOKEN_CHANNEL;
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block>
-        <ruleCleanUp()>
-        $self->state->type($_type);
-        $self->state->channel($_channel);
-        <(ruleDescriptor.actions.after):execAction()>
-<endif>
-    };
-    <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-    <memoize()>
-
-    if ($EVAL_ERROR) {
-        croak $EVAL_ERROR;
-    }
-}
-# $ANTLR end <ruleName>
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-sub m_tokens {
-    my ($self) = @_;
-    <block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# <fileName>:<description>
-my $alt<decisionNumber> = <maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-<@prebranch()>
-switch ($alt<decisionNumber>) {
-    <alts:altSwitchCase()>
-}
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# <fileName>:<description>
-my $alt<decisionNumber> = <maxAlt>;
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-switch ($alt<decisionNumber>) {
-    <alts:altSwitchCase()>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-# <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-# <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# <fileName>:<description>
-my $cnt<decisionNumber> = 0;
-<decls>
-<@preloop()>
-LOOP<decisionNumber>:
-while (1) {
-    my $alt<decisionNumber> = <maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch ($alt<decisionNumber>) {
-	    <alts:altSwitchCase()>
-	    else {
-	        if ( $cnt<decisionNumber> >= 1 ) { last LOOP<decisionNumber> }
-	        <ruleBacktrackFailure()>
-            my $eee =
-                ANTLR::Runtime::EarlyExitException->new(<decisionNumber>, $self->input);
-            <@earlyExitException()>
-            croak $eee;
-        }
-    }
-    ++$cnt<decisionNumber>;
-}
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# <fileName>:<description>
-<decls>
-<@preloop()>
-LOOP<decisionNumber>:
-while (1) {
-    my $alt<decisionNumber> = <maxAlt>;
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    switch ($alt<decisionNumber>) {
-	    <alts:altSwitchCase()>
-	    else { last LOOP<decisionNumber> }
-    }
-}
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase() ::= <<
-case <i> {
-    <@prealt()>
-    <it>
-}<\n>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-# <fileName>:<description>
-{
-<@declarations()>
-<elements:element()>
-<rew>
-<@cleanup()>
-}
->>
-
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element() ::= <<
-<@prematch()>
-<it.el><\n>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)>$<label> =<endif>$self->match($self->input, <token>, $FOLLOW_<token>_in_<ruleName><elementIndex>);
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-listLabel(label,elem) ::= <<
-if (list_<label>==null) list_<label>=new ArrayList();
-list_<label>.add(<elem>);<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = $self->input->LA(1);<\n>
-<endif>
-$self->match(<char>); <checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = $self->input->LA(1);<\n>
-<endif>
-$self->match_range(<a>,<b>); <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,postmatchCode="") ::= <<
-<if(label)>
-<if(LEXER)>
-<label>= $self->input->LA(1);<\n>
-<else>
-<label>=(<labelType>)input.LT(1);<\n>
-<endif>
-<endif>
-if ( <s> ) {
-    $self->input->consume();
-    <postmatchCode>
-<if(!LEXER)>
-    $self->state->error_recovery(0);
-<endif>
-    <if(backtracking)>failed=false;<endif>
-}
-else {
-    <ruleBacktrackFailure()>
-    my $mse =
-        ANTLR::Runtime::MismatchedSetException->new(undef, $self->input);
-    <@mismatchedSetException()>
-<if(LEXER)>
-    $self->recover($mse);
-    $mse->throw();
-<else>
-    $mse->throw();
-    <! use following code to make it recover inline; remove throw mse;
-    $self->recoverFromMismatchedSet($self->input, $mse, $FOLLOW_set_in_<ruleName><elementIndex>);
-    !>
-<endif>
-}<\n>
->>
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex) ::= <<
-<if(label)>
-int <label>Start = getCharIndex();
-$self->match(<string>); <checkRuleBacktrackFailure()>
-<labelType> <label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1);
-<else>
-$self->match(<string>); <checkRuleBacktrackFailure()><\n>
-<endif>
->>
-
-wildcard(label,elementIndex) ::= <<
-<if(label)>
-<label>=(<labelType>)input.LT(1);<\n>
-<endif>
-matchAny(input); <checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(label,elementIndex) ::= <<
-<wildcard(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = $self->input->LA(1);<\n>
-<endif>
-matchAny(); <checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-$self->push_follow($FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
-<if(label)>
-$<label> = $self-><rule.name>(<args; separator=", ">);<\n>
-<else>
-$self-><rule.name>(<args; separator=", ">);<\n>
-<endif>
-$self->state->_fsp($self->state->_fsp - 1);
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** A lexer rule reference.
- *
- *  The 'rule' argument was the target rule name, but is now
- *  of type Rule, whose toString() is the same: the rule name.
- *  You can now access the full rule descriptor, though.
- */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
-<if(label)>
-int <label>Start<elementIndex> = getCharIndex();
-$self->m_<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
-<else>
-$self->m_<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-int <label>Start<elementIndex> = getCharIndex();
-match(EOF); <checkRuleBacktrackFailure()>
-<labelType> <label> = new CommonToken(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
-<else>
-match(EOF); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( input.LA(1)==Token.DOWN ) {
-    match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
-    <children:element()>
-    match(input, Token.UP, null); <checkRuleBacktrackFailure()>
-}
-<else>
-match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
-<children:element()>
-match(input, Token.UP, null); <checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if ( !(<evalPredicate(...)>) ) {
-    <ruleBacktrackFailure()>
-    throw new FailedPredicateException(input, "<ruleName>", "<description>");
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-my $LA<decisionNumber>_<stateNumber> = $self->input->LA(<k>);<\n>
-<edges; separator="\nels">
-else {
-<if(eotPredictsAlt)>
-    $alt<decisionNumber> = <eotPredictsAlt>;
-<else>
-    <ruleBacktrackFailure()>
-    my $nvae =
-        ANTLR::Runtime::NoViableAltException->new({
-            grammar_decision_description => "<description>",
-            decision_number => <decisionNumber>,
-            state_number => <stateNumber>,
-            input => $self->input,
-        });<\n>
-    <@noViableAltException()>
-    croak $nvae;<\n>
-<endif>
-}
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and more in line with what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-my $LA<decisionNumber>_<stateNumber> = $self->input->LA(<k>);<\n>
-<edges; separator="\nels">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-my $LA<decisionNumber>_<stateNumber> = $self->input->LA(<k>);<\n>
-<edges; separator="\nels"><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-$alt<decisionNumber> = <eotPredictsAlt>; <! if no edges, don't gen ELSE !>
-<else>
-else {
-    $alt<decisionNumber> = <eotPredictsAlt>;
-}<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "$alt<decisionNumber> = <alt>;"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
-    <targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( $self->input->LA(<k>) ) {
-    <edges; separator="\n">
-    else {
-    <if(eotPredictsAlt)>
-        $alt<decisionNumber> = <eotPredictsAlt>;
-    <else>
-        <ruleBacktrackFailure()>
-        my $nvae =
-            ANTLR::Runtime::NoViableAltException->new({
-                grammar_decision_description => "<description>",
-                decision_number => <decisionNumber>,
-                state_number => <stateNumber>,
-                input => $self->input,
-            });<\n>
-        <@noViableAltException()>
-        croak $nvae;<\n>
-    <endif>
-    }
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( $self->input->LA(<k>) ) {
-    <edges; separator="\n">
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-switch ( $self->input->LA(<k>) ) {
-<edges; separator="\n"><\n>
-<if(eotPredictsAlt)>
-else { $alt<decisionNumber> = <eotPredictsAlt> }<\n>
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-case [<labels:{ <it> }; separator=", ">] { <targetState> }
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-$alt<decisionNumber> = $self->dfa<decisionNumber>->predict($self->input);
->>
-
-/* Dump DFA tables as run-length-encoded Strings of octal values.
- * Can't use hex as compiler translates them before compilation.
- * These strings are split into multiple, concatenated strings.
- * Java puts them back together at compile time thankfully.
- * Java cannot handle large static arrays, so we're stuck with this
- * encode/decode approach.  See analysis and runtime DFA for
- * the encoding methods.
- */
-cyclicDFA(dfa) ::= <<
-Readonly my $DFA<dfa.decisionNumber>_eot => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedEOT; separator=", "> ]);
-Readonly my $DFA<dfa.decisionNumber>_eof => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedEOF; separator=", "> ]);
-Readonly my $DFA<dfa.decisionNumber>_min => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedMin; separator=", "> ]);
-Readonly my $DFA<dfa.decisionNumber>_max => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedMax; separator=", "> ]);
-Readonly my $DFA<dfa.decisionNumber>_accept => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedAccept; separator=", "> ]);
-Readonly my $DFA<dfa.decisionNumber>_special => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedSpecial; separator=", "> ]);
-Readonly my $DFA<dfa.decisionNumber>_transition => [ <dfa.javaCompressedTransition:{s|ANTLR::Runtime::DFA->unpack_rle([ <s; separator=", "> ])}; separator=", "> ];
-
-{
-package <name>::DFA<dfa.decisionNumber>;
-use ANTLR::Runtime::Class;
-
-use strict;
-use warnings;
-
-extends 'ANTLR::Runtime::DFA';
-
-sub BUILD {
-    my $self = shift;
-    my $param_ref = __PACKAGE__->unpack_params(@_, {
-        spec => [
-            {
-                name => 'recognizer',
-                isa  => 'ANTLR::Runtime::BaseRecognizer'
-            },
-        ]
-    });
-
-    $self->recognizer($param_ref->{recognizer});
-    $self->decision_number(<dfa.decisionNumber>);
-    $self->eot($DFA<dfa.decisionNumber>_eot);
-    $self->eof($DFA<dfa.decisionNumber>_eof);
-    $self->min($DFA<dfa.decisionNumber>_min);
-    $self->max($DFA<dfa.decisionNumber>_max);
-    $self->accept($DFA<dfa.decisionNumber>_accept);
-    $self->special($DFA<dfa.decisionNumber>_special);
-    $self->transition($DFA<dfa.decisionNumber>_transition);
-}
-
-sub get_description {
-    return "<dfa.description>";
-}
-
-<@errorMethod()>
-
-<if(dfa.specialStateSTs)>
-sub special_state_transition {
-    my ($self, $param_ref) = unpack_params(@_, {
-        spec => [
-            {
-                name => 's',
-                type => SCALAR,
-            },
-            {
-                name => 'input',
-                isa  => 'ANTLR::Runtime::IntStream',
-            }
-        ]
-    });
-    my $s = $param_ref->{s};
-    my $input = $param_ref->{input};
-
-    switch ($s) {
-        <dfa.specialStateSTs:{state |
-        case <i0> \{ <! compressed special state numbers 0..n-1 !>
-            <state>}; separator="\n">
-        }
-    }
-
-<if(backtracking)>
-    if ($self->state->backtracking > 0) {
-        $self->state->failed = 1;
-        return -1;
-    }<\n>
-<endif>
-
-    my $nvae =
-        ANTLR::Runtime::NoViableAltException->new({
-            grammar_decision_description => $self->get_description(),
-            decision_number => <dfa.decisionNumber>,
-            state_number => $s,
-            input => $input,
-        });<\n>
-    $self->error($nvae);
-    $nvae->throw();
-    }<\n>
-<endif>
-}<\n>
->>
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-my $input = $self->input;
-my $LA<decisionNumber>_<stateNumber> = $input->LA(1);<\n>
-<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-my $index<decisionNumber>_<stateNumber> = $input->index();
-$input->rewind();<\n>
-<endif>
-s = -1;
-<edges; separator="\nels">
-<if(semPredState)> <! return input cursor to state before we rewound !>
-input.seek(index<decisionNumber>_<stateNumber>);<\n>
-<endif>
-if ( s>=0 ) return s;
-break;
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-s = <targetStateNumber>;<\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left> && <right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(...)>)"
-
-evalPredicate(pred,description) ::= "<pred>"
-
-evalSynPredicate(pred,description) ::= "<pred>()"
-
-lookaheadTest(atom,k,atomAsInt) ::= "$LA<decisionNumber>_<stateNumber> eq <atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "$self->input->LA(<k>) eq <atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
-($LA<decisionNumber>_<stateNumber> ge <lower> && $LA<decisionNumber>_<stateNumber> le <upper>)
->>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "($self->input->LA(<k>) ge <lower> && $self->input->LA(<k>) le <upper>)"
-
-setTest(ranges) ::= "<ranges; separator=\" || \">"
-
-// A T T R I B U T E S
-
-globalAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected static class <scope.name>_scope {
-    <scope.attributes:{<it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
-<endif>
->>
-
-ruleAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-protected static class <scope.name>_scope {
-    <scope.attributes:{<it.decl>;}; separator="\n">
-}
-protected Stack <scope.name>_stack = new Stack();<\n>
-<endif>
->>
-
-returnType() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor.name>_return
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-/** Generate the Java type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-<referencedRule.name>_return
-<else>
-<if(referencedRule.hasSingleReturnValue)>
-<referencedRule.singleValueReturnType>
-<else>
-void
-<endif>
-<endif>
->>
-
-/** Using the type-to-init-value map, try to init a type; if it is not in
- *  the table it must be an object, so the default value is "undef".
- */
-initValue(typeName) ::= <<
-<if(typeName)>
-<perlTypeInitMap.(typeName)>
-<else>
-undef
-<endif>
->>
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <<
-my $<label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
->>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-{
-    package <returnType()>;
-    use ANTLR::Runtime::Class;
-
-    extends 'ANTLR::Runtime::<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope';
-
-    <scope.attributes:{public <it.decl>;}; separator="\n">
-    <@ruleReturnMembers()>
-}
-<endif>
->>
-
-parameterScope(scope) ::= <<
-<scope.attributes:{$<it.name>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= "$<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "$<attr.name> =<expr>;"
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <<
-<if(negIndex)>
-((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name>
-<else>
-<if(index)>
-((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name>
-<else>
-((<scope>_scope)<scope>_stack.peek()).<attr.name>
-<endif>
-<endif>
->>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
-<if(negIndex)>
-((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name> =<expr>;
-<else>
-<if(index)>
-((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name> =<expr>;
-<else>
-((<scope>_scope)<scope>_stack.peek()).<attr.name> =<expr>;
-<endif>
-<endif>
->>
-
-/** $x is either global scope or x is rule with dynamic scope; refers
- *  to stack itself not top of stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-$<scope>.<attr.name>
-<else>
-$<scope>
-<endif>
->>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>
-<else>
-$<attr.name>
-<endif>
->>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name> =<expr>;
-<else>
-$<attr.name> =<expr>;
-<endif>
->>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "$<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach
-
-tokenLabelPropertyRef_text(scope,attr) ::= "$<scope>->get_text()"
-tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
-tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
-tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
-tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
-tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "((<labelType>)<scope>.start)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "((<labelType>)<scope>.stop)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)<scope>.tree)"
-ruleLabelPropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-input.getTokenStream().toString(
-  input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
-  input.getTreeAdaptor().getTokenStopIndex(<scope>.start))
-<else>
-substr($self->input, $<scope>->start, $<scope>->stop)
-<endif>
->>
-
-ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "$<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
-rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
-rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
-rulePropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-input.getTokenStream().toString(
-  input.getTreeAdaptor().getTokenStartIndex(retval.start),
-  input.getTreeAdaptor().getTokenStopIndex(retval.start))
-<else>
-input.toString(retval.start,input.LT(-1))
-<endif>
->>
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-lexerRulePropertyRef_text(scope,attr) ::= "getText()"
-lexerRulePropertyRef_type(scope,attr) ::= "$_type"
-lexerRulePropertyRef_line(scope,attr) ::= "tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "$_channel"
-lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(getCharIndex()-1)"
-lexerRulePropertyRef_self(scope,attr) ::= "$self"
-
-// Setting $st and $tree is allowed in a local rule; everything else
-// is flagged as an error.
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
-
-
-/** How to execute an action */
-execAction(action) ::= <<
-<if(backtracking)>
-<if(actions.(actionScope).synpredgate)>
-if ( <actions.(actionScope).synpredgate> ) {
-  <action>
-}
-<else>
-if ( backtracking==0 ) {
-  <action>
-}
-<endif>
-<else>
-<action>
-<endif>
->>
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-Readonly my $<name> => ANTLR::Runtime::BitSet->new({ words64 => [ <words64:{'<it>'};separator=", "> ] });<\n>
->>
-
-codeFileExtension() ::= ".pm"
-
-true() ::= "1"
-false() ::= "0"
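The cyclicDFA template above emits each DFA table (eot, eof, min, max, accept, special, transition) as run-length-encoded data that the runtime expands back into flat arrays before prediction. A minimal Python sketch of that decoding, assuming a simple list of (count, value) pairs rather than the runtime's packed-string format:

def unpack_rle(pairs):
    # Expand (count, value) pairs into a flat table, e.g. the data behind
    # the generated $DFA<n>_eot / $DFA<n>_min constants above.
    out = []
    for i in range(0, len(pairs), 2):
        count, value = pairs[i], pairs[i + 1]
        out.extend([value] * count)
    return out

# 3 copies of 7 followed by 2 copies of -1 -> [7, 7, 7, -1, -1]
print(unpack_rle([3, 7, 2, -1]))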
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/AST.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/AST.stg
deleted file mode 100644
index a9252e3..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/AST.stg
+++ /dev/null
@@ -1,452 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-finishedBacktracking(block) ::= <<
-<if(backtracking)>
-if <actions.(actionScope).synpredgate>:
-    <block>
-<else>
-<block>
-<endif>
->>
-
-@outputFile.imports() ::= <<
-<@super.imports()>
-<if(!TREE_PARSER)><! tree parser would already have imported !>
-from antlr3.tree import *<\n>
-<endif>
->>
-
-/** Add an adaptor property that knows how to build trees */
-@genericParser.init() ::= <<
-self._adaptor = None
-self.adaptor = CommonTreeAdaptor()
-<@super.init()>
->>
-
-@genericParser.members() ::= <<
-<@super.members()>
-<astAccessor()>
->>
-
-astAccessor() ::= <<
-def getTreeAdaptor(self):
-    return self._adaptor
-
-def setTreeAdaptor(self, adaptor):
-    self._adaptor = adaptor
-    <grammar.directDelegates:{g|<g:delegateName()>.adaptor = adaptor}; separator="\n">
-
-adaptor = property(getTreeAdaptor, setTreeAdaptor)
->>
-
-@returnScope.ruleReturnInit() ::= <<
-self.tree = None
->>
-
-
-/** Add a variable to track rule's return AST */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-root_0 = None<\n>
->>
-
-ruleLabelDefs() ::= <<
-<super.ruleLabelDefs()>
-<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
-  ruleDescriptor.wildcardTreeListLabels]
-    :{it | <it.label.text>_tree = None}; separator="\n">
-<ruleDescriptor.tokenListLabels:{it | <it.label.text>_tree = None}; separator="\n">
-<ruleDescriptor.allTokenRefsInAltsWithRewrites
-    :{it | stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>")}; separator="\n">
-<ruleDescriptor.allRuleRefsInAltsWithRewrites
-    :{it | stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "rule <it>")}; separator="\n">
->>
-
-/** When doing auto AST construction, we must define some variables;
- *  These should be turned off if doing rewrites.  This must be a "mode"
- *  as a rule could have both rewrite and AST within the same alternative
- *  block.
- */
-@alt.declarations() ::= <<
-<if(autoAST)>
-<if(outerAlt)>
-<if(!rewriteMode)>
-root_0 = self._adaptor.nil()<\n>
-<endif>
-<endif>
-<endif>
->>
-
-
-// T r a c k i n g  R u l e  E l e m e n t s
-
-/** ID and track it for use in a rewrite rule */
-tokenRefTrack(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)> <! Track implies no auto AST construction!>
-<finishedBacktracking({stream_<token>.add(<label>)})>
->>
-
-/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
- *  to the tracking list stream_ID for use in the rewrite.
- */
-tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-/** ^(ID ...) track for rewrite */
-tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)>
-<finishedBacktracking({stream_<token>.add(<label>)})>
->>
-
-/** Match ^(label+=TOKEN ...) track for rewrite */
-tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefRuleRootTrack(...)>
-<listLabel(elem=label,...)>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<finishedBacktracking({stream_<rule.name>.add(<label>.tree)})>
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefTrack(...)>
-<listLabel(label, {<label>.tree})>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<finishedBacktracking({stream_<rule.name>.add(<label>.tree)})>
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRootTrack(...)>
-<listLabel(label, {<label>.tree})>
->>
-
-// R e w r i t e
-
-rewriteCode(
-        alts, description,
-        referencedElementsDeep, // ALL referenced elements to right of ->
-        referencedTokenLabels,
-        referencedTokenListLabels,
-        referencedRuleLabels,
-        referencedRuleListLabels,
-        referencedWildcardLabels,
-        referencedWildcardListLabels,
-        rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
-<<
-# AST Rewrite
-# elements: <referencedElementsDeep; separator=", ">
-# token labels: <referencedTokenLabels; separator=", ">
-# rule labels: <referencedRuleLabels; separator=", ">
-# token list labels: <referencedTokenListLabels; separator=", ">
-# rule list labels: <referencedRuleListLabels; separator=", ">
-# wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
-<finishedBacktracking({
-<prevRuleRootRef()>.tree = root_0
-<rewriteCodeLabels()>
-root_0 = self._adaptor.nil()
-<first(alts):rewriteAltFirst(); anchor>
-
-<rest(alts):{a | el<rewriteAltRest(a)>}; anchor, separator="\n\n">
-
-<! if tree parser and rewrite=true !>
-<if(TREE_PARSER)>
-<if(rewriteMode)>
-<prevRuleRootRef()>.tree = self._adaptor.rulePostProcessing(root_0)
-self.input.replaceChildren(
-    self._adaptor.getParent(retval.start),
-    self._adaptor.getChildIndex(retval.start),
-    self._adaptor.getChildIndex(_last),
-    retval.tree
-    )<\n>
-<endif>
-<endif>
-
-<! if parser or tree-parser and rewrite!=true, we need to set result !>
-<if(!TREE_PARSER)>
-<prevRuleRootRef()>.tree = root_0<\n>
-<else>
-<if(!rewriteMode)>
-<prevRuleRootRef()>.tree = root_0<\n>
-<endif>
-<endif>
-})>
->>
-
-rewriteCodeLabels() ::= <<
-<referencedTokenLabels
-    :{it | stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>", <it>)};
-    separator="\n"
->
-<referencedTokenListLabels
-    :{it | stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>", list_<it>)};
-    separator="\n"
->
-<referencedWildcardLabels
-    :{it | stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "wildcard <it>", <it>)};
-    separator="\n"
->
-<referencedWildcardListLabels
-    :{it | stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "wildcard <it>", list_<it>)};
-    separator="\n"
->
-<referencedRuleLabels
-    :{it |
-if <it> is not None:
-    stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "rule <it>", <it>.tree)
-else:
-    stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "token <it>", None)
-};
-    separator="\n"
->
-<referencedRuleListLabels
-    :{it| stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "token <it>", list_<it>)};
-    separator="\n"
->
->>
-
-/** Generate code for an optional rewrite block; note it uses the deep ref'd element
-  *  list rather than the shallow one like other blocks.
-  */
-rewriteOptionalBlock(
-        alt,rewriteBlockLevel,
-        referencedElementsDeep, // all nested refs
-        referencedElements, // elements in immediately block; no nested blocks
-        description) ::=
-<<
-# <fileName>:<description>
-if <referencedElementsDeep:{el | stream_<el>.hasNext()}; separator=" or ">:
-    <alt>
-
-<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
->>
-
-rewriteClosureBlock(
-        alt,rewriteBlockLevel,
-        referencedElementsDeep, // all nested refs
-        referencedElements, // elements in immediately block; no nested blocks
-        description) ::=
-<<
-# <fileName>:<description>
-while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
-    <alt>
-
-<referencedElements:{el | stream_<el>.reset();<\n>}>
->>
-
-rewritePositiveClosureBlock(
-        alt,rewriteBlockLevel,
-        referencedElementsDeep, // all nested refs
-        referencedElements, // elements in immediately block; no nested blocks
-        description) ::=
-<<
-# <fileName>:<description>
-if not (<referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">):
-    raise RewriteEarlyExitException()
-
-while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
-    <alt>
-
-<referencedElements:{el | stream_<el>.reset()<\n>}>
->>
-
-rewriteAltRest(a) ::= <<
-<if(a.pred)>if <a.pred>:
-    # <a.description>
-    <a.alt>
-<else>se: <! little hack to get if .. elif .. else block right !>
-    # <a.description>
-    <a.alt>
-<endif>
->>
-
-rewriteAltFirst(a) ::= <<
-<if(a.pred)>
-if <a.pred>:
-    # <a.description>
-    <a.alt>
-<else>
-# <a.description>
-<a.alt>
-<endif>
->>
-
-/** For empty rewrites: "r : ... -> ;" */
-rewriteEmptyAlt() ::= "root_0 = None"
-
-rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
-# <fileName>:<description>
-root_<treeLevel> = self._adaptor.nil()
-<root:rewriteElement()>
-<children:rewriteElement()>
-self._adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>)<\n>
->>
-
-rewriteElementList(elements) ::= "<elements:rewriteElement()>"
-
-rewriteElement(e) ::= <<
-<@pregen><@end>
-<e.el>
->>
-
-/** Gen ID or ID[args] */
-rewriteTokenRef(token,elementIndex,terminalOptions,args) ::= <<
-self._adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>)<\n>
->>
-
-/** Gen $label ... where defined via label=ID */
-rewriteTokenLabelRef(label,elementIndex) ::= <<
-self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode())<\n>
->>
-
-/** Gen $label ... where defined via label+=ID */
-rewriteTokenListLabelRef(label,elementIndex) ::= <<
-self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode())<\n>
->>
-
-/** Gen ^($label ...) */
-rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
-root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
->>
-
-/** Gen ^($label ...) where label+=... */
-rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
-
-/** Gen ^(ID ...) or ^(ID[args] ...) */
-rewriteTokenRefRoot(token,elementIndex,terminalOptions,args) ::= <<
-root_<treeLevel> = self._adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>)<\n>
->>
-
-rewriteImaginaryTokenRef(args,token,terminalOptions,elementIndex) ::= <<
-self._adaptor.addChild(root_<treeLevel>, <createImaginaryNode(token, terminalOptions, args)>)<\n>
->>
-
-rewriteImaginaryTokenRefRoot(args,token,terminalOptions,elementIndex) ::= <<
-root_<treeLevel> = self._adaptor.becomeRoot(<createImaginaryNode(token, terminalOptions, args)>, root_<treeLevel>)<\n>
->>
-
-/** plain -> {foo} action */
-rewriteAction(action) ::= <<
-<!FIXME(96,untested)!>
-root_0 = <action><\n>
->>
-
-/** What is the name of the previous value of this rule's root tree?  This
- *  lets us refer to $rule to mean the previous value.  I am reusing the
- *  variable 'tree' sitting in the retval struct to hold the value of root_0 right
- *  before I set it during rewrites.  The assignment will be to retval.tree.
- */
-prevRuleRootRef() ::= "retval"
-
-rewriteRuleRef(rule) ::= <<
-self._adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree())<\n>
->>
-
-rewriteRuleRefRoot(rule) ::= <<
-root_<treeLevel> = self._adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>)<\n>
->>
-
-rewriteNodeAction(action) ::= <<
-self._adaptor.addChild(root_<treeLevel>, <action>)<\n>
->>
-
-rewriteNodeActionRoot(action) ::= <<
-root_<treeLevel> = self._adaptor.becomeRoot(<action>, root_<treeLevel>)<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel=rule */
-rewriteRuleLabelRef(label) ::= <<
-self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
->>
-
-/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
-rewriteRuleListLabelRef(label) ::= <<
-self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel=rule */
-rewriteRuleLabelRefRoot(label) ::= <<
-root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
->>
-
-/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
-rewriteRuleListLabelRefRoot(label) ::= <<
-root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
->>
-
-rewriteWildcardLabelRef(label) ::= <<
-self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
->>
-
-createImaginaryNode(tokenType,terminalOptions,args) ::= <<
-<if(terminalOptions.node)>
-<! new MethodNode(IDLabel, args) !>
-<terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
-<else>
-<if (!args)>self._adaptor.createFromType(<tokenType>, "<tokenType>")
-<else>self._adaptor.create(<tokenType>, <args; separator=", ">)
-<endif>
-<endif>
->>
-
-//<! need to call different adaptor.create*() methods depending of argument count !>
-//<if (!args)>self._adaptor.createFromType(<tokenType>, "<tokenType>")
-//<else><if (!rest(args))>self._adaptor.createFromType(<tokenType>, <first(args)>)
-//<else><if (!rest(rest(args)))>self._adaptor.createFromToken(<tokenType>, <first(args)>, <first(rest(args))>)
-//<endif>
-//<endif>
-//<endif>
-
-
-createRewriteNodeFromElement(token,terminalOptions,args) ::= <<
-<if(terminalOptions.node)>
-<terminalOptions.node>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
-<else>
-<if(args)> <! must create new node from old !>
-<! need to call different adaptor.create*() methods depending on argument count !>
-<if (!args)>self._adaptor.createFromType(<token>, "<token>")
-<else><if (!rest(args))>self._adaptor.createFromToken(<token>, <first(args)>)
-<else><if (!rest(rest(args)))>self._adaptor.createFromToken(<token>, <first(args)>, <first(rest(args))>)
-<endif>
-<endif>
-<endif>
-<else>
-stream_<token>.nextNode()
-<endif>
-<endif>
->>
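The AST templates above build every rewrite through three adaptor operations: nil() starts a flat root, addChild() appends a node or subtree, and becomeRoot() hoists a new node above the current root. A self-contained sketch of those operations on a hypothetical dict-based node (not the antlr3 tree classes):

class MiniAdaptor:
    def nil(self):
        # A flat, token-less root that only collects children.
        return {"token": None, "children": []}

    def create_with_payload(self, token):
        return {"token": token, "children": []}

    def add_child(self, root, child):
        root["children"].append(child)

    def become_root(self, new_root, old_root):
        # The old root (or its children, if it was a nil root) hangs under the new root.
        if old_root["token"] is None:
            new_root["children"].extend(old_root["children"])
        else:
            new_root["children"].append(old_root)
        return new_root

adaptor = MiniAdaptor()
root_0 = adaptor.nil()                                        # @alt.declarations()
adaptor.add_child(root_0, adaptor.create_with_payload("ID"))  # tokenRef
root_0 = adaptor.become_root(adaptor.create_with_payload("="), root_0)  # ID^ behaviour
print(root_0)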
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTParser.stg
deleted file mode 100644
index 64ffa68..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTParser.stg
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Templates for building ASTs during normal parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  The situation is not too bad as rewrite (->) usage makes ^ and !
- *  invalid. There is no huge explosion of combinations.
- */
-
-finishedBacktracking(block) ::= <<
-<if(backtracking)>
-if <actions.(actionScope).synpredgate>:
-    <block>
-<else>
-<block>
-<endif>
->>
-
-@ruleBody.setErrorReturnValue() ::= <<
-retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
->>
-
-// TOKEN AST STUFF
-
-/** ID and output=AST */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<super.tokenRef(...)>
-<finishedBacktracking({
-<label>_tree = <createNodeFromToken(...)>
-self._adaptor.addChild(root_0, <label>_tree)
-})>
->>
-
-/** ID! and output=AST (same as plain tokenRef) */
-tokenRefBang(token,label,elementIndex,terminalOptions) ::= "<super.tokenRef(...)>"
-
-/** ID^ and output=AST */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-<super.tokenRef(...)>
-<finishedBacktracking({
-<label>_tree = <createNodeFromToken(...)>
-root_0 = self._adaptor.becomeRoot(<label>_tree, root_0)
-})>
->>
-
-/** ids+=ID! and output=AST */
-tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefBang(...)>
-<listLabel(elem=label,...)>
->>
-
-/** label+=TOKEN when output=AST but not rewrite alt */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match label+=TOKEN^ when output=AST but not rewrite alt */
-tokenRefRuleRootAndListLabel(token,label,terminalOptions,elementIndex) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
-
-// SET AST
-
-// the match set stuff is interesting in that it uses an argument list
-// to pass code to the default matchSet; another possible way to alter
-// inherited code.  I don't use the region stuff because I need to pass
-// different chunks depending on the operator.  I don't like making
-// the template name have the operator as the number of templates gets
-// large but this is the most flexible--this is as opposed to having
-// the code generator call matchSet then add root code or ruleroot code
-// plus list label plus ...  The combinations might require complicated code
-// rather than just added-on code.  Investigate that refactoring when
-// I have more time.
-
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <%
-<super.matchSet(postmatchCode={<finishedBacktracking({self._adaptor.addChild(root_0, <createNodeFromToken(...)>)})>}, ...)>
-%>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-<matchSet(...)>
->>
-
-matchSetBang(s,label,elementIndex,terminalOptions,postmatchCode) ::= "<super.matchSet(...)>"
-
-// note there is no matchSetTrack because -> rewrites force sets to be
-// plain old blocks of alts: (A|B|...|C)
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-<if(label)>
-<label> = self.input.LT(1)<\n>
-<endif>
-<super.matchSet(postmatchCode={<finishedBacktracking({root_0 = self._adaptor.becomeRoot(<createNodeFromToken(...)>, root_0)})>}, ...)>
->>
-
-// RULE REF AST
-
-/** rule when output=AST */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<finishedBacktracking({self._adaptor.addChild(root_0, <label>.tree)})>
->>
-
-/** rule! is same as normal rule ref */
-ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
-
-/** rule^ */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-<super.ruleRef(...)>
-<finishedBacktracking({root_0 = self._adaptor.becomeRoot(<label>.tree, root_0)})>
->>
-
-/** x+=rule when output=AST */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(label, {<label>.tree})>
->>
-
-/** x+=rule! when output=AST is a rule ref with list addition */
-ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefBang(...)>
-<listLabel(label, {<label>.tree})>
->>
-
-/** x+=rule^ */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabel(label, {<label>.tree})>
->>
-
-// WILDCARD AST
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<super.wildcard(...)>
-<finishedBacktracking({
-<label>_tree = self._adaptor.createWithPayload(<label>)
-self._adaptor.addChild(root_0, <label>_tree)
-})>
->>
-
-wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
-
-wildcardRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-<super.wildcard(...)>
-<finishedBacktracking({
-<label>_tree = self._adaptor.createWithPayload(<label>)
-root_0 = self._adaptor.becomeRoot(<label>_tree, root_0)
-})>
->>
-
-createNodeFromToken(label,terminalOptions) ::= <%
-<if(terminalOptions.node)>
-<terminalOptions.node>(<label>) <! new MethodNode(IDLabel) !>
-<else>
-self._adaptor.createWithPayload(<label>)
-<endif>
-%>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<finishedBacktracking({
-retval.tree = self._adaptor.rulePostProcessing(root_0)
-self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
-})>
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTTreeParser.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTTreeParser.stg
deleted file mode 100644
index 718a55f..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTTreeParser.stg
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** Templates for building ASTs during tree parsing.
- *
- *  Deal with many combinations.  Dimensions are:
- *  Auto build or rewrite
- *    no label, label, list label  (label/no-label handled together)
- *    child, root
- *    token, set, rule, wildcard
- *
- *  Each combination has its own template except that label/no label
- *  is combined into tokenRef, ruleRef, ...
- */
-
-finishedBacktracking(block) ::= <<
-<if(backtracking)>
-if <actions.(actionScope).synpredgate>:
-    <block>
-<else>
-<block>
-<endif>
->>
-
-/** Add a variable to track last element matched */
-ruleDeclarations() ::= <<
-<super.ruleDeclarations()>
-_first_0 = None
-_last = None<\n>
->>
-
-/** What to emit when there is no rewrite rule.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= <<
-<finishedBacktracking({
-<if(rewriteMode)>
-retval.tree = _first_0
-if self._adaptor.getParent(retval.tree) is not None and self._adaptor.isNil(self._adaptor.getParent(retval.tree)):
-    retval.tree = self._adaptor.getParent(retval.tree)
-<endif>
-})>
->>
-
-/** match ^(root children) in tree parser; override here to
- *  add tree construction actions.
- */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-_last = self.input.LT(1)
-_save_last_<treeLevel> = _last
-_first_<treeLevel> = None
-<if(!rewriteMode)>
-root_<treeLevel> = self._adaptor.nil()<\n>
-<endif>
-<root:element()>
-<if(rewriteMode)>
-<finishedBacktracking({
-<if(root.el.rule)>
-if _first_<enclosingTreeLevel> is None:
-    _first_<enclosingTreeLevel> = <root.el.label>.tree<\n>
-<else>
-if _first_<enclosingTreeLevel> is None:
-    _first_<enclosingTreeLevel> = <root.el.label><\n>
-<endif>
-})>
-<endif>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if self.input.LA(1) == DOWN:
-    self.match(self.input, DOWN, None)
-    <children:element()>
-    self.match(self.input, UP, None)
-
-<else>
-self.match(self.input, DOWN, None)
-<children:element()>
-self.match(self.input, UP, None)<\n>
-<endif>
-<if(!rewriteMode)>
-self._adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>)<\n>
-<endif>
-_last = _save_last_<treeLevel>
-
->>
-
-// TOKEN AST STUFF
-
-/** ID! and output=AST (same as plain tokenRef) except it adds
- *  the setting of _last
- */
-tokenRefBang(token,label,elementIndex,terminalOptions) ::= <<
-_last = self.input.LT(1)
-<super.tokenRef(...)>
->>
-
-/** ID auto construct */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-_last = self.input.LT(1)
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<finishedBacktracking({
-<if(terminalOptions.node)>
-<label>_tree = <terminalOptions.node>(<label>)
-<else>
-<label>_tree = self._adaptor.dupNode(<label>)
-<endif><\n>
-self._adaptor.addChild(root_<treeLevel>, <label>_tree)
-})>
-<else> <! rewrite mode !>
-<finishedBacktracking({
-if _first_<treeLevel> is None:
-    _first_<treeLevel> = <label><\n>
-})>
-<endif>
->>
-
-/** label+=TOKEN auto construct */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** ^(ID ...) auto construct */
-tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
-_last = self.input.LT(1)
-<super.tokenRef(...)>
-<if(!rewriteMode)>
-<finishedBacktracking({
-<if(terminalOptions.node)>
-<label>_tree = <terminalOptions.node>(<label>)
-<else>
-<label>_tree = self._adaptor.dupNode(<label>)
-<endif><\n>
-root_<treeLevel> = self._adaptor.becomeRoot(<label>_tree, root_<treeLevel>)
-})>
-<endif>
->>
-
-/** Match ^(label+=TOKEN ...) auto construct */
-tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRefRuleRoot(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard and auto dup the node/subtree */
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-_last = self.input.LT(1)
-<super.wildcard(...)>
-<if(!rewriteMode)>
-<finishedBacktracking({
-<label>_tree = self._adaptor.dupTree(<label>)
-self._adaptor.addChild(root_<treeLevel>, <label>_tree)
-})>
-<else> <! rewrite mode !>
-<finishedBacktracking({
-if _first_<treeLevel> is None:
-    _first_<treeLevel> = <label>
-})>
-<endif>
->>
-
-// SET AST
-matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-_last = self.input.LT(1)
-<super.matchSet(postmatchCode={
-<if(!rewriteMode)>
-<finishedBacktracking({
-<if(terminalOptions.node)>
-<label>_tree = <terminalOptions.node>(<label>)
-<else>
-<label>_tree = self._adaptor.dupNode(<label>)
-<endif><\n>
-self._adaptor.addChild(root_<treeLevel>, <label>_tree)
-})>
-<endif>
-}, ...)>
->>
-
-matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
-<matchSet(...)>
-<noRewrite(...)> <! set return tree !>
->>
-
-matchSetBang(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
-_last = self.input.LT(1)
-<super.matchSet(...)>
->>
-
-matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
-<super.matchSet(postmatchCode={
-<if(!rewriteMode)>
-<finishedBacktracking({
-<if(terminalOptions.node)>
-<label>_tree = <terminalOptions.node>(<label>)
-<else>
-<label>_tree = self._adaptor.dupNode(<label>)
-<endif><\n>
-root_<treeLevel> = self._adaptor.becomeRoot(<label>_tree, root_<treeLevel>)
-})>
-<endif>
-}, ...)>
->>
-
-// RULE REF AST
-
-/** rule auto construct */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-_last = self.input.LT(1)
-<super.ruleRef(...)>
-<finishedBacktracking({
-<if(!rewriteMode)>
-self._adaptor.addChild(root_<treeLevel>, <label>.tree)
-<else> <! rewrite mode !>
-if _first_<treeLevel> is None:
-    _first_<treeLevel> = <label>.tree<\n>
-<endif>
-})>
->>
-
-/** x+=rule auto construct */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(label, {<label>.tree})>
->>
-
-/** ^(rule ...) auto construct */
-ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
-_last = self.input.LT(1)
-<super.ruleRef(...)>
-<if(!rewriteMode)>
-<finishedBacktracking({
-root_<treeLevel> = self._adaptor.becomeRoot(<label>.tree, root_<treeLevel>)
-})>
-<endif>
->>
-
-/** ^(x+=rule ...) auto construct */
-ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRefRuleRoot(...)>
-<listLabel(label, {<label>.tree})>
->>
-
-/** rule when output=AST and tracking for rewrite */
-ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
-_last = self.input.LT(1)
-<super.ruleRefTrack(...)>
->>
-
-/** x+=rule when output=AST and tracking for rewrite */
-ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-_last = self.input.LT(1)
-<super.ruleRefTrackAndListLabel(...)>
->>
-
-/** ^(rule ...) rewrite */
-ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
-_last = self.input.LT(1)
-<super.ruleRefRootTrack(...)>
->>
-
-/** ^(x+=rule ...) rewrite */
-ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-_last = self.input.LT(1)
-<super.ruleRefRuleRootTrackAndListLabel(...)>
->>
-
-/** Streams for token refs are tree nodes now; override to
- *  change nextToken to nextNode.
- */
-createRewriteNodeFromElement(token,terminalOptions,scope) ::= <<
-<if(terminalOptions.node)>
-<terminalOptions.node>(stream_<token>.nextNode())
-<else>
-stream_<token>.nextNode()
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<super.ruleCleanUp()>
-<if(!rewriteMode)>
-<finishedBacktracking({
-retval.tree = self._adaptor.rulePostProcessing(root_0)
-})>
-<endif>
->>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/Python.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/Python.stg
deleted file mode 100644
index 71c324c..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/Python.stg
+++ /dev/null
@@ -1,1474 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2005-2006 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/** The runtime API version that recognizers generated from these templates
- *  require.
- */
-apiVersion() ::= "1"
-
-// Normalize boolean literal spellings: .NET-style "True"/"False" (as returned by
-// System.Boolean.ToString()) and lowercase "true"/"false" all map to lowercase here.
-booleanLiteral ::= [
-	       "True":"true",
-	       "False":"false",
-	       "true":"true",
-	       "false":"false",
-	       default:"false"
-]
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-           docComment, recognizer,
-           name, tokens, tokenNames, rules, cyclicDFAs,
-           bitsets, buildTemplate, buildAST, rewriteMode, profile,
-           backtracking, synpreds, memoize, numRules,
-           fileName, ANTLRVersion, generatedTimestamp, trace,
-           scopes, superClass, literals) ::=
-<<
-# $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-
-<@imports>
-import sys
-from antlr3 import *
-<if(TREE_PARSER)>
-from antlr3.tree import *<\n>
-<endif>
-from antlr3.compat import set, frozenset
-<@end>
-
-<actions.(actionScope).header>
-
-<! <docComment> !>
-
-# for convenience in actions
-HIDDEN = BaseRecognizer.HIDDEN
-
-# token types
-<tokens:{it | <it.name>=<it.type>}; separator="\n">
-
-<recognizer>
-
-<if(actions.(actionScope).main)>
-<actions.(actionScope).main>
-<else>
-def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
-<if(LEXER)>
-    from antlr3.main import LexerMain
-    main = LexerMain(<recognizer.name>)<\n>
-<endif>
-<if(PARSER)>
-    from antlr3.main import ParserMain
-    main = ParserMain("<recognizer.grammar.name>Lexer", <recognizer.name>)<\n>
-<endif>
-<if(TREE_PARSER)>
-    from antlr3.main import WalkerMain
-    main = WalkerMain(<recognizer.name>)<\n>
-<endif>
-    main.stdin = stdin
-    main.stdout = stdout
-    main.stderr = stderr
-    main.execute(argv)<\n>
-<endif>
-
-<actions.(actionScope).footer>
-
-if __name__ == '__main__':
-    main(sys.argv)
-
->>
-
-lexer(grammar, name, tokens, scopes, rules, numRules, filterMode,
-      labelType="CommonToken", superClass="Lexer") ::= <<
-<grammar.directDelegates:
- {g|from <g.recognizerName> import <g.recognizerName>}; separator="\n">
-
-class <grammar.recognizerName>(<@superClassName><superClass><@end>):
-    <scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
-
-    grammarFileName = "<fileName>"
-    api_version = <apiVersion()>
-
-    def __init__(self<grammar.delegators:{g|, <g:delegateName()>}>, input=None, state=None):
-        if state is None:
-            state = RecognizerSharedState()
-        super(<grammar.recognizerName>, self).__init__(input, state)
-
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-        self._state.ruleMemo = {}
-<endif>
-<endif>
-
-        <grammar.directDelegates:
-         {g|self.<g:delegateName()> = <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>self, input, state)}; separator="\n">
-        <grammar.delegators:
-         {g|self.<g:delegateName()> = <g:delegateName()>}; separator="\n">
-        <last(grammar.delegators):
-    	 {g|self.gParent = <g:delegateName()>}; separator="\n">
-        self.delegates = [<grammar.delegates: {g|self.<g:delegateName()>}; separator = ", ">]
-
-        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; separator="\n">
-
-        <actions.lexer.init>
-
-
-    <actions.lexer.members>
-
-
-<if(filterMode)>
-    <filteringNextToken()>
-<endif>
-    <rules; separator="\n\n">
-
-    <synpreds:{p | <lexerSynpred(p)>}>
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-
->>
-
-/** An override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error is reported on a failed match; just rewind, consume
- *  a token and then try again.  The backtracking flag needs to be set as well.
- *  Make rule memoization happen only at levels above 1 as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-def nextToken(self):
-    while True:
-        if self.input.LA(1) == EOF:
-            return self.makeEOFToken()
-
-        self._state.token = None
-        self._state.channel = DEFAULT_CHANNEL
-        self._state.tokenStartCharIndex = self.input.index()
-        self._state.tokenStartCharPositionInLine = self.input.charPositionInLine
-        self._state.tokenStartLine = self.input.line
-        self._state._text = None
-        try:
-            m = self.input.mark()
-            try:
-                # means we won't throw slow exception
-                self._state.backtracking = 1
-                try:
-                    self.mTokens()
-                finally:
-                    self._state.backtracking = 0
-
-            except BacktrackingFailed:
-                # mTokens backtracks with synpred at backtracking==2
-                # and we set the synpredgate to allow actions at level 1.
-                self.input.rewind(m)
-                self.input.consume() # advance one char and try again
-
-            else:
-                self.emit()
-                return self._state.token
-
-        except RecognitionException, re:
-            # shouldn't happen in backtracking mode, but...
-            self.reportError(re)
-            self.recover(re)
-
-
-def memoize(self, input, ruleIndex, ruleStartIndex, success):
-    if self._state.backtracking > 1:
-        # is Lexer always superclass?
-        super(<grammar.recognizerName>, self).memoize(input, ruleIndex, ruleStartIndex, success)
-
-
-def alreadyParsedRule(self, input, ruleIndex):
-    if self._state.backtracking > 1:
-        return super(<grammar.recognizerName>, self).alreadyParsedRule(input, ruleIndex)
-    return False
-
-
->>
-
-actionGate() ::= "self._state.backtracking == 0"
-
-filteringActionGate() ::= "self._state.backtracking == 1"
-
-/** How to generate a parser */
-
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass, labelType, members,
-	      rewriteElementType, filterMode, init, ASTLabelType="Object") ::= <<
-<if(grammar.grammarIsRoot)>
-# token names
-tokenNames = [
-    "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>",
-    <tokenNames; wrap, separator=", ">
-]<\n>
-<else>
-from <grammar.composite.rootGrammar.recognizerName> import tokenNames<\n>
-<endif>
-<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScopeClass(scope=it)><endif>}>
-
-<grammar.directDelegates:
- {g|from <g.recognizerName> import <g.recognizerName>}; separator="\n">
-
-<rules:{it|<ruleAttributeScopeClass(scope=it.ruleDescriptor.ruleScope)>}>
-
-class <grammar.recognizerName>(<@superClassName><superClass><@end>):
-    grammarFileName = "<fileName>"
-    api_version = <apiVersion()>
-    tokenNames = tokenNames
-
-    def __init__(self<grammar.delegators:{g|, <g:delegateName()>}>, input, state=None, *args, **kwargs):
-        if state is None:
-            state = RecognizerSharedState()
-
-        <@args()>
-        super(<grammar.recognizerName>, self).__init__(input, state, *args, **kwargs)
-
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-        self._state.ruleMemo = {}
-<endif>
-<endif>
-
-        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; separator="\n">
-
-        <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeStack(scope=it)><endif>}>
-	<rules:{it | <ruleAttributeScopeStack(scope=it.ruleDescriptor.ruleScope)>}>
-
-        <init>
-
-        <grammar.delegators:
-         {g|self.<g:delegateName()> = <g:delegateName()>}; separator="\n">
-        <grammar.directDelegates:
-         {g|self.<g:delegateName()> = <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>self, input, state)}; separator="\n">
-        <grammar.indirectDelegates:
-         {g|<g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>}; separator="\n">
-        <last(grammar.delegators):
-    	 {g|self.gParent = self.<g:delegateName()>}; separator="\n">
-        self.delegates = [<grammar.delegates: {g|self.<g:delegateName()>}; separator = ", ">]
-
-	<@init><@end>
-
-
-    <@members><@end>
-
-    <members>
-
-    <rules; separator="\n\n">
-
-    <! generate rule/method definitions for imported rules so they
-       appear to be defined in this recognizer. !>
-    <grammar.delegatedRules:{ruleDescriptor| <delegateRule(ruleDescriptor)> }; separator="\n">
-
-    <synpreds:{p | <synpred(p)>}>
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-    <bitsets:{it | FOLLOW_<it.name>_in_<it.inName><it.tokenIndex> = frozenset([<it.tokenTypes:{it | <it>};separator=", ">])<\n>}>
-
->>
-
-delegateRule(ruleDescriptor) ::= <<
-def <ruleDescriptor.name>(self, <ruleDescriptor.parameterScope:parameterScope()>):
-<\ >   <if(ruleDescriptor.hasReturnValue)>return <endif>self.<ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">)
-
-
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
-       ASTLabelType="Object", superClass="Parser", labelType="Token",
-       members={<actions.parser.members>},
-       init={<actions.parser.init>}
-       ) ::= <<
-<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, "TokenStream", superClass,
-              labelType, members, "Token",
-              false, init, ASTLabelType)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
-           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object",
-           superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
-           members={<actions.treeparser.members>},
-	   init={<actions.treeparser.init>}
-           ) ::= <<
-<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, "TreeNodeStream", superClass,
-              labelType, members, "Node",
-              filterMode, init, ASTLabelType)>
->>
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  As they never have return values,
- *  parameters, etc., just generate the simplest possible method.  Don't do
- *  any of the normal memoization stuff in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-# $ANTLR start "<ruleName>"
-def <ruleName>_fragment(self, <ruleDescriptor.parameterScope:parameterScope()>):
-    <ruleLabelDefs()>
-<if(trace)>
-    self.traceIn("<ruleName>_fragment", <ruleDescriptor.index>)
-    try:
-        <block>
-
-    finally:
-        self.traceOut("<ruleName>_fragment", <ruleDescriptor.index>)
-
-<else>
-    <block>
-<endif>
-# $ANTLR end "<ruleName>"
-
-
->>
-
-synpred(name) ::= <<
-def <name>(self):
-    self._state.backtracking += 1
-    <@start()>
-    start = self.input.mark()
-    try:
-        self.<name>_fragment()
-    except BacktrackingFailed:
-        success = False
-    else:
-        success = True
-    self.input.rewind(start)
-    <@stop()>
-    self._state.backtracking -= 1
-    return success
-
-
->>
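The synpred template above expands into a method that speculatively runs a rule fragment: mark the input, attempt the fragment with backtracking enabled, and rewind regardless of the outcome. A minimal standalone sketch of that mark/attempt/rewind pattern in Python, using a hypothetical input object and a placeholder exception rather than the antlr3 runtime API:

    class BacktrackingFailed(Exception):
        """Placeholder for the runtime's backtracking-failure signal (assumption)."""

    def speculate(input, fragment):
        # Remember the current input position so we can restore it afterwards.
        marker = input.mark()
        try:
            fragment()                # attempt the rule fragment
            success = True
        except BacktrackingFailed:
            success = False           # the fragment did not match; no error is reported
        finally:
            input.rewind(marker)      # always rewind, whether or not the match succeeded
        return success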
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if self._state.backtracking > 0 and self.alreadyParsedRule(self.input, <ruleDescriptor.index>):
-    # for cached failed rules, alreadyParsedRule will raise an exception
-    success = True
-    return <ruleReturnValue()>
-
-<endif>
->>
-
-/** This rule has failed, exit indicating failure during backtrack */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>
-if self._state.backtracking > 0:
-    raise BacktrackingFailed
-
-<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<returnScope(scope=ruleDescriptor.returnScope)>
-
-# $ANTLR start "<ruleName>"
-# <fileName>:<description>
-<ruleDescriptor.actions.decorate>
-def <ruleName>(self, <ruleDescriptor.parameterScope:parameterScope()>):
-<if(trace)>
-    self.traceIn("<ruleName>", <ruleDescriptor.index>)<\n>
-<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleLabelDefs()>
-    <ruleDescriptor.actions.init>
-    <@preamble()>
-    <@body><ruleBody()><@end>
-    <@postamble()>
-    return <ruleReturnValue()>
-
-# $ANTLR end "<ruleName>"
->>
-
-ruleBody() ::= <<
-<if(memoize)>
-<if(backtracking)>
-success = False<\n>
-<endif>
-<endif>
-try:
-    try:
-        <ruleMemoization(name=ruleName)>
-        <block>
-        <ruleCleanUp()>
-        <(ruleDescriptor.actions.after):execAction()>
-
-<if(memoize)>
-<if(backtracking)>
-        success = True<\n>
-<endif>
-<endif>
-<if(exceptions)>
-    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-    <actions.(actionScope).rulecatch>
-<else>
-    except RecognitionException, re:
-        self.reportError(re)
-        self.recover(self.input, re)
-        <@setErrorReturnValue()>
-
-<endif>
-<else>
-    finally:
-        pass
-
-<endif>
-<endif>
-finally:
-<if(trace)>
-    self.traceOut("<ruleName>", <ruleDescriptor.index>)<\n>
-<endif>
-    <memoize()>
-    <ruleScopeCleanUp()>
-    <finally>
-    pass
->>
-
-catch(decl,action) ::= <<
-except <e.decl>:
-    <e.action>
-
->>
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval = self.<ruleDescriptor.name>_return()
-retval.start = self.input.LT(1)<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-<a.name> = <if(a.initValue)><a.initValue><else>None<endif>
-}>
-<endif>
-<if(memoize)>
-<ruleDescriptor.name>_StartIndex = self.input.index()
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{it | self.<it>_stack.append(<it>_scope())}; separator="\n">
-<ruleDescriptor.ruleScope:{it | self.<it.name>_stack.append(<it.name>_scope())}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{it | self.<it>_stack.pop()}; separator="\n">
-<ruleDescriptor.ruleScope:{it | self.<it.name>_stack.pop()}; separator="\n">
->>
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
-  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it | <it.label.text> = None}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,
-  ruleDescriptor.wildcardTreeListLabels]
-    :{it | list_<it.label.text> = None}; separator="\n"
->
-<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
-    :ruleLabelDef(); separator="\n"
->
-<ruleDescriptor.ruleListLabels:{it | <it.label.text> = None}; separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{it | <it.label.text> = None}; separator="\n"
->
-<ruleDescriptor.charLabels:{it | <it.label.text> = None}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{it | list_<it.label.text> = None}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <%
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-retval
-<endif>
-<endif>
-<endif>
-%>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-retval.stop = self.input.LT(-1)<\n>
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if self._state.backtracking > 0:
-    self.memoize(self.input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex, success)
-
-<endif>
-<endif>
->>
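Taken together, ruleMemoization, memoize, and alreadyParsedRule cache the outcome of attempting a rule at a given input position, so repeated backtracking does not re-parse the same region. A rough sketch of the idea, with a plain dict keyed by (rule index, start index) standing in for the runtime's ruleMemo structure (an assumption for illustration only):

    class ParseFailed(Exception):
        """Stand-in for the runtime's backtracking-failure signal (assumption)."""

    rule_memo = {}   # (rule_index, start_index) -> True (succeeded) or False (failed)

    def attempt_rule(rule_index, start_index, parse_fn):
        key = (rule_index, start_index)
        if key in rule_memo:              # this rule was already tried at this position
            if not rule_memo[key]:
                raise ParseFailed()       # cached failure: fail fast without re-parsing
            return                        # cached success: skip re-parsing
        try:
            parse_fn()
        except ParseFailed:
            rule_memo[key] = False        # remember the failure for next time
            raise
        rule_memo[key] = True             # remember the success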
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-# $ANTLR start "<ruleName>"
-def m<ruleName>(self, <ruleDescriptor.parameterScope:parameterScope()>):
-<if(trace)>
-    self.traceIn("<ruleName>", <ruleDescriptor.index>)<\n>
-<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-<if(memoize)>
-<if(backtracking)>
-    success = False<\n>
-<endif>
-<endif>
-    try:
-<if(nakedBlock)>
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block><\n>
-<else>
-        _type = <ruleName>
-        _channel = DEFAULT_CHANNEL
-
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        <block>
-        <ruleCleanUp()>
-        self._state.type = _type
-        self._state.channel = _channel
-        <(ruleDescriptor.actions.after):execAction()>
-<endif>
-<if(memoize)>
-<if(backtracking)>
-        success = True<\n>
-<endif>
-<endif>
-
-    finally:
-<if(trace)>
-        self.traceOut("<ruleName>", <ruleDescriptor.index>)<\n>
-<endif>
-	<ruleScopeCleanUp()>
-        <memoize()>
-        pass
-
-# $ANTLR end "<ruleName>"
-
-
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-def mTokens(self):
-    <block><\n>
-
-
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# <fileName>:<description>
-alt<decisionNumber> = <maxAlt>
-<decls>
-<@body><blockBody()><@end>
->>
-
-blockBody() ::= <<
-<@predecision()>
-<@decision><decision><@end>
-<@postdecision()>
-<@prebranch()>
-<alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# <fileName>:<description>
-alt<decisionNumber> = <maxAlt>
-<decls>
-<@predecision()>
-<@decision><decision><@end>
-<@postdecision()>
-<alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-# <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-# <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# <fileName>:<description>
-cnt<decisionNumber> = 0
-<decls>
-<@preloop()>
-<@loopBody>
-<positiveClosureBlockLoop()>
-<@end>
-<@postloop()>
->>
-
-positiveClosureBlockLoop() ::= <<
-while True: #loop<decisionNumber>
-    alt<decisionNumber> = <maxAlt>
-    <@predecision()>
-    <@decisionBody><decision><@end>
-    <@postdecision()>
-    <alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
-    else:
-        if cnt<decisionNumber> >= 1:
-            break #loop<decisionNumber>
-
-        <ruleBacktrackFailure()>
-        eee = EarlyExitException(<decisionNumber>, self.input)
-        <@earlyExitException()>
-        raise eee
-
-    cnt<decisionNumber> += 1
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# <fileName>:<description>
-<decls>
-<@preloop()>
-<@loopBody>
-<closureBlockLoop()>
-<@end>
-<@postloop()>
->>
-
-closureBlockLoop() ::= <<
-while True: #loop<decisionNumber>
-    alt<decisionNumber> = <maxAlt>
-    <@predecision()>
-    <@decisionBody><decision><@end>
-    <@postdecision()>
-    <alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
-    else:
-        break #loop<decisionNumber>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase(altNum,alt) ::= <<
-if alt<decisionNumber> == <altNum>:
-    <@prealt()>
-    <alt>
->>
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt, treeLevel,rew) ::= <<
-# <fileName>:<description>
-pass <! so empty alternatives are a valid block !>
-<@declarations()>
-<elements:element()>
-<rew>
-<@cleanup()>
->>
-
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element(e) ::= <<
-<@prematch()>
-<e.el><\n>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label> = <endif>self.match(self.input, <token>, self.FOLLOW_<token>_in_<ruleName><elementIndex>)
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(token,label,elementIndex,terminalOptions)>
-<listLabel(label, label)>
->>
-
-listLabel(label, elem) ::= <<
-if list_<label> is None:
-    list_<label> = []
-list_<label>.append(<elem>)<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = self.input.LA(1)<\n>
-<endif>
-self.match(<char>)
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = self.input.LA(1)<\n>
-<endif>
-self.matchRange(<a>, <b>)
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
-<if(label)>
-<label> = self.input.LT(1)<\n>
-<endif>
-if <s>:
-    self.input.consume()
-    <postmatchCode>
-<if(!LEXER)>
-    self._state.errorRecovery = False<\n>
-<endif>
-
-else:
-    <ruleBacktrackFailure()>
-    mse = MismatchedSetException(None, self.input)
-    <@mismatchedSetException()>
-<if(LEXER)>
-    self.recover(mse)
-    raise mse
-<else>
-    raise mse
-    <! use following code to make it recover inline; remove throw mse;
-    self.recoverFromMismatchedSet(
-        self.input, mse, self.FOLLOW_set_in_<ruleName><elementIndex>
-        )
-    !>
-<endif>
-<\n>
->>
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(label, label)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex="0") ::= <<
-<if(label)>
-<label>Start = self.getCharIndex()
-self.match(<string>)
-<label>StartLine<elementIndex> = self.getLine()
-<label>StartCharPos<elementIndex> = self.getCharPositionInLine()
-<label> = <labelType>(input=self.input, type=INVALID_TOKEN_TYPE, channel=DEFAULT_CHANNEL, start=<label>Start, stop=self.getCharIndex()-1)
-<label>.setLine(<label>StartLine<elementIndex>)
-<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
-<else>
-self.match(<string>)
-<endif>
->>
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)>
-<label> = self.input.LT(1)<\n>
-<endif>
-self.matchAny(self.input)
->>
-
-wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<wildcard(...)>
-<listLabel(label,label)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = self.input.LA(1)<\n>
-<endif>
-self.matchAny()
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(label, elementIndex)>
-<listLabel(label, label)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values. The 'rule' argument used to be the
- *  target rule name, but is now of type Rule, whose toString is
- *  still the rule name; the full rule descriptor is also
- *  accessible through it.
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-self._state.following.append(self.FOLLOW_<rule.name>_in_<ruleName><elementIndex>)
-<if(label)><label> = <endif>self.<if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">)<\n>
-self._state.following.pop()
->>
-
-/** ids+=rule */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(rule,label,elementIndex,args,scope)>
-<listLabel(label, label)>
->>
-
-/** A lexer rule reference
- *  The 'rule' argument used to be the target rule name, but is now
- *  of type Rule, whose toString is still the rule name.
- *  The full rule descriptor is also accessible through it.
- */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
-<if(label)>
-<label>Start<elementIndex> = self.getCharIndex()
-self.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
-<label>StartLine<elementIndex> = self.getLine()
-<label>StartCharPos<elementIndex> = self.getCharPositionInLine()
-<label> = <labelType>(
-    input=self.input,
-    type=INVALID_TOKEN_TYPE,
-    channel=DEFAULT_CHANNEL,
-    start=<label>Start<elementIndex>,
-    stop=self.getCharIndex()-1)
-<label>.setLine(<label>StartLine<elementIndex>)
-<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
-<else>
-self.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(rule,label,args,elementIndex,scope)>
-<listLabel(label, label)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-<label>Start<elementIndex> = self.getCharIndex()
-<label>StartLine<elementIndex> = self.getLine()
-<label>StartCharPos<elementIndex> = self.getCharPositionInLine()
-self.match(EOF)
-<label> = <labelType>(input=self.input, type=EOF, channel=DEFAULT_CHANNEL, start=<label>Start<elementIndex>, stop=self.getCharIndex()-1)
-<label>.setLine(<label>StartLine<elementIndex>)
-<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
-<else>
-self.match(EOF)
-<endif>
->>
-
-// used for left-recursive rules
-recRuleDefArg()                       ::= "<recRuleArg()>"
-recRuleArg()                          ::= "_p"
-recRuleAltPredicate(ruleName, opPrec) ::= "<recRuleArg()> \<= <opPrec>"
-recRuleSetResultAction()              ::= "root_0 = $<ruleName>_primary.tree"
-recRuleSetReturnAction(src, name)     ::= "$<name> = $<src>.<name>"
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if self.input.LA(1) == DOWN:
-    self.match(self.input, DOWN, None)
-    <children:element()>
-    self.match(self.input, UP, None)
-
-<else>
-self.match(self.input, DOWN, None)
-<children:element()>
-self.match(self.input, UP, None)
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if not (<evalPredicate(pred, description)>):
-    <ruleBacktrackFailure()>
-    raise FailedPredicateException(self.input, "<ruleName>", "<description>")
-
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
-<edges; separator="\nel">
-else:
-<if(eotPredictsAlt)>
-    alt<decisionNumber> = <eotPredictsAlt>
-<else>
-    <ruleBacktrackFailure()>
-    nvae = NoViableAltException("<description>", <decisionNumber>, <stateNumber>, self.input)<\n>
-    <@noViableAltException()>
-    raise nvae<\n>
-<endif>
->>
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection but this
- *  is faster, smaller, and closer to what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
-<edges; separator="\nel">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
-<edges; separator="\nel"><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-alt<decisionNumber> = <eotPredictsAlt> <! if no edges, don't gen ELSE !>
-<else>
-else:
-    alt<decisionNumber> = <eotPredictsAlt>
-<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  jump to the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if (<labelExpr>) <if(predicates)>and (<predicates>)<endif>:
-    <targetState>
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-<!
-  FIXME: this is one of the few occasions where I miss a switch statement
-  in Python. At the moment this is implemented as a chain of if .. elif ..
-  This may be replaced by a faster dictionary lookup once I find a solution
-  for the cases where an edge is not a plain dfaAcceptState.
-!>
-LA<decisionNumber> = self.input.LA(<k>)
-<edges; separator="\nel">
-else:
-<if(eotPredictsAlt)>
-    alt<decisionNumber> = <eotPredictsAlt>
-<else>
-    <ruleBacktrackFailure()>
-    nvae = NoViableAltException("<description>", <decisionNumber>, <stateNumber>, self.input)<\n>
-    <@noViableAltException()>
-    raise nvae<\n>
-<endif>
-
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-LA<decisionNumber> = self.input.LA(<k>)
-<edges; separator="\nel">
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-LA<decisionNumber> = self.input.LA(<k>)
-<edges; separator="\nel">
-<if(eotPredictsAlt)>
-else:
-    alt<decisionNumber> = <eotPredictsAlt>
-<endif>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-if <labels:{it | LA<decisionNumber> == <it>}; separator=" or ">:
-    <targetState>
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = self.dfa<decisionNumber>.predict(self.input)
->>
-
-/* Dump DFA tables as run-length-encoded Strings of octal values.
- * Can't use hex as compiler translates them before compilation.
- * These strings are split into multiple, concatenated strings.
- * Java puts them back together at compile time thankfully.
- * Java cannot handle large static arrays, so we're stuck with this
- * encode/decode approach.  See analysis and runtime DFA for
- * the encoding methods.
- */
-cyclicDFA(dfa) ::= <<
-# lookup tables for DFA #<dfa.decisionNumber>
-
-DFA<dfa.decisionNumber>_eot = DFA.unpack(
-    u"<dfa.javaCompressedEOT; wrap="\"\n    u\"">"
-    )
-
-DFA<dfa.decisionNumber>_eof = DFA.unpack(
-    u"<dfa.javaCompressedEOF; wrap="\"\n    u\"">"
-    )
-
-DFA<dfa.decisionNumber>_min = DFA.unpack(
-    u"<dfa.javaCompressedMin; wrap="\"\n    u\"">"
-    )
-
-DFA<dfa.decisionNumber>_max = DFA.unpack(
-    u"<dfa.javaCompressedMax; wrap="\"\n    u\"">"
-    )
-
-DFA<dfa.decisionNumber>_accept = DFA.unpack(
-    u"<dfa.javaCompressedAccept; wrap="\"\n    u\"">"
-    )
-
-DFA<dfa.decisionNumber>_special = DFA.unpack(
-    u"<dfa.javaCompressedSpecial; wrap="\"\n    u\"">"
-    )
-
-
-DFA<dfa.decisionNumber>_transition = [
-    <dfa.javaCompressedTransition:{s|DFA.unpack(u"<s; wrap="\"\nu\"">")}; separator=",\n">
-]
-
-# class definition for DFA #<dfa.decisionNumber>
-
-class DFA<dfa.decisionNumber>(DFA):
-    pass
-
-    <@errorMethod()>
-
-<if(dfa.specialStateSTs)>
-    def specialStateTransition(self_, s, input):
-        # convince pylint that my self_ magic is ok ;)
-        # pylint: disable-msg=E0213
-
-        # pretend we are a member of the recognizer
-        # thus semantic predicates can be evaluated
-        self = self_.recognizer
-
-        _s = s
-
-        <dfa.specialStateSTs:{state |
-if s == <i0>: <! compressed special state numbers 0..n-1 !>
-    <state>}; separator="\nel">
-
-<if(backtracking)>
-        if self._state.backtracking > 0:
-            raise BacktrackingFailed
-
-<endif>
-        nvae = NoViableAltException(self_.getDescription(), <dfa.decisionNumber>, _s, input)
-        self_.error(nvae)
-        raise nvae<\n>
-<endif>
-
->>
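The packed table strings above are expanded back into flat arrays at load time by DFA.unpack in the Python runtime. The underlying idea is plain run-length decoding: the packed data is a sequence of (count, value) pairs. That decoding step can be sketched on its own as below (not the runtime's exact implementation, which also deals with string packing and sentinel values):

    def unpack_rle(pairs):
        """Expand run-length-encoded data given as a flat [count, value, ...] list."""
        table = []
        it = iter(pairs)
        for count in it:
            value = next(it)              # each count is followed by its value
            table.extend([value] * count)
        return table

    # Two runs: three 7s followed by one 5.
    assert unpack_rle([3, 7, 1, 5]) == [7, 7, 7, 5]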
-
-cyclicDFAInit(dfa) ::= <<
-self.dfa<dfa.decisionNumber> = self.DFA<dfa.decisionNumber>(
-    self, <dfa.decisionNumber>,
-    eot = self.DFA<dfa.decisionNumber>_eot,
-    eof = self.DFA<dfa.decisionNumber>_eof,
-    min = self.DFA<dfa.decisionNumber>_min,
-    max = self.DFA<dfa.decisionNumber>_max,
-    accept = self.DFA<dfa.decisionNumber>_accept,
-    special = self.DFA<dfa.decisionNumber>_special,
-    transition = self.DFA<dfa.decisionNumber>_transition
-    )<\n>
->>
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-LA<decisionNumber>_<stateNumber> = input.LA(1)<\n>
-<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-index<decisionNumber>_<stateNumber> = input.index()
-input.rewind()<\n>
-<endif>
-s = -1
-<edges; separator="\nel">
-<if(semPredState)> <! return input cursor to state before we rewound !>
-input.seek(index<decisionNumber>_<stateNumber>)<\n>
-<endif>
-if s >= 0:
-    return s
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if (<labelExpr>)<if(predicates)> and (<predicates>)<endif>:
-    s = <targetStateNumber><\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-se:
-    s = <targetStateNumber><\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "((<left>) and (<right>))"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o |  or <o>}>)"
-
-notPredicate(pred) ::= "not (<evalPredicate(pred, {})>)"
-
-evalPredicate(pred,description) ::= "(<pred>)"
-
-evalSynPredicate(pred,description) ::= "self.<pred>()"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "self.input.LA(<k>) == <atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
-(<lower> \<= LA<decisionNumber>_<stateNumber> \<= <upper>)
-%>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(<lower> \<= self.input.LA(<k>) \<= <upper>)"
-
-setTest(ranges) ::= "<ranges; separator=\" or \">"
-
-// A T T R I B U T E S
-
-globalAttributeScopeClass(scope) ::= <<
-<if(scope.attributes)>
-class <scope.name>_scope(object):
-    def __init__(self):
-        <scope.attributes:{it | self.<it.decl> = None}; separator="\n">
-
-<endif>
->>
-
-globalAttributeScopeStack(scope) ::= <<
-<if(scope.attributes)>
-self.<scope.name>_stack = []<\n>
-<endif>
->>
-
-ruleAttributeScopeClass(scope) ::= <<
-<if(scope.attributes)>
-class <scope.name>_scope(object):
-    def __init__(self):
-        <scope.attributes:{it | self.<it.decl> = None}; separator="\n">
-
-<endif>
->>
-
-ruleAttributeScopeStack(scope) ::= <<
-<if(scope.attributes)>
-self.<scope.name>_stack = []<\n>
-<endif>
->>
-
-delegateName(d) ::= <<
-<if(d.label)><d.label><else>g<d.name><endif>
->>
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <<
-<label.label.text> = None<\n>
->>
-
-returnStructName(r) ::= "<r.name>_return"
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- */
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-class <ruleDescriptor:returnStructName()>(<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope):
-    def __init__(self):
-        super(<grammar.recognizerName>.<ruleDescriptor:returnStructName()>, self).__init__()
-
-        <scope.attributes:{it | self.<it.decl> = None}; separator="\n">
-        <@ruleReturnInit()>
-
-
-    <@ruleReturnMembers()>
-
-<endif>
->>
-
-parameterScope(scope) ::= <<
-<scope.attributes:{it | <it.decl>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>"
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <%
-<if(negIndex)>
-self.<scope>_stack[-<negIndex>].<attr.name>
-<else>
-<if(index)>
-self.<scope>_stack[<index>].<attr.name>
-<else>
-self.<scope>_stack[-1].<attr.name>
-<endif>
-<endif>
-%>
-
-/* not applying patch because of bug in action parser!
-
-<if(negIndex)>
-((len(self.<scope>_stack) - <negIndex> - 1) >= 0 and [self.<scope>_stack[-<negIndex>].<attr.name>] or [None])[0]
-<else>
-<if(index)>
-((<index> \< len(self.<scope>_stack)) and [self.<scope>_stack[<index>].<attr.name>] or [None])[0]
-<else>
-((len(self.<scope>_stack) > 0) and [self.<scope>_stack[-1].<attr.name>] or [None])[0]
-<endif>
-<endif>
-
-*/
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
-<if(negIndex)>
-<!FIXME: this seems not to be used by ActionTranslator...!>
-self.<scope>_stack[-<negIndex>].<attr.name> = <expr>
-<else>
-<if(index)>
-<!FIXME: this seems not to be used by ActionTranslator...!>
-self.<scope>_stack[<index>].<attr.name> = <expr>
-<else>
-self.<scope>_stack[-1].<attr.name> = <expr>
-<endif>
-<endif>
-%>
-
-/** $x is either global scope or x is rule with dynamic scope; refers
- *  to stack itself not top of stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "self.<scope>_stack"
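The distinction drawn here, referring to the scope stack itself rather than the top of the stack, is easiest to see with a small standalone example, using a plain Python list and class in place of the generated <scope>_stack attributes (an assumption for illustration):

    class FunctionScope(object):
        def __init__(self, name):
            self.name = name              # corresponds to a declared scope attribute

    function_stack = []                   # the dynamic scope stack for rule "function"
    function_stack.append(FunctionScope("foo"))

    # $function       -> the stack itself, e.g. for size checks
    # $function::name -> an attribute of the scope on top of the stack
    if len(function_stack) > 0 and function_stack[-1].name == "foo":
        print("currently inside function foo")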
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <%
-<if(referencedRule.hasMultipleReturnValues)>
-((<scope> is not None) and [<scope>.<attr.name>] or [None])[0]
-<else>
-<scope>
-<endif>
-%>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>
-<else>
-<attr.name>
-<endif>
-%>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name> = <expr>
-<else>
-<attr.name> = <expr>
-<endif>
-%>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach; and they are evaluated early;
-// they cannot see TREE_PARSER or PARSER attributes for example. :(
-
-tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.text"
-tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.type"
-tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.line"
-tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.charPositionInLine"
-tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.channel"
-tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.index"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "<scope>.start"
-ruleLabelPropertyRef_stop(scope,attr) ::= "<scope>.stop"
-ruleLabelPropertyRef_tree(scope,attr) ::= "<scope>.tree"
-ruleLabelPropertyRef_text(scope,attr) ::= <%
-<if(TREE_PARSER)>
-((<scope> is not None) and [self.input.getTokenStream().toString(
-    self.input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
-    self.input.getTreeAdaptor().getTokenStopIndex(<scope>.start)
-    )] or [None])[0]
-<else>
-((<scope> is not None) and [self.input.toString(<scope>.start,<scope>.stop)] or [None])[0]
-<endif>
-%>
-ruleLabelPropertyRef_st(scope,attr) ::= "((<scope> is not None) and [<scope>.st] or [None])[0]"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::= "((<scope> is not None) and [<scope>.type] or [0])[0]"
-lexerRuleLabelPropertyRef_line(scope,attr) ::= "((<scope> is not None) and [<scope>.line] or [0])[0]"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::= "((<scope> is not None) and [<scope>.charPositionInLine] or [0])[0]"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::= "((<scope> is not None) and [<scope>.channel] or [0])[0]"
-lexerRuleLabelPropertyRef_index(scope,attr) ::= "((<scope> is not None) and [<scope>.index] or [0])[0]"
-lexerRuleLabelPropertyRef_text(scope,attr) ::= "((<scope> is not None) and [<scope>.text] or [None])[0]"
-lexerRuleLabelPropertyRef_int(scope,attr) ::= "((<scope> is not None) and [int(<scope>.text)] or [0])[0]"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "retval.start"
-rulePropertyRef_stop(scope,attr) ::= "retval.stop" //mmm... or input.LT(-1)??
-rulePropertyRef_tree(scope,attr) ::= "retval.tree"
-rulePropertyRef_text(scope,attr) ::= "self.input.toString(retval.start, self.input.LT(-1))"
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-lexerRulePropertyRef_text(scope,attr) ::= "self.text"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "self._state.tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "self._state.tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
-lexerRulePropertyRef_start(scope,attr) ::= "self._state.tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(self.getCharIndex()-1)"
-lexerRulePropertyRef_int(scope,attr) ::= "int(<scope>.text)"
-
-// Setting $st and $tree is allowed in a local rule; everything else
-// is flagged as an error.
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>"
-
-
-/** How to execute an action (only when not backtracking) */
-execAction(action) ::= <<
-<if(backtracking)>
-<if(actions.(actionScope).synpredgate)>
-if <actions.(actionScope).synpredgate>:
-    pass
-    <action>
-
-<else>
-if <actions.(actionScope).synpredgate>:
-    pass
-    <action>
-
-<endif>
-<else>
-#action start
-<action>
-#action end
-<endif>
->>
-
-/** How to always execute an action even when backtracking */
-execForcedAction(action) ::= "<action>"
-
-
-// M I S C (properties, etc...)
-
-codeFileExtension() ::= ".py"
-
-true_value() ::= "True"
-false_value() ::= "False"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Ruby.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Ruby.stg
deleted file mode 100644
index dd26c0b..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Ruby.stg
+++ /dev/null
@@ -1,1477 +0,0 @@
-/******************************************************************************
- *********************  M A J O R   C O M P O N E N T S  **********************
- ******************************************************************************/
-
-// Normalize boolean literal spellings: .NET-style "True"/"False" (as returned by
-// System.Boolean.ToString()) and lowercase "true"/"false" all map to lowercase here.
-booleanLiteral ::= [
-	"True":"true",
-	"False":"false",
-	"true":"true",
-	"false":"false",
-	default:"false"
-]
-
-/** The overall file structure of a recognizer; stores methods
-  * for rules and cyclic DFAs plus support code.
-  */
-outputFile(LEXER, PARSER, TREE_PARSER, actionScope, actions, docComment, recognizer, name,
-  tokens, tokenNames, rules, cyclicDFAs, bitsets, buildTemplate, buildAST, rewriteMode,
-  profile, backtracking, synpreds, memoize, numRules, fileName, ANTLRVersion, generatedTimestamp,
-  trace, scopes, superClass, literals) ::=
-<<
-#!/usr/bin/env ruby
-#
-# <fileName>
-# --
-# Generated using ANTLR version: <ANTLRVersion>
-# Ruby runtime library version: <runtimeLibraryVersion()>
-# Input grammar file: <fileName>
-# Generated at: <generatedTimestamp>
-#
-
-# ~~~\> start load path setup
-this_directory = File.expand_path( File.dirname( __FILE__ ) )
-$LOAD_PATH.unshift( this_directory ) unless $LOAD_PATH.include?( this_directory )
-
-antlr_load_failed = proc do
-  load_path = $LOAD_PATH.map { |dir| '  - ' \<\< dir }.join( $/ )
-  raise LoadError, \<\<-END.strip!
-
-Failed to load the ANTLR3 runtime library (version <runtimeLibraryVersion()>):
-
-Ensure the library has been installed on your system and is available
-on the load path. If rubygems is available on your system, this can
-be done with the command:
-
-  gem install antlr3
-
-Current load path:
-#{ load_path }
-
-  END
-end
-
-defined?( ANTLR3 ) or begin
-
-  # 1: try to load the ruby antlr3 runtime library from the system path
-  require 'antlr3'
-
-rescue LoadError
-
-  # 2: try to load rubygems if it isn't already loaded
-  defined?( Gem ) or begin
-    require 'rubygems'
-  rescue LoadError
-    antlr_load_failed.call
-  end
-
-  # 3: try to activate the antlr3 gem
-  begin
-    Gem.activate( 'antlr3', '~> <runtimeLibraryVersion()>' )
-  rescue Gem::LoadError
-    antlr_load_failed.call
-  end
-
-  require 'antlr3'
-
-end
-# \<~~~ end load path setup
-
-<placeAction(scope="all", name="header")>
-<placeAction(scope=actionScope,name="header")>
-
-<if(recognizer.grammar.grammarIsRoot)>
-<rootGrammarOutputFile()>
-<else>
-<delegateGrammarOutputFile()>
-<endif>
-
-<placeAction(scope=actionScope,name="footer")>
-<placeAction(scope="all", name="footer")>
-
-<if(actions.(actionScope).main)>
-if __FILE__ == $0 and ARGV.first != '--'
-  <placeAction(scope=actionScope,name="main")>
-end
-<endif>
->>
-
-tokenDataModule() ::= <<
-# TokenData defines all of the token type integer values
-# as constants, which will be included in all
-# ANTLR-generated recognizers.
-const_defined?( :TokenData ) or TokenData = ANTLR3::TokenScheme.new
-
-module TokenData
-<if(tokens)>
-
-  # define the token constants
-  define_tokens( <tokens:{it | :<it.name> => <it.type>}; anchor, wrap="\n", separator=", "> )
-
-<endif>
-<if(tokenNames)>
-
-  # register the proper human-readable name or literal value
-  # for each token type
-  #
-  # this is necessary because anonymous tokens, which are
-  # created from literal values in the grammar, do not
-  # have descriptive names
-  register_names( <tokenNames:{it | <it>}; separator=", ", anchor, wrap="\n"> )
-
-<endif>
-
-  <placeAction(scope="token",name="scheme")>
-  <placeAction(scope="token",name="members")>
-end<\n>
->>
-
-rootGrammarOutputFile() ::= <<
-module <recognizer.grammar.name>
-  <placeAction(scope="module",name="head")>
-  <tokenDataModule()>
-  <recognizer>
-  <placeAction(scope="module",name="foot")>
-end
->>
-
-delegateGrammarOutputFile() ::= <<
-require '<recognizer.grammar.delegator.recognizerName>'
-
-<delegateGrammarModuleHead(gram=recognizer.grammar.delegator)>
-  <recognizer>
-<delegateGrammarModuleTail(gram=recognizer.grammar.delegator)>
->>
-
-delegateGrammarModuleHead(gram) ::= <<
-<if(gram.grammarIsRoot)>
-module <gram.name>
-<else>
-<delegateGrammarModuleHead(gram=gram.delegator)><\n>
-class <gram.name>
-<endif>
->>
-
-delegateGrammarModuleTail(gram) ::= <<
-<if(gram.grammarIsRoot)>
-end # module <gram.name>
-<else>
-end # class <gram.name>
-<delegateGrammarModuleTail(gram=gram.delegator)><\n>
-<endif>
->>
-/* * * * * * * * * * R E C O G N I Z E R   C L A S S E S * * * * * * * * * */
-
-parser(
-  grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
-  ASTLabelType="Object", superClass="ANTLR3::Parser", labelType="ANTLR3::Token",
-  members={<actions.parser.members>}
-) ::= <<
-<if(grammar.grammarIsRoot)><autoloadDelegates()><endif>
-
-class <if(grammar.grammarIsRoot)>Parser<else><grammar.name><endif> \< <superClass>
-  <parserBody(inputStreamType="ANTLR3::TokenStream", rewriteElementType="Token", actionScope="parser", ...)>
-end # class <if(grammar.grammarIsRoot)>Parser<else><grammar.name><endif> \< <superClass>
-<if(!actions.(actionScope).main)>
-
-at_exit { <if(grammar.grammarIsRoot)>Parser<else><grammar.name><endif>.main( ARGV ) } if __FILE__ == $0
-<endif>
->>
-
-/** How to generate a tree parser; same as parser except the
-  * input stream is a different type.
-  */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="ANTLR3::TreeParser", members={<actions.treeparser.members>}) ::= <<
-<if(grammar.grammarIsRoot)><autoloadDelegates()><endif>
-
-class <if(grammar.grammarIsRoot)>TreeParser<else><grammar.name><endif> \< <superClass>
-  <parserBody(inputStreamType="TreeNodeStream", rewriteElementType="Node", actionScope="treeparser", ...)>
-end # class <if(grammar.grammarIsRoot)>TreeParser<else><grammar.name><endif> \< <superClass>
-<if(!actions.(actionScope).main)>
-
-at_exit { <if(grammar.grammarIsRoot)>TreeParser<else><grammar.name><endif>.main( ARGV ) } if __FILE__ == $0
-<endif>
->>
-
-parserBody(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, inputStreamType, superClass, filterMode, labelType, members, rewriteElementType, actionScope, ASTLabelType="Object") ::= <<
-@grammar_home = <grammar.name>
-<if(!grammar.grammarIsRoot)><autoloadDelegates()><\n><endif>
-<@mixins()>
-
-RULE_METHODS = [ <rules:{r|:<r.ruleName>}; separator=", ", wrap="\n", anchor> ].freeze
-
-<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeClass()><\n><endif>}>
-<rules:{it | <ruleAttributeScopeClass(.ruleDescriptor.ruleScope)>}>
-<if(grammar.delegators)>
-masters( <grammar.delegators:{d|:<d.name>}; separator=", "> )<\n>
-<endif>
-<if(grammar.directDelegates)>
-imports( <grammar.directDelegates:{d|:<d.name>}; separator=", "> )<\n>
-<endif>
-
-include TokenData
-
-begin
-  generated_using( "<fileName>", "<ANTLRVersion>", "<runtimeLibraryVersion()>" )
-rescue NoMethodError => error
-  # ignore
-end
-
-<if(!grammar.grammarIsRoot)>
-require '<grammar.composite.rootGrammar.recognizerName>'
-include <grammar.composite.rootGrammar.name>::TokenData<\n><\n>
-<endif>
-<parserConstructor()>
-<@additionalMembers()>
-<members>
-# - - - - - - - - - - - - Rules - - - - - - - - - - - - -
-<rules:{it | <it><\n>}>
-
-<if(grammar.delegatedRules)>
-# - - - - - - - - - - Delegated Rules - - - - - - - - - - -
-<grammar.delegatedRules:{ruleDescriptor|<delegateRule(ruleDescriptor)><\n>}>
-<endif>
-<if(cyclicDFAs)>
-# - - - - - - - - - - DFA definitions - - - - - - - - - - -
-<cyclicDFAs:{it | <cyclicDFA(it)>}>
-
-private
-
-def initialize_dfas
-  super rescue nil
-  <cyclicDFAs:{it | <cyclicDFAInit(it)>}>
-end
-
-<endif>
-<bitsets:{it | TOKENS_FOLLOWING_<it.name>_IN_<it.inName>_<it.tokenIndex> = Set[ <it.tokenTypes:{it | <it>}; separator=", "> ]<\n>}>
->>
-
-parserConstructor() ::= <<
-def initialize( <grammar.delegators:{g|<g:delegateName()>, }>input, options = {} )
-  super( input, options )
-<if(memoize)><if(grammar.grammarIsRoot)>
-  @state.rule_memory = {}
-<endif><endif>
-  <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeStack()><\n><endif>}><rules:{it | <ruleAttributeScopeStack(.ruleDescriptor.ruleScope)>}>
-  <placeAction(scope=actionScope,name="init")>
-  <grammar.delegators:{g|@<g:delegateName()> = <g:delegateName()><\n>}><grammar.directDelegates:{g|@<g:delegateName()> = <newDelegate(g)><\n>}><last(grammar.delegators):{g|@parent = @<g:delegateName()><\n>}><@init()>
-end
->>
-
-
-/* * * * * * * * * * * * * R U L E   M E T H O D S * * * * * * * * * * * * */
-
-/** A simpler version of a rule template, specific to the
-  * imaginary rules created for syntactic predicates.  Since they
-  * never have return values, parameters, etc., just generate the
-  * simplest possible method.  Don't do any of the normal
-  * memoization work in here either; it's a waste.  Because
-  * predicates cannot be inlined into the invoking rule, they
-  * need to be in a rule by themselves.
-  */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::= <<
-#
-# syntactic predicate <ruleName>
-#
-# (in <fileName>)
-# <description>
-#
-# This is an imaginary rule inserted by ANTLR to
-# implement a syntactic predicate decision
-#
-def <ruleName><if(ruleDescriptor.parameterScope)>( <ruleDescriptor.parameterScope:parameterScope()> )<endif>
-  <traceIn()><ruleLabelDefs()>
-  <block>
-ensure
-  <traceOut()>
-end
->>
-
-
-/** How to generate code for a rule.  This includes any return
-  * type data aggregates required for multiple return values.
-  */
-rule(ruleName, ruleDescriptor, block, emptyRule, description, exceptions, finally, memoize) ::= <<
-<returnScope(scope=ruleDescriptor.returnScope)>
-
-#
-# parser rule <ruleName>
-#
-# (in <fileName>)
-# <description>
-#
-def <ruleName><if(ruleDescriptor.parameterScope)>( <ruleDescriptor.parameterScope:parameterScope()> )<endif>
-  <traceIn()><ruleScopeSetUp()><ruleDeclarations()><ruleLabelDefs()><action(name="init", code=ruleDescriptor.actions.init)>
-  <@body><ruleBody()><@end>
-
-  return <ruleReturnValue()>
-end
-<if(ruleDescriptor.modifier)>
-
-<ruleDescriptor.modifier> :<ruleName> rescue nil<\n>
-<endif>
->>
-
-delegateRule(ruleDescriptor) ::= <<
-# delegated rule <ruleDescriptor.name>
-def <ruleDescriptor.name><if(ruleDescriptor.parameterScope)>( <ruleDescriptor.parameterScope:parameterScope()> )<endif>
-  <methodCall(del=ruleDescriptor.grammar, n=ruleDescriptor.name, args={<ruleDescriptor.parameterScope.attributes:{it | <it.name>}>})>
-end
->>
-// HELPERS
-
-recognizerClassName() ::= <<
-<if(TREE_PARSER)>TreeParser<elseif(PARSER)>Parser<else>Lexer<endif>
->>
-
-initializeDirectDelegate() ::= <<
-@<g:delegateName()> = <g.name>::<recognizerClassName()>.new(
-  <trunc(g.delegators):{p|<p:delegateName()>, }>self, input, options.merge( :state => @state )
-)
->>
-
-initializeDelegator() ::= <<
-@<g:delegateName()> = <g:delegateName()>
->>
-
-altSwitchCase(altNum,alt) ::= <<
-when <altNum>
-  <@prealt()>
-  <alt>
->>
-
-blockBody() ::= <<
-<@decision><decision><@end>
-case alt_<decisionNumber>
-<alts:{a | <altSwitchCase(i,a)>}; separator="\n">
-end
->>
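
blockBody passes the implicit loop index into altSwitchCase: inside an iterated anonymous template, StringTemplate defines <i> as the 1-based position (and <i0> as the 0-based one), which is how the generated case arms line up with ANTLR's 1-based alternative numbers. A throwaway sketch of just that mechanism, with made-up alternative bodies:

    import org.stringtemplate.v4.ST;

    public class IndexSketch {
        public static void main(String[] args) {
            // <i> is the 1-based iteration index, so each arm gets its alt number.
            ST st = new ST("<alts:{a | when <i>\n  <a>}; separator=\"\\n\">");
            st.add("alts", "match ID");
            st.add("alts", "match INT");
            System.out.println(st.render());
            // when 1
            //   match ID
            // when 2
            //   match INT
        }
    }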
-
-catch(decl, action) ::= <<
-# - - - - - - @catch <e.decl> - - - - - -
-rescue <e.decl>
-  <e.action><\n>
->>
-
-closureBlockLoop() ::= <<
-while true # decision <decisionNumber>
-  alt_<decisionNumber> = <maxAlt>
-  <@decisionBody><decision><@end>
-  case alt_<decisionNumber>
-  <alts:{a | <altSwitchCase(i,a)>}; separator="\n">
-  else
-    break # out of loop for decision <decisionNumber>
-  end
-end # loop for decision <decisionNumber>
->>
-
-delegateName(d) ::= <<
-<if(d.label)><d.label; format="label"><else><d.name; format="snakecase"><endif>
->>
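
The format="label", format="snakecase", etc. options used throughout this file are handled by an AttributeRenderer that the ANTLR Ruby target registers with the template group; StringTemplate simply hands the format string to the renderer. The real formatting rules live in the tool, so the renderer below is a deliberately crude stand-in meant only to show the plumbing, assuming the StringTemplate 4 API:

    import java.util.Locale;
    import org.stringtemplate.v4.AttributeRenderer;
    import org.stringtemplate.v4.ST;
    import org.stringtemplate.v4.STGroup;
    import org.stringtemplate.v4.STGroupString;

    public class RendererSketch {
        // Toy renderer: only understands "snakecase"; everything else passes through.
        static class NameRenderer implements AttributeRenderer {
            public String toString(Object value, String formatString, Locale locale) {
                String s = value.toString();
                if ("snakecase".equals(formatString)) {
                    return s.replaceAll("([a-z0-9])([A-Z])", "$1_$2").toLowerCase(Locale.ROOT);
                }
                return s;
            }
        }

        public static void main(String[] args) {
            String src =
                "delegateName(d) ::= <<\n" +
                "<d; format=\"snakecase\">\n" +
                ">>\n";
            STGroup g = new STGroupString(src);
            g.registerRenderer(String.class, new NameRenderer());
            ST st = g.getInstanceOf("delegateName");
            st.add("d", "SimpleCalc");
            System.out.println(st.render());   // prints: simple_calc
        }
    }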
-
-element(e) ::= <<
-<e.el><\n>
->>
-
-execForcedAction(action) ::= "<action>"
-
-globalAttributeScopeClass(scope) ::= <<
-<if(scope.attributes)>@@<scope.name> = Scope( <scope.attributes:{it | <it.decl; format="rubyString">}; separator=", "> )<\n><endif>
->>
-
-globalAttributeScopeStack(scope) ::= <<
-<if(scope.attributes)>@<scope.name>_stack = []<\n><endif>
->>
-
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-parameterScope(scope) ::= <<
-<scope.attributes:{it | <it.decl>}; separator=", ">
->>
-
-positiveClosureBlockLoop() ::= <<
-match_count_<decisionNumber> = 0
-while true
-  alt_<decisionNumber> = <maxAlt>
-  <@decisionBody><decision><@end>
-  case alt_<decisionNumber>
-  <alts:{a | <altSwitchCase(i,a)>}; separator="\n">
-  else
-    match_count_<decisionNumber> > 0 and break
-    <ruleBacktrackFailure()>
-    eee = EarlyExit(<decisionNumber>)
-    <@earlyExitException()><\n>
-    raise eee
-  end
-  match_count_<decisionNumber> += 1
-end<\n>
->>
-
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor:returnStructName(r=it)> = define_return_scope <scope.attributes:{it | :<it.decl>}; separator=", ">
-<endif>
->>
-
-returnStructName(r) ::= "<r.name; format=\"camelcase\">ReturnValue"
-
-ruleAttributeScopeClass ::= globalAttributeScopeClass
-ruleAttributeScopeStack ::= globalAttributeScopeStack
-
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>
-@state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )<\n>
-<endif>
->>
-
-ruleBody() ::= <<
-<if(memoize)><if(backtracking)>
-success = false # flag used for memoization<\n>
-<endif><endif>
-begin
-  <ruleMemoization(ruleName)><block><ruleCleanUp()><(ruleDescriptor.actions.after):execAction()>
-<if(memoize)><if(backtracking)>
-  success = true<\n>
-<endif><endif>
-<if(exceptions)>
-  <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-
-# - - - - - - - - @rulecatch - - - - - - - -
-<actions.(actionScope).rulecatch>
-<else>
-rescue ANTLR3::Error::RecognitionError => re
-  report_error(re)
-  recover(re)
-  <@setErrorReturnValue()>
-<endif>
-<endif>
-<endif>
-
-ensure
-  <traceOut()><memoize()><ruleScopeCleanUp()><finally>
-end
->>
-
-ruleReturnValue() ::= <%
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-return_value
-<endif>
-<endif>
-<endif>
-%>
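
ruleReturnValue is written with <%...%> rather than <<...>>. In StringTemplate 4 that form ignores the newlines and indentation inside the definition, so the nested conditionals can be laid out readably while the template still renders as a single bare expression. A small, hypothetical demonstration:

    import org.stringtemplate.v4.ST;
    import org.stringtemplate.v4.STGroup;
    import org.stringtemplate.v4.STGroupString;

    public class LineTemplateSketch {
        public static void main(String[] args) {
            // <%...%> drops the newlines around the conditional branches.
            String src =
                "pick(multi) ::= <%\n" +
                "<if(multi)>\n" +
                "return_value\n" +
                "<else>\n" +
                "nil\n" +
                "<endif>\n" +
                "%>\n";
            STGroup g = new STGroupString(src);
            ST st = g.getInstanceOf("pick");
            st.add("multi", true);
            System.out.println(st.render());   // prints just: return_value
        }
    }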
-
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-return_value = <returnStructName(r=ruleDescriptor)>.new
-
-# $rule.start = the first token seen before matching
-return_value.start = @input.look<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{a|<a.name> = <if(a.initValue)><a.initValue><else>nil<endif><\n>}>
-<endif>
-<if(memoize)>
-<ruleDescriptor.name>_start_index = @input.index<\n>
-<endif>
->>
-
-ruleLabelDef(label) ::= <<
-<label.label.text; format="label"> = nil<\n>
->>
-
-ruleLabelDefs() ::= <<
-<[
-    ruleDescriptor.tokenLabels,
-    ruleDescriptor.tokenListLabels,
-    ruleDescriptor.wildcardTreeLabels,
-    ruleDescriptor.wildcardTreeListLabels,
-    ruleDescriptor.ruleLabels,
-    ruleDescriptor.ruleListLabels
- ]:
- {<it.label.text; format="label"> = nil<\n>}
-><[
-    ruleDescriptor.tokenListLabels,
-    ruleDescriptor.ruleListLabels,
-    ruleDescriptor.wildcardTreeListLabels
-  ]:
-  {list_of_<it.label.text; format="label"> = []<\n>}
->
->>
-
-/* * * * * * * * * * * * * R U L E   H E L P E R S * * * * * * * * * * * * */
-
-traceIn() ::= <<
-<if(trace)>
-trace_in( __method__, <ruleDescriptor.index> )<\n>
-<else>
-# -> uncomment the next line to manually enable rule tracing
-# trace_in( __method__, <ruleDescriptor.index> )<\n>
-<endif>
->>
-
-traceOut() ::= <<
-<if(trace)>
-trace_out( __method__, <ruleDescriptor.index> )<\n>
-<else>
-# -> uncomment the next line to manually enable rule tracing
-# trace_out( __method__, <ruleDescriptor.index> )<\n>
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-# - - - - - - - rule clean up - - - - - - - -
-return_value.stop = @input.look( -1 )<\n>
-<endif>
-<endif>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-# rule memoization
-if @state.backtracking > 0 and already_parsed_rule?( __method__ )
-  success = true
-  return <ruleReturnValue()>
-end<\n>
-<endif>
->>
-
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{it | @<it>_stack.push( @@<it>.new )<\n>}><ruleDescriptor.ruleScope:{it | @<it.name>_stack.push( @@<it.name>.new )<\n>}>
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{it | @<it>_stack.pop<\n>}><ruleDescriptor.ruleScope:{it | @<it.name>_stack.pop<\n>}>
->>
-
-memoize() ::= <<
-<if(memoize)><if(backtracking)>
-memoize( __method__, <ruleDescriptor.name>_start_index, success ) if @state.backtracking > 0<\n>
-<endif><endif>
->>
-
-/** helper template to format a ruby method call */
-methodCall(n, del, args) ::= <<
-<if(del)>@<del:delegateName()>.<endif><n><if(args)>( <args; separator=", "> )<endif>
->>
-
-/* * * * * * * * * * * * * L E X E R   P A R T S * * * * * * * * * * * * * */
-
-actionGate() ::= "@state.backtracking == 0"
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# at line <description>
-alt_<decisionNumber> = <maxAlt>
-<decls>
-<@body><blockBody()><@end>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# at line <description>
-alt_<decisionNumber> = <maxAlt>
-<decls>
-<@decision><decision><@end>
-case alt_<decisionNumber>
-<alts:{a | <altSwitchCase(i,a)>}; separator="\n">
-end
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-<decls>
-<@prealt()>
-<alts>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-# at line <description>
-<decls>
-<@prealt()>
-<alts>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# at line <description>
-<decls>
-<@loopBody>
-<positiveClosureBlockLoop()>
-<@end>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 0 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-# at line <description>
-<decls>
-<@loopBody>
-<closureBlockLoop()>
-<@end>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) before code
-  * generation, so we can just use the normal block template
-  */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** An alternative is just a list of elements; at outermost
-  * level
-  */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-# at line <description>
-<elements:element()><rew>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label; format="label"> = <endif>match( <token>, TOKENS_FOLLOWING_<token>_IN_<ruleName>_<elementIndex> )
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<addToList(elem={<label; format="label">},...)>
->>
-
-/* TRY THIS:
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-list_of_<label; format="label"> << match( <token>, TOKENS_FOLLOWING_<token>_IN_<ruleName>_<elementIndex> )
->>
-*/
-
-addToList(label,elem) ::= <<
-list_of_<label; format="label"> \<\< <elem><\n>
->>
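
Because < and > delimit template expressions, literal angle brackets in the generated Ruby have to be escaped: addToList writes \<\< to emit Ruby's append operator, and <\n> is the template escape for an explicit newline. A throwaway illustration of the escape:

    import org.stringtemplate.v4.ST;

    public class EscapeSketch {
        public static void main(String[] args) {
            // "\<" puts a literal '<' in the output instead of starting an expression.
            ST st = new ST("list_of_ids \\<\\< <elem>");
            st.add("elem", "id");
            System.out.println(st.render());   // prints: list_of_ids << id
        }
    }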
-
-listLabel ::= addToList
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,terminalOptions,postmatchCode) ::= <<
-<if(label)>
-<label; format="label"> = @input.look<\n>
-<endif>
-if <s>
-  @input.consume
-  <postmatchCode>
-<if(!LEXER)>
-  @state.error_recovery = false<\n>
-<endif>
-else
-  <ruleBacktrackFailure()>
-  mse = MismatchedSet( nil )
-  <@mismatchedSetException()>
-<if(LEXER)>
-  recover mse
-  raise mse<\n>
-<else>
-  raise mse<\n>
-<endif>
-end
-<\n>
->>
-
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<addToList(elem={<label; format="label">},...)>
->>
-
-matchRuleBlockSet ::= matchSet
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)>
-<label; format="label"> = @input.look<\n>
-<endif>
-match_any
->>
-
-/* TRY THIS:
-wildcard(label,elementIndex) ::= <<
-<if(label)><label; format="label"> = <endif>match_any
->>
-*/
-
-wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<wildcard(...)>
-<addToList(elem={<label; format="label">},...)>
->>
-
-
-/** Match a rule reference by invoking it possibly with
-  * arguments and a return value or values.
-  */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-@state.following.push( TOKENS_FOLLOWING_<rule.name>_IN_<ruleName>_<elementIndex> )
-<if(label)><label; format="label"> = <endif><methodCall(del=scope, n={<rule.name>}, args=args)>
-@state.following.pop
->>
-
-/** ids+=ID */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<addToList(elem={<label; format="label">},...)>
->>
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if @input.peek == DOWN
-  match( DOWN, nil )
-  <children:element()>
-  match( UP, nil )
-end
-<else>
-match( DOWN, nil )
-<children:element()>
-match( UP, nil )
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when
-  * it is also hoisted into a prediction expression).
-  */
-validateSemanticPredicate(pred,description) ::= <<
-<if(backtracking)>
-unless ( <evalPredicate(...)> )
-  <ruleBacktrackFailure()>
-  raise FailedPredicate( "<ruleName>", "<description>" )
-end
-<else>
-raise FailedPredicate( "<ruleName>", "<description>" ) unless ( <evalPredicate(...)> )
-<endif>
->>
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-look_<decisionNumber>_<stateNumber> = @input.peek( <k> )<\n>
-<edges; separator="\nels">
-else
-<if(eotPredictsAlt)>
-  alt_<decisionNumber> = <eotPredictsAlt><\n>
-<else>
-<if(backtracking)>
-  <ruleBacktrackFailure()><\n>
-<endif>
-<@noViableAltException>
-  raise NoViableAlternative( "<description>", <decisionNumber>, <stateNumber> )<\n>
-<@end>
-<endif>
-end
->>
-
-/** Same as a normal DFA state except that we don't examine the
-  * lookahead for the bypass alternative.  It delays error
-  * detection, but this is faster, smaller, and more like what
-  * people expect.  For (X)? people expect "if ( LA(1)==X ) match(X);"
-  * and that's it.  If this is a semPredState, don't force the
-  * lookahead lookup; the predicates might not need it.
-  */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-look_<decisionNumber>_<stateNumber> = @input.peek( <k> )<\n>
-<edges; separator="\nels">
-end
->>
-
-
-/** A DFA state that is actually the loopback decision of a
-  * closure loop.  If end-of-token (EOT) predicts any of the
-  * targets then it should act like a default clause (i.e., no
-  * error can be generated). This is used only in the lexer so
-  * that for ('a')* on the end of a rule anything other than 'a'
-  * predicts exiting.  If this is a semPredState, don't force the
-  * lookahead lookup; the predicates might not need it.
-  */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-look_<decisionNumber>_<stateNumber> = @input.peek( <k> )<\n>
-<edges; separator="\nels"><\n>
-<if(eotPredictsAlt)>
-else
-  alt_<decisionNumber> = <eotPredictsAlt><\n>
-<endif>
-end
->>
-
-
-/** An accept state indicates a unique alternative has been
-  * predicted
-  */
-dfaAcceptState(alt) ::= "alt_<decisionNumber> = <alt>"
-
-/** A simple edge with an expression.  If the expression is
-  * satisfied, enter the target state.  To handle gated
-  * productions, we may have to evaluate some predicates for
-  * this edge.
-  */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( <labelExpr> )<if(predicates)> and ( <predicates> )<endif>
-  <targetState>
->>
-
-
-/** A DFA state where a SWITCH may be generated.  The code
-  * generator decides if this is possible:
-  * CodeGenerator.canGenerateSwitch().
-  */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-case look_<decisionNumber> = @input.peek( <k> )
-<edges; separator="\n">
-else
-<if(eotPredictsAlt)>
-  alt_<decisionNumber> = <eotPredictsAlt><\n>
-<else>
-<if(backtracking)>
-  <ruleBacktrackFailure()><\n>
-<endif>
-<@noViableAltException>
-  raise NoViableAlternative( "<description>", <decisionNumber>, <stateNumber> )<\n>
-<@end>
-<endif>
-end
->>
-
-
-dfaOptionalBlockStateSwitch(k, edges, eotPredictsAlt, description, stateNumber, semPredState) ::= <<
-case look_<decisionNumber> = @input.peek( <k> )
-<edges; separator="\n">
-end
->>
-
-dfaLoopbackStateSwitch(k, edges, eotPredictsAlt, description, stateNumber, semPredState) ::= <<
-case look_<decisionNumber> = @input.peek( <k> )
-<edges; separator="\n">
-<if(eotPredictsAlt)>
-else
-  alt_<decisionNumber> = <eotPredictsAlt>
-<endif>
-end
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-when <labels:{it | <it>}; separator=", "> then <targetState>
->>
-
-/** The code to initiate execution of a cyclic DFA; this is used
-  * in the rule to predict an alt just like the fixed DFA case.
-  * The <name> attribute is inherited via the parser, lexer, ...
-  */
-dfaDecision(decisionNumber, description) ::= <<
-alt_<decisionNumber> = @dfa<decisionNumber>.predict( @input )
->>
-
-/** Generate the tables and support code needed for the DFAState
-  * object argument.  Unless there is a semantic predicate (or a
-  * syn pred, which becomes a sem pred), all states should be
-  * encoded in the state tables. Consequently, the
-  * cyclicDFAState/cyclicDFAEdge/eotDFAEdge templates are not
-  * used except for special DFA states that cannot be encoded as
-  * a transition table.
-  */
-cyclicDFA(dfa) ::= <<
-class DFA<dfa.decisionNumber> \< ANTLR3::DFA
-  EOT = unpack( <dfa.javaCompressedEOT; anchor, separator=", ", wrap="\n"> )
-  EOF = unpack( <dfa.javaCompressedEOF; anchor, separator=", ", wrap="\n"> )
-  MIN = unpack( <dfa.javaCompressedMin; anchor, separator=", ", wrap="\n"> )
-  MAX = unpack( <dfa.javaCompressedMax; anchor, separator=", ", wrap="\n"> )
-  ACCEPT = unpack( <dfa.javaCompressedAccept; anchor, separator=", ", wrap="\n"> )
-  SPECIAL = unpack( <dfa.javaCompressedSpecial; anchor, separator=", ", wrap="\n"> )
-  TRANSITION = [
-    <dfa.javaCompressedTransition:{s|unpack( <s; wrap="\n", anchor, separator=", "> )}; separator=",\n">
-  ].freeze
-
-  ( 0 ... MIN.length ).zip( MIN, MAX ) do | i, a, z |
-    if a \> 0 and z \< 0
-      MAX[ i ] %= 0x10000
-    end
-  end
-
-  @decision = <dfa.decisionNumber>
-
-  <@errorMethod()>
-<if(dfa.description)>
-
-  def description
-    \<\<-'__dfa_description__'.strip!
-      <dfa.description>
-    __dfa_description__
-  end<\n>
-<endif>
-end<\n>
->>
-
-
-specialStateTransitionMethod(dfa) ::= <<
-def special_state_transition_for_dfa<dfa.decisionNumber>(s, input)
-  case s
-  <dfa.specialStateSTs:{state|when <i0>
-  <state>}; separator="\n">
-  end
-<if(backtracking)>
-  @state.backtracking > 0 and raise ANTLR3::Error::BacktrackingFailed<\n>
-<endif>
-  nva = ANTLR3::Error::NoViableAlternative.new( @dfa<dfa.decisionNumber>.description, <dfa.decisionNumber>, s, input )
-  @dfa<dfa.decisionNumber>.error( nva )
-  raise nva
-end
->>
-
-cyclicDFASynpred( name ) ::= <<
-def <name>() @recognizer.<name> end<\n>
->>
-
-cyclicDFAInit(dfa) ::= <<
-<if(dfa.specialStateSTs)>
-@dfa<dfa.decisionNumber> = DFA<dfa.decisionNumber>.new( self, <dfa.decisionNumber> ) do |s|
-  case s
-  <dfa.specialStateSTs:{state|when <i0>
-  <state>}; separator="\n">
-  end
-
-  if s \< 0
-<if(backtracking)>
-    @state.backtracking > 0 and raise ANTLR3::Error::BacktrackingFailed<\n>
-<endif>
-    nva = ANTLR3::Error::NoViableAlternative.new( @dfa<dfa.decisionNumber>.description, <dfa.decisionNumber>, s, input )
-    @dfa<dfa.decisionNumber>.error( nva )
-    raise nva
-  end
-
-  s
-end<\n>
-<else>
-@dfa<dfa.decisionNumber> = DFA<dfa.decisionNumber>.new( self, <dfa.decisionNumber> )<\n>
-<endif>
->>
-
-
-/** A special state in a cyclic DFA; special means has a
-  * semantic predicate or it's a huge set of symbols to check.
-  */
-cyclicDFAState(decisionNumber, stateNumber, edges, needErrorClause, semPredState) ::= <<
-look_<decisionNumber>_<stateNumber> = @input.peek
-<if(semPredState)>
-index_<decisionNumber>_<stateNumber> = @input.index
-@input.rewind( @input.last_marker, false )<\n>
-<endif>
-s = -1
-<edges; separator="els">end
-<if(semPredState)> <! return input cursor to state before we rewound !>
-@input.seek( index_<decisionNumber>_<stateNumber> )<\n>
-<endif>
->>
-
-/** Just like a fixed DFA edge, test the look and indicate
-  * what state to jump to next if successful.  Again, this is
-  * for special states.
-  */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ( <labelExpr> )<if(predicates)> and ( <predicates> )<endif>
-  s = <targetStateNumber><\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any
-  * char; always jump to the target.
-  */
-eotDFAEdge(targetStateNumber, edgeNumber, predicates) ::= <<
-e
-  s = <targetStateNumber><\n>
->>
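
A detail that is easy to miss in the DFA state templates: the edges are joined with the separator "els" (or "\nels"), every ordinary edge begins with "if (", and eotDFAEdge begins with a bare "e" — so the concatenation comes out as if / elsif / else in the generated Ruby. A stand-alone sketch of the string mechanics, with made-up edge bodies:

    import org.stringtemplate.v4.ST;

    public class SeparatorSketch {
        public static void main(String[] args) {
            // Joining edges with "\nels": "els"+"if" -> "elsif", "els"+"e" -> "else".
            ST st = new ST("<edges; separator=\"\\nels\">\nend");
            st.add("edges", "if ( look == TOKEN_A )\n  s = 1");
            st.add("edges", "if ( look == TOKEN_B )\n  s = 2");
            st.add("edges", "e\n  s = 3");
            System.out.println(st.render());
            // if ( look == TOKEN_A )
            //   s = 1
            // elsif ( look == TOKEN_B )
            //   s = 2
            // else
            //   s = 3
            // end
        }
    }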
-
-andPredicates(left,right) ::= "( <left> ) and ( <right> )"
-
-orPredicates(operands) ::= "( <first(operands)> )<rest(operands):{o|  or ( <o> )}>"
-
-notPredicate(pred) ::= "not ( <pred> )"
-
-evalPredicate(pred,description) ::= "( <pred> )"
-
-evalSynPredicate(pred,description) ::= <<
-syntactic_predicate?( :<pred:{it | <it>}> )
->>
-
-lookaheadTest(atom, k, atomAsInt) ::= "look_<decisionNumber>_<stateNumber> == <atom>"
-
-/** Sometimes a look test cannot assume that LA(k) is in a
-  * temp variable somewhere.  Must ask for the look
-  * directly.
-  */
-isolatedLookaheadTest(atom, k, atomAsInt) ::= "@input.peek(<k>) == <atom>"
-
-lookaheadRangeTest(lower, upper, k, rangeNumber, lowerAsInt, upperAsInt) ::= <<
-look_<decisionNumber>_<stateNumber>.between?( <lower>, <upper> )
->>
-
-isolatedLookaheadRangeTest(lower, upper, k, rangeNumber, lowerAsInt, upperAsInt) ::= <<
-@input.peek( <k> ).between?( <lower>, <upper> )
->>
-
-setTest(ranges) ::= <<
-<ranges; separator=" || ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>"
-
-scopeAttributeRef(scope, attr, index, negIndex) ::= <<
-<if(negIndex)>
-@<scope>_stack[ -<negIndex> ].<attr.name>
-<else>
-<if(index)>
-@<scope>_stack[ <index> ].<attr.name>
-<else>
-@<scope>_stack.last.<attr.name>
-<endif>
-<endif>
->>
-
-
-scopeSetAttributeRef(scope, attr, expr, index, negIndex) ::= <<
-<if(negIndex)>
-@<scope>_stack[ -<negIndex> ].<attr.name> = <expr>
-<else>
-<if(index)>
-@<scope>_stack[ <index> ].<attr.name> = <expr>
-<else>
-@<scope>_stack.last.<attr.name> = <expr>
-<endif>
-<endif>
->>
-
-
-/** $x is either a global scope or x is a rule with a dynamic scope;
-  * this refers to the stack itself, not the top of the stack.  It is useful for
-  * predicates like {$function.size()>0 &&
-  * $function::name.equals("foo")}?
-  */
-isolatedDynamicScopeRef(scope) ::= "@<scope>_stack"
-
-/** Reference an attribute of a rule; the rule might only have a
-  * single return value.
-  */
-ruleLabelRef(referencedRule, scope, attr) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-( <scope; format="label">.nil? ? nil : <scope; format="label">.<attr.name> )
-<else>
-<scope; format="label">
-<endif>
->>
-
-returnAttributeRef(ruleDescriptor, attr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-return_value.<attr.name>
-<else>
-<attr.name>
-<endif>
->>
-
-returnSetAttributeRef(ruleDescriptor, attr, expr) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-return_value.<attr.name> = <expr>
-<else>
-<attr.name> = <expr>
-<endif>
->>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label; format=\"label\">"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_of_<label; format=\"label\">"
-
-tokenLabelPropertyRef_text(scope, attr) ::= "<scope; format=\"label\">.text"
-tokenLabelPropertyRef_type(scope, attr) ::= "<scope; format=\"label\">.type"
-tokenLabelPropertyRef_line(scope, attr) ::= "<scope; format=\"label\">.line"
-tokenLabelPropertyRef_pos(scope, attr) ::= "<scope; format=\"label\">.column"
-tokenLabelPropertyRef_channel(scope, attr) ::= "<scope; format=\"label\">.channel"
-tokenLabelPropertyRef_index(scope, attr) ::= "<scope; format=\"label\">.index"
-tokenLabelPropertyRef_tree(scope, attr) ::= "tree_for_<scope>"
-
-ruleLabelPropertyRef_start(scope, attr) ::= "<scope; format=\"label\">.start"
-ruleLabelPropertyRef_stop(scope, attr) ::= "<scope; format=\"label\">.stop"
-ruleLabelPropertyRef_tree(scope, attr) ::= "<scope; format=\"label\">.tree"
-
-ruleLabelPropertyRef_text(scope, attr) ::= <<
-<if(TREE_PARSER)>
-(
-  @input.token_stream.to_s(
-    @input.tree_adaptor.token_start_index( <scope; format="label">.start ),
-    @input.tree_adaptor.token_stop_index( <scope; format="label">.start )
-  ) if <scope; format="label">
-)
-<else>
-( <scope; format="label"> && @input.to_s( <scope; format="label">.start, <scope; format="label">.stop ) )
-<endif>
->>
-ruleLabelPropertyRef_st(scope, attr) ::= "( <scope; format=\"label\"> && <scope; format=\"label\">.template )"
-
-/******************************************************************************
- *****************  L E X E R - O N L Y   T E M P L A T E S  ******************
- ******************************************************************************/
-
-lexerSynpred(name) ::= ""
-
-lexer(grammar, name, tokens, scopes, rules, numRules, labelType="ANTLR3::Token", filterMode, superClass="ANTLR3::Lexer") ::= <<
-<if(grammar.grammarIsRoot)><autoloadDelegates()><endif>
-
-class <if(grammar.delegator)><grammar.name><else>Lexer<endif> \< <superClass>
-  @grammar_home = <grammar.name>
-<if(!grammar.grammarIsRoot)>
-  <autoloadDelegates()><\n>
-<endif>
-  include TokenData
-<if(filterMode)>
-  include ANTLR3::FilterMode<\n>
-<endif>
-  <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeClass()><\n><endif>}>
-
-  begin
-    generated_using( "<fileName>", "<ANTLRVersion>", "<runtimeLibraryVersion()>" )
-  rescue NoMethodError => error
-    # ignore
-  end
-
-  RULE_NAMES   = [ <trunc(rules):{r|"<r.ruleName>"}; separator=", ", wrap="\n", anchor> ].freeze
-  RULE_METHODS = [ <trunc(rules):{r|:<r.ruleName; format="lexerRule">}; separator=", ", wrap="\n", anchor> ].freeze
-
-<if(grammar.delegators)>
-  masters( <grammar.delegators:{d|:<d.name>}; separator=", "> )<\n>
-<endif>
-<if(grammar.directDelegates)>
-  imports( <grammar.directDelegates:{d|:<d.name>}; separator=", "> )<\n>
-<endif>
-
-  def initialize( <grammar.delegators:{g|<g:delegateName()>, }>input=nil, options = {} )
-    super( input, options )
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-    @state.rule_memory = {}<\n>
-<endif>
-<endif>
-    <grammar.delegators:{g|@<g:delegateName()> = <g:delegateName()><\n>}><grammar.directDelegates:{g|@<g:delegateName()> = <newDelegate(g)><\n>}><last(grammar.delegators):{g|@parent = @<g:delegateName()><\n>}><placeAction(scope="lexer",name="init")>
-  end
-
-  <placeAction(scope="lexer",name="members")>
-
-  # - - - - - - - - - - - lexer rules - - - - - - - - - - - -
-  <rules:{it | <it><\n>}>
-<if(grammar.delegatedRules)>
-
-  # - - - - - - - - - - delegated rules - - - - - - - - - - -
-  <grammar.delegatedRules:{ruleDescriptor|<delegateLexerRule(ruleDescriptor)><\n><\n>}>
-<endif>
-<if(cyclicDFAs)>
-
-  # - - - - - - - - - - DFA definitions - - - - - - - - - - -
-  <cyclicDFAs:cyclicDFA()>
-
-  private
-
-  def initialize_dfas
-    super rescue nil
-    <cyclicDFAs:cyclicDFAInit()>
-  end
-
-<endif>
-end # class <if(grammar.delegator)><grammar.name><else>Lexer<endif> \< <superClass>
-<if(!actions.(actionScope).main)>
-
-at_exit { <if(grammar.delegator)><grammar.name><else>Lexer<endif>.main( ARGV ) } if __FILE__ == $0
-<endif>
->>
-
-
-lexerRuleLabelDefs() ::= <<
-<if([ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.ruleLabels,ruleDescriptor.charLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels])>
-# - - - - label initialization - - - -
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.ruleLabels,ruleDescriptor.charLabels]:{it | <it.label.text; format="label"> = nil<\n>}>
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{it | list_of_<it.label.text; format="label"> = [] unless defined?(list_of_<it.label.text; format="label">)<\n>}>
-<endif>
->>
-
-
-/** How to generate a rule in the lexer; naked blocks are used
-  * for fragment rules.
-  */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-# lexer rule <ruleName; format="lexerRule"> (<ruleName>)
-# (in <fileName>)
-def <ruleName; format="lexerRule"><if(ruleDescriptor.parameterScope)>( <ruleDescriptor.parameterScope:parameterScope()> )<endif>
-  <traceIn()><ruleScopeSetUp()><ruleDeclarations()><if(memoize)>
-<if(backtracking)>
-
-  # backtracking success
-  success = false<\n>
-<endif>
-<endif>
-<if(nakedBlock)>
-  <ruleMemoization({<ruleName; format="lexerRule">})><lexerRuleLabelDefs()><action(name="init", code=ruleDescriptor.actions.init)>
-
-  # - - - - main rule block - - - -
-  <block>
-<else>
-
-  type = <ruleName>
-  channel = ANTLR3::DEFAULT_CHANNEL
-  <ruleMemoization(ruleName)><lexerRuleLabelDefs()><action(name="init", code=ruleDescriptor.actions.init)>
-
-  # - - - - main rule block - - - -
-  <block>
-  <ruleCleanUp()>
-
-  @state.type = type
-  @state.channel = channel
-<(ruleDescriptor.actions.after):execAction()>
-<endif>
-<if(memoize)><if(backtracking)>
-  success = false<\n>
-<endif><endif>
-ensure
-  <traceOut()><ruleScopeCleanUp()><memoize()>
-end
-<! <if(ruleDescriptor.modifier)>
-
-<ruleDescriptor.modifier> :<ruleName; format="lexerRule"><\n>
-<endif> !>
->>
-
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label; format=\"label\">"
-lexerRuleLabelPropertyRef_line(scope, attr) ::= "<scope; format=\"label\">.line"
-lexerRuleLabelPropertyRef_type(scope, attr) ::= "<scope; format=\"label\">.type"
-lexerRuleLabelPropertyRef_pos(scope, attr) ::= "<scope; format=\"label\">.column"
-lexerRuleLabelPropertyRef_channel(scope, attr) ::= "<scope; format=\"label\">.channel"
-lexerRuleLabelPropertyRef_index(scope, attr) ::= "<scope; format=\"label\">.index"
-lexerRuleLabelPropertyRef_text(scope, attr) ::= "<scope; format=\"label\">.text"
-
-
-/** How to generate code for the implicitly-defined lexer
-  * grammar rule that chooses between lexer rules.
-  */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-# main rule used to study the input at the current position,
-# and choose the proper lexer rule to call in order to
-# fetch the next token
-#
-# usually, you don't make direct calls to this method,
-# but instead use the next_token method, which will
-# build and emit the actual next token
-def <ruleName; format="lexerRule">
-  <block>
-end
->>
-
-lexerRulePropertyRef_text(scope, attr) ::= "self.text"
-lexerRulePropertyRef_type(scope, attr) ::= "type"
-lexerRulePropertyRef_line(scope, attr) ::= "@state.token_start_line"
-lexerRulePropertyRef_pos(scope, attr)  ::= "@state.token_start_column"
-
-/** Undefined, but present for consistency with Token
-  * attributes; set to -1
-  */
-lexerRulePropertyRef_index(scope, attr) ::= "-1"
-lexerRulePropertyRef_channel(scope, attr) ::= "channel"
-lexerRulePropertyRef_start(scope, attr) ::= "@state.token_start_position"
-lexerRulePropertyRef_stop(scope, attr) ::= "( self.character_index - 1 )"
-
-/** A lexer rule reference */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
-<if(label)>
-<label; format="label">_start_<elementIndex> = self.character_index
-<methodCall(n={<rule.name; format="lexerRule">},del=scope,args=args)>
-<label; format="label"> = create_token do |t|
-  t.input   = @input
-  t.type    = ANTLR3::INVALID_TOKEN_TYPE
-  t.channel = ANTLR3::DEFAULT_CHANNEL
-  t.start   = <label; format="label">_start_<elementIndex>
-  t.stop    = self.character_index - 1
-end
-<else>
-<methodCall(n={<rule.name; format="lexerRule">}, del=scope, args=args)>
-<endif>
->>
-
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(...)>
-<addToList(elem={<label; format="label">},...)>
->>
-
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label; format="label"> = @input.peek<\n>
-<endif>
-match_any
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<addToList(elem={<label; format="label">},...)>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label; format="label"> = @input.peek<\n>
-<endif>
-match( <char> )
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label; format="label"> = @input.peek<\n>
-<endif>
-match_range( <a>, <b> )
->>
-
-filteringNextToken() ::= ""
-filteringActionGate() ::= "@state.backtracking == 1"
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex) ::= <<
-<if(label)>
-<label; format="label">_start = self.character_index
-match( <string> )
-<label; format="label"> = create_token do |t|
-  t.input   = @input
-  t.type    = ANTLR3::INVALID_TOKEN_TYPE
-  t.channel = ANTLR3::DEFAULT_CHANNEL
-  t.start   = <label; format="label">_start
-  t.stop    = character_index - 1
-end
-<else>
-match( <string> )
-<endif>
->>
-
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-<label; format="label">_start_<elementIndex> = character_index
-match( ANTLR3::EOF )
-<label; format="label"> = create_token do |t|
-  t.input   = @input
-  t.type    = ANTLR3::INVALID_TOKEN_TYPE
-  t.channel = ANTLR3::DEFAULT_CHANNEL
-  t.start   = <label; format="label">_start_<elementIndex>
-  t.stop    = character_index - 1
-end<\n>
-<else>
-match( ANTLR3::EOF )<\n>
-<endif>
->>
-
-// used for left-recursive rules
-recRuleDefArg()                       ::= "int <recRuleArg()>"
-recRuleArg()                          ::= "_p"
-recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
-recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
-recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
-
-/** $start in parser rule */
-rulePropertyRef_start(scope, attr) ::= "return_value.start"
-
-/** $stop in parser rule */
-rulePropertyRef_stop(scope, attr) ::= "return_value.stop"
-
-/** $tree in parser rule */
-rulePropertyRef_tree(scope, attr) ::= "return_value.tree"
-
-/** $text in parser rule */
-rulePropertyRef_text(scope, attr) ::= "@input.to_s( return_value.start, @input.look( -1 ) )"
-
-/** $template in parser rule */
-rulePropertyRef_st(scope, attr) ::= "return_value.template"
-
-ruleSetPropertyRef_tree(scope, attr, expr) ::= "return_value.tree = <expr>"
-
-ruleSetPropertyRef_st(scope, attr, expr) ::= "return_value.template = <expr>"
-
-/** How to execute an action */
-execAction(action) ::= <<
-<if(backtracking)>
-# syntactic predicate action gate test
-if <actions.(actionScope).synpredgate>
-  # --> action
-  <action>
-  # \<-- action
-end
-<else>
-# --> action
-<action>
-# \<-- action
-<endif>
->>
-
-codeFileExtension() ::= ".rb"
-
-true()  ::= "true"
-false() ::= "false"
-
-action(name, code) ::= <<
-<if(code)>
-# - - - - @<name> action - - - -
-<code><\n>
-<endif>
->>
-
-autoloadDelegates() ::= <<
-<if(grammar.directDelegates)>
-<grammar.directDelegates:{it | autoload :<it.name>, "<it.recognizerName>"<\n>}>
-<endif>
->>
-
-delegateLexerRule(ruleDescriptor) ::= <<
-# delegated lexer rule <ruleDescriptor.name; format="lexerRule"> (<ruleDescriptor.name> in the grammar)
-def <ruleDescriptor.name; format="lexerRule"><if(ruleDescriptor.parameterScope)>( <ruleDescriptor.parameterScope:parameterScope()> )<endif>
-  <methodCall(del=ruleDescriptor.grammar, n={<ruleDescriptor.name; format="lexerRule">}, args=ruleDescriptor.parameterScope.attributes)>
-end
->>
-
-rootClassName() ::= <<
-<if(grammar.grammarIsRoot)><grammar.name><else><grammar.composite.rootGrammar.name><endif>::<if(TREE_PARSER)>TreeParser<elseif(PARSER)>Parser<else>Lexer<endif>
->>
-
-grammarClassName() ::= <<
-<gram.name>::<if(TREE_PARSER)>TreeParser<elseif(PARSER)>Parser<else>Lexer<endif>
->>
-
-newDelegate(gram) ::= <<
-<gram.name>.new( <trunc(gram.delegators):{p|<p:delegateName()>, }>
-  self, @input, :state => @state<@delegateOptions()>
-)
->>
-
-placeAction(scope, name) ::= <<
-<if(actions.(scope).(name))>
-# - - - - - - begin action @<scope>::<name> - - - - - -
-<if(fileName)># <fileName><\n><endif>
-<actions.(scope).(name)>
-# - - - - - - end action @<scope>::<name> - - - - - - -<\n>
-<endif>
->>
-
-runtimeLibraryVersion() ::= "1.8.1"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Scala/Scala.stg b/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Scala/Scala.stg
deleted file mode 100644
index cd3f92e..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Scala/Scala.stg
+++ /dev/null
@@ -1,1385 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2010 Matthew Lloyd
- http://linkedin.com/in/matthewl
-
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-scalaTypeInitMap ::= [
-	"Int":"0",
-	"Long":"0",
-	"Float":"0.0f",
-	"Double":"0.0",
-	"Boolean":"false",
-	"Byte":"0",
-	"Short":"0",
-	"Char":"0",
-	default:"null" // anything other than an atomic type
-]
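
scalaTypeInitMap is a StringTemplate dictionary: templates can look a value up with the .(key) operator and fall back to the default entry for any type that is not listed, so generated var declarations always get a usable initializer. A hypothetical look at the mechanism on its own:

    import org.stringtemplate.v4.ST;
    import org.stringtemplate.v4.STGroup;
    import org.stringtemplate.v4.STGroupString;

    public class DictSketch {
        public static void main(String[] args) {
            String src =
                "typeInit ::= [\"Int\":\"0\", \"Boolean\":\"false\", default:\"null\"]\n" +
                "decl(name, type) ::= <<\n" +
                "var <name>: <type> = <typeInit.(type)>\n" +
                ">>\n";
            STGroup g = new STGroupString(src);
            ST a = g.getInstanceOf("decl");
            a.add("name", "count");
            a.add("type", "Int");
            ST b = g.getInstanceOf("decl");
            b.add("name", "label");
            b.add("type", "String");
            System.out.println(a.render());   // var count: Int = 0
            System.out.println(b.render());   // var label: String = null
        }
    }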
-
-/** The overall file structure of a recognizer; stores methods for rules
- *  and cyclic DFAs plus support code.
- */
-outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
-           docComment, recognizer,
-           name, tokens, tokenNames, rules, cyclicDFAs,
-	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
-	   backtracking, synpreds, memoize, numRules,
-	   fileName, ANTLRVersion, generatedTimestamp, trace,
-	   scopes, superClass, literals) ::=
-<<
-// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
-<actions.(actionScope).header>
-
-<@imports>
-import org.antlr.runtime._
-<if(TREE_PARSER)>
-import org.antlr.runtime.tree._
-<endif>
-<@end>
-
-<docComment>
-<recognizer>
->>
-
-lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="CommonToken",
-      superClass="Lexer") ::= <<
-object <grammar.recognizerName> {
-    <tokens:{it | val <it.name> = <it.type>}; separator="\n">
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-}
-
-class <grammar.recognizerName>(input: CharStream, state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>: RecognizerSharedState) extends <@superClassName><superClass><@end>(input, state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
-    import <grammar.recognizerName>._
-    <actions.lexer.members>
-
-    // delegates
-    <grammar.delegates:
-         {g|<g.recognizerName> <g:delegateName()>}; separator="\n">
-    // delegators
-    <grammar.delegators:
-         {g|<g.recognizerName> <g:delegateName()>}; separator="\n">
-    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
-
-    <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScope()><endif>}>
-
-    def this(input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>: CharStream) =
-        this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
-
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-        state.ruleMemo = new Array[java.util.Map[_,_]](<numRules>+1)<\n> <! index from 1..n !>
-<endif>
-<endif>
-        <grammar.directDelegates:
-         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this)}; separator="\n">
-        <grammar.delegators:
-         {g|this.<g:delegateName()> = <g:delegateName()>}; separator="\n">
-        <last(grammar.delegators):{g|gParent = <g:delegateName()>}>
-
-    override def getGrammarFileName = "<fileName>"
-
-<if(filterMode)>
-    <filteringNextToken()>
-<endif>
-    <rules; separator="\n\n">
-
-    <synpreds:{p | <lexerSynpred(p)>}>
-    <cyclicDFAs:{dfa | private val dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this)}; separator="\n">
-}
->>
-
-/** An override of Lexer.nextToken() that backtracks over mTokens() looking
- *  for matches.  No error can be generated upon failure; just rewind, consume
- *  a token and then try again.  The backtracking flag needs to be set as well.
- *  Make rule memoization happen only at levels above 1, as we start mTokens
- *  at backtracking==1.
- */
-filteringNextToken() ::= <<
-override def nextToken(): Token = {
-    while (true) {
-        if ( input.LA(1)==CharStream.EOF ) {
-            var eof: Token = new CommonToken(input.asInstanceOf[CharStream], Token.EOF,
-                                        Token.DEFAULT_CHANNEL,
-                                        input.index(),input.index())
-            eof.setLine(getLine())
-            eof.setCharPositionInLine(getCharPositionInLine())
-            return eof
-        }
-        state.token = null
-	state.channel = Token.DEFAULT_CHANNEL
-        state.tokenStartCharIndex = input.index()
-        state.tokenStartCharPositionInLine = input.getCharPositionInLine()
-        state.tokenStartLine = input.getLine()
-	state.text = null
-        try {
-            val m = input.mark()
-            state.backtracking=1 <! means we won't throw slow exception !>
-            state.failed=false
-            mTokens()
-            state.backtracking=0
-            <! mTokens backtracks with synpred at backtracking==2
-               and we set the synpredgate to allow actions at level 1. !>
-            if ( state.failed ) {
-                input.rewind(m)
-                input.consume() <! advance one char and try again !>
-            }
-            else {
-                emit()
-                return state.token
-            }
-        }
-        catch {
-            case re: RecognitionException =>
-            // shouldn't happen in backtracking mode, but...
-            reportError(re)
-            recover(re)
-        }
-    }
-}
-
-override def memoize(input: IntStream,
-		ruleIndex: Int,
-		ruleStartIndex: Int) = {
-if ( state.backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex)
-}
-
-override def alreadyParsedRule(input: IntStream, ruleIndex: Int): Boolean = {
-if ( state.backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex)
-return false
-}
->>
-
-actionGate() ::= "state.backtracking==0"
-
-filteringActionGate() ::= "state.backtracking==1"
-
-/** How to generate a parser */
-genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
-              bitsets, inputStreamType, superClass,
-              labelType, members, rewriteElementType,
-              filterMode, ASTLabelType="Object") ::= <<
-object <grammar.recognizerName> {
-<if(grammar.grammarIsRoot)>
-    val tokenNames = Array(
-        "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
-    )<\n>
-<endif>
-
-    <tokens:{it | val <it.name> = <it.type>}; separator="\n">
-
-    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
-
-    <bitsets:{it | <bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
-                    words64=it.bits)>}>
-}
-
-class <grammar.recognizerName>(input: <inputStreamType>, state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>: RecognizerSharedState) extends <@superClassName><superClass><@end>(input, state) {
-    import <grammar.recognizerName>._
-    // delegates
-    <grammar.delegates:
-         {g|public <g.recognizerName> <g:delegateName()>}; separator="\n">
-    // delegators
-    <grammar.delegators:
-         {g|public <g.recognizerName> <g:delegateName()>}; separator="\n">
-    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
-
-    <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScope()><endif>}>
-
-    <@members>
-    <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
-    def this(input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>: <inputStreamType>) =
-        this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
-
-        <parserCtorBody()>
-        <grammar.directDelegates:
-         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this)}; separator="\n">
-        <grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>}; separator="\n">
-        <last(grammar.delegators):{g|gParent = <g:delegateName()>}>
-    <@end>
-
-    override def getTokenNames: Array[String] = tokenNames
-    override def getGrammarFileName = "<fileName>"
-
-    <members>
-
-    <rules; separator="\n\n">
-
-<! generate rule/method definitions for imported rules so they
-   appear to be defined in this recognizer. !>
-    // Delegated rules
-<grammar.delegatedRules:{ruleDescriptor|
-    @throws(classOf[RecognitionException])
-    def <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope()>): <returnType()> = \{ <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">) \}}; separator="\n">
-
-    <synpreds:{p | <synpred(p)>}>
-
-    <cyclicDFAs:{dfa | private val dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this)}; separator="\n">
-}
->>
-
-parserCtorBody() ::= <<
-<if(memoize)>
-<if(grammar.grammarIsRoot)>
-this.state.ruleMemo = new Array[java.util.Map[_,_]](<length(grammar.allImportedRules)>+1)<\n> <! index from 1..n !>
-<endif>
-<endif>
-<grammar.delegators:
- {g|this.<g:delegateName()> = <g:delegateName()>}; separator="\n">
->>
-
-parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
-       ASTLabelType="Object", superClass="Parser", labelType="Token",
-       members={<actions.parser.members>}) ::= <<
-<genericParser(inputStreamType="TokenStream", rewriteElementType="Token", ...)>
->>
-
-/** How to generate a tree parser; same as parser except the input
- *  stream is a different type.
- */
-treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
-           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object",
-           superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
-           members={<actions.treeparser.members>}
-           ) ::= <<
-<genericParser(inputStreamType="TreeNodeStream", rewriteElementType="Node", ...)>
->>
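
parser and treeParser are thin wrappers: they invoke genericParser with a couple of overridden attributes plus "...", which tells StringTemplate to pass all remaining attributes of the calling template through to the callee. A small, made-up demonstration of that pass-through syntax:

    import org.stringtemplate.v4.ST;
    import org.stringtemplate.v4.STGroup;
    import org.stringtemplate.v4.STGroupString;

    public class PassThroughSketch {
        public static void main(String[] args) {
            // wrapper() overrides one attribute and forwards the rest with "...".
            String src =
                "generic(name, inputStreamType) ::= <<\n" +
                "class <name>(input: <inputStreamType>)\n" +
                ">>\n" +
                "wrapper(name) ::= <<\n" +
                "<generic(inputStreamType=\"TokenStream\", ...)>\n" +
                ">>\n";
            STGroup g = new STGroupString(src);
            ST st = g.getInstanceOf("wrapper");
            st.add("name", "ExprParser");
            System.out.println(st.render());   // class ExprParser(input: TokenStream)
        }
    }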
-
-/** A simpler version of a rule template that is specific to the imaginary
- *  rules created for syntactic predicates.  Since they never have return
- *  values, parameters, etc., just generate the simplest possible method.  Don't do
- *  any of the normal memoization work in here either; it's a waste.
- *  As predicates cannot be inlined into the invoking rule, they need to
- *  be in a rule by themselves.
- */
-synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
-<<
-// $ANTLR start <ruleName>
-@throws(classOf[RecognitionException])
-def <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope()>): Unit = {
-    <ruleLabelDefs()>
-<if(trace)>
-    traceIn("<ruleName>_fragment", <ruleDescriptor.index>)
-    try {
-        <block>
-    }
-    finally {
-        traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
-    }
-<else>
-    <block>
-<endif>
-}
-// $ANTLR end <ruleName>
->>
-
-synpred(name) ::= <<
-final def <name>(): Boolean = {
-    state.backtracking+=1
-    <@start()>
-    val start = input.mark()
-    try {
-        <name>_fragment() // can never throw exception
-    } catch {
-        case re: RecognitionException =>
-        System.err.println("impossible: "+re)
-    }
-    val success = !state.failed
-    input.rewind(start)
-    <@stop()>
-    state.backtracking-=1
-    state.failed=false
-    success
-}<\n>
->>
-
-lexerSynpred(name) ::= <<
-<synpred(name)>
->>
-
-ruleMemoization(name) ::= <<
-<if(memoize)>
-if ( state.backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()> }
-<endif>
->>
-
-/** How to test for failure and return from rule */
-checkRuleBacktrackFailure() ::= <<
-<if(backtracking)>if (state.failed) return <ruleReturnValue()><endif>
->>
-
-/** This rule has failed; exit, indicating failure during backtracking */
-ruleBacktrackFailure() ::= <<
-<if(backtracking)>if (state.backtracking>0) {state.failed=true; return <ruleReturnValue()>}<endif>
->>
-
-/** How to generate code for a rule.  This includes any return type
- *  data aggregates required for multiple return values.
- */
-rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
-<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
-<returnScope(scope=ruleDescriptor.returnScope)>
-
-// $ANTLR start "<ruleName>"
-// <fileName>:<description>
-@throws(classOf[RecognitionException])
-final def <ruleName>(<ruleDescriptor.parameterScope:parameterScope()>): <returnType()> = {
-    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>)<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    <ruleLabelDefs()>
-    <ruleDescriptor.actions.init>
-    <@preamble()>
-    try {
-        <ruleMemoization(name=ruleName)>
-        <block>
-        <ruleCleanUp()>
-        <(ruleDescriptor.actions.after):execAction()>
-    }
-<if(exceptions)>
-    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
-<else>
-<if(!emptyRule)>
-<if(actions.(actionScope).rulecatch)>
-    <actions.(actionScope).rulecatch>
-<else>
-    catch {
-        case re: RecognitionException =>
-        reportError(re)
-        recover(input,re)
-	<@setErrorReturnValue()>
-    }<\n>
-<endif>
-<endif>
-<endif>
-    finally {
-        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
-        <memoize()>
-        <ruleScopeCleanUp()>
-        <finally>
-    }
-    <@postamble()>
-    return <ruleReturnValue()>
-}
-// $ANTLR end "<ruleName>"
->>
-
-catch(decl,action) ::= <<
-catch (<e.decl>) {
-    <e.action>
-}
->>
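
Both catch templates (here and in the Ruby group above) declare decl and action as parameters but then read <e.decl> and <e.action>. That works because StringTemplate attributes are dynamically scoped: e is still visible from the enclosing <exceptions:{e|<catch(decl=e.decl,action=e.action)>}> application in the rule template. A reduced, hypothetical version of that pattern:

    import org.stringtemplate.v4.ST;
    import org.stringtemplate.v4.STGroup;
    import org.stringtemplate.v4.STGroupString;

    public class ScopingSketch {
        // Made-up stand-in for ANTLR's exception-handler spec.
        public static class Exc {
            public String decl = "re: RecognitionException";
            public String action = "reportError(re)";
        }

        public static void main(String[] args) {
            // inner() never receives 'e' explicitly, yet <e.decl> resolves
            // through the enclosing {e | ...} application.
            String src =
                "outer(exceptions) ::= <<\n" +
                "<exceptions:{e|<inner(decl=e.decl, action=e.action)>}; separator=\"\\n\">\n" +
                ">>\n" +
                "inner(decl, action) ::= <<\n" +
                "catch (<e.decl>) { <e.action> }\n" +
                ">>\n";
            STGroup g = new STGroupString(src);
            ST st = g.getInstanceOf("outer");
            st.add("exceptions", new Exc());
            System.out.println(st.render());   // catch (re: RecognitionException) { reportError(re) }
        }
    }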
-
-ruleDeclarations() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-val retval = new <returnType()>()
-retval.start = input.LT(1)<\n>
-<else>
-<ruleDescriptor.returnScope.attributes:{ a |
-var <a.name>: <a.type> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>
-}>
-<endif>
-<if(memoize)>
-val <ruleDescriptor.name>_StartIndex = input.index()
-<endif>
->>
-
-ruleScopeSetUp() ::= <<
-<ruleDescriptor.useScopes:{it | <it>_stack.push(new <it>_scope())}; separator="\n">
-<ruleDescriptor.ruleScope:{it | <it.name>_stack.push(new <it.name>_scope())}; separator="\n">
->>
-
-ruleScopeCleanUp() ::= <<
-<ruleDescriptor.useScopes:{it | <it>_stack.pop()}; separator="\n">
-<ruleDescriptor.ruleScope:{it | <it.name>_stack.pop()}; separator="\n">
->>
-
-
-ruleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
-  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it | var <it.label.text>: <labelType> = null}; separator="\n"
->
-<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
-    :{it | var list_<it.label.text>: java.util.List=null}; separator="\n"
->
-<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
-<ruleDescriptor.ruleListLabels:{ll|var <ll.label.text>: RuleReturnScope = null}; separator="\n">
->>
-
-lexerRuleLabelDefs() ::= <<
-<[ruleDescriptor.tokenLabels,
-  ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleLabels]
-    :{it | var <it.label.text>: <labelType>=null}; separator="\n"
->
-<ruleDescriptor.charLabels:{it | int <it.label.text>;}; separator="\n">
-<[ruleDescriptor.tokenListLabels,
-  ruleDescriptor.ruleListLabels]
-    :{it | var list_<it.label.text>: java.util.List=null}; separator="\n"
->
->>
-
-ruleReturnValue() ::= <<
-<if(!ruleDescriptor.isSynPred)>
-<if(ruleDescriptor.hasReturnValue)>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnName>
-<else>
-retval
-<endif>
-<endif>
-<endif>
->>
-
-ruleCleanUp() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<if(!TREE_PARSER)>
-retval.stop = input.LT(-1)<\n>
-<endif>
-<endif>
->>
-
-memoize() ::= <<
-<if(memoize)>
-<if(backtracking)>
-if ( state.backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex) }
-<endif>
-<endif>
->>
-
-/** How to generate a rule in the lexer; naked blocks are used for
- *  fragment rules.
- */
-lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
-// $ANTLR start "<ruleName>"
-@throws(classOf[RecognitionException])
-final def m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>): Unit = {
-    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>)<endif>
-    <ruleScopeSetUp()>
-    <ruleDeclarations()>
-    try {
-<if(nakedBlock)>
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        try <block><\n>
-<else>
-        var _type = <ruleName>
-        var _channel = BaseRecognizer.DEFAULT_TOKEN_CHANNEL
-        <ruleMemoization(name=ruleName)>
-        <lexerRuleLabelDefs()>
-        <ruleDescriptor.actions.init>
-        try <block>
-        <ruleCleanUp()>
-        state.`type` = _type
-        state.channel = _channel
-        <(ruleDescriptor.actions.after):execAction()>
-<endif>
-    }
-    finally {
-        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>)<endif>
-        <ruleScopeCleanUp()>
-        <memoize()>
-    }
-}
-// $ANTLR end "<ruleName>"
->>
-
-/** How to generate code for the implicitly-defined lexer grammar rule
- *  that chooses between lexer rules.
- */
-tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
-@throws(classOf[RecognitionException])
-def mTokens(): Unit = {
-    <block><\n>
-}
->>
-
-// S U B R U L E S
-
-/** A (...) subrule with multiple alternatives */
-block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-var alt<decisionNumber> = <maxAlt>
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-<@prebranch()>
-alt<decisionNumber> match {
-    <alts:{a | <altSwitchCase(i,a)>}>
-    case _ =>
-}
-<@postbranch()>
->>
-
-/** A rule block with multiple alternatives */
-ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-var alt<decisionNumber> = <maxAlt>
-<decls>
-<@predecision()>
-<decision>
-<@postdecision()>
-alt<decisionNumber> match {
-    <alts:{a | <altSwitchCase(i,a)>}>
-    case _ =>
-}
->>
-
-ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A special case of a (...) subrule with a single alternative */
-blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@prealt()>
-<alts>
-<@postalt()>
->>
-
-/** A (..)+ block with 1 or more alternatives */
-positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-var cnt<decisionNumber>: Int = 0
-<decls>
-<@preloop()>
-var loop<decisionNumber>_quitflag = false
-while (!loop<decisionNumber>_quitflag) {
-    var alt<decisionNumber>:Int = <maxAlt>
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    alt<decisionNumber> match {
-        <alts:{a | <altSwitchCase(i,a)>}>
-        case _ =>
-            if ( cnt<decisionNumber> >= 1 ) loop<decisionNumber>_quitflag = true
-            else {
-                <ruleBacktrackFailure()>
-                val eee = new EarlyExitException(<decisionNumber>, input)
-                <@earlyExitException()>
-                throw eee
-            }
-    }
-    cnt<decisionNumber>+=1
-}
-<@postloop()>
->>
-
-positiveClosureBlockSingleAlt ::= positiveClosureBlock
-
-/** A (..)* block with 1 or more alternatives */
-closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
-// <fileName>:<description>
-<decls>
-<@preloop()>
-var loop<decisionNumber>_quitflag = false
-while (!loop<decisionNumber>_quitflag) {
-    var alt<decisionNumber>:Int = <maxAlt>
-    <@predecision()>
-    <decision>
-    <@postdecision()>
-    alt<decisionNumber> match {
-        <alts:{a | <altSwitchCase(i,a)>}>
-        case _ => loop<decisionNumber>_quitflag = true
-    }
-}
-<@postloop()>
->>
-
-closureBlockSingleAlt ::= closureBlock
-
-/** Optional blocks (x)? are translated to (x|) before code generation
- *  so we can just use the normal block template
- */
-optionalBlock ::= block
-
-optionalBlockSingleAlt ::= block
-
-/** A case in a switch that jumps to an alternative given the alternative
- *  number.  A DFA predicts the alternative and then a simple switch
- *  does the jump to the code that actually matches that alternative.
- */
-altSwitchCase(altNum, alt) ::= <<
-case <altNum> =>
-    <@prealt()>
-    <alt>
->>
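
The comment above describes the two-step shape of every generated decision: a DFA (or an inline lookahead test) computes an alternative number, and a plain switch dispatches to the code for that alternative. A minimal, self-contained Java sketch of that shape, assuming a toy predict() that merely stands in for the generated dfa<n>.predict(input) call:

    public class AltSwitchSketch {
        // Stand-in for dfa<n>.predict(input): maps lookahead to an alternative number.
        static int predict(String lookahead) {
            return lookahead.startsWith("if") ? 1 : 2;
        }

        public static void main(String[] args) {
            int alt1 = predict("if (x) y();");   // the prediction step
            switch (alt1) {                      // the altSwitchCase dispatch step
                case 1: System.out.println("matched alternative 1"); break;
                case 2: System.out.println("matched alternative 2"); break;
                default: break;                  // no viable alternative
            }
        }
    }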
-
-/** An alternative is just a list of elements; at outermost level */
-alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
-// <fileName>:<description>
-{
-<@declarations()>
-<elements:element()>
-<rew>
-<@cleanup()>
-}
->>
-
-/** What to emit when there is no rewrite.  For auto build
- *  mode, does nothing.
- */
-noRewrite(rewriteBlockLevel, treeLevel) ::= ""
-
-// E L E M E N T S
-
-/** Dump the elements one per line */
-element(e) ::= <<
-<@prematch()>
-<e.el><\n>
->>
-
-/** match a token optionally with a label in front */
-tokenRef(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)><label>=<endif>`match`(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>)<if(label)>.asInstanceOf[<labelType>]<endif>
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=ID */
-tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<tokenRef(...)>
-<listLabel(elem=label,...)>
->>
-
-listLabel(label,elem) ::= <<
-if (list_<label>==null) list_<label>=new java.util.ArrayList()
-list_<label>.add(<elem>)<\n>
->>
-
-/** match a character */
-charRef(char,label) ::= <<
-<if(label)>
-<label> = input.LA(1)<\n>
-<endif>
-`match`(<char>)
-<checkRuleBacktrackFailure()>
->>
-
-/** match a character range */
-charRangeRef(a,b,label) ::= <<
-<if(label)>
-<label> = input.LA(1)<\n>
-<endif>
-matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
->>
-
-/** For now, sets are interval tests and must be tested inline */
-matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
-<if(label)>
-<if(LEXER)>
-<label>= input.LA(1)<\n>
-<else>
-<label>=input.LT(1).asInstanceOf[<labelType>]<\n>
-<endif>
-<endif>
-if ( <s> ) {
-    input.consume()
-    <postmatchCode>
-<if(!LEXER)>
-    state.errorRecovery=false<\n>
-<endif>
-    <if(backtracking)>state.failed=false<endif>
-}
-else {
-    <ruleBacktrackFailure()>
-    val mse = new MismatchedSetException(null,input)
-    <@mismatchedSetException()>
-<if(LEXER)>
-    recover(mse)
-    throw mse
-<else>
-    throw mse
-    <! use following code to make it recover inline; remove throw mse;
-    recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>)
-    !>
-<endif>
-}<\n>
->>
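
As the comment before matchSet says, set matches are emitted as inline interval tests: check the lookahead against the set, consume on success, otherwise build a MismatchedSetException. A small runnable sketch against the ANTLR 3 Java runtime, using the example character set '0'..'9' | '_':

    import org.antlr.runtime.*;

    public class InlineSetTest {
        public static void main(String[] args) throws RecognitionException {
            CharStream input = new ANTLRStringStream("7");
            int la = input.LA(1);
            // Inline interval test for the set '0'..'9' | '_'
            if ((la >= '0' && la <= '9') || la == '_') {
                input.consume();
                System.out.println("consumed a set member");
            } else {
                // Same recovery object the template builds: new MismatchedSetException(null, input)
                throw new MismatchedSetException(null, input);
            }
        }
    }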
-
-matchRuleBlockSet ::= matchSet
-
-matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
-<matchSet(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a string literal */
-lexerStringRef(string,label,elementIndex="0") ::= <<
-<if(label)>
-val <label>Start = getCharIndex()
-`match`(<string>)
-<checkRuleBacktrackFailure()>
-val <label>StartLine<elementIndex> = getLine()
-val <label>StartCharPos<elementIndex> = getCharPositionInLine()
-<label> = new <labelType>(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1)
-<label>.setLine(<label>StartLine<elementIndex>)
-<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
-<else>
-`match`(<string>)
-<checkRuleBacktrackFailure()><\n>
-<endif>
->>
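
When a string-literal match carries a label, the template records the starting character index, matches the literal, and then builds the label token from the (start, stop) character range plus the saved line and column. A rough Java-runtime equivalent of that bookkeeping; the literal "while" and the hand-rolled consume loop are only stand-ins for the generated match() call:

    import org.antlr.runtime.*;

    public class LabeledLiteralSketch {
        public static void main(String[] args) {
            CharStream input = new ANTLRStringStream("while (x)");
            int start = input.index();                 // getCharIndex() before the match
            for (int i = 0; i < "while".length(); i++) {
                input.consume();                       // pretend match("while") succeeded
            }
            CommonToken label = new CommonToken(input, Token.INVALID_TOKEN_TYPE,
                                                Token.DEFAULT_CHANNEL,
                                                start, input.index() - 1);
            label.setLine(1);                          // the template copies getLine()
            label.setCharPositionInLine(0);            // ...and getCharPositionInLine()
            System.out.println(label.getText());       // prints: while
        }
    }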
-
-wildcard(token,label,elementIndex,terminalOptions) ::= <<
-<if(label)>
-<label>=input.LT(1).asInstanceOf[<labelType>]<\n>
-<endif>
-matchAny(input)
-<checkRuleBacktrackFailure()>
->>
-
-wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
-<wildcard(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match . wildcard in lexer */
-wildcardChar(label, elementIndex) ::= <<
-<if(label)>
-<label> = input.LA(1)<\n>
-<endif>
-matchAny()
-<checkRuleBacktrackFailure()>
->>
-
-wildcardCharListLabel(label, elementIndex) ::= <<
-<wildcardChar(...)>
-<listLabel(elem=label,...)>
->>
-
-/** Match a rule reference by invoking it possibly with arguments
- *  and a return value or values.  The 'rule' argument used to be the
- *  target rule name, but is now of type Rule, whose toString is the
- *  same: the rule name.  Now, though, you can access the full rule
- *  descriptor.
- */
-ruleRef(rule,label,elementIndex,args,scope) ::= <<
-pushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>)
-<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">)<\n>
-state._fsp-=1
-<checkRuleBacktrackFailure()>
->>
-
-/** ids+=r */
-ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
-<ruleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** A lexer rule reference.
- *
- *  The 'rule' argument used to be the target rule name, but is now
- *  of type Rule, whose toString is the same: the rule name.
- *  Now, though, you can access the full rule descriptor.
- */
-lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
-<if(label)>
-val <label>Start<elementIndex> = getCharIndex()
-val <label>StartLine<elementIndex> = getLine()
-val <label>StartCharPos<elementIndex> = getCharPositionInLine()
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
-<checkRuleBacktrackFailure()>
-<label> = new <labelType>(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1)
-<label>.setLine(<label>StartLine<elementIndex>)
-<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
-<else>
-<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
-<checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** i+=INT in lexer */
-lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
-<lexerRuleRef(...)>
-<listLabel(elem=label,...)>
->>
-
-/** EOF in the lexer */
-lexerMatchEOF(label,elementIndex) ::= <<
-<if(label)>
-val <label>Start<elementIndex> = getCharIndex()
-val <label>StartLine<elementIndex> = getLine()
-val <label>StartCharPos<elementIndex> = getCharPositionInLine()
-`match`(EOF)
-<checkRuleBacktrackFailure()>
-val <label> = new <labelType>(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1)
-<label>.setLine(<label>StartLine<elementIndex>)
-<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
-<else>
-`match`(EOF)
-<checkRuleBacktrackFailure()>
-<endif>
->>
-
-// used for left-recursive rules
-recRuleDefArg()                       ::= "int <recRuleArg()>"
-recRuleArg()                          ::= "_p"
-recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
-recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
-recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
-
-/** match ^(root children) in tree parser */
-tree(root, actionsAfterRoot, children, nullableChildList,
-     enclosingTreeLevel, treeLevel) ::= <<
-<root:element()>
-<actionsAfterRoot:element()>
-<if(nullableChildList)>
-if ( input.LA(1)==Token.DOWN ) {
-    `match`(input, Token.DOWN, null)
-    <checkRuleBacktrackFailure()>
-    <children:element()>
-    `match`(input, Token.UP, null)
-    <checkRuleBacktrackFailure()>
-}
-<else>
-`match`(input, Token.DOWN, null)
-<checkRuleBacktrackFailure()>
-<children:element()>
-`match`(input, Token.UP, null)
-<checkRuleBacktrackFailure()>
-<endif>
->>
-
-/** Every predicate is used as a validating predicate (even when it is
- *  also hoisted into a prediction expression).
- */
-validateSemanticPredicate(pred,description) ::= <<
-if ( !(<evalPredicate(...)>) ) {
-    <ruleBacktrackFailure()>
-    throw new FailedPredicateException(input, "<ruleName>", "<description>")
-}
->>
-
-// F i x e d  D F A  (if-then-else)
-
-dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-val LA<decisionNumber>_<stateNumber> = input.LA(<k>)<\n>
-<edges; separator="\nelse ">
-else {
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>
-<else>
-    <ruleBacktrackFailure()>
-    val nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input)<\n>
-    <@noViableAltException()>
-    throw nvae<\n>
-<endif>
-}
->>
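
For fixed-lookahead decisions the templates cache input.LA(k) in a local (LA<decision>_<state>) and walk an if/else-if chain over the edges, falling through to either the EOT alternative or a NoViableAltException. A self-contained Java sketch of that if-then-else shape, with made-up characters 'a' and 'b' as the two edges:

    import org.antlr.runtime.*;

    public class FixedDfaSketch {
        public static void main(String[] args) {
            CharStream input = new ANTLRStringStream("b");
            int alt2 = 2;                 // start at maxAlt, as the block templates do
            int LA2_0 = input.LA(1);      // cache the k=1 lookahead once
            if (LA2_0 == 'a') {
                alt2 = 1;
            } else if (LA2_0 == 'b') {
                alt2 = 2;
            } else {
                // a generated parser would build and throw a NoViableAltException here
            }
            System.out.println("predicted alternative " + alt2);
        }
    }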
-
-/** Same as a normal DFA state except that we don't examine lookahead
- *  for the bypass alternative.  It delays error detection, but this is
- *  faster, smaller, and closer to what people expect.  For (X)? people
- *  expect "if ( LA(1)==X ) match(X);" and that's it.
- */
-dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-val LA<decisionNumber>_<stateNumber> = input.LA(<k>)<\n>
-<edges; separator="\nelse ">
->>
-
-/** A DFA state that is actually the loopback decision of a closure
- *  loop.  If end-of-token (EOT) predicts any of the targets then it
- *  should act like a default clause (i.e., no error can be generated).
- *  This is used only in the lexer so that for ('a')* on the end of a rule
- *  anything other than 'a' predicts exiting.
- */
-dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-val LA<decisionNumber>_<stateNumber> = input.LA(<k>)<\n>
-<edges; separator="\nelse "><\n>
-<if(eotPredictsAlt)>
-<if(!edges)>
-alt<decisionNumber>=<eotPredictsAlt> <! if no edges, don't gen ELSE !>
-<else>
-else {
-    alt<decisionNumber>=<eotPredictsAlt>
-}<\n>
-<endif>
-<endif>
->>
-
-/** An accept state indicates a unique alternative has been predicted */
-dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>"
-
-/** A simple edge with an expression.  If the expression is satisfied,
- *  enter to the target state.  To handle gated productions, we may
- *  have to evaluate some predicates for this edge.
- */
-dfaEdge(labelExpr, targetState, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
-    <targetState>
-}
->>
-
-// F i x e d  D F A  (switch case)
-
-/** A DFA state where a SWITCH may be generated.  The code generator
- *  decides if this is possible: CodeGenerator.canGenerateSwitch().
- */
-dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-input.LA(<k>) match {
-<edges; separator="\n">
-case _ =>
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>
-<else>
-    <ruleBacktrackFailure()>
-    val nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input)<\n>
-    <@noViableAltException()>
-    throw nvae<\n>
-<endif>
-}<\n>
->>
-
-dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-input.LA(<k>) match {
-    <edges; separator="\n">
-    case _ =>
-}<\n>
->>
-
-dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
-input.LA(<k>) match {
-<edges; separator="\n"><\n>
-case _ =>
-<if(eotPredictsAlt)>
-    alt<decisionNumber>=<eotPredictsAlt>;
-<endif>
-}<\n>
->>
-
-dfaEdgeSwitch(labels, targetState) ::= <<
-case <labels:{it | <it>}; separator=" | "> =>
-    {
-    <targetState>
-    }
->>
-
-// C y c l i c  D F A
-
-/** The code to initiate execution of a cyclic DFA; this is used
- *  in the rule to predict an alt just like the fixed DFA case.
- *  The <name> attribute is inherited via the parser, lexer, ...
- */
-dfaDecision(decisionNumber,description) ::= <<
-alt<decisionNumber> = dfa<decisionNumber>.predict(input)
->>
-
-/* Dump DFA tables as run-length-encoded Strings of octal values.
- * We can't use hex escapes because the compiler translates them before
- * compilation.  These strings are split into multiple, concatenated
- * strings; Java puts them back together at compile time, thankfully.
- * Java cannot handle large static arrays, so we're stuck with this
- * encode/decode approach.  See the analysis and runtime DFA classes
- * for the encoding methods.
- */
-cyclicDFA(dfa) ::= <<
-val DFA<dfa.decisionNumber>_eotS =
-    "<dfa.javaCompressedEOT; wrap="\"+\n    \"">"
-val DFA<dfa.decisionNumber>_eofS =
-    "<dfa.javaCompressedEOF; wrap="\"+\n    \"">"
-val DFA<dfa.decisionNumber>_minS =
-    "<dfa.javaCompressedMin; wrap="\"+\n    \"">"
-val DFA<dfa.decisionNumber>_maxS =
-    "<dfa.javaCompressedMax; wrap="\"+\n    \"">"
-val DFA<dfa.decisionNumber>_acceptS =
-    "<dfa.javaCompressedAccept; wrap="\"+\n    \"">"
-val DFA<dfa.decisionNumber>_specialS =
-    "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>"
-val DFA<dfa.decisionNumber>_transitionS: Array[String] = Array(
-        <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
-)
-
-val DFA<dfa.decisionNumber>_eot: Array[Short] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eotS)
-val DFA<dfa.decisionNumber>_eof: Array[Short] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eofS)
-val DFA<dfa.decisionNumber>_min: Array[Char] = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS)
-val DFA<dfa.decisionNumber>_max: Array[Char] = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS)
-val DFA<dfa.decisionNumber>_accept: Array[Short] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_acceptS)
-val DFA<dfa.decisionNumber>_special: Array[Short] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_specialS)
-val DFA<dfa.decisionNumber>_transition = new Array[Array[Short]](DFA<dfa.decisionNumber>_transitionS.length)
-
-for (i \<- DFA<dfa.decisionNumber>_transition.indices) {
-    DFA<dfa.decisionNumber>_transition(i) = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_transitionS(i))
-}
-
-class DFA<dfa.decisionNumber> extends DFA {
-
-    def this(recognizer: BaseRecognizer) = {
-        this()
-        this.recognizer = recognizer
-        this.decisionNumber = <dfa.decisionNumber>
-        this.eot = DFA<dfa.decisionNumber>_eot
-        this.eof = DFA<dfa.decisionNumber>_eof
-        this.min = DFA<dfa.decisionNumber>_min
-        this.max = DFA<dfa.decisionNumber>_max
-        this.accept = DFA<dfa.decisionNumber>_accept
-        this.special = DFA<dfa.decisionNumber>_special
-        this.transition = DFA<dfa.decisionNumber>_transition
-    }
-    override def getDescription = "<dfa.description>"
-    <@errorMethod()>
-<if(dfa.specialStateSTs)>
-    @throws(classOf[NoViableAltException])
-    override def specialStateTransition(s: Int, _input: IntStream):Int = {
-        <if(LEXER)>
-        val input = _input
-        <endif>
-        <if(PARSER)>
-        val input = _input.asInstanceOf[TokenStream]
-        <endif>
-        <if(TREE_PARSER)>
-        val input = _input.asInstanceOf[TreeNodeStream]
-        <endif>
-    	val _s = s
-        s match {
-        <dfa.specialStateSTs:{state |
-        case <i0> => <! compressed special state numbers 0..n-1 !>
-            <state>}; separator="\n">
-        case _ =>
-        }
-<if(backtracking)>
-        if (state.backtracking>0) {state.failed=true; return -1}<\n>
-<endif>
-        val nvae = new NoViableAltException(getDescription(), <dfa.decisionNumber>, _s, input)
-        error(nvae)
-        throw nvae
-    }<\n>
-<endif>
-}<\n>
->>
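
The run-length encoding described above exists only to sneak large transition tables past Java's limits on static initializers: each table row is packed into string literals of (count, value) character pairs and unpacked at class-load time, in the real runtime by org.antlr.runtime.DFA.unpackEncodedString(). ANTLR's actual packing has extra wrinkles (for example a sentinel for -1), so the decoder below is only a simplified illustration of the idea:

    public class RleTableSketch {
        // Decode a string of (count, value) char pairs into one short[] table row.
        static short[] unpack(String packed) {
            int n = 0;
            for (int i = 0; i < packed.length(); i += 2) n += packed.charAt(i);
            short[] row = new short[n];
            int k = 0;
            for (int i = 0; i < packed.length(); i += 2) {
                char count = packed.charAt(i);
                short value = (short) packed.charAt(i + 1);
                for (int j = 0; j < count; j++) row[k++] = value;
            }
            return row;
        }

        public static void main(String[] args) {
            // Octal escapes keep every cell inside a single literal character.
            short[] row = unpack("\3\5\2\1");   // three 5s followed by two 1s
            for (short s : row) System.out.print(s + " ");
            System.out.println();
        }
    }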
-
-/** A state in a cyclic DFA; it's a special state and part of a big switch on
- *  state.
- */
-cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
-val LA<decisionNumber>_<stateNumber>: Int = input.LA(1)<\n>
-<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
-val index<decisionNumber>_<stateNumber>: Int = input.index()
-input.rewind()<\n>
-<endif>
-s = -1
-<edges; separator="\nelse ">
-<if(semPredState)> <! return input cursor to state before we rewound !>
-input.seek(index<decisionNumber>_<stateNumber>)<\n>
-<endif>
-if ( s>=0 ) return s
->>
-
-/** Just like a fixed DFA edge, test the lookahead and indicate what
- *  state to jump to next if successful.
- */
-cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
-if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>}<\n>
->>
-
-/** An edge pointing at end-of-token; essentially matches any char;
- *  always jump to the target.
- */
-eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
-s = <targetStateNumber><\n>
->>
-
-
-// D F A  E X P R E S S I O N S
-
-andPredicates(left,right) ::= "(<left>&&<right>)"
-
-orPredicates(operands) ::= "(<first(operands)><rest(operands):{o | ||<o>}>)"
-
-notPredicate(pred) ::= "!(<evalPredicate(pred,\"\")>)"
-
-evalPredicate(pred,description) ::= "(<pred>)"
-
-evalSynPredicate(pred,description) ::= "<pred>()"
-
-lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
-
-/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
- *  somewhere.  Must ask for the lookahead directly.
- */
-isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
-
-lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
-(LA<decisionNumber>_<stateNumber> >= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
->>
-
-isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>) >=<lower> && input.LA(<k>) \<= <upper>)"
-
-setTest(ranges) ::= "<ranges; separator=\"||\">"
-
-// A T T R I B U T E S
-
-globalAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-class <scope.name>_scope {
-    <scope.attributes:{it | var <it.name>: <it.type> = _}; separator="\n">
-}
-val <scope.name>_stack = new collection.mutable.Stack[<scope.name>_scope]<\n>
-<endif>
->>
-
-ruleAttributeScope(scope) ::= <<
-<if(scope.attributes)>
-class <scope.name>_scope {
-    <scope.attributes:{it | var <it.name>: <it.type> = _}; separator="\n">
-}
-val <scope.name>_stack = new collection.mutable.Stack[<scope.name>_scope]<\n>
-<endif>
->>
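
Both scope templates generate a small <name>_scope class (one field per attribute) plus a <name>_stack that ruleScopeSetUp()/ruleScopeCleanUp() push and pop around each rule, so $scope::attr reads the top of that stack. The generated Scala uses collection.mutable.Stack; the Java analogue below, with a hypothetical 'function' scope, is only meant to make that lifecycle concrete:

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class DynamicScopeSketch {
        // Shape of a generated scope class: one mutable field per declared attribute.
        static class function_scope { String name; int args; }
        static final Deque<function_scope> function_stack = new ArrayDeque<>();

        public static void main(String[] args) {
            function_stack.push(new function_scope());   // ruleScopeSetUp(): push on rule entry
            function_stack.peek().name = "foo";          // $function::name = "foo"
            System.out.println(function_stack.peek().name);
            function_stack.pop();                        // ruleScopeCleanUp(): pop on rule exit
        }
    }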
-
-returnStructName(r) ::= "<r.name>_return"
-
-returnType() ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-<ruleDescriptor:returnStructName()>
-<else>
-<if(ruleDescriptor.hasSingleReturnValue)>
-<ruleDescriptor.singleValueReturnType>
-<else>
-Unit
-<endif>
-<endif>
->>
-
-/** Generate the Java type associated with a single or multiple return
- *  values.
- */
-ruleLabelType(referencedRule) ::= <<
-<if(referencedRule.hasMultipleReturnValues)>
-<referencedRule.name>_return
-<else>
-<if(referencedRule.hasSingleReturnValue)>
-<referencedRule.singleValueReturnType>
-<else>
-Unit
-<endif>
-<endif>
->>
-
-delegateName(d) ::= <<
-<if(d.label)><d.label><else>g<d.name><endif>
->>
-
-/** Using the type-to-init-value map, look up the default value for a type;
- *  if the type is not in the table it must be an object, so the default value is "null".
- */
-initValue(typeName) ::= <<
-<scalaTypeInitMap.(typeName)>
->>
-
-/** Define a rule label including default value */
-ruleLabelDef(label) ::= <<
-var <label.label.text>: <ruleLabelType(referencedRule=label.referencedRule)> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))><\n>
->>
-
-/** Define a return struct for a rule if the code needs to access its
- *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
- *  subgroups to stick in members.
- *  TODO(matthewlloyd): make this static
- */
-returnScope(scope) ::= <<
-<if(ruleDescriptor.hasMultipleReturnValues)>
-final class <ruleDescriptor:returnStructName()> extends <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope {
-    <scope.attributes:{it | var <it.name>: <it.type> = _}; separator="\n">
-    <@ruleReturnMembers()>
-}
-<endif>
->>
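
From calling code, this return struct is what you actually touch: a rule with multiple return values hands back a <rule>_return object whose start and stop were filled in by ruleDeclarations() and ruleCleanUp(). A typical Java driver for an ANTLR 3 parser is sketched below; ExprLexer, ExprParser, and the expr rule are hypothetical generated names, so the sketch only compiles once ANTLR has generated them from a grammar:

    import org.antlr.runtime.*;

    public class ParseDriver {
        public static void main(String[] args) throws RecognitionException {
            CharStream chars = new ANTLRStringStream("1+2*3");
            ExprLexer lexer = new ExprLexer(chars);              // hypothetical generated lexer
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            ExprParser parser = new ExprParser(tokens);          // hypothetical generated parser
            ExprParser.expr_return r = parser.expr();            // rule with a generated return struct
            System.out.println("matched " + r.start + " .. " + r.stop);
        }
    }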
-
-parameterScope(scope) ::= <<
-<scope.attributes:{it | <it.name>: <it.type>}; separator=", ">
->>
-
-parameterAttributeRef(attr) ::= "<attr.name>"
-parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>"
-
-scopeAttributeRef(scope,attr,index,negIndex) ::= <%
-<if(negIndex)>
-<scope>_stack(<scope>_stack.size-<negIndex>-1).<attr.name>
-<else>
-<if(index)>
-<scope>_stack(<index>).<attr.name>
-<else>
-<scope>_stack.top.<attr.name>
-<endif>
-<endif>
-%>
-
-scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
-<if(negIndex)>
-<scope>_stack(<scope>_stack.size-<negIndex>-1).<attr.name> = <expr>
-<else>
-<if(index)>
-<scope>_stack(<index>).<attr.name> = <expr>
-<else>
-<scope>_stack.top.<attr.name> = <expr>
-<endif>
-<endif>
-%>
-
-/** $x is either global scope or x is rule with dynamic scope; refers
- *  to stack itself not top of stack.  This is useful for predicates
- *  like {$function.size()>0 && $function::name.equals("foo")}?
- */
-isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
-
-/** reference an attribute of rule; might only have single return value */
-ruleLabelRef(referencedRule,scope,attr) ::= <%
-<if(referencedRule.hasMultipleReturnValues)>
-(if (<scope>!=null) <scope>.<attr.name> else <initValue(attr.type)>)
-<else>
-<scope>
-<endif>
-%>
-
-returnAttributeRef(ruleDescriptor,attr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name>
-<else>
-<attr.name>
-<endif>
-%>
-
-returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
-<if(ruleDescriptor.hasMultipleReturnValues)>
-retval.<attr.name> =<expr>
-<else>
-<attr.name> =<expr>
-<endif>
-%>
-
-/** How to translate $tokenLabel */
-tokenLabelRef(label) ::= "<label>"
-
-/** ids+=ID {$ids} or e+=expr {$e} */
-listLabelRef(label) ::= "list_<label>"
-
-
-// not sure the next are the right approach
-
-tokenLabelPropertyRef_text(scope,attr) ::= "(if (<scope>!=null) <scope>.getText() else null)"
-tokenLabelPropertyRef_type(scope,attr) ::= "(if (<scope>!=null) <scope>.getType() else 0)"
-tokenLabelPropertyRef_line(scope,attr) ::= "(if (<scope>!=null) <scope>.getLine() else 0)"
-tokenLabelPropertyRef_pos(scope,attr) ::= "(if (<scope>!=null) <scope>.getCharPositionInLine() else 0)"
-tokenLabelPropertyRef_channel(scope,attr) ::= "(if (<scope>!=null) <scope>.getChannel() else 0)"
-tokenLabelPropertyRef_index(scope,attr) ::= "(if (<scope>!=null) <scope>.getTokenIndex() else 0)"
-tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
-tokenLabelPropertyRef_int(scope,attr) ::= "(if (<scope>!=null) Integer.valueOf(<scope>.getText()) else 0)"
-
-ruleLabelPropertyRef_start(scope,attr) ::= "(if (<scope>!=null) <scope>.start.asInstanceOf[<labelType>] else null)"
-ruleLabelPropertyRef_stop(scope,attr) ::= "(if (<scope>!=null) <scope>.stop.asInstanceOf[<labelType>] else null)"
-ruleLabelPropertyRef_tree(scope,attr) ::= "(if (<scope>!=null) <scope>.tree.asInstanceOf[<ASTLabelType>] else null)"
-ruleLabelPropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-(if (<scope>!=null) (input.getTokenStream().toString(
-  input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
-  input.getTreeAdaptor().getTokenStopIndex(<scope>.start))) else null)
-<else>
-(if (<scope>!=null) input.toString(<scope>.start,<scope>.stop) else null)
-<endif>
->>
-
-ruleLabelPropertyRef_st(scope,attr) ::= "(if (<scope>!=null) <scope>.st else null)"
-
-/** Isolated $RULE ref ok in lexer as it's a Token */
-lexerRuleLabel(label) ::= "<label>"
-
-lexerRuleLabelPropertyRef_type(scope,attr) ::=
-    "(if (<scope>!=null) <scope>.getType() else 0)"
-lexerRuleLabelPropertyRef_line(scope,attr) ::=
-    "(if (<scope>!=null) <scope>.getLine() else 0)"
-lexerRuleLabelPropertyRef_pos(scope,attr) ::=
-    "(if (<scope>!=null) <scope>.getCharPositionInLine() else -1)"
-lexerRuleLabelPropertyRef_channel(scope,attr) ::=
-    "(if (<scope>!=null) <scope>.getChannel() else 0)"
-lexerRuleLabelPropertyRef_index(scope,attr) ::=
-    "(if (<scope>!=null) <scope>.getTokenIndex() else 0)"
-lexerRuleLabelPropertyRef_text(scope,attr) ::=
-    "(if (<scope>!=null) <scope>.getText() else null)"
-lexerRuleLabelPropertyRef_int(scope,attr) ::=
-    "(if (<scope>!=null) Integer.valueOf(<scope>.getText()) else 0)"
-
-// Somebody may ref $template or $tree or $stop within a rule:
-rulePropertyRef_start(scope,attr) ::= "(retval.start.asInstanceOf[<labelType>])"
-rulePropertyRef_stop(scope,attr) ::= "(retval.stop.asInstanceOf[<labelType>])"
-rulePropertyRef_tree(scope,attr) ::= "(retval.tree.asInstanceOf[<ASTLabelType>])"
-rulePropertyRef_text(scope,attr) ::= <<
-<if(TREE_PARSER)>
-input.getTokenStream().toString(
-  input.getTreeAdaptor().getTokenStartIndex(retval.start),
-  input.getTreeAdaptor().getTokenStopIndex(retval.start))
-<else>
-input.toString(retval.start,input.LT(-1))
-<endif>
->>
-rulePropertyRef_st(scope,attr) ::= "retval.st"
-
-lexerRulePropertyRef_text(scope,attr) ::= "getText()"
-lexerRulePropertyRef_type(scope,attr) ::= "_type"
-lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
-lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
-lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
-lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
-lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
-lexerRulePropertyRef_stop(scope,attr) ::= "(getCharIndex()-1)"
-lexerRulePropertyRef_int(scope,attr) ::= "Integer.valueOf(<scope>.getText())"
-
-// setting $st and $tree is allowed in local rule. everything else
-// is flagged as error
-ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>"
-ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>"
-
-/** How to execute an action (only when not backtracking) */
-execAction(action) ::= <<
-<if(backtracking)>
-if ( <actions.(actionScope).synpredgate> ) {
-  <action>
-}
-<else>
-<action>
-<endif>
->>
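
The synpredgate referenced here exists so user actions do not fire while the recognizer is merely guessing inside a syntactic predicate. A minimal sketch of that gate; the SharedState class below only imitates the backtracking counter kept by the runtime's RecognizerSharedState:

    public class ActionGateSketch {
        static class SharedState { int backtracking = 0; }   // imitation of RecognizerSharedState
        static final SharedState state = new SharedState();

        // An embedded user action, wrapped the way execAction wraps it.
        static void action(String msg) {
            if (state.backtracking == 0) {   // the synpredgate: only run when not guessing
                System.out.println(msg);
            }
        }

        public static void main(String[] args) {
            action("runs during a normal parse");
            state.backtracking = 1;          // evaluating a syntactic predicate
            action("silently skipped while backtracking");
        }
    }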
-
-/** How to always execute an action even when backtracking */
-execForcedAction(action) ::= "<action>"
-
-// M I S C (properties, etc...)
-
-bitset(name, words64) ::= <<
-val <name> = new BitSet(Array[Long](<words64:{it | <it>L};separator=",">))<\n>
->>
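
The bitset template emits the FOLLOW_* constants that generated match() calls consult for error recovery; each 64-bit word covers token types 0-63, 64-127, and so on. The ANTLR 3 Java runtime's BitSet works the same way and can be exercised directly:

    import org.antlr.runtime.BitSet;

    public class FollowSetDemo {
        public static void main(String[] args) {
            // 0x12 sets bits 1 and 4 of the first word: token types 1 and 4 are in the set.
            BitSet follow = new BitSet(new long[] { 0x12L });
            System.out.println(follow.member(1));   // true
            System.out.println(follow.member(4));   // true
            System.out.println(follow.member(2));   // false
        }
    }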
-
-codeFileExtension() ::= ".scala"
-
-true_value() ::= "true"
-false_value() ::= "false"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/action-edge.st b/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/action-edge.st
deleted file mode 100644
index 1bb8e96..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/action-edge.st
+++ /dev/null
@@ -1 +0,0 @@
-<src> -> <target> [fontsize=11, fontname="Courier", arrowsize=.7, label = "<label>"<if(arrowhead)>, arrowhead = <arrowhead><endif>];
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/decision-rank.st b/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/decision-rank.st
deleted file mode 100644
index d5142f6..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/decision-rank.st
+++ /dev/null
@@ -1 +0,0 @@
-{rank=same; rankdir=TB; <states; separator="; ">}
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/dfa.st b/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/dfa.st
deleted file mode 100644
index 5b81e70..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/dfa.st
+++ /dev/null
@@ -1,7 +0,0 @@
-digraph NFA {
-<if(rankdir)>rankdir=<rankdir>;<endif>
-<decisionRanks; separator="\n">
-<states; separator="\n">
-<edges; separator="\n">
-}
-
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/dot.stg b/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/dot.stg
deleted file mode 100644
index 96981dc..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/dot.stg
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Conversion to C#:
- * Copyright (c) 2011 Sam Harwell, Tunnel Vision Laboratories, LLC.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-dfa(decisionRanks,states,edges,rankdir,startState,useBox) ::= <<
-digraph NFA {
-<if(rankdir)>rankdir=<rankdir>;<endif>
-<decisionRanks; separator="\n">
-<states; separator="\n">
-<edges; separator="\n">
-}
->>
-
-nfa(decisionRanks,states,edges,rankdir,startState) ::= <<
-digraph NFA {
-rankdir=LR;
-<decisionRanks; separator="\n">
-<states; separator="\n">
-<edges; separator="\n">
-}
->>
-
-decision_rank(states) ::= <<
-{rank=same; rankdir=TB; <states; separator="; ">}
->>
-
-edge(src,target,label,arrowhead) ::= <<
-<src> -> <target> [fontsize=11, fontname="Courier", arrowsize=.7, label = "<label>"<if(arrowhead)>, arrowhead = <arrowhead><endif>];
->>
-
-action_edge(src,target,label,arrowhead) ::= <<
-<src> -> <target> [fontsize=11, fontname="Courier", arrowsize=.7, label = "<label>"<if(arrowhead)>, arrowhead = <arrowhead><endif>];
->>
-
-epsilon_edge(src,target,label,arrowhead) ::= <<
-<src> -> <target> [fontname="Times-Italic", label = "e"];
->>
-
-state(name,useBox) ::= <<
-node [fontsize=11, shape = <if(useBox)>box<else>circle, fixedsize=true, width=.4<endif>]; <name>
->>
-
-stopstate(name,useBox) ::= <<
-node [fontsize=11, shape = <if(useBox)>polygon,sides=4,peripheries=2<else>doublecircle, fixedsize=true, width=.6<endif>]; <name>
->>
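
These DOT templates are rendered by the tool and concatenated into Graphviz files for visualizing DFAs and NFAs. The same rendering can be reproduced with the StringTemplate 4 API; the snippet below inlines a trimmed-down version of the edge template rather than loading the deleted .stg file:

    import org.stringtemplate.v4.ST;

    public class DotEdgeDemo {
        public static void main(String[] args) {
            // Simplified edge template: same attribute names, fewer styling options.
            ST edge = new ST("<src> -> <target> [fontsize=11, label = \"<label>\"];");
            edge.add("src", "s0");
            edge.add("target", "s1");
            edge.add("label", "'a'");
            System.out.println(edge.render());   // s0 -> s1 [fontsize=11, label = "'a'"];
        }
    }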
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/edge.st b/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/edge.st
deleted file mode 100644
index 1bb8e96..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/edge.st
+++ /dev/null
@@ -1 +0,0 @@
-<src> -> <target> [fontsize=11, fontname="Courier", arrowsize=.7, label = "<label>"<if(arrowhead)>, arrowhead = <arrowhead><endif>];
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/epsilon-edge.st b/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/epsilon-edge.st
deleted file mode 100644
index 2a49b2b..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/epsilon-edge.st
+++ /dev/null
@@ -1 +0,0 @@
-<src> -> <target> [fontname="Times-Italic", label = "e"];
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/nfa.st b/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/nfa.st
deleted file mode 100644
index 280ced2..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/nfa.st
+++ /dev/null
@@ -1,6 +0,0 @@
-digraph NFA {
-rankdir=LR;
-<decisionRanks; separator="\n">
-<states; separator="\n">
-<edges; separator="\n">
-}
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/state.st b/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/state.st
deleted file mode 100644
index f68e3a8..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/state.st
+++ /dev/null
@@ -1 +0,0 @@
-node [fontsize=11, shape = <if(useBox)>box<else>circle, fixedsize=true, width=.4<endif>]; <name>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/stopstate.st b/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/stopstate.st
deleted file mode 100644
index 572d460..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/dot/stopstate.st
+++ /dev/null
@@ -1 +0,0 @@
-node [fontsize=11, shape = <if(useBox)>polygon,sides=4,peripheries=2<else>doublecircle, fixedsize=true, width=.6<endif>]; <name>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/messages/languages/en.stg b/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/messages/languages/en.stg
deleted file mode 100644
index ea27ddc..0000000
--- a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/messages/languages/en.stg
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- [The "BSD license"]
- Copyright (c) 2010 Terence Parr
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote products
-    derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/*
- New style messages. This file only contains the messages in English, but no
- information about the file, line, or column where an error occurred.
- The location and message ids are taken out of the formats directory.
-														Kay Roepke
-*/
-group en_US;
-
-// TOOL ERRORS
-// file errors
-CANNOT_WRITE_FILE(arg,exception,stackTrace) ::= <<
-cannot write file <arg>: <exception>
-<stackTrace; separator="\n">
->>
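
Each entry in this group is a StringTemplate with named holes; the tool looks the template up by message id, supplies arg/arg2/exception and friends, and the formats directory adds the file/line/column framing. Filling one of these messages by hand with the StringTemplate 4 API looks roughly like this (the argument values are invented):

    import org.stringtemplate.v4.ST;

    public class MessageRenderDemo {
        public static void main(String[] args) {
            // Same first line as CANNOT_WRITE_FILE(arg,exception,stackTrace), without the stack trace.
            ST msg = new ST("cannot write file <arg>: <exception>");
            msg.add("arg", "T.tokens");
            msg.add("exception", "java.io.IOException: No space left on device");
            System.out.println(msg.render());
        }
    }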
-CANNOT_CLOSE_FILE(arg,exception,stackTrace) ::= "cannot close file <arg>: <exception>"
-CANNOT_FIND_TOKENS_FILE(arg) ::= "cannot find tokens file <arg>"
-ERROR_READING_TOKENS_FILE(arg,exception,stackTrace) ::= <<
-problem reading token vocabulary file <arg>: <exception>
-<stackTrace; separator="\n">
->>
-DIR_NOT_FOUND(arg,exception,stackTrace) ::= "directory not found: <arg>"
-OUTPUT_DIR_IS_FILE(arg,exception,stackTrace) ::= "output directory is a file: <arg>"
-CANNOT_OPEN_FILE(arg,exception,stackTrace) ::= "cannot find or open file: <arg><if(exception)>; reason: <exception><endif>"
-CIRCULAR_DEPENDENCY() ::= "your grammars contain a circular dependency and cannot be sorted into a valid build order."
-
-INTERNAL_ERROR(arg,arg2,exception,stackTrace) ::= <<
-internal error: <arg> <arg2><if(exception)>: <exception><endif>
-<stackTrace; separator="\n">
->>
-INTERNAL_WARNING(arg) ::= "internal warning: <arg>"
-ERROR_CREATING_ARTIFICIAL_RULE(arg,exception,stackTrace) ::= <<
-problems creating lexer rule listing all tokens: <exception>
-<stackTrace; separator="\n">
->>
-TOKENS_FILE_SYNTAX_ERROR(arg,arg2) ::=
-	"problems parsing token vocabulary file <arg> on line <arg2>"
-CANNOT_GEN_DOT_FILE(arg,exception,stackTrace) ::=
-	"cannot write DFA DOT file <arg>: <exception>"
-BAD_ACTION_AST_STRUCTURE(exception,stackTrace) ::=
-	"bad internal tree structure for action '<arg>': <exception>"
-BAD_AST_STRUCTURE(arg,exception,stackTrace) ::= <<
-bad internal tree structure '<arg>': <exception>
-<stackTrace; separator="\n">
->>
-FILE_AND_GRAMMAR_NAME_DIFFER(arg,arg2) ::=
-  "file <arg2> contains grammar <arg>; names must be identical"
-FILENAME_EXTENSION_ERROR(arg) ::=
-  "file <arg> must end in a file extension, normally .g"
-
-// code gen errors
-MISSING_CODE_GEN_TEMPLATES(arg) ::=
-	"cannot find code generation templates <arg>.stg"
-MISSING_CYCLIC_DFA_CODE_GEN_TEMPLATES() ::=
-	"cannot find code generation cyclic DFA templates for language <arg>"
-CODE_GEN_TEMPLATES_INCOMPLETE(arg) ::=
-	"at least one code generation template missing for language <arg>"
-CANNOT_CREATE_TARGET_GENERATOR(arg,exception,stackTrace) ::=
-	"cannot create target <arg> code generator: <exception>"
-CANNOT_COMPUTE_SAMPLE_INPUT_SEQ() ::=
-	"cannot generate a sample input sequence from lookahead DFA"
-
-// grammar interpretation errors
-/*
-NO_VIABLE_DFA_ALT(arg,arg2) ::=
-	"no viable transition from state <arg> on <arg2> while interpreting DFA"
-*/
-
-// GRAMMAR ERRORS
-SYNTAX_ERROR(arg) ::= "syntax error: <arg>"
-RULE_REDEFINITION(arg) ::=
-	"rule <arg> redefinition"
-LEXER_RULES_NOT_ALLOWED(arg) ::=
-	"lexer rule <arg> not allowed in parser"
-PARSER_RULES_NOT_ALLOWED(arg) ::=
-	"parser rule <arg> not allowed in lexer"
-CANNOT_FIND_ATTRIBUTE_NAME_IN_DECL(arg) ::=
-	"cannot find an attribute name in attribute declaration"
-NO_TOKEN_DEFINITION(arg) ::=
-	"no lexer rule corresponding to token: <arg>"
-UNDEFINED_RULE_REF(arg) ::=
-	"reference to undefined rule: <arg>"
-LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE(arg) ::=
-	"literal has no associated lexer rule: <arg>"
-CANNOT_ALIAS_TOKENS_IN_LEXER(arg) ::=
-	"literals are illegal in lexer tokens{} section: <arg>"
-ATTRIBUTE_REF_NOT_IN_RULE(arg,arg2) ::=
-	"reference to attribute outside of a rule: <arg><if(arg2)>.<arg2><endif>"
-UNKNOWN_ATTRIBUTE_IN_SCOPE(arg,arg2) ::=
-	"unknown attribute for <arg>: <arg2>"
-UNKNOWN_RULE_ATTRIBUTE(arg,arg2) ::=
-	"unknown attribute for rule <arg>: <arg2>"
-UNKNOWN_SIMPLE_ATTRIBUTE(arg,args2) ::=
-	"attribute is not a token, parameter, or return value: <arg>"
-ISOLATED_RULE_SCOPE(arg) ::=
-	"missing attribute access on rule scope: <arg>"
-INVALID_RULE_PARAMETER_REF(arg,arg2) ::=
-	"cannot access rule <arg>'s parameter: <arg2>"
-INVALID_RULE_SCOPE_ATTRIBUTE_REF(arg,arg2) ::=
-	"cannot access rule <arg>'s dynamically-scoped attribute: <arg2>"
-SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE(arg) ::=
-	"symbol <arg> conflicts with global dynamic scope with same name"
-WRITE_TO_READONLY_ATTR(arg,arg2,arg3) ::=
-	"cannot write to read only attribute: $<arg><if(arg2)>.<arg2><endif>"
-LABEL_CONFLICTS_WITH_RULE(arg) ::=
-	"label <arg> conflicts with rule with same name"
-LABEL_CONFLICTS_WITH_TOKEN(arg) ::=
-	"label <arg> conflicts with token with same name"
-LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE(arg,arg2) ::=
-	"label <arg> conflicts with rule <arg2>'s dynamically-scoped attribute with same name"
-LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL(arg,arg2) ::=
-	"label <arg> conflicts with rule <arg2>'s return value or parameter with same name"
-ATTRIBUTE_CONFLICTS_WITH_RULE(arg,arg2) ::=
-	"rule <arg2>'s dynamically-scoped attribute <arg> conflicts with the rule name"
-ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL(arg,arg2) ::=
-	"rule <arg2>'s dynamically-scoped attribute <arg> conflicts with <arg2>'s return value or parameter with same name"
-LABEL_TYPE_CONFLICT(arg,arg2) ::=
-	"label <arg> type mismatch with previous definition: <arg2>"
-ARG_RETVAL_CONFLICT(arg,arg2) ::=
-	"rule <arg2>'s argument <arg> conflicts with a return value with same name"
-NONUNIQUE_REF(arg) ::=
-	"<arg> is a non-unique reference"
-FORWARD_ELEMENT_REF(arg) ::=
-	"illegal forward reference: <arg>"
-MISSING_RULE_ARGS(arg) ::=
-	"missing parameter(s) on rule reference: <arg>"
-RULE_HAS_NO_ARGS(arg) ::=
-	"rule <arg> has no defined parameters"
-ARGS_ON_TOKEN_REF(arg) ::=
-	"token reference <arg> may not have parameters"
-ILLEGAL_OPTION(arg) ::=
-	"illegal option <arg>"
-LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT(arg) ::=
-	"rule '+=' list labels are not allowed w/o output option: <arg>"
-UNDEFINED_TOKEN_REF_IN_REWRITE(arg) ::=
-  "reference to undefined token in rewrite rule: <arg>"
-REWRITE_ELEMENT_NOT_PRESENT_ON_LHS(arg) ::=
-  "reference to rewrite element <arg> without reference on left of ->"
-UNDEFINED_LABEL_REF_IN_REWRITE(arg) ::=
-  "reference to undefined label in rewrite rule: $<arg>"
-NO_GRAMMAR_START_RULE (arg) ::=
-  "grammar <arg>: no start rule (no rule can obviously be followed by EOF)"
-EMPTY_COMPLEMENT(arg) ::= <<
-<if(arg)>
-set complement ~<arg> is empty
-<else>
-set complement is empty
-<endif>
->>
-UNKNOWN_DYNAMIC_SCOPE(arg) ::=
-  "unknown dynamic scope: <arg>"
-UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE(arg,arg2) ::=
-  "unknown dynamically-scoped attribute for scope <arg>: <arg2>"
-RULE_REF_AMBIG_WITH_RULE_IN_ALT(arg) ::=
-  "reference $<arg> is ambiguous; rule <arg> is enclosing rule and referenced in the production (assuming enclosing rule)"
-ISOLATED_RULE_ATTRIBUTE(arg) ::=
-  "reference to locally-defined rule scope attribute without rule name: <arg>"
-INVALID_ACTION_SCOPE(arg,arg2) ::=
-  "unknown or invalid action scope for <arg2> grammar: <arg>"
-ACTION_REDEFINITION(arg) ::=
-  "redefinition of <arg> action"
-DOUBLE_QUOTES_ILLEGAL(arg) ::=
-  "string literals must use single quotes (such as \'begin\'): <arg>"
-INVALID_TEMPLATE_ACTION(arg) ::=
-  "invalid StringTemplate % shorthand syntax: '<arg>'"
-MISSING_ATTRIBUTE_NAME() ::=
-  "missing attribute name on $ reference"
-ARG_INIT_VALUES_ILLEGAL(arg) ::=
-  "rule parameters may not have init values: <arg>"
-REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION(arg) ::=
-  "<if(arg)>rule <arg> uses <endif>rewrite syntax or operator with no output option; setting output=AST"
-AST_OP_WITH_NON_AST_OUTPUT_OPTION(arg) ::=
-  "AST operator with non-AST output option: <arg>"
-NO_RULES(arg) ::= "grammar file <arg> has no rules"
-MISSING_AST_TYPE_IN_TREE_GRAMMAR(arg) ::=
-  "tree grammar <arg> has no ASTLabelType option"
-REWRITE_FOR_MULTI_ELEMENT_ALT(arg) ::=
-  "with rewrite=true, alt <arg> not simple node or obvious tree element; text attribute for rule not guaranteed to be correct"
-RULE_INVALID_SET(arg) ::=
-  "Cannot complement rule <arg>; not a simple set or element"
-HETERO_ILLEGAL_IN_REWRITE_ALT(arg) ::=
-  "alts with rewrites can't use heterogeneous types left of ->"
-NO_SUCH_GRAMMAR_SCOPE(arg,arg2) ::=
-  "reference to undefined grammar in rule reference: <arg>.<arg2>"
-NO_SUCH_RULE_IN_SCOPE(arg,arg2) ::=
-  "rule <arg2> is not defined in grammar <arg>"
-TOKEN_ALIAS_CONFLICT(arg,arg2) ::=
-  "cannot alias <arg>; string already assigned to <arg2>"
-TOKEN_ALIAS_REASSIGNMENT(arg,arg2) ::=
-  "cannot alias <arg>; token name already assigned to <arg2>"
-TOKEN_VOCAB_IN_DELEGATE(arg,arg2) ::=
-  "tokenVocab option ignored in imported grammar <arg>"
-INVALID_IMPORT(arg,arg2) ::=
-  "<arg.grammarTypeString> grammar <arg.name> cannot import <arg2.grammarTypeString> grammar <arg2.name>"
-IMPORTED_TOKENS_RULE_EMPTY(arg,arg2) ::=
-  "no lexer rules contributed to <arg> from imported grammar <arg2>"
-IMPORT_NAME_CLASH(arg,arg2) ::=
-  "combined grammar <arg.name> and imported <arg2.grammarTypeString> grammar <arg2.name> both generate <arg2.recognizerName>; import ignored"
-AST_OP_IN_ALT_WITH_REWRITE(arg,arg2) ::=
-  "rule <arg> alt <arg2> uses rewrite syntax and also an AST operator"
-WILDCARD_AS_ROOT(arg) ::= "Wildcard invalid as root; wildcard can itself be a tree"
-CONFLICTING_OPTION_IN_TREE_FILTER(arg,arg2) ::= "option <arg>=<arg2> conflicts with tree grammar filter mode"
-ILLEGAL_OPTION_VALUE(arg, arg2) ::= "value '<arg2>' invalid for option <arg>"
-ALL_OPS_NEED_SAME_ASSOC(arg) ::= "all operators of alt <arg> of left-recursive rule must have same associativity"
-
-// GRAMMAR WARNINGS
-
-GRAMMAR_NONDETERMINISM(input,conflictingAlts,paths,disabled,hasPredicateBlockedByAction) ::=
-<<
-<if(paths)>
-Decision can match input such as "<input>" using multiple alternatives:
-<paths:{ it |  alt <it.alt> via NFA path <it.states; separator=","><\n>}>
-<else>
-Decision can match input such as "<input>" using multiple alternatives: <conflictingAlts; separator=", ">
-<endif>
-<if(disabled)><\n>As a result, alternative(s) <disabled; separator=","> were disabled for that input<endif><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
->>
-
-DANGLING_STATE(danglingAlts,input) ::= <<
-the decision cannot distinguish between alternative(s) <danglingAlts; separator=","> for input such as "<input>"
->>
-
-UNREACHABLE_ALTS(alts) ::= <<
-The following alternatives can never be matched: <alts; separator=","><\n>
->>
-
-INSUFFICIENT_PREDICATES(upon,altToLocations,hasPredicateBlockedByAction) ::= <<
-Input such as "<upon>" is insufficiently covered with predicates at locations: <altToLocations.keys:{alt|alt <alt>: <altToLocations.(alt):{loc| line <loc.line>:<loc.column> at <loc.text>}; separator=", ">}; separator=", "><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
->>
-
-DUPLICATE_SET_ENTRY(arg) ::=
-	"duplicate token type <arg> when collapsing subrule into set"
-
-ANALYSIS_ABORTED(enclosingRule) ::= <<
-ANTLR could not analyze this decision in rule <enclosingRule>; often this is because of recursive rule references visible from the left edge of alternatives.  ANTLR will re-analyze the decision with a fixed lookahead of k=1.  Consider using "options {k=1;}" for that decision and possibly adding a syntactic predicate.
->>
-
-RECURSION_OVERLOW(alt,input,targetRules,callSiteStates) ::= <<
-Alternative <alt>: after matching input such as <input> decision cannot predict what comes next due to recursion overflow <targetRules,callSiteStates:{t,c|to <t> from <c:{s|<s.enclosingRule.name>};separator=", ">}; separator=" and ">
->>
-
-LEFT_RECURSION(targetRules,alt,callSiteStates) ::= <<
-Alternative <alt> discovers infinite left-recursion <targetRules,callSiteStates:{t,c|to <t> from <c:{s|<s.enclosingRule>};separator=", ">}; separator=" and ">
->>
-
-UNREACHABLE_TOKENS(tokens) ::= <<
-The following token definitions can never be matched because prior tokens match the same input: <tokens; separator=",">
->>
-
-TOKEN_NONDETERMINISM(input,conflictingTokens,paths,disabled,hasPredicateBlockedByAction) ::=
-<<
-<if(paths)>
-Decision can match input such as "<input>" using multiple alternatives:
-<paths:{ it | alt <it.alt> via NFA path <it.states; separator=","><\n>}>
-<else>
-Multiple token rules can match input such as "<input>": <conflictingTokens; separator=", "><\n>
-<endif>
-<if(disabled)><\n>As a result, token(s) <disabled; separator=","> were disabled for that input<endif><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
->>
-
-LEFT_RECURSION_CYCLES(listOfCycles) ::= <<
-The following sets of rules are mutually left-recursive <listOfCycles:{c| [<c:{r|<r.name>}; separator=", ">]}; separator=" and ">
->>
-
-NONREGULAR_DECISION(ruleName,alts) ::= <<
-[fatal] rule <ruleName> has non-LL(*) decision due to recursive rule invocations reachable from alts <alts; separator=",">.  Resolve by left-factoring or using syntactic predicates or using backtrack=true option.
->>
-
-/* l10n for message levels */
-warning() ::= "warning"
-error() ::= "error"
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/BaseTest.java b/antlr-3.4/tool/src/test/java/org/antlr/test/BaseTest.java
deleted file mode 100644
index aae0560..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/BaseTest.java
+++ /dev/null
@@ -1,904 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-
-import org.antlr.Tool;
-import org.antlr.analysis.Label;
-import org.antlr.runtime.CommonTokenStream;
-import org.antlr.runtime.Token;
-import org.antlr.runtime.TokenSource;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-import org.antlr.tool.ANTLRErrorListener;
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.GrammarSemanticsMessage;
-import org.antlr.tool.Message;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-
-import javax.tools.*;
-import java.io.*;
-import java.util.*;
-
-
-public abstract class BaseTest {
-	public static final String newline = System.getProperty("line.separator");
-
-	public static final String jikes = null;//"/usr/bin/jikes";
-	public static final String pathSep = System.getProperty("path.separator");
-
-   /**
-    * When running from Maven, the junit tests are run via the surefire plugin. It sets the
-    * classpath for the test environment into the following property. We need to pick this up
-    * for the junit tests that are going to generate and try to run code.
-    */
-    public static final String SUREFIRE_CLASSPATH = System.getProperty("surefire.test.class.path", "");
-
-    /**
-     * Build up the full classpath we need, including the surefire path (if present)
-     */
-    public static final String CLASSPATH = System.getProperty("java.class.path") + (SUREFIRE_CLASSPATH.equals("") ? "" : pathSep + SUREFIRE_CLASSPATH);
-
-	public String tmpdir = null;
-
-    /** reset during setUp and set to true if we find a problem */
-    protected boolean lastTestFailed = false;
-
-	/** If error during parser execution, store stderr here; can't return
-     *  stdout and stderr.  This doesn't trap errors from running antlr.
-     */
-	protected String stderrDuringParse;
-
-    @Before
-	public void setUp() throws Exception {
-        lastTestFailed = false; // hope for the best, but set to true in asserts that fail
-        // new output dir for each test
-        tmpdir = new File(System.getProperty("java.io.tmpdir"),
-						  "antlr-"+getClass().getName()+"-"+
-						  System.currentTimeMillis()).getAbsolutePath();
-        ErrorManager.resetErrorState();
-        STGroup.defaultGroup = new STGroup();
-    }
-
-    @After
-    public void tearDown() throws Exception {
-        // remove tmpdir if no error.
-        if ( !lastTestFailed ) eraseTempDir();
-
-    }
-
-    protected Tool newTool(String[] args) {
-		Tool tool = new Tool(args);
-		tool.setOutputDirectory(tmpdir);
-		return tool;
-	}
-
-	protected Tool newTool() {
-		Tool tool = new Tool();
-		tool.setOutputDirectory(tmpdir);
-		return tool;
-	}
-
-	protected boolean compile(String fileName) {
-		String classpathOption = "-classpath";
-
-		String[] args = new String[] {
-					"javac", "-d", tmpdir,
-					classpathOption, tmpdir+pathSep+CLASSPATH,
-					tmpdir+"/"+fileName
-		};
-		String cmdLine = "javac" +" -d "+tmpdir+" "+classpathOption+" "+tmpdir+pathSep+CLASSPATH+" "+fileName;
-		//System.out.println("compile: "+cmdLine);
-
-
-		File f = new File(tmpdir, fileName);
-		JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
-
-		StandardJavaFileManager fileManager =
-			compiler.getStandardFileManager(null, null, null);
-
-		Iterable<? extends JavaFileObject> compilationUnits =
-			fileManager.getJavaFileObjectsFromFiles(Arrays.asList(f));
-
-		Iterable<String> compileOptions =
-			Arrays.asList(new String[]{"-d", tmpdir, "-cp", tmpdir+pathSep+CLASSPATH} );
-
-		JavaCompiler.CompilationTask task =
-			compiler.getTask(null, fileManager, null, compileOptions, null,
-							 compilationUnits);
-		boolean ok = task.call();
-
-		try {
-			fileManager.close();
-		}
-		catch (IOException ioe) {
-			ioe.printStackTrace(System.err);
-		}
-		return ok;
-	}
-
-	/** Return true if all is ok, no errors */
-	protected boolean antlr(String fileName, String grammarFileName, String grammarStr, boolean debug) {
-		boolean allIsWell = true;
-		mkdir(tmpdir);
-		writeFile(tmpdir, fileName, grammarStr);
-		try {
-			final List options = new ArrayList();
-			if ( debug ) {
-				options.add("-debug");
-			}
-			options.add("-o");
-			options.add(tmpdir);
-			options.add("-lib");
-			options.add(tmpdir);
-			options.add(new File(tmpdir,grammarFileName).toString());
-			final String[] optionsA = new String[options.size()];
-			options.toArray(optionsA);
-			/*
-			final ErrorQueue equeue = new ErrorQueue();
-			ErrorManager.setErrorListener(equeue);
-			*/
-			Tool antlr = newTool(optionsA);
-			antlr.process();
-			ANTLRErrorListener listener = ErrorManager.getErrorListener();
-			if ( listener instanceof ErrorQueue ) {
-				ErrorQueue equeue = (ErrorQueue)listener;
-				if ( equeue.errors.size()>0 ) {
-					allIsWell = false;
-					System.err.println("antlr reports errors from "+options);
-					for (int i = 0; i < equeue.errors.size(); i++) {
-						Message msg = (Message) equeue.errors.get(i);
-						System.err.println(msg);
-					}
-                    System.out.println("!!!\ngrammar:");
-                    System.out.println(grammarStr);
-                    System.out.println("###");
-                }
-			}
-		}
-		catch (Exception e) {
-			allIsWell = false;
-			System.err.println("problems building grammar: "+e);
-			e.printStackTrace(System.err);
-		}
-		return allIsWell;
-	}
-
-	protected String execLexer(String grammarFileName,
-							   String grammarStr,
-							   String lexerName,
-							   String input,
-							   boolean debug)
-	{
-		rawGenerateAndBuildRecognizer(grammarFileName,
-									  grammarStr,
-									  null,
-									  lexerName,
-									  debug);
-		writeFile(tmpdir, "input", input);
-		return rawExecRecognizer(null,
-								 null,
-								 lexerName,
-								 null,
-								 null,
-								 false,
-								 false,
-								 false,
-								 debug);
-	}
-
-	protected String execParser(String grammarFileName,
-								String grammarStr,
-								String parserName,
-								String lexerName,
-								String startRuleName,
-								String input, boolean debug)
-	{
-		rawGenerateAndBuildRecognizer(grammarFileName,
-									  grammarStr,
-									  parserName,
-									  lexerName,
-									  debug);
-		writeFile(tmpdir, "input", input);
-		boolean parserBuildsTrees =
-			grammarStr.indexOf("output=AST")>=0 ||
-			grammarStr.indexOf("output = AST")>=0;
-		boolean parserBuildsTemplate =
-			grammarStr.indexOf("output=template")>=0 ||
-			grammarStr.indexOf("output = template")>=0;
-		return rawExecRecognizer(parserName,
-								 null,
-								 lexerName,
-								 startRuleName,
-								 null,
-								 parserBuildsTrees,
-								 parserBuildsTemplate,
-								 false,
-								 debug);
-	}
-
-	protected String execTreeParser(String parserGrammarFileName,
-									String parserGrammarStr,
-									String parserName,
-									String treeParserGrammarFileName,
-									String treeParserGrammarStr,
-									String treeParserName,
-									String lexerName,
-									String parserStartRuleName,
-									String treeParserStartRuleName,
-									String input)
-	{
-		return execTreeParser(parserGrammarFileName,
-							  parserGrammarStr,
-							  parserName,
-							  treeParserGrammarFileName,
-							  treeParserGrammarStr,
-							  treeParserName,
-							  lexerName,
-							  parserStartRuleName,
-							  treeParserStartRuleName,
-							  input,
-							  false);
-	}
-
-	protected String execTreeParser(String parserGrammarFileName,
-									String parserGrammarStr,
-									String parserName,
-									String treeParserGrammarFileName,
-									String treeParserGrammarStr,
-									String treeParserName,
-									String lexerName,
-									String parserStartRuleName,
-									String treeParserStartRuleName,
-									String input,
-									boolean debug)
-	{
-		// build the parser
-		rawGenerateAndBuildRecognizer(parserGrammarFileName,
-									  parserGrammarStr,
-									  parserName,
-									  lexerName,
-									  debug);
-
-		// build the tree parser
-		rawGenerateAndBuildRecognizer(treeParserGrammarFileName,
-									  treeParserGrammarStr,
-									  treeParserName,
-									  lexerName,
-									  debug);
-
-		writeFile(tmpdir, "input", input);
-
-		boolean parserBuildsTrees =
-			parserGrammarStr.indexOf("output=AST")>=0 ||
-			parserGrammarStr.indexOf("output = AST")>=0;
-		boolean treeParserBuildsTrees =
-			treeParserGrammarStr.indexOf("output=AST")>=0 ||
-			treeParserGrammarStr.indexOf("output = AST")>=0;
-		boolean parserBuildsTemplate =
-			parserGrammarStr.indexOf("output=template")>=0 ||
-			parserGrammarStr.indexOf("output = template")>=0;
-
-		return rawExecRecognizer(parserName,
-								 treeParserName,
-								 lexerName,
-								 parserStartRuleName,
-								 treeParserStartRuleName,
-								 parserBuildsTrees,
-								 parserBuildsTemplate,
-								 treeParserBuildsTrees,
-								 debug);
-	}
-
-	/** Return true if all is well */
-	protected boolean rawGenerateAndBuildRecognizer(String grammarFileName,
-													String grammarStr,
-													String parserName,
-													String lexerName,
-													boolean debug)
-	{
-		//System.out.println(grammarStr);
-		boolean allIsWell =
-			antlr(grammarFileName, grammarFileName, grammarStr, debug);
-		if ( lexerName!=null ) {
-			boolean ok;
-			if ( parserName!=null ) {
-				ok = compile(parserName+".java");
-				if ( !ok ) { allIsWell = false; }
-			}
-			ok = compile(lexerName+".java");
-			if ( !ok ) { allIsWell = false; }
-		}
-		else {
-			boolean ok = compile(parserName+".java");
-			if ( !ok ) { allIsWell = false; }
-		}
-		return allIsWell;
-	}
-
-	protected String rawExecRecognizer(String parserName,
-									   String treeParserName,
-									   String lexerName,
-									   String parserStartRuleName,
-									   String treeParserStartRuleName,
-									   boolean parserBuildsTrees,
-									   boolean parserBuildsTemplate,
-									   boolean treeParserBuildsTrees,
-									   boolean debug)
-	{
-        this.stderrDuringParse = null;
-		writeRecognizerAndCompile(parserName, treeParserName, lexerName, parserStartRuleName, treeParserStartRuleName, parserBuildsTrees, parserBuildsTemplate, treeParserBuildsTrees, debug);
-
-		return execRecognizer();
-	}
-
-	public String execRecognizer() {
-		try {
-			String inputFile = new File(tmpdir, "input").getAbsolutePath();
-			String[] args = new String[] {
-				"java", "-classpath", tmpdir+pathSep+CLASSPATH,
-				"Test", inputFile
-			};
-			//String cmdLine = "java -classpath "+CLASSPATH+pathSep+tmpdir+" Test " + new File(tmpdir, "input").getAbsolutePath();
-			//System.out.println("execParser: "+cmdLine);
-			Process process =
-				Runtime.getRuntime().exec(args, null, new File(tmpdir));
-			StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream(), inputFile);
-			StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream(), inputFile);
-			stdoutVacuum.start();
-			stderrVacuum.start();
-			process.waitFor();
-			stdoutVacuum.join();
-			stderrVacuum.join();
-			String output = null;
-			output = stdoutVacuum.toString();
-			if ( stderrVacuum.toString().length()>0 ) {
-				this.stderrDuringParse = stderrVacuum.toString();
-				System.err.println("exec stderrVacuum: "+ stderrVacuum);
-			}
-			return output;
-		}
-		catch (Exception e) {
-			System.err.println("can't exec recognizer");
-			e.printStackTrace(System.err);
-		}
-		return null;
-	}
-
-	public void writeRecognizerAndCompile(String parserName, String treeParserName, String lexerName, String parserStartRuleName, String treeParserStartRuleName, boolean parserBuildsTrees, boolean parserBuildsTemplate, boolean treeParserBuildsTrees, boolean debug) {
-		if ( treeParserBuildsTrees && parserBuildsTrees ) {
-			writeTreeAndTreeTestFile(parserName,
-									 treeParserName,
-									 lexerName,
-									 parserStartRuleName,
-									 treeParserStartRuleName,
-									 debug);
-		}
-		else if ( parserBuildsTrees ) {
-			writeTreeTestFile(parserName,
-							  treeParserName,
-							  lexerName,
-							  parserStartRuleName,
-							  treeParserStartRuleName,
-							  debug);
-		}
-		else if ( parserBuildsTemplate ) {
-			writeTemplateTestFile(parserName,
-								  lexerName,
-								  parserStartRuleName,
-								  debug);
-		}
-		else if ( parserName==null ) {
-			writeLexerTestFile(lexerName, debug);
-		}
-		else {
-			writeTestFile(parserName,
-						  lexerName,
-						  parserStartRuleName,
-						  debug);
-		}
-
-		compile("Test.java");
-	}
-
-	protected void checkGrammarSemanticsError(ErrorQueue equeue,
-											  GrammarSemanticsMessage expectedMessage)
-		throws Exception
-	{
-		/*
-				System.out.println(equeue.infos);
-				System.out.println(equeue.warnings);
-				System.out.println(equeue.errors);
-				assertTrue("number of errors mismatch", n, equeue.errors.size());
-						   */
-		Message foundMsg = null;
-		for (int i = 0; i < equeue.errors.size(); i++) {
-			Message m = (Message)equeue.errors.get(i);
-			if (m.msgID==expectedMessage.msgID ) {
-				foundMsg = m;
-			}
-		}
-		assertNotNull("no error; "+expectedMessage.msgID+" expected", foundMsg);
-		assertTrue("error is not a GrammarSemanticsMessage",
-				   foundMsg instanceof GrammarSemanticsMessage);
-		assertEquals(expectedMessage.arg, foundMsg.arg);
-		if ( equeue.size()!=1 ) {
-			System.err.println(equeue);
-		}
-	}
-
-	protected void checkGrammarSemanticsWarning(ErrorQueue equeue,
-												GrammarSemanticsMessage expectedMessage)
-		throws Exception
-	{
-		Message foundMsg = null;
-		for (int i = 0; i < equeue.warnings.size(); i++) {
-			Message m = (Message)equeue.warnings.get(i);
-			if (m.msgID==expectedMessage.msgID ) {
-				foundMsg = m;
-			}
-		}
-		assertNotNull("no error; "+expectedMessage.msgID+" expected", foundMsg);
-		assertTrue("error is not a GrammarSemanticsMessage",
-				   foundMsg instanceof GrammarSemanticsMessage);
-		assertEquals(expectedMessage.arg, foundMsg.arg);
-	}
-
-    protected void checkError(ErrorQueue equeue,
-                              Message expectedMessage)
-        throws Exception
-    {
-        //System.out.println("errors="+equeue);
-        Message foundMsg = null;
-        for (int i = 0; i < equeue.errors.size(); i++) {
-            Message m = (Message)equeue.errors.get(i);
-            if (m.msgID==expectedMessage.msgID ) {
-                foundMsg = m;
-            }
-        }
-        assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size()>0);
-        assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1);
-        assertNotNull("couldn't find expected error: "+expectedMessage.msgID, foundMsg);
-        /*
-        assertTrue("error is not a GrammarSemanticsMessage",
-                   foundMsg instanceof GrammarSemanticsMessage);
-         */
-        assertEquals(expectedMessage.arg, foundMsg.arg);
-        assertEquals(expectedMessage.arg2, foundMsg.arg2);
-        ErrorManager.resetErrorState(); // wack errors for next test
-    }
-
-    public static class StreamVacuum implements Runnable {
-		StringBuffer buf = new StringBuffer();
-		BufferedReader in;
-		Thread sucker;
-		String inputFile;
-		public StreamVacuum(InputStream in, String inputFile) {
-			this.in = new BufferedReader( new InputStreamReader(in) );
-			this.inputFile = inputFile;
-		}
-		public void start() {
-			sucker = new Thread(this);
-			sucker.start();
-		}
-		public void run() {
-			try {
-				String line = in.readLine();
-				while (line!=null) {
-					if (line.startsWith(inputFile))
-						line = line.substring(inputFile.length()+1);
-					buf.append(line);
-					buf.append('\n');
-					line = in.readLine();
-				}
-			}
-			catch (IOException ioe) {
-				System.err.println("can't read output from process");
-			}
-		}
-		/** wait for the thread to finish */
-		public void join() throws InterruptedException {
-			sucker.join();
-		}
-		public String toString() {
-			return buf.toString();
-		}
-	}
-
-    public static class FilteringTokenStream extends CommonTokenStream {
-        public FilteringTokenStream(TokenSource src) { super(src); }
-        Set<Integer> hide = new HashSet<Integer>();
-        protected void sync(int i) {
-            super.sync(i);
-            if ( hide.contains(get(i).getType()) ) get(i).setChannel(Token.HIDDEN_CHANNEL);
-        }
-        public void setTokenTypeChannel(int ttype, int channel) {
-            hide.add(ttype);
-        }
-    }
-
-	protected void writeFile(String dir, String fileName, String content) {
-		try {
-			File f = new File(dir, fileName);
-			FileWriter w = new FileWriter(f);
-			BufferedWriter bw = new BufferedWriter(w);
-			bw.write(content);
-			bw.close();
-			w.close();
-		}
-		catch (IOException ioe) {
-			System.err.println("can't write file");
-			ioe.printStackTrace(System.err);
-		}
-	}
-
-	protected void mkdir(String dir) {
-		File f = new File(dir);
-		f.mkdirs();
-	}
-
-	protected void writeTestFile(String parserName,
-								 String lexerName,
-								 String parserStartRuleName,
-								 boolean debug)
-	{
-		ST outputFileST = new ST(
-			"import org.antlr.runtime.*;\n" +
-			"import org.antlr.runtime.tree.*;\n" +
-			"import org.antlr.runtime.debug.*;\n" +
-			"\n" +
-			"class Profiler2 extends Profiler {\n" +
-			"    public void terminate() { ; }\n" +
-			"}\n"+
-			"public class Test {\n" +
-			"    public static void main(String[] args) throws Exception {\n" +
-			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
-			"        <lexerName> lex = new <lexerName>(input);\n" +
-			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
-			"        <createParser>\n"+
-			"        parser.<parserStartRuleName>();\n" +
-			"    }\n" +
-			"}"
-			);
-		ST createParserST =
-			new ST(
-			"        Profiler2 profiler = new Profiler2();\n"+
-			"        <parserName> parser = new <parserName>(tokens,profiler);\n" +
-			"        profiler.setParser(parser);\n");
-		if ( !debug ) {
-			createParserST =
-				new ST(
-				"        <parserName> parser = new <parserName>(tokens);\n");
-		}
-		outputFileST.add("createParser", createParserST);
-		outputFileST.add("parserName", parserName);
-		outputFileST.add("lexerName", lexerName);
-		outputFileST.add("parserStartRuleName", parserStartRuleName);
-		writeFile(tmpdir, "Test.java", outputFileST.render());
-	}
-
-	protected void writeLexerTestFile(String lexerName, boolean debug) {
-		ST outputFileST = new ST(
-			"import org.antlr.runtime.*;\n" +
-			"import org.antlr.runtime.tree.*;\n" +
-			"import org.antlr.runtime.debug.*;\n" +
-			"\n" +
-			"class Profiler2 extends Profiler {\n" +
-			"    public void terminate() { ; }\n" +
-			"}\n"+
-			"public class Test {\n" +
-			"    public static void main(String[] args) throws Exception {\n" +
-			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
-			"        <lexerName> lex = new <lexerName>(input);\n" +
-			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
-			"        System.out.println(tokens);\n" +
-			"    }\n" +
-			"}"
-			);
-		outputFileST.add("lexerName", lexerName);
-		writeFile(tmpdir, "Test.java", outputFileST.render());
-	}
-
-	protected void writeTreeTestFile(String parserName,
-									 String treeParserName,
-									 String lexerName,
-									 String parserStartRuleName,
-									 String treeParserStartRuleName,
-									 boolean debug)
-	{
-		ST outputFileST = new ST(
-			"import org.antlr.runtime.*;\n" +
-			"import org.antlr.runtime.tree.*;\n" +
-			"import org.antlr.runtime.debug.*;\n" +
-			"\n" +
-			"class Profiler2 extends Profiler {\n" +
-			"    public void terminate() { ; }\n" +
-			"}\n"+
-			"public class Test {\n" +
-			"    public static void main(String[] args) throws Exception {\n" +
-			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
-			"        <lexerName> lex = new <lexerName>(input);\n" +
-			"        TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" +
-			"        <createParser>\n"+
-			"        <parserName>.<parserStartRuleName>_return r = parser.<parserStartRuleName>();\n" +
-			"        <if(!treeParserStartRuleName)>\n" +
-			"        if ( r.tree!=null ) {\n" +
-			"            System.out.println(((Tree)r.tree).toStringTree());\n" +
-			"            ((CommonTree)r.tree).sanityCheckParentAndChildIndexes();\n" +
-			"		 }\n" +
-			"        <else>\n" +
-			"        CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" +
-			"        nodes.setTokenStream(tokens);\n" +
-			"        <treeParserName> walker = new <treeParserName>(nodes);\n" +
-			"        walker.<treeParserStartRuleName>();\n" +
-			"        <endif>\n" +
-			"    }\n" +
-			"}"
-			);
-		ST createParserST =
-			new ST(
-			"        Profiler2 profiler = new Profiler2();\n"+
-			"        <parserName> parser = new <parserName>(tokens,profiler);\n" +
-			"        profiler.setParser(parser);\n");
-		if ( !debug ) {
-			createParserST =
-				new ST(
-				"        <parserName> parser = new <parserName>(tokens);\n");
-		}
-		outputFileST.add("createParser", createParserST);
-		outputFileST.add("parserName", parserName);
-		outputFileST.add("treeParserName", treeParserName);
-		outputFileST.add("lexerName", lexerName);
-		outputFileST.add("parserStartRuleName", parserStartRuleName);
-		outputFileST.add("treeParserStartRuleName", treeParserStartRuleName);
-		writeFile(tmpdir, "Test.java", outputFileST.render());
-	}
-
-	/** Parser creates trees and so does the tree parser */
-	protected void writeTreeAndTreeTestFile(String parserName,
-											String treeParserName,
-											String lexerName,
-											String parserStartRuleName,
-											String treeParserStartRuleName,
-											boolean debug)
-	{
-		ST outputFileST = new ST(
-			"import org.antlr.runtime.*;\n" +
-			"import org.antlr.runtime.tree.*;\n" +
-			"import org.antlr.runtime.debug.*;\n" +
-			"\n" +
-			"class Profiler2 extends Profiler {\n" +
-			"    public void terminate() { ; }\n" +
-			"}\n"+
-			"public class Test {\n" +
-			"    public static void main(String[] args) throws Exception {\n" +
-			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
-			"        <lexerName> lex = new <lexerName>(input);\n" +
-			"        TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" +
-			"        <createParser>\n"+
-			"        <parserName>.<parserStartRuleName>_return r = parser.<parserStartRuleName>();\n" +
-			"        ((CommonTree)r.tree).sanityCheckParentAndChildIndexes();\n" +
-			"        CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" +
-			"        nodes.setTokenStream(tokens);\n" +
-			"        <treeParserName> walker = new <treeParserName>(nodes);\n" +
-			"        <treeParserName>.<treeParserStartRuleName>_return r2 = walker.<treeParserStartRuleName>();\n" +
-			"		 CommonTree rt = ((CommonTree)r2.tree);\n" +
-			"		 if ( rt!=null ) System.out.println(((CommonTree)r2.tree).toStringTree());\n" +
-			"    }\n" +
-			"}"
-			);
-		ST createParserST =
-			new ST(
-			"        Profiler2 profiler = new Profiler2();\n"+
-			"        <parserName> parser = new <parserName>(tokens,profiler);\n" +
-			"        profiler.setParser(parser);\n");
-		if ( !debug ) {
-			createParserST =
-				new ST(
-				"        <parserName> parser = new <parserName>(tokens);\n");
-		}
-		outputFileST.add("createParser", createParserST);
-		outputFileST.add("parserName", parserName);
-		outputFileST.add("treeParserName", treeParserName);
-		outputFileST.add("lexerName", lexerName);
-		outputFileST.add("parserStartRuleName", parserStartRuleName);
-		outputFileST.add("treeParserStartRuleName", treeParserStartRuleName);
-		writeFile(tmpdir, "Test.java", outputFileST.render());
-	}
-
-	protected void writeTemplateTestFile(String parserName,
-										 String lexerName,
-										 String parserStartRuleName,
-										 boolean debug)
-	{
-		ST outputFileST = new ST(
-			"import org.antlr.runtime.*;\n" +
-			"import org.antlr.stringtemplate.*;\n" +
-			"import org.antlr.stringtemplate.language.*;\n" +
-			"import org.antlr.runtime.debug.*;\n" +
-			"import java.io.*;\n" +
-			"\n" +
-			"class Profiler2 extends Profiler {\n" +
-			"    public void terminate() { ; }\n" +
-			"}\n"+
-			"public class Test {\n" +
-			"    static String templates = \"group T; foo(x,y) ::= \\\"\\<x> \\<y>\\\"\";\n" +
-			"    static StringTemplateGroup group ="+
-			"    		new StringTemplateGroup(new StringReader(templates)," +
-			"					AngleBracketTemplateLexer.class);"+
-			"    public static void main(String[] args) throws Exception {\n" +
-			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
-			"        <lexerName> lex = new <lexerName>(input);\n" +
-			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
-			"        <createParser>\n"+
-			"		 parser.setTemplateLib(group);\n"+
-			"        <parserName>.<parserStartRuleName>_return r = parser.<parserStartRuleName>();\n" +
-			"        if ( r.st!=null )\n" +
-			"            System.out.print(r.st.toString());\n" +
-			"	 	 else\n" +
-			"            System.out.print(\"\");\n" +
-			"    }\n" +
-			"}"
-			);
-		ST createParserST =
-			new ST(
-			"        Profiler2 profiler = new Profiler2();\n"+
-			"        <parserName> parser = new <parserName>(tokens,profiler);\n" +
-			"        profiler.setParser(parser);\n");
-		if ( !debug ) {
-			createParserST =
-				new ST(
-				"        <parserName> parser = new <parserName>(tokens);\n");
-		}
-		outputFileST.add("createParser", createParserST);
-		outputFileST.add("parserName", parserName);
-		outputFileST.add("lexerName", lexerName);
-		outputFileST.add("parserStartRuleName", parserStartRuleName);
-		writeFile(tmpdir, "Test.java", outputFileST.render());
-	}
-
-    protected void eraseFiles(final String filesEndingWith) {
-        File tmpdirF = new File(tmpdir);
-        String[] files = tmpdirF.list();
-        for(int i = 0; files!=null && i < files.length; i++) {
-            if ( files[i].endsWith(filesEndingWith) ) {
-                new File(tmpdir+"/"+files[i]).delete();
-            }
-        }
-    }
-
-    protected void eraseFiles() {
-        File tmpdirF = new File(tmpdir);
-        String[] files = tmpdirF.list();
-        for(int i = 0; files!=null && i < files.length; i++) {
-            new File(tmpdir+"/"+files[i]).delete();
-        }
-    }
-
-    protected void eraseTempDir() {
-        File tmpdirF = new File(tmpdir);
-        if ( tmpdirF.exists() ) {
-            eraseFiles();
-            tmpdirF.delete();
-        }
-    }
-
-	public String getFirstLineOfException() {
-		if ( this.stderrDuringParse ==null ) {
-			return null;
-		}
-		String[] lines = this.stderrDuringParse.split("\n");
-		String prefix="Exception in thread \"main\" ";
-		return lines[0].substring(prefix.length(),lines[0].length());
-	}
-
-	public List realElements(List elements) {
-		List n = new ArrayList();
-		for (int i = Label.NUM_FAUX_LABELS+Label.MIN_TOKEN_TYPE - 1; i < elements.size(); i++) {
-			Object o = (Object) elements.get(i);
-			if ( o!=null ) {
-				n.add(o);
-			}
-		}
-		return n;
-	}
-
-	public List<String> realElements(Map<String, Integer> elements) {
-		List n = new ArrayList();
-		Iterator iterator = elements.keySet().iterator();
-		while (iterator.hasNext()) {
-			String tokenID = (String) iterator.next();
-			if ( elements.get(tokenID) >= Label.MIN_TOKEN_TYPE ) {
-				n.add(tokenID+"="+elements.get(tokenID));
-			}
-		}
-		Collections.sort(n);
-		return n;
-	}
-
-    public String sortLinesInString(String s) {
-        String lines[] = s.split("\n");
-        Arrays.sort(lines);
-        List<String> linesL = Arrays.asList(lines);
-        StringBuffer buf = new StringBuffer();
-        for (String l : linesL) {
-            buf.append(l);
-            buf.append('\n');
-        }
-        return buf.toString();
-    }
-
-    /**
-     * When looking at a result set that consists of a Map/HashTable
-     * we cannot rely on the output order, as the hashing algorithm or other aspects
-     * of the implementation may be different on different JDKs or platforms. Hence
-     * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a
-     * bit of a hack, but guarantees that we get the same order on all systems. We assume that
-     * the keys are strings.
-     *
-     * @param m The Map that contains keys we wish to return in sorted order
-     * @return A string that represents all the keys in sorted order.
-     */
-    public String sortMapToString(Map m) {
-
-        System.out.println("Map toString looks like: " + m.toString());
-        // Pass in crap, and get nothing back
-        //
-        if  (m == null) {
-            return null;
-        }
-
-        // Sort the keys in the Map
-        //
-        TreeMap nset = new TreeMap(m);
-
-        System.out.println("Tree map looks like: " + nset.toString());
-        return nset.toString();
-    }
-
-    // override to track errors
-
-    public void assertEquals(String msg, Object a, Object b) { try {Assert.assertEquals(msg,a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
-    public void assertEquals(Object a, Object b) { try {Assert.assertEquals(a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
-    public void assertEquals(String msg, long a, long b) { try {Assert.assertEquals(msg,a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
-    public void assertEquals(long a, long b) { try {Assert.assertEquals(a,b);} catch (Error e) {lastTestFailed=true; throw e;} }
-
-    public void assertTrue(String msg, boolean b) { try {Assert.assertTrue(msg,b);} catch (Error e) {lastTestFailed=true; throw e;} }
-    public void assertTrue(boolean b) { try {Assert.assertTrue(b);} catch (Error e) {lastTestFailed=true; throw e;} }
-
-    public void assertFalse(String msg, boolean b) { try {Assert.assertFalse(msg,b);} catch (Error e) {lastTestFailed=true; throw e;} }
-    public void assertFalse(boolean b) { try {Assert.assertFalse(b);} catch (Error e) {lastTestFailed=true; throw e;} }
-
-    public void assertNotNull(String msg, Object p) { try {Assert.assertNotNull(msg, p);} catch (Error e) {lastTestFailed=true; throw e;} }
-    public void assertNotNull(Object p) { try {Assert.assertNotNull(p);} catch (Error e) {lastTestFailed=true; throw e;} }
-
-    public void assertNull(String msg, Object p) { try {Assert.assertNull(msg, p);} catch (Error e) {lastTestFailed=true; throw e;} }
-    public void assertNull(Object p) { try {Assert.assertNull(p);} catch (Error e) {lastTestFailed=true; throw e;} }
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/ErrorQueue.java b/antlr-3.4/tool/src/test/java/org/antlr/test/ErrorQueue.java
deleted file mode 100644
index 90c7b65..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/ErrorQueue.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.tool.ANTLRErrorListener;
-import org.antlr.tool.Message;
-import org.antlr.tool.ToolMessage;
-
-import java.util.LinkedList;
-import java.util.List;
-
-public class ErrorQueue implements ANTLRErrorListener {
-	List infos = new LinkedList();
-	List errors = new LinkedList();
-	List warnings = new LinkedList();
-
-	public void info(String msg) {
-		infos.add(msg);
-	}
-
-	public void error(Message msg) {
-		errors.add(msg);
-	}
-
-	public void warning(Message msg) {
-		warnings.add(msg);
-	}
-
-	public void error(ToolMessage msg) {
-		errors.add(msg);
-	}
-
-	public int size() {
-		return infos.size() + errors.size() + warnings.size();
-	}
-
-	public String toString() {
-		return "infos: "+infos+
-			"errors: "+errors+
-			"warnings: "+warnings;
-	}
-}
-
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestASTConstruction.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestASTConstruction.java
deleted file mode 100644
index 88df12a..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestASTConstruction.java
+++ /dev/null
@@ -1,372 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.tool.Grammar;
-import org.junit.Test;
-
-public class TestASTConstruction extends BaseTest {
-
-    /** Public default constructor used by TestRig */
-    public TestASTConstruction() {
-    }
-
-	@Test public void testA() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : A;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT A <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testEmptyAlt() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : ;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT epsilon <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testNakeRulePlusInLexer() throws Exception {
-		Grammar g = new Grammar(
-				"lexer grammar P;\n"+
-				"A : B+;\n" +
-				"B : 'a';");
-		String expecting =
-			"(rule A ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT B <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("A").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRulePlus() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : (b)+;\n" +
-				"b : B;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testNakedRulePlus() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : b+;\n" +
-				"b : B;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRuleOptional() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : (b)?;\n" +
-				"b : B;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (? (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testNakedRuleOptional() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : b?;\n" +
-				"b : B;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (? (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRuleStar() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : (b)*;\n" +
-				"b : B;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testNakedRuleStar() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar P;\n"+
-				"a : b*;\n" +
-				"b : B;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testCharStar() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : 'a'*;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT 'a' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testCharStarInLexer() throws Exception {
-		Grammar g = new Grammar(
-				"lexer grammar P;\n"+
-				"B : 'b'*;");
-		String expecting =
-			"(rule B ARG RET scope (BLOCK (ALT (* (BLOCK (ALT 'b' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("B").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testStringStar() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : 'while'*;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT 'while' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testStringStarInLexer() throws Exception {
-		Grammar g = new Grammar(
-				"lexer grammar P;\n"+
-				"B : 'while'*;");
-		String expecting =
-			"(rule B ARG RET scope (BLOCK (ALT (* (BLOCK (ALT 'while' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("B").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testCharPlus() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : 'a'+;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT 'a' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testCharPlusInLexer() throws Exception {
-		Grammar g = new Grammar(
-				"lexer grammar P;\n"+
-				"B : 'b'+;");
-		String expecting =
-			"(rule B ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT 'b' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("B").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testCharOptional() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : 'a'?;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (? (BLOCK (ALT 'a' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testCharOptionalInLexer() throws Exception {
-		Grammar g = new Grammar(
-				"lexer grammar P;\n"+
-				"B : 'b'?;");
-		String expecting =
-			"(rule B ARG RET scope (BLOCK (ALT (? (BLOCK (ALT 'b' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("B").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testCharRangePlus() throws Exception {
-		Grammar g = new Grammar(
-				"lexer grammar P;\n"+
-				"ID : 'a'..'z'+;");
-		String expecting =
-			"(rule ID ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT (.. 'a' 'z') <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("ID").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testLabel() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x=ID;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (= x ID) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testLabelOfOptional() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x=ID?;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (? (BLOCK (ALT (= x ID) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testLabelOfClosure() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x=ID*;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT (= x ID) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRuleLabel() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x=b;\n" +
-				"b : ID;\n");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (= x b) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testSetLabel() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x=(A|B);\n");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (= x (BLOCK (ALT A <end-of-alt>) (ALT B <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testNotSetLabel() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x=~(A|B);\n");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (= x (~ (BLOCK (ALT A <end-of-alt>) (ALT B <end-of-alt>) <end-of-block>))) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testNotSetListLabel() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x+=~(A|B);\n");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (+= x (~ (BLOCK (ALT A <end-of-alt>) (ALT B <end-of-alt>) <end-of-block>))) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testNotSetListLabelInLoop() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x+=~(A|B)+;\n");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT (+= x (~ (BLOCK (ALT A <end-of-alt>) (ALT B <end-of-alt>) <end-of-block>))) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRuleLabelOfPositiveClosure() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x=b+;\n" +
-				"b : ID;\n");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT (= x b) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testListLabelOfClosure() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x+=ID*;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT (+= x ID) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testListLabelOfClosure2() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n"+
-				"a : x+='int'*;");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT (+= x 'int') <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRuleListLabelOfPositiveClosure() throws Exception {
-		Grammar g = new Grammar(
-				"grammar P;\n" +
-				"options {output=AST;}\n"+
-				"a : x+=b+;\n" +
-				"b : ID;\n");
-		String expecting =
-			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT (+= x b) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("a").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRootTokenInStarLoop() throws Exception {
-		Grammar g = new Grammar(
-				"grammar Expr;\n" +
-				"options { output=AST; backtrack=true; }\n" +
-				"a : ('*'^)* ;\n");  // bug: the synpred had nothing in it
-		String expecting =
-			"(rule synpred1_Expr ARG RET scope (BLOCK (ALT '*' <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("synpred1_Expr").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testActionInStarLoop() throws Exception {
-		Grammar g = new Grammar(
-				"grammar Expr;\n" +
-				"options { backtrack=true; }\n" +
-				"a : ({blort} 'x')* ;\n");  // bug: the synpred had nothing in it
-		String expecting =
-			"(rule synpred1_Expr ARG RET scope (BLOCK (ALT blort 'x' <end-of-alt>) <end-of-block>) <end-of-rule>)";
-		String found = g.getRule("synpred1_Expr").tree.toStringTree();
-		assertEquals(expecting, found);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestAttributes.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestAttributes.java
deleted file mode 100644
index 5a6ffb6..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestAttributes.java
+++ /dev/null
@@ -1,3118 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.grammar.v3.ActionTranslator;
-import org.antlr.runtime.CommonToken;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-import org.antlr.tool.*;
-import org.junit.Test;
-
-import java.io.StringReader;
-import java.util.ArrayList;
-import java.util.List;
-
-/** Check the $x, $x.y attributes.  For checking the actual
- *  translation, assume the Java target.  This is still a great test
- *  for the semantics of the $x.y stuff regardless of the target.
- */
-public class TestAttributes extends BaseTest {
-
-	/** Public default constructor used by TestRig */
-	public TestAttributes() {
-	}
-
-	@Test public void testEscapedLessThanInAction() throws Exception {
-		Grammar g = new Grammar();
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		String action = "i<3; '<xmltag>'";
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),0);
-		String expecting = action;
-		String rawTranslation =
-			translator.translate();
-		STGroup templates =
-			new STGroup();
-		ST actionST = new ST(templates, "<action>");
-		actionST.add("action", rawTranslation);
-		String found = actionST.render();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testEscaped$InAction() throws Exception {
-		String action = "int \\$n; \"\\$in string\\$\"";
-		String expecting = "int $n; \"$in string$\"";
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"@members {"+action+"}\n"+
-				"a[User u, int i]\n" +
-				"        : {"+action+"}\n" +
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"a",
-				new CommonToken(ANTLRParser.ACTION,action),0);
-		String found = translator.translate();		assertEquals(expecting, found);
-	}
-
-	@Test public void testArguments() throws Exception {
-		String action = "$i; $i.x; $u; $u.x";
-		String expecting = "i; i.x; u; u.x";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[User u, int i]\n" +
-				"        : {"+action+"}\n" +
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testComplicatedArgParsing() throws Exception {
-		String action = "x, (*a).foo(21,33), 3.2+1, '\\n', "+
-			"\"a,oo\\nick\", {bl, \"fdkj\"eck}";
-		String expecting = "x, (*a).foo(21,33), 3.2+1, '\\n', \"a,oo\\nick\", {bl, \"fdkj\"eck}";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		// now check in actual grammar.
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[User u, int i]\n" +
-				"        : A a["+action+"] B\n" +
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =	translator.translate();
-		assertEquals(expecting, rawTranslation);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testBracketArgParsing() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		// now check in actual grammar.
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[String[\\] ick, int i]\n" +
-				"        : A \n"+
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		Rule r = g.getRule("a");
-		AttributeScope parameters = r.parameterScope;
-		List<Attribute> attrs = parameters.getAttributes();
-		assertEquals("attribute mismatch","String[] ick",attrs.get(0).decl.toString());
-		assertEquals("parameter name mismatch","ick",attrs.get(0).name);
-		assertEquals("declarator mismatch", "String[]", attrs.get(0).type);
-
-		assertEquals("attribute mismatch","int i",attrs.get(1).decl.toString());
-		assertEquals("parameter name mismatch","i",attrs.get(1).name);
-		assertEquals("declarator mismatch", "int", attrs.get(1).type);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testStringArgParsing() throws Exception {
-		String action = "34, '{', \"it's<\", '\"', \"\\\"\", 19";
-		String expecting = "34, '{', \"it's<\", '\"', \"\\\"\", 19";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		// now check in actual grammar.
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[User u, int i]\n" +
-				"        : A a["+action+"] B\n" +
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =	translator.translate();
-		assertEquals(expecting, rawTranslation);
-
-		List<String> expectArgs = new ArrayList<String>() {
-			{add("34");}
-			{add("'{'");}
-			{add("\"it's<\"");}
-			{add("'\"'");}
-			{add("\"\\\"\"");} // that's "\""
-			{add("19");}
-		};
-		List<String> actualArgs = CodeGenerator.getListOfArgumentsFromAction(action, ',');
-		assertEquals("args mismatch", expectArgs, actualArgs);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testComplicatedSingleArgParsing() throws Exception {
-		String action = "(*a).foo(21,33,\",\")";
-		String expecting = "(*a).foo(21,33,\",\")";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		// now check in actual grammar.
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[User u, int i]\n" +
-				"        : A a["+action+"] B\n" +
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =	translator.translate();
-		assertEquals(expecting, rawTranslation);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testArgWithLT() throws Exception {
-		String action = "34<50";
-		String expecting = "34<50";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		// now check in actual grammar.
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[boolean b]\n" +
-				"        : A a["+action+"] B\n" +
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-		assertEquals(expecting, rawTranslation);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testGenericsAsArgumentDefinition() throws Exception {
-		String action = "$foo.get(\"ick\");";
-		String expecting = "foo.get(\"ick\");";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String grammar =
-			"parser grammar T;\n"+
-				"a[HashMap<String,String> foo]\n" +
-				"        : {"+action+"}\n" +
-				"        ;";
-		Grammar g = new Grammar(grammar);
-		Rule ra = g.getRule("a");
-		List<Attribute> attrs = ra.parameterScope.getAttributes();
-		assertEquals("attribute mismatch","HashMap<String,String> foo",attrs.get(0).decl.toString());
-		assertEquals("parameter name mismatch","foo",attrs.get(0).name);
-		assertEquals("declarator mismatch", "HashMap<String,String>", attrs.get(0).type);
-
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testGenericsAsArgumentDefinition2() throws Exception {
-		String action = "$foo.get(\"ick\"); x=3;";
-		String expecting = "foo.get(\"ick\"); x=3;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String grammar =
-			"parser grammar T;\n"+
-				"a[HashMap<String,String> foo, int x, List<String> duh]\n" +
-				"        : {"+action+"}\n" +
-				"        ;";
-		Grammar g = new Grammar(grammar);
-		Rule ra = g.getRule("a");
-		List<Attribute> attrs = ra.parameterScope.getAttributes();
-
-		assertEquals("attribute mismatch","HashMap<String,String> foo",attrs.get(0).decl.toString().trim());
-		assertEquals("parameter name mismatch","foo",attrs.get(0).name);
-		assertEquals("declarator mismatch", "HashMap<String,String>", attrs.get(0).type);
-
-		assertEquals("attribute mismatch","int x",attrs.get(1).decl.toString().trim());
-		assertEquals("parameter name mismatch","x",attrs.get(1).name);
-		assertEquals("declarator mismatch", "int", attrs.get(1).type);
-
-		assertEquals("attribute mismatch","List<String> duh",attrs.get(2).decl.toString().trim());
-		assertEquals("parameter name mismatch","duh",attrs.get(2).name);
-		assertEquals("declarator mismatch", "List<String>", attrs.get(2).type);
-
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testGenericsAsReturnValue() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String grammar =
-			"parser grammar T;\n"+
-				"a returns [HashMap<String,String> foo] : ;\n";
-		Grammar g = new Grammar(grammar);
-		Rule ra = g.getRule("a");
-		List<Attribute> attrs = ra.returnScope.getAttributes();
-		assertEquals("attribute mismatch","HashMap<String,String> foo",attrs.get(0).decl.toString());
-		assertEquals("parameter name mismatch","foo",attrs.get(0).name);
-		assertEquals("declarator mismatch", "HashMap<String,String>", attrs.get(0).type);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testComplicatedArgParsingWithTranslation() throws Exception {
-		String action = "x, $A.text+\"3242\", (*$A).foo(21,33), 3.2+1, '\\n', "+
-			"\"a,oo\\nick\", {bl, \"fdkj\"eck}";
-		String expecting = "x, (A1!=null?A1.getText():null)+\"3242\", (*A1).foo(21,33), 3.2+1, '\\n', \"a,oo\\nick\", {bl, \"fdkj\"eck}";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		// now check in actual grammar.
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[User u, int i]\n" +
-				"        : A a["+action+"] B\n" +
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	/** $x.start refs are checked during translation, not before, so ANTLR misses
-	 the fact that rule r refs predefined attributes when the ref comes after the
-	 method's definition or is self-referential.  This would actually be fine if
-	 actions were kept as templates instead of being converted to strings.
-	 June 9, 2006: made action translation leave templates, not strings.
-	 */
-	@Test public void testRefToReturnValueBeforeRefToPredefinedAttr() throws Exception {
-		String action = "$x.foo";
-		String expecting = "(x!=null?x.foo:0)";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a : x=b {"+action+"} ;\n" +
-				"b returns [int foo] : B {$b.start} ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRuleLabelBeforeRefToPredefinedAttr() throws Exception {
-		// As of Mar 2007, I'm removing unused labels.  Unfortunately,
-		// the action is not seen until code gen, so $x.text can't be seen
-		// before unused labels are stripped.  We really need to translate
-		// actions first so the code gen logic can use that info.
-		String action = "$x.text";
-		String expecting = "(x!=null?input.toString(x.start,x.stop):null)";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a : x=b {###"+action+"!!!} ;\n" +
-				"b : B ;\n");
-		Tool antlr = newTool();
-
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // codegen phase sets some vars we need
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testInvalidArguments() throws Exception {
-		String action = "$x";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[User u, int i]\n" +
-				"        : {"+action+"}\n" +
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator = new ActionTranslator(generator,
-			"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE;
-		Object expectedArg = "x";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testReturnValue() throws Exception {
-		String action = "$x.i";
-		String expecting = "x";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a returns [int i]\n" +
-				"        : 'a'\n" +
-				"        ;\n" +
-				"b : x=a {"+action+"} ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"b",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found =	translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testActionNotMovedToSynPred() throws Exception {
-		String action = "$b = true;";
-		String expecting = "retval.b = true;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-			"options {output=AST;}\n" + // push b into retval struct
-			"a returns [boolean b]\n" +
-			"options {backtrack=true;}\n" +
-			"   : 'a' {"+action+"}\n" +
-			"   | 'a'\n" +
-			"   ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"a",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found =	translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testReturnValueWithNumber() throws Exception {
-		String action = "$x.i1";
-		String expecting = "x";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a returns [int i1]\n" +
-				"        : 'a'\n" +
-				"        ;\n" +
-				"b : x=a {"+action+"} ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"b",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testReturnValues() throws Exception {
-		String action = "$i; $i.x; $u; $u.x";
-		String expecting = "retval.i; retval.i.x; retval.u; retval.u.x";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a returns [User u, int i]\n" +
-				"        : {"+action+"}\n" +
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	/* regression test for ANTLR-46 */
-	@Test public void testReturnWithMultipleRuleRefs() throws Exception {
-		String action1 = "$obj = $rule2.obj;";
-		String action2 = "$obj = $rule3.obj;";
-		String expecting1 = "obj = rule21;";
-		String expecting2 = "obj = rule32;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-				"rule1 returns [ Object obj ]\n" +
-				":	rule2 { "+action1+" }\n" +
-				"|	rule3 { "+action2+" }\n" +
-				";\n"+
-				"rule2 returns [ Object obj ]\n"+
-				":	foo='foo' { $obj = $foo.text; }\n"+
-				";\n"+
-				"rule3 returns [ Object obj ]\n"+
-				":	bar='bar' { $obj = $bar.text; }\n"+
-				";");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		int i = 0;
-		String action = action1;
-		String expecting = expecting1;
-		do {
-			ActionTranslator translator = new ActionTranslator(generator,"rule1",
-				new CommonToken(ANTLRParser.ACTION,action),i+1);
-			String found = translator.translate();
-			assertEquals(expecting, found);
-			action = action2;
-			expecting = expecting2;
-		} while (i++ < 1);
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testInvalidReturnValues() throws Exception {
-		String action = "$x";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a returns [User u, int i]\n" +
-				"        : {"+action+"}\n" +
-				"        ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE;
-		Object expectedArg = "x";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testTokenLabels() throws Exception {
-		String action = "$id; $f; $id.text; $id.getText(); $id.dork " +
-			"$id.type; $id.line; $id.pos; " +
-			"$id.channel; $id.index;";
-		String expecting = "id; f; (id!=null?id.getText():null); id.getText(); id.dork (id!=null?id.getType():0); (id!=null?id.getLine():0); (id!=null?id.getCharPositionInLine():0); (id!=null?id.getChannel():0); (id!=null?id.getTokenIndex():0);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a : id=ID f=FLOAT {"+action+"}\n" +
-				"  ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRuleLabels() throws Exception {
-		String action = "$r.x; $r.start;\n $r.stop;\n $r.tree; $a.x; $a.stop;";
-		String expecting = "(r!=null?r.x:0); (r!=null?((Token)r.start):null);" + newline +
-			"             (r!=null?((Token)r.stop):null);" + newline +
-			"             (r!=null?((Object)r.tree):null); (r!=null?r.x:0); (r!=null?((Token)r.stop):null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a {###"+action+"!!!}\n" +
-				"  ;");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // codegen phase sets some vars we need
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testAmbiguRuleRef() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a : A a {$a.text} | B ;");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		// error(132): <string>:2:9: reference $a is ambiguous; rule a is enclosing rule and referenced in the production
-		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
-	}
-
-	@Test public void testRuleLabelsWithSpecialToken() throws Exception {
-		String action = "$r.x; $r.start; $r.stop; $r.tree; $a.x; $a.stop;";
-		String expecting = "(r!=null?r.x:0); (r!=null?((MYTOKEN)r.start):null); (r!=null?((MYTOKEN)r.stop):null); (r!=null?((Object)r.tree):null); (r!=null?r.x:0); (r!=null?((MYTOKEN)r.stop):null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"options {TokenLabelType=MYTOKEN;}\n"+
-				"a returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a {###"+action+"!!!}\n" +
-				"  ;");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // codegen phase sets some vars we need
-
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testForwardRefRuleLabels() throws Exception {
-		String action = "$r.x; $r.start; $r.stop; $r.tree; $a.x; $a.tree;";
-		String expecting = "(r!=null?r.x:0); (r!=null?((Token)r.start):null); (r!=null?((Token)r.stop):null); (r!=null?((Object)r.tree):null); (r!=null?r.x:0); (r!=null?((Object)r.tree):null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"b : r=a {###"+action+"!!!}\n" +
-				"  ;\n" +
-				"a returns [int x]\n" +
-				"  : ;\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // codegen phase sets some vars we need
-
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testInvalidRuleLabelAccessesParameter() throws Exception {
-		String action = "$r.z";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[int z] returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a[3] {"+action+"}\n" +
-				"  ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator = new ActionTranslator(generator, "b",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_INVALID_RULE_PARAMETER_REF;
-		Object expectedArg = "a";
-		Object expectedArg2 = "z";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testInvalidRuleLabelAccessesScopeAttribute() throws Exception {
-		String action = "$r.n";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a\n" +
-				"scope { int n; }\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a[3] {"+action+"}\n" +
-				"  ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator = new ActionTranslator(generator, "b",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF;
-		Object expectedArg = "a";
-		Object expectedArg2 = "n";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testInvalidRuleAttribute() throws Exception {
-		String action = "$r.blort";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[int z] returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a[3] {"+action+"}\n" +
-				"  ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator = new ActionTranslator(generator, "b",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_UNKNOWN_RULE_ATTRIBUTE;
-		Object expectedArg = "a";
-		Object expectedArg2 = "blort";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testMissingRuleAttribute() throws Exception {
-		String action = "$r";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a[int z] returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a[3] {"+action+"}\n" +
-				"  ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator = new ActionTranslator(generator, "b",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-
-		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
-		Object expectedArg = "r";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testMissingUnlabeledRuleAttribute() throws Exception {
-		String action = "$a";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a returns [int x]:\n" +
-				"  ;\n"+
-				"b : a {"+action+"}\n" +
-				"  ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator = new ActionTranslator(generator, "b",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-
-		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
-		Object expectedArg = "a";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testNonDynamicAttributeOutsideRule() throws Exception {
-		String action = "public void foo() { $x; }";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"@members {'+action+'}\n" +
-				"a : ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator = new ActionTranslator(generator,
-			null,
-			new CommonToken(ANTLRParser.ACTION,action),0);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE;
-		Object expectedArg = "x";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testNonDynamicAttributeOutsideRule2() throws Exception {
-		String action = "public void foo() { $x.y; }";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"@members {'+action+'}\n" +
-				"a : ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator = new ActionTranslator(generator,
-			null,
-			new CommonToken(ANTLRParser.ACTION,action),0);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE;
-		Object expectedArg = "x";
-		Object expectedArg2 = "y";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	// D Y N A M I C A L L Y  S C O P E D  A T T R I B U T E S
-
-	@Test public void testBasicGlobalScope() throws Exception {
-		String action = "$Symbols::names.add($id.text);";
-		String expecting = "((Symbols_scope)Symbols_stack.peek()).names.add((id!=null?id.getText():null));";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"  List names;\n" +
-				"}\n" +
-				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testUnknownGlobalScope() throws Exception {
-		String action = "$Symbols::names.add($id.text);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-
-		assertEquals("unexpected errors: "+equeue, 2, equeue.errors.size());
-
-		int expectedMsgID = ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE;
-		Object expectedArg = "Symbols";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testIndexedGlobalScope() throws Exception {
-		String action = "$Symbols[-1]::names.add($id.text);";
-		String expecting =
-			"((Symbols_scope)Symbols_stack.elementAt(Symbols_stack.size()-1-1)).names.add((id!=null?id.getText():null));";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"  List names;\n" +
-				"}\n" +
-				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void test0IndexedGlobalScope() throws Exception {
-		String action = "$Symbols[0]::names.add($id.text);";
-		String expecting =
-			"((Symbols_scope)Symbols_stack.elementAt(0)).names.add((id!=null?id.getText():null));";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"  List names;\n" +
-				"}\n" +
-				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testAbsoluteIndexedGlobalScope() throws Exception {
-		String action = "$Symbols[3]::names.add($id.text);";
-		String expecting =
-			"((Symbols_scope)Symbols_stack.elementAt(3)).names.add((id!=null?id.getText():null));";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"  List names;\n" +
-				"}\n" +
-				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testScopeAndAttributeWithUnderscore() throws Exception {
-		String action = "$foo_bar::a_b;";
-		String expecting = "((foo_bar_scope)foo_bar_stack.peek()).a_b;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"scope foo_bar {\n" +
-				"  int a_b;\n" +
-				"}\n" +
-				"a scope foo_bar; : (ID {"+action+"} )+\n" +
-				"  ;\n" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testSharedGlobalScope() throws Exception {
-		String action = "$Symbols::x;";
-		String expecting = "((Symbols_scope)Symbols_stack.peek()).x;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  String x;\n" +
-				"}\n" +
-				"a\n"+
-				"scope { int y; }\n"+
-				"scope Symbols;\n" +
-				" : b {"+action+"}\n" +
-				" ;\n" +
-				"b : ID {$Symbols::x=$ID.text} ;\n" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testGlobalScopeOutsideRule() throws Exception {
-		String action = "public void foo() {$Symbols::names.add('foo');}";
-		String expecting = "public void foo() {((Symbols_scope)Symbols_stack.peek()).names.add('foo');}";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"  List names;\n" +
-				"}\n" +
-				"@members {'+action+'}\n" +
-				"a : \n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRuleScopeOutsideRule() throws Exception {
-		String action = "public void foo() {$a::name;}";
-		String expecting = "public void foo() {((a_scope)a_stack.peek()).name;}";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"@members {"+action+"}\n" +
-				"a\n" +
-				"scope { String name; }\n" +
-				"  : {foo();}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-			null,
-			new CommonToken(ANTLRParser.ACTION,action),0);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testBasicRuleScope() throws Exception {
-		String action = "$a::n;";
-		String expecting = "((a_scope)a_stack.peek()).n;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testUnqualifiedRuleScopeAccessInsideRule() throws Exception {
-		String action = "$n;";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-
-		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_ATTRIBUTE;
-		Object expectedArg = "n";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg,
-				expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testIsolatedDynamicRuleScopeRef() throws Exception {
-		String action = "$a;"; // refers to stack not top of stack
-		String expecting = "a_stack;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : b ;\n" +
-				"b : {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator, "b",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testDynamicRuleScopeRefInSubrule() throws Exception {
-		String action = "$a::n;";
-		String expecting = "((a_scope)a_stack.peek()).n;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  float n;\n" +
-				"} : b ;\n" +
-				"b : {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator, "b",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testIsolatedGlobalScopeRef() throws Exception {
-		String action = "$Symbols;";
-		String expecting = "Symbols_stack;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  String x;\n" +
-				"}\n" +
-				"a\n"+
-				"scope { int y; }\n"+
-				"scope Symbols;\n" +
-				" : b {"+action+"}\n" +
-				" ;\n" +
-				"b : ID {$Symbols::x=$ID.text} ;\n" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRuleScopeFromAnotherRule() throws Exception {
-		String action = "$a::n;"; // must be qualified
-		String expecting = "((a_scope)a_stack.peek()).n;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  boolean n;\n" +
-				"} : b\n" +
-				"  ;\n" +
-				"b : {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator, "b",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testFullyQualifiedRefToCurrentRuleParameter() throws Exception {
-		String action = "$a.i;";
-		String expecting = "i;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a[int i]: {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testFullyQualifiedRefToCurrentRuleRetVal() throws Exception {
-		String action = "$a.i;";
-		String expecting = "retval.i;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a returns [int i, int j]: {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testSetFullyQualifiedRefToCurrentRuleRetVal() throws Exception {
-		String action = "$a.i = 1;";
-		String expecting = "retval.i = 1;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a returns [int i, int j]: {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testIsolatedRefToCurrentRule() throws Exception {
-		String action = "$a;";
-		String expecting = "";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : 'a' {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-
-		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
-		Object expectedArg = "a";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg,
-				expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testIsolatedRefToRule() throws Exception {
-		String action = "$x;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : x=b {"+action+"}\n" +
-				"  ;\n" +
-				"b : 'b' ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-
-		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
-		Object expectedArg = "x";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
-	}
-
-	/*  I think these have to be errors; $a.x makes no sense.
-	@Test public void testFullyQualifiedRefToLabelInCurrentRule() throws Exception {
-			String action = "$a.x;";
-			String expecting = "x;";
-
-			ErrorQueue equeue = new ErrorQueue();
-			ErrorManager.setErrorListener(equeue);
-			Grammar g = new Grammar(
-				"grammar t;\n"+
-					"a : x='a' {"+action+"}\n" +
-					"  ;\n");
-			Tool antlr = newTool();
-			CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-			g.setCodeGenerator(generator);
-			generator.genRecognizer(); // forces load of templates
-			ActionTranslator translator = new ActionTranslator(generator,"a",
-															   new CommonToken(ANTLRParser.ACTION,action),1);
-			String rawTranslation =
-				translator.translate();
-			STGroup templates =
-				new STGroup();
-			ST actionST = new ST(templates, rawTranslation);
-			String found = actionST.render();
-			assertEquals(expecting, found);
-
-			assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-		}
-
-	@Test public void testFullyQualifiedRefToListLabelInCurrentRule() throws Exception {
-		String action = "$a.x;"; // must be qualified
-		String expecting = "list_x;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : x+='a' {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-														   new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();		assertEquals(expecting, found);
-
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-*/
-	@Test public void testFullyQualifiedRefToTemplateAttributeInCurrentRule() throws Exception {
-		String action = "$a.st;"; // can be qualified
-		String expecting = "retval.st;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-				"options {output=template;}\n"+
-				"a : (A->{$A.text}) {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRuleRefWhenRuleHasScope() throws Exception {
-		String action = "$b.start;";
-		String expecting = "(b1!=null?((Token)b1.start):null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-				"a : b {###"+action+"!!!} ;\n" +
-				"b\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : 'b' \n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testDynamicScopeRefOkEvenThoughRuleRefExists() throws Exception {
-		String action = "$b::n;";
-		String expecting = "((b_scope)b_stack.peek()).n;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-				"s : b ;\n"+
-				"b\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : '(' b ')' {"+action+"}\n" + // refers to current invocation's n
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator, "b",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRefToTemplateAttributeForCurrentRule() throws Exception {
-		String action = "$st=null;";
-		String expecting = "retval.st =null;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-				"options {output=template;}\n"+
-				"a : {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRefToTextAttributeForCurrentRule() throws Exception {
-		String action = "$text";
-		String expecting = "input.toString(retval.start,input.LT(-1))";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-				"options {output=template;}\n"+
-				"a : {###"+action+"!!!}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // codegen phase sets some vars we need
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRefToStartAttributeForCurrentRule() throws Exception {
-		String action = "$start;";
-		String expecting = "((Token)retval.start);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-				"a : {###"+action+"!!!}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testTokenLabelFromMultipleAlts() throws Exception {
-		String action = "$ID.text;"; // must be qualified
-		String action2 = "$INT.text;"; // must be qualified
-		String expecting = "(ID1!=null?ID1.getText():null);";
-		String expecting2 = "(INT2!=null?INT2.getText():null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : ID {"+action+"}\n" +
-				"  | INT {"+action2+"}\n" +
-				"  ;\n" +
-				"ID : 'a';\n" +
-				"INT : '0';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-		translator = new ActionTranslator(generator,
-			"a",
-			new CommonToken(ANTLRParser.ACTION,action2),2);
-		found = translator.translate();
-		assertEquals(expecting2, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRuleLabelFromMultipleAlts() throws Exception {
-		String action = "$b.text;"; // must be qualified
-		String action2 = "$c.text;"; // must be qualified
-		String expecting = "(b1!=null?input.toString(b1.start,b1.stop):null);";
-		String expecting2 = "(c2!=null?input.toString(c2.start,c2.stop):null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : b {###"+action+"!!!}\n" +
-				"  | c {^^^"+action2+"&&&}\n" +
-				"  ;\n" +
-				"b : 'a';\n" +
-				"c : '0';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // codegen phase sets some vars we need
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-		found = code.substring(code.indexOf("^^^")+3,code.indexOf("&&&"));
-		assertEquals(expecting2, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testUnknownDynamicAttribute() throws Exception {
-		String action = "$a::x";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : {"+action+"}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"a",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE;
-		Object expectedArg = "a";
-		Object expectedArg2 = "x";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testUnknownGlobalDynamicAttribute() throws Exception {
-		String action = "$Symbols::x";
-		String expecting = action;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"scope Symbols {\n" +
-				"  int n;\n" +
-				"}\n" +
-				"a : {'+action+'}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"a",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE;
-		Object expectedArg = "Symbols";
-		Object expectedArg2 = "x";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testUnqualifiedRuleScopeAttribute() throws Exception {
-		String action = "$n;"; // must be qualified
-		String expecting = "$n;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : b\n" +
-				"  ;\n" +
-				"b : {'+action+'}\n" +
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"b",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE;
-		Object expectedArg = "n";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testRuleAndTokenLabelTypeMismatch() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : id='foo' id=b\n" +
-				"  ;\n" +
-				"b : ;\n");
-		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
-		Object expectedArg = "id";
-		Object expectedArg2 = "rule!=token";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testListAndTokenLabelTypeMismatch() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : ids+='a' ids='b'\n" +
-				"  ;\n" +
-				"b : ;\n");
-		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
-		Object expectedArg = "ids";
-		Object expectedArg2 = "token!=token-list";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testListAndRuleLabelTypeMismatch() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-				"options {output=AST;}\n"+
-				"a : bs+=b bs=b\n" +
-				"  ;\n" +
-				"b : 'b';\n");
-		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
-		Object expectedArg = "bs";
-		Object expectedArg2 = "rule!=rule-list";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testArgReturnValueMismatch() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a[int i] returns [int x, int i]\n" +
-				"  : \n" +
-				"  ;\n" +
-				"b : ;\n");
-		int expectedMsgID = ErrorManager.MSG_ARG_RETVAL_CONFLICT;
-		Object expectedArg = "i";
-		Object expectedArg2 = "a";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testSimplePlusEqualLabel() throws Exception {
-		String action = "$ids.size();"; // must be qualified
-		String expecting = "list_ids.size();";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"a : ids+=ID ( COMMA ids+=ID {"+action+"})* ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"a",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testPlusEqualStringLabel() throws Exception {
-		String action = "$ids.size();"; // must be qualified
-		String expecting = "list_ids.size();";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : ids+='if' ( ',' ids+=ID {"+action+"})* ;" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"a",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testPlusEqualSetLabel() throws Exception {
-		String action = "$ids.size();"; // must be qualified
-		String expecting = "list_ids.size();";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : ids+=('a'|'b') ( ',' ids+=ID {"+action+"})* ;" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"a",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testPlusEqualWildcardLabel() throws Exception {
-		String action = "$ids.size();"; // must be qualified
-		String expecting = "list_ids.size();";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : ids+=. ( ',' ids+=ID {"+action+"})* ;" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"a",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testImplicitTokenLabel() throws Exception {
-		String action = "$ID; $ID.text; $ID.getText()";
-		String expecting = "ID1; (ID1!=null?ID1.getText():null); ID1.getText()";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : ID {"+action+"} ;" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"a",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testImplicitRuleLabel() throws Exception {
-		String action = "$r.start;";
-		String expecting = "(r1!=null?((Token)r1.start):null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : r {###"+action+"!!!} ;" +
-				"r : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testReuseExistingLabelWithImplicitRuleLabel() throws Exception {
-		String action = "$r.start;";
-		String expecting = "(x!=null?((Token)x.start):null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : x=r {###"+action+"!!!} ;" +
-				"r : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testReuseExistingListLabelWithImplicitRuleLabel() throws Exception {
-		String action = "$r.start;";
-		String expecting = "(x!=null?((Token)x.start):null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"options {output=AST;}\n" +
-				"a : x+=r {###"+action+"!!!} ;" +
-				"r : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testReuseExistingLabelWithImplicitTokenLabel() throws Exception {
-		String action = "$ID.text;";
-		String expecting = "(x!=null?x.getText():null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : x=ID {"+action+"} ;" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testReuseExistingListLabelWithImplicitTokenLabel() throws Exception {
-		String action = "$ID.text;";
-		String expecting = "(x!=null?x.getText():null);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : x+=ID {"+action+"} ;" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRuleLabelWithoutOutputOption() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar T;\n"+
-				"s : x+=a ;" +
-				"a : 'a';\n"+
-				"b : 'b';\n"+
-				"WS : ' '|'\n';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT;
-		Object expectedArg = "x";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testRuleLabelOnTwoDifferentRulesAST() throws Exception {
-		String grammar =
-			"grammar T;\n"+
-				"options {output=AST;}\n"+
-				"s : x+=a x+=b {System.out.println($x);} ;" +
-				"a : 'a';\n"+
-				"b : 'b';\n"+
-				"WS : (' '|'\\n') {skip();};\n";
-		String expecting = "[a, b]\na b\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-			"s", "a b", false);
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRuleLabelOnTwoDifferentRulesTemplate() throws Exception {
-		String grammar =
-			"grammar T;\n"+
-				"options {output=template;}\n"+
-				"s : x+=a x+=b {System.out.println($x);} ;" +
-				"a : 'a' -> {%{\"hi\"}} ;\n"+
-				"b : 'b' -> {%{\"mom\"}} ;\n"+
-				"WS : (' '|'\\n') {skip();};\n";
-		String expecting = "[hi, mom]\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-			"s", "a b", false);
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testMissingArgs() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : r ;" +
-				"r[int i] : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_MISSING_RULE_ARGS;
-		Object expectedArg = "r";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testArgsWhenNoneDefined() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : r[32,34] ;" +
-				"r : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_RULE_HAS_NO_ARGS;
-		Object expectedArg = "r";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testReturnInitValue() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : r ;\n" +
-				"r returns [int x=0] : 'a' {$x = 4;} ;\n");
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-
-		Rule r = g.getRule("r");
-		AttributeScope retScope = r.returnScope;
-		List parameters = retScope.getAttributes();
-		assertNotNull("missing return action", parameters);
-		assertEquals(1, parameters.size());
-		String found = parameters.get(0).toString();
-		String expecting = "int x=0";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testMultipleReturnInitValue() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : r ;\n" +
-				"r returns [int x=0, int y, String s=new String(\"foo\")] : 'a' {$x = 4;} ;\n");
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-
-		Rule r = g.getRule("r");
-		AttributeScope retScope = r.returnScope;
-		List parameters = retScope.getAttributes();
-		assertNotNull("missing return action", parameters);
-		assertEquals(3, parameters.size());
-		assertEquals("int x=0", parameters.get(0).toString());
-		assertEquals("int y", parameters.get(1).toString());
-		assertEquals("String s=new String(\"foo\")", parameters.get(2).toString());
-	}
-
-	@Test public void testCStyleReturnInitValue() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : r ;\n" +
-				"r returns [int (*x)()=NULL] : 'a' ;\n");
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-
-		Rule r = g.getRule("r");
-		AttributeScope retScope = r.returnScope;
-		List parameters = retScope.getAttributes();
-		assertNotNull("missing return action", parameters);
-		assertEquals(1, parameters.size());
-		String found = parameters.get(0).toString();
-		String expecting = "int (*)() x=NULL";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testArgsWithInitValues() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : r[32,34] ;" +
-				"r[int x, int y=3] : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_ARG_INIT_VALUES_ILLEGAL;
-		Object expectedArg = "y";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testArgsOnToken() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : ID[32,34] ;" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_ARGS_ON_TOKEN_REF;
-		Object expectedArg = "ID";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testArgsOnTokenInLexer() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : 'z' ID[32,34] ;" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_RULE_HAS_NO_ARGS;
-		Object expectedArg = "ID";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testLabelOnRuleRefInLexer() throws Exception {
-		String action = "$i.text";
-		String expecting = "(i!=null?i.getText():null)";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : 'z' i=ID {"+action+"};" +
-				"fragment ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"R",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRefToRuleRefInLexer() throws Exception {
-		String action = "$ID.text";
-		String expecting = "(ID1!=null?ID1.getText():null)";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : 'z' ID {"+action+"};" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"R",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testRefToRuleRefInLexerNoAttribute() throws Exception {
-		String action = "$ID";
-		String expecting = "ID1";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : 'z' ID {"+action+"};" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"R",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testCharLabelInLexer() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : x='z' ;\n");
-
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testCharListLabelInLexer() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : x+='z' ;\n");
-
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testWildcardCharLabelInLexer() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : x=. ;\n");
-
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testWildcardCharListLabelInLexer() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : x+=. ;\n");
-
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testMissingArgsInLexer() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"A : R ;" +
-				"R[int i] : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_MISSING_RULE_ARGS;
-		Object expectedArg = "R";
-		Object expectedArg2 = null;
-		// getting a second error @1:12, probably from nextToken
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testLexerRulePropertyRefs() throws Exception {
-		String action = "$text $type $line $pos $channel $index $start $stop";
-		String expecting = "getText() _type state.tokenStartLine state.tokenStartCharPositionInLine _channel -1 state.tokenStartCharIndex (getCharIndex()-1)";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : 'r' {"+action+"};\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"R",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testLexerLabelRefs() throws Exception {
-		String action = "$a $b.text $c $d.text";
-		String expecting = "a (b!=null?b.getText():null) c (d!=null?d.getText():null)";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : a='c' b='hi' c=. d=DUH {"+action+"};\n" +
-				"DUH : 'd' ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"R",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testSettingLexerRulePropertyRefs() throws Exception {
-		String action = "$text $type=1 $line=1 $pos=1 $channel=1 $index";
-		String expecting = "getText() _type=1 state.tokenStartLine=1 state.tokenStartCharPositionInLine=1 _channel=1 -1";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-				"R : 'r' {"+action+"};\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"R",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testArgsOnTokenInLexerRuleOfCombined() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : R;\n" +
-				"R : 'z' ID[32] ;\n" +
-				"ID : 'a';\n");
-
-		String lexerGrammarStr = g.getLexerGrammar();
-		StringReader sr = new StringReader(lexerGrammarStr);
-		Grammar lexerGrammar = new Grammar();
-		lexerGrammar.setFileName("<internally-generated-lexer>");
-		lexerGrammar.importTokenVocabulary(g);
-		lexerGrammar.parseAndBuildAST(sr);
-		lexerGrammar.defineGrammarSymbols();
-		lexerGrammar.checkNameSpaceAndActions();
-		sr.close();
-
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, lexerGrammar, "Java");
-		lexerGrammar.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_RULE_HAS_NO_ARGS;
-		Object expectedArg = "ID";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, lexerGrammar, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testMissingArgsOnTokenInLexerRuleOfCombined() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : R;\n" +
-				"R : 'z' ID ;\n" +
-				"ID[int i] : 'a';\n");
-
-		String lexerGrammarStr = g.getLexerGrammar();
-		StringReader sr = new StringReader(lexerGrammarStr);
-		Grammar lexerGrammar = new Grammar();
-		lexerGrammar.setFileName("<internally-generated-lexer>");
-		lexerGrammar.importTokenVocabulary(g);
-		lexerGrammar.parseAndBuildAST(sr);
-		lexerGrammar.defineGrammarSymbols();
-		lexerGrammar.checkNameSpaceAndActions();
-		sr.close();
-
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, lexerGrammar, "Java");
-		lexerGrammar.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_MISSING_RULE_ARGS;
-		Object expectedArg = "ID";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, lexerGrammar, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	// T R E E S
-
-	@Test public void testTokenLabelTreeProperty() throws Exception {
-		String action = "$id.tree;";
-		String expecting = "id_tree;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : id=ID {"+action+"} ;\n" +
-				"ID : 'a';\n");
-
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-				"a",
-				new CommonToken(ANTLRParser.ACTION,action),1);
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testTokenRefTreeProperty() throws Exception {
-		String action = "$ID.tree;";
-		String expecting = "ID1_tree;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : ID {"+action+"} ;" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		ActionTranslator translator = new ActionTranslator(generator,"a",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testAmbiguousTokenRef() throws Exception {
-		String action = "$ID;";
-		String expecting = "";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : ID ID {"+action+"};" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_NONUNIQUE_REF;
-		Object expectedArg = "ID";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testAmbiguousTokenRefWithProp() throws Exception {
-		String action = "$ID.text;";
-		String expecting = "";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-				"a : ID ID {"+action+"};" +
-				"ID : 'a';\n");
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_NONUNIQUE_REF;
-		Object expectedArg = "ID";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testRuleRefWithDynamicScope() throws Exception {
-		String action = "$field::x = $field.st;";
-		String expecting = "((field_scope)field_stack.peek()).x = retval.st;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-				"field\n" +
-				"scope { ST x; }\n" +
-				"    :   'y' {"+action+"}\n" +
-				"    ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-			"field",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testAssignToOwnRulenameAttr() throws Exception {
-		String action = "$rule.tree = null;";
-		String expecting = "retval.tree = null;";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-				"rule\n" +
-				"    : 'y' {" + action +"}\n" +
-				"    ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-			"rule",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testAssignToOwnParamAttr() throws Exception {
-		String action = "$rule.i = 42; $i = 23;";
-		String expecting = "i = 42; i = 23;";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-				"rule[int i]\n" +
-				"    : 'y' {" + action +"}\n" +
-				"    ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-			"rule",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testIllegalAssignToOwnRulenameAttr() throws Exception {
-		String action = "$rule.stop = 0;";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-				"rule\n" +
-				"    : 'y' {" + action +"}\n" +
-				"    ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-			"rule",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-
-		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
-		Object expectedArg = "rule";
-		Object expectedArg2 = "stop";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testIllegalAssignToLocalAttr() throws Exception {
-		String action = "$tree = null; $st = null; $start = 0; $stop = 0; $text = 0;";
-		String expecting = "retval.tree = null; retval.st = null;   ";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-				"rule\n" +
-				"    : 'y' {" + action +"}\n" +
-				"    ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-			"rule",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-
-		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
-		ArrayList expectedErrors = new ArrayList(3);
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, "start", "");
-		expectedErrors.add(expectedMessage);
-		GrammarSemanticsMessage expectedMessage2 =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, "stop", "");
-		expectedErrors.add(expectedMessage2);
-		GrammarSemanticsMessage expectedMessage3 =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, "text", "");
-		expectedErrors.add(expectedMessage3);
-		checkErrors(equeue, expectedErrors);
-
-		STGroup templates =
-			new STGroup();
-		ST actionST = new ST(templates, rawTranslation);
-		String found = actionST.render();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testIllegalAssignRuleRefAttr() throws Exception {
-		String action = "$other.tree = null;";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-				"options { output = AST;}" +
-				"otherrule\n" +
-				"    : 'y' ;" +
-				"rule\n" +
-				"    : other=otherrule {" + action +"}\n" +
-				"    ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-			"rule",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-
-		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
-		Object expectedArg = "other";
-		Object expectedArg2 = "tree";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testIllegalAssignTokenRefAttr() throws Exception {
-		String action = "$ID.text = \"test\";";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-				"ID\n" +
-				"    : 'y' ;" +
-				"rule\n" +
-				"    : ID {" + action +"}\n" +
-				"    ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-			"rule",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-
-		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
-		Object expectedArg = "ID";
-		Object expectedArg2 = "text";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testAssignToTreeNodeAttribute() throws Exception {
-		String action = "$tree.scope = localScope;";
-		String expecting = "((Object)retval.tree).scope = localScope;";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-				"options { output=AST; }" +
-				"rule\n" +
-				"@init {\n" +
-				"   Scope localScope=null;\n" +
-				"}\n" +
-				"@after {\n" +
-				"   ###$tree.scope = localScope;!!!\n" +
-				"}\n" +
-				"   : 'a' -> ^('a')\n" +
-				";");
-		Tool antlr = newTool();
-
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // codegen phase sets some vars we need
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-	}
-
-	@Test public void testDoNotTranslateAttributeCompare() throws Exception {
-		String action = "$a.line == $b.line";
-		String expecting = "(a!=null?a.getLine():0) == (b!=null?b.getLine():0)";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"lexer grammar a;\n" +
-				"RULE:\n" +
-				"     a=ID b=ID {" + action + "}" +
-				"    ;\n" +
-				"ID : 'id';"
-		);
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-		ActionTranslator translator = new ActionTranslator(generator,
-			"RULE",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testDoNotTranslateScopeAttributeCompare() throws Exception {
-		String action = "if ($rule::foo == \"foo\" || 1) { System.out.println(\"ouch\"); }";
-		String expecting = "if (((rule_scope)rule_stack.peek()).foo == \"foo\" || 1) { System.out.println(\"ouch\"); }";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-				"rule\n" +
-				"scope {\n" +
-				"   String foo;" +
-				"} :\n" +
-				"     twoIDs" +
-				"    ;\n" +
-				"twoIDs:\n" +
-				"    ID ID {" + action + "}\n" +
-				"    ;\n" +
-				"ID : 'id';"
-		);
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-		ActionTranslator translator = new ActionTranslator(generator,
-			"twoIDs",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-		// check that we didn't use scopeSetAttributeRef in translation!
-		boolean foundScopeSetAttributeRef = false;
-		for (int i = 0; i < translator.chunks.size(); i++) {
-			Object chunk = translator.chunks.get(i);
-			if (chunk instanceof ST) {
-				if (((ST)chunk).getName().equals("/scopeSetAttributeRef")) {
-					foundScopeSetAttributeRef = true;
-				}
-			}
-		}
-		assertFalse("action translator used scopeSetAttributeRef template in comparison!", foundScopeSetAttributeRef);
-		STGroup templates =
-			new STGroup();
-		ST actionST = new ST(templates, rawTranslation);
-		String found = actionST.render();
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testTreeRuleStopAttributeIsInvalid() throws Exception {
-		String action = "$r.x; $r.start; $r.stop";
-		String expecting = "(r!=null?r.x:0); (r!=null?((CommonTree)r.start):null); $r.stop";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"tree grammar t;\n" +
-				"options {ASTLabelType=CommonTree;}\n"+
-				"a returns [int x]\n" +
-				"  :\n" +
-				"  ;\n"+
-				"b : r=a {###"+action+"!!!}\n" +
-				"  ;");
-		System.out.println(g.toString());
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // codegen phase sets some vars we need
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		int expectedMsgID = ErrorManager.MSG_UNKNOWN_RULE_ATTRIBUTE;
-		Object expectedArg = "a";
-		Object expectedArg2 = "stop";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		System.out.println("equeue:"+equeue);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testRefToTextAttributeForCurrentTreeRule() throws Exception {
-		String action = "$text";
-		String expecting = "input.getTokenStream().toString(" +
-			"input.getTreeAdaptor().getTokenStartIndex(retval.start)," +
-			"input.getTreeAdaptor().getTokenStopIndex(retval.start))";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"tree grammar t;\n" +
-				"options {ASTLabelType=CommonTree;}\n" +
-				"a : {###"+action+"!!!}\n" +
-				"  ;\n");
-
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // codegen phase sets some vars we need
-		ST codeST = generator.getRecognizerST();
-		String code = codeST.render();
-		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testTypeOfGuardedAttributeRefIsCorrect() throws Exception {
-		String action = "int x = $b::n;";
-		String expecting = "int x = ((b_scope)b_stack.peek()).n;";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-				"s : b ;\n"+
-				"b\n" +
-				"scope {\n" +
-				"  int n;\n" +
-				"} : '(' b ')' {"+action+"}\n" + // refers to current invocation's n
-				"  ;\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator, "b",
-			new CommonToken(ANTLRParser.ACTION,action),1);
-		String found = translator.translate();
-		assertEquals(expecting, found);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	// S U P P O R T
-
-	protected void checkError(ErrorQueue equeue,
-							  GrammarSemanticsMessage expectedMessage)
-		throws Exception
-	{
-		/*
-		System.out.println(equeue.infos);
-		System.out.println(equeue.warnings);
-		System.out.println(equeue.errors);
-		*/
-		Message foundMsg = null;
-		for (int i = 0; i < equeue.errors.size(); i++) {
-			Message m = (Message)equeue.errors.get(i);
-			if (m.msgID==expectedMessage.msgID ) {
-				foundMsg = m;
-			}
-		}
-		assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size() > 0);
-		assertNotNull("couldn't find expected error: "+expectedMessage.msgID+" in "+equeue, foundMsg);
-		assertTrue("error is not a GrammarSemanticsMessage",
-			foundMsg instanceof GrammarSemanticsMessage);
-		assertEquals(expectedMessage.arg, foundMsg.arg);
-		assertEquals(expectedMessage.arg2, foundMsg.arg2);
-	}
-
-	/** Allow checking for multiple errors in one test */
-	protected void checkErrors(ErrorQueue equeue,
-							   ArrayList expectedMessages)
-		throws Exception
-	{
-		ArrayList messageExpected = new ArrayList(equeue.errors.size());
-		for (int i = 0; i < equeue.errors.size(); i++) {
-			Message m = (Message)equeue.errors.get(i);
-			boolean foundMsg = false;
-			for (int j = 0; j < expectedMessages.size(); j++) {
-				Message em = (Message)expectedMessages.get(j);
-				if (m.msgID==em.msgID && m.arg.equals(em.arg) && m.arg2.equals(em.arg2)) {
-					foundMsg = true;
-				}
-			}
-			if (foundMsg) {
-				messageExpected.add(i, Boolean.TRUE);
-			} else
-				messageExpected.add(i, Boolean.FALSE);
-		}
-		for (int i = 0; i < equeue.errors.size(); i++) {
-			assertTrue("unexpected error:" + equeue.errors.get(i), ((Boolean)messageExpected.get(i)).booleanValue());
-		}
-	}
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestAutoAST.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestAutoAST.java
deleted file mode 100644
index b6c7ede..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestAutoAST.java
+++ /dev/null
@@ -1,822 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.junit.Ignore;
-import org.junit.Test;
-
-public class TestAutoAST extends BaseTest {
-	protected boolean debug = false;
-
-	@Test public void testTokenList() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "abc 34", debug);
-		assertEquals("abc 34\n", found);
-	}
-
-	@Test public void testTokenListInSingleAltBlock() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : (ID INT) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "abc 34", debug);
-		assertEquals("abc 34\n", found);
-	}
-
-	@Test public void testSimpleRootAtOuterLevel() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : ID^ INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "abc 34", debug);
-		assertEquals("(abc 34)\n", found);
-	}
-
-	@Test public void testSimpleRootAtOuterLevelReverse() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : INT ID^ ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "34 abc", debug);
-		assertEquals("(abc 34)\n", found);
-	}
-
-	@Test public void testBang() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT! ID! INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34 dag 4532", debug);
-		assertEquals("abc 4532\n", found);
-	}
-
-	@Test public void testOptionalThenRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ( ID INT )? ID^ ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a 1 b", debug);
-		assertEquals("(b a 1)\n", found);
-	}
-
-	@Test public void testLabeledStringRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : v='void'^ ID ';' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "void foo;", debug);
-		assertEquals("(void foo ;)\n", found);
-	}
-
-	@Test public void testWildcard() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : v='void'^ . ';' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "void foo;", debug);
-		assertEquals("(void foo ;)\n", found);
-	}
-
-	@Test public void testWildcardRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : v='void' .^ ';' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "void foo;", debug);
-		assertEquals("(foo void ;)\n", found);
-	}
-
-	@Test public void testWildcardRootWithLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : v='void' x=.^ ';' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "void foo;", debug);
-		assertEquals("(foo void ;)\n", found);
-	}
-
-    @Test public void testWildcardRootWithListLabel() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : v='void' x=.^ ';' ;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-        String found = execParser("T.g", grammar, "TParser", "TLexer",
-                                  "a", "void foo;", debug);
-        assertEquals("(foo void ;)\n", found);
-    }
-
-    @Test public void testWildcardBangWithListLabel() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : v='void' x=.! ';' ;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-        String found = execParser("T.g", grammar, "TParser", "TLexer",
-                                  "a", "void foo;", debug);
-        assertEquals("void ;\n", found);
-    }
-
-	@Test public void testRootRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID^ INT^ ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a 34 c", debug);
-		assertEquals("(34 a c)\n", found);
-	}
-
-	@Test public void testRootRoot2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT^ ID^ ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a 34 c", debug);
-		assertEquals("(c (34 a))\n", found);
-	}
-
-	@Test public void testRootThenRootInLoop() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID^ (INT '*'^ ID)+ ;\n" +
-			"ID  : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a 34 * b 9 * c", debug);
-		assertEquals("(* (* (a 34) b 9) c)\n", found);
-	}
-
-	@Test public void testNestedSubrule() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'void' (({;}ID|INT) ID | 'null' ) ';' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "void a b;", debug);
-		assertEquals("void a b ;\n", found);
-	}
-
-	@Test public void testInvokeRule() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a  : type ID ;\n" +
-			"type : {;}'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "int a", debug);
-		assertEquals("int a\n", found);
-	}
-
-	@Test public void testInvokeRuleAsRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a  : type^ ID ;\n" +
-			"type : {;}'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "int a", debug);
-		assertEquals("(int a)\n", found);
-	}
-
-	@Test public void testInvokeRuleAsRootWithLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a  : x=type^ ID ;\n" +
-			"type : {;}'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "int a", debug);
-		assertEquals("(int a)\n", found);
-	}
-
-	@Test public void testInvokeRuleAsRootWithListLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a  : x+=type^ ID ;\n" +
-			"type : {;}'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "int a", debug);
-		assertEquals("(int a)\n", found);
-	}
-
-	@Test public void testRuleRootInLoop() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ('+'^ ID)* ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a+b+c+d", debug);
-		assertEquals("(+ (+ (+ a b) c) d)\n", found);
-	}
-
-	@Test public void testRuleInvocationRuleRootInLoop() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID (op^ ID)* ;\n" +
-			"op : {;}'+' | '-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a+b+c-d", debug);
-		assertEquals("(- (+ (+ a b) c) d)\n", found);
-	}
-
-	@Test public void testTailRecursion() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"s : a ;\n" +
-			"a : atom ('exp'^ a)? ;\n" +
-			"atom : INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "s", "3 exp 4 exp 5", debug);
-		assertEquals("(exp 3 (exp 4 5))\n", found);
-	}
-
-	@Test public void testSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID|INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc", debug);
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testSetRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ('+' | '-')^ ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "+abc", debug);
-		assertEquals("(+ abc)\n", found);
-	}
-
-	@Ignore
-    // TODO: FAILS until I rebuild the antlr.g in v3
-    //
-    public void testSetRootWithLabel() throws Exception {
-		
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x=('+' | '-')^ ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "+abc", debug);
-		assertEquals("(+ abc)\n", found);
-	}
-
-	@Test public void testSetAsRuleRootInLoop() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID (('+'|'-')^ ID)* ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a+b-c", debug);
-		assertEquals("(- (+ a b) c)\n", found);
-	}
-
-	@Test public void testNotSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ~ID '+' INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "34+2", debug);
-		assertEquals("34 + 2\n", found);
-	}
-
-	@Test public void testNotSetWithLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x=~ID '+' INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "34+2", debug);
-		assertEquals("34 + 2\n", found);
-	}
-
-	@Test public void testNotSetWithListLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x=~ID '+' INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "34+2", debug);
-		assertEquals("34 + 2\n", found);
-	}
-
-	@Test public void testNotSetRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ~'+'^ INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "34 55", debug);
-		assertEquals("(34 55)\n", found);
-	}
-
-	@Test public void testNotSetRootWithLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ~'+'^ INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "34 55", debug);
-		assertEquals("(34 55)\n", found);
-	}
-
-	@Test public void testNotSetRootWithListLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ~'+'^ INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "34 55", debug);
-		assertEquals("(34 55)\n", found);
-	}
-
-	@Test public void testNotSetRuleRootInLoop() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : INT (~INT^ INT)* ;\n" +
-			"blort : '+' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "3+4+5", debug);
-		assertEquals("(+ (+ 3 4) 5)\n", found);
-	}
-
-	@Test public void testTokenLabelReuse() throws Exception {
-		// check for compilation problem due to multiple defines
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : id=ID id=ID {System.out.print(\"2nd id=\"+$id.text+';');} ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a b", debug);
-		assertEquals("2nd id=b;a b\n", found);
-	}
-
-	@Test public void testTokenLabelReuse2() throws Exception {
-		// check for compilation problem due to multiple defines
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : id=ID id=ID^ {System.out.print(\"2nd id=\"+$id.text+';');} ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a b", debug);
-		assertEquals("2nd id=b;(b a)\n", found);
-	}
-
-	@Test public void testTokenListLabelReuse() throws Exception {
-		// check for compilation problem due to multiple defines
-		// make sure ids has both ID tokens
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ids+=ID ids+=ID {System.out.print(\"id list=\"+$ids+';');} ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a b", debug);
-		String expecting = "id list=[[@0,0:0='a',<4>,1:0], [@2,2:2='b',<4>,1:2]];a b\n";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testTokenListLabelReuse2() throws Exception {
-		// check for compilation problem due to multiple defines
-		// make sure ids has both ID tokens
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ids+=ID^ ids+=ID {System.out.print(\"id list=\"+$ids+';');} ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a b", debug);
-		String expecting = "id list=[[@0,0:0='a',<4>,1:0], [@2,2:2='b',<4>,1:2]];(a b)\n";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testTokenListLabelRuleRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : id+=ID^ ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a", debug);
-		assertEquals("a\n", found);
-	}
-
-	@Test public void testTokenListLabelBang() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : id+=ID! ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a", debug);
-		assertEquals("", found);
-	}
-
-	@Test public void testRuleListLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x+=b x+=b {" +
-			"Tree t=(Tree)$x.get(1);" +
-			"System.out.print(\"2nd x=\"+t.toStringTree()+';');} ;\n" +
-			"b : ID;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a b", debug);
-		assertEquals("2nd x=b;a b\n", found);
-	}
-
-	@Test public void testRuleListLabelRuleRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ( x+=b^ )+ {" +
-			"System.out.print(\"x=\"+((CommonTree)$x.get(1)).toStringTree()+';');} ;\n" +
-			"b : ID;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a b", debug);
-		assertEquals("x=(b a);(b a)\n", found);
-	}
-
-	@Test public void testRuleListLabelBang() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x+=b! x+=b {" +
-			"System.out.print(\"1st x=\"+((CommonTree)$x.get(0)).toStringTree()+';');} ;\n" +
-			"b : ID;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a b", debug);
-		assertEquals("1st x=a;b\n", found);
-	}
-
-	@Test public void testComplicatedMelange() throws Exception {
-		// check for compilation problem
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : A b=B b=B c+=C c+=C D {String s = $D.text;} ;\n" +
-			"A : 'a' ;\n" +
-			"B : 'b' ;\n" +
-			"C : 'c' ;\n" +
-			"D : 'd' ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a b b c c d", debug);
-		assertEquals("a b b c c d\n", found);
-	}
-
-	@Test public void testReturnValueWithAST() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : ID b {System.out.println($b.i);} ;\n" +
-			"b returns [int i] : INT {$i=Integer.parseInt($INT.text);} ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "abc 34", debug);
-		assertEquals("34\nabc 34\n", found);
-	}
-
-	@Test public void testSetLoop() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options { output=AST; }\n" +
-			"r : (INT|ID)+ ; \n" +
-			"ID : 'a'..'z' + ;\n" +
-			"INT : '0'..'9' +;\n" +
-			"WS: (' ' | '\\n' | '\\t')+ {$channel = HIDDEN;};\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "r", "abc 34 d", debug);
-		assertEquals("abc 34 d\n", found);
-	}
-
-	@Test public void testExtraTokenInSimpleDecl() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"decl : type^ ID '='! INT ';'! ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "decl", "int 34 x=1;", debug);
-		assertEquals("line 1:4 extraneous input '34' expecting ID\n", this.stderrDuringParse);
-		assertEquals("(int x 1)\n", found); // tree gets correct x and 1 tokens
-	}
-
-	@Test public void testMissingIDInSimpleDecl() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"tokens {EXPR;}\n" +
-			"decl : type^ ID '='! INT ';'! ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "decl", "int =1;", debug);
-		assertEquals("line 1:4 missing ID at '='\n", this.stderrDuringParse);
-		assertEquals("(int <missing ID> 1)\n", found); // tree gets invented ID token
-	}
-
-	@Test public void testMissingSetInSimpleDecl() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"tokens {EXPR;}\n" +
-			"decl : type^ ID '='! INT ';'! ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "decl", "x=1;", debug);
-		assertEquals("line 1:0 mismatched input 'x' expecting set null\n", this.stderrDuringParse);
-		assertEquals("(<error: x> x 1)\n", found); // tree gets invented ID token
-	}
-
-	@Test public void testMissingTokenGivesErrorNode() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT ;\n" + // follow is EOF
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "abc", debug);
-		assertEquals("line 1:3 missing INT at '<EOF>'\n", this.stderrDuringParse);
-		assertEquals("abc <missing INT>\n", found);
-	}
-
-	@Test public void testMissingTokenGivesErrorNodeInInvokedRule() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : b ;\n" +
-			"b : ID INT ;\n" + // follow should see EOF
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "abc", debug);
-		assertEquals("line 1:3 mismatched input '<EOF>' expecting INT\n", this.stderrDuringParse);
-		assertEquals("<mismatched token: [@1,3:3='<EOF>',<-1>,1:3], resync=abc>\n", found);
-	}
-
-	@Test public void testExtraTokenGivesErrorNode() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : b c ;\n" +
-			"b : ID ;\n" +
-			"c : INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "abc ick 34", debug);
-		assertEquals("line 1:4 extraneous input 'ick' expecting INT\n", this.stderrDuringParse);
-		assertEquals("abc 34\n", found);
-	}
-
-	@Test public void testMissingFirstTokenGivesErrorNode() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "34", debug);
-		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
-		assertEquals("<missing ID> 34\n", found);
-	}
-
-	@Test public void testMissingFirstTokenGivesErrorNode2() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : b c ;\n" +
-			"b : ID ;\n" +
-			"c : INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "34", debug);
-		// finds an error at the first token, 34, and re-syncs.
-		// re-synchronizing does not consume a token because 34 follows
-		// ref to rule b (start of c). It then matches 34 in c.
-		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
-		assertEquals("<missing ID> 34\n", found);
-	}
-
-	@Test public void testNoViableAltGivesErrorNode() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : b | c ;\n" +
-			"b : ID ;\n" +
-			"c : INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"S : '*' ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "*", debug);
-		assertEquals("line 1:0 no viable alternative at input '*'\n", this.stderrDuringParse);
-		assertEquals("<unexpected: [@0,0:0='*',<6>,1:0], resync=*>\n", found);
-	}
-
-
-	// S U P P O R T
-
-	public void _test() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a :  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "abc 34", debug);
-		assertEquals("\n", found);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestBufferedTreeNodeStream.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestBufferedTreeNodeStream.java
deleted file mode 100644
index 834a737..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestBufferedTreeNodeStream.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.runtime.CommonToken;
-import org.antlr.runtime.tree.BufferedTreeNodeStream;
-import org.antlr.runtime.tree.CommonTree;
-import org.antlr.runtime.tree.Tree;
-import org.antlr.runtime.tree.TreeNodeStream;
-import org.junit.Test;
-
-public class TestBufferedTreeNodeStream extends TestTreeNodeStream {
-    // inherits tests; these methods make it use a new buffer
-
-	public TreeNodeStream newStream(Object t) {
-		return new BufferedTreeNodeStream(t);
-	}
-
-    public String toTokenTypeString(TreeNodeStream stream) {
-        return ((BufferedTreeNodeStream)stream).toTokenTypeString();
-    }
-
-    @Test public void testSeek() throws Exception {
-        // ^(101 ^(102 103 ^(106 107) ) 104 105)
-        // stream has 7 real + 6 nav nodes
-        // Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
-        Tree r0 = new CommonTree(new CommonToken(101));
-        Tree r1 = new CommonTree(new CommonToken(102));
-        r0.addChild(r1);
-        r1.addChild(new CommonTree(new CommonToken(103)));
-        Tree r2 = new CommonTree(new CommonToken(106));
-        r2.addChild(new CommonTree(new CommonToken(107)));
-        r1.addChild(r2);
-        r0.addChild(new CommonTree(new CommonToken(104)));
-        r0.addChild(new CommonTree(new CommonToken(105)));
-
-        TreeNodeStream stream = newStream(r0);
-        stream.consume(); // consume 101
-        stream.consume(); // consume DN
-        stream.consume(); // consume 102
-        stream.seek(7);   // seek to 107
-        assertEquals(107, ((Tree)stream.LT(1)).getType());
-        stream.consume(); // consume 107
-        stream.consume(); // consume UP
-        stream.consume(); // consume UP
-        assertEquals(104, ((Tree)stream.LT(1)).getType());
-    }    
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestCharDFAConversion.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestCharDFAConversion.java
deleted file mode 100644
index 15e1100..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestCharDFAConversion.java
+++ /dev/null
@@ -1,548 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.analysis.DFA;
-import org.antlr.analysis.DFAOptimizer;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.tool.*;
-import org.junit.Test;
-
-import java.util.List;
-
-public class TestCharDFAConversion extends BaseTest {
-
-	/** Public default constructor used by TestRig */
-	public TestCharDFAConversion() {
-	}
-
-	// R A N G E S  &  S E T S
-
-	@Test public void testSimpleRangeVersusChar() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a'..'z' '@' | 'k' '$' ;");
-		g.createLookaheadDFAs();
-		String expecting =
-			".s0-'k'->.s1\n" +
-			".s0-{'a'..'j', 'l'..'z'}->:s2=>1\n" +
-			".s1-'$'->:s3=>2\n" +
-			".s1-'@'->:s2=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testRangeWithDisjointSet() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a'..'z' '@'\n" +
-			"  | ('k'|'9'|'p') '$'\n" +
-			"  ;\n");
-		g.createLookaheadDFAs();
-		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'}
-		String expecting =
-			".s0-'9'->:s3=>2\n" +
-			".s0-{'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>1\n" +
-			".s0-{'k', 'p'}->.s1\n" +
-			".s1-'$'->:s3=>2\n" +
-			".s1-'@'->:s2=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testDisjointSetCollidingWithTwoRanges() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : ('a'..'z'|'0'..'9') '@'\n" +
-			"  | ('k'|'9'|'p') '$'\n" +
-			"  ;\n");
-		g.createLookaheadDFAs(false);
-		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
-		// into 0..8
-		String expecting =
-			".s0-{'0'..'8', 'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>1\n" +
-			".s0-{'9', 'k', 'p'}->.s1\n" +
-			".s1-'$'->:s3=>2\n" +
-			".s1-'@'->:s2=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testDisjointSetCollidingWithTwoRangesCharsFirst() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : ('k'|'9'|'p') '$'\n" +
-			"  | ('a'..'z'|'0'..'9') '@'\n" +
-			"  ;\n");
-		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
-		// into 0..8
-		String expecting =
-			".s0-{'0'..'8', 'a'..'j', 'l'..'o', 'q'..'z'}->:s3=>2\n" +
-			".s0-{'9', 'k', 'p'}->.s1\n" +
-			".s1-'$'->:s2=>1\n" +
-			".s1-'@'->:s3=>2\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testDisjointSetCollidingWithTwoRangesAsSeparateAlts() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a'..'z' '@'\n" +
-			"  | 'k' '$'\n" +
-			"  | '9' '$'\n" +
-			"  | 'p' '$'\n" +
-			"  | '0'..'9' '@'\n" +
-			"  ;\n");
-		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
-		// into 0..8
-		String expecting =
-			".s0-'0'..'8'->:s8=>5\n" +
-			".s0-'9'->.s6\n" +
-			".s0-'k'->.s1\n" +
-			".s0-'p'->.s4\n" +
-			".s0-{'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>1\n" +
-			".s1-'$'->:s3=>2\n" +
-			".s1-'@'->:s2=>1\n" +
-			".s4-'$'->:s5=>4\n" +
-			".s4-'@'->:s2=>1\n" +
-			".s6-'$'->:s7=>3\n" +
-			".s6-'@'->:s8=>5\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testKeywordVersusID() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"IF : 'if' ;\n" + // choose this over ID
-			"ID : ('a'..'z')+ ;\n");
-		String expecting =
-			".s0-'a'..'z'->:s2=>1\n" +
-			".s0-<EOT>->:s1=>2\n";
-		checkDecision(g, 1, expecting, null);
-		expecting =
-			".s0-'i'->.s1\n" +
-			".s0-{'a'..'h', 'j'..'z'}->:s4=>2\n" +
-			".s1-'f'->.s2\n" +
-			".s1-<EOT>->:s4=>2\n" +
-			".s2-'a'..'z'->:s4=>2\n" +
-			".s2-<EOT>->:s3=>1\n";
-		checkDecision(g, 2, expecting, null);
-	}
-
-	@Test public void testIdenticalRules() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a' ;\n" +
-			"B : 'a' ;\n"); // can't reach this
-		String expecting =
-			".s0-'a'->.s1\n" +
-			".s1-<EOT>->:s2=>1\n";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		checkDecision(g, 1, expecting, new int[] {2});
-
-		assertEquals("unexpected number of expected problems",
-				    1, equeue.size());
-		Message msg = (Message)equeue.errors.get(0);
-		assertTrue("warning must be an unreachable alt",
-				    msg instanceof GrammarUnreachableAltsMessage);
-		GrammarUnreachableAltsMessage u = (GrammarUnreachableAltsMessage)msg;
-		assertEquals("[2]", u.alts.toString());
-
-	}
-
-	@Test public void testAdjacentNotCharLoops() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : (~'r')+ ;\n" +
-			"B : (~'s')+ ;\n");
-		String expecting =
-			".s0-'r'->:s3=>2\n" +
-			".s0-'s'->:s2=>1\n" +
-			".s0-{'\\u0000'..'q', 't'..'\\uFFFF'}->.s1\n" +
-			".s1-'r'->:s3=>2\n" +
-			".s1-<EOT>->:s2=>1\n" +
-			".s1-{'\\u0000'..'q', 't'..'\\uFFFF'}->.s1\n";
-		checkDecision(g, 3, expecting, null);
-	}
-
-	@Test public void testNonAdjacentNotCharLoops() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : (~'r')+ ;\n" +
-			"B : (~'t')+ ;\n");
-		String expecting =
-			".s0-'r'->:s3=>2\n" +
-			".s0-'t'->:s2=>1\n" +
-			".s0-{'\\u0000'..'q', 's', 'u'..'\\uFFFF'}->.s1\n" +
-			".s1-'r'->:s3=>2\n" +
-			".s1-<EOT>->:s2=>1\n" +
-			".s1-{'\\u0000'..'q', 's', 'u'..'\\uFFFF'}->.s1\n";
-		checkDecision(g, 3, expecting, null);
-	}
-
-	@Test public void testLoopsWithOptimizedOutExitBranches() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'x'* ~'x'+ ;\n");
-		String expecting =
-			".s0-'x'->:s1=>1\n" +
-			".s0-{'\\u0000'..'w', 'y'..'\\uFFFF'}->:s2=>2\n";
-		checkDecision(g, 1, expecting, null);
-
-		// The optimizer yanks out all exit branches from EBNF blocks
-		// This is ok because we've already verified there are no problems
-		// with the enter/exit decision
-		DFAOptimizer optimizer = new DFAOptimizer(g);
-		optimizer.optimize();
-		FASerializer serializer = new FASerializer(g);
-		DFA dfa = g.getLookaheadDFA(1);
-		String result = serializer.serialize(dfa.startState);
-		expecting = ".s0-'x'->:s1=>1\n";
-		assertEquals(expecting, result);
-	}
-
-	// N O N G R E E D Y
-
-	@Test public void testNonGreedy() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"CMT : '/*' ( options {greedy=false;} : . )* '*/' ;");
-		String expecting =
-			".s0-'*'->.s1\n" +
-			".s0-{'\\u0000'..')', '+'..'\\uFFFF'}->:s3=>1\n" +
-			".s1-'/'->:s2=>2\n" +
-			".s1-{'\\u0000'..'.', '0'..'\\uFFFF'}->:s3=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNonGreedyWildcardStar() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"SLCMT : '//' ( options {greedy=false;} : . )* '\n' ;");
-		String expecting =
-			".s0-'\\n'->:s1=>2\n" +
-			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNonGreedyByDefaultWildcardStar() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"SLCMT : '//' .* '\n' ;");
-		String expecting =
-			".s0-'\\n'->:s1=>2\n" +
-			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNonGreedyWildcardPlus() throws Exception {
-		// same DFA as nongreedy .*, but the generated code checks the
-		// number of iterations at runtime
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"SLCMT : '//' ( options {greedy=false;} : . )+ '\n' ;");
-		String expecting =
-			".s0-'\\n'->:s1=>2\n" +
-			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNonGreedyByDefaultWildcardPlus() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"SLCMT : '//' .+ '\n' ;");
-		String expecting =
-			".s0-'\\n'->:s1=>2\n" +
-			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNonGreedyByDefaultWildcardPlusWithParens() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"SLCMT : '//' (.)+ '\n' ;");
-		String expecting =
-			".s0-'\\n'->:s1=>2\n" +
-			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNonWildcardNonGreedy() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"DUH : (options {greedy=false;}:'x'|'y')* 'xy' ;");
-		String expecting =
-			".s0-'x'->.s1\n" +
-			".s0-'y'->:s4=>2\n" +
-			".s1-'x'->:s3=>1\n" +
-			".s1-'y'->:s2=>3\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNonWildcardEOTMakesItWorkWithoutNonGreedyOption() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"DUH : ('x'|'y')* 'xy' ;");
-		String expecting =
-			".s0-'x'->.s1\n" +
-			".s0-'y'->:s4=>1\n" +
-			".s1-'x'->:s4=>1\n" +
-			".s1-'y'->.s2\n" +
-			".s2-'x'..'y'->:s4=>1\n" +
-			".s2-<EOT>->:s3=>2\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testAltConflictsWithLoopThenExit() throws Exception {
-		// \" predicts alt 1, but wildcard then " can predict exit also
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"STRING : '\"' (options {greedy=false;}: '\\\\\"' | .)* '\"' ;\n"
-		);
-		String expecting =
-			".s0-'\"'->:s1=>3\n" +
-				".s0-'\\\\'->.s2\n" +
-				".s0-{'\\u0000'..'!', '#'..'[', ']'..'\\uFFFF'}->:s4=>2\n" +
-				".s2-'\"'->:s3=>1\n" +
-				".s2-{'\\u0000'..'!', '#'..'\\uFFFF'}->:s4=>2\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNonGreedyLoopThatNeverLoops() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"DUH : (options {greedy=false;}:'x')+ ;"); // loop never matched
-		String expecting =
-			":s0=>2\n";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		checkDecision(g, 1, expecting, new int[] {1});
-
-		assertEquals("unexpected number of expected problems",
-				    1, equeue.size());
-		Message msg = (Message)equeue.errors.get(0);
-		assertTrue("warning must be an unreachable alt",
-				   msg instanceof GrammarUnreachableAltsMessage);
-		GrammarUnreachableAltsMessage u = (GrammarUnreachableAltsMessage)msg;
-		assertEquals("[1]", u.alts.toString());
-	}
-
-	@Test public void testRecursive() throws Exception {
-		// this is cool because the 3rd alt includes !(all other possibilities)
-		Grammar g = new Grammar(
-			"lexer grammar duh;\n" +
-			"SUBTEMPLATE\n" +
-			"        :       '{'\n" +
-			"                ( SUBTEMPLATE\n" +
-			"                | ESC\n" +
-			"                | ~('}'|'\\\\'|'{')\n" +
-			"                )*\n" +
-			"                '}'\n" +
-			"        ;\n" +
-			"fragment\n" +
-			"ESC     :       '\\\\' . ;");
-		g.createLookaheadDFAs();
-		String expecting =
-			".s0-'\\\\'->:s2=>2\n" +
-			".s0-'{'->:s1=>1\n" +
-			".s0-'}'->:s4=>4\n" +
-			".s0-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFF'}->:s3=>3\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testRecursive2() throws Exception {
-		// this is also cool because it resolves \\ to the ESC alt; it just
-		// yields a less efficient DFA
-		Grammar g = new Grammar(
-			"lexer grammar duh;\n" +
-			"SUBTEMPLATE\n" +
-			"        :       '{'\n" +
-			"                ( SUBTEMPLATE\n" +
-			"                | ESC\n" +
-			"                | ~('}'|'{')\n" +
-			"                )*\n" +
-			"                '}'\n" +
-			"        ;\n" +
-			"fragment\n" +
-			"ESC     :       '\\\\' . ;");
-		g.createLookaheadDFAs();
-		String expecting =
-			".s0-'\\\\'->.s3\n" +
-			".s0-'{'->:s2=>1\n" +
-			".s0-'}'->:s1=>4\n" +
-			".s0-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFF'}->:s5=>3\n" +
-			".s3-'\\\\'->:s8=>2\n" +
-			".s3-'{'->:s7=>2\n" +
-			".s3-'}'->.s4\n" +
-			".s3-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFF'}->:s6=>2\n" +
-			".s4-'\\u0000'..'\\uFFFF'->:s6=>2\n" +
-			".s4-<EOT>->:s5=>3\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNotFragmentInLexer() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar T;\n"+
-			"A : 'a' | ~B {;} ;\n" +
-			"fragment B : 'a' ;\n");
-		g.createLookaheadDFAs();
-		String expecting =
-			".s0-'a'->:s1=>1\n" +
-			".s0-{'\\u0000'..'`', 'b'..'\\uFFFF'}->:s2=>2\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNotSetFragmentInLexer() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar T;\n"+
-			"A : B | ~B {;} ;\n" +
-			"fragment B : 'a'|'b' ;\n");
-		g.createLookaheadDFAs();
-		String expecting =
-			".s0-'a'..'b'->:s1=>1\n" +
-			".s0-{'\\u0000'..'`', 'c'..'\\uFFFF'}->:s2=>2\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNotTokenInLexer() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar T;\n"+
-			"A : 'x' ('a' | ~B {;}) ;\n" +
-			"B : 'a' ;\n");
-		g.createLookaheadDFAs();
-		String expecting =
-			".s0-'a'->:s1=>1\n" +
-			".s0-{'\\u0000'..'`', 'b'..'\\uFFFF'}->:s2=>2\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNotComplicatedSetRuleInLexer() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar T;\n"+
-			"A : B | ~B {;} ;\n" +
-			"fragment B : 'a'|'b'|'c'..'e'|C ;\n" +
-			"fragment C : 'f' ;\n"); // has to seen from B to C
-		String expecting =
-			".s0-'a'..'f'->:s1=>1\n" +
-			".s0-{'\\u0000'..'`', 'g'..'\\uFFFF'}->:s2=>2\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testNotSetWithRuleInLexer() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar T;\n"+
-			"T : ~('a' | B) | 'a';\n" +
-			"fragment\n" +
-			"B : 'b' ;\n" +
-			"C : ~'x'{;} ;"); // force Tokens to not collapse T|C
-		String expecting =
-			".s0-'b'->:s3=>2\n" +
-			".s0-'x'->:s2=>1\n" +
-			".s0-{'\\u0000'..'a', 'c'..'w', 'y'..'\\uFFFF'}->.s1\n" +
-			".s1-<EOT>->:s2=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testSetCallsRuleWithNot() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar A;\n" +
-			"T : ~'x' ;\n" +
-			"S : 'x' (T | 'x') ;\n");
-		String expecting =
-			".s0-'x'->:s2=>2\n" +
-			".s0-{'\\u0000'..'w', 'y'..'\\uFFFF'}->:s1=>1\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	@Test public void testSynPredInLexer() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar T;\n"+
-			"LT:  '<' ' '*\n" +
-			"  |  ('<' IDENT) => '<' IDENT '>'\n" + // this was causing syntax error
-			"  ;\n" +
-			"IDENT:    'a'+;\n");
-		// basically, the Tokens rule decision should not collapse these alts into a set
-		String expecting =
-			".s0-'<'->:s1=>1\n" +
-			".s0-'a'->:s2=>2\n";
-		checkDecision(g, 4, expecting, null); // 4 is Tokens rule
-	}
-
-	// S U P P O R T
-
-	public void _template() throws Exception {
-		Grammar g = new Grammar(
-			"grammar T;\n"+
-			"a : A | B;");
-		String expecting =
-			"\n";
-		checkDecision(g, 1, expecting, null);
-	}
-
-	protected void checkDecision(Grammar g,
-								 int decision,
-								 String expecting,
-								 int[] expectingUnreachableAlts)
-		throws Exception
-	{
-
-		// mimic actions of org.antlr.Tool first time for grammar g
-		if ( g.getCodeGenerator()==null ) {
-			CodeGenerator generator = new CodeGenerator(null, g, "Java");
-			g.setCodeGenerator(generator);
-			g.buildNFA();
-			g.createLookaheadDFAs(false);
-		}
-
-		DFA dfa = g.getLookaheadDFA(decision);
-		assertNotNull("unknown decision #"+decision, dfa);
-		FASerializer serializer = new FASerializer(g);
-		String result = serializer.serialize(dfa.startState);
-		//System.out.print(result);
-		List nonDetAlts = dfa.getUnreachableAlts();
-		//System.out.println("alts w/o predict state="+nonDetAlts);
-
-		// first make sure nondeterministic alts are as expected
-		if ( expectingUnreachableAlts==null ) {
-			if ( nonDetAlts!=null && nonDetAlts.size()!=0 ) {
-				System.err.println("nondeterministic alts (should be empty): "+nonDetAlts);
-			}
-			assertEquals("unreachable alts mismatch", 0, nonDetAlts!=null?nonDetAlts.size():0);
-		}
-		else {
-			for (int i=0; i<expectingUnreachableAlts.length; i++) {
-				assertTrue("unreachable alts mismatch",
-						   nonDetAlts!=null?nonDetAlts.contains(new Integer(expectingUnreachableAlts[i])):false);
-			}
-		}
-		assertEquals(expecting, result);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestCommonTokenStream.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestCommonTokenStream.java
deleted file mode 100644
index 67b1fb8..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestCommonTokenStream.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.test;
-
-import org.antlr.runtime.*;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.Interpreter;
-import org.junit.Test;
-
-/** This actually tests the new (12/4/09) buffered but on-demand fetching token stream */
-public class TestCommonTokenStream extends BaseTest {
-    @Test public void testFirstToken() throws Exception {
-        Grammar g = new Grammar(
-            "lexer grammar t;\n"+
-            "ID : 'a'..'z'+;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';';\n" +
-            "ASSIGN : '=';\n" +
-            "PLUS : '+';\n" +
-            "MULT : '*';\n" +
-            "WS : ' '+;\n");
-        // Tokens: 012345678901234567
-        // Input:  x = 3 * 0 + 2 * 0;
-        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
-        Interpreter lexEngine = new Interpreter(g, input);
-        BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
-
-        String result = tokens.LT(1).getText();
-        String expecting = "x";
-        assertEquals(expecting, result);
-    }
-
-    @Test public void test2ndToken() throws Exception {
-        Grammar g = new Grammar(
-            "lexer grammar t;\n"+
-            "ID : 'a'..'z'+;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';';\n" +
-            "ASSIGN : '=';\n" +
-            "PLUS : '+';\n" +
-            "MULT : '*';\n" +
-            "WS : ' '+;\n");
-        // Tokens: 012345678901234567
-        // Input:  x = 3 * 0 + 2 * 0;
-        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
-        Interpreter lexEngine = new Interpreter(g, input);
-        BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
-
-        String result = tokens.LT(2).getText();
-        String expecting = " ";
-        assertEquals(expecting, result);
-    }
-
-    @Test public void testCompleteBuffer() throws Exception {
-        Grammar g = new Grammar(
-            "lexer grammar t;\n"+
-            "ID : 'a'..'z'+;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';';\n" +
-            "ASSIGN : '=';\n" +
-            "PLUS : '+';\n" +
-            "MULT : '*';\n" +
-            "WS : ' '+;\n");
-        // Tokens: 012345678901234567
-        // Input:  x = 3 * 0 + 2 * 0;
-        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
-        Interpreter lexEngine = new Interpreter(g, input);
-        BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
-
-        int i = 1;
-        Token t = tokens.LT(i);
-        while ( t.getType()!=Token.EOF ) {
-            i++;
-            t = tokens.LT(i);
-        }
-        tokens.LT(i++); // push it past end
-        tokens.LT(i++);
-
-        String result = tokens.toString();
-        String expecting = "x = 3 * 0 + 2 * 0;";
-        assertEquals(expecting, result);
-    }
-
-    @Test public void testCompleteBufferAfterConsuming() throws Exception {
-        Grammar g = new Grammar(
-            "lexer grammar t;\n"+
-            "ID : 'a'..'z'+;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';';\n" +
-            "ASSIGN : '=';\n" +
-            "PLUS : '+';\n" +
-            "MULT : '*';\n" +
-            "WS : ' '+;\n");
-        // Tokens: 012345678901234567
-        // Input:  x = 3 * 0 + 2 * 0;
-        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
-        Interpreter lexEngine = new Interpreter(g, input);
-        BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
-
-        Token t = tokens.LT(1);
-        while ( t.getType()!=Token.EOF ) {
-            tokens.consume();
-            t = tokens.LT(1);
-        }
-        tokens.consume();
-        tokens.LT(1); // push it past end
-        tokens.consume();
-        tokens.LT(1);
-
-        String result = tokens.toString();
-        String expecting = "x = 3 * 0 + 2 * 0;";
-        assertEquals(expecting, result);
-    }
-
-    @Test public void testLookback() throws Exception {
-        Grammar g = new Grammar(
-            "lexer grammar t;\n"+
-            "ID : 'a'..'z'+;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';';\n" +
-            "ASSIGN : '=';\n" +
-            "PLUS : '+';\n" +
-            "MULT : '*';\n" +
-            "WS : ' '+;\n");
-        // Tokens: 012345678901234567
-        // Input:  x = 3 * 0 + 2 * 0;
-        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
-        Interpreter lexEngine = new Interpreter(g, input);
-        BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
-
-        tokens.consume(); // get x into buffer
-        Token t = tokens.LT(-1);
-        assertEquals("x", t.getText());
-
-        tokens.consume();
-        tokens.consume(); // consume '='
-        t = tokens.LT(-3);
-        assertEquals("x", t.getText());
-        t = tokens.LT(-2);
-        assertEquals(" ", t.getText());
-        t = tokens.LT(-1);
-        assertEquals("=", t.getText());
-    }
-
-    @Test public void testOffChannel() throws Exception {
-        TokenSource lexer = // simulate input " x =34  ;\n"
-            new TokenSource() {
-                int i = 0;
-                Token[] tokens = {
-                    new CommonToken(1," "),
-                    new CommonToken(1,"x"),
-                    new CommonToken(1," "),
-                    new CommonToken(1,"="),
-                    new CommonToken(1,"34"),
-                    new CommonToken(1," "),
-                    new CommonToken(1," "),
-                    new CommonToken(1,";"),
-                    new CommonToken(1,"\n"),
-                    new CommonToken(Token.EOF,"")
-                };
-                {
-                    tokens[0].setChannel(Lexer.HIDDEN);
-                    tokens[2].setChannel(Lexer.HIDDEN);
-                    tokens[5].setChannel(Lexer.HIDDEN);
-                    tokens[6].setChannel(Lexer.HIDDEN);
-                    tokens[8].setChannel(Lexer.HIDDEN);
-                }
-                public Token nextToken() {
-                    return tokens[i++];
-                }
-                public String getSourceName() { return "test"; }
-            };
-
-        CommonTokenStream tokens = new CommonTokenStream(lexer);
-
-        assertEquals("x", tokens.LT(1).getText()); // must skip first off channel token
-        tokens.consume();
-        assertEquals("=", tokens.LT(1).getText());
-        assertEquals("x", tokens.LT(-1).getText());
-
-        tokens.consume();
-        assertEquals("34", tokens.LT(1).getText());
-        assertEquals("=", tokens.LT(-1).getText());
-
-        tokens.consume();
-        assertEquals(";", tokens.LT(1).getText());
-        assertEquals("34", tokens.LT(-1).getText());
-
-        tokens.consume();
-        assertEquals(Token.EOF, tokens.LA(1));
-        assertEquals(";", tokens.LT(-1).getText());
-
-        assertEquals("34", tokens.LT(-2).getText());
-        assertEquals("=", tokens.LT(-3).getText());
-        assertEquals("x", tokens.LT(-4).getText());
-    }
-}
\ No newline at end of file
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestCompositeGrammars.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestCompositeGrammars.java
deleted file mode 100644
index cb13bff..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestCompositeGrammars.java
+++ /dev/null
@@ -1,973 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.tool.*;
-import org.junit.Test;
-
-import java.io.File;
-
-public class TestCompositeGrammars extends BaseTest {
-	protected boolean debug = false;
-
-	@Test public void testWildcardStillWorks() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String grammar =
-			"parser grammar S;\n" +
-			"a : B . C ;\n"; // not qualified ID
-		Grammar g = new Grammar(grammar);
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testDelegatorInvokesDelegateRule() throws Exception {
-		String slave =
-			"parser grammar S;\n" +
-			"a : B {System.out.println(\"S.a\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"s : a ;\n" +
-			"B : 'b' ;" + // defines B from inherited token space
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execParser("M.g", master, "MParser", "MLexer",
-								  "s", "b", debug);
-		assertEquals("S.a\n", found);
-	}
-
-	@Test public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception {
-		// must generate something like:
-		// public int a(int x) throws RecognitionException { return gS.a(x); }
-		// in M.
-		String slave =
-			"parser grammar S;\n" +
-			"a[int x] returns [int y] : B {System.out.print(\"S.a\"); $y=1000;} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"s : label=a[3] {System.out.println($label.y);} ;\n" +
-			"B : 'b' ;" + // defines B from inherited token space
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execParser("M.g", master, "MParser", "MLexer",
-								  "s", "b", debug);
-		assertEquals("S.a1000\n", found);
-	}
-
-	@Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception {
-		// must generate something like:
-		// public int a(int x) throws RecognitionException { return gS.a(x); }
-		// in M.
-		String slave =
-			"parser grammar S;\n" +
-			"a : B {System.out.print(\"S.a\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"s : a {System.out.println($a.text);} ;\n" +
-			"B : 'b' ;" + // defines B from inherited token space
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execParser("M.g", master, "MParser", "MLexer",
-								  "s", "b", debug);
-		assertEquals("S.ab\n", found);
-	}
-
-	@Test public void testDelegatorAccessesDelegateMembers() throws Exception {
-		String slave =
-			"parser grammar S;\n" +
-			"@members {\n" +
-			"  public void foo() {System.out.println(\"foo\");}\n" +
-			"}\n" +
-			"a : B ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"grammar M;\n" +		// uses no rules from the import
-			"import S;\n" +
-			"s : 'b' {gS.foo();} ;\n" + // gS is import pointer
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execParser("M.g", master, "MParser", "MLexer",
-								  "s", "b", debug);
-		assertEquals("foo\n", found);
-	}
-
-	@Test public void testDelegatorInvokesFirstVersionOfDelegateRule() throws Exception {
-		String slave =
-			"parser grammar S;\n" +
-			"a : b {System.out.println(\"S.a\");} ;\n" +
-			"b : B ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String slave2 =
-			"parser grammar T;\n" +
-			"a : B {System.out.println(\"T.a\");} ;\n"; // hidden by S.a
-		writeFile(tmpdir, "T.g", slave2);
-		String master =
-			"grammar M;\n" +
-			"import S,T;\n" +
-			"s : a ;\n" +
-			"B : 'b' ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execParser("M.g", master, "MParser", "MLexer",
-								  "s", "b", debug);
-		assertEquals("S.a\n", found);
-	}
-
-	@Test public void testDelegatesSeeSameTokenType() throws Exception {
-		String slave =
-			"parser grammar S;\n" + // A, B, C token type order
-			"tokens { A; B; C; }\n" +
-			"x : A {System.out.println(\"S.x\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String slave2 =
-			"parser grammar T;\n" +
-			"tokens { C; B; A; }\n" + // reverse order
-			"y : A {System.out.println(\"T.y\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "T.g", slave2);
-		// The lexer will create rules to match letters a, b, c.
-		// The associated token types A, B, C must have the same value
-		// in all imported parsers.  Since ANTLR regenerates all imports
-		// for use with the delegator M, it can generate the same token type
-		// mapping in each parser:
-		// public static final int C=6;
-		// public static final int EOF=-1;
-		// public static final int B=5;
-		// public static final int WS=7;
-		// public static final int A=4;
-
-		String master =
-			"grammar M;\n" +
-			"import S,T;\n" +
-			"s : x y ;\n" + // matches AA, which should be "aa"
-			"B : 'b' ;\n" + // another order: B, A, C
-			"A : 'a' ;\n" +
-			"C : 'c' ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execParser("M.g", master, "MParser", "MLexer",
-								  "s", "aa", debug);
-		assertEquals("S.x\n" +
-					 "T.y\n", found);
-	}
-
-	@Test public void testDelegatesSeeSameTokenType2() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"parser grammar S;\n" + // A, B, C token type order
-			"tokens { A; B; C; }\n" +
-			"x : A {System.out.println(\"S.x\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String slave2 =
-			"parser grammar T;\n" +
-			"tokens { C; B; A; }\n" + // reverse order
-			"y : A {System.out.println(\"T.y\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "T.g", slave2);
-
-		String master =
-			"grammar M;\n" +
-			"import S,T;\n" +
-			"s : x y ;\n" + // matches AA, which should be "aa"
-			"B : 'b' ;\n" + // another order: B, A, C
-			"A : 'a' ;\n" +
-			"C : 'c' ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-
-		String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, WS=7]";
-		String expectedStringLiteralToTypeMap = "{}";
-		String expectedTypeToTokenList = "[A, B, C, WS]";
-
-		assertEquals(expectedTokenIDToTypeMap,
-					 realElements(g.composite.tokenIDToTypeMap).toString());
-		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
-		assertEquals(expectedTypeToTokenList,
-					 realElements(g.composite.typeToTokenList).toString());
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testCombinedImportsCombined() throws Exception {
-		// for now, we don't allow combined to import combined
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"grammar S;\n" + // A, B, C token type order
-			"tokens { A; B; C; }\n" +
-			"x : 'x' INT {System.out.println(\"S.x\");} ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"s : x INT ;\n";
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-
-		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
-		String expectedError = "error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: combined grammar M cannot import combined grammar S";
-		assertEquals("unexpected errors: "+equeue, expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
-	}
-
-	@Test public void testSameStringTwoNames() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"parser grammar S;\n" +
-			"tokens { A='a'; }\n" +
-			"x : A {System.out.println(\"S.x\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String slave2 =
-			"parser grammar T;\n" +
-			"tokens { X='a'; }\n" +
-			"y : X {System.out.println(\"T.y\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "T.g", slave2);
-
-		String master =
-			"grammar M;\n" +
-			"import S,T;\n" +
-			"s : x y ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-
-		String expectedTokenIDToTypeMap = "[A=4, WS=5, X=6]";
-		String expectedStringLiteralToTypeMap = "{'a'=4}";
-		String expectedTypeToTokenList = "[A, WS, X]";
-
-		assertEquals(expectedTokenIDToTypeMap,
-					 realElements(g.composite.tokenIDToTypeMap).toString());
-		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
-		assertEquals(expectedTypeToTokenList,
-					 realElements(g.composite.typeToTokenList).toString());
-
-		Object expectedArg = "X='a'";
-		Object expectedArg2 = "A";
-		int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_CONFLICT;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-
-		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
-
-		String expectedError =
-			"error(158): T.g:2:10: cannot alias X='a'; string already assigned to A";
-		assertEquals(expectedError, equeue.errors.get(0).toString());
-	}
-
-	@Test public void testSameNameTwoStrings() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"parser grammar S;\n" +
-			"tokens { A='a'; }\n" +
-			"x : A {System.out.println(\"S.x\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String slave2 =
-			"parser grammar T;\n" +
-			"tokens { A='x'; }\n" +
-			"y : A {System.out.println(\"T.y\");} ;\n";
-		
-		writeFile(tmpdir, "T.g", slave2);
-
-		String master =
-			"grammar M;\n" +
-			"import S,T;\n" +
-			"s : x y ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-
-		String expectedTokenIDToTypeMap = "[A=4, T__6=6, WS=5]";
-		String expectedStringLiteralToTypeMap = "{'a'=4, 'x'=6}";
-		String expectedTypeToTokenList = "[A, WS, T__6]";
-
-		assertEquals(expectedTokenIDToTypeMap,
-					 realElements(g.composite.tokenIDToTypeMap).toString());
-		assertEquals(expectedStringLiteralToTypeMap, sortMapToString(g.composite.stringLiteralToTypeMap));
-		assertEquals(expectedTypeToTokenList,
-					 realElements(g.composite.typeToTokenList).toString());
-
-		Object expectedArg = "A='x'";
-		Object expectedArg2 = "'a'";
-		int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_REASSIGNMENT;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-
-		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
-
-		String expectedError =
-			"error(159): T.g:2:10: cannot alias A='x'; token name already assigned to 'a'";
-		assertEquals(expectedError, equeue.errors.get(0).toString());
-	}
-
-	@Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"parser grammar S;\n" +
-			"options {tokenVocab=whatever;}\n" +
-			"tokens { A='a'; }\n" +
-			"x : A {System.out.println(\"S.x\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"s : x ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-
-		Object expectedArg = "S";
-		int expectedMsgID = ErrorManager.MSG_TOKEN_VOCAB_IN_DELEGATE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsWarning(equeue, expectedMessage);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-		assertEquals("unexpected errors: "+equeue, 1, equeue.warnings.size());
-
-		String expectedError =
-			"warning(160): S.g:2:10: tokenVocab option ignored in imported grammar S";
-		assertEquals(expectedError, equeue.warnings.get(0).toString());
-	}
-
-	@Test public void testImportedTokenVocabWorksInRoot() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"parser grammar S;\n" +
-			"tokens { A='a'; }\n" +
-			"x : A {System.out.println(\"S.x\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-
-		String tokens =
-			"A=99\n";
-		writeFile(tmpdir, "Test.tokens", tokens);
-
-		String master =
-			"grammar M;\n" +
-			"options {tokenVocab=Test;}\n" +
-			"import S;\n" +
-			"s : x ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-
-		String expectedTokenIDToTypeMap = "[A=99, WS=101]";
-		String expectedStringLiteralToTypeMap = "{'a'=100}";
-		String expectedTypeToTokenList = "[A, 'a', WS]";
-
-		assertEquals(expectedTokenIDToTypeMap,
-					 realElements(g.composite.tokenIDToTypeMap).toString());
-		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
-		assertEquals(expectedTypeToTokenList,
-					 realElements(g.composite.typeToTokenList).toString());
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testSyntaxErrorsInImportsNotThrownOut() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"parser grammar S;\n" +
-			"options {toke\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"s : x ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-
-		// whole bunch of errors from bad S.g file
-		assertEquals("unexpected errors: "+equeue, 5, equeue.errors.size());
-	}
-
-	@Test public void testSyntaxErrorsInImportsNotThrownOut2() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"parser grammar S;\n" +
-			": A {System.out.println(\"S.x\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"s : x ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-
-		// whole bunch of errors from bad S.g file
-		assertEquals("unexpected errors: "+equeue, 3, equeue.errors.size());
-	}
-
-	@Test public void testDelegatorRuleOverridesDelegate() throws Exception {
-		String slave =
-			"parser grammar S;\n" +
-			"a : b {System.out.println(\"S.a\");} ;\n" +
-			"b : B ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"b : 'b'|'c' ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execParser("M.g", master, "MParser", "MLexer",
-								  "a", "c", debug);
-		assertEquals("S.a\n", found);
-	}
-
-	@Test public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception {
-		String slave =
-			"parser grammar JavaDecl;\n" +
-			"type : 'int' ;\n" +
-			"decl : type ID ';'\n" +
-			"     | type ID init ';' {System.out.println(\"JavaDecl: \"+$decl.text);}\n" +
-			"     ;\n" +
-			"init : '=' INT ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "JavaDecl.g", slave);
-		String master =
-			"grammar Java;\n" +
-			"import JavaDecl;\n" +
-			"prog : decl ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"\n" +
-			"ID  : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		// for float to work in decl, type must be overridden
-		String found = execParser("Java.g", master, "JavaParser", "JavaLexer",
-								  "prog", "float x = 3;", debug);
-		assertEquals("JavaDecl: floatx=3;\n", found);
-	}
-
-    @Test public void testDelegatorRuleOverridesDelegates() throws Exception {
-        String slave =
-            "parser grammar S;\n" +
-            "a : b {System.out.println(\"S.a\");} ;\n" +
-            "b : B ;\n" ;
-        mkdir(tmpdir);
-        writeFile(tmpdir, "S.g", slave);
-
-        String slave2 =
-            "parser grammar T;\n" +
-            "tokens { A='x'; }\n" +
-            "b : B {System.out.println(\"T.b\");} ;\n";
-        writeFile(tmpdir, "T.g", slave2);
-
-        String master =
-            "grammar M;\n" +
-            "import S, T;\n" +
-            "b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" +
-            "WS : (' '|'\\n') {skip();} ;\n" ;
-        String found = execParser("M.g", master, "MParser", "MLexer",
-                                  "a", "c", debug);
-        assertEquals("M.b\n" +
-                     "S.a\n", found);
-    }
-
-	// LEXER INHERITANCE
-
-	@Test public void testLexerDelegatorInvokesDelegateRule() throws Exception {
-		String slave =
-			"lexer grammar S;\n" +
-			"A : 'a' {System.out.println(\"S.A\");} ;\n" +
-			"C : 'c' ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"lexer grammar M;\n" +
-			"import S;\n" +
-			"B : 'b' ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execLexer("M.g", master, "M", "abc", debug);
-		assertEquals("S.A\nabc\n", found);
-	}
-
-	@Test public void testLexerDelegatorRuleOverridesDelegate() throws Exception {
-		String slave =
-			"lexer grammar S;\n" +
-			"A : 'a' {System.out.println(\"S.A\");} ;\n" +
-			"B : 'b' {System.out.println(\"S.B\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"lexer grammar M;\n" +
-			"import S;\n" +
-			"A : 'a' B {System.out.println(\"M.A\");} ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execLexer("M.g", master, "M", "ab", debug);
-		assertEquals("S.B\n" +
-					 "M.A\n" +
-					 "ab\n", found);
-	}
-
-	@Test public void testLexerDelegatorRuleOverridesDelegateLeavingNoRules() throws Exception {
-		// M.Tokens has nothing to predict tokens from S.  Should
-		// not include S.Tokens alt in this case?
-		String slave =
-			"lexer grammar S;\n" +
-			"A : 'a' {System.out.println(\"S.A\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"lexer grammar M;\n" +
-			"import S;\n" +
-			"A : 'a' {System.out.println(\"M.A\");} ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		writeFile(tmpdir, "/M.g", master);
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		composite.assignTokenTypes();
-		composite.defineGrammarSymbols();
-		composite.createNFAs();
-		g.createLookaheadDFAs(false);
-
-		// predict only alts from M not S
-		String expectingDFA =
-			".s0-'a'->.s1\n" +
-			".s0-{'\\n', ' '}->:s3=>2\n" +
-			".s1-<EOT>->:s2=>1\n";
-		org.antlr.analysis.DFA dfa = g.getLookaheadDFA(1);
-		FASerializer serializer = new FASerializer(g);
-		String result = serializer.serialize(dfa.startState);
-		assertEquals(expectingDFA, result);
-
-		// must not be an "unreachable alt: Tokens" error
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testInvalidImportMechanism() throws Exception {
-		// M.Tokens has nothing to predict tokens from S.  Should
-		// not include S.Tokens alt in this case?
-		String slave =
-			"lexer grammar S;\n" +
-			"A : 'a' {System.out.println(\"S.A\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"tree grammar M;\n" +
-			"import S;\n" +
-			"a : A ;";
-		writeFile(tmpdir, "/M.g", master);
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-
-		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
-		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
-
-		String expectedError =
-			"error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: tree grammar M cannot import lexer grammar S";
-		assertEquals(expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
-	}
-
-	@Test public void testSyntacticPredicateRulesAreNotInherited() throws Exception {
-		// if this compiles, it means that synpred1_S is defined in S.java
-		// but not MParser.java.  MParser has its own synpred1_M which must
-		// be separate to compile.
-		String slave =
-			"parser grammar S;\n" +
-			"a : 'a' {System.out.println(\"S.a1\");}\n" +
-			"  | 'a' {System.out.println(\"S.a2\");}\n" +
-			"  ;\n" +
-			"b : 'x' | 'y' {;} ;\n"; // preds generated but not needed in DFA here
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"grammar M;\n" +
-			"options {backtrack=true;}\n" +
-			"import S;\n" +
-			"start : a b ;\n" +
-			"nonsense : 'q' | 'q' {;} ;" + // forces def of preds here in M
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execParser("M.g", master, "MParser", "MLexer",
-								  "start", "ax", debug);
-		assertEquals("S.a1\n", found);
-	}
-
-	@Test public void testKeywordVSIDGivesNoWarning() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"lexer grammar S;\n" +
-			"A : 'abc' {System.out.println(\"S.A\");} ;\n" +
-			"ID : 'a'..'z'+ ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"a : A {System.out.println(\"M.a\");} ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		String found = execParser("M.g", master, "MParser", "MLexer",
-								  "a", "abc", debug);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-		assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size());
-
-		assertEquals("S.A\nM.a\n", found);
-	}
-
-	@Test public void testWarningForUndefinedToken() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"lexer grammar S;\n" +
-			"A : 'abc' {System.out.println(\"S.A\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"a : ABC A {System.out.println(\"M.a\");} ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		// A is defined in S but M should still see it and not give a warning.
-		// The only problem is ABC.
-
-		rawGenerateAndBuildRecognizer("M.g", master, "MParser", "MLexer", debug);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-		assertEquals("unexpected warnings: "+equeue, 1, equeue.warnings.size());
-
-		String expectedError =
-			"warning(105): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+File.separator+"M.g:3:5: no lexer rule corresponding to token: ABC";
-		assertEquals(expectedError, equeue.warnings.get(0).toString().replaceFirst("\\-[0-9]+",""));
-	}
-
-	/** Make sure that M can import S that imports T. */
-	@Test public void test3LevelImport() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"parser grammar T;\n" +
-			"a : T ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "T.g", slave);
-		String slave2 =
-			"parser grammar S;\n" + // A, B, C token type order
-			"import T;\n" +
-			"a : S ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave2);
-
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"a : M ;\n" ;
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-		g.composite.defineGrammarSymbols();
-
-		String expectedTokenIDToTypeMap = "[M=4, S=5, T=6]";
-		String expectedStringLiteralToTypeMap = "{}";
-		String expectedTypeToTokenList = "[M, S, T]";
-
-		assertEquals(expectedTokenIDToTypeMap,
-					 realElements(g.composite.tokenIDToTypeMap).toString());
-		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
-		assertEquals(expectedTypeToTokenList,
-					 realElements(g.composite.typeToTokenList).toString());
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-
-		boolean ok =
-			rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, ok);
-	}
-
-	@Test public void testBigTreeOfImports() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"parser grammar T;\n" +
-			"x : T ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "T.g", slave);
-		slave =
-			"parser grammar S;\n" +
-			"import T;\n" +
-			"y : S ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-
-		slave =
-			"parser grammar C;\n" +
-			"i : C ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "C.g", slave);
-		slave =
-			"parser grammar B;\n" +
-			"j : B ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "B.g", slave);
-		slave =
-			"parser grammar A;\n" +
-			"import B,C;\n" +
-			"k : A ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "A.g", slave);
-
-		String master =
-			"grammar M;\n" +
-			"import S,A;\n" +
-			"a : M ;\n" ;
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-		g.composite.defineGrammarSymbols();
-
-		String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, M=7, S=8, T=9]";
-		String expectedStringLiteralToTypeMap = "{}";
-		String expectedTypeToTokenList = "[A, B, C, M, S, T]";
-
-		assertEquals(expectedTokenIDToTypeMap,
-					 realElements(g.composite.tokenIDToTypeMap).toString());
-		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
-		assertEquals(expectedTypeToTokenList,
-					 realElements(g.composite.typeToTokenList).toString());
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-
-		boolean ok =
-			rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, ok);
-	}
-
-	@Test public void testRulesVisibleThroughMultilevelImport() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String slave =
-			"parser grammar T;\n" +
-			"x : T ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "T.g", slave);
-		String slave2 =
-			"parser grammar S;\n" + // A, B, C token type order
-			"import T;\n" +
-			"a : S ;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave2);
-
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"a : M x ;\n" ; // x MUST BE VISIBLE TO M
-		writeFile(tmpdir, "M.g", master);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-		g.composite.defineGrammarSymbols();
-
-		String expectedTokenIDToTypeMap = "[M=4, S=5, T=6]";
-		String expectedStringLiteralToTypeMap = "{}";
-		String expectedTypeToTokenList = "[M, S, T]";
-
-		assertEquals(expectedTokenIDToTypeMap,
-					 realElements(g.composite.tokenIDToTypeMap).toString());
-		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
-		assertEquals(expectedTypeToTokenList,
-					 realElements(g.composite.typeToTokenList).toString());
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testNestedComposite() throws Exception {
-		// Wasn't compiling. http://www.antlr.org/jira/browse/ANTLR-438
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String gstr =
-			"lexer grammar L;\n" +
-			"T1: '1';\n" +
-			"T2: '2';\n" +
-			"T3: '3';\n" +
-			"T4: '4';\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "L.g", gstr);
-		gstr =
-			"parser grammar G1;\n" +
-			"s: a | b;\n" +
-			"a: T1;\n" +
-			"b: T2;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "G1.g", gstr);
-
-		gstr =
-			"parser grammar G2;\n" +
-			"import G1;\n" +
-			"a: T3;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "G2.g", gstr);
-		String G3str =
-			"grammar G3;\n" +
-			"import G2;\n" +
-			"b: T4;\n" ;
-		mkdir(tmpdir);
-		writeFile(tmpdir, "G3.g", G3str);
-
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/G3.g",composite);
-		composite.setDelegationRoot(g);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-		g.composite.defineGrammarSymbols();
-
-		String expectedTokenIDToTypeMap = "[T1=4, T2=5, T3=6, T4=7]";
-		String expectedStringLiteralToTypeMap = "{}";
-		String expectedTypeToTokenList = "[T1, T2, T3, T4]";
-
-		assertEquals(expectedTokenIDToTypeMap,
-					 realElements(g.composite.tokenIDToTypeMap).toString());
-		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
-		assertEquals(expectedTypeToTokenList,
-					 realElements(g.composite.typeToTokenList).toString());
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-
-		boolean ok =
-			rawGenerateAndBuildRecognizer("G3.g", G3str, "G3Parser", null, false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, ok);
-	}
-
-	@Test public void testHeadersPropogatedCorrectlyToImportedGrammars() throws Exception {
-		String slave =
-			"parser grammar S;\n" +
-			"a : B {System.out.print(\"S.a\");} ;\n";
-		mkdir(tmpdir);
-		writeFile(tmpdir, "S.g", slave);
-		String master =
-			"grammar M;\n" +
-			"import S;\n" +
-			"@header{package mypackage;}\n" +
-			"@lexer::header{package mypackage;}\n" +
-			"s : a ;\n" +
-			"B : 'b' ;" + // defines B from inherited token space
-			"WS : (' '|'\\n') {skip();} ;\n" ;
-		boolean ok = antlr("M.g", "M.g", master, debug);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, ok);
-	}
-
-}
\ No newline at end of file
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestDFAConversion.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestDFAConversion.java
deleted file mode 100644
index 387b328..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestDFAConversion.java
+++ /dev/null
@@ -1,1787 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.analysis.DFA;
-import org.antlr.analysis.DecisionProbe;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.misc.BitSet;
-import org.antlr.tool.*;
-import org.junit.Test;
-
-import java.util.*;
-
-public class TestDFAConversion extends BaseTest {
-
-	@Test public void testA() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : A C | B;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-B->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testAB_or_AC() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : A B | A C;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-B->:s2=>1\n" +
-			".s1-C->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testAB_or_AC_k2() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-			"options {k=2;}\n"+
-			"a : A B | A C;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-B->:s2=>1\n" +
-			".s1-C->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testAB_or_AC_k1() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-			"options {k=1;}\n"+
-			"a : A B | A C;");
-		String expecting =
-			".s0-A->:s1=>1\n";
-		int[] unreachableAlts = new int[] {2};
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "A" ;
-		int[] danglingAlts = new int[] {2};
-		int numWarnings = 2; // ambig upon A
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testselfRecurseNonDet() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : A a X | A a Y;");
-		List altsWithRecursion = Arrays.asList(new Object[] {1,2});
-		assertNonLLStar(g, altsWithRecursion);
-	}
-
-	@Test public void testRecursionOverflow() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a Y | A A A A A X ;\n" + // force recursion past m=4
-			"a : A a | Q;");
-		List expectedTargetRules = Arrays.asList(new Object[] {"a"});
-		int expectedAlt = 1;
-		assertRecursionOverflow(g, expectedTargetRules, expectedAlt);
-	}
-
-	@Test public void testRecursionOverflow2() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a Y | A+ X ;\n" + // force recursion past m=4
-			"a : A a | Q;");
-		List expectedTargetRules = Arrays.asList(new Object[] {"a"});
-		int expectedAlt = 1;
-		assertRecursionOverflow(g, expectedTargetRules, expectedAlt);
-	}
-
-	@Test public void testRecursionOverflowWithPredOk() throws Exception {
-		// overflows with k=*, but resolves with pred
-		// no warnings/errors
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : (a Y)=> a Y | A A A A A X ;\n" + // force recursion past m=4
-			"a : A a | Q;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s0-Q&&{synpred1_t}?->:s11=>1\n" +
-			".s1-A->.s2\n" +
-			".s1-Q&&{synpred1_t}?->:s10=>1\n" +
-			".s2-A->.s3\n" +
-			".s2-Q&&{synpred1_t}?->:s9=>1\n" +
-			".s3-A->.s4\n" +
-			".s3-Q&&{synpred1_t}?->:s8=>1\n" +
-			".s4-A->.s5\n" +
-			".s4-Q&&{synpred1_t}?->:s6=>1\n" +
-			".s5-{synpred1_t}?->:s6=>1\n" +
-			".s5-{true}?->:s7=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testRecursionOverflowWithPredOk2() throws Exception {
-		// must predict Z w/o predicate
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : (a Y)=> a Y | A A A A A X | Z;\n" + // force recursion past m=4
-			"a : A a | Q;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s0-Q&&{synpred1_t}?->:s11=>1\n" +
-			".s0-Z->:s12=>3\n" +
-			".s1-A->.s2\n" +
-			".s1-Q&&{synpred1_t}?->:s10=>1\n" +
-			".s2-A->.s3\n" +
-			".s2-Q&&{synpred1_t}?->:s9=>1\n" +
-			".s3-A->.s4\n" +
-			".s3-Q&&{synpred1_t}?->:s8=>1\n" +
-			".s4-A->.s5\n" +
-			".s4-Q&&{synpred1_t}?->:s6=>1\n" +
-			".s5-{synpred1_t}?->:s6=>1\n" +
-			".s5-{true}?->:s7=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testCannotSeePastRecursion() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"x   : y X\n" +
-			"    | y Y\n" +
-			"    ;\n" +
-			"y   : L y R\n" +
-			"    | B\n" +
-			"    ;");
-		List altsWithRecursion = Arrays.asList(new Object[] {1,2});
-		assertNonLLStar(g, altsWithRecursion);
-	}
-
-	@Test public void testSynPredResolvesRecursion() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"x   : (y X)=> y X\n" +
-			"    | y Y\n" +
-			"    ;\n" +
-			"y   : L y R\n" +
-			"    | B\n" +
-			"    ;");
-		String expecting =
-			".s0-B->.s4\n" +
-			".s0-L->.s1\n" +
-			".s1-{synpred1_t}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n" +
-			".s4-{synpred1_t}?->:s2=>1\n" +
-			".s4-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testSynPredMissingInMiddle() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"x   : (A)=> X\n" +
-			"    | X\n" +  // assume missing synpred is true also
-			"	 | (C)=> X" +
-			"    ;\n");
-		String expecting =
-			".s0-X->.s1\n" +
-			".s1-{synpred1_t}?->:s2=>1\n" +
-			".s1-{synpred2_t}?->:s4=>3\n" +
-			".s1-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testAutoBacktrackAndPredMissingInMiddle() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"x   : (A)=> X\n" +
-			"    | X\n" +  // assume missing synpred is true also
-			"	 | (C)=> X" +
-			"    ;\n");
-		String expecting =
-			".s0-X->.s1\n" +
-			".s1-{synpred1_t}?->:s2=>1\n" +  // gen code should have this as (A)=>
-			".s1-{synpred2_t}?->:s3=>2\n" + // gen code should have this as (X)=>
-			".s1-{synpred3_t}?->:s4=>3\n"; // gen code should have this as (C)=>
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testSemPredResolvesRecursion() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"x   : {p}? y X\n" +
-			"    | y Y\n" +
-			"    ;\n" +
-			"y   : L y R\n" +
-			"    | B\n" +
-			"    ;");
-		String expecting =
-			".s0-B->.s4\n" +
-			".s0-L->.s1\n" +
-			".s1-{p}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n" +
-			".s4-{p}?->:s2=>1\n" +
-			".s4-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testSemPredResolvesRecursion2() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"x\n" +
-			"options {k=1;}\n" +
-			"   : {p}? y X\n" +
-			"    | y Y\n" +
-			"    ;\n" +
-			"y   : L y R\n" +
-			"    | B\n" +
-			"    ;");
-		String expecting =
-			".s0-B->.s4\n" +
-			".s0-L->.s1\n" +
-			".s1-{p}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n" +
-			".s4-{p}?->:s2=>1\n" +
-			".s4-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testSemPredResolvesRecursion3() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"x\n" +
-			"options {k=2;}\n" + // just makes bigger DFA
-			"   : {p}? y X\n" +
-			"    | y Y\n" +
-			"    ;\n" +
-			"y   : L y R\n" +
-			"    | B\n" +
-			"    ;");
-		String expecting =
-			".s0-B->.s6\n" +
-			".s0-L->.s1\n" +
-			".s1-B->.s5\n" +
-			".s1-L->.s2\n" +
-			".s2-{p}?->:s3=>1\n" +
-			".s2-{true}?->:s4=>2\n" +
-			".s5-{p}?->:s3=>1\n" +
-			".s5-{true}?->:s4=>2\n" +
-			".s6-X->:s3=>1\n" +
-			".s6-Y->:s4=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testSynPredResolvesRecursion2() throws Exception {
-		// k=* fails and it retries/succeeds with k=1 silently
-		// because of predicate
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"statement\n" +
-			"    :     (reference ASSIGN)=> reference ASSIGN expr\n" +
-			"    |     expr\n" +
-			"    ;\n" +
-			"expr:     reference\n" +
-			"    |     INT\n" +
-			"    |     FLOAT\n" +
-			"    ;\n" +
-			"reference\n" +
-			"    :     ID L argument_list R\n" +
-			"    ;\n" +
-			"argument_list\n" +
-			"    :     expr COMMA expr\n" +
-			"    ;");
-		String expecting =
-			".s0-ID->.s1\n" +
-			".s0-{FLOAT, INT}->:s3=>2\n" +
-			".s1-{synpred1_t}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testSynPredResolvesRecursion3() throws Exception {
-		// No errors with k=1; don't try k=* first
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"statement\n" +
-			"options {k=1;}\n" +
-			"    :     (reference ASSIGN)=> reference ASSIGN expr\n" +
-			"    |     expr\n" +
-			"    ;\n" +
-			"expr:     reference\n" +
-			"    |     INT\n" +
-			"    |     FLOAT\n" +
-			"    ;\n" +
-			"reference\n" +
-			"    :     ID L argument_list R\n" +
-			"    ;\n" +
-			"argument_list\n" +
-			"    :     expr COMMA expr\n" +
-			"    ;");
-		String expecting =
-			".s0-ID->.s1\n" +
-			".s0-{FLOAT, INT}->:s3=>2\n" +
-			".s1-{synpred1_t}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testSynPredResolvesRecursion4() throws Exception {
-		// No errors with k=2; don't try k=* first
-		// Should be ok like k=1, except with a bigger DFA
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"statement\n" +
-			"options {k=2;}\n" +
-			"    :     (reference ASSIGN)=> reference ASSIGN expr\n" +
-			"    |     expr\n" +
-			"    ;\n" +
-			"expr:     reference\n" +
-			"    |     INT\n" +
-			"    |     FLOAT\n" +
-			"    ;\n" +
-			"reference\n" +
-			"    :     ID L argument_list R\n" +
-			"    ;\n" +
-			"argument_list\n" +
-			"    :     expr COMMA expr\n" +
-			"    ;");
-		String expecting =
-			".s0-ID->.s1\n" +
-			".s0-{FLOAT, INT}->:s4=>2\n" +
-			".s1-L->.s2\n" +
-			".s2-{synpred1_t}?->:s3=>1\n" +
-			".s2-{true}?->:s4=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testSynPredResolvesRecursionInLexer() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A :     (B ';')=> B ';'\n" +
-			"  |     B '.'\n" +
-			"  ;\n" +
-			"fragment\n" +
-			"B :     '(' B ')'\n" +
-			"  |     'x'\n" +
-			"  ;\n");
-		String expecting =
-			".s0-'('->.s1\n" +
-			".s0-'x'->.s4\n" +
-			".s1-{synpred1_t}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n" +
-			".s4-{synpred1_t}?->:s2=>1\n" +
-			".s4-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testAutoBacktrackResolvesRecursionInLexer() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"options {backtrack=true;}\n"+
-			"A :     B ';'\n" +
-			"  |     B '.'\n" +
-			"  ;\n" +
-			"fragment\n" +
-			"B :     '(' B ')'\n" +
-			"  |     'x'\n" +
-			"  ;\n");
-		String expecting =
-			".s0-'('->.s1\n" +
-			".s0-'x'->.s4\n" +
-			".s1-{synpred1_t}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n" +
-			".s4-{synpred1_t}?->:s2=>1\n" +
-			".s4-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testAutoBacktrackResolvesRecursion() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"x   : y X\n" +
-			"    | y Y\n" +
-			"    ;\n" +
-			"y   : L y R\n" +
-			"    | B\n" +
-			"    ;");
-		String expecting =
-			".s0-B->.s4\n" +
-			".s0-L->.s1\n" +
-			".s1-{synpred1_t}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n" +
-			".s4-{synpred1_t}?->:s2=>1\n" +
-			".s4-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testselfRecurseNonDet2() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : P a P | P;");
-		// nondeterministic from left edge
-		String expecting =
-			".s0-P->.s1\n" +
-			".s1-EOF->:s3=>2\n"+
-			".s1-P->:s2=>1\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "P P";
-		int[] danglingAlts = null;
-		int numWarnings = 1;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testIndirectRecursionLoop() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : b X ;\n"+
-			"b : a B ;\n");
-
-		DecisionProbe.verbose=true; // make sure we get all error info
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		Set<Rule> leftRecursive = g.getLeftRecursiveRules();
-		Set expectedRules =
-			new HashSet() {{add("a"); add("b");}};
-		assertEquals(expectedRules, ruleNames(leftRecursive));
-
-		assertEquals(1, equeue.errors.size());
-		Message msg = (Message)equeue.errors.get(0);
-		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
-				    msg instanceof LeftRecursionCyclesMessage);
-		LeftRecursionCyclesMessage cyclesMsg = (LeftRecursionCyclesMessage)msg;
-
-		// cycle of [a, b]
-		Collection result = cyclesMsg.cycles;
-		Set expecting = new HashSet() {{add("a"); add("b");}};
-		assertEquals(expecting, ruleNames2(result));
-	}
-
-	@Test public void testIndirectRecursionLoop2() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : i b X ;\n"+ // should see through i
-			"b : a B ;\n" +
-			"i : ;\n");
-
-		DecisionProbe.verbose=true; // make sure we get all error info
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		Set leftRecursive = g.getLeftRecursiveRules();
-		Set expectedRules =
-			new HashSet() {{add("a"); add("b");}};
-		assertEquals(expectedRules, ruleNames(leftRecursive));
-
-		assertEquals(1, equeue.errors.size());
-		Message msg = (Message)equeue.errors.get(0);
-		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
-				    msg instanceof LeftRecursionCyclesMessage);
-		LeftRecursionCyclesMessage cyclesMsg = (LeftRecursionCyclesMessage)msg;
-
-		// cycle of [a, b]
-		Collection result = cyclesMsg.cycles;
-		Set expecting = new HashSet() {{add("a"); add("b");}};
-		assertEquals(expecting, ruleNames2(result));
-	}
-
-	@Test public void testIndirectRecursionLoop3() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : i b X ;\n"+ // should see through i
-			"b : a B ;\n" +
-			"i : ;\n" +
-			"d : e ;\n" +
-			"e : d ;\n");
-
-		DecisionProbe.verbose=true; // make sure we get all error info
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		Set leftRecursive = g.getLeftRecursiveRules();
-		Set expectedRules =
-			new HashSet() {{add("a"); add("b"); add("e"); add("d");}};
-		assertEquals(expectedRules, ruleNames(leftRecursive));
-
-		assertEquals(1, equeue.errors.size());
-		Message msg = (Message)equeue.errors.get(0);
-		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
-				    msg instanceof LeftRecursionCyclesMessage);
-		LeftRecursionCyclesMessage cyclesMsg = (LeftRecursionCyclesMessage)msg;
-
-		// cycle of [a, b]
-		Collection result = cyclesMsg.cycles;
-		Set expecting = new HashSet() {{add("a"); add("b"); add("d"); add("e");}};
-		assertEquals(expecting, ruleNames2(result));
-	}
-
-	@Test public void testifThenElse() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : IF s (E s)? | B;\n" +
-			"slist: s SEMI ;");
-		String expecting =
-			".s0-E->:s1=>1\n" +
-			".s0-SEMI->:s2=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "E";
-		int[] danglingAlts = null;
-		int numWarnings = 1;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-		expecting =
-			".s0-B->:s2=>2\n" +
-			".s0-IF->:s1=>1\n";
-		checkDecision(g, 2, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testifThenElseChecksStackSuffixConflict() throws Exception {
-		// if you don't check stack soon enough, this finds E B not just E
-		// as ambig input
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"slist: s SEMI ;\n"+
-			"s : IF s el | B;\n" +
-			"el: (E s)? ;\n");
-		String expecting =
-			".s0-E->:s1=>1\n" +
-			".s0-SEMI->:s2=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "E";
-		int[] danglingAlts = null;
-		int numWarnings = 1;
-		checkDecision(g, 2, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-		expecting =
-			".s0-B->:s2=>2\n" +
-			".s0-IF->:s1=>1\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-    @Test
-    public void testInvokeRule() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : b A\n" +
-			"  | b B\n" +
-			"  | C\n" +
-			"  ;\n" +
-			"b : X\n" +
-			"  ;\n");
-		String expecting =
-			".s0-C->:s4=>3\n" +
-            ".s0-X->.s1\n" +
-            ".s1-A->:s2=>1\n" +
-            ".s1-B->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test
-    public void testDoubleInvokeRuleLeftEdge() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : b X\n" +
-			"  | b Y\n" +
-			"  ;\n" +
-			"b : c B\n" +
-			"  | c\n" +
-			"  ;\n" +
-			"c : C ;\n");
-		String expecting =
-			".s0-C->.s1\n" +
-			".s1-B->.s2\n" +
-			".s1-X->:s3=>1\n" +
-			".s1-Y->:s4=>2\n" +
-			".s2-X->:s3=>1\n" +
-			".s2-Y->:s4=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-		expecting =
-			".s0-C->.s1\n" +
-            ".s1-B->:s2=>1\n" +
-            ".s1-X..Y->:s3=>2\n";
-		checkDecision(g, 2, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testimmediateTailRecursion() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : A a | A B;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-A->:s3=>1\n" +
-			".s1-B->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test
-    public void testAStar_immediateTailRecursion() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : A a | ;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-EOF->:s2=>2\n";
-		int[] unreachableAlts = null; // without
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testNoStartRule() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : A a | X;"); // single rule 'a' refers to itself; no start rule
-
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		Message msg = (Message)equeue.warnings.get(0);
-		assertTrue("expecting no start rules; found "+msg.getClass().getName(),
-				   msg instanceof GrammarSemanticsMessage);
-	}
-
-	@Test
-    public void testAStar_immediateTailRecursion2() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : A a | A ;");
-		String expecting =
-			".s0-A->.s1\n" +
-            ".s1-A->:s2=>1\n" +
-            ".s1-EOF->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testimmediateLeftRecursion() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : a A | B;");
-		Set leftRecursive = g.getLeftRecursiveRules();
-		Set expectedRules = new HashSet() {{add("a");}};
-		assertEquals(expectedRules, ruleNames(leftRecursive));
-	}
-
-	@Test public void testIndirectLeftRecursion() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : b | A ;\n" +
-			"b : c ;\n" +
-			"c : a | C ;\n");
-		Set leftRecursive = g.getLeftRecursiveRules();
-		Set expectedRules = new HashSet() {{add("a"); add("b"); add("c");}};
-		assertEquals(expectedRules, ruleNames(leftRecursive));
-	}
-
-	@Test public void testLeftRecursionInMultipleCycles() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-				"s : a x ;\n" +
-				"a : b | A ;\n" +
-				"b : c ;\n" +
-				"c : a | C ;\n" +
-				"x : y | X ;\n" +
-				"y : x ;\n");
-		Set leftRecursive = g.getLeftRecursiveRules();
-		Set expectedRules =
-			new HashSet() {{add("a"); add("b"); add("c"); add("x"); add("y");}};
-		assertEquals(expectedRules, ruleNames(leftRecursive));
-	}
-
-	@Test public void testCycleInsideRuleDoesNotForceInfiniteRecursion() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a ;\n" +
-			"a : (A|)+ B;\n");
-		// before I added a visitedStates thing, it was possible to loop
-		// forever inside of a rule if there was an epsilon loop.
-		Set leftRecursive = g.getLeftRecursiveRules();
-		Set expectedRules = new HashSet();
-		assertEquals(expectedRules, leftRecursive);
-	}
-
-	// L O O P S
-
-	@Test public void testAStar() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : ( A )* ;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-EOF->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testAorBorCStar() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : ( A | B | C )* ;");
-		String expecting =
-			".s0-A..C->:s1=>1\n" +
-			".s0-EOF->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testAPlus() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : ( A )+ ;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-EOF->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
-	}
-
-	@Test public void testAPlusNonGreedyWhenDeterministic() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (options {greedy=false;}:A)+ ;\n");
-		// should look the same as A+ since no ambiguity
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-EOF->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testAPlusNonGreedyWhenNonDeterministic() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (options {greedy=false;}:A)+ A+ ;\n");
-		// should look the same as A+ since no ambiguity
-		String expecting =
-			".s0-A->:s1=>2\n"; // always chooses to exit
-		int[] unreachableAlts = new int[] {1};
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "A";
-		int[] danglingAlts = null;
-		int numWarnings = 2;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testAPlusGreedyWhenNonDeterministic() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (options {greedy=true;}:A)+ A+ ;\n");
-		// should look the same as A+ since no ambiguity
-		String expecting =
-			".s0-A->:s1=>1\n"; // always chooses to enter loop upon A
-		// turns off 1 of the warnings; A can never exit the loop now
-		int[] unreachableAlts = new int[] {2};
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 1;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testAorBorCPlus() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : ( A | B | C )+ ;");
-		String expecting =
-			".s0-A..C->:s1=>1\n" +
-			".s0-EOF->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testAOptional() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : ( A )? B ;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-B->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
-	}
-
-	@Test public void testAorBorCOptional() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : ( A | B | C )? Z ;");
-		String expecting =
-			".s0-A..C->:s1=>1\n" +
-			".s0-Z->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
-	}
-
-	// A R B I T R A R Y  L O O K A H E A D
-
-	@Test
-    public void testAStarBOrAStarC() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (A)* B | (A)* C;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-B->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback
-		expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-C->:s2=>2\n";
-		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback
-		expecting =
-			".s0-A->.s1\n" +
-            ".s0-B->:s2=>1\n" +
-            ".s0-C->:s3=>2\n" +
-            ".s1-A->.s1\n" +
-            ".s1-B->:s2=>1\n" +
-            ".s1-C->:s3=>2\n";
-		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule block
-	}
-
-	@Test
-    public void testAStarBOrAPlusC() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (A)* B | (A)+ C;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-B->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback
-		expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-C->:s2=>2\n";
-		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback
-		expecting =
-			".s0-A->.s1\n" +
-            ".s0-B->:s2=>1\n" +
-            ".s1-A->.s1\n" +
-            ".s1-B->:s2=>1\n" +
-            ".s1-C->:s3=>2\n";
-		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule block
-	}
-
-
-    @Test
-    public void testAOrBPlusOrAPlus() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (A|B)* X | (A)+ Y;");
-		String expecting =
-			".s0-A..B->:s1=>1\n" +
-			".s0-X->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback (A|B)*
-		expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-Y->:s2=>2\n";
-		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback (A)+
-		expecting =
-			".s0-A->.s1\n" +
-            ".s0-B..X->:s2=>1\n" +
-            ".s1-A->.s1\n" +
-            ".s1-B..X->:s2=>1\n" +
-            ".s1-Y->:s3=>2\n";
-		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule
-	}
-
-	@Test public void testLoopbackAndExit() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (A|B)+ B;");
-		String expecting =
-			".s0-A->:s3=>1\n" +
-			".s0-B->.s1\n" +
-			".s1-A..B->:s3=>1\n" +
-			".s1-EOF->:s2=>2\n"; // sees A|B as a set
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testOptionalAltAndBypass() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (A|B)? B;");
-		String expecting =
-			".s0-A->:s2=>1\n" +
-			".s0-B->.s1\n" +
-			".s1-B->:s2=>1\n" +
-			".s1-EOF->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	// R E S O L V E  S Y N  C O N F L I C T S
-
-	@Test public void testResolveLL1ByChoosingFirst() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : A C | A C;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-C->:s2=>1\n";
-		int[] unreachableAlts = new int[] {2};
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "A C";
-		int[] danglingAlts = null;
-		int numWarnings = 2;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testResolveLL2ByChoosingFirst() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : A B | A B;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-B->:s2=>1\n";
-		int[] unreachableAlts = new int[] {2};
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "A B";
-		int[] danglingAlts = null;
-		int numWarnings = 2;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testResolveLL2MixAlt() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : A B | A C | A B | Z;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s0-Z->:s4=>4\n" +
-			".s1-B->:s2=>1\n" +
-			".s1-C->:s3=>2\n";
-		int[] unreachableAlts = new int[] {3};
-		int[] nonDetAlts = new int[] {1,3};
-		String ambigInput = "A B";
-		int[] danglingAlts = null;
-		int numWarnings = 2;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testIndirectIFThenElseStyleAmbig() throws Exception {
-		// the (c)+ loopback is ambig because it could match "CASE"
-			// by entering the loop or by falling out, ignoring (s)*, and
-			// falling back into the (cg)* loop, which starts over and
-		// calls cg again.  Either choice allows it to get back to
-		// the same node.  The software catches it as:
-		// "avoid infinite closure computation emanating from alt 1
-		// of ():27|2|[8 $]" where state 27 is the first alt of (c)+
-		// and 8 is the first alt of the (cg)* loop.
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-			"s : stat ;\n" +
-			"stat : LCURLY ( cg )* RCURLY | E SEMI  ;\n" +
-			"cg : (c)+ (stat)* ;\n" +
-			"c : CASE E ;\n");
-		String expecting =
-			".s0-CASE->:s2=>1\n" +
-			".s0-E..RCURLY->:s1=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "CASE";
-		int[] danglingAlts = null;
-		int numWarnings = 1;
-		checkDecision(g, 3, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	// S E T S
-
-	@Test public void testComplement() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : ~(A | B | C) | C {;} ;\n" +
-			"b : X Y Z ;");
-		String expecting =
-			".s0-C->:s2=>2\n" +
-			".s0-X..Z->:s1=>1\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testComplementToken() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : ~C | C {;} ;\n" +
-			"b : X Y Z ;");
-		String expecting =
-			".s0-C->:s2=>2\n" +
-			".s0-X..Z->:s1=>1\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testComplementChar() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : ~'x' | 'x' {;} ;\n");
-		String expecting =
-			".s0-'x'->:s2=>2\n" +
-			".s0-{'\\u0000'..'w', 'y'..'\\uFFFF'}->:s1=>1\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testComplementCharSet() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : ~(' '|'\t'|'x'|'y') | 'x';\n" + // collapse into single set
-			"B : 'y' ;");
-		String expecting =
-			".s0-'y'->:s2=>2\n" +
-			".s0-{'\\u0000'..'\\b', '\\n'..'\\u001F', '!'..'x', 'z'..'\\uFFFF'}->:s1=>1\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testNoSetCollapseWithActions() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (A | B {foo}) | C;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-B->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testRuleAltsSetCollapse() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : A | B | C ;"
-		);
-		String expecting = // still looks like block
-			"(grammar t (rule a ARG RET scope (BLOCK (ALT A <end-of-alt>) (ALT B <end-of-alt>) (ALT C <end-of-alt>) <end-of-block>) <end-of-rule>))";
-		assertEquals(expecting, g.getGrammarTree().toStringTree());
-	}
-
-	@Test public void testTokensRuleAltsDoNotCollapse() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';" +
-			"B : 'b';\n"
-		);
-		String expecting =
-			".s0-'a'->:s1=>1\n" +
-			".s0-'b'->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testMultipleSequenceCollision() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-			"a : (A{;}|B)\n" +
-			"  | (A{;}|B)\n" +
-			"  | A\n" +
-			"  ;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-B->:s2=>1\n"; // not optimized because states are nondet
-		int[] unreachableAlts = new int[] {2,3};
-		int[] nonDetAlts = new int[] {1,2,3};
-		String ambigInput = "A";
-		int[] danglingAlts = null;
-		int numWarnings = 3;
-		checkDecision(g, 3, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-		/* There are 2 nondet errors, but the checkDecision only checks first one :(
-		The "B" conflicting input is not checked except by virtue of the
-		result DFA.
-<string>:2:5: Decision can match input such as "A" using multiple alternatives:
-alt 1 via NFA path 7,2,3
-alt 2 via NFA path 14,9,10
-alt 3 via NFA path 16,17
-As a result, alternative(s) 2,3 were disabled for that input,
-<string>:2:5: Decision can match input such as "B" using multiple alternatives:
-alt 1 via NFA path 7,8,4,5
-alt 2 via NFA path 14,15,11,12
-As a result, alternative(s) 2 were disabled for that input
-<string>:2:5: The following alternatives are unreachable: 2,3
-*/
-	}
-
-	@Test public void testMultipleAltsSameSequenceCollision() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-			"a : type ID \n" +
-			"  | type ID\n" +
-			"  | type ID\n" +
-			"  | type ID\n" +
-			"  ;\n" +
-			"\n" +
-			"type : I | F;");
-		// nondeterministic from left edge; no stop state
-		String expecting =
-			".s0-F..I->.s1\n" +
-			".s1-ID->:s2=>1\n";
-		int[] unreachableAlts = new int[] {2,3,4};
-		int[] nonDetAlts = new int[] {1,2,3,4};
-		String ambigInput = "F..I ID";
-		int[] danglingAlts = null;
-		int numWarnings = 2;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testFollowReturnsToLoopReenteringSameRule() throws Exception {
-		// D07 can be matched in the (...)? or fall out of esc back into (..)*
-		// loop in sl.  Note that D07 is matched by ~(R|SLASH).  No good
-		// way to write that grammar I guess
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"sl : L ( esc | ~(R|SLASH) )* R ;\n" +
-			"\n" +
-			"esc : SLASH ( N | D03 (D07)? ) ;");
-		String expecting =
-			".s0-D03..N->:s2=>2\n" +
-			".s0-R->:s3=>3\n" +
-			".s0-SLASH->:s1=>1\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "D07";
-		int[] danglingAlts = null;
-		int numWarnings = 1;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testTokenCallsAnotherOnLeftEdge() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"F   :   I '.'\n" +
-			"    ;\n" +
-			"I   :   '0'\n" +
-			"    ;\n"
-		);
-		String expecting =
-			".s0-'0'->.s1\n" +
-			".s1-'.'->:s3=>1\n" +
-			".s1-<EOT>->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-
-	@Test public void testSelfRecursionAmbigAlts() throws Exception {
-		// ambiguous grammar for "L ID R" (alts 1,2 of a)
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : a;\n" +
-			"a   :   L ID R\n" +
-			"    |   L a R\n" + // disabled for L ID R
-			"    |   b\n" +
-			"    ;\n" +
-			"\n" +
-			"b   :   ID\n" +
-			"    ;\n");
-		String expecting =
-			".s0-ID->:s5=>3\n" +
-			".s0-L->.s1\n" +
-			".s1-ID->.s2\n" +
-			".s1-L->:s4=>2\n" +
-			".s2-R->:s3=>1\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "L ID R";
-		int[] danglingAlts = null;
-		int numWarnings = 1;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testIndirectRecursionAmbigAlts() throws Exception {
-		// ambiguous grammar for "L ID R" (alts 1,2 of a)
-		// This was derived from the java grammar 12/4/2004 when it
-		// was not handling a unaryExpression properly.  I traced it
-		// to an incorrect closure-busy condition.  It thought that the trace
-		// of a->b->a->b again for "L ID" was an infinite loop, but actually
-		// the repeat call to b only happens *after* an L has been matched.
-		// I added a check to see what the initial stack looks like and it
-		// seems to work now.
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s   :   a ;\n" +
-			"a   :   L ID R\n" +
-			"    |   b\n" +
-			"    ;\n" +
-			"\n" +
-			"b   :   ID\n" +
-			"    |   L a R\n" +
-			"    ;");
-		String expecting =
-			".s0-ID->:s4=>2\n" +
-			".s0-L->.s1\n" +
-			".s1-ID->.s2\n" +
-			".s1-L->:s4=>2\n" +
-			".s2-R->:s3=>1\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "L ID R";
-		int[] danglingAlts = null;
-		int numWarnings = 1;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testTailRecursionInvokedFromArbitraryLookaheadDecision() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : b X\n" +
-			"  | b Y\n" +
-			"  ;\n" +
-			"\n" +
-			"b : A\n" +
-			"  | A b\n" +
-			"  ;\n");
-		List altsWithRecursion = Arrays.asList(new Object[] {1,2});
-		assertNonLLStar(g, altsWithRecursion);
-	}
-
-	@Test public void testWildcardStarK1AndNonGreedyByDefaultInParser() throws Exception {
-		// no error because .* assumes it should finish when it sees R
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-			"s : A block EOF ;\n" +
-			"block : L .* R ;");
-		String expecting =
-			".s0-A..L->:s2=>1\n" +
-			".s0-R->:s1=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testWildcardPlusK1AndNonGreedyByDefaultInParser() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n" +
-			"s : A block EOF ;\n" +
-			"block : L .+ R ;");
-		String expecting =
-			".s0-A..L->:s2=>1\n" +
-			".s0-R->:s1=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-	}
-
-	@Test public void testGatedSynPred() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"x   : (X)=> X\n" +
-			"    | Y\n" +
-			"    ;\n");
-		String expecting =
-			".s0-X&&{synpred1_t}?->:s1=>1\n" + // does not hoist; it gates edges
-			".s0-Y->:s2=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-
-		Set<String> preds = g.synPredNamesUsedInDFA;
-		Set<String> expectedPreds = new HashSet<String>() {{add("synpred1_t");}};
-		assertEquals("predicate names not recorded properly in grammar", expectedPreds, preds);
-	}
-
-	@Test public void testHoistedGatedSynPred() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"x   : (X)=> X\n" +
-			"    | X\n" +
-			"    ;\n");
-		String expecting =
-			".s0-X->.s1\n" +
-			".s1-{synpred1_t}?->:s2=>1\n" + // hoists into decision
-			".s1-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-
-		Set<String> preds = g.synPredNamesUsedInDFA;
-		Set<String> expectedPreds = new HashSet<String>() {{add("synpred1_t");}};
-		assertEquals("predicate names not recorded properly in grammar", expectedPreds, preds);
-	}
-
-	@Test public void testHoistedGatedSynPred2() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"x   : (X)=> (X|Y)\n" +
-			"    | X\n" +
-			"    ;\n");
-		String expecting =
-			".s0-X->.s1\n" +
-			".s0-Y&&{synpred1_t}?->:s2=>1\n" +
-			".s1-{synpred1_t}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-
-		Set<String> preds = g.synPredNamesUsedInDFA;
-		Set<String> expectedPreds = new HashSet<String>() {{add("synpred1_t");}};
-		assertEquals("predicate names not recorded properly in grammar", expectedPreds, preds);
-	}
-
-	@Test public void testGreedyGetsNoErrorForAmbig() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : IF s (options {greedy=true;} : E s)? | B;\n" +
-			"slist: s SEMI ;");
-		String expecting =
-			".s0-E->:s1=>1\n" +
-			".s0-SEMI->:s2=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
-		expecting =
-			".s0-B->:s2=>2\n" +
-			".s0-IF->:s1=>1\n";
-		checkDecision(g, 2, expecting, null, null, null, null, 0);
-	}
-
-	@Test public void testGreedyNonLLStarStillGetsError() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"x   : ( options {greedy=true;}\n" +
-			"	   : y X\n" +
-			"      | y Y\n" +
-			"	   )\n" +
-			"    ;\n" +
-			"y   : L y R\n" +
-			"    | B\n" +
-			"    ;");
-		List altsWithRecursion = Arrays.asList(new Object[] {1,2});
-		assertNonLLStar(g, altsWithRecursion);
-	}
-
-	@Test public void testGreedyRecOverflowStillGetsError() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"s : (options {greedy=true;} : a Y | A A A A A X) ;\n" + // force recursion past m=4
-			"a : A a | Q;");
-		List expectedTargetRules = Arrays.asList(new Object[] {"a"});
-		int expectedAlt = 1;
-		assertRecursionOverflow(g, expectedTargetRules, expectedAlt);
-	}
-
-
-	// Check state table creation
-
-	@Test public void testCyclicTableCreation() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : A+ X | A+ Y ;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-B->:s2=>2\n";
-	}
-
-
-	// S U P P O R T
-
-	public void _template() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : A | B;");
-		String expecting =
-			"\n";
-		checkDecision(g, 1, expecting, null, null, null, null, 0);
-	}
-
-	protected void assertNonLLStar(Grammar g, List expectedBadAlts) {
-		DecisionProbe.verbose=true; // make sure we get all error info
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		// mimic actions of org.antlr.Tool first time for grammar g
-		if ( g.getNumberOfDecisions()==0 ) {
-			g.buildNFA();
-			g.createLookaheadDFAs(false);
-		}
-		NonRegularDecisionMessage msg = getNonRegularDecisionMessage(equeue.errors);
-		assertTrue("expected fatal non-LL(*) msg", msg!=null);
-		List<Integer> alts = new ArrayList();
-		alts.addAll(msg.altsWithRecursion);
-		Collections.sort(alts);
-		assertEquals(expectedBadAlts,alts);
-	}
-
-	protected void assertRecursionOverflow(Grammar g,
-										   List expectedTargetRules,
-										   int expectedAlt) {
-		DecisionProbe.verbose=true; // make sure we get all error info
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		// mimic actions of org.antlr.Tool first time for grammar g
-		if ( g.getNumberOfDecisions()==0 ) {
-			g.buildNFA();
-			g.createLookaheadDFAs(false);
-		}
-		RecursionOverflowMessage msg = getRecursionOverflowMessage(equeue.errors);
-		assertTrue("missing expected recursion overflow msg"+msg, msg!=null);
-		assertEquals("target rules mismatch",
-					 expectedTargetRules.toString(), msg.targetRules.toString());
-		assertEquals("mismatched alt", expectedAlt, msg.alt);
-	}
-
-    @Test
-    public void testWildcardInTreeGrammar() throws Exception {
-        Grammar g = new Grammar(
-            "tree grammar t;\n" +
-            "a : A B | A . ;\n");
-        String expecting =
-            ".s0-A->.s1\n" +
-            ".s1-A->:s3=>2\n" +
-            ".s1-B->:s2=>1\n";
-        int[] unreachableAlts = null;
-        int[] nonDetAlts = new int[] {1,2};
-        String ambigInput = null;
-        int[] danglingAlts = null;
-        int numWarnings = 1;
-        checkDecision(g, 1, expecting, unreachableAlts,
-                      nonDetAlts, ambigInput, danglingAlts, numWarnings);
-    }
-
-    @Test
-    public void testWildcardInTreeGrammar2() throws Exception {
-        Grammar g = new Grammar(
-            "tree grammar t;\n" +
-            "a : ^(A X Y) | ^(A . .) ;\n");
-        String expecting =
-            ".s0-A->.s1\n" +
-            ".s1-DOWN->.s2\n" +
-            ".s2-X->.s3\n" +
-            ".s2-{A, Y}->:s6=>2\n" +
-            ".s3-Y->.s4\n" +
-            ".s3-{DOWN, A..X}->:s6=>2\n" +
-            ".s4-DOWN->:s6=>2\n" +
-            ".s4-UP->:s5=>1\n";
-        int[] unreachableAlts = null;
-        int[] nonDetAlts = new int[] {1,2};
-        String ambigInput = null;
-        int[] danglingAlts = null;
-        int numWarnings = 1;
-        checkDecision(g, 1, expecting, unreachableAlts,
-                      nonDetAlts, ambigInput, danglingAlts, numWarnings);
-    }
-
-    protected void checkDecision(Grammar g,
-								 int decision,
-								 String expecting,
-								 int[] expectingUnreachableAlts,
-								 int[] expectingNonDetAlts,
-								 String expectingAmbigInput,
-								 int[] expectingDanglingAlts,
-								 int expectingNumWarnings)
-		throws Exception
-	{
-		DecisionProbe.verbose=true; // make sure we get all error info
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		// mimic actions of org.antlr.Tool first time for grammar g
-		if ( g.getNumberOfDecisions()==0 ) {
-			g.buildNFA();
-			g.createLookaheadDFAs(false);
-		}
-		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
-		g.setCodeGenerator(generator);
-
-		if ( equeue.size()!=expectingNumWarnings ) {
-			System.err.println("Warnings issued: "+equeue);
-		}
-
-		assertEquals("unexpected number of expected problems",
-				   expectingNumWarnings, equeue.size());
-
-		DFA dfa = g.getLookaheadDFA(decision);
-		assertNotNull("no DFA for decision "+decision, dfa);
-		FASerializer serializer = new FASerializer(g);
-		String result = serializer.serialize(dfa.startState);
-
-		List unreachableAlts = dfa.getUnreachableAlts();
-
-		// make sure unreachable alts are as expected
-		if ( expectingUnreachableAlts!=null ) {
-			BitSet s = new BitSet();
-			s.addAll(expectingUnreachableAlts);
-			BitSet s2 = new BitSet();
-			s2.addAll(unreachableAlts);
-			assertEquals("unreachable alts mismatch", s, s2);
-		}
-		else {
-			assertEquals("number of unreachable alts", 0,
-						 unreachableAlts!=null?unreachableAlts.size():0);
-		}
-
-		// check conflicting input
-		if ( expectingAmbigInput!=null ) {
-			// first, find nondet message
-			Message msg = (Message)equeue.warnings.get(0);
-			assertTrue("expecting nondeterminism; found "+msg.getClass().getName(),
-					    msg instanceof GrammarNonDeterminismMessage);
-			GrammarNonDeterminismMessage nondetMsg =
-				getNonDeterminismMessage(equeue.warnings);
-			List labels =
-				nondetMsg.probe.getSampleNonDeterministicInputSequence(nondetMsg.problemState);
-			String input = nondetMsg.probe.getInputSequenceDisplay(labels);
-			assertEquals(expectingAmbigInput, input);
-		}
-
-		// check nondet alts
-		if ( expectingNonDetAlts!=null ) {
-			RecursionOverflowMessage recMsg = null;
-			GrammarNonDeterminismMessage nondetMsg =
-				getNonDeterminismMessage(equeue.warnings);
-			List nonDetAlts = null;
-			if ( nondetMsg!=null ) {
-				nonDetAlts =
-					nondetMsg.probe.getNonDeterministicAltsForState(nondetMsg.problemState);
-			}
-			else {
-				recMsg = getRecursionOverflowMessage(equeue.warnings);
-				if ( recMsg!=null ) {
-					//nonDetAlts = new ArrayList(recMsg.alts);
-				}
-			}
-			// compare nonDetAlts with expectingNonDetAlts
-			BitSet s = new BitSet();
-			s.addAll(expectingNonDetAlts);
-			BitSet s2 = new BitSet();
-			s2.addAll(nonDetAlts);
-			assertEquals("nondet alts mismatch", s, s2);
-			assertTrue("found no nondet alts; expecting: "+
-					    str(expectingNonDetAlts),
-					    nondetMsg!=null||recMsg!=null);
-		}
-		else {
-			// not expecting any nondet alts, make sure there are none
-			GrammarNonDeterminismMessage nondetMsg =
-				getNonDeterminismMessage(equeue.warnings);
-			assertNull("found nondet alts, but expecting none", nondetMsg);
-		}
-
-		assertEquals(expecting, result);
-	}
-
-	protected GrammarNonDeterminismMessage getNonDeterminismMessage(List warnings) {
-		for (int i = 0; i < warnings.size(); i++) {
-			Message m = (Message) warnings.get(i);
-			if ( m instanceof GrammarNonDeterminismMessage ) {
-				return (GrammarNonDeterminismMessage)m;
-			}
-		}
-		return null;
-	}
-
-	protected NonRegularDecisionMessage getNonRegularDecisionMessage(List errors) {
-		for (int i = 0; i < errors.size(); i++) {
-			Message m = (Message) errors.get(i);
-			if ( m instanceof NonRegularDecisionMessage ) {
-				return (NonRegularDecisionMessage)m;
-			}
-		}
-		return null;
-	}
-
-	protected RecursionOverflowMessage getRecursionOverflowMessage(List warnings) {
-		for (int i = 0; i < warnings.size(); i++) {
-			Message m = (Message) warnings.get(i);
-			if ( m instanceof RecursionOverflowMessage ) {
-				return (RecursionOverflowMessage)m;
-			}
-		}
-		return null;
-	}
-
-	protected LeftRecursionCyclesMessage getLeftRecursionCyclesMessage(List warnings) {
-		for (int i = 0; i < warnings.size(); i++) {
-			Message m = (Message) warnings.get(i);
-			if ( m instanceof LeftRecursionCyclesMessage ) {
-				return (LeftRecursionCyclesMessage)m;
-			}
-		}
-		return null;
-	}
-
-	protected GrammarDanglingStateMessage getDanglingStateMessage(List warnings) {
-		for (int i = 0; i < warnings.size(); i++) {
-			Message m = (Message) warnings.get(i);
-			if ( m instanceof GrammarDanglingStateMessage ) {
-				return (GrammarDanglingStateMessage)m;
-			}
-		}
-		return null;
-	}
-
-	protected String str(int[] elements) {
-		StringBuffer buf = new StringBuffer();
-		for (int i = 0; i < elements.length; i++) {
-			if ( i>0 ) {
-				buf.append(", ");
-			}
-			int element = elements[i];
-			buf.append(element);
-		}
-		return buf.toString();
-	}
-
-	protected Set<String> ruleNames(Set<Rule> rules) {
-		Set<String> x = new HashSet<String>();
-		for (Rule r : rules) {
-			x.add(r.name);
-		}
-		return x;
-	}
-
-	protected Set<String> ruleNames2(Collection<HashSet> rules) {
-		Set<String> x = new HashSet<String>();
-		for (HashSet s : rules) {
-			x.addAll(ruleNames(s));
-		}
-		return x;
-	}
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestDFAMatching.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestDFAMatching.java
deleted file mode 100644
index a4fb2fb..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestDFAMatching.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.analysis.DFA;
-import org.antlr.analysis.NFA;
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.tool.Grammar;
-import org.junit.Test;
-
-public class TestDFAMatching extends BaseTest {
-
-    /** Public default constructor used by TestRig */
-    public TestDFAMatching() {
-    }
-
-    @Test public void testSimpleAltCharTest() throws Exception {
-        Grammar g = new Grammar(
-                "lexer grammar t;\n"+
-                "A : {;}'a' | 'b' | 'c';");
-		g.buildNFA();
-		g.createLookaheadDFAs(false);
-        DFA dfa = g.getLookaheadDFA(1);
-        checkPrediction(dfa,"a",1);
-        checkPrediction(dfa,"b",2);
-        checkPrediction(dfa,"c",3);
-        checkPrediction(dfa,"d", NFA.INVALID_ALT_NUMBER);
-    }
-
-    @Test public void testSets() throws Exception {
-        Grammar g = new Grammar(
-                "lexer grammar t;\n"+
-                "A : {;}'a'..'z' | ';' | '0'..'9' ;");
-		g.buildNFA();
-        g.createLookaheadDFAs(false);
-        DFA dfa = g.getLookaheadDFA(1);
-        checkPrediction(dfa,"a",1);
-        checkPrediction(dfa,"q",1);
-        checkPrediction(dfa,"z",1);
-        checkPrediction(dfa,";",2);
-        checkPrediction(dfa,"9",3);
-    }
-
-    @Test public void testFiniteCommonLeftPrefixes() throws Exception {
-        Grammar g = new Grammar(
-                "lexer grammar t;\n"+
-                "A : 'a' 'b' | 'a' 'c' | 'd' 'e' ;");
-		g.buildNFA();
-        g.createLookaheadDFAs(false);
-        DFA dfa = g.getLookaheadDFA(1);
-        checkPrediction(dfa,"ab",1);
-        checkPrediction(dfa,"ac",2);
-        checkPrediction(dfa,"de",3);
-        checkPrediction(dfa,"q", NFA.INVALID_ALT_NUMBER);
-    }
-
-    @Test public void testSimpleLoops() throws Exception {
-        Grammar g = new Grammar(
-                "lexer grammar t;\n"+
-                "A : (DIGIT)+ '.' DIGIT | (DIGIT)+ ;\n" +
-                "fragment DIGIT : '0'..'9' ;\n");
-		g.buildNFA();
-        g.createLookaheadDFAs(false);
-        DFA dfa = g.getLookaheadDFA(3);
-        checkPrediction(dfa,"32",2);
-        checkPrediction(dfa,"999.2",1);
-        checkPrediction(dfa,".2", NFA.INVALID_ALT_NUMBER);
-    }
-
-    protected void checkPrediction(DFA dfa, String input, int expected)
-        throws Exception
-    {
-        ANTLRStringStream stream = new ANTLRStringStream(input);
-        assertEquals(dfa.predict(stream), expected);
-    }
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestFastQueue.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestFastQueue.java
deleted file mode 100644
index 9a3c717..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestFastQueue.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.runtime.misc.FastQueue;
-import org.junit.Test;
-
-import java.util.NoSuchElementException;
-
-import static org.junit.Assert.assertEquals;
-
-public class TestFastQueue {
-    @Test public void testQueueNoRemove() throws Exception {
-        FastQueue<String> q = new FastQueue<String>();
-        q.add("a");
-        q.add("b");
-        q.add("c");
-        q.add("d");
-        q.add("e");
-        String expecting = "a b c d e";
-        String found = q.toString();
-        assertEquals(expecting, found);
-    }
-
-    @Test public void testQueueThenRemoveAll() throws Exception {
-        FastQueue<String> q = new FastQueue<String>();
-        q.add("a");
-        q.add("b");
-        q.add("c");
-        q.add("d");
-        q.add("e");
-        StringBuffer buf = new StringBuffer();
-        while ( q.size()>0 ) {
-            String o = q.remove();
-            buf.append(o);
-            if ( q.size()>0 ) buf.append(" ");
-        }
-        assertEquals("queue should be empty", 0, q.size());
-        String expecting = "a b c d e";
-        String found = buf.toString();
-        assertEquals(expecting, found);
-    }
-
-    @Test public void testQueueThenRemoveOneByOne() throws Exception {
-        StringBuffer buf = new StringBuffer();
-        FastQueue<String> q = new FastQueue<String>();
-        q.add("a");
-        buf.append(q.remove());
-        q.add("b");
-        buf.append(q.remove());
-        q.add("c");
-        buf.append(q.remove());
-        q.add("d");
-        buf.append(q.remove());
-        q.add("e");
-        buf.append(q.remove());
-        assertEquals("queue should be empty", 0, q.size());
-        String expecting = "abcde";
-        String found = buf.toString();
-        assertEquals(expecting, found);
-    }
-
-    // E r r o r s
-
-    @Test public void testGetFromEmptyQueue() throws Exception {
-        FastQueue<String> q = new FastQueue<String>();
-        String msg = null;
-        try { q.remove(); }
-        catch (NoSuchElementException nsee) {
-            msg = nsee.getMessage();
-        }
-        String expecting = "queue index 0 > last index -1";
-        String found = msg;
-        assertEquals(expecting, found);
-    }
-
-    @Test public void testGetFromEmptyQueueAfterSomeAdds() throws Exception {
-        FastQueue<String> q = new FastQueue<String>();
-        q.add("a");
-        q.add("b");
-        q.remove();
-        q.remove();
-        String msg = null;
-        try { q.remove(); }
-        catch (NoSuchElementException nsee) {
-            msg = nsee.getMessage();
-        }
-        String expecting = "queue index 0 > last index -1";
-        String found = msg;
-        assertEquals(expecting, found);
-    }
-
-    @Test public void testGetFromEmptyQueueAfterClear() throws Exception {
-        FastQueue<String> q = new FastQueue<String>();
-        q.add("a");
-        q.add("b");
-        q.clear();
-        String msg = null;
-        try { q.remove(); }
-        catch (NoSuchElementException nsee) {
-            msg = nsee.getMessage();
-        }
-        String expecting = "queue index 0 > last index -1";
-        String found = msg;
-        assertEquals(expecting, found);
-    }
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestHeteroAST.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestHeteroAST.java
deleted file mode 100644
index 4078421..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestHeteroAST.java
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.junit.Test;
-
-/** Test hetero trees in parsers and tree parsers */
-public class TestHeteroAST extends BaseTest {
-	protected boolean debug = false;
-
-	// PARSERS -- AUTO AST
-
-    @Test public void testToken() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "@members {static class V extends CommonTree {\n" +
-            "  public V(Token t) { token=t;}\n" +
-            "  public String toString() { return token.getText()+\"<V>\";}\n" +
-            "}\n" +
-            "}\n"+
-            "a : ID<V> ;\n"+
-            "ID : 'a'..'z'+ ;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-        String found = execParser("T.g", grammar, "TParser", "TLexer",
-                    "a", "a", debug);
-        assertEquals("a<V>\n", found);
-    }
-
-	@Test public void testTokenCommonTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID<CommonTree> ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-					"a", "a", debug);
-		assertEquals("a\n", found);
-	}
-
-    @Test public void testTokenWithQualifiedType() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "@members {static class V extends CommonTree {\n" +
-            "  public V(Token t) { token=t;}\n" +
-            "  public String toString() { return token.getText()+\"<V>\";}\n" +
-            "}\n" +
-            "}\n"+
-            "a : ID<TParser.V> ;\n"+ // TParser.V is qualified name
-            "ID : 'a'..'z'+ ;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-        String found = execParser("T.g", grammar, "TParser", "TLexer",
-                    "a", "a", debug);
-        assertEquals("a<V>\n", found);
-    }
-
-	@Test public void testNamedType() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID<node=V> ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-					"a", "a", debug);
-		assertEquals("a<V>\n", found);
-	}
-
-
-	@Test public void testTokenWithLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : x=ID<V> ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("a<V>\n", found);
-	}
-
-	@Test public void testTokenWithListLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : x+=ID<V> ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("a<V>\n", found);
-	}
-
-	@Test public void testTokenRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID<V>^ ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("a<V>\n", found);
-	}
-
-	@Test public void testTokenRootWithListLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : x+=ID<V>^ ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("a<V>\n", found);
-	}
-
-	@Test public void testString() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : 'begin'<V> ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "begin", debug);
-		assertEquals("begin<V>\n", found);
-	}
-
-	@Test public void testStringRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : 'begin'<V>^ ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "begin", debug);
-		assertEquals("begin<V>\n", found);
-	}
-
-	// PARSERS -- REWRITE AST
-
-	@Test public void testRewriteToken() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID -> ID<V> ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("a<V>\n", found);
-	}
-
-	@Test public void testRewriteTokenWithArgs() throws Exception {
-		// arg to ID<V>[42,19,30] means you're constructing node not associated with ID
-		// so must pass in token manually
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {\n" +
-			"static class V extends CommonTree {\n" +
-			"  public int x,y,z;\n"+
-			"  public V(int ttype, int x, int y, int z) { this.x=x; this.y=y; this.z=z; token=new CommonToken(ttype,\"\"); }\n" +
-			"  public V(int ttype, Token t, int x) { token=t; this.x=x;}\n" +
-			"  public String toString() { return (token!=null?token.getText():\"\")+\"<V>;\"+x+y+z;}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID -> ID<V>[42,19,30] ID<V>[$ID,99] ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("<V>;421930 a<V>;9900\n", found);
-	}
-
-	@Test public void testRewriteTokenRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID INT -> ^(ID<V> INT) ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a 2", debug);
-		assertEquals("(a<V> 2)\n", found);
-	}
-
-	@Test public void testRewriteString() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : 'begin' -> 'begin'<V> ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "begin", debug);
-		assertEquals("begin<V>\n", found);
-	}
-
-	@Test public void testRewriteStringRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"@members {static class V extends CommonTree {\n" +
-			"  public V(Token t) { token=t;}\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : 'begin' INT -> ^('begin'<V> INT) ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "begin 2", debug);
-		assertEquals("(begin<V> 2)\n", found);
-	}
-
-    @Test public void testRewriteRuleResults() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "tokens {LIST;}\n" +
-            "@members {\n" +
-            "static class V extends CommonTree {\n" +
-            "  public V(Token t) { token=t;}\n" +
-            "  public String toString() { return token.getText()+\"<V>\";}\n" +
-            "}\n" +
-            "static class W extends CommonTree {\n" +
-            "  public W(int tokenType, String txt) { super(new CommonToken(tokenType,txt)); }\n" +
-            "  public W(Token t) { token=t;}\n" +
-            "  public String toString() { return token.getText()+\"<W>\";}\n" +
-            "}\n" +
-            "}\n"+
-            "a : id (',' id)* -> ^(LIST<W>[\"LIST\"] id+);\n" +
-            "id : ID -> ID<V>;\n"+
-            "ID : 'a'..'z'+ ;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-        String found = execParser("T.g", grammar, "TParser", "TLexer",
-                    "a", "a,b,c", debug);
-        assertEquals("(LIST<W> a<V> b<V> c<V>)\n", found);
-    }
-
-    @Test public void testCopySemanticsWithHetero() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "@members {\n" +
-            "static class V extends CommonTree {\n" +
-            "  public V(Token t) { token=t;}\n" +  // for 'int'<V>
-            "  public V(V node) { super(node); }\n\n" + // for dupNode
-            "  public Tree dupNode() { return new V(this); }\n" + // for dup'ing type
-            "  public String toString() { return token.getText()+\"<V>\";}\n" +
-            "}\n" +
-            "}\n" +
-            "a : type ID (',' ID)* ';' -> ^(type ID)+;\n" +
-            "type : 'int'<V> ;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-        String found = execParser("T.g", grammar, "TParser", "TLexer",
-                    "a", "int a, b, c;", debug);
-        assertEquals("(int<V> a) (int<V> b) (int<V> c)\n", found);
-    }
-
-    // TREE PARSERS -- REWRITE AST
-
-	@Test public void testTreeParserRewriteFlatList() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"@members {\n" +
-			"static class V extends CommonTree {\n" +
-			"  public V(Object t) { super((CommonTree)t); }\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"static class W extends CommonTree {\n" +
-			"  public W(Object t) { super((CommonTree)t); }\n" +
-			"  public String toString() { return token.getText()+\"<W>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID INT -> INT<V> ID<W>\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("34<V> abc<W>\n", found);
-	}
-
-	@Test public void testTreeParserRewriteTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"@members {\n" +
-			"static class V extends CommonTree {\n" +
-			"  public V(Object t) { super((CommonTree)t); }\n" +
-			"  public String toString() { return token.getText()+\"<V>\";}\n" +
-			"}\n" +
-			"static class W extends CommonTree {\n" +
-			"  public W(Object t) { super((CommonTree)t); }\n" +
-			"  public String toString() { return token.getText()+\"<W>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID INT -> ^(INT<V> ID<W>)\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("(34<V> abc<W>)\n", found);
-	}
-
-	@Test public void testTreeParserRewriteImaginary() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"tokens { ROOT; }\n" +
-			"@members {\n" +
-			"class V extends CommonTree {\n" +
-			"  public V(int tokenType) { super(new CommonToken(tokenType)); }\n" +
-			"  public String toString() { return tokenNames[token.getType()]+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID -> ROOT<V> ID\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
-		assertEquals("ROOT<V> abc\n", found);
-	}
-
-	@Test public void testTreeParserRewriteImaginaryWithArgs() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"tokens { ROOT; }\n" +
-			"@members {\n" +
-			"class V extends CommonTree {\n" +
-			"  public int x;\n" +
-			"  public V(int tokenType, int x) { super(new CommonToken(tokenType)); this.x=x;}\n" +
-			"  public String toString() { return tokenNames[token.getType()]+\"<V>;\"+x;}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID -> ROOT<V>[42] ID\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
-		assertEquals("ROOT<V>;42 abc\n", found);
-	}
-
-	@Test public void testTreeParserRewriteImaginaryRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"tokens { ROOT; }\n" +
-			"@members {\n" +
-			"class V extends CommonTree {\n" +
-			"  public V(int tokenType) { super(new CommonToken(tokenType)); }\n" +
-			"  public String toString() { return tokenNames[token.getType()]+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID -> ^(ROOT<V> ID)\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
-		assertEquals("(ROOT<V> abc)\n", found);
-	}
-
-	@Test public void testTreeParserRewriteImaginaryFromReal() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"tokens { ROOT; }\n" +
-			"@members {\n" +
-			"class V extends CommonTree {\n" +
-			"  public V(int tokenType) { super(new CommonToken(tokenType)); }\n" +
-			"  public V(int tokenType, Object tree) { super((CommonTree)tree); token.setType(tokenType); }\n" +
-			"  public String toString() { return tokenNames[token.getType()]+\"<V>@\"+token.getLine();}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID -> ROOT<V>[$ID]\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
-		assertEquals("ROOT<V>@1\n", found); // at line 1; shows copy of ID's stuff
-	}
-
-	@Test public void testTreeParserAutoHeteroAST() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ';' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"tokens { ROOT; }\n" +
-			"@members {\n" +
-			"class V extends CommonTree {\n" +
-			"  public V(CommonTree t) { super(t); }\n" + // NEEDS SPECIAL CTOR
-			"  public String toString() { return super.toString()+\"<V>\";}\n" +
-			"}\n" +
-			"}\n"+
-			"a : ID<V> ';'<V>\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc;");
-		assertEquals("abc<V> ;<V>\n", found);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestInterpretedLexing.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestInterpretedLexing.java
deleted file mode 100644
index f0de1ce..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestInterpretedLexing.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.runtime.CharStream;
-import org.antlr.runtime.CommonTokenStream;
-import org.antlr.runtime.Token;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.Interpreter;
-import org.junit.Test;
-
-public class TestInterpretedLexing extends BaseTest {
-
-	/*
-	static class Tracer implements ANTLRDebugInterface {
-		Grammar g;
-		public DebugActions(Grammar g) {
-			this.g = g;
-		}
-		public void enterRule(String ruleName) {
-			System.out.println("enterRule("+ruleName+")");
-		}
-
-		public void exitRule(String ruleName) {
-			System.out.println("exitRule("+ruleName+")");
-		}
-
-		public void matchElement(int type) {
-			System.out.println("matchElement("+g.getTokenName(type)+")");
-		}
-
-		public void mismatchedElement(MismatchedTokenException e) {
-			System.out.println(e);
-			e.printStackTrace(System.out);
-		}
-
-		public void mismatchedSet(MismatchedSetException e) {
-			System.out.println(e);
-			e.printStackTrace(System.out);
-		}
-
-		public void noViableAlt(NoViableAltException e) {
-			System.out.println(e);
-			e.printStackTrace(System.out);
-		}
-	}
-    */
-
-    /** Public default constructor used by TestRig */
-    public TestInterpretedLexing() {
-    }
-
-	@Test public void testSimpleAltCharTest() throws Exception {
-        Grammar g = new Grammar(
-                "lexer grammar t;\n"+
-                "A : 'a' | 'b' | 'c';");
-		final int Atype = g.getTokenType("A");
-        Interpreter engine = new Interpreter(g, new ANTLRStringStream("a"));
-        engine = new Interpreter(g, new ANTLRStringStream("b"));
-		Token result = engine.scan("A");
-		assertEquals(result.getType(), Atype);
-        engine = new Interpreter(g, new ANTLRStringStream("c"));
-		result = engine.scan("A");
-		assertEquals(result.getType(), Atype);
-    }
-
-    @Test public void testSingleRuleRef() throws Exception {
-        Grammar g = new Grammar(
-                "lexer grammar t;\n"+
-                "A : 'a' B 'c' ;\n" +
-                "B : 'b' ;\n");
-		final int Atype = g.getTokenType("A");
-		Interpreter engine = new Interpreter(g, new ANTLRStringStream("abc")); // 'abc' matches A : 'a' B 'c'
-		Token result = engine.scan("A");
-		assertEquals(result.getType(), Atype);
-    }
-
-    @Test public void testSimpleLoop() throws Exception {
-        Grammar g = new Grammar(
-                "lexer grammar t;\n"+
-                "INT : (DIGIT)+ ;\n"+
-				"fragment DIGIT : '0'..'9';\n");
-		final int INTtype = g.getTokenType("INT");
-		Interpreter engine = new Interpreter(g, new ANTLRStringStream("12x")); // should ignore the x
-		Token result = engine.scan("INT");
-		assertEquals(result.getType(), INTtype);
-		engine = new Interpreter(g, new ANTLRStringStream("1234"));
-		result = engine.scan("INT");
-		assertEquals(result.getType(), INTtype);
-    }
-
-    @Test public void testMultAltLoop() throws Exception {
-		Grammar g = new Grammar(
-                "lexer grammar t;\n"+
-                "A : ('0'..'9'|'a'|'b')+ ;\n");
-		final int Atype = g.getTokenType("A");
-		Interpreter engine = new Interpreter(g, new ANTLRStringStream("a"));
-		Token result = engine.scan("A");
-        engine = new Interpreter(g, new ANTLRStringStream("a"));
-		result = engine.scan("A");
-		assertEquals(result.getType(), Atype);
-		engine = new Interpreter(g, new ANTLRStringStream("1234"));
-		result = engine.scan("A");
-		assertEquals(result.getType(), Atype);
-        engine = new Interpreter(g, new ANTLRStringStream("aaa"));
-		result = engine.scan("A");
-		assertEquals(result.getType(), Atype);
-        engine = new Interpreter(g, new ANTLRStringStream("aaaa9"));
-		result = engine.scan("A");
-		assertEquals(result.getType(), Atype);
-        engine = new Interpreter(g, new ANTLRStringStream("b"));
-		result = engine.scan("A");
-		assertEquals(result.getType(), Atype);
-        engine = new Interpreter(g, new ANTLRStringStream("baa"));
-		result = engine.scan("A");
-		assertEquals(result.getType(), Atype);
-    }
-
-	@Test public void testSimpleLoops() throws Exception {
-		Grammar g = new Grammar(
-				"lexer grammar t;\n"+
-				"A : ('0'..'9')+ '.' ('0'..'9')* | ('0'..'9')+ ;\n");
-		final int Atype = g.getTokenType("A");
-		CharStream input = new ANTLRStringStream("1234.5");
-		Interpreter engine = new Interpreter(g, input);
-		Token result = engine.scan("A");
-		assertEquals(Atype, result.getType());
-	}
-
-	@Test public void testTokensRules() throws Exception {
-		Grammar pg = new Grammar(
-			"parser grammar p;\n"+
-			"a : (INT|FLOAT|WS)+;\n");
-		Grammar g = new Grammar();
-		g.importTokenVocabulary(pg);
-		g.setFileName("<string>");
-		g.setGrammarContent(
-			"lexer grammar t;\n"+
-			"INT : (DIGIT)+ ;\n"+
-			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
-			"fragment DIGIT : '0'..'9';\n" +
-			"WS : (' ')+ {channel=99;};\n");
-		CharStream input = new ANTLRStringStream("123 139.52");
-		Interpreter lexEngine = new Interpreter(g, input);
-
-		CommonTokenStream tokens = new CommonTokenStream(lexEngine);
-        tokens.LT(5); // make sure it grabs all tokens
-		String result = tokens.toString();
-		//System.out.println(result);
-		String expecting = "123 139.52";
-		assertEquals(expecting, result);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestInterpretedParsing.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestInterpretedParsing.java
deleted file mode 100644
index a575139..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestInterpretedParsing.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.runtime.CharStream;
-import org.antlr.runtime.tree.ParseTree;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.Interpreter;
-import org.junit.Test;
-
-public class TestInterpretedParsing extends BaseTest {
-    /** Public default constructor used by TestRig */
-    public TestInterpretedParsing() {
-    }
-
-    @Test public void testSimpleParse() throws Exception {
-        Grammar pg = new Grammar(
-            "parser grammar p;\n"+
-            "prog : WHILE ID LCURLY (assign)* RCURLY EOF;\n" +
-            "assign : ID ASSIGN expr SEMI ;\n" +
-			"expr : INT | FLOAT | ID ;\n");
-		Grammar g = new Grammar();
-		g.importTokenVocabulary(pg);
-		g.setFileName(Grammar.IGNORE_STRING_IN_GRAMMAR_FILE_NAME +"string");
-		g.setGrammarContent(
-			"lexer grammar t;\n"+
-			"WHILE : 'while';\n"+
-			"LCURLY : '{';\n"+
-			"RCURLY : '}';\n"+
-			"ASSIGN : '=';\n"+
-			"SEMI : ';';\n"+
-			"ID : ('a'..'z')+ ;\n"+
-			"INT : (DIGIT)+ ;\n"+
-			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
-			"fragment DIGIT : '0'..'9';\n" +
-			"WS : (' ')+ ;\n");
-		CharStream input = new ANTLRStringStream("while x { i=1; y=3.42; z=y; }");
-		Interpreter lexEngine = new Interpreter(g, input);
-
-		FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
-		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
-		//System.out.println("tokens="+tokens.toString());
-		Interpreter parseEngine = new Interpreter(pg, tokens);
-		ParseTree t = parseEngine.parse("prog");
-		String result = t.toStringTree();
-		String expecting =
-			"(<grammar p> (prog while x { (assign i = (expr 1) ;) (assign y = (expr 3.42) ;) (assign z = (expr y) ;) } <EOF>))";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testMismatchedTokenError() throws Exception {
-		Grammar pg = new Grammar(
-			"parser grammar p;\n"+
-			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
-			"assign : ID ASSIGN expr SEMI ;\n" +
-			"expr : INT | FLOAT | ID ;\n");
-		Grammar g = new Grammar();
-		g.setFileName(Grammar.IGNORE_STRING_IN_GRAMMAR_FILE_NAME +"string");
-		g.importTokenVocabulary(pg);
-		g.setGrammarContent(
-			"lexer grammar t;\n"+
-			"WHILE : 'while';\n"+
-			"LCURLY : '{';\n"+
-			"RCURLY : '}';\n"+
-			"ASSIGN : '=';\n"+
-			"SEMI : ';';\n"+
-			"ID : ('a'..'z')+ ;\n"+
-			"INT : (DIGIT)+ ;\n"+
-			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
-			"fragment DIGIT : '0'..'9';\n" +
-			"WS : (' ')+ ;\n");
-		CharStream input = new ANTLRStringStream("while x { i=1 y=3.42; z=y; }");
-		Interpreter lexEngine = new Interpreter(g, input);
-
-		FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
-		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
-		//System.out.println("tokens="+tokens.toString());
-		Interpreter parseEngine = new Interpreter(pg, tokens);
-		ParseTree t = parseEngine.parse("prog");
-		String result = t.toStringTree();
-		String expecting =
-			"(<grammar p> (prog while x { (assign i = (expr 1) MismatchedTokenException(6!=10))))";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testMismatchedSetError() throws Exception {
-		Grammar pg = new Grammar(
-			"parser grammar p;\n"+
-			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
-			"assign : ID ASSIGN expr SEMI ;\n" +
-			"expr : INT | FLOAT | ID ;\n");
-		Grammar g = new Grammar();
-		g.importTokenVocabulary(pg);
-		g.setFileName("<string>");
-		g.setGrammarContent(
-			"lexer grammar t;\n"+
-			"WHILE : 'while';\n"+
-			"LCURLY : '{';\n"+
-			"RCURLY : '}';\n"+
-			"ASSIGN : '=';\n"+
-			"SEMI : ';';\n"+
-			"ID : ('a'..'z')+ ;\n"+
-			"INT : (DIGIT)+ ;\n"+
-			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
-			"fragment DIGIT : '0'..'9';\n" +
-			"WS : (' ')+ ;\n");
-		CharStream input = new ANTLRStringStream("while x { i=; y=3.42; z=y; }");
-		Interpreter lexEngine = new Interpreter(g, input);
-
-		FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
-		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
-		//System.out.println("tokens="+tokens.toString());
-		Interpreter parseEngine = new Interpreter(pg, tokens);
-		ParseTree t = parseEngine.parse("prog");
-		String result = t.toStringTree();
-		String expecting =
-			"(<grammar p> (prog while x { (assign i = (expr MismatchedSetException(10!={5,6,7})))))";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testNoViableAltError() throws Exception {
-		Grammar pg = new Grammar(
-			"parser grammar p;\n"+
-			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
-			"assign : ID ASSIGN expr SEMI ;\n" +
-			"expr : {;}INT | FLOAT | ID ;\n");
-		Grammar g = new Grammar();
-		g.importTokenVocabulary(pg);
-		g.setFileName("<string>");
-		g.setGrammarContent(
-			"lexer grammar t;\n"+
-			"WHILE : 'while';\n"+
-			"LCURLY : '{';\n"+
-			"RCURLY : '}';\n"+
-			"ASSIGN : '=';\n"+
-			"SEMI : ';';\n"+
-			"ID : ('a'..'z')+ ;\n"+
-			"INT : (DIGIT)+ ;\n"+
-			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
-			"fragment DIGIT : '0'..'9';\n" +
-			"WS : (' ')+ ;\n");
-		CharStream input = new ANTLRStringStream("while x { i=; y=3.42; z=y; }");
-		Interpreter lexEngine = new Interpreter(g, input);
-
-		FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
-		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
-		//System.out.println("tokens="+tokens.toString());
-		Interpreter parseEngine = new Interpreter(pg, tokens);
-		ParseTree t = parseEngine.parse("prog");
-		String result = t.toStringTree();
-		String expecting =
-			"(<grammar p> (prog while x { (assign i = (expr NoViableAltException(10@[4:1: expr : ( INT | FLOAT | ID );])))))";
-		assertEquals(expecting, result);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestIntervalSet.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestIntervalSet.java
deleted file mode 100644
index 9acc37f..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestIntervalSet.java
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.analysis.Label;
-import org.antlr.misc.IntervalSet;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-
-
-public class TestIntervalSet extends BaseTest {
-
-    /** Public default constructor used by TestRig */
-    public TestIntervalSet() {
-    }
-
-    @Test public void testSingleElement() throws Exception {
-        IntervalSet s = IntervalSet.of(99);
-        String expecting = "99";
-        assertEquals(s.toString(), expecting);
-    }
-
-    @Test public void testIsolatedElements() throws Exception {
-        IntervalSet s = new IntervalSet();
-        s.add(1);
-        s.add('z');
-        s.add('\uFFF0');
-        String expecting = "{1, 122, 65520}";
-        assertEquals(s.toString(), expecting);
-    }
-
-    @Test public void testMixedRangesAndElements() throws Exception {
-        IntervalSet s = new IntervalSet();
-        s.add(1);
-        s.add('a','z');
-        s.add('0','9');
-        String expecting = "{1, 48..57, 97..122}";
-        assertEquals(s.toString(), expecting);
-    }
-
-    @Test public void testSimpleAnd() throws Exception {
-        IntervalSet s = IntervalSet.of(10,20);
-        IntervalSet s2 = IntervalSet.of(13,15);
-        String expecting = "13..15";
-        String result = (s.and(s2)).toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testRangeAndIsolatedElement() throws Exception {
-        IntervalSet s = IntervalSet.of('a','z');
-        IntervalSet s2 = IntervalSet.of('d');
-        String expecting = "100";
-        String result = (s.and(s2)).toString();
-        assertEquals(result, expecting);
-    }
-
-	@Test public void testEmptyIntersection() throws Exception {
-		IntervalSet s = IntervalSet.of('a','z');
-		IntervalSet s2 = IntervalSet.of('0','9');
-		String expecting = "{}";
-		String result = (s.and(s2)).toString();
-		assertEquals(result, expecting);
-	}
-
-	@Test public void testEmptyIntersectionSingleElements() throws Exception {
-		IntervalSet s = IntervalSet.of('a');
-		IntervalSet s2 = IntervalSet.of('d');
-		String expecting = "{}";
-		String result = (s.and(s2)).toString();
-		assertEquals(result, expecting);
-	}
-
-    @Test public void testNotSingleElement() throws Exception {
-        IntervalSet vocabulary = IntervalSet.of(1,1000);
-        vocabulary.add(2000,3000);
-        IntervalSet s = IntervalSet.of(50,50);
-        String expecting = "{1..49, 51..1000, 2000..3000}";
-        String result = (s.complement(vocabulary)).toString();
-        assertEquals(result, expecting);
-    }
-
-	@Test public void testNotSet() throws Exception {
-		IntervalSet vocabulary = IntervalSet.of(1,1000);
-		IntervalSet s = IntervalSet.of(50,60);
-		s.add(5);
-		s.add(250,300);
-		String expecting = "{1..4, 6..49, 61..249, 301..1000}";
-		String result = (s.complement(vocabulary)).toString();
-		assertEquals(result, expecting);
-	}
-
-	@Test public void testNotEqualSet() throws Exception {
-		IntervalSet vocabulary = IntervalSet.of(1,1000);
-		IntervalSet s = IntervalSet.of(1,1000);
-		String expecting = "{}";
-		String result = (s.complement(vocabulary)).toString();
-		assertEquals(result, expecting);
-	}
-
-	@Test public void testNotSetEdgeElement() throws Exception {
-		IntervalSet vocabulary = IntervalSet.of(1,2);
-		IntervalSet s = IntervalSet.of(1);
-		String expecting = "2";
-		String result = (s.complement(vocabulary)).toString();
-		assertEquals(result, expecting);
-	}
-
-    @Test public void testNotSetFragmentedVocabulary() throws Exception {
-        IntervalSet vocabulary = IntervalSet.of(1,255);
-        vocabulary.add(1000,2000);
-        vocabulary.add(9999);
-        IntervalSet s = IntervalSet.of(50,60);
-        s.add(3);
-        s.add(250,300);
-        s.add(10000); // this is outside range of vocab and should be ignored
-        String expecting = "{1..2, 4..49, 61..249, 1000..2000, 9999}";
-        String result = (s.complement(vocabulary)).toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testSubtractOfCompletelyContainedRange() throws Exception {
-        IntervalSet s = IntervalSet.of(10,20);
-        IntervalSet s2 = IntervalSet.of(12,15);
-        String expecting = "{10..11, 16..20}";
-        String result = (s.subtract(s2)).toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testSubtractOfOverlappingRangeFromLeft() throws Exception {
-        IntervalSet s = IntervalSet.of(10,20);
-        IntervalSet s2 = IntervalSet.of(5,11);
-        String expecting = "12..20";
-        String result = (s.subtract(s2)).toString();
-        assertEquals(result, expecting);
-
-        IntervalSet s3 = IntervalSet.of(5,10);
-        expecting = "11..20";
-        result = (s.subtract(s3)).toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testSubtractOfOverlappingRangeFromRight() throws Exception {
-        IntervalSet s = IntervalSet.of(10,20);
-        IntervalSet s2 = IntervalSet.of(15,25);
-        String expecting = "10..14";
-        String result = (s.subtract(s2)).toString();
-        assertEquals(result, expecting);
-
-        IntervalSet s3 = IntervalSet.of(20,25);
-        expecting = "10..19";
-        result = (s.subtract(s3)).toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testSubtractOfCompletelyCoveredRange() throws Exception {
-        IntervalSet s = IntervalSet.of(10,20);
-        IntervalSet s2 = IntervalSet.of(1,25);
-        String expecting = "{}";
-        String result = (s.subtract(s2)).toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testSubtractOfRangeSpanningMultipleRanges() throws Exception {
-        IntervalSet s = IntervalSet.of(10,20);
-        s.add(30,40);
-        s.add(50,60); // s has 3 ranges now: 10..20, 30..40, 50..60
-        IntervalSet s2 = IntervalSet.of(5,55); // covers one and touches 2nd range
-        String expecting = "56..60";
-        String result = (s.subtract(s2)).toString();
-        assertEquals(result, expecting);
-
-        IntervalSet s3 = IntervalSet.of(15,55); // touches both
-        expecting = "{10..14, 56..60}";
-        result = (s.subtract(s3)).toString();
-        assertEquals(result, expecting);
-    }
-
-	/** The following was broken:
-	 	{0..113, 115..65534}-{0..115, 117..65534} wrongly gave 116..65534 (should be just 116)
-	 */
-	@Test public void testSubtractOfWackyRange() throws Exception {
-		IntervalSet s = IntervalSet.of(0,113);
-		s.add(115,200);
-		IntervalSet s2 = IntervalSet.of(0,115);
-		s2.add(117,200);
-		String expecting = "116";
-		String result = (s.subtract(s2)).toString();
-		assertEquals(result, expecting);
-	}
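// A minimal standalone sketch of the full-range case documented in the comment
// above, assuming org.antlr.misc.IntervalSet from the ANTLR 3 tool jar (the same
// class these tests exercise); the class name below is illustrative only.
// Subtracting {0..115, 117..65534} from {0..113, 115..65534} should leave just
// 116, the one member of the first set the second does not cover.
import org.antlr.misc.IntervalSet;

public class IntervalSubtractSketch {
    public static void main(String[] args) {
        IntervalSet s = IntervalSet.of(0, 113);   // {0..113}
        s.add(115, 65534);                        // now {0..113, 115..65534}
        IntervalSet s2 = IntervalSet.of(0, 115);  // {0..115}
        s2.add(117, 65534);                       // now {0..115, 117..65534}
        System.out.println(s.subtract(s2));       // expected output: 116
    }
}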
-
-    @Test public void testSimpleEquals() throws Exception {
-        IntervalSet s = IntervalSet.of(10,20);
-        IntervalSet s2 = IntervalSet.of(10,20);
-        Boolean expecting = new Boolean(true);
-        Boolean result = new Boolean(s.equals(s2));
-        assertEquals(result, expecting);
-
-        IntervalSet s3 = IntervalSet.of(15,55);
-        expecting = new Boolean(false);
-        result = new Boolean(s.equals(s3));
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testEquals() throws Exception {
-        IntervalSet s = IntervalSet.of(10,20);
-        s.add(2);
-        s.add(499,501);
-        IntervalSet s2 = IntervalSet.of(10,20);
-        s2.add(2);
-        s2.add(499,501);
-        Boolean expecting = new Boolean(true);
-        Boolean result = new Boolean(s.equals(s2));
-        assertEquals(result, expecting);
-
-        IntervalSet s3 = IntervalSet.of(10,20);
-        s3.add(2);
-        expecting = new Boolean(false);
-        result = new Boolean(s.equals(s3));
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testSingleElementMinusDisjointSet() throws Exception {
-        IntervalSet s = IntervalSet.of(15,15);
-        IntervalSet s2 = IntervalSet.of(1,5);
-        s2.add(10,20);
-        String expecting = "{}"; // 15 - {1..5, 10..20} = {}
-        String result = s.subtract(s2).toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testMembership() throws Exception {
-        IntervalSet s = IntervalSet.of(15,15);
-        s.add(50,60);
-        assertTrue(!s.member(0));
-        assertTrue(!s.member(20));
-        assertTrue(!s.member(100));
-        assertTrue(s.member(15));
-        assertTrue(s.member(55));
-        assertTrue(s.member(50));
-        assertTrue(s.member(60));
-    }
-
-    // {2,15,18} & 10..20
-    @Test public void testIntersectionWithTwoContainedElements() throws Exception {
-        IntervalSet s = IntervalSet.of(10,20);
-        IntervalSet s2 = IntervalSet.of(2,2);
-        s2.add(15);
-        s2.add(18);
-        String expecting = "{15, 18}";
-        String result = (s.and(s2)).toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testIntersectionWithTwoContainedElementsReversed() throws Exception {
-        IntervalSet s = IntervalSet.of(10,20);
-        IntervalSet s2 = IntervalSet.of(2,2);
-        s2.add(15);
-        s2.add(18);
-        String expecting = "{15, 18}";
-        String result = (s2.and(s)).toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testComplement() throws Exception {
-        IntervalSet s = IntervalSet.of(100,100);
-        s.add(101,101);
-        IntervalSet s2 = IntervalSet.of(100,102);
-        String expecting = "102";
-        String result = (s.complement(s2)).toString();
-        assertEquals(result, expecting);
-    }
-
-	@Test public void testComplement2() throws Exception {
-		IntervalSet s = IntervalSet.of(100,101);
-		IntervalSet s2 = IntervalSet.of(100,102);
-		String expecting = "102";
-		String result = (s.complement(s2)).toString();
-		assertEquals(result, expecting);
-	}
-
-	@Test public void testComplement3() throws Exception {
-		IntervalSet s = IntervalSet.of(1,96);
-		s.add(99,Label.MAX_CHAR_VALUE);
-		String expecting = "97..98";
-		String result = (s.complement(1,Label.MAX_CHAR_VALUE)).toString();
-		assertEquals(result, expecting);
-	}
-
-    @Test public void testMergeOfRangesAndSingleValues() throws Exception {
-        // {0..41, 42, 43..65534}
-        IntervalSet s = IntervalSet.of(0,41);
-        s.add(42);
-        s.add(43,65534);
-        String expecting = "0..65534";
-        String result = s.toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testMergeOfRangesAndSingleValuesReverse() throws Exception {
-        IntervalSet s = IntervalSet.of(43,65534);
-        s.add(42);
-        s.add(0,41);
-        String expecting = "0..65534";
-        String result = s.toString();
-        assertEquals(result, expecting);
-    }
-
-    @Test public void testMergeWhereAdditionMergesTwoExistingIntervals() throws Exception {
-        // 42, 10, {0..9, 11..41, 43..65534}
-        IntervalSet s = IntervalSet.of(42);
-        s.add(10);
-        s.add(0,9);
-        s.add(43,65534);
-        s.add(11,41);
-        String expecting = "0..65534";
-        String result = s.toString();
-        assertEquals(result, expecting);
-    }
-
-	@Test public void testMergeWithDoubleOverlap() throws Exception {
-		IntervalSet s = IntervalSet.of(1,10);
-		s.add(20,30);
-		s.add(5,25); // overlaps two!
-		String expecting = "1..30";
-		String result = s.toString();
-		assertEquals(result, expecting);
-	}
-
-	@Test public void testSize() throws Exception {
-		IntervalSet s = IntervalSet.of(20,30);
-		s.add(50,55);
-		s.add(5,19);
-		String expecting = "32";
-		String result = String.valueOf(s.size());
-		assertEquals(result, expecting);
-	}
-
-	@Test public void testToList() throws Exception {
-		IntervalSet s = IntervalSet.of(20,25);
-		s.add(50,55);
-		s.add(5,5);
-		String expecting = "[5, 20, 21, 22, 23, 24, 25, 50, 51, 52, 53, 54, 55]";
-		List foo = new ArrayList();
-		String result = String.valueOf(s.toList());
-		assertEquals(result, expecting);
-	}
-
-	/** The following was broken:
-	    {'\u0000'..'s', 'u'..'\uFFFE'} & {'\u0000'..'q', 's'..'\uFFFE'}=
-	    {'\u0000'..'q', 's'}, wrongly dropping 'u'..'\uFFFE'
-	 	'q' is 113 ascii
-	 	'u' is 117
-	*/
-	@Test public void testNotRIntersectionNotT() throws Exception {
-		IntervalSet s = IntervalSet.of(0,'s');
-		s.add('u',200);
-		IntervalSet s2 = IntervalSet.of(0,'q');
-		s2.add('s',200);
-		String expecting = "{0..113, 115, 117..200}";
-		String result = (s.and(s2)).toString();
-		assertEquals(result, expecting);
-	}
-
-}
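// A similar standalone sketch for the intersection documented in
// testNotRIntersectionNotT, again assuming org.antlr.misc.IntervalSet from the
// ANTLR 3 tool jar; the class name is illustrative only. With 'q'=113, 's'=115,
// 'u'=117, intersecting {0..'s', 'u'..200} with {0..'q', 's'..200} keeps
// 0..113 and 117..200 from the overlapping ranges plus the single point 115.
import org.antlr.misc.IntervalSet;

public class IntervalAndSketch {
    public static void main(String[] args) {
        IntervalSet notR = IntervalSet.of(0, 's');  // {0..115}
        notR.add('u', 200);                         // now {0..115, 117..200}
        IntervalSet notT = IntervalSet.of(0, 'q');  // {0..113}
        notT.add('s', 200);                         // now {0..113, 115..200}
        System.out.println(notR.and(notT));         // expected: {0..113, 115, 117..200}
    }
}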
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestJavaCodeGeneration.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestJavaCodeGeneration.java
deleted file mode 100644
index f18a6d0..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestJavaCodeGeneration.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.junit.Test;
-
-/** General code generation testing; compilation and/or execution.
- *  These tests are more about avoiding duplicate var definitions
- *  etc... than testing a particular ANTLR feature.
- */
-public class TestJavaCodeGeneration extends BaseTest {
-	@Test public void testDupVarDefForPinchedState() {
-		// s0->s2 and s0->s3->s1 pinches back to s1
-		// LA3_1, s1 state for DFA 3, was defined twice in similar scope
-		// just wrapped in curlies and it's cool.
-		String grammar =
-			"grammar T;\n" +
-			"a : (| A | B) X Y\n" +
-			"  | (| A | B) X Z\n" +
-			"  ;\n" ;
-		boolean found =
-			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, "TParser", null, false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testLabeledNotSetsInLexer() {
-		// d must be an int
-		String grammar =
-			"lexer grammar T;\n" +
-			"A : d=~('x'|'y') e='0'..'9'\n" +
-			"  ; \n" ;
-		boolean found =
-			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, null, "T", false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testLabeledSetsInLexer() {
-		// d must be an int
-		String grammar =
-			"grammar T;\n" +
-			"a : A ;\n" +
-			"A : d=('x'|'y') {System.out.println((char)$d);}\n" +
-			"  ; \n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", false);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testLabeledRangeInLexer() {
-		// d must be an int
-		String grammar =
-			"grammar T;\n" +
-			"a : A;\n" +
-			"A : d='a'..'z' {System.out.println((char)$d);} \n" +
-			"  ; \n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", false);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testLabeledWildcardInLexer() {
-		// d must be an int
-		String grammar =
-			"grammar T;\n" +
-			"a : A;\n" +
-			"A : d=. {System.out.println((char)$d);}\n" +
-			"  ; \n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", false);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testSynpredWithPlusLoop() {
-		String grammar =
-			"grammar T; \n" +
-			"a : (('x'+)=> 'x'+)?;\n";
-		boolean found =
-			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, "TParser", "TLexer", false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testDoubleQuoteEscape() {
-		String grammar =
-			"lexer grammar T; \n" +
-			"A : '\\\\\"';\n" +          // this is A : '\\"', which should give "\\\"" at Java level;
-            "B : '\\\"';\n" +            // this is B: '\"', which should give "\"" at Java level;
-            "C : '\\'\\'';\n" +          // this is C: '\'\'', which should give "''" at Java level
-            "D : '\\k';\n";              // this is D: '\k', which should give just "k" at Java level;
-
-		boolean found =
-			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, null, "T", false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testBlankRuleGetsNoException() {
-		String grammar =
-			"grammar T;\n" +
-			"a : sync (ID sync)* ;\n" +
-			"sync : ;\n" +
-			"ID : 'a'..'z'+;\n";
-		boolean found =
-			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, "TParser", "TLexer", false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, found);
-	}
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestLeftRecursion.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestLeftRecursion.java
deleted file mode 100644
index a01b66a..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestLeftRecursion.java
+++ /dev/null
@@ -1,382 +0,0 @@
-package org.antlr.test;
-
-import org.junit.Test;
-
-/** */
-public class TestLeftRecursion extends BaseTest {
-	protected boolean debug = false;
-
-	@Test public void testSimple() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : a {System.out.println($a.text);} ;\n" +
-			"a : a ID\n" +
-			"  | ID" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "s", "a b c", debug);
-		String expecting = "abc\n";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testSemPred() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : a {System.out.println($a.text);} ;\n" +
-			"a : a {true}? ID\n" +
-			"  | ID" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "s", "a b c", debug);
-		String expecting = "abc\n";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testTernaryExpr() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"e : e '*'^ e" +
-			"  | e '+'^ e" +
-			"  | e '?'<assoc=right>^ e ':'! e" +
-			"  | e '='<assoc=right>^ e" +
-			"  | ID" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String[] tests = {
-			"a",			"a",
-			"a+b",			"(+ a b)",
-			"a*b",			"(* a b)",
-			"a?b:c",		"(? a b c)",
-			"a=b=c",		"(= a (= b c))",
-			"a?b+c:d",		"(? a (+ b c) d)",
-			"a?b=c:d",		"(? a (= b c) d)",
-			"a? b?c:d : e",	"(? a (? b c d) e)",
-			"a?b: c?d:e",	"(? a b (? c d e))",
-		};
-		runTests(grammar, tests, "e");
-	}
-
-	@Test public void testDeclarationsUsingASTOperators() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"declarator\n" +
-			"        : declarator '['^ e ']'!\n" +
-			"        | declarator '['^ ']'!\n" +
-			"        | declarator '('^ ')'!\n" +
-			"        | '*'^ declarator\n" + // binds less tight than suffixes
-			"        | '('! declarator ')'!\n" +
-			"        | ID\n" +
-			"        ;\n" +
-			"e : INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String[] tests = {
-			"a",		"a",
-			"*a",		"(* a)",
-			"**a",		"(* (* a))",
-			"a[3]",		"([ a 3)",
-			"b[]",		"([ b)",
-			"(a)",		"a",
-			"a[]()",	"(( ([ a))",
-			"a[][]",	"([ ([ a))",
-			"*a[]",		"(* ([ a))",
-			"(*a)[]",	"([ (* a))",
-		};
-		runTests(grammar, tests, "declarator");
-	}
-
-	@Test public void testDeclarationsUsingRewriteOperators() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"declarator\n" +
-			"        : declarator '[' e ']' -> ^('[' declarator e)\n" +
-			"        | declarator '[' ']' -> ^('[' declarator)\n" +
-			"        | declarator '(' ')' -> ^('(' declarator)\n" +
-			"        | '*' declarator -> ^('*' declarator) \n" + // binds less tight than suffixes
-			"        | '(' declarator ')' -> declarator\n" +
-			"        | ID -> ID\n" +
-			"        ;\n" +
-			"e : INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String[] tests = {
-			"a",		"a",
-			"*a",		"(* a)",
-			"**a",		"(* (* a))",
-			"a[3]",		"([ a 3)",
-			"b[]",		"([ b)",
-			"(a)",		"a",
-			"a[]()",	"(( ([ a))",
-			"a[][]",	"([ ([ a))",
-			"*a[]",		"(* ([ a))",
-			"(*a)[]",	"([ (* a))",
-		};
-		runTests(grammar, tests, "declarator");
-	}
-
-	@Test public void testExpressionsUsingASTOperators() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"e : e '.'^ ID\n" +
-			"  | e '.'^ 'this'\n" +
-			"  | '-'^ e\n" +
-			"  | e '*'^ e\n" +
-			"  | e ('+'^|'-'^) e\n" +
-			"  | INT\n" +
-			"  | ID\n" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String[] tests = {
-			"a",		"a",
-			"1",		"1",
-			"a+1",		"(+ a 1)",
-			"a*1",		"(* a 1)",
-			"a.b",		"(. a b)",
-			"a.this",	"(. a this)",
-			"a-b+c",	"(+ (- a b) c)",
-			"a+b*c",	"(+ a (* b c))",
-			"a.b+1",	"(+ (. a b) 1)",
-			"-a",		"(- a)",
-			"-a+b",		"(+ (- a) b)",
-			"-a.b",		"(- (. a b))",
-		};
-		runTests(grammar, tests, "e");
-	}
-
-	@Test public void testExpressionsUsingRewriteOperators() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"e : e '.' ID 				-> ^('.' e ID)\n" +
-			"  | e '.' 'this' 			-> ^('.' e 'this')\n" +
-			"  | '-' e 					-> ^('-' e)\n" +
-			"  | e '*' b=e 				-> ^('*' e $b)\n" +
-			"  | e (op='+'|op='-') b=e	-> ^($op e $b)\n" +
-			"  | INT 					-> INT\n" +
-			"  | ID 					-> ID\n" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String[] tests = {
-			"a",		"a",
-			"1",		"1",
-			"a+1",		"(+ a 1)",
-			"a*1",		"(* a 1)",
-			"a.b",		"(. a b)",
-			"a.this",	"(. a this)",
-			"a+b*c",	"(+ a (* b c))",
-			"a.b+1",	"(+ (. a b) 1)",
-			"-a",		"(- a)",
-			"-a+b",		"(+ (- a) b)",
-			"-a.b",		"(- (. a b))",
-		};
-		runTests(grammar, tests, "e");
-	}
-
-	@Test public void testExpressionAssociativity() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"e\n" +
-			"  : e '.'^ ID\n" +
-			"  | '-'^ e\n" +
-			"  | e '^'<assoc=right>^ e\n" +
-			"  | e '*'^ e\n" +
-			"  | e ('+'^|'-'^) e\n" +
-			"  | e ('='<assoc=right>^ |'+='<assoc=right>^) e\n" +
-			"  | INT\n" +
-			"  | ID\n" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String[] tests = {
-			"a",		"a",
-			"1",		"1",
-			"a+1",		"(+ a 1)",
-			"a*1",		"(* a 1)",
-			"a.b",		"(. a b)",
-			"a-b+c",	"(+ (- a b) c)",
-
-			"a+b*c",	"(+ a (* b c))",
-			"a.b+1",	"(+ (. a b) 1)",
-			"-a",		"(- a)",
-			"-a+b",		"(+ (- a) b)",
-			"-a.b",		"(- (. a b))",
-			"a^b^c",	"(^ a (^ b c))",
-			"a=b=c",	"(= a (= b c))",
-			"a=b=c+d.e","(= a (= b (+ c (. d e))))",
-		};
-		runTests(grammar, tests, "e");
-	}
-
-	@Test public void testJavaExpressions() throws Exception {
-		// Generates about 7k in bytecodes for generated e_ rule;
-		// Well within the 64k method limit. e_primary compiles
-		// to about 2k in bytecodes.
-		// this is simplified from real java
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"expressionList\n" +
-			"    :   e (','! e)*\n" +
-			"    ;\n" +
-			"e   :   '('! e ')'!\n" +
-			"    |   'this' \n" +
-			"    |   'super'\n" +
-			"    |   INT\n" +
-			"    |   ID\n" +
-			"    |   type '.'^ 'class'\n" +
-			"    |   e '.'^ ID\n" +
-			"    |   e '.'^ 'this'\n" +
-			"    |   e '.'^ 'super' '('^ expressionList? ')'!\n" +
-			"    |   e '.'^ 'new'^ ID '('! expressionList? ')'!\n" +
-			"	 |	 'new'^ type ( '(' expressionList? ')'! | (options {k=1;}:'[' e ']'!)+)\n" + // ugly; simplified
-			"    |   e '['^ e ']'!\n" +
-			"    |   '('^ type ')'! e\n" +
-			"    |   e ('++'^ | '--'^)\n" +
-			"    |   e '('^ expressionList? ')'!\n" +
-			"    |   ('+'^|'-'^|'++'^|'--'^) e\n" +
-			"    |   ('~'^|'!'^) e\n" +
-			"    |   e ('*'^|'/'^|'%'^) e\n" +
-			"    |   e ('+'^|'-'^) e\n" +
-			"    |   e ('<'^ '<' | '>'^ '>' '>' | '>'^ '>') e\n" +
-			"    |   e ('<='^ | '>='^ | '>'^ | '<'^) e\n" +
-			"    |   e 'instanceof'^ e\n" +
-			"    |   e ('=='^ | '!='^) e\n" +
-			"    |   e '&'^ e\n" +
-			"    |   e '^'<assoc=right>^ e\n" +
-			"    |   e '|'^ e\n" +
-			"    |   e '&&'^ e\n" +
-			"    |   e '||'^ e\n" +
-			"    |   e '?' e ':' e\n" +
-			"    |   e ('='<assoc=right>^\n" +
-			"          |'+='<assoc=right>^\n" +
-			"          |'-='<assoc=right>^\n" +
-			"          |'*='<assoc=right>^\n" +
-			"          |'/='<assoc=right>^\n" +
-			"          |'&='<assoc=right>^\n" +
-			"          |'|='<assoc=right>^\n" +
-			"          |'^='<assoc=right>^\n" +
-			"          |'>>='<assoc=right>^\n" +
-			"          |'>>>='<assoc=right>^\n" +
-			"          |'<<='<assoc=right>^\n" +
-			"          |'%='<assoc=right>^) e\n" +
-			"    ;\n" +
-			"type: ID \n" +
-			"    | ID '['^ ']'!\n" +
-			"    | 'int'\n" +
-			"	 | 'int' '['^ ']'! \n" +
-			"    ;\n" +
-			"ID : ('a'..'z'|'A'..'Z'|'_'|'$')+;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String[] tests = {
-			"a",		"a",
-			"1",		"1",
-			"a+1",		"(+ a 1)",
-			"a*1",		"(* a 1)",
-			"a.b",		"(. a b)",
-			"a-b+c",	"(+ (- a b) c)",
-
-			"a+b*c",	"(+ a (* b c))",
-			"a.b+1",	"(+ (. a b) 1)",
-			"-a",		"(- a)",
-			"-a+b",		"(+ (- a) b)",
-			"-a.b",		"(- (. a b))",
-			"a^b^c",	"(^ a (^ b c))",
-			"a=b=c",	"(= a (= b c))",
-			"a=b=c+d.e","(= a (= b (+ c (. d e))))",
-			"a|b&c",	"(| a (& b c))",
-			"(a|b)&c",	"(& (| a b) c)",
-			"a > b",	"(> a b)",
-			"a >> b",	"(> a b)",  // text is from one token
-			"a < b",	"(< a b)",
-
-			"(T)x",							"(( T x)",
-			"new A().b",					"(. (new A () b)",
-			"(T)t.f()",						"(( (( T (. t f)))",
-			"a.f(x)==T.c",					"(== (( (. a f) x) (. T c))",
-			"a.f().g(x,1)",					"(( (. (( (. a f)) g) x 1)",
-			"new T[((n-1) * x) + 1]",		"(new T [ (+ (* (- n 1) x) 1))",
-		};
-		runTests(grammar, tests, "e");
-	}
-
-	@Test public void testReturnValueAndActions() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : e {System.out.println($e.v);} ;\n" +
-			"e returns [int v, List<String> ignored]\n" +
-			"  : e '*' b=e {$v *= $b.v;}\n" +
-			"  | e '+' b=e {$v += $b.v;}\n" +
-			"  | INT {$v = $INT.int;}\n" +
-			"  ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String[] tests = {
-			"4",			"4",
-			"1+2",			"3",
-		};
-		runTests(grammar, tests, "s");
-	}
-
-	@Test public void testReturnValueAndActionsAndASTs() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"s : e {System.out.print(\"v=\"+$e.v+\", \");} ;\n" +
-			"e returns [int v, List<String> ignored]\n" +
-			"  : e '*'^ b=e {$v *= $b.v;}\n" +
-			"  | e '+'^ b=e {$v += $b.v;}\n" +
-			"  | INT {$v = $INT.int;}\n" +
-			"  ;\n" +
-			"INT : '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {skip();} ;\n";
-		String[] tests = {
-			"4",			"v=4, 4",
-			"1+2",			"v=3, (+ 1 2)",
-		};
-		runTests(grammar, tests, "s");
-	}
-
-	public void runTests(String grammar, String[] tests, String startRule) {
-		rawGenerateAndBuildRecognizer("T.g", grammar, "TParser", "TLexer", debug);
-		boolean parserBuildsTrees =
-			grammar.indexOf("output=AST")>=0 ||
-			grammar.indexOf("output = AST")>=0;
-		writeRecognizerAndCompile("TParser",
-										 null,
-										 "TLexer",
-										 startRule,
-										 null,
-										 parserBuildsTrees,
-										 false,
-										 false,
-										 debug);
-
-		for (int i=0; i<tests.length; i+=2) {
-			String test = tests[i];
-			String expecting = tests[i+1]+"\n";
-			writeFile(tmpdir, "input", test);
-			String found = execRecognizer();
-			System.out.print(test+" -> "+found);
-			assertEquals(expecting, found);
-		}
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestLexer.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestLexer.java
deleted file mode 100644
index 0b2651f..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestLexer.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.codegen.CodeGenerator;
-import org.stringtemplate.v4.ST;
-import org.antlr.tool.Grammar;
-import org.junit.Test;
-
-public class TestLexer extends BaseTest {
-	protected boolean debug = false;
-
-	/** Public default constructor used by TestRig */
-	public TestLexer() {
-	}
-
-	@Test public void testSetText() throws Exception {
-		// setText("\t") should replace the matched two-character text '\t'
-		// with an actual tab in the token handed to the parser.
-		String grammar =
-			"grammar P;\n"+
-			"a : A {System.out.println(input);} ;\n"+
-			"A : '\\\\' 't' {setText(\"\t\");} ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-				    "a", "\\t", debug);
-		assertEquals("\t\n", found);
-	}
-
-	@Test public void testRefToRuleDoesNotSetTokenNorEmitAnother() throws Exception {
-		// this must return A not I to the parser; calling a nonfragment rule
-		// from a nonfragment rule does not set the overall token.
-		String grammar =
-			"grammar P;\n"+
-			"a : A EOF {System.out.println(input);} ;\n"+
-			"A : '-' I ;\n" +
-			"I : '0'..'9'+ ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-				    "a", "-34", debug);
-		assertEquals("-34\n", found);
-	}
-
-	@Test public void testRefToRuleDoesNotSetChannel() throws Exception {
-		// the $channel=HIDDEN assigned inside WS must not leak into A; $channel
-		// is local to a rule, like $type, so A stays on the default channel.
-		String grammar =
-			"grammar P;\n"+
-			"a : A EOF {System.out.println($A.text+\", channel=\"+$A.channel);} ;\n"+
-			"A : '-' WS I ;\n" +
-			"I : '0'..'9'+ ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-				    "a", "- 34", debug);
-		assertEquals("- 34, channel=0\n", found);
-	}
-
-	@Test public void testWeCanSetType() throws Exception {
-		String grammar =
-			"grammar P;\n"+
-			"tokens {X;}\n" +
-			"a : X EOF {System.out.println(input);} ;\n"+
-			"A : '-' I {$type = X;} ;\n" +
-			"I : '0'..'9'+ ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-				    "a", "-34", debug);
-		assertEquals("-34\n", found);
-	}
-
-	@Test public void testRefToFragment() throws Exception {
-		// this must return A not I to the parser; calling a nonfragment rule
-		// from a nonfragment rule does not set the overall token.
-		String grammar =
-			"grammar P;\n"+
-			"a : A {System.out.println(input);} ;\n"+
-			"A : '-' I ;\n" +
-			"fragment I : '0'..'9'+ ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-				    "a", "-34", debug);
-		assertEquals("-34\n", found);
-	}
-
-	@Test public void testMultipleRefToFragment() throws Exception {
-		// this must return A not I to the parser; calling a nonfragment rule
-		// from a nonfragment rule does not set the overall token.
-		String grammar =
-			"grammar P;\n"+
-			"a : A EOF {System.out.println(input);} ;\n"+
-			"A : I '.' I ;\n" +
-			"fragment I : '0'..'9'+ ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-				    "a", "3.14159", debug);
-		assertEquals("3.14159\n", found);
-	}
-
-	@Test public void testLabelInSubrule() throws Exception {
-		// can we see v outside?
-		String grammar =
-			"grammar P;\n"+
-			"a : A EOF ;\n"+
-			"A : 'hi' WS (v=I)? {$channel=0; System.out.println($v.text);} ;\n" +
-			"fragment I : '0'..'9'+ ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-				    "a", "hi 342", debug);
-		assertEquals("342\n", found);
-	}
-
-	@Test public void testRefToTokenInLexer() throws Exception {
-		String grammar =
-			"grammar P;\n"+
-			"a : A EOF ;\n"+
-			"A : I {System.out.println($I.text);} ;\n" +
-			"fragment I : '0'..'9'+ ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-				    "a", "342", debug);
-		assertEquals("342\n", found);
-	}
-
-	@Test public void testListLabelInLexer() throws Exception {
-		String grammar =
-			"grammar P;\n"+
-			"a : A ;\n"+
-			"A : i+=I+ {for (Object t : $i) System.out.print(\" \"+((Token)t).getText());} ;\n" +
-			"fragment I : '0'..'9'+ ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-				    "a", "33 297", debug);
-		assertEquals(" 33 297\n", found);
-	}
-
-	@Test public void testDupListRefInLexer() throws Exception {
-		String grammar =
-			"grammar P;\n"+
-			"a : A ;\n"+
-			"A : i+=I WS i+=I {$channel=0; for (Object t : $i) System.out.print(\" \"+((Token)t).getText());} ;\n" +
-			"fragment I : '0'..'9'+ ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-				    "a", "33 297", debug);
-		assertEquals(" 33 297\n", found);
-	}
-
-	@Test public void testCharLabelInLexer() {
-		String grammar =
-			"grammar T;\n" +
-			"a : B ;\n" +
-			"B : x='a' {System.out.println((char)$x);} ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "a", debug);
-		assertEquals("a\n", found);
-	}
-
-	@Test public void testRepeatedLabelInLexer() {
-		String grammar =
-			"lexer grammar T;\n" +
-			"B : x='a' x='b' ;\n" ;
-		boolean found =
-			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, null, "T", false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRepeatedRuleLabelInLexer() {
-		String grammar =
-			"lexer grammar T;\n" +
-			"B : x=A x=A ;\n" +
-			"fragment A : 'a' ;\n" ;
-		boolean found =
-			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, null, "T", false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testIsolatedEOTEdge() {
-		String grammar =
-			"lexer grammar T;\n" +
-			"QUOTED_CONTENT \n" +
-			"        : 'q' (~'q')* (('x' 'q') )* 'q' ; \n";
-		boolean found =
-			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, null, "T", false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, found);
-	}	
-
-	@Test public void testEscapedLiterals() {
-		/* Grammar:
-			A : '\"' ;  should match a single double-quote: "
-			B : '\\\"' ; should match input \"
-		*/
-		String grammar =
-			"lexer grammar T;\n" +
-			"A : '\\\"' ;\n" +
-			"B : '\\\\\\\"' ;\n" ; // '\\\"'
-		boolean found =
-			rawGenerateAndBuildRecognizer(
-				"T.g", grammar, null, "T", false);
-		boolean expecting = true; // should be ok
-		assertEquals(expecting, found);
-	}
-
-    @Test public void testNewlineLiterals() throws Exception {
-        Grammar g = new Grammar(
-            "lexer grammar T;\n" +
-            "A : '\\n\\n' ;\n"  // ANTLR sees '\n\n'
-        );
-        String expecting = "match(\"\\n\\n\")";
-
-        Tool antlr = newTool();
-        antlr.setOutputDirectory(null); // write to /dev/null
-        CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-        g.setCodeGenerator(generator);
-        generator.genRecognizer(); // codegen phase sets some vars we need
-        ST codeST = generator.getRecognizerST();
-        String code = codeST.render();
-        int m = code.indexOf("match(\"");
-        String found = code.substring(m,m+expecting.length());
-
-        assertEquals(expecting, found);
-    }
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestMessages.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestMessages.java
deleted file mode 100644
index e135f54..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestMessages.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.grammar.v3.ActionTranslator;
-import org.antlr.runtime.CommonToken;
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.GrammarSemanticsMessage;
-import org.junit.Test;
-
-public class TestMessages extends BaseTest {
-
-	/** Public default constructor used by TestRig */
-	public TestMessages() {
-	}
-
-
-	@Test public void testMessageStringificationIsConsistent() throws Exception {
-		String action = "$other.tree = null;";
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar a;\n" +
-			"options { output = AST;}" +
-			"otherrule\n" +
-			"    : 'y' ;" +
-			"rule\n" +
-			"    : other=otherrule {" + action +"}\n" +
-			"    ;");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-																	"rule",
-																	new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-
-		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
-		Object expectedArg = "other";
-		Object expectedArg2 = "tree";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		String expectedMessageString = expectedMessage.toString();
-		assertEquals(expectedMessageString, expectedMessage.toString());
-	}
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestNFAConstruction.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestNFAConstruction.java
deleted file mode 100644
index 4ff774e..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestNFAConstruction.java
+++ /dev/null
@@ -1,1204 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.analysis.State;
-import org.antlr.tool.FASerializer;
-import org.antlr.tool.Grammar;
-import org.junit.Test;
-
-public class TestNFAConstruction extends BaseTest {
-
-	/** Public default constructor used by TestRig */
-	public TestNFAConstruction() {
-	}
-
-	@Test public void testA() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : A;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-A->.s3\n" +
-			".s3->:s4\n" +
-			":s4-EOF->.s5\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAB() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : A B ;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-A->.s3\n" +
-			".s3-B->.s4\n" +
-			".s4->:s5\n" +
-			":s5-EOF->.s6\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAorB() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : A | B {;} ;");
-		/* expecting (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5,end)
-										|                            ^
-									   (6)--Ep-->(7)--B-->(8)--------|
-				 */
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s1->.s7\n" +
-			".s10->.s4\n" +
-			".s2-A->.s3\n" +
-			".s3->.s4\n" +
-			".s4->:s5\n" +
-			".s7->.s8\n" +
-			".s8-B->.s9\n" +
-			".s9-{}->.s10\n" +
-			":s5-EOF->.s6\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testRangeOrRange() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"A : ('a'..'c' 'h' | 'q' 'j'..'l') ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10-'q'->.s11\n" +
-			".s11-'j'..'l'->.s12\n" +
-			".s12->.s6\n" +
-			".s2->.s3\n" +
-			".s2->.s9\n" +
-			".s3-'a'..'c'->.s4\n" +
-			".s4-'h'->.s5\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s10\n" +
-			":s7-<EOT>->.s8\n";
-		checkRule(g, "A", expecting);
-	}
-
-	@Test public void testRange() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"A : 'a'..'c' ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-'a'..'c'->.s3\n" +
-			".s3->:s4\n" +
-			":s4-<EOT>->.s5\n";
-		checkRule(g, "A", expecting);
-	}
-
-	@Test public void testCharSetInParser() throws Exception {
-		Grammar g = new Grammar(
-			"grammar P;\n"+
-			"a : A|'b' ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-A..'b'->.s3\n" +
-			".s3->:s4\n" +
-			":s4-EOF->.s5\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testABorCD() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : A B | C D;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s1->.s8\n" +
-			".s10-D->.s11\n" +
-			".s11->.s5\n" +
-			".s2-A->.s3\n" +
-			".s3-B->.s4\n" +
-			".s4->.s5\n" +
-			".s5->:s6\n" +
-			".s8->.s9\n" +
-			".s9-C->.s10\n" +
-			":s6-EOF->.s7\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testbA() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : b A ;\n"+
-			"b : B ;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4->.s5\n" +
-			".s5-B->.s6\n" +
-			".s6->:s7\n" +
-			".s8-A->.s9\n" +
-			".s9->:s10\n" +
-			":s10-EOF->.s11\n" +
-			":s7->.s8\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testbA_bC() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : b A ;\n"+
-			"b : B ;\n"+
-			"c : b C;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s12->.s13\n" +
-			".s13-C->.s14\n" +
-			".s14->:s15\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4->.s5\n" +
-			".s5-B->.s6\n" +
-			".s6->:s7\n" +
-			".s8-A->.s9\n" +
-			".s9->:s10\n" +
-			":s10-EOF->.s11\n" +
-			":s15-EOF->.s16\n" +
-			":s7->.s12\n" +
-			":s7->.s8\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAorEpsilon() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : A | ;");
-		/* expecting (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5,end)
-										|                            ^
-									   (6)--Ep-->(7)--Ep-->(8)-------|
-				 */
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s1->.s7\n" +
-			".s2-A->.s3\n" +
-			".s3->.s4\n" +
-			".s4->:s5\n" +
-			".s7->.s8\n" +
-			".s8->.s9\n" +
-			".s9->.s4\n" +
-			":s5-EOF->.s6\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAOptional() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : (A)?;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s8\n" +
-			".s3-A->.s4\n" +
-			".s4->.s5\n" +
-			".s5->:s6\n" +
-			".s8->.s5\n" +
-			":s6-EOF->.s7\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testNakedAoptional() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : A?;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s8\n" +
-			".s3-A->.s4\n" +
-			".s4->.s5\n" +
-			".s5->:s6\n" +
-			".s8->.s5\n" +
-			":s6-EOF->.s7\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAorBthenC() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : (A | B) C;");
-		/* expecting
-
-				(0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5)--C-->(6)--Ep-->(7,end)
-						   |                            ^
-						  (8)--Ep-->(9)--B-->(10)-------|
-				 */
-	}
-
-	@Test public void testAplus() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : (A)+;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4-A->.s5\n" +
-			".s5->.s3\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			":s7-EOF->.s8\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testNakedAplus() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : A+;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4-A->.s5\n" +
-			".s5->.s3\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			":s7-EOF->.s8\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAplusNonGreedy() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : (options {greedy=false;}:'0'..'9')+ ;\n");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4-'0'..'9'->.s5\n" +
-			".s5->.s3\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			":s7-<EOT>->.s8\n";
-		checkRule(g, "A", expecting);
-	}
-
-	@Test public void testAorBplus() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : (A | B{action})+ ;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10->.s11\n" +
-			".s11-B->.s12\n" +
-			".s12-{}->.s13\n" +
-			".s13->.s6\n" +
-			".s2->.s3\n" +
-			".s3->.s10\n" +
-			".s3->.s4\n" +
-			".s4-A->.s5\n" +
-			".s5->.s6\n" +
-			".s6->.s3\n" +
-			".s6->.s7\n" +
-			".s7->:s8\n" +
-			":s8-EOF->.s9\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAorBorEmptyPlus() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : (A | B | )+ ;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10->.s11\n" +
-			".s10->.s13\n" +
-			".s11-B->.s12\n" +
-			".s12->.s6\n" +
-			".s13->.s14\n" +
-			".s14->.s15\n" +
-			".s15->.s6\n" +
-			".s2->.s3\n" +
-			".s3->.s10\n" +
-			".s3->.s4\n" +
-			".s4-A->.s5\n" +
-			".s5->.s6\n" +
-			".s6->.s3\n" +
-			".s6->.s7\n" +
-			".s7->:s8\n" +
-			":s8-EOF->.s9\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAStar() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : (A)*;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s9\n" +
-			".s3->.s4\n" +
-			".s4-A->.s5\n" +
-			".s5->.s3\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s6\n" +
-			":s7-EOF->.s8\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testNestedAstar() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : (A*)*;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10->:s11\n" +
-			".s13->.s8\n" +
-			".s14->.s10\n" +
-			".s2->.s14\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4->.s13\n" +
-			".s4->.s5\n" +
-			".s5->.s6\n" +
-			".s6-A->.s7\n" +
-			".s7->.s5\n" +
-			".s7->.s8\n" +
-			".s8->.s9\n" +
-			".s9->.s10\n" +
-			".s9->.s3\n" +
-			":s11-EOF->.s12\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testPlusNestedInStar() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : (A+)*;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10->:s11\n" +
-			".s13->.s10\n" +
-			".s2->.s13\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4->.s5\n" +
-			".s5->.s6\n" +
-			".s6-A->.s7\n" +
-			".s7->.s5\n" +
-			".s7->.s8\n" +
-			".s8->.s9\n" +
-			".s9->.s10\n" +
-			".s9->.s3\n" +
-			":s11-EOF->.s12\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testStarNestedInPlus() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : (A*)+;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10->:s11\n" +
-			".s13->.s8\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4->.s13\n" +
-			".s4->.s5\n" +
-			".s5->.s6\n" +
-			".s6-A->.s7\n" +
-			".s7->.s5\n" +
-			".s7->.s8\n" +
-			".s8->.s9\n" +
-			".s9->.s10\n" +
-			".s9->.s3\n" +
-			":s11-EOF->.s12\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testNakedAstar() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : A*;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s9\n" +
-			".s3->.s4\n" +
-			".s4-A->.s5\n" +
-			".s5->.s3\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s6\n" +
-			":s7-EOF->.s8\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAorBstar() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : (A | B{action})* ;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10->.s11\n" +
-			".s11-B->.s12\n" +
-			".s12-{}->.s13\n" +
-			".s13->.s6\n" +
-			".s14->.s7\n" +
-			".s2->.s14\n" +
-			".s2->.s3\n" +
-			".s3->.s10\n" +
-			".s3->.s4\n" +
-			".s4-A->.s5\n" +
-			".s5->.s6\n" +
-			".s6->.s3\n" +
-			".s6->.s7\n" +
-			".s7->:s8\n" +
-			":s8-EOF->.s9\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAorBOptionalSubrule() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : ( A | B )? ;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s8\n" +
-			".s3-A..B->.s4\n" +
-			".s4->.s5\n" +
-			".s5->:s6\n" +
-			".s8->.s5\n" +
-			":s6-EOF->.s7\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testPredicatedAorB() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : {p1}? A | {p2}? B ;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s1->.s8\n" +
-			".s10-B->.s11\n" +
-			".s11->.s5\n" +
-			".s2-{p1}?->.s3\n" +
-			".s3-A->.s4\n" +
-			".s4->.s5\n" +
-			".s5->:s6\n" +
-			".s8->.s9\n" +
-			".s9-{p2}?->.s10\n" +
-			":s6-EOF->.s7\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testMultiplePredicates() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : {p1}? {p1a}? A | {p2}? B | {p3} b;\n" +
-			"b : {p4}? B ;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s1->.s9\n" +
-			".s10-{p2}?->.s11\n" +
-			".s11-B->.s12\n" +
-			".s12->.s6\n" +
-			".s13->.s14\n" +
-			".s14-{}->.s15\n" +
-			".s15->.s16\n" +
-			".s16->.s17\n" +
-			".s17->.s18\n" +
-			".s18-{p4}?->.s19\n" +
-			".s19-B->.s20\n" +
-			".s2-{p1}?->.s3\n" +
-			".s20->:s21\n" +
-			".s22->.s6\n" +
-			".s3-{p1a}?->.s4\n" +
-			".s4-A->.s5\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s10\n" +
-			".s9->.s13\n" +
-			":s21->.s22\n" +
-			":s7-EOF->.s8\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testSets() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : ( A | B )+ ;\n" +
-			"b : ( A | B{;} )+ ;\n" +
-			"c : (A|B) (A|B) ;\n" +
-			"d : ( A | B )* ;\n" +
-			"e : ( A | B )? ;");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4-A..B->.s5\n" +
-			".s5->.s3\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			":s7-EOF->.s8\n";
-		checkRule(g, "a", expecting);
-		expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10->.s11\n" +
-			".s11-B->.s12\n" +
-			".s12-{}->.s13\n" +
-			".s13->.s6\n" +
-			".s2->.s3\n" +
-			".s3->.s10\n" +
-			".s3->.s4\n" +
-			".s4-A->.s5\n" +
-			".s5->.s6\n" +
-			".s6->.s3\n" +
-			".s6->.s7\n" +
-			".s7->:s8\n" +
-			":s8-EOF->.s9\n";
-		checkRule(g, "b", expecting);
-		expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-A..B->.s3\n" +
-			".s3-A..B->.s4\n" +
-			".s4->:s5\n" +
-			":s5-EOF->.s6\n";
-		checkRule(g, "c", expecting);
-		expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s9\n" +
-			".s3->.s4\n" +
-			".s4-A..B->.s5\n" +
-			".s5->.s3\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s6\n" +
-			":s7-EOF->.s8\n";
-		checkRule(g, "d", expecting);
-		expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s8\n" +
-			".s3-A..B->.s4\n" +
-			".s4->.s5\n" +
-			".s5->:s6\n" +
-			".s8->.s5\n" +
-			":s6-EOF->.s7\n";
-		checkRule(g, "e", expecting);
-	}
-
-	@Test public void testNotSet() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"tokens { A; B; C; }\n"+
-			"a : ~A ;\n");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-B..C->.s3\n" +
-			".s3->:s4\n" +
-			":s4-EOF->.s5\n";
-		checkRule(g, "a", expecting);
-
-		String expectingGrammarStr =
-			"1:8: parser grammar P;\n" +
-			"a : ~ A ;";
-		assertEquals(expectingGrammarStr, g.toString());
-	}
-
-	@Test public void testNotSingletonBlockSet() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"tokens { A; B; C; }\n"+
-			"a : ~(A) ;\n");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-B..C->.s3\n" +
-			".s3->:s4\n" +
-			":s4-EOF->.s5\n";
-		checkRule(g, "a", expecting);
-
-		String expectingGrammarStr =
-			"1:8: parser grammar P;\n" +
-			"a : ~ ( A ) ;";
-		assertEquals(expectingGrammarStr, g.toString());
-	}
-
-	@Test public void testNotCharSet() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"A : ~'3' ;\n");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s3\n" +
-			".s3->:s4\n" +
-			":s4-<EOT>->.s5\n";
-		checkRule(g, "A", expecting);
-
-		String expectingGrammarStr =
-			"1:7: lexer grammar P;\n" +
-			"A : ~ '3' ;\n"+
-			"Tokens : A ;";
-		assertEquals(expectingGrammarStr, g.toString());
-	}
-
-	@Test public void testNotBlockSet() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"A : ~('3'|'b') ;\n");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s3\n" +
-			".s3->:s4\n" +
-			":s4-<EOT>->.s5\n";
-		checkRule(g, "A", expecting);
-
-		String expectingGrammarStr =
-			"1:7: lexer grammar P;\n" +
-			"A : ~ ( '3' | 'b' ) ;\n" +
-			"Tokens : A ;";
-		assertEquals(expectingGrammarStr, g.toString());
-	}
-
-	@Test public void testNotSetLoop() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"A : ~('3')* ;\n");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s9\n" +
-			".s3->.s4\n" +
-			".s4-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s5\n" +
-			".s5->.s3\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s6\n" +
-			":s7-<EOT>->.s8\n";
-		checkRule(g, "A", expecting);
-
-		String expectingGrammarStr =
-			"1:7: lexer grammar P;\n" +
-			"A : (~ ( '3' ) )* ;\n" +
-			"Tokens : A ;";
-		assertEquals(expectingGrammarStr, g.toString());
-	}
-
-	@Test public void testNotBlockSetLoop() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"A : ~('3'|'b')* ;\n");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s9\n" +
-			".s3->.s4\n" +
-			".s4-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s5\n" +
-			".s5->.s3\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s6\n" +
-			":s7-<EOT>->.s8\n";
-		checkRule(g, "A", expecting);
-
-		String expectingGrammarStr =
-			"1:7: lexer grammar P;\n" +
-			"A : (~ ( '3' | 'b' ) )* ;\n" +
-			"Tokens : A ;";
-		assertEquals(expectingGrammarStr, g.toString());
-	}
-
-	@Test public void testSetsInCombinedGrammarSentToLexer() throws Exception {
-		// not sure this belongs in this test suite, but whatever.
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-			"A : '{' ~('}')* '}';\n");
-		String result = g.getLexerGrammar();
-		String expecting =
-			"lexer grammar t;" +newline +
-			"// $ANTLR src \"<string>\" 2"+newline+
-			"A : '{' ~('}')* '}';";
-		assertEquals(result, expecting);
-	}
-
-	@Test public void testLabeledNotSet() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"tokens { A; B; C; }\n"+
-			"a : t=~A ;\n");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-B..C->.s3\n" +
-			".s3->:s4\n" +
-			":s4-EOF->.s5\n";
-		checkRule(g, "a", expecting);
-
-		String expectingGrammarStr =
-			"1:8: parser grammar P;\n" +
-			"a : t=~ A ;";
-		assertEquals(expectingGrammarStr, g.toString());
-	}
-
-	@Test public void testLabeledNotCharSet() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"A : t=~'3' ;\n");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s3\n" +
-			".s3->:s4\n" +
-			":s4-<EOT>->.s5\n";
-		checkRule(g, "A", expecting);
-
-		String expectingGrammarStr =
-			"1:7: lexer grammar P;\n" +
-			"A : t=~ '3' ;\n"+
-			"Tokens : A ;";
-		assertEquals(expectingGrammarStr, g.toString());
-	}
-
-	@Test public void testLabeledNotBlockSet() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"A : t=~('3'|'b') ;\n");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s3\n" +
-			".s3->:s4\n" +
-			":s4-<EOT>->.s5\n";
-		checkRule(g, "A", expecting);
-
-		String expectingGrammarStr =
-			"1:7: lexer grammar P;\n" +
-			"A : t=~ ( '3' | 'b' ) ;\n" +
-			"Tokens : A ;";
-		assertEquals(expectingGrammarStr, g.toString());
-	}
-
-	@Test public void testEscapedCharLiteral() throws Exception {
-		Grammar g = new Grammar(
-			"grammar P;\n"+
-			"a : '\\n';");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-'\\n'->.s3\n" +
-			".s3->:s4\n" +
-			":s4-EOF->.s5\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testEscapedStringLiteral() throws Exception {
-		Grammar g = new Grammar(
-			"grammar P;\n"+
-			"a : 'a\\nb\\u0030c\\'';");
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-'a\\nb\\u0030c\\''->.s3\n" +
-			".s3->:s4\n" +
-			":s4-EOF->.s5\n";
-		checkRule(g, "a", expecting);
-	}
-
-	// AUTO BACKTRACKING STUFF
-
-	@Test public void testAutoBacktracking_RuleBlock() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : 'a'{;}|'b';"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s1->.s9\n" +
-			".s10-'b'->.s11\n" +
-			".s11->.s6\n" +
-			".s2-{synpred1_t}?->.s3\n" +
-			".s3-'a'->.s4\n" +
-			".s4-{}->.s5\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s10\n" +
-			":s7-EOF->.s8\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_RuleSetBlock() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : 'a'|'b';"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-'a'..'b'->.s3\n" +
-			".s3->:s4\n" +
-			":s4-EOF->.s5\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_SimpleBlock() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a'{;}|'b') ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10->.s11\n" +
-			".s11-'b'->.s12\n" +
-			".s12->.s7\n" +
-			".s2->.s10\n" +
-			".s2->.s3\n" +
-			".s3-{synpred1_t}?->.s4\n" +
-			".s4-'a'->.s5\n" +
-			".s5-{}->.s6\n" +
-			".s6->.s7\n" +
-			".s7->:s8\n" +
-			":s8-EOF->.s9\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_SetBlock() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a'|'b') ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2-'a'..'b'->.s3\n" +
-			".s3->:s4\n" +
-			":s4-EOF->.s5\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_StarBlock() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a'{;}|'b')* ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s12->.s13\n" +
-			".s13-{synpred2_t}?->.s14\n" +
-			".s14-'b'->.s15\n" +
-			".s15->.s8\n" +
-			".s16->.s9\n" +
-			".s2->.s16\n" +
-			".s2->.s3\n" +
-			".s3->.s12\n" +
-			".s3->.s4\n" +
-			".s4-{synpred1_t}?->.s5\n" +
-			".s5-'a'->.s6\n" +
-			".s6-{}->.s7\n" +
-			".s7->.s8\n" +
-			".s8->.s3\n" +
-			".s8->.s9\n" +
-			".s9->:s10\n" +
-			":s10-EOF->.s11\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_StarSetBlock_IgnoresPreds() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a'|'b')* ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s9\n" +
-			".s3->.s4\n" +
-			".s4-'a'..'b'->.s5\n" +
-			".s5->.s3\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s6\n" +
-			":s7-EOF->.s8\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_StarSetBlock() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a'|'b'{;})* ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s11->.s12\n" +
-			".s12-{synpred2_t}?->.s13\n" +
-			".s13-'b'->.s14\n" +
-			".s14-{}->.s15\n" +
-			".s15->.s7\n" +
-			".s16->.s8\n" +
-			".s2->.s16\n" +
-			".s2->.s3\n" +
-			".s3->.s11\n" +
-			".s3->.s4\n" +
-			".s4-{synpred1_t}?->.s5\n" +
-			".s5-'a'->.s6\n" +
-			".s6->.s7\n" +
-			".s7->.s3\n" +
-			".s7->.s8\n" +
-			".s8->:s9\n" +
-			":s9-EOF->.s10\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_StarBlock1Alt() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a')* ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10->.s7\n" +
-			".s2->.s10\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4-{synpred1_t}?->.s5\n" +
-			".s5-'a'->.s6\n" +
-			".s6->.s3\n" +
-			".s6->.s7\n" +
-			".s7->:s8\n" +
-			":s8-EOF->.s9\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_PlusBlock() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a'{;}|'b')+ ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s12->.s13\n" +
-			".s13-{synpred2_t}?->.s14\n" +
-			".s14-'b'->.s15\n" +
-			".s15->.s8\n" +
-			".s2->.s3\n" +
-			".s3->.s12\n" +
-			".s3->.s4\n" +
-			".s4-{synpred1_t}?->.s5\n" +
-			".s5-'a'->.s6\n" +
-			".s6-{}->.s7\n" +
-			".s7->.s8\n" +
-			".s8->.s3\n" +
-			".s8->.s9\n" +
-			".s9->:s10\n" +
-			":s10-EOF->.s11\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_PlusSetBlock() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a'|'b'{;})+ ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s11->.s12\n" +
-			".s12-{synpred2_t}?->.s13\n" +
-			".s13-'b'->.s14\n" +
-			".s14-{}->.s15\n" +
-			".s15->.s7\n" +
-			".s2->.s3\n" +
-			".s3->.s11\n" +
-			".s3->.s4\n" +
-			".s4-{synpred1_t}?->.s5\n" +
-			".s5-'a'->.s6\n" +
-			".s6->.s7\n" +
-			".s7->.s3\n" +
-			".s7->.s8\n" +
-			".s8->:s9\n" +
-			":s9-EOF->.s10\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_PlusBlock1Alt() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a')+ ;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s3->.s4\n" +
-			".s4-{synpred1_t}?->.s5\n" +
-			".s5-'a'->.s6\n" +
-			".s6->.s3\n" +
-			".s6->.s7\n" +
-			".s7->:s8\n" +
-			":s8-EOF->.s9\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_OptionalBlock2Alts() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a'{;}|'b')?;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s10->.s11\n" +
-			".s10->.s14\n" +
-			".s11-{synpred2_t}?->.s12\n" +
-			".s12-'b'->.s13\n" +
-			".s13->.s7\n" +
-			".s14->.s7\n" +
-			".s2->.s10\n" +
-			".s2->.s3\n" +
-			".s3-{synpred1_t}?->.s4\n" +
-			".s4-'a'->.s5\n" +
-			".s5-{}->.s6\n" +
-			".s6->.s7\n" +
-			".s7->:s8\n" +
-			":s8-EOF->.s9\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_OptionalBlock1Alt() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a')?;"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s2->.s3\n" +
-			".s2->.s9\n" +
-			".s3-{synpred1_t}?->.s4\n" +
-			".s4-'a'->.s5\n" +
-			".s5->.s6\n" +
-			".s6->:s7\n" +
-			".s9->.s6\n" +
-			":s7-EOF->.s8\n";
-		checkRule(g, "a", expecting);
-	}
-
-	@Test public void testAutoBacktracking_ExistingPred() throws Exception {
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {backtrack=true;}\n"+
-			"a : ('a')=> 'a' | 'b';"
-		);
-		String expecting =
-			".s0->.s1\n" +
-			".s1->.s2\n" +
-			".s1->.s8\n" +
-			".s10->.s5\n" +
-			".s2-{synpred1_t}?->.s3\n" +
-			".s3-'a'->.s4\n" +
-			".s4->.s5\n" +
-			".s5->:s6\n" +
-			".s8->.s9\n" +
-			".s9-'b'->.s10\n" +
-			":s6-EOF->.s7\n";
-		checkRule(g, "a", expecting);
-	}
-
-	private void checkRule(Grammar g, String rule, String expecting)
-	{
-		g.buildNFA();
-		State startState = g.getRuleStartState(rule);
-		FASerializer serializer = new FASerializer(g);
-		String result = serializer.serialize(startState);
-
-		//System.out.print(result);
-		assertEquals(expecting, result);
-	}
-
-}
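Every test in the deleted TestNFAConstruction.java above funnels through the checkRule() helper shown at the end of the file: build the grammar's NFA, fetch the rule's start state, serialize it with FASerializer, and compare the result against a transition listing such as ".s0->.s1". The standalone sketch below reuses only the calls visible in that helper to dump a rule's NFA for inspection; it is an illustration, not part of the ANTLR build, and the class name DumpRuleNFA is made up.

    import org.antlr.analysis.State;
    import org.antlr.tool.FASerializer;
    import org.antlr.tool.Grammar;

    public class DumpRuleNFA {
        public static void main(String[] args) throws Exception {
            // Same construction pattern as the deleted tests: an inline grammar string.
            Grammar g = new Grammar(
                "parser grammar P;\n" +
                "a : A B ;");
            g.buildNFA();                            // build the NFA
            State start = g.getRuleStartState("a");  // start state of rule 'a'
            FASerializer serializer = new FASerializer(g);
            // Prints transitions in the ".s0->.s1" / ".s2-A->.s3" form the tests assert on.
            System.out.print(serializer.serialize(start));
        }
    }
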
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestRewriteAST.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestRewriteAST.java
deleted file mode 100644
index f50c36d..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestRewriteAST.java
+++ /dev/null
@@ -1,1459 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.GrammarSemanticsMessage;
-import org.junit.Ignore;
-import org.junit.Test;
-
-public class TestRewriteAST extends BaseTest {
-	protected boolean debug = false;
-
-	@Test public void testDelete() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc 34", debug);
-		assertEquals("", found);
-	}
-
-	@Test public void testSingleToken() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID -> ID;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc", debug);
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testSingleTokenToNewNode() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID -> ID[\"x\"];\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc", debug);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testSingleTokenToNewNodeRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID -> ^(ID[\"x\"] INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc", debug);
-		assertEquals("(x INT)\n", found);
-	}
-
-	@Test public void testSingleTokenToNewNode2() throws Exception {
-		// Allow creation of new nodes w/o args.
-		String grammar =
-			"grammar TT;\n" +
-			"options {output=AST;}\n" +
-			"a : ID -> ID[ ];\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("TT.g", grammar, "TTParser", "TTLexer",
-				    "a", "abc", debug);
-		assertEquals("ID\n", found);
-	}
-
-	@Test public void testSingleCharLiteral() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'c' -> 'c';\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "c", debug);
-		assertEquals("c\n", found);
-	}
-
-	@Test public void testSingleStringLiteral() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'ick' -> 'ick';\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "ick", debug);
-		assertEquals("ick\n", found);
-	}
-
-	@Test public void testSingleRule() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : b -> b;\n" +
-			"b : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc", debug);
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testReorderTokens() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> INT ID;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc 34", debug);
-		assertEquals("34 abc\n", found);
-	}
-
-	@Test public void testReorderTokenAndRule() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : b INT -> INT b;\n" +
-			"b : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc 34", debug);
-		assertEquals("34 abc\n", found);
-	}
-
-	@Test public void testTokenTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(INT ID);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc 34", debug);
-		assertEquals("(34 abc)\n", found);
-	}
-
-	@Test public void testTokenTreeAfterOtherStuff() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'void' ID INT -> 'void' ^(INT ID);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "void abc 34", debug);
-		assertEquals("void (34 abc)\n", found);
-	}
-
-	@Test public void testNestedTokenTreeWithOuterLoop() throws Exception {
-		// verify that ID and INT both iterate over outer index variable
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {DUH;}\n" +
-			"a : ID INT ID INT -> ^( DUH ID ^( DUH INT) )+ ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a 1 b 2", debug);
-		assertEquals("(DUH a (DUH 1)) (DUH b (DUH 2))\n", found);
-	}
-
-	@Test public void testOptionalSingleToken() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID -> ID? ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc", debug);
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testClosureSingleToken() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ID -> ID* ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("a b\n", found);
-	}
-
-	@Test public void testPositiveClosureSingleToken() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ID -> ID+ ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("a b\n", found);
-	}
-
-	@Test public void testOptionalSingleRule() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : b -> b?;\n" +
-			"b : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc", debug);
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testClosureSingleRule() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : b b -> b*;\n" +
-			"b : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("a b\n", found);
-	}
-
-	@Test public void testClosureOfLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x+=b x+=b -> $x*;\n" +
-			"b : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("a b\n", found);
-	}
-
-	@Test public void testOptionalLabelNoListLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : (x=ID)? -> $x?;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("a\n", found);
-	}
-
-	@Test public void testPositiveClosureSingleRule() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : b b -> b+;\n" +
-			"b : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("a b\n", found);
-	}
-
-	@Test public void testSinglePredicateT() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID -> {true}? ID -> ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc", debug);
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testSinglePredicateF() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID -> {false}? ID -> ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc", debug);
-		assertEquals("", found);
-	}
-
-	@Test public void testMultiplePredicate() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> {false}? ID\n" +
-			"           -> {true}? INT\n" +
-			"           -> \n" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a 2", debug);
-		assertEquals("2\n", found);
-	}
-
-	@Test public void testMultiplePredicateTrees() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> {false}? ^(ID INT)\n" +
-			"           -> {true}? ^(INT ID)\n" +
-			"           -> ID\n" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a 2", debug);
-		assertEquals("(2 a)\n", found);
-	}
-
-	@Test public void testSimpleTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : op INT -> ^(op INT);\n" +
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "-34", debug);
-		assertEquals("(- 34)\n", found);
-	}
-
-	@Test public void testSimpleTree2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : op INT -> ^(INT op);\n" +
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "+ 34", debug);
-		assertEquals("(34 +)\n", found);
-	}
-
-
-	@Test public void testNestedTrees() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'var' (ID ':' type ';')+ -> ^('var' ^(':' ID type)+) ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "var a:int; b:float;", debug);
-		assertEquals("(var (: a int) (: b float))\n", found);
-	}
-
-	@Test public void testImaginaryTokenCopy() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {VAR;}\n" +
-			"a : ID (',' ID)*-> ^(VAR ID)+ ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a,b,c", debug);
-		assertEquals("(VAR a) (VAR b) (VAR c)\n", found);
-	}
-
-	@Test public void testTokenUnreferencedOnLeftButDefined() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {VAR;}\n" +
-			"a : b -> ID ;\n" +
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("ID\n", found);
-	}
-
-	@Test public void testImaginaryTokenCopySetText() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {VAR;}\n" +
-			"a : ID (',' ID)*-> ^(VAR[\"var\"] ID)+ ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a,b,c", debug);
-		assertEquals("(var a) (var b) (var c)\n", found);
-	}
-
-	@Test public void testImaginaryTokenNoCopyFromToken() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "{a b c}", debug);
-		assertEquals("({ a b c)\n", found);
-	}
-
-	@Test public void testImaginaryTokenNoCopyFromTokenSetText() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : lc='{' ID+ '}' -> ^(BLOCK[$lc,\"block\"] ID+) ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "{a b c}", debug);
-		assertEquals("(block a b c)\n", found);
-	}
-
-	@Test public void testMixedRewriteAndAutoAST() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : b b^ ;\n" + // 2nd b matches only an INT; can make it root
-			"b : ID INT -> INT ID\n" +
-			"  | INT\n" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a 1 2", debug);
-		assertEquals("(2 1 a)\n", found);
-	}
-
-	@Test public void testSubruleWithRewrite() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : b b ;\n" +
-			"b : (ID INT -> INT ID | INT INT -> INT+ )\n" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a 1 2 3", debug);
-		assertEquals("1 a 2 3\n", found);
-	}
-
-	@Test public void testSubruleWithRewrite2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {TYPE;}\n" +
-			"a : b b ;\n" +
-			"b : 'int'\n" +
-			"    ( ID -> ^(TYPE 'int' ID)\n" +
-			"    | ID '=' INT -> ^(TYPE 'int' ID INT)\n" +
-			"    )\n" +
-			"    ';'\n" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "int a; int b=3;", debug);
-		assertEquals("(TYPE int a) (TYPE int b 3)\n", found);
-	}
-
-	@Test public void testNestedRewriteShutsOffAutoAST() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : b b ;\n" +
-			"b : ID ( ID (last=ID -> $last)+ ) ';'\n" + // get last ID
-			"  | INT\n" + // should still get auto AST construction
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b c d; 42", debug);
-		assertEquals("d 42\n", found);
-	}
-
-	@Test public void testRewriteActions() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : atom -> ^({adaptor.create(INT,\"9\")} atom) ;\n" +
-			"atom : INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "3", debug);
-		assertEquals("(9 3)\n", found);
-	}
-
-	@Test public void testRewriteActions2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : atom -> {adaptor.create(INT,\"9\")} atom ;\n" +
-			"atom : INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "3", debug);
-		assertEquals("9 3\n", found);
-	}
-
-	@Test public void testRefToOldValue() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : (atom -> atom) (op='+' r=atom -> ^($op $a $r) )* ;\n" +
-			"atom : INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "3+4+5", debug);
-		assertEquals("(+ (+ 3 4) 5)\n", found);
-	}
-
-	@Test public void testCopySemanticsForRules() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : atom -> ^(atom atom) ;\n" + // NOT CYCLE! (dup atom)
-			"atom : INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "3", debug);
-		assertEquals("(3 3)\n", found);
-	}
-
-	@Test public void testCopySemanticsForRules2() throws Exception {
-		// copy type as a root for each invocation of (...)+ in rewrite
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : type ID (',' ID)* ';' -> ^(type ID)+ ;\n" +
-			"type : 'int' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "int a,b,c;", debug);
-		assertEquals("(int a) (int b) (int c)\n", found);
-	}
-
-	@Test public void testCopySemanticsForRules3() throws Exception {
-		// copy type *and* modifier even though it's optional
-		// for each invocation of (...)+ in rewrite
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ;\n" +
-			"type : 'int' ;\n" +
-			"modifier : 'public' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "public int a,b,c;", debug);
-		assertEquals("(int public a) (int public b) (int public c)\n", found);
-	}
-
-	@Test public void testCopySemanticsForRules3Double() throws Exception {
-		// copy type *and* modifier even though it's optional
-		// for each invocation of (...)+ in rewrite
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ^(type modifier? ID)+ ;\n" +
-			"type : 'int' ;\n" +
-			"modifier : 'public' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "public int a,b,c;", debug);
-		assertEquals("(int public a) (int public b) (int public c) (int public a) (int public b) (int public c)\n", found);
-	}
-
-	@Test public void testCopySemanticsForRules4() throws Exception {
-		// copy type *and* modifier even though it's optional
-		// for each invocation of (...)+ in rewrite
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {MOD;}\n" +
-			"a : modifier? type ID (',' ID)* ';' -> ^(type ^(MOD modifier)? ID)+ ;\n" +
-			"type : 'int' ;\n" +
-			"modifier : 'public' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "public int a,b,c;", debug);
-		assertEquals("(int (MOD public) a) (int (MOD public) b) (int (MOD public) c)\n", found);
-	}
-
-	@Test public void testCopySemanticsLists() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {MOD;}\n" +
-			"a : ID (',' ID)* ';' -> ID+ ID+ ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a,b,c;", debug);
-		assertEquals("a b c a b c\n", found);
-	}
-
-	@Test public void testCopyRuleLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : x=b -> $x $x;\n"+
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("a a\n", found);
-	}
-
-	@Test public void testCopyRuleLabel2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : x=b -> ^($x $x);\n"+
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("(a a)\n", found);
-	}
-
-	@Test public void testQueueingOfTokens() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'int' ID (',' ID)* ';' -> ^('int' ID+) ;\n" +
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "int a,b,c;", debug);
-		assertEquals("(int a b c)\n", found);
-	}
-
-	@Test public void testCopyOfTokens() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'int' ID ';' -> 'int' ID 'int' ID ;\n" +
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "int a;", debug);
-		assertEquals("int a int a\n", found);
-	}
-
-	@Test public void testTokenCopyInLoop() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'int' ID (',' ID)* ';' -> ^('int' ID)+ ;\n" +
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "int a,b,c;", debug);
-		assertEquals("(int a) (int b) (int c)\n", found);
-	}
-
-	@Test public void testTokenCopyInLoopAgainstTwoOthers() throws Exception {
-		// must smear 'int' copies across as root of multiple trees
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'int' ID ':' INT (',' ID ':' INT)* ';' -> ^('int' ID INT)+ ;\n" +
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "int a:1,b:2,c:3;", debug);
-		assertEquals("(int a 1) (int b 2) (int c 3)\n", found);
-	}
-
-	@Test public void testListRefdOneAtATime() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID+ -> ID ID ID ;\n" + // works if 3 input IDs
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b c", debug);
-		assertEquals("a b c\n", found);
-	}
-
-	@Test public void testSplitListWithLabels() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {VAR;}\n"+
-			"a : first=ID others+=ID* -> $first VAR $others+ ;\n" +
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b c", debug);
-		assertEquals("a VAR b c\n", found);
-	}
-
-	@Test public void testComplicatedMelange() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : A A b=B B b=B c+=C C c+=C D {String s=$D.text;} -> A+ B+ C+ D ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"A : 'a' ;\n" +
-			"B : 'b' ;\n" +
-			"C : 'c' ;\n" +
-			"D : 'd' ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a a b b b c c c d", debug);
-		assertEquals("a a b b b c c c d\n", found);
-	}
-
-	@Test public void testRuleLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : x=b -> $x;\n"+
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("a\n", found);
-	}
-
-	@Test public void testAmbiguousRule() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID a -> a | INT ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"INT: '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc 34", debug);
-		assertEquals("34\n", found);
-	}
-
-	@Test public void testWeirdRuleRef() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID a -> $a | INT ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"INT: '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		Grammar g = new Grammar(grammar);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		// $a is ambig; is it previous root or ref to a ref in alt?
-		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());		
-	}
-
-	@Test public void testRuleListLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : x+=b x+=b -> $x+;\n"+
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("a b\n", found);
-	}
-
-	@Test public void testRuleListLabel2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : x+=b x+=b -> $x $x*;\n"+
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("a b\n", found);
-	}
-
-	@Test public void testOptional() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : x=b (y=b)? -> $x $y?;\n"+
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("a\n", found);
-	}
-
-	@Test public void testOptional2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : x=ID (y=b)? -> $x $y?;\n"+
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("a b\n", found);
-	}
-
-	@Test public void testOptional3() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : x=ID (y=b)? -> ($x $y)?;\n"+
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("a b\n", found);
-	}
-
-	@Test public void testOptional4() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : x+=ID (y=b)? -> ($x $y)?;\n"+
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("a b\n", found);
-	}
-
-	@Test public void testOptional5() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : ID -> ID? ;\n"+ // match an ID to optional ID
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a", debug);
-		assertEquals("a\n", found);
-	}
-
-	@Test public void testArbitraryExprType() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : x+=b x+=b -> {new CommonTree()};\n"+
-			"b : ID ;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b", debug);
-		assertEquals("", found);
-	}
-
-	@Test public void testSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options { output = AST; } \n" +
-			"a: (INT|ID)+ -> INT+ ID+ ;\n" +
-			"INT: '0'..'9'+;\n" +
-			"ID : 'a'..'z'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "2 a 34 de", debug);
-		assertEquals("2 34 a de\n", found);
-	}
-
-	@Test public void testSet2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options { output = AST; } \n" +
-			"a: (INT|ID) -> INT? ID? ;\n" +
-			"INT: '0'..'9'+;\n" +
-			"ID : 'a'..'z'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "2", debug);
-		assertEquals("2\n", found);
-	}
-
-	@Ignore 
-    // TODO: FAILS. This should probably generate a warning from antlr
-    // See http://www.antlr.org:8888/browse/ANTLR-162
-    //
-    public void testSetWithLabel() throws Exception {
-		
-		String grammar =
-			"grammar T;\n" +
-			"options { output = AST; } \n" +
-			"a : x=(INT|ID) -> $x ;\n" +
-			"INT: '0'..'9'+;\n" +
-			"ID : 'a'..'z'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "2", debug);
-		assertEquals("2\n", found);
-	}
-
-	@Test public void testRewriteAction() throws Exception {
-		String grammar =
-			"grammar T; \n" +
-			"options { output = AST; }\n" +
-			"tokens { FLOAT; }\n" +
-			"r\n" +
-			"    : INT -> {new CommonTree(new CommonToken(FLOAT,$INT.text+\".0\"))} \n" +
-			"    ; \n" +
-			"INT : '0'..'9'+; \n" +
-			"WS: (' ' | '\\n' | '\\t')+ {$channel = HIDDEN;}; \n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "r", "25", debug);
-		assertEquals("25.0\n", found);
-	}
-
-	@Test public void testOptionalSubruleWithoutRealElements() throws Exception {
-		// copy type *and* modifier even though it's optional
-		// for each invocation of (...)+ in rewrite
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;} \n" +
-			"tokens {PARMS;} \n" +
-			"\n" +
-			"modulo \n" +
-			" : 'modulo' ID ('(' parms+ ')')? -> ^('modulo' ID ^(PARMS parms+)?) \n" +
-			" ; \n" +
-			"parms : '#'|ID; \n" +
-			"ID : ('a'..'z' | 'A'..'Z')+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "modulo", "modulo abc (x y #)", debug);
-		assertEquals("(modulo abc (PARMS x y #))\n", found);
-	}
-
-	// C A R D I N A L I T Y  I S S U E S
-
-	@Test public void testCardinality() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {BLOCK;}\n" +
-			"a : ID ID INT INT INT -> (ID INT)+;\n"+
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+; \n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "a b 3 4 5", debug);
-		String expecting =
-			"org.antlr.runtime.tree.RewriteCardinalityException: token ID";
-		String found = getFirstLineOfException();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testCardinality2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID+ -> ID ID ID ;\n" + // only 2 input IDs
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		execParser("T.g", grammar, "TParser", "TLexer",
-				   "a", "a b", debug);
-		String expecting =
-			"org.antlr.runtime.tree.RewriteCardinalityException: token ID";
-		String found = getFirstLineOfException();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testCardinality3() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID? INT -> ID INT ;\n" +
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		execParser("T.g", grammar, "TParser", "TLexer",
-				   "a", "3", debug);
-		String expecting =
-			"org.antlr.runtime.tree.RewriteEmptyStreamException: token ID";
-		String found = getFirstLineOfException();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testLoopCardinality() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID? INT -> ID+ INT ;\n" +
-			"op : '+'|'-' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		execParser("T.g", grammar, "TParser", "TLexer",
-				   "a", "3", debug);
-		String expecting =
-			"org.antlr.runtime.tree.RewriteEarlyExitException";
-		String found = getFirstLineOfException();
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testWildcard() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID c=. -> $c;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "abc 34", debug);
-		assertEquals("34\n", found);
-	}
-
-	// E R R O R S
-
-	@Test public void testUnknownRule() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : INT -> ugh ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		Grammar g = new Grammar(grammar);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_UNDEFINED_RULE_REF;
-		Object expectedArg = "ugh";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testKnownRuleButNotInLHS() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : INT -> b ;\n" +
-			"b : 'b' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		Grammar g = new Grammar(grammar);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS;
-		Object expectedArg = "b";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testUnknownToken() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : INT -> ICK ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		Grammar g = new Grammar(grammar);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE;
-		Object expectedArg = "ICK";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testUnknownLabel() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : INT -> $foo ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		Grammar g = new Grammar(grammar);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_UNDEFINED_LABEL_REF_IN_REWRITE;
-		Object expectedArg = "foo";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testUnknownCharLiteralToken() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : INT -> 'a' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		Grammar g = new Grammar(grammar);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE;
-		Object expectedArg = "'a'";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testUnknownStringLiteralToken() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : INT -> 'foo' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		Grammar g = new Grammar(grammar);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		int expectedMsgID = ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE;
-		Object expectedArg = "'foo'";
-		Object expectedArg2 = null;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testExtraTokenInSimpleDecl() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"tokens {EXPR;}\n" +
-			"decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "decl", "int 34 x=1;", debug);
-		assertEquals("line 1:4 extraneous input '34' expecting ID\n", this.stderrDuringParse);
-		assertEquals("(EXPR int x 1)\n", found); // tree gets correct x and 1 tokens
-	}
-
-	@Test public void testMissingIDInSimpleDecl() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"tokens {EXPR;}\n" +
-			"decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "decl", "int =1;", debug);
-		assertEquals("line 1:4 missing ID at '='\n", this.stderrDuringParse);
-		assertEquals("(EXPR int <missing ID> 1)\n", found); // tree gets invented ID token
-	}
-
-	@Test public void testMissingSetInSimpleDecl() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"tokens {EXPR;}\n" +
-			"decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;\n" +
-			"type : 'int' | 'float' ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "decl", "x=1;", debug);
-		assertEquals("line 1:0 mismatched input 'x' expecting set null\n", this.stderrDuringParse);
-		assertEquals("(EXPR <error: x> x 1)\n", found); // tree gets error node in place of missing type
-	}
-
-	@Test public void testMissingTokenGivesErrorNode() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ID INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "abc", debug);
-		assertEquals("line 1:3 missing INT at '<EOF>'\n", this.stderrDuringParse);
-		// doesn't do in-line recovery for sets (yet?)
-		assertEquals("abc <missing INT>\n", found);
-	}
-
-	@Test public void testExtraTokenGivesErrorNode() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : b c -> b c;\n" +
-			"b : ID -> ID ;\n" +
-			"c : INT -> INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "abc ick 34", debug);
-		assertEquals("line 1:4 extraneous input 'ick' expecting INT\n", this.stderrDuringParse);
-		assertEquals("abc 34\n", found);
-	}
-
-	@Test public void testMissingFirstTokenGivesErrorNode() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ID INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "34", debug);
-		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
-		assertEquals("<missing ID> 34\n", found);
-	}
-
-	@Test public void testMissingFirstTokenGivesErrorNode2() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : b c -> b c;\n" +
-			"b : ID -> ID ;\n" +
-			"c : INT -> INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "34", debug);
-		// finds an error at the first token, 34, and re-syncs.
-		// re-synchronizing does not consume a token because 34 follows
-		// ref to rule b (start of c). It then matches 34 in c.
-		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
-		assertEquals("<missing ID> 34\n", found);
-	}
-
-	@Test public void testNoViableAltGivesErrorNode() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"options {output=AST;}\n" +
-			"a : b -> b | c -> c;\n" +
-			"b : ID -> ID ;\n" +
-			"c : INT -> INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"S : '*' ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-								  "a", "*", debug);
-		// finds no viable alternative at the first token, '*'; the parser
-		// reports the error and returns an error node covering the resync'd token.
-		assertEquals("line 1:0 no viable alternative at input '*'\n", this.stderrDuringParse);
-		assertEquals("<unexpected: [@0,0:0='*',<6>,1:0], resync=*>\n", found);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestRewriteTemplates.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestRewriteTemplates.java
deleted file mode 100644
index 4dc89f2..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestRewriteTemplates.java
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.tool.*;
-import org.junit.Test;
-
-public class TestRewriteTemplates extends BaseTest {
-	protected boolean debug = false;
-
-	@Test public void testDelete() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a : ID INT -> ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("", found);
-	}
-
-	@Test public void testAction() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a : ID INT -> {new StringTemplate($ID.text)} ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testEmbeddedLiteralConstructor() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a : ID INT -> {%{$ID.text}} ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testInlineTemplate() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a : ID INT -> template(x={$ID},y={$INT}) <<x:<x.text>, y:<y.text>;>> ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("x:abc, y:34;\n", found);
-	}
-
-	@Test public void testNamedTemplate() throws Exception {
-		// the support code adds a template group in its output Test.java
-		// that defines template foo.
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a : ID INT -> foo(x={$ID.text},y={$INT.text}) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("abc 34\n", found);
-	}
-
-	@Test public void testIndirectTemplate() throws Exception {
-		// the support code adds a template group in its output Test.java
-		// that defines template foo.
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a : ID INT -> ({\"foo\"})(x={$ID.text},y={$INT.text}) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("abc 34\n", found);
-	}
-
-	@Test public void testInlineTemplateInvokingLib() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a : ID INT -> template(x={$ID.text},y={$INT.text}) \"<foo(...)>\" ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("abc 34\n", found);
-	}
-
-	@Test public void testPredicatedAlts() throws Exception {
-		// the support code adds a template group in its output Test.java
-		// that defines template foo.
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a : ID INT -> {false}? foo(x={$ID.text},y={$INT.text})\n" +
-			"           -> foo(x={\"hi\"}, y={$ID.text})\n" +
-			"  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("hi abc\n", found);
-	}
-
-	@Test public void testTemplateReturn() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a : b {System.out.println($b.st);} ;\n" +
-			"b : ID INT -> foo(x={$ID.text},y={$INT.text}) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("abc 34\n", found);
-	}
-
-	@Test public void testReturnValueWithTemplate() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a : b {System.out.println($b.i);} ;\n" +
-			"b returns [int i] : ID INT {$i=8;} ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("8\n", found);
-	}
-
-	@Test public void testTemplateRefToDynamicAttributes() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a scope {String id;} : ID {$a::id=$ID.text;} b\n" +
-			"	{System.out.println($b.st.toString());}\n" +
-			"   ;\n" +
-			"b : INT -> foo(x={$a::id}) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abc 34", debug);
-		assertEquals("abc \n", found);
-	}
-
-	// tests for rewriting templates in tree parsers
-
-	@Test public void testSingleNode() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {ASTLabelType=CommonTree; output=template;}\n" +
-			"s : a {System.out.println($a.st);} ;\n" +
-			"a : ID -> template(x={$ID.text}) <<|<x>|>> ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "s", "abc");
-		assertEquals("|abc|\n", found);
-	}
-
-	@Test public void testSingleNodeRewriteMode() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
-			"s : a {System.out.println(input.getTokenStream().toString(0,0));} ;\n" +
-			"a : ID -> template(x={$ID.text}) <<|<x>|>> ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "s", "abc");
-		assertEquals("|abc|\n", found);
-	}
-
-	@Test public void testRewriteRuleAndRewriteModeOnSimpleElements() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"tree grammar TP;\n"+
-			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
-			"a: ^(A B) -> {ick}\n" +
-			" | y+=INT -> {ick}\n" +
-			" | x=ID -> {ick}\n" +
-			" | BLORT -> {ick}\n" +
-			" ;\n"
-		);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
-	}
-
-	@Test public void testRewriteRuleAndRewriteModeIgnoreActionsPredicates() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"tree grammar TP;\n"+
-			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
-			"a: {action} {action2} x=A -> {ick}\n" +
-			" | {pred1}? y+=B -> {ick}\n" +
-			" | C {action} -> {ick}\n" +
-			" | {pred2}?=> z+=D -> {ick}\n" +
-			" | (E)=> ^(F G) -> {ick}\n" +
-			" ;\n"
-		);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
-	}
-
-	@Test public void testRewriteRuleAndRewriteModeNotSimple() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"tree grammar TP;\n"+
-			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
-			"a  : ID+ -> {ick}\n" +
-			"   | INT INT -> {ick}\n" +
-			"   ;\n"
-		);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
-	}
-
-	@Test public void testRewriteRuleAndRewriteModeRefRule() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"tree grammar TP;\n"+
-			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
-			"a  : b+ -> {ick}\n" +
-			"   | b b A -> {ick}\n" +
-			"   ;\n" +
-			"b  : B ;\n"
-		);
-		Tool antlr = newTool();
-		antlr.setOutputDirectory(null); // write to /dev/null
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer();
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSemanticPredicateEvaluation.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestSemanticPredicateEvaluation.java
deleted file mode 100644
index 72f8d02..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSemanticPredicateEvaluation.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.junit.Test;
-
-public class TestSemanticPredicateEvaluation extends BaseTest {
-	@Test public void testSimpleCyclicDFAWithPredicate() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"a : {false}? 'x'* 'y' {System.out.println(\"alt1\");}\n" +
-			"  | {true}?  'x'* 'y' {System.out.println(\"alt2\");}\n" +
-			"  ;\n" ;
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "xxxy", false);
-		assertEquals("alt2\n", found);
-	}
-
-	@Test public void testSimpleCyclicDFAWithInstanceVarPredicate() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"@members {boolean v=true;}\n" +
-			"a : {false}? 'x'* 'y' {System.out.println(\"alt1\");}\n" +
-			"  | {v}?     'x'* 'y' {System.out.println(\"alt2\");}\n" +
-			"  ;\n" ;
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "xxxy", false);
-		assertEquals("alt2\n", found);
-	}
-
-	@Test public void testPredicateValidation() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"@members {\n" +
-			"public void reportError(RecognitionException e) {\n" +
-			"    System.out.println(\"error: \"+e.toString());\n" +
-			"}\n" +
-			"}\n" +
-			"\n" +
-			"a : {false}? 'x'\n" +
-			"  ;\n" ;
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "x", false);
-		assertEquals("error: FailedPredicateException(a,{false}?)\n", found);
-	}
-
-	@Test public void testLexerPreds() throws Exception {
-		String grammar =
-			"grammar foo;" +
-			"@lexer::members {boolean p=false;}\n" +
-			"a : (A|B)+ ;\n" +
-			"A : {p}? 'a'  {System.out.println(\"token 1\");} ;\n" +
-			"B : {!p}? 'a' {System.out.println(\"token 2\");} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "a", false);
-		// "a" is ambig; can match both A, B.  Pred says match 2
-		assertEquals("token 2\n", found);
-	}
-
-	@Test public void testLexerPreds2() throws Exception {
-		String grammar =
-			"grammar foo;" +
-			"@lexer::members {boolean p=true;}\n" +
-			"a : (A|B)+ ;\n" +
-			"A : {p}? 'a' {System.out.println(\"token 1\");} ;\n" +
-			"B : ('a'|'b')+ {System.out.println(\"token 2\");} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "a", false);
-		// "a" is ambig; can match both A, B.  Pred says match 1
-		assertEquals("token 1\n", found);
-	}
-
-	@Test public void testLexerPredInExitBranch() throws Exception {
-		// p says it's ok to exit; it has precedence over the !p loopback branch
-		String grammar =
-			"grammar foo;" +
-			"@lexer::members {boolean p=true;}\n" +
-			"a : (A|B)+ ;\n" +
-			"A : ('a' {System.out.print(\"1\");})*\n" +
-			"    {p}?\n" +
-			"    ('a' {System.out.print(\"2\");})* ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "aaa", false);
-		assertEquals("222\n", found);
-	}
-
-	@Test public void testLexerPredInExitBranch2() throws Exception {
-		String grammar =
-			"grammar foo;" +
-			"@lexer::members {boolean p=true;}\n" +
-			"a : (A|B)+ ;\n" +
-			"A : ({p}? 'a' {System.out.print(\"1\");})*\n" +
-			"    ('a' {System.out.print(\"2\");})* ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "aaa", false);
-		assertEquals("111\n", found);
-	}
-
-	@Test public void testLexerPredInExitBranch3() throws Exception {
-		String grammar =
-			"grammar foo;" +
-			"@lexer::members {boolean p=true;}\n" +
-			"a : (A|B)+ ;\n" +
-			"A : ({p}? 'a' {System.out.print(\"1\");} | )\n" +
-			"    ('a' {System.out.print(\"2\");})* ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "aaa", false);
-		assertEquals("122\n", found);
-	}
-
-	@Test public void testLexerPredInExitBranch4() throws Exception {
-		String grammar =
-			"grammar foo;" +
-			"a : (A|B)+ ;\n" +
-			"A @init {int n=0;} : ({n<2}? 'a' {System.out.print(n++);})+\n" +
-			"    ('a' {System.out.print(\"x\");})* ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "aaaaa", false);
-		assertEquals("01xxx\n", found);
-	}
-
-	@Test public void testLexerPredsInCyclicDFA() throws Exception {
-		String grammar =
-			"grammar foo;" +
-			"@lexer::members {boolean p=false;}\n" +
-			"a : (A|B)+ ;\n" +
-			"A : {p}? ('a')+ 'x'  {System.out.println(\"token 1\");} ;\n" +
-			"B :      ('a')+ 'x' {System.out.println(\"token 2\");} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "aax", false);
-		assertEquals("token 2\n", found);
-	}
-
-	@Test public void testLexerPredsInCyclicDFA2() throws Exception {
-		String grammar =
-			"grammar foo;" +
-			"@lexer::members {boolean p=false;}\n" +
-			"a : (A|B)+ ;\n" +
-			"A : {p}? ('a')+ 'x' ('y')? {System.out.println(\"token 1\");} ;\n" +
-			"B :      ('a')+ 'x' {System.out.println(\"token 2\");} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "aax", false);
-		assertEquals("token 2\n", found);
-	}
-
-	@Test public void testGatedPred() throws Exception {
-		String grammar =
-			"grammar foo;" +
-			"a : (A|B)+ ;\n" +
-			"A : {true}?=> 'a' {System.out.println(\"token 1\");} ;\n" +
-			"B : {false}?=>('a'|'b')+ {System.out.println(\"token 2\");} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "aa", false);
-		// "a" is ambig; can match both A, B.  Pred says match A twice
-		assertEquals("token 1\ntoken 1\n", found);
-	}
-
-	@Test public void testGatedPred2() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"@lexer::members {boolean sig=false;}\n"+
-			"a : (A|B)+ ;\n" +
-			"A : 'a' {System.out.print(\"A\"); sig=true;} ;\n" +
-			"B : 'b' ;\n" +
-			"C : {sig}?=> ('a'|'b') {System.out.print(\"C\");} ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "aa", false);
-		assertEquals("AC\n", found);
-	}
-
-	@Test public void testPredWithActionTranslation() throws Exception {
-		String grammar =
-			"grammar foo;\n" +
-			"a : b[2] ;\n" +
-			"b[int i]\n" +
-			"  : {$i==1}?   'a' {System.out.println(\"alt 1\");}\n" +
-			"  | {$b.i==2}? 'a' {System.out.println(\"alt 2\");}\n" +
-			"  ;\n";
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "aa", false);
-		assertEquals("alt 2\n", found);
-	}
-
-	@Test public void testPredicatesOnEOTTarget() throws Exception {
-		String grammar =
-			"grammar foo; \n" +
-			"@lexer::members {boolean p=true, q=false;}" +
-			"a : B ;\n" +
-			"A: '</'; \n" +
-			"B: {p}? '<!' {System.out.println(\"B\");};\n" +
-			"C: {q}? '<' {System.out.println(\"C\");}; \n" +
-			"D: '<';\n" ;
-		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
-				    "a", "<!", false);
-		assertEquals("B\n", found);
-	}
-
-
-	// S U P P O R T
-
-	public void _test() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a :  ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {channel=99;} ;\n";
-		String found = execParser("t.g", grammar, "T", "TLexer",
-				    "a", "abc 34", false);
-		assertEquals("\n", found);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSemanticPredicates.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestSemanticPredicates.java
deleted file mode 100644
index 7b2d77b..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSemanticPredicates.java
+++ /dev/null
@@ -1,932 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.analysis.DFA;
-import org.antlr.analysis.DecisionProbe;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.misc.BitSet;
-import org.antlr.runtime.Token;
-import org.antlr.tool.*;
-import org.junit.Test;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-public class TestSemanticPredicates extends BaseTest {
-
-	/** Public default constructor used by TestRig */
-	public TestSemanticPredicates() {
-	}
-
-	@Test public void testPredsButSyntaxResolves() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : {p1}? A | {p2}? B ;");
-		String expecting =
-			".s0-A->:s1=>1\n" +
-			".s0-B->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testLL_1_Pred() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : {p1}? A | {p2}? A ;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-{p1}?->:s2=>1\n" +
-			".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testLL_1_Pred_forced_k_1() throws Exception {
-		// should stop just like before w/o k set.
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a options {k=1;} : {p1}? A | {p2}? A ;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-{p1}?->:s2=>1\n" +
-			".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testLL_2_Pred() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : {p1}? A B | {p2}? A B ;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-B->.s2\n" +
-			".s2-{p1}?->:s3=>1\n" +
-			".s2-{p2}?->:s4=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testPredicatedLoop() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : ( {p1}? A | {p2}? A )+;");
-		String expecting =                   // loop back
-			".s0-A->.s2\n" +
-			".s0-EOF->:s1=>3\n" +
-			".s2-{p1}?->:s3=>1\n" +
-			".s2-{p2}?->:s4=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testPredicatedToStayInLoop() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : ( {p1}? A )+ (A)+;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-{p1}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n";       // loop back
-        checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testAndPredicates() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : {p1}? {p1a}? A | {p2}? A ;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-{(p1&&p1a)}?->:s2=>1\n" +
-			".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test
-    public void testOrPredicates() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : b | {p2}? A ;\n" +
-			"b : {p1}? A | {p1a}? A ;");
-		String expecting =
-			".s0-A->.s1\n" +
-            ".s1-{(p1a||p1)}?->:s2=>1\n" +
-            ".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testIgnoresHoistingDepthGreaterThanZero() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : A {p1}? | A {p2}?;");
-		String expecting =
-			".s0-A->:s1=>1\n";
-		checkDecision(g, 1, expecting, new int[] {2},
-					  new int[] {1,2}, "A", null, null, 2, false);
-	}
-
-	@Test public void testIgnoresPredsHiddenByActions() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : {a1} {p1}? A | {a2} {p2}? A ;");
-		String expecting =
-			".s0-A->:s1=>1\n";
-		checkDecision(g, 1, expecting, new int[] {2},
-					  new int[] {1,2}, "A", null, null, 2, true);
-	}
-
-	@Test public void testIgnoresPredsHiddenByActionsOneAlt() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : {p1}? A | {a2} {p2}? A ;"); // ok since 1 pred visible
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-{p1}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null,
-					  null, null, null, null, 0, true);
-	}
-
-	/*
-	@Test public void testIncompleteSemanticHoistedContextk2() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : b | A B;\n" +
-			"b : {p1}? A B | A B ;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-B->:s2=>1\n";
-		checkDecision(g, 1, expecting, new int[] {2},
-					  new int[] {1,2}, "A B", new int[] {1}, null, 3);
-	}	
-	 */
-
-	@Test public void testHoist2() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : b | c ;\n" +
-			"b : {p1}? A ;\n" +
-			"c : {p2}? A ;\n");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-{p1}?->:s2=>1\n" +
-			".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testHoistCorrectContext() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : b | {p2}? ID ;\n" +
-			"b : {p1}? ID | INT ;\n");
-		String expecting =  // only tests after ID, not INT :)
-			".s0-ID->.s1\n" +
-			".s0-INT->:s2=>1\n" +
-			".s1-{p1}?->:s2=>1\n" +
-			".s1-{p2}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testDefaultPredNakedAltIsLast() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : b | ID ;\n" +
-			"b : {p1}? ID | INT ;\n");
-		String expecting =
-			".s0-ID->.s1\n" +
-			".s0-INT->:s2=>1\n" +
-			".s1-{p1}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testDefaultPredNakedAltNotLast() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : ID | b ;\n" +
-			"b : {p1}? ID | INT ;\n");
-		String expecting =
-			".s0-ID->.s1\n" +
-			".s0-INT->:s3=>2\n" +
-			".s1-{!(p1)}?->:s2=>1\n" +
-			".s1-{p1}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testLeftRecursivePred() throws Exception {
-		// No analysis is possible, but it's probably good to fail.  Not sure we
-		// really want left-recursion even if guarded with a pred.
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"s : a ;\n" +
-			"a : {p1}? a | ID ;\n");
-		String expecting =
-			".s0-ID->.s1\n" +
-			".s1-{p1}?->:s2=>1\n" +
-			".s1-{true}?->:s3=>2\n";
-
-		DecisionProbe.verbose=true; // make sure we get all error info
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
-		g.setCodeGenerator(generator);
-		if ( g.getNumberOfDecisions()==0 ) {
-			g.buildNFA();
-			g.createLookaheadDFAs(false);
-		}
-
-		DFA dfa = g.getLookaheadDFA(1);
-		assertEquals(null, dfa); // can't analyze.
-
-		/*
-		String result = serializer.serialize(dfa.startState);
-		assertEquals(expecting, result);
-		*/
-
-		assertEquals("unexpected number of expected problems", 1, equeue.size());
-		Message msg = (Message)equeue.errors.get(0);
-		assertTrue("warning must be a left recursion msg",
-				    msg instanceof LeftRecursionCyclesMessage);
-	}
-
-	@Test public void testIgnorePredFromLL2AltLastAltIsDefaultTrue() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : {p1}? A B | A C | {p2}? A | {p3}? A | A ;\n");
-		// two situations of note:
-		// 1. A B syntax is enough to predict that alt, so p1 is not used
-		//    to distinguish it from alts 2..5
-		// 2. Alts 3, 4, 5 are nondeterministic upon A.  p2, p3 and the
-		//    complement of p2||p3 are sufficient to resolve the conflict. Do
-		//    not include alt 1's p1 pred in the "complement of other alts"
-		//    because it is not considered nondeterministic with alts 3..5
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-B->:s2=>1\n" +
-			".s1-C->:s3=>2\n" +
-			".s1-{p2}?->:s4=>3\n" +
-			".s1-{p3}?->:s5=>4\n" +
-			".s1-{true}?->:s6=>5\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testIgnorePredFromLL2AltPredUnionNeeded() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : {p1}? A B | A C | {p2}? A | A | {p3}? A ;\n");
-		// two situations of note:
-		// 1. A B syntax is enough to predict that alt, so p1 is not used
-		//    to distinguish it from alts 2..5
-		// 2. Alts 3, 4, 5 are nondeterministic upon A.  p2, p3 and the
-		//    complement of p2||p3 are sufficient to resolve the conflict. Do
-		//    not include alt 1's p1 pred in the "complement of other alts"
-		//    because it is not considered nondeterministic with alts 3..5
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-B->:s2=>1\n" +
-			".s1-C->:s3=>2\n" +
-			".s1-{!((p3||p2))}?->:s5=>4\n" +
-			".s1-{p2}?->:s4=>3\n" +
-			".s1-{p3}?->:s6=>5\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testPredGets2SymbolSyntacticContext() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : b | A B | C ;\n" +
-			"b : {p1}? A B ;\n");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s0-C->:s5=>3\n" +
-			".s1-B->.s2\n" +
-			".s2-{p1}?->:s3=>1\n" +
-			".s2-{true}?->:s4=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testMatchesLongestThenTestPred() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"a : b | c ;\n" +
-			"b : {p}? A ;\n" +
-			"c : {q}? (A|B)+ ;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s0-B->:s3=>2\n" +
-			".s1-{p}?->:s2=>1\n" +
-			".s1-{q}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testPredsUsedAfterRecursionOverflow() throws Exception {
-		// analysis must bail out due to non-LL(*) nature (ovf)
-		// retries with k=1 (using the LL(*) algorithm rather than the optimized
-		// version, since it has preds)
-		Grammar g = new Grammar(
-			"parser grammar P;\n"+
-			"s : {p1}? e '.' | {p2}? e ':' ;\n" +
-			"e : '(' e ')' | INT ;\n");
-		String expecting =
-			".s0-'('->.s1\n" +
-			".s0-INT->.s4\n" +
-			".s1-{p1}?->:s2=>1\n" +
-			".s1-{p2}?->:s3=>2\n" +
-			".s4-{p1}?->:s2=>1\n" +
-			".s4-{p2}?->:s3=>2\n";
-		DecisionProbe.verbose=true; // make sure we get all error info
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
-		g.setCodeGenerator(generator);
-		if ( g.getNumberOfDecisions()==0 ) {
-			g.buildNFA();
-			g.createLookaheadDFAs(false);
-		}
-
-		assertEquals("unexpected number of expected problems", 0, equeue.size());
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testPredsUsedAfterK2FailsNoRecursionOverflow() throws Exception {
-		// analysis must bail out due to non-LL(*) nature (ovf)
-		// retries with k=1 (using the LL(*) algorithm rather than the optimized
-		// version, since it has preds)
-		Grammar g = new Grammar(
-			"grammar P;\n" +
-			"options {k=2;}\n"+
-			"s : {p1}? e '.' | {p2}? e ':' ;\n" +
-			"e : '(' e ')' | INT ;\n");
-		String expecting =
-			".s0-'('->.s1\n" +
-			".s0-INT->.s6\n" +
-			".s1-'('->.s2\n" +
-			".s1-INT->.s5\n" +
-			".s2-{p1}?->:s3=>1\n" +
-			".s2-{p2}?->:s4=>2\n" +
-			".s5-{p1}?->:s3=>1\n" +
-			".s5-{p2}?->:s4=>2\n" +
-			".s6-'.'->:s3=>1\n" +
-			".s6-':'->:s4=>2\n";
-		DecisionProbe.verbose=true; // make sure we get all error info
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
-		g.setCodeGenerator(generator);
-		if ( g.getNumberOfDecisions()==0 ) {
-			g.buildNFA();
-			g.createLookaheadDFAs(false);
-		}
-
-		assertEquals("unexpected number of expected problems", 0, equeue.size());
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testLexerMatchesLongestThenTestPred() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"B : {p}? 'a' ;\n" +
-			"C : {q}? ('a'|'b')+ ;");
-		String expecting =
-			".s0-'a'->.s1\n" +
-			".s0-'b'->:s4=>2\n" +
-			".s1-'a'..'b'->:s4=>2\n" +
-			".s1-<EOT>->.s2\n" +
-			".s2-{p}?->:s3=>1\n" +
-			".s2-{q}?->:s4=>2\n";
-		checkDecision(g, 2, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testLexerMatchesLongestMinusPred() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"B : 'a' ;\n" +
-			"C : ('a'|'b')+ ;");
-		String expecting =
-			".s0-'a'->.s1\n" +
-			".s0-'b'->:s3=>2\n" +
-			".s1-'a'..'b'->:s3=>2\n" +
-			".s1-<EOT>->:s2=>1\n";
-		checkDecision(g, 2, expecting, null, null, null, null, null, 0, false);
-	}
-
-    @Test
-    public void testGatedPred() throws Exception {
-		// gated preds are present on all arcs in predictor
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"B : {p}? => 'a' ;\n" +
-			"C : {q}? => ('a'|'b')+ ;");
-		String expecting =
-			".s0-'a'&&{(q||p)}?->.s1\n" +
-            ".s0-'b'&&{q}?->:s4=>2\n" +
-            ".s1-'a'..'b'&&{q}?->:s4=>2\n" +
-            ".s1-<EOT>&&{(q||p)}?->.s2\n" +
-            ".s2-{p}?->:s3=>1\n" +
-            ".s2-{q}?->:s4=>2\n";
-		checkDecision(g, 2, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testGatedPredHoistsAndCanBeInStopState() throws Exception {
-		// I found a bug where merging stop states made us throw away
-		// a stop state with a gated pred!
-		Grammar g = new Grammar(
-			"grammar u;\n" +
-			"a : b+ ;\n" +
-			"b : 'x' | {p}?=> 'y' ;");
-		String expecting =
-			".s0-'x'->:s2=>1\n" +
-			".s0-'y'&&{p}?->:s3=>1\n" +
-			".s0-EOF->:s1=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test
-    public void testGatedPredInCyclicDFA() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"A : {p}?=> ('a')+ 'x' ;\n" +
-			"B : {q}?=> ('a'|'b')+ 'x' ;");
-		String expecting =
-			".s0-'a'&&{(q||p)}?->.s1\n" +
-            ".s0-'b'&&{q}?->:s5=>2\n" +
-            ".s1-'a'&&{(q||p)}?->.s1\n" +
-            ".s1-'b'&&{q}?->:s5=>2\n" +
-            ".s1-'x'&&{(q||p)}?->.s2\n" +
-            ".s2-<EOT>&&{(q||p)}?->.s3\n" +
-            ".s3-{p}?->:s4=>1\n" +
-            ".s3-{q}?->:s5=>2\n";
-		checkDecision(g, 3, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testGatedPredNotActuallyUsedOnEdges() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar P;\n"+
-			"A : ('a' | {p}?=> 'a')\n" +
-			"  | 'a' 'b'\n" +
-			"  ;");
-		String expecting1 =
-			".s0-'a'->.s1\n" +
-			".s1-{!(p)}?->:s2=>1\n" +  	// Used to disambig subrule
-			".s1-{p}?->:s3=>2\n";
-		// rule A decision can't test p from s0->1 because 'a' is valid
-		// for alt1 *and* alt2 w/o p.  Can't test p from s1 to s3 because
-		// we might have passed the first alt of subrule.  The same state
-		// is listed in s2 in 2 different configurations: one with and one
-		// w/o p.  Can't test therefore.  p||true == true.
-		String expecting2 =
-			".s0-'a'->.s1\n" +
-			".s1-'b'->:s2=>2\n" +
-			".s1-<EOT>->:s3=>1\n";
-		checkDecision(g, 1, expecting1, null, null, null, null, null, 0, false);
-		checkDecision(g, 2, expecting2, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testGatedPredDoesNotForceAllToBeGated() throws Exception {
-		Grammar g = new Grammar(
-			"grammar w;\n" +
-			"a : b | c ;\n" +
-			"b : {p}? B ;\n" +
-			"c : {q}?=> d ;\n" +
-			"d : {r}? C ;\n");
-		String expecting =
-			".s0-B->:s1=>1\n" +
-			".s0-C&&{q}?->:s2=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testGatedPredDoesNotForceAllToBeGated2() throws Exception {
-		Grammar g = new Grammar(
-			"grammar w;\n" +
-			"a : b | c ;\n" +
-			"b : {p}? B ;\n" +
-			"c : {q}?=> d ;\n" +
-			"d : {r}?=> C\n" +
-			"  | B\n" +
-			"  ;\n");
-		String expecting =
-			".s0-B->.s1\n" +
-			".s0-C&&{(q&&r)}?->:s3=>2\n" +
-			".s1-{p}?->:s2=>1\n" +
-			".s1-{q}?->:s3=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	@Test public void testORGatedPred() throws Exception {
-		Grammar g = new Grammar(
-			"grammar w;\n" +
-			"a : b | c ;\n" +
-			"b : {p}? B ;\n" +
-			"c : {q}?=> d ;\n" +
-			"d : {r}?=> C\n" +
-			"  | {s}?=> B\n" +
-			"  ;\n");
-		String expecting =
-			".s0-B->.s1\n" +
-			".s0-C&&{(q&&r)}?->:s3=>2\n" +
-			".s1-{(q&&s)}?->:s3=>2\n" +
-			".s1-{p}?->:s2=>1\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	/** The following grammar should yield an error that rule 'a' has
-	 *  insufficient semantic info pulled from 'b'.
-	 */
-	@Test public void testIncompleteSemanticHoistedContext() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : b | B;\n" +
-			"b : {p1}? B | B ;");
-		String expecting =
-			".s0-B->:s1=>1\n";
-		checkDecision(g, 1, expecting, new int[] {2},
-					  new int[] {1,2}, "B", new int[] {1}, null, 3, false);
-	}
-
-	@Test public void testIncompleteSemanticHoistedContextk2() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : b | A B;\n" +
-			"b : {p1}? A B | A B ;");
-		String expecting =
-			".s0-A->.s1\n" +
-			".s1-B->:s2=>1\n";
-		checkDecision(g, 1, expecting, new int[] {2},
-					  new int[] {1,2}, "A B", new int[] {1}, null, 3, false);
-	}
-
-	@Test public void testIncompleteSemanticHoistedContextInFOLLOW() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"options {k=1;}\n" + // limit to k=1 because it's LL(2); force pred hoist
-			"a : A? ;\n" + // need FOLLOW
-			"b : X a {p1}? A | Y a A ;"); // only one A is covered
-		String expecting =
-			".s0-A->:s1=>1\n"; // s0-EOF->s2 branch pruned during optimization
-		checkDecision(g, 1, expecting, new int[] {2},
-					  new int[] {1,2}, "A", new int[] {2}, null, 3, false);
-	}
-
-	@Test public void testIncompleteSemanticHoistedContextInFOLLOWk2() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (A B)? ;\n" + // need FOLLOW
-			"b : X a {p1}? A B | Y a A B | Z a ;"); // only first alt is covered
-		String expecting =
-			".s0-A->.s1\n" +
-			".s0-EOF->:s3=>2\n" +
-			".s1-B->:s2=>1\n";
-		checkDecision(g, 1, expecting, null,
-					  new int[] {1,2}, "A B", new int[] {2}, null, 2, false);
-	}
-
-	@Test public void testIncompleteSemanticHoistedContextInFOLLOWDueToHiddenPred() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : (A B)? ;\n" + // need FOLLOW
-			"b : X a {p1}? A B | Y a {a1} {p2}? A B | Z a ;"); // only first alt is covered
-		String expecting =
-			".s0-A->.s1\n" +
-			".s0-EOF->:s3=>2\n" +
-			".s1-B->:s2=>1\n";
-		checkDecision(g, 1, expecting, null,
-					  new int[] {1,2}, "A B", new int[] {2}, null, 2, true);
-	}
-
-	/** The following grammar should yield an error that rule 'a' has
-	 *  insufficient semantic info pulled from 'b'.  This is the same
-	 *  as the previous case except that the D prevents the B path from
-	 *  "pinching" together into a single NFA state.
-	 *
-	 *  This test also demonstrates that just because B D could predict
-	 *  alt 1 in rule 'a', it is unnecessary to continue NFA->DFA
-	 *  conversion to include an edge for D.  Alt 1 is the only possible
-	 *  prediction because we resolve the ambiguity by choosing alt 1.
-	 */
-	@Test public void testIncompleteSemanticHoistedContext2() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : b | B;\n" +
-			"b : {p1}? B | B D ;");
-		String expecting =
-			".s0-B->:s1=>1\n";
-		checkDecision(g, 1, expecting, new int[] {2},
-					  new int[] {1,2}, "B", new int[] {1},
-					  null, 3, false);
-	}
-
-	@Test public void testTooFewSemanticPredicates() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : {p1}? A | A | A ;");
-		String expecting =
-			".s0-A->:s1=>1\n";
-		checkDecision(g, 1, expecting, new int[] {2,3},
-					  new int[] {1,2,3}, "A",
-					  null, null, 2, false);
-	}
-
-	@Test public void testPredWithK1() throws Exception {
-		Grammar g = new Grammar(
-			"\tlexer grammar TLexer;\n" +
-			"A\n" +
-			"options {\n" +
-			"  k=1;\n" +
-			"}\n" +
-			"  : {p1}? ('x')+ '.'\n" +
-			"  | {p2}? ('x')+ '.'\n" +
-			"  ;\n");
-		String expecting =
-			".s0-'x'->.s1\n" +
-			".s1-{p1}?->:s2=>1\n" +
-			".s1-{p2}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] insufficientPredAlts = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 3, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, insufficientPredAlts,
-					  danglingAlts, numWarnings, false);
-	}
-
-	@Test public void testPredWithArbitraryLookahead() throws Exception {
-		Grammar g = new Grammar(
-			"\tlexer grammar TLexer;\n" +
-			"A : {p1}? ('x')+ '.'\n" +
-			"  | {p2}? ('x')+ '.'\n" +
-			"  ;\n");
-		String expecting =
-			".s0-'x'->.s1\n" +
-			".s1-'.'->.s2\n" +
-			".s1-'x'->.s1\n" +
-			".s2-{p1}?->:s3=>1\n" +
-			".s2-{p2}?->:s4=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] insufficientPredAlts = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 3, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, insufficientPredAlts,
-					  danglingAlts, numWarnings, false);
-	}
-
-	@Test
-    /** For a DFA state with lots of configurations that have the same
-	 *  predicate, don't just OR them all together as it's a waste to
-	 *  test a||a||b||a||a etc...  ANTLR makes a unique set and THEN
-	 *  OR's them together.
-	 */
-    public void testUniquePredicateOR() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar v;\n" +
-			"\n" +
-			"a : {a}? b\n" +
-			"  | {b}? b\n" +
-			"  ;\n" +
-			"\n" +
-			"b : {c}? (X)+ ;\n" +
-			"\n" +
-			"c : a\n" +
-			"  | b\n" +
-			"  ;\n");
-		String expecting =
-			".s0-X->.s1\n" +
-            ".s1-{((a&&c)||(b&&c))}?->:s2=>1\n" +
-            ".s1-{c}?->:s3=>2\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = null;
-		String ambigInput = null;
-		int[] insufficientPredAlts = null;
-		int[] danglingAlts = null;
-		int numWarnings = 0;
-		checkDecision(g, 3, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, insufficientPredAlts,
-					  danglingAlts, numWarnings, false);
-	}
-
-    @Test
-    public void testSemanticContextPreventsEarlyTerminationOfClosure() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar T;\n" +
-			"a : loop SEMI | ID SEMI\n" +
-			"  ;\n" +
-			"loop\n" +
-			"    : {while}? ID\n" +
-			"    | {do}? ID\n" +
-			"    | {for}? ID\n" +
-			"    ;");
-		String expecting =
-			".s0-ID->.s1\n" +
-            ".s1-SEMI->.s2\n" +
-            ".s2-{(for||do||while)}?->:s3=>1\n" +
-            ".s2-{true}?->:s4=>2\n";
-		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
-	}
-
-	// S U P P O R T
-
-	public void _template() throws Exception {
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : A | B;");
-		String expecting =
-			"\n";
-		int[] unreachableAlts = null;
-		int[] nonDetAlts = new int[] {1,2};
-		String ambigInput = "L ID R";
-		int[] insufficientPredAlts = new int[] {1};
-		int[] danglingAlts = null;
-		int numWarnings = 1;
-		checkDecision(g, 1, expecting, unreachableAlts,
-					  nonDetAlts, ambigInput, insufficientPredAlts,
-					  danglingAlts, numWarnings, false);
-	}
-
-	protected void checkDecision(Grammar g,
-								 int decision,
-								 String expecting,
-								 int[] expectingUnreachableAlts,
-								 int[] expectingNonDetAlts,
-								 String expectingAmbigInput,
-								 int[] expectingInsufficientPredAlts,
-								 int[] expectingDanglingAlts,
-								 int expectingNumWarnings,
-								 boolean hasPredHiddenByAction)
-		throws Exception
-	{
-		DecisionProbe.verbose=true; // make sure we get all error info
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
-		g.setCodeGenerator(generator);
-		// mimic actions of org.antlr.Tool first time for grammar g
-		if ( g.getNumberOfDecisions()==0 ) {
-			g.buildNFA();
-			g.createLookaheadDFAs(false);
-		}
-
-		if ( equeue.size()!=expectingNumWarnings ) {
-			System.err.println("Warnings issued: "+equeue);
-		}
-
-		assertEquals("unexpected number of expected problems",
-				   expectingNumWarnings, equeue.size());
-
-		DFA dfa = g.getLookaheadDFA(decision);
-		FASerializer serializer = new FASerializer(g);
-		String result = serializer.serialize(dfa.startState);
-		//System.out.print(result);
-		List unreachableAlts = dfa.getUnreachableAlts();
-
-		// make sure unreachable alts are as expected
-		if ( expectingUnreachableAlts!=null ) {
-			BitSet s = new BitSet();
-			s.addAll(expectingUnreachableAlts);
-			BitSet s2 = new BitSet();
-			s2.addAll(unreachableAlts);
-			assertEquals("unreachable alts mismatch", s, s2);
-		}
-		else {
-			assertEquals("unreachable alts mismatch", 0,
-						 unreachableAlts!=null?unreachableAlts.size():0);
-		}
-
-		// check conflicting input
-		if ( expectingAmbigInput!=null ) {
-			// first, find nondet message
-			Message msg = getNonDeterminismMessage(equeue.warnings);
-			assertNotNull("no nondeterminism warning?", msg);
-			assertTrue("expecting nondeterminism; found "+msg.getClass().getName(),
-			msg instanceof GrammarNonDeterminismMessage);
-			GrammarNonDeterminismMessage nondetMsg =
-				getNonDeterminismMessage(equeue.warnings);
-			List labels =
-				nondetMsg.probe.getSampleNonDeterministicInputSequence(nondetMsg.problemState);
-			String input = nondetMsg.probe.getInputSequenceDisplay(labels);
-			assertEquals(expectingAmbigInput, input);
-		}
-
-		// check nondet alts
-		if ( expectingNonDetAlts!=null ) {
-			GrammarNonDeterminismMessage nondetMsg =
-				getNonDeterminismMessage(equeue.warnings);
-			assertNotNull("found no nondet alts; expecting: "+
-										str(expectingNonDetAlts), nondetMsg);
-			List nonDetAlts =
-				nondetMsg.probe.getNonDeterministicAltsForState(nondetMsg.problemState);
-			// compare nonDetAlts with expectingNonDetAlts
-			BitSet s = new BitSet();
-			s.addAll(expectingNonDetAlts);
-			BitSet s2 = new BitSet();
-			s2.addAll(nonDetAlts);
-			assertEquals("nondet alts mismatch", s, s2);
-			assertEquals("mismatch between expected hasPredHiddenByAction", hasPredHiddenByAction,
-						 nondetMsg.problemState.dfa.hasPredicateBlockedByAction);
-		}
-		else {
-			// not expecting any nondet alts, make sure there are none
-			GrammarNonDeterminismMessage nondetMsg =
-				getNonDeterminismMessage(equeue.warnings);
-			assertNull("found nondet alts, but expecting none", nondetMsg);
-		}
-
-		if ( expectingInsufficientPredAlts!=null ) {
-			GrammarInsufficientPredicatesMessage insuffPredMsg =
-				getGrammarInsufficientPredicatesMessage(equeue.warnings);
-			assertNotNull("found no GrammarInsufficientPredicatesMessage alts; expecting: "+
-										str(expectingNonDetAlts), insuffPredMsg);
-			Map<Integer, Set<Token>> locations = insuffPredMsg.altToLocations;
-			Set actualAlts = locations.keySet();
-			BitSet s = new BitSet();
-			s.addAll(expectingInsufficientPredAlts);
-			BitSet s2 = new BitSet();
-			s2.addAll(actualAlts);
-			assertEquals("mismatch between insufficiently covered alts", s, s2);
-			assertEquals("mismatch between expected hasPredHiddenByAction", hasPredHiddenByAction,
-						 insuffPredMsg.problemState.dfa.hasPredicateBlockedByAction);
-		}
-		else {
-			// not expecting any nondet alts, make sure there are none
-			GrammarInsufficientPredicatesMessage nondetMsg =
-				getGrammarInsufficientPredicatesMessage(equeue.warnings);
-			if ( nondetMsg!=null ) {
-				System.out.println(equeue.warnings);
-			}
-			assertNull("found insufficiently covered alts, but expecting none", nondetMsg);
-		}
-
-		assertEquals(expecting, result);
-	}
-
-	protected GrammarNonDeterminismMessage getNonDeterminismMessage(List warnings) {
-		for (int i = 0; i < warnings.size(); i++) {
-			Message m = (Message) warnings.get(i);
-			if ( m instanceof GrammarNonDeterminismMessage ) {
-				return (GrammarNonDeterminismMessage)m;
-			}
-		}
-		return null;
-	}
-
-	protected GrammarInsufficientPredicatesMessage getGrammarInsufficientPredicatesMessage(List warnings) {
-		for (int i = 0; i < warnings.size(); i++) {
-			Message m = (Message) warnings.get(i);
-			if ( m instanceof GrammarInsufficientPredicatesMessage ) {
-				return (GrammarInsufficientPredicatesMessage)m;
-			}
-		}
-		return null;
-	}
-
-	protected String str(int[] elements) {
-		StringBuffer buf = new StringBuffer();
-		for (int i = 0; i < elements.length; i++) {
-			if ( i>0 ) {
-				buf.append(", ");
-			}
-			int element = elements[i];
-			buf.append(element);
-		}
-		return buf.toString();
-	}
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSets.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestSets.java
deleted file mode 100644
index 6f21bd3..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSets.java
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.junit.Test;
-
-/** Test the set stuff in lexer and parser */
-public class TestSets extends BaseTest {
-	protected boolean debug = false;
-
-	/** Public default constructor used by TestRig */
-	public TestSets() {
-	}
-
-	@Test public void testSeqDoesNotBecomeSet() throws Exception {
-		// this must return A not I to the parser; calling a nonfragment rule
-		// from a nonfragment rule does not set the overall token.
-		String grammar =
-			"grammar P;\n" +
-			"a : C {System.out.println(input);} ;\n" +
-			"fragment A : '1' | '2';\n" +
-			"fragment B : '3' '4';\n" +
-			"C : A | B;\n";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-								  "a", "34", debug);
-		assertEquals("34\n", found);
-	}
-
-	@Test public void testParserSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : t=('x'|'y') {System.out.println($t.text);} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", debug);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testParserNotSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : t=~('x'|'y') 'z' {System.out.println($t.text);} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "zz", debug);
-		assertEquals("z\n", found);
-	}
-
-	@Test public void testParserNotToken() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : ~'x' 'z' {System.out.println(input);} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "zz", debug);
-		assertEquals("zz\n", found);
-	}
-
-	@Test public void testParserNotTokenWithLabel() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : t=~'x' 'z' {System.out.println($t.text);} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "zz", debug);
-		assertEquals("z\n", found);
-	}
-
-	@Test public void testRuleAsSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a @after {System.out.println(input);} : 'a' | 'b' |'c' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "b", debug);
-		assertEquals("b\n", found);
-	}
-
-	@Test public void testRuleAsSetAST() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'a' | 'b' |'c' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "b", debug);
-		assertEquals("b\n", found);
-	}
-
-	@Test public void testNotChar() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println($A.text);} ;\n" +
-			"A : ~'b' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", debug);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testOptionalSingleElement() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A? 'c' {System.out.println(input);} ;\n" +
-			"A : 'b' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "bc", debug);
-		assertEquals("bc\n", found);
-	}
-
-	@Test public void testOptionalLexerSingleElement() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println(input);} ;\n" +
-			"A : 'b'? 'c' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "bc", debug);
-		assertEquals("bc\n", found);
-	}
-
-	@Test public void testStarLexerSingleElement() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println(input);} ;\n" +
-			"A : 'b'* 'c' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "bbbbc", debug);
-		assertEquals("bbbbc\n", found);
-		found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "c", debug);
-		assertEquals("c\n", found);
-	}
-
-	@Test public void testPlusLexerSingleElement() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println(input);} ;\n" +
-			"A : 'b'+ 'c' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "bbbbc", debug);
-		assertEquals("bbbbc\n", found);
-	}
-
-	@Test public void testOptionalSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : ('a'|'b')? 'c' {System.out.println(input);} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "ac", debug);
-		assertEquals("ac\n", found);
-	}
-
-	@Test public void testStarSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : ('a'|'b')* 'c' {System.out.println(input);} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abaac", debug);
-		assertEquals("abaac\n", found);
-	}
-
-	@Test public void testPlusSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : ('a'|'b')+ 'c' {System.out.println(input);} ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abaac", debug);
-		assertEquals("abaac\n", found);
-	}
-
-	@Test public void testLexerOptionalSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println(input);} ;\n" +
-			"A : ('a'|'b')? 'c' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "ac", debug);
-		assertEquals("ac\n", found);
-	}
-
-	@Test public void testLexerStarSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println(input);} ;\n" +
-			"A : ('a'|'b')* 'c' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abaac", debug);
-		assertEquals("abaac\n", found);
-	}
-
-	@Test public void testLexerPlusSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println(input);} ;\n" +
-			"A : ('a'|'b')+ 'c' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "abaac", debug);
-		assertEquals("abaac\n", found);
-	}
-
-	@Test public void testNotCharSet() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println($A.text);} ;\n" +
-			"A : ~('b'|'c') ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", debug);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testNotCharSetWithLabel() throws Exception {
-		// This doesn't work in lexer yet.
-		// Generates: h=input.LA(1); but h is defined as a Token
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println($A.text);} ;\n" +
-			"A : h=~('b'|'c') ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", debug);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testNotCharSetWithRuleRef() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println($A.text);} ;\n" +
-			"A : ~('a'|B) ;\n" +
-			"B : 'b' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", debug);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testNotCharSetWithRuleRef2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println($A.text);} ;\n" +
-			"A : ~('a'|B) ;\n" +
-			"B : 'b'|'c' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", debug);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testNotCharSetWithRuleRef3() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println($A.text);} ;\n" +
-			"A : ('a'|B) ;\n" +
-			"fragment\n" +
-			"B : ~('a'|'c') ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", debug);
-		assertEquals("x\n", found);
-	}
-
-	@Test public void testNotCharSetWithRuleRef4() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : A {System.out.println($A.text);} ;\n" +
-			"A : ('a'|B) ;\n" +
-			"fragment\n" +
-			"B : ~('a'|C) ;\n" +
-			"fragment\n" +
-			"C : 'c'|'d' ;\n ";
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-								  "a", "x", debug);
-		assertEquals("x\n", found);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSymbolDefinitions.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestSymbolDefinitions.java
deleted file mode 100644
index dcf87b9..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSymbolDefinitions.java
+++ /dev/null
@@ -1,913 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.analysis.Label;
-import org.antlr.codegen.CodeGenerator;
-import org.stringtemplate.v4.ST;
-import org.antlr.tool.*;
-import org.junit.Test;
-
-import java.util.*;
-
-public class TestSymbolDefinitions extends BaseTest {
-
-	/** Public default constructor used by TestRig */
-	public TestSymbolDefinitions() {
-	}
-
-	@Test public void testParserSimpleTokens() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar t;\n"+
-				"a : A | B;\n" +
-				"b : C ;");
-		String rules = "a, b";
-		String tokenNames = "A, B, C";
-		checkSymbols(g, rules, tokenNames);
-	}
-
-	@Test public void testParserTokensSection() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar t;\n" +
-				"tokens {\n" +
-				"  C;\n" +
-				"  D;" +
-				"}\n"+
-				"a : A | B;\n" +
-				"b : C ;");
-		String rules = "a, b";
-		String tokenNames = "A, B, C, D";
-		checkSymbols(g, rules, tokenNames);
-	}
-
-	@Test public void testLexerTokensSection() throws Exception {
-		Grammar g = new Grammar(
-				"lexer grammar t;\n" +
-				"tokens {\n" +
-				"  C;\n" +
-				"  D;" +
-				"}\n"+
-				"A : 'a';\n" +
-				"C : 'c' ;");
-		String rules = "A, C, Tokens";
-		String tokenNames = "A, C, D";
-		checkSymbols(g, rules, tokenNames);
-	}
-
-	@Test public void testTokensSectionWithAssignmentSection() throws Exception {
-		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"tokens {\n" +
-				"  C='c';\n" +
-				"  D;" +
-				"}\n"+
-				"a : A | B;\n" +
-				"b : C ;");
-		String rules = "a, b";
-		String tokenNames = "A, B, C, D, 'c'";
-		checkSymbols(g, rules, tokenNames);
-	}
-
-	@Test public void testCombinedGrammarLiterals() throws Exception {
-		Grammar g = new Grammar(
-				"grammar t;\n"+
-				"a : 'begin' b 'end';\n" +
-				"b : C ';' ;\n" +
-				"ID : 'a' ;\n" +
-				"FOO : 'foo' ;\n" +  // "foo" is not a token name
-				"C : 'c' ;\n");        // nor is 'c'
-		String rules = "a, b";
-		String tokenNames = "C, FOO, ID, 'begin', 'end', ';'";
-		checkSymbols(g, rules, tokenNames);
-	}
-
-	@Test public void testLiteralInParserAndLexer() throws Exception {
-		// 'x' is token and char in lexer rule
-		Grammar g = new Grammar(
-				"grammar t;\n" +
-				"a : 'x' E ; \n" +
-				"E: 'x' '0' ;\n");        // nor is 'c'
-		String literals = "['x']";
-		String foundLiterals = g.getStringLiterals().toString();
-		assertEquals(literals, foundLiterals);
-
-		String implicitLexer =
-			"lexer grammar t;" + newline +
-			"T__5 : 'x' ;" + newline +
-			"" + newline +
-			"// $ANTLR src \"<string>\" 3" + newline +
-			"E: 'x' '0' ;";
-		assertEquals(implicitLexer, g.getLexerGrammar());
-	}
-
-	@Test public void testCombinedGrammarWithRefToLiteralButNoTokenIDRef() throws Exception {
-		Grammar g = new Grammar(
-				"grammar t;\n"+
-				"a : 'a' ;\n" +
-				"A : 'a' ;\n");
-		String rules = "a";
-		String tokenNames = "A, 'a'";
-		checkSymbols(g, rules, tokenNames);
-	}
-
-	@Test public void testSetDoesNotMissTokenAliases() throws Exception {
-		Grammar g = new Grammar(
-				"grammar t;\n"+
-				"a : 'a'|'b' ;\n" +
-				"A : 'a' ;\n" +
-				"B : 'b' ;\n");
-		String rules = "a";
-		String tokenNames = "A, 'a', B, 'b'";
-		checkSymbols(g, rules, tokenNames);
-	}
-
-	@Test public void testSimplePlusEqualLabel() throws Exception {
-		Grammar g = new Grammar(
-				"parser grammar t;\n"+
-				"a : ids+=ID ( COMMA ids+=ID )* ;\n");
-		String rule = "a";
-		String tokenLabels = "ids";
-		String ruleLabels = null;
-		checkPlusEqualsLabels(g, rule, tokenLabels, ruleLabels);
-	}
-
-	@Test public void testMixedPlusEqualLabel() throws Exception {
-		Grammar g = new Grammar(
-				"grammar t;\n"+
-				"options {output=AST;}\n" +
-				"a : id+=ID ( ',' e+=expr )* ;\n" +
-				"expr : 'e';\n" +
-				"ID : 'a';\n");
-		String rule = "a";
-		String tokenLabels = "id";
-		String ruleLabels = "e";
-		checkPlusEqualsLabels(g, rule, tokenLabels, ruleLabels);
-	}
-
-	// T E S T  L I T E R A L  E S C A P E S
-
-	@Test public void testParserCharLiteralWithEscape() throws Exception {
-		Grammar g = new Grammar(
-				"grammar t;\n"+
-				"a : '\\n';\n");
-		Set literals = g.getStringLiterals();
-		// must store literals how they appear in the antlr grammar
-		assertEquals("'\\n'", literals.toArray()[0]);
-	}
-
-	@Test public void testTokenInTokensSectionAndTokenRuleDef() throws Exception {
-		// this must return A not I to the parser; calling a nonfragment rule
-		// from a nonfragment rule does not set the overall token.
-		String grammar =
-			"grammar P;\n" +
-			"tokens { B='}'; }\n"+
-			"a : A B {System.out.println(input);} ;\n"+
-			"A : 'a' ;\n" +
-			"B : '}' ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-								  "a", "a}", false);
-		assertEquals("a}\n", found);
-	}
-
-	@Test public void testTokenInTokensSectionAndTokenRuleDef2() throws Exception {
-		// this must return A not I to the parser; calling a nonfragment rule
-		// from a nonfragment rule does not set the overall token.
-		String grammar =
-			"grammar P;\n" +
-			"tokens { B='}'; }\n"+
-			"a : A '}' {System.out.println(input);} ;\n"+
-			"A : 'a' ;\n" +
-			"B : '}' {/* */} ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
-		String found = execParser("P.g", grammar, "PParser", "PLexer",
-								  "a", "a}", false);
-		assertEquals("a}\n", found);
-	}
-
-
-	@Test public void testRefToRuleWithNoReturnValue() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-
-		String grammarStr =
-			"grammar P;\n" +
-			"a : x=b ;\n" +
-			"b : B ;\n" +
-			"B : 'b' ;\n";
-		Grammar g = new Grammar(grammarStr);
-
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		ST recogST = generator.genRecognizer();
-		String code = recogST.render();
-		assertTrue("not expecting label", code.indexOf("x=b();")<0);
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	// T E S T  E R R O R S
-
-	@Test public void testParserStringLiterals() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-				"parser grammar t;\n"+
-				"a : 'begin' b ;\n" +
-				"b : C ;");
-		Object expectedArg = "'begin'";
-		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testParserCharLiterals() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-				"parser grammar t;\n"+
-				"a : '(' b ;\n" +
-				"b : C ;");
-		Object expectedArg = "'('";
-		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testEmptyNotChar() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-				"grammar foo;\n" +
-				"a : (~'x')+ ;\n");
-		g.buildNFA();
-		Object expectedArg = "'x'";
-		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testEmptyNotToken() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-				"grammar foo;\n" +
-				"a : (~A)+ ;\n");
-		g.buildNFA();
-		Object expectedArg = "A";
-		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testEmptyNotSet() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-				"grammar foo;\n" +
-				"a : (~(A|B))+ ;\n");
-		g.buildNFA();
-		Object expectedArg = null;
-		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testStringLiteralInParserTokensSection() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"parser grammar t;\n" +
-				"tokens {\n" +
-				"  B='begin';\n" +
-				"}\n"+
-				"a : A B;\n" +
-				"b : C ;");
-		Object expectedArg = "'begin'";
-		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testCharLiteralInParserTokensSection() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"parser grammar t;\n" +
-				"tokens {\n" +
-				"  B='(';\n" +
-				"}\n"+
-				"a : A B;\n" +
-				"b : C ;");
-		Object expectedArg = "'('";
-		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testCharLiteralInLexerTokensSection() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"lexer grammar t;\n" +
-				"tokens {\n" +
-				"  B='(';\n" +
-				"}\n"+
-				"ID : 'a';\n");
-		Object expectedArg = "'('";
-		int expectedMsgID = ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testRuleRedefinition() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"parser grammar t;\n"+
-				"a : A | B;\n" +
-				"a : C ;");
-
-		Object expectedArg = "a";
-		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testLexerRuleRedefinition() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"lexer grammar t;\n"+
-				"ID : 'a' ;\n" +
-				"ID : 'd' ;");
-
-		Object expectedArg = "ID";
-		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testCombinedRuleRedefinition() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"grammar t;\n"+
-				"x : ID ;\n" +
-				"ID : 'a' ;\n" +
-				"x : ID ID ;");
-
-		Object expectedArg = "x";
-		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testUndefinedToken() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"grammar t;\n"+
-				"x : ID ;");
-
-		Object expectedArg = "ID";
-		int expectedMsgID = ErrorManager.MSG_NO_TOKEN_DEFINITION;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsWarning(equeue, expectedMessage);
-	}
-
-	@Test public void testUndefinedTokenOkInParser() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"parser grammar t;\n"+
-				"x : ID ;");
-		assertEquals("should not be an error", 0, equeue.errors.size());
-	}
-
-	@Test public void testUndefinedRule() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"grammar t;\n"+
-				"x : r ;");
-
-		Object expectedArg = "r";
-		int expectedMsgID = ErrorManager.MSG_UNDEFINED_RULE_REF;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testLexerRuleInParser() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"parser grammar t;\n"+
-				"X : ;");
-
-		Object expectedArg = "X";
-		int expectedMsgID = ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testParserRuleInLexer() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"lexer grammar t;\n"+
-				"a : ;");
-
-		Object expectedArg = "a";
-		int expectedMsgID = ErrorManager.MSG_PARSER_RULES_NOT_ALLOWED;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testRuleScopeConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-			"scope a {\n" +
-			"  int n;\n" +
-			"}\n" +
-			"a : \n" +
-			"  ;\n");
-
-		Object expectedArg = "a";
-		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testTokenRuleScopeConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-			"scope ID {\n" +
-			"  int n;\n" +
-			"}\n" +
-			"ID : 'a'\n" +
-			"  ;\n");
-
-		Object expectedArg = "ID";
-		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testTokenScopeConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"grammar t;\n"+
-			"tokens { ID; }\n"+
-			"scope ID {\n" +
-			"  int n;\n" +
-			"}\n" +
-			"a : \n" +
-			"  ;\n");
-
-		Object expectedArg = "ID";
-		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testTokenRuleScopeConflictInLexerGrammar() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"scope ID {\n" +
-			"  int n;\n" +
-			"}\n" +
-			"ID : 'a'\n" +
-			"  ;\n");
-
-		Object expectedArg = "ID";
-		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testTokenLabelScopeConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"scope s {\n" +
-			"  int n;\n" +
-			"}\n" +
-			"a : s=ID \n" +
-			"  ;\n");
-
-		Object expectedArg = "s";
-		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testRuleLabelScopeConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"scope s {\n" +
-			"  int n;\n" +
-			"}\n" +
-			"a : s=b \n" +
-			"  ;\n" +
-			"b : ;\n");
-
-		Object expectedArg = "s";
-		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testLabelAndRuleNameConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : c=b \n" +
-			"  ;\n" +
-			"b : ;\n" +
-			"c : ;\n");
-
-		Object expectedArg = "c";
-		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testLabelAndTokenNameConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a : ID=b \n" +
-			"  ;\n" +
-			"b : ID ;\n" +
-			"c : ;\n");
-
-		Object expectedArg = "ID";
-		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_TOKEN;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testLabelAndArgConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a[int i] returns [int x]: i=ID \n" +
-			"  ;\n");
-
-		Object expectedArg = "i";
-		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testLabelAndParameterConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a[int i] returns [int x]: x=ID \n" +
-			"  ;\n");
-
-		Object expectedArg = "x";
-		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testLabelRuleScopeConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a\n" +
-			"scope {" +
-			"  int n;" +
-			"}\n" +
-			"  : n=ID\n" +
-			"  ;\n");
-
-		Object expectedArg = "n";
-		Object expectedArg2 = "a";
-		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testRuleScopeArgConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a[int n]\n" +
-			"scope {" +
-			"  int n;" +
-			"}\n" +
-			"  : \n" +
-			"  ;\n");
-
-		Object expectedArg = "n";
-		Object expectedArg2 = "a";
-		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testRuleScopeReturnValueConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a returns [int n]\n" +
-			"scope {" +
-			"  int n;" +
-			"}\n" +
-			"  : \n" +
-			"  ;\n");
-
-		Object expectedArg = "n";
-		Object expectedArg2 = "a";
-		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testRuleScopeRuleNameConflict() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-			"parser grammar t;\n"+
-			"a\n" +
-			"scope {" +
-			"  int a;" +
-			"}\n" +
-			"  : \n" +
-			"  ;\n");
-
-		Object expectedArg = "a";
-		Object expectedArg2 = null;
-		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testBadGrammarOption() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Tool antlr = newTool();
-		Grammar g = new Grammar(antlr,
-								"grammar t;\n"+
-								"options {foo=3; language=Java;}\n" +
-								"a : 'a';\n");
-
-		Object expectedArg = "foo";
-		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testBadRuleOption() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"grammar t;\n"+
-				"a\n"+
-				"options {k=3; tokenVocab=blort;}\n" +
-				"  : 'a';\n");
-
-		Object expectedArg = "tokenVocab";
-		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testBadSubRuleOption() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue); // unique listener per thread
-		Grammar g = new Grammar(
-				"grammar t;\n"+
-				"a : ( options {k=3; language=Java;}\n" +
-				"    : 'a'\n" +
-				"    | 'b'\n" +
-				"    )\n" +
-				"  ;\n");
-		Object expectedArg = "language";
-		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkGrammarSemanticsError(equeue, expectedMessage);
-	}
-
-	@Test public void testTokenVocabStringUsedInLexer() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String tokens =
-			"';'=4\n";
-        mkdir(tmpdir);
-        writeFile(tmpdir, "T.tokens", tokens);
-
-		String importer =
-			"lexer grammar B; \n" +
-			"options\t{tokenVocab=T;} \n" +
-			"SEMI:';' ; \n" ;
-		writeFile(tmpdir, "B.g", importer);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/B.g",composite);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-
-		String expectedTokenIDToTypeMap = "[SEMI=4]";
-		String expectedStringLiteralToTypeMap = "{';'=4}";
-		String expectedTypeToTokenList = "[SEMI]";
-
-		assertEquals(expectedTokenIDToTypeMap,
-					 realElements(g.composite.tokenIDToTypeMap).toString());
-		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
-		assertEquals(expectedTypeToTokenList,
-					 realElements(g.composite.typeToTokenList).toString());
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	@Test public void testTokenVocabStringUsedInCombined() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		String tokens =
-			"';'=4\n";
-        mkdir(tmpdir);
-		writeFile(tmpdir, "T.tokens", tokens);
-
-		String importer =
-			"grammar B; \n" +
-			"options\t{tokenVocab=T;} \n" +
-			"SEMI:';' ; \n" ;
-		writeFile(tmpdir, "B.g", importer);
-		Tool antlr = newTool(new String[] {"-lib", tmpdir});
-		CompositeGrammar composite = new CompositeGrammar();
-		Grammar g = new Grammar(antlr,tmpdir+"/B.g",composite);
-		g.parseAndBuildAST();
-		g.composite.assignTokenTypes();
-
-		String expectedTokenIDToTypeMap = "[SEMI=4]";
-		String expectedStringLiteralToTypeMap = "{';'=4}";
-		String expectedTypeToTokenList = "[SEMI]";
-
-		assertEquals(expectedTokenIDToTypeMap,
-					 realElements(g.composite.tokenIDToTypeMap).toString());
-		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
-		assertEquals(expectedTypeToTokenList,
-					 realElements(g.composite.typeToTokenList).toString());
-
-		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
-	}
-
-	protected void checkPlusEqualsLabels(Grammar g,
-										 String ruleName,
-										 String tokenLabelsStr,
-										 String ruleLabelsStr)
-		throws Exception
-	{
-		// make sure expected += labels are there
-		Rule r = g.getRule(ruleName);
-		StringTokenizer st = new StringTokenizer(tokenLabelsStr, ", ");
-		Set tokenLabels = null;
-		while ( st.hasMoreTokens() ) {
-			if ( tokenLabels==null ) {
-				tokenLabels = new HashSet();
-			}
-			String labelName = st.nextToken();
-			tokenLabels.add(labelName);
-		}
-		Set ruleLabels = null;
-		if ( ruleLabelsStr!=null ) {
-			st = new StringTokenizer(ruleLabelsStr, ", ");
-			ruleLabels = new HashSet();
-			while ( st.hasMoreTokens() ) {
-				String labelName = st.nextToken();
-				ruleLabels.add(labelName);
-			}
-		}
-		assertTrue("token += labels mismatch; "+tokenLabels+"!="+r.tokenListLabels,
-				   (tokenLabels!=null && r.tokenListLabels!=null) ||
-				   (tokenLabels==null && r.tokenListLabels==null));
-		assertTrue("rule += labels mismatch; "+ruleLabels+"!="+r.ruleListLabels,
-				   (ruleLabels!=null && r.ruleListLabels!=null) ||
-				   (ruleLabels==null && r.ruleListLabels==null));
-		if ( tokenLabels!=null ) {
-			assertEquals(tokenLabels, r.tokenListLabels.keySet());
-		}
-		if ( ruleLabels!=null ) {
-			assertEquals(ruleLabels, r.ruleListLabels.keySet());
-		}
-	}
-
-	protected void checkSymbols(Grammar g,
-								String rulesStr,
-								String tokensStr)
-		throws Exception
-	{
-		Set tokens = g.getTokenDisplayNames();
-
-		// make sure expected tokens are there
-		StringTokenizer st = new StringTokenizer(tokensStr, ", ");
-		while ( st.hasMoreTokens() ) {
-			String tokenName = st.nextToken();
-			assertTrue("token "+tokenName+" expected",
-					   g.getTokenType(tokenName)!=Label.INVALID);
-			tokens.remove(tokenName);
-		}
-		// make sure there are not any others (other than <EOF> etc...)
-		for (Iterator iter = tokens.iterator(); iter.hasNext();) {
-			String tokenName = (String) iter.next();
-			assertTrue("unexpected token name "+tokenName,
-					   g.getTokenType(tokenName)<Label.MIN_TOKEN_TYPE);
-		}
-
-		// make sure all expected rules are there
-		st = new StringTokenizer(rulesStr, ", ");
-		int n = 0;
-		while ( st.hasMoreTokens() ) {
-			String ruleName = st.nextToken();
-			assertNotNull("rule "+ruleName+" expected", g.getRule(ruleName));
-			n++;
-		}
-		Collection rules = g.getRules();
-		//System.out.println("rules="+rules);
-		// make sure there are no extra rules
-		assertEquals("number of rules mismatch; expecting "+n+"; found "+rules.size(), n, rules.size());
-
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSyntacticPredicateEvaluation.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestSyntacticPredicateEvaluation.java
deleted file mode 100644
index 9080726..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSyntacticPredicateEvaluation.java
+++ /dev/null
@@ -1,420 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.junit.Test;
-
-public class TestSyntacticPredicateEvaluation extends BaseTest {
-	@Test public void testTwoPredsWithNakedAlt() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : (a ';')+ ;\n" +
-			"a\n" +
-			"options {\n" +
-			"  k=1;\n" +
-			"}\n" +
-			"  : (b '.')=> b '.' {System.out.println(\"alt 1\");}\n" +
-			"  | (b)=> b {System.out.println(\"alt 2\");}\n" +
-			"  | c       {System.out.println(\"alt 3\");}\n" +
-			"  ;\n" +
-			"b\n" +
-			"@init {System.out.println(\"enter b\");}\n" +
-			"   : '(' 'x' ')' ;\n" +
-			"c\n" +
-			"@init {System.out.println(\"enter c\");}\n" +
-			"   : '(' c ')' | 'x' ;\n" +
-			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
-			"   ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "(x) ;", false);
-		String expecting =
-			"enter b\n" +
-			"enter b\n" +
-			"enter b\n" +
-			"alt 2\n";
-		assertEquals(expecting, found);
-
-		found = execParser("T.g", grammar, "TParser", "TLexer",
-			    "a", "(x). ;", false);
-		expecting =
-			"enter b\n" +
-			"enter b\n" +
-			"alt 1\n";
-		assertEquals(expecting, found);
-
-		found = execParser("T.g", grammar, "TParser", "TLexer",
-			    "a", "((x)) ;", false);
-		expecting =
-			"enter b\n" +
-			"enter b\n" +
-			"enter c\n" +
-			"enter c\n" +
-			"enter c\n" +
-			"alt 3\n";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testTwoPredsWithNakedAltNotLast() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : (a ';')+ ;\n" +
-			"a\n" +
-			"options {\n" +
-			"  k=1;\n" +
-			"}\n" +
-			"  : (b '.')=> b '.' {System.out.println(\"alt 1\");}\n" +
-			"  | c       {System.out.println(\"alt 2\");}\n" +
-			"  | (b)=> b {System.out.println(\"alt 3\");}\n" +
-			"  ;\n" +
-			"b\n" +
-			"@init {System.out.println(\"enter b\");}\n" +
-			"   : '(' 'x' ')' ;\n" +
-			"c\n" +
-			"@init {System.out.println(\"enter c\");}\n" +
-			"   : '(' c ')' | 'x' ;\n" +
-			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
-			"   ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "(x) ;", false);
-		String expecting =
-			"enter b\n" +
-			"enter c\n" +
-			"enter c\n" +
-			"alt 2\n";
-		assertEquals(expecting, found);
-
-		found = execParser("T.g", grammar, "TParser", "TLexer",
-			    "a", "(x). ;", false);
-		expecting =
-			"enter b\n" +
-			"enter b\n" +
-			"alt 1\n";
-		assertEquals(expecting, found);
-
-		found = execParser("T.g", grammar, "TParser", "TLexer",
-			    "a", "((x)) ;", false);
-		expecting =
-			"enter b\n" +
-			"enter c\n" +
-			"enter c\n" +
-			"enter c\n" +
-			"alt 2\n";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testLexerPred() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : A ;\n" +
-			"A options {k=1;}\n" + // force backtracking
-			"  : (B '.')=>B '.' {System.out.println(\"alt1\");}\n" +
-			"  | B {System.out.println(\"alt2\");}" +
-			"  ;\n" +
-			"fragment\n" +
-			"B : 'x'+ ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "s", "xxx", false);
-
-		assertEquals("alt2\n", found);
-
-		found = execParser("T.g", grammar, "TParser", "TLexer",
-			    "s", "xxx.", false);
-
-		assertEquals("alt1\n", found);
-	}
-
-	@Test public void testLexerWithPredLongerThanAlt() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : A ;\n" +
-			"A options {k=1;}\n" + // force backtracking
-			"  : (B '.')=>B {System.out.println(\"alt1\");}\n" +
-			"  | B {System.out.println(\"alt2\");}" +
-			"  ;\n" +
-			"D : '.' {System.out.println(\"D\");} ;\n" +
-			"fragment\n" +
-			"B : 'x'+ ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "s", "xxx", false);
-
-		assertEquals("alt2\n", found);
-
-		found = execParser("T.g", grammar, "TParser", "TLexer",
-			    "s", "xxx.", false);
-
-		assertEquals("alt1\nD\n", found);
-	}
-
-	@Test public void testLexerPredCyclicPrediction() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : A ;\n" +
-			"A : (B)=>(B|'y'+) {System.out.println(\"alt1\");}\n" +
-			"  | B {System.out.println(\"alt2\");}\n" +
-			"  | 'y'+ ';'" +
-			"  ;\n" +
-			"fragment\n" +
-			"B : 'x'+ ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "s", "xxx", false);
-
-		assertEquals("alt1\n", found);
-	}
-
-	@Test public void testLexerPredCyclicPrediction2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : A ;\n" +
-			"A : (B '.')=>(B|'y'+) {System.out.println(\"alt1\");}\n" +
-			"  | B {System.out.println(\"alt2\");}\n" +
-			"  | 'y'+ ';'" +
-			"  ;\n" +
-			"fragment\n" +
-			"B : 'x'+ ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "s", "xxx", false);
-		assertEquals("alt2\n", found);
-	}
-
-	@Test public void testSimpleNestedPred() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : (expr ';')+ ;\n" +
-			"expr\n" +
-			"options {\n" +
-			"  k=1;\n" +
-			"}\n" +
-			"@init {System.out.println(\"enter expr \"+input.LT(1).getText());}\n" +
-			"  : (atom 'x') => atom 'x'\n" +
-			"  | atom\n" +
-			";\n" +
-			"atom\n" +
-			"@init {System.out.println(\"enter atom \"+input.LT(1).getText());}\n" +
-			"   : '(' expr ')'\n" +
-			"   | INT\n" +
-			"   ;\n" +
-			"INT: '0'..'9'+ ;\n" +
-			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
-			"   ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "s", "(34)x;", false);
-		String expecting =
-			"enter expr (\n" +
-			"enter atom (\n" +
-			"enter expr 34\n" +
-			"enter atom 34\n" +
-			"enter atom 34\n" +
-			"enter atom (\n" +
-			"enter expr 34\n" +
-			"enter atom 34\n" +
-			"enter atom 34\n";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testTripleNestedPredInLexer() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"s : (.)+ {System.out.println(\"done\");} ;\n" +
-			"EXPR\n" +
-			"options {\n" +
-			"  k=1;\n" +
-			"}\n" +
-			"@init {System.out.println(\"enter expr \"+(char)input.LT(1));}\n" +
-			"  : (ATOM 'x') => ATOM 'x' {System.out.println(\"ATOM x\");}\n" +
-			"  | ATOM {System.out.println(\"ATOM \"+$ATOM.text);}\n" +
-			";\n" +
-			"fragment ATOM\n" +
-			"@init {System.out.println(\"enter atom \"+(char)input.LT(1));}\n" +
-			"   : '(' EXPR ')'\n" +
-			"   | INT\n" +
-			"   ;\n" +
-			"fragment INT: '0'..'9'+ ;\n" +
-			"fragment WS : (' '|'\\n')+ \n" +
-			"   ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "s", "((34)x)x", false);
-		String expecting = // has no memoization
-			"enter expr (\n" +
-			"enter atom (\n" +
-			"enter expr (\n" +
-			"enter atom (\n" +
-			"enter expr 3\n" +
-			"enter atom 3\n" +
-			"enter atom 3\n" +
-			"enter atom (\n" +
-			"enter expr 3\n" +
-			"enter atom 3\n" +
-			"enter atom 3\n" +
-			"enter atom (\n" +
-			"enter expr (\n" +
-			"enter atom (\n" +
-			"enter expr 3\n" +
-			"enter atom 3\n" +
-			"enter atom 3\n" +
-			"enter atom (\n" +
-			"enter expr 3\n" +
-			"enter atom 3\n" +
-			"enter atom 3\n" +
-			"ATOM 34\n" +
-			"ATOM x\n" +
-			"ATOM x\n" +
-			"done\n";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testTreeParserWithSynPred() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT+ (PERIOD|SEMI);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"SEMI : ';' ;\n"+
-			"PERIOD : '.' ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n" +
-			"options {k=1; backtrack=true; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ID INT+ PERIOD {System.out.print(\"alt 1\");}"+
-			"  | ID INT+ SEMI   {System.out.print(\"alt 2\");}\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 2 3;");
-		assertEquals("alt 2\n", found);
-	}
-
-	@Test public void testTreeParserWithNestedSynPred() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT+ (PERIOD|SEMI);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"SEMI : ';' ;\n"+
-			"PERIOD : '.' ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		// backtracks in a and b due to k=1
-		String treeGrammar =
-			"tree grammar TP;\n" +
-			"options {k=1; backtrack=true; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ID b {System.out.print(\" a:alt 1\");}"+
-			"  | ID INT+ SEMI   {System.out.print(\" a:alt 2\");}\n" +
-			"  ;\n" +
-			"b : INT PERIOD  {System.out.print(\"b:alt 1\");}" + // choose this alt for just one INT
-			"  | INT+ PERIOD {System.out.print(\"b:alt 2\");}" +
-			"  ;";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 2 3.");
-		assertEquals("b:alt 2 a:alt 1\n", found);
-	}
-
-	@Test public void testSynPredWithOutputTemplate() throws Exception {
-		// really just seeing if it will compile
-		String grammar =
-			"grammar T;\n" +
-			"options {output=template;}\n" +
-			"a\n" +
-			"options {\n" +
-			"  k=1;\n" +
-			"}\n" +
-			"  : ('x'+ 'y')=> 'x'+ 'y' -> template(a={$text}) <<1:<a>;>>\n" +
-			"  | 'x'+ 'z' -> template(a={$text}) <<2:<a>;>>\n"+
-			"  ;\n" +
-			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
-			"   ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "xxxy", false);
-
-		assertEquals("1:xxxy;\n", found);
-	}
-
-	@Test public void testSynPredWithOutputAST() throws Exception {
-		// really just seeing if it will compile
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a\n" +
-			"options {\n" +
-			"  k=1;\n" +
-			"}\n" +
-			"  : ('x'+ 'y')=> 'x'+ 'y'\n" +
-			"  | 'x'+ 'z'\n"+
-			"  ;\n" +
-			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
-			"   ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "xxxy", false);
-
-		assertEquals("x x x y\n", found);
-	}
-
-	@Test public void testOptionalBlockWithSynPred() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-				"\n" +
-				"a : ( (b)=> b {System.out.println(\"b\");})? b ;\n" +
-				"b : 'x' ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "xx", false);
-		assertEquals("b\n", found);
-		found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "x", false);
-		assertEquals("", found);
-	}
-
-	@Test public void testSynPredK2() throws Exception {
-		// all manually specified syn predicates are gated (i.e., forced
-		// to execute).
-		String grammar =
-			"grammar T;\n" +
-				"\n" +
-				"a : (b)=> b {System.out.println(\"alt1\");} | 'a' 'c' ;\n" +
-				"b : 'a' 'b' ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "ab", false);
-
-		assertEquals("alt1\n", found);
-	}
-
-	@Test public void testSynPredKStar() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-				"\n" +
-				"a : (b)=> b {System.out.println(\"alt1\");} | 'a'+ 'c' ;\n" +
-				"b : 'a'+ 'b' ;\n" ;
-		String found = execParser("T.g", grammar, "TParser", "TLexer",
-				    "a", "aaab", false);
-
-		assertEquals("alt1\n", found);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSyntaxErrors.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestSyntaxErrors.java
deleted file mode 100644
index 31ad9f2..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestSyntaxErrors.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2011 Terence Parr
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-package org.antlr.test;
-
-import org.junit.Test;
-
-/** test runtime parse errors */
-public class TestSyntaxErrors extends BaseTest {
-	@Test public void testLL2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : 'a' 'b'" +
-			"  | 'a' 'c'" +
-			";\n" +
-			"q : 'e' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "ae", false);
-		String expecting = "line 1:1 no viable alternative at input 'e'\n";
-		String result = stderrDuringParse;
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testLL3() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : 'a' 'b'* 'c'" +
-			"  | 'a' 'b' 'd'" +
-			"  ;\n" +
-			"q : 'e' ;\n";
-		System.out.println(grammar);
-		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "abe", false);
-		String expecting = "line 1:2 no viable alternative at input 'e'\n";
-		String result = stderrDuringParse;
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testLLStar() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : 'a'+ 'b'" +
-			"  | 'a'+ 'c'" +
-			";\n" +
-			"q : 'e' ;\n";
-		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aaae", false);
-		String expecting = "line 1:3 no viable alternative at input 'e'\n";
-		String result = stderrDuringParse;
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testSynPred() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"a : (e '.')=> e '.'" +
-			"  | (e ';')=> e ';'" +
-			"  | 'z'" +
-			"  ;\n" +
-			"e : '(' e ')'" +
-			"  | 'i'" +
-			"  ;\n";
-		System.out.println(grammar);
-		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "((i))z", false);
-		String expecting = "line 1:0 no viable alternative at input '('\n";
-		String result = stderrDuringParse;
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testLL1ErrorInfo() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"start : animal (AND acClass)? service EOF;\n" +
-			"animal : (DOG | CAT );\n" +
-			"service : (HARDWARE | SOFTWARE) ;\n" +
-			"AND : 'and';\n" +
-			"DOG : 'dog';\n" +
-			"CAT : 'cat';\n" +
-			"HARDWARE: 'hardware';\n" +
-			"SOFTWARE: 'software';\n" +
-			"WS : ' ' {skip();} ;" +
-			"acClass\n" +
-			"@init\n" +
-			"{ System.out.println(computeContextSensitiveRuleFOLLOW().toString(tokenNames)); }\n" +
-			"  : ;\n";
-		String result = execParser("T.g", grammar, "TParser", "TLexer", "start", "dog and software", false);
-		String expecting = "{HARDWARE,SOFTWARE}\n";
-		assertEquals(expecting, result);
-	}
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTemplates.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestTemplates.java
deleted file mode 100644
index 8026319..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTemplates.java
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.grammar.v3.ANTLRParser;
-import org.antlr.grammar.v3.ActionTranslator;
-import org.antlr.runtime.CommonToken;
-import org.stringtemplate.v4.ST;
-import org.stringtemplate.v4.STGroup;
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.GrammarSemanticsMessage;
-import org.antlr.tool.Message;
-import org.junit.Test;
-
-/** Test templates in actions; %... shorthands */
-public class TestTemplates extends BaseTest {
-	private static final String LINE_SEP = System.getProperty("line.separator");
-
-	@Test
-    public void testTemplateConstructor() throws Exception {
-		String action = "x = %foo(name={$ID.text});";
-		String expecting = "x = templateLib.getInstanceOf(\"foo\"," +
-			"new STAttrMap().put(\"name\", (ID1!=null?ID1.getText():null)));";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {\n" +
-			"    output=template;\n" +
-			"}\n" +
-			"\n" +
-			"a : ID {"+action+"}\n" +
-			"  ;\n" +
-			"\n" +
-			"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-										"a",
-										new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-		STGroup templates =
-			new STGroup();
-		ST actionST = new ST(templates, rawTranslation);
-		String found = actionST.render();
-
-		assertNoErrors(equeue);
-
-		assertEquals(expecting, found);
-	}
-
-	@Test
-    public void testTemplateConstructorNoArgs() throws Exception {
-		String action = "x = %foo();";
-		String expecting = "x = templateLib.getInstanceOf(\"foo\");";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {\n" +
-			"    output=template;\n" +
-			"}\n" +
-			"\n" +
-			"a : ID {"+action+"}\n" +
-			"  ;\n" +
-			"\n" +
-			"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-										"a",
-										new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-		STGroup templates =
-			new STGroup();
-		ST actionST = new ST(templates, rawTranslation);
-		String found = actionST.render();
-
-		assertNoErrors(equeue);
-
-		assertEquals(expecting, found);
-	}
-
-	@Test
-    public void testIndirectTemplateConstructor() throws Exception {
-		String action = "x = %({\"foo\"})(name={$ID.text});";
-		String expecting = "x = templateLib.getInstanceOf(\"foo\"," +
-			"new STAttrMap().put(\"name\", (ID1!=null?ID1.getText():null)));";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {\n" +
-			"    output=template;\n" +
-			"}\n" +
-			"\n" +
-			"a : ID {"+action+"}\n" +
-			"  ;\n" +
-			"\n" +
-			"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-										"a",
-										new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-		STGroup templates =
-			new STGroup();
-		ST actionST = new ST(templates, rawTranslation);
-		String found = actionST.render();
-
-		assertNoErrors(equeue);
-
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testStringConstructor() throws Exception {
-		String action = "x = %{$ID.text};";
-		String expecting = "x = new StringTemplate(templateLib,(ID1!=null?ID1.getText():null));";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {\n" +
-			"    output=template;\n" +
-			"}\n" +
-			"\n" +
-			"a : ID {"+action+"}\n" +
-			"  ;\n" +
-			"\n" +
-			"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-																	 "a",
-																	 new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-		STGroup templates =
-			new STGroup();
-		ST actionST = new ST(templates, rawTranslation);
-		String found = actionST.render();
-
-		assertNoErrors(equeue);
-
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testSetAttr() throws Exception {
-		String action = "%x.y = z;";
-		String expecting = "(x).setAttribute(\"y\", z);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {\n" +
-			"    output=template;\n" +
-			"}\n" +
-			"\n" +
-			"a : ID {"+action+"}\n" +
-			"  ;\n" +
-			"\n" +
-			"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator =
-			new ActionTranslator(generator,
-										"a",
-										new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-		STGroup templates =
-			new STGroup();
-		ST actionST = new ST(templates, rawTranslation);
-		String found = actionST.render();
-
-		assertNoErrors(equeue);
-
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testSetAttrOfExpr() throws Exception {
-		String action = "%{foo($ID.text).getST()}.y = z;";
-		String expecting = "(foo((ID1!=null?ID1.getText():null)).getST()).setAttribute(\"y\", z);";
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {\n" +
-			"    output=template;\n" +
-			"}\n" +
-			"\n" +
-			"a : ID {"+action+"}\n" +
-			"  ;\n" +
-			"\n" +
-			"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-		ActionTranslator translator = new ActionTranslator(generator,
-																	 "a",
-																	 new CommonToken(ANTLRParser.ACTION,action),1);
-		String rawTranslation =
-			translator.translate();
-		STGroup templates =
-			new STGroup();
-		ST actionST = new ST(templates, rawTranslation);
-		String found = actionST.render();
-
-		assertNoErrors(equeue);
-
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testSetAttrOfExprInMembers() throws Exception {
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {\n" +
-			"    output=template;\n" +
-			"}\n" +
-			"@members {\n" +
-			"%code.instr = o;" + // must not get null ptr!
-			"}\n" +
-			"a : ID\n" +
-			"  ;\n" +
-			"\n" +
-			"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-
-		assertNoErrors(equeue);
-	}
-
-	@Test public void testCannotHaveSpaceBeforeDot() throws Exception {
-		String action = "%x .y = z;";
-		String expecting = null;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {\n" +
-			"    output=template;\n" +
-			"}\n" +
-			"\n" +
-			"a : ID {"+action+"}\n" +
-			"  ;\n" +
-			"\n" +
-			"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-
-		int expectedMsgID = ErrorManager.MSG_INVALID_TEMPLATE_ACTION;
-		Object expectedArg = "%x";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
-	}
-
-	@Test public void testCannotHaveSpaceAfterDot() throws Exception {
-		String action = "%x. y = z;";
-		String expecting = null;
-
-		ErrorQueue equeue = new ErrorQueue();
-		ErrorManager.setErrorListener(equeue);
-		Grammar g = new Grammar(
-			"grammar t;\n" +
-			"options {\n" +
-			"    output=template;\n" +
-			"}\n" +
-			"\n" +
-			"a : ID {"+action+"}\n" +
-			"  ;\n" +
-			"\n" +
-			"ID : 'a';\n");
-		Tool antlr = newTool();
-		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-		g.setCodeGenerator(generator);
-		generator.genRecognizer(); // forces load of templates
-
-		int expectedMsgID = ErrorManager.MSG_INVALID_TEMPLATE_ACTION;
-		Object expectedArg = "%x.";
-		GrammarSemanticsMessage expectedMessage =
-			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
-		checkError(equeue, expectedMessage);
-	}
-
-	protected void checkError(ErrorQueue equeue,
-							  GrammarSemanticsMessage expectedMessage)
-		throws Exception
-	{
-		/*
-		System.out.println(equeue.infos);
-		System.out.println(equeue.warnings);
-		System.out.println(equeue.errors);
-		*/
-		Message foundMsg = null;
-		for (int i = 0; i < equeue.errors.size(); i++) {
-			Message m = (Message)equeue.errors.get(i);
-			if (m.msgID==expectedMessage.msgID ) {
-				foundMsg = m;
-			}
-		}
-		assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size()>0);
-		assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1);
-		assertTrue("couldn't find expected error: "+expectedMessage.msgID, foundMsg!=null);
-		assertTrue("error is not a GrammarSemanticsMessage",
-				   foundMsg instanceof GrammarSemanticsMessage);
-		assertEquals(expectedMessage.arg, foundMsg.arg);
-		assertEquals(expectedMessage.arg2, foundMsg.arg2);
-	}
-
-	// S U P P O R T
-	private void assertNoErrors(ErrorQueue equeue) {
-		assertTrue("unexpected errors: "+equeue, equeue.errors.size()==0);
-	}
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTokenRewriteStream.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestTokenRewriteStream.java
deleted file mode 100644
index cc864ee..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTokenRewriteStream.java
+++ /dev/null
@@ -1,808 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.runtime.CharStream;
-import org.antlr.runtime.TokenRewriteStream;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.Interpreter;
-import org.junit.Test;
-
-public class TestTokenRewriteStream extends BaseTest {
-
-    /** Public default constructor used by TestRig */
-    public TestTokenRewriteStream() {
-    }
-
-	@Test public void testInsertBeforeIndex0() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.insertBefore(0, "0");
-		String result = tokens.toString();
-		String expecting = "0abc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testInsertAfterLastIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.insertAfter(2, "x");
-		String result = tokens.toString();
-		String expecting = "abcx";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void test2InsertBeforeAfterMiddleIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(1, "x");
-		tokens.insertAfter(1, "x");
-		String result = tokens.toString();
-		String expecting = "axbxc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceIndex0() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(0, "x");
-		String result = tokens.toString();
-		String expecting = "xbc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceLastIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, "x");
-		String result = tokens.toString();
-		String expecting = "abx";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceMiddleIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(1, "x");
-		String result = tokens.toString();
-		String expecting = "axc";
-		assertEquals(expecting, result);
-	}
-
-    @Test public void testToStringStartStop() throws Exception {
-        Grammar g = new Grammar(
-            "lexer grammar t;\n"+
-            "ID : 'a'..'z'+;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';';\n" +
-            "MUL : '*';\n" +
-            "ASSIGN : '=';\n" +
-            "WS : ' '+;\n");
-        // Tokens: 0123456789
-        // Input:  x = 3 * 0;
-        CharStream input = new ANTLRStringStream("x = 3 * 0;");
-        Interpreter lexEngine = new Interpreter(g, input);
-        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-        tokens.fill();
-        tokens.replace(4, 8, "0"); // replace 3 * 0 with 0
-
-        String result = tokens.toOriginalString();
-        String expecting = "x = 3 * 0;";
-        assertEquals(expecting, result);
-
-        result = tokens.toString();
-        expecting = "x = 0;";
-        assertEquals(expecting, result);
-
-        result = tokens.toString(0,9);
-        expecting = "x = 0;";
-        assertEquals(expecting, result);
-
-        result = tokens.toString(4,8);
-        expecting = "0";
-        assertEquals(expecting, result);
-    }
-
-    @Test public void testToStringStartStop2() throws Exception {
-        Grammar g = new Grammar(
-            "lexer grammar t;\n"+
-            "ID : 'a'..'z'+;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';';\n" +
-            "ASSIGN : '=';\n" +
-            "PLUS : '+';\n" +
-            "MULT : '*';\n" +
-            "WS : ' '+;\n");
-        // Tokens: 012345678901234567
-        // Input:  x = 3 * 0 + 2 * 0;
-        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
-        Interpreter lexEngine = new Interpreter(g, input);
-        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-        tokens.fill();
-
-        String result = tokens.toOriginalString();
-        String expecting = "x = 3 * 0 + 2 * 0;";
-        assertEquals(expecting, result);
-
-        tokens.replace(4, 8, "0"); // replace 3 * 0 with 0
-        result = tokens.toString();
-        expecting = "x = 0 + 2 * 0;";
-        assertEquals(expecting, result);
-
-        result = tokens.toString(0,17);
-        expecting = "x = 0 + 2 * 0;";
-        assertEquals(expecting, result);
-
-        result = tokens.toString(4,8);
-        expecting = "0";
-        assertEquals(expecting, result);
-
-        result = tokens.toString(0,8);
-        expecting = "x = 0";
-        assertEquals(expecting, result);
-
-        result = tokens.toString(12,16);
-        expecting = "2 * 0";
-        assertEquals(expecting, result);
-
-        tokens.insertAfter(17, "// comment");
-        result = tokens.toString(12,18);
-        expecting = "2 * 0;// comment";
-        assertEquals(expecting, result);
-
-        result = tokens.toString(0,8); // try again after insert at end
-        expecting = "x = 0";
-        assertEquals(expecting, result);
-    }
-
-
-    @Test public void test2ReplaceMiddleIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(1, "x");
-		tokens.replace(1, "y");
-		String result = tokens.toString();
-		String expecting = "ayc";
-		assertEquals(expecting, result);
-	}
-
-    @Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-        tokens.insertBefore(0, "_");
-        tokens.replace(1, "x");
-		tokens.replace(1, "y");
-		String result = tokens.toString();
-		String expecting = "_ayc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceThenDeleteMiddleIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(1, "x");
-		tokens.delete(1);
-		String result = tokens.toString();
-		String expecting = "ac";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testInsertInPriorReplace() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(0, 2, "x");
-		tokens.insertBefore(1, "0");
-		Exception exc = null;
-		try {
-			tokens.toString();
-		}
-		catch (IllegalArgumentException iae) {
-			exc = iae;
-		}
-		String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<5>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<4>,1:0]..[@2,2:2='c',<6>,1:2]:\"x\">";
-		assertNotNull(exc);
-		assertEquals(expecting, exc.getMessage());
-	}
-
-	@Test public void testInsertThenReplaceSameIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(0, "0");
-		tokens.replace(0, "x"); // supercedes insert at 0
-		String result = tokens.toString();
-		String expecting = "0xbc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void test2InsertMiddleIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(1, "x");
-		tokens.insertBefore(1, "y");
-		String result = tokens.toString();
-		String expecting = "ayxbc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void test2InsertThenReplaceIndex0() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(0, "x");
-		tokens.insertBefore(0, "y");
-		tokens.replace(0, "z");
-		String result = tokens.toString();
-		String expecting = "yxzbc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceThenInsertBeforeLastIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, "x");
-		tokens.insertBefore(2, "y");
-		String result = tokens.toString();
-		String expecting = "abyx";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testInsertThenReplaceLastIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(2, "y");
-		tokens.replace(2, "x");
-		String result = tokens.toString();
-		String expecting = "abyx";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceThenInsertAfterLastIndex() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, "x");
-		tokens.insertAfter(2, "y");
-		String result = tokens.toString();
-		String expecting = "abxy";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, 4, "x");
-		tokens.insertBefore(2, "y");
-		String result = tokens.toString();
-		String expecting = "abyxba";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, 4, "x");
-		tokens.insertBefore(4, "y"); // no effect; within range of a replace
-		Exception exc = null;
-		try {
-			tokens.toString();
-		}
-		catch (IllegalArgumentException iae) {
-			exc = iae;
-		}
-		String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<6>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<6>,1:2]..[@4,4:4='c',<6>,1:4]:\"x\">";
-		assertNotNull(exc);
-		assertEquals(expecting, exc.getMessage());
-	}
-
-	@Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, 4, "x");
-		tokens.insertAfter(4, "y");
-		String result = tokens.toString();
-		String expecting = "abxyba";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceAll() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(0, 6, "x");
-		String result = tokens.toString();
-		String expecting = "x";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceSubsetThenFetch() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, 4, "xyz");
-		String result = tokens.toString(0,6);
-		String expecting = "abxyzba";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testReplaceThenReplaceSuperset() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, 4, "xyz");
-		tokens.replace(3, 5, "foo"); // overlaps, error
-		Exception exc = null;
-		try {
-			tokens.toString();
-		}
-		catch (IllegalArgumentException iae) {
-			exc = iae;
-		}
-		String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<6>,1:3]..[@5,5:5='b',<5>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<6>,1:2]..[@4,4:4='c',<6>,1:4]:\"xyz\">";
-		assertNotNull(exc);
-		assertEquals(expecting, exc.getMessage());
-	}
-
-	@Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcccba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, 4, "xyz");
-		tokens.replace(1, 3, "foo"); // overlap, error
-		Exception exc = null;
-		try {
-			tokens.toString();
-		}
-		catch (IllegalArgumentException iae) {
-			exc = iae;
-		}
-		String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<5>,1:1]..[@3,3:3='c',<6>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<6>,1:2]..[@4,4:4='c',<6>,1:4]:\"xyz\">";
-		assertNotNull(exc);
-		assertEquals(expecting, exc.getMessage());
-	}
-
-	@Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcba");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, 2, "xyz");
-		tokens.replace(0, 3, "foo");
-		String result = tokens.toString();
-		String expecting = "fooa";
-		assertEquals(expecting, result);
-	}
-
-	// June 2, 2008 I rewrote core of rewrite engine; just adding lots more tests here
-
-	@Test public void testCombineInserts() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(0, "x");
-		tokens.insertBefore(0, "y");
-		String result = tokens.toString();
-		String expecting = "yxabc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testCombine3Inserts() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(1, "x");
-		tokens.insertBefore(0, "y");
-		tokens.insertBefore(1, "z");
-		String result = tokens.toString();
-		String expecting = "yazxbc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testCombineInsertOnLeftWithReplace() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(0, 2, "foo");
-		tokens.insertBefore(0, "z"); // combine with left edge of rewrite
-		String result = tokens.toString();
-		String expecting = "zfoo";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testCombineInsertOnLeftWithDelete() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.delete(0, 2);
-		tokens.insertBefore(0, "z"); // combine with left edge of rewrite
-		String result = tokens.toString();
-		String expecting = "z"; // make sure combo is not znull
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testDisjointInserts() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(1, "x");
-		tokens.insertBefore(2, "y");
-		tokens.insertBefore(0, "z");
-		String result = tokens.toString();
-		String expecting = "zaxbyc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testOverlappingReplace() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(1, 2, "foo");
-		tokens.replace(0, 3, "bar"); // wipes prior nested replace
-		String result = tokens.toString();
-		String expecting = "bar";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testOverlappingReplace2() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(0, 3, "bar");
-		tokens.replace(1, 2, "foo"); // cannot split earlier replace
-		Exception exc = null;
-		try {
-			tokens.toString();
-		}
-		catch (IllegalArgumentException iae) {
-			exc = iae;
-		}
-		String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<5>,1:1]..[@2,2:2='c',<6>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<4>,1:0]..[@3,3:3='c',<6>,1:3]:\"bar\">";
-		assertNotNull(exc);
-		assertEquals(expecting, exc.getMessage());
-	}
-
-	@Test public void testOverlappingReplace3() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(1, 2, "foo");
-		tokens.replace(0, 2, "bar"); // wipes prior nested replace
-		String result = tokens.toString();
-		String expecting = "barc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testOverlappingReplace4() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(1, 2, "foo");
-		tokens.replace(1, 3, "bar"); // wipes prior nested replace
-		String result = tokens.toString();
-		String expecting = "abar";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testDropIdenticalReplace() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(1, 2, "foo");
-		tokens.replace(1, 2, "foo"); // drop previous, identical
-		String result = tokens.toString();
-		String expecting = "afooc";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testDropPrevCoveredInsert() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(1, "foo");
-		tokens.replace(1, 2, "foo"); // kill prev insert
-		String result = tokens.toString();
-		String expecting = "afoofoo";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testLeaveAloneDisjointInsert() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(1, "x");
-		tokens.replace(2, 3, "foo");
-		String result = tokens.toString();
-		String expecting = "axbfoo";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testLeaveAloneDisjointInsert2() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abcc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.replace(2, 3, "foo");
-		tokens.insertBefore(1, "x");
-		String result = tokens.toString();
-		String expecting = "axbfoo";
-		assertEquals(expecting, result);
-	}
-
-	@Test public void testInsertBeforeTokenThenDeleteThatToken() throws Exception {
-		Grammar g = new Grammar(
-			"lexer grammar t;\n"+
-			"A : 'a';\n" +
-			"B : 'b';\n" +
-			"C : 'c';\n");
-		CharStream input = new ANTLRStringStream("abc");
-		Interpreter lexEngine = new Interpreter(g, input);
-		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
-		tokens.fill();
-		tokens.insertBefore(2, "y");
-		tokens.delete(2);
-		String result = tokens.toString();
-		String expecting = "aby";
-		assertEquals(expecting, result);
-	}
-
-}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTopologicalSort.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestTopologicalSort.java
deleted file mode 100644
index 43ffbee..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTopologicalSort.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.misc.Graph;
-import org.junit.Test;
-
-import java.util.List;
-
-/** Test topo sort in GraphNode. */
-public class TestTopologicalSort extends BaseTest {
-    @Test
-    public void testFairlyLargeGraph() throws Exception {
-        Graph g = new Graph();
-        g.addEdge("C", "F");
-        g.addEdge("C", "G");
-        g.addEdge("C", "A");
-        g.addEdge("C", "B");
-        g.addEdge("A", "D");
-        g.addEdge("A", "E");
-        g.addEdge("B", "E");
-        g.addEdge("D", "E");
-        g.addEdge("D", "F");
-        g.addEdge("F", "H");
-        g.addEdge("E", "F");
-
-        String expecting = "[H, F, E, D, G, A, B, C]";
-        List nodes = g.sort();
-        String result = nodes.toString();
-        assertEquals(expecting, result);
-    }
-
-    @Test
-    public void testCyclicGraph() throws Exception {
-        Graph g = new Graph();
-        g.addEdge("A", "B");
-        g.addEdge("B", "C");
-        g.addEdge("C", "A");
-        g.addEdge("C", "D");
-
-        String expecting = "[D, C, B, A]";
-        List nodes = g.sort();
-        String result = nodes.toString();
-        assertEquals(expecting, result);
-    }
-
-    @Test
-    public void testRepeatedEdges() throws Exception {
-        Graph g = new Graph();
-        g.addEdge("A", "B");
-        g.addEdge("B", "C");
-        g.addEdge("A", "B"); // dup
-        g.addEdge("C", "D");
-
-        String expecting = "[D, C, B, A]";
-        List nodes = g.sort();
-        String result = nodes.toString();
-        assertEquals(expecting, result);
-    }
-
-    @Test
-    public void testSimpleTokenDependence() throws Exception {
-        Graph g = new Graph();
-        g.addEdge("Java.g", "MyJava.tokens"); // Java feeds off manual token file
-        g.addEdge("Java.tokens", "Java.g");        
-        g.addEdge("Def.g", "Java.tokens");    // walkers feed off generated tokens
-        g.addEdge("Ref.g", "Java.tokens");
-
-        String expecting = "[MyJava.tokens, Java.g, Java.tokens, Ref.g, Def.g]";
-        List nodes = g.sort();
-        String result = nodes.toString();
-        assertEquals(expecting, result);
-    }
-
-    @Test
-    public void testParserLexerCombo() throws Exception {
-        Graph g = new Graph();
-        g.addEdge("JavaLexer.tokens", "JavaLexer.g");
-        g.addEdge("JavaParser.g", "JavaLexer.tokens");
-        g.addEdge("Def.g", "JavaLexer.tokens");
-        g.addEdge("Ref.g", "JavaLexer.tokens");
-
-        String expecting = "[JavaLexer.g, JavaLexer.tokens, JavaParser.g, Ref.g, Def.g]";
-        List nodes = g.sort();
-        String result = nodes.toString();
-        assertEquals(expecting, result);
-    }
-}
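The topological sort deleted above is what the tool uses to order grammar files against their generated .tokens files before code generation: addEdge(a, b) records that a depends on b, and sort() emits dependencies before dependents. A minimal sketch of the same org.antlr.misc.Graph API, using raw types as the test does; the file names are made up for illustration.

import org.antlr.misc.Graph;
import java.util.List;

public class BuildOrderDemo {
    public static void main(String[] args) {
        Graph g = new Graph();
        g.addEdge("Def.g", "Java.tokens");  // tree walker needs the generated token file
        g.addEdge("Java.tokens", "Java.g"); // token file comes from the combined grammar
        List order = g.sort();              // dependencies first, e.g. [Java.g, Java.tokens, Def.g]
        System.out.println(order);
    }
}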
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeGrammarRewriteAST.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeGrammarRewriteAST.java
deleted file mode 100644
index 518e48a..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeGrammarRewriteAST.java
+++ /dev/null
@@ -1,1121 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.Tool;
-import org.antlr.codegen.CodeGenerator;
-import org.antlr.runtime.RecognitionException;
-import org.antlr.tool.ErrorManager;
-import org.antlr.tool.Grammar;
-import org.antlr.tool.GrammarSyntaxMessage;
-import org.junit.Ignore;
-import org.junit.Test;
-
-/** Tree rewrites in tree parsers are basically identical to rewrites
- *  in a normal grammar except that the atomic element is a node not
- *  a Token.  Tests here ensure duplication of nodes occurs properly
- *  and basic functionality.
- */
-public class TestTreeGrammarRewriteAST extends BaseTest {
-	protected boolean debug = false;
-
-	@Test public void testFlatList() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ID INT -> INT ID\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("34 abc\n", found);
-	}
-
-	@Test public void testSimpleTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(ID INT) -> ^(INT ID)\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("(34 abc)\n", found);
-	}
-
-	@Test public void testNonImaginaryWithCtor() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : INT ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : INT -> INT[\"99\"]\n" + // make new INT node
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "34");
-		assertEquals("99\n", found);
-	}
-
-	@Test public void testCombinedRewriteAndAuto() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT) | INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(ID INT) -> ^(INT ID) | INT\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("(34 abc)\n", found);
-
-		found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-							   treeGrammar, "TP", "TLexer", "a", "a", "34");
-		assertEquals("34\n", found);
-	}
-
-	@Test public void testAvoidDup() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ID -> ^(ID ID)\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc");
-		assertEquals("(abc abc)\n", found);
-	}
-
-	@Test public void testLoop() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID+ INT+ -> (^(ID INT))+ ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : (^(ID INT))+ -> INT+ ID+\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a b c 3 4 5");
-		assertEquals("3 4 5 a b c\n", found);
-	}
-
-	@Test public void testAutoDup() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ID \n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc");
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testAutoDupRule() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : b c ;\n" +
-			"b : ID ;\n" +
-			"c : INT ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a 1");
-		assertEquals("a 1\n", found);
-	}
-
-    @Test public void testAutoWildcard() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT ;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-            "a : ID . \n" +
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-        assertEquals("abc 34\n", found);
-    }
-
-    @Test public void testNoWildcardAsRootError() throws Exception {
-        ErrorQueue equeue = new ErrorQueue();
-        ErrorManager.setErrorListener(equeue);
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST;}\n" +
-            "a : ^(. INT) \n" +
-            "  ;\n";
-
-        Grammar g = new Grammar(treeGrammar);
-        Tool antlr = newTool();
-        antlr.setOutputDirectory(null); // write to /dev/null
-        CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
-        g.setCodeGenerator(generator);
-        generator.genRecognizer();
-
-        assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
-
-        int expectedMsgID = ErrorManager.MSG_WILDCARD_AS_ROOT;
-        Object expectedArg = null;
-        RecognitionException expectedExc = null;
-        GrammarSyntaxMessage expectedMessage =
-            new GrammarSyntaxMessage(expectedMsgID, g, null, expectedArg, expectedExc);
-
-        checkError(equeue, expectedMessage);        
-    }
-
-    @Test public void testAutoWildcard2() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT -> ^(ID INT);\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-            "a : ^(ID .) \n" +
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-        assertEquals("(abc 34)\n", found);
-    }
-
-    @Test public void testAutoWildcardWithLabel() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT ;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-            "a : ID c=. \n" +
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-        assertEquals("abc 34\n", found);
-    }
-
-    @Test public void testAutoWildcardWithListLabel() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT ;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-            "a : ID c+=. \n" +
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-        assertEquals("abc 34\n", found);
-    }
-
-    @Test public void testAutoDupMultiple() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ID INT;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ID ID INT\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a b 3");
-		assertEquals("a b 3\n", found);
-	}
-
-	@Test public void testAutoDupTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(ID INT)\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
-		assertEquals("(a 3)\n", found);
-	}
-
-	@Test public void testAutoDupTree2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT INT -> ^(ID INT INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(ID b b)\n" +
-			"  ;\n" +
-			"b : INT ;";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a 3 4");
-		assertEquals("(a 3 4)\n", found);
-	}
-
-	@Test public void testAutoDupTreeWithLabels() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(x=ID y=INT)\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
-		assertEquals("(a 3)\n", found);
-	}
-
-	@Test public void testAutoDupTreeWithListLabels() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(x+=ID y+=INT)\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
-		assertEquals("(a 3)\n", found);
-	}
-
-	@Test public void testAutoDupTreeWithRuleRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(b INT) ;\n" +
-			"b : ID ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
-		assertEquals("(a 3)\n", found);
-	}
-
-	@Test public void testAutoDupTreeWithRuleRootAndLabels() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(x=b INT) ;\n" +
-			"b : ID ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
-		assertEquals("(a 3)\n", found);
-	}
-
-	@Test public void testAutoDupTreeWithRuleRootAndListLabels() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(x+=b y+=c) ;\n" +
-			"b : ID ;\n" +
-			"c : INT ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
-		assertEquals("(a 3)\n", found);
-	}
-
-	@Test public void testAutoDupNestedTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x=ID y=ID INT -> ^($x ^($y INT));\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(ID ^(ID INT))\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a b 3");
-		assertEquals("(a (b 3))\n", found);
-	}
-
-	@Test public void testAutoDupTreeWithSubruleInside() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"tokens {OP;}\n" +
-			"a : (x=ID|x=INT) -> ^(OP $x) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(OP (b|c)) ;\n" +
-			"b : ID ;\n" +
-			"c : INT ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "a");
-		assertEquals("(OP a)\n", found);
-	}
-
-	@Test public void testDelete() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ID -> \n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc");
-		assertEquals("", found);
-	}
-
-	@Test public void testSetMatchNoRewrite() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : b INT\n" +
-			"  ;\n" +
-			"b : ID | INT ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("abc 34\n", found);
-	}
-
-	@Test public void testSetOptionalMatchNoRewrite() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : (ID|INT)? INT ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("abc 34\n", found);
-	}
-
-
-	@Test public void testSetMatchNoRewriteLevel2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x=ID INT -> ^($x INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(ID (ID | INT) ) ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("(abc 34)\n", found);
-	}
-
-	@Test public void testSetMatchNoRewriteLevel2Root() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x=ID INT -> ^($x INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^((ID | INT) INT) ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("(abc 34)\n", found);
-	}
-
-
-	// REWRITE MODE
-
-	@Test public void testRewriteModeCombinedRewriteAndAuto() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT) | INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-			"a : ^(ID INT) -> ^(ID[\"ick\"] INT)\n" +
-			"  | INT\n" + // leaves it alone, returning $a.start
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("(ick 34)\n", found);
-
-		found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-							   treeGrammar, "TP", "TLexer", "a", "a", "34");
-		assertEquals("34\n", found);
-	}
-
-	@Test public void testRewriteModeFlatTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ID INT | INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-			"s : ID a ;\n" +
-			"a : INT -> INT[\"1\"]\n"+
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-		assertEquals("abc 1\n", found);
-	}
-
-	@Test public void testRewriteModeChainRuleFlatTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ID INT | INT ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-			"s : a ;\n" +
-			"a : b ;\n" +
-			"b : ID INT -> INT ID\n"+
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-		assertEquals("34 abc\n", found);
-	}
-
-	@Test public void testRewriteModeChainRuleTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-			"s : a ;\n" +
-			"a : b ;\n" + // a.tree must become b.tree
-			"b : ^(ID INT) -> INT\n"+
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-		assertEquals("34\n", found);
-	}
-
-	@Test public void testRewriteModeChainRuleTree2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-			"tokens { X; }\n" +
-			"s : a* b ;\n" + // only b contributes to tree, but it's after a*; s.tree = b.tree
-			"a : X ;\n" +
-			"b : ^(ID INT) -> INT\n"+
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-		assertEquals("34\n", found);
-	}
-
-	@Test public void testRewriteModeChainRuleTree3() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'boo' ID INT -> 'boo' ^(ID INT) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-			"tokens { X; }\n" +
-			"s : 'boo' a* b ;\n" + // don't reset s.tree to b.tree due to 'boo'
-			"a : X ;\n" +
-			"b : ^(ID INT) -> INT\n"+
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "s", "boo abc 34");
-		assertEquals("boo 34\n", found);
-	}
-
-	@Test public void testRewriteModeChainRuleTree4() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-			"tokens { X; }\n" +
-			"s : ^('boo' a* b) ;\n" + // don't reset s.tree to b.tree due to 'boo'
-			"a : X ;\n" +
-			"b : ^(ID INT) -> INT\n"+
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "s", "boo abc 34");
-		assertEquals("(boo 34)\n", found);
-	}
-
-	@Test public void testRewriteModeChainRuleTree5() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-			"tokens { X; }\n" +
-			"s : ^(a b) ;\n" + // s.tree is a.tree
-			"a : 'boo' ;\n" +
-			"b : ^(ID INT) -> INT\n"+
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "s", "boo abc 34");
-		assertEquals("(boo 34)\n", found);
-	}
-
-    @Test public void testRewriteOfRuleRef() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT -> ID INT | INT ;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-            "s : a -> a ;\n" +
-            "a : ID INT -> ID INT ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-        assertEquals("abc 34\n", found);
-    }
-
-    @Test public void testRewriteOfRuleRefRoot() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT INT -> ^(INT ^(ID INT));\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-            "s : ^(a ^(ID INT)) -> a ;\n" +
-            "a : INT ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 12 34");
-        // emits whole tree when you ref the root since I can't know whether
-        // you want the children or not.  You might be returning a whole new
-        // tree.  Hmm...still seems weird.  oh well.
-        assertEquals("(12 (abc 34))\n", found);
-    }
-
-    @Test public void testRewriteOfRuleRefRootLabeled() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT INT -> ^(INT ^(ID INT));\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-            "s : ^(label=a ^(ID INT)) -> a ;\n" +
-            "a : INT ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 12 34");
-        // emits whole tree when you ref the root since I can't know whether
-        // you want the children or not.  You might be returning a whole new
-        // tree.  Hmm...still seems weird.  oh well.
-        assertEquals("(12 (abc 34))\n", found);
-    }
-
-    @Ignore
-    @Test public void testRewriteOfRuleRefRootListLabeled() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT INT -> ^(INT ^(ID INT));\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-            "s : ^(label+=a ^(ID INT)) -> a ;\n" +
-            "a : INT ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 12 34");
-        // emits whole tree when you ref the root since I can't know whether
-        // you want the children or not.  You might be returning a whole new
-        // tree.  Hmm...still seems weird.  oh well.
-        assertEquals("(12 (abc 34))\n", found);
-    }
-
-    @Test public void testRewriteOfRuleRefChild() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT -> ^(ID ^(INT INT));\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-            "s : ^(ID a) -> a ;\n" +
-            "a : ^(INT INT) ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-        assertEquals("(34 34)\n", found);
-    }
-
-    @Test public void testRewriteOfRuleRefLabel() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT -> ^(ID ^(INT INT));\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-            "s : ^(ID label=a) -> a ;\n" +
-            "a : ^(INT INT) ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-        assertEquals("(34 34)\n", found);
-    }
-
-    @Test public void testRewriteOfRuleRefListLabel() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT -> ^(ID ^(INT INT));\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-            "s : ^(ID label+=a) -> a ;\n" +
-            "a : ^(INT INT) ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-        assertEquals("(34 34)\n", found);
-    }
-
-    @Test public void testRewriteModeWithPredicatedRewrites() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID[\"root\"] ^(ID INT)) | INT -> ^(ID[\"root\"] INT) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-			"s : ^(ID a) {System.out.println(\"altered tree=\"+$s.start.toStringTree());};\n" +
-			"a : ^(ID INT) -> {true}? ^(ID[\"ick\"] INT)\n" +
-			"              -> INT\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-		assertEquals("altered tree=(root (ick 34))\n" +
-					 "(root (ick 34))\n", found);
-	}
-
-    @Test public void testWildcardSingleNode() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT -> ^(ID[\"root\"] INT);\n"+
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-            "s : ^(ID c=.) -> $c\n" +
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-        assertEquals("34\n", found);
-    }
-
-    @Test public void testWildcardUnlabeledSingleNode() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID INT -> ^(ID INT);\n"+
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-            "s : ^(ID .) -> ID\n" +
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-        assertEquals("abc\n", found);
-    }
-
-    @Test public void testWildcardGrabsSubtree() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID x=INT y=INT z=INT -> ^(ID[\"root\"] ^($x $y $z));\n"+
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-            "s : ^(ID c=.) -> $c\n" +
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 1 2 3");
-        assertEquals("(1 2 3)\n", found);
-    }
-
-    @Test public void testWildcardGrabsSubtree2() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID x=INT y=INT z=INT -> ID ^($x $y $z);\n"+
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-            "s : ID c=. -> $c\n" +
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 1 2 3");
-        assertEquals("(1 2 3)\n", found);
-    }
-
-    @Test public void testWildcardListLabel() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : INT INT INT ;\n"+
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-            "s : (c+=.)+ -> $c+\n" +
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "1 2 3");
-        assertEquals("1 2 3\n", found);
-    }
-
-    @Test public void testWildcardListLabel2() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST; ASTLabelType=CommonTree;}\n" +
-            "a  : x=INT y=INT z=INT -> ^($x ^($y $z) ^($y $z));\n"+
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP;\n"+
-            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
-            "s : ^(INT (c+=.)+) -> $c+\n" +
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                                      treeGrammar, "TP", "TLexer", "a", "s", "1 2 3");
-        assertEquals("(2 3) (2 3)\n", found);
-    }
-
-	@Test public void testRuleResultAsRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID '=' INT -> ^('=' ID INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"COLON : ':' ;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n"+
-			"options {output=AST; rewrite=true; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
-			"a : ^(eq e1=ID e2=.) -> ^(eq $e2 $e1) ;\n" +
-			"eq : '=' | ':' {;} ;\n";  // bug in set match, doesn't add to tree!! booh. force nonset.
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-									  treeGrammar, "TP", "TLexer", "a", "a", "abc = 34");
-		assertEquals("(= 34 abc)\n", found);
-	}
-
-}
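Each execTreeParser() call deleted above compiles the two grammars and then runs the usual ANTLR v3 pipeline: lexer -> token stream -> parser (builds the AST) -> CommonTreeNodeStream -> tree parser (applies the rewrites). Outside the test harness that wiring looks roughly like the sketch below; TLexer, TParser and TP are hypothetical generated class names for grammars like T and TP above, not classes shipped with the runtime.

import org.antlr.runtime.*;
import org.antlr.runtime.tree.*;

public class TreeRewriteDemo {
    public static void main(String[] args) throws RecognitionException {
        ANTLRStringStream in = new ANTLRStringStream("abc 34");
        TLexer lexer = new TLexer(in);                 // generated from T.g (assumed)
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        TParser parser = new TParser(tokens);          // generated from T.g (assumed)
        TParser.a_return r = parser.a();               // parse and build the AST
        CommonTree ast = (CommonTree)r.getTree();

        CommonTreeNodeStream nodes = new CommonTreeNodeStream(ast);
        nodes.setTokenStream(tokens);                  // lets the walker see the original text
        TP walker = new TP(nodes);                     // generated from TP.g (assumed)
        TP.a_return r2 = walker.a();                   // apply the tree rewrites
        System.out.println(((CommonTree)r2.getTree()).toStringTree());
    }
}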
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeIterator.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeIterator.java
deleted file mode 100644
index 33bfb2d..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeIterator.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.runtime.tree.*;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
-public class TestTreeIterator {
-    static final String[] tokens = new String[] {
-        "<invalid>", "<EOR>", "<DOWN>", "<UP>", "A", "B", "C", "D", "E", "F", "G"
-    };
-
-    @Test public void testNode() {
-        TreeAdaptor adaptor = new CommonTreeAdaptor();
-        TreeWizard wiz = new TreeWizard(adaptor, tokens);
-        CommonTree t = (CommonTree)wiz.create("A");
-        TreeIterator it = new TreeIterator(t);
-        StringBuffer buf = toString(it);
-        String expecting = "A EOF";
-        String found = buf.toString();
-        assertEquals(expecting, found);
-    }
-
-    @Test public void testFlatAB() {
-        TreeAdaptor adaptor = new CommonTreeAdaptor();
-        TreeWizard wiz = new TreeWizard(adaptor, tokens);
-        CommonTree t = (CommonTree)wiz.create("(nil A B)");
-        TreeIterator it = new TreeIterator(t);
-        StringBuffer buf = toString(it);
-        String expecting = "nil DOWN A B UP EOF";
-        String found = buf.toString();
-        assertEquals(expecting, found);
-    }
-
-    @Test public void testAB() {
-        TreeAdaptor adaptor = new CommonTreeAdaptor();
-        TreeWizard wiz = new TreeWizard(adaptor, tokens);
-        CommonTree t = (CommonTree)wiz.create("(A B)");
-        TreeIterator it = new TreeIterator(t);
-        StringBuffer buf = toString(it);
-        String expecting = "A DOWN B UP EOF";
-        String found = buf.toString();
-        assertEquals(expecting, found);
-    }
-
-    @Test public void testABC() {
-        TreeAdaptor adaptor = new CommonTreeAdaptor();
-        TreeWizard wiz = new TreeWizard(adaptor, tokens);
-        CommonTree t = (CommonTree)wiz.create("(A B C)");
-        TreeIterator it = new TreeIterator(t);
-        StringBuffer buf = toString(it);
-        String expecting = "A DOWN B C UP EOF";
-        String found = buf.toString();
-        assertEquals(expecting, found);
-    }
-
-    @Test public void testVerticalList() {
-        TreeAdaptor adaptor = new CommonTreeAdaptor();
-        TreeWizard wiz = new TreeWizard(adaptor, tokens);
-        CommonTree t = (CommonTree)wiz.create("(A (B C))");
-        TreeIterator it = new TreeIterator(t);
-        StringBuffer buf = toString(it);
-        String expecting = "A DOWN B DOWN C UP UP EOF";
-        String found = buf.toString();
-        assertEquals(expecting, found);
-    }
-
-    @Test public void testComplex() {
-        TreeAdaptor adaptor = new CommonTreeAdaptor();
-        TreeWizard wiz = new TreeWizard(adaptor, tokens);
-        CommonTree t = (CommonTree)wiz.create("(A (B (C D E) F) G)");
-        TreeIterator it = new TreeIterator(t);
-        StringBuffer buf = toString(it);
-        String expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF";
-        String found = buf.toString();
-        assertEquals(expecting, found);
-    }
-
-    @Test public void testReset() {
-        TreeAdaptor adaptor = new CommonTreeAdaptor();
-        TreeWizard wiz = new TreeWizard(adaptor, tokens);
-        CommonTree t = (CommonTree)wiz.create("(A (B (C D E) F) G)");
-        TreeIterator it = new TreeIterator(t);
-        StringBuffer buf = toString(it);
-        String expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF";
-        String found = buf.toString();
-        assertEquals(expecting, found);
-
-        it.reset();
-        buf = toString(it);
-        expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF";
-        found = buf.toString();
-        assertEquals(expecting, found);
-    }
-
-    protected static StringBuffer toString(TreeIterator it) {
-        StringBuffer buf = new StringBuffer();
-        while ( it.hasNext() ) {
-            CommonTree n = (CommonTree)it.next();
-            buf.append(n);
-            if ( it.hasNext() ) buf.append(" ");
-        }
-        return buf;
-    }
-}
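TreeIterator, exercised by the tests deleted above, walks a tree depth-first and synthesizes imaginary DOWN/UP navigation nodes around each child list plus a trailing EOF node, which is exactly the shape tree parsers consume. A small sketch mirroring those tests:

import org.antlr.runtime.tree.*;

public class TreeIteratorDemo {
    public static void main(String[] args) {
        TreeAdaptor adaptor = new CommonTreeAdaptor();
        String[] tokenNames = {"<invalid>", "<EOR>", "<DOWN>", "<UP>", "A", "B", "C"};
        TreeWizard wiz = new TreeWizard(adaptor, tokenNames);
        CommonTree t = (CommonTree)wiz.create("(A (B C))"); // A over B over C
        TreeIterator it = new TreeIterator(t);
        StringBuilder buf = new StringBuilder();
        while (it.hasNext()) {
            buf.append(it.next());
            if (it.hasNext()) buf.append(" ");
        }
        System.out.println(buf); // A DOWN B DOWN C UP UP EOF
    }
}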
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeNodeStream.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeNodeStream.java
deleted file mode 100644
index 747fc3a..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeNodeStream.java
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.runtime.CommonToken;
-import org.antlr.runtime.Token;
-import org.antlr.runtime.tree.*;
-import org.junit.Test;
-
-/** Test the tree node stream. */
-public class TestTreeNodeStream extends BaseTest {
-
-	/** Build new stream; let's us override to test other streams. */
-	public TreeNodeStream newStream(Object t) {
-		return new CommonTreeNodeStream(t);
-	}
-
-    public String toTokenTypeString(TreeNodeStream stream) {
-        return ((CommonTreeNodeStream)stream).toTokenTypeString();
-    }
-
-	@Test public void testSingleNode() throws Exception {
-		Tree t = new CommonTree(new CommonToken(101));
-
-		TreeNodeStream stream = newStream(t);
-		String expecting = " 101";
-		String found = toNodesOnlyString(stream);
-		assertEquals(expecting, found);
-
-		expecting = " 101";
-		found = toTokenTypeString(stream);
-		assertEquals(expecting, found);
-	}
-
-	@Test public void test4Nodes() throws Exception {
-		// ^(101 ^(102 103) 104)
-		Tree t = new CommonTree(new CommonToken(101));
-		t.addChild(new CommonTree(new CommonToken(102)));
-		t.getChild(0).addChild(new CommonTree(new CommonToken(103)));
-		t.addChild(new CommonTree(new CommonToken(104)));
-
-		TreeNodeStream stream = newStream(t);
-		String expecting = " 101 102 103 104";
-		String found = toNodesOnlyString(stream);
-		assertEquals(expecting, found);
-
-		expecting = " 101 2 102 2 103 3 104 3";
-		found = toTokenTypeString(stream);
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testList() throws Exception {
-		Tree root = new CommonTree((Token)null);
-
-		Tree t = new CommonTree(new CommonToken(101));
-		t.addChild(new CommonTree(new CommonToken(102)));
-		t.getChild(0).addChild(new CommonTree(new CommonToken(103)));
-		t.addChild(new CommonTree(new CommonToken(104)));
-
-		Tree u = new CommonTree(new CommonToken(105));
-
-		root.addChild(t);
-		root.addChild(u);
-
-		TreeNodeStream stream = newStream(root);
-		String expecting = " 101 102 103 104 105";
-		String found = toNodesOnlyString(stream);
-		assertEquals(expecting, found);
-
-		expecting = " 101 2 102 2 103 3 104 3 105";
-		found = toTokenTypeString(stream);
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testFlatList() throws Exception {
-		Tree root = new CommonTree((Token)null);
-
-		root.addChild(new CommonTree(new CommonToken(101)));
-		root.addChild(new CommonTree(new CommonToken(102)));
-		root.addChild(new CommonTree(new CommonToken(103)));
-
-		TreeNodeStream stream = newStream(root);
-		String expecting = " 101 102 103";
-		String found = toNodesOnlyString(stream);
-		assertEquals(expecting, found);
-
-		expecting = " 101 102 103";
-		found = toTokenTypeString(stream);
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testListWithOneNode() throws Exception {
-		Tree root = new CommonTree((Token)null);
-
-		root.addChild(new CommonTree(new CommonToken(101)));
-
-		TreeNodeStream stream = newStream(root);
-		String expecting = " 101";
-		String found = toNodesOnlyString(stream);
-		assertEquals(expecting, found);
-
-		expecting = " 101";
-		found = toTokenTypeString(stream);
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testAoverB() throws Exception {
-		Tree t = new CommonTree(new CommonToken(101));
-		t.addChild(new CommonTree(new CommonToken(102)));
-
-		TreeNodeStream stream = newStream(t);
-		String expecting = " 101 102";
-		String found = toNodesOnlyString(stream);
-		assertEquals(expecting, found);
-
-		expecting = " 101 2 102 3";
-		found = toTokenTypeString(stream);
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testLT() throws Exception {
-		// ^(101 ^(102 103) 104)
-		Tree t = new CommonTree(new CommonToken(101));
-		t.addChild(new CommonTree(new CommonToken(102)));
-		t.getChild(0).addChild(new CommonTree(new CommonToken(103)));
-		t.addChild(new CommonTree(new CommonToken(104)));
-
-		TreeNodeStream stream = newStream(t);
-		assertEquals(101, ((Tree)stream.LT(1)).getType());
-		assertEquals(Token.DOWN, ((Tree)stream.LT(2)).getType());
-		assertEquals(102, ((Tree)stream.LT(3)).getType());
-		assertEquals(Token.DOWN, ((Tree)stream.LT(4)).getType());
-		assertEquals(103, ((Tree)stream.LT(5)).getType());
-		assertEquals(Token.UP, ((Tree)stream.LT(6)).getType());
-		assertEquals(104, ((Tree)stream.LT(7)).getType());
-		assertEquals(Token.UP, ((Tree)stream.LT(8)).getType());
-		assertEquals(Token.EOF, ((Tree)stream.LT(9)).getType());
-		// check way ahead
-		assertEquals(Token.EOF, ((Tree)stream.LT(100)).getType());
-	}
-
-	@Test public void testMarkRewindEntire() throws Exception {
-		// ^(101 ^(102 103 ^(106 107) ) 104 105)
-		// stream has 7 real + 6 nav nodes
-		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
-		Tree r0 = new CommonTree(new CommonToken(101));
-		Tree r1 = new CommonTree(new CommonToken(102));
-		r0.addChild(r1);
-		r1.addChild(new CommonTree(new CommonToken(103)));
-		Tree r2 = new CommonTree(new CommonToken(106));
-		r2.addChild(new CommonTree(new CommonToken(107)));
-		r1.addChild(r2);
-		r0.addChild(new CommonTree(new CommonToken(104)));
-		r0.addChild(new CommonTree(new CommonToken(105)));
-
-		TreeNodeStream stream = newStream(r0);
-		int m = stream.mark(); // MARK
-		for (int k=1; k<=13; k++) { // consume til end
-			stream.LT(1);
-			stream.consume();
-		}
-		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
-		stream.rewind(m);      // REWIND
-
-		// consume til end again :)
-		for (int k=1; k<=13; k++) { // consume til end
-			stream.LT(1);
-			stream.consume();
-		}
-		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
-	}
-
-	@Test public void testMarkRewindInMiddle() throws Exception {
-		// ^(101 ^(102 103 ^(106 107) ) 104 105)
-		// stream has 7 real + 6 nav nodes
-		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
-		Tree r0 = new CommonTree(new CommonToken(101));
-		Tree r1 = new CommonTree(new CommonToken(102));
-		r0.addChild(r1);
-		r1.addChild(new CommonTree(new CommonToken(103)));
-		Tree r2 = new CommonTree(new CommonToken(106));
-		r2.addChild(new CommonTree(new CommonToken(107)));
-		r1.addChild(r2);
-		r0.addChild(new CommonTree(new CommonToken(104)));
-		r0.addChild(new CommonTree(new CommonToken(105)));
-
-		TreeNodeStream stream = newStream(r0);
-		for (int k=1; k<=7; k++) { // consume til middle
-			//System.out.println(((Tree)stream.LT(1)).getType());
-			stream.consume();
-		}
-		assertEquals(107, ((Tree)stream.LT(1)).getType());
-		stream.mark(); // MARK
-		stream.consume(); // consume 107
-		stream.consume(); // consume UP
-		stream.consume(); // consume UP
-		stream.consume(); // consume 104
-		stream.rewind(); // REWIND
-        stream.mark();   // keep saving nodes though
-
-		assertEquals(107, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		assertEquals(104, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		// now we're past rewind position
-		assertEquals(105, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
-		assertEquals(Token.UP, ((Tree)stream.LT(-1)).getType());
-	}
-
-	@Test public void testMarkRewindNested() throws Exception {
-		// ^(101 ^(102 103 ^(106 107) ) 104 105)
-		// stream has 7 real + 6 nav nodes
-		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
-		Tree r0 = new CommonTree(new CommonToken(101));
-		Tree r1 = new CommonTree(new CommonToken(102));
-		r0.addChild(r1);
-		r1.addChild(new CommonTree(new CommonToken(103)));
-		Tree r2 = new CommonTree(new CommonToken(106));
-		r2.addChild(new CommonTree(new CommonToken(107)));
-		r1.addChild(r2);
-		r0.addChild(new CommonTree(new CommonToken(104)));
-		r0.addChild(new CommonTree(new CommonToken(105)));
-
-		TreeNodeStream stream = newStream(r0);
-		int m = stream.mark(); // MARK at start
-		stream.consume(); // consume 101
-		stream.consume(); // consume DN
-		int m2 = stream.mark(); // MARK on 102
-		stream.consume(); // consume 102
-		stream.consume(); // consume DN
-		stream.consume(); // consume 103
-		stream.consume(); // consume 106
-		stream.rewind(m2);      // REWIND to 102
-		assertEquals(102, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		// stop at 103 and rewind to start
-		stream.rewind(m); // REWIND to 101
-		assertEquals(101, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		assertEquals(102, ((Tree)stream.LT(1)).getType());
-		stream.consume();
-		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
-	}
-
-	@Test public void testSeekFromStart() throws Exception {
-		// ^(101 ^(102 103 ^(106 107) ) 104 105)
-		// stream has 7 real + 6 nav nodes
-		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
-		Tree r0 = new CommonTree(new CommonToken(101));
-		Tree r1 = new CommonTree(new CommonToken(102));
-		r0.addChild(r1);
-		r1.addChild(new CommonTree(new CommonToken(103)));
-		Tree r2 = new CommonTree(new CommonToken(106));
-		r2.addChild(new CommonTree(new CommonToken(107)));
-		r1.addChild(r2);
-		r0.addChild(new CommonTree(new CommonToken(104)));
-		r0.addChild(new CommonTree(new CommonToken(105)));
-
-		TreeNodeStream stream = newStream(r0);
-		stream.seek(7);   // seek to 107
-		assertEquals(107, ((Tree)stream.LT(1)).getType());
-		stream.consume(); // consume 107
-		stream.consume(); // consume UP
-		stream.consume(); // consume UP
-		assertEquals(104, ((Tree)stream.LT(1)).getType());
-	}
-
-    @Test public void testReset() throws Exception {
-        // ^(101 ^(102 103 ^(106 107) ) 104 105)
-        // stream has 7 real + 6 nav nodes
-        // Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
-        Tree r0 = new CommonTree(new CommonToken(101));
-        Tree r1 = new CommonTree(new CommonToken(102));
-        r0.addChild(r1);
-        r1.addChild(new CommonTree(new CommonToken(103)));
-        Tree r2 = new CommonTree(new CommonToken(106));
-        r2.addChild(new CommonTree(new CommonToken(107)));
-        r1.addChild(r2);
-        r0.addChild(new CommonTree(new CommonToken(104)));
-        r0.addChild(new CommonTree(new CommonToken(105)));
-
-        TreeNodeStream stream = newStream(r0);
-        String v = toNodesOnlyString(stream); // scan all
-        stream.reset();
-        String v2 = toNodesOnlyString(stream); // scan all
-        assertEquals(v, v2);
-    }
-
-	@Test public void testDeepTree() throws Exception {
-		// ^(10 100 101 ^(20 ^(30 40 (50 (60 70)))) (80 90)))
-		// stream has 8 real + 10 nav nodes
-		int n = 9;
-		CommonTree[] nodes = new CommonTree[n];
-		for (int i=0; i< n; i++) {
-			nodes[i] = new CommonTree(new CommonToken((i+1)*10));
-		}
-		Tree g = nodes[0];
-		Tree rules = nodes[1];
-		Tree rule1 = nodes[2];
-		Tree id = nodes[3];
-		Tree block = nodes[4];
-		Tree alt = nodes[5];
-		Tree s = nodes[6];
-		Tree rule2 = nodes[7];
-		Tree id2 = nodes[8];
-		g.addChild(new CommonTree(new CommonToken(100)));
-		g.addChild(new CommonTree(new CommonToken(101)));
-		g.addChild(rules);
-		rules.addChild(rule1);
-		rule1.addChild(id);
-		rule1.addChild(block);
-		block.addChild(alt);
-		alt.addChild(s);
-		rules.addChild(rule2);
-		rule2.addChild(id2);
-
-		TreeNodeStream stream = newStream(g);
-		String expecting = " 10 2 100 101 20 2 30 2 40 50 2 60 2 70 3 3 3 80 2 90 3 3 3";
-		String found = toTokenTypeString(stream);
-		assertEquals(expecting, found);
-	}
-
-	public String toNodesOnlyString(TreeNodeStream nodes) {
-        TreeAdaptor adaptor = nodes.getTreeAdaptor();
-		StringBuffer buf = new StringBuffer();
-        Object o = nodes.LT(1);
-        int type = adaptor.getType(o);
-        while ( o!=null && type!=Token.EOF ) {
-			if ( !(type==Token.DOWN||type==Token.UP) ) {
-				buf.append(" ");
-				buf.append(type);
-			}
-            nodes.consume();
-            o = nodes.LT(1);
-            type = adaptor.getType(o);
-		}
-		return buf.toString();
-	}
-}
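The expected strings in the stream tests above (for example " 101 2 102 3" in testAoverB) interleave real token types with the imaginary navigation tokens a tree node stream emits when it flattens a tree: Token.DOWN (type 2) opens a child list and Token.UP (type 3) closes it. A minimal standalone sketch of that encoding, assuming the runtime's stock CommonTreeNodeStream in place of the suite's newStream() factory:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeNodeStream;

    public class TreeStreamDemo {
        public static void main(String[] args) {
            // Build ^(101 102): a single child 102 under root 101.
            CommonTree root = new CommonTree(new CommonToken(101));
            root.addChild(new CommonTree(new CommonToken(102)));

            CommonTreeNodeStream stream = new CommonTreeNodeStream(root);
            StringBuilder buf = new StringBuilder();
            Object node = stream.LT(1);
            // Same loop as toNodesOnlyString above, but without filtering out DOWN/UP.
            while (node != null && stream.getTreeAdaptor().getType(node) != Token.EOF) {
                buf.append(" ").append(stream.getTreeAdaptor().getType(node));
                stream.consume();
                node = stream.LT(1);
            }
            System.out.println(buf); // " 101 2 102 3": DOWN=2 before the child, UP=3 after it
        }
    }

Run against ^(101 102) this prints " 101 2 102 3", matching the assertion deleted above.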
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeParsing.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeParsing.java
deleted file mode 100644
index 9664dcb..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeParsing.java
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.junit.Test;
-
-public class TestTreeParsing extends BaseTest {
-	@Test public void testFlatList() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
-			"a : ID INT\n" +
-			"    {System.out.println($ID+\", \"+$INT);}\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("abc, 34\n", found);
-	}
-
-	@Test public void testSimpleTree() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT -> ^(ID INT);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
-			"a : ^(ID INT)\n" +
-			"    {System.out.println($ID+\", \"+$INT);}\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
-		assertEquals("abc, 34\n", found);
-	}
-
-	@Test public void testFlatVsTreeDecision() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : b c ;\n" +
-			"b : ID INT -> ^(ID INT);\n" +
-			"c : ID INT;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
-			"a : b b ;\n" +
-			"b : ID INT    {System.out.print($ID+\" \"+$INT);}\n" +
-			"  | ^(ID INT) {System.out.print(\"^(\"+$ID+\" \"+$INT+')');}\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 b 2");
-		assertEquals("^(a 1)b 2\n", found);
-	}
-
-	@Test public void testFlatVsTreeDecision2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : b c ;\n" +
-			"b : ID INT+ -> ^(ID INT+);\n" +
-			"c : ID INT+;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
-			"a : b b ;\n" +
-			"b : ID INT+    {System.out.print($ID+\" \"+$INT);}\n" +
-			"  | ^(x=ID (y=INT)+) {System.out.print(\"^(\"+$x+' '+$y+')');}\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a",
-				    "a 1 2 3 b 4 5");
-		assertEquals("^(a 3)b 5\n", found);
-	}
-
-	@Test public void testCyclicDFALookahead() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT+ PERIOD;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"SEMI : ';' ;\n"+
-			"PERIOD : '.' ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
-			"a : ID INT+ PERIOD {System.out.print(\"alt 1\");}"+
-			"  | ID INT+ SEMI   {System.out.print(\"alt 2\");}\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 2 3.");
-		assertEquals("alt 1\n", found);
-	}
-
-	@Test public void testTemplateOutput() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP;\n" +
-			"options {output=template; ASTLabelType=CommonTree;}\n" +
-			"s : a {System.out.println($a.st);};\n" +
-			"a : ID INT -> {new StringTemplate($INT.text)}\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
-		assertEquals("34\n", found);
-	}
-
-	@Test public void testNullableChildList() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT? -> ^(ID INT?);\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
-			"a : ^(ID INT?)\n" +
-			"    {System.out.println($ID);}\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testNullableChildList2() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : ID INT? SEMI -> ^(ID INT?) SEMI ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"SEMI : ';' ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
-			"a : ^(ID INT?) SEMI\n" +
-			"    {System.out.println($ID);}\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc;");
-		assertEquals("abc\n", found);
-	}
-
-	@Test public void testNullableChildList3() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x=ID INT? (y=ID)? SEMI -> ^($x INT? $y?) SEMI ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"SEMI : ';' ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
-			"a : ^(ID INT? b) SEMI\n" +
-			"    {System.out.println($ID+\", \"+$b.text);}\n" +
-			"  ;\n"+
-			"b : ID? ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc def;");
-		assertEquals("abc, def\n", found);
-	}
-
-	@Test public void testActionsAfterRoot() throws Exception {
-		String grammar =
-			"grammar T;\n" +
-			"options {output=AST;}\n" +
-			"a : x=ID INT? SEMI -> ^($x INT?) ;\n" +
-			"ID : 'a'..'z'+ ;\n" +
-			"INT : '0'..'9'+;\n" +
-			"SEMI : ';' ;\n"+
-			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-		String treeGrammar =
-			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
-			"a @init {int x=0;} : ^(ID {x=1;} {x=2;} INT?)\n" +
-			"    {System.out.println($ID+\", \"+x);}\n" +
-			"  ;\n";
-
-		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-				    treeGrammar, "TP", "TLexer", "a", "a", "abc;");
-		assertEquals("abc, 2\n", found);
-	}
-
-    @Test public void testWildcardLookahead() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID '+'^ INT;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';' ;\n"+
-            "PERIOD : '.' ;\n"+
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
-            "a : ^('+' . INT) {System.out.print(\"alt 1\");}"+
-            "  ;\n";
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
-        assertEquals("alt 1\n", found);
-    }
-
-    @Test public void testWildcardLookahead2() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID '+'^ INT;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';' ;\n"+
-            "PERIOD : '.' ;\n"+
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
-            "a : ^('+' . INT) {System.out.print(\"alt 1\");}"+
-            "  | ^('+' . .)   {System.out.print(\"alt 2\");}\n" +
-            "  ;\n";
-
-        // AMBIG upon '+' DOWN INT UP etc.. but so what.
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
-        assertEquals("alt 1\n", found);
-    }
-
-    @Test public void testWildcardLookahead3() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID '+'^ INT;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';' ;\n"+
-            "PERIOD : '.' ;\n"+
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
-            "a : ^('+' ID INT) {System.out.print(\"alt 1\");}"+
-            "  | ^('+' . .)   {System.out.print(\"alt 2\");}\n" +
-            "  ;\n";
-
-        // AMBIG upon '+' DOWN INT UP etc.. but so what.
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
-        assertEquals("alt 1\n", found);
-    }
-
-    @Test public void testWildcardPlusLookahead() throws Exception {
-        String grammar =
-            "grammar T;\n" +
-            "options {output=AST;}\n" +
-            "a : ID '+'^ INT;\n" +
-            "ID : 'a'..'z'+ ;\n" +
-            "INT : '0'..'9'+;\n" +
-            "SEMI : ';' ;\n"+
-            "PERIOD : '.' ;\n"+
-            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
-
-        String treeGrammar =
-            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
-            "a : ^('+' INT INT ) {System.out.print(\"alt 1\");}"+
-            "  | ^('+' .+)   {System.out.print(\"alt 2\");}\n" +
-            "  ;\n";
-
-        // AMBIG upon '+' DOWN INT UP etc.. but so what.
-
-        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
-                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
-        assertEquals("alt 2\n", found);
-    }
-
-}
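Outside the BaseTest harness, each execTreeParser call above corresponds to a two-phase pipeline: the T grammar's parser builds an AST (output=AST), and the TP tree grammar walks that AST over a tree node stream. A minimal driver sketch for the testFlatList pair, assuming TLexer, TParser and TP have already been generated by the ANTLR tool from the grammar strings shown in that test:

    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.CommonTokenStream;
    import org.antlr.runtime.RecognitionException;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeNodeStream;

    public class TreeParseDemo {
        public static void main(String[] args) throws RecognitionException {
            // Phase 1: lex and parse "abc 34"; rule 'a' builds the AST.
            CommonTokenStream tokens =
                    new CommonTokenStream(new TLexer(new ANTLRStringStream("abc 34")));
            TParser parser = new TParser(tokens);
            CommonTree ast = (CommonTree) parser.a().getTree();

            // Phase 2: flatten the AST into a node stream and run the tree grammar.
            CommonTreeNodeStream nodes = new CommonTreeNodeStream(ast);
            nodes.setTokenStream(tokens); // conventional wiring so $text lookups can reach the tokens
            TP walker = new TP(nodes);
            walker.a(); // the embedded action prints "abc, 34"
        }
    }

The print matches the "abc, 34\n" the deleted test asserts; the other tests differ only in the grammars and input handed to the same pipeline.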
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeWizard.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeWizard.java
deleted file mode 100644
index 3f2bd1b..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTreeWizard.java
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.runtime.tree.CommonTree;
-import org.antlr.runtime.tree.CommonTreeAdaptor;
-import org.antlr.runtime.tree.TreeAdaptor;
-import org.antlr.runtime.tree.TreeWizard;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-
-public class TestTreeWizard extends BaseTest {
-	protected static final String[] tokens =
-		new String[] {"", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR"};
-	protected static final TreeAdaptor adaptor = new CommonTreeAdaptor();
-
-	@Test public void testSingleNode() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("ID");
-		String found = t.toStringTree();
-		String expecting = "ID";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testSingleNodeWithArg() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("ID[foo]");
-		String found = t.toStringTree();
-		String expecting = "foo";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testSingleNodeTree() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A)");
-		String found = t.toStringTree();
-		String expecting = "A";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testSingleLevelTree() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C D)");
-		String found = t.toStringTree();
-		String expecting = "(A B C D)";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testListTree() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(nil A B C)");
-		String found = t.toStringTree();
-		String expecting = "A B C";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testInvalidListTree() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("A B C");
-		assertTrue(t==null);
-	}
-
-	@Test public void testDoubleLevelTree() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A (B C) (B D) E)");
-		String found = t.toStringTree();
-		String expecting = "(A (B C) (B D) E)";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testSingleNodeIndex() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("ID");
-		Map m = wiz.index(t);
-		String found = m.toString();
-		String expecting = "{10=[ID]}";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testNoRepeatsIndex() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C D)");
-		Map m = wiz.index(t);
-		String found = sortMapToString(m);
-        String expecting = "{5=[A], 6=[B], 7=[C], 8=[D]}";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRepeatsIndex() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
-		Map m = wiz.index(t);
-		String found =  sortMapToString(m);
-        String expecting = "{5=[A, A], 6=[B, B, B], 7=[C], 8=[D, D]}";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testNoRepeatsVisit() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C D)");
-		final List elements = new ArrayList();
-		wiz.visit(t, wiz.getTokenType("B"), new TreeWizard.Visitor() {
-			public void visit(Object t) {
-				elements.add(t);
-			}
-		});
-		String found = elements.toString();
-		String expecting = "[B]";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testNoRepeatsVisit2() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
-		final List elements = new ArrayList();
-		wiz.visit(t, wiz.getTokenType("C"),
-					   new TreeWizard.Visitor() {
-							public void visit(Object t) {
-								elements.add(t);
-							}
-					   });
-		String found = elements.toString();
-		String expecting = "[C]";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRepeatsVisit() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
-		final List elements = new ArrayList();
-		wiz.visit(t, wiz.getTokenType("B"),
-					   new TreeWizard.Visitor() {
-							public void visit(Object t) {
-								elements.add(t);
-							}
-					   });
-		String found = elements.toString();
-		String expecting = "[B, B, B]";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRepeatsVisit2() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
-		final List elements = new ArrayList();
-		wiz.visit(t, wiz.getTokenType("A"),
-					   new TreeWizard.Visitor() {
-							public void visit(Object t) {
-								elements.add(t);
-							}
-					   });
-		String found = elements.toString();
-		String expecting = "[A, A]";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRepeatsVisitWithContext() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
-		final List elements = new ArrayList();
-		wiz.visit(t, wiz.getTokenType("B"),
-		   new TreeWizard.ContextVisitor() {
-			   public void visit(Object t, Object parent, int childIndex, Map labels) {
-				   elements.add(adaptor.getText(t)+"@"+
-								(parent!=null?adaptor.getText(parent):"nil")+
-								"["+childIndex+"]");
-			   }
-		   });
-		String found = elements.toString();
-		String expecting = "[B@A[0], B@A[1], B@A[2]]";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testRepeatsVisitWithNullParentAndContext() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
-		final List elements = new ArrayList();
-		wiz.visit(t, wiz.getTokenType("A"),
-		   new TreeWizard.ContextVisitor() {
-			   public void visit(Object t, Object parent, int childIndex, Map labels) {
-				   elements.add(adaptor.getText(t)+"@"+
-								(parent!=null?adaptor.getText(parent):"nil")+
-								"["+childIndex+"]");
-			   }
-		   });
-		String found = elements.toString();
-		String expecting = "[A@nil[0], A@A[1]]";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testVisitPattern() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C (A B) D)");
-		final List elements = new ArrayList();
-		wiz.visit(t, "(A B)",
-					   new TreeWizard.Visitor() {
-							public void visit(Object t) {
-								elements.add(t);
-							}
-					   });
-		String found = elements.toString();
-		String expecting = "[A]"; // shouldn't match overall root, just (A B)
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testVisitPatternMultiple() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C (A B) (D (A B)))");
-		final List elements = new ArrayList();
-		wiz.visit(t, "(A B)",
-					   new TreeWizard.ContextVisitor() {
-						   public void visit(Object t, Object parent, int childIndex, Map labels) {
-							   elements.add(adaptor.getText(t)+"@"+
-											(parent!=null?adaptor.getText(parent):"nil")+
-											"["+childIndex+"]");
-						   }
-					   });
-		String found = elements.toString();
-		String expecting = "[A@A[2], A@D[0]]"; // shouldn't match overall root, just (A B)
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testVisitPatternMultipleWithLabels() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))");
-		final List elements = new ArrayList();
-		wiz.visit(t, "(%a:A %b:B)",
-					   new TreeWizard.ContextVisitor() {
-						   public void visit(Object t, Object parent, int childIndex, Map labels) {
-							   elements.add(adaptor.getText(t)+"@"+
-											(parent!=null?adaptor.getText(parent):"nil")+
-											"["+childIndex+"]"+labels.get("a")+"&"+labels.get("b"));
-						   }
-					   });
-		String found = elements.toString();
-		String expecting = "[foo@A[2]foo&bar, big@D[0]big&dog]";
-		assertEquals(expecting, found);
-	}
-
-	@Test public void testParse() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C)");
-		boolean valid = wiz.parse(t, "(A B C)");
-		assertTrue(valid);
-	}
-
-	@Test public void testParseSingleNode() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("A");
-		boolean valid = wiz.parse(t, "A");
-		assertTrue(valid);
-	}
-
-	@Test public void testParseFlatTree() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(nil A B C)");
-		boolean valid = wiz.parse(t, "(nil A B C)");
-		assertTrue(valid);
-	}
-
-	@Test public void testWildcard() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C)");
-		boolean valid = wiz.parse(t, "(A . .)");
-		assertTrue(valid);
-	}
-
-	@Test public void testParseWithText() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B[foo] C[bar])");
-		// C pattern has no text arg so despite [bar] in t, no need
-		// to match text--check structure only.
-		boolean valid = wiz.parse(t, "(A B[foo] C)");
-		assertTrue(valid);
-	}
-
-	@Test public void testParseWithText2() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B[T__32] (C (D E[a])))");
-		// C pattern has no text arg so despite [bar] in t, no need
-		// to match text--check structure only.
-		boolean valid = wiz.parse(t, "(A B[foo] C)");
-		assertEquals("(A T__32 (C (D a)))", t.toStringTree());
-	}
-
-	@Test public void testParseWithTextFails() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C)");
-		boolean valid = wiz.parse(t, "(A[foo] B C)");
-		assertTrue(!valid); // fails
-	}
-
-	@Test public void testParseLabels() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C)");
-		Map labels = new HashMap();
-		boolean valid = wiz.parse(t, "(%a:A %b:B %c:C)", labels);
-		assertTrue(valid);
-		assertEquals("A", labels.get("a").toString());
-		assertEquals("B", labels.get("b").toString());
-		assertEquals("C", labels.get("c").toString());
-	}
-
-	@Test public void testParseWithWildcardLabels() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C)");
-		Map labels = new HashMap();
-		boolean valid = wiz.parse(t, "(A %b:. %c:.)", labels);
-		assertTrue(valid);
-		assertEquals("B", labels.get("b").toString());
-		assertEquals("C", labels.get("c").toString());
-	}
-
-	@Test public void testParseLabelsAndTestText() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B[foo] C)");
-		Map labels = new HashMap();
-		boolean valid = wiz.parse(t, "(%a:A %b:B[foo] %c:C)", labels);
-		assertTrue(valid);
-		assertEquals("A", labels.get("a").toString());
-		assertEquals("foo", labels.get("b").toString());
-		assertEquals("C", labels.get("c").toString());
-	}
-
-	@Test public void testParseLabelsInNestedTree() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A (B C) (D E))");
-		Map labels = new HashMap();
-		boolean valid = wiz.parse(t, "(%a:A (%b:B %c:C) (%d:D %e:E) )", labels);
-		assertTrue(valid);
-		assertEquals("A", labels.get("a").toString());
-		assertEquals("B", labels.get("b").toString());
-		assertEquals("C", labels.get("c").toString());
-		assertEquals("D", labels.get("d").toString());
-		assertEquals("E", labels.get("e").toString());
-	}
-
-	@Test public void testEquals() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t1 = (CommonTree)wiz.create("(A B C)");
-		CommonTree t2 = (CommonTree)wiz.create("(A B C)");
-		boolean same = TreeWizard.equals(t1, t2, adaptor);
-		assertTrue(same);
-	}
-
-	@Test public void testEqualsWithText() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t1 = (CommonTree)wiz.create("(A B[foo] C)");
-		CommonTree t2 = (CommonTree)wiz.create("(A B[foo] C)");
-		boolean same = TreeWizard.equals(t1, t2, adaptor);
-		assertTrue(same);
-	}
-	
-	@Test public void testEqualsWithMismatchedText() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t1 = (CommonTree)wiz.create("(A B[foo] C)");
-		CommonTree t2 = (CommonTree)wiz.create("(A B C)");
-		boolean same = TreeWizard.equals(t1, t2, adaptor);
-		assertTrue(!same);
-	}
-
-	@Test public void testFindPattern() throws Exception {
-		TreeWizard wiz = new TreeWizard(adaptor, tokens);
-		CommonTree t = (CommonTree)wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))");
-		final List subtrees = wiz.find(t, "(A B)");
-		List elements = subtrees;
-		String found = elements.toString();
-		String expecting = "[foo, big]";
-		assertEquals(expecting, found);
-	}
-	
-}
\ No newline at end of file
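The deleted TestTreeWizard suite exercises TreeWizard end to end: building trees from pattern strings, indexing and visiting nodes by type, and structural matching with wildcards and %label bindings. A minimal standalone sketch using only calls that appear in those tests:

    import java.util.HashMap;
    import java.util.Map;

    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeAdaptor;
    import org.antlr.runtime.tree.TreeWizard;

    public class TreeWizardDemo {
        // Token-type names are positional: type 5 is "A", 6 is "B", 7 is "C".
        static final String[] tokens = {"", "", "", "", "", "A", "B", "C"};

        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            TreeWizard wiz = new TreeWizard(adaptor, tokens);

            // Build ^(A B[foo] C) from a pattern string.
            CommonTree t = (CommonTree) wiz.create("(A B[foo] C)");
            System.out.println(t.toStringTree()); // (A foo C)

            // Structural match with labels; %b binds to the B node.
            Map labels = new HashMap();
            boolean matched = wiz.parse(t, "(%a:A %b:B %c:C)", labels);
            System.out.println(matched + " b=" + labels.get("b")); // true b=foo
        }
    }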
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTrees.java b/antlr-3.4/tool/src/test/java/org/antlr/test/TestTrees.java
deleted file mode 100644
index ad3e479..0000000
--- a/antlr-3.4/tool/src/test/java/org/antlr/test/TestTrees.java
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * [The "BSD license"]
- *  Copyright (c) 2010 Terence Parr
- *  All rights reserved.
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *  1. Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-package org.antlr.test;
-
-import org.antlr.runtime.CommonToken;
-import org.antlr.runtime.Token;
-import org.antlr.runtime.tree.CommonTree;
-import org.antlr.runtime.tree.CommonTreeAdaptor;
-import org.antlr.runtime.tree.Tree;
-import org.antlr.runtime.tree.TreeAdaptor;
-import org.junit.Test;
-
-public class TestTrees extends BaseTest {
-	TreeAdaptor adaptor = new CommonTreeAdaptor();
-	protected boolean debug = false;
-
-	static class V extends CommonTree {
-		public int x;
-		public V(Token t) { this.token = t;}
-		public V(int ttype, int x) { this.x=x; token=new CommonToken(ttype); }
-		public V(int ttype, Token t, int x) { token=t; this.x=x;}
-		public String toString() { return (token!=null?token.getText():"")+"<V>";}
-	}
-
-	@Test public void testSingleNode() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(101));
-		assertNull(t.parent);
-		assertEquals(-1, t.childIndex);
-	}
-
-	@Test public void testTwoChildrenOfNilRoot() throws Exception {
-		CommonTree root_0 = (CommonTree)adaptor.nil();
-		CommonTree t = new V(101, 2);
-		CommonTree u = new V(new CommonToken(102,"102"));
-		adaptor.addChild(root_0, t);
-		adaptor.addChild(root_0, u);
-		assertNull(root_0.parent);
-		assertEquals(-1, root_0.childIndex);
-		assertEquals(0, t.childIndex);
-		assertEquals(1, u.childIndex);
-	}
-
-	@Test public void test4Nodes() throws Exception {
-		// ^(101 ^(102 103) 104)
-		CommonTree r0 = new CommonTree(new CommonToken(101));
-		r0.addChild(new CommonTree(new CommonToken(102)));
-		r0.getChild(0).addChild(new CommonTree(new CommonToken(103)));
-		r0.addChild(new CommonTree(new CommonToken(104)));
-
-		assertNull(r0.parent);
-		assertEquals(-1, r0.childIndex);
-	}
-
-	@Test public void testList() throws Exception {
-		// ^(nil 101 102 103)
-		CommonTree r0 = new CommonTree((Token)null);
-		CommonTree c0, c1, c2;
-		r0.addChild(c0=new CommonTree(new CommonToken(101)));
-		r0.addChild(c1=new CommonTree(new CommonToken(102)));
-		r0.addChild(c2=new CommonTree(new CommonToken(103)));
-
-		assertNull(r0.parent);
-		assertEquals(-1, r0.childIndex);
-		assertEquals(r0, c0.parent);
-		assertEquals(0, c0.childIndex);
-		assertEquals(r0, c1.parent);
-		assertEquals(1, c1.childIndex);		
-		assertEquals(r0, c2.parent);
-		assertEquals(2, c2.childIndex);
-	}
-
-	@Test public void testList2() throws Exception {
-		// Add child ^(nil 101 102 103) to root 5
-		// should pull 101 102 103 directly to become 5's child list
-		CommonTree root = new CommonTree(new CommonToken(5));
-
-		// child tree
-		CommonTree r0 = new CommonTree((Token)null);
-		CommonTree c0, c1, c2;
-		r0.addChild(c0=new CommonTree(new CommonToken(101)));
-		r0.addChild(c1=new CommonTree(new CommonToken(102)));
-		r0.addChild(c2=new CommonTree(new CommonToken(103)));
-
-		root.addChild(r0);
-
-		assertNull(root.parent);
-		assertEquals(-1, root.childIndex);
-		// check children of root all point at root
-		assertEquals(root, c0.parent);
-		assertEquals(0, c0.childIndex);
-		assertEquals(root, c0.parent);
-		assertEquals(1, c1.childIndex);
-		assertEquals(root, c0.parent);
-		assertEquals(2, c2.childIndex);
-	}
-
-	@Test public void testAddListToExistChildren() throws Exception {
-		// Add child ^(nil 101 102 103) to root ^(5 6)
-		// should add 101 102 103 to end of 5's child list
-		CommonTree root = new CommonTree(new CommonToken(5));
-		root.addChild(new CommonTree(new CommonToken(6)));
-
-		// child tree
-		CommonTree r0 = new CommonTree((Token)null);
-		CommonTree c0, c1, c2;
-		r0.addChild(c0=new CommonTree(new CommonToken(101)));
-		r0.addChild(c1=new CommonTree(new CommonToken(102)));
-		r0.addChild(c2=new CommonTree(new CommonToken(103)));
-
-		root.addChild(r0);
-
-		assertNull(root.parent);
-		assertEquals(-1, root.childIndex);
-		// check children of root all point at root
-		assertEquals(root, c0.parent);
-		assertEquals(1, c0.childIndex);
-		assertEquals(root, c0.parent);
-		assertEquals(2, c1.childIndex);
-		assertEquals(root, c0.parent);
-		assertEquals(3, c2.childIndex);
-	}
-
-	@Test public void testDupTree() throws Exception {
-		// ^(101 ^(102 103 ^(106 107) ) 104 105)
-		CommonTree r0 = new CommonTree(new CommonToken(101));
-		CommonTree r1 = new CommonTree(new CommonToken(102));
-		r0.addChild(r1);
-		r1.addChild(new CommonTree(new CommonToken(103)));
-		Tree r2 = new CommonTree(new CommonToken(106));
-		r2.addChild(new CommonTree(new CommonToken(107)));
-		r1.addChild(r2);
-		r0.addChild(new CommonTree(new CommonToken(104)));
-		r0.addChild(new CommonTree(new CommonToken(105)));
-
-		CommonTree dup = (CommonTree)(new CommonTreeAdaptor()).dupTree(r0);
-
-		assertNull(dup.parent);
-		assertEquals(-1, dup.childIndex);
-		dup.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testBecomeRoot() throws Exception {
-		// 5 becomes new root of ^(nil 101 102 103)
-		CommonTree newRoot = new CommonTree(new CommonToken(5));
-
-		CommonTree oldRoot = new CommonTree((Token)null);
-		oldRoot.addChild(new CommonTree(new CommonToken(101)));
-		oldRoot.addChild(new CommonTree(new CommonToken(102)));
-		oldRoot.addChild(new CommonTree(new CommonToken(103)));
-
-		TreeAdaptor adaptor = new CommonTreeAdaptor();
-		adaptor.becomeRoot(newRoot, oldRoot);
-		newRoot.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testBecomeRoot2() throws Exception {
-		// 5 becomes new root of ^(101 102 103)
-		CommonTree newRoot = new CommonTree(new CommonToken(5));
-
-		CommonTree oldRoot = new CommonTree(new CommonToken(101));
-		oldRoot.addChild(new CommonTree(new CommonToken(102)));
-		oldRoot.addChild(new CommonTree(new CommonToken(103)));
-
-		TreeAdaptor adaptor = new CommonTreeAdaptor();
-		adaptor.becomeRoot(newRoot, oldRoot);
-		newRoot.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testBecomeRoot3() throws Exception {
-		// ^(nil 5) becomes new root of ^(nil 101 102 103)
-		CommonTree newRoot = new CommonTree((Token)null);
-		newRoot.addChild(new CommonTree(new CommonToken(5)));
-
-		CommonTree oldRoot = new CommonTree((Token)null);
-		oldRoot.addChild(new CommonTree(new CommonToken(101)));
-		oldRoot.addChild(new CommonTree(new CommonToken(102)));
-		oldRoot.addChild(new CommonTree(new CommonToken(103)));
-
-		TreeAdaptor adaptor = new CommonTreeAdaptor();
-		adaptor.becomeRoot(newRoot, oldRoot);
-		newRoot.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testBecomeRoot5() throws Exception {
-		// ^(nil 5) becomes new root of ^(101 102 103)
-		CommonTree newRoot = new CommonTree((Token)null);
-		newRoot.addChild(new CommonTree(new CommonToken(5)));
-
-		CommonTree oldRoot = new CommonTree(new CommonToken(101));
-		oldRoot.addChild(new CommonTree(new CommonToken(102)));
-		oldRoot.addChild(new CommonTree(new CommonToken(103)));
-
-		TreeAdaptor adaptor = new CommonTreeAdaptor();
-		adaptor.becomeRoot(newRoot, oldRoot);
-		newRoot.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testBecomeRoot6() throws Exception {
-		// emulates construction of ^(5 6)
-		CommonTree root_0 = (CommonTree)adaptor.nil();
-		CommonTree root_1 = (CommonTree)adaptor.nil();
-		root_1 = (CommonTree)adaptor.becomeRoot(new CommonTree(new CommonToken(5)), root_1);
-
-		adaptor.addChild(root_1, new CommonTree(new CommonToken(6)));
-
-		adaptor.addChild(root_0, root_1);
-
-		root_0.sanityCheckParentAndChildIndexes();
-	}
-
-	// Test replaceChildren
-
-	@Test public void testReplaceWithNoChildren() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(101));
-		CommonTree newChild = new CommonTree(new CommonToken(5));
-		boolean error = false;
-		try {
-			t.replaceChildren(0, 0, newChild);
-		}
-		catch (IllegalArgumentException iae) {
-			error = true;
-		}
-		assertTrue(error);
-	}
-
-	@Test public void testReplaceWithOneChildren() throws Exception {
-		// assume token type 99 and use text
-		CommonTree t = new CommonTree(new CommonToken(99,"a"));
-		CommonTree c0 = new CommonTree(new CommonToken(99, "b"));
-		t.addChild(c0);
-
-		CommonTree newChild = new CommonTree(new CommonToken(99, "c"));
-		t.replaceChildren(0, 0, newChild);
-		String expecting = "(a c)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testReplaceInMiddle() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(99, "a"));
-		t.addChild(new CommonTree(new CommonToken(99, "b")));
-		t.addChild(new CommonTree(new CommonToken(99, "c"))); // index 1
-		t.addChild(new CommonTree(new CommonToken(99, "d")));
-
-		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
-		t.replaceChildren(1, 1, newChild);
-		String expecting = "(a b x d)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testReplaceAtLeft() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(99, "a"));
-		t.addChild(new CommonTree(new CommonToken(99, "b"))); // index 0
-		t.addChild(new CommonTree(new CommonToken(99, "c")));
-		t.addChild(new CommonTree(new CommonToken(99, "d")));
-
-		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
-		t.replaceChildren(0, 0, newChild);
-		String expecting = "(a x c d)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testReplaceAtRight() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(99, "a"));
-		t.addChild(new CommonTree(new CommonToken(99, "b")));
-		t.addChild(new CommonTree(new CommonToken(99, "c")));
-		t.addChild(new CommonTree(new CommonToken(99, "d"))); // index 2
-
-		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
-		t.replaceChildren(2, 2, newChild);
-		String expecting = "(a b c x)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testReplaceOneWithTwoAtLeft() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(99, "a"));
-		t.addChild(new CommonTree(new CommonToken(99, "b")));
-		t.addChild(new CommonTree(new CommonToken(99, "c")));
-		t.addChild(new CommonTree(new CommonToken(99, "d")));
-
-		CommonTree newChildren = (CommonTree)adaptor.nil();
-		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
-		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
-
-		t.replaceChildren(0, 0, newChildren);
-		String expecting = "(a x y c d)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testReplaceOneWithTwoAtRight() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(99, "a"));
-		t.addChild(new CommonTree(new CommonToken(99, "b")));
-		t.addChild(new CommonTree(new CommonToken(99, "c")));
-		t.addChild(new CommonTree(new CommonToken(99, "d")));
-
-		CommonTree newChildren = (CommonTree)adaptor.nil();
-		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
-		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
-
-		t.replaceChildren(2, 2, newChildren);
-		String expecting = "(a b c x y)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testReplaceOneWithTwoInMiddle() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(99, "a"));
-		t.addChild(new CommonTree(new CommonToken(99, "b")));
-		t.addChild(new CommonTree(new CommonToken(99, "c")));
-		t.addChild(new CommonTree(new CommonToken(99, "d")));
-
-		CommonTree newChildren = (CommonTree)adaptor.nil();
-		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
-		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
-
-		t.replaceChildren(1, 1, newChildren);
-		String expecting = "(a b x y d)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testReplaceTwoWithOneAtLeft() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(99, "a"));
-		t.addChild(new CommonTree(new CommonToken(99, "b")));
-		t.addChild(new CommonTree(new CommonToken(99, "c")));
-		t.addChild(new CommonTree(new CommonToken(99, "d")));
-
-		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
-
-		t.replaceChildren(0, 1, newChild);
-		String expecting = "(a x d)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testReplaceTwoWithOneAtRight() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(99, "a"));
-		t.addChild(new CommonTree(new CommonToken(99, "b")));
-		t.addChild(new CommonTree(new CommonToken(99, "c")));
-		t.addChild(new CommonTree(new CommonToken(99, "d")));
-
-		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
-
-		t.replaceChildren(1, 2, newChild);
-		String expecting = "(a b x)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testReplaceAllWithOne() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(99, "a"));
-		t.addChild(new CommonTree(new CommonToken(99, "b")));
-		t.addChild(new CommonTree(new CommonToken(99, "c")));
-		t.addChild(new CommonTree(new CommonToken(99, "d")));
-
-		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
-
-		t.replaceChildren(0, 2, newChild);
-		String expecting = "(a x)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-
-	@Test public void testReplaceAllWithTwo() throws Exception {
-		CommonTree t = new CommonTree(new CommonToken(99, "a"));
-		t.addChild(new CommonTree(new CommonToken(99, "b")));
-		t.addChild(new CommonTree(new CommonToken(99, "c")));
-		t.addChild(new CommonTree(new CommonToken(99, "d")));
-
-		CommonTree newChildren = (CommonTree)adaptor.nil();
-		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
-		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
-
-		t.replaceChildren(0, 2, newChildren);
-		String expecting = "(a x y)";
-		assertEquals(expecting, t.toStringTree());
-		t.sanityCheckParentAndChildIndexes();
-	}
-}
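A recurring idiom in the deleted TestTrees cases is the nil-rooted list tree: a CommonTree built from a null token acts as a flat child list until becomeRoot hoists a real node over it. A minimal sketch of that idiom, reusing only the adaptor calls shown above:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class NilRootDemo {
        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();

            // A nil-rooted tree is just a flat list of children.
            CommonTree list = (CommonTree) adaptor.nil();
            list.addChild(new CommonTree(new CommonToken(99, "b")));
            list.addChild(new CommonTree(new CommonToken(99, "c")));

            // becomeRoot hoists "a" over the list, yielding ^(a b c).
            CommonTree root = (CommonTree) adaptor.becomeRoot(
                    new CommonTree(new CommonToken(99, "a")), list);
            System.out.println(root.toStringTree()); // (a b c)
            root.sanityCheckParentAndChildIndexes();  // parent/childIndex bookkeeping stays consistent
        }
    }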
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/Readme.txt b/antlr-ant/main/antlr3-task/Readme.txt
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/Readme.txt
rename to antlr-ant/main/antlr3-task/Readme.txt
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/antlr3-src/org/apache/tools/ant/antlr/ANTLR3.java b/antlr-ant/main/antlr3-task/antlr3-src/org/apache/tools/ant/antlr/ANTLR3.java
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/antlr3-src/org/apache/tools/ant/antlr/ANTLR3.java
rename to antlr-ant/main/antlr3-task/antlr3-src/org/apache/tools/ant/antlr/ANTLR3.java
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/antlr3-src/org/apache/tools/ant/antlr/antlib.xml b/antlr-ant/main/antlr3-task/antlr3-src/org/apache/tools/ant/antlr/antlib.xml
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/antlr3-src/org/apache/tools/ant/antlr/antlib.xml
rename to antlr-ant/main/antlr3-task/antlr3-src/org/apache/tools/ant/antlr/antlib.xml
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/antlr3-task.doc b/antlr-ant/main/antlr3-task/antlr3-task.doc
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/antlr3-task.doc
rename to antlr-ant/main/antlr3-task/antlr3-task.doc
Binary files differ
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/antlr3-task.htm b/antlr-ant/main/antlr3-task/antlr3-task.htm
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/antlr3-task.htm
rename to antlr-ant/main/antlr3-task/antlr3-task.htm
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/antlr3.jar b/antlr-ant/main/antlr3-task/antlr3.jar
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/antlr3.jar
rename to antlr-ant/main/antlr3-task/antlr3.jar
Binary files differ
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/composite-java_build.zip b/antlr-ant/main/antlr3-task/composite-java_build.zip
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/composite-java_build.zip
rename to antlr-ant/main/antlr3-task/composite-java_build.zip
Binary files differ
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/composite-netbeans.zip b/antlr-ant/main/antlr3-task/composite-netbeans.zip
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/composite-netbeans.zip
rename to antlr-ant/main/antlr3-task/composite-netbeans.zip
Binary files differ
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/d2u.zip b/antlr-ant/main/antlr3-task/d2u.zip
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/d2u.zip
rename to antlr-ant/main/antlr3-task/d2u.zip
Binary files differ
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/polydiff-netbeans.zip b/antlr-ant/main/antlr3-task/polydiff-netbeans.zip
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/polydiff-netbeans.zip
rename to antlr-ant/main/antlr3-task/polydiff-netbeans.zip
Binary files differ
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/polydiff_build.zip b/antlr-ant/main/antlr3-task/polydiff_build.zip
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/polydiff_build.zip
rename to antlr-ant/main/antlr3-task/polydiff_build.zip
Binary files differ
diff --git a/antlr-3.4/antlr-ant/main/antlr3-task/simplecTreeParser.zip b/antlr-ant/main/antlr3-task/simplecTreeParser.zip
similarity index 100%
rename from antlr-3.4/antlr-ant/main/antlr3-task/simplecTreeParser.zip
rename to antlr-ant/main/antlr3-task/simplecTreeParser.zip
Binary files differ
diff --git a/antlr-3.4/tool/antlr.config b/antlr-complete/antlr.config
similarity index 100%
rename from antlr-3.4/tool/antlr.config
rename to antlr-complete/antlr.config
diff --git a/antlr-complete/pom.xml b/antlr-complete/pom.xml
new file mode 100644
index 0000000..aad3c68
--- /dev/null
+++ b/antlr-complete/pom.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>antlr-complete</artifactId>
+    <packaging>jar</packaging>
+
+    <name>ANTLR 3 Complete</name>
+    <description>Complete distribution for ANTLR 3</description>
+
+  <!--
+
+    Inherit from the ANTLR master pom, which tells us what
+    version we are and allows us to inherit dependencies
+    and so on.
+
+    -->
+    <parent>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr-master</artifactId>
+        <version>3.5.2</version>
+    </parent>
+
+    <url>http://antlr.org/</url>
+
+    <!--
+        The complete distribution includes the following modules and their dependencies:
+            ANTLR 3 Tool
+            ANTLR 3 Runtime
+            gUnit for ANTLR 3
+            StringTemplate 4 (dependency of code generator in the ANTLR 3 Tool)
+            StringTemplate 3 (dependency of grammars with output=template)
+            ANTLR 2.7.7 (dependency of template parser in StringTemplate 3)
+    -->
+    <dependencies>
+
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr</artifactId>
+            <version>${project.version}</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr-runtime</artifactId>
+            <version>${project.version}</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>gunit</artifactId>
+            <version>${project.version}</version>
+            <scope>compile</scope>
+        </dependency>
+
+    </dependencies>
+
+    <build>
+
+        <plugins>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <version>2.0</version>
+                <configuration>
+                    <minimizeJar>false</minimizeJar>
+                    <createSourcesJar>true</createSourcesJar>
+                    <filters>
+                        <filter>
+                            <artifact>org.antlr:antlr-complete</artifact>
+                            <includes>
+                                <include>META-INF/**</include>
+                            </includes>
+                        </filter>
+                    </filters>
+                    <transformers>
+                        <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                            <mainClass>org.antlr.Tool</mainClass>
+                        </transformer>
+                    </transformers>
+                </configuration>
+                <executions>
+                    <execution>
+                        <id>complete-no-st3</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <createDependencyReducedPom>false</createDependencyReducedPom>
+                            <shadedArtifactAttached>true</shadedArtifactAttached>
+                            <shadedClassifierName>no-st3</shadedClassifierName>
+                            <filters>
+                                <filter>
+                                    <artifact>antlr:antlr</artifact>
+                                    <excludes>
+                                        <exclude>**</exclude>
+                                    </excludes>
+                                </filter>
+                                <filter>
+                                    <artifact>org.antlr:stringtemplate</artifact>
+                                    <excludes>
+                                        <exclude>**</exclude>
+                                    </excludes>
+                                </filter>
+                            </filters>
+                        </configuration>
+                    </execution>
+
+                    <execution>
+                        <id>complete</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <createDependencyReducedPom>false</createDependencyReducedPom>
+                            <shadedArtifactAttached>false</shadedArtifactAttached>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+                <configuration>
+                    <includeDependencySources>true</includeDependencySources>
+                </configuration>
+            </plugin>
+        </plugins>
+
+    </build>
+
+</project>
diff --git a/antlr-3.4/antlr3-maven-archetype/antlr.config b/antlr.config
similarity index 100%
copy from antlr-3.4/antlr3-maven-archetype/antlr.config
copy to antlr.config
diff --git a/antlr-3.4/antlr3-maven-archetype/antlr.config b/antlr3-maven-archetype/antlr.config
similarity index 100%
rename from antlr-3.4/antlr3-maven-archetype/antlr.config
rename to antlr3-maven-archetype/antlr.config
diff --git a/antlr3-maven-archetype/pom.xml b/antlr3-maven-archetype/pom.xml
new file mode 100644
index 0000000..c4b084b
--- /dev/null
+++ b/antlr3-maven-archetype/pom.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.antlr</groupId>
+    <artifactId>antlr3-maven-archetype</artifactId>
+    <packaging>maven-archetype</packaging>
+    <name>ANTLR 3 Maven Archetype</name>
+    <description>ANTLR 3 Maven Archetype</description>
+    
+
+    <!--
+
+    Inherit from the ANTLR master pom, which tells us what
+    version we are and allows us to inherit dependencies
+    and so on.
+    -->
+    <parent>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr-master</artifactId>
+        <version>3.5.2</version>
+    </parent>
+
+  <build>
+
+    <extensions>
+
+      <extension>
+        <groupId>org.apache.maven.archetype</groupId>
+        <artifactId>archetype-packaging</artifactId>
+        <version>2.2</version>
+      </extension>
+
+    </extensions>
+
+        <plugins>
+
+            <plugin>
+                <artifactId>maven-archetype-plugin</artifactId>
+                <version>2.2</version>
+                <extensions>true</extensions>
+            </plugin>
+
+        </plugins>
+
+  </build>
+
+</project>
diff --git a/antlr-3.4/antlr3-maven-archetype/src/main/resources/META-INF/maven/archetype-metadata.xml b/antlr3-maven-archetype/src/main/resources/META-INF/maven/archetype-metadata.xml
similarity index 100%
rename from antlr-3.4/antlr3-maven-archetype/src/main/resources/META-INF/maven/archetype-metadata.xml
rename to antlr3-maven-archetype/src/main/resources/META-INF/maven/archetype-metadata.xml
diff --git a/antlr-3.4/antlr3-maven-archetype/src/main/resources/META-INF/maven/archetype.xml b/antlr3-maven-archetype/src/main/resources/META-INF/maven/archetype.xml
similarity index 100%
rename from antlr-3.4/antlr3-maven-archetype/src/main/resources/META-INF/maven/archetype.xml
rename to antlr3-maven-archetype/src/main/resources/META-INF/maven/archetype.xml
diff --git a/antlr3-maven-archetype/src/main/resources/archetype-resources/pom.xml b/antlr3-maven-archetype/src/main/resources/archetype-resources/pom.xml
new file mode 100644
index 0000000..d0509c9
--- /dev/null
+++ b/antlr3-maven-archetype/src/main/resources/archetype-resources/pom.xml
@@ -0,0 +1,182 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <!-- =======================================================================
+         A quickstart pom.xml that creates a sample project that uses ANTLR 3.x
+         grammars. You should replace the sample grammars in src/main/antlr3
+         with your own grammar files and use packages.
+
+         A .g file in
+          
+            src/main/antlr3/com/temporalwave
+
+          belongs in the package
+          
+            com.temporalwave
+
+         See http://antlr.org/antlr3-maven-plugin for more details.
+
+         This project produces both a jar file of the project and an executable
+         jar file that contains all the dependencies so you can run it standalone.
+         See below for more details.
+         
+         Archetype by Jim Idle (jimi@temporal-wave.com) - Oct 2009
+         Report bugs to the ANTLR interest list at http://www.antlr.org
+
+         Generated by antlr3-maven-archetype version 3.4.1-SNAPSHOT
+         =======================================================================
+      -->
+
+    <!-- This is your organization's normal group name
+         such as org.antlr
+         All the artifacts you create will be under this
+         group id.
+      -->
+    <groupId>${groupId}</groupId>
+
+    <!-- This is how maven knows your artifact
+      -->
+    <artifactId>${artifactId}</artifactId>
+
+    <!-- This is the human oriented name for the package
+         so you can call it anything you like
+      -->
+    <name>ANTLR3 project: ${package}</name>
+
+    <!-- This is the version of YOUR project -->
+    <version>${version}</version>
+
+    <packaging>jar</packaging>
+    <url>http://antlr.org</url>
+
+    <dependencies>
+
+        <!--
+          We need to have the ANTLR runtime jar when running and compiling.
+        -->
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr-runtime</artifactId>
+            <version>3.4.1-SNAPSHOT</version>
+            <scope>compile</scope>
+        </dependency>
+
+    </dependencies>
+
+  <!--
+
+    Tell Maven which other artifacts we need in order to
+    build with the ANTLR Tool. Here we also make the default
+    goal be install so that you can just type mvn at the command
+    line instead of mvn install. And we add the java compiler plugin
+    for convenience to show how you can use 1.6 source files but
+    generate 1.4 compatible .class files (as few people seem to
+    know about the jsr14 target).
+    -->
+    <build>
+
+        <defaultGoal>install</defaultGoal>
+
+        <plugins>
+
+            <plugin>
+
+                <groupId>org.antlr</groupId>
+                <artifactId>antlr3-maven-plugin</artifactId>
+                <version>3.4.1-SNAPSHOT</version>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>antlr</goal>
+                        </goals>
+                    </execution>
+                </executions>
+
+            </plugin>
+
+            <!--
+              Strictly speaking, we did not need to generate this for you from
+              the prototype, but we use it to illustrate how you can get
+              the JDK 6 Java compiler to accept 1.5 or 1.6 targeted source code
+              but produce class files that are compatible with JRE 1.4. As
+              Michael Caine might say, "Not a lot of people know that!"
+              -->
+            <plugin>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>2.0.2</version>
+                <configuration>
+                    <source>1.6</source>
+                    <target>jsr14</target>
+                    <sourceDirectory>src</sourceDirectory>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                
+                <!--
+
+                    Build an uber-jar that is packaged with all the other dependencies,
+                    such as the antlr-runtime and so on. This will be useful
+                    for developers, who then do not need to download anything else or
+                    remember that they need antlr.jar in their CLASSPATH and so
+                    on.
+
+                    You can delete this plugin of course and you will then
+                    get a jar file with only the code generated and included
+                    directly in this project. With this plugin though you will
+                    find that when you build with:
+
+                       mvn install
+
+                    There will be an executable jar generated. You can run this
+                    as:
+
+                      java -jar ${artifactId}-${version}-jar-with-dependencies.jar demosource.dmo
+
+                    assuming you have a file called demosource.dmo to attempt a parse.
+
+                  -->
+                <artifactId>maven-assembly-plugin</artifactId>
+
+                <configuration>
+                    <descriptorRefs>
+                        <descriptorRef>jar-with-dependencies</descriptorRef>
+                    </descriptorRefs>
+                    <!--
+
+                        Specify that we want the resulting jar to be executable
+                        via java -jar, which we do by modifying the manifest
+                        of course.
+                      -->
+                    <archive>
+
+                        <manifest>
+                            <mainClass>${package}.Main</mainClass>
+                        </manifest>
+                    </archive>
+
+                </configuration>
+
+                <!--
+
+                    We don't want to have to specifically ask for the uber jar, so we attach the
+                    running of this plugin to the execution of the package life-cycle
+                    phase.
+                  -->
+                <executions>
+                    <execution>
+                        <id>make-assembly</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>attached</goal>
+                        </goals>
+                    </execution>
+                </executions>
+
+            </plugin>
+
+        </plugins>
+    </build>
+
+</project>
diff --git a/antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TLexer.g b/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TLexer.g
similarity index 100%
rename from antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TLexer.g
rename to antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TLexer.g
diff --git a/antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TParser.g b/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TParser.g
similarity index 100%
rename from antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TParser.g
rename to antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TParser.g
diff --git a/antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TTree.g b/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TTree.g
similarity index 100%
rename from antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TTree.g
rename to antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/TTree.g
diff --git a/antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/imports/Ruleb.g b/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/imports/Ruleb.g
similarity index 100%
rename from antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/imports/Ruleb.g
rename to antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/antlr3/imports/Ruleb.g
diff --git a/antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/AbstractTLexer.java b/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/AbstractTLexer.java
similarity index 100%
rename from antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/AbstractTLexer.java
rename to antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/AbstractTLexer.java
diff --git a/antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/AbstractTParser.java b/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/AbstractTParser.java
similarity index 100%
rename from antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/AbstractTParser.java
rename to antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/AbstractTParser.java
diff --git a/antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/Main.java b/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/Main.java
similarity index 100%
rename from antlr-3.4/antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/Main.java
rename to antlr3-maven-archetype/src/main/resources/archetype-resources/src/main/java/Main.java
diff --git a/antlr-3.4/gunit/antlr.config b/antlr3-maven-plugin/antlr.config
similarity index 100%
copy from antlr-3.4/gunit/antlr.config
copy to antlr3-maven-plugin/antlr.config
diff --git a/antlr3-maven-plugin/pom.xml b/antlr3-maven-plugin/pom.xml
new file mode 100644
index 0000000..6fc1770
--- /dev/null
+++ b/antlr3-maven-plugin/pom.xml
@@ -0,0 +1,233 @@
+<!--
+
+ [The "BSD license"]
+
+ ANTLR        - Copyright (c) 2005-2010 Terence Parr
+ Maven Plugin - Copyright (c) 2009      Jim Idle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  -->
+
+
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <!-- Maven model we are inheriting from
+      -->
+    <modelVersion>4.0.0</modelVersion>
+
+    <!--
+
+     Now that the ANTLR project has adopted Maven with a vengeance,
+     all ANTLR tools will be grouped under org.antlr and will be
+     controlled by a project member.
+     -->
+    <groupId>org.antlr</groupId>
+
+
+    <!--
+
+     This is the ANTLR plugin for ANTLR version 3.1.3 and above. It might
+     have been best to change the name of the plugin, as the 3.1.2 plugins
+     behave a little differently; however, for the sake of one transitional
+     phase to a much better plugin, it was decided that the name should
+     remain the same.
+      -->
+    <artifactId>antlr3-maven-plugin</artifactId>
+    <packaging>maven-plugin</packaging>
+
+    <parent>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr-master</artifactId>
+        <version>3.5.2</version>
+    </parent>
+
+    <name>ANTLR 3 Maven plugin</name>
+    <prerequisites>
+        <maven>2.0</maven>
+    </prerequisites>
+
+    <!--
+     Where does our actual project live on the interwebs.
+      -->
+    <url>http://antlr.org</url>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <description>
+
+This is the brand new, re-written from scratch plugin for ANTLR v3.
+
+Previous valiant efforts all suffered from being unable to modify the ANTLR Tool
+itself to provide support not just for Maven oriented things but any other tool
+that might wish to invoke ANTLR without resorting to the command line interface.
+
+Rather than try to shoe-horn new code into the existing Mojo (in fact I think that
+by incorporating a patch supplied by someone I ended up with two versions of the
+Mojo), I elected to rewrite everything from scratch, including the documentation, so
+that we might end up with a perfect Mojo that can do everything that ANTLR v3 supports
+such as imported grammar processing, proper support for library directories and
+locating token files from generated sources, and so on.
+
+In the end I decided to also change the ANTLR Tool.java code so that it
+would be the provider of all the things that a build tool needs, rather than
+delegating things to 5 different tools. So, things like dependencies, dependency
+sorting, option tracking, generating sources and so on are all folded back
+in to ANTLR's Tool.java code, where they belong, and they now provide a
+public interface to anyone that might want to interface with them.
+
+One other goal of this rewrite was to completely document the whole thing
+to death. Hence even this pom has more comments than functional elements,
+in case I get run over by a bus or fall off a cliff while skiing.
+
+Jim Idle - March 2009
+
+    </description>
+
+    <developers>
+
+        <developer>
+            <name>Jim Idle</name>
+            <url>http://www.temporal-wave.com</url>
+            <roles>
+                <role>Originator, version 3.1.3</role>
+            </roles>
+        </developer>
+
+        <developer>
+            <name>Terence Parr</name>
+            <url>http://antlr.org/wiki/display/~admin/Home</url>
+            <roles>
+                <role>Project lead - ANTLR</role>
+            </roles>
+        </developer>
+
+        <developer>
+            <name>David Holroyd</name>
+            <url>http://david.holroyd.me.uk/</url>
+            <roles>
+                <role>Originator - prior version</role>
+            </roles>
+        </developer>
+
+        <developer>
+            <name>Kenny MacDermid</name>
+            <url>mailto:kenny "at" kmdconsulting.ca</url>
+            <roles>
+                <role>Contributor - prior versions</role>
+            </roles>
+        </developer>
+
+    </developers>
+
+    <!-- ============================================================================= -->
+
+    <!--
+
+     What are we dependent on for the Mojos to execute? We need the
+     plugin API itself and of course we need the ANTLR Tool and runtime
+     and any of their dependencies, which we inherit. The Tool itself provides
+     us with all the dependencies, so we need only name it here.
+      -->
+    <dependencies>
+
+        <!--
+          The things we need to build the target language recognizer
+          -->
+        <dependency>
+            <groupId>org.apache.maven</groupId>
+            <artifactId>maven-plugin-api</artifactId>
+            <version>2.0</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.maven</groupId>
+            <artifactId>maven-project</artifactId>
+            <version>2.0</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.codehaus.plexus</groupId>
+            <artifactId>plexus-compiler-api</artifactId>
+            <version>2.0</version>
+        </dependency>
+
+        <!--
+         The version of ANTLR tool that this version of the plugin controls.
+         We have decided that this should be in lockstep with ANTLR itself, other
+         than the -1, -2, -3, etc. patch releases.
+          -->
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.maven.shared</groupId>
+            <artifactId>maven-plugin-testing-harness</artifactId>
+            <version>1.1</version>
+            <scope>test</scope>
+        </dependency>
+        
+    </dependencies>
+    
+    <build>
+
+        <plugins>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-site-plugin</artifactId>
+                <version>3.3</version>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-project-info-reports-plugin</artifactId>
+                <version>2.7</version>
+                <configuration>
+                    <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
+                </configuration>
+            </plugin>
+
+        </plugins>
+
+    </build>
+
+    <reporting>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-plugin-plugin</artifactId>
+                <version>3.2</version>
+            </plugin>
+        </plugins>
+    </reporting>
+</project>
diff --git a/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3ErrorLog.java b/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3ErrorLog.java
new file mode 100644
index 0000000..5a315da
--- /dev/null
+++ b/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3ErrorLog.java
@@ -0,0 +1,90 @@
+/**
+ [The "BSD licence"]
+
+ ANTLR        - Copyright (c) 2005-2008 Terence Parr
+ Maven Plugin - Copyright (c) 2009      Jim Idle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.mojo.antlr3;
+
+import org.antlr.tool.ANTLRErrorListener;
+import org.antlr.tool.Message;
+import org.antlr.tool.ToolMessage;
+import org.apache.maven.plugin.logging.Log;
+
+/**
+ * The Maven plexus container gives us a Log logging provider
+ * which we can use to install an error listener through which
+ * the ANTLR tool can report its errors.
+ */
+public class Antlr3ErrorLog implements ANTLRErrorListener {
+
+    private Log log;
+
+    /**
+     * Instantiate an ANTLR ErrorListener that communicates any messages
+     * it receives to the Maven error sink.
+     *
+     * @param log The Maven Error Log
+     */
+    public Antlr3ErrorLog(Log log) {
+        this.log = log;
+    }
+
+    /**
+     * Sends an informational message to the Maven log sink.
+     * @param message The message to send to Maven
+     */
+    public void info(String message) {
+        log.info(message);
+    }
+
+    /**
+     * Sends an error message from ANTLR analysis to the Maven Log sink.
+     *
+     * @param message The message to send to Maven.
+     */
+    public void error(Message message) {
+        log.error(message.toString());
+    }
+
+    /**
+     * Sends a warning message to the Maven log sink.
+     *
+     * @param message The warning message to send to Maven.
+     */
+    public void warning(Message message) {
+        log.warn(message.toString());
+    }
+
+    /**
+     * Sends an error message from the ANTLR tool to the Maven Log sink.
+     * @param toolMessage The tool message to send to Maven.
+     */
+    public void error(ToolMessage toolMessage) {
+        log.error(toolMessage.toString());
+    }
+}
diff --git a/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3Mojo.java b/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3Mojo.java
new file mode 100644
index 0000000..6c18c55
--- /dev/null
+++ b/antlr3-maven-plugin/src/main/java/org/antlr/mojo/antlr3/Antlr3Mojo.java
@@ -0,0 +1,497 @@
+/**
+[The "BSD licence"]
+
+ANTLR        - Copyright (c) 2005-2008 Terence Parr
+Maven Plugin - Copyright (c) 2009      Jim Idle
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* ========================================================================
+ * This is the definitive ANTLR3 Mojo set. All other sets are belong to us.
+ */
+package org.antlr.mojo.antlr3;
+
+import org.apache.maven.plugin.AbstractMojo;
+import org.apache.maven.plugin.MojoExecutionException;
+import org.apache.maven.plugin.MojoFailureException;
+import org.apache.maven.project.MavenProject;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import org.antlr.Tool;
+import org.apache.maven.plugin.logging.Log;
+import org.codehaus.plexus.compiler.util.scan.InclusionScanException;
+import org.codehaus.plexus.compiler.util.scan.SimpleSourceInclusionScanner;
+import org.codehaus.plexus.compiler.util.scan.SourceInclusionScanner;
+import org.codehaus.plexus.compiler.util.scan.mapping.SourceMapping;
+import org.codehaus.plexus.compiler.util.scan.mapping.SuffixMapping;
+
+/**
+ * Parses ANTLR grammar files {@code *.g} and transforms them into Java source
+ * files.
+ *
+ * @goal antlr
+ * @phase generate-sources
+ * @requiresDependencyResolution compile
+ * @requiresProject true
+ * 
+ * @author <a href="mailto:jimi@temporal-wave.com">Jim Idle</a>
+ */
+public class Antlr3Mojo
+        extends AbstractMojo {
+
+    // First, let's deal with the options that the ANTLR tool itself
+    // can be configured by.
+    //
+    /**
+     * If set to true, then after the tool has processed an input grammar file
+     * it will report various statistics about the parser, such as information
+     * on cyclic DFAs, which rules may use backtracking, and so on.
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean report;
+    /**
+     * If set to true, then the ANTLR tool will print a version of the input
+     * grammar(s) which are stripped of any embedded actions.
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean printGrammar;
+    /**
+     * If set to true, then the code generated by the ANTLR code generator will
+     * be set to debug mode. This means that when run, the code will 'hang' and
+     * wait for a debug connection on a TCP port (49100 by default).
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean debug;
+    /**
+     * If set to true, then the generated parser will compute and report profile
+     * information at runtime.
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean profile;
+    /**
+     * If set to true, then the ANTLR tool will generate a description of the
+     * NFA for each rule in <a href="http://www.graphviz.org">Dot format</a>
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean nfa;
+    /**
+     * If set to true, then the ANTLR tool will generate a description of the
+     * DFA for each decision in the grammar in
+     * <a href="http://www.graphviz.org">Dot format</a>.
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean dfa;
+    /**
+     * If set to true, the generated parser code will log rule entry and exit
+     * points to stdout ({@link System#out} for the Java target) as an aid to
+     * debugging.
+     *
+     * @parameter default-value="false"
+     */
+    protected boolean trace;
+    /**
+     * If this parameter is set, it indicates that any warning or error messages
+     * returned by ANTLR should be formatted in the specified way. Currently,
+     * ANTLR supports the built-in formats {@code antlr}, {@code gnu} and
+     * {@code vs2005}.
+     *
+     * @parameter default-value="antlr"
+     */
+    protected String messageFormat;
+    /**
+     * If set to true, then ANTLR will report verbose messages during the code
+     * generation process. This includes the names of files, the version of
+     * ANTLR, and more.
+     *
+     * @parameter default-value="true"
+     */
+    protected boolean verbose;
+
+    /**
+     * The maximum number of alternatives allowed in an inline switch statement.
+     * Beyond this, ANTLR will not generate a switch statement for the DFA.
+     *
+     * @parameter default-value="300"
+     */
+    private int maxSwitchCaseLabels;
+
+    /**
+     * The minimum number of alternatives for ANTLR to generate a switch
+     * statement. For decisions with fewer alternatives, an if/else if/else
+     * statement will be used instead.
+     *
+     * @parameter default-value="3"
+     */
+    private int minSwitchAlts;
+
+    /* --------------------------------------------------------------------
+     * The following are Maven specific parameters, rather than specific
+     * options that the ANTLR tool can use.
+     */
+
+    /**
+     * Provides an explicit list of all the grammars that should be included in
+     * the generate phase of the plugin. Note that the plugin is smart enough to
+     * realize that imported grammars should be included but not acted upon
+     * directly by the ANTLR Tool.
+     * <p>
+     * A set of Ant-like inclusion patterns used to select files from the source
+     * directory for processing. By default, the pattern <code>**&#47;*.g</code>
+     * is used to select grammar files.</p>
+     *
+     * @parameter
+     */
+    protected Set<String> includes = new HashSet<String>();
+    /**
+     * A set of Ant-like exclusion patterns used to prevent certain files from
+     * being processed. By default, this set is empty such that no files are
+     * excluded.
+     *
+     * @parameter
+     */
+    protected Set<String> excludes = new HashSet<String>();
+    /**
+     * The current Maven project.
+     *
+     * @parameter expression="${project}"
+     * @required
+     * @readonly
+     */
+    protected MavenProject project;
+    /**
+     * The directory where the ANTLR grammar files ({@code *.g}) are located.
+     *
+     * @parameter default-value="${basedir}/src/main/antlr3"
+     */
+    private File sourceDirectory;
+    /**
+     * The directory where the parser files generated by ANTLR will be stored.
+     * The directory will be registered as a compile source root of the project
+     * such that the generated files will participate in later build phases like
+     * compiling and packaging.
+     *
+     * @parameter default-value="${project.build.directory}/generated-sources/antlr3"
+     * @required
+     */
+    private File outputDirectory;
+    /**
+     * Location for imported token files, e.g. {@code *.tokens} and imported
+     * grammars. Note that ANTLR will not try to process grammars that it finds
+     * to be imported into other grammars (in the same processing session).
+     *
+     * @parameter default-value="${basedir}/src/main/antlr3/imports"
+     */
+    private File libDirectory;
+
+    public File getSourceDirectory() {
+        return sourceDirectory;
+    }
+
+    public File getOutputDirectory() {
+        return outputDirectory;
+    }
+
+    public File getLibDirectory() {
+        return libDirectory;
+    }
+
+    void addSourceRoot(File outputDir) {
+        project.addCompileSourceRoot(outputDir.getPath());
+    }
+    /**
+     * An instance of the ANTLR build tool.
+     */
+    protected Tool tool;
+
+    /**
+     * The main entry point for this Mojo, it is responsible for converting
+     * ANTLR 3.x grammars into the target language specified by the grammar.
+     *
+     * @throws MojoExecutionException if a configuration or grammar error causes
+     * the code generation process to fail
+     * @throws MojoFailureException if an instance of the ANTLR 3 {@link Tool}
+     * cannot be created
+     */
+    public void execute()
+            throws MojoExecutionException, MojoFailureException {
+
+        Log log = getLog();
+
+        // Check to see if the user asked for debug information, then dump all the
+        // parameters we have picked up if they did.
+        //
+        if (log.isDebugEnabled()) {
+
+            // Excludes
+            //
+            for (String e : excludes) {
+                log.debug("ANTLR: Exclude: " + e);
+            }
+
+            // Includes
+            //
+            for (String e : includes) {
+                log.debug("ANTLR: Include: " + e);
+            }
+
+            // Output location
+            //
+            log.debug("ANTLR: Output: " + outputDirectory);
+
+            // Library directory
+            //
+            log.debug("ANTLR: Library: " + libDirectory);
+
+            // Flags
+            //
+            log.debug("ANTLR: report              : " + report);
+            log.debug("ANTLR: printGrammar        : " + printGrammar);
+            log.debug("ANTLR: debug               : " + debug);
+            log.debug("ANTLR: profile             : " + profile);
+            log.debug("ANTLR: nfa                 : " + nfa);
+            log.debug("ANTLR: dfa                 : " + dfa);
+            log.debug("ANTLR: trace               : " + trace);
+            log.debug("ANTLR: messageFormat       : " + messageFormat);
+            log.debug("ANTLR: maxSwitchCaseLabels : " + maxSwitchCaseLabels);
+            log.debug("ANTLR: minSwitchAlts       : " + minSwitchAlts);
+            log.debug("ANTLR: verbose             : " + verbose);
+        }
+
+        // Ensure that the output directory path is intact so that
+        // ANTLR can just write into it.
+        //
+        File outputDir = getOutputDirectory();
+
+        if (!outputDir.exists()) {
+            outputDir.mkdirs();
+        }
+
+        // First thing we need is an instance of the ANTLR 3.1 build tool
+        //
+        try {
+            // ANTLR Tool build interface
+            //
+            tool = new Tool();
+        } catch (Exception e) {
+            log.error("The attempt to create the ANTLR build tool failed, see exception report for details");
+
+            throw new MojoFailureException("Jim failed you!");
+        }
+
+        // Next we need to set the options given to us in the pom into the
+        // tool instance we have created.
+        //
+        tool.setDebug(debug);
+        tool.setGenerate_DFA_dot(dfa);
+        tool.setGenerate_NFA_dot(nfa);
+        tool.setProfile(profile);
+        tool.setReport(report);
+        tool.setPrintGrammar(printGrammar);
+        tool.setTrace(trace);
+        tool.setVerbose(verbose);
+        tool.setMessageFormat(messageFormat);
+        tool.setMaxSwitchCaseLabels(maxSwitchCaseLabels);
+        tool.setMinSwitchAlts(minSwitchAlts);
+
+        // Where do we want ANTLR to produce its output? (Base directory)
+        //
+        if (log.isDebugEnabled())
+        {
+            log.debug("Output directory base will be " + outputDirectory.getAbsolutePath());
+        }
+        tool.setOutputDirectory(outputDirectory.getAbsolutePath());
+
+        // Tell ANTLR that we always want the output files to be produced in the output directory
+        // using the same relative path as the input file was to the input directory.
+        //
+        tool.setForceRelativeOutput(true);
+
+        // Where do we want ANTLR to look for .tokens and import grammars?
+        //
+        tool.setLibDirectory(libDirectory.getAbsolutePath());
+
+        if (!sourceDirectory.exists()) {
+            if (log.isInfoEnabled()) {
+                log.info("No ANTLR grammars to compile in " + sourceDirectory.getAbsolutePath());
+            }
+            return;
+        } else {
+            if (log.isInfoEnabled()) {
+                log.info("ANTLR: Processing source directory " + sourceDirectory.getAbsolutePath());
+            }
+        }
+
+        // Set working directory for ANTLR to be the base source directory
+        //
+        tool.setInputDirectory(sourceDirectory.getAbsolutePath());
+
+        try {
+
+            // Now pick up all the files and process them with the Tool
+            //
+            processGrammarFiles(sourceDirectory, outputDirectory);
+
+        } catch (InclusionScanException ie) {
+
+            log.error(ie);
+            throw new MojoExecutionException("Fatal error occured while evaluating the names of the grammar files to analyze");
+
+        } catch (Exception e) {
+
+            getLog().error(e);
+            throw new MojoExecutionException(e.getMessage());
+        }
+
+
+
+        tool.process();
+
+        // If any of the grammar files caused errors but did not throw exceptions
+        // then we should have accumulated errors in the counts
+        //
+        if (tool.getNumErrors() > 0) {
+            throw new MojoExecutionException("ANTLR caught " + tool.getNumErrors() + " build errors.");
+        }
+
+        // All looks good, so we need to tell Maven about the sources that
+        // we just created.
+        //
+        if (project != null) {
+            // Tell Maven that there are some new source files underneath
+            // the output directory.
+            //
+            addSourceRoot(this.getOutputDirectory());
+        }
+
+    }
+
+
+    /**
+     * Scan the source directory for grammar files and add each one found to
+     * the ANTLR tool's list of grammars to process.
+     *
+     * @param sourceDirectory The directory containing the ANTLR grammar files
+     * @param outputDirectory The directory where ANTLR generated sources will be written
+     * @throws IOException if the grammar files cannot be read
+     * @throws InclusionScanException if the include/exclude scan of the source directory fails
+     */
+    private void processGrammarFiles(File sourceDirectory, File outputDirectory)
+            throws IOException, InclusionScanException {
+        // Which files under the source set should we be looking for as grammar files
+        //
+        SourceMapping mapping = new SuffixMapping("g", Collections.<String>emptySet());
+
+        // What are the sets of includes (defaulted or otherwise).
+        //
+        Set<String> includes = getIncludesPatterns();
+
+        // Now, to the excludes, we need to add the imports directory
+        // as this is autoscanned for imported grammars and so is auto-excluded from the
+        // set of grammar files we should be analyzing.
+        //
+        excludes.add("imports/**");
+
+        SourceInclusionScanner scan = new SimpleSourceInclusionScanner(includes, excludes);
+
+        scan.addSourceMapping(mapping);
+        Set<File> grammarFiles = scan.getIncludedSources(sourceDirectory, null);
+
+        if (grammarFiles.isEmpty()) {
+            if (getLog().isInfoEnabled()) {
+                getLog().info("No grammars to process");
+            }
+        } else {
+
+            // Tell the ANTLR tool that we want sorted build mode
+            //
+            tool.setMake(true);
+            
+            // Iterate each grammar file we were given and add it into the tool's list of
+            // grammars to process.
+            //
+            for (File grammar : grammarFiles) {
+
+                if (getLog().isDebugEnabled()) {
+                    getLog().debug("Grammar file '" + grammar.getPath() + "' detected.");
+                }
+
+
+                String relPath = findSourceSubdir(sourceDirectory, grammar.getPath()) + grammar.getName();
+
+                if (getLog().isDebugEnabled()) {
+                    getLog().debug("  ... relative path is: " + relPath);
+                }
+                tool.addGrammarFile(relPath);
+
+            }
+
+        }
+
+
+    }
+
+    public Set<String> getIncludesPatterns() {
+        if (includes == null || includes.isEmpty()) {
+            return Collections.singleton("**/*.g");
+        }
+        return includes;
+    }
+
+    /**
+     * Given the source directory File object and the full PATH to a
+     * grammar, produce the path to the named grammar file relative to
+     * the {@code sourceDirectory}. This will then allow ANTLR to
+     * produce output relative to the base of the output directory and
+     * reflect the input organization of the grammar files.
+     *
+     * @param sourceDirectory The source directory {@link File} object
+     * @param grammarFileName The full path to the input grammar file
+     * @return The path to the grammar file relative to the source directory
+     */
+    private String findSourceSubdir(File sourceDirectory, String grammarFileName) {
+        String srcPath = sourceDirectory.getPath() + File.separator;
+
+        if (!grammarFileName.startsWith(srcPath)) {
+            throw new IllegalArgumentException("expected " + grammarFileName + " to be prefixed with " + sourceDirectory);
+        }
+
+        File unprefixedGrammarFileName = new File(grammarFileName.substring(srcPath.length()));
+        if (unprefixedGrammarFileName.getParent() != null) {
+            return unprefixedGrammarFileName.getParent() + File.separator;
+        } else {
+            return "";
+        }
+    }
+}
diff --git a/antlr3-maven-plugin/src/site/apt/examples/import.apt b/antlr3-maven-plugin/src/site/apt/examples/import.apt
new file mode 100644
index 0000000..befd508
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/examples/import.apt
@@ -0,0 +1,8 @@
+Imported Grammar Files
+
+ In order to have the ANTLR plugin automatically locate and use grammars used
+ as imports in your main <<<.g>>> files, you need to place the imported grammar
+ files in the imports directory beneath the root directory of your grammar
+ files (which is <<<src/main/antlr3>>> by default of course).
+
+ For a default layout, place your import grammars in the directory: <<<src/main/antlr3/imports>>>
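+
+ As an illustrative sketch only (the grammar names below are hypothetical), a
+ project following the default layout and using one imported grammar might look
+ like this:
+
++--
+ src/main/
+      |
+      +--- antlr3/org/foo/bar/MyParser.g   main grammar that imports CommonRules
+      |
+      +--- antlr3/imports/CommonRules.g    imported grammar, located automatically
++--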
diff --git a/antlr3-maven-plugin/src/site/apt/examples/libraries.apt b/antlr3-maven-plugin/src/site/apt/examples/libraries.apt
new file mode 100644
index 0000000..822a9ac
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/examples/libraries.apt
@@ -0,0 +1,46 @@
+Libraries
+
+ The introduction of the import directive in a grammar allows reuse of common grammar files
+ as well as the ability to divide up functional components of large grammars. However, it has
+ caused some confusion in regard to the fact that generated vocabulary files (<<<*.tokens>>>) can also
+ be searched for with the <<<<libDirectory>>>> directive.
+
+ This has confused two separate functions and imposes a structure upon the layout of
+ your grammar files in certain cases. If you have grammars that both use the import
+ directive and also require the use of a vocabulary file then you will need to locate
+ the grammar that generates the <<<.tokens>>> file alongside the grammar that uses it. This
+ is because you will need to use the <<<<libDirectory>>>> directive to specify the
+ location of your imported grammars and ANTLR will not find any vocabulary files in
+ this directory.
+
+ The <<<.tokens>>> files for any grammars are generated within the same output directory structure
+ as the <<<.java>>> files. So, wherever the <<<.java>>> files are generated, you will also find the <<<.tokens>>>
+ files. ANTLR looks for <<<.tokens>>> files in both the <<<<libDirectory>>>> and the output directory
+ where it is placing the generated <<<.java>>> files. Hence when you locate the grammars that generate
+ <<<.tokens>>> files in the same source directory as the ones that use the <<<.tokens>>> files, then
+ the Maven plugin will find the expected <<<.tokens>>> files.
+
+ The <<<<libDirectory>>>> is specified like any other directory parameter in Maven. Here is an
+ example:
+
++--
+<plugin>
+    <groupId>org.antlr</groupId>
+    <artifactId>antlr3-maven-plugin</artifactId>
+    <version>${plugin.version}</version>
+
+    <executions>
+        <execution>
+            <configuration>
+                <goals>
+                    <goal>antlr</goal>
+                </goals>
+                <libDirectory>src/main/antlr_imports</libDirectory>
+            </configuration>
+        </execution>
+    </executions>
+</plugin>
++--
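+
+ As a sketch only (the grammar and directory names below are hypothetical), a
+ layout that satisfies the co-location requirement described above could look
+ like this:
+
++--
+ src/main/antlr3/org/foo/bar/
+      |
+      +--- MyLexer.g     generates MyLexer.tokens in the output directory
+      +--- MyParser.g    uses tokenVocab=MyLexer, so it sits alongside MyLexer.g
+
+ src/main/antlr_imports/
+      |
+      +--- Common.g      imported grammar, found via <libDirectory>
++--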
+
+
+
diff --git a/antlr3-maven-plugin/src/site/apt/examples/simple.apt b/antlr3-maven-plugin/src/site/apt/examples/simple.apt
new file mode 100644
index 0000000..ed1918b
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/examples/simple.apt
@@ -0,0 +1,40 @@
+Simple configuration
+
+ If your grammar files are organized into the default locations as described in the {{{../index.html}introduction}},
+ then configuring the <<<pom.xml>>> file for your project is as simple as adding this to it
+
++--
+<plugins>
+<plugin>
+    <groupId>org.antlr</groupId>
+    <artifactId>antlr3-maven-plugin</artifactId>
+    <version>3.1.3-1</version>
+    <executions>
+        <execution>
+            <goals>
+                <goal>antlr</goal>
+            </goals>
+        </execution>
+    </executions>
+</plugin>
+...
+</plugins>
++--
+
+ When the <<<mvn>>> command is executed all grammar files under <<<src/main/antlr3>>>, except any
+ import grammars under <<<src/main/antlr3/imports>>> will be analyzed and converted to
+ Java source code in the output directory <<<target/generated-sources/antlr3>>>.
+
+ Your input files under <<<antlr3>>> should be stored in subdirectories that
+ reflect the package structure of your Java parsers. If your grammar file <<<parser.g>>> contains:
+
++---
+@header {
+package org.jimi.themuss;
+}
++---
+
+ Then the <<<.g>>> file should be stored in: <<<src/main/antlr3/org/jimi/themuss/parser.g>>>. This way
+ the generated <<<.java>>> files will correctly reflect the package structure in which they will
+ finally rest as classes.
+
diff --git a/antlr3-maven-plugin/src/site/apt/faq.apt.vm b/antlr3-maven-plugin/src/site/apt/faq.apt.vm
new file mode 100644
index 0000000..d6360a1
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/faq.apt.vm
@@ -0,0 +1 @@
+FAQ
diff --git a/antlr3-maven-plugin/src/site/apt/index.apt b/antlr3-maven-plugin/src/site/apt/index.apt
new file mode 100644
index 0000000..2b57f13
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/index.apt
@@ -0,0 +1,62 @@
+         -------------
+         ANTLR v3 Maven Plugin
+         -------------
+         Jim Idle
+         -------------
+         March 2009
+         -------------
+
+ANTLR v3 Maven plugin
+
+ The ANTLR v3 Maven plugin is completely re-written as of version 3.1.3; if you are familiar
+ with prior versions, you should note that there are some behavioral differences that make
+ it worthwhile reading this documentation. 
+
+ The job of the plugin is essentially to tell the standard ANTLR parser generator where the
+ input grammar files are and where the output files should be generated. As with all Maven
+ plugins, there are defaults, which you are advised to comply with, but are not
+ forced to.
+
+ This version of the plugin allows full control over ANTLR and allows configuration of all
+ options that are useful for a build system. The code required to calculate dependencies,
+ check the build order, and otherwise work with your grammar files is built into the ANTLR
+ tool as of version 3.1.3 of ANTLR and this plugin.
+
+* Plugin Versioning
+
+ The plugin version tracks the version of the ANTLR tool that it controls. Hence if you
+ use version 3.1.3 of the plugin, you will build your grammars using version 3.1.3 of the
+ ANTLR tool, version 3.2 of the plugin will use version 3.2 of the ANTLR tool and so on.
+
+ You may also find that there are patch versions of the plugin such as 3.1.3-1, 3.1.3-2, and
+ so on. Use the latest patch release of the plugin.
+
+ The current version of the plugin is shown at the top of this page after the <<Last Deployed>> date.
+ 
+
+* Default directories
+
+ As with all Maven plugins, this plugin will automatically default to standard locations
+ for your grammar and import files. Organizing your source code to reflect this standard
+ layout will greatly reduce the configuration effort required. The standard layout looks
+ like this:
+
++--
+ src/main/
+      |
+      +--- antlr3/... .g files organized in the required package structure
+             |
+             +--- imports/  .g files that are imported by other grammars.
++--
+
+ If your grammar is intended to be part of a package called <<<org.foo.bar>>> then you would
+ place it in the directory <<<src/main/antlr3/org/foo/bar>>>. The plugin will then produce
+ <<<.java>>> and <<<.tokens>>> files in the output directory <<<target/generated-sources/antlr3/org/foo/bar>>>.
+ When the Java files are compiled, they will be in the correct location for the Javac
+ compiler without any special configuration. The generated Java files are automatically
+ submitted for compilation by the plugin.
+
+ The <<<src/main/antlr3/imports>>> directory is treated in a special way. It should contain
+ any grammar files that are imported by other grammar files (do not make subdirectories here).
+ Such files are never built on their own, but the plugin will automatically tell the ANTLR
+ tool to look in this directory for library files.
diff --git a/antlr3-maven-plugin/src/site/apt/usage.apt.vm b/antlr3-maven-plugin/src/site/apt/usage.apt.vm
new file mode 100644
index 0000000..1b28890
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/apt/usage.apt.vm
@@ -0,0 +1,59 @@
+Usage
+
+ The ANTLR 3 plugin for Maven can generate parsers for any number of grammars in
+ your project.
+
+* Compiling Grammars into Parsers
+
+ By default, the <<<{{{./antlr-mojo.html}antlr}}>>> goal will search for grammar
+ files in the directory <<<$\{basedir\}/src/main/antlr3>>> and any additional
+ <<<.tokens>>> files in the directory <<<$\{basedir\}/src/main/antlr3/imports>>>.
+ This can be configured to search other directories using the plugin configuration
+ parameters as described in the <<<{{{./antlr-mojo.html}antlr}}>>> goal
+ documentation.
+
+ The following figure shows the expected layout of files for the default
+ configuration of this plugin.
+
++--
+ src/main/
+      |
+      +--- antlr3/...       .g files organized in the required package structure
+             |
+             +--- imports/  user-created .tokens files and .g files that are imported by other grammars
++--
+
+ The next step is to configure your POM to call the plugin. The goals will
+ normally run during the generate-sources phase of the build. Examples of how to
+ configure your POM can be found on the various examples pages, reachable via
+ the page menu. If you stick with the default values, the snippet below will
+ suffice:
+
++--
+<project>
+  ...
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr3-maven-plugin</artifactId>
+        <version>${project.version}</version>
+        <executions>
+          <execution>
+            <id>antlr</id>
+            <goals>
+              <goal>antlr</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+    ...
+  </build>
+  ...
+</project>
++--
+
+ Note that you can create multiple executions, and thus build some grammars with
+ different options from others (such as setting the <<<debug>>> option for one
+ set of grammars only).
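+
+ The snippet below is only a sketch of that idea (the execution ids and the
+ alternate source directory are invented for illustration); it builds most
+ grammars with the default settings and a second set with <<<debug>>> enabled:
+
++--
+<plugin>
+  <groupId>org.antlr</groupId>
+  <artifactId>antlr3-maven-plugin</artifactId>
+  <version>${project.version}</version>
+  <executions>
+    <execution>
+      <id>antlr-default</id>
+      <goals>
+        <goal>antlr</goal>
+      </goals>
+    </execution>
+    <execution>
+      <id>antlr-debug</id>
+      <goals>
+        <goal>antlr</goal>
+      </goals>
+      <configuration>
+        <sourceDirectory>src/main/antlr3-debug</sourceDirectory>
+        <debug>true</debug>
+      </configuration>
+    </execution>
+  </executions>
+</plugin>
++--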
diff --git a/antlr3-maven-plugin/src/site/site.xml b/antlr3-maven-plugin/src/site/site.xml
new file mode 100644
index 0000000..92bcea3
--- /dev/null
+++ b/antlr3-maven-plugin/src/site/site.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<project name="ANTLR v3 Maven plugin">
+
+  <publishDate position="left"/>
+  <version position="left"/>
+
+  <poweredBy>
+    <logo name="ANTLR Web Site" href="http://antlr3.org/"
+          img="http://www.antlr.org/wiki/download/attachments/292/ANTLR3"/>
+  </poweredBy>
+
+  <body>
+    <links>
+      <item name="Antlr Web Site" href="http://www.antlr3.org/"/>
+    </links>
+
+    <menu name="Overview">
+      <item name="Introduction" href="index.html"/>
+      <item name="Goals" href="plugin-info.html"/>
+      <item name="Usage" href="usage.html"/>
+      <item name="FAQ" href="faq.html"/>
+    </menu>
+
+    <menu name="Examples">
+      <item name="Simple configurations" href="examples/simple.html"/>
+      <item name="Using library directories" href="examples/libraries.html"/>
+      <item name="Using imported grammars" href="examples/import.html"/>
+    </menu>
+
+    <menu ref="reports" />
+    <menu ref="modules" />
+
+  </body>
+</project>
diff --git a/contributors.txt b/contributors.txt
new file mode 100644
index 0000000..c5d133d
--- /dev/null
+++ b/contributors.txt
@@ -0,0 +1,59 @@
+ANTLR Project Contributors Certification of Origin and Rights
+
+All contributors to ANTLR v3 must formally agree to abide by this
+certificate of origin by signing on the bottom with their github
+userid, full name, email address (you can obscure your e-mail, but it
+must be computable by human), and date.
+
+By signing this agreement, you are warranting and representing that
+you have the right to release code contributions or other content free
+of any obligations to third parties and are granting Terence Parr and
+ANTLR project contributors, henceforth referred to as The ANTLR
+Project, a license to incorporate it into The ANTLR Project tools
+(such as ANTLRWorks and StringTemplate) or related works under the BSD
+license. You understand that The ANTLR Project may or may not
+incorporate your contribution and you warrant and represent the
+following:
+
+1. I am the creator of all my contributions. I am the author of all
+   contributed work submitted and further warrant and represent that
+   such work is my original creation and I have the right to license
+   it to The ANTLR Project for release under the 3-clause BSD
+   license. I hereby grant The ANTLR Project a nonexclusive,
+   irrevocable, royalty-free, worldwide license to reproduce,
+   distribute, prepare derivative works, and otherwise use this
+   contribution as part of the ANTLR project, associated
+   documentation, books, and tools at no cost to The ANTLR Project.
+
+2. I have the right to submit. This submission does not violate the
+   rights of any person or entity and that I have legal authority over
+   this submission and to make this certification.
+
+3. If I violate another's rights, liability lies with me. I agree to
+   defend, indemnify, and hold The ANTLR Project and ANTLR users
+   harmless from any claim or demand, including reasonable attorney
+   fees, made by any third party due to or arising out of my violation
+   of these terms and conditions or my violation of the rights of
+   another person or entity.
+
+4. I understand and agree that this project and the contribution are
+   public and that a record of the contribution (including all
+   personal information I submit with it, including my sign-off) is
+   maintained indefinitely and may be redistributed consistent with
+   this project or the open source license indicated in the file.
+
+I have read this agreement and do so certify by adding my signoff to
+the end of the following contributors list.
+
+CONTRIBUTORS:
+
+YYYY/MM/DD, github id, Full name, email
+2013/04/17, ibre5041, Ivan Brezina, ibre5041@ibrezina.net
+2013/02/19, murrayju, Justin Murray, murrayju@addpcs.com
+2012/07/12, parrt, Terence Parr, parrt@antlr.org
+2012/08/08, Zannick, Benjamin S Wolf, jokeserver@gmail.com
+2012/09/15, martint, Martin Traverso, mtraverso@gmail.com
+2012/09/16, qmx, Douglas Campos, qmx@qmx.me
+2012/09/17, ksgokul, Gokulakannan Somasundaram, gokul007@gmail.com
+2012/11/22, sharwell, Sam Harwell, sam@tunnelvisionlabs.com
+2012/09/24, mike-lischke, Mike Lischke, mike@lischke-online.de
diff --git a/antlr-3.4/gunit/antlr.config b/gunit-maven-plugin/antlr.config
similarity index 100%
copy from antlr-3.4/gunit/antlr.config
copy to gunit-maven-plugin/antlr.config
diff --git a/gunit-maven-plugin/pom.xml b/gunit-maven-plugin/pom.xml
new file mode 100644
index 0000000..1a50e34
--- /dev/null
+++ b/gunit-maven-plugin/pom.xml
@@ -0,0 +1,143 @@
+<!--
+
+ [The "BSD license"]
+
+ ANTLR        - Copyright (c) 2005-2010 Terence Parr
+ Maven Plugin - Copyright (c) 2009      Jim Idle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <prerequisites>
+        <maven>2.0</maven>
+    </prerequisites>
+
+    <groupId>org.antlr</groupId>
+    <artifactId>maven-gunit-plugin</artifactId>
+    <packaging>maven-plugin</packaging>
+
+    <name>ANTLR 3 gUnit Maven plugin</name>
+	<description>A Maven plugin for incorporating gUnit testing of grammars</description>
+    <url>http://antlr.org</url>
+
+    <!--
+
+    Inherit from the ANTLR master pom, which tells us what
+    version we are and allows us to inherit dependencies
+    and so on.
+    -->
+    <parent>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr-master</artifactId>
+        <version>3.5.2</version>
+    </parent>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <!--
+
+     What are we dependent on for the Mojos to execute? We need the
+     plugin API itself and of course we need the ANTLR Tool and runtime
+     and any of their dependencies, which we inherit. The Tool itself provides
+     us with all the dependencies, so we need only name it here.
+      -->
+    <dependencies>
+
+        <!--
+          The things we need to build the target language recognizer
+          -->
+        <dependency>
+            <groupId>org.apache.maven</groupId>
+            <artifactId>maven-plugin-api</artifactId>
+            <version>2.0</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.maven</groupId>
+            <artifactId>maven-project</artifactId>
+            <version>2.0</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.codehaus.plexus</groupId>
+            <artifactId>plexus-compiler-api</artifactId>
+            <version>2.0</version>
+        </dependency>
+
+        <!--
+         The version of the ANTLR tool that this version of the plugin controls.
+         We have decided that this should be in lockstep with ANTLR itself, apart
+         from -1, -2, -3, etc. patch releases.
+          -->
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <!--
+         Dependency on the gUnit artifact.
+        -->
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>gunit</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.maven.shared</groupId>
+            <artifactId>maven-plugin-testing-harness</artifactId>
+            <version>1.1</version>
+            <scope>test</scope>
+        </dependency>
+        
+    </dependencies>
+    
+    <build>
+
+        <plugins>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-project-info-reports-plugin</artifactId>
+                <version>2.6</version>
+                <configuration>
+                    <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
+                </configuration>
+            </plugin>
+
+        </plugins>
+
+    </build>
+
+</project>
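For reference, a minimal sketch of how a consuming project might bind this plugin. The coordinates come from the POM above; the gunit goal and its default binding to the test phase are declared in the Mojo that follows.

    <!-- In the consuming project's pom.xml; 3.5.2 is the version declared in this tree -->
    <plugin>
      <groupId>org.antlr</groupId>
      <artifactId>maven-gunit-plugin</artifactId>
      <version>3.5.2</version>
      <executions>
        <execution>
          <goals>
            <goal>gunit</goal>
          </goals>
        </execution>
      </executions>
    </plugin>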
diff --git a/gunit-maven-plugin/src/main/java/org/antlr/mojo/antlr3/GUnitExecuteMojo.java b/gunit-maven-plugin/src/main/java/org/antlr/mojo/antlr3/GUnitExecuteMojo.java
new file mode 100644
index 0000000..a7e2317
--- /dev/null
+++ b/gunit-maven-plugin/src/main/java/org/antlr/mojo/antlr3/GUnitExecuteMojo.java
@@ -0,0 +1,397 @@
+package org.antlr.mojo.antlr3;
+
+import java.util.List;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.io.File;
+import java.io.IOException;
+import java.io.Writer;
+import java.io.FileWriter;
+import java.io.BufferedWriter;
+import java.net.URL;
+import java.net.MalformedURLException;
+import java.net.URLClassLoader;
+
+import org.apache.maven.plugin.AbstractMojo;
+import org.apache.maven.plugin.MojoExecutionException;
+import org.apache.maven.plugin.MojoFailureException;
+import org.apache.maven.project.MavenProject;
+import org.apache.maven.artifact.Artifact;
+import org.apache.maven.artifact.DependencyResolutionRequiredException;
+import org.apache.maven.artifact.versioning.ArtifactVersion;
+import org.apache.maven.artifact.versioning.DefaultArtifactVersion;
+import org.apache.maven.artifact.versioning.OverConstrainedVersionException;
+import org.codehaus.plexus.util.StringUtils;
+import org.codehaus.plexus.util.FileUtils;
+import org.codehaus.plexus.compiler.util.scan.mapping.SourceMapping;
+import org.codehaus.plexus.compiler.util.scan.mapping.SuffixMapping;
+import org.codehaus.plexus.compiler.util.scan.SourceInclusionScanner;
+import org.codehaus.plexus.compiler.util.scan.SimpleSourceInclusionScanner;
+import org.codehaus.plexus.compiler.util.scan.InclusionScanException;
+import org.antlr.runtime.ANTLRFileStream;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.gunit.GrammarInfo;
+import org.antlr.gunit.gUnitExecutor;
+import org.antlr.gunit.AbstractTest;
+import org.antlr.gunit.Interp;
+
+/**
+ * Takes gUnit scripts and directly performs testing.
+ *
+ * @goal gunit
+ *
+ * @phase test
+ * @requiresDependencyResolution test
+ * @requiresProject true
+ *
+ * @author Steve Ebersole
+ */
+public class GUnitExecuteMojo extends AbstractMojo {
+	public static final String ANTLR_GROUP_ID = "org.antlr";
+	public static final String ANTLR_ARTIFACT_NAME = "antlr";
+	public static final String ANTLR_RUNTIME_ARTIFACT_NAME = "antlr-runtime";
+
+	/**
+     * INTERNAL : The Maven Project to which we are attached
+     *
+     * @parameter expression="${project}"
+     * @required
+     */
+    private MavenProject project;
+
+	/**
+	 * INTERNAL : The artifacts associated with the dependencies defined as part
+	 * of our configuration within the project to which we are being attached.
+	 *
+	 * @parameter expression="${plugin.artifacts}"
+     * @required
+     * @readonly
+	 */
+	private List<Artifact> pluginArtifacts;
+
+	/**
+     * Specifies the directory containing the gUnit testing files.
+     *
+     * @parameter expression="${basedir}/src/test/gunit"
+     * @required
+     */
+    private File sourceDirectory;
+
+    /**
+     * A set of patterns for matching files from the sourceDirectory that
+     * should be included as gUnit source files.
+     *
+     * @parameter
+     */
+    private Set<String> includes;
+
+    /**
+     * A set of exclude patterns.
+     *
+     * @parameter
+     */
+    private Set<String> excludes;
+
+	/**
+     * Specifies directory to which gUnit reports should get written.
+     *
+     * @parameter expression="${basedir}/target/gunit-report"
+     * @required
+     */
+    private File reportDirectory;
+
+	/**
+	 * Should gUnit functionality be completely by-passed?
+	 * <p>
+	 * By default we skip gUnit tests if the user requested that all testing be skipped using 'maven.test.skip'</p>
+	 *
+	 * @parameter expression="${maven.test.skip}"
+	 */
+	private boolean skip;
+
+	public Set<String> getIncludePatterns() {
+		return includes == null || includes.isEmpty()
+				? Collections.singleton( "**/*.testsuite" )
+				: includes;
+	}
+
+	public Set<String> getExcludePatterns() {
+		return excludes == null
+				? Collections.<String>emptySet()
+				: excludes;
+	}
+
+
+	public final void execute() throws MojoExecutionException, MojoFailureException {
+		if ( skip ) {
+			getLog().info( "Skipping gUnit processing" );
+			return;
+		}
+		Artifact pluginAntlrArtifact = determinePluginAntlrArtifact();
+
+		validateProjectsAntlrVersion( determineArtifactVersion( pluginAntlrArtifact ) );
+
+		performExecution( determineProjectCompileScopeClassLoader( pluginAntlrArtifact ) );
+	}
+
+	private Artifact determinePluginAntlrArtifact() throws MojoExecutionException {
+		for ( Artifact artifact : pluginArtifacts ) {
+			boolean match = ANTLR_GROUP_ID.equals( artifact.getGroupId() )
+					&& ANTLR_ARTIFACT_NAME.equals( artifact.getArtifactId() );
+			if ( match ) {
+				return artifact;
+			}
+		}
+		throw new MojoExecutionException(
+				"Unexpected state : could not locate " + ANTLR_GROUP_ID + ':' + ANTLR_ARTIFACT_NAME +
+						" in plugin dependencies"
+		);
+	}
+
+	private ArtifactVersion determineArtifactVersion(Artifact artifact) throws MojoExecutionException {
+		try {
+			return artifact.getVersion() != null
+					? new DefaultArtifactVersion( artifact.getVersion() )
+					: artifact.getSelectedVersion();
+		}
+		catch ( OverConstrainedVersionException e ) {
+			throw new MojoExecutionException( "artifact [" + artifact.getId() + "] defined an overly constrained version range" );
+		}
+	}
+
+	private void validateProjectsAntlrVersion(ArtifactVersion pluginAntlrVersion) throws MojoExecutionException {
+		Artifact antlrArtifact = null;
+		Artifact antlrRuntimeArtifact = null;
+
+		if ( project.getCompileArtifacts() != null ) {
+			for ( Object o : project.getCompileArtifacts() ) {
+				final Artifact artifact = ( Artifact ) o;
+				if ( ANTLR_GROUP_ID.equals( artifact.getGroupId() ) ) {
+					if ( ANTLR_ARTIFACT_NAME.equals( artifact.getArtifactId() ) ) {
+						antlrArtifact = artifact;
+						break;
+					}
+					if ( ANTLR_RUNTIME_ARTIFACT_NAME.equals( artifact.getArtifactId() ) ) {
+						antlrRuntimeArtifact = artifact;
+					}
+				}
+			}
+		}
+
+		validateBuildTimeArtifact( antlrArtifact, pluginAntlrVersion );
+		validateRunTimeArtifact( antlrRuntimeArtifact, pluginAntlrVersion );
+	}
+
+	@SuppressWarnings(value = "unchecked")
+	protected void validateBuildTimeArtifact(Artifact antlrArtifact, ArtifactVersion pluginAntlrVersion)
+			throws MojoExecutionException {
+		if ( antlrArtifact == null ) {
+			validateMissingBuildtimeArtifact();
+			return;
+		}
+
+		// otherwise, let's make sure they match...
+		ArtifactVersion projectAntlrVersion = determineArtifactVersion( antlrArtifact );
+		if ( pluginAntlrVersion.compareTo( projectAntlrVersion ) != 0 ) {
+			getLog().warn(
+					"Encountered " + ANTLR_GROUP_ID + ':' + ANTLR_ARTIFACT_NAME + ':' + projectAntlrVersion.toString() +
+							" which did not match Antlr version used by plugin [" + pluginAntlrVersion.toString() + "]"
+			);
+		}
+	}
+
+	protected void validateMissingBuildtimeArtifact() {
+		// generally speaking, it's OK for the project not to define a dependency on the build-time artifact...
+	}
+
+	@SuppressWarnings(value = "unchecked")
+	protected void validateRunTimeArtifact(Artifact antlrRuntimeArtifact, ArtifactVersion pluginAntlrVersion)
+			throws MojoExecutionException {
+		if ( antlrRuntimeArtifact == null ) {
+			// it's possible, if the project instead depends on the build-time (or full) artifact.
+			return;
+		}
+
+		ArtifactVersion projectAntlrVersion = determineArtifactVersion( antlrRuntimeArtifact );
+		if ( pluginAntlrVersion.compareTo( projectAntlrVersion ) != 0 ) {
+			getLog().warn(
+					"Encountered " + ANTLR_GROUP_ID + ':' + ANTLR_RUNTIME_ARTIFACT_NAME + ':' + projectAntlrVersion.toString() +
+							" which did not match Antlr version used by plugin [" + pluginAntlrVersion.toString() + "]"
+			);
+		}
+	}
+
+	/**
+	 * Builds the classloader to pass to gUnit.
+	 *
+	 * @param antlrArtifact The plugin's (our) Antlr dependency artifact.
+	 *
+	 * @return The classloader for gUnit to use
+	 *
+	 * @throws MojoExecutionException Problem resolving artifacts to {@link java.net.URL urls}.
+	 */
+	private ClassLoader determineProjectCompileScopeClassLoader(Artifact antlrArtifact)
+			throws MojoExecutionException {
+		ArrayList<URL> classPathUrls = new ArrayList<URL>();
+		getLog().info( "Adding Antlr artifact : " + antlrArtifact.getId() );
+		classPathUrls.add( resolveLocalURL( antlrArtifact ) );
+
+		for ( String path : classpathElements() ) {
+			try {
+				getLog().info( "Adding project compile classpath element : " + path );
+				classPathUrls.add( new File( path ).toURI().toURL() );
+			}
+			catch ( MalformedURLException e ) {
+				throw new MojoExecutionException( "Unable to build path URL [" + path + "]" );
+			}
+		}
+
+		return new URLClassLoader( classPathUrls.toArray( new URL[classPathUrls.size()] ), getClass().getClassLoader() );
+	}
+
+	protected static URL resolveLocalURL(Artifact artifact) throws MojoExecutionException {
+		try {
+			return artifact.getFile().toURI().toURL();
+		}
+		catch ( MalformedURLException e ) {
+			throw new MojoExecutionException( "Unable to resolve artifact url : " + artifact.getId(), e );
+		}
+	}
+
+	@SuppressWarnings( "unchecked" )
+	private List<String> classpathElements() throws MojoExecutionException {
+		try {
+			// todo : should we combine both compile and test scoped elements?
+			return ( List<String> ) project.getTestClasspathElements();
+		}
+		catch ( DependencyResolutionRequiredException e ) {
+			throw new MojoExecutionException( "Call to Project#getTestClasspathElements required dependency resolution", e );
+		}
+	}
+
+	private void performExecution(ClassLoader projectCompileScopeClassLoader) throws MojoExecutionException {
+		getLog().info( "gUnit report directory : " + reportDirectory.getAbsolutePath() );
+		if ( !reportDirectory.exists() ) {
+			boolean directoryCreated = reportDirectory.mkdirs();
+			if ( !directoryCreated ) {
+				getLog().warn( "mkdirs() reported problem creating report directory" );
+			}
+		}
+
+		Result runningResults = new Result();
+		ArrayList<String> failureNames = new ArrayList<String>();
+
+		System.out.println();
+		System.out.println( "-----------------------------------------------------------" );
+		System.out.println( " G U N I T   R E S U L T S" );
+		System.out.println( "-----------------------------------------------------------" );
+
+		for ( File script : collectIncludedSourceGrammars() ) {
+			final String scriptPath = script.getAbsolutePath();
+			System.out.println( "Executing script " + scriptPath );
+			try {
+				String scriptBaseName = StringUtils.chompLast( FileUtils.basename( script.getName() ), "." );
+
+				ANTLRFileStream antlrStream = new ANTLRFileStream( scriptPath );
+				GrammarInfo grammarInfo = Interp.parse( antlrStream );
+				gUnitExecutor executor = new gUnitExecutor(
+						grammarInfo,
+						projectCompileScopeClassLoader,
+						script.getParentFile().getAbsolutePath()
+				);
+
+				String report = executor.execTest();
+				writeReportFile( new File( reportDirectory, scriptBaseName + ".txt" ), report );
+
+				Result testResult = new Result();
+				testResult.tests = executor.numOfTest;
+				testResult.failures = executor.numOfFailure;
+				testResult.invalids = executor.numOfInvalidInput;
+
+				System.out.println( testResult.render() );
+
+				runningResults.add( testResult );
+				for ( AbstractTest test : executor.failures ) {
+					failureNames.add( scriptBaseName + "#" + test.getHeader() );
+				}
+			}
+			catch ( IOException e ) {
+				throw new MojoExecutionException( "Could not open specified script file", e );
+			}
+			catch ( RecognitionException e ) {
+				throw new MojoExecutionException( "Could not parse gUnit script", e );
+			}
+		}
+
+		System.out.println();
+		System.out.println( "Summary :" );
+		if ( ! failureNames.isEmpty() ) {
+			System.out.println( "  Found " + failureNames.size() + " failures" );
+			for ( String name : failureNames ) {
+				System.out.println( "    - " + name );
+			}
+		}
+		System.out.println( runningResults.render() );
+		System.out.println();
+
+		if ( runningResults.failures > 0 ) {
+			throw new MojoExecutionException( "Found gUnit test failures" );
+		}
+
+		if ( runningResults.invalids > 0 ) {
+			throw new MojoExecutionException( "Found invalid gUnit tests" );
+		}
+	}
+
+	private Set<File> collectIncludedSourceGrammars() throws MojoExecutionException {
+		SourceMapping mapping = new SuffixMapping( "g", Collections.<String>emptySet() );
+        SourceInclusionScanner scan = new SimpleSourceInclusionScanner( getIncludePatterns(), getExcludePatterns() );
+        scan.addSourceMapping( mapping );
+		try {
+			return scan.getIncludedSources( sourceDirectory, null );
+		}
+		catch ( InclusionScanException e ) {
+			throw new MojoExecutionException( "Error determining gUnit sources", e );
+		}
+	}
+
+	private void writeReportFile(File reportFile, String results) {
+		try {
+			Writer writer = new FileWriter( reportFile );
+			writer = new BufferedWriter( writer );
+			try {
+				writer.write( results );
+				writer.flush();
+			}
+			finally {
+				try {
+					writer.close();
+				}
+				catch ( IOException ignore ) {
+				}
+			}
+		}
+		catch ( IOException e ) {
+			getLog().warn(  "Error writing gUnit report file", e );
+		}
+	}
+
+	private static class Result {
+		private int tests = 0;
+		private int failures = 0;
+		private int invalids = 0;
+
+		public String render() {
+			return String.format( "Tests run: %d,  Failures: %d,  Invalid: %d", tests, failures, invalids );
+		}
+
+		public void add(Result result) {
+			this.tests += result.tests;
+			this.failures += result.failures;
+			this.invalids += result.invalids;
+		}
+	}
+
+}
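To illustrate the input this mojo consumes: it scans sourceDirectory (default src/test/gunit) for files matching **/*.testsuite and hands each one to gUnit's Interp and gUnitExecutor. A minimal sketch of such a script, with hypothetical grammar, package, and rule names (for instance under src/test/gunit/org/foo/bar/Simple.testsuite); see the gUnit grammars later in this patch for the exact syntax:

    gunit Simple;

    @header {package org.foo.bar;}

    prog:
    "abc def" OK
    "123"     FAIL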
diff --git a/antlr-3.4/gunit/CHANGES.txt b/gunit/CHANGES.txt
similarity index 100%
rename from antlr-3.4/gunit/CHANGES.txt
rename to gunit/CHANGES.txt
diff --git a/antlr-3.4/gunit/LICENSE.txt b/gunit/LICENSE.txt
similarity index 100%
rename from antlr-3.4/gunit/LICENSE.txt
rename to gunit/LICENSE.txt
diff --git a/antlr-3.4/gunit/README.txt b/gunit/README.txt
similarity index 100%
rename from antlr-3.4/gunit/README.txt
rename to gunit/README.txt
diff --git a/antlr-3.4/gunit/antlr.config b/gunit/antlr.config
similarity index 100%
rename from antlr-3.4/gunit/antlr.config
rename to gunit/antlr.config
diff --git a/gunit/pom.xml b/gunit/pom.xml
new file mode 100644
index 0000000..8a8da9e
--- /dev/null
+++ b/gunit/pom.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>gunit</artifactId>
+    <packaging>jar</packaging>
+  
+    <name>ANTLR 3 gUnit</name>
+    <description>gUnit grammar testing tool for ANTLR 3</description>
+
+  <!--
+
+    Inherit from the ANTLR master pom, which tells us what
+    version we are and allows us to inherit dependencies
+    and so on.
+
+    -->
+    <parent>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr-master</artifactId>
+        <version>3.5.2</version>
+    </parent>
+
+    <url>http://www.antlr.org/wiki/display/ANTLR3/gUnit+-+Grammar+Unit+Testing</url>
+
+  <!--
+
+    Tell Maven which other artifacts we need in order to
+    build, run and test gUnit. gUnit relies on the ANTLR Tool and
+    runtime of the same version and on the current released version
+    of ANTLR StringTemplate, which are taken either from the
+    antlr-snapshot repository or from your local .m2 repository if
+    you built and installed them locally.
+
+    -->
+    <dependencies>
+
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr</artifactId>
+            <version>${project.version}</version>
+            <scope>compile</scope>
+            
+        </dependency>
+        
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>stringtemplate</artifactId>
+            <scope>compile</scope>
+        </dependency>
+
+    </dependencies>
+
+    <build>
+
+        <plugins>
+
+            <plugin>
+                <groupId>org.antlr</groupId>
+                <artifactId>antlr3-maven-plugin</artifactId>
+                <version>${project.version}</version>
+                <configuration />
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>antlr</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+
+        </plugins>
+
+    </build>
+
+</project>
diff --git a/antlr-3.4/gunit/src/main/antlr3/org/antlr/gunit/gUnit.g b/gunit/src/main/antlr3/org/antlr/gunit/gUnit.g
similarity index 100%
rename from antlr-3.4/gunit/src/main/antlr3/org/antlr/gunit/gUnit.g
rename to gunit/src/main/antlr3/org/antlr/gunit/gUnit.g
diff --git a/antlr-3.4/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/ANTLRv3.g b/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/ANTLRv3.g
similarity index 100%
rename from antlr-3.4/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/ANTLRv3.g
rename to gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/ANTLRv3.g
diff --git a/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/StGUnit.g b/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/StGUnit.g
new file mode 100644
index 0000000..e645f62
--- /dev/null
+++ b/gunit/src/main/antlr3/org/antlr/gunit/swingui/parsers/StGUnit.g
@@ -0,0 +1,213 @@
+/*
+[The "BSD licence"]
+Copyright (c) 2007-2008 Leon Jen-Yuan Su
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+grammar StGUnit;
+
+options {language=Java;}
+
+tokens {
+	OK = 'OK';
+	FAIL = 'FAIL';
+	DOC_COMMENT;
+}
+
+@header {
+package org.antlr.gunit.swingui.parsers;
+import org.antlr.gunit.swingui.model.*;
+import org.antlr.gunit.swingui.runner.*;
+}
+
+@lexer::header {package org.antlr.gunit.swingui.parsers;}
+
+@members {
+public TestSuiteAdapter adapter;
+}
+
+gUnitDef
+	:	'gunit' name=id {adapter.setGrammarName($name.text);}
+	    ('walks' id)? ';' 
+		header? suite*
+	;
+
+header
+	:	'@header' ACTION
+	;
+		
+suite
+	:	(	parserRule=RULE_REF ('walks' RULE_REF)? 
+	        {adapter.startRule($parserRule.text);}
+		|	lexerRule=TOKEN_REF 
+			{adapter.startRule($lexerRule.text);}
+		)
+		':'
+		test+
+		{adapter.endRule();}
+	;
+
+test
+	:	input expect
+		{adapter.addTestCase($input.in, $expect.out);}
+	;
+	
+expect returns [ITestCaseOutput out]
+	:	OK			{$out = TestSuiteAdapter.createBoolOutput(true);}
+	|	FAIL		{$out = TestSuiteAdapter.createBoolOutput(false);}
+	|	'returns' RETVAL {$out = TestSuiteAdapter.createReturnOutput($RETVAL.text);}
+	|	'->' output {$out = TestSuiteAdapter.createStdOutput($output.text);}
+	|	'->' AST	{$out = TestSuiteAdapter.createAstOutput($AST.text);}
+	;
+
+input returns [ITestCaseInput in]
+	:	STRING 		{$in = TestSuiteAdapter.createStringInput($STRING.text);}
+	|	ML_STRING	{$in = TestSuiteAdapter.createMultiInput($ML_STRING.text);}
+	|	fileInput	{$in = TestSuiteAdapter.createFileInput($fileInput.path);}
+	;
+
+output
+	:	STRING
+	|	ML_STRING
+	|	ACTION
+	;
+	
+fileInput returns [String path]
+	:	id {$path = $id.text;} (EXT {$path += $EXT.text;})? 
+	;
+
+id 	:	TOKEN_REF
+	|	RULE_REF
+	;
+
+// L E X I C A L   R U L E S
+
+SL_COMMENT
+ 	:	'//' ~('\r'|'\n')* '\r'? '\n' {$channel=HIDDEN;}
+	;
+
+ML_COMMENT
+	:	'/*' {$channel=HIDDEN;} .* '*/'
+	;
+
+STRING
+	:	'"' ( ESC | ~('\\'|'"') )* '"'
+	;
+
+ML_STRING
+	:	'<<' .* '>>' 
+	;
+
+TOKEN_REF
+	:	'A'..'Z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+RULE_REF
+	:	'a'..'z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+EXT	:	'.'('a'..'z'|'A'..'Z'|'0'..'9')+;
+
+RETVAL	:	NESTED_RETVAL
+	;
+
+fragment
+NESTED_RETVAL :
+	'['
+	(	options {greedy=false;}
+	:	NESTED_RETVAL
+	|	.
+	)*
+	']'
+	;
+
+AST	:	NESTED_AST (' '? NESTED_AST)*;
+
+fragment
+NESTED_AST :
+	'('
+	(	options {greedy=false;}
+	:	NESTED_AST
+	|	.
+	)*
+	')'
+	;
+
+ACTION
+	:	NESTED_ACTION
+	;
+
+fragment
+NESTED_ACTION :
+	'{'
+	(	options {greedy=false; k=3;}
+	:	NESTED_ACTION
+	|	STRING_LITERAL
+	|	CHAR_LITERAL
+	|	.
+	)*
+	'}'
+	;
+
+fragment
+CHAR_LITERAL
+	:	'\'' ( ESC | ~('\''|'\\') ) '\''
+	;
+
+fragment
+STRING_LITERAL
+	:	'"' ( ESC | ~('\\'|'"') )* '"'
+	;
+
+fragment
+ESC	:	'\\'
+		(	'n'
+		|	'r'
+		|	't'
+		|	'b'
+		|	'f'
+		|	'"'
+		|	'\''
+		|	'\\'
+		|	'>'
+		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
+		|	. // unknown, leave as it is
+		)
+	;
+	
+fragment
+XDIGIT :
+		'0' .. '9'
+	|	'a' .. 'f'
+	|	'A' .. 'F'
+	;
+
+WS	:	(	' '
+		|	'\t'
+		|	'\r'? '\n'
+		)+
+		{$channel=HIDDEN;}
+	;
diff --git a/gunit/src/main/java/org/antlr/gunit/AbstractTest.java b/gunit/src/main/java/org/antlr/gunit/AbstractTest.java
new file mode 100644
index 0000000..9d2c8ec
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/AbstractTest.java
@@ -0,0 +1,86 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2007 Kenny MacDermid
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+
+public abstract class AbstractTest implements ITestCase {
+	// store essential individual test result for string template
+	protected String header;
+	protected String actual;
+	
+	protected boolean hasErrorMsg;
+	
+	private String testedRuleName;
+	private int testCaseIndex;
+	
+	// TODO: remove these. They're only used as part of a refactor to keep the
+	//       code cleaner. It is a mock-instanceOf() replacement.
+	public abstract int getType();
+	public abstract String getText();
+	
+	public abstract String getExpected();
+	// return an escaped string of the expected result
+	public String getExpectedResult() {
+		String expected = getExpected();
+		if ( expected!=null ) expected = JUnitCodeGen.escapeForJava(expected);
+		return expected;
+	}
+	public abstract String getResult(gUnitTestResult testResult);
+	public String getHeader() { return this.header; }
+	public String getActual() { return this.actual; }
+	// return an escaped string of the actual result
+	public String getActualResult() {
+		String actual = getActual();
+		// there is no need to escape the error message from ANTLR 
+		if ( actual!=null && !hasErrorMsg ) actual = JUnitCodeGen.escapeForJava(actual);
+		return actual;
+	}
+	
+	public String getTestedRuleName() { return this.testedRuleName; }
+	public int getTestCaseIndex() { return this.testCaseIndex; }
+	
+	public void setHeader(String rule, String lexicalRule, String treeRule, int numOfTest, int line, String input) {
+		StringBuffer buf = new StringBuffer();
+		buf.append("test" + numOfTest + " (");
+		if ( treeRule!=null ) {
+			buf.append(treeRule+" walks ");
+		}
+		if ( lexicalRule!=null ) {
+			buf.append(lexicalRule + ", line"+line+")" + " - ");
+		}
+		else buf.append(rule + ", line"+line+")" + " - ");
+		buf.append( "\"" );
+		buf.append( input );
+		buf.append( "\"" );
+		this.header = buf.toString();
+	}
+	public void setActual(String actual) { this.actual = actual; }
+	
+	public void setTestedRuleName(String testedRuleName) { this.testedRuleName = testedRuleName; }
+	public void setTestCaseIndex(int testCaseIndex) { this.testCaseIndex = testCaseIndex; }
+	
+}
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/BooleanTest.java b/gunit/src/main/java/org/antlr/gunit/BooleanTest.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/BooleanTest.java
rename to gunit/src/main/java/org/antlr/gunit/BooleanTest.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/GrammarInfo.java b/gunit/src/main/java/org/antlr/gunit/GrammarInfo.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/GrammarInfo.java
rename to gunit/src/main/java/org/antlr/gunit/GrammarInfo.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/ITestCase.java b/gunit/src/main/java/org/antlr/gunit/ITestCase.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/ITestCase.java
rename to gunit/src/main/java/org/antlr/gunit/ITestCase.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/ITestSuite.java b/gunit/src/main/java/org/antlr/gunit/ITestSuite.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/ITestSuite.java
rename to gunit/src/main/java/org/antlr/gunit/ITestSuite.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/Interp.java b/gunit/src/main/java/org/antlr/gunit/Interp.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/Interp.java
rename to gunit/src/main/java/org/antlr/gunit/Interp.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/InvalidInputException.java b/gunit/src/main/java/org/antlr/gunit/InvalidInputException.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/InvalidInputException.java
rename to gunit/src/main/java/org/antlr/gunit/InvalidInputException.java
diff --git a/gunit/src/main/java/org/antlr/gunit/JUnitCodeGen.java b/gunit/src/main/java/org/antlr/gunit/JUnitCodeGen.java
new file mode 100644
index 0000000..ed5282e
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/JUnitCodeGen.java
@@ -0,0 +1,407 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.stringtemplate.StringTemplateGroupLoader;
+import org.antlr.stringtemplate.CommonGroupLoader;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+
+import java.io.*;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.ConsoleHandler;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+public class JUnitCodeGen {
+    public GrammarInfo grammarInfo;
+    public Map<String, String> ruleWithReturn;
+    private final String testsuiteDir;
+    private String outputDirectoryPath = ".";
+
+    private final static Handler console = new ConsoleHandler();
+    private static final Logger logger = Logger.getLogger(JUnitCodeGen.class.getName());
+    static {
+        logger.addHandler(console);
+    }
+
+    public JUnitCodeGen(GrammarInfo grammarInfo, String testsuiteDir) throws ClassNotFoundException {
+        this( grammarInfo, determineClassLoader(), testsuiteDir);
+    }
+
+    private static ClassLoader determineClassLoader() {
+        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+        if ( classLoader == null ) {
+            classLoader = JUnitCodeGen.class.getClassLoader();
+        }
+        return classLoader;
+    }
+
+    public JUnitCodeGen(GrammarInfo grammarInfo, ClassLoader classLoader, String testsuiteDir) throws ClassNotFoundException {
+        this.grammarInfo = grammarInfo;
+        this.testsuiteDir = testsuiteDir;
+        // Map the names of rules that have a return value to their return types
+        ruleWithReturn = new HashMap<String, String>();
+        Class<?> parserClass = locateParserClass( grammarInfo, classLoader );
+        Method[] methods = parserClass.getDeclaredMethods();
+        for(Method method : methods) {
+            if ( !method.getReturnType().getName().equals("void") ) {
+                ruleWithReturn.put(method.getName(), method.getReturnType().getName().replace('$', '.'));
+            }
+        }
+    }
+
+    private Class<?> locateParserClass(GrammarInfo grammarInfo, ClassLoader classLoader) throws ClassNotFoundException {
+        String parserClassName = grammarInfo.getGrammarName() + "Parser";
+        if ( grammarInfo.getGrammarPackage() != null ) {
+            parserClassName = grammarInfo.getGrammarPackage()+ "." + parserClassName;
+        }
+        return classLoader.loadClass( parserClassName );
+    }
+
+    public String getOutputDirectoryPath() {
+        return outputDirectoryPath;
+    }
+
+    public void setOutputDirectoryPath(String outputDirectoryPath) {
+        this.outputDirectoryPath = outputDirectoryPath;
+    }
+
+    public void compile() throws IOException{
+        String junitFileName;
+        if ( grammarInfo.getTreeGrammarName()!=null ) {
+            junitFileName = "Test"+grammarInfo.getTreeGrammarName();
+        }
+        else {
+            junitFileName = "Test"+grammarInfo.getGrammarName();
+        }
+        String lexerName = grammarInfo.getGrammarName()+"Lexer";
+        String parserName = grammarInfo.getGrammarName()+"Parser";
+
+        StringTemplateGroupLoader loader = new CommonGroupLoader("org/antlr/gunit", null);
+        StringTemplateGroup.registerGroupLoader(loader);
+        StringTemplateGroup.registerDefaultLexer(AngleBracketTemplateLexer.class);
+        StringBuffer buf = compileToBuffer(junitFileName, lexerName, parserName);
+        writeTestFile(".", junitFileName+".java", buf.toString());
+    }
+
+    public StringBuffer compileToBuffer(String className, String lexerName, String parserName) {
+        StringTemplateGroup group = StringTemplateGroup.loadGroup("junit");
+        StringBuffer buf = new StringBuffer();
+        buf.append(genClassHeader(group, className, lexerName, parserName));
+        buf.append(genTestRuleMethods(group));
+        buf.append("\n\n}");
+        return buf;
+    }
+
+    protected String genClassHeader(StringTemplateGroup group, String junitFileName, String lexerName, String parserName) {
+        StringTemplate classHeaderST = group.getInstanceOf("classHeader");
+        if ( grammarInfo.getTestPackage()!=null ) {	// Set up class package if there is
+            classHeaderST.setAttribute("header", "package "+grammarInfo.getTestPackage()+";");
+        }
+        classHeaderST.setAttribute("junitFileName", junitFileName);
+
+        String lexerPath = null;
+        String parserPath = null;
+        String treeParserPath = null;
+        String packagePath = null;
+        boolean isTreeGrammar = false;
+        boolean hasPackage = false;
+        /** Set up appropriate class path for parser/tree parser if using package */
+        if ( grammarInfo.getGrammarPackage()!=null ) {
+            hasPackage = true;
+            packagePath = "./"+grammarInfo.getGrammarPackage().replace('.', '/');
+            lexerPath = grammarInfo.getGrammarPackage()+"."+lexerName;
+            parserPath = grammarInfo.getGrammarPackage()+"."+parserName;
+            if ( grammarInfo.getTreeGrammarName()!=null ) {
+                treeParserPath = grammarInfo.getGrammarPackage()+"."+grammarInfo.getTreeGrammarName();
+                isTreeGrammar = true;
+            }
+        }
+        else {
+            lexerPath = lexerName;
+            parserPath = parserName;
+            if ( grammarInfo.getTreeGrammarName()!=null ) {
+                treeParserPath = grammarInfo.getTreeGrammarName();
+                isTreeGrammar = true;
+            }
+        }
+        // also set up custom tree adaptor if necessary
+        String treeAdaptorPath = null;
+        boolean hasTreeAdaptor = false;
+        if ( grammarInfo.getAdaptor()!=null ) {
+            hasTreeAdaptor = true;
+            treeAdaptorPath = grammarInfo.getAdaptor();
+        }
+        classHeaderST.setAttribute("hasTreeAdaptor", hasTreeAdaptor);
+        classHeaderST.setAttribute("treeAdaptorPath", treeAdaptorPath);
+        classHeaderST.setAttribute("hasPackage", hasPackage);
+        classHeaderST.setAttribute("packagePath", packagePath);
+        classHeaderST.setAttribute("lexerPath", lexerPath);
+        classHeaderST.setAttribute("parserPath", parserPath);
+        classHeaderST.setAttribute("treeParserPath", treeParserPath);
+        classHeaderST.setAttribute("isTreeGrammar", isTreeGrammar);
+        return classHeaderST.toString();
+    }
+
+    protected String genTestRuleMethods(StringTemplateGroup group) {
+        StringBuffer buf = new StringBuffer();
+        if ( grammarInfo.getTreeGrammarName()!=null ) {	// Generate JUnit code for tree grammar rules
+            genTreeMethods(group, buf);
+        }
+        else {	// Generate JUnit code for parser grammar rules
+            genParserMethods(group, buf);
+        }
+        return buf.toString();
+    }
+
+    private void genParserMethods(StringTemplateGroup group, StringBuffer buf) {
+        for ( gUnitTestSuite ts: grammarInfo.getRuleTestSuites() ) {
+            int i = 0;
+            for ( Map.Entry<gUnitTestInput, AbstractTest> entry : ts.testSuites.entrySet() ) {	// each rule may contain multiple tests
+                gUnitTestInput input = entry.getKey();
+                i++;
+                StringTemplate testRuleMethodST;
+                // If the rule has multiple return values or an AST
+                if ( entry.getValue().getType()== gUnitParser.ACTION && ruleWithReturn.containsKey(ts.getRuleName()) ) {
+                    testRuleMethodST = group.getInstanceOf("testRuleMethod2");
+                    String outputString = entry.getValue().getText();
+                    testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(ts.getRuleName())+i);
+                    testRuleMethodST.setAttribute("testRuleName", '"'+ts.getRuleName()+'"');
+                    testRuleMethodST.setAttribute("test", input);
+                    testRuleMethodST.setAttribute("returnType", ruleWithReturn.get(ts.getRuleName()));
+                    testRuleMethodST.setAttribute("expecting", outputString);
+                }
+                else {
+                    String testRuleName;
+                    // need to determine whether it's a test for parser rule or lexer rule
+                    if ( ts.isLexicalRule() ) testRuleName = ts.getLexicalRuleName();
+                    else testRuleName = ts.getRuleName();
+                    testRuleMethodST = group.getInstanceOf("testRuleMethod");
+                    String outputString = entry.getValue().getText();
+                    testRuleMethodST.setAttribute("isLexicalRule", ts.isLexicalRule());
+                    testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(testRuleName)+i);
+                    testRuleMethodST.setAttribute("testRuleName", '"'+testRuleName+'"');
+                    testRuleMethodST.setAttribute("test", input);
+                    testRuleMethodST.setAttribute("tokenType", getTypeString(entry.getValue().getType()));
+
+                    // normalize whitespace
+                    outputString = normalizeTreeSpec(outputString);
+
+                    if ( entry.getValue().getType()==gUnitParser.ACTION ) {	// trim ';' at the end of ACTION if there is...
+                        //testRuleMethodST.setAttribute("expecting", outputString.substring(0, outputString.length()-1));
+                        testRuleMethodST.setAttribute("expecting", outputString);
+                    }
+                    else if ( entry.getValue().getType()==gUnitParser.RETVAL ) {	// Expected: RETVAL
+                        testRuleMethodST.setAttribute("expecting", outputString);
+                    }
+                    else {	// Attach "" to expected STRING or AST
+                        // strip newlines for (...) tree stuff
+                        outputString = outputString.replaceAll("\n", "");
+                        testRuleMethodST.setAttribute("expecting", '"'+escapeForJava(outputString)+'"');
+                    }
+                }
+                buf.append(testRuleMethodST.toString());
+            }
+        }
+    }
+
+    private void genTreeMethods(StringTemplateGroup group, StringBuffer buf) {
+        for ( gUnitTestSuite ts: grammarInfo.getRuleTestSuites() ) {
+            int i = 0;
+            for ( Map.Entry<gUnitTestInput, AbstractTest> entry : ts.testSuites.entrySet() ) {	// each rule may contain multiple tests
+                gUnitTestInput input = entry.getKey();
+                i++;
+                StringTemplate testRuleMethodST;
+                // If the rule has multiple return values or an AST
+                if ( entry.getValue().getType()== gUnitParser.ACTION && ruleWithReturn.containsKey(ts.getTreeRuleName()) ) {
+                    testRuleMethodST = group.getInstanceOf("testTreeRuleMethod2");
+                    String outputString = entry.getValue().getText();
+                    testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(ts.getTreeRuleName())+"_walks_"+
+                                                                changeFirstCapital(ts.getRuleName())+i);
+                    testRuleMethodST.setAttribute("testTreeRuleName", '"'+ts.getTreeRuleName()+'"');
+                    testRuleMethodST.setAttribute("testRuleName", '"'+ts.getRuleName()+'"');
+                    testRuleMethodST.setAttribute("test", input);
+                    testRuleMethodST.setAttribute("returnType", ruleWithReturn.get(ts.getTreeRuleName()));
+                    testRuleMethodST.setAttribute("expecting", outputString);
+                }
+                else {
+                    testRuleMethodST = group.getInstanceOf("testTreeRuleMethod");
+                    String outputString = entry.getValue().getText();
+                    testRuleMethodST.setAttribute("methodName", "test"+changeFirstCapital(ts.getTreeRuleName())+"_walks_"+
+                                                                changeFirstCapital(ts.getRuleName())+i);
+                    testRuleMethodST.setAttribute("testTreeRuleName", '"'+ts.getTreeRuleName()+'"');
+                    testRuleMethodST.setAttribute("testRuleName", '"'+ts.getRuleName()+'"');
+                    testRuleMethodST.setAttribute("test", input);
+                    testRuleMethodST.setAttribute("tokenType", getTypeString(entry.getValue().getType()));
+
+                    if ( entry.getValue().getType()==gUnitParser.ACTION ) {	// trim ';' at the end of ACTION if there is...
+                        //testRuleMethodST.setAttribute("expecting", outputString.substring(0, outputString.length()-1));
+                        testRuleMethodST.setAttribute("expecting", outputString);
+                    }
+                    else if ( entry.getValue().getType()==gUnitParser.RETVAL ) {	// Expected: RETVAL
+                        testRuleMethodST.setAttribute("expecting", outputString);
+                    }
+                    else {	// Attach "" to expected STRING or AST
+                        testRuleMethodST.setAttribute("expecting", '"'+escapeForJava(outputString)+'"');
+                    }
+                }
+                buf.append(testRuleMethodST.toString());
+            }
+        }
+    }
+
+    // return a meaningful gUnit token type name instead of using the magic number
+    public String getTypeString(int type) {
+        String typeText;
+        switch (type) {
+            case gUnitParser.OK :
+                typeText = "org.antlr.gunit.gUnitParser.OK";
+                break;
+            case gUnitParser.FAIL :
+                typeText = "org.antlr.gunit.gUnitParser.FAIL";
+                break;
+            case gUnitParser.STRING :
+                typeText = "org.antlr.gunit.gUnitParser.STRING";
+                break;
+            case gUnitParser.ML_STRING :
+                typeText = "org.antlr.gunit.gUnitParser.ML_STRING";
+                break;
+            case gUnitParser.RETVAL :
+                typeText = "org.antlr.gunit.gUnitParser.RETVAL";
+                break;
+            case gUnitParser.AST :
+                typeText = "org.antlr.gunit.gUnitParser.AST";
+                break;
+            default :
+                typeText = "org.antlr.gunit.gUnitParser.EOF";
+                break;
+        }
+        return typeText;
+    }
+
+    protected void writeTestFile(String dir, String fileName, String content) {
+        try {
+            File f = new File(dir, fileName);
+            FileWriter w = new FileWriter(f);
+            BufferedWriter bw = new BufferedWriter(w);
+            bw.write(content);
+            bw.close();
+            w.close();
+        }
+        catch (IOException ioe) {
+            logger.log(Level.SEVERE, "can't write file", ioe);
+        }
+    }
+
+    public static String escapeForJava(String inputString) {
+        // Gotta escape literal backslash before putting in specials that use escape.
+        inputString = inputString.replace("\\", "\\\\");
+        // Then double quotes need escaping (singles are OK of course).
+        inputString = inputString.replace("\"", "\\\"");
+        // note: replace a newline with the two-character string "\n", a tab with "\t", and similarly for \r, \b, \f
+        inputString = inputString.replace("\n", "\\n").replace("\t", "\\t").replace("\r", "\\r").replace("\b", "\\b").replace("\f", "\\f");
+
+        return inputString;
+    }
+
+    protected String changeFirstCapital(String ruleName) {
+        String firstChar = String.valueOf(ruleName.charAt(0));
+        return firstChar.toUpperCase()+ruleName.substring(1);
+    }
+
+    public static String normalizeTreeSpec(String t) {
+        List<String> words = new ArrayList<String>();
+        int i = 0;
+        StringBuilder word = new StringBuilder();
+        while ( i<t.length() ) {
+            if ( t.charAt(i)=='(' || t.charAt(i)==')' ) {
+                if ( word.length()>0 ) {
+                    words.add(word.toString());
+                    word.setLength(0);
+                }
+                words.add(String.valueOf(t.charAt(i)));
+                i++;
+                continue;
+            }
+            if ( Character.isWhitespace(t.charAt(i)) ) {
+                // upon WS, save word
+                if ( word.length()>0 ) {
+                    words.add(word.toString());
+                    word.setLength(0);
+                }
+                i++;
+                continue;
+            }
+
+            // ... "x" or ...("x"
+            if ( t.charAt(i)=='"' && (i-1)>=0 &&
+                 (t.charAt(i-1)=='(' || Character.isWhitespace(t.charAt(i-1))) )
+            {
+                i++;
+                while ( i<t.length() && t.charAt(i)!='"' ) {
+                    if ( t.charAt(i)=='\\' &&
+                         (i+1)<t.length() && t.charAt(i+1)=='"' ) // handle \"
+                    {
+                        word.append('"');
+                        i+=2;
+                        continue;
+                    }
+                    word.append(t.charAt(i));
+                    i++;
+                }
+                i++; // skip final "
+                words.add(word.toString());
+                word.setLength(0);
+                continue;
+            }
+            word.append(t.charAt(i));
+            i++;
+        }
+        if ( word.length()>0 ) {
+            words.add(word.toString());
+        }
+        //System.out.println("words="+words);
+        StringBuilder buf = new StringBuilder();
+        for (int j=0; j<words.size(); j++) {
+            if ( j>0 && !words.get(j).equals(")") &&
+                 !words.get(j-1).equals("(") ) {
+                buf.append(' ');
+            }
+            buf.append(words.get(j));
+        }
+        return buf.toString();
+    }
+
+}
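A hypothetical quick check of the two public static helpers above; the expected outputs are worked out from the code itself rather than from any project documentation.

    import org.antlr.gunit.JUnitCodeGen;

    public class JUnitCodeGenHelpersDemo {
        public static void main(String[] args) {
            // Whitespace normalization for tree specs: single spaces between siblings,
            // none after '(' or before ')'.
            System.out.println(JUnitCodeGen.normalizeTreeSpec("( a   b ( c ) )"));  // prints: (a b (c))

            // Escaping used when embedding expected/actual strings into generated Java source.
            System.out.println(JUnitCodeGen.escapeForJava("say \"hi\"\n"));         // prints: say \"hi\"\n
        }
    }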
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/OutputTest.java b/gunit/src/main/java/org/antlr/gunit/OutputTest.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/OutputTest.java
rename to gunit/src/main/java/org/antlr/gunit/OutputTest.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/ReturnTest.java b/gunit/src/main/java/org/antlr/gunit/ReturnTest.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/ReturnTest.java
rename to gunit/src/main/java/org/antlr/gunit/ReturnTest.java
diff --git a/gunit/src/main/java/org/antlr/gunit/gUnitBaseTest.java b/gunit/src/main/java/org/antlr/gunit/gUnitBaseTest.java
new file mode 100644
index 0000000..2b3b085
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/gUnitBaseTest.java
@@ -0,0 +1,475 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon, Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+
+import junit.framework.TestCase;
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.*;
+import org.antlr.stringtemplate.StringTemplate;
+
+import java.io.*;
+import java.lang.reflect.*;
+
+/** All gUnit-generated JUnit classes should extend this class,
+ *  which implements the essential methods for triggering the
+ *  ANTLR parser/tree walker.
+ */
+public abstract class gUnitBaseTest extends TestCase {
+
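+	// A generated subclass typically assigns the fields below to fully-qualified class names
+	// before calling the exec* methods, e.g. (hypothetical grammar "T" in package "p"):
+	//   lexerPath = "p.TLexer"; parserPath = "p.TParser"; treeParserPath = "p.TWalker";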
+	public String treeAdaptorPath;
+	public String packagePath;
+	public String lexerPath;
+	public String parserPath;
+	public String treeParserPath;
+
+	protected String stdout;
+	protected String stderr;
+
+	private PrintStream console = System.out;
+	private PrintStream consoleErr = System.err;
+
+	// Invoke target lexer.rule
+	public String execLexer(String testRuleName, int line, String testInput, boolean isFile) throws Exception {
+		CharStream input;
+		/** Set up ANTLR input stream based on input source, file or String */
+		if ( isFile ) {
+			String filePath = testInput;
+			File testInputFile = new File(filePath);
+			// if input test file is not found under the current dir, also try to look for it under the package dir
+			if ( !testInputFile.exists() && packagePath!=null ) {
+				testInputFile = new File(packagePath, filePath);
+				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
+			}
+			input = new ANTLRFileStream(filePath);
+		}
+		else {
+			input = new ANTLRStringStream(testInput);
+		}
+		Class<? extends Lexer> lexer;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+        try {
+            /** Use Reflection to create instances of lexer and parser */
+        	lexer = Class.forName(lexerPath).asSubclass(Lexer.class);
+            Constructor<? extends Lexer> lexConstructor = lexer.getConstructor(CharStream.class);
+            Lexer lexObj = lexConstructor.newInstance(input);				// makes new instance of lexer
+            input.setLine(line);
+
+            Method ruleName = lexer.getMethod("m"+testRuleName);
+
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+            /** Invoke lexer rule, and get the current index in CharStream */
+            ruleName.invoke(lexObj, new Object[0]);
+            Method ruleName2 = lexer.getMethod("getCharIndex");
+            int currentIndex = (Integer) ruleName2.invoke(lexObj, new Object[0]);
+            if ( currentIndex!=input.size() ) {
+            	ps2.println("extra text found, '"+input.substring(currentIndex, input.size()-1)+"'");
+            }
+
+            this.stdout = null;
+			this.stderr = null;
+
+			if ( err.toString().length()>0 ) {
+				this.stderr = err.toString();
+				return this.stderr;
+			}
+			if ( out.toString().length()>0 ) {
+				this.stdout = out.toString();
+			}
+			if ( err.toString().length()==0 && out.toString().length()==0 ) {
+				return null;
+			}
+        } catch (ClassNotFoundException e) {
+        	handleUnexpectedException(e);
+        } catch (SecurityException e) {
+        	handleUnexpectedException(e);
+        } catch (NoSuchMethodException e) {
+        	handleUnexpectedException(e);
+        } catch (IllegalArgumentException e) {
+        	handleUnexpectedException(e);
+        } catch (InstantiationException e) {
+        	handleUnexpectedException(e);
+        } catch (IllegalAccessException e) {
+        	handleUnexpectedException(e);
+        } catch (InvocationTargetException e) {	// This exception could be caused by an ANTLR runtime exception, e.g. MismatchedTokenException
+        	if ( e.getCause()!=null ) this.stderr = e.getCause().toString();
+			else this.stderr = e.toString();
+        	return this.stderr;
+        } finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+        return this.stdout;
+	}
+
+	// Invoke target parser.rule
+
+	public Object execParser(String testRuleName, int line, String testInput, boolean isFile) throws Exception {
+		CharStream input;
+		/** Set up ANTLR input stream based on input source, file or String */
+		if ( isFile ) {
+			String filePath = testInput;
+			File testInputFile = new File(filePath);
+			// if input test file is not found under the current dir, also try to look for it under the package dir
+			if ( !testInputFile.exists() && packagePath!=null ) {
+				testInputFile = new File(packagePath, filePath);
+				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
+			}
+			input = new ANTLRFileStream(filePath);
+		}
+		else {
+			input = new ANTLRStringStream(testInput);
+		}
+		Class<? extends Lexer> lexer;
+		Class<? extends Parser> parser;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+        ByteArrayOutputStream out = null;
+        ByteArrayOutputStream err = null;
+		try {
+			/** Use Reflection to create instances of lexer and parser */
+			lexer = Class.forName(lexerPath).asSubclass(Lexer.class);
+            Constructor<? extends Lexer> lexConstructor = lexer.getConstructor(CharStream.class);
+            Lexer lexObj = lexConstructor.newInstance(input);				// makes new instance of lexer
+            input.setLine(line);
+
+            CommonTokenStream tokens = new CommonTokenStream(lexObj);
+            parser = Class.forName(parserPath).asSubclass(Parser.class);
+            Constructor<? extends Parser> parConstructor = parser.getConstructor(TokenStream.class);
+            Parser parObj = parConstructor.newInstance(tokens);				// makes new instance of parser
+
+            // set up customized tree adaptor if necessary
+            if ( treeAdaptorPath!=null ) {
+            	Method _setTreeAdaptor = parser.getMethod("setTreeAdaptor", TreeAdaptor.class);
+            	Class<? extends TreeAdaptor> _treeAdaptor = Class.forName(treeAdaptorPath).asSubclass(TreeAdaptor.class);
+            	_setTreeAdaptor.invoke(parObj, _treeAdaptor.newInstance());
+            }
+
+            Method ruleName = parser.getMethod(testRuleName);
+
+            /** Start of I/O Redirecting */
+            out = new ByteArrayOutputStream();
+            err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+			/** Invoke grammar rule, and store if there is a return value */
+            Object ruleReturn = ruleName.invoke(parObj);
+            String astString = null;
+            String stString = null;
+            /** If rule has return value, determine if it contains an AST or a ST */
+            if ( ruleReturn!=null ) {
+                if ( ruleReturn.getClass().toString().indexOf(testRuleName+"_return")>0 ) {
+                	try {	// NullPointerException may happen here...
+                		Class<?> _return = Class.forName(parserPath+"$"+testRuleName+"_return");
+                		Method[] methods = _return.getDeclaredMethods();
+                		for(Method method : methods) {
+			                if ( method.getName().equals("getTree") ) {
+			                	Method returnName = _return.getMethod("getTree");
+		                    	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
+		                    	astString = tree.toStringTree();
+			                }
+			                else if ( method.getName().equals("getTemplate") ) {
+			                	Method returnName = _return.getMethod("getTemplate");
+			                	StringTemplate st = (StringTemplate) returnName.invoke(ruleReturn);
+			                	stString = st.toString();
+			                }
+			            }
+                	}
+                	catch(Exception e) {
+                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
+                	}
+                }
+            }
+
+			this.stdout = "";
+			this.stderr = "";
+
+			/** Invalid input */
+            if ( tokens.index()!=tokens.size()-1 ) {
+            	//throw new InvalidInputException();
+            	this.stderr += "Stopped parsing at token index "+tokens.index()+": ";
+            }
+
+			// retVal could be actual return object from rule, stderr or stdout
+            this.stdout += out.toString();
+            this.stderr += err.toString();
+
+			if ( err.toString().length()>0 ) return this.stderr;
+			if ( out.toString().length()>0 ) return this.stdout;
+			if ( astString!=null ) {	// Return toStringTree of AST
+				return astString;
+			}
+			else if ( stString!=null ) {// Return toString of ST
+				return stString;
+			}
+			if ( ruleReturn!=null ) {
+				return ruleReturn;
+			}
+			if ( err.toString().length()==0 && out.toString().length()==0 ) {
+				return null;
+			}
+		}
+        catch (ClassNotFoundException e) {
+        	handleUnexpectedException(e);
+		}
+        catch (SecurityException e) {
+        	handleUnexpectedException(e);
+		}
+        catch (NoSuchMethodException e) {
+        	handleUnexpectedException(e);
+		}
+        catch (IllegalAccessException e) {
+        	handleUnexpectedException(e);
+		}
+        catch (InvocationTargetException e) {
+            this.stdout = out.toString();
+            this.stderr = err.toString();
+
+			if ( e.getCause()!=null ) this.stderr += e.getCause().toString();
+			else this.stderr += e.toString();
+        	return this.stderr;
+		} finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+		return this.stdout;
+	}
+
+	// Invoke target parser.rule
+	public Object execTreeParser(String testTreeRuleName, String testRuleName, String testInput, boolean isFile) throws Exception {
+		CharStream input;
+		if ( isFile ) {
+			String filePath = testInput;
+			File testInputFile = new File(filePath);
+			// if input test file is not found under the current dir, also try to look for it under the package dir
+			if ( !testInputFile.exists() && packagePath!=null ) {
+				testInputFile = new File(packagePath, filePath);
+				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
+			}
+			input = new ANTLRFileStream(filePath);
+		}
+		else {
+			input = new ANTLRStringStream(testInput);
+		}
+		Class<? extends Lexer> lexer;
+		Class<? extends Parser> parser;
+		Class<? extends TreeParser> treeParser;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+		try {
+			/** Use Reflection to create instances of lexer and parser */
+        	lexer = Class.forName(lexerPath).asSubclass(Lexer.class);
+            Constructor<? extends Lexer> lexConstructor = lexer.getConstructor(CharStream.class);
+            Lexer lexObj = lexConstructor.newInstance(input);				// makes new instance of lexer
+
+            CommonTokenStream tokens = new CommonTokenStream(lexObj);
+
+            parser = Class.forName(parserPath).asSubclass(Parser.class);
+            Constructor<? extends Parser> parConstructor = parser.getConstructor(TokenStream.class);
+            Parser parObj = parConstructor.newInstance(tokens);				// makes new instance of parser
+
+            // set up customized tree adaptor if necessary
+            TreeAdaptor customTreeAdaptor = null;
+            if ( treeAdaptorPath!=null ) {
+            	Method _setTreeAdaptor = parser.getMethod("setTreeAdaptor", TreeAdaptor.class);
+            	Class<? extends TreeAdaptor> _treeAdaptor = Class.forName(treeAdaptorPath).asSubclass(TreeAdaptor.class);
+            	customTreeAdaptor = _treeAdaptor.newInstance();
+            	_setTreeAdaptor.invoke(parObj, customTreeAdaptor);
+            }
+
+            Method ruleName = parser.getMethod(testRuleName);
+
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+            /** Invoke grammar rule, and get the return value */
+            Object ruleReturn = ruleName.invoke(parObj);
+
+            Class<?> _return = Class.forName(parserPath+"$"+testRuleName+"_return");
+        	Method returnName = _return.getMethod("getTree");
+        	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
+
+        	// Walk resulting tree; create tree nodes stream first
+        	CommonTreeNodeStream nodes;
+        	if ( customTreeAdaptor!=null ) {
+        		nodes = new CommonTreeNodeStream(customTreeAdaptor, tree);
+        	}
+        	else {
+        		nodes = new CommonTreeNodeStream(tree);
+        	}
+        	// AST nodes have payloads that point into the token stream
+        	nodes.setTokenStream(tokens);
+        	// Create a tree walker attached to the nodes stream
+        	treeParser = Class.forName(treeParserPath).asSubclass(TreeParser.class);
+            Constructor<? extends TreeParser> treeParConstructor = treeParser.getConstructor(TreeNodeStream.class);
+            TreeParser treeParObj = treeParConstructor.newInstance(nodes);	// makes new instance of tree parser
+        	// Invoke the tree rule, and store the return value if there is one
+            Method treeRuleName = treeParser.getMethod(testTreeRuleName);
+            Object treeRuleReturn = treeRuleName.invoke(treeParObj);
+
+            String astString = null;
+            String stString = null;
+            /** If tree rule has return value, determine if it contains an AST or a ST */
+            if ( treeRuleReturn!=null ) {
+                if ( treeRuleReturn.getClass().toString().indexOf(testTreeRuleName+"_return")>0 ) {
+                	try {	// NullPointerException may happen here...
+                		Class<?> _treeReturn = Class.forName(treeParserPath+"$"+testTreeRuleName+"_return");
+                		Method[] methods = _treeReturn.getDeclaredMethods();
+			            for(Method method : methods) {
+			                if ( method.getName().equals("getTree") ) {
+			                	Method treeReturnName = _treeReturn.getMethod("getTree");
+		                    	CommonTree returnTree = (CommonTree) treeReturnName.invoke(treeRuleReturn);
+		                        astString = returnTree.toStringTree();
+			                }
+			                else if ( method.getName().equals("getTemplate") ) {
+			                	Method treeReturnName = _treeReturn.getMethod("getTemplate");
+			                	StringTemplate st = (StringTemplate) treeReturnName.invoke(treeRuleReturn);
+			                	stString = st.toString();
+			                }
+			            }
+                	}
+                	catch(Exception e) {
+                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
+                	}
+                }
+            }
+
+			this.stdout = null;
+			this.stderr = null;
+
+			/** Invalid input */
+            if ( tokens.index()!=tokens.size()-1 ) {
+            	throw new InvalidInputException();
+            }
+
+			// retVal could be actual return object from rule, stderr or stdout
+			if ( err.toString().length()>0 ) {
+				this.stderr = err.toString();
+				return this.stderr;
+			}
+			if ( out.toString().length()>0 ) {
+				this.stdout = out.toString();
+			}
+			if ( astString!=null ) {	// Return toStringTree of AST
+				return astString;
+			}
+			else if ( stString!=null ) {// Return toString of ST
+				return stString;
+			}
+			if ( treeRuleReturn!=null ) {
+				return treeRuleReturn;
+			}
+			if ( err.toString().length()==0 && out.toString().length()==0 ) {
+				return null;
+			}
+		} catch (ClassNotFoundException e) {
+			handleUnexpectedException(e);
+		} catch (SecurityException e) {
+			handleUnexpectedException(e);
+		} catch (NoSuchMethodException e) {
+			handleUnexpectedException(e);
+		} catch (IllegalAccessException e) {
+			handleUnexpectedException(e);
+		} catch (InvocationTargetException e) {
+			if ( e.getCause()!=null ) this.stderr = e.getCause().toString();
+			else this.stderr = e.toString();
+        	return this.stderr;
+		} finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+		return stdout;
+	}
+
+	// Modify the return value if the expected token type is OK or FAIL
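+	// For example: with an expected OK token this returns "OK" when stderr is empty and
+	// "FAIL, <stderr>" otherwise; with an expected FAIL token it returns "FAIL" when stderr is
+	// non-empty and "OK" when it is not; any other token type passes retVal through unchanged.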
+	public Object examineExecResult(int tokenType, Object retVal) {
+		System.out.println("expect "+(tokenType==gUnitParser.OK?"OK":"FAIL")+
+						   ", stderr=="+stderr);
+		if ( tokenType==gUnitParser.OK ) {	// expected Token: OK
+			if ( this.stderr==null || this.stderr.length()==0 ) {
+				return "OK";
+			}
+			else {
+				return "FAIL, "+this.stderr;
+			}
+		}
+		else if ( tokenType==gUnitParser.FAIL ) {	// expected Token: FAIL
+			if ( this.stderr!=null && this.stderr.length()>0 ) {
+				return "FAIL";
+			}
+			else {
+				return "OK";
+			}
+		}
+		else {	// return the same object for the other token types
+			return retVal;
+		}
+	}
+
+	protected void handleUnexpectedException(Exception e) {
+		e.printStackTrace();
+		System.exit(1);
+	}
+
+}
diff --git a/gunit/src/main/java/org/antlr/gunit/gUnitExecutor.java b/gunit/src/main/java/org/antlr/gunit/gUnitExecutor.java
new file mode 100644
index 0000000..ab759c8
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/gUnitExecutor.java
@@ -0,0 +1,655 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2007-2008 Leon Jen-Yuan Su
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit;
+
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.CommonTree;
+import org.antlr.runtime.tree.CommonTreeNodeStream;
+import org.antlr.runtime.tree.TreeAdaptor;
+import org.antlr.runtime.tree.TreeNodeStream;
+import org.antlr.stringtemplate.CommonGroupLoader;
+import org.antlr.stringtemplate.StringTemplate;
+import org.antlr.stringtemplate.StringTemplateGroup;
+import org.antlr.stringtemplate.StringTemplateGroupLoader;
+import org.antlr.stringtemplate.language.AngleBracketTemplateLexer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.antlr.runtime.tree.TreeParser;
+
+public class gUnitExecutor implements ITestSuite {
+	public GrammarInfo grammarInfo;
+
+	private final ClassLoader grammarClassLoader;
+
+	private final String testsuiteDir;
+
+	public int numOfTest;
+
+	public int numOfSuccess;
+
+	public int numOfFailure;
+
+	private String title;
+
+	public int numOfInvalidInput;
+
+	private String parserName;
+
+	private String lexerName;
+
+	public List<AbstractTest> failures;
+	public List<AbstractTest> invalids;
+
+	private PrintStream console = System.out;
+    private PrintStream consoleErr = System.err;
+
+    public gUnitExecutor(GrammarInfo grammarInfo, String testsuiteDir) {
+    	this( grammarInfo, determineClassLoader(), testsuiteDir);
+    }
+
+    private static ClassLoader determineClassLoader() {
+    	ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+    	if ( classLoader == null ) {
+    		classLoader = gUnitExecutor.class.getClassLoader();
+    	}
+    	return classLoader;
+    }
+
+	public gUnitExecutor(GrammarInfo grammarInfo, ClassLoader grammarClassLoader, String testsuiteDir) {
+		this.grammarInfo = grammarInfo;
+		this.grammarClassLoader = grammarClassLoader;
+		this.testsuiteDir = testsuiteDir;
+		numOfTest = 0;
+		numOfSuccess = 0;
+		numOfFailure = 0;
+		numOfInvalidInput = 0;
+		failures = new ArrayList<AbstractTest>();
+		invalids = new ArrayList<AbstractTest>();
+	}
+
+	protected ClassLoader getGrammarClassLoader() {
+		return grammarClassLoader;
+	}
+
+	protected final Class<?> classForName(String name) throws ClassNotFoundException {
+		return getGrammarClassLoader().loadClass( name );
+	}
+
+	public String execTest() throws IOException{
+		// Set up string template for testing result
+		StringTemplate testResultST = getTemplateGroup().getInstanceOf("testResult");
+		try {
+			/** Set up appropriate path for parser/lexer if using package */
+			if (grammarInfo.getGrammarPackage()!=null ) {
+				parserName = grammarInfo.getGrammarPackage()+"."+grammarInfo.getGrammarName()+"Parser";
+				lexerName = grammarInfo.getGrammarPackage()+"."+grammarInfo.getGrammarName()+"Lexer";
+			}
+			else {
+				parserName = grammarInfo.getGrammarName()+"Parser";
+				lexerName = grammarInfo.getGrammarName()+"Lexer";
+			}
+
+			/*** Start Unit/Functional Testing ***/
+			// Execute unit tests for parser, lexer and tree grammar
+			if ( grammarInfo.getTreeGrammarName()!=null ) {
+				title = "executing testsuite for tree grammar:"+grammarInfo.getTreeGrammarName()+" walks "+parserName;
+			}
+			else {
+				title = "executing testsuite for grammar:"+grammarInfo.getGrammarName();
+			}
+			executeTests();
+			// End of execution of unit testing
+
+			// Fill in the template holes with the test results
+			testResultST.setAttribute("title", title);
+			testResultST.setAttribute("num_of_test", numOfTest);
+			testResultST.setAttribute("num_of_failure", numOfFailure);
+			if ( numOfFailure>0 ) {
+				testResultST.setAttribute("failure", failures);
+			}
+			if ( numOfInvalidInput>0 ) {
+				testResultST.setAttribute("has_invalid", true);
+				testResultST.setAttribute("num_of_invalid", numOfInvalidInput);
+				testResultST.setAttribute("invalid", invalids);
+			}
+		}
+		catch (Exception e) {
+			handleUnexpectedException(e);
+        }
+		return testResultST.toString();
+	}
+
+	private StringTemplateGroup getTemplateGroup() {
+		StringTemplateGroupLoader loader = new CommonGroupLoader("org/antlr/gunit", null);
+		StringTemplateGroup.registerGroupLoader(loader);
+		StringTemplateGroup.registerDefaultLexer(AngleBracketTemplateLexer.class);
+		StringTemplateGroup group = StringTemplateGroup.loadGroup("gUnitTestResult");
+		return group;
+	}
+
+	// TODO: throw more specific exceptions
+	private gUnitTestResult runCorrectParser(String parserName, String lexerName, String rule, String lexicalRule, String treeRule, gUnitTestInput input) throws Exception
+	{
+		if ( lexicalRule!=null ) return runLexer(lexerName, lexicalRule, input);
+		else if ( treeRule!=null ) return runTreeParser(parserName, lexerName, rule, treeRule, input);
+		else return runParser(parserName, lexerName, rule, input);
+	}
+
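+	// Each test input is run through runCorrectParser and then classified: a test passes when the
+	// expected string equals the actual result, or when the expected token is FAIL and the actual
+	// result is anything other than "OK" (lexer tests report an error message rather than "FAIL");
+	// everything else counts as a failure, and inputs that raise InvalidInputException are tallied
+	// separately as invalid rather than failed.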
+	private void executeTests() throws Exception {
+		for ( gUnitTestSuite ts: grammarInfo.getRuleTestSuites() ) {
+			String rule = ts.getRuleName();
+			String lexicalRule = ts.getLexicalRuleName();
+			String treeRule = ts.getTreeRuleName();
+			for ( Map.Entry<gUnitTestInput, AbstractTest> entry : ts.testSuites.entrySet() ) {	// each rule may contain multiple tests
+				gUnitTestInput input = entry.getKey();
+				numOfTest++;
+				// Run parser, and get the return value or stdout or stderr if there is
+				gUnitTestResult result = null;
+				AbstractTest test = entry.getValue();
+				try {
+					// TODO: create a -debug option to turn on logging, which shows progress of running tests
+					//System.out.print(numOfTest + ". Running rule: " + rule + "; input: '" + input.testInput + "'");
+					result = runCorrectParser(parserName, lexerName, rule, lexicalRule, treeRule, input);
+					// TODO: create a -debug option to turn on logging, which shows progress of running tests
+					//System.out.println("; Expecting " + test.getExpected() + "; Success?: " + test.getExpected().equals(test.getResult(result)));
+				} catch ( InvalidInputException e) {
+					numOfInvalidInput++;
+					test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.line, input.input);
+					test.setActual(input.input);
+					invalids.add(test);
+					continue;
+				}	// TODO: ensure there's no other exceptions required to be handled here...
+
+				String expected = test.getExpected();
+				String actual = test.getResult(result);
+				test.setActual(actual);
+
+				if (actual == null) {
+					numOfFailure++;
+					test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.line, input.input);
+					test.setActual("null");
+					failures.add(test);
+					onFail(test);
+				}
+				// the 2nd condition handles the assertFAIL case for a lexer rule, because BooleanTest returns the error message instead of 'FAIL' when isLexerTest is set
+				else if ( expected.equals(actual) || (expected.equals("FAIL")&&!actual.equals("OK") ) ) {
+					numOfSuccess++;
+					onPass(test);
+				}
+				// TODO: something with ACTIONS - at least create action test type and throw exception.
+				else if ( ts.testSuites.get(input).getType()==gUnitParser.ACTION ) {	// expected Token: ACTION
+					numOfFailure++;
+					test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.line, input.input);
+					test.setActual("\t"+"{ACTION} is not supported in the grammarInfo yet...");
+					failures.add(test);
+					onFail(test);
+				}
+				else {
+					numOfFailure++;
+					test.setHeader(rule, lexicalRule, treeRule, numOfTest, input.line, input.input);
+					failures.add(test);
+					onFail(test);
+				}
+			}	// end of 2nd for-loop: tests for individual rule
+		}	// end of 1st for-loop: testsuites for grammar
+	}
+
+	// TODO: throw proper exceptions
+	protected gUnitTestResult runLexer(String lexerName, String testRuleName, gUnitTestInput testInput) throws Exception {
+		CharStream input;
+		Class<? extends Lexer> lexer;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+		try {
+			/** Set up ANTLR input stream based on input source, file or String */
+			input = getANTLRInputStream(testInput);
+
+            /** Use Reflection to create instances of lexer and parser */
+        	lexer = classForName(lexerName).asSubclass(Lexer.class);
+            Constructor<? extends Lexer> lexConstructor = lexer.getConstructor(CharStream.class);
+            Lexer lexObj = lexConstructor.newInstance(input);				// makes new instance of lexer
+
+            Method ruleName = lexer.getMethod("m"+testRuleName);
+
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+            /** Invoke lexer rule, and get the current index in CharStream */
+            ruleName.invoke(lexObj);
+            Method ruleName2 = lexer.getMethod("getCharIndex");
+            int currentIndex = (Integer) ruleName2.invoke(lexObj);
+            if ( currentIndex!=input.size() ) {
+            	ps2.print("extra text found, '"+input.substring(currentIndex, input.size()-1)+"'");
+            }
+
+			if ( err.toString().length()>0 ) {
+				gUnitTestResult testResult = new gUnitTestResult(false, err.toString(), true);
+				testResult.setError(err.toString());
+				return testResult;
+			}
+			String stdout = null;
+			if ( out.toString().length()>0 ) {
+				stdout = out.toString();
+			}
+			return new gUnitTestResult(true, stdout, true);
+		} catch (IOException e) {
+			return getTestExceptionResult(e);
+        } catch (ClassNotFoundException e) {
+        	handleUnexpectedException( e );
+        } catch (SecurityException e) {
+        	handleUnexpectedException( e );
+        } catch (NoSuchMethodException e) {
+        	handleUnexpectedException( e );
+        } catch (IllegalArgumentException e) {
+        	handleUnexpectedException( e );
+        } catch (InstantiationException e) {
+        	handleUnexpectedException( e );
+        } catch (IllegalAccessException e) {
+        	handleUnexpectedException( e );
+        } catch (InvocationTargetException e) {	// This exception could be caused by an ANTLR runtime exception, e.g. MismatchedTokenException
+        	return getTestExceptionResult(e);
+        } finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+        // TODO: verify this:
+        throw new Exception("This should be unreachable?");
+	}
+
+	// TODO: throw proper exceptions
+	protected gUnitTestResult runParser(String parserName, String lexerName, String testRuleName, gUnitTestInput testInput) throws Exception {
+		CharStream input;
+		Class<? extends Lexer> lexer;
+		Class<? extends Parser> parser;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+		try {
+			/** Set up ANTLR input stream based on input source, file or String */
+			input = getANTLRInputStream(testInput);
+
+            /** Use Reflection to create instances of lexer and parser */
+        	lexer = classForName(lexerName).asSubclass(Lexer.class);
+            Constructor<? extends Lexer> lexConstructor = lexer.getConstructor(CharStream.class);
+            Lexer lexObj = lexConstructor.newInstance(input);				// makes new instance of lexer
+
+            CommonTokenStream tokens = new CommonTokenStream(lexObj);
+
+            parser = classForName(parserName).asSubclass(Parser.class);
+            Constructor<? extends Parser> parConstructor = parser.getConstructor(TokenStream.class);
+            Parser parObj = parConstructor.newInstance(tokens);				// makes new instance of parser
+
+            // set up customized tree adaptor if necessary
+            if ( grammarInfo.getAdaptor()!=null ) {
+            	Method _setTreeAdaptor = parser.getMethod("setTreeAdaptor", TreeAdaptor.class);
+            	Class<? extends TreeAdaptor> _treeAdaptor = classForName(grammarInfo.getAdaptor()).asSubclass(TreeAdaptor.class);
+            	_setTreeAdaptor.invoke(parObj, _treeAdaptor.newInstance());
+            }
+
+            Method ruleName = parser.getMethod(testRuleName);
+
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+            /** Invoke grammar rule, and store if there is a return value */
+            Object ruleReturn = ruleName.invoke(parObj);
+            String astString = null;
+            String stString = null;
+            /** If rule has return value, determine if it contains an AST or a ST */
+            if ( ruleReturn!=null ) {
+                if ( ruleReturn.getClass().toString().indexOf(testRuleName+"_return")>0 ) {
+                	try {	// NullPointerException may happen here...
+                		Class<?> _return = classForName(parserName+"$"+testRuleName+"_return");
+                		Method[] methods = _return.getDeclaredMethods();
+                		for(Method method : methods) {
+			                if ( method.getName().equals("getTree") ) {
+			                	Method returnName = _return.getMethod("getTree");
+		                    	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
+		                    	astString = tree.toStringTree();
+			                }
+			                else if ( method.getName().equals("getTemplate") ) {
+			                	Method returnName = _return.getMethod("getTemplate");
+			                	StringTemplate st = (StringTemplate) returnName.invoke(ruleReturn);
+			                	stString = st.toString();
+			                }
+			            }
+                	}
+                	catch(Exception e) {
+                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
+                	}
+                }
+            }
+
+            checkForValidInput(tokens, ps2);
+
+			if ( err.toString().length()>0 ) {
+				gUnitTestResult testResult = new gUnitTestResult(false, err.toString());
+				testResult.setError(err.toString());
+				return testResult;
+			}
+			String stdout = null;
+			// TODO: need to deal with the case which has both ST return value and stdout
+			if ( out.toString().length()>0 ) {
+				stdout = out.toString();
+			}
+			if ( astString!=null ) {	// Return toStringTree of AST
+				return new gUnitTestResult(true, stdout, astString);
+			}
+			else if ( stString!=null ) {// Return toString of ST
+				return new gUnitTestResult(true, stdout, stString);
+			}
+
+			if ( ruleReturn!=null ) {
+				// TODO: currently only works for a single return with int or String value
+				return new gUnitTestResult(true, stdout, String.valueOf(ruleReturn));
+			}
+			return new gUnitTestResult(true, stdout, stdout);
+		} catch (IOException e) {
+			return getTestExceptionResult(e);
+		} catch (ClassNotFoundException e) {
+			handleUnexpectedException( e );
+        } catch (SecurityException e) {
+        	handleUnexpectedException( e );
+        } catch (NoSuchMethodException e) {
+        	handleUnexpectedException( e );
+        } catch (IllegalArgumentException e) {
+        	handleUnexpectedException( e );
+        } catch (InstantiationException e) {
+        	handleUnexpectedException( e );
+        } catch (IllegalAccessException e) {
+        	handleUnexpectedException( e );
+        } catch (InvocationTargetException e) {	// This exception could be caused by an ANTLR runtime exception, e.g. MismatchedTokenException
+        	return getTestExceptionResult(e);
+        } finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+        // TODO: verify this:
+        throw new Exception("This should be unreachable?");
+	}
+
+	protected gUnitTestResult runTreeParser(String parserName, String lexerName, String testRuleName, String testTreeRuleName, gUnitTestInput testInput) throws Exception {
+		CharStream input;
+		String treeParserPath;
+		Class<? extends Lexer> lexer;
+		Class<? extends Parser> parser;
+		Class<? extends TreeParser> treeParser;
+		PrintStream ps = null;		// for redirecting stdout later
+		PrintStream ps2 = null;		// for redirecting stderr later
+		try {
+			/** Set up ANTLR input stream based on input source, file or String */
+			input = getANTLRInputStream(testInput);
+
+			/** Set up appropriate path for tree parser if using package */
+			if ( grammarInfo.getGrammarPackage()!=null ) {
+				treeParserPath = grammarInfo.getGrammarPackage()+"."+grammarInfo.getTreeGrammarName();
+			}
+			else {
+				treeParserPath = grammarInfo.getTreeGrammarName();
+			}
+
+            /** Use Reflection to create instances of lexer and parser */
+        	lexer = classForName(lexerName).asSubclass(Lexer.class);
+            Constructor<? extends Lexer> lexConstructor = lexer.getConstructor(CharStream.class);
+            Lexer lexObj = lexConstructor.newInstance(input);				// makes new instance of lexer
+
+            CommonTokenStream tokens = new CommonTokenStream(lexObj);
+
+            parser = classForName(parserName).asSubclass(Parser.class);
+            Constructor<? extends Parser> parConstructor = parser.getConstructor(TokenStream.class);
+            Parser parObj = parConstructor.newInstance(tokens);				// makes new instance of parser
+
+            // set up customized tree adaptor if necessary
+            TreeAdaptor customTreeAdaptor = null;
+            if ( grammarInfo.getAdaptor()!=null ) {
+            	Method _setTreeAdaptor = parser.getMethod("setTreeAdaptor", TreeAdaptor.class);
+            	Class<? extends TreeAdaptor> _treeAdaptor = classForName(grammarInfo.getAdaptor()).asSubclass(TreeAdaptor.class);
+            	customTreeAdaptor = _treeAdaptor.newInstance();
+            	_setTreeAdaptor.invoke(parObj, customTreeAdaptor);
+            }
+
+            Method ruleName = parser.getMethod(testRuleName);
+
+            /** Start of I/O Redirecting */
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            ByteArrayOutputStream err = new ByteArrayOutputStream();
+            ps = new PrintStream(out);
+            ps2 = new PrintStream(err);
+            System.setOut(ps);
+            System.setErr(ps2);
+            /** End of redirecting */
+
+            /** Invoke grammar rule, and get the return value */
+            Object ruleReturn = ruleName.invoke(parObj);
+
+            Class<?> _return = classForName(parserName+"$"+testRuleName+"_return");
+        	Method returnName = _return.getMethod("getTree");
+        	CommonTree tree = (CommonTree) returnName.invoke(ruleReturn);
+
+        	// Walk resulting tree; create tree nodes stream first
+        	CommonTreeNodeStream nodes;
+        	if ( customTreeAdaptor!=null ) {
+        		nodes = new CommonTreeNodeStream(customTreeAdaptor, tree);
+        	}
+        	else {
+        		nodes = new CommonTreeNodeStream(tree);
+        	}
+        	// AST nodes have payloads that point into the token stream
+        	nodes.setTokenStream(tokens);
+        	// Create a tree walker attached to the nodes stream
+        	treeParser = classForName(treeParserPath).asSubclass(TreeParser.class);
+            Constructor<? extends TreeParser> treeParConstructor = treeParser.getConstructor(TreeNodeStream.class);
+            TreeParser treeParObj = treeParConstructor.newInstance(nodes);	// makes new instance of tree parser
+        	// Invoke the tree rule, and store the return value if there is one
+            Method treeRuleName = treeParser.getMethod(testTreeRuleName);
+            Object treeRuleReturn = treeRuleName.invoke(treeParObj);
+
+            String astString = null;
+            String stString = null;
+            /** If tree rule has return value, determine if it contains an AST or a ST */
+            if ( treeRuleReturn!=null ) {
+                if ( treeRuleReturn.getClass().toString().indexOf(testTreeRuleName+"_return")>0 ) {
+                	try {	// NullPointerException may happen here...
+                		Class<?> _treeReturn = classForName(treeParserPath+"$"+testTreeRuleName+"_return");
+                		Method[] methods = _treeReturn.getDeclaredMethods();
+			            for(Method method : methods) {
+			                if ( method.getName().equals("getTree") ) {
+			                	Method treeReturnName = _treeReturn.getMethod("getTree");
+		                    	CommonTree returnTree = (CommonTree) treeReturnName.invoke(treeRuleReturn);
+		                        astString = returnTree.toStringTree();
+			                }
+			                else if ( method.getName().equals("getTemplate") ) {
+			                	Method treeReturnName = _treeReturn.getMethod("getTemplate");
+			                	StringTemplate st = (StringTemplate) treeReturnName.invoke(treeRuleReturn);
+			                	stString = st.toString();
+			                }
+			            }
+                	}
+                	catch(Exception e) {
+                		System.err.println(e);	// Note: If any exception occurs, the test is viewed as failed.
+                	}
+                }
+            }
+
+			checkForValidInput( tokens, ps2 );
+
+			if ( err.toString().length()>0 ) {
+				gUnitTestResult testResult = new gUnitTestResult(false, err.toString());
+				testResult.setError(err.toString());
+				return testResult;
+			}
+
+			String stdout = null;
+			// TODO: need to deal with the case which has both ST return value and stdout
+			if ( out.toString().length()>0 ) {
+				stdout = out.toString();
+			}
+			if ( astString!=null ) {	// Return toStringTree of AST
+				return new gUnitTestResult(true, stdout, astString);
+			}
+			else if ( stString!=null ) {// Return toString of ST
+				return new gUnitTestResult(true, stdout, stString);
+			}
+
+			if ( treeRuleReturn!=null ) {
+				// TODO: again, currently only works for a single return with int or String value
+				return new gUnitTestResult(true, stdout, String.valueOf(treeRuleReturn));
+			}
+			return new gUnitTestResult(true, stdout, stdout);
+		} catch (IOException e) {
+			return getTestExceptionResult(e);
+		} catch (ClassNotFoundException e) {
+			handleUnexpectedException( e );
+        } catch (SecurityException e) {
+        	handleUnexpectedException( e );
+        } catch (NoSuchMethodException e) {
+        	handleUnexpectedException( e );
+        } catch (IllegalArgumentException e) {
+        	handleUnexpectedException( e );
+        } catch (InstantiationException e) {
+        	handleUnexpectedException( e );
+        } catch (IllegalAccessException e) {
+        	handleUnexpectedException( e );
+        } catch (InvocationTargetException e) {	// note: This exception could be caused by an ANTLR runtime exception...
+        	return getTestExceptionResult(e);
+        } finally {
+        	try {
+        		if ( ps!=null ) ps.close();
+    			if ( ps2!=null ) ps2.close();
+    			System.setOut(console);			// Reset standard output
+    			System.setErr(consoleErr);		// Reset standard err out
+        	} catch (Exception e) {
+        		e.printStackTrace();
+        	}
+        }
+        // TODO: verify this:
+        throw new Exception("Should not be reachable?");
+	}
+
+	// Create ANTLR input stream based on input source, file or String
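+	// For file inputs the path is resolved in this order: as given (relative to the current dir),
+	// then under testsuiteDir, then under the grammar package dir when a package is declared.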
+	private CharStream getANTLRInputStream(gUnitTestInput testInput) throws IOException {
+		CharStream input;
+		if ( testInput.isFile) {
+			String filePath = testInput.input;
+			File testInputFile = new File(filePath);
+			// if the input test file is not found under the current dir, try to look for it in the directory where the testsuite file is located
+			if ( !testInputFile.exists() ) {
+				testInputFile = new File(this.testsuiteDir, filePath);
+				if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
+				// if still not found, also try to look for it under the package dir
+				else if ( grammarInfo.getGrammarPackage()!=null ) {
+					testInputFile = new File("."+File.separator+grammarInfo.getGrammarPackage().replace(".", File.separator), filePath);
+					if ( testInputFile.exists() ) filePath = testInputFile.getCanonicalPath();
+				}
+			}
+			input = new ANTLRFileStream(filePath);
+		}
+		else {
+			input = new ANTLRStringStream(testInput.input);
+		}
+		return input;
+	}
+
+	// set up the cause of exception or the exception name into a gUnitTestResult instance
+	private gUnitTestResult getTestExceptionResult(Exception e) {
+		gUnitTestResult testResult;
+    	if ( e.getCause()!=null ) {
+    		testResult = new gUnitTestResult(false, e.getCause().toString(), true);
+    		testResult.setError(e.getCause().toString());
+    	}
+    	else {
+    		testResult = new gUnitTestResult(false, e.toString(), true);
+    		testResult.setError(e.toString());
+    	}
+    	return testResult;
+	}
+
+	/**
+	 * Verify the input has been properly consumed
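+	 * (if parsing stopped before the last token, any remaining token other than the synthetic
+	 * EOF token causes "Invalid input" to be written to ps2, which marks the test as failed)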
+	 */
+	protected void checkForValidInput(CommonTokenStream tokens, PrintStream ps2) {
+		if ( tokens.index() != tokens.size() - 1 ) {
+			//At this point we need to check for redundant EOF tokens
+			//which might have been added by the Parser:
+			List<? extends Token> endingTokens = tokens.getTokens(tokens.index(), tokens.size() -1);
+			for (Token endToken : endingTokens) {
+				if (! "<EOF>".equals(endToken.getText())) {
+					//writing to ps2 will mark the test as failed:
+					ps2.print( "Invalid input" );
+					return;
+				}
+			}
+		}
+	}
+
+    public void onPass(ITestCase passTest) {
+
+    }
+
+    public void onFail(ITestCase failTest) {
+
+    }
+
+	protected void handleUnexpectedException(Exception e) {
+		e.printStackTrace();
+		System.exit(1);
+	}
+
+}
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitTestInput.java b/gunit/src/main/java/org/antlr/gunit/gUnitTestInput.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitTestInput.java
rename to gunit/src/main/java/org/antlr/gunit/gUnitTestInput.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitTestResult.java b/gunit/src/main/java/org/antlr/gunit/gUnitTestResult.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitTestResult.java
rename to gunit/src/main/java/org/antlr/gunit/gUnitTestResult.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitTestSuite.java b/gunit/src/main/java/org/antlr/gunit/gUnitTestSuite.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/gUnitTestSuite.java
rename to gunit/src/main/java/org/antlr/gunit/gUnitTestSuite.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/AbstractInputEditor.java b/gunit/src/main/java/org/antlr/gunit/swingui/AbstractInputEditor.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/AbstractInputEditor.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/AbstractInputEditor.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/IController.java b/gunit/src/main/java/org/antlr/gunit/swingui/IController.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/IController.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/IController.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/ImageFactory.java b/gunit/src/main/java/org/antlr/gunit/swingui/ImageFactory.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/ImageFactory.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/ImageFactory.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/RuleListController.java b/gunit/src/main/java/org/antlr/gunit/swingui/RuleListController.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/RuleListController.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/RuleListController.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/RunnerController.java b/gunit/src/main/java/org/antlr/gunit/swingui/RunnerController.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/RunnerController.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/RunnerController.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/StatusBarController.java b/gunit/src/main/java/org/antlr/gunit/swingui/StatusBarController.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/StatusBarController.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/StatusBarController.java
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/TestCaseEditController.java b/gunit/src/main/java/org/antlr/gunit/swingui/TestCaseEditController.java
new file mode 100644
index 0000000..2f15229
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/TestCaseEditController.java
@@ -0,0 +1,633 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package org.antlr.gunit.swingui;
+
+import org.antlr.gunit.swingui.model.*;
+import org.antlr.gunit.swingui.ImageFactory;
+import java.awt.*;
+import java.awt.event.*;
+import java.util.HashMap;
+import javax.swing.*;
+import javax.swing.event.*;
+
+/**
+ *
+ * @author scai
+ */
+public class TestCaseEditController implements IController {
+
+    private JPanel view = new JPanel();
+
+    private JScrollPane scroll;
+    private JPanel paneDetail;
+    private AbstractEditorPane paneDetailInput, paneDetailOutput;
+    private JToolBar toolbar;
+    private JList listCases;
+    private ListModel listModel ;
+
+    public ActionListener onTestCaseNumberChange;
+
+    /* EDITORS */
+    private InputFileEditor editInputFile;
+    private InputStringEditor editInputString;
+    private InputMultiEditor editInputMulti;
+    private OutputResultEditor editOutputResult;
+    private OutputAstEditor editOutputAST;
+    private OutputStdEditor editOutputStd;
+    private OutputReturnEditor editOutputReturn;
+    
+    private JComboBox comboInputType, comboOutputType;
+
+    /* TYPE NAME */
+    private static final String IN_TYPE_STRING = "Single-line Text";
+    private static final String IN_TYPE_MULTI = "Multi-line Text";
+    private static final String IN_TYPE_FILE = "Disk File";
+    private static final String OUT_TYPE_BOOL = "OK or Fail";
+    private static final String OUT_TYPE_AST = "AST";
+    private static final String OUT_TYPE_STD = "Standard Output";
+    private static final String OUT_TYPE_RET = "Return Value";
+
+    private static final String DEFAULT_IN_SCRIPT = "";
+    private static final String DEFAULT_OUT_SCRIPT = "";
+
+    private static final Object[] INPUT_TYPE =  {
+        IN_TYPE_STRING, IN_TYPE_MULTI, IN_TYPE_FILE
+    };
+
+    private static final Object[] OUTPUT_TYPE = {
+        OUT_TYPE_BOOL, OUT_TYPE_AST, OUT_TYPE_STD, OUT_TYPE_RET
+    };
+
+    /* SIZE */
+    private static final int TEST_CASE_DETAIL_WIDTH = 300;
+    private static final int TEST_EDITOR_WIDTH = 280;
+    private static final int TEST_CASE_DETAIL_HEIGHT = 250;
+    private static final int TEST_EDITOR_HEIGHT = 120;
+
+    /* MODEL */
+    private Rule currentRule = null;
+    private TestCase currentTestCase = null;
+
+    /* END OF MODEL*/
+
+    private static final HashMap<Class<?>, String> TypeNameTable;
+    static {
+        TypeNameTable = new HashMap<Class<?>, String> ();
+        TypeNameTable.put(TestCaseInputString.class, IN_TYPE_STRING);
+        TypeNameTable.put(TestCaseInputMultiString.class, IN_TYPE_MULTI);
+        TypeNameTable.put(TestCaseInputFile.class, IN_TYPE_FILE);
+
+        TypeNameTable.put(TestCaseOutputResult.class, OUT_TYPE_BOOL);
+        TypeNameTable.put(TestCaseOutputAST.class, OUT_TYPE_AST);
+        TypeNameTable.put(TestCaseOutputStdOut.class, OUT_TYPE_STD);
+        TypeNameTable.put(TestCaseOutputReturn.class, OUT_TYPE_RET);
+    }
+
+    //private WorkSpaceView owner;
+
+    public TestCaseEditController(WorkSpaceView workspace) {
+        //this.owner = workspace;
+        initComponents();
+    }
+
+    public TestCaseEditController() {
+        initComponents();
+    }
+
+    public void OnLoadRule(Rule rule) {
+        if(rule == null) throw new IllegalArgumentException("Null");
+        this.currentRule = rule;
+        this.currentTestCase = null;
+        this.listModel = rule;
+        this.listCases.setModel(this.listModel);      
+    }
+
+    public void setCurrentTestCase(TestCase testCase) {
+        if(testCase == null) throw new IllegalArgumentException("Null test case");
+        this.listCases.setSelectedValue(testCase, true);
+        this.currentTestCase = testCase;
+    }
+
+    public Rule getCurrentRule() {
+        return this.currentRule;
+    }
+    
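+    /** Builds the case list, the input/output detail editors and the toolbar,
+     *  then assembles them into the view. */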
+    private void initComponents() {
+
+        /* CASE LIST */
+        listCases = new JList();
+        listCases.addListSelectionListener(new TestCaseListSelectionListener());
+        listCases.setCellRenderer(listRenderer);
+        listCases.setOpaque(false);
+        
+        scroll = new JScrollPane(listCases);
+        scroll.setBorder(BorderFactory.createTitledBorder(
+                BorderFactory.createEmptyBorder(), "Test Cases"));
+        scroll.setOpaque(false);
+        scroll.setViewportBorder(BorderFactory.createEtchedBorder());
+
+        /* CASE DETAIL */
+
+        editInputString = new InputStringEditor();
+        editInputMulti = new InputMultiEditor();
+        editInputFile = new InputFileEditor();
+
+        editOutputResult = new OutputResultEditor();
+        editOutputAST = new OutputAstEditor();
+        editOutputStd = new OutputStdEditor();
+        editOutputReturn = new OutputReturnEditor();
+        
+        paneDetail = new JPanel();
+        paneDetail.setBorder(BorderFactory.createEmptyBorder());
+        paneDetail.setOpaque(false);
+
+        comboInputType = new JComboBox(INPUT_TYPE);
+        comboInputType.addActionListener(new ActionListener() {
+            public void actionPerformed(ActionEvent event) {
+                OnInputTestCaseTypeChanged(comboInputType.getSelectedItem());
+            }
+        });
+        comboOutputType = new JComboBox(OUTPUT_TYPE);
+        comboOutputType.addActionListener(new ActionListener() {
+            public void actionPerformed(ActionEvent event) {
+                OnOutputTestCaseTypeChanged(comboOutputType.getSelectedItem());
+            }
+        });
+        paneDetailInput = new InputEditorPane(comboInputType);
+        paneDetailOutput = new OutputEditorPane(comboOutputType);
+
+        BoxLayout layout = new BoxLayout(paneDetail, BoxLayout.PAGE_AXIS);
+        paneDetail.setLayout(layout);
+        
+        paneDetail.add(this.paneDetailInput);
+        paneDetail.add(this.paneDetailOutput);
+
+        /* TOOLBAR */
+        toolbar = new JToolBar("Edit TestCases", JToolBar.VERTICAL);
+        toolbar.setFloatable(false);
+        toolbar.add(new AddTestCaseAction());
+        toolbar.add(new RemoveTestCaseAction());
+
+        /* COMPOSITE */
+        view.setLayout(new BorderLayout());
+        view.setBorder(BorderFactory.createEmptyBorder());
+        view.setOpaque(false);
+        view.add(toolbar, BorderLayout.WEST);
+        view.add(scroll, BorderLayout.CENTER);
+        view.add(paneDetail, BorderLayout.EAST);
+    }
+
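+    /** Shows the editor matching the current test case's input type, loads its
+     *  script into that editor, and syncs the input-type combo box. Clears the
+     *  pane when no test case is selected. */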
+    private void updateInputEditor() {
+        JComponent editor = null;
+
+        if(currentTestCase != null ) {
+            ITestCaseInput input = this.currentTestCase.getInput();
+            if(input instanceof TestCaseInputString) {
+                this.editInputString.setText(input.getScript());
+                editor = this.editInputString;
+                comboInputType.setSelectedItem(IN_TYPE_STRING);
+            } else if(input instanceof TestCaseInputMultiString) {
+                this.editInputMulti.setText(input.getScript());
+                editor = this.editInputMulti.getView();
+                comboInputType.setSelectedItem(IN_TYPE_MULTI);
+            } else if(input instanceof TestCaseInputFile) {
+                this.editInputFile.setText(input.getScript());
+                editor = this.editInputFile;
+                comboInputType.setSelectedItem(IN_TYPE_FILE);
+            } else {
+                throw new Error("Wrong type");
+            }
+        }
+        
+        paneDetailInput.setEditor(editor);
+    }
+
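+    /** Shows the editor matching the current test case's output type, loads its
+     *  script into that editor, and syncs the output-type combo box. Clears the
+     *  pane when no test case is selected. */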
+    private void updateOutputEditor() {
+        JComponent editor = null;
+        
+        if(currentTestCase != null) {
+            
+            ITestCaseOutput output = this.currentTestCase.getOutput();
+
+            if(output instanceof TestCaseOutputAST) {
+
+                this.editOutputAST.setText(output.getScript());
+                editor = this.editOutputAST.getView();
+                comboOutputType.setSelectedItem(OUT_TYPE_AST);
+
+            } else if(output instanceof TestCaseOutputResult) {
+
+                this.editOutputResult.setValue(output.getScript());
+                editor = this.editOutputResult;
+                comboOutputType.setSelectedItem(OUT_TYPE_BOOL);
+
+            } else if(output instanceof TestCaseOutputStdOut) {
+
+                this.editOutputStd.setText(output.getScript());
+                editor = this.editOutputStd.getView();
+                comboOutputType.setSelectedItem(OUT_TYPE_STD);
+
+            } else if(output instanceof TestCaseOutputReturn) {
+
+                this.editOutputReturn.setText(output.getScript());
+                editor = this.editOutputReturn.getView();
+                comboOutputType.setSelectedItem(OUT_TYPE_RET);
+
+            } else {
+
+                throw new Error("Wrong type");
+                
+            }
+
+        }
+        this.paneDetailOutput.setEditor(editor);
+    }
+
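+    /** Invoked when the input-type combo changes: replaces the current test
+     *  case's input with a fresh instance of the chosen type (unless the type
+     *  is unchanged) and refreshes the input editor. */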
+    private void OnInputTestCaseTypeChanged(Object inputTypeStr) {
+        if(this.currentTestCase != null) {
+            ITestCaseInput input;
+            if(inputTypeStr == IN_TYPE_STRING) {
+                input = new TestCaseInputString(DEFAULT_IN_SCRIPT);
+            } else if(inputTypeStr == IN_TYPE_MULTI) {
+                input = new TestCaseInputMultiString(DEFAULT_IN_SCRIPT);
+            } else if(inputTypeStr == IN_TYPE_FILE) {
+                input = new TestCaseInputFile(DEFAULT_IN_SCRIPT);
+            } else {
+                throw new Error("Wrong Type");
+            }
+
+            if(input.getClass().equals(this.currentTestCase.getInput().getClass()))
+                return ;
+
+            this.currentTestCase.setInput(input);
+        }
+        this.updateInputEditor();
+    }
+
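+    /** Invoked when the output-type combo changes: replaces the current test
+     *  case's output with a fresh instance of the chosen type (unless the type
+     *  is unchanged) and refreshes the output editor. */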
+    private void OnOutputTestCaseTypeChanged(Object outputTypeStr) {
+        if(this.currentTestCase != null) {
+
+            ITestCaseOutput output;
+            if(outputTypeStr == OUT_TYPE_AST) {
+                output = new TestCaseOutputAST(DEFAULT_OUT_SCRIPT);
+            } else if(outputTypeStr == OUT_TYPE_BOOL) {
+                output = new TestCaseOutputResult(false);
+            } else if(outputTypeStr == OUT_TYPE_STD) {
+                output = new TestCaseOutputStdOut(DEFAULT_OUT_SCRIPT);
+            } else if(outputTypeStr == OUT_TYPE_RET) {
+                output = new TestCaseOutputReturn(DEFAULT_OUT_SCRIPT);
+            } else {
+                throw new Error("Wrong Type");
+            }
+
+            if(output.getClass().equals(this.currentTestCase.getOutput().getClass()))
+                return ;
+
+            this.currentTestCase.setOutput(output);
+        }
+        this.updateOutputEditor();
+    }
+
+
+    private void OnTestCaseSelected(TestCase testCase) {
+        //if(testCase == null) throw new RuntimeException("Null TestCase");
+        this.currentTestCase = testCase;
+        updateInputEditor();
+        updateOutputEditor();
+
+    }
+
+    private void OnAddTestCase() {
+        if(currentRule == null) return;
+        
+        final TestCase newCase = new TestCase(
+                new TestCaseInputString(""),
+                new TestCaseOutputResult(true));
+        this.currentRule.addTestCase(newCase);
+        setCurrentTestCase(newCase);
+
+        this.listCases.setSelectedValue(newCase, true);
+        this.listCases.updateUI();
+        this.OnTestCaseSelected(newCase);
+        this.onTestCaseNumberChange.actionPerformed(null);
+    }
+
+    private void OnRemoveTestCase() {
+        if(currentTestCase == null) return;
+        currentRule.removeElement(currentTestCase);
+        listCases.updateUI();
+
+        final TestCase nextActiveCase = listCases.isSelectionEmpty() ?
+            null : (TestCase) listCases.getSelectedValue() ;
+        OnTestCaseSelected(nextActiveCase);
+        this.onTestCaseNumberChange.actionPerformed(null);
+    }
+
+    public Object getModel() {
+        return currentRule;
+    }
+
+    public Component getView() {
+        return view;
+    }
+
+    /* EDITOR CONTAINER */
+
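+    /** Titled pane that shows a type combo box above a swappable editor
+     *  component; {@link #setEditor} exchanges the editor when the type changes. */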
+    public abstract class AbstractEditorPane extends JPanel {
+
+        private JComboBox combo;
+        private JComponent editor;
+        private String title;
+        private JLabel placeHolder = new JLabel();
+
+        public AbstractEditorPane(JComboBox comboBox, String title) {
+            this.combo = comboBox;
+            this.editor = placeHolder;
+            this.title = title;
+            this.initComponents();
+        }
+
+        private void initComponents() {
+            placeHolder.setPreferredSize(new Dimension(
+                    TEST_CASE_DETAIL_WIDTH, TEST_CASE_DETAIL_HEIGHT));
+            this.setLayout(new BoxLayout(this, BoxLayout.Y_AXIS));
+            this.add(combo, BorderLayout.NORTH);
+            this.add(editor, BorderLayout.CENTER);
+            this.setOpaque(false);
+            this.setBorder(BorderFactory.createTitledBorder(title));
+            this.setPreferredSize(new Dimension(
+                    TEST_CASE_DETAIL_WIDTH, TEST_CASE_DETAIL_HEIGHT));
+        }
+
+        public void setEditor(JComponent newEditor) {
+            if(newEditor == null) newEditor = placeHolder;
+            this.remove(editor);
+            this.add(newEditor);
+            this.editor = newEditor;
+            this.updateUI();
+        }
+    }
+
+    public class InputEditorPane extends AbstractEditorPane {
+        public InputEditorPane(JComboBox comboBox) {
+            super(comboBox, "Input");
+        }
+    }
+
+    public class OutputEditorPane extends AbstractEditorPane {
+        public OutputEditorPane(JComboBox comboBox) {
+            super(comboBox, "Output");
+        }
+    }
+
+    /* INPUT EDITORS */
+
+    public class InputStringEditor extends JTextField implements CaretListener {
+        public InputStringEditor() {
+            super();
+
+            this.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
+            this.addCaretListener(this);
+        }
+
+        public void caretUpdate(CaretEvent arg0) {
+            currentTestCase.getInput().setScript(getText());
+            listCases.updateUI();
+        }
+    }
+
+    public class InputMultiEditor implements CaretListener {
+        private JTextArea textArea = new JTextArea(20, 30);
+        private JScrollPane scroll = new JScrollPane(textArea,
+                JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
+                JScrollPane.HORIZONTAL_SCROLLBAR_ALWAYS);
+
+        public InputMultiEditor() {
+            super();
+            scroll.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
+            textArea.addCaretListener(this);
+        }
+
+        public void caretUpdate(CaretEvent arg0) {
+            currentTestCase.getInput().setScript(getText());
+            listCases.updateUI();
+        }
+
+        public String getText() {
+            return textArea.getText();
+        }
+
+        public void setText(String text) {
+            textArea.setText(text);
+        }
+
+        public JComponent getView() {
+            return scroll;
+        }
+    }
+
+    public class InputFileEditor extends InputStringEditor {}
+
+    public class OutputResultEditor extends JPanel implements ActionListener {
+        
+        private JToggleButton tbFail, tbOk;
+
+        public OutputResultEditor() {
+            super();
+
+            tbFail = new JToggleButton("Fail");
+            tbOk = new JToggleButton("OK");
+            ButtonGroup group = new ButtonGroup();
+            group.add(tbFail);
+            group.add(tbOk);
+
+            this.add(tbFail);
+            this.add(tbOk);
+
+            this.tbFail.addActionListener(this);
+            this.tbOk.addActionListener(this);
+
+            this.setPreferredSize(
+                    new Dimension(TEST_EDITOR_WIDTH, 100));
+        }
+
+        public void actionPerformed(ActionEvent e) {
+            TestCaseOutputResult output =
+                    (TestCaseOutputResult) currentTestCase.getOutput();
+
+            if(e.getSource() == tbFail) {
+                output.setScript(false);
+            } else {
+                output.setScript(true);
+            }
+
+            listCases.updateUI();
+        }
+
+        public void setValue(String value) {
+            if(TestCaseOutputResult.OK.equals(value)) {
+                this.tbOk.setSelected(true);
+            } else {
+                this.tbFail.setSelected(true);
+            }
+        }
+    }
+    
+
+    public class OutputAstEditor implements CaretListener {
+        private JTextArea textArea = new JTextArea(20, 30);
+        private JScrollPane scroll = new JScrollPane(textArea,
+                JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
+                JScrollPane.HORIZONTAL_SCROLLBAR_ALWAYS);
+
+        public OutputAstEditor() {
+            super();
+            scroll.setBorder(BorderFactory.createLineBorder(Color.LIGHT_GRAY));
+            textArea.addCaretListener(this);
+        }
+
+        public void caretUpdate(CaretEvent arg0) {
+            currentTestCase.getOutput().setScript(getText());
+            listCases.updateUI();
+        }
+
+        public void setText(String text) {
+            this.textArea.setText(text);
+        }
+
+        public String getText() {
+            return this.textArea.getText();
+        }
+
+        public JScrollPane getView() {
+            return this.scroll;
+        }
+    }
+
+
+    public class OutputStdEditor extends OutputAstEditor {}
+    public class OutputReturnEditor extends OutputAstEditor {}
+
+    /* EVENT HANDLERS */
+
+    private class TestCaseListSelectionListener implements ListSelectionListener {
+
+        public void valueChanged(ListSelectionEvent e) {
+            
+            if(e.getValueIsAdjusting()) return;
+            final JList list = (JList) e.getSource();
+            final TestCase value = (TestCase) list.getSelectedValue();
+            if(value != null) OnTestCaseSelected(value);
+            
+        }
+
+    }
+
+    /* ACTIONS */
+
+    private class AddTestCaseAction extends AbstractAction {
+        public AddTestCaseAction() {
+            super("Add", ImageFactory.getSingleton().ADD);
+            putValue(SHORT_DESCRIPTION, "Add a gUnit test case.");
+        }
+        public void actionPerformed(ActionEvent e) {
+            OnAddTestCase();
+        }
+    }
+
+    private class RemoveTestCaseAction extends AbstractAction {
+        public RemoveTestCaseAction() {
+            super("Remove", ImageFactory.getSingleton().DELETE);
+            putValue(SHORT_DESCRIPTION, "Remove a gUnit test case.");
+        }
+        public void actionPerformed(ActionEvent e) {
+            OnRemoveTestCase();
+        }
+    }
+
+    /* CELL RENDERERS */
+
+    private static TestCaseListRenderer listRenderer
+            = new TestCaseListRenderer();
+
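+    /** Renders each test case as a two-line preview: the clamped input script
+     *  above the clamped expected output, with an icon marking file vs. inline input. */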
+    private static class TestCaseListRenderer implements ListCellRenderer {
+
+        private static Font IN_FONT = new Font("mono", Font.PLAIN, 12);
+        private static Font OUT_FONT = new Font("default", Font.BOLD, 12);
+
+        public static String clamp(String text, int len) {
+            if(text.length() > len) {
+                return text.substring(0, len - 3).concat("...");
+            } else {
+                return text;
+            }
+        }
+
+        public static String clampAtNewLine(String text) {
+            int pos = text.indexOf('\n');
+            if(pos >= 0) {
+                return text.substring(0, pos).concat("...");
+            } else {
+                return text;
+            }
+        }
+
+        public Component getListCellRendererComponent(
+                JList list, Object value, int index,
+                boolean isSelected, boolean hasFocus) {
+
+            final JPanel pane = new JPanel();
+            
+            if (value instanceof TestCase) {
+                final TestCase item = (TestCase) value;
+
+                // create components
+                final JLabel labIn = new JLabel(
+                        clamp(clampAtNewLine(item.getInput().getScript()), 18));
+                final JLabel labOut = new JLabel(
+                        clamp(clampAtNewLine(item.getOutput().getScript()), 18));
+                labOut.setFont(OUT_FONT);
+                labIn.setFont(IN_FONT);
+
+                labIn.setIcon(item.getInput() instanceof TestCaseInputFile ?
+                    ImageFactory.getSingleton().FILE16 :
+                    ImageFactory.getSingleton().EDIT16);
+
+                pane.setBorder(BorderFactory.createEtchedBorder());
+                pane.setLayout(new BoxLayout(pane, BoxLayout.Y_AXIS));
+                pane.add(labIn);
+                pane.add(labOut);
+                pane.setBackground(isSelected ? Color.LIGHT_GRAY : Color.WHITE);
+            } 
+
+            return pane;
+        }
+    }
+
+}
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/Tool.java b/gunit/src/main/java/org/antlr/gunit/swingui/Tool.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/Tool.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/Tool.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceController.java b/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceController.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceController.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceController.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceView.java b/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceView.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceView.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/WorkSpaceView.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseInput.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseInput.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseInput.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseInput.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseOutput.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseOutput.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseOutput.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/ITestCaseOutput.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/Rule.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/Rule.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/Rule.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/Rule.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCase.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCase.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCase.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCase.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputFile.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputFile.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputFile.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputFile.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputMultiString.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputMultiString.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputMultiString.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputMultiString.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputString.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputString.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputString.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseInputString.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputAST.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputAST.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputAST.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputAST.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputResult.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputResult.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputResult.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputResult.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputReturn.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputReturn.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputReturn.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputReturn.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputStdOut.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputStdOut.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputStdOut.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/TestCaseOutputStdOut.java
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuite.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuite.java
new file mode 100644
index 0000000..1264f30
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuite.java
@@ -0,0 +1,100 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit.swingui.model;
+
+import java.io.*;
+import java.util.*;
+import org.antlr.runtime.*;
+
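+/**
+ * In-memory model of a gUnit test suite: the grammar name, its token stream,
+ * and the rules (with their test cases) loaded from the suite file.
+ */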
+public class TestSuite {
+
+    protected List<Rule> rules;
+    protected String grammarName;
+    protected CommonTokenStream tokens;
+    protected File testSuiteFile;
+
+    protected TestSuite(String gname, File testFile) {
+        grammarName = gname;
+        testSuiteFile = testFile;
+        rules = new ArrayList<Rule>();
+    }
+    
+    /* Get the gUnit test suite file name. */
+    public File getTestSuiteFile() {
+        return testSuiteFile;
+    }       
+
+    public void addRule(Rule currentRule) {
+        if(currentRule == null) throw new IllegalArgumentException("Null rule");
+        rules.add(currentRule);
+    }
+
+    // True if a rule with the same name already exists in this suite.
+    public boolean hasRule(Rule rule) {
+        for(Rule r: rules) {
+            if(r.getName().equals(rule.getName())) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public int getRuleCount() {
+        return rules.size();
+    }
+    
+    public void setRules(List<Rule> newRules) {
+        rules.clear();
+        rules.addAll(newRules);
+    }
+
+    /* GETTERS AND SETTERS */
+
+    public void setGrammarName(String name) { grammarName = name;}
+
+    public String getGrammarName() { return grammarName; }
+
+    public Rule getRule(int index) { return rules.get(index); }
+
+    public CommonTokenStream getTokens() { return tokens; }
+    
+    public void setTokens(CommonTokenStream ts) { tokens = ts; }
+
+    public Rule getRule(String name) {
+        for(Rule rule: rules) {
+            if(rule.getName().equals(name)) {
+                return rule;
+            }
+        }
+        return null;
+    }
+    
+    // only for stringtemplate use
+    public List<Rule> getRulesForStringTemplate() {return rules;}
+    
+}
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuiteFactory.java b/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuiteFactory.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuiteFactory.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/model/TestSuiteFactory.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/runner/NotifiedTestExecuter.java b/gunit/src/main/java/org/antlr/gunit/swingui/runner/NotifiedTestExecuter.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/runner/NotifiedTestExecuter.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/runner/NotifiedTestExecuter.java
diff --git a/gunit/src/main/java/org/antlr/gunit/swingui/runner/ParserLoader.java b/gunit/src/main/java/org/antlr/gunit/swingui/runner/ParserLoader.java
new file mode 100644
index 0000000..dcad501
--- /dev/null
+++ b/gunit/src/main/java/org/antlr/gunit/swingui/runner/ParserLoader.java
@@ -0,0 +1,124 @@
+/*
+ [The "BSD licence"]
+ Copyright (c) 2009 Shaoting Cai
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+package org.antlr.gunit.swingui.runner;
+
+import java.io.*;
+import java.util.HashMap;
+
+/**
+ * Class loader for parser &amp; lexer generated by antlr.
+ * @author Shaoting
+ */
+public class ParserLoader extends ClassLoader {
+
+    private HashMap<String, Class<?>> classList;
+    private String grammar;
+
+    /**
+     * Create a class loader for antlr parser/lexer.
+     * @param grammarName name of the grammar whose generated classes are loaded
+     * @param classDir directory containing the compiled parser/lexer .class files
+     * @throws IOException if classDir is not a directory or a class file cannot be read
+     * @throws ClassNotFoundException if no lexer class for the grammar is found
+     */
+    public ParserLoader(String grammarName, String classDir) throws IOException, ClassNotFoundException {
+
+        final String lexerName = grammarName + "Lexer";
+
+        // load all the class files in the "classDir" related to the grammarName
+        File dir = new File(classDir);
+        if(dir.isDirectory()) {
+            classList = new HashMap<String, Class<?>>();
+            grammar = grammarName;
+            File[] files = dir.listFiles(new ClassFilenameFilter(grammarName));
+            for(File f : files) {
+
+                // load class data; use readFully() since a single read() sized
+                // by available() is not guaranteed to return the whole file
+                final byte[] classData = new byte[(int) f.length()];
+                final DataInputStream in =
+                        new DataInputStream(new FileInputStream(f));
+                try {
+                    in.readFully(classData);
+                } finally {
+                    in.close();
+                }
+
+                // define class
+                final Class<?> newClass = defineClass(null, classData, 0, classData.length);
+                assert(newClass != null);
+                resolveClass(newClass);
+
+                // save to hashtable
+                final String fileName = f.getName();
+                final String className = fileName.substring(0, fileName.lastIndexOf("."));
+                classList.put(className, newClass);
+                //System.out.println("adding: " + className);
+            }
+        } else {
+            throw new IOException(classDir + " is not a directory.");
+        }
+
+        if(classList.isEmpty() || !classList.containsKey(lexerName)) {
+            throw new ClassNotFoundException(lexerName + " not found.");
+        }
+
+    }
+
+
+
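+    /**
+     * Serves classes generated for the grammar from the preloaded map;
+     * every other class is delegated to the system class loader.
+     */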
+    @Override
+    public synchronized Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
+        //System.out.print("loading: " + name);
+        if(name.startsWith(grammar)) {
+            if(classList.containsKey(name)) {
+                //System.out.println(" .... found");
+                return classList.get(name);
+            } else {
+                //System.out.println(" .... not found");
+                throw new ClassNotFoundException(name);
+            }
+            
+        } else {
+            final Class<?> c = findSystemClass(name);
+            //System.out.println(" .... system found " + c.getName());
+            return c;
+        }
+    }
+
+    /**
+     * Accepts grammarname...($...)?.class
+     */
+    protected static class ClassFilenameFilter implements FilenameFilter {
+
+        private String grammarName;
+
+        protected ClassFilenameFilter(String name) {
+            grammarName = name;
+        }
+
+        public boolean accept(File dir, String name) {
+            return name.startsWith(grammarName) && name.endsWith(".class");
+        }
+
+    }
+
+}
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/runner/TestSuiteAdapter.java b/gunit/src/main/java/org/antlr/gunit/swingui/runner/TestSuiteAdapter.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/runner/TestSuiteAdapter.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/runner/TestSuiteAdapter.java
diff --git a/antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/runner/gUnitAdapter.java b/gunit/src/main/java/org/antlr/gunit/swingui/runner/gUnitAdapter.java
similarity index 100%
rename from antlr-3.4/gunit/src/main/java/org/antlr/gunit/swingui/runner/gUnitAdapter.java
rename to gunit/src/main/java/org/antlr/gunit/swingui/runner/gUnitAdapter.java
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/gUnitTestResult.stg b/gunit/src/main/resources/org/antlr/gunit/gUnitTestResult.stg
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/gUnitTestResult.stg
rename to gunit/src/main/resources/org/antlr/gunit/gUnitTestResult.stg
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/junit.stg b/gunit/src/main/resources/org/antlr/gunit/junit.stg
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/junit.stg
rename to gunit/src/main/resources/org/antlr/gunit/junit.stg
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/gunit.stg b/gunit/src/main/resources/org/antlr/gunit/swingui/gunit.stg
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/gunit.stg
rename to gunit/src/main/resources/org/antlr/gunit/swingui/gunit.stg
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/accept.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/accept.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/accept.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/accept.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/add.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/add.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/add.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/add.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/addfile24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/addfile24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/addfile24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/addfile24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/delete24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/delete24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/delete24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/delete24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/edit16.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/edit16.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/edit16.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/edit16.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb16.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb16.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb16.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/favb16.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/favb24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/favb24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/file16.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/file16.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/file16.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/file16.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/filesearch24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/filesearch24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/filesearch24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/filesearch24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/floppy24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/floppy24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/floppy24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/floppy24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/folder24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/folder24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/folder24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/folder24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/help24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/help24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/help24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/help24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/next24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/next24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/next24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/next24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/redo24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/redo24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/redo24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/redo24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/refresh24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/refresh24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/refresh24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/refresh24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/runfail.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/runfail.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/runfail.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/runfail.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/runpass.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/runpass.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/runpass.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/runpass.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/saveas24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/saveas24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/saveas24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/saveas24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroup.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroup.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroup.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroup.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroupx.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroupx.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroupx.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/testgroupx.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/testsuite.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/testsuite.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/testsuite.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/testsuite.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile16.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile16.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile16.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile16.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/textfile24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/undo24.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/undo24.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/undo24.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/undo24.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/windowb16.png b/gunit/src/main/resources/org/antlr/gunit/swingui/images/windowb16.png
similarity index 100%
rename from antlr-3.4/gunit/src/main/resources/org/antlr/gunit/swingui/images/windowb16.png
rename to gunit/src/main/resources/org/antlr/gunit/swingui/images/windowb16.png
Binary files differ
diff --git a/antlr-3.4/gunit/src/test/java/org/antlr/gunit/GunitTest.java b/gunit/src/test/java/org/antlr/gunit/GunitTest.java
similarity index 100%
rename from antlr-3.4/gunit/src/test/java/org/antlr/gunit/GunitTest.java
rename to gunit/src/test/java/org/antlr/gunit/GunitTest.java
diff --git a/patches/fix_infinite_recursion.diff b/patches/fix_infinite_recursion.diff
new file mode 100644
index 0000000..06d8815
--- /dev/null
+++ b/patches/fix_infinite_recursion.diff
@@ -0,0 +1,24 @@
+From 12f69f5bb0e10f608b1899bab67b1813e0fdaf14 Mon Sep 17 00:00:00 2001
+From: Andreas Gampe <agampe@google.com>
+Date: Tue, 20 Feb 2018 09:51:42 -0800
+Subject: [PATCH] Antlr: Fix infinite recursion
+
+Bug: 73645371
+Test: m javac-check RUN_ERROR_PRONE=true
+Change-Id: Ie8702ad59aab1af7e23038fbffd5bd34902f1f7c
+---
+
+Index: antlr/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java
+===================================================================
+--- antlr.orig/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java
++++ antlr/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java
+@@ -227,7 +227,7 @@ public class DebugTreeAdaptor implements
+ 
+ 	@Override
+ 	public Object deleteChild(Object t, int i) {
+-		return deleteChild(t, i);
++		return adaptor.deleteChild(t, i);
+ 	}
+ 
+ 	@Override
+
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000..bcec4b5
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,324 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <parent>
+        <groupId>org.sonatype.oss</groupId>
+        <artifactId>oss-parent</artifactId>
+        <version>9</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.antlr</groupId>
+    <artifactId>antlr-master</artifactId>
+    <packaging>pom</packaging>
+    <version>3.5.2</version>
+    <name>ANTLR 3 Master build control POM</name>
+    <description>Master build POM for ANTLR 3</description>
+    <url>http://antlr.org</url>
+    <inceptionYear>1992</inceptionYear>
+    <organization>
+        <name>ANTLR</name>
+        <url>http://www.antlr.org</url>
+    </organization>
+
+  <!--
+    What version of ANTLR are we building? This sets the
+    version number for all other things that are built
+    as part of an ANTLR release, unless they override or
+    ignore it. We do this via a properties file for this
+    pom.
+    -->
+
+  <!--
+     This is the master pom for building the ANTLR
+     toolset and runtime (Java) at the specific level
+     defined above. Hence we specify here the modules that
+     are built whenever this pom is built.
+    -->
+
+    <modules>
+        <module>runtime/Java</module>
+        <module>tool</module>
+        <module>antlr3-maven-plugin</module>
+        <module>gunit</module>
+        <module>gunit-maven-plugin</module>
+        <module>antlr3-maven-archetype</module>
+        <module>antlr-complete</module>
+    </modules>
+
+  <!--
+    Make sure that the build is not platform dependent (i.e., treat
+    all the files in the source tree as UTF-8).
+    -->
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+        <java5.home>${env.JAVA5_HOME}</java5.home>
+        <java6.home>${env.JAVA6_HOME}</java6.home>
+        <bootclasspath.java5>${java5.home}/lib/rt.jar</bootclasspath.java5>
+        <bootclasspath.java6>${java6.home}/lib/rt.jar</bootclasspath.java6>
+        <bootclasspath.compile>${bootclasspath.java5}</bootclasspath.compile>
+        <bootclasspath.testCompile>${bootclasspath.java6}</bootclasspath.testCompile>
+    </properties>
+
+    <licenses>
+        <license>
+            <name>BSD licence</name>
+            <url>http://antlr.org/license.html</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
+
+    <issueManagement>
+        <system>GitHub Issues</system>
+        <url>https://github.com/antlr/antlr3/issues</url>
+    </issueManagement>
+
+    <mailingLists>
+        <mailingList>
+            <name>antlr-discussion</name>
+            <archive>https://groups.google.com/forum/?fromgroups#!forum/antlr-discussion</archive>
+        </mailingList>
+    </mailingLists>
+
+    <scm>
+        <url>https://github.com/antlr/antlr3/tree/master</url>
+        <connection>scm:git:git://github.com/antlr/antlr3.git</connection>
+        <developerConnection>scm:git:git@github.com:antlr/antlr3.git</developerConnection>
+      <tag>3.5.2</tag>
+  </scm>
+  <!--
+
+    Tell Maven which other artifacts we need in order to
+    build, run and test the ANTLR jars.
+    This is the master pom, and so it only contains those
+    dependencies that are common to all the modules below
+    or are included only for testing.
+    -->
+    <dependencyManagement>
+
+        <dependencies>
+
+            <dependency>
+                <groupId>junit</groupId>
+                <artifactId>junit</artifactId>
+                <version>4.10</version>
+                <scope>test</scope>
+            </dependency>
+
+            <dependency>
+                <groupId>antlr</groupId>
+                <artifactId>antlr</artifactId>
+                <version>2.7.7</version>
+                <scope>compile</scope>
+            </dependency>
+
+            <dependency>
+              <groupId>org.antlr</groupId>
+              <artifactId>stringtemplate</artifactId>
+              <version>3.2.1</version>
+              <scope>compile</scope>
+            </dependency>
+
+            <dependency>
+                <groupId>org.antlr</groupId>
+                <artifactId>ST4</artifactId>
+                <version>4.0.8</version>
+                <scope>compile</scope>
+            </dependency>
+
+        </dependencies>
+
+    </dependencyManagement>
+
+    <profiles>
+        <profile>
+            <id>sonatype-oss-release</id>
+            <modules>
+                <module>antlr-complete</module>
+            </modules>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.apache.maven.plugins</groupId>
+                        <artifactId>maven-compiler-plugin</artifactId>
+                        <executions>
+                            <execution>
+                                <id>default-compile</id>
+                                <configuration>
+                                    <source>1.5</source>
+                                    <target>1.5</target>
+                                    <compilerArgs>
+                                        <arg>-Xlint</arg>
+                                        <arg>-Xlint:-serial</arg>
+                                        <arg>-bootclasspath</arg>
+                                        <arg>${bootclasspath.compile}</arg>
+                                    </compilerArgs>
+                                </configuration>
+                            </execution>
+                            <execution>
+                                <id>default-testCompile</id>
+                                <configuration>
+                                    <source>1.6</source>
+                                    <target>1.6</target>
+                                    <compilerArgs>
+                                        <arg>-Xlint</arg>
+                                        <arg>-Xlint:-serial</arg>
+                                        <arg>-bootclasspath</arg>
+                                        <arg>${bootclasspath.testCompile}</arg>
+                                    </compilerArgs>
+                                </configuration>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+    </profiles>
+
+    <build>
+
+        <defaultGoal>install</defaultGoal>
+
+        <!--
+            The following filter definition means that both the master
+            project and the sub projects will read in a file in the same
+            directory as the pom.xml is located and set any properties
+            that are defined there in the standard x=y format. These
+            properties can then be referenced via ${x} in any resource
+            file specified in any pom. So, there is a master antlr.config
+            file in the same location as this pom.xml file and here you can
+            define anything that is relevant to all the modules that we
+            build here. However each module also has an antlr.config file
+            where you can override property values from the master file or
+            define things that are only relevant to that module.
+          -->
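+        <!--
+            For illustration only (hypothetical property name): an antlr.config
+            line such as
+                build.version=3.5.2
+            would replace every ${build.version} token in the filtered
+            resources declared below.
+          -->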
+        <filters>
+            <filter>antlr.config</filter>
+        </filters>
+
+        <resources>
+            <resource>
+                <directory>src/main/resources</directory>
+                <filtering>true</filtering>
+            </resource>
+        </resources>
+
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <version>2.4</version>
+                <configuration>
+                    <archive>
+                        <manifest>
+                            <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
+                        </manifest>
+                    </archive>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.1</version>
+                <configuration>
+                    <sourceDirectory>src</sourceDirectory>
+                    <showWarnings>true</showWarnings>
+                    <showDeprecation>true</showDeprecation>
+                    <compilerArgs>
+                        <arg>-Xlint</arg>
+                        <arg>-Xlint:-serial</arg>
+                    </compilerArgs>
+                </configuration>
+
+                <executions>
+                    <execution>
+                        <id>default-compile</id>
+                        <configuration>
+                            <source>1.5</source>
+                            <target>1.5</target>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>default-testCompile</id>
+                        <configuration>
+                            <source>1.6</source>
+                            <target>1.6</target>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-release-plugin</artifactId>
+                <!-- override the version inherited from the parent -->
+                <version>2.5</version>
+                <configuration>
+                    <arguments>-Psonatype-oss-release ${release.arguments}</arguments>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <!-- override the version inherited from the parent -->
+                <version>2.17</version>
+            </plugin>
+
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>findbugs-maven-plugin</artifactId>
+                <!-- override the version inherited from the parent -->
+                <version>2.5.3</version>
+                <configuration>
+                    <findbugsXmlOutput>true</findbugsXmlOutput>
+                    <xmlOutput>true</xmlOutput>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-source-plugin</artifactId>
+                <!-- override the version inherited from the parent -->
+                <version>2.2.1</version>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+                <!-- override the version inherited from the parent -->
+                <version>2.9.1</version>
+                <configuration>
+                    <quiet>true</quiet>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-gpg-plugin</artifactId>
+                <!-- override the version inherited from the parent -->
+                <version>1.5</version>
+            </plugin>
+
+        </plugins>
+
+    </build>
+
+    <reporting>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+                <version>2.9.1</version>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jxr-plugin</artifactId>
+                <version>2.3</version>
+            </plugin>
+        </plugins>
+    </reporting>
+
+</project>
diff --git a/post_update.sh b/post_update.sh
new file mode 100755
index 0000000..2995f11
--- /dev/null
+++ b/post_update.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# $1 Path to the new version.
+# $2 Path to the old version.
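+# Example (hypothetical paths): ./post_update.sh external/antlr-3.5.2 external/antlr-3.4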
+
+cp -a -n "$2/build.gradle" "$1/"
diff --git a/antlr-3.4/runtime/ActionScript/AUTHORS b/runtime/ActionScript/AUTHORS
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/AUTHORS
rename to runtime/ActionScript/AUTHORS
diff --git a/antlr-3.4/runtime/ActionScript/LICENSE b/runtime/ActionScript/LICENSE
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/LICENSE
rename to runtime/ActionScript/LICENSE
diff --git a/antlr-3.4/runtime/ActionScript/README b/runtime/ActionScript/README
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/README
rename to runtime/ActionScript/README
diff --git a/antlr-3.4/runtime/ActionScript/lib/antlr3.swc b/runtime/ActionScript/lib/antlr3.swc
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/lib/antlr3.swc
rename to runtime/ActionScript/lib/antlr3.swc
Binary files differ
diff --git a/antlr-3.4/runtime/ActionScript/project/.actionScriptProperties b/runtime/ActionScript/project/.actionScriptProperties
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/.actionScriptProperties
rename to runtime/ActionScript/project/.actionScriptProperties
diff --git a/antlr-3.4/runtime/ActionScript/project/.flexLibProperties b/runtime/ActionScript/project/.flexLibProperties
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/.flexLibProperties
rename to runtime/ActionScript/project/.flexLibProperties
diff --git a/antlr-3.4/runtime/ActionScript/project/.project b/runtime/ActionScript/project/.project
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/.project
rename to runtime/ActionScript/project/.project
diff --git a/antlr-3.4/runtime/ActionScript/project/.settings/org.eclipse.core.resources.prefs b/runtime/ActionScript/project/.settings/org.eclipse.core.resources.prefs
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/.settings/org.eclipse.core.resources.prefs
rename to runtime/ActionScript/project/.settings/org.eclipse.core.resources.prefs
diff --git a/antlr-3.4/runtime/ActionScript/project/README.txt b/runtime/ActionScript/project/README.txt
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/README.txt
rename to runtime/ActionScript/project/README.txt
diff --git a/antlr-3.4/runtime/ActionScript/project/build.xml b/runtime/ActionScript/project/build.xml
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/build.xml
rename to runtime/ActionScript/project/build.xml
diff --git a/antlr-3.4/runtime/ActionScript/project/lib/FlexAntTasks.jar b/runtime/ActionScript/project/lib/FlexAntTasks.jar
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/lib/FlexAntTasks.jar
rename to runtime/ActionScript/project/lib/FlexAntTasks.jar
Binary files differ
diff --git a/antlr-3.4/runtime/ActionScript/project/lib/FlexUnitOptional.swc b/runtime/ActionScript/project/lib/FlexUnitOptional.swc
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/lib/FlexUnitOptional.swc
rename to runtime/ActionScript/project/lib/FlexUnitOptional.swc
Binary files differ
diff --git a/antlr-3.4/runtime/ActionScript/project/lib/flexunit.swc b/runtime/ActionScript/project/lib/flexunit.swc
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/lib/flexunit.swc
rename to runtime/ActionScript/project/lib/flexunit.swc
Binary files differ
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/ANTLRFileStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/ANTLRFileStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/ANTLRFileStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/ANTLRFileStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/ANTLRStringStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/ANTLRStringStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/ANTLRStringStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/ANTLRStringStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/BaseRecognizer.as b/runtime/ActionScript/project/src/org/antlr/runtime/BaseRecognizer.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/BaseRecognizer.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/BaseRecognizer.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/BitSet.as b/runtime/ActionScript/project/src/org/antlr/runtime/BitSet.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/BitSet.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/BitSet.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/CharStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/CharStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/CharStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/CharStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/CharStreamConstants.as b/runtime/ActionScript/project/src/org/antlr/runtime/CharStreamConstants.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/CharStreamConstants.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/CharStreamConstants.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/CharStreamState.as b/runtime/ActionScript/project/src/org/antlr/runtime/CharStreamState.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/CharStreamState.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/CharStreamState.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/CommonToken.as b/runtime/ActionScript/project/src/org/antlr/runtime/CommonToken.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/CommonToken.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/CommonToken.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/CommonTokenStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/CommonTokenStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/CommonTokenStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/CommonTokenStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/DFA.as b/runtime/ActionScript/project/src/org/antlr/runtime/DFA.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/DFA.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/DFA.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/EarlyExitException.as b/runtime/ActionScript/project/src/org/antlr/runtime/EarlyExitException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/EarlyExitException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/EarlyExitException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/FailedPredicateException.as b/runtime/ActionScript/project/src/org/antlr/runtime/FailedPredicateException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/FailedPredicateException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/FailedPredicateException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/IntStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/IntStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/IntStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/IntStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/Lexer.as b/runtime/ActionScript/project/src/org/antlr/runtime/Lexer.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/Lexer.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/Lexer.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedNotSetException.as b/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedNotSetException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedNotSetException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/MismatchedNotSetException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedRangeException.as b/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedRangeException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedRangeException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/MismatchedRangeException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedSetException.as b/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedSetException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedSetException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/MismatchedSetException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedTokenException.as b/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedTokenException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedTokenException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/MismatchedTokenException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedTreeNodeException.as b/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedTreeNodeException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MismatchedTreeNodeException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/MismatchedTreeNodeException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MissingTokenException.as b/runtime/ActionScript/project/src/org/antlr/runtime/MissingTokenException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/MissingTokenException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/MissingTokenException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/NoViableAltException.as b/runtime/ActionScript/project/src/org/antlr/runtime/NoViableAltException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/NoViableAltException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/NoViableAltException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/Parser.as b/runtime/ActionScript/project/src/org/antlr/runtime/Parser.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/Parser.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/Parser.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/ParserRuleReturnScope.as b/runtime/ActionScript/project/src/org/antlr/runtime/ParserRuleReturnScope.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/ParserRuleReturnScope.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/ParserRuleReturnScope.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/RecognitionException.as b/runtime/ActionScript/project/src/org/antlr/runtime/RecognitionException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/RecognitionException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/RecognitionException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/RecognizerSharedState.as b/runtime/ActionScript/project/src/org/antlr/runtime/RecognizerSharedState.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/RecognizerSharedState.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/RecognizerSharedState.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/RuleReturnScope.as b/runtime/ActionScript/project/src/org/antlr/runtime/RuleReturnScope.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/RuleReturnScope.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/RuleReturnScope.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/Token.as b/runtime/ActionScript/project/src/org/antlr/runtime/Token.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/Token.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/Token.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/TokenConstants.as b/runtime/ActionScript/project/src/org/antlr/runtime/TokenConstants.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/TokenConstants.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/TokenConstants.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/TokenRewriteStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/TokenRewriteStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/TokenRewriteStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/TokenRewriteStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/TokenSource.as b/runtime/ActionScript/project/src/org/antlr/runtime/TokenSource.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/TokenSource.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/TokenSource.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/TokenStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/TokenStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/TokenStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/TokenStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/UnwantedTokenException.as b/runtime/ActionScript/project/src/org/antlr/runtime/UnwantedTokenException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/UnwantedTokenException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/UnwantedTokenException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/BaseTree.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/BaseTree.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/BaseTree.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/BaseTree.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/BaseTreeAdaptor.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/BaseTreeAdaptor.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/BaseTreeAdaptor.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/BaseTreeAdaptor.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonErrorNode.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonErrorNode.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonErrorNode.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonErrorNode.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTree.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTree.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTree.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTree.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTreeAdaptor.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTreeAdaptor.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTreeAdaptor.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTreeAdaptor.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTreeNodeStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTreeNodeStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTreeNodeStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/CommonTreeNodeStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteCardinalityException.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteCardinalityException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteCardinalityException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteCardinalityException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteEarlyExitException.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteEarlyExitException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteEarlyExitException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteEarlyExitException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteEmptyStreamException.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteEmptyStreamException.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteEmptyStreamException.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteEmptyStreamException.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleElementStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleElementStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleElementStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleElementStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleNodeStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleNodeStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleNodeStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleNodeStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleTokenStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleTokenStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleTokenStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/RewriteRuleTokenStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/Tree.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/Tree.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/Tree.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/Tree.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeAdaptor.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeAdaptor.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeAdaptor.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeAdaptor.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeConstants.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeConstants.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeConstants.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeConstants.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeNodeStream.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeNodeStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeNodeStream.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeNodeStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeParser.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeParser.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeParser.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeParser.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeRuleReturnScope.as b/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeRuleReturnScope.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeRuleReturnScope.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/tree/TreeRuleReturnScope.as
diff --git a/antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/version.as b/runtime/ActionScript/project/src/org/antlr/runtime/version.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/src/org/antlr/runtime/version.as
rename to runtime/ActionScript/project/src/org/antlr/runtime/version.as
diff --git a/antlr-3.4/runtime/ActionScript/project/test/Antlr3Test.mxml b/runtime/ActionScript/project/test/Antlr3Test.mxml
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/test/Antlr3Test.mxml
rename to runtime/ActionScript/project/test/Antlr3Test.mxml
diff --git a/antlr-3.4/runtime/ActionScript/project/test/org/antlr/runtime/test/AllTests.as b/runtime/ActionScript/project/test/org/antlr/runtime/test/AllTests.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/test/org/antlr/runtime/test/AllTests.as
rename to runtime/ActionScript/project/test/org/antlr/runtime/test/AllTests.as
diff --git a/antlr-3.4/runtime/ActionScript/project/test/org/antlr/runtime/test/TestANTLRStringStream.as b/runtime/ActionScript/project/test/org/antlr/runtime/test/TestANTLRStringStream.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/test/org/antlr/runtime/test/TestANTLRStringStream.as
rename to runtime/ActionScript/project/test/org/antlr/runtime/test/TestANTLRStringStream.as
diff --git a/antlr-3.4/runtime/ActionScript/project/test/org/antlr/runtime/test/TestBitSet.as b/runtime/ActionScript/project/test/org/antlr/runtime/test/TestBitSet.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/test/org/antlr/runtime/test/TestBitSet.as
rename to runtime/ActionScript/project/test/org/antlr/runtime/test/TestBitSet.as
diff --git a/antlr-3.4/runtime/ActionScript/project/test/org/antlr/runtime/test/TestDFA.as b/runtime/ActionScript/project/test/org/antlr/runtime/test/TestDFA.as
similarity index 100%
rename from antlr-3.4/runtime/ActionScript/project/test/org/antlr/runtime/test/TestDFA.as
rename to runtime/ActionScript/project/test/org/antlr/runtime/test/TestDFA.as
diff --git a/antlr-3.4/runtime/C/AUTHORS b/runtime/C/AUTHORS
similarity index 100%
rename from antlr-3.4/runtime/C/AUTHORS
rename to runtime/C/AUTHORS
diff --git a/runtime/C/C.sln b/runtime/C/C.sln
new file mode 100644
index 0000000..e86daeb
--- /dev/null
+++ b/runtime/C/C.sln
@@ -0,0 +1,43 @@
+Microsoft Visual Studio Solution File, Format Version 10.00
+# Visual Studio 2008
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "C", "C.vcproj", "{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}"
+EndProject
+Global
+	GlobalSection(SolutionConfigurationPlatforms) = preSolution
+		Debug|Win32 = Debug|Win32
+		Debug|x64 = Debug|x64
+		DebugDLL|Win32 = DebugDLL|Win32
+		DebugDLL|x64 = DebugDLL|x64
+		Deployment|Win32 = Deployment|Win32
+		Deployment|x64 = Deployment|x64
+		Release|Win32 = Release|Win32
+		Release|x64 = Release|x64
+		ReleaseDLL|Win32 = ReleaseDLL|Win32
+		ReleaseDLL|x64 = ReleaseDLL|x64
+	EndGlobalSection
+	GlobalSection(ProjectConfigurationPlatforms) = postSolution
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Debug|Win32.ActiveCfg = Debug|Win32
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Debug|Win32.Build.0 = Debug|Win32
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Debug|x64.ActiveCfg = Debug|x64
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Debug|x64.Build.0 = Debug|x64
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.DebugDLL|Win32.ActiveCfg = DebugDLL|Win32
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.DebugDLL|Win32.Build.0 = DebugDLL|Win32
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.DebugDLL|x64.ActiveCfg = DebugDLL|x64
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.DebugDLL|x64.Build.0 = DebugDLL|x64
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Deployment|Win32.ActiveCfg = DebugDLL|Win32
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Deployment|Win32.Build.0 = DebugDLL|Win32
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Deployment|x64.ActiveCfg = DebugDLL|x64
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Deployment|x64.Build.0 = DebugDLL|x64
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Release|Win32.ActiveCfg = Release|Win32
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Release|Win32.Build.0 = Release|Win32
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Release|x64.ActiveCfg = Release|x64
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.Release|x64.Build.0 = Release|x64
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.ReleaseDLL|Win32.ActiveCfg = ReleaseDLL|Win32
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.ReleaseDLL|Win32.Build.0 = ReleaseDLL|Win32
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.ReleaseDLL|x64.ActiveCfg = ReleaseDLL|x64
+		{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}.ReleaseDLL|x64.Build.0 = ReleaseDLL|x64
+	EndGlobalSection
+	GlobalSection(SolutionProperties) = preSolution
+		HideSolutionNode = FALSE
+	EndGlobalSection
+EndGlobal
diff --git a/runtime/C/C.vcproj b/runtime/C/C.vcproj
new file mode 100644
index 0000000..25aaf8b
--- /dev/null
+++ b/runtime/C/C.vcproj
@@ -0,0 +1,1040 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+	ProjectType="Visual C++"
+	Version="9.00"
+	Name="C"
+	ProjectGUID="{0F0FE03A-78F3-4B34-9DCE-0CDFF1FB5C40}"
+	RootNamespace="C"
+	Keyword="Win32Proj"
+	TargetFrameworkVersion="131072"
+	>
+	<Platforms>
+		<Platform
+			Name="Win32"
+		/>
+		<Platform
+			Name="x64"
+		/>
+	</Platforms>
+	<ToolFiles>
+	</ToolFiles>
+	<Configurations>
+		<Configuration
+			Name="Debug|Win32"
+			OutputDirectory="Debug"
+			IntermediateDirectory="Debug"
+			ConfigurationType="4"
+			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
+			CharacterSet="2"
+			>
+			<Tool
+				Name="VCPreBuildEventTool"
+			/>
+			<Tool
+				Name="VCCustomBuildTool"
+			/>
+			<Tool
+				Name="VCXMLDataGeneratorTool"
+			/>
+			<Tool
+				Name="VCWebServiceProxyGeneratorTool"
+			/>
+			<Tool
+				Name="VCMIDLTool"
+			/>
+			<Tool
+				Name="VCCLCompilerTool"
+				Optimization="0"
+				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;;&quot;$(SolutionDir)\..\..\include&quot;"
+				PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
+				StringPooling="true"
+				MinimalRebuild="false"
+				BasicRuntimeChecks="3"
+				RuntimeLibrary="3"
+				StructMemberAlignment="0"
+				EnableFunctionLevelLinking="true"
+				EnableEnhancedInstructionSet="0"
+				FloatingPointModel="0"
+				FloatingPointExceptions="true"
+				DisableLanguageExtensions="false"
+				UsePrecompiledHeader="0"
+				ExpandAttributedSource="true"
+				AssemblerOutput="2"
+				BrowseInformation="1"
+				WarningLevel="4"
+				WarnAsError="false"
+				Detect64BitPortabilityProblems="false"
+				DebugInformationFormat="3"
+				CallingConvention="0"
+				CompileAs="0"
+			/>
+			<Tool
+				Name="VCManagedResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCPreLinkEventTool"
+			/>
+			<Tool
+				Name="VCLibrarianTool"
+				AdditionalDependencies="Ws2_32.lib"
+				OutputFile="$(OutDir)/antlr3cd.lib"
+			/>
+			<Tool
+				Name="VCALinkTool"
+			/>
+			<Tool
+				Name="VCXDCMakeTool"
+			/>
+			<Tool
+				Name="VCBscMakeTool"
+			/>
+			<Tool
+				Name="VCFxCopTool"
+			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+			/>
+		</Configuration>
+		<Configuration
+			Name="Debug|x64"
+			OutputDirectory="$(PlatformName)\$(ConfigurationName)"
+			IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
+			ConfigurationType="4"
+			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
+			CharacterSet="2"
+			>
+			<Tool
+				Name="VCPreBuildEventTool"
+			/>
+			<Tool
+				Name="VCCustomBuildTool"
+			/>
+			<Tool
+				Name="VCXMLDataGeneratorTool"
+			/>
+			<Tool
+				Name="VCWebServiceProxyGeneratorTool"
+			/>
+			<Tool
+				Name="VCMIDLTool"
+				TargetEnvironment="3"
+			/>
+			<Tool
+				Name="VCCLCompilerTool"
+				Optimization="0"
+				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;;&quot;$(SolutionDir)\..\..\include&quot;"
+				PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
+				StringPooling="true"
+				MinimalRebuild="false"
+				BasicRuntimeChecks="3"
+				RuntimeLibrary="3"
+				StructMemberAlignment="0"
+				EnableFunctionLevelLinking="true"
+				EnableEnhancedInstructionSet="0"
+				FloatingPointModel="0"
+				FloatingPointExceptions="true"
+				DisableLanguageExtensions="false"
+				UsePrecompiledHeader="0"
+				ExpandAttributedSource="true"
+				AssemblerOutput="2"
+				BrowseInformation="1"
+				WarningLevel="4"
+				WarnAsError="false"
+				Detect64BitPortabilityProblems="false"
+				DebugInformationFormat="3"
+				CallingConvention="0"
+				CompileAs="0"
+			/>
+			<Tool
+				Name="VCManagedResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCPreLinkEventTool"
+			/>
+			<Tool
+				Name="VCLibrarianTool"
+				AdditionalDependencies="Ws2_32.lib"
+				OutputFile="$(OutDir)/antlr3cd.lib"
+			/>
+			<Tool
+				Name="VCALinkTool"
+			/>
+			<Tool
+				Name="VCXDCMakeTool"
+			/>
+			<Tool
+				Name="VCBscMakeTool"
+			/>
+			<Tool
+				Name="VCFxCopTool"
+			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+			/>
+		</Configuration>
+		<Configuration
+			Name="Release|Win32"
+			OutputDirectory="Release"
+			IntermediateDirectory="Release"
+			ConfigurationType="4"
+			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
+			CharacterSet="2"
+			>
+			<Tool
+				Name="VCPreBuildEventTool"
+			/>
+			<Tool
+				Name="VCCustomBuildTool"
+			/>
+			<Tool
+				Name="VCXMLDataGeneratorTool"
+			/>
+			<Tool
+				Name="VCWebServiceProxyGeneratorTool"
+			/>
+			<Tool
+				Name="VCMIDLTool"
+			/>
+			<Tool
+				Name="VCCLCompilerTool"
+				Optimization="3"
+				InlineFunctionExpansion="2"
+				EnableIntrinsicFunctions="true"
+				FavorSizeOrSpeed="1"
+				OmitFramePointers="true"
+				EnableFiberSafeOptimizations="true"
+				WholeProgramOptimization="true"
+				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
+				PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
+				StringPooling="true"
+				ExceptionHandling="1"
+				RuntimeLibrary="2"
+				BufferSecurityCheck="false"
+				EnableEnhancedInstructionSet="2"
+				FloatingPointModel="2"
+				DisableLanguageExtensions="false"
+				RuntimeTypeInfo="false"
+				UsePrecompiledHeader="0"
+				AssemblerListingLocation=".\asm\release"
+				WarningLevel="4"
+				WarnAsError="true"
+				Detect64BitPortabilityProblems="false"
+				DebugInformationFormat="3"
+				CallingConvention="0"
+				CompileAs="1"
+			/>
+			<Tool
+				Name="VCManagedResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCPreLinkEventTool"
+			/>
+			<Tool
+				Name="VCLibrarianTool"
+				AdditionalOptions="/LTCG"
+				AdditionalDependencies="Ws2_32.lib"
+				OutputFile="$(OutDir)/antlr3c.lib"
+			/>
+			<Tool
+				Name="VCALinkTool"
+			/>
+			<Tool
+				Name="VCXDCMakeTool"
+				ValidateIntelliSense="true"
+			/>
+			<Tool
+				Name="VCBscMakeTool"
+			/>
+			<Tool
+				Name="VCFxCopTool"
+			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+			/>
+		</Configuration>
+		<Configuration
+			Name="Release|x64"
+			OutputDirectory="$(PlatformName)\$(ConfigurationName)"
+			IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
+			ConfigurationType="4"
+			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
+			CharacterSet="2"
+			>
+			<Tool
+				Name="VCPreBuildEventTool"
+			/>
+			<Tool
+				Name="VCCustomBuildTool"
+			/>
+			<Tool
+				Name="VCXMLDataGeneratorTool"
+			/>
+			<Tool
+				Name="VCWebServiceProxyGeneratorTool"
+			/>
+			<Tool
+				Name="VCMIDLTool"
+				TargetEnvironment="3"
+			/>
+			<Tool
+				Name="VCCLCompilerTool"
+				Optimization="3"
+				InlineFunctionExpansion="2"
+				EnableIntrinsicFunctions="true"
+				FavorSizeOrSpeed="1"
+				OmitFramePointers="true"
+				EnableFiberSafeOptimizations="true"
+				WholeProgramOptimization="true"
+				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
+				PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
+				StringPooling="true"
+				ExceptionHandling="1"
+				RuntimeLibrary="2"
+				BufferSecurityCheck="false"
+				EnableEnhancedInstructionSet="0"
+				FloatingPointModel="2"
+				RuntimeTypeInfo="false"
+				UsePrecompiledHeader="0"
+				AssemblerListingLocation=".\asm\release"
+				WarningLevel="4"
+				WarnAsError="true"
+				Detect64BitPortabilityProblems="false"
+				DebugInformationFormat="3"
+				CallingConvention="0"
+				CompileAs="1"
+			/>
+			<Tool
+				Name="VCManagedResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCPreLinkEventTool"
+			/>
+			<Tool
+				Name="VCLibrarianTool"
+				AdditionalOptions="/LTCG"
+				AdditionalDependencies="Ws2_32.lib"
+				OutputFile="$(OutDir)/antlr3c.lib"
+			/>
+			<Tool
+				Name="VCALinkTool"
+			/>
+			<Tool
+				Name="VCXDCMakeTool"
+				ValidateIntelliSense="true"
+			/>
+			<Tool
+				Name="VCBscMakeTool"
+			/>
+			<Tool
+				Name="VCFxCopTool"
+			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+			/>
+		</Configuration>
+		<Configuration
+			Name="ReleaseDLL|Win32"
+			OutputDirectory="$(ConfigurationName)"
+			IntermediateDirectory="$(ConfigurationName)"
+			ConfigurationType="2"
+			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
+			CharacterSet="2"
+			WholeProgramOptimization="1"
+			>
+			<Tool
+				Name="VCPreBuildEventTool"
+			/>
+			<Tool
+				Name="VCCustomBuildTool"
+			/>
+			<Tool
+				Name="VCXMLDataGeneratorTool"
+			/>
+			<Tool
+				Name="VCWebServiceProxyGeneratorTool"
+			/>
+			<Tool
+				Name="VCMIDLTool"
+			/>
+			<Tool
+				Name="VCCLCompilerTool"
+				Optimization="3"
+				InlineFunctionExpansion="2"
+				EnableIntrinsicFunctions="true"
+				FavorSizeOrSpeed="1"
+				OmitFramePointers="true"
+				EnableFiberSafeOptimizations="true"
+				WholeProgramOptimization="true"
+				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
+				PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
+				StringPooling="true"
+				ExceptionHandling="1"
+				RuntimeLibrary="2"
+				BufferSecurityCheck="false"
+				EnableEnhancedInstructionSet="2"
+				FloatingPointModel="2"
+				DisableLanguageExtensions="false"
+				RuntimeTypeInfo="false"
+				UsePrecompiledHeader="0"
+				AssemblerListingLocation=".\asm\release"
+				WarningLevel="4"
+				WarnAsError="true"
+				Detect64BitPortabilityProblems="false"
+				DebugInformationFormat="3"
+				CallingConvention="0"
+				CompileAs="0"
+			/>
+			<Tool
+				Name="VCManagedResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCPreLinkEventTool"
+			/>
+			<Tool
+				Name="VCLinkerTool"
+				AdditionalDependencies="Ws2_32.lib"
+				OutputFile="$(OutDir)\antlr3c.dll"
+				Version="3.1.1"
+				OptimizeReferences="2"
+				EnableCOMDATFolding="2"
+				RandomizedBaseAddress="1"
+				DataExecutionPrevention="0"
+				ImportLibrary="$(TargetDir)$(TargetName)_dll.lib"
+			/>
+			<Tool
+				Name="VCALinkTool"
+			/>
+			<Tool
+				Name="VCManifestTool"
+			/>
+			<Tool
+				Name="VCXDCMakeTool"
+				ValidateIntelliSense="true"
+			/>
+			<Tool
+				Name="VCBscMakeTool"
+			/>
+			<Tool
+				Name="VCFxCopTool"
+			/>
+			<Tool
+				Name="VCAppVerifierTool"
+			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+			/>
+		</Configuration>
+		<Configuration
+			Name="ReleaseDLL|x64"
+			OutputDirectory="$(PlatformName)\$(ConfigurationName)"
+			IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
+			ConfigurationType="2"
+			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
+			CharacterSet="2"
+			WholeProgramOptimization="1"
+			>
+			<Tool
+				Name="VCPreBuildEventTool"
+			/>
+			<Tool
+				Name="VCCustomBuildTool"
+			/>
+			<Tool
+				Name="VCXMLDataGeneratorTool"
+			/>
+			<Tool
+				Name="VCWebServiceProxyGeneratorTool"
+			/>
+			<Tool
+				Name="VCMIDLTool"
+				TargetEnvironment="3"
+			/>
+			<Tool
+				Name="VCCLCompilerTool"
+				Optimization="3"
+				InlineFunctionExpansion="2"
+				EnableIntrinsicFunctions="true"
+				FavorSizeOrSpeed="1"
+				OmitFramePointers="true"
+				EnableFiberSafeOptimizations="true"
+				WholeProgramOptimization="true"
+				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
+				PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
+				StringPooling="true"
+				ExceptionHandling="1"
+				RuntimeLibrary="2"
+				BufferSecurityCheck="false"
+				EnableEnhancedInstructionSet="0"
+				FloatingPointModel="2"
+				DisableLanguageExtensions="false"
+				RuntimeTypeInfo="false"
+				UsePrecompiledHeader="0"
+				AssemblerListingLocation=".\asm\release"
+				WarningLevel="4"
+				WarnAsError="true"
+				Detect64BitPortabilityProblems="false"
+				DebugInformationFormat="3"
+				CallingConvention="0"
+				CompileAs="0"
+			/>
+			<Tool
+				Name="VCManagedResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCPreLinkEventTool"
+			/>
+			<Tool
+				Name="VCLinkerTool"
+				AdditionalDependencies="Ws2_32.lib"
+				OutputFile="$(OutDir)\antlr3c64.dll"
+				Version="3.1.1"
+				OptimizeReferences="2"
+				EnableCOMDATFolding="2"
+				RandomizedBaseAddress="1"
+				DataExecutionPrevention="0"
+				ImportLibrary="$(TargetDir)$(TargetName)_dll.lib"
+				TargetMachine="17"
+			/>
+			<Tool
+				Name="VCALinkTool"
+			/>
+			<Tool
+				Name="VCManifestTool"
+			/>
+			<Tool
+				Name="VCXDCMakeTool"
+				ValidateIntelliSense="true"
+			/>
+			<Tool
+				Name="VCBscMakeTool"
+			/>
+			<Tool
+				Name="VCFxCopTool"
+			/>
+			<Tool
+				Name="VCAppVerifierTool"
+			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+			/>
+		</Configuration>
+		<Configuration
+			Name="DebugDLL|Win32"
+			OutputDirectory="$(ConfigurationName)"
+			IntermediateDirectory="$(ConfigurationName)"
+			ConfigurationType="2"
+			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
+			CharacterSet="2"
+			>
+			<Tool
+				Name="VCPreBuildEventTool"
+			/>
+			<Tool
+				Name="VCCustomBuildTool"
+				Outputs="$(TargetDir)$(TargetName)_dll.lib"
+			/>
+			<Tool
+				Name="VCXMLDataGeneratorTool"
+			/>
+			<Tool
+				Name="VCWebServiceProxyGeneratorTool"
+			/>
+			<Tool
+				Name="VCMIDLTool"
+			/>
+			<Tool
+				Name="VCCLCompilerTool"
+				Optimization="0"
+				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
+				PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
+				StringPooling="true"
+				MinimalRebuild="false"
+				BasicRuntimeChecks="3"
+				RuntimeLibrary="3"
+				StructMemberAlignment="0"
+				EnableFunctionLevelLinking="true"
+				EnableEnhancedInstructionSet="0"
+				FloatingPointModel="0"
+				FloatingPointExceptions="true"
+				DisableLanguageExtensions="false"
+				UsePrecompiledHeader="0"
+				ExpandAttributedSource="true"
+				AssemblerOutput="2"
+				BrowseInformation="1"
+				WarningLevel="4"
+				WarnAsError="false"
+				Detect64BitPortabilityProblems="false"
+				DebugInformationFormat="3"
+				CallingConvention="0"
+				CompileAs="0"
+			/>
+			<Tool
+				Name="VCManagedResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCPreLinkEventTool"
+			/>
+			<Tool
+				Name="VCLinkerTool"
+				AdditionalDependencies="Ws2_32.lib"
+				OutputFile="$(OutDir)\antlr3cd.dll"
+				Version="3.1.1"
+				GenerateDebugInformation="true"
+				RandomizedBaseAddress="1"
+				DataExecutionPrevention="0"
+				ImportLibrary="$(TargetDir)$(TargetName)_dll.lib"
+			/>
+			<Tool
+				Name="VCALinkTool"
+			/>
+			<Tool
+				Name="VCManifestTool"
+			/>
+			<Tool
+				Name="VCXDCMakeTool"
+			/>
+			<Tool
+				Name="VCBscMakeTool"
+			/>
+			<Tool
+				Name="VCFxCopTool"
+			/>
+			<Tool
+				Name="VCAppVerifierTool"
+			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+			/>
+		</Configuration>
+		<Configuration
+			Name="DebugDLL|x64"
+			OutputDirectory="$(PlatformName)\$(ConfigurationName)"
+			IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
+			ConfigurationType="2"
+			InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC71.vsprops"
+			CharacterSet="2"
+			>
+			<Tool
+				Name="VCPreBuildEventTool"
+			/>
+			<Tool
+				Name="VCCustomBuildTool"
+				Outputs="$(TargetDir)$(TargetName)_dll.lib"
+			/>
+			<Tool
+				Name="VCXMLDataGeneratorTool"
+			/>
+			<Tool
+				Name="VCWebServiceProxyGeneratorTool"
+			/>
+			<Tool
+				Name="VCMIDLTool"
+				TargetEnvironment="3"
+			/>
+			<Tool
+				Name="VCCLCompilerTool"
+				Optimization="0"
+				AdditionalIncludeDirectories="&quot;$(SolutionDir)\include&quot;"
+				PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
+				StringPooling="true"
+				MinimalRebuild="false"
+				BasicRuntimeChecks="3"
+				RuntimeLibrary="3"
+				StructMemberAlignment="0"
+				EnableFunctionLevelLinking="true"
+				EnableEnhancedInstructionSet="0"
+				FloatingPointModel="0"
+				FloatingPointExceptions="true"
+				DisableLanguageExtensions="false"
+				UsePrecompiledHeader="0"
+				ExpandAttributedSource="true"
+				AssemblerOutput="2"
+				BrowseInformation="1"
+				WarningLevel="4"
+				WarnAsError="false"
+				Detect64BitPortabilityProblems="false"
+				DebugInformationFormat="3"
+				CallingConvention="0"
+				CompileAs="0"
+			/>
+			<Tool
+				Name="VCManagedResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCPreLinkEventTool"
+			/>
+			<Tool
+				Name="VCLinkerTool"
+				AdditionalDependencies="Ws2_32.lib"
+				OutputFile="$(OutDir)\antlr3c64d.dll"
+				Version="3.1.1"
+				GenerateDebugInformation="true"
+				RandomizedBaseAddress="1"
+				DataExecutionPrevention="0"
+				ImportLibrary="$(TargetDir)$(TargetName)_dll.lib"
+				TargetMachine="17"
+			/>
+			<Tool
+				Name="VCALinkTool"
+			/>
+			<Tool
+				Name="VCManifestTool"
+			/>
+			<Tool
+				Name="VCXDCMakeTool"
+			/>
+			<Tool
+				Name="VCBscMakeTool"
+			/>
+			<Tool
+				Name="VCFxCopTool"
+			/>
+			<Tool
+				Name="VCAppVerifierTool"
+			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+			/>
+		</Configuration>
+	</Configurations>
+	<References>
+	</References>
+	<Files>
+		<Filter
+			Name="Source Files"
+			Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
+			UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
+			>
+			<File
+				RelativePath=".\src\antlr3baserecognizer.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3basetree.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3basetreeadaptor.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3bitset.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3collections.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3commontoken.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3commontree.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3commontreeadaptor.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3commontreenodestream.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3convertutf.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3cyclicdfa.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3debughandlers.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3encodings.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3exception.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3filestream.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3inputstream.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3intstream.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3lexer.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3parser.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3rewritestreams.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3string.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3tokenstream.c"
+				>
+			</File>
+			<File
+				RelativePath=".\src\antlr3treeparser.c"
+				>
+			</File>
+		</Filter>
+		<Filter
+			Name="Header Files"
+			Filter="h;hpp;hxx;hm;inl;inc;xsd"
+			UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
+			>
+			<File
+				RelativePath=".\include\antlr3.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3baserecognizer.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3basetree.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3basetreeadaptor.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3bitset.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3collections.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3commontoken.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3commontree.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3commontreeadaptor.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3commontreenodestream.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3convertutf.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3cyclicdfa.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3debugeventlistener.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3defs.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3encodings.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3errors.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3exception.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3filestream.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3input.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3interfaces.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3intstream.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3lexer.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3memory.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3parser.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3parsetree.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3recognizersharedstate.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3rewritestreams.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3string.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3tokenstream.h"
+				>
+			</File>
+			<File
+				RelativePath=".\include\antlr3treeparser.h"
+				>
+			</File>
+		</Filter>
+		<Filter
+			Name="Templates"
+			Filter=".stg"
+			>
+			<File
+				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\AST.stg"
+				>
+			</File>
+			<File
+				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\ASTDbg.stg"
+				>
+			</File>
+			<File
+				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\ASTParser.stg"
+				>
+			</File>
+			<File
+				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\ASTTreeParser.stg"
+				>
+			</File>
+			<File
+				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\C.stg"
+				>
+			</File>
+			<File
+				RelativePath="..\..\tool\src\main\resources\org\antlr\codegen\templates\C\Dbg.stg"
+				>
+			</File>
+		</Filter>
+		<Filter
+			Name="Java"
+			Filter="*.java"
+			>
+			<File
+				RelativePath="..\..\tool\src\main\java\org\antlr\codegen\CTarget.java"
+				>
+			</File>
+		</Filter>
+		<Filter
+			Name="Doxygen"
+			>
+			<File
+				RelativePath=".\doxygen\atsections.dox"
+				>
+			</File>
+			<File
+				RelativePath=".\doxygen\build.dox"
+				>
+			</File>
+			<File
+				RelativePath=".\doxygen\buildrec.dox"
+				>
+			</File>
+			<File
+				RelativePath=".\doxygen\changes31.dox"
+				>
+			</File>
+			<File
+				RelativePath=".\doxygen\doxygengroups.dox"
+				>
+			</File>
+			<File
+				RelativePath=".\doxygen\generate.dox"
+				>
+			</File>
+			<File
+				RelativePath=".\doxygen\interop.dox"
+				>
+			</File>
+			<File
+				RelativePath=".\doxygen\mainpage.dox"
+				>
+			</File>
+			<File
+				RelativePath=".\doxygen\runtime.dox"
+				>
+			</File>
+			<File
+				RelativePath=".\doxygen\using.dox"
+				>
+			</File>
+		</Filter>
+	</Files>
+	<Globals>
+		<Global
+			Name="DevPartner_IsInstrumented"
+			Value="0"
+		/>
+	</Globals>
+</VisualStudioProject>
diff --git a/antlr-3.4/runtime/C/C.vcproj.vspscc b/runtime/C/C.vcproj.vspscc
similarity index 100%
rename from antlr-3.4/runtime/C/C.vcproj.vspscc
rename to runtime/C/C.vcproj.vspscc
diff --git a/antlr-3.4/runtime/C/C.vssscc b/runtime/C/C.vssscc
similarity index 100%
rename from antlr-3.4/runtime/C/C.vssscc
rename to runtime/C/C.vssscc
diff --git a/antlr-3.4/runtime/C/COPYING b/runtime/C/COPYING
similarity index 100%
rename from antlr-3.4/runtime/C/COPYING
rename to runtime/C/COPYING
diff --git a/antlr-3.4/runtime/C/ChangeLog b/runtime/C/ChangeLog
similarity index 100%
rename from antlr-3.4/runtime/C/ChangeLog
rename to runtime/C/ChangeLog
diff --git a/antlr-3.4/runtime/C/Cvs2005.sln b/runtime/C/Cvs2005.sln
similarity index 100%
rename from antlr-3.4/runtime/C/Cvs2005.sln
rename to runtime/C/Cvs2005.sln
diff --git a/antlr-3.4/runtime/C/Cvs2005.vcproj b/runtime/C/Cvs2005.vcproj
similarity index 100%
rename from antlr-3.4/runtime/C/Cvs2005.vcproj
rename to runtime/C/Cvs2005.vcproj
diff --git a/antlr-3.4/runtime/C/INSTALL b/runtime/C/INSTALL
similarity index 100%
rename from antlr-3.4/runtime/C/INSTALL
rename to runtime/C/INSTALL
diff --git a/antlr-3.4/runtime/C/Makefile.am b/runtime/C/Makefile.am
similarity index 100%
rename from antlr-3.4/runtime/C/Makefile.am
rename to runtime/C/Makefile.am
diff --git a/antlr-3.4/runtime/C/NEWS b/runtime/C/NEWS
similarity index 100%
rename from antlr-3.4/runtime/C/NEWS
rename to runtime/C/NEWS
diff --git a/antlr-3.4/runtime/C/README b/runtime/C/README
similarity index 100%
rename from antlr-3.4/runtime/C/README
rename to runtime/C/README
diff --git a/antlr-3.4/runtime/C/configure.ac b/runtime/C/configure.ac
similarity index 100%
rename from antlr-3.4/runtime/C/configure.ac
rename to runtime/C/configure.ac
diff --git a/runtime/C/dist/libantlr3c-3.1.4-SNAPSHOT.tar.gz b/runtime/C/dist/libantlr3c-3.1.4-SNAPSHOT.tar.gz
new file mode 100644
index 0000000..a9ad784
--- /dev/null
+++ b/runtime/C/dist/libantlr3c-3.1.4-SNAPSHOT.tar.gz
Binary files differ
diff --git a/runtime/C/dist/libantlr3c-3.3.1-SNAPSHOT.tar.gz b/runtime/C/dist/libantlr3c-3.3.1-SNAPSHOT.tar.gz
new file mode 100644
index 0000000..ca21cf7
--- /dev/null
+++ b/runtime/C/dist/libantlr3c-3.3.1-SNAPSHOT.tar.gz
Binary files differ
diff --git a/runtime/C/dist/libantlr3c-3.4-beta3.tar.gz b/runtime/C/dist/libantlr3c-3.4-beta3.tar.gz
new file mode 100644
index 0000000..f5e9fbb
--- /dev/null
+++ b/runtime/C/dist/libantlr3c-3.4-beta3.tar.gz
Binary files differ
diff --git a/antlr-3.4/runtime/C/doxyfile b/runtime/C/doxyfile
similarity index 100%
rename from antlr-3.4/runtime/C/doxyfile
rename to runtime/C/doxyfile
diff --git a/antlr-3.4/runtime/C/doxygen/atsections.dox b/runtime/C/doxygen/atsections.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/atsections.dox
rename to runtime/C/doxygen/atsections.dox
diff --git a/antlr-3.4/runtime/C/doxygen/build.dox b/runtime/C/doxygen/build.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/build.dox
rename to runtime/C/doxygen/build.dox
diff --git a/antlr-3.4/runtime/C/doxygen/buildrec.dox b/runtime/C/doxygen/buildrec.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/buildrec.dox
rename to runtime/C/doxygen/buildrec.dox
diff --git a/antlr-3.4/runtime/C/doxygen/changes31.dox b/runtime/C/doxygen/changes31.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/changes31.dox
rename to runtime/C/doxygen/changes31.dox
diff --git a/antlr-3.4/runtime/C/doxygen/doxygengroups.dox b/runtime/C/doxygen/doxygengroups.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/doxygengroups.dox
rename to runtime/C/doxygen/doxygengroups.dox
diff --git a/antlr-3.4/runtime/C/doxygen/generate.dox b/runtime/C/doxygen/generate.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/generate.dox
rename to runtime/C/doxygen/generate.dox
diff --git a/antlr-3.4/runtime/C/doxygen/interop.dox b/runtime/C/doxygen/interop.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/interop.dox
rename to runtime/C/doxygen/interop.dox
diff --git a/antlr-3.4/runtime/C/doxygen/knownissues.dox b/runtime/C/doxygen/knownissues.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/knownissues.dox
rename to runtime/C/doxygen/knownissues.dox
diff --git a/antlr-3.4/runtime/C/doxygen/mainpage.dox b/runtime/C/doxygen/mainpage.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/mainpage.dox
rename to runtime/C/doxygen/mainpage.dox
diff --git a/antlr-3.4/runtime/C/doxygen/runtime.dox b/runtime/C/doxygen/runtime.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/runtime.dox
rename to runtime/C/doxygen/runtime.dox
diff --git a/antlr-3.4/runtime/C/doxygen/using.dox b/runtime/C/doxygen/using.dox
similarity index 100%
rename from antlr-3.4/runtime/C/doxygen/using.dox
rename to runtime/C/doxygen/using.dox
diff --git a/antlr-3.4/runtime/C/include/antlr3.h b/runtime/C/include/antlr3.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3.h
rename to runtime/C/include/antlr3.h
diff --git a/antlr-3.4/runtime/C/include/antlr3baserecognizer.h b/runtime/C/include/antlr3baserecognizer.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3baserecognizer.h
rename to runtime/C/include/antlr3baserecognizer.h
diff --git a/antlr-3.4/runtime/C/include/antlr3basetree.h b/runtime/C/include/antlr3basetree.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3basetree.h
rename to runtime/C/include/antlr3basetree.h
diff --git a/antlr-3.4/runtime/C/include/antlr3basetreeadaptor.h b/runtime/C/include/antlr3basetreeadaptor.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3basetreeadaptor.h
rename to runtime/C/include/antlr3basetreeadaptor.h
diff --git a/antlr-3.4/runtime/C/include/antlr3bitset.h b/runtime/C/include/antlr3bitset.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3bitset.h
rename to runtime/C/include/antlr3bitset.h
diff --git a/antlr-3.4/runtime/C/include/antlr3collections.h b/runtime/C/include/antlr3collections.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3collections.h
rename to runtime/C/include/antlr3collections.h
diff --git a/antlr-3.4/runtime/C/include/antlr3commontoken.h b/runtime/C/include/antlr3commontoken.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3commontoken.h
rename to runtime/C/include/antlr3commontoken.h
diff --git a/antlr-3.4/runtime/C/include/antlr3commontree.h b/runtime/C/include/antlr3commontree.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3commontree.h
rename to runtime/C/include/antlr3commontree.h
diff --git a/antlr-3.4/runtime/C/include/antlr3commontreeadaptor.h b/runtime/C/include/antlr3commontreeadaptor.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3commontreeadaptor.h
rename to runtime/C/include/antlr3commontreeadaptor.h
diff --git a/antlr-3.4/runtime/C/include/antlr3commontreenodestream.h b/runtime/C/include/antlr3commontreenodestream.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3commontreenodestream.h
rename to runtime/C/include/antlr3commontreenodestream.h
diff --git a/runtime/C/include/antlr3convertutf.h b/runtime/C/include/antlr3convertutf.h
new file mode 100644
index 0000000..e0c5603
--- /dev/null
+++ b/runtime/C/include/antlr3convertutf.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2001-2004 Unicode, Inc.
+ * 
+ * Disclaimer
+ * 
+ * This source code is provided as is by Unicode, Inc. No claims are
+ * made as to fitness for any particular purpose. No warranties of any
+ * kind are expressed or implied. The recipient agrees to determine
+ * applicability of information provided. If this file has been
+ * purchased on magnetic or optical media from Unicode, Inc., the
+ * sole remedy for any claim will be exchange of defective media
+ * within 90 days of receipt.
+ * 
+ * Limitations on Rights to Redistribute This Code
+ * 
+ * Unicode, Inc. hereby grants the right to freely use the information
+ * supplied in this file in the creation of products supporting the
+ * Unicode Standard, and to make copies of this file in any form
+ * for internal or external distribution as long as this notice
+ * remains attached.
+ */
+
+/* ---------------------------------------------------------------------
+
+    Conversions between UTF32, UTF-16, and UTF-8.  Header file.
+
+    Several functions are included here, forming a complete set of
+    conversions between the three formats.  UTF-7 is not included
+    here, but is handled in a separate source file.
+
+    Each of these routines takes pointers to input buffers and output
+    buffers.  The input buffers are const.
+
+    Each routine converts the text between *sourceStart and sourceEnd,
+    putting the result into the buffer between *targetStart and
+    targetEnd. Note: the end pointers are *after* the last item: e.g. 
+    *(sourceEnd - 1) is the last item.
+
+    The return result indicates whether the conversion was successful,
+    and if not, whether the problem was in the source or target buffers.
+    (Only the first encountered problem is indicated.)
+
+    After the conversion, *sourceStart and *targetStart are both
+    updated to point to the end of last text successfully converted in
+    the respective buffers.
+
+    Input parameters:
+	sourceStart - pointer to a pointer to the source buffer.
+		The contents of this are modified on return so that
+		it points at the next thing to be converted.
+	targetStart - similarly, pointer to pointer to the target buffer.
+	sourceEnd, targetEnd - respectively pointers to the ends of the
+		two buffers, for overflow checking only.
+
+    These conversion functions take a ConversionFlags argument. When this
+    flag is set to strict, both irregular sequences and isolated surrogates
+    will cause an error.  When the flag is set to lenient, both irregular
+    sequences and isolated surrogates are converted.
+
+    Whether the flag is strict or lenient, all illegal sequences will cause
+    an error return. This includes sequences such as: <F4 90 80 80>, <C0 80>,
+    or <A0> in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code
+    must check for illegal sequences.
+
+    When the flag is set to lenient, characters over 0x10FFFF are converted
+    to the replacement character; otherwise (when the flag is set to strict)
+    they constitute an error.
+
+    Output parameters:
+	The value "sourceIllegal" is returned from some routines if the input
+	sequence is malformed.  When "sourceIllegal" is returned, the source
+	value will point to the illegal value that caused the problem. E.g.,
+	in UTF-8 when a sequence is malformed, it points to the start of the
+	malformed sequence.  
+
+    Author: Mark E. Davis, 1994.
+    Rev History: Rick McGowan, fixes & updates May 2001.
+		 Fixes & updates, Sept 2001.
+
+------------------------------------------------------------------------ */
+
+/* ---------------------------------------------------------------------
+    The following 4 definitions are compiler-specific.
+    The C standard does not guarantee that wchar_t has at least
+    16 bits, so wchar_t is no less portable than unsigned short!
+    All should be unsigned values to avoid sign extension during
+    bit mask & shift operations.
+------------------------------------------------------------------------ */
+
+
+// Changes for ANTLR3 - Jim Idle, January 2008.
+// The built-in Unicode types have been changed to aliases
+// for the types that ANTLR determines for the system at
+// compile time.
+//
+// typedef unsigned long	UTF32;	/* at least 32 bits */
+// typedef unsigned short	UTF16;	/* at least 16 bits */
+// typedef unsigned char	UTF8;	/* typically 8 bits */
+// typedef unsigned char	Boolean; /* 0 or 1 */
+
+#ifndef	_ANTLR3_CONVERTUTF_H
+#define	_ANTLR3_CONVERTUTF_H
+
+#include	<antlr3defs.h>
+
+typedef ANTLR3_UINT32	UTF32;	/* at least 32 bits */
+typedef ANTLR3_UINT16	UTF16;	/* at least 16 bits */
+typedef ANTLR3_UINT8	UTF8;	/* typically 8 bits */
+
+/* Some fundamental constants */
+#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
+#define UNI_MAX_BMP (UTF32)0x0000FFFF
+#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
+#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
+#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
+
+#define UNI_SUR_HIGH_START  (UTF32)0xD800
+#define UNI_SUR_HIGH_END    (UTF32)0xDBFF
+#define UNI_SUR_LOW_START   (UTF32)0xDC00
+#define UNI_SUR_LOW_END     (UTF32)0xDFFF
+#define halfShift           ((UTF32)10)
+#define halfBase            ((UTF32)0x0010000UL)
+#define halfMask            ((UTF32)0x3FFUL)
+
+typedef enum {
+	conversionOK, 		/* conversion successful */
+	sourceExhausted,	/* partial character in source, but hit end */
+	targetExhausted,	/* insuff. room in target for conversion */
+	sourceIllegal		/* source sequence is illegal/malformed */
+} ConversionResult;
+
+typedef enum {
+	strictConversion = 0,
+	lenientConversion
+} ConversionFlags;
+
+/* This is for C++ and does no harm in C */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ConversionResult ConvertUTF8toUTF16 (
+		const UTF8** sourceStart, const UTF8* sourceEnd, 
+		UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF16toUTF8 (
+		const UTF16** sourceStart, const UTF16* sourceEnd, 
+		UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+		
+ConversionResult ConvertUTF8toUTF32 (
+		const UTF8** sourceStart, const UTF8* sourceEnd, 
+		UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF32toUTF8 (
+		const UTF32** sourceStart, const UTF32* sourceEnd, 
+		UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+		
+ConversionResult ConvertUTF16toUTF32 (
+		const UTF16** sourceStart, const UTF16* sourceEnd, 
+		UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF32toUTF16 (
+		const UTF32** sourceStart, const UTF32* sourceEnd, 
+		UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+
+ANTLR3_BOOLEAN isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+/* --------------------------------------------------------------------- */
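A minimal usage sketch for the conversion routines declared above, assuming only that the runtime headers are on the include path: the source and target pointers are passed by address and advanced by the converter, so the number of code units written can be recovered from the target pointer afterwards.

    #include <antlr3.h>
    #include <string.h>

    /* Convert a NUL-terminated UTF-8 string into a caller-supplied UTF-16
     * buffer. The converter advances both pointers, so the number of UTF-16
     * code units produced is (target - out) on return. Returns 0 on any
     * conversion failure (sourceIllegal, sourceExhausted or targetExhausted).
     */
    static ANTLR3_UINT32
    utf8ToUtf16(const char *in, UTF16 *out, ANTLR3_UINT32 outLen)
    {
        const UTF8 *source    = (const UTF8 *)in;
        const UTF8 *sourceEnd = source + strlen(in);
        UTF16      *target    = out;
        UTF16      *targetEnd = out + outLen;

        if (ConvertUTF8toUTF16(&source, sourceEnd, &target, targetEnd,
                               strictConversion) != conversionOK)
        {
            return 0;
        }
        return (ANTLR3_UINT32)(target - out);
    }

Because a UTF-8 sequence of N bytes never expands to more than N UTF-16 code units, an out buffer of strlen(in) elements is always sufficient for well-formed input.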
diff --git a/antlr-3.4/runtime/C/include/antlr3cyclicdfa.h b/runtime/C/include/antlr3cyclicdfa.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3cyclicdfa.h
rename to runtime/C/include/antlr3cyclicdfa.h
diff --git a/runtime/C/include/antlr3debugeventlistener.h b/runtime/C/include/antlr3debugeventlistener.h
new file mode 100644
index 0000000..1d1b38e
--- /dev/null
+++ b/runtime/C/include/antlr3debugeventlistener.h
@@ -0,0 +1,398 @@
+/**
+ * \file
+ * The definition of all debugging events that a recognizer can trigger.
+ *
+ * \remark
+ *  From the java implementation by Terence Parr...
+ *  I did not create a separate AST debugging interface as it would create
+ *  lots of extra classes and DebugParser has a dbg var defined, which makes
+ *  it hard to change to ASTDebugEventListener.  I looked hard at this issue
+ *  and it is easier to understand as one monolithic event interface for all
+ *  possible events.  Hopefully, adding ST debugging stuff won't be bad.  Leave
+ *  for future. 4/26/2006.
+ */
+
+#ifndef	ANTLR3_DEBUG_EVENT_LISTENER_H
+#define	ANTLR3_DEBUG_EVENT_LISTENER_H
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3defs.h>
+#include    <antlr3basetree.h>
+#include    <antlr3commontoken.h>
+
+
+/// Default debugging port
+///
+#define DEFAULT_DEBUGGER_PORT		0xBFCC
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** The ANTLR3 debugging interface for communicating with ANTLR Works. Function comments
+ *  mostly taken from the Java version.
+ */
+typedef struct ANTLR3_DEBUG_EVENT_LISTENER_struct
+{
+	/// The port number which the debug listener should listen on for a connection
+	///
+	ANTLR3_UINT32		port;
+
+	/// The socket structure we receive after a successful accept on the serverSocket
+	///
+	SOCKET				socket;
+
+	/** The version of the debugging protocol supported by the providing
+	 *  instance of the debug event listener.
+	 */
+	int					protocol_version;
+
+	/// The name of the grammar file that we are debugging
+	///
+	pANTLR3_STRING		grammarFileName;
+
+	/// Indicates whether we have already connected or not
+	///
+	ANTLR3_BOOLEAN		initialized;
+
+	/// Used to serialize the values of any particular token we need to
+	/// send back to the debugger.
+	///
+	pANTLR3_STRING		tokenString;
+
+
+	/// Allows the debug event system to access the adapter in use
+	/// by the recognizer, if this is a tree parser of some sort.
+	///
+	pANTLR3_BASE_TREE_ADAPTOR	adaptor;
+
+	/// Wait for a connection from the debugger and initiate the
+	/// debugging session.
+	///
+	ANTLR3_BOOLEAN	(*handshake)		(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+
+	/** The parser has just entered a rule.  No decision has been made about
+	 *  which alt is predicted.  This is fired AFTER init actions have been
+	 *  executed.  Attributes are defined and available etc...
+	 */
+	void			(*enterRule)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName);
+
+	/** Because rules can have lots of alternatives, it is very useful to
+	 *  know which alt you are entering.  This is 1..n for n alts.
+	 */
+	void			(*enterAlt)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int alt);
+
+	/** This is the last thing executed before leaving a rule.  It is
+	 *  executed even if an exception is thrown.  This is triggered after
+	 *  error reporting and recovery have occurred (unless the exception is
+	 *  not caught in this rule).  This implies an "exitAlt" event.
+	 */
+	void			(*exitRule)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName);
+
+	/** Track entry into any (...) subrule or other EBNF construct
+	 */
+	void			(*enterSubRule)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
+	
+	void			(*exitSubRule)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
+
+	/** Every decision, fixed k or arbitrary, has an enter/exit event
+	 *  so that a GUI can easily track what LT/consume events are
+	 *  associated with prediction.  You will see a single enter/exit
+	 *  subrule but multiple enter/exit decision events, one for each
+	 *  loop iteration.
+	 */
+	void			(*enterDecision)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
+
+	void			(*exitDecision)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
+
+	/** An input token was consumed; matched by any kind of element.
+	 *  Trigger after the token was matched by things like match(), matchAny().
+	 */
+	void			(*consumeToken)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t);
+
+	/** An off-channel input token was consumed.
+	 *  Trigger after the token was matched by things like match(), matchAny().
+	 *  (unless, of course, the hidden token is the first token in the input stream).
+	 */
+	void			(*consumeHiddenToken)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t);
+
+	/** Somebody (anybody) looked ahead.  Note that this actually gets
+	 *  triggered by both LA and LT calls.  The debugger will want to know
+	 *  which Token object was examined.  Like consumeToken, this indicates
+	 *  what token was seen at that depth.  A remote debugger cannot look
+	 *  ahead into a file it doesn't have so LT events must pass the token
+	 *  even if the info is redundant.
+	 */
+	void			(*LT)				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_COMMON_TOKEN t);
+
+	/** The parser is going to look arbitrarily ahead; mark this location,
+	 *  the token stream's marker is sent in case you need it.
+	 */
+	void			(*mark)				(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker);
+
+	/** After an arbitrarily long lookahead as with a cyclic DFA (or with
+	 *  any backtrack), this informs the debugger that the stream should be
+	 *  rewound to the position associated with marker.
+	 */
+	void			(*rewind)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker);
+
+	/** Rewind to the input position of the last marker.
+	 *  Used currently only after a cyclic DFA and just
+	 *  before starting a sem/syn predicate to get the
+	 *  input position back to the start of the decision.
+	 *  Do not "pop" the marker off the state.  mark(i)
+	 *  and rewind(i) should balance still.
+	 */
+	void			(*rewindLast)		(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+
+	void			(*beginBacktrack)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level);
+
+	void			(*endBacktrack)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level, ANTLR3_BOOLEAN successful);
+
+	/** To watch a parser move through the grammar, the parser needs to
+	 *  inform the debugger what line/charPos it is passing in the grammar.
+	 *  For now, this does not know how to switch from one grammar to the
+	 *  other and back for island grammars etc...
+	 *
+	 *  This should also allow breakpoints because the debugger can stop
+	 *  the parser whenever it hits this line/pos.
+	 */
+	void			(*location)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int line, int pos);
+
+	/** A recognition exception occurred such as NoViableAltException.  I made
+	 *  this a generic event so that I can alter the exception hierarchy later
+	 *  without having to alter all the debug objects.
+	 *
+	 *  Upon error, the stack of enter rule/subrule must be properly unwound.
+	 *  If no viable alt occurs it is within an enter/exit decision, which
+	 *  also must be rewound.  Even the rewind for each mark must be unwound.
+	 *  In the Java target this is pretty easy using try/finally, if a bit
+	 *  ugly in the generated code.  The rewind is generated in DFA.predict()
+	 *  actually so no code needs to be generated for that.  For languages
+	 *  w/o this "finally" feature (C++?), the target implementor will have
+	 *  to build an event stack or something.
+	 *
+	 *  Across a socket for remote debugging, only the RecognitionException
+	 *  data fields are transmitted.  The token object or whatever that
+	 *  caused the problem was the last object referenced by LT.  The
+	 *  immediately preceding LT event should hold the unexpected Token or
+	 *  char.
+	 *
+	 *  Here is a sample event trace for grammar:
+	 *
+	 *  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
+     *    | D
+     *    ;
+     *
+	 *  The sequence for this rule (with no viable alt in the subrule) for
+	 *  input 'c c' (there are 3 tokens) is:
+	 *
+	 *		commence
+	 *		LT(1)
+	 *		enterRule b
+	 *		location 7 1
+	 *		enter decision 3
+	 *		LT(1)
+	 *		exit decision 3
+	 *		enterAlt1
+	 *		location 7 5
+	 *		LT(1)
+	 *		consumeToken [c/<4>,1:0]
+	 *		location 7 7
+	 *		enterSubRule 2
+	 *		enter decision 2
+	 *		LT(1)
+	 *		LT(1)
+	 *		recognitionException NoViableAltException 2 1 2
+	 *		exit decision 2
+	 *		exitSubRule 2
+	 *		beginResync
+	 *		LT(1)
+	 *		consumeToken [c/<4>,1:1]
+	 *		LT(1)
+	 *		endResync
+	 *		LT(-1)
+	 *		exitRule b
+	 *		terminate
+	 */
+	void			(*recognitionException)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_EXCEPTION e);
+
+	/** Indicates the recognizer is about to consume tokens to resynchronize
+	 *  the parser.  Any consume events from here until the recovered event
+	 *  are not part of the parse--they are dead tokens.
+	 */
+	void			(*beginResync)			(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+
+	/** Indicates that the recognizer has finished consuming tokens in order
+	 *  to resynchronize.  There may be multiple beginResync/endResync pairs
+	 *  before the recognizer comes out of errorRecovery mode (in which
+	 *  multiple errors are suppressed).  This will be useful
+	 *  in a GUI where you probably want to grey out tokens that are consumed
+	 *  but not matched to anything in the grammar.  Anything between
+	 *  a beginResync/endResync pair was tossed out by the parser.
+	 */
+	void			(*endResync)			(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+
+	/** A semantic predicate was evaluated with this result and action text.
+	*/
+	void			(*semanticPredicate)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_BOOLEAN result, const char * predicate);
+
+	/** Announce that parsing has begun.  Not technically useful except for
+	 *  sending events over a socket.  A GUI for example will launch a thread
+	 *  to connect and communicate with a remote parser.  The thread will want
+	 *  to notify the GUI when a connection is made.  ANTLR parsers
+	 *  trigger this upon entry to the first rule (the ruleLevel is used to
+	 *  figure this out).
+	 */
+	void			(*commence)				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+
+	/** Parsing is over; successfully or not.  Mostly useful for telling
+	 *  remote debugging listeners that it's time to quit.  When the rule
+	 *  invocation level goes to zero at the end of a rule, we are done
+	 *  parsing.
+	 */
+	void			(*terminate)			(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+
+	/// Retrieve the acknowledge response from the debugger. In fact, this
+	/// response is never used at the moment, so we just read whatever
+	/// is in the socket buffer and throw it away.
+	///
+	void			(*ack)					(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+
+	// T r e e  P a r s i n g
+
+	/** Input for a tree parser is an AST, but we know nothing for sure
+	 *  about a node except its type and text (obtained from the adaptor).
+	 *  This is the analog of the consumeToken method.  The ID is usually 
+	 *  the memory address of the node.
+	 *  If the type is UP or DOWN, then
+	 *  the ID is not really meaningful as it's fixed--there is
+	 *  just one UP node and one DOWN navigation node.
+	 *
+	 *  Note that unlike the Java version, the node type of the C parsers
+	 *  is always fixed as pANTLR3_BASE_TREE, because all such structures
+	 *  contain a super pointer to their parent (generally COMMON_TREE), and within
+	 *  that there is a super pointer that can point to a user type that encapsulates it.
+	 *  This is almost akin to an interface pointer, except that we don't need to
+	 *  know the interface in full, just those bits that form the base.
+	 * @param t
+	 */
+	void			(*consumeNode)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
+
+	/** The tree parser looked ahead.  If the type is UP or DOWN,
+	 *  then the ID is not really meaningful as it's fixed--there is
+	 *  just one UP node and one DOWN navigation node.
+	 */
+	void			(*LTT)					(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_BASE_TREE t);
+
+
+	// A S T  E v e n t s
+
+	/** A nil was created (even nil nodes have a unique ID...
+	 *  they are not "null" per se).  As of 4/28/2006, this
+	 *  seems to be uniquely triggered when starting a new subtree
+	 *  such as when entering a subrule in automatic mode and when
+	 *  building a tree in rewrite mode.
+     *
+ 	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID is set.
+	 */
+	void			(*nilNode)				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
+
+	/** If a syntax error occurs, recognizers bracket the error
+	 *  with an error node if they are building ASTs. This event
+	 *  notifies the listener that this is the case.
+	 */
+	void			(*errorNode)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
+
+	/** Announce a new node built from token elements such as type etc...
+	 * 
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID, type, text are
+	 *  set.
+	 */
+	void			(*createNode)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
+
+	/** Announce a new node built from an existing token.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only node.ID and token.tokenIndex
+	 *  are set.
+	 */
+	void			(*createNodeTok)		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE node, pANTLR3_COMMON_TOKEN token);
+
+	/** Make a node the new root of an existing root.
+	 *
+	 *  Note: the newRootID parameter is possibly different
+	 *  than the TreeAdaptor.becomeRoot() newRoot parameter.
+	 *  In our case, it will always be the result of calling
+	 *  TreeAdaptor.becomeRoot() and not root_n or whatever.
+	 *
+	 *  The listener should assume that this event occurs
+	 *  only when the current subrule (or rule) subtree is
+	 *  being reset to newRootID.
+	 * 
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only IDs are set.
+	 *
+	 *  @see org.antlr.runtime.tree.TreeAdaptor.becomeRoot()
+	 */
+	void			(*becomeRoot)			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE newRoot, pANTLR3_BASE_TREE oldRoot);
+
+	/** Make childID a child of rootID.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only IDs are set.
+	 * 
+	 *  @see org.antlr.runtime.tree.TreeAdaptor.addChild()
+	 */
+	void			(*addChild)				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE root, pANTLR3_BASE_TREE child);
+
+	/** Set the token start/stop token index for a subtree root or node.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID is set.
+	 */
+	void			(*setTokenBoundaries)	(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t, ANTLR3_MARKER tokenStartIndex, ANTLR3_MARKER tokenStopIndex);
+
+	/// Free up the resources allocated to this structure
+	///
+	void			(*free)					(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+
+}
+	ANTLR3_DEBUG_EVENT_LISTENER;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
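As a sketch of how the listener structure above is customized: the default listener returned by antlr3DebugListenerNew() (declared in antlr3defs.h) normally streams events over a socket to a remote debugger such as ANTLRWorks, but individual callbacks can be replaced with local functions that match the corresponding slots, for example to trace rule entry and exit.

    #include <antlr3.h>

    /* Replacements for the enterRule/exitRule slots; the signatures match the
     * function-pointer members of ANTLR3_DEBUG_EVENT_LISTENER above.
     */
    static void
    traceEnterRule(pANTLR3_DEBUG_EVENT_LISTENER delboy,
                   const char *grammarFileName, const char *ruleName)
    {
        ANTLR3_PRINTF("enter %s (%s)\n", ruleName, grammarFileName);
    }

    static void
    traceExitRule(pANTLR3_DEBUG_EVENT_LISTENER delboy,
                  const char *grammarFileName, const char *ruleName)
    {
        ANTLR3_PRINTF("exit  %s (%s)\n", ruleName, grammarFileName);
    }

    /* Create a listener and hook in the tracing callbacks; the result can be
     * handed to a debug-enabled constructor such as antlr3ParserNewStreamDbg().
     */
    static pANTLR3_DEBUG_EVENT_LISTENER
    newTracingListener(void)
    {
        pANTLR3_DEBUG_EVENT_LISTENER dbg = antlr3DebugListenerNew();

        if (dbg != NULL)
        {
            dbg->enterRule = traceEnterRule;
            dbg->exitRule  = traceExitRule;
        }
        return dbg;
    }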
diff --git a/runtime/C/include/antlr3defs.h b/runtime/C/include/antlr3defs.h
new file mode 100644
index 0000000..84efd01
--- /dev/null
+++ b/runtime/C/include/antlr3defs.h
@@ -0,0 +1,636 @@
+/** \file
+ * Basic type and constant definitions for ANTLR3 Runtime.
+ */
+#ifndef	_ANTLR3DEFS_H
+#define	_ANTLR3DEFS_H
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/* The following are for generated code; they are not referenced internally.
+ */
+#if !defined(ANTLR3_HUGE) && !defined(ANTLR3_AVERAGE) && !defined(ANTLR3_SMALL)
+#define	ANTLR3_AVERAGE
+#endif
+
+#ifdef	ANTLR3_HUGE
+#ifndef	ANTLR3_SIZE_HINT
+#define	ANTLR3_SIZE_HINT        2049
+#endif
+#ifndef	ANTLR3_LIST_SIZE_HINT
+#define	ANTLR3_LIST_SIZE_HINT   127
+#endif
+#endif
+
+#ifdef	ANTLR3_AVERAGE
+#ifndef	ANTLR3_SIZE_HINT
+#define	ANTLR3_SIZE_HINT        1025
+#endif
+#ifndef	ANTLR3_LIST_SIZE_HINT
+#define	ANTLR3_LIST_SIZE_HINT   63
+#endif
+#endif
+
+#ifdef	ANTLR3_SMALL
+#ifndef	ANTLR3_SIZE_HINT
+#define	ANTLR3_SIZE_HINT        211
+#endif
+#ifndef	ANTLR3_LIST_SIZE_HINT
+#define	ANTLR3_LIST_SIZE_HINT   31
+#endif
+#endif
+
+// Definitions that indicate the encoding scheme of character streams, strings, etc.
+//
+/// Indicates Big Endian for encodings where this makes sense
+///
+#define ANTLR3_BE           1
+
+/// Indicates Little Endian for encodings where this makes sense
+///
+#define ANTLR3_LE           2
+
+/// General latin-1 or other 8 bit encoding scheme such as straight ASCII
+///
+#define ANTLR3_ENC_8BIT     4
+
+/// UTF-8 encoding scheme
+///
+#define ANTLR3_ENC_UTF8     8
+
+/// UTF-16 encoding scheme (which also covers UCS2 as that does not have surrogates)
+///
+#define ANTLR3_ENC_UTF16        16
+#define ANTLR3_ENC_UTF16BE      (16 + ANTLR3_BE)
+#define ANTLR3_ENC_UTF16LE      (16 + ANTLR3_LE)
+
+/// UTF-32 encoding scheme (basically straight 32 bit)
+///
+#define ANTLR3_ENC_UTF32        32
+#define ANTLR3_ENC_UTF32BE      (32 + ANTLR3_BE)
+#define ANTLR3_ENC_UTF32LE      (32 + ANTLR3_LE)
+
+/// Input is 8 bit EBCDIC (which we convert to 8 bit ASCII on the fly)
+///
+#define ANTLR3_ENC_EBCDIC       64
+
+/* Common definitions come first
+ */
+#include    <antlr3errors.h>
+
+/* Work out what operating system/compiler this is. We just do this once
+ * here and use an internal symbol after this.
+ */
+#ifdef	_WIN64
+
+# ifndef	ANTLR3_WINDOWS
+#   define	ANTLR3_WINDOWS
+# endif
+# define	ANTLR3_WIN64
+# define	ANTLR3_USE_64BIT
+
+#elif __LP64__
+
+# define ANTLR3_USE_64BIT
+
+#else
+
+#ifdef	_WIN32
+# ifndef	ANTLR3_WINDOWS
+#  define	ANTLR3_WINDOWS
+# endif
+
+#define	ANTLR3_WIN32
+#endif
+
+#endif
+
+#ifdef	ANTLR3_WINDOWS 
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define	WIN32_LEAN_AND_MEAN
+#endif
+
+/* Allow VC 8 (vs2005) and above to use 'secure' versions of various functions such as sprintf
+ */
+#ifndef	_CRT_SECURE_NO_DEPRECATE 
+#define	_CRT_SECURE_NO_DEPRECATE 
+#endif
+
+#include    <windows.h>
+#include    <stdlib.h>
+#include    <winsock.h>
+#include    <stdio.h>
+#include    <sys/types.h>
+#include    <sys/stat.h>
+#include    <stdarg.h>
+
+#define	ANTLR3_API      __declspec(dllexport)
+#define	ANTLR3_CDECL    __cdecl
+#define ANTLR3_FASTCALL __fastcall
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef __MINGW32__
+// Standard Windows types
+//
+typedef	INT32	ANTLR3_CHAR,	*pANTLR3_CHAR;
+typedef	UINT32	ANTLR3_UCHAR,	*pANTLR3_UCHAR;
+
+typedef	INT8	ANTLR3_INT8,	*pANTLR3_INT8;
+typedef	INT16	ANTLR3_INT16,	*pANTLR3_INT16;
+typedef	INT32	ANTLR3_INT32,	*pANTLR3_INT32;
+typedef	INT64	ANTLR3_INT64,	*pANTLR3_INT64;
+typedef	UINT8	ANTLR3_UINT8,	*pANTLR3_UINT8;
+typedef	UINT16	ANTLR3_UINT16,	*pANTLR3_UINT16;
+typedef	UINT32	ANTLR3_UINT32,	*pANTLR3_UINT32;
+typedef	UINT64	ANTLR3_UINT64,	*pANTLR3_UINT64;
+typedef UINT64  ANTLR3_BITWORD, *pANTLR3_BITWORD;
+typedef	UINT8	ANTLR3_BOOLEAN, *pANTLR3_BOOLEAN;
+
+#else
+// Mingw uses stdint.h and fails to define standard Microsoft typedefs
+// such as UINT16, hence we must use stdint.h for Mingw.
+//
+#include <stdint.h>
+typedef int32_t     ANTLR3_CHAR,    *pANTLR3_CHAR;
+typedef uint32_t    ANTLR3_UCHAR,   *pANTLR3_UCHAR;
+
+typedef int8_t	    ANTLR3_INT8,    *pANTLR3_INT8;
+typedef int16_t	    ANTLR3_INT16,   *pANTLR3_INT16;
+typedef int32_t	    ANTLR3_INT32,   *pANTLR3_INT32;
+typedef int64_t	    ANTLR3_INT64,   *pANTLR3_INT64;
+
+typedef uint8_t	    ANTLR3_UINT8,   *pANTLR3_UINT8;
+typedef uint16_t    ANTLR3_UINT16,  *pANTLR3_UINT16;
+typedef uint32_t    ANTLR3_UINT32,  *pANTLR3_UINT32;
+typedef uint64_t    ANTLR3_UINT64,  *pANTLR3_UINT64;
+typedef uint64_t    ANTLR3_BITWORD, *pANTLR3_BITWORD;
+
+typedef	uint8_t	    ANTLR3_BOOLEAN, *pANTLR3_BOOLEAN;
+
+#endif
+
+
+
+#define	ANTLR3_UINT64_LIT(lit)  lit##ULL
+
+#define	ANTLR3_INLINE	        __inline
+
+typedef FILE *	    ANTLR3_FDSC;
+typedef	struct stat ANTLR3_FSTAT_STRUCT;
+
+#ifdef	ANTLR3_USE_64BIT
+#define	ANTLR3_FUNC_PTR(ptr)	(void *)((ANTLR3_UINT64)(ptr))
+#define ANTLR3_UINT64_CAST(ptr) ((ANTLR3_UINT64)(ptr))
+#define	ANTLR3_UINT32_CAST(ptr)	(ANTLR3_UINT32)((ANTLR3_UINT64)(ptr))
+typedef ANTLR3_INT64		ANTLR3_MARKER;			
+typedef ANTLR3_UINT64		ANTLR3_INTKEY;
+#else
+#define	ANTLR3_FUNC_PTR(ptr)	(void *)((ANTLR3_UINT32)(ptr))
+#define ANTLR3_UINT64_CAST(ptr) (ANTLR3_UINT64)((ANTLR3_UINT32)(ptr))
+#define	ANTLR3_UINT32_CAST(ptr)	(ANTLR3_UINT32)(ptr)
+typedef	ANTLR3_INT32		ANTLR3_MARKER;
+typedef ANTLR3_UINT32		ANTLR3_INTKEY;
+#endif
+
+#ifdef	ANTLR3_WIN32
+#endif
+
+#ifdef	ANTLR3_WIN64
+#endif
+
+
+typedef	int			ANTLR3_SALENT;								// Type used for size of accept structure
+typedef struct sockaddr_in	ANTLR3_SOCKADDRT, * pANTLR3_SOCKADDRT;	// Type used for socket address declaration
+typedef struct sockaddr		ANTLR3_SOCKADDRC, * pANTLR3_SOCKADDRC;	// Type used for cast on accept()
+
+#define	ANTLR3_CLOSESOCKET	closesocket
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Turn off warnings that are over-zealous, such as those complaining
+ * about strdup.
+ */
+
+/* Don't complain about "deprecated" functions such as strdup
+ */
+#pragma warning( disable : 4996 )
+
+#else
+
+/* Include configure generated header file
+ */
+#include	<antlr3config.h>
+
+#include <stdio.h>
+
+#if HAVE_STDINT_H
+# include <stdint.h>
+#endif
+
+#if HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+
+#if HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+
+#if STDC_HEADERS
+# include   <stdlib.h>
+# include   <stddef.h>
+# include   <stdarg.h>
+#else
+# if HAVE_STDLIB_H
+#  include  <stdlib.h>
+# endif
+# if HAVE_STDARG_H
+#  include  <stdarg.h>
+# endif
+#endif
+
+#if HAVE_STRING_H
+# if !STDC_HEADERS && HAVE_MEMORY_H
+#  include <memory.h>
+# endif
+# include <string.h>
+#endif
+
+#if HAVE_STRINGS_H
+# include <strings.h>
+#endif
+
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+
+#if HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+
+#ifdef HAVE_NETINET_IN_H
+#include	<netinet/in.h>
+#endif
+
+#ifdef HAVE_SOCKET_H
+# include	<socket.h>
+#else
+# if HAVE_SYS_SOCKET_H
+#  include	<sys/socket.h>
+# endif
+#endif
+
+#ifdef HAVE_NETINET_TCP_H
+#include	<netinet/tcp.h>
+#endif
+
+#ifdef HAVE_ARPA_NAMESER_H
+#include <arpa/nameser.h> /* DNS HEADER struct */
+#endif
+
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+
+
+#ifdef HAVE_SYS_RESOLVE_H
+#include	<sys/resolv.h>
+#endif
+
+#ifdef HAVE_RESOLVE_H
+#include	<resolv.h>
+#endif
+
+
+#ifdef	HAVE_MALLOC_H
+# include    <malloc.h>
+#else
+# ifdef	HAVE_SYS_MALLOC_H
+#  include    <sys/malloc.h>
+# endif
+#endif
+
+#ifdef  HAVE_CTYPE_H
+# include   <ctype.h>
+#endif
+
+/* Some platforms, such as AIX, define a macro index() in string.h.
+ * We must get rid of that definition as we use ->index all over the
+ * place. Defining macros like this in system header files is a really
+ * bad idea, but I doubt that IBM will listen to me ;-)
+ */
+#ifdef	index
+#undef	index
+#endif
+
+#define _stat   stat
+
+// SOCKET not defined on Unix
+// 
+typedef	int	SOCKET;
+
+#define ANTLR3_API
+#define	ANTLR3_CDECL
+#define ANTLR3_FASTCALL
+
+#ifdef	__hpux
+
+ // HPUX is always different usually for no good reason. Tru64 should have kicked it
+ // into touch and everyone knows it ;-)
+ //
+ typedef struct sockaddr_in ANTLR3_SOCKADDRT, * pANTLR3_SOCKADDRT;	// Type used for socket address declaration
+ typedef void *		    pANTLR3_SOCKADDRC;				// Type used for cast on accept()
+ typedef int		    ANTLR3_SALENT;
+
+#else
+
+# if defined(_AIX) || __GNUC__ > 3 
+
+   typedef  socklen_t   ANTLR3_SALENT;
+
+# else
+
+   typedef  size_t	ANTLR3_SALENT;
+
+# endif
+
+   typedef struct sockaddr_in   ANTLR3_SOCKADDRT, * pANTLR3_SOCKADDRT;	// Type used for socket address declaration
+   typedef struct sockaddr	* pANTLR3_SOCKADDRC;                    // Type used for cast on accept()
+
+#endif
+
+#define INVALID_SOCKET      ((SOCKET)-1)
+#define	ANTLR3_CLOSESOCKET  close
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inherit type definitions for autoconf
+ */
+typedef int32_t	    ANTLR3_CHAR,    *pANTLR3_CHAR;
+typedef uint32_t    ANTLR3_UCHAR,   *pANTLR3_UCHAR;
+
+typedef int8_t	    ANTLR3_INT8,    *pANTLR3_INT8;
+typedef int16_t	    ANTLR3_INT16,   *pANTLR3_INT16;
+typedef int32_t	    ANTLR3_INT32,   *pANTLR3_INT32;
+typedef int64_t	    ANTLR3_INT64,   *pANTLR3_INT64;
+
+typedef uint8_t	    ANTLR3_UINT8,   *pANTLR3_UINT8;
+typedef uint16_t    ANTLR3_UINT16,  *pANTLR3_UINT16;
+typedef uint32_t    ANTLR3_UINT32,  *pANTLR3_UINT32;
+typedef uint64_t    ANTLR3_UINT64,  *pANTLR3_UINT64;
+typedef uint64_t    ANTLR3_BITWORD, *pANTLR3_BITWORD;
+
+typedef uint32_t    ANTLR3_BOOLEAN, *pANTLR3_BOOLEAN;
+
+#define ANTLR3_INLINE   inline
+#define	ANTLR3_API
+
+typedef FILE *	    ANTLR3_FDSC;
+typedef	struct stat ANTLR3_FSTAT_STRUCT;
+
+#ifdef	ANTLR3_USE_64BIT
+#define	ANTLR3_FUNC_PTR(ptr)    (void *)((ANTLR3_UINT64)(ptr))
+#define ANTLR3_UINT64_CAST(ptr)	((ANTLR3_UINT64)(ptr))
+#define	ANTLR3_UINT32_CAST(ptr) (ANTLR3_UINT32)((ANTLR3_UINT64)(ptr))
+typedef ANTLR3_INT64		ANTLR3_MARKER;
+typedef ANTLR3_UINT64		ANTLR3_INTKEY;
+#else
+#define	ANTLR3_FUNC_PTR(ptr)	(void *)((ANTLR3_UINT32)(ptr))
+#define ANTLR3_UINT64_CAST(ptr) (ANTLR3_UINT64)((ANTLR3_UINT32)(ptr))
+#define	ANTLR3_UINT32_CAST(ptr)	(ANTLR3_UINT32)(ptr)
+typedef	ANTLR3_INT32		ANTLR3_MARKER;
+typedef ANTLR3_UINT32		ANTLR3_INTKEY;
+#endif
+#define	ANTLR3_UINT64_LIT(lit)	lit##ULL
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+#ifdef ANTLR3_USE_64BIT
+#define ANTLR3_TRIE_DEPTH 63
+#else
+#define ANTLR3_TRIE_DEPTH 31
+#endif
+/* Pre-declare the typedefs for all the interfaces, so that
+ * they can be inter-dependent and we can let the linker
+ * sort it out for us.
+ */
+#include    <antlr3interfaces.h>
+
+// Include the unicode.org conversion library header.
+//
+#include    <antlr3convertutf.h>
+
+/* Prototypes
+ */
+#ifndef ANTLR3_MALLOC
+/// Default definition of ANTLR3_MALLOC. You can override this before including
+/// antlr3.h if you wish to use your own implementation.
+///
+#define	ANTLR3_MALLOC(request)          malloc  ((size_t)(request))
+#endif
+
+#ifndef ANTLR3_CALLOC
+/// Default definition of ANTLR3_CALLOC. You can override this before including
+/// antlr3.h if you wish to use your own implementation.
+///
+#define	ANTLR3_CALLOC(numEl, elSize)    calloc  (numEl, (size_t)(elSize))
+#endif
+
+#ifndef ANTLR3_REALLOC
+/// Default definition of ANTLR3_REALLOC. You can override this before including
+/// antlr3.h if you wish to use your own implementation.
+///
+#define ANTLR3_REALLOC(current, request)    realloc ((void *)(current), (size_t)(request))
+#endif
+#ifndef ANTLR3_FREE
+/// Default definition of ANTLR3_FREE. You can override this before including
+/// antlr3.h if you wish to use your own implementation.
+///
+#define	ANTLR3_FREE(ptr)		free    ((void *)(ptr))
+#endif
+#ifndef ANTLR3_FREE_FUNC						
+/// Default definition of ANTLR3_FREE_FUNC. You can override this before including
+/// antlr3.h if you wish to use your own implementation.
+///
+#define	ANTLR3_FREE_FUNC		free
+#endif
+#ifndef ANTLR3_STRDUP
+/// Default definition of ANTLR3_STRDUP. You can override this before including
+/// antlr3.h if you wish to use your own implementation.
+///
+#define	ANTLR3_STRDUP(instr)		(pANTLR3_UINT8)(strdup  ((const char *)(instr)))
+#endif
+#ifndef ANTLR3_MEMCPY
+/// Default definition of ANTLR3_MEMCPY. You can override this before including
+/// antlr3.h if you wish to use your own implementation.
+///
+#define	ANTLR3_MEMCPY(target, source, size) memcpy((void *)(target), (const void *)(source), (size_t)(size))
+#endif
+#ifndef ANTLR3_MEMMOVE
+/// Default definition of ANTLR3_MEMMOVE. You can override this before including
+/// antlr3.h if you wish to use your own implementation.
+///
+#define	ANTLR3_MEMMOVE(target, source, size)    memmove((void *)(target), (const void *)(source), (size_t)(size))
+#endif
+#ifndef ANTLR3_MEMSET
+/// Default definition of ANTLR3_MEMSET. You can override this before including
+/// antlr3.h if you wish to use your own implementation.
+///
+#define	ANTLR3_MEMSET(target, byte, size)   memset((void *)(target), (int)(byte), (size_t)(size))
+#endif
+
+#ifndef	ANTLR3_PRINTF
+/// Default definition of printf; set this to something other than printf before including antlr3.h
+/// if your system does not have a printf. Note that you can define this to be <code>//</code>
+/// without harming the runtime.
+///
+#define	ANTLR3_PRINTF   printf
+#endif
+
+#ifndef	ANTLR3_FPRINTF
+/// Default definition of fprintf; set this to something other than fprintf before including antlr3.h
+/// if your system does not have a fprintf. Note that you can define this to be <code>//</code>
+/// without harming the runtime. 
+///
+#define	ANTLR3_FPRINTF	fprintf
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ANTLR3_API pANTLR3_INT_TRIE antlr3IntTrieNew    (ANTLR3_UINT32 depth);
+
+ANTLR3_API pANTLR3_BITSET   antlr3BitsetNew	(ANTLR3_UINT32 numBits);
+ANTLR3_API pANTLR3_BITSET   antlr3BitsetOf	(ANTLR3_INT32 bit, ...);
+ANTLR3_API pANTLR3_BITSET   antlr3BitsetList	(pANTLR3_HASH_TABLE list);
+ANTLR3_API pANTLR3_BITSET   antlr3BitsetCopy	(pANTLR3_BITSET_LIST blist);
+ANTLR3_API pANTLR3_BITSET   antlr3BitsetLoad    (pANTLR3_BITSET_LIST blist);
+ANTLR3_API void             antlr3BitsetSetAPI  (pANTLR3_BITSET bitset);
+
+
+ANTLR3_API pANTLR3_BASE_RECOGNIZER  antlr3BaseRecognizerNew                     (ANTLR3_UINT32 type, ANTLR3_UINT32 sizeHint, pANTLR3_RECOGNIZER_SHARED_STATE state);
+ANTLR3_API void			    antlr3RecognitionExceptionNew               (pANTLR3_BASE_RECOGNIZER recognizer);
+ANTLR3_API void			    antlr3MTExceptionNew                        (pANTLR3_BASE_RECOGNIZER recognizer);
+ANTLR3_API void			    antlr3MTNExceptionNew                       (pANTLR3_BASE_RECOGNIZER recognizer);
+ANTLR3_API pANTLR3_HASH_TABLE	    antlr3HashTableNew                          (ANTLR3_UINT32 sizeHint);
+ANTLR3_API ANTLR3_UINT32	    antlr3Hash                                  (void * key, ANTLR3_UINT32 keylen);
+ANTLR3_API pANTLR3_HASH_ENUM	    antlr3EnumNew                               (pANTLR3_HASH_TABLE table);
+ANTLR3_API pANTLR3_LIST		    antlr3ListNew                               (ANTLR3_UINT32 sizeHint);
+ANTLR3_API pANTLR3_VECTOR_FACTORY   antlr3VectorFactoryNew                      (ANTLR3_UINT32 sizeHint);
+ANTLR3_API pANTLR3_VECTOR	    antlr3VectorNew                             (ANTLR3_UINT32 sizeHint);
+ANTLR3_API pANTLR3_STACK	    antlr3StackNew                              (ANTLR3_UINT32 sizeHint);
+ANTLR3_API void                     antlr3SetVectorApi                          (pANTLR3_VECTOR vector, ANTLR3_UINT32 sizeHint);
+ANTLR3_API ANTLR3_UCHAR		    antlr3c8toAntlrc                            (ANTLR3_INT8 inc);
+ANTLR3_API pANTLR3_TOPO             antlr3TopoNew(void);
+
+ANTLR3_API pANTLR3_EXCEPTION	    antlr3ExceptionNew                          (ANTLR3_UINT32 exception, void * name, void * message, ANTLR3_BOOLEAN freeMessage);
+
+
+ANTLR3_API pANTLR3_INPUT_STREAM     antlr3FileStreamNew                         (pANTLR3_UINT8 fileName, ANTLR3_UINT32 encoding);
+ANTLR3_API pANTLR3_INPUT_STREAM     antlr3StringStreamNew                       (pANTLR3_UINT8 data, ANTLR3_UINT32 encoding, ANTLR3_UINT32 size, pANTLR3_UINT8 name);
+
+ANTLR3_API pANTLR3_INT_STREAM	    antlr3IntStreamNew                          (void);
+
+ANTLR3_API pANTLR3_STRING_FACTORY   antlr3StringFactoryNew                      (ANTLR3_UINT32 encoding);
+
+ANTLR3_API pANTLR3_COMMON_TOKEN	    antlr3CommonTokenNew                        (ANTLR3_UINT32 ttype);
+ANTLR3_API pANTLR3_TOKEN_FACTORY    antlr3TokenFactoryNew                       (pANTLR3_INPUT_STREAM input);
+ANTLR3_API void			    antlr3SetTokenAPI                           (pANTLR3_COMMON_TOKEN token);
+
+ANTLR3_API pANTLR3_LEXER	    antlr3LexerNewStream                        (ANTLR3_UINT32 sizeHint, pANTLR3_INPUT_STREAM input, pANTLR3_RECOGNIZER_SHARED_STATE state);
+ANTLR3_API pANTLR3_LEXER	    antlr3LexerNew                              (ANTLR3_UINT32 sizeHint, pANTLR3_RECOGNIZER_SHARED_STATE state);
+ANTLR3_API pANTLR3_PARSER	    antlr3ParserNewStreamDbg                    (ANTLR3_UINT32 sizeHint, pANTLR3_TOKEN_STREAM tstream, pANTLR3_DEBUG_EVENT_LISTENER dbg, pANTLR3_RECOGNIZER_SHARED_STATE state);
+ANTLR3_API pANTLR3_PARSER	    antlr3ParserNewStream                       (ANTLR3_UINT32 sizeHint, pANTLR3_TOKEN_STREAM tstream, pANTLR3_RECOGNIZER_SHARED_STATE state);
+ANTLR3_API pANTLR3_PARSER           antlr3ParserNew                             (ANTLR3_UINT32 sizeHint, pANTLR3_RECOGNIZER_SHARED_STATE state);
+
+ANTLR3_API pANTLR3_COMMON_TOKEN_STREAM  antlr3CommonTokenStreamSourceNew        (ANTLR3_UINT32 hint, pANTLR3_TOKEN_SOURCE source);
+ANTLR3_API pANTLR3_COMMON_TOKEN_STREAM	antlr3CommonTokenStreamNew              (ANTLR3_UINT32 hint);
+ANTLR3_API pANTLR3_COMMON_TOKEN_STREAM	antlr3CommonTokenDebugStreamSourceNew   (ANTLR3_UINT32 hint, pANTLR3_TOKEN_SOURCE source, pANTLR3_DEBUG_EVENT_LISTENER debugger);
+
+ANTLR3_API pANTLR3_BASE_TREE_ADAPTOR	ANTLR3_TREE_ADAPTORNew                  (pANTLR3_STRING_FACTORY strFactory);
+ANTLR3_API pANTLR3_BASE_TREE_ADAPTOR	ANTLR3_TREE_ADAPTORDebugNew             (pANTLR3_STRING_FACTORY strFactory, pANTLR3_DEBUG_EVENT_LISTENER	debugger);
+ANTLR3_API pANTLR3_COMMON_TREE		antlr3CommonTreeNew                     (void);
+ANTLR3_API pANTLR3_COMMON_TREE		antlr3CommonTreeNewFromTree             (pANTLR3_COMMON_TREE tree);
+ANTLR3_API pANTLR3_COMMON_TREE		antlr3CommonTreeNewFromToken            (pANTLR3_COMMON_TOKEN tree);
+ANTLR3_API pANTLR3_ARBORETUM		antlr3ArboretumNew                      (pANTLR3_STRING_FACTORY factory);
+ANTLR3_API void				antlr3SetCTAPI                          (pANTLR3_COMMON_TREE tree);
+ANTLR3_API pANTLR3_BASE_TREE		antlr3BaseTreeNew                       (pANTLR3_BASE_TREE tree);
+
+ANTLR3_API void				antlr3BaseTreeAdaptorInit               (pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_DEBUG_EVENT_LISTENER debugger);
+
+ANTLR3_API pANTLR3_TREE_PARSER		antlr3TreeParserNewStream               (ANTLR3_UINT32 sizeHint, pANTLR3_COMMON_TREE_NODE_STREAM ctnstream, pANTLR3_RECOGNIZER_SHARED_STATE state);
+
+ANTLR3_API ANTLR3_INT32			antlr3dfaspecialTransition              (void * ctx, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_INT_STREAM is, pANTLR3_CYCLIC_DFA dfa, ANTLR3_INT32 s);
+ANTLR3_API ANTLR3_INT32			antlr3dfaspecialStateTransition         (void * ctx, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_INT_STREAM is, pANTLR3_CYCLIC_DFA dfa, ANTLR3_INT32 s);
+ANTLR3_API ANTLR3_INT32			antlr3dfapredict                        (void * ctx, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_INT_STREAM is, pANTLR3_CYCLIC_DFA cdfa);
+
+ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM  antlr3CommonTreeNodeStreamNewTree   (pANTLR3_BASE_TREE tree, ANTLR3_UINT32 hint);
+ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM  antlr3CommonTreeNodeStreamNew       (pANTLR3_STRING_FACTORY strFactory, ANTLR3_UINT32 hint);
+ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM  antlr3UnbufTreeNodeStreamNewTree    (pANTLR3_BASE_TREE tree, ANTLR3_UINT32 hint);
+ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM  antlr3UnbufTreeNodeStreamNew        (pANTLR3_STRING_FACTORY strFactory, ANTLR3_UINT32 hint);
+ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM  antlr3CommonTreeNodeStreamNewStream (pANTLR3_COMMON_TREE_NODE_STREAM inStream);
+ANTLR3_API pANTLR3_TREE_NODE_STREAM         antlr3TreeNodeStreamNew             (void);
+ANTLR3_API void				    fillBufferExt                       (pANTLR3_COMMON_TOKEN_STREAM tokenStream);
+
+ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
+    antlr3RewriteRuleTOKENStreamNewAE	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description);
+ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
+    antlr3RewriteRuleTOKENStreamNewAEE	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement);
+ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
+    antlr3RewriteRuleTOKENStreamNewAEV	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector);
+
+ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
+    antlr3RewriteRuleNODEStreamNewAE	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description);
+ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
+    antlr3RewriteRuleNODEStreamNewAEE	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement);
+ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
+    antlr3RewriteRuleNODEStreamNewAEV	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector);
+
+ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
+    antlr3RewriteRuleSubtreeStreamNewAE	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description);
+ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
+    antlr3RewriteRuleSubtreeStreamNewAEE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement);
+ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
+    antlr3RewriteRuleSubtreeStreamNewAEV(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector);
+
+ANTLR3_API pANTLR3_DEBUG_EVENT_LISTENER antlr3DebugListenerNew(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* _ANTLR3DEFS_H	*/
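A brief sketch of the override mechanism described in the comments above: defining the allocation macros (and, for generated code, one of the size-hint symbols) before antlr3.h is pulled in routes allocation through application-supplied wrappers. myMalloc, myRealloc and myFree are hypothetical names standing in for whatever the application actually provides.

    #include <stddef.h>

    /* Hypothetical application-provided allocation wrappers. */
    void *myMalloc (size_t request);
    void *myRealloc(void *current, size_t request);
    void  myFree   (void *ptr);

    /* Select the smaller table size hints used by generated code. */
    #define ANTLR3_SMALL

    /* Route runtime allocation through the wrappers; any macro left
     * undefined falls back to the malloc/realloc/free defaults above.
     */
    #define ANTLR3_MALLOC(request)              myMalloc((size_t)(request))
    #define ANTLR3_REALLOC(current, request)    myRealloc((void *)(current), (size_t)(request))
    #define ANTLR3_FREE(ptr)                    myFree((void *)(ptr))
    #define ANTLR3_FREE_FUNC                    myFree

    #include <antlr3.h>

These overrides only affect translation units compiled with them in scope, so they are typically placed in a shared header that is included ahead of antlr3.h everywhere.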
diff --git a/antlr-3.4/runtime/C/include/antlr3encodings.h b/runtime/C/include/antlr3encodings.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3encodings.h
rename to runtime/C/include/antlr3encodings.h
diff --git a/antlr-3.4/runtime/C/include/antlr3errors.h b/runtime/C/include/antlr3errors.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3errors.h
rename to runtime/C/include/antlr3errors.h
diff --git a/antlr-3.4/runtime/C/include/antlr3exception.h b/runtime/C/include/antlr3exception.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3exception.h
rename to runtime/C/include/antlr3exception.h
diff --git a/antlr-3.4/runtime/C/include/antlr3filestream.h b/runtime/C/include/antlr3filestream.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3filestream.h
rename to runtime/C/include/antlr3filestream.h
diff --git a/antlr-3.4/runtime/C/include/antlr3input.h b/runtime/C/include/antlr3input.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3input.h
rename to runtime/C/include/antlr3input.h
diff --git a/antlr-3.4/runtime/C/include/antlr3interfaces.h b/runtime/C/include/antlr3interfaces.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3interfaces.h
rename to runtime/C/include/antlr3interfaces.h
diff --git a/antlr-3.4/runtime/C/include/antlr3intstream.h b/runtime/C/include/antlr3intstream.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3intstream.h
rename to runtime/C/include/antlr3intstream.h
diff --git a/antlr-3.4/runtime/C/include/antlr3lexer.h b/runtime/C/include/antlr3lexer.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3lexer.h
rename to runtime/C/include/antlr3lexer.h
diff --git a/antlr-3.4/runtime/C/include/antlr3memory.h b/runtime/C/include/antlr3memory.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3memory.h
rename to runtime/C/include/antlr3memory.h
diff --git a/antlr-3.4/runtime/C/include/antlr3parser.h b/runtime/C/include/antlr3parser.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3parser.h
rename to runtime/C/include/antlr3parser.h
diff --git a/antlr-3.4/runtime/C/include/antlr3parsetree.h b/runtime/C/include/antlr3parsetree.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3parsetree.h
rename to runtime/C/include/antlr3parsetree.h
diff --git a/antlr-3.4/runtime/C/include/antlr3recognizersharedstate.h b/runtime/C/include/antlr3recognizersharedstate.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3recognizersharedstate.h
rename to runtime/C/include/antlr3recognizersharedstate.h
diff --git a/antlr-3.4/runtime/C/include/antlr3rewritestreams.h b/runtime/C/include/antlr3rewritestreams.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3rewritestreams.h
rename to runtime/C/include/antlr3rewritestreams.h
diff --git a/antlr-3.4/runtime/C/include/antlr3string.h b/runtime/C/include/antlr3string.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3string.h
rename to runtime/C/include/antlr3string.h
diff --git a/antlr-3.4/runtime/C/include/antlr3tokenstream.h b/runtime/C/include/antlr3tokenstream.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3tokenstream.h
rename to runtime/C/include/antlr3tokenstream.h
diff --git a/antlr-3.4/runtime/C/include/antlr3treeparser.h b/runtime/C/include/antlr3treeparser.h
similarity index 100%
rename from antlr-3.4/runtime/C/include/antlr3treeparser.h
rename to runtime/C/include/antlr3treeparser.h
diff --git a/antlr-3.4/runtime/C/m4/dummy b/runtime/C/m4/dummy
similarity index 100%
rename from antlr-3.4/runtime/C/m4/dummy
rename to runtime/C/m4/dummy
diff --git a/runtime/C/src/antlr3baserecognizer.c b/runtime/C/src/antlr3baserecognizer.c
new file mode 100644
index 0000000..8c6b105
--- /dev/null
+++ b/runtime/C/src/antlr3baserecognizer.c
@@ -0,0 +1,2243 @@
+/** \file
+ * Contains the base functions that all recognizers require.
+ * Any function can be overridden by a lexer/parser/tree parser or by the
+ * ANTLR3 programmer.
+ * 
+ * \addtogroup pANTLR3_BASE_RECOGNIZER
+ * @{
+ */
+#include    <antlr3baserecognizer.h>
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef	ANTLR3_WINDOWS
+#pragma warning( disable : 4100 )
+#endif
+
+/* Interface functions - the standard implementations cover the parser and
+ * tree parser almost completely, but are overridden by the parser or tree
+ * parser as needed. The lexer overrides most of these functions.
+ */
+static void					beginResync					(pANTLR3_BASE_RECOGNIZER recognizer);
+static pANTLR3_BITSET		computeErrorRecoverySet	    (pANTLR3_BASE_RECOGNIZER recognizer);
+static void					endResync					(pANTLR3_BASE_RECOGNIZER recognizer);
+static void					beginBacktrack				(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 level);
+static void					endBacktrack				(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 level, ANTLR3_BOOLEAN successful);
+
+static void *				match						(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow);
+static void					matchAny					(pANTLR3_BASE_RECOGNIZER recognizer);
+static void					mismatch					(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow);
+static ANTLR3_BOOLEAN		mismatchIsUnwantedToken		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, ANTLR3_UINT32 ttype);
+static ANTLR3_BOOLEAN		mismatchIsMissingToken		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, pANTLR3_BITSET_LIST follow);
+static void					reportError					(pANTLR3_BASE_RECOGNIZER recognizer);
+static pANTLR3_BITSET		computeCSRuleFollow			(pANTLR3_BASE_RECOGNIZER recognizer);
+static pANTLR3_BITSET		combineFollows				(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_BOOLEAN exact);
+static void					displayRecognitionError	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_UINT8 * tokenNames);
+static void					recover						(pANTLR3_BASE_RECOGNIZER recognizer);
+static void	*				recoverFromMismatchedToken  (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow);
+static void	*				recoverFromMismatchedSet    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET_LIST follow);
+static ANTLR3_BOOLEAN		recoverFromMismatchedElement(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET_LIST follow);
+static void					consumeUntil				(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 tokenType);
+static void					consumeUntilSet				(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET set);
+static pANTLR3_STACK		getRuleInvocationStack	    (pANTLR3_BASE_RECOGNIZER recognizer);
+static pANTLR3_STACK		getRuleInvocationStackNamed (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_UINT8 name);
+static pANTLR3_HASH_TABLE	toStrings					(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_HASH_TABLE);
+static ANTLR3_MARKER		getRuleMemoization			(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_INTKEY ruleIndex, ANTLR3_MARKER ruleParseStart);
+static ANTLR3_BOOLEAN		alreadyParsedRule			(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_MARKER ruleIndex);
+static void					memoize						(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_MARKER ruleIndex, ANTLR3_MARKER ruleParseStart);
+static ANTLR3_BOOLEAN		synpred						(pANTLR3_BASE_RECOGNIZER recognizer, void * ctx, void (*predicate)(void * ctx));
+static void					reset						(pANTLR3_BASE_RECOGNIZER recognizer);
+static void					freeBR						(pANTLR3_BASE_RECOGNIZER recognizer);
+static void *				getCurrentInputSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream);
+static void *				getMissingSymbol			(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
+															ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow);
+static ANTLR3_UINT32		getNumberOfSyntaxErrors		(pANTLR3_BASE_RECOGNIZER recognizer);
+
+ANTLR3_API pANTLR3_BASE_RECOGNIZER
+antlr3BaseRecognizerNew(ANTLR3_UINT32 type, ANTLR3_UINT32 sizeHint, pANTLR3_RECOGNIZER_SHARED_STATE state)
+{
+    pANTLR3_BASE_RECOGNIZER recognizer;
+
+    // Allocate memory for the structure
+    //
+    recognizer	    = (pANTLR3_BASE_RECOGNIZER) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_BASE_RECOGNIZER));
+
+    if	(recognizer == NULL)
+    {
+		// Allocation failed
+		//
+		return	NULL;
+    }
+
+	
+	// If we have been supplied with a pre-existing recognizer state
+	// then we just install it, otherwise we must create one from scratch
+	//
+	if	(state == NULL)
+	{
+		recognizer->state = (pANTLR3_RECOGNIZER_SHARED_STATE) ANTLR3_CALLOC(1, (size_t)sizeof(ANTLR3_RECOGNIZER_SHARED_STATE));
+
+		if	(recognizer->state == NULL)
+		{
+			ANTLR3_FREE(recognizer);
+			return	NULL;
+		}
+
+		// Initialize any new recognizer state
+		//
+		recognizer->state->errorRecovery	= ANTLR3_FALSE;
+		recognizer->state->lastErrorIndex	= -1;
+		recognizer->state->failed		= ANTLR3_FALSE;
+		recognizer->state->errorCount		= 0;
+		recognizer->state->backtracking		= 0;
+		recognizer->state->following		= NULL;
+		recognizer->state->ruleMemo		= NULL;
+		recognizer->state->tokenNames		= NULL;
+		recognizer->state->sizeHint             = sizeHint;
+		recognizer->state->tokSource		= NULL;
+                recognizer->state->tokFactory           = NULL;
+
+		// Rather than check to see if we must initialize
+		// the stack every time we are asked for a new rewrite stream
+		// we just always create an empty stack and then just
+		// free it when the base recognizer is freed.
+		//
+		recognizer->state->rStreams		= antlr3VectorNew(0);  // We don't know the size.
+
+		if	(recognizer->state->rStreams == NULL)
+		{
+			// Out of memory
+			//
+			ANTLR3_FREE(recognizer->state);
+			ANTLR3_FREE(recognizer);
+			return	NULL;
+		}
+	}
+	else
+	{
+		// Install the one we were given, and do not reset it here
+		// as it will either already have been initialized or will
+		// be in a state that needs to be preserved.
+		//
+		recognizer->state = state;
+	}
+		
+    // Install the BR API
+    //
+    recognizer->alreadyParsedRule           = alreadyParsedRule;
+    recognizer->beginResync                 = beginResync;
+    recognizer->combineFollows              = combineFollows;
+    recognizer->beginBacktrack              = beginBacktrack;
+    recognizer->endBacktrack                = endBacktrack;
+    recognizer->computeCSRuleFollow         = computeCSRuleFollow;
+    recognizer->computeErrorRecoverySet     = computeErrorRecoverySet;
+    recognizer->consumeUntil                = consumeUntil;
+    recognizer->consumeUntilSet             = consumeUntilSet;
+    recognizer->displayRecognitionError     = displayRecognitionError;
+    recognizer->endResync                   = endResync;
+    recognizer->exConstruct                 = antlr3MTExceptionNew;
+    recognizer->getRuleInvocationStack      = getRuleInvocationStack;
+    recognizer->getRuleInvocationStackNamed = getRuleInvocationStackNamed;
+    recognizer->getRuleMemoization          = getRuleMemoization;
+    recognizer->match                       = match;
+    recognizer->matchAny                    = matchAny;
+    recognizer->memoize                     = memoize;
+    recognizer->mismatch                    = mismatch;
+    recognizer->mismatchIsUnwantedToken     = mismatchIsUnwantedToken;
+    recognizer->mismatchIsMissingToken      = mismatchIsMissingToken;
+    recognizer->recover                     = recover;
+    recognizer->recoverFromMismatchedElement= recoverFromMismatchedElement;
+    recognizer->recoverFromMismatchedSet    = recoverFromMismatchedSet;
+    recognizer->recoverFromMismatchedToken  = recoverFromMismatchedToken;
+    recognizer->getNumberOfSyntaxErrors     = getNumberOfSyntaxErrors;
+    recognizer->reportError                 = reportError;
+    recognizer->reset                       = reset;
+    recognizer->synpred                     = synpred;
+    recognizer->toStrings                   = toStrings;
+    recognizer->getCurrentInputSymbol       = getCurrentInputSymbol;
+    recognizer->getMissingSymbol            = getMissingSymbol;
+    recognizer->debugger                    = NULL;
+
+    recognizer->free = freeBR;
+
+    /* Initialize variables
+     */
+    recognizer->type			= type;
+
+
+    return  recognizer;
+}
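+
+// Illustrative only: a minimal sketch of how a base recognizer might be created and
+// released directly. Generated parsers normally obtain one indirectly through the
+// parser/tree parser constructors rather than calling this by hand, so treat the
+// snippet below as an assumption-laden example, not part of the generated API.
+//
+//    pANTLR3_BASE_RECOGNIZER rec;
+//
+//    rec = antlr3BaseRecognizerNew(ANTLR3_TYPE_PARSER, 0, NULL);   // 0 = no size hint
+//    if (rec == NULL)
+//    {
+//        // out of memory
+//    }
+//    ...
+//    rec->free(rec);                                               // invokes freeBR below
+//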
+static void	
+freeBR	    (pANTLR3_BASE_RECOGNIZER recognizer)
+{
+    pANTLR3_EXCEPTION thisE;
+
+	// Did we have a state allocated?
+	//
+	if	(recognizer->state != NULL)
+	{
+		// Free any rule memoization we set up
+		//
+		if	(recognizer->state->ruleMemo != NULL)
+		{
+			recognizer->state->ruleMemo->free(recognizer->state->ruleMemo);
+			recognizer->state->ruleMemo = NULL;
+		}
+
+		// Free any exception space we have left around
+		//
+		thisE = recognizer->state->exception;
+		if	(thisE != NULL)
+		{
+			thisE->freeEx(thisE);
+		}
+
+		// Free any rewrite streams we have allocated
+		//
+		if	(recognizer->state->rStreams != NULL)
+		{
+			recognizer->state->rStreams->free(recognizer->state->rStreams);
+		}
+
+		// Free up any token factory we created (error recovery for instance)
+		//
+		if	(recognizer->state->tokFactory != NULL)
+		{
+			recognizer->state->tokFactory->close(recognizer->state->tokFactory);
+		}
+		// Free the shared state memory
+		//
+		ANTLR3_FREE(recognizer->state);
+	}
+
+	// Free the actual recognizer space
+	//
+    ANTLR3_FREE(recognizer);
+}
+
+/**
+ * Creates a new Mismatched Token Exception and inserts it into the recognizer
+ * exception stack.
+ * 
+ * \param recognizer
+ * Context pointer for this recognizer
+ * 
+ */
+ANTLR3_API	void
+antlr3MTExceptionNew(pANTLR3_BASE_RECOGNIZER recognizer)
+{
+    /* Create a basic recognition exception structure
+     */
+    antlr3RecognitionExceptionNew(recognizer);
+
+    /* Now update it to indicate this is a Mismatched token exception
+     */
+    recognizer->state->exception->name		= ANTLR3_MISMATCHED_EX_NAME;
+    recognizer->state->exception->type		= ANTLR3_MISMATCHED_TOKEN_EXCEPTION;
+
+    return;
+}
+
+ANTLR3_API	void
+antlr3RecognitionExceptionNew(pANTLR3_BASE_RECOGNIZER recognizer)
+{
+	pANTLR3_EXCEPTION				ex;
+	pANTLR3_LEXER					lexer;
+	pANTLR3_PARSER					parser;
+	pANTLR3_TREE_PARSER				tparser;
+
+	pANTLR3_INPUT_STREAM			ins;
+	pANTLR3_INT_STREAM				is;
+	pANTLR3_COMMON_TOKEN_STREAM	    cts;
+	pANTLR3_TREE_NODE_STREAM	    tns;
+
+	ins	    = NULL;
+	cts	    = NULL;
+	tns	    = NULL;
+	is	    = NULL;
+	lexer   = NULL;
+	parser  = NULL;
+	tparser = NULL;
+
+	switch	(recognizer->type)
+	{
+	case	ANTLR3_TYPE_LEXER:
+
+		lexer	= (pANTLR3_LEXER) (recognizer->super);
+		ins	= lexer->input;
+		is	= ins->istream;
+
+		break;
+
+	case	ANTLR3_TYPE_PARSER:
+
+		parser  = (pANTLR3_PARSER) (recognizer->super);
+		cts	= (pANTLR3_COMMON_TOKEN_STREAM)(parser->tstream->super);
+		is	= parser->tstream->istream;
+
+		break;
+
+	case	ANTLR3_TYPE_TREE_PARSER:
+
+		tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+		tns	= tparser->ctnstream->tnstream;
+		is	= tns->istream;
+
+		break;
+
+	default:
+
+		ANTLR3_FPRINTF(stderr, "Base recognizer function antlr3RecognitionExceptionNew called by unknown parser type - provide override for this function\n");
+		return;
+
+		break;
+	}
+
+	/* Create a basic exception structure
+	 */
+	ex = antlr3ExceptionNew(ANTLR3_RECOGNITION_EXCEPTION,
+		(void *)ANTLR3_RECOGNITION_EX_NAME,
+		NULL,
+		ANTLR3_FALSE);
+
+	/* Rest of information depends on the base type of the 
+	 * input stream.
+	 */
+	switch  (is->type & ANTLR3_INPUT_MASK)
+	{
+	case    ANTLR3_CHARSTREAM:
+
+		ex->c			= is->_LA		    	(is, 1);					/* Current input character			*/
+		ex->line		= ins->getLine			(ins);						/* Line number comes from stream		*/
+		ex->charPositionInLine	= ins->getCharPositionInLine	(ins);	    /* Line offset also comes from the stream   */
+		ex->index		= is->index			(is);
+		ex->streamName		= ins->fileName;
+		ex->message		= "Unexpected character";
+		break;
+
+	case    ANTLR3_TOKENSTREAM:
+
+		ex->token		= cts->tstream->_LT						(cts->tstream, 1);	    /* Current input token			    */
+		ex->line		= ((pANTLR3_COMMON_TOKEN)(ex->token))->getLine			((pANTLR3_COMMON_TOKEN)(ex->token));
+		ex->charPositionInLine	= ((pANTLR3_COMMON_TOKEN)(ex->token))->getCharPositionInLine	((pANTLR3_COMMON_TOKEN)(ex->token));
+		ex->index		= cts->tstream->istream->index					(cts->tstream->istream);
+		if	(((pANTLR3_COMMON_TOKEN)(ex->token))->type == ANTLR3_TOKEN_EOF)
+		{
+			ex->streamName		= NULL;
+		}
+		else
+		{
+			ex->streamName		= ((pANTLR3_COMMON_TOKEN)(ex->token))->input->fileName;
+		}
+		ex->message		= "Unexpected token";
+		break;
+
+	case    ANTLR3_COMMONTREENODE:
+
+		ex->token		= tns->_LT						    (tns, 1);	    /* Current input tree node			    */
+		ex->line		= ((pANTLR3_BASE_TREE)(ex->token))->getLine		    ((pANTLR3_BASE_TREE)(ex->token));
+		ex->charPositionInLine	= ((pANTLR3_BASE_TREE)(ex->token))->getCharPositionInLine   ((pANTLR3_BASE_TREE)(ex->token));
+		ex->index		= tns->istream->index					    (tns->istream);
+
+		// Are you ready for this? Deep breath now...
+		//
+		{
+			pANTLR3_COMMON_TREE tnode;
+
+			tnode		= ((pANTLR3_COMMON_TREE)(((pANTLR3_BASE_TREE)(ex->token))->super));
+
+			if	(tnode->token    == NULL)
+			{
+				ex->streamName = ((pANTLR3_BASE_TREE)(ex->token))->strFactory->newStr(((pANTLR3_BASE_TREE)(ex->token))->strFactory, (pANTLR3_UINT8)"-unknown source-");
+			}
+			else
+			{
+				if	(tnode->token->input == NULL)
+				{
+					ex->streamName		= NULL;
+				}
+				else
+				{
+					ex->streamName		= tnode->token->input->fileName;
+				}
+			}
+			ex->message		= "Unexpected node";
+		}
+		break;
+	}
+
+	ex->input						= is;
+	ex->nextException				= recognizer->state->exception;	/* So we don't leak the memory */
+	recognizer->state->exception	= ex;
+	recognizer->state->error	    = ANTLR3_TRUE;	    /* Exception is outstanding	*/
+
+	return;
+}
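+
+// Illustrative only: because each new exception is pushed onto the head of the
+// recognizer's exception list via nextException (see above), any outstanding
+// exceptions can be inspected with a simple walk, sketched here:
+//
+//    pANTLR3_EXCEPTION e;
+//
+//    for (e = recognizer->state->exception; e != NULL; e = e->nextException)
+//    {
+//        ANTLR3_FPRINTF(stderr, "line %d: %s\n", (int)(e->line), (char *)(e->message));
+//    }
+//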
+
+
+/// Match current input symbol against ttype.  Upon error, do one token
+/// insertion or deletion if possible.  
+/// To turn off single token insertion or deletion error
+/// recovery, override mismatchRecover() and have it call
+/// plain mismatch(), which does not recover.  Then any error
+/// in a rule will cause an exception and immediate exit from
+/// rule.  Rule would recover by resynchronizing to the set of
+/// symbols that can follow rule ref.
+///
+static void *
+match(	pANTLR3_BASE_RECOGNIZER recognizer,
+		ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow)
+{
+    pANTLR3_PARSER			parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+	void					* matchedSymbol;
+
+    switch	(recognizer->type)
+    {
+		case	ANTLR3_TYPE_PARSER:
+
+			parser  = (pANTLR3_PARSER) (recognizer->super);
+			tparser	= NULL;
+			is	= parser->tstream->istream;
+
+			break;
+
+		case	ANTLR3_TYPE_TREE_PARSER:
+
+			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+			parser	= NULL;
+			is	= tparser->ctnstream->tnstream->istream;
+
+			break;
+
+		default:
+		    
+			ANTLR3_FPRINTF(stderr, "Base recognizer function 'match' called by unknown parser type - provide override for this function\n");
+			return ANTLR3_FALSE;
+
+			break;
+    }
+
+	// Pick up the current input token/node for assignment to labels
+	//
+	matchedSymbol = recognizer->getCurrentInputSymbol(recognizer, is);
+
+    if	(is->_LA(is, 1) == ttype)
+    {
+		// The token was the one we were told to expect
+		//
+		is->consume(is);									// Consume that token from the stream
+		recognizer->state->errorRecovery	= ANTLR3_FALSE;	// Not in error recovery now (if we were)
+		recognizer->state->failed			= ANTLR3_FALSE;	// The match was a success
+		return matchedSymbol;								// We are done
+    }
+
+    // We did not find the expected token type, if we are backtracking then
+    // we just set the failed flag and return.
+    //
+    if	(recognizer->state->backtracking > 0)
+    {
+		// Backtracking is going on
+		//
+		recognizer->state->failed  = ANTLR3_TRUE;
+		return matchedSymbol;
+	}
+
+    // We did not find the expected token and there is no backtracking
+    // going on, so we mismatch, which creates an exception in the recognizer exception
+    // stack.
+    //
+	matchedSymbol = recognizer->recoverFromMismatchedToken(recognizer, ttype, follow);
+    return matchedSymbol;
+}
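+
+// Illustrative only: generated rule functions reach match() through the recognizer
+// vtable, roughly as in this hypothetical sketch, where MY_TOKEN and
+// FOLLOW_MY_TOKEN_in_myRule stand in for a generated token type and its generated
+// follow bitset list.
+//
+//    void * tok;
+//
+//    tok = recognizer->match(recognizer, MY_TOKEN, &FOLLOW_MY_TOKEN_in_myRule);
+//    if (recognizer->state->failed == ANTLR3_TRUE)
+//    {
+//        return;         // backtracking/predicate failure - bail out of the rule
+//    }
+//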
+
+/// Consumes the next token, whatever it is, and resets the recognizer state
+/// so that it is not in error.
+///
+/// \param recognizer
+/// Recognizer context pointer
+///
+static void
+matchAny(pANTLR3_BASE_RECOGNIZER recognizer)
+{
+    pANTLR3_PARSER	    parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+
+    switch	(recognizer->type)
+    {
+		case	ANTLR3_TYPE_PARSER:
+
+			parser  = (pANTLR3_PARSER) (recognizer->super);
+			tparser	= NULL;
+			is	= parser->tstream->istream;
+
+			break;
+
+		case	ANTLR3_TYPE_TREE_PARSER:
+
+			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+			parser	= NULL;
+			is	= tparser->ctnstream->tnstream->istream;
+
+			break;
+
+		default:
+		    
+			ANTLR3_FPRINTF(stderr, "Base recognizer function 'matchAny' called by unknown parser type - provide override for this function\n");
+			return;
+
+		break;
+    }
+    recognizer->state->errorRecovery	= ANTLR3_FALSE;
+    recognizer->state->failed		    = ANTLR3_FALSE;
+    is->consume(is);
+
+    return;
+}
+///
+///
+static ANTLR3_BOOLEAN
+mismatchIsUnwantedToken(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, ANTLR3_UINT32 ttype)
+{
+	ANTLR3_UINT32 nextt;
+
+	nextt = is->_LA(is, 2);
+
+	if	(nextt == ttype)
+	{
+		if	(recognizer->state->exception != NULL)
+		{
+			recognizer->state->exception->expecting = nextt;
+		}
+		return ANTLR3_TRUE;		// This token is unknown, but the next one is the one we wanted
+	}
+	else
+	{
+		return ANTLR3_FALSE;	// Neither this token, nor the one following is the one we wanted
+	}
+}
+
+///
+///
+static ANTLR3_BOOLEAN
+mismatchIsMissingToken(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, pANTLR3_BITSET_LIST follow)
+{
+	ANTLR3_BOOLEAN	retcode;
+	pANTLR3_BITSET	followClone;
+	pANTLR3_BITSET	viableTokensFollowingThisRule;
+
+	if	(follow == NULL)
+	{
+		// There is no information about the tokens that can follow the last one
+		// hence we must say that the current one we found is not a member of the 
+		// follow set and does not indicate a missing token. We will just consume this
+		// single token and see if the parser works it out from there.
+		//
+		return	ANTLR3_FALSE;
+	}
+
+	followClone						= NULL;
+	viableTokensFollowingThisRule	= NULL;
+
+	// The C bitset maps are laid down at compile time by the
+	// C code generation. Hence we cannot remove things from them
+	// and so on. So, in order to remove EOR (if we need to) then
+	// we clone the static bitset.
+	//
+	followClone = antlr3BitsetLoad(follow);
+	if	(followClone == NULL)
+	{
+		return ANTLR3_FALSE;
+	}
+
+	// Compute what can follow this grammar reference
+	//
+	if	(followClone->isMember(followClone, ANTLR3_EOR_TOKEN_TYPE))
+	{
+		// EOR can follow, but if we are not the start symbol, we
+		// need to remove it.
+		//
+		//if	(recognizer->state->following->vector->count >= 0) ml: always true
+		{
+			followClone->remove(followClone, ANTLR3_EOR_TOKEN_TYPE);
+		}
+
+		// Now compute the viable tokens that can follow this rule, according to context
+		// and make them part of the follow set.
+		//
+		viableTokensFollowingThisRule = recognizer->computeCSRuleFollow(recognizer);
+		followClone->borInPlace(followClone, viableTokensFollowingThisRule);
+	}
+
+	/// if current token is consistent with what could come after set
+	/// then we know we're missing a token; error recovery is free to
+	/// "insert" the missing token
+	///
+	/// BitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
+	/// in the follow set to indicate that falling off the end of the start symbol is
+	/// in the set (EOF can follow).
+	///
+	if	(		followClone->isMember(followClone, is->_LA(is, 1))
+			||	followClone->isMember(followClone, ANTLR3_EOR_TOKEN_TYPE)
+		)
+	{
+		retcode = ANTLR3_TRUE;
+	}
+	else
+	{
+		retcode	= ANTLR3_FALSE;
+	}
+
+	if	(viableTokensFollowingThisRule != NULL)
+	{
+		viableTokensFollowingThisRule->free(viableTokensFollowingThisRule);
+	}
+	if	(followClone != NULL)
+	{
+		followClone->free(followClone);
+	}
+
+	return retcode;
+
+}
+
+/// Factor out what to do upon token mismatch so tree parsers can behave
+/// differently.  Override and call mismatchRecover(input, ttype, follow)
+/// to get single token insertion and deletion.  Use this to turn off
+/// single token insertion and deletion. Override mismatchRecover
+/// to call this instead.
+///
+/// \remark mismatch only works for parsers and must be overridden for anything else.
+///
+static	void
+mismatch(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow)
+{
+    pANTLR3_PARSER	    parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+
+    // Install a mismatched token exception in the exception stack
+    //
+    antlr3MTExceptionNew(recognizer);
+    recognizer->state->exception->expecting    = ttype;
+
+    switch	(recognizer->type)
+    {
+		case	ANTLR3_TYPE_PARSER:
+
+			parser  = (pANTLR3_PARSER) (recognizer->super);
+			tparser	= NULL;
+			is	= parser->tstream->istream;
+
+			break;
+
+		default:
+		    
+			ANTLR3_FPRINTF(stderr, "Base recognizer function 'mismatch' called by unknown parser type - provide override for this function\n");
+			return;
+
+			break;
+    }
+
+	if	(mismatchIsUnwantedToken(recognizer, is, ttype))
+	{
+		// Create a basic recognition exception structure
+		//
+	    antlr3RecognitionExceptionNew(recognizer);
+		
+		// Now update it to indicate this is an unwanted token exception
+		//
+		recognizer->state->exception->name		= ANTLR3_UNWANTED_TOKEN_EXCEPTION_NAME;
+		recognizer->state->exception->type		= ANTLR3_UNWANTED_TOKEN_EXCEPTION;
+
+		return;
+	}
+	
+	if	(mismatchIsMissingToken(recognizer, is, follow))
+	{
+		// Create a basic recognition exception structure
+		//
+	    antlr3RecognitionExceptionNew(recognizer);
+		
+		// Now update it to indicate this is a missing token exception
+		//
+		recognizer->state->exception->name		= ANTLR3_MISSING_TOKEN_EXCEPTION_NAME;
+		recognizer->state->exception->type		= ANTLR3_MISSING_TOKEN_EXCEPTION;
+
+		return;
+	}
+
+	// Just a mismatched token is all we can determine
+	//
+	antlr3MTExceptionNew(recognizer);
+
+	return;
+}
+/// Report a recognition problem.
+///
+/// This method sets errorRecovery to indicate the parser is recovering,
+/// not parsing.  Once in recovery mode, no errors are generated.
+/// To get out of recovery mode, the parser must successfully match
+/// a token (after a resync).  So it will go:
+///
+///		1. error occurs
+///		2. enter recovery mode, report error
+///		3. consume until token found in resynch set
+///		4. try to resume parsing
+///		5. next match() will reset errorRecovery mode
+///
+/// If you override, make sure to update errorCount if you care about that.
+///
+static void			
+reportError		    (pANTLR3_BASE_RECOGNIZER recognizer)
+{
+    	// Invoke the debugger event if there is a debugger listening to us
+	//
+	if	(recognizer->debugger != NULL)
+	{
+		recognizer->debugger->recognitionException(recognizer->debugger, recognizer->state->exception);
+	}
+
+    if	(recognizer->state->errorRecovery == ANTLR3_TRUE)
+    {
+		// Already in error recovery so don't display another error while doing so
+		//
+		return;
+    }
+
+    // Signal we are in error recovery now
+    //
+    recognizer->state->errorRecovery = ANTLR3_TRUE;
+	
+	// Indicate this recognizer had an error while processing.
+	//
+	recognizer->state->errorCount++;
+
+	// Call the error display routine
+	//
+    recognizer->displayRecognitionError(recognizer, recognizer->state->tokenNames);
+}
+
+static void
+beginBacktrack		(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 level)
+{
+	if	(recognizer->debugger != NULL)
+	{
+		recognizer->debugger->beginBacktrack(recognizer->debugger, level);
+	}
+}
+
+static void
+endBacktrack		(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 level, ANTLR3_BOOLEAN successful)
+{
+	if	(recognizer->debugger != NULL)
+	{
+		recognizer->debugger->endBacktrack(recognizer->debugger, level, successful);
+	}
+}
+static void			
+beginResync		    (pANTLR3_BASE_RECOGNIZER recognizer)
+{
+	if	(recognizer->debugger != NULL)
+	{
+		recognizer->debugger->beginResync(recognizer->debugger);
+	}
+}
+
+static void			
+endResync		    (pANTLR3_BASE_RECOGNIZER recognizer)
+{
+	if	(recognizer->debugger != NULL)
+	{
+		recognizer->debugger->endResync(recognizer->debugger);
+	}
+}
+
+/// Compute the error recovery set for the current rule.
+/// Documentation below is from the Java implementation.
+///
+/// During rule invocation, the parser pushes the set of tokens that can
+/// follow that rule reference on the stack; this amounts to
+/// computing FIRST of what follows the rule reference in the
+/// enclosing rule. This local follow set only includes tokens
+/// from within the rule; i.e., the FIRST computation done by
+/// ANTLR stops at the end of a rule.
+//
+/// EXAMPLE
+//
+/// When you find a "no viable alt exception", the input is not
+/// consistent with any of the alternatives for rule r.  The best
+/// thing to do is to consume tokens until you see something that
+/// can legally follow a call to r *or* any rule that called r.
+/// You don't want the exact set of viable next tokens because the
+/// input might just be missing a token--you might consume the
+/// rest of the input looking for one of the missing tokens.
+///
+/// Consider grammar:
+///
+/// a : '[' b ']'
+///   | '(' b ')'
+///   ;
+/// b : c '^' INT ;
+/// c : ID
+///   | INT
+///   ;
+///
+/// At each rule invocation, the set of tokens that could follow
+/// that rule is pushed on a stack.  Here are the various "local"
+/// follow sets:
+///
+/// FOLLOW(b1_in_a) = FIRST(']') = ']'
+/// FOLLOW(b2_in_a) = FIRST(')') = ')'
+/// FOLLOW(c_in_b) = FIRST('^') = '^'
+///
+/// Upon erroneous input "[]", the call chain is
+///
+/// a -> b -> c
+///
+/// and, hence, the follow context stack is:
+///
+/// depth  local follow set     after call to rule
+///   0         <EOF>                    a (from main())
+///   1          ']'                     b
+///   2          '^'                     c
+///
+/// Notice that ')' is not included, because b would have to have
+/// been called from a different context in rule a for ')' to be
+/// included.
+///
+/// For error recovery, we cannot consider FOLLOW(c)
+/// (context-sensitive or otherwise).  We need the combined set of
+/// all context-sensitive FOLLOW sets--the set of all tokens that
+/// could follow any reference in the call chain.  We need to
+/// resync to one of those tokens.  Note that FOLLOW(c)='^' and if
+/// we resync'd to that token, we'd consume until EOF.  We need to
+/// sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+/// In this case, for input "[]", LA(1) is in this set so we would
+/// not consume anything and after printing an error rule c would
+/// return normally.  It would not find the required '^' though.
+/// At this point, it gets a mismatched token error and throws an
+/// exception (since LA(1) is not in the viable following token
+/// set).  The rule exception handler tries to recover, but finds
+/// the same recovery set and doesn't consume anything.  Rule b
+/// exits normally returning to rule a.  Now it finds the ']' (and
+/// with the successful match exits errorRecovery mode).
+///
+/// So, you can see that the parser walks up call chain looking
+/// for the token that was a member of the recovery set.
+///
+/// Errors are not generated in errorRecovery mode.
+///
+/// ANTLR's error recovery mechanism is based upon original ideas:
+///
+/// "Algorithms + Data Structures = Programs" by Niklaus Wirth
+///
+/// and
+///
+/// "A note on error recovery in recursive descent parsers":
+/// http://portal.acm.org/citation.cfm?id=947902.947905
+///
+/// Later, Josef Grosch had some good ideas:
+///
+/// "Efficient and Comfortable Error Recovery in Recursive Descent
+/// Parsers":
+/// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+///
+/// Like Grosch I implemented local FOLLOW sets that are combined
+/// at run-time upon error to avoid overhead during parsing.
+///
+static pANTLR3_BITSET		
+computeErrorRecoverySet	    (pANTLR3_BASE_RECOGNIZER recognizer)
+{
+    return   recognizer->combineFollows(recognizer, ANTLR3_FALSE);
+}
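+
+// Illustrative only: for the example grammar in the comment above, erroneous input
+// "[]" is detected inside rule c with the follow stack holding <EOF>, ']' and '^',
+// so the (non-exact) combination computed here is roughly { ']', '^' } plus the
+// outermost <EOF> context - the set that recover() resynchronizes to.
+//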
+
+/// Compute the context-sensitive FOLLOW set for current rule.
+/// Documentation below is from the Java runtime.
+///
+/// This is the set of token types that can follow a specific rule
+/// reference given a specific call chain.  You get the set of
+/// viable tokens that can possibly come next (look ahead depth 1)
+/// given the current call chain.  Contrast this with the
+/// definition of plain FOLLOW for rule r:
+///
+///  FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
+///
+/// where x in T* and alpha, beta in V*; T is set of terminals and
+/// V is the set of terminals and non terminals.  In other words,
+/// FOLLOW(r) is the set of all tokens that can possibly follow
+/// references to r in *any* sentential form (context).  At
+/// runtime, however, we know precisely which context applies as
+/// we have the call chain.  We may compute the exact (rather
+/// than covering superset) set of following tokens.
+///
+/// For example, consider grammar:
+///
+/// stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
+///      | "return" expr '.'
+///      ;
+/// expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
+/// atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
+///      | '(' expr ')'
+///      ;
+///
+/// The FOLLOW sets are all inclusive whereas context-sensitive
+/// FOLLOW sets are precisely what could follow a rule reference.
+/// For input "i=(3);", here is the derivation:
+///
+/// stat => ID '=' expr ';'
+///      => ID '=' atom ('+' atom)* ';'
+///      => ID '=' '(' expr ')' ('+' atom)* ';'
+///      => ID '=' '(' atom ')' ('+' atom)* ';'
+///      => ID '=' '(' INT ')' ('+' atom)* ';'
+///      => ID '=' '(' INT ')' ';'
+///
+/// At the "3" token, you'd have a call chain of
+///
+///   stat -> expr -> atom -> expr -> atom
+///
+/// What can follow that specific nested ref to atom?  Exactly ')'
+/// as you can see by looking at the derivation of this specific
+/// input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
+///
+/// You want the exact viable token set when recovering from a
+/// token mismatch.  Upon token mismatch, if LA(1) is member of
+/// the viable next token set, then you know there is most likely
+/// a missing token in the input stream.  "Insert" one by just not
+/// throwing an exception.
+///
+static pANTLR3_BITSET		
+computeCSRuleFollow	    (pANTLR3_BASE_RECOGNIZER recognizer)
+{
+    return   recognizer->combineFollows(recognizer, ANTLR3_FALSE);
+}
+
+/// Compute the current followset for the input stream.
+///
+static pANTLR3_BITSET		
+combineFollows		    (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_BOOLEAN exact)
+{
+    pANTLR3_BITSET	followSet;
+    pANTLR3_BITSET	localFollowSet;
+    ANTLR3_UINT32	top;
+    ANTLR3_UINT32	i;
+
+    top	= recognizer->state->following->size(recognizer->state->following);
+
+    followSet	    = antlr3BitsetNew(0);
+	localFollowSet	= NULL;
+
+    for (i = top; i>0; i--)
+    {
+		localFollowSet = antlr3BitsetLoad((pANTLR3_BITSET_LIST) recognizer->state->following->get(recognizer->state->following, i-1));
+
+		if  (localFollowSet != NULL)
+		{
+			followSet->borInPlace(followSet, localFollowSet);
+
+			if	(exact == ANTLR3_TRUE)
+			{
+				if	(localFollowSet->isMember(localFollowSet, ANTLR3_EOR_TOKEN_TYPE) == ANTLR3_FALSE)
+				{
+					// Only leave EOR in the set if at top (start rule); this lets us know
+					// if we have to include the follow(start rule); I.E., EOF
+					//
+					if	(i>1)
+					{
+						followSet->remove(followSet, ANTLR3_EOR_TOKEN_TYPE);
+					}
+				}
+				else
+				{
+					break;	// Cannot see End Of Rule from here, just drop out
+				}
+			}
+			localFollowSet->free(localFollowSet);
+			localFollowSet = NULL;
+		}
+    }
+
+	if	(localFollowSet != NULL)
+	{
+		localFollowSet->free(localFollowSet);
+	}
+    return  followSet;
+}
+
+/// Standard/Example error display method.
+/// No generic error message display function could possibly do everything correctly
+/// for all possible parsers. Hence you are provided with this example routine, which
+/// you should override in your parser/tree parser to do as you will.
+///
+/// Here we depart somewhat from the Java runtime as that has now split up a lot
+/// of the error display routines into separate units. However, there is little advantage
+/// to this in the C version as you will probably implement all such routines as a 
+/// separate translation unit, rather than install them all as pointers to functions
+/// in the base recognizer.
+///
+static void			
+displayRecognitionError	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_UINT8 * tokenNames)
+{
+	pANTLR3_PARSER			parser;
+	pANTLR3_TREE_PARSER	    tparser;
+	pANTLR3_INT_STREAM	    is;
+	pANTLR3_STRING			ttext;
+	pANTLR3_STRING			ftext;
+	pANTLR3_EXCEPTION	    ex;
+	pANTLR3_COMMON_TOKEN    theToken;
+	pANTLR3_BASE_TREE	    theBaseTree;
+	pANTLR3_COMMON_TREE	    theCommonTree;
+
+	// Retrieve some info for easy reading.
+	//
+	ex	    =		recognizer->state->exception;
+	ttext   =		NULL;
+
+	// See if there is a 'filename' we can use
+	//
+	if	(ex->streamName == NULL)
+	{
+		if	(((pANTLR3_COMMON_TOKEN)(ex->token))->type == ANTLR3_TOKEN_EOF)
+		{
+			ANTLR3_FPRINTF(stderr, "-end of input-(");
+		}
+		else
+		{
+			ANTLR3_FPRINTF(stderr, "-unknown source-(");
+		}
+	}
+	else
+	{
+		ftext = ex->streamName->to8(ex->streamName);
+		ANTLR3_FPRINTF(stderr, "%s(", ftext->chars);
+	}
+
+	// Next comes the line number
+	//
+
+	ANTLR3_FPRINTF(stderr, "%d) ", recognizer->state->exception->line);
+	ANTLR3_FPRINTF(stderr, " : error %d : %s", 
+										recognizer->state->exception->type,
+					(pANTLR3_UINT8)	   (recognizer->state->exception->message));
+
+
+	// How we determine the next piece is dependent on which thing raised the
+	// error.
+	//
+	switch	(recognizer->type)
+	{
+	case	ANTLR3_TYPE_PARSER:
+
+		// Prepare the knowledge we know we have
+		//
+		parser	    = (pANTLR3_PARSER) (recognizer->super);
+		tparser	    = NULL;
+		is			= parser->tstream->istream;
+		theToken    = (pANTLR3_COMMON_TOKEN)(recognizer->state->exception->token);
+		ttext	    = theToken != NULL ? theToken->toString(theToken) : NULL;
+
+		ANTLR3_FPRINTF(stderr, ", at offset %d", recognizer->state->exception->charPositionInLine);
+		if  (theToken != NULL)
+		{
+			if (theToken->type == ANTLR3_TOKEN_EOF)
+			{
+				ANTLR3_FPRINTF(stderr, ", at <EOF>");
+			}
+			else
+			{
+				// Guard against null text in a token
+				//
+				ANTLR3_FPRINTF(stderr, "\n    near %s\n    ", ttext == NULL ? (pANTLR3_UINT8)"<no text for the token>" : ttext->chars);
+			}
+		}
+		break;
+
+	case	ANTLR3_TYPE_TREE_PARSER:
+
+		tparser		= (pANTLR3_TREE_PARSER) (recognizer->super);
+		parser		= NULL;
+		is			= tparser->ctnstream->tnstream->istream;
+		theBaseTree	= (pANTLR3_BASE_TREE)(recognizer->state->exception->token);
+		ttext		= theBaseTree != NULL ? theBaseTree->toStringTree(theBaseTree) : NULL;
+
+		if  (theBaseTree != NULL)
+		{
+			theCommonTree	= (pANTLR3_COMMON_TREE)	    theBaseTree->super;
+
+			if	(theCommonTree != NULL)
+			{
+				theToken	= (pANTLR3_COMMON_TOKEN)    theBaseTree->getToken(theBaseTree);
+			}
+			ANTLR3_FPRINTF(stderr, ", at offset %d", theBaseTree->getCharPositionInLine(theBaseTree));
+			ANTLR3_FPRINTF(stderr, ", near %s", ttext->chars);
+		}
+		break;
+
+	default:
+
+		ANTLR3_FPRINTF(stderr, "Base recognizer function displayRecognitionError called by unknown parser type - provide override for this function\n");
+		return;
+		break;
+	}
+
+	// Although this function should generally be provided by the implementation, this one
+	// should be as helpful as possible for grammar developers and serve as an example
+	// of what you can do with each exception type. In general, when you make up your
+	// 'real' handler, you should debug the routine with all possible errors you expect
+	// which will then let you be as specific as possible about all circumstances.
+	//
+	// Note that in the general case, errors thrown by tree parsers indicate a problem
+	// with the output of the parser or with the tree grammar itself. The job of the parser
+	// is to produce a perfect (in traversal terms) syntactically correct tree, so errors
+	// at that stage should really be semantic errors that your own code determines and handles
+	// in whatever way is appropriate.
+	//
+	switch  (ex->type)
+	{
+	case	ANTLR3_UNWANTED_TOKEN_EXCEPTION:
+
+		// Indicates that the recognizer was fed a token which seems to be
+		// spurious input. We can detect this when the token that follows
+		// this unwanted token would normally be part of the syntactically
+		// correct stream. Then we can see that the token we are looking at
+		// is just something that should not be there and throw this exception.
+		//
+		if	(tokenNames == NULL)
+		{
+			ANTLR3_FPRINTF(stderr, " : Extraneous input...");
+		}
+		else
+		{
+			if	(ex->expecting == ANTLR3_TOKEN_EOF)
+			{
+				ANTLR3_FPRINTF(stderr, " : Extraneous input - expected <EOF>\n");
+			}
+			else
+			{
+				ANTLR3_FPRINTF(stderr, " : Extraneous input - expected %s ...\n", tokenNames[ex->expecting]);
+			}
+		}
+		break;
+
+	case	ANTLR3_MISSING_TOKEN_EXCEPTION:
+
+		// Indicates that the recognizer detected that the token we just
+		// hit would be valid syntactically if preceded by a particular
+		// token. Perhaps a missing ';' at line end or a missing ',' in an
+		// expression list, and such like.
+		//
+		if	(tokenNames == NULL)
+		{
+			ANTLR3_FPRINTF(stderr, " : Missing token (%d)...\n", ex->expecting);
+		}
+		else
+		{
+			if	(ex->expecting == ANTLR3_TOKEN_EOF)
+			{
+				ANTLR3_FPRINTF(stderr, " : Missing <EOF>\n");
+			}
+			else
+			{
+				ANTLR3_FPRINTF(stderr, " : Missing %s \n", tokenNames[ex->expecting]);
+			}
+		}
+		break;
+
+	case	ANTLR3_RECOGNITION_EXCEPTION:
+
+		// Indicates that the recognizer received a token
+		// in the input that was not predicted. This is the basic exception type 
+		// from which all others are derived. So we assume it was a syntax error.
+		// You may get this if there are no more tokens and more are needed
+		// to complete a parse for instance.
+		//
+		ANTLR3_FPRINTF(stderr, " : syntax error...\n");    
+		break;
+
+	case    ANTLR3_MISMATCHED_TOKEN_EXCEPTION:
+
+		// We were expecting to see one thing and got another. This is the
+		// most common error if we could not detect a missing or unwanted token.
+		// Here you can spend your efforts to
+		// derive more useful error messages based on the expected
+		// token set and the last token and so on. The error following
+		// bitmaps do a good job of reducing the set that we were looking
+		// for down to something small. Knowing what you are parsing may be
+		// able to allow you to be even more specific about an error.
+		//
+		if	(tokenNames == NULL)
+		{
+			ANTLR3_FPRINTF(stderr, " : syntax error...\n");
+		}
+		else
+		{
+			if	(ex->expecting == ANTLR3_TOKEN_EOF)
+			{
+				ANTLR3_FPRINTF(stderr, " : expected <EOF>\n");
+			}
+			else
+			{
+				ANTLR3_FPRINTF(stderr, " : expected %s ...\n", tokenNames[ex->expecting]);
+			}
+		}
+		break;
+
+	case	ANTLR3_NO_VIABLE_ALT_EXCEPTION:
+
+		// We could not pick any alt decision from the input given,
+		// so we cannot say exactly what happened - however, when you examine your grammar,
+		// you should be able to see why. It means that at the point where the current token
+		// occurred the DFA indicates nowhere to go from here.
+		//
+		ANTLR3_FPRINTF(stderr, " : cannot match to any predicted input...\n");
+
+		break;
+
+	case	ANTLR3_MISMATCHED_SET_EXCEPTION:
+
+		{
+			ANTLR3_UINT32	  count;
+			ANTLR3_UINT32	  bit;
+			ANTLR3_UINT32	  size;
+			ANTLR3_UINT32	  numbits;
+			pANTLR3_BITSET	  errBits;
+
+			// This means we were able to deal with one of a set of
+			// possible tokens at this point, but we did not see any
+			// member of that set.
+			//
+			ANTLR3_FPRINTF(stderr, " : unexpected input...\n  expected one of : ");
+
+			// What tokens could we have accepted at this point in the
+			// parse?
+			//
+			count   = 0;
+			errBits = antlr3BitsetLoad		(ex->expectingSet);
+			numbits = errBits->numBits		(errBits);
+			size    = errBits->size			(errBits);
+
+			if  (size > 0)
+			{
+				// However many tokens we could have dealt with here, it is usually
+				// not useful to print ALL of the set here. I arbitrarily chose 8
+				// here, but you should do whatever makes sense for you of course.
+				// No token number 0, so look for bit 1 and on.
+				//
+				for	(bit = 1; bit < numbits && count < 8 && count < size; bit++)
+				{
+					// TODO: This doesn't look right - should be asking if the bit is set!!
+					//
+					if  (tokenNames[bit])
+					{
+						ANTLR3_FPRINTF(stderr, "%s%s", count > 0 ? ", " : "", tokenNames[bit]); 
+						count++;
+					}
+				}
+				ANTLR3_FPRINTF(stderr, "\n");
+			}
+			else
+			{
+				ANTLR3_FPRINTF(stderr, "Actually dude, we didn't seem to be expecting anything here, or at least\n");
+				ANTLR3_FPRINTF(stderr, "I could not work out what I was expecting, like so many of us these days!\n");
+			}
+		}
+		break;
+
+	case	ANTLR3_EARLY_EXIT_EXCEPTION:
+
+		// We entered a loop requiring a number of token sequences
+		// but found a token that ended that sequence earlier than
+		// we should have done.
+		//
+		ANTLR3_FPRINTF(stderr, " : missing elements...\n");
+		break;
+
+	default:
+
+		// We don't handle any other exceptions here, but you can
+		// if you wish. If we get an exception that hits this point
+		// then we are just going to report what we know about the
+		// token.
+		//
+		ANTLR3_FPRINTF(stderr, " : syntax not recognized...\n");
+		break;
+	}
+
+	// Here you have the token that was in error which if this is
+	// the standard implementation will tell you the line and offset
+	// and also record the address of the start of the line in the
+	// input stream. You could therefore print the source line and so on.
+	// Generally though, I would expect that your lexer/parser will keep
+	// its own map of lines and source pointers or whatever as there
+	// are a lot of specific things you need to know about the input
+	// to do something like that.
+	// Here is where you do it though :-).
+	//
+}
+
+/// Return how many syntax errors were detected by this recognizer
+///
+static ANTLR3_UINT32
+getNumberOfSyntaxErrors(pANTLR3_BASE_RECOGNIZER recognizer)
+{
+	return	recognizer->state->errorCount;
+}
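+
+// Illustrative only: after invoking a start rule, client code can check this count
+// through the base recognizer, roughly as in this hypothetical sketch where psr is
+// a generated parser whose pParser->rec member is the base recognizer.
+//
+//    if (psr->pParser->rec->getNumberOfSyntaxErrors(psr->pParser->rec) > 0)
+//    {
+//        ANTLR3_FPRINTF(stderr, "parse failed with syntax errors\n");
+//    }
+//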
+
+/// Recover from an error found on the input stream.  Mostly this is
+/// NoViableAlt exceptions, but could be a mismatched token that
+/// the match() routine could not recover from.
+///
+static void			
+recover			    (pANTLR3_BASE_RECOGNIZER recognizer)
+{
+    // Used to compute the follow set of tokens
+    //
+    pANTLR3_BITSET			followSet;
+    pANTLR3_PARSER			parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+
+    switch	(recognizer->type)
+    {
+		case	ANTLR3_TYPE_PARSER:
+
+		parser  = (pANTLR3_PARSER) (recognizer->super);
+		tparser	= NULL;
+		is		= parser->tstream->istream;
+
+	break;
+
+    case	ANTLR3_TYPE_TREE_PARSER:
+
+		tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+		parser	= NULL;
+		is		= tparser->ctnstream->tnstream->istream;
+
+	break;
+
+    default:
+	    
+		ANTLR3_FPRINTF(stderr, "Base recognizer function recover called by unknown parser type - provide override for this function\n");
+		return;
+
+	break;
+    }
+
+	// Are we about to repeat the same error?
+	//
+    if	(recognizer->state->lastErrorIndex == is->index(is))
+    {
+		// The last error was at the same token index point. This must be a case
+		// where LT(1) is in the recovery token set so nothing is
+		// consumed. Consume a single token, at least, to prevent
+		// an infinite loop; this is a failsafe.
+		//
+		is->consume(is);
+    }
+
+    // Record error index position
+    //
+    recognizer->state->lastErrorIndex	 = is->index(is);
+    
+    // Work out the follows set for error recovery
+    //
+    followSet	= recognizer->computeErrorRecoverySet(recognizer);
+
+    // Call resync hook (for debuggers and so on)
+    //
+    recognizer->beginResync(recognizer);
+
+    // Consume tokens until we have resynced to something in the follows set
+    //
+    recognizer->consumeUntilSet(recognizer, followSet);
+
+    // End resync hook 
+    //
+    recognizer->endResync(recognizer);
+
+    // Destroy the temporary bitset we produced.
+    //
+    followSet->free(followSet);
+
+    // Reset the inError flag so we don't re-report the exception
+    //
+    recognizer->state->error	= ANTLR3_FALSE;
+    recognizer->state->failed	= ANTLR3_FALSE;
+}
+
+
+/// Attempt to recover from a single missing or extra token.
+///
+/// EXTRA TOKEN
+///
+/// LA(1) is not what we are looking for.  If LA(2) has the right token,
+/// however, then assume LA(1) is some extra spurious token.  Delete it,
+/// then consume LA(2) as if we were doing a normal match(), which advances the
+/// input.
+///
+/// MISSING TOKEN
+///
+/// If current token is consistent with what could come after
+/// ttype then it is ok to "insert" the missing token, else throw
+/// exception For example, Input "i=(3;" is clearly missing the
+/// ')'.  When the parser returns from the nested call to expr, it
+/// will have call chain:
+///
+///    stat -> expr -> atom
+///
+/// and it will be trying to match the ')' at this point in the
+/// derivation:
+///
+///       => ID '=' '(' INT ')' ('+' atom)* ';'
+///                          ^
+/// match() will see that ';' doesn't match ')' and report a
+/// mismatched token error.  To recover, it sees that LA(1)==';'
+/// is in the set of tokens that can follow the ')' token
+/// reference in rule atom.  It can assume that you forgot the ')'.
+///
+/// The exception that is passed in in the Java implementation is
+/// stored in the recognizer exception stack in the C version. To 'throw' it we set the
+/// error flag and rules cascade back when this is set.
+///
+static void *	
+recoverFromMismatchedToken  (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow)
+{
+	pANTLR3_PARSER			  parser;
+	pANTLR3_TREE_PARSER	      tparser;
+	pANTLR3_INT_STREAM	      is;
+	void					* matchedSymbol;
+
+
+
+	switch	(recognizer->type)
+	{
+	case	ANTLR3_TYPE_PARSER:
+
+		parser  = (pANTLR3_PARSER) (recognizer->super);
+		tparser	= NULL;
+		is	= parser->tstream->istream;
+
+		break;
+
+	case	ANTLR3_TYPE_TREE_PARSER:
+
+		tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+		parser	= NULL;
+		is	= tparser->ctnstream->tnstream->istream;
+
+		break;
+
+	default:
+
+		ANTLR3_FPRINTF(stderr, "Base recognizer function recoverFromMismatchedToken called by unknown parser type - provide override for this function\n");
+		return NULL;
+
+		break;
+	}
+
+	// Create an exception if we need one
+	//
+	if	(recognizer->state->exception == NULL)
+	{
+		antlr3RecognitionExceptionNew(recognizer);
+	}
+
+	// If the next token after the one we are looking at in the input stream
+	// is what we are looking for then we remove the one we have discovered
+	// from the stream by consuming it, then consume this next one along too as
+	// if nothing had happened.
+	//
+	if	( recognizer->mismatchIsUnwantedToken(recognizer, is, ttype) == ANTLR3_TRUE)
+	{
+		recognizer->state->exception->type		= ANTLR3_UNWANTED_TOKEN_EXCEPTION;
+		recognizer->state->exception->message	= ANTLR3_UNWANTED_TOKEN_EXCEPTION_NAME;
+
+		// Call resync hook (for debuggers and so on)
+		//
+		if	(recognizer->debugger != NULL)
+		{
+			recognizer->debugger->beginResync(recognizer->debugger);
+		}
+
+		// "delete" the extra token
+		//
+		recognizer->beginResync(recognizer);
+		is->consume(is);
+		recognizer->endResync(recognizer);
+		// End resync hook 
+		//
+		if	(recognizer->debugger != NULL)
+		{
+			recognizer->debugger->endResync(recognizer->debugger);
+		}
+
+		// Print out the error after we consume so that ANTLRWorks sees the
+		// token in the exception.
+		//
+		recognizer->reportError(recognizer);
+
+		// Return the token we are actually matching
+		//
+		matchedSymbol = recognizer->getCurrentInputSymbol(recognizer, is);
+
+		// Consume the token that the rule actually expected to get as if everything
+		// was hunky dory.
+		//
+		is->consume(is);
+
+		recognizer->state->error  = ANTLR3_FALSE;	// Exception is not outstanding any more
+
+		return	matchedSymbol;
+	}
+
+	// Single token deletion (Unwanted above) did not work
+	// so we see if we can insert a token instead by calculating which
+	// token would be missing
+	//
+	if	(mismatchIsMissingToken(recognizer, is, follow))
+	{
+		// We can fake the missing token and proceed
+		//
+		matchedSymbol = recognizer->getMissingSymbol(recognizer, is, recognizer->state->exception, ttype, follow);
+		recognizer->state->exception->type		= ANTLR3_MISSING_TOKEN_EXCEPTION;
+		recognizer->state->exception->message	= ANTLR3_MISSING_TOKEN_EXCEPTION_NAME;
+		recognizer->state->exception->token		= matchedSymbol;
+		recognizer->state->exception->expecting	= ttype;
+
+		// Print out the error after we insert so that ANTLRWorks sees the
+		// token in the exception.
+		//
+		recognizer->reportError(recognizer);
+
+		recognizer->state->error  = ANTLR3_FALSE;	// Exception is not outstanding any more
+
+		return	matchedSymbol;
+	}
+
+
+	// Neither deleting nor inserting tokens allows recovery
+	// must just report the exception.
+	//
+	recognizer->state->error	    = ANTLR3_TRUE;
+	return NULL;
+}
+
+static void *
+recoverFromMismatchedSet	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET_LIST follow)
+{
+    pANTLR3_PARSER			parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+	pANTLR3_COMMON_TOKEN	matchedSymbol;
+
+    switch	(recognizer->type)
+    {
+    case	ANTLR3_TYPE_PARSER:
+
+		parser  = (pANTLR3_PARSER) (recognizer->super);
+		tparser	= NULL;
+		is	= parser->tstream->istream;
+
+	break;
+
+    case	ANTLR3_TYPE_TREE_PARSER:
+
+		tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+		parser	= NULL;
+		is	= tparser->ctnstream->tnstream->istream;
+
+	break;
+
+    default:
+	    
+		ANTLR3_FPRINTF(stderr, "Base recognizer function recoverFromMismatchedSet called by unknown parser type - provide override for this function\n");
+		return NULL;
+
+	break;
+    }
+
+	if	(recognizer->mismatchIsMissingToken(recognizer, is, follow) == ANTLR3_TRUE)
+	{
+		// We can fake the missing token and proceed
+		//
+		matchedSymbol = (pANTLR3_COMMON_TOKEN)recognizer->getMissingSymbol(recognizer, is, recognizer->state->exception, ANTLR3_TOKEN_INVALID, follow);
+		recognizer->state->exception->type	= ANTLR3_MISSING_TOKEN_EXCEPTION;
+		recognizer->state->exception->token	= matchedSymbol;
+
+		// Print out the error after we insert so that ANTLRWorks sees the
+		// token in the exception.
+		//
+		recognizer->reportError(recognizer);
+
+		recognizer->state->error  = ANTLR3_FALSE;	// Exception is not outstanding any more
+
+		return	matchedSymbol;
+	}
+
+    // TODO - Single token deletion like in recoverFromMismatchedToken()
+    //
+    recognizer->state->error	= ANTLR3_TRUE;
+	recognizer->state->failed	= ANTLR3_TRUE;
+	return NULL;
+}
+
+/// This code is factored out from mismatched token and mismatched set
+///  recovery.  It handles "single token insertion" error recovery for
+/// both.  No tokens are consumed to recover from insertions.  Return
+/// true if recovery was possible else return false.
+///
+static ANTLR3_BOOLEAN	
+recoverFromMismatchedElement	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET_LIST followBits)
+{
+    pANTLR3_BITSET	    viableToksFollowingRule;
+    pANTLR3_BITSET	    follow;
+    pANTLR3_PARSER	    parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+
+    switch	(recognizer->type)
+    {
+    case	ANTLR3_TYPE_PARSER:
+
+		parser  = (pANTLR3_PARSER) (recognizer->super);
+		tparser	= NULL;
+		is	= parser->tstream->istream;
+
+	break;
+
+    case	ANTLR3_TYPE_TREE_PARSER:
+
+		tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+		parser	= NULL;
+		is	= tparser->ctnstream->tnstream->istream;
+
+	break;
+
+    default:
+	    
+		ANTLR3_FPRINTF(stderr, "Base recognizer function recover called by unknown parser type - provide override for this function\n");
+		return ANTLR3_FALSE;
+
+	break;
+    }
+
+    follow	= antlr3BitsetLoad(followBits);
+
+    if	(follow == NULL)
+    {
+		/* The follow set is NULL, which means we don't know what can come 
+		 * next, so we "hit and hope" by just signifying that we cannot
+		 * recover, which will just cause the next token to be consumed,
+		 * which might dig us out.
+		 */
+		return	ANTLR3_FALSE;
+    }
+
+    /* We have a bitmap for the follow set, hence we can compute 
+     * what can follow this grammar element reference.
+     */
+    if	(follow->isMember(follow, ANTLR3_EOR_TOKEN_TYPE) == ANTLR3_TRUE)
+    {
+		/* First we need to know which of the available tokens are viable
+		 * to follow this reference.
+		 */
+		viableToksFollowingRule	= recognizer->computeCSRuleFollow(recognizer);
+
+		/* Remove the EOR token, which we do not wish to compute with
+		 */
+		follow->remove(follow, ANTLR3_EOR_TOKEN_TYPE);
+		viableToksFollowingRule->free(viableToksFollowingRule);
+		/* We now have the computed set of what can follow the current token
+		 */
+    }
+
+    /* We can now see if the current token works with the set of tokens
+     * that could follow the current grammar reference. If it looks like it
+     * is consistent, then we can "insert" that token by not throwing
+     * an exception and assuming that we saw it. 
+     */
+    if	( follow->isMember(follow, is->_LA(is, 1)) == ANTLR3_TRUE)
+    {
+		/* report the error, but don't cause any rules to abort and stuff
+		 */
+		recognizer->reportError(recognizer);
+		if	(follow != NULL)
+		{
+			follow->free(follow);
+		}
+		recognizer->state->error			= ANTLR3_FALSE;
+		recognizer->state->failed			= ANTLR3_FALSE;
+		return ANTLR3_TRUE;	/* Success in recovery	*/
+    }
+
+    if	(follow != NULL)
+    {
+		follow->free(follow);
+    }
+
+    /* We could not find anything viable to do, so this is going to 
+     * cause an exception.
+     */
+    return  ANTLR3_FALSE;
+}
+
+/// Eat tokens from the input stream until we get one of JUST the right type
+///
+static void		
+consumeUntil	(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 tokenType)
+{
+    ANTLR3_UINT32			ttype;
+    pANTLR3_PARSER			parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+
+    switch	(recognizer->type)
+    {
+		case	ANTLR3_TYPE_PARSER:
+
+			parser  = (pANTLR3_PARSER) (recognizer->super);
+			tparser	= NULL;
+			is	= parser->tstream->istream;
+
+			break;
+
+		case	ANTLR3_TYPE_TREE_PARSER:
+
+			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+			parser	= NULL;
+			is	= tparser->ctnstream->tnstream->istream;
+
+			break;
+
+		default:
+		    
+			ANTLR3_FPRINTF(stderr, "Base recognizer function 'consumeUntil' called by unknown parser type - provide override for this function\n");
+			return;
+
+			break;
+    }
+
+    // What do we have at the moment?
+    //
+    ttype	= is->_LA(is, 1);
+
+    // Start eating tokens until we get to the one we want.
+    //
+    while   (ttype != ANTLR3_TOKEN_EOF && ttype != tokenType)
+    {
+		is->consume(is);
+		ttype	= is->_LA(is, 1);
+    }
+}
+
+/// Eat tokens from the input stream until we find one that
+/// belongs to the supplied set.
+///
+static void		
+consumeUntilSet			    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_BITSET set)
+{
+    ANTLR3_UINT32	    ttype;
+    pANTLR3_PARSER	    parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+
+    switch	(recognizer->type)
+    {
+		case	ANTLR3_TYPE_PARSER:
+
+			parser  = (pANTLR3_PARSER) (recognizer->super);
+			tparser	= NULL;
+			is	= parser->tstream->istream;
+
+			break;
+
+		case	ANTLR3_TYPE_TREE_PARSER:
+
+			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+			parser	= NULL;
+			is	= tparser->ctnstream->tnstream->istream;
+
+			break;
+
+		default:
+		    
+			ANTLR3_FPRINTF(stderr, "Base recognizer function 'consumeUntilSet' called by unknown parser type - provide override for this function\n");
+			return;
+
+			break;
+    }
+
+    // What do we have at the moment?
+    //
+    ttype	= is->_LA(is, 1);
+
+    // Start eating tokens until we get to one we want.
+    //
+    while   (ttype != ANTLR3_TOKEN_EOF && set->isMember(set, ttype) == ANTLR3_FALSE)
+    {
+		is->consume(is);
+		ttype	= is->_LA(is, 1);
+    }
+}
+
+/** Return the rule invocation stack (how we got here in the parse).
+ *  In the Java version Ter just asks the JVM for all the information
+ *  but in C we don't get this information, so I am going to do nothing 
+ *  right now.
+ */
+static pANTLR3_STACK	
+getRuleInvocationStack		    (pANTLR3_BASE_RECOGNIZER recognizer)
+{
+    return NULL;
+}
+
+static pANTLR3_STACK	
+getRuleInvocationStackNamed	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_UINT8 name)
+{
+    return NULL;
+}
+
+/** Convenience method for template rewrites - NYI.
+ */
+static pANTLR3_HASH_TABLE	
+toStrings			    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_HASH_TABLE tokens)
+{
+    return NULL;
+}
+
+static	void ANTLR3_CDECL
+freeIntTrie    (void * trie)
+{
+    ((pANTLR3_INT_TRIE)trie)->free((pANTLR3_INT_TRIE)trie);
+}
+
+
+/** Pointer to a function to return whether the rule has parsed input starting at the supplied 
+ *  start index before. If the rule has not parsed input starting from the supplied start index,
+ *  then it will return ANTLR3_MEMO_RULE_UNKNOWN. If it has parsed from the supplied start point
+ *  then it will return the point where it last stopped parsing after that start point.
+ *
+ * \remark
+ * The rule memos are an ANTLR3_LIST of ANTLR3_LISTS, however if this becomes any kind of performance
+ * issue (it probably won't, the hash tables are pretty quick) then we could make a special int only
+ * version of the table.
+ */
+static ANTLR3_MARKER	
+getRuleMemoization		    (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_INTKEY ruleIndex, ANTLR3_MARKER ruleParseStart)
+{
+    /* The rule memos are an ANTLR3_LIST of ANTLR3_LIST.
+     */
+    pANTLR3_INT_TRIE	ruleList;
+    ANTLR3_MARKER	stopIndex;
+    pANTLR3_TRIE_ENTRY	entry;
+
+    /* See if we have a list in the ruleMemos for this rule, and if not, then create one
+     * as we will need it eventually if we are being asked for the memo here.
+     */
+    entry	= recognizer->state->ruleMemo->get(recognizer->state->ruleMemo, (ANTLR3_INTKEY)ruleIndex);
+
+    if	(entry == NULL)
+    {
+		/* Did not find it, so create a new one for it, with a bit depth based on the 
+		 * size of the input stream. We need the bit depth to incorporate the number of
+		 * bits required to represent the largest possible stop index in the input, which is the
+		 * last character. An int stream is free to return the largest 64 bit offset if it has
+		 * no idea of the size, but you should remember that this will cause the leftmost
+		 * bit match algorithm to run to 63 bits, which will be the whole time spent in the trie ;-)
+		 */
+		ruleList    = antlr3IntTrieNew(63);	/* Depth is theoretically 64 bits, but probably not ;-)	*/
+
+		if (ruleList != NULL)
+		{
+			recognizer->state->ruleMemo->add(recognizer->state->ruleMemo, (ANTLR3_INTKEY)ruleIndex, ANTLR3_HASH_TYPE_STR, 0, ANTLR3_FUNC_PTR(ruleList), freeIntTrie);
+		}
+
+		/* We cannot have a stopIndex in a trie we have just created of course
+		 */
+		return	MEMO_RULE_UNKNOWN;
+    }
+
+    ruleList	= (pANTLR3_INT_TRIE) (entry->data.ptr);
+
+    /* See if there is a stop index associated with the supplied start index.
+     */
+    stopIndex	= 0;
+
+    entry = ruleList->get(ruleList, ruleParseStart);
+    if (entry != NULL)
+    {
+		stopIndex = (ANTLR3_MARKER)(entry->data.intVal);
+    }
+
+    if	(stopIndex == 0)
+    {
+		return MEMO_RULE_UNKNOWN;
+    }
+
+    return  stopIndex;
+}
+
+/** Has this rule already parsed input at the current index in the
+ *  input stream?  Return ANTLR3_TRUE if we have and ANTLR3_FALSE
+ *  if we have not.
+ *
+ *  This method has a side-effect: if we have seen this input for
+ *  this rule and successfully parsed before, then seek ahead to
+ *  1 past the stop token matched for this rule last time.
+ */
+static ANTLR3_BOOLEAN	
+alreadyParsedRule		    (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_MARKER ruleIndex)
+{
+    ANTLR3_MARKER			stopIndex;
+    pANTLR3_LEXER			lexer;
+    pANTLR3_PARSER			parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+
+    switch	(recognizer->type)
+    {
+		case	ANTLR3_TYPE_PARSER:
+
+			parser  = (pANTLR3_PARSER) (recognizer->super);
+			tparser	= NULL;
+			lexer	= NULL;
+			is	= parser->tstream->istream;
+
+			break;
+
+		case	ANTLR3_TYPE_TREE_PARSER:
+
+			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+			parser	= NULL;
+			lexer	= NULL;
+			is	= tparser->ctnstream->tnstream->istream;
+
+			break;
+
+		case	ANTLR3_TYPE_LEXER:
+
+			lexer	= (pANTLR3_LEXER)   (recognizer->super);
+			parser	= NULL;
+			tparser	= NULL;
+			is	= lexer->input->istream;
+			break;
+
+		default:
+		    
+			ANTLR3_FPRINTF(stderr, "Base recognizer function 'alreadyParsedRule' called by unknown parser type - provide override for this function\n");
+			return ANTLR3_FALSE;
+
+			break;
+    }
+
+    /* See if we have a memo marker for this.
+     */
+    stopIndex	    = recognizer->getRuleMemoization(recognizer, ruleIndex, is->index(is));
+
+    if	(stopIndex  == MEMO_RULE_UNKNOWN)
+    {
+		return ANTLR3_FALSE;
+    }
+
+    if	(stopIndex == MEMO_RULE_FAILED)
+    {
+		recognizer->state->failed = ANTLR3_TRUE;
+    }
+    else
+    {
+		is->seek(is, stopIndex+1);
+    }
+
+    /* If here then the rule was executed for this input already
+     */
+    return  ANTLR3_TRUE;
+}
+
+/** Record whether or not this rule parsed the input at this position
+ *  successfully.
+ */
+static void		
+memoize	(pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_MARKER ruleIndex, ANTLR3_MARKER ruleParseStart)
+{
+    /* The rule memos are an ANTLR3_LIST of ANTLR3_LIST.
+     */
+    pANTLR3_INT_TRIE	    ruleList;
+    pANTLR3_TRIE_ENTRY	    entry;
+    ANTLR3_MARKER	    stopIndex;
+    pANTLR3_LEXER	    lexer;
+    pANTLR3_PARSER	    parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+
+    switch	(recognizer->type)
+    {
+		case	ANTLR3_TYPE_PARSER:
+
+			parser  = (pANTLR3_PARSER) (recognizer->super);
+			tparser	= NULL;
+			is	= parser->tstream->istream;
+
+			break;
+
+		case	ANTLR3_TYPE_TREE_PARSER:
+
+			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+			parser	= NULL;
+			is	= tparser->ctnstream->tnstream->istream;
+
+			break;
+
+		case	ANTLR3_TYPE_LEXER:
+
+			lexer	= (pANTLR3_LEXER)   (recognizer->super);
+			parser	= NULL;
+			tparser	= NULL;
+			is		= lexer->input->istream;
+			break;
+
+		default:
+		    
+			ANTLR3_FPRINTF(stderr, "Base recognizer function 'memoize' called by unknown parser type - provide override for this function\n");
+			return;
+
+			break;
+    }
+    
+    stopIndex	= recognizer->state->failed == ANTLR3_TRUE ? MEMO_RULE_FAILED : is->index(is) - 1;
+
+    entry	= recognizer->state->ruleMemo->get(recognizer->state->ruleMemo, (ANTLR3_INTKEY)ruleIndex);
+
+    if	(entry != NULL)
+    {
+		ruleList = (pANTLR3_INT_TRIE)(entry->data.ptr);
+
+		/* If we do not already have this entry, append it. The memoization trie does not
+		 * accept duplicates, so it will not add the entry if it is already there, and we
+		 * simply ignore the return code as we do not care either way.
+		 */
+		ruleList->add(ruleList, ruleParseStart, ANTLR3_HASH_TYPE_INT, stopIndex, NULL, NULL);
+    }
+}
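+
+/* Illustrative sketch only (not part of this runtime): when memoization is enabled,
+ * generated rule functions typically bracket the rule body with alreadyParsedRule()
+ * and memoize(). The rule, macro, and context names below are hypothetical.
+ *
+ *    static void rule_expr(pMyParser ctx)
+ *    {
+ *        ANTLR3_MARKER start = ISTREAM->index(ISTREAM);
+ *        if (RECOGNIZER->alreadyParsedRule(RECOGNIZER, RULE_EXPR_INDEX) == ANTLR3_TRUE)
+ *        {
+ *            return;   // Already seen this input: stream was seeked past it or failed was set
+ *        }
+ *        // ... match the rule body ...
+ *        RECOGNIZER->memoize(RECOGNIZER, RULE_EXPR_INDEX, start);
+ *    }
+ */
+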
+/** A syntactic predicate.  Returns true/false depending on whether
+ *  the specified grammar fragment matches the current input stream.
+ *  This resets the failed instance var afterwards.
+ */
+static ANTLR3_BOOLEAN	
+synpred	(pANTLR3_BASE_RECOGNIZER recognizer, void * ctx, void (*predicate)(void * ctx))
+{
+    ANTLR3_MARKER   start;
+    pANTLR3_PARSER	    parser;
+    pANTLR3_TREE_PARSER	    tparser;
+    pANTLR3_INT_STREAM	    is;
+
+    switch	(recognizer->type)
+    {
+		case	ANTLR3_TYPE_PARSER:
+
+			parser  = (pANTLR3_PARSER) (recognizer->super);
+			tparser	= NULL;
+			is	= parser->tstream->istream;
+
+			break;
+
+		case	ANTLR3_TYPE_TREE_PARSER:
+
+			tparser = (pANTLR3_TREE_PARSER) (recognizer->super);
+			parser	= NULL;
+			is	= tparser->ctnstream->tnstream->istream;
+
+			break;
+
+		default:
+		    
+			ANTLR3_FPRINTF(stderr, "Base recognizer function 'synPred' called by unknown parser type - provide override for this function\n");
+			return ANTLR3_FALSE;
+
+			break;
+    }
+
+    /* Begin backtracking so we can get back to where we started after trying out
+     * the syntactic predicate.
+     */
+    start   = is->mark(is);
+    recognizer->state->backtracking++;
+
+    /* Try the syntactical predicate
+     */
+    predicate(ctx);
+
+    /* Reset
+     */
+    is->rewind(is, start);
+    recognizer->state->backtracking--;
+
+    if	(recognizer->state->failed == ANTLR3_TRUE)
+    {
+		/* Predicate failed
+		 */
+		recognizer->state->failed = ANTLR3_FALSE;
+		return	ANTLR3_FALSE;
+    }
+    else
+    {
+		/* Predicate was successful
+		 */
+		recognizer->state->failed	= ANTLR3_FALSE;
+		return	ANTLR3_TRUE;
+    }
+}
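+
+/* Illustrative sketch only (not part of this runtime): a generated syntactic predicate is
+ * a parameterless fragment that matches the predicated input and sets state->failed on a
+ * mismatch; synpred() marks the stream, runs it with backtracking raised, then rewinds.
+ * The names below are hypothetical.
+ *
+ *    static void synpred1_fragment(void * ctx)
+ *    {
+ *        // match the lookahead grammar fragment here
+ *    }
+ *
+ *    if (recognizer->synpred(recognizer, ctx, synpred1_fragment) == ANTLR3_TRUE)
+ *    {
+ *        // choose the predicated alternative
+ *    }
+ */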
+
+static void
+reset(pANTLR3_BASE_RECOGNIZER recognizer)
+{
+    if	(recognizer->state->following != NULL)
+    {
+		recognizer->state->following->free(recognizer->state->following);
+    }
+
+	// Reset the state flags
+	//
+	recognizer->state->errorRecovery	= ANTLR3_FALSE;
+	recognizer->state->lastErrorIndex	= -1;
+	recognizer->state->failed			= ANTLR3_FALSE;
+	recognizer->state->errorCount		= 0;
+	recognizer->state->backtracking		= 0;
+	recognizer->state->following		= NULL;
+
+	if	(recognizer->state != NULL)
+	{
+		if	(recognizer->state->ruleMemo != NULL)
+		{
+			recognizer->state->ruleMemo->free(recognizer->state->ruleMemo);
+			recognizer->state->ruleMemo = antlr3IntTrieNew(15);	/* 15 bit depth is enough for 32768 rules! */
+		}
+	}
+	
+  // ml: 2013-11-05, added reset of old exceptions.
+  pANTLR3_EXCEPTION thisE = recognizer->state->exception;
+  if	(thisE != NULL)
+  {
+    thisE->freeEx(thisE);
+    recognizer->state->exception = NULL;
+  }
+
+    // Install a new following set
+    //
+    recognizer->state->following   = antlr3StackNew(8);
+
+}
+
+// Default implementation is for parser and assumes a token stream as supplied by the runtime.
+// You MAY need to override this function if the standard TOKEN_STREAM is not what you are using.
+//
+static void *				
+getCurrentInputSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream)
+{
+	return ((pANTLR3_TOKEN_STREAM)istream->super)->_LT((pANTLR3_TOKEN_STREAM)istream->super, 1);
+}
+
+// Default implementation is for parser and assumes a token stream as supplied by the runtime.
+// You MAY need to override this function if the standard COMMON_TOKEN_STREAM is not what you are using.
+//
+static void *				
+getMissingSymbol			(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
+									ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow)
+{
+	pANTLR3_TOKEN_STREAM			ts;
+	pANTLR3_COMMON_TOKEN_STREAM		cts;
+	pANTLR3_COMMON_TOKEN			token;
+	pANTLR3_COMMON_TOKEN			current;
+	pANTLR3_STRING					text;
+
+	// Dereference the standard pointers
+	//
+	ts		= (pANTLR3_TOKEN_STREAM)istream->super;
+	cts		= (pANTLR3_COMMON_TOKEN_STREAM)ts->super;
+	
+	// Work out what to use as the current symbol for line and offset information, etc.
+	// If we are at EOF, we use the token before EOF
+	//
+	current	= ts->_LT(ts, 1);
+	if	(current->getType(current) == ANTLR3_TOKEN_EOF)
+	{
+		current = ts->_LT(ts, -1);
+	}
+
+	// Create a new empty token
+	//
+	if	(recognizer->state->tokFactory == NULL)
+	{
+		// We do not yet have a token factory for making tokens, so
+		// we just create a basic one using the input stream of the
+		// current token.
+		//
+		recognizer->state->tokFactory = antlr3TokenFactoryNew(current->input);
+	}
+	token	= recognizer->state->tokFactory->newToken(recognizer->state->tokFactory);
+	if (token == NULL) { return NULL; }
+
+	// Set some of the token properties based on the current token
+	//
+	token->setLine					(token, current->getLine(current));
+	token->setCharPositionInLine	(token, current->getCharPositionInLine(current));
+	token->setChannel				(token, ANTLR3_TOKEN_DEFAULT_CHANNEL);
+	token->setType					(token, expectedTokenType);
+    token->user1                    = current->user1;
+    token->user2                    = current->user2;
+    token->user3                    = current->user3;
+    token->custom                   = current->custom;
+    token->lineStart                = current->lineStart;
+    
+	// Create the token text that shows it has been inserted
+	//
+	token->setText8(token, (pANTLR3_UINT8)"<missing ");
+	text = token->getText(token);
+
+	if	(text != NULL)
+	{
+		text->append8(text, (const char *)recognizer->state->tokenNames[expectedTokenType]);
+		text->append8(text, (const char *)">");
+	}
+	
+	// Finally return the pointer to our new token
+	//
+	return	token;
+}
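+
+/* Note (illustrative): if the expected token is named ID in state->tokenNames, the token
+ * created above ends up with the text "<missing ID>", carrying the line and column of the
+ * current (or pre-EOF) token so that error reporting can point at a sensible location.
+ */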
+
+
+#ifdef	ANTLR3_WINDOWS
+#pragma warning( default : 4100 )
+#endif
+
+/// @}
+///
+
diff --git a/runtime/C/src/antlr3basetree.c b/runtime/C/src/antlr3basetree.c
new file mode 100644
index 0000000..f191f6f
--- /dev/null
+++ b/runtime/C/src/antlr3basetree.c
@@ -0,0 +1,493 @@
+#include    <antlr3basetree.h>
+
+#ifdef	ANTLR3_WINDOWS
+#pragma warning( disable : 4100 )
+#endif
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+static void				*	getChild			(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i);
+static ANTLR3_UINT32		getChildCount		(pANTLR3_BASE_TREE tree);
+static ANTLR3_UINT32		getCharPositionInLine
+(pANTLR3_BASE_TREE tree);
+static ANTLR3_UINT32		getLine				(pANTLR3_BASE_TREE tree);
+static pANTLR3_BASE_TREE    
+getFirstChildWithType
+(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 type);
+static void					addChild			(pANTLR3_BASE_TREE tree, pANTLR3_BASE_TREE child);
+static void					addChildren			(pANTLR3_BASE_TREE tree, pANTLR3_LIST kids);
+static void					replaceChildren		(pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE t);
+
+static	void				freshenPACIndexesAll(pANTLR3_BASE_TREE tree);
+static	void				freshenPACIndexes	(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 offset);
+
+static void					setChild			(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i, void * child);
+static void				*	deleteChild			(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i);
+static void				*	dupTree				(pANTLR3_BASE_TREE tree);
+static pANTLR3_STRING		toStringTree		(pANTLR3_BASE_TREE tree);
+
+
+ANTLR3_API pANTLR3_BASE_TREE
+antlr3BaseTreeNew(pANTLR3_BASE_TREE  tree)
+{
+	/* api */
+	tree->getChild				= getChild;
+	tree->getChildCount			= getChildCount;
+	tree->addChild				= (void (*)(pANTLR3_BASE_TREE, void *))(addChild);
+	tree->addChildren			= addChildren;
+	tree->setChild				= setChild;
+	tree->deleteChild			= deleteChild;
+	tree->dupTree				= dupTree;
+	tree->toStringTree			= toStringTree;
+	tree->getCharPositionInLine	= getCharPositionInLine;
+	tree->getLine				= getLine;
+	tree->replaceChildren		= replaceChildren;
+	tree->freshenPACIndexesAll	= freshenPACIndexesAll;
+	tree->freshenPACIndexes		= freshenPACIndexes;
+	tree->getFirstChildWithType	= (void *(*)(pANTLR3_BASE_TREE, ANTLR3_UINT32))(getFirstChildWithType);
+	tree->children				= NULL;
+	tree->strFactory			= NULL;
+
+	/* Rest must be filled in by caller.
+	*/
+	return  tree;
+}
+
+static ANTLR3_UINT32	
+getCharPositionInLine	(pANTLR3_BASE_TREE tree)
+{
+	return  0;
+}
+
+static ANTLR3_UINT32	
+getLine	(pANTLR3_BASE_TREE tree)
+{
+	return  0;
+}
+static pANTLR3_BASE_TREE
+getFirstChildWithType	(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 type)
+{
+	ANTLR3_UINT32   i;
+	ANTLR3_UINT32   cs;
+
+	pANTLR3_BASE_TREE	t;
+	if	(tree->children != NULL)
+	{
+		cs	= tree->children->size(tree->children);
+		for	(i = 0; i < cs; i++)
+		{
+			t = (pANTLR3_BASE_TREE) (tree->children->get(tree->children, i));
+			if  (tree->getType(t) == type)
+			{
+				return  (pANTLR3_BASE_TREE)t;
+			}
+		}
+	}
+	return  NULL;
+}
+
+
+
+static void    *
+getChild		(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i)
+{
+	if	(      tree->children == NULL
+		|| i >= tree->children->size(tree->children))
+	{
+		return NULL;
+	}
+	return  tree->children->get(tree->children, i);
+}
+
+
+static ANTLR3_UINT32
+getChildCount	(pANTLR3_BASE_TREE tree)
+{
+	if	(tree->children == NULL)
+	{
+		return 0;
+	}
+	else
+	{
+		return	tree->children->size(tree->children);
+	}
+}
+
+void	    
+addChild (pANTLR3_BASE_TREE tree, pANTLR3_BASE_TREE child)
+{
+	ANTLR3_UINT32   n;
+	ANTLR3_UINT32   i;
+
+	if	(child == NULL)
+	{
+		return;
+	}
+
+	if	(child->isNilNode(child) == ANTLR3_TRUE)
+	{
+		if  (child->children != NULL && child->children == tree->children)
+		{
+			// TODO: Change to exception rather than ANTLR3_FPRINTF?
+			//
+			ANTLR3_FPRINTF(stderr, "ANTLR3: An attempt was made to add a child list to itself!\n");
+			return;
+		}
+
+        // Add all of the children's children to this list
+        //
+        if (child->children != NULL)
+        {
+            if (tree->children == NULL)
+            {
+                // We are building the tree structure here, so we need not
+                // worry about duplication of pointers as the tree node
+                // factory will only clean up each node once. So we just
+                // copy in the child's children pointer as the child is
+                // a nil node (has no root of its own).
+                //
+                tree->children = child->children;
+                child->children = NULL;
+                freshenPACIndexesAll(tree);
+                
+            }
+            else
+            {
+                // Need to copy the children
+                //
+                n = child->children->size(child->children);
+
+                for (i = 0; i < n; i++)
+                {
+                    pANTLR3_BASE_TREE entry;
+                    entry = (pANTLR3_BASE_TREE)child->children->get(child->children, i);
+
+                    // ANTLR3 lists can be sparse, unlike Array Lists
+                    //
+                    if (entry != NULL)
+                    {
+                        ANTLR3_UINT32 count = tree->children->add(tree->children, entry, (void (ANTLR3_CDECL *) (void *))child->free);
+
+                        entry->setChildIndex(entry, count - 1);
+                        entry->setParent(entry, tree);
+                    }
+                }
+            }
+		}
+	}
+	else
+	{
+		// Tree we are adding is not a Nil and might have children to copy
+		//
+		if  (tree->children == NULL)
+		{
+			// No children in the tree we are adding to, so create a new list on
+			// the fly to hold them.
+			//
+			tree->createChildrenList(tree);
+		}
+
+		ANTLR3_UINT32 count = tree->children->add(tree->children, child, (void (ANTLR3_CDECL *)(void *))child->free);
+		child->setChildIndex(child, count - 1);
+		child->setParent(child, tree);
+	}
+}
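+
+/* Illustrative example (hypothetical nodes): adding a nil-rooted child splices the child's
+ * children straight into this node rather than nesting the nil node, so
+ *
+ *    t = ^(r x)   then   addChild(t, ^(nil a b c))   yields   ^(r x a b c)
+ */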
+
+/// Add all elements of the supplied list as children of this node
+///
+static void
+addChildren	(pANTLR3_BASE_TREE tree, pANTLR3_LIST kids)
+{
+	ANTLR3_UINT32    i;
+	ANTLR3_UINT32    s;
+
+	s = kids->size(kids);
+	for	(i = 0; i<s; i++)
+	{
+		tree->addChild(tree, (pANTLR3_BASE_TREE)(kids->get(kids, i+1)));
+	}
+}
+
+
+static    void
+setChild	(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i, void * child)
+{
+	if	(tree->children == NULL)
+	{
+		tree->createChildrenList(tree);
+	}
+	tree->children->set(tree->children, i, child, NULL, ANTLR3_FALSE);
+}
+
+static void    *
+deleteChild	(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i)
+{
+	if	( tree->children == NULL)
+	{
+		return	NULL;
+	}
+
+	return  tree->children->remove(tree->children, i);
+}
+
+static void    *
+dupTree		(pANTLR3_BASE_TREE tree)
+{
+	pANTLR3_BASE_TREE	newTree;
+	ANTLR3_UINT32	i;
+	ANTLR3_UINT32	s;
+
+	newTree = (pANTLR3_BASE_TREE)tree->dupNode	    (tree);
+
+	if	(tree->children != NULL)
+	{
+		s	    = tree->children->size  (tree->children);
+
+		for	(i = 0; i < s; i++)
+		{
+			pANTLR3_BASE_TREE    t;
+			pANTLR3_BASE_TREE    newNode;
+
+			t   = (pANTLR3_BASE_TREE) tree->children->get(tree->children, i);
+
+			if  (t!= NULL)
+			{
+				newNode	    = (pANTLR3_BASE_TREE)t->dupTree(t);
+				newTree->addChild(newTree, newNode);
+			}
+		}
+	}
+
+	return newTree;
+}
+
+static pANTLR3_STRING
+toStringTree	(pANTLR3_BASE_TREE tree)
+{
+	pANTLR3_STRING  string;
+	ANTLR3_UINT32   i;
+	ANTLR3_UINT32   n;
+	pANTLR3_BASE_TREE   t;
+
+	if	(tree->children == NULL || tree->children->size(tree->children) == 0)
+	{
+		return	tree->toString(tree);
+	}
+
+	/* Need a new string with nothing at all in it.
+	*/
+	string	= tree->strFactory->newRaw(tree->strFactory);
+
+	if	(tree->isNilNode(tree) == ANTLR3_FALSE)
+	{
+		string->append8	(string, "(");
+		string->appendS	(string, tree->toString(tree));
+		string->append8	(string, " ");
+	}
+	if	(tree->children != NULL)
+	{
+		n = tree->children->size(tree->children);
+
+		for	(i = 0; i < n; i++)
+		{   
+			t   = (pANTLR3_BASE_TREE) tree->children->get(tree->children, i);
+
+			if  (i > 0)
+			{
+				string->append8(string, " ");
+			}
+			string->appendS(string, t->toStringTree(t));
+		}
+	}
+	if	(tree->isNilNode(tree) == ANTLR3_FALSE)
+	{
+		string->append8(string,")");
+	}
+
+	return  string;
+}
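+
+/* Note (illustrative): the result is the usual LISP-like tree form; for example a "+" node
+ * with children "3" and "4" prints as "(+ 3 4)", while a nil root simply prints its
+ * children separated by spaces with no surrounding parentheses.
+ */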
+
+/// Delete children from start to stop and replace with t even if t is
+/// a list (nil-root tree). Num of children can increase or decrease.
+/// For huge child lists, inserting children can force walking rest of
+/// children to set their child index; could be slow.
+///
+static void					
+replaceChildren		(pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE newTree)
+{
+	ANTLR3_INT32	replacingHowMany;		// How many nodes will go away
+	ANTLR3_INT32	replacingWithHowMany;	// How many nodes will replace them
+	ANTLR3_INT32	numNewChildren;			// Tracking variable
+	ANTLR3_INT32	delta;					// Difference in new vs existing count
+
+	ANTLR3_INT32	i;
+	ANTLR3_INT32	j;
+
+	pANTLR3_VECTOR	newChildren;			// Iterator for whatever we are going to add in
+	ANTLR3_BOOLEAN	freeNewChildren;		// Whether we created the iterator locally or reused it
+
+	if	(parent->children == NULL)
+	{
+		ANTLR3_FPRINTF(stderr, "replaceChildren call: Indexes are invalid; no children in list for %s", parent->getText(parent)->chars);
+		return;
+	}
+
+	// Either use the existing list of children in the supplied nil node, or build a vector from the
+	// tree we were given if it is not a nil node; either way we then treat both situations exactly the same.
+	//
+	if	(newTree->isNilNode(newTree))
+	{
+		newChildren = newTree->children;
+		freeNewChildren = ANTLR3_FALSE;		// We must NOT free this memory
+	}
+	else
+	{
+		newChildren = antlr3VectorNew(1);
+		if	(newChildren == NULL)
+		{
+			ANTLR3_FPRINTF(stderr, "replaceChildren: out of memory!!");
+			exit(1);
+		}
+		newChildren->add(newChildren, (void *)newTree, NULL);
+
+		freeNewChildren = ANTLR3_TRUE;		// We must free this memory
+	}
+
+	// Initialize
+	//
+	replacingHowMany		= stopChildIndex - startChildIndex + 1;
+	replacingWithHowMany	= newChildren->size(newChildren);
+	delta					= replacingHowMany - replacingWithHowMany;
+	numNewChildren			= newChildren->size(newChildren);
+
+	// If it is the same number of nodes, then do a direct replacement
+	//
+	if	(delta == 0)
+	{
+		pANTLR3_BASE_TREE	child;
+
+		// Same number of nodes
+		//
+		j	= 0;
+		for	(i = startChildIndex; i <= stopChildIndex; i++)
+		{
+			child = (pANTLR3_BASE_TREE) newChildren->get(newChildren, j);
+			parent->children->set(parent->children, i, child, NULL, ANTLR3_FALSE);
+			child->setParent(child, parent);
+			child->setChildIndex(child, i);
+		}
+	}
+	else if (delta > 0)
+	{
+		ANTLR3_UINT32	indexToDelete;
+
+		// Fewer nodes than there were before;
+		// reuse what we have, then delete the rest
+		//
+		for	(j = 0; j < numNewChildren; j++)
+		{
+			parent->children->set(parent->children, startChildIndex + j, newChildren->get(newChildren, j), NULL, ANTLR3_FALSE);
+		}
+
+		// We just delete the same index position until done
+		//
+		indexToDelete = startChildIndex + numNewChildren;
+
+		for	(j = indexToDelete; j <= (ANTLR3_INT32)stopChildIndex; j++)
+		{
+			parent->children->remove(parent->children, indexToDelete);
+		}
+
+		parent->freshenPACIndexes(parent, startChildIndex);
+	}
+	else
+	{
+		ANTLR3_UINT32 numToInsert;
+
+		// More nodes than there were before
+		// Use what we can, then start adding
+		//
+		for	(j = 0; j < replacingHowMany; j++)
+		{
+			parent->children->set(parent->children, startChildIndex + j, newChildren->get(newChildren, j), NULL, ANTLR3_FALSE);
+		}
+
+		numToInsert = replacingWithHowMany - replacingHowMany;
+
+		for	(j = replacingHowMany; j < replacingWithHowMany; j++)
+		{
+			parent->children->add(parent->children, newChildren->get(newChildren, j), NULL);
+		}
+
+		parent->freshenPACIndexes(parent, startChildIndex);
+	}
+
+	if	(freeNewChildren == ANTLR3_TRUE)
+	{
+		ANTLR3_FREE(newChildren->elements);
+		newChildren->elements = NULL;
+		newChildren->size = 0;
+		ANTLR3_FREE(newChildren);		// Will not free the nodes
+	}
+}
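+
+/* Illustrative example (hypothetical nodes): with parent = ^(r a b c),
+ *
+ *    replaceChildren(parent, 1, 2, ^(nil x y z))   yields   ^(r a x y z)
+ *
+ * i.e. the children at indexes 1..2 (b and c) are replaced by the three nodes of the
+ * nil-rooted replacement, and child indexes from position 1 onward are then refreshed.
+ */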
+
+/// Set the parent and child indexes for all children of the
+/// supplied tree.
+///
+static	void
+freshenPACIndexesAll(pANTLR3_BASE_TREE tree)
+{
+	tree->freshenPACIndexes(tree, 0);
+}
+
+/// Set the parent and child indexes for some of the children of the
+/// supplied tree, starting with the child at the supplied index.
+///
+static	void
+freshenPACIndexes	(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 offset)
+{
+	ANTLR3_UINT32	count;
+	ANTLR3_UINT32	c;
+
+	count	= tree->getChildCount(tree);		// How many children do we have 
+
+	// Loop from the supplied index and set the indexes and parent
+	//
+	for	(c = offset; c < count; c++)
+	{
+		pANTLR3_BASE_TREE	child;
+
+		child = (pANTLR3_BASE_TREE)tree->getChild(tree, c);
+
+		child->setChildIndex(child, c);
+		child->setParent(child, tree);
+	}
+}
+
diff --git a/runtime/C/src/antlr3basetreeadaptor.c b/runtime/C/src/antlr3basetreeadaptor.c
new file mode 100644
index 0000000..1f42751
--- /dev/null
+++ b/runtime/C/src/antlr3basetreeadaptor.c
@@ -0,0 +1,909 @@
+/** \file
+ * Contains the base functions that all tree adaptors start with.
+ * This implementation can then be overridden by any higher implementation.
+ * 
+ */
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3basetreeadaptor.h>
+
+#ifdef	ANTLR3_WINDOWS
+#pragma warning( disable : 4100 )
+#endif
+
+/* Interface functions
+ */
+static	pANTLR3_BASE_TREE	nilNode					(pANTLR3_BASE_TREE_ADAPTOR adaptor);
+static	pANTLR3_BASE_TREE	dbgNil					(pANTLR3_BASE_TREE_ADAPTOR adaptor);
+static	pANTLR3_BASE_TREE	dupTree					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	pANTLR3_BASE_TREE	dbgDupTree				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	pANTLR3_BASE_TREE	dupTreeTT				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE parent);
+static	void				addChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE child);
+static	void				dbgAddChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE child);
+static	pANTLR3_BASE_TREE	becomeRoot				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE newRoot, pANTLR3_BASE_TREE oldRoot);
+static	pANTLR3_BASE_TREE	dbgBecomeRoot			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE newRoot, pANTLR3_BASE_TREE oldRoot);
+static	pANTLR3_BASE_TREE	rulePostProcessing		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE root);
+static	void				addChildToken			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN child);
+static	void				dbgAddChildToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN child);
+static	pANTLR3_BASE_TREE	becomeRootToken			(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * newRoot, pANTLR3_BASE_TREE oldRoot);
+static	pANTLR3_BASE_TREE	dbgBecomeRootToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * newRoot, pANTLR3_BASE_TREE oldRoot);
+static	pANTLR3_BASE_TREE	createTypeToken			(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken);
+static	pANTLR3_BASE_TREE	dbgCreateTypeToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken);
+static	pANTLR3_BASE_TREE	createTypeTokenText		(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken, pANTLR3_UINT8 text);
+static	pANTLR3_BASE_TREE	dbgCreateTypeTokenText	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken, pANTLR3_UINT8 text);
+static	pANTLR3_BASE_TREE	createTypeText			(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text);
+static	pANTLR3_BASE_TREE	dbgCreateTypeText		(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text);
+static	ANTLR3_UINT32		getType					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	void				setType					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 type);
+static	pANTLR3_STRING		getText					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	void				setText					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_STRING t);
+static	void				setText8				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_UINT8 t);
+static	pANTLR3_BASE_TREE	getChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i);
+static	ANTLR3_UINT32		getChildCount			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	ANTLR3_UINT32		getUniqueID				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	ANTLR3_BOOLEAN		isNilNode				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	pANTLR3_STRING		makeDot					(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * theTree);
+
+/** Given a pointer to a base tree adaptor structure (which is usually embedded in the
+ *  super class that implements the tree adaptor used in the parse), initialize its
+ *  function pointers and so on.
+ */
+ANTLR3_API void
+antlr3BaseTreeAdaptorInit(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_DEBUG_EVENT_LISTENER	debugger)
+{
+	// Initialize the interface
+	//
+	if	(debugger == NULL)
+	{
+		adaptor->nilNode				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR)) 								
+																				nilNode;
+		adaptor->addChild				= (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))								
+																				addChild;
+		adaptor->becomeRoot				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))				
+																				becomeRoot;
+		adaptor->addChildToken			= (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, pANTLR3_COMMON_TOKEN))	
+																				addChildToken;
+		adaptor->becomeRootToken		= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
+																				becomeRootToken;
+		adaptor->createTypeToken		= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_COMMON_TOKEN))
+																				createTypeToken;
+		adaptor->createTypeTokenText	= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_COMMON_TOKEN, pANTLR3_UINT8))
+																				createTypeTokenText;
+		adaptor->createTypeText			= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_UINT8))
+																				createTypeText;
+		adaptor->dupTree				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))		 				
+																				dupTree;
+	}
+	else
+	{
+		adaptor->nilNode				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR))
+                                                                                dbgNil;
+		adaptor->addChild				= (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
+                                                                                dbgAddChild;
+		adaptor->becomeRoot				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
+																				dbgBecomeRoot;
+		adaptor->addChildToken			= (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, pANTLR3_COMMON_TOKEN))
+                                                                                dbgAddChildToken;
+		adaptor->becomeRootToken		= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
+                                                                                dbgBecomeRootToken;
+		adaptor->createTypeToken		= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_COMMON_TOKEN))
+                                                                                dbgCreateTypeToken;
+		adaptor->createTypeTokenText	= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_COMMON_TOKEN, pANTLR3_UINT8))
+                                                                                dbgCreateTypeTokenText;
+		adaptor->createTypeText			= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, ANTLR3_UINT32, pANTLR3_UINT8))
+                                                                                dbgCreateTypeText;
+		adaptor->dupTree				= (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                                                dbgDupTree;
+		debugger->adaptor				= adaptor;
+	}
+
+	adaptor->dupTreeTT				=  (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
+                                                                                dupTreeTT;
+	adaptor->rulePostProcessing		=  (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                                                rulePostProcessing;
+	adaptor->getType				=  (ANTLR3_UINT32 (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                                                getType;
+	adaptor->setType				=  (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32))
+																				setType;
+	adaptor->getText				=  (pANTLR3_STRING (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                                                getText;
+	adaptor->setText8				=  (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_UINT8))
+																				setText8;
+	adaptor->setText				=  (void   (*)(pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_STRING))
+                                                                                setText;
+	adaptor->getChild				=  (void * (*)(pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32))
+                                                                                getChild;
+	adaptor->getChildCount			=  (ANTLR3_UINT32 (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                                                getChildCount;
+	adaptor->getUniqueID			=  (ANTLR3_UINT32 (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                                                getUniqueID;
+	adaptor->isNilNode				=  (ANTLR3_BOOLEAN (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                                                isNilNode;
+
+	adaptor->makeDot				=  (pANTLR3_STRING  (*)(pANTLR3_BASE_TREE_ADAPTOR, void *))
+																				makeDot;
+	
+	/* Remaining functions filled in by the caller.
+	 */
+	return;
+}
+
+static void
+defineDotNodes(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * t, pANTLR3_STRING dotSpec )
+{
+	// How many nodes are we talking about?
+	//
+	int	nCount;
+	int i;
+    pANTLR3_BASE_TREE child;
+	char	buff[64];
+	pANTLR3_STRING	text;
+	int		j;
+
+	// Count the nodes
+	//
+	nCount = adaptor->getChildCount(adaptor, t);
+
+	if	(nCount == 0)
+	{
+		// This will already have been included as a child of another node
+		// so there is nothing to add.
+		//
+		return;
+	}
+
+	// For each child of the current tree, define a node using the
+	// memory address of the node to name it
+	//
+	for	(i = 0; i<nCount; i++)
+	{
+
+		// Pick up a pointer for the child
+		//
+		child = (pANTLR3_BASE_TREE)adaptor->getChild(adaptor, t, i);
+
+		// Name the node
+		//
+		sprintf(buff, "\tn%p[label=\"", child);
+		dotSpec->append8(dotSpec, buff);
+		text = adaptor->getText(adaptor, child);
+		for (j = 0; j < (ANTLR3_INT32)(text->len); j++)
+		{
+            switch(text->charAt(text, j))
+            {
+                case '"':
+
+                    dotSpec->append8(dotSpec, "\\\"");
+                    break;
+
+                case '\n':
+
+                    dotSpec->append8(dotSpec, "\\n");
+                    break;
+
+                case '\r':
+
+                    dotSpec->append8(dotSpec, "\\r");
+                    break;
+
+                default:
+
+                    dotSpec->addc(dotSpec, text->charAt(text, j));
+                    break;
+            }
+		}
+		dotSpec->append8(dotSpec, "\"]\n");
+
+		// And now define the children of this child (if any)
+		//
+		defineDotNodes(adaptor, child, dotSpec);
+	}
+	
+	// Done
+	//
+	return;
+}
+
+static void
+defineDotEdges(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * t, pANTLR3_STRING dotSpec)
+{
+	// How many nodes are we talking about?
+	//
+	int	nCount;
+	int i;
+
+	if	(t == NULL)
+	{
+		// No tree, so do nothing
+		//
+		return;
+	}
+
+	// Count the nodes
+	//
+	nCount = adaptor->getChildCount(adaptor, t);
+
+	if	(nCount == 0)
+	{
+		// This will already have been included as a child of another node
+		// so there is nothing to add.
+		//
+		return;
+	}
+
+	// For each child, define an edge from this parent, then process
+	// and children of this child in the same way
+	//
+	for	(i=0; i<nCount; i++)
+	{
+		pANTLR3_BASE_TREE child;
+		char	buff[128];
+        pANTLR3_STRING text;
+        int                 j;
+
+		// Next child
+		//
+		child	= (pANTLR3_BASE_TREE)adaptor->getChild(adaptor, t, i);
+
+		// Create the edge relation
+		//
+		sprintf(buff, "\t\tn%p -> n%p\t\t// ",  t, child);
+        
+		dotSpec->append8(dotSpec, buff);
+
+		// Document the relationship
+		//
+        text = adaptor->getText(adaptor, t);
+		for (j = 0; j < (ANTLR3_INT32)(text->len); j++)
+        {
+                switch(text->charAt(text, j))
+                {
+                    case '"':
+
+                        dotSpec->append8(dotSpec, "\\\"");
+                        break;
+
+                    case '\n':
+
+                        dotSpec->append8(dotSpec, "\\n");
+                        break;
+
+                    case '\r':
+
+                        dotSpec->append8(dotSpec, "\\r");
+                        break;
+
+                    default:
+
+                        dotSpec->addc(dotSpec, text->charAt(text, j));
+                        break;
+                }
+        }
+
+        dotSpec->append8(dotSpec, " -> ");
+
+        text = adaptor->getText(adaptor, child);
+        for (j = 0; j < (ANTLR3_INT32)(text->len); j++)
+        {
+                switch(text->charAt(text, j))
+                {
+                    case '"':
+
+                        dotSpec->append8(dotSpec, "\\\"");
+                        break;
+
+                    case '\n':
+
+                        dotSpec->append8(dotSpec, "\\n");
+                        break;
+
+                    case '\r':
+
+                        dotSpec->append8(dotSpec, "\\r");
+                        break;
+
+                    default:
+
+                        dotSpec->addc(dotSpec, text->charAt(text, j));
+                        break;
+                }
+        }
+		dotSpec->append8(dotSpec, "\n");
+
+        
+		// Define edges for this child
+		//
+		defineDotEdges(adaptor, child, dotSpec);
+	}
+
+	// Done
+	//
+	return;
+}
+
+/// Produce a DOT specification for graphviz
+//
+static pANTLR3_STRING
+makeDot	(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * theTree)
+{
+	// The string we are building up
+	//
+	pANTLR3_STRING		dotSpec;
+	char                buff[64];
+	pANTLR3_STRING      text;
+	int                 j;
+
+	dotSpec = adaptor->strFactory->newStr8
+		
+		(
+			adaptor->strFactory,
+
+			// Default look and feel
+			//
+			(pANTLR3_UINT8)
+			"digraph {\n\n"
+			"\tordering=out;\n"
+			"\tranksep=.4;\n"
+			"\tbgcolor=\"lightgrey\";  node [shape=box, fixedsize=false, fontsize=12, fontname=\"Helvetica-bold\", fontcolor=\"blue\"\n"
+			"\twidth=.25, height=.25, color=\"black\", fillcolor=\"white\", style=\"filled, solid, bold\"];\n\n"
+			"\tedge [arrowsize=.5, color=\"black\", style=\"bold\"]\n\n"
+		);
+
+    if	(theTree == NULL)
+	{
+		// No tree, so create a blank spec
+		//
+		dotSpec->append8(dotSpec, "n0[label=\"EMPTY TREE\"]\n");
+		return dotSpec;
+	}
+
+    sprintf(buff, "\tn%p[label=\"", theTree);
+	dotSpec->append8(dotSpec, buff);
+    text = adaptor->getText(adaptor, theTree);
+    for (j = 0; j < (ANTLR3_INT32)(text->len); j++)
+    {
+            switch(text->charAt(text, j))
+            {
+                case '"':
+
+                    dotSpec->append8(dotSpec, "\\\"");
+                    break;
+
+                case '\n':
+
+                    dotSpec->append8(dotSpec, "\\n");
+                    break;
+
+                case '\r':
+
+                    dotSpec->append8(dotSpec, "\\r");
+                    break;
+
+                default:
+
+                    dotSpec->addc(dotSpec, text->charAt(text, j));
+                    break;
+            }
+    }
+	dotSpec->append8(dotSpec, "\"]\n");
+
+	// First produce the node definitions
+	//
+	defineDotNodes(adaptor, theTree, dotSpec);
+	dotSpec->append8(dotSpec, "\n");
+	defineDotEdges(adaptor, theTree, dotSpec);
+	
+	// Terminate the spec
+	//
+	dotSpec->append8(dotSpec, "\n}");
+
+	// Result
+	//
+	return dotSpec;
+}
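+
+/* Note (illustrative): the returned string is a complete Graphviz digraph specification, so
+ * a caller could, for example, write dotSpec->chars out to a file and render it with
+ * something like "dot -Tpng tree.dot -o tree.png". The file name here is just an example.
+ */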
+
+
+/** Create and return a nil tree node (no token payload)
+ */
+static	pANTLR3_BASE_TREE	
+nilNode	    (pANTLR3_BASE_TREE_ADAPTOR adaptor)
+{
+	return	(pANTLR3_BASE_TREE)adaptor->create(adaptor, NULL);
+}
+
+static	pANTLR3_BASE_TREE	
+dbgNil	    (pANTLR3_BASE_TREE_ADAPTOR adaptor)
+{
+	pANTLR3_BASE_TREE t;
+
+	t = (pANTLR3_BASE_TREE)adaptor->create				(adaptor, NULL);
+	adaptor->debugger->createNode	(adaptor->debugger, t);
+
+	return	t;
+}
+
+/** Return a duplicate of the entire tree (implementation provided by the
+ *  BASE_TREE interface).
+ */
+static	pANTLR3_BASE_TREE	
+dupTree  (pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
+{
+	return	(pANTLR3_BASE_TREE)adaptor->dupTreeTT(adaptor, t, NULL);
+}
+
+pANTLR3_BASE_TREE
+dupTreeTT			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE parent)
+{
+	pANTLR3_BASE_TREE	newTree;
+	pANTLR3_BASE_TREE	child;
+	pANTLR3_BASE_TREE	newSubTree;
+	ANTLR3_UINT32		n;
+	ANTLR3_UINT32		i;
+
+	if	(t == NULL)
+	{
+		return NULL;
+	}
+	newTree = (pANTLR3_BASE_TREE)t->dupNode(t);
+
+	// Ensure new subtree root has parent/child index set
+	//
+	adaptor->setChildIndex		(adaptor, newTree, t->getChildIndex(t));
+	adaptor->setParent			(adaptor, newTree, parent);
+	n = adaptor->getChildCount	(adaptor, t);
+
+	for	(i=0; i < n; i++)
+	{
+		child = (pANTLR3_BASE_TREE)adaptor->getChild		(adaptor, t, i);
+		newSubTree = (pANTLR3_BASE_TREE)adaptor->dupTreeTT	(adaptor, child, t);
+		adaptor->addChild				(adaptor, newTree, newSubTree);
+	}
+	return	newTree;
+}
+
+/// Sends the required debugging events for duplicating a tree
+/// to the debugger.
+///
+static void
+simulateTreeConstruction(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE tree)
+{
+	ANTLR3_UINT32		n;
+	ANTLR3_UINT32		i;
+	pANTLR3_BASE_TREE	child;
+
+	// Send the create node event
+	//
+	adaptor->debugger->createNode(adaptor->debugger, tree);
+
+	n = adaptor->getChildCount(adaptor, tree);
+	for	(i = 0; i < n; i++)
+	{
+		child = (pANTLR3_BASE_TREE)adaptor->getChild(adaptor, tree, i);
+		simulateTreeConstruction(adaptor, child);
+		adaptor->debugger->addChild(adaptor->debugger, tree, child);
+	}
+}
+
+pANTLR3_BASE_TREE
+dbgDupTree		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE tree)
+{
+	pANTLR3_BASE_TREE t;
+
+	// Call the normal dup tree mechanism first
+	//
+	t = (pANTLR3_BASE_TREE)adaptor->dupTreeTT(adaptor, tree, NULL);
+
+	// In order to tell the debugger what we have just done, we now
+	// simulate the tree building mechanism. This will fire
+	// lots of debugging events to the client and make it look as though
+	// we duplicated the tree.
+	//
+	simulateTreeConstruction(adaptor, t);
+
+	return t;
+}
+
+/** Add a child to the tree t.  If child is a flat tree (a list), make all
+ *  in list children of t. Warning: if t has no children, but child does
+ *  and child isNilNode then it is ok to move children to t via
+ *  t.children = child.children; i.e., without copying the array.  This
+ *  is for construction and I'm not sure it's completely general for
+ *  a tree's addChild method to work this way.  Make sure you differentiate
+ *  between your tree's addChild and this parser tree construction addChild
+ *  if it's not ok to move children to t with a simple assignment.
+ */
+static	void	
+addChild (pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE child)
+{
+	if	(t != NULL && child != NULL)
+	{
+		t->addChild(t, child);
+	}
+}
+static	void	
+dbgAddChild (pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_BASE_TREE child)
+{
+	if	(t != NULL && child != NULL)
+	{
+		t->addChild(t, child);
+		adaptor->debugger->addChild(adaptor->debugger, t, child);
+	}
+}
+/** Use the adaptor implementation to add a child node with the supplied token
+ */
+static	void		
+addChildToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN child)
+{
+	if	(t != NULL && child != NULL)
+	{
+		adaptor->addChild(adaptor, t, adaptor->create(adaptor, child));
+	}
+}
+static	void		
+dbgAddChildToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN child)
+{
+	pANTLR3_BASE_TREE	tc;
+
+	if	(t != NULL && child != NULL)
+	{
+		tc = (pANTLR3_BASE_TREE)adaptor->create(adaptor, child);
+		adaptor->addChild(adaptor, t, tc);
+		adaptor->debugger->addChild(adaptor->debugger, t, tc);
+	}
+}
+
+/** If oldRoot is a nil root, just copy or move the children to newRoot.
+ *  If not a nil root, make oldRoot a child of newRoot.
+ *
+ * \code
+ *    old=^(nil a b c), new=r yields ^(r a b c)
+ *    old=^(a b c), new=r yields ^(r ^(a b c))
+ * \endcode
+ *
+ *  If newRoot is a nil-rooted single child tree, use the single
+ *  child as the new root node.
+ *
+ * \code
+ *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
+ *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
+ * \endcode
+ *
+ *  If oldRoot was null, it's ok, just return newRoot (even if isNilNode).
+ *
+ * \code
+ *    old=null, new=r yields r
+ *    old=null, new=^(nil r) yields ^(nil r)
+ * \endcode
+ *
+ *  Return newRoot.  Throw an exception if newRoot is not a
+ *  simple node or nil root with a single child node--it must be a root
+ *  node.  If newRoot is <code>^(nil x)</code>, return x as newRoot.
+ *
+ *  Be advised that it's ok for newRoot to point at oldRoot's
+ *  children; i.e., you don't have to copy the list.  We are
+ *  constructing these nodes so we should have this control for
+ *  efficiency.
+ */
+static	pANTLR3_BASE_TREE	
+becomeRoot	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE newRootTree, pANTLR3_BASE_TREE oldRootTree)
+{
+    pANTLR3_BASE_TREE saveRoot;
+
+	/* Protect against tree rewrites if we are in some sort of error
+	 * state, but have tried to recover. In C we can end up with a null pointer
+	 * for a tree that was not produced.
+	 */
+	if	(newRootTree == NULL)
+	{
+		return	oldRootTree;
+	}
+
+	/* root is just the new tree as is if there is no
+	 * current root tree.
+	 */
+	if	(oldRootTree == NULL)
+	{
+		return	newRootTree;
+	}
+
+	/* Produce ^(nil real-node)
+	 */
+	if	(newRootTree->isNilNode(newRootTree))
+	{
+		if	(newRootTree->getChildCount(newRootTree) > 1)
+		{
+			/* TODO: Handle tree exceptions 
+			 */
+			ANTLR3_FPRINTF(stderr, "More than one node as root! TODO: Create tree exception handling\n");
+			return newRootTree;
+		}
+
+		/* The new root is the first child, keep track of the original newRoot
+         * because if it was a Nil Node, then we can reuse it now.
+		 */
+        saveRoot    = newRootTree;
+		newRootTree = (pANTLR3_BASE_TREE)newRootTree->getChild(newRootTree, 0);
+
+        // Reclaim the old nilNode()
+        //
+        saveRoot->reuse(saveRoot);
+	}
+
+	/* Add old root into new root. addChild takes care of the case where oldRoot
+	 * is a flat list (nil rooted tree). All children of oldRoot are added to
+	 * the new root.
+	 */
+	newRootTree->addChild(newRootTree, oldRootTree);
+
+    // If the oldroot tree was a nil node, then we know at this point
+    // it has become orphaned by the rewrite logic, so we tell it to do
+    // whatever it needs to do to be reused.
+    //
+    if  (oldRootTree->isNilNode(oldRootTree))
+    {
+        // We have taken an old root tree and appended all its children to the new
+        // root. In addition, it was a nil node, which means the generated code
+        // will not reuse it again, so we reclaim it here. First we want to zero out
+        // any pointers it was carrying around. We are just the baseTree handler, so we
+        // do not necessarily know how to do this for the real node; we just ask the
+        // tree itself to do it.
+        //
+        oldRootTree->reuse(oldRootTree);
+    }
+	/* Always returns new root structure
+	 */
+	return	newRootTree;
+
+}
+static	pANTLR3_BASE_TREE	
+dbgBecomeRoot	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE newRootTree, pANTLR3_BASE_TREE oldRootTree)
+{
+	pANTLR3_BASE_TREE t;
+	
+	t = becomeRoot(adaptor, newRootTree, oldRootTree);
+
+	adaptor->debugger->becomeRoot(adaptor->debugger, newRootTree, oldRootTree);
+
+	return t;
+}
+/** Transform ^(nil x) to x 
+ */
+static	pANTLR3_BASE_TREE	
+   rulePostProcessing	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE root)
+{
+    pANTLR3_BASE_TREE saveRoot;
+
+    // Keep track of the root we are given. If it is a nilNode, then we
+    // can reuse it rather than orphaning it!
+    //
+    saveRoot = root;
+
+	if (root != NULL && root->isNilNode(root))
+	{
+		if	(root->getChildCount(root) == 0)
+		{
+			root = NULL;
+		}
+		else if	(root->getChildCount(root) == 1)
+		{
+			root = (pANTLR3_BASE_TREE)root->getChild(root, 0);
+			root->setParent(root, NULL);
+			root->setChildIndex(root, -1);
+
+            // The root we were given was a nil node with one child, which means it has
+            // been abandoned and would be lost in the node factory. However,
+            // nodes can be flagged as reusable to prevent this terrible waste.
+            //
+            saveRoot->reuse(saveRoot);
+		}
+	}
+
+	return root;
+}
+ 
+/** Use the adaptor interface to set a new tree node with the supplied token
+ *  to the root of the tree.
+ */
+static	pANTLR3_BASE_TREE	
+   becomeRootToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * newRoot, pANTLR3_BASE_TREE oldRoot)
+{
+	return	(pANTLR3_BASE_TREE)adaptor->becomeRoot(adaptor, adaptor->create(adaptor, (pANTLR3_COMMON_TOKEN)newRoot), oldRoot);
+}
+static	pANTLR3_BASE_TREE	
+dbgBecomeRootToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, void * newRoot, pANTLR3_BASE_TREE oldRoot)
+{
+	pANTLR3_BASE_TREE	t;
+
+	t =	(pANTLR3_BASE_TREE)adaptor->becomeRoot(adaptor, adaptor->create(adaptor, (pANTLR3_COMMON_TOKEN)newRoot), oldRoot);
+
+	adaptor->debugger->becomeRoot(adaptor->debugger,t, oldRoot);
+
+	return t;
+}
+
+/** Use the super class supplied create() method to create a new node
+ *  from the supplied token.
+ */
+static	pANTLR3_BASE_TREE	
+createTypeToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken)
+{
+	/* Create the new token
+	 */
+	fromToken = adaptor->createTokenFromToken(adaptor, fromToken);
+
+	/* Set the type of the new token to that supplied
+	 */
+	fromToken->setType(fromToken, tokenType);
+
+	/* Return a new node based upon this token
+	 */
+	return	(pANTLR3_BASE_TREE)adaptor->create(adaptor, fromToken);
+}
+static	pANTLR3_BASE_TREE	
+dbgCreateTypeToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken)
+{
+	pANTLR3_BASE_TREE t;
+
+	t = createTypeToken(adaptor, tokenType, fromToken);
+
+	adaptor->debugger->createNode(adaptor->debugger, t);
+
+	return t;
+}
+
+static	pANTLR3_BASE_TREE	
+createTypeTokenText	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken, pANTLR3_UINT8 text)
+{
+	/* Create the new token
+	 */
+	fromToken = adaptor->createTokenFromToken(adaptor, fromToken);
+
+	/* Set the type of the new token to that supplied
+	 */
+	fromToken->setType(fromToken, tokenType);
+
+	/* Set the text of the token accordingly
+	 */
+	fromToken->setText8(fromToken, text);
+
+	/* Return a new node based upon this token
+	 */
+	return	(pANTLR3_BASE_TREE)adaptor->create(adaptor, fromToken);
+}
+static	pANTLR3_BASE_TREE	
+dbgCreateTypeTokenText	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_COMMON_TOKEN fromToken, pANTLR3_UINT8 text)
+{
+	pANTLR3_BASE_TREE t;
+
+	t = createTypeTokenText(adaptor, tokenType, fromToken, text);
+
+	adaptor->debugger->createNode(adaptor->debugger, t);
+
+	return t;
+}
+
+static	pANTLR3_BASE_TREE	
+   createTypeText	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text)
+{
+	pANTLR3_COMMON_TOKEN	fromToken;
+
+	/* Create the new token
+	 */
+	fromToken = adaptor->createToken(adaptor, tokenType, text);
+
+	/* Return a new node based upon this token
+	 */
+	return	(pANTLR3_BASE_TREE)adaptor->create(adaptor, fromToken);
+}
+static	pANTLR3_BASE_TREE	
+   dbgCreateTypeText	(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text)
+{
+	pANTLR3_BASE_TREE t;
+
+	t = createTypeText(adaptor, tokenType, text);
+
+	adaptor->debugger->createNode(adaptor->debugger, t);
+
+	return t;
+
+}
+/** Dummy implementation - will be supplied by super class
+ */
+static	ANTLR3_UINT32	
+   getType		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
+{
+	return	0;
+}
+
+/** Dummy implementation - will be supplied by super class
+ */
+static	void		
+   setType		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 type)
+{
+	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement setType()\n");
+}
+
+/** Dummy implementation - will be supplied by super class
+ */
+static	pANTLR3_STRING	
+   getText		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
+{
+	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement getText()\n");
+	return	NULL;
+}
+
+/** Dummy implementation - will be supplied by super class
+ */
+static	void		
+   setText		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_STRING t)
+{
+	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement setText()\n");
+}
+/** Dummy implementation - will be supplied by super class
+ */
+static	void		
+setText8		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_UINT8 t)
+{
+	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement setText8()\n");
+}
+
+static	pANTLR3_BASE_TREE	
+   getChild		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE tree, ANTLR3_UINT32 i)
+{
+	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement getChild()\n");
+	return NULL;
+}
+
+static	ANTLR3_UINT32	
+   getChildCount	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE tree)
+{
+	ANTLR3_FPRINTF(stderr, "Internal error - implementor of superclass containing ANTLR3_TREE_ADAPTOR did not implement getChildCount()\n");
+	return 0;
+}
+
+/** Returns a uniqueID for the node. Because this is the C implementation
+ *  we can just use its address suitably converted/cast to an integer.
+ */
+static	ANTLR3_UINT32	
+   getUniqueID		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE node)
+{
+	return	ANTLR3_UINT32_CAST(node);
+}
+
+static	ANTLR3_BOOLEAN
+isNilNode					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
+{
+	return t->isNilNode(t);
+}
diff --git a/runtime/C/src/antlr3bitset.c b/runtime/C/src/antlr3bitset.c
new file mode 100644
index 0000000..4434ed1
--- /dev/null
+++ b/runtime/C/src/antlr3bitset.c
@@ -0,0 +1,680 @@
+///
+/// \file
+/// Contains the C implementation of ANTLR3 bitsets as adapted from Terence Parr's
+/// Java implementation.
+///
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3bitset.h>
+
+// External interface
+//
+
+static	pANTLR3_BITSET  antlr3BitsetClone		(pANTLR3_BITSET inSet);
+static	pANTLR3_BITSET  antlr3BitsetOR			(pANTLR3_BITSET bitset1, pANTLR3_BITSET bitset2);
+static	void			antlr3BitsetORInPlace	(pANTLR3_BITSET bitset, pANTLR3_BITSET bitset2);
+static	ANTLR3_UINT32	antlr3BitsetSize		(pANTLR3_BITSET bitset);
+static	void			antlr3BitsetAdd			(pANTLR3_BITSET bitset, ANTLR3_INT32 bit);
+static	ANTLR3_BOOLEAN	antlr3BitsetEquals		(pANTLR3_BITSET bitset1, pANTLR3_BITSET bitset2);
+static	ANTLR3_BOOLEAN	antlr3BitsetMember		(pANTLR3_BITSET bitset, ANTLR3_UINT32 bit);
+static	ANTLR3_UINT32	antlr3BitsetNumBits		(pANTLR3_BITSET bitset);
+static	void			antlr3BitsetRemove		(pANTLR3_BITSET bitset, ANTLR3_UINT32 bit);
+static	ANTLR3_BOOLEAN	antlr3BitsetIsNil		(pANTLR3_BITSET bitset);
+static	pANTLR3_INT32	antlr3BitsetToIntList	(pANTLR3_BITSET bitset);
+
+// Local functions
+//
+static	void			growToInclude		(pANTLR3_BITSET bitset, ANTLR3_INT32 bit);
+static	void			grow				(pANTLR3_BITSET bitset, ANTLR3_INT32 newSize);
+static	ANTLR3_UINT64	bitMask				(ANTLR3_UINT32 bitNumber);
+static	ANTLR3_UINT32	numWordsToHold		(ANTLR3_UINT32 bit);
+static	ANTLR3_UINT32	wordNumber			(ANTLR3_UINT32 bit);
+static	void			antlr3BitsetFree	(pANTLR3_BITSET bitset);
+
+static void
+antlr3BitsetFree(pANTLR3_BITSET bitset)
+{
+    if	(bitset->blist.bits != NULL)
+    {
+		ANTLR3_FREE(bitset->blist.bits);
+		bitset->blist.bits = NULL;
+    }
+    ANTLR3_FREE(bitset);
+
+    return;
+}
+
+ANTLR3_API pANTLR3_BITSET
+antlr3BitsetNew(ANTLR3_UINT32 numBits)
+{
+	pANTLR3_BITSET  bitset;
+
+	ANTLR3_UINT32   numelements;
+
+	// Allocate memory for the bitset structure itself
+	//
+	bitset  = (pANTLR3_BITSET) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_BITSET));
+
+	if	(bitset == NULL)
+	{
+		return	NULL;
+	}
+
+	// Avoid memory thrashing at the up front expense of a few bytes
+	//
+	if	(numBits < (8 * ANTLR3_BITSET_BITS))
+	{
+		numBits = 8 * ANTLR3_BITSET_BITS;
+	}
+
+	// Now we need to allocate the memory for the number of bits asked for
+	// in multiples of ANTLR3_UINT64. 
+	//
+	numelements	= ((numBits -1) >> ANTLR3_BITSET_LOG_BITS) + 1;
+
+	bitset->blist.bits    = (pANTLR3_BITWORD) ANTLR3_MALLOC((size_t)(numelements * sizeof(ANTLR3_BITWORD)));
+	if	(bitset->blist.bits == NULL)
+	{
+		ANTLR3_FREE(bitset);
+		return	NULL;
+	}
+	memset(bitset->blist.bits, 0, (size_t)(numelements * sizeof(ANTLR3_BITWORD)));
+	bitset->blist.length  = numelements;
+
+	antlr3BitsetSetAPI(bitset);
+
+
+	// All seems good
+	//
+	return  bitset;
+}
+
+ANTLR3_API void
+antlr3BitsetSetAPI(pANTLR3_BITSET bitset)
+{
+    bitset->clone		=    antlr3BitsetClone;
+    bitset->bor			=    antlr3BitsetOR;
+    bitset->borInPlace	=    antlr3BitsetORInPlace;
+    bitset->size		=    antlr3BitsetSize;
+    bitset->add			=    antlr3BitsetAdd;
+    bitset->grow		=    grow;
+    bitset->equals		=    antlr3BitsetEquals;
+    bitset->isMember	=    antlr3BitsetMember;
+    bitset->numBits		=    antlr3BitsetNumBits;
+    bitset->remove		=    antlr3BitsetRemove;
+    bitset->isNilNode		=    antlr3BitsetIsNil;
+    bitset->toIntList	=    antlr3BitsetToIntList;
+
+    bitset->free		=    antlr3BitsetFree;
+}
+
+ANTLR3_API pANTLR3_BITSET
+antlr3BitsetCopy(pANTLR3_BITSET_LIST blist)
+{
+    pANTLR3_BITSET  bitset;
+	int				numElements;
+
+    // Allocate memory for the bitset structure itself
+    //
+    bitset  = (pANTLR3_BITSET) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_BITSET));
+
+    if	(bitset == NULL)
+    {
+		return	NULL;
+    }
+
+	numElements = blist->length;
+
+    // Avoid memory thrashing at the expense of a few more bytes
+    //
+    if	(numElements < 8)
+    {
+		numElements = 8;
+    }
+
+    // Install the length in ANTLR3_UINT64 units
+    //
+    bitset->blist.length  = numElements;
+
+    bitset->blist.bits    = (pANTLR3_BITWORD)ANTLR3_MALLOC((size_t)(numElements * sizeof(ANTLR3_BITWORD)));
+
+    if	(bitset->blist.bits == NULL)
+    {
+		ANTLR3_FREE(bitset);
+		return	NULL;
+    }
+
+	ANTLR3_MEMCPY(bitset->blist.bits, blist->bits, (ANTLR3_UINT64)(numElements * sizeof(ANTLR3_BITWORD)));
+
+    // All seems good
+    //
+    return  bitset;
+}
+
+static pANTLR3_BITSET
+antlr3BitsetClone(pANTLR3_BITSET inSet)
+{
+    pANTLR3_BITSET  bitset;
+
+    // Allocate memory for the bitset structure itself
+    //
+    bitset  = antlr3BitsetNew(ANTLR3_BITSET_BITS * inSet->blist.length);
+
+    if	(bitset == NULL)
+    {
+		return	NULL;
+    }
+
+    // Copy the actual bits from the source set into the clone
+    //
+    ANTLR3_MEMCPY(bitset->blist.bits, inSet->blist.bits, (ANTLR3_UINT64)(inSet->blist.length * sizeof(ANTLR3_BITWORD)));
+
+    // All seems good
+    //
+    return  bitset;
+}
+
+
+ANTLR3_API pANTLR3_BITSET
+antlr3BitsetList(pANTLR3_HASH_TABLE list)
+{
+    pANTLR3_BITSET		bitSet;
+    pANTLR3_HASH_ENUM	en;
+    pANTLR3_HASH_KEY	key;
+    ANTLR3_UINT64		bit;
+
+    // We have no idea what exactly is in the list
+    // so create a default bitset and then just add stuff
+    // as we enumerate.
+    //
+    bitSet  = antlr3BitsetNew(0);
+
+    en		= antlr3EnumNew(list);
+
+    while   (en->next(en, &key, (void **)(&bit)) == ANTLR3_SUCCESS)
+    {
+		bitSet->add(bitSet, (ANTLR3_UINT32)bit);
+    }
+    en->free(en);
+
+    return NULL;
+}
+
+///
+/// \brief
+/// Creates a new bitset with at least one 64 bit word of bits, but as
+/// many 64 bit words as are required.
+///
+/// \param[in] inBits
+/// An existing list of bit words to copy into the new set, or NULL to
+/// create an empty, default sized set.
+///
+/// \returns
+/// A new bit set containing a copy of the supplied bit words and with
+/// the API initialized.
+/// 
+///
+ANTLR3_API pANTLR3_BITSET
+antlr3BitsetLoad(pANTLR3_BITSET_LIST inBits)
+{
+	pANTLR3_BITSET  bitset;
+	ANTLR3_UINT32  count;
+
+	// Allocate a default sized bitset first (0 means default size).
+	// If the supplied word list needs more words than the default
+	// allocation provides, then grow() below will take care of it.
+	//
+	bitset  = antlr3BitsetNew(0);
+
+	if	(bitset == NULL)
+	{
+		return	NULL;
+	}
+
+	if	(inBits != NULL)
+	{
+		// Now we can add the element bits into the set
+		//
+		count=0;
+		while (count < inBits->length)
+		{
+			if  (bitset->blist.length <= count)
+			{
+				bitset->grow(bitset, count+1);
+			}
+
+			bitset->blist.bits[count] = *((inBits->bits)+count);
+			count++;
+		}
+	}
+
+	// return the new bitset
+	//
+	return  bitset;
+}
+
+///
+/// \brief
+/// Creates a new bitset with at least one element, but as
+/// many elements as are required.
+/// 
+/// \param[in] bit
+/// A variable number of bits to add to the set, ending in -1 (impossible bit).
+/// 
+/// \returns
+/// A new bit set with all of the specified elements added into it.
+/// 
+/// Call as:
+///  - pANTLR3_BITSET = antlrBitsetOf(n, n1, n2, -1);
+///  - pANTLR3_BITSET = antlrBitsetOf(-1);  Create empty bitset 
+///
+/// \remarks
+/// Stdargs function - must supply -1 as last parameter, which is NOT
+/// added to the set.
+/// 
+///
+ANTLR3_API pANTLR3_BITSET
+antlr3BitsetOf(ANTLR3_INT32 bit, ...)
+{
+    pANTLR3_BITSET  bitset;
+
+    va_list ap;
+
+    // Allocate memory for the bitset structure itself.
+    // Each input parameter is a bit number (0 based)
+    // to include in the bitset, so we need at least
+    // bit + 1 bits. If any argument indicates a bit
+    // higher than the default number of bits (0 means default size)
+    // then Add() will take care of it.
+    //
+    bitset  = antlr3BitsetNew(0);
+
+    if	(bitset == NULL)
+    {
+		return	NULL;
+    }
+
+    // Now we can add the element bits into the set
+    //
+    va_start(ap, bit);
+    while   (bit != -1)
+    {
+		antlr3BitsetAdd(bitset, bit);
+		bit = va_arg(ap, ANTLR3_UINT32);
+    }
+    va_end(ap);
+
+    // return the new bitset
+    //
+    return  bitset;
+}
+
+static pANTLR3_BITSET
+antlr3BitsetOR(pANTLR3_BITSET bitset1, pANTLR3_BITSET bitset2)
+{
+    pANTLR3_BITSET  bitset;
+
+    if	(bitset1 == NULL)
+    {
+		return antlr3BitsetClone(bitset2);
+    }
+
+    if	(bitset2 == NULL)
+    {
+		return	antlr3BitsetClone(bitset1);
+    }
+
+    // Allocate the new bitset by cloning the first set; the second set is then ORed into it.
+    //
+    bitset  = antlr3BitsetClone(bitset1);
+    
+    antlr3BitsetORInPlace(bitset, bitset2);
+
+    return  bitset;
+
+}
+
+static void
+antlr3BitsetAdd(pANTLR3_BITSET bitset, ANTLR3_INT32 bit)
+{
+    ANTLR3_UINT32   word;
+
+    word    = wordNumber(bit);
+
+    if	(word	>= bitset->blist.length)
+    {
+		growToInclude(bitset, bit);
+    }
+
+    bitset->blist.bits[word] |= bitMask(bit);
+
+}
+
+static void
+grow(pANTLR3_BITSET bitset, ANTLR3_INT32 newSize)
+{
+    pANTLR3_BITWORD   newBits;
+
+    // Space for the newly sized bitset - TODO: come back to this and use realloc; it may
+    // be more efficient...
+    //
+    newBits = (pANTLR3_BITWORD) ANTLR3_CALLOC(1, (size_t)(newSize * sizeof(ANTLR3_BITWORD)));
+    if	(bitset->blist.bits != NULL)
+    {
+		// Copy existing bits
+		//
+		ANTLR3_MEMCPY((void *)newBits, (const void *)bitset->blist.bits, (size_t)(bitset->blist.length * sizeof(ANTLR3_BITWORD)));
+
+		// Out with the old bits... de de de derrr
+		//
+		ANTLR3_FREE(bitset->blist.bits);
+    }
+
+    // In with the new bits... keerrrang.
+    //
+    bitset->blist.bits      = newBits;
+    bitset->blist.length    = newSize;
+}
+
+static void
+growToInclude(pANTLR3_BITSET bitset, ANTLR3_INT32 bit)
+{
+	ANTLR3_UINT32	bl;
+	ANTLR3_UINT32	nw;
+
+	bl = (bitset->blist.length << 1);
+	nw = numWordsToHold(bit);
+
+	if	(bl > nw)
+	{
+		bitset->grow(bitset, bl);
+	}
+	else
+	{
+		bitset->grow(bitset, nw);
+	}
+}
+
+static void
+antlr3BitsetORInPlace(pANTLR3_BITSET bitset, pANTLR3_BITSET bitset2)
+{
+    ANTLR3_UINT32   minimum;
+    ANTLR3_UINT32   i;
+
+    if	(bitset2 == NULL)
+    {
+		return;
+    }
+
+
+    // First make sure that the target bitset is big enough
+    // for the new bits to be ORed in.
+    //
+    if	(bitset->blist.length < bitset2->blist.length)
+    {
+		growToInclude(bitset, (bitset2->blist.length * sizeof(ANTLR3_BITWORD)));
+    }
+    
+    // OR the minimum number of words after any resizing went on
+    //
+    if	(bitset->blist.length < bitset2->blist.length)
+	{
+		minimum = bitset->blist.length;
+	}
+	else
+	{
+		minimum = bitset2->blist.length;
+	}
+
+    for	(i = minimum; i > 0; i--)
+    {
+		bitset->blist.bits[i-1] |= bitset2->blist.bits[i-1];
+    }
+}
+
+static ANTLR3_UINT64
+bitMask(ANTLR3_UINT32 bitNumber)
+{
+    return  ((ANTLR3_UINT64)1) << (bitNumber & (ANTLR3_BITSET_MOD_MASK));
+}
+
+static ANTLR3_UINT32
+antlr3BitsetSize(pANTLR3_BITSET bitset)
+{
+    ANTLR3_UINT32   degree;
+    ANTLR3_INT32   i;
+    ANTLR3_INT8    bit;
+    
+    // TODO: Come back to this, it may be faster to & with 0x01
+    // then shift right a copy of the 4 bits, than shift left a constant of 1.
+    // But then again, the optimizer might just work this out
+    // anyway.
+    //
+    degree  = 0;
+    for	(i = bitset->blist.length - 1; i>= 0; i--)
+    {
+		if  (bitset->blist.bits[i] != 0)
+		{
+			for	(bit = ANTLR3_BITSET_BITS - 1; bit >= 0; bit--)
+			{
+				if  ((bitset->blist.bits[i] & (((ANTLR3_BITWORD)1) << bit)) != 0)
+				{
+					degree++;
+				}
+			}
+		}
+    }
+    return degree;
+}
+
+static ANTLR3_BOOLEAN
+antlr3BitsetEquals(pANTLR3_BITSET bitset1, pANTLR3_BITSET bitset2)
+{
+    ANTLR3_INT32   minimum;
+    ANTLR3_INT32   i;
+
+    if	(bitset1 == NULL || bitset2 == NULL)
+    {
+	return	ANTLR3_FALSE;
+    }
+
+    // Work out the minimum comparison set
+    //
+    if	(bitset1->blist.length < bitset2->blist.length)
+    {
+		minimum = bitset1->blist.length;
+    }
+    else
+    {
+		minimum = bitset2->blist.length;
+    }
+
+    // Make sure the bits that the two sets have in common are equal
+    //
+    for	(i = minimum - 1; i >=0 ; i--)
+    {
+		if  (bitset1->blist.bits[i] != bitset2->blist.bits[i])
+		{
+			return  ANTLR3_FALSE;
+		}
+    }
+
+    // Now make sure the bits of the larger set are all turned
+    // off.
+    //
+    if	(bitset1->blist.length > (ANTLR3_UINT32)minimum)
+    {
+		for (i = minimum ; (ANTLR3_UINT32)i < bitset1->blist.length; i++)
+		{
+			if	(bitset1->blist.bits[i] != 0)
+			{
+				return	ANTLR3_FALSE;
+			}
+		}
+    }
+    else if (bitset2->blist.length > (ANTLR3_UINT32)minimum)
+    {
+		for (i = minimum; (ANTLR3_UINT32)i < bitset2->blist.length; i++)
+		{
+			if	(bitset2->blist.bits[i] != 0)
+			{
+				return	ANTLR3_FALSE;
+			}
+		}
+    }
+
+    return  ANTLR3_TRUE;
+}
+
+static ANTLR3_BOOLEAN
+antlr3BitsetMember(pANTLR3_BITSET bitset, ANTLR3_UINT32 bit)
+{
+    ANTLR3_UINT32    wordNo;
+
+    wordNo  = wordNumber(bit);
+
+    if	(wordNo >= bitset->blist.length)
+    {
+		return	ANTLR3_FALSE;
+    }
+    
+    if	((bitset->blist.bits[wordNo] & bitMask(bit)) == 0)
+    {
+		return	ANTLR3_FALSE;
+    }
+    else
+    {
+		return	ANTLR3_TRUE;
+    }
+}
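+
+// A minimal usage sketch of the bitset API assembled above: create a set,
+// turn a couple of bits on, test membership, then release it. The variable
+// name bs and the bit numbers are placeholders, not part of the runtime.
+//
+//    pANTLR3_BITSET bs = antlr3BitsetNew(0);        // 0 means default size
+//    if (bs != NULL)
+//    {
+//        bs->add(bs, 3);
+//        bs->add(bs, 64);                           // lands in the second 64 bit word
+//        if (bs->isMember(bs, 3) == ANTLR3_TRUE)
+//        {
+//            bs->remove(bs, 3);                     // turn bit 3 back off
+//        }
+//        bs->free(bs);                              // releases the words and the set
+//    }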
+
+static void
+antlr3BitsetRemove(pANTLR3_BITSET bitset, ANTLR3_UINT32 bit)
+{
+    ANTLR3_UINT32    wordNo;
+
+    wordNo  = wordNumber(bit);
+
+    if	(wordNo < bitset->blist.length)
+    {
+		bitset->blist.bits[wordNo] &= ~(bitMask(bit));
+    }
+}
+static ANTLR3_BOOLEAN
+antlr3BitsetIsNil(pANTLR3_BITSET bitset)
+{
+   ANTLR3_INT32    i;
+
+   for	(i = bitset->blist.length -1; i>= 0; i--)
+   {
+       if   (bitset->blist.bits[i] != 0)
+       {
+			return ANTLR3_FALSE;
+       }
+   }
+   
+   return   ANTLR3_TRUE;
+}
+
+static ANTLR3_UINT32
+numWordsToHold(ANTLR3_UINT32 bit)
+{
+    return  (bit >> ANTLR3_BITSET_LOG_BITS) + 1;
+}
+
+static	ANTLR3_UINT32
+wordNumber(ANTLR3_UINT32 bit)
+{
+    return  bit >> ANTLR3_BITSET_LOG_BITS;
+}
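+
+// Worked example of the word/mask arithmetic above, assuming the usual
+// 64 bit ANTLR3_BITWORD layout (ANTLR3_BITSET_LOG_BITS == 6 and
+// ANTLR3_BITSET_MOD_MASK == 0x3F); those constants live in the headers and
+// are stated here as an assumption.
+//
+//    wordNumber(70)     == 70 >> 6       == 1      // bit 70 lives in word 1
+//    bitMask(70)        == 1ULL << (70 & 0x3F)     // == 1ULL << 6 within that word
+//    numWordsToHold(70) == (70 >> 6) + 1 == 2      // two words needed to reach bit 70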
+
+static ANTLR3_UINT32
+antlr3BitsetNumBits(pANTLR3_BITSET bitset)
+{
+    return  bitset->blist.length << ANTLR3_BITSET_LOG_BITS;
+}
+
+/** Produce an integer list of all the bits that are turned on
+ *  in this bitset. Used mainly for error processing, as the bitset
+ *  represents a number of integer tokens which we use for follow sets
+ *  and so on.
+ *
+ *  The first entry holds the total number of integers in the list
+ *  (including the count entry itself); the set bit numbers follow it.
+ */
+static	pANTLR3_INT32	
+antlr3BitsetToIntList	(pANTLR3_BITSET bitset)
+{
+    ANTLR3_UINT32   numInts;	    // How many integers we will need
+    ANTLR3_UINT32   numBits;	    // How many bits are in the set
+    ANTLR3_UINT32   i;
+    ANTLR3_UINT32   index;
+
+    pANTLR3_INT32  intList;
+
+    numInts = bitset->size(bitset) + 1;
+    numBits = bitset->numBits(bitset);
+ 
+    intList = (pANTLR3_INT32)ANTLR3_MALLOC(numInts * sizeof(ANTLR3_INT32));
+
+    if	(intList == NULL)
+    {
+		return NULL;	// Out of memory
+    }
+
+    intList[0] = numInts;
+
+    // Enumerate the bits that are turned on
+    //
+    for	(i = 0, index = 1; i<numBits; i++)
+    {
+		if  (bitset->isMember(bitset, i) == ANTLR3_TRUE)
+		{
+			intList[index++]    = i;
+		}
+    }
+
+    // Result set
+    //
+    return  intList;
+}
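+
+// Sketch of how a caller might consume the list produced above. The caller
+// owns the returned allocation and must free it; the name bs is a placeholder.
+//
+//    pANTLR3_INT32 list = bs->toIntList(bs);
+//    if (list != NULL)
+//    {
+//        ANTLR3_INT32 i;
+//
+//        for (i = 1; i < list[0]; i++)              // list[0] is the total entry count
+//        {
+//            printf("bit %d is set\n", list[i]);
+//        }
+//        ANTLR3_FREE(list);
+//    }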
+
diff --git a/runtime/C/src/antlr3collections.c b/runtime/C/src/antlr3collections.c
new file mode 100644
index 0000000..9d79edd
--- /dev/null
+++ b/runtime/C/src/antlr3collections.c
@@ -0,0 +1,2794 @@
+/// \file
+/// Provides a number of useful functions that are roughly equivalent
+/// to Java HashTable and List for the purposes of the ANTLR 3 C runtime.
+/// Also usable by the C programmer for things like symbol table pointers
+/// and so on.
+///
+///
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3.h>
+
+#include "antlr3collections.h"
+
+// Interface functions for hash table
+//
+
+// String based keys
+//
+static void					antlr3HashDelete    (pANTLR3_HASH_TABLE table, void * key);
+static void *				antlr3HashGet	(pANTLR3_HASH_TABLE table, void * key);
+static pANTLR3_HASH_ENTRY   antlr3HashRemove    (pANTLR3_HASH_TABLE table, void * key);
+static ANTLR3_INT32			antlr3HashPut	(pANTLR3_HASH_TABLE table, void * key, void * element, void (ANTLR3_CDECL *freeptr)(void *));
+
+// Integer based keys (Lists and so on)
+//
+static void					antlr3HashDeleteI   (pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key);
+static void *				antlr3HashGetI	(pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key);
+static pANTLR3_HASH_ENTRY   antlr3HashRemoveI   (pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key);
+static ANTLR3_INT32			antlr3HashPutI	(pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key, void * element, void (ANTLR3_CDECL *freeptr)(void *));
+
+static void					antlr3HashFree	(pANTLR3_HASH_TABLE table);
+static ANTLR3_UINT32	    antlr3HashSize	(pANTLR3_HASH_TABLE table);
+
+// -----------
+
+// Interface functions for enumeration
+//
+static int	    antlr3EnumNext	    (pANTLR3_HASH_ENUM en, pANTLR3_HASH_KEY * key, void ** data);
+static void	    antlr3EnumFree	    (pANTLR3_HASH_ENUM en);
+
+// Interface functions for List
+//
+static void				antlr3ListFree	(pANTLR3_LIST list);
+static void				antlr3ListDelete(pANTLR3_LIST list, ANTLR3_INTKEY key);
+static void *			antlr3ListGet	(pANTLR3_LIST list, ANTLR3_INTKEY key);
+static ANTLR3_INT32		antlr3ListPut	(pANTLR3_LIST list, ANTLR3_INTKEY key, void * element, void (ANTLR3_CDECL *freeptr)(void *));
+static ANTLR3_INT32		antlr3ListAdd   (pANTLR3_LIST list, void * element, void (ANTLR3_CDECL *freeptr)(void *));
+static void *			antlr3ListRemove(pANTLR3_LIST list, ANTLR3_INTKEY key);
+static ANTLR3_UINT32	antlr3ListSize	(pANTLR3_LIST list);
+
+// Interface functions for Stack
+//
+static void				antlr3StackFree	(pANTLR3_STACK  stack);
+static void *			antlr3StackPop	(pANTLR3_STACK	stack);
+static void *			antlr3StackGet	(pANTLR3_STACK	stack, ANTLR3_INTKEY key);
+static ANTLR3_BOOLEAN	antlr3StackPush	(pANTLR3_STACK	stack, void * element, void (ANTLR3_CDECL *freeptr)(void *));
+static ANTLR3_UINT32	antlr3StackSize	(pANTLR3_STACK	stack);
+static void *			antlr3StackPeek	(pANTLR3_STACK	stack);
+
+// Interface functions for vectors
+//
+static	void ANTLR3_CDECL	antlr3VectorFree	(pANTLR3_VECTOR vector);
+static	void				antlr3VectorDel		(pANTLR3_VECTOR vector, ANTLR3_UINT32 entry);
+static	void *				antlr3VectorGet		(pANTLR3_VECTOR vector, ANTLR3_UINT32 entry);
+static	void *				antrl3VectorRemove	(pANTLR3_VECTOR vector, ANTLR3_UINT32 entry);
+static	void				antlr3VectorClear	(pANTLR3_VECTOR vector);
+static	ANTLR3_UINT32		antlr3VectorAdd		(pANTLR3_VECTOR vector, void * element, void (ANTLR3_CDECL *freeptr)(void *));
+static	ANTLR3_UINT32		antlr3VectorSet		(pANTLR3_VECTOR vector, ANTLR3_UINT32 entry, void * element, void (ANTLR3_CDECL *freeptr)(void *), ANTLR3_BOOLEAN freeExisting);
+static	ANTLR3_UINT32		antlr3VectorSize    (pANTLR3_VECTOR vector);
+static	ANTLR3_BOOLEAN      antlr3VectorSwap	(pANTLR3_VECTOR vector, ANTLR3_UINT32 entry1, ANTLR3_UINT32 entry2);
+
+static  ANTLR3_BOOLEAN      newPool             (pANTLR3_VECTOR_FACTORY factory);
+static  void				closeVectorFactory  (pANTLR3_VECTOR_FACTORY factory);
+static	pANTLR3_VECTOR		newVector			(pANTLR3_VECTOR_FACTORY factory);
+static	void				returnVector		(pANTLR3_VECTOR_FACTORY factory, pANTLR3_VECTOR vector);
+
+
+// Interface functions for int TRIE
+//
+static	pANTLR3_TRIE_ENTRY	intTrieGet		(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key);
+static	ANTLR3_BOOLEAN		intTrieDel		(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key);
+static	ANTLR3_BOOLEAN		intTrieAdd		(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key, ANTLR3_UINT32 type, ANTLR3_INTKEY intType, void * data, void (ANTLR3_CDECL *freeptr)(void *));
+static	void				intTrieFree		(pANTLR3_INT_TRIE trie);
+
+
+// Interface functions for topological sorter
+//
+static  void            addEdge          (pANTLR3_TOPO topo, ANTLR3_UINT32 edge, ANTLR3_UINT32 dependency);
+static  pANTLR3_UINT32  sortToArray      (pANTLR3_TOPO topo);
+static  void            sortVector       (pANTLR3_TOPO topo, pANTLR3_VECTOR v);
+static  void            freeTopo         (pANTLR3_TOPO topo);
+
+// Local function to advance enumeration structure pointers
+//
+static void antlr3EnumNextEntry(pANTLR3_HASH_ENUM en);
+
+pANTLR3_HASH_TABLE
+antlr3HashTableNew(ANTLR3_UINT32 sizeHint)
+{
+	// All we have to do is create the hashtable tracking structure
+	// and allocate memory for the requested number of buckets.
+	//
+	pANTLR3_HASH_TABLE	table;
+
+	ANTLR3_UINT32	bucket;	// Used to traverse the buckets
+
+	table   = (pANTLR3_HASH_TABLE)ANTLR3_MALLOC(sizeof(ANTLR3_HASH_TABLE));
+
+	// Error out if no memory left
+	if	(table	== NULL)
+	{
+		return	NULL;
+	}
+
+	// Allocate memory for the buckets
+	//
+	table->buckets = (pANTLR3_HASH_BUCKET) ANTLR3_MALLOC((size_t) (sizeof(ANTLR3_HASH_BUCKET) * sizeHint)); 
+
+	if	(table->buckets == NULL)
+	{
+		ANTLR3_FREE((void *)table);
+		return	NULL;
+	}
+
+	// Modulo of the table, (bucket count).
+	//
+	table->modulo   = sizeHint;
+
+	table->count    = 0;	    /* Nothing in there yet ( I hope)	*/
+
+	/* Initialize the buckets to empty
+	*/
+	for	(bucket = 0; bucket < sizeHint; bucket++)
+	{
+		table->buckets[bucket].entries = NULL;
+	}
+
+	/* Exclude duplicate entries by default
+	*/
+	table->allowDups	= ANTLR3_FALSE;
+
+    /* Assume that keys should be strduped before they are
+     * entered in the table.
+     */
+    table->doStrdup     = ANTLR3_TRUE;
+
+	/* Install the interface
+	*/
+
+	table->get		=  antlr3HashGet;
+	table->put		=  antlr3HashPut;
+	table->del		=  antlr3HashDelete;
+	table->remove	=  antlr3HashRemove;
+
+	table->getI		=  antlr3HashGetI;
+	table->putI		=  antlr3HashPutI;
+	table->delI		=  antlr3HashDeleteI;
+	table->removeI	=  antlr3HashRemoveI;
+
+	table->size		=  antlr3HashSize;
+	table->free		=  antlr3HashFree;
+
+	return  table;
+}
+
+static void
+antlr3HashFree(pANTLR3_HASH_TABLE table)
+{
+    ANTLR3_UINT32	bucket;	/* Used to traverse the buckets	*/
+
+    pANTLR3_HASH_BUCKET	thisBucket;
+    pANTLR3_HASH_ENTRY	entry;
+    pANTLR3_HASH_ENTRY	nextEntry;
+
+    /* Free the table, all buckets and all entries, and all the
+     * keys and data (if the table exists)
+     */
+    if	(table	!= NULL)
+    {
+	for	(bucket = 0; bucket < table->modulo; bucket++)
+	{
+	    thisBucket	= &(table->buckets[bucket]);
+
+	    /* Allow sparse tables, though we don't create them as such at present
+	     */
+	    if	( thisBucket != NULL)
+	    {
+		entry	= thisBucket->entries;
+
+		/* Search all entries in the bucket and free them up
+		 */
+		while	(entry != NULL)
+		{
+		    /* Save next entry - we do not want to access memory in entry after we
+		     * have freed it.
+		     */
+		    nextEntry	= entry->nextEntry;
+
+		    /* Free any data pointer, this only happens if the user supplied
+		     * a pointer to a routine that knows how to free the structure they
+		     * added to the table.
+		     */
+		    if	(entry->free != NULL)
+		    {
+			entry->free(entry->data);
+		    }
+
+		    /* Free the key memory - we know that we allocated this
+		     */
+		    if	(entry->keybase.type == ANTLR3_HASH_TYPE_STR && entry->keybase.key.sKey != NULL)
+		    {
+			ANTLR3_FREE(entry->keybase.key.sKey);
+		    }
+
+		    /* Free this entry
+		     */
+		    ANTLR3_FREE(entry);
+		    entry   = nextEntry;    /* Load next pointer to see if we should free it */
+		}
+		/* Invalidate the current pointer
+		 */
+		thisBucket->entries = NULL;
+	    }
+	}
+
+	/* Now we can free the bucket memory
+	 */
+	ANTLR3_FREE(table->buckets);
+    }
+
+    /* Now we free the memory for the table itself
+     */
+    ANTLR3_FREE(table);
+}
+
+/** return the current size of the hash table
+ */
+static ANTLR3_UINT32	antlr3HashSize	    (pANTLR3_HASH_TABLE table)
+{
+    return  table->count;
+}
+
+/** Remove a numeric keyed entry from a hash table if it exists,
+ *  no error if it does not exist.
+ */
+static pANTLR3_HASH_ENTRY   antlr3HashRemoveI   (pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key)
+{
+    ANTLR3_UINT32	    hash;
+    pANTLR3_HASH_BUCKET	    bucket;
+    pANTLR3_HASH_ENTRY	    entry;
+    pANTLR3_HASH_ENTRY	    * nextPointer;
+
+    /* First we need to know the hash of the provided key
+     */
+    hash    = (ANTLR3_UINT32)(key % (ANTLR3_INTKEY)(table->modulo));
+
+    /* Knowing the hash, we can find the bucket
+     */
+    bucket  = table->buckets + hash;
+
+    /* Now, we traverse the entries in the bucket until
+     * we find the key or the end of the entries in the bucket. 
+     * We track the element prior to the one we are examining
+     * as we need to set its next pointer to the next pointer
+     * of the entry we are deleting (if we find it).
+     */
+    entry	    =   bucket->entries;    /* Entry to examine					    */
+    nextPointer	    = & bucket->entries;    /* Where to put the next pointer of the deleted entry   */
+
+    while   (entry != NULL)
+    {
+	/* See if this is the entry we wish to delete
+	 */
+	if  (entry->keybase.key.iKey == key)
+	{
+	    /* It was the correct entry, so we set the next pointer
+	     * of the previous entry to the next pointer of this
+	     * located one, which takes it out of the chain.
+	     */
+	    (*nextPointer)		= entry->nextEntry;
+
+	    table->count--;
+
+	    return entry;
+	}
+	else
+	{
+	    /* We found an entry but it wasn't the one that was wanted, so
+	     * move to the next one, if any.
+	     */
+	    nextPointer	= & (entry->nextEntry);	    /* Address of the next pointer in the current entry	    */
+	    entry	= entry->nextEntry;	    /* Address of the next element in the bucket (if any)   */
+	}
+    }
+
+    return NULL;  /* Not found */
+}
+
+/** Remove the element in the hash table for a particular
+ *  key value, if it exists - no error if it does not.
+ */
+static pANTLR3_HASH_ENTRY
+antlr3HashRemove(pANTLR3_HASH_TABLE table, void * key)
+{
+    ANTLR3_UINT32	    hash;
+    pANTLR3_HASH_BUCKET	    bucket;
+    pANTLR3_HASH_ENTRY	    entry;
+    pANTLR3_HASH_ENTRY	    * nextPointer;
+
+    /* First we need to know the hash of the provided key
+     */
+    hash    = antlr3Hash(key, (ANTLR3_UINT32)strlen((const char *)key));
+
+    /* Knowing the hash, we can find the bucket
+     */
+    bucket  = table->buckets + (hash % table->modulo);
+
+    /* Now, we traverse the entries in the bucket until
+     * we find the key or the end of the entries in the bucket. 
+     * We track the element prior to the one we are examining
+     * as we need to set its next pointer to the next pointer
+     * of the entry we are deleting (if we find it).
+     */
+    entry	    =   bucket->entries;    /* Entry to examine					    */
+    nextPointer	    = & bucket->entries;    /* Where to put the next pointer of the deleted entry   */
+
+    while   (entry != NULL)
+    {
+	/* See if this is the entry we wish to delete
+	 */
+	if  (strcmp((const char *)key, (const char *)entry->keybase.key.sKey) == 0)
+	{
+	    /* It was the correct entry, so we set the next pointer
+	     * of the previous entry to the next pointer of this
+	     * located one, which takes it out of the chain.
+	     */
+	    (*nextPointer)		= entry->nextEntry;
+
+	    /* Release the key - if we allocated that
+	     */
+        if (table->doStrdup == ANTLR3_TRUE)
+        {
+            ANTLR3_FREE(entry->keybase.key.sKey);
+        }
+	    entry->keybase.key.sKey	= NULL;
+
+	    table->count--;
+
+	    return entry;
+	}
+	else
+	{
+	    /* We found an entry but it wasn't the one that was wanted, so
+	     * move to the next one, if any.
+	     */
+	    nextPointer	= & (entry->nextEntry);	    /* Address of the next pointer in the current entry	    */
+	    entry	= entry->nextEntry;	    /* Address of the next element in the bucket (if any)   */
+	}
+    }
+
+    return NULL;  /* Not found */
+}
+
+/** Takes the element with the supplied key out of the list, and deletes the data
+ *  calling the supplied free() routine if any. 
+ */
+static void
+antlr3HashDeleteI    (pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key)
+{
+    pANTLR3_HASH_ENTRY	entry;
+
+    entry = antlr3HashRemoveI(table, key);
+	
+    /* Now we can free the elements and the entry in order
+     */
+    if	(entry != NULL && entry->free != NULL)
+    {
+	/* Call programmer supplied function to release this entry data
+	 */
+	entry->free(entry->data);
+	entry->data = NULL;
+    }
+    /* Finally release the space for this entry block.
+     */
+    ANTLR3_FREE(entry);
+}
+
+/** Takes the element with the supplied key out of the list, and deletes the data
+ *  calling the supplied free() routine if any. 
+ */
+static void
+antlr3HashDelete    (pANTLR3_HASH_TABLE table, void * key)
+{
+    pANTLR3_HASH_ENTRY	entry;
+
+    entry = antlr3HashRemove(table, key);
+	
+    /* Now we can free the elements and the entry in order
+     */
+    if	(entry != NULL && entry->free != NULL)
+    {
+	/* Call programmer supplied function to release this entry data
+	 */
+	entry->free(entry->data);
+	entry->data = NULL;
+    }
+    /* Finally release the space for this entry block.
+     */
+    ANTLR3_FREE(entry);
+}
+
+/** Return the element pointer in the hash table for a particular
+ *  key value, or NULL if it does not exist (or was itself NULL).
+ */
+static void *
+antlr3HashGetI(pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key)
+{
+    ANTLR3_UINT32	    hash;
+    pANTLR3_HASH_BUCKET	    bucket;
+    pANTLR3_HASH_ENTRY	    entry;
+
+    /* First we need to know the hash of the provided key
+     */
+    hash    = (ANTLR3_UINT32)(key % (ANTLR3_INTKEY)(table->modulo));
+
+    /* Knowing the hash, we can find the bucket
+     */
+    bucket  = table->buckets + hash;
+
+    /* Now we can inspect the key at each entry in the bucket
+     * and see if we have a match.
+     */
+    entry   = bucket->entries;
+
+    while   (entry != NULL)
+    {
+	if  (entry->keybase.key.iKey == key)
+	{
+	    /* Match was found, return the data pointer for this entry
+	     */
+	    return  entry->data;
+	}
+	entry = entry->nextEntry;
+    }
+
+    /* If we got here, then we did not find the key
+     */
+    return  NULL;
+}
+
+/** Return the element pointer in the hash table for a particular
+ *  key value, or NULL if it does not exist (or was itself NULL).
+ */
+static void *
+antlr3HashGet(pANTLR3_HASH_TABLE table, void * key)
+{
+    ANTLR3_UINT32	    hash;
+    pANTLR3_HASH_BUCKET	    bucket;
+    pANTLR3_HASH_ENTRY	    entry;
+
+
+    /* First we need to know the hash of the provided key
+     */
+    hash    = antlr3Hash(key, (ANTLR3_UINT32)strlen((const char *)key));
+
+    /* Knowing the hash, we can find the bucket
+     */
+    bucket  = table->buckets + (hash % table->modulo);
+
+    /* Now we can inspect the key at each entry in the bucket
+     * and see if we have a match.
+     */
+    entry   = bucket->entries;
+
+    while   (entry != NULL)
+    {
+	if  (strcmp((const char *)key, (const char *)entry->keybase.key.sKey) == 0)
+	{
+	    /* Match was found, return the data pointer for this entry
+	     */
+	    return  entry->data;
+	}
+	entry = entry->nextEntry;
+    }
+
+    /* If we got here, then we did not find the key
+     */
+    return  NULL;
+}
+
+/** Add the element pointer in to the table, based upon the 
+ *  hash of the provided key.
+ */
+static	ANTLR3_INT32
+antlr3HashPutI(pANTLR3_HASH_TABLE table, ANTLR3_INTKEY key, void * element, void (ANTLR3_CDECL *freeptr)(void *))
+{
+	ANTLR3_UINT32	    hash;
+	pANTLR3_HASH_BUCKET	    bucket;
+	pANTLR3_HASH_ENTRY	    entry;
+	pANTLR3_HASH_ENTRY	    * newPointer;
+
+	/* First we need to know the hash of the provided key
+	*/
+	hash    = (ANTLR3_UINT32)(key % (ANTLR3_INTKEY)(table->modulo));
+
+	/* Knowing the hash, we can find the bucket
+	*/
+	bucket  = table->buckets + hash;
+
+	/* Knowing the bucket, we can traverse the entries until
+	* we find a NULL pointer or we find that this is already 
+	* in the table and duplicates were not allowed.
+	*/
+	newPointer	= &bucket->entries;
+
+	while   (*newPointer !=  NULL)
+	{
+		/* The value at new pointer is pointing to an existing entry.
+		* If duplicates are allowed then we don't care what it is, but
+		* must reject this add if the key is the same as the one we are
+		* supplied with.
+		*/
+		if  (table->allowDups == ANTLR3_FALSE)
+		{
+			if	((*newPointer)->keybase.key.iKey == key)
+			{
+				return	ANTLR3_ERR_HASHDUP;
+			}
+		}
+
+		/* Point to the next entry pointer of the current entry we
+		* are traversing, if it is NULL we will create our new
+		* structure and point this to it.
+		*/
+		newPointer = &((*newPointer)->nextEntry);
+	}
+
+	/* newPointer is now pointing at the pointer where we need to
+	* add our new entry, so let's create the entry and add it in.
+	*/
+	entry   = (pANTLR3_HASH_ENTRY)ANTLR3_MALLOC((size_t)sizeof(ANTLR3_HASH_ENTRY));
+
+	if	(entry == NULL)
+	{
+		return	ANTLR3_ERR_NOMEM;
+	}
+
+	entry->data			= element;		/* Install the data element supplied			*/
+	entry->free			= freeptr;		/* Function that knows how to release the entry		*/
+	entry->keybase.type		= ANTLR3_HASH_TYPE_INT;	/* Indicate the key type stored here for when we free	*/
+	entry->keybase.key.iKey	= key;			/* Record the key value					*/
+	entry->nextEntry		= NULL;			/* Ensure that the forward pointer ends the chain	*/
+
+	*newPointer	= entry;    /* Install the next entry in this bucket	*/
+
+	table->count++;
+
+	return  ANTLR3_SUCCESS;
+}
+
+
+/** Add the element pointer in to the table, based upon the 
+ *  hash of the provided key.
+ */
+static	ANTLR3_INT32
+antlr3HashPut(pANTLR3_HASH_TABLE table, void * key, void * element, void (ANTLR3_CDECL *freeptr)(void *))
+{
+	ANTLR3_UINT32	    hash;
+	pANTLR3_HASH_BUCKET	    bucket;
+	pANTLR3_HASH_ENTRY	    entry;
+	pANTLR3_HASH_ENTRY	    * newPointer;
+
+	/* First we need to know the hash of the provided key
+	*/
+	hash    = antlr3Hash(key, (ANTLR3_UINT32)strlen((const char *)key));
+
+	/* Knowing the hash, we can find the bucket
+	*/
+	bucket  = table->buckets + (hash % table->modulo);
+
+	/* Knowing the bucket, we can traverse the entries until
+	* we find a NULL pointer or we find that this is already 
+	* in the table and duplicates were not allowed.
+	*/
+	newPointer	= &bucket->entries;
+
+	while   (*newPointer !=  NULL)
+	{
+		/* The value at new pointer is pointing to an existing entry.
+		* If duplicates are allowed then we don't care what it is, but
+		* must reject this add if the key is the same as the one we are
+		* supplied with.
+		*/
+		if  (table->allowDups == ANTLR3_FALSE)
+		{
+			if	(strcmp((const char*) key, (const char *)(*newPointer)->keybase.key.sKey) == 0)
+			{
+				return	ANTLR3_ERR_HASHDUP;
+			}
+		}
+
+		/* Point to the next entry pointer of the current entry we
+		* are traversing, if it is NULL we will create our new
+		* structure and point this to it.
+		*/
+		newPointer = &((*newPointer)->nextEntry);
+	}
+
+	/* newPointer is now pointing at the pointer where we need to
+	* add our new entry, so let's create the entry and add it in.
+	*/
+	entry   = (pANTLR3_HASH_ENTRY)ANTLR3_MALLOC((size_t)sizeof(ANTLR3_HASH_ENTRY));
+
+	if	(entry == NULL)
+	{
+		return	ANTLR3_ERR_NOMEM;
+	}
+
+	entry->data			= element;					/* Install the data element supplied				*/
+	entry->free			= freeptr;					/* Function that knows how to release the entry	    */
+	entry->keybase.type	= ANTLR3_HASH_TYPE_STR;     /* Indicate the key type stored here for free()	    */
+    if  (table->doStrdup == ANTLR3_TRUE)
+    {
+        entry->keybase.key.sKey	= ANTLR3_STRDUP(key);	/* Record the key value								*/
+    }
+    else
+    {
+        entry->keybase.key.sKey	= (pANTLR3_UINT8)key;                  /* Record the key value								*/
+    }
+	entry->nextEntry		= NULL;					/* Ensure that the forward pointer ends the chain   */
+
+	*newPointer	= entry;    /* Install the next entry in this bucket	*/
+
+	table->count++;
+
+	return  ANTLR3_SUCCESS;
+}
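+
+// Sketch of typical string keyed use of the hash table API wired up in
+// antlr3HashTableNew(). The bucket count, the key string and myValuePtr are
+// placeholder values, not part of the runtime.
+//
+//    pANTLR3_HASH_TABLE symbols = antlr3HashTableNew(11);
+//    void *             found;
+//
+//    if (symbols != NULL)
+//    {
+//        symbols->put(symbols, (void *)"width", myValuePtr, NULL); // NULL: table will not free the data
+//        found = symbols->get(symbols, (void *)"width");
+//        symbols->del(symbols, (void *)"width");                   // removes and frees the entry
+//        symbols->free(symbols);
+//    }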
+
+/** \brief Creates an enumeration structure to traverse the hash table.
+ *
+ * \param table Table to enumerate
+ * \return Pointer to enumeration structure.
+ */
+pANTLR3_HASH_ENUM
+antlr3EnumNew	(pANTLR3_HASH_TABLE table)
+{
+    pANTLR3_HASH_ENUM	en;
+
+    /* Allocate structure memory
+     */
+    en    = (pANTLR3_HASH_ENUM) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_HASH_ENUM));
+
+    /* Check that the allocation was good 
+     */
+    if	(en == NULL)
+    {
+	return	(pANTLR3_HASH_ENUM) ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
+    }
+    
+    /* Initialize the start pointers
+    */
+    en->table	= table;
+    en->bucket	= 0;				/* First bucket		    */
+    en->entry	= en->table->buckets->entries;	/* First entry to return    */
+
+    /* Special case in that the first bucket may not have anything in it
+     * but the antlr3EnumNext() function expects that the en->entry is
+     * set to the next valid pointer. Hence if it is not a valid element
+     * pointer, attempt to find the next one that is (the table may be empty
+     * of course).
+     */
+    if	(en->entry == NULL)
+    {
+	antlr3EnumNextEntry(en);
+    }
+
+    /* Install the interface
+     */
+    en->free	=  antlr3EnumFree;
+    en->next	=  antlr3EnumNext;
+
+    /* All is good
+     */
+    return  en;
+}
+
+/** \brief Return the next entry in the hashtable being traversed by the supplied
+ *         enumeration.
+ *
+ * \param[in] en Pointer to the enumeration tracking structure
+ * \param key	 Pointer to void pointer, where the key pointer is returned.
+ * \param data	 Pointer to void pointer where the data pointer is returned.
+ * \return 
+ *	- ANTLR3_SUCCESS if there was a next key
+ *	- ANTLR3_FAIL	 if there were no more keys
+ *
+ * \remark
+ *  No checking of input structure is performed!
+ */
+static int
+antlr3EnumNext	(pANTLR3_HASH_ENUM en, pANTLR3_HASH_KEY * key, void ** data)
+{
+    /* If the current entry is valid, then use it
+     */
+    if  (en->bucket >= en->table->modulo)
+    {
+        /* Already exhausted the table
+         */
+        return	ANTLR3_FAIL;
+    }
+
+    /* Pointers are already set to the current entry to return, or
+     * we would not be at this point in the logic flow.
+     */
+    *key	= &(en->entry->keybase);
+    *data	= en->entry->data;
+
+    /* Return pointers are set up, so now we move the element
+     * pointer to the next in the table (if any).
+     */
+    antlr3EnumNextEntry(en);
+
+    return	ANTLR3_SUCCESS;
+}
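+
+// Sketch of a typical enumeration loop over a hash table using the two
+// functions above; the table name symbols is a placeholder.
+//
+//    pANTLR3_HASH_ENUM en = antlr3EnumNew(symbols);
+//    pANTLR3_HASH_KEY  key;
+//    void *            data;
+//
+//    while (en->next(en, &key, &data) == ANTLR3_SUCCESS)
+//    {
+//        // inspect key->key.sKey or key->key.iKey, depending on how the entry was added
+//    }
+//    en->free(en);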
+
+/** \brief Local function to advance the entry pointer of an enumeration 
+ * structure to the next valid entry (if there is one).
+ *
+ * \param[in] en Pointer to ANTLR3 enumeration structure returned by antlr3EnumNew()
+ *
+ * \remark
+ *   - The function always leaves the pointers pointing at a valid entry if there
+ *     is one, so if the entry pointer is NULL when this function exits, there were
+ *     no more entries in the table.
+ */
+static void
+antlr3EnumNextEntry(pANTLR3_HASH_ENUM en)
+{
+    pANTLR3_HASH_BUCKET	bucket;
+
+    /* See if the current entry pointer is valid first of all
+     */
+    if	(en->entry != NULL)
+    {
+	/* Current entry was a valid point, see if there is another
+	 * one in the chain.
+	 */
+	if  (en->entry->nextEntry != NULL)
+	{
+	    /* Next entry in the enumeration is just the next entry
+	     * in the chain.
+	     */
+	    en->entry = en->entry->nextEntry;
+	    return;
+	}
+    }
+
+    /* There were no more entries in the current bucket, if there are
+     * more buckets then chase them until we find an entry.
+     */
+    en->bucket++;
+
+    while   (en->bucket < en->table->modulo)
+    {
+	/* There was one more bucket, see if it has any elements in it
+	 */
+	bucket	= en->table->buckets + en->bucket;
+
+	if  (bucket->entries != NULL)
+	{
+	    /* There was an entry in this bucket, so we can use it
+	     * for the next entry in the enumeration.
+	     */
+	    en->entry	= bucket->entries;
+	    return;
+	}
+
+	/* There was nothing in the bucket we just examined, move to the
+	 * next one.
+	 */
+	en->bucket++;
+    }
+
+    /* Here we have exhausted all buckets and the enumeration pointer will 
+     * have its bucket count = table->modulo which signifies that we are done.
+     */
+}
+
+/** \brief Frees up the memory structures that represent a hash table
+ *  enumeration.
+ * \param[in] en Pointer to ANTLR3 enumeration structure returned by antlr3EnumNew()
+ */
+static void
+antlr3EnumFree	(pANTLR3_HASH_ENUM en)
+{
+    /* Nothing to check, we just free it.
+     */
+    ANTLR3_FREE(en);
+}
+
+/** Given an input key of arbitrary length, return a hash value of
+ *  it. This can then be used (with suitable modulo) to index other
+ *  structures.
+ */
+ANTLR3_API ANTLR3_UINT32
+antlr3Hash(void * key, ANTLR3_UINT32 keylen)
+{
+    /* Accumulate the hash value of the key
+     */
+    ANTLR3_UINT32   hash;
+    pANTLR3_UINT8   keyPtr;
+    ANTLR3_UINT32   i1;
+
+    hash    = 0;
+    keyPtr  = (pANTLR3_UINT8) key;
+
+    /* Iterate the key and accumulate the hash
+     */
+    while(keylen > 0)
+    {
+	hash = (hash << 4) + (*(keyPtr++));
+
+	if ((i1=hash&0xf0000000) != 0)
+	{
+		hash = hash ^ (i1 >> 24);
+		hash = hash ^ i1;
+	}
+	keylen--;
+    }
+
+    return  hash;
+}
+
+ANTLR3_API  pANTLR3_LIST
+antlr3ListNew	(ANTLR3_UINT32 sizeHint)
+{
+    pANTLR3_LIST    list;
+
+    /* Allocate memory
+     */
+    list    = (pANTLR3_LIST)ANTLR3_MALLOC((size_t)sizeof(ANTLR3_LIST));
+
+    if	(list == NULL)
+    {
+	return	(pANTLR3_LIST)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
+    }
+
+    /* Now we need to add a new table
+     */
+    list->table	= antlr3HashTableNew(sizeHint);
+
+    if	(list->table == (pANTLR3_HASH_TABLE)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM))
+    {
+	return	(pANTLR3_LIST)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
+    }
+
+    /* Allocation was good, install interface
+     */
+    list->free	    =  antlr3ListFree;
+    list->del	    =  antlr3ListDelete;
+    list->get	    =  antlr3ListGet;
+    list->add	    =  antlr3ListAdd;
+    list->remove    =  antlr3ListRemove;
+    list->put	    =  antlr3ListPut;
+    list->size	    =  antlr3ListSize;
+
+    return  list;
+}
+
+static ANTLR3_UINT32	antlr3ListSize	    (pANTLR3_LIST list)
+{
+    return  list->table->size(list->table);
+}
+
+static void
+antlr3ListFree	(pANTLR3_LIST list)
+{
+    /* Free the hashtable that stores the list
+     */
+    list->table->free(list->table);
+
+    /* Free the allocation for the list itself
+     */
+    ANTLR3_FREE(list);
+}
+
+static void
+antlr3ListDelete    (pANTLR3_LIST list, ANTLR3_INTKEY key)
+{
+    list->table->delI(list->table, key);
+}
+
+static void *
+antlr3ListGet	    (pANTLR3_LIST list, ANTLR3_INTKEY key)
+{
+    return list->table->getI(list->table, key);
+}
+
+/** Add the supplied element to the list, at the next available key
+ */
+static ANTLR3_INT32	antlr3ListAdd   (pANTLR3_LIST list, void * element, void (ANTLR3_CDECL *freeptr)(void *))
+{
+    ANTLR3_INTKEY   key;
+
+    key	    = list->table->size(list->table) + 1;
+    return list->put(list, key, element, freeptr);
+}
+
+/** Remove from the list, but don't free the element, just send it back to the
+ *  caller.
+ */
+static	void *
+antlr3ListRemove	    (pANTLR3_LIST list, ANTLR3_INTKEY key)
+{
+    pANTLR3_HASH_ENTRY	    entry;
+
+    entry = list->table->removeI(list->table, key);
+
+    if	(entry != NULL)
+    {
+        return  entry->data;
+    }
+    else
+    {
+	return	NULL;
+    }
+}
+
+static	ANTLR3_INT32
+antlr3ListPut	    (pANTLR3_LIST list, ANTLR3_INTKEY key, void * element, void (ANTLR3_CDECL *freeptr)(void *))
+{
+    return  list->table->putI(list->table, key, element, freeptr);
+}
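+
+// Sketch of the list API in use: add() assigns the keys 1, 2, 3... itself,
+// while put() lets the caller choose the key. firstPtr and secondPtr are
+// placeholder element pointers.
+//
+//    pANTLR3_LIST things = antlr3ListNew(31);
+//
+//    things->add(things, firstPtr,  NULL);          // stored under key 1
+//    things->add(things, secondPtr, NULL);          // stored under key 2
+//    secondPtr = things->get(things, 2);            // fetch it back by key
+//    things->free(things);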
+
+ANTLR3_API  pANTLR3_STACK
+antlr3StackNew	(ANTLR3_UINT32 sizeHint)
+{
+    pANTLR3_STACK   stack;
+
+    /* Allocate memory
+     */
+    stack    = (pANTLR3_STACK)ANTLR3_MALLOC((size_t)sizeof(ANTLR3_STACK));
+
+    if	(stack == NULL)
+    {
+	return	(pANTLR3_STACK)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
+    }
+
+    /* Now we need to add a new table
+     */
+    stack->vector   = antlr3VectorNew(sizeHint);
+    stack->top	    = NULL;
+
+    if	(stack->vector == (pANTLR3_VECTOR)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM))
+    {
+	return	(pANTLR3_STACK)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
+    }
+
+    /* Looks good, now add the interface
+     */
+    stack->get	=  antlr3StackGet;
+    stack->free	=  antlr3StackFree;
+    stack->pop	=  antlr3StackPop;
+    stack->push	=  antlr3StackPush;
+    stack->size	=  antlr3StackSize;
+    stack->peek	=  antlr3StackPeek;
+
+    return  stack;
+}
+
+static ANTLR3_UINT32	antlr3StackSize	    (pANTLR3_STACK stack)
+{
+    return  stack->vector->count;
+}
+
+
+static void
+antlr3StackFree	(pANTLR3_STACK  stack)
+{
+    /* Free the list that supports the stack
+     */
+    stack->vector->free(stack->vector);
+    stack->vector   = NULL;
+    stack->top	    = NULL;
+
+    ANTLR3_FREE(stack);
+}
+
+static void *
+antlr3StackPop	(pANTLR3_STACK	stack)
+{
+    // Delete the element that is currently at the top of the stack
+    //
+    stack->vector->del(stack->vector, stack->vector->count - 1);
+
+    // And get the element that is the now the top of the stack (if anything)
+    // NOTE! This is not quite like a 'real' stack, which would normally return you
+    // the current top of the stack, then remove it from the stack.
+    // TODO: Review this, it is correct for follow sets which is what this was done for
+    //       but is not as obvious when using it as a 'real' stack.
+    //
+    stack->top = stack->vector->get(stack->vector, stack->vector->count - 1);
+    return stack->top;
+}
+
+static void *
+antlr3StackGet	(pANTLR3_STACK stack, ANTLR3_INTKEY key)
+{
+    return  stack->vector->get(stack->vector, (ANTLR3_UINT32)key);
+}
+
+static void *
+antlr3StackPeek	(pANTLR3_STACK	stack)
+{
+    return  stack->top;
+}
+
+static ANTLR3_BOOLEAN 
+antlr3StackPush	(pANTLR3_STACK stack, void * element, void (ANTLR3_CDECL *freeptr)(void *))
+{
+    stack->top	= element;
+    return (ANTLR3_BOOLEAN)(stack->vector->add(stack->vector, element, freeptr));
+}
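+
+// Sketch of the stack API in use. Note the behaviour described in the TODO
+// inside antlr3StackPop() above: pop() discards the current top and returns
+// the NEW top, so read peek() before popping if you need the element itself.
+// scopePtr is a placeholder.
+//
+//    pANTLR3_STACK scopes  = antlr3StackNew(8);
+//    void *        current;
+//
+//    scopes->push(scopes, scopePtr, NULL);
+//    current = scopes->peek(scopes);                // the element just pushed
+//    scopes->pop(scopes);                           // discards it, returns what is underneath
+//    scopes->free(scopes);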
+
+ANTLR3_API  pANTLR3_VECTOR
+antlr3VectorNew	(ANTLR3_UINT32 sizeHint)
+{
+	pANTLR3_VECTOR  vector;
+
+
+	// Allocate memory for the vector structure itself
+	//
+	vector  = (pANTLR3_VECTOR) ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_VECTOR)));
+
+	if	(vector == NULL)
+	{
+		return	(pANTLR3_VECTOR)ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
+	}
+
+	// Now fill in the defaults
+	//
+    antlr3SetVectorApi(vector, sizeHint);
+
+	// And everything is hunky dory
+	//
+	return  vector;
+}
+
+ANTLR3_API void
+antlr3SetVectorApi  (pANTLR3_VECTOR vector, ANTLR3_UINT32 sizeHint)
+{
+    ANTLR3_UINT32   initialSize;
+
+    // Allow the vector size to be chosen by us, so the input size hint can be zero
+    //
+    if	(sizeHint > ANTLR3_VECTOR_INTERNAL_SIZE)
+    {
+        initialSize = sizeHint;
+    }
+    else
+    {
+        initialSize = ANTLR3_VECTOR_INTERNAL_SIZE;
+    }
+
+    if  (sizeHint > ANTLR3_VECTOR_INTERNAL_SIZE)
+    {
+        vector->elements	= (pANTLR3_VECTOR_ELEMENT)ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_VECTOR_ELEMENT) * initialSize));
+    }
+    else
+    {
+        vector->elements    = vector->internal;
+    }
+
+    if	(vector->elements == NULL)
+    {
+        ANTLR3_FREE(vector);
+        return;
+    }
+
+    // Memory allocated successfully
+    //
+    vector->count			= 0;			// No entries yet of course
+    vector->elementsSize    = initialSize;  // Available entries
+
+    // Now we can install the API
+    //
+    vector->add	    = antlr3VectorAdd;
+    vector->del	    = antlr3VectorDel;
+    vector->get	    = antlr3VectorGet;
+    vector->free    = antlr3VectorFree;
+    vector->set	    = antlr3VectorSet;
+    vector->remove  = antrl3VectorRemove;
+    vector->clear   = antlr3VectorClear;
+    vector->size    = antlr3VectorSize;
+    vector->swap    = antlr3VectorSwap;
+
+    // Assume that this is not a factory made vector
+    //
+    vector->factoryMade	= ANTLR3_FALSE;
+}
+
+// Clear the entries in a vector.
+// Clearing the vector leaves its capacity the same but
+// it walks the entries first to see if any of them
+// have a free routine that must be called.
+//
+static	void				
+antlr3VectorClear	(pANTLR3_VECTOR vector)
+{
+	ANTLR3_UINT32   entry;
+
+	// We must traverse every entry in the vector and if it has
+	// a pointer to a free function then we call it with
+	// the entry pointer
+	//
+	for	(entry = 0; entry < vector->count; entry++)
+	{
+		if  (vector->elements[entry].freeptr != NULL)
+		{
+			vector->elements[entry].freeptr(vector->elements[entry].element);
+		}
+		vector->elements[entry].freeptr    = NULL;
+		vector->elements[entry].element    = NULL;
+	}
+
+	// Having called any free pointers, we just reset the entry count
+	// back to zero.
+	//
+	vector->count	= 0;
+}
+
+static	
+void	ANTLR3_CDECL	antlr3VectorFree    (pANTLR3_VECTOR vector)
+{
+	ANTLR3_UINT32   entry;
+
+	// We must traverse every entry in the vector and if it has
+	// a pointer to a free function then we call it with
+	// the entry pointer
+	//
+	for	(entry = 0; entry < vector->count; entry++)
+	{
+		if  (vector->elements[entry].freeptr != NULL)
+		{
+			vector->elements[entry].freeptr(vector->elements[entry].element);
+		}
+		vector->elements[entry].freeptr    = NULL;
+		vector->elements[entry].element    = NULL;
+	}
+
+	if	(vector->factoryMade == ANTLR3_FALSE)
+	{
+		// The entries are freed, so free the element allocation
+		//
+        if  (vector->elementsSize > ANTLR3_VECTOR_INTERNAL_SIZE)
+        {
+            ANTLR3_FREE(vector->elements);
+        }
+		vector->elements = NULL;
+
+		// Finally, free the allocation for the vector itself
+		//
+		ANTLR3_FREE(vector);
+	}
+}
+
+static	void		antlr3VectorDel	    (pANTLR3_VECTOR vector, ANTLR3_UINT32 entry)
+{
+	// Check this is a valid request first
+	//
+	if	(entry >= vector->count)
+	{
+		return;
+	}
+
+	// Valid request, check for free pointer and call it if present
+	//
+	if	(vector->elements[entry].freeptr != NULL)
+	{
+		vector->elements[entry].freeptr(vector->elements[entry].element);
+		vector->elements[entry].freeptr    = NULL;
+	}
+
+	if	(entry == vector->count - 1)
+	{
+		// Ensure the pointer is never reused by accident; the count is
+		// decremented below.
+		//
+		vector->elements[entry].element    = NULL;
+	}
+	else
+	{
+		// Need to shuffle trailing pointers back over the deleted entry
+		//
+		ANTLR3_MEMMOVE(vector->elements + entry, vector->elements + entry + 1, sizeof(ANTLR3_VECTOR_ELEMENT) * (vector->count - entry - 1));
+	}
+
+	// One less entry in the vector now
+	//
+	vector->count--;
+}
+
+static	void *		antlr3VectorGet     (pANTLR3_VECTOR vector, ANTLR3_UINT32 entry)
+{
+	// Ensure this is a valid request
+	//
+	if	(entry < vector->count)
+	{
+		return	vector->elements[entry].element;
+	}
+	else
+	{
+		// I know nothing, Mr. Fawlty!
+		//
+		return	NULL;
+	}
+}
+
+/// Remove the entry from the vector, but do not free any entry, even if it has
+/// a free pointer.
+///
+static	void *		antrl3VectorRemove  (pANTLR3_VECTOR vector, ANTLR3_UINT32 entry)
+{
+	void * element;
+
+	// Check this is a valid request first 
+	//
+	if	(entry >= vector->count)
+	{
+		return NULL;
+	}
+
+	// Valid request, return the stored pointer
+	//
+
+	element				    = vector->elements[entry].element;
+
+	if	(entry == vector->count - 1)
+	{
+		// Ensure the pointer is never reused by accident; the count is
+		// decremented below.
+		///
+		vector->elements[entry].element    = NULL;
+		vector->elements[entry].freeptr    = NULL;
+	}
+	else
+	{
+		// Need to shuffle trailing pointers back over the deleted entry
+		//
+		ANTLR3_MEMMOVE(vector->elements + entry, vector->elements + entry + 1, sizeof(ANTLR3_VECTOR_ELEMENT) * (vector->count - entry - 1));
+	}
+
+	// One less entry in the vector now
+	//
+	vector->count--;
+
+	return  element;
+}
+
+static  ANTLR3_BOOLEAN
+antlr3VectorResize  (pANTLR3_VECTOR vector, ANTLR3_UINT32 hint)
+{
+	ANTLR3_UINT32	newSize;
+
+	// Need to resize the element pointers. We double the allocation
+	// we already have unless asked for a specific increase.
+    //
+    if (hint == 0 || hint < vector->elementsSize)
+    {
+        newSize = vector->elementsSize * 2;
+    }
+    else
+    {
+        newSize = hint * 2;
+    }
+
+    // Now we know how many we need, so we see if we have just expanded
+    // past the built in vector elements or were already past that
+    //
+    if  (vector->elementsSize > ANTLR3_VECTOR_INTERNAL_SIZE)
+    {
+        // We were already larger than the internal size, so we just
+        // use realloc so that the pointers are copied for us
+        //
+		pANTLR3_VECTOR_ELEMENT newElements = (pANTLR3_VECTOR_ELEMENT)ANTLR3_REALLOC(vector->elements, (sizeof(ANTLR3_VECTOR_ELEMENT)* newSize));
+		if (newElements == NULL)
+		{
+			// realloc failed, but the old allocation is still there
+			return ANTLR3_FALSE;
+		}
+        vector->elements = newElements;
+    }
+    else
+    {
+        // The current size was less than or equal to the internal array size and as we always start
+        // with a size that is at least the maximum internal size, then we must need to allocate new memory
+        // for external pointers. We don't want to take the time to calculate if a requested element
+        // is part of the internal or external entries, so we copy the internal ones to the new space
+        //
+        vector->elements	= (pANTLR3_VECTOR_ELEMENT)ANTLR3_MALLOC((sizeof(ANTLR3_VECTOR_ELEMENT)* newSize));
+		if (vector->elements == NULL)
+		{
+			// malloc failed
+			return ANTLR3_FALSE;
+		}
+        ANTLR3_MEMCPY(vector->elements, vector->internal, ANTLR3_VECTOR_INTERNAL_SIZE * sizeof(ANTLR3_VECTOR_ELEMENT));
+    }
+
+	vector->elementsSize	= newSize;
+	return ANTLR3_TRUE;
+}
+
+/// Add the supplied pointer and freeing function pointer to the list,
+/// expanding the vector if needed.
+///
+static	ANTLR3_UINT32    antlr3VectorAdd	    (pANTLR3_VECTOR vector, void * element, void (ANTLR3_CDECL *freeptr)(void *))
+{
+	// Do we need to resize the vector table?
+	//
+	if	(vector->count == vector->elementsSize)
+	{
+		// Give no hint, we let it double the current allocation
+		if (!antlr3VectorResize(vector, 0))
+		{
+			// Resize failed
+			return 0;
+		}
+	}
+
+	// Insert the new entry
+	//
+	vector->elements[vector->count].element	= element;
+	vector->elements[vector->count].freeptr	= freeptr;
+
+	vector->count++;	    // One more element counted
+
+	return  (ANTLR3_UINT32)(vector->count);
+
+}
+
+/// Replace the element at the specified entry point with the supplied
+/// entry.
+///
+static	ANTLR3_UINT32    
+antlr3VectorSet	    (pANTLR3_VECTOR vector, ANTLR3_UINT32 entry, void * element, void (ANTLR3_CDECL *freeptr)(void *), ANTLR3_BOOLEAN freeExisting)
+{
+
+	// If the vector is currently not big enough, then we expand it
+	//
+	if (entry >= vector->elementsSize)
+	{
+		// We will get at least this many
+		if (!antlr3VectorResize(vector, entry))
+		{
+			// Resize failed
+			return 0;
+		}
+	}
+
+	// Valid request, replace the current one, freeing any prior entry if told to
+	//
+	if	(		entry < vector->count						// If actually replacing an element
+			&&	freeExisting								// And told to free any existing element
+			&&	vector->elements[entry].freeptr != NULL		// And the existing element has a free pointer
+		)
+	{
+		vector->elements[entry].freeptr(vector->elements[entry].element);
+	}
+
+	// Install the new pointers
+	//
+	vector->elements[entry].freeptr	= freeptr;
+	vector->elements[entry].element	= element;
+
+	if (entry >= vector->count)
+	{
+		vector->count = entry + 1;
+	}
+	return  (ANTLR3_UINT32)(entry);	    // Indicates the replacement was successful
+
+}
+
+/// Swap the contents of the two specified entries in the vector,
+/// including their free pointers.
+///
+static	ANTLR3_BOOLEAN
+antlr3VectorSwap	    (pANTLR3_VECTOR vector, ANTLR3_UINT32 entry1, ANTLR3_UINT32 entry2)
+{
+
+    void               * tempEntry;
+    void (ANTLR3_CDECL *freeptr)(void *);
+
+	// If the vector is currently not big enough, then we do nothing
+	//
+	if (entry1 >= vector->elementsSize || entry2 >= vector->elementsSize)
+	{
+        return ANTLR3_FALSE;
+	}
+
+	// Valid request, swap them
+	//
+    tempEntry   = vector->elements[entry1].element;
+    freeptr     = vector->elements[entry1].freeptr;
+
+	// Install the new pointers
+	//
+    vector->elements[entry1].freeptr	= vector->elements[entry2].freeptr;
+	vector->elements[entry1].element	= vector->elements[entry2].element;
+
+	vector->elements[entry2].freeptr	= freeptr;
+	vector->elements[entry2].element	= tempEntry;
+
+	return  ANTLR3_TRUE;
+
+}
+
+static	ANTLR3_UINT32   antlr3VectorSize    (pANTLR3_VECTOR vector)
+{
+    return  vector->count;
+}
+
+#ifdef ANTLR3_WINDOWS
+#pragma warning	(push)
+#pragma warning (disable : 4100)
+#endif
+/// Vector factory creation
+///
+ANTLR3_API pANTLR3_VECTOR_FACTORY
+antlr3VectorFactoryNew	    (ANTLR3_UINT32 sizeHint)
+{
+	pANTLR3_VECTOR_FACTORY  factory;
+
+	// Allocate memory for the factory
+	//
+	factory = (pANTLR3_VECTOR_FACTORY)ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_VECTOR_FACTORY)));
+
+	if	(factory == NULL)
+	{
+		return	NULL;
+	}
+
+	// Factory memory is good, so create a new vector pool
+	//
+    factory->pools      = NULL;
+    factory->thisPool   = -1;
+
+    newPool(factory);
+
+    // Initialize the API, ignore the hint as this algorithm does
+    // a better job really.
+    //
+    antlr3SetVectorApi(&(factory->unTruc), ANTLR3_VECTOR_INTERNAL_SIZE);
+    
+    factory->unTruc.factoryMade = ANTLR3_TRUE;
+
+	// Install the factory API
+	//
+	factory->close			= closeVectorFactory;
+	factory->newVector		= newVector;
+	factory->returnVector	= returnVector;
+
+	// Create a stack to accumulate reusable vectors
+	//
+	factory->freeStack		= antlr3StackNew(16);
+	return  factory;
+}
+#ifdef ANTLR3_WINDOWS
+#pragma warning	(pop)
+#endif
+
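+/*
+ * Illustrative usage sketch, not part of the runtime itself: roughly how a
+ * vector factory and its vectors are expected to be used. It assumes that the
+ * member names installed by antlr3SetVectorApi mirror the static
+ * implementations above (add, size, and so on); 'someElement' is just a
+ * placeholder for caller owned data.
+ *
+ *   pANTLR3_VECTOR_FACTORY vf = antlr3VectorFactoryNew(0);
+ *   if (vf != NULL)
+ *   {
+ *       pANTLR3_VECTOR v = vf->newVector(vf);    // Borrow a vector from the pool
+ *
+ *       v->add(v, someElement, NULL);            // NULL free pointer, caller keeps ownership
+ *
+ *       vf->returnVector(vf, v);                 // Clears v and queues it for reuse
+ *       vf->close(vf);                           // Releases everything the factory made
+ *   }
+ */
+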
+static	void				
+returnVector		(pANTLR3_VECTOR_FACTORY factory, pANTLR3_VECTOR vector)
+{
+	// First we need to clear out anything that is still in the vector
+	//
+	vector->clear(vector);
+
+	// We have a free stack available so we can add the vector we were
+	// given into the free chain. The vector has to have come from this
+	// factory, so we already know how to release its memory when it
+	// dies by virtue of the factory being closed.
+	//
+	factory->freeStack->push(factory->freeStack, vector, NULL);
+
+	// TODO: remove this line once happy printf("Returned vector %08X to the pool, stack size is %d\n", vector, factory->freeStack->size(factory->freeStack));
+}
+
+static ANTLR3_BOOLEAN
+newPool(pANTLR3_VECTOR_FACTORY factory)
+{
+	pANTLR3_VECTOR *newPools;
+
+    /* Increment factory count
+     */
+    ++factory->thisPool;
+
+    /* Ensure we have enough pointers allocated
+     */
+	newPools = (pANTLR3_VECTOR *)
+		ANTLR3_REALLOC(	(void *)factory->pools,	    /* Current pools pointer (starts at NULL)	*/
+					(ANTLR3_UINT32)((factory->thisPool + 1) * sizeof(pANTLR3_VECTOR *))	/* Memory for new pool pointers */
+					);
+	if (newPools == NULL)
+	{
+		// realloc failed, but we still have the old allocation
+		--factory->thisPool;
+		return ANTLR3_FALSE;
+	}
+	factory->pools = newPools;
+
+    /* Allocate a new pool for the factory
+     */
+    factory->pools[factory->thisPool]	=
+			    (pANTLR3_VECTOR)
+				ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_VECTOR) * ANTLR3_FACTORY_VPOOL_SIZE));
+	if (factory->pools[factory->thisPool] == NULL)
+	{
+		// malloc failed
+		--factory->thisPool;
+		return ANTLR3_FALSE;
+	}
+
+
+    /* Reset the counters
+     */
+    factory->nextVector	= 0;
+
+    /* Done
+     */
+    return ANTLR3_TRUE;
+}
+
+static  void		
+closeVectorFactory  (pANTLR3_VECTOR_FACTORY factory)
+{
+    pANTLR3_VECTOR      pool;
+    ANTLR3_INT32        poolCount;
+    ANTLR3_UINT32       limit;
+    ANTLR3_UINT32       vector;
+    pANTLR3_VECTOR      check;
+
+	// First see if we have a free chain stack to release?
+	//
+	if	(factory->freeStack != NULL)
+	{
+		factory->freeStack->free(factory->freeStack);
+	}
+
+    /* We iterate the vector pools one at a time
+     */
+    for (poolCount = 0; poolCount <= factory->thisPool; poolCount++)
+    {
+        /* Pointer to current pool
+         */
+        pool = factory->pools[poolCount];
+
+        /* Work out how many vectors we need to check in this pool.
+         */
+        limit = (poolCount == factory->thisPool ? factory->nextVector : ANTLR3_FACTORY_VPOOL_SIZE);
+
+        /* Marginal condition, we might be at the start of a brand new pool
+         * where the nextVector is 0 and nothing has been allocated.
+         */
+        if (limit > 0)
+        {
+            /* We have some vectors allocated from this pool
+             */
+            for (vector = 0; vector < limit; vector++)
+            {
+                /* Next one in the chain
+                 */
+                check = pool + vector;
+
+                // Call the free function on each of the vectors in the pool,
+                // which in turn will cause any elements it holds that also have a free
+                // pointer to be freed. However, because any vector may be in any other
+                // vector, we don't free the element allocations yet. We do that in a
+                // specific pass, coming up next. The vector free function knows that
+                // this is a factory allocated pool vector and so it won't free things it
+                // should not.
+                //
+                check->free(check);
+            }
+        }
+    }
+
+    /* We iterate the vector pools one at a time once again, but this time
+     * we are going to free up any allocated element pointers. Note that we are doing this
+     * so that we do not try to release vectors twice. When building ASTs we just copy
+     * the vectors all over the place and they may be embedded in this vector pool
+     * numerous times.
+     */
+    for (poolCount = 0; poolCount <= factory->thisPool; poolCount++)
+    {
+        /* Pointer to current pool
+         */
+        pool = factory->pools[poolCount];
+
+        /* Work out how many vectors we need to check in this pool.
+         */
+        limit = (poolCount == factory->thisPool ? factory->nextVector : ANTLR3_FACTORY_VPOOL_SIZE);
+
+        /* Marginal condition, we might be at the start of a brand new pool
+         * where the nextVector is 0 and nothing has been allocated.
+         */
+        if (limit > 0)
+        {
+            /* We have some vectors allocated from this pool
+             */
+            for (vector = 0; vector < limit; vector++)
+            {
+                /* Next one in the chain
+                 */
+                check = pool + vector;
+
+                // Anything in here should be factory made, but we do this just
+                // to triple check. We just free up the elements if they were
+                // allocated beyond the internal size.
+                //
+                if (check->factoryMade == ANTLR3_TRUE && check->elementsSize > ANTLR3_VECTOR_INTERNAL_SIZE)
+                {
+                    ANTLR3_FREE(check->elements);
+                    check->elements = NULL;
+                }
+            }
+        }
+
+        // We can now free this pool allocation as we have called free on every element in every vector
+        // and freed any memory for pointers that grew beyond the internal size limit.
+        //
+        ANTLR3_FREE(factory->pools[poolCount]);
+        factory->pools[poolCount] = NULL;
+    }
+
+    /* All the pools are deallocated we can free the pointers to the pools
+     * now.
+     */
+    ANTLR3_FREE(factory->pools);
+
+    /* Finally, we can free the space for the factory itself
+     */
+    ANTLR3_FREE(factory);
+
+}
+
+static pANTLR3_VECTOR
+newVector(pANTLR3_VECTOR_FACTORY factory)
+{
+    pANTLR3_VECTOR vector;
+
+	// If we have anything on the reclaim stack, reuse it
+	//
+	vector = (pANTLR3_VECTOR)factory->freeStack->peek(factory->freeStack);
+
+	if  (vector != NULL)
+	{
+		// Cool we got something we could reuse
+		//
+		factory->freeStack->pop(factory->freeStack);
+
+		// TODO: remove this line once happy printf("Reused vector %08X from stack, size is now %d\n", vector, factory->freeStack->size(factory->freeStack));
+		return vector;
+
+	}
+
+	// See if we need a new vector pool before allocating a new
+    // one
+    //
+    if (factory->nextVector >= ANTLR3_FACTORY_VPOOL_SIZE)
+    {
+        // We ran out of vectors in the current pool, so we need a new pool
+        //
+        if (!newPool(factory))
+		{
+			// new pool creation failed
+			return NULL;
+		}
+    }
+
+    // Assuming everything went well (we are trying for performance here, so error
+    // checking is minimal), we can work out the pointer to the next vector.
+    //
+    vector = factory->pools[factory->thisPool] + factory->nextVector;
+    factory->nextVector++;
+
+    // We have our token pointer now, so we can initialize it to the predefined model.
+    //
+    antlr3SetVectorApi(vector, ANTLR3_VECTOR_INTERNAL_SIZE);
+    vector->factoryMade = ANTLR3_TRUE;
+
+    // We know that the pool vectors are created at the default size, which means they
+    // will start off using their internal entry pointers. We must initialize our pool vector
+    // to point to its own internal entry table and not the pre-made one.
+    //
+    vector->elements = vector->internal;
+
+		// TODO: remove this line once happy printf("Used a new vector at %08X from the pools as nothing on the reuse stack\n", vector);
+
+    // And we are done
+    //
+    return vector;
+}
+
+/** Array of leftmost set bit positions for an 8 bit
+ *  element; it provides an efficient way to find the highest bit
+ *  that is set in an n byte value (n>0). Assuming the values will all hit the data cache,
+ *  coding without conditional elements should allow branch
+ *  prediction to work well and of course a parallel instruction cache
+ *  will whip through this. Otherwise we must loop, shifting a one
+ *  bit and masking. The values we tend to be placing in our integer
+ *  patricia trie are usually a lot lower than the 64 bits we
+ *  allow for the key. Hence there is a lot of redundant looping and
+ *  shifting in a while loop, whereas the lookup table is just
+ *  a few ANDs and indirect lookups, while testing for 0. This
+ *  is likely to be done in parallel on many processors available
+ *  when I wrote this. If this code survives as long as yacc, then
+ *  I may already be dead by the time you read this and maybe there is
+ *  a single machine instruction to perform the operation. What
+ *  else are you going to do with all those transistors? Jim 2007
+ *
+ * The table is probably obvious but it is just the number 0..7
+ * of the MSB in each integer value 0..255
+ */
+static ANTLR3_UINT8 bitIndex[256] = 
+{ 
+    0,													// 0 - Just for padding
+    0,													// 1
+    1, 1,												// 2..3
+    2, 2, 2, 2,											// 4..7
+    3, 3, 3, 3, 3, 3, 3, 3,								// 8+
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,	    // 16+
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,	    // 32+
+	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,	    
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,	    // 64+
+	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,	    // 128+
+	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 
+	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+};
+
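+/*
+ * Illustrative sketch only, not used by the runtime: finding the most
+ * significant set bit of a 16 bit value with the table above, in the same
+ * byte-at-a-time style that intTrieAdd() uses further down for the full
+ * 64 bit case.
+ *
+ *   ANTLR3_UINT32 msb16(ANTLR3_UINT32 x)   // x assumed non-zero and < 0x10000
+ *   {
+ *       if (x & 0xFF00)
+ *       {
+ *           return 8 + bitIndex[(x & 0xFF00) >> 8];  // MSB lies in the high byte
+ *       }
+ *       return bitIndex[x & 0x00FF];                 // MSB lies in the low byte
+ *   }
+ */
+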
+/** Rather than use the bit index of a trie node to shift
+ *  0x01 left that many times, then & with the result, it is
+ *  faster to use the bit index as an index into this table
+ *  which holds precomputed masks for any of the 64 bits
+ *  we need to mask off singly. The data values will stay in
+ *  cache while ever a trie is in heavy use, such as in
+ *  memoization. It is also pretty enough to be ASCII art.
+ */
+static ANTLR3_UINT64 bitMask[64] = 
+{
+    0x0000000000000001ULL, 0x0000000000000002ULL, 0x0000000000000004ULL, 0x0000000000000008ULL,
+    0x0000000000000010ULL, 0x0000000000000020ULL, 0x0000000000000040ULL, 0x0000000000000080ULL,
+    0x0000000000000100ULL, 0x0000000000000200ULL, 0x0000000000000400ULL, 0x0000000000000800ULL,
+    0x0000000000001000ULL, 0x0000000000002000ULL, 0x0000000000004000ULL, 0x0000000000008000ULL,
+    0x0000000000010000ULL, 0x0000000000020000ULL, 0x0000000000040000ULL, 0x0000000000080000ULL,
+    0x0000000000100000ULL, 0x0000000000200000ULL, 0x0000000000400000ULL, 0x0000000000800000ULL,
+    0x0000000001000000ULL, 0x0000000002000000ULL, 0x0000000004000000ULL, 0x0000000008000000ULL,
+    0x0000000010000000ULL, 0x0000000020000000ULL, 0x0000000040000000ULL, 0x0000000080000000ULL,
+    0x0000000100000000ULL, 0x0000000200000000ULL, 0x0000000400000000ULL, 0x0000000800000000ULL,
+    0x0000001000000000ULL, 0x0000002000000000ULL, 0x0000004000000000ULL, 0x0000008000000000ULL,
+    0x0000010000000000ULL, 0x0000020000000000ULL, 0x0000040000000000ULL, 0x0000080000000000ULL,
+    0x0000100000000000ULL, 0x0000200000000000ULL, 0x0000400000000000ULL, 0x0000800000000000ULL,
+    0x0001000000000000ULL, 0x0002000000000000ULL, 0x0004000000000000ULL, 0x0008000000000000ULL,
+    0x0010000000000000ULL, 0x0020000000000000ULL, 0x0040000000000000ULL, 0x0080000000000000ULL,
+    0x0100000000000000ULL, 0x0200000000000000ULL, 0x0400000000000000ULL, 0x0800000000000000ULL,
+    0x1000000000000000ULL, 0x2000000000000000ULL, 0x4000000000000000ULL, 0x8000000000000000ULL
+};
+
+/* INT TRIE Implementation of depth 64 bits, being the number of bits
+ * in a 64 bit integer. 
+ */
+
+pANTLR3_INT_TRIE
+antlr3IntTrieNew(ANTLR3_UINT32 depth)
+{
+	pANTLR3_INT_TRIE	trie;
+
+	trie    = (pANTLR3_INT_TRIE) ANTLR3_CALLOC(1, sizeof(ANTLR3_INT_TRIE));	/* Base memory required	*/
+
+	if (trie == NULL)
+	{
+		return	(pANTLR3_INT_TRIE) ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
+	}
+
+	/* Now we need to allocate the root node. This makes it easier
+	 * to use the tree as we don't have to do anything special 
+	 * for the root node.
+	 */
+	trie->root	= (pANTLR3_INT_TRIE_NODE) ANTLR3_CALLOC(1, sizeof(ANTLR3_INT_TRIE_NODE));
+
+	if (trie->root == NULL)
+	{
+		ANTLR3_FREE(trie);
+		return	(pANTLR3_INT_TRIE) ANTLR3_FUNC_PTR(ANTLR3_ERR_NOMEM);
+	}
+
+	trie->add	= intTrieAdd;
+	trie->del	= intTrieDel;
+	trie->free	= intTrieFree;
+	trie->get	= intTrieGet;
+
+	/* Now we seed the root node with the index being the
+	 * highest left most bit we want to test, which limits the
+	 * keys in the trie. This is the trie 'depth'. The limit for
+	 * this implementation is 63 (bits 0..63).
+	 */
+	trie->root->bitNum = depth;
+
+	/* And as we have nothing in here yet, we set both child pointers
+	 * of the root node to point back to itself.
+	 */
+	trie->root->leftN	= trie->root;
+	trie->root->rightN	= trie->root;
+	trie->count			= 0;
+
+	/* Finally, note that the key for this root node is 0 because
+	 * we use calloc() to initialise it.
+	 */
+
+	return trie;
+}
+
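+/*
+ * Illustrative usage sketch, not part of the runtime itself: creating a trie,
+ * adding a pointer payload and looking it up again. The key width of 63 and
+ * the use of ANTLR3_HASH_TYPE_STR for pointer payloads follow the code in this
+ * file; 'myData' and 'myFree' are hypothetical caller supplied names and error
+ * handling is elided.
+ *
+ *   pANTLR3_INT_TRIE   trie  = antlr3IntTrieNew(63);   // Keys may use bits 0..63
+ *   pANTLR3_TRIE_ENTRY entry;
+ *
+ *   trie->add(trie, (ANTLR3_INTKEY)42, ANTLR3_HASH_TYPE_STR, 0, myData, myFree);
+ *
+ *   entry = trie->get(trie, (ANTLR3_INTKEY)42);
+ *   if (entry != NULL)
+ *   {
+ *       // entry->data.ptr is myData
+ *   }
+ *
+ *   trie->free(trie);    // Frees the nodes and calls myFree on the stored payload
+ */
+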
+/** Search the int Trie and return a pointer to the first bucket indexed
+ *  by the key if it is contained in the trie, otherwise NULL.
+ */
+static	pANTLR3_TRIE_ENTRY   
+intTrieGet	(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key)
+{
+	pANTLR3_INT_TRIE_NODE    thisNode; 
+	pANTLR3_INT_TRIE_NODE    nextNode; 
+
+	if (trie->count == 0)
+	{
+		return NULL;	    /* Nothing in this trie yet	*/
+	}
+	/* Starting at the root node in the trie, compare the bit index
+	 * of the current node with its next child node (starts left from root).
+	 * When the bit index of the child node is greater than the bit index of the current node
+	 * then by definition (as the bit index decreases as we descend the trie)
+	 * we have reached a 'backward' pointer. A backward pointer means we
+	 * have reached the only node that can be reached by the bits given us so far
+	 * and it must either be the key we are looking for, or if not then it
+	 * means the entry was not in the trie, and we return NULL. A backward pointer
+	 * points back in to the tree structure rather than down (deeper) within the
+	 * tree branches.
+	 */
+	thisNode	= trie->root;		/* Start at the root node		*/
+	nextNode	= thisNode->leftN;	/* Examine the left node from the root	*/
+
+	/* While we are descending the tree nodes...
+	 */
+	while (thisNode->bitNum > nextNode->bitNum)
+	{
+		/* Next node now becomes the new 'current' node
+		 */
+		thisNode    = nextNode;
+
+		/* We now test the bit indicated by the bitmap in the next node
+		 * in the key we are searching for. The new next node is the
+		 * right node if that bit is set and the left node it is not.
+		 */
+		if (key & bitMask[nextNode->bitNum])
+		{
+			nextNode = nextNode->rightN;	/* 1 is right	*/
+		}
+		else
+		{
+			nextNode = nextNode->leftN;		/* 0 is left	*/
+		}
+	}
+
+	/* Here we have reached a node where the bitMap index is lower than
+	 * its parent. This means it is pointing backward in the tree and
+	 * must therefore be a terminal node, being the only point that can
+	 * be reached with the bits seen so far. It is either the actual key
+	 * we wanted, or if that key is not in the trie it is another key
+	 * that is currently the only one that can be reached by those bits.
+	 * That situation would obviously change if the key was to be added
+	 * to the trie.
+	 *
+	 * Hence it only remains to test whether this is actually the key or not.
+	 */
+	if (nextNode->key == key)
+	{
+		/* This was the key, so return the entry pointer
+		 */
+		return	nextNode->buckets;
+	}
+	else
+	{
+		return	NULL;	/* That key is not in the trie (note that we set the pointer to -1 if no payload) */
+	}
+}
+
+
+/** Delete an entry from the INT trie.
+ *  Note that deletion is not currently implemented; the call is a no-op
+ *  and always returns ANTLR3_FALSE.
+ */
+static	ANTLR3_BOOLEAN
+intTrieDel	(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key)
+{
+    (void)trie;     // Unused until deletion is implemented
+    (void)key;
+
+    return ANTLR3_FALSE;
+}
+
+/** Add an entry into the INT trie.
+ *  Basically we descend the trie as we do when searching it, which will
+ *  locate the only node in the trie that can be reached by the bit pattern of the
+ *  key. If the key is actually at that node, then if the trie accepts duplicates
+ *  we add the supplied data in a new chained bucket to that data node. If it does
+ *  not accept duplicates then we merely return FALSE in case the caller wants to know
+ *  whether the key was already in the trie.
+ *  If the node we locate is not the key we are looking to add, then we insert a new node
+ *  into the trie with a bit index of the leftmost differing bit and the left or right 
+ *  node pointing to itself or the data node we are inserting 'before'. 
+ */
+static	ANTLR3_BOOLEAN		
+intTrieAdd	(pANTLR3_INT_TRIE trie, ANTLR3_INTKEY key, ANTLR3_UINT32 type, ANTLR3_INTKEY intVal, void * data, void (ANTLR3_CDECL *freeptr)(void *))
+{
+	pANTLR3_INT_TRIE_NODE   thisNode;
+	pANTLR3_INT_TRIE_NODE   nextNode;
+	pANTLR3_INT_TRIE_NODE   entNode;
+	ANTLR3_UINT32			depth;
+	pANTLR3_TRIE_ENTRY	    newEnt;
+	pANTLR3_TRIE_ENTRY	    nextEnt;
+	ANTLR3_INTKEY		    xorKey;
+
+	/* Cache the bit depth of this trie, which is always the highest index, 
+	 * which is in the root node
+	 */
+	depth   = trie->root->bitNum;
+
+	thisNode	= trie->root;		/* Start with the root node	    */
+	nextNode	= trie->root->leftN;	/* And assume we start to the left  */
+
+	/* Now find the only node that can be currently reached by the bits in the
+	 * key we are being asked to insert.
+	 */
+	while (thisNode->bitNum  > nextNode->bitNum)
+	{
+		/* Still descending the structure, next node becomes current.
+		 */
+		thisNode = nextNode;
+
+		if (key & bitMask[nextNode->bitNum])
+		{
+			/* Bit at the required index was 1, so traverse the right node from here
+			 */
+			nextNode = nextNode->rightN;
+		}
+		else
+		{
+			/* Bit at the required index was 0, so we traverse to the left
+			 */
+			nextNode = nextNode->leftN;
+		}
+	}
+	/* Here we have located the only node that can be reached by the
+	 * bits in the requested key. It could in fact be that key or the node
+	 * we need to use to insert the new key.
+	 */
+	if (nextNode->key == key)
+	{
+		/* We have located an exact match, but we will only append to the bucket chain
+		 * if this trie accepts duplicate keys.
+		 */
+		if (trie->allowDups ==ANTLR3_TRUE)
+		{
+			/* Yes, we are accepting duplicates
+			 */
+			newEnt = (pANTLR3_TRIE_ENTRY)ANTLR3_CALLOC(1, sizeof(ANTLR3_TRIE_ENTRY));
+
+			if (newEnt == NULL)
+			{
+				/* Out of memory, all we can do is return the fact that the insert failed.
+				 */
+				return	ANTLR3_FALSE;
+			}
+
+			/* Otherwise insert this in the chain
+			*/
+			newEnt->type	= type;
+			newEnt->freeptr	= freeptr;
+			if (type == ANTLR3_HASH_TYPE_STR)
+			{
+				newEnt->data.ptr = data;
+			}
+			else
+			{
+				newEnt->data.intVal = intVal;
+			}
+
+			/* We want to be able to traverse the stored elements in the order that they were
+			 * added as duplicate keys. We might need to revise this opinion if we end up having many duplicate keys
+			 * as perhaps reverse order is just as good, so long as it is ordered.
+			 */
+			nextEnt = nextNode->buckets;
+			while (nextEnt->next != NULL)
+			{
+				nextEnt = nextEnt->next;    
+			}
+			nextEnt->next = newEnt;
+
+			trie->count++;
+			return  ANTLR3_TRUE;
+		}
+		else
+		{
+			/* We found the key is already there and we are not allowed duplicates in this
+			 * trie.
+			 */
+			return  ANTLR3_FALSE;
+		}
+	}
+
+	/* Here we have discovered the only node that can be reached by the bits in the key
+	 * but we have found that this node is not the key we need to insert. We must find the
+	 * leftmost bit by which the current key for that node and the new key we are going
+	 * to insert differ. While this nested series of ifs may look a bit strange, experimentation
+	 * showed that it allows a machine code path that works well with predicated execution.
+	 */
+	xorKey = (key ^ nextNode->key);   /* Gives 1 bits only where they differ then we find the left most 1 bit*/
+
+	/* Most common case is a 32 bit key really
+	 */
+#ifdef	ANTLR3_USE_64BIT
+	if	(xorKey & 0xFFFFFFFF00000000)
+	{
+		if  (xorKey & 0xFFFF000000000000)
+		{
+			if	(xorKey & 0xFF00000000000000)
+			{
+				depth = 56 + bitIndex[((xorKey & 0xFF00000000000000)>>56)];
+			}
+			else
+			{
+				depth = 48 + bitIndex[((xorKey & 0x00FF000000000000)>>48)];
+			}
+		}
+		else
+		{
+			if	(xorKey & 0x0000FF0000000000)
+			{
+				depth = 40 + bitIndex[((xorKey & 0x0000FF0000000000)>>40)];
+			}
+			else
+			{
+				depth = 32 + bitIndex[((xorKey & 0x000000FF00000000)>>32)];
+			}
+		}
+	}
+	else
+#endif
+	{
+		if  (xorKey & 0x00000000FFFF0000)
+		{
+			if	(xorKey & 0x00000000FF000000)
+			{
+				depth = 24 + bitIndex[((xorKey & 0x00000000FF000000)>>24)];
+			}
+			else
+			{
+				depth = 16 + bitIndex[((xorKey & 0x0000000000FF0000)>>16)];
+			}
+		}
+		else
+		{
+			if	(xorKey & 0x000000000000FF00)
+			{
+				depth = 8 + bitIndex[((xorKey & 0x000000000000FF00)>>8)];
+			}
+			else
+			{
+				depth = bitIndex[xorKey & 0x00000000000000FF];
+			}
+		}
+	}
+
+    /* We have located the leftmost differing bit, indicated by the depth variable. So, we know what
+     * bit index we are to insert the new entry at. There are two cases: either the keys
+     * differ on a bit that is currently being skipped in the indexed comparisons, or they differ
+     * on a bit that is merely lower down in the current bit search. If the bit index chain went
+     * bit 4, bit 2 and they differ at bit 3, then we have the "skipped" bit case. But if that chain
+     * was bit 4, bit 2 and they differ at bit 1, then we have the easy bit <pun>.
+     *
+     * So, set up to descend the tree again, but this time looking for the insert point
+     * according to whether we skip the bit that differs or not.
+     */
+    thisNode	= trie->root;
+    entNode	= trie->root->leftN;
+
+    /* Note the slight difference in the checks here to cover both cases
+     */
+    while (thisNode->bitNum > entNode->bitNum && entNode->bitNum > depth)
+    {
+	/* Still descending the structure, next node becomes current.
+	 */
+	thisNode = entNode;
+
+	if (key & bitMask[entNode->bitNum])
+	{
+	    /* Bit at the required index was 1, so traverse the right node from here
+	     */
+	    entNode = entNode->rightN;
+	}
+	else
+	{
+	    /* Bit at the required index was 0, so we traverse to the left
+	     */
+	    entNode = entNode->leftN;
+	}
+    }
+
+    /* We have located the correct insert point for this new key, so we need
+     * to allocate our entry and insert it etc.
+     */
+    nextNode	= (pANTLR3_INT_TRIE_NODE)ANTLR3_CALLOC(1, sizeof(ANTLR3_INT_TRIE_NODE));
+    if (nextNode == NULL)
+    {
+	/* All that work and no memory - bummer.
+	 */
+	return	ANTLR3_FALSE;
+    }
+
+    /* Build a new entry block for the new node
+     */
+    newEnt = (pANTLR3_TRIE_ENTRY)ANTLR3_CALLOC(1, sizeof(ANTLR3_TRIE_ENTRY));
+
+    if (newEnt == NULL)
+    {
+	/* Out of memory, all we can do is return the fact that the insert failed.
+	 */
+	return	ANTLR3_FALSE;
+    }
+
+    /* Otherwise enter this in our new node
+    */
+    newEnt->type	= type;
+    newEnt->freeptr	= freeptr;
+    if (type == ANTLR3_HASH_TYPE_STR)
+    {
+	newEnt->data.ptr = data;
+    }
+    else
+    {
+	newEnt->data.intVal = intVal;
+    }
+    /* Install it
+     */
+    nextNode->buckets	= newEnt;
+    nextNode->key	= key;
+    nextNode->bitNum	= depth;
+
+    /* Work out the right and left pointers for this new node, which involve
+     * terminating with the current found node either right or left according
+     * to whether the current index bit is 1 or 0
+     */
+    if (key & bitMask[depth])
+    {
+	nextNode->leftN	    = entNode;	    /* Terminates at previous position	*/
+	nextNode->rightN    = nextNode;	    /* Terminates with itself		*/
+    }
+    else
+    {
+	nextNode->rightN   = entNode;	    /* Terminates at previous position	*/
+	nextNode->leftN    = nextNode;	    /* Terminates with itself		*/		
+    }
+
+    /* Finally, we need to change the pointers at the node we located
+     * for inserting. If the key bit at its index is set then the right
+     * pointer for that node becomes the newly created node, otherwise the left 
+     * pointer does.
+     */
+    if (key & bitMask[thisNode->bitNum] )
+    {
+	thisNode->rightN    = nextNode;
+    }
+    else
+    {
+	thisNode->leftN	    = nextNode;
+    }
+
+    /* Et voila
+     */
+    trie->count++;
+    return  ANTLR3_TRUE;
+
+}
+/** Release memory allocated to this tree.
+ *  Basic algorithm is that we do a depth first left descent and free
+ *  up any nodes that are not backward pointers.
+ */
+static void
+freeIntNode(pANTLR3_INT_TRIE_NODE node)
+{
+    pANTLR3_TRIE_ENTRY	thisEntry;
+    pANTLR3_TRIE_ENTRY	nextEntry;
+
+    /* If this node has a left pointer that is not a back pointer
+     * then recursively call to free this
+     */
+    if (node->bitNum > node->leftN->bitNum)
+    {
+	/* We have a left node that needs descending, so do it.
+	 */
+	freeIntNode(node->leftN);
+    }
+
+    /* The left nodes from here should now be dealt with, so 
+     * we need to descend any right nodes that are not back pointers
+     */
+    if (node->bitNum > node->rightN->bitNum)
+    {
+	/* There are some right nodes to descend and deal with.
+	 */
+	freeIntNode(node->rightN);
+    }
+
+    /* Now all the children are dealt with, we can destroy
+     * this node too
+     */
+    thisEntry	= node->buckets;
+
+    while (thisEntry != NULL)
+    {
+	nextEntry   = thisEntry->next;
+
+	/* Do we need to call a custom free pointer for this string entry?
+	 */
+	if (thisEntry->type == ANTLR3_HASH_TYPE_STR && thisEntry->freeptr != NULL)
+	{
+	    thisEntry->freeptr(thisEntry->data.ptr);
+	}
+
+	/* Now free the data for this bucket entry
+	 */
+	ANTLR3_FREE(thisEntry);
+	thisEntry = nextEntry;	    /* See if there are any more to free    */
+    }
+
+    /* The bucket entry is now gone, so we can free the memory for
+     * the entry itself.
+     */
+    ANTLR3_FREE(node);
+
+    /* And that should be it for everything under this node and itself
+     */
+}
+
+/** Called to free all nodes and the structure itself.
+ */
+static	void			
+intTrieFree	(pANTLR3_INT_TRIE trie)
+{
+    /* Descend from the root and free all the nodes
+     */
+    freeIntNode(trie->root);
+
+    /* the nodes are all gone now, so we need only free the memory
+     * for the structure itself
+     */
+    ANTLR3_FREE(trie);
+}
+
+
+/**
+ * Allocate and initialize a new ANTLR3 topological sorter, which can be
+ * used to define edges that identify numerical node indexes that depend on other
+ * numerical node indexes. The nodes can then be sorted topologically such that
+ * any node is sorted after all of the nodes it depends on.
+ *
+ * Use:
+ *
+ * \verbatim
+
+  pANTLR3_TOPO topo;
+  topo = antlr3NewTopo();
+
+  if (topo == NULL) { /* out of memory */ }
+
+  topo->addEdge(topo, 3, 0); // Node 3 depends on node 0
+  topo->addEdge(topo, 0, 1); // Node 0 depends on node 1
+  topo->sortVector(topo, myVector); // Sort the vector in place (node numbers are the vector entry numbers)
+
+ * \endverbatim
+ */
+ANTLR3_API pANTLR3_TOPO
+antlr3TopoNew()
+{
+    pANTLR3_TOPO topo = (pANTLR3_TOPO)ANTLR3_MALLOC(sizeof(ANTLR3_TOPO));
+
+    if  (topo == NULL)
+    {
+        return NULL;
+    }
+
+    // Initialize variables
+    //
+
+    topo->visited   = NULL;                 // Don't know how big it is yet
+    topo->limit     = 1;                    // No edges added yet
+    topo->edges     = NULL;                 // No edges added yet
+    topo->sorted    = NULL;                 // Nothing sorted at the start
+    topo->cycle     = NULL;                 // No cycles at the start
+    topo->cycleMark = 0;                    // No cycles at the start
+    topo->hasCycle  = ANTLR3_FALSE;         // No cycle at the start
+    
+    // API
+    //
+    topo->addEdge       = addEdge;
+    topo->sortToArray   = sortToArray;
+    topo->sortVector    = sortVector;
+    topo->free          = freeTopo;
+
+    return topo;
+}
+// Topological sorter
+//
+static  void
+addEdge          (pANTLR3_TOPO topo, ANTLR3_UINT32 edge, ANTLR3_UINT32 dependency)
+{
+    ANTLR3_UINT32   i;
+    ANTLR3_UINT32   maxEdge;
+    pANTLR3_BITSET  edgeDeps;
+
+    if (edge>dependency)
+    {
+        maxEdge = edge;
+    }
+    else
+    {
+        maxEdge = dependency;
+    }
+    // We need to add an edge to say that the node indexed by 'edge' is
+    // dependent on the node indexed by 'dependency'
+    //
+
+    // First see if we have enough room in the edges array to add the edge?
+    //
+    if (topo->edges == NULL)
+    {
+        // We don't have any edges yet, so create an array to hold them
+        //
+        topo->edges = (pANTLR3_BITSET*)ANTLR3_CALLOC(sizeof(pANTLR3_BITSET) * (maxEdge + 1), 1);
+        if (topo->edges == NULL)
+        {
+            return;
+        }
+
+        // Set the limit to what we have now
+        //
+        topo->limit = maxEdge + 1;
+    }
+    else if (topo->limit <= maxEdge)
+    {
+        // We have some edges, but not enough
+        //
+        pANTLR3_BITSET *newEdges = (pANTLR3_BITSET*)ANTLR3_REALLOC(topo->edges, sizeof(pANTLR3_BITSET) * (maxEdge + 1));
+        if (newEdges == NULL)
+        {
+            return;     // Out of memory, but the old edge table is still intact
+        }
+        topo->edges = newEdges;
+
+        // Initialize the new bitmaps to indicate we have no edges defined yet
+        //
+        for (i = topo->limit; i <= maxEdge; i++)
+        {
+            *((topo->edges) + i) = NULL;
+        }
+
+        // Set the limit to what we have now
+        //
+        topo->limit = maxEdge + 1;
+    }
+
+    // If the edge was flagged as depending on itself, then we just
+    // do nothing as it means this routine was just called to add it
+    // into the list of nodes.
+    //
+    if  (edge == dependency)
+    {
+        return;
+    }
+
+    // Pick up the bit map for the requested edge
+    //
+    edgeDeps = *((topo->edges) + edge);
+
+    if  (edgeDeps == NULL)
+    {
+        // No edges are defined yet for this node
+        //
+        edgeDeps                = antlr3BitsetNew(0);
+        *((topo->edges) + edge) = edgeDeps;
+        if (edgeDeps == NULL )
+        {
+            return;  // Out of memory
+        }
+    }
+
+    // Set the bit in the bitmap that corresponds to the requested
+    // dependency.
+    //
+    edgeDeps->add(edgeDeps, dependency);
+
+    // And we are all set
+    //
+    return;
+}
+
+
+/**
+ * Given a starting node, descend its dependent nodes (ones that it has edges
+ * to) until we find one without edges. Having found a node without edges, we have
+ * discovered the bottom of a depth first search, which we can then ascend, adding
+ * the nodes in order from the bottom, which gives us the dependency order.
+ */
+static void
+DFS(pANTLR3_TOPO topo, ANTLR3_UINT32 node)
+{
+    pANTLR3_BITSET edges;
+
+    // Guard against a revisit and check for cycles
+    //
+    if  (topo->hasCycle == ANTLR3_TRUE)
+    {
+        return; // We don't do anything else if we found a cycle
+    }
+
+    if  (topo->visited->isMember(topo->visited, node))
+    {
+        // Check to see if we found a cycle. To do this we search the
+        // current cycle stack and see if we find this node already in the stack.
+        //
+        ANTLR3_UINT32   i;
+
+        for (i=0; i<topo->cycleMark; i++)
+        {
+            if  (topo->cycle[i] == node)
+            {
+                // Stop! We found a cycle in the input, so rejig the cycle
+                // stack so that it only contains the cycle and set the cycle flag
+                // which will tell the caller what happened
+                //
+                ANTLR3_UINT32 l;
+
+                for (l = i; l < topo->cycleMark; l++)
+                {
+                    topo->cycle[l - i] = topo->cycle[l];    // Move to zero base in the cycle list
+                }
+
+                // Recalculate the limit
+                //
+                topo->cycleMark -= i;
+
+                // Signal disaster
+                //
+                topo->hasCycle = ANTLR3_TRUE;
+            }
+        }
+        return;
+    }
+
+    // So far, no cycles have been found and we have not visited this node yet,
+    // so this node needs to go into the cycle stack before we continue;
+    // we will take it out of the stack once we have descended all its
+    // dependencies.
+    //
+    topo->cycle[topo->cycleMark++] = node;
+
+    // First flag that we have visited this node
+    //
+    topo->visited->add(topo->visited, node);
+
+    // Now, if this node has edges, then we want to ensure we visit
+    // them all before we drop through and add this node into the sorted
+    // list.
+    //
+    edges = *((topo->edges) + node);
+    if  (edges != NULL)
+    {
+        // We have some edges, so visit each of the edge nodes
+        // that have not already been visited.
+        //
+        ANTLR3_UINT32   numBits;	    // How many bits are in the set
+        ANTLR3_UINT32   i;
+        ANTLR3_UINT32   range;
+
+        numBits = edges->numBits(edges);
+        range   = edges->size(edges);   // Number of set bits
+
+        // Stop if we exhaust the bit list or have checked the
+        // number of edges that this node refers to (so we don't
+        // check bits at the end that cannot possibly be set).
+        //
+        for (i=0; i<= numBits && range > 0; i++)
+        {
+            if  (edges->isMember(edges, i))
+            {
+                range--;        // About to check another one
+
+                // Found an edge, make sure we visit and descend it
+                //
+                DFS(topo, i);
+            }
+        }
+    }
+
+    // At this point we will have visited all the dependencies
+    // of this node and they will be ordered (even if there are cycles)
+    // So we just add the node into the sorted list at the
+    // current index position.
+    //
+    topo->sorted[topo->limit++] = node;
+
+    // Remove this node from the cycle list if we have not detected a cycle
+    //
+    if  (topo->hasCycle == ANTLR3_FALSE)
+    {
+        topo->cycleMark--;
+    }
+
+    return;
+}
+
+static  pANTLR3_UINT32
+sortToArray      (pANTLR3_TOPO topo)
+{
+    ANTLR3_UINT32 v;
+    ANTLR3_UINT32 oldLimit;
+
+    // Guard against being called with no edges defined
+    //
+    if  (topo->edges == NULL)
+    {
+        return NULL;
+    }
+    // First we need an array to populate with enough
+    // entries to accommodate the sorted list, and another to accommodate
+    // the maximum cycle we could detect, which is all nodes such as 0->1->2->3->0
+    //
+    topo->sorted    = (pANTLR3_UINT32)ANTLR3_MALLOC(topo->limit * sizeof(ANTLR3_UINT32));
+	if (topo->sorted == NULL)
+	{
+		return NULL;
+	}
+    topo->cycle     = (pANTLR3_UINT32)ANTLR3_MALLOC(topo->limit * sizeof(ANTLR3_UINT32));
+	if (topo->cycle == NULL)
+	{
+		return NULL;
+	}
+
+    // Next we need an empty bitset to show whether we have visited a node
+    // or not. This is the bit that gives us linear time of course as we are essentially
+    // dropping through the nodes in depth first order and when we get to a node that
+    // has no edges, we pop back up the stack adding the nodes we traversed in reverse
+    // order.
+    //
+    topo->visited   = antlr3BitsetNew(0);
+
+    // Now traverse the nodes as if we were just going left to right, but
+    // then descend each node unless it has already been visited.
+    //
+    oldLimit    = topo->limit;     // Number of nodes to traverse linearly
+    topo->limit = 0;               // Next entry in the sorted table
+
+    for (v = 0; v < oldLimit; v++)
+    {
+        // If we did not already visit this node, then descend it until we
+        // get a node without edges or arrive at a node we have already visited.
+        //
+        if  (topo->visited->isMember(topo->visited, v) == ANTLR3_FALSE)
+        {
+            // We have not visited this one so descend it
+            //
+            DFS(topo, v);
+        }
+
+        // Break the loop if we detect a cycle as we have no need to go any
+        // further
+        //
+        if  (topo->hasCycle == ANTLR3_TRUE)
+        {
+            break;
+        }
+    }
+
+    // Reset the limit to the number we recorded because, if we hit a
+    // cycle, limit will have stopped at the node where we
+    // discovered the cycle, but in order to free the edge bitmaps
+    // we need to know how many we may have allocated and traverse them all.
+    //
+    topo->limit = oldLimit;
+
+    // Having traversed all the nodes we were given, we
+    // are guaranteed to have ordered all the nodes or detected a
+    // cycle.
+    //
+    return topo->sorted;
+}
+
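+/*
+ * Illustrative sketch, not part of the runtime itself: how a caller might
+ * consume the result of sortToArray(), including the cycle information that
+ * DFS() above records in hasCycle, cycle and cycleMark.
+ *
+ *   pANTLR3_UINT32 order = topo->sortToArray(topo);
+ *
+ *   if (order == NULL)
+ *   {
+ *       // No edges were defined, or an allocation failed
+ *   }
+ *   else if (topo->hasCycle == ANTLR3_TRUE)
+ *   {
+ *       // topo->cycle[0 .. topo->cycleMark - 1] holds the nodes forming the cycle
+ *   }
+ *   else
+ *   {
+ *       // order[0 .. topo->limit - 1] lists the node numbers in dependency order
+ *   }
+ */
+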
+static  void
+sortVector       (pANTLR3_TOPO topo, pANTLR3_VECTOR v)
+{
+    // To sort a vector, we first perform the
+    // sort to an array, then use the results to reorder the vector
+    // we are given. This is just a convenience routine that allows you to
+    // sort the children of a tree node into topological order before or
+    // during an AST walk. This can be useful for optimizations that require
+    // dag reorders and also when the input stream defines things that are
+    // interdependent and you want to walk the list of the generated trees
+    // for those things in topological order so you can ignore the interdependencies
+    // at that point.
+    //
+    ANTLR3_UINT32 i;
+
+    // Used as a lookup index to find the current location in the vector of
+    // the vector entry that was originally at position [0], [1], [2] etc
+    //
+    pANTLR3_UINT32  vIndex;
+
+    // Sort into an array, then we can use the array that is
+    // stored in the topo
+    //
+    if  (topo->sortToArray(topo) == 0)
+    {
+        return;     // There were no edges
+    }
+
+    if  (topo->hasCycle == ANTLR3_TRUE)
+    {
+        return;  // Do nothing if we detected a cycle
+    }
+
+    // Ensure that the vector we are sorting is at least as big as the
+    // input sequence we were asked to sort. It does not matter if it is
+    // bigger as that probably just means that nodes numbered higher than the
+    // limit had no dependencies and so can be left alone.
+    //
+    if  (topo->limit > v->count)
+    {
+        // We can only sort the entries that we have dude! The caller is
+        // responsible for ensuring the vector is the correct one and is the
+        // correct size etc.
+        //
+        topo->limit = v->count;
+    }
+    // We need to know the locations of each of the entries
+    // in the vector as we don't want to duplicate them in a new vector. We
+    // just use an indirection table to get the vector entry for a particular sequence
+    // according to where we moved it last. Then we can just swap vector entries until
+    // we are done :-)
+    //
+    vIndex = (pANTLR3_UINT32)ANTLR3_MALLOC(topo->limit * sizeof(ANTLR3_UINT32));
+	if (vIndex == NULL)
+	{
+		// malloc failed
+		return;
+	}
+
+    // Start index, each vector entry is located where you think it is
+    //
+    for (i = 0; i < topo->limit; i++)
+    {
+        vIndex[i] = i;
+    }
+
+    // Now we traverse the sorted array and move the entries of
+    // the vector around according to the sort order and the indirection
+    // table we just created. The index tells us where in the vector the
+    // original element entry n is now located via vIndex[n].
+    //
+    for (i=0; i < topo->limit; i++)
+    {
+        ANTLR3_UINT32   ind;
+
+        // If the vector entry at i is already the one that it
+        // should be, then we skip moving it of course.
+        //
+        if  (vIndex[topo->sorted[i]] == i)
+        {
+            continue;
+        }
+
+        // The vector entry at i, should be replaced with the
+        // vector entry indicated by topo->sorted[i]. The vector entry
+        // at topo->sorted[i] may have already been swapped out though, so we
+        // find where it is now and move it from there to i.
+        //
+        ind     = vIndex[topo->sorted[i]];
+        v->swap(v, i, ind);
+
+        // Update our index. The element at i is now the one we wanted
+        // to be sorted here and the element we swapped out is now the
+        // element that was at i just before we swapped it. If you are lost now
+        // don't worry about it, we are just reindexing on the fly is all.
+        //
+        vIndex[topo->sorted[i]] = i;
+        vIndex[i] = ind;
+    }
+
+    // Having traversed all the entries, we have sorted the vector in place.
+    //
+    ANTLR3_FREE(vIndex);
+    return;
+}
+
+static  void
+freeTopo             (pANTLR3_TOPO topo)
+{
+    ANTLR3_UINT32   i;
+
+    // Free the result vector
+    //
+    if  (topo->sorted != NULL)
+    {
+        ANTLR3_FREE(topo->sorted);
+        topo->sorted = NULL;
+    }
+
+    // Free the visited map
+    //
+    if  (topo->visited != NULL)
+    {
+
+        topo->visited->free(topo->visited);
+        topo->visited = NULL;
+    }
+
+    // Free any edgemaps
+    //
+    if  (topo->edges != NULL)
+    {
+        pANTLR3_BITSET edgeList;
+
+        
+        for (i=0; i<topo->limit; i++)
+        {
+            edgeList = *((topo->edges) + i);
+            if  (edgeList != NULL)
+            {
+                edgeList->free(edgeList);
+            }
+        }
+
+        ANTLR3_FREE(topo->edges);
+    }
+    topo->edges = NULL;
+    
+    // Free any cycle map
+    //
+    if  (topo->cycle != NULL)
+    {
+        ANTLR3_FREE(topo->cycle);
+    }
+
+    ANTLR3_FREE(topo);
+}
diff --git a/runtime/C/src/antlr3commontoken.c b/runtime/C/src/antlr3commontoken.c
new file mode 100644
index 0000000..da4ef63
--- /dev/null
+++ b/runtime/C/src/antlr3commontoken.c
@@ -0,0 +1,609 @@
+/**
+ * Contains the default implementation of the common token used within
+ * the ANTLR3 C runtime. Custom tokens should create this structure and then append to it using the
+ * custom pointer to install their own structure and API.
+ */
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3.h>
+
+/* Token API
+ */
+static  pANTLR3_STRING	getText					(pANTLR3_COMMON_TOKEN token);
+static  void			setText					(pANTLR3_COMMON_TOKEN token, pANTLR3_STRING text);
+static  void			setText8				(pANTLR3_COMMON_TOKEN token, pANTLR3_UINT8 text);
+static	ANTLR3_UINT32   getType					(pANTLR3_COMMON_TOKEN token);
+static  void			setType					(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 type);
+static  ANTLR3_UINT32   getLine					(pANTLR3_COMMON_TOKEN token);
+static  void			setLine					(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 line);
+static  ANTLR3_INT32    getCharPositionInLine	(pANTLR3_COMMON_TOKEN token);
+static  void			setCharPositionInLine	(pANTLR3_COMMON_TOKEN token, ANTLR3_INT32 pos);
+static  ANTLR3_UINT32   getChannel				(pANTLR3_COMMON_TOKEN token);
+static  void			setChannel				(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 channel);
+static  ANTLR3_MARKER   getTokenIndex			(pANTLR3_COMMON_TOKEN token);
+static  void			setTokenIndex			(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER);
+static  ANTLR3_MARKER   getStartIndex			(pANTLR3_COMMON_TOKEN token);
+static  void			setStartIndex			(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER index);
+static  ANTLR3_MARKER   getStopIndex			(pANTLR3_COMMON_TOKEN token);
+static  void			setStopIndex			(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER index);
+static  pANTLR3_STRING  toString				(pANTLR3_COMMON_TOKEN token);
+
+/* Factory API
+ */
+static	void			factoryClose	(pANTLR3_TOKEN_FACTORY factory);
+static	pANTLR3_COMMON_TOKEN	newToken	(void);
+static  void			setInputStream	(pANTLR3_TOKEN_FACTORY factory, pANTLR3_INPUT_STREAM input);
+static	void                    factoryReset    (pANTLR3_TOKEN_FACTORY factory);
+
+/* Internal management functions
+ */
+static	ANTLR3_BOOLEAN			newPool		(pANTLR3_TOKEN_FACTORY factory);
+static	pANTLR3_COMMON_TOKEN    newPoolToken	(pANTLR3_TOKEN_FACTORY factory);
+
+
+ANTLR3_API pANTLR3_COMMON_TOKEN
+antlr3CommonTokenNew(ANTLR3_UINT32 ttype)
+{
+	pANTLR3_COMMON_TOKEN    token;
+
+	// Create a raw token with the interface installed
+	//
+	token   = newToken();
+
+	if	(token != NULL)
+	{
+		token->setType(token, ttype);
+	}
+
+	// All good
+	//
+	return  token;
+}
+
+ANTLR3_API pANTLR3_TOKEN_FACTORY
+antlr3TokenFactoryNew(pANTLR3_INPUT_STREAM input)
+{
+    pANTLR3_TOKEN_FACTORY   factory;
+
+    /* allocate memory
+     */
+    factory	= (pANTLR3_TOKEN_FACTORY) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_TOKEN_FACTORY));
+
+    if	(factory == NULL)
+    {
+	return	NULL;
+    }
+
+    /* Install factory API
+     */
+    factory->newToken	    = newPoolToken;
+    factory->close	    = factoryClose;
+    factory->setInputStream = setInputStream;
+    factory->reset          = factoryReset;
+    
+    /* Allocate the initial pool
+     */
+    factory->thisPool	= -1;
+    factory->pools      = NULL;
+    factory->maxPool    = -1;
+    newPool(factory);
+
+    /* Factory space is good, we now want to initialize our cheating token
+     * which, once it is initialized, is the model for all tokens we manufacture
+     */
+    antlr3SetTokenAPI(&factory->unTruc);
+
+    /* Set some initial variables for future copying
+     */
+    factory->unTruc.factoryMade	= ANTLR3_TRUE;
+
+    // Input stream
+    //
+    setInputStream(factory, input);
+    
+    return  factory;
+
+}
+
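+/*
+ * Illustrative usage sketch, not part of the runtime itself: the expected life
+ * cycle of a token factory. 'input' is assumed to be an already constructed
+ * pANTLR3_INPUT_STREAM (it may also be NULL, in which case the tokens have no
+ * string factory until setInputStream() is called); the token type value below
+ * is a placeholder. Between runs, reset() can be called to reuse the pools
+ * rather than closing and recreating the factory.
+ *
+ *   pANTLR3_TOKEN_FACTORY tf = antlr3TokenFactoryNew(input);
+ *   pANTLR3_COMMON_TOKEN  tok;
+ *
+ *   if (tf != NULL)
+ *   {
+ *       tok = tf->newToken(tf);      // Pool allocated, never freed individually
+ *       tok->setType(tok, 1);
+ *
+ *       tf->close(tf);               // Releases every pool token and the factory itself
+ *   }
+ */
+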
+static void
+setInputStream	(pANTLR3_TOKEN_FACTORY factory, pANTLR3_INPUT_STREAM input)
+{
+    factory->input          =  input;
+    factory->unTruc.input   =  input;
+	if	(input != NULL)
+	{
+		factory->unTruc.strFactory	= input->strFactory;
+	}
+	else
+	{
+		factory->unTruc.strFactory = NULL;
+    }
+}
+
+static ANTLR3_BOOLEAN
+newPool(pANTLR3_TOKEN_FACTORY factory)
+{
+    /* Increment factory count
+     */
+    ++(factory->thisPool);
+
+    // If we were reusing this token factory then we may already have a pool
+    // allocated. If we exceeded the max available then we must allocate a new
+    // one.
+    if  (factory->thisPool > factory->maxPool)
+    {
+        /* Ensure we have enough pointers allocated
+         */
+		pANTLR3_COMMON_TOKEN *newPools = (pANTLR3_COMMON_TOKEN *)
+			ANTLR3_REALLOC((void *)factory->pools,	    /* Current pools pointer (starts at NULL)	*/
+		                   (ANTLR3_UINT32)((factory->thisPool + 1) * sizeof(pANTLR3_COMMON_TOKEN *))	/* Memory for new pool pointers */
+			);
+		if (newPools == NULL)
+		{
+			// We are out of memory, but the old allocation is still valid for now
+			--(factory->thisPool);
+			return ANTLR3_FALSE;
+		}
+
+        factory->pools = newPools;
+
+        /* Allocate a new pool for the factory
+         */
+        factory->pools[factory->thisPool]	=
+			        (pANTLR3_COMMON_TOKEN) 
+				    ANTLR3_CALLOC(1, (size_t)(sizeof(ANTLR3_COMMON_TOKEN) * ANTLR3_FACTORY_POOL_SIZE));
+		if (factory->pools[factory->thisPool] == NULL)
+		{
+			// Allocation failed
+			--(factory->thisPool);
+			return ANTLR3_FALSE;
+		}
+
+        // We now have a new pool and can track it as the maximum we have created so far
+        //
+        factory->maxPool = factory->thisPool;
+    }
+
+    /* Reset the counters
+     */
+    factory->nextToken	= 0;
+  
+    /* Done
+     */
+    return ANTLR3_TRUE;
+}
+
+static pANTLR3_COMMON_TOKEN
+newPoolToken(pANTLR3_TOKEN_FACTORY factory)
+{
+    pANTLR3_COMMON_TOKEN token;
+
+	if (factory == NULL) { return NULL; }
+
+    /* See if we need a new token pool before allocating a new
+     * one
+     */
+    if (factory->nextToken >= ANTLR3_FACTORY_POOL_SIZE)
+    {
+        /* We ran out of tokens in the current pool, so we need a new pool
+         */
+        if (!newPool(factory))
+		{
+			return NULL;
+		}
+    }
+
+	// make sure the factory is sane
+	if (factory->pools == NULL) { return NULL; }
+	if (factory->pools[factory->thisPool] == NULL) { return NULL; }
+
+    /* Assuming everything went well (we are trying for performance here, so error
+     * checking is minimal), we can work out the pointer to the next token.
+     */
+    token = factory->pools[factory->thisPool] + factory->nextToken;
+    factory->nextToken++;
+
+    /* We have our token pointer now, so we can initialize it to the predefined model.
+     * We only need do this though if the token is not already initialized, we just check
+     * an api function pointer for this as they are allocated via calloc.
+     */
+    if  (token->setStartIndex == NULL)
+    {
+        antlr3SetTokenAPI(token);
+
+        // It is factory made, and we need to copy the string factory pointer
+        //
+        token->factoryMade  = ANTLR3_TRUE;
+        token->strFactory   = factory->input == NULL ? NULL : factory->input->strFactory;
+        token->input        = factory->input;
+    }
+
+    /* And we are done
+     */
+    return token;
+}
+
+static	void
+factoryReset	    (pANTLR3_TOKEN_FACTORY factory)
+{
+    // Just start again with pool #0 when we are
+    // called.
+    //
+    factory->thisPool   = -1;
+    newPool(factory);
+}
+
+static	void
+factoryClose	    (pANTLR3_TOKEN_FACTORY factory)
+{
+    pANTLR3_COMMON_TOKEN    pool;
+    ANTLR3_INT32	    poolCount;
+    ANTLR3_UINT32	    limit;
+    ANTLR3_UINT32	    token;
+    pANTLR3_COMMON_TOKEN    check;
+
+    /* We iterate the token pools one at a time
+     */
+    for	(poolCount = 0; poolCount <= factory->thisPool; poolCount++)
+    {
+	/* Pointer to current pool
+	 */
+	pool	= factory->pools[poolCount];
+
+	/* Work out how many tokens we need to check in this pool.
+	 */
+	limit	= (poolCount == factory->thisPool ? factory->nextToken : ANTLR3_FACTORY_POOL_SIZE);
+	
+	/* Marginal condition, we might be at the start of a brand new pool
+	 * where the nextToken is 0 and nothing has been allocated.
+	 */
+	if  (limit > 0)
+	{
+	    /* We have some tokens allocated from this pool
+	     */
+	    for (token = 0; token < limit; token++)
+	    {
+		/* Next one in the chain
+		 */
+		check	= pool + token;
+
+		/* If the programmer made this a custom token, then
+		 * see if we need to call their free routine.
+		 */
+		if  (check->custom != NULL && check->freeCustom != NULL)
+		{
+		    check->freeCustom(check->custom);
+		    check->custom = NULL;
+		}
+	    }
+	}
+
+	/* We can now free this pool allocation
+	 */
+	ANTLR3_FREE(factory->pools[poolCount]);
+	factory->pools[poolCount] = NULL;
+    }
+
+    /* All the pools are deallocated we can free the pointers to the pools
+     * now.
+     */
+    ANTLR3_FREE(factory->pools);
+
+    /* Finally, we can free the space for the factory itself
+     */
+    ANTLR3_FREE(factory);
+}
+
+
+static	pANTLR3_COMMON_TOKEN	
+newToken(void)
+{
+    pANTLR3_COMMON_TOKEN    token;
+
+    /* Allocate memory for this
+     */
+    token   = (pANTLR3_COMMON_TOKEN) ANTLR3_CALLOC(1, (size_t)(sizeof(ANTLR3_COMMON_TOKEN)));
+
+    if	(token == NULL)
+    {
+	return	NULL;
+    }
+
+    // Install the API
+    //
+    antlr3SetTokenAPI(token);
+    token->factoryMade = ANTLR3_FALSE;
+
+    return  token;
+}
+
+ANTLR3_API void
+antlr3SetTokenAPI(pANTLR3_COMMON_TOKEN token)
+{
+    token->getText		    = getText;
+    token->setText		    = setText;
+    token->setText8		    = setText8;
+    token->getType		    = getType;
+    token->setType		    = setType;
+    token->getLine		    = getLine;
+    token->setLine		    = setLine;
+    token->getCharPositionInLine    = getCharPositionInLine;
+    token->setCharPositionInLine    = setCharPositionInLine;
+    token->getChannel		    = getChannel;
+    token->setChannel		    = setChannel;
+    token->getTokenIndex	    = getTokenIndex;
+    token->setTokenIndex	    = setTokenIndex;
+    token->getStartIndex	    = getStartIndex;
+    token->setStartIndex	    = setStartIndex;
+    token->getStopIndex		    = getStopIndex;
+    token->setStopIndex		    = setStopIndex;
+    token->toString		    = toString;
+
+    return;
+}
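+
+// Illustrative usage sketch (not part of the runtime itself): once the API above is
+// installed, a caller can obtain a token from a factory and drive it through these
+// accessors. The calls are the factory and token functions defined in this file;
+// the token type value is hypothetical.
+//
+//     pANTLR3_TOKEN_FACTORY  factory = antlr3TokenFactoryNew(NULL); // no input stream needed for imaginary tokens
+//     pANTLR3_COMMON_TOKEN   tok     = factory->newToken(factory);
+//
+//     if (tok != NULL)
+//     {
+//         tok->setType (tok, 4);                        // 4 stands in for a generated token type
+//         tok->setText8(tok, (pANTLR3_UINT8)"example"); // kept as a char * until getText() converts it
+//     }
+//
+//     factory->close(factory);                          // releases the pools and the tokens in them
+//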
+
+static  pANTLR3_STRING  getText			(pANTLR3_COMMON_TOKEN token)
+{
+	switch (token->textState)
+	{
+		case ANTLR3_TEXT_STRING:
+
+			// Someone already created a string for this token, so we just
+			// use it.
+			//
+			return	token->tokText.text;
+			break;
+    
+		case ANTLR3_TEXT_CHARP:
+
+			// We had a straight text pointer installed, now we
+			// must convert it to a string. Note we have to do this here
+			// or otherwise setText8() will just install the same char*
+			//
+			if	(token->strFactory != NULL)
+			{
+				token->tokText.text	= token->strFactory->newStr8(token->strFactory, (pANTLR3_UINT8)token->tokText.chars);
+				token->textState	= ANTLR3_TEXT_STRING;
+				return token->tokText.text;
+			}
+			else
+			{
+				// We cannot do anything here
+				//
+				return NULL;
+			}
+			break;
+
+		default:
+
+			// EOF is a special case
+			//
+			if (token->type == ANTLR3_TOKEN_EOF)
+			{
+				token->tokText.text				= token->strFactory->newStr8(token->strFactory, (pANTLR3_UINT8)"<EOF>");
+				token->textState				= ANTLR3_TEXT_STRING;
+				token->tokText.text->factory	= token->strFactory;
+				return token->tokText.text;
+			}
+
+
+			// We had nothing installed in the token, create a new string
+			// from the input stream
+			//
+
+			if	(token->input != NULL)
+			{
+			
+				return	token->input->substr(	token->input, 
+												token->getStartIndex(token), 
+ 												token->getStopIndex(token)
+											);
+			}
+
+			// Nothing to return, there is no input stream
+			//
+			return NULL;
+			break;
+	}
+}
+static  void		setText8		(pANTLR3_COMMON_TOKEN token, pANTLR3_UINT8 text)
+{
+	// No text to set, so ignore
+	//
+	if	(text == NULL) return;
+
+	switch	(token->textState)
+	{
+		case	ANTLR3_TEXT_NONE:
+		case	ANTLR3_TEXT_CHARP:	// Caller must free before setting again, if it needs to be freed
+
+			// Nothing in there yet, or just a char *, so just set the
+			// text as a pointer
+			//
+			token->textState		= ANTLR3_TEXT_CHARP;
+			token->tokText.chars	= (pANTLR3_UCHAR)text;
+			break;
+
+		default:
+
+			// It was already a pANTLR3_STRING, so just override it
+			//
+			token->tokText.text->set8(token->tokText.text, (const char *)text);
+			break;
+	}
+
+	// We are done 
+	//
+	return;
+}
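+
+// A minimal sketch of the lazy text handling above (tok is assumed to be a
+// pANTLR3_COMMON_TOKEN with a string factory attached):
+//
+//     tok->setText8(tok, (pANTLR3_UINT8)"ident");  // ANTLR3_TEXT_CHARP - only the pointer is stored
+//     pANTLR3_STRING s = tok->getText(tok);        // promotes to ANTLR3_TEXT_STRING via tok->strFactory
+//     tok->setText8(tok, (pANTLR3_UINT8)"other");  // now rewrites the existing string in place via set8()
+//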
+
+/** \brief Install the supplied text string as the text for the token.
+ * The method assumes that the existing text (if any) was created by a factory
+ * and so does not attempt to release any memory it is using. Text not created
+ * by a string factory (not advised) should be released prior to this call.
+ */
+static  void		setText			(pANTLR3_COMMON_TOKEN token, pANTLR3_STRING text)
+{
+	// Merely replaces an existing pre-defined text with the supplied
+	// string
+	//
+	token->textState	= ANTLR3_TEXT_STRING;
+	token->tokText.text	= text;
+
+	/* We are done 
+	*/
+	return;
+}
+
+static	ANTLR3_UINT32   getType			(pANTLR3_COMMON_TOKEN token)
+{
+    return  token->type;
+}
+
+static  void		setType			(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 type)
+{
+    token->type = type;
+}
+
+static  ANTLR3_UINT32   getLine			(pANTLR3_COMMON_TOKEN token)
+{
+    return  token->line;
+}
+
+static  void		setLine			(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 line)
+{
+    token->line = line;
+}
+
+static  ANTLR3_INT32    getCharPositionInLine	(pANTLR3_COMMON_TOKEN token)
+{
+    return  token->charPosition;
+}
+
+static  void		setCharPositionInLine	(pANTLR3_COMMON_TOKEN token, ANTLR3_INT32 pos)
+{
+    token->charPosition = pos;
+}
+
+static  ANTLR3_UINT32   getChannel		(pANTLR3_COMMON_TOKEN token)
+{
+    return  token->channel;
+}
+
+static  void		setChannel		(pANTLR3_COMMON_TOKEN token, ANTLR3_UINT32 channel)
+{
+    token->channel  = channel;
+}
+
+static  ANTLR3_MARKER   getTokenIndex		(pANTLR3_COMMON_TOKEN token)
+{
+    return  token->index;
+}
+
+static  void		setTokenIndex		(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER index)
+{
+    token->index    = index;
+}
+
+static  ANTLR3_MARKER   getStartIndex		(pANTLR3_COMMON_TOKEN token)
+{
+	return  token->start == -1 ? (ANTLR3_MARKER)(token->input->data) : token->start;
+}
+
+static  void		setStartIndex		(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER start)
+{
+    token->start    = start;
+}
+
+static  ANTLR3_MARKER   getStopIndex		(pANTLR3_COMMON_TOKEN token)
+{
+    return  token->stop;
+}
+
+static  void		setStopIndex		(pANTLR3_COMMON_TOKEN token, ANTLR3_MARKER stop)
+{
+    token->stop	= stop;
+}
+
+static  pANTLR3_STRING    toString		(pANTLR3_COMMON_TOKEN token)
+{
+    pANTLR3_STRING  text;
+    pANTLR3_STRING  outtext;
+
+    text    =	token->getText(token);
+    
+    if	(text == NULL)
+    {
+		return NULL;
+    }
+
+	if	(text->factory == NULL)
+	{
+		return text;		// This usually means it is the EOF token
+	}
+
+    /* A new empty string to assemble all the stuff in
+     */
+    outtext = text->factory->newRaw(text->factory);
+
+    /* Now we use our handy dandy string utility to assemble
+     * the reporting string
+     * return "[@"+getTokenIndex()+","+start+":"+stop+"='"+txt+"',<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+"]";
+     */
+    outtext->append8(outtext, "[Index: ");
+    outtext->addi   (outtext, (ANTLR3_INT32)token->getTokenIndex(token));
+    outtext->append8(outtext, " (Start: ");
+    outtext->addi   (outtext, (ANTLR3_INT32)token->getStartIndex(token));
+    outtext->append8(outtext, "-Stop: ");
+    outtext->addi   (outtext, (ANTLR3_INT32)token->getStopIndex(token));
+    outtext->append8(outtext, ") ='");
+    outtext->appendS(outtext, text);
+    outtext->append8(outtext, "', type<");
+    outtext->addi   (outtext, token->type);
+    outtext->append8(outtext, "> ");
+
+    if	(token->getChannel(token) > ANTLR3_TOKEN_DEFAULT_CHANNEL)
+    {
+		outtext->append8(outtext, "(channel = ");
+		outtext->addi	(outtext, (ANTLR3_INT32)token->getChannel(token));
+		outtext->append8(outtext, ") ");
+    }
+
+    outtext->append8(outtext, "Line: ");
+    outtext->addi   (outtext, (ANTLR3_INT32)token->getLine(token));
+    outtext->append8(outtext, " LinePos:");
+    outtext->addi   (outtext, token->getCharPositionInLine(token));
+    outtext->addc   (outtext, ']');
+
+    return  outtext;
+}
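+
+// For reference, the string assembled above looks roughly like this (values are
+// invented for illustration; the channel clause only appears for tokens that are
+// not on the default channel):
+//
+//     pANTLR3_STRING repr = tok->toString(tok);
+//     // [Index: 12 (Start: 34-Stop: 38) ='count', type<17> Line: 3 LinePos:8]
+//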
+
diff --git a/runtime/C/src/antlr3commontree.c b/runtime/C/src/antlr3commontree.c
new file mode 100644
index 0000000..e275263
--- /dev/null
+++ b/runtime/C/src/antlr3commontree.c
@@ -0,0 +1,568 @@
+// \file
+//
+// Implementation of ANTLR3 CommonTree, which you can use as a
+// starting point for your own tree, though it is often easier just to tag things
+// onto the user pointer in the tree unless you are building a different type
+// of structure.
+//
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3commontree.h>
+
+
+static pANTLR3_COMMON_TOKEN getToken				(pANTLR3_BASE_TREE tree);
+static pANTLR3_BASE_TREE    dupNode					(pANTLR3_BASE_TREE tree);
+static ANTLR3_BOOLEAN	    isNilNode					(pANTLR3_BASE_TREE tree);
+static ANTLR3_UINT32	    getType					(pANTLR3_BASE_TREE tree);
+static pANTLR3_STRING	    getText					(pANTLR3_BASE_TREE tree);
+static ANTLR3_UINT32	    getLine					(pANTLR3_BASE_TREE tree);
+static ANTLR3_UINT32	    getCharPositionInLine	(pANTLR3_BASE_TREE tree);
+static pANTLR3_STRING	    toString				(pANTLR3_BASE_TREE tree);
+static pANTLR3_BASE_TREE	getParent				(pANTLR3_BASE_TREE tree);
+static void					setParent				(pANTLR3_BASE_TREE tree, pANTLR3_BASE_TREE parent);
+static void    				setChildIndex			(pANTLR3_BASE_TREE tree, ANTLR3_INT32 i);
+static ANTLR3_INT32			getChildIndex			(pANTLR3_BASE_TREE tree);
+static void					createChildrenList		(pANTLR3_BASE_TREE tree);
+static void                 reuse                   (pANTLR3_BASE_TREE tree);
+
+// Factory functions for the Arboretum
+//
+static ANTLR3_BOOLEAN		newPool				(pANTLR3_ARBORETUM factory);
+static pANTLR3_BASE_TREE    newPoolTree			(pANTLR3_ARBORETUM factory);
+static pANTLR3_BASE_TREE    newFromTree			(pANTLR3_ARBORETUM factory, pANTLR3_COMMON_TREE tree);
+static pANTLR3_BASE_TREE    newFromToken		(pANTLR3_ARBORETUM factory, pANTLR3_COMMON_TOKEN token);
+static void					factoryClose		(pANTLR3_ARBORETUM factory);
+
+ANTLR3_API pANTLR3_ARBORETUM
+antlr3ArboretumNew(pANTLR3_STRING_FACTORY strFactory)
+{
+    pANTLR3_ARBORETUM   factory;
+
+    // Allocate memory
+    //
+    factory	= (pANTLR3_ARBORETUM) ANTLR3_MALLOC((size_t)sizeof(ANTLR3_ARBORETUM));
+    if	(factory == NULL)
+    {
+		return	NULL;
+    }
+
+	// Install a vector factory to create, track and free() any child
+	// node lists.
+	//
+	factory->vFactory					= antlr3VectorFactoryNew(0);
+	if	(factory->vFactory == NULL)
+	{
+		free(factory);
+		return	NULL;
+	}
+
+    // We also keep a reclaim stack, so that any Nil nodes that are
+    // orphaned are not just left in the pool but are reused, otherwise
+    // we create 6 times as many nilNodes as ordinary nodes and use loads of
+    // memory. Perhaps at some point, the analysis phase will generate better
+    // code and we won't need to do this here.
+    //
+    factory->nilStack       =  antlr3StackNew(0);
+
+    // Install factory API
+    //
+    factory->newTree	    =  newPoolTree;
+    factory->newFromTree    =  newFromTree;
+    factory->newFromToken   =  newFromToken;
+    factory->close			=  factoryClose;
+
+    // Allocate the initial pool
+    //
+    factory->thisPool	= -1;
+    factory->pools		= NULL;
+    newPool(factory);
+
+    // Factory space is good; we now want to initialize our cheating tree,
+    // which, once it is initialized, is the model for all trees we manufacture.
+    //
+    antlr3SetCTAPI(&factory->unTruc);
+
+    // Set some initial variables for future copying, including a string factory
+    // that we can use later for converting trees to strings.
+    //
+	factory->unTruc.factory				= factory;
+    factory->unTruc.baseTree.strFactory	= strFactory;
+
+    return  factory;
+
+}
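+
+// Illustrative usage sketch using the factory API installed above (strFactory and
+// tok are assumed to exist already):
+//
+//     pANTLR3_ARBORETUM  factory = antlr3ArboretumNew(strFactory);
+//     pANTLR3_BASE_TREE  node    = factory->newFromToken(factory, tok); // node whose payload is tok
+//     pANTLR3_BASE_TREE  blank   = factory->newTree(factory);           // nil node with no token payload
+//     ...
+//     factory->close(factory);                                          // frees every tree the factory made
+//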
+
+static ANTLR3_BOOLEAN
+newPool(pANTLR3_ARBORETUM factory)
+{
+	pANTLR3_COMMON_TREE *newPools;
+
+    // Increment factory count
+    //
+    ++factory->thisPool;
+
+    // Ensure we have enough pointers allocated
+    //
+    newPools = (pANTLR3_COMMON_TREE *)
+					ANTLR3_REALLOC(	(void *)factory->pools,										// Current pools pointer (starts at NULL)
+					(ANTLR3_UINT32)((factory->thisPool + 1) * sizeof(pANTLR3_COMMON_TREE *))	// Memory for new pool pointers
+					);
+	if (newPools == NULL)
+	{
+		// realloc failed, but we still have the old allocation
+		--factory->thisPool;
+		return ANTLR3_FALSE;
+	}
+	factory->pools = newPools;
+
+    // Allocate a new pool for the factory
+    //
+    factory->pools[factory->thisPool]	=
+			    (pANTLR3_COMMON_TREE) 
+				ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_COMMON_TREE) * ANTLR3_FACTORY_POOL_SIZE));
+	if (factory->pools[factory->thisPool] == NULL)
+	{
+		// malloc failed
+		--factory->thisPool;
+		return ANTLR3_FALSE;
+	}
+
+
+    // Reset the counters
+    //
+    factory->nextTree	= 0;
+  
+    // Done
+    //
+    return ANTLR3_TRUE;
+}
+
+static	pANTLR3_BASE_TREE    
+newPoolTree	    (pANTLR3_ARBORETUM factory)
+{
+	pANTLR3_COMMON_TREE    tree;
+
+    // If we have anything on the reclaim stack, reuse that sucker first
+    //
+    tree = (pANTLR3_COMMON_TREE)factory->nilStack->peek(factory->nilStack);
+
+    if  (tree != NULL)
+    {
+        // Cool, we got something we can reuse. It will have been cleaned up by
+        // whatever put it back on the stack (for instance, if it had a child vector,
+        // that will have been cleared to hold zero entries and will be reused too).
+        // It is the baseTree pointer that is placed on the stack, of course.
+        //
+        factory->nilStack->pop(factory->nilStack);
+        return (pANTLR3_BASE_TREE)tree;
+
+    }
+	// See if we need a new tree pool before allocating a new tree
+	//
+	if	(factory->nextTree >= ANTLR3_FACTORY_POOL_SIZE)
+	{
+		// We ran out of tokens in the current pool, so we need a new pool
+		//
+		if (!newPool(factory))
+		{
+			// new pool creation failed
+			return NULL;
+		}
+	}
+
+	// Assuming everything went well (we are trying for performance here, so we do
+	// minimal error checking), we can work out the pointer to the next common tree.
+	//
+	tree   = factory->pools[factory->thisPool] + factory->nextTree;
+	factory->nextTree++;
+
+	// We have our token pointer now, so we can initialize it to the predefined model.
+	//
+    antlr3SetCTAPI(tree);
+
+    // Set some initial variables for future copying, including a string factory
+    // that we can use later for converting trees to strings.
+    //
+	tree->factory				= factory;
+    tree->baseTree.strFactory	= factory->unTruc.baseTree.strFactory;
+
+	// The super points to the common tree, so we must override the one used by
+	// the pre-built tree, as otherwise we will always point to the same initial
+	// common tree and we might spend 3 hours trying to debug why - this would never
+	// happen to me of course! :-(
+	//
+	tree->baseTree.super	= tree;
+
+
+	// And we are done
+	//
+	return  &(tree->baseTree);
+}
+
+
+static pANTLR3_BASE_TREE	    
+newFromTree(pANTLR3_ARBORETUM factory, pANTLR3_COMMON_TREE tree)
+{
+	pANTLR3_BASE_TREE	newTree;
+
+	newTree = factory->newTree(factory);
+
+	if	(newTree == NULL)
+	{
+		return	NULL;
+	}
+
+	// Pick up the payload we had in the supplied tree
+	//
+	((pANTLR3_COMMON_TREE)(newTree->super))->token   = tree->token;
+	newTree->u		    = tree->baseTree.u;							// Copy any user pointer
+
+	return  newTree;
+}
+
+static pANTLR3_BASE_TREE	    
+newFromToken(pANTLR3_ARBORETUM factory, pANTLR3_COMMON_TOKEN token)
+{
+	pANTLR3_BASE_TREE	newTree;
+
+	newTree = factory->newTree(factory);
+
+	if	(newTree == NULL)
+	{
+		return	NULL;
+	}
+
+	// Pick up the payload from the supplied token
+	//
+	((pANTLR3_COMMON_TREE)(newTree->super))->token = token;
+
+	return newTree;
+}
+
+static	void
+factoryClose	    (pANTLR3_ARBORETUM factory)
+{
+	ANTLR3_INT32	    poolCount;
+
+	// First close the vector factory that supplied all the child pointer
+	// vectors.
+	//
+	factory->vFactory->close(factory->vFactory);
+
+    if  (factory->nilStack !=  NULL)
+    {
+        factory->nilStack->free(factory->nilStack);
+    }
+
+	// We now JUST free the pools because the C runtime CommonToken based tree
+	// cannot contain anything that was not made by this factory.
+	//
+	for	(poolCount = 0; poolCount <= factory->thisPool; poolCount++)
+	{
+		// We can now free this pool allocation
+		//
+		ANTLR3_FREE(factory->pools[poolCount]);
+		factory->pools[poolCount] = NULL;
+	}
+
+	// All the pools are deallocated we can free the pointers to the pools
+	// now.
+	//
+	ANTLR3_FREE(factory->pools);
+
+	// Finally, we can free the space for the factory itself
+	//
+	ANTLR3_FREE(factory);
+}
+
+
+ANTLR3_API void 
+antlr3SetCTAPI(pANTLR3_COMMON_TREE tree)
+{
+    // Init base tree
+    //
+    antlr3BaseTreeNew(&(tree->baseTree));
+
+    // We need a pointer to ourselves for 
+    // the payload and few functions that we
+    // provide.
+    //
+    tree->baseTree.super    =  tree;
+
+    // Common tree overrides
+
+    tree->baseTree.isNilNode                = isNilNode;
+    tree->baseTree.toString					= toString;
+    tree->baseTree.dupNode					= (void *(*)(pANTLR3_BASE_TREE))(dupNode);
+    tree->baseTree.getLine					= getLine;
+    tree->baseTree.getCharPositionInLine	= getCharPositionInLine;
+    tree->baseTree.getType					= getType;
+    tree->baseTree.getText					= getText;
+    tree->baseTree.getToken					= getToken;
+	tree->baseTree.getParent				= getParent;
+	tree->baseTree.setParent				= setParent;
+	tree->baseTree.setChildIndex			= setChildIndex;
+	tree->baseTree.getChildIndex			= getChildIndex;
+	tree->baseTree.createChildrenList		= createChildrenList;
+    tree->baseTree.reuse                    = reuse;
+	tree->baseTree.free						= NULL;	    // Factory trees have no free function
+    tree->baseTree.u                        = NULL;     // Initialize user pointer            
+
+	tree->baseTree.children	= NULL;
+
+    tree->token				= NULL;	// No token as yet
+    tree->startIndex		= 0;
+    tree->stopIndex			= 0;
+	tree->parent			= NULL;	// No parent yet
+	tree->childIndex		= -1;
+
+    return;
+}
+
+// --------------------------------------
+// Non factory node constructors.
+//
+
+ANTLR3_API pANTLR3_COMMON_TREE
+antlr3CommonTreeNew()
+{
+	pANTLR3_COMMON_TREE	tree;
+	tree = (pANTLR3_COMMON_TREE)ANTLR3_CALLOC(1, sizeof(ANTLR3_COMMON_TREE));
+
+	if	(tree == NULL)
+	{
+		return NULL;
+	}
+
+	antlr3SetCTAPI(tree);
+
+	return tree;
+}
+
+ANTLR3_API pANTLR3_COMMON_TREE	    
+antlr3CommonTreeNewFromToken(pANTLR3_COMMON_TOKEN token)
+{
+	pANTLR3_COMMON_TREE	newTree;
+
+	newTree = antlr3CommonTreeNew();
+
+	if	(newTree == NULL)
+	{
+		return	NULL;
+	}
+
+	// Pick up the payload from the supplied token
+	//
+	newTree->token = token;
+	return newTree;
+}
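+
+// A minimal sketch of the non-factory constructors (tok is assumed to be an
+// existing pANTLR3_COMMON_TOKEN). Note that nodes built this way have no factory
+// pointer, so the factory-backed createChildrenList() below cannot be used with them:
+//
+//     pANTLR3_COMMON_TREE node = antlr3CommonTreeNewFromToken(tok);
+//     ANTLR3_UINT32       type = node->baseTree.getType(&(node->baseTree)); // delegates to tok->getType()
+//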
+
+/// Create a new vector for holding child nodes using the inbuilt
+/// vector factory.
+///
+static void
+createChildrenList  (pANTLR3_BASE_TREE tree)
+{
+	tree->children = ((pANTLR3_COMMON_TREE)(tree->super))->factory->vFactory->newVector(((pANTLR3_COMMON_TREE)(tree->super))->factory->vFactory);
+}
+
+
+static pANTLR3_COMMON_TOKEN 
+getToken			(pANTLR3_BASE_TREE tree)
+{
+    // The token is the payload of the common tree or other implementor
+    // so it is stored within ourselves, which is the super pointer. Note
+	// that whatever the actual token is, it is passed around by its pointer
+	// to the common token implementation, which it may of course surround
+	// with its own super structure.
+    //
+    return  ((pANTLR3_COMMON_TREE)(tree->super))->token;
+}
+
+static pANTLR3_BASE_TREE    
+dupNode			(pANTLR3_BASE_TREE tree)
+{
+    // The node we are duplicating is in fact the common tree (that's why we are here)
+    // so we use the super pointer to duplicate.
+    //
+    pANTLR3_COMMON_TREE	    theOld;
+    
+	theOld	= (pANTLR3_COMMON_TREE)(tree->super);
+
+	// The pointer we return is the base implementation of course
+    //
+	return  theOld->factory->newFromTree(theOld->factory, theOld);
+}
+
+static ANTLR3_BOOLEAN	    
+isNilNode			(pANTLR3_BASE_TREE tree)
+{
+	// This is a Nil tree if it has no payload (Token in our case)
+	//
+	if	(((pANTLR3_COMMON_TREE)(tree->super))->token == NULL)
+	{
+		return ANTLR3_TRUE;
+	}
+	else
+	{
+		return ANTLR3_FALSE;
+	}
+}
+
+static ANTLR3_UINT32	    
+getType			(pANTLR3_BASE_TREE tree)
+{
+	pANTLR3_COMMON_TREE    theTree;
+
+	theTree = (pANTLR3_COMMON_TREE)(tree->super);
+
+	if	(theTree->token == NULL)
+	{
+		return	0;
+	}
+	else
+	{
+		return	theTree->token->getType(theTree->token);
+	}
+}
+
+static pANTLR3_STRING	    
+getText			(pANTLR3_BASE_TREE tree)
+{
+	return	tree->toString(tree);
+}
+
+static ANTLR3_UINT32	    getLine			(pANTLR3_BASE_TREE tree)
+{
+	pANTLR3_COMMON_TREE	    cTree;
+	pANTLR3_COMMON_TOKEN    token;
+
+	cTree   = (pANTLR3_COMMON_TREE)(tree->super);
+
+	token   = cTree->token;
+
+	if	(token == NULL || token->getLine(token) == 0)
+	{
+		if  (tree->getChildCount(tree) > 0)
+		{
+			pANTLR3_BASE_TREE	child;
+
+			child   = (pANTLR3_BASE_TREE)tree->getChild(tree, 0);
+			return child->getLine(child);
+		}
+		return 0;
+	}
+	return  token->getLine(token);
+}
+
+static ANTLR3_UINT32	    getCharPositionInLine	(pANTLR3_BASE_TREE tree)
+{
+	pANTLR3_COMMON_TOKEN    token;
+
+	token   = ((pANTLR3_COMMON_TREE)(tree->super))->token;
+
+	if	(token == NULL || token->getCharPositionInLine(token) == -1)
+	{
+		if  (tree->getChildCount(tree) > 0)
+		{
+			pANTLR3_BASE_TREE	child;
+
+			child   = (pANTLR3_BASE_TREE)tree->getChild(tree, 0);
+
+			return child->getCharPositionInLine(child);
+		}
+		return 0;
+	}
+	return  token->getCharPositionInLine(token);
+}
+
+static pANTLR3_STRING	    toString			(pANTLR3_BASE_TREE tree)
+{
+	if  (tree->isNilNode(tree) == ANTLR3_TRUE)
+	{
+		pANTLR3_STRING  nilNode;
+
+		nilNode	= tree->strFactory->newPtr(tree->strFactory, (pANTLR3_UINT8)"nil", 3);
+
+		return nilNode;
+	}
+
+	return	((pANTLR3_COMMON_TREE)(tree->super))->token->getText(((pANTLR3_COMMON_TREE)(tree->super))->token);
+}
+
+static pANTLR3_BASE_TREE	
+getParent				(pANTLR3_BASE_TREE tree)
+{
+	if (((pANTLR3_COMMON_TREE)(tree->super))->parent == NULL)
+		return NULL;
+	return & (((pANTLR3_COMMON_TREE)(tree->super))->parent->baseTree);
+}
+
+static void					
+setParent				(pANTLR3_BASE_TREE tree, pANTLR3_BASE_TREE parent)
+{
+	((pANTLR3_COMMON_TREE)(tree->super))->parent = parent == NULL ? NULL : ((pANTLR3_COMMON_TREE)(parent->super));
+}
+
+static void    				
+setChildIndex			(pANTLR3_BASE_TREE tree, ANTLR3_INT32 i)
+{
+	((pANTLR3_COMMON_TREE)(tree->super))->childIndex = i;
+}
+static	ANTLR3_INT32			
+getChildIndex			(pANTLR3_BASE_TREE tree )
+{
+	return ((pANTLR3_COMMON_TREE)(tree->super))->childIndex;
+}
+
+/** Clean up any child vector that the tree might have, so it can be reused,
+ *  then add it into the reuse stack.
+ */
+static void
+reuse                   (pANTLR3_BASE_TREE tree)
+{
+    pANTLR3_COMMON_TREE	    cTree;
+
+	cTree   = (pANTLR3_COMMON_TREE)(tree->super);
+
+    if  (cTree->factory != NULL)
+    {
+
+        if  (cTree->baseTree.children != NULL)
+        {
+            
+            cTree->baseTree.children->clear(cTree->baseTree.children);
+        }
+       cTree->factory->nilStack->push(cTree->factory->nilStack, tree, NULL);
+       
+    }
+}
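+
+// A minimal sketch of the reuse cycle (variable names are hypothetical; tree is a
+// nil node that came from an arboretum):
+//
+//     tree->reuse(tree);                                   // clears its child list and parks it on the nil stack
+//     pANTLR3_BASE_TREE next = factory->newTree(factory);  // newPoolTree() hands the same node straight back
+//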
diff --git a/runtime/C/src/antlr3commontreeadaptor.c b/runtime/C/src/antlr3commontreeadaptor.c
new file mode 100644
index 0000000..38697f1
--- /dev/null
+++ b/runtime/C/src/antlr3commontreeadaptor.c
@@ -0,0 +1,496 @@
+/** \file
+ * This is the standard tree adaptor used by the C runtime unless the grammar
+ * source file says to use anything different. It embeds a BASE_TREE to which
+ * it adds its own implementation of anything that the base tree is not 
+ * good for, plus a number of methods that any other adaptor type
+ * needs to implement too.
+ * \ingroup pANTLR3_COMMON_TREE_ADAPTOR
+ */
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3commontreeadaptor.h>
+
+#ifdef	ANTLR3_WINDOWS
+#pragma warning( disable : 4100 )
+#endif
+
+/* BASE_TREE_ADAPTOR overrides... */
+static	pANTLR3_BASE_TREE		dupNode					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE treeNode);
+static	pANTLR3_BASE_TREE		create					(pANTLR3_BASE_TREE_ADAPTOR adpator, pANTLR3_COMMON_TOKEN payload);
+static	pANTLR3_BASE_TREE		dbgCreate				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN payload);
+static	pANTLR3_COMMON_TOKEN	createToken				(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text);
+static	pANTLR3_COMMON_TOKEN	createTokenFromToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN fromToken);
+static	pANTLR3_COMMON_TOKEN    getToken				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	pANTLR3_STRING			getText					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	ANTLR3_UINT32			getType					(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	pANTLR3_BASE_TREE		getChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i);
+static	ANTLR3_UINT32			getChildCount			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	void					replaceChildren			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE t);
+static	void					setDebugEventListener	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_DEBUG_EVENT_LISTENER debugger);
+static  void					setChildIndex			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_INT32 i);
+static  ANTLR3_INT32			getChildIndex			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static	void					setParent				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE child, pANTLR3_BASE_TREE parent);
+static	pANTLR3_BASE_TREE    	getParent				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE child);
+static  void					setChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i, pANTLR3_BASE_TREE child);
+static	void					deleteChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i);
+static	pANTLR3_BASE_TREE		errorNode				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_TOKEN_STREAM ctnstream, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken, pANTLR3_EXCEPTION e);
+/* Methods specific to each tree adaptor
+ */
+static	void			setTokenBoundaries		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken);
+static	void			dbgSetTokenBoundaries	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken);
+static	ANTLR3_MARKER   getTokenStartIndex		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+static  ANTLR3_MARKER   getTokenStopIndex		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t);
+
+static	void		ctaFree			(pANTLR3_BASE_TREE_ADAPTOR adaptor);
+
+/** Create a new tree adaptor. Note that despite the fact that this is
+ *  creating a new COMMON_TREE adaptor, we return the address of the
+ *  BASE_TREE interface, as should any other adaptor that wishes to be 
+ *  used as the tree element of a tree parse/build. It needs to be given the
+ *  address of a valid string factory as we do not know what the originating
+ *  input stream encoding type was. This way we can rely on just using
+ *  the original input stream's string factory or one of the correct type
+ *  which the user supplies us.
+ */
+ANTLR3_API pANTLR3_BASE_TREE_ADAPTOR
+ANTLR3_TREE_ADAPTORNew(pANTLR3_STRING_FACTORY strFactory)
+{
+	pANTLR3_COMMON_TREE_ADAPTOR	cta;
+
+	// First job is to create the memory we need for the tree adaptor interface.
+	//
+	cta	= (pANTLR3_COMMON_TREE_ADAPTOR) ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_COMMON_TREE_ADAPTOR)));
+
+	if	(cta == NULL)
+	{
+		return	NULL;
+	}
+
+	// Memory is initialized, so initialize the base tree adaptor
+	//
+	antlr3BaseTreeAdaptorInit(&(cta->baseAdaptor), NULL);
+
+	// Install our interface overrides. The strange casting is to allow generated code to treat them
+    // as returning void *
+	//
+	cta->baseAdaptor.dupNode				=  (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
+													dupNode;
+	cta->baseAdaptor.create					=  (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_COMMON_TOKEN))
+													create;
+	cta->baseAdaptor.createToken			=  
+													createToken;
+	cta->baseAdaptor.createTokenFromToken   =  
+													createTokenFromToken;
+	cta->baseAdaptor.setTokenBoundaries	    =  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, pANTLR3_COMMON_TOKEN, pANTLR3_COMMON_TOKEN))
+													setTokenBoundaries;
+	cta->baseAdaptor.getTokenStartIndex	    =  (ANTLR3_MARKER  (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                    getTokenStartIndex;
+	cta->baseAdaptor.getTokenStopIndex	    =  (ANTLR3_MARKER  (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                    getTokenStopIndex;
+	cta->baseAdaptor.getText				=  (pANTLR3_STRING (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                    getText;
+	cta->baseAdaptor.getType				=  (ANTLR3_UINT32  (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                    getType;
+	cta->baseAdaptor.getChild				=  (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32))
+                                                    getChild;
+	cta->baseAdaptor.setChild				=  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32, void *))
+                                                    setChild;
+	cta->baseAdaptor.setParent				=  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, void *))
+                                                    setParent;
+    cta->baseAdaptor.getParent				=  (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                    getParent;
+	cta->baseAdaptor.setChildIndex			=  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32))
+                                                    setChildIndex;
+	cta->baseAdaptor.deleteChild			=  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_UINT32))
+                                                    deleteChild;
+	cta->baseAdaptor.getChildCount			=  (ANTLR3_UINT32  (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                    getChildCount;
+	cta->baseAdaptor.getChildIndex			=  (ANTLR3_INT32  (*) (pANTLR3_BASE_TREE_ADAPTOR, void *))
+                                                    getChildIndex;
+	cta->baseAdaptor.free					=  (void  (*) (pANTLR3_BASE_TREE_ADAPTOR))
+                                                    ctaFree;
+	cta->baseAdaptor.setDebugEventListener	=  
+													setDebugEventListener;
+	cta->baseAdaptor.replaceChildren		=  (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, ANTLR3_INT32, ANTLR3_INT32, void *))
+                                                    replaceChildren;
+	cta->baseAdaptor.errorNode				=  (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_TOKEN_STREAM, pANTLR3_COMMON_TOKEN, pANTLR3_COMMON_TOKEN, pANTLR3_EXCEPTION))
+                                                    errorNode;
+
+	// Install the super class pointer
+	//
+	cta->baseAdaptor.super	    = cta;
+
+	// Install a tree factory for creating new tree nodes
+	//
+	cta->arboretum  = antlr3ArboretumNew(strFactory);
+
+	// Install a token factory for imaginary tokens. These imaginary
+	// tokens do not require access to the input stream, so we can
+	// pass NULL for it, but they will need a string factory.
+	//
+	cta->baseAdaptor.tokenFactory						= antlr3TokenFactoryNew(NULL);
+	cta->baseAdaptor.tokenFactory->unTruc.strFactory	= strFactory;
+
+	// Allow the base tree adaptor to share the tree factory's string factory.
+	//
+	cta->baseAdaptor.strFactory	= strFactory;
+
+	// Return the address of the base adaptor interface.
+	//
+	return  &(cta->baseAdaptor);
+}
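+
+// Illustrative usage sketch (the adaptor entry points are the overrides installed
+// above; strFactory and tok are assumed to exist):
+//
+//     pANTLR3_BASE_TREE_ADAPTOR adaptor = ANTLR3_TREE_ADAPTORNew(strFactory);
+//     pANTLR3_BASE_TREE         node    = (pANTLR3_BASE_TREE)adaptor->create(adaptor, tok);
+//     pANTLR3_STRING            text    = adaptor->getText(adaptor, node);
+//     ...
+//     adaptor->free(adaptor);   // closes the arboretum and token factory created above
+//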
+
+/// Debugging version of the tree adaptor (not normally called, as generated code
+/// calls setDebugEventListener instead, which changes a normal token stream to
+/// a debugging stream and means that a user's instantiation code does not need
+/// to be changed just to debug with AW).
+///
+ANTLR3_API pANTLR3_BASE_TREE_ADAPTOR
+ANTLR3_TREE_ADAPTORDebugNew(pANTLR3_STRING_FACTORY strFactory, pANTLR3_DEBUG_EVENT_LISTENER	debugger)
+{
+	pANTLR3_BASE_TREE_ADAPTOR	ta;
+
+	// Create a normal one first
+	//
+	ta	= ANTLR3_TREE_ADAPTORNew(strFactory);
+	
+	if	(ta != NULL)
+	{
+		// Reinitialize as a debug version
+		//
+		antlr3BaseTreeAdaptorInit(ta, debugger);
+		ta->create				= (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_COMMON_TOKEN))
+									dbgCreate;
+		ta->setTokenBoundaries	= (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, pANTLR3_COMMON_TOKEN, pANTLR3_COMMON_TOKEN))
+									dbgSetTokenBoundaries;
+	}
+
+	return	ta;
+}
+
+/// Causes an existing common tree adaptor to become a debug version
+///
+static	void
+setDebugEventListener	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_DEBUG_EVENT_LISTENER debugger)
+{
+	// Reinitialize as a debug version
+	//
+	antlr3BaseTreeAdaptorInit(adaptor, debugger);
+
+	adaptor->create				= (void * (*) (pANTLR3_BASE_TREE_ADAPTOR, pANTLR3_COMMON_TOKEN))
+                                    dbgCreate;
+	adaptor->setTokenBoundaries	= (void   (*) (pANTLR3_BASE_TREE_ADAPTOR, void *, pANTLR3_COMMON_TOKEN, pANTLR3_COMMON_TOKEN))
+                                    dbgSetTokenBoundaries;
+
+}
+
+static void
+ctaFree(pANTLR3_BASE_TREE_ADAPTOR adaptor)
+{
+    pANTLR3_COMMON_TREE_ADAPTOR cta;
+
+    cta	= (pANTLR3_COMMON_TREE_ADAPTOR)(adaptor->super);
+
+    /* Free the tree factory we created
+     */
+    cta->arboretum->close(((pANTLR3_COMMON_TREE_ADAPTOR)(adaptor->super))->arboretum);
+
+    /* Free the token factory we created
+     */
+    adaptor->tokenFactory->close(adaptor->tokenFactory);
+
+    /* Free the super pointer, as it is this that was allocated
+     * and is the common tree structure.
+     */
+    ANTLR3_FREE(adaptor->super);
+}
+
+/* BASE_TREE_ADAPTOR overrides */
+
+static	pANTLR3_BASE_TREE
+errorNode				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_TOKEN_STREAM ctnstream, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken, pANTLR3_EXCEPTION e)
+{
+	// Use the supplied common tree node stream to get another tree from the factory
+	// TODO: Look at creating the erronode as in Java, but this is complicated by the
+	// need to track and free the memory allocated to it, so for now, we just
+	// want something in the tree that isn't a NULL pointer.
+	//
+	return (pANTLR3_BASE_TREE)adaptor->createTypeText(adaptor, ANTLR3_TOKEN_INVALID, (pANTLR3_UINT8)"Tree Error Node");
+
+}
+
+/** Duplicate the supplied node.
+ */
+static	pANTLR3_BASE_TREE
+dupNode		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE treeNode)
+{
+	return  treeNode == NULL ? NULL : (pANTLR3_BASE_TREE)treeNode->dupNode(treeNode);
+}
+
+static	pANTLR3_BASE_TREE
+create		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN payload)
+{
+    pANTLR3_BASE_TREE	ct;
+    
+    /* Create a new common tree as this is what this adaptor deals with
+     */
+    ct = ((pANTLR3_COMMON_TREE_ADAPTOR)(adaptor->super))->arboretum->newFromToken(((pANTLR3_COMMON_TREE_ADAPTOR)(adaptor->super))->arboretum, payload);
+
+    /* But all adaptors return the pointer to the base interface.
+     */
+    return  ct;
+}
+static	pANTLR3_BASE_TREE
+dbgCreate		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN payload)
+{
+	pANTLR3_BASE_TREE	ct;
+
+	ct = create(adaptor, payload);
+	adaptor->debugger->createNode(adaptor->debugger, ct);
+
+	return ct;
+}
+
+/** Tell me how to create a token for use with imaginary token nodes.
+ *  For example, there is probably no input symbol associated with imaginary
+ *  token DECL, but you need to create it as a payload or whatever for
+ *  the DECL node as in ^(DECL type ID).
+ *
+ *  If you care what the token payload objects' type is, you should
+ *  override this method and any other createToken variant.
+ */
+static	pANTLR3_COMMON_TOKEN
+createToken		(pANTLR3_BASE_TREE_ADAPTOR adaptor, ANTLR3_UINT32 tokenType, pANTLR3_UINT8 text)
+{
+    pANTLR3_COMMON_TOKEN    newToken;
+
+    newToken	= adaptor->tokenFactory->newToken(adaptor->tokenFactory);
+
+    if	(newToken != NULL)
+    {	
+		newToken->textState		= ANTLR3_TEXT_CHARP;
+		newToken->tokText.chars = (pANTLR3_UCHAR)text;
+		newToken->setType(newToken, tokenType);
+		newToken->input				= adaptor->tokenFactory->input;
+        newToken->strFactory        = adaptor->strFactory;
+    }
+    return  newToken;
+}
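+
+// A rough sketch of how generated rewrite code reaches the function above through
+// the base adaptor (DECL stands in for a generated imaginary token type):
+//
+//     pANTLR3_COMMON_TOKEN declTok  = adaptor->createToken(adaptor, DECL, (pANTLR3_UINT8)"DECL");
+//     pANTLR3_BASE_TREE    declNode = (pANTLR3_BASE_TREE)adaptor->create(adaptor, declTok);
+//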
+
+/** Tell me how to create a token for use with imaginary token nodes.
+ *  For example, there is probably no input symbol associated with imaginary
+ *  token DECL, but you need to create it as a payload or whatever for
+ *  the DECL node as in ^(DECL type ID).
+ *
+ *  This is a variant of createToken where the new token is derived from
+ *  an actual real input token.  Typically this is for converting '{'
+ *  tokens to BLOCK etc...  You'll see
+ *
+ *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
+ *
+ *  If you care what the token payload objects' type is, you should
+ *  override this method and any other createToken variant.
+ *
+ * NB: this being C, it is not so easy to extend the types of createToken.
+ *     We will have to see if anyone needs to do this and add any variants to
+ *     this interface.
+ */
+static	pANTLR3_COMMON_TOKEN
+createTokenFromToken	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_COMMON_TOKEN fromToken)
+{
+    pANTLR3_COMMON_TOKEN    newToken;
+
+    newToken	= adaptor->tokenFactory->newToken(adaptor->tokenFactory);
+    
+    if	(newToken != NULL)
+    {
+		// Create the text using our own string factory to avoid complicating
+		// commontoken.
+		//
+		pANTLR3_STRING	text;
+
+		newToken->toString  = fromToken->toString;
+
+		if	(fromToken->textState == ANTLR3_TEXT_CHARP)
+		{
+			newToken->textState		= ANTLR3_TEXT_CHARP;
+			newToken->tokText.chars	= fromToken->tokText.chars;
+		}
+		else
+		{
+			text						= fromToken->getText(fromToken);
+			newToken->textState			= ANTLR3_TEXT_STRING;
+			newToken->tokText.text	    = adaptor->strFactory->newPtr(adaptor->strFactory, text->chars, text->len);
+		}
+
+		newToken->setLine				(newToken, fromToken->getLine(fromToken));
+		newToken->setTokenIndex			(newToken, fromToken->getTokenIndex(fromToken));
+		newToken->setCharPositionInLine	(newToken, fromToken->getCharPositionInLine(fromToken));
+		newToken->setChannel			(newToken, fromToken->getChannel(fromToken));
+		newToken->setType				(newToken, fromToken->getType(fromToken));
+    }
+
+    return  newToken;
+}
+
+/* Specific methods for a TreeAdaptor */
+
+/** Track start/stop token for subtree root created for a rule.
+ *  Only works with CommonTree nodes.  For rules that match nothing,
+ *  seems like this will yield start=i and stop=i-1 in a nil node.
+ *  Might be useful info so I'll not force to be i..i.
+ */
+static	void
+setTokenBoundaries	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken)
+{
+	ANTLR3_MARKER   start;
+	ANTLR3_MARKER   stop;
+
+	pANTLR3_COMMON_TREE	    ct;
+
+	if	(t == NULL)
+	{
+		return;
+	}
+
+	if	( startToken != NULL)
+	{
+		start = startToken->getTokenIndex(startToken);
+	}
+	else
+	{
+		start = 0;
+	}
+
+	if	( stopToken != NULL)
+	{
+		stop = stopToken->getTokenIndex(stopToken);
+	}
+	else
+	{
+		stop = 0;
+	}
+
+	ct	= (pANTLR3_COMMON_TREE)(t->super);
+
+	ct->startIndex  = start;
+	ct->stopIndex   = stop;
+
+}
+static	void
+dbgSetTokenBoundaries	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, pANTLR3_COMMON_TOKEN startToken, pANTLR3_COMMON_TOKEN stopToken)
+{
+	setTokenBoundaries(adaptor, t, startToken, stopToken);
+
+	if	(t != NULL && startToken != NULL && stopToken != NULL)
+	{
+		adaptor->debugger->setTokenBoundaries(adaptor->debugger, t, startToken->getTokenIndex(startToken), stopToken->getTokenIndex(stopToken));
+	}
+}
+
+static	ANTLR3_MARKER   
+getTokenStartIndex	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
+{
+    return  ((pANTLR3_COMMON_TREE)(t->super))->startIndex;
+}
+
+static	ANTLR3_MARKER   
+getTokenStopIndex	(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
+{
+    return  ((pANTLR3_COMMON_TREE)(t->super))->stopIndex;
+}
+
+static	pANTLR3_STRING
+getText		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
+{
+    return  t->getText(t);
+}
+
+static	ANTLR3_UINT32
+getType		(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
+{
+    return  t->getType(t);
+}
+
+static	void					
+replaceChildren
+(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE t)
+{
+	if	(parent != NULL)
+	{
+		parent->replaceChildren(parent, startChildIndex, stopChildIndex, t);
+	}
+}
+
+static	pANTLR3_BASE_TREE
+getChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i)
+{
+	return (pANTLR3_BASE_TREE)t->getChild(t, i);
+}
+static  void
+setChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i, pANTLR3_BASE_TREE child)
+{
+	t->setChild(t, i, child);
+}
+
+static	void
+deleteChild				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_UINT32 i)
+{
+	t->deleteChild(t, i);
+}
+
+static	ANTLR3_UINT32
+getChildCount			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
+{
+	return t->getChildCount(t);
+}
+
+static  void
+setChildIndex			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t, ANTLR3_INT32 i)
+{
+	t->setChildIndex(t, i);
+}
+
+static  ANTLR3_INT32
+getChildIndex			(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE t)
+{
+	return t->getChildIndex(t);
+}
+static	void
+setParent				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE child, pANTLR3_BASE_TREE parent)
+{
+	child->setParent(child, parent);
+}
+static	pANTLR3_BASE_TREE
+getParent				(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_TREE child)
+{
+	return child->getParent(child);
+}
diff --git a/runtime/C/src/antlr3commontreenodestream.c b/runtime/C/src/antlr3commontreenodestream.c
new file mode 100644
index 0000000..54d5184
--- /dev/null
+++ b/runtime/C/src/antlr3commontreenodestream.c
@@ -0,0 +1,968 @@
+/// \file
+/// Defines the implementation of the common node stream, the default
+/// tree node stream used by ANTLR.
+///
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3commontreenodestream.h>
+
+#ifdef	ANTLR3_WINDOWS
+#pragma warning( disable : 4100 )
+#endif
+
+// COMMON TREE STREAM API
+//
+static	void						addNavigationNode			(pANTLR3_COMMON_TREE_NODE_STREAM ctns, ANTLR3_UINT32 ttype);
+static	ANTLR3_BOOLEAN				hasUniqueNavigationNodes	(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
+static	pANTLR3_BASE_TREE			newDownNode					(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
+static	pANTLR3_BASE_TREE			newUpNode					(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
+static	void						reset						(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
+static	void						push						(pANTLR3_COMMON_TREE_NODE_STREAM ctns, ANTLR3_INT32 index);
+static	ANTLR3_INT32				pop							(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
+//static	ANTLR3_INT32				index						(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
+static	ANTLR3_UINT32				getLookaheadSize			(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
+// TREE NODE STREAM API
+//
+static	pANTLR3_BASE_TREE_ADAPTOR   getTreeAdaptor				(pANTLR3_TREE_NODE_STREAM tns);
+static	pANTLR3_BASE_TREE			getTreeSource				(pANTLR3_TREE_NODE_STREAM tns);
+static	pANTLR3_BASE_TREE			_LT							(pANTLR3_TREE_NODE_STREAM tns, ANTLR3_INT32 k);
+static	pANTLR3_BASE_TREE			get							(pANTLR3_TREE_NODE_STREAM tns, ANTLR3_INT32 k);
+static	void						setUniqueNavigationNodes	(pANTLR3_TREE_NODE_STREAM tns, ANTLR3_BOOLEAN uniqueNavigationNodes);
+static	pANTLR3_STRING				toString					(pANTLR3_TREE_NODE_STREAM tns);
+static	pANTLR3_STRING				toStringSS					(pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE start, pANTLR3_BASE_TREE stop);
+static	void						toStringWork				(pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE start, pANTLR3_BASE_TREE stop, pANTLR3_STRING buf);
+static	void						replaceChildren				(pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE t);
+
+// INT STREAM API
+//
+static	void						consume						(pANTLR3_INT_STREAM is);
+static	ANTLR3_MARKER				tindex						(pANTLR3_INT_STREAM is);
+static	ANTLR3_UINT32				_LA							(pANTLR3_INT_STREAM is, ANTLR3_INT32 i);
+static	ANTLR3_MARKER				mark						(pANTLR3_INT_STREAM is);
+static	void						release						(pANTLR3_INT_STREAM is, ANTLR3_MARKER marker);
+static	void						rewindMark					(pANTLR3_INT_STREAM is, ANTLR3_MARKER marker);
+static	void						rewindLast					(pANTLR3_INT_STREAM is);
+static	void						seek						(pANTLR3_INT_STREAM is, ANTLR3_MARKER index);
+static	ANTLR3_UINT32				size						(pANTLR3_INT_STREAM is);
+
+
+// Helper functions
+//
+static	void						fillBuffer					(pANTLR3_COMMON_TREE_NODE_STREAM ctns, pANTLR3_BASE_TREE t);
+static	void						fillBufferRoot				(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
+
+// Constructors
+//
+static	void						antlr3TreeNodeStreamFree			(pANTLR3_TREE_NODE_STREAM tns);
+static	void						antlr3CommonTreeNodeStreamFree		(pANTLR3_COMMON_TREE_NODE_STREAM ctns);
+
+ANTLR3_API pANTLR3_TREE_NODE_STREAM
+antlr3TreeNodeStreamNew()
+{
+    pANTLR3_TREE_NODE_STREAM stream;
+
+    // Memory for the interface structure
+    //
+    stream  = (pANTLR3_TREE_NODE_STREAM) ANTLR3_CALLOC(1, sizeof(ANTLR3_TREE_NODE_STREAM));
+
+    if	(stream == NULL)
+    {
+		return	NULL;
+    }
+
+    // Install basic API 
+    //
+	stream->replaceChildren = replaceChildren;
+    stream->free			= antlr3TreeNodeStreamFree;
+    
+    return stream;
+}
+
+static void
+antlr3TreeNodeStreamFree(pANTLR3_TREE_NODE_STREAM stream)
+{   
+    ANTLR3_FREE(stream);
+}
+
+ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM
+antlr3CommonTreeNodeStreamNewTree(pANTLR3_BASE_TREE tree, ANTLR3_UINT32 hint)
+{
+	pANTLR3_COMMON_TREE_NODE_STREAM stream;
+
+	stream = antlr3CommonTreeNodeStreamNew(tree->strFactory, hint);
+
+	if	(stream == NULL)
+	{
+		return	NULL;
+	}
+	stream->root    = tree;
+
+	return stream;
+}
+
+ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM
+antlr3CommonTreeNodeStreamNewStream(pANTLR3_COMMON_TREE_NODE_STREAM inStream)
+{
+	pANTLR3_COMMON_TREE_NODE_STREAM stream;
+
+	// Memory for the interface structure
+	//
+	stream  = (pANTLR3_COMMON_TREE_NODE_STREAM) ANTLR3_CALLOC(1, sizeof(ANTLR3_COMMON_TREE_NODE_STREAM));
+
+	if	(stream == NULL)
+	{
+		return	NULL;
+	}
+
+	// Copy in all the reusable parts of the originating stream and create new
+	// pieces where necessary.
+	//
+
+	// String factory for tree walker
+	//
+	stream->stringFactory		= inStream->stringFactory;
+
+	// Create an adaptor for the common tree node stream
+	//
+	stream->adaptor				= inStream->adaptor;
+
+	// Create space for the tree node stream interface
+	//
+	stream->tnstream	    = antlr3TreeNodeStreamNew();
+
+	if	(stream->tnstream == NULL)
+	{
+		stream->free				(stream);
+
+		return	NULL;
+	}
+
+	// Create space for the INT_STREAM interface
+	//
+	stream->tnstream->istream		    =  antlr3IntStreamNew();
+
+	if	(stream->tnstream->istream == NULL)
+	{
+		stream->tnstream->free		(stream->tnstream);
+		stream->free				(stream);
+
+		return	NULL;
+	}
+
+	// Install the common tree node stream API
+	//
+	stream->addNavigationNode		    =  addNavigationNode;
+	stream->hasUniqueNavigationNodes    =  hasUniqueNavigationNodes;
+	stream->newDownNode					=  newDownNode;
+	stream->newUpNode					=  newUpNode;
+	stream->reset						=  reset;
+	stream->push						=  push;
+	stream->pop							=  pop;
+	stream->getLookaheadSize			=  getLookaheadSize;
+
+	stream->free			    =  antlr3CommonTreeNodeStreamFree;
+
+	// Install the tree node stream API
+	//
+	stream->tnstream->getTreeAdaptor			=  getTreeAdaptor;
+	stream->tnstream->getTreeSource				=  getTreeSource;
+	stream->tnstream->_LT						=  _LT;
+	stream->tnstream->setUniqueNavigationNodes	=  setUniqueNavigationNodes;
+	stream->tnstream->toString					=  toString;
+	stream->tnstream->toStringSS				=  toStringSS;
+	stream->tnstream->toStringWork				=  toStringWork;
+	stream->tnstream->get						=  get;
+
+	// Install INT_STREAM interface
+	//
+	stream->tnstream->istream->consume	    =  consume;
+	stream->tnstream->istream->index	    =  tindex;
+	stream->tnstream->istream->_LA			=  _LA;
+	stream->tnstream->istream->mark			=  mark;
+	stream->tnstream->istream->release	    =  release;
+	stream->tnstream->istream->rewind	    =  rewindMark;
+	stream->tnstream->istream->rewindLast   =  rewindLast;
+	stream->tnstream->istream->seek			=  seek;
+	stream->tnstream->istream->size			=  size;
+
+	// Initialize data elements of INT stream
+	//
+	stream->tnstream->istream->type			= ANTLR3_COMMONTREENODE;
+	stream->tnstream->istream->super	    =  (stream->tnstream);
+
+	// Initialize data elements of TREE stream
+	//
+	stream->tnstream->ctns =  stream;
+
+	// Initialize data elements of the COMMON TREE NODE stream
+	//
+	stream->super					= NULL;
+	stream->uniqueNavigationNodes	= ANTLR3_FALSE;
+	stream->markers					= NULL;
+	stream->nodeStack				= inStream->nodeStack;
+
+	// Create the node list map
+	//
+	stream->nodes	= antlr3VectorNew(DEFAULT_INITIAL_BUFFER_SIZE);
+	stream->p		= -1;
+
+	// Install the navigation nodes
+	//
+	antlr3SetCTAPI(&(stream->UP));
+	antlr3SetCTAPI(&(stream->DOWN));
+	antlr3SetCTAPI(&(stream->EOF_NODE));
+	antlr3SetCTAPI(&(stream->INVALID_NODE));
+
+	stream->UP.token						= inStream->UP.token;
+	inStream->UP.token->strFactory			= stream->stringFactory;
+	stream->DOWN.token						= inStream->DOWN.token;
+	inStream->DOWN.token->strFactory		= stream->stringFactory;
+	stream->EOF_NODE.token					= inStream->EOF_NODE.token;
+	inStream->EOF_NODE.token->strFactory	= stream->stringFactory;
+	stream->INVALID_NODE.token				= inStream->INVALID_NODE.token;
+	inStream->INVALID_NODE.token->strFactory= stream->stringFactory;
+
+	// Reuse the root tree of the originating stream
+	//
+	stream->root		= inStream->root;
+
+	// Signal that this is a rewriting stream so we don't
+	// free the originating tree. Anything that we rewrite or
+	// duplicate here will be done through the adaptor or 
+	// the original tree factory.
+	//
+	stream->isRewriter	= ANTLR3_TRUE;
+	return stream;
+}
+
+ANTLR3_API pANTLR3_COMMON_TREE_NODE_STREAM
+antlr3CommonTreeNodeStreamNew(pANTLR3_STRING_FACTORY strFactory, ANTLR3_UINT32 hint)
+{
+	pANTLR3_COMMON_TREE_NODE_STREAM stream;
+	pANTLR3_COMMON_TOKEN			token;
+
+	// Memory for the interface structure
+	//
+	stream  = (pANTLR3_COMMON_TREE_NODE_STREAM) ANTLR3_CALLOC(1, sizeof(ANTLR3_COMMON_TREE_NODE_STREAM));
+
+	if	(stream == NULL)
+	{
+		return	NULL;
+	}
+
+	// String factory for tree walker
+	//
+	stream->stringFactory		= strFactory;
+
+	// Create an adaptor for the common tree node stream
+	//
+	stream->adaptor				= ANTLR3_TREE_ADAPTORNew(strFactory);
+
+	if	(stream->adaptor == NULL)
+	{
+		stream->free(stream);
+		return	NULL;
+	}
+
+	// Create space for the tree node stream interface
+	//
+	stream->tnstream	    = antlr3TreeNodeStreamNew();
+
+	if	(stream->tnstream == NULL)
+	{
+		stream->adaptor->free		(stream->adaptor);
+		stream->free				(stream);
+
+		return	NULL;
+	}
+
+	// Create space for the INT_STREAM interface
+	//
+	stream->tnstream->istream		    =  antlr3IntStreamNew();
+
+	if	(stream->tnstream->istream == NULL)
+	{
+		stream->adaptor->free		(stream->adaptor);
+		stream->tnstream->free		(stream->tnstream);
+		stream->free				(stream);
+
+		return	NULL;
+	}
+
+	// Install the common tree node stream API
+	//
+	stream->addNavigationNode		    =  addNavigationNode;
+	stream->hasUniqueNavigationNodes    =  hasUniqueNavigationNodes;
+	stream->newDownNode					=  newDownNode;
+	stream->newUpNode					=  newUpNode;
+	stream->reset						=  reset;
+	stream->push						=  push;
+	stream->pop							=  pop;
+
+	stream->free			    =  antlr3CommonTreeNodeStreamFree;
+
+	// Install the tree node stream API
+	//
+	stream->tnstream->getTreeAdaptor			=  getTreeAdaptor;
+	stream->tnstream->getTreeSource				=  getTreeSource;
+	stream->tnstream->_LT						=  _LT;
+	stream->tnstream->setUniqueNavigationNodes	=  setUniqueNavigationNodes;
+	stream->tnstream->toString					=  toString;
+	stream->tnstream->toStringSS				=  toStringSS;
+	stream->tnstream->toStringWork				=  toStringWork;
+	stream->tnstream->get						=  get;
+
+	// Install INT_STREAM interface
+	//
+	stream->tnstream->istream->consume	    =  consume;
+	stream->tnstream->istream->index	    =  tindex;
+	stream->tnstream->istream->_LA			=  _LA;
+	stream->tnstream->istream->mark			=  mark;
+	stream->tnstream->istream->release	    =  release;
+	stream->tnstream->istream->rewind	    =  rewindMark;
+	stream->tnstream->istream->rewindLast   =  rewindLast;
+	stream->tnstream->istream->seek			=  seek;
+	stream->tnstream->istream->size			=  size;
+
+	// Initialize data elements of INT stream
+	//
+	stream->tnstream->istream->type			= ANTLR3_COMMONTREENODE;
+	stream->tnstream->istream->super	    =  (stream->tnstream);
+
+	// Initialize data elements of TREE stream
+	//
+	stream->tnstream->ctns =  stream;
+
+	// Initialize data elements of the COMMON TREE NODE stream
+	//
+	stream->super					= NULL;
+	stream->uniqueNavigationNodes	= ANTLR3_FALSE;
+	stream->markers					= NULL;
+	stream->nodeStack				= antlr3StackNew(INITIAL_CALL_STACK_SIZE);
+
+	// Create the node list map
+	//
+	if	(hint == 0)
+	{
+		hint = DEFAULT_INITIAL_BUFFER_SIZE;
+	}
+	stream->nodes	= antlr3VectorNew(hint);
+	stream->p		= -1;
+
+	// Install the navigation nodes     
+	//
+	antlr3SetCTAPI(&(stream->UP));
+	antlr3SetCTAPI(&(stream->DOWN));
+	antlr3SetCTAPI(&(stream->EOF_NODE));
+	antlr3SetCTAPI(&(stream->INVALID_NODE));
+
+	token						= antlr3CommonTokenNew(ANTLR3_TOKEN_UP);
+	token->strFactory			= strFactory;
+	token->textState			= ANTLR3_TEXT_CHARP;
+	token->tokText.chars		= (pANTLR3_UCHAR)"UP";
+	stream->UP.token			= token;
+
+	token						= antlr3CommonTokenNew(ANTLR3_TOKEN_DOWN);
+	token->strFactory			= strFactory;
+	token->textState			= ANTLR3_TEXT_CHARP;
+	token->tokText.chars		= (pANTLR3_UCHAR)"DOWN";
+	stream->DOWN.token			= token;
+
+	token						= antlr3CommonTokenNew(ANTLR3_TOKEN_EOF);
+	token->strFactory			= strFactory;
+	token->textState			= ANTLR3_TEXT_CHARP;
+	token->tokText.chars		= (pANTLR3_UCHAR)"EOF";
+	stream->EOF_NODE.token		= token;
+
+	token						= antlr3CommonTokenNew(ANTLR3_TOKEN_INVALID);
+	token->strFactory			= strFactory;
+	token->textState			= ANTLR3_TEXT_CHARP;
+	token->tokText.chars		= (pANTLR3_UCHAR)"INVALID";
+	stream->INVALID_NODE.token	= token;
+
+
+	return  stream;
+}
+
+/// Free up any resources that belong to this common tree node stream.
+///
+static	void			    antlr3CommonTreeNodeStreamFree  (pANTLR3_COMMON_TREE_NODE_STREAM ctns)
+{
+
+	// If this is a rewriting stream, then certain resources
+	// belong to the originating node stream and we do not
+	// free them here.
+	//
+	if	(ctns->isRewriter != ANTLR3_TRUE)
+	{
+		ctns->adaptor			->free  (ctns->adaptor);
+
+		if	(ctns->nodeStack != NULL)
+		{
+			ctns->nodeStack->free(ctns->nodeStack);
+		}
+
+		ANTLR3_FREE(ctns->INVALID_NODE.token);
+		ANTLR3_FREE(ctns->EOF_NODE.token);
+		ANTLR3_FREE(ctns->DOWN.token);
+		ANTLR3_FREE(ctns->UP.token);
+	}
+	
+	if	(ctns->nodes != NULL)
+	{
+		ctns->nodes			->free  (ctns->nodes);
+	}
+	ctns->tnstream->istream ->free  (ctns->tnstream->istream);
+    ctns->tnstream			->free  (ctns->tnstream);
+
+
+    ANTLR3_FREE(ctns);
+}
+
+// ------------------------------------------------------------------------------
+// Local helpers
+//
+
+/// Walk and fill the tree node buffer from the root tree
+///
+static void
+fillBufferRoot(pANTLR3_COMMON_TREE_NODE_STREAM ctns)
+{
+	// Call the generic buffer routine with the root as the
+	// argument
+	//
+	fillBuffer(ctns, ctns->root);
+	ctns->p = 0;					// Indicate we are at buffer start
+}
+
+/// Walk the tree with a depth-first search and fill the nodes buffer.
+/// Don't add DOWN/UP navigation nodes if the supplied tree is a list
+/// (isNilNode returns true), such as the root tree is.
+///
+static void
+fillBuffer(pANTLR3_COMMON_TREE_NODE_STREAM ctns, pANTLR3_BASE_TREE t)
+{
+	ANTLR3_BOOLEAN	nilNode;
+	ANTLR3_UINT32	nCount;
+	ANTLR3_UINT32	c;
+
+	nilNode = ctns->adaptor->isNilNode(ctns->adaptor, t);
+
+	// If the supplied node is not a nil (list) node then we
+	// add in the node itself to the vector
+	//
+	if	(nilNode == ANTLR3_FALSE)
+	{
+		ctns->nodes->add(ctns->nodes, t, NULL);	
+	}
+
+	// Only add a DOWN node if the tree is not a nil tree and
+	// the tree does have children.
+	//
+	nCount = t->getChildCount(t);
+
+	if	(nilNode == ANTLR3_FALSE && nCount>0)
+	{
+		ctns->addNavigationNode(ctns, ANTLR3_TOKEN_DOWN);
+	}
+
+	// We always add any children the tree contains, via a recursive
+	// call to this function, which implements a depth-first addition
+	// of the nodes.
+	//
+	for	(c = 0; c < nCount; c++)
+	{
+		fillBuffer(ctns, (pANTLR3_BASE_TREE)ctns->adaptor->getChild(ctns->adaptor, t, c));
+	}
+
+	// If the tree had children and was not a nil (list) node, then we
+	// need to add an UP node here to match the DOWN node
+	//
+	if	(nilNode == ANTLR3_FALSE && nCount > 0)
+	{
+		ctns->addNavigationNode(ctns, ANTLR3_TOKEN_UP);
+	}
+}
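+
+// Worked example (illustrative only): for a tree ^(PLUS ^(MULT a b) c),
+// fillBuffer serializes the nodes depth-first into the vector as
+//
+//     PLUS DOWN MULT DOWN a b UP c UP
+//
+// i.e. every node that has children is followed by DOWN, its children, and a
+// matching UP, while a nil (list) root such as "a b c" contributes no DOWN/UP
+// of its own and simply yields "a b c".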
+
+
+// ------------------------------------------------------------------------------
+// Interface functions
+//
+
+/// Reset the input stream to the start of the input nodes.
+///
+static	void		
+reset	    (pANTLR3_COMMON_TREE_NODE_STREAM ctns)
+{
+	if	(ctns->p != -1)
+	{
+		ctns->p									= 0;
+	}
+	ctns->tnstream->istream->lastMarker		= 0;
+
+
+	// Free and reset the node stack only if this is not
+	// a rewriter, which is going to reuse the originating
+	// node stream's node stack
+	//
+	if  (ctns->isRewriter != ANTLR3_TRUE)
+    {
+		if	(ctns->nodeStack != NULL)
+		{
+			ctns->nodeStack->free(ctns->nodeStack);
+			ctns->nodeStack = antlr3StackNew(INITIAL_CALL_STACK_SIZE);
+		}
+	}
+}
+
+
+static pANTLR3_BASE_TREE
+LB(pANTLR3_TREE_NODE_STREAM tns, ANTLR3_INT32 k)
+{
+	if	( k==0)
+	{
+		return	&(tns->ctns->INVALID_NODE.baseTree);
+	}
+
+	if	( (tns->ctns->p - k) < 0)
+	{
+		return	&(tns->ctns->INVALID_NODE.baseTree);
+	}
+
+	return (pANTLR3_BASE_TREE)tns->ctns->nodes->get(tns->ctns->nodes, tns->ctns->p - k);
+}
+
+/// Get the tree node at the current input pointer + k ahead, where k=1 is the
+/// next node. k<0 indicates nodes in the past, so -1 is the previous node and
+/// -2 is two nodes ago. LT(0) is undefined. Requests beyond the end of the
+/// buffer return the EOF node; LT(0) and any index that resolves to a negative
+/// absolute position return the INVALID node sentinel.
+///
+/// This is analogous to the _LT() method of the TokenStream, but this
+/// returns a tree node instead of a token.  Makes code gen identical
+/// for both parser and tree grammars. :)
+///
+static	pANTLR3_BASE_TREE	    
+_LT	    (pANTLR3_TREE_NODE_STREAM tns, ANTLR3_INT32 k)
+{
+	if	(tns->ctns->p == -1)
+	{
+		fillBufferRoot(tns->ctns);
+	}
+
+	if	(k < 0)
+	{
+		return LB(tns, -k);
+	}
+	else if	(k == 0)
+	{
+		return	&(tns->ctns->INVALID_NODE.baseTree);
+	}
+
+	// k was a legitimate request; check whether it runs off the end
+	// of the node buffer.
+	//
+	if	(( tns->ctns->p + k - 1) >= (ANTLR3_INT32)(tns->ctns->nodes->count))
+	{
+		return &(tns->ctns->EOF_NODE.baseTree);
+	}
+
+	return	(pANTLR3_BASE_TREE)tns->ctns->nodes->get(tns->ctns->nodes, tns->ctns->p + k - 1);
+}
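+
+// Worked example (illustrative only): with the buffer filled as
+// PLUS DOWN MULT DOWN a b UP c UP and p indexing MULT (the next node to be
+// consumed), _LT(tns, 1) returns MULT, _LT(tns, 3) returns a, and
+// _LT(tns, -1) returns the DOWN node that was just consumed. _LT(tns, 0)
+// yields the INVALID_NODE sentinel and any k past the final UP yields
+// EOF_NODE, exactly as the boundary checks above dictate.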
+
+/// Where is this stream pulling nodes from?  This is not the name, but
+/// the object that provides node objects.
+///
+static	pANTLR3_BASE_TREE	    
+getTreeSource	(pANTLR3_TREE_NODE_STREAM tns)
+{
+    return  tns->ctns->root;
+}
+
+/// Consume the next node from the input stream
+///
+static	void		    
+consume	(pANTLR3_INT_STREAM is)
+{
+    pANTLR3_TREE_NODE_STREAM		tns;
+    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
+
+    tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
+    ctns    = tns->ctns;
+
+	if	(ctns->p == -1)
+	{
+		fillBufferRoot(ctns);
+	}
+	ctns->p++;
+}
+
+static	ANTLR3_UINT32	    
+_LA	    (pANTLR3_INT_STREAM is, ANTLR3_INT32 i)
+{
+	pANTLR3_TREE_NODE_STREAM		tns;
+	pANTLR3_BASE_TREE				t;
+
+	tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
+
+	// Ask LT for the 'token' at that position
+	//
+	t = tns->_LT(tns, i);
+
+	if	(t == NULL)
+	{
+		return	ANTLR3_TOKEN_INVALID;
+	}
+
+	// Token node was there so return the type of it
+	//
+	return  t->getType(t);
+}
+
+/// Mark the state of the input stream so that we can come back to it
+/// after a syntactic predicate and so on.
+///
+static	ANTLR3_MARKER	    
+mark	(pANTLR3_INT_STREAM is)
+{
+	pANTLR3_TREE_NODE_STREAM		tns;
+	pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
+
+	tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
+	ctns    = tns->ctns;
+
+	if	(tns->ctns->p == -1)
+	{
+		fillBufferRoot(tns->ctns);
+	}
+
+	// Return the current mark point
+	//
+	ctns->tnstream->istream->lastMarker = ctns->tnstream->istream->index(ctns->tnstream->istream);
+
+	return ctns->tnstream->istream->lastMarker;
+}
+
+static	void		    
+release	(pANTLR3_INT_STREAM is, ANTLR3_MARKER marker)
+{
+}
+
+/// Rewind the current state of the tree walk to the state it
+/// was in when mark() was called and it returned marker.  Also,
+/// wipe out the lookahead which will force reloading a few nodes
+/// but it is better than making a copy of the lookahead buffer
+/// upon mark().
+///
+static	void		    
+rewindMark	    (pANTLR3_INT_STREAM is, ANTLR3_MARKER marker)
+{
+	is->seek(is, marker);
+}
+
+static	void		    
+rewindLast	(pANTLR3_INT_STREAM is)
+{
+   is->seek(is, is->lastMarker);
+}
+
+/// consume() ahead until we hit index.  Can't just jump ahead--must
+/// spit out the navigation nodes.
+///
+static	void		    
+seek	(pANTLR3_INT_STREAM is, ANTLR3_MARKER index)
+{
+    pANTLR3_TREE_NODE_STREAM		tns;
+    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
+
+    tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
+    ctns    = tns->ctns;
+
+	ctns->p = ANTLR3_UINT32_CAST(index);
+}
+
+static	ANTLR3_MARKER		    
+tindex	(pANTLR3_INT_STREAM is)
+{
+    pANTLR3_TREE_NODE_STREAM		tns;
+    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
+
+    tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
+    ctns    = tns->ctns;
+
+	return (ANTLR3_MARKER)(ctns->p);
+}
+
+/// Expensive to compute the size of the whole tree while parsing.
+/// This method only returns how much input has been seen so far, so
+/// after parsing it returns the true size.
+///
+static	ANTLR3_UINT32		    
+size	(pANTLR3_INT_STREAM is)
+{
+    pANTLR3_TREE_NODE_STREAM		tns;
+    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
+
+    tns	    = (pANTLR3_TREE_NODE_STREAM)(is->super);
+    ctns    = tns->ctns;
+
+	if	(ctns->p == -1)
+	{
+		fillBufferRoot(ctns);
+	}
+
+	return ctns->nodes->size(ctns->nodes);
+}
+
+/// As we flatten the tree, we use UP and DOWN nodes to represent
+/// the tree structure.  When debugging we need unique nodes, so we
+/// instantiate new ones when uniqueNavigationNodes is true.
+///
+static	void		    
+addNavigationNode	    (pANTLR3_COMMON_TREE_NODE_STREAM ctns, ANTLR3_UINT32 ttype)
+{
+	pANTLR3_BASE_TREE	    node;
+
+	node = NULL;
+
+	if	(ttype == ANTLR3_TOKEN_DOWN)
+	{
+		if  (ctns->hasUniqueNavigationNodes(ctns) == ANTLR3_TRUE)
+		{
+			node    = ctns->newDownNode(ctns);
+		}
+		else
+		{
+			node    = &(ctns->DOWN.baseTree);
+		}
+	}
+	else
+	{
+		if  (ctns->hasUniqueNavigationNodes(ctns) == ANTLR3_TRUE)
+		{
+			node    = ctns->newUpNode(ctns);
+		}
+		else
+		{
+			node    = &(ctns->UP.baseTree);
+		}
+	}
+
+	// Now add the node we decided upon.
+	//
+	ctns->nodes->add(ctns->nodes, node, NULL);
+}
+
+
+static	pANTLR3_BASE_TREE_ADAPTOR			    
+getTreeAdaptor	(pANTLR3_TREE_NODE_STREAM tns)
+{
+    return  tns->ctns->adaptor;
+}
+
+static	ANTLR3_BOOLEAN	    
+hasUniqueNavigationNodes	    (pANTLR3_COMMON_TREE_NODE_STREAM ctns)
+{
+    return  ctns->uniqueNavigationNodes;
+}
+
+static	void		    
+setUniqueNavigationNodes	    (pANTLR3_TREE_NODE_STREAM tns, ANTLR3_BOOLEAN uniqueNavigationNodes)
+{
+    tns->ctns->uniqueNavigationNodes = uniqueNavigationNodes;
+}
+
+
+/// Print out the entire tree including DOWN/UP nodes.  Uses
+/// a recursive walk.  Mostly useful for testing as it yields
+/// the token types, not the text.
+///
+static	pANTLR3_STRING	    
+toString	    (pANTLR3_TREE_NODE_STREAM tns)
+{
+
+    return  tns->toStringSS(tns, tns->ctns->root, NULL);
+}
+
+static	pANTLR3_STRING	    
+toStringSS	    (pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE start, pANTLR3_BASE_TREE stop)
+{
+    pANTLR3_STRING  buf;
+
+    buf = tns->ctns->stringFactory->newRaw(tns->ctns->stringFactory);
+
+    tns->toStringWork(tns, start, stop, buf);
+
+    return  buf;
+}
+
+static	void	    
+toStringWork	(pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE p, pANTLR3_BASE_TREE stop, pANTLR3_STRING buf)
+{
+
+	ANTLR3_UINT32   n;
+	ANTLR3_UINT32   c;
+
+	if	(!p->isNilNode(p) )
+	{
+		pANTLR3_STRING	text;
+
+		text	= p->toString(p);
+
+		if  (text == NULL)
+		{
+			text = tns->ctns->stringFactory->newRaw(tns->ctns->stringFactory);
+
+			text->addc	(text, ' ');
+			text->addi	(text, p->getType(p));
+		}
+
+		buf->appendS(buf, text);
+	}
+
+	if	(p == stop)
+	{
+		return;		/* Finished */
+	}
+
+	n = p->getChildCount(p);
+
+	if	(n > 0 && ! p->isNilNode(p) )
+	{
+		buf->addc   (buf, ' ');
+		buf->addi   (buf, ANTLR3_TOKEN_DOWN);
+	}
+
+	for	(c = 0; c<n ; c++)
+	{
+		pANTLR3_BASE_TREE   child;
+
+		child = (pANTLR3_BASE_TREE)p->getChild(p, c);
+		tns->toStringWork(tns, child, stop, buf);
+	}
+
+	if	(n > 0 && ! p->isNilNode(p) )
+	{
+		buf->addc   (buf, ' ');
+		buf->addi   (buf, ANTLR3_TOKEN_UP);
+	}
+}
+
+static	ANTLR3_UINT32	    
+getLookaheadSize	(pANTLR3_COMMON_TREE_NODE_STREAM ctns)
+{
+    return	ctns->tail < ctns->head 
+	    ?	(ctns->lookAheadLength - ctns->head + ctns->tail)
+	    :	(ctns->tail - ctns->head);
+}
+
+static	pANTLR3_BASE_TREE	    
+newDownNode		(pANTLR3_COMMON_TREE_NODE_STREAM ctns)
+{
+    pANTLR3_COMMON_TREE	    dNode;
+    pANTLR3_COMMON_TOKEN    token;
+
+    token					= antlr3CommonTokenNew(ANTLR3_TOKEN_DOWN);
+	token->textState		= ANTLR3_TEXT_CHARP;
+	token->tokText.chars	= (pANTLR3_UCHAR)"DOWN";
+    dNode					= antlr3CommonTreeNewFromToken(token);
+
+    return  &(dNode->baseTree);
+}
+
+static	pANTLR3_BASE_TREE	    
+newUpNode		(pANTLR3_COMMON_TREE_NODE_STREAM ctns)
+{
+    pANTLR3_COMMON_TREE	    uNode;
+    pANTLR3_COMMON_TOKEN    token;
+
+    token					= antlr3CommonTokenNew(ANTLR3_TOKEN_UP);
+	token->textState		= ANTLR3_TEXT_CHARP;
+	token->tokText.chars	= (pANTLR3_UCHAR)"UP";
+    uNode					= antlr3CommonTreeNewFromToken(token);
+
+    return  &(uNode->baseTree);
+}
+
+/// Replace from start to stop child index of parent with t, which might
+/// be a list.  Number of children may be different
+/// after this call.  The stream is notified because it is walking the
+/// tree and might need to know you are monkeying with the underlying
+/// tree.  Also, it might be able to modify the node stream to avoid
+/// re-streaming for future phases.
+///
+/// If parent is NULL, don't do anything; we must be at the root of the overall
+/// tree, and we can't replace whatever points to the parent externally.
+///
+static	void						
+replaceChildren				(pANTLR3_TREE_NODE_STREAM tns, pANTLR3_BASE_TREE parent, ANTLR3_INT32 startChildIndex, ANTLR3_INT32 stopChildIndex, pANTLR3_BASE_TREE t)
+{
+	if	(parent != NULL)
+	{
+		pANTLR3_BASE_TREE_ADAPTOR	adaptor;
+		pANTLR3_COMMON_TREE_ADAPTOR	cta;
+
+		adaptor	= tns->getTreeAdaptor(tns);
+		cta		= (pANTLR3_COMMON_TREE_ADAPTOR)(adaptor->super);
+
+		adaptor->replaceChildren(adaptor, parent, startChildIndex, stopChildIndex, t);
+	}
+}
+
+static	pANTLR3_BASE_TREE
+get							(pANTLR3_TREE_NODE_STREAM tns, ANTLR3_INT32 k)
+{
+	if	(tns->ctns->p == -1)
+	{
+		fillBufferRoot(tns->ctns);
+	}
+
+	return (pANTLR3_BASE_TREE)tns->ctns->nodes->get(tns->ctns->nodes, k);
+}
+
+static	void
+push						(pANTLR3_COMMON_TREE_NODE_STREAM ctns, ANTLR3_INT32 index)
+{
+	ctns->nodeStack->push(ctns->nodeStack, ANTLR3_FUNC_PTR(ctns->p), NULL);	// Save current index
+	ctns->tnstream->istream->seek(ctns->tnstream->istream, index);
+}
+
+static	ANTLR3_INT32
+pop							(pANTLR3_COMMON_TREE_NODE_STREAM ctns)
+{
+	ANTLR3_INT32	retVal;
+
+	retVal = ANTLR3_UINT32_CAST(ctns->nodeStack->pop(ctns->nodeStack));
+	ctns->tnstream->istream->seek(ctns->tnstream->istream, retVal);
+	return retVal;
+}
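+
+// Usage sketch (illustrative only, assuming a generated tree parser as the
+// caller; startIndex is just an example value): push() saves the current
+// buffer index on nodeStack and seeks to the start of a sub-tree, and pop()
+// seeks back to the saved index when the rule returns:
+//
+//     ctns->push(ctns, startIndex);   // remember p, jump to startIndex
+//     ...                             // walk the sub-tree
+//     ctns->pop(ctns);                // return to the saved position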
diff --git a/runtime/C/src/antlr3convertutf.c b/runtime/C/src/antlr3convertutf.c
new file mode 100644
index 0000000..9d2bcf1
--- /dev/null
+++ b/runtime/C/src/antlr3convertutf.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright 2001-2004 Unicode, Inc.
+ * 
+ * Disclaimer
+ * 
+ * This source code is provided as is by Unicode, Inc. No claims are
+ * made as to fitness for any particular purpose. No warranties of any
+ * kind are expressed or implied. The recipient agrees to determine
+ * applicability of information provided. If this file has been
+ * purchased on magnetic or optical media from Unicode, Inc., the
+ * sole remedy for any claim will be exchange of defective media
+ * within 90 days of receipt.
+ * 
+ * Limitations on Rights to Redistribute This Code
+ * 
+ * Unicode, Inc. hereby grants the right to freely use the information
+ * supplied in this file in the creation of products supporting the
+ * Unicode Standard, and to make copies of this file in any form
+ * for internal or external distribution as long as this notice
+ * remains attached.
+ */
+
+/* ---------------------------------------------------------------------
+
+    Conversions between UTF32, UTF-16, and UTF-8. Source code file.
+    Author: Mark E. Davis, 1994.
+    Rev History: Rick McGowan, fixes & updates May 2001.
+    Sept 2001: fixed const & error conditions per
+	mods suggested by S. Parent & A. Lillich.
+    June 2002: Tim Dodd added detection and handling of incomplete
+	source sequences, enhanced error detection, added casts
+	to eliminate compiler warnings.
+    July 2003: slight mods to back out aggressive FFFE detection.
+    Jan 2004: updated switches in from-UTF8 conversions.
+    Oct 2004: updated to use UNI_MAX_LEGAL_UTF32 in UTF-32 conversions.
+
+    See the header file "ConvertUTF.h" for complete documentation.
+
+------------------------------------------------------------------------ */
+
+
+#include "antlr3convertutf.h"
+
+#ifdef CVTUTF_DEBUG
+#include <stdio.h>
+#endif
+
+
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF32toUTF16 (
+	const UTF32** sourceStart, const UTF32* sourceEnd, 
+	UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
+    ConversionResult result = conversionOK;
+    const UTF32* source = *sourceStart;
+    UTF16* target = *targetStart;
+    while (source < sourceEnd) {
+	UTF32 ch;
+	if (target >= targetEnd) {
+	    result = targetExhausted; break;
+	}
+	ch = *source++;
+	if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
+	    /* UTF-16 surrogate values are illegal in UTF-32; 0xffff or 0xfffe are both reserved values */
+	    if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+		if (flags == strictConversion) {
+		    --source; /* return to the illegal value itself */
+		    result = sourceIllegal;
+		    break;
+		} else {
+		    *target++ = UNI_REPLACEMENT_CHAR;
+		}
+	    } else {
+		*target++ = (UTF16)ch; /* normal case */
+	    }
+	} else if (ch > UNI_MAX_LEGAL_UTF32) {
+	    if (flags == strictConversion) {
+		result = sourceIllegal;
+	    } else {
+		*target++ = UNI_REPLACEMENT_CHAR;
+	    }
+	} else {
+	    /* target is a character in range 0x10000 - 0x10FFFF. */
+	    if (target + 1 >= targetEnd) {
+		--source; /* Back up source pointer! */
+		result = targetExhausted; break;
+	    }
+	    ch -= halfBase;
+	    *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
+	    *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
+	}
+    }
+    *sourceStart = source;
+    *targetStart = target;
+    return result;
+}
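+
+/* Worked example (illustrative only): converting U+10437 takes the
+ * supplementary-plane branch above:
+ *     ch -= halfBase;                              ->  0x00437
+ *     hi  = (ch >> halfShift) + UNI_SUR_HIGH_START ->  0xD801
+ *     lo  = (ch & halfMask)   + UNI_SUR_LOW_START  ->  0xDC37
+ * so the output is the surrogate pair 0xD801 0xDC37.
+ */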
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF16toUTF32 (
+	const UTF16** sourceStart, const UTF16* sourceEnd, 
+	UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
+    ConversionResult result = conversionOK;
+    const UTF16* source = *sourceStart;
+    UTF32* target = *targetStart;
+    UTF32 ch, ch2;
+    while (source < sourceEnd) {
+	const UTF16* oldSource = source; /*  In case we have to back up because of target overflow. */
+	ch = *source++;
+	/* If we have a surrogate pair, convert to UTF32 first. */
+	if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+	    /* If the 16 bits following the high surrogate are in the source buffer... */
+	    if (source < sourceEnd) {
+		ch2 = *source;
+		/* If it's a low surrogate, convert to UTF32. */
+		if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) {
+		    ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+			+ (ch2 - UNI_SUR_LOW_START) + halfBase;
+		    ++source;
+		} else if (flags == strictConversion) { /* it's an unpaired high surrogate */
+		    --source; /* return to the illegal value itself */
+		    result = sourceIllegal;
+		    break;
+		}
+	    } else { /* We don't have the 16 bits following the high surrogate. */
+		--source; /* return to the high surrogate */
+		result = sourceExhausted;
+		break;
+	    }
+	} else if (flags == strictConversion) {
+	    /* UTF-16 surrogate values are illegal in UTF-32 */
+	    if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) {
+		--source; /* return to the illegal value itself */
+		result = sourceIllegal;
+		break;
+	    }
+	}
+	if (target >= targetEnd) {
+	    source = oldSource; /* Back up source pointer! */
+	    result = targetExhausted; break;
+	}
+	*target++ = ch;
+    }
+    *sourceStart = source;
+    *targetStart = target;
+#ifdef CVTUTF_DEBUG
+if (result == sourceIllegal) {
+    ANTLR3_FPRINTF(stderr, "ConvertUTF16toUTF32 illegal seq 0x%04x,%04x\n", ch, ch2);
+    fflush(stderr);
+}
+#endif
+    return result;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Index into the table below with the first byte of a UTF-8 sequence to
+ * get the number of trailing bytes that are supposed to follow it.
+ * Note that *legal* UTF-8 sequences can't have 4 or 5 trailing bytes. The table is
+ * left as-is for anyone who may want to do such conversion, which was
+ * allowed in earlier algorithms.
+ */
+static const char trailingBytesForUTF8[256] = {
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+    2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
+};
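+
+/* Illustration: indexing the table with a lead byte gives the number of
+ * trailing bytes, e.g. trailingBytesForUTF8[0x41] == 0 (ASCII 'A'),
+ * trailingBytesForUTF8[0xC3] == 1 (2-byte sequence) and
+ * trailingBytesForUTF8[0xE2] == 2 (3-byte sequence).
+ */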
+
+/*
+ * Magic values subtracted from a buffer value during UTF8 conversion.
+ * This table contains as many values as there might be trailing bytes
+ * in a UTF-8 sequence.
+ */
+static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, 
+		     0x03C82080UL, 0xFA082080UL, 0x82082080UL };
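+
+/* Where the magic values come from (illustration): the from-UTF8 loops below
+ * accumulate the raw bytes, header bits included, shifting left by 6 between
+ * bytes. For a 2-byte sequence the header bits contribute
+ * (0xC0 << 6) + 0x80 = 0x3080, for a 3-byte sequence
+ * (0xE0 << 12) + (0x80 << 6) + 0x80 = 0xE2080, and so on; subtracting the
+ * matching offset leaves only the code point bits.
+ */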
+
+/*
+ * Once the bits are split out into bytes of UTF-8, this is a mask OR-ed
+ * into the first byte, depending on how many bytes follow.  There are
+ * as many entries in this table as there are UTF-8 sequence types.
+ * (I.e., one byte sequence, two byte... etc.). Remember that sequences
+ * for *legal* UTF-8 will be 4 or fewer bytes total.
+ */
+static const UTF8 firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC };
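+
+/* Illustration: the lead byte of an n-byte sequence is firstByteMark[n] OR-ed
+ * with the top bits of the code point, e.g. U+20AC (the euro sign) encodes as
+ * 0xE0 | 0x02 = 0xE2, then 0x82, 0xAC.
+ */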
+
+/* --------------------------------------------------------------------- */
+
+/* The interface converts a whole buffer to avoid function-call overhead.
+ * Constants have been gathered. Loops & conditionals have been removed as
+ * much as possible for efficiency, in favor of drop-through switches.
+ * (See "Note A" at the bottom of the file for equivalent code.)
+ * If your compiler supports it, the "isLegalUTF8" call can be turned
+ * into an inline function.
+ */
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF16toUTF8 (
+	const UTF16** sourceStart, const UTF16* sourceEnd, 
+	UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) {
+    ConversionResult result = conversionOK;
+    const UTF16* source = *sourceStart;
+    UTF8* target = *targetStart;
+    while (source < sourceEnd) {
+	UTF32 ch;
+	unsigned short bytesToWrite = 0;
+	const UTF32 byteMask = 0xBF;
+	const UTF32 byteMark = 0x80; 
+	const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */
+	ch = *source++;
+	/* If we have a surrogate pair, convert to UTF32 first. */
+	if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+	    /* If the 16 bits following the high surrogate are in the source buffer... */
+	    if (source < sourceEnd) {
+		UTF32 ch2 = *source;
+		/* If it's a low surrogate, convert to UTF32. */
+		if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) {
+		    ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+			+ (ch2 - UNI_SUR_LOW_START) + halfBase;
+		    ++source;
+		} else if (flags == strictConversion) { /* it's an unpaired high surrogate */
+		    --source; /* return to the illegal value itself */
+		    result = sourceIllegal;
+		    break;
+		}
+	    } else { /* We don't have the 16 bits following the high surrogate. */
+		--source; /* return to the high surrogate */
+		result = sourceExhausted;
+		break;
+	    }
+	} else if (flags == strictConversion) {
+	    /* UTF-16 surrogate values are illegal in UTF-32 */
+	    if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) {
+		--source; /* return to the illegal value itself */
+		result = sourceIllegal;
+		break;
+	    }
+	}
+	/* Figure out how many bytes the result will require */
+	if (ch < (UTF32)0x80) {	     bytesToWrite = 1;
+	} else if (ch < (UTF32)0x800) {     bytesToWrite = 2;
+	} else if (ch < (UTF32)0x10000) {   bytesToWrite = 3;
+	} else if (ch < (UTF32)0x110000) {  bytesToWrite = 4;
+	} else {			    bytesToWrite = 3;
+					    ch = UNI_REPLACEMENT_CHAR;
+	}
+
+	target += bytesToWrite;
+	if (target > targetEnd) {
+	    source = oldSource; /* Back up source pointer! */
+	    target -= bytesToWrite; result = targetExhausted; break;
+	}
+	switch (bytesToWrite) { /* note: everything falls through. */
+	    case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+	    case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+	    case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+	    case 1: *--target =  (UTF8)(ch | firstByteMark[bytesToWrite]);
+	}
+	target += bytesToWrite;
+    }
+    *sourceStart = source;
+    *targetStart = target;
+    return result;
+}
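+
+/* Worked example (illustrative only): encoding U+00E9 takes the
+ * bytesToWrite == 2 path through the switch above:
+ *     case 2: *--target = (0xE9 | 0x80) & 0xBF;     -> 0xA9, then ch >>= 6 -> 0x03
+ *     case 1: *--target =  0x03 | firstByteMark[2]; -> 0xC3
+ * giving the byte sequence 0xC3 0xA9.
+ */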
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Utility routine to tell whether a sequence of bytes is legal UTF-8.
+ * This must be called with the length pre-determined by the first byte.
+ * If not calling this from ConvertUTF8to*, then the length can be set by:
+ *  length = trailingBytesForUTF8[*source]+1;
+ * and the sequence is illegal right away if there aren't that many bytes
+ * available.
+ * If presented with a length > 4, this returns false.  The Unicode
+ * definition of UTF-8 goes up to 4-byte sequences.
+ */
+
+static ANTLR3_BOOLEAN
+isLegalUTF8(const UTF8 *source, int length) {
+    UTF8 a;
+    const UTF8 *srcptr = source+length;
+    switch (length) {
+    default: return ANTLR3_FALSE;
+	/* Everything else falls through when "true"... */
+    case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return ANTLR3_FALSE;
+    case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return ANTLR3_FALSE;
+    case 2: if ((a = (*--srcptr)) > 0xBF) return ANTLR3_FALSE;
+
+	switch (*source) {
+	    /* no fall-through in this inner switch */
+	    case 0xE0: if (a < 0xA0) return ANTLR3_FALSE; break;
+	    case 0xED: if (a > 0x9F) return ANTLR3_FALSE; break;
+	    case 0xF0: if (a < 0x90) return ANTLR3_FALSE; break;
+	    case 0xF4: if (a > 0x8F) return ANTLR3_FALSE; break;
+	    default:   if (a < 0x80) return ANTLR3_FALSE;
+	}
+
+    case 1: if (*source >= 0x80 && *source < 0xC2) return ANTLR3_FALSE;
+    }
+    if (*source > 0xF4) return ANTLR3_FALSE;
+    return ANTLR3_TRUE;
+}
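+
+/* Illustration of what the checks above reject: the overlong encoding
+ * 0xC0 0xAF of '/' fails the "case 1" test (lead byte in 0x80..0xC1), and the
+ * encoded surrogate 0xED 0xA0 0x80 fails the inner "case 0xED" test because
+ * its second byte is greater than 0x9F.
+ */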
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Exported function to return whether a UTF-8 sequence is legal or not.
+ * This is not used here; it's just exported.
+ */
+ANTLR3_BOOLEAN
+isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) {
+    int length = trailingBytesForUTF8[*source]+1;
+    if (source+length > sourceEnd) {
+	return ANTLR3_FALSE;
+    }
+    return isLegalUTF8(source, length);
+}
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF8toUTF16 (
+	const UTF8** sourceStart, const UTF8* sourceEnd, 
+	UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
+    ConversionResult result = conversionOK;
+    const UTF8* source = *sourceStart;
+    UTF16* target = *targetStart;
+    while (source < sourceEnd) {
+	UTF32 ch = 0;
+	unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
+	if (source + extraBytesToRead >= sourceEnd) {
+	    result = sourceExhausted; break;
+	}
+	/* Do this check whether lenient or strict */
+	if (! isLegalUTF8(source, extraBytesToRead+1)) {
+	    result = sourceIllegal;
+	    break;
+	}
+	/*
+	 * The cases all fall through. See "Note A" below.
+	 */
+	switch (extraBytesToRead) {
+	    case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */
+	    case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */
+	    case 3: ch += *source++; ch <<= 6;
+	    case 2: ch += *source++; ch <<= 6;
+	    case 1: ch += *source++; ch <<= 6;
+	    case 0: ch += *source++;
+	}
+	ch -= offsetsFromUTF8[extraBytesToRead];
+
+	if (target >= targetEnd) {
+	    source -= (extraBytesToRead+1); /* Back up source pointer! */
+	    result = targetExhausted; break;
+	}
+	if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
+	    /* UTF-16 surrogate values are illegal in UTF-32 */
+	    if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+		if (flags == strictConversion) {
+		    source -= (extraBytesToRead+1); /* return to the illegal value itself */
+		    result = sourceIllegal;
+		    break;
+		} else {
+		    *target++ = UNI_REPLACEMENT_CHAR;
+		}
+	    } else {
+		*target++ = (UTF16)ch; /* normal case */
+	    }
+	} else if (ch > UNI_MAX_UTF16) {
+	    if (flags == strictConversion) {
+		result = sourceIllegal;
+		source -= (extraBytesToRead+1); /* return to the start */
+		break; /* Bail out; shouldn't continue */
+	    } else {
+		*target++ = UNI_REPLACEMENT_CHAR;
+	    }
+	} else {
+	    /* target is a character in range 0x10000 - 0x10FFFF. */
+	    if (target + 1 >= targetEnd) {
+		source -= (extraBytesToRead+1); /* Back up source pointer! */
+		result = targetExhausted; break;
+	    }
+	    ch -= halfBase;
+	    *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
+	    *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
+	}
+    }
+    *sourceStart = source;
+    *targetStart = target;
+    return result;
+}
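+
+/* Worked example (illustrative only): the 4-byte sequence 0xF0 0x90 0x90 0xB7
+ * accumulates to 0x03C924B7 in the fall-through switch, and subtracting
+ * offsetsFromUTF8[3] (0x03C82080) leaves ch = 0x10437. That is above
+ * UNI_MAX_BMP, so it is emitted as the surrogate pair 0xD801 0xDC37.
+ */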
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF32toUTF8 (
+	const UTF32** sourceStart, const UTF32* sourceEnd, 
+	UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) {
+    ConversionResult result = conversionOK;
+    const UTF32* source = *sourceStart;
+    UTF8* target = *targetStart;
+    while (source < sourceEnd) {
+	UTF32 ch;
+	unsigned short bytesToWrite = 0;
+	const UTF32 byteMask = 0xBF;
+	const UTF32 byteMark = 0x80; 
+	ch = *source++;
+	if (flags == strictConversion ) {
+	    /* UTF-16 surrogate values are illegal in UTF-32 */
+	    if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+		--source; /* return to the illegal value itself */
+		result = sourceIllegal;
+		break;
+	    }
+	}
+	/*
+	 * Figure out how many bytes the result will require. Turn any
+	 * illegally large UTF32 things (> Plane 17) into replacement chars.
+	 */
+	if (ch < (UTF32)0x80) {	     bytesToWrite = 1;
+	} else if (ch < (UTF32)0x800) {     bytesToWrite = 2;
+	} else if (ch < (UTF32)0x10000) {   bytesToWrite = 3;
+	} else if (ch <= UNI_MAX_LEGAL_UTF32) {  bytesToWrite = 4;
+	} else {			    bytesToWrite = 3;
+					    ch = UNI_REPLACEMENT_CHAR;
+					    result = sourceIllegal;
+	}
+	
+	target += bytesToWrite;
+	if (target > targetEnd) {
+	    --source; /* Back up source pointer! */
+	    target -= bytesToWrite; result = targetExhausted; break;
+	}
+	switch (bytesToWrite) { /* note: everything falls through. */
+	    case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+	    case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+	    case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+	    case 1: *--target = (UTF8) (ch | firstByteMark[bytesToWrite]);
+	}
+	target += bytesToWrite;
+    }
+    *sourceStart = source;
+    *targetStart = target;
+    return result;
+}
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF8toUTF32 (
+	const UTF8** sourceStart, const UTF8* sourceEnd, 
+	UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
+    ConversionResult result = conversionOK;
+    const UTF8* source = *sourceStart;
+    UTF32* target = *targetStart;
+    while (source < sourceEnd) {
+	UTF32 ch = 0;
+	unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
+	if (source + extraBytesToRead >= sourceEnd) {
+	    result = sourceExhausted; break;
+	}
+	/* Do this check whether lenient or strict */
+	if (! isLegalUTF8(source, extraBytesToRead+1)) {
+	    result = sourceIllegal;
+	    break;
+	}
+	/*
+	 * The cases all fall through. See "Note A" below.
+	 */
+	switch (extraBytesToRead) {
+	    case 5: ch += *source++; ch <<= 6;
+	    case 4: ch += *source++; ch <<= 6;
+	    case 3: ch += *source++; ch <<= 6;
+	    case 2: ch += *source++; ch <<= 6;
+	    case 1: ch += *source++; ch <<= 6;
+	    case 0: ch += *source++;
+	}
+	ch -= offsetsFromUTF8[extraBytesToRead];
+
+	if (target >= targetEnd) {
+	    source -= (extraBytesToRead+1); /* Back up the source pointer! */
+	    result = targetExhausted; break;
+	}
+	if (ch <= UNI_MAX_LEGAL_UTF32) {
+	    /*
+	     * UTF-16 surrogate values are illegal in UTF-32, and anything
+	     * over Plane 17 (> 0x10FFFF) is illegal.
+	     */
+	    if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+		if (flags == strictConversion) {
+		    source -= (extraBytesToRead+1); /* return to the illegal value itself */
+		    result = sourceIllegal;
+		    break;
+		} else {
+		    *target++ = UNI_REPLACEMENT_CHAR;
+		}
+	    } else {
+		*target++ = ch;
+	    }
+	} else { /* i.e., ch > UNI_MAX_LEGAL_UTF32 */
+	    result = sourceIllegal;
+	    *target++ = UNI_REPLACEMENT_CHAR;
+	}
+    }
+    *sourceStart = source;
+    *targetStart = target;
+    return result;
+}
+
+/* ---------------------------------------------------------------------
+
+    Note A.
+    The fall-through switches in UTF-8 reading code save a
+    temp variable, some decrements & conditionals.  The switches
+    are equivalent to the following loop:
+	{
+	    int tmpBytesToRead = extraBytesToRead+1;
+	    do {
+		ch += *source++;
+		--tmpBytesToRead;
+		if (tmpBytesToRead) ch <<= 6;
+	    } while (tmpBytesToRead > 0);
+	}
+    In UTF-8 writing code, the switches on "bytesToWrite" are
+    similarly unrolled loops.
+
+   --------------------------------------------------------------------- */
diff --git a/antlr-3.4/runtime/C/src/antlr3cyclicdfa.c b/runtime/C/src/antlr3cyclicdfa.c
similarity index 100%
rename from antlr-3.4/runtime/C/src/antlr3cyclicdfa.c
rename to runtime/C/src/antlr3cyclicdfa.c
diff --git a/runtime/C/src/antlr3debughandlers.c b/runtime/C/src/antlr3debughandlers.c
new file mode 100644
index 0000000..5202e8e
--- /dev/null
+++ b/runtime/C/src/antlr3debughandlers.c
@@ -0,0 +1,1047 @@
+/// \file
+/// Provides the debugging functions invoked by a recognizer
+/// built using the debug generator mode of the antlr tool.
+/// See antlr3debugeventlistener.h for documentation.
+///
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3.h>
+
+// Not everyone wishes to include the debugger stuff in their final deployment because
+// it will then rely on being linked with the socket libraries. Hence if the programmer turns
+// off the debugging, we do some dummy stuff that satisfies compilers etc but means there is
+// no debugger and no reliance on the socket libraries. If you set this flag, then using the -debug
+// option to generate your code will produce code that just crashes, but then I presume you are smart
+// enough to realize that building the libraries without debugger support means you can't call the
+// debugger ;-)
+// 
+#ifdef ANTLR3_NODEBUGGER
+ANTLR3_API pANTLR3_DEBUG_EVENT_LISTENER
+antlr3DebugListenerNew()
+{
+		ANTLR3_PRINTF("C runtime was compiled without debugger support. This program will crash!!");
+		return NULL;
+}
+#else
+
+static	ANTLR3_BOOLEAN	handshake		(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+static	void	enterRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName);
+static	void	enterAlt				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int alt);
+static	void	exitRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName);
+static	void	enterSubRule			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
+static	void	exitSubRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
+static	void	enterDecision			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
+static	void	exitDecision			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber);
+static	void	consumeToken			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t);
+static	void	consumeHiddenToken		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t);
+static	void	LT						(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_COMMON_TOKEN t);
+static	void	mark					(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker);
+static	void	rewindMark				(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker);
+static	void	rewindLast				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+static	void	beginBacktrack			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level);
+static	void	endBacktrack			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level, ANTLR3_BOOLEAN successful);
+static	void	location				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int line, int pos);
+static	void	recognitionException	(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_EXCEPTION e);
+static	void	beginResync				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+static	void	endResync				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+static	void	semanticPredicate		(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_BOOLEAN result, const char * predicate);
+static	void	commence				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+static	void	terminate				(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+static	void	consumeNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
+static	void	LTT						(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_BASE_TREE t);
+static	void	nilNode					(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
+static	void	errorNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
+static	void	createNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t);
+static	void	createNodeTok			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE node, pANTLR3_COMMON_TOKEN token);
+static	void	becomeRoot				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE newRoot, pANTLR3_BASE_TREE oldRoot);
+static	void	addChild				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE root, pANTLR3_BASE_TREE child);
+static	void	setTokenBoundaries		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t, ANTLR3_MARKER tokenStartIndex, ANTLR3_MARKER tokenStopIndex);
+static	void	ack						(pANTLR3_DEBUG_EVENT_LISTENER delboy);
+
+/// Create and initialize a new debug event listener that can be connected to
+/// by ANTLRWorks and any other debugger via a socket.
+///
+ANTLR3_API pANTLR3_DEBUG_EVENT_LISTENER
+antlr3DebugListenerNew()
+{
+	pANTLR3_DEBUG_EVENT_LISTENER	delboy;
+
+	delboy = (pANTLR3_DEBUG_EVENT_LISTENER)ANTLR3_CALLOC(1, sizeof(ANTLR3_DEBUG_EVENT_LISTENER));
+
+	if	(delboy == NULL)
+	{
+		return NULL;
+	}
+
+	// Initialize the API
+	//
+	delboy->addChild				= addChild;
+	delboy->becomeRoot				= becomeRoot;
+	delboy->beginBacktrack			= beginBacktrack;
+	delboy->beginResync				= beginResync;
+	delboy->commence				= commence;
+	delboy->consumeHiddenToken		= consumeHiddenToken;
+	delboy->consumeNode				= consumeNode;
+	delboy->consumeToken			= consumeToken;
+	delboy->createNode				= createNode;
+	delboy->createNodeTok			= createNodeTok;
+	delboy->endBacktrack			= endBacktrack;
+	delboy->endResync				= endResync;
+	delboy->enterAlt				= enterAlt;
+	delboy->enterDecision			= enterDecision;
+	delboy->enterRule				= enterRule;
+	delboy->enterSubRule			= enterSubRule;
+	delboy->exitDecision			= exitDecision;
+	delboy->exitRule				= exitRule;
+	delboy->exitSubRule				= exitSubRule;
+	delboy->handshake				= handshake;
+	delboy->location				= location;
+	delboy->LT						= LT;
+	delboy->LTT						= LTT;
+	delboy->mark					= mark;
+	delboy->nilNode					= nilNode;
+	delboy->recognitionException	= recognitionException;
+	delboy->rewind					= rewindMark;
+	delboy->rewindLast				= rewindLast;
+	delboy->semanticPredicate		= semanticPredicate;
+	delboy->setTokenBoundaries		= setTokenBoundaries;
+	delboy->terminate				= terminate;
+	delboy->errorNode				= errorNode;
+
+	delboy->protocol_version		= 2;	// ANTLR 3.1 is at protocol version 2
+
+	delboy->port					= DEFAULT_DEBUGGER_PORT;
+
+	return delboy;
+}
+
+pANTLR3_DEBUG_EVENT_LISTENER
+antlr3DebugListenerNewPort(ANTLR3_UINT32 port)
+{
+	pANTLR3_DEBUG_EVENT_LISTENER	delboy;
+
+	delboy		 = antlr3DebugListenerNew();
+
+	if	(delboy != NULL)
+	{
+		delboy->port = port;
+	}
+
+	return delboy;
+}
+
+//--------------------------------------------------------------------------------
+// Support functions for sending stuff over the socket interface
+//
+static int 
+sockSend(SOCKET sock, const char * ptr, int len)
+{
+	int		sent;
+	int		thisSend;
+
+	sent	= 0;
+		
+	while	(sent < len)
+	{
+		// Send as many bytes as we can
+		//
+		thisSend =	send(sock, ptr, len - sent, 0);
+
+		// Check for errors and tell the user if we got one
+		//
+		if	(thisSend	== -1)
+		{
+			return	ANTLR3_FALSE;
+		}
+
+		// Increment our offset by how many we were able to send
+		//
+		ptr			+= thisSend;
+		sent		+= thisSend;
+	}
+	return	ANTLR3_TRUE;
+}
+
+static	ANTLR3_BOOLEAN	
+handshake				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
+{
+	/// Connection structure with which to wait and accept a connection from
+	/// a debugger.
+	///
+	SOCKET				serverSocket;
+
+	// Connection structures to deal with the client after we accept the connection
+	// and the server while we accept a connection.
+	//
+	ANTLR3_SOCKADDRT	client;
+	ANTLR3_SOCKADDRT	server;
+
+	// Buffer to construct our message in
+	//
+	char	message[256];
+
+	// Specifies the length of the connection structure to accept()
+	// Windows uses int, everyone else uses size_t
+	//
+	ANTLR3_SALENT				sockaddr_len;
+
+	// Option holder for setsockopt()
+	//
+	int		optVal;
+
+	if	(delboy->initialized == ANTLR3_FALSE)
+	{
+		// Windows requires us to initialize WinSock.
+		//
+#ifdef ANTLR3_WINDOWS
+		{
+			WORD		wVersionRequested;
+			WSADATA		wsaData;
+			int			err;			// Return code from WSAStartup
+
+			// We must initialise the Windows socket system when the DLL is loaded.
+			// We are asking for Winsock 1.1 or better as we don't need anything
+			// too complicated for this.
+			//
+			wVersionRequested = MAKEWORD( 1, 1);
+
+			err = WSAStartup( wVersionRequested, &wsaData );
+
+			if ( err != 0 ) 
+			{
+				// Tell the user that we could not find a usable
+				// WinSock DLL
+				//
+				return FALSE;
+			}
+		}
+#endif
+
+		// Create the server socket; we are the server because we just wait until
+		// a debugger connects to the port we are listening on.
+		//
+		serverSocket	= socket(AF_INET, SOCK_STREAM, 0);
+
+		if	(serverSocket == INVALID_SOCKET)
+		{
+			return ANTLR3_FALSE;
+		}
+
+		// Set the listening port
+		//
+		server.sin_port			= htons((unsigned short)delboy->port);
+		server.sin_family		= AF_INET;
+		server.sin_addr.s_addr	= htonl (INADDR_ANY);
+
+		// We could allow a rebind on the same addr/port pair I suppose, but
+		// I imagine that most people will just want to start debugging one parser at a time.
+		// Maybe change this at some point, but rejecting the bind at this point will ensure
+		// that people realize they have left something running in the background.
+		//
+		if	(bind(serverSocket, (pANTLR3_SOCKADDRC)&server, sizeof(server)) == -1)
+		{
+			return ANTLR3_FALSE;
+		}
+
+		// We have bound the socket to the port and address so we now ask the TCP subsystem
+		// to start listening on that address/port
+		//
+		if	(listen(serverSocket, 1) == -1)
+		{
+			// Some error, just fail
+			//
+			return	ANTLR3_FALSE;
+		}
+
+		// Now we can try to accept a connection on the port
+		//
+		sockaddr_len	= sizeof(client);
+		delboy->socket	= accept(serverSocket, (pANTLR3_SOCKADDRC)&client, &sockaddr_len);
+
+		// Having accepted a connection, we can stop listening and close down the socket
+		//
+		shutdown		(serverSocket, 0x02);
+		ANTLR3_CLOSESOCKET		(serverSocket);
+
+		if	(delboy->socket == -1)
+		{
+			return ANTLR3_FALSE;
+		}
+
+		// Disable Nagle as this is essentially a chat exchange
+		//
+		optVal	= 1;
+		setsockopt(delboy->socket, SOL_SOCKET, TCP_NODELAY, (const char *)&optVal, sizeof(optVal));
+		
+	}
+
+	// We now have a good socket connection with the debugging client, so we
+	// send it the protocol version we are using and the name of the grammar
+	// that we represent.
+	//
+	sprintf		(message, "ANTLR %d\n", delboy->protocol_version);
+	sockSend	(delboy->socket, message, (int)strlen(message));
+	sprintf		(message, "grammar \"%s\n", delboy->grammarFileName->chars);
+	sockSend	(delboy->socket, message, (int)strlen(message));
+	ack			(delboy);
+
+	delboy->initialized = ANTLR3_TRUE;
+
+	return	ANTLR3_TRUE;
+}
+
+// Send the supplied text and wait for an ack from the client
+static void
+transmit(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * ptr)
+{
+	sockSend(delboy->socket, ptr, (int)strlen(ptr));
+	ack(delboy);
+}
+
+static	void
+ack						(pANTLR3_DEBUG_EVENT_LISTENER delboy)
+{
+	// Local buffer to read the next character into
+	//
+	char	buffer;
+	int		rCount;
+
+	// Ack terminates in a line feed, so we just wait for
+	// one of those. Speed is not of the essence so we don't need
+	// to buffer the input or anything.
+	//
+	do
+	{
+		rCount = recv(delboy->socket, &buffer, 1, 0);
+	}
+	while	(rCount == 1 && buffer != '\n');
+
+	// If the socket was closed on us, then we will get an error or
+	// (with a graceful close), 0. We can assume that the debugger stopped for some reason
+	// (such as Java crashing again). Therefore we just exit the program
+	// completely if we don't get the terminating '\n' for the ack.
+	//
+	if	(rCount != 1)
+	{
+		ANTLR3_PRINTF("Exiting debugger as remote client closed the socket\n");
+		ANTLR3_PRINTF("Received char count was %d, and last char received was %02X\n", rCount, buffer);
+		exit(0);
+	}
+}
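+
+// Protocol sketch (illustrative only; the grammar and rule names below are
+// made up): each event is a single tab-separated, newline-terminated line,
+// and the debugger acknowledges it with a newline-terminated reply that the
+// loop above reads and discards, e.g.
+//
+//     -> enterRule\tT.g\tr\n
+//     <- \n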
+
+// Given a buffer string and a source string, serialize the
+// text, escaping carriage returns, newlines and the escape character. We have no need
+// for speed here, this is the debugger.
+//
+void
+serializeText(pANTLR3_STRING buffer, pANTLR3_STRING text)
+{
+	ANTLR3_UINT32	c;
+	ANTLR3_UCHAR	character;
+
+	// strings lead in with a "
+	//
+	buffer->append(buffer, "\t\"");
+
+	if	(text == NULL)
+	{
+		return;
+	}
+
+	// Now we replace carriage returns, newlines and the escape
+	// lead-in character '%' with their hex equivalents
+	// prefixed by '%'
+	//
+	for	(c = 0; c < text->len; c++)
+	{
+		switch	(character = text->charAt(text, c))
+		{
+			case	'\n':
+
+				buffer->append(buffer, "%0A");
+				break;
+
+			case	'\r':
+			
+				buffer->append(buffer, "%0D");
+				break;
+
+			case	'%':
+
+				buffer->append(buffer, "%25");
+				break;
+
+				// Other characters: The Song Remains the Same.
+				//
+			default:
+					
+				buffer->addc(buffer, character);
+				break;
+		}
+	}
+}
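+
+// Illustration: for the text a<LF>b, the routine above appends
+//
+//     \t"a%0Ab
+//
+// to the buffer: a leading tab and opening quote, then the text with each
+// escaped character replaced by its %XX hex form (no closing quote is added).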
+
+// Given a token, create a stringified version of it, in the supplied
+// buffer. We create a string for this in the debug 'object', if there 
+// is not one there already, and then reuse it here if asked to do this
+// again.
+//
+pANTLR3_STRING
+serializeToken(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t)
+{
+	// Do we already have a serialization buffer?
+	//
+	if	(delboy->tokenString == NULL)
+	{
+		// No, so create one, using the string factory that
+		// the grammar name used, which is guaranteed to exist.
+		// 64 bytes will do us here for starters. 
+		//
+		delboy->tokenString = delboy->grammarFileName->factory->newSize(delboy->grammarFileName->factory, 64);
+	}
+
+	// Empty string
+	//
+	delboy->tokenString->set(delboy->tokenString, (const char *)"");
+
+	// Now we serialize the elements of the token. Note that the debugger only
+	// uses 32 bits.
+	//
+	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(t->getTokenIndex(t)));
+	delboy->tokenString->addc(delboy->tokenString, '\t');
+	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(t->getType(t)));
+	delboy->tokenString->addc(delboy->tokenString, '\t');
+	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(t->getChannel(t)));
+	delboy->tokenString->addc(delboy->tokenString, '\t');
+	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(t->getLine(t)));
+	delboy->tokenString->addc(delboy->tokenString, '\t');
+	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(t->getCharPositionInLine(t)));
+
+	// Now send the text that the token represents.
+	//
+	serializeText(delboy->tokenString, t->getText(t));
+
+	// Finally, as the debugger is a Java program it will expect to get UTF-8
+	// encoded strings. We don't use UTF-8 internally to the C runtime, so we 
+	// must force encode it. We have a method to do this in the string class, but
+	// it returns malloc space that we must free afterwards.
+	//
+	return delboy->tokenString->toUTF8(delboy->tokenString);
+}
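+
+// Illustration (field values are made up): for a token with index 5, type 13,
+// channel 0, at line 2 column 7 with text "int", the buffer built above reads
+//
+//     5\t13\t0\t2\t7\t"int
+//
+// which the calling event function prefixes with the event name and
+// terminates with a newline before transmitting.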
+
+// Given a tree node, create a stringified version of it in the supplied
+// buffer.
+//
+pANTLR3_STRING
+serializeNode(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE node)
+{
+	pANTLR3_COMMON_TOKEN	token;
+
+
+	// Do we already have a serialization buffer?
+	//
+	if	(delboy->tokenString == NULL)
+	{
+		// No, so create one, using the string factory that
+		// the grammar name used, which is guaranteed to exist.
+		// 64 bytes will do us here for starters. 
+		//
+		delboy->tokenString = delboy->grammarFileName->factory->newSize(delboy->grammarFileName->factory, 64);
+	}
+
+	// Empty string
+	//
+	delboy->tokenString->set(delboy->tokenString, (const char *)"");
+
+	// Protect against bugs/errors etc
+	//
+	if	(node == NULL)
+	{
+		return delboy->tokenString;
+	}
+
+	// Now we serialize the elements of the node. Note that the debugger only
+	// uses 32 bits.
+	//
+	delboy->tokenString->addc(delboy->tokenString, '\t');
+
+	// Adaptor ID
+	//
+	delboy->tokenString->addi(delboy->tokenString, delboy->adaptor->getUniqueID(delboy->adaptor, node));
+	delboy->tokenString->addc(delboy->tokenString, '\t');
+
+	// Type of the current token (which may be imaginary)
+	//
+	delboy->tokenString->addi(delboy->tokenString, delboy->adaptor->getType(delboy->adaptor, node));
+
+	// See if we have an actual token or just an imaginary
+	//
+	token	= delboy->adaptor->getToken(delboy->adaptor, node);
+
+	delboy->tokenString->addc(delboy->tokenString, '\t');
+	if	(token != NULL)
+	{
+		// Real token
+		//
+		delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(token->getLine(token)));
+		delboy->tokenString->addc(delboy->tokenString, ' ');
+		delboy->tokenString->addi(delboy->tokenString, (ANTLR3_INT32)(token->getCharPositionInLine(token)));
+	}
+	else
+	{
+		// Imaginary tokens have no location
+		//
+		delboy->tokenString->addi(delboy->tokenString, -1);
+		delboy->tokenString->addc(delboy->tokenString, '\t');
+		delboy->tokenString->addi(delboy->tokenString, -1);
+	}
+
+	// Start Index of the node
+	//
+	delboy->tokenString->addc(delboy->tokenString, '\t');
+	delboy->tokenString->addi(delboy->tokenString, (ANTLR3_UINT32)(delboy->adaptor->getTokenStartIndex(delboy->adaptor, node)));
+
+	// Now send the text that the node represents.
+	//
+	serializeText(delboy->tokenString, delboy->adaptor->getText(delboy->adaptor, node));
+
+	// Finally, as the debugger is a Java program it will expect to get UTF-8
+	// encoded strings. We don't use UTF-8 internally to the C runtime, so we 
+	// must force encode it. We have a method to do this in the string class, but
+	// there is no UTF-8 string implementation as of yet.
+	//
+	return delboy->tokenString->toUTF8(delboy->tokenString);
+}
+
+//------------------------------------------------------------------------------------------------------------------
+// EVENTS
+//
+static	void
+enterRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName)
+{
+	char	buffer[512];
+
+	// Create the message (speed is not of the essence)
+	//
+	sprintf(buffer, "enterRule\t%s\t%s\n", grammarFileName, ruleName);
+	transmit(delboy, buffer);
+}
+
+static	void	
+enterAlt				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int alt)
+{
+	char	buffer[512];
+
+	// Create the message (speed is not of the essence)
+	//
+	sprintf(buffer, "enterAlt\t%d\n", alt);
+	transmit(delboy, buffer);
+}
+
+static	void	
+exitRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, const char * grammarFileName, const char * ruleName)
+{
+	char	buffer[512];
+
+	// Create the message (speed is not of the essence)
+	//
+	sprintf(buffer, "exitRule\t%s\t%s\n", grammarFileName, ruleName);
+	transmit(delboy, buffer);
+}
+
+static	void	
+enterSubRule			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber)
+{
+	char	buffer[512];
+
+	// Create the message (speed is not of the essence)
+	//
+	sprintf(buffer, "enterSubRule\t%d\n", decisionNumber);
+	transmit(delboy, buffer);
+}
+
+static	void	
+exitSubRule				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber)
+{
+	char	buffer[512];
+
+	// Create the message (speed is not of the essence)
+	//
+	sprintf(buffer, "exitSubRule\t%d\n", decisionNumber);
+	transmit(delboy, buffer);
+}
+
+static	void	
+enterDecision			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber)
+{
+	char	buffer[512];
+
+	// Create the message (speed is not of the essence)
+	//
+	sprintf(buffer, "enterDecision\t%d\n", decisionNumber);
+	transmit(delboy, buffer);
+
+}
+
+static	void	
+exitDecision			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int decisionNumber)
+{
+	char	buffer[512];
+
+	// Create the message (speed is not of the essence)
+	//
+	sprintf(buffer, "exitDecision\t%d\n", decisionNumber);
+	transmit(delboy, buffer);
+}
+
+static	void	
+consumeToken			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t)
+{
+	pANTLR3_STRING msg;
+
+	// Create the serialized token
+	//
+	msg = serializeToken(delboy, t);
+
+	// Insert the debug event indicator
+	//
+	msg->insert8(msg, 0, "consumeToken\t");
+
+	msg->addc(msg, '\n');
+
+	// Transmit the message and wait for ack
+	//
+	transmit(delboy, (const char *)(msg->chars));
+}
+
+static	void	
+consumeHiddenToken		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_COMMON_TOKEN t)
+{
+	pANTLR3_STRING msg;
+
+	// Create the serialized token
+	//
+	msg = serializeToken(delboy, t);
+
+	// Insert the debug event indicator
+	//
+	msg->insert8(msg, 0, "consumeHiddenToken\t");
+
+	msg->addc(msg, '\n');
+
+	// Transmit the message and wait for ack
+	//
+	transmit(delboy, (const char *)(msg->chars));
+}
+
+// Looking at the next token event.
+//
+static	void	
+LT						(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_COMMON_TOKEN t)
+{
+	pANTLR3_STRING msg;
+
+	if	(t != NULL)
+	{
+		// Create the serialized token
+		//
+		msg = serializeToken(delboy, t);
+
+		// Insert the index parameter
+		//
+		msg->insert8(msg, 0, "\t");
+		msg->inserti(msg, 0, i);
+
+		// Insert the debug event indicator
+		//
+		msg->insert8(msg, 0, "LT\t");
+
+		msg->addc(msg, '\n');
+
+		// Transmit the message and wait for ack
+		//
+		transmit(delboy, (const char *)(msg->chars));
+	}
+}
+
+static	void	
+mark					(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker)
+{
+	char buffer[128];
+
+	sprintf(buffer, "mark\t%d\n", (ANTLR3_UINT32)(marker & 0xFFFFFFFF));
+
+	// Transmit the message and wait for ack
+	//
+	transmit(delboy, buffer);
+}
+
+static	void	
+rewindMark					(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_MARKER marker)
+{
+	char buffer[128];
+
+	sprintf(buffer, "rewind\t%d\n", (ANTLR3_UINT32)(marker & 0xFFFFFFFF));
+
+	// Transmit the message and wait for ack
+	//
+	transmit(delboy, buffer);
+
+}
+
+static	void	
+rewindLast				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
+{
+	transmit(delboy, (const char *)"rewind\n");
+}
+
+static	void	
+beginBacktrack			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level)
+{
+	char buffer[128];
+
+	sprintf(buffer, "beginBacktrack\t%d\n", (ANTLR3_UINT32)(level & 0xFFFFFFFF));
+
+	// Transmit the message and wait for ack
+	//
+	transmit(delboy, buffer);
+}
+
+static	void	
+endBacktrack			(pANTLR3_DEBUG_EVENT_LISTENER delboy, int level, ANTLR3_BOOLEAN successful)
+{
+	char buffer[128];
+
+	sprintf(buffer, "endBacktrack\t%d\t%d\n", level, successful);
+
+	// Transmit the message and wait for ack
+	//
+	transmit(delboy, buffer);
+}
+
+static	void	
+location				(pANTLR3_DEBUG_EVENT_LISTENER delboy, int line, int pos)
+{
+	char buffer[128];
+
+	sprintf(buffer, "location\t%d\t%d\n", line, pos);
+
+	// Transmit the message and wait for ack
+	//
+	transmit(delboy, buffer);
+}
+
+static	void	
+recognitionException	(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_EXCEPTION e)
+{
+	char	buffer[256];
+
+	sprintf(buffer, "exception\t%s\t%d\t%d\t%d\n", (char *)(e->name), (ANTLR3_INT32)(e->index), e->line, e->charPositionInLine);
+
+	// Transmit the message and wait for ack
+	//
+	transmit(delboy, buffer);
+}
+
+static	void	
+beginResync				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
+{
+	transmit(delboy, (const char *)"beginResync\n");
+}
+
+static	void	
+endResync				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
+{
+	transmit(delboy, (const char *)"endResync\n");
+}
+
+static	void	
+semanticPredicate		(pANTLR3_DEBUG_EVENT_LISTENER delboy, ANTLR3_BOOLEAN result, const char * predicate)
+{
+	unsigned char * buffer;
+	unsigned char * out;
+
+	if	(predicate != NULL)
+	{
+		buffer	= (unsigned char *)ANTLR3_MALLOC(64 + 2*strlen(predicate));
+
+		if	(buffer != NULL)
+		{
+			out = buffer + sprintf((char *)buffer, "semanticPredicate\t%s\t", result == ANTLR3_TRUE ? "true" : "false");
+
+			while (*predicate != '\0')
+			{
+				switch(*predicate)
+				{
+					case	'\n':
+						
+						*out++	= '%';
+						*out++	= '0';
+						*out++	= 'A';
+						break;
+
+					case	'\r':
+
+						*out++	= '%';
+						*out++	= '0';
+						*out++	= 'D';
+						break;
+
+					case	'%':
+
+						*out++	= '%';
+						*out++	= '2';
+						*out++	= '5';
+						break;
+
+
+					default:
+
+						*out++	= *predicate;
+						break;
+				}
+
+				predicate++;
+			}
+			*out++	= '\n';
+			*out++	= '\0';
+
+			// Send it and wait for the ack, then release the scratch buffer
+			//
+			transmit(delboy, (const char *)buffer);
+
+			ANTLR3_FREE(buffer);
+		}
+	}
+}
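+
+// For reference, the escaping above follows the percent escaping used by the debugger
+// protocol: literal newline, carriage return and percent characters inside a predicate
+// string are sent as the three character sequences "%0A", "%0D" and "%25". The table
+// below is an illustrative sketch only (not used by the runtime; the helper name is ours):
+//
+static	const char *
+escapePredicateChar		(char c)
+{
+	switch	(c)
+	{
+		case	'\n':	return "%0A";
+		case	'\r':	return "%0D";
+		case	'%':	return "%25";
+		default:		return NULL;	// Caller copies the character through unchanged
+	}
+}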
+
+#ifdef ANTLR3_WINDOWS
+#pragma warning	(push)
+#pragma warning (disable : 4100)
+#endif
+
+static	void	
+commence				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
+{
+	// Nothing to see here
+	//
+}
+
+#ifdef ANTLR3_WINDOWS
+#pragma warning	(pop)
+#endif
+
+static	void	
+terminate				(pANTLR3_DEBUG_EVENT_LISTENER delboy)
+{
+	// Terminate sequence
+	//
+	sockSend(delboy->socket, "terminate\n", 10);		// Send out the command
+}
+
+//----------------------------------------------------------------
+// Tree parsing events
+//
+static	void	
+consumeNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t)
+{
+	pANTLR3_STRING	buffer;
+
+	buffer = serializeNode	(delboy, t);
+
+	// Now prepend the command
+	//
+	buffer->insert8	(buffer, 0, "consumeNode\t");
+	buffer->addc	(buffer, '\n');
+
+	// Send to the debugger and wait for the ack
+	//
+	transmit		(delboy, (const char *)(buffer->chars));
+}
+
+static	void	
+LTT						(pANTLR3_DEBUG_EVENT_LISTENER delboy, int i, pANTLR3_BASE_TREE t)
+{
+	pANTLR3_STRING	buffer;
+
+	buffer = serializeNode	(delboy, t);
+
+	// Now prepend the command
+	//
+	buffer->insert8	(buffer, 0, "\t");
+	buffer->inserti	(buffer, 0, i);
+	buffer->insert8	(buffer, 0, "LN\t");
+	buffer->addc	(buffer, '\n');
+
+	// Send to the debugger and wait for the ack
+	//
+	transmit		(delboy, (const char *)(buffer->chars));
+}
+
+static	void	
+nilNode					(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t)
+{
+	char	buffer[128];
+	sprintf(buffer, "nilNode\t%d\n", delboy->adaptor->getUniqueID(delboy->adaptor, t));
+	transmit(delboy, buffer);
+}
+
+static	void	
+createNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t)
+{
+	// Do we already have a serialization buffer?
+	//
+	if	(delboy->tokenString == NULL)
+	{
+		// No, so create one, using the string factory that
+		// the grammar name used, which is guaranteed to exist.
+		// 64 bytes will do us here for starters. 
+		//
+		delboy->tokenString = delboy->grammarFileName->factory->newSize(delboy->grammarFileName->factory, 64);
+	}
+
+	// Empty string
+	//
+	delboy->tokenString->set8(delboy->tokenString, (const char *)"createNodeFromTokenElements ");
+
+	// Now we serialize the elements of the node. Note that the debugger only
+	// uses 32 bits.
+	//
+	// Adaptor ID
+	//
+	delboy->tokenString->addi(delboy->tokenString, delboy->adaptor->getUniqueID(delboy->adaptor, t));
+	delboy->tokenString->addc(delboy->tokenString, '\t');
+
+	// Type of the current token (which may be imaginary)
+	//
+	delboy->tokenString->addi(delboy->tokenString, delboy->adaptor->getType(delboy->adaptor, t));
+
+	// The text that this node represents
+	//
+	serializeText(delboy->tokenString, delboy->adaptor->getText(delboy->adaptor, t));
+	delboy->tokenString->addc(delboy->tokenString, '\n');
+
+	// Finally, as the debugger is a Java program, it expects UTF-8
+	// encoded strings. We do not use UTF-8 internally in the C runtime, so we
+	// must encode it here. The string class provides a method for this, but
+	// there is no dedicated UTF-8 string implementation as yet.
+	//
+	transmit(delboy, (const char *)(delboy->tokenString->toUTF8(delboy->tokenString)->chars));
+
+}
+static void
+errorNode				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t)
+{
+	// Do we already have a serialization buffer?
+	//
+	if	(delboy->tokenString == NULL)
+	{
+		// No, so create one, using the string factory that
+		// the grammar name used, which is guaranteed to exist.
+		// 64 bytes will do us here for starters. 
+		//
+		delboy->tokenString = delboy->grammarFileName->factory->newSize(delboy->grammarFileName->factory, 64);
+	}
+
+	// Empty string
+	//
+	delboy->tokenString->set8(delboy->tokenString, (const char *)"errorNode\t");
+
+	// Now we serialize the elements of the node. Note that the debugger only
+	// uses 32 bits.
+	//
+	// Adaptor ID
+	//
+	delboy->tokenString->addi(delboy->tokenString, delboy->adaptor->getUniqueID(delboy->adaptor, t));
+	delboy->tokenString->addc(delboy->tokenString, '\t');
+
+	// Type of the current token (which is an error)
+	//
+	delboy->tokenString->addi(delboy->tokenString, ANTLR3_TOKEN_INVALID);
+
+	// The text that this node represents
+	//
+	serializeText(delboy->tokenString, delboy->adaptor->getText(delboy->adaptor, t));
+	delboy->tokenString->addc(delboy->tokenString, '\n');
+
+	// Finally, as the debugger is a Java program, it expects UTF-8
+	// encoded strings. We do not use UTF-8 internally in the C runtime, so we
+	// must encode it here. The string class provides a method for this, but
+	// there is no dedicated UTF-8 string implementation as yet.
+	//
+	transmit(delboy, (const char *)(delboy->tokenString->toUTF8(delboy->tokenString)->chars));
+
+}
+
+static	void	
+createNodeTok			(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE node, pANTLR3_COMMON_TOKEN token)
+{
+	char	buffer[128];
+
+	sprintf(buffer, "createNode\t%d\t%d\n",	delboy->adaptor->getUniqueID(delboy->adaptor, node), (ANTLR3_UINT32)token->getTokenIndex(token));
+
+	transmit(delboy, buffer);
+}
+
+static	void	
+becomeRoot				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE newRoot, pANTLR3_BASE_TREE oldRoot)
+{
+	char	buffer[128];
+
+	sprintf(buffer, "becomeRoot\t%d\t%d\n",	delboy->adaptor->getUniqueID(delboy->adaptor, newRoot),
+											delboy->adaptor->getUniqueID(delboy->adaptor, oldRoot)
+											);
+	transmit(delboy, buffer);
+}
+
+
+static	void	
+addChild				(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE root, pANTLR3_BASE_TREE child)
+{
+	char	buffer[128];
+
+	sprintf(buffer, "addChild\t%d\t%d\n",	delboy->adaptor->getUniqueID(delboy->adaptor, root),
+											delboy->adaptor->getUniqueID(delboy->adaptor, child)
+											);
+	transmit(delboy, buffer);
+}
+
+static	void	
+setTokenBoundaries		(pANTLR3_DEBUG_EVENT_LISTENER delboy, pANTLR3_BASE_TREE t, ANTLR3_MARKER tokenStartIndex, ANTLR3_MARKER tokenStopIndex)
+{
+	char	buffer[128];
+
+	sprintf(buffer, "setTokenBoundaries\t%d\t%d\t%d\n",	delboy->adaptor->getUniqueID(delboy->adaptor, t),
+												(ANTLR3_UINT32)tokenStartIndex,
+												(ANTLR3_UINT32)tokenStopIndex
+											);
+	transmit(delboy, buffer);
+}
+#endif
+
diff --git a/antlr-3.4/runtime/C/src/antlr3encodings.c b/runtime/C/src/antlr3encodings.c
similarity index 100%
rename from antlr-3.4/runtime/C/src/antlr3encodings.c
rename to runtime/C/src/antlr3encodings.c
diff --git a/antlr-3.4/runtime/C/src/antlr3exception.c b/runtime/C/src/antlr3exception.c
similarity index 100%
rename from antlr-3.4/runtime/C/src/antlr3exception.c
rename to runtime/C/src/antlr3exception.c
diff --git a/antlr-3.4/runtime/C/src/antlr3filestream.c b/runtime/C/src/antlr3filestream.c
similarity index 100%
rename from antlr-3.4/runtime/C/src/antlr3filestream.c
rename to runtime/C/src/antlr3filestream.c
diff --git a/runtime/C/src/antlr3inputstream.c b/runtime/C/src/antlr3inputstream.c
new file mode 100644
index 0000000..dd9f56e
--- /dev/null
+++ b/runtime/C/src/antlr3inputstream.c
@@ -0,0 +1,2057 @@
+/// \file
+/// Base functions to initialize and manipulate any input stream
+///
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3input.h>
+
+// -----------------------------------
+// Generic 8 bit input such as latin-1
+//
+
+// 8Bit INT Stream API
+//
+static	    void	    antlr38BitConsume		(pANTLR3_INT_STREAM is);
+static	    ANTLR3_UCHAR    antlr38BitLA		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
+static	    ANTLR3_UCHAR    antlr38BitLA_ucase		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
+static	    ANTLR3_MARKER   antlr38BitIndex		(pANTLR3_INT_STREAM is);
+static	    ANTLR3_MARKER   antlr38BitMark		(pANTLR3_INT_STREAM is);
+static	    void	    antlr38BitRewind		(pANTLR3_INT_STREAM is, ANTLR3_MARKER mark);
+static	    void	    antlr38BitRewindLast	(pANTLR3_INT_STREAM is);
+static	    void	    antlr38BitRelease		(pANTLR3_INT_STREAM is, ANTLR3_MARKER mark);
+static	    void	    antlr38BitSeek		(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint);
+static	    pANTLR3_STRING  antlr38BitGetSourceName	(pANTLR3_INT_STREAM is);
+
+// 8Bit Charstream API functions
+//
+static	    void	    antlr3InputClose		(pANTLR3_INPUT_STREAM input);
+static	    void	    antlr3InputReset		(pANTLR3_INPUT_STREAM input);
+static      void            antlr38BitReuse            (pANTLR3_INPUT_STREAM input, pANTLR3_UINT8 inString, ANTLR3_UINT32 size, pANTLR3_UINT8 name);
+static	    void *	    antlr38BitLT		(pANTLR3_INPUT_STREAM input, ANTLR3_INT32 lt);
+static	    ANTLR3_UINT32   antlr38BitSize		(pANTLR3_INPUT_STREAM input);
+static	    pANTLR3_STRING  antlr38BitSubstr		(pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop);
+static	    ANTLR3_UINT32   antlr38BitGetLine		(pANTLR3_INPUT_STREAM input);
+static	    void	  * antlr38BitGetLineBuf	(pANTLR3_INPUT_STREAM input);
+static	    ANTLR3_UINT32   antlr38BitGetCharPosition	(pANTLR3_INPUT_STREAM input);
+static	    void	    antlr38BitSetLine		(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 line);
+static	    void	    antlr38BitSetCharPosition	(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 position);
+static	    void	    antlr38BitSetNewLineChar	(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 newlineChar);
+static	    void	    antlr38BitSetUcaseLA	(pANTLR3_INPUT_STREAM input, ANTLR3_BOOLEAN flag);
+
+// -----------------------------------
+// UTF16 (also covers UCS2)
+//
+// INT Stream API
+//
+static	    void	    antlr3UTF16Consume	        (pANTLR3_INT_STREAM is);
+static	    ANTLR3_UCHAR    antlr3UTF16LA		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
+static	    void	    antlr3UTF16ConsumeLE        (pANTLR3_INT_STREAM is);
+static	    ANTLR3_UCHAR    antlr3UTF16LALE		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
+static	    void	    antlr3UTF16ConsumeBE        (pANTLR3_INT_STREAM is);
+static	    ANTLR3_UCHAR    antlr3UTF16LABE		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
+static	    ANTLR3_MARKER   antlr3UTF16Index		(pANTLR3_INT_STREAM is);
+static	    void	    antlr3UTF16Seek		(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint);
+
+// UTF16 Charstream API functions
+//
+static	    pANTLR3_STRING	antlr3UTF16Substr	(pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop);
+
+// -----------------------------------
+// UTF32 (also covers UCS4)
+//
+// INT Stream API
+//
+static	    void	    antlr3UTF32Consume	        (pANTLR3_INT_STREAM is);
+static	    ANTLR3_UCHAR    antlr3UTF32LA		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
+static	    ANTLR3_UCHAR    antlr3UTF32LALE		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
+static	    ANTLR3_UCHAR    antlr3UTF32LABE		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
+static	    ANTLR3_MARKER   antlr3UTF32Index		(pANTLR3_INT_STREAM is);
+static	    void	    antlr3UTF32Seek		(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint);
+
+// UTF32 Charstream API functions
+//
+static	    pANTLR3_STRING  antlr3UTF32Substr	        (pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop);
+
+// ------------------------------------
+// UTF-8
+//
+static	    void	    antlr3UTF8Consume	        (pANTLR3_INT_STREAM is);
+static	    ANTLR3_UCHAR    antlr3UTF8LA		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
+
+// ------------------------------------
+// EBCDIC
+//
+static	    ANTLR3_UCHAR    antlr3EBCDICLA		(pANTLR3_INT_STREAM is, ANTLR3_INT32 la);
+
+/// \brief Common function to setup function interface for an 8 bit input stream.
+///
+/// \param input Input stream context pointer
+///
+/// \remark
+///   - Many of the 8 bit oriented file stream handling functions will be usable
+///     by any, or at least some, other input streams. Therefore it is perfectly acceptable
+///     to call this function to install the 8Bit handler then override just those functions
+///     that would not work for the particular input encoding, such as consume for instance.
+/// 
+void 
+antlr38BitSetupStream	(pANTLR3_INPUT_STREAM input)
+{
+    // Build a string factory for this stream
+    //
+    input->strFactory	= antlr3StringFactoryNew(input->encoding);
+
+    // Default stream API set up is for 8Bit, so we are done
+    //
+}
+
+void
+antlr3GenericSetupStream  (pANTLR3_INPUT_STREAM input)
+{
+    /* Install function pointers for an 8 bit input
+     */
+
+    /* Allocate stream interface
+     */
+    input->istream		= antlr3IntStreamNew();
+    input->istream->type        = ANTLR3_CHARSTREAM;
+    input->istream->super       = input;
+
+    /* Intstream API
+     */
+    input->istream->consume	    = antlr38BitConsume;	    // Consume the next 8 bit character in the buffer			
+    input->istream->_LA		    = antlr38BitLA;	            // Return the UTF32 character at offset n (1 based)			
+    input->istream->index	    = antlr38BitIndex;	            // Current index (offset from the first character)
+    input->istream->mark	    = antlr38BitMark;		    // Record the current lex state for later restore			
+    input->istream->rewind	    = antlr38BitRewind;	            // How to rewind the input									
+    input->istream->rewindLast	    = antlr38BitRewindLast;	    // How to rewind the input to the most recent mark
+    input->istream->seek	    = antlr38BitSeek;		    // How to seek to a specific point in the stream		    
+    input->istream->release	    = antlr38BitRelease;	    // Reset marks after mark n									
+    input->istream->getSourceName   = antlr38BitGetSourceName;      // Return a string that names the input source
+
+    /* Charstream API
+     */
+    input->close		    =  antlr3InputClose;	    // Close down the stream completely										
+    input->free			    =  antlr3InputClose;	    // Synonym for free														
+    input->reset		    =  antlr3InputReset;	    // Reset input to start	
+    input->reuse                    =  antlr38BitReuse;             // Install a new input string and reset
+    input->_LT			    =  antlr38BitLT;		    // Same as _LA for 8 bit file										
+    input->size			    =  antlr38BitSize;		    // Return the size of the input buffer									
+    input->substr		    =  antlr38BitSubstr;	    // Return a string from the input stream								
+    input->getLine		    =  antlr38BitGetLine;	    // Return the current line number in the input stream					
+    input->getLineBuf		    =  antlr38BitGetLineBuf;	    // Return a pointer to the start of the current line being consumed	    
+    input->getCharPositionInLine    =  antlr38BitGetCharPosition;   // Return the offset into the current line of input						
+    input->setLine		    =  antlr38BitSetLine;	    // Set the input stream line number (does not set buffer pointers)	    
+    input->setCharPositionInLine    =  antlr38BitSetCharPosition;   // Set the offset in to the current line (does not set any pointers)   
+    input->SetNewLineChar	    =  antlr38BitSetNewLineChar;    // Set the value of the newline trigger character						
+    input->setUcaseLA		    =  antlr38BitSetUcaseLA;        // Changes the LA function to return upper case always
+
+    input->charByteSize		    = 1;		// Size in bytes of characters in this stream.
+
+    /* Initialize entries for tables etc
+     */
+    input->markers  = NULL;
+
+    /* Set up the input stream brand new
+     */
+    input->reset(input);
+    
+    /* Install default line separator character (it can be replaced
+     * by the grammar programmer later)
+     */
+    input->SetNewLineChar(input, (ANTLR3_UCHAR)'\n');
+}
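+
+// A minimal usage sketch (illustrative only; the function name is ours and real code
+// normally reaches these routines through the stream constructors). It assumes the
+// caller has already filled in the stream's data, sizeBuf and encoding members, since
+// the generic set up resets the stream over that buffer:
+//
+static void
+exampleCaseInsensitiveSetup (pANTLR3_INPUT_STREAM input)
+{
+    antlr3GenericSetupStream(input);                // Install the default 8 bit function table
+    antlr38BitSetupStream   (input);                // Attach an 8 bit string factory
+    input->setUcaseLA       (input, ANTLR3_TRUE);   // Fold lookahead to upper case
+}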
+
+static pANTLR3_STRING
+antlr38BitGetSourceName(pANTLR3_INT_STREAM is)
+{
+	return	is->streamName;
+}
+
+/** \brief Close down an input stream and free any memory allocated by it.
+ *
+ * \param input Input stream context pointer
+ */
+static void
+antlr3InputClose(pANTLR3_INPUT_STREAM input)
+{
+    // Close any markers in the input stream
+    //
+    if	(input->markers != NULL)
+    {
+		input->markers->free(input->markers);
+		input->markers = NULL;
+    }
+
+    // Close the string factory
+    //
+    if	(input->strFactory != NULL)
+    {
+		input->strFactory->close(input->strFactory);
+    }
+
+    // Free the input stream buffer if we allocated it
+    //
+    if	(input->isAllocated && input->data != NULL)
+    {
+		ANTLR3_FREE(input->data);
+		input->data = NULL;
+    }
+    
+    input->istream->free(input->istream);
+
+    // Finally, free the space for the structure itself
+    //
+    ANTLR3_FREE(input);
+
+    // Done
+    //
+}
+
+static void		
+antlr38BitSetUcaseLA		(pANTLR3_INPUT_STREAM input, ANTLR3_BOOLEAN flag)
+{
+	if	(flag)
+	{
+		// Return the upper case version of the characters
+		//
+		input->istream->_LA		    =  antlr38BitLA_ucase;
+	}
+	else
+	{
+		// Return the raw characters as they are in the buffer
+		//
+		input->istream->_LA		    =  antlr38BitLA;
+	}
+}
+
+
+/** \brief Reset a re-startable input stream to the start
+ *
+ * \param input Input stream context pointer
+ */
+static void
+antlr3InputReset(pANTLR3_INPUT_STREAM input)
+{
+
+    input->nextChar		= input->data;	/* Input at first character */
+    input->line			= 1;		/* starts at line 1	    */
+    input->charPositionInLine	= 0;
+    input->currentLine		= input->data;
+    input->markDepth		= 0;		/* Reset markers	    */
+    
+    /* Clear out the markers table if it is there
+     */
+    if	(input->markers != NULL)
+    {
+        input->markers->clear(input->markers);
+    }
+    else
+    {
+        /* Install a new markers table
+         */
+        input->markers  = antlr3VectorNew(0);
+    }
+}
+
+/** Install new source text into a working input stream so that the
+ *  input stream can be reused.
+ */
+static void
+antlr38BitReuse(pANTLR3_INPUT_STREAM input, pANTLR3_UINT8 inString, ANTLR3_UINT32 size, pANTLR3_UINT8 name)
+{
+    input->isAllocated	= ANTLR3_FALSE;
+    input->data		= inString;
+    input->sizeBuf	= size;
+    
+    // Now we can set up the file name. As we are reusing the stream, there may already
+    // be a string that we can reuse for holding the filename.
+    //
+	if	(input->istream->streamName == NULL) 
+	{
+		input->istream->streamName	= input->strFactory->newStr(input->strFactory, name == NULL ? (pANTLR3_UINT8)"-memory-" : name);
+		input->fileName		= input->istream->streamName;
+	}
+	else
+	{
+		input->istream->streamName->set(input->istream->streamName,  (name == NULL ? (const char *)"-memory-" : (const char *)name));
+	}
+
+    input->reset(input);
+}
+
+/** \brief Consume the next character in an 8 bit input stream
+ *
+ * \param input Input stream context pointer
+ */
+static void
+antlr38BitConsume(pANTLR3_INT_STREAM is)
+{
+    pANTLR3_INPUT_STREAM input;
+
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+    {	
+	/* Indicate one more character in this line
+	 */
+	input->charPositionInLine++;
+	
+	if  ((ANTLR3_UCHAR)(*((pANTLR3_UINT8)input->nextChar)) == input->newlineChar)
+	{
+	    /* Reset for start of a new line of input
+	     */
+	    input->line++;
+	    input->charPositionInLine	= 0;
+	    input->currentLine		= (void *)(((pANTLR3_UINT8)input->nextChar) + 1);
+	}
+
+	/* Increment to next character position
+	 */
+	input->nextChar = (void *)(((pANTLR3_UINT8)input->nextChar) + 1);
+    }
+}
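+
+// Worked example of the bookkeeping above: consuming the buffer "a\nb" from a freshly
+// reset stream (line 1, column 0) proceeds as follows:
+//
+//     consume 'a'  -> line 1, charPositionInLine 1
+//     consume '\n' -> line 2, charPositionInLine 0, currentLine now points at "b"
+//     consume 'b'  -> line 2, charPositionInLine 1
+//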
+
+/** \brief Return the input element assuming an 8 bit ascii input
+ *
+ * \param[in] input Input stream context pointer
+ * \param[in] la 1 based offset of next input stream element
+ *
+ * \return Next input character in internal ANTLR3 encoding (UTF32)
+ */
+static ANTLR3_UCHAR 
+antlr38BitLA(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
+{
+    pANTLR3_INPUT_STREAM input;
+	
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+    {
+		return	ANTLR3_CHARSTREAM_EOF;
+    }
+    else
+    {
+		return	(ANTLR3_UCHAR)(*((pANTLR3_UINT8)input->nextChar + la - 1));
+    }
+}
+
+/** \brief Return the input element assuming an 8 bit input and
+ *         always return the UPPER CASE character.
+ *		   Note that this is 8 bit and so we assume that the toupper
+ *		   function will use the correct locale for 8 bits.
+ *
+ * \param[in] input Input stream context pointer
+ * \param[in] la 1 based offset of next input stream element
+ *
+ * \return Next input character in internal ANTLR3 encoding (UTF32)
+ */
+static ANTLR3_UCHAR
+antlr38BitLA_ucase	(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
+{
+    pANTLR3_INPUT_STREAM input;
+	
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+    {
+		return	ANTLR3_CHARSTREAM_EOF;
+    }
+    else
+    {
+		return	(ANTLR3_UCHAR)toupper((*((pANTLR3_UINT8)input->nextChar + la - 1)));
+    }
+}
+
+
+/** \brief Return the input element assuming an 8 bit ascii input
+ *
+ * \param[in] input Input stream context pointer
+ * \param[in] lt 1 based offset of next input stream element
+ *
+ * \return Next input character in internal ANTLR3 encoding (UTF32)
+ */
+static void * 
+antlr38BitLT(pANTLR3_INPUT_STREAM input, ANTLR3_INT32 lt)
+{
+    /* Casting is horrible but it means no warnings and LT should never be called
+     * on a character stream anyway, I think. If it is, then the void * will need to be
+     * cast back in a similar manner. Yuck! But this means that LT for Token streams and
+     * tree streams is correct.
+     */
+    return (ANTLR3_FUNC_PTR(input->istream->_LA(input->istream, lt)));
+}
+
+/** \brief Calculate the current index in the input stream.
+ * \param[in] input Input stream context pointer
+ */
+static ANTLR3_MARKER
+antlr38BitIndex(pANTLR3_INT_STREAM is)
+{
+    pANTLR3_INPUT_STREAM input;
+
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    return  (ANTLR3_MARKER)(((pANTLR3_UINT8)input->nextChar));
+}
+
+/** \brief Return the size of the current input stream, as an 8Bit file
+ *   which in this case is the total input. Other implementations may provide
+ *   more sophisticated handling to deal with non-recoverable streams
+ *   and so on.
+ *
+ * \param[in] input Input stream context pointer
+ */
+static	ANTLR3_UINT32 
+antlr38BitSize(pANTLR3_INPUT_STREAM input)
+{
+    return  input->sizeBuf;
+}
+
+/** \brief Mark the current input point in an 8 bit stream
+ *  such as a file stream, where all the input is available in the
+ *  buffer.
+ *
+ * \param[in] is Input stream context pointer
+ */
+static ANTLR3_MARKER
+antlr38BitMark	(pANTLR3_INT_STREAM is)
+{
+    pANTLR3_LEX_STATE	    state;
+    pANTLR3_INPUT_STREAM    input;
+
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    /* New mark point 
+     */
+    ++input->markDepth;
+
+    /* See if we are revisiting a mark as we can just reuse the vector
+     * entry if we are, otherwise, we need a new one
+     */
+    if	(input->markDepth > input->markers->count)
+    {	
+		state = (pANTLR3_LEX_STATE)ANTLR3_MALLOC(sizeof(ANTLR3_LEX_STATE));
+		if (state == NULL)
+		{
+			// malloc failed
+			--input->markDepth;
+			return 0;
+		}
+
+		/* Add it to the table
+		 */
+		input->markers->add(input->markers, state, ANTLR3_FREE_FUNC);	/* No special structure, just free() on delete */
+    }
+    else
+    {
+		state	= (pANTLR3_LEX_STATE)input->markers->get(input->markers, input->markDepth - 1);
+
+		/* Assume no errors for speed, it will just blow up if the table failed
+		 * for some reason, hence lots of unit tests on the tables ;-)
+		 */
+    }
+
+    /* We have created or retrieved the state, so update it with the current
+     * elements of the lexer state.
+     */
+    state->charPositionInLine	= input->charPositionInLine;
+    state->currentLine		= input->currentLine;
+    state->line			= input->line;
+    state->nextChar		= input->nextChar;
+
+    is->lastMarker  = input->markDepth;
+
+    /* And that's it
+     */
+    return  input->markDepth;
+}
+/** \brief Rewind the lexer input to the state specified by the last produced mark.
+ * 
+ * \param[in] input Input stream context pointer
+ *
+ * \remark
+ * Assumes 8 Bit input stream.
+ */
+static void
+antlr38BitRewindLast	(pANTLR3_INT_STREAM is)
+{
+    is->rewind(is, is->lastMarker);
+}
+
+/** \brief Rewind the lexer input to the state specified by the supplied mark.
+ * 
+ * \param[in] input Input stream context pointer
+ *
+ * \remark
+ * Assumes 8 Bit input stream.
+ */
+static void
+antlr38BitRewind	(pANTLR3_INT_STREAM is, ANTLR3_MARKER mark)
+{
+    pANTLR3_LEX_STATE	state;
+    pANTLR3_INPUT_STREAM input;
+
+    input   = ((pANTLR3_INPUT_STREAM) is->super);
+
+    /* Perform any clean up of the marks
+     */
+    input->istream->release(input->istream, mark);
+
+    /* Find the supplied mark state 
+     */
+    state   = (pANTLR3_LEX_STATE)input->markers->get(input->markers, (ANTLR3_UINT32)(mark - 1));
+	if (state == NULL) { return; }
+
+    /* Seek input pointer to the requested point (note we supply the void *pointer
+     * to whatever is implementing the int stream to seek).
+     */
+    antlr38BitSeek(is, (ANTLR3_MARKER)(state->nextChar));
+
+    /* Restore the rest of the information from the mark
+     */
+    input->charPositionInLine	= state->charPositionInLine;
+    input->currentLine		= state->currentLine;
+    input->line			= state->line;
+    input->nextChar		= state->nextChar;
+
+    /* And we are done
+     */
+}
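+
+// The mark/rewind pair above supports speculative scanning. The sketch below is
+// illustrative only (the function name is ours; generated recognizers drive these
+// entries through the INT stream interface):
+//
+static ANTLR3_BOOLEAN
+exampleSpeculate	(pANTLR3_INT_STREAM is)
+{
+    ANTLR3_MARKER   m;
+    ANTLR3_BOOLEAN  matched;
+
+    m       = is->mark(is);                 // Remember line, column and input position
+    matched = (is->_LA(is, 1) == 'a');      // Probe the input in some arbitrary way
+    is->rewind(is, m);                      // Restore the recorded lex state
+
+    return matched;
+}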
+
+/** \brief Release (forget) all marks at or above the supplied mark.
+ * 
+ * \param[in] input Input stream context pointer
+ *
+ * \remark
+ * Assumes 8 Bit input stream.
+ */
+static void
+antlr38BitRelease	(pANTLR3_INT_STREAM is, ANTLR3_MARKER mark)
+{
+    pANTLR3_INPUT_STREAM input;
+
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    /* We don't do much here in fact, as we never free any of the higher marks in
+     * the markers table; we just reuse any memory allocated for them.
+     */
+    input->markDepth	= (ANTLR3_UINT32)(mark - 1);
+}
+
+/** \brief Seek the input stream to the supplied seek point.
+ * 
+ * \param[in] input Input stream context pointer
+ *
+ * \remark
+ * Assumes 8 Bit input stream.
+ */
+static void
+antlr38BitSeek	(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint)
+{
+	ANTLR3_INT32   count;
+	pANTLR3_INPUT_STREAM input;
+
+	input   = (pANTLR3_INPUT_STREAM)ANTLR3_FUNC_PTR(((pANTLR3_INPUT_STREAM) is->super));
+
+	/* If the requested seek point is less than the current
+	* input point, then we assume that we are resetting from a mark
+	* and do not need to scan, but can just set to there.
+	*/
+	if	(seekPoint <= (ANTLR3_MARKER)(input->nextChar))
+	{
+		input->nextChar	= ((pANTLR3_UINT8) seekPoint);
+	}
+	else
+	{
+		count	= (ANTLR3_UINT32)(seekPoint - (ANTLR3_MARKER)(input->nextChar));
+
+		while (count--)
+		{
+			is->consume(is);
+		}
+	}
+}
+/** Return a substring of the 8 bit input stream in
+ *  newly allocated memory.
+ *
+ * \param input Input stream context pointer
+ * \param start Offset in input stream where the string starts
+ * \param stop  Offset in the input stream where the string ends.
+ */
+static pANTLR3_STRING
+antlr38BitSubstr		(pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop)
+{
+	return  input->strFactory->newPtr(input->strFactory, (pANTLR3_UINT8)start, (ANTLR3_UINT32)(stop - start + 1));
+}
+
+/** \brief Return the line number as understood by the 8 bit input stream.
+ *
+ * \param input Input stream context pointer
+ * \return	Line number in input stream that we believe we are working on.
+ */
+static ANTLR3_UINT32   
+antlr38BitGetLine		(pANTLR3_INPUT_STREAM input)
+{
+    return  input->line;
+}
+
+/** Return a pointer into the input stream that points at the start
+ *  of the current input line as triggered by the end of line character installed
+ *  for the stream ('\n' unless told differently).
+ *
+ * \param[in] input 
+ */
+static void	  * 
+antlr38BitGetLineBuf	(pANTLR3_INPUT_STREAM input)
+{
+    return  input->currentLine;
+}
+
+/** Return the current offset in to the current line in the input stream.
+ *
+ * \param input Input stream context pointer
+ * \return      Current line offset
+ */
+static ANTLR3_UINT32
+antlr38BitGetCharPosition	(pANTLR3_INPUT_STREAM input)
+{
+    return  input->charPositionInLine;
+}
+
+/** Set the current line number as understood by the input stream.
+ *
+ * \param input Input stream context pointer
+ * \param line  Line number to tell the input stream we are on
+ *
+ * \remark
+ *  This function does not change any pointers, it just allows the programmer to set the
+ *  line number according to some external criterion, such as finding a lexed directive
+ *  like: #nnn "file.c" for instance, such that error reporting and so on is in sync
+ *  with some original source format.
+ */
+static void
+antlr38BitSetLine		(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 line)
+{
+    input->line	= line;
+}
+
+/** Set the current offset in the current line to be a particular setting.
+ *
+ * \param[in] input    Input stream context pointer
+ * \param[in] position New setting for current offset.
+ *
+ * \remark
+ * This does not set the actual pointers in the input stream, it is purely for reporting
+ * purposes and so on as per antlr38BitSetLine();
+ */
+static void
+antlr38BitSetCharPosition	(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 position)
+{
+    input->charPositionInLine = position;
+}
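+
+// The setLine/setCharPositionInLine pair exists for cases like the "#nnn \"file.c\""
+// directive mentioned above. A hedged sketch of how a lexer action might use them
+// (illustrative only; the function name is ours and parsedLine would come from the
+// directive that was just matched):
+//
+static void
+exampleApplyLineDirective (pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 parsedLine)
+{
+    input->setLine               (input, parsedLine);   // Resynchronise the reported line number
+    input->setCharPositionInLine (input, 0);            // And restart the column count
+}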
+
+/** Set the newline trigger character in the input stream to the supplied parameter.
+ *
+ * \param[in] input	    Input stream context pointer
+ * \param[in] newlineChar   Character to set to be the newline trigger.
+ *
+ * \remark
+ *  - The supplied newLineChar is in UTF32 encoding (which means ASCII and latin1 etc
+ *    are the same encodings), but the input stream catered to by this function is 8 bit
+ *    only, so it is up to the programmer to ensure that the character supplied is valid.
+ */
+static void 
+antlr38BitSetNewLineChar	(pANTLR3_INPUT_STREAM input, ANTLR3_UINT32 newlineChar)
+{
+    input->newlineChar	= newlineChar;
+}
+
+
+/// \brief Common function to setup function interface for a UTF16 or UCS2 input stream.
+///
+/// \param input Input stream context pointer
+///
+/// \remark
+///  - Strictly speaking, there is no such thing as a UCS2 input stream as the term
+///    tends to confuse the notions of character encoding, unicode and so on. UCS2 is
+///    essentially UTF16 without any surrogates and so the standard UTF16
+///    input stream is able to handle it without any special code.
+///
+void 
+antlr3UTF16SetupStream	(pANTLR3_INPUT_STREAM input, ANTLR3_BOOLEAN machineBigEndian, ANTLR3_BOOLEAN inputBigEndian)
+{
+    // Build a string factory for this stream. This is a UTF16 string factory which is a standard
+    // part of the ANTLR3 string. The string factory is then passed through the whole chain 
+    // of lexer->parser->tree->treeparser and so on.
+    //
+    input->strFactory	= antlr3StringFactoryNew(input->encoding);
+
+    // Generic API that does not care about endianness.
+    //
+    input->istream->index	    =  antlr3UTF16Index;            // Calculate current index in input stream, UTF16 based
+    input->substr		    =  antlr3UTF16Substr;	    // Return a string from the input stream
+    input->istream->seek	    =  antlr3UTF16Seek;		    // How to seek to a specific point in the stream
+
+    // We must install different UTF16 routines according to whether the input
+    // has the same endianness as the machine we are executing upon or not. If it does not,
+    // then we must install methods that can convert the endianness on the fly as they go
+    //
+
+    switch (machineBigEndian)
+    {
+        case    ANTLR3_TRUE:
+
+            // Machine is Big Endian, if the input is also then install the 
+            // methods that do not access input by bytes and reverse them.
+            // Otherwise install endian aware methods.
+            //
+            if  (inputBigEndian == ANTLR3_TRUE) 
+            {
+                // Input is machine compatible
+                //
+                input->istream->consume	    =  antlr3UTF16Consume;	    // Consume the next UTF16 character in the buffer
+                input->istream->_LA         =  antlr3UTF16LA;		    // Return the UTF32 character at offset n (1 based)    
+            }
+            else
+            {
+                // Need to use methods that know that the input is little endian
+                //
+                input->istream->consume	    =  antlr3UTF16ConsumeLE;	    // Consume the next UTF16 character in the buffer
+                input->istream->_LA         =  antlr3UTF16LALE;		    // Return the UTF32 character at offset n (1 based) 
+            }
+            break;
+
+        case    ANTLR3_FALSE:
+
+            // Machine is Little Endian, if the input is also then install the 
+            // methods that do not access input by bytes and reverse them.
+            // Otherwise install endian aware methods.
+            //
+            if  (inputBigEndian == ANTLR3_FALSE) 
+            {
+                // Input is machine compatible
+                //
+                input->istream->consume	    =  antlr3UTF16Consume;	    // Consume the next UTF16 character in the buffer
+                input->istream->_LA         =  antlr3UTF16LA;		    // Return the UTF32 character at offset n (1 based)    
+            }
+            else
+            {
+                // Need to use methods that know that the input is Big Endian
+                //
+                input->istream->consume	    =  antlr3UTF16ConsumeBE;	    // Consume the next UTF16 character in the buffer
+                input->istream->_LA         =  antlr3UTF16LABE;		    // Return the UTF32 character at offset n (1 based) 
+            }
+            break;
+    }
+
+        
+    input->charByteSize		    = 2;			    // Size in bytes of characters in this stream.
+
+}
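+
+// Illustrative helper only (not part of the runtime; the name is ours, and it assumes
+// the ANTLR3_UINT16 typedef that accompanies the pANTLR3_UINT16 type used above):
+// one way a caller could derive the machineBigEndian flag passed to the set up above
+// is to inspect the byte order of a 16 bit probe value.
+//
+static ANTLR3_BOOLEAN
+exampleMachineIsBigEndian (void)
+{
+    ANTLR3_UINT16   probe = 0x0102;
+
+    // A big endian machine stores the most significant byte first
+    //
+    return (*((pANTLR3_UINT8)&probe) == 0x01) ? ANTLR3_TRUE : ANTLR3_FALSE;
+}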
+
+/// \brief Consume the next character in a UTF16 input stream
+///
+/// \param input Input stream context pointer
+///
+static void
+antlr3UTF16Consume(pANTLR3_INT_STREAM is)
+{
+	pANTLR3_INPUT_STREAM input;
+        UTF32   ch;
+        UTF32   ch2;
+
+	input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+        // Buffer size is always in bytes
+        //
+	if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+	{	
+		// Indicate one more character in this line
+		//
+		input->charPositionInLine++;
+
+		if  ((ANTLR3_UCHAR)(*((pANTLR3_UINT16)input->nextChar)) == input->newlineChar)
+		{
+			// Reset for start of a new line of input
+			//
+			input->line++;
+			input->charPositionInLine	= 0;
+			input->currentLine		= (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
+		}
+
+		// Increment to next character position, accounting for any surrogates
+		//
+                // Next char in natural machine byte order
+                //
+                ch  = *((UTF16*)input->nextChar);
+
+                // We consumed one 16 bit character
+                //
+		input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
+
+                // If we have a surrogate pair then we need to consume
+                // a following valid LO surrogate.
+                //
+                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+
+                    // If the 16 bits following the high surrogate are in the source buffer...
+                    //
+                    if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+                    {
+                        // Next character is in natural machine byte order
+                        //
+                        ch2 = *((UTF16*)input->nextChar);
+
+                        // If it's a valid low surrogate, consume it
+                        //
+                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                        {
+                            // We consumed one 16 bit character
+                            //
+		            input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
+                        }
+                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                        // it.
+                        //
+                    } 
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it because the buffer ended
+                    //
+                } 
+                // Note that we did not check for an invalid low surrogate here, or the fact that the
+                // lo surrogate was missing. We just picked out one 16 bit character unless the character
+                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+                //
+	}
+}
+
+/// \brief Return the input element assuming a UTF16 input in machine byte order
+///
+/// \param[in] input Input stream context pointer
+/// \param[in] la 1 based offset of next input stream element
+///
+/// \return Next input character in internal ANTLR3 encoding (UTF32)
+///
+static ANTLR3_UCHAR 
+antlr3UTF16LA(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
+{
+	pANTLR3_INPUT_STREAM input;
+        UTF32   ch;
+        UTF32   ch2;
+        UTF16   * nextChar;
+
+        // Find the input interface and where we are currently pointing to
+        // in the input stream
+        //
+	input       = ((pANTLR3_INPUT_STREAM) (is->super));
+        nextChar    = (UTF16*)input->nextChar;
+
+        // If a positive offset then advance forward, else retreat
+        //
+        if  (la >= 0)
+        {
+            while   (--la > 0 && (pANTLR3_UINT8)nextChar < ((pANTLR3_UINT8)input->data) + input->sizeBuf )
+            {
+                // Advance our copy of the input pointer
+                //
+                // Next char in natural machine byte order
+                //
+                ch  = *nextChar++;
+
+                // If we have a surrogate pair then we need to consume
+                // a following valid LO surrogate.
+                //
+                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+                {
+                    // If the 16 bits following the high surrogate are in the source buffer...
+                    //
+                    if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+                    {
+                        // Next character is in natural machine byte order
+                        //
+                        ch2 = *nextChar;
+
+                        // If it's a valid low surrogate, consume it
+                        //
+                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                        {
+                            // We consumed one 16 bit character
+                            //
+		            nextChar++;
+                        }
+                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                        // it.
+                        //
+                    } 
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it because the buffer ended
+                    //
+                }
+                // Note that we did not check for an invalid low surrogate here, or the fact that the
+                // lo surrogate was missing. We just picked out one 16 bit character unless the character
+                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+                //
+            }
+        }
+        else
+        {
+            // We need to go backwards from our input point
+            //
+            while   (la++ < 0 && (pANTLR3_UINT8)nextChar > (pANTLR3_UINT8)input->data )
+            {
+                // Get the previous 16 bit character
+                //
+                ch = *--nextChar;
+
+                // If we found a low surrogate then go back one more character if
+                // the hi surrogate is there
+                //
+                if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) 
+                {
+                    ch2 = *(nextChar-1);
+                    if (ch2 >= UNI_SUR_HIGH_START && ch2 <= UNI_SUR_HIGH_END) 
+                    {
+                        // Yes, there is a high surrogate to match it so decrement one more and point to that
+                        //
+                        nextChar--;
+                    }
+                }
+            }
+        }
+
+        // Our local copy of nextChar is now pointing to either the correct character or end of file
+        //
+        // Input buffer size is always in bytes
+        //
+	if	( (pANTLR3_UINT8)nextChar >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+	{
+		return	ANTLR3_CHARSTREAM_EOF;
+	}
+	else
+	{
+            // Pick up the next 16 bit character (native machine byte order)
+            //
+            ch = *nextChar++;
+
+            // If we have a surrogate pair then we need to consume
+            // a following valid LO surrogate.
+            //
+            if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+            {
+                // If the 16 bits following the high surrogate are in the source buffer...
+                //
+                if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+                {
+                    // Next character is in natural machine byte order
+                    //
+                    ch2 = *nextChar;
+
+                    // If it's a valid low surrogate, consume it
+                    //
+                    if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                    {
+                        // Construct the UTF32 code point
+                        //
+                        ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+			    + (ch2 - UNI_SUR_LOW_START) + halfBase;
+                    }
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it.
+                    //
+                } 
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it because the buffer ended
+                //
+            }
+        }
+        return ch;
+}
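+
+// Worked example of the surrogate arithmetic above, assuming the usual Unicode
+// constants (UNI_SUR_HIGH_START 0xD800, UNI_SUR_LOW_START 0xDC00, halfShift 10,
+// halfBase 0x10000): the code point U+1F600 is stored as the pair D83D DE00, and
+//
+//     ((0xD83D - 0xD800) << 10) + (0xDE00 - 0xDC00) + 0x10000
+//         = 0xF400 + 0x0200 + 0x10000
+//         = 0x1F600
+//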
+
+
+/// \brief Calculate the current index in the input stream.
+/// \param[in] input Input stream context pointer
+///
+static ANTLR3_MARKER 
+antlr3UTF16Index(pANTLR3_INT_STREAM is)
+{
+    pANTLR3_INPUT_STREAM input;
+
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    return  (ANTLR3_MARKER)(input->nextChar);
+}
+
+/// \brief Rewind the lexer input to the state specified by the supplied mark.
+///
+/// \param[in] input Input stream context pointer
+///
+/// \remark
+/// Assumes UTF16 input stream.
+///
+static void
+antlr3UTF16Seek	(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint)
+{
+	pANTLR3_INPUT_STREAM input;
+
+	input   = ((pANTLR3_INPUT_STREAM) is->super);
+
+	// If the requested seek point is less than the current
+	// input point, then we assume that we are resetting from a mark
+	// and do not need to scan, but can just set to there as rewind will
+        // reset line numbers and so on.
+	//
+	if	(seekPoint <= (ANTLR3_MARKER)(input->nextChar))
+	{
+		input->nextChar	= (void *)seekPoint;
+	}
+	else
+	{
+            // Call consume until we reach the asked for seek point or EOF
+            //
+            while (is->_LA(is, 1) != ANTLR3_CHARSTREAM_EOF && seekPoint > (ANTLR3_MARKER)input->nextChar)
+	    {
+		is->consume(is);
+	    }
+	}
+}
+/// \brief Return a substring of the UTF16 input stream in
+///  newly allocated memory.
+///
+/// \param input Input stream context pointer
+/// \param start Offset in input stream where the string starts
+/// \param stop  Offset in the input stream where the string ends.
+///
+static pANTLR3_STRING
+antlr3UTF16Substr		(pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop)
+{
+    return  input->strFactory->newPtr(input->strFactory, (pANTLR3_UINT8)start, ((ANTLR3_UINT32_CAST(stop - start))/2) + 1);
+}
+
+/// \brief Consume the next character in a UTF16 input stream when the input is Little Endian and the machine is not
+/// Note that the UTF16 routines do not do any substantial verification of the input stream; for performance's
+/// sake, we assume it is validly encoded. So if a low surrogate is found at the current input position then we
+/// just consume it. Surrogate pairs should be seen as Hi, Lo. So if we have a Lo first, then the input stream
+/// is fubar but we just ignore that.
+///
+/// \param input Input stream context pointer
+///
+static void
+antlr3UTF16ConsumeLE(pANTLR3_INT_STREAM is)
+{
+	pANTLR3_INPUT_STREAM input;
+        UTF32   ch;
+        UTF32   ch2;
+
+	input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+        // Buffer size is always in bytes
+        //
+	if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+	{	
+		// Indicate one more character in this line
+		//
+		input->charPositionInLine++;
+
+		if  ((ANTLR3_UCHAR)(*((pANTLR3_UINT16)input->nextChar)) == input->newlineChar)
+		{
+			// Reset for start of a new line of input
+			//
+			input->line++;
+			input->charPositionInLine	= 0;
+			input->currentLine		= (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
+		}
+
+		// Increment to next character position, accounting for any surrogates
+		//
+                // Next char in little endian form
+                //
+                ch  = *((pANTLR3_UINT8)input->nextChar) + (*((pANTLR3_UINT8)input->nextChar + 1) <<8);
+
+                // We consumed one 16 bit character
+                //
+		input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
+
+                // If we have a surrogate pair then we need to consume
+                // a following valid LO surrogate.
+                //
+                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+
+                    // If the 16 bits following the high surrogate are in the source buffer...
+                    //
+                    if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+                    {
+                        ch2 = *((pANTLR3_UINT8)input->nextChar) + (*((pANTLR3_UINT8)input->nextChar + 1) <<8);
+
+                        // If it's a valid low surrogate, consume it
+                        //
+                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                        {
+                            // We consumed one 16 bit character
+                            //
+		            input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
+                        }
+                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                        // it.
+                        //
+                    } 
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it because the buffer ended
+                    //
+                } 
+                // Note that we did not check for an invalid low surrogate here, or the fact that the
+                // lo surrogate was missing. We just picked out one 16 bit character unless the character
+                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+                //
+	}
+}
+
+/// \brief Return the input element assuming a UTF16 input when the input is Little Endian and the machine is not
+///
+/// \param[in] input Input stream context pointer
+/// \param[in] la 1 based offset of next input stream element
+///
+/// \return Next input character in internal ANTLR3 encoding (UTF32)
+///
+static ANTLR3_UCHAR 
+antlr3UTF16LALE(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
+{
+	pANTLR3_INPUT_STREAM input;
+        UTF32           ch;
+        UTF32           ch2;
+        pANTLR3_UCHAR   nextChar;
+
+        // Find the input interface and where we are currently pointing to
+        // in the input stream
+        //
+	input       = ((pANTLR3_INPUT_STREAM) (is->super));
+        nextChar    = (pANTLR3_UCHAR)input->nextChar;
+
+        // If a positive offset then advance forward, else retreat
+        //
+        if  (la >= 0)
+        {
+            while   (--la > 0 && (pANTLR3_UINT8)nextChar < ((pANTLR3_UINT8)input->data) + input->sizeBuf )
+            {
+                // Advance our copy of the input pointer
+                //
+                // Next char in Little Endian byte order
+                //
+                ch  = (*nextChar) + (*(nextChar+1) << 8);
+                nextChar += 2;
+
+                // If we have a surrogate pair then we need to consume
+                // a following valid LO surrogate.
+                //
+                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+                {
+                    // If the 16 bits following the high surrogate are in the source buffer...
+                    //
+                    if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+                    {
+                        // Next character is in little endian byte order
+                        //
+                        ch2 = (*nextChar) + (*(nextChar+1) << 8);
+
+                        // If it's a valid low surrogate, consume it
+                        //
+                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                        {
+                            // We consumed one 16 bit character
+                            //
+		            nextChar += 2;
+                        }
+                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                        // it.
+                        //
+                    } 
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it because the buffer ended
+                    //
+                }
+                // Note that we did not check for an invalid low surrogate here, or the fact that the
+                // lo surrogate was missing. We just picked out one 16 bit character unless the character
+                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+                //
+            }
+        }
+        else
+        {
+            // We need to go backwards from our input point
+            //
+            while   (la++ < 0 && (pANTLR3_UINT8)nextChar > (pANTLR3_UINT8)input->data )
+            {
+                // Get the previous 16 bit character
+                //
+                ch = *(nextChar - 2) + (*(nextChar - 1) << 8);
+                nextChar -= 2;
+
+                // If we found a low surrogate then go back one more character if
+                // the hi surrogate is there
+                //
+                if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) 
+                {
+                    ch2 = *(nextChar - 2) + (*(nextChar - 1) << 8);
+                    if (ch2 >= UNI_SUR_HIGH_START && ch2 <= UNI_SUR_HIGH_END) 
+                    {
+                        // Yes, there is a high surrogate to match it so decrement one more and point to that
+                        //
+                        nextChar -=2;
+                    }
+                }
+            }
+        }
+
+        // Our local copy of nextChar is now pointing to either the correct character or end of file
+        //
+        // Input buffer size is always in bytes
+        //
+	if	( (pANTLR3_UINT8)nextChar >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+	{
+		return	ANTLR3_CHARSTREAM_EOF;
+	}
+	else
+	{
+            // Pick up the next 16 bit character (little endian byte order)
+            //
+            ch = (*nextChar) + (*(nextChar+1) << 8);
+            nextChar += 2;
+
+            // If we have a surrogate pair then we need to consume
+            // a following valid LO surrogate.
+            //
+            if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+            {
+                // If the 16 bits following the high surrogate are in the source buffer...
+                //
+                if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+                {
+                    // Next character is in little endian byte order
+                    //
+                    ch2 = (*nextChar) + (*(nextChar+1) << 8);
+
+                    // If it's a valid low surrogate, consume it
+                    //
+                    if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                    {
+                        // Construct the UTF32 code point
+                        //
+                        ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+			    + (ch2 - UNI_SUR_LOW_START) + halfBase;
+                    }
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it.
+                    //
+                } 
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it because the buffer ended
+                //
+            }
+        }
+        return ch;
+}
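For reference, the surrogate combination in the branch above is the standard UTF-16 decoding rule; halfShift (10), halfBase (0x10000) and the 0xD800/0xDC00 surrogate ranges come from the bundled Unicode.org conversion code (antlr3convertutf.c). A minimal, hypothetical sketch of the same arithmetic, not part of the runtime:

    /* Combine a UTF-16 surrogate pair into a UTF-32 code point,
     * e.g. 0xD83D 0xDE00 -> 0x1F600. Equivalent to the expression above.
     */
    static unsigned int combineSurrogates(unsigned int hi, unsigned int lo)
    {
        return ((hi - 0xD800u) << 10) + (lo - 0xDC00u) + 0x10000u;
    }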
+
+/// \brief Consume the next character in a UTF16 input stream when the input is Big Endian and the machine is not
+///
+/// \param input Input stream context pointer
+///
+static void
+antlr3UTF16ConsumeBE(pANTLR3_INT_STREAM is)
+{
+	pANTLR3_INPUT_STREAM input;
+        UTF32   ch;
+        UTF32   ch2;
+
+	input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+        // Buffer size is always in bytes
+        //
+	if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+	{	
+		// Indicate one more character in this line
+		//
+		input->charPositionInLine++;
+
+		if  ((ANTLR3_UCHAR)(*((pANTLR3_UINT16)input->nextChar)) == input->newlineChar)
+		{
+			// Reset for start of a new line of input
+			//
+			input->line++;
+			input->charPositionInLine	= 0;
+			input->currentLine		= (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
+		}
+
+		// Increment to next character position, accounting for any surrogates
+		//
+                // Next char in big endian form
+                //
+                ch  = *((pANTLR3_UINT8)input->nextChar + 1) + (*((pANTLR3_UINT8)input->nextChar ) <<8);
+
+                // We consumed one 16 bit character
+                //
+		input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
+
+                // If we have a surrogate pair then we need to consume
+                // a following valid LO surrogate.
+                //
+                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+
+                    // If the 16 bits following the high surrogate are in the source buffer...
+                    //
+                    if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+                    {
+                        // Big endian
+                        //
+                        ch2 = *((pANTLR3_UINT8)input->nextChar + 1) + (*((pANTLR3_UINT8)input->nextChar ) <<8);
+
+                        // If it's a valid low surrogate, consume it
+                        //
+                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                        {
+                            // We consumed one 16 bit character
+                            //
+		            input->nextChar = (void *)(((pANTLR3_UINT16)input->nextChar) + 1);
+                        }
+                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                        // it.
+                        //
+                    } 
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it because the buffer ended
+                    //
+                } 
+                // Note that we did not check for an invalid low surrogate here, or the fact that the
+                // lo surrogate was missing. We just picked out one 16 bit character unless the character
+                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+                //
+	}
+}
+
+/// \brief Return the input element assuming a UTF16 input when the input is Big Endian and the machine is not
+///
+/// \param[in] input Input stream context pointer
+/// \param[in] la 1 based offset of next input stream element
+///
+/// \return Next input character in internal ANTLR3 encoding (UTF32)
+///
+static ANTLR3_UCHAR 
+antlr3UTF16LABE(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
+{
+	pANTLR3_INPUT_STREAM input;
+        UTF32           ch;
+        UTF32           ch2;
+        pANTLR3_UCHAR   nextChar;
+
+        // Find the input interface and where we are currently pointing to
+        // in the input stream
+        //
+	input       = ((pANTLR3_INPUT_STREAM) (is->super));
+        nextChar    = (pANTLR3_UCHAR)input->nextChar;
+
+        // If a positive offset then advance forward, else retreat
+        //
+        if  (la >= 0)
+        {
+            while   (--la > 0 && (pANTLR3_UINT8)nextChar < ((pANTLR3_UINT8)input->data) + input->sizeBuf )
+            {
+                // Advance our copy of the input pointer
+                //
+                // Next char in Big Endian byte order
+                //
+                ch  = ((*nextChar) << 8) + *(nextChar+1);
+                nextChar += 2;
+
+                // If we have a surrogate pair then we need to consume
+                // a following valid LO surrogate.
+                //
+                if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+                {
+                    // If the 16 bits following the high surrogate are in the source buffer...
+                    //
+                    if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+                    {
+                        // Next character is in big endian byte order
+                        //
+                        ch2 = ((*nextChar) << 8) + *(nextChar+1);
+
+                        // If it's a valid low surrogate, consume it
+                        //
+                        if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                        {
+                            // We consumed one 16 bit character
+                            //
+		            nextChar += 2;
+                        }
+                        // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                        // it.
+                        //
+                    } 
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it because the buffer ended
+                    //
+                }
+                // Note that we did not check for an invalid low surrogate here, or the fact that the
+                // lo surrogate was missing. We just picked out one 16 bit character unless the character
+                // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+                //
+            }
+        }
+        else
+        {
+            // We need to go backwards from our input point
+            //
+            while   (la++ < 0 && (pANTLR3_UINT8)nextChar > (pANTLR3_UINT8)input->data )
+            {
+                // Get the previous 16 bit character
+                //
+                ch = (*(nextChar - 2) << 8) + *(nextChar - 1);
+                nextChar -= 2;
+
+                // If we found a low surrogate then go back one more character if
+                // the hi surrogate is there
+                //
+                if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) 
+                {
+                    ch2 = (*(nextChar - 2) << 8) + *(nextChar - 1);
+                    if (ch2 >= UNI_SUR_HIGH_START && ch2 <= UNI_SUR_HIGH_END) 
+                    {
+                        // Yes, there is a high surrogate to match it so decrement one more and point to that
+                        //
+                        nextChar -=2;
+                    }
+                }
+            }
+        }
+
+        // Our local copy of nextChar is now pointing to either the correct character or end of file
+        //
+        // Input buffer size is always in bytes
+        //
+	if	( (pANTLR3_UINT8)nextChar >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+	{
+		return	ANTLR3_CHARSTREAM_EOF;
+	}
+	else
+	{
+            // Pick up the next 16 bit character (big endian byte order)
+            //
+            ch = ((*nextChar) << 8) + *(nextChar+1);
+            nextChar += 2;
+
+            // If we have a surrogate pair then we need to consume
+            // a following valid LO surrogate.
+            //
+            if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+            {
+                // If the 16 bits following the high surrogate are in the source buffer...
+                //
+                if	((pANTLR3_UINT8)(nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+                {
+                    // Next character is in big endian byte order
+                    //
+                    ch2 = ((*nextChar) << 8) + *(nextChar+1);
+
+                    // If it's a valid low surrogate, consume it
+                    //
+                    if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                    {
+                        // Construct the UTF32 code point
+                        //
+                        ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+			    + (ch2 - UNI_SUR_LOW_START) + halfBase;
+                    }
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it.
+                    //
+                } 
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it because the buffer ended
+                //
+            }
+        }
+        return ch;
+}
+
+/// \brief Common function to setup function interface for a UTF32 input stream.
+///
+/// \param input Input stream context pointer
+///
+void 
+antlr3UTF32SetupStream	(pANTLR3_INPUT_STREAM input, ANTLR3_BOOLEAN machineBigEndian, ANTLR3_BOOLEAN inputBigEndian)
+{
+    // Build a string factory for this stream. This is a UTF32 string factory which is a standard
+    // part of the ANTLR3 string. The string factory is then passed through the whole chain of lexer->parser->tree->treeparser
+    // and so on.
+    //
+    input->strFactory	= antlr3StringFactoryNew(input->encoding);
+
+    // Generic API that does not care about endianness.
+    //
+    input->istream->index	    =  antlr3UTF32Index;            // Calculate current index in input stream, UTF32 based
+    input->substr		    =  antlr3UTF32Substr;	    // Return a string from the input stream
+    input->istream->seek	    =  antlr3UTF32Seek;		    // How to seek to a specific point in the stream
+    input->istream->consume	    =  antlr3UTF32Consume;	    // Consume the next UTF32 character in the buffer
+
+    // We must install different UTF32 LA routines according to whether the input
+    // has the same endianness as the machine we are executing upon or not. If it does not,
+    // then we must install methods that convert the endianness on the fly as they go.
+    //
+    switch (machineBigEndian)
+    {
+        case    ANTLR3_TRUE:
+
+            // Machine is Big Endian, if the input is also then install the 
+            // methods that do not access input by bytes and reverse them.
+            // Otherwise install endian aware methods.
+            //
+            if  (inputBigEndian == ANTLR3_TRUE) 
+            {
+                // Input is machine compatible
+                //
+                input->istream->_LA         =  antlr3UTF32LA;		    // Return the UTF32 character at offset n (1 based)    
+            }
+            else
+            {
+                // Need to use methods that know that the input is little endian
+                //
+                input->istream->_LA         =  antlr3UTF32LALE;		    // Return the UTF32 character at offset n (1 based) 
+            }
+            break;
+
+        case    ANTLR3_FALSE:
+
+            // Machine is Little Endian, if the input is also then install the 
+            // methods that do not access input by bytes and reverse them.
+            // Otherwise install endian aware methods.
+            //
+            if  (inputBigEndian == ANTLR3_FALSE) 
+            {
+                // Input is machine compatible
+                //
+                input->istream->_LA         =  antlr3UTF32LA;		    // Return the UTF32 character at offset n (1 based)    
+            }
+            else
+            {
+                // Need to use methods that know that the input is Big Endian
+                //
+                input->istream->_LA         =  antlr3UTF32LABE;		    // Return the UTF32 character at offset n (1 based) 
+            }
+            break;
+    }
+
+    input->charByteSize		    = 4;			    // Size in bytes of characters in this stream.
+}
+
+/** \brief Consume the next character in a UTF32 input stream
+ *
+ * \param input Input stream context pointer
+ */
+static void
+antlr3UTF32Consume(pANTLR3_INT_STREAM is)
+{
+    pANTLR3_INPUT_STREAM input;
+
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    // SizeBuf is always in bytes
+    //
+    if	((pANTLR3_UINT8)(input->nextChar) < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+    {	
+	/* Indicate one more character in this line
+	 */
+	input->charPositionInLine++;
+	
+	if  ((ANTLR3_UCHAR)(*((pANTLR3_UINT32)input->nextChar)) == input->newlineChar)
+	{
+	    /* Reset for start of a new line of input
+	     */
+	    input->line++;
+	    input->charPositionInLine	= 0;
+	    input->currentLine		= (void *)(((pANTLR3_UINT32)input->nextChar) + 1);
+	}
+
+	/* Increment to next character position
+	 */
+	input->nextChar = (void *)(((pANTLR3_UINT32)input->nextChar) + 1);
+    }
+}
+
+/// \brief Calculate the current index in the output stream.
+/// \param[in] input Input stream context pointer
+///
+static ANTLR3_MARKER 
+antlr3UTF32Index(pANTLR3_INT_STREAM is)
+{
+    pANTLR3_INPUT_STREAM input;
+
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    return  (ANTLR3_MARKER)(input->nextChar);
+}
+
+/// \brief Return a substring of the UTF32 input stream in
+///  newly allocated memory.
+///
+/// \param input Input stream context pointer
+/// \param start Offset in input stream where the string starts
+/// \param stop  Offset in the input stream where the string ends.
+///
+static pANTLR3_STRING
+antlr3UTF32Substr		(pANTLR3_INPUT_STREAM input, ANTLR3_MARKER start, ANTLR3_MARKER stop)
+{
+    return  input->strFactory->newPtr(input->strFactory, (pANTLR3_UINT8)start, ((ANTLR3_UINT32_CAST(stop - start))/4) + 1);
+}
+
+/// \brief Rewind the lexer input to the state specified by the supplied mark.
+///
+/// \param[in] input Input stream context pointer
+///
+/// \remark
+/// Assumes UTF32 input stream.
+///
+static void
+antlr3UTF32Seek	(pANTLR3_INT_STREAM is, ANTLR3_MARKER seekPoint)
+{
+	pANTLR3_INPUT_STREAM input;
+
+	input   = ((pANTLR3_INPUT_STREAM) is->super);
+
+	// If the requested seek point is less than the current
+	// input point, then we assume that we are resetting from a mark
+	// and do not need to scan, but can just set to there as rewind will
+        // reset line numbers and so on.
+	//
+	if	(seekPoint <= (ANTLR3_MARKER)(input->nextChar))
+	{
+		input->nextChar	= (void *)seekPoint;
+	}
+	else
+	{
+            // Call consume until we reach the asked for seek point or EOF
+            //
+            while (is->_LA(is, 1) != ANTLR3_CHARSTREAM_EOF && seekPoint > (ANTLR3_MARKER)input->nextChar)
+	    {
+		is->consume(is);
+	    }
+	}
+}
+
+/** \brief Return the input element assuming a UTF32 input in natural machine byte order
+ *
+ * \param[in] input Input stream context pointer
+ * \param[in] la 1 based offset of next input stream element
+ *
+ * \return Next input character in internal ANTLR3 encoding (UTF32)
+ */
+static ANTLR3_UCHAR 
+antlr3UTF32LA(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
+{
+    pANTLR3_INPUT_STREAM input;
+	
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+    {
+		return	ANTLR3_CHARSTREAM_EOF;
+    }
+    else
+    {
+		return	(ANTLR3_UCHAR)(*((pANTLR3_UINT32)input->nextChar + la - 1));
+    }
+}
+
+/** \brief Return the input element assuming a UTF32 input in little endian byte order
+ *
+ * \param[in] input Input stream context pointer
+ * \param[in] la 1 based offset of next input stream element
+ *
+ * \return Next input character in internal ANTLR3 encoding (UTF32)
+ */
+static ANTLR3_UCHAR 
+antlr3UTF32LALE(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
+{
+    pANTLR3_INPUT_STREAM input;
+	
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+    {
+		return	ANTLR3_CHARSTREAM_EOF;
+    }
+    else
+    {
+        ANTLR3_UCHAR   c;
+
+        c = (ANTLR3_UCHAR)(*((pANTLR3_UINT32)input->nextChar + la - 1));
+
+        // Swap endianness to Big Endian
+        //
+        return (c>>24) | ((c<<8) & 0x00FF0000) | ((c>>8) & 0x0000FF00) | (c<<24);
+    }
+}
+
+/** \brief Return the input element assuming a UTF32 input in big endian byte order
+ *
+ * \param[in] input Input stream context pointer
+ * \param[in] la 1 based offset of next input stream element
+ *
+ * \return Next input character in internal ANTLR3 encoding (UTF32)
+ * \remark This is the same code as the LE version but separated in case there are better optimisations for endian swap
+ */
+static ANTLR3_UCHAR 
+antlr3UTF32LABE(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
+{
+    pANTLR3_INPUT_STREAM input;
+	
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+    {
+		return	ANTLR3_CHARSTREAM_EOF;
+    }
+    else
+    {
+        ANTLR3_UCHAR   c;
+
+        c = (ANTLR3_UCHAR)(*((pANTLR3_UINT32)input->nextChar + la - 1));
+
+        // Swap endianness to Little Endian
+        //
+        return (c>>24) | ((c<<8) & 0x00FF0000) | ((c>>8) & 0x0000FF00) | (c<<24);
+    }
+}
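As a quick sanity check of the byte-swap expression shared by the LE and BE variants above (a hypothetical test, assuming 32-bit unsigned arithmetic): for c = 0x12345678 the four terms evaluate to 0x00000012, 0x00560000, 0x00003400 and 0x78000000, which OR together to 0x78563412.

    #include <assert.h>

    /* Illustrative only: verify the 32-bit byte swap used by antlr3UTF32LALE/LABE. */
    static void testSwap32(void)
    {
        unsigned int c       = 0x12345678u;
        unsigned int swapped = (c >> 24) | ((c << 8) & 0x00FF0000) | ((c >> 8) & 0x0000FF00) | (c << 24);
        assert(swapped == 0x78563412u);
    }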
+
+
+/// \brief Common function to setup function interface for a UTF8 input stream.
+///
+/// \param input Input stream context pointer
+///
+void 
+antlr3UTF8SetupStream	(pANTLR3_INPUT_STREAM input)
+{
+    // Build a string factory for this stream. This is a UTF8 string factory which is a standard
+    // part of the ANTLR3 string. The string factory is then passed through the whole chain of lexer->parser->tree->treeparser
+    // and so on.
+    //
+    input->strFactory	= antlr3StringFactoryNew(input->encoding);
+
+    // Generic API that does not care about endianness.
+    //
+    input->istream->consume	= antlr3UTF8Consume;	// Consume the next UTF32 character in the buffer
+    input->istream->_LA         = antlr3UTF8LA;         // Return the UTF32 character at offset n (1 based)    
+    input->charByteSize		= 0;	                // Size in bytes of characters in this stream (0 => variable width UTF8).
+}
+
+// ------------------------------------------------------
+// Following is from Unicode.org (see antlr3convertutf.c)
+//
+
+/// Index into the table below with the first byte of a UTF-8 sequence to
+/// get the number of trailing bytes that are supposed to follow it.
+/// Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is
+/// left as-is for anyone who may want to do such conversion, which was
+/// allowed in earlier algorithms.
+///
+static const ANTLR3_UINT32 trailingBytesForUTF8[256] = {
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+    2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
+};
+
+/// Magic values subtracted from a buffer value during UTF8 conversion.
+/// This table contains as many values as there might be trailing bytes
+/// in a UTF-8 sequence.
+///
+static const UTF32 offsetsFromUTF8[6] = 
+    {   0x00000000UL, 0x00003080UL, 0x000E2080UL, 
+	0x03C82080UL, 0xFA082080UL, 0x82082080UL 
+    };
+
+// End of Unicode.org tables
+// -------------------------
+
+
+/** \brief Consume the next character in a UTF8 input stream
+ *
+ * \param input Input stream context pointer
+ */
+static void
+antlr3UTF8Consume(pANTLR3_INT_STREAM is)
+{
+    pANTLR3_INPUT_STREAM    input;
+    ANTLR3_UINT32           extraBytesToRead;
+    ANTLR3_UCHAR            ch;
+    pANTLR3_UINT8           nextChar;
+
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    nextChar = (pANTLR3_UINT8)input->nextChar;
+
+    if	(nextChar < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+    {	
+	// Indicate one more character in this line
+	//
+	input->charPositionInLine++;
+	
+        // Are there more bytes needed to make up the whole thing?
+        //
+        extraBytesToRead = trailingBytesForUTF8[*nextChar];
+
+        if	(nextChar + extraBytesToRead >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+        {
+            input->nextChar = (((pANTLR3_UINT8)input->data) + input->sizeBuf);
+            return;
+        }
+
+        // Cases deliberately fall through (see note A in antlr3convertutf.c)
+        // Legal UTF8 is at most 4 bytes, but up to 6 bytes could be used in old-style
+        // UTF8, so we allow it.
+        //
+        ch  = 0;
+       	switch (extraBytesToRead) {
+	    case 5: ch += *nextChar++; ch <<= 6;
+	    case 4: ch += *nextChar++; ch <<= 6;
+	    case 3: ch += *nextChar++; ch <<= 6;
+	    case 2: ch += *nextChar++; ch <<= 6;
+	    case 1: ch += *nextChar++; ch <<= 6;
+	    case 0: ch += *nextChar++;
+	}
+
+        // Magically correct the input value
+        //
+	ch -= offsetsFromUTF8[extraBytesToRead];
+	if  (ch == input->newlineChar)
+	{
+	    /* Reset for start of a new line of input
+	     */
+	    input->line++;
+	    input->charPositionInLine	= 0;
+	    input->currentLine		= (void *)nextChar;
+	}
+
+        // Update input pointer
+        //
+        input->nextChar = nextChar;
+    }
+}
+/** \brief Return the input element assuming a UTF8 input
+ *
+ * \param[in] input Input stream context pointer
+ * \param[in] la 1 based offset of next input stream element
+ *
+ * \return Next input character in internal ANTLR3 encoding (UTF32)
+ */
+static ANTLR3_UCHAR 
+antlr3UTF8LA(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
+{
+    pANTLR3_INPUT_STREAM    input;
+    ANTLR3_UINT32           extraBytesToRead;
+    ANTLR3_UCHAR            ch;
+    pANTLR3_UINT8           nextChar;
+
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    nextChar = (pANTLR3_UINT8)input->nextChar;
+
+    // Do we need to traverse forwards or backwards?
+    // - LA(0) is treated as LA(1) and we assume that the nextChar is
+    //   already positioned.
+    // - LA(n), n>1, means we must traverse forward n-1 characters, catering for UTF8 encoding
+    // - LA(-n) means we must traverse backwards n characters
+    //
+    if (la > 1) {
+
+        // Make sure that we have at least one character left before trying to
+        // loop through the buffer.
+        //
+        if	(nextChar < (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+        {	
+            // Now traverse n-1 characters forward
+            //
+            while (--la > 0)
+            {
+                // Does the next character require trailing bytes?
+                // If so advance the pointer by that many bytes as well as advancing
+                // one position for what will be at least a single byte character.
+                //
+                nextChar += trailingBytesForUTF8[*nextChar] + 1;
+
+                // Does that calculation take us past the byte length of the buffer?
+                //
+                if	(nextChar >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+                {
+                    return ANTLR3_CHARSTREAM_EOF;
+                }
+            }
+        }
+        else
+        {
+            return ANTLR3_CHARSTREAM_EOF;
+        }
+    }
+    else
+    {
+        // LA is negative so we decrease the pointer by n character positions
+        //
+        while   (nextChar > (pANTLR3_UINT8)input->data && la++ < 0)
+        {
+            // Traversing backwards in UTF8 means decrementing by one,
+            // then continuing to decrement while the character pattern
+            // is flagged as being a trailing byte of an encoded code point.
+            // Trailing UTF8 bytes always start with 10 in binary. We assume that
+            // the UTF8 is well formed and do not check boundary conditions
+            //
+            nextChar--;
+            while ((*nextChar & 0xC0) == 0x80)
+            {
+                nextChar--;
+            }
+        }
+    }
+
+    // nextChar is now pointing at the UTF8 encoded character that we need to
+    // decode and return.
+    //
+    // Are there more bytes needed to make up the whole thing?
+    //
+    extraBytesToRead = trailingBytesForUTF8[*nextChar];
+    if	(nextChar + extraBytesToRead >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+    {
+        return ANTLR3_CHARSTREAM_EOF;
+    }
+
+    // Cases deliberately fall through (see note A in antlr3convertutf.c)
+    // 
+    ch  = 0;
+    switch (extraBytesToRead) {
+            case 5: ch += *nextChar++; ch <<= 6;
+            case 4: ch += *nextChar++; ch <<= 6;
+            case 3: ch += *nextChar++; ch <<= 6;
+            case 2: ch += *nextChar++; ch <<= 6;
+            case 1: ch += *nextChar++; ch <<= 6;
+            case 0: ch += *nextChar++;
+    }
+
+    // Magically correct the input value
+    //
+    ch -= offsetsFromUTF8[extraBytesToRead];
+
+    return ch;
+}
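To make the two-table scheme concrete, here is a hedged, standalone walk-through of the same decode steps for the 2-byte sequence 0xC3 0xA9 ('é'): trailingBytesForUTF8[0xC3] is 1, the fall-through switch accumulates 0x3169, and subtracting offsetsFromUTF8[1] (0x3080) leaves U+00E9. The table values are quoted from the definitions earlier in this file; the helper itself is hypothetical and not part of the runtime:

    /* Hypothetical standalone decoder mirroring antlr3UTF8LA for one fixed sequence. */
    static unsigned int decodeUTF8Example(void)
    {
        const unsigned char  seq[] = { 0xC3, 0xA9 };
        const unsigned char *p     = seq;
        unsigned int         ch    = 0;
        int                  extra = 1;         /* trailingBytesForUTF8[0xC3] */

        switch (extra) {                         /* cases fall through, as above */
            case 1: ch += *p++; ch <<= 6;
            case 0: ch += *p++;
        }
        ch -= 0x00003080UL;                      /* offsetsFromUTF8[1] */
        return ch;                               /* 0x00E9 */
    }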
+
+// EBCDIC to ASCII conversion table
+//
+// This is for EBCDIC EDF04 translated to ISO-8859-1, which is the usually accepted POSIX
+// translation; the character tables are published all over the interweb.
+// 
+const ANTLR3_UCHAR e2a[256] =
+{
+    0x00, 0x01, 0x02, 0x03, 0x85, 0x09, 0x86, 0x7f,
+    0x87, 0x8d, 0x8e, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+    0x10, 0x11, 0x12, 0x13, 0x8f, 0x0a, 0x08, 0x97,
+    0x18, 0x19, 0x9c, 0x9d, 0x1c, 0x1d, 0x1e, 0x1f,
+    0x80, 0x81, 0x82, 0x83, 0x84, 0x92, 0x17, 0x1b,
+    0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x05, 0x06, 0x07, 
+    0x90, 0x91, 0x16, 0x93, 0x94, 0x95, 0x96, 0x04,
+    0x98, 0x99, 0x9a, 0x9b, 0x14, 0x15, 0x9e, 0x1a,
+    0x20, 0xa0, 0xe2, 0xe4, 0xe0, 0xe1, 0xe3, 0xe5,
+    0xe7, 0xf1, 0x60, 0x2e, 0x3c, 0x28, 0x2b, 0x7c,
+    0x26, 0xe9, 0xea, 0xeb, 0xe8, 0xed, 0xee, 0xef,
+    0xec, 0xdf, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x9f,
+    0x2d, 0x2f, 0xc2, 0xc4, 0xc0, 0xc1, 0xc3, 0xc5,
+    0xc7, 0xd1, 0x5e, 0x2c, 0x25, 0x5f, 0x3e, 0x3f,
+    0xf8, 0xc9, 0xca, 0xcb, 0xc8, 0xcd, 0xce, 0xcf,
+    0xcc, 0xa8, 0x3a, 0x23, 0x40, 0x27, 0x3d, 0x22,
+    0xd8, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+    0x68, 0x69, 0xab, 0xbb, 0xf0, 0xfd, 0xfe, 0xb1,
+    0xb0, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
+    0x71, 0x72, 0xaa, 0xba, 0xe6, 0xb8, 0xc6, 0xa4,
+    0xb5, 0xaf, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+    0x79, 0x7a, 0xa1, 0xbf, 0xd0, 0xdd, 0xde, 0xae,
+    0xa2, 0xa3, 0xa5, 0xb7, 0xa9, 0xa7, 0xb6, 0xbc,
+    0xbd, 0xbe, 0xac, 0x5b, 0x5c, 0x5d, 0xb4, 0xd7,
+    0xf9, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+    0x48, 0x49, 0xad, 0xf4, 0xf6, 0xf2, 0xf3, 0xf5,
+    0xa6, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
+    0x51, 0x52, 0xb9, 0xfb, 0xfc, 0xdb, 0xfa, 0xff,
+    0xd9, 0xf7, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+    0x59, 0x5a, 0xb2, 0xd4, 0xd6, 0xd2, 0xd3, 0xd5,
+    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+    0x38, 0x39, 0xb3, 0x7b, 0xdc, 0x7d, 0xda, 0x7e
+};
+
+/// \brief Common function to setup function interface for a EBCDIC input stream.
+///
+/// \param input Input stream context pointer
+///
+void 
+antlr3EBCDICSetupStream	(pANTLR3_INPUT_STREAM input)
+{
+    // EBCDIC streams can use the standard 8 bit string factory
+    //
+    input->strFactory	= antlr3StringFactoryNew(input->encoding);
+
+    // Generic API that does not care about endianness.
+    //
+    input->istream->_LA         = antlr3EBCDICLA;       // Return the UTF32 character at offset n (1 based)    
+    input->charByteSize		= 1;	                // Size in bytes of characters in this stream.
+}
+
+/// \brief Return the input element assuming an 8 bit EBCDIC input
+///
+/// \param[in] input Input stream context pointer
+/// \param[in] la 1 based offset of next input stream element
+///
+/// \return Next input character in internal ANTLR3 encoding (UTF32) after translation
+///         from EBCDIC to ASCII
+///
+static ANTLR3_UCHAR 
+antlr3EBCDICLA(pANTLR3_INT_STREAM is, ANTLR3_INT32 la)
+{
+    pANTLR3_INPUT_STREAM input;
+
+    input   = ((pANTLR3_INPUT_STREAM) (is->super));
+
+    if	(( ((pANTLR3_UINT8)input->nextChar) + la - 1) >= (((pANTLR3_UINT8)input->data) + input->sizeBuf))
+    {
+        return	ANTLR3_CHARSTREAM_EOF;
+    }
+    else
+    {
+        // Translate the required character via the constant conversion table
+        //
+        return	e2a[(*((pANTLR3_UINT8)input->nextChar + la - 1))];
+    }
+}
\ No newline at end of file
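A few spot checks of the e2a table above, read straight from the rows quoted in this hunk (the test harness itself is hypothetical and assumes the e2a definition is visible):

    #include <assert.h>

    /* EBCDIC 'A' (0xC1), 'a' (0x81), '0' (0xF0) and space (0x40)
     * map to their ISO-8859-1 counterparts via e2a.
     */
    static void testE2A(void)
    {
        assert(e2a[0xC1] == 'A');
        assert(e2a[0x81] == 'a');
        assert(e2a[0xF0] == '0');
        assert(e2a[0x40] == ' ');
    }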
diff --git a/antlr-3.4/runtime/C/src/antlr3intstream.c b/runtime/C/src/antlr3intstream.c
similarity index 100%
rename from antlr-3.4/runtime/C/src/antlr3intstream.c
rename to runtime/C/src/antlr3intstream.c
diff --git a/runtime/C/src/antlr3lexer.c b/runtime/C/src/antlr3lexer.c
new file mode 100644
index 0000000..962163d
--- /dev/null
+++ b/runtime/C/src/antlr3lexer.c
@@ -0,0 +1,905 @@
+/** \file
+ *
+ * Base implementation of an antlr 3 lexer.
+ *
+ * An ANTLR3 lexer implements a base recognizer, a token source and
+ * a lexer interface. It constructs a base recognizer with default
+ * functions, then overrides any of these that are lexer specific (replacing
+ * the usual default implementation of the base recognizer).
+ */
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3lexer.h>
+
+static void					mTokens						(pANTLR3_LEXER lexer);
+static void					setCharStream				(pANTLR3_LEXER lexer,  pANTLR3_INPUT_STREAM input);
+static void					pushCharStream				(pANTLR3_LEXER lexer,  pANTLR3_INPUT_STREAM input);
+static void					popCharStream				(pANTLR3_LEXER lexer);
+
+static void					emitNew						(pANTLR3_LEXER lexer,  pANTLR3_COMMON_TOKEN token);
+static pANTLR3_COMMON_TOKEN emit						(pANTLR3_LEXER lexer);
+static ANTLR3_BOOLEAN	    matchs						(pANTLR3_LEXER lexer, ANTLR3_UCHAR * string);
+static ANTLR3_BOOLEAN	    matchc						(pANTLR3_LEXER lexer, ANTLR3_UCHAR c);
+static ANTLR3_BOOLEAN	    matchRange					(pANTLR3_LEXER lexer, ANTLR3_UCHAR low, ANTLR3_UCHAR high);
+static void					matchAny					(pANTLR3_LEXER lexer);
+static void					recover						(pANTLR3_LEXER lexer);
+static ANTLR3_UINT32	    getLine						(pANTLR3_LEXER lexer);
+static ANTLR3_MARKER	    getCharIndex				(pANTLR3_LEXER lexer);
+static ANTLR3_UINT32	    getCharPositionInLine		(pANTLR3_LEXER lexer);
+static pANTLR3_STRING	    getText						(pANTLR3_LEXER lexer);
+static pANTLR3_COMMON_TOKEN nextToken					(pANTLR3_TOKEN_SOURCE toksource);
+
+static void					displayRecognitionError	    (pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 * tokenNames);
+static void					reportError					(pANTLR3_BASE_RECOGNIZER rec);
+static void *				getCurrentInputSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream);
+static void *				getMissingSymbol			(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
+															ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow);
+
+static void					reset						(pANTLR3_BASE_RECOGNIZER rec);
+
+static void					freeLexer					(pANTLR3_LEXER lexer);
+
+
+ANTLR3_API pANTLR3_LEXER
+antlr3LexerNew(ANTLR3_UINT32 sizeHint, pANTLR3_RECOGNIZER_SHARED_STATE state)
+{
+    pANTLR3_LEXER   lexer;
+    pANTLR3_COMMON_TOKEN	specialT;
+
+	/* Allocate memory
+	*/
+	lexer   = (pANTLR3_LEXER) ANTLR3_MALLOC(sizeof(ANTLR3_LEXER));
+
+	if	(lexer == NULL)
+	{
+		return	NULL;
+	}
+
+	/* Now we need to create the base recognizer
+	*/
+	lexer->rec	    =  antlr3BaseRecognizerNew(ANTLR3_TYPE_LEXER, sizeHint, state);
+
+	if	(lexer->rec == NULL)
+	{
+		lexer->free(lexer);
+		return	NULL;
+	}
+	lexer->rec->super  =  lexer;
+
+	lexer->rec->displayRecognitionError	    = displayRecognitionError;
+	lexer->rec->reportError					= reportError;
+	lexer->rec->reset						= reset;
+	lexer->rec->getCurrentInputSymbol		= getCurrentInputSymbol;
+	lexer->rec->getMissingSymbol			= getMissingSymbol;
+
+	/* Now install the token source interface
+	*/
+	if	(lexer->rec->state->tokSource == NULL) 
+	{
+		lexer->rec->state->tokSource	= (pANTLR3_TOKEN_SOURCE)ANTLR3_CALLOC(1, sizeof(ANTLR3_TOKEN_SOURCE));
+
+		if	(lexer->rec->state->tokSource == NULL) 
+		{
+			lexer->rec->free(lexer->rec);
+			lexer->free(lexer);
+
+			return	NULL;
+		}
+		lexer->rec->state->tokSource->super    =  lexer;
+
+		/* Install the default nextToken() method, which may be overridden
+		 * by generated code, or by anything else in fact.
+		 */
+		lexer->rec->state->tokSource->nextToken	    =  nextToken;
+		lexer->rec->state->tokSource->strFactory    = NULL;
+
+		lexer->rec->state->tokFactory				= NULL;
+	}
+
+    /* Install the lexer API
+     */
+    lexer->setCharStream			=  setCharStream;
+    lexer->mTokens					= (void (*)(void *))(mTokens);
+    lexer->pushCharStream			=  pushCharStream;
+    lexer->popCharStream			=  popCharStream;
+    lexer->emit						=  emit;
+    lexer->emitNew					=  emitNew;
+    lexer->matchs					=  matchs;
+    lexer->matchc					=  matchc;
+    lexer->matchRange				=  matchRange;
+    lexer->matchAny					=  matchAny;
+    lexer->recover					=  recover;
+    lexer->getLine					=  getLine;
+    lexer->getCharIndex				=  getCharIndex;
+    lexer->getCharPositionInLine    =  getCharPositionInLine;
+    lexer->getText					=  getText;
+    lexer->free						=  freeLexer;
+    
+    /* Initialise the eof token
+     */
+    specialT					= &(lexer->rec->state->tokSource->eofToken);
+    antlr3SetTokenAPI	  (specialT);
+    specialT->setType	  (specialT, ANTLR3_TOKEN_EOF);
+    specialT->factoryMade		= ANTLR3_TRUE;					// Prevent things trying to free() it
+    specialT->strFactory        = NULL;
+	specialT->textState			= ANTLR3_TEXT_NONE;
+	specialT->custom			= NULL;
+	specialT->user1				= 0;
+	specialT->user2				= 0;
+	specialT->user3				= 0;
+
+	// Initialize the skip token.
+	//
+    specialT					= &(lexer->rec->state->tokSource->skipToken);
+    antlr3SetTokenAPI	  (specialT);
+    specialT->setType	  (specialT, ANTLR3_TOKEN_INVALID);
+    specialT->factoryMade		= ANTLR3_TRUE;					// Prevent things trying to free() it
+    specialT->strFactory        = NULL;
+	specialT->custom			= NULL;
+	specialT->user1				= 0;
+	specialT->user2				= 0;
+	specialT->user3				= 0;
+    return  lexer;
+}
+
+static void
+reset	(pANTLR3_BASE_RECOGNIZER rec)
+{
+    pANTLR3_LEXER   lexer;
+
+    lexer   = (pANTLR3_LEXER)rec->super;
+
+    lexer->rec->state->token			    = NULL;
+    lexer->rec->state->type			    = ANTLR3_TOKEN_INVALID;
+    lexer->rec->state->channel			    = ANTLR3_TOKEN_DEFAULT_CHANNEL;
+    lexer->rec->state->tokenStartCharIndex	    = -1;
+    lexer->rec->state->tokenStartCharPositionInLine = -1;
+    lexer->rec->state->tokenStartLine		    = -1;
+
+    lexer->rec->state->text	                    = NULL;
+
+    // OK - that's all hunky dory, but we may well have had
+    // a token factory that needs a reset. Do that here
+    //
+    if  (lexer->rec->state->tokFactory != NULL)
+    {
+        lexer->rec->state->tokFactory->reset(lexer->rec->state->tokFactory);
+    }
+}
+
+///
+/// \brief
+/// Returns the next available token from the current input stream.
+/// 
+/// \param toksource
+/// Points to the implementation of a token source. The lexer is 
+/// addressed by the super structure pointer.
+/// 
+/// \returns
+/// The next token in the current input stream or the EOF token
+/// if there are no more tokens.
+/// 
+/// \see nextToken
+///
+ANTLR3_INLINE static pANTLR3_COMMON_TOKEN
+nextTokenStr	    (pANTLR3_TOKEN_SOURCE toksource)
+{
+    pANTLR3_LEXER                   lexer;
+    pANTLR3_RECOGNIZER_SHARED_STATE state;
+    pANTLR3_INPUT_STREAM            input;
+    pANTLR3_INT_STREAM              istream;
+
+    lexer   = (pANTLR3_LEXER)(toksource->super);
+    state   = lexer->rec->state;
+    input   = lexer->input;
+    istream = input->istream;
+
+    /// Loop until we get a non skipped token or EOF
+    ///
+    for	(;;)
+    {
+        // Get rid of any previous token (the token factory takes care of
+        // any de-allocation when this token is finally used up).
+        //
+        state->token		    = NULL;
+        state->error		    = ANTLR3_FALSE;	    // Start out without an exception
+        state->failed		    = ANTLR3_FALSE;
+
+        // Now call the matching rules and see if we can generate a new token
+        //
+        for	(;;)
+        {
+            // Record the start of the token in our input stream.
+            //
+            state->channel			    = ANTLR3_TOKEN_DEFAULT_CHANNEL;
+            state->tokenStartCharIndex	            = (ANTLR3_MARKER)(((pANTLR3_UINT8)input->nextChar));
+            state->tokenStartCharPositionInLine     = input->charPositionInLine;
+            state->tokenStartLine		    = input->line;
+            state->text			            = NULL;
+            state->custom                           = NULL;
+            state->user1                            = 0;
+            state->user2                            = 0;
+            state->user3                            = 0;
+
+            if  (istream->_LA(istream, 1) == ANTLR3_CHARSTREAM_EOF)
+            {
+                // Reached the end of the current stream, nothing more to do if this is
+                // the last in the stack.
+                //
+                pANTLR3_COMMON_TOKEN    teof = &(toksource->eofToken);
+
+                teof->setStartIndex (teof, lexer->getCharIndex(lexer));
+                teof->setStopIndex  (teof, lexer->getCharIndex(lexer));
+                teof->setLine	    (teof, lexer->getLine(lexer));
+                teof->factoryMade = ANTLR3_TRUE;	// This isn't really manufactured but it stops things from trying to free it
+                return  teof;
+            }
+
+            state->token		= NULL;
+            state->error		= ANTLR3_FALSE;	    // Start out without an exception
+            state->failed		= ANTLR3_FALSE;
+
+            // Call the generated lexer, see if it can get a new token together.
+            //
+            lexer->mTokens(lexer->ctx);
+
+            if  (state->error  == ANTLR3_TRUE)
+            {
+                // Recognition exception, report it and try to recover.
+                //
+                state->failed	    = ANTLR3_TRUE;
+                lexer->rec->reportError(lexer->rec);
+                lexer->recover(lexer); 
+            }
+            else
+            {
+                if (state->token == NULL)
+                {
+                    // Emit the real token, which adds it in to the token stream basically
+                    //
+                    emit(lexer);
+                }
+                else if	(state->token ==  &(toksource->skipToken))
+                {
+                    // A real token could have been generated, but "Computer says naaaaah" and
+                    // it is just something we need to skip altogether.
+                    //
+                    continue;
+                }
+
+                // Good token, not skipped, not EOF token
+                //
+                return  state->token;
+            }
+        }
+    }
+}
+
+/**
+ * \brief
+ * Default implementation of the nextToken() call for a lexer.
+ * 
+ * \param toksource
+ * Points to the implementation of a token source. The lexer is 
+ * addressed by the super structure pointer.
+ * 
+ * \returns
+ * The next token in the current input stream or the EOF token
+ * if there are no more tokens in any input stream in the stack.
+ * 
+ * \see nextTokenStr
+ */
+static pANTLR3_COMMON_TOKEN
+nextToken	    (pANTLR3_TOKEN_SOURCE toksource)
+{
+	pANTLR3_COMMON_TOKEN tok;
+
+	// Find the next token in the current stream
+	//
+	tok = nextTokenStr(toksource);
+
+	// If we got to the EOF token then switch to the previous
+	// input stream if there were any and just return the
+	// EOF if there are none. We must check the next token
+	// in any outstanding input stream we pop into the active
+	// role to see if it was sitting at EOF after PUSHing the
+	// stream we just consumed, otherwise we will return EOF
+	// on the reinstalled input stream, when in actual fact
+	// there might be more input streams to POP before the
+	// real EOF of the whole logical input stream. Hence we
+	// use a while loop here until we find something in the stream
+	// that isn't EOF or we reach the actual end of the last input
+	// stream on the stack.
+	//
+	while	((tok != NULL) && (tok->type == ANTLR3_TOKEN_EOF))
+	{
+		pANTLR3_LEXER   lexer;
+
+		lexer   = (pANTLR3_LEXER)(toksource->super);
+
+		if  (lexer->rec->state->streams != NULL && lexer->rec->state->streams->size(lexer->rec->state->streams) > 0)
+		{
+			// We have another input stream in the stack so we
+			// need to revert to it, then resume the loop to check
+			// it wasn't sitting at EOF itself.
+			//
+			lexer->popCharStream(lexer);
+			tok = nextTokenStr(toksource);
+		}
+		else
+		{
+			// There were no more streams on the input stack
+			// so this EOF is the 'real' logical EOF for
+			// the input stream. So we just exit the loop and 
+			// return the EOF we have found.
+			//
+			break;
+		}
+		
+	}
+
+	// return whatever token we have, which may be EOF
+	//
+	return  tok;
+}
+
+ANTLR3_API pANTLR3_LEXER
+antlr3LexerNewStream(ANTLR3_UINT32 sizeHint, pANTLR3_INPUT_STREAM input, pANTLR3_RECOGNIZER_SHARED_STATE state)
+{
+    pANTLR3_LEXER   lexer;
+
+    // Create a basic lexer first
+    //
+    lexer   = antlr3LexerNew(sizeHint, state);
+
+    if	(lexer != NULL) 
+    {
+		// Install the input stream and reset the lexer
+		//
+		setCharStream(lexer, input);
+    }
+
+    return  lexer;
+}
+
+static void mTokens	    (pANTLR3_LEXER lexer)
+{
+    if	(lexer)	    // Fool compiler, avoid pragmas
+    {
+		ANTLR3_FPRINTF(stderr, "lexer->mTokens(): Error: No lexer rules were added to the lexer yet!\n");
+    }
+}
+
+static void			
+reportError		    (pANTLR3_BASE_RECOGNIZER rec)
+{
+    // Indicate this recognizer had an error while processing.
+	//
+	rec->state->errorCount++;
+
+    rec->displayRecognitionError(rec, rec->state->tokenNames);
+}
+
+#ifdef	ANTLR3_WINDOWS
+#pragma warning( disable : 4100 )
+#endif
+
+/** Default lexer error handler (works for 8 bit streams only!!!)
+ */
+static void			
+displayRecognitionError	    (pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_UINT8 * tokenNames)
+{
+    pANTLR3_LEXER			lexer;
+	pANTLR3_EXCEPTION	    ex;
+	pANTLR3_STRING			ftext;
+
+    lexer   = (pANTLR3_LEXER)(recognizer->super);
+	ex		= lexer->rec->state->exception;
+
+	// See if there is a 'filename' we can use
+    //
+    if	(ex->name == NULL)
+    {
+		ANTLR3_FPRINTF(stderr, "-unknown source-(");
+    }
+    else
+    {
+		ftext = ex->streamName->to8(ex->streamName);
+		ANTLR3_FPRINTF(stderr, "%s(", ftext->chars);
+    }
+
+    ANTLR3_FPRINTF(stderr, "%d) ", recognizer->state->exception->line);
+    ANTLR3_FPRINTF(stderr, ": lexer error %d :\n\t%s at offset %d, ", 
+						ex->type,
+						(pANTLR3_UINT8)	   (ex->message),
+					    ex->charPositionInLine+1
+		    );
+	{
+		ANTLR3_INT32	width;
+
+		width	= ANTLR3_UINT32_CAST(( (pANTLR3_UINT8)(lexer->input->data) + (lexer->input->size(lexer->input) )) - (pANTLR3_UINT8)(ex->index));
+
+		if	(width >= 1)
+		{			
+			if	(isprint(ex->c))
+			{
+				ANTLR3_FPRINTF(stderr, "near '%c' :\n", ex->c);
+			}
+			else
+			{
+				ANTLR3_FPRINTF(stderr, "near char(%#02X) :\n", (ANTLR3_UINT8)(ex->c));
+			}
+			ANTLR3_FPRINTF(stderr, "\t%.*s\n", width > 20 ? 20 : width ,((pANTLR3_UINT8)ex->index));
+		}
+		else
+		{
+			ANTLR3_FPRINTF(stderr, "(end of input).\n\t This indicates a poorly specified lexer RULE\n\t or unterminated input element such as: \"STRING[\"]\n");
+			ANTLR3_FPRINTF(stderr, "\t The lexer was matching from line %d, offset %d, which\n\t ", 
+								(ANTLR3_UINT32)(lexer->rec->state->tokenStartLine),
+								(ANTLR3_UINT32)(lexer->rec->state->tokenStartCharPositionInLine)
+								);
+			width = ANTLR3_UINT32_CAST(((pANTLR3_UINT8)(lexer->input->data)+(lexer->input->size(lexer->input))) - (pANTLR3_UINT8)(lexer->rec->state->tokenStartCharIndex));
+
+			if	(width >= 1)
+			{
+				ANTLR3_FPRINTF(stderr, "looks like this:\n\t\t%.*s\n", width > 20 ? 20 : width ,(pANTLR3_UINT8)(lexer->rec->state->tokenStartCharIndex));
+			}
+			else
+			{
+				ANTLR3_FPRINTF(stderr, "is also the end of the line, so you must check your lexer rules\n");
+			}
+		}
+	}
+}
+
+static void setCharStream   (pANTLR3_LEXER lexer,  pANTLR3_INPUT_STREAM input)
+{
+    /* Install the input interface
+     */
+    lexer->input	= input;
+
+    /* We may need a token factory for the lexer; we don't destroy any existing factory
+     * until the lexer is destroyed, as people may still be using the tokens it produced.
+     * TODO: Later I will provide a dup() method for a token so that it can extract itself
+     * out of the factory. 
+     */
+    if	(lexer->rec->state->tokFactory == NULL)
+    {
+	lexer->rec->state->tokFactory	= antlr3TokenFactoryNew(input);
+    }
+    else
+    {
+	/* When the input stream is being changed on the fly, rather than
+	 * at the start of a new lexer, then we must tell the tokenFactory
+	 * which input stream to adorn the tokens with so that when they
+	 * are asked to provide their original input strings they can
+	 * do so from the correct text stream.
+	 */
+	lexer->rec->state->tokFactory->setInputStream(lexer->rec->state->tokFactory, input);
+    }
+
+    /* Propagate the string factory so that we preserve the encoding form from
+     * the input stream.
+     */
+    if	(lexer->rec->state->tokSource->strFactory == NULL)
+    {
+        lexer->rec->state->tokSource->strFactory	= input->strFactory;
+
+        // Set the newly acquired string factory up for our pre-made tokens
+        // for EOF.
+        //
+        if (lexer->rec->state->tokSource->eofToken.strFactory == NULL)
+        {
+            lexer->rec->state->tokSource->eofToken.strFactory = input->strFactory;
+        }
+    }
+
+    /* This is a lexer, install the appropriate exception creator
+     */
+    lexer->rec->exConstruct = antlr3RecognitionExceptionNew;
+
+    /* Set the current token to nothing
+     */
+    lexer->rec->state->token		= NULL;
+    lexer->rec->state->text			= NULL;
+    lexer->rec->state->tokenStartCharIndex	= -1;
+
+    /* Copy the name of the char stream to the token source
+     */
+    lexer->rec->state->tokSource->fileName = input->fileName;
+}
+
+/*!
+ * \brief
+ * Change to a new input stream, remembering the old one.
+ * 
+ * \param lexer
+ * Pointer to the lexer instance to switch input streams for.
+ * 
+ * \param input
+ * New input stream to install as the current one.
+ * 
+ * Switches the current character input stream to 
+ * a new one, saving the old one, which we will revert to at the end of this 
+ * new one.
+ */
+static void
+pushCharStream  (pANTLR3_LEXER lexer,  pANTLR3_INPUT_STREAM input)
+{
+	// Do we need a new input stream stack?
+	//
+	if	(lexer->rec->state->streams == NULL)
+	{
+		// This is the first call to stack a new
+		// stream and so we must create the stack first.
+		//
+		lexer->rec->state->streams = antlr3StackNew(0);
+
+		if  (lexer->rec->state->streams == NULL)
+		{
+			// Could not do this, we just fail to push it.
+			// TODO: Consider if this is what we want to do, but then
+			//       any programmer can override this method to do something else.
+			return;
+		}
+	}
+
+	// We have a stack, so we can save the current input stream
+	// into it.
+	//
+	lexer->input->istream->mark(lexer->input->istream);
+	lexer->rec->state->streams->push(lexer->rec->state->streams, lexer->input, NULL);
+
+	// And now we can install this new one
+	//
+	lexer->setCharStream(lexer, input);
+}
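A typical use of pushCharStream is an include-file directive handled in a lexer rule action: stack the new stream and let nextToken() pop back automatically when the included stream reaches EOF. The sketch below is an assumption about how generated C-target code would call this API; the INCLUDE rule, the LEXER macro and the antlr3FileStreamNew call are not part of this file:

    /* Hypothetical action body for an INCLUDE lexer rule in a C-target grammar. */
    {
        pANTLR3_INPUT_STREAM in;

        in = antlr3FileStreamNew((pANTLR3_UINT8)"included.txt", ANTLR3_ENC_8BIT);
        if (in != NULL)
        {
            LEXER->pushCharStream(LEXER, in);   /* nextToken() pops it back at EOF */
        }
    }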
+
+/*!
+ * \brief
+ * Stops using the current input stream and reverts to any prior
+ * input stream on the stack.
+ * 
+ * \param lexer
+ * Pointer to the lexer whose current input stream is to be abandoned.
+ * 
+ * Pointer to a function that abandons the current input stream, whether it
+ * is empty or not and reverts to the previous stacked input stream.
+ *
+ * \remark
+ * The function fails silently if there are no prior input streams.
+ */
+static void
+popCharStream   (pANTLR3_LEXER lexer)
+{
+    pANTLR3_INPUT_STREAM input;
+
+    // If we do not have a stream stack or we are already at the
+    // stack bottom, then do nothing.
+    //
+    if	(lexer->rec->state->streams != NULL && lexer->rec->state->streams->size(lexer->rec->state->streams) > 0)
+    {
+	// We just leave the current stream to its fate; we do not close
+	// it or anything, as we do not know what the programmer intended
+	// for it. This method can always be overridden, of course.
+	// So just find out what was currently saved on the stack and use
+	// that now, then pop it from the stack.
+	//
+	input	= (pANTLR3_INPUT_STREAM)(lexer->rec->state->streams->top);
+	lexer->rec->state->streams->pop(lexer->rec->state->streams);
+
+	// Now install the stream as the current one.
+	//
+	lexer->setCharStream(lexer, input);
+	lexer->input->istream->rewindLast(lexer->input->istream);
+    }
+    return;
+}
+
+static void emitNew	    (pANTLR3_LEXER lexer,  pANTLR3_COMMON_TOKEN token)
+{
+    lexer->rec->state->token    = token;	/* Voila!   */
+}
+
+static pANTLR3_COMMON_TOKEN
+emit	    (pANTLR3_LEXER lexer)
+{
+    pANTLR3_COMMON_TOKEN	token;
+
+    /* We could check pointers to token factories and so on, but
+    * we are in code that we want to run as fast as possible
+    * so we are not checking any errors. So make sure you have installed an input stream before
+    * trying to emit a new token.
+    */
+    token   = lexer->rec->state->tokFactory->newToken(lexer->rec->state->tokFactory);
+	if (token == NULL) { return NULL; }
+
+    /* Install the supplied information, and some other bits we already know
+    * get added automatically, such as the input stream it is associated with
+    * (though it can all be overridden of course)
+    */
+    token->type		    = lexer->rec->state->type;
+    token->channel	    = lexer->rec->state->channel;
+    token->start	    = lexer->rec->state->tokenStartCharIndex;
+    token->stop		    = lexer->getCharIndex(lexer) - 1;
+    token->line		    = lexer->rec->state->tokenStartLine;
+    token->charPosition	= lexer->rec->state->tokenStartCharPositionInLine;
+
+    if	(lexer->rec->state->text != NULL)
+    {
+        token->textState	    = ANTLR3_TEXT_STRING;
+        token->tokText.text	    = lexer->rec->state->text;
+    }
+    else
+    {
+        token->textState	= ANTLR3_TEXT_NONE;
+    }
+    token->lineStart	= lexer->input->currentLine;
+    token->user1	= lexer->rec->state->user1;
+    token->user2	= lexer->rec->state->user2;
+    token->user3	= lexer->rec->state->user3;
+    token->custom	= lexer->rec->state->custom;
+
+    lexer->rec->state->token	    = token;
+
+    return  token;
+}
+
+/**
+ * Free the resources allocated by a lexer
+ */
+static void 
+freeLexer    (pANTLR3_LEXER lexer)
+{
+	// This may have been a delegate or delegator lexer, in which case the
+	// state may already have been freed (and set to NULL therefore)
+	// so we ignore the state if we don't have it.
+	//
+	if	(lexer->rec->state != NULL)
+	{
+		if	(lexer->rec->state->streams != NULL)
+		{
+			lexer->rec->state->streams->free(lexer->rec->state->streams);
+		}
+		if	(lexer->rec->state->tokFactory != NULL)
+		{
+			lexer->rec->state->tokFactory->close(lexer->rec->state->tokFactory);
+			lexer->rec->state->tokFactory = NULL;
+		}
+		if	(lexer->rec->state->tokSource != NULL)
+		{
+			ANTLR3_FREE(lexer->rec->state->tokSource);
+			lexer->rec->state->tokSource = NULL;
+		}
+	}
+	if	(lexer->rec != NULL)
+	{
+		lexer->rec->free(lexer->rec);
+		lexer->rec = NULL;
+	}
+	ANTLR3_FREE(lexer);
+}
+
+/** Implementation of matchs for the lexer, overrides any
+ *  base implementation in the base recognizer. 
+ *
+ *  \remark
+ *  Note that the generated code lays down arrays of ints for constant
+ *  strings so that they are in UTF32 form!
+ */
+static ANTLR3_BOOLEAN
+matchs(pANTLR3_LEXER lexer, ANTLR3_UCHAR * string)
+{
+	while   (*string != ANTLR3_STRING_TERMINATOR)
+	{
+		if  (lexer->input->istream->_LA(lexer->input->istream, 1) != (*string))
+		{
+			if	(lexer->rec->state->backtracking > 0)
+			{
+				lexer->rec->state->failed = ANTLR3_TRUE;
+				return ANTLR3_FALSE;
+			}
+
+			lexer->rec->exConstruct(lexer->rec);
+			lexer->rec->state->failed	 = ANTLR3_TRUE;
+
+			/* TODO: Implement exception creation more fully perhaps
+			 */
+			lexer->recover(lexer);
+			return  ANTLR3_FALSE;
+		}
+
+		/* Matched correctly, do consume it
+		 */
+		lexer->input->istream->consume(lexer->input->istream);
+		string++;
+
+		/* Reset any failed indicator
+		 */
+		lexer->rec->state->failed = ANTLR3_FALSE;
+	}
+
+
+	return  ANTLR3_TRUE;
+}
+
+/** Implementation of matchc for the lexer, overrides any
+ *  base implementation in the base recognizer. 
+ *
+ *  \remark
+ *  Note that the generated code lays down arrays of ints for constant
+ *  strings so that they are in UTF32 form!
+ */
+static ANTLR3_BOOLEAN
+matchc(pANTLR3_LEXER lexer, ANTLR3_UCHAR c)
+{
+	if	(lexer->input->istream->_LA(lexer->input->istream, 1) == c)
+	{
+		/* Matched correctly, do consume it
+		 */
+		lexer->input->istream->consume(lexer->input->istream);
+
+		/* Reset any failed indicator
+		 */
+		lexer->rec->state->failed = ANTLR3_FALSE;
+
+		return	ANTLR3_TRUE;
+	}
+
+	/* Failed to match, exception and recovery time.
+	 */
+	if	(lexer->rec->state->backtracking > 0)
+	{
+		lexer->rec->state->failed  = ANTLR3_TRUE;
+		return	ANTLR3_FALSE;
+	}
+
+	lexer->rec->exConstruct(lexer->rec);
+
+	/* TODO: Implement exception creation more fully perhaps
+	 */
+	lexer->recover(lexer);
+
+	return  ANTLR3_FALSE;
+}
+
+/** Implementation of match range for the lexer, overrides any
+ *  base implementation in the base recognizer. 
+ *
+ *  \remark
+ *  Note that the generated code lays down arrays of ints for constant
+ *  strings so that they are in UTF32 form!
+ */
+static ANTLR3_BOOLEAN
+matchRange(pANTLR3_LEXER lexer, ANTLR3_UCHAR low, ANTLR3_UCHAR high)
+{
+    ANTLR3_UCHAR    c;
+
+    /* What is in the stream at the moment?
+     */
+    c	= lexer->input->istream->_LA(lexer->input->istream, 1);
+    if	( c >= low && c <= high)
+    {
+	/* Matched correctly, consume it
+	 */
+	lexer->input->istream->consume(lexer->input->istream);
+
+	/* Reset any failed indicator
+	 */
+	lexer->rec->state->failed = ANTLR3_FALSE;
+
+	return	ANTLR3_TRUE;
+    }
+    
+    /* Failed to match, exception and recovery time.
+     */
+
+    if	(lexer->rec->state->backtracking > 0)
+    {
+	lexer->rec->state->failed  = ANTLR3_TRUE;
+	return	ANTLR3_FALSE;
+    }
+
+    lexer->rec->exConstruct(lexer->rec);
+
+    /* TODO: Implement exception creation more fully
+     */
+    lexer->recover(lexer);
+
+    return  ANTLR3_FALSE;
+}
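+
+// Illustrative usage sketch only (not part of the runtime). Generated lexer
+// rules drive the three matchers above; the member names matchs/matchc/
+// matchRange on pANTLR3_LEXER are assumed here to be the installed form of
+// these statics, which is the usual pattern in this runtime.
+//
+#if 0
+static void exampleMatchers(pANTLR3_LEXER lexer)
+{
+    // Keyword literals are laid down as UTF32 (ANTLR3_UCHAR) arrays,
+    // terminated by ANTLR3_STRING_TERMINATOR.
+    static ANTLR3_UCHAR lit_if[] = { 'i', 'f', ANTLR3_STRING_TERMINATOR };
+
+    lexer->matchs(lexer, lit_if);          // match the literal "if"
+    lexer->matchc(lexer, '(');             // match a single character
+    lexer->matchRange(lexer, 'a', 'z');    // match one character in [a-z]
+}
+#endif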
+
+static void
+matchAny	    (pANTLR3_LEXER lexer)
+{
+    lexer->input->istream->consume(lexer->input->istream);
+}
+
+static void
+recover	    (pANTLR3_LEXER lexer)
+{
+    lexer->input->istream->consume(lexer->input->istream);
+}
+
+static ANTLR3_UINT32
+getLine	    (pANTLR3_LEXER lexer)
+{
+    return  lexer->input->getLine(lexer->input);
+}
+
+static ANTLR3_UINT32
+getCharPositionInLine	(pANTLR3_LEXER lexer)
+{
+    return  lexer->input->charPositionInLine;
+}
+
+static ANTLR3_MARKER	getCharIndex	    (pANTLR3_LEXER lexer)
+{
+    return lexer->input->istream->index(lexer->input->istream);
+}
+
+static pANTLR3_STRING
+getText	    (pANTLR3_LEXER lexer)
+{
+	if (lexer->rec->state->text)
+	{
+		return	lexer->rec->state->text;
+
+	}
+	return  lexer->input->substr(
+									lexer->input, 
+									lexer->rec->state->tokenStartCharIndex,
+									lexer->getCharIndex(lexer) - lexer->input->charByteSize
+							);
+
+}
+
+static void *				
+getCurrentInputSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream)
+{
+	return NULL;
+}
+
+static void *				
+getMissingSymbol			(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
+									ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow)
+{
+	return NULL;
+}
diff --git a/antlr-3.4/runtime/C/src/antlr3parser.c b/runtime/C/src/antlr3parser.c
similarity index 100%
rename from antlr-3.4/runtime/C/src/antlr3parser.c
rename to runtime/C/src/antlr3parser.c
diff --git a/runtime/C/src/antlr3rewritestreams.c b/runtime/C/src/antlr3rewritestreams.c
new file mode 100644
index 0000000..8da3011
--- /dev/null
+++ b/runtime/C/src/antlr3rewritestreams.c
@@ -0,0 +1,844 @@
+/// \file
+/// Implementation of token/tree streams that are used by the
+/// tree re-write rules to manipulate the tokens and trees produced
+/// by rules that are subject to rewrite directives.
+///
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3rewritestreams.h>
+
+// Static support function forward declarations for the stream types.
+//
+static	void				reset			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream); 
+static	void				add				(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el, void (ANTLR3_CDECL *freePtr)(void *));
+static	void *				next			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	pANTLR3_BASE_TREE	nextTree		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	void *				nextToken		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	void *				_next			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	void *				dupTok			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el);
+static	void *				dupTree			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el);
+static	void *				dupTreeNode		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el);
+static	pANTLR3_BASE_TREE	toTree			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element);
+static	pANTLR3_BASE_TREE	toTreeNode		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element);
+static	ANTLR3_BOOLEAN		hasNext			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	pANTLR3_BASE_TREE	nextNode		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	pANTLR3_BASE_TREE	nextNodeNode	(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	pANTLR3_BASE_TREE	nextNodeToken	(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	ANTLR3_UINT32		size			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	void *				getDescription	(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	void				freeRS			(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+static	void				expungeRS		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream);
+
+
+// Place a now unused rewrite stream back on the rewrite stream pool
+// so we can reuse it if we need to.
+//
+static void
+freeRS	(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+	// Before placing the stream back in the pool, we
+	// need to clear any vector it has. This is so any
+	// free pointers that are associated with the
+	// entries are called.
+	//
+	if	(stream->elements != NULL)
+	{
+		// Factory generated vectors can be returned to the
+		// vector factory for later reuse.
+		//
+		if	(stream->elements->factoryMade == ANTLR3_TRUE)
+		{
+			pANTLR3_VECTOR_FACTORY factory = ((pANTLR3_COMMON_TREE_ADAPTOR)(stream->adaptor->super))->arboretum->vFactory;
+			factory->returnVector(factory, stream->elements);
+
+			stream->elements = NULL;
+		} 
+		else
+		{
+			// Other vectors we clear and allow to be reused if they come off the
+			// rewrite stream free stack and are reused.
+			//
+			stream->elements->clear(stream->elements);
+			stream->freeElements = ANTLR3_TRUE;
+		}
+	}
+	else
+	{
+		stream->freeElements = ANTLR3_FALSE; // Just in case
+	}
+
+	// Add the stream into the recognizer stream stack vector
+	// adding the stream memory free routine so that
+	// it is thrown away when the stack vector is destroyed
+	//
+	stream->rec->state->rStreams->add(stream->rec->state->rStreams, stream, (void(*)(void *))expungeRS);
+}
+
+/** Do special nilNode reuse detection for node streams.
+ */
+static void
+freeNodeRS(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+    pANTLR3_BASE_TREE tree;
+
+    // Before placing the stream back in the pool, we
+	// need to clear any vector it has. This is so any
+	// free pointers that are associated with the
+	// entries are called. However, if this particular function is called
+    // then we know that the entries in the stream are definitely
+    // tree nodes. Hence we check to see if any of them were nilNodes as
+    // if they were, we can reuse them.
+	//
+	if	(stream->elements != NULL)
+	{
+        // We have some elements to traverse
+        //
+        ANTLR3_UINT32 i;
+
+        for (i = 1; i<= stream->elements->count; i++)
+        {
+            tree = (pANTLR3_BASE_TREE)(stream->elements->elements[i-1].element);
+            if  (tree != NULL && tree->isNilNode(tree))
+            {
+                // Had to remove this for now, check is not comprehensive enough
+                // tree->reuse(tree);
+            }
+
+        }
+		// Factory generated vectors can be returned to the
+		// vector factory for later reuse.
+		//
+		if	(stream->elements->factoryMade == ANTLR3_TRUE)
+		{
+			pANTLR3_VECTOR_FACTORY factory = ((pANTLR3_COMMON_TREE_ADAPTOR)(stream->adaptor->super))->arboretum->vFactory;
+			factory->returnVector(factory, stream->elements);
+
+			stream->elements = NULL;
+		} 
+		else
+		{
+			stream->elements->clear(stream->elements);
+			stream->freeElements = ANTLR3_TRUE;
+		}
+	}
+	else
+	{
+        if  (stream->singleElement != NULL)
+        {
+            tree = (pANTLR3_BASE_TREE)(stream->singleElement);
+            if  (tree->isNilNode(tree))
+            {
+                // Had to remove this for now, check is not comprehensive enough
+              //   tree->reuse(tree);
+            }
+        }
+        stream->singleElement = NULL;
+		stream->freeElements = ANTLR3_FALSE; // Just in case
+	}
+
+	// Add the stream into the recognizer stream stack vector
+	// adding the stream memory free routine so that
+	// it is thrown away when the stack vector is destroyed
+	//
+	stream->rec->state->rStreams->add(stream->rec->state->rStreams, stream, (void(*)(void *))expungeRS);
+}
+static void
+expungeRS(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+
+	if (stream->freeElements == ANTLR3_TRUE && stream->elements != NULL)
+	{
+		stream->elements->free(stream->elements);
+	}
+	ANTLR3_FREE(stream);
+}
+
+// Functions for creating streams
+//
+static  pANTLR3_REWRITE_RULE_ELEMENT_STREAM 
+antlr3RewriteRuleElementStreamNewAE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description)
+{
+	pANTLR3_REWRITE_RULE_ELEMENT_STREAM	stream;
+
+	// First - do we already have a rewrite stream that was returned
+	// to the pool? If we do, then we will just reuse it by resetting
+	// the generic interface.
+	//
+	if	(rec->state->rStreams->count > 0)
+	{
+		// Remove the entry from the vector. We do not
+		// cause it to be freed by using remove.
+		//
+		stream = (pANTLR3_REWRITE_RULE_ELEMENT_STREAM)rec->state->rStreams->remove(rec->state->rStreams, rec->state->rStreams->count - 1);
+
+		// We found a stream we can reuse.
+		// If the stream had a vector, then it will have been cleared
+		// when the freeRS was called that put it in this stack
+		//
+	}
+	else
+	{
+		// Ok, we need to allocate a new one as there were none on the stack.
+		// First job is to create the memory we need.
+		//
+		stream	= (pANTLR3_REWRITE_RULE_ELEMENT_STREAM) ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_REWRITE_RULE_ELEMENT_STREAM)));
+
+		if	(stream == NULL)
+		{
+			return	NULL;
+		}
+		stream->elements		= NULL;
+		stream->freeElements	= ANTLR3_FALSE;
+	}
+
+	// Populate the generic interface
+	//
+	stream->rec				= rec;
+	stream->reset			= reset;
+	stream->add				= add;
+	stream->next			= next;
+	stream->nextTree		= nextTree;
+	stream->nextNode		= nextNode;
+	stream->nextToken		= nextToken;
+	stream->_next			= _next;
+	stream->hasNext			= hasNext;
+	stream->size			= size;
+	stream->getDescription  = getDescription;
+	stream->toTree			= toTree;
+	stream->free			= freeRS;
+	stream->singleElement	= NULL;
+
+	// Reset the stream to empty.
+	//
+
+	stream->cursor			= 0;
+	stream->dirty			= ANTLR3_FALSE;
+
+	// Install the description
+	//
+	stream->elementDescription	= description;
+
+	// Install the adaptor
+	//
+	stream->adaptor		= adaptor;
+
+	return stream;
+}
+
+static pANTLR3_REWRITE_RULE_ELEMENT_STREAM 
+antlr3RewriteRuleElementStreamNewAEE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement)
+{
+	pANTLR3_REWRITE_RULE_ELEMENT_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAE(adaptor, rec, description);
+
+	if (stream == NULL)
+	{
+		return NULL;
+	}
+
+	// Stream seems good so we need to add the supplied element
+	//
+	if	(oneElement != NULL)
+	{
+		stream->add(stream, oneElement, NULL);
+	}
+	return stream;
+}
+
+static pANTLR3_REWRITE_RULE_ELEMENT_STREAM 
+antlr3RewriteRuleElementStreamNewAEV(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector)
+{
+	pANTLR3_REWRITE_RULE_ELEMENT_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAE(adaptor, rec, description);
+
+	if (stream == NULL)
+	{
+		return stream;
+	}
+
+	// Stream seems good so we need to install the vector we were
+	// given. We assume that someone else is going to free the
+	// vector.
+	//
+	if	(stream->elements != NULL && stream->elements->factoryMade == ANTLR3_FALSE && stream->freeElements == ANTLR3_TRUE )
+	{
+		stream->elements->free(stream->elements);
+	}
+	stream->elements		= vector;
+	stream->freeElements	= ANTLR3_FALSE;
+	return stream;
+}
+
+// Token rewrite stream ...
+//
+ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
+antlr3RewriteRuleTOKENStreamNewAE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description)
+{
+	pANTLR3_REWRITE_RULE_TOKEN_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAE(adaptor, rec, description);
+
+	if (stream == NULL)
+	{
+		return stream;
+	}
+
+	// Install the token based overrides
+	//
+	stream->dup			= dupTok;
+	stream->nextNode	= nextNodeToken;
+
+	// nextNode for a token rewrite stream is nextNodeToken, which builds a new tree node around the token
+	//
+	return stream;
+}
+
+ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
+antlr3RewriteRuleTOKENStreamNewAEE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement)
+{
+	pANTLR3_REWRITE_RULE_TOKEN_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAEE(adaptor, rec, description, oneElement);
+
+	// Install the token based overrides
+	//
+	stream->dup			= dupTok;
+	stream->nextNode	= nextNodeToken;
+
+	// nextNode for a token rewrite stream is nextNodeToken, which builds a new tree node around the token
+	//
+	return stream;
+}
+
+ANTLR3_API pANTLR3_REWRITE_RULE_TOKEN_STREAM 
+antlr3RewriteRuleTOKENStreamNewAEV(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector)
+{
+	pANTLR3_REWRITE_RULE_TOKEN_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAEV(adaptor, rec, description, vector);
+
+	// Install the token based overrides
+	//
+	stream->dup			= dupTok;
+	stream->nextNode	= nextNodeToken;
+
+	// nextNode for a token rewrite stream is nextNodeToken, which builds a new tree node around the token
+	//
+	return stream;
+}
+
+// Subtree rewrite stream
+//
+ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
+antlr3RewriteRuleSubtreeStreamNewAE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description)
+{
+	pANTLR3_REWRITE_RULE_SUBTREE_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAE(adaptor, rec, description);
+
+	if (stream == NULL)
+	{
+		return stream;
+	}
+
+	// Install the subtree based overrides
+	//
+	stream->dup			= dupTree;
+	stream->nextNode	= nextNode;
+    stream->free        = freeNodeRS;
+	return stream;
+
+}
+ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
+antlr3RewriteRuleSubtreeStreamNewAEE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement)
+{
+	pANTLR3_REWRITE_RULE_SUBTREE_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAEE(adaptor, rec, description, oneElement);
+
+	if (stream == NULL)
+	{
+		return stream;
+	}
+
+	// Install the subtree based overrides
+	//
+	stream->dup			= dupTree;
+	stream->nextNode	= nextNode;
+    stream->free        = freeNodeRS;
+
+	return stream;
+}
+
+ANTLR3_API pANTLR3_REWRITE_RULE_SUBTREE_STREAM 
+antlr3RewriteRuleSubtreeStreamNewAEV(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector)
+{
+	pANTLR3_REWRITE_RULE_SUBTREE_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAEV(adaptor, rec, description, vector);
+
+	if (stream == NULL)
+	{
+		return NULL;
+	}
+
+	// Install the subtree based overrides
+	//
+	stream->dup			= dupTree;
+	stream->nextNode	= nextNode;
+    stream->free        = freeNodeRS;
+
+	return stream;
+}
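+
+// Illustrative usage sketch only (not part of the runtime): how a generated
+// tree rewriter might drive a subtree stream. 'adaptor', 'rec' and the tree
+// arguments are assumed to come from the generated parser; the names here are
+// hypothetical. Note the pooling behaviour described above: free() clears the
+// stream and parks it on rec->state->rStreams, and the next *NewAE call can
+// then reuse it instead of allocating.
+//
+#if 0
+static void exampleSubtreeStream(pANTLR3_BASE_TREE_ADAPTOR adaptor,
+                                 pANTLR3_BASE_RECOGNIZER   rec,
+                                 pANTLR3_BASE_TREE         t1,
+                                 pANTLR3_BASE_TREE         t2)
+{
+    pANTLR3_REWRITE_RULE_SUBTREE_STREAM s;
+
+    s = antlr3RewriteRuleSubtreeStreamNewAE(adaptor, rec, (pANTLR3_UINT8)"rule expr");
+
+    s->add(s, t1, NULL);        // first element is held as singleElement
+    s->add(s, t2, NULL);        // a second add promotes the stream to a vector
+
+    while (s->hasNext(s))
+    {
+        pANTLR3_BASE_TREE el = s->nextTree(s);
+        // ... use el in the rewrite ...
+    }
+
+    s->free(s);                 // returns the stream to the reuse pool
+}
+#endif
+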
+// Node rewrite stream ...
+//
+ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
+antlr3RewriteRuleNODEStreamNewAE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description)
+{
+	pANTLR3_REWRITE_RULE_NODE_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAE(adaptor, rec, description);
+
+	if (stream == NULL)
+	{
+		return stream;
+	}
+
+	// Install the node based overrides
+	//
+	stream->dup			= dupTreeNode;
+	stream->toTree		= toTreeNode;
+	stream->nextNode	= nextNodeNode;
+    stream->free        = freeNodeRS;
+
+	return stream;
+}
+
+ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
+antlr3RewriteRuleNODEStreamNewAEE(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, void * oneElement)
+{
+	pANTLR3_REWRITE_RULE_NODE_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAEE(adaptor, rec, description, oneElement);
+
+	// Install the node based overrides
+	//
+	stream->dup			= dupTreeNode;
+	stream->toTree		= toTreeNode;
+	stream->nextNode	= nextNodeNode;
+    stream->free        = freeNodeRS;
+
+	return stream;
+}
+
+ANTLR3_API pANTLR3_REWRITE_RULE_NODE_STREAM 
+antlr3RewriteRuleNODEStreamNewAEV(pANTLR3_BASE_TREE_ADAPTOR adaptor, pANTLR3_BASE_RECOGNIZER rec, pANTLR3_UINT8 description, pANTLR3_VECTOR vector)
+{
+	pANTLR3_REWRITE_RULE_NODE_STREAM	stream;
+
+	// First job is to create the memory we need.
+	//
+	stream	= antlr3RewriteRuleElementStreamNewAEV(adaptor, rec, description, vector);
+
+	// Install the Node based overrides
+	//
+	stream->dup			= dupTreeNode;
+	stream->toTree		= toTreeNode;
+	stream->nextNode	= nextNodeNode;
+    stream->free        = freeNodeRS;
+    
+	return stream;
+}
+
+//----------------------------------------------------------------------
+// Static support functions 
+
+/// Reset the condition of this stream so that it appears we have
+/// not consumed any of its elements.  Elements themselves are untouched.
+///
+static void		
+reset    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+	stream->dirty	= ANTLR3_TRUE;
+	stream->cursor	= 0;
+}
+
+// Add a new pANTLR3_BASE_TREE to this stream
+//
+static void		
+add	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el, void (ANTLR3_CDECL *freePtr)(void *))
+{
+	if (el== NULL)
+	{
+		return;
+	}
+	// As we may be reusing a stream, we may already have allocated
+	// a rewrite stream vector. If we have, then it will be empty if
+	// we have either zero or just one element in the rewrite stream
+	//
+	if (stream->elements != NULL && stream->elements->count > 0)
+	{
+		// We already have >1 entries in the stream. So we can just add this new element to the existing
+		// collection. 
+		//
+		stream->elements->add(stream->elements, el, freePtr);
+		return;
+	}
+	if (stream->singleElement == NULL)
+	{
+		stream->singleElement = el;
+		return;
+	}
+
+	// If we got here then we had only the one element so far
+	// and we must now create a vector to hold a collection of them
+	//
+	if	(stream->elements == NULL)
+	{
+        pANTLR3_VECTOR_FACTORY factory = ((pANTLR3_COMMON_TREE_ADAPTOR)(stream->adaptor->super))->arboretum->vFactory;
+
+        
+		stream->elements		= factory->newVector(factory);
+		stream->freeElements	= ANTLR3_TRUE;			// We 'ummed it, so we play it son.
+	}
+    
+	stream->elements->add	(stream->elements, stream->singleElement, freePtr);
+	stream->elements->add	(stream->elements, el, freePtr);
+	stream->singleElement	= NULL;
+
+	return;
+}
+
+/// Return the next element in the stream.  If out of elements, throw
+/// an exception unless size()==1.  If size is 1, then return elements[0].
+/// Return a duplicate node/subtree if stream is out of elements and
+/// size==1.  If we've already used the element, dup (dirty bit set).
+///
+static pANTLR3_BASE_TREE
+nextTree(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream) 
+{
+	ANTLR3_UINT32		n;
+	void			*  el;
+
+	n = stream->size(stream);
+
+	if ( stream->dirty || (stream->cursor >=n && n==1) ) 
+	{
+		// if out of elements and size is 1, dup
+		//
+		el = stream->_next(stream);
+		return (pANTLR3_BASE_TREE)stream->dup(stream, el);
+	}
+
+	// test size above then fetch
+	//
+	el = stream->_next(stream);
+	return (pANTLR3_BASE_TREE)el;
+}
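+
+// Illustrative only: with exactly one element in the stream, nextTree() may be
+// called repeatedly; once the cursor has passed the end (or reset() has set the
+// dirty flag) it hands back adaptor-duplicated copies via stream->dup() rather
+// than the original element. A minimal sketch:
+//
+#if 0
+static void exampleSingleElementDup(pANTLR3_REWRITE_RULE_ELEMENT_STREAM s,
+                                    pANTLR3_BASE_TREE                   node)
+{
+    pANTLR3_BASE_TREE first;
+    pANTLR3_BASE_TREE second;
+
+    s->add(s, node, NULL);
+    first  = s->nextTree(s);    // the element itself
+    second = s->nextTree(s);    // out of elements, size() == 1 => a duplicate
+}
+#endif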
+
+/// Return the next element for a caller that wants just the token
+///
+static	void *
+nextToken		(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+	return stream->_next(stream);
+}
+
+/// Return the next element in the stream.  If out of elements, throw
+/// an exception unless size()==1.  If size is 1, then return elements[0].
+///
+static void *	
+next	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+	ANTLR3_UINT32   s;
+
+	s = stream->size(stream);
+	if (stream->cursor >= s && s == 1)
+	{
+		pANTLR3_BASE_TREE el;
+
+		el = (pANTLR3_BASE_TREE)stream->_next(stream);
+
+		return	stream->dup(stream, el);
+	}
+
+	return stream->_next(stream);
+}
+
+/// Do the work of getting the next element, making sure that it's
+/// a tree node or subtree.  Deal with the optimization of single-
+/// element list versus list of size > 1.  Throw an exception (or something similar)
+/// if the stream is empty or we're out of elements and size>1.
+/// You can override in a 'subclass' if necessary.
+///
+static void *
+_next    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+	ANTLR3_UINT32		n;
+	pANTLR3_BASE_TREE	t;
+
+	n = stream->size(stream);
+
+	if (n == 0)
+	{
+		// This means that the stream is empty
+		//
+		return NULL;	// Caller must cope with this
+	}
+
+	// Traversed all the available elements already?
+	//
+	if (stream->cursor >= n)
+	{
+		if (n == 1)
+		{
+			// Special case when size is single element, it will just dup a lot
+			//
+			return stream->toTree(stream, stream->singleElement);
+		}
+
+		// Out of elements and the size is not 1, so we cannot assume
+		// that we just duplicate the entry n times (such as ID ent+ -> ^(ID ent)+)
+		// This means we ran out of elements earlier than was expected.
+		//
+		return NULL;	// Caller must cope with this
+	}
+
+	// Elements available either for duping or just available
+	//
+	if (stream->singleElement != NULL)
+	{
+		stream->cursor++;   // Cursor advances even for single element as this tells us to dup()
+		return stream->toTree(stream, stream->singleElement);
+	}
+
+	// More than just a single element so we extract it from the 
+	// vector.
+	//
+	t = stream->toTree(stream, stream->elements->get(stream->elements, stream->cursor));
+	stream->cursor++;
+	return t;
+}
+
+#ifdef ANTLR3_WINDOWS
+#pragma warning(push)
+#pragma warning(disable : 4100)
+#endif
+/// When constructing trees, sometimes we need to dup a token or AST
+/// subtree.  Dup'ing a token means just creating another AST node
+/// around it.  For trees, you must call the adaptor.dupTree().
+///
+static void *	
+dupTok	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * el)
+{
+	ANTLR3_FPRINTF(stderr, "dup() cannot be called on a token rewrite stream!!");
+	return NULL;
+}
+#ifdef ANTLR3_WINDOWS
+#pragma warning(pop)
+#endif
+
+/// When constructing trees, sometimes we need to dup a token or AST
+/// subtree.  Dup'ing a token means just creating another AST node
+/// around it.  For trees, you must call the adaptor.dupTree().
+///
+static void *	
+dupTree	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element)
+{
+	return stream->adaptor->dupNode(stream->adaptor, (pANTLR3_BASE_TREE)element);
+}
+
+#ifdef ANTLR3_WINDOWS
+#pragma warning(push)
+#pragma warning(disable : 4100)
+#endif
+/// When constructing trees, sometimes we need to dup a token or AST
+/// subtree.  Dup'ing a token means just creating another AST node
+/// around it.  For trees, you must call the adaptor.dupTree().
+///
+static void *	
+dupTreeNode	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element)
+{
+	ANTLR3_FPRINTF(stderr, "dup() cannot be called on a node rewrite stream!!!");
+	return NULL;
+}
+
+
+/// We don't explicitly convert to a tree unless the call goes to
+/// nextTree, which means rewrites are heterogeneous 
+///
+static pANTLR3_BASE_TREE	
+toTree   (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element)
+{
+	return (pANTLR3_BASE_TREE)element;
+}
+#ifdef ANTLR3_WINDOWS
+#pragma warning(pop)
+#endif
+
+/// Ensure stream emits trees; tokens must be converted to AST nodes.
+/// AST nodes can be passed through unmolested.
+///
+#ifdef ANTLR3_WINDOWS
+#pragma warning(push)
+#pragma warning(disable : 4100)
+#endif
+
+static pANTLR3_BASE_TREE	
+toTreeNode   (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream, void * element)
+{
+	return (pANTLR3_BASE_TREE)stream->adaptor->dupNode(stream->adaptor, (pANTLR3_BASE_TREE)element);
+}
+
+#ifdef ANTLR3_WINDOWS
+#pragma warning(pop)
+#endif
+
+/// Returns ANTLR3_TRUE if there is a next element available
+///
+static ANTLR3_BOOLEAN	
+hasNext  (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+	if (	(stream->singleElement != NULL && stream->cursor < 1)
+		||	(stream->elements != NULL && stream->cursor < stream->elements->size(stream->elements)))
+	{
+		return ANTLR3_TRUE;
+	}
+	else
+	{
+		return ANTLR3_FALSE;
+	}
+}
+
+/// Get the next token from the list and create a node for it
+/// This is the implementation for token streams.
+///
+static pANTLR3_BASE_TREE
+nextNodeToken(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+	return (pANTLR3_BASE_TREE)stream->adaptor->create(stream->adaptor, (pANTLR3_COMMON_TOKEN)stream->_next(stream));
+}
+
+static pANTLR3_BASE_TREE
+nextNodeNode(pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+	return (pANTLR3_BASE_TREE)stream->_next(stream);
+}
+
+/// Treat next element as a single node even if it's a subtree.
+/// This is used instead of next() when the result has to be a
+/// tree root node.  Also prevents us from duplicating recently-added
+/// children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
+/// must dup the type node, but ID has been added.
+///
+/// Referencing a rule result twice is ok; dup entire tree as
+/// we can't be adding trees; e.g., expr expr. 
+///
+static pANTLR3_BASE_TREE	
+nextNode (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+
+	ANTLR3_UINT32	n;
+	pANTLR3_BASE_TREE	el = (pANTLR3_BASE_TREE)stream->_next(stream);
+
+	n = stream->size(stream);
+	if (stream->dirty == ANTLR3_TRUE || (stream->cursor > n && n == 1))
+	{
+		// We are out of elements and the size is 1, which means we just 
+		// dup the node that we have
+		//
+		return	(pANTLR3_BASE_TREE)stream->adaptor->dupNode(stream->adaptor, el);
+	}
+
+	// We were not out of nodes, so the one we received is the one to return
+	//
+	return  el;
+}
+
+/// Number of elements available in the stream
+///
+static ANTLR3_UINT32	
+size	    (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+	ANTLR3_UINT32   n = 0;
+
+	/// Should be a count of one if singleElement is set. I copied this
+	/// logic from the java implementation, which I suspect is just guarding
+	/// against someone setting singleElement and forgetting to NULL it out
+	///
+	if (stream->singleElement != NULL)
+	{
+		n = 1;
+	}
+	else
+	{
+		if (stream->elements != NULL)
+		{
+			return (ANTLR3_UINT32)(stream->elements->count);
+		}
+	}
+	return n;
+}
+
+/// Returns the description string, or "<unknown source>" if none was supplied.
+///
+static void *	
+getDescription  (pANTLR3_REWRITE_RULE_ELEMENT_STREAM stream)
+{
+	if (stream->elementDescription == NULL)
+	{
+		stream->elementDescription = (void*)"<unknown source>";
+	}
+
+	return  stream->elementDescription;
+}
diff --git a/runtime/C/src/antlr3string.c b/runtime/C/src/antlr3string.c
new file mode 100644
index 0000000..36d0de0
--- /dev/null
+++ b/runtime/C/src/antlr3string.c
@@ -0,0 +1,1465 @@
+/** \file
+ * Implementation of the ANTLR3 string and string factory classes
+ */
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3string.h>
+
+/* Factory API
+ */
+static    pANTLR3_STRING    newRaw8	(pANTLR3_STRING_FACTORY factory);
+static    pANTLR3_STRING    newRawUTF16	(pANTLR3_STRING_FACTORY factory);
+static    pANTLR3_STRING    newSize8	(pANTLR3_STRING_FACTORY factory, ANTLR3_UINT32 size);
+static    pANTLR3_STRING    newSizeUTF16	(pANTLR3_STRING_FACTORY factory, ANTLR3_UINT32 size);
+static    pANTLR3_STRING    newPtr8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string, ANTLR3_UINT32 size);
+static    pANTLR3_STRING    newPtrUTF16_8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string, ANTLR3_UINT32 size);
+static    pANTLR3_STRING    newPtrUTF16_UTF16	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string, ANTLR3_UINT32 size);
+static    pANTLR3_STRING    newStr8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string);
+static    pANTLR3_STRING    newStrUTF16_8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string);
+static    pANTLR3_STRING    newStrUTF16_UTF16	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 string);
+static    void		    destroy	(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING string);
+static    pANTLR3_STRING    printable8	(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING string);
+static    pANTLR3_STRING    printableUTF16	(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING string);
+static    void		    closeFactory(pANTLR3_STRING_FACTORY factory);
+
+/* String API
+ */
+static    pANTLR3_UINT8	    set8	(pANTLR3_STRING string, const char * chars);
+static    pANTLR3_UINT8	    setUTF16_8	(pANTLR3_STRING string, const char * chars);
+static    pANTLR3_UINT8	    setUTF16_UTF16	(pANTLR3_STRING string, const char * chars);
+static    pANTLR3_UINT8	    append8	(pANTLR3_STRING string, const char * newbit);
+static    pANTLR3_UINT8	    appendUTF16_8	(pANTLR3_STRING string, const char * newbit);
+static    pANTLR3_UINT8	    appendUTF16_UTF16	(pANTLR3_STRING string, const char * newbit);
+static	  pANTLR3_UINT8	    insert8	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit);
+static	  pANTLR3_UINT8	    insertUTF16_8	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit);
+static	  pANTLR3_UINT8	    insertUTF16_UTF16	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit);
+
+static    pANTLR3_UINT8	    setS	(pANTLR3_STRING string, pANTLR3_STRING chars);
+static    pANTLR3_UINT8	    appendS	(pANTLR3_STRING string, pANTLR3_STRING newbit);
+static	  pANTLR3_UINT8	    insertS	(pANTLR3_STRING string, ANTLR3_UINT32 point, pANTLR3_STRING newbit);
+
+static    pANTLR3_UINT8	    addc8	(pANTLR3_STRING string, ANTLR3_UINT32 c);
+static    pANTLR3_UINT8	    addcUTF16	(pANTLR3_STRING string, ANTLR3_UINT32 c);
+static    pANTLR3_UINT8	    addi8	(pANTLR3_STRING string, ANTLR3_INT32 i);
+static    pANTLR3_UINT8	    addiUTF16	(pANTLR3_STRING string, ANTLR3_INT32 i);
+static	  pANTLR3_UINT8	    inserti8	(pANTLR3_STRING string, ANTLR3_UINT32 point, ANTLR3_INT32 i);
+static	  pANTLR3_UINT8	    insertiUTF16	(pANTLR3_STRING string, ANTLR3_UINT32 point, ANTLR3_INT32 i);
+
+static    ANTLR3_UINT32     compare8	(pANTLR3_STRING string, const char * compStr);
+static    ANTLR3_UINT32     compareUTF16_8	(pANTLR3_STRING string, const char * compStr);
+static    ANTLR3_UINT32     compareUTF16_UTF16(pANTLR3_STRING string, const char * compStr);
+static    ANTLR3_UINT32     compareS	(pANTLR3_STRING string, pANTLR3_STRING compStr);
+static    ANTLR3_UCHAR      charAt8	(pANTLR3_STRING string, ANTLR3_UINT32 offset);
+static    ANTLR3_UCHAR      charAtUTF16	(pANTLR3_STRING string, ANTLR3_UINT32 offset);
+static    pANTLR3_STRING    subString8	(pANTLR3_STRING string, ANTLR3_UINT32 startIndex, ANTLR3_UINT32 endIndex);
+static    pANTLR3_STRING    subStringUTF16	(pANTLR3_STRING string, ANTLR3_UINT32 startIndex, ANTLR3_UINT32 endIndex);
+static	  ANTLR3_INT32	    toInt32_8	(pANTLR3_STRING string);
+static	  ANTLR3_INT32	    toInt32_UTF16  (pANTLR3_STRING string);
+static	  pANTLR3_STRING    to8_8		(pANTLR3_STRING string);
+static	  pANTLR3_STRING    to8_UTF16		(pANTLR3_STRING string);
+static	pANTLR3_STRING		toUTF8_8	(pANTLR3_STRING string);
+static	pANTLR3_STRING		toUTF8_UTF16	(pANTLR3_STRING string);
+
+/* Local helpers
+ */
+static	void			stringInit8	(pANTLR3_STRING string);
+static	void			stringInitUTF16	(pANTLR3_STRING string);
+static	void	ANTLR3_CDECL	stringFree	(pANTLR3_STRING string);
+
+ANTLR3_API pANTLR3_STRING_FACTORY 
+antlr3StringFactoryNew(ANTLR3_UINT32 encoding)
+{
+	pANTLR3_STRING_FACTORY  factory;
+
+	/* Allocate memory
+	*/
+	factory	= (pANTLR3_STRING_FACTORY) ANTLR3_CALLOC(1, sizeof(ANTLR3_STRING_FACTORY));
+
+	if	(factory == NULL)
+	{
+		return	NULL;
+	}
+
+	/* Now we make a new list to track the strings.
+	*/
+	factory->strings	= antlr3VectorNew(0);
+	factory->index	= 0;
+
+	if	(factory->strings == NULL)
+	{
+		ANTLR3_FREE(factory);
+		return	NULL;
+	}
+
+    // Install the API
+    //
+    // TODO: These encodings need equivalent functions to
+    // UTF16 and 8Bit if I am going to support those encodings in the STRING stuff.
+	// The STRING stuff was intended as a quick and dirty hack for people that did not
+	// want to worry about memory and performance very much, but nobody ever reads the 
+	// notes or comments or uses the email list search. I want to discourage using these
+	// interfaces as it is much more efficient to use the pointers within the tokens
+	// directly, so I am not implementing the string stuff for the newer encodings.
+    // We install the standard 8 and 16 bit functions for the UTF 8 and 16 but they
+	// will not be useful beyond returning the text.
+	// 
+    switch(encoding)
+    {
+		case    ANTLR3_ENC_UTF32:
+			break;
+
+		case    ANTLR3_ENC_UTF32BE:
+			break;
+
+		case    ANTLR3_ENC_UTF32LE:
+			break;
+
+		case    ANTLR3_ENC_UTF16BE:
+		case    ANTLR3_ENC_UTF16LE:
+		case    ANTLR3_ENC_UTF16:
+
+			factory->newRaw	    =  newRawUTF16;
+			factory->newSize	=  newSizeUTF16;
+			factory->newPtr	    =  newPtrUTF16_UTF16;
+			factory->newPtr8	=  newPtrUTF16_8;
+			factory->newStr	    =  newStrUTF16_UTF16;
+			factory->newStr8	=  newStrUTF16_8;
+			factory->printable	=  printableUTF16;
+			factory->destroy	=  destroy;
+			factory->close	    =  closeFactory;
+			break;
+	 
+		case    ANTLR3_ENC_UTF8:
+		case    ANTLR3_ENC_EBCDIC:
+		case    ANTLR3_ENC_8BIT:
+		default:
+
+			factory->newRaw	    =  newRaw8;
+			factory->newSize	=  newSize8;
+			factory->newPtr	    =  newPtr8;
+			factory->newPtr8	=  newPtr8;
+			factory->newStr	    =  newStr8;
+			factory->newStr8	=  newStr8;
+			factory->printable	=  printable8;
+			factory->destroy	=  destroy;
+			factory->close	    =  closeFactory;
+			break;
+    }
+	return  factory;
+}
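+
+// Illustrative usage sketch only (not part of the runtime): the factory owns
+// every string it hands out, so individual strings are not freed by the
+// caller; close() releases them all. The encoding argument selects which of
+// the function sets above is installed (8 bit shown here).
+//
+#if 0
+static void exampleStringFactory(void)
+{
+    pANTLR3_STRING_FACTORY factory;
+    pANTLR3_STRING         s;
+
+    factory = antlr3StringFactoryNew(ANTLR3_ENC_8BIT);
+
+    s = factory->newStr8(factory, (pANTLR3_UINT8)"token count: ");
+    s->addi(s, 42);                 // append an integer
+    s->append8(s, " (so far)");     // append 8 bit text
+
+    // s->chars / s->len now describe the assembled text
+
+    factory->close(factory);        // frees every string the factory tracks
+}
+#endif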
+
+
+/**
+ *
+ * \param factory 
+ * \return 
+ */
+static    pANTLR3_STRING    
+newRaw8	(pANTLR3_STRING_FACTORY factory)
+{
+    pANTLR3_STRING  string;
+
+    string  = (pANTLR3_STRING) ANTLR3_MALLOC(sizeof(ANTLR3_STRING));
+
+    if	(string == NULL)
+    {
+		return	NULL;
+    }
+
+    /* Structure is allocated, now fill in the API etc.
+     */
+    stringInit8(string);
+    string->factory = factory;
+
+    /* Add the string into the allocated list
+     */
+    factory->strings->set(factory->strings, factory->index, (void *) string, (void (ANTLR3_CDECL *)(void *))(stringFree), ANTLR3_TRUE);
+    string->index   = factory->index++;
+
+    return string;
+}
+/**
+ *
+ * \param factory 
+ * \return 
+ */
+static    pANTLR3_STRING    
+newRawUTF16	(pANTLR3_STRING_FACTORY factory)
+{
+    pANTLR3_STRING  string;
+
+    string  = (pANTLR3_STRING) ANTLR3_MALLOC(sizeof(ANTLR3_STRING));
+
+    if	(string == NULL)
+    {
+		return	NULL;
+    }
+
+    /* Structure is allocated, now fill in the API etc.
+     */
+    stringInitUTF16(string);
+    string->factory = factory;
+
+    /* Add the string into the allocated list
+     */
+    factory->strings->set(factory->strings, factory->index, (void *) string, (void (ANTLR3_CDECL *)(void *))(stringFree), ANTLR3_TRUE);
+    string->index   = factory->index++;
+
+    return string;
+}
+static	 
+void	ANTLR3_CDECL stringFree  (pANTLR3_STRING string)
+{
+    /* First free the string itself if there was anything in it
+     */
+    if	(string->chars)
+    {
+	ANTLR3_FREE(string->chars);
+    }
+
+    /* Now free the space for this string
+     */
+    ANTLR3_FREE(string);
+
+    return;
+}
+/**
+ *
+ * \param string 
+ * \return 
+ */
+static	void
+stringInit8  (pANTLR3_STRING string)
+{
+    string->len			= 0;
+    string->size		= 0;
+    string->chars		= NULL;
+    string->encoding	= ANTLR3_ENC_8BIT ;
+
+    /* API for 8 bit strings*/
+
+    string->set		= set8;
+    string->set8	= set8;
+    string->append	= append8;
+    string->append8	= append8;
+    string->insert	= insert8;
+    string->insert8	= insert8;
+    string->addi	= addi8;
+    string->inserti	= inserti8;
+    string->addc	= addc8;
+    string->charAt	= charAt8;
+    string->compare	= compare8;
+    string->compare8	= compare8;
+    string->subString	= subString8;
+    string->toInt32	= toInt32_8;
+    string->to8		= to8_8;
+    string->toUTF8	= toUTF8_8;
+    string->compareS	= compareS;
+    string->setS	= setS;
+    string->appendS	= appendS;
+    string->insertS	= insertS;
+
+}
+/**
+ *
+ * \param string 
+ * \return 
+ */
+static	void
+stringInitUTF16  (pANTLR3_STRING string)
+{
+    string->len		= 0;
+    string->size	= 0;
+    string->chars	= NULL;
+    string->encoding	= ANTLR3_ENC_8BIT;
+
+    /* API for UTF16 strings */
+
+    string->set		= setUTF16_UTF16;
+    string->set8	= setUTF16_8;
+    string->append	= appendUTF16_UTF16;
+    string->append8	= appendUTF16_8;
+    string->insert	= insertUTF16_UTF16;
+    string->insert8	= insertUTF16_8;
+    string->addi	= addiUTF16;
+    string->inserti	= insertiUTF16;
+    string->addc	= addcUTF16;
+    string->charAt	= charAtUTF16;
+    string->compare	= compareUTF16_UTF16;
+    string->compare8	= compareUTF16_8;
+    string->subString	= subStringUTF16;
+    string->toInt32	= toInt32_UTF16;
+    string->to8		= to8_UTF16;
+    string->toUTF8	= toUTF8_UTF16;
+
+    string->compareS	= compareS;
+    string->setS	= setS;
+    string->appendS	= appendS;
+    string->insertS	= insertS;
+}
+/**
+ *
+ * \param string 
+ * \return 
+ * TODO: Implement UTF-8
+ */
+static	void
+stringInitUTF8  (pANTLR3_STRING string)
+{
+    string->len	    = 0;
+    string->size    = 0;
+    string->chars   = NULL;
+
+    /* API */
+
+}
+
+// Convert an 8 bit string into a UTF8 representation, which is in fact just a
+// memcpy of the string itself, as we make no assumptions about the 8 bit encoding.
+//
+static	pANTLR3_STRING		
+toUTF8_8	(pANTLR3_STRING string)
+{
+	return string->factory->newPtr(string->factory, (pANTLR3_UINT8)(string->chars), string->len);
+}
+
+// Convert a UTF16 string into a UTF8 representation using the Unicode.org
+// supplied C algorithms, which are now contained within the ANTLR3 C runtime
+// as permitted by the Unicode license (within the source code antlr3convertutf.c/.h).
+// UCS2 has the same encoding as UTF16, so we can use the UTF16 converter.
+//
+static	pANTLR3_STRING	
+toUTF8_UTF16	(pANTLR3_STRING string)
+{
+
+    UTF8	      * outputEnd;	
+    UTF16	      * inputEnd;
+    pANTLR3_STRING	utf8String;
+
+    ConversionResult	cResult;
+
+    // Allocate the output buffer, which needs to accommodate potentially
+    // 3X (in bytes) the input size (in chars).
+    //
+    utf8String	= string->factory->newStr8(string->factory, (pANTLR3_UINT8)"");
+
+    if	(utf8String != NULL)
+    {
+        // Free existing allocation
+        //
+        ANTLR3_FREE(utf8String->chars);
+
+        // Reallocate according to maximum expected size
+        //
+        utf8String->size	= string->len *3;
+        utf8String->chars	= (pANTLR3_UINT8)ANTLR3_MALLOC(utf8String->size +1);
+
+        if	(utf8String->chars != NULL)
+        {
+            inputEnd  = (UTF16 *)	(string->chars);
+            outputEnd = (UTF8 *)	(utf8String->chars);
+
+            // Call the Unicode converter
+            //
+            cResult =  ConvertUTF16toUTF8
+                (
+                (const UTF16**)&inputEnd, 
+                ((const UTF16 *)(string->chars)) + string->len, 
+                &outputEnd, 
+                outputEnd + utf8String->size - 1,
+                lenientConversion
+                );
+
+            // We don't really care if things failed or not here, we just converted
+            // everything that was vaguely possible and stopped when it wasn't. It is
+            // up to the grammar programmer to verify that the input is sensible.
+            //
+            utf8String->len = ANTLR3_UINT32_CAST(((pANTLR3_UINT8)outputEnd) - utf8String->chars);
+
+            *(outputEnd+1) = '\0';		// Always null terminate
+        }
+    }
+    return utf8String;
+}
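+
+// Illustrative only: both the 8 bit and UTF16 flavours are reached through the
+// same toUTF8 member; the result is a new factory-owned string, released when
+// the factory is closed.
+//
+#if 0
+static void exampleToUTF8(pANTLR3_STRING s)
+{
+    pANTLR3_STRING u8 = s->toUTF8(s);   // worst case ~3 bytes per UTF16 unit
+    // ... use u8->chars / u8->len ...
+}
+#endif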
+
+/**
+ * Creates a new string with enough capacity for size 8 bit characters plus a terminator.
+ *
+ * \param[in] factory - Pointer to the string factory that owns strings
+ * \param[in] size - In characters
+ * \return pointer to the new string.
+ */
+static    pANTLR3_STRING    
+newSize8	(pANTLR3_STRING_FACTORY factory, ANTLR3_UINT32 size)
+{
+    pANTLR3_STRING  string;
+
+    string  = factory->newRaw(factory);
+
+    if	(string == NULL)
+    {
+        return	string;
+    }
+
+    /* Always add one more byte for a terminator ;-)
+    */
+    string->chars	= (pANTLR3_UINT8) ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_UINT8) * (size+1)));
+	if (string->chars != NULL)
+    {
+		*(string->chars)	= '\0';
+		string->size	= size + 1;
+	}
+
+    return string;
+}
+/**
+ * Creates a new string with enough capacity for size UTF16 characters plus a terminator.
+ *
+ * \param[in] factory - Pointer to the string factory that owns strings
+ * \param[in] size - In characters (count double for surrogate pairs!!!)
+ * \return pointer to the new string.
+ */
+static    pANTLR3_STRING    
+newSizeUTF16	(pANTLR3_STRING_FACTORY factory, ANTLR3_UINT32 size)
+{
+    pANTLR3_STRING  string;
+
+    string  = factory->newRaw(factory);
+
+    if	(string == NULL)
+    {
+        return	string;
+    }
+
+    /* Always add one more byte for a terminator ;-)
+    */	
+    string->chars	= (pANTLR3_UINT8) ANTLR3_MALLOC((size_t)(sizeof(ANTLR3_UINT16) * (size+1)));
+    if (string->chars != NULL)
+	{
+		*(string->chars)	= '\0';
+		string->size	= size+1;	/* Size is always in characters, as is len */
+	}
+
+    return string;
+}
+
+/** Creates a new 8 bit string initialized with the 8 bit characters at the 
+ *  supplied ptr, of pre-determined size.
+ * \param[in] factory - Pointer to the string factory that owns the strings
+ * \param[in] ptr - Pointer to 8 bit encoded characters
+ * \return pointer to the new string
+ */
+static    pANTLR3_STRING    
+newPtr8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr, ANTLR3_UINT32 size)
+{
+	pANTLR3_STRING  string;
+
+	string  = factory->newSize(factory, size);
+
+	if	(string == NULL)
+	{
+		return	NULL;
+	}
+
+	if	(size <= 0)
+	{
+		return	string;
+	}
+
+	if	(ptr != NULL)
+	{
+		ANTLR3_MEMMOVE(string->chars, (const void *)ptr, size);
+		*(string->chars + size) = '\0';	    /* Terminate, these strings are usually used for Token streams and printing etc.	*/
+		string->len = size;
+	}
+
+	return  string;
+}
+
+/** Creates a new UTF16 string initialized with the 8 bit characters at the 
+ *  supplied 8 bit character ptr, of pre-determined size.
+ * \param[in] factory - Pointer to the string factory that owns the strings
+ * \param[in] ptr - Pointer to 8 bit encoded characters
+ * \return pointer to the new string
+ */
+static    pANTLR3_STRING    
+newPtrUTF16_8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr, ANTLR3_UINT32 size)
+{
+	pANTLR3_STRING  string;
+
+	/* newSize accepts size in characters, not bytes
+	*/
+	string  = factory->newSize(factory, size);
+
+	if	(string == NULL)
+	{
+		return	NULL;
+	}
+
+	if	(size <= 0)
+	{
+		return	string;
+	}
+
+	if	(ptr != NULL)
+	{
+		pANTLR3_UINT16	out;
+		ANTLR3_INT32    inSize;
+
+		out = (pANTLR3_UINT16)(string->chars);
+		inSize	= size;
+
+		while	(inSize-- > 0)
+		{
+			*out++ = (ANTLR3_UINT16)(*ptr++);
+		}
+
+		/* Terminate, these strings are usually used for Token streams and printing etc.	
+		*/
+		*(((pANTLR3_UINT16)(string->chars)) + size) = '\0';
+
+		string->len = size;
+	}
+
+	return  string;
+}
+
+/** Creates a new UTF16 string initialized with the UTF16 characters at the 
+ *  supplied ptr, of pre-determined size.
+ * \param[in] factory - Pointer to the string factory that owns the strings
+ * \param[in] ptr - Pointer to UTF16 encoded characters
+ * \return pointer to the new string
+ */
+static    pANTLR3_STRING    
+newPtrUTF16_UTF16	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr, ANTLR3_UINT32 size)
+{
+	pANTLR3_STRING  string;
+
+	string  = factory->newSize(factory, size);
+
+	if	(string == NULL)
+	{
+		return	NULL;
+	}
+
+	if	(size <= 0)
+	{
+		return	string;
+	}
+
+	if	(ptr != NULL)
+	{
+		ANTLR3_MEMMOVE(string->chars, (const void *)ptr, (size * sizeof(ANTLR3_UINT16)));
+
+		/* Terminate, these strings are usually used for Token streams and printing etc.	
+		*/
+		*(((pANTLR3_UINT16)(string->chars)) + size) = '\0';	    
+		string->len = size;
+	}
+
+	return  string;
+}
+
+/** Create a new 8 bit string from the supplied, null terminated, 8 bit string pointer.
+ * \param[in] factory - Pointer to the string factory that owns strings.
+ * \param[in] ptr - Pointer to the 8 bit encoded string
+ * \return Pointer to the newly initialized string
+ */
+static    pANTLR3_STRING    
+newStr8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr)
+{
+    return factory->newPtr8(factory, ptr, (ANTLR3_UINT32)strlen((const char *)ptr));
+}
+
+/** Create a new UTF16 string from the supplied, null terminated, 8 bit string pointer.
+ * \param[in] factory - Pointer to the string factory that owns strings.
+ * \param[in] ptr - Pointer to the 8 bit encoded string
+ * \return Pointer to the newly initialized string
+ */
+static    pANTLR3_STRING    
+newStrUTF16_8	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr)
+{
+    return factory->newPtr8(factory, ptr, (ANTLR3_UINT32)strlen((const char *)ptr));
+}
+
+/** Create a new UTF16 string from the supplied, null terminated, UTF16 string pointer.
+ * \param[in] factory - Pointer to the string factory that owns strings.
+ * \param[in] ptr - Pointer to the UTF16 encoded string
+ * \return Pointer to the newly initialized string
+ */
+static    pANTLR3_STRING    
+newStrUTF16_UTF16	(pANTLR3_STRING_FACTORY factory, pANTLR3_UINT8 ptr)
+{
+    pANTLR3_UINT16  in;
+    ANTLR3_UINT32   count;
+
+    /** First, determine the length of the input string
+     */
+    in	    = (pANTLR3_UINT16)ptr;
+    count   = 0;
+
+    while   (*in++ != '\0')
+    {
+		count++;
+    }
+    return factory->newPtr(factory, ptr, count);
+}
+
+static    void		    
+destroy	(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING string)
+{
+    // Record which string we are deleting
+    //
+    ANTLR3_UINT32 strIndex = string->index;
+    
+    // Ensure that the string was not factory made, or we would try
+    // to delete memory that wasn't allocated outside the factory
+    // block.
+    // Remove the specific indexed string from the vector
+    //
+    factory->strings->del(factory->strings, strIndex);
+
+    // One less string in the vector, so decrement the factory index
+    // so that the next string allocated is indexed correctly with
+    // respect to the vector.
+    //
+    factory->index--;
+
+    // Now we have to reindex the strings in the vector that followed
+    // the one we just deleted. We only do this if the one we just deleted
+    // was not the last one.
+    //
+    if  (strIndex< factory->index)
+    {
+        // We must reindex the strings after the one we just deleted.
+        // The one that follows the one we just deleted is also out
+        // of whack, so we start there.
+        //
+        ANTLR3_UINT32 i;
+
+        for (i = strIndex; i < factory->index; i++)
+        {
+            // Renumber the entry
+            //
+            ((pANTLR3_STRING)(factory->strings->elements[i].element))->index = i;
+        }
+    }
+
+    // The string has been destroyed and the elements of the factory are reindexed.
+    //
+
+}
+
+static    pANTLR3_STRING    
+printable8(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING instr)
+{
+    pANTLR3_STRING  string;
+    
+    /* We don't need to be too efficient here, this is mostly for error messages and so on.
+     */
+    pANTLR3_UINT8   scannedText;
+    ANTLR3_UINT32   i;
+
+    /* Assume we need at most twice as much space to escape the control characters
+     */
+    string  = factory->newSize(factory, instr->len *2 + 1);
+
+    /* Scan through and replace unprintable (in terms of this routine)
+     * characters
+     */
+    scannedText = string->chars;
+
+    for	(i = 0; i < instr->len; i++)
+    {
+		if (*(instr->chars + i) == '\n')
+		{
+			*scannedText++ = '\\';
+			*scannedText++ = 'n';
+		}
+		else if (*(instr->chars + i) == '\r')
+		{
+			*scannedText++ = '\\';
+			*scannedText++ = 'r';
+		}
+		else if	(!isprint(*(instr->chars +i)))
+		{
+			*scannedText++ = '?';
+		}
+		else
+		{
+			*scannedText++ = *(instr->chars + i);
+		}
+    }
+    *scannedText  = '\0';
+
+    string->len	= (ANTLR3_UINT32)(scannedText - string->chars);
+    
+    return  string;
+}
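+
+// Illustrative only: printable (8 bit or UTF16 flavour, selected by the
+// factory encoding) is intended for error messages; it escapes newlines and
+// carriage returns and replaces other unprintable characters with '?'.
+//
+#if 0
+static void examplePrintable(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING s)
+{
+    pANTLR3_STRING msg = factory->printable(factory, s);
+
+    ANTLR3_FPRINTF(stderr, "unprintable input: ");
+    ANTLR3_FPRINTF(stderr, (const char *)(msg->chars));
+}
+#endif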
+
+static    pANTLR3_STRING    
+printableUTF16(pANTLR3_STRING_FACTORY factory, pANTLR3_STRING instr)
+{
+    pANTLR3_STRING  string;
+    
+    /* We don't need to be too efficient here, this is mostly for error messages and so on.
+     */
+    pANTLR3_UINT16  scannedText;
+    pANTLR3_UINT16  inText;
+    ANTLR3_UINT32   i;
+    ANTLR3_UINT32   outLen;
+
+    /* Assume we need at most twice as much space to escape the control characters
+     */
+    string  = factory->newSize(factory, instr->len *2 + 1);
+
+    /* Scan through and replace unprintable (in terms of this routine)
+     * characters
+     */
+    scannedText = (pANTLR3_UINT16)(string->chars);
+    inText	= (pANTLR3_UINT16)(instr->chars);
+    outLen	= 0;
+
+    for	(i = 0; i < instr->len; i++)
+    {
+		if (*(inText + i) == '\n')
+		{
+			*scannedText++   = '\\';
+			*scannedText++   = 'n';
+			outLen	    += 2;
+		}
+		else if (*(inText + i) == '\r')
+		{
+			*scannedText++   = '\\';
+			*scannedText++   = 'r';
+			outLen	    += 2;
+		}
+		else if	(!isprint(*(inText +i)))
+		{
+			*scannedText++ = '?';
+			outLen++;
+		}
+		else
+		{
+			*scannedText++ = *(inText + i);
+			outLen++;
+		}
+    }
+    *scannedText  = '\0';
+
+    string->len	= outLen;
+    
+    return  string;
+}
+
+/** Close the string factory, releasing every string it is tracking
+ *  along with the factory itself.
+ */
+static    void		    
+closeFactory	(pANTLR3_STRING_FACTORY factory)
+{
+    /* Delete the vector we were tracking the strings with, this will
+     * cause all the allocated strings to be deallocated too
+     */
+    factory->strings->free(factory->strings);
+
+    /* Delete the space for the factory itself
+     */
+    ANTLR3_FREE((void *)factory);
+}
+
+static    pANTLR3_UINT8   
+append8	(pANTLR3_STRING string, const char * newbit)
+{
+    ANTLR3_UINT32 len;
+
+    len	= (ANTLR3_UINT32)strlen(newbit);
+
+    if	(string->size < (string->len + len + 1))
+    {
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(string->len + len + 1));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= string->len + len + 1;
+    }
+
+    /* Note we copy one more byte than the strlen in order to get the trailing '\0'
+     */
+    ANTLR3_MEMMOVE((void *)(string->chars + string->len), newbit, (ANTLR3_UINT32)(len+1));
+    string->len	+= len;
+
+    return string->chars;
+}
+
+static    pANTLR3_UINT8   
+appendUTF16_8	(pANTLR3_STRING string, const char * newbit)
+{
+    ANTLR3_UINT32   len;
+    pANTLR3_UINT16  apPoint;
+    ANTLR3_UINT32   count;
+
+    len	= (ANTLR3_UINT32)strlen(newbit);
+
+    if	(string->size < (string->len + len + 1))
+    {
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)((sizeof(ANTLR3_UINT16)*(string->len + len + 1))));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= string->len + len + 1;
+    }
+
+    apPoint = ((pANTLR3_UINT16)string->chars) + string->len;
+    string->len	+= len;
+
+    for	(count = 0; count < len; count++)
+    {
+		*apPoint++   = *(newbit + count);
+    }
+    *apPoint = '\0';
+
+    return string->chars;
+}
+
+static    pANTLR3_UINT8   
+appendUTF16_UTF16	(pANTLR3_STRING string, const char * newbit)
+{
+    ANTLR3_UINT32 len;
+    pANTLR3_UINT16  in;
+
+    /** First, determine the length of the input string
+     */
+    in	    = (pANTLR3_UINT16)newbit;
+    len   = 0;
+
+    while   (*in++ != '\0')
+    {
+		len++;
+    }
+
+    if	(string->size < (string->len + len + 1))
+    {
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)( sizeof(ANTLR3_UINT16) *(string->len + len + 1) ));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= string->len + len + 1;
+    }
+
+    /* Note we copy one more byte than the strlen in order to get the trailing delimiter
+     */
+    ANTLR3_MEMMOVE((void *)(((pANTLR3_UINT16)string->chars) + string->len), newbit, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(len+1)));
+    string->len	+= len;
+
+    return string->chars;
+}
+
+static    pANTLR3_UINT8   
+set8	(pANTLR3_STRING string, const char * chars)
+{
+    ANTLR3_UINT32	len;
+
+    len = (ANTLR3_UINT32)strlen(chars);
+    if	(string->size < len + 1)
+    {
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(len + 1));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= len + 1;
+    }
+
+    /* Note we copy one more byte than the strlen in order to get the trailing '\0'
+     */
+    ANTLR3_MEMMOVE((void *)(string->chars), chars, (ANTLR3_UINT32)(len+1));
+    string->len	    = len;
+
+    return  string->chars;
+
+}
+
+static    pANTLR3_UINT8   
+setUTF16_8	(pANTLR3_STRING string, const char * chars)
+{
+    ANTLR3_UINT32	len;
+    ANTLR3_UINT32	count;
+    pANTLR3_UINT16	apPoint;
+
+    len = (ANTLR3_UINT32)strlen(chars);
+    if	(string->size < len + 1)
+	{
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(len + 1)));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= len + 1;
+    }
+    apPoint = ((pANTLR3_UINT16)string->chars);
+    string->len	= len;
+
+    for	(count = 0; count < string->len; count++)
+    {
+		*apPoint++   = *(chars + count);
+    }
+    *apPoint = '\0';
+
+    return  string->chars;
+}
+
+static    pANTLR3_UINT8   
+setUTF16_UTF16    (pANTLR3_STRING string, const char * chars)
+{
+    ANTLR3_UINT32   len;
+    pANTLR3_UINT16  in;
+
+    /** First, determine the length of the input string
+     */
+    in	    = (pANTLR3_UINT16)chars;
+    len   = 0;
+
+    while   (*in++ != '\0')
+    {
+		len++;
+    }
+
+    if	(string->size < len + 1)
+    {
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(len + 1)));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= len + 1;
+    }
+
+    /* Note we copy one more byte than the strlen in order to get the trailing '\0'
+     */
+    ANTLR3_MEMMOVE((void *)(string->chars), chars, (ANTLR3_UINT32)((len+1) * sizeof(ANTLR3_UINT16)));
+    string->len	    = len;
+
+    return  string->chars;
+
+}
+
+static    pANTLR3_UINT8   
+addc8	(pANTLR3_STRING string, ANTLR3_UINT32 c)
+{
+    if	(string->size < string->len + 2)
+    {
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(string->len + 2));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= string->len + 2;
+    }
+    *(string->chars + string->len)	= (ANTLR3_UINT8)c;
+    *(string->chars + string->len + 1)	= '\0';
+    string->len++;
+
+    return  string->chars;
+}
+
+static    pANTLR3_UINT8   
+addcUTF16	(pANTLR3_STRING string, ANTLR3_UINT32 c)
+{
+    pANTLR3_UINT16  ptr;
+
+    if	(string->size < string->len + 2)
+    {
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16) * (string->len + 2)));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= string->len + 2;
+    }
+    ptr	= (pANTLR3_UINT16)(string->chars);
+
+    *(ptr + string->len)	= (ANTLR3_UINT16)c;
+    *(ptr + string->len + 1)	= '\0';
+    string->len++;
+
+    return  string->chars;
+}
+
+static    pANTLR3_UINT8   
+addi8	(pANTLR3_STRING string, ANTLR3_INT32 i)
+{
+    ANTLR3_UINT8	    newbit[32];
+
+    sprintf((char *)newbit, "%d", i);
+
+    return  string->append8(string, (const char *)newbit);
+}
+static    pANTLR3_UINT8   
+addiUTF16	(pANTLR3_STRING string, ANTLR3_INT32 i)
+{
+    ANTLR3_UINT8	    newbit[32];
+
+    sprintf((char *)newbit, "%d", i);
+
+    return  string->append8(string, (const char *)newbit);
+}
+
+static	  pANTLR3_UINT8
+inserti8    (pANTLR3_STRING string, ANTLR3_UINT32 point, ANTLR3_INT32 i)
+{
+    ANTLR3_UINT8	    newbit[32];
+
+    sprintf((char *)newbit, "%d", i);
+    return  string->insert8(string, point, (const char *)newbit);
+}
+static	  pANTLR3_UINT8
+insertiUTF16    (pANTLR3_STRING string, ANTLR3_UINT32 point, ANTLR3_INT32 i)
+{
+    ANTLR3_UINT8	    newbit[32];
+
+    sprintf((char *)newbit, "%d", i);
+    return  string->insert8(string, point, (const char *)newbit);
+}
+
+static	pANTLR3_UINT8
+insert8	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit)
+{
+    ANTLR3_UINT32	len;
+
+    if	(point >= string->len)
+    {
+		return	string->append(string, newbit);
+    }
+ 
+    len	= (ANTLR3_UINT32)strlen(newbit);
+
+    if	(len == 0)
+    {
+		return	string->chars;
+    }
+
+    if	(string->size < (string->len + len + 1))
+    {
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(string->len + len + 1));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= string->len + len + 1;
+    }
+
+    /* Move the characters that follow the insertion point out of the way, including the trailing delimiter
+     */
+    ANTLR3_MEMMOVE((void *)(string->chars + point + len), (void *)(string->chars + point), (ANTLR3_UINT32)(string->len - point + 1));
+
+    /* Note we copy the exact number of bytes
+     */
+    ANTLR3_MEMMOVE((void *)(string->chars + point), newbit, (ANTLR3_UINT32)(len));
+    
+    string->len += len;
+
+    return  string->chars;
+}
+
+static	pANTLR3_UINT8
+insertUTF16_8	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit)
+{
+    ANTLR3_UINT32	len;
+    ANTLR3_UINT32	count;
+    pANTLR3_UINT16	inPoint;
+
+    if	(point >= string->len)
+    {
+		return	string->append8(string, newbit);
+    }
+ 
+    len	= (ANTLR3_UINT32)strlen(newbit);
+
+    if	(len == 0)
+    {
+		return	string->chars;
+    }
+
+    if	(string->size < (string->len + len + 1))
+    {
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(string->len + len + 1)));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= string->len + len + 1;
+    }
+
+    /* Move the characters that follow the insertion point out of the way, including the trailing delimiter
+     */
+    ANTLR3_MEMMOVE((void *)(((pANTLR3_UINT16)string->chars) + point + len), (void *)(((pANTLR3_UINT16)string->chars) + point), (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(string->len - point + 1)));
+
+    string->len += len;
+    
+    inPoint = ((pANTLR3_UINT16)(string->chars))+point;
+    for	(count = 0; count<len; count++)
+    {
+		*(inPoint + count) = (ANTLR3_UINT16)(*(newbit+count));
+    }
+
+    return  string->chars;
+}
+
+static	pANTLR3_UINT8
+insertUTF16_UTF16	(pANTLR3_STRING string, ANTLR3_UINT32 point, const char * newbit)
+{
+    ANTLR3_UINT32	len;
+    pANTLR3_UINT16	in;
+
+    if	(point >= string->len)
+    {
+		return	string->append(string, newbit);
+    }
+ 
+    /** First, determine the length of the input string
+     */
+    in	    = (pANTLR3_UINT16)newbit;
+    len	    = 0;
+
+    while   (*in++ != '\0')
+    {
+		len++;
+    }
+
+    if	(len == 0)
+    {
+		return	string->chars;
+    }
+
+    if	(string->size < (string->len + len + 1))
+    {
+		pANTLR3_UINT8 newAlloc = (pANTLR3_UINT8) ANTLR3_REALLOC((void *)string->chars, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(string->len + len + 1)));
+		if (newAlloc == NULL)
+		{
+			return NULL;
+		}
+		string->chars	= newAlloc;
+		string->size	= string->len + len + 1;
+    }
+
+    /* Move the characters that follow the insertion point out of the way, including the trailing delimiter
+     */
+    ANTLR3_MEMMOVE((void *)(((pANTLR3_UINT16)string->chars) + point + len), (void *)(((pANTLR3_UINT16)string->chars) + point), (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(string->len - point + 1)));
+
+
+    /* Note we copy the exact number of characters
+     */
+    ANTLR3_MEMMOVE((void *)(((pANTLR3_UINT16)string->chars) + point), newbit, (ANTLR3_UINT32)(sizeof(ANTLR3_UINT16)*(len)));
+    
+    string->len += len;
+
+    return  string->chars;
+}
+
+static    pANTLR3_UINT8	    setS	(pANTLR3_STRING string, pANTLR3_STRING chars)
+{
+    return  string->set(string, (const char *)(chars->chars));
+}
+
+static    pANTLR3_UINT8	    appendS	(pANTLR3_STRING string, pANTLR3_STRING newbit)
+{
+    /* We may be passed an empty string, in which case we just return the current pointer
+     */
+    if	(newbit == NULL || newbit->len == 0 || newbit->size == 0 || newbit->chars == NULL)
+    {
+		return	string->chars;
+    }
+    else
+    {
+		return  string->append(string, (const char *)(newbit->chars));
+    }
+}
+
+static	  pANTLR3_UINT8	    insertS	(pANTLR3_STRING string, ANTLR3_UINT32 point, pANTLR3_STRING newbit)
+{
+    return  string->insert(string, point, (const char *)(newbit->chars));
+}
+
+/* Function that compares the text of a string to the supplied
+ * 8 bit character string and returns a result a la strcmp()
+ */
+static ANTLR3_UINT32   
+compare8	(pANTLR3_STRING string, const char * compStr)
+{
+    return  strcmp((const char *)(string->chars), compStr);
+}
+
+/* Function that compares the text of a string with the supplied character string
+ * (which is assumed to be in the same encoding as the string itself) and returns a result
+ * a la strcmp()
+ */
+static ANTLR3_UINT32   
+compareUTF16_8	(pANTLR3_STRING string, const char * compStr)
+{
+    pANTLR3_UINT16  ourString;
+    ANTLR3_UINT32   charDiff;
+
+    ourString	= (pANTLR3_UINT16)(string->chars);
+
+    while   (((ANTLR3_UCHAR)(*ourString) != '\0') && ((ANTLR3_UCHAR)(*compStr) != '\0'))
+    {
+		charDiff = *ourString - *compStr;
+		if  (charDiff != 0)
+		{
+			return charDiff;
+		}
+		ourString++;
+		compStr++;
+    }
+
+    /* At this point, one of the strings was terminated
+     */
+    return (ANTLR3_UINT32)((ANTLR3_UCHAR)(*ourString) - (ANTLR3_UCHAR)(*compStr));
+
+}
+
+/* Function that compares the text of a string with the supplied character string
+ * (which is assumed to be in the same encoding as the string itself) and returns a result
+ * a la strcmp()
+ */
+static ANTLR3_UINT32   
+compareUTF16_UTF16	(pANTLR3_STRING string, const char * compStr8)
+{
+    pANTLR3_UINT16  ourString;
+    pANTLR3_UINT16  compStr;
+    ANTLR3_UINT32   charDiff;
+
+    ourString	= (pANTLR3_UINT16)(string->chars);
+    compStr	= (pANTLR3_UINT16)(compStr8);
+
+    while   (((ANTLR3_UCHAR)(*ourString) != '\0') && ((ANTLR3_UCHAR)(*((pANTLR3_UINT16)compStr)) != '\0'))
+    {
+		charDiff = *ourString - *compStr;
+		if  (charDiff != 0)
+		{
+			return charDiff;
+		}
+		ourString++;
+		compStr++;
+    }
+
+    /* At this point, one of the strings was terminated
+     */
+    return (ANTLR3_UINT32)((ANTLR3_UCHAR)(*ourString) - (ANTLR3_UCHAR)(*compStr));
+}
+
+/* Function that compares the text of a string with the supplied string
+ * (which is assumed to be in the same encoding as the string itself) and returns a result
+ * a la strcmp()
+ */
+static ANTLR3_UINT32   
+compareS    (pANTLR3_STRING string, pANTLR3_STRING compStr)
+{
+    return  string->compare(string, (const char *)compStr->chars);
+}
+
+
+/* Function that returns the character indexed at the supplied
+ * offset as a 32 bit character.
+ */
+static ANTLR3_UCHAR    
+charAt8	    (pANTLR3_STRING string, ANTLR3_UINT32 offset)
+{
+    if	(offset > string->len)
+    {
+		return (ANTLR3_UCHAR)'\0';
+    }
+    else
+    {
+		return  (ANTLR3_UCHAR)(*(string->chars + offset));
+    }
+}
+
+/* Function that returns the character indexed at the supplied
+ * offset as a 32 bit character.
+ */
+static ANTLR3_UCHAR    
+charAtUTF16    (pANTLR3_STRING string, ANTLR3_UINT32 offset)
+{
+    if	(offset > string->len)
+    {
+		return (ANTLR3_UCHAR)'\0';
+    }
+    else
+    {
+		return  (ANTLR3_UCHAR)(*((pANTLR3_UINT16)(string->chars) + offset));
+    }
+}
+
+/* Function that returns a substring of the supplied string a la .subString(s,e)
+ * in java runtimes.
+ */
+static pANTLR3_STRING
+subString8   (pANTLR3_STRING string, ANTLR3_UINT32 startIndex, ANTLR3_UINT32 endIndex)
+{
+    pANTLR3_STRING newStr;
+
+    if	(endIndex > string->len)
+    {
+		endIndex = string->len + 1;
+    }
+    newStr  = string->factory->newPtr(string->factory, string->chars + startIndex, endIndex - startIndex);
+
+    return newStr;
+}
+
+/* Returns a substring of the supplied string a la .subString(s,e)
+ * in java runtimes.
+ */
+static pANTLR3_STRING
+subStringUTF16  (pANTLR3_STRING string, ANTLR3_UINT32 startIndex, ANTLR3_UINT32 endIndex)
+{
+    pANTLR3_STRING newStr;
+
+    if	(endIndex > string->len)
+    {
+		endIndex = string->len + 1;
+    }
+    newStr  = string->factory->newPtr(string->factory, (pANTLR3_UINT8)((pANTLR3_UINT16)(string->chars) + startIndex), endIndex - startIndex);
+
+    return newStr;
+}
+
+/* Function that can convert the characters in the string to an integer
+ */
+static ANTLR3_INT32
+toInt32_8	    (struct ANTLR3_STRING_struct * string)
+{
+    return  atoi((const char *)(string->chars));
+}
+
+/* Function that can convert the characters in the string to an integer
+ */
+static ANTLR3_INT32
+toInt32_UTF16       (struct ANTLR3_STRING_struct * string)
+{
+    pANTLR3_UINT16  input;
+    ANTLR3_INT32   value;
+    ANTLR3_BOOLEAN  negate;
+
+    value   = 0;
+    input   = (pANTLR3_UINT16)(string->chars);
+    negate  = ANTLR3_FALSE;
+
+    if	(*input == (ANTLR3_UCHAR)'-')
+    {
+		negate = ANTLR3_TRUE;
+		input++;
+    }
+    else if (*input == (ANTLR3_UCHAR)'+')
+    {
+		input++;
+    }
+
+    while   (*input != '\0' && isdigit(*input))
+    {
+		value	 = value * 10;
+		value	+= ((ANTLR3_UINT32)(*input) - (ANTLR3_UINT32)'0');
+		input++;
+    }
+
+    return negate ? -value : value;
+}
+
+/* Function that returns a pointer to an 8 bit version of the string,
+ * which in this case is just the string as this is 
+ * 8 bit encoding anyway.
+ */
+static	  pANTLR3_STRING	    to8_8	(pANTLR3_STRING string)
+{
+    return  string;
+}
+
+/* Function that returns an 8 bit version of the string,
+ * which in this case is returning all the UTF16 characters
+ * narrowed back into 8 bits, with characters that are too large
+ * replaced with '_'
+ */
+static	  pANTLR3_STRING    to8_UTF16	(pANTLR3_STRING string)
+{
+	pANTLR3_STRING  newStr;
+	ANTLR3_UINT32   i;
+
+	/* Create a new 8 bit string
+	*/
+	newStr  = newRaw8(string->factory);
+
+	if	(newStr == NULL)
+	{
+		return	NULL;
+	}
+
+	/* Always add one more byte for a terminator
+	*/
+	newStr->chars   = (pANTLR3_UINT8) ANTLR3_MALLOC((size_t)(string->len + 1));
+	if (newStr->chars != NULL)
+	{
+		newStr->size    = string->len + 1;
+		newStr->len	    = string->len;
+
+		/* Now copy each UTF16 character, making it an 8 bit character of
+		* some sort.
+		*/
+		for	(i=0; i<string->len; i++)
+		{
+			ANTLR3_UCHAR	c;
+
+			c = *(((pANTLR3_UINT16)(string->chars)) + i);
+
+			*(newStr->chars + i) = (ANTLR3_UINT8)(c > 255 ? '_' : c);
+		}
+
+		/* Terminate
+		*/
+		*(newStr->chars + newStr->len) = '\0';
+	}
+
+	return newStr;
+}
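+
+/* Illustrative usage sketch (editor's addition, not part of the runtime): it
+ * exercises the 8 bit string API implemented above. The factory constructor
+ * antlr3StringFactoryNew() and the member names (newStr8, addi, insert8,
+ * compare8, close) are assumed from the vtable wiring earlier in this file,
+ * which lies outside this hunk.
+ *
+ *   pANTLR3_STRING_FACTORY factory = antlr3StringFactoryNew();
+ *   pANTLR3_STRING         str     = factory->newStr8(factory, (pANTLR3_UINT8)"value=");
+ *
+ *   str->addi    (str, 42);                        // str is now "value=42"
+ *   str->insert8 (str, 0, "the ");                 // str is now "the value=42"
+ *
+ *   if (str->compare8(str, "the value=42") == 0)
+ *   {
+ *       // all strings are tracked by the factory, so closing the factory
+ *       // (closeFactory above) frees str as well
+ *   }
+ *
+ *   factory->close(factory);
+ */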
diff --git a/antlr-3.4/runtime/C/src/antlr3tokenstream.c b/runtime/C/src/antlr3tokenstream.c
similarity index 100%
rename from antlr-3.4/runtime/C/src/antlr3tokenstream.c
rename to runtime/C/src/antlr3tokenstream.c
diff --git a/runtime/C/src/antlr3treeparser.c b/runtime/C/src/antlr3treeparser.c
new file mode 100644
index 0000000..c40a1ef
--- /dev/null
+++ b/runtime/C/src/antlr3treeparser.c
@@ -0,0 +1,255 @@
+/** \file
+ *  Implementation of the tree parser and overrides for the base recognizer
+ */
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <antlr3treeparser.h>
+
+/* BASE Recognizer overrides
+ */
+static void				mismatch	    (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow);
+
+/* Tree parser API
+ */
+static void			setTreeNodeStream	    (pANTLR3_TREE_PARSER parser, pANTLR3_COMMON_TREE_NODE_STREAM input);
+static pANTLR3_COMMON_TREE_NODE_STREAM	
+					getTreeNodeStream	    (pANTLR3_TREE_PARSER parser);
+static void			freeParser				(pANTLR3_TREE_PARSER parser);    
+static void *		getCurrentInputSymbol	(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream);
+static void *		getMissingSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
+												ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow);
+
+
+ANTLR3_API pANTLR3_TREE_PARSER
+antlr3TreeParserNewStream(ANTLR3_UINT32 sizeHint, pANTLR3_COMMON_TREE_NODE_STREAM ctnstream, pANTLR3_RECOGNIZER_SHARED_STATE state)
+{
+	pANTLR3_TREE_PARSER	    parser;
+
+	/** Allocate tree parser memory
+	*/
+	parser  =(pANTLR3_TREE_PARSER) ANTLR3_MALLOC(sizeof(ANTLR3_TREE_PARSER));
+
+	if	(parser == NULL)
+	{
+		return	NULL;
+	}
+
+	/* Create and install a base recognizer which does most of the work for us
+	*/
+	parser->rec =  antlr3BaseRecognizerNew(ANTLR3_TYPE_PARSER, sizeHint, state);
+
+	if	(parser->rec == NULL)
+	{
+		parser->free(parser);
+		return	NULL;
+	}
+
+	/* Ensure we can track back to the tree parser super structure
+	* from the base recognizer structure
+	*/
+	parser->rec->super	= parser;
+	parser->rec->type	= ANTLR3_TYPE_TREE_PARSER;
+
+	/* Install our base recognizer overrides
+	*/
+	parser->rec->mismatch				= mismatch;
+	parser->rec->exConstruct			= antlr3MTNExceptionNew;
+	parser->rec->getCurrentInputSymbol	= getCurrentInputSymbol;
+	parser->rec->getMissingSymbol		= getMissingSymbol;
+
+	/* Install tree parser API
+	*/
+	parser->getTreeNodeStream	=  getTreeNodeStream;
+	parser->setTreeNodeStream	=  setTreeNodeStream;
+	parser->free		=  freeParser;
+
+	/* Install the tree node stream
+	*/
+	parser->setTreeNodeStream(parser, ctnstream);
+
+	return  parser;
+}
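+
+/* Illustrative lifecycle sketch (editor's addition, not part of the runtime):
+ * `nodes` is assumed to be a pANTLR3_COMMON_TREE_NODE_STREAM obtained
+ * elsewhere (normally the generated code builds it), the size hint is
+ * arbitrary, and passing NULL for the shared state is assumed to let the
+ * base recognizer allocate its own.
+ *
+ *   pANTLR3_TREE_PARSER tp = antlr3TreeParserNewStream(1024, nodes, NULL);
+ *
+ *   if (tp != NULL)
+ *   {
+ *       // ... invoke the generated tree grammar rules through tp ...
+ *
+ *       tp->free(tp);    // releases the base recognizer and the parser itself
+ *   }
+ */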
+
+/**
+ * \brief
+ * Creates a new Mismatched Tree Node Exception and inserts it in the recognizer's
+ * exception stack.
+ * 
+ * \param recognizer
+ * Context pointer for this recognizer
+ * 
+ */
+ANTLR3_API	void
+antlr3MTNExceptionNew(pANTLR3_BASE_RECOGNIZER recognizer)
+{
+    /* Create a basic recognition exception structure
+     */
+    antlr3RecognitionExceptionNew(recognizer);
+
+    /* Now update it to indicate this is a Mismatched token exception
+     */
+    recognizer->state->exception->name		= ANTLR3_MISMATCHED_TREE_NODE_NAME;
+    recognizer->state->exception->type		= ANTLR3_MISMATCHED_TREE_NODE_EXCEPTION;
+
+    return;
+}
+
+
+static void
+freeParser	(pANTLR3_TREE_PARSER parser)
+{
+	if	(parser->rec != NULL)
+	{
+		// This may have been a delegate or delegator parser, in which case the
+		// state may already have been freed (and therefore set to NULL),
+		// so we ignore the state if we don't have it.
+		//
+		if	(parser->rec->state != NULL)
+		{
+			if	(parser->rec->state->following != NULL)
+			{
+				parser->rec->state->following->free(parser->rec->state->following);
+				parser->rec->state->following = NULL;
+			}
+		}
+	    parser->rec->free(parser->rec);
+	    parser->rec	= NULL;
+    }
+
+    ANTLR3_FREE(parser);
+}
+
+/** Set the input stream and reset the parser
+ */
+static void
+setTreeNodeStream	(pANTLR3_TREE_PARSER parser, pANTLR3_COMMON_TREE_NODE_STREAM input)
+{
+    parser->ctnstream = input;
+    parser->rec->reset		(parser->rec);
+    parser->ctnstream->reset	(parser->ctnstream);
+}
+
+/** Return a pointer to the input stream
+ */
+static pANTLR3_COMMON_TREE_NODE_STREAM
+getTreeNodeStream	(pANTLR3_TREE_PARSER parser)
+{
+    return  parser->ctnstream;
+}
+
+
+/** Override for standard base recognizer mismatch function
+ *  as we have DOWN/UP nodes in the stream that have no line info,
+ *  plus we want to alter the exception type.
+ */
+static void
+mismatch	    (pANTLR3_BASE_RECOGNIZER recognizer, ANTLR3_UINT32 ttype, pANTLR3_BITSET_LIST follow)
+{
+    recognizer->exConstruct(recognizer);
+    recognizer->recoverFromMismatchedToken(recognizer, ttype, follow);
+}
+
+#ifdef ANTLR3_WINDOWS
+#pragma warning	(push)
+#pragma warning (disable : 4100)
+#endif
+
+// Default implementation is for the tree parser and assumes a common tree node stream as
+// supplied by the runtime. You MAY need to override this function if the standard
+// TREE_NODE_STREAM is not what you are using.
+//
+static void *				
+getCurrentInputSymbol		(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM istream)
+{
+	pANTLR3_TREE_NODE_STREAM		tns;
+    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
+
+    tns	    = (pANTLR3_TREE_NODE_STREAM)(istream->super);
+    ctns    = tns->ctns;
+	return tns->_LT(tns, 1);
+}
+
+
+// Default implementation is for the tree parser and assumes the standard BASE_TREE node
+// implementation. You MAY need to override this function if the standard BASE_TREE is not
+// what you are using (see the usage sketch at the end of this file).
+//
+static void *				
+getMissingSymbol			(pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM	istream, pANTLR3_EXCEPTION	e,
+									ANTLR3_UINT32 expectedTokenType, pANTLR3_BITSET_LIST follow)
+{
+	pANTLR3_TREE_NODE_STREAM		tns;
+    pANTLR3_COMMON_TREE_NODE_STREAM	ctns;
+	pANTLR3_BASE_TREE				node;
+	pANTLR3_BASE_TREE				current;
+	pANTLR3_COMMON_TOKEN			token;
+	pANTLR3_STRING					text;
+    ANTLR3_INT32                   i;
+
+	// Dereference the standard pointers
+	//
+    tns	    = (pANTLR3_TREE_NODE_STREAM)(istream->super);
+    ctns    = tns->ctns;
+    
+	// Create a new empty node, by stealing the current one, or the previous one if the current one is EOF
+	//
+	current	= tns->_LT(tns, 1);
+    i       = -1;
+
+	if	(current == &ctns->EOF_NODE.baseTree)
+	{
+		current = tns->_LT(tns, -1);
+        i--;
+	}
+    while (((pANTLR3_COMMON_TREE)(current->super))->factory == NULL)
+	{
+		current = tns->_LT(tns, i--);
+    }
+
+	node	= (pANTLR3_BASE_TREE)current->dupNode(current);
+
+	// Find the newly duplicated token
+	//
+	token	= node->getToken(node);
+
+	// Create the token text that shows it has been inserted
+	//
+	token->setText8			(token, (pANTLR3_UINT8)"<missing ");
+	text = token->getText	(token);
+	text->append8			(text, (const char *)recognizer->state->tokenNames[expectedTokenType]);
+	text->append8			(text, (const char *)">");
+	
+	// Finally return the pointer to our new node
+	//
+	return	node;
+}
+#ifdef ANTLR3_WINDOWS
+#pragma warning	(pop)
+#endif
+
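+/* Illustrative override sketch (editor's addition, not part of the runtime):
+ * as the comments above suggest, a custom node representation may need its
+ * own getCurrentInputSymbol/getMissingSymbol. myGetMissingSymbol is a
+ * hypothetical user function matching the signature declared at the top of
+ * this file, and `nodes` is an existing tree node stream.
+ *
+ *   pANTLR3_TREE_PARSER tp = antlr3TreeParserNewStream(1024, nodes, NULL);
+ *
+ *   if (tp != NULL)
+ *   {
+ *       tp->rec->getMissingSymbol = myGetMissingSymbol;    // replace the default above
+ *   }
+ */
+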
diff --git a/antlr-3.4/runtime/C/vsrulefiles/antlr3lexer.rules b/runtime/C/vsrulefiles/antlr3lexer.rules
similarity index 100%
rename from antlr-3.4/runtime/C/vsrulefiles/antlr3lexer.rules
rename to runtime/C/vsrulefiles/antlr3lexer.rules
diff --git a/antlr-3.4/runtime/C/vsrulefiles/antlr3lexerandparser.rules b/runtime/C/vsrulefiles/antlr3lexerandparser.rules
similarity index 100%
rename from antlr-3.4/runtime/C/vsrulefiles/antlr3lexerandparser.rules
rename to runtime/C/vsrulefiles/antlr3lexerandparser.rules
diff --git a/antlr-3.4/runtime/C/vsrulefiles/antlr3parser.rules b/runtime/C/vsrulefiles/antlr3parser.rules
similarity index 100%
rename from antlr-3.4/runtime/C/vsrulefiles/antlr3parser.rules
rename to runtime/C/vsrulefiles/antlr3parser.rules
diff --git a/antlr-3.4/runtime/C/vsrulefiles/antlr3treeparser.rules b/runtime/C/vsrulefiles/antlr3treeparser.rules
similarity index 100%
rename from antlr-3.4/runtime/C/vsrulefiles/antlr3treeparser.rules
rename to runtime/C/vsrulefiles/antlr3treeparser.rules
diff --git a/antlr-3.4/runtime/CSharp2/API CHANGES.TXT b/runtime/CSharp2/API CHANGES.TXT
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/API CHANGES.TXT
rename to runtime/CSharp2/API CHANGES.TXT
diff --git a/antlr-3.4/runtime/CSharp2/Cutting a new release.txt b/runtime/CSharp2/Cutting a new release.txt
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Cutting a new release.txt
rename to runtime/CSharp2/Cutting a new release.txt
diff --git a/antlr-3.4/runtime/CSharp2/LICENSE.TXT b/runtime/CSharp2/LICENSE.TXT
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/LICENSE.TXT
rename to runtime/CSharp2/LICENSE.TXT
diff --git a/antlr-3.4/runtime/CSharp2/Libraries/MbUnit/MbUnit.Framework.dll b/runtime/CSharp2/Libraries/MbUnit/MbUnit.Framework.dll
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Libraries/MbUnit/MbUnit.Framework.dll
rename to runtime/CSharp2/Libraries/MbUnit/MbUnit.Framework.dll
Binary files differ
diff --git a/antlr-3.4/runtime/CSharp2/Libraries/MbUnit/MbUnit.MSBuild.Tasks.dll b/runtime/CSharp2/Libraries/MbUnit/MbUnit.MSBuild.Tasks.dll
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Libraries/MbUnit/MbUnit.MSBuild.Tasks.dll
rename to runtime/CSharp2/Libraries/MbUnit/MbUnit.MSBuild.Tasks.dll
Binary files differ
diff --git a/antlr-3.4/runtime/CSharp2/Libraries/MbUnit/MbUnit.Tasks.dll b/runtime/CSharp2/Libraries/MbUnit/MbUnit.Tasks.dll
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Libraries/MbUnit/MbUnit.Tasks.dll
rename to runtime/CSharp2/Libraries/MbUnit/MbUnit.Tasks.dll
Binary files differ
diff --git a/antlr-3.4/runtime/CSharp2/Libraries/MbUnit/QuickGraph.Algorithms.dll b/runtime/CSharp2/Libraries/MbUnit/QuickGraph.Algorithms.dll
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Libraries/MbUnit/QuickGraph.Algorithms.dll
rename to runtime/CSharp2/Libraries/MbUnit/QuickGraph.Algorithms.dll
Binary files differ
diff --git a/antlr-3.4/runtime/CSharp2/Libraries/MbUnit/QuickGraph.dll b/runtime/CSharp2/Libraries/MbUnit/QuickGraph.dll
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Libraries/MbUnit/QuickGraph.dll
rename to runtime/CSharp2/Libraries/MbUnit/QuickGraph.dll
Binary files differ
diff --git a/antlr-3.4/runtime/CSharp2/Libraries/StringTemplate.NET/net-2.0/StringTemplate.dll b/runtime/CSharp2/Libraries/StringTemplate.NET/net-2.0/StringTemplate.dll
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Libraries/StringTemplate.NET/net-2.0/StringTemplate.dll
rename to runtime/CSharp2/Libraries/StringTemplate.NET/net-2.0/StringTemplate.dll
Binary files differ
diff --git a/antlr-3.4/runtime/CSharp2/Libraries/StringTemplate.NET/net-2.0/antlr.runtime.dll b/runtime/CSharp2/Libraries/StringTemplate.NET/net-2.0/antlr.runtime.dll
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Libraries/StringTemplate.NET/net-2.0/antlr.runtime.dll
rename to runtime/CSharp2/Libraries/StringTemplate.NET/net-2.0/antlr.runtime.dll
Binary files differ
diff --git a/antlr-3.4/runtime/CSharp2/NOTICE.TXT b/runtime/CSharp2/NOTICE.TXT
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/NOTICE.TXT
rename to runtime/CSharp2/NOTICE.TXT
diff --git a/antlr-3.4/runtime/CSharp2/README.TXT b/runtime/CSharp2/README.TXT
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/README.TXT
rename to runtime/CSharp2/README.TXT
diff --git "a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime \050VS2005\051.sln" "b/runtime/CSharp2/Sources/Antlr3.Runtime \050VS2005\051.sln"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime \050VS2005\051.sln"
rename to "runtime/CSharp2/Sources/Antlr3.Runtime \050VS2005\051.sln"
diff --git "a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime \050VS2008\051.sln" "b/runtime/CSharp2/Sources/Antlr3.Runtime \050VS2008\051.sln"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime \050VS2008\051.sln"
rename to "runtime/CSharp2/Sources/Antlr3.Runtime \050VS2008\051.sln"
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ANTLRxxxxStreamFixture.cs b/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ANTLRxxxxStreamFixture.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ANTLRxxxxStreamFixture.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ANTLRxxxxStreamFixture.cs
diff --git "a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Antlr3.Runtime.Tests \050VS2005\051.csproj" "b/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Antlr3.Runtime.Tests \050VS2005\051.csproj"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Antlr3.Runtime.Tests \050VS2005\051.csproj"
rename to "runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Antlr3.Runtime.Tests \050VS2005\051.csproj"
diff --git "a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Antlr3.Runtime.Tests \050VS2008\051.csproj" "b/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Antlr3.Runtime.Tests \050VS2008\051.csproj"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Antlr3.Runtime.Tests \050VS2008\051.csproj"
rename to "runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Antlr3.Runtime.Tests \050VS2008\051.csproj"
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ITreeFixture.cs b/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ITreeFixture.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ITreeFixture.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ITreeFixture.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ITreeNodeStreamFixture.cs b/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ITreeNodeStreamFixture.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ITreeNodeStreamFixture.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime.Tests/ITreeNodeStreamFixture.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Properties/AssemblyInfo.cs b/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Properties/AssemblyInfo.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Properties/AssemblyInfo.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime.Tests/Properties/AssemblyInfo.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/RewriteRuleXxxxStreamFixture.cs b/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/RewriteRuleXxxxStreamFixture.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/RewriteRuleXxxxStreamFixture.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime.Tests/RewriteRuleXxxxStreamFixture.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/TestDriver.cs b/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/TestDriver.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/TestDriver.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime.Tests/TestDriver.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/TestFixtureBase.cs b/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/TestFixtureBase.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime.Tests/TestFixtureBase.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime.Tests/TestFixtureBase.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Collections/HashList.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Collections/HashList.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Collections/HashList.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Collections/HashList.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Collections/StackList.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Collections/StackList.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Collections/StackList.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Collections/StackList.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/BlankDebugEventListener.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/BlankDebugEventListener.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/BlankDebugEventListener.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/BlankDebugEventListener.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventHub.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventHub.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventHub.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventHub.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventListenerConstants.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventListenerConstants.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventListenerConstants.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventListenerConstants.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventRepeater.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventRepeater.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventRepeater.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventRepeater.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventSocketProxy.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventSocketProxy.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventSocketProxy.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugEventSocketProxy.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugParser.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugParser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugParser.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugParser.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTokenStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTokenStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeAdaptor.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeAdaptor.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeAdaptor.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeAdaptor.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeNodeStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeNodeStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeNodeStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeNodeStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeParser.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeParser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeParser.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/DebugTreeParser.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/IDebugEventListener.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/IDebugEventListener.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/IDebugEventListener.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/IDebugEventListener.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/ParseTreeBuilder.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/ParseTreeBuilder.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/ParseTreeBuilder.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/ParseTreeBuilder.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/Profiler.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/Profiler.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/Profiler.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/Profiler.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/RemoteDebugEventSocketListener.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/RemoteDebugEventSocketListener.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/RemoteDebugEventSocketListener.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/RemoteDebugEventSocketListener.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/TraceDebugEventListener.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/TraceDebugEventListener.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/TraceDebugEventListener.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/TraceDebugEventListener.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/Tracer.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/Tracer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/Tracer.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Debug/Tracer.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/Check.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/Check.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/Check.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/Check.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/DictionaryExtensions.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/DictionaryExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/DictionaryExtensions.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/DictionaryExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/EnumerableExtensions.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/EnumerableExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/EnumerableExtensions.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/EnumerableExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/ExceptionExtensions.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/ExceptionExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/ExceptionExtensions.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/ExceptionExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/IOExtensions.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/IOExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/IOExtensions.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/IOExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/JSystem.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/JSystem.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/JSystem.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/JSystem.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/LexerExtensions.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/LexerExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/LexerExtensions.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/LexerExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/StringExtensions.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/StringExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/StringExtensions.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/StringExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/StringTokenizer.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/StringTokenizer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/StringTokenizer.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.JavaExtensions/StringTokenizer.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/DoubleKeyMap.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/DoubleKeyMap.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/DoubleKeyMap.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/DoubleKeyMap.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/ErrorManager.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/ErrorManager.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/ErrorManager.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/ErrorManager.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/FastQueue.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/FastQueue.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/FastQueue.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/FastQueue.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/LookaheadStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/LookaheadStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/LookaheadStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/LookaheadStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/Stats.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/Stats.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/Stats.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Misc/Stats.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BaseTree.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BaseTree.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BaseTree.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BaseTree.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BaseTreeAdaptor.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BaseTreeAdaptor.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BaseTreeAdaptor.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BaseTreeAdaptor.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BufferedTreeNodeStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BufferedTreeNodeStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BufferedTreeNodeStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/BufferedTreeNodeStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonErrorNode.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonErrorNode.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonErrorNode.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonErrorNode.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTree.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTree.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTree.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTree.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTreeAdaptor.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTreeAdaptor.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTreeAdaptor.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTreeAdaptor.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTreeNodeStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTreeNodeStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTreeNodeStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/CommonTreeNodeStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/DotTreeGenerator.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/DotTreeGenerator.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/DotTreeGenerator.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/DotTreeGenerator.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITree.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITree.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITree.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITree.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeAdaptor.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeAdaptor.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeAdaptor.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeAdaptor.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeNodeStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeNodeStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeNodeStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeNodeStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeVisitorAction.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeVisitorAction.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeVisitorAction.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ITreeVisitorAction.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ParseTree.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ParseTree.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ParseTree.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ParseTree.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteCardinalityException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteCardinalityException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteCardinalityException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteCardinalityException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteEarlyExitException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteEarlyExitException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteEarlyExitException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteEarlyExitException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteEmptyStreamException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteEmptyStreamException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteEmptyStreamException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteEmptyStreamException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleElementStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleElementStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleElementStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleElementStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleNodeStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleNodeStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleNodeStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleNodeStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleSubtreeStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleSubtreeStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleSubtreeStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleSubtreeStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleTokenStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleTokenStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/RewriteRuleTokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeConstants.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeConstants.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeConstants.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeConstants.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeFilter.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeFilter.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeFilter.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeFilter.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeIterator.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeIterator.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeIterator.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeIterator.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeParser.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeParser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeParser.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeParser.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreePatternLexer.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreePatternLexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreePatternLexer.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreePatternLexer.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreePatternParser.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreePatternParser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreePatternParser.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreePatternParser.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeRewriter.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeRewriter.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeRewriter.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeRewriter.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeRuleReturnScope.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeRuleReturnScope.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeRuleReturnScope.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeRuleReturnScope.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeVisitor.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeVisitor.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeVisitor.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeVisitor.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeWizard.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeWizard.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeWizard.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/TreeWizard.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRFileStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRFileStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRFileStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRFileStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRInputStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRInputStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRInputStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRInputStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRReaderStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRReaderStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRReaderStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRReaderStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRStringStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRStringStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRStringStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ANTLRStringStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BaseRecognizer.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BaseRecognizer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BaseRecognizer.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BaseRecognizer.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BitSet.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BitSet.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BitSet.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BitSet.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BufferedTokenStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BufferedTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BufferedTokenStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/BufferedTokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CharStreamConstants.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CharStreamConstants.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CharStreamConstants.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CharStreamConstants.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CharStreamState.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CharStreamState.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CharStreamState.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CharStreamState.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ClassicToken.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ClassicToken.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ClassicToken.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ClassicToken.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CommonToken.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CommonToken.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CommonToken.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CommonToken.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CommonTokenStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CommonTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CommonTokenStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/CommonTokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Constants.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Constants.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Constants.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Constants.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/DFA.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/DFA.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/DFA.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/DFA.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/EarlyExitException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/EarlyExitException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/EarlyExitException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/EarlyExitException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/FailedPredicateException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/FailedPredicateException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/FailedPredicateException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/FailedPredicateException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/GrammarRuleAttribute.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/GrammarRuleAttribute.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/GrammarRuleAttribute.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/GrammarRuleAttribute.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ICharStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ICharStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ICharStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ICharStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/IIntStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/IIntStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/IIntStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/IIntStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/IToken.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/IToken.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/IToken.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/IToken.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenSource.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenSource.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenSource.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenSource.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenStreamInformation.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenStreamInformation.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenStreamInformation.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ITokenStreamInformation.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/LegacyCommonTokenStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/LegacyCommonTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/LegacyCommonTokenStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/LegacyCommonTokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Lexer.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Lexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Lexer.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Lexer.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedNotSetException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedNotSetException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedNotSetException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedNotSetException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedRangeException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedRangeException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedRangeException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedRangeException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedSetException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedSetException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedSetException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedSetException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedTokenException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedTokenException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedTokenException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedTokenException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedTreeNodeException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedTreeNodeException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedTreeNodeException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MismatchedTreeNodeException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MissingTokenException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MissingTokenException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MissingTokenException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/MissingTokenException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/NoViableAltException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/NoViableAltException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/NoViableAltException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/NoViableAltException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Parser.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Parser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Parser.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/Parser.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ParserRuleReturnScope.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ParserRuleReturnScope.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ParserRuleReturnScope.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/ParserRuleReturnScope.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RecognitionException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RecognitionException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RecognitionException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RecognitionException.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RecognizerSharedState.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RecognizerSharedState.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RecognizerSharedState.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RecognizerSharedState.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RuleReturnScope.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RuleReturnScope.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RuleReturnScope.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/RuleReturnScope.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/TokenConstants.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/TokenConstants.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/TokenConstants.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/TokenConstants.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/TokenRewriteStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/TokenRewriteStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/TokenRewriteStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/TokenRewriteStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/UnbufferedTokenStream.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/UnbufferedTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/UnbufferedTokenStream.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/UnbufferedTokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/UnwantedTokenException.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/UnwantedTokenException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/UnwantedTokenException.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime/UnwantedTokenException.cs
diff --git "a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr3.Runtime \050VS2005\051.csproj" "b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr3.Runtime \050VS2005\051.csproj"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr3.Runtime \050VS2005\051.csproj"
rename to "runtime/CSharp2/Sources/Antlr3.Runtime/Antlr3.Runtime \050VS2005\051.csproj"
diff --git "a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr3.Runtime \050VS2008\051.csproj" "b/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr3.Runtime \050VS2008\051.csproj"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr3.Runtime \050VS2008\051.csproj"
rename to "runtime/CSharp2/Sources/Antlr3.Runtime/Antlr3.Runtime \050VS2008\051.csproj"
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/AssemblyInfo.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/AssemblyInfo.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/AssemblyInfo.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/AssemblyInfo.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Messages.Designer.cs b/runtime/CSharp2/Sources/Antlr3.Runtime/Messages.Designer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Messages.Designer.cs
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Messages.Designer.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Messages.resx b/runtime/CSharp2/Sources/Antlr3.Runtime/Messages.resx
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Messages.resx
rename to runtime/CSharp2/Sources/Antlr3.Runtime/Messages.resx
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/default.build b/runtime/CSharp2/Sources/Antlr3.Runtime/default.build
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/default.build
rename to runtime/CSharp2/Sources/Antlr3.Runtime/default.build
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Utility/Antlr.Utility.Tree/DOTTreeGenerator.cs b/runtime/CSharp2/Sources/Antlr3.Utility/Antlr.Utility.Tree/DOTTreeGenerator.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Utility/Antlr.Utility.Tree/DOTTreeGenerator.cs
rename to runtime/CSharp2/Sources/Antlr3.Utility/Antlr.Utility.Tree/DOTTreeGenerator.cs
diff --git "a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Utility/Antlr3.Utility \050VS2005\051.csproj" "b/runtime/CSharp2/Sources/Antlr3.Utility/Antlr3.Utility \050VS2005\051.csproj"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp2/Sources/Antlr3.Utility/Antlr3.Utility \050VS2005\051.csproj"
rename to "runtime/CSharp2/Sources/Antlr3.Utility/Antlr3.Utility \050VS2005\051.csproj"
diff --git "a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Utility/Antlr3.Utility \050VS2008\051.csproj" "b/runtime/CSharp2/Sources/Antlr3.Utility/Antlr3.Utility \050VS2008\051.csproj"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp2/Sources/Antlr3.Utility/Antlr3.Utility \050VS2008\051.csproj"
rename to "runtime/CSharp2/Sources/Antlr3.Utility/Antlr3.Utility \050VS2008\051.csproj"
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Utility/AssemblyInfo.cs b/runtime/CSharp2/Sources/Antlr3.Utility/AssemblyInfo.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Utility/AssemblyInfo.cs
rename to runtime/CSharp2/Sources/Antlr3.Utility/AssemblyInfo.cs
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Utility/default.build b/runtime/CSharp2/Sources/Antlr3.Utility/default.build
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3.Utility/default.build
rename to runtime/CSharp2/Sources/Antlr3.Utility/default.build
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3_KeyPair.snk b/runtime/CSharp2/Sources/Antlr3_KeyPair.snk
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3_KeyPair.snk
rename to runtime/CSharp2/Sources/Antlr3_KeyPair.snk
Binary files differ
diff --git a/antlr-3.4/runtime/CSharp2/Sources/Antlr3_PublicKey.snk b/runtime/CSharp2/Sources/Antlr3_PublicKey.snk
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/Sources/Antlr3_PublicKey.snk
rename to runtime/CSharp2/Sources/Antlr3_PublicKey.snk
Binary files differ
diff --git a/antlr-3.4/runtime/CSharp2/all.antlr3.runtime.net.build b/runtime/CSharp2/all.antlr3.runtime.net.build
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/all.antlr3.runtime.net.build
rename to runtime/CSharp2/all.antlr3.runtime.net.build
diff --git a/antlr-3.4/runtime/CSharp2/antlr3.runtime.net.common.inc b/runtime/CSharp2/antlr3.runtime.net.common.inc
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/antlr3.runtime.net.common.inc
rename to runtime/CSharp2/antlr3.runtime.net.common.inc
diff --git a/antlr-3.4/runtime/CSharp2/dist/DOT-NET-runtime-3.1.3.zip b/runtime/CSharp2/dist/DOT-NET-runtime-3.1.3.zip
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/dist/DOT-NET-runtime-3.1.3.zip
rename to runtime/CSharp2/dist/DOT-NET-runtime-3.1.3.zip
Binary files differ
diff --git a/antlr-3.4/runtime/CSharp2/doxyfile b/runtime/CSharp2/doxyfile
similarity index 100%
rename from antlr-3.4/runtime/CSharp2/doxyfile
rename to runtime/CSharp2/doxyfile
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Antlr3.Runtime.Debug.csproj b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Antlr3.Runtime.Debug.csproj
new file mode 100644
index 0000000..9c52017
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Antlr3.Runtime.Debug.csproj
@@ -0,0 +1,79 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+    <ProductVersion>9.0.30729</ProductVersion>
+    <SchemaVersion>2.0</SchemaVersion>
+    <ProjectGuid>{5EE27A90-B023-42C9-AAF1-52B0424C5D0B}</ProjectGuid>
+    <OutputType>Library</OutputType>
+    <AppDesignerFolder>Properties</AppDesignerFolder>
+    <RootNamespace>Antlr.Runtime.Debug</RootNamespace>
+    <AssemblyName>Antlr3.Runtime.Debug</AssemblyName>
+    <TargetFrameworkVersion>v2.0</TargetFrameworkVersion>
+    <FileAlignment>512</FileAlignment>
+    <SignAssembly>true</SignAssembly>
+    <AssemblyOriginatorKeyFile>..\..\..\..\..\..\..\..\..\keys\antlr\Key.snk</AssemblyOriginatorKeyFile>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
+    <DebugSymbols>true</DebugSymbols>
+    <DebugType>full</DebugType>
+    <Optimize>false</Optimize>
+    <OutputPath>bin\Debug\</OutputPath>
+    <DefineConstants>DEBUG;TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+    <DocumentationFile>bin\Debug\Antlr3.Runtime.Debug.xml</DocumentationFile>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
+    <DebugType>pdbonly</DebugType>
+    <Optimize>true</Optimize>
+    <OutputPath>bin\Release\</OutputPath>
+    <DefineConstants>TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+    <DocumentationFile>bin\Release\Antlr3.Runtime.Debug.xml</DocumentationFile>
+  </PropertyGroup>
+  <ItemGroup>
+    <Reference Include="System" />
+  </ItemGroup>
+  <ItemGroup>
+    <Compile Include="BlankDebugEventListener.cs" />
+    <Compile Include="DebugEventHub.cs" />
+    <Compile Include="DebugEventListenerConstants.cs" />
+    <Compile Include="DebugEventRepeater.cs" />
+    <Compile Include="DebugEventSocketProxy.cs" />
+    <Compile Include="DebugParser.cs" />
+    <Compile Include="DebugTokenStream.cs" />
+    <Compile Include="DebugTreeAdaptor.cs" />
+    <Compile Include="DebugTreeNodeStream.cs" />
+    <Compile Include="DebugTreeParser.cs" />
+    <Compile Include="JavaExtensions\ExceptionExtensions.cs" />
+    <Compile Include="Misc\DoubleKeyMap`3.cs" />
+    <Compile Include="Misc\Stats.cs" />
+    <None Include="..\..\..\..\..\..\..\..\..\keys\antlr\Key.snk">
+      <Link>Key.snk</Link>
+    </None>
+    <None Include="ParserDebugger.cs" />
+    <Compile Include="ParseTreeBuilder.cs" />
+    <Compile Include="Profiler.cs" />
+    <Compile Include="Properties\AssemblyInfo.cs" />
+    <Compile Include="RemoteDebugEventSocketListener.cs" />
+    <Compile Include="TraceDebugEventListener.cs" />
+    <Compile Include="Tracer.cs" />
+  </ItemGroup>
+  <ItemGroup>
+    <ProjectReference Include="..\Antlr3.Runtime\Antlr3.Runtime.csproj">
+      <Project>{8FDC0A87-9005-4D5A-AB75-E55CEB575559}</Project>
+      <Name>Antlr3.Runtime</Name>
+    </ProjectReference>
+  </ItemGroup>
+  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
+  <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
+       Other similar extension points exist, see Microsoft.Common.targets.
+  <Target Name="BeforeBuild">
+  </Target>
+  <Target Name="AfterBuild">
+  </Target>
+  -->
+</Project>
\ No newline at end of file
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Antlr3.Runtime.Debug.csproj.vspscc b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Antlr3.Runtime.Debug.csproj.vspscc
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Antlr3.Runtime.Debug.csproj.vspscc
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Antlr3.Runtime.Debug.csproj.vspscc
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/BlankDebugEventListener.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/BlankDebugEventListener.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/BlankDebugEventListener.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/BlankDebugEventListener.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventHub.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventHub.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventHub.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventHub.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventListenerConstants.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventListenerConstants.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventListenerConstants.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventListenerConstants.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventRepeater.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventRepeater.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventRepeater.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventRepeater.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventSocketProxy.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventSocketProxy.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventSocketProxy.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugEventSocketProxy.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugParser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugParser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugParser.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugParser.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTokenStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTokenStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeAdaptor.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeAdaptor.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeAdaptor.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeAdaptor.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeNodeStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeNodeStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeNodeStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeNodeStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeParser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeParser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeParser.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/DebugTreeParser.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/JavaExtensions/ExceptionExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/JavaExtensions/ExceptionExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/JavaExtensions/ExceptionExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/JavaExtensions/ExceptionExtensions.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Misc/DoubleKeyMap\1403.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Misc/DoubleKeyMap\1403.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Misc/DoubleKeyMap\1403.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Misc/DoubleKeyMap\1403.cs"
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Misc/Stats.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Misc/Stats.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Misc/Stats.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Misc/Stats.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/ParseTreeBuilder.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/ParseTreeBuilder.cs
new file mode 100644
index 0000000..cb5f678
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/ParseTreeBuilder.cs
@@ -0,0 +1,140 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Debug
+{
+    using System.Collections.Generic;
+    using ParseTree = Antlr.Runtime.Tree.ParseTree;
+
+    /** <summary>
+     *  This parser listener tracks rule entry/exit and token matches
+     *  to build a simple parse tree using ParseTree nodes.
+     *  </summary>
+     */
+    public class ParseTreeBuilder : BlankDebugEventListener
+    {
+        public const string EPSILON_PAYLOAD = "<epsilon>";
+
+        Stack<ParseTree> callStack = new Stack<ParseTree>();
+        List<IToken> hiddenTokens = new List<IToken>();
+        int backtracking = 0;
+
+        public ParseTreeBuilder( string grammarName )
+        {
+            ParseTree root = Create( "<grammar " + grammarName + ">" );
+            callStack.Push( root );
+        }
+
+        public virtual ParseTree Tree
+        {
+            get
+            {
+                ParseTree[] stack = callStack.ToArray();
+                return stack[stack.Length - 1];
+            }
+        }
+
+        /** <summary>
+         *  What kind of node to create.  You might want to override
+         *  so I factored out creation here.
+         *  </summary>
+         */
+        public virtual ParseTree Create( object payload )
+        {
+            return new ParseTree( payload );
+        }
+
+        public virtual ParseTree EpsilonNode()
+        {
+            return Create( EPSILON_PAYLOAD );
+        }
+
+        /** <summary>Backtracking or cyclic DFA, don't want to add nodes to tree</summary> */
+        public override void EnterDecision( int d, bool couldBacktrack )
+        {
+            backtracking++;
+        }
+        public override void ExitDecision( int i )
+        {
+            backtracking--;
+        }
+
+        public override void EnterRule( string filename, string ruleName )
+        {
+            if ( backtracking > 0 )
+                return;
+            ParseTree parentRuleNode = callStack.Peek();
+            ParseTree ruleNode = Create( ruleName );
+            parentRuleNode.AddChild( ruleNode );
+            callStack.Push( ruleNode );
+        }
+
+        public override void ExitRule( string filename, string ruleName )
+        {
+            if ( backtracking > 0 )
+                return;
+            ParseTree ruleNode = callStack.Peek();
+            if ( ruleNode.ChildCount == 0 )
+            {
+                ruleNode.AddChild( EpsilonNode() );
+            }
+            callStack.Pop();
+        }
+
+        public override void ConsumeToken( IToken token )
+        {
+            if ( backtracking > 0 )
+                return;
+            ParseTree ruleNode = callStack.Peek();
+            ParseTree elementNode = Create( token );
+            elementNode.hiddenTokens = this.hiddenTokens;
+            this.hiddenTokens = new List<IToken>();
+            ruleNode.AddChild( elementNode );
+        }
+
+        public override void ConsumeHiddenToken( IToken token )
+        {
+            if ( backtracking > 0 )
+                return;
+            hiddenTokens.Add( token );
+        }
+
+        public override void RecognitionException( RecognitionException e )
+        {
+            if ( backtracking > 0 )
+                return;
+            ParseTree ruleNode = callStack.Peek();
+            ParseTree errorNode = Create( e );
+            ruleNode.AddChild( errorNode );
+        }
+    }
+}
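
Note on the file added above: ParseTreeBuilder is an IDebugEventListener that mirrors rule entry/exit and consumed tokens into Antlr.Runtime.Tree.ParseTree nodes, and it ignores everything fired between EnterDecision and ExitDecision so backtracking attempts do not pollute the tree. A minimal usage sketch follows; it assumes a grammar generated with ANTLR's -debug option so the generated parser accepts an IDebugEventListener (ExprLexer, ExprParser and the start rule prog are hypothetical names, not part of this change), and it assumes the usual ITree.ToStringTree() for printing.

    // Illustrative sketch only, not part of this change.
    using System;
    using Antlr.Runtime;
    using Antlr.Runtime.Debug;

    class ParseTreeDemo
    {
        static void Main()
        {
            ANTLRStringStream input = new ANTLRStringStream("1+2*3");
            CommonTokenStream tokens = new CommonTokenStream(new ExprLexer(input)); // hypothetical generated lexer

            // Rule entry/exit and token consumption are recorded; events fired
            // while the parser is backtracking are ignored by the builder.
            ParseTreeBuilder builder = new ParseTreeBuilder("Expr");
            ExprParser parser = new ExprParser(tokens, builder); // hypothetical -debug generated parser

            parser.prog(); // hypothetical start rule
            Console.WriteLine(builder.Tree.ToStringTree());
        }
    }
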
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/ParserDebugger.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/ParserDebugger.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/ParserDebugger.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/ParserDebugger.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Profiler.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Profiler.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Profiler.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Profiler.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Properties/AssemblyInfo.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..038cddb
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Properties/AssemblyInfo.cs
@@ -0,0 +1,70 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+using System;
+using System.Reflection;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following 
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle( "Antlr3.Runtime.Debug" )]
+[assembly: AssemblyDescription( "" )]
+[assembly: AssemblyConfiguration( "" )]
+[assembly: AssemblyCompany( "Tunnel Vision Laboratories, LLC" )]
+[assembly: AssemblyProduct( "Antlr3.Runtime.Debug" )]
+[assembly: AssemblyCopyright( "Copyright © Sam Harwell 2013" )]
+[assembly: AssemblyTrademark( "" )]
+[assembly: AssemblyCulture( "" )]
+[assembly: CLSCompliant( true )]
+
+// Setting ComVisible to false makes the types in this assembly not visible 
+// to COM components.  If you need to access a type in this assembly from 
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible( false )]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid( "9f8fa018-6766-404c-9e72-551407e1b173" )]
+
+/* Version information for an assembly consists of four values in the following order:
+ *
+ *   Major.Minor.Build.Revision
+ *
+ * These values are updated according to the following:
+ *   1. Major.Minor follows the ANTLR release schedule
+ *   2. Build is incremented each time the C# port is packaged for release (regardless
+ *      of whether it's an incremental or nightly). The value resets to zero whenever
+ *      the Major or Minor version is incremented.
+ *   3. Revision is the Perforce changelist number associated with the release.
+ */
+[assembly: AssemblyVersion("3.5.0.2")]
+[assembly: AssemblyFileVersion("3.5.0.2")]
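
Note on the file added above: AssemblyInfo.cs pins both AssemblyVersion and AssemblyFileVersion at 3.5.0.2 and documents the Major.Minor.Build.Revision convention in its comment block. The two attributes surface in different places, which the hedged sketch below illustrates; using DebugParser as the anchor type is only a convenience, any type from Antlr3.Runtime.Debug would do.

    // Illustrative sketch, not part of this change: reading the version values
    // declared in AssemblyInfo.cs back out of the built Antlr3.Runtime.Debug assembly.
    using System;
    using System.Diagnostics;
    using System.Reflection;

    static class VersionProbe
    {
        static void Main()
        {
            Assembly asm = typeof(Antlr.Runtime.Debug.DebugParser).Assembly;

            // AssemblyVersion ("3.5.0.2") is part of the strong-named assembly identity.
            Console.WriteLine(asm.GetName().Version);

            // AssemblyFileVersion is written into the file's Win32 version resource.
            Console.WriteLine(FileVersionInfo.GetVersionInfo(asm.Location).FileVersion);
        }
    }
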
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/RemoteDebugEventSocketListener.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/RemoteDebugEventSocketListener.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/RemoteDebugEventSocketListener.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/RemoteDebugEventSocketListener.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/TraceDebugEventListener.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/TraceDebugEventListener.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/TraceDebugEventListener.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/TraceDebugEventListener.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Tracer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Tracer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Tracer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Debug/Tracer.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Antlr3.Runtime.JavaExtensions.csproj b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Antlr3.Runtime.JavaExtensions.csproj
new file mode 100644
index 0000000..2c90504
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Antlr3.Runtime.JavaExtensions.csproj
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+    <ProductVersion>9.0.30729</ProductVersion>
+    <SchemaVersion>2.0</SchemaVersion>
+    <ProjectGuid>{A7EEC557-EB14-451C-9616-B7A61F4ECE69}</ProjectGuid>
+    <OutputType>Library</OutputType>
+    <AppDesignerFolder>Properties</AppDesignerFolder>
+    <RootNamespace>Antlr3.Runtime.JavaExtensions</RootNamespace>
+    <AssemblyName>Antlr3.Runtime.JavaExtensions</AssemblyName>
+    <TargetFrameworkVersion>v3.5</TargetFrameworkVersion>
+    <FileAlignment>512</FileAlignment>
+    <TargetFrameworkProfile>Client</TargetFrameworkProfile>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
+    <DebugSymbols>true</DebugSymbols>
+    <DebugType>full</DebugType>
+    <Optimize>false</Optimize>
+    <OutputPath>bin\Debug\</OutputPath>
+    <DefineConstants>DEBUG;TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
+    <DebugType>pdbonly</DebugType>
+    <Optimize>true</Optimize>
+    <OutputPath>bin\Release\</OutputPath>
+    <DefineConstants>TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SignAssembly>true</SignAssembly>
+  </PropertyGroup>
+  <PropertyGroup>
+    <AssemblyOriginatorKeyFile>..\..\..\..\..\..\..\..\..\keys\antlr\Key.snk</AssemblyOriginatorKeyFile>
+  </PropertyGroup>
+  <ItemGroup>
+    <Reference Include="System" />
+    <Reference Include="System.Core">
+      <RequiredTargetFramework>3.5</RequiredTargetFramework>
+    </Reference>
+    <Reference Include="System.Xml.Linq">
+      <RequiredTargetFramework>3.5</RequiredTargetFramework>
+    </Reference>
+    <Reference Include="System.Data.DataSetExtensions">
+      <RequiredTargetFramework>3.5</RequiredTargetFramework>
+    </Reference>
+    <Reference Include="System.Data" />
+    <Reference Include="System.Xml" />
+  </ItemGroup>
+  <ItemGroup>
+    <Compile Include="DictionaryExtensions.cs" />
+    <Compile Include="ExceptionExtensions.cs" />
+    <Compile Include="IOExtensions.cs" />
+    <Compile Include="LexerExtensions.cs" />
+    <Compile Include="JSystem.cs" />
+    <Compile Include="ListExtensions.cs" />
+    <Compile Include="ObjectExtensions.cs" />
+    <Compile Include="Properties\AssemblyInfo.cs" />
+    <Compile Include="SetExtensions.cs" />
+    <Compile Include="StackExtensions.cs" />
+    <Compile Include="StringBuilderExtensions.cs" />
+    <Compile Include="StringExtensions.cs" />
+    <Compile Include="StringTokenizer.cs" />
+    <Compile Include="SubList.cs" />
+    <Compile Include="TreeExtensions.cs" />
+    <Compile Include="TypeExtensions.cs" />
+  </ItemGroup>
+  <ItemGroup>
+    <ProjectReference Include="..\Antlr3.Runtime\Antlr3.Runtime.csproj">
+      <Project>{8FDC0A87-9005-4D5A-AB75-E55CEB575559}</Project>
+      <Name>Antlr3.Runtime</Name>
+    </ProjectReference>
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\..\..\..\..\..\..\keys\antlr\Key.snk">
+      <Link>Key.snk</Link>
+    </None>
+  </ItemGroup>
+  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
+  <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
+       Other similar extension points exist, see Microsoft.Common.targets.
+  <Target Name="BeforeBuild">
+  </Target>
+  <Target Name="AfterBuild">
+  </Target>
+  -->
+</Project>
\ No newline at end of file
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Antlr3.Runtime.JavaExtensions.csproj.vspscc b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Antlr3.Runtime.JavaExtensions.csproj.vspscc
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Antlr3.Runtime.JavaExtensions.csproj.vspscc
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Antlr3.Runtime.JavaExtensions.csproj.vspscc
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/DictionaryExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/DictionaryExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/DictionaryExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/DictionaryExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ExceptionExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ExceptionExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ExceptionExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ExceptionExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/IOExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/IOExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/IOExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/IOExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/JSystem.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/JSystem.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/JSystem.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/JSystem.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/LexerExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/LexerExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/LexerExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/LexerExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ListExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ListExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ListExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ListExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ObjectExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ObjectExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ObjectExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/ObjectExtensions.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Properties/AssemblyInfo.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..00e66a9
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/Properties/AssemblyInfo.cs
@@ -0,0 +1,69 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2010 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+using System;
+using System.Reflection;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following 
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle( "Antlr3.Runtime.JavaExtensions" )]
+[assembly: AssemblyDescription( "" )]
+[assembly: AssemblyConfiguration( "" )]
+[assembly: AssemblyCompany( "Tunnel Vision Laboratories, LLC" )]
+[assembly: AssemblyProduct( "Antlr3.Runtime.JavaExtensions" )]
+[assembly: AssemblyCopyright( "Copyright © Sam Harwell 2013" )]
+[assembly: AssemblyTrademark( "" )]
+[assembly: AssemblyCulture( "" )]
+[assembly: CLSCompliant( true )]
+
+// Setting ComVisible to false makes the types in this assembly not visible 
+// to COM components.  If you need to access a type in this assembly from 
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible( false )]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid( "ad48c7f7-0b1d-4b1e-9602-83425cb5699f" )]
+
+/* Version information for an assembly consists of four values in the following order:
+ *
+ *   Major.Minor.Build.Revision
+ *
+ * These values are updated according to the following:
+ *   1. Major.Minor follows the ANTLR release schedule
+ *   2. Build is incremented each time the C# port is packaged for release (regardless
+ *      of whether it's an incremental or nightly). The value resets to zero whenever
+ *      the Major or Minor version is incremented.
+ */
+[assembly: AssemblyVersion("3.5.0.2")]
+[assembly: AssemblyFileVersion("3.5.0.2")]
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/SetExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/SetExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/SetExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/SetExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StackExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StackExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StackExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StackExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringBuilderExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringBuilderExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringBuilderExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringBuilderExtensions.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringExtensions.cs
new file mode 100644
index 0000000..837908a
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringExtensions.cs
@@ -0,0 +1,151 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if DEBUG
+namespace Antlr.Runtime.JavaExtensions
+{
+    using ObsoleteAttribute = System.ObsoleteAttribute;
+    using Regex = System.Text.RegularExpressions.Regex;
+    using StringBuilder = System.Text.StringBuilder;
+
+    public static class StringExtensions
+    {
+        [Obsolete]
+        public static char charAt( this string str, int index )
+        {
+            return str[index];
+        }
+
+        [Obsolete]
+        public static bool endsWith( this string str, string value )
+        {
+            return str.EndsWith( value );
+        }
+
+        [Obsolete]
+        public static int indexOf( this string str, char value )
+        {
+            return str.IndexOf( value );
+        }
+
+        [Obsolete]
+        public static int indexOf( this string str, char value, int startIndex )
+        {
+            return str.IndexOf( value, startIndex );
+        }
+
+        [Obsolete]
+        public static int indexOf( this string str, string value )
+        {
+            return str.IndexOf( value );
+        }
+
+        [Obsolete]
+        public static int indexOf( this string str, string value, int startIndex )
+        {
+            return str.IndexOf( value, startIndex );
+        }
+
+        [Obsolete]
+        public static int lastIndexOf( this string str, char value )
+        {
+            return str.LastIndexOf( value );
+        }
+
+        [Obsolete]
+        public static int lastIndexOf( this string str, string value )
+        {
+            return str.LastIndexOf( value );
+        }
+
+        [Obsolete]
+        public static int length( this string str )
+        {
+            return str.Length;
+        }
+
+        [Obsolete]
+        public static string replace(this string str, char oldValue, char newValue)
+        {
+            return str.Replace(oldValue, newValue);
+        }
+
+        [Obsolete]
+        public static string replaceAll( this string str, string regex, string newValue )
+        {
+            return Regex.Replace( str, regex, newValue );
+        }
+
+        [Obsolete]
+        public static string replaceFirst( this string str, string regex, string replacement )
+        {
+            return Regex.Replace( str, regex, replacement );
+        }
+
+        [Obsolete]
+        public static bool startsWith( this string str, string value )
+        {
+            return str.StartsWith( value );
+        }
+
+        [Obsolete]
+        public static string substring( this string str, int startOffset )
+        {
+            return str.Substring( startOffset );
+        }
+
+        [Obsolete]
+        public static string substring(this string str, int startOffset, int endOffset)
+        {
+            return str.Substring( startOffset, endOffset - startOffset );
+        }
+
+        [Obsolete]
+        public static char[] toCharArray( this string str )
+        {
+            return str.ToCharArray();
+        }
+
+        [Obsolete]
+        public static string toUpperCase( this string str )
+        {
+            return str.ToUpperInvariant();
+        }
+
+        [Obsolete]
+        public static string trim( this string str )
+        {
+            return str.Trim();
+        }
+    }
+}
+#endif
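
Note on the file added above: StringExtensions.cs supplies Java-named shims (charAt, length, substring, replaceAll, and so on) over System.String so code ported from the Java runtime compiles with few edits; every method is marked [Obsolete] and the whole class exists only when the JavaExtensions project is built with DEBUG defined. One caveat, based on the behavior of the three-argument Regex.Replace overload it delegates to: replaceFirst as written replaces every match, not only the first, so it does not fully mirror java.lang.String.replaceFirst. A minimal usage sketch, assuming a consumer that references an Antlr3.Runtime.JavaExtensions build from its Debug configuration:

    // Illustrative sketch, not part of this change. The calls below raise CS0618
    // warnings because the shims are marked [Obsolete].
    using System;
    using Antlr.Runtime.JavaExtensions;

    class JavaStyleStrings
    {
        static void Main()
        {
            string s = "grammar Expr;";

            Console.WriteLine(s.length());                    // 13, alias for s.Length
            Console.WriteLine(s.charAt(0));                   // 'g'
            Console.WriteLine(s.substring(8, 12));            // "Expr"; endOffset is exclusive, as in Java
            Console.WriteLine(s.replaceAll("[aeiou]", "_"));  // "gr_mm_r Expr;"

            // Delegates to Regex.Replace, so every "m" is replaced,
            // unlike java.lang.String.replaceFirst.
            Console.WriteLine(s.replaceFirst("m", "M"));      // "graMMar Expr;"
        }
    }
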
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringTokenizer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringTokenizer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringTokenizer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/StringTokenizer.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/SubList.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/SubList.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/SubList.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/SubList.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/TreeExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/TreeExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/TreeExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/TreeExtensions.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/TypeExtensions.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/TypeExtensions.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/TypeExtensions.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.JavaExtensions/TypeExtensions.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Antlr3.Runtime.Test.csproj b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Antlr3.Runtime.Test.csproj
new file mode 100644
index 0000000..75c4cac
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Antlr3.Runtime.Test.csproj
@@ -0,0 +1,187 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+    <ProductVersion>9.0.30729</ProductVersion>
+    <SchemaVersion>2.0</SchemaVersion>
+    <ProjectGuid>{19B965DE-5100-4064-A580-159644F6980E}</ProjectGuid>
+    <OutputType>Library</OutputType>
+    <AppDesignerFolder>Properties</AppDesignerFolder>
+    <RootNamespace>Antlr3.Runtime.Test</RootNamespace>
+    <AssemblyName>Antlr3.Runtime.Test</AssemblyName>
+    <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
+    <FileAlignment>512</FileAlignment>
+    <ProjectTypeGuids>{3AC096D0-A1C2-E12C-1390-A8335801FDAB};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}</ProjectTypeGuids>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
+    <DebugSymbols>true</DebugSymbols>
+    <DebugType>full</DebugType>
+    <Optimize>false</Optimize>
+    <OutputPath>bin\Debug\</OutputPath>
+    <DefineConstants>DEBUG;TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
+    <DebugType>pdbonly</DebugType>
+    <Optimize>true</Optimize>
+    <OutputPath>bin\Release\</OutputPath>
+    <DefineConstants>TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+  </PropertyGroup>
+  <ItemGroup>
+    <Reference Include="Microsoft.VisualStudio.QualityTools.UnitTestFramework, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL" />
+    <Reference Include="System" />
+    <Reference Include="System.Core">
+      <RequiredTargetFramework>3.5</RequiredTargetFramework>
+    </Reference>
+    <Reference Include="System.Numerics" />
+  </ItemGroup>
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\..\..\..\Antlr3.StringTemplate\Antlr3.StringTemplate.csproj">
+      <Project>{B5910BE2-DE21-4AA9-95C1-486F42B9E794}</Project>
+      <Name>Antlr3.StringTemplate</Name>
+    </ProjectReference>
+    <ProjectReference Include="..\Antlr3.Runtime.Debug\Antlr3.Runtime.Debug.csproj">
+      <Project>{5EE27A90-B023-42C9-AAF1-52B0424C5D0B}</Project>
+      <Name>Antlr3.Runtime.Debug</Name>
+    </ProjectReference>
+    <ProjectReference Include="..\Antlr3.Runtime.JavaExtensions\Antlr3.Runtime.JavaExtensions.csproj">
+      <Project>{A7EEC557-EB14-451C-9616-B7A61F4ECE69}</Project>
+      <Name>Antlr3.Runtime.JavaExtensions</Name>
+    </ProjectReference>
+    <ProjectReference Include="..\Antlr3.Runtime\Antlr3.Runtime.csproj">
+      <Project>{8FDC0A87-9005-4D5A-AB75-E55CEB575559}</Project>
+      <Name>Antlr3.Runtime</Name>
+    </ProjectReference>
+  </ItemGroup>
+  <ItemGroup>
+    <Antlr3 Include="SimpleExpression.g3" />
+    <Antlr3 Include="FastSimpleExpression.g3" />
+    <None Include="JavaCompat\Expr.g3" />
+    <Antlr3 Include="BuildOptions\DebugGrammar.g3">
+      <!--<GrammarOptions>-debug</GrammarOptions>-->
+    </Antlr3>
+    <Antlr3 Include="BuildOptions\DebugTreeGrammar.g3">
+      <!--<GrammarOptions>-debug</GrammarOptions>-->
+    </Antlr3>
+    <Antlr3 Include="StringTemplateOutput.g3" />
+    <Antlr3 Include="TestActionFeatures.g3" />
+    <Antlr3 Include="SemanticPredicateReduction.g3" />
+    <Antlr3 Include="Composition\Reduce.g3" />
+    <Antlr3 Include="Composition\Simplify.g3" />
+    <Antlr3 Include="Composition\VecMath.g3" />
+    <AntlrAbstractGrammar Include="Composition\VecMath_Lexer.g3">
+      <Generator>MSBuild:Compile</Generator>
+    </AntlrAbstractGrammar>
+    <AntlrAbstractGrammar Include="Composition\VecMath_Parser.g3">
+      <Generator>MSBuild:Compile</Generator>
+    </AntlrAbstractGrammar>
+    <Antlr3 Include="PreprocessorLexer.g3" />
+    <Antlr3 Include="SynpredTreeParser.g3" />
+  </ItemGroup>
+  <ItemGroup>
+    <Compile Include="Composition\Program.cs" />
+    <Compile Include="PreprocessorLexer.g3.cs">
+      <DependentUpon>PreprocessorLexer.g3</DependentUpon>
+    </Compile>
+    <Compile Include="PreprocessorTests.cs" />
+    <Compile Include="Properties\AssemblyInfo.cs" />
+    <Compile Include="SemanticPredicateReduction.g3.lexer.cs">
+      <DependentUpon>SemanticPredicateReduction.g3</DependentUpon>
+    </Compile>
+    <Compile Include="SemanticPredicateReduction.g3.parser.cs">
+      <DependentUpon>SemanticPredicateReduction.g3</DependentUpon>
+    </Compile>
+    <Compile Include="SimpleExpressionLexerHelper.cs">
+      <DependentUpon>SimpleExpression.g3</DependentUpon>
+    </Compile>
+    <Compile Include="SimpleExpressionParserHelper.cs">
+      <DependentUpon>SimpleExpression.g3</DependentUpon>
+    </Compile>
+    <Compile Include="FastSimpleExpressionLexerHelper.cs">
+      <DependentUpon>FastSimpleExpression.g3</DependentUpon>
+    </Compile>
+    <Compile Include="FastSimpleExpressionParserHelper.cs">
+      <DependentUpon>FastSimpleExpression.g3</DependentUpon>
+    </Compile>
+    <Compile Include="BuildOptions\DebugGrammarLexerHelper.cs">
+      <DependentUpon>DebugGrammar.g3</DependentUpon>
+    </Compile>
+    <Compile Include="BuildOptions\DebugGrammarParserHelper.cs">
+      <DependentUpon>DebugGrammar.g3</DependentUpon>
+    </Compile>
+    <Compile Include="BuildOptions\DebugTreeGrammarHelper.cs">
+      <DependentUpon>DebugTreeGrammar.g3</DependentUpon>
+    </Compile>
+    <Compile Include="SlimParsing\ITokenSource`1.cs" />
+    <Compile Include="SlimParsing\ITokenStream`1.cs" />
+    <Compile Include="SlimParsing\SlimLexer.cs" />
+    <Compile Include="SlimParsing\SlimStringStream.cs" />
+    <Compile Include="SlimParsing\SlimToken.cs" />
+    <Compile Include="SlimParsing\SlimTokenStream.cs" />
+    <Compile Include="SlimParsing\Tree\ITreeAdaptor`1.cs" />
+    <Compile Include="SlimParsing\Tree\ITreeFactory.cs" />
+    <Compile Include="SlimParsing\Tree\ITreeNodeStream`1.cs" />
+    <Compile Include="StringTemplateOutput.g3.lexer.cs">
+      <DependentUpon>StringTemplateOutput.g3</DependentUpon>
+    </Compile>
+    <Compile Include="StringTemplateOutput.g3.parser.cs">
+      <DependentUpon>StringTemplateOutput.g3</DependentUpon>
+    </Compile>
+    <Compile Include="SynpredTreeParser.g3.cs">
+      <DependentUpon>SynpredTreeParser.g3</DependentUpon>
+    </Compile>
+    <Compile Include="TestActionFeatures.g3.lexer.cs">
+      <DependentUpon>TestActionFeatures.g3</DependentUpon>
+    </Compile>
+    <Compile Include="TestActionFeatures.g3.parser.cs">
+      <DependentUpon>TestActionFeatures.g3</DependentUpon>
+    </Compile>
+    <Compile Include="TestDotTreeGenerator.cs" />
+    <Compile Include="TestExpressionFeatures.g3.lexer.cs">
+      <DependentUpon>TestExpressionFeatures.g3</DependentUpon>
+    </Compile>
+    <Compile Include="TestExpressionFeatures.g3.parser.cs">
+      <DependentUpon>TestExpressionFeatures.g3</DependentUpon>
+    </Compile>
+    <Compile Include="TestFastLexer.cs" />
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="BuildOptions\ProfileGrammar.g3">
+      <!--<GrammarOptions>-profile</GrammarOptions>-->
+    </None>
+    <None Include="BuildOptions\ProfileGrammarLexerHelper.cs">
+      <DependentUpon>ProfileGrammar.g3</DependentUpon>
+    </None>
+    <None Include="BuildOptions\ProfileGrammarParserHelper.cs">
+      <DependentUpon>ProfileGrammar.g3</DependentUpon>
+    </None>
+    <None Include="TestExpressionFeatures.g3" />
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="BuildOptions\ProfileTreeGrammar.g3">
+      <!--<GrammarOptions>-profile</GrammarOptions>-->
+    </None>
+    <None Include="BuildOptions\ProfileTreeGrammarHelper.cs">
+      <DependentUpon>ProfileTreeGrammar.g3</DependentUpon>
+    </None>
+  </ItemGroup>
+  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
+  <PropertyGroup>
+    <!-- Folder containing AntlrBuildTask.dll -->
+    <AntlrBuildTaskPath>$(ProjectDir)..\..\..\..\..\..\bin\Bootstrap</AntlrBuildTaskPath>
+    <!-- Path to the ANTLR Tool itself. -->
+    <AntlrToolPath>$(ProjectDir)..\..\..\..\..\..\bin\Bootstrap\Antlr3.exe</AntlrToolPath>
+  </PropertyGroup>
+  <Import Project="$(ProjectDir)..\..\..\..\..\..\bin\Bootstrap\Antlr3.targets" />
+  <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
+       Other similar extension points exist, see Microsoft.Common.targets.
+  <Target Name="BeforeBuild">
+  </Target>
+  <Target Name="AfterBuild">
+  </Target>
+  -->
+</Project>
\ No newline at end of file
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Antlr3.Runtime.Test.csproj.vspscc b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Antlr3.Runtime.Test.csproj.vspscc
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Antlr3.Runtime.Test.csproj.vspscc
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/Antlr3.Runtime.Test.csproj.vspscc
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammar.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammar.g3
new file mode 100644
index 0000000..6edb6d3
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammar.g3
@@ -0,0 +1,103 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+grammar DebugGrammar;
+
+options
+{
+	language=CSharp3;
+	output=AST;
+	ASTLabelType=CommonTree;
+}
+
+tokens
+{
+	// define pseudo-operations
+	FUNC;
+	CALL;
+}
+
+@lexer::namespace{Antlr3.Runtime.Test.BuildOptions}
+@parser::namespace{Antlr3.Runtime.Test.BuildOptions}
+
+// START:stat
+prog: ( stat )*
+    ;
+
+stat:   expr NEWLINE                    -> expr
+    |   ID '=' expr NEWLINE             -> ^('=' ID expr)
+    |   func NEWLINE                    -> func
+    |   NEWLINE                         -> // ignore
+    ;
+
+func:   ID  '(' formalPar ')' '=' expr  -> ^(FUNC ID formalPar expr)
+    ;
+	finally {
+	  functionDefinitions.Add($func.tree);
+	}
+
+formalPar
+    :   ID
+	|   INT
+	;
+
+// END:stat
+
+// START:expr
+expr:   multExpr (('+'^|'-'^) multExpr)*
+    ;
+
+multExpr
+    :   atom (('*'|'/'|'%')^ atom)*
+    ;
+
+atom:   INT
+    |   ID
+    |   '(' expr ')'    -> expr
+    |   ID '(' expr ')' -> ^(CALL ID expr)
+    ;
+// END:expr
+
+// START:tokens
+ID  :   ('a'..'z'|'A'..'Z')+
+	;
+
+INT :   '0'..'9'+
+    ;
+
+NEWLINE
+    :	'\r'? '\n'
+    ;
+
+WS  :   (' '|'\t')+ { Skip(); }
+    ;
+// END:tokens
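As a quick illustration of the rewrite rules above: the input line "f(x) = 2*x" is parsed by func into the tree ^(FUNC f x ^('*' 2 x)) (and recorded in functionDefinitions by the finally block), "a = 3+4" becomes ^('=' a ^('+' 3 4)), and a bare expression line such as "f(10)" yields ^(CALL f 10).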
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarLexer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarLexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarLexer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarLexer.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarLexerHelper.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarLexerHelper.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarLexerHelper.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarLexerHelper.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarParser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarParser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarParser.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarParser.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarParserHelper.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarParserHelper.cs
new file mode 100644
index 0000000..638100e
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugGrammarParserHelper.cs
@@ -0,0 +1,43 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr3.Runtime.Test.BuildOptions
+{
+    using System.Collections.Generic;
+    using Antlr.Runtime.Tree;
+
+    partial class DebugGrammarParser
+    {
+        /** List of function definitions. Must point at the FUNC nodes. */
+        List<CommonTree> functionDefinitions = new List<CommonTree>();
+    }
+}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammar.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammar.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammar.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammar.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammar.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammar.g3
new file mode 100644
index 0000000..40ee359
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammar.g3
@@ -0,0 +1,90 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+tree grammar DebugTreeGrammar;
+
+options
+{
+	language=CSharp3;
+	tokenVocab=DebugGrammar;
+	ASTLabelType=CommonTree;
+}
+
+// START:members
+@header
+{
+//import java.util.Map;
+//import java.util.HashMap;
+using BigInteger = System.Numerics.BigInteger;
+using Console = System.Console;
+}
+// END:members
+
+@namespace{Antlr3.Runtime.Test.BuildOptions}
+
+// START:rules
+prog:   stat*
+    ;
+
+stat:   expr                       { string result = $expr.value.ToString();
+                                     Console.Out.WriteLine($expr.value + " (about " + result[0] + "*10^" + (result.Length-1) + ")");
+                                   }
+    |   ^('=' ID expr)             { globalMemory[$ID.text] = $expr.value; }
+    |   ^(FUNC .+)	               // ignore FUNCs - we added them to functionDefinitions already in parser.
+    ;
+
+expr returns [BigInteger value]
+    :   ^('+' a=expr b=expr)       { $value = $a.value + $b.value; }
+    |   ^('-' a=expr b=expr)       { $value = $a.value - $b.value; }
+    |   ^('*' a=expr b=expr)       { $value = $a.value * $b.value; }
+    |   ^('/' a=expr b=expr)       { $value = $a.value / $b.value; }
+    |   ^('%' a=expr b=expr)       { $value = $a.value \% $b.value; }
+    |   ID                         { $value = getValue($ID.text); }
+    |   INT                        { $value = BigInteger.Parse($INT.text); }
+    |   call                       { $value = $call.value; }
+    ;
+
+call returns [BigInteger value]
+    :   ^(CALL ID expr)            { BigInteger p = $expr.value;
+                                     CommonTree funcRoot = findFunction($ID.text, p);
+                                     if (funcRoot == null) {
+                                         Console.Error.WriteLine("No match found for " + $ID.text + "(" + p + ")");
+                                     } else {
+                                         // Here we set up the local evaluator to run over the
+                                         // function definition with the parameter value.
+                                         // This re-reads a sub-AST of our input AST!
+                                         DebugTreeGrammar e = new DebugTreeGrammar(funcRoot, functionDefinitions, globalMemory, p);
+                                         $value = e.expr();
+                                     }
+                                   }
+    ;
+// END:rules
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammarHelper.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammarHelper.cs
new file mode 100644
index 0000000..c139aff
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/DebugTreeGrammarHelper.cs
@@ -0,0 +1,119 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr3.Runtime.Test.BuildOptions
+{
+    using System.Collections.Generic;
+    using Antlr.Runtime.Tree;
+
+    using BigInteger = System.Numerics.BigInteger;
+    using Console = System.Console;
+
+    partial class DebugTreeGrammar
+    {
+        /** Points to functions tracked by tree builder. */
+        private List<CommonTree> functionDefinitions;
+
+        /** Remember local variables. Currently, this is only the function parameter.
+         */
+        private readonly IDictionary<string, BigInteger> localMemory = new Dictionary<string, BigInteger>();
+
+        /** Remember global variables set by =. */
+        private IDictionary<string, BigInteger> globalMemory = new Dictionary<string, BigInteger>();
+
+        /** Set up an evaluator with a node stream; and a set of function definition ASTs. */
+        public DebugTreeGrammar(CommonTreeNodeStream nodes, List<CommonTree> functionDefinitions)
+            : this(nodes)
+        {
+            this.functionDefinitions = functionDefinitions;
+        }
+
+        /** Set up a local evaluator for a nested function call. The evaluator gets the definition
+         *  tree of the function; the set of all defined functions (to find locally called ones); a
+         *  pointer to the global variable memory; and the value of the function parameter to be
+         *  added to the local memory.
+         */
+        private DebugTreeGrammar(CommonTree function,
+                     List<CommonTree> functionDefinitions,
+                     IDictionary<string, BigInteger> globalMemory,
+                     BigInteger paramValue)
+            // Expected tree for function: ^(FUNC ID ( INT | ID ) expr)
+            : this(new CommonTreeNodeStream(function.GetChild(2)), functionDefinitions)
+        {
+            this.globalMemory = globalMemory;
+            localMemory[function.GetChild(1).Text] = paramValue;
+        }
+
+        /** Find matching function definition for a function name and parameter
+         *  value. The first definition is returned where (a) the name matches
+         *  and (b) the formal parameter agrees if it is defined as constant.
+         */
+        private CommonTree findFunction(string name, BigInteger paramValue)
+        {
+            foreach (CommonTree f in functionDefinitions)
+            {
+                // Expected tree for f: ^(FUNC ID (ID | INT) expr)
+                if (f.GetChild(0).Text.Equals(name))
+                {
+                    // Check whether parameter matches
+                    CommonTree formalPar = (CommonTree)f.GetChild(1);
+                    if (formalPar.Token.Type == INT
+                        && !BigInteger.Parse(formalPar.Token.Text).Equals(paramValue))
+                    {
+                        // Constant in formalPar list does not match actual value -> no match.
+                        continue;
+                    }
+                    // Parameter (value for INT formal arg) as well as fct name agrees!
+                    return f;
+                }
+            }
+            return null;
+        }
+
+        /** Get value of name up call stack. */
+        internal BigInteger getValue(string name)
+        {
+            BigInteger value;
+            if (localMemory.TryGetValue(name, out value) && value != null)
+            {
+                return value;
+            }
+            if (globalMemory.TryGetValue(name, out value) && value != null)
+            {
+                return value;
+            }
+            // not found in local memory or global memory
+            Console.Error.WriteLine("undefined variable " + name);
+            return BigInteger.Zero;
+        }
+    }
+}
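To make findFunction's matching rule concrete: if the parser collected the definitions f(0)=99 and f(x)=2*x in that order, findFunction("f", 0) returns the ^(FUNC f 0 ...) subtree because the INT formal 0 equals the actual parameter, while findFunction("f", 5) skips it (0 != 5) and returns the ^(FUNC f x ...) definition, whose ID formal matches any argument. getValue then resolves names parameter-first: the local memory holding the bound parameter is consulted before the shared global memory, and an unknown name is reported on the error stream and evaluated as zero.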
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammar.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammar.g3
new file mode 100644
index 0000000..9230134
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammar.g3
@@ -0,0 +1,103 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+grammar ProfileGrammar;
+
+options
+{
+	language=CSharp3;
+	output=AST;
+	ASTLabelType=CommonTree;
+}
+
+tokens
+{
+	// define pseudo-operations
+	FUNC;
+	CALL;
+}
+
+@lexer::namespace{Antlr3.Runtime.Test.BuildOptions}
+@parser::namespace{Antlr3.Runtime.Test.BuildOptions}
+
+// START:stat
+prog: ( stat )*
+    ;
+
+stat:   expr NEWLINE                    -> expr
+    |   ID '=' expr NEWLINE             -> ^('=' ID expr)
+    |   func NEWLINE                    -> func
+    |   NEWLINE                         -> // ignore
+    ;
+
+func:   ID  '(' formalPar ')' '=' expr  -> ^(FUNC ID formalPar expr)
+    ;
+	finally {
+	  functionDefinitions.Add($func.tree);
+	}
+
+formalPar
+    :   ID
+	|   INT
+	;
+
+// END:stat
+
+// START:expr
+expr:   multExpr (('+'^|'-'^) multExpr)*
+    ;
+
+multExpr
+    :   atom (('*'|'/'|'%')^ atom)*
+    ;
+
+atom:   INT
+    |   ID
+    |   '(' expr ')'    -> expr
+    |   ID '(' expr ')' -> ^(CALL ID expr)
+    ;
+// END:expr
+
+// START:tokens
+ID  :   ('a'..'z'|'A'..'Z')+
+	;
+
+INT :   '0'..'9'+
+    ;
+
+NEWLINE
+    :	'\r'? '\n'
+    ;
+
+WS  :   (' '|'\t')+ { Skip(); }
+    ;
+// END:tokens
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarLexer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarLexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarLexer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarLexer.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarLexerHelper.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarLexerHelper.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarLexerHelper.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarLexerHelper.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarParser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarParser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarParser.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarParser.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarParserHelper.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarParserHelper.cs
new file mode 100644
index 0000000..205a088
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileGrammarParserHelper.cs
@@ -0,0 +1,43 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr3.Runtime.Test.BuildOptions
+{
+    using System.Collections.Generic;
+    using Antlr.Runtime.Tree;
+
+    partial class ProfileGrammarParser
+    {
+        /** List of function definitions. Must point at the FUNC nodes. */
+        List<CommonTree> functionDefinitions = new List<CommonTree>();
+    }
+}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammar.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammar.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammar.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammar.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammar.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammar.g3
new file mode 100644
index 0000000..5bf107b
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammar.g3
@@ -0,0 +1,90 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+tree grammar ProfileTreeGrammar;
+
+options
+{
+	language=CSharp3;
+	tokenVocab=ProfileGrammar;
+	ASTLabelType=CommonTree;
+}
+
+// START:members
+@header
+{
+//import java.util.Map;
+//import java.util.HashMap;
+using BigInteger = java.math.BigInteger;
+using Console = System.Console;
+}
+// END:members
+
+@namespace{Antlr3.Runtime.Test.BuildOptions}
+
+// START:rules
+prog:   stat*
+    ;
+
+stat:   expr                       { string result = $expr.value.ToString();
+                                     Console.Out.WriteLine($expr.value + " (about " + result[0] + "*10^" + (result.Length-1) + ")");
+                                   }
+    |   ^('=' ID expr)             { globalMemory[$ID.text] = $expr.value; }
+    |   ^(FUNC .+)	               // ignore FUNCs - we added them to functionDefinitions already in parser.
+    ;
+
+expr returns [BigInteger value]
+    :   ^('+' a=expr b=expr)       { $value = $a.value.add($b.value); }
+    |   ^('-' a=expr b=expr)       { $value = $a.value.subtract($b.value); }
+    |   ^('*' a=expr b=expr)       { $value = $a.value.multiply($b.value); }
+    |   ^('/' a=expr b=expr)       { $value = $a.value.divide($b.value); }
+    |   ^('%' a=expr b=expr)       { $value = $a.value.remainder($b.value); }
+    |   ID                         { $value = getValue($ID.text); }
+    |   INT                        { $value = new BigInteger($INT.text); }
+    |   call                       { $value = $call.value; }
+    ;
+
+call returns [BigInteger value]
+    :   ^(CALL ID expr)            { BigInteger p = $expr.value;
+                                     CommonTree funcRoot = findFunction($ID.text, p);
+                                     if (funcRoot == null) {
+                                         Console.Error.WriteLine("No match found for " + $ID.text + "(" + p + ")");
+                                     } else {
+                                         // Here we set up the local evaluator to run over the
+                                         // function definition with the parameter value.
+                                         // This re-reads a sub-AST of our input AST!
+                                         ProfileTreeGrammar e = new ProfileTreeGrammar(funcRoot, functionDefinitions, globalMemory, p);
+                                         $value = e.expr();
+                                     }
+                                   }
+    ;
+// END:rules
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammarHelper.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammarHelper.cs
new file mode 100644
index 0000000..06d678a
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/BuildOptions/ProfileTreeGrammarHelper.cs
@@ -0,0 +1,119 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr3.Runtime.Test.BuildOptions
+{
+    using System.Collections.Generic;
+    using Antlr.Runtime.Tree;
+
+    using BigInteger = java.math.BigInteger;
+    using Console = System.Console;
+
+    partial class ProfileTreeGrammar
+    {
+        /** Points to functions tracked by tree builder. */
+        private List<CommonTree> functionDefinitions;
+
+        /** Remember local variables. Currently, this is only the function parameter.
+         */
+        private readonly IDictionary<string, BigInteger> localMemory = new Dictionary<string, BigInteger>();
+
+        /** Remember global variables set by =. */
+        private IDictionary<string, BigInteger> globalMemory = new Dictionary<string, BigInteger>();
+
+        /** Set up an evaluator with a node stream; and a set of function definition ASTs. */
+        public ProfileTreeGrammar(CommonTreeNodeStream nodes, List<CommonTree> functionDefinitions)
+            : this(nodes)
+        {
+            this.functionDefinitions = functionDefinitions;
+        }
+
+        /** Set up a local evaluator for a nested function call. The evaluator gets the definition
+         *  tree of the function; the set of all defined functions (to find locally called ones); a
+         *  pointer to the global variable memory; and the value of the function parameter to be
+         *  added to the local memory.
+         */
+        private ProfileTreeGrammar(CommonTree function,
+                     List<CommonTree> functionDefinitions,
+                     IDictionary<string, BigInteger> globalMemory,
+                     BigInteger paramValue)
+            // Expected tree for function: ^(FUNC ID ( INT | ID ) expr)
+            : this(new CommonTreeNodeStream(function.GetChild(2)), functionDefinitions)
+        {
+            this.globalMemory = globalMemory;
+            localMemory[function.GetChild(1).Text] = paramValue;
+        }
+
+        /** Find matching function definition for a function name and parameter
+         *  value. The first definition is returned where (a) the name matches
+         *  and (b) the formal parameter agrees if it is defined as constant.
+         */
+        private CommonTree findFunction(string name, BigInteger paramValue)
+        {
+            foreach (CommonTree f in functionDefinitions)
+            {
+                // Expected tree for f: ^(FUNC ID (ID | INT) expr)
+                if (f.GetChild(0).Text.Equals(name))
+                {
+                    // Check whether parameter matches
+                    CommonTree formalPar = (CommonTree)f.GetChild(1);
+                    if (formalPar.Token.Type == INT
+                        && !new BigInteger(formalPar.Token.Text).Equals(paramValue))
+                    {
+                        // Constant in formalPar list does not match actual value -> no match.
+                        continue;
+                    }
+                    // Parameter (value for INT formal arg) as well as fct name agrees!
+                    return f;
+                }
+            }
+            return null;
+        }
+
+        /** Get value of name up call stack. */
+        public BigInteger getValue(string name)
+        {
+            BigInteger value;
+            if (localMemory.TryGetValue(name, out value) && value != null)
+            {
+                return value;
+            }
+            if (globalMemory.TryGetValue(name, out value) && value != null)
+            {
+                return value;
+            }
+            // not found in local memory or global memory
+            Console.Error.WriteLine("undefined variable " + name);
+            return new BigInteger("0");
+        }
+    }
+}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Program.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Program.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Program.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Program.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Reduce.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Reduce.g3
new file mode 100644
index 0000000..db61e42
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Reduce.g3
@@ -0,0 +1,20 @@
+tree grammar Reduce;
+
+options
+{
+	tokenVocab=VecMath;
+	ASTLabelType=CommonTree;
+	output=AST;
+	filter=true;
+	language=CSharp3;
+}
+
+@namespace{Antlr3.Runtime.Test.Composition}
+
+/** Rewrite: x+x to be 2*x, 2*x to be x<<1, x<<n<<m to be x<<(n+m) */
+bottomup
+    :  ^(PLUS i=INT j=INT {$i.int==$j.int}?) -> ^(MULT["*"] INT["2"] $j)
+    |  ^(MULT x=INT {$x.int==2}? y=.)        -> ^(SHIFT["<<"] $y INT["1"])
+    |  ^(SHIFT ^(SHIFT e=. n=INT) m=INT)
+       -> ^(SHIFT["<<"] $e INT[($n.int+$m.int).ToString()])
+    ;
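Worked example: ^(PLUS 4 4) matches the first bottomup alternative (both INTs are equal) and is rewritten to ^(MULT 2 4); on a later pass that result matches the second alternative and becomes ^(SHIFT 4 1), and nested shifts such as ^(SHIFT ^(SHIFT x 1) 2) collapse to ^(SHIFT x 3). This assumes the driver re-applies the filter until no more rewrites fire, as the usual tree-filter pattern does.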
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Simplify.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Simplify.g3
new file mode 100644
index 0000000..f1c2550
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/Simplify.g3
@@ -0,0 +1,20 @@
+tree grammar Simplify;
+
+options {
+    tokenVocab=VecMath;    
+    ASTLabelType=CommonTree;
+    output=AST;
+    language=CSharp3;
+    filter=true;
+    //rewrite=true;
+}
+
+@namespace{Antlr3.Runtime.Test.Composition}
+
+topdown
+    :   ^( MULT INT ^(VEC (e+=.)+) ) -> ^(VEC ^(MULT INT $e)+)
+    ;
+
+bottomup
+    :  ^(MULT a=. b=INT {$b.int==0}?) -> $b // x*0 -> 0
+    ;
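Worked example: topdown distributes a scalar over a vector, so ^(MULT 4 ^(VEC 1 2 3)) becomes ^(VEC ^(MULT 4 1) ^(MULT 4 2) ^(MULT 4 3)); bottomup then prunes multiplications by zero, so any ^(MULT x 0) collapses to 0.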
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath.g3
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath.g3
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath.g3
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath_Lexer.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath_Lexer.g3
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath_Lexer.g3
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath_Lexer.g3
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath_Parser.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath_Parser.g3
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath_Parser.g3
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/Composition/VecMath_Parser.g3
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpression.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpression.g3
new file mode 100644
index 0000000..b0d5dfd
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpression.g3
@@ -0,0 +1,88 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+grammar FastSimpleExpression;
+
+options
+{
+	language=CSharp3;
+	//slim=true;
+	output=AST;
+	ASTLabelType=CommonTree;
+}
+
+@lexer::superClass{Antlr.Runtime.SlimLexer}
+@lexer::namespace{Antlr3.Runtime.Test}
+@parser::namespace{Antlr3.Runtime.Test}
+
+public
+expression
+	:	additive_expression
+		EOF
+	;
+
+additive_expression
+	:	multiplicative_expression
+		(	('+'^ | '-'^)
+			multiplicative_expression
+		)*
+	;
+
+multiplicative_expression
+	:	atom
+		(	('*'^ | '/'^ | '%'^)
+			atom
+		)*
+	;
+
+atom
+	:	IDENTIFIER
+	|	NUMBER
+	;
+
+//
+// LEXER
+//
+
+IDENTIFIER
+	:	('a'..'z' | 'A'..'Z' | '_')
+		('a'..'z' | 'A'..'Z' | '_' | '0'..'9')*
+	;
+
+NUMBER
+	:	'0'..'9'+
+	;
+
+WS
+	:	(' ' | '\t' | '\n' | '\r' | '\f')
+		{$channel = Hidden;}
+	;
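The operator alternatives use the '^' root suffix, so precedence falls out of the rule nesting: "a + b*2" parses to ^('+' a ^('*' b 2)). The generated lexer derives from Antlr.Runtime.SlimLexer via the superClass option, and whitespace is routed to the hidden channel rather than skipped.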
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpressionLexerHelper.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpressionLexerHelper.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpressionLexerHelper.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpressionLexerHelper.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpressionParserHelper.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpressionParserHelper.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpressionParserHelper.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/FastSimpleExpressionParserHelper.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/JavaCompat/Expr.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/JavaCompat/Expr.g3
new file mode 100644
index 0000000..78c79cc
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/JavaCompat/Expr.g3
@@ -0,0 +1,113 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+grammar Expr;
+
+/*
+	I had to make the following changes to the basic Expr grammar to make it work with the CSharp3 target in "Java compatibility mode".
+	For reference, see http://www.antlr.org/wiki/display/ANTLR3/Expression+evaluator.
+
+	Add an options section to set the language to CSharp3.
+
+	In the @header section, add:
+		// 'member' is obsolete
+		#pragma warning disable 612
+		using Antlr.Runtime.JavaExtensions;
+
+	In the @header section, replace:
+			import java.util.HashMap;
+		with:
+			using HashMap = System.Collections.Generic.Dictionary<object,object>;
+
+	Replace all instances of "System.out" with "JSystem.@out".
+
+	Replace all instances of "System.err" with "JSystem.err".
+
+	Replace all instances of "skip()" with "Skip()".
+ */
+
+options
+{
+	language=CSharp3;
+}
+
+@header {
+// 'member' is obsolete
+#pragma warning disable 612
+
+using Antlr.Runtime.JavaExtensions;
+using HashMap = System.Collections.Generic.Dictionary<object,object>;
+using Integer = java.lang.Integer;
+}
+
+@members {
+/** Map variable name to Integer object holding value */
+HashMap memory = new HashMap();
+}
+
+@lexer::namespace{Antlr3.Runtime.Test.JavaCompat}
+@parser::namespace{Antlr3.Runtime.Test.JavaCompat}
+
+prog:   stat+ ;
+
+stat:   expr NEWLINE {JSystem.@out.println($expr.value);}
+    |   ID '=' expr NEWLINE
+        {memory.put($ID.text, new Integer($expr.value));}
+    |   NEWLINE
+    ;
+
+expr returns [int value]
+    :   e=multExpr {$value = $e.value;}
+        (   '+' e=multExpr {$value += $e.value;}
+        |   '-' e=multExpr {$value -= $e.value;}
+        )*
+    ;
+
+multExpr returns [int value]
+    :   e=atom {$value = $e.value;} ('*' e=atom {$value *= $e.value;})*
+    ; 
+
+atom returns [int value]
+    :   INT {$value = Integer.parseInt($INT.text);}
+    |   ID
+        {
+        Integer v = (Integer)memory.get($ID.text);
+        if ( v!=null ) $value = v.intValue();
+        else JSystem.err.println("undefined variable "+$ID.text);
+        }
+    |   '(' expr ')' {$value = $expr.value;}
+    ;
+
+ID  :   ('a'..'z'|'A'..'Z')+ ;
+INT :   '0'..'9'+ ;
+NEWLINE:'\r'? '\n' ;
+WS  :   (' '|'\t')+ {Skip();} ;
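Because the actions interpret as they parse, feeding the generated parser the two lines "a=5" and "a+2" stores 5 under "a" in the memory map and then prints 7 via JSystem.@out; referencing an undefined variable prints an "undefined variable" message on JSystem.err and contributes 0 to the expression.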
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/PreprocessorLexer.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/PreprocessorLexer.g3
new file mode 100644
index 0000000..29655ef
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/PreprocessorLexer.g3
@@ -0,0 +1,69 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+lexer grammar PreprocessorLexer;
+
+options
+{
+	language=CSharp3;
+}
+
+@namespace{Antlr3.Runtime.Test}
+
+PP_SKIPPED_CHARACTERS
+	:	{false}? => ~(F_NEW_LINE_CHARACTER | F_PP_POUND_SIGN) F_INPUT_CHARACTER*
+	;
+  
+DELIMITED_COMMENT
+	:	{true}? => '/*' .* '*/'
+	;
+  
+WHITESPACE
+	:	F_WHITESPACE {Skip();}
+	;
+  
+fragment F_WHITESPACE
+	:	(' ' | '\t' | '\v' | '\f')+ 
+	;
+
+fragment F_NEW_LINE_CHARACTER
+	:	'\r'
+	|	'\n'
+	;
+  
+fragment F_PP_POUND_SIGN
+	:	'#'
+	;
+  
+fragment F_INPUT_CHARACTER
+	:	~F_NEW_LINE_CHARACTER
+	;
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/PreprocessorLexer.g3.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/PreprocessorLexer.g3.cs
new file mode 100644
index 0000000..e095fbf
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/PreprocessorLexer.g3.cs
@@ -0,0 +1,38 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr3.Runtime.Test
+{
+    partial class PreprocessorLexer
+    {
+    }
+}
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/PreprocessorTests.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/PreprocessorTests.cs
new file mode 100644
index 0000000..967f668
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/PreprocessorTests.cs
@@ -0,0 +1,58 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr3.Runtime.Test
+{
+    using System.Collections.Generic;
+    using Antlr.Runtime;
+    using Microsoft.VisualStudio.TestTools.UnitTesting;
+
+    [TestClass]
+    public class PreprocessorTests
+    {
+        [TestMethod]
+        public void TestEmptyComment()
+        {
+            string inputText = "/**/ ";
+            var input = new ANTLRStringStream(inputText);
+            var lexer = new PreprocessorLexer(input);
+            var tokenStream = new CommonTokenStream(lexer);
+            tokenStream.Fill();
+
+            List<IToken> tokens = tokenStream.GetTokens();
+            Assert.AreEqual(2, tokens.Count);
+            Assert.AreEqual(PreprocessorLexer.DELIMITED_COMMENT, tokens[0].Type);
+            Assert.AreEqual("/**/", tokens[0].Text);
+            Assert.AreEqual(PreprocessorLexer.EOF, tokens[1].Type);
+        }
+    }
+}
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Properties/AssemblyInfo.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..1e0fc38
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/Properties/AssemblyInfo.cs
@@ -0,0 +1,70 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+using System;
+using System.Reflection;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following 
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle( "Antlr3.Runtime.Test" )]
+[assembly: AssemblyDescription( "" )]
+[assembly: AssemblyConfiguration( "" )]
+[assembly: AssemblyCompany( "Pixel Mine, Inc." )]
+[assembly: AssemblyProduct( "Antlr3.Runtime.Test" )]
+[assembly: AssemblyCopyright("Copyright © Sam Harwell 2013")]
+[assembly: AssemblyTrademark( "" )]
+[assembly: AssemblyCulture( "" )]
+[assembly: CLSCompliant( true )]
+
+// Setting ComVisible to false makes the types in this assembly not visible 
+// to COM components.  If you need to access a type in this assembly from 
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible( false )]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid( "1352b15b-eded-4380-9122-acde32f7ff38" )]
+
+/* Version information for an assembly consists of four values in the following order:
+ *
+ *   Major.Minor.Build.Revision
+ *
+ * These values are updated according to the following:
+ *   1. Major.Minor follows the ANTLR release schedule
+ *   2. Build is incremented each time the C# port is packaged for release (regardless
+ *      of whether it's an incremental or nightly). The value resets to zero whenever
+ *      the Major or Minor version is incremented.
+ *   3. Revision is the Perforce changelist number associated with the release.
+ */
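+// For example (hypothetical numbers, following the scheme above): a version of
+// 3.5.1.9876 would be the second packaging of the 3.5 release line, built from
+// Perforce changelist 9876.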
+[assembly: AssemblyVersion("3.5.0.2")]
+[assembly: AssemblyFileVersion("3.5.0.2")]
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3.lexer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3.lexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3.lexer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3.lexer.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3.parser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3.parser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3.parser.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SemanticPredicateReduction.g3.parser.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpression.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpression.g3
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpression.g3
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpression.g3
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpressionLexerHelper.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpressionLexerHelper.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpressionLexerHelper.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpressionLexerHelper.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpressionParserHelper.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpressionParserHelper.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpressionParserHelper.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SimpleExpressionParserHelper.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/ITokenSource\1401.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/ITokenSource\1401.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/ITokenSource\1401.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/ITokenSource\1401.cs"
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/ITokenStream\1401.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/ITokenStream\1401.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/ITokenStream\1401.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/ITokenStream\1401.cs"
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimLexer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimLexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimLexer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimLexer.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimStringStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimStringStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimStringStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimStringStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimToken.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimToken.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimToken.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimToken.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimTokenStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimTokenStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/SlimTokenStream.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeAdaptor\1401.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeAdaptor\1401.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeAdaptor\1401.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeAdaptor\1401.cs"
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeFactory.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeFactory.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeFactory.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeFactory.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeNodeStream\1401.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeNodeStream\1401.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeNodeStream\1401.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime.Test/SlimParsing/Tree/ITreeNodeStream\1401.cs"
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3.lexer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3.lexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3.lexer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3.lexer.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3.parser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3.parser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3.parser.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/StringTemplateOutput.g3.parser.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SynpredTreeParser.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SynpredTreeParser.g3
new file mode 100644
index 0000000..6bb7df4
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SynpredTreeParser.g3
@@ -0,0 +1,47 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+tree grammar SynpredTreeParser;
+
+options
+{
+	language=CSharp3;
+	ASTLabelType=ITree;
+	output=AST;
+}
+
+@namespace{Antlr3.Runtime.Test}
+
+enterInsertOrDeleteData 
+	:	^( INSERT ( (DATA)=>DATA { _value = true; } | { _value = true; } ) .* )   
+	|	^( DELETE ( (DATA)=>DATA { _value = false; } | { _value = false; } ) .* )      
+	;
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SynpredTreeParser.g3.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SynpredTreeParser.g3.cs
new file mode 100644
index 0000000..f127aad
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/SynpredTreeParser.g3.cs
@@ -0,0 +1,41 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma warning disable 414 // The field 'fieldname' is assigned but its value is never used
+
+namespace Antlr3.Runtime.Test
+{
+    partial class SynpredTreeParser
+    {
+        private bool _value;
+    }
+}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3.lexer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3.lexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3.lexer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3.lexer.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3.parser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3.parser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3.parser.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestActionFeatures.g3.parser.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestDotTreeGenerator.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestDotTreeGenerator.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestDotTreeGenerator.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestDotTreeGenerator.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3 b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3.lexer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3.lexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3.lexer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3.lexer.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3.parser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3.parser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3.parser.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestExpressionFeatures.g3.parser.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestFastLexer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestFastLexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestFastLexer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime.Test/TestFastLexer.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.sln b/runtime/CSharp3/Sources/Antlr3.Runtime.sln
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.sln
rename to runtime/CSharp3/Sources/Antlr3.Runtime.sln
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.vssscc b/runtime/CSharp3/Sources/Antlr3.Runtime.vssscc
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime.vssscc
rename to runtime/CSharp3/Sources/Antlr3.Runtime.vssscc
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRFileStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRFileStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRFileStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRFileStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRInputStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRInputStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRInputStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRInputStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRReaderStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRReaderStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRReaderStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRReaderStream.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRStringStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRStringStream.cs
new file mode 100644
index 0000000..b3f11b4
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/ANTLRStringStream.cs
@@ -0,0 +1,327 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime
+{
+    using System.Collections.Generic;
+    using ArgumentException = System.ArgumentException;
+    using ArgumentOutOfRangeException = System.ArgumentOutOfRangeException;
+    using ArgumentNullException = System.ArgumentNullException;
+
+    /** <summary>
+     *  A pretty quick CharStream that pulls all data from an array
+     *  directly.  Every method call counts in the lexer.  Java's
+     *  strings aren't very good, so I'm avoiding them.
+     *  </summary>
+     */
+    [System.Serializable]
+    public class ANTLRStringStream : ICharStream
+    {
+        /** <summary>The data being scanned</summary> */
+        protected char[] data;
+
+        /** <summary>How many characters are actually in the buffer</summary> */
+        protected int n;
+
+        /** <summary>0..n-1 index into string of next char</summary> */
+        protected int p = 0;
+
+        /** <summary>line number 1..n within the input</summary> */
+        int line = 1;
+
+        /** <summary>The index of the character relative to the beginning of the line 0..n-1</summary> */
+        int charPositionInLine = 0;
+
+        /** <summary>tracks how deep mark() calls are nested</summary> */
+        protected int markDepth = 0;
+
+        /** <summary>
+         *  A list of CharStreamState objects that tracks the stream state
+         *  values line, charPositionInLine, and p that can change as you
+         *  move through the input stream.  Indexed from 1..markDepth.
+         *  A null is kept @ index 0.  Create upon first call to mark().
+         *  </summary>
+         */
+        protected IList<CharStreamState> markers;
+
+        /** <summary>Track the last mark() call result value for use in rewind().</summary> */
+        protected int lastMarker;
+
+        /** <summary>What is the name or source of this char stream?</summary> */
+        public string name;
+
+        /** <summary>Copy data in string to a local char array</summary> */
+        public ANTLRStringStream( string input )
+            : this( input, null )
+        {
+        }
+
+        public ANTLRStringStream( string input, string sourceName )
+            : this( input.ToCharArray(), input.Length, sourceName )
+        {
+        }
+
+        /** <summary>This is the preferred constructor as no data is copied</summary> */
+        public ANTLRStringStream( char[] data, int numberOfActualCharsInArray )
+            : this( data, numberOfActualCharsInArray, null )
+        {
+        }
+
+        public ANTLRStringStream( char[] data, int numberOfActualCharsInArray, string sourceName )
+        {
+            if (data == null)
+                throw new ArgumentNullException("data");
+            if (numberOfActualCharsInArray < 0)
+                throw new ArgumentOutOfRangeException();
+            if (numberOfActualCharsInArray > data.Length)
+                throw new ArgumentException();
+
+            this.data = data;
+            this.n = numberOfActualCharsInArray;
+            this.name = sourceName;
+        }
+
+        protected ANTLRStringStream()
+        {
+            this.data = new char[0];
+        }
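+        /* Usage sketch (illustration only, not part of the runtime source):
+         *
+         *     ICharStream input = new ANTLRStringStream("a = 3\n");
+         *     int c = input.LA(1);   // peek at the first character ('a')
+         *     input.Consume();       // advance; Line and CharPositionInLine are updated
+         *
+         * A generated lexer would normally wrap such a stream.
+         */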
+
+        /** <summary>
+         *  Return the current input symbol index 0..n where n indicates the
+         *  last symbol has been read.  The index is the index of char to
+         *  be returned from LA(1).
+         *  </summary>
+         */
+        public virtual int Index
+        {
+            get
+            {
+                return p;
+            }
+        }
+        public virtual int Line
+        {
+            get
+            {
+                return line;
+            }
+            set
+            {
+                line = value;
+            }
+        }
+        public virtual int CharPositionInLine
+        {
+            get
+            {
+                return charPositionInLine;
+            }
+            set
+            {
+                charPositionInLine = value;
+            }
+        }
+
+        /** <summary>
+         *  Reset the stream so that it's in the same state it was
+         *  when the object was created *except* the data array is not
+         *  touched.
+         *  </summary>
+         */
+        public virtual void Reset()
+        {
+            p = 0;
+            line = 1;
+            charPositionInLine = 0;
+            markDepth = 0;
+        }
+
+        public virtual void Consume()
+        {
+            //System.out.println("prev p="+p+", c="+(char)data[p]);
+            if ( p < n )
+            {
+                charPositionInLine++;
+                if ( data[p] == '\n' )
+                {
+                    /*
+                    System.out.println("newline char found on line: "+line+
+                                       "@ pos="+charPositionInLine);
+                    */
+                    line++;
+                    charPositionInLine = 0;
+                }
+                p++;
+                //System.out.println("p moves to "+p+" (c='"+(char)data[p]+"')");
+            }
+        }
+
+        public virtual int LA( int i )
+        {
+            if ( i == 0 )
+            {
+                return 0; // undefined
+            }
+            if ( i < 0 )
+            {
+                i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
+                if ( ( p + i - 1 ) < 0 )
+                {
+                    return CharStreamConstants.EndOfFile; // invalid; no char before first char
+                }
+            }
+
+            if ( ( p + i - 1 ) >= n )
+            {
+                //System.out.println("char LA("+i+")=EOF; p="+p);
+                return CharStreamConstants.EndOfFile;
+            }
+            //System.out.println("char LA("+i+")="+(char)data[p+i-1]+"; p="+p);
+            //System.out.println("LA("+i+"); p="+p+" n="+n+" data.length="+data.length);
+            return data[p + i - 1];
+        }
+
+        public virtual int LT( int i )
+        {
+            return LA( i );
+        }
+
+        public virtual int Count
+        {
+            get
+            {
+                return n;
+            }
+        }
+
+        public virtual int Mark()
+        {
+            if ( markers == null )
+            {
+                markers = new List<CharStreamState>();
+                markers.Add( null ); // depth 0 means no backtracking, leave blank
+            }
+            markDepth++;
+            CharStreamState state = null;
+            if ( markDepth >= markers.Count )
+            {
+                state = new CharStreamState();
+                markers.Add( state );
+            }
+            else
+            {
+                state = markers[markDepth];
+            }
+            state.p = Index;
+            state.line = Line;
+            state.charPositionInLine = CharPositionInLine;
+            lastMarker = markDepth;
+            return markDepth;
+        }
+
+        public virtual void Rewind( int m )
+        {
+            if (m < 0)
+                throw new ArgumentOutOfRangeException();
+
+            //if (m > markDepth)
+            //    throw new ArgumentException();
+
+            CharStreamState state = markers[m];
+            // restore stream state
+            Seek( state.p );
+            line = state.line;
+            charPositionInLine = state.charPositionInLine;
+            Release( m );
+        }
+
+        public virtual void Rewind()
+        {
+            Rewind( lastMarker );
+        }
+
+        public virtual void Release( int marker )
+        {
+            // unwind any other markers made after m and release m
+            markDepth = marker;
+            // release this marker
+            markDepth--;
+        }
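+        /* Illustrative mark/rewind flow (comment only; values are hypothetical):
+         *
+         *     int m = input.Mark();   // snapshot p, line, charPositionInLine
+         *     input.Consume();        // speculate ahead
+         *     input.Rewind(m);        // restore the snapshot and release marker m
+         *
+         * Rewind(int) calls Release(m) itself, so callers only pair Mark and Rewind.
+         */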
+
+        /** <summary>
+         *  consume() ahead until p==index; can't just set p=index as we must
+         *  update line and charPositionInLine.
+         *  </summary>
+         */
+        public virtual void Seek( int index )
+        {
+            if ( index <= p )
+            {
+                p = index; // just jump; don't update stream state (line, ...)
+                return;
+            }
+            // seek forward, consume until p hits index
+            while ( p < index )
+            {
+                Consume();
+            }
+        }
+
+        public virtual string Substring( int start, int length )
+        {
+            if (start < 0)
+                throw new ArgumentOutOfRangeException();
+            if (length < 0)
+                throw new ArgumentOutOfRangeException();
+            if (start + length > data.Length)
+                throw new ArgumentException();
+
+            if (length == 0)
+                return string.Empty;
+
+            return new string( data, start, length );
+        }
+
+        public virtual string SourceName
+        {
+            get
+            {
+                return name;
+            }
+        }
+
+        public override string ToString()
+        {
+            return new string(data);
+        }
+    }
+}
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Antlr3.Runtime.csproj b/runtime/CSharp3/Sources/Antlr3.Runtime/Antlr3.Runtime.csproj
new file mode 100644
index 0000000..5da739f
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Antlr3.Runtime.csproj
@@ -0,0 +1,143 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+    <ProductVersion>9.0.30729</ProductVersion>
+    <SchemaVersion>2.0</SchemaVersion>
+    <ProjectGuid>{8FDC0A87-9005-4D5A-AB75-E55CEB575559}</ProjectGuid>
+    <OutputType>Library</OutputType>
+    <AppDesignerFolder>Properties</AppDesignerFolder>
+    <RootNamespace>Antlr.Runtime</RootNamespace>
+    <AssemblyName>Antlr3.Runtime</AssemblyName>
+    <TargetFrameworkVersion>v2.0</TargetFrameworkVersion>
+    <FileAlignment>512</FileAlignment>
+    <SignAssembly>true</SignAssembly>
+    <AssemblyOriginatorKeyFile>..\..\..\..\..\..\..\..\..\keys\antlr\Key.snk</AssemblyOriginatorKeyFile>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
+    <DebugSymbols>true</DebugSymbols>
+    <DebugType>full</DebugType>
+    <Optimize>false</Optimize>
+    <OutputPath>bin\Debug\</OutputPath>
+    <DefineConstants>DEBUG;TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+    <DocumentationFile>bin\Debug\Antlr3.Runtime.xml</DocumentationFile>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
+    <DebugType>pdbonly</DebugType>
+    <Optimize>true</Optimize>
+    <OutputPath>bin\Release\</OutputPath>
+    <DefineConstants>TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+    <DocumentationFile>bin\Release\Antlr3.Runtime.xml</DocumentationFile>
+  </PropertyGroup>
+  <ItemGroup>
+    <Reference Include="System" />
+  </ItemGroup>
+  <ItemGroup>
+    <Compile Include="ANTLRFileStream.cs" />
+    <Compile Include="ANTLRInputStream.cs" />
+    <Compile Include="ANTLRReaderStream.cs" />
+    <Compile Include="ANTLRStringStream.cs" />
+    <Compile Include="AstParserRuleReturnScope`2.cs" />
+    <Compile Include="BaseRecognizer.cs" />
+    <Compile Include="BitSet.cs" />
+    <Compile Include="BufferedTokenStream.cs" />
+    <Compile Include="CharStreamConstants.cs" />
+    <Compile Include="CharStreamState.cs" />
+    <Compile Include="ClassicToken.cs" />
+    <Compile Include="CommonToken.cs" />
+    <Compile Include="CommonTokenStream.cs" />
+    <Compile Include="Debug\IDebugEventListener.cs" />
+    <Compile Include="DFA.cs" />
+    <Compile Include="EarlyExitException.cs" />
+    <Compile Include="FailedPredicateException.cs" />
+    <Compile Include="GrammarRuleAttribute.cs" />
+    <Compile Include="IAstRuleReturnScope.cs" />
+    <Compile Include="IAstRuleReturnScope`1.cs" />
+    <Compile Include="ICharStream.cs" />
+    <Compile Include="IIntStream.cs" />
+    <Compile Include="IRuleReturnScope.cs" />
+    <Compile Include="IRuleReturnScope`1.cs" />
+    <Compile Include="ITemplateRuleReturnScope.cs" />
+    <Compile Include="ITemplateRuleReturnScope`1.cs" />
+    <Compile Include="IToken.cs" />
+    <Compile Include="ITokenSource.cs" />
+    <Compile Include="ITokenStream.cs" />
+    <Compile Include="ITokenStreamInformation.cs" />
+    <Compile Include="LegacyCommonTokenStream.cs" />
+    <Compile Include="Lexer.cs" />
+    <Compile Include="Misc\FastQueue.cs" />
+    <Compile Include="Misc\FunctionDelegates.cs" />
+    <Compile Include="Misc\ListStack`1.cs" />
+    <Compile Include="Misc\LookaheadStream.cs" />
+    <Compile Include="MismatchedNotSetException.cs" />
+    <Compile Include="MismatchedRangeException.cs" />
+    <Compile Include="MismatchedSetException.cs" />
+    <Compile Include="MismatchedTokenException.cs" />
+    <Compile Include="MismatchedTreeNodeException.cs" />
+    <Compile Include="MissingTokenException.cs" />
+    <Compile Include="NoViableAltException.cs" />
+    <Compile Include="Parser.cs" />
+    <Compile Include="ParserRuleReturnScope.cs" />
+    <Compile Include="Properties\AssemblyInfo.cs" />
+    <Compile Include="RecognitionException.cs" />
+    <Compile Include="RecognizerSharedState.cs" />
+    <Compile Include="TemplateParserRuleReturnScope`2.cs" />
+    <Compile Include="TokenChannels.cs" />
+    <Compile Include="TokenRewriteStream.cs" />
+    <Compile Include="Tokens.cs" />
+    <Compile Include="TokenTypes.cs" />
+    <Compile Include="Tree\AstTreeRuleReturnScope`2.cs" />
+    <Compile Include="Tree\BaseTree.cs" />
+    <Compile Include="Tree\BaseTreeAdaptor.cs" />
+    <Compile Include="Tree\AntlrRuntime_BaseTreeDebugView.cs" />
+    <Compile Include="Tree\BufferedTreeNodeStream.cs" />
+    <Compile Include="Tree\CommonErrorNode.cs" />
+    <Compile Include="Tree\CommonTree.cs" />
+    <Compile Include="Tree\CommonTreeAdaptor.cs" />
+    <Compile Include="Tree\CommonTreeNodeStream.cs" />
+    <Compile Include="Tree\DotTreeGenerator.cs" />
+    <Compile Include="Tree\IPositionTrackingStream.cs" />
+    <Compile Include="Tree\ITree.cs" />
+    <Compile Include="Tree\ITreeAdaptor.cs" />
+    <Compile Include="Tree\ITreeNodeStream.cs" />
+    <Compile Include="Tree\ITreeVisitorAction.cs" />
+    <Compile Include="Tree\ParseTree.cs" />
+    <Compile Include="Tree\RewriteCardinalityException.cs" />
+    <Compile Include="Tree\RewriteEarlyExitException.cs" />
+    <Compile Include="Tree\RewriteEmptyStreamException.cs" />
+    <Compile Include="Tree\RewriteRuleElementStream.cs" />
+    <Compile Include="Tree\RewriteRuleNodeStream.cs" />
+    <Compile Include="Tree\RewriteRuleSubtreeStream.cs" />
+    <Compile Include="Tree\RewriteRuleTokenStream.cs" />
+    <Compile Include="Tree\TemplateTreeRuleReturnScope`2.cs" />
+    <Compile Include="Tree\TreeFilter.cs" />
+    <Compile Include="Tree\TreeIterator.cs" />
+    <Compile Include="Tree\TreeParser.cs" />
+    <Compile Include="Tree\TreePatternLexer.cs" />
+    <Compile Include="Tree\TreePatternParser.cs" />
+    <Compile Include="Tree\TreeRewriter.cs" />
+    <Compile Include="Tree\TreeRuleReturnScope`1.cs" />
+    <Compile Include="Tree\TreeVisitor.cs" />
+    <Compile Include="Tree\TreeWizard.cs" />
+    <Compile Include="UnbufferedTokenStream.cs" />
+    <Compile Include="UnwantedTokenException.cs" />
+  </ItemGroup>
+  <ItemGroup>
+    <None Include="..\..\..\..\..\..\..\..\..\keys\antlr\Key.snk">
+      <Link>Key.snk</Link>
+    </None>
+  </ItemGroup>
+  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
+  <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
+       Other similar extension points exist, see Microsoft.Common.targets.
+  <Target Name="BeforeBuild">
+  </Target>
+  <Target Name="AfterBuild">
+  </Target>
+  -->
+</Project>
\ No newline at end of file
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Antlr3.Runtime.csproj.vspscc b/runtime/CSharp3/Sources/Antlr3.Runtime/Antlr3.Runtime.csproj.vspscc
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Antlr3.Runtime.csproj.vspscc
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Antlr3.Runtime.csproj.vspscc
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/AstParserRuleReturnScope\1402.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime/AstParserRuleReturnScope\1402.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/AstParserRuleReturnScope\1402.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime/AstParserRuleReturnScope\1402.cs"
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/BaseRecognizer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/BaseRecognizer.cs
new file mode 100644
index 0000000..c62a5bf
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/BaseRecognizer.cs
@@ -0,0 +1,1186 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime
+{
+    using System.Collections.Generic;
+
+    using ArgumentNullException = System.ArgumentNullException;
+    using Array = System.Array;
+    using Conditional = System.Diagnostics.ConditionalAttribute;
+    using IDebugEventListener = Antlr.Runtime.Debug.IDebugEventListener;
+    using MethodBase = System.Reflection.MethodBase;
+    using Regex = System.Text.RegularExpressions.Regex;
+    using StackFrame = System.Diagnostics.StackFrame;
+    using StackTrace = System.Diagnostics.StackTrace;
+    using TextWriter = System.IO.TextWriter;
+
+    /** <summary>
+     *  A generic recognizer that can handle recognizers generated from
+     *  lexer, parser, and tree grammars.  This is all the parsing
+     *  support code essentially; most of it is error recovery stuff and
+     *  backtracking.
+     *  </summary>
+     */
+    public abstract class BaseRecognizer
+    {
+        public const int MemoRuleFailed = -2;
+        public const int MemoRuleUnknown = -1;
+        public const int InitialFollowStackSize = 100;
+
+        // copies from Token object for convenience in actions
+        public const int DefaultTokenChannel = TokenChannels.Default;
+        public const int Hidden = TokenChannels.Hidden;
+
+        public const string NextTokenRuleName = "nextToken";
+
+        /** <summary>
+         *  The state of a lexer, parser, or tree parser is collected into a state
+         *  object so the state can be shared.  This sharing is needed to
+         *  have one grammar import others and share same error variables
+         *  and other state variables.  It's a kind of explicit multiple
+         *  inheritance via delegation of methods and shared state.
+         *  </summary>
+         */
+        protected internal RecognizerSharedState state;
+
+        public BaseRecognizer()
+            : this(new RecognizerSharedState())
+        {
+        }
+
+        public BaseRecognizer( RecognizerSharedState state )
+        {
+            if ( state == null )
+            {
+                state = new RecognizerSharedState();
+            }
+            this.state = state;
+            InitDFAs();
+        }
+
+        public TextWriter TraceDestination
+        {
+            get;
+            set;
+        }
+
+        public virtual void SetState(RecognizerSharedState value)
+        {
+            this.state = value;
+        }
+
+        protected virtual void InitDFAs()
+        {
+        }
+
+        /** <summary>Reset the parser's state; subclasses must rewind the input stream.</summary> */
+        public virtual void Reset()
+        {
+            // whack everything related to error recovery
+            if ( state == null )
+            {
+                return; // no shared state work to do
+            }
+            state._fsp = -1;
+            state.errorRecovery = false;
+            state.lastErrorIndex = -1;
+            state.failed = false;
+            state.syntaxErrors = 0;
+            // whack everything related to backtracking and memoization
+            state.backtracking = 0;
+            for ( int i = 0; state.ruleMemo != null && i < state.ruleMemo.Length; i++ )
+            { // wipe cache
+                state.ruleMemo[i] = null;
+            }
+        }
+
+
+        /** <summary>
+         *  Match current input symbol against ttype.  Attempt
+         *  single token insertion or deletion error recovery.  If
+         *  that fails, throw MismatchedTokenException.
+         *  </summary>
+         *
+         *  <remarks>
+         *  To turn off single token insertion or deletion error
+         *  recovery, override recoverFromMismatchedToken() and have it
+         *  throw an exception. See TreeParser.recoverFromMismatchedToken().
+         *  This way any error in a rule will cause an exception and
+         *  immediate exit from rule.  Rule would recover by resynchronizing
+         *  to the set of symbols that can follow rule ref.
+         *  </remarks>
+         */
+        public virtual object Match( IIntStream input, int ttype, BitSet follow )
+        {
+            //System.out.println("match "+((TokenStream)input).LT(1));
+            object matchedSymbol = GetCurrentInputSymbol( input );
+            if ( input.LA( 1 ) == ttype )
+            {
+                input.Consume();
+                state.errorRecovery = false;
+                state.failed = false;
+                return matchedSymbol;
+            }
+            if ( state.backtracking > 0 )
+            {
+                state.failed = true;
+                return matchedSymbol;
+            }
+            matchedSymbol = RecoverFromMismatchedToken( input, ttype, follow );
+            return matchedSymbol;
+        }
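+        /* Sketch (assumption, not part of this file): per the remarks above, a
+         * derived recognizer can disable single-token insertion/deletion by
+         * overriding RecoverFromMismatchedToken so that it simply throws, e.g.
+         *
+         *     // signature assumed from the call in Match() above
+         *     protected override object RecoverFromMismatchedToken(IIntStream input, int ttype, BitSet follow)
+         *     {
+         *         throw new MismatchedTokenException(ttype, input);
+         *     }
+         */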
+
+        /** <summary>Match the wildcard: consume any single input symbol</summary> */
+        public virtual void MatchAny( IIntStream input )
+        {
+            state.errorRecovery = false;
+            state.failed = false;
+            input.Consume();
+        }
+
+        public virtual bool MismatchIsUnwantedToken( IIntStream input, int ttype )
+        {
+            return input.LA( 2 ) == ttype;
+        }
+
+        public virtual bool MismatchIsMissingToken( IIntStream input, BitSet follow )
+        {
+            if ( follow == null )
+            {
+                // we have no information about the follow; we can only consume
+                // a single token and hope for the best
+                return false;
+            }
+            // compute what can follow this grammar element reference
+            if ( follow.Member( TokenTypes.EndOfRule ) )
+            {
+                BitSet viableTokensFollowingThisRule = ComputeContextSensitiveRuleFOLLOW();
+                follow = follow.Or( viableTokensFollowingThisRule );
+                if ( state._fsp >= 0 )
+                { // remove EOR if we're not the start symbol
+                    follow.Remove( TokenTypes.EndOfRule );
+                }
+            }
+            // if current token is consistent with what could come after set
+            // then we know we're missing a token; error recovery is free to
+            // "insert" the missing token
+
+            //System.out.println("viable tokens="+follow.toString(getTokenNames()));
+            //System.out.println("LT(1)="+((TokenStream)input).LT(1));
+
+            // BitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
+            // in the follow set to indicate that the follow of the start symbol is
+            // in the set (EOF can follow).
+            if ( follow.Member( input.LA( 1 ) ) || follow.Member( TokenTypes.EndOfRule ) )
+            {
+                //System.out.println("LT(1)=="+((TokenStream)input).LT(1)+" is consistent with what follows; inserting...");
+                return true;
+            }
+            return false;
+        }
+
+        /** <summary>Report a recognition problem.</summary>
+         *
+         *  <remarks>
+         *  This method sets errorRecovery to indicate the parser is recovering
+         *  not parsing.  Once in recovery mode, no errors are generated.
+         *  To get out of recovery mode, the parser must successfully match
+         *  a token (after a resync).  So it will go:
+         *
+         * 		1. error occurs
+         * 		2. enter recovery mode, report error
+         * 		3. consume until token found in resynch set
+         * 		4. try to resume parsing
+         * 		5. next match() will reset errorRecovery mode
+         *
+         *  If you override, make sure to update syntaxErrors if you care about that.
+         *  </remarks>
+         */
+        public virtual void ReportError( RecognitionException e )
+        {
+            // if we've already reported an error and have not matched a token
+            // yet successfully, don't report any errors.
+            if ( state.errorRecovery )
+            {
+                //System.err.print("[SPURIOUS] ");
+                return;
+            }
+            state.syntaxErrors++; // don't count spurious
+            state.errorRecovery = true;
+
+            DisplayRecognitionError( this.TokenNames, e );
+        }
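+        /* Sketch (assumption): a recognizer that wants custom reporting can
+         * override ReportError but should delegate to the base implementation so
+         * the errorRecovery / syntaxErrors bookkeeping described above still
+         * happens, e.g.
+         *
+         *     public override void ReportError(RecognitionException e)
+         *     {
+         *         base.ReportError(e);   // steps 1-2: enter recovery mode, count the error
+         *         // route the message to a custom sink here
+         *     }
+         */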
+
+        public virtual void DisplayRecognitionError( string[] tokenNames,
+                                            RecognitionException e )
+        {
+            string hdr = GetErrorHeader( e );
+            string msg = GetErrorMessage( e, tokenNames );
+            EmitErrorMessage( hdr + " " + msg );
+        }
+
+        /** <summary>What error message should be generated for the various exception types?</summary>
+         *
+         *  <remarks>
+         *  Not very object-oriented code, but I like having all error message
+         *  generation within one method rather than spread among all of the
+         *  exception classes. This also makes it much easier for the exception
+         *  handling because the exception classes do not have to have pointers back
+         *  to this object to access utility routines and so on. Also, changing
+         *  the message for an exception type would be difficult because you
+         *  would have to subclass the exception, but then somehow get ANTLR
+         *  to make those kinds of exception objects instead of the default.
+         *  This looks weird, but trust me--it makes the most sense in terms
+         *  of flexibility.
+         *
+         *  For grammar debugging, you will want to override this to add
+         *  more information such as the stack frame with
+         *  getRuleInvocationStack(e, this.getClass().getName()) and,
+         *  for no viable alts, the decision description and state etc...
+         *
+         *  Override this to change the message generated for one or more
+         *  exception types.
+         *  </remarks>
+         */
+        public virtual string GetErrorMessage( RecognitionException e, string[] tokenNames )
+        {
+            string msg = e.Message;
+            if ( e is UnwantedTokenException )
+            {
+                UnwantedTokenException ute = (UnwantedTokenException)e;
+                string tokenName = "<unknown>";
+                if ( ute.Expecting == TokenTypes.EndOfFile )
+                {
+                    tokenName = "EndOfFile";
+                }
+                else
+                {
+                    tokenName = tokenNames[ute.Expecting];
+                }
+                msg = "extraneous input " + GetTokenErrorDisplay( ute.UnexpectedToken ) +
+                    " expecting " + tokenName;
+            }
+            else if ( e is MissingTokenException )
+            {
+                MissingTokenException mte = (MissingTokenException)e;
+                string tokenName = "<unknown>";
+                if ( mte.Expecting == TokenTypes.EndOfFile )
+                {
+                    tokenName = "EndOfFile";
+                }
+                else
+                {
+                    tokenName = tokenNames[mte.Expecting];
+                }
+                msg = "missing " + tokenName + " at " + GetTokenErrorDisplay( e.Token );
+            }
+            else if ( e is MismatchedTokenException )
+            {
+                MismatchedTokenException mte = (MismatchedTokenException)e;
+                string tokenName = "<unknown>";
+                if ( mte.Expecting == TokenTypes.EndOfFile )
+                {
+                    tokenName = "EndOfFile";
+                }
+                else
+                {
+                    tokenName = tokenNames[mte.Expecting];
+                }
+                msg = "mismatched input " + GetTokenErrorDisplay( e.Token ) +
+                    " expecting " + tokenName;
+            }
+            else if ( e is MismatchedTreeNodeException )
+            {
+                MismatchedTreeNodeException mtne = (MismatchedTreeNodeException)e;
+                string tokenName = "<unknown>";
+                if ( mtne.Expecting == TokenTypes.EndOfFile )
+                {
+                    tokenName = "EndOfFile";
+                }
+                else
+                {
+                    tokenName = tokenNames[mtne.Expecting];
+                }
+                // workaround for a .NET framework bug (NullReferenceException)
+                string nodeText = ( mtne.Node != null ) ? mtne.Node.ToString() ?? string.Empty : string.Empty;
+                msg = "mismatched tree node: " + nodeText + " expecting " + tokenName;
+            }
+            else if ( e is NoViableAltException )
+            {
+                //NoViableAltException nvae = (NoViableAltException)e;
+                // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
+                // and "(decision="+nvae.decisionNumber+") and
+                // "state "+nvae.stateNumber
+                msg = "no viable alternative at input " + GetTokenErrorDisplay( e.Token );
+            }
+            else if ( e is EarlyExitException )
+            {
+                //EarlyExitException eee = (EarlyExitException)e;
+                // for development, can add "(decision="+eee.decisionNumber+")"
+                msg = "required (...)+ loop did not match anything at input " +
+                    GetTokenErrorDisplay( e.Token );
+            }
+            else if ( e is MismatchedSetException )
+            {
+                MismatchedSetException mse = (MismatchedSetException)e;
+                msg = "mismatched input " + GetTokenErrorDisplay( e.Token ) +
+                    " expecting set " + mse.Expecting;
+            }
+            else if ( e is MismatchedNotSetException )
+            {
+                MismatchedNotSetException mse = (MismatchedNotSetException)e;
+                msg = "mismatched input " + GetTokenErrorDisplay( e.Token ) +
+                    " expecting set " + mse.Expecting;
+            }
+            else if ( e is FailedPredicateException )
+            {
+                FailedPredicateException fpe = (FailedPredicateException)e;
+                msg = "rule " + fpe.RuleName + " failed predicate: {" +
+                    fpe.PredicateText + "}?";
+            }
+            return msg;
+        }
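
A minimal sketch of exercising the override hook above, assuming a hypothetical generated parser named MyParser (the CSharp3 target emits partial classes, so the override can sit in a hand-written file beside the generated code):

using Antlr.Runtime;

// Sketch only: MyParser stands in for an ANTLR-generated (partial) parser class.
public partial class MyParser
{
    public override string GetErrorMessage(RecognitionException e, string[] tokenNames)
    {
        // Reword just one exception type; defer to the stock messages otherwise.
        if (e is NoViableAltException)
            return "unexpected " + GetTokenErrorDisplay(e.Token) + "; no alternative matches here";

        return base.GetErrorMessage(e, tokenNames);
    }
}
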
+
+        /** <summary>
+         *  Get the number of recognition errors (lexer, parser, tree parser).  Each
+         *  recognizer tracks its own count, so the parser and lexer each have a
+         *  separate one.  Does not count the spurious errors found between
+         *  an error and the next valid token match.
+         *  </summary>
+         *
+         *  <seealso cref="ReportError(RecognitionException)"/>
+         */
+        public virtual int NumberOfSyntaxErrors
+        {
+            get
+            {
+                return state.syntaxErrors;
+            }
+        }
+
+        /** <summary>What is the error header, normally line/character position information?</summary> */
+        public virtual string GetErrorHeader( RecognitionException e )
+        {
+            string prefix = SourceName ?? string.Empty;
+            if (prefix.Length > 0)
+                prefix += ' ';
+
+            return string.Format("{0}line {1}:{2}", prefix, e.Line, e.CharPositionInLine + 1);
+        }
+
+        /** <summary>
+         *  How should a token be displayed in an error message? The default
+         *  is to display just the text, but during development you might
+         *  want to have a lot of information spit out.  Override in that case
+         *  to use t.ToString() (which, for CommonToken, dumps everything about
+         *  the token). This is better than forcing you to override a method in
+         *  your token objects because you don't have to go modify your lexer
+         *  so that it creates a new token class.
+         *  </summary>
+         */
+        public virtual string GetTokenErrorDisplay( IToken t )
+        {
+            string s = t.Text;
+            if ( s == null )
+            {
+                if ( t.Type == TokenTypes.EndOfFile )
+                {
+                    s = "<EOF>";
+                }
+                else
+                {
+                    s = "<" + t.Type + ">";
+                }
+            }
+            s = Regex.Replace( s, "\n", "\\\\n" );
+            s = Regex.Replace( s, "\r", "\\\\r" );
+            s = Regex.Replace( s, "\t", "\\\\t" );
+            return "'" + s + "'";
+        }
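
As the summary suggests, a development build can dump full token details instead; a small sketch against the same hypothetical MyParser partial class:

using Antlr.Runtime;

public partial class MyParser
{
    public override string GetTokenErrorDisplay(IToken t)
    {
        // CommonToken.ToString() reports type, channel, position and text.
        return t.ToString();
    }
}
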
+
+        /** <summary>Override this method to change where error messages go</summary> */
+        public virtual void EmitErrorMessage( string msg )
+        {
+            if (TraceDestination != null)
+                TraceDestination.WriteLine( msg );
+        }
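
For example, messages can be collected in memory instead of written to TraceDestination; a sketch, again assuming a hypothetical generated MyParser partial class (SyntaxErrorMessages is a hand-added member, not generated):

using System.Collections.Generic;
using Antlr.Runtime;

public partial class MyParser
{
    // Hand-added member, not part of the generated code.
    public readonly List<string> SyntaxErrorMessages = new List<string>();

    public override void EmitErrorMessage(string msg)
    {
        SyntaxErrorMessages.Add(msg);
    }
}
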
+
+        /** <summary>
+         *  Recover from an error found on the input stream.  This is
+         *  for NoViableAlt and mismatched symbol exceptions.  If you enable
+         *  single token insertion and deletion, this will usually not
+         *  handle mismatched symbol exceptions but there could be a mismatched
+         *  token that the match() routine could not recover from.
+         *  </summary>
+         */
+        public virtual void Recover( IIntStream input, RecognitionException re )
+        {
+            if ( state.lastErrorIndex == input.Index )
+            {
+                // uh oh, another error at same token index; must be a case
+                // where LT(1) is in the recovery token set so nothing is
+                // consumed; consume a single token so at least to prevent
+                // an infinite loop; this is a failsafe.
+                input.Consume();
+            }
+            state.lastErrorIndex = input.Index;
+            BitSet followSet = ComputeErrorRecoverySet();
+            BeginResync();
+            ConsumeUntil( input, followSet );
+            EndResync();
+        }
+
+        /** <summary>
+         *  A hook to listen in on the token consumption during error recovery.
+         *  The DebugParser subclasses this to fire events to the listener.
+         *  </summary>
+         */
+        public virtual void BeginResync()
+        {
+        }
+
+        public virtual void EndResync()
+        {
+        }
+
+        /*  Compute the error recovery set for the current rule.  During
+         *  rule invocation, the parser pushes the set of tokens that can
+         *  follow that rule reference on the stack; this amounts to
+         *  computing FIRST of what follows the rule reference in the
+         *  enclosing rule. This local follow set only includes tokens
+         *  from within the rule; i.e., the FIRST computation done by
+         *  ANTLR stops at the end of a rule.
+         *
+         *  EXAMPLE
+         *
+         *  When you find a "no viable alt exception", the input is not
+         *  consistent with any of the alternatives for rule r.  The best
+         *  thing to do is to consume tokens until you see something that
+         *  can legally follow a call to r *or* any rule that called r.
+         *  You don't want the exact set of viable next tokens because the
+         *  input might just be missing a token--you might consume the
+         *  rest of the input looking for one of the missing tokens.
+         *
+         *  Consider grammar:
+         *
+         *  a : '[' b ']'
+         *    | '(' b ')'
+         *    ;
+         *  b : c '^' INT ;
+         *  c : ID
+         *    | INT
+         *    ;
+         *
+         *  At each rule invocation, the set of tokens that could follow
+         *  that rule is pushed on a stack.  Here are the various "local"
+         *  follow sets:
+         *
+         *  FOLLOW(b1_in_a) = FIRST(']') = ']'
+         *  FOLLOW(b2_in_a) = FIRST(')') = ')'
+         *  FOLLOW(c_in_b) = FIRST('^') = '^'
+         *
+         *  Upon erroneous input "[]", the call chain is
+         *
+         *  a -> b -> c
+         *
+         *  and, hence, the follow context stack is:
+         *
+         *  depth  local follow set     after call to rule
+         *    0         <EOF>                    a (from main())
+         *    1          ']'                     b
+         *    2          '^'                     c
+         *
+         *  Notice that ')' is not included, because b would have to have
+         *  been called from a different context in rule a for ')' to be
+         *  included.
+         *
+         *  For error recovery, we cannot consider FOLLOW(c)
+         *  (context-sensitive or otherwise).  We need the combined set of
+         *  all context-sensitive FOLLOW sets--the set of all tokens that
+         *  could follow any reference in the call chain.  We need to
+         *  resync to one of those tokens.  Note that FOLLOW(c)='^' and if
+         *  we resync'd to that token, we'd consume until EOF.  We need to
+         *  sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+         *  In this case, for input "[]", LA(1) is in this set so we would
+         *  not consume anything and, after printing an error, rule c would
+         *  return normally.  It would not find the required '^' though.
+         *  At this point, it gets a mismatched token error and throws an
+         *  exception (since LA(1) is not in the viable following token
+         *  set).  The rule exception handler tries to recover, but finds
+         *  the same recovery set and doesn't consume anything.  Rule b
+         *  exits normally returning to rule a.  Now it finds the ']' (and
+         *  with the successful match exits errorRecovery mode).
+         *
+         *  So, you can see that the parser walks up the call chain looking
+         *  for the token that was a member of the recovery set.
+         *
+         *  Errors are not generated in errorRecovery mode.
+         *
+         *  ANTLR's error recovery mechanism is based upon original ideas:
+         *
+         *  "Algorithms + Data Structures = Programs" by Niklaus Wirth
+         *
+         *  and
+         *
+         *  "A note on error recovery in recursive descent parsers":
+         *  http://portal.acm.org/citation.cfm?id=947902.947905
+         *
+         *  Later, Josef Grosch had some good ideas:
+         *
+         *  "Efficient and Comfortable Error Recovery in Recursive Descent
+         *  Parsers":
+         *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+         *
+         *  Like Grosch I implemented local FOLLOW sets that are combined
+         *  at run-time upon error to avoid overhead during parsing.
+         */
+        protected virtual BitSet ComputeErrorRecoverySet()
+        {
+            return CombineFollows( false );
+        }
+
+        /** <summary>
+         *  Compute the context-sensitive FOLLOW set for current rule.
+         *  This is set of token types that can follow a specific rule
+         *  reference given a specific call chain.  You get the set of
+         *  viable tokens that can possibly come next (lookahead depth 1)
+         *  given the current call chain.  Contrast this with the
+         *  definition of plain FOLLOW for rule r:
+         *  </summary>
+         *
+         *   FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
+         *
+         *  where x in T* and alpha, beta in V*; T is set of terminals and
+         *  V is the set of terminals and nonterminals.  In other words,
+         *  FOLLOW(r) is the set of all tokens that can possibly follow
+         *  references to r in *any* sentential form (context).  At
+         *  runtime, however, we know precisely which context applies as
+         *  we have the call chain.  We may compute the exact (rather
+         *  than covering superset) set of following tokens.
+         *
+         *  For example, consider grammar:
+         *
+         *  stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
+         *       | "return" expr '.'
+         *       ;
+         *  expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
+         *  atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
+         *       | '(' expr ')'
+         *       ;
+         *
+         *  The FOLLOW sets are all inclusive whereas context-sensitive
+         *  FOLLOW sets are precisely what could follow a rule reference.
+         *  For input "i=(3);", here is the derivation:
+         *
+         *  stat => ID '=' expr ';'
+         *       => ID '=' atom ('+' atom)* ';'
+         *       => ID '=' '(' expr ')' ('+' atom)* ';'
+         *       => ID '=' '(' atom ')' ('+' atom)* ';'
+         *       => ID '=' '(' INT ')' ('+' atom)* ';'
+         *       => ID '=' '(' INT ')' ';'
+         *
+         *  At the "3" token, you'd have a call chain of
+         *
+         *    stat -> expr -> atom -> expr -> atom
+         *
+         *  What can follow that specific nested ref to atom?  Exactly ')'
+         *  as you can see by looking at the derivation of this specific
+         *  input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
+         *
+         *  You want the exact viable token set when recovering from a
+         *  token mismatch.  Upon token mismatch, if LA(1) is member of
+         *  the viable next token set, then you know there is most likely
+         *  a missing token in the input stream.  "Insert" one by just not
+         *  throwing an exception.
+         */
+        protected virtual BitSet ComputeContextSensitiveRuleFOLLOW()
+        {
+            return CombineFollows( true );
+        }
+
+        // what is exact? it seems to only add sets from above on stack
+        // if EOR is in set i.  When it sees a set w/o EOR, it stops adding.
+        // Why would we ever want them all?  Maybe no viable alt instead of
+        // mismatched token?
+        protected virtual BitSet CombineFollows(bool exact)
+        {
+            int top = state._fsp;
+            BitSet followSet = new BitSet();
+            for ( int i = top; i >= 0; i-- )
+            {
+                BitSet localFollowSet = (BitSet)state.following[i];
+                /*
+                System.out.println("local follow depth "+i+"="+
+                                   localFollowSet.toString(getTokenNames())+")");
+                 */
+                followSet.OrInPlace( localFollowSet );
+                if ( exact )
+                {
+                    // can we see end of rule?
+                    if ( localFollowSet.Member( TokenTypes.EndOfRule ) )
+                    {
+                        // Only leave EOR in set if at top (start rule); this lets
+                        // us know if we have to include follow(start rule); i.e., EOF
+                        if ( i > 0 )
+                        {
+                            followSet.Remove( TokenTypes.EndOfRule );
+                        }
+                    }
+                    else
+                    { // can't see end of rule, quit
+                        break;
+                    }
+                }
+            }
+            return followSet;
+        }
+
+        /** <summary>Attempt to recover from a single missing or extra token.</summary>
+         *
+         *  EXTRA TOKEN
+         *
+         *  LA(1) is not what we are looking for.  If LA(2) has the right token,
+         *  however, then assume LA(1) is some extra spurious token.  Delete it
+         *  and use LA(2) as if we were doing a normal match(), which advances the
+         *  input.
+         *
+         *  MISSING TOKEN
+         *
+         *  If the current token is consistent with what could come after
+         *  ttype then it is ok to "insert" the missing token, else throw an
+         *  exception.  For example, input "i=(3;" is clearly missing the
+         *  ')'.  When the parser returns from the nested call to expr, it
+         *  will have call chain:
+         *
+         *    stat -> expr -> atom
+         *
+         *  and it will be trying to match the ')' at this point in the
+         *  derivation:
+         *
+         *       => ID '=' '(' INT ')' ('+' atom)* ';'
+         *                          ^
+         *  match() will see that ';' doesn't match ')' and report a
+         *  mismatched token error.  To recover, it sees that LA(1)==';'
+         *  is in the set of tokens that can follow the ')' token
+         *  reference in rule atom.  It can assume that you forgot the ')'.
+         */
+        protected virtual object RecoverFromMismatchedToken( IIntStream input, int ttype, BitSet follow )
+        {
+            RecognitionException e = null;
+            // if next token is what we are looking for then "delete" this token
+            if ( MismatchIsUnwantedToken( input, ttype ) )
+            {
+                e = new UnwantedTokenException( ttype, input, TokenNames );
+                /*
+                System.err.println("recoverFromMismatchedToken deleting "+
+                                   ((TokenStream)input).LT(1)+
+                                   " since "+((TokenStream)input).LT(2)+" is what we want");
+                 */
+                BeginResync();
+                input.Consume(); // simply delete extra token
+                EndResync();
+                ReportError( e );  // report after consuming so AW sees the token in the exception
+                // we want to return the token we're actually matching
+                object matchedSymbol = GetCurrentInputSymbol( input );
+                input.Consume(); // move past ttype token as if all were ok
+                return matchedSymbol;
+            }
+            // can't recover with single token deletion, try insertion
+            if ( MismatchIsMissingToken( input, follow ) )
+            {
+                object inserted = GetMissingSymbol( input, e, ttype, follow );
+                e = new MissingTokenException( ttype, input, inserted );
+                ReportError( e );  // report after inserting so AW sees the token in the exception
+                return inserted;
+            }
+            // even that didn't work; must throw the exception
+            e = new MismatchedTokenException(ttype, input, TokenNames);
+            throw e;
+        }
+
+        /** Not currently used */
+        public virtual object RecoverFromMismatchedSet( IIntStream input,
+                                               RecognitionException e,
+                                               BitSet follow )
+        {
+            if ( MismatchIsMissingToken( input, follow ) )
+            {
+                // System.out.println("missing token");
+                ReportError( e );
+                // we don't know how to conjure up a token for sets yet
+                return GetMissingSymbol( input, e, TokenTypes.Invalid, follow );
+            }
+            // TODO do single token deletion like above for Token mismatch
+            throw e;
+        }
+
+        /** <summary>
+         *  Match needs to return the current input symbol, which gets put
+         *  into the label for the associated token ref; e.g., x=ID.  Token
+         *  and tree parsers need to return different objects. Rather than test
+         *  for input stream type or change the IntStream interface, I use
+         *  a simple method to ask the recognizer to tell me what the current
+         *  input symbol is.
+         *  </summary>
+         *
+         *  <remarks>This is ignored for lexers.</remarks>
+         */
+        protected virtual object GetCurrentInputSymbol( IIntStream input )
+        {
+            return null;
+        }
+
+        /** <summary>Conjure up a missing token during error recovery.</summary>
+         *
+         *  <remarks>
+         *  The recognizer attempts to recover from single missing
+         *  symbols. But, actions might refer to that missing symbol.
+         *  For example, x=ID {f($x);}. The action clearly assumes
+         *  that there has been an identifier matched previously and that
+         *  $x points at that token. If that token is missing, but
+         *  the next token in the stream is what we want, we assume that
+         *  this token is missing and we keep going. Because we
+         *  have to return some token to replace the missing token,
+         *  we have to conjure one up. This method gives the user control
+         *  over the tokens returned for missing tokens. Mostly,
+         *  you will want to create something special for identifier
+         *  tokens. For literals such as '{' and ',', the default
+         *  action in the parser or tree parser works. It simply creates
+         *  a CommonToken of the appropriate type. The text will be the token.
+         *  If you change what tokens must be created by the lexer,
+         *  override this method to create the appropriate tokens.
+         *  </remarks>
+         */
+        protected virtual object GetMissingSymbol( IIntStream input,
+                                          RecognitionException e,
+                                          int expectedTokenType,
+                                          BitSet follow )
+        {
+            return null;
+        }
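
A sketch of the kind of override the remarks describe, assuming a hypothetical generated MyParser whose grammar defines an ID token type; the conjured token is an ordinary CommonToken with placeholder text:

using Antlr.Runtime;

public partial class MyParser
{
    protected override object GetMissingSymbol(IIntStream input,
                                               RecognitionException e,
                                               int expectedTokenType,
                                               BitSet follow)
    {
        // ID is the generated token-type constant (assumed here).
        if (expectedTokenType == ID)
            return new CommonToken(ID, "<missing-identifier>");

        return base.GetMissingSymbol(input, e, expectedTokenType, follow);
    }
}
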
+
+        public virtual void ConsumeUntil( IIntStream input, int tokenType )
+        {
+            //System.out.println("consumeUntil "+tokenType);
+            int ttype = input.LA( 1 );
+            while ( ttype != TokenTypes.EndOfFile && ttype != tokenType )
+            {
+                input.Consume();
+                ttype = input.LA( 1 );
+            }
+        }
+
+        /** <summary>Consume tokens until one matches the given token set</summary> */
+        public virtual void ConsumeUntil( IIntStream input, BitSet set )
+        {
+            //System.out.println("consumeUntil("+set.toString(getTokenNames())+")");
+            int ttype = input.LA( 1 );
+            while ( ttype != TokenTypes.EndOfFile && !set.Member( ttype ) )
+            {
+                //System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
+                input.Consume();
+                ttype = input.LA( 1 );
+            }
+        }
+
+        /** <summary>Push a rule's follow set using our own hardcoded stack</summary> */
+        protected void PushFollow( BitSet fset )
+        {
+            if ( ( state._fsp + 1 ) >= state.following.Length )
+            {
+                Array.Resize(ref state.following, state.following.Length * 2);
+            }
+            state.following[++state._fsp] = fset;
+        }
+
+        protected void PopFollow()
+        {
+            state._fsp--;
+        }
+
+        /** <summary>
+         *  Return a list of the rule names in your parser instance
+         *  leading up to a call to this method.  You could override if
+         *  you want more details such as the file/line info of where
+         *  in the parser code a rule is invoked.
+         *  </summary>
+         *
+         *  <remarks>
+         *  This is very useful for error messages and for context-sensitive
+         *  error recovery.
+         *  </remarks>
+         */
+        public virtual IList<string> GetRuleInvocationStack()
+        {
+            return GetRuleInvocationStack( new StackTrace(true) );
+        }
+
+        /** <summary>
+         *  A more general version of GetRuleInvocationStack where you can
+         *  pass in the StackTrace of, for example, a RecognitionException
+         *  to get its rule stack trace.
+         *  </summary>
+         */
+        public static IList<string> GetRuleInvocationStack(StackTrace trace)
+        {
+            if (trace == null)
+                throw new ArgumentNullException("trace");
+
+            List<string> rules = new List<string>();
+            StackFrame[] stack = trace.GetFrames() ?? new StackFrame[0];
+
+            for (int i = stack.Length - 1; i >= 0; i--)
+            {
+                StackFrame frame = stack[i];
+                MethodBase method = frame.GetMethod();
+                GrammarRuleAttribute[] attributes = (GrammarRuleAttribute[])method.GetCustomAttributes(typeof(GrammarRuleAttribute), true);
+                if (attributes != null && attributes.Length > 0)
+                    rules.Add(attributes[0].Name);
+            }
+
+            return rules;
+        }
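
A sketch of using the static overload from an error report, as the summary describes; rule names come from the GrammarRule attributes on the generated rule methods found in the exception's stack trace (MyParser is again a hypothetical generated parser):

using System.Collections.Generic;
using System.Diagnostics;
using Antlr.Runtime;

public partial class MyParser
{
    public override void DisplayRecognitionError(string[] tokenNames, RecognitionException e)
    {
        // Recover the grammar rule path from the exception's own stack trace.
        IList<string> rules = GetRuleInvocationStack(new StackTrace(e, true));
        EmitErrorMessage(GetErrorHeader(e) + " " + GetErrorMessage(e, tokenNames)
                         + " in " + string.Join("/", new List<string>(rules).ToArray()));
    }
}
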
+
+        public virtual int BacktrackingLevel
+        {
+            get
+            {
+                return state.backtracking;
+            }
+            set
+            {
+                state.backtracking = value;
+            }
+        }
+
+        /** <summary>Return whether or not a backtracking attempt failed.</summary> */
+        public virtual bool Failed
+        {
+            get
+            {
+                return state.failed;
+            }
+        }
+
+        /** <summary>
+         *  Used to print out token names like ID during debugging and
+         *  error reporting.  The generated parsers implement a method
+         *  that overrides this to point to their String[] tokenNames.
+         *  </summary>
+         */
+        public virtual string[] TokenNames
+        {
+            get
+            {
+                return null;
+            }
+        }
+
+        /** <summary>
+         *  For debugging and other purposes, might want the grammar name.
+         *  Have ANTLR generate an implementation for this method.
+         *  </summary>
+         */
+        public virtual string GrammarFileName
+        {
+            get
+            {
+                return null;
+            }
+        }
+
+        public abstract string SourceName
+        {
+            get;
+        }
+
+        /** <summary>
+         *  A convenience method for use most often with template rewrites.
+         *  Convert a list of tokens to a list of their text strings.
+         *  </summary>
+         */
+        public virtual List<string> ToStrings( ICollection<IToken> tokens )
+        {
+            if ( tokens == null )
+                return null;
+
+            List<string> strings = new List<string>( tokens.Count );
+            foreach ( IToken token in tokens )
+            {
+                strings.Add( token.Text );
+            }
+
+            return strings;
+        }
+
+        /** <summary>
+         *  Given a rule number and a start token index number, return
+         *  MemoRuleUnknown if the rule has not parsed input starting from
+         *  start index.  If this rule has parsed input starting from the
+         *  start index before, then return where the rule stopped parsing.
+         *  It returns the index of the last token matched by the rule.
+         *  </summary>
+         *
+         *  <remarks>
+         *  For now we use a plain per-rule dictionary keyed by start index.
+         *  Later, we can make a special one for ints and also one that
+         *  tosses out data after we commit past input position i.
+         *  </remarks>
+         */
+        public virtual int GetRuleMemoization( int ruleIndex, int ruleStartIndex )
+        {
+            if ( state.ruleMemo[ruleIndex] == null )
+            {
+                state.ruleMemo[ruleIndex] = new Dictionary<int, int>();
+            }
+
+            int stopIndex;
+            if ( !state.ruleMemo[ruleIndex].TryGetValue( ruleStartIndex, out stopIndex ) )
+                return MemoRuleUnknown;
+
+            return stopIndex;
+        }
+
+        /** <summary>
+         *  Has this rule already parsed input at the current index in the
+         *  input stream?  Return the stop token index or MemoRuleUnknown.
+         *  If we attempted but failed to parse properly before, return
+         *  MemoRuleFailed.
+         *  </summary>
+         *
+         *  <remarks>
+         *  This method has a side-effect: if we have seen this input for
+         *  this rule and successfully parsed before, then seek ahead to
+         *  1 past the stop token matched for this rule last time.
+         *  </remarks>
+         */
+        public virtual bool AlreadyParsedRule( IIntStream input, int ruleIndex )
+        {
+            int stopIndex = GetRuleMemoization( ruleIndex, input.Index );
+            if ( stopIndex == MemoRuleUnknown )
+            {
+                return false;
+            }
+            if ( stopIndex == MemoRuleFailed )
+            {
+                //System.out.println("rule "+ruleIndex+" will never succeed");
+                state.failed = true;
+            }
+            else
+            {
+                //System.out.println("seen rule "+ruleIndex+" before; skipping ahead to @"+(stopIndex+1)+" failed="+state.failed);
+                input.Seek( stopIndex + 1 ); // jump to one past stop token
+            }
+            return true;
+        }
+
+        /** <summary>
+         *  Record whether or not this rule parsed the input at this position
+         *  successfully.  Use a standard dictionary for now.
+         *  </summary>
+         */
+        public virtual void Memoize( IIntStream input,
+                            int ruleIndex,
+                            int ruleStartIndex )
+        {
+            int stopTokenIndex = state.failed ? MemoRuleFailed : input.Index - 1;
+            if ( state.ruleMemo == null )
+            {
+                if (TraceDestination != null)
+                    TraceDestination.WriteLine( "!!!!!!!!! memo array is null for " + GrammarFileName );
+            }
+            if ( ruleIndex >= state.ruleMemo.Length )
+            {
+                if (TraceDestination != null)
+                    TraceDestination.WriteLine("!!!!!!!!! memo size is " + state.ruleMemo.Length + ", but rule index is " + ruleIndex);
+            }
+            if ( state.ruleMemo[ruleIndex] != null )
+            {
+                state.ruleMemo[ruleIndex][ruleStartIndex] = stopTokenIndex;
+            }
+        }
+
+        /** <summary>return how many rule/input-index pairs there are in total.</summary>
+         *  TODO: this includes synpreds. :(
+         */
+        public virtual int GetRuleMemoizationCacheSize()
+        {
+            int n = 0;
+            for ( int i = 0; state.ruleMemo != null && i < state.ruleMemo.Length; i++ )
+            {
+                var ruleMap = state.ruleMemo[i];
+                if ( ruleMap != null )
+                {
+                    n += ruleMap.Count; // how many input indexes are recorded?
+                }
+            }
+            return n;
+        }
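
The memoization hooks above are normally driven by generated code when the memoize option is on. A rough sketch of that shape, with an illustrative rule index and helper name rather than real generated output:

using Antlr.Runtime;

public partial class MyParser
{
    private void MemoizedRuleSkeleton(IIntStream input)
    {
        int startIndex = input.Index;

        // While backtracking, bail out if this rule already ran at this position;
        // AlreadyParsedRule also seeks past the region matched last time on success.
        if (state.backtracking > 0 && AlreadyParsedRule(input, 7))
            return;

        try
        {
            // ... the rule body would go here ...
        }
        finally
        {
            // Record where the rule stopped (or that it failed) for this start index.
            if (state.backtracking > 0)
                Memoize(input, 7, startIndex);
        }
    }
}
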
+
+        public virtual void TraceIn(string ruleName, int ruleIndex, object inputSymbol)
+        {
+            if (TraceDestination == null)
+                return;
+
+            TraceDestination.Write("enter " + ruleName + " " + inputSymbol);
+            if (state.backtracking > 0)
+            {
+                TraceDestination.Write(" backtracking=" + state.backtracking);
+            }
+            TraceDestination.WriteLine();
+        }
+
+        public virtual void TraceOut(string ruleName, int ruleIndex, object inputSymbol)
+        {
+            if (TraceDestination == null)
+                return;
+
+            TraceDestination.Write("exit " + ruleName + " " + inputSymbol);
+            if (state.backtracking > 0)
+            {
+                TraceDestination.Write(" backtracking=" + state.backtracking);
+                if (state.failed)
+                    TraceDestination.Write(" failed");
+                else
+                    TraceDestination.Write(" succeeded");
+            }
+            TraceDestination.WriteLine();
+        }
+
+        #region Debugging support
+        public virtual IDebugEventListener DebugListener
+        {
+            get
+            {
+                return null;
+            }
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugEnterRule(string grammarFileName, string ruleName)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.EnterRule(grammarFileName, ruleName);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugExitRule(string grammarFileName, string ruleName)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.ExitRule(grammarFileName, ruleName);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugEnterSubRule(int decisionNumber)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.EnterSubRule(decisionNumber);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugExitSubRule(int decisionNumber)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.ExitSubRule(decisionNumber);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugEnterAlt(int alt)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.EnterAlt(alt);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugEnterDecision(int decisionNumber, bool couldBacktrack)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.EnterDecision(decisionNumber, couldBacktrack);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugExitDecision(int decisionNumber)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.ExitDecision(decisionNumber);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugLocation(int line, int charPositionInLine)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.Location(line, charPositionInLine);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugSemanticPredicate(bool result, string predicate)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.SemanticPredicate(result, predicate);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugBeginBacktrack(int level)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.BeginBacktrack(level);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugEndBacktrack(int level, bool successful)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.EndBacktrack(level, successful);
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugRecognitionException(RecognitionException ex)
+        {
+            IDebugEventListener dbg = DebugListener;
+            if (dbg != null)
+                dbg.RecognitionException(ex);
+        }
+        #endregion
+    }
+}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/BitSet.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/BitSet.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/BitSet.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/BitSet.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/BufferedTokenStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/BufferedTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/BufferedTokenStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/BufferedTokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/CharStreamConstants.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/CharStreamConstants.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/CharStreamConstants.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/CharStreamConstants.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/CharStreamState.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/CharStreamState.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/CharStreamState.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/CharStreamState.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ClassicToken.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ClassicToken.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ClassicToken.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/ClassicToken.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/CommonToken.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/CommonToken.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/CommonToken.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/CommonToken.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/CommonTokenStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/CommonTokenStream.cs
new file mode 100644
index 0000000..28813cb
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/CommonTokenStream.cs
@@ -0,0 +1,181 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime
+{
+    using System.Collections.Generic;
+
+    using InvalidOperationException = System.InvalidOperationException;
+    using StringBuilder = System.Text.StringBuilder;
+
+    /** <summary>
+     *  The most common stream of tokens is one where every token is buffered up
+     *  and tokens are prefiltered for a certain channel (the parser will only
+     *  see these tokens and cannot change the filter channel number during the
+     *  parse).
+     *  </summary>
+     *
+     *  <remarks>TODO: how to access the full token stream?  How to track all tokens matched per rule?</remarks>
+     */
+    [System.Serializable]
+    public class CommonTokenStream : BufferedTokenStream
+    {
+        /** Skip tokens on any channel but this one; this is how we skip whitespace... */
+        private int _channel;
+
+        public CommonTokenStream()
+        {
+        }
+
+        public CommonTokenStream(ITokenSource tokenSource)
+            : this(tokenSource, TokenChannels.Default)
+        {
+        }
+
+        public CommonTokenStream(ITokenSource tokenSource, int channel)
+            : base(tokenSource)
+        {
+            this._channel = channel;
+        }
+
+        public int Channel
+        {
+            get
+            {
+                return _channel;
+            }
+        }
+
+        /** Reset this token stream by setting its token source. */
+        public override ITokenSource TokenSource
+        {
+            get
+            {
+                return base.TokenSource;
+            }
+            set
+            {
+                base.TokenSource = value;
+                _channel = TokenChannels.Default;
+            }
+        }
+
+        /** Always leave p on an on-channel token. */
+        public override void Consume()
+        {
+            if (_p == -1)
+                Setup();
+            _p++;
+            _p = SkipOffTokenChannels(_p);
+        }
+
+        protected override IToken LB(int k)
+        {
+            if (k == 0 || (_p - k) < 0)
+                return null;
+
+            int i = _p;
+            int n = 1;
+            // find k good tokens looking backwards
+            while (n <= k)
+            {
+                // skip off-channel tokens
+                i = SkipOffTokenChannelsReverse(i - 1);
+                n++;
+            }
+            if (i < 0)
+                return null;
+            return _tokens[i];
+        }
+
+        public override IToken LT(int k)
+        {
+            if (_p == -1)
+                Setup();
+            if (k == 0)
+                return null;
+            if (k < 0)
+                return LB(-k);
+            int i = _p;
+            int n = 1; // we know tokens[p] is a good one
+            // find k good tokens
+            while (n < k)
+            {
+                // skip off-channel tokens
+                i = SkipOffTokenChannels(i + 1);
+                n++;
+            }
+
+            if (i > Range)
+                Range = i;
+
+            return _tokens[i];
+        }
+
+        /** Given a starting index, return the index of the first on-channel
+         *  token.
+         */
+        protected virtual int SkipOffTokenChannels(int i)
+        {
+            Sync(i);
+            while (_tokens[i].Channel != _channel)
+            {
+                // also stops at EOF (it's on channel)
+                i++;
+                Sync(i);
+            }
+            return i;
+        }
+
+        protected virtual int SkipOffTokenChannelsReverse(int i)
+        {
+            while (i >= 0 && ((IToken)_tokens[i]).Channel != _channel)
+            {
+                i--;
+            }
+
+            return i;
+        }
+
+        public override void Reset()
+        {
+            base.Reset();
+            _p = SkipOffTokenChannels(0);
+        }
+
+        protected override void Setup()
+        {
+            _p = 0;
+            _p = SkipOffTokenChannels(_p);
+        }
+    }
+}
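
A sketch of the usual pipeline this class sits in: the stream buffers every token from the lexer, but the parser only sees tokens on the requested channel (off-channel whitespace and comments are skipped). MyLexer, MyParser and the stat start rule are hypothetical generated names; ANTLRStringStream is the runtime's in-memory character stream.

using Antlr.Runtime;

class ParseDemo
{
    static void Main()
    {
        ICharStream chars = new ANTLRStringStream("i = (3);");
        MyLexer lexer = new MyLexer(chars);                 // hypothetical generated lexer
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        MyParser parser = new MyParser(tokens);             // hypothetical generated parser
        parser.stat();                                      // hypothetical start rule
    }
}
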
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/DFA.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/DFA.cs
new file mode 100644
index 0000000..76c4083
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/DFA.cs
@@ -0,0 +1,318 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime
+{
+    using ArgumentNullException = System.ArgumentNullException;
+    using ConditionalAttribute = System.Diagnostics.ConditionalAttribute;
+    using Console = System.Console;
+    using IDebugEventListener = Antlr.Runtime.Debug.IDebugEventListener;
+
+    public delegate int SpecialStateTransitionHandler( DFA dfa, int s, IIntStream input );
+
+    /** <summary>A DFA implemented as a set of transition tables.</summary>
+     *
+     *  <remarks>
+     *  Any state that has a semantic predicate edge is special; those states
+     *  are generated with if-then-else structures in a specialStateTransition()
+     *  which is generated by cyclicDFA template.
+     *
+     *  There are at most 32767 states (16-bit signed short).
+     *  Could get away with byte sometimes but would have to generate different
+     *  types and the simulation code too.  For a point of reference, the Java
+     *  lexer's Tokens rule DFA has roughly 326 states.
+     *  </remarks>
+     */
+    public class DFA
+    {
+        protected short[] eot;
+        protected short[] eof;
+        protected char[] min;
+        protected char[] max;
+        protected short[] accept;
+        protected short[] special;
+        protected short[][] transition;
+
+        protected int decisionNumber;
+
+        /** <summary>Which recognizer encloses this DFA?  Needed to check backtracking</summary> */
+        protected BaseRecognizer recognizer;
+
+        public bool debug = false;
+
+        public DFA()
+            : this(SpecialStateTransitionDefault)
+        {
+        }
+
+        public DFA( SpecialStateTransitionHandler specialStateTransition )
+        {
+            this.SpecialStateTransition = specialStateTransition ?? SpecialStateTransitionDefault;
+        }
+
+        public virtual string Description
+        {
+            get
+            {
+                return "n/a";
+            }
+        }
+
+        /** <summary>
+         *  From the input stream, predict what alternative will succeed
+         *  using this DFA (representing the covering regular approximation
+         *  to the underlying CFL).  Return an alternative number 1..n.  Throw
+         *  an exception upon error.
+         *  </summary>
+         */
+        public virtual int Predict( IIntStream input )
+        {
+            if (input == null)
+                throw new ArgumentNullException("input");
+
+            DfaDebugMessage("Enter DFA.Predict for decision {0}", decisionNumber);
+
+            int mark = input.Mark(); // remember where decision started in input
+            int s = 0; // we always start at s0
+            try
+            {
+                while (true)
+                {
+                    DfaDebugMessage("DFA {0} state {1} LA(1)={2}({3}), index={4}", decisionNumber, s, (char)input.LA(1), input.LA(1), input.Index);
+
+                    int specialState = special[s];
+                    if ( specialState >= 0 )
+                    {
+                        DfaDebugMessage("DFA {0} state {1} is special state {2}", decisionNumber, s, specialState);
+
+                        s = SpecialStateTransition( this, specialState, input );
+
+                        DfaDebugMessage("DFA {0} returns from special state {1} to {2}", decisionNumber, specialState, s);
+
+                        if ( s == -1 )
+                        {
+                            NoViableAlt( s, input );
+                            return 0;
+                        }
+
+                        input.Consume();
+                        continue;
+                    }
+
+                    if ( accept[s] >= 1 )
+                    {
+                        DfaDebugMessage("accept; predict {0} from state {1}", accept[s], s);
+                        return accept[s];
+                    }
+
+                    // look for a normal char transition
+                    char c = (char)input.LA( 1 ); // -1 == \uFFFF, all tokens fit in 65000 space
+                    if ( c >= min[s] && c <= max[s] )
+                    {
+                        int snext = transition[s][c - min[s]]; // move to next state
+                        if ( snext < 0 )
+                        {
+                            // was in range but not a normal transition
+                            // must check EOT, which is like the else clause.
+                            // eot[s]>=0 indicates that an EOT edge goes to another
+                            // state.
+                            if ( eot[s] >= 0 )
+                            {
+                                // EOT Transition to accept state?
+                                DfaDebugMessage("EOT transition");
+                                s = eot[s];
+                                input.Consume();
+                                // TODO: I had this as return accept[eot[s]]
+                                // which assumed here that the EOT edge always
+                                // went to an accept...faster to do this, but
+                                // what about predicated edges coming from EOT
+                                // target?
+                                continue;
+                            }
+
+                            NoViableAlt( s, input );
+                            return 0;
+                        }
+
+                        s = snext;
+                        input.Consume();
+                        continue;
+                    }
+
+                    if ( eot[s] >= 0 )
+                    {
+                        // EOT Transition?
+                        DfaDebugMessage("EOT transition");
+                        s = eot[s];
+                        input.Consume();
+                        continue;
+                    }
+
+                    if ( c == unchecked( (char)TokenTypes.EndOfFile ) && eof[s] >= 0 )
+                    {
+                        // EOF Transition to accept state?
+                        DfaDebugMessage("accept via EOF; predict {0} from {1}", accept[eof[s]], eof[s]);
+                        return accept[eof[s]];
+                    }
+
+                    // not in range and not EOF/EOT, must be invalid symbol
+                    DfaDebugInvalidSymbol(s);
+
+                    NoViableAlt( s, input );
+                    return 0;
+                }
+            }
+            finally
+            {
+                input.Rewind( mark );
+            }
+        }
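
Generated code fills the tables above (usually via the unpack helpers below) and calls Predict to pick an alternative. A toy, hand-built decision that predicts alt 1 on 'a' and alt 2 on 'b' shows the mechanics; it is illustrative only, not how real decisions are authored:

using System;
using Antlr.Runtime;

class TinyDfa : DFA
{
    public TinyDfa()
    {
        eot = new short[] { -1, -1, -1 };
        eof = new short[] { -1, -1, -1 };
        min = new char[] { 'a', '\0', '\0' };
        max = new char[] { 'b', '\0', '\0' };
        accept = new short[] { -1, 1, 2 };       // states 1 and 2 predict alts 1 and 2
        special = new short[] { -1, -1, -1 };
        transition = new short[][]
        {
            new short[] { 1, 2 },                // s0: 'a' -> s1, 'b' -> s2
            new short[0],
            new short[0],
        };
    }
}

class PredictDemo
{
    static void Main()
    {
        IIntStream input = new ANTLRStringStream("b");
        Console.WriteLine(new TinyDfa().Predict(input));   // prints 2
    }
}
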
+
+        [Conditional("DEBUG_DFA")]
+        private void DfaDebugMessage(string format, params object[] args)
+        {
+            Console.Error.WriteLine(format, args);
+        }
+
+        [Conditional("DEBUG_DFA")]
+        private void DfaDebugInvalidSymbol(int s)
+        {
+            Console.Error.WriteLine("min[{0}]={1}", s, min[s]);
+            Console.Error.WriteLine("max[{0}]={1}", s, max[s]);
+            Console.Error.WriteLine("eot[{0}]={1}", s, eot[s]);
+            Console.Error.WriteLine("eof[{0}]={1}", s, eof[s]);
+
+            for (int p = 0; p < transition[s].Length; p++)
+                Console.Error.Write(transition[s][p] + " ");
+
+            Console.Error.WriteLine();
+        }
+
+        protected virtual void NoViableAlt( int s, IIntStream input )
+        {
+            if ( recognizer.state.backtracking > 0 )
+            {
+                recognizer.state.failed = true;
+                return;
+            }
+            NoViableAltException nvae =
+                new NoViableAltException( Description,
+                                         decisionNumber,
+                                         s,
+                                         input );
+            Error( nvae );
+            throw nvae;
+        }
+
+        /** <summary>A hook for debugging interface</summary> */
+        public virtual void Error( NoViableAltException nvae )
+        {
+        }
+
+        public SpecialStateTransitionHandler SpecialStateTransition
+        {
+            get;
+            private set;
+        }
+        //public virtual int specialStateTransition( int s, IntStream input )
+        //{
+        //    return -1;
+        //}
+
+        static int SpecialStateTransitionDefault( DFA dfa, int s, IIntStream input )
+        {
+            return -1;
+        }
+
+        /** <summary>
+         *  Given a String that has a run-length-encoding of some unsigned shorts
+         *  like "\1\2\3\9", convert to short[] {2,9,9,9}.  We do this to avoid
+         *  static short[] which generates so much init code that the class won't
+         *  compile. :(
+         *  </summary>
+         */
+        public static short[] UnpackEncodedString( string encodedString )
+        {
+            // walk first to find how big it is.
+            int size = 0;
+            for ( int i = 0; i < encodedString.Length; i += 2 )
+            {
+                size += encodedString[i];
+            }
+            short[] data = new short[size];
+            int di = 0;
+            for ( int i = 0; i < encodedString.Length; i += 2 )
+            {
+                char n = encodedString[i];
+                char v = encodedString[i + 1];
+                // add v n times to data
+                for ( int j = 1; j <= n; j++ )
+                {
+                    data[di++] = (short)v;
+                }
+            }
+            return data;
+        }
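
A small sketch of what the encoding means: characters are read in (count, value) pairs, so the string below expands to one 2 followed by three 9s, matching the {2,9,9,9} example in the summary.

using System;
using Antlr.Runtime;

class UnpackDemo
{
    static void Main()
    {
        // Pairs are (count, value): (1,2) then (3,9).
        short[] data = DFA.UnpackEncodedString("\u0001\u0002\u0003\u0009");
        Console.WriteLine(string.Join(",", Array.ConvertAll(data, v => v.ToString())));
        // prints: 2,9,9,9
    }
}
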
+
+        /** <summary>Hideous duplication of code, but I need different typed arrays out :(</summary> */
+        public static char[] UnpackEncodedStringToUnsignedChars( string encodedString )
+        {
+            // walk first to find how big it is.
+            int size = 0;
+            for ( int i = 0; i < encodedString.Length; i += 2 )
+            {
+                size += encodedString[i];
+            }
+            char[] data = new char[size];
+            int di = 0;
+            for ( int i = 0; i < encodedString.Length; i += 2 )
+            {
+                char n = encodedString[i];
+                char v = encodedString[i + 1];
+                // add v n times to data
+                for ( int j = 1; j <= n; j++ )
+                {
+                    data[di++] = v;
+                }
+            }
+            return data;
+        }
+
+        [Conditional("ANTLR_DEBUG")]
+        protected virtual void DebugRecognitionException(RecognitionException ex)
+        {
+            IDebugEventListener dbg = recognizer.DebugListener;
+            if (dbg != null)
+                dbg.RecognitionException(ex);
+        }
+    }
+}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Debug/IDebugEventListener.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Debug/IDebugEventListener.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Debug/IDebugEventListener.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Debug/IDebugEventListener.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/EarlyExitException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/EarlyExitException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/EarlyExitException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/EarlyExitException.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/FailedPredicateException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/FailedPredicateException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/FailedPredicateException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/FailedPredicateException.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/GrammarRuleAttribute.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/GrammarRuleAttribute.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/GrammarRuleAttribute.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/GrammarRuleAttribute.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IAstRuleReturnScope.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/IAstRuleReturnScope.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IAstRuleReturnScope.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/IAstRuleReturnScope.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IAstRuleReturnScope\1401.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime/IAstRuleReturnScope\1401.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IAstRuleReturnScope\1401.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime/IAstRuleReturnScope\1401.cs"
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ICharStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ICharStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ICharStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/ICharStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IIntStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/IIntStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IIntStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/IIntStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IRuleReturnScope.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/IRuleReturnScope.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IRuleReturnScope.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/IRuleReturnScope.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IRuleReturnScope\1401.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime/IRuleReturnScope\1401.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IRuleReturnScope\1401.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime/IRuleReturnScope\1401.cs"
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ITemplateRuleReturnScope.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ITemplateRuleReturnScope.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ITemplateRuleReturnScope.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/ITemplateRuleReturnScope.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ITemplateRuleReturnScope\1401.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime/ITemplateRuleReturnScope\1401.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ITemplateRuleReturnScope\1401.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime/ITemplateRuleReturnScope\1401.cs"
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IToken.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/IToken.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/IToken.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/IToken.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ITokenSource.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ITokenSource.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ITokenSource.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/ITokenSource.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ITokenStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ITokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ITokenStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/ITokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ITokenStreamInformation.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ITokenStreamInformation.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ITokenStreamInformation.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/ITokenStreamInformation.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/LegacyCommonTokenStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/LegacyCommonTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/LegacyCommonTokenStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/LegacyCommonTokenStream.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Lexer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Lexer.cs
new file mode 100644
index 0000000..daf530d
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Lexer.cs
@@ -0,0 +1,443 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime
+{
+    using ConditionalAttribute = System.Diagnostics.ConditionalAttribute;
+
+    /** <summary>
+     *  A lexer is a recognizer that draws input symbols from a character stream.
+     *  Lexer grammars result in a subclass of this object. A Lexer object
+     *  uses simplified match() and error recovery mechanisms in the interest
+     *  of speed.
+     *  </summary>
+     */
+    public abstract class Lexer : BaseRecognizer, ITokenSource
+    {
+        /** <summary>Where is the lexer drawing characters from?</summary> */
+        protected ICharStream input;
+
+        public Lexer()
+        {
+        }
+
+        public Lexer( ICharStream input )
+        {
+            this.input = input;
+        }
+
+        public Lexer( ICharStream input, RecognizerSharedState state )
+            : base(state)
+        {
+            this.input = input;
+        }
+
+        #region Properties
+        public string Text
+        {
+            /** <summary>Return the text matched so far for the current token or any text override.</summary> */
+            get
+            {
+                if ( state.text != null )
+                {
+                    return state.text;
+                }
+                return input.Substring( state.tokenStartCharIndex, CharIndex - state.tokenStartCharIndex );
+            }
+            /** <summary>Set the complete text of this token; it wipes any previous changes to the text.</summary> */
+            set
+            {
+                state.text = value;
+            }
+        }
+        public int Line
+        {
+            get
+            {
+                return input.Line;
+            }
+            set
+            {
+                input.Line = value;
+            }
+        }
+        public int CharPositionInLine
+        {
+            get
+            {
+                return input.CharPositionInLine;
+            }
+            set
+            {
+                input.CharPositionInLine = value;
+            }
+        }
+        #endregion
+
+        public override void Reset()
+        {
+            base.Reset(); // reset all recognizer state variables
+            // whack Lexer state variables
+            if ( input != null )
+            {
+                input.Seek( 0 ); // rewind the input
+            }
+            if ( state == null )
+            {
+                return; // no shared state work to do
+            }
+            state.token = null;
+            state.type = TokenTypes.Invalid;
+            state.channel = TokenChannels.Default;
+            state.tokenStartCharIndex = -1;
+            state.tokenStartCharPositionInLine = -1;
+            state.tokenStartLine = -1;
+            state.text = null;
+        }
+
+        /** <summary>Return a token from this source; i.e., match a token on the char stream.</summary> */
+        public virtual IToken NextToken()
+        {
+            for ( ; ; )
+            {
+                state.token = null;
+                state.channel = TokenChannels.Default;
+                state.tokenStartCharIndex = input.Index;
+                state.tokenStartCharPositionInLine = input.CharPositionInLine;
+                state.tokenStartLine = input.Line;
+                state.text = null;
+                if ( input.LA( 1 ) == CharStreamConstants.EndOfFile )
+                {
+                    return GetEndOfFileToken();
+                }
+                try
+                {
+                    ParseNextToken();
+                    if ( state.token == null )
+                    {
+                        Emit();
+                    }
+                    else if ( state.token == Tokens.Skip )
+                    {
+                        continue;
+                    }
+                    return state.token;
+                }
+                catch (MismatchedRangeException mre)
+                {
+                    ReportError(mre);
+                    // MatchRange() routine has already called recover()
+                }
+                catch (MismatchedTokenException mte)
+                {
+                    ReportError(mte);
+                    // Match() routine has already called recover()
+                }
+                catch ( RecognitionException re )
+                {
+                    ReportError( re );
+                    Recover( re ); // throw out current char and try again
+                }
+            }
+        }
+
+        /** Returns the EOF token by default; override this method
+         *  if you need to return a custom token instead.
+         */
+        public virtual IToken GetEndOfFileToken()
+        {
+            IToken eof = new CommonToken((ICharStream)input, CharStreamConstants.EndOfFile, TokenChannels.Default, input.Index, input.Index);
+            eof.Line = Line;
+            eof.CharPositionInLine = CharPositionInLine;
+            return eof;
+        }
+
+        /** <summary>
+         *  Instruct the lexer to skip creating a token for current lexer rule
+         *  and look for another token.  nextToken() knows to keep looking when
+         *  a lexer rule finishes with token set to SKIP_TOKEN.  Recall that
+         *  if token==null at end of any token rule, it creates one for you
+         *  and emits it.
+         *  </summary>
+         */
+        public virtual void Skip()
+        {
+            state.token = Tokens.Skip;
+        }
+
+        /** <summary>This is the lexer entry point that sets instance var 'token'</summary> */
+        public abstract void mTokens();
+
+        public virtual ICharStream CharStream
+        {
+            get
+            {
+                return input;
+            }
+            /** <summary>Set the char stream and reset the lexer</summary> */
+            set
+            {
+                input = null;
+                Reset();
+                input = value;
+            }
+        }
+
+        public override string SourceName
+        {
+            get
+            {
+                return input.SourceName;
+            }
+        }
+
+        /** <summary>
+         *  Currently does not support multiple emits per nextToken invocation
+         *  for efficiency reasons.  Subclass and override this method and
+         *  nextToken (to push tokens into a list and pull from that list rather
+         *  than a single variable as this implementation does).
+         *  </summary>
+         */
+        public virtual void Emit( IToken token )
+        {
+            state.token = token;
+        }
+
+        /** <summary>
+         *  The standard method called to automatically emit a token at the
+         *  outermost lexical rule.  The token object should point into the
+         *  char buffer start..stop.  If there is a text override in 'text',
+         *  use that to set the token's text.  Override this method to emit
+         *  custom Token objects.
+         *  </summary>
+         *
+         *  <remarks>
+         *  If you are building trees, then you should also override
+         *  Parser or TreeParser.getMissingSymbol().
+         *  </remarks>
+         */
+        public virtual IToken Emit()
+        {
+            IToken t = new CommonToken( input, state.type, state.channel, state.tokenStartCharIndex, CharIndex - 1 );
+            t.Line = state.tokenStartLine;
+            t.Text = state.text;
+            t.CharPositionInLine = state.tokenStartCharPositionInLine;
+            Emit( t );
+            return t;
+        }
+
+        public virtual void Match( string s )
+        {
+            int i = 0;
+            while ( i < s.Length )
+            {
+                if ( input.LA( 1 ) != s[i] )
+                {
+                    if ( state.backtracking > 0 )
+                    {
+                        state.failed = true;
+                        return;
+                    }
+                    MismatchedTokenException mte = new MismatchedTokenException(s[i], input, TokenNames);
+                    Recover( mte );
+                    throw mte;
+                }
+                i++;
+                input.Consume();
+                state.failed = false;
+            }
+        }
+
+        public virtual void MatchAny()
+        {
+            input.Consume();
+        }
+
+        public virtual void Match( int c )
+        {
+            if ( input.LA( 1 ) != c )
+            {
+                if ( state.backtracking > 0 )
+                {
+                    state.failed = true;
+                    return;
+                }
+                MismatchedTokenException mte = new MismatchedTokenException(c, input, TokenNames);
+                Recover( mte );  // don't really recover; just consume in lexer
+                throw mte;
+            }
+            input.Consume();
+            state.failed = false;
+        }
+
+        public virtual void MatchRange( int a, int b )
+        {
+            if ( input.LA( 1 ) < a || input.LA( 1 ) > b )
+            {
+                if ( state.backtracking > 0 )
+                {
+                    state.failed = true;
+                    return;
+                }
+                MismatchedRangeException mre = new MismatchedRangeException(a, b, input);
+                Recover( mre );
+                throw mre;
+            }
+            input.Consume();
+            state.failed = false;
+        }
+
+        /** <summary>What is the index of the current character of lookahead?</summary> */
+        public virtual int CharIndex
+        {
+            get
+            {
+                return input.Index;
+            }
+        }
+
+        public override void ReportError( RecognitionException e )
+        {
+            /** TODO: not thought about recovery in lexer yet.
+             *
+            // if we've already reported an error and have not matched a token
+            // yet successfully, don't report any errors.
+            if ( errorRecovery ) {
+                //System.err.print("[SPURIOUS] ");
+                return;
+            }
+            errorRecovery = true;
+             */
+
+            DisplayRecognitionError( this.TokenNames, e );
+        }
+
+        public override string GetErrorMessage( RecognitionException e, string[] tokenNames )
+        {
+            string msg = null;
+            if ( e is MismatchedTokenException )
+            {
+                MismatchedTokenException mte = (MismatchedTokenException)e;
+                msg = "mismatched character " + GetCharErrorDisplay( e.Character ) + " expecting " + GetCharErrorDisplay( mte.Expecting );
+            }
+            else if ( e is NoViableAltException )
+            {
+                NoViableAltException nvae = (NoViableAltException)e;
+                // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
+                // and "(decision="+nvae.decisionNumber+") and
+                // "state "+nvae.stateNumber
+                msg = "no viable alternative at character " + GetCharErrorDisplay( e.Character );
+            }
+            else if ( e is EarlyExitException )
+            {
+                EarlyExitException eee = (EarlyExitException)e;
+                // for development, can add "(decision="+eee.decisionNumber+")"
+                msg = "required (...)+ loop did not match anything at character " + GetCharErrorDisplay( e.Character );
+            }
+            else if ( e is MismatchedNotSetException )
+            {
+                MismatchedNotSetException mse = (MismatchedNotSetException)e;
+                msg = "mismatched character " + GetCharErrorDisplay( e.Character ) + " expecting set " + mse.Expecting;
+            }
+            else if ( e is MismatchedSetException )
+            {
+                MismatchedSetException mse = (MismatchedSetException)e;
+                msg = "mismatched character " + GetCharErrorDisplay( e.Character ) + " expecting set " + mse.Expecting;
+            }
+            else if ( e is MismatchedRangeException )
+            {
+                MismatchedRangeException mre = (MismatchedRangeException)e;
+                msg = "mismatched character " + GetCharErrorDisplay( e.Character ) + " expecting set " +
+                      GetCharErrorDisplay( mre.A ) + ".." + GetCharErrorDisplay( mre.B );
+            }
+            else
+            {
+                msg = base.GetErrorMessage( e, tokenNames );
+            }
+            return msg;
+        }
+
+        public virtual string GetCharErrorDisplay( int c )
+        {
+            string s = ( (char)c ).ToString();
+            switch ( c )
+            {
+            case TokenTypes.EndOfFile:
+                s = "<EOF>";
+                break;
+            case '\n':
+                s = "\\n";
+                break;
+            case '\t':
+                s = "\\t";
+                break;
+            case '\r':
+                s = "\\r";
+                break;
+            }
+            return "'" + s + "'";
+        }
+
+        /** <summary>
+         *  Lexers can normally match any char in its vocabulary after matching
+         *  a token, so do the easy thing and just kill a character and hope
+         *  it all works out.  You can instead use the rule invocation stack
+         *  to do sophisticated error recovery if you are in a fragment rule.
+         *  </summary>
+         */
+        public virtual void Recover( RecognitionException re )
+        {
+            //System.out.println("consuming char "+(char)input.LA(1)+" during recovery");
+            //re.printStackTrace();
+            input.Consume();
+        }
+
+        [Conditional("ANTLR_TRACE")]
+        public virtual void TraceIn( string ruleName, int ruleIndex )
+        {
+            string inputSymbol = ( (char)input.LT( 1 ) ) + " line=" + Line + ":" + CharPositionInLine;
+            base.TraceIn( ruleName, ruleIndex, inputSymbol );
+        }
+
+        [Conditional("ANTLR_TRACE")]
+        public virtual void TraceOut( string ruleName, int ruleIndex )
+        {
+            string inputSymbol = ( (char)input.LT( 1 ) ) + " line=" + Line + ":" + CharPositionInLine;
+            base.TraceOut( ruleName, ruleIndex, inputSymbol );
+        }
+
+        protected virtual void ParseNextToken()
+        {
+            mTokens();
+        }
+    }
+}
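NextToken() above keeps matching lexer rules until a token is emitted or end of file is reached, skipping rules that call Skip(). A sketch of the usual driver loop; MyLexer is a hypothetical stand-in for any generated lexer class, and ANTLRStringStream is the runtime's in-memory char stream:

    using System;
    using Antlr.Runtime;

    class TokenDump
    {
        static void Main()
        {
            ICharStream chars = new ANTLRStringStream("a b c");
            MyLexer lexer = new MyLexer(chars);   // hypothetical generated lexer

            // Pull tokens until the EOF token produced by GetEndOfFileToken().
            for (IToken t = lexer.NextToken();
                 t.Type != CharStreamConstants.EndOfFile;
                 t = lexer.NextToken())
            {
                Console.WriteLine("{0} '{1}' @ {2}:{3}",
                                  t.Type, t.Text, t.Line, t.CharPositionInLine);
            }
        }
    }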
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/FastQueue.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/FastQueue.cs
new file mode 100644
index 0000000..af975d7
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/FastQueue.cs
@@ -0,0 +1,143 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Misc
+{
+    using System.Collections.Generic;
+    using ArgumentException = System.ArgumentException;
+    using InvalidOperationException = System.InvalidOperationException;
+
+    /** A queue that can dequeue and get(i) in O(1) and grow arbitrarily large.
+     *  A linked list is fast at dequeue but slow at get(i).  An array is
+     *  the reverse.  This is O(1) for both operations.
+     *
+     *  List grows until you dequeue last element at end of buffer. Then
+     *  it resets to start filling at 0 again.  If adds/removes are balanced, the
+     *  buffer will not grow too large.
+     *
+     *  No iterator stuff as that's not how we'll use it.
+     */
+    public class FastQueue<T>
+    {
+        /** <summary>dynamically-sized buffer of elements</summary> */
+        internal List<T> _data = new List<T>();
+        /** <summary>index of next element to fill</summary> */
+        internal int _p = 0;
+
+        public virtual int Count
+        {
+            get
+            {
+                return _data.Count - _p;
+            }
+        }
+
+        /// <summary>
+        /// How deep have we gone?
+        /// </summary>
+        public virtual int Range
+        {
+            get;
+            protected set;
+        }
+
+        /** <summary>
+         * Return element {@code i} elements ahead of current element. {@code i==0}
+         * gets current element. This is not an absolute index into {@link #data}
+         * since {@code p} defines the start of the real list.
+         *  </summary>
+         */
+        public virtual T this[int i]
+        {
+            get
+            {
+                int absIndex = _p + i;
+                if (absIndex >= _data.Count)
+                    throw new ArgumentException(string.Format("queue index {0} > last index {1}", absIndex, _data.Count - 1));
+                if (absIndex < 0)
+                    throw new ArgumentException(string.Format("queue index {0} < 0", absIndex));
+
+                if (absIndex > Range)
+                    Range = absIndex;
+
+                return _data[absIndex];
+            }
+        }
+
+        /** <summary>Get and remove first element in queue</summary> */
+        public virtual T Dequeue()
+        {
+            if (Count == 0)
+                throw new InvalidOperationException();
+
+            T o = this[0];
+            _p++;
+            // have we hit end of buffer?
+            if ( _p == _data.Count )
+            {
+                // if so, it's an opportunity to start filling at index 0 again
+                Clear(); // size goes to 0, but retains memory
+            }
+            return o;
+        }
+
+        public virtual void Enqueue( T o )
+        {
+            _data.Add( o );
+        }
+
+        public virtual T Peek()
+        {
+            return this[0];
+        }
+
+        public virtual void Clear()
+        {
+            _p = 0;
+            _data.Clear();
+        }
+
+        /** <summary>Return string of current buffer contents; non-destructive</summary> */
+        public override string ToString()
+        {
+            System.Text.StringBuilder buf = new System.Text.StringBuilder();
+            int n = Count;
+            for ( int i = 0; i < n; i++ )
+            {
+                buf.Append( this[i] );
+                if ( ( i + 1 ) < n )
+                    buf.Append( " " );
+            }
+            return buf.ToString();
+        }
+    }
+}
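A short usage sketch of the buffer semantics documented above: the indexer is relative to the current head, Dequeue() advances the head in O(1), and ToString() dumps the remaining contents non-destructively.

    using System;
    using Antlr.Runtime.Misc;

    class FastQueueDemo
    {
        static void Main()
        {
            FastQueue<string> q = new FastQueue<string>();
            q.Enqueue("a");
            q.Enqueue("b");
            q.Enqueue("c");

            Console.WriteLine(q[0]);         // "a"  -- index 0 is always the current head
            Console.WriteLine(q.Dequeue());  // "a"
            Console.WriteLine(q[0]);         // "b"  -- indexing stays relative to the head
            Console.WriteLine(q.Count);      // 2
            Console.WriteLine(q);            // "b c" -- non-destructive dump of the buffer
        }
    }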
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/FunctionDelegates.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/FunctionDelegates.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/FunctionDelegates.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Misc/FunctionDelegates.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/ListStack\1401.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/ListStack\1401.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/ListStack\1401.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime/Misc/ListStack\1401.cs"
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/LookaheadStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/LookaheadStream.cs
new file mode 100644
index 0000000..14454a8
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Misc/LookaheadStream.cs
@@ -0,0 +1,271 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Misc
+{
+    using ArgumentException = System.ArgumentException;
+    using Debug = System.Diagnostics.Debug;
+    using InvalidOperationException = System.InvalidOperationException;
+    using NotSupportedException = System.NotSupportedException;
+    using ArgumentOutOfRangeException = System.ArgumentOutOfRangeException;
+
+    /** <summary>
+     * A lookahead queue that knows how to mark/release locations in the buffer for
+     * backtracking purposes. Any markers force the {@link FastQueue} superclass to
+     * keep all elements until no more markers; then can reset to avoid growing a
+     * huge buffer.
+     *  </summary>
+     */
+    public abstract class LookaheadStream<T>
+        : FastQueue<T>
+        where T : class
+    {
+        /** Absolute token index. It's the index of the symbol about to be
+         *  read via {@code LT(1)}. Goes from 0 to numtokens.
+         */
+        private int _currentElementIndex = 0;
+
+        /**
+         * This is the {@code LT(-1)} element for the first element in {@link #data}.
+         */
+        private T _previousElement;
+
+        /** Track object returned by nextElement upon end of stream;
+         *  Return it later when they ask for LT past the end of input.
+         */
+        T _eof = null;
+
+        /** <summary>Track the last mark() call result value for use in rewind().</summary> */
+        int _lastMarker;
+
+        /** <summary>tracks how deep mark() calls are nested</summary> */
+        int _markDepth;
+
+        public T EndOfFile
+        {
+            get
+            {
+                return _eof;
+            }
+            protected set
+            {
+                _eof = value;
+            }
+        }
+
+        public T PreviousElement
+        {
+            get
+            {
+                return _previousElement;
+            }
+        }
+
+        public virtual void Reset()
+        {
+            Clear();
+            _currentElementIndex = 0;
+            _p = 0;
+            _previousElement = null;
+        }
+
+        /** <summary>
+         *  Implement nextElement to supply a stream of elements to this
+         *  lookahead buffer.  Return EOF upon end of the stream we're pulling from.
+         *  </summary>
+         */
+        public abstract T NextElement();
+
+        public abstract bool IsEndOfFile(T o);
+
+        /** <summary>
+         * Get and remove first element in queue; override
+         * {@link FastQueue#remove()}; it's the same, just checks for backtracking.
+         * </summary> */
+        public override T Dequeue()
+        {
+            T o = this[0];
+            _p++;
+            // have we hit end of buffer and not backtracking?
+            if ( _p == _data.Count && _markDepth == 0 )
+            {
+                _previousElement = o;
+                // if so, it's an opportunity to start filling at index 0 again
+                Clear(); // size goes to 0, but retains memory
+            }
+            return o;
+        }
+
+        /** <summary>Make sure we have at least one element to remove, even if EOF</summary> */
+        public virtual void Consume()
+        {
+            SyncAhead(1);
+            Dequeue();
+            _currentElementIndex++;
+        }
+
+        /** <summary>
+         *  Make sure we have 'need' elements from current position p. Last valid
+         *  p index is data.size()-1.  p+need-1 is the data index 'need' elements
+         *  ahead.  If we need 1 element, (p+1-1)==p must be &lt; data.size().
+         *  </summary>
+         */
+        protected virtual void SyncAhead( int need )
+        {
+            int n = ( _p + need - 1 ) - _data.Count + 1; // how many more elements do we need?
+            if ( n > 0 )
+                Fill( n );                 // out of elements?
+        }
+
+        /** <summary>add n elements to buffer</summary> */
+        public virtual void Fill( int n )
+        {
+            for ( int i = 0; i < n; i++ )
+            {
+                T o = NextElement();
+                if ( IsEndOfFile(o) )
+                    _eof = o;
+
+                _data.Add( o );
+            }
+        }
+
+        /** <summary>Size of entire stream is unknown; we only know buffer size from FastQueue</summary> */
+        public override int Count
+        {
+            get
+            {
+                throw new System.NotSupportedException( "streams are of unknown size" );
+            }
+        }
+
+        public virtual T LT( int k )
+        {
+            if ( k == 0 )
+            {
+                return null;
+            }
+            if ( k < 0 )
+            {
+                return LB(-k);
+            }
+
+            SyncAhead( k );
+            if ((_p + k - 1) > _data.Count)
+                return _eof;
+
+            return this[k - 1];
+        }
+
+        public virtual int Index
+        {
+            get
+            {
+                return _currentElementIndex;
+            }
+        }
+
+        public virtual int Mark()
+        {
+            _markDepth++;
+            _lastMarker = _p; // track where we are in buffer, not absolute token index
+            return _lastMarker;
+        }
+
+        public virtual void Release( int marker )
+        {
+            if (_markDepth == 0)
+                throw new InvalidOperationException();
+
+            _markDepth--;
+        }
+
+        public virtual void Rewind( int marker )
+        {
+            _markDepth--;
+            int delta = _p - marker;
+            _currentElementIndex -= delta;
+            _p = marker;
+        }
+
+        public virtual void Rewind()
+        {
+            // rewind but do not release marker
+            int delta = _p - _lastMarker;
+            _currentElementIndex -= delta;
+            _p = _lastMarker;
+        }
+
+        /** <summary>
+         * Seek to a 0-indexed absolute token index. Normally used to seek backwards
+         * in the buffer. Does not force loading of nodes.
+         *  </summary>
+         *  <remarks>
+         * To preserve backward compatibility, this method allows seeking past the
+         * end of the currently buffered data. In this case, the input pointer will
+         * be moved but the data will only actually be loaded upon the next call to
+         * {@link #consume} or {@link #LT} for {@code k>0}.
+         *  </remarks>
+         */
+        public virtual void Seek( int index )
+        {
+            if (index < 0)
+                throw new ArgumentOutOfRangeException("index");
+
+            int delta = _currentElementIndex - index;
+            if (_p - delta < 0)
+                throw new NotSupportedException("can't seek before the beginning of this stream's buffer");
+
+            _p -= delta;
+            _currentElementIndex = index;
+        }
+
+        protected virtual T LB(int k)
+        {
+            Debug.Assert(k > 0);
+
+            int index = _p - k;
+            if (index == -1)
+                return _previousElement;
+
+            // if k>0 then we know index < data.size(). avoid the double-check for
+            // performance.
+            if (index >= 0 /*&& index < data.size()*/)
+                return _data[index];
+
+            if (index < -1)
+                throw new NotSupportedException("can't look more than one token before the beginning of this stream's buffer");
+
+            throw new NotSupportedException("can't look past the end of this stream's buffer using LB(int)");
+        }
+    }
+}
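Because LookaheadStream<T> is abstract, a concrete stream only has to supply NextElement() and IsEndOfFile(). A hypothetical minimal subclass over an in-memory sequence, showing how Mark()/Rewind() replay buffered elements:

    using System;
    using System.Collections.Generic;
    using Antlr.Runtime.Misc;

    // Hypothetical element source: wraps a sequence of strings and uses "<EOF>" as its end marker.
    class ListLookahead : LookaheadStream<string>
    {
        private readonly IEnumerator<string> _source;

        public ListLookahead(IEnumerable<string> items)
        {
            _source = items.GetEnumerator();
        }

        public override string NextElement()
        {
            return _source.MoveNext() ? _source.Current : "<EOF>";
        }

        public override bool IsEndOfFile(string o)
        {
            return o == "<EOF>";
        }
    }

    class LookaheadDemo
    {
        static void Main()
        {
            ListLookahead stream = new ListLookahead(new[] { "x", "y", "z" });
            Console.WriteLine(stream.LT(1));   // "x"

            int marker = stream.Mark();
            stream.Consume();                  // move past "x"
            Console.WriteLine(stream.LT(1));   // "y"

            stream.Rewind(marker);             // back to the marked position
            Console.WriteLine(stream.LT(1));   // "x" again
        }
    }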
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedNotSetException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedNotSetException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedNotSetException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedNotSetException.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedRangeException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedRangeException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedRangeException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedRangeException.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedSetException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedSetException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedSetException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedSetException.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedTokenException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedTokenException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedTokenException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedTokenException.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedTreeNodeException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedTreeNodeException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedTreeNodeException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/MismatchedTreeNodeException.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MissingTokenException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/MissingTokenException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/MissingTokenException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/MissingTokenException.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/NoViableAltException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/NoViableAltException.cs
new file mode 100644
index 0000000..4f37001
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/NoViableAltException.cs
@@ -0,0 +1,165 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Tunnel Vision Laboratories, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime
+{
+    using ArgumentNullException = System.ArgumentNullException;
+    using Exception = System.Exception;
+    using SerializationInfo = System.Runtime.Serialization.SerializationInfo;
+    using StreamingContext = System.Runtime.Serialization.StreamingContext;
+
+    [System.Serializable]
+    public class NoViableAltException : RecognitionException
+    {
+        private readonly string _grammarDecisionDescription;
+        private readonly int _decisionNumber;
+        private readonly int _stateNumber;
+
+        public NoViableAltException()
+        {
+        }
+
+        public NoViableAltException(string grammarDecisionDescription)
+        {
+            this._grammarDecisionDescription = grammarDecisionDescription;
+        }
+
+        public NoViableAltException(string message, string grammarDecisionDescription)
+            : base(message)
+        {
+            this._grammarDecisionDescription = grammarDecisionDescription;
+        }
+
+        public NoViableAltException(string message, string grammarDecisionDescription, Exception innerException)
+            : base(message, innerException)
+        {
+            this._grammarDecisionDescription = grammarDecisionDescription;
+        }
+
+        public NoViableAltException(string grammarDecisionDescription, int decisionNumber, int stateNumber, IIntStream input)
+            : this(grammarDecisionDescription, decisionNumber, stateNumber, input, 1)
+        {
+        }
+
+        public NoViableAltException(string grammarDecisionDescription, int decisionNumber, int stateNumber, IIntStream input, int k)
+            : base(input, k)
+        {
+            this._grammarDecisionDescription = grammarDecisionDescription;
+            this._decisionNumber = decisionNumber;
+            this._stateNumber = stateNumber;
+        }
+
+        public NoViableAltException(string message, string grammarDecisionDescription, int decisionNumber, int stateNumber, IIntStream input)
+            : this(message, grammarDecisionDescription, decisionNumber, stateNumber, input, 1)
+        {
+        }
+
+        public NoViableAltException(string message, string grammarDecisionDescription, int decisionNumber, int stateNumber, IIntStream input, int k)
+            : base(message, input, k)
+        {
+            this._grammarDecisionDescription = grammarDecisionDescription;
+            this._decisionNumber = decisionNumber;
+            this._stateNumber = stateNumber;
+        }
+
+        public NoViableAltException(string message, string grammarDecisionDescription, int decisionNumber, int stateNumber, IIntStream input, Exception innerException)
+            : this(message, grammarDecisionDescription, decisionNumber, stateNumber, input, 1, innerException)
+        {
+        }
+
+        public NoViableAltException(string message, string grammarDecisionDescription, int decisionNumber, int stateNumber, IIntStream input, int k, Exception innerException)
+            : base(message, input, k, innerException)
+        {
+            this._grammarDecisionDescription = grammarDecisionDescription;
+            this._decisionNumber = decisionNumber;
+            this._stateNumber = stateNumber;
+        }
+
+        protected NoViableAltException(SerializationInfo info, StreamingContext context)
+            : base(info, context)
+        {
+            if (info == null)
+                throw new ArgumentNullException("info");
+
+            this._grammarDecisionDescription = info.GetString("GrammarDecisionDescription");
+            this._decisionNumber = info.GetInt32("DecisionNumber");
+            this._stateNumber = info.GetInt32("StateNumber");
+        }
+
+        public int DecisionNumber
+        {
+            get
+            {
+                return _decisionNumber;
+            }
+        }
+
+        public string GrammarDecisionDescription
+        {
+            get
+            {
+                return _grammarDecisionDescription;
+            }
+        }
+
+        public int StateNumber
+        {
+            get
+            {
+                return _stateNumber;
+            }
+        }
+
+        public override void GetObjectData(SerializationInfo info, StreamingContext context)
+        {
+            if (info == null)
+                throw new ArgumentNullException("info");
+
+            base.GetObjectData(info, context);
+            info.AddValue("GrammarDecisionDescription", _grammarDecisionDescription);
+            info.AddValue("DecisionNumber", _decisionNumber);
+            info.AddValue("StateNumber", _stateNumber);
+        }
+
+        public override string ToString()
+        {
+            if ( Input is ICharStream )
+            {
+                return "NoViableAltException('" + (char)UnexpectedType + "'@[" + GrammarDecisionDescription + "])";
+            }
+            else
+            {
+                return "NoViableAltException(" + UnexpectedType + "@[" + GrammarDecisionDescription + "])";
+            }
+        }
+    }
+}
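The decision/state fields carried by NoViableAltException are mainly useful while developing a grammar. A hedged sketch of a reporting helper that surfaces them (the format string is illustrative only, not part of the runtime):

    using Antlr.Runtime;

    static class NoViableAltReport
    {
        // Pull the development-oriented fields out of a NoViableAltException;
        // fall back to the plain exception message otherwise.
        public static string Describe(RecognitionException e)
        {
            NoViableAltException nvae = e as NoViableAltException;
            if (nvae != null)
            {
                return string.Format("no viable alt: decision={0} state={1} <<{2}>>",
                                     nvae.DecisionNumber, nvae.StateNumber,
                                     nvae.GrammarDecisionDescription);
            }

            return e.Message;
        }
    }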
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Parser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Parser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Parser.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Parser.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ParserRuleReturnScope.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/ParserRuleReturnScope.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/ParserRuleReturnScope.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/ParserRuleReturnScope.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Properties/AssemblyInfo.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..cc3425f
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Properties/AssemblyInfo.cs
@@ -0,0 +1,72 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+using System;
+using System.Reflection;
+using System.Runtime.InteropServices;
+using System.Security;
+
+// General Information about an assembly is controlled through the following 
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle( "Antlr3.Runtime" )]
+[assembly: AssemblyDescription( "" )]
+[assembly: AssemblyConfiguration( "" )]
+[assembly: AssemblyCompany( "Tunnel Vision Laboratories, LLC" )]
+[assembly: AssemblyProduct( "Antlr3.Runtime" )]
+[assembly: AssemblyCopyright("Copyright © Sam Harwell 2013")]
+[assembly: AssemblyTrademark( "" )]
+[assembly: AssemblyCulture( "" )]
+[assembly: CLSCompliant( true )]
+[assembly: AllowPartiallyTrustedCallers]
+
+// Setting ComVisible to false makes the types in this assembly not visible 
+// to COM components.  If you need to access a type in this assembly from 
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible( false )]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid( "7a0b4db7-f127-4cf5-ac2c-e294957efcd6" )]
+
+/* Version information for an assembly consists of four values in the following order:
+ *
+ *   Major.Minor.Build.Revision
+ *
+ * These values are updated according to the following:
+ *   1. Major.Minor follows the ANTLR release schedule
+ *   2. Build is incremented each time the C# port is packaged for release (regardless
+ *      of whether it's an incremental or nightly). The value resets to zero whenever
+ *      the Major or Minor version is incremented.
+ *   3. Revision is the Perforce changelist number associated with the release.
+ */
+[assembly: AssemblyVersion("3.5.0.2")]
+[assembly: AssemblyFileVersion("3.5.0.2")]
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/RecognitionException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/RecognitionException.cs
new file mode 100644
index 0000000..007560d
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/RecognitionException.cs
@@ -0,0 +1,474 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Tunnel Vision Laboratories, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime
+{
+    using Antlr.Runtime.Tree;
+
+    using ArgumentNullException = System.ArgumentNullException;
+    using Exception = System.Exception;
+    using NotSupportedException = System.NotSupportedException;
+    using SerializationInfo = System.Runtime.Serialization.SerializationInfo;
+    using StreamingContext = System.Runtime.Serialization.StreamingContext;
+
+    /** <summary>The root of the ANTLR exception hierarchy.</summary>
+     *
+     *  <remarks>
+     *  To avoid English-only error messages and to generally make things
+     *  as flexible as possible, these exceptions are not created with strings,
+     *  but rather the information necessary to generate an error.  Then
+     *  the various reporting methods in Parser and Lexer can be overridden
+     *  to generate a localized error message.  For example, MismatchedToken
+     *  exceptions are built with the expected token type.
+     *  So, don't expect getMessage() to return anything.
+     *
+     *  Note that as of Java 1.4, you can access the stack trace, which means
+     *  that you can compute the complete trace of rules from the start symbol.
+     *  This gives you considerable context information with which to generate
+     *  useful error messages.
+     *
+     *  ANTLR generates code that throws exceptions upon recognition error and
+     *  also generates code to catch these exceptions in each rule.  If you
+     *  want to quit upon first error, you can turn off the automatic error
+     *  handling mechanism using rulecatch action, but you still need to
+     *  override methods mismatch and recoverFromMismatchSet.
+     *
+     *  In general, the recognition exceptions can track where in a grammar a
+     *  problem occurred and/or what was the expected input.  While the parser
+     *  knows its state (such as current input symbol and line info) that
+     *  state can change before the exception is reported so current token index
+     *  is computed and stored at exception time.  From this info, you can
+     *  perhaps print an entire line of input not just a single token, for example.
+     *  Better to just say the recognizer had a problem and then let the parser
+     *  figure out a fancy report.
+     *  </remarks>
+     */
+    [System.Serializable]
+    public class RecognitionException : Exception
+    {
+        /** <summary>What input stream did the error occur in?</summary> */
+        private IIntStream _input;
+
+        /// <summary>
+        /// What was the lookahead index when this exception was thrown?
+        /// </summary>
+        private int _k;
+
+        /** <summary>What is index of token/char were we looking at when the error occurred?</summary> */
+        private int _index;
+
+        /** <summary>
+         *  The current Token when an error occurred.  Since not all streams
+         *  can retrieve the ith Token, we have to track the Token object.
+         *  For parsers.  Even when it's a tree parser, token might be set.
+         *  </summary>
+         */
+        private IToken _token;
+
+        /** <summary>
+         *  If this is a tree parser exception, node is set to the node with
+         *  the problem.
+         *  </summary>
+         */
+        private object _node;
+
+        /** <summary>The current char when an error occurred. For lexers.</summary> */
+        private int _c;
+
+        /** <summary>
+         *  Track the line (1-based) at which the error occurred in case this is
+         *  generated from a lexer.  We need to track this since the
+         *  unexpected char doesn't carry the line info.
+         *  </summary>
+         */
+        private int _line;
+
+        /// <summary>
+        /// The 0-based index into the line where the error occurred.
+        /// </summary>
+        private int _charPositionInLine;
+
+        /** <summary>
+         *  If you are parsing a tree node stream, you will encounter some
+         *  imaginary nodes w/o line/col info.  We now search backwards looking
+         *  for most recent token with line/col info, but notify getErrorHeader()
+         *  that info is approximate.
+         *  </summary>
+         */
+        private bool _approximateLineInfo;
+
+        /** <summary>Used for remote debugger deserialization</summary> */
+        public RecognitionException()
+            : this("A recognition error occurred.", null, null)
+        {
+        }
+
+        public RecognitionException(IIntStream input)
+            : this("A recognition error occurred.", input, 1, null)
+        {
+        }
+
+        public RecognitionException(IIntStream input, int k)
+            : this("A recognition error occurred.", input, k, null)
+        {
+        }
+
+        public RecognitionException(string message)
+            : this(message, null, null)
+        {
+        }
+
+        public RecognitionException(string message, IIntStream input)
+            : this(message, input, 1, null)
+        {
+        }
+
+        public RecognitionException(string message, IIntStream input, int k)
+            : this(message, input, k, null)
+        {
+        }
+
+        public RecognitionException(string message, Exception innerException)
+            : this(message, null, innerException)
+        {
+        }
+
+        public RecognitionException(string message, IIntStream input, Exception innerException)
+            : this(message, input, 1, innerException)
+        {
+        }
+
+        public RecognitionException(string message, IIntStream input, int k, Exception innerException)
+            : base(message, innerException)
+        {
+            this._input = input;
+            this._k = k;
+            if (input != null)
+            {
+                this._index = input.Index + k - 1;
+                if (input is ITokenStream)
+                {
+                    this._token = ((ITokenStream)input).LT(k);
+                    this._line = _token.Line;
+                    this._charPositionInLine = _token.CharPositionInLine;
+                }
+
+                ITreeNodeStream tns = input as ITreeNodeStream;
+                if (tns != null)
+                {
+                    ExtractInformationFromTreeNodeStream(tns, k);
+                }
+                else
+                {
+                    ICharStream charStream = input as ICharStream;
+                    if (charStream != null)
+                    {
+                        int mark = input.Mark();
+                        try
+                        {
+                            for (int i = 0; i < k - 1; i++)
+                                input.Consume();
+
+                            this._c = input.LA(1);
+                            this._line = ((ICharStream)input).Line;
+                            this._charPositionInLine = ((ICharStream)input).CharPositionInLine;
+                        }
+                        finally
+                        {
+                            input.Rewind(mark);
+                        }
+                    }
+                    else
+                    {
+                        this._c = input.LA(k);
+                    }
+                }
+            }
+        }
+
+        protected RecognitionException(SerializationInfo info, StreamingContext context)
+            : base(info, context)
+        {
+            if (info == null)
+                throw new ArgumentNullException("info");
+
+            _index = info.GetInt32("Index");
+            _c = info.GetInt32("C");
+            _line = info.GetInt32("Line");
+            _charPositionInLine = info.GetInt32("CharPositionInLine");
+            _approximateLineInfo = info.GetBoolean("ApproximateLineInfo");
+        }
+
+        /** <summary>Return the token type or char of the unexpected input element</summary> */
+        public virtual int UnexpectedType
+        {
+            get
+            {
+                if ( _input is ITokenStream )
+                {
+                    return _token.Type;
+                }
+
+                ITreeNodeStream treeNodeStream = _input as ITreeNodeStream;
+                if ( treeNodeStream != null )
+                {
+                    ITreeAdaptor adaptor = treeNodeStream.TreeAdaptor;
+                    return adaptor.GetType( _node );
+                }
+
+                return _c;
+            }
+        }
+
+        public bool ApproximateLineInfo
+        {
+            get
+            {
+                return _approximateLineInfo;
+            }
+            protected set
+            {
+                _approximateLineInfo = value;
+            }
+        }
+
+        public IIntStream Input
+        {
+            get
+            {
+                return _input;
+            }
+            protected set
+            {
+                _input = value;
+            }
+        }
+
+        public int Lookahead
+        {
+            get
+            {
+                return _k;
+            }
+        }
+
+        public IToken Token
+        {
+            get
+            {
+                return _token;
+            }
+            set
+            {
+                _token = value;
+            }
+        }
+
+        public object Node
+        {
+            get
+            {
+                return _node;
+            }
+            protected set
+            {
+                _node = value;
+            }
+        }
+
+        public int Character
+        {
+            get
+            {
+                return _c;
+            }
+            protected set
+            {
+                _c = value;
+            }
+        }
+
+        public int Index
+        {
+            get
+            {
+                return _index;
+            }
+            protected set
+            {
+                _index = value;
+            }
+        }
+
+        public int Line
+        {
+            get
+            {
+                return _line;
+            }
+            set
+            {
+                _line = value;
+            }
+        }
+
+        public int CharPositionInLine
+        {
+            get
+            {
+                return _charPositionInLine;
+            }
+            set
+            {
+                _charPositionInLine = value;
+            }
+        }
+
+        public override void GetObjectData(SerializationInfo info, StreamingContext context)
+        {
+            if (info == null)
+                throw new ArgumentNullException("info");
+
+            base.GetObjectData(info, context);
+            info.AddValue("Index", _index);
+            info.AddValue("C", _c);
+            info.AddValue("Line", _line);
+            info.AddValue("CharPositionInLine", _charPositionInLine);
+            info.AddValue("ApproximateLineInfo", _approximateLineInfo);
+        }
+
+        protected virtual void ExtractInformationFromTreeNodeStream(ITreeNodeStream input)
+        {
+            this._node = input.LT(1);
+
+            object positionNode = null;
+            IPositionTrackingStream positionTrackingStream = input as IPositionTrackingStream;
+            if (positionTrackingStream != null)
+            {
+                positionNode = positionTrackingStream.GetKnownPositionElement(false);
+                if (positionNode == null)
+                {
+                    positionNode = positionTrackingStream.GetKnownPositionElement(true);
+                    this._approximateLineInfo = positionNode != null;
+                }
+            }
+
+            ITokenStreamInformation streamInformation = input as ITokenStreamInformation;
+            if (streamInformation != null)
+            {
+                IToken lastToken = streamInformation.LastToken;
+                IToken lastRealToken = streamInformation.LastRealToken;
+                if (lastRealToken != null)
+                {
+                    this._token = lastRealToken;
+                    this._line = lastRealToken.Line;
+                    this._charPositionInLine = lastRealToken.CharPositionInLine;
+                    this._approximateLineInfo = lastRealToken.Equals(lastToken);
+                }
+            }
+            else
+            {
+                ITreeAdaptor adaptor = input.TreeAdaptor;
+                IToken payload = adaptor.GetToken(positionNode ?? _node);
+                if (payload != null)
+                {
+                    this._token = payload;
+                    if (payload.Line <= 0)
+                    {
+                        // imaginary node; no line/pos info; scan backwards
+                        int i = -1;
+                        object priorNode = input.LT(i);
+                        while (priorNode != null)
+                        {
+                            IToken priorPayload = adaptor.GetToken(priorNode);
+                            if (priorPayload != null && priorPayload.Line > 0)
+                            {
+                                // we found the most recent real line / pos info
+                                this._line = priorPayload.Line;
+                                this._charPositionInLine = priorPayload.CharPositionInLine;
+                                this._approximateLineInfo = true;
+                                break;
+                            }
+
+                            --i;
+                            try
+                            {
+                                priorNode = input.LT(i);
+                            }
+                            catch (NotSupportedException)
+                            {
+                                priorNode = null;
+                            }
+                        }
+                    }
+                    else
+                    {
+                        // node created from real token
+                        this._line = payload.Line;
+                        this._charPositionInLine = payload.CharPositionInLine;
+                    }
+                }
+                else if (this._node is Tree.ITree)
+                {
+                    this._line = ((Tree.ITree)this._node).Line;
+                    this._charPositionInLine = ((Tree.ITree)this._node).CharPositionInLine;
+                    if (this._node is CommonTree)
+                    {
+                        this._token = ((CommonTree)this._node).Token;
+                    }
+                }
+                else
+                {
+                    int type = adaptor.GetType(this._node);
+                    string text = adaptor.GetText(this._node);
+                    this._token = new CommonToken(type, text);
+                }
+            }
+        }
+
+        protected virtual void ExtractInformationFromTreeNodeStream(ITreeNodeStream input, int k)
+        {
+            int mark = input.Mark();
+            try
+            {
+                for (int i = 0; i < k - 1; i++)
+                    input.Consume();
+
+                ExtractInformationFromTreeNodeStream(input);
+            }
+            finally
+            {
+                input.Rewind(mark);
+            }
+        }
+    }
+}
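
A minimal usage sketch for the RecognitionException class added above. The DemoLexer/DemoParser
pair and the rule name "program" are hypothetical generated types (any ANTLR 3 C# target grammar
would do), and the sketch assumes the grammar rethrows recognition errors, e.g. via a rulecatch
action; ANTLRStringStream and CommonTokenStream are standard runtime classes assumed to be present.

    using System;
    using Antlr.Runtime;

    public static class RecognitionErrorDemo
    {
        public static void Run(string text)
        {
            // DemoLexer/DemoParser and the rule name "program" are hypothetical.
            var lexer = new DemoLexer(new ANTLRStringStream(text));
            var parser = new DemoParser(new CommonTokenStream(lexer));
            try
            {
                parser.program();
            }
            catch (RecognitionException re)
            {
                // Line is 1-based, CharPositionInLine is 0-based (see the fields above).
                Console.Error.WriteLine("error at {0}:{1}, unexpected type {2}",
                                        re.Line, re.CharPositionInLine, re.UnexpectedType);
            }
        }
    }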
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/RecognizerSharedState.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/RecognizerSharedState.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/RecognizerSharedState.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/RecognizerSharedState.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/TemplateParserRuleReturnScope\1402.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime/TemplateParserRuleReturnScope\1402.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/TemplateParserRuleReturnScope\1402.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime/TemplateParserRuleReturnScope\1402.cs"
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/TokenChannels.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/TokenChannels.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/TokenChannels.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/TokenChannels.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/TokenRewriteStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/TokenRewriteStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/TokenRewriteStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/TokenRewriteStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/TokenTypes.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/TokenTypes.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/TokenTypes.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/TokenTypes.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tokens.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tokens.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tokens.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tokens.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/AntlrRuntime_BaseTreeDebugView.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/AntlrRuntime_BaseTreeDebugView.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/AntlrRuntime_BaseTreeDebugView.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/AntlrRuntime_BaseTreeDebugView.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/AstTreeRuleReturnScope\1402.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/AstTreeRuleReturnScope\1402.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/AstTreeRuleReturnScope\1402.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime/Tree/AstTreeRuleReturnScope\1402.cs"
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BaseTree.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BaseTree.cs
new file mode 100644
index 0000000..9327860
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BaseTree.cs
@@ -0,0 +1,575 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Tunnel Vision Laboratories, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Tree
+{
+    using System;
+    using System.Collections.Generic;
+
+    using StringBuilder = System.Text.StringBuilder;
+
+    /** <summary>
+     *  A generic tree implementation with no payload.  You must subclass to
+     *  actually have any user data.  ANTLR v3 uses a list of children approach
+     *  instead of the child-sibling approach in v2.  A flat tree (a list) is
+     *  an empty node whose children represent the list.  An empty, but
+     *  non-null node is called "nil".
+     *  </summary>
+     */
+    [System.Serializable]
+    [System.Diagnostics.DebuggerTypeProxy(typeof(AntlrRuntime_BaseTreeDebugView))]
+    public abstract class BaseTree : ITree
+    {
+        private IList<ITree> _children;
+
+        public BaseTree()
+        {
+        }
+
+        /** <summary>
+         *  Creating a new node from an existing node does nothing for BaseTree,
+         *  as there are no fields other than the children list, which cannot
+         *  be copied because the children are not considered part of this node.
+         *  </summary>
+         */
+        public BaseTree( ITree node )
+        {
+        }
+
+        /** <summary>
+         *  Get the children internal List; note that if you directly mess with
+         *  the list, do so at your own risk.
+         *  </summary>
+         */
+        public virtual IList<ITree> Children
+        {
+            get
+            {
+                return _children;
+            }
+
+            private set
+            {
+                _children = value;
+            }
+        }
+
+        #region ITree Members
+
+        public virtual int ChildCount
+        {
+            get
+            {
+                if ( Children == null )
+                    return 0;
+
+                return Children.Count;
+            }
+        }
+
+        /** <summary>BaseTree doesn't track parent pointers.</summary> */
+        public virtual ITree Parent
+        {
+            get
+            {
+                return null;
+            }
+            set
+            {
+            }
+        }
+
+        /** <summary>BaseTree doesn't track child indexes.</summary> */
+        public virtual int ChildIndex
+        {
+            get
+            {
+                return 0;
+            }
+            set
+            {
+            }
+        }
+
+        public virtual bool IsNil
+        {
+            get
+            {
+                return false;
+            }
+        }
+
+        public abstract int TokenStartIndex
+        {
+            get;
+            set;
+        }
+
+        public abstract int TokenStopIndex
+        {
+            get;
+            set;
+        }
+
+        public abstract int Type
+        {
+            get;
+            set;
+        }
+
+        public abstract string Text
+        {
+            get;
+            set;
+        }
+
+        public virtual int Line
+        {
+            get;
+            set;
+        }
+
+        public virtual int CharPositionInLine
+        {
+            get;
+            set;
+        }
+
+        #endregion
+
+        public virtual ITree GetChild( int i )
+        {
+            if (i < 0)
+                throw new ArgumentOutOfRangeException();
+
+            if ( Children == null || i >= Children.Count )
+                return null;
+
+            return Children[i];
+        }
+
+        public virtual ITree GetFirstChildWithType( int type )
+        {
+            // Children is created lazily, so it can be null when no child has been added yet.
+            if ( Children == null )
+                return null;
+
+            foreach ( ITree child in Children )
+            {
+                if ( child.Type == type )
+                    return child;
+            }
+
+            return null;
+        }
+
+        /** <summary>Add t as child of this node.</summary>
+         *
+         *  <remarks>
+         *  Warning: if this node has no children, but t does and t.IsNil,
+         *  then this routine moves t's children to this node via
+         *  this.Children = t.Children; i.e., without copying the list.
+         *  </remarks>
+         */
+        public virtual void AddChild( ITree t )
+        {
+            //System.out.println("add child "+t.toStringTree()+" "+this.toStringTree());
+            //System.out.println("existing children: "+children);
+            if ( t == null )
+            {
+                return; // do nothing upon addChild(null)
+            }
+            if ( t.IsNil )
+            {
+                // t is an empty node possibly with children
+                BaseTree childTree = t as BaseTree;
+                if ( childTree != null && this.Children != null && this.Children == childTree.Children )
+                {
+                    throw new Exception( "attempt to add child list to itself" );
+                }
+                // just add all of childTree's children to this
+                if ( t.ChildCount > 0 )
+                {
+                    if ( this.Children != null || childTree == null )
+                    {
+                        if ( this.Children == null )
+                            this.Children = CreateChildrenList();
+
+                        // must copy, this has children already
+                        int n = t.ChildCount;
+                        for ( int i = 0; i < n; i++ )
+                        {
+                            ITree c = t.GetChild( i );
+                            this.Children.Add( c );
+                            // handle double-link stuff for each child of nil root
+                            c.Parent = this;
+                            c.ChildIndex = Children.Count - 1;
+                        }
+                    }
+                    else
+                    {
+                        // no children for this but t is a BaseTree with children;
+                        // just set pointer call general freshener routine
+                        this.Children = childTree.Children;
+                        this.FreshenParentAndChildIndexes();
+                    }
+                }
+            }
+            else
+            {
+                // child is not nil (don't care about children)
+                if ( Children == null )
+                {
+                    Children = CreateChildrenList(); // create children list on demand
+                }
+                Children.Add( t );
+                t.Parent = this;
+                t.ChildIndex = Children.Count - 1;
+            }
+            // System.out.println("now children are: "+children);
+        }
+
+        /** <summary>Add all elements of kids list as children of this node</summary> */
+        public virtual void AddChildren( IEnumerable<ITree> kids )
+        {
+            if (kids == null)
+                throw new ArgumentNullException("kids");
+
+            foreach ( ITree t in kids )
+                AddChild( t );
+        }
+
+        public virtual void SetChild( int i, ITree t )
+        {
+            if (i < 0)
+                throw new ArgumentOutOfRangeException("i");
+
+            if ( t == null )
+            {
+                return;
+            }
+            if ( t.IsNil )
+            {
+                throw new ArgumentException( "Can't set single child to a list" );
+            }
+            if ( Children == null )
+            {
+                Children = CreateChildrenList();
+            }
+            Children[i] = t;
+            t.Parent = this;
+            t.ChildIndex = i;
+        }
+
+        /** Insert child t at child position i (0..n-1) by shifting children
+         *  i+1..n-1 to the right one position.  Sets parent / child indexes properly
+         *  but does NOT collapse nil-rooted t's the way AddChild does.
+         */
+        public virtual void InsertChild(int i, ITree t)
+        {
+            if (i < 0)
+                throw new ArgumentOutOfRangeException("i");
+            if (i > ChildCount)
+                throw new ArgumentException();
+
+            if (i == ChildCount)
+            {
+                AddChild(t);
+                return;
+            }
+
+            Children.Insert(i, t);
+
+            // walk others to increment their child indexes
+            // set index, parent of this one too
+            this.FreshenParentAndChildIndexes(i);
+        }
+
+        public virtual object DeleteChild( int i )
+        {
+            if (i < 0)
+                throw new ArgumentOutOfRangeException("i");
+            if (i >= ChildCount)
+                throw new ArgumentException();
+
+            if ( Children == null )
+                return null;
+
+            ITree killed = Children[i];
+            Children.RemoveAt( i );
+            // walk rest and decrement their child indexes
+            this.FreshenParentAndChildIndexes( i );
+            return killed;
+        }
+
+        /** <summary>
+         *  Delete children from start to stop and replace with t, even if t is
+         *  a list (nil-root tree).  The number of children can increase or decrease.
+         *  For huge child lists, inserting children can force walking the rest of
+         *  the children to set their child index; this could be slow.
+         *  </summary>
+         */
+        public virtual void ReplaceChildren( int startChildIndex, int stopChildIndex, object t )
+        {
+            if (startChildIndex < 0)
+                throw new ArgumentOutOfRangeException();
+            if (stopChildIndex < 0)
+                throw new ArgumentOutOfRangeException();
+            if (t == null)
+                throw new ArgumentNullException("t");
+            if (stopChildIndex < startChildIndex)
+                throw new ArgumentException();
+
+            /*
+            System.out.println("replaceChildren "+startChildIndex+", "+stopChildIndex+
+                               " with "+((BaseTree)t).toStringTree());
+            System.out.println("in="+toStringTree());
+            */
+            if ( Children == null )
+            {
+                throw new ArgumentException( "indexes invalid; no children in list" );
+            }
+            int replacingHowMany = stopChildIndex - startChildIndex + 1;
+            int replacingWithHowMany;
+            ITree newTree = (ITree)t;
+            IList<ITree> newChildren = null;
+            // normalize to a list of children to add: newChildren
+            if ( newTree.IsNil )
+            {
+                BaseTree baseTree = newTree as BaseTree;
+                if ( baseTree != null && baseTree.Children != null )
+                {
+                    newChildren = baseTree.Children;
+                }
+                else
+                {
+                    newChildren = CreateChildrenList();
+                    int n = newTree.ChildCount;
+                    for ( int i = 0; i < n; i++ )
+                        newChildren.Add( newTree.GetChild( i ) );
+                }
+            }
+            else
+            {
+                newChildren = new List<ITree>( 1 );
+                newChildren.Add( newTree );
+            }
+            replacingWithHowMany = newChildren.Count;
+            int numNewChildren = newChildren.Count;
+            int delta = replacingHowMany - replacingWithHowMany;
+            // if same number of nodes, do direct replace
+            if ( delta == 0 )
+            {
+                int j = 0; // index into new children
+                for ( int i = startChildIndex; i <= stopChildIndex; i++ )
+                {
+                    ITree child = newChildren[j];
+                    Children[i] = child;
+                    child.Parent = this;
+                    child.ChildIndex = i;
+                    j++;
+                }
+            }
+            else if ( delta > 0 )
+            {
+                // fewer new nodes than there were
+                // set children and then delete extra
+                for ( int j = 0; j < numNewChildren; j++ )
+                {
+                    Children[startChildIndex + j] = newChildren[j];
+                }
+                int indexToDelete = startChildIndex + numNewChildren;
+                for ( int c = indexToDelete; c <= stopChildIndex; c++ )
+                {
+                    // delete same index, shifting everybody down each time
+                    Children.RemoveAt( indexToDelete );
+                }
+                FreshenParentAndChildIndexes( startChildIndex );
+            }
+            else
+            {
+                // more new nodes than were there before
+                // fill in as many children as we can (replacingHowMany) w/o moving data
+                for ( int j = 0; j < replacingHowMany; j++ )
+                {
+                    Children[startChildIndex + j] = newChildren[j];
+                }
+                int numToInsert = replacingWithHowMany - replacingHowMany;
+                for ( int j = replacingHowMany; j < replacingWithHowMany; j++ )
+                {
+                    Children.Insert( startChildIndex + j, newChildren[j] );
+                }
+                FreshenParentAndChildIndexes( startChildIndex );
+            }
+            //System.out.println("out="+toStringTree());
+        }
+
+        /** <summary>Override in a subclass to change the impl of children list</summary> */
+        protected virtual IList<ITree> CreateChildrenList()
+        {
+            return new List<ITree>();
+        }
+
+        /** <summary>Set the parent and child index values for all children of this node</summary> */
+        public virtual void FreshenParentAndChildIndexes()
+        {
+            FreshenParentAndChildIndexes( 0 );
+        }
+
+        public virtual void FreshenParentAndChildIndexes( int offset )
+        {
+            int n = ChildCount;
+            for ( int c = offset; c < n; c++ )
+            {
+                ITree child = GetChild( c );
+                child.ChildIndex = c;
+                child.Parent = this;
+            }
+        }
+
+        public virtual void FreshenParentAndChildIndexesDeeply()
+        {
+            FreshenParentAndChildIndexesDeeply(0);
+        }
+
+        public virtual void FreshenParentAndChildIndexesDeeply(int offset)
+        {
+            int n = ChildCount;
+            for (int c = offset; c < n; c++)
+            {
+                ITree child = GetChild(c);
+                child.ChildIndex = c;
+                child.Parent = this;
+                BaseTree baseTree = child as BaseTree;
+                if (baseTree != null)
+                    baseTree.FreshenParentAndChildIndexesDeeply();
+            }
+        }
+
+        public virtual void SanityCheckParentAndChildIndexes()
+        {
+            SanityCheckParentAndChildIndexes( null, -1 );
+        }
+
+        public virtual void SanityCheckParentAndChildIndexes( ITree parent, int i )
+        {
+            if ( parent != this.Parent )
+            {
+                throw new InvalidOperationException( "parents don't match; expected " + parent + " found " + this.Parent );
+            }
+            if ( i != this.ChildIndex )
+            {
+                throw new InvalidOperationException( "child indexes don't match; expected " + i + " found " + this.ChildIndex );
+            }
+            int n = this.ChildCount;
+            for ( int c = 0; c < n; c++ )
+            {
+                BaseTree child = (BaseTree)this.GetChild( c );
+                child.SanityCheckParentAndChildIndexes( this, c );
+            }
+        }
+
+        /** <summary>Walk upwards looking for ancestor with this token type.</summary> */
+        public virtual bool HasAncestor( int ttype )
+        {
+            return GetAncestor( ttype ) != null;
+        }
+
+        /** <summary>Walk upwards and get first ancestor with this token type.</summary> */
+        public virtual ITree GetAncestor( int ttype )
+        {
+            ITree t = this;
+            t = t.Parent;
+            while ( t != null )
+            {
+                if ( t.Type == ttype )
+                    return t;
+                t = t.Parent;
+            }
+            return null;
+        }
+
+        /** <summary>
+         *  Return a list of all ancestors of this node.  The first node of
+         *  the list is the root and the last is the parent of this node.
+         *  </summary>
+         */
+        public virtual IList<ITree> GetAncestors()
+        {
+            if ( Parent == null )
+                return null;
+
+            List<ITree> ancestors = new List<ITree>();
+            ITree t = this;
+            t = t.Parent;
+            while ( t != null )
+            {
+                ancestors.Insert( 0, t ); // insert at start
+                t = t.Parent;
+            }
+            return ancestors;
+        }
+
+        /** <summary>Print out a whole tree not just a node</summary> */
+        public virtual string ToStringTree()
+        {
+            if ( Children == null || Children.Count == 0 )
+            {
+                return this.ToString();
+            }
+            StringBuilder buf = new StringBuilder();
+            if ( !IsNil )
+            {
+                buf.Append( "(" );
+                buf.Append( this.ToString() );
+                buf.Append( ' ' );
+            }
+            for ( int i = 0; Children != null && i < Children.Count; i++ )
+            {
+                ITree t = Children[i];
+                if ( i > 0 )
+                {
+                    buf.Append( ' ' );
+                }
+                buf.Append( t.ToStringTree() );
+            }
+            if ( !IsNil )
+            {
+                buf.Append( ")" );
+            }
+            return buf.ToString();
+        }
+
+        /** <summary>Override to say how a node (not a tree) should look as text</summary> */
+        public override abstract string ToString();
+
+        #region Tree Members
+        public abstract ITree DupNode();
+        #endregion
+    }
+}
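
To make the abstract members above concrete, here is a minimal sketch of a BaseTree subclass
(DemoTree is hypothetical, not part of the runtime) together with the AddChild/ToStringTree
behaviour it inherits from the class just added.

    using Antlr.Runtime.Tree;

    // A minimal payload-carrying node: just a token type and a text string.
    class DemoTree : BaseTree
    {
        private readonly int _type;
        private string _text;

        public DemoTree(int type, string text) { _type = type; _text = text; }

        public override int Type { get { return _type; } set { /* type is fixed */ } }
        public override string Text { get { return _text; } set { _text = value; } }
        public override int TokenStartIndex { get; set; }
        public override int TokenStopIndex { get; set; }
        public override ITree DupNode() { return new DemoTree(_type, _text); }
        public override string ToString() { return _text; }
    }

    // Usage: the children list is created lazily by AddChild, and ToStringTree
    // prints the LISP-style form.
    //   var root = new DemoTree(1, "root");
    //   root.AddChild(new DemoTree(2, "a"));
    //   root.AddChild(new DemoTree(3, "b"));
    //   System.Console.WriteLine(root.ToStringTree());   // "(root a b)"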
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BaseTreeAdaptor.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BaseTreeAdaptor.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BaseTreeAdaptor.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BaseTreeAdaptor.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BufferedTreeNodeStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BufferedTreeNodeStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BufferedTreeNodeStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/BufferedTreeNodeStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonErrorNode.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonErrorNode.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonErrorNode.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonErrorNode.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTree.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTree.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTree.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTree.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTreeAdaptor.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTreeAdaptor.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTreeAdaptor.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTreeAdaptor.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTreeNodeStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTreeNodeStream.cs
new file mode 100644
index 0000000..f9cb0a7
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/CommonTreeNodeStream.cs
@@ -0,0 +1,312 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Tree
+{
+    using System.Collections.Generic;
+    using Antlr.Runtime.Misc;
+
+    using StringBuilder = System.Text.StringBuilder;
+
+    [System.Serializable]
+    public class CommonTreeNodeStream : LookaheadStream<object>, ITreeNodeStream, IPositionTrackingStream
+    {
+        public const int DEFAULT_INITIAL_BUFFER_SIZE = 100;
+        public const int INITIAL_CALL_STACK_SIZE = 10;
+
+        /** <summary>Pull nodes from which tree?</summary> */
+        private readonly object _root;
+
+        /** <summary>If this tree (root) was created from a token stream, track it.</summary> */
+        protected ITokenStream tokens;
+
+        /** <summary>What tree adaptor was used to build these trees</summary> */
+        [System.NonSerialized]
+        private ITreeAdaptor _adaptor;
+
+        /** The tree iterator we are using */
+        private readonly TreeIterator _it;
+
+        /** <summary>Stack of indexes used for push/pop calls</summary> */
+        private Stack<int> _calls;
+
+        /** <summary>Treat (nil A B C) trees like flat A B C streams</summary> */
+        private bool _hasNilRoot = false;
+
+        /** <summary>Tracks tree depth.  Level=0 means we're at root node level.</summary> */
+        private int _level = 0;
+
+        /**
+         * Tracks the last node before the start of {@link #data} which contains
+         * position information to provide information for error reporting. This is
+         * tracked in addition to {@link #prevElement} which may or may not contain
+         * position information.
+         *
+         * @see #hasPositionInformation
+         * @see RecognitionException#extractInformationFromTreeNodeStream
+         */
+        private object _previousLocationElement;
+
+        public CommonTreeNodeStream( object tree )
+            : this( new CommonTreeAdaptor(), tree )
+        {
+        }
+
+        public CommonTreeNodeStream( ITreeAdaptor adaptor, object tree )
+        {
+            this._root = tree;
+            this._adaptor = adaptor;
+            _it = new TreeIterator( adaptor, _root );
+        }
+
+        #region Properties
+
+        public virtual string SourceName
+        {
+            get
+            {
+                if ( TokenStream == null )
+                    return null;
+
+                return TokenStream.SourceName;
+            }
+        }
+
+        public virtual ITokenStream TokenStream
+        {
+            get
+            {
+                return tokens;
+            }
+
+            set
+            {
+                tokens = value;
+            }
+        }
+
+        public virtual ITreeAdaptor TreeAdaptor
+        {
+            get
+            {
+                return _adaptor;
+            }
+
+            set
+            {
+                _adaptor = value;
+            }
+        }
+
+        public virtual object TreeSource
+        {
+            get
+            {
+                return _root;
+            }
+        }
+
+        public virtual bool UniqueNavigationNodes
+        {
+            get
+            {
+                return false;
+            }
+
+            set
+            {
+            }
+        }
+
+        #endregion
+
+        public override void Reset()
+        {
+            base.Reset();
+            _it.Reset();
+            _hasNilRoot = false;
+            _level = 0;
+            _previousLocationElement = null;
+            if ( _calls != null )
+                _calls.Clear();
+        }
+
+        public override object NextElement()
+        {
+            _it.MoveNext();
+            object t = _it.Current;
+            //System.out.println("pulled "+adaptor.getType(t));
+            if ( t == _it.up )
+            {
+                _level--;
+                if ( _level == 0 && _hasNilRoot )
+                {
+                    _it.MoveNext();
+                    return _it.Current; // don't give last UP; get EOF
+                }
+            }
+            else if ( t == _it.down )
+            {
+                _level++;
+            }
+
+            if ( _level == 0 && TreeAdaptor.IsNil( t ) )
+            {
+                // if nil root, scarf nil, DOWN
+                _hasNilRoot = true;
+                _it.MoveNext();
+                t = _it.Current; // t is now DOWN, so get first real node next
+                _level++;
+                _it.MoveNext();
+                t = _it.Current;
+            }
+
+            return t;
+        }
+
+        public override object Dequeue()
+        {
+            object result = base.Dequeue();
+            if (_p == 0 && HasPositionInformation(PreviousElement))
+                _previousLocationElement = PreviousElement;
+
+            return result;
+        }
+
+        public override bool IsEndOfFile(object o)
+        {
+            return TreeAdaptor.GetType(o) == CharStreamConstants.EndOfFile;
+        }
+
+        public virtual int LA( int i )
+        {
+            return TreeAdaptor.GetType( LT( i ) );
+        }
+
+        /** Make stream jump to a new location, saving old location.
+         *  Switch back with pop().
+         */
+        public virtual void Push( int index )
+        {
+            if ( _calls == null )
+                _calls = new Stack<int>();
+
+            _calls.Push( _p ); // save current index
+            Seek( index );
+        }
+
+        /** Seek back to previous index saved during last push() call.
+         *  Return top of stack (return index).
+         */
+        public virtual int Pop()
+        {
+            int ret = _calls.Pop();
+            Seek( ret );
+            return ret;
+        }
+
+        /**
+         * Returns an element containing position information. If {@code allowApproximateLocation} is {@code false}, then
+         * this method will return the {@code LT(1)} element if it contains position information, and otherwise return {@code null}.
+         * If {@code allowApproximateLocation} is {@code true}, then this method will return the last known element containing position information.
+         *
+         * @see #hasPositionInformation
+         */
+        public object GetKnownPositionElement(bool allowApproximateLocation)
+        {
+            object node = _data[_p];
+            if (HasPositionInformation(node))
+                return node;
+
+            if (!allowApproximateLocation)
+                return null;
+
+            for (int index = _p - 1; index >= 0; index--)
+            {
+                node = _data[index];
+                if (HasPositionInformation(node))
+                    return node;
+            }
+
+            return _previousLocationElement;
+        }
+
+        public bool HasPositionInformation(object node)
+        {
+            IToken token = TreeAdaptor.GetToken(node);
+            if (token == null)
+                return false;
+
+            if (token.Line <= 0)
+                return false;
+
+            return true;
+        }
+
+        #region Tree rewrite interface
+
+        public virtual void ReplaceChildren( object parent, int startChildIndex, int stopChildIndex, object t )
+        {
+            if ( parent != null )
+            {
+                TreeAdaptor.ReplaceChildren( parent, startChildIndex, stopChildIndex, t );
+            }
+        }
+
+        #endregion
+
+        public virtual string ToString( object start, object stop )
+        {
+            // we'll have to walk from start to stop in tree; we're not keeping
+            // a complete node stream buffer
+            return "n/a";
+        }
+
+        /** <summary>For debugging; destructive: moves tree iterator to end.</summary> */
+        public virtual string ToTokenTypeString()
+        {
+            Reset();
+            StringBuilder buf = new StringBuilder();
+            object o = LT( 1 );
+            int type = TreeAdaptor.GetType( o );
+            while ( type != TokenTypes.EndOfFile )
+            {
+                buf.Append( " " );
+                buf.Append( type );
+                Consume();
+                o = LT( 1 );
+                type = TreeAdaptor.GetType( o );
+            }
+            return buf.ToString();
+        }
+    }
+}
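
A short sketch of feeding a tree into the CommonTreeNodeStream above and dumping its flattened
form. CommonTreeAdaptor and CommonToken are existing runtime classes (renamed elsewhere in this
patch); the token types 10/20/30 and the class/method names here are arbitrary.

    using System;
    using Antlr.Runtime;
    using Antlr.Runtime.Tree;

    public static class NodeStreamDemo
    {
        public static void Run()
        {
            ITreeAdaptor adaptor = new CommonTreeAdaptor();
            object root = adaptor.Create(new CommonToken(10, "root"));
            adaptor.AddChild(root, adaptor.Create(new CommonToken(20, "a")));
            adaptor.AddChild(root, adaptor.Create(new CommonToken(30, "b")));

            var nodes = new CommonTreeNodeStream(adaptor, root);
            // Prints the node types interleaved with DOWN/UP navigation markers,
            // something like " 10 2 20 30 3".
            Console.WriteLine(nodes.ToTokenTypeString());
        }
    }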
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/DotTreeGenerator.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/DotTreeGenerator.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/DotTreeGenerator.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/DotTreeGenerator.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/IPositionTrackingStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/IPositionTrackingStream.cs
new file mode 100644
index 0000000..8bc8945
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/IPositionTrackingStream.cs
@@ -0,0 +1,59 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2012 Terence Parr
+ Copyright (c) 2012 Sam Harwell
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Tree
+{
+    /**
+     *
+     * @author Sam Harwell
+     */
+    public interface IPositionTrackingStream
+    {
+        /**
+         * Returns an element containing concrete information about the current
+         * position in the stream.
+         *
+         * @param allowApproximateLocation if {@code false}, this method returns
+         * {@code null} if an element containing exact information about the current
+         * position is not available
+         */
+        object GetKnownPositionElement(bool allowApproximateLocation);
+
+        /**
+         * Determines if the specified {@code element} contains concrete position
+         * information.
+         *
+         * @param element the element to check
+         * @return {@code true} if {@code element} contains concrete position
+         * information, otherwise {@code false}
+         */
+        bool HasPositionInformation(object element);
+
+    }
+}
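
For illustration, a hedged sketch of how an error reporter might consult IPositionTrackingStream,
mirroring what RecognitionException.ExtractInformationFromTreeNodeStream does earlier in this
patch; the PositionHelper class and FindPositionToken method are hypothetical.

    using Antlr.Runtime;
    using Antlr.Runtime.Tree;

    static class PositionHelper
    {
        // Returns the token carrying the best available position info for the
        // current element of a tree node stream, or null if none can be found.
        public static IToken FindPositionToken(ITreeNodeStream nodes)
        {
            object element = null;
            var tracking = nodes as IPositionTrackingStream;
            if (tracking != null)
            {
                element = tracking.GetKnownPositionElement(false)    // exact position, if any
                          ?? tracking.GetKnownPositionElement(true); // otherwise an approximation
            }

            return nodes.TreeAdaptor.GetToken(element ?? nodes.LT(1));
        }
    }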
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITree.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITree.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITree.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITree.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeAdaptor.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeAdaptor.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeAdaptor.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeAdaptor.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeNodeStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeNodeStream.cs
new file mode 100644
index 0000000..8f3f30a
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeNodeStream.cs
@@ -0,0 +1,146 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Tree
+{
+    /** <summary>A stream of tree nodes, accessing nodes from a tree of some kind</summary> */
+    public interface ITreeNodeStream : IIntStream
+    {
+        /** <summary>
+         *  Get a tree node at an absolute index i; 0..n-1.
+         *  If you don't want to buffer up nodes, then this method makes no
+         *  sense for you.
+         *  </summary>
+         */
+        object this[int i]
+        {
+            get;
+        }
+
+        /** <summary>
+         * Get tree node at current input pointer + {@code k} ahead where
+         * {@code k==1} is next node. {@code k<0} indicates nodes in the past. So
+         * {@code LT(-1)} is previous node, but implementations are not required to
+         * provide results for {@code k < -1}. {@code LT(0)} is undefined. For
+         * {@code k<=n}, return {@code null}. Return {@code null} for {@code LT(0)}
+         * and any index that results in an absolute address that is negative.
+         *  </summary>
+         *
+         *  <remarks>
+         * This is analogous to {@link TokenStream#LT}, but this returns a tree node
+         * instead of a {@link Token}. Makes code generation identical for both
+         * parser and tree grammars.
+         *  </remarks>
+         */
+        object LT( int k );
+
+        /** <summary>
+         *  Where is this stream pulling nodes from?  This is not the name, but
+         *  the object that provides node objects.
+         *  </summary>
+         */
+        object TreeSource
+        {
+            get;
+        }
+
+        /** <summary>
+         * If the tree associated with this stream was created from a
+         * {@link TokenStream}, you can specify it here. Used to do rule
+         * {@code $text} attribute in tree parser. Optional unless you use tree
+         * parser rule {@code $text} attribute or {@code output=template} and
+         * {@code rewrite=true} options.
+         *  </summary>
+         */
+        ITokenStream TokenStream
+        {
+            get;
+        }
+
+        /** <summary>
+         *  What adaptor can tell me how to interpret/navigate nodes and
+         *  trees.  E.g., get text of a node.
+         *  </summary>
+         */
+        ITreeAdaptor TreeAdaptor
+        {
+            get;
+        }
+
+        /** <summary>
+         * As we flatten the tree, we use {@link Token#UP}, {@link Token#DOWN} nodes
+         * to represent the tree structure. When debugging we need unique nodes so
+         * we have to instantiate new ones. When doing normal tree parsing, it's
+         * slow and a waste of memory to create unique navigation nodes. Default
+         * should be {@code false}.
+         *  </summary>
+         */
+        bool UniqueNavigationNodes
+        {
+            get;
+            set;
+        }
+
+        /** <summary>
+         * Return the text of all nodes from {@code start} to {@code stop},
+         * inclusive. If the stream does not buffer all the nodes then it can still
+         * walk recursively from start until stop. You can always return
+         * {@code null} or {@code ""} too, but users should not access
+         * {@code $ruleLabel.text} in an action of course in that case.
+         *  </summary>
+         */
+        string ToString( object start, object stop );
+
+
+        #region REWRITING TREES (used by tree parser)
+
+        /** <summary>
+         * Replace children of {@code parent} from index {@code startChildIndex} to
+         * {@code stopChildIndex} with {@code t}, which might be a list. Number of
+         * children may be different after this call. The stream is notified because
+         * it is walking the tree and might need to know you are monkeying with the
+         * underlying tree. Also, it might be able to modify the node stream to
+         * avoid restreaming for future phases.
+         *  </summary>
+         *
+         *  <remarks>
+         * If {@code parent} is {@code null}, don't do anything; must be at root of
+         * overall tree. Can't replace whatever points to the parent externally. Do
+         * nothing.
+         *  </remarks>
+         */
+        void ReplaceChildren( object parent, int startChildIndex, int stopChildIndex, object t );
+
+        #endregion
+
+    }
+}
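
A brief sketch of the LT/LA/Consume pattern a generated tree parser uses against this interface;
the TreeStreamWalker class and DumpNodes helper are hypothetical and rely only on members declared
above or elsewhere in this runtime.

    using System;
    using Antlr.Runtime;
    using Antlr.Runtime.Tree;

    static class TreeStreamWalker
    {
        // Walk every node (including DOWN/UP navigation nodes) until EOF.
        public static void DumpNodes(ITreeNodeStream nodes)
        {
            ITreeAdaptor adaptor = nodes.TreeAdaptor;
            while (adaptor.GetType(nodes.LT(1)) != TokenTypes.EndOfFile)
            {
                object node = nodes.LT(1);
                Console.WriteLine("{0} '{1}'", adaptor.GetType(node), adaptor.GetText(node));
                nodes.Consume();  // IIntStream.Consume advances to the next node
            }
        }
    }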
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeVisitorAction.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeVisitorAction.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeVisitorAction.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ITreeVisitorAction.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ParseTree.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ParseTree.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ParseTree.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ParseTree.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteCardinalityException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteCardinalityException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteCardinalityException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteCardinalityException.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteEarlyExitException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteEarlyExitException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteEarlyExitException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteEarlyExitException.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteEmptyStreamException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteEmptyStreamException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteEmptyStreamException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteEmptyStreamException.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleElementStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleElementStream.cs
new file mode 100644
index 0000000..cb76ace
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleElementStream.cs
@@ -0,0 +1,253 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Tree
+{
+    using System.Collections.Generic;
+    using IList = System.Collections.IList;
+
+    /** <summary>
+     *  A generic list of elements tracked in an alternative to be used in
+     *  a -> rewrite rule.  We need to subclass to fill in the next() method,
+     *  which returns either an AST node wrapped around a token payload or
+     *  an existing subtree.
+     *  </summary>
+     *
+     *  <remarks>
+     *  Once you start next()ing, do not try to add more elements.  It will
+     *  break the cursor tracking I believe.
+     *
+     *  TODO: add mechanism to detect/puke on modification after reading from stream
+     *  </remarks>
+     *
+     *  <see cref="RewriteRuleSubtreeStream"/>
+     *  <see cref="RewriteRuleTokenStream"/>
+     */
+    [System.Serializable]
+    public abstract class RewriteRuleElementStream
+    {
+        /** <summary>
+         *  Cursor 0..n-1.  If singleElement!=null, cursor is 0 until you next(),
+         *  which bumps it to 1 meaning no more elements.
+         *  </summary>
+         */
+        protected int cursor = 0;
+
+        /** <summary>Track single elements w/o creating a list.  Upon 2nd add, alloc list.</summary> */
+        protected object singleElement;
+
+        /** <summary>The list of tokens or subtrees we are tracking.</summary> */
+        protected IList elements;
+
+        /** <summary>Once a node / subtree has been used in a stream, it must be dup'd
+         *  from then on.  Streams are reset after subrules so that the streams
+         *  can be reused in future subrules.  So, reset must set a dirty bit.
+         *  If dirty, then next() always returns a dup.</summary>
+         */
+        protected bool dirty = false;
+
+        /** <summary>The element or stream description; usually has the name of the token or
+         *  rule reference that this list tracks.  Can include the rule name too, but
+         *  the exception would track that info.</summary>
+         */
+        protected string elementDescription;
+        protected ITreeAdaptor adaptor;
+
+        public RewriteRuleElementStream( ITreeAdaptor adaptor, string elementDescription )
+        {
+            this.elementDescription = elementDescription;
+            this.adaptor = adaptor;
+        }
+
+        /** <summary>Create a stream with one element</summary> */
+        public RewriteRuleElementStream( ITreeAdaptor adaptor, string elementDescription, object oneElement )
+            : this( adaptor, elementDescription )
+        {
+            Add( oneElement );
+        }
+
+        /** <summary>Create a stream, but feed off an existing list</summary> */
+        public RewriteRuleElementStream( ITreeAdaptor adaptor, string elementDescription, IList elements )
+            : this( adaptor, elementDescription )
+        {
+            this.singleElement = null;
+            this.elements = elements;
+        }
+
+        /** <summary>
+         *  Reset the condition of this stream so that it appears we have
+         *  not consumed any of its elements.  Elements themselves are untouched.
+         *  Once we reset the stream, any future use will need duplicates.  Set
+         *  the dirty bit.
+         *  </summary>
+         */
+        public virtual void Reset()
+        {
+            cursor = 0;
+            dirty = true;
+        }
+
+        public virtual void Add( object el )
+        {
+            //System.out.println("add '"+elementDescription+"' is "+el);
+            if ( el == null )
+            {
+                return;
+            }
+            if ( elements != null )
+            { // if in list, just add
+                elements.Add( el );
+                return;
+            }
+            if ( singleElement == null )
+            { // no elements yet, track w/o list
+                singleElement = el;
+                return;
+            }
+            // adding 2nd element, move to list
+            elements = new List<object>( 5 );
+            elements.Add( singleElement );
+            singleElement = null;
+            elements.Add( el );
+        }
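+
+        // Illustrative sketch (not part of the original source): how generated
+        // rewrite code typically drives a concrete subclass of this stream.  The
+        // names "adaptor" and "idToken" here are hypothetical.
+        //
+        //     var stream_ID = new RewriteRuleTokenStream(adaptor, "token ID");
+        //     stream_ID.Add(idToken);                // collected while matching the alternative
+        //     object idNode = stream_ID.NextNode();  // consumed while emitting the -> rewrite
+        //
+        // The first Add() call stores the element in singleElement; a second Add()
+        // promotes both elements into the backing list, as implemented above.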
+
+        /** <summary>
+         *  Return the next element in the stream.  If out of elements, throw
+         *  an exception unless Count==1.  If Count is 1 and we are out of
+         *  elements, return a duplicate of the single node/subtree instead.
+         *  Likewise, if we've already used the element, return a dup (dirty bit set).
+         *  </summary>
+         */
+        public virtual object NextTree()
+        {
+            int n = Count;
+            if ( dirty || ( cursor >= n && n == 1 ) )
+            {
+                // if out of elements and size is 1, dup
+                object el = NextCore();
+                return Dup( el );
+            }
+            // test size above then fetch
+            object el2 = NextCore();
+            return el2;
+        }
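+
+        // Example (informal, not from the original source): for a rewrite such as
+        //
+        //     rule : ID -> ^(ID ID) ;
+        //
+        // the single ID element is read twice.  The second read happens with the
+        // cursor already past the end (or after Reset() set the dirty bit), so
+        // NextTree() hands back Dup(...) copies rather than the original node.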
+
+        /** <summary>
+         *  Do the work of getting the next element, making sure that it's
+         *  a tree node or subtree.  Deal with the optimization of single-
+         *  element list versus list of size > 1.  Throw an exception
+         *  if the stream is empty or we're out of elements and Count > 1.
+         *  Protected so you can override it in a subclass if necessary.
+         *  </summary>
+         */
+        protected virtual object NextCore()
+        {
+            int n = Count;
+            if ( n == 0 )
+            {
+                throw new RewriteEmptyStreamException( elementDescription );
+            }
+            if ( cursor >= n )
+            { // out of elements?
+                if ( n == 1 )
+                {  // if size is 1, it's ok; return and we'll dup
+                    return ToTree( singleElement );
+                }
+                // out of elements and size was not 1, so we can't dup
+                throw new RewriteCardinalityException( elementDescription );
+            }
+            // we have elements
+            if ( singleElement != null )
+            {
+                cursor++; // move cursor even for single element list
+                return ToTree( singleElement );
+            }
+            // must have more than one in list, pull from elements
+            object o = ToTree( elements[cursor] );
+            cursor++;
+            return o;
+        }
+
+        /** <summary>
+         *  When constructing trees, sometimes we need to dup a token or AST
+         *  subtree.  Dup'ing a token means just creating another AST node
+         *  around it.  For trees, you must call the adaptor.dupTree() unless
+         *  the element is for a tree root; then it must be a node dup.
+         *  </summary>
+         */
+        protected abstract object Dup( object el );
+
+        /** <summary>
+         *  Ensure stream emits trees; tokens must be converted to AST nodes.
+         *  AST nodes can be passed through unmolested.
+         *  </summary>
+         */
+        protected virtual object ToTree( object el )
+        {
+            return el;
+        }
+
+        public virtual bool HasNext
+        {
+            get
+            {
+                return ( singleElement != null && cursor < 1 ) ||
+                      ( elements != null && cursor < elements.Count );
+            }
+        }
+
+        public virtual int Count
+        {
+            get
+            {
+                int n = 0;
+                if ( singleElement != null )
+                {
+                    n = 1;
+                }
+                if ( elements != null )
+                {
+                    return elements.Count;
+                }
+                return n;
+            }
+        }
+
+        public virtual string Description
+        {
+            get
+            {
+                return elementDescription;
+            }
+        }
+    }
+}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleNodeStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleNodeStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleNodeStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleNodeStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleSubtreeStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleSubtreeStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleSubtreeStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleSubtreeStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleTokenStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleTokenStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/RewriteRuleTokenStream.cs
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TemplateTreeRuleReturnScope\1402.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TemplateTreeRuleReturnScope\1402.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TemplateTreeRuleReturnScope\1402.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TemplateTreeRuleReturnScope\1402.cs"
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeFilter.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeFilter.cs
new file mode 100644
index 0000000..ba44e2d
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeFilter.cs
@@ -0,0 +1,99 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Tree
+{
+    using Antlr.Runtime.Misc;
+
+    public class TreeFilter : TreeParser
+    {
+        protected ITokenStream originalTokenStream;
+        protected ITreeAdaptor originalAdaptor;
+
+        public TreeFilter( ITreeNodeStream input )
+            : this( input, new RecognizerSharedState() )
+        {
+        }
+        public TreeFilter( ITreeNodeStream input, RecognizerSharedState state )
+            : base( input, state )
+        {
+            originalAdaptor = input.TreeAdaptor;
+            originalTokenStream = input.TokenStream;
+        }
+
+        public virtual void ApplyOnce( object t, Action whichRule )
+        {
+            if ( t == null )
+                return;
+
+            try
+            {
+                // share TreeParser object but not parsing-related state
+                SetState(new RecognizerSharedState());
+                SetTreeNodeStream(new CommonTreeNodeStream(originalAdaptor, t));
+                ( (CommonTreeNodeStream)input ).TokenStream = originalTokenStream;
+                BacktrackingLevel = 1;
+                whichRule();
+                BacktrackingLevel = 0;
+            }
+            catch ( RecognitionException )
+            {
+            }
+        }
+
+        public virtual void Downup( object t )
+        {
+            TreeVisitor v = new TreeVisitor( new CommonTreeAdaptor() );
+            Func<object, object> pre = ( o ) =>
+            {
+                ApplyOnce( o, Topdown );
+                return o;
+            };
+            Func<object, object> post = ( o ) =>
+            {
+                ApplyOnce( o, Bottomup );
+                return o;
+            };
+            v.Visit( t, pre, post );
+        }
+
+        // methods the downup strategy uses to do the up and down rules.
+        // to override, just define tree grammar rule topdown and turn on
+        // filter=true.
+        protected virtual void Topdown()
+        {
+        }
+        protected virtual void Bottomup()
+        {
+        }
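+
+        // Illustrative sketch (not part of the original source): a filter tree
+        // grammar sets options { filter=true; } and defines rules named topdown
+        // and/or bottomup; ANTLR generates a TreeFilter subclass that overrides the
+        // two methods above.  Driving such a subclass by hand (type name
+        // "MyFilterGrammar" is hypothetical):
+        //
+        //     var nodes  = new CommonTreeNodeStream(tree);
+        //     var filter = new MyFilterGrammar(nodes);
+        //     filter.Downup(tree);   // topdown rules on the way down, bottomup on the way up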
+    }
+}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeIterator.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeIterator.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeIterator.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeIterator.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeParser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeParser.cs
new file mode 100644
index 0000000..f5a1508
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeParser.cs
@@ -0,0 +1,209 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Tree
+{
+    using ConditionalAttribute = System.Diagnostics.ConditionalAttribute;
+    using Regex = System.Text.RegularExpressions.Regex;
+    using RegexOptions = System.Text.RegularExpressions.RegexOptions;
+
+    /** <summary>
+     *  A parser for a stream of tree nodes.  "tree grammars" result in a subclass
+     *  of this.  All the error reporting and recovery is shared with Parser via
+     *  the BaseRecognizer superclass.
+     *  </summary>
+    */
+    public class TreeParser : BaseRecognizer
+    {
+        public const int DOWN = TokenTypes.Down;
+        public const int UP = TokenTypes.Up;
+
+        // precompiled regex used by inContext
+        static string dotdot = ".*[^.]\\.\\.[^.].*";
+        static string doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*";
+        static Regex dotdotPattern = new Regex( dotdot, RegexOptions.Compiled );
+        static Regex doubleEtcPattern = new Regex( doubleEtc, RegexOptions.Compiled );
+
+        protected ITreeNodeStream input;
+
+        public TreeParser( ITreeNodeStream input )
+            : base() // highlight that we go to super to set state object
+        {
+            this.input = input;
+        }
+
+        public TreeParser( ITreeNodeStream input, RecognizerSharedState state )
+            : base( state ) // share the state object with another parser
+        {
+            this.input = input;
+        }
+
+        public override void Reset()
+        {
+            base.Reset(); // reset all recognizer state variables
+            if ( input != null )
+            {
+                input.Seek( 0 ); // rewind the input
+            }
+        }
+
+        /** <summary>Set the input stream</summary> */
+        public virtual void SetTreeNodeStream( ITreeNodeStream input )
+        {
+            this.input = input;
+        }
+
+        public virtual ITreeNodeStream GetTreeNodeStream()
+        {
+            return input;
+        }
+
+        public override string SourceName
+        {
+            get
+            {
+                return input.SourceName;
+            }
+        }
+
+        protected override object GetCurrentInputSymbol( IIntStream input )
+        {
+            return ( (ITreeNodeStream)input ).LT( 1 );
+        }
+
+        protected override object GetMissingSymbol( IIntStream input,
+                                          RecognitionException e,
+                                          int expectedTokenType,
+                                          BitSet follow )
+        {
+            string tokenText =
+                "<missing " + TokenNames[expectedTokenType] + ">";
+            ITreeAdaptor adaptor = ((ITreeNodeStream)e.Input).TreeAdaptor;
+            return adaptor.Create(new CommonToken(expectedTokenType, tokenText));
+        }
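+
+        // Example (informal, not from the original source): if the input tree for a
+        // pattern such as ^(ASSIGN ID expr) is missing its ID node, the code above
+        // conjures a stand-in node whose text is "<missing ID>", so labels and
+        // actions that refer to the node still have something to point at.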
+
+        /** <summary>
+         *  Matching '.' in a tree parser has special meaning: skip the node, or
+         *  the entire subtree if the node has children.  If there are children,
+         *  scan until the corresponding UP node.
+         *  </summary>
+         */
+        public override void MatchAny( IIntStream ignore )
+        {
+            state.errorRecovery = false;
+            state.failed = false;
+            // always consume the current node
+            input.Consume();
+            // if the next node is DOWN, then the current node is a subtree:
+            // skip to corresponding UP. must count nesting level to get right UP
+            int look = input.LA( 1 );
+            if ( look == DOWN )
+            {
+                input.Consume();
+                int level = 1;
+                while ( level > 0 )
+                {
+                    switch ( input.LA( 1 ) )
+                    {
+                    case DOWN:
+                        level++;
+                        break;
+                    case UP:
+                        level--;
+                        break;
+                    case TokenTypes.EndOfFile:
+                        return;
+                    default:
+                        break;
+                    }
+                    input.Consume();
+                }
+            }
+        }
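+
+        // Example (informal, not from the original source): the tree (+ 1 (* 2 3))
+        // arrives in the node stream as
+        //
+        //     +  DOWN  1  *  DOWN  2  3  UP  UP
+        //
+        // so matching '.' while positioned at '*' consumes '*  DOWN  2  3  UP' as a
+        // single unit, using the DOWN/UP nesting count maintained above.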
+
+        /** <summary>
+         *  We have DOWN/UP nodes in the stream that have no line info; override.
+         *  Plus, we want to alter the exception type.  Don't try to recover
+         *  from tree parser errors inline...
+         *  </summary>
+         */
+        protected override object RecoverFromMismatchedToken( IIntStream input, int ttype, BitSet follow )
+        {
+            throw new MismatchedTreeNodeException( ttype, (ITreeNodeStream)input );
+        }
+
+        /** <summary>
+         *  Prefix the error message with the grammar name; the message is
+         *  always intended for the programmer, because the parser (not the
+         *  user) built the input tree.
+         *  </summary>
+         */
+        public override string GetErrorHeader( RecognitionException e )
+        {
+            return GrammarFileName + ": node from " +
+                   ( e.ApproximateLineInfo ? "after " : "" ) + "line " + e.Line + ":" + e.CharPositionInLine;
+        }
+
+        /** <summary>
+         *  Tree parsers parse nodes, which usually have a token object as
+         *  payload.  Set the exception token and do the default behavior.
+         *  </summary>
+         */
+        public override string GetErrorMessage( RecognitionException e, string[] tokenNames )
+        {
+            if ( this is TreeParser )
+            {
+                ITreeAdaptor adaptor = ( (ITreeNodeStream)e.Input ).TreeAdaptor;
+                e.Token = adaptor.GetToken( e.Node );
+                if ( e.Token == null )
+                { // could be an UP/DOWN node
+                    e.Token = new CommonToken( adaptor.GetType( e.Node ),
+                                              adaptor.GetText( e.Node ) );
+                }
+            }
+            return base.GetErrorMessage( e, tokenNames );
+        }
+
+        [Conditional("ANTLR_TRACE")]
+        public virtual void TraceIn( string ruleName, int ruleIndex )
+        {
+            base.TraceIn( ruleName, ruleIndex, input.LT( 1 ) );
+        }
+
+        [Conditional("ANTLR_TRACE")]
+        public virtual void TraceOut( string ruleName, int ruleIndex )
+        {
+            base.TraceOut( ruleName, ruleIndex, input.LT( 1 ) );
+        }
+
+    }
+}
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreePatternLexer.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreePatternLexer.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreePatternLexer.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreePatternLexer.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreePatternParser.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreePatternParser.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreePatternParser.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreePatternParser.cs
diff --git a/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeRewriter.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeRewriter.cs
new file mode 100644
index 0000000..16a38a2
--- /dev/null
+++ b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeRewriter.cs
@@ -0,0 +1,144 @@
+/*
+ * [The "BSD licence"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+namespace Antlr.Runtime.Tree
+{
+    using Antlr.Runtime.Misc;
+
+    using Console = System.Console;
+
+    public class TreeRewriter : TreeParser
+    {
+        protected bool showTransformations;
+
+        protected ITokenStream originalTokenStream;
+        protected ITreeAdaptor originalAdaptor;
+
+        Func<IAstRuleReturnScope> topdown_func;
+        Func<IAstRuleReturnScope> bottomup_func;
+
+        public TreeRewriter( ITreeNodeStream input )
+            : this( input, new RecognizerSharedState() )
+        {
+        }
+        public TreeRewriter( ITreeNodeStream input, RecognizerSharedState state )
+            : base( input, state )
+        {
+            originalAdaptor = input.TreeAdaptor;
+            originalTokenStream = input.TokenStream;
+            topdown_func = () => Topdown();
+            bottomup_func = () => Bottomup();
+        }
+
+        public virtual object ApplyOnce( object t, Func<IAstRuleReturnScope> whichRule )
+        {
+            if ( t == null )
+                return null;
+
+            try
+            {
+                // share TreeParser object but not parsing-related state
+                SetState(new RecognizerSharedState());
+                SetTreeNodeStream(new CommonTreeNodeStream(originalAdaptor, t));
+                ( (CommonTreeNodeStream)input ).TokenStream = originalTokenStream;
+                BacktrackingLevel = 1;
+                IAstRuleReturnScope r = whichRule();
+                BacktrackingLevel = 0;
+                if ( Failed )
+                    return t;
+
+                if (showTransformations && r != null && !t.Equals(r.Tree) && r.Tree != null)
+                    ReportTransformation(t, r.Tree);
+
+                if ( r != null && r.Tree != null )
+                    return r.Tree;
+                else
+                    return t;
+            }
+            catch ( RecognitionException )
+            {
+            }
+
+            return t;
+        }
+
+        public virtual object ApplyRepeatedly( object t, Func<IAstRuleReturnScope> whichRule )
+        {
+            bool treeChanged = true;
+            while ( treeChanged )
+            {
+                object u = ApplyOnce( t, whichRule );
+                treeChanged = !t.Equals( u );
+                t = u;
+            }
+            return t;
+        }
+
+        public virtual object Downup( object t )
+        {
+            return Downup( t, false );
+        }
+
+        public virtual object Downup( object t, bool showTransformations )
+        {
+            this.showTransformations = showTransformations;
+            TreeVisitor v = new TreeVisitor( new CommonTreeAdaptor() );
+            t = v.Visit( t, ( o ) => ApplyOnce( o, topdown_func ), ( o ) => ApplyRepeatedly( o, bottomup_func ) );
+            return t;
+        }
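+
+        // Illustrative sketch (not part of the original source): a rewrite tree
+        // grammar (options { filter=true; output=AST; }) produces a TreeRewriter
+        // subclass whose Topdown()/Bottomup() return the rewritten subtree.  Typical
+        // driver code (type name "MyRewriteGrammar" is hypothetical):
+        //
+        //     var nodes    = new CommonTreeNodeStream(tree);
+        //     var rewriter = new MyRewriteGrammar(nodes);
+        //     var result   = (CommonTree)rewriter.Downup(tree, true);  // true = report transformations
+        //
+        // On the way up, ApplyRepeatedly() keeps re-running the bottomup rules on a
+        // node until the subtree stops changing, so cascaded reductions reach a
+        // fixed point before the visitor moves on.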
+
+        // methods the downup strategy uses to do the up and down rules.
+        // to override, just define tree grammar rule topdown and turn on
+        // filter=true.
+        protected virtual IAstRuleReturnScope Topdown()
+        {
+            return null;
+        }
+
+        protected virtual IAstRuleReturnScope Bottomup()
+        {
+            return null;
+        }
+
+        /** Override this if you need transformation tracing to go somewhere
+         *  other than stdout or if you're not using ITree-derived trees.
+         */
+        protected virtual void ReportTransformation(object oldTree, object newTree)
+        {
+            ITree old = oldTree as ITree;
+            ITree @new = newTree as ITree;
+            string oldMessage = old != null ? old.ToStringTree() : "??";
+            string newMessage = @new != null ? @new.ToStringTree() : "??";
+            Console.WriteLine("{0} -> {1}", oldMessage, newMessage);
+        }
+    }
+}
diff --git "a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeRuleReturnScope\1401.cs" "b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeRuleReturnScope\1401.cs"
similarity index 100%
rename from "antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeRuleReturnScope\1401.cs"
rename to "runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeRuleReturnScope\1401.cs"
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeVisitor.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeVisitor.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeVisitor.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeVisitor.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeWizard.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeWizard.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeWizard.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/Tree/TreeWizard.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/UnbufferedTokenStream.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/UnbufferedTokenStream.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/UnbufferedTokenStream.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/UnbufferedTokenStream.cs
diff --git a/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/UnwantedTokenException.cs b/runtime/CSharp3/Sources/Antlr3.Runtime/UnwantedTokenException.cs
similarity index 100%
rename from antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/UnwantedTokenException.cs
rename to runtime/CSharp3/Sources/Antlr3.Runtime/UnwantedTokenException.cs
diff --git a/runtime/Cpp/include/antlr3.hpp b/runtime/Cpp/include/antlr3.hpp
new file mode 100755
index 0000000..4e40ba4
--- /dev/null
+++ b/runtime/Cpp/include/antlr3.hpp
@@ -0,0 +1,60 @@
+#ifndef	_ANTLR3_HPP
+#define	_ANTLR3_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <string>
+#include <sstream>
+
+#include    "antlr3defs.hpp"
+
+#include    "antlr3errors.hpp"
+#include    "antlr3memory.hpp"
+
+#include	"antlr3recognizersharedstate.hpp"
+#include    "antlr3baserecognizer.hpp"
+#include    "antlr3bitset.hpp"
+#include    "antlr3collections.hpp"
+#include    "antlr3commontoken.hpp"
+#include	"antlr3commontree.hpp"
+#include    "antlr3commontreeadaptor.hpp"
+#include    "antlr3cyclicdfa.hpp"
+#include	"antlr3debugeventlistener.hpp"
+#include    "antlr3exception.hpp"
+#include    "antlr3filestream.hpp"
+#include    "antlr3intstream.hpp"
+#include    "antlr3input.hpp"
+#include    "antlr3tokenstream.hpp"
+#include	"antlr3commontreenodestream.hpp"
+#include    "antlr3lexer.hpp"
+#include    "antlr3parser.hpp"
+#include    "antlr3rewritestreams.hpp"
+#include	"antlr3traits.hpp"
+#include    "antlr3treeparser.hpp"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3.inl b/runtime/Cpp/include/antlr3.inl
new file mode 100755
index 0000000..79974af
--- /dev/null
+++ b/runtime/Cpp/include/antlr3.inl
@@ -0,0 +1,9 @@
+ANTLR_BEGIN_NAMESPACE()
+
+//static 
+ANTLR_INLINE void GenericStream::displayRecognitionError( const StringType& str )
+{
+	// use an explicit format string so '%' characters in the message are printed literally
+	fprintf(stderr, "%s", str.c_str());
+}
+
+ANTLR_END_NAMESPACE()
\ No newline at end of file
diff --git a/runtime/Cpp/include/antlr3baserecognizer.hpp b/runtime/Cpp/include/antlr3baserecognizer.hpp
new file mode 100755
index 0000000..f125400
--- /dev/null
+++ b/runtime/Cpp/include/antlr3baserecognizer.hpp
@@ -0,0 +1,512 @@
+/** \file
+ * Defines the basic structure to support recognizing by either a lexer,
+ * parser, or tree parser.
+ * \addtogroup BaseRecognizer
+ * @{
+ */
+#ifndef	_ANTLR3_BASERECOGNIZER_HPP
+#define	_ANTLR3_BASERECOGNIZER_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+#include    "antlr3collections.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+/** \brief Base tracking context structure for all types of
+ * recognizers.
+ */
+template< class ImplTraits, class StreamType >
+class BaseRecognizer : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType	AllocPolicyType;
+	typedef typename StreamType::IntStreamType	IntStreamType;
+	typedef typename ComponentTypeFinder<ImplTraits, StreamType>::ComponentType  SuperType;
+	typedef typename StreamType::UnitType		UnitType;
+	typedef typename ImplTraits::template ExceptionBaseType<StreamType> ExceptionBaseType;
+	typedef typename ImplTraits::BitsetType BitsetType;
+	typedef typename ImplTraits::BitsetListType		BitsetListType;
+	typedef typename ImplTraits::StringType	StringType;
+	typedef typename ImplTraits::template RecognizerSharedStateType<StreamType>  RecognizerSharedStateType;
+	typedef typename ImplTraits::DebugEventListenerType DebugEventListenerType;
+	typedef typename ImplTraits::LexerType LexerType;
+	typedef typename ImplTraits::ParserType ParserType;
+	typedef typename ImplTraits::TreeParserType TreeParserType;
+
+	typedef typename AllocPolicyType::template StackType<StringType>  StringStackType;
+	typedef typename AllocPolicyType::template ListType<StringType>  StringListType;
+
+private:
+	/// A pointer to the shared recognizer state, such that multiple
+	/// recognizers can use the same input streams and so on (in
+	/// the case of grammar inheritance, for instance).
+	///
+	RecognizerSharedStateType*		m_state;
+
+	/// If set to something other than NULL, then this structure
+	/// points to an instance of the debugger interface. In general, the
+	/// debugger is only referenced internally in recovery/error operations
+	/// so that it does not cause overhead by having to check this pointer
+	/// in every function/method
+	///
+	DebugEventListenerType*		m_debugger;
+
+
+public:
+	BaseRecognizer(ANTLR_UINT32 sizeHint, RecognizerSharedStateType* state);
+
+	SuperType* get_super();
+	RecognizerSharedStateType* get_state() const;
+	DebugEventListenerType* get_debugger() const;
+	void  set_state( RecognizerSharedStateType* state );
+	void  set_debugger( DebugEventListenerType* debugger );
+
+    /// Match current input symbol against ttype.  Upon error, do one token
+	/// insertion or deletion if possible.
+	/// To turn off single token insertion or deletion error
+	/// recovery, override mismatchRecover() and have it call
+	/// plain mismatch(), which does not recover.  Then any error
+	/// in a rule will cause an exception and immediate exit from
+	/// rule.  Rule would recover by resynchronizing to the set of
+	/// symbols that can follow rule ref.
+	///
+    const UnitType*	match(ANTLR_UINT32 ttype, BitsetListType* follow);
+
+	/// Consumes the next token, whatever it is, and resets the recognizer state
+	/// so that it is not in error.
+	///
+    void	matchAny();
+
+	/// function that decides if the token ahead of the current one is the
+	/// one we were looking for, in which case the current one is very likely extraneous
+	/// and can be reported that way.
+	///
+	bool mismatchIsUnwantedToken(IntStreamType* input, ANTLR_UINT32 ttype);
+
+	/// function that decides if the current token is one that can logically
+	/// follow the one we were looking for, in which case the one we were looking for is
+	/// probably missing from the input.
+	///
+	bool mismatchIsMissingToken(IntStreamType* input, BitsetListType* follow);
+
+    /// Factor out what to do upon token mismatch so tree parsers can behave
+	/// differently.  Override and call mismatchRecover(input, ttype, follow)
+	/// to get single token insertion and deletion.  Use this to turn off
+	/// single token insertion and deletion. Override mismatchRecover
+	/// to call this instead.
+	///
+	/// \remark mismatch only works for parsers and must be overridden for anything else.
+	///
+    void mismatch(ANTLR_UINT32 ttype, BitsetListType* follow);
+
+    /// Report a recognition problem.
+	///
+	/// This method sets errorRecovery to indicate the parser is recovering
+	/// not parsing.  Once in recovery mode, no errors are generated.
+	/// To get out of recovery mode, the parser must successfully match
+	/// a token (after a resync).  So it will go:
+	///
+	///		1. error occurs
+	///		2. enter recovery mode, report error
+	///		3. consume until token found in resynch set
+	///		4. try to resume parsing
+	///		5. next match() will reset errorRecovery mode
+	///
+	/// If you override, make sure to update errorCount if you care about that.
+	///
+    void	reportError();
+	void	reportError( ClassForwarder<LexerType> );
+	template<typename CompType>
+	void	reportError( ClassForwarder<CompType> );
+
+    /** Function that is called to display a recognition error message. You may
+     *  override this function independently of reportError() above, as that function calls
+     *  this one to do the actual exception printing.
+     */
+    void	displayRecognitionError(ANTLR_UINT8** tokenNames);
+
+	/// Get number of recognition errors (lexer, parser, tree parser).  Each
+	/// recognizer tracks its own number.  So parser and lexer each have a
+	/// separate count.  Does not count the spurious errors found between
+	/// an error and the next valid token match.
+	///
+	/// \see reportError()
+	///
+	ANTLR_UINT32 getNumberOfSyntaxErrors();
+
+    /** Function that recovers from an error found in the input stream.
+     *  Generally, this will be a #ANTLR3_EXCEPTION_NOVIABLE_ALT but it could also
+     *  be from a mismatched token that match() could not recover from.
+     */
+    void	recover();
+
+    /** function that is a hook to listen to token consumption during error recovery.
+     *  This is mainly used by the debug parser to send events to the listener.
+     */
+    void	beginResync();
+
+    /** function that is a hook to listen to token consumption during error recovery.
+     *  This is mainly used by the debug parser to send events to the listener.
+     */
+    void	endResync();
+
+	/** function that is a hook to listen to token consumption during error recovery.
+     *  This is mainly used by the debug parser to send events to the listener.
+     */
+    void	beginBacktrack(ANTLR_UINT32 level);
+
+    /** function that is a hook to listen to token consumption during error recovery.
+     *  This is mainly used by the debug parser to send events to the listener.
+     */
+    void	endBacktrack(ANTLR_UINT32 level, bool successful);
+
+    /// Compute the error recovery set for the current rule.
+	/// Documentation below is from the Java implementation.
+	///
+	/// During rule invocation, the parser pushes the set of tokens that can
+	/// follow that rule reference on the stack; this amounts to
+	/// computing FIRST of what follows the rule reference in the
+	/// enclosing rule. This local follow set only includes tokens
+	/// from within the rule; i.e., the FIRST computation done by
+	/// ANTLR stops at the end of a rule.
+	//
+	/// EXAMPLE
+	//
+	/// When you find a "no viable alt exception", the input is not
+	/// consistent with any of the alternatives for rule r.  The best
+	/// thing to do is to consume tokens until you see something that
+	/// can legally follow a call to r *or* any rule that called r.
+	/// You don't want the exact set of viable next tokens because the
+	/// input might just be missing a token--you might consume the
+	/// rest of the input looking for one of the missing tokens.
+	///
+	/// Consider grammar:
+	///
+	/// a : '[' b ']'
+	///   | '(' b ')'
+	///   ;
+	/// b : c '^' INT ;
+	/// c : ID
+	///   | INT
+	///   ;
+	///
+	/// At each rule invocation, the set of tokens that could follow
+	/// that rule is pushed on a stack.  Here are the various "local"
+	/// follow sets:
+	///
+	/// FOLLOW(b1_in_a) = FIRST(']') = ']'
+	/// FOLLOW(b2_in_a) = FIRST(')') = ')'
+	/// FOLLOW(c_in_b) = FIRST('^') = '^'
+	///
+	/// Upon erroneous input "[]", the call chain is
+	///
+	/// a -> b -> c
+	///
+	/// and, hence, the follow context stack is:
+	///
+	/// depth  local follow set     after call to rule
+	///   0         <EOF>                    a (from main())
+	///   1          ']'                     b
+	///   2          '^'                     c
+	///
+	/// Notice that ')' is not included, because b would have to have
+	/// been called from a different context in rule a for ')' to be
+	/// included.
+	///
+	/// For error recovery, we cannot consider FOLLOW(c)
+	/// (context-sensitive or otherwise).  We need the combined set of
+	/// all context-sensitive FOLLOW sets--the set of all tokens that
+	/// could follow any reference in the call chain.  We need to
+	/// resync to one of those tokens.  Note that FOLLOW(c)='^' and if
+	/// we resync'd to that token, we'd consume until EOF.  We need to
+	/// sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+	/// In this case, for input "[]", LA(1) is in this set so we would
+	/// not consume anything and after printing an error rule c would
+	/// return normally.  It would not find the required '^' though.
+	/// At this point, it gets a mismatched token error and throws an
+	/// exception (since LA(1) is not in the viable following token
+	/// set).  The rule exception handler tries to recover, but finds
+	/// the same recovery set and doesn't consume anything.  Rule b
+	/// exits normally returning to rule a.  Now it finds the ']' (and
+	/// with the successful match exits errorRecovery mode).
+	///
+	/// So, you can see that the parser walks up call chain looking
+	/// for the token that was a member of the recovery set.
+	///
+	/// Errors are not generated in errorRecovery mode.
+	///
+	/// ANTLR's error recovery mechanism is based upon original ideas:
+	///
+	/// "Algorithms + Data Structures = Programs" by Niklaus Wirth
+	///
+	/// and
+	///
+	/// "A note on error recovery in recursive descent parsers":
+	/// http://portal.acm.org/citation.cfm?id=947902.947905
+	///
+	/// Later, Josef Grosch had some good ideas:
+	///
+	/// "Efficient and Comfortable Error Recovery in Recursive Descent
+	/// Parsers":
+	/// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+	///
+	/// Like Grosch I implemented local FOLLOW sets that are combined
+	/// at run-time upon error to avoid overhead during parsing.
+	///
+    BitsetType*	computeErrorRecoverySet();
+
+    /// Compute the context-sensitive FOLLOW set for current rule.
+	/// Documentation below is from the Java runtime.
+	///
+	/// This is the set of token types that can follow a specific rule
+	/// reference given a specific call chain.  You get the set of
+	/// viable tokens that can possibly come next (look ahead depth 1)
+	/// given the current call chain.  Contrast this with the
+	/// definition of plain FOLLOW for rule r:
+	///
+	///  FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
+	///
+	/// where x in T* and alpha, beta in V*; T is set of terminals and
+	/// V is the set of terminals and non terminals.  In other words,
+	/// FOLLOW(r) is the set of all tokens that can possibly follow
+	/// references to r in///any* sentential form (context).  At
+	/// references to r in *any* sentential form (context).  At
+	/// we have the call chain.  We may compute the exact (rather
+	/// than covering superset) set of following tokens.
+	///
+	/// For example, consider grammar:
+	///
+	/// stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
+	///      | "return" expr '.'
+	///      ;
+	/// expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
+	/// atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
+	///      | '(' expr ')'
+	///      ;
+	///
+	/// The FOLLOW sets are all inclusive whereas context-sensitive
+	/// FOLLOW sets are precisely what could follow a rule reference.
+	/// For input "i=(3);", here is the derivation:
+	///
+	/// stat => ID '=' expr ';'
+	///      => ID '=' atom ('+' atom)* ';'
+	///      => ID '=' '(' expr ')' ('+' atom)* ';'
+	///      => ID '=' '(' atom ')' ('+' atom)* ';'
+	///      => ID '=' '(' INT ')' ('+' atom)* ';'
+	///      => ID '=' '(' INT ')' ';'
+	///
+	/// At the "3" token, you'd have a call chain of
+	///
+	///   stat -> expr -> atom -> expr -> atom
+	///
+	/// What can follow that specific nested ref to atom?  Exactly ')'
+	/// as you can see by looking at the derivation of this specific
+	/// input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
+	///
+	/// You want the exact viable token set when recovering from a
+	/// token mismatch.  Upon token mismatch, if LA(1) is member of
+	/// the viable next token set, then you know there is most likely
+	/// a missing token in the input stream.  "Insert" one by just not
+	/// throwing an exception.
+	///
+    BitsetType*	computeCSRuleFollow();
+
+    /// Compute the current followset for the input stream.
+	///
+    BitsetType*	combineFollows(bool exact);
+
+    /// Attempt to recover from a single missing or extra token.
+	///
+	/// EXTRA TOKEN
+	///
+	/// LA(1) is not what we are looking for.  If LA(2) has the right token,
+	/// however, then assume LA(1) is some extra spurious token.  Delete it
+	/// and LA(2) as if we were doing a normal match(), which advances the
+	/// input.
+	///
+	/// MISSING TOKEN
+	///
+	/// If current token is consistent with what could come after
+	/// ttype then it is ok to "insert" the missing token, else throw
+	/// exception For example, Input "i=(3;" is clearly missing the
+	/// ')'.  When the parser returns from the nested call to expr, it
+	/// will have call chain:
+	///
+	///    stat -> expr -> atom
+	///
+	/// and it will be trying to match the ')' at this point in the
+	/// derivation:
+	///
+	///       => ID '=' '(' INT ')' ('+' atom)* ';'
+	///                          ^
+	/// match() will see that ';' doesn't match ')' and report a
+	/// mismatched token error.  To recover, it sees that LA(1)==';'
+	/// is in the set of tokens that can follow the ')' token
+	/// reference in rule atom.  It can assume that you forgot the ')'.
+	///
+	/// The exception that the Java implementation passes in is instead
+	/// stored in the recognizer exception stack in the C version. To 'throw' it we set the
+	/// error flag and rules cascade back when this is set.
+	///
+    const UnitType* recoverFromMismatchedToken( ANTLR_UINT32	ttype, BitsetListType*	follow);
+
+    /** Function that recovers from a mismatched set in the token stream, in a similar manner
+     *  to recoverFromMismatchedToken().
+     */
+    const UnitType* recoverFromMismatchedSet(BitsetListType*	follow);
+
+    /** common routine to handle single token insertion for recovery functions.
+     */
+	/// This code is factored out from mismatched token and mismatched set
+	///  recovery.  It handles "single token insertion" error recovery for
+	/// both.  No tokens are consumed to recover from insertions.  Return
+	/// true if recovery was possible else return false.
+	///
+    bool	recoverFromMismatchedElement(BitsetListType*	follow);
+
+    /** function that consumes input until the next token matches
+     *  the given token.
+     */
+    void	consumeUntil(ANTLR_UINT32   tokenType);
+
+    /** function that consumes input until the next token matches
+     *  one in the given set.
+     */
+    void	consumeUntilSet(BitsetType*	set);
+
+    /** function that returns an ANTLR3_LIST of the strings that identify
+     *  the rules in the parser that got you to this point. Can be overridden by installing your
+     *	own function set.
+     *
+     * \todo Document how to override invocation stack functions.
+     */
+	StringStackType	getRuleInvocationStack();
+	StringStackType	getRuleInvocationStackNamed(ANTLR_UINT8*    name);
+
+    /** function that converts an ANTLR3_LIST of tokens to an ANTLR3_LIST of
+     *  string token names. As this is mostly used in string template processing it may not be useful
+     *  in the C runtime.
+     */
+    StringListType	toStrings( const StringListType& );
+
+    /** function to return whether the rule has parsed input starting at the supplied
+     *  start index before. If the rule has not parsed input starting from the supplied start index,
+     *  then it will return ANTLR3_MEMO_RULE_UNKNOWN. If it has parsed from the supplied start point
+     *  then it will return the point where it last stopped parsing after that start point.
+     */
+    ANTLR_MARKER	getRuleMemoization( ANTLR_INTKEY	ruleIndex,
+												ANTLR_MARKER	ruleParseStart);
+
+    /** function that determines whether the rule has parsed input at the current index
+     *  in the input stream
+     */
+    bool	alreadyParsedRule(ANTLR_MARKER	ruleIndex);
+
+    /** Function that records whether the rule has parsed the input at a
+     *  current position successfully or not.
+     */
+    void	memoize(ANTLR_MARKER	ruleIndex,
+								ANTLR_MARKER	ruleParseStart);
+
+	/// Function that returns the current input symbol.
+    /// It is placed into any label for the associated token ref; e.g., x=ID.  Token
+	/// and tree parsers need to return different objects. Rather than test
+	/// for input stream type or change the IntStream interface, I use
+	/// a simple method to ask the recognizer to tell me what the current
+	/// input symbol is.
+	///
+	/// This is ignored for lexers and the lexer implementation of this
+	/// function should return NULL.
+	///
+	const UnitType*	getCurrentInputSymbol(IntStreamType* istream);
+	const UnitType*	getCurrentInputSymbol(IntStreamType* istream, ClassForwarder<LexerType>);
+	const UnitType*	getCurrentInputSymbol(IntStreamType* istream, ClassForwarder<ParserType>);
+	const UnitType*	getCurrentInputSymbol(IntStreamType* istream, ClassForwarder<TreeParserType>);
+
+	/// Conjure up a missing token during error recovery.
+	///
+	/// The recognizer attempts to recover from single missing
+	/// symbols. But, actions might refer to that missing symbol.
+	/// For example, x=ID {f($x);}. The action clearly assumes
+	/// that there has been an identifier matched previously and that
+	/// $x points at that token. If that token is missing, but
+	/// the next token in the stream is what we want we assume that
+	/// this token is missing and we keep going. Because we
+	/// have to return some token to replace the missing token,
+	/// we have to conjure one up. This method gives the user control
+	/// over the tokens returned for missing tokens. Mostly,
+	/// you will want to create something special for identifier
+	/// tokens. For literals such as '{' and ',', the default
+	/// action in the parser or tree parser works. It simply creates
+	/// a CommonToken of the appropriate type. The text will be the token.
+	/// If you change what tokens must be created by the lexer,
+	/// override this method to create the appropriate tokens.
+	///
+	UnitType*	getMissingSymbol( IntStreamType*		istream, ExceptionBaseType*		e,
+												ANTLR_UINT32			expectedTokenType,
+												BitsetListType*		follow);
+
+    /** Function that returns whether the supplied grammar function
+     *  will parse the current input stream or not. This is the way that syntactic
+     *  predicates are evaluated. Unlike Java, C is perfectly happy to invoke code
+     *  via a pointer to a function (hence that's what all the ANTLR3 C interfaces
+     *  do).
+     */
+	template<typename Predicate>
+    bool  synpred( ClassForwarder<Predicate> );
+
+	//In place of exConstruct, just directly instantiate the Exception Object
+
+    /** Reset the recognizer
+     */
+    void  reset();
+	void  reset( ClassForwarder<LexerType> );
+	template<typename CompType>
+	void  reset( ClassForwarder<CompType> );
+
+	void exConstruct();
+
+    ~BaseRecognizer();
+
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3baserecognizer.inl"
+
+/// @}
+///
+
+#endif	    /* _ANTLR3_BASERECOGNIZER_H	*/
+
diff --git a/runtime/Cpp/include/antlr3baserecognizer.inl b/runtime/Cpp/include/antlr3baserecognizer.inl
new file mode 100755
index 0000000..5d5acbf
--- /dev/null
+++ b/runtime/Cpp/include/antlr3baserecognizer.inl
@@ -0,0 +1,919 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template< class ImplTraits, class StreamType >
+BaseRecognizer<ImplTraits, StreamType>::BaseRecognizer(ANTLR_UINT32 sizeHint,
+											RecognizerSharedStateType* state)
+{
+	m_debugger = NULL;
+
+	// If we have been supplied with a pre-existing recognizer state
+	// then we just install it, otherwise we must create one from scratch
+	//
+	if	(state == NULL)
+	{
+		m_state = new RecognizerSharedStateType();
+		m_state->set_sizeHint( sizeHint );
+	}
+	else
+	{
+		// Install the one we were given, and do not reset it here
+		// as it will either already have been initialized or will
+		// be in a state that needs to be preserved.
+		//
+		m_state = state;
+	}
+}
+
+template< class ImplTraits, class StreamType >
+ANTLR_INLINE typename BaseRecognizer<ImplTraits, StreamType>::SuperType* BaseRecognizer<ImplTraits, StreamType>::get_super()
+{
+	return static_cast<SuperType*>(this);
+}
+
+template< class ImplTraits, class StreamType >
+ANTLR_INLINE typename BaseRecognizer<ImplTraits, StreamType>::RecognizerSharedStateType* BaseRecognizer<ImplTraits, StreamType>::get_state() const
+{
+	return m_state;
+}
+template< class ImplTraits, class StreamType >
+ANTLR_INLINE typename BaseRecognizer<ImplTraits, StreamType>::DebugEventListenerType* BaseRecognizer<ImplTraits, StreamType>::get_debugger() const
+{
+	return m_debugger;
+}
+template< class ImplTraits, class StreamType >
+ANTLR_INLINE void BaseRecognizer<ImplTraits, StreamType>::set_state( RecognizerSharedStateType* state )
+{
+	m_state = state;
+}
+template< class ImplTraits, class StreamType >
+ANTLR_INLINE void BaseRecognizer<ImplTraits, StreamType>::set_debugger( DebugEventListenerType* debugger )
+{
+	m_debugger = debugger;
+}
+
+template< class ImplTraits, class StreamType >
+const typename BaseRecognizer<ImplTraits, StreamType>::UnitType* 
+BaseRecognizer<ImplTraits, StreamType>::match(ANTLR_UINT32 ttype, BitsetListType* follow)
+{
+	SuperType*  super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_istream();
+
+	// Pick up the current input token/node for assignment to labels
+	//
+	const UnitType* matchedSymbol = this->getCurrentInputSymbol(is);
+
+    if	(is->_LA(1) == ttype)
+    {
+		// The token was the one we were told to expect
+		//
+		is->consume();					   // Consume that token from the stream
+		m_state->set_errorRecovery(false); // Not in error recovery now (if we were)
+		m_state->set_failed(false);	// The match was a success
+		return matchedSymbol;								// We are done
+    }
+
+    // We did not find the expected token type, if we are backtracking then
+    // we just set the failed flag and return.
+    //
+    if	( m_state->get_backtracking() > 0)
+    {
+		// Backtracking is going on
+		//
+		m_state->set_failed(true);
+		return matchedSymbol;
+	}
+
+    // We did not find the expected token and there is no backtracking
+    // going on, so we mismatch, which creates an exception in the recognizer exception
+    // stack.
+    //
+	matchedSymbol = this->recoverFromMismatchedToken(ttype, follow);
+    return matchedSymbol;
+
+}
+
+template< class ImplTraits, class StreamType >
+void BaseRecognizer<ImplTraits, StreamType>::matchAny()
+{
+	SuperType*  super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_istream();
+
+	is->consume();
+	m_state->set_errorRecovery(false);
+	m_state->set_failed(false);
+    return;
+}
+
+template< class ImplTraits, class StreamType >
+bool BaseRecognizer<ImplTraits, StreamType>::mismatchIsUnwantedToken(IntStreamType* is, ANTLR_UINT32 ttype)
+{
+	ANTLR_UINT32 nextt = is->_LA(2);
+
+	if	(nextt == ttype)
+	{
+		if(m_state->get_exception() != NULL)
+			m_state->get_exception()->set_expecting(nextt);
+		return true;		// This token is unknown, but the next one is the one we wanted
+	}
+	else
+		return false;	// Neither this token, nor the one following is the one we wanted
+}
+
+template< class ImplTraits, class StreamType >
+bool BaseRecognizer<ImplTraits, StreamType>::mismatchIsMissingToken(IntStreamType* is, BitsetListType* follow)
+{
+	bool	retcode;
+	BitsetType*	followClone;
+	BitsetType*	viableTokensFollowingThisRule;
+
+	if	(follow == NULL)
+	{
+		// There is no information about the tokens that can follow the last one
+		// hence we must say that the current one we found is not a member of the
+		// follow set and does not indicate a missing token. We will just consume this
+		// single token and see if the parser works it out from there.
+		//
+		return	false;
+	}
+
+	followClone						= NULL;
+	viableTokensFollowingThisRule	= NULL;
+
+	// The C bitset maps are laid down at compile time by the
+	// C code generation. Hence we cannot remove things from them
+	// and so on. So, in order to remove EOR (if we need to) then
+	// we clone the static bitset.
+	//
+	followClone = follow->bitsetLoad();
+	if	(followClone == NULL)
+		return false;
+
+	// Compute what can follow this grammar reference
+	//
+	if	(followClone->isMember( ImplTraits::CommonTokenType::EOR_TOKEN_TYPE))
+	{
+		// EOR can follow, but if we are not the start symbol, we
+		// need to remove it.
+		//
+		followClone->remove(ImplTraits::CommonTokenType::EOR_TOKEN_TYPE);
+
+		// Now compute the viable tokens that can follow this rule, according to context
+		// and make them part of the follow set.
+		//
+		viableTokensFollowingThisRule = this->computeCSRuleFollow();
+		followClone->borInPlace(viableTokensFollowingThisRule);
+	}
+
+	/// if current token is consistent with what could come after set
+	/// then we know we're missing a token; error recovery is free to
+	/// "insert" the missing token
+	///
+	/// BitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
+	/// in the follow set to indicate that the follow of the start symbol is
+	/// in the set (EOF can follow).
+	///
+	if	(		followClone->isMember(is->_LA(1))
+			||	followClone->isMember(ImplTraits::CommonTokenType::EOR_TOKEN_TYPE)
+		)
+	{
+		retcode = true;
+	}
+	else
+	{
+		retcode	= false;
+	}
+
+	if	(viableTokensFollowingThisRule != NULL)
+	{
+		delete viableTokensFollowingThisRule;
+	}
+	if	(followClone != NULL)
+	{
+		delete followClone;
+	}
+
+	return retcode;
+}
+
+template< class ImplTraits, class StreamType >
+void BaseRecognizer<ImplTraits, StreamType>::mismatch(ANTLR_UINT32 ttype, BitsetListType* follow)
+{
+	this->get_super()->mismatch( ttype, follow );
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::reportError()
+{
+	this->reportError( ClassForwarder<SuperType>() );
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::reportError( ClassForwarder<LexerType> )
+{
+	// Indicate this recognizer had an error while processing.
+	//
+	m_state->inc_errorCount();
+
+    this->displayRecognitionError(m_state->get_tokenNames());
+}
+
+template< class ImplTraits, class StreamType >
+template<typename CompType>
+void	BaseRecognizer<ImplTraits, StreamType>::reportError(ClassForwarder<CompType> )
+{
+	// Invoke the debugger event if there is a debugger listening to us
+	//
+	if	( m_debugger != NULL)
+	{
+		m_debugger->recognitionException( m_state->get_exception() );
+	}
+
+    if	( m_state->get_errorRecovery() == true)
+    {
+		// Already in error recovery so don't display another error while doing so
+		//
+		return;
+    }
+
+    // Signal we are in error recovery now
+    //
+    m_state->set_errorRecovery(true);
+
+	// Indicate this recognizer had an error while processing.
+	//
+	m_state->inc_errorCount();
+
+	// Call the error display routine
+	//
+    this->displayRecognitionError( m_state->get_tokenNames() );
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::displayRecognitionError(ANTLR_UINT8** tokenNames)
+{
+	// Retrieve some info for easy reading.
+	//
+	ExceptionBaseType* ex	    =		m_state->get_exception();
+	StringType ttext;
+
+	// See if there is a 'filename' we can use
+	//
+	SuperType* super = static_cast<SuperType*>(this);
+	super->displayRecognitionError(tokenNames, ex);
+}
+
+template< class ImplTraits, class StreamType >
+ANTLR_UINT32 BaseRecognizer<ImplTraits, StreamType>::getNumberOfSyntaxErrors()
+{
+	return	m_state->get_errorCount();
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::recover()
+{
+	SuperType* super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_parser_istream();
+	// Are we about to repeat the same error?
+	//
+    if	( m_state->get_lastErrorIndex() == is->index())
+    {
+		// The last error was at the same token index point. This must be a case
+		// where LT(1) is in the recovery token set so nothing is
+		// consumed. Consume a single token so at least to prevent
+		// an infinite loop; this is a failsafe.
+		//
+		is->consume();
+    }
+
+    // Record error index position
+    //
+    m_state->set_lastErrorIndex( is->index() );
+
+    // Work out the follows set for error recovery
+    //
+    BitsetType* followSet	= this->computeErrorRecoverySet();
+
+    // Call resync hook (for debuggers and so on)
+    //
+    this->beginResync();
+
+    // Consume tokens until we have resynced to something in the follows set
+    //
+    this->consumeUntilSet(followSet);
+
+    // End resync hook
+    //
+    this->endResync();
+
+    // Destroy the temporary bitset we produced.
+    //
+    delete followSet;
+
+    // Reset the inError flag so we don't re-report the exception
+    //
+    m_state->set_error(false);
+    m_state->set_failed(false);
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::beginResync()
+{
+	if	(m_debugger != NULL)
+	{
+		m_debugger->beginResync();
+	}
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::endResync()
+{
+	if	(m_debugger != NULL)
+	{
+		m_debugger->endResync();
+	}
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::beginBacktrack(ANTLR_UINT32 level)
+{
+	if	(m_debugger != NULL)
+	{
+		m_debugger->beginBacktrack(level);
+	}
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::endBacktrack(ANTLR_UINT32 level, bool successful)
+{
+	if	(m_debugger != NULL)
+	{
+		m_debugger->endBacktrack(level);
+	}
+}
+
+template< class ImplTraits, class StreamType >
+typename BaseRecognizer<ImplTraits, StreamType>::BitsetType*	BaseRecognizer<ImplTraits, StreamType>::computeErrorRecoverySet()
+{
+	return   this->combineFollows(false);
+}
+
+template< class ImplTraits, class StreamType >
+typename BaseRecognizer<ImplTraits, StreamType>::BitsetType*	BaseRecognizer<ImplTraits, StreamType>::computeCSRuleFollow()
+{
+	return   this->combineFollows(false);
+}
+
+template< class ImplTraits, class StreamType >
+typename BaseRecognizer<ImplTraits, StreamType>::BitsetType*	BaseRecognizer<ImplTraits, StreamType>::combineFollows(bool exact)
+{
+	BitsetType*	followSet;
+    BitsetType*	localFollowSet;
+    ANTLR_UINT32	top;
+    ANTLR_UINT32	i;
+
+    top	= static_cast<ANTLR_UINT32>( m_state->get_following().size() );
+
+    followSet	    = new BitsetType(0);
+	localFollowSet	= NULL;
+
+    for (i = top; i>0; i--)
+    {
+		localFollowSet =  m_state->get_following().at(i-1).bitsetLoad();
+
+		if  (localFollowSet != NULL)
+		{
+			followSet->borInPlace(localFollowSet);
+
+			if	(exact == true)
+			{
+				if	(localFollowSet->isMember( ImplTraits::CommonTokenType::EOR_TOKEN_TYPE) == false)
+				{
+					// Only leave EOR in the set if at top (start rule); this lets us know
+					// if we have to include the follow(start rule); I.E., EOF
+					//
+					if	(i>1)
+					{
+						followSet->remove(ImplTraits::CommonTokenType::EOR_TOKEN_TYPE);
+					}
+				}
+				else
+				{
+					break;	// Cannot see End Of Rule from here, just drop out
+				}
+			}
+			delete localFollowSet;
+			localFollowSet = NULL;
+		}
+    }
+
+	if	(localFollowSet != NULL)
+	{
+		delete localFollowSet;
+	}
+    return  followSet;
+}
+
+template< class ImplTraits, class StreamType >
+const typename BaseRecognizer<ImplTraits, StreamType>::UnitType* 
+BaseRecognizer<ImplTraits, StreamType>::recoverFromMismatchedToken( ANTLR_UINT32	ttype, BitsetListType*	follow)
+{
+	SuperType* super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_parser_istream();
+	const UnitType* matchedSymbol;
+
+	// If the next token after the one we are looking at in the input stream
+	// is what we are looking for then we remove the one we have discovered
+	// from the stream by consuming it, then consume this next one along too as
+	// if nothing had happened.
+	//
+	if	( this->mismatchIsUnwantedToken( is, ttype) == true)
+	{
+		// Create an exception if we need one
+		//
+		new ANTLR_Exception<ImplTraits, UNWANTED_TOKEN_EXCEPTION, StreamType>(this, "");
+
+		// Call resync hook (for debuggers and so on)
+		//
+		if	(m_debugger != NULL)
+		{
+			m_debugger->beginResync();
+		}
+
+		// "delete" the extra token
+		//
+		this->beginResync();
+		is->consume();
+		this->endResync();
+		// End resync hook
+		//
+		if	(m_debugger != NULL)
+		{
+			m_debugger->endResync();
+		}
+
+		// Print out the error after we consume so that ANTLRWorks sees the
+		// token in the exception.
+		//
+		this->reportError();
+
+		// Return the token we are actually matching
+		//
+		matchedSymbol = this->getCurrentInputSymbol(is);
+
+		// Consume the token that the rule actually expected to get as if everything
+		// was hunky dory.
+		//
+		is->consume();
+
+		m_state->set_error(false); // Exception is not outstanding any more
+
+		return	matchedSymbol;
+	}
+
+	// Single token deletion (Unwanted above) did not work
+	// so we see if we can insert a token instead by calculating which
+	// token would be missing
+	//
+	if	( this->mismatchIsMissingToken(is, follow))
+	{
+		// We can fake the missing token and proceed
+		//
+		new ANTLR_Exception<ImplTraits, MISSING_TOKEN_EXCEPTION, StreamType>(this, "");
+		matchedSymbol = this->getMissingSymbol( is, m_state->get_exception(), ttype, follow);
+		m_state->get_exception()->set_token( matchedSymbol );
+		m_state->get_exception()->set_expecting(ttype);
+
+		// Print out the error after we insert so that ANTLRWorks sees the
+		// token in the exception.
+		//
+		this->reportError();
+
+		m_state->set_error(false);	// Exception is not outstanding any more
+
+		return	matchedSymbol;
+	}
+
+	// Create an exception if we need one
+	//
+	new ANTLR_Exception<ImplTraits, RECOGNITION_EXCEPTION, StreamType>(this, "");
+
+	// Neither deleting nor inserting tokens allows recovery
+	// must just report the exception.
+	//
+	m_state->set_error(true);
+	return NULL;
+}
+
+template< class ImplTraits, class StreamType >
+const typename BaseRecognizer<ImplTraits, StreamType>::UnitType* 
+BaseRecognizer<ImplTraits, StreamType>::recoverFromMismatchedSet(BitsetListType*	follow)
+{
+	SuperType* super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_parser_istream();
+	const UnitType* matchedSymbol;
+
+	if	(this->mismatchIsMissingToken(is, follow) == true)
+	{
+		// We can fake the missing token and proceed
+		//
+		new ANTLR_Exception<ImplTraits, MISSING_TOKEN_EXCEPTION, StreamType>(this);
+		matchedSymbol = this->getMissingSymbol(is, m_state->get_exception(), follow);
+		m_state->get_exception()->set_token(matchedSymbol);
+
+		// Print out the error after we insert so that ANTLRWorks sees the
+		// token in the exception.
+		//
+		this->reportError();
+
+		m_state->set_error(false);	// Exception is not outstanding any more
+
+		return	matchedSymbol;
+	}
+
+    // TODO - Single token deletion like in recoverFromMismatchedToken()
+    //
+    m_state->set_error(true);
+	m_state->set_failed(true);
+	return NULL;
+}
+
+template< class ImplTraits, class StreamType >
+bool  BaseRecognizer<ImplTraits, StreamType>::recoverFromMismatchedElement(BitsetListType*	followBits)
+{
+	SuperType* super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_parser_istream();
+
+	BitsetType* follow	= followBits->load();
+	BitsetType*   viableToksFollowingRule;
+
+    if	(follow == NULL)
+    {
+		/* The follow set is NULL, which means we don't know what can come
+		 * next, so we "hit and hope" by just signifying that we cannot
+		 * recover, which will just cause the next token to be consumed,
+		 * which might dig us out.
+		 */
+		return	false;
+    }
+
+    /* We have a bitmap for the follow set, hence we can compute
+     * what can follow this grammar element reference.
+     */
+    if	(follow->isMember( ImplTraits::CommonTokenType::EOR_TOKEN_TYPE) == true)
+    {
+		/* First we need to know which of the available tokens are viable
+		 * to follow this reference.
+		 */
+		viableToksFollowingRule	= this->computeCSRuleFollow();
+
+		/* Remove the EOR token, which we do not wish to compute with
+		 */
+		follow->remove( ImplTraits::CommonTokenType::EOR_TOKEN_TYPE);
+		delete viableToksFollowingRule;
+		/* We now have the computed set of what can follow the current token
+		 */
+    }
+
+    /* We can now see if the current token works with the set of tokens
+     * that could follow the current grammar reference. If it looks like it
+     * is consistent, then we can "insert" that token by not throwing
+     * an exception and assuming that we saw it.
+     */
+    if	( follow->isMember(is->_LA(1)) == true)
+    {
+		/* report the error, but don't cause any rules to abort and stuff
+		 */
+		this->reportError();
+		if	(follow != NULL)
+		{
+			delete follow;
+		}
+		m_state->set_error(false);
+		m_state->set_failed(false);
+		return true;	/* Success in recovery	*/
+    }
+
+    if	(follow != NULL)
+    {
+		delete follow;
+    }
+
+    /* We could not find anything viable to do, so this is going to
+     * cause an exception.
+     */
+    return  false;
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::consumeUntil(ANTLR_UINT32   tokenType)
+{
+	SuperType* super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_parser_istream();
+
+	// What do we have at the moment?
+    //
+    ANTLR_UINT32 ttype	= is->_LA(1);
+
+    // Start eating tokens until we get to the one we want.
+    //
+    while   (ttype != ImplTraits::CommonTokenType::TOKEN_EOF && ttype != tokenType)
+    {
+		is->consume();
+		ttype	= is->_LA(1);
+    }
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::consumeUntilSet(BitsetType*	set)
+{
+    ANTLR_UINT32	    ttype;
+	SuperType* super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_parser_istream();
+
+    // What do we have at the moment?
+    //
+    ttype	= is->_LA(1);
+
+    // Start eating tokens until we get to one we want.
+    //
+    while   (ttype != ImplTraits::CommonTokenType::TOKEN_EOF && set->isMember(ttype) == false)
+    {
+		is->consume();
+		ttype	= is->_LA(1);
+    }
+
+}
+
+template< class ImplTraits, class StreamType >
+ANTLR_MARKER	BaseRecognizer<ImplTraits, StreamType>::getRuleMemoization( ANTLR_INTKEY	ruleIndex, ANTLR_MARKER	ruleParseStart)
+{
+	/* The rule memos are a trie of tries: an IntTrie keyed by rule index,
+     * whose entries each hold an IntTrie keyed by rule start position.
+     */
+	typedef IntTrie<ImplTraits, ANTLR_MARKER> RuleListType;
+	typedef TrieEntry<ImplTraits, RuleListType*> EntryType;
+	typedef TrieEntry<ImplTraits, ANTLR_MARKER> SubEntryType;
+    ANTLR_MARKER	stopIndex;
+    EntryType*	entry;
+
+    /* See if we have a list in the ruleMemos for this rule, and if not, then create one
+     * as we will need it eventually if we are being asked for the memo here.
+     */
+    entry	= m_state->get_ruleMemo()->get(ruleIndex);
+
+    if	(entry == NULL)
+    {
+		/* Did not find it, so create a new one for it, with a bit depth based on the
+		 * size of the input stream. We need the bit depth to incorporate the number of
+		 * bits required to represent the largest possible stop index in the input, which is the
+		 * last character. An int stream is free to return the largest 64 bit offset if it has
+		 * no idea of the size, but you should remember that this will cause the leftmost
+		 * bit match algorithm to run to 63 bits, which will be the whole time spent in the trie ;-)
+		 */
+		m_state->get_ruleMemo()->add( ruleIndex, new RuleListType(63) );
+
+		/* We cannot have a stopIndex in a trie we have just created of course
+		 */
+		return	MEMO_RULE_UNKNOWN;
+    }
+
+    RuleListType* ruleList	= entry->get_data();
+
+    /* See if there is a stop index associated with the supplied start index.
+     */
+    stopIndex	= 0;
+
+    SubEntryType* sub_entry = ruleList->get(ruleParseStart);
+    if (sub_entry != NULL)
+    {
+		stopIndex = sub_entry->get_data();
+    }
+
+    if	(stopIndex == 0)
+    {
+		return MEMO_RULE_UNKNOWN;
+    }
+
+    return  stopIndex;
+}
+
+template< class ImplTraits, class StreamType >
+bool	BaseRecognizer<ImplTraits, StreamType>::alreadyParsedRule(ANTLR_MARKER	ruleIndex)
+{
+	SuperType* super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_istream();
+
+    /* See if we have a memo marker for this.
+     */
+    ANTLR_MARKER stopIndex	    = this->getRuleMemoization( ruleIndex, is->index() );
+
+    if	(stopIndex  == MEMO_RULE_UNKNOWN)
+    {
+		return false;
+    }
+
+    if	(stopIndex == MEMO_RULE_FAILED)
+    {
+		m_state->set_failed(true);
+    }
+    else
+    {
+		is->seek(stopIndex+1);
+    }
+
+    /* If here then the rule was executed for this input already
+     */
+    return  true;
+}
+
+template< class ImplTraits, class StreamType >
+void	BaseRecognizer<ImplTraits, StreamType>::memoize(ANTLR_MARKER ruleIndex, ANTLR_MARKER ruleParseStart)
+{
+   /* The rule memos are a trie of tries: an IntTrie keyed by rule index,
+    * whose entries each hold an IntTrie keyed by rule start position.
+    */
+	typedef IntTrie<ImplTraits, ANTLR_MARKER> RuleListType;
+	typedef TrieEntry<ImplTraits, RuleListType*> EntryType;
+    EntryType*	    entry;
+    ANTLR_MARKER	    stopIndex;
+	SuperType* super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_istream();
+
+    stopIndex	= (m_state->get_failed() == true) ? MEMO_RULE_FAILED : is->index() - 1;
+
+    entry	= m_state->get_ruleMemo()->get(ruleIndex);
+
+    if	(entry != NULL)
+    {
+		RuleListType*	ruleList = entry->get_data();
+
+		/* If we don't already have this entry, append it. The memoize trie does not
+		 * accept duplicates so it won't add it if already there and we just ignore the
+		 * return code as we don't care if it is there already.
+		 */
+		ruleList->add(ruleParseStart, stopIndex);
+    }
+}
+
+template< class ImplTraits, class StreamType >
+const typename BaseRecognizer<ImplTraits, StreamType>::UnitType*
+BaseRecognizer<ImplTraits, StreamType>::getCurrentInputSymbol( IntStreamType* istream )
+{
+	return this->getCurrentInputSymbol( istream, ClassForwarder<SuperType>() );
+}
+
+template< class ImplTraits, class StreamType >
+const typename BaseRecognizer<ImplTraits, StreamType>::UnitType*
+BaseRecognizer<ImplTraits, StreamType>::getCurrentInputSymbol(IntStreamType* istream, ClassForwarder<LexerType>)
+{
+	return NULL;
+}
+
+template< class ImplTraits, class StreamType >
+const typename BaseRecognizer<ImplTraits, StreamType>::UnitType*
+BaseRecognizer<ImplTraits, StreamType>::getCurrentInputSymbol(IntStreamType* istream, ClassForwarder<ParserType>)
+{
+	typedef typename ImplTraits::TokenStreamType TokenStreamType;
+	TokenStreamType* token_stream = static_cast<TokenStreamType*>(istream);
+	return token_stream->_LT(1);
+}
+
+template< class ImplTraits, class StreamType >
+const typename BaseRecognizer<ImplTraits, StreamType>::UnitType*
+BaseRecognizer<ImplTraits, StreamType>::getCurrentInputSymbol(IntStreamType* istream, ClassForwarder<TreeParserType>)
+{
+	typedef typename ImplTraits::TreeNodeStreamType TreeNodeStreamType;
+	TreeNodeStreamType*	ctns = static_cast<TreeNodeStreamType*>(istream);
+	return ctns->_LT(1);
+}
+
+
+template< class ImplTraits, class StreamType >
+typename BaseRecognizer<ImplTraits, StreamType>::UnitType*	BaseRecognizer<ImplTraits, StreamType>::getMissingSymbol( IntStreamType* istream,
+										  ExceptionBaseType*		e,
+										  ANTLR_UINT32			expectedTokenType,
+										  BitsetListType*	follow)
+{
+	return this->get_super()->getMissingSymbol( istream, e, expectedTokenType, follow );
+}
+
+
+template< class ImplTraits, class StreamType >
+	template<typename Predicate>
+bool  BaseRecognizer<ImplTraits, StreamType>::synpred(ClassForwarder<Predicate> pred)
+{
+	ANTLR_MARKER   start;
+    SuperType* super = static_cast<SuperType*>(this);
+	IntStreamType* is = super->get_istream();
+
+    /* Begin backtracking so we can get back to where we started after trying out
+     * the syntactic predicate.
+     */
+    start   = is->mark();
+    m_state->inc_backtracking();
+
+    /* Try the syntactic predicate
+     */
+    this->get_super()->synpred( pred );
+
+    /* Reset
+     */
+    is->rewind(start);
+    m_state->dec_backtracking();
+
+    if	( m_state->get_failed() == true)
+    {
+		/* Predicate failed
+		 */
+		m_state->set_failed(false);
+		return	false;
+    }
+    else
+    {
+		/* Predicate was successful
+		 */
+		m_state->set_failed(false);
+		return	true;
+    }
+}
+
+template< class ImplTraits, class StreamType >
+void BaseRecognizer<ImplTraits, StreamType>::exConstruct()
+{
+	this->get_super()->exConstruct();
+}
+
+template< class ImplTraits, class StreamType >
+void  BaseRecognizer<ImplTraits, StreamType>::reset()
+{
+	this->reset( ClassForwarder<SuperType>() );
+}
+
+template< class ImplTraits, class StreamType >
+template< typename CompType >
+void  BaseRecognizer<ImplTraits, StreamType>::reset( ClassForwarder<CompType> )
+{
+	typedef typename RecognizerSharedStateType::RuleMemoType RuleMemoType;
+	 m_state->get_following().clear();
+
+	// Reset the state flags
+	//
+	m_state->set_errorRecovery(false);
+	m_state->set_lastErrorIndex(-1);
+	m_state->set_failed(false);
+	m_state->set_errorCount(0);
+	m_state->set_backtracking(0);
+
+	if	(m_state->get_ruleMemo() != NULL)
+	{
+		delete m_state->get_ruleMemo();
+		m_state->set_ruleMemo( new RuleMemoType(15) );	/* 16 bit depth is enough for 32768 rules! */
+	}
+}
+
+template< class ImplTraits, class StreamType >
+void  BaseRecognizer<ImplTraits, StreamType>::reset( ClassForwarder<LexerType> )
+{
+	m_state->set_token_present( false );
+    m_state->set_type( ImplTraits::CommonTokenType::TOKEN_INVALID );
+    m_state->set_channel( TOKEN_DEFAULT_CHANNEL );
+    m_state->set_tokenStartCharIndex( -1 );
+    m_state->set_tokenStartCharPositionInLine(-1);
+    m_state->set_tokenStartLine( -1 );
+    m_state->set_text("");
+}
+
+template< class ImplTraits, class StreamType >
+BaseRecognizer<ImplTraits, StreamType>::~BaseRecognizer()
+{
+	// Did we have a state allocated?
+	//
+	if	(m_state != NULL)
+	{
+		// Free any rule memoization we set up
+		//
+		if	(m_state->get_ruleMemo() != NULL)
+		{
+			delete m_state->get_ruleMemo();
+			m_state->set_ruleMemo(NULL);
+		}
+
+
+		// Free any exception space we have left around
+		//
+		ExceptionBaseType* thisE = m_state->get_exception();
+		if	(thisE != NULL)
+		{
+			delete thisE;
+		}
+
+		// Free the shared state memory
+		//
+		delete m_state;
+	}
+
+	// Free the actual recognizer space
+	//
+}
+
+
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3bitset.hpp b/runtime/Cpp/include/antlr3bitset.hpp
new file mode 100755
index 0000000..a711b8a
--- /dev/null
+++ b/runtime/Cpp/include/antlr3bitset.hpp
@@ -0,0 +1,224 @@
+/**
+ * \file
+ * Defines the basic structures of an ANTLR3 bitset. This is a C++ version of the
+ * cut-down Bitset class provided with the Java version of ANTLR 3.
+ * 
+ * 
+ */
+#ifndef	_ANTLR3_BITSET_HPP
+#define	_ANTLR3_BITSET_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+/** How many bits in the elements
+ */
+static const ANTLR_UINT32	ANTLR_BITSET_BITS =	64;
+
+/** How many bits in a nibble of bits
+ */
+static const ANTLR_UINT32	ANTLR_BITSET_NIBBLE	= 4;
+
+/** log2 of ANTLR_BITSET_BITS (2^ANTLR_BITSET_LOG_BITS = ANTLR_BITSET_BITS)
+ */
+static const ANTLR_UINT32	ANTLR_BITSET_LOG_BITS =	6;
+
+/** We will often need to do a mod operator (i mod nbits).
+ *  For powers of two, this mod operation is the
+ *  same as:
+ *   - (i & (nbits-1)).  
+ *
+ * Since mod is relatively slow, we use an easily
+ * precomputed mod mask to do the mod instead.
+ */
+static const ANTLR_UINT32	ANTLR_BITSET_MOD_MASK = ANTLR_BITSET_BITS - 1;
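+
+/* Worked example of the mask trick above (illustration only, not part of the
+ * original source): with 64-bit words, bit number 69 maps to
+ *   word index = 69 >> ANTLR_BITSET_LOG_BITS = 69 / 64 = 1
+ *   bit offset = 69 &  ANTLR_BITSET_MOD_MASK = 69 % 64 = 5
+ * so bit 69 is (1ULL << 5) within the second word, which is exactly what the
+ * private WordNumber() and BitMask() helpers of Bitset compute without a divide.
+ */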
+
+template <class ImplTraits>
+class BitsetList : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename ImplTraits::BitsetType BitsetType;
+
+private:
+	/// Pointer to the allocated array of bits for this bit set, which
+    /// is an array of 64 bit elements (of the architecture). If we find a 
+    /// machine/C compiler that does not know anything about 64 bit values
+    ///	then it should be easy enough to produce a 32 bit (or less) version
+    /// of the bitset code. Note that the pointer here may be static if laid down
+	/// by the code generation, and it must be copied if it is to be manipulated
+	/// to perform followset calculations.
+    ///
+    ANTLR_BITWORD*  m_bits;
+
+    /// Length of the current bit set in ANTLR3_UINT64 units.
+    ///
+    ANTLR_UINT32    m_length;
+
+public:
+	BitsetList();
+	BitsetList( ANTLR_BITWORD* bits, ANTLR_UINT32 length );
+
+	ANTLR_BITWORD* get_bits() const;
+	ANTLR_UINT32 get_length() const;
+	void set_bits( ANTLR_BITWORD* bits );
+	void set_length( ANTLR_UINT32 length );
+
+	///
+	/// \brief
+	/// Creates a new Bitset from this list of bit words, allocating as many
+	/// 64 bit sets as are required to hold them.
+	///
+	/// \returns
+	/// A new bit set containing a copy of the bits held in this list.
+	///
+	/// \remarks
+	/// The static bit maps laid down by the code generation cannot be modified,
+	/// so they must be loaded into a Bitset like this before follow set
+	/// calculations can manipulate them.
+	///
+	BitsetType* bitsetLoad();
+
+	BitsetType* bitsetCopy();
+
+};
+
+template <class ImplTraits>
+class Bitset : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename AllocPolicyType::template ListType<ANTLR_UINT32> IntListType;
+	typedef typename ImplTraits::BitsetListType BitsetListType;
+
+private:
+	/// The actual bits themselves
+	///
+	BitsetListType		m_blist;
+
+public:
+	Bitset( ANTLR_UINT32 nbits=0 );
+	Bitset( const Bitset& bitset );
+    Bitset*  clone() const;
+	Bitset*  bor(Bitset* bitset2);
+
+	BitsetListType& get_blist();
+	void	 borInPlace(Bitset* bitset2);
+	ANTLR_UINT32 size() const;
+	void	add(ANTLR_INT32 bit);
+	void	grow(ANTLR_INT32 newSize);
+	bool	equals(Bitset* bitset2) const;
+	bool	isMember(ANTLR_UINT32 bit) const;
+	ANTLR_UINT32 numBits() const;
+	void remove(ANTLR_UINT32 bit);
+	bool isNilNode() const;
+
+	/** Produce an integer list of all the bits that are turned on
+	 *  in this bitset. Used for error processing in the main as the bitset
+	 *  represents a number of integer tokens which we use for follow sets
+	 *  and so on.
+	 *
+	 *  The first entry is the number of elements in the list, including
+	 *  the count entry itself.
+	 */
+	ANTLR_INT32* toIntList() const;
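+
+	/* Illustrative sketch only (not part of the original source): walking the
+	 * length-prefixed array returned by toIntList(). list[0] holds the total
+	 * length, so the token values sit at indexes 1 .. list[0]-1; the caller owns
+	 * the memory and should release it through the same allocation policy.
+	 *
+	 *   ANTLR_INT32* list = followSet->toIntList();
+	 *   for (ANTLR_INT32 i = 1; i < list[0]; ++i)
+	 *       report(list[i]);               // report() is a hypothetical consumer
+	 *   AllocPolicyType::free(list);
+	 */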
+
+	///
+	/// \brief
+	/// Creates a new bitset with at least one element, but as
+	/// many elements as are required.
+	///
+	/// \param[in] bit
+	/// The bit (or bits) to add to the new set.
+	///
+	/// \returns
+	/// A new bit set with all of the specified elements added into it.
+	///
+	/// \remarks
+	/// The C runtime uses a varargs function here; C++ does not take variable
+	/// length argument lists comfortably, so overloads are provided for the
+	/// one and two bit cases instead.
+	///
+	static Bitset* BitsetOf(ANTLR_INT32 bit);
+	static Bitset* BitsetOf(ANTLR_INT32 bit1, ANTLR_INT32 bit2);
+	
+	///
+	/// \brief
+	/// Creates a new bitset from a list of bit numbers, growing the set as
+	/// required to hold the largest of them.
+	///
+	/// \param[in] list
+	/// A list of the bit numbers to add to the set.
+	///
+	/// \returns
+	/// A new bit set with all of the listed bits set.
+	///
+	static Bitset*  BitsetFromList(const IntListType& list);
+	~Bitset();
+
+private:
+	void	growToInclude(ANTLR_INT32 bit);
+	static ANTLR_UINT64	BitMask(ANTLR_UINT32 bitNumber);
+	static ANTLR_UINT32	NumWordsToHold(ANTLR_UINT32 bit);
+	static ANTLR_UINT32	WordNumber(ANTLR_UINT32 bit);
+	void bitsetORInPlace(Bitset* bitset2);
+	
+};
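+
+/* Illustrative sketch only (not part of the original source): typical use of
+ * Bitset for a follow-set test, assuming a concrete ImplTraits named Traits;
+ * ID, COMMA, RPAREN and la1 are hypothetical token types / lookahead values.
+ *
+ *   Bitset<Traits>* follow = Bitset<Traits>::BitsetOf(ID, COMMA);
+ *   follow->add(RPAREN);
+ *   bool canFollow = follow->isMember(la1);   // la1: current lookahead token type
+ *   delete follow;
+ */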
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3bitset.inl"
+
+#endif
+
diff --git a/runtime/Cpp/include/antlr3bitset.inl b/runtime/Cpp/include/antlr3bitset.inl
new file mode 100755
index 0000000..ad2f620
--- /dev/null
+++ b/runtime/Cpp/include/antlr3bitset.inl
@@ -0,0 +1,492 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template <class ImplTraits>
+ANTLR_INLINE BitsetList<ImplTraits>::BitsetList()
+{
+	m_bits = NULL;
+	m_length  = 0;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE BitsetList<ImplTraits>::BitsetList( ANTLR_BITWORD* bits, ANTLR_UINT32 length )
+{
+	m_bits = bits;
+	m_length  = length;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE ANTLR_BITWORD* BitsetList<ImplTraits>::get_bits() const
+{
+	return m_bits;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32 BitsetList<ImplTraits>::get_length() const
+{
+	return m_length;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE void BitsetList<ImplTraits>::set_bits( ANTLR_BITWORD* bits )
+{
+	m_bits = bits;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE void BitsetList<ImplTraits>::set_length( ANTLR_UINT32 length )
+{
+	m_length = length;
+}
+
+template <class ImplTraits>
+typename BitsetList<ImplTraits>::BitsetType* BitsetList<ImplTraits>::bitsetLoad()
+{
+	// Allocate a new bitset structure to copy this list of bit words into.
+	// If the list holds more words than the default size of the new bitset
+	// then grow() below will take care of it.
+	//
+	BitsetType* bitset  = new BitsetType();
+
+	if	(this != NULL)
+	{
+		// Now we can add the element bits into the set
+		//
+		ANTLR_UINT32 count=0;
+		while (count < m_length)
+		{
+			if( bitset->get_blist().get_length() <= count)
+				bitset->grow(count+1);
+
+			typename ImplTraits::BitsetListType& blist = bitset->get_blist();
+			blist.m_bits[count] = *(m_bits+count);
+			count++;
+		}
+	}
+
+	// return the new bitset
+	//
+	return  bitset;
+}
+
+template <class ImplTraits>
+typename BitsetList<ImplTraits>::BitsetType* BitsetList<ImplTraits>::bitsetCopy()
+{
+	BitsetType*  bitset;
+	ANTLR_UINT32 numElements = m_length;
+
+    // Avoid memory thrashing at the expense of a few more bytes
+    //
+    if	(numElements < 8)
+		numElements = 8;
+
+    // Allocate memory for the bitset structure itself
+    //
+    bitset  = new Bitset<ImplTraits>(numElements);
+	memcpy(bitset->get_blist().get_bits(), m_bits, numElements * sizeof(ANTLR_BITWORD));
+
+    // All seems good
+    //
+    return  bitset;
+}
+
+template <class ImplTraits>
+Bitset<ImplTraits>::Bitset( ANTLR_UINT32 numBits )
+{
+	// Avoid memory thrashing at the up front expense of a few bytes
+	if	(numBits < (8 * ANTLR_BITSET_BITS))
+		numBits = 8 * ANTLR_BITSET_BITS;
+
+	// Now we need to allocate the memory for the number of bits asked for
+	// in multiples of ANTLR3_UINT64. 
+	//
+	ANTLR_UINT32 numelements	= ((numBits -1) >> ANTLR_BITSET_LOG_BITS) + 1;
+
+	m_blist.set_bits( (ANTLR_BITWORD*) AllocPolicyType::alloc0(numelements * sizeof(ANTLR_BITWORD)));
+
+	m_blist.set_length( numelements );
+}
+
+template <class ImplTraits>
+Bitset<ImplTraits>::Bitset( const Bitset& bitset )
+	:m_blist(bitset.m_blist)
+{
+}
+
+template <class ImplTraits>
+ANTLR_INLINE Bitset<ImplTraits>*  Bitset<ImplTraits>::clone() const
+{
+	Bitset*  bitset;
+
+    // Allocate memory for the bitset structure itself
+    //
+    bitset  = new Bitset( ANTLR_BITSET_BITS * m_blist.get_length() );
+
+    // Install the actual bits in the source set
+    //
+    memcpy(bitset->m_blist.get_bits(), m_blist.get_bits(), 
+				m_blist.get_length() * sizeof(ANTLR_BITWORD) );
+
+    // All seems good
+    //
+    return  bitset;
+}
+
+template <class ImplTraits>
+Bitset<ImplTraits>*  Bitset<ImplTraits>::bor(Bitset* bitset2)
+{
+	Bitset*  bitset;
+
+    if	(this == NULL)
+		return bitset2->clone();
+
+    if	(bitset2 == NULL)
+		return	this->clone();
+
+    // Allocate memory for the newly OR'd bitset structure itself.
+    //
+    bitset  = this->clone();
+    bitset->bitsetORInPlace(bitset2);
+    return  bitset;
+}
+
+template <class ImplTraits>
+void	 Bitset<ImplTraits>::borInPlace(Bitset* bitset2)
+{
+	ANTLR_UINT32   minimum;
+
+    if	(bitset2 == NULL)
+		return;
+
+	// First make sure that the target bitset is big enough
+    // for the new bits to be ored in.
+    //
+    if	( m_blist.get_length() < bitset2->m_blist.get_length() )
+		this->growToInclude( bitset2->m_blist.get_length() * sizeof(ANTLR_BITWORD) );
+    
+    // Or the minimum number of bits after any resizing went on
+    //
+    if	( m_blist.get_length() < bitset2->m_blist.get_length() )
+		minimum = m_blist.get_length();
+	else
+		minimum = bitset2->m_blist.get_length();
+
+	ANTLR_BITWORD* bits1 = m_blist.get_bits();
+	ANTLR_BITWORD* bits2 = bitset2->m_blist.get_bits();
+	for	(ANTLR_UINT32 i = minimum; i > 0; i--)
+		bits1[i-1] |= bits2[i-1];
+}
+
+template <class ImplTraits>
+ANTLR_UINT32 Bitset<ImplTraits>::size() const
+{
+    ANTLR_UINT32   degree;
+    ANTLR_INT32   i;
+    ANTLR_INT8    bit;
+    
+    // TODO: Come back to this, it may be faster to & with 0x01
+    // then shift right a copy of the 4 bits, than shift left a constant of 1.
+    // But then again, the optimizer might just work this out
+    // anyway.
+    //
+    degree  = 0;
+	ANTLR_BITWORD* bits = m_blist.get_bits();
+    for	(i = m_blist.get_length() - 1; i>= 0; i--)
+    {
+		if  (bits[i] != 0)
+		{
+			for(bit = ANTLR_BITSET_BITS - 1; bit >= 0; bit--)
+			{
+				if((bits[i] & (((ANTLR_BITWORD)1) << bit)) != 0)
+				{
+					degree++;
+				}
+			}
+		}
+    }
+    return degree;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE void	Bitset<ImplTraits>::add(ANTLR_INT32 bit)
+{
+	ANTLR_UINT32   word = Bitset::WordNumber(bit);
+
+    if	(word	>= m_blist.get_length() )
+		this->growToInclude(bit);
+ 
+	ANTLR_BITWORD* bits = m_blist.get_bits();
+	bits[word] |= Bitset::BitMask(bit);
+}
+
+template <class ImplTraits>
+void	Bitset<ImplTraits>::grow(ANTLR_INT32 newSize)
+{
+	ANTLR_BITWORD*   newBits;
+
+    // Space for newly sized bitset - TODO: come back to this and use realloc?, it may
+    // be more efficient...
+    //
+    newBits =  (ANTLR_BITWORD*) AllocPolicyType::alloc0(newSize * sizeof(ANTLR_BITWORD) );
+    if	( m_blist.get_bits() != NULL)
+    {
+		// Copy existing bits
+		//
+		memcpy( newBits, m_blist.get_bits(), m_blist.get_length() * sizeof(ANTLR_BITWORD) );
+
+		// Out with the old bits... de de de derrr
+		//
+		AllocPolicyType::free( m_blist.get_bits() );
+    }
+
+    // In with the new bits... keerrrang.
+    //
+    m_blist.set_bits(newBits);
+    m_blist.set_length(newSize);
+}
+
+template <class ImplTraits>
+bool	Bitset<ImplTraits>::equals(Bitset* bitset2) const
+{
+    ANTLR_UINT32   minimum;
+    ANTLR_UINT32   i;
+
+    if	(this == NULL || bitset2 == NULL)
+		return	false;
+
+    // Work out the minimum comparison set
+    //
+    if	( m_blist.get_length() < bitset2->m_blist.get_length() )
+		minimum = m_blist.get_length();
+    else
+		minimum = bitset2->m_blist.get_length();
+
+    // Make sure the bits held in common are equal
+    //
+    for	(i = minimum - 1; i < minimum ; i--)
+    {
+		ANTLR_BITWORD* bits1 = m_blist.get_bits();
+		ANTLR_BITWORD* bits2 = bitset2->m_blist.get_bits();
+		if  ( bits1[i] != bits2[i])
+			return false;
+    }
+
+    // Now make sure the bits of the larger set are all turned
+    // off.
+    //
+    if	( m_blist.get_length() > minimum)
+    {
+		for (i = minimum ; i < m_blist.get_length(); i++)
+		{
+			ANTLR_BITWORD* bits = m_blist.get_bits();
+			if(bits[i] != 0)
+				return false;
+		}
+    }
+    else if (bitset2->m_blist.get_length() > minimum)
+    {
+		ANTLR_BITWORD* bits = m_blist.get_bits();
+		for (i = minimum; i < bitset2->m_blist.get_length(); i++)
+		{
+			if	( bits[i] != 0 )
+				return	false;
+		}
+    }
+
+    return  true;
+}
+
+template <class ImplTraits>
+bool	Bitset<ImplTraits>::isMember(ANTLR_UINT32 bit) const
+{
+    ANTLR_UINT32    wordNo = Bitset::WordNumber(bit);
+
+    if	(wordNo >= m_blist.get_length())
+		return false;
+    
+	ANTLR_BITWORD* bits = m_blist.get_bits();
+    if	( (bits[wordNo] & Bitset::BitMask(bit)) == 0)
+		return false;
+    else
+		return true;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32 Bitset<ImplTraits>::numBits() const
+{
+	return  m_blist.get_length() << ANTLR_BITSET_LOG_BITS;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE typename ImplTraits::BitsetListType& Bitset<ImplTraits>::get_blist()
+{
+	return m_blist;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE void Bitset<ImplTraits>::remove(ANTLR_UINT32 bit)
+{
+    ANTLR_UINT32    wordNo = Bitset::WordNumber(bit);
+
+    if	(wordNo < m_blist.get_length())
+	{
+		ANTLR_BITWORD* bits = m_blist.get_bits();
+		bits[wordNo] &= ~(Bitset::BitMask(bit));
+	}
+}
+
+template <class ImplTraits>
+ANTLR_INLINE bool Bitset<ImplTraits>::isNilNode() const
+{
+	ANTLR_UINT32    i;
+	ANTLR_BITWORD* bits = m_blist.get_bits();
+	for	(i = m_blist.get_length() -1 ; i < m_blist.get_length(); i--)
+	{
+		if(bits[i] != 0)
+			return false;
+	}
+	return  true;
+}
+
+template <class ImplTraits>
+ANTLR_INT32* Bitset<ImplTraits>::toIntList() const
+{
+	ANTLR_UINT32   numInts;	    // How many integers we will need
+    ANTLR_UINT32   numBits;	    // How many bits are in the set
+    ANTLR_UINT32   i;
+    ANTLR_UINT32   index;
+
+    ANTLR_INT32*  intList;
+
+    numInts = this->size() + 1;
+    numBits = this->numBits();
+ 
+    intList = (ANTLR_INT32*) AllocPolicyType::alloc(numInts * sizeof(ANTLR_INT32));
+    
+    intList[0] = numInts;
+
+    // Enumerate the bits that are turned on
+    //
+    for	(i = 0, index = 1; i<numBits; i++)
+    {
+		if  (this->isMember(i) == true)
+			intList[index++]    = i;
+    }
+
+    // Result set
+    //
+    return  intList;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE Bitset<ImplTraits>::~Bitset()
+{
+	if	(m_blist.get_bits() != NULL)
+		AllocPolicyType::free(m_blist.get_bits());
+    return;
+}
+
+template <class ImplTraits>
+void	Bitset<ImplTraits>::growToInclude(ANTLR_INT32 bit)
+{
+	ANTLR_UINT32	bl;
+	ANTLR_UINT32	nw;
+
+	bl = (m_blist.get_length() << 1);
+	nw = Bitset::NumWordsToHold(bit);
+
+	if	(bl > nw)
+		this->grow(bl);
+	else
+		this->grow(nw);
+}
+
+template <class ImplTraits>
+ANTLR_INLINE ANTLR_UINT64	Bitset<ImplTraits>::BitMask(ANTLR_UINT32 bitNumber)
+{
+	return  ((ANTLR_UINT64)1) << (bitNumber & (ANTLR_BITSET_MOD_MASK));
+}
+
+template <class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32	Bitset<ImplTraits>::NumWordsToHold(ANTLR_UINT32 bit)
+{
+	return  (bit >> ANTLR_BITSET_LOG_BITS) + 1;
+}
+
+template <class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32	Bitset<ImplTraits>::WordNumber(ANTLR_UINT32 bit)
+{
+	return  bit >> ANTLR_BITSET_LOG_BITS;
+}
+
+template <class ImplTraits>
+void Bitset<ImplTraits>::bitsetORInPlace(Bitset* bitset2)
+{
+	ANTLR_UINT32   minimum;
+    ANTLR_UINT32   i;
+
+    if	(bitset2 == NULL)
+		return;
+
+    // First make sure that the target bitset is big enough
+    // for the new bits to be ored in.
+    //
+    if	( m_blist.get_length() < bitset2->m_blist.get_length() )
+		this->growToInclude( bitset2->m_blist.get_length() * sizeof(ANTLR_BITWORD) );
+    
+    // Or the minimum number of bits after any resizing went on
+    //
+    if	( m_blist.get_length() < bitset2->m_blist.get_length() )
+		minimum = m_blist.get_length();
+	else
+		minimum = bitset2->m_blist.get_length();
+
+	ANTLR_BITWORD* bits1 = m_blist.get_bits();
+	ANTLR_BITWORD* bits2 = bitset2->m_blist.get_bits();
+    for	(i = minimum; i > 0; i--)
+		bits1[i-1] |= bits2[i-1];
+}
+
+template <class ImplTraits>
+Bitset<ImplTraits>* Bitset<ImplTraits>::BitsetOf(ANTLR_INT32 bit)
+{
+	// Allocate memory for the bitset structure itself.
+    // The input parameter is the bit number (0 based)
+    // to include in the bitset, so we need at least
+    // bit + 1 bits. If any argument indicates a bit higher
+    // than the default number of bits (0 means default size)
+    // then add() will take care of it.
+    //
+	Bitset<ImplTraits>* bitset = new Bitset<ImplTraits>(0);
+	bitset->add(bit);
+	return bitset;
+}
+
+template <class ImplTraits>
+Bitset<ImplTraits>* Bitset<ImplTraits>::BitsetOf(ANTLR_INT32 bit1, ANTLR_INT32 bit2)
+{
+	Bitset<ImplTraits>* bitset = Bitset<ImplTraits>::BitsetOf(bit1);
+	bitset->add(bit2);
+	return bitset;
+}
+
+//static 
+template <class ImplTraits>
+Bitset<ImplTraits>* Bitset<ImplTraits>::BitsetFromList(const IntListType& list)
+{
+	// We have no idea what exactly is in the list
+    // so create a default bitset and then just add stuff
+    // as we enumerate.
+    //
+    Bitset<ImplTraits>* bitset  = new Bitset<ImplTraits>(0);
+	for( int i = 0; i < list.size(); ++i )
+		bitset->add( list[i] );
+
+	return bitset;
+}
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3collections.hpp b/runtime/Cpp/include/antlr3collections.hpp
new file mode 100755
index 0000000..2111403
--- /dev/null
+++ b/runtime/Cpp/include/antlr3collections.hpp
@@ -0,0 +1,285 @@
+#ifndef	ANTLR3COLLECTIONS_HPP
+#define	ANTLR3COLLECTIONS_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+/* -------------- TRIE Interfaces ---------------- */
+
+/** Structure that holds the payload entry in an ANTLR3_INT_TRIE or ANTLR3_STRING_TRIE
+ */
+template< class ImplTraits, class DataType >
+class TrieEntry : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicy;
+
+private:
+	DataType			m_data;
+	TrieEntry*			m_next;	    /* Allows duplicate entries for same key in insertion order	*/
+
+public:
+	TrieEntry(const DataType& data, TrieEntry* next);
+	DataType& get_data();
+	const DataType& get_data() const;
+	TrieEntry* get_next() const;
+	void set_next( TrieEntry* next );
+};
+
+/** Structure that defines an element/node in an ANTLR_INT_TRIE
+ */
+template< class ImplTraits, class DataType >
+class IntTrieNode : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef TrieEntry<ImplTraits, DataType> TrieEntryType;
+	typedef TrieEntryType BucketsType;
+	
+private:
+    ANTLR_UINT32	m_bitNum;	/**< This is the left/right bit index for traversal along the nodes				*/
+    ANTLR_INTKEY	m_key;		/**< This is the actual key that the entry represents if it is a terminal node  */
+    BucketsType*	m_buckets;	/**< This is the data bucket(s) that the key indexes, which may be NULL			*/
+    IntTrieNode*	m_leftN;	/**< Pointer to the left node from here when sKey & bitNum = 0					*/
+    IntTrieNode*	m_rightN;	/**< Pointer to the right node from here when sKey & bitNum, = 1				*/
+
+public:
+	IntTrieNode();
+	~IntTrieNode();
+
+	ANTLR_UINT32 get_bitNum() const;
+	ANTLR_INTKEY get_key() const;
+	BucketsType* get_buckets() const;
+	IntTrieNode* get_leftN() const;
+	IntTrieNode* get_rightN() const;
+	void  set_bitNum( ANTLR_UINT32 bitNum );
+	void  set_key( ANTLR_INTKEY key );
+	void  set_buckets( BucketsType* buckets );
+	void  set_leftN( IntTrieNode* leftN );
+	void  set_rightN( IntTrieNode* rightN );
+};
+  
+/** Structure that defines an ANTLR3_INT_TRIE. For this particular implementation,
+ *  as you might expect, the key is turned into a "string" by looking at bit(key, depth)
+ *  of the integer key. Using 64 bit keys gives us a depth limit of 64 (or bit 0..63)
+ *  and potentially a huge trie. This is the algorithm for a Patricia Trie.
+ *  Note also that this trie [can] accept multiple entries for the same key and is
+ *  therefore a kind of elastic bucket patricia trie.
+ *
+ *  If you find this code useful, please feel free to 'steal' it for any purpose
+ *  as covered by the BSD license under which ANTLR is issued. You can cut the code
+ *  but as the ANTLR library is only about 50K (Windows Vista), you might find it 
+ *  easier to just link the library. Please keep all comments and licenses and so on
+ *  in any version of this you create of course.
+ *
+ *  Jim Idle.
+ *  
+ */
+class IntTrieBase
+{
+public:
+	static const ANTLR_UINT8* get_bitIndex();
+	static const ANTLR_UINT64* get_bitMask();
+};
+ 
+template< class ImplTraits, class DataType >
+class IntTrie : public ImplTraits::AllocPolicyType, public IntTrieBase
+{
+public:
+	typedef TrieEntry<ImplTraits, DataType> TrieEntryType;
+	typedef IntTrieNode<ImplTraits, DataType> IntTrieNodeType;
+	
+private:
+    IntTrieNodeType*	m_root;			/* Root node of this integer trie					*/
+    IntTrieNodeType*	m_current;		/* Used to traverse the TRIE with the next() method	*/
+    ANTLR_UINT32	m_count;			/* Current entry count								*/
+    bool			m_allowDups;		/* Whether this trie accepts duplicate keys			*/
+
+public:
+	/* INT TRIE Implementation of depth 64 bits, being the number of bits
+	 * in a 64 bit integer. 
+	 */
+    IntTrie( ANTLR_UINT32 depth );
+
+	/** Search the int Trie and return a pointer to the first bucket indexed
+	 *  by the key if it is contained in the trie, otherwise NULL.
+	 */
+    TrieEntryType*	get( ANTLR_INTKEY key);
+    bool		del( ANTLR_INTKEY key);
+
+	/** Add an entry into the INT trie.
+	 *  Basically we descend the trie as we do when searching it, which will
+	 *  locate the only node in the trie that can be reached by the bit pattern of the
+	 *  key. If the key is actually at that node, then if the trie accepts duplicates
+	 *  we add the supplied data in a new chained bucket to that data node. If it does
+	 *  not accept duplicates then we merely return FALSE in case the caller wants to know
+	 *  whether the key was already in the trie.
+	 *  If the node we locate is not the key we are looking to add, then we insert a new node
+	 *  into the trie with a bit index of the leftmost differing bit and the left or right 
+	 *  node pointing to itself or the data node we are inserting 'before'. 
+	 */
+    bool		add( ANTLR_INTKEY key, const DataType& data );
+    ~IntTrie();
+};
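+
+/* Illustrative sketch only (not part of the original source): the recognizer
+ * uses this trie for rule memoization, mapping a 64 bit start index to a stop
+ * marker. Assuming a concrete ImplTraits named Traits:
+ *
+ *   IntTrie<Traits, ANTLR_MARKER> memo(63);      // keys may use up to 63 bits
+ *   memo.add(ruleStartIndex, ruleStopIndex);     // ignored if the key exists and dups are off
+ *   TrieEntry<Traits, ANTLR_MARKER>* e = memo.get(ruleStartIndex);
+ *   if (e != NULL)
+ *   {
+ *       ANTLR_MARKER stop = e->get_data();       // previously memoized stop index
+ *   }
+ */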
+
+/**
+ * A topological sort system that given a set of dependencies of a node m on node n,
+ * can sort them in dependency order. This is a generally useful utility object
+ * that does not care what the things are it is sorting. Generally the set
+ * to be sorted will be numeric indexes into some other structure such as an ANTLR3_VECTOR.
+ * I have provided a sort method that given ANTLR3_VECTOR as an input will sort
+ * the vector entries in place, as well as a sort method that just returns an
+ * array of the sorted noded indexes, in case you are not sorting ANTLR3_VECTORS but
+ * some set of your own device.
+ *
+ * Of the two main algorithms that could be used, I chose to use the depth first
+ * search for unvisited nodes as a) This runs in linear time, and b) it is what
+ * we used in the ANTLR Tool to perform a topological sort of the input grammar files
+ * based on their dependencies.
+ */
+template<class ImplTraits>
+class Topo : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::BitsetType BitsetType;
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+
+private:
+    /**
+     * A vector of vectors of edges, built by calling the addEdge method()
+     * to indicate that node number n depends on node number m. Each entry in the vector
+     * contains a bitset, which has a bit index set for each node upon which the
+     * entry node depends.
+     */
+    BitsetType**	m_edges;
+
+    /**
+     * A vector used to build up the sorted output order. Note that
+     * as the vector contains UINT32 values, the maximum node index is
+     * 'limited' to 2^32, as nodes should be zero based.
+     */
+    ANTLR_UINT32*				m_sorted;
+
+    /**
+     * A vector used to detect cycles in the edge dependencies. It is used
+     * as a stack; each time we descend a node to one of its edges we
+     * add the node onto this stack. If we find a node that we have already
+     * visited in the stack, then it means there was a cycle such as 9->8->1->9,
+     * as the only way a node can be on the stack is if we are currently
+     * descending from it; we remove it from the stack as we exit from
+     * descending its dependencies.
+     */
+    ANTLR_UINT32*		m_cycle;
+
+    /**
+     * A flag that indicates the algorithm found a cycle in the edges
+     * such as 9->8->1->9
+     * If this flag is set after you have called one of the sort routines
+     * then the detected cycle will be contained in the cycle array and
+     * cycleLimit will point to the one after the last entry in the cycle.
+     */
+    bool				m_hasCycle;
+
+    /**
+     * A watermark used to accumulate potential cycles in the cycle array.
+     * This should be zero when we are done. Check hasCycle after calling one
+     * of the sort methods and if it is true then you can find the cycle
+     * in cycle[0]...cycle[cycleMark-1]
+     */
+    ANTLR_UINT32		m_cycleMark;
+    
+    /**
+     * One more than the largest node index that is contained in edges/sorted.
+     */
+    ANTLR_UINT32		m_limit;
+
+    /**
+     * The set of visited nodes as determined by a set entry in
+     * the bitmap.
+     */
+    BitsetType*			m_visited;
+
+public:
+	Topo();
+    /**
+     * A method that adds an edge from one node to another. An edge
+     * of n -> m indicates that node n is dependent on node m. Note that
+     * while building these edges, it is perfectly OK to add nodes out of
+     * sequence. So, if you have edges:
+     *
+     * 3 -> 0
+     * 2 -> 1
+     * 1 -> 3
+     *
+     * Then you can add them in that order, and so add node 3 before nodes 2 and 1.
+     *
+     */
+    void  addEdge(ANTLR_UINT32 edge, ANTLR_UINT32 dependency);
+
+
+    /**
+     * A method that returns a pointer to an array of sorted node indexes.
+     * The array is sorted in topological sorted order. Note that the array
+     * is only as large as the largest node index you created an edge for. This means
+     * that if you had an input of 32 nodes but the largest node with an edge
+     * was 16, then the returned array covers only the first 16 nodes; the
+     * remaining nodes can be left as they are, since they had no dependencies
+     * and do not need any particular sort order.
+     *
+     * NB: If the structure that contains the array is freed, then the sorted
+     * array will be freed too so you should use the value of limit to
+     * make a long term copy of this array if you do not want to keep the topo
+     * structure around as well.
+     */
+    ANTLR_UINT32*  sortToArray();
+
+    /** 
+     * A method that sorts the supplied ANTLR3_VECTOR in place based
+     * on the previously supplied edge data.
+     */
+	template<typename DataType>
+    void   sortVector( typename ImplTraits::template VectorType<DataType>& v);
+
+	void   DFS(ANTLR_UINT32 node);
+
+    /**
+     *  A method to free this structure and any associated memory.
+     */
+	~Topo();
+};
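
To make the dependency convention concrete (an edge n -> m means node n depends on node m, so m must appear before n in the output), here is a minimal stand-alone depth-first sort over std containers. It mirrors the approach described in the class comment above but is not the ANTLR class; the edge representation and function names are illustrative only.

// Hedged sketch: dependency-ordered sort of small integer node indexes.
#include <cstdio>
#include <vector>

// deps[n] lists the nodes that node n depends on (the "edges" of node n).
static void visit(unsigned node,
                  const std::vector<std::vector<unsigned>>& deps,
                  std::vector<bool>& visited,
                  std::vector<unsigned>& sorted)
{
    if (visited[node])
        return;
    visited[node] = true;

    // Emit all dependencies first, then the node itself.
    for (unsigned d : deps[node])
        visit(d, deps, visited, sorted);
    sorted.push_back(node);
}

int main()
{
    // 3 -> 0, 2 -> 1, 1 -> 3 (same example as the addEdge() documentation).
    std::vector<std::vector<unsigned>> deps(4);
    deps[3] = {0};
    deps[2] = {1};
    deps[1] = {3};

    std::vector<bool> visited(4, false);
    std::vector<unsigned> sorted;
    for (unsigned n = 0; n < deps.size(); ++n)
        visit(n, deps, visited, sorted);

    for (unsigned n : sorted)
        std::printf("%u ", n);          // prints a valid order, e.g. "0 3 1 2"
    std::printf("\n");
}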
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3collections.inl"
+    
+#endif
+
+
diff --git a/runtime/Cpp/include/antlr3collections.inl b/runtime/Cpp/include/antlr3collections.inl
new file mode 100755
index 0000000..fb713c2
--- /dev/null
+++ b/runtime/Cpp/include/antlr3collections.inl
@@ -0,0 +1,995 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template< class ImplTraits, class DataType >
+ANTLR_INLINE TrieEntry<ImplTraits, DataType>::TrieEntry(const DataType& data, TrieEntry* next)
+	:m_data(data)
+{
+	m_next = next;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE DataType& TrieEntry<ImplTraits, DataType>::get_data()
+{
+	return m_data;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE const DataType& TrieEntry<ImplTraits, DataType>::get_data() const
+{
+	return m_data;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE TrieEntry<ImplTraits, DataType>* TrieEntry<ImplTraits, DataType>::get_next() const
+{
+	return m_next;
+}
+
+template< class ImplTraits, class DataType >
+ANTLR_INLINE void TrieEntry<ImplTraits, DataType>::set_next( TrieEntry* next )
+{
+	m_next = next;
+}
+
+template< class ImplTraits, class DataType >
+ANTLR_INLINE ANTLR_UINT32 IntTrieNode<ImplTraits, DataType>::get_bitNum() const
+{
+	return m_bitNum;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE ANTLR_INTKEY IntTrieNode<ImplTraits, DataType>::get_key() const
+{
+	return m_key;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE typename IntTrieNode<ImplTraits, DataType>::BucketsType* IntTrieNode<ImplTraits, DataType>::get_buckets() const
+{
+	return m_buckets;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE IntTrieNode<ImplTraits, DataType>* IntTrieNode<ImplTraits, DataType>::get_leftN() const
+{
+	return m_leftN;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE IntTrieNode<ImplTraits, DataType>* IntTrieNode<ImplTraits, DataType>::get_rightN() const
+{
+	return m_rightN;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE void IntTrieNode<ImplTraits, DataType>::set_bitNum( ANTLR_UINT32 bitNum )
+{
+	m_bitNum = bitNum;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE void IntTrieNode<ImplTraits, DataType>::set_key( ANTLR_INTKEY key )
+{
+	m_key = key;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE void IntTrieNode<ImplTraits, DataType>::set_buckets( BucketsType* buckets )
+{
+	m_buckets = buckets;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE void IntTrieNode<ImplTraits, DataType>::set_leftN( IntTrieNode* leftN )
+{
+	m_leftN = leftN;
+}
+template< class ImplTraits, class DataType >
+ANTLR_INLINE void IntTrieNode<ImplTraits, DataType>::set_rightN( IntTrieNode* rightN )
+{
+	m_rightN = rightN;
+}
+
+ANTLR_INLINE const ANTLR_UINT8* IntTrieBase::get_bitIndex()
+{
+	static ANTLR_UINT8 bitIndex[256] = 
+	{ 
+		0,													// 0 - Just for padding
+		0,													// 1
+		1, 1,												// 2..3
+		2, 2, 2, 2,											// 4..7
+		3, 3, 3, 3, 3, 3, 3, 3,								// 8+
+		4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,	    // 16+
+		5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,	    // 32+
+		5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,	    
+		6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,	    // 64+
+		6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+		6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+		6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 
+		7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,	    // 128+
+		7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+		7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+		7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+		7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+		7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 
+		7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+		7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+	};
+	return bitIndex;
+}
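
The table above is simply a byte-wide "index of the highest set bit" lookup: bitIndex[b] is floor(log2(b)) for b in 1..255, with entry 0 present only as padding. A quick stand-alone check of that property, assuming nothing about the ANTLR types (the helper name is mine):

// Hedged sketch: verify that a highest-set-bit function has the same shape
// as the bitIndex table above.
#include <cassert>

static unsigned highestSetBit(unsigned byteValue)    // byteValue in 1..255
{
    unsigned index = 0;
    while (byteValue >>= 1)
        ++index;
    return index;
}

int main()
{
    assert(highestSetBit(1)   == 0);
    assert(highestSetBit(2)   == 1);
    assert(highestSetBit(3)   == 1);
    assert(highestSetBit(128) == 7);
    assert(highestSetBit(255) == 7);
    return 0;
}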
+
+ANTLR_INLINE const ANTLR_UINT64* IntTrieBase::get_bitMask()
+{
+	static ANTLR_UINT64 bitMask[64] = 
+	{
+		0x0000000000000001ULL, 0x0000000000000002ULL, 0x0000000000000004ULL, 0x0000000000000008ULL,
+		0x0000000000000010ULL, 0x0000000000000020ULL, 0x0000000000000040ULL, 0x0000000000000080ULL,
+		0x0000000000000100ULL, 0x0000000000000200ULL, 0x0000000000000400ULL, 0x0000000000000800ULL,
+		0x0000000000001000ULL, 0x0000000000002000ULL, 0x0000000000004000ULL, 0x0000000000008000ULL,
+		0x0000000000010000ULL, 0x0000000000020000ULL, 0x0000000000040000ULL, 0x0000000000080000ULL,
+		0x0000000000100000ULL, 0x0000000000200000ULL, 0x0000000000400000ULL, 0x0000000000800000ULL,
+		0x0000000001000000ULL, 0x0000000002000000ULL, 0x0000000004000000ULL, 0x0000000008000000ULL,
+		0x0000000010000000ULL, 0x0000000020000000ULL, 0x0000000040000000ULL, 0x0000000080000000ULL,
+		0x0000000100000000ULL, 0x0000000200000000ULL, 0x0000000400000000ULL, 0x0000000800000000ULL,
+		0x0000001000000000ULL, 0x0000002000000000ULL, 0x0000004000000000ULL, 0x0000008000000000ULL,
+		0x0000010000000000ULL, 0x0000020000000000ULL, 0x0000040000000000ULL, 0x0000080000000000ULL,
+		0x0000100000000000ULL, 0x0000200000000000ULL, 0x0000400000000000ULL, 0x0000800000000000ULL,
+		0x0001000000000000ULL, 0x0002000000000000ULL, 0x0004000000000000ULL, 0x0008000000000000ULL,
+		0x0010000000000000ULL, 0x0020000000000000ULL, 0x0040000000000000ULL, 0x0080000000000000ULL,
+		0x0100000000000000ULL, 0x0200000000000000ULL, 0x0400000000000000ULL, 0x0800000000000000ULL,
+		0x1000000000000000ULL, 0x2000000000000000ULL, 0x4000000000000000ULL, 0x8000000000000000ULL
+	};
+
+	return bitMask;
+}
+
+template< class ImplTraits, class DataType >
+IntTrie<ImplTraits, DataType>::IntTrie( ANTLR_UINT32 depth )
+{
+	/* Now we need to allocate the root node. This makes it easier
+	 * to use the tree as we don't have to do anything special 
+	 * for the root node.
+	 */
+	m_root	= new IntTrieNodeType;
+
+	/* Now we seed the root node with the index being the
+	 * highest left most bit we want to test, which limits the
+	 * keys in the trie. This is the trie 'depth'. The limit for
+	 * this implementation is 63 (bits 0..63).
+	 */
+	m_root->set_bitNum( depth );
+
+	/* And as we have nothing in here yet, we set both child pointers
+	 * of the root node to point back to itself.
+	 */
+	m_root->set_leftN( m_root );
+	m_root->set_rightN( m_root );
+	m_count			= 0;
+
+	/* Finally, note that the key for this root node is 0 because
+	 * the IntTrieNode constructor initialises it to zero.
+	 */
+	m_allowDups = false;
+	m_current   = NULL;
+}
+
+template< class ImplTraits, class DataType >
+IntTrie<ImplTraits, DataType>::~IntTrie()
+{
+    /* Descend from the root and free all the nodes
+     */
+    delete m_root;
+
+    /* the nodes are all gone now, so we need only free the memory
+     * for the structure itself
+     */
+}
+
+template< class ImplTraits, class DataType >
+typename IntTrie<ImplTraits, DataType>::TrieEntryType*	IntTrie<ImplTraits, DataType>::get( ANTLR_INTKEY key)
+{
+	IntTrieNodeType*    thisNode; 
+	IntTrieNodeType*    nextNode; 
+
+	if (m_count == 0)
+		return NULL;	    /* Nothing in this trie yet	*/
+
+	/* Starting at the root node in the trie, compare the bit index
+	 * of the current node with its next child node (starts left from root).
+	 * When the bit index of the child node is greater than the bit index of the current node
+	 * then by definition (as the bit index decreases as we descend the trie)
+	 * we have reached a 'backward' pointer. A backward pointer means we
+	 * have reached the only node that can be reached by the bits given us so far
+	 * and it must either be the key we are looking for, or if not then it
+	 * means the entry was not in the trie, and we return NULL. A backward pointer
+	 * points back into the tree structure rather than down (deeper) within the
+	 * tree branches.
+	 */
+	thisNode	= m_root;		/* Start at the root node		*/
+	nextNode	= thisNode->get_leftN();	/* Examine the left node from the root	*/
+
+	/* While we are descending the tree nodes...
+	 */
+	const ANTLR_UINT64* bitMask = this->get_bitMask();
+	while( thisNode->get_bitNum() > nextNode->get_bitNum() )
+	{
+		/* Next node now becomes the new 'current' node
+		 */
+		thisNode    = nextNode;
+
+		/* We now test the bit indicated by the bitmap in the next node
+		 * in the key we are searching for. The new next node is the
+		 * right node if that bit is set and the left node if it is not.
+		 */
+		if (key & bitMask[nextNode->get_bitNum()])
+		{
+			nextNode = nextNode->get_rightN();	/* 1 is right	*/
+		}
+		else
+		{
+			nextNode = nextNode->get_leftN();		/* 0 is left	*/
+		}
+	}
+
+	/* Here we have reached a node where the bitMap index is lower than
+	 * its parent. This means it is pointing backward in the tree and
+	 * must therefore be a terminal node, being the only point that can
+	 * be reached with the bits seen so far. It is either the actual key
+	 * we wanted, or if that key is not in the trie it is another key
+	 * that is currently the only one that can be reached by those bits.
+	 * That situation would obviously change if the key was to be added
+	 * to the trie.
+	 *
+	 * Hence it only remains to test whether this is actually the key or not.
+	 */
+	if (nextNode->get_key() == key)
+	{
+		/* This was the key, so return the entry pointer
+		 */
+		return	nextNode->get_buckets();
+	}
+	else
+	{
+		return	NULL;	/* That key is not in the trie (note that we set the pointer to -1 if no payload) */
+	}
+}
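
The descent in get() is driven purely by single-bit tests: at a node whose bitNum is b, the search goes right when bit b of the key is 1 and left when it is 0, and it stops as soon as it reaches a child whose bitNum is not smaller than its parent's (a back pointer). The fragment below only illustrates that bit-test walk for an example key; the node handling is omitted and the names are mine.

// Hedged sketch: the left/right decision made at each bit index during the
// trie descent, using plain 64-bit masks like bitMask[].
#include <cstdint>
#include <cstdio>

int main()
{
    const std::uint64_t key = 0xA;              // binary 1010
    for (int bit = 3; bit >= 0; --bit)
    {
        const std::uint64_t mask = 1ULL << bit; // bitMask[bit]
        std::printf("bit %d: go %s\n", bit, (key & mask) ? "right" : "left");
    }
    // Output: bit 3: right, bit 2: left, bit 1: right, bit 0: left
}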
+
+template< class ImplTraits, class DataType >
+bool	IntTrie<ImplTraits, DataType>::del( ANTLR_INTKEY key)
+{
+    IntTrieNodeType*   p;
+
+    p = m_root;
+    
+    return false;
+
+}
+
+template< class ImplTraits, class DataType >
+bool	IntTrie<ImplTraits, DataType>::add( ANTLR_INTKEY key, const DataType& data  )
+{
+	IntTrieNodeType*   thisNode;
+	IntTrieNodeType*   nextNode;
+	IntTrieNodeType*   entNode;
+	ANTLR_UINT32	depth;
+	TrieEntryType*	    newEnt;
+	TrieEntryType*	    nextEnt;
+	ANTLR_INTKEY		    xorKey;
+
+	/* Cache the bit depth of this trie, which is always the highest index, 
+	 * which is in the root node
+	 */
+	depth   = m_root->get_bitNum();
+
+	thisNode	= m_root;		/* Start with the root node	    */
+	nextNode	= m_root->get_leftN();	/* And assume we start to the left  */
+
+	/* Now find the only node that can be currently reached by the bits in the
+	 * key we are being asked to insert.
+	 */
+	const ANTLR_UINT64* bitMask = this->get_bitMask();
+	while (thisNode->get_bitNum()  > nextNode->get_bitNum() )
+	{
+		/* Still descending the structure, next node becomes current.
+		 */
+		thisNode = nextNode;
+
+		if (key & bitMask[nextNode->get_bitNum()])
+		{
+			/* Bit at the required index was 1, so traverse the right node from here
+			 */
+			nextNode = nextNode->get_rightN();
+		}
+		else
+		{
+			/* Bit at the required index was 0, so we traverse to the left
+			 */
+			nextNode = nextNode->get_leftN();
+		}
+	}
+	/* Here we have located the only node that can be reached by the
+	 * bits in the requested key. It could in fact be that key or the node
+	 * we need to use to insert the new key.
+	 */
+	if (nextNode->get_key() == key)
+	{
+		/* We have located an exact match, but we will only append to the bucket chain
+		 * if this trie accepts duplicate keys.
+		 */
+		if (m_allowDups ==true)
+		{
+			/* Yes, we are accepting duplicates
+			 */
+			newEnt = new TrieEntryType(data, NULL);
+
+			/* We want to be able to traverse the stored elements in the order that they were
+			 * added as duplicate keys. We might need to revise this opinion if we end up having many duplicate keys
+			 * as perhaps reverse order is just as good, so long as it is ordered.
+			 */
+			nextEnt = nextNode->get_buckets();
+			while (nextEnt->get_next() != NULL)
+			{
+				nextEnt = nextEnt->get_next();    
+			}
+			nextEnt->set_next(newEnt);
+
+			m_count++;
+			return  true;
+		}
+		else
+		{
+			/* We found the key is already there and we are not allowed duplicates in this
+			 * trie.
+			 */
+			return  false;
+		}
+	}
+
+	/* Here we have discovered the only node that can be reached by the bits in the key
+	 * but we have found that this node is not the key we need to insert. We must find the
+	 * leftmost bit by which the current key for that node and the new key we are going
+	 * to insert differ. While this nested series of ifs may look a bit strange, experimentation
+	 * showed that it allows a machine code path that works well with predicated execution
+	 */
+	xorKey = (key ^ nextNode->get_key() );   /* Gives 1 bits only where they differ, then we find the leftmost 1 bit */
+
+	/* Most common case is a 32 bit key really
+	 */
+	const ANTLR_UINT8* bitIndex = this->get_bitIndex();
+#ifdef	ANTLR_USE_64BIT
+	if	(xorKey & 0xFFFFFFFF00000000)
+	{
+		if  (xorKey & 0xFFFF000000000000)
+		{
+			if	(xorKey & 0xFF00000000000000)
+			{
+				depth = 56 + bitIndex[((xorKey & 0xFF00000000000000)>>56)];
+			}
+			else
+			{
+				depth = 48 + bitIndex[((xorKey & 0x00FF000000000000)>>48)];
+			}
+		}
+		else
+		{
+			if	(xorKey & 0x0000FF0000000000)
+			{
+				depth = 40 + bitIndex[((xorKey & 0x0000FF0000000000)>>40)];
+			}
+			else
+			{
+				depth = 32 + bitIndex[((xorKey & 0x000000FF00000000)>>32)];
+			}
+		}
+	}
+	else
+#endif
+	{
+		if  (xorKey & 0x00000000FFFF0000)
+		{
+			if	(xorKey & 0x00000000FF000000)
+			{
+				depth = 24 + bitIndex[((xorKey & 0x00000000FF000000)>>24)];
+			}
+			else
+			{
+				depth = 16 + bitIndex[((xorKey & 0x0000000000FF0000)>>16)];
+			}
+		}
+		else
+		{
+			if	(xorKey & 0x000000000000FF00)
+			{
+				depth = 8 + bitIndex[((xorKey & 0x000000000000FF00)>>8)];
+			}
+			else
+			{
+				depth = bitIndex[xorKey & 0x00000000000000FF];
+			}
+		}
+	}
+
+    /* We have located the leftmost differing bit, indicated by the depth variable. So, we know what
+     * bit index we are to insert the new entry at. There are two cases: either the two keys
+     * differ at a bit that is currently being skipped in the indexed comparisons, or they differ
+     * on a bit that is merely lower down in the current bit search. If the bit index chain went
+     * bit 4, bit 2 and the keys differ at bit 3, then we have the "skipped" bit case. But if that
+     * chain was bit 4, bit 2 and they differ at bit 1, then we have the easy bit <pun>.
+     *
+     * So, set up to descend the tree again, but this time looking for the insert point
+     * according to whether we skip the bit that differs or not.
+     */
+    thisNode	= m_root;
+    entNode	= m_root->get_leftN();
+
+    /* Note the slight difference in the checks here to cover both cases
+     */
+    while (thisNode->get_bitNum() > entNode->get_bitNum() && entNode->get_bitNum() > depth)
+    {
+		/* Still descending the structure, next node becomes current.
+		 */
+		thisNode = entNode;
+
+		if (key & bitMask[entNode->get_bitNum()])
+		{
+			/* Bit at the required index was 1, so traverse the right node from here
+			 */
+			entNode = entNode->get_rightN();
+		}
+		else
+		{
+			/* Bit at the required index was 0, so we traverse to the left
+			 */
+			entNode = entNode->get_leftN();
+		}
+    }
+
+    /* We have located the correct insert point for this new key, so we need
+     * to allocate our entry and insert it etc.
+     */
+    nextNode	= new IntTrieNodeType();
+
+    /* Build a new entry block for the new node
+     */
+    newEnt = new TrieEntryType(data, NULL);
+
+	/* Install it
+     */
+    nextNode->set_buckets(newEnt);
+    nextNode->set_key(key);
+    nextNode->set_bitNum( depth );
+
+    /* Work out the right and left pointers for this new node, which involve
+     * terminating with the current found node either right or left according
+     * to whether the current index bit is 1 or 0
+     */
+    if (key & bitMask[depth])
+    {
+		nextNode->set_leftN(entNode);	    /* Terminates at previous position	*/
+		nextNode->set_rightN(nextNode);	    /* Terminates with itself		*/
+    }
+    else
+    {
+		nextNode->set_rightN(entNode);	    /* Terminates at previous position	*/
+		nextNode->set_leftN(nextNode);	    /* Terminates with itself		*/		
+    }
+
+    /* Finally, we need to change the pointers at the node we located
+     * for inserting. If the key bit at its index is set then the right
+     * pointer for that node becomes the newly created node, otherwise the left 
+     * pointer does.
+     */
+    if (key & bitMask[thisNode->get_bitNum()] )
+    {
+		thisNode->set_rightN( nextNode );
+    }
+    else
+    {
+		thisNode->set_leftN(nextNode);
+    }
+
+    /* Et voila
+     */
+    m_count++;
+    return  true;
+}
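
The nested byte tests above exist only to compute the index of the leftmost bit at which the two keys differ; a simple (if slower) loop yields the same 'depth' value, which may help when checking the masks. This is an illustrative equivalent with a name of my own choosing, not the code the runtime uses.

// Hedged sketch: leftmost differing bit of two keys, the quantity the
// xorKey/bitIndex ladder above computes as 'depth'.
#include <cassert>
#include <cstdint>

static unsigned leftmostDifferingBit(std::uint64_t a, std::uint64_t b)
{
    std::uint64_t x = a ^ b;        // 1 bits only where the keys differ
    unsigned depth = 0;
    while (x >>= 1)                 // index of the highest 1 bit
        ++depth;
    return depth;
}

int main()
{
    assert(leftmostDifferingBit(0x01, 0x02) == 1);
    assert(leftmostDifferingBit(0x10, 0x1F) == 3);
    assert(leftmostDifferingBit(0xFF00, 0x0100) == 15);
    return 0;
}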
+
+template< class ImplTraits, class DataType >
+IntTrieNode<ImplTraits, DataType>::IntTrieNode()
+{
+	m_bitNum = 0;
+	m_key = 0;
+	m_buckets = NULL;
+	m_leftN = NULL;
+	m_rightN = NULL;
+}
+
+template< class ImplTraits, class DataType >
+IntTrieNode<ImplTraits, DataType>::~IntTrieNode()
+{
+	TrieEntryType*	thisEntry;
+    TrieEntryType*	nextEntry;
+
+    /* If this node has a left pointer that is not a back pointer
+     * then recursively call to free this
+     */
+    if ( m_bitNum > m_leftN->get_bitNum())
+    {
+		/* We have a left node that needs descending, so do it.
+		 */
+		delete m_leftN;
+    }
+
+    /* The left nodes from here should now be dealt with, so 
+     * we need to descend any right nodes that are not back pointers
+     */
+    if ( m_bitNum > m_rightN->get_bitNum() )
+    {
+		/* There are some right nodes to descend and deal with.
+		 */
+		delete m_rightN;
+    }
+
+    /* Now all the children are dealt with, we can destroy
+     * this node too
+     */
+    thisEntry	= m_buckets;
+
+    while (thisEntry != NULL)
+    {
+		nextEntry   = thisEntry->get_next();
+
+		/* Now free the data for this bucket entry
+		 */
+		delete thisEntry;
+		thisEntry = nextEntry;	    /* See if there are any more to free    */
+    }
+
+    /* The bucket entry is now gone, so we can free the memory for
+     * the entry itself.
+     */
+
+    /* And that should be it for everything under this node and itself
+     */
+}
+
+/**
+ * Construct a new topological sorter, which can be used to define edges that
+ * identify numerical node indexes that depend on other numerical node indexes,
+ * which can then be sorted topologically such that any node is sorted after
+ * all of the nodes it depends on.
+ *
+ * Use:
+ *
+ * \verbatim
+
+  Topo<ImplTraits> topo;
+
+  topo.addEdge(3, 0);          // Node 3 depends on node 0
+  topo.addEdge(0, 1);          // Node 0 depends on node 1
+  topo.sortVector(myVector);   // Sort the vector in place (node numbers are the vector entry numbers)
+
+ * \endverbatim
+ */
+template<class ImplTraits>
+Topo<ImplTraits>::Topo()
+{
+    // Initialize variables
+    //
+    m_visited   = NULL;                 // Don't know how big it is yet
+    m_limit     = 1;                    // No edges added yet
+    m_edges     = NULL;                 // No edges added yet
+    m_sorted    = NULL;                 // Nothing sorted at the start
+    m_cycle     = NULL;                 // No cycles at the start
+    m_cycleMark = 0;                    // No cycles at the start
+    m_hasCycle  = false;         // No cycle at the start
+}
+
+// Topological sorter
+//
+template<class ImplTraits>
+void Topo<ImplTraits>::addEdge(ANTLR_UINT32 edge, ANTLR_UINT32 dependency)
+{
+	ANTLR_UINT32   i;
+    ANTLR_UINT32   maxEdge;
+    BitsetType*  edgeDeps;
+
+    if (edge>dependency)
+    {
+        maxEdge = edge;
+    }
+    else
+    {
+        maxEdge = dependency;
+    }
+    // We need to add an edge to say that the node indexed by 'edge' is
+    // dependent on the node indexed by 'dependency'
+    //
+
+    // First see if we have enough room in the edges array to add the edge?
+    //
+    if ( m_edges == NULL)
+    {
+        // We don't have any edges yet, so create an array to hold them
+        //
+        m_edges = AllocPolicyType::alloc0(sizeof(BitsetType*) * (maxEdge + 1));
+
+        // Set the limit to what we have now
+        //
+        m_limit = maxEdge + 1;
+    }
+    else if (m_limit <= maxEdge)
+    {
+        // We have some edges but not enough room
+        //
+        m_edges = AllocPolicyType::realloc(m_edges, sizeof(BitsetType*) * (maxEdge + 1));
+
+        // Initialize the new bitmaps to indicate we have no edges defined yet
+        //
+        for (i = m_limit; i <= maxEdge; i++)
+        {
+            *((m_edges) + i) = NULL;
+        }
+
+        // Set the limit to what we have now
+        //
+        m_limit = maxEdge + 1;
+    }
+
+    // If the edge was flagged as depending on itself, then we just
+    // do nothing as it means this routine was just called to add it
+    // in to the list of nodes.
+    //
+    if  (edge == dependency)
+    {
+        return;
+    }
+
+    // Pick up the bit map for the requested edge
+    //
+    edgeDeps = *((m_edges) + edge);
+
+    if  (edgeDeps == NULL)
+    {
+        // No edges are defined yet for this node
+        //
+        edgeDeps                = new BitsetType(0);
+        *((m_edges) + edge) = edgeDeps;
+    }
+
+    // Set the bit in the bitmap that corresponds to the requested
+    // dependency.
+    //
+    edgeDeps->add(dependency);
+
+    // And we are all set
+    //
+    return;
+
+}
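
addEdge() effectively maintains an array indexed by node number, where each slot holds a bitset of that node's dependencies and the array is grown on demand to cover the largest node index seen so far; a self-edge only registers the node. An equivalent shape with std containers is sketched below; the containers and lambda are illustrative stand-ins for the AllocPolicy/Bitset storage used above.

// Hedged sketch: edge storage as "node index -> set of dependency bits".
#include <cstdio>
#include <set>
#include <vector>

int main()
{
    std::vector<std::set<unsigned>> edges;   // stands in for the BitsetType* array

    auto addEdge = [&](unsigned edge, unsigned dependency)
    {
        const unsigned maxEdge = edge > dependency ? edge : dependency;
        if (edges.size() <= maxEdge)
            edges.resize(maxEdge + 1);       // grow to cover the new limit
        if (edge != dependency)              // self-edges only register the node
            edges[edge].insert(dependency);
    };

    addEdge(3, 0);
    addEdge(2, 1);
    addEdge(1, 3);

    for (unsigned n = 0; n < edges.size(); ++n)
        std::printf("node %u has %zu dependencies\n", n, edges[n].size());
}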
+
+/**
+ * Given a starting node, descend its dependent nodes (ones that it has edges
+ * to) until we find one without edges. Having found a node without edges, we have
+ * discovered the bottom of a depth first search, which we can then ascend, adding
+ * the nodes in order from the bottom, which gives us the dependency order.
+ */
+template<class ImplTraits>
+void Topo<ImplTraits>::DFS(ANTLR_UINT32 node)
+{
+	BitsetType* edges;
+
+    // Guard against a revisit and check for cycles
+    //
+    if  (m_hasCycle == true)
+    {
+        return; // We don't do anything else if we found a cycle
+    }
+
+    if  ( m_visited->isMember(node))
+    {
+        // Check to see if we found a cycle. To do this we search the
+        // current cycle stack and see if we find this node already in the stack.
+        //
+        ANTLR_UINT32   i;
+
+        for (i=0; i< m_cycleMark; i++)
+        {
+            if  ( m_cycle[i] == node)
+            {
+                // Stop! We found a cycle in the input, so rejig the cycle
+                // stack so that it only contains the cycle and set the cycle flag
+                // which will tell the caller what happened
+                //
+                ANTLR_UINT32 l;
+
+                for (l = i; l < m_cycleMark; l++)
+                {
+                    m_cycle[l - i] = m_cycle[l];    // Move to zero base in the cycle list
+                }
+
+                // Recalculate the limit
+                //
+                m_cycleMark -= i;
+
+                // Signal disaster
+                //
+                m_hasCycle = true;
+            }
+        }
+        return;
+    }
+
+    // So far, no cycles have been found and we have not visited this node yet,
+    // so this node needs to go into the cycle stack before we continue
+    // then we will take it out of the stack once we have descended all its
+    // dependencies.
+    //
+    m_cycle[m_cycleMark++] = node;
+
+    // First flag that we have visited this node
+    //
+    m_visited->add(node);
+
+    // Now, if this node has edges, then we want to ensure we visit
+    // them all before we drop through and add this node into the sorted
+    // list.
+    //
+    edges = *((m_edges) + node);
+    if  (edges != NULL)
+    {
+        // We have some edges, so visit each of the edge nodes
+        // that have not already been visited.
+        //
+        ANTLR_UINT32   numBits;	    // How many bits are in the set
+        ANTLR_UINT32   i;
+        ANTLR_UINT32   range;
+
+        numBits = edges->numBits();
+        range   = edges->size();   // Number of set bits
+
+        // Stop if we exhaust the bit list or have checked the
+        // number of edges that this node refers to (so we don't
+        // check bits at the end that cannot possibly be set).
+        //
+        for (i=0; i<= numBits && range > 0; i++)
+        {
+            if  (edges->isMember(i))
+            {
+                range--;        // About to check another one
+
+                // Found an edge, make sure we visit and descend it
+                //
+                this->DFS(i);
+            }
+        }
+    }
+
+    // At this point we will have visited all the dependencies
+    // of this node and they will be ordered (even if there are cycles)
+    // So we just add the node into the sorted list at the
+    // current index position.
+    //
+    m_sorted[m_limit++] = node;
+
+    // Remove this node from the cycle list if we have not detected a cycle
+    //
+    if  (m_hasCycle == false)
+    {
+        m_cycleMark--;
+    }
+
+    return;
+}
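
The cycle bookkeeping above works by keeping a stack of the nodes currently being descended: meeting a node that is already on that stack means the edges loop back on themselves. A compact stand-alone illustration of that rule follows; the names and containers are mine, not the runtime's m_cycle/m_cycleMark machinery.

// Hedged sketch: detecting a dependency cycle with a "currently descending"
// stack, the same idea as the cycle stack used by DFS() above.
#include <algorithm>
#include <cstdio>
#include <vector>

static bool hasCycle(unsigned node,
                     const std::vector<std::vector<unsigned>>& deps,
                     std::vector<unsigned>& stack)
{
    if (std::find(stack.begin(), stack.end(), node) != stack.end())
        return true;                         // node is already being descended
    stack.push_back(node);
    for (unsigned d : deps[node])
        if (hasCycle(d, deps, stack))
            return true;
    stack.pop_back();                        // finished descending this node
    return false;
}

int main()
{
    std::vector<std::vector<unsigned>> deps(3);
    deps[0] = {1};
    deps[1] = {2};
    deps[2] = {0};                           // 0 -> 1 -> 2 -> 0 is a cycle

    std::vector<unsigned> stack;
    std::printf("cycle: %s\n", hasCycle(0, deps, stack) ? "yes" : "no");
}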
+
+template<class ImplTraits>
+ANTLR_UINT32*  Topo<ImplTraits>::sortToArray()
+{
+	ANTLR_UINT32 v;
+    ANTLR_UINT32 oldLimit;
+
+    // Guard against being called with no edges defined
+    //
+    if  (m_edges == NULL)
+    {
+        return 0;
+    }
+    // First we need a vector to populate with enough
+    // entries to accommodate the sorted list and another to accommodate
+    // the maximum cycle we could detect, which is all nodes such as 0->1->2->3->0
+    //
+    m_sorted    = AllocPolicyType::alloc( m_limit * sizeof(ANTLR_UINT32) );
+    m_cycle     = AllocPolicyType::alloc( m_limit * sizeof(ANTLR_UINT32));
+
+    // Next we need an empty bitset to show whether we have visited a node
+    // or not. This is the bit that gives us linear time of course as we are essentially
+    // dropping through the nodes in depth first order and when we get to a node that
+    // has no edges, we pop back up the stack adding the nodes we traversed in reverse
+    // order.
+    //
+    m_visited   = new BitsetType(0);
+
+    // Now traverse the nodes as if we were just going left to right, but
+    // then descend each node unless it has already been visited.
+    //
+    oldLimit    = m_limit;     // Number of nodes to traverse linearly
+    m_limit = 0;               // Next entry in the sorted table
+
+    for (v = 0; v < oldLimit; v++)
+    {
+        // If we did not already visit this node, then descend it until we
+        // get a node without edges or arrive at a node we have already visited.
+        //
+        if  (m_visited->isMember(v) == false)
+        {
+            // We have not visited this one so descend it
+            //
+            this->DFS(v);
+        }
+
+        // Break the loop if we detect a cycle as we have no need to go any
+        // further
+        //
+        if  (m_hasCycle == true)
+        {
+            break;
+        }
+    }
+
+    // Reset the limit to the number we recorded as if we hit a
+    // cycle, then limit will have stopped at the node where we
+    // discovered the cycle, but in order to free the edge bitmaps
+    // we need to know how many we may have allocated and traverse them all.
+    //
+    m_limit = oldLimit;
+
+    // Having traversed all the nodes we were given, we
+    // are guaranteed to have ordered all the nodes or detected a
+    // cycle.
+    //
+    return m_sorted;
+}
+
+template<class ImplTraits>
+	template<typename DataType>
+void   Topo<ImplTraits>::sortVector(  typename ImplTraits::template VectorType<DataType>& v )
+{
+    // To sort a vector, we first perform the
+    // sort to an array, then use the results to reorder the vector
+    // we are given. This is just a convenience routine that allows you to
+    // sort the children of a tree node into topological order before or
+    // during an AST walk. This can be useful for optimizations that require
+    // dag reorders and also when the input stream defines things that are
+    // interdependent and you want to walk the list of the generated trees
+    // for those things in topological order so you can ignore the interdependencies
+    // at that point.
+    //
+    ANTLR_UINT32 i;
+
+    // Used as a lookup index to find the current location in the vector of
+    // the vector entry that was originally at position [0], [1], [2] etc
+    //
+    ANTLR_UINT32*  vIndex;
+
+    // Sort into an array, then we can use the array that is
+    // stored in the topo
+    //
+    if  (this->sortToArray() == 0)
+    {
+        return;     // There were no edges
+    }
+
+    if  (m_hasCycle == true)
+    {
+        return;  // Do nothing if we detected a cycle
+    }
+
+    // Ensure that the vector we are sorting is at least as big as the
+    // input sequence we were asked to sort. It does not matter if it is
+    // bigger, as that probably just means that nodes numbered higher than the
+    // limit had no dependencies and so can be left alone.
+    //
+    if  (m_limit > v.size() )
+    {
+        // We can only sort the entries that we have dude! The caller is
+        // responsible for ensuring the vector is the correct one and is the
+        // correct size etc.
+        //
+        m_limit = v.size();
+    }
+    // We need to know the locations of each of the entries
+    // in the vector as we don't want to duplicate them in a new vector. We
+    // just use an indirection table to get the vector entry for a particular sequence
+    // according to where we moved it last. Then we can just swap vector entries until
+    // we are done :-)
+    //
+    vIndex = AllocPolicyType::alloc(m_limit * sizeof(ANTLR_UINT32));
+
+    // Start index, each vector entry is located where you think it is
+    //
+    for (i = 0; i < m_limit; i++)
+    {
+        vIndex[i] = i;
+    }
+
+    // Now we traverse the sorted array and moved the entries of
+    // the vector around according to the sort order and the indirection
+    // table we just created. The index tells us where in the vector the
+    // original element entry n is now located via vIndex[n].
+    //
+    for (i=0; i < m_limit; i++)
+    {
+        ANTLR_UINT32   ind;
+
+        // If the vector entry at i is already the one that it
+        // should be, then we skip moving it of course.
+        //
+        if  (vIndex[m_sorted[i]] == i)
+        {
+            continue;
+        }
+
+        // The vector entry at i, should be replaced with the
+        // vector entry indicated by topo->sorted[i]. The vector entry
+        // at topo->sorted[i] may have already been swapped out though, so we
+        // find where it is now and move it from there to i.
+        //
+        ind     = vIndex[m_sorted[i]];
+		std::swap( v[i], v[ind] );
+
+        // Update our index. The element at i is now the one we wanted
+        // to be sorted here and the element we swapped out is now the
+        // element that was at i just before we swapped it. If you are lost now
+        // don't worry about it, we are just reindexing on the fly is all.
+        //
+        vIndex[m_sorted[i]] = i;
+        vIndex[i] = ind;
+    }
+
+    // Having traversed all the entries, we have sorted the vector in place.
+    //
+    AllocPolicyType::free(vIndex);
+    return;
+}
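
sortVector() reorders the vector purely by swapping entries, using an index table to remember where each original element currently lives. The stand-alone sketch below shows that reorder-by-swapping idea with my own bookkeeping (a position table in each direction), so it is a correct illustration of the technique rather than a line-for-line copy of the vIndex handling above.

// Hedged sketch: put original element sorted[i] at position i by swapping,
// tracking both "where does original element e live now" (pos) and
// "which original element is at position p" (elem).
#include <cstdio>
#include <utility>
#include <vector>

int main()
{
    std::vector<char>     v      = {'a', 'b', 'c', 'd'};
    std::vector<unsigned> sorted = {2, 0, 3, 1};       // desired original indexes
    const unsigned n = static_cast<unsigned>(v.size());

    std::vector<unsigned> pos(n), elem(n);
    for (unsigned i = 0; i < n; ++i)
        pos[i] = elem[i] = i;

    for (unsigned i = 0; i < n; ++i)
    {
        const unsigned want = sorted[i];
        const unsigned ind  = pos[want];               // where that element is now
        if (ind == i)
            continue;                                  // already in place
        std::swap(v[i], v[ind]);

        const unsigned displaced = elem[i];            // element pushed out of slot i
        elem[i]   = want;      pos[want]      = i;
        elem[ind] = displaced; pos[displaced] = ind;
    }

    for (char c : v)
        std::printf("%c", c);                          // prints "cadb"
    std::printf("\n");
}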
+
+template<class ImplTraits>
+Topo<ImplTraits>::~Topo()
+{
+    ANTLR_UINT32   i;
+
+    // Free the result vector
+    //
+    if  (m_sorted != NULL)
+    {
+        AllocPolicyType::free(m_sorted);
+    }
+
+    // Free the visited map
+    //
+    if  (m_visited != NULL)
+    {
+		delete m_visited;
+    }
+
+    // Free any edgemaps
+    //
+    if  (m_edges != NULL)
+    {
+        Bitset<AllocPolicyType>* edgeList;
+
+        for (i=0; i<m_limit; i++)
+        {
+            edgeList = *((m_edges) + i);
+            if  (edgeList != NULL)
+            {
+				delete edgeList;
+            }
+        }
+
+        AllocPolicyType::free( m_edges );
+    }
+    m_edges = NULL;
+    
+    // Free any cycle map
+    //
+    if  (m_cycle != NULL)
+    {
+        AllocPolicyType::free(m_cycle);
+    }
+}
+
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3commontoken.hpp b/runtime/Cpp/include/antlr3commontoken.hpp
new file mode 100755
index 0000000..fd306e7
--- /dev/null
+++ b/runtime/Cpp/include/antlr3commontoken.hpp
@@ -0,0 +1,250 @@
+/** \file
+ * \brief Defines the interface for a common token.
+ *
+ * All token streams should provide their tokens using an instance
+ * of this common token. A custom pointer is provided, where you may attach
+ * a further structure to enhance the common token if you feel the need
+ * to do so. The runtime will assume that a token provides implementations
+ * of the interface functions, but all of them may be replaced by your own
+ * implementation if you require it.
+ */
+#ifndef	_ANTLR3_COMMON_TOKEN_HPP
+#define	_ANTLR3_COMMON_TOKEN_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    <stdlib.h>
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+/** The definition of an ANTLR3 common token structure, which all implementations
+ * of a token stream should provide, installing any further structures in the
+ * custom pointer element of this structure.
+ *
+ * \remark
+ * Token streams are in essence provided by lexers or other programs that serve
+ * as lexers.
+ */
+
+template<class ImplTraits>
+class CommonToken : public ImplTraits::AllocPolicyType
+{
+public:
+	/* Base token types, which all lexer/parser tokens come after in sequence.
+	*/
+	enum TOKEN_TYPE
+	{
+		/** Indicator of an invalid token
+		 */
+		TOKEN_INVALID =	0
+		, EOR_TOKEN_TYPE	
+		/** Imaginary token type to cause a traversal of child nodes in a tree parser
+		 */
+		, TOKEN_DOWN		
+		/** Imaginary token type to signal the end of a stream of child nodes.
+		 */
+		, TOKEN_UP	
+		/** First token that can be used by users/generated code
+		 */
+		, MIN_TOKEN_TYPE =	TOKEN_UP + 1
+
+		/** End of file token
+		 */
+		, TOKEN_EOF =	(ANTLR_CHARSTREAM_EOF & 0xFFFFFFFF)
+	};
+
+	typedef typename ImplTraits::TokenIntStreamType TokenIntStreamType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::InputStreamType InputStreamType;
+	typedef typename ImplTraits::StreamDataType StreamDataType;
+
+private:
+    /** The actual type of this token
+     */
+    ANTLR_UINT32   m_type;
+
+	/** The virtual channel that this token exists in.
+     */
+    ANTLR_UINT32	m_channel;
+	
+	mutable StringType		m_tokText;
+
+    /** The offset into the input stream that the line in which this
+     *  token resides starts.
+     */
+	const StreamDataType*	m_lineStart;
+
+	/** The line number in the input stream where this token was derived from
+     */
+    ANTLR_UINT32	m_line;
+
+    /** The character position in the line that this token was derived from
+     */
+    ANTLR_INT32		m_charPositionInLine;
+
+    /** Pointer to the input stream that this token originated in.
+     */
+    InputStreamType*    m_input;
+
+    /** What the index of this token is, 0, 1, .., n-2, n-1 tokens
+     */
+    ANTLR_MARKER		m_index;
+
+    /** The character offset in the input stream where the text for this token
+     *  starts.
+     */
+    ANTLR_MARKER		m_startIndex;
+
+    /** The character offset in the input stream where the text for this token
+     *  stops.
+     */
+    ANTLR_MARKER		m_stopIndex;
+
+public:
+	CommonToken();
+	CommonToken(ANTLR_UINT32 type);
+	CommonToken(TOKEN_TYPE type);
+	CommonToken( const CommonToken& ctoken );
+
+	CommonToken& operator=( const CommonToken& ctoken );
+	bool operator==( const CommonToken& ctoken ) const;
+	bool operator<( const CommonToken& ctoken ) const;
+
+	InputStreamType* get_input() const;
+	ANTLR_MARKER get_index() const;
+	void set_index( ANTLR_MARKER index );
+	void set_input( InputStreamType* input );
+
+    /* ==============================
+     * API 
+     */
+
+    /** Function that returns the text of a token; use
+     *  toString() if you want a formatted string version of the token.
+     */
+    StringType  getText() const;
+	
+    /** Pointer to a function that 'might' be able to set the text associated
+     *  with a token. Imaginary tokens such as an ANTLR3_CLASSIC_TOKEN may actually
+     *  do this, however many tokens such as ANTLR3_COMMON_TOKEN do not actually have
+     *  strings associated with them but just point into the current input stream. These
+     *  tokens will implement this function with a function that errors out (probably
+     *  drastically).
+     */
+    void set_tokText( const StringType& text );
+
+    /** Pointer to a function that 'might' be able to set the text associated
+     *  with a token. Imaginary tokens such as an ANTLR3_CLASSIC_TOKEN may actually
+     *  do this, however many tokens such as ANTLR3_COMMON_TOKEN do not actually have
+     *  strings associated with them but just point into the current input stream. These
+     *  tokens will implement this function with a function that errors out (probably
+     *  drastically).
+     */
+    void	setText(ANTLR_UINT8* text);
+	void	setText(const char* text);
+
+    /** Pointer to a function that returns the token type of this token
+     */
+    ANTLR_UINT32  get_type() const;
+	ANTLR_UINT32  getType() const;
+
+    /** Pointer to a function that sets the type of this token
+     */
+    void	set_type(ANTLR_UINT32 ttype);
+
+    /** Pointer to a function that gets the 'line' number where this token resides
+     */
+    ANTLR_UINT32   get_line() const;
+
+    /** Pointer to a function that sets the 'line' number where this token resides
+     */
+    void set_line(ANTLR_UINT32 line);
+
+    /** Pointer to a function that gets the offset in the line where this token exists
+     */ 
+    ANTLR_INT32  get_charPositionInLine() const;
+	ANTLR_INT32  getCharPositionInLine() const;
+
+    /** Pointer to a function that sets the offset in the line where this token exists
+     */
+    void	set_charPositionInLine(ANTLR_INT32 pos);
+
+    /** Pointer to a function that gets the channel that this token was placed in (parsers
+     *  can 'tune' to these channels).
+     */
+    ANTLR_UINT32   get_channel() const;
+
+    /** Pointer to a function that sets the channel that this token should belong to
+     */
+    void set_channel(ANTLR_UINT32 channel);
+
+    /** Pointer to a function that returns an index 0...n-1 of the token in the token
+     *  input stream.
+     */
+    ANTLR_MARKER  get_tokenIndex() const;
+
+    /** Pointer to a function that can set the token index of this token in the token
+     *  input stream.
+     */
+    void	set_tokenIndex(ANTLR_MARKER tokenIndex);
+
+    /** Pointer to a function that gets the start index in the input stream for this token.
+     */
+    ANTLR_MARKER   get_startIndex() const;
+
+    /** Pointer to a function that sets the start index in the input stream for this token.
+     */
+    void	set_startIndex(ANTLR_MARKER index);
+    
+    /** Pointer to a function that gets the stop index in the input stream for this token.
+     */
+    ANTLR_MARKER  get_stopIndex() const;
+
+    /** Pointer to a function that sets the stop index in the input stream for this token.
+     */
+    void	set_stopIndex(ANTLR_MARKER index);
+	const StreamDataType* get_lineStart() const;
+	void	set_lineStart( const StreamDataType* lineStart );
+
+    /** Pointer to a function that returns this token as a text representation that can be 
+     *  printed with embedded control codes such as \n replaced with the printable sequence "\\n".
+     *  This also yields a string structure that can be used more easily than the pointer to 
+     *  the input stream in certain situations.
+     */
+    StringType  toString() const;
+
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3commontoken.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3commontoken.inl b/runtime/Cpp/include/antlr3commontoken.inl
new file mode 100755
index 0000000..87194dc
--- /dev/null
+++ b/runtime/Cpp/include/antlr3commontoken.inl
@@ -0,0 +1,322 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+CommonToken<ImplTraits>::CommonToken()
+{
+	m_type = 0;
+    m_channel = 0;
+	m_lineStart = NULL;
+	m_line = 0;
+	m_charPositionInLine = 0;
+	m_input = NULL;
+	m_index = 0;
+	m_startIndex = 0;
+	m_stopIndex = 0;
+}
+
+template<class ImplTraits>
+CommonToken<ImplTraits>::CommonToken(ANTLR_UINT32 type)
+{
+	m_type = type;
+	m_channel = 0;
+	m_lineStart = NULL;
+	m_line = 0;
+	m_charPositionInLine = 0;
+	m_input = NULL;
+	m_index = 0;
+	m_startIndex = 0;
+	m_stopIndex = 0;
+}
+
+template<class ImplTraits>
+CommonToken<ImplTraits>::CommonToken(TOKEN_TYPE type)
+{
+	m_type = type;
+	m_channel = 0;
+	m_lineStart = NULL;
+	m_line = 0;
+	m_charPositionInLine = 0;
+	m_input = NULL;
+	m_index = 0;
+	m_startIndex = 0;
+	m_stopIndex = 0;
+}
+
+template<class ImplTraits>
+CommonToken<ImplTraits>::CommonToken( const CommonToken& ctoken )
+	:m_tokText( ctoken.m_tokText )
+{
+	m_type = ctoken.m_type;
+	m_channel = ctoken.m_channel;
+	m_lineStart = ctoken.m_lineStart;
+	m_line = ctoken.m_line;
+	m_charPositionInLine = ctoken.m_charPositionInLine;
+	m_input = ctoken.m_input;
+	m_index = ctoken.m_index;
+	m_startIndex = ctoken.m_startIndex;
+	m_stopIndex = ctoken.m_stopIndex;
+}
+
+template<class ImplTraits>
+CommonToken<ImplTraits>& CommonToken<ImplTraits>::operator=( const CommonToken& ctoken )
+{
+	m_type = ctoken.m_type;
+	m_channel = ctoken.m_channel;
+	m_lineStart = ctoken.m_lineStart;
+	m_line = ctoken.m_line;
+	m_charPositionInLine = ctoken.m_charPositionInLine;
+	m_input = ctoken.m_input;
+	m_index = ctoken.m_index;
+	m_startIndex = ctoken.m_startIndex;
+	m_stopIndex = ctoken.m_stopIndex;
+
+	m_tokText = ctoken.m_tokText;
+	return *this;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE bool CommonToken<ImplTraits>::operator<( const CommonToken& ctoken ) const
+{
+	return (m_index < ctoken.m_index);
+}
+
+template<class ImplTraits>
+bool CommonToken<ImplTraits>::operator==( const CommonToken& ctoken ) const
+{
+	return ( (m_type == ctoken.m_type) &&
+		     (m_channel == ctoken.m_channel) &&
+			 (m_lineStart == ctoken.m_lineStart) &&
+			 (m_line == ctoken.m_line) &&
+			 (m_charPositionInLine == ctoken.m_charPositionInLine) &&
+			 (m_input == ctoken.m_input) &&
+			 (m_index == ctoken.m_index) &&
+			 (m_startIndex == ctoken.m_startIndex) &&
+			 (m_stopIndex == ctoken.m_stopIndex) );
+}
+
+template<class ImplTraits>
+ANTLR_INLINE typename CommonToken<ImplTraits>::InputStreamType* CommonToken<ImplTraits>::get_input() const
+{
+	return m_input;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_MARKER CommonToken<ImplTraits>::get_index() const
+{
+	return m_index;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void CommonToken<ImplTraits>::set_index( ANTLR_MARKER index )
+{
+	m_index = index;
+}
+
+template<class ImplTraits>
+void CommonToken<ImplTraits>::set_input( InputStreamType* input )
+{
+	m_input = input;
+}
+
+template<class ImplTraits>
+typename CommonToken<ImplTraits>::StringType  CommonToken<ImplTraits>::getText() const
+{
+	if ( !m_tokText.empty() )
+		return m_tokText;
+
+	// EOF is a special case
+	//
+	if ( m_type == TOKEN_EOF)
+	{
+		m_tokText	= "<EOF>";
+		return m_tokText;
+	}
+
+	// We had nothing installed in the token, create a new string
+	// from the input stream
+	//
+	if	(m_input != NULL)
+		return	m_input->substr(	this->get_startIndex(), this->get_stopIndex() );
+
+	// Nothing to return, there is no input stream
+	//
+	return "";
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void CommonToken<ImplTraits>::set_tokText( const StringType& text )
+{
+	m_tokText = text;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void CommonToken<ImplTraits>::setText(ANTLR_UINT8* text)
+{
+	if( text == NULL )
+		m_tokText.clear();
+	else
+		m_tokText = (const char*) text;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void	CommonToken<ImplTraits>::setText(const char* text)
+{
+	if( text == NULL )
+		m_tokText.clear();
+	else
+		m_tokText = (const char*) text;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32  CommonToken<ImplTraits>::get_type() const
+{
+	return m_type;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32  CommonToken<ImplTraits>::getType() const
+{
+	return m_type;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void	CommonToken<ImplTraits>::set_type(ANTLR_UINT32 ttype)
+{
+	m_type = ttype;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32   CommonToken<ImplTraits>::get_line() const
+{
+	return m_line;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void CommonToken<ImplTraits>::set_line(ANTLR_UINT32 line)
+{
+	m_line = line;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_INT32  CommonToken<ImplTraits>::get_charPositionInLine() const
+{
+	return m_charPositionInLine;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_INT32  CommonToken<ImplTraits>::getCharPositionInLine() const
+{
+	return this->get_charPositionInLine();
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void	CommonToken<ImplTraits>::set_charPositionInLine(ANTLR_INT32 pos)
+{
+	m_charPositionInLine = pos;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32   CommonToken<ImplTraits>::get_channel() const
+{
+	return m_channel;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void CommonToken<ImplTraits>::set_channel(ANTLR_UINT32 channel)
+{
+	m_channel = channel;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_MARKER  CommonToken<ImplTraits>::get_tokenIndex() const
+{
+	return m_index;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void	CommonToken<ImplTraits>::set_tokenIndex(ANTLR_MARKER tokenIndex)
+{
+	m_index = tokenIndex;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_MARKER   CommonToken<ImplTraits>::get_startIndex() const
+{
+	return (m_startIndex == -1) ? (ANTLR_MARKER)(m_input->get_data()) : m_startIndex;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void	CommonToken<ImplTraits>::set_startIndex(ANTLR_MARKER index)
+{
+	m_startIndex = index;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_MARKER  CommonToken<ImplTraits>::get_stopIndex() const
+{
+	return m_stopIndex;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void	CommonToken<ImplTraits>::set_stopIndex(ANTLR_MARKER index)
+{
+	m_stopIndex = index;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE const typename CommonToken<ImplTraits>::StreamDataType* CommonToken<ImplTraits>::get_lineStart() const
+{
+	return m_lineStart;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void	CommonToken<ImplTraits>::set_lineStart( const StreamDataType* lineStart )
+{
+	m_lineStart = lineStart;
+}
+
+template<class ImplTraits>
+typename CommonToken<ImplTraits>::StringType  CommonToken<ImplTraits>::toString() const
+{
+    StringType  text;
+    typedef typename ImplTraits::StringStreamType StringStreamType;
+    StringStreamType  outtext; 
+
+    text    =	this->getText();
+
+    if	(text.empty())
+		return "";
+
+    /* Now we use our handy dandy string utility to assemble
+     * the reporting string
+     * return "[@"+getTokenIndex()+","+start+":"+stop+"='"+txt+"',<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+"]";
+     */
+    outtext << "[Index: ";
+    outtext << (int)this->get_tokenIndex();
+    outtext << " (Start: ";
+    outtext << (int)this->get_startIndex();
+    outtext << "-Stop: ";
+    outtext << (int)this->get_stopIndex();
+    outtext << ") ='";
+    outtext << text;
+    outtext << "', type<";
+    outtext << (int)m_type;
+    outtext << "> ";
+
+    if	(this->get_channel() > TOKEN_DEFAULT_CHANNEL)
+    {
+		outtext << "(channel = ";
+		outtext << (int)this->get_channel();
+		outtext << ") ";
+    }
+
+    outtext << "Line: ";
+    outtext << (int)this->get_line();
+    outtext << " LinePos:";
+    outtext << (int)this->get_charPositionInLine();
+    outtext << "]";
+
+    return  outtext.str();
+}
+
+ANTLR_END_NAMESPACE()
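
The toString() implementation above just streams the token's fields into a readable bracketed form. For reference, here is a stand-alone reproduction of the same formatting with plain std::ostringstream; the field values are made up for illustration and the optional channel clause is omitted.

// Hedged sketch: the shape of the string produced by CommonToken::toString().
#include <iostream>
#include <sstream>
#include <string>

int main()
{
    // Made-up field values for illustration.
    const int index = 4, start = 10, stop = 12, type = 7, line = 2, pos = 5;
    const std::string text = "foo";

    std::ostringstream out;
    out << "[Index: " << index
        << " (Start: " << start << "-Stop: " << stop
        << ") ='" << text << "', type<" << type << "> "
        << "Line: " << line << " LinePos:" << pos << "]";

    std::cout << out.str() << "\n";
    // [Index: 4 (Start: 10-Stop: 12) ='foo', type<7> Line: 2 LinePos:5]
}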
diff --git a/runtime/Cpp/include/antlr3commontree.hpp b/runtime/Cpp/include/antlr3commontree.hpp
new file mode 100755
index 0000000..39096f8
--- /dev/null
+++ b/runtime/Cpp/include/antlr3commontree.hpp
@@ -0,0 +1,139 @@
+/** Interface for an ANTLR3 common tree which is what gets
+ *  passed around by the AST producing parser.
+ */
+
+#ifndef	_ANTLR3_COMMON_TREE_HPP
+#define	_ANTLR3_COMMON_TREE_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+class CommonTree : public ImplTraits::AllocPolicyType				   
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename ImplTraits::StringType	StringType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef CommonTree TokenType;
+	typedef typename AllocPolicyType::template VectorType<TreeType*> ChildrenType;
+	typedef typename AllocPolicyType::template ListType<TreeType*>	ChildListType;
+
+private:
+	/// The list of all the children that belong to this node. They are not part of the node
+    /// as they belong to the common tree node that implements this.
+    ///
+    ChildrenType		m_children;
+
+    /// This is used to store the current child index position while descending
+    /// and ascending trees as the tree walk progresses.
+    ///
+    ANTLR_MARKER		m_savedIndex;
+
+    /// Start token index that encases this tree
+    ///
+    ANTLR_MARKER		m_startIndex;
+
+    /// End token that encases this tree
+    ///
+    ANTLR_MARKER		m_stopIndex;
+
+    /// A single token, this is the payload for the tree
+    ///
+    CommonTokenType*    m_token;
+
+	/// Points to the node that has this node as a child.
+	/// If this is NULL, then this is the root node.
+	///
+	CommonTree*			m_parent;
+
+	/// What index is this particular node in the child list it
+	/// belongs to?
+	///
+	ANTLR_INT32			m_childIndex;
+
+public:
+	CommonTree();
+	CommonTree( CommonTokenType* token );
+	CommonTree( CommonTree* token );
+	CommonTree( const CommonTree& ctree );
+
+	TokenType*   get_token() const;
+	ChildrenType& get_children();
+	const ChildrenType& get_children() const;
+	ChildrenType* get_children_p();
+	ANTLR_INT32	get_childIndex() const;
+	TreeType* get_parent() const;
+
+	void    set_parent( TreeType* parent);
+	void    set_childIndex( ANTLR_INT32 );
+
+	void	addChild(TreeType* child);
+	/// Add all elements of the supplied list as children of this node
+	///
+	void	addChildren(const ChildListType& kids);
+	void    createChildrenList();
+	TreeType*	deleteChild(ANTLR_UINT32 i);
+	/// Delete children from start to stop and replace with t even if t is
+	/// a list (nil-root tree). Num of children can increase or decrease.
+	/// For huge child lists, inserting children can force walking rest of
+	/// children to set their child index; could be slow.
+	///
+	void	replaceChildren(ANTLR_INT32 startChildIndex, ANTLR_INT32 stopChildIndex, TreeType* t);
+	CommonTree*	dupNode() const;
+	TreeType*	dupTree();
+	ANTLR_UINT32	getCharPositionInLine();
+	TreeType*	getChild(ANTLR_UINT32 i);
+	
+	ANTLR_UINT32	getChildCount() const;
+	ANTLR_UINT32	getType();
+	TreeType*	getFirstChildWithType(ANTLR_UINT32 type);
+	ANTLR_UINT32	getLine();
+	StringType	getText();
+	bool	isNilNode();
+	void	setChild(ANTLR_UINT32 i, TreeType* child);
+	StringType	toStringTree();
+	StringType	toString();
+	void	freshenPACIndexesAll();
+	void	freshenPACIndexes(ANTLR_UINT32 offset);
+	void    reuse();
+	~CommonTree();
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3commontree.inl"
+
+#endif
+
+
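
The tree node declared above is essentially a token payload plus a vector of child pointers, with each child remembering its parent and its index in the parent's child list. A minimal stand-alone node with the same parent/child-index bookkeeping is sketched below; it is illustrative only and does not use the ImplTraits-based class.

// Hedged sketch: parent/childIndex bookkeeping of a simple AST-style node.
#include <cstdio>
#include <string>
#include <vector>

struct ToyTree
{
    std::string           text;
    ToyTree*              parent     = nullptr;
    int                   childIndex = -1;
    std::vector<ToyTree*> children;

    explicit ToyTree(std::string t) : text(std::move(t)) {}

    void addChild(ToyTree* child)
    {
        child->parent     = this;
        child->childIndex = static_cast<int>(children.size());
        children.push_back(child);
    }
};

int main()
{
    ToyTree root("+"), lhs("1"), rhs("2");
    root.addChild(&lhs);
    root.addChild(&rhs);

    std::printf("%s has %zu children; '%s' is child #%d\n",
                root.text.c_str(), root.children.size(),
                rhs.text.c_str(), rhs.childIndex);
}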
diff --git a/runtime/Cpp/include/antlr3commontree.inl b/runtime/Cpp/include/antlr3commontree.inl
new file mode 100755
index 0000000..8a3111b
--- /dev/null
+++ b/runtime/Cpp/include/antlr3commontree.inl
@@ -0,0 +1,565 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+CommonTree<ImplTraits>::CommonTree()
+{
+	m_savedIndex = 0;
+	m_startIndex = 0;
+	m_stopIndex  = 0;
+	m_token		 = NULL;
+	m_parent     = NULL;
+	m_childIndex = 0;
+}
+
+template<class ImplTraits>
+CommonTree<ImplTraits>::CommonTree( const CommonTree& ctree )
+	:m_children( ctree.m_children)
+{
+	m_savedIndex = ctree.m_savedIndex;
+	m_startIndex = ctree.m_startIndex;
+	m_stopIndex  = ctree.m_stopIndex;
+	m_token		 = ctree.m_token;
+	m_parent     = ctree.m_parent;
+	m_childIndex = ctree.m_childIndex;
+}
+
+template<class ImplTraits>
+CommonTree<ImplTraits>::CommonTree( CommonTokenType* token )
+{
+	m_savedIndex = 0;
+	m_startIndex = 0;
+	m_stopIndex  = 0;
+	m_token		 = token;
+	m_parent     = NULL;
+	m_childIndex = 0;
+}
+
+template<class ImplTraits>
+CommonTree<ImplTraits>::CommonTree( CommonTree* tree )
+{
+	m_savedIndex = 0;
+	m_startIndex = 0;
+	m_stopIndex  = 0;
+	m_token		 = tree->get_token();
+	m_parent     = NULL;
+	m_childIndex = 0;
+}
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::TokenType*   CommonTree<ImplTraits>::get_token() const
+{
+	return m_token;
+}
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::ChildrenType& CommonTree<ImplTraits>::get_children()
+{
+	return m_children;
+}
+
+template<class ImplTraits>
+const typename CommonTree<ImplTraits>::ChildrenType& CommonTree<ImplTraits>::get_children() const
+{
+	return m_children;
+}
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::ChildrenType* CommonTree<ImplTraits>::get_children_p()
+{
+	return &m_children;
+}
+
+template<class ImplTraits>
+void	CommonTree<ImplTraits>::addChild(TreeType* child)
+{
+	ANTLR_UINT32   n;
+	ANTLR_UINT32   i;
+
+	if	(child == NULL)
+		return;
+
+	ChildrenType& child_children = child->get_children();
+	ChildrenType& tree_children  = this->get_children();
+
+	if	(child->isNilNode() == true)
+	{
+		if ( !child_children.empty() && child_children == tree_children )
+		{
+			// TODO: Change to exception rather than ANTLR3_FPRINTF?
+			//
+			fprintf(stderr, "ANTLR3: An attempt was made to add a child list to itself!\n");
+			return;
+		}
+
+        // Add all of the children's children to this list
+        //
+        if ( !child_children.empty() )
+        {
+            if (tree_children.empty())
+            {
+                // We are building the tree structure here, so we need not
+                // worry about duplication of pointers as the tree node
+                // factory will only clean up each node once. So we just
+                // take over the child's children (by swapping the vectors)
+                // as the child is a nil node (has no root of its own).
+                //
+                tree_children.swap( child_children );
+                this->freshenPACIndexesAll();               
+            }
+            else
+            {
+                // Need to copy the children
+                //
+                n = child_children.size();
+
+                for (i = 0; i < n; i++)
+                {
+                    TreeType* entry;
+                    entry = child_children[i];
+
+                    // ANTLR3 lists can be sparse, unlike Array Lists
+                    //
+                    if (entry != NULL)
+                    {
+                        tree_children.push_back(entry);
+                    }
+                }
+            }
+		}
+	}
+	else
+	{
+		// Tree we are adding is not a Nil and might have children to copy
+		//
+		if  (tree_children.empty())
+		{
+			// No children in the tree we are adding to, so create a new list on
+			// the fly to hold them.
+			//
+			this->createChildrenList();
+		}
+		tree_children.push_back( child );
+	}
+}
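+
+// Editor's note (illustrative only): addChild() flattens nil-rooted lists. If
+// "child" is a nil node carrying children [a, b], those children are spliced
+// into this node's own list (by swapping or copying the vectors) and the nil
+// wrapper itself is never stored, so
+//
+//   root.addChild( nilList );     // nilList == nil(a, b)
+//
+// leaves root with children [a, b]. A non-nil child is appended as a single
+// entry, and a NULL child is silently ignored.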
+
+template<class ImplTraits>
+void	CommonTree<ImplTraits>::addChildren(const ChildListType& kids)
+{
+	for( typename ChildListType::const_iterator iter = kids.begin();
+		 iter != kids.end(); ++iter )
+	{
+		this->addChild( *iter );
+	}
+}
+
+// Dummy implementation: the children vector is a direct member of the node,
+// so there is nothing to allocate lazily here.
+template<class ImplTraits>
+void    CommonTree<ImplTraits>::createChildrenList()
+{
+}
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::TreeType*	CommonTree<ImplTraits>::deleteChild(ANTLR_UINT32 i)
+{
+	if( m_children.empty() || i >= m_children.size() )
+		return	NULL;
+
+	// Remember the child so we can return it to the caller; vector::erase
+	// yields an iterator, not the removed element.
+	TreeType* killed = m_children[i];
+	m_children.erase( m_children.begin() + i);
+	return  killed;
+}
+
+template<class ImplTraits>
+void	CommonTree<ImplTraits>::replaceChildren(ANTLR_INT32 startChildIndex, ANTLR_INT32 stopChildIndex, TreeType* newTree)
+{
+	ANTLR_INT32	replacingHowMany;		// How many nodes will go away
+	ANTLR_INT32	replacingWithHowMany;	// How many nodes will replace them
+	ANTLR_INT32	numNewChildren;			// Tracking variable
+	ANTLR_INT32	delta;					// Difference in new vs existing count
+
+	ANTLR_INT32	i;
+	ANTLR_INT32	j;
+
+	if	( m_children.empty() )
+	{
+		fprintf(stderr, "replaceChildren call: Indexes are invalid; no children in list for %s", this->getText().c_str() );
+		return;
+	}
+
+	// Either use the existing list of children in the supplied nil node, or build a vector of the
+	// tree we were given if it is not a nil node, then we treat both situations exactly the same
+	//
+	ChildrenType newChildren_temp;
+	ChildrenType*	newChildren;			// Iterator for whatever we are going to add in
+
+	if	(newTree->isNilNode())
+	{
+		newChildren = newTree->get_children_p();
+	}
+	else
+	{
+		newChildren = &newChildren_temp;
+		newChildren->push_back(newTree);
+	}
+
+	// Initialize
+	//
+	replacingHowMany		= stopChildIndex - startChildIndex + 1;
+	replacingWithHowMany	= newChildren->size();
+	delta					= replacingHowMany - replacingWithHowMany;
+	numNewChildren			= newChildren->size();
+
+	// If it is the same number of nodes, then do a direct replacement
+	//
+	if	(delta == 0)
+	{
+		TreeType*	child;
+
+		// Same number of nodes
+		//
+		ChildrenType& parent_children = this->get_children();
+		j	= 0;
+		for	(i = startChildIndex; i <= stopChildIndex; i++)
+		{
+			child = newChildren->at(j);
+			parent_children[i] = child;
+			child->setParent(this);
+			child->setChildIndex(i);
+			j++;		// Step through the replacement nodes in parallel
+		}
+	}
+	else if (delta > 0)
+	{
+		ANTLR_UINT32	indexToDelete;
+
+		// Less nodes than there were before
+		// reuse what we have then delete the rest
+		//
+		ChildrenType& parent_children = this->get_children();
+		for	(j = 0; j < numNewChildren; j++)
+		{
+			parent_children[ startChildIndex + j ] = newChildren->at(j);
+		}
+
+		// We just delete the same index position until done
+		//
+		indexToDelete = startChildIndex + numNewChildren;
+
+		for	(j = indexToDelete; j <= stopChildIndex; j++)
+		{
+			parent_children.erase( parent_children.begin() + indexToDelete);
+		}
+
+		this->freshenPACIndexes(startChildIndex);
+	}
+	else
+	{
+		ChildrenType& parent_children = this->get_children();
+		ANTLR_UINT32 numToInsert;
+
+		// More nodes than there were before
+		// Use what we can, then start adding
+		//
+		for	(j = 0; j < replacingHowMany; j++)
+		{
+			parent_children[ startChildIndex + j ] = newChildren->at(j);
+		}
+
+		numToInsert = replacingWithHowMany - replacingHowMany;
+
+		for	(j = replacingHowMany; j < replacingWithHowMany; j++)
+		{
+			// Insert at startChildIndex + j so the new nodes land in place
+			// rather than being appended to the end of the child list
+			parent_children.insert( parent_children.begin() + startChildIndex + j, newChildren->at(j) );
+		}
+
+		this->freshenPACIndexes(startChildIndex);
+	}
+}
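+
+// Editor's note (illustrative only): a worked example of the index arithmetic
+// above. With children [a, b, c, d], the call replaceChildren(1, 2, t) where t
+// is the nil list (x, y, z) gives replacingHowMany == 2, replacingWithHowMany
+// == 3 and delta == -1: slots 1 and 2 are overwritten with x and y, z is
+// inserted after them, and freshenPACIndexes(1) renumbers the tail, leaving
+// [a, x, y, z, d].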
+
+template<class ImplTraits>
+CommonTree<ImplTraits>*	CommonTree<ImplTraits>::dupNode() const
+{
+	// The node we are duplicating is in fact the common tree (that's why we are here)
+    // so we use the super pointer to duplicate.
+    //
+    TreeType*   clone = new TreeType();
+
+	// The pointer we return is the base implementation of course
+    //
+	clone->set_token( m_token );
+	return  clone;
+}
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::TreeType*	CommonTree<ImplTraits>::dupTree()
+{
+	TreeType*	newTree;
+	ANTLR_UINT32	i;
+	ANTLR_UINT32	s;
+
+	newTree = this->dupNode();
+
+	if	( !m_children.empty() )
+	{
+		s	    = m_children.size();
+
+		for	(i = 0; i < s; i++)
+		{
+			TreeType*    t;
+			TreeType*    newNode;
+
+			t   = m_children[i];
+
+			if  (t!= NULL)
+			{
+				newNode	    = t->dupTree();
+				newTree->addChild(newNode);
+			}
+		}
+	}
+
+	return newTree;
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	CommonTree<ImplTraits>::getCharPositionInLine()
+{
+	CommonTokenType*    token;
+	token   = m_token;
+
+	if	(token == NULL || (token->getCharPositionInLine() == -1) )
+	{
+		if  (this->getChildCount() > 0)
+		{
+			TreeType*	child;
+
+			child   = this->getChild(0);
+
+			return child->getCharPositionInLine();
+		}
+		return 0;
+	}
+	return  token->getCharPositionInLine();
+}
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::TreeType*	CommonTree<ImplTraits>::getChild(ANTLR_UINT32 i)
+{
+	if	(  m_children.empty()
+		|| i >= m_children.size() )
+	{
+		return NULL;
+	}
+	return  m_children[i];
+
+}
+
+template<class ImplTraits>
+void    CommonTree<ImplTraits>::set_childIndex( ANTLR_INT32 i)
+{
+	m_childIndex = i;
+}
+
+template<class ImplTraits>
+ANTLR_INT32	CommonTree<ImplTraits>::get_childIndex() const
+{
+	return m_childIndex;
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	CommonTree<ImplTraits>::getChildCount() const
+{
+	return static_cast<ANTLR_UINT32>( m_children.size() );
+}
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::TreeType* CommonTree<ImplTraits>::get_parent() const
+{
+	return m_parent;
+}
+
+template<class ImplTraits>
+void     CommonTree<ImplTraits>::set_parent( TreeType* parent)
+{
+	m_parent = parent;
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	CommonTree<ImplTraits>::getType()
+{
+	// Guard on the payload token rather than on "this"; calling a member on a
+	// null pointer is undefined behaviour and cannot be tested reliably here.
+	if	(m_token == NULL)
+	{
+		return	0;
+	}
+	else
+	{
+		return	m_token->getType();
+	}
+}
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::TreeType*	CommonTree<ImplTraits>::getFirstChildWithType(ANTLR_UINT32 type)
+{
+	ANTLR_UINT32   i;
+	std::size_t   cs;
+
+	TreeType*	t;
+	if	( !m_children.empty() )
+	{
+		cs	= m_children.size();
+		for	(i = 0; i < cs; i++)
+		{
+			t = m_children[i];
+			if  (t->getType() == type)
+			{
+				return  t;
+			}
+		}
+	}
+	return  NULL;
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	CommonTree<ImplTraits>::getLine()
+{
+	TreeType*	    cTree = this;
+	CommonTokenType* token;
+	token   = cTree->get_token();
+
+	if	(token == NULL || token->getLine() == 0)
+	{
+		if  ( this->getChildCount() > 0)
+		{
+			TreeType*	child;
+			child   = this->getChild(0);
+			return child->getLine();
+		}
+		return 0;
+	}
+	return  token->getLine();
+}
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::StringType	CommonTree<ImplTraits>::getText()
+{
+	return this->toString();
+}
+
+template<class ImplTraits>
+bool	CommonTree<ImplTraits>::isNilNode()
+{
+	// This is a Nil tree if it has no payload (Token in our case)
+	//
+	if(m_token == NULL)
+	{
+		return true;
+	}
+	else
+	{
+		return false;
+	}
+}
+
+template<class ImplTraits>
+void	CommonTree<ImplTraits>::setChild(ANTLR_UINT32 i, TreeType* child)
+{
+	if( m_children.size() <= i )
+		m_children.resize(i+1);
+	m_children[i] = child;
+}
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::StringType	CommonTree<ImplTraits>::toStringTree()
+{
+	StringType  string;
+	ANTLR_UINT32   i;
+	ANTLR_UINT32   n;
+	TreeType*   t;
+
+	if( m_children.empty() )
+	{
+		return	this->toString();
+	}
+
+	/* Need a new string with nothing at all in it.
+	*/
+	if	(this->isNilNode() == false)
+	{
+		string.append("(");
+		string.append(this->toString());
+		string.append(" ");
+	}
+	if	( !m_children.empty() )
+	{
+		n = m_children.size();
+
+		for	(i = 0; i < n; i++)
+		{   
+			t   = m_children[i];
+
+			if  (i > 0)
+			{
+				string.append(" ");
+			}
+			string.append(t->toStringTree());
+		}
+	}
+	if	(this->isNilNode() == false)
+	{
+		string.append(")");
+	}
+
+	return  string;
+}
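+
+// Editor's note (illustrative only): toStringTree() renders the subtree in the
+// usual ANTLR LISP-like form. A node "+" with children "3" and "4" prints as
+// "(+ 3 4)", a childless node prints just its own text, and a nil root prints
+// its children without the surrounding parentheses.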
+
+template<class ImplTraits>
+typename CommonTree<ImplTraits>::StringType	CommonTree<ImplTraits>::toString()
+{
+	if  (this->isNilNode() )
+	{
+		StringType  nilNode;
+
+		nilNode	= "nil";
+
+		return nilNode;
+	}
+
+	return	m_token->getText();
+}
+
+template<class ImplTraits>
+void	CommonTree<ImplTraits>::freshenPACIndexesAll()
+{
+	this->freshenPACIndexes(0);
+}
+
+template<class ImplTraits>
+void	CommonTree<ImplTraits>::freshenPACIndexes(ANTLR_UINT32 offset)
+{
+	ANTLR_UINT32	count;
+	ANTLR_UINT32	c;
+
+	count	= this->getChildCount();		// How many children do we have 
+
+	// Loop from the supplied index and set the indexes and parent
+	//
+	for	(c = offset; c < count; c++)
+	{
+		TreeType*	child;
+
+		child = this->getChild(c);
+
+		child->setChildIndex(c);
+		child->setParent(this);
+	}
+}
+
+template<class ImplTraits>
+void    CommonTree<ImplTraits>::reuse()
+{
+	delete this; //memory re-use should be taken by the library user
+}
+
+template<class ImplTraits>
+CommonTree<ImplTraits>::~CommonTree()
+{
+}
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3commontreeadaptor.hpp b/runtime/Cpp/include/antlr3commontreeadaptor.hpp
new file mode 100755
index 0000000..8b40f1c
--- /dev/null
+++ b/runtime/Cpp/include/antlr3commontreeadaptor.hpp
@@ -0,0 +1,163 @@
+/** \file
+ * Definition of the ANTLR3 common tree adaptor.
+ */
+
+#ifndef	_ANTLR3_COMMON_TREE_ADAPTOR_HPP
+#define	_ANTLR3_COMMON_TREE_ADAPTOR_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+class CommonTreeAdaptor : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef	TreeType TokenType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+	typedef typename ImplTraits::DebugEventListenerType DebuggerType;
+
+public:
+	//The parameter is there only to provide uniform constructor interface
+	CommonTreeAdaptor(DebuggerType* dbg = NULL);
+    TreeType*	  nilNode();
+	TreeType*	  dupTree( TreeType* tree);
+    TreeType*	  dupTreeTT( TreeType* t, TreeType* tree);
+
+    void	addChild( TreeType* t, TreeType* child);
+    void	addChildToken( TreeType* t, CommonTokenType* child);
+    void	setParent( TreeType* child, TreeType* parent);
+    TreeType*		getParent( TreeType* child);
+
+	TreeType*		errorNode( CommonTokenType* tnstream, CommonTokenType* startToken, CommonTokenType* stopToken);
+	bool	isNilNode( TreeType* t);
+
+    TreeType*	    becomeRoot( TreeType* newRoot, TreeType* oldRoot);
+    TreeType*	   	rulePostProcessing( TreeType* root);
+
+    TreeType*	becomeRootToken(CommonTokenType* newRoot, TreeType* oldRoot);
+
+    TreeType*	 	create( CommonTokenType* payload);
+    TreeType* 		createTypeToken( ANTLR_UINT32 tokenType, CommonTokenType* fromToken);
+    TreeType*	   	createTypeTokenText	( ANTLR_UINT32 tokenType, CommonTokenType* fromToken, const ANTLR_UINT8* text);
+    TreeType*	    createTypeText		( ANTLR_UINT32 tokenType, const ANTLR_UINT8* text);
+
+    TreeType*	    dupNode( TreeType* treeNode);
+    ANTLR_UINT32			getType( TreeType* t);
+    StringType			getText( TreeType* t);
+        
+    TreeType*	    getChild( TreeType* t, ANTLR_UINT32 i);
+    void	setChild( TreeType* t, ANTLR_UINT32 i, TreeType* child);
+    void	deleteChild( TreeType* t, ANTLR_UINT32 i);
+    void	setChildIndex( TreeType* t, ANTLR_INT32 i);
+    ANTLR_INT32	getChildIndex( TreeType* t);
+
+    ANTLR_UINT32	getChildCount( TreeType*);
+	ANTLR_UINT64	getUniqueID( TreeType*);
+
+    CommonTokenType*    createToken( ANTLR_UINT32 tokenType, const ANTLR_UINT8* text);
+    CommonTokenType*    createTokenFromToken( CommonTokenType* fromToken);
+    CommonTokenType*    getToken( TreeType* t);
+
+    void setTokenBoundaries( TreeType* t, CommonTokenType* startToken, CommonTokenType* stopToken);
+    ANTLR_MARKER	getTokenStartIndex( TreeType* t);
+    ANTLR_MARKER	getTokenStopIndex( TreeType* t);
+
+	/// Produce a DOT (see graphviz freeware suite) from a base tree
+	///
+	StringType			makeDot( TreeType* theTree);
+
+	/// Replace from start to stop child index of parent with t, which might
+	/// be a list.  Number of children may be different
+	/// after this call.
+	///
+	/// If parent is null, don't do anything; must be at root of overall tree.
+	/// Can't replace whatever points to the parent externally.  Do nothing.
+	///
+	void replaceChildren( TreeType* parent, ANTLR_INT32 startChildIndex,
+								  ANTLR_INT32 stopChildIndex, TreeType* t);
+
+    ~CommonTreeAdaptor();
+
+protected:
+	void defineDotNodes(TreeType* t, StringType& dotSpec);
+	void defineDotEdges(TreeType* t, StringType& dotSpec);
+};
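+
+// Editor's note (illustrative only, not part of the original header): a minimal
+// sketch of how generated code drives the adaptor, assuming a hypothetical
+// "MyImplTraits" and an assumed token type constant PLUS from a generated
+// parser:
+//
+//   typedef MyImplTraits::TreeType TreeT;
+//   CommonTreeAdaptor<MyImplTraits> adaptor;
+//   TreeT* root = adaptor.nilNode();                          // flat list root
+//   TreeT* plus = adaptor.createTypeText(PLUS, (const ANTLR_UINT8*)"+");
+//   adaptor.addChild(root, plus);
+//   root = adaptor.rulePostProcessing(root);                  // collapse the nil root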
+
+//If the CommonTreeAdaptor has been overridden at compile time (via ImplTraits),
+//that override is inherited here. You can still override the DebugTreeAdaptor
+//itself if you wish to change its behaviour.
+template<class ImplTraits>
+class DebugTreeAdaptor : public ImplTraits::CommonTreeAdaptorType
+{
+public:
+	//DebugEventListener implements functionality through virtual functions
+	//the template parameter is required for pointing back at the adaptor
+	typedef typename ImplTraits::DebugEventListenerType DebuggerType;
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+
+private:
+	/// If set to something other than NULL, then this member points to an
+	/// instance of the debugger interface. In general, the debugger is only
+	/// referenced internally in recovery/error operations so that it does not
+	/// cause overhead by having to check this pointer in every function/method.
+	///
+	DebuggerType*		m_debugger;
+
+public:
+	DebugTreeAdaptor( DebuggerType* debugger );
+	void setDebugEventListener( DebuggerType* debugger);
+	TreeType*	  nilNode();
+	void	addChild(TreeType* t, TreeType* child);
+	void	addChildToken(TreeType* t, CommonTokenType* child);
+	TreeType* becomeRoot( TreeType* newRootTree, TreeType* oldRootTree );
+	TreeType* becomeRootToken(CommonTokenType* newRoot, TreeType* oldRoot);
+	TreeType* createTypeToken(ANTLR_UINT32 tokenType, CommonTokenType* fromToken);
+	TreeType* createTypeTokenText(ANTLR_UINT32 tokenType, CommonTokenType* fromToken, ANTLR_UINT8* text);	
+	TreeType* createTypeText( ANTLR_UINT32 tokenType, ANTLR_UINT8* text);
+	TreeType* dupTree( TreeType* tree);
+
+	/// Sends the required debugging events for duplicating a tree
+	/// to the debugger.
+	///
+	void simulateTreeConstruction(TreeType* tree);
+};
+
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3commontreeadaptor.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3commontreeadaptor.inl b/runtime/Cpp/include/antlr3commontreeadaptor.inl
new file mode 100755
index 0000000..698c7d7
--- /dev/null
+++ b/runtime/Cpp/include/antlr3commontreeadaptor.inl
@@ -0,0 +1,801 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+ANTLR_INLINE CommonTreeAdaptor<ImplTraits>::CommonTreeAdaptor(DebuggerType*)
+{
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*	  CommonTreeAdaptor<ImplTraits>::nilNode()
+{
+	return this->create(NULL);
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*	  CommonTreeAdaptor<ImplTraits>::dupTree( TreeType* tree)
+{
+	return this->dupTreeTT(tree, NULL);
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*	  CommonTreeAdaptor<ImplTraits>::dupTreeTT( TreeType* t, TreeType* parent)
+{
+	TreeType*	newTree;
+	TreeType*	child;
+	TreeType*	newSubTree;
+	ANTLR_UINT32		n;
+	ANTLR_UINT32		i;
+
+	if	(t == NULL)
+		return NULL;
+
+	newTree = t->dupNode();
+
+	// Ensure new subtree root has parent/child index set
+	//
+	this->setChildIndex( newTree, t->getChildIndex() );
+	this->setParent(newTree, parent);
+	n = this->getChildCount(t);
+
+	for	(i=0; i < n; i++)
+	{
+		child = this->getChild(t, i);
+		newSubTree = this->dupTreeTT(child, t);
+		this->addChild(newTree, newSubTree);
+	}
+	return	newTree;
+}
+
+template<class ImplTraits>
+void	CommonTreeAdaptor<ImplTraits>::addChild( TreeType* t, TreeType* child)
+{
+	if	(t != NULL && child != NULL)
+	{
+		t->addChild(child);
+	}
+}
+
+template<class ImplTraits>
+void	CommonTreeAdaptor<ImplTraits>::addChildToken( TreeType* t, CommonTokenType* child)
+{
+	if	(t != NULL && child != NULL)
+	{
+		this->addChild(t, this->create(child));
+	}
+}
+
+template<class ImplTraits>
+void	CommonTreeAdaptor<ImplTraits>::setParent( TreeType* child, TreeType* parent)
+{
+	child->setParent(parent);
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*		CommonTreeAdaptor<ImplTraits>::getParent( TreeType* child)
+{
+	return child->getParent();
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*		CommonTreeAdaptor<ImplTraits>::errorNode( CommonTokenType* tnstream, CommonTokenType* startToken, CommonTokenType* stopToken)
+{
+	// Use the supplied common tree node stream to get another tree from the factory
+	// TODO: Look at creating the error node as in Java, but this is complicated by the
+	// need to track and free the memory allocated to it, so for now, we just
+	// want something in the tree that isn't a NULL pointer.
+	//
+	return this->createTypeText( CommonTokenType::TOKEN_INVALID, (const ANTLR_UINT8*)"Tree Error Node");
+
+}
+
+template<class ImplTraits>
+bool	CommonTreeAdaptor<ImplTraits>::isNilNode( TreeType* t)
+{
+	return t->isNilNode();
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*	    CommonTreeAdaptor<ImplTraits>::becomeRoot( TreeType* newRootTree, TreeType* oldRootTree)
+{
+	TreeType* saveRoot;
+
+	/* Protect against tree rewrites if we are in some sort of error
+	 * state, but have tried to recover. In C we can end up with a null pointer
+	 * for a tree that was not produced.
+	 */
+	if	(newRootTree == NULL)
+	{
+		return	oldRootTree;
+	}
+
+	/* root is just the new tree as is if there is no
+	 * current root tree.
+	 */
+	if	(oldRootTree == NULL)
+	{
+		return	newRootTree;
+	}
+
+	/* Produce ^(nil real-node)
+	 */
+	if	(newRootTree->isNilNode())
+	{
+		if	(newRootTree->getChildCount() > 1)
+		{
+			/* TODO: Handle tree exceptions 
+			 */
+			fprintf(stderr, "More than one node as root! TODO: Create tree exception handling\n");
+			return newRootTree;
+		}
+
+		/* The new root is the first child, keep track of the original newRoot
+         * because if it was a Nil Node, then we can reuse it now.
+		 */
+        saveRoot    = newRootTree;
+		newRootTree = newRootTree->getChild(0);
+
+        // Reclaim the old nilNode()
+        //
+        saveRoot->reuse();
+	}
+
+	/* Add old root into new root. addChild takes care of the case where oldRoot
+	 * is a flat list (nil-rooted tree). All children of oldRoot are added to
+	 * the new root.
+	 */
+	newRootTree->addChild(oldRootTree);
+
+    // If the oldroot tree was a nil node, then we know at this point
+    // it has become orphaned by the rewrite logic, so we tell it to do
+    // whatever it needs to do to be reused.
+    //
+    if  (oldRootTree->isNilNode())
+    {
+        // We have taken an old root tree and appended all of its children to the
+        // new root. Because the old root was a nil node, the generated code will
+        // not reuse it again, so we reclaim it here. First we want to zero out any
+        // pointers it was carrying around; as we are just the base tree handler we
+        // don't necessarily know how to do that for the real node, so we simply
+        // ask the tree itself to do it.
+        //
+        oldRootTree->reuse();
+    }
+	/* Always returns new root structure
+	 */
+	return	newRootTree;
+}
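+
+// Editor's note (illustrative only): becomeRoot() implements the "^" rewrite
+// operator. Given newRoot == "+" and oldRoot == nil(a, b), the nil list is
+// flattened under "+" and then reclaimed via reuse(), yielding the tree
+// (+ a b); callers must treat the returned pointer as the new root.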
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*	CommonTreeAdaptor<ImplTraits>::becomeRootToken(CommonTokenType* newRoot, TreeType* oldRoot)
+{
+	return	this->becomeRoot(this->create(newRoot), oldRoot);
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*	 	CommonTreeAdaptor<ImplTraits>::create( CommonTokenType* payload)
+{
+	return new TreeType(payload);
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*  CommonTreeAdaptor<ImplTraits>::createTypeToken( ANTLR_UINT32 tokenType, 
+																						  CommonTokenType* fromToken)
+{
+	/* Create the new token
+	 */
+	fromToken = this->createTokenFromToken(fromToken);
+
+	/* Set the type of the new token to that supplied
+	 */
+	fromToken->setType(tokenType);
+
+	/* Return a new node based upon this token
+	 */
+	return	this->create(fromToken);
+
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*	CommonTreeAdaptor<ImplTraits>::createTypeTokenText( ANTLR_UINT32 tokenType, CommonTokenType* fromToken, const ANTLR_UINT8* text)
+{
+	/* Create the new token
+	 */
+	fromToken = this->createTokenFromToken(fromToken);
+
+	/* Set the type of the new token to that supplied
+	 */
+	fromToken->setType(tokenType);
+
+	/* Set the text of the token accordingly
+	 */
+	fromToken->setText(text);
+
+	/* Return a new node based upon this token
+	 */
+	return	this->create(fromToken);
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*	    CommonTreeAdaptor<ImplTraits>::createTypeText( ANTLR_UINT32 tokenType, const ANTLR_UINT8* text)
+{
+	CommonTokenType*	fromToken;
+
+	/* Create the new token
+	 */
+	fromToken = this->createToken(tokenType, text);
+
+	/* Return a new node based upon this token
+	 */
+	return	this->create(fromToken);
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*	CommonTreeAdaptor<ImplTraits>::dupNode( TreeType* treeNode)
+{
+	return  (treeNode == NULL) ? NULL : treeNode->dupNode();
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	CommonTreeAdaptor<ImplTraits>::getType( TreeType* t)
+{
+	return t->getType();
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::StringType	CommonTreeAdaptor<ImplTraits>::getText( TreeType* t)
+{
+	return t->getText();
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType*	    CommonTreeAdaptor<ImplTraits>::getChild( TreeType* t, ANTLR_UINT32 i)
+{
+	return t->getChild(i);
+}
+
+template<class ImplTraits>
+void	CommonTreeAdaptor<ImplTraits>::setChild( TreeType* t, ANTLR_UINT32 i, TreeType* child)
+{
+	t->setChild(i, child);
+}
+
+template<class ImplTraits>
+void	CommonTreeAdaptor<ImplTraits>::deleteChild( TreeType* t, ANTLR_UINT32 i)
+{
+	t->deleteChild(i);
+}
+
+template<class ImplTraits>
+void	CommonTreeAdaptor<ImplTraits>::setChildIndex( TreeType* t, ANTLR_INT32 i)
+{
+	t->setChildIndex(i);
+}
+
+template<class ImplTraits>
+ANTLR_INT32	CommonTreeAdaptor<ImplTraits>::getChildIndex( TreeType * t)
+{
+	return t->getChildIndex();
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	CommonTreeAdaptor<ImplTraits>::getChildCount( TreeType* t)
+{
+	return t->getChildCount();
+}
+
+template<class ImplTraits>
+ANTLR_UINT64	CommonTreeAdaptor<ImplTraits>::getUniqueID( TreeType* node )
+{
+	return	reinterpret_cast<ANTLR_UINT64>(node);
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::CommonTokenType*    
+	     CommonTreeAdaptor<ImplTraits>::createToken( ANTLR_UINT32 tokenType, const ANTLR_UINT8* text)
+{
+	CommonTokenType*    newToken = new CommonTokenType;
+
+    if	(newToken != NULL)
+    {	
+		newToken->set_tokText( (const char*) text );
+		newToken->setType(tokenType);
+    }
+    return  newToken;
+
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::CommonTokenType*    
+	CommonTreeAdaptor<ImplTraits>::createTokenFromToken( CommonTokenType* fromToken)
+{
+	CommonTokenType*    newToken;
+
+    newToken	= new CommonTokenType;
+    
+    if	(newToken != NULL)
+    {
+		// Create the text using our own string factory to avoid complicating
+		// commontoken.
+		//
+		StringType	text = fromToken->getText();
+		newToken->set_tokText( text );
+		newToken->setLine( fromToken->getLine() );
+		newToken->setTokenIndex( fromToken->getTokenIndex() );
+		newToken->setCharPositionInLine( fromToken->getCharPositionInLine() );
+		newToken->setChannel( fromToken->getChannel() );
+		newToken->setType( fromToken->getType() );
+    }
+
+    return  newToken;
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::CommonTokenType*  
+		 CommonTreeAdaptor<ImplTraits>::getToken( TreeType* t)
+{
+	return t->getToken();
+}
+
+template<class ImplTraits>
+void CommonTreeAdaptor<ImplTraits>::setTokenBoundaries( TreeType* t, CommonTokenType* startToken, CommonTokenType* stopToken)
+{
+	ANTLR_MARKER   start;
+	ANTLR_MARKER   stop;
+
+	TreeType*	    ct;
+
+	if	(t == NULL)
+	{
+		return;
+	}
+
+	if	( startToken != NULL)
+	{
+		start = startToken->getTokenIndex();
+	}
+	else
+	{
+		start = 0;
+	}
+
+	if	( stopToken != NULL)
+	{
+		stop = stopToken->getTokenIndex();
+	}
+	else
+	{
+		stop = 0;
+	}
+
+	ct	= t;
+
+	ct->set_startIndex(start);
+	ct->set_stopIndex(stop);
+}
+
+template<class ImplTraits>
+ANTLR_MARKER	CommonTreeAdaptor<ImplTraits>::getTokenStartIndex( TreeType* t)
+{
+	return t->get_tokenStartIndex();
+}
+
+template<class ImplTraits>
+ANTLR_MARKER	CommonTreeAdaptor<ImplTraits>::getTokenStopIndex( TreeType* t)
+{
+	return t->get_tokenStopIndex();
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::StringType	 CommonTreeAdaptor<ImplTraits>::makeDot( TreeType* theTree)
+{
+	// The string we are building up
+	//
+	StringType		dotSpec;
+	char            buff[64];
+	StringType      text;
+	
+	dotSpec = "digraph {\n\n"
+			"\tordering=out;\n"
+			"\tranksep=.4;\n"
+			"\tbgcolor=\"lightgrey\";  node [shape=box, fixedsize=false, fontsize=12, fontname=\"Helvetica-bold\", fontcolor=\"blue\"\n"
+			"\twidth=.25, height=.25, color=\"black\", fillcolor=\"white\", style=\"filled, solid, bold\"];\n\n"
+			"\tedge [arrowsize=.5, color=\"black\", style=\"bold\"]\n\n";
+
+    if	(theTree == NULL)
+	{
+		// No tree, so create a blank spec
+		//
+		dotSpec.append("n0[label=\"EMPTY TREE\"]\n");
+		return dotSpec;
+	}
+
+    sprintf(buff, "\tn%p[label=\"", theTree);
+	dotSpec.append(buff);
+    text = this->getText(theTree);
+    for (std::size_t j = 0; j < text.size(); j++)
+    {
+            switch(text[j])
+            {
+                case '"':
+                    dotSpec.append("\\\"");
+                    break;
+
+                case '\n':
+                    dotSpec.append("\\n");
+                    break;
+
+                case '\r':
+                    dotSpec.append("\\r");
+                    break;
+
+                default:
+                    dotSpec += text[j];
+                    break;
+            }
+    }
+	dotSpec.append("\"]\n");
+
+	// First produce the node definitions
+	//
+	this->defineDotNodes(theTree, dotSpec);
+	dotSpec.append("\n");
+	this->defineDotEdges(theTree, dotSpec);
+	
+	// Terminate the spec
+	//
+	dotSpec.append("\n}");
+
+	// Result
+	//
+	return dotSpec;
+}
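+
+// Editor's note (illustrative only): the spec returned by makeDot() can be
+// written to a file and rendered with the graphviz tools, for example
+// "dot -Tpng tree.dot -o tree.png".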
+
+template<class ImplTraits>
+void CommonTreeAdaptor<ImplTraits>::replaceChildren( TreeType* parent, ANTLR_INT32 startChildIndex, ANTLR_INT32 stopChildIndex, TreeType* t)
+{
+	if	(parent != NULL)
+		parent->replaceChildren(startChildIndex, stopChildIndex, t);
+}
+
+template<class ImplTraits>
+CommonTreeAdaptor<ImplTraits>::~CommonTreeAdaptor()
+{
+}
+
+template<class ImplTraits>
+void CommonTreeAdaptor<ImplTraits>::defineDotNodes(TreeType* t, StringType& dotSpec)
+{
+	// How many nodes are we talking about?
+	//
+	int	nCount;
+	int i;
+    TreeType* child;
+	char	buff[64];
+	StringType	text;
+	int		j;
+
+	// Count the nodes
+	//
+	nCount = this->getChildCount(t);
+
+	if	(nCount == 0)
+	{
+		// This will already have been included as a child of another node
+		// so there is nothing to add.
+		//
+		return;
+	}
+
+	// For each child of the current tree, define a node using the
+	// memory address of the node to name it
+	//
+	for	(i = 0; i<nCount; i++)
+	{
+
+		// Pick up a pointer for the child
+		//
+		child = this->getChild(t, i);
+
+		// Name the node
+		//
+		sprintf(buff, "\tn%p[label=\"", child);
+		dotSpec.append(buff);
+		text = this->getText(child);
+		for (j = 0; j < text.size(); j++)
+		{
+            switch(text[j])
+            {
+                case '"':
+                    dotSpec.append("\\\"");
+                    break;
+
+                case '\n':
+                    dotSpec.append("\\n");
+                    break;
+
+                case '\r':
+                    dotSpec.append("\\r");
+                    break;
+
+                default:
+                    dotSpec += text[j];
+                    break;
+            }
+		}
+		dotSpec.append("\"]\n");
+
+		// And now define the children of this child (if any)
+		//
+		this->defineDotNodes(child, dotSpec);
+	}
+	
+	// Done
+	//
+	return;
+}
+
+template<class ImplTraits>
+void CommonTreeAdaptor<ImplTraits>::defineDotEdges(TreeType* t, StringType& dotSpec)
+{
+	// How many nodes are we talking about?
+	//
+	int	nCount;
+	if	(t == NULL)
+	{
+		// No tree, so do nothing
+		//
+		return;
+	}
+
+	// Count the nodes
+	//
+	nCount = this->getChildCount(t);
+
+	if	(nCount == 0)
+	{
+		// This will already have been included as a child of another node
+		// so there is nothing to add.
+		//
+		return;
+	}
+
+	// For each child, define an edge from this parent, then process
+	// and children of this child in the same way
+	//
+	for	(int i=0; i<nCount; i++)
+	{
+		TreeType* child;
+		char	buff[128];
+        StringType text;
+
+		// Next child
+		//
+		child	= this->getChild(t, i);
+
+		// Create the edge relation
+		//
+		sprintf(buff, "\t\tn%p -> n%p\t\t// ",  t, child);
+        
+		dotSpec.append(buff);
+
+		// Document the relationship
+		//
+        text = this->getText(t);
+		for (std::size_t j = 0; j < text.size(); j++)
+        {
+                switch(text[j])
+                {
+                    case '"':
+                        dotSpec.append("\\\"");
+                        break;
+
+                    case '\n':
+                        dotSpec.append("\\n");
+                        break;
+
+                    case '\r':
+                        dotSpec.append("\\r");
+                        break;
+
+                    default:
+                        dotSpec += text[j];
+                        break;
+                }
+        }
+
+        dotSpec.append(" -> ");
+
+        text = this->getText(child);
+        for (std::size_t j = 0; j < text.size(); j++)
+        {
+                switch(text[j])
+                {
+                    case '"':
+                        dotSpec.append("\\\"");
+                        break;
+
+                    case '\n':
+                        dotSpec.append("\\n");
+                        break;
+
+                    case '\r':
+                        dotSpec.append("\\r");
+                        break;
+
+                    default:
+                        dotSpec += text[j];
+                        break;
+                }
+        }
+		dotSpec.append("\n");
+        
+		// Define edges for this child
+		//
+		this->defineDotEdges(child, dotSpec);
+	}
+
+	// Done
+	//
+	return;
+}
+
+template<class ImplTraits>
+typename CommonTreeAdaptor<ImplTraits>::TreeType* CommonTreeAdaptor<ImplTraits>::rulePostProcessing( TreeType* root)
+{
+	TreeType* saveRoot;
+
+    // Keep track of the root we are given. If it is a nilNode, then we
+    // can reuse it rather than orphaning it!
+    //
+    saveRoot = root;
+
+	if (root != NULL && root->isNilNode())
+	{
+		if	(root->getChildCount() == 0)
+		{
+			root = NULL;
+		}
+		else if	(root->getChildCount() == 1)
+		{
+			root = root->getChild(0);
+			root->setParent(NULL);
+			root->setChildIndex(-1);
+
+            // The root we were given was a nil node with one child, which means it
+            // has been abandoned and would be lost in the node factory. However,
+            // nodes can be flagged as reusable to prevent this terrible waste.
+            //
+            saveRoot->reuse();
+		}
+	}
+	return root;
+}
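+
+// Editor's note (illustrative only): rulePostProcessing() collapses redundant
+// nil roots left over from rule rewrites: nil() becomes NULL, nil(x) becomes x
+// (with its parent and child index reset), and a nil root with two or more
+// children is returned unchanged as a flat list.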
+
+template<class ImplTraits>
+DebugTreeAdaptor<ImplTraits>::DebugTreeAdaptor( DebuggerType* debugger )
+{
+	m_debugger = debugger;
+}
+
+template<class ImplTraits>
+void DebugTreeAdaptor<ImplTraits>::setDebugEventListener( DebuggerType* debugger)
+{
+	m_debugger = debugger;
+}
+
+template<class ImplTraits>
+typename DebugTreeAdaptor<ImplTraits>::TreeType*	  DebugTreeAdaptor<ImplTraits>::nilNode()
+{
+	TreeType*	t = this->create(NULL);
+	m_debugger->createNode(t);
+	return	t;
+}
+
+template<class ImplTraits>
+void	DebugTreeAdaptor<ImplTraits>::addChild(TreeType* t, TreeType* child)
+{
+	if	(t != NULL && child != NULL)
+	{
+		t->addChild(child);
+		m_debugger->addChild(t, child);
+	}
+}
+
+template<class ImplTraits>
+void	DebugTreeAdaptor<ImplTraits>::addChildToken(TreeType* t, CommonTokenType* child)
+{
+	TreeType*	tc;
+	if	(t != NULL && child != NULL)
+	{
+		tc = this->create(child);
+		// The debug addChild() already reports the event, so don't notify the debugger twice
+		this->addChild(t, tc);
+	}
+}
+
+template<class ImplTraits>
+typename DebugTreeAdaptor<ImplTraits>::TreeType* DebugTreeAdaptor<ImplTraits>::becomeRoot( TreeType* newRootTree, TreeType* oldRootTree )
+{
+	typedef typename ImplTraits::CommonTreeAdaptorType SuperType;
+	TreeType* t;
+	// Delegate to the underlying adaptor; calling this->becomeRoot() here would recurse forever
+	t = SuperType::becomeRoot(newRootTree, oldRootTree);
+	m_debugger->becomeRoot(newRootTree, oldRootTree);
+	return t;
+}
+
+template<class ImplTraits>
+typename DebugTreeAdaptor<ImplTraits>::TreeType* DebugTreeAdaptor<ImplTraits>::becomeRootToken(CommonTokenType* newRoot, TreeType* oldRoot)
+{
+	typedef typename ImplTraits::CommonTreeAdaptorType SuperType;
+	TreeType*	t;
+	// Build the new root from the token, delegate to the underlying adaptor,
+	// then report the result to the debugger once
+	t =	SuperType::becomeRoot(this->create(newRoot), oldRoot);
+	m_debugger->becomeRoot(t, oldRoot);
+	return t;
+}
+
+template<class ImplTraits>
+typename DebugTreeAdaptor<ImplTraits>::TreeType* DebugTreeAdaptor<ImplTraits>::createTypeToken(ANTLR_UINT32 tokenType, CommonTokenType* fromToken)
+{
+	typedef typename ImplTraits::CommonTreeAdaptorType SuperType;
+	TreeType* t;
+	// Delegate to the underlying adaptor; a this-> call here would recurse forever
+	t = SuperType::createTypeToken(tokenType, fromToken);
+	m_debugger->createNode(t);
+	return t;
+}
+
+template<class ImplTraits>
+typename DebugTreeAdaptor<ImplTraits>::TreeType* DebugTreeAdaptor<ImplTraits>::createTypeTokenText(ANTLR_UINT32 tokenType, CommonTokenType* fromToken, ANTLR_UINT8* text)
+{
+	typedef typename ImplTraits::CommonTreeAdaptorType SuperType;
+	TreeType* t;
+	t = SuperType::createTypeTokenText(tokenType, fromToken, text);
+	m_debugger->createNode(t);
+	return t;
+}
+
+template<class ImplTraits>
+typename DebugTreeAdaptor<ImplTraits>::TreeType* DebugTreeAdaptor<ImplTraits>::createTypeText( ANTLR_UINT32 tokenType, ANTLR_UINT8* text)
+{
+	typedef typename ImplTraits::CommonTreeAdaptorType SuperType;
+	TreeType* t;
+	t = SuperType::createTypeText(tokenType, text);
+	m_debugger->createNode(t);
+	return t;
+}
+
+template<class ImplTraits>
+typename DebugTreeAdaptor<ImplTraits>::TreeType* DebugTreeAdaptor<ImplTraits>::dupTree( TreeType* tree)
+{
+	TreeType* t;
+
+	// Call the normal dup tree mechanism first
+	//
+	t = this->dupTreeTT(tree, NULL);
+
+	// In order to tell the debugger what we have just done, we now
+	// simulate the tree building mechanism. This will fire
+	// lots of debugging events to the client and look as if we
+	// had duplicated the tree.
+	//
+	this->simulateTreeConstruction( t);
+
+	return t;
+}
+
+template<class ImplTraits>
+void DebugTreeAdaptor<ImplTraits>::simulateTreeConstruction(TreeType* tree)
+{
+	ANTLR_UINT32		n;
+	ANTLR_UINT32		i;
+	TreeType*	child;
+
+	// Send the create node event
+	//
+	m_debugger->createNode(tree);
+
+	n = this->getChildCount(tree);
+	for	(i = 0; i < n; i++)
+	{
+		child = this->getChild(tree, i);
+		this->simulateTreeConstruction(child);
+		m_debugger->addChild(tree, child);
+	}
+}
+
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3commontreenodestream.hpp b/runtime/Cpp/include/antlr3commontreenodestream.hpp
new file mode 100755
index 0000000..962758f
--- /dev/null
+++ b/runtime/Cpp/include/antlr3commontreenodestream.hpp
@@ -0,0 +1,317 @@
+/// \file
+/// Definition of the ANTLR3 common tree node stream.
+///
+
+#ifndef	_ANTLR_COMMON_TREE_NODE_STREAM__HPP
+#define	_ANTLR_COMMON_TREE_NODE_STREAM__HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+class CommonTreeNodeStream : public ImplTraits::TreeNodeIntStreamType
+{
+public:
+	enum Constants
+	{
+		/// Token buffer initial size settings ( will auto increase)
+		///
+		DEFAULT_INITIAL_BUFFER_SIZE	= 100
+		, INITIAL_CALL_STACK_SIZE	= 10
+	};
+
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef TreeType UnitType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::StringStreamType StringStreamType;
+	typedef typename ImplTraits::TreeAdaptorType TreeAdaptorType;
+	typedef typename ImplTraits::TreeNodeIntStreamType IntStreamType;
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename AllocPolicyType::template VectorType<TreeType*>	NodesType;
+	typedef typename AllocPolicyType::template VectorType< TreeWalkState<ImplTraits> > MarkersType;
+	typedef typename AllocPolicyType::template StackType< ANTLR_INT32 > NodeStackType;
+	typedef typename ImplTraits::TreeParserType ComponentType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+	typedef typename ImplTraits::TreeNodeIntStreamType BaseType;
+
+public:
+    /// Dummy tree node that indicates a descent into a child
+    /// tree. Initialized by a call to create a new interface.
+    ///
+    TreeType			m_DOWN;
+
+    /// Dummy tree node that indicates a descent up to a parent
+    /// tree. Initialized by a call to create a new interface.
+    ///
+    TreeType			m_UP;
+
+    /// Dummy tree node that indicates the termination point of the
+    /// tree. Initialized by a call to create a new interface.
+    ///
+    TreeType			m_EOF_NODE;
+
+    /// Dummy node that is returned if we need to indicate an invalid node
+    /// for any reason.
+    ///
+    TreeType			m_INVALID_NODE;
+
+	/// The complete mapping from stream index to tree node.
+	/// This buffer includes pointers to DOWN, UP, and EOF nodes.
+	/// It is built on first use rather than at ctor invocation.  The elements
+	/// are plain tree pointers, as we don't know what the trees look like.
+	///
+	/// Load upon first need of the buffer so we can set token types
+	/// of interest for reverse indexing.  It slows us down a wee bit to
+	/// do all of the p == -1 testing everywhere, though in practice
+	/// you won't really be able to measure this.
+	///
+	/// Must be freed when the tree node stream is torn down.
+	///
+	NodesType				m_nodes;
+
+	/// Which tree are we navigating ?
+    ///
+    TreeType*				m_root;
+
+    /// Pointer to tree adaptor interface that manipulates/builds
+    /// the tree.
+    ///
+    TreeAdaptorType*		m_adaptor;
+
+    /// As we walk down the nodes, we must track parent nodes so we know
+    /// where to go after walking the last child of a node.  When visiting
+    /// a child, push current node and current index (current index
+    /// is first stored in the tree node structure to avoid two stacks.
+    ///
+    NodeStackType			m_nodeStack;
+
+	/// The current index into the nodes vector of the current tree
+	/// we are parsing and possibly rewriting.
+	///
+	ANTLR_INT32			m_p;
+
+    /// Which node are we currently visiting?
+    ///
+    TreeType*		m_currentNode;
+
+    /// Which node did we last visit? Used for LT(-1)
+    ///
+    TreeType*		m_previousNode;
+
+    /// Which child are we currently visiting?  If -1 we have not visited
+    /// this node yet; next consume() request will set currentIndex to 0.
+    ///
+    ANTLR_INT32		m_currentChildIndex;
+
+    /// What node index did we just consume?  i=0..n-1 for n node trees.
+    /// IntStream.next is hence 1 + this value.  Size will be same.
+    ///
+    ANTLR_MARKER	m_absoluteNodeIndex;
+
+    /// Buffer tree node stream for use with LT(i).  This list grows
+    /// to fit new lookahead depths, but consume() wraps like a circular
+    /// buffer.
+    ///
+    TreeType**		m_lookAhead;
+
+    /// Number of elements available in the lookahead buffer at any point in
+    ///  time. This is the current size of the array.
+    ///
+    ANTLR_UINT32		m_lookAheadLength;
+
+    /// lookAhead[head] is the first symbol of lookahead, LT(1). 
+    ///
+    ANTLR_UINT32		m_head;
+
+    /// Add new lookahead at lookahead[tail].  tail wraps around at the
+    /// end of the lookahead buffer so tail could be less than head.
+    ///
+    ANTLR_UINT32		m_tail;
+
+    /// Calls to mark() may be nested so we have to track a stack of
+    /// them.  The marker is an index into this stack.  Index 0 is
+    /// the first marker.  This is a List<TreeWalkState>
+    ///
+    MarkersType			m_markers;
+
+	/// Indicates whether this node stream was derived from a prior
+	/// node stream to be used by a rewriting tree parser for instance.
+	/// If this flag is set to ANTLR_TRUE, then when this stream is
+	/// closed it will not free the root tree as this tree always
+	/// belongs to the originating node stream.
+	///
+	bool				m_isRewriter;
+
+    /// If set to ANTLR_TRUE then the navigation nodes UP, DOWN are
+    /// duplicated rather than reused within the tree.
+    ///
+    bool				m_uniqueNavigationNodes;
+
+public:
+    // INTERFACE
+	//
+	CommonTreeNodeStream( ANTLR_UINT32 hint );
+	CommonTreeNodeStream( const CommonTreeNodeStream& ctn );
+	CommonTreeNodeStream( TreeType* tree, ANTLR_UINT32 hint );
+
+	void init( ANTLR_UINT32 hint );
+	~CommonTreeNodeStream();
+
+	/// Get tree node at current input pointer + i ahead where i=1 is next node.
+	/// i<0 indicates nodes in the past.  So LT(-1) is previous node, but
+	/// implementations are not required to provide results for k < -1.
+	/// LT(0) is undefined.  For i>=n, return null.
+	/// Return NULL for LT(0) and any index that results in an absolute address
+	/// that is negative (beyond the start of the list).
+	///
+	/// This is analogous to the LT() method of the TokenStream, but this
+	/// returns a tree node instead of a token.  Makes code gen identical
+	/// for both parser and tree grammars. :)
+	///
+    TreeType*	_LT(ANTLR_INT32 k);
+
+	/// Where is this stream pulling nodes from?  This is not the name, but
+	/// the object that provides node objects.
+	///
+    TreeType*	getTreeSource();
+
+	/// What adaptor can tell me how to interpret/navigate nodes and
+	/// trees.  E.g., get text of a node.
+	///
+    TreeAdaptorType*	getTreeAdaptor();
+
+	/// As we flatten the tree, we use UP, DOWN nodes to represent
+	/// the tree structure.  When debugging we need unique nodes
+	/// so we have to instantiate new ones.  When doing normal tree
+	/// parsing, it's slow and a waste of memory to create unique
+	/// navigation nodes.  Default should be false;
+	///
+    void  set_uniqueNavigationNodes(bool uniqueNavigationNodes);
+
+    StringType	toString();
+
+	/// Return the text of all nodes from start to stop, inclusive.
+	/// If the stream does not buffer all the nodes then it can still
+	/// walk recursively from start until stop.  You can always return
+	/// null or "" too, but users should not access $ruleLabel.text in
+	/// an action of course in that case.
+	///
+    StringType	toStringSS(TreeType* start, TreeType* stop);
+
+	/// Return the text of all nodes from start to stop, inclusive, into the
+	/// supplied buffer.
+	/// If the stream does not buffer all the nodes then it can still
+	/// walk recursively from start until stop.  You can always return
+	/// null or "" too, but users should not access $ruleLabel.text in
+	/// an action of course in that case.
+	///
+    void toStringWork(TreeType* start, TreeType* stop, StringType& buf);
+
+	/// Get a tree node at an absolute index i; 0..n-1.
+	/// If you don't want to buffer up nodes, then this method makes no
+	/// sense for you.
+	///
+	TreeType*	get(ANTLR_INT32 i);
+
+	// REWRITING TREES (used by tree parser)
+
+	/// Replace from start to stop child index of parent with t, which might
+	/// be a list.  Number of children may be different
+	/// after this call.  The stream is notified because it is walking the
+	/// tree and might need to know you are monkeying with the underlying
+	/// tree.  Also, it might be able to modify the node stream to avoid
+	/// restreaming for future phases.
+	///
+	/// If parent is null, don't do anything; must be at root of overall tree.
+	/// Can't replace whatever points to the parent externally.  Do nothing.
+	///
+	void replaceChildren(TreeType* parent, ANTLR_INT32 startChildIndex, 
+										ANTLR_INT32 stopChildIndex, TreeType* t);
+
+	TreeType* LB(ANTLR_INT32 k);
+
+	/// As we flatten the tree, we use UP, DOWN nodes to represent
+	/// the tree structure.  When debugging we need unique nodes
+	/// so instantiate new ones when uniqueNavigationNodes is true.
+	///
+    void	addNavigationNode(ANTLR_UINT32 ttype);
+
+    TreeType*	newDownNode();
+
+	TreeType*	newUpNode();
+
+    bool	hasUniqueNavigationNodes() const;
+
+    ANTLR_UINT32	getLookaheadSize();
+
+	void	push(ANTLR_INT32 index);
+
+	ANTLR_INT32	pop();
+
+    void	reset();
+
+	void fillBufferRoot();
+	void fillBuffer(TreeType* t);
+	
+};
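+
+// Editor's note (illustrative only, not part of the original header): a minimal
+// sketch of feeding a buffered node stream to a tree walker, assuming a
+// hypothetical "MyImplTraits", an AST "ast" produced by a parser, and an
+// assumed generated tree parser class "MyTreeWalker":
+//
+//   CommonTreeNodeStream<MyImplTraits> nodes( ast, 0 );   // 0 => default buffer hint
+//   MyTreeWalker walker( &nodes );
+//   walker.startRule();
+//
+// The DOWN/UP/EOF dummy nodes declared above are what the stream emits to
+// flatten the two-dimensional tree into a one-dimensional node sequence.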
+
+/** This class is used to save the state of the tree node stream when we walk
+ *  ahead with a cyclic DFA or evaluate a syntactic predicate and need to
+ *  rewind.  It wraps up the current state of the CommonTreeNodeStream;
+ *  calling mark() will push another of these on the markers stack.
+ */
+template<class ImplTraits>
+class TreeWalkState : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::TreeType TreeType;
+
+private:
+    ANTLR_UINT32		m_currentChildIndex;
+    ANTLR_MARKER		m_absoluteNodeIndex;
+    TreeType*		m_currentNode;
+    TreeType*		m_previousNode;
+    ANTLR_UINT32		m_nodeStackSize;
+    TreeType*		m_lookAhead;
+    ANTLR_UINT32		m_lookAheadLength;
+    ANTLR_UINT32		m_tail;
+    ANTLR_UINT32		m_head;
+
+
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3commontreenodestream.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3commontreenodestream.inl b/runtime/Cpp/include/antlr3commontreenodestream.inl
new file mode 100755
index 0000000..4dce47c
--- /dev/null
+++ b/runtime/Cpp/include/antlr3commontreenodestream.inl
@@ -0,0 +1,422 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+CommonTreeNodeStream<ImplTraits>::CommonTreeNodeStream(ANTLR_UINT32 hint)
+{
+	this->init(hint);
+}
+
+template<class ImplTraits>
+void CommonTreeNodeStream<ImplTraits>::init( ANTLR_UINT32 hint )
+{
+	m_root = NULL;
+	m_adaptor = new TreeAdaptorType;
+	// Create the node list map
+	//
+	if	(hint == 0)
+		hint = DEFAULT_INITIAL_BUFFER_SIZE;
+	m_nodes.reserve( hint );
+
+	m_p = -1;
+	m_currentNode = NULL;
+	m_previousNode = NULL;
+	m_currentChildIndex = 0; 
+	m_absoluteNodeIndex = 0;
+	m_lookAhead = NULL;
+	m_lookAheadLength = 0;
+	m_head = 0;
+	m_tail = 0;
+	m_uniqueNavigationNodes = false;
+	m_isRewriter = false;
+
+	CommonTokenType* token		= new CommonTokenType(CommonTokenType::TOKEN_UP);
+	token->set_tokText( "UP" );
+	m_UP.set_token( token );
+
+	token		= new CommonTokenType(CommonTokenType::TOKEN_DOWN);
+	token->set_tokText( "DOWN" );
+	m_DOWN.set_token( token );
+
+	token		= new CommonTokenType(CommonTokenType::TOKEN_EOF);
+	token->set_tokText( "EOF" );
+	m_EOF_NODE.set_token( token );
+
+	token		= new CommonTokenType(CommonTokenType::TOKEN_INVALID);
+	token->set_tokText( "INVALID" );
+	m_INVALID_NODE.set_token( token );
+}
+
+template<class ImplTraits>
+CommonTreeNodeStream<ImplTraits>::CommonTreeNodeStream( const CommonTreeNodeStream& ctn )
+{
+	m_root = ctn.m_root;
+	m_adaptor = ctn.m_adaptor;
+	m_nodes.reserve( DEFAULT_INITIAL_BUFFER_SIZE );
+	m_nodeStack = ctn.m_nodeStack;
+	m_p = -1;
+	m_currentNode = NULL;
+	m_previousNode = NULL;
+	m_currentChildIndex = 0; 
+	m_absoluteNodeIndex = 0;
+	m_lookAhead = NULL;
+	m_lookAheadLength = 0;
+	m_head = 0;
+	m_tail = 0;
+	m_uniqueNavigationNodes = false;
+	m_isRewriter = true;
+
+	m_UP.set_token( ctn.m_UP.get_token() );
+	m_DOWN.set_token( ctn.m_DOWN.get_token() );
+	m_EOF_NODE.set_token( ctn.m_EOF_NODE.get_token() );
+	m_INVALID_NODE.set_token( ctn.m_INVALID_NODE.get_token() );
+}
+
+template<class ImplTraits>
+CommonTreeNodeStream<ImplTraits>::CommonTreeNodeStream( TreeType* tree, ANTLR_UINT32 hint )
+{
+	this->init(hint);
+	m_root = tree;
+}
+
+template<class ImplTraits>
+CommonTreeNodeStream<ImplTraits>::~CommonTreeNodeStream()
+{
+	// If this is a rewriting stream, then certain resources
+	// belong to the originating node stream and we do not
+	// free them here.
+	//
+	if	( m_isRewriter != true)
+	{
+		delete m_adaptor;
+
+		m_nodeStack.clear();
+
+		delete m_INVALID_NODE.get_token();
+		delete m_EOF_NODE.get_token();
+		delete m_DOWN.get_token();
+		delete m_UP.get_token();
+	}
+	
+	m_nodes.clear();
+}
+
+template<class ImplTraits>
+typename CommonTreeNodeStream<ImplTraits>::TreeType*	CommonTreeNodeStream<ImplTraits>::_LT(ANTLR_INT32 k)
+{
+	if	( m_p == -1)
+	{
+		this->fillBufferRoot();
+	}
+
+	if	(k < 0)
+	{
+		return this->LB(-k);
+	}
+	else if	(k == 0)
+	{
+		return	&(m_INVALID_NODE);
+	}
+
+	// k was a legitimate request, 
+	//
+	if	(( m_p + k - 1) >= (ANTLR_INT32)(m_nodes.size()))
+	{
+		return &(m_EOF_NODE);
+	}
+
+	return	m_nodes[ m_p + k - 1 ];
+}
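+
+// Editor's note (illustrative only): once the buffer is filled, LT(1) is the
+// node at index m_p, LT(2) the node at m_p + 1, and so on; LT(-k) delegates to
+// LB(k), LT(0) returns the INVALID node, and lookahead past the end of the
+// buffer returns the EOF node.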
+
+template<class ImplTraits>
+typename CommonTreeNodeStream<ImplTraits>::TreeType*	CommonTreeNodeStream<ImplTraits>::getTreeSource()
+{
+	return m_root;
+}
+
+template<class ImplTraits>
+typename CommonTreeNodeStream<ImplTraits>::TreeAdaptorType*	CommonTreeNodeStream<ImplTraits>::getTreeAdaptor()
+{
+	return m_adaptor;
+}
+
+template<class ImplTraits>
+void  CommonTreeNodeStream<ImplTraits>::set_uniqueNavigationNodes(bool uniqueNavigationNodes)
+{
+	m_uniqueNavigationNodes = uniqueNavigationNodes;
+}
+
+template<class ImplTraits>
+typename CommonTreeNodeStream<ImplTraits>::StringType  CommonTreeNodeStream<ImplTraits>::toString()
+{
+    return  this->toStringSS(m_root, NULL);
+}
+
+template<class ImplTraits>
+typename CommonTreeNodeStream<ImplTraits>::StringType  CommonTreeNodeStream<ImplTraits>::toStringSS(TreeType* start, TreeType* stop)
+{
+	StringType  buf;
+    this->toStringWork(start, stop, buf);
+    return  buf;
+}
+
+template<class ImplTraits>
+void CommonTreeNodeStream<ImplTraits>::toStringWork(TreeType* start, TreeType* stop, StringType& str)
+{
+	ANTLR_UINT32   n;
+	ANTLR_UINT32   c;
+	StringStreamType buf;
+
+	if	(!start->isNilNode() )
+	{
+		StringType	text;
+
+		text	= start->toString();
+
+		if  (text.empty())
+		{
+			buf << ' ';
+			buf << start->getType();
+		}
+		else
+			buf << text;
+	}
+
+	if	(start == stop)
+	{
+		return;		/* Finished */
+	}
+
+	n = start->getChildCount();
+
+	if	(n > 0 && ! start->isNilNode() )
+	{
+		buf << ' ';
+		buf << CommonTokenType::TOKEN_DOWN;
+	}
+
+	for	(c = 0; c<n ; c++)
+	{
+		TreeType*   child;
+		StringType  childText;
+
+		child = start->getChild(c);
+		// Accumulate each child's text into a StringType, as the recursive
+		// call expects a StringType&, then append it to this level's buffer.
+		this->toStringWork(child, stop, childText);
+		buf << childText;
+	}
+
+	if	(n > 0 && ! start->isNilNode() )
+	{
+		buf << ' ';
+		buf << CommonTokenType::TOKEN_UP;
+	}
+	str = buf.str();
+}
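+
+// Illustrative note (not part of the original runtime): for a tree such as
+// ^(PLUS a b), toStringWork() emits the root's text, then the numeric value of
+// TOKEN_DOWN, the children depth-first, and finally the numeric value of
+// TOKEN_UP, so each subtree is bracketed by the DOWN/UP token type numbers
+// rather than by parentheses.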
+
+template<class ImplTraits>
+typename  CommonTreeNodeStream<ImplTraits>::TreeType*	CommonTreeNodeStream<ImplTraits>::get(ANTLR_INT32 k)
+{
+	if( m_p == -1 )
+	{
+		this->fillBufferRoot();
+	}
+
+	return m_nodes[k];
+}
+
+template<class ImplTraits>
+void	CommonTreeNodeStream<ImplTraits>::replaceChildren(TreeType* parent, 
+															ANTLR_INT32 startChildIndex, 
+															ANTLR_INT32 stopChildIndex, 
+															TreeType* t)
+{
+	if	(parent != NULL)
+	{
+		TreeAdaptorType*	adaptor;
+		adaptor	= this->getTreeAdaptor();
+		adaptor->replaceChildren(parent, startChildIndex, stopChildIndex, t);
+	}
+}
+
+template<class ImplTraits>
+typename CommonTreeNodeStream<ImplTraits>::TreeType* CommonTreeNodeStream<ImplTraits>::LB(ANTLR_INT32 k)
+{
+	if	( k==0)
+	{
+		return	&(m_INVALID_NODE);
+	}
+
+	if	( (m_p - k) < 0)
+	{
+		return	&(m_INVALID_NODE);
+	}
+
+	return m_nodes[ m_p - k ];
+}
+
+template<class ImplTraits>
+void CommonTreeNodeStream<ImplTraits>::addNavigationNode(ANTLR_UINT32 ttype)
+{
+	TreeType*	    node;
+
+	node = NULL;
+
+	if	(ttype == CommonTokenType::TOKEN_DOWN)
+	{
+		if  (this->hasUniqueNavigationNodes() == true)
+		{
+			node    = this->newDownNode();
+		}
+		else
+		{
+			node    = &m_DOWN;
+		}
+	}
+	else
+	{
+		if  (this->hasUniqueNavigationNodes() == true)
+		{
+			node    = this->newUpNode();
+		}
+		else
+		{
+			node    = &m_UP;
+		}
+	}
+
+	// Now add the node we decided upon.
+	//
+	m_nodes.push_back(node);
+}
+
+template<class ImplTraits>
+typename CommonTreeNodeStream<ImplTraits>::TreeType*	CommonTreeNodeStream<ImplTraits>::newDownNode()
+{
+	TreeType*	    dNode;
+    CommonTokenType*    token;
+
+    token					= new CommonTokenType(CommonTokenType::TOKEN_DOWN);
+	token->set_tokText("DOWN");
+    dNode					= new TreeType(token);
+    return  dNode;
+}
+
+template<class ImplTraits>
+typename CommonTreeNodeStream<ImplTraits>::TreeType*	CommonTreeNodeStream<ImplTraits>::newUpNode()
+{
+	TreeType*	    uNode;
+    CommonTokenType*    token;
+
+    token					= new CommonTokenType(CommonTokenType::TOKEN_UP);
+	token->set_tokText("UP");
+    uNode					= new TreeType(token);
+    return  uNode;
+
+}
+
+template<class ImplTraits>
+bool  CommonTreeNodeStream<ImplTraits>::hasUniqueNavigationNodes() const
+{
+	 return  m_uniqueNavigationNodes;
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	CommonTreeNodeStream<ImplTraits>::getLookaheadSize()
+{
+	return	m_tail < m_head 
+	    ?	(m_lookAheadLength - m_head + m_tail)
+	    :	(m_tail - m_head);
+}
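+
+// Example (illustrative): with m_lookAheadLength == 8, m_head == 6 and
+// m_tail == 2 the window has wrapped, so the size is 8 - 6 + 2 == 4; with
+// m_head == 2 and m_tail == 6 it has not wrapped and the size is 6 - 2 == 4.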
+
+template<class ImplTraits>
+void	CommonTreeNodeStream<ImplTraits>::push(ANTLR_INT32 index)
+{
+	m_nodeStack.push(m_p);	// Save current index
+	this->seek(index);
+}
+
+template<class ImplTraits>
+ANTLR_INT32	CommonTreeNodeStream<ImplTraits>::pop()
+{
+	ANTLR_INT32	retVal;
+
+	retVal = m_nodeStack.top();
+	m_nodeStack.pop();
+	this->seek(retVal);
+	return retVal;
+}
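+
+// Example (illustrative): push(i) saves the current buffer position and seeks
+// to node index i, so a caller can make an excursion into another part of the
+// node buffer; a later pop() seeks back to the saved position and returns it,
+// restoring the walk exactly where it left off.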
+
+template<class ImplTraits>
+void	CommonTreeNodeStream<ImplTraits>::reset()
+{
+	if	( m_p != -1)
+	{
+		m_p	= 0;
+	}
+	BaseType::m_lastMarker		= 0;
+
+
+	// Free and reset the node stack only if this is not
+	// a rewriter, which is going to reuse the originating
+	// node stream's node stack.
+	//
+	if  (m_isRewriter != true)
+		m_nodeStack.clear();
+}
+
+template<class ImplTraits>
+void CommonTreeNodeStream<ImplTraits>::fillBufferRoot()
+{
+	// Call the generic buffer routine with the root as the
+	// argument
+	//
+	this->fillBuffer(m_root);
+	m_p = 0;					// Indicate we are at buffer start
+}
+
+template<class ImplTraits>
+void CommonTreeNodeStream<ImplTraits>::fillBuffer(TreeType* t)
+{
+	bool	nilNode;
+	ANTLR_UINT32	nCount;
+	ANTLR_UINT32	c;
+
+	nilNode = m_adaptor->isNilNode(t);
+
+	// If the supplied node is not a nil (list) node then we
+	// add the node itself to the vector.
+	//
+	if	(nilNode == false)
+	{
+		m_nodes.push_back(t);	
+	}
+
+	// Only add a DOWN node if the tree is not a nil tree and
+	// the tree does have children.
+	//
+	nCount = t->getChildCount();
+
+	if	(nilNode == false && nCount>0)
+	{
+		this->addNavigationNode( CommonTokenType::TOKEN_DOWN);
+	}
+
+	// We always add any children the tree contains, which is
+	// a recursive call to this function, which will cause similar
+	// recursion and implement a depth first addition
+	//
+	for	(c = 0; c < nCount; c++)
+	{
+		this->fillBuffer( m_adaptor->getChild(t, c));
+	}
+
+	// If the tree had children and was not a nil (list) node, then
+	// we need to add an UP node here to match the DOWN node.
+	//
+	if	(nilNode == false && nCount > 0)
+	{
+		this->addNavigationNode(CommonTokenType::TOKEN_UP);
+	}
+}
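+
+// Worked example (illustrative): filling the buffer from the tree
+// ^(A ^(B C) D) produces the flat node sequence
+//
+//     A DOWN B DOWN C UP D UP
+//
+// i.e. every non-nil node with children is followed by a DOWN navigation node,
+// its children in depth-first order, and a matching UP navigation node, while
+// a nil (list) root contributes only its children.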
+
+
+
+ANTLR_END_NAMESPACE()
+
diff --git a/runtime/Cpp/include/antlr3convertutf.hpp b/runtime/Cpp/include/antlr3convertutf.hpp
new file mode 100755
index 0000000..8085c29
--- /dev/null
+++ b/runtime/Cpp/include/antlr3convertutf.hpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2001-2004 Unicode, Inc.
+ * 
+ * Disclaimer
+ * 
+ * This source code is provided as is by Unicode, Inc. No claims are
+ * made as to fitness for any particular purpose. No warranties of any
+ * kind are expressed or implied. The recipient agrees to determine
+ * applicability of information provided. If this file has been
+ * purchased on magnetic or optical media from Unicode, Inc., the
+ * sole remedy for any claim will be exchange of defective media
+ * within 90 days of receipt.
+ * 
+ * Limitations on Rights to Redistribute This Code
+ * 
+ * Unicode, Inc. hereby grants the right to freely use the information
+ * supplied in this file in the creation of products supporting the
+ * Unicode Standard, and to make copies of this file in any form
+ * for internal or external distribution as long as this notice
+ * remains attached.
+ */
+
+/* ---------------------------------------------------------------------
+
+    Conversions between UTF32, UTF-16, and UTF-8.  Header file.
+
+    Several functions are included here, forming a complete set of
+    conversions between the three formats.  UTF-7 is not included
+    here, but is handled in a separate source file.
+
+    Each of these routines takes pointers to input buffers and output
+    buffers.  The input buffers are const.
+
+    Each routine converts the text between *sourceStart and sourceEnd,
+    putting the result into the buffer between *targetStart and
+    targetEnd. Note: the end pointers are *after* the last item: e.g. 
+    *(sourceEnd - 1) is the last item.
+
+    The return result indicates whether the conversion was successful,
+    and if not, whether the problem was in the source or target buffers.
+    (Only the first encountered problem is indicated.)
+
+    After the conversion, *sourceStart and *targetStart are both
+    updated to point to the end of last text successfully converted in
+    the respective buffers.
+
+    Input parameters:
+	sourceStart - pointer to a pointer to the source buffer.
+		The contents of this are modified on return so that
+		it points at the next thing to be converted.
+	targetStart - similarly, pointer to pointer to the target buffer.
+	sourceEnd, targetEnd - respectively pointers to the ends of the
+		two buffers, for overflow checking only.
+
+    These conversion functions take a ConversionFlags argument. When this
+    flag is set to strict, both irregular sequences and isolated surrogates
+    will cause an error.  When the flag is set to lenient, both irregular
+    sequences and isolated surrogates are converted.
+
+    Whether the flag is strict or lenient, all illegal sequences will cause
+    an error return. This includes sequences such as: <F4 90 80 80>, <C0 80>,
+    or <A0> in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code
+    must check for illegal sequences.
+
+    When the flag is set to lenient, characters over 0x10FFFF are converted
+    to the replacement character; otherwise (when the flag is set to strict)
+    they constitute an error.
+
+    Output parameters:
+	The value "sourceIllegal" is returned from some routines if the input
+	sequence is malformed.  When "sourceIllegal" is returned, the source
+	value will point to the illegal value that caused the problem. E.g.,
+	in UTF-8 when a sequence is malformed, it points to the start of the
+	malformed sequence.  
+
+    Author: Mark E. Davis, 1994.
+    Rev History: Rick McGowan, fixes & updates May 2001.
+		 Fixes & updates, Sept 2001.
+
+------------------------------------------------------------------------ */
+
+/* ---------------------------------------------------------------------
+    The following 4 definitions are compiler-specific.
+    The C standard does not guarantee that wchar_t has at least
+    16 bits, so wchar_t is no less portable than unsigned short!
+    All should be unsigned values to avoid sign extension during
+    bit mask & shift operations.
+------------------------------------------------------------------------ */
+
+
+// Changes for ANTLR3 - Jim Idle, January 2008.
+// builtin types defined for Unicode types changed to
+// aliases for the types that are system determined by
+// ANTLR at compile time.
+//
+// typedef unsigned long	UTF32;	/* at least 32 bits */
+// typedef unsigned short	UTF16;	/* at least 16 bits */
+// typedef unsigned char	UTF8;	/* typically 8 bits */
+// typedef unsigned char	Boolean; /* 0 or 1 */
+
+#ifndef	_ANTLR3_CONVERTUTF_H
+#define	_ANTLR3_CONVERTUTF_H
+
+ANTLR_BEGIN_NAMESPACE()
+
+typedef ANTLR_UINT32	UTF32;	/* at least 32 bits */
+typedef ANTLR_UINT16	UTF16;	/* at least 16 bits */
+typedef ANTLR_UINT8		UTF8;	/* typically 8 bits */
+
+/* Some fundamental constants */
+#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
+#define UNI_MAX_BMP (UTF32)0x0000FFFF
+#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
+#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
+#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
+
+#define UNI_SUR_HIGH_START  (UTF32)0xD800
+#define UNI_SUR_HIGH_END    (UTF32)0xDBFF
+#define UNI_SUR_LOW_START   (UTF32)0xDC00
+#define UNI_SUR_LOW_END     (UTF32)0xDFFF
+#define halfShift           ((UTF32)10)
+#define halfBase            ((UTF32)0x0010000UL)
+#define halfMask            ((UTF32)0x3FFUL)
+
+enum ConversionResult {
+	conversionOK, 		/* conversion successful */
+	sourceExhausted,	/* partial character in source, but hit end */
+	targetExhausted,	/* insuff. room in target for conversion */
+	sourceIllegal		/* source sequence is illegal/malformed */
+};
+
+enum ConversionFlags {
+	strictConversion = 0,
+	lenientConversion
+} ;
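+
+// Illustrative helper (an assumption for exposition, not part of the original
+// header): shows how the surrogate constants above recombine a UTF-16 pair
+// into a single UTF-32 code point.
+inline UTF32 exampleCombineSurrogates(UTF16 high, UTF16 low)
+{
+	// Shift the high surrogate back up by halfShift bits, add the low
+	// surrogate offset, then restore the halfBase that was subtracted when
+	// the pair was generated.
+	return (((UTF32)high - UNI_SUR_HIGH_START) << halfShift)
+	       + ((UTF32)low - UNI_SUR_LOW_START) + halfBase;
+}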
+
+
+
+ANTLR_END_NAMESPACE()
+
+#endif
+
+/* --------------------------------------------------------------------- */
diff --git a/runtime/Cpp/include/antlr3cyclicdfa.hpp b/runtime/Cpp/include/antlr3cyclicdfa.hpp
new file mode 100755
index 0000000..a0d66b9
--- /dev/null
+++ b/runtime/Cpp/include/antlr3cyclicdfa.hpp
@@ -0,0 +1,108 @@
+/// Definition of a cyclic dfa structure such that it can be
+/// initialized at compile time and have only a single
+/// runtime function that can deal with all cyclic dfa
+/// structures and show Java how it is done ;-)
+///
+#ifndef	ANTLR3_CYCLICDFA_HPP
+#define	ANTLR3_CYCLICDFA_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include   "antlr3defs.hpp"
+
+#ifdef ANTLR3_WINDOWS
+#pragma warning	(push)
+#pragma warning (disable : 4510)
+#pragma warning (disable : 4512)
+#pragma warning (disable : 4610)
+#endif
+
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits, class CtxType>
+class CyclicDFA : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename CtxType::StreamType StreamType;
+	typedef typename CtxType::ExceptionBaseType ExceptionBaseType;
+	typedef typename ImplTraits::template RecognizerType<StreamType> RecognizerType;
+	typedef typename StreamType::IntStreamType IntStreamType;
+	typedef typename StreamType::TokenType	TokenType;
+	typedef TokenType	CommonTokenType;
+	typedef CtxType ContextType;
+
+private:
+    /// Decision number that a particular static structure
+    ///  represents.
+    ///
+    const ANTLR_INT32		m_decisionNumber;
+
+    /// What this decision represents
+    ///
+    const ANTLR_UCHAR*			m_description;
+	const ANTLR_INT32* const	m_eot;
+    const ANTLR_INT32* const	m_eof;
+    const ANTLR_INT32* const	m_min;
+    const ANTLR_INT32* const	m_max;
+    const ANTLR_INT32* const	m_accept;
+    const ANTLR_INT32* const	m_special;
+    const ANTLR_INT32* const *const	m_transition;
+
+public:
+	CyclicDFA( ANTLR_INT32	decisionNumber
+				, const ANTLR_UCHAR*	description
+				, const ANTLR_INT32* const	eot
+				, const ANTLR_INT32* const	eof
+				, const ANTLR_INT32* const	min
+				, const ANTLR_INT32* const	max
+				, const ANTLR_INT32* const	accept
+				, const ANTLR_INT32* const	special
+				, const ANTLR_INT32* const *const	transition );
+	CyclicDFA( const CyclicDFA& cdfa );
+    CyclicDFA& operator=( const CyclicDFA& dfa);
+	
+	ANTLR_INT32	specialStateTransition(CtxType * ctx, RecognizerType* recognizer, IntStreamType* is, ANTLR_INT32 s);
+    ANTLR_INT32	specialTransition(CtxType * ctx, RecognizerType* recognizer, IntStreamType* is, ANTLR_INT32 s);
+
+	template<typename SuperType>
+    ANTLR_INT32	predict(CtxType* ctx, RecognizerType* recognizer, IntStreamType* is, SuperType& super);
+	
+private:
+	void noViableAlt(RecognizerType* rec, ANTLR_UINT32	s);
+};
+
+ANTLR_END_NAMESPACE()
+
+#ifdef ANTLR3_WINDOWS
+#pragma warning	(pop)
+#endif
+
+#include "antlr3cyclicdfa.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3cyclicdfa.inl b/runtime/Cpp/include/antlr3cyclicdfa.inl
new file mode 100755
index 0000000..b7b526f
--- /dev/null
+++ b/runtime/Cpp/include/antlr3cyclicdfa.inl
@@ -0,0 +1,204 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits, class CtxType>
+CyclicDFA<ImplTraits, CtxType>::CyclicDFA( ANTLR_INT32	decisionNumber
+				, const ANTLR_UCHAR*	description
+				, const ANTLR_INT32* const	eot
+				, const ANTLR_INT32* const	eof
+				, const ANTLR_INT32* const	min
+				, const ANTLR_INT32* const	max
+				, const ANTLR_INT32* const	accept
+				, const ANTLR_INT32* const	special
+				, const ANTLR_INT32* const *const	transition )
+				:m_decisionNumber(decisionNumber)
+				, m_eot(eot)
+				, m_eof(eof)
+				, m_min(min)
+				, m_max(max)
+				, m_accept(accept)
+				, m_special(special)
+				, m_transition(transition)
+{
+	m_description = description;
+}
+
+template<class ImplTraits, class CtxType>
+CyclicDFA<ImplTraits, CtxType>::CyclicDFA( const CyclicDFA& dfa )
+				:m_decisionNumber(dfa.m_decisionNumber)
+				, m_eot(dfa.m_eot)
+				, m_eof(dfa.m_eof)
+				, m_min(dfa.m_min)
+				, m_max(dfa.m_max)
+				, m_accept(dfa.m_accept)
+				, m_special(dfa.m_special)
+				, m_transition(dfa.m_transition)
+{
+	// The const table members must be copied in the initializer list; only the
+	// non-const description pointer can be assigned in the body.
+	m_description = dfa.m_description;
+}
+
+template<class ImplTraits, class CtxType>
+CyclicDFA<ImplTraits, CtxType>& CyclicDFA<ImplTraits, CtxType>::operator=( const CyclicDFA& dfa)
+{
+	m_decisionNumber = dfa.m_decisionNumber;
+	m_description = dfa.m_description;
+	m_eot = dfa.m_eot;
+	m_eof = dfa.m_eof;
+	m_min = dfa.m_min;
+	m_max = dfa.m_max;
+	m_accept = dfa.m_accept;
+	m_special = dfa.m_special;
+	m_transition = dfa.m_transition;
+	return *this;
+}
+
+template<class ImplTraits, class CtxType>
+ANTLR_INT32	CyclicDFA<ImplTraits, CtxType>::specialStateTransition(CtxType * ,
+																	RecognizerType* ,
+																	IntStreamType* , ANTLR_INT32 )
+{
+	return -1;
+}
+
+template<class ImplTraits, class CtxType>
+ANTLR_INT32	CyclicDFA<ImplTraits, CtxType>::specialTransition(CtxType * ,
+																	RecognizerType* ,
+																	IntStreamType* , ANTLR_INT32 )
+{
+	return 0;
+}
+
+template<class ImplTraits, class CtxType>
+  template<typename SuperType>
+ANTLR_INT32	CyclicDFA<ImplTraits, CtxType>::predict(CtxType * ctx,
+															RecognizerType* recognizer,
+															IntStreamType* is, SuperType& super)
+{
+	ANTLR_MARKER	mark;
+    ANTLR_INT32	s;
+    ANTLR_INT32	specialState;
+    ANTLR_INT32	c;
+
+    mark	= is->mark();	    /* Store where we are right now	*/
+    s		= 0;		    /* Always start with state 0	*/
+
+	for (;;)
+	{
+		/* Pick out any special state entry for this state
+		 */
+		specialState	= m_special[s];
+
+		/* Transition the special state and consume an input token
+		 */
+		if  (specialState >= 0)
+		{
+			s = super.specialStateTransition(ctx, recognizer, is, specialState);
+
+			// Error?
+			//
+			if	(s<0)
+			{
+				// If the predicate/rule raised an exception then we leave it
+				// intact, else we have a no-viable-alt error.
+				//
+				if	(recognizer->get_state()->get_error() != true)
+				{
+					this->noViableAlt(recognizer, s);
+				}
+				is->rewind(mark);
+				return	0;
+			}
+			is->consume();
+			continue;
+		}
+
+		/* Accept state?
+		 */
+		if  (m_accept[s] >= 1)
+		{
+			is->rewind(mark);
+			return  m_accept[s];
+		}
+
+		/* Look for a normal transition state based upon the input token element
+		 */
+		c = is->_LA(1);
+
+		/* Check against min and max for this state
+		 */
+		if  (c>= m_min[s] && c <= m_max[s])
+		{
+			ANTLR_INT32   snext;
+
+			/* What is the next state?
+			 */
+			snext = m_transition[s][c - m_min[s]];
+
+			if	(snext < 0)
+			{
+				/* The input was in range but there was no normal transition, so we
+				 * must check EOT, which acts like the else clause.
+				 * eot[s] >= 0 indicates that an EOT edge goes to another
+				 * state.
+				 */
+				if  ( m_eot[s] >= 0)
+				{
+					s = m_eot[s];
+					is->consume();
+					continue;
+				}
+				this->noViableAlt(recognizer, s);
+				is->rewind(mark);
+				return	0;
+			}
+
+			/* New current state - move to it
+			 */
+			s	= snext;
+			is->consume();
+			continue;
+		}
+		/* EOT Transition?
+		 */
+		if  ( m_eot[s] >= 0)
+		{
+			s	= m_eot[s];
+			is->consume();
+			continue;
+		}
+		/* EOF transition to accept state?
+		 */
+		if  ( c == ImplTraits::CommonTokenType::TOKEN_EOF && m_eof[s] >= 0)
+		{
+			is->rewind(mark);
+			return  m_accept[m_eof[s]];
+		}
+
+		/* No alt, so bomb
+		 */
+		this->noViableAlt(recognizer, s);
+		is->rewind(mark);
+		return 0;
+	}
+}
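+
+// Worked example (illustrative): suppose state 0 has m_min[0] == 'a',
+// m_max[0] == 'c', m_transition[0] == { 3, -1, 7 }, m_accept[3] == 1 and
+// m_special[3] < 0.  An LA(1) of 'a' moves the DFA from state 0 to state 3;
+// on the next loop iteration the accept test fires, the stream is rewound to
+// the saved mark and predict() returns alternative 1.  An LA(1) of 'b' yields
+// -1 from the transition table, so the code falls back to the EOT edge for
+// state 0 and, failing that, reports no viable alternative.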
+
+template<class ImplTraits, class CtxType>
+void CyclicDFA<ImplTraits, CtxType>::noViableAlt(RecognizerType* rec, ANTLR_UINT32 s)
+{
+	// In backtracking mode, we just set the failed flag so that the
+	// alt can just exit right now. If we are parsing though, then
+	// we want the exception to be raised.
+	//
+    if	(rec->get_state()->get_backtracking() > 0)
+    {
+		rec->get_state()->set_failed(true);
+    }
+	else
+	{
+		ANTLR_Exception<ImplTraits, NO_VIABLE_ALT_EXCEPTION, StreamType>* ex 
+			= new ANTLR_Exception<ImplTraits, NO_VIABLE_ALT_EXCEPTION, StreamType>( rec, (const char*)m_description );
+		ex->set_decisionNum( m_decisionNumber );
+		ex->set_state(s);
+	}
+}
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3debugeventlistener.hpp b/runtime/Cpp/include/antlr3debugeventlistener.hpp
new file mode 100755
index 0000000..21fcf59
--- /dev/null
+++ b/runtime/Cpp/include/antlr3debugeventlistener.hpp
@@ -0,0 +1,400 @@
+/**
+ * \file
+ * The definition of all debugging events that a recognizer can trigger.
+ *
+ * \remark
+ *  From the java implementation by Terence Parr...
+ *  I did not create a separate AST debugging interface as it would create
+ *  lots of extra classes and DebugParser has a dbg var defined, which makes
+ *  it hard to change to ASTDebugEventListener.  I looked hard at this issue
+ *  and it is easier to understand as one monolithic event interface for all
+ *  possible events.  Hopefully, adding ST debugging stuff won't be bad.  Leave
+ *  for future. 4/26/2006.
+ */
+
+#ifndef	ANTLR3_DEBUG_EVENT_LISTENER_HPP
+#define	ANTLR3_DEBUG_EVENT_LISTENER_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+/// Default debugging port
+///
+#define DEFAULT_DEBUGGER_PORT		0xBFCC
+
+/** The ANTLR3 debugging interface for communicating with ANTLRWorks. Function comments
+ *  mostly taken from the Java version.
+ */
+
+template<class ImplTraits>
+class DebugEventListener  : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+	typedef typename ImplTraits::TreeAdaptorType TreeAdaptorType;
+
+private:
+	/// The port number which the debug listener should listen on for a connection
+	///
+	ANTLR_UINT32		m_port;
+
+	/// The socket structure we receive after a successful accept on the serverSocket
+	///
+	SOCKET				m_socket;
+
+	/** The version of the debugging protocol supported by the providing
+	 *  instance of the debug event listener.
+	 */
+	int					m_PROTOCOL_VERSION;
+
+	/// The name of the grammar file that we are debugging
+	///
+	StringType			m_grammarFileName;
+
+	/// Indicates whether we have already connected or not
+	///
+	bool			m_initialized;
+
+	/// Used to serialize the values of any particular token we need to
+	/// send back to the debugger.
+	///
+	StringType		m_tokenString;
+
+
+	/// Allows the debug event system to access the adapter in use
+	/// by the recognizer, if this is a tree parser of some sort.
+	///
+	TreeAdaptorType*	m_adaptor;
+
+
+public:
+	/// Wait for a connection from the debugger and initiate the
+	/// debugging session.
+	///
+	virtual bool	handshake();
+
+	/** The parser has just entered a rule.  No decision has been made about
+	 *  which alt is predicted.  This is fired AFTER init actions have been
+	 *  executed.  Attributes are defined and available etc...
+	 */
+	virtual void	enterRule( const char * grammarFileName, const char * ruleName);
+
+	/** Because rules can have lots of alternatives, it is very useful to
+	 *  know which alt you are entering.  This is 1..n for n alts.
+	 */
+	virtual void			enterAlt( int alt);
+
+	/** This is the last thing executed before leaving a rule.  It is
+	 *  executed even if an exception is thrown.  This is triggered after
+	 *  error reporting and recovery have occurred (unless the exception is
+	 *  not caught in this rule).  This implies an "exitAlt" event.
+	 */
+	virtual void			exitRule( const char * grammarFileName, const char * ruleName);
+
+	/** Track entry into any (...) subrule or other EBNF construct
+	 */
+	virtual void			enterSubRule( int decisionNumber);
+
+	virtual void			exitSubRule( int decisionNumber);
+
+	/** Every decision, fixed k or arbitrary, has an enter/exit event
+	 *  so that a GUI can easily track what LT/consume events are
+	 *  associated with prediction.  You will see a single enter/exit
+	 *  subrule but multiple enter/exit decision events, one for each
+	 *  loop iteration.
+	 */
+	virtual void			enterDecision( int decisionNumber);
+
+	virtual void			exitDecision( int decisionNumber);
+
+	/** An input token was consumed; matched by any kind of element.
+	 *  Trigger after the token was matched by things like match(), matchAny().
+	 */
+	virtual void			consumeToken( CommonTokenType* t);
+
+	/** An off-channel input token was consumed.
+	 *  Trigger after the token was matched by things like match(), matchAny()
+	 *  (unless, of course, the hidden token is the first thing in the input stream).
+	 */
+	virtual void			consumeHiddenToken( CommonTokenType* t);
+
+	/** Somebody (anybody) looked ahead.  Note that this actually gets
+	 *  triggered by both LA and LT calls.  The debugger will want to know
+	 *  which Token object was examined.  Like consumeToken, this indicates
+	 *  what token was seen at that depth.  A remote debugger cannot look
+	 *  ahead into a file it doesn't have so LT events must pass the token
+	 *  even if the info is redundant.
+	 */
+	virtual void			LT( int i, CommonTokenType* t);
+
+	/** The parser is going to look arbitrarily ahead; mark this location.
+	 *  The token stream's marker is sent in case you need it.
+	 */
+	virtual void			mark( ANTLR_MARKER marker);
+
+	/** After an arbitrarily long lookahead as with a cyclic DFA (or with
+	 *  any backtrack), this informs the debugger that stream should be
+	 *  rewound to the position associated with marker.
+	 */
+	virtual void			rewind( ANTLR_MARKER marker);
+
+	/** Rewind to the input position of the last marker.
+	 *  Used currently only after a cyclic DFA and just
+	 *  before starting a sem/syn predicate to get the
+	 *  input position back to the start of the decision.
+	 *  Do not "pop" the marker off the state.  mark(i)
+	 *  and rewind(i) should balance still.
+	 */
+	virtual void			rewindLast();
+
+	virtual void			beginBacktrack( int level);
+
+	virtual void			endBacktrack( int level, bool successful);
+
+	/** To watch a parser move through the grammar, the parser needs to
+	 *  inform the debugger what line/charPos it is passing in the grammar.
+	 *  For now, this does not know how to switch from one grammar to the
+	 *  other and back for island grammars etc...
+	 *
+	 *  This should also allow breakpoints because the debugger can stop
+	 *  the parser whenever it hits this line/pos.
+	 */
+	virtual void			location( int line, int pos);
+
+	/** A recognition exception occurred such as NoViableAltException.  I made
+	 *  this a generic event so that I can alter the exception hierarchy later
+	 *  without having to alter all the debug objects.
+	 *
+	 *  Upon error, the stack of enter rule/subrule must be properly unwound.
+	 *  If no viable alt occurs it is within an enter/exit decision, which
+	 *  also must be rewound.  Even the rewind for each mark must be unwound.
+	 *  In the Java target this is pretty easy using try/finally, if a bit
+	 *  ugly in the generated code.  The rewind is generated in DFA.predict()
+	 *  actually so no code needs to be generated for that.  For languages
+	 *  w/o this "finally" feature (C++?), the target implementor will have
+	 *  to build an event stack or something.
+	 *
+	 *  Across a socket for remote debugging, only the RecognitionException
+	 *  data fields are transmitted.  The token object or whatever that
+	 *  caused the problem was the last object referenced by LT.  The
+	 *  immediately preceding LT event should hold the unexpected Token or
+	 *  char.
+	 *
+	 *  Here is a sample event trace for grammar:
+	 *
+	 *  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
+     *    | D
+     *    ;
+     *
+	 *  The sequence for this rule (with no viable alt in the subrule) for
+	 *  input 'c c' (there are 3 tokens) is:
+	 *
+	 *		commence
+	 *		LT(1)
+	 *		enterRule b
+	 *		location 7 1
+	 *		enter decision 3
+	 *		LT(1)
+	 *		exit decision 3
+	 *		enterAlt1
+	 *		location 7 5
+	 *		LT(1)
+	 *		consumeToken [c/<4>,1:0]
+	 *		location 7 7
+	 *		enterSubRule 2
+	 *		enter decision 2
+	 *		LT(1)
+	 *		LT(1)
+	 *		recognitionException NoViableAltException 2 1 2
+	 *		exit decision 2
+	 *		exitSubRule 2
+	 *		beginResync
+	 *		LT(1)
+	 *		consumeToken [c/<4>,1:1]
+	 *		LT(1)
+	 *		endResync
+	 *		LT(-1)
+	 *		exitRule b
+	 *		terminate
+	 */
+	template<typename ExceptionBaseType>
+	void recognitionException( ExceptionBaseType* ) {}
+
+	/** Indicates the recognizer is about to consume tokens to resynchronize
+	 *  the parser.  Any consume events from here until the recovered event
+	 *  are not part of the parse--they are dead tokens.
+	 */
+	virtual void			beginResync();
+
+	/** Indicates that the recognizer has finished consuming tokens in order
+	 *  to resynchronize.  There may be multiple beginResync/endResync pairs
+	 *  before the recognizer comes out of errorRecovery mode (in which
+	 *  multiple errors are suppressed).  This will be useful
+	 *  in a GUI where you probably want to grey out tokens that are consumed
+	 *  but not matched to anything in the grammar.  Anything between
+	 *  a beginResync/endResync pair was tossed out by the parser.
+	 */
+	virtual void			endResync();
+
+	/** A semantic predicate was evaluated with this result and action text.
+	*/
+	virtual void			semanticPredicate( bool result, const char * predicate);
+
+	/** Announce that parsing has begun.  Not technically useful except for
+	 *  sending events over a socket.  A GUI for example will launch a thread
+	 *  to connect and communicate with a remote parser.  The thread will want
+	 *  to notify the GUI when a connection is made.  ANTLR parsers
+	 *  trigger this upon entry to the first rule (the ruleLevel is used to
+	 *  figure this out).
+	 */
+	virtual void			commence();
+
+	/** Parsing is over; successfully or not.  Mostly useful for telling
+	 *  remote debugging listeners that it's time to quit.  When the rule
+	 *  invocation level goes to zero at the end of a rule, we are done
+	 *  parsing.
+	 */
+	virtual void	terminate();
+
+	/// Retrieve the acknowledgement response from the debugger. In fact this
+	/// response is never used at the moment, so we just read whatever
+	/// is in the socket buffer and throw it away.
+	///
+	virtual void	ack();
+
+	// T r e e  P a r s i n g
+
+	/** Input for a tree parser is an AST, but we know nothing for sure
+	 *  about a node except its type and text (obtained from the adaptor).
+	 *  This is the analog of the consumeToken method.  The ID is usually
+	 *  the memory address of the node.
+	 *  If the type is UP or DOWN, then
+	 *  the ID is not really meaningful as it's fixed--there is
+	 *  just one UP node and one DOWN navigation node.
+	 *
+	 *  Note that unlike the Java version, the node type of the C parsers
+	 *  is always fixed as pANTLR3_BASE_TREE because all such structures
+	 *  contain a super pointer to their parent, which is generally COMMON_TREE and within
+	 *  that there is a super pointer that can point to a user type that encapsulates it.
+	 *  Almost akin to saying that it is an interface pointer except we don't need to
+	 *  know what the interface is in full, just those bits that are the base.
+	 * @param t
+	 */
+	virtual void	consumeNode( TreeType* t);
+
+	/** The tree parser looked ahead.  If the type is UP or DOWN,
+	 *  then the ID is not really meaningful as it's fixed--there is
+	 *  just one UP node and one DOWN navigation node.
+	 */
+	virtual void	LTT( int i, TreeType* t);
+
+
+	// A S T  E v e n t s
+
+	/** A nil was created (even nil nodes have a unique ID...
+	 *  they are not "null" per se).  As of 4/28/2006, this
+	 *  seems to be uniquely triggered when starting a new subtree
+	 *  such as when entering a subrule in automatic mode and when
+	 *  building a tree in rewrite mode.
+     *
+ 	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID is set.
+	 */
+	virtual void	nilNode( TreeType* t);
+
+	/** If a syntax error occurs, recognizers bracket the error
+	 *  with an error node if they are building ASTs. This event
+	 *  notifies the listener that this is the case
+	 */
+	virtual void	errorNode( TreeType* t);
+
+	/** Announce a new node built from token elements such as type etc...
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID, type, text are
+	 *  set.
+	 */
+	virtual void	createNode( TreeType* t);
+
+	/** Announce a new node built from an existing token.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only node.ID and token.tokenIndex
+	 *  are set.
+	 */
+	virtual void	createNodeTok( TreeType* node, CommonTokenType* token);
+
+	/** Make a node the new root of an existing root.
+	 *
+	 *  Note: the newRootID parameter is possibly different
+	 *  than the TreeAdaptor.becomeRoot() newRoot parameter.
+	 *  In our case, it will always be the result of calling
+	 *  TreeAdaptor.becomeRoot() and not root_n or whatever.
+	 *
+	 *  The listener should assume that this event occurs
+	 *  only when the current subrule (or rule) subtree is
+	 *  being reset to newRootID.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only IDs are set.
+	 *
+	 *  @see org.antlr.runtime.tree.TreeAdaptor.becomeRoot()
+	 */
+	virtual void	becomeRoot( TreeType* newRoot, TreeType* oldRoot);
+
+	/** Make childID a child of rootID.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only IDs are set.
+	 *
+	 *  @see org.antlr.runtime.tree.TreeAdaptor.addChild()
+	 */
+	virtual void	addChild( TreeType* root, TreeType* child);
+
+	/** Set the token start/stop token index for a subtree root or node.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID is set.
+	 */
+	virtual void	setTokenBoundaries( TreeType* t, ANTLR_MARKER tokenStartIndex, ANTLR_MARKER tokenStopIndex);
+
+	/// Free up the resources allocated to this structure
+	///
+	virtual ~DebugEventListener();
+};
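+
+// Illustrative sketch (an assumption for exposition, not part of the original
+// runtime; it presumes the runtime provides default definitions of the virtual
+// events elsewhere).  A custom listener simply derives from this class and
+// overrides the events it cares about, for example:
+//
+//	template<class ImplTraits>
+//	class TracingDebugEventListener : public DebugEventListener<ImplTraits>
+//	{
+//	public:
+//		virtual void enterRule( const char* grammarFileName, const char* ruleName )
+//		{
+//			// record or print the rule entry here
+//		}
+//		virtual void exitRule( const char* grammarFileName, const char* ruleName )
+//		{
+//			// record or print the rule exit here
+//		}
+//	};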
+
+ANTLR_END_NAMESPACE()
+
+#endif
+
diff --git a/runtime/Cpp/include/antlr3defs.hpp b/runtime/Cpp/include/antlr3defs.hpp
new file mode 100755
index 0000000..b27db5f
--- /dev/null
+++ b/runtime/Cpp/include/antlr3defs.hpp
@@ -0,0 +1,321 @@
+/** \file
+ * Basic type and constant definitions for ANTLR3 Runtime.
+ */
+#ifndef	_ANTLR3DEFS_HPP
+#define	_ANTLR3DEFS_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/* Following are for generated code, they are not referenced internally!!!
+ */
+#if !defined(ANTLR_HUGE) && !defined(ANTLR_AVERAGE) && !defined(ANTLR_SMALL)
+#define	ANTLR_AVERAGE
+#endif
+
+#ifdef	ANTLR_HUGE
+#ifndef	ANTLR_SIZE_HINT
+#define	ANTLR_SIZE_HINT        2049
+#endif
+#ifndef	ANTLR_LIST_SIZE_HINT
+#define	ANTLR_LIST_SIZE_HINT   127
+#endif
+#endif
+
+#ifdef	ANTLR_AVERAGE
+#ifndef	ANTLR_SIZE_HINT
+#define	ANTLR_SIZE_HINT        1025
+#define	ANTLR_LIST_SIZE_HINT   63
+#endif
+#endif
+
+#ifdef	ANTLR_SMALL
+#ifndef	ANTLR_SIZE_HINT
+#define	ANTLR_SIZE_HINT        211
+#define	ANTLR_LIST_SIZE_HINT   31
+#endif
+#endif
+
+// Definitions that indicate the encoding scheme of character streams, strings, etc.
+//
+/// Indicates Big Endian for encodings where this makes sense
+///
+#define ANTLR_BE           1
+
+/// Indicates Little Endian for encodings where this makes sense
+///
+#define ANTLR_LE           2
+
+/// General latin-1 or other 8 bit encoding scheme such as straight ASCII
+///
+#define ANTLR_ENC_8BIT     4
+
+/// UTF-8 encoding scheme
+///
+#define ANTLR_ENC_UTF8     8
+
+/// UTF-16 encoding scheme (which also covers UCS2 as that does not have surrogates)
+///
+#define ANTLR_ENC_UTF16        16
+#define ANTLR_ENC_UTF16BE      16 + ANTLR_BE
+#define ANTLR_ENC_UTF16LE      16 + ANTLR_LE
+
+/// UTF-32 encoding scheme (basically straight 32 bit)
+///
+#define ANTLR_ENC_UTF32        32
+#define ANTLR_ENC_UTF32BE      32 + ANTLR_BE
+#define ANTLR_ENC_UTF32LE      32 + ANTLR_LE
+
+/// Input is 8 bit EBCDIC (which we convert to 8 bit ASCII on the fly)
+///
+#define ANTLR_ENC_EBCDIC       64
+
+#define ANTLR_BEGIN_NAMESPACE() namespace antlr3 {
+#define ANTLR_END_NAMESPACE() }
+
+#define ANTLR_USE_64BIT
+
+/* Common definitions come first
+ */
+#include    <antlr3errors.hpp>
+
+/* Work out what operating system/compiler this is. We just do this once
+ * here and use an internal symbol after this.
+ */
+#ifdef	_WIN64
+
+# ifndef	ANTLR_WINDOWS
+#   define	ANTLR_WINDOWS
+# endif
+# define	ANTLR_WIN64
+# define	ANTLR_USE_64BIT
+
+#else
+
+#ifdef	_WIN32
+# ifndef	ANTLR_WINDOWS
+#  define	ANTLR_WINDOWS
+# endif
+
+#define	ANTLR_WIN32
+#endif
+
+#endif
+
+#ifdef	ANTLR_WINDOWS 
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define	WIN32_LEAN_AND_MEAN
+#endif
+
+/* Allow VC 8 (vs2005) and above to use 'secure' versions of various functions such as sprintf
+ */
+#ifndef	_CRT_SECURE_NO_DEPRECATE 
+#define	_CRT_SECURE_NO_DEPRECATE 
+#endif
+
+#include    <stdlib.h>
+#include    <winsock2.h>
+#include    <sys/types.h>
+#include    <sys/stat.h>
+#include    <stdarg.h>
+
+#define	ANTLR_API      __declspec(dllexport)
+#define	ANTLR_CDECL    __cdecl
+#define ANTLR_FASTCALL __fastcall
+
+
+#ifndef __MINGW32__
+// Standard Windows types
+//
+typedef	INT32	ANTLR_CHAR;
+typedef	UINT32	ANTLR_UCHAR;
+
+typedef	INT8	ANTLR_INT8;
+typedef	INT16	ANTLR_INT16;
+typedef	INT32	ANTLR_INT32;
+typedef	INT64	ANTLR_INT64;
+typedef	UINT8	ANTLR_UINT8;
+typedef	UINT16	ANTLR_UINT16;
+typedef	UINT32	ANTLR_UINT32;
+typedef	UINT64	ANTLR_UINT64;
+typedef UINT64  ANTLR_BITWORD;
+
+#else
+// Mingw uses stdint.h and fails to define standard Microsoft typedefs
+// such as UINT16, hence we must use stdint.h for Mingw.
+//
+#include <stdint.h>
+typedef int32_t     ANTLR_CHAR;
+typedef uint32_t    ANTLR_UCHAR;
+
+typedef int8_t	    ANTLR_INT8;
+typedef int16_t	    ANTLR_INT16;
+typedef int32_t	    ANTLR_INT32;
+typedef int64_t	    ANTLR_INT64;
+
+typedef uint8_t	    ANTLR_UINT8;
+typedef uint16_t    ANTLR_UINT16;
+typedef uint32_t    ANTLR_UINT32;
+typedef uint64_t    ANTLR_UINT64;
+typedef uint64_t    ANTLR_BITWORD;
+
+#endif
+
+
+
+#define	ANTLR_UINT64_LIT(lit)  lit##ULL
+
+#define	ANTLR_INLINE	        __inline
+
+typedef FILE *	    ANTLR_FDSC;
+typedef	struct stat ANTLR_FSTAT_STRUCT;
+
+
+
+#ifdef	ANTLR_USE_64BIT
+#define ANTLR_UINT64_CAST(ptr) ((ANTLR_UINT64)(ptr))
+#define	ANTLR_UINT32_CAST(ptr)	(ANTLR_UINT32)((ANTLR_UINT64)(ptr))
+typedef ANTLR_INT64		ANTLR_MARKER;			
+typedef ANTLR_UINT64		ANTLR_INTKEY;
+#else
+#define ANTLR_UINT64_CAST(ptr) (ANTLR_UINT64)((ANTLR_UINT32)(ptr))
+#define	ANTLR_UINT32_CAST(ptr)	(ANTLR_UINT32)(ptr)
+typedef	ANTLR_INT32		ANTLR_MARKER;
+typedef ANTLR_UINT32	ANTLR_INTKEY;
+#endif
+
+#ifdef	ANTLR_WIN32
+#endif
+
+#ifdef	ANTLR_WIN64
+#endif
+
+
+typedef	int			ANTLR_SALENT;								// Type used for size of accept structure
+typedef struct sockaddr_in	ANTLR_SOCKADDRT, * pANTLR_SOCKADDRT;	// Type used for socket address declaration
+typedef struct sockaddr		ANTLR_SOCKADDRC, * pANTLR_SOCKADDRC;	// Type used for cast on accept()
+
+#define	ANTLR_CLOSESOCKET	closesocket
+
+/* Turn off over-zealous warnings, such as those complaining about strdup.
+ */
+
+/* Don't complain about "deprecated" functions such as strdup
+ */
+#pragma warning( disable : 4996 )
+
+#else
+
+#ifdef __LP64__
+#define ANTLR_USE_64BIT
+#endif
+
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <unistd.h>
+
+#define _stat stat
+
+typedef int SOCKET;
+
+/* Inherit type definitions for autoconf
+ */
+typedef int32_t	    ANTLR_CHAR;
+typedef uint32_t    ANTLR_UCHAR;
+
+typedef int8_t	    ANTLR_INT8;
+typedef int16_t	    ANTLR_INT16;
+typedef int32_t	    ANTLR_INT32;
+typedef int64_t	    ANTLR_INT64;
+
+typedef uint8_t	    ANTLR_UINT8;
+typedef uint16_t    ANTLR_UINT16;
+typedef uint32_t    ANTLR_UINT32;
+typedef uint64_t    ANTLR_UINT64;
+typedef uint64_t    ANTLR_BITWORD;
+
+#define ANTLR_INLINE   inline
+#define	ANTLR_API
+
+typedef FILE *	    ANTLR_FDSC;
+typedef	struct stat ANTLR_FSTAT_STRUCT;
+
+#ifdef	ANTLR_USE_64BIT
+#define	ANTLR_FUNC_PTR(ptr)    (void *)((ANTLR_UINT64)(ptr))
+#define ANTLR_UINT64_CAST(ptr)	((ANTLR_UINT64)(ptr))
+#define	ANTLR_UINT32_CAST(ptr) (ANTLR_UINT32)((ANTLR_UINT64)(ptr))
+typedef ANTLR_INT64		ANTLR_MARKER;
+typedef ANTLR_UINT64		ANTLR_INTKEY;
+#else
+#define	ANTLR_FUNC_PTR(ptr)	(void *)((ANTLR_UINT32)(ptr))
+#define ANTLR_UINT64_CAST(ptr) (ANTLR_UINT64)((ANTLR_UINT32)(ptr))
+#define	ANTLR_UINT32_CAST(ptr)	(ANTLR_UINT32)(ptr)
+typedef	ANTLR_INT32		ANTLR_MARKER;
+typedef ANTLR_UINT32		ANTLR_INTKEY;
+#endif
+#define	ANTLR_UINT64_LIT(lit)	lit##ULL
+
+#endif
+
+#ifdef ANTLR_USE_64BIT
+#define ANTLR_TRIE_DEPTH 63
+#else
+#define ANTLR_TRIE_DEPTH 31
+#endif
+/* Pre-declare the typedefs for all the interfaces; they can then
+ * be inter-dependent and we will let the linker
+ * sort it out for us.
+ */
+#include    <antlr3interfaces.hpp>
+
+// Include the unicode.org conversion library header.
+//
+#include    <antlr3convertutf.hpp>
+
+enum ChannelType
+{
+	/** Default channel for a token
+	*/
+	TOKEN_DEFAULT_CHANNEL	 = 0
+	/** Reserved channel number for a HIDDEN token - a token that
+		*  is hidden from the parser.
+		*/
+	,	HIDDEN		=		99
+};
+
+#endif	/* _ANTLR3DEFS_HPP	*/
diff --git a/runtime/Cpp/include/antlr3errors.hpp b/runtime/Cpp/include/antlr3errors.hpp
new file mode 100755
index 0000000..876df4f
--- /dev/null
+++ b/runtime/Cpp/include/antlr3errors.hpp
@@ -0,0 +1,49 @@
+#ifndef	_ANTLR3ERRORS_HPP
+#define	_ANTLR3ERRORS_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#define	ANTLR_SUCCESS	0
+#define	ANTLR_FAIL	1
+
+/** Indicates end of character stream and is an invalid Unicode code point. */
+#define ANTLR_CHARSTREAM_EOF	0xFFFFFFFF
+
+/** Indicates  memoizing on a rule failed.
+ */
+#define	MEMO_RULE_FAILED	0xFFFFFFFE
+#define	MEMO_RULE_UNKNOWN	0xFFFFFFFF
+
+
+#define	ANTLR_ERR_BASE	    0
+#define	ANTLR_ERR_NOMEM    (ANTLR_ERR_BASE + 1)
+#define	ANTLR_ERR_NOFILE   (ANTLR_ERR_BASE + 2)
+#define	ANTLR_ERR_HASHDUP  (ANTLR_ERR_BASE + 3)
+
+#endif	/* _ANTLR3ERRORS_HPP */
diff --git a/runtime/Cpp/include/antlr3exception.hpp b/runtime/Cpp/include/antlr3exception.hpp
new file mode 100755
index 0000000..beca1fc
--- /dev/null
+++ b/runtime/Cpp/include/antlr3exception.hpp
@@ -0,0 +1,209 @@
+/** \file
+ *  Contains the definition of a basic ANTLR3 exception structure created
+ *  by a recognizer when errors are found/predicted.
+
+ * Two things to note for the C++ target:
+   a) This is not a C++ exception. Consider it just another class. It has to be
+   like this because there is inbuilt recovery and hence a try..catch block for
+   every new token, which is not how C++ exceptions work. There is still exception
+   support, as we handle things like out-of-memory by throwing exceptions.
+
+   b) There is no use in implementing templates here, as all the exceptions are grouped
+   in one container and hence need virtual functions. But this cost is incurred only when
+   there is an exception or while deleting the base recognizer, so it should not add
+   overhead in normal operation.
+ */
+#ifndef	_ANTLR3_EXCEPTION_HPP
+#define	_ANTLR3_EXCEPTION_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+/** Base structure for an ANTLR3 exception tracker
+ */
+
+template<class ImplTraits, class StreamType>
+class ANTLR_ExceptionBase
+{
+public:
+	typedef typename StreamType::UnitType TokenType;
+	typedef typename StreamType::IntStreamType IntStreamType;
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::StringStreamType StringStreamType;
+	typedef typename ImplTraits::BitsetType BitsetType;
+	typedef typename ImplTraits::BitsetListType BitsetListType;
+	typedef typename ImplTraits::template ExceptionBaseType<StreamType> ExceptionBaseType;
+
+protected:
+    /** The printable message that goes with this exception, in your preferred
+     *  encoding format. ANTLR just uses ASCII by default but you can ignore these
+     *  messages or convert them to another format or whatever of course. They are
+     *  really internal messages that you then decide how to print out in a form that
+     *  the users of your product will understand, as they are unlikely to know what
+     *  to do with "Recognition exception at: [[TOK_GERUND..... " ;-)
+     */
+    StringType		m_message;
+
+    /** Name of the file/input source for reporting. Note that this may be empty!!
+     */
+    StringType		m_streamName;
+
+    /** Indicates the index of the 'token' we were looking at when the
+     *  exception occurred.
+     */
+    ANTLR_MARKER	m_index;
+
+    /** Indicates what the current token/tree was when the error occurred. Since not
+     *  all input streams will be able to retrieve the nth token, we track it here
+     *  instead. This is for parsers, and even tree parsers may set this.
+     */
+    const TokenType*		m_token;
+
+    /** Pointer to the next exception in the chain (if any)
+     */
+    ExceptionBaseType*  m_nextException;
+
+    /** Indicates the token we were expecting to see next when the error occurred
+     */
+    ANTLR_UINT32	m_expecting;
+
+    /** Indicates a set of tokens that we were expecting to see one of when the
+     *  error occurred. It is a follow-set bitset list, so you can load it and use ->toIntList() on it
+     *  to generate an array of the integer tokens that it represents.
+     */
+    BitsetListType*	m_expectingSet;
+
+    /** If this is a tree parser exception then the node is set to point to the node
+     * that caused the issue.
+     */
+    TokenType*		m_node;
+
+    /** The current character when an error occurred - for lexers.
+     */
+    ANTLR_UCHAR		m_c;
+
+    /** Track the line at which the error occurred in case this is
+     *  generated from a lexer.  We need to track this since the
+     *  unexpected char doesn't carry the line info.
+     */
+    ANTLR_UINT32   	m_line;
+
+    /** Character position in the line where the error occurred.
+     */
+    ANTLR_INT32   	m_charPositionInLine;
+
+    /** decision number for NVE
+     */
+    ANTLR_UINT32   	m_decisionNum;
+
+    /** State for NVE
+     */
+    ANTLR_UINT32	m_state;
+
+    /** Rule name for failed predicate exception
+     */
+    StringType		m_ruleName;
+
+    /** Pointer to the input stream that this exception occurred in.
+     */
+    IntStreamType* 	m_input;
+
+public:
+	StringType& get_message();
+	StringType& get_streamName();
+	ANTLR_MARKER get_index() const;
+	const TokenType* get_token() const;
+	ExceptionBaseType* get_nextException() const;
+	ANTLR_UINT32 get_expecting() const;
+	BitsetListType* get_expectingSet() const;
+	TokenType* get_node() const;
+	ANTLR_UCHAR get_c() const;
+	ANTLR_UINT32 get_line() const;
+	ANTLR_INT32 get_charPositionInLine() const;
+	ANTLR_UINT32 get_decisionNum() const;
+	ANTLR_UINT32 get_state() const;
+	StringType& get_ruleName();
+	IntStreamType* get_input() const;
+	void  set_message( const StringType& message );
+	void  set_streamName( const StringType& streamName );
+	void  set_index( ANTLR_MARKER index );
+	void  set_token( const TokenType* token );
+	void  set_nextException( ExceptionBaseType* nextException );
+	void  set_expecting( ANTLR_UINT32 expecting );
+	void  set_expectingSet( BitsetListType* expectingSet );
+	void  set_node( TokenType* node );
+	void  set_c( ANTLR_UCHAR c );
+	void  set_line( ANTLR_UINT32 line );
+	void  set_charPositionInLine( ANTLR_INT32 charPositionInLine );
+	void  set_decisionNum( ANTLR_UINT32 decisionNum );
+	void  set_state( ANTLR_UINT32 state );
+	void  set_ruleName( const StringType& ruleName );
+	void  set_input( IntStreamType* input );
+	StringType getDescription() const;
+	
+	virtual StringType getName() const = 0;
+	virtual ANTLR_UINT32 getType() const = 0;
+	virtual void print() const = 0;
+	virtual void displayRecognitionError( ANTLR_UINT8** tokenNames, StringStreamType& str ) const = 0;
+
+    virtual ~ANTLR_ExceptionBase();
+
+protected:
+	ANTLR_ExceptionBase(const StringType& message);
+};
+
+
+template<class ImplTraits, ExceptionType Ex, class StreamType>
+class ANTLR_Exception  :  public ImplTraits::template ExceptionBaseType<StreamType>
+{
+public:
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::StringStreamType StringStreamType;
+	typedef typename ImplTraits::BitsetType BitsetType;
+	typedef typename ImplTraits::template ExceptionBaseType<StreamType> BaseType;
+
+public:
+	template<typename BaseRecognizerType>
+	ANTLR_Exception(BaseRecognizerType* recognizer, const StringType& message);
+
+	const StringType& get_name() const;
+	virtual StringType getName() const;
+	virtual ANTLR_UINT32 getType() const;
+	virtual void print() const;
+	virtual void displayRecognitionError( ANTLR_UINT8** tokenNames, StringStreamType& str_stream) const;
+};
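+
+// Illustrative note: a concrete exception is raised by instantiating this
+// template with a specific ExceptionType; for example, the cyclic DFA code in
+// antlr3cyclicdfa.inl constructs
+// ANTLR_Exception<ImplTraits, NO_VIABLE_ALT_EXCEPTION, StreamType> with the
+// recognizer and a description, then fills in the decision number and state
+// through the setters inherited from ANTLR_ExceptionBase.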
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3exception.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3exception.inl b/runtime/Cpp/include/antlr3exception.inl
new file mode 100755
index 0000000..b73f1aa
--- /dev/null
+++ b/runtime/Cpp/include/antlr3exception.inl
@@ -0,0 +1,379 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits, class StreamType>
+ANTLR_ExceptionBase<ImplTraits, StreamType>::ANTLR_ExceptionBase(const StringType& message)
+	:m_message(message)
+	,m_input(NULL)
+{
+	m_index = 0;
+	m_token	= NULL;
+	m_expecting = 0;
+	m_expectingSet = NULL;
+	m_node = NULL;
+	m_c = 0;
+	m_line = 0;
+	m_charPositionInLine = 0;
+	m_decisionNum = 0;
+	m_state = 0;
+	m_nextException = NULL;
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename ANTLR_ExceptionBase<ImplTraits, StreamType>::StringType& ANTLR_ExceptionBase<ImplTraits, StreamType>::get_message()
+{
+	return m_message;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename ANTLR_ExceptionBase<ImplTraits, StreamType>::StringType& ANTLR_ExceptionBase<ImplTraits, StreamType>::get_streamName()
+{
+	return m_streamName;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_MARKER ANTLR_ExceptionBase<ImplTraits, StreamType>::get_index() const
+{
+	return m_index;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE const typename ANTLR_ExceptionBase<ImplTraits, StreamType>::TokenType* ANTLR_ExceptionBase<ImplTraits, StreamType>::get_token() const
+{
+	return m_token;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename ANTLR_ExceptionBase<ImplTraits, StreamType>::ExceptionBaseType* ANTLR_ExceptionBase<ImplTraits, StreamType>::get_nextException() const
+{
+	return m_nextException;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UINT32 ANTLR_ExceptionBase<ImplTraits, StreamType>::get_expecting() const
+{
+	return m_expecting;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename ANTLR_ExceptionBase<ImplTraits, StreamType>::BitsetListType* ANTLR_ExceptionBase<ImplTraits, StreamType>::get_expectingSet() const
+{
+	return m_expectingSet;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename ANTLR_ExceptionBase<ImplTraits, StreamType>::TokenType* ANTLR_ExceptionBase<ImplTraits, StreamType>::get_node() const
+{
+	return m_node;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UCHAR ANTLR_ExceptionBase<ImplTraits, StreamType>::get_c() const
+{
+	return m_c;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UINT32 ANTLR_ExceptionBase<ImplTraits, StreamType>::get_line() const
+{
+	return m_line;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_INT32 ANTLR_ExceptionBase<ImplTraits, StreamType>::get_charPositionInLine() const
+{
+	return m_charPositionInLine;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UINT32 ANTLR_ExceptionBase<ImplTraits, StreamType>::get_decisionNum() const
+{
+	return m_decisionNum;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UINT32 ANTLR_ExceptionBase<ImplTraits, StreamType>::get_state() const
+{
+	return m_state;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename ANTLR_ExceptionBase<ImplTraits, StreamType>::StringType& ANTLR_ExceptionBase<ImplTraits, StreamType>::get_ruleName()
+{
+	return m_ruleName;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename ANTLR_ExceptionBase<ImplTraits, StreamType>::IntStreamType* ANTLR_ExceptionBase<ImplTraits, StreamType>::get_input() const
+{
+	return m_input;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_message( const StringType& message )
+{
+	m_message = message;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_streamName( const StringType& streamName )
+{
+	m_streamName = streamName;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_index( ANTLR_MARKER index )
+{
+	m_index = index;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_token( const TokenType* token )
+{
+	m_token = token;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_nextException( ExceptionBaseType* nextException )
+{
+	m_nextException = nextException;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_expecting( ANTLR_UINT32 expecting )
+{
+	m_expecting = expecting;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_expectingSet( BitsetListType* expectingSet )
+{
+	m_expectingSet = expectingSet;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_node( TokenType* node )
+{
+	m_node = node;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_c( ANTLR_UCHAR c )
+{
+	m_c = c;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_line( ANTLR_UINT32 line )
+{
+	m_line = line;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_charPositionInLine( ANTLR_INT32 charPositionInLine )
+{
+	m_charPositionInLine = charPositionInLine;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_decisionNum( ANTLR_UINT32 decisionNum )
+{
+	m_decisionNum = decisionNum;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_state( ANTLR_UINT32 state )
+{
+	m_state = state;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_ruleName( const StringType& ruleName )
+{
+	m_ruleName = ruleName;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void ANTLR_ExceptionBase<ImplTraits, StreamType>::set_input( IntStreamType* input )
+{
+	m_input = input;
+}
+
+
+template<class ImplTraits, ExceptionType Ex, class StreamType>
+	template<typename BaseRecognizerType>
+ANTLR_Exception<ImplTraits, Ex, StreamType>::ANTLR_Exception(BaseRecognizerType* recognizer, const StringType& message)
+	:BaseType( message )
+{
+	recognizer->get_super()->fillExceptionData( this );
+	BaseType::m_input	= recognizer->get_super()->get_istream();
+	BaseType::m_nextException	= recognizer->get_state()->get_exception();	/* So we don't leak the memory */
+	recognizer->get_state()->set_exception(this);
+	recognizer->get_state()->set_error( true );	    /* Exception is outstanding	*/
+}
+
+template<class ImplTraits, ExceptionType Ex, class StreamType>
+ANTLR_UINT32 ANTLR_Exception<ImplTraits, Ex, StreamType>::getType() const
+{
+	return static_cast<ANTLR_UINT32>(Ex);
+}
+
+template<class ImplTraits, ExceptionType Ex, class StreamType>
+void ANTLR_Exception<ImplTraits, Ex, StreamType>::print() const
+{
+	/* Print the exception number if there is no message, else print the message
+	*/
+	if  ( BaseType::m_message.empty() )
+	{
+		fprintf(stderr, "ANTLR3_EXCEPTION number %d (%08X).\n", Ex, Ex);
+	}
+	else
+	{
+		fprintf(stderr, "ANTLR3_EXCEPTION: %s\n", BaseType::m_message.c_str() );
+	}
+}
+
+template<class ImplTraits, ExceptionType Ex, class StreamType>
+typename ANTLR_Exception<ImplTraits, Ex, StreamType>::StringType 
+	ANTLR_Exception<ImplTraits, Ex, StreamType>::getName() const
+{
+	const char* exArray[] = {
+						"org.antlr.runtime.RecognitionException"
+						, "org.antlr.runtime.MismatchedTokenException"
+						, "org.antlr.runtime.NoViableAltException"
+						, "org.antlr.runtime.MismatchedSetException"
+						, "org.antlr.runtime.EarlyExitException"
+						, "org.antlr.runtime.FailedPredicateException"
+						, "org.antlr.runtime.MismatchedTreeNodeException"
+						, "org.antlr.runtime.tree.RewriteEarlyExitException"
+						, "org.antlr.runtime.UnwantedTokenException"
+						, "org.antlr.runtime.MissingTokenException"
+					  };
+	return StringType(exArray[Ex]);
+}
+
+template<class ImplTraits, ExceptionType Ex, class StreamType>
+void ANTLR_Exception<ImplTraits, Ex, StreamType>::displayRecognitionError( ANTLR_UINT8** tokenNames, 
+																			StringStreamType& str_stream ) const
+{
+	switch( Ex )
+	{
+	case RECOGNITION_EXCEPTION:
+		// Indicates that the recognizer received a token
+		// in the input that was not predicted. This is the basic exception type 
+		// from which all others are derived. So we assume it was a syntax error.
+		// You may get this if there are no more tokens and more are needed
+		// to complete a parse for instance.
+		//
+		str_stream << " : syntax error...\n"; 
+		break;
+	case UNWANTED_TOKEN_EXCEPTION:
+		// Indicates that the recognizer was fed a token which seems to be
+		// spurious input. We can detect this when the token that follows
+		// this unwanted token would normally be part of the syntactically
+		// correct stream. Then we can see that the token we are looking at
+		// is just something that should not be there and throw this exception.
+		//
+		if	(tokenNames == NULL)
+		{
+			str_stream << " : Extraneous input...";
+		}
+		else
+		{
+			if	( BaseType::m_expecting == ImplTraits::CommonTokenType::TOKEN_EOF)
+			{
+				str_stream << " : Extraneous input - expected <EOF>\n";
+			}
+			else
+			{
+				str_stream << " : Extraneous input - expected "
+						   << tokenNames[ BaseType::m_expecting] << " ...\n";
+			}
+		}
+		break;
+	case MISSING_TOKEN_EXCEPTION:
+		// Indicates that the recognizer detected that the token we just
+		// hit would be valid syntactically if preceded by a particular
+		// token. Perhaps a missing ';' at line end or a missing ',' in an
+		// expression list, and such like.
+		//
+		if	(tokenNames == NULL)
+		{
+			str_stream << " : Missing token ("
+					   << BaseType::m_expecting << ")...\n";
+		}
+		else
+		{
+			if	( BaseType::m_expecting == ImplTraits::CommonTokenType::TOKEN_EOF )
+			{
+				str_stream <<" : Missing <EOF>\n";
+			}
+			else
+			{
+				str_stream << " : Missing " << tokenNames[BaseType::m_expecting] <<" \n";
+			}
+		}
+		break;
+	case NO_VIABLE_ALT_EXCEPTION:
+		// We could not pick any alternative from the given input,
+		// so the cause is not obvious; however, when you examine your grammar
+		// you should be able to see why. It means that at the point where the
+		// current token occurred, the DFA indicates there is nowhere to go from here.
+		//
+		str_stream << " : cannot match to any predicted input...\n";
+		break;
+	case MISMATCHED_SET_EXCEPTION:
+		{
+			ANTLR_UINT32	  count;
+			ANTLR_UINT32	  bit;
+			ANTLR_UINT32	  size;
+			ANTLR_UINT32	  numbits;
+			BitsetType*	  errBits;
+
+			// This means we were able to deal with one of a set of
+			// possible tokens at this point, but we did not see any
+			// member of that set.
+			//
+			str_stream << " : unexpected input...\n  expected one of : ";
+
+			// What tokens could we have accepted at this point in the
+			// parse?
+			//
+			count   = 0;
+			errBits = BaseType::m_expectingSet->bitsetLoad();
+			numbits = errBits->numBits();
+			size    = errBits->size();
+
+			if  (size > 0)
+			{
+				// However many tokens we could have dealt with here, it is usually
+				// not useful to print ALL of the set here. I arbitrarily chose 8
+				// here, but you should do whatever makes sense for you of course.
+				// No token number 0, so look for bit 1 and on.
+				//
+				for	(bit = 1; bit < numbits && count < 8 && count < size; bit++)
+				{
+					// TODO: This doesn't look right - it should be asking whether the bit is set in errBits!
+					//
+					if  (tokenNames[bit])
+					{
+						str_stream <<  ( count > 0 ? ", " : "" )
+								   <<  tokenNames[bit]; 
+						count++;
+					}
+				}
+				str_stream << "\n";
+			}
+			else
+			{
+				str_stream << "Actually dude, we didn't seem to be expecting anything here, or at least\n";
+				str_stream << "I could not work out what I was expecting, like so many of us these days!\n";
+			}
+		}
+		break;
+	case EARLY_EXIT_EXCEPTION:
+		str_stream << " : missing elements...\n";
+		break;
+	default:
+		str_stream << " : syntax not recognized...\n"; 
+		break;
+	}
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_ExceptionBase<ImplTraits,StreamType>::~ANTLR_ExceptionBase()
+{
+	ANTLR_ExceptionBase<ImplTraits,StreamType>* next;
+	ANTLR_ExceptionBase<ImplTraits,StreamType>* ex = m_nextException;
+
+    /* Ensure valid pointer
+     */
+    while   (ex != NULL)
+    {
+		/* Pick up anything following now, before we free the
+		 * current memory block.
+		 */
+		next	= ex->m_nextException;
+		ex->m_nextException = NULL;
+
+		/* Free the actual structure itself
+		 */
+		delete ex;
+
+		ex = next;
+    }
+}
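+
+// Illustrative note (a sketch, not a prescription): the constructor above chains any
+// previously recorded exception into m_nextException, so recorded exceptions form a
+// singly linked list headed by the most recent one. The destructor walks that list
+// iteratively, so whoever owns the head (normally the recognizer shared state)
+// releases the entire chain with a single delete, e.g. (assumed owner code):
+//
+//   delete state->get_exception();   // frees the whole exception chain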
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3filestream.hpp b/runtime/Cpp/include/antlr3filestream.hpp
new file mode 100755
index 0000000..9a46d35
--- /dev/null
+++ b/runtime/Cpp/include/antlr3filestream.hpp
@@ -0,0 +1,75 @@
+#ifndef	_ANTLR3_FILESTREAM_HPP
+#define	_ANTLR3_FILESTREAM_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include	<exception>
+#include	<string>
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+class FileUtils
+{
+public:
+	/** \brief Open an operating system file and return the descriptor
+	 * We just use the common open() and related functions here. 
+	 * Later we might find better ways on systems
+	 * such as Windows and OpenVMS for instance. But the idea is to read the 
+	 * whole file at once anyway, so it may be irrelevant.
+	 */
+	static ANTLR_FDSC	AntlrFopen(const ANTLR_UINT8* filename, const char * mode);
+
+	/** \brief Close an operating system file and free any handles
+	 *  etc.
+	 */
+	static void		AntlrFclose	(ANTLR_FDSC fd);
+
+	static ANTLR_UINT32	AntlrFsize(const ANTLR_UINT8* filename);
+	template<typename InputStreamType>
+	static ANTLR_UINT32	AntlrRead8Bit(InputStreamType* input, const ANTLR_UINT8* fileName);
+	static ANTLR_UINT32	AntlrFread(ANTLR_FDSC fdsc, ANTLR_UINT32 count,  void* data);
+
+};
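+
+// Illustrative sketch (assumptions: 'MyTraits' is a concrete traits class and 'input'
+// points to an InputStream<MyTraits>): this is roughly how the input stream uses these
+// helpers to read a whole file in one call; see InputStream<ImplTraits>::createFileStream()
+// in antlr3input.inl.
+//
+//   FileUtils<MyTraits>::AntlrRead8Bit(input, (const ANTLR_UINT8*)"grammar.txt");
+//   // throws ParseFileAbsentException if the file cannot be opened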
+
+class ParseFileAbsentException : public std::exception
+{
+	virtual const char* what() const throw()
+	{
+		return " Parse File not Present";
+	}
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3filestream.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3filestream.inl b/runtime/Cpp/include/antlr3filestream.inl
new file mode 100755
index 0000000..59b4f06
--- /dev/null
+++ b/runtime/Cpp/include/antlr3filestream.inl
@@ -0,0 +1,74 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+ANTLR_FDSC	FileUtils<ImplTraits>::AntlrFopen(const ANTLR_UINT8* filename, const char * mode)
+{
+	return  (ANTLR_FDSC)fopen((const char *)filename, mode);
+}
+
+template<class ImplTraits>
+void	FileUtils<ImplTraits>::AntlrFclose	(ANTLR_FDSC fd)
+{
+	fclose(fd);
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	FileUtils<ImplTraits>::AntlrFsize(const ANTLR_UINT8* filename)
+{
+    struct _stat	statbuf;
+
+    _stat((const char *)filename, &statbuf);
+
+    return (ANTLR_UINT32)statbuf.st_size;
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	FileUtils<ImplTraits>::AntlrFread(ANTLR_FDSC fdsc, ANTLR_UINT32 count,  void* data)
+{
+	return  (ANTLR_UINT32)fread(data, (size_t)count, 1, fdsc);
+}
+
+template<class ImplTraits>
+	template<typename InputStreamType>
+ANTLR_UINT32	FileUtils<ImplTraits>::AntlrRead8Bit(InputStreamType* input, const ANTLR_UINT8* fileName)
+{
+	ANTLR_FDSC	    infile;
+	ANTLR_UINT32	    fSize;
+
+	/* Open the OS file in read binary mode
+	*/
+	infile  = FileUtils<ImplTraits>::AntlrFopen(fileName, "rb");
+
+	/* Check that it was there
+	*/
+	if	(infile == NULL)
+	{
+		ParseFileAbsentException ex;
+		throw ex;
+	}
+
+	/* It was there, so we can read the bytes now
+	*/
+	fSize   = FileUtils<ImplTraits>::AntlrFsize(fileName);	/* Size of input file	*/
+
+	/* Allocate buffer for this input set   
+	*/
+	void* data = ImplTraits::AllocPolicyType::alloc(fSize);
+	/* Now we read the file. Characters are not converted to
+	* the internal ANTLR encoding until they are read from the buffer
+	*/
+	FileUtils<ImplTraits>::AntlrFread(infile, fSize, data );
+
+	input->set_data( (unsigned char*) data );
+	input->set_sizeBuf( fSize );
+
+	input->set_isAllocated(true);
+
+	/* And close the file handle
+	*/
+	FileUtils<ImplTraits>::AntlrFclose(infile);
+
+	return  ANTLR_SUCCESS;
+}
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3input.hpp b/runtime/Cpp/include/antlr3input.hpp
new file mode 100755
index 0000000..175a5da
--- /dev/null
+++ b/runtime/Cpp/include/antlr3input.hpp
@@ -0,0 +1,327 @@
+/** \file
+ * Defines the basic structures used to manipulate character
+ * streams from any input source. Any character size and encoding
+ * can in theory be used, so long as a set of functions is provided that
+ * can return a 32-bit integer representation of their characters and efficiently mark and revert
+ * to specific offsets into their input streams.
+ */
+#ifndef	_ANTLR_INPUT_HPP
+#define	_ANTLR_INPUT_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+/// Master context structure for an ANTLR3 C++ runtime based input stream.
+/// \ingroup apistructures
+/// Note: calling _LT on this stream is not meaningful. You would call it only
+/// from a parser / tree parser, whose respective input streams provide that
+/// function; calling it from a lexer will produce a compile-time error.
+
+template<class ImplTraits>
+class	InputStream :   public ImplTraits::template IntStreamType< typename ImplTraits::InputStreamType >
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename ImplTraits::LexStateType LexStateType;
+	typedef typename ImplTraits::template IntStreamType< typename ImplTraits::InputStreamType > IntStreamType;
+	typedef IntStreamType BaseType;
+	typedef typename ImplTraits::StreamDataType UnitType;
+	typedef UnitType DataType;
+	typedef UnitType TokenType;
+	typedef typename AllocPolicyType::template VectorType<LexStateType> MarkersType;
+	typedef typename ImplTraits::StringType StringType;
+
+private:
+    /** Pointer to the start of the input string; characters may be
+     *  addressed as offsets from here and are in the original input encoding.
+     */
+    const DataType*		m_data;
+
+    /** Pointer to the next character to be consumed from the input data
+     *  This is cast to point at the encoding of the original file that
+     *  was read by the functions installed as pointer in this input stream
+     *  context instance at file/string/whatever load time.
+     */
+    const DataType*		m_nextChar;
+
+    /** Number of characters that can be consumed at this point in time.
+     *  Mostly this is just what is left in the pre-read buffer, but if the
+     *  input source is a stream such as a socket or something then we may
+     *  call special read code to wait for more input.
+     */
+    ANTLR_UINT32	m_sizeBuf;
+
+    /** The line number we are traversing in the input file. This gets incremented
+     *  by a newline() call in the lexer grammar actions.
+     */
+    ANTLR_UINT32	m_line;
+
+    /** Pointer into the input buffer where the current line
+     *  started.
+     */
+    const DataType*		m_currentLine;
+
+    /** The offset within the current line of the current character
+     */
+    ANTLR_INT32		m_charPositionInLine;
+
+    /** Tracks how deep mark() calls are nested
+     */
+    ANTLR_UINT32	m_markDepth;
+
+    /** List of mark() points in the input stream
+     */
+    MarkersType		m_markers;
+
+    /** File name string. Set automatically from the stream name, but it may
+     * be overridden manually via set_fileName().
+     */
+    StringType		m_fileName;
+
+    /** File number, needs to be set manually to some file index of your devising.
+     */
+    ANTLR_UINT32	m_fileNo;
+
+	/// Character that automatically causes an internal line count
+    ///  increment.
+    ///
+    ANTLR_UCHAR		m_newlineChar;
+
+    /// Indicates the size, in 8 bit units, of a single character. Note that
+    /// the C runtime does not deal with surrogates as this would be
+    /// slow and complicated. If this is a UTF-8 stream then this field
+    /// will be set to 0. Generally you are best working internally with 32 bit characters
+    /// as this is the most efficient.
+    ///
+    ANTLR_UINT8		m_charByteSize;
+
+   /** Indicates if the data pointer was allocated by us, and so should be freed
+     *  when the stream dies.
+     */
+    bool			m_isAllocated;
+
+    /// Indicates the encoding scheme used in this input stream
+    ///
+    ANTLR_UINT32    m_encoding;
+
+    /* API */
+public:
+	InputStream(const ANTLR_UINT8* fileName, ANTLR_UINT32 encoding);
+	InputStream(const ANTLR_UINT8* data, ANTLR_UINT32 encoding, ANTLR_UINT32 size, ANTLR_UINT8* name);
+	~InputStream();
+	const DataType* get_data() const;
+	bool get_isAllocated() const;
+	const DataType* get_nextChar() const;
+	ANTLR_UINT32 get_sizeBuf() const;
+	ANTLR_UINT32 get_line() const;
+	const DataType* get_currentLine() const;
+	ANTLR_INT32 get_charPositionInLine() const;
+	ANTLR_UINT32 get_markDepth() const;
+	MarkersType& get_markers();
+	const StringType& get_fileName() const;
+	ANTLR_UINT32 get_fileNo() const;
+	ANTLR_UCHAR get_newlineChar() const;
+	ANTLR_UINT8 get_charByteSize() const;
+	ANTLR_UINT32 get_encoding() const;
+
+	void  set_data( DataType* data );
+	void  set_isAllocated( bool isAllocated );
+	void  set_nextChar( const DataType* nextChar );
+	void  set_sizeBuf( ANTLR_UINT32 sizeBuf );
+	void  set_line( ANTLR_UINT32 line );
+	void  set_currentLine( const DataType* currentLine );
+	void  set_charPositionInLine( ANTLR_INT32 charPositionInLine );
+	void  set_markDepth( ANTLR_UINT32 markDepth );
+	void  set_markers( const MarkersType& markers );
+	void  set_fileName( const StringType& fileName );
+	void  set_fileNo( ANTLR_UINT32 fileNo );
+	void  set_newlineChar( ANTLR_UCHAR newlineChar );
+	void  set_charByteSize( ANTLR_UINT8 charByteSize );
+	void  set_encoding( ANTLR_UINT32 encoding );
+
+	void inc_charPositionInLine();
+	void inc_line();	
+	void inc_markDepth();
+
+	IntStreamType*	get_istream();
+
+    /** Function that resets the input stream
+     */
+    void	reset();
+
+    /** Pointer to a function that reuses and resets an input stream by
+     *  supplying a new 'source'
+     */
+    void    reuse(ANTLR_UINT8* inString, ANTLR_UINT32 size, ANTLR_UINT8* name);
+
+	
+    /** Function to return the total size of the input buffer. For streams
+     *  this may be just the total we have available so far. This means of course that
+     *  the input stream must be careful to accumulate enough input so that any backtracking
+     *  can be satisfied.
+     */
+    ANTLR_UINT32	size();
+
+    /** Function to return a substring of the input stream. String is returned in allocated
+     *  memory and is in same encoding as the input stream itself, NOT internal ANTLR_UCHAR form.
+     */
+    StringType	substr(ANTLR_MARKER start, ANTLR_MARKER stop);
+
+    /** Function to return the current line number in the input stream
+     */
+    ANTLR_UINT32	get_line();
+
+    /** Function to return the current line buffer in the input stream
+     *  The pointer returned is directly into the input stream so you must copy
+     *  it if you wish to manipulate it without damaging the input stream. Encoding
+     *  is obviously in the same form as the input stream.
+     *  \remark
+     *    - Note that this function will be inaccurate if setLine is called, as there
+     *      is currently no way to position the input stream at a particular line
+     *      number offset.
+     */
+    const DataType*	getLineBuf();
+
+    /** Function to return the current offset in the current input stream line
+     */
+    ANTLR_UINT32	get_charPositionInLine();
+
+    /** Function to set the current position in the current line.
+     */
+    void	set_charPositionInLine(ANTLR_UINT32 position);
+
+    /** Function to override the default newline character that the input stream
+     *  looks for to trigger the line/offset and line buffer recording information.
+     *  \remark
+     *   - By default the character '\n' is installed as the newline trigger character. When this
+     *     character is seen by the consume() function, the current line number is incremented and the
+     *     current line offset is reset to 0. The pointer for the line of input we are consuming
+     *     is updated to point to the next character after this one in the input stream (which means it
+     *     may become invalid if the last newline character in the file is seen), so watch out.
+     *   - If for some reason you do not want the counters and pointers to be reset, you can set the
+     *     character to some impossible character such as '\0' or whatever.
+     *   - This is a single character only, so choose the last character in a sequence of two or more.
+     *   - This is only a simple aid to error reporting - if you have a complicated binary input structure
+     *     it may not be adequate, but you can always override every function in the input stream with your
+     *     own of course, and can even write your own complete input stream set if you like.
+     *   - It is your responsibility to set a valid character for the input stream type. There is no point
+     *     setting this to 0xFFFFFFFF if the input stream is 8-bit ASCII, as it will just be truncated and never
+     *     trigger, because the comparison will be (INT32)0xFF == (INT32)0xFFFFFFFF.
+     */
+    void	set_newLineChar(ANTLR_UINT32 newlineChar);
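+    // Example (illustrative): set_newLineChar('\0') installs an impossible trigger
+    // character, effectively disabling the line/offset tracking described above.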
+	
+	ANTLR_MARKER index_impl();
+
+private:
+	/** \brief Use the contents of an operating system file as the input
+	 *         for an input stream.
+	 *
+	 * \param fileName Name of operating system file to read.
+	 * \throws ParseFileAbsentException if the file cannot be opened.
+	 */
+	void createFileStream(const ANTLR_UINT8* fileName);
+
+	/** \brief Use the supplied 'string' as input to the stream
+	 *
+	 * \param data Pointer to the input data
+	 * \throws ParseNullStringException if \a data is NULL.
+	 */
+	void createStringStream(const ANTLR_UINT8* data);
+	void genericSetupStream();
+
+	/// Determine endianess of the input stream and install the
+	/// API required for the encoding in that format.
+	///
+	void setupInputStream();
+
+};
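+
+// Illustrative construction sketch (assumptions: 'MyLexerTraits' is the traits class
+// produced for a generated lexer; namespace qualification is omitted). The two
+// constructors above cover the file and in-memory cases:
+//
+//   MyLexerTraits::InputStreamType fileInput(
+//       (const ANTLR_UINT8*)"input.txt", ANTLR_ENC_8BIT);
+//
+//   MyLexerTraits::InputStreamType memInput(
+//       (const ANTLR_UINT8*)"a b c", ANTLR_ENC_8BIT, 5, (ANTLR_UINT8*)"in-memory");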
+
+/** \brief Structure for tracking lexer input states as part of mark()
+ *  and rewind() in the lexer.
+ */
+template<class ImplTraits>
+class	LexState : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::StreamDataType DataType;
+
+private:
+    /** Pointer to the next character to be consumed from the input data
+     *  This is cast to point at the encoding of the original file that
+     *  was read by the functions installed as pointer in this input stream
+     *  context instance at file/string/whatever load time.
+     */
+    const DataType*			m_nextChar;
+
+    /** The line number we are traversing in the input file. This gets incremented
+     *  by a newline() call in the lexer grammar actions.
+     */
+    ANTLR_UINT32	m_line;
+
+    /** Pointer into the input buffer where the current line
+     *  started.
+     */
+    const DataType*			m_currentLine;
+
+    /** The offset within the current line of the current character
+     */
+    ANTLR_INT32		m_charPositionInLine;
+
+public:
+	LexState();
+	const DataType* get_nextChar() const;
+	ANTLR_UINT32 get_line() const;
+	const DataType* get_currentLine() const;
+	ANTLR_INT32 get_charPositionInLine() const;
+	void  set_nextChar( const DataType* nextChar );
+	void  set_line( ANTLR_UINT32 line );
+	void  set_currentLine( const DataType* currentLine );
+	void  set_charPositionInLine( ANTLR_INT32 charPositionInLine );
+};
+
+class ParseNullStringException : public std::exception
+{
+	virtual const char* what() const throw()
+	{
+		return "Null String";
+	}
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3input.inl"
+
+#endif	/* _ANTLR_INPUT_H  */
diff --git a/runtime/Cpp/include/antlr3input.inl b/runtime/Cpp/include/antlr3input.inl
new file mode 100755
index 0000000..f3362cd
--- /dev/null
+++ b/runtime/Cpp/include/antlr3input.inl
@@ -0,0 +1,619 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+InputStream<ImplTraits>::InputStream(const ANTLR_UINT8* fileName, ANTLR_UINT32 encoding)
+{
+    // First order of business is to read the file into some buffer space
+    // as just straight 8 bit bytes. Then we will work out the encoding and
+    // byte order and adjust the API functions that are installed for the
+    // default 8Bit stream accordingly.
+    //
+    this->createFileStream(fileName);
+
+    // We have the data in memory now so we can deal with it according to 
+    // the encoding scheme we were given by the user.
+    //
+    m_encoding = encoding;
+
+    // Now we need to work out the endian type and install any 
+    // API functions that differ from 8Bit
+    //
+    this->setupInputStream();
+
+    // Now we can set up the file name
+    //	
+    BaseType::m_streamName	= (const char* )fileName;
+    m_fileName		= BaseType::m_streamName;
+}
+
+template<class ImplTraits>
+InputStream<ImplTraits>::InputStream(const ANTLR_UINT8* data, ANTLR_UINT32 encoding, ANTLR_UINT32 size, ANTLR_UINT8* name)
+{
+	// First order of business is to set up the stream and install the data pointer.
+    // Then we will work out the encoding and byte order and adjust the API functions that are installed for the
+    // default 8Bit stream accordingly.
+    //
+    this->createStringStream(data);
+    
+    // Size (in bytes) of the given 'string'
+    //
+    m_sizeBuf		= size;
+
+    // We have the data in memory now so we can deal with it according to 
+    // the encoding scheme we were given by the user.
+    //
+    m_encoding = encoding;
+
+    // Now we need to work out the endian type and install any 
+    // API functions that differ from 8Bit
+    //
+    this->setupInputStream();
+
+    // Now we can set up the file name
+    //	
+    BaseType::m_streamName	= (name == NULL ) ? "" : (const char*)name;
+    m_fileName		= BaseType::m_streamName;
+
+}
+
+template<class ImplTraits>
+void InputStream<ImplTraits>::createStringStream(const ANTLR_UINT8* data)
+{
+	if	(data == NULL)
+	{
+		ParseNullStringException ex;
+		throw ex;
+	}
+
+	// Structure was allocated correctly, now we can install the pointer
+	//
+    m_data             = data;
+    m_isAllocated	   = false;
+
+	// Call the common 8 bit input stream handler
+	// initialization.
+	//
+	this->genericSetupStream();
+}
+
+template<class ImplTraits>
+void InputStream<ImplTraits>::createFileStream(const ANTLR_UINT8* fileName)
+{
+	if	(fileName == NULL)
+	{
+		ParseFileAbsentException ex;
+		throw ex;
+	}
+
+	// Structure was allocated correctly, now we can read the file.
+	//
+	FileUtils<ImplTraits>::AntlrRead8Bit(this, fileName);
+
+	// Call the common 8 bit input stream handler
+	// initialization.
+	//
+	this->genericSetupStream();
+}
+
+template<class ImplTraits>
+void InputStream<ImplTraits>::genericSetupStream()
+{
+	this->set_charByteSize(1);
+	
+    /* Set up the input stream brand new
+     */
+    this->reset();
+    
+    /* Install default line separator character (it can be replaced
+     * by the grammar programmer later)
+     */
+    this->set_newLineChar((ANTLR_UCHAR)'\n');
+}
+
+template<class ImplTraits>
+InputStream<ImplTraits>::~InputStream()
+{
+	// Free the input stream buffer if we allocated it
+    //
+    if	(m_isAllocated && (m_data != NULL))
+		AllocPolicyType::free((void*)m_data); //const_cast is required
+}
+
+template<class ImplTraits>
+ANTLR_INLINE const typename InputStream<ImplTraits>::DataType* InputStream<ImplTraits>::get_data() const
+{
+	return m_data;
+}
+template<class ImplTraits>
+ANTLR_INLINE bool InputStream<ImplTraits>::get_isAllocated() const
+{
+	return m_isAllocated;
+}
+template<class ImplTraits>
+ANTLR_INLINE const typename InputStream<ImplTraits>::DataType* InputStream<ImplTraits>::get_nextChar() const
+{
+	return m_nextChar;
+}
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32 InputStream<ImplTraits>::get_sizeBuf() const
+{
+	return m_sizeBuf;
+}
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32 InputStream<ImplTraits>::get_line() const
+{
+	return m_line;
+}
+template<class ImplTraits>
+ANTLR_INLINE const typename InputStream<ImplTraits>::DataType* InputStream<ImplTraits>::get_currentLine() const
+{
+	return m_currentLine;
+}
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_INT32 InputStream<ImplTraits>::get_charPositionInLine() const
+{
+	return m_charPositionInLine;
+}
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32 InputStream<ImplTraits>::get_markDepth() const
+{
+	return m_markDepth;
+}
+template<class ImplTraits>
+ANTLR_INLINE typename InputStream<ImplTraits>::MarkersType& InputStream<ImplTraits>::get_markers()
+{
+	return m_markers;
+}
+template<class ImplTraits>
+ANTLR_INLINE const typename InputStream<ImplTraits>::StringType& InputStream<ImplTraits>::get_fileName() const
+{
+	return m_fileName;
+}
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32 InputStream<ImplTraits>::get_fileNo() const
+{
+	return m_fileNo;
+}
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UCHAR InputStream<ImplTraits>::get_newlineChar() const
+{
+	return m_newlineChar;
+}
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT8 InputStream<ImplTraits>::get_charByteSize() const
+{
+	return m_charByteSize;
+}
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32 InputStream<ImplTraits>::get_encoding() const
+{
+	return m_encoding;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_data( DataType* data )
+{
+	m_data = data;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_isAllocated( bool isAllocated )
+{
+	m_isAllocated = isAllocated;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_nextChar( const DataType* nextChar )
+{
+	m_nextChar = nextChar;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_sizeBuf( ANTLR_UINT32 sizeBuf )
+{
+	m_sizeBuf = sizeBuf;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_line( ANTLR_UINT32 line )
+{
+	m_line = line;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_currentLine( const DataType* currentLine )
+{
+	m_currentLine = currentLine;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_charPositionInLine( ANTLR_INT32 charPositionInLine )
+{
+	m_charPositionInLine = charPositionInLine;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_markDepth( ANTLR_UINT32 markDepth )
+{
+	m_markDepth = markDepth;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_markers( const MarkersType& markers )
+{
+	m_markers = markers;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_fileName( const StringType& fileName )
+{
+	m_fileName = fileName;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_fileNo( ANTLR_UINT32 fileNo )
+{
+	m_fileNo = fileNo;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_newlineChar( ANTLR_UCHAR newlineChar )
+{
+	m_newlineChar = newlineChar;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_charByteSize( ANTLR_UINT8 charByteSize )
+{
+	m_charByteSize = charByteSize;
+}
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::set_encoding( ANTLR_UINT32 encoding )
+{
+	m_encoding = encoding;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::inc_charPositionInLine()
+{
+	++m_charPositionInLine;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::inc_line()
+{
+	++m_line;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void InputStream<ImplTraits>::inc_markDepth()
+{
+	++m_markDepth;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void	InputStream<ImplTraits>::reset()
+{
+	m_nextChar		= m_data;	/* Input at first character */
+    m_line			= 1;		/* starts at line 1	    */
+    m_charPositionInLine	= 0;
+    m_currentLine		= m_data;
+    m_markDepth		= 0;		/* Reset markers	    */
+    
+    /* Clear out the markers table if it is there
+     */
+	m_markers.clear();
+}
+
+template<class ImplTraits>
+void    InputStream<ImplTraits>::reuse(ANTLR_UINT8* inString, ANTLR_UINT32 size, ANTLR_UINT8* name)
+{
+	m_isAllocated	= false;
+    m_data		= inString;
+    m_sizeBuf	= size;
+    
+    // Now we can set up the file name. As we are reusing the stream, there may already
+    // be a string that we can reuse for holding the filename.
+    //
+	if	( BaseType::m_streamName.empty() ) 
+	{
+		BaseType::m_streamName	= ((name == NULL) ? "-memory-" : (const char *)name);
+		m_fileName		= BaseType::m_streamName;
+	}
+	else
+	{
+		BaseType::m_streamName = ((name == NULL) ? "-memory-" : (const char *)name);
+	}
+
+    this->reset();
+}
+
+/*
+template<class ImplTraits>
+typename InputStream<ImplTraits>::DataType*	InputStream<ImplTraits>::_LT(ANTLR_INT32 lt)
+{
+	return this->_LA(lt);
+}
+*/
+
+template<class ImplTraits>
+ANTLR_UINT32	InputStream<ImplTraits>::size()
+{
+	return m_sizeBuf;
+}
+
+template<class ImplTraits>
+ANTLR_MARKER	InputStream<ImplTraits>::index_impl()
+{
+	return (ANTLR_MARKER)m_nextChar;
+}
+
+
+template<class ImplTraits>
+typename InputStream<ImplTraits>::StringType	InputStream<ImplTraits>::substr(ANTLR_MARKER start, ANTLR_MARKER stop)
+{
+	std::size_t len = static_cast<std::size_t>( (stop-start)/sizeof(DataType) + 1 );
+	StringType str( (const char*)start, len );
+	return str;
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	InputStream<ImplTraits>::get_line()
+{
+	return m_line;
+}
+
+template<class ImplTraits>
+const typename InputStream<ImplTraits>::DataType*	InputStream<ImplTraits>::getLineBuf()
+{
+	return m_currentLine;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32	InputStream<ImplTraits>::get_charPositionInLine()
+{
+	return m_charPositionInLine;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void	InputStream<ImplTraits>::set_charPositionInLine(ANTLR_UINT32 position)
+{
+	m_charPositionInLine = position;
+}
+
+template<class ImplTraits>
+void	InputStream<ImplTraits>::set_newLineChar(ANTLR_UINT32 newlineChar)
+{
+	m_newlineChar = newlineChar;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE LexState<ImplTraits>::LexState()
+{
+	m_nextChar = NULL;
+	m_line = 0;
+	m_currentLine = NULL;
+	m_charPositionInLine = 0;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE const typename LexState<ImplTraits>::DataType* LexState<ImplTraits>::get_nextChar() const
+{
+	return m_nextChar;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32 LexState<ImplTraits>::get_line() const
+{
+	return m_line;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE const typename LexState<ImplTraits>::DataType* LexState<ImplTraits>::get_currentLine() const
+{
+	return m_currentLine;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_INT32 LexState<ImplTraits>::get_charPositionInLine() const
+{
+	return m_charPositionInLine;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void LexState<ImplTraits>::set_nextChar( const DataType* nextChar )
+{
+	m_nextChar = nextChar;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void LexState<ImplTraits>::set_line( ANTLR_UINT32 line )
+{
+	m_line = line;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void LexState<ImplTraits>::set_currentLine( const DataType* currentLine )
+{
+	m_currentLine = currentLine;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void LexState<ImplTraits>::set_charPositionInLine( ANTLR_INT32 charPositionInLine )
+{
+	m_charPositionInLine = charPositionInLine;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE typename InputStream<ImplTraits>::IntStreamType*	InputStream<ImplTraits>::get_istream()
+{
+	return this;
+}
+
+template<class ImplTraits>
+void InputStream<ImplTraits>::setupInputStream()
+{
+	bool  isBigEndian;
+
+    // Used to determine the endianness of the machine we are currently
+    // running on.
+    //
+    ANTLR_UINT16 bomTest = 0xFEFF;
+    
+    // What endianness is the machine we are running on? If the incoming
+    // encoding endianness is the same as this machine's natural byte order
+    // then we can use more efficient API calls.
+    //
+    if  (*((ANTLR_UINT8*)(&bomTest)) == 0xFE)
+    {
+        isBigEndian = true;
+    }
+    else
+    {
+        isBigEndian = false;
+    }
+
+    // Which encoding did the user say the input is in?
+    //
+    switch  (m_encoding)
+    {
+        case    ANTLR_ENC_UTF8:
+
+            // See if there is a BOM at the start of this UTF-8 sequence
+            // and just eat it if there is. Windows .TXT files have this for instance
+            // as it identifies UTF-8 even though it is of no consequence for byte order
+            // as UTF-8 does not have a byte order.
+            //
+            if  (       (*(m_nextChar))      == 0xEF
+                    &&  (*(m_nextChar+1))    == 0xBB
+                    &&  (*(m_nextChar+2))    == 0xBF
+                )
+            {
+                // The UTF8 BOM is present so skip it
+                //
+                m_nextChar += 3;
+            }
+
+            // Install the UTF8 input routines
+            //
+			this->setupIntStream( isBigEndian, isBigEndian );
+			this->set_charByteSize(0);
+            break;
+
+        case    ANTLR_ENC_UTF16:
+
+            // See if there is a BOM at the start of the input. If not then
+            // we assume that the byte order is the natural order of this
+            // machine (or it is really UCS2). If there is a BOM we determine if the encoding
+            // is the same as the natural order of this machine.
+            //
+            if  (       (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar))      == 0xFE
+                    &&  (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar+1))    == 0xFF
+                )
+            {
+                // BOM Present, indicates Big Endian
+                //
+                m_nextChar += 1;
+
+				this->setupIntStream( isBigEndian, true );
+            }
+            else if  (      (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar))      == 0xFF
+                        &&  (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar+1))    == 0xFE
+                )
+            {
+                // BOM present, indicates Little Endian
+                //
+                m_nextChar += 1;
+
+                this->setupIntStream( isBigEndian, false );
+            }
+            else
+            {
+                // No BOM present, assume local computer byte order
+                //
+                this->setupIntStream(isBigEndian, isBigEndian);
+            }
+			this->set_charByteSize(2);
+            break;
+
+        case    ANTLR_ENC_UTF32:
+
+            // See if there is a BOM at the start of the input. If not then
+            // we assume that the byte order is the natural order of this
+            // machine. If there is we determine if the encoding
+            // is the same as the natural order of this machine.
+            //
+            if  (       (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar))      == 0x00
+                    &&  (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar+1))    == 0x00
+                    &&  (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar+2))    == 0xFE
+                    &&  (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar+3))    == 0xFF
+                )
+            {
+                // BOM Present, indicates Big Endian
+                //
+                m_nextChar += 1;
+
+                this->setupIntStream(isBigEndian, true);
+            }
+            else if  (      (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar))      == 0xFF
+                        &&  (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar+1))    == 0xFE
+                        &&  (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar+2))    == 0x00
+                        &&  (ANTLR_UINT8)(*((ANTLR_UINT8*)m_nextChar+3))    == 0x00
+                )
+            {
+                // BOM present, indicates Little Endian
+                //
+                m_nextChar += 1;
+
+				this->setupIntStream( isBigEndian, false );
+            }
+            else
+            {
+                // No BOM present, assume local computer byte order
+                //
+				this->setupIntStream( isBigEndian, isBigEndian );
+            }
+			this->set_charByteSize(4);
+            break;
+
+        case    ANTLR_ENC_UTF16BE:
+
+            // Encoding is definitely Big Endian with no BOM
+            //
+			this->setupIntStream( isBigEndian, true );
+			this->set_charByteSize(2);
+            break;
+
+        case    ANTLR_ENC_UTF16LE:
+
+            // Encoding is definitely Little Endian with no BOM
+            //
+            this->setupIntStream( isBigEndian, false );
+			this->set_charByteSize(2);
+            break;
+
+        case    ANTLR_ENC_UTF32BE:
+
+            // Encoding is definitely Big Endian with no BOM
+            //
+			this->setupIntStream( isBigEndian, true );
+			this->set_charByteSize(4);
+            break;
+
+        case    ANTLR_ENC_UTF32LE:
+
+            // Encoding is definitely Little Endian with no BOM
+            //
+			this->setupIntStream( isBigEndian, false );
+			this->set_charByteSize(4);
+            break;
+
+        case    ANTLR_ENC_EBCDIC:
+
+            // EBCDIC is basically the same as ASCII, but with an on-the-fly
+            // translation to ASCII
+            //
+            this->setupIntStream( isBigEndian, isBigEndian );
+			this->set_charByteSize(1);
+            break;
+
+        case    ANTLR_ENC_8BIT:
+        default:
+
+            // Standard 8bit/ASCII
+            //
+            this->setupIntStream( isBigEndian, isBigEndian );
+			this->set_charByteSize(1);
+            break;
+    }    
+}
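+
+// For reference, the byte-order marks recognized (and skipped) in setupInputStream() are:
+//   UTF-8     : EF BB BF
+//   UTF-16 BE : FE FF          UTF-16 LE : FF FE
+//   UTF-32 BE : 00 00 FE FF    UTF-32 LE : FF FE 00 00
+// The explicit *BE / *LE variants and the 8-bit / EBCDIC encodings expect no BOM.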
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3interfaces.hpp b/runtime/Cpp/include/antlr3interfaces.hpp
new file mode 100755
index 0000000..f629784
--- /dev/null
+++ b/runtime/Cpp/include/antlr3interfaces.hpp
@@ -0,0 +1,301 @@
+/** \file
+ * Declarations for all the antlr3 C++ runtime interfaces/classes. This
+ * allows the structures that define the interfaces to contain pointers to
+ * each other without trying to sort out the cyclic interdependencies that
+ * would otherwise result.
+ */
+#ifndef	_ANTLR3_INTERFACES_HPP
+#define	_ANTLR3_INTERFACES_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits, class SuperType>
+class IntStream;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_RECOGNIZER_SHARED_STATE
+/// \ingroup ANTLR3_RECOGNIZER_SHARED_STATE
+///
+template<class ImplTraits, class SuperType>
+class RecognizerSharedState;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_BITSET_LIST
+/// \ingroup ANTLR3_BITSET_LIST
+///
+template<class AllocatorType>
+class BitsetList;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_BITSET
+/// \ingroup ANTLR3_BITSET
+///
+template<class AllocatorType>
+class Bitset;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_COMMON_TOKEN
+/// \ingroup ANTLR3_COMMON_TOKEN
+///
+template<class ImplTraits>
+class CommonToken;
+
+template<class ImplTraits>
+class CommonTokenFunctions;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_EXCEPTION
+/// \ingroup ANTLR3_EXCEPTION
+///
+enum ExceptionType
+{
+	/** Indicates that the recognizer received a token
+	 *  in the input that was not predicted.
+	 */
+	RECOGNITION_EXCEPTION = 0
+	/** Indicates that the recognizer was expecting one token and found a
+	 *  different one.
+	 */
+	, MISMATCHED_TOKEN_EXCEPTION
+
+	/** Recognizer could not find a valid alternative from the input
+	 */
+	, NO_VIABLE_ALT_EXCEPTION
+
+	/* Character in a set was not found
+	 */
+	, MISMATCHED_SET_EXCEPTION
+
+	/* A rule predicting at least n elements found less than that,
+	 * such as: WS: " "+;
+	 */
+	, EARLY_EXIT_EXCEPTION
+
+	, FAILED_PREDICATE_EXCEPTION
+
+	, MISMATCHED_TREE_NODE_EXCEPTION
+
+	, REWRITE_EARLY_EXCEPTION
+
+	, UNWANTED_TOKEN_EXCEPTION
+
+	, MISSING_TOKEN_EXCEPTION
+};
+
+template<class ImplTraits, ExceptionType Ex, class StreamType>
+class ANTLR_Exception;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_TOPO
+/// \ingroup ANTLR3_TOPO
+///
+template<class AllocPolicyType>
+class Topo;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_INPUT_STREAM
+/// \ingroup ANTLR3_INPUT_STREAM
+///
+template<class ImplTraits>
+class InputStream;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_LEX_STATE
+/// \ingroup ANTLR3_LEX_STATE
+///
+template<class ImplTraits>
+class LexState;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_TOKEN_SOURCE
+/// \ingroup ANTLR3_TOKEN_SOURCE
+///
+template<class ImplTraits>
+class TokenSource;
+template<class ImplTraits, class SuperType>
+class TokenSourceFunctions;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_TOKEN_STREAM
+/// \ingroup ANTLR3_TOKEN_STREAM
+///
+template<class ImplTraits>
+class TokenStream;
+template<class ImplTraits>
+class TokenStreamFunctions;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_COMMON_TOKEN_STREAM
+/// \ingroup ANTLR3_COMMON_TOKEN_STREAM
+///
+template<class ImplTraits>
+class CommonTokenStream;
+template<class ImplTraits>
+class CommonTokenStreamFunctions;
+
+
+/// Pointer to an instantiation of 'class' #ANTLR3_CYCLIC_DFA
+/// \ingroup ANTLR3_CYCLIC_DFA
+///
+template<class ImplTraits, class ComponentType>
+class CyclicDFA;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_LEXER
+/// \ingroup ANTLR3_LEXER
+///
+template<class ImplTraits>
+class Lexer;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_PARSER
+/// \ingroup ANTLR3_PARSER
+///
+template<class ImplTraits>
+class Parser;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_BASE_TREE
+/// \ingroup ANTLR3_BASE_TREE
+///
+template<class ImplTraits>
+class BaseTree;
+template<class ImplTraits>
+class BaseTreeFunctions;
+
+
+/// Pointer to an instantiation of 'class' #ANTLR3_COMMON_TREE
+/// \ingroup ANTLR3_COMMON_TREE
+///
+template<class ImplTraits>
+class CommonTree;
+template<class ImplTraits>
+class CommonTreeFunctions;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_PARSE_TREE
+/// \ingroup ANTLR3_PARSE_TREE
+///
+template<class ImplTraits>
+class ParseTree;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_TREE_NODE_STREAM
+/// \ingroup ANTLR3_TREE_NODE_STREAM
+///
+template<class ImplTraits>
+class TreeNodeStream;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_COMMON_TREE_NODE_STREAM
+/// \ingroup ANTLR3_COMMON_TREE_NODE_STREAM
+///
+template<class ImplTraits>
+class CommonTreeNodeStream;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_TREE_WALK_STATE
+/// \ingroup ANTLR3_TREE_WALK_STATE
+///
+template<class ImplTraits>
+class TreeWalkState;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_BASE_TREE_ADAPTOR
+/// \ingroup ANTLR3_BASE_TREE_ADAPTOR
+///
+template<class ImplTraits>
+class BaseTreeAdaptor;
+template<class ImplTraits>
+class BaseTreeAdaptorFunctions;
+
+
+/// Pointer to an instantiation of 'class' #ANTLR3_COMMON_TREE_ADAPTOR
+/// \ingroup ANTLR3_COMMON_TREE_ADAPTOR
+///
+template<class ImplTraits>
+class CommonTreeAdaptor;
+template<class ImplTraits>
+class CommonTreeAdaptorFunctions;
+
+
+/// Pointer to an instantiation of 'class' #ANTLR3_TREE_PARSER
+/// \ingroup ANTLR3_TREE_PARSER
+///
+template<class ImplTraits>
+class TreeParser;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_INT_TRIE
+/// \ingroup ANTLR3_INT_TRIE
+///
+template< class DataType, class AllocPolicyType >
+class IntTrie;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_REWRITE_RULE_ELEMENT_STREAM
+/// \ingroup ANTLR3_REWRITE_RULE_ELEMENT_STREAM
+///
+template<class ImplTraits, class SuperType>
+class RewriteRuleElementStream;
+
+template<class ImplTraits>
+class RewriteRuleTokenStream;
+
+template<class ImplTraits>
+class RewriteRuleSubtreeStream;
+
+template<class ImplTraits>
+class RewriteRuleNodeStream;
+
+/// Pointer to an instantiation of 'class' #ANTLR3_DEBUG_EVENT_LISTENER
+/// \ingroup ANTLR3_DEBUG_EVENT_LISTENER
+///
+template<class ImplTraits>
+class  DebugEventListener;
+
+// A class used only to wrap other classes as simple tag types for forwarding;
+// this keeps class forwarding logic simple because its constructor is trivial.
+template<class A>
+class ClassForwarder {};
+
+template<bool b>
+class BoolForwarder {};
+class Empty {};
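+
+// Illustrative tag-dispatch sketch (hypothetical function names, not part of the
+// runtime): ClassForwarder and BoolForwarder are empty tag types that let overloads
+// be selected at compile time, e.g.
+//
+//   template<class T> void init( ClassForwarder<T> );    // generic case
+//   void init( ClassForwarder<Empty> );                  // special case
+//   init( ClassForwarder<Empty>() );                     // picks the special overload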
+
+template<class ImplTraits, class StreamType>
+class ComponentTypeFinder
+{
+};
+
+template<class ImplTraits>
+class ComponentTypeFinder< ImplTraits, typename ImplTraits::InputStreamType>
+{
+public:
+	typedef typename ImplTraits::LexerType ComponentType;
+};
+
+template<class ImplTraits>
+class ComponentTypeFinder< ImplTraits, typename ImplTraits::TokenStreamType>
+{
+public:
+	typedef typename ImplTraits::ParserType ComponentType;
+};
+
+template<class ImplTraits>
+class ComponentTypeFinder< ImplTraits, typename ImplTraits::TreeNodeStreamType>
+{
+public:
+	typedef typename ImplTraits::TreeParserType ComponentType;
+};
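+
+/// Illustrative sketch (not part of the runtime): the specialisations above map a
+/// stream type from the traits to the recognizer that consumes it - lexer for input
+/// streams, parser for token streams, tree parser for tree node streams. MyTraits
+/// below stands for an assumed, user-supplied ImplTraits instantiation.
+///
+/// \code
+/// // Yields MyTraits::ParserType, because the stream is a token stream:
+/// typedef ComponentTypeFinder< MyTraits, MyTraits::TokenStreamType >::ComponentType ParserComponent;
+/// \endcode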
+
+
+ANTLR_END_NAMESPACE()
+
+#endif
diff --git a/runtime/Cpp/include/antlr3intstream.hpp b/runtime/Cpp/include/antlr3intstream.hpp
new file mode 100755
index 0000000..82c116b
--- /dev/null
+++ b/runtime/Cpp/include/antlr3intstream.hpp
@@ -0,0 +1,404 @@
+/** \file
+ * Defines the class interface for an antlr3 INTSTREAM.
+ *
+ * Certain functionality (such as DFAs, for instance) abstracts the stream of tokens
+ * or characters into a stream of integers. Hence this structure should be included
+ * in any stream that is able to provide its output as a stream of integers (which is
+ * basically anything).
+ *
+ * There are no specific implementations of the methods in this interface in general. Though
+ * for purposes of casting and so on, it may be necessary to implement a function with
+ * the signature in this interface which abstracts the base implementation. In essence though
+ * the base stream provides a pointer to this interface, within which it installs its
+ * normal match() functions and so on. Interfaces such as DFA are then passed the pANTLR3_INT_STREAM
+ * and can treat any input as an int stream.
+ *
+ * For instance, a lexer implements a pANTLR3_BASE_RECOGNIZER, within which there is a pANTLR3_INT_STREAM.
+ * However, a pANTLR3_INPUT_STREAM also provides a pANTLR3_INT_STREAM, which it has constructed from
+ * its normal interface when it was created. This is then pointed at by the pANTLR3_BASE_RECOGNIZER
+ * when it is initialized with a pANTLR3_INPUT_STREAM.
+ *
+ * Similarly, if a pANTLR3_BASE_RECOGNIZER is initialized with a pANTLR3_TOKEN_STREAM, then the
+ * pANTLR3_INT_STREAM is taken from the pANTLR3_TOKEN_STREAM.
+ *
+ * If a pANTLR3_BASE_RECOGNIZER is initialized with a pANTLR3_TREENODE_STREAM, then guess where
+ * the pANTLR3_INT_STREAM comes from?
+ *
+ * Note that because the context pointer points to the actual interface structure that is providing
+ * the ANTLR3_INT_STREAM, it is defined as a (void *) in this interface. There is no direct implementation
+ * of an ANTLR3_INT_STREAM (unless someone did not understand what I was doing here =;?P).
+ */
+#ifndef	_ANTLR3_INTSTREAM_HPP
+#define	_ANTLR3_INTSTREAM_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include  <cassert>
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+enum STREAM_TYPE
+{
+	/** Type indicator for a character stream
+	 * \remark if a custom stream is created but it can be treated as
+	 * a char stream, then you may OR in this value to your type indicator
+	 */
+	CHARSTREAM	= 0x0001
+
+	/** Type indicator for a Token stream
+	 * \remark if a custom stream is created but it can be treated as
+	 * a token stream, then you may OR in this value to your type indicator
+	 */
+	, TOKENSTREAM = 0x0002
+
+	/** Type indicator for a common tree node stream
+	 * \remark if a custom stream is created but it can be treated as
+	 * a common tree node stream, then you may OR in this value to your type indicator
+	 */
+	, COMMONTREENODE = 0x0004
+
+	/** Type mask for input stream so we can switch in the above types
+	*  \remark DO NOT USE 0x0000 as a stream type!
+	*/
+	, INPUT_MASK =	0x0007
+};
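+
+/// Illustrative sketch (not part of the runtime) of the OR-in convention noted in
+/// the remarks above; MY_CUSTOM_STREAM and streamType are hypothetical names.
+///
+/// \code
+/// enum { MY_CUSTOM_STREAM = 0x0100 | CHARSTREAM };   // custom, but usable as a char stream
+///
+/// if ( (streamType & INPUT_MASK) == CHARSTREAM )
+/// {
+///     // safe to treat the stream as character input
+/// }
+/// \endcode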
+
+class RESOLVE_ENDIAN_AT_RUNTIME {};
+class BYTE_AGNOSTIC {};
+class ANTLR_LITTLE_ENDIAN {};
+class ANTLR_BIG_ENDIAN {};
+
+template<class ImplTraits, class SuperType>
+class IntStream : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::StringType StringType;
+	
+protected:
+    /** Potentially useful in error reporting and so on, this string is
+     *  an identification of the input source. It may be NULL, so anything
+     *  attempting to access it needs to check this and substitute a sensible
+     *  default.
+     */
+    StringType		m_streamName;
+
+    /** Last marker position allocated
+     */
+    ANTLR_MARKER	m_lastMarker;
+	
+    bool		m_upper_case; // if set, values should be returned in upper case
+
+    /// Indicates whether we should implement endian-specific logic
+    /// 0 - Undefined, 1 - Default (machine and input have the same byte order), 2 - Little Endian, 3 - Big Endian
+    ANTLR_UINT8		m_endian_spec;	
+
+public:
+	IntStream();
+	
+	// Return a string that identifies the input source
+	//
+	StringType		getSourceName();
+	StringType& 	get_streamName();
+	const StringType& 	get_streamName() const;
+	ANTLR_MARKER get_lastMarker() const;
+
+	SuperType* get_super();
+	/**
+     * Function that installs a version of LA that always
+     * returns upper case. Only valid for character streams and creates a case
+     * insensitive lexer if the lexer tokens are described in upper case. The
+     * tokens will preserve case in the token text.
+     */
+    void	setUcaseLA(bool flag);
+
+    /** Consume the next 'ANTLR3_UINT32' in the stream
+     */
+    void		    consume();
+
+    /** Get ANTLR3_UINT32 at current input pointer + i ahead where i=1 is next ANTLR3_UINT32 
+     */
+    ANTLR_UINT32	_LA( ANTLR_INT32 i);
+
+    /** Tell the stream to start buffering if it hasn't already.  Return
+     *  current input position, index(), or some other marker so that
+     *  when passed to rewind() you get back to the same spot.
+     *  rewind(mark()) should not affect the input cursor.
+     */
+    ANTLR_MARKER	    mark();
+    
+    /** Return the current input symbol index 0..n where n indicates the
+     *  last symbol has been read.
+     */
+    ANTLR_MARKER	    index();
+
+    /** Reset the stream so that next call to index would return marker.
+     *  The marker will usually be index() but it doesn't have to be.  It's
+     *  just a marker to indicate what state the stream was in.  This is
+     *  essentially calling release() and seek().  If there are markers
+     *  created after this marker argument, this routine must unroll them
+     *  like a stack.  Assume the state the stream was in when this marker
+     *  was created.
+     */
+    void	rewind(ANTLR_MARKER marker);
+
+    /** Reset the stream to the last marker position, without destroying the
+     *  last marker position.
+     */
+    void	rewindLast();
+
+    /** You may want to commit to a backtrack but don't want to force the
+     *  stream to keep bookkeeping objects around for a marker that is
+     *  no longer necessary.  This will have the same behavior as
+     *  rewind() except it releases resources without the backward seek.
+     */
+    void	release(ANTLR_MARKER mark);
+
+    /** Set the input cursor to the position indicated by index.  This is
+     *  normally used to seek ahead in the input stream.  No buffering is
+     *  required to do this unless you know your stream will use seek to
+     *  move backwards such as when backtracking.
+     *
+     *  This is different from rewind in its multi-directional
+     *  requirement and in that its argument is strictly an input cursor (index).
+     *
+     *  For char streams, seeking forward must update the stream state such
+     *  as line number.  For seeking backwards, you will be presumably
+     *  backtracking using the mark/rewind mechanism that restores state and
+     *  so this method does not need to update state when seeking backwards.
+     *
+     *  Currently, this method is only used for efficient backtracking, but
+     *  in the future it may be used for incremental parsing.
+     */
+    void	seek(ANTLR_MARKER index);
+
+	/// Debug only method to flag consumption of initial off-channel
+	/// tokens in the input stream
+	///
+	void consumeInitialHiddenTokens();
+
+	void  rewindMark(ANTLR_MARKER marker);
+	ANTLR_MARKER tindex();
+
+    /** Frees any resources that were allocated for the implementation of this
+     *  interface. Usually this is just releasing the memory allocated
+     *  for the structure itself, but it may of course do anything it need to
+     *  so long as it does not stamp on anything else.
+     */
+	~IntStream();
+
+protected:
+	void setupIntStream(bool machineBigEndian, bool inputBigEndian);
+	void findout_endian_spec(bool machineBigEndian, bool inputBigEndian);
+
+	// If the user chooses this option, then endianness is resolved at run-time
+	ANTLR_UINT32	_LA( ANTLR_INT32 i, ClassForwarder<RESOLVE_ENDIAN_AT_RUNTIME> );
+
+	//resolve into one of the three categories below at runtime
+	void	consume( ClassForwarder<RESOLVE_ENDIAN_AT_RUNTIME> );
+};
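+
+/// Illustrative usage sketch (not part of the runtime) of the mark/rewind/release
+/// contract documented above; 'stream' and 'speculationFailed' are placeholders
+/// for a concrete stream object and the caller's own condition.
+///
+/// \code
+/// ANTLR_MARKER m = stream.mark();     // remember the current position
+/// stream.consume();                   // speculate over some input
+/// stream.consume();
+/// if ( speculationFailed )
+///     stream.rewind(m);               // back to the mark, stream state restored
+/// else
+///     stream.release(m);              // keep the position, drop the bookkeeping
+/// \endcode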
+
+template<class ImplTraits, class SuperType>
+class EBCDIC_IntStream : public IntStream<ImplTraits, SuperType>
+{
+public:
+	ANTLR_UINT32	_LA( ANTLR_INT32 i);
+
+protected:
+	void setupIntStream();
+};
+
+template<class ImplTraits, class SuperType>
+class UTF8_IntStream : public IntStream<ImplTraits, SuperType>
+{
+public:
+	ANTLR_UINT32	_LA( ANTLR_INT32 i);
+	void consume();
+
+protected:
+	void setupIntStream(bool machineBigEndian, bool inputBigEndian);
+
+private:
+	static const ANTLR_UINT32* TrailingBytesForUTF8();
+	static const UTF32* OffsetsFromUTF8();
+};
+
+template<class ImplTraits, class SuperType>
+class UTF16_IntStream : public IntStream<ImplTraits, SuperType>
+{
+public:
+	ANTLR_UINT32	_LA( ANTLR_INT32 i);
+	void		    consume();
+	ANTLR_MARKER	index();
+	void seek(ANTLR_MARKER seekPoint);
+
+protected:
+	void setupIntStream(bool machineBigEndian, bool inputBigEndian);
+
+	/// \brief Return the input element assuming a UTF16 input in natural machine byte order
+	///
+	/// \param[in] input Input stream context pointer
+	/// \param[in] la 1 based offset of next input stream element
+	///
+	/// \return Next input character in internal ANTLR3 encoding (UTF32)
+	///
+	ANTLR_UINT32	_LA( ANTLR_INT32 i, ClassForwarder<BYTE_AGNOSTIC> );
+
+	/// \brief Return the input element assuming a UTF16 input when the input is Little Endian and the machine is not
+	///
+	/// \param[in] input Input stream context pointer
+	/// \param[in] la 1 based offset of next input stream element
+	///
+	/// \return Next input character in internal ANTLR3 encoding (UTF32)
+	///
+	ANTLR_UINT32	_LA( ANTLR_INT32 i, ClassForwarder<ANTLR_LITTLE_ENDIAN> );
+	
+	/// \brief Return the input element assuming a UTF16 input when the input is Big Endian and the machine is not
+	///
+	/// \param[in] input Input stream context pointer
+	/// \param[in] la 1 based offset of next input stream element
+	///
+	/// \return Next input character in internal ANTLR3 encoding (UTF32)
+	///
+	ANTLR_UINT32	_LA( ANTLR_INT32 i, ClassForwarder<ANTLR_BIG_ENDIAN> );
+
+	/// \brief Consume the next character in a UTF16 input stream
+	///
+	/// \param input Input stream context pointer
+	///
+	void	consume( ClassForwarder<BYTE_AGNOSTIC> );
+
+	/// \brief Consume the next character in a UTF16 input stream when the input is Little Endian and the machine is not
+	/// Note that the UTF16 routines do not do any substantial verification of the input stream; for performance's
+	/// sake, we assume it is validly encoded. So if a low surrogate is found at the current input position then we
+	/// just consume it. Surrogate pairs should be seen as Hi, Lo. So if we have a Lo first, then the input stream
+	/// is fubar but we just ignore that.
+	///
+	/// \param input Input stream context pointer
+	///
+	void	consume( ClassForwarder<ANTLR_LITTLE_ENDIAN> );
+
+	/// \brief Consume the next character in a UTF16 input stream when the input is Big Endian and the machine is not
+	///
+	/// \param input Input stream context pointer
+	///
+	void	consume( ClassForwarder<ANTLR_BIG_ENDIAN> );
+};
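+
+/// Worked example (illustrative only) of the surrogate-pair arithmetic performed by
+/// the UTF16 overloads above, using the conventional Unicode constants that the
+/// implementation is assumed to take from the convertutf tables (halfShift = 10,
+/// halfBase = 0x10000, UNI_SUR_HIGH_START = 0xD800, UNI_SUR_LOW_START = 0xDC00):
+///
+/// \code
+/// // U+1F600 is stored as the surrogate pair D83D DE00:
+/// UTF32 hi = 0xD83D, lo = 0xDE00;
+/// UTF32 cp = ((hi - UNI_SUR_HIGH_START) << halfShift)
+///               + (lo - UNI_SUR_LOW_START) + halfBase;    // cp == 0x1F600
+/// \endcode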
+
+
+
+template<class ImplTraits, class SuperType>
+class UTF32_IntStream : public IntStream<ImplTraits, SuperType>
+{
+public:
+	ANTLR_UINT32	_LA( ANTLR_INT32 i);
+	void		    consume();
+	
+	/// \brief Calculate the current index in the output stream.
+	/// \param[in] input Input stream context pointer
+	///
+	ANTLR_MARKER	index();
+	void seek(ANTLR_MARKER seekPoint);
+
+protected:
+	void setupIntStream(bool machineBigEndian, bool inputBigEndian);
+	ANTLR_UINT32	_LA( ANTLR_INT32 i, ClassForwarder<RESOLVE_ENDIAN_AT_RUNTIME> );
+	ANTLR_UINT32	_LA( ANTLR_INT32 i, ClassForwarder<BYTE_AGNOSTIC> );
+	ANTLR_UINT32	_LA( ANTLR_INT32 i, ClassForwarder<ANTLR_LITTLE_ENDIAN> );
+	ANTLR_UINT32	_LA( ANTLR_INT32 i, ClassForwarder<ANTLR_BIG_ENDIAN> );
+
+	void	consume( ClassForwarder<RESOLVE_ENDIAN_AT_RUNTIME> );
+	void	consume( ClassForwarder<BYTE_AGNOSTIC> );
+	void	consume( ClassForwarder<ANTLR_LITTLE_ENDIAN> );
+	void	consume( ClassForwarder<ANTLR_BIG_ENDIAN> );
+};
+
+template<class ImplTraits>
+class TokenIntStream : public IntStream<ImplTraits, typename ImplTraits::TokenStreamType >
+{
+public:
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::TokenStreamType TokenStreamType;
+	typedef IntStream<ImplTraits, TokenStreamType > BaseType;
+
+private:
+	/** Because the indirect call, though small in individual cases, can
+     *  mount up if there are thousands of tokens (very large input streams), callers
+     *  of size() can optionally use this cached size field.
+     */
+    ANTLR_UINT32	    m_cachedSize;
+
+public:
+	TokenIntStream();
+	ANTLR_UINT32 get_cachedSize() const;
+	void set_cachedSize( ANTLR_UINT32 cachedSize );
+
+	void consume();
+	void  consumeInitialHiddenTokens();
+	ANTLR_UINT32  _LA( ANTLR_INT32 i );
+	ANTLR_MARKER  mark();
+	ANTLR_UINT32  size();
+	void release();
+	ANTLR_MARKER  tindex();
+	void rewindLast();
+	void rewind(ANTLR_MARKER marker);
+	void seek(ANTLR_MARKER index);
+	StringType getSourceName();
+
+};
+
+template<class ImplTraits>
+class TreeNodeIntStream : public IntStream<ImplTraits, typename ImplTraits::CommonTreeNodeStreamType>
+{
+public:
+	typedef typename ImplTraits::CommonTreeNodeStreamType CommonTreeNodeStreamType;
+	typedef IntStream<ImplTraits, CommonTreeNodeStreamType > BaseType;
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+
+public:
+	void				consume();
+	ANTLR_MARKER		tindex();
+	ANTLR_UINT32		_LA(ANTLR_INT32 i);
+	ANTLR_MARKER		mark();
+	void				release(ANTLR_MARKER marker);
+	void				rewindMark(ANTLR_MARKER marker);
+	void				rewindLast();
+	void				seek(ANTLR_MARKER index);
+	ANTLR_UINT32		size();
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3intstream.inl"
+
+#endif
+
diff --git a/runtime/Cpp/include/antlr3intstream.inl b/runtime/Cpp/include/antlr3intstream.inl
new file mode 100755
index 0000000..e4de290
--- /dev/null
+++ b/runtime/Cpp/include/antlr3intstream.inl
@@ -0,0 +1,1661 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits, class SuperType>
+ANTLR_INLINE IntStream<ImplTraits, SuperType>::IntStream()
+{
+	m_lastMarker = 0;
+	m_upper_case = false;
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_INLINE typename IntStream<ImplTraits, SuperType>::StringType	IntStream<ImplTraits, SuperType>::getSourceName()
+{
+	return m_streamName;
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_INLINE typename IntStream<ImplTraits, SuperType>::StringType& 	IntStream<ImplTraits, SuperType>::get_streamName()
+{
+	return m_streamName;
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_INLINE const typename IntStream<ImplTraits, SuperType>::StringType& 	IntStream<ImplTraits, SuperType>::get_streamName() const
+{
+	return m_streamName;
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_INLINE ANTLR_MARKER IntStream<ImplTraits, SuperType>::get_lastMarker() const
+{
+	return m_lastMarker;
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_INLINE void	IntStream<ImplTraits, SuperType>::setUcaseLA(bool flag)
+{
+	m_upper_case = flag;
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_INLINE SuperType* IntStream<ImplTraits, SuperType>::get_super()
+{
+	return static_cast<SuperType*>(this);
+}
+
+template<class ImplTraits, class SuperType>
+void	IntStream<ImplTraits, SuperType>::consume()
+{
+	SuperType* input = this->get_super();
+
+	const ANTLR_UINT8* nextChar = input->get_nextChar();
+	const ANTLR_UINT8* data = input->get_data();
+	ANTLR_UINT32 sizeBuf = input->get_sizeBuf();
+
+    if	( nextChar < ( data + sizeBuf ) )
+    {	
+		/* Indicate one more character in this line
+		 */
+		input->inc_charPositionInLine();
+	
+		if  ((ANTLR_UCHAR)(*(nextChar)) == input->get_newlineChar() )
+		{
+			/* Reset for start of a new line of input
+			 */
+			input->inc_line();
+			input->set_charPositionInLine(0);
+			input->set_currentLine(nextChar + 1);
+		}
+
+		/* Increment to next character position
+		 */
+		input->set_nextChar( nextChar + 1 );
+    }
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 la )
+{
+	SuperType* input = this->get_super();
+	const ANTLR_UINT8* nextChar = input->get_nextChar();
+	const ANTLR_UINT8* data = input->get_data();
+	ANTLR_UINT32 sizeBuf = input->get_sizeBuf();
+
+    if	(( nextChar + la - 1) >= (data + sizeBuf))
+    {
+		return	ANTLR_CHARSTREAM_EOF;
+    }
+    else
+    {
+		if( !m_upper_case )
+			return	(ANTLR_UCHAR)(*(nextChar + la - 1));
+		else
+			return	(ANTLR_UCHAR)toupper(*(nextChar + la - 1));
+    }
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_MARKER IntStream<ImplTraits, SuperType>::mark()
+{
+	LexState<ImplTraits>*	    state;
+    SuperType* input = this->get_super();
+
+    /* New mark point 
+     */
+    input->inc_markDepth();
+
+    /* See if we are revisiting a mark as we can just reuse the vector
+     * entry if we are, otherwise, we need a new one
+     */
+    if	(input->get_markDepth() > input->get_markers().size() )
+    {	
+		input->get_markers().push_back( LexState<ImplTraits>() );
+		LexState<ImplTraits>& state_r = input->get_markers().back();
+		state = &state_r;
+    }
+    else
+    {
+		LexState<ImplTraits>& state_r = input->get_markers().at( input->get_markDepth() - 1 );
+		state	= &state_r;
+
+		/* Assume no errors for speed, it will just blow up if the table failed
+		 * for some reason, hence lots of unit tests on the tables ;-)
+		 */
+    }
+
+    /* We have created or retrieved the state, so update it with the current
+     * elements of the lexer state.
+     */
+    state->set_charPositionInLine( input->get_charPositionInLine() );
+    state->set_currentLine( input->get_currentLine() );
+    state->set_line( input->get_line() );
+    state->set_nextChar( input->get_nextChar() );
+
+    m_lastMarker = input->get_markDepth();
+
+    /* And that's it
+     */
+    return  input->get_markDepth();
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_MARKER	IntStream<ImplTraits, SuperType>::index()
+{
+	SuperType* input = this->get_super();
+	return input->index_impl();
+}
+
+template<class ImplTraits, class SuperType>
+void	IntStream<ImplTraits, SuperType>::rewind(ANTLR_MARKER mark)
+{
+    SuperType* input = this->get_super();
+
+    /* Perform any clean up of the marks
+     */
+    this->release(mark);
+
+    /* Find the supplied mark state 
+     */
+	ANTLR_UINT32 idx = static_cast<ANTLR_UINT32>( mark-1 );
+    typename ImplTraits::LexStateType&   state = input->get_markers().at( idx );
+
+    /* Seek input pointer to the requested point (note we supply the void *pointer
+     * to whatever is implementing the int stream to seek).
+     */
+	this->seek( (ANTLR_MARKER)state.get_nextChar() );
+    
+    /* Reset the rest of the information from the mark
+     */
+    input->set_charPositionInLine( state.get_charPositionInLine() );
+    input->set_currentLine( state.get_currentLine() );
+    input->set_line( state.get_line() );
+    input->set_nextChar( state.get_nextChar() );
+
+    /* And we are done
+     */
+}
+
+template<class ImplTraits, class SuperType>
+void	IntStream<ImplTraits, SuperType>::rewindLast()
+{
+	this->rewind(m_lastMarker);
+}
+
+template<class ImplTraits, class SuperType>
+void	IntStream<ImplTraits, SuperType>::release(ANTLR_MARKER mark)
+{
+	SuperType* input = this->get_super();
+
+	/* We don't do much here in fact as we never free any higher marks in
+     * the marker table; we just reuse any memory allocated for them.
+     */
+    input->set_markDepth( (ANTLR_UINT32)(mark - 1) );
+
+}
+
+template<class ImplTraits, class SuperType>
+void IntStream<ImplTraits, SuperType>::setupIntStream(bool, bool)
+{
+}
+
+template<class ImplTraits, class SuperType>
+void	IntStream<ImplTraits, SuperType>::seek(ANTLR_MARKER seekPoint)
+{
+	ANTLR_INT32   count;
+	SuperType* input = this->get_super();
+
+	ANTLR_MARKER nextChar = (ANTLR_MARKER) input->get_nextChar();
+	/* If the requested seek point is less than the current
+	* input point, then we assume that we are resetting from a mark
+	* and do not need to scan, but can just set to there.
+	*/
+	if	(seekPoint <= nextChar)
+	{
+		input->set_nextChar((ANTLR_UINT8*) seekPoint);
+	}
+	else
+	{
+		count	= (ANTLR_INT32)(seekPoint - nextChar);
+
+		while (count--)
+		{
+			this->consume();
+		}
+	}
+}
+
+template<class ImplTraits, class SuperType>
+IntStream<ImplTraits, SuperType>::~IntStream()
+{
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	EBCDIC_IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 la)
+{
+	// EBCDIC to ASCII conversion table
+	//
+	// This is for EBCDIC EDF04 translated to ISO-8859.1, which is the usually accepted POSIX
+	// translation and the character tables are published all over the interweb.
+	// 
+	const ANTLR_UCHAR e2a[256] =
+	{
+		0x00, 0x01, 0x02, 0x03, 0x85, 0x09, 0x86, 0x7f,
+		0x87, 0x8d, 0x8e, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+		0x10, 0x11, 0x12, 0x13, 0x8f, 0x0a, 0x08, 0x97,
+		0x18, 0x19, 0x9c, 0x9d, 0x1c, 0x1d, 0x1e, 0x1f,
+		0x80, 0x81, 0x82, 0x83, 0x84, 0x92, 0x17, 0x1b,
+		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x05, 0x06, 0x07, 
+		0x90, 0x91, 0x16, 0x93, 0x94, 0x95, 0x96, 0x04,
+		0x98, 0x99, 0x9a, 0x9b, 0x14, 0x15, 0x9e, 0x1a,
+		0x20, 0xa0, 0xe2, 0xe4, 0xe0, 0xe1, 0xe3, 0xe5,
+		0xe7, 0xf1, 0x60, 0x2e, 0x3c, 0x28, 0x2b, 0x7c,
+		0x26, 0xe9, 0xea, 0xeb, 0xe8, 0xed, 0xee, 0xef,
+		0xec, 0xdf, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x9f,
+		0x2d, 0x2f, 0xc2, 0xc4, 0xc0, 0xc1, 0xc3, 0xc5,
+		0xc7, 0xd1, 0x5e, 0x2c, 0x25, 0x5f, 0x3e, 0x3f,
+		0xf8, 0xc9, 0xca, 0xcb, 0xc8, 0xcd, 0xce, 0xcf,
+		0xcc, 0xa8, 0x3a, 0x23, 0x40, 0x27, 0x3d, 0x22,
+		0xd8, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+		0x68, 0x69, 0xab, 0xbb, 0xf0, 0xfd, 0xfe, 0xb1,
+		0xb0, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
+		0x71, 0x72, 0xaa, 0xba, 0xe6, 0xb8, 0xc6, 0xa4,
+		0xb5, 0xaf, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+		0x79, 0x7a, 0xa1, 0xbf, 0xd0, 0xdd, 0xde, 0xae,
+		0xa2, 0xa3, 0xa5, 0xb7, 0xa9, 0xa7, 0xb6, 0xbc,
+		0xbd, 0xbe, 0xac, 0x5b, 0x5c, 0x5d, 0xb4, 0xd7,
+		0xf9, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+		0x48, 0x49, 0xad, 0xf4, 0xf6, 0xf2, 0xf3, 0xf5,
+		0xa6, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
+		0x51, 0x52, 0xb9, 0xfb, 0xfc, 0xdb, 0xfa, 0xff,
+		0xd9, 0xf7, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+		0x59, 0x5a, 0xb2, 0xd4, 0xd6, 0xd2, 0xd3, 0xd5,
+		0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+		0x38, 0x39, 0xb3, 0x7b, 0xdc, 0x7d, 0xda, 0x7e
+	};
+
+	SuperType* input = this->get_super();
+
+    if	(( input->get_nextChar() + la - 1) >= ( input->get_data() + input->get_sizeBuf() ))
+    {
+        return	ANTLR_CHARSTREAM_EOF;
+    }
+    else
+    {
+        // Translate the required character via the constant conversion table
+        //
+        return	e2a[(*(input->get_nextChar() + la - 1))];
+    }
+}
+
+template<class ImplTraits, class SuperType>
+void EBCDIC_IntStream<ImplTraits, SuperType>::setupIntStream()
+{
+	SuperType* super = this->get_super();
+	super->set_charByteSize(1);
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	UTF16_IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 i)
+{
+	return this->_LA(i, ClassForwarder< typename ImplTraits::Endianness >() );
+}
+
+template<class ImplTraits, class SuperType>
+void UTF16_IntStream<ImplTraits, SuperType>::consume()
+{
+	this->consume( ClassForwarder< typename ImplTraits::Endianness >() );
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_MARKER	UTF16_IntStream<ImplTraits, SuperType>::index()
+{
+	SuperType* input = this->get_super();
+    return  (ANTLR_MARKER)(input->get_nextChar());
+}
+
+template<class ImplTraits, class SuperType>
+void UTF16_IntStream<ImplTraits, SuperType>::seek(ANTLR_MARKER seekPoint)
+{
+	SuperType* input = this->get_super();
+
+	// If the requested seek point is less than the current
+	// input point, then we assume that we are resetting from a mark
+	// and do not need to scan, but can just set to there as rewind will
+    // reset line numbers and so on.
+	//
+	if	(seekPoint <= (ANTLR_MARKER)(input->get_nextChar()))
+	{
+		input->set_nextChar( seekPoint );
+	}
+	else
+	{
+        // Call consume until we reach the asked for seek point or EOF
+        //
+        while( (this->_LA(1) != ANTLR_CHARSTREAM_EOF) && ( (ANTLR_MARKER)(input->get_nextChar()) < seekPoint ) )
+	    {
+			this->consume();
+	    }
+	}
+}
+
+template<class ImplTraits, class SuperType>
+void IntStream<ImplTraits, SuperType>::findout_endian_spec(bool machineBigEndian, bool inputBigEndian)
+{
+	// We must install different UTF16 routines according to whether the input
+	// has the same endianness as the machine we are executing upon or not. If it does not,
+	// then we must install methods that can convert the endianness on the fly as they go
+	//
+
+	if(machineBigEndian == true)
+	{
+		// Machine is Big Endian, if the input is also then install the 
+		// methods that do not access input by bytes and reverse them.
+		// Otherwise install endian aware methods.
+		//
+		if  (inputBigEndian == true) 
+		{
+			// Input is machine compatible
+			//
+			m_endian_spec = 1;
+		}
+		else
+		{
+			// Need to use methods that know that the input is little endian
+			//
+			m_endian_spec = 2;
+		}
+	}
+	else
+	{
+		// Machine is Little Endian, if the input is also then install the 
+		// methods that do not access input by bytes and reverse them.
+		// Otherwise install endian aware methods.
+		//
+		if  (inputBigEndian == false) 
+		{
+			// Input is machine compatible
+			//
+			m_endian_spec =  1;
+		}
+		else
+		{
+			// Need to use methods that know that the input is Big Endian
+			//
+			m_endian_spec	= 3;
+		}
+	}
+}
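+
+// Illustrative helper (not part of the runtime): one conventional way a caller
+// could derive the machineBigEndian argument passed to findout_endian_spec().
+//
+//     static bool isMachineBigEndian()
+//     {
+//         const ANTLR_UINT16 probe = 0x0102;
+//         // A big endian machine stores the high-order byte 0x01 first.
+//         return *reinterpret_cast<const ANTLR_UINT8*>(&probe) == 0x01;
+//     }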
+
+template<class ImplTraits, class SuperType>
+void UTF16_IntStream<ImplTraits, SuperType>::setupIntStream(bool machineBigEndian, bool inputBigEndian)
+{
+	SuperType* super = this->get_super();
+	super->set_charByteSize(2);
+
+	this->findout_endian_spec( machineBigEndian, inputBigEndian );
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32 IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 i, ClassForwarder<RESOLVE_ENDIAN_AT_RUNTIME> )
+{
+	assert( (m_endian_spec >= 1) && (m_endian_spec <= 3));
+	switch(m_endian_spec)
+	{
+	case 1:
+		return this->_LA(i, ClassForwarder<BYTE_AGNOSTIC>() );
+		break;
+	case 2:
+		return this->_LA(i, ClassForwarder<ANTLR_LITTLE_ENDIAN>() );
+		break;
+	case 3:
+		return this->_LA(i, ClassForwarder<ANTLR_BIG_ENDIAN>() );
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+template<class ImplTraits, class SuperType>
+void	IntStream<ImplTraits, SuperType>::consume( ClassForwarder<RESOLVE_ENDIAN_AT_RUNTIME> )
+{
+	assert( (m_endian_spec >= 1) && (m_endian_spec <= 3));
+	switch(m_endian_spec)
+	{
+	case 1:
+		this->consume( ClassForwarder<BYTE_AGNOSTIC>() );
+		break;
+	case 2:
+		this->consume( ClassForwarder<ANTLR_LITTLE_ENDIAN>() );
+		break;
+	case 3:
+		this->consume( ClassForwarder<ANTLR_BIG_ENDIAN>() );
+		break;
+	default:
+		break;
+	}
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	UTF16_IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 la, ClassForwarder<BYTE_AGNOSTIC> )
+{
+	SuperType* input;
+    UTF32   ch;
+    UTF32   ch2;
+    UTF16*	nextChar;
+
+    // Find the input interface and where we are currently pointing to
+    // in the input stream
+    //
+	input   = this->get_super();
+	nextChar    = input->get_nextChar();
+
+    // If a positive offset then advance forward, else retreat
+    //
+    if  (la >= 0)
+    {
+        while   (--la > 0 && (ANTLR_UINT8*)nextChar < ((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf() )
+        {
+            // Advance our copy of the input pointer
+            //
+            // Next char in natural machine byte order
+            //
+            ch  = *nextChar++;
+
+            // If we have a surrogate pair then we need to consume
+            // a following valid LO surrogate.
+            //
+            if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+            {
+                // If the 16 bits following the high surrogate are in the source buffer...
+                //
+                if	((ANTLR_UINT8*)(nextChar) < (((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf() ))
+                {
+                    // Next character is in natural machine byte order
+                    //
+                    ch2 = *nextChar;
+
+                    // If it's a valid low surrogate, consume it
+                    //
+                    if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                    {
+                        // We consumed one 16 bit character
+                        //
+						nextChar++;
+                    }
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it.
+                    //
+                } 
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it because the buffer ended
+                //
+            }
+            // Note that we did not check for an invalid low surrogate here, or the fact that the
+            // lo surrogate was missing. We just picked out one 16 bit character unless the character
+            // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+            //
+        }
+    }
+    else
+    {
+        // We need to go backwards from our input point
+        //
+        while   (la++ < 0 && (ANTLR_UINT8*)nextChar > (ANTLR_UINT8*)input->get_data() )
+        {
+            // Get the previous 16 bit character
+            //
+            ch = *--nextChar;
+
+            // If we found a low surrogate then go back one more character if
+            // the hi surrogate is there
+            //
+            if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) 
+            {
+                ch2 = *(nextChar-1);
+                if (ch2 >= UNI_SUR_HIGH_START && ch2 <= UNI_SUR_HIGH_END) 
+                {
+                    // Yes, there is a high surrogate to match it so decrement one more and point to that
+                    //
+                    nextChar--;
+                }
+            }
+        }
+    }
+
+    // Our local copy of nextChar is now pointing to either the correct character or end of file
+    //
+    // Input buffer size is always in bytes
+    //
+	if	( (ANTLR_UINT8*)nextChar >= (((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf() ))
+	{
+		return	ANTLR_CHARSTREAM_EOF;
+	}
+	else
+	{
+        // Pick up the next 16 bit character (native machine byte order)
+        //
+        ch = *nextChar++;
+
+        // If we have a surrogate pair then we need to consume
+        // a following valid LO surrogate.
+        //
+        if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+        {
+            // If the 16 bits following the high surrogate are in the source buffer...
+            //
+            if	((ANTLR_UINT8*)(nextChar) < (((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf()))
+            {
+                // Next character is in natural machine byte order
+                //
+                ch2 = *nextChar;
+
+                // If it's a valid low surrogate, consume it
+                //
+                if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                {
+                    // Construct the UTF32 code point
+                    //
+                    ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+								+ (ch2 - UNI_SUR_LOW_START) + halfBase;
+                }
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it.
+                //
+            } 
+            // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+            // it because the buffer ended
+            //
+        }
+    }
+    return ch;
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	UTF16_IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 la, ClassForwarder<ANTLR_LITTLE_ENDIAN> )
+{
+	SuperType* input;
+    UTF32           ch;
+    UTF32           ch2;
+    ANTLR_UCHAR*   nextChar;
+
+    // Find the input interface and where we are currently pointing to
+    // in the input stream
+    //
+	input       = this->get_super();
+    nextChar    = input->get_nextChar();
+
+    // If a positive offset then advance forward, else retreat
+    //
+    if  (la >= 0)
+    {
+        while   (--la > 0 && (ANTLR_UINT8*)nextChar < ((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf() )
+        {
+            // Advance our copy of the input pointer
+            //
+            // Next char in Little Endian byte order
+            //
+            ch  = (*nextChar) + (*(nextChar+1) << 8);
+            nextChar += 2;
+
+            // If we have a surrogate pair then we need to consume
+            // a following valid LO surrogate.
+            //
+            if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+            {
+                // If the 16 bits following the high surrogate are in the source buffer...
+                //
+                if	((ANTLR_UINT8*)(nextChar) < (((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf() ))
+                {
+                    // Next character is in little endian byte order
+                    //
+                    ch2 = (*nextChar) + (*(nextChar+1) << 8);
+
+                    // If it's a valid low surrogate, consume it
+                    //
+                    if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                    {
+                        // We consumed one 16 bit character
+                        //
+						nextChar += 2;
+                    }
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it.
+                    //
+                } 
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it because the buffer ended
+                //
+            }
+            // Note that we did not check for an invalid low surrogate here, or the fact that the
+            // lo surrogate was missing. We just picked out one 16 bit character unless the character
+            // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+            //
+        }
+    }
+    else
+    {
+        // We need to go backwards from our input point
+        //
+        while   (la++ < 0 && (ANTLR_UINT8*)nextChar > (ANTLR_UINT8*)input->get_data() )
+        {
+            // Get the previous 16 bit character
+            //
+            ch = *(nextChar - 2) + (*(nextChar - 1) << 8);
+            nextChar -= 2;
+
+            // If we found a low surrogate then go back one more character if
+            // the hi surrogate is there
+            //
+            if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) 
+            {
+                ch2 = *(nextChar - 2) + (*(nextChar - 1) << 8);
+                if (ch2 >= UNI_SUR_HIGH_START && ch2 <= UNI_SUR_HIGH_END) 
+                {
+                    // Yes, there is a high surrogate to match it so decrement one more and point to that
+                    //
+                    nextChar -=2;
+                }
+            }
+        }
+    }
+
+    // Our local copy of nextChar is now pointing to either the correct character or end of file
+    //
+    // Input buffer size is always in bytes
+    //
+	if	( (ANTLR_UINT8*)nextChar >= (((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf()))
+	{
+		return	ANTLR_CHARSTREAM_EOF;
+	}
+	else
+	{
+        // Pick up the next 16 bit character (little endian byte order)
+        //
+        ch = (*nextChar) + (*(nextChar+1) << 8);
+        nextChar += 2;
+
+        // If we have a surrogate pair then we need to consume
+        // a following valid LO surrogate.
+        //
+        if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+        {
+            // If the 16 bits following the high surrogate are in the source buffer...
+            //
+            if	((ANTLR_UINT8*)(nextChar) < (((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf()))
+            {
+                // Next character is in little endian byte order
+                //
+                ch2 = (*nextChar) + (*(nextChar+1) << 8);
+
+                // If it's a valid low surrogate, consume it
+                //
+                if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                {
+                    // Construct the UTF32 code point
+                    //
+                    ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+								+ (ch2 - UNI_SUR_LOW_START) + halfBase;
+                }
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it.
+                //
+            } 
+            // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+            // it because the buffer ended
+            //
+        }
+    }
+    return ch;
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	UTF16_IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 la, ClassForwarder<ANTLR_BIG_ENDIAN> )
+{
+	SuperType* input;
+    UTF32           ch;
+    UTF32           ch2;
+    ANTLR_UCHAR*   nextChar;
+
+    // Find the input interface and where we are currently pointing to
+    // in the input stream
+    //
+	input       = this->get_super();
+    nextChar    = input->get_nextChar();
+
+    // If a positive offset then advance forward, else retreat
+    //
+    if  (la >= 0)
+    {
+        while   (--la > 0 && (ANTLR_UINT8*)nextChar < ((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf() )
+        {
+            // Advance our copy of the input pointer
+            //
+            // Next char in Big Endian byte order
+            //
+            ch  = ((*nextChar) << 8) + *(nextChar+1);
+            nextChar += 2;
+
+            // If we have a surrogate pair then we need to consume
+            // a following valid LO surrogate.
+            //
+            if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+            {
+                // If the 16 bits following the high surrogate are in the source buffer...
+                //
+                if	((ANTLR_UINT8*)(nextChar) < (((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf()))
+                {
+                    // Next character is in big endian byte order
+                    //
+                    ch2 = ((*nextChar) << 8) + *(nextChar+1);
+
+                    // If it's a valid low surrogate, consume it
+                    //
+                    if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                    {
+                        // We consumed one 16 bit character
+                        //
+						nextChar += 2;
+                    }
+                    // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                    // it.
+                    //
+                } 
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it because the buffer ended
+                //
+            }
+            // Note that we did not check for an invalid low surrogate here, or the fact that the
+            // lo surrogate was missing. We just picked out one 16 bit character unless the character
+            // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+            //
+        }
+    }
+    else
+    {
+        // We need to go backwards from our input point
+        //
+        while   (la++ < 0 && (ANTLR_UINT8*)nextChar > (ANTLR_UINT8*)input->get_data() )
+        {
+            // Get the previous 16 bit character
+            //
+            ch = (*(nextChar - 2) << 8) + *(nextChar - 1);
+            nextChar -= 2;
+
+            // If we found a low surrogate then go back one more character if
+            // the hi surrogate is there
+            //
+            if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) 
+            {
+                ch2 = (*(nextChar - 2) << 8) + *(nextChar - 1);
+                if (ch2 >= UNI_SUR_HIGH_START && ch2 <= UNI_SUR_HIGH_END) 
+                {
+                    // Yes, there is a high surrogate to match it so decrement one more and point to that
+                    //
+                    nextChar -=2;
+                }
+            }
+        }
+    }
+
+    // Our local copy of nextChar is now pointing to either the correct character or end of file
+    //
+    // Input buffer size is always in bytes
+    //
+	if	( (ANTLR_UINT8*)nextChar >= (((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf()))
+	{
+		return	ANTLR_CHARSTREAM_EOF;
+	}
+	else
+	{
+        // Pick up the next 16 bit character (big endian byte order)
+        //
+        ch = ((*nextChar) << 8) + *(nextChar+1);
+        nextChar += 2;
+
+        // If we have a surrogate pair then we need to consume
+        // a following valid LO surrogate.
+        //
+        if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+        {
+            // If the 16 bits following the high surrogate are in the source buffer...
+            //
+            if	((ANTLR_UINT8*)(nextChar) < (((ANTLR_UINT8*)input->get_data()) + input->get_sizeBuf()))
+            {
+                // Next character is in big endian byte order
+                //
+                ch2 = ((*nextChar) << 8) + *(nextChar+1);
+
+                // If it's a valid low surrogate, consume it
+                //
+                if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                {
+                    // Construct the UTF32 code point
+                    //
+                    ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+								+ (ch2 - UNI_SUR_LOW_START) + halfBase;
+                }
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it.
+                //
+            } 
+            // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+            // it because the buffer ended
+            //
+        }
+    }
+    return ch;
+}
+
+template<class ImplTraits, class SuperType>
+void	UTF16_IntStream<ImplTraits, SuperType>::consume( ClassForwarder<BYTE_AGNOSTIC> )
+{
+	SuperType* input;
+    UTF32   ch;
+    UTF32   ch2;
+
+	input   = this->get_super();
+
+    // Buffer size is always in bytes
+    //
+	if(input->get_nextChar() < (input->get_data() + input->get_sizeBuf()/2) )
+	{	
+		// Indicate one more character in this line
+		//
+		input->inc_charPositionInLine();
+
+		if  ((ANTLR_UCHAR)(*(input->get_nextChar())) == input->get_newlineChar())
+		{
+			// Reset for start of a new line of input
+			//
+			input->inc_line();
+			input->set_charPositionInLine(0);
+			input->set_currentLine( input->get_nextChar() + 1 );
+		}
+
+		// Increment to next character position, accounting for any surrogates
+		//
+        // Next char in natural machine byte order
+        //
+        ch  = *(input->get_nextChar());
+
+        // We consumed one 16 bit character
+        //
+		input->set_nextChar( input->get_nextChar() + 1 );
+
+        // If we have a surrogate pair then we need to consume
+        // a following valid LO surrogate.
+        //
+        if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+
+            // If the 16 bits following the high surrogate are in the source buffer...
+            //
+            if(input->get_nextChar() < (input->get_data() + input->get_sizeBuf()/2) )
+            {
+                // Next character is in natural machine byte order
+                //
+                ch2 = *(input->get_nextChar());
+
+                // If it's a valid low surrogate, consume it
+                //
+                if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                {
+                    // We consumed one 16 bit character
+                    //
+					input->set_nextChar( input->get_nextChar() + 1 );
+                }
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it.
+                //
+            } 
+            // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+            // it because the buffer ended
+            //
+        } 
+        // Note that we did not check for an invalid low surrogate here, or the fact that the
+        // lo surrogate was missing. We just picked out one 16 bit character unless the character
+        // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+        //
+	}
+
+}
+
+template<class ImplTraits, class SuperType>
+void	UTF16_IntStream<ImplTraits, SuperType>::consume( ClassForwarder<ANTLR_LITTLE_ENDIAN> )
+{
+	SuperType* input;
+    UTF32   ch;
+    UTF32   ch2;
+
+	input   = this->get_super();
+
+    // Buffer size is always in bytes
+    //
+	if(input->get_nextChar() < (input->get_data() + input->get_sizeBuf()/2) )
+	{	
+		// Indicate one more character in this line
+		//
+		input->inc_charPositionInLine();
+
+		if  ((ANTLR_UCHAR)(*(input->get_nextChar())) == input->get_newlineChar())
+		{
+			// Reset for start of a new line of input
+			//
+			input->inc_line();
+			input->set_charPositionInLine(0);
+			input->set_currentLine(input->get_nextChar() + 1);
+		}
+
+		// Increment to next character position, accounting for any surrogates
+		//
+        // Next char in little endian form
+        //
+        ch  = *((ANTLR_UINT8*)input->get_nextChar()) + (*((ANTLR_UINT8*)input->get_nextChar() + 1) <<8);
+
+        // We consumed one 16 bit character
+        //
+		input->set_nextChar( input->get_nextChar() + 1);
+
+        // If we have a surrogate pair then we need to consume
+        // a following valid LO surrogate.
+        //
+        if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+		{
+            // If the 16 bits following the high surrogate are in the source buffer...
+            //
+            if(input->get_nextChar() < (input->get_data() + input->get_sizeBuf()/2) )
+            {
+                ch2 = *((ANTLR_UINT8*)input->get_nextChar()) + (*((ANTLR_UINT8*)input->get_nextChar() + 1) <<8);
+
+                // If it's a valid low surrogate, consume it
+                //
+                if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                {
+                    // We consumed one 16 bit character
+                    //
+					input->set_nextChar( input->get_nextChar() + 1);
+                }
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it.
+                //
+            } 
+            // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+            // it because the buffer ended
+            //
+        } 
+        // Note that we did not check for an invalid low surrogate here, or the fact that the
+        // lo surrogate was missing. We just picked out one 16 bit character unless the character
+        // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+        //
+	}
+}
+
+template<class ImplTraits, class SuperType>
+void	UTF16_IntStream<ImplTraits, SuperType>::consume( ClassForwarder<ANTLR_BIG_ENDIAN> )
+{
+	SuperType* input;
+    UTF32   ch;
+    UTF32   ch2;
+
+	input   = this->get_super();
+
+    // Buffer size is always in bytes
+    //
+	if(input->get_nextChar() < (input->get_data() + input->get_sizeBuf()/2) )
+	{	
+		// Indicate one more character in this line
+		//
+		input->inc_charPositionInLine();
+
+		if  ((ANTLR_UCHAR)(*(input->get_nextChar())) == input->get_newlineChar())
+		{
+			// Reset for start of a new line of input
+			//
+			input->inc_line();
+			input->set_charPositionInLine(0);
+			input->set_currentLine(input->get_nextChar() + 1);
+		}
+
+		// Increment to next character position, accounting for any surrogates
+		//
+        // Next char in big endian form
+        //
+        ch  = *((ANTLR_UINT8*)input->get_nextChar() + 1) + (*((ANTLR_UINT8*)input->get_nextChar() ) <<8);
+
+        // We consumed one 16 bit character
+        //
+		input->set_nextChar( input->get_nextChar() + 1);
+
+        // If we have a surrogate pair then we need to consume
+        // a following valid LO surrogate.
+        //
+        if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) 
+		{
+            // If the 16 bits following the high surrogate are in the source buffer...
+            //
+            if(input->get_nextChar() < (input->get_data() + input->get_sizeBuf()/2) )
+            {
+                // Big endian
+                //
+                ch2 = *((ANTLR_UINT8*)input->get_nextChar() + 1) + (*((ANTLR_UINT8*)input->get_nextChar() ) <<8);
+
+                // If it's a valid low surrogate, consume it
+                //
+                if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) 
+                {
+                    // We consumed one 16 bit character
+                    //
+					input->set_nextChar( input->get_nextChar() + 1);
+                }
+                // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+                // it.
+                //
+            } 
+            // Note that we ignore a valid hi surrogate that has no lo surrogate to go with
+            // it because the buffer ended
+            //
+        } 
+        // Note that we did not check for an invalid low surrogate here, or the fact that the
+        // lo surrogate was missing. We just picked out one 16 bit character unless the character
+        // was a valid hi surrogate, in which case we consumed two 16 bit characters.
+        //
+	}
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	UTF32_IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 i)
+{
+	return this->_LA( i, ClassForwarder<typename ImplTraits::Endianness>() );
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_MARKER	UTF32_IntStream<ImplTraits, SuperType>::index()
+{
+	SuperType* input = this->get_super();
+    return  (ANTLR_MARKER)(input->get_nextChar());
+}
+
+template<class ImplTraits, class SuperType>
+void UTF32_IntStream<ImplTraits, SuperType>::seek(ANTLR_MARKER seekPoint)
+{
+	SuperType* input;
+
+	input   = this->get_super();
+
+	// If the requested seek point is less than the current
+	// input point, then we assume that we are resetting from a mark
+	// and do not need to scan, but can just set to there as rewind will
+        // reset line numbers and so on.
+	//
+	if	(seekPoint <= (ANTLR_MARKER)(input->get_nextChar()))
+	{
+		input->set_nextChar( static_cast<typename ImplTraits::DataType*>(seekPoint) );
+	}
+	else
+	{
+        // Call consume until we reach the asked for seek point or EOF
+        //
+        while( (this->_LA(1) != ANTLR_CHARSTREAM_EOF) && ( (ANTLR_MARKER)(input->get_nextChar()) < seekPoint ) )
+	    {
+			this->consume();
+	    }
+	}
+
+}
+
+template<class ImplTraits, class SuperType>
+void UTF32_IntStream<ImplTraits, SuperType>::setupIntStream(bool machineBigEndian, bool inputBigEndian)
+{
+	SuperType* super = this->get_super();
+	super->set_charByteSize(4);
+
+	this->findout_endian_spec(machineBigEndian, inputBigEndian);
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	UTF32_IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 la, ClassForwarder<BYTE_AGNOSTIC> )
+{
+    SuperType* input = this->get_super();
+
+    if	(( input->get_nextChar() + la - 1) >= (input->get_data() + input->get_sizeBuf()/4 ))
+    {
+		return	ANTLR_CHARSTREAM_EOF;
+    }
+    else
+    {
+		return	(ANTLR_UCHAR)(*(input->get_nextChar() + la - 1));
+    }
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	UTF32_IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 la, ClassForwarder<ANTLR_LITTLE_ENDIAN> )
+{
+	SuperType* input = this->get_super();
+
+    if	(( input->get_nextChar() + la - 1) >= (input->get_data() + input->get_sizeBuf()/4 ))
+    {
+		return	ANTLR_CHARSTREAM_EOF;
+    }
+    else
+    {
+        ANTLR_UCHAR   c;
+
+        c = (ANTLR_UCHAR)(*(input->get_nextChar() + la - 1));
+
+        // Swap endianness to Big Endian
+        //
+        return (c>>24) | ((c<<8) & 0x00FF0000) | ((c>>8) & 0x0000FF00) | (c<<24);
+    }
+}
+
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	UTF32_IntStream<ImplTraits, SuperType>::_LA( ANTLR_INT32 la, ClassForwarder<ANTLR_BIG_ENDIAN> )
+{
+	SuperType* input = this->get_super();
+
+    if	(( input->get_nextChar() + la - 1) >= (input->get_data() + input->get_sizeBuf()/4 ))
+    {
+		return	ANTLR_CHARSTREAM_EOF;
+    }
+    else
+    {
+        ANTLR_UCHAR   c;
+
+        c = (ANTLR_UCHAR)(*(input->get_nextChar() + la - 1));
+
+        // Swap endianness to Little Endian
+        //
+        return (c>>24) | ((c<<8) & 0x00FF0000) | ((c>>8) & 0x0000FF00) | (c<<24);
+    }
+}
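+
+// Worked example (illustrative only) of the byte-swap expression used in the two
+// endian-aware _LA overloads above, for c = 0x12345678:
+//
+//      (c >> 24)               == 0x00000012
+//      (c <<  8) & 0x00FF0000  == 0x00560000
+//      (c >>  8) & 0x0000FF00  == 0x00003400
+//      (c << 24)               == 0x78000000
+//      OR of the four terms    == 0x78563412   (byte order reversed)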
+
+template<class ImplTraits, class SuperType>
+void	UTF32_IntStream<ImplTraits, SuperType>::consume()
+{
+	SuperType* input = this->get_super();
+
+    // SizeBuf is always in bytes
+    //
+	if	( input->get_nextChar()  < (input->get_data() + input->get_sizeBuf()/4 ))
+    {	
+		/* Indicate one more character in this line
+		 */
+		input->inc_charPositionInLine();
+	
+		if  ((ANTLR_UCHAR)(*(input->get_nextChar())) == input->get_newlineChar())
+		{
+			/* Reset for start of a new line of input
+			 */
+			input->inc_line();
+			input->set_charPositionInLine(0);
+			input->set_currentLine(	input->get_nextChar() + 1 );
+		}
+
+		/* Increment to next character position
+		 */
+		input->set_nextChar( input->get_nextChar() + 1 );
+    }
+}
+
+template<class ImplTraits, class SuperType>
+void UTF8_IntStream<ImplTraits, SuperType>::setupIntStream(bool, bool)
+{
+	SuperType* super = this->get_super();
+	super->set_charByteSize(0);
+}
+
+// ------------------------------------------------------
+// Following is from Unicode.org (see antlr3convertutf.c)
+//
+
+/// Index into the table below with the first byte of a UTF-8 sequence to
+/// get the number of trailing bytes that are supposed to follow it.
+/// Note that *legal* UTF-8 values can't have 4 or 5 trailing bytes. The table is
+/// left as-is for anyone who may want to do such conversion, which was
+/// allowed in earlier algorithms.
+///
+template<class ImplTraits, class SuperType>
+const ANTLR_UINT32* UTF8_IntStream<ImplTraits, SuperType>::TrailingBytesForUTF8()
+{
+	static const ANTLR_UINT32 trailingBytesForUTF8[256] = {
+		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+		1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+		2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
+	};
+
+	return trailingBytesForUTF8;
+}
+
+/// Magic values subtracted from a buffer value during UTF8 conversion.
+/// This table contains as many values as there might be trailing bytes
+/// in a UTF-8 sequence.
+///
+template<class ImplTraits, class SuperType>
+const UTF32* UTF8_IntStream<ImplTraits, SuperType>::OffsetsFromUTF8()
+{
+	static const UTF32 offsetsFromUTF8[6] = 
+		{   0x00000000UL, 0x00003080UL, 0x000E2080UL, 
+			0x03C82080UL, 0xFA082080UL, 0x82082080UL 
+		};
+	return 	offsetsFromUTF8;
+}
+
+// End of Unicode.org tables
+// -------------------------
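+
+// Editor's note: illustrative sketch only, not part of the upstream sources.
+// It shows how the two tables above combine in the decode code below, using
+// the two-byte sequence 0xC3 0xA9 ("e" with acute accent, U+00E9) as a worked
+// example. The function name is hypothetical and exists only for this sketch.
+inline ANTLR_UINT32 example_decodeTwoByteUtf8(ANTLR_UINT8 b0, ANTLR_UINT8 b1)
+{
+	// trailingBytesForUTF8[0xC3] == 1, so exactly one continuation byte follows
+	ANTLR_UINT32 ch = 0;
+	ch += b0; ch <<= 6;     // accumulate the lead byte, make room for 6 more bits
+	ch += b1;               // accumulate the single trailing byte
+	ch -= 0x00003080UL;     // offsetsFromUTF8[1] strips the UTF-8 marker bits
+	return ch;              // 0xC3, 0xA9  ->  0x000000E9
+}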
+
+
+/** \brief Consume the next character in a UTF8 input stream
+ *
+ * \param input Input stream context pointer
+ */
+template<class ImplTraits, class SuperType>
+void UTF8_IntStream<ImplTraits, SuperType>::consume()
+{
+    SuperType* input = this->get_super();
+	const ANTLR_UINT32* trailingBytesForUTF8 = UTF8_IntStream::TrailingBytesForUTF8();
+	const UTF32* offsetsFromUTF8 = UTF8_IntStream::OffsetsFromUTF8();
+
+    ANTLR_UINT32           extraBytesToRead;
+    ANTLR_UCHAR            ch;
+    ANTLR_UINT8*           nextChar;
+
+    nextChar = input->get_nextChar();
+
+    if	(nextChar < (input->get_data() + input->get_sizeBuf()))
+    {	
+		// Indicate one more character in this line
+		//
+		input->inc_charPositionInLine();
+	
+        // Are there more bytes needed to make up the whole thing?
+        //
+        extraBytesToRead = trailingBytesForUTF8[*nextChar];
+
+        if	((nextChar + extraBytesToRead) >= (input->get_data() + input->get_sizeBuf()))
+        {
+            input->set_nextChar( input->get_data() + input->get_sizeBuf() );
+            return;
+        }
+
+        // Cases deliberately fall through (see note A in antlrconvertutf.c)
+        // Legal UTF8 is only 4 bytes but 6 bytes could be used in old UTF8 so
+        // we allow it.
+        //
+        ch  = 0;
+       	switch (extraBytesToRead) 
+		{
+			case 5: ch += *nextChar++; ch <<= 6;
+			case 4: ch += *nextChar++; ch <<= 6;
+			case 3: ch += *nextChar++; ch <<= 6;
+			case 2: ch += *nextChar++; ch <<= 6;
+			case 1: ch += *nextChar++; ch <<= 6;
+			case 0: ch += *nextChar++;
+		}
+
+        // Magically correct the input value
+        //
+		ch -= offsetsFromUTF8[extraBytesToRead];
+		if  (ch == input->get_newlineChar())
+		{
+			/* Reset for start of a new line of input
+			 */
+			input->inc_line();
+			input->set_charPositionInLine(0);
+			input->set_currentLine(nextChar);
+		}
+
+        // Update input pointer
+        //
+        input->set_nextChar(nextChar);
+    }
+}
+
+/** \brief Return the input element assuming a UTF8 input
+ *
+ * \param[in] input Input stream context pointer
+ * \param[in] la 1 based offset of next input stream element
+ *
+ * \return Next input character in internal ANTLR3 encoding (UTF32)
+ */
+template<class ImplTraits, class SuperType>
+ANTLR_UCHAR UTF8_IntStream<ImplTraits, SuperType>::_LA(ANTLR_INT32 la)
+{
+    SuperType* input = this->get_super();
+	const ANTLR_UINT32* trailingBytesForUTF8 = UTF8_IntStream::TrailingBytesForUTF8();
+	const UTF32* offsetsFromUTF8 = UTF8_IntStream::OffsetsFromUTF8();
+    ANTLR_UINT32           extraBytesToRead;
+    ANTLR_UCHAR            ch;
+    ANTLR_UINT8*           nextChar;
+
+    nextChar = input->get_nextChar();
+
+    // Do we need to traverse forwards or backwards?
+    // - LA(0) is treated as LA(1) and we assume that the nextChar is
+    //   already positioned.
+    // - LA(n+) ; n>1 means we must traverse forward n-1 characters catering for UTF8 encoding
+    // - LA(-n) means we must traverse backwards n characters
+    //
+    if (la > 1) {
+
+        // Make sure that we have at least one character left before trying to
+        // loop through the buffer.
+        //
+        if	(nextChar < (input->get_data() + input->get_sizeBuf()))
+        {	
+            // Now traverse n-1 characters forward
+            //
+            while (--la > 0)
+            {
+                // Does the next character require trailing bytes?
+                // If so advance the pointer by that many bytes as well as advancing
+                // one position for what will be at least a single byte character.
+                //
+                nextChar += trailingBytesForUTF8[*nextChar] + 1;
+
+                // Does that calculation take us past the byte length of the buffer?
+                //
+                if	(nextChar >= (input->get_data() + input->get_sizeBuf()))
+                {
+                    return ANTLR_CHARSTREAM_EOF;
+                }
+            }
+        }
+        else
+        {
+            return ANTLR_CHARSTREAM_EOF;
+        }
+    }
+    else
+    {
+        // LA is negative so we decrease the pointer by n character positions
+        //
+        while   (nextChar > input->get_data() && la++ < 0)
+        {
+            // Traversing backwards in UTF8 means decrementing by one
+            // then continuing to decrement for as long as the byte pattern
+            // is flagged as being a trailing byte of an encoded code point.
+            // Trailing UTF8 bytes always start with 10 in binary. We assume that
+            // the UTF8 is well formed and do not check boundary conditions.
+            //
+            nextChar--;
+            while ((*nextChar & 0xC0) == 0x80)
+            {
+                nextChar--;
+            }
+        }
+    }
+
+    // nextChar is now pointing at the UTF8 encoded character that we need to
+    // decode and return.
+    //
+    // Are there more bytes needed to make up the whole thing?
+    //
+    extraBytesToRead = trailingBytesForUTF8[*nextChar];
+    if	(nextChar + extraBytesToRead >= (input->get_data() + input->get_sizeBuf()))
+    {
+        return ANTLR_CHARSTREAM_EOF;
+    }
+
+    // Cases deliberately fall through (see note A in antlrconvertutf.c)
+    // 
+    ch  = 0;
+    switch (extraBytesToRead) 
+	{
+        case 5: ch += *nextChar++; ch <<= 6;
+        case 4: ch += *nextChar++; ch <<= 6;
+        case 3: ch += *nextChar++; ch <<= 6;
+        case 2: ch += *nextChar++; ch <<= 6;
+        case 1: ch += *nextChar++; ch <<= 6;
+        case 0: ch += *nextChar++;
+    }
+
+    // Magically correct the input value
+    //
+    ch -= offsetsFromUTF8[extraBytesToRead];
+
+    return ch;
+}
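+
+// Editor's note: illustrative sketch only, not part of the upstream sources.
+// The backwards traversal above relies on UTF-8 continuation bytes always
+// having the bit pattern 10xxxxxx, so (byte & 0xC0) == 0x80 identifies them.
+// As a standalone predicate (hypothetical name, example only):
+inline bool example_isUtf8TrailingByte(ANTLR_UINT8 b)
+{
+	// 0xC0 masks the top two bits; 0x80 is the 10xxxxxx continuation marker
+	return (b & 0xC0) == 0x80;
+}
+// e.g. for the sequence 0xC3 0xA9: 0xC3 is a lead byte (predicate false),
+//      0xA9 is a trailing byte (predicate true).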
+
+template<class ImplTraits>
+TokenIntStream<ImplTraits>::TokenIntStream()
+{
+	m_cachedSize = 0;
+}
+
+template<class ImplTraits>
+ANTLR_UINT32 TokenIntStream<ImplTraits>::get_cachedSize() const
+{
+	return m_cachedSize;
+}
+
+template<class ImplTraits>
+void TokenIntStream<ImplTraits>::set_cachedSize( ANTLR_UINT32 cachedSize )
+{
+	m_cachedSize = cachedSize;
+}
+
+/** Move the input pointer to the next incoming token.  The stream
+ *  must become active with LT(1) available.  consume() simply
+ *  moves the input pointer so that LT(1) points at the next
+ *  input symbol. Consume at least one token.
+ *
+ *  Walk past any token not on the channel the parser is listening to.
+ */
+template<class ImplTraits>
+void TokenIntStream<ImplTraits>::consume()
+{
+	TokenStreamType* cts = static_cast<TokenStreamType*>(this);
+
+    if((ANTLR_UINT32)cts->get_p() < m_cachedSize )
+	{
+		cts->inc_p();
+		cts->set_p( cts->skipOffTokenChannels(cts->get_p()) );
+	}
+}
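+
+// Editor's note: illustrative sketch only, not part of the upstream sources.
+// consume() increments p and then relies on the token stream's
+// skipOffTokenChannels() to walk past tokens whose channel is not the one the
+// parser listens to. The skipping idea, reduced to a self-contained helper
+// over a plain array of channel numbers (hypothetical name, example only):
+inline ANTLR_UINT32 example_skipOffChannel( const ANTLR_UINT32* channels,
+                                            ANTLR_UINT32 count,
+                                            ANTLR_UINT32 p,
+                                            ANTLR_UINT32 listenChannel )
+{
+	while( (p < count) && (channels[p] != listenChannel) )
+		++p;            // off-channel (hidden) token: step over it
+	return p;           // index of the next on-channel token, or count
+}
+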
+template<class ImplTraits>
+void  TokenIntStream<ImplTraits>::consumeInitialHiddenTokens()
+{
+	ANTLR_MARKER	first;
+	ANTLR_INT32	i;
+	TokenStreamType*	ts;
+
+	ts	    = this->get_super();
+	first	= this->index();
+
+	for	(i=0; i<first; i++)
+	{
+		ts->get_debugger()->consumeHiddenToken(ts->get(i));
+	}
+
+	ts->set_initialStreamState(false);
+}
+
+
+template<class ImplTraits>
+ANTLR_UINT32	TokenIntStream<ImplTraits>::_LA( ANTLR_INT32 i )
+{
+	const CommonTokenType*    tok;
+	TokenStreamType*    ts	    = static_cast<TokenStreamType*>(this);
+
+	tok	    =  ts->_LT(i);
+
+	if	(tok != NULL)
+	{
+		return	tok->get_type();
+	}
+	else
+	{
+		return	CommonTokenType::TOKEN_INVALID;
+	}
+
+}
+
+template<class ImplTraits>
+ANTLR_MARKER	TokenIntStream<ImplTraits>::mark()
+{
+    BaseType::m_lastMarker = this->index();
+    return  BaseType::m_lastMarker;
+}
+
+template<class ImplTraits>
+ANTLR_UINT32 TokenIntStream<ImplTraits>::size()
+{
+    if (this->get_cachedSize() > 0)
+    {
+		return  this->get_cachedSize();
+    }
+    TokenStreamType* cts   = this->get_super();
+
+    this->set_cachedSize( static_cast<ANTLR_UINT32>(cts->get_tokens().size()) );
+    return  this->get_cachedSize();
+}
+
+template<class ImplTraits>
+void	TokenIntStream<ImplTraits>::release()
+{
+    return;
+}
+
+template<class ImplTraits>
+ANTLR_MARKER   TokenIntStream<ImplTraits>::tindex()
+{
+	return this->get_super()->get_p();
+}
+
+template<class ImplTraits>
+void	TokenIntStream<ImplTraits>::rewindLast()
+{
+    this->rewind( this->get_lastMarker() );
+}
+
+template<class ImplTraits>
+void	TokenIntStream<ImplTraits>::rewind(ANTLR_MARKER marker)
+{
+	return this->seek(marker);
+}
+
+template<class ImplTraits>
+void	TokenIntStream<ImplTraits>::seek(ANTLR_MARKER index)
+{
+    TokenStreamType* cts = static_cast<TokenStreamType*>(this);
+
+    cts->set_p( static_cast<ANTLR_INT32>(index) );
+}
+
+
+/// Return a string that represents the name associated with the input source
+///
+/// \param[in] is The ANTLR3_INT_STREAM interface that is representing this token stream.
+///
+/// \returns The name of the input source, obtained via the token source.
+/// \implements ANTLR3_INT_STREAM_struct::getSourceName()
+///
+template<class ImplTraits>
+typename TokenIntStream<ImplTraits>::StringType
+TokenIntStream<ImplTraits>::getSourceName()
+{
+	// Slightly convoluted as we must trace back to the lexer's input source
+	// via the token source. The streamName that is here is not initialized
+	// because this is a token stream, not a file or string stream, which are the
+	// only things that have a context for a source name.
+	//
+	return this->get_super()->get_tokenSource()->get_fileName();
+}
+
+template<class ImplTraits>
+void  TreeNodeIntStream<ImplTraits>::consume()
+{
+	CommonTreeNodeStreamType* ctns = this->get_super();
+	if( ctns->get_p() == -1 )
+		ctns->fillBufferRoot();
+	ctns->inc_p();
+}
+template<class ImplTraits>
+ANTLR_MARKER		TreeNodeIntStream<ImplTraits>::tindex()
+{
+	CommonTreeNodeStreamType* ctns = this->get_super();
+	return (ANTLR_MARKER)(ctns->get_p());
+}
+
+template<class ImplTraits>
+ANTLR_UINT32		TreeNodeIntStream<ImplTraits>::_LA(ANTLR_INT32 i)
+{
+	CommonTreeNodeStreamType* tns	    = this->get_super();
+
+	// Ask LT for the 'token' at that position
+	//
+	TreeType* t = tns->_LT(i);
+
+	if	(t == NULL)
+	{
+		return	CommonTokenType::TOKEN_INVALID;
+	}
+
+	// Token node was there so return the type of it
+	//
+	return  t->get_type();
+}
+
+template<class ImplTraits>
+ANTLR_MARKER	TreeNodeIntStream<ImplTraits>::mark()
+{
+	CommonTreeNodeStreamType* ctns	    = this->get_super();
+	
+	if	(ctns->get_p() == -1)
+	{
+		ctns->fillBufferRoot();
+	}
+
+	// Return the current mark point
+	//
+	this->set_lastMarker( this->index() );
+
+	return this->get_lastMarker();
+
+}
+
+template<class ImplTraits>
+void  TreeNodeIntStream<ImplTraits>::release(ANTLR_MARKER marker)
+{
+
+}
+
+template<class ImplTraits>
+void TreeNodeIntStream<ImplTraits>::rewindMark(ANTLR_MARKER marker)
+{
+	this->seek(marker);
+}
+
+template<class ImplTraits>
+void TreeNodeIntStream<ImplTraits>::rewindLast()
+{
+	this->seek( this->get_lastMarker() );
+}
+
+template<class ImplTraits>
+void	TreeNodeIntStream<ImplTraits>::seek(ANTLR_MARKER index)
+{
+	CommonTreeNodeStreamType* ctns	    = this->get_super();
+	ctns->set_p( ANTLR_UINT32_CAST(index) );
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	TreeNodeIntStream<ImplTraits>::size()
+{
+	CommonTreeNodeStreamType* ctns	    = this->get_super();
+	
+	if	(ctns->get_p() == -1)
+	{
+		ctns->fillBufferRoot();
+	}
+
+	return ctns->get_nodes().size();
+}
+
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3lexer.hpp b/runtime/Cpp/include/antlr3lexer.hpp
new file mode 100755
index 0000000..cfff29f
--- /dev/null
+++ b/runtime/Cpp/include/antlr3lexer.hpp
@@ -0,0 +1,248 @@
+/** \file
+ * Base interface for any ANTLR3 lexer.
+ *
+ * An ANTLR3 lexer is built from two sets of components:
+ *
+ *  - The runtime components that provide common functionality such as
+ *    traversing character streams, building tokens for output and so on.
+ *  - The generated rules and structure of the actual lexer, which call upon the
+ *    runtime components.
+ *
+ * A lexer class contains a character input stream, a base recognizer interface
+ * (which it will normally implement) and a token source interface (which it also
+ * implements). The token source interface is called by a token consumer (such as
+ * a parser, but in theory it can be anything that wants a set of abstract
+ * tokens in place of a raw character stream).
+ *
+ * So then, we set up a lexer in a sequence akin to:
+ *
+ *  - Create a character stream (something which implements ANTLR3_INPUT_STREAM)
+ *    and initialize it.
+ *  - Create a lexer interface and tell it where its input stream is.
+ *    This will cause the creation of a base recognizer class, which it will
+ *    override with its own implementations of some methods. The lexer creator
+ *    can also then in turn override anything it likes.
+ *  - The lexer token source interface is then passed to some interface that
+ *    knows how to use it, by calling for the next token.
+ *  - When the next token is requested, the lexing begins (see the sketch
+ *    after this comment block).
+ *
+ */
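+
+// Editor's note: an illustrative sketch of that sequence, not part of the
+// upstream sources. MyLexer and MyParser stand for grammar-generated classes,
+// TraitsType for the user's ImplTraits, and the constructor arguments are
+// schematic rather than exact signatures.
+//
+//   TraitsType::InputStreamType   input(/* file name or buffer, encoding */);
+//   MyLexer                       lexer(&input);        // lexer over that stream
+//   TraitsType::TokenStreamType   tokens(/* size hint */, lexer.get_tokSource());
+//   MyParser                      parser(&tokens);      // consumes the token source
+//   parser.start_rule();          // pulling tokens from the source drives the lexing
+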
+#ifndef	_ANTLR3_LEXER_HPP
+#define	_ANTLR3_LEXER_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/* Definitions
+ */
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+static const ANTLR_UINT32	ANTLR_STRING_TERMINATOR	= 0xFFFFFFFF;
+
+template<class ImplTraits>
+class  Lexer : public ImplTraits::template RecognizerType< typename ImplTraits::InputStreamType >,
+			   public ImplTraits::TokenSourceType
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename ImplTraits::InputStreamType InputStreamType;
+	typedef InputStreamType StreamType;
+	typedef typename InputStreamType::IntStreamType IntStreamType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+	typedef typename ImplTraits::StreamDataType TokenType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::StringStreamType StringStreamType;
+	typedef typename ImplTraits::template RecognizerType< InputStreamType > RecognizerType;
+	typedef typename RecognizerType::RecognizerSharedStateType RecognizerSharedStateType;
+	typedef typename ImplTraits::template ExceptionBaseType<InputStreamType> ExceptionBaseType;
+	typedef typename ImplTraits::BitsetListType BitsetListType;
+	typedef typename ImplTraits::TokenSourceType TokenSourceType;
+
+	typedef typename RecognizerSharedStateType::RuleMemoType RuleMemoType;
+	typedef typename RecognizerType::DebugEventListenerType DebuggerType;
+
+private:
+    /** A pointer to the character stream whence this lexer is receiving
+     *  characters.
+     *  TODO: I may come back to this and implement charstream outside
+     *  the input stream as per the java implementation.
+     */
+    InputStreamType*		m_input;
+
+public:
+	Lexer(ANTLR_UINT32 sizeHint, RecognizerSharedStateType* state);
+	Lexer(ANTLR_UINT32 sizeHint, InputStreamType* input, RecognizerSharedStateType* state);
+
+	InputStreamType* get_input() const;
+	IntStreamType* get_istream() const;
+	RecognizerType* get_rec();
+	const RecognizerType* get_rec() const;
+	TokenSourceType* get_tokSource();
+	
+	//functions used in .stg file
+	const RecognizerType* get_recognizer() const;
+	RecognizerSharedStateType* get_lexstate() const;
+	void set_lexstate( RecognizerSharedStateType* lexstate );
+	const TokenSourceType* get_tokSource() const;
+	CommonTokenType* get_ltoken() const;
+	void set_ltoken( const CommonTokenType* ltoken );
+	bool hasFailed() const;
+	ANTLR_INT32 get_backtracking() const;
+	void inc_backtracking();
+	void dec_backtracking();
+	bool get_failedflag() const;
+	void set_failedflag( bool failed );
+	InputStreamType* get_strstream() const;
+	ANTLR_MARKER  index() const;
+	void	seek(ANTLR_MARKER index);
+	const CommonTokenType* EOF_Token() const;
+	bool hasException() const;
+	ExceptionBaseType* get_exception() const;
+	void constructEx();
+	void lrecover();
+	ANTLR_MARKER mark();
+	void rewind(ANTLR_MARKER marker);
+	void rewindLast();
+	void setText( const StringType& text );
+	void skip();
+	RuleMemoType* getRuleMemo() const;
+	DebuggerType* get_debugger() const;
+	void setRuleMemo(RuleMemoType* rulememo);
+	ANTLR_UINT32 LA(ANTLR_INT32 i);
+	void consume();
+	void memoize(ANTLR_MARKER	ruleIndex, ANTLR_MARKER	ruleParseStart);
+	bool haveParsedRule(ANTLR_MARKER	ruleIndex);
+
+    /** Pointer to a function that sets the charstream source for the lexer and
+     *  causes it to  be reset.
+     */
+    void	setCharStream(InputStreamType* input);
+
+    /*!
+	 * \brief
+	 * Change to a new input stream, remembering the old one.
+	 *
+	 * \param input
+	 * New input stream to install as the current one.
+	 *
+	 * Switches the current character input stream to
+	 * a new one, saving the old one, which we will revert to at the end of this
+	 * new one.
+	 */
+    void	pushCharStream(InputStreamType* input);
+
+	/*!
+	 * \brief
+	 * Stops using the current input stream and reverts to any prior
+	 * input stream on the stack.
+	 *
+	 * Abandons the current input stream, whether it is empty or not, and
+	 * reverts to the previous stacked input stream.
+	 *
+	 * \remark
+	 * The function fails silently if there are no prior input streams.
+	 */
+    void	popCharStream();
+
+    /** Function that emits (a copy of) the supplied token as the next token in
+     *  the stream.
+     */
+    void	emit(const CommonTokenType* token);
+
+    /** Pointer to a function that constructs a new token from the lexer stored information
+     */
+    CommonTokenType*	emit();
+
+    /** Pointer to a function that attempts to match and consume the specified string from the input
+     *  stream. Note that strings must be passed as terminated arrays of ANTLR3_UCHAR. Strings are terminated
+     *  with 0xFFFFFFFF, which is an invalid UTF32 character
+     */
+    bool	matchs(ANTLR_UCHAR* string);
+
+    /** Pointer to a function that matches and consumes the specified character from the input stream.
+     *  The input stream is required to provide characters via LA() as UTF32 characters. The default lexer
+     *  implementation is source encoding agnostic and so input streams do not generally need to
+     *  override the default implementation.
+     */
+    bool	matchc(ANTLR_UCHAR c);
+
+    /** Pointer to a function that matches any character in the supplied range (I suppose it could be a token range too
+     *  but this would only be useful if the tokens were in some guaranteed order which is
+     *  only going to happen with a hand crafted token set).
+     */
+    bool	matchRange(ANTLR_UCHAR low, ANTLR_UCHAR high);
+
+    /** Pointer to a function that matches the next token/char in the input stream
+     *  regardless of what it actually is.
+     */
+    void		matchAny();
+
+    /** Pointer to a function that recovers from an error found in the input stream.
+     *  Generally, this will be a #ANTLR3_EXCEPTION_NOVIABLE_ALT but it could also
+     *  be from a mismatched token that the (*match)() could not recover from.
+     */
+    void		recover();
+
+    /** Function to return the current line number in the input stream
+     */
+    ANTLR_UINT32	getLine();
+    ANTLR_MARKER	getCharIndex();
+    ANTLR_UINT32	getCharPositionInLine();
+
+    /** Function to return the text so far for the current token being generated
+     */
+    StringType 	getText();
+
+	//Other utility functions
+	void fillExceptionData( ExceptionBaseType* ex );
+
+	/** Default lexer error handler (works for 8 bit streams only!!!)
+	 */
+	void displayRecognitionError( ANTLR_UINT8** tokenNames, ExceptionBaseType* ex);
+	void exConstruct();
+	TokenType*	getMissingSymbol( IntStreamType* istream, ExceptionBaseType* e,
+								  ANTLR_UINT32	expectedTokenType, BitsetListType*	follow);
+
+    /** Pointer to a function that knows how to free the resources of a lexer
+     */
+	~Lexer();
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3lexer.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3lexer.inl b/runtime/Cpp/include/antlr3lexer.inl
new file mode 100755
index 0000000..c8ccbd8
--- /dev/null
+++ b/runtime/Cpp/include/antlr3lexer.inl
@@ -0,0 +1,592 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+Lexer<ImplTraits>::Lexer(ANTLR_UINT32 sizeHint, RecognizerSharedStateType* state)
+	:Lexer<ImplTraits>::RecognizerType(sizeHint, state)
+	,m_input(NULL)
+{
+}
+
+template<class ImplTraits>
+Lexer<ImplTraits>::Lexer(ANTLR_UINT32 sizeHint, InputStreamType* input, RecognizerSharedStateType* state)
+	:Lexer<ImplTraits>::RecognizerType(sizeHint, state)
+{
+	this->setCharStream(input);
+}
+
+template<class ImplTraits>
+typename Lexer<ImplTraits>::InputStreamType* Lexer<ImplTraits>::get_input() const
+{
+	return m_input;
+}
+
+template<class ImplTraits>
+typename Lexer<ImplTraits>::IntStreamType* Lexer<ImplTraits>::get_istream() const
+{
+	return m_input;
+}
+
+template<class ImplTraits>
+typename Lexer<ImplTraits>::RecognizerType* Lexer<ImplTraits>::get_rec()
+{
+	return this;
+}
+
+template<class ImplTraits>
+typename Lexer<ImplTraits>::TokenSourceType* Lexer<ImplTraits>::get_tokSource()
+{
+	return this;
+}
+
+template<class ImplTraits>
+void Lexer<ImplTraits>::displayRecognitionError( ANTLR_UINT8** , ExceptionBaseType* ex)
+{
+	StringStreamType	err_stream;
+
+	// See if there is a 'filename' we can use
+    //
+    if( ex->getName().empty() )
+    {
+		err_stream << "-unknown source-(";
+    }
+    else
+    {
+		err_stream << ex->get_streamName().c_str();
+		err_stream << "(";
+    }
+    err_stream << ex->get_line() << ")";
+
+	err_stream << ": lexer error " <<  ex->getName() << '(' << ex->getType() << ')' << " :\n\t"
+		   << ex->get_message() << " at position [" << ex->get_line() << ", "
+		   << ex->get_charPositionInLine()+1 << "], ";
+
+	{
+		ANTLR_UINT32	width;
+
+		width	= ANTLR_UINT32_CAST(( (ANTLR_UINT8*)(m_input->get_data()) +
+									  (m_input->size() )) - (ANTLR_UINT8*)( ex->get_index() ));
+
+		if	(width >= 1)
+		{
+			if	(isprint(ex->get_c() ))
+			{
+				err_stream << "near '" << (typename StringType::value_type) ex->get_c() << "' :\n";
+			}
+			else
+			{
+				err_stream << "near char(" << std::hex << ex->get_c() << std::dec << ") :\n";
+			}
+			err_stream << "\t";
+			err_stream.width( width > 20 ? 20 : width );
+			err_stream << (typename StringType::const_pointer)ex->get_index() << "\n";
+		}
+		else
+		{
+			err_stream << "(end of input).\n\t This indicates a poorly specified lexer RULE\n\t or unterminated input element such as: \"STRING[\"]\n";
+			err_stream << "\t The lexer was matching from line "
+					   << this->get_state()->get_tokenStartLine()
+					   << ", offset " << this->get_state()->get_tokenStartCharPositionInLine()
+					   << ", which\n\t ";
+			width = ANTLR_UINT32_CAST(((ANTLR_UINT8*)(m_input->get_data() )+
+										(m_input->size())) -
+										(ANTLR_UINT8*)(this->get_state()->get_tokenStartCharIndex() ));
+
+			if	(width >= 1)
+			{
+				err_stream << "looks like this:\n\t\t";
+				err_stream.width( width > 20 ? 20 : width );
+				err_stream << (typename StringType::const_pointer)this->get_state()->get_tokenStartCharIndex() << "\n";
+			}
+			else
+			{
+				err_stream << "is also the end of the line, so you must check your lexer rules\n";
+			}
+		}
+	}
+	ImplTraits::displayRecognitionError( err_stream.str() );
+}
+
+template<class ImplTraits>
+void Lexer<ImplTraits>::fillExceptionData( ExceptionBaseType* ex )
+{
+	ex->set_c( m_input->_LA(1) );					/* Current input character			*/
+	ex->set_line( m_input->get_line() );						/* Line number comes from stream		*/
+	ex->set_charPositionInLine( m_input->get_charPositionInLine() );	    /* Line offset also comes from the stream   */
+	ex->set_index( m_input->index() );
+	ex->set_streamName( m_input->get_fileName() );
+	ex->set_message( "Unexpected character" );
+}
+
+template<class ImplTraits>
+void	Lexer<ImplTraits>::setCharStream(InputStreamType* input)
+{
+    /* Install the input interface
+     */
+    m_input	= input;
+
+    /* Set the current token to nothing
+     */
+	RecognizerSharedStateType* state = this->get_rec()->get_state();
+    state->set_token_present( false );
+	state->set_text("");
+    state->set_tokenStartCharIndex(-1);
+
+    /* Copy the name of the char stream to the token source
+     */
+    this->get_tokSource()->set_fileName( input->get_fileName() );
+}
+
+template<class ImplTraits>
+void	Lexer<ImplTraits>::pushCharStream(InputStreamType* input)
+{
+	// We have a stack, so we can save the current input stream
+	// into it.
+	//
+	this->get_istream()->mark();
+	this->get_rec()->get_state()->get_streams().push(this->get_input());
+
+	// And now we can install this new one
+	//
+	this->setCharStream(input);
+}
+
+template<class ImplTraits>
+void	Lexer<ImplTraits>::popCharStream()
+{
+	InputStreamType* input;
+
+    // If we do not have a stream stack or we are already at the
+    // stack bottom, then do nothing.
+    //
+    typename RecognizerSharedStateType::StreamsType& streams = this->get_rec()->get_state()->get_streams();
+    if	( streams.size() > 0)
+    {
+		// We just leave the current stream to its fate, we do not close
+		// it or anything as we do not know what the programmer intended
+		// for it. This method can always be overridden of course.
+		// So just find out what was currently saved on the stack and use
+		// that now, then pop it from the stack.
+		//
+		input	= streams.top();
+		streams.pop();
+
+		// Now install the stream as the current one.
+		//
+		this->setCharStream(input);
+		this->get_istream()->rewindLast();
+    }
+    return;
+}
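+
+// Editor's note: illustrative sketch only, not part of the upstream sources.
+// pushCharStream()/popCharStream() give a lexer include-file style nesting:
+// a rule action can push a stream for included text and pop back to the saved
+// stream when that text is exhausted. Schematically (names are hypothetical):
+//
+//   // in a lexer rule action, on seeing an include directive:
+//   this->pushCharStream(streamForIncludedFile);  // marks and stacks the current stream
+//   ...
+//   // later, when the pushed stream reaches its end:
+//   this->popCharStream();                        // restores the saved stream and rewinds to the mark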
+
+template<class ImplTraits>
+void	Lexer<ImplTraits>::emit(const CommonTokenType* token)
+{
+	this->get_rec()->get_state()->set_token(token);
+}
+
+template<class ImplTraits>
+typename Lexer<ImplTraits>::CommonTokenType*	Lexer<ImplTraits>::emit()
+{
+	/* We could check pointers to token factories and so on, but
+    * we are in code that we want to run as fast as possible
+    * so we are not checking any errors. So make sure you have installed an input stream before
+    * trying to emit a new token.
+    */
+	RecognizerSharedStateType* state = this->get_rec()->get_state();
+	state->set_token_present(true);
+    CommonTokenType* token = state->get_token();
+	token->set_input( this->get_input() );
+
+    /* Install the supplied information, and some other bits we already know
+    * get added automatically, such as the input stream it is associated with
+    * (though it can all be overridden of course)
+    */
+    token->set_type( state->get_type() );
+    token->set_channel( state->get_channel() );
+    token->set_startIndex( state->get_tokenStartCharIndex() );
+    token->set_stopIndex( this->getCharIndex() - 1 );
+    token->set_line( state->get_tokenStartLine() );
+    token->set_charPositionInLine( state->get_tokenStartCharPositionInLine() );
+
+	token->set_tokText( state->get_text() );
+    token->set_lineStart( this->get_input()->get_currentLine() );
+
+    return  token;
+}
+
+template<class ImplTraits>
+Lexer<ImplTraits>::~Lexer()
+{
+	// This may have been a delegate or delegator lexer, in which case the
+	// state may already have been freed (and set to NULL therefore)
+	// so we ignore the state if we don't have it.
+	//
+	RecognizerSharedStateType* state = this->get_rec()->get_state();
+
+	if	( state != NULL)
+	{
+		state->get_streams().clear();
+	}
+}
+
+template<class ImplTraits>
+bool	Lexer<ImplTraits>::matchs(ANTLR_UCHAR* str )
+{
+	RecognizerSharedStateType* state = this->get_rec()->get_state();
+	while   (*str != ANTLR_STRING_TERMINATOR)
+	{
+		if  ( this->get_istream()->_LA(1) != (*str))
+		{
+			if	( state->get_backtracking() > 0)
+			{
+				state->set_failed(true);
+				return false;
+			}
+
+			this->exConstruct();
+			state->set_failed( true );
+
+			/* TODO: Implement exception creation more fully perhaps
+			 */
+			this->recover();
+			return  false;
+		}
+
+		/* Matched correctly, do consume it
+		 */
+		this->get_istream()->consume();
+		str++;
+
+	}
+	/* Reset any failed indicator
+	 */
+	state->set_failed( false );
+	return  true;
+}
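+
+// Editor's note: illustrative sketch only, not part of the upstream sources.
+// matchs() expects its argument to be terminated with ANTLR_STRING_TERMINATOR
+// (0xFFFFFFFF) rather than a NUL, since 0xFFFFFFFF is not a valid UTF-32 code
+// point. Generated code therefore passes literals along these lines (the array
+// name is hypothetical):
+//
+//   static ANTLR_UCHAR lit_while[] = { 'w', 'h', 'i', 'l', 'e', ANTLR_STRING_TERMINATOR };
+//   this->matchs(lit_while);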
+
+template<class ImplTraits>
+bool	Lexer<ImplTraits>::matchc(ANTLR_UCHAR c)
+{
+	if	(this->get_istream()->_LA(1) == c)
+	{
+		/* Matched correctly, do consume it
+		 */
+		this->get_istream()->consume();
+
+		/* Reset any failed indicator
+		 */
+		this->get_rec()->get_state()->set_failed( false );
+
+		return	true;
+	}
+
+	/* Failed to match, exception and recovery time.
+	 */
+	if(this->get_rec()->get_state()->get_backtracking() > 0)
+	{
+		this->get_rec()->get_state()->set_failed( true );
+		return	false;
+	}
+
+	this->exConstruct();
+
+	/* TODO: Implement exception creation more fully perhaps
+	 */
+	this->recover();
+
+	return  false;
+}
+
+template<class ImplTraits>
+bool	Lexer<ImplTraits>::matchRange(ANTLR_UCHAR low, ANTLR_UCHAR high)
+{
+    ANTLR_UCHAR    c;
+
+    /* What is in the stream at the moment?
+     */
+    c	= this->get_istream()->_LA(1);
+    if	( c >= low && c <= high)
+    {
+		/* Matched correctly, consume it
+		 */
+		this->get_istream()->consume();
+
+		/* Reset any failed indicator
+		 */
+		this->get_rec()->get_state()->set_failed( false );
+
+		return	true;
+    }
+
+    /* Failed to match, exception and recovery time.
+     */
+
+    if	(this->get_rec()->get_state()->get_backtracking() > 0)
+    {
+		this->get_rec()->get_state()->set_failed( true );
+		return	false;
+    }
+
+    this->exConstruct();
+
+    /* TODO: Implement exception creation more fully
+     */
+    this->recover();
+
+    return  false;
+}
+
+template<class ImplTraits>
+void		Lexer<ImplTraits>::matchAny()
+{
+	this->get_istream()->consume();
+}
+
+template<class ImplTraits>
+void		Lexer<ImplTraits>::recover()
+{
+	this->get_istream()->consume();
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	Lexer<ImplTraits>::getLine()
+{
+	return  this->get_input()->get_line();
+}
+
+template<class ImplTraits>
+ANTLR_MARKER	Lexer<ImplTraits>::getCharIndex()
+{
+	return this->get_istream()->index();
+}
+
+template<class ImplTraits>
+ANTLR_UINT32	Lexer<ImplTraits>::getCharPositionInLine()
+{
+	return  this->get_input()->get_charPositionInLine();
+}
+
+template<class ImplTraits>
+typename Lexer<ImplTraits>::StringType	Lexer<ImplTraits>::getText()
+{
+	RecognizerSharedStateType* state = this->get_rec()->get_state();
+	if ( !state->get_text().empty() )
+	{
+		return	state->get_text();
+
+	}
+	return  this->get_input()->substr( state->get_tokenStartCharIndex(),
+									this->getCharIndex() - this->get_input()->get_charByteSize()
+							);
+}
+
+template<class ImplTraits>
+void Lexer<ImplTraits>::exConstruct()
+{
+	new ANTLR_Exception<ImplTraits, RECOGNITION_EXCEPTION, InputStreamType>( this->get_rec(), "" );
+}
+
+template< class ImplTraits>
+typename Lexer<ImplTraits>::TokenType*	Lexer<ImplTraits>::getMissingSymbol( IntStreamType*,
+										  ExceptionBaseType*,
+										  ANTLR_UINT32	, BitsetListType*)
+{
+	return NULL;
+}
+
+template< class ImplTraits>
+ANTLR_INLINE const typename Lexer<ImplTraits>::RecognizerType* Lexer<ImplTraits>::get_rec() const
+{
+	return this;
+}
+
+template< class ImplTraits>
+ANTLR_INLINE const typename Lexer<ImplTraits>::RecognizerType* Lexer<ImplTraits>::get_recognizer() const
+{
+	return this->get_rec();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Lexer<ImplTraits>::RecognizerSharedStateType* Lexer<ImplTraits>::get_lexstate() const
+{
+	return this->get_rec()->get_state();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::set_lexstate( RecognizerSharedStateType* lexstate )
+{
+	this->get_rec()->set_state(lexstate);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE const typename Lexer<ImplTraits>::TokenSourceType* Lexer<ImplTraits>::get_tokSource() const
+{
+	return this;
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Lexer<ImplTraits>::CommonTokenType* Lexer<ImplTraits>::get_ltoken() const
+{
+	return this->get_lexstate()->token();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::set_ltoken( const CommonTokenType* ltoken )
+{
+	this->get_lexstate()->set_token( ltoken );
+}
+
+template< class ImplTraits>
+ANTLR_INLINE bool Lexer<ImplTraits>::hasFailed() const
+{
+	return this->get_lexstate()->get_failed();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE ANTLR_INT32 Lexer<ImplTraits>::get_backtracking() const
+{
+	return this->get_lexstate()->get_backtracking();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::inc_backtracking()
+{
+	this->get_lexstate()->inc_backtracking();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::dec_backtracking()
+{
+	this->get_lexstate()->dec_backtracking();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE bool Lexer<ImplTraits>::get_failedflag() const
+{
+	return this->get_lexstate()->get_failed();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::set_failedflag( bool failed )
+{
+	this->get_lexstate()->set_failed(failed);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Lexer<ImplTraits>::InputStreamType* Lexer<ImplTraits>::get_strstream() const
+{
+	return this->get_input();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE ANTLR_MARKER  Lexer<ImplTraits>::index() const
+{
+	return this->get_istream()->index();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void	Lexer<ImplTraits>::seek(ANTLR_MARKER index)
+{
+	this->get_istream()->seek(index);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE const typename Lexer<ImplTraits>::CommonTokenType* Lexer<ImplTraits>::EOF_Token() const
+{
+	const CommonTokenType& eof_token = this->get_tokSource()->get_eofToken();
+	return &eof_token;
+}
+
+template< class ImplTraits>
+ANTLR_INLINE bool Lexer<ImplTraits>::hasException() const
+{
+	return this->get_lexstate()->get_error();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Lexer<ImplTraits>::ExceptionBaseType* Lexer<ImplTraits>::get_exception() const
+{
+	return this->get_lexstate()->get_exception();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::constructEx()
+{
+	this->get_rec()->exConstruct();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE ANTLR_MARKER Lexer<ImplTraits>::mark()
+{
+	return this->get_istream()->mark();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::rewind(ANTLR_MARKER marker)
+{
+	this->get_istream()->rewind(marker);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::rewindLast()
+{
+	this->get_istream()->rewindLast();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::memoize(ANTLR_MARKER	ruleIndex, ANTLR_MARKER	ruleParseStart)
+{
+	this->get_rec()->memoize( ruleIndex, ruleParseStart );
+}
+
+template< class ImplTraits>
+ANTLR_INLINE bool Lexer<ImplTraits>::haveParsedRule(ANTLR_MARKER	ruleIndex)
+{
+	return this->get_rec()->alreadyParsedRule(ruleIndex);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::setText( const StringType& text )
+{
+	this->get_lexstate()->set_text(text);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::skip()
+{
+	CommonTokenType& skipToken = this->get_tokSource()->get_skipToken();
+	this->get_lexstate()->set_token( &skipToken );
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Lexer<ImplTraits>::RuleMemoType* Lexer<ImplTraits>::getRuleMemo() const
+{
+	return this->get_lexstate()->get_rulememo();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::setRuleMemo(RuleMemoType* rulememo)
+{
+	return this->get_lexstate()->set_rulememo(rulememo);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Lexer<ImplTraits>::DebuggerType* Lexer<ImplTraits>::get_debugger() const
+{
+	return this->get_rec()->get_debugger();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32 Lexer<ImplTraits>::LA(ANTLR_INT32 i)
+{
+	return this->get_istream()->_LA(i);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Lexer<ImplTraits>::consume()
+{
+	return this->get_istream()->consume();
+}
+
+ANTLR_END_NAMESPACE()
+
diff --git a/runtime/Cpp/include/antlr3memory.hpp b/runtime/Cpp/include/antlr3memory.hpp
new file mode 100755
index 0000000..7713613
--- /dev/null
+++ b/runtime/Cpp/include/antlr3memory.hpp
@@ -0,0 +1,164 @@
+#ifndef	_ANTLR3MEMORY_HPP
+#define	_ANTLR3MEMORY_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <string.h>
+
+#include <deque>
+#include <map>
+#include <new>
+#include <set>
+#include <vector>
+
+#include   "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+class DefaultAllocPolicy
+{
+public:
+	// Limitation of C++: a template typedef cannot be written directly, so we derive instead
+	template <class TYPE>
+	class AllocatorType : public std::allocator<TYPE>
+	{
+	public:
+		typedef TYPE value_type;
+		typedef value_type* pointer;
+		typedef const value_type* const_pointer;
+		typedef value_type& reference;
+		typedef const value_type& const_reference;
+		typedef size_t size_type;
+		typedef ptrdiff_t difference_type;
+		template<class U> struct rebind {
+			typedef AllocatorType<U> other;
+		};
+
+		AllocatorType() throw() {}
+		AllocatorType( const AllocatorType& alloc ) throw() {}
+		template<typename U> AllocatorType(const AllocatorType<U>& alloc) throw(){}
+	};
+
+	template<class TYPE>
+	class VectorType : public std::vector< TYPE, AllocatorType<TYPE> >
+	{
+	};
+	
+	template<class TYPE>
+	class ListType : public std::deque< TYPE, AllocatorType<TYPE> >
+	{
+	};	
+
+	template<class TYPE>
+	class StackType : public std::deque< TYPE, AllocatorType<TYPE> >
+	{
+	public:
+		void push( const TYPE& elem ) {  this->push_back(elem); 	}
+		void pop()  { this->pop_back(); }
+		TYPE& peek() { return this->back(); }
+		TYPE& top() { return this->back(); }
+		const TYPE& peek() const { return this->back(); }
+		const TYPE& top() const { return this->back(); }
+	};	
+
+
+	template<class TYPE>
+	class OrderedSetType : public std::set< TYPE, std::less<TYPE>, AllocatorType<TYPE> >
+	{
+	};
+
+	template<class TYPE>
+	class UnOrderedSetType : public std::set< TYPE, std::less<TYPE>, AllocatorType<TYPE> >
+	{
+	};
+
+	template<class KeyType, class ValueType>
+	class UnOrderedMapType : public std::map< KeyType, ValueType, std::less<KeyType>, 
+										AllocatorType<std::pair<KeyType, ValueType> > >
+	{
+	};
+
+	template<class KeyType, class ValueType>
+	class OrderedMapType : public std::map< KeyType, ValueType, std::less<KeyType>, 
+										AllocatorType<std::pair<KeyType, ValueType> > >
+	{
+	};
+
+	ANTLR_INLINE static void* operator new (std::size_t bytes)
+	{ 
+		void* p = alloc(bytes);
+		return p;
+	}
+	ANTLR_INLINE static void* operator new (std::size_t , void* p) { return p; }
+	ANTLR_INLINE static void* operator new[]( std::size_t bytes)
+	{
+		void* p = alloc(bytes); 
+		return p;
+	}
+	ANTLR_INLINE static void operator delete(void* p)
+	{
+		DefaultAllocPolicy::free(p);
+	}
+	ANTLR_INLINE static void operator delete(void* , void* ) {} //placement delete
+
+	ANTLR_INLINE static void operator delete[](void* p)
+	{
+		DefaultAllocPolicy::free(p);
+	}
+
+	ANTLR_INLINE static void* alloc( std::size_t bytes )
+	{
+		void* p = malloc(bytes); 
+		if( p== NULL )
+			throw std::bad_alloc();
+		return p;
+	}
+
+	ANTLR_INLINE static void* alloc0( std::size_t bytes )
+	{
+		void* p = calloc(1, bytes);
+		if( p== NULL )
+			throw std::bad_alloc();
+		return p;
+	}
+
+	ANTLR_INLINE static void  free( void* p )
+	{
+		return ::free(p);
+	}
+	
+	ANTLR_INLINE static void* realloc(void *ptr, size_t size)
+	{
+		return ::realloc( ptr, size );
+	}
+};
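+
+// Editor's note: illustrative sketch only, not part of the upstream sources.
+// It shows how the policy's container aliases are used: StackType is a deque
+// with push/pop/peek/top acting on the back, and the other aliases pick up
+// AllocatorType automatically. The function name is hypothetical.
+inline int example_allocPolicyUsage()
+{
+	DefaultAllocPolicy::StackType<int> stack;
+	stack.push(1);
+	stack.push(2);              // back of the underlying deque
+	int top = stack.peek();     // == 2, without removing it
+	stack.pop();                // removes the 2
+
+	DefaultAllocPolicy::VectorType<int> vec;
+	vec.push_back(top);         // ordinary std::vector interface, custom allocator
+	return vec.back();          // == 2
+}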
+
+ANTLR_END_NAMESPACE()
+
+#endif	/* _ANTLR3MEMORY_H */
diff --git a/runtime/Cpp/include/antlr3parser.hpp b/runtime/Cpp/include/antlr3parser.hpp
new file mode 100755
index 0000000..cc9b473
--- /dev/null
+++ b/runtime/Cpp/include/antlr3parser.hpp
@@ -0,0 +1,200 @@
+/** \file
+ * Base implementation of an ANTLR3 parser.
+ *
+ *
+ */
+#ifndef	_ANTLR3_PARSER_HPP
+#define	_ANTLR3_PARSER_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+/** This is the main interface for an ANTLR3 parser.
+ */
+template< class ImplTraits >
+class Parser  :  public ImplTraits::template RecognizerType< typename ImplTraits::TokenStreamType >
+{
+public:
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::TokenStreamType  TokenStreamType;
+	typedef typename TokenStreamType::IntStreamType  IntStreamType;
+	typedef TokenStreamType StreamType;
+
+	typedef typename ImplTraits::template RecognizerType< typename ImplTraits::TokenStreamType > RecognizerType;
+	typedef typename RecognizerType::RecognizerSharedStateType RecognizerSharedStateType;
+
+	typedef DebugEventListener<ImplTraits> DebugEventListenerType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+	typedef CommonTokenType TokenType;
+	typedef typename ImplTraits::BitsetListType BitsetListType;
+	typedef ANTLR_ExceptionBase<ImplTraits, TokenStreamType> ExceptionBaseType;
+	typedef Empty TokenSourceType;
+
+	typedef typename RecognizerSharedStateType::FollowingType FollowingType;
+	typedef typename RecognizerSharedStateType::RuleMemoType RuleMemoType;
+	typedef typename ImplTraits::DebugEventListenerType DebuggerType;
+
+private:
+    /** A provider of a tokenstream interface, for the parser to consume
+     *  tokens from.
+     */
+    TokenStreamType*			m_tstream;
+
+public:
+	Parser( ANTLR_UINT32 sizeHint, RecognizerSharedStateType* state );
+	Parser( ANTLR_UINT32 sizeHint, TokenStreamType* tstream, RecognizerSharedStateType* state );
+	Parser( ANTLR_UINT32 sizeHint, TokenStreamType* tstream, DebugEventListenerType* dbg,
+											RecognizerSharedStateType* state );
+	TokenStreamType* get_tstream() const;
+	TokenStreamType* get_input() const;
+	IntStreamType* get_istream() const;
+	RecognizerType* get_rec();
+
+	//same as above. Just that get_istream exists for lexer, parser, treeparser
+	//get_parser_istream exists only for parser, treeparser. So use it accordingly
+	IntStreamType* get_parser_istream() const;
+
+	/** A pointer to a function that installs a debugger object (it also
+	 *  installs the debugging versions of the parser methods). This means that
+	 *  a non-debug parser incurs no overhead from the debugging support.
+	 */
+	void	setDebugListener(DebugEventListenerType* dbg);
+
+    /** A pointer to a function that installs a token stream
+     * for the parser.
+     */
+    void	setTokenStream(TokenStreamType*);
+
+    /** A pointer to a function that returns the token stream for this
+     *  parser.
+     */
+    TokenStreamType*	getTokenStream();
+
+	void exConstruct();
+	TokenType*	getMissingSymbol( IntStreamType* istream, ExceptionBaseType* e,
+								  ANTLR_UINT32	expectedTokenType, BitsetListType*	follow);
+
+	void mismatch(ANTLR_UINT32 ttype, BitsetListType* follow);
+
+    /** Pointer to a function that knows how to free resources of an ANTLR3 parser.
+     */
+	~Parser();
+
+	void fillExceptionData( ExceptionBaseType* ex );
+	void displayRecognitionError( ANTLR_UINT8** tokenNames, ExceptionBaseType* ex );
+
+	//convenience functions exposed in .stg
+	const RecognizerType* get_recognizer() const;
+	RecognizerSharedStateType* get_psrstate() const;
+	void set_psrstate(RecognizerSharedStateType* state);
+	bool haveParsedRule(ANTLR_MARKER	ruleIndex);
+	void memoize(ANTLR_MARKER	ruleIndex, ANTLR_MARKER	ruleParseStart);
+	ANTLR_MARKER  index() const;
+	bool hasException() const;
+	ExceptionBaseType* get_exception() const;
+	const CommonTokenType* matchToken( ANTLR_UINT32 ttype, BitsetListType* follow );
+	void matchAnyToken();
+	const FollowingType& get_follow_stack() const;
+	void followPush( const BitsetListType& follow );
+	void followPop();
+	void precover();
+	void preporterror();
+	ANTLR_UINT32 LA(ANTLR_INT32 i);
+	const CommonTokenType*  LT(ANTLR_INT32 k);
+	void constructEx();
+	void consume();
+	ANTLR_MARKER mark();
+	void rewind(ANTLR_MARKER marker);
+	void rewindLast();
+	void seek(ANTLR_MARKER index);
+	bool get_perror_recovery() const;
+	void set_perror_recovery( bool val );
+	bool hasFailed() const;
+	bool get_failedflag() const;
+	void set_failedflag( bool failed );
+	ANTLR_INT32 get_backtracking() const;
+	void inc_backtracking();
+	void dec_backtracking();
+	CommonTokenType* recoverFromMismatchedSet(BitsetListType*	follow);
+	bool	recoverFromMismatchedElement(BitsetListType*	follow);
+	RuleMemoType* getRuleMemo() const;
+	DebuggerType* get_debugger() const;
+	TokenStreamType* get_strstream() const;
+	void setRuleMemo(RuleMemoType* rulememo);
+
+};
+
+// Generic rule return value. Unlike the standard ANTLR runtime, this gets generated for
+// every rule in the target. Rule exit is handled here.
+template<class ImplTraits>
+class RuleReturnValue
+{
+public:
+	typedef typename ImplTraits::BaseParserType BaseParserType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+	
+public:
+	const CommonTokenType*		start;
+	const CommonTokenType*		stop;
+	BaseParserType*			parser;
+
+	RuleReturnValue(BaseParserType* psr = NULL );
+	RuleReturnValue( const RuleReturnValue& val );
+	RuleReturnValue& operator=( const RuleReturnValue& val );
+	void call_start_placeholder(); 
+	void call_stop_placeholder(); 
+	RuleReturnValue& get_struct();
+	~RuleReturnValue();
+};
+
+// This variant makes sure that whenever tokens are condensed into a rule,
+// all the tokens except the start and stop tokens are deleted.
+template<class ImplTraits>
+class RuleReturnValue_1 : public RuleReturnValue<ImplTraits>
+{
+public:
+	typedef RuleReturnValue<ImplTraits> BaseType;
+	typedef typename BaseType::BaseParserType BaseParserType;
+
+public:
+	RuleReturnValue_1();
+	RuleReturnValue_1( BaseParserType* psr);
+	RuleReturnValue_1( const RuleReturnValue_1& val );
+	void call_start_placeholder();  //its dummy here
+	~RuleReturnValue_1();
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3parser.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3parser.inl b/runtime/Cpp/include/antlr3parser.inl
new file mode 100755
index 0000000..6f4d152
--- /dev/null
+++ b/runtime/Cpp/include/antlr3parser.inl
@@ -0,0 +1,585 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template< class ImplTraits >
+Parser<ImplTraits>::Parser( ANTLR_UINT32 sizeHint, RecognizerSharedStateType* state )
+	:RecognizerType( sizeHint, state )
+{
+	m_tstream = NULL;
+}
+
+template< class ImplTraits >
+Parser<ImplTraits>::Parser( ANTLR_UINT32 sizeHint, TokenStreamType* tstream,
+												RecognizerSharedStateType* state )
+												:RecognizerType( sizeHint, state )
+{
+	this->setTokenStream( tstream );
+}
+
+template< class ImplTraits >
+Parser<ImplTraits>::Parser( ANTLR_UINT32 sizeHint, TokenStreamType* tstream,
+											DebugEventListenerType* dbg,
+											RecognizerSharedStateType* state )
+											:RecognizerType( sizeHint, state )
+{
+	this->setTokenStream( tstream );
+	this->setDebugListener( dbg );
+}
+
+template< class ImplTraits >
+ANTLR_INLINE typename Parser<ImplTraits>::TokenStreamType* Parser<ImplTraits>::get_tstream() const
+{
+	return m_tstream;
+}
+
+template< class ImplTraits >
+ANTLR_INLINE typename Parser<ImplTraits>::IntStreamType* Parser<ImplTraits>::get_istream() const
+{
+	return m_tstream;
+}
+
+template< class ImplTraits >
+ANTLR_INLINE typename Parser<ImplTraits>::IntStreamType* Parser<ImplTraits>::get_parser_istream() const
+{
+	return m_tstream;
+}
+
+template< class ImplTraits >
+ANTLR_INLINE typename Parser<ImplTraits>::TokenStreamType* Parser<ImplTraits>::get_input() const
+{
+	return m_tstream;
+}
+
+template< class ImplTraits >
+void Parser<ImplTraits>::fillExceptionData( ExceptionBaseType* ex )
+{
+	ex->set_token( m_tstream->_LT(1) );	    /* Current input token			    */
+	ex->set_line( ex->get_token()->get_line() );
+	ex->set_charPositionInLine( ex->get_token()->get_charPositionInLine() );
+	ex->set_index( this->get_istream()->index() );
+	if( ex->get_token()->get_type() == CommonTokenType::TOKEN_EOF)
+	{
+		ex->set_streamName("");
+	}
+	else
+	{
+		ex->set_streamName( ex->get_token()->get_input()->get_fileName() );
+	}
+	ex->set_message("Unexpected token");
+}
+
+template< class ImplTraits >
+void Parser<ImplTraits>::displayRecognitionError( ANTLR_UINT8** tokenNames, ExceptionBaseType* ex )
+{
+	typename ImplTraits::StringStreamType errtext;
+	// See if there is a 'filename' we can use
+	//
+	if( ex->get_streamName().empty() )
+	{
+		if(ex->get_token()->get_type() == CommonTokenType::TOKEN_EOF)
+		{
+			errtext << "-end of input-(";
+		}
+		else
+		{
+			errtext << "-unknown source-(";
+		}
+	}
+	else
+	{
+		errtext << ex->get_streamName() << "(";
+	}
+
+	// Next comes the line number
+	//
+	errtext << this->get_rec()->get_state()->get_exception()->get_line() << ") ";
+	errtext << " : error " << this->get_rec()->get_state()->get_exception()->getType()
+							<< " : "
+							<< this->get_rec()->get_state()->get_exception()->get_message();
+
+	// Prepare the knowledge we know we have
+	//
+	const CommonTokenType* theToken   = this->get_rec()->get_state()->get_exception()->get_token();
+	StringType ttext			= theToken->toString();
+
+	errtext << ", at offset "
+			<< this->get_rec()->get_state()->get_exception()->get_charPositionInLine();
+	if  (theToken != NULL)
+	{
+		if (theToken->get_type() == CommonTokenType::TOKEN_EOF)
+		{
+			errtext << ", at <EOF>";
+		}
+		else
+		{
+			// Guard against null text in a token
+			//
+			errtext << "\n    near " << ( ttext.empty()
+											? "<no text for the token>" : ttext ) << "\n";
+		}
+	}
+
+	ex->displayRecognitionError( tokenNames, errtext );
+	ImplTraits::displayRecognitionError( errtext.str() );
+}
+
+template< class ImplTraits >
+Parser<ImplTraits>::~Parser()
+{
+    if	(this->get_rec() != NULL)
+    {
+		// This may have been a delegate or delegator parser, in which case the
+		// state may already have been freed (and set to NULL therefore)
+		// so we ignore the state if we don't have it.
+		//
+		RecognizerSharedStateType* state = this->get_rec()->get_state();
+		if	(state != NULL)
+		{
+			state->get_following().clear();
+		}
+    }
+}
+
+template< class ImplTraits >
+void	Parser<ImplTraits>::setDebugListener(DebugEventListenerType* dbg)
+{
+		// Set the debug listener. There are no methods to override
+	// because currently the only ones that notify the debugger
+	// are error reporting and recovery. Hence we can afford to
+	// check and see if the debugger interface is null or not
+	// there. If there is ever an occasion for a performance
+	// sensitive function to use the debugger interface, then
+	// a replacement function for debug mode should be supplied
+	// and installed here.
+	//
+	this->get_rec()->set_debugger(dbg);
+
+	// If there was a tokenstream installed already
+	// then we need to tell it about the debug interface
+	//
+	if	(this->get_tstream() != NULL)
+	{
+		this->get_tstream()->setDebugListener(dbg);
+	}
+}
+
+template< class ImplTraits >
+ANTLR_INLINE void	Parser<ImplTraits>::setTokenStream(TokenStreamType* tstream)
+{
+	m_tstream = tstream;
+    this->get_rec()->reset();
+}
+
+template< class ImplTraits >
+ANTLR_INLINE typename Parser<ImplTraits>::TokenStreamType*	Parser<ImplTraits>::getTokenStream()
+{
+	return m_tstream;
+}
+
+template< class ImplTraits >
+ANTLR_INLINE typename Parser<ImplTraits>::RecognizerType* Parser<ImplTraits>::get_rec()
+{
+	return this;
+}
+
+template< class ImplTraits >
+ANTLR_INLINE void Parser<ImplTraits>::exConstruct()
+{
+	new ANTLR_Exception<ImplTraits, MISMATCHED_TOKEN_EXCEPTION, StreamType>( this->get_rec(), "" );
+}
+
+template< class ImplTraits >
+typename Parser<ImplTraits>::TokenType*	Parser<ImplTraits>::getMissingSymbol( IntStreamType* istream,
+										  ExceptionBaseType*,
+										  ANTLR_UINT32			expectedTokenType,
+										  BitsetListType*	)
+{
+	TokenStreamType*		cts;
+	CommonTokenType*		token;
+	const CommonTokenType*		current;
+	StringType				text;
+
+	// Dereference the standard pointers
+	//
+	cts		= static_cast<TokenStreamType*>(istream);
+
+	// Work out what to use as the current symbol to make a line and offset etc
+	// If we are at EOF, we use the token before EOF
+	//
+	current	= cts->_LT(1);
+	if	(current->get_type() == CommonTokenType::TOKEN_EOF)
+	{
+		current = cts->_LT(-1);
+	}
+
+	token	= new CommonTokenType;
+
+	// Set some of the token properties based on the current token
+	//
+	token->set_line(current->get_line());
+	token->set_charPositionInLine( current->get_charPositionInLine());
+	token->set_channel( TOKEN_DEFAULT_CHANNEL );
+	token->set_type(expectedTokenType);
+    token->set_lineStart( current->get_lineStart() );
+
+	// Create the token text that shows it has been inserted
+	//
+	token->setText("<missing ");
+	text = token->getText();
+
+	if	(!text.empty())
+	{
+		text.append((const char *) this->get_rec()->get_state()->get_tokenName(expectedTokenType) );
+		text.append(">");
+	}
+
+	// Finally return the pointer to our new token
+	//
+	return	token;
+}
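+
+// For illustration: if the expected token type resolves to the name "ID" in
+// the recognizer's token name table (a hypothetical entry), the synthesized
+// token above is of type ID, carries the line and column of the current
+// token, and is intended to read as "<missing ID>".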
+
+template< class ImplTraits >
+void Parser<ImplTraits>::mismatch(ANTLR_UINT32 ttype, BitsetListType* follow)
+{
+    // Install a mismatched token exception in the exception stack
+    //
+	new ANTLR_Exception<ImplTraits, MISMATCHED_TOKEN_EXCEPTION, StreamType>(this, "");
+
+	// With the statement below, only the parser instantiations of this template compile cleanly
+	IntStreamType* is = this->get_istream();
+
+
+	if	(this->mismatchIsUnwantedToken(is, ttype))
+	{
+		// Now update it to indicate this is an unwanted token exception
+		//
+		new ANTLR_Exception<ImplTraits, UNWANTED_TOKEN_EXCEPTION, StreamType>(this, "");
+		return;
+	}
+
+	if	( this->mismatchIsMissingToken(is, follow))
+	{
+		// Now update it to indicate this is a missing token exception
+		//
+		new ANTLR_Exception<ImplTraits, MISSING_TOKEN_EXCEPTION, StreamType>(this, "");
+		return;
+	}
+
+	// Just a mismatched token is all we can determine
+	//
+	new ANTLR_Exception<ImplTraits, MISMATCHED_TOKEN_EXCEPTION, StreamType>(this, "");
+
+	return;
+}
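+
+// As a rough, hypothetical example for a rule expecting ID ';' :
+//   input "x x ;" - the second "x" is reported as an unwanted token,
+//   input "x"     - the absent ';' is reported as a missing token,
+//   anything else - the plain mismatched token exception above is left installed.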
+
+template< class ImplTraits>
+ANTLR_INLINE const typename Parser<ImplTraits>::RecognizerType* Parser<ImplTraits>::get_recognizer() const
+{
+	return this;
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Parser<ImplTraits>::RecognizerSharedStateType* Parser<ImplTraits>::get_psrstate() const
+{
+	return this->get_recognizer()->get_state();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::set_psrstate(RecognizerSharedStateType* state)
+{
+	this->get_rec()->set_state( state );
+}
+
+template< class ImplTraits>
+ANTLR_INLINE bool Parser<ImplTraits>::haveParsedRule(ANTLR_MARKER	ruleIndex)
+{
+	return this->get_rec()->alreadyParsedRule(ruleIndex);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::memoize(ANTLR_MARKER	ruleIndex, ANTLR_MARKER	ruleParseStart)
+{
+	return this->get_rec()->memoize( ruleIndex, ruleParseStart );
+}
+
+template< class ImplTraits>
+ANTLR_INLINE ANTLR_MARKER  Parser<ImplTraits>::index() const
+{
+	return this->get_istream()->index();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE bool Parser<ImplTraits>::hasException() const
+{
+	return this->get_psrstate()->get_error();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Parser<ImplTraits>::ExceptionBaseType* Parser<ImplTraits>::get_exception() const
+{
+	return this->get_psrstate()->get_exception();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE const typename Parser<ImplTraits>::CommonTokenType* Parser<ImplTraits>::matchToken( ANTLR_UINT32 ttype, BitsetListType* follow )
+{
+	return this->get_rec()->match( ttype, follow );
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::matchAnyToken()
+{
+	return this->get_rec()->matchAny();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE const typename Parser<ImplTraits>::FollowingType& Parser<ImplTraits>::get_follow_stack() const
+{
+	return this->get_psrstate()->get_following();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::followPush(const BitsetListType& follow)
+{
+#ifndef  SKIP_FOLLOW_SETS
+	this->get_rec()->get_state()->get_following().push(follow);
+#endif
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::followPop()
+{
+#ifndef  SKIP_FOLLOW_SETS
+	this->get_rec()->get_state()->get_following().pop();
+#endif
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::precover()
+{
+	return this->get_rec()->recover();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::preporterror()
+{
+	return this->get_rec()->reportError();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE ANTLR_UINT32 Parser<ImplTraits>::LA(ANTLR_INT32 i)
+{
+	return this->get_istream()->_LA(i);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE const typename Parser<ImplTraits>::CommonTokenType*  Parser<ImplTraits>::LT(ANTLR_INT32 k)
+{
+	return this->get_input()->_LT(k);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::constructEx()
+{
+	this->get_rec()->constructEx();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::consume()
+{
+	this->get_istream()->consume();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE ANTLR_MARKER Parser<ImplTraits>::mark()
+{
+	return this->get_istream()->mark();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::rewind(ANTLR_MARKER marker)
+{
+	this->get_istream()->rewind(marker);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::rewindLast()
+{
+	this->get_istream()->rewindLast();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::seek(ANTLR_MARKER index)
+{
+	this->get_istream()->seek(index);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE bool Parser<ImplTraits>::get_perror_recovery() const
+{
+	return this->get_psrstate()->get_errorRecovery();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::set_perror_recovery( bool val )
+{
+	this->get_psrstate()->set_errorRecovery(val);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE bool Parser<ImplTraits>::hasFailed() const
+{
+	return this->get_psrstate()->get_failed();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE bool Parser<ImplTraits>::get_failedflag() const
+{
+	return this->get_psrstate()->get_failed();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::set_failedflag( bool failed )
+{
+	this->get_psrstate()->set_failed(failed);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE ANTLR_INT32 Parser<ImplTraits>::get_backtracking() const
+{
+	return this->get_psrstate()->get_backtracking();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::inc_backtracking()
+{
+	this->get_psrstate()->inc_backtracking();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::dec_backtracking()
+{
+	this->get_psrstate()->dec_backtracking();
+}
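+
+// A minimal sketch of the speculative-parse pattern these helpers support
+// (the parser pointer p and the alternative body are hypothetical):
+//
+//     p->inc_backtracking();
+//     ANTLR_MARKER m = p->mark();
+//     // ... attempt the alternative, setting the failed flag on a mismatch ...
+//     bool failed = p->get_failedflag();
+//     p->rewind(m);
+//     p->dec_backtracking();
+//     p->set_failedflag(false);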
+
+template< class ImplTraits>
+ANTLR_INLINE typename Parser<ImplTraits>::CommonTokenType* Parser<ImplTraits>::recoverFromMismatchedSet(BitsetListType*	follow)
+{
+	return this->get_rec()->recoverFromMismatchedSet(follow);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE bool	Parser<ImplTraits>::recoverFromMismatchedElement(BitsetListType*	follow)
+{
+	return this->get_rec()->recoverFromMismatchedElement(follow);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Parser<ImplTraits>::RuleMemoType* Parser<ImplTraits>::getRuleMemo() const
+{
+	return this->get_psrstate()->get_ruleMemo();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void Parser<ImplTraits>::setRuleMemo(RuleMemoType* rulememo)
+{
+	this->get_psrstate()->set_ruleMemo(rulememo);
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Parser<ImplTraits>::DebuggerType* Parser<ImplTraits>::get_debugger() const
+{
+	return this->get_rec()->get_debugger();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE typename Parser<ImplTraits>::TokenStreamType* Parser<ImplTraits>::get_strstream() const
+{
+	return this->get_tstream();
+}
+
+template< class ImplTraits>
+ANTLR_INLINE RuleReturnValue<ImplTraits>::RuleReturnValue(BaseParserType* psr) 
+{ 
+	parser = psr; 
+	start = NULL;
+	stop = NULL;
+}
+
+template< class ImplTraits>
+ANTLR_INLINE RuleReturnValue<ImplTraits>::RuleReturnValue( const RuleReturnValue& val )
+{
+	parser	= val.parser; 
+	start	= val.start;
+	stop	= val.stop;
+}
+
+template< class ImplTraits>
+ANTLR_INLINE RuleReturnValue<ImplTraits>& RuleReturnValue<ImplTraits>::operator=( const RuleReturnValue& val )
+{
+	parser	= val.parser; 
+	start	= val.start;
+	stop	= val.stop;
+	return *this;
+}
+
+template< class ImplTraits>
+ANTLR_INLINE RuleReturnValue<ImplTraits>::~RuleReturnValue()
+{
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void RuleReturnValue<ImplTraits>::call_start_placeholder()
+{
+	start = parser->LT(1); 
+	stop = start;
+}
+
+template< class ImplTraits>
+ANTLR_INLINE void RuleReturnValue<ImplTraits>::call_stop_placeholder()
+{
+	stop = parser->LT(-1);
+}
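+
+// A sketch of how generated rule code might drive these placeholders; the
+// surrounding rule body is hypothetical:
+//
+//     RuleReturnValue<ImplTraits> retval(this);
+//     retval.call_start_placeholder();   // start = LT(1)
+//     // ... match the rule body ...
+//     retval.call_stop_placeholder();    // stop = LT(-1)
+//     return retval;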
+
+template< class ImplTraits>
+ANTLR_INLINE RuleReturnValue_1<ImplTraits>::RuleReturnValue_1()
+{
+}
+
+template< class ImplTraits>
+RuleReturnValue_1<ImplTraits>::RuleReturnValue_1( BaseParserType* psr )
+	:RuleReturnValue_1<ImplTraits>::BaseType(psr)
+{
+	BaseType::start = psr->LT(1);
+	BaseType::stop = BaseType::start;
+}
+
+template< class ImplTraits>
+RuleReturnValue_1<ImplTraits>::RuleReturnValue_1( const RuleReturnValue_1& val )
+	:BaseType(val)
+{
+}
+
+template< class ImplTraits>
+void RuleReturnValue_1<ImplTraits>::call_start_placeholder()
+{
+}
+
+template< class ImplTraits>
+RuleReturnValue_1<ImplTraits>::~RuleReturnValue_1()
+{
+	if( BaseType::parser && ( BaseType::parser->get_backtracking() == 0 ) )
+	{
+		if( BaseType::stop == NULL )
+			BaseType::stop = BaseType::parser->LT(-1);
+		if( BaseType::stop != NULL )
+		{
+			ANTLR_MARKER start_token_idx	= BaseType::start->get_index() + 1;
+			ANTLR_MARKER stop_token_idx		= BaseType::stop->get_index() - 1;
+			if( start_token_idx > stop_token_idx )
+				return;
+			BaseType::parser->getTokenStream()->discardTokens( start_token_idx, stop_token_idx); 
+		}
+	}
+}
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3recognizersharedstate.hpp b/runtime/Cpp/include/antlr3recognizersharedstate.hpp
new file mode 100755
index 0000000..0554396
--- /dev/null
+++ b/runtime/Cpp/include/antlr3recognizersharedstate.hpp
@@ -0,0 +1,265 @@
+/** \file
+ * While the C runtime does not need to model the state of
+ * multiple lexers and parsers in the same way as the Java runtime does,
+ * it is no overhead to reflect that model. In fact the
+ * C runtime has always been able to share recognizer state.
+ *
+ * This 'class' therefore defines all the elements of a recognizer
+ * (either lexer, parser or tree parser) that are needed to
+ * track the current recognition state. Multiple recognizers
+ * may then share this state, for instance when one grammar
+ * imports another.
+ */
+
+#ifndef	_ANTLR3_RECOGNIZER_SHARED_STATE_HPP
+#define	_ANTLR3_RECOGNIZER_SHARED_STATE_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "antlr3defs.hpp"
+
+ANTLR_BEGIN_NAMESPACE()
+
+/** All the data elements required to track the current state
+ *  of any recognizer (lexer, parser, tree parser).
+ * May be shared between multiple recognizers such that
+ * grammar inheritance is easily supported.
+ */
+template<class ImplTraits, class StreamType>
+class RecognizerSharedState  : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename StreamType::UnitType TokenType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+
+	typedef typename ComponentTypeFinder<ImplTraits, StreamType>::ComponentType  ComponentType;
+	typedef typename ImplTraits::template RewriteStreamType< ComponentType > RewriteStreamType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::TokenSourceType TokenSourceType;
+	typedef typename ImplTraits::template ExceptionBaseType<StreamType> ExceptionBaseType;
+	typedef typename ImplTraits::BitsetType BitsetType;
+	typedef typename ImplTraits::BitsetListType BitsetListType;
+
+	typedef typename AllocPolicyType::template StackType< BitsetListType > FollowingType;
+	typedef typename AllocPolicyType::template StackType< typename ImplTraits::InputStreamType* > InputStreamsType;
+	typedef InputStreamsType StreamsType;
+	typedef typename AllocPolicyType::template VectorType<RewriteStreamType> RewriteStreamsType;
+
+	typedef IntTrie<ImplTraits, ANTLR_MARKER> RuleListType;
+	typedef IntTrie<ImplTraits, RuleListType*> RuleMemoType;
+
+private:
+	/** Points to the first in a possible chain of exceptions that the
+     *  recognizer has discovered.
+     */
+    ExceptionBaseType*			m_exception;
+
+
+    /** Track the set of token types that can follow any rule invocation.
+     *  Stack structure, to support: List<BitSet>.
+     */
+    FollowingType		m_following;
+
+    /** Tracks a hint from the creator of the recognizer as to how big this
+     *  thing is going to get, as the actress said to the bishop. This allows us
+     *  to tune hash tables accordingly. This might not be the best place for this
+     *  in the end but we will see.
+     */
+    ANTLR_UINT32		m_sizeHint;
+
+
+    /** If set to true then the recognizer has an exception
+     * condition (this is tested by the generated code for the rules of
+     * the grammar).
+     */
+    bool				m_error;
+
+
+    /** This is true when we see an error and before having successfully
+     *  matched a token.  Prevents generation of more than one error message
+     *  per error.
+     */
+    bool				m_errorRecovery;
+
+	/** In lieu of a return value, this indicates that a rule or token
+     *  has failed to match.  Reset to false upon valid token match.
+     */
+    bool				m_failed;
+
+	/** Instead of heap-allocating the CommonTokenType we store it by value,
+	 *  hence we need an indicator of whether a token is actually present.
+	 */
+	bool				m_token_present;
+
+    /** The index into the input stream where the last error occurred.
+     * 	This is used to prevent infinite loops where an error is found
+     *  but no token is consumed during recovery...another error is found,
+     *  ad nauseam.  This is a failsafe mechanism to guarantee that at least
+     *  one token/tree node is consumed for two errors.
+     */
+    ANTLR_MARKER		m_lastErrorIndex;
+
+    /** When the recognizer terminates, the error handling functions
+     *  will have incremented this value if any error occurred (that was displayed). It can then be
+     *  used by the grammar programmer without having to use static globals.
+     */
+    ANTLR_UINT32		m_errorCount;
+
+    /** If 0, no backtracking is going on.  Safe to exec actions etc...
+     *  If >0 then it's the level of backtracking.
+     */
+    ANTLR_INT32			m_backtracking;
+
+    /** ANTLR3_VECTOR of ANTLR3_LIST for rule memoizing.
+     *  Tracks  the stop token index for each rule.  ruleMemo[ruleIndex] is
+     *  the memoization table for ruleIndex.  For key ruleStartIndex, you
+     *  get back the stop token for associated rule or MEMO_RULE_FAILED.
+     *
+     *  This is only used if rule memoization is on.
+     */
+    RuleMemoType*		m_ruleMemo;
+
+    /** Pointer to an array of token names
+     *  that are generally useful in error reporting. The generated parsers install
+     *  this pointer. The table it points to is statically allocated as 8 bit ascii
+     *  at parser compile time - grammar token names are thus restricted in character
+     *  sets, which does not seem too terrible.
+     */
+    ANTLR_UINT8**		m_tokenNames;
+
+    /** The goal of all lexer rules/methods is to create a token object.
+     *  This is an instance variable as multiple rules may collaborate to
+     *  create a single token.  For example, NUM : INT | FLOAT ;
+     *  In this case, you want the INT or FLOAT rule to set token and not
+     *  have it reset to a NUM token in rule NUM.
+     */
+    CommonTokenType		m_token;
+
+    /** A lexer is a source of tokens, produced by all the generated (or
+     *  hand crafted if you like) matching rules. As such it needs to provide
+     *  a token source interface implementation. For others, this will become an empty class.
+     */
+    TokenSourceType*	m_tokSource;
+
+    /** The channel number for the current token
+     */
+    ANTLR_UINT32			m_channel;
+
+    /** The token type for the current token
+     */
+    ANTLR_UINT32			m_type;
+
+    /** The input line (where it makes sense) on which the first character of the current
+     *  token resides.
+     */
+    ANTLR_INT32			m_tokenStartLine;
+
+    /** The character position of the first character of the current token
+     *  within the line specified by tokenStartLine
+     */
+    ANTLR_INT32		m_tokenStartCharPositionInLine;
+
+    /** What character index in the stream did the current token start at?
+     *  Needed, for example, to get the text for current token.  Set at
+     *  the start of nextToken.
+     */
+    ANTLR_MARKER		m_tokenStartCharIndex;
+
+    /** Text for the current token. This can be overridden by setting this
+     *  variable directly or by using the SETTEXT() macro (preferred) in your
+     *  lexer rules.
+     */
+    StringType			m_text;
+
+    /** Input stream stack, which allows the C programmer to switch input streams
+     *  easily and allow the standard nextToken() implementation to deal with it
+     *  as this is a common requirement.
+     */
+    InputStreamsType	m_streams;
+
+public:
+	RecognizerSharedState();
+	ExceptionBaseType* get_exception() const;
+	FollowingType& get_following();
+	ANTLR_UINT32 get_sizeHint() const;
+	bool get_error() const;
+	bool get_errorRecovery() const;
+	bool get_failed() const;
+	bool get_token_present() const;
+	ANTLR_MARKER get_lastErrorIndex() const;
+	ANTLR_UINT32 get_errorCount() const;
+	ANTLR_INT32 get_backtracking() const;
+	RuleMemoType* get_ruleMemo() const;
+	ANTLR_UINT8** get_tokenNames() const;
+	ANTLR_UINT8* get_tokenName( ANTLR_UINT32 i ) const;
+	CommonTokenType* get_token();
+	TokenSourceType* get_tokSource() const;
+	ANTLR_UINT32& get_channel();
+	ANTLR_UINT32 get_type() const;
+	ANTLR_INT32 get_tokenStartLine() const;
+	ANTLR_INT32 get_tokenStartCharPositionInLine() const;
+	ANTLR_MARKER get_tokenStartCharIndex() const;
+	StringType& get_text();
+	InputStreamsType& get_streams();
+
+	void  set_following( const FollowingType& following );
+	void  set_sizeHint( ANTLR_UINT32 sizeHint );
+	void  set_error( bool error );
+	void  set_errorRecovery( bool errorRecovery );
+	void  set_failed( bool failed );
+	void  set_token_present(bool token_present);
+	void  set_lastErrorIndex( ANTLR_MARKER lastErrorIndex );
+	void  set_errorCount( ANTLR_UINT32 errorCount );
+	void  set_backtracking( ANTLR_INT32 backtracking );
+	void  set_ruleMemo( RuleMemoType* ruleMemo );
+	void  set_tokenNames( ANTLR_UINT8** tokenNames );
+	void  set_tokSource( TokenSourceType* tokSource );
+	void  set_channel( ANTLR_UINT32 channel );
+	void  set_exception( ExceptionBaseType* exception );
+	void  set_type( ANTLR_UINT32 type );
+	void  set_token( const CommonTokenType* tok);
+	void  set_tokenStartLine( ANTLR_INT32 tokenStartLine );
+	void  set_tokenStartCharPositionInLine( ANTLR_INT32 tokenStartCharPositionInLine );
+	void  set_tokenStartCharIndex( ANTLR_MARKER tokenStartCharIndex );
+	void  set_text( const StringType& text );
+	void  set_streams( const InputStreamsType& streams );
+
+	void inc_errorCount();
+	void inc_backtracking();
+	void dec_backtracking();
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3recognizersharedstate.inl"
+
+#endif
+
+
diff --git a/runtime/Cpp/include/antlr3recognizersharedstate.inl b/runtime/Cpp/include/antlr3recognizersharedstate.inl
new file mode 100755
index 0000000..8d9cc7f
--- /dev/null
+++ b/runtime/Cpp/include/antlr3recognizersharedstate.inl
@@ -0,0 +1,267 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits, class StreamType>
+RecognizerSharedState<ImplTraits, StreamType>::RecognizerSharedState()
+{
+	m_exception = NULL;
+	m_sizeHint = 0;
+	m_error = false;
+	m_errorRecovery = false;
+	m_failed = false;
+	m_lastErrorIndex = 0;
+	m_errorCount = 0;
+	m_backtracking = 0;
+	m_token_present = false;
+	m_ruleMemo = NULL;
+	m_tokenNames = NULL;
+	m_tokSource = NULL;
+	m_channel = 0;
+	m_type = 0;
+	m_tokenStartLine = 0;
+	m_tokenStartCharPositionInLine = 0;
+	m_tokenStartCharIndex = 0;
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename RecognizerSharedState<ImplTraits, StreamType>::FollowingType& RecognizerSharedState<ImplTraits, StreamType>::get_following()
+{
+	return m_following;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UINT32 RecognizerSharedState<ImplTraits, StreamType>::get_sizeHint() const
+{
+	return m_sizeHint;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE bool RecognizerSharedState<ImplTraits, StreamType>::get_error() const
+{
+	return m_error;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename RecognizerSharedState<ImplTraits, StreamType>::ExceptionBaseType* 
+RecognizerSharedState<ImplTraits, StreamType>::get_exception() const
+{
+	return m_exception;
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE bool RecognizerSharedState<ImplTraits, StreamType>::get_errorRecovery() const
+{
+	return m_errorRecovery;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE bool RecognizerSharedState<ImplTraits, StreamType>::get_failed() const
+{
+	return m_failed;
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE bool RecognizerSharedState<ImplTraits, StreamType>::get_token_present() const
+{
+	return m_token_present;
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_MARKER RecognizerSharedState<ImplTraits, StreamType>::get_lastErrorIndex() const
+{
+	return m_lastErrorIndex;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UINT32 RecognizerSharedState<ImplTraits, StreamType>::get_errorCount() const
+{
+	return m_errorCount;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_INT32 RecognizerSharedState<ImplTraits, StreamType>::get_backtracking() const
+{
+	return m_backtracking;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename RecognizerSharedState<ImplTraits, StreamType>::RuleMemoType* RecognizerSharedState<ImplTraits, StreamType>::get_ruleMemo() const
+{
+	return m_ruleMemo;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UINT8** RecognizerSharedState<ImplTraits, StreamType>::get_tokenNames() const
+{
+	return m_tokenNames;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UINT8* RecognizerSharedState<ImplTraits, StreamType>::get_tokenName( ANTLR_UINT32 i ) const
+{
+	return m_tokenNames[i];
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename RecognizerSharedState<ImplTraits, StreamType>::CommonTokenType* RecognizerSharedState<ImplTraits, StreamType>::get_token()
+{
+	return &m_token;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename RecognizerSharedState<ImplTraits, StreamType>::TokenSourceType* RecognizerSharedState<ImplTraits, StreamType>::get_tokSource() const
+{
+	return m_tokSource;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UINT32& RecognizerSharedState<ImplTraits, StreamType>::get_channel()
+{
+	return m_channel;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_UINT32 RecognizerSharedState<ImplTraits, StreamType>::get_type() const
+{
+	return m_type;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_INT32 RecognizerSharedState<ImplTraits, StreamType>::get_tokenStartLine() const
+{
+	return m_tokenStartLine;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_INT32 RecognizerSharedState<ImplTraits, StreamType>::get_tokenStartCharPositionInLine() const
+{
+	return m_tokenStartCharPositionInLine;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE ANTLR_MARKER RecognizerSharedState<ImplTraits, StreamType>::get_tokenStartCharIndex() const
+{
+	return m_tokenStartCharIndex;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename RecognizerSharedState<ImplTraits, StreamType>::StringType& RecognizerSharedState<ImplTraits, StreamType>::get_text()
+{
+	return m_text;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE typename RecognizerSharedState<ImplTraits, StreamType>::StreamsType& RecognizerSharedState<ImplTraits, StreamType>::get_streams()
+{
+	return m_streams;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_exception( ExceptionBaseType* exception )
+{
+	m_exception = exception;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_following( const FollowingType& following )
+{
+	m_following = following;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_sizeHint( ANTLR_UINT32 sizeHint )
+{
+	m_sizeHint = sizeHint;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_error( bool error )
+{
+	m_error = error;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_errorRecovery( bool errorRecovery )
+{
+	m_errorRecovery = errorRecovery;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_failed( bool failed )
+{
+	m_failed = failed;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void  RecognizerSharedState<ImplTraits, StreamType>::set_token_present(bool token_present)
+{
+	m_token_present = token_present;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_lastErrorIndex( ANTLR_MARKER lastErrorIndex )
+{
+	m_lastErrorIndex = lastErrorIndex;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_errorCount( ANTLR_UINT32 errorCount )
+{
+	m_errorCount = errorCount;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_backtracking( ANTLR_INT32 backtracking )
+{
+	m_backtracking = backtracking;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_ruleMemo( RuleMemoType* ruleMemo )
+{
+	m_ruleMemo = ruleMemo;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_tokenNames( ANTLR_UINT8** tokenNames )
+{
+	m_tokenNames = tokenNames;
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_tokSource( TokenSourceType* tokSource )
+{
+	m_tokSource = tokSource;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_channel( ANTLR_UINT32 channel )
+{
+	m_channel = channel;
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void  RecognizerSharedState<ImplTraits, StreamType>::set_token(const CommonTokenType* tok)
+{
+	this->set_token_present( tok != NULL );
+	if( tok != NULL )
+		m_token = *tok;
+}
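+
+// For illustration: because the token is held by value, the presence flag is
+// what distinguishes "no token" from a default-constructed one. A
+// hypothetical caller might do:
+//
+//     state->set_token(&tok);              // copies tok, sets the presence flag
+//     state->set_token(NULL);              // clears the presence flag only
+//     if( state->get_token_present() )
+//         use( state->get_token() );       // use() is a placeholder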
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_type( ANTLR_UINT32 type )
+{
+	m_type = type;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_tokenStartLine( ANTLR_INT32 tokenStartLine )
+{
+	m_tokenStartLine = tokenStartLine;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_tokenStartCharPositionInLine( ANTLR_INT32 tokenStartCharPositionInLine )
+{
+	m_tokenStartCharPositionInLine = tokenStartCharPositionInLine;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_tokenStartCharIndex( ANTLR_MARKER tokenStartCharIndex )
+{
+	m_tokenStartCharIndex = tokenStartCharIndex;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_text( const StringType& text )
+{
+	m_text = text;
+}
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::set_streams( const InputStreamsType& streams )
+{
+	m_streams = streams;
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::inc_errorCount()
+{
+	++m_errorCount;
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::inc_backtracking()
+{
+	++m_backtracking;
+}
+
+template<class ImplTraits, class StreamType>
+ANTLR_INLINE void RecognizerSharedState<ImplTraits, StreamType>::dec_backtracking()
+{
+	--m_backtracking;
+}
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3rewritestreams.hpp b/runtime/Cpp/include/antlr3rewritestreams.hpp
new file mode 100755
index 0000000..e89d70a
--- /dev/null
+++ b/runtime/Cpp/include/antlr3rewritestreams.hpp
@@ -0,0 +1,254 @@
+#ifndef	ANTLR3REWRITESTREAM_HPP
+#define	ANTLR3REWRITESTREAM_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+/// A generic list of elements tracked in an alternative to be used in
+/// a -> rewrite rule.  
+///
+/// In the C implementation, all tree oriented streams return a pointer to 
+/// the same type: pANTLR3_BASE_TREE. Anything that has subclassed from this
+/// still passes this type, within which there is a super pointer, which points
+/// to it's own data and methods. Hence we do not need to implement this as
+/// the equivalent of an abstract class, but just fill in the appropriate interface
+/// as usual with this model.
+///
+/// Once you start next()ing, do not try to add more elements.  It will
+/// break the cursor tracking I believe.
+///
+/// 
+/// \see #pANTLR3_REWRITE_RULE_NODE_STREAM
+/// \see #pANTLR3_REWRITE_RULE_ELEMENT_STREAM
+/// \see #pANTLR3_REWRITE_RULE_SUBTREE_STREAM
+///
+/// TODO: add mechanism to detect/puke on modification after reading from stream
+///
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits, class SuperType>
+class RewriteRuleElementStream  : public ImplTraits::AllocPolicyType
+{
+public:
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename ImplTraits::TreeAdaptorType TreeAdaptorType;
+
+	typedef typename ImplTraits::template RecognizerType< typename SuperType::StreamType > RecognizerType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename SuperType::TokenType TokenType; 
+	typedef typename AllocPolicyType::template VectorType< TokenType* > ElementsType;
+
+protected:
+	/// Track single elements w/o creating a list.  Upon 2nd add, alloc list 
+    ///
+    TokenType*			m_singleElement;
+
+    /// The list of tokens or subtrees we are tracking 
+    ///
+    ElementsType		m_elements;
+
+    /// The element or stream description; usually has name of the token or
+    /// rule reference that this list tracks.  Can include rulename too, but
+    /// the exception would track that info.
+    ///
+    StringType			m_elementDescription;
+
+	/// Pointer to the tree adaptor in use for this stream
+	///
+    TreeAdaptorType*	m_adaptor;
+
+	// Pointer to the recognizer shared state to which this stream belongs
+	//
+	RecognizerType*			m_rec;
+
+	/// Cursor 0..n-1.  If singleElement!=NULL, cursor is 0 until you next(),
+    /// which bumps it to 1 meaning no more elements.
+    ///
+    ANTLR_UINT32		m_cursor;
+
+	/// Once a node / subtree has been used in a stream, it must be dup'ed
+	/// from then on.  Streams are reset after sub rules so that the streams
+	/// can be reused in future sub rules.  So, reset must set a dirty bit.
+	/// If dirty, then next() always returns a dup.
+	///
+	bool				m_dirty;
+
+public:
+	RewriteRuleElementStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description);
+	RewriteRuleElementStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description, TokenType* oneElement);
+	RewriteRuleElementStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description, const ElementsType& elements);
+
+	~RewriteRuleElementStream();
+    //   Methods 
+
+    /// Reset the condition of this stream so that it appears we have
+    ///  not consumed any of its elements.  Elements themselves are untouched.
+    ///
+    void	reset(); 
+
+    /// Add a new pANTLR3_BASE_TREE to this stream
+    ///
+    void	add(TokenType* el);
+
+    /// Return the next element in the stream.  If out of elements, throw
+    /// an exception unless size()==1.  If size is 1, then return elements[0].
+    ///
+	TokenType*	next();
+    TreeType*	nextTree();
+    TokenType*	nextToken();
+    TokenType*	_next();
+
+	/// When constructing trees, sometimes we need to dup a token or AST
+    ///	subtree.  Dup'ing a token means just creating another AST node
+    /// around it.  For trees, you must call the adaptor.dupTree().
+    ///
+	TokenType* dup( TokenType* el );
+
+    /// Ensure stream emits trees; tokens must be converted to AST nodes.
+    /// AST nodes can be passed through unmolested.
+    ///
+    TreeType*	toTree(TreeType* el);
+
+    /// Returns true if there is a next element available
+    ///
+    bool	hasNext();
+
+    /// Treat next element as a single node even if it's a subtree.
+    /// This is used instead of next() when the result has to be a
+    /// tree root node.  Also prevents us from duplicating recently-added
+    /// children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
+    /// must dup the type node, but ID has been added.
+    ///
+    /// Referencing a rule result twice is ok; dup entire tree as
+    /// we can't be adding trees; e.g., expr expr.
+    ///
+    TreeType*	nextNode();
+
+    /// Number of elements available in the stream
+    ///
+    ANTLR_UINT32	size();
+
+    /// Returns the description string if there is one available (check for NULL).
+    ///
+    StringType getDescription();
+
+protected:
+	void init(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description);
+};
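+
+/// A rough sketch of how generated rewrite code typically drives one of these
+/// streams (the stream name, the collected token and the way each node is
+/// attached are hypothetical):
+///
+///     stream_ID.add(idToken);             // collected while matching the rule
+///     while( stream_ID.hasNext() )
+///     {
+///         TreeType* n = stream_ID.nextNode();
+///         // ... attach n to the tree being built via the adaptor ...
+///     }
+///     stream_ID.reset();                  // stream can be reused in a later sub rule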
+
+/// This is an implementation of a token stream, which is basically an element
+///  stream that deals with tokens only.
+///
+template<class ImplTraits>
+class RewriteRuleTokenStream : public ImplTraits::template RewriteRuleElementStreamType< typename ImplTraits::ParserType> 
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename ImplTraits::TreeAdaptorType TreeAdaptorType;
+	typedef typename ImplTraits::ParserType ComponentType;
+	typedef typename ComponentType::StreamType StreamType;
+	typedef typename ImplTraits::CommonTokenType TokenType;
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef typename AllocPolicyType::template VectorType< TokenType* > ElementsType;
+	typedef typename ImplTraits::template RecognizerType< StreamType > RecognizerType;
+	typedef typename ImplTraits::template RewriteRuleElementStreamType< typename ImplTraits::ParserType> BaseType;
+
+public:
+	RewriteRuleTokenStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description);
+	RewriteRuleTokenStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description, TokenType* oneElement);
+	RewriteRuleTokenStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description, const ElementsType& elements);
+	TreeType*	nextNode();
+
+private:
+	TreeType*	nextNodeToken();
+};
+
+/// This is an implementation of a subtree stream which is a set of trees
+///  modelled as an element stream.
+///
+template<class ImplTraits>
+class RewriteRuleSubtreeStream : public ImplTraits::template RewriteRuleElementStreamType< typename ImplTraits::TreeParserType> 
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename ImplTraits::TreeAdaptorType TreeAdaptorType;
+	typedef typename ImplTraits::TreeParserType ComponentType;
+	typedef typename ComponentType::StreamType StreamType;
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef TreeType TokenType;
+	typedef typename ImplTraits::template RecognizerType< StreamType > RecognizerType;
+	typedef typename AllocPolicyType::template VectorType< TokenType* > ElementsType;
+	typedef typename ImplTraits::template RewriteRuleElementStreamType< typename ImplTraits::TreeParserType>  BaseType;
+
+public:
+	RewriteRuleSubtreeStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description);
+	RewriteRuleSubtreeStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description, TokenType* oneElement);
+	RewriteRuleSubtreeStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description, const ElementsType& elements);
+
+	TreeType* dup( TreeType* el );
+
+private:
+	TreeType* dupTree( TreeType* el );
+};
+
+/// This is an implementation of a node stream, which is basically an element
+///  stream that deals with tree nodes only.
+///
+template<class ImplTraits>
+class RewriteRuleNodeStream : public ImplTraits::template RewriteRuleElementStreamType< typename ImplTraits::TreeParserType> 
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename ImplTraits::TreeAdaptorType TreeAdaptorType;
+	typedef typename ImplTraits::TreeParserType ComponentType;
+	typedef typename ComponentType::StreamType StreamType;
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef TreeType TokenType;	
+	typedef typename ImplTraits::template RecognizerType< StreamType > RecognizerType;
+	typedef typename AllocPolicyType::template VectorType< TokenType* > ElementsType;
+	typedef typename ImplTraits::template RewriteRuleElementStreamType< typename ImplTraits::TreeParserType>  BaseType;
+
+public:
+	RewriteRuleNodeStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description);
+	RewriteRuleNodeStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description, TokenType* oneElement);
+	RewriteRuleNodeStream(TreeAdaptorType* adaptor, RecognizerType* rec, ANTLR_UINT8* description, const ElementsType& elements);
+
+	TreeType*	toTree(TreeType* element);
+
+private:
+	TreeType*	toTreeNode(TreeType* element);
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3rewritestreams.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3rewritestreams.inl b/runtime/Cpp/include/antlr3rewritestreams.inl
new file mode 100755
index 0000000..3303e8c
--- /dev/null
+++ b/runtime/Cpp/include/antlr3rewritestreams.inl
@@ -0,0 +1,374 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits, class SuperType>
+RewriteRuleElementStream<ImplTraits, SuperType>::RewriteRuleElementStream(TreeAdaptorType* adaptor, 
+													RecognizerType* rec, ANTLR_UINT8* description)
+{
+	this->init(adaptor, rec, description);
+}
+
+template<class ImplTraits, class SuperType>
+RewriteRuleElementStream<ImplTraits, SuperType>::RewriteRuleElementStream(TreeAdaptorType* adaptor, 
+								RecognizerType* rec, ANTLR_UINT8* description, TokenType* oneElement)
+{
+	this->init(adaptor, rec, description);
+	if( oneElement != NULL )
+		this->add( oneElement );
+}
+
+template<class ImplTraits, class SuperType>
+RewriteRuleElementStream<ImplTraits, SuperType>::RewriteRuleElementStream(TreeAdaptorType* adaptor, 
+						RecognizerType* rec, ANTLR_UINT8* description, const ElementsType& elements)
+						:m_elements(elements)
+{
+	this->init(adaptor, rec, description);
+}
+
+template<class ImplTraits, class SuperType>
+void RewriteRuleElementStream<ImplTraits, SuperType>::init(TreeAdaptorType* adaptor, 
+								RecognizerType* rec, ANTLR_UINT8* description)
+{
+	m_rec = rec;
+	m_adaptor = adaptor;
+	m_cursor  = 0;
+	m_dirty	  = false;
+	m_singleElement = NULL;
+}
+
+template<class ImplTraits>
+RewriteRuleTokenStream<ImplTraits>::RewriteRuleTokenStream(TreeAdaptorType* adaptor, 
+							    RecognizerType* rec, ANTLR_UINT8* description)
+                                                              :BaseType(adaptor, rec, description)
+{
+}
+
+template<class ImplTraits>
+RewriteRuleTokenStream<ImplTraits>::RewriteRuleTokenStream(TreeAdaptorType* adaptor, RecognizerType* rec, 
+							    ANTLR_UINT8* description, TokenType* oneElement)
+							:BaseType(adaptor, rec, description, oneElement)
+{
+}
+
+template<class ImplTraits>
+RewriteRuleTokenStream<ImplTraits>::RewriteRuleTokenStream(TreeAdaptorType* adaptor, 
+						RecognizerType* rec, ANTLR_UINT8* description, const ElementsType& elements)
+						:BaseType(adaptor, rec, description, elements)
+{
+}
+
+template<class ImplTraits>
+RewriteRuleSubtreeStream<ImplTraits>::RewriteRuleSubtreeStream(TreeAdaptorType* adaptor, 
+								RecognizerType* rec, ANTLR_UINT8* description)
+						 :BaseType(adaptor, rec, description)
+{
+}
+
+template<class ImplTraits>
+RewriteRuleSubtreeStream<ImplTraits>::RewriteRuleSubtreeStream(TreeAdaptorType* adaptor, RecognizerType* rec, 
+								ANTLR_UINT8* description, TokenType* oneElement)
+							:BaseType(adaptor, rec, description, oneElement)
+{
+}
+
+template<class ImplTraits>
+RewriteRuleSubtreeStream<ImplTraits>::RewriteRuleSubtreeStream(TreeAdaptorType* adaptor, 
+						RecognizerType* rec, ANTLR_UINT8* description, const ElementsType& elements)
+						:BaseType(adaptor, rec, description, elements)
+{
+}
+
+template<class ImplTraits>
+RewriteRuleNodeStream<ImplTraits>::RewriteRuleNodeStream(TreeAdaptorType* adaptor, 
+							 RecognizerType* rec, ANTLR_UINT8* description)
+						:BaseType(adaptor, rec, description)
+{
+}
+
+template<class ImplTraits>
+RewriteRuleNodeStream<ImplTraits>::RewriteRuleNodeStream(TreeAdaptorType* adaptor, RecognizerType* rec, 
+							ANTLR_UINT8* description, TokenType* oneElement)
+						:BaseType(adaptor, rec, description, oneElement)
+{
+}
+
+template<class ImplTraits>
+RewriteRuleNodeStream<ImplTraits>::RewriteRuleNodeStream(TreeAdaptorType* adaptor, 
+						RecognizerType* rec, ANTLR_UINT8* description, const ElementsType& elements)
+						:BaseType(adaptor, rec, description, elements)
+{
+}
+
+template<class ImplTraits, class SuperType>
+void	RewriteRuleElementStream<ImplTraits, SuperType>::reset()
+{
+	m_dirty = true;
+	m_cursor = 0;
+}
+
+template<class ImplTraits, class SuperType>
+void	RewriteRuleElementStream<ImplTraits, SuperType>::add(TokenType* el)
+{
+	if ( el== NULL ) 
+		return;
+
+	if ( !m_elements.empty() ) 
+	{ 
+		// if in list, just add
+		m_elements.push_back(el);
+		return;
+	}
+		
+	if ( m_singleElement == NULL ) 
+	{ 
+		// no elements yet, track w/o list
+		m_singleElement = el;
+		return;
+	}
+
+	// adding 2nd element, move to list
+	m_elements.push_back(m_singleElement);
+	m_singleElement = NULL;
+	m_elements.push_back(el);
+}
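+
+// For illustration, the single-element promotion above behaves like this
+// (element names are hypothetical):
+//
+//     stream.add(a);   // m_singleElement = a, m_elements stays empty
+//     stream.add(b);   // m_singleElement = NULL, m_elements = { a, b }
+//     stream.add(c);   // m_elements = { a, b, c }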
+
+template<class ImplTraits, class SuperType>
+typename RewriteRuleElementStream<ImplTraits, SuperType>::TokenType*  
+RewriteRuleElementStream<ImplTraits, SuperType>::_next()
+{
+	ANTLR_UINT32		n;
+	TreeType*	t;
+
+	n = this->size();
+
+	if (n == 0)
+	{
+		// This means that the stream is empty
+		//
+		return NULL;	// Caller must cope with this
+	}
+
+	// Traversed all the available elements already?
+	//
+	if ( m_cursor >= n)
+	{
+		if (n == 1)
+		{
+			// Special case: when the stream holds a single element it just gets duplicated repeatedly
+			//
+			return this->toTree(m_singleElement);
+		}
+
+		// Out of elements and the size is not 1, so we cannot assume
+		// that we just duplicate the entry n times (such as ID ent+ -> ^(ID ent)+)
+		// This means we ran out of elements earlier than was expected.
+		//
+		return NULL;	// Caller must cope with this
+	}
+
+	// Elements available either for duping or just available
+	//
+	if ( m_singleElement != NULL)
+	{
+		m_cursor++;   // Cursor advances even for single element as this tells us to dup()
+		return this->toTree(m_singleElement);
+	}
+
+	// More than just a single element so we extract it from the 
+	// vector.
+	//
+	t = this->toTree( m_elements.at(m_cursor));
+	m_cursor++;
+	return t;
+}
+
+template<class ImplTraits, class SuperType>
+typename RewriteRuleElementStream<ImplTraits, SuperType>::TreeType*  
+RewriteRuleElementStream<ImplTraits, SuperType>::nextTree()
+{
+	ANTLR_UINT32		n;
+	TreeType*  el;
+
+	n = this->size();
+
+	if ( m_dirty || ( (m_cursor >=n) && (n==1)) ) 
+	{
+		// if out of elements and size is 1, dup
+		//
+		el = this->_next();
+		return this->dup(el);
+	}
+
+	// test size above then fetch
+	//
+	el = this->_next();
+	return el;
+}
+
+template<class ImplTraits, class SuperType>
+typename RewriteRuleElementStream<ImplTraits, SuperType>::TokenType*	
+RewriteRuleElementStream<ImplTraits, SuperType>::nextToken()
+{
+	return this->_next();
+}
+
+template<class ImplTraits, class SuperType>
+typename RewriteRuleElementStream<ImplTraits, SuperType>::TokenType*		
+RewriteRuleElementStream<ImplTraits, SuperType>::next()
+{
+	ANTLR_UINT32   s;
+	s = this->size();
+	if ( (m_cursor >= s) && (s == 1) )
+	{
+		TreeType* el;
+		el = this->_next();
+		return	this->dup(el);
+	}
+	return this->_next();
+}
+
+template<class ImplTraits>
+typename RewriteRuleSubtreeStream<ImplTraits>::TreeType*	
+RewriteRuleSubtreeStream<ImplTraits>::dup(TreeType* element)
+{
+	return this->dupTree(element);
+}
+
+template<class ImplTraits>
+typename RewriteRuleSubtreeStream<ImplTraits>::TreeType*	
+RewriteRuleSubtreeStream<ImplTraits>::dupTree(TreeType* element)
+{
+	return BaseType::m_adaptor->dupNode(element);
+}
+
+template<class ImplTraits, class SuperType>
+typename RewriteRuleElementStream<ImplTraits, SuperType>::TreeType*	
+RewriteRuleElementStream<ImplTraits, SuperType>::toTree( TreeType* element)
+{
+	return element;
+}
+
+template<class ImplTraits>
+typename RewriteRuleNodeStream<ImplTraits>::TreeType*	
+RewriteRuleNodeStream<ImplTraits>::toTree(TreeType* element)
+{
+	return this->toTreeNode(element);
+}
+
+template<class ImplTraits>
+typename RewriteRuleNodeStream<ImplTraits>::TreeType*	
+RewriteRuleNodeStream<ImplTraits>::toTreeNode(TreeType* element)
+{
+	return BaseType::m_adaptor->dupNode(element);
+}
+
+template<class ImplTraits, class SuperType>
+bool RewriteRuleElementStream<ImplTraits, SuperType>::hasNext()
+{
+	if (	((m_singleElement != NULL) && (m_cursor < 1))
+		||	 ( !m_elements.empty() && m_cursor < m_elements.size()))
+	{
+		return true;
+	}
+	else
+	{
+		return false;
+	}
+}
+
+template<class ImplTraits >
+typename RewriteRuleTokenStream<ImplTraits>::TreeType*
+RewriteRuleTokenStream<ImplTraits>::nextNode()
+{
+	return this->nextNodeToken();
+}
+
+template<class ImplTraits>
+typename RewriteRuleTokenStream<ImplTraits>::TreeType*
+RewriteRuleTokenStream<ImplTraits>::nextNodeToken()
+{
+	return BaseType::m_adaptor->create(this->_next());
+}
+
+/// Number of elements available in the stream
+///
+template<class ImplTraits, class SuperType>
+ANTLR_UINT32	RewriteRuleElementStream<ImplTraits, SuperType>::size()
+{
+	ANTLR_UINT32   n = 0;
+
+	/// Should be a count of one if singleElement is set. I copied this
+	/// logic from the java implementation, which I suspect is just guarding
+	/// against someone setting singleElement and forgetting to NULL it out
+	///
+	if ( m_singleElement != NULL)
+	{
+		n = 1;
+	}
+	else
+	{
+		if ( !m_elements.empty() )
+		{
+			return (ANTLR_UINT32)(m_elements.size());
+		}
+	}
+	return n;
+
+}
+
+template<class ImplTraits, class SuperType>
+typename RewriteRuleElementStream<ImplTraits, SuperType>::StringType
+RewriteRuleElementStream<ImplTraits, SuperType>::getDescription()
+{
+	if ( m_elementDescription.empty() )
+	{
+		m_elementDescription = "<unknown source>";
+	}
+	return  m_elementDescription;
+}
+
+template<class ImplTraits, class SuperType>
+RewriteRuleElementStream<ImplTraits, SuperType>::~RewriteRuleElementStream()
+{
+	TreeType* tree;
+
+    // Before placing the stream back in the pool, we
+	// need to clear any vector it has. This is so any
+	// free pointers that are associated with the
+	// entries are called. However, if this particular function is called
+    // then we know that the entries in the stream are definitely
+    // tree nodes. Hence we check to see if any of them were nilNodes as
+    // if they were, we can reuse them.
+	//
+	if	( !m_elements.empty() )
+	{
+        // We have some elements to traverse
+        //
+        ANTLR_UINT32 i;
+
+        for (i = 1; i<= m_elements.size(); i++)
+        {
+            tree = m_elements.at(i-1);
+            if  ( (tree != NULL) && tree->isNilNode() )
+            {
+                // Had to remove this for now, check is not comprehensive enough
+                // tree->reuse(tree);
+            }
+        }
+		m_elements.clear();
+	}
+	else
+	{
+        if  (m_singleElement != NULL)
+        {
+            tree = m_singleElement;
+            if  (tree->isNilNode())
+            {
+                // Had to remove this for now, check is not comprehensive enough
+              //   tree->reuse(tree);
+            }
+        }
+        m_singleElement = NULL;
+	}
+}
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3tokenstream.hpp b/runtime/Cpp/include/antlr3tokenstream.hpp
new file mode 100755
index 0000000..421ba7f
--- /dev/null
+++ b/runtime/Cpp/include/antlr3tokenstream.hpp
@@ -0,0 +1,408 @@
+/** \file
+ * Defines the interface for an ANTLR3 common token stream. Custom token streams should create
+ * one of these and then override any functions by installing their own pointers
+ * to implement the various functions.
+ */
+#ifndef	_ANTLR3_TOKENSTREAM_HPP
+#define	_ANTLR3_TOKENSTREAM_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+/** Definition of a token source, which has a pointer to a function that
+ *  returns the next token (using a token factory if it is going to be
+ *  efficient) and a pointer to an ANTLR3_INPUT_STREAM. This is slightly
+ *  different to the Java interface because we have no way to implement
+ *  multiple interfaces without defining them in the interface structure
+ *  or casting (void *), which is too convoluted.
+ */
+ANTLR_BEGIN_NAMESPACE()
+
+//We are not making it subclass AllocPolicy, as this will always be a base class
+template<class ImplTraits>
+class TokenSource
+{
+public:
+	typedef typename ImplTraits::CommonTokenType TokenType;
+	typedef TokenType CommonTokenType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::LexerType LexerType;
+
+private:
+    /** A special pre-allocated token, which signifies End Of Tokens. Because this must
+     *  be set up with the current input index and so on, we embed the structure and
+     *  return the address of it. It is marked as factoryMade, so that no attempt
+     *  is ever made to free it.
+     */
+    TokenType				m_eofToken;
+
+	/// A special pre-allocated token, which is returned by mTokens() if the
+	/// lexer rule said to just skip the generated token altogether.
+	/// Having this single token stops us wasting memory by having the token factory
+	/// actually create something that we are going to SKIP(); anyway.
+	///
+	TokenType				m_skipToken;
+
+    /** When the token source is constructed, it is populated with the file
+     *  name from whence the tokens were produced by the lexer. This pointer is a
+     *  copy of the one supplied by the CharStream (and may be NULL) so should
+     *  not be manipulated other than to copy or print it.
+     */
+    StringType				m_fileName;
+
+public:
+	TokenType& get_eofToken();
+	const TokenType& get_eofToken() const;
+	TokenType& get_skipToken();
+	StringType& get_fileName();
+	LexerType* get_super();
+
+	void set_fileName( const StringType& fileName );
+
+	/**
+	 * \brief
+	 * Default implementation of the nextToken() call for a lexer.
+	 *
+	 * The lexer implementation is reached via the super structure pointer
+	 * (see get_super()), so no token source parameter is required.
+	 *
+	 * \returns
+	 * The next token in the current input stream or the EOF token
+	 * if there are no more tokens in any input stream in the stack.
+	 *
+	 * \see nextTokenStr
+	 */
+    TokenType*  nextToken();
+	CommonTokenType* nextToken( BoolForwarder<true> /*isFiltered*/ );
+	CommonTokenType* nextToken( BoolForwarder<false> /*isFiltered*/ );
+
+	///
+	/// \brief
+	/// Returns the next available token from the current input stream.
+	///
+	/// The lexer implementation is reached via the super structure pointer
+	/// (see get_super()), so no token source parameter is required.
+	///
+	/// \returns
+	/// The next token in the current input stream or the EOF token
+	/// if there are no more tokens.
+	///
+	/// \see nextToken
+	///
+	TokenType*	nextTokenStr();
+
+protected:
+	TokenSource();
+};
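+
+// Illustrative usage sketch (not part of the runtime API surface): a generated lexer
+// derives from TokenSource, so tokens can be pulled from it directly until the
+// pre-allocated EOF token comes back. "MyTraits" and "lexer" are hypothetical names
+// standing for a Traits instantiation and a generated lexer already attached to an
+// input stream.
+//
+//   MyTraits::CommonTokenType* tok = lexer.nextToken();
+//   while( tok->get_type() != MyTraits::CommonTokenType::TOKEN_EOF )
+//   {
+//       // use tok->getText(), tok->get_channel(), ... then fetch the next one
+//       tok = lexer.nextToken();
+//   }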
+
+/** Definition of the ANTLR3 common token stream interface.
+ * \remark
+ * Much of the documentation for this interface is stolen from Ter's Java implementation.
+ */
+template<class ImplTraits>
+class TokenStream  : public ImplTraits::TokenIntStreamType
+{
+public:
+	typedef typename ImplTraits::TokenSourceType TokenSourceType;
+	typedef typename ImplTraits::TokenIntStreamType IntStreamType;
+	typedef typename ImplTraits::CommonTokenType TokenType;
+	typedef TokenType UnitType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::DebugEventListenerType DebugEventListenerType;
+	typedef typename ImplTraits::TokenStreamType TokenStreamType;
+	typedef typename ImplTraits::ParserType ComponentType;
+
+protected:
+    /** Pointer to the token source for this stream
+     */
+    TokenSourceType*    m_tokenSource;
+
+	/// Debugger interface, if this is a debugging token stream
+	///
+	DebugEventListenerType*	m_debugger;
+
+	/// Indicates the initial stream state for dbgConsume()
+	///
+	bool				m_initialStreamState;
+
+public:
+	TokenStream(TokenSourceType* source, DebugEventListenerType* debugger);
+	IntStreamType* get_istream();
+	TokenSourceType* get_tokenSource() const;
+	void set_tokenSource( TokenSourceType* tokenSource );
+
+    /** Get Token at current input pointer + i ahead where i=1 is next Token.
+     *  i<0 indicates tokens in the past.  So -1 is previous token and -2 is
+     *  two tokens ago. LT(0) is undefined.  For i>=n, return Token.EOFToken.
+     *  Return null for LT(0) and any index that results in an absolute address
+     *  that is negative.
+     */
+    const TokenType*  _LT(ANTLR_INT32 k);
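+    // For example (illustrative): given the tokens  A B C  with A already consumed,
+    // _LT(1) == B, _LT(2) == C and _LT(-1) == A; _LT(0) is undefined, and an index
+    // beyond the end of the stream yields the EOF token.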
+
+    /** Where is this stream pulling tokens from?  This is not the name, but
+     *  a pointer into an interface that contains an ANTLR3_TOKEN_SOURCE interface.
+     *  The Token Source interface contains a pointer to the input stream and a pointer
+     *  to a function that returns the next token.
+     */
+    TokenSourceType*   getTokenSource();
+
+    /** Function that installs a token source for the stream
+     */
+    void	setTokenSource(TokenSourceType*   tokenSource);
+
+    /** Return the text of all the tokens in the stream, as the old tramp in
+     *  Leeds market used to say: "Get the lot!"
+     */
+    StringType	toString();
+
+    /** Return the text of all tokens from start to stop, inclusive.
+     *  If the stream does not buffer all the tokens then it can just
+     *  return an empty ANTLR3_STRING or NULL;  Grammars should not access $ruleLabel.text in
+     *  an action in that case.
+     */
+    StringType	 toStringSS(ANTLR_MARKER start, ANTLR_MARKER stop);
+
+    /** Because the user is not required to use a token with an index stored
+     *  in it, we must provide a means for two token objects themselves to
+     *  indicate the start/end location.  Most often this will just delegate
+     *  to the other toString(int,int).  This is also parallel with
+     *  the pTREENODE_STREAM->toString(Object,Object).
+     */
+    StringType	 toStringTT(const TokenType* start, const TokenType* stop);
+
+
+    /** Function that sets the token stream into debugging mode
+     */
+    void	setDebugListener(DebugEventListenerType* debugger);
+
+	TokenStream();
+
+};
+
+/** Common token stream is an implementation of ANTLR_TOKEN_STREAM for the default
+ *  parsers and recognizers. You may of course build your own implementation if
+ *  you are so inclined.
+ */
+template<bool TOKENS_ACCESSED_FROM_OWNING_RULE, class ListType, class MapType>
+class TokenStoreSelector
+{
+public:
+	typedef ListType TokensType;
+};
+
+template<class ListType, class MapType>
+class TokenStoreSelector<true, ListType, MapType>
+{
+public:
+	typedef MapType TokensType;
+};
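+
+// Illustrative note on the selector above: with TOKENS_ACCESSED_FROM_OWNING_RULE == false
+// (the default in CustomTraitsBase) the token buffer resolves to the sequential ListType,
+// indexed by position; with it set to true the buffer becomes the OrderedMapType keyed by
+// token index, which is what allows discardTokens() below to erase a rule's tokens without
+// disturbing the indices of those that remain. For instance:
+//
+//   TokenStoreSelector<false, MyList, MyMap>::TokensType   // resolves to MyList
+//   TokenStoreSelector<true,  MyList, MyMap>::TokensType   // resolves to MyMap
+//
+// where MyList and MyMap stand for the list and map types supplied by the allocation policy.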
+
+template<class ImplTraits>
+class	CommonTokenStream : public TokenStream<ImplTraits>
+{
+public:
+	typedef typename ImplTraits::AllocPolicyType AllocPolicyType;
+	typedef typename ImplTraits::BitsetType BitsetType;
+	typedef typename ImplTraits::CommonTokenType TokenType;
+	typedef typename ImplTraits::TokenSourceType TokenSourceType;
+	typedef typename ImplTraits::DebugEventListenerType DebugEventListenerType;
+	typedef typename AllocPolicyType::template ListType<TokenType> TokensListType;
+	typedef typename AllocPolicyType::template OrderedMapType<ANTLR_MARKER, TokenType> TokensMapType;
+	typedef typename TokenStoreSelector< ImplTraits::TOKENS_ACCESSED_FROM_OWNING_RULE,
+	                                       TokensListType, TokensMapType >::TokensType TokensType;
+
+	typedef typename AllocPolicyType::template UnOrderedMapType<ANTLR_UINT32, ANTLR_UINT32> ChannelOverridesType;
+	typedef typename AllocPolicyType::template OrderedSetType<ANTLR_UINT32> DiscardSetType;
+	typedef typename AllocPolicyType::template ListType<ANTLR_UINT32> IntListType;
+	typedef TokenStream<ImplTraits> BaseType;
+
+private:
+    /** Records every single token pulled from the source, indexed by the token index.
+     *  There might be more efficient ways to do this, such as referencing directly into
+     *  the token factory pools, but for now this is convenient, and the container is not
+     *  a huge overhead as it only stores the tokens anyway, while still allowing
+     *  iteration and so on.
+     */
+     */
+    TokensType			m_tokens;
+
+    /** Override map of tokens. If a token type has an entry in here, then
+     *  the pointer in the table points to an int, being the override channel number
+     *  that should always be used for this token type.
+     */
+    ChannelOverridesType	m_channelOverrides;
+
+    /** Discard set. If a token type has an entry in this set, then any token of that
+     *  type is thrown away.
+     */
+    DiscardSetType			m_discardSet;
+
+    /* The channel number that this token stream is tuned to. For instance, whitespace
+     * is usually tuned to channel 99, which no token stream would normally tune to and
+     * so it is thrown away.
+     */
+    ANTLR_UINT32			m_channel;
+
+	/** The index into the tokens list of the current token (the next one that will be
+     *  consumed). p = -1 indicates that the token list is empty.
+     */
+    ANTLR_INT32				m_p;
+
+	/* The total number of tokens issued so far. For streams that delete tokens,
+	   this helps in assigning the next token index.
+	 */
+	ANTLR_UINT32			m_nissued;
+
+    /** If this flag is set to true, then tokens that the stream sees which are not on
+     *  the channel that this stream is tuned to are not tracked in the
+     *  tokens table. When set to false, ALL tokens are added to the tracking.
+     */
+    bool					m_discardOffChannel;
+
+public:
+	CommonTokenStream(ANTLR_UINT32 hint, TokenSourceType* source = NULL,
+										DebugEventListenerType* debugger = NULL);
+	~CommonTokenStream();
+	TokensType& get_tokens();
+	const TokensType& get_tokens() const;
+	DiscardSetType& get_discardSet();
+	const DiscardSetType& get_discardSet() const;
+	ANTLR_INT32 get_p() const;
+	void set_p( ANTLR_INT32 p );
+	void inc_p();
+	void dec_p();
+
+    /** A simple filter mechanism whereby you can tell this token stream
+     *  to force all tokens of type ttype to be on channel.  For example,
+     *  when interpreting, we cannot execute actions, so we need to tell
+     *  the stream to force all WS and NEWLINE to be a different, ignored
+     *  channel.
+     */
+    void setTokenTypeChannel(ANTLR_UINT32 ttype, ANTLR_UINT32 channel);
+
+    /** Add a particular token type to the discard set. If a token is found to belong
+     *  to this set, then it is skipped/thrown away
+     */
+    void discardTokenType(ANTLR_INT32 ttype);
+
+	//This will discard tokens of a particular rule after the rule execution completion
+	void discardTokens( ANTLR_MARKER start, ANTLR_MARKER stop );
+	void discardTokens( ANTLR_MARKER start, ANTLR_MARKER stop, 
+								BoolForwarder<true>  tokens_accessed_from_owning_rule  );
+	void discardTokens( ANTLR_MARKER start, ANTLR_MARKER stop, 
+								BoolForwarder<false>  tokens_accessed_from_owning_rule  );
+
+	void insertToken( const TokenType& tok );
+	void insertToken( const TokenType& tok, BoolForwarder<true>  tokens_accessed_from_owning_rule  );
+	void insertToken( const TokenType& tok, BoolForwarder<false>  tokens_accessed_from_owning_rule  );
+
+	/** Get a token at an absolute index i; 0..n-1.  This is really only
+     *  needed for profiling and debugging and token stream rewriting.
+     *  If you don't want to buffer up tokens, then this method makes no
+     *  sense for you.  Naturally you can't use the rewrite stream feature.
+     *  I believe DebugTokenStream can easily be altered to not use
+     *  this method, removing the dependency.
+     */
+    const TokenType*   get(ANTLR_MARKER i);
+	const TokenType*   getToken(ANTLR_MARKER i);
+	const TokenType* getToken( ANTLR_MARKER tok_idx, BoolForwarder<true>  tokens_accessed_from_owning_rule );
+	const TokenType* getToken( ANTLR_MARKER tok_idx, BoolForwarder<false>  tokens_accessed_from_owning_rule  );
+
+    /** Signal to discard off channel tokens from here on in.
+     */
+    void discardOffChannelToks(bool discard);
+
+    /** Function that returns a pointer to the ANTLR3_LIST of all tokens
+     *  in the stream (this causes the buffer to fill if we have not fetched any yet)
+     */
+    TokensType*	getTokens();
+
+    /** Function that returns all the tokens between a start and a stop index.
+     */
+    void getTokenRange(ANTLR_UINT32 start, ANTLR_UINT32 stop, TokensListType& tokenRange);
+
+    /** Function that returns all the tokens indicated by the specified bitset, within a range of tokens
+     */
+    void getTokensSet(ANTLR_UINT32 start, ANTLR_UINT32 stop, BitsetType* types, TokensListType& tokenSet);
+
+    /** Function that returns all the tokens indicated by being a member of the supplied List
+     */
+    void getTokensList(ANTLR_UINT32 start, ANTLR_UINT32 stop,
+									const IntListType& list, TokensListType& tokenList);
+
+    /** Function that returns all tokens of a certain type within a range.
+     */
+    void getTokensType(ANTLR_UINT32 start, ANTLR_UINT32 stop, ANTLR_UINT32 type, TokensListType& tokens);
+
+    /** Function that resets the token stream so that it can be reused, but
+     *  that does not free up any resources, such as the token factory,
+     *  the factory pool and so on. This prevents the need to keep freeing
+     *  and reallocating the token pools if the thing you are building is
+     *  a multi-shot daemon or something like that. It is much faster to
+     *  just reuse all the vectors.
+     */
+    void  reset();
+
+	const TokenType* LB(ANTLR_INT32 k);
+
+
+	void fillBufferExt();
+	void fillBuffer();
+
+	bool hasReachedFillbufferTarget( ANTLR_UINT32 cnt, BoolForwarder<true>  tokens_accessed_from_owning_rule  );
+	bool hasReachedFillbufferTarget( ANTLR_UINT32 cnt, BoolForwarder<false>  tokens_accessed_from_owning_rule  );
+
+	ANTLR_UINT32 skipOffTokenChannels(ANTLR_INT32 i);
+	ANTLR_UINT32 skipOffTokenChannelsReverse(ANTLR_INT32 x);
+	ANTLR_MARKER index_impl();
+};
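+
+// Illustrative usage sketch: the common token stream sits between a generated lexer (its
+// TokenSource) and a generated parser. "MyLexer", "MyParser", "MyTraits" and "input" are
+// hypothetical names for generated classes, a Traits instantiation and an already
+// constructed input stream; the generated constructor signatures are assumptions, not
+// defined here. The size hint passed to the constructor is not used by this implementation.
+//
+//   MyLexer                   lexer(&input);       // the lexer is the TokenSource
+//   MyTraits::TokenStreamType tokens(0, &lexer);   // CommonTokenStream over the lexer
+//   tokens.discardTokenType(MyLexer::WS);          // e.g. throw whitespace away entirely
+//   MyParser                  parser(&tokens);     // the parser consumes the stream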
+
+class TokenAccessException : public std::exception
+{
+	virtual const char* what() const throw()
+	{
+		return " Attempted access on Deleted Token";
+	}
+};
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3tokenstream.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3tokenstream.inl b/runtime/Cpp/include/antlr3tokenstream.inl
new file mode 100755
index 0000000..9c1c5a9
--- /dev/null
+++ b/runtime/Cpp/include/antlr3tokenstream.inl
@@ -0,0 +1,937 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+TokenSource<ImplTraits>::TokenSource()
+	:m_eofToken( ImplTraits::CommonTokenType::TOKEN_EOF), 
+	m_skipToken( ImplTraits::CommonTokenType::TOKEN_INVALID)
+{
+}
+
+template<class ImplTraits>
+ANTLR_INLINE typename TokenSource<ImplTraits>::CommonTokenType& TokenSource<ImplTraits>::get_eofToken()
+{
+	return m_eofToken;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE const typename TokenSource<ImplTraits>::TokenType& TokenSource<ImplTraits>::get_eofToken() const
+{
+	return m_eofToken;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE typename TokenSource<ImplTraits>::CommonTokenType& TokenSource<ImplTraits>::get_skipToken()
+{
+	return m_skipToken;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE typename TokenSource<ImplTraits>::StringType& TokenSource<ImplTraits>::get_fileName()
+{
+	return m_fileName;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void TokenSource<ImplTraits>::set_fileName( const StringType& fileName )
+{
+	m_fileName = fileName;
+}
+
+template<class ImplTraits>
+typename TokenSource<ImplTraits>::LexerType* TokenSource<ImplTraits>::get_super()
+{
+	return static_cast<LexerType*>(this);
+}
+
+template<class ImplTraits>
+typename TokenSource<ImplTraits>::TokenType*	TokenSource<ImplTraits>::nextTokenStr()
+{
+	typedef typename LexerType::RecognizerSharedStateType RecognizerSharedStateType;
+	typedef typename LexerType::InputStreamType InputStreamType;
+	typedef typename LexerType::IntStreamType IntStreamType;
+	LexerType*                  lexer;
+    RecognizerSharedStateType*	state;
+    InputStreamType*            input;
+    IntStreamType*              istream;
+
+    lexer   = this->get_super();
+    state   = lexer->get_rec()->get_state();
+    input   = lexer->get_input();
+    istream = input->get_istream();
+
+    /// Loop until we get a non skipped token or EOF
+    ///
+    for	(;;)
+    {
+        // Get rid of any previous token (token factory takes care of
+        // any de-allocation when this token is finally used up).
+        //
+        state->set_token_present(false);
+        state->set_error(false);	    // Start out without an exception
+        state->set_failed(false);
+
+        // Now call the matching rules and see if we can generate a new token
+        //
+        for	(;;)
+        {
+            // Record the start of the token in our input stream.
+            //
+            state->set_channel( TOKEN_DEFAULT_CHANNEL );
+            state->set_tokenStartCharIndex( (ANTLR_MARKER)input->get_nextChar() );
+            state->set_tokenStartCharPositionInLine( input->get_charPositionInLine() );
+            state->set_tokenStartLine( input->get_line() );
+            state->set_text("");
+
+            if  (istream->_LA(1) == ANTLR_CHARSTREAM_EOF)
+            {
+                // Reached the end of the current stream, nothing more to do if this is
+                // the last in the stack.
+                //
+                TokenType&    teof = m_eofToken;
+
+                teof.set_startIndex(lexer->getCharIndex());
+                teof.set_stopIndex(lexer->getCharIndex());
+                teof.set_line(lexer->getLine());
+                return  &teof;
+            }
+
+            state->set_token_present( false );
+            state->set_error(false);	    // Start out without an exception
+            state->set_failed(false);
+
+            // Call the generated lexer, see if it can get a new token together.
+            //
+            lexer->mTokens();
+
+            if  (state->get_error()  == true)
+            {
+                // Recognition exception, report it and try to recover.
+                //
+                state->set_failed(true);
+                lexer->get_rec()->reportError();
+                lexer->recover(); 
+            }
+            else
+            {
+                if ( !state->get_token_present() )
+                {
+                    // Emit the real token, which adds it in to the token stream basically
+                    //
+                    lexer->emit();
+                }
+                else if	( *(state->get_token()) ==  m_skipToken )
+                {
+                    // A real token could have been generated, but "Computer says naaaaah" and
+                    // it is just something we need to skip altogether.
+                    //
+                    continue;
+                }
+
+                // Good token, not skipped, not EOF token
+                //
+                return  state->get_token();
+            }
+        }
+    }
+}
+
+template<class ImplTraits>
+typename TokenSource<ImplTraits>::TokenType*  TokenSource<ImplTraits>::nextToken()
+{
+	return this->nextToken( BoolForwarder<LexerType::IsFiltered>() );
+}
+
+template<class ImplTraits>
+typename TokenSource<ImplTraits>::CommonTokenType*	TokenSource<ImplTraits>::nextToken( BoolForwarder<true> /*isFiltered*/ )
+{
+	LexerType*   lexer;
+	typename LexerType::RecognizerSharedStateType* state;
+
+	lexer   = this->get_super();
+	state	= lexer->get_lexstate();
+
+	/* Get rid of any previous token (token factory takes care of
+		* any deallocation when this token is finally used up).
+		*/
+	state->set_token_present( false );
+	state->set_error( false );	    /* Start out without an exception	*/
+	state->set_failed(false);
+
+	/* Record the start of the token in our input stream.
+		*/
+	state->set_tokenStartCharIndex( lexer->index() );
+	state->set_tokenStartCharPositionInLine( lexer->getCharPositionInLine() );
+	state->set_tokenStartLine( lexer->getLine() );
+	state->set_text("");
+
+	/* Now call the matching rules and see if we can generate a new token
+		*/
+	for	(;;)
+	{
+		if (lexer->LA(1) == ANTLR_CHARSTREAM_EOF)
+		{
+			/* Reached the end of the stream, nothing more to do.
+				*/
+			CommonTokenType&    teof = m_eofToken;
+
+			teof.set_startIndex(lexer->getCharIndex());
+			teof.set_stopIndex(lexer->getCharIndex());
+			teof.set_line(lexer->getLine());
+			return  &teof;
+		}
+
+		state->set_token_present(false);
+		state->set_error(false);	    /* Start out without an exception	*/
+
+		{
+			ANTLR_MARKER   m;
+
+			m	= lexer->get_istream()->mark();
+			state->set_backtracking(1);				/* No exceptions */
+			state->set_failed(false);
+
+			/* Call the generated lexer, see if it can get a new token together.
+				*/
+			lexer->mTokens();
+    		state->set_backtracking(0);
+
+    		/* mTokens backtracks with synpred at BACKTRACKING==2
+				and we set the synpredgate to allow actions at level 1. */
+
+			if(state->get_failed())
+			{
+				lexer->rewind(m);
+				lexer->consume(); // advance one char and try again
+			}
+			else
+			{
+				lexer->emit();					/* Assemble the token and emit it to the stream */
+				TokenType* tok = state->get_token();
+				return tok;
+			}
+		}
+	}
+}
+
+template<class ImplTraits>
+typename TokenSource<ImplTraits>::CommonTokenType*	TokenSource<ImplTraits>::nextToken( BoolForwarder<false> /*isFiltered*/ )
+{
+	// Find the next token in the current stream
+	//
+	CommonTokenType* tok = this->nextTokenStr();
+
+	// If we got to the EOF token then switch to the previous
+	// input stream if there were any and just return the
+	// EOF if there are none. We must check the next token
+	// in any outstanding input stream we pop into the active
+	// role to see if it was sitting at EOF after PUSHing the
+	// stream we just consumed, otherwise we will return EOF
+	// on the reinstalled input stream, when in actual fact
+	// there might be more input streams to POP before the
+	// real EOF of the whole logical input stream. Hence we
+	// use a while loop here until we find something in the stream
+	// that isn't EOF or we reach the actual end of the last input
+	// stream on the stack.
+	//
+	while(tok->get_type() == CommonTokenType::TOKEN_EOF)
+	{
+		typename ImplTraits::LexerType*   lexer;
+		lexer   = static_cast<typename ImplTraits::LexerType*>( this->get_super() );
+
+		if  ( lexer->get_rec()->get_state()->get_streams().size() > 0)
+		{
+			// We have another input stream in the stack so we
+			// need to revert to it, then resume the loop to check
+			// it wasn't sitting at EOF itself.
+			//
+			lexer->popCharStream();
+			tok = this->nextTokenStr();
+		}
+		else
+		{
+			// There were no more streams on the input stack
+			// so this EOF is the 'real' logical EOF for
+			// the input stream. So we just exit the loop and 
+			// return the EOF we have found.
+			//
+			break;
+		}
+		
+	}
+
+	// return whatever token we have, which may be EOF
+	//
+	return  tok;
+}
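+
+// Illustrative note on the loop above: this is what makes nested input (for example
+// C-style include handling) work. A lexer action pushes the included character stream
+// onto the lexer's stream stack (the counterpart of the popCharStream() call used above;
+// the exact name of the push call is provided by the lexer, not by this file), and when
+// that inner stream reaches EOF the loop pops back to the enclosing stream instead of
+// reporting end of input prematurely.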
+
+template<class ImplTraits>
+TokenStream<ImplTraits>::TokenStream()
+{
+	m_tokenSource = NULL;
+	m_debugger = NULL;
+	m_initialStreamState = false;
+}
+
+template<class ImplTraits>
+typename TokenStream<ImplTraits>::IntStreamType* TokenStream<ImplTraits>::get_istream()
+{
+	return this;
+}
+
+template<class ImplTraits>
+TokenStream<ImplTraits>::TokenStream(TokenSourceType* source, DebugEventListenerType* debugger)
+{
+	m_initialStreamState = false;
+	m_tokenSource = source;
+	m_debugger = debugger;
+}
+
+template<class ImplTraits>
+CommonTokenStream<ImplTraits>::CommonTokenStream(ANTLR_UINT32 , TokenSourceType* source, 
+													DebugEventListenerType* debugger)
+					: CommonTokenStream<ImplTraits>::BaseType( source, debugger )
+{
+	m_p = -1;
+	m_channel = TOKEN_DEFAULT_CHANNEL;
+	m_discardOffChannel = false;
+	m_nissued = 0;
+}
+
+template<class ImplTraits>
+typename CommonTokenStream<ImplTraits>::TokensType& CommonTokenStream<ImplTraits>::get_tokens()
+{
+	return m_tokens;
+}
+
+template<class ImplTraits>
+const typename CommonTokenStream<ImplTraits>::TokensType& CommonTokenStream<ImplTraits>::get_tokens() const
+{
+	return m_tokens;
+}
+
+template<class ImplTraits>
+typename CommonTokenStream<ImplTraits>::DiscardSetType& CommonTokenStream<ImplTraits>::get_discardSet()
+{
+	return m_discardSet;
+}
+
+template<class ImplTraits>
+const typename CommonTokenStream<ImplTraits>::DiscardSetType& CommonTokenStream<ImplTraits>::get_discardSet() const
+{
+	return m_discardSet;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_INT32 CommonTokenStream<ImplTraits>::get_p() const
+{
+	return m_p;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void CommonTokenStream<ImplTraits>::set_p( ANTLR_INT32 p )
+{
+	m_p = p;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void CommonTokenStream<ImplTraits>::inc_p()
+{
+	++m_p;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE void CommonTokenStream<ImplTraits>::dec_p()
+{
+	--m_p;
+}
+
+template<class ImplTraits>
+ANTLR_INLINE ANTLR_MARKER CommonTokenStream<ImplTraits>::index_impl()
+{
+	return m_p;
+}
+
+// Reset a token stream so it can be used again and can reuse its
+// resources.
+//
+template<class ImplTraits>
+void  CommonTokenStream<ImplTraits>::reset()
+{
+	// Free any resources that are most likely specific to the
+    // run we just did.
+    //
+	m_discardSet.clear();
+	m_channelOverrides.clear();
+
+    // Now, if there were any existing tokens in the stream,
+    // then we just reset the vector count so that it starts
+    // again. We must traverse the entries unfortunately as
+    // there may be free pointers for custom token types and
+    // so on. However that is just a quick NULL check on the
+    // vector entries.
+    //
+	m_tokens.clear();
+
+    // Reset to defaults
+    //
+    m_discardOffChannel  = false;
+    m_channel            = ImplTraits::CommonTokenType::TOKEN_DEFAULT_CHANNEL;
+    m_p	            = -1;
+}
+
+template<class ImplTraits>
+void	TokenStream<ImplTraits>::setDebugListener(DebugEventListenerType* debugger)
+{
+	m_debugger = debugger;
+	m_initialStreamState = false;
+}
+
+template<class ImplTraits>
+const typename TokenStream<ImplTraits>::TokenType*  TokenStream<ImplTraits>::_LT(ANTLR_INT32 k)
+{
+	ANTLR_INT32    i;
+	ANTLR_INT32    n;
+	TokenStreamType* cts;
+
+	cts	    = this->get_super();
+
+    if(k < 0)
+	{
+		return cts->LB(-k);
+	}
+
+	ANTLR_INT32 req_idx = cts->get_p() + k - 1;
+	ANTLR_INT32 cached_size = static_cast<ANTLR_INT32>(this->get_istream()->get_cachedSize());
+
+	if(	(cts->get_p() == -1) ||
+		( ( req_idx >= cached_size ) && ( (cached_size % ImplTraits::TOKEN_FILL_BUFFER_INCREMENT) == 0 ) )
+	  )
+	{
+		cts->fillBuffer();
+	}
+
+    // Here we used to check for k == 0 and return 0, but this seems
+    // a superfluous check to me. LT(k=0) is therefore just undefined
+    // and we won't waste the clock cycles on the check
+    //
+	cached_size = static_cast<ANTLR_INT32>(this->get_istream()->get_cachedSize());
+	if	( req_idx >= cached_size )
+	{
+		TokenType&    teof = cts->get_tokenSource()->get_eofToken();
+
+		teof.set_startIndex( this->get_istream()->index());
+		teof.set_stopIndex( this->get_istream()->index());
+		return  &teof;
+	}
+
+	i	= cts->get_p();
+	n	= 1;
+
+	/* Need to find k good tokens, skipping ones that are off channel
+	*/
+	while( n < k)
+	{
+		/* Skip off-channel tokens */
+		i = cts->skipOffTokenChannels(i+1); /* leave p on valid token    */
+		n++;
+	}
+	
+	if( ( i >= cached_size ) && ( (cached_size % ImplTraits::TOKEN_FILL_BUFFER_INCREMENT) == 0 ) )
+	{
+		cts->fillBuffer();
+	}
+	if	( (ANTLR_UINT32) i >= this->get_istream()->get_cachedSize() )
+	{
+		TokenType&    teof = cts->get_tokenSource()->get_eofToken();
+
+		teof.set_startIndex(this->get_istream()->index());
+		teof.set_stopIndex(this->get_istream()->index());
+		return  &teof;
+	}
+
+	// Here the token must be in the input vector. Rather than incur
+	// function call penalty, we just return the pointer directly
+	// from the vector
+	//
+	return cts->getToken(i);
+}
+
+template<class ImplTraits>
+const typename CommonTokenStream<ImplTraits>::TokenType* CommonTokenStream<ImplTraits>::LB(ANTLR_INT32 k)
+{
+    ANTLR_INT32 i;
+    ANTLR_INT32 n;
+
+    if (m_p == -1)
+    {
+        this->fillBuffer();
+    }
+    if (k == 0)
+    {
+        return NULL;
+    }
+    if ((m_p - k) < 0)
+    {
+        return NULL;
+    }
+
+    i = m_p;
+    n = 1;
+
+    /* Need to find k good tokens, going backwards, skipping ones that are off channel
+     */
+    while (n <= k)
+    {
+        /* Skip off-channel tokens
+         */
+
+        i = this->skipOffTokenChannelsReverse(i - 1); /* leave p on valid token    */
+        n++;
+    }
+    if (i < 0)
+    {
+        return NULL;
+    }
+	
+	// Here the token must be in the input vector. Rather than incur
+	// function call penalty, we just return the pointer directly
+	// from the vector
+	//
+	return this->getToken(i);
+}
+
+template<class ImplTraits>
+const typename CommonTokenStream<ImplTraits>::TokenType*   CommonTokenStream<ImplTraits>::getToken(ANTLR_MARKER i)
+{
+	return this->get(i);
+}
+
+
+template<class ImplTraits>
+const typename CommonTokenStream<ImplTraits>::TokenType* CommonTokenStream<ImplTraits>::get(ANTLR_MARKER i)
+{
+	return this->getToken( static_cast<ANTLR_MARKER>(i), 
+							BoolForwarder<ImplTraits::TOKENS_ACCESSED_FROM_OWNING_RULE>() );
+}
+
+template<class ImplTraits>
+const typename CommonTokenStream<ImplTraits>::TokenType* CommonTokenStream<ImplTraits>::getToken( ANTLR_MARKER tok_idx,
+															BoolForwarder<true>  /*tokens_accessed_from_owning_rule*/  )
+{
+	typename TokensType::iterator iter = m_tokens.find(tok_idx);
+	if( iter == m_tokens.end() )
+	{
+		TokenAccessException ex;
+		throw ex;
+	}
+	const TokenType& tok = iter->second;
+    return  &tok; 
+}
+
+template<class ImplTraits>
+const typename CommonTokenStream<ImplTraits>::TokenType* CommonTokenStream<ImplTraits>::getToken( ANTLR_MARKER tok_idx, BoolForwarder<false>  /*tokens_accessed_from_owning_rule*/   )
+{
+	TokenType& tok = m_tokens.at( static_cast<ANTLR_UINT32>(tok_idx) );
+    return  &tok; 
+}
+
+template<class ImplTraits>
+typename TokenStream<ImplTraits>::TokenSourceType* TokenStream<ImplTraits>::get_tokenSource() const
+{
+	return m_tokenSource;
+}
+
+template<class ImplTraits>
+void TokenStream<ImplTraits>::set_tokenSource( TokenSourceType* tokenSource )
+{
+	m_tokenSource = tokenSource;
+}
+
+template<class ImplTraits>
+typename TokenStream<ImplTraits>::StringType	TokenStream<ImplTraits>::toString()
+{
+	TokenStreamType* cts = static_cast<TokenStreamType*>(this);
+
+	if	(cts->get_p() == -1)
+    {
+		cts->fillBuffer();
+    }
+
+    return  this->toStringSS(0, this->get_istream()->size());
+}
+
+template<class ImplTraits>
+typename TokenStream<ImplTraits>::StringType
+TokenStream<ImplTraits>::toStringSS(ANTLR_MARKER start, ANTLR_MARKER stop)
+{
+    StringType string;
+    TokenSourceType* tsource;
+    const TokenType* tok;
+    TokenStreamType* cts;
+
+    cts = this->get_super();
+
+    if (cts->get_p() == -1)
+    {
+        cts->fillBuffer();
+    }
+    if (stop >= this->get_istream()->size())
+    {
+        stop = this->get_istream()->size() - 1;
+    }
+
+    /* Who is giving us these tokens?
+     */
+    tsource = cts->get_tokenSource();
+
+    if (tsource != NULL && !cts->get_tokens().empty() )
+    {
+        /* Finally, let's get a string
+         */
+        for (ANTLR_MARKER i = start; i <= stop; i++)
+        {
+            tok = cts->get(i);
+            if (tok != NULL)
+            {
+                string.append( tok->getText() );
+            }
+        }
+
+        return string;
+    }
+    return "";
+}
+
+template<class ImplTraits>
+typename TokenStream<ImplTraits>::StringType
+TokenStream<ImplTraits>::toStringTT(const TokenType* start, const TokenType* stop)
+{
+	if	(start != NULL && stop != NULL)
+	{
+		return	this->toStringSS( start->get_tokenIndex(), 
+								  stop->get_tokenIndex());
+	}
+	else
+	{
+		return	"";
+	}
+}
+
+/** A simple filter mechanism whereby you can tell this token stream
+ *  to force all tokens of type ttype to be on channel.  For example,
+ *  when interpreting, we cannot execute actions so we need to tell
+ *  the stream to force all WS and NEWLINE to be a different, ignored,
+ *  channel.
+ */
+template<class ImplTraits>
+void	CommonTokenStream<ImplTraits>::setTokenTypeChannel ( ANTLR_UINT32 ttype, ANTLR_UINT32 channel)
+{
+    /* We add one to the channel so we can distinguish NULL as being no entry in the
+     * table for a particular token type.
+     */
+    m_channelOverrides[ttype] = (ANTLR_UINT32)channel + 1;
+
+}
+
+template<class ImplTraits>
+void  CommonTokenStream<ImplTraits>::discardTokenType(ANTLR_INT32 ttype)
+{
+	 /* Simply add the token type to the discard set; any token whose type is found in
+     * this set will be thrown away. We could use bitsets for this I suppose too.
+     */
+	m_discardSet.insert(ttype);
+}
+
+template<class ImplTraits>
+void CommonTokenStream<ImplTraits>::discardOffChannelToks(bool discard)
+{
+	m_discardOffChannel = discard;
+}
+
+template<class ImplTraits>
+typename CommonTokenStream<ImplTraits>::TokensType*  CommonTokenStream<ImplTraits>::getTokens()
+{
+	if	(m_p == -1)
+    {
+		this->fillBuffer();
+    }
+
+    return  &m_tokens;
+}
+
+template<class ImplTraits>
+void CommonTokenStream<ImplTraits>::getTokenRange(ANTLR_UINT32 start, ANTLR_UINT32 stop, 
+																	TokensListType& tokenRange)
+{
+	return this->getTokensSet(start, stop, NULL, tokenRange);
+}
+
+/** Given a start and stop index, return a List of all tokens in
+ *  the token type BitSet.  Return null if no tokens were found.  This
+ *  method looks at both on and off channel tokens.
+ */
+template<class ImplTraits>
+void
+CommonTokenStream<ImplTraits>::getTokensSet(ANTLR_UINT32 start, ANTLR_UINT32 stop, BitsetType* types,
+                                                    TokensListType& filteredList )
+{
+    ANTLR_UINT32	    i;
+    ANTLR_UINT32	    n;
+    const TokenType*	tok;
+
+    if	( m_p == -1)
+    {
+		this->fillBuffer();
+    }
+    if	(stop > this->get_istream()->size())
+    {
+		stop = this->get_istream()->size();
+    }
+    if	(start > stop)
+    {
+		return;
+    }
+
+    /* We have the range set, now we need to iterate through the
+     * installed tokens and create a new list with just the ones we want
+     * in it. We are just moving pointers about really.
+     */
+    for(i = start, n = 0; i<= stop; i++)
+    {
+		tok = this->get(i);
+
+		if  (	   types == NULL
+			|| (types->isMember( tok->get_type() ) == true )
+			)
+		{
+			filteredList.push_back(*tok);
+		}
+	}
+    
+    return ;
+}
+
+template<class ImplTraits>
+void
+CommonTokenStream<ImplTraits>::getTokensList(ANTLR_UINT32 start, ANTLR_UINT32 stop, 
+													const IntListType& list, TokensListType& newlist)
+{
+    BitsetType*		bitSet;
+
+    bitSet  = Bitset<ImplTraits>::BitsetFromList(list);
+    this->getTokensSet(start, stop, bitSet, newlist);
+    delete bitSet;
+}
+
+template<class ImplTraits>
+void 
+CommonTokenStream<ImplTraits>::getTokensType(ANTLR_UINT32 start, ANTLR_UINT32 stop, ANTLR_UINT32 type,
+                                                  TokensListType& newlist   )
+{
+    BitsetType*  bitSet;
+
+    bitSet  = BitsetType::BitsetOf(type, -1);
+    this->getTokensSet(start, stop, bitSet, newlist);
+
+    delete bitSet;
+}
+
+template<class ImplTraits>
+void CommonTokenStream<ImplTraits>::fillBufferExt()
+{
+    this->fillBuffer();
+}
+
+template<class ImplTraits>
+bool CommonTokenStream<ImplTraits>::hasReachedFillbufferTarget( ANTLR_UINT32 cnt, 
+																BoolForwarder<true> )
+{
+	return ( cnt >= ImplTraits::TOKEN_FILL_BUFFER_INCREMENT );
+}
+
+template<class ImplTraits>
+bool CommonTokenStream<ImplTraits>::hasReachedFillbufferTarget( ANTLR_UINT32, 
+																BoolForwarder<false>  )
+{
+	return false;
+}
+
+
+template<class ImplTraits>
+void CommonTokenStream<ImplTraits>::fillBuffer() 
+{
+    ANTLR_UINT32 index;
+    TokenType* tok;
+    bool discard;
+    
+    /* Start at index 0 of course
+     */
+	ANTLR_UINT32 cached_p = (m_p < 0) ? 0 : m_p;
+    index = m_nissued;
+	ANTLR_UINT32 cnt = 0;
+
+    /* Pick out the next token from the token source
+     * Remember we just get a pointer (reference if you like) here
+     * and so if we store it anywhere, we don't set any pointers to auto free it.
+     */
+    tok = this->get_tokenSource()->nextToken();
+
+    while ( tok->get_type() != TokenType::TOKEN_EOF )
+    {
+        discard = false; /* Assume we are not discarding	*/
+
+        /* A small trick, or perhaps hack, is employed here. Rather than store the
+         * channel number directly in the override map, we store the value + 1, so
+         * that a stored value of 0 can still be distinguished from "no entry for
+         * this token type".
+         */
+
+        if ( m_discardSet.find(tok->get_type()) != m_discardSet.end() )
+        {
+            discard = true;
+        }
+        else if (   m_discardOffChannel == true
+                 && tok->get_channel() != m_channel
+                 )
+        {
+            discard = true;
+        }
+        else if (!m_channelOverrides.empty())
+        {
+            /* See if this type is in the override map
+             */
+			typename ChannelOverridesType::iterator iter = m_channelOverrides.find( tok->get_type() );
+
+            if (iter != m_channelOverrides.end())
+            {
+                /* Override found
+                 */
+                tok->set_channel( ANTLR_UINT32_CAST(iter->second) - 1);
+            }
+        }
+
+        /* If not discarding it, add it to the list at the current index
+         */
+        if (discard == false)
+        {
+            /* Add it, indicating that we will delete it and the table should not
+             */
+            tok->set_tokenIndex(index);
+            ++m_p;
+            this->insertToken(*tok);
+            index++;
+			m_nissued++;
+			cnt++;
+        }
+
+		if( !this->hasReachedFillbufferTarget( cnt, 
+						BoolForwarder<ImplTraits::TOKENS_ACCESSED_FROM_OWNING_RULE>()  ) )
+			tok = this->get_tokenSource()->nextToken();
+		else
+			break;
+    }
+
+    /* Cache the size so we don't keep doing indirect method calls. We do this as
+     * early as possible so that anything after this may utilize the cached value.
+     */
+    this->get_istream()->set_cachedSize( m_nissued );
+
+    /* Set the consume pointer to the first on-channel token among those we just read
+     */
+    m_p = cached_p;
+    m_p = this->skipOffTokenChannels( m_p );
+
+}
+/// Given a starting index, return the index of the first on-channel
+///  token.
+///
+template<class ImplTraits>
+ANTLR_UINT32 CommonTokenStream<ImplTraits>::skipOffTokenChannels(ANTLR_INT32 i)
+{
+    ANTLR_INT32 n;
+    n = this->get_istream()->get_cachedSize();
+
+    while (i < n)
+    {
+        const TokenType* tok =  this->getToken(i);
+
+        if (tok->get_channel() != m_channel )
+        {
+            i++;
+        }
+        else
+        {
+            return i;
+        }
+    }
+    return i;
+}
+
+template<class ImplTraits>
+ANTLR_UINT32  CommonTokenStream<ImplTraits>::skipOffTokenChannelsReverse(ANTLR_INT32 x)
+{
+    while (x >= 0)
+    {
+        const TokenType* tok =  this->getToken(x);
+        
+        if( tok->get_channel() != m_channel )
+        {
+            x--;
+        }
+        else
+        {
+            return x;
+        }
+    }
+    return x;
+}
+
+template<class ImplTraits>
+void CommonTokenStream<ImplTraits>::discardTokens( ANTLR_MARKER start, ANTLR_MARKER stop )
+{
+	this->discardTokens( start, stop, BoolForwarder< ImplTraits::TOKENS_ACCESSED_FROM_OWNING_RULE >() );
+}
+
+template<class ImplTraits>
+void CommonTokenStream<ImplTraits>::discardTokens( ANTLR_MARKER start, ANTLR_MARKER stop, 
+											BoolForwarder<true>  /*tokens_accessed_from_owning_rule */ )
+{
+	typename TokensType::iterator iter1 = m_tokens.lower_bound(start);
+	typename TokensType::iterator iter2 = m_tokens.upper_bound(stop);
+	m_tokens.erase( iter1, iter2 );
+}
+
+template<class ImplTraits>
+void CommonTokenStream<ImplTraits>::discardTokens( ANTLR_MARKER start, ANTLR_MARKER stop, 
+											BoolForwarder<false>  /*tokens_accessed_from_owning_rule*/ )
+{
+	m_tokens.erase( m_tokens.begin() + start, m_tokens.begin() + stop );
+}
+
+template<class ImplTraits>
+void CommonTokenStream<ImplTraits>::insertToken( const TokenType& tok )
+{
+	this->insertToken( tok, BoolForwarder< ImplTraits::TOKENS_ACCESSED_FROM_OWNING_RULE >() );
+}
+
+template<class ImplTraits>
+void CommonTokenStream<ImplTraits>::insertToken( const TokenType& tok, BoolForwarder<true>  /*tokens_accessed_from_owning_rule*/  )
+{
+	assert( m_tokens.find( tok.get_index() ) == m_tokens.end() );
+	assert( tok.get_index() == m_nissued );
+	m_tokens[ tok.get_index() ] = tok;
+}
+
+template<class ImplTraits>
+void CommonTokenStream<ImplTraits>::insertToken( const TokenType& tok, BoolForwarder<false>  /*tokens_accessed_from_owning_rule*/  )
+{
+	m_tokens.push_back( tok );
+}
+
+template<class ImplTraits>
+CommonTokenStream<ImplTraits>::~CommonTokenStream()
+{
+	m_tokens.clear();
+}
+
+ANTLR_END_NAMESPACE()
diff --git a/runtime/Cpp/include/antlr3traits.hpp b/runtime/Cpp/include/antlr3traits.hpp
new file mode 100755
index 0000000..c876044
--- /dev/null
+++ b/runtime/Cpp/include/antlr3traits.hpp
@@ -0,0 +1,320 @@
+#ifndef _ANTLR3_TRAITS_HPP
+#define _ANTLR3_TRAITS_HPP
+
+#include	"antlr3defs.hpp"
+
+
+ANTLR_BEGIN_NAMESPACE()
+	
+//Users implementing overrides should inherit from this
+template<class ImplTraits>
+class CustomTraitsBase
+{
+public:
+	typedef Empty AllocPolicyType;
+	typedef Empty StringType;
+	typedef Empty StringStreamType;
+	typedef Empty StreamDataType;
+	typedef Empty Endianness;
+
+	//collections
+	typedef Empty BitsetType;
+	typedef Empty BitsetListType;
+
+	typedef Empty InputStreamType;
+
+	template<class StreamType>
+	class IntStreamType : public Empty
+	{
+	public:
+		typedef Empty BaseType;
+	};
+
+	typedef Empty LexStateType;
+
+	typedef Empty CommonTokenType;
+	typedef Empty TokenIntStreamType;
+
+	typedef Empty TokenStreamType;
+	typedef Empty TreeNodeStreamType;
+
+
+	typedef Empty DebugEventListenerType;
+	template<class StreamType>
+	class RecognizerSharedStateType : public Empty
+	{
+	public:
+		typedef Empty BaseType;
+	};
+
+	template<class StreamType>
+	class RecognizerType : public Empty
+	{
+	public:
+		typedef Empty BaseType;
+	};
+	
+	typedef Empty TreeType;
+	typedef Empty TreeAdaptorType;
+	
+	template<class StreamType>
+	class ExceptionBaseType : public Empty
+	{
+	public:
+		typedef Empty BaseType;
+	};
+
+	//this should be overridden with generated lexer
+	typedef Empty BaseLexerType;
+	
+	typedef Empty TokenSourceType;
+	typedef Empty BaseParserType;//this should be overridden with generated parser
+	typedef Empty BaseTreeParserType;
+	
+	template<class StreamType>
+	class RewriteStreamType : public Empty
+	{
+	public:
+		typedef Empty BaseType;
+	};
+
+	typedef Empty  RuleReturnValueType;
+	
+	//If we want to change the way tokens are stored
+	static const bool TOKENS_ACCESSED_FROM_OWNING_RULE = false;
+	static const int  TOKEN_FILL_BUFFER_INCREMENT = 100; //used only if the above val is true
+
+	static void displayRecognitionError( const std::string& str ) {  printf("%s", str.c_str() ); }
+};
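+
+// Illustrative usage sketch ("MyLexer" and "MyParser" stand for ANTLR-generated classes,
+// which are not part of this runtime): overrides are supplied by deriving from
+// CustomTraitsBase and passing the result to the Traits template defined further below.
+//
+//   class MyLexer;   // generated
+//   class MyParser;  // generated
+//
+//   template<class ImplTraits>
+//   class MyUserTraits : public antlr3::CustomTraitsBase<ImplTraits>
+//   {
+//   public:
+//       // Keep tokens in the ordered map store so each rule may discard its own tokens.
+//       static const bool TOKENS_ACCESSED_FROM_OWNING_RULE = true;
+//   };
+//
+//   typedef antlr3::Traits<MyLexer, MyParser, MyUserTraits> MyTraits;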
+
+template<class A, class B>
+class TraitsSelector
+{
+public:
+	typedef A selected;
+};
+
+template<class B>
+class TraitsSelector<Empty, B>
+{
+public:
+	typedef B selected;
+};
+
+template<class A, class B, class C>
+class TraitsOneArgSelector
+{
+public:
+	typedef A selected;
+};
+
+template<class A, class B>
+class TraitsOneArgSelector<A,B,Empty>
+{
+public:
+	typedef B selected;
+};
+
+template<bool v, class A, class B>
+class BoolSelector
+{
+public:
+	typedef A selected;
+};
+
+template<class A, class B>
+class BoolSelector<false, A, B>
+{
+public:
+	typedef B selected;
+};
+
+template< template<class ImplTraits> class UserTraits >
+class TraitsBase
+{
+public:
+	typedef TraitsBase  TraitsType;
+	
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::AllocPolicyType, DefaultAllocPolicy >::selected  AllocPolicyType;
+
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::StringType, 
+									std::string >::selected StringType;
+	
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::StringStreamType, 
+		                             std::stringstream >::selected StringStreamType;
+
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::StreamDataType, 
+		                             ANTLR_UINT8 >::selected StreamDataType;
+	
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::Endianness, 
+									 RESOLVE_ENDIAN_AT_RUNTIME >::selected Endianness;
+
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::BitsetType, 
+		                             Bitset<TraitsType> >::selected BitsetType;
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::BitsetListType, 
+		                             BitsetList<TraitsType> >::selected BitsetListType;
+
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::InputStreamType, 
+		                              InputStream<TraitsType> >::selected InputStreamType;
+
+	template<class SuperType>
+	class IntStreamType 
+		: public TraitsOneArgSelector< 
+					typename UserTraits<TraitsType>::template IntStreamType<SuperType>, 
+		            IntStream<TraitsType, SuperType>,
+					typename UserTraits<TraitsType>::template IntStreamType<SuperType>::BaseType
+								>::selected  
+	{ };
+	
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::LexStateType, 
+		                             LexState<TraitsType> >::selected LexStateType;
+
+	static const bool TOKENS_ACCESSED_FROM_OWNING_RULE = UserTraits<TraitsType>::TOKENS_ACCESSED_FROM_OWNING_RULE;
+	static const int  TOKEN_FILL_BUFFER_INCREMENT = UserTraits<TraitsType>::TOKEN_FILL_BUFFER_INCREMENT; //used only if the above val is true
+
+	static void displayRecognitionError( const StringType& str ) { UserTraits<TraitsType>::displayRecognitionError(str);  }
+};
+
+template<
+	     class LxrType, 
+         class PsrType, 
+		 template<class ImplTraits> class UserTraits = CustomTraitsBase, 
+		 class TreePsrType = antlr3::Empty
+         >
+class Traits  : public TraitsBase<UserTraits>
+{
+public:
+	typedef Traits TraitsType;
+	typedef TraitsBase<UserTraits> BaseTraitsType;	
+
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::CommonTokenType, 
+									 CommonToken<TraitsType> >::selected CommonTokenType;
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::TokenIntStreamType, 
+		                             TokenIntStream<TraitsType> >::selected TokenIntStreamType;
+	
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::TokenStreamType, 
+									CommonTokenStream<TraitsType> >::selected TokenStreamType;
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::TreeNodeStreamType, 
+		                            CommonTreeNodeStream<TraitsType> >::selected TreeNodeStreamType;
+	
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::DebugEventListenerType, 
+		                            DebugEventListener<TraitsType> >::selected DebugEventListenerType;
+		
+	template<class StreamType>
+	class  RecognizerSharedStateType 
+		          : public TraitsOneArgSelector< 
+							typename UserTraits<TraitsType>::template RecognizerSharedStateType<StreamType>, 
+							RecognizerSharedState<TraitsType, StreamType>,
+							typename UserTraits<TraitsType>::template RecognizerSharedStateType<StreamType>::BaseType
+										>::selected 
+	{};
+
+	template<class StreamType>
+	class  RecognizerType 
+				   : public TraitsOneArgSelector< 
+				             typename UserTraits<TraitsType>::template RecognizerType<StreamType>, 
+							 BaseRecognizer<TraitsType, StreamType>,
+							 typename UserTraits<TraitsType>::template RecognizerType<StreamType>::BaseType
+									>::selected 
+	{
+	public:
+		typedef typename TraitsOneArgSelector< 
+							typename UserTraits<TraitsType>::template RecognizerType<StreamType>, 
+							BaseRecognizer<TraitsType, StreamType>,
+							typename UserTraits<TraitsType>::template RecognizerType<StreamType>::BaseType
+										>::selected  BaseType;
+		typedef typename BaseType::RecognizerSharedStateType RecognizerSharedStateType;
+
+	public:
+		RecognizerType(ANTLR_UINT32 sizeHint, RecognizerSharedStateType* state)
+			: BaseType( sizeHint, state )
+		{
+		}
+	};
+
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::TreeType, 
+										CommonTree<TraitsType> >::selected TreeType;
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::TreeAdaptorType, 
+										CommonTreeAdaptor<TraitsType> >::selected TreeAdaptorType;
+		
+	template<class StreamType>
+	class ExceptionBaseType : public TraitsOneArgSelector< 
+								typename UserTraits<TraitsType>::template ExceptionBaseType<StreamType>, 
+								ANTLR_ExceptionBase<TraitsType, StreamType>, 
+								typename UserTraits<TraitsType>::template ExceptionBaseType<StreamType>::BaseType
+													>::selected 
+	{
+	public:
+		typedef typename TraitsOneArgSelector< 
+			                    typename UserTraits<TraitsType>::template ExceptionBaseType<StreamType>, 
+								ANTLR_ExceptionBase<TraitsType, StreamType>,
+								typename UserTraits<TraitsType>::template ExceptionBaseType<StreamType>::BaseType
+							>::selected BaseType;
+	
+	protected:
+		ExceptionBaseType( const typename BaseTraitsType::StringType& message )
+			:BaseType(message)
+		{
+		}
+	};
+
+	//this should be overridden with generated lexer
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::BaseLexerType, 
+											Lexer<TraitsType> >::selected BaseLexerType;
+	typedef LxrType LexerType;
+
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::TokenSourceType, 
+											TokenSource<TraitsType> >::selected TokenSourceType;
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::BaseParserType, 
+											Parser<TraitsType> >::selected BaseParserType;
+	
+	typedef PsrType ParserType;
+
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::BaseTreeParserType, 
+											 TreeParser<TraitsType> >::selected BaseTreeParserType;
+	typedef TreePsrType TreeParserType;
+
+	template<class SuperType>
+	class RewriteStreamType : public TraitsOneArgSelector< 
+								typename UserTraits<TraitsType>::template RewriteStreamType<SuperType>, 
+								RewriteRuleElementStream<TraitsType, SuperType>,
+								typename UserTraits<TraitsType>::template RewriteStreamType<SuperType>::BaseType
+													>::selected 
+	{
+	public:
+		typedef typename TraitsOneArgSelector< 
+				typename UserTraits<TraitsType>::template RewriteStreamType<SuperType>, 
+				RewriteRuleElementStream<TraitsType, SuperType>, 
+				typename UserTraits<TraitsType>::template RewriteStreamType<SuperType>::BaseType
+						>::selected BaseType;
+
+		typedef typename SuperType::StreamType StreamType;
+		typedef typename BaseType::RecognizerType Recognizer_Type;
+		typedef typename BaseType::TokenType TokenType;
+		typedef typename BaseType::ElementsType ElementsType;			
+
+	public:
+		RewriteStreamType(TreeAdaptorType* adaptor = NULL, Recognizer_Type* rec=NULL, ANTLR_UINT8* description = NULL)
+			:BaseType(adaptor, rec, description)
+		{
+		}
+		RewriteStreamType(TreeAdaptorType* adaptor, Recognizer_Type* rec, ANTLR_UINT8* description, TokenType* oneElement)
+			:BaseType(adaptor, rec, description, oneElement)
+		{
+		}
+		RewriteStreamType(TreeAdaptorType* adaptor, Recognizer_Type* rec, ANTLR_UINT8* description, const ElementsType& elements)
+			:BaseType(adaptor, rec, description, elements)
+		{
+		}
+	};
+
+	typedef typename TraitsSelector< typename UserTraits<TraitsType>::RuleReturnValueType, 
+										typename BoolSelector< TraitsType::TOKENS_ACCESSED_FROM_OWNING_RULE, 
+														RuleReturnValue_1<TraitsType>, RuleReturnValue<TraitsType> >::selected
+								   >::selected RuleReturnValueType;
+};
+
+
+ANTLR_END_NAMESPACE()
+
+#endif //_ANTLR3_TRAITS_HPP
diff --git a/runtime/Cpp/include/antlr3treeparser.hpp b/runtime/Cpp/include/antlr3treeparser.hpp
new file mode 100755
index 0000000..8f83ff8
--- /dev/null
+++ b/runtime/Cpp/include/antlr3treeparser.hpp
@@ -0,0 +1,101 @@
+#ifndef	ANTLR3TREEPARSER_HPP
+#define	ANTLR3TREEPARSER_HPP
+
+// [The "BSD licence"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include    "antlr3defs.hpp"
+
+/** Definition of the ANTLR3 C++ tree parser, a recognizer that walks the trees
+ *  produced by a parser, pulling its input from a tree node stream rather than
+ *  from a token stream.
+ */
+ANTLR_BEGIN_NAMESPACE()
+
+template<class ImplTraits>
+class	TreeParser : public ImplTraits::template RecognizerType< TreeParser<ImplTraits> >
+{
+public:
+	typedef typename ImplTraits::TreeNodeStreamType TreeNodeStreamType;
+	typedef TreeNodeStreamType StreamType;
+	typedef typename TreeNodeStreamType::IntStreamType IntStreamType;
+	typedef typename ImplTraits::TreeType TreeType;
+	typedef TreeType TokenType;
+	typedef typename ImplTraits::template ExceptionBaseType<TreeNodeStreamType> ExceptionBaseType;
+	typedef typename ImplTraits::template RecognizerType< TreeParser<ImplTraits> > RecognizerType;
+	typedef typename RecognizerType::RecognizerSharedStateType RecognizerSharedStateType;
+	typedef Empty TokenSourceType;
+	typedef typename ImplTraits::BitsetListType BitsetListType;
+	typedef typename ImplTraits::StringType StringType;
+	typedef typename ImplTraits::CommonTokenType CommonTokenType;
+
+private:
+    /** Pointer to the common tree node stream for the parser
+     */
+    TreeNodeStreamType*		m_ctnstream;
+
+public:
+	TreeParser( ANTLR_UINT32 sizeHint, TreeNodeStreamType* ctnstream,
+											RecognizerSharedStateType* state);
+	TreeNodeStreamType* get_ctnstream() const;
+	IntStreamType* get_istream() const;
+	RecognizerType* get_rec();
+
+	// Same as above, except that get_istream exists for lexer, parser and tree parser,
+	// while get_parser_istream exists only for parser and tree parser, so use it accordingly.
+	IntStreamType* get_parser_istream() const;
+
+    /** Set the input stream and reset the parser
+     */
+    void	setTreeNodeStream(TreeNodeStreamType* input);
+
+    /** Return a pointer to the input stream
+     */
+    TreeNodeStreamType* getTreeNodeStream();
+
+	TokenType*	getMissingSymbol( IntStreamType* istream,
+										  ExceptionBaseType*		e,
+										  ANTLR_UINT32			expectedTokenType,
+										  BitsetListType*	follow);
+
+    /** Frees the resources of an ANTLR3 tree parser.
+     */
+	~TreeParser();
+
+	void fillExceptionData( ExceptionBaseType* ex );
+	void displayRecognitionError( ANTLR_UINT8** tokenNames, ExceptionBaseType* ex );
+	void exConstruct();
+	void mismatch(ANTLR_UINT32 ttype, BitsetListType* follow);
+};
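+
+/* Illustrative usage sketch ("MyTreeGrammar" stands for an ANTLR-generated tree walker and
+ * "nodes" for an already-constructed tree node stream; the generated constructor signature
+ * and start rule name are assumptions, not defined here):
+ *
+ *   MyTreeGrammar walker(&nodes);            // a TreeParser over the node stream
+ *   walker.startRule();                      // hypothetical start rule of the tree grammar
+ *   walker.setTreeNodeStream(&otherNodes);   // re-point and reset to walk another tree
+ */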
+
+ANTLR_END_NAMESPACE()
+
+#include "antlr3treeparser.inl"
+
+#endif
diff --git a/runtime/Cpp/include/antlr3treeparser.inl b/runtime/Cpp/include/antlr3treeparser.inl
new file mode 100755
index 0000000..f08e3ef
--- /dev/null
+++ b/runtime/Cpp/include/antlr3treeparser.inl
@@ -0,0 +1,198 @@
+ANTLR_BEGIN_NAMESPACE()
+
+template< class ImplTraits >
+TreeParser<ImplTraits>::TreeParser( ANTLR_UINT32 sizeHint, TreeNodeStreamType* ctnstream,
+											RecognizerSharedStateType* state)
+											:RecognizerType( sizeHint, state )
+{
+	/* Install the tree node stream
+	*/
+	this->setTreeNodeStream(ctnstream);
+
+}
+
+template< class ImplTraits >
+TreeParser<ImplTraits>::~TreeParser()
+{
+	this->get_rec()->get_state()->get_following().clear();
+}
+
+template< class ImplTraits >
+typename TreeParser<ImplTraits>::TreeNodeStreamType* TreeParser<ImplTraits>::get_ctnstream() const
+{
+	return m_ctnstream;
+}
+
+template< class ImplTraits >
+typename TreeParser<ImplTraits>::IntStreamType* TreeParser<ImplTraits>::get_istream() const
+{
+	return m_ctnstream;
+}
+
+template< class ImplTraits >
+typename TreeParser<ImplTraits>::IntStreamType* TreeParser<ImplTraits>::get_parser_istream() const
+{
+	return m_ctnstream;
+}
+
+template< class ImplTraits >
+typename TreeParser<ImplTraits>::RecognizerType* TreeParser<ImplTraits>::get_rec()
+{
+	return this;
+}
+
+template< class ImplTraits >
+void TreeParser<ImplTraits>::fillExceptionData( ExceptionBaseType* ex )
+{
+	ex->set_token( m_ctnstream->_LT(1) );	    /* Current input tree node */
+	ex->set_line( ex->get_token()->getLine() );
+	ex->set_charPositionInLine( ex->get_token()->getCharPositionInLine() );
+	ex->set_index( m_ctnstream->index() );
+
+	// Are you ready for this? Deep breath now...
+	//
+	{
+		TreeType* tnode;
+
+		tnode		= ex->get_token();
+
+		if	(tnode->get_token()    == NULL)
+		{
+			ex->set_streamName("-unknown source-" );
+		}
+		else
+		{
+			if	( tnode->get_token()->get_input() == NULL)
+			{
+				ex->set_streamName("");
+			}
+			else
+			{
+				ex->set_streamName(	tnode->get_token()->get_input()->get_fileName() );
+			}
+		}
+		ex->set_message("Unexpected node");
+	}
+}
+
+template< class ImplTraits >
+void TreeParser<ImplTraits>::displayRecognitionError( ANTLR_UINT8** tokenNames, ExceptionBaseType* ex )
+{
+	typename ImplTraits::StringStreamType errtext;
+	// See if there is a 'filename' we can use
+	//
+	if( ex->get_streamName().empty() )
+	{
+		if(ex->get_token()->get_type() == ImplTraits::CommonTokenType::TOKEN_EOF)
+		{
+			errtext << "-end of input-(";
+		}
+		else
+		{
+			errtext << "-unknown source-(";
+		}
+	}
+	else
+	{
+		errtext << ex->get_streamName() << "(";
+	}
+
+	// Next comes the line number
+	//
+	errtext << this->get_rec()->get_state()->get_exception()->get_line() << ") ";
+	errtext << " : error " << this->get_rec()->get_state()->get_exception()->getType()
+							<< " : "
+							<< this->get_rec()->get_state()->get_exception()->get_message();
+
+	IntStreamType* is			= this->get_istream();
+	TreeType* theBaseTree	= this->get_rec()->get_state()->get_exception()->get_token();
+
+	if  (theBaseTree != NULL)
+	{
+		// Only stringify the tree node once we know it is not NULL
+		StringType ttext	= theBaseTree->toStringTree();
+		TreeType*  theCommonTree	=  static_cast<TreeType*>(theBaseTree);
+		if	(theCommonTree != NULL)
+		{
+			CommonTokenType* theToken	= theBaseTree->getToken();
+		}
+		errtext << ", at offset "
+			    << theBaseTree->getCharPositionInLine();
+		errtext << ", near " << ttext;
+	}
+	ex->displayRecognitionError( errtext );
+	ImplTraits::displayRecognitionError( errtext.str() );
+}
+
+template< class ImplTraits >
+void	TreeParser<ImplTraits>::setTreeNodeStream(TreeNodeStreamType* input)
+{
+	m_ctnstream = input;
+    this->get_rec()->reset();
+    m_ctnstream->reset();
+}
+
+template< class ImplTraits >
+typename TreeParser<ImplTraits>::TreeNodeStreamType* TreeParser<ImplTraits>::getTreeNodeStream()
+{
+	return m_ctnstream;
+}
+
+template< class ImplTraits >
+void TreeParser<ImplTraits>::exConstruct()
+{
+	new ANTLR_Exception<ImplTraits, MISMATCHED_TREE_NODE_EXCEPTION, TreeNodeStreamType>( this->get_rec(), "" );
+}
+
+template< class ImplTraits >
+void TreeParser<ImplTraits>::mismatch(ANTLR_UINT32 ttype, BitsetListType* follow)
+{
+	this->exConstruct();
+    this->recoverFromMismatchedToken(ttype, follow);
+}
+
+template< class ImplTraits >
+typename TreeParser<ImplTraits>::TokenType*
+TreeParser<ImplTraits>::getMissingSymbol( IntStreamType* istream, ExceptionBaseType*		e,
+					  ANTLR_UINT32	 expectedTokenType, BitsetListType*	follow)
+{
+	TreeNodeStreamType*		tns;
+	TreeType*				node;
+	TreeType*				current;
+	CommonTokenType*		token;
+	StringType				text;
+        ANTLR_INT32             i;
+
+	// Dereference the standard pointers
+	//
+    tns	    = static_cast<TreeNodeStreamType*>(istream);
+
+	// Create a new empty node, by stealing the current one, or the previous one if the current one is EOF
+	//
+	current	= tns->_LT(1);
+    i       = -1;
+
+	if	(current == tns->get_EOF_NODE_p())
+	{
+		current = tns->_LT(-1);
+        i--;
+	}
+	node	= current->dupNode();
+
+	// Find the newly duplicated token
+	//
+	token	= node->getToken();
+
+	// Create the token text that shows it has been inserted
+	//
+	token->setText("<missing ");
+	text = token->getText();
+	text.append((const char *)this->get_rec()->get_state()->get_tokenName(expectedTokenType));
+	text.append((const char *)">");
+
+	// Finally return the pointer to our new node
+	//
+	return	node;
+}
+
+
+ANTLR_END_NAMESPACE()
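Note: displayRecognitionError() above ultimately hands the formatted message to ImplTraits::displayRecognitionError(). One way to use that hook is to override it in the user traits class (the currently empty UserTraits in UserTestTraits.hpp below). This is a hypothetical sketch only; the exact static signature is assumed and should be verified against CustomTraitsBase in the runtime headers before relying on it.

    #include <antlr3.hpp>
    #include <iostream>
    #include <string>

    namespace Antlr3Test {
        template<class ImplTraits>
        class UserTraits : public antlr3::CustomTraitsBase<ImplTraits>
        {
        public:
            // Hypothetical override: redirect the error text that the recognizers
            // (including TreeParser::displayRecognitionError above) pass to
            // ImplTraits::displayRecognitionError().
            static void displayRecognitionError( const std::string& str )
            {
                std::cerr << "[antlr3] " << str;
            }
        };
    }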
diff --git a/runtime/Cpp/tests/.gitignore b/runtime/Cpp/tests/.gitignore
new file mode 100644
index 0000000..dadd82c
--- /dev/null
+++ b/runtime/Cpp/tests/.gitignore
@@ -0,0 +1,22 @@
+# Windows test files
+[st][0-9][0-9][0-9].exe
+
+# Unix test files
+[st][0-9][0-9][0-9]
+
+# ANTLR generated files
+[st][0-9][0-9][0-9]?*.cpp
+[st][0-9][0-9][0-9]?*.hpp
+[st][0-9][0-9][0-9]*.tokens
+
+# MSVC files
+*.vcxproj
+*.vcxproj.user
+Debug/
+Release/
+
+# Eclipse CDT files
+.cproject
+
+# tests output files
+t012.lxr.output
diff --git a/runtime/Cpp/tests/Makefile b/runtime/Cpp/tests/Makefile
new file mode 100644
index 0000000..1efad37
--- /dev/null
+++ b/runtime/Cpp/tests/Makefile
@@ -0,0 +1,97 @@
+.PRECIOUS: %.o
+
+ANTLRGRAMMARS ?= $(wildcard t0[01]*.g)
+HEADERS = $(wildcard *.hpp) $(wildcard *.inl)
+RUNTIME_HEADERS = $(wildcard ../include/*.hpp) $(wildcard ../include/*.inl)
+SOURCES = $(wildcard *.cpp)
+POBJS = $(PSOURCES:.cpp=.o)
+TOKENS = $(ANTLRGRAMMARS:.g=.tokens)
+
+GRAMMAROPTIONS= #-report -Xconversiontimeout 1500000 -Xmultithreaded -Xwatchconversion
+ANTLR = ../../../antlr-complete/target/antlr-complete-3.5.2-SNAPSHOT.jar
+STGS  = $(wildcard ../../../tool/src/main/resources/org/antlr/codegen/templates/Cpp/*.stg)
+
+.SUFFIXES:
+
+INCLUDES= -I. -I../include/
+
+CFLAGS=-ggdb3 -O0 -fno-inline -Wall -Wno-unused-variable
+#CFLAGS=-ggdb3 -O3
+
+TOBJS= utils.o
+
+all: $(ANTLR) $(TOKENS) t001 t002 t003 t004 t005 t006 t007 t008 t009 t010 t011 t012
+
+# For devel only. This will replace .stg files in the tool in a fast way
+#
+$(ANTLR): $(STGS)
+	jar uvf $(ANTLR) -C ../../../tool/src/main/resources \
+	org/antlr/codegen/templates/Cpp/Cpp.stg
+
+t001:	t001.cpp t001lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t002:	t002.cpp t002lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t003:	t003.cpp t003lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t004:	t004.cpp t004lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t005:	t005.cpp t005lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t006:	t006.cpp t006lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t007:	t007.cpp t007lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t008:	t008.cpp t008lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t009:	t009.cpp t009lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t010:	t010.cpp t010lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t011:	t011.cpp t011lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t012:	t012.cpp t012lexerXMLLexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+t051:	t051.cpp t051lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+# AST commented out
+# t039:	t039.cpp t039labels.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+# 	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+#
+# t042:	t042.cpp t005lexer.tokens UserTestTraits.hpp Makefile $(ANTLR) $(RUNTIME_HEADERS)
+# 	g++ $(CFLAGS) -DUSESTL $(INCLUDES) $< $@?*.cpp -o $@
+
+clean:
+	rm -f *.o t0[0-9][0-9]??*.[ch]pp *.tokens t[0-9][0-9][0-9] t0[0-9][0-9].exe
+
+# %.u: %.g
+# 	@echo "Building dependencies for "$<
+# 	java -jar $(ANTLR) -depend $< > $@
+# 	@grep ":" $@ |awk 'BEGIN {printf "ANTLRGENLIST := " }{printf " " $$1}END {print ""}'  >> $@.tmp
+# 	@cat $@.tmp >> $@
+# 	$(RM) $@.tmp
+
+%.tokens %.cpp %Lexer.c %Parser.c %Lexer.h %Parser.h %.hpp: %.g $(ANTLR)
+	java -jar $(ANTLR) $(GRAMMAROPTIONS) $<
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(strip $(ANTLRGRAMMARS)),)
+#-include $(ANTLRGRAMMARS:.g=.u)
+endif
+endif
+
+%.o: %.cpp $(HEADERS) utils.hpp
+	g++ $(CFLAGS) -DUSESTL $(INCLUDES) -c $< -o $@
diff --git a/runtime/Cpp/tests/UserTestTraits.hpp b/runtime/Cpp/tests/UserTestTraits.hpp
new file mode 100644
index 0000000..85a496c
--- /dev/null
+++ b/runtime/Cpp/tests/UserTestTraits.hpp
@@ -0,0 +1,68 @@
+#ifndef	_T_TEST_TRAITS_H
+#define	_T_TEST_TRAITS_H
+
+// First include the ANTLR3 C++ runtime header; the generated lexer/parser
+// headers pull in everything else they need from it.
+// The compiler must be given -I (or the equivalent VS2005 project setting)
+// to locate the antlr3 runtime headers, and -I. to find this file.
+#include <antlr3.hpp>
+
+// Forward declarations for the lexer and parser classes
+namespace Antlr3Test {
+	class S1Lexer;
+	class S1Parser;
+
+	class t001lexer;
+	class t002lexer;
+	class t003lexer;
+	class t004lexer;
+	class t005lexer;
+	class t006lexer;
+	class t007lexer;
+	class t008lexer;
+	class t009lexer;
+	class t010lexer;
+	class t011lexer;
+	class t012lexerXMLLexer;
+	class t051lexer;
+
+	class t039labelsLexer;
+	class t039labelsParser;
+};
+
+namespace Antlr3Test {
+
+	// Place user overrides of the default trait types/behaviour here
+	template<class ImplTraits>
+	class UserTraits : public antlr3::CustomTraitsBase<ImplTraits>
+	{
+	public:
+	};
+
+	// Even lexer-only samples need some parser class as a template parameter
+	class NoParser {
+	};
+
+	// Instantiate the Traits class (will be used for Lexer/Parser template instantiations)
+	typedef antlr3::Traits<S1Lexer, S1Parser, UserTraits> S1LexerTraits;
+	typedef antlr3::Traits<S1Lexer, S1Parser, UserTraits> S1ParserTraits;
+
+	typedef antlr3::Traits<t001lexer, NoParser, UserTraits> t001lexerTraits;
+	typedef antlr3::Traits<t002lexer, NoParser, UserTraits> t002lexerTraits;
+	typedef antlr3::Traits<t003lexer, NoParser, UserTraits> t003lexerTraits;
+	typedef antlr3::Traits<t004lexer, NoParser, UserTraits> t004lexerTraits;
+	typedef antlr3::Traits<t005lexer, NoParser, UserTraits> t005lexerTraits;
+	typedef antlr3::Traits<t006lexer, NoParser, UserTraits> t006lexerTraits;
+	typedef antlr3::Traits<t007lexer, NoParser, UserTraits> t007lexerTraits;
+	typedef antlr3::Traits<t008lexer, NoParser, UserTraits> t008lexerTraits;
+	typedef antlr3::Traits<t009lexer, NoParser, UserTraits> t009lexerTraits;
+	typedef antlr3::Traits<t010lexer, NoParser, UserTraits> t010lexerTraits;
+	typedef antlr3::Traits<t011lexer, NoParser, UserTraits> t011lexerTraits;
+	typedef antlr3::Traits<t012lexerXMLLexer, NoParser, UserTraits> t012lexerXMLLexerTraits;
+	typedef antlr3::Traits<t051lexer, NoParser, UserTraits> t051lexerTraits;
+
+	typedef antlr3::Traits<t039labelsLexer, t039labelsParser, UserTraits> t039labelsLexerTraits;
+	typedef t039labelsLexerTraits t039labelsParserTraits;
+};
+
+#endif
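For orientation: the dozen near-identical test drivers that follow all consume these traits typedefs the same way. A condensed sketch of that pattern, mirroring t001.cpp below (heap allocation and all):

    #include "UserTestTraits.hpp"
    #include "t001lexer.hpp"   // generated from t001lexer.g
    #include <iostream>
    #include <string>

    using namespace Antlr3Test;

    int main()
    {
        std::string data("0");

        // Wrap the raw bytes in the traits-defined input stream type
        t001lexerTraits::InputStreamType* input =
            new t001lexerTraits::InputStreamType((const ANTLR_UINT8*)data.c_str(),
                                                 ANTLR_ENC_8BIT,
                                                 data.length(),
                                                 (ANTLR_UINT8*)"t001");

        // Instantiate the generated lexer over that stream and pull tokens
        t001lexer* lxr = new t001lexer(input);
        t001lexerTraits::CommonTokenType* token = lxr->nextToken();
        std::cout << token->getText() << std::endl;

        delete lxr;
        delete input;
        return 0;
    }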
diff --git a/runtime/Cpp/tests/t001.cpp b/runtime/Cpp/tests/t001.cpp
new file mode 100644
index 0000000..453a9b0
--- /dev/null
+++ b/runtime/Cpp/tests/t001.cpp
@@ -0,0 +1,96 @@
+#include "UserTestTraits.hpp"
+#include "t001lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testIteratorInterface(string const& data);
+int testMalformedInput(string const& data);
+
+static    t001lexer*		    lxr;
+
+int main (int argc, char *argv[])
+{
+	testValid("0");
+	testIteratorInterface("0");
+	testMalformedInput("1");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t001lexerTraits::InputStreamType* input	= new t001lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t001");
+	if (lxr == NULL)
+		lxr = new t001lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	t001lexerTraits::CommonTokenType *token0 = lxr->nextToken();
+	t001lexerTraits::CommonTokenType *token1 = lxr->nextToken();
+
+	std::cout << token0->getText() << std::endl;
+	std::cout << token1->getText() << std::endl;
+	
+	delete lxr; lxr = NULL;
+	delete input;
+	return 0;
+}
+
+int testIteratorInterface(string const& data)
+{
+	t001lexerTraits::InputStreamType* input	= new t001lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t001");
+	if (lxr == NULL)
+		lxr = new t001lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testIteratorInterface: \"" << data << '"' <<std::endl;
+
+	t001lexerTraits::TokenStreamType *tstream = new t001lexerTraits::TokenStreamType(ANTLR_SIZE_HINT, lxr->get_tokSource());
+	t001lexerTraits::CommonTokenType const *token0 = tstream->_LT(1);
+	t001lexerTraits::CommonTokenType const *token1 = tstream->_LT(2);
+
+	std::cout << token0->getText() << std::endl;
+	std::cout << token1->getText() << std::endl;
+
+	delete tstream;
+	delete lxr; lxr = NULL;
+	delete input;
+	return 0;
+}
+
+int testMalformedInput(string const& data)
+{
+	t001lexerTraits::InputStreamType* input	= new t001lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t001");
+	if (lxr == NULL)
+		lxr = new t001lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput: \"" << data << '"' <<std::endl;
+
+	t001lexerTraits::CommonTokenType *token0 = lxr->nextToken();
+	std::cout << token0->getText() << std::endl;
+	
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
diff --git a/runtime/Cpp/tests/t001lexer.g b/runtime/Cpp/tests/t001lexer.g
new file mode 100644
index 0000000..a9594d4
--- /dev/null
+++ b/runtime/Cpp/tests/t001lexer.g
@@ -0,0 +1,13 @@
+lexer grammar t001lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+ZERO: '0';
diff --git a/runtime/Cpp/tests/t002.cpp b/runtime/Cpp/tests/t002.cpp
new file mode 100644
index 0000000..776a4dc
--- /dev/null
+++ b/runtime/Cpp/tests/t002.cpp
@@ -0,0 +1,107 @@
+#include "UserTestTraits.hpp"
+#include "t002lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testIteratorInterface(string const& data);
+int testMalformedInput(string const& data);
+
+static t002lexer *lxr;
+static t002lexerTokens::Tokens ExpectedTokens[] =
+  {
+    t002lexerTokens::ZERO,
+    t002lexerTokens::ONE,
+    t002lexerTokens::EOF_TOKEN
+  };
+
+int main (int argc, char *argv[])
+{
+	testValid("01");
+	testIteratorInterface("01");
+	testMalformedInput("2");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t002lexerTraits::InputStreamType* input	= new t002lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t002");
+	if (lxr == NULL)
+		lxr = new t002lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	for(unsigned i = 0; i <= 2 ; i++)
+	{
+		// nextToken does not allocate any new Token instance(the same instance is returned again and again)
+		t002lexerTraits::CommonTokenType *token = lxr->nextToken();
+		std::cout << token->getText() << '\t'
+			  << (token->getType() == ExpectedTokens[i] ? "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testIteratorInterface(string const& data)
+{
+	t002lexerTraits::InputStreamType* input	= new t002lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t002");
+	if (lxr == NULL)
+		lxr = new t002lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testIteratorInterface: \"" << data << '"' <<std::endl;
+	
+	t002lexerTraits::TokenStreamType *tstream = new t002lexerTraits::TokenStreamType(ANTLR_SIZE_HINT, lxr->get_tokSource());
+	t002lexerTraits::CommonTokenType const *token0 = tstream->_LT(1);
+	t002lexerTraits::CommonTokenType const *token1 = tstream->_LT(2);
+	t002lexerTraits::CommonTokenType const *token2 = tstream->_LT(3);
+
+	std::cout << token0->getText() << std::endl;
+	std::cout << token1->getText() << std::endl;
+	std::cout << token2->getText() << std::endl;
+
+	delete tstream;
+	delete lxr; lxr = NULL;
+	delete input;
+	return 0;
+}
+
+int testMalformedInput(string const& data)
+{
+	t002lexerTraits::InputStreamType* input	= new t002lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t002");
+	if (lxr == NULL)
+		lxr = new t002lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput: \"" << data << '"' <<std::endl;
+	
+	t002lexerTraits::CommonTokenType *token0 = lxr->nextToken();
+	std::cout << token0->getText() << std::endl;
+	
+	delete lxr; lxr = NULL;
+	delete input;
+	return 0;
+}
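A note on the "nextToken does not allocate any new Token instance" comment in these drivers: because the lexer recycles a single CommonTokenType object, copy out anything you still need before advancing. A minimal sketch (assuming, as the drivers above already do, that getText() yields a streamable value):

    #include "UserTestTraits.hpp"
    #include "t002lexer.hpp"
    #include <iostream>
    #include <sstream>

    // Capture token text before calling nextToken() again, since the
    // previously returned token object is reused by the lexer.
    static void printFirstTwoTokens(Antlr3Test::t002lexer& lexer)
    {
        std::ostringstream first;
        first << lexer.nextToken()->getText();   // copy the text out immediately

        std::ostringstream second;
        second << lexer.nextToken()->getText();  // the previous token object was recycled

        std::cout << first.str() << '\n' << second.str() << std::endl;
    }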
diff --git a/runtime/Cpp/tests/t002lexer.g b/runtime/Cpp/tests/t002lexer.g
new file mode 100644
index 0000000..c367547
--- /dev/null
+++ b/runtime/Cpp/tests/t002lexer.g
@@ -0,0 +1,14 @@
+lexer grammar t002lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+ZERO: '0';
+ONE: '1';
diff --git a/runtime/Cpp/tests/t003.cpp b/runtime/Cpp/tests/t003.cpp
new file mode 100644
index 0000000..e7a163a
--- /dev/null
+++ b/runtime/Cpp/tests/t003.cpp
@@ -0,0 +1,119 @@
+#include "UserTestTraits.hpp"
+#include "t003lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testIteratorInterface(string const& data);
+int testMalformedInput(string const& data);
+
+static t003lexer *lxr;
+
+struct TokenData
+{
+	t003lexerTokens::Tokens type;
+	//unsigned start;
+	//unsigned stop;
+	//const char* text;
+};
+
+static TokenData ExpectedTokens[] =
+{
+	{ t003lexerTokens::ZERO      },
+	{ t003lexerTokens::FOOZE     },
+	{ t003lexerTokens::ONE       },
+	{ t003lexerTokens::EOF_TOKEN }
+};
+
+int main (int argc, char *argv[])
+{
+	testValid("0fooze1");
+	testIteratorInterface("0fooze1");
+	testMalformedInput("2");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t003lexerTraits::InputStreamType* input	= new t003lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t003");
+	if (lxr == NULL)
+		lxr = new t003lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	for(unsigned i = 0; i < sizeof(ExpectedTokens)/sizeof(TokenData) ; i++)
+	{
+		// nextToken does not allocate any new Token instance(the same instance is returned again and again)
+		t003lexerTraits::CommonTokenType *token = lxr->nextToken();
+		std::cout << token->getText() << '\t'
+			  << (token->getType() == ExpectedTokens[i].type ? "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testIteratorInterface(string const& data)
+{
+	t003lexerTraits::InputStreamType* input	= new t003lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t003");
+	if (lxr == NULL)
+		lxr = new t003lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testIteratorInterface: \"" << data << '"' <<std::endl;
+		
+	t003lexerTraits::TokenStreamType *tstream = new t003lexerTraits::TokenStreamType(ANTLR_SIZE_HINT, lxr->get_tokSource());
+	t003lexerTraits::CommonTokenType const *token0 = tstream->_LT(1);
+	t003lexerTraits::CommonTokenType const *token1 = tstream->_LT(2);
+	t003lexerTraits::CommonTokenType const *token2 = tstream->_LT(3);
+	t003lexerTraits::CommonTokenType const *token3 = tstream->_LT(4);
+
+	std::cout << token0->getText() << std::endl;
+	std::cout << token1->getText() << std::endl;
+	std::cout << token2->getText() << std::endl;
+	std::cout << token3->getText() << std::endl;
+
+	delete tstream;
+	delete lxr; lxr = NULL;
+	delete input;
+	return 0;
+}
+
+int testMalformedInput(string const& data)
+{
+	t003lexerTraits::InputStreamType* input	= new t003lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t003");
+	if (lxr == NULL)
+		lxr = new t003lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput: \"" << data << '"' <<std::endl;
+	
+	t003lexerTraits::CommonTokenType *token0 = lxr->nextToken();
+	std::cout << token0->getText() << std::endl;
+	
+	delete lxr; lxr = NULL;
+	delete input;
+	return 0;
+}
diff --git a/runtime/Cpp/tests/t003lexer.g b/runtime/Cpp/tests/t003lexer.g
new file mode 100644
index 0000000..72e671b
--- /dev/null
+++ b/runtime/Cpp/tests/t003lexer.g
@@ -0,0 +1,15 @@
+lexer grammar t003lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+ZERO: '0';
+ONE: '1';
+FOOZE: 'fooze';
diff --git a/runtime/Cpp/tests/t004.cpp b/runtime/Cpp/tests/t004.cpp
new file mode 100644
index 0000000..fc0ecd3
--- /dev/null
+++ b/runtime/Cpp/tests/t004.cpp
@@ -0,0 +1,100 @@
+#include "UserTestTraits.hpp"
+#include "t004lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testMalformedInput(string const& data);
+
+static t004lexer *lxr;
+
+struct TokenData
+{
+	t004lexerTokens::Tokens type;
+	unsigned start;
+	unsigned stop;
+	const char* text;
+};
+
+static TokenData ExpectedTokens[] =
+{
+	{ t004lexerTokens::FOO, 0, 0, "f"},
+	{ t004lexerTokens::FOO, 1, 2, "fo"},
+	{ t004lexerTokens::FOO, 3, 5, "foo"},
+	{ t004lexerTokens::FOO, 6, 9, "fooo"}
+};
+
+int main (int argc, char *argv[])
+{
+	testValid("ffofoofooo");
+	testMalformedInput("2");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t004lexerTraits::InputStreamType* input	= new t004lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t004");
+	if (lxr == NULL)
+		lxr = new t004lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	std::cout << "Text:"  << '\t'
+		  << "Type:"  << '\t'
+		  << "Start:" << '\t'
+		  << "Stop:"  << '\t'
+		  << "Text:"  << '\t' << std::endl;
+	
+	for(unsigned i = 0; i < sizeof(ExpectedTokens)/sizeof(TokenData) ; i++)
+	{
+		// nextToken does not allocate any new Token instance(the same instance is returned again and again)
+		t004lexerTraits::CommonTokenType *token = lxr->nextToken();
+
+		size_t startIndex = ((const char*)token->get_startIndex()) - data.c_str();
+		size_t stopIndex = ((const char*)token->get_stopIndex()) - data.c_str();
+
+		std::cout << token->getText()
+			  << '\t' << (token->getType()       == ExpectedTokens[i].type ?  "OK" : "Fail")
+			  << '\t' << (startIndex == ExpectedTokens[i].start ? "OK" : "Fail")
+			  << '\t' << (stopIndex  == ExpectedTokens[i].stop ?  "OK" : "Fail")
+			  << '\t' << (token->getText()       == ExpectedTokens[i].text ?  "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input;
+	return 0;
+}
+
+int testMalformedInput(string const& data)
+{
+	t004lexerTraits::InputStreamType* input	= new t004lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t004");
+	if (lxr == NULL)
+		lxr = new t004lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput: \"" << data << '"' <<std::endl;
+	
+	t004lexerTraits::CommonTokenType *token0 = lxr->nextToken();
+	std::cout << token0->getText() << std::endl;
+	
+	delete lxr; lxr = NULL;
+	delete input;
+	return 0;
+}
diff --git a/runtime/Cpp/tests/t004lexer.g b/runtime/Cpp/tests/t004lexer.g
new file mode 100644
index 0000000..a16e407
--- /dev/null
+++ b/runtime/Cpp/tests/t004lexer.g
@@ -0,0 +1,16 @@
+lexer grammar t004lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+FOO: 'f' f=OO;
+
+fragment
+OO: 'o'*;
diff --git a/runtime/Cpp/tests/t005.cpp b/runtime/Cpp/tests/t005.cpp
new file mode 100644
index 0000000..7484630
--- /dev/null
+++ b/runtime/Cpp/tests/t005.cpp
@@ -0,0 +1,131 @@
+#include "UserTestTraits.hpp"
+#include "t005lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testMalformedInput1(string const& data);
+int testMalformedInput2(string const& data);
+
+static t005lexer *lxr;
+
+struct TokenData
+{
+	t005lexerTokens::Tokens type;
+	unsigned start;
+	unsigned stop;
+	const char* text;
+};
+
+static TokenData ExpectedTokens[] =
+{
+	// "fofoofooo"
+	{ t005lexerTokens::FOO, 0, 1, "fo"},
+	{ t005lexerTokens::FOO, 2, 4, "foo"},
+	{ t005lexerTokens::FOO, 5, 8, "fooo"},
+	{ t005lexerTokens::EOF_TOKEN, 9, 9, "<EOF>"}
+};
+
+int main (int argc, char *argv[])
+{
+	testValid("fofoofooo");
+	testMalformedInput1("2");
+	testMalformedInput2("f");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t005lexerTraits::InputStreamType* input	= new t005lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t005");
+	if (lxr == NULL)
+		lxr = new t005lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	std::cout << "Text:"  << '\t'
+		  << "Type:"  << '\t'
+		  << "Start:" << '\t'
+		  << "Stop:"  << '\t'
+		  << "Text:"  << '\t' << std::endl;
+	
+	for(unsigned i = 0; i < sizeof(ExpectedTokens)/sizeof(TokenData) ; i++)
+	{
+		// nextToken does not allocate any new Token instance(the same instance is returned again and again)
+		t005lexerTraits::CommonTokenType *token = lxr->nextToken();
+
+		size_t startIndex = ((const char*)token->get_startIndex()) - data.c_str();
+		size_t stopIndex = ((const char*)token->get_stopIndex()) - data.c_str();
+
+		std::cout << token->getText()
+			  << '\t' << (token->getType()       == ExpectedTokens[i].type ?  "OK" : "Fail")
+			  << '\t' << (startIndex == ExpectedTokens[i].start ? "OK" : "Fail")
+			  << '\t' << (stopIndex  == ExpectedTokens[i].stop ?  "OK" : "Fail")
+			  << '\t' << (token->getText()       == ExpectedTokens[i].text ?  "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput1(string const& data)
+{
+	t005lexerTraits::InputStreamType* input	= new t005lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t005");
+	if (lxr == NULL)
+		lxr = new t005lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput1: \"" << data << '"' <<std::endl;
+	
+	t005lexerTraits::CommonTokenType *token0 = lxr->nextToken();
+	std::cout << token0->getText() << std::endl;
+
+	//except antlr3.MismatchedTokenException as exc:
+	//self.assertEqual(exc.expecting, 'f')
+	//self.assertEqual(exc.unexpectedType, '2')
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput2(string const& data)
+{
+	t005lexerTraits::InputStreamType* input	= new t005lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t005");
+	if (lxr == NULL)
+		lxr = new t005lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput2: \"" << data << '"' <<std::endl;
+	
+	t005lexerTraits::CommonTokenType *token0 = lxr->nextToken();
+	std::cout << token0->getText() << std::endl;
+
+	//except antlr3.EarlyExitException as exc:
+	//self.assertEqual(exc.unexpectedType, antlr3.EOF)
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
diff --git a/runtime/Cpp/tests/t005lexer.g b/runtime/Cpp/tests/t005lexer.g
new file mode 100644
index 0000000..64e856e
--- /dev/null
+++ b/runtime/Cpp/tests/t005lexer.g
@@ -0,0 +1,13 @@
+lexer grammar t005lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+FOO: 'f' 'o'+;
diff --git a/runtime/Cpp/tests/t006.cpp b/runtime/Cpp/tests/t006.cpp
new file mode 100644
index 0000000..38c6d2f
--- /dev/null
+++ b/runtime/Cpp/tests/t006.cpp
@@ -0,0 +1,111 @@
+#include "UserTestTraits.hpp"
+#include "t006lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testMalformedInput(string const& data);
+
+static t006lexer *lxr;
+
+struct TokenData
+{
+	t006lexerTokens::Tokens type;
+	unsigned start;
+	unsigned stop;
+	const char* text;
+};
+
+static TokenData ExpectedTokens[] =
+{
+	// "fofaaooa"
+	{ t006lexerTokens::FOO, 0, 1, "fo"},
+	{ t006lexerTokens::FOO, 2, 7, "faaooa"},
+	{ t006lexerTokens::EOF_TOKEN, 8, 8, "<EOF>"}
+};
+
+int main (int argc, char *argv[])
+{
+	testValid("fofaaooa");
+	testMalformedInput("fofoaooaoa2");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t006lexerTraits::InputStreamType* input	= new t006lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t006");
+	if (lxr == NULL)
+		lxr = new t006lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	std::cout << "Text:"  << '\t'
+		  << "Type:"  << '\t'
+		  << "Start:" << '\t'
+		  << "Stop:"  << '\t'
+		  << "Text:"  << '\t' << std::endl;
+	
+	for(unsigned i = 0; i < sizeof(ExpectedTokens)/sizeof(TokenData) ; i++)
+	{
+		// nextToken does not allocate any new Token instance(the same instance is returned again and again)
+		t006lexerTraits::CommonTokenType *token = lxr->nextToken();
+
+		size_t startIndex = ((const char*)token->get_startIndex()) - data.c_str();
+		size_t stopIndex = ((const char*)token->get_stopIndex()) - data.c_str();
+
+		std::cout << token->getText()
+			  << '\t' << (token->getType()       == ExpectedTokens[i].type ?  "OK" : "Fail")
+			  << '\t' << (startIndex == ExpectedTokens[i].start ? "OK" : "Fail")
+			  << '\t' << (stopIndex  == ExpectedTokens[i].stop ?  "OK" : "Fail")
+			  << '\t' << (token->getText()       == ExpectedTokens[i].text ?  "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput(string const& data)
+{
+	t006lexerTraits::InputStreamType* input	= new t006lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t006");
+	if (lxr == NULL)
+		lxr = new t006lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput: \"" << data << '"' <<std::endl;
+	
+	t006lexerTraits::CommonTokenType *token;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+
+    //except antlr3.MismatchedTokenException as exc:
+    //self.assertEqual(exc.expecting, 'f')
+    //self.assertEqual(exc.unexpectedType, '2')
+    //self.assertEqual(exc.charPositionInLine, 10)
+    //self.assertEqual(exc.line, 1)
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
diff --git a/runtime/Cpp/tests/t006lexer.g b/runtime/Cpp/tests/t006lexer.g
new file mode 100644
index 0000000..ce7a9a2
--- /dev/null
+++ b/runtime/Cpp/tests/t006lexer.g
@@ -0,0 +1,13 @@
+lexer grammar t006lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+FOO: 'f' ('o' | 'a')*;
diff --git a/runtime/Cpp/tests/t007.cpp b/runtime/Cpp/tests/t007.cpp
new file mode 100644
index 0000000..dc9b5a3
--- /dev/null
+++ b/runtime/Cpp/tests/t007.cpp
@@ -0,0 +1,105 @@
+#include "UserTestTraits.hpp"
+#include "t007lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testMalformedInput(string const& data);
+
+static t007lexer *lxr;
+
+struct TokenData
+{
+	t007lexerTokens::Tokens type;
+	unsigned start;
+	unsigned stop;
+	const char* text;
+};
+
+static TokenData ExpectedTokens[] =
+{
+	// "fofababbooabb"
+	{ t007lexerTokens::FOO, 0, 1, "fo"},
+	{ t007lexerTokens::FOO, 2, 12, "fababbooabb"},
+	{ t007lexerTokens::EOF_TOKEN, 13, 13, "<EOF>"}
+};
+
+int main (int argc, char *argv[])
+{
+	testValid("fofababbooabb");
+	testMalformedInput("foaboao");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t007lexerTraits::InputStreamType* input	= new t007lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t007");
+	if (lxr == NULL)
+		lxr = new t007lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	std::cout << "Text:"  << '\t'
+		  << "Type:"  << '\t'
+		  << "Start:" << '\t'
+		  << "Stop:"  << '\t'
+		  << "Text:"  << '\t' << std::endl;
+	
+	for(unsigned i = 0; i < sizeof(ExpectedTokens)/sizeof(TokenData) ; i++)
+	{
+		// nextToken does not allocate any new Token instance(the same instance is returned again and again)
+		t007lexerTraits::CommonTokenType *token = lxr->nextToken();
+
+		size_t startIndex = ((const char*)token->get_startIndex()) - data.c_str();
+		size_t stopIndex = ((const char*)token->get_stopIndex()) - data.c_str();
+
+		std::cout << token->getText()
+			  << '\t' << (token->getType()       == ExpectedTokens[i].type ?  "OK" : "Fail")
+			  << '\t' << (startIndex == ExpectedTokens[i].start ? "OK" : "Fail")
+			  << '\t' << (stopIndex  == ExpectedTokens[i].stop ?  "OK" : "Fail")
+			  << '\t' << (token->getText()       == ExpectedTokens[i].text ?  "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput(string const& data)
+{
+	t007lexerTraits::InputStreamType* input	= new t007lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t007");
+	if (lxr == NULL)
+		lxr = new t007lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput: \"" << data << '"' <<std::endl;
+	
+	t007lexerTraits::CommonTokenType *token0 = lxr->nextToken();
+	std::cout << token0->getText() << std::endl;
+
+    //except antlr3.EarlyExitException as exc:
+    //   self.assertEqual(exc.unexpectedType, 'o')
+    //   self.assertEqual(exc.charPositionInLine, 6)
+    //   self.assertEqual(exc.line, 1)
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
diff --git a/runtime/Cpp/tests/t007lexer.g b/runtime/Cpp/tests/t007lexer.g
new file mode 100644
index 0000000..1eac531
--- /dev/null
+++ b/runtime/Cpp/tests/t007lexer.g
@@ -0,0 +1,13 @@
+lexer grammar t007lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+FOO: 'f' ('o' | 'a' 'b'+)*;
diff --git a/runtime/Cpp/tests/t008.cpp b/runtime/Cpp/tests/t008.cpp
new file mode 100644
index 0000000..fbb541b
--- /dev/null
+++ b/runtime/Cpp/tests/t008.cpp
@@ -0,0 +1,111 @@
+#include "UserTestTraits.hpp"
+#include "t008lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testMalformedInput(string const& data);
+
+static t008lexer *lxr;
+
+struct TokenData
+{
+	t008lexerTokens::Tokens type;
+	unsigned start;
+	unsigned stop;
+	const char* text;
+};
+
+static TokenData ExpectedTokens[] =
+{
+	// "ffaf"
+	{ t008lexerTokens::FOO, 0, 0, "f"},
+	{ t008lexerTokens::FOO, 1, 2, "fa"},
+	{ t008lexerTokens::FOO, 3, 3, "f"},
+	{ t008lexerTokens::EOF_TOKEN, 4, 4, "<EOF>"}
+};
+
+int main (int argc, char *argv[])
+{
+	testValid("ffaf");
+	testMalformedInput("fafb");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t008lexerTraits::InputStreamType* input	= new t008lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t008");
+	if (lxr == NULL)
+		lxr = new t008lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	std::cout << "Text:"  << '\t'
+		  << "Type:"  << '\t'
+		  << "Start:" << '\t'
+		  << "Stop:"  << '\t'
+		  << "Text:"  << '\t' << std::endl;
+	
+	for(unsigned i = 0; i < sizeof(ExpectedTokens)/sizeof(TokenData) ; i++)
+	{
+		// nextToken does not allocate any new Token instance(the same instance is returned again and again)
+		t008lexerTraits::CommonTokenType *token = lxr->nextToken();
+
+		size_t startIndex = ((const char*)token->get_startIndex()) - data.c_str();
+		size_t stopIndex = ((const char*)token->get_stopIndex()) - data.c_str();
+
+		std::cout << token->getText()
+			  << '\t' << (token->getType()       == ExpectedTokens[i].type ?  "OK" : "Fail")
+			  << '\t' << (startIndex == ExpectedTokens[i].start ? "OK" : "Fail")
+			  << '\t' << (stopIndex  == ExpectedTokens[i].stop ?  "OK" : "Fail")
+			  << '\t' << (token->getText()       == ExpectedTokens[i].text ?  "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput(string const& data)
+{
+	t008lexerTraits::InputStreamType* input	= new t008lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t008");
+	if (lxr == NULL)
+		lxr = new t008lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput: \"" << data << '"' <<std::endl;
+	
+	t008lexerTraits::CommonTokenType *token;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+
+	//except antlr3.MismatchedTokenException as exc:
+    //   self.assertEqual(exc.unexpectedType, 'b')
+    //   self.assertEqual(exc.charPositionInLine, 3)
+    //   self.assertEqual(exc.line, 1)
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
diff --git a/runtime/Cpp/tests/t008lexer.g b/runtime/Cpp/tests/t008lexer.g
new file mode 100644
index 0000000..0294155
--- /dev/null
+++ b/runtime/Cpp/tests/t008lexer.g
@@ -0,0 +1,13 @@
+lexer grammar t008lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+FOO: 'f' 'a'?;
diff --git a/runtime/Cpp/tests/t009.cpp b/runtime/Cpp/tests/t009.cpp
new file mode 100644
index 0000000..75b2fca
--- /dev/null
+++ b/runtime/Cpp/tests/t009.cpp
@@ -0,0 +1,111 @@
+#include "UserTestTraits.hpp"
+#include "t009lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testMalformedInput(string const& data);
+
+static t009lexer *lxr;
+
+struct TokenData
+{
+	t009lexerTokens::Tokens type;
+	unsigned start;
+	unsigned stop;
+	const char* text;
+};
+
+static TokenData ExpectedTokens[] =
+{
+	// "085"
+	{ t009lexerTokens::DIGIT, 0, 0, "0"},
+	{ t009lexerTokens::DIGIT, 1, 1, "8"},
+	{ t009lexerTokens::DIGIT, 2, 2, "5"},
+	{ t009lexerTokens::EOF_TOKEN, 3, 3, "<EOF>"}
+};
+
+int main (int argc, char *argv[])
+{
+	testValid("085"); 
+	testMalformedInput("2a");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t009lexerTraits::InputStreamType* input	= new t009lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t009");
+	if (lxr == NULL)
+		lxr = new t009lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	std::cout << "Text:"  << '\t'
+		  << "Type:"  << '\t'
+		  << "Start:" << '\t'
+		  << "Stop:"  << '\t'
+		  << "Text:"  << '\t' << std::endl;
+	
+	for(unsigned i = 0; i < sizeof(ExpectedTokens)/sizeof(TokenData) ; i++)
+	{
+		// nextToken does not allocate any new Token instance(the same instance is returned again and again)
+		t009lexerTraits::CommonTokenType *token = lxr->nextToken();
+
+		size_t startIndex = ((const char*)token->get_startIndex()) - data.c_str();
+		size_t stopIndex = ((const char*)token->get_stopIndex()) - data.c_str();
+
+		std::cout << token->getText()
+			  << '\t' << (token->getType()       == ExpectedTokens[i].type ?  "OK" : "Fail")
+			  << '\t' << (startIndex == ExpectedTokens[i].start ? "OK" : "Fail")
+			  << '\t' << (stopIndex  == ExpectedTokens[i].stop ?  "OK" : "Fail")
+			  << '\t' << (token->getText()       == ExpectedTokens[i].text ?  "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput(string const& data)
+{
+	t009lexerTraits::InputStreamType* input	= new t009lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t009");
+	if (lxr == NULL)
+		lxr = new t009lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput: \"" << data << '"' <<std::endl;
+	
+	t009lexerTraits::CommonTokenType *token;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+
+	//except antlr3.MismatchedSetException as exc:
+	//   # TODO: This should provide more useful information
+	//   self.assertIsNone(exc.expecting)
+	//   self.assertEqual(exc.unexpectedType, 'a')
+	//   self.assertEqual(exc.charPositionInLine, 1)
+	//   self.assertEqual(exc.line, 1)
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
diff --git a/runtime/Cpp/tests/t009lexer.g b/runtime/Cpp/tests/t009lexer.g
new file mode 100644
index 0000000..928f97b
--- /dev/null
+++ b/runtime/Cpp/tests/t009lexer.g
@@ -0,0 +1,13 @@
+lexer grammar t009lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+DIGIT: '0' .. '9';
diff --git a/runtime/Cpp/tests/t010.cpp b/runtime/Cpp/tests/t010.cpp
new file mode 100644
index 0000000..0b9fca7
--- /dev/null
+++ b/runtime/Cpp/tests/t010.cpp
@@ -0,0 +1,111 @@
+#include "UserTestTraits.hpp"
+#include "t010lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testMalformedInput(string const& data);
+
+static t010lexer *lxr;
+
+struct TokenData
+{
+	t010lexerTokens::Tokens type;
+	unsigned start;
+	unsigned stop;
+	const char* text;
+};
+
+static TokenData ExpectedTokens[] =
+{
+	// "foobar _Ab98 \n A12sdf"
+	{ t010lexerTokens::IDENTIFIER,  0,   5, "foobar"},
+	{ t010lexerTokens::WS,          6,   6, " "},
+	{ t010lexerTokens::IDENTIFIER,  7,  11, "_Ab98"},
+	{ t010lexerTokens::WS,         12, 14, " \n "},
+	{ t010lexerTokens::IDENTIFIER, 15, 20, "A12sdf"},
+	{ t010lexerTokens::EOF_TOKEN,  21, 21, "<EOF>"}
+};
+
+int main (int argc, char *argv[])
+{
+	testValid("foobar _Ab98 \n A12sdf");
+	testMalformedInput("a-b");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t010lexerTraits::InputStreamType* input	= new t010lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t010");
+	if (lxr == NULL)
+		lxr = new t010lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	std::cout << "Text:"  << '\t'
+		  << "Type:"  << '\t'
+		  << "Start:" << '\t'
+		  << "Stop:"  << '\t'
+		  << "Text:"  << '\t' << std::endl;
+	
+	for(unsigned i = 0; i < sizeof(ExpectedTokens)/sizeof(TokenData) ; i++)
+	{
+		// nextToken does not allocate any new Token instance(the same instance is returned again and again)
+		t010lexerTraits::CommonTokenType *token = lxr->nextToken();
+
+		size_t startIndex = ((const char*)token->get_startIndex()) - data.c_str();
+		size_t stopIndex = ((const char*)token->get_stopIndex()) - data.c_str();
+
+		std::cout << token->getText()
+			  << '\t' << (token->getType()       == ExpectedTokens[i].type ?  "OK" : "Fail")
+			  << '\t' << (startIndex == ExpectedTokens[i].start ? "OK" : "Fail")
+			  << '\t' << (stopIndex  == ExpectedTokens[i].stop ?  "OK" : "Fail")
+			  << '\t' << (token->getText()       == ExpectedTokens[i].text ?  "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput(string const& data)
+{
+	t010lexerTraits::InputStreamType* input	= new t010lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t010");
+	if (lxr == NULL)
+		lxr = new t010lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput: \"" << data << '"' <<std::endl;
+	
+	t010lexerTraits::CommonTokenType *token;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+
+	//except antlr3.NoViableAltException as exc:
+	//    self.assertEqual(exc.unexpectedType, '-')
+	//    self.assertEqual(exc.charPositionInLine, 1)
+	//    self.assertEqual(exc.line, 1)
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
diff --git a/runtime/Cpp/tests/t010lexer.g b/runtime/Cpp/tests/t010lexer.g
new file mode 100644
index 0000000..bb5a53e
--- /dev/null
+++ b/runtime/Cpp/tests/t010lexer.g
@@ -0,0 +1,14 @@
+lexer grammar t010lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
+WS: (' ' | '\n')+;
diff --git a/runtime/Cpp/tests/t011.cpp b/runtime/Cpp/tests/t011.cpp
new file mode 100644
index 0000000..d8e40c7
--- /dev/null
+++ b/runtime/Cpp/tests/t011.cpp
@@ -0,0 +1,113 @@
+#include "UserTestTraits.hpp"
+#include "t011lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testMalformedInput(string const& data);
+
+static t011lexer *lxr;
+
+struct TokenData
+{
+	t011lexerTokens::Tokens type;
+	unsigned start;
+	unsigned stop;
+	const char* text;
+};
+
+static TokenData ExpectedTokens[] =
+{
+	// "foobar _Ab98 \n A12sdf"
+	{ t011lexerTokens::IDENTIFIER,  0,   5, "foobar"},
+	{ t011lexerTokens::WS,          6,   6, " "},
+	{ t011lexerTokens::IDENTIFIER,  7,  11, "_Ab98"},
+	{ t011lexerTokens::WS,         12, 14, " \n "},
+	{ t011lexerTokens::IDENTIFIER, 15, 20, "A12sdf"},
+	{ t011lexerTokens::EOF_TOKEN,  21, 21, "<EOF>"}
+};
+
+int main (int argc, char *argv[])
+{
+	testValid("foobar _Ab98 \n A12sdf");
+	testMalformedInput("a-b");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t011lexerTraits::InputStreamType* input	= new t011lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t011");
+	if (lxr == NULL)
+		lxr = new t011lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	std::cout << "Text:"  << '\t'
+		  << "Type:"  << '\t'
+		  << "Start:" << '\t'
+		  << "Stop:"  << '\t'
+		  << "Text:"  << '\t' << std::endl;
+	
+	for(unsigned i = 0; i < sizeof(ExpectedTokens)/sizeof(TokenData) ; i++)
+	{
+		// nextToken does not allocate any new Token instance(the same instance is returned again and again)
+		t011lexerTraits::CommonTokenType *token = lxr->nextToken();
+
+		size_t startIndex = ((const char*)token->get_startIndex()) - data.c_str();
+		size_t stopIndex = ((const char*)token->get_stopIndex()) - data.c_str();
+
+		std::cout << token->getText()
+			  << '\t' << (token->getType()       == ExpectedTokens[i].type ?  "OK" : "Fail")
+			  << '\t' << (startIndex == ExpectedTokens[i].start ? "OK" : "Fail")
+			  << '\t' << (stopIndex  == ExpectedTokens[i].stop ?  "OK" : "Fail")
+			  << '\t' << (token->getText()       == ExpectedTokens[i].text ?  "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput(string const& data)
+{
+	t011lexerTraits::InputStreamType* input	= new t011lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t011");
+	if (lxr == NULL)
+		lxr = new t011lexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput: \"" << data << '"' <<std::endl;
+	
+	t011lexerTraits::CommonTokenType *token;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+
+	//except antlr3.NoViableAltException as exc:
+	//    self.assertEqual(exc.unexpectedType, '-')
+	//    self.assertEqual(exc.charPositionInLine, 1)
+	//    self.assertEqual(exc.line, 1)
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
diff --git a/runtime/Cpp/tests/t011lexer.g b/runtime/Cpp/tests/t011lexer.g
new file mode 100644
index 0000000..06a8704
--- /dev/null
+++ b/runtime/Cpp/tests/t011lexer.g
@@ -0,0 +1,27 @@
+lexer grammar t011lexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+#include <iostream>
+}
+@lexer::namespace
+{ Antlr3Test }
+
+IDENTIFIER: 
+        ('a'..'z'|'A'..'Z'|'_') 
+        ('a'..'z'
+        |'A'..'Z'
+        |'0'..'9'
+        |'_'
+            { 
+              std::cout << "Underscore";
+              std::cout << "foo";
+            }
+        )*
+    ;
+
+WS: (' ' | '\n')+;
diff --git a/runtime/Cpp/tests/t012.cpp b/runtime/Cpp/tests/t012.cpp
new file mode 100644
index 0000000..fe9d0c9
--- /dev/null
+++ b/runtime/Cpp/tests/t012.cpp
@@ -0,0 +1,187 @@
+#include "UserTestTraits.hpp"
+#include "t012lexerXMLLexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& in, string const& out);
+int testMalformedInput1(string const& data);
+int testMalformedInput2(string const& data);
+int testMalformedInput3(string const& data);
+string slurp(string const& fileName);
+
+static t012lexerXMLLexer *lxr;
+
+int main (int argc, char *argv[])
+{
+	testValid("t012lexerXML.input", "t012lexerXML.output");
+	testMalformedInput1("<?xml version='1.0'?>\n<document d>\n</document>\n");
+	testMalformedInput2("<?tml version='1.0'?>\n<document>\n</document>\n");
+	testMalformedInput3("<?xml version='1.0'?>\n<docu ment attr=\"foo\">\n</document>\n");
+
+	return 0;
+}
+
+int testValid(string const& inFilename, string const& outFilename)
+{
+	string data = slurp(inFilename);
+	t012lexerXMLLexerTraits::InputStreamType* input	= new t012lexerXMLLexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+											   (ANTLR_UINT8*)inFilename.c_str());
+	if (lxr == NULL)
+		lxr = new t012lexerXMLLexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << inFilename << '"' <<std::endl;
+	for(;;)
+	{
+		t012lexerXMLLexerTraits::CommonTokenType *token = lxr->nextToken();
+		if( token->getType() == t012lexerXMLLexerTokens::EOF_TOKEN)
+			break;
+	}
+	
+	string expOutput = slurp(outFilename);
+	string lxrOutput = lxr->outbuf.str();
+
+	ofstream out("t012.lxr.output");
+	out << lxrOutput;
+
+	std::cout << inFilename << '\t' << (expOutput == lxrOutput ?  "OK" : "Fail") << std::endl;
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput1(string const& data)
+{
+	t012lexerXMLLexerTraits::InputStreamType* input	= new t012lexerXMLLexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t012");
+	if (lxr == NULL)
+		lxr = new t012lexerXMLLexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput1: \"" << data << '"' <<std::endl;
+	
+	t012lexerXMLLexerTraits::CommonTokenType *token;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+
+        // try:
+        //     while True:
+        //         token = lexer.nextToken()
+        //         # Should raise NoViableAltException before hitting EOF
+        //         if token.type == antlr3.EOF:
+        //             self.fail()
+	//
+        // except antlr3.NoViableAltException as exc:
+        //     self.assertEqual(exc.unexpectedType, '>')
+        //     self.assertEqual(exc.charPositionInLine, 11)
+        //     self.assertEqual(exc.line, 2)
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput2(string const& data)
+{
+	t012lexerXMLLexerTraits::InputStreamType* input	= new t012lexerXMLLexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t012");
+	if (lxr == NULL)
+		lxr = new t012lexerXMLLexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput2: \"" << data << '"' <<std::endl;
+	
+	t012lexerXMLLexerTraits::CommonTokenType *token;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+
+        // try:
+        //     while True:
+        //         token = lexer.nextToken()
+        //         # Should raise NoViableAltException before hitting EOF
+        //         if token.type == antlr3.EOF:
+        //             self.fail()
+	//
+        // except antlr3.MismatchedSetException as exc:
+        //     self.assertEqual(exc.unexpectedType, 't')
+        //     self.assertEqual(exc.charPositionInLine, 2)
+        //     self.assertEqual(exc.line, 1)
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+int testMalformedInput3(string const& data)
+{
+	t012lexerXMLLexerTraits::InputStreamType* input	= new t012lexerXMLLexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t012");
+	if (lxr == NULL)
+		lxr = new t012lexerXMLLexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testMalformedInput3: \"" << data << '"' <<std::endl;
+	
+	t012lexerXMLLexerTraits::CommonTokenType *token;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+	token = lxr->nextToken();
+	std::cout << token->getText() << std::endl;
+
+        // try:
+        //     while True:
+        //         token = lexer.nextToken()
+        //         # Should raise NoViableAltException before hitting EOF
+        //         if token.type == antlr3.EOF:
+        //             self.fail()
+	//
+        // except antlr3.NoViableAltException as exc:
+        //     self.assertEqual(exc.unexpectedType, 'a')
+        //     self.assertEqual(exc.charPositionInLine, 11)
+        //     self.assertEqual(exc.line, 2)
+
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+ 
+string slurp(string const& fileName)
+{
+	ifstream ifs(fileName.c_str(), ios::in | ios::binary | ios::ate);
+	ifstream::pos_type fileSize = ifs.tellg();
+	ifs.seekg(0, ios::beg);
+
+	stringstream sstr;
+	sstr << ifs.rdbuf();
+	return sstr.str();
+}
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXML.input b/runtime/Cpp/tests/t012lexerXML.input
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t012lexerXML.input
copy to runtime/Cpp/tests/t012lexerXML.input
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXML.output b/runtime/Cpp/tests/t012lexerXML.output
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t012lexerXML.output
copy to runtime/Cpp/tests/t012lexerXML.output
diff --git a/runtime/Cpp/tests/t012lexerXMLLexer.g b/runtime/Cpp/tests/t012lexerXMLLexer.g
new file mode 100644
index 0000000..5edd2a0
--- /dev/null
+++ b/runtime/Cpp/tests/t012lexerXMLLexer.g
@@ -0,0 +1,156 @@
+lexer grammar t012lexerXMLLexer;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+#include <iostream>
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@lexer::context {
+ImplTraits::StringStreamType outbuf;
+
+void output(const char* line)
+{
+    outbuf << line << "\r\n";
+}
+
+void output(const char* line1, const char *line2)
+{
+    outbuf << line1 << line2 << "\r\n";
+}
+
+void output(const char* line1, ImplTraits::StringType const& line2)
+{
+    outbuf << line1 << line2 << "\r\n";
+}
+
+void appendAttribute(const char* prefix, ImplTraits::StringType const& name, ImplTraits::StringType const& value)
+{
+    outbuf << prefix << name << '=' << value << "\r\n";
+}
+
+void appendString(const char* name, ImplTraits::StringType const& value)
+{
+    outbuf << name << '"' << value << '"' << "\r\n";
+}
+
+}
+DOCUMENT
+    :  XMLDECL? WS? DOCTYPE? WS? ELEMENT WS? 
+    ;
+
+fragment DOCTYPE
+    :
+        '<!DOCTYPE' WS rootElementName=GENERIC_ID 
+        { output("ROOTELEMENT: ", $rootElementName.text);}
+        WS
+        ( 
+            ( 'SYSTEM' WS sys1=VALUE
+                {output("SYSTEM: ", $sys1.text);}
+                
+            | 'PUBLIC' WS pub=VALUE WS sys2=VALUE
+                {output("PUBLIC: ", $pub.text);}
+                {output("SYSTEM: ", $sys2.text);}   
+            )
+            ( WS )?
+        )?
+        ( dtd=INTERNAL_DTD
+            {output("INTERNAL DTD: ", $dtd.text);}
+        )?
+		'>'
+	;
+
+fragment INTERNAL_DTD : '[' (options {greedy=false;} : .)* ']' ;
+
+fragment PI :
+        '<?' target=GENERIC_ID WS? 
+          {output("PI: ", $target.text);}
+        ( ATTRIBUTE WS? )*  '?>'
+	;
+
+fragment XMLDECL :
+        '<?' ('x'|'X') ('m'|'M') ('l'|'L') WS? 
+          {output("XML declaration");}
+        ( ATTRIBUTE WS? )*  '?>'
+	;
+
+
+fragment ELEMENT
+    : ( START_TAG
+            (ELEMENT
+            | t=PCDATA
+                {appendString("PCDATA: ", $t.text);}
+            | t=CDATA
+                {appendString("CDATA: ", $t.text);}
+            | t=COMMENT
+                {appendString("Comment: ", $t.text);}
+            | pi=PI
+            )*
+            END_TAG
+        | EMPTY_ELEMENT
+        )
+    ;
+
+fragment START_TAG 
+    : '<' WS? name=GENERIC_ID WS?
+          {output("Start Tag: ", $name.text);}
+        ( ATTRIBUTE WS? )* '>'
+    ;
+
+fragment EMPTY_ELEMENT 
+    : '<' WS? name=GENERIC_ID WS?
+          {output("Empty Element: ", $name.text);}
+        ( ATTRIBUTE WS? )* '/>'
+    ;
+
+fragment ATTRIBUTE 
+    : name=GENERIC_ID WS? '=' WS? value=VALUE
+        {appendAttribute("Attr: ", $name.text, $value.text);}
+    ;
+
+fragment END_TAG 
+    : '</' WS? name=GENERIC_ID WS? '>'
+        {output("End Tag: ", $name.text);}
+    ;
+
+fragment COMMENT
+	:	'<!--' (options {greedy=false;} : .)* '-->'
+	;
+
+fragment CDATA
+	:	'<![CDATA[' (options {greedy=false;} : .)* ']]>'
+	;
+
+fragment PCDATA : (~'<')+ ; 
+
+fragment VALUE : 
+        ( '\"' (~'\"')* '\"'
+        | '\'' (~'\'')* '\''
+        )
+	;
+
+fragment GENERIC_ID 
+    : ( LETTER | '_' | ':') 
+        ( options {greedy=true;} : LETTER | '0'..'9' | '.' | '-' | '_' | ':' )*
+	;
+
+fragment LETTER
+	: 'a'..'z' 
+	| 'A'..'Z'
+	;
+
+fragment WS  :
+        (   ' '
+        |   '\t'
+        |  ( '\n'
+            |	'\r\n'
+            |	'\r'
+            )
+        )+
+    ;    
+
diff --git a/runtime/Cpp/tests/t013parser.g b/runtime/Cpp/tests/t013parser.g
new file mode 100644
index 0000000..c962132
--- /dev/null
+++ b/runtime/Cpp/tests/t013parser.g
@@ -0,0 +1,36 @@
+grammar t013parser;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+@parser::init {
+self.identifiers = []
+self.reportedErrors = []
+}
+
+@parser::members {
+def foundIdentifier(self, name):
+    self.identifiers.append(name)
+
+def emitErrorMessage(self, msg):
+    self.reportedErrors.append(msg)
+}
+
+document:
+        t=IDENTIFIER {self.foundIdentifier($t.text)}
+        ;
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
diff --git a/runtime/Cpp/tests/t014parser.g b/runtime/Cpp/tests/t014parser.g
new file mode 100644
index 0000000..4fa5d1d
--- /dev/null
+++ b/runtime/Cpp/tests/t014parser.g
@@ -0,0 +1,48 @@
+grammar t014parser;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+@parser::init {
+self.events = []
+self.reportedErrors = []
+}
+
+@parser::members {
+def emitErrorMessage(self, msg):
+    self.reportedErrors.append(msg)
+}
+        
+
+document:
+        ( declaration
+        | call
+        )*
+        EOF
+    ;
+
+declaration:
+        'var' t=IDENTIFIER ';'
+        {self.events.append(('decl', $t.text))}
+    ;
+
+call:
+        t=IDENTIFIER '(' ')' ';'
+        {self.events.append(('call', $t.text))}
+    ;
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
+WS:  (' '|'\r'|'\t'|'\n') {$channel=HIDDEN;};
diff --git a/runtime/Cpp/tests/t015calc.g b/runtime/Cpp/tests/t015calc.g
new file mode 100644
index 0000000..5af908a
--- /dev/null
+++ b/runtime/Cpp/tests/t015calc.g
@@ -0,0 +1,67 @@
+grammar t015calc;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+@header {
+import math
+}
+
+@parser::init {
+self.reportedErrors = []
+}
+
+@parser::members {
+def emitErrorMessage(self, msg):
+    self.reportedErrors.append(msg)
+}
+
+evaluate returns [result]: r=expression {result = r};
+
+expression returns [result]: r=mult (
+    '+' r2=mult {r += r2}
+  | '-' r2=mult {r -= r2}
+  )* {result = r};
+
+mult returns [result]: r=log (
+    '*' r2=log {r *= r2}
+  | '/' r2=log {r /= r2}
+//  | '%' r2=log {r %= r2}
+  )* {result = r};
+
+log returns [result]: 'ln' r=exp {result = math.log(r)}
+    | r=exp {result = r}
+    ;
+
+exp returns [result]: r=atom ('^' r2=atom {r = math.pow(r,r2)} )? {result = r}
+    ;
+
+atom returns [result]:
+    n=INTEGER {result = int($n.text)}
+  | n=DECIMAL {result = float($n.text)} 
+  | '(' r=expression {result = r} ')'
+  | 'PI' {result = math.pi}
+  | 'E' {result = math.e}
+  ;
+
+INTEGER: DIGIT+;
+
+DECIMAL: DIGIT+ '.' DIGIT+;
+
+fragment
+DIGIT: '0'..'9';
+
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN};
diff --git a/runtime/Cpp/tests/t016actions.g b/runtime/Cpp/tests/t016actions.g
new file mode 100644
index 0000000..ca2189a
--- /dev/null
+++ b/runtime/Cpp/tests/t016actions.g
@@ -0,0 +1,44 @@
+grammar t016actions;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+declaration returns [name]
+    :   functionHeader ';'
+        {$name = $functionHeader.name}
+    ;
+
+functionHeader returns [name]
+    :   type ID
+	{$name = $ID.text}
+    ;
+
+type
+    :   'int'   
+    |   'char'  
+    |   'void'
+    ;
+
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+    ;
+
+WS  :   (   ' '
+        |   '\t'
+        |   '\r'
+        |   '\n'
+        )+
+        {$channel=HIDDEN}
+    ;    
diff --git a/runtime/Cpp/tests/t017parser.g b/runtime/Cpp/tests/t017parser.g
new file mode 100644
index 0000000..cd251b9
--- /dev/null
+++ b/runtime/Cpp/tests/t017parser.g
@@ -0,0 +1,104 @@
+grammar t017parser;
+
+options {
+    language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+program
+    :   declaration+
+    ;
+
+declaration
+    :   variable
+    |   functionHeader ';'
+    |   functionHeader block
+    ;
+
+variable
+    :   type declarator ';'
+    ;
+
+declarator
+    :   ID 
+    ;
+
+functionHeader
+    :   type ID '(' ( formalParameter ( ',' formalParameter )* )? ')'
+    ;
+
+formalParameter
+    :   type declarator        
+    ;
+
+type
+    :   'int'   
+    |   'char'  
+    |   'void'
+    |   ID        
+    ;
+
+block
+    :   '{'
+            variable*
+            stat*
+        '}'
+    ;
+
+stat: forStat
+    | expr ';'      
+    | block
+    | assignStat ';'
+    | ';'
+    ;
+
+forStat
+    :   'for' '(' assignStat ';' expr ';' assignStat ')' block        
+    ;
+
+assignStat
+    :   ID '=' expr        
+    ;
+
+expr:   condExpr
+    ;
+
+condExpr
+    :   aexpr ( ('==' | '<') aexpr )?
+    ;
+
+aexpr
+    :   atom ( '+' atom )*
+    ;
+
+atom
+    : ID      
+    | INT      
+    | '(' expr ')'
+    ; 
+
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+    ;
+
+INT :	('0'..'9')+
+    ;
+
+WS  :   (   ' '
+        |   '\t'
+        |   '\r'
+        |   '\n'
+        )+
+        {$channel=HIDDEN}
+    ;    
diff --git a/runtime/Cpp/tests/t018llstar.g b/runtime/Cpp/tests/t018llstar.g
new file mode 100644
index 0000000..e682d2c
--- /dev/null
+++ b/runtime/Cpp/tests/t018llstar.g
@@ -0,0 +1,124 @@
+grammar t018llstar;
+
+options {
+    language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+@header {
+from io import StringIO
+}
+
+@init {
+self.output = StringIO()
+}
+
+program
+    :   declaration+
+    ;
+
+/** In this rule, the functionHeader left prefix on the last two
+ *  alternatives is not LL(k) for a fixed k.  However, it is
+ *  LL(*).  The LL(*) algorithm simply scans ahead until it sees
+ *  either the ';' or the '{' of the block and then it picks
+ *  the appropriate alternative.  Lookahead can be arbitrarily
+ *  long in theory, but is <=10 in most cases.  Works great.
+ *  Use ANTLRWorks to see the lookahead use (step by Location)
+ *  and look for blue tokens in the input window pane. :)
+ */
+declaration
+    :   variable
+    |   functionHeader ';'
+	{self.output.write($functionHeader.name+" is a declaration\n")}
+    |   functionHeader block
+	{self.output.write($functionHeader.name+" is a definition\n")}
+    ;
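+// Illustrative note (hypothetical inputs, not part of the original test data):
+// for "int f() { ... }" versus "int f();", the two functionHeader alternatives
+// above look identical until the token after the ')', so the LL(*) prediction
+// simply scans ahead to the ';' or '{' before committing to an alternative.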
+
+variable
+    :   type declarator ';'
+    ;
+
+declarator
+    :   ID 
+    ;
+
+functionHeader returns [name]
+    :   type ID '(' ( formalParameter ( ',' formalParameter )* )? ')'
+	{$name = $ID.text}
+    ;
+
+formalParameter
+    :   type declarator        
+    ;
+
+type
+    :   'int'   
+    |   'char'  
+    |   'void'
+    |   ID        
+    ;
+
+block
+    :   '{'
+            variable*
+            stat*
+        '}'
+    ;
+
+stat: forStat
+    | expr ';'      
+    | block
+    | assignStat ';'
+    | ';'
+    ;
+
+forStat
+    :   'for' '(' assignStat ';' expr ';' assignStat ')' block        
+    ;
+
+assignStat
+    :   ID '=' expr        
+    ;
+
+expr:   condExpr
+    ;
+
+condExpr
+    :   aexpr ( ('==' | '<') aexpr )?
+    ;
+
+aexpr
+    :   atom ( '+' atom )*
+    ;
+
+atom
+    : ID      
+    | INT      
+    | '(' expr ')'
+    ; 
+
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+    ;
+
+INT :	('0'..'9')+
+    ;
+
+WS  :   (   ' '
+        |   '\t'
+        |   '\r'
+        |   '\n'
+        )+
+        {$channel=HIDDEN}
+    ;    
diff --git a/runtime/Cpp/tests/t019lexer.g b/runtime/Cpp/tests/t019lexer.g
new file mode 100644
index 0000000..192f993
--- /dev/null
+++ b/runtime/Cpp/tests/t019lexer.g
@@ -0,0 +1,71 @@
+lexer grammar t019lexer;
+options {
+    language=Cpp;
+    filter=true;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+IMPORT
+	:	'import' WS name=QIDStar WS? ';'
+	;
+	
+/** Avoids having "return foo;" match as a field */
+RETURN
+	:	'return' (options {greedy=false;}:.)* ';'
+	;
+
+CLASS
+	:	'class' WS name=ID WS? ('extends' WS QID WS?)?
+		('implements' WS QID WS? (',' WS? QID WS?)*)? '{'
+	;
+	
+COMMENT
+    :   '/*' (options {greedy=false;} : . )* '*/'
+    ;
+
+STRING
+    :	'"' (options {greedy=false;}: ESC | .)* '"'
+	;
+
+CHAR
+	:	'\'' (options {greedy=false;}: ESC | .)* '\''
+	;
+
+WS  :   (' '|'\t'|'\n')+
+    ;
+
+fragment
+QID :	ID ('.' ID)*
+	;
+	
+/** QID cannot see beyond the end of the token, so writing QID '.*'? elsewhere
+ *  would never match: the k=1 lookahead on '.' inside the QID loop keeps it
+ *  looping.  This rule compensates for that.
+ */
+fragment
+QIDStar
+	:	ID ('.' ID)* '.*'?
+	;
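+// Illustrative example (assumed input, not from the original tests): in
+// "import java.util.*;" the IMPORT rule's QIDStar label matches the whole
+// "java.util.*", including the trailing ".*" that a plain QID could not cover.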
+
+fragment
+TYPE:   QID '[]'?
+    ;
+    
+fragment
+ARG :   TYPE WS ID
+    ;
+
+fragment
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+    ;
+
+fragment
+ESC	:	'\\' ('"'|'\''|'\\')
+	;
+
diff --git a/runtime/Cpp/tests/t020fuzzyLexer.g b/runtime/Cpp/tests/t020fuzzyLexer.g
new file mode 100644
index 0000000..0fc913f
--- /dev/null
+++ b/runtime/Cpp/tests/t020fuzzyLexer.g
@@ -0,0 +1,103 @@
+lexer grammar t020fuzzyLexer;
+options {
+    language=Cpp;
+    filter=true;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@header {
+from io import StringIO
+}
+
+@init {
+self.output = StringIO()
+}
+
+IMPORT
+	:	'import' WS name=QIDStar WS? ';'
+	;
+	
+/** Avoids having "return foo;" match as a field */
+RETURN
+	:	'return' (options {greedy=false;}:.)* ';'
+	;
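+// Illustrative note (assumed behaviour sketch): by swallowing everything up to
+// the ';', this rule keeps the "foo" in "return foo;" from being re-matched by
+// the FIELD and CALL rules below in this filter-mode lexer.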
+
+CLASS
+	:	'class' WS name=ID WS? ('extends' WS QID WS?)?
+		('implements' WS QID WS? (',' WS? QID WS?)*)? '{'
+        {self.output.write("found class "+$name.text+"\n")}
+	;
+	
+METHOD
+    :   TYPE WS name=ID WS? '(' ( ARG WS? (',' WS? ARG WS?)* )? ')' WS? 
+       ('throws' WS QID WS? (',' WS? QID WS?)*)? '{'
+        {self.output.write("found method "+$name.text+"\n");}
+    ;
+
+FIELD
+    :   TYPE WS name=ID '[]'? WS? (';'|'=')
+        {self.output.write("found var "+$name.text+"\n");}
+    ;
+
+STAT:	('if'|'while'|'switch'|'for') WS? '(' ;
+	
+CALL
+    :   name=QID WS? '('
+        {self.output.write("found call "+$name.text+"\n");}
+    ;
+
+COMMENT
+    :   '/*' (options {greedy=false;} : . )* '*/'
+        {self.output.write("found comment "+self.getText()+"\n");}
+    ;
+
+SL_COMMENT
+    :   '//' (options {greedy=false;} : . )* '\n'
+        {self.output.write("found // comment "+self.getText()+"\n");}
+    ;
+	
+STRING
+	:	'"' (options {greedy=false;}: ESC | .)* '"'
+	;
+
+CHAR
+	:	'\'' (options {greedy=false;}: ESC | .)* '\''
+	;
+
+WS  :   (' '|'\t'|'\n')+
+    ;
+
+fragment
+QID :	ID ('.' ID)*
+	;
+	
+/** QID cannot see beyond the end of the token, so writing QID '.*'? elsewhere
+ *  would never match: the k=1 lookahead on '.' inside the QID loop keeps it
+ *  looping.  This rule compensates for that.
+ */
+fragment
+QIDStar
+	:	ID ('.' ID)* '.*'?
+	;
+
+fragment
+TYPE:   QID '[]'?
+    ;
+    
+fragment
+ARG :   TYPE WS ID
+    ;
+
+fragment
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+    ;
+
+fragment
+ESC	:	'\\' ('"'|'\''|'\\')
+	;
diff --git a/runtime/Cpp/tests/t021hoist.g b/runtime/Cpp/tests/t021hoist.g
new file mode 100644
index 0000000..51201c1
--- /dev/null
+++ b/runtime/Cpp/tests/t021hoist.g
@@ -0,0 +1,50 @@
+grammar t021hoist;
+options {
+    language=Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+/* When this is true, 'enum' is seen as a keyword; when false, it is an identifier. */
+@parser::init {
+self.enableEnum = False
+}
+
+stat returns [enumIs]
+    : identifier    {enumIs = "ID"}
+    | enumAsKeyword {enumIs = "keyword"}
+    ;
+
+identifier
+    : ID
+    | enumAsID
+    ;
+
+enumAsKeyword : {self.enableEnum}? 'enum' ;
+
+enumAsID : {not self.enableEnum}? 'enum' ;
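+// Illustrative note (assumed usage): with enableEnum left False, the hoisted
+// predicate on enumAsKeyword fails, so the input "enum" reaches enumAsID and
+// stat returns "ID"; setting enableEnum to True makes the same input return
+// "keyword" instead.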
+
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+    ;
+
+INT :	('0'..'9')+
+    ;
+
+WS  :   (   ' '
+        |   '\t'
+        |   '\r'
+        |   '\n'
+        )+
+        {$channel=HIDDEN}
+    ;    
diff --git a/runtime/Cpp/tests/t022scopes.g b/runtime/Cpp/tests/t022scopes.g
new file mode 100644
index 0000000..b7870c0
--- /dev/null
+++ b/runtime/Cpp/tests/t022scopes.g
@@ -0,0 +1,138 @@
+grammar t022scopes;
+
+options {
+    language=Cpp;
+}
+
+/* global scopes */
+scope aScope {
+names
+}
+
+@lexer::includes{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+a
+scope aScope;
+    :   {$aScope::names = [];} ID*
+    ;
+
+
+/* rule scopes, from the book, final beta, p.147 */
+
+b[v]
+scope {x}
+    : {$b::x = v;} b2
+    ;
+
+b2
+    : b3
+    ;
+
+b3 
+    : {$b::x}?=> ID // only visible, if b was called with True
+    | NUM
+    ;
+
+
+/* rule scopes, from the book, final beta, p.148 */
+
+c returns [res]
+scope {
+    symbols
+}
+@init {
+    $c::symbols = set();
+}
+    : '{' c1* c2+ '}'
+        { $res = $c::symbols; }
+    ;
+
+c1
+    : 'int' ID {$c::symbols.add($ID.text)} ';'
+    ;
+
+c2
+    : ID '=' NUM ';'
+        {
+            if $ID.text not in $c::symbols:
+                raise RuntimeError($ID.text)
+        }
+    ;
+
+/* recursive rule scopes, from the book, final beta, p.150 */
+
+d returns [res]
+scope {
+    symbols
+}
+@init {
+    $d::symbols = set();
+}
+    : '{' d1* d2* '}'
+        { $res = $d::symbols; }
+    ;
+
+d1
+    : 'int' ID {$d::symbols.add($ID.text)} ';'
+    ;
+
+d2
+    : ID '=' NUM ';'
+        {
+            for s in reversed(range(len($d))):
+                if $ID.text in $d[s]::symbols:
+                    break
+            else:
+                raise RuntimeError($ID.text)
+        }
+    | d
+    ;
+
+/* recursive rule scopes, access bottom-most scope */
+
+e returns [res]
+scope {
+    a
+}
+@after {
+    $res = $e::a;
+}
+    : NUM { $e[0]::a = int($NUM.text); }
+    | '{' e '}'
+    ;
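+/* Illustrative note (assumed input): for "{ { 12 } }" only the innermost NUM
+   alternative runs its action, writing 12 into the bottom-most scope via
+   $e[0]::a, so the top-level invocation's @after block returns 12 in $res. */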
+
+
+/* recursive rule scopes, access with negative index */
+
+f returns [res]
+scope {
+    a
+}
+@after {
+    $res = $f::a;
+}
+    : NUM { $f[-2]::a = int($NUM.text); }
+    | '{' f '}'
+    ;
+
+
+/* tokens */
+
+ID  :   ('a'..'z')+
+    ;
+
+NUM :   ('0'..'9')+
+    ;
+
+WS  :   (' '|'\n'|'\r')+ {$channel=HIDDEN}
+    ;
diff --git a/runtime/Cpp/tests/t023scopes.g b/runtime/Cpp/tests/t023scopes.g
new file mode 100644
index 0000000..f5b89fb
--- /dev/null
+++ b/runtime/Cpp/tests/t023scopes.g
@@ -0,0 +1,31 @@
+grammar t023scopes;
+
+options {
+    language=Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+prog
+scope {
+name
+}
+    :   ID {$prog::name=$ID.text;}
+    ;
+
+ID  :   ('a'..'z')+
+    ;
+
+WS  :   (' '|'\n'|'\r')+ {$channel=HIDDEN}
+    ;
diff --git a/runtime/Cpp/tests/t024finally.g b/runtime/Cpp/tests/t024finally.g
new file mode 100644
index 0000000..2cbda0a
--- /dev/null
+++ b/runtime/Cpp/tests/t024finally.g
@@ -0,0 +1,32 @@
+grammar t024finally;
+
+options {
+    language=Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+prog returns [events]
+@init {events = []}
+@after {events.append('after')}
+    :   ID {raise RuntimeError}
+    ;
+    catch [RuntimeError] {events.append('catch')}
+    finally {events.append('finally')}
+
+ID  :   ('a'..'z')+
+    ;
+
+WS  :   (' '|'\n'|'\r')+ {$channel=HIDDEN}
+    ;
diff --git a/runtime/Cpp/tests/t025lexerRulePropertyRef.g b/runtime/Cpp/tests/t025lexerRulePropertyRef.g
new file mode 100644
index 0000000..3271dd4
--- /dev/null
+++ b/runtime/Cpp/tests/t025lexerRulePropertyRef.g
@@ -0,0 +1,25 @@
+lexer grammar t025lexerRulePropertyRef;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@lexer::init {
+self.properties = []
+}
+
+IDENTIFIER: 
+        ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+        {
+self.properties.append(
+    ($text, $type, $line, $pos, $index, $channel, $start, $stop)
+)
+        }
+    ;
+WS: (' ' | '\n')+;
diff --git a/runtime/Cpp/tests/t026actions.g b/runtime/Cpp/tests/t026actions.g
new file mode 100644
index 0000000..d699c06
--- /dev/null
+++ b/runtime/Cpp/tests/t026actions.g
@@ -0,0 +1,52 @@
+grammar t026actions;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+@lexer::init {
+    self.foobar = 'attribute;'
+}
+
+prog
+@init {
+    self.capture('init;')
+}
+@after {
+    self.capture('after;')
+}
+    :   IDENTIFIER EOF
+    ;
+    catch [ RecognitionException as exc ] {
+        self.capture('catch;')
+        raise
+    }
+    finally {
+        self.capture('finally;')
+    }
+
+
+IDENTIFIER
+    : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+        {
+            # a comment
+          self.capture('action;')
+            self.capture('{!r} {!r} {!r} {!r} {!r} {!r} {!r} {!r};'.format($text, $type, $line, $pos, $index, $channel, $start, $stop))
+            if True:
+                self.capture(self.foobar)
+        }
+    ;
+
+WS: (' ' | '\n')+;
diff --git a/runtime/Cpp/tests/t027eof.g b/runtime/Cpp/tests/t027eof.g
new file mode 100644
index 0000000..6cf6d6a
--- /dev/null
+++ b/runtime/Cpp/tests/t027eof.g
@@ -0,0 +1,15 @@
+lexer grammar t027eof;
+
+options {
+    language=Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+END: EOF;
+SPACE: ' ';
diff --git a/runtime/Cpp/tests/t029synpredgate.g b/runtime/Cpp/tests/t029synpredgate.g
new file mode 100644
index 0000000..5a1a4ab
--- /dev/null
+++ b/runtime/Cpp/tests/t029synpredgate.g
@@ -0,0 +1,23 @@
+lexer grammar t029synpredgate;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+FOO
+    : ('ab')=> A
+    | ('ac')=> B
+    ;
+
+fragment
+A: 'a';
+
+fragment
+B: 'a';
+
diff --git a/runtime/Cpp/tests/t030specialStates.g b/runtime/Cpp/tests/t030specialStates.g
new file mode 100644
index 0000000..62d2dbf
--- /dev/null
+++ b/runtime/Cpp/tests/t030specialStates.g
@@ -0,0 +1,39 @@
+grammar t030specialStates;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+@init {
+self.cond = True
+}
+
+@members {
+def recover(self, input, re):
+    # no error recovery yet, just crash!
+    raise re
+}
+
+r
+    : ( {self.cond}? NAME
+        | {not self.cond}? NAME WS+ NAME
+        )
+        ( WS+ NAME )?
+        EOF
+    ;
+
+NAME: ('a'..'z') ('a'..'z' | '0'..'9')+;
+NUMBER: ('0'..'9')+;
+WS: ' '+;
diff --git a/runtime/Cpp/tests/t031emptyAlt.g b/runtime/Cpp/tests/t031emptyAlt.g
new file mode 100644
index 0000000..23b32f3
--- /dev/null
+++ b/runtime/Cpp/tests/t031emptyAlt.g
@@ -0,0 +1,29 @@
+grammar t031emptyAlt;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+r
+    : NAME 
+        ( {self.cond}?=> WS+ NAME
+        | 
+        )
+        EOF
+    ;
+
+NAME: ('a'..'z') ('a'..'z' | '0'..'9')+;
+NUMBER: ('0'..'9')+;
+WS: ' '+;
diff --git a/runtime/Cpp/tests/t032subrulePredict.g b/runtime/Cpp/tests/t032subrulePredict.g
new file mode 100644
index 0000000..4b760e1
--- /dev/null
+++ b/runtime/Cpp/tests/t032subrulePredict.g
@@ -0,0 +1,21 @@
+grammar t032subrulePredict;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+a: 'BEGIN' b WS+ 'END';
+b: ( WS+ 'A' )+;
+WS: ' ';
diff --git a/runtime/Cpp/tests/t033backtracking.g b/runtime/Cpp/tests/t033backtracking.g
new file mode 100644
index 0000000..a5b233b
--- /dev/null
+++ b/runtime/Cpp/tests/t033backtracking.g
@@ -0,0 +1,528 @@
+grammar t033backtracking;
+options {
+    language=Cpp;
+    backtrack=true;
+    memoize=true;
+    k=2;
+}
+
+scope Symbols {
+	types;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+@members {
+    def isTypeName(self, name):
+        for scope in reversed(self.Symbols_stack):
+            if name in scope.types:
+                return True
+
+        return False
+
+}
+
+translation_unit
+scope Symbols; // entire file is a scope
+@init {
+  $Symbols::types = set()
+}
+	: external_declaration+
+	;
+
+/** Either a function definition or any other kind of C decl/def.
+ *  The LL(*) analysis algorithm fails to deal with this due to
+ *  recursion in the declarator rules.  I'm putting in a
+ *  manual predicate here so that we don't backtrack over
+ *  the entire function.  Further, you get better errors, since errors
+ *  inside the function body no longer make it fail to predict that it
+ *  is a function (previously this produced weird errors).
+ *  Remember: the goal is to avoid backtracking like the plague
+ *  because it makes debugging, actions, and errors harder.
+ *
+ *  Note that k=1 results in a much smaller predictor for the 
+ *  fixed lookahead; k=2 made a few extra thousand lines. ;)
+ *  I'll have to optimize that in the future.
+ */
+external_declaration
+options {k=1;}
+	: ( declaration_specifiers? declarator declaration* '{' )=> function_definition
+	| declaration
+	;
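+// Illustrative note (assumed inputs): for "int f() { ..." the syntactic
+// predicate above scans ahead to the '{' and succeeds, selecting
+// function_definition, whereas "int f();" fails the predicate and falls
+// through to the plain declaration alternative.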
+
+function_definition
+scope Symbols; // put parameters and locals into same scope for now
+@init {
+  $Symbols::types = set()
+}
+	:	declaration_specifiers? declarator
+// 		(	declaration+ compound_statement	// K&R style
+// 		|	compound_statement				// ANSI style
+// 		)
+	;
+
+declaration
+scope {
+  isTypedef;
+}
+@init {
+  $declaration::isTypedef = False
+}
+	: 'typedef' declaration_specifiers? {$declaration::isTypedef = True}
+	  init_declarator_list ';' // special case, looking for typedef	
+	| declaration_specifiers init_declarator_list? ';'
+	;
+
+declaration_specifiers
+	:   (   storage_class_specifier
+		|   type_specifier
+        |   type_qualifier
+        )+
+	;
+
+init_declarator_list
+	: init_declarator (',' init_declarator)*
+	;
+
+init_declarator
+	: declarator //('=' initializer)?
+	;
+
+storage_class_specifier
+	: 'extern'
+	| 'static'
+	| 'auto'
+	| 'register'
+	;
+
+type_specifier
+	: 'void'
+	| 'char'
+	| 'short'
+	| 'int'
+	| 'long'
+	| 'float'
+	| 'double'
+	| 'signed'
+	| 'unsigned'
+// 	| struct_or_union_specifier
+// 	| enum_specifier
+	| type_id
+	;
+
+type_id
+    :   {self.isTypeName(self.input.LT(1).getText())}? IDENTIFIER
+//    	{System.out.println($IDENTIFIER.text+" is a type");}
+    ;
+
+// struct_or_union_specifier
+// options {k=3;}
+// scope Symbols; // structs are scopes
+// @init {
+//   $Symbols::types = set()
+// }
+// 	: struct_or_union IDENTIFIER? '{' struct_declaration_list '}'
+// 	| struct_or_union IDENTIFIER
+// 	;
+
+// struct_or_union
+// 	: 'struct'
+// 	| 'union'
+// 	;
+
+// struct_declaration_list
+// 	: struct_declaration+
+// 	;
+
+// struct_declaration
+// 	: specifier_qualifier_list struct_declarator_list ';'
+// 	;
+
+// specifier_qualifier_list
+// 	: ( type_qualifier | type_specifier )+
+// 	;
+
+// struct_declarator_list
+// 	: struct_declarator (',' struct_declarator)*
+// 	;
+
+// struct_declarator
+// 	: declarator (':' constant_expression)?
+// 	| ':' constant_expression
+// 	;
+
+// enum_specifier
+// options {k=3;}
+// 	: 'enum' '{' enumerator_list '}'
+// 	| 'enum' IDENTIFIER '{' enumerator_list '}'
+// 	| 'enum' IDENTIFIER
+// 	;
+
+// enumerator_list
+// 	: enumerator (',' enumerator)*
+// 	;
+
+// enumerator
+// 	: IDENTIFIER ('=' constant_expression)?
+// 	;
+
+type_qualifier
+	: 'const'
+	| 'volatile'
+	;
+
+declarator
+	: pointer? direct_declarator
+	| pointer
+	;
+
+direct_declarator
+	:   (	IDENTIFIER
+			{
+			if $declaration and $declaration::isTypedef:
+				$Symbols::types.add($IDENTIFIER.text)
+				print("define type "+$IDENTIFIER.text)
+			}
+		|	'(' declarator ')'
+		)
+        declarator_suffix*
+	;
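+// Illustrative note (assumed input): given "typedef int foo; foo x;", the first
+// declaration runs with $declaration::isTypedef set, so the IDENTIFIER action
+// above adds "foo" to $Symbols::types; the later "foo x;" then satisfies the
+// isTypeName predicate in type_id and is parsed as an ordinary declaration.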
+
+declarator_suffix
+	:   /*'[' constant_expression ']'
+    |*/   '[' ']'
+//     |   '(' parameter_type_list ')'
+//     |   '(' identifier_list ')'
+    |   '(' ')'
+	;
+
+pointer
+	: '*' type_qualifier+ pointer?
+	| '*' pointer
+	| '*'
+	;
+
+// parameter_type_list
+// 	: parameter_list (',' '...')?
+// 	;
+
+// parameter_list
+// 	: parameter_declaration (',' parameter_declaration)*
+// 	;
+
+// parameter_declaration
+// 	: declaration_specifiers (declarator|abstract_declarator)*
+// 	;
+
+// identifier_list
+// 	: IDENTIFIER (',' IDENTIFIER)*
+// 	;
+
+// type_name
+// 	: specifier_qualifier_list abstract_declarator?
+// 	;
+
+// abstract_declarator
+// 	: pointer direct_abstract_declarator?
+// 	| direct_abstract_declarator
+// 	;
+
+// direct_abstract_declarator
+// 	:	( '(' abstract_declarator ')' | abstract_declarator_suffix ) abstract_declarator_suffix*
+// 	;
+
+// abstract_declarator_suffix
+// 	:	'[' ']'
+// 	|	'[' constant_expression ']'
+// 	|	'(' ')'
+// 	|	'(' parameter_type_list ')'
+// 	;
+	
+// initializer
+// 	: assignment_expression
+// 	| '{' initializer_list ','? '}'
+// 	;
+
+// initializer_list
+// 	: initializer (',' initializer)*
+// 	;
+
+// // E x p r e s s i o n s
+
+// argument_expression_list
+// 	:   assignment_expression (',' assignment_expression)*
+// 	;
+
+// additive_expression
+// 	: (multiplicative_expression) ('+' multiplicative_expression | '-' multiplicative_expression)*
+// 	;
+
+// multiplicative_expression
+// 	: (cast_expression) ('*' cast_expression | '/' cast_expression | '%' cast_expression)*
+// 	;
+
+// cast_expression
+// 	: '(' type_name ')' cast_expression
+// 	| unary_expression
+// 	;
+
+// unary_expression
+// 	: postfix_expression
+// 	| '++' unary_expression
+// 	| '--' unary_expression
+// 	| unary_operator cast_expression
+// 	| 'sizeof' unary_expression
+// 	| 'sizeof' '(' type_name ')'
+// 	;
+
+// postfix_expression
+// 	:   primary_expression
+//         (   '[' expression ']'
+//         |   '(' ')'
+//         |   '(' argument_expression_list ')'
+//         |   '.' IDENTIFIER
+//         |   '*' IDENTIFIER
+//         |   '->' IDENTIFIER
+//         |   '++'
+//         |   '--'
+//         )*
+// 	;
+
+// unary_operator
+// 	: '&'
+// 	| '*'
+// 	| '+'
+// 	| '-'
+// 	| '~'
+// 	| '!'
+// 	;
+
+// primary_expression
+// 	: IDENTIFIER
+// 	| constant
+// 	| '(' expression ')'
+// 	;
+
+// constant
+//     :   HEX_LITERAL
+//     |   OCTAL_LITERAL
+//     |   DECIMAL_LITERAL
+//     |	CHARACTER_LITERAL
+// 	|	STRING_LITERAL
+//     |   FLOATING_POINT_LITERAL
+//     ;
+
+// /////
+
+// expression
+// 	: assignment_expression (',' assignment_expression)*
+// 	;
+
+// constant_expression
+// 	: conditional_expression
+// 	;
+
+// assignment_expression
+// 	: lvalue assignment_operator assignment_expression
+// 	| conditional_expression
+// 	;
+	
+// lvalue
+// 	:	unary_expression
+// 	;
+
+// assignment_operator
+// 	: '='
+// 	| '*='
+// 	| '/='
+// 	| '%='
+// 	| '+='
+// 	| '-='
+// 	| '<<='
+// 	| '>>='
+// 	| '&='
+// 	| '^='
+// 	| '|='
+// 	;
+
+// conditional_expression
+// 	: logical_or_expression ('?' expression ':' conditional_expression)?
+// 	;
+
+// logical_or_expression
+// 	: logical_and_expression ('||' logical_and_expression)*
+// 	;
+
+// logical_and_expression
+// 	: inclusive_or_expression ('&&' inclusive_or_expression)*
+// 	;
+
+// inclusive_or_expression
+// 	: exclusive_or_expression ('|' exclusive_or_expression)*
+// 	;
+
+// exclusive_or_expression
+// 	: and_expression ('^' and_expression)*
+// 	;
+
+// and_expression
+// 	: equality_expression ('&' equality_expression)*
+// 	;
+// equality_expression
+// 	: relational_expression (('=='|'!=') relational_expression)*
+// 	;
+
+// relational_expression
+// 	: shift_expression (('<'|'>'|'<='|'>=') shift_expression)*
+// 	;
+
+// shift_expression
+// 	: additive_expression (('<<'|'>>') additive_expression)*
+// 	;
+
+// // S t a t e m e n t s
+
+// statement
+// 	: labeled_statement
+// 	| compound_statement
+// 	| expression_statement
+// 	| selection_statement
+// 	| iteration_statement
+// 	| jump_statement
+// 	;
+
+// labeled_statement
+// 	: IDENTIFIER ':' statement
+// 	| 'case' constant_expression ':' statement
+// 	| 'default' ':' statement
+// 	;
+
+// compound_statement
+// scope Symbols; // blocks have a scope of symbols
+// @init {
+//   $Symbols::types = {}
+// }
+// 	: '{' declaration* statement_list? '}'
+// 	;
+
+// statement_list
+// 	: statement+
+// 	;
+
+// expression_statement
+// 	: ';'
+// 	| expression ';'
+// 	;
+
+// selection_statement
+// 	: 'if' '(' expression ')' statement (options {k=1; backtrack=false;}:'else' statement)?
+// 	| 'switch' '(' expression ')' statement
+// 	;
+
+// iteration_statement
+// 	: 'while' '(' expression ')' statement
+// 	| 'do' statement 'while' '(' expression ')' ';'
+// 	| 'for' '(' expression_statement expression_statement expression? ')' statement
+// 	;
+
+// jump_statement
+// 	: 'goto' IDENTIFIER ';'
+// 	| 'continue' ';'
+// 	| 'break' ';'
+// 	| 'return' ';'
+// 	| 'return' expression ';'
+// 	;
+
+IDENTIFIER
+	:	LETTER (LETTER|'0'..'9')*
+	;
+	
+fragment
+LETTER
+	:	'$'
+	|	'A'..'Z'
+	|	'a'..'z'
+	|	'_'
+	;
+
+CHARACTER_LITERAL
+    :   '\'' ( EscapeSequence | ~('\''|'\\') ) '\''
+    ;
+
+STRING_LITERAL
+    :  '"' ( EscapeSequence | ~('\\'|'"') )* '"'
+    ;
+
+HEX_LITERAL : '0' ('x'|'X') HexDigit+ IntegerTypeSuffix? ;
+
+DECIMAL_LITERAL : ('0' | '1'..'9' '0'..'9'*) IntegerTypeSuffix? ;
+
+OCTAL_LITERAL : '0' ('0'..'7')+ IntegerTypeSuffix? ;
+
+fragment
+HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;
+
+fragment
+IntegerTypeSuffix
+	:	('u'|'U')? ('l'|'L')
+	|	('u'|'U')  ('l'|'L')?
+	;
+
+FLOATING_POINT_LITERAL
+    :   ('0'..'9')+ '.' ('0'..'9')* Exponent? FloatTypeSuffix?
+    |   '.' ('0'..'9')+ Exponent? FloatTypeSuffix?
+    |   ('0'..'9')+ Exponent FloatTypeSuffix?
+    |   ('0'..'9')+ Exponent? FloatTypeSuffix
+	;
+
+fragment
+Exponent : ('e'|'E') ('+'|'-')? ('0'..'9')+ ;
+
+fragment
+FloatTypeSuffix : ('f'|'F'|'d'|'D') ;
+
+fragment
+EscapeSequence
+    :   '\\' ('b'|'t'|'n'|'f'|'r'|'\"'|'\''|'\\')
+    |   OctalEscape
+    ;
+
+fragment
+OctalEscape
+    :   '\\' ('0'..'3') ('0'..'7') ('0'..'7')
+    |   '\\' ('0'..'7') ('0'..'7')
+    |   '\\' ('0'..'7')
+    ;
+
+fragment
+UnicodeEscape
+    :   '\\' 'u' HexDigit HexDigit HexDigit HexDigit
+    ;
+
+WS  :  (' '|'\r'|'\t'|'\u000C'|'\n') {$channel=HIDDEN;}
+    ;
+
+COMMENT
+    :   '/*' ( options {greedy=false;} : . )* '*/' {$channel=HIDDEN;}
+    ;
+
+LINE_COMMENT
+    : '//' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;}
+    ;
+
+// ignore #line info for now
+LINE_COMMAND 
+    : '#' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;}
+    ;
+
diff --git a/runtime/Cpp/tests/t034tokenLabelPropertyRef.g b/runtime/Cpp/tests/t034tokenLabelPropertyRef.g
new file mode 100644
index 0000000..6a03bf2
--- /dev/null
+++ b/runtime/Cpp/tests/t034tokenLabelPropertyRef.g
@@ -0,0 +1,43 @@
+grammar t034tokenLabelPropertyRef;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+a: t=A
+        {
+            print($t.text)
+            print($t.type)
+            print($t.line)
+            print($t.pos)
+            print($t.channel)
+            print($t.index)
+            #print($t.tree)
+        }
+    ;
+
+A: 'a'..'z';
+
+WS  :
+        (   ' '
+        |   '\t'
+        |  ( '\n'
+            |	'\r\n'
+            |	'\r'
+            )
+        )+
+        { $channel = HIDDEN }
+    ;    
+
diff --git a/runtime/Cpp/tests/t035ruleLabelPropertyRef.g b/runtime/Cpp/tests/t035ruleLabelPropertyRef.g
new file mode 100644
index 0000000..c3b9eb9
--- /dev/null
+++ b/runtime/Cpp/tests/t035ruleLabelPropertyRef.g
@@ -0,0 +1,29 @@
+grammar t035ruleLabelPropertyRef;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+a returns [bla]: t=b
+        {
+            $bla = $t.start, $t.stop, $t.text
+        }
+    ;
+
+b: A+;
+
+A: 'a'..'z';
+
+WS: ' '+  { $channel = HIDDEN };
diff --git a/runtime/Cpp/tests/t036multipleReturnValues.g b/runtime/Cpp/tests/t036multipleReturnValues.g
new file mode 100644
index 0000000..8c4748c
--- /dev/null
+++ b/runtime/Cpp/tests/t036multipleReturnValues.g
@@ -0,0 +1,38 @@
+grammar t036multipleReturnValues;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+a returns [foo, bar]: A
+        {
+            $foo = "foo";
+            $bar = "bar";
+        }
+    ;
+
+A: 'a'..'z';
+
+WS  :
+        (   ' '
+        |   '\t'
+        |  ( '\n'
+            |	'\r\n'
+            |	'\r'
+            )
+        )+
+        { $channel = HIDDEN }
+    ;    
+
diff --git a/runtime/Cpp/tests/t037rulePropertyRef.g b/runtime/Cpp/tests/t037rulePropertyRef.g
new file mode 100644
index 0000000..b510092
--- /dev/null
+++ b/runtime/Cpp/tests/t037rulePropertyRef.g
@@ -0,0 +1,28 @@
+grammar t037rulePropertyRef;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+a returns [bla]
+@after {
+    $bla = $start, $stop, $text
+}
+    : A+
+    ;
+
+A: 'a'..'z';
+
+WS: ' '+  { $channel = HIDDEN };
diff --git a/runtime/Cpp/tests/t038lexerRuleLabel.g b/runtime/Cpp/tests/t038lexerRuleLabel.g
new file mode 100644
index 0000000..28dbedc
--- /dev/null
+++ b/runtime/Cpp/tests/t038lexerRuleLabel.g
@@ -0,0 +1,35 @@
+lexer grammar t038lexerRuleLabel;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+A: 'a'..'z' WS '0'..'9'
+        {
+            print($WS)
+            print($WS.type)
+            print($WS.line)
+            print($WS.pos)
+            print($WS.channel)
+            print($WS.index)
+            print($WS.text)
+        }
+    ;
+
+fragment WS  :
+        (   ' '
+        |   '\t'
+        |  ( '\n'
+            |	'\r\n'
+            |	'\r'
+            )
+        )+
+        { $channel = HIDDEN }
+    ;    
+
diff --git a/runtime/Cpp/tests/t039.cpp b/runtime/Cpp/tests/t039.cpp
new file mode 100644
index 0000000..f87ea87
--- /dev/null
+++ b/runtime/Cpp/tests/t039.cpp
@@ -0,0 +1,122 @@
+#include "UserTestTraits.hpp"
+#include "t039labelsLexer.hpp"
+#include "t039labelsParser.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+int testValid(string const& data);
+int testMalformedInput(string const& data);
+
+static t039labelsLexer *lxr;
+
+
+struct TokenData
+{
+	//t039labelsLexerTokens::Tokens type;
+	//unsigned start;
+	//unsigned stop;
+	const char* text;
+};
+
+static TokenData ExpectedTokens[] =
+{
+  /*
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        ids, w = parser.a()
+
+        self.assertEqual(len(ids), 6, ids)
+        self.assertEqual(ids[0].text, 'a', ids[0])
+        self.assertEqual(ids[1].text, 'b', ids[1])
+        self.assertEqual(ids[2].text, 'c', ids[2])
+        self.assertEqual(ids[3].text, '1', ids[3])
+        self.assertEqual(ids[4].text, '2', ids[4])
+        self.assertEqual(ids[5].text, 'A', ids[5])
+
+        self.assertEqual(w.text, 'GNU1', w)
+  */
+	// "a, b, c, 1, 2 A FOOBAR GNU1 A BLARZ"
+	{ "a"},
+	{ "b"},
+	{ "c"},
+	{ "1"},
+	{ "2"},
+	{ "A"},
+};
+
+
+int main (int argc, char *argv[])
+{
+	testValid("a, b, c, 1, 2 A FOOBAR GNU1 A BLARZ");
+	return 0;
+}
+
+int testValid(string const& data)
+{
+	t039labelsLexerTraits::InputStreamType* input	= new t039labelsLexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+										       ANTLR_ENC_8BIT,
+										       data.length(), //strlen(data.c_str()),
+										       (ANTLR_UINT8*)"t039");
+	if (lxr == NULL)
+		lxr = new t039labelsLexer(input);
+	else
+		lxr->setCharStream(input);
+
+	std::cout << "testValid: \"" << data << '"' <<std::endl;
+
+	t039labelsLexerTraits::TokenStreamType *tstream = new t039labelsLexerTraits::TokenStreamType(ANTLR_SIZE_HINT, lxr->get_tokSource());
+	t039labelsParser *psr = new t039labelsParser(tstream);
+	t039labelsParser::TokenList r = psr->a();	
+	
+	for(unsigned i = 0; i < r.tokens.size() ; i++)
+	{
+		t039labelsLexerTraits::CommonTokenType *token = r.tokens.at(i);
+
+		size_t startIndex = ((const char*)token->get_startIndex()) - data.c_str();
+		size_t stopIndex = ((const char*)token->get_stopIndex()) - data.c_str();
+
+		std::cout << token->getText()
+			  << '\t' << (token->getText()       == ExpectedTokens[i].text ?  "OK" : "Fail")
+			  << std::endl;
+		
+	}
+	delete lxr; lxr = NULL;
+	delete input; 
+	return 0;
+}
+
+/*
+    def testValid1(self):
+        cStream = antlr3.StringStream(
+            'a, b, c, 1, 2 A FOOBAR GNU1 A BLARZ'
+            )
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        ids, w = parser.a()
+
+        self.assertEqual(len(ids), 6, ids)
+        self.assertEqual(ids[0].text, 'a', ids[0])
+        self.assertEqual(ids[1].text, 'b', ids[1])
+        self.assertEqual(ids[2].text, 'c', ids[2])
+        self.assertEqual(ids[3].text, '1', ids[3])
+        self.assertEqual(ids[4].text, '2', ids[4])
+        self.assertEqual(ids[5].text, 'A', ids[5])
+
+        self.assertEqual(w.text, 'GNU1', w)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+
+*/
diff --git a/runtime/Cpp/tests/t039labels.g b/runtime/Cpp/tests/t039labels.g
new file mode 100644
index 0000000..dc24366
--- /dev/null
+++ b/runtime/Cpp/tests/t039labels.g
@@ -0,0 +1,43 @@
+grammar t039labels;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+#include <iostream>
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+#include "t039labelsLexer.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+@parser::members {
+	class TokenList {
+	public:
+        TokenList() : token() {}
+        TokenList(TokenList const& other) : tokens(other.tokens), token(other.token) {}
+        TokenList(ImplTraits::TokenPtrsListType const& lst, ImplTraits::CommonTokenType *t) : tokens(lst), token(t) {}
+	//private:
+        ImplTraits::TokenPtrsListType tokens;
+        ImplTraits::CommonTokenType* token;
+    };
+}
+a returns [t039labelsParser::TokenList retval]
+    : ids+=A ( ',' ids+=(A|B) )* C D w=. ids+=. F EOF
+        { retval = t039labelsParser::TokenList($ids, $w); }
+    ;
+
+A: 'a'..'z';
+B: '0'..'9';
+C: a='A'         { std::cout << $a << std::endl; };
+D: a='FOOBAR'    { std::cout << $a << std::endl; };
+E: 'GNU' a=.     { std::cout << $a << std::endl; };
+F: 'BLARZ' a=EOF { std::cout << $a << std::endl; };
+
+WS: ' '+  { $channel = HIDDEN; };
diff --git a/runtime/Cpp/tests/t040bug80.g b/runtime/Cpp/tests/t040bug80.g
new file mode 100644
index 0000000..3aa55d0
--- /dev/null
+++ b/runtime/Cpp/tests/t040bug80.g
@@ -0,0 +1,20 @@
+lexer grammar t040bug80; 
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+ID_LIKE
+    : 'defined' 
+    | {False}? Identifier 
+    | Identifier 
+    ; 
+ 
+fragment 
+Identifier: 'a'..'z'+ ; // with just 'a', output compiles 
diff --git a/runtime/Cpp/tests/t041parameters.g b/runtime/Cpp/tests/t041parameters.g
new file mode 100644
index 0000000..47b64b7
--- /dev/null
+++ b/runtime/Cpp/tests/t041parameters.g
@@ -0,0 +1,29 @@
+grammar t041parameters;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+a[arg1, arg2] returns [l]
+    : A+ EOF
+        { 
+            l = ($arg1, $arg2) 
+            $arg1 = "gnarz"
+        }
+    ;
+
+A: 'a'..'z';
+
+WS: ' '+  { $channel = HIDDEN };
diff --git a/runtime/Cpp/tests/t042ast.g b/runtime/Cpp/tests/t042ast.g
new file mode 100644
index 0000000..e2d4fc1
--- /dev/null
+++ b/runtime/Cpp/tests/t042ast.g
@@ -0,0 +1,366 @@
+grammar t042ast;
+options {
+    language =Cpp;
+    output = AST;
+}
+
+tokens {
+    VARDEF;
+    FLOAT;
+    EXPR;
+    BLOCK;
+    VARIABLE;
+    FIELD;
+    CALL;
+    INDEX;
+    FIELDACCESS;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+@init {
+self.flag = False
+}
+
+r1
+    : INT ('+'^ INT)*
+    ;
+
+r2
+    : 'assert'^ x=expression (':'! y=expression)? ';'!
+    ;
+
+r3
+    : 'if'^ expression s1=statement ('else'! s2=statement)?
+    ;
+
+r4
+    : 'while'^ expression statement
+    ;
+
+r5
+    : 'return'^ expression? ';'!
+    ;
+
+r6
+    : (INT|ID)+
+    ;
+
+r7
+    : INT -> 
+    ;
+
+r8
+    : 'var' ID ':' type -> ^('var' type ID) 
+    ;
+
+r9
+    : type ID ';' -> ^(VARDEF type ID) 
+    ;
+
+r10
+    : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text + ".0"))}
+    ;
+
+r11
+    : expression -> ^(EXPR expression)
+    | -> EXPR
+    ;
+
+r12
+    : ID (',' ID)* -> ID+
+    ;
+
+r13
+    : type ID (',' ID)* ';' -> ^(type ID+)
+    ;
+
+r14
+    :   expression? statement* type+
+        -> ^(EXPR expression? statement* type+)
+    ;
+
+r15
+    : INT -> INT INT
+    ;
+
+r16
+    : 'int' ID (',' ID)* -> ^('int' ID)+
+    ;
+
+r17
+    : 'for' '(' start=statement ';' expression ';' next=statement ')' statement
+        -> ^('for' $start expression $next statement)
+    ;
+
+r18
+    : t='for' -> ^(BLOCK)
+    ;
+
+r19
+    : t='for' -> ^(BLOCK[$t])
+    ;
+
+r20
+    : t='for' -> ^(BLOCK[$t,"FOR"])
+    ;
+
+r21
+    : t='for' -> BLOCK
+    ;
+
+r22
+    : t='for' -> BLOCK[$t]
+    ;
+
+r23
+    : t='for' -> BLOCK[$t,"FOR"]
+    ;
+
+r24
+    : r=statement expression -> ^($r expression)
+    ;
+
+r25
+    : r+=statement (',' r+=statement)+ expression -> ^($r expression)
+    ;
+
+r26
+    : r+=statement (',' r+=statement)+ -> ^(BLOCK $r+)
+    ;
+
+r27
+    : r=statement expression -> ^($r ^($r expression))
+    ;
+
+r28
+    : ('foo28a'|'foo28b') ->
+    ;
+
+r29
+    : (r+=statement)* -> ^(BLOCK $r+)
+    ;
+
+r30
+    : statement* -> ^(BLOCK statement?)
+    ;
+
+r31
+    : modifier type ID ('=' expression)? ';'
+        -> {self.flag == 0}? ^(VARDEF ID modifier* type expression?)
+        -> {self.flag == 1}? ^(VARIABLE ID modifier* type expression?)
+        ->                   ^(FIELD ID modifier* type expression?)
+    ;
+
+r32[which]
+  : ID INT -> {which==1}? ID
+           -> {which==2}? INT
+           -> // yield nothing as else-clause
+  ;
+
+r33
+    :   modifiers! statement
+    ;
+
+r34
+    :   modifiers! r34a[$modifiers.tree]
+    //|   modifiers! r33b[$modifiers.tree]
+    ;
+
+r34a[mod]
+    :   'class' ID ('extends' sup=type)?
+        ( 'implements' i+=type (',' i+=type)*)?
+        '{' statement* '}'
+        -> ^('class' ID {$mod} ^('extends' $sup)? ^('implements' $i+)? statement* )
+    ;
+
+r35
+    : '{' 'extends' (sup=type)? '}'
+        ->  ^('extends' $sup)?
+    ;
+
+r36
+    : 'if' '(' expression ')' s1=statement
+        ( 'else' s2=statement -> ^('if' ^(EXPR expression) $s1 $s2)
+        |                     -> ^('if' ^(EXPR expression) $s1)
+        )
+    ;
+
+r37
+    : (INT -> INT) ('+' i=INT -> ^('+' $r37 $i) )* 
+    ;
+
+r38
+    : INT ('+'^ INT)*
+    ;
+
+r39
+    : (primary->primary) // set return tree to just primary
+        ( '(' arg=expression ')'
+            -> ^(CALL $r39 $arg)
+        | '[' ie=expression ']'
+            -> ^(INDEX $r39 $ie)
+        | '.' p=primary
+            -> ^(FIELDACCESS $r39 $p)
+        )*
+    ;
+
+r40
+    : (INT -> INT) ( ('+' i+=INT)* -> ^('+' $r40 $i*) ) ';'
+    ;
+
+r41
+    : (INT -> INT) ( ('+' i=INT) -> ^($i $r41) )* ';'
+    ;
+
+r42
+    : ids+=ID (','! ids+=ID)*
+    ;
+
+r43 returns [res]
+    : ids+=ID! (','! ids+=ID!)* {$res = [id.text for id in $ids]}
+    ;
+
+r44
+    : ids+=ID^ (','! ids+=ID^)*
+    ;
+
+r45
+    : primary^
+    ;
+
+r46 returns [res]
+    : ids+=primary! (','! ids+=primary!)* {$res = [id.text for id in $ids]}
+    ;
+
+r47
+    : ids+=primary (','! ids+=primary)*
+    ;
+
+r48
+    : ids+=. (','! ids+=.)*
+    ;
+
+r49
+    : .^ ID
+    ;
+
+r50
+    : ID 
+        -> ^({CommonTree(CommonToken(type=FLOAT, text="1.0"))} ID)
+    ;
+
+/** templates tested:
+    tokenLabelPropertyRef_tree
+*/
+r51 returns [res]
+    : ID t=ID ID
+        { $res = $t.tree }
+    ;
+
+/** templates tested:
+    rulePropertyRef_tree
+*/
+r52 returns [res]
+@after {
+    $res = $tree
+}
+    : ID
+    ;
+
+/** templates tested:
+    ruleLabelPropertyRef_tree
+*/
+r53 returns [res]
+    : t=primary
+        { $res = $t.tree }
+    ;
+
+/** templates tested:
+    ruleSetPropertyRef_tree
+*/
+r54 returns [res]
+@after {
+    $tree = $t.tree;
+}
+    : ID t=expression ID
+    ;
+
+/** backtracking */
+r55
+options { backtrack=true; k=1; }
+    : (modifier+ INT)=> modifier+ expression
+    | modifier+ statement
+    ;
+
+
+/** templates tested:
+    rewriteTokenRef with len(args)>0
+*/
+r56
+    : t=ID* -> ID[$t,'foo']
+    ;
+
+/** templates tested:
+    rewriteTokenRefRoot with len(args)>0
+*/
+r57
+    : t=ID* -> ^(ID[$t,'foo'])
+    ;
+
+/** templates tested:
+    ???
+*/
+r58
+    : ({CommonTree(CommonToken(type=FLOAT, text="2.0"))})^
+    ;
+
+/** templates tested:
+    rewriteTokenListLabelRefRoot
+*/
+r59
+    : (t+=ID)+ statement -> ^($t statement)+
+    ;
+
+primary
+    : ID
+    ;
+
+expression
+    : r1
+    ;
+
+statement
+    : 'fooze'
+    | 'fooze2'
+    ;
+
+modifiers
+    : modifier+
+    ;
+
+modifier
+    : 'public'
+    | 'private'
+    ;
+
+type
+    : 'int'
+    | 'bool'
+    ;
+
+ID : 'a'..'z' + ;
+INT : '0'..'9' +;
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
+
diff --git a/runtime/Cpp/tests/t043synpred.g b/runtime/Cpp/tests/t043synpred.g
new file mode 100644
index 0000000..b54cef8
--- /dev/null
+++ b/runtime/Cpp/tests/t043synpred.g
@@ -0,0 +1,27 @@
+grammar t043synpred;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+a: ((s+ P)=> s+ b)? E;
+b: P 'foo';
+
+s: S;
+
+
+S: ' ';
+P: '+';
+E: '>';
diff --git a/runtime/Cpp/tests/t044trace.g b/runtime/Cpp/tests/t044trace.g
new file mode 100644
index 0000000..002aa2f
--- /dev/null
+++ b/runtime/Cpp/tests/t044trace.g
@@ -0,0 +1,33 @@
+grammar t044trace;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+@init {
+    self._stack = None
+}
+
+a: '<' ((INT '+')=>b|c) '>';
+b: c ('+' c)*;
+c: INT 
+    {
+        if self._stack is None:
+            self._stack = self.getRuleInvocationStack()
+    }
+    ;
+
+INT: ('0'..'9')+;
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
diff --git a/runtime/Cpp/tests/t045dfabug.g b/runtime/Cpp/tests/t045dfabug.g
new file mode 100644
index 0000000..101c582
--- /dev/null
+++ b/runtime/Cpp/tests/t045dfabug.g
@@ -0,0 +1,44 @@
+grammar t045dfabug;
+options {
+    language =Cpp;
+    output = AST;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+// this rule used to generate an infinite loop in DFA.predict
+r
+options { backtrack=true; }
+    : (modifier+ INT)=> modifier+ expression
+    | modifier+ statement
+    ;
+
+expression
+    : INT '+' INT
+    ;
+
+statement
+    : 'fooze'
+    | 'fooze2'
+    ;
+
+modifier
+    : 'public'
+    | 'private'
+    ;
+
+ID : 'a'..'z' + ;
+INT : '0'..'9' +;
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
+
diff --git a/runtime/Cpp/tests/t046rewrite.g b/runtime/Cpp/tests/t046rewrite.g
new file mode 100644
index 0000000..60d8a41
--- /dev/null
+++ b/runtime/Cpp/tests/t046rewrite.g
@@ -0,0 +1,67 @@
+grammar t046rewrite;
+options {
+    language=Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+program
+@init {
+    start = self.input.LT(1)
+}
+    :   method+
+        {
+        self.input.insertBefore(start,"public class Wrapper {\n")
+        self.input.insertAfter($method.stop, "\n}\n")
+        }
+    ;
+
+method
+    :   m='method' ID '(' ')' body
+        {self.input.replace($m, "public void");}
+    ; 
+
+body
+scope {
+    decls
+}
+@init {
+    $body::decls = set()
+}
+    :   lcurly='{' stat* '}'
+        {
+        for it in $body::decls:
+            self.input.insertAfter($lcurly, "\nint "+it+";")
+        }
+    ;
+
+stat:   ID '=' expr ';' {$body::decls.add($ID.text);}
+    ;
+
+expr:   mul ('+' mul)* 
+    ;
+
+mul :   atom ('*' atom)*
+    ;
+
+atom:   ID
+    |   INT
+    ;
+
+ID  :   ('a'..'z'|'A'..'Z')+ ;
+
+INT :   ('0'..'9')+ ;
+
+WS  :   (' '|'\t'|'\n')+ {$channel=HIDDEN;}
+    ;
diff --git a/runtime/Cpp/tests/t047treeparser.g b/runtime/Cpp/tests/t047treeparser.g
new file mode 100644
index 0000000..375d84f
--- /dev/null
+++ b/runtime/Cpp/tests/t047treeparser.g
@@ -0,0 +1,126 @@
+grammar t047treeparser;
+options {
+    language=Cpp;
+    output=AST;
+}
+
+tokens {
+    VAR_DEF;
+    ARG_DEF;
+    FUNC_HDR;
+    FUNC_DECL;
+    FUNC_DEF;
+    BLOCK;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+
+program
+    :   declaration+
+    ;
+
+declaration
+    :   variable
+    |   functionHeader ';' -> ^(FUNC_DECL functionHeader)
+    |   functionHeader block -> ^(FUNC_DEF functionHeader block)
+    ;
+
+variable
+    :   type declarator ';' -> ^(VAR_DEF type declarator)
+    ;
+
+declarator
+    :   ID 
+    ;
+
+functionHeader
+    :   type ID '(' ( formalParameter ( ',' formalParameter )* )? ')'
+        -> ^(FUNC_HDR type ID formalParameter+)
+    ;
+
+formalParameter
+    :   type declarator -> ^(ARG_DEF type declarator)
+    ;
+
+type
+    :   'int'   
+    |   'char'  
+    |   'void'
+    |   ID        
+    ;
+
+block
+    :   lc='{'
+            variable*
+            stat*
+        '}'
+        -> ^(BLOCK[$lc,"BLOCK"] variable* stat*)
+    ;
+
+stat: forStat
+    | expr ';'!
+    | block
+    | assignStat ';'!
+    | ';'!
+    ;
+
+forStat
+    :   'for' '(' start=assignStat ';' expr ';' next=assignStat ')' block
+        -> ^('for' $start expr $next block)
+    ;
+
+assignStat
+    :   ID EQ expr -> ^(EQ ID expr)
+    ;
+
+expr:   condExpr
+    ;
+
+condExpr
+    :   aexpr ( ('=='^ | '<'^) aexpr )?
+    ;
+
+aexpr
+    :   atom ( '+'^ atom )*
+    ;
+
+atom
+    : ID      
+    | INT      
+    | '(' expr ')' -> expr
+    ; 
+
+FOR : 'for' ;
+INT_TYPE : 'int' ;
+CHAR: 'char';
+VOID: 'void';
+
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+    ;
+
+INT :	('0'..'9')+
+    ;
+
+EQ   : '=' ;
+EQEQ : '==' ;
+LT   : '<' ;
+PLUS : '+' ;
+
+WS  :   (   ' '
+        |   '\t'
+        |   '\r'
+        |   '\n'
+        )+
+        { $channel=HIDDEN }
+    ;    
diff --git a/runtime/Cpp/tests/t047treeparserWalker.g b/runtime/Cpp/tests/t047treeparserWalker.g
new file mode 100644
index 0000000..1d4d622
--- /dev/null
+++ b/runtime/Cpp/tests/t047treeparserWalker.g
@@ -0,0 +1,73 @@
+tree grammar t047treeparserWalker;
+options {
+    language=Cpp;
+    tokenVocab=t047treeparser;
+    ASTLabelType=CommonTree;
+}
+
+@includes {
+#include "UserTestTraits.hpp"
+}
+@namespace
+{ Antlr3Test }
+
+program
+    :   declaration+
+    ;
+
+declaration
+    :   variable
+    |   ^(FUNC_DECL functionHeader)
+    |   ^(FUNC_DEF functionHeader block)
+    ;
+
+variable returns [res]
+    :   ^(VAR_DEF type declarator)
+        { 
+            $res = $declarator.text; 
+        }
+    ;
+
+declarator
+    :   ID 
+    ;
+
+functionHeader
+    :   ^(FUNC_HDR type ID formalParameter+)
+    ;
+
+formalParameter
+    :   ^(ARG_DEF type declarator)
+    ;
+
+type
+    :   'int'
+    |   'char'
+    |   'void'
+    |   ID        
+    ;
+
+block
+    :   ^(BLOCK variable* stat*)
+    ;
+
+stat: forStat
+    | expr
+    | block
+    ;
+
+forStat
+    :   ^('for' expr expr expr block)
+    ;
+
+expr:   ^(EQEQ expr expr)
+    |   ^(LT expr expr)
+    |   ^(PLUS expr expr)
+    |   ^(EQ ID expr)
+    |   atom
+    ;
+
+atom
+    : ID      
+    | INT      
+    ; 
diff --git a/runtime/Cpp/tests/t048rewrite.g b/runtime/Cpp/tests/t048rewrite.g
new file mode 100644
index 0000000..2cd40fe
--- /dev/null
+++ b/runtime/Cpp/tests/t048rewrite.g
@@ -0,0 +1,16 @@
+lexer grammar t048rewrite;
+options {
+    language=Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+A: 'a';
+B: 'b';
+C: 'c';
+
diff --git a/runtime/Cpp/tests/t048rewrite2.g b/runtime/Cpp/tests/t048rewrite2.g
new file mode 100644
index 0000000..c7bb2dd
--- /dev/null
+++ b/runtime/Cpp/tests/t048rewrite2.g
@@ -0,0 +1,19 @@
+lexer grammar t048rewrite2;
+options {
+    language=Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+ID : 'a'..'z'+;
+INT : '0'..'9'+;
+SEMI : ';';
+PLUS : '+';
+MUL : '*';
+ASSIGN : '=';
+WS : ' '+;
diff --git a/runtime/Cpp/tests/t050decorate.g b/runtime/Cpp/tests/t050decorate.g
new file mode 100644
index 0000000..2f6ea71
--- /dev/null
+++ b/runtime/Cpp/tests/t050decorate.g
@@ -0,0 +1,42 @@
+grammar t050decorate;
+options {
+  language =Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+@parser::includes {
+#include "UserTestTraits.hpp"
+}
+@parser::namespace
+{ Antlr3Test }
+        
+@header {
+    def logme(func):
+        def decorated(self, *args, **kwargs):
+            self.events.append('before')
+            try:
+                return func(self, *args, **kwargs)
+            finally:
+                self.events.append('after')
+
+        return decorated
+}
+
+@parser::init {
+self.events = []
+}
+
+document
+@decorate {
+    @logme
+}
+    : IDENTIFIER
+    ;
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
diff --git a/runtime/Cpp/tests/t051.cpp b/runtime/Cpp/tests/t051.cpp
new file mode 100644
index 0000000..c89e3a5
--- /dev/null
+++ b/runtime/Cpp/tests/t051.cpp
@@ -0,0 +1,98 @@
+#include "UserTestTraits.hpp"
+#include "t051lexer.hpp"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <sstream>
+#include <fstream>
+
+using namespace Antlr3Test;
+using namespace std;
+
+static t051lexer* lxr;
+
+static string slurp(string const& fileName);
+static void parseFile(const char* fName);
+
+int main (int argc, char *argv[])
+{
+	if (argc < 2 || argv[1] == NULL)
+	{
+		parseFile("./t051.input"); // Note in VS2005 debug, working directory must be configured
+	}
+	else
+	{
+		for (int i = 1; i < argc; i++)
+		{
+			parseFile(argv[i]);
+		}
+	}
+
+	printf("finished parsing OK\n");	// Finnish parking is pretty good - I think it is all the snow
+
+	return 0;
+}
+
+void parseFile(const char* fName)
+{
+	t051lexerTraits::InputStreamType* input;
+	t051lexerTraits::TokenStreamType* tstream;
+	
+	string data = slurp(fName);
+
+	input	= new t051lexerTraits::InputStreamType((const ANTLR_UINT8 *)data.c_str(),
+						       ANTLR_ENC_8BIT,
+						       data.length(), //strlen(data.c_str()),
+						       (ANTLR_UINT8*)fName);
+
+	input->setUcaseLA(true);
+
+	// Our input stream is now open and all set to go, so we can create a new instance of our
+	// lexer and set the lexer input to our input stream:
+	//  (file | memory | ?) --> inputstream -> lexer --> tokenstream --> parser ( --> treeparser )?
+	//
+	if (lxr == NULL)
+	{
+		lxr = new t051lexer(input);	    // t051lexer is generated by ANTLR
+	}
+	else
+	{
+		lxr->setCharStream(input);
+	}
+
+	tstream = new t051lexerTraits::TokenStreamType(ANTLR_SIZE_HINT, lxr->get_tokSource());
+
+	putc('L', stdout); fflush(stdout);
+	{
+		ANTLR_INT32 T = 0;
+		while	(T != t051lexer::EOF_TOKEN)
+		{
+			T = tstream->_LA(1);
+			t051lexerTraits::CommonTokenType const* token = tstream->_LT(1);
+			  
+			printf("%d\t\"%s\"\n",
+			       T,
+			       tstream->_LT(1)->getText().c_str()
+				);
+			tstream->consume();
+		}
+	}
+
+	tstream->_LT(1);	// Don't do this normally; it just causes the lexer to run for timings here
+
+	delete tstream; 
+	delete lxr; lxr = NULL;
+	delete input; 
+}
+
+string slurp(string const& fileName)
+{
+	ifstream ifs(fileName.c_str(), ios::in | ios::binary | ios::ate);
+	ifstream::pos_type fileSize = ifs.tellg();
+	ifs.seekg(0, ios::beg);
+
+	stringstream sstr;
+	sstr << ifs.rdbuf();
+	return sstr.str();
+}
diff --git a/runtime/Cpp/tests/t051.input b/runtime/Cpp/tests/t051.input
new file mode 100644
index 0000000..7f1dfb7
--- /dev/null
+++ b/runtime/Cpp/tests/t051.input
@@ -0,0 +1 @@
+9.99
diff --git a/runtime/Cpp/tests/t051lexer.g b/runtime/Cpp/tests/t051lexer.g
new file mode 100644
index 0000000..f5790b7
--- /dev/null
+++ b/runtime/Cpp/tests/t051lexer.g
@@ -0,0 +1,28 @@
+// Lexer grammar using synpreds
+lexer grammar t051lexer;
+
+options {
+	language=Cpp;
+}
+
+@lexer::includes
+{
+#include "UserTestTraits.hpp"
+}
+@lexer::namespace
+{ Antlr3Test }
+
+fragment
+POINT
+	:	'.'
+	;
+NUMBER
+	: (	( NUM POINT NUM ) => NUM POINT NUM
+		|	POINT NUM
+		|	NUM
+		)
+    ;
+fragment
+NUM
+	: '0' .. '9' ( '0' .. '9' )*
+	;
diff --git a/antlr-3.4/runtime/Delphi/LICENSE.TXT b/runtime/Delphi/LICENSE.TXT
similarity index 100%
rename from antlr-3.4/runtime/Delphi/LICENSE.TXT
rename to runtime/Delphi/LICENSE.TXT
diff --git a/antlr-3.4/runtime/Delphi/NOTICE.TXT b/runtime/Delphi/NOTICE.TXT
similarity index 100%
rename from antlr-3.4/runtime/Delphi/NOTICE.TXT
rename to runtime/Delphi/NOTICE.TXT
diff --git a/antlr-3.4/runtime/Delphi/README.TXT b/runtime/Delphi/README.TXT
similarity index 100%
rename from antlr-3.4/runtime/Delphi/README.TXT
rename to runtime/Delphi/README.TXT
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Collections.Tests.pas b/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Collections.Tests.pas
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Collections.Tests.pas
rename to runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Collections.Tests.pas
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tests.pas b/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tests.pas
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tests.pas
rename to runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tests.pas
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tools.Tests.pas b/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tools.Tests.pas
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tools.Tests.pas
rename to runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tools.Tests.pas
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tree.Tests.pas b/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tree.Tests.pas
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tree.Tests.pas
rename to runtime/Delphi/Sources/Antlr3.Runtime.Tests/Antlr.Runtime.Tree.Tests.pas
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.dpr b/runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.dpr
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.dpr
rename to runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.dpr
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.dproj b/runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.dproj
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.dproj
rename to runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.dproj
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.res b/runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.res
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.res
rename to runtime/Delphi/Sources/Antlr3.Runtime.Tests/TestDriver.res
Binary files differ
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Collections.pas b/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Collections.pas
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Collections.pas
rename to runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Collections.pas
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Tools.pas b/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Tools.pas
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Tools.pas
rename to runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Tools.pas
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Tree.pas b/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Tree.pas
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Tree.pas
rename to runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.Tree.pas
diff --git a/antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.pas b/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.pas
similarity index 100%
rename from antlr-3.4/runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.pas
rename to runtime/Delphi/Sources/Antlr3.Runtime/Antlr.Runtime.pas
diff --git a/antlr-3.4/runtime/Java/antlr.config b/runtime/Java/antlr.config
similarity index 100%
rename from antlr-3.4/runtime/Java/antlr.config
rename to runtime/Java/antlr.config
diff --git a/runtime/Java/doxyfile b/runtime/Java/doxyfile
new file mode 100644
index 0000000..250fec1
--- /dev/null
+++ b/runtime/Java/doxyfile
@@ -0,0 +1,264 @@
+# Doxyfile 1.5.2
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+DOXYFILE_ENCODING      = UTF-8
+PROJECT_NAME           = "ANTLR API"
+PROJECT_NUMBER         = 3.3
+OUTPUT_DIRECTORY       = api
+CREATE_SUBDIRS         = NO
+OUTPUT_LANGUAGE        = English
+BRIEF_MEMBER_DESC      = YES
+REPEAT_BRIEF           = YES
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+ALWAYS_DETAILED_SEC    = YES
+INLINE_INHERITED_MEMB  = NO
+FULL_PATH_NAMES        = YES
+STRIP_FROM_PATH        = /Applications/
+STRIP_FROM_INC_PATH    = 
+SHORT_NAMES            = NO
+JAVADOC_AUTOBRIEF      = NO
+MULTILINE_CPP_IS_BRIEF = NO
+DETAILS_AT_TOP         = NO
+INHERIT_DOCS           = YES
+SEPARATE_MEMBER_PAGES  = NO
+TAB_SIZE               = 8
+ALIASES                = 
+OPTIMIZE_OUTPUT_FOR_C  = NO
+OPTIMIZE_OUTPUT_JAVA   = YES
+BUILTIN_STL_SUPPORT    = NO
+CPP_CLI_SUPPORT        = NO
+DISTRIBUTE_GROUP_DOC   = NO
+SUBGROUPING            = YES
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+EXTRACT_ALL            = YES
+EXTRACT_PRIVATE        = YES
+EXTRACT_STATIC         = YES
+EXTRACT_LOCAL_CLASSES  = YES
+EXTRACT_LOCAL_METHODS  = NO
+HIDE_UNDOC_MEMBERS     = NO
+HIDE_UNDOC_CLASSES     = NO
+HIDE_FRIEND_COMPOUNDS  = NO
+HIDE_IN_BODY_DOCS      = NO
+INTERNAL_DOCS          = NO
+CASE_SENSE_NAMES       = NO
+HIDE_SCOPE_NAMES       = NO
+SHOW_INCLUDE_FILES     = YES
+INLINE_INFO            = YES
+SORT_MEMBER_DOCS       = YES
+SORT_BRIEF_DOCS        = NO
+SORT_BY_SCOPE_NAME     = NO
+GENERATE_TODOLIST      = YES
+GENERATE_TESTLIST      = NO
+GENERATE_BUGLIST       = NO
+GENERATE_DEPRECATEDLIST= NO
+ENABLED_SECTIONS       = 
+MAX_INITIALIZER_LINES  = 30
+SHOW_USED_FILES        = YES
+SHOW_DIRECTORIES       = NO
+FILE_VERSION_FILTER    = 
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+QUIET                  = NO
+WARNINGS               = YES
+WARN_IF_UNDOCUMENTED   = YES
+WARN_IF_DOC_ERROR      = YES
+WARN_NO_PARAMDOC       = NO
+WARN_FORMAT            = "$file:$line: $text"
+WARN_LOGFILE           = 
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+INPUT                  = /Users/parrt/antlr/code/antlr/main/runtime/Java/src
+INPUT_ENCODING         = UTF-8
+FILE_PATTERNS          = *.c \
+                         *.cc \
+                         *.cxx \
+                         *.cpp \
+                         *.c++ \
+                         *.d \
+                         *.java \
+                         *.ii \
+                         *.ixx \
+                         *.ipp \
+                         *.i++ \
+                         *.inl \
+                         *.h \
+                         *.hh \
+                         *.hxx \
+                         *.hpp \
+                         *.h++ \
+                         *.idl \
+                         *.odl \
+                         *.cs \
+                         *.php \
+                         *.php3 \
+                         *.inc \
+                         *.m \
+                         *.mm \
+                         *.dox \
+                         *.py
+RECURSIVE              = YES
+EXCLUDE                = 
+EXCLUDE_SYMLINKS       = NO
+EXCLUDE_PATTERNS       = 
+EXCLUDE_SYMBOLS        = java::util \
+                         java::io
+EXAMPLE_PATH           = 
+EXAMPLE_PATTERNS       = *
+EXAMPLE_RECURSIVE      = NO
+IMAGE_PATH             = 
+INPUT_FILTER           = 
+FILTER_PATTERNS        = 
+FILTER_SOURCE_FILES    = NO
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+SOURCE_BROWSER         = YES
+INLINE_SOURCES         = NO
+STRIP_CODE_COMMENTS    = YES
+REFERENCED_BY_RELATION = NO
+REFERENCES_RELATION    = NO
+REFERENCES_LINK_SOURCE = YES
+USE_HTAGS              = NO
+VERBATIM_HEADERS       = YES
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+ALPHABETICAL_INDEX     = NO
+COLS_IN_ALPHA_INDEX    = 5
+IGNORE_PREFIX          = 
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+GENERATE_HTML          = YES
+HTML_OUTPUT            = .
+HTML_FILE_EXTENSION    = .html
+HTML_HEADER            = 
+HTML_FOOTER            = 
+HTML_STYLESHEET        = 
+HTML_ALIGN_MEMBERS     = YES
+GENERATE_HTMLHELP      = NO
+CHM_FILE               = 
+HHC_LOCATION           = 
+GENERATE_CHI           = NO
+BINARY_TOC             = NO
+TOC_EXPAND             = NO
+DISABLE_INDEX          = NO
+ENUM_VALUES_PER_LINE   = 4
+GENERATE_TREEVIEW      = NO
+TREEVIEW_WIDTH         = 250
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+GENERATE_LATEX         = NO
+LATEX_OUTPUT           = latex
+LATEX_CMD_NAME         = latex
+MAKEINDEX_CMD_NAME     = makeindex
+COMPACT_LATEX          = NO
+PAPER_TYPE             = a4wide
+EXTRA_PACKAGES         = 
+LATEX_HEADER           = 
+PDF_HYPERLINKS         = NO
+USE_PDFLATEX           = YES
+LATEX_BATCHMODE        = NO
+LATEX_HIDE_INDICES     = NO
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+GENERATE_RTF           = NO
+RTF_OUTPUT             = rtf
+COMPACT_RTF            = NO
+RTF_HYPERLINKS         = NO
+RTF_STYLESHEET_FILE    = 
+RTF_EXTENSIONS_FILE    = 
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+GENERATE_MAN           = NO
+MAN_OUTPUT             = man
+MAN_EXTENSION          = .3
+MAN_LINKS              = NO
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+GENERATE_XML           = NO
+XML_OUTPUT             = xml
+XML_SCHEMA             = 
+XML_DTD                = 
+XML_PROGRAMLISTING     = YES
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+GENERATE_AUTOGEN_DEF   = NO
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+GENERATE_PERLMOD       = NO
+PERLMOD_LATEX          = NO
+PERLMOD_PRETTY         = YES
+PERLMOD_MAKEVAR_PREFIX = 
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+ENABLE_PREPROCESSING   = YES
+MACRO_EXPANSION        = NO
+EXPAND_ONLY_PREDEF     = NO
+SEARCH_INCLUDES        = YES
+INCLUDE_PATH           = 
+INCLUDE_FILE_PATTERNS  = 
+PREDEFINED             = 
+EXPAND_AS_DEFINED      = 
+SKIP_FUNCTION_MACROS   = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+TAGFILES               = 
+GENERATE_TAGFILE       = 
+ALLEXTERNALS           = NO
+EXTERNAL_GROUPS        = YES
+PERL_PATH              = /usr/bin/perl
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+CLASS_DIAGRAMS         = NO
+MSCGEN_PATH            = /Applications/Doxygen.app/Contents/Resources/
+HIDE_UNDOC_RELATIONS   = YES
+HAVE_DOT               = YES
+CLASS_GRAPH            = YES
+COLLABORATION_GRAPH    = YES
+GROUP_GRAPHS           = YES
+UML_LOOK               = NO
+TEMPLATE_RELATIONS     = NO
+INCLUDE_GRAPH          = YES
+INCLUDED_BY_GRAPH      = YES
+CALL_GRAPH             = NO
+CALLER_GRAPH           = NO
+GRAPHICAL_HIERARCHY    = YES
+DIRECTORY_GRAPH        = YES
+DOT_IMAGE_FORMAT       = png
+DOT_PATH               = /Applications/Doxygen.app/Contents/Resources/
+DOTFILE_DIRS           = 
+DOT_GRAPH_MAX_NODES    = 50
+DOT_TRANSPARENT        = NO
+DOT_MULTI_TARGETS      = NO
+GENERATE_LEGEND        = YES
+DOT_CLEANUP            = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine   
+#---------------------------------------------------------------------------
+SEARCHENGINE           = NO
diff --git a/runtime/Java/pom.xml b/runtime/Java/pom.xml
new file mode 100644
index 0000000..7d655c0
--- /dev/null
+++ b/runtime/Java/pom.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.antlr</groupId>
+  <artifactId>antlr-runtime</artifactId>
+  <packaging>jar</packaging>
+  
+  <!--
+
+    Inherit from the ANTLR master pom, which tells us what
+    version we are and allows us to inherit dependencies
+    and so on.
+
+    -->
+  <parent>
+      <groupId>org.antlr</groupId>
+      <artifactId>antlr-master</artifactId>
+      <version>3.5.2</version>
+      <relativePath>../..</relativePath>
+  </parent>
+  
+  <name>ANTLR 3 Runtime</name>
+
+  <description>A framework for constructing recognizers, compilers, and translators from grammatical descriptions containing Java, C#, C++, or Python actions.</description>
+  <url>http://www.antlr.org</url>
+      <developers>
+        <developer>
+            <name>Terence Parr</name>
+            <organization>USFCA</organization>
+            <organizationUrl>http://www.cs.usfca.edu</organizationUrl>
+            <email>parrt@antlr.org</email>
+            <roles>
+                <role>Project Leader</role>
+                <role>Developer - Java Target</role>
+            </roles>
+            <timezone>PST</timezone>
+        </developer>
+        <developer>
+            <name>Jim Idle</name>
+            <organization>Temporal Wave LLC</organization>
+            <organizationUrl>http://www.temporal-wave.com</organizationUrl>
+            <email>jimi@temporal-wave.com</email>
+            <roles>
+                <role>Developer - Maven stuff</role>
+                <role>Developer - C Target</role>
+            </roles>
+            <timezone>PST</timezone>
+        </developer>
+    </developers>
+
+  <dependencies>
+    
+    <dependency>
+      <groupId>org.antlr</groupId>
+      <artifactId>stringtemplate</artifactId>
+      <scope>compile</scope>
+      <optional>true</optional>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+    </dependency>
+
+  </dependencies>
+
+</project>
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/ANTLRFileStream.java b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRFileStream.java
new file mode 100644
index 0000000..1e2e698
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRFileStream.java
@@ -0,0 +1,79 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import java.io.*;
+
+/** This is a char buffer stream that is loaded from a file
+ *  all at once when you construct the object.  This looks very
+ *  much like an ANTLRReaderStream or ANTLRInputStream, but it's a special case
+ *  since we know the exact size of the object to load.  We can avoid lots
+ *  of data copying. 
+ */
+public class ANTLRFileStream extends ANTLRStringStream {
+	protected String fileName;
+
+	public ANTLRFileStream(String fileName) throws IOException {
+		this(fileName, null);
+	}
+
+	public ANTLRFileStream(String fileName, String encoding) throws IOException {
+		this.fileName = fileName;
+		load(fileName, encoding);
+	}
+
+	public void load(String fileName, String encoding)
+		throws IOException
+	{
+		if ( fileName==null ) {
+			return;
+		}
+		File f = new File(fileName);
+		int size = (int)f.length();
+		InputStreamReader isr;
+		FileInputStream fis = new FileInputStream(fileName);
+		if ( encoding!=null ) {
+			isr = new InputStreamReader(fis, encoding);
+		}
+		else {
+			isr = new InputStreamReader(fis);
+		}
+		try {
+			data = new char[size];
+			super.n = isr.read(data);
+		}
+		finally {
+			isr.close();
+		}
+	}
+
+	@Override
+	public String getSourceName() {
+		return fileName;
+	}
+}
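
A minimal usage sketch for the ANTLRFileStream added above (illustration only, not
part of this patch). The runtime types ANTLRFileStream and CharStream are the ones
introduced in this change; FileStreamDemo, the command-line file path, and the
UTF-8 encoding are assumptions.

    import org.antlr.runtime.ANTLRFileStream;
    import org.antlr.runtime.CharStream;

    // Hypothetical driver class, not part of the ANTLR sources in this patch.
    public class FileStreamDemo {
        public static void main(String[] args) throws Exception {
            // The whole file is read into the stream's char buffer up front.
            CharStream input = new ANTLRFileStream(args[0], "UTF-8");
            // Step through the characters the way a generated lexer would:
            // LA(1) peeks at the next char, consume() advances past it.
            while (input.LA(1) != CharStream.EOF) {
                input.consume();
            }
            System.out.println(input.size() + " chars, ended on line " + input.getLine());
        }
    }

In practice the stream is handed straight to a generated lexer constructor rather
than walked by hand; the loop above only shows which calls the lexer makes.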
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRInputStream.java b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRInputStream.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/ANTLRInputStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/ANTLRInputStream.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/ANTLRReaderStream.java b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRReaderStream.java
new file mode 100644
index 0000000..21881a4
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRReaderStream.java
@@ -0,0 +1,95 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import java.io.*;
+
+/** Vacuum all input from a Reader and then treat it like a StringStream.
+ *  Manage the buffer manually to avoid unnecessary data copying.
+ *
+ *  If you need encoding, use ANTLRInputStream.
+ */
+public class ANTLRReaderStream extends ANTLRStringStream {
+	public static final int READ_BUFFER_SIZE = 1024;
+	public static final int INITIAL_BUFFER_SIZE = 1024;
+
+	public ANTLRReaderStream() {
+	}
+
+	public ANTLRReaderStream(Reader r) throws IOException {
+		this(r, INITIAL_BUFFER_SIZE, READ_BUFFER_SIZE);
+	}
+
+	public ANTLRReaderStream(Reader r, int size) throws IOException {
+		this(r, size, READ_BUFFER_SIZE);
+	}
+
+	public ANTLRReaderStream(Reader r, int size, int readChunkSize) throws IOException {
+		load(r, size, readChunkSize);
+	}
+
+	public void load(Reader r, int size, int readChunkSize)
+		throws IOException
+	{
+		if ( r==null ) {
+			return;
+		}
+		if ( size<=0 ) {
+			size = INITIAL_BUFFER_SIZE;
+		}
+		if ( readChunkSize<=0 ) {
+			readChunkSize = READ_BUFFER_SIZE;
+		}
+		// System.out.println("load "+size+" in chunks of "+readChunkSize);
+		try {
+			// alloc initial buffer size.
+			data = new char[size];
+			// read all the data in chunks of readChunkSize
+			int numRead;
+			int p = 0;
+			do {
+				if ( p+readChunkSize > data.length ) { // overflow?
+					// System.out.println("### overflow p="+p+", data.length="+data.length);
+					char[] newdata = new char[data.length*2]; // resize
+					System.arraycopy(data, 0, newdata, 0, data.length);
+					data = newdata;
+				}
+				numRead = r.read(data, p, readChunkSize);
+				// System.out.println("read "+numRead+" chars; p was "+p+" is now "+(p+numRead));
+				p += numRead;
+			} while (numRead!=-1); // while not EOF
+			// set the actual size of the data available;
+			// EOF subtracted one above in p+=numRead; add one back
+			super.n = p+1;
+			//System.out.println("n="+n);
+		}
+		finally {
+			r.close();
+		}
+	}
+}
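
A similar sketch for ANTLRReaderStream (illustration only, not part of this patch).
ReaderStreamDemo and the deliberately tiny buffer sizes are assumptions; they simply
force the chunked load() above, including its buffer-doubling resize, to run.

    import java.io.StringReader;
    import org.antlr.runtime.ANTLRReaderStream;

    // Hypothetical demo class, not part of the ANTLR sources in this patch.
    public class ReaderStreamDemo {
        public static void main(String[] args) throws Exception {
            String text = "x = 1 + 2;";
            // Initial buffer of 8 chars, read chunks of 4: load() must grow
            // the buffer once, but the stream still ends up holding all input.
            ANTLRReaderStream input =
                new ANTLRReaderStream(new StringReader(text), 8, 4);
            System.out.println("buffered " + input.size() + " chars");   // 10
            System.out.println(input.substring(0, input.size() - 1));    // x = 1 + 2;
        }
    }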
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/ANTLRStringStream.java b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRStringStream.java
new file mode 100644
index 0000000..194a2fc
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ANTLRStringStream.java
@@ -0,0 +1,247 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** A pretty quick CharStream that pulls all data from an array
+ *  directly.  Every method call counts in the lexer.  Java's
+ *  strings aren't very good so I'm avoiding them.
+ */
+public class ANTLRStringStream implements CharStream {
+	/** The data being scanned */
+	protected char[] data;
+
+	/** How many characters are actually in the buffer */
+	protected int n;
+
+	/** 0..n-1 index into string of next char */
+	protected int p=0;
+
+	/** line number 1..n within the input */
+	protected int line = 1;
+
+	/** The index of the character relative to the beginning of the line 0..n-1 */
+	protected int charPositionInLine = 0;
+
+	/** tracks how deep mark() calls are nested */
+	protected int markDepth = 0;
+
+	/** A list of CharStreamState objects that tracks the stream state
+	 *  values line, charPositionInLine, and p that can change as you
+	 *  move through the input stream.  Indexed from 1..markDepth.
+     *  A null is kept @ index 0.  Created upon first call to mark().
+	 */
+	protected List<CharStreamState> markers;
+
+	/** Track the last mark() call result value for use in rewind(). */
+	protected int lastMarker;
+
+	/** What is name or source of this char stream? */
+	public String name;
+
+	public ANTLRStringStream() {
+	}
+
+	/** Copy data in string to a local char array */
+	public ANTLRStringStream(String input) {
+		this();
+		this.data = input.toCharArray();
+		this.n = input.length();
+	}
+
+	/** This is the preferred constructor as no data is copied */
+	public ANTLRStringStream(char[] data, int numberOfActualCharsInArray) {
+		this();
+		this.data = data;
+		this.n = numberOfActualCharsInArray;
+	}
+
+	/** Reset the stream so that it's in the same state it was
+	 *  when the object was created *except* the data array is not
+	 *  touched.
+	 */
+	public void reset() {
+		p = 0;
+		line = 1;
+		charPositionInLine = 0;
+		markDepth = 0;
+	}
+
+	@Override
+    public void consume() {
+		//System.out.println("prev p="+p+", c="+(char)data[p]);
+        if ( p < n ) {
+			charPositionInLine++;
+			if ( data[p]=='\n' ) {
+				/*
+				System.out.println("newline char found on line: "+line+
+								   "@ pos="+charPositionInLine);
+				*/
+				line++;
+				charPositionInLine=0;
+			}
+            p++;
+			//System.out.println("p moves to "+p+" (c='"+(char)data[p]+"')");
+        }
+    }
+
+	@Override
+    public int LA(int i) {
+		if ( i==0 ) {
+			return 0; // undefined
+		}
+		if ( i<0 ) {
+			i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
+			if ( (p+i-1) < 0 ) {
+				return CharStream.EOF; // invalid; no char before first char
+			}
+		}
+
+		if ( (p+i-1) >= n ) {
+            //System.out.println("char LA("+i+")=EOF; p="+p);
+            return CharStream.EOF;
+        }
+        //System.out.println("char LA("+i+")="+(char)data[p+i-1]+"; p="+p);
+		//System.out.println("LA("+i+"); p="+p+" n="+n+" data.length="+data.length);
+		return data[p+i-1];
+    }
+
+	@Override
+	public int LT(int i) {
+		return LA(i);
+	}
+
+	/** Return the current input symbol index 0..n where n indicates the
+     *  last symbol has been read.  The index is the index of the char to
+	 *  be returned from LA(1).
+     */
+	@Override
+    public int index() {
+        return p;
+    }
+
+	@Override
+	public int size() {
+		return n;
+	}
+
+	@Override
+	public int mark() {
+        if ( markers==null ) {
+            markers = new ArrayList<CharStreamState>();
+            markers.add(null); // depth 0 means no backtracking, leave blank
+        }
+        markDepth++;
+		CharStreamState state;
+		if ( markDepth>=markers.size() ) {
+			state = new CharStreamState();
+			markers.add(state);
+		}
+		else {
+			state = markers.get(markDepth);
+		}
+		state.p = p;
+		state.line = line;
+		state.charPositionInLine = charPositionInLine;
+		lastMarker = markDepth;
+		return markDepth;
+    }
+
+	@Override
+    public void rewind(int m) {
+		CharStreamState state = markers.get(m);
+		// restore stream state
+		seek(state.p);
+		line = state.line;
+		charPositionInLine = state.charPositionInLine;
+		release(m);
+	}
+
+	@Override
+	public void rewind() {
+		rewind(lastMarker);
+	}
+
+	@Override
+	public void release(int marker) {
+		// unwind any other markers made after m and release m
+		markDepth = marker;
+		// release this marker
+		markDepth--;
+	}
+
+	/** consume() ahead until p==index; can't just set p=index as we must
+	 *  update line and charPositionInLine.
+	 */
+	@Override
+	public void seek(int index) {
+		if ( index<=p ) {
+			p = index; // just jump; don't update stream state (line, ...)
+			return;
+		}
+		// seek forward, consume until p hits index
+		while ( p<index ) {
+			consume();
+		}
+	}
+
+	@Override
+	public String substring(int start, int stop) {
+		return new String(data,start,stop-start+1);
+	}
+
+	@Override
+	public int getLine() {
+		return line;
+	}
+
+	@Override
+	public int getCharPositionInLine() {
+		return charPositionInLine;
+	}
+
+	@Override
+	public void setLine(int line) {
+		this.line = line;
+	}
+
+	@Override
+	public void setCharPositionInLine(int pos) {
+		this.charPositionInLine = pos;
+	}
+
+	@Override
+	public String getSourceName() {
+		return name;
+	}
+
+	@Override
+    public String toString() { return new String(data); }
+}
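
A short sketch of the mark()/rewind() bookkeeping in ANTLRStringStream above
(illustration only, not part of this patch; MarkRewindDemo is an assumed name).
mark() snapshots p, line, and charPositionInLine, and rewind() restores them,
which is what generated code relies on when it backtracks over a syntactic
predicate.

    import org.antlr.runtime.ANTLRStringStream;

    // Hypothetical demo class, not part of the ANTLR sources in this patch.
    public class MarkRewindDemo {
        public static void main(String[] args) {
            ANTLRStringStream input = new ANTLRStringStream("ab\ncd");
            input.consume();                        // past 'a'
            int m = input.mark();                   // snapshot p, line, charPositionInLine
            input.consume();                        // past 'b'
            input.consume();                        // past '\n' -> line becomes 2
            System.out.println(input.getLine());    // 2
            input.rewind(m);                        // restore the snapshot, release marker
            System.out.println(input.getLine());    // 1
            System.out.println((char) input.LA(1)); // 'b'
        }
    }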
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/BaseRecognizer.java b/runtime/Java/src/main/java/org/antlr/runtime/BaseRecognizer.java
new file mode 100644
index 0000000..bdba790
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/BaseRecognizer.java
@@ -0,0 +1,884 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/** A generic recognizer that can handle recognizers generated from
+ *  lexer, parser, and tree grammars.  This is all the parsing
+ *  support code essentially; most of it is error recovery stuff and
+ *  backtracking.
+ */
+public abstract class BaseRecognizer {
+	public static final int MEMO_RULE_FAILED = -2;
+	public static final int MEMO_RULE_UNKNOWN = -1;
+	public static final int INITIAL_FOLLOW_STACK_SIZE = 100;
+
+	// copies from Token object for convenience in actions
+	public static final int DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL;
+	public static final int HIDDEN = Token.HIDDEN_CHANNEL;
+
+	public static final String NEXT_TOKEN_RULE_NAME = "nextToken";
+
+	/** State of a lexer, parser, or tree parser are collected into a state
+	 *  object so the state can be shared.  This sharing is needed to
+	 *  have one grammar import others and share same error variables
+	 *  and other state variables.  It's a kind of explicit multiple
+	 *  inheritance via delegation of methods and shared state.
+	 */
+	protected RecognizerSharedState state;
+
+	public BaseRecognizer() {
+		state = new RecognizerSharedState();
+	}
+
+	public BaseRecognizer(RecognizerSharedState state) {
+		if ( state==null ) {
+			state = new RecognizerSharedState();
+		}
+		this.state = state;
+	}
+
+	/** Reset the parser's state; subclasses must rewind the input stream */
+	public void reset() {
+		// wack everything related to error recovery
+		if ( state==null ) {
+			return; // no shared state work to do
+		}
+		state._fsp = -1;
+		state.errorRecovery = false;
+		state.lastErrorIndex = -1;
+		state.failed = false;
+		state.syntaxErrors = 0;
+		// wack everything related to backtracking and memoization
+		state.backtracking = 0;
+		for (int i = 0; state.ruleMemo!=null && i < state.ruleMemo.length; i++) { // wipe cache
+			state.ruleMemo[i] = null;
+		}
+	}
+
+
+	/** Match current input symbol against ttype.  Attempt
+	 *  single token insertion or deletion error recovery.  If
+	 *  that fails, throw MismatchedTokenException.
+	 *
+	 *  To turn off single token insertion or deletion error
+	 *  recovery, override recoverFromMismatchedToken() and have it
+     *  throw an exception. See TreeParser.recoverFromMismatchedToken().
+     *  This way any error in a rule will cause an exception and
+     *  immediate exit from rule.  Rule would recover by resynchronizing
+     *  to the set of symbols that can follow rule ref.
+	 */
+	public Object match(IntStream input, int ttype, BitSet follow)
+		throws RecognitionException
+	{
+		//System.out.println("match "+((TokenStream)input).LT(1));
+		Object matchedSymbol = getCurrentInputSymbol(input);
+		if ( input.LA(1)==ttype ) {
+			input.consume();
+			state.errorRecovery = false;
+			state.failed = false;
+			return matchedSymbol;
+		}
+		if ( state.backtracking>0 ) {
+			state.failed = true;
+			return matchedSymbol;
+		}
+		matchedSymbol = recoverFromMismatchedToken(input, ttype, follow);
+		return matchedSymbol;
+	}
+
+	/** Match the wildcard ("." in a rule), i.e., consume any single input symbol */
+	public void matchAny(IntStream input) {
+		state.errorRecovery = false;
+		state.failed = false;
+		input.consume();
+	}
+
+	public boolean mismatchIsUnwantedToken(IntStream input, int ttype) {
+		return input.LA(2)==ttype;
+	}
+
+	public boolean mismatchIsMissingToken(IntStream input, BitSet follow) {
+		if ( follow==null ) {
+			// we have no information about the follow; we can only consume
+			// a single token and hope for the best
+			return false;
+		}
+		// compute what can follow this grammar element reference
+		if ( follow.member(Token.EOR_TOKEN_TYPE) ) {
+			BitSet viableTokensFollowingThisRule = computeContextSensitiveRuleFOLLOW();
+			follow = follow.or(viableTokensFollowingThisRule);
+            if ( state._fsp>=0 ) { // remove EOR if we're not the start symbol
+                follow.remove(Token.EOR_TOKEN_TYPE);
+            }
+		}
+		// if current token is consistent with what could come after set
+		// then we know we're missing a token; error recovery is free to
+		// "insert" the missing token
+
+		//System.out.println("viable tokens="+follow.toString(getTokenNames()));
+		//System.out.println("LT(1)="+((TokenStream)input).LT(1));
+
+		// BitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
+		// in the follow set to indicate that falling off the end of the start
+		// rule is in the set (EOF can follow).
+		if ( follow.member(input.LA(1)) || follow.member(Token.EOR_TOKEN_TYPE) ) {
+			//System.out.println("LT(1)=="+((TokenStream)input).LT(1)+" is consistent with what follows; inserting...");
+			return true;
+		}
+		return false;
+	}
+
+	/** Report a recognition problem.
+	 *
+	 *  This method sets errorRecovery to indicate the parser is recovering
+	 *  not parsing.  Once in recovery mode, no errors are generated.
+	 *  To get out of recovery mode, the parser must successfully match
+	 *  a token (after a resync).  So it will go:
+	 *
+	 * 		1. error occurs
+	 * 		2. enter recovery mode, report error
+	 * 		3. consume until token found in resynch set
+	 * 		4. try to resume parsing
+	 * 		5. next match() will reset errorRecovery mode
+	 *
+	 *  If you override, make sure to update syntaxErrors if you care about that.
+	 */
+	public void reportError(RecognitionException e) {
+		// if we've already reported an error and have not matched a token
+		// yet successfully, don't report any errors.
+		if ( state.errorRecovery ) {
+			//System.err.print("[SPURIOUS] ");
+			return;
+		}
+		state.syntaxErrors++; // don't count spurious
+		state.errorRecovery = true;
+
+		displayRecognitionError(this.getTokenNames(), e);
+	}
+
+	public void displayRecognitionError(String[] tokenNames,
+										RecognitionException e)
+	{
+		String hdr = getErrorHeader(e);
+		String msg = getErrorMessage(e, tokenNames);
+		emitErrorMessage(hdr+" "+msg);
+	}
+
+	/** What error message should be generated for the various
+	 *  exception types?
+	 *
+	 *  Not very object-oriented code, but I like having all error message
+	 *  generation within one method rather than spread among all of the
+	 *  exception classes. This also makes exception handling much easier
+	 *  because the exception classes do not need pointers back to this
+	 *  object to access utility routines and so on. Also, changing the
+	 *  message for an exception type would be difficult because you would
+	 *  have to subclass the exception and then somehow get ANTLR to create
+	 *  those kinds of exception objects instead of the default ones.
+	 *  This looks weird, but trust me--it makes the most sense in terms
+	 *  of flexibility.
+	 *
+	 *  For grammar debugging, you will want to override this to add
+	 *  more information such as the stack frame with
+	 *  getRuleInvocationStack(e, this.getClass().getName()) and,
+	 *  for no viable alts, the decision description and state etc...
+	 *
+	 *  Override this to change the message generated for one or more
+	 *  exception types.
+	 */
+	public String getErrorMessage(RecognitionException e, String[] tokenNames) {
+		String msg = e.getMessage();
+		if ( e instanceof UnwantedTokenException ) {
+			UnwantedTokenException ute = (UnwantedTokenException)e;
+			String tokenName;
+			if ( ute.expecting== Token.EOF ) {
+				tokenName = "EOF";
+			}
+			else {
+				tokenName = tokenNames[ute.expecting];
+			}
+			msg = "extraneous input "+getTokenErrorDisplay(ute.getUnexpectedToken())+
+				" expecting "+tokenName;
+		}
+		else if ( e instanceof MissingTokenException ) {
+			MissingTokenException mte = (MissingTokenException)e;
+			String tokenName;
+			if ( mte.expecting== Token.EOF ) {
+				tokenName = "EOF";
+			}
+			else {
+				tokenName = tokenNames[mte.expecting];
+			}
+			msg = "missing "+tokenName+" at "+getTokenErrorDisplay(e.token);
+		}
+		else if ( e instanceof MismatchedTokenException ) {
+			MismatchedTokenException mte = (MismatchedTokenException)e;
+			String tokenName;
+			if ( mte.expecting== Token.EOF ) {
+				tokenName = "EOF";
+			}
+			else {
+				tokenName = tokenNames[mte.expecting];
+			}
+			msg = "mismatched input "+getTokenErrorDisplay(e.token)+
+				" expecting "+tokenName;
+		}
+		else if ( e instanceof MismatchedTreeNodeException ) {
+			MismatchedTreeNodeException mtne = (MismatchedTreeNodeException)e;
+			String tokenName;
+			if ( mtne.expecting==Token.EOF ) {
+				tokenName = "EOF";
+			}
+			else {
+				tokenName = tokenNames[mtne.expecting];
+			}
+			msg = "mismatched tree node: "+mtne.node+
+				" expecting "+tokenName;
+		}
+		else if ( e instanceof NoViableAltException ) {
+			//NoViableAltException nvae = (NoViableAltException)e;
+			// for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
+			// and "(decision="+nvae.decisionNumber+") and
+			// "state "+nvae.stateNumber
+			msg = "no viable alternative at input "+getTokenErrorDisplay(e.token);
+		}
+		else if ( e instanceof EarlyExitException ) {
+			//EarlyExitException eee = (EarlyExitException)e;
+			// for development, can add "(decision="+eee.decisionNumber+")"
+			msg = "required (...)+ loop did not match anything at input "+
+				getTokenErrorDisplay(e.token);
+		}
+		else if ( e instanceof MismatchedSetException ) {
+			MismatchedSetException mse = (MismatchedSetException)e;
+			msg = "mismatched input "+getTokenErrorDisplay(e.token)+
+				" expecting set "+mse.expecting;
+		}
+		else if ( e instanceof MismatchedNotSetException ) {
+			MismatchedNotSetException mse = (MismatchedNotSetException)e;
+			msg = "mismatched input "+getTokenErrorDisplay(e.token)+
+				" expecting set "+mse.expecting;
+		}
+		else if ( e instanceof FailedPredicateException ) {
+			FailedPredicateException fpe = (FailedPredicateException)e;
+			msg = "rule "+fpe.ruleName+" failed predicate: {"+
+				fpe.predicateText+"}?";
+		}
+		return msg;
+	}
+
+	/** Get number of recognition errors (lexer, parser, tree parser).  Each
+	 *  recognizer tracks its own number, so the parser and lexer each have
+	 *  a separate count.  Does not count the spurious errors found between
+	 *  an error and the next valid token match.
+	 *
+	 *  See also reportError()
+	 */
+	public int getNumberOfSyntaxErrors() {
+		return state.syntaxErrors;
+	}
+
+	/** What is the error header, normally line/character position information? */
+	public String getErrorHeader(RecognitionException e) {
+		if ( getSourceName()!=null )
+			return getSourceName()+" line "+e.line+":"+e.charPositionInLine;
+				
+		return "line "+e.line+":"+e.charPositionInLine;
+	}
+
+	/** How should a token be displayed in an error message? The default
+	 *  is to display just the text, but during development you might
+	 *  want to have a lot of information spit out.  Override in that case
+	 *  to use t.toString() (which, for CommonToken, dumps everything about
+	 *  the token). This is better than forcing you to override a method in
+	 *  your token objects because you don't have to go modify your lexer
+	 *  so that it creates a new Java type.
+	 */
+	public String getTokenErrorDisplay(Token t) {
+		String s = t.getText();
+		if ( s==null ) {
+			if ( t.getType()==Token.EOF ) {
+				s = "<EOF>";
+			}
+			else {
+				s = "<"+t.getType()+">";
+			}
+		}
+		s = s.replaceAll("\n","\\\\n");
+		s = s.replaceAll("\r","\\\\r");
+		s = s.replaceAll("\t","\\\\t");
+		return "'"+s+"'";
+	}
+
+	/** Override this method to change where error messages go */
+	public void emitErrorMessage(String msg) {
+		System.err.println(msg);
+	}
+
+	/** Recover from an error found on the input stream.  This is
+	 *  for NoViableAlt and mismatched symbol exceptions.  If you enable
+	 *  single token insertion and deletion, this will usually not
+	 *  handle mismatched symbol exceptions but there could be a mismatched
+	 *  token that the match() routine could not recover from.
+	 */
+	public void recover(IntStream input, RecognitionException re) {
+		if ( state.lastErrorIndex==input.index() ) {
+			// uh oh, another error at same token index; must be a case
+			// where LT(1) is in the recovery token set so nothing is
+			// consumed; consume a single token so at least to prevent
+			// an infinite loop; this is a failsafe.
+			input.consume();
+		}
+		state.lastErrorIndex = input.index();
+		BitSet followSet = computeErrorRecoverySet();
+		beginResync();
+		consumeUntil(input, followSet);
+		endResync();
+	}
+
+	/** A hook to listen in on the token consumption during error recovery.
+	 *  The DebugParser subclasses this to fire events to the listener.
+	 */
+	public void beginResync() {
+	}
+
+	public void endResync() {
+	}
+
+	/*  Compute the error recovery set for the current rule.  During
+	 *  rule invocation, the parser pushes the set of tokens that can
+	 *  follow that rule reference on the stack; this amounts to
+	 *  computing FIRST of what follows the rule reference in the
+	 *  enclosing rule. This local follow set only includes tokens
+	 *  from within the rule; i.e., the FIRST computation done by
+	 *  ANTLR stops at the end of a rule.
+	 *
+	 *  EXAMPLE
+	 *
+	 *  When you find a "no viable alt exception", the input is not
+	 *  consistent with any of the alternatives for rule r.  The best
+	 *  thing to do is to consume tokens until you see something that
+	 *  can legally follow a call to r *or* any rule that called r.
+	 *  You don't want the exact set of viable next tokens because the
+	 *  input might just be missing a token--you might consume the
+	 *  rest of the input looking for one of the missing tokens.
+	 *
+	 *  Consider grammar:
+	 *
+	 *  a : '[' b ']'
+	 *    | '(' b ')'
+	 *    ;
+	 *  b : c '^' INT ;
+	 *  c : ID
+	 *    | INT
+	 *    ;
+	 *
+	 *  At each rule invocation, the set of tokens that could follow
+	 *  that rule is pushed on a stack.  Here are the various "local"
+	 *  follow sets:
+	 *
+	 *  FOLLOW(b1_in_a) = FIRST(']') = ']'
+	 *  FOLLOW(b2_in_a) = FIRST(')') = ')'
+	 *  FOLLOW(c_in_b) = FIRST('^') = '^'
+	 *
+	 *  Upon erroneous input "[]", the call chain is
+	 *
+	 *  a -> b -> c
+	 *
+	 *  and, hence, the follow context stack is:
+	 *
+	 *  depth  local follow set     after call to rule
+	 *    0         <EOF>                    a (from main())
+	 *    1          ']'                     b
+	 *    2          '^'                     c
+	 *
+	 *  Notice that ')' is not included, because b would have to have
+	 *  been called from a different context in rule a for ')' to be
+	 *  included.
+	 *
+	 *  For error recovery, we cannot consider FOLLOW(c)
+	 *  (context-sensitive or otherwise).  We need the combined set of
+	 *  all context-sensitive FOLLOW sets--the set of all tokens that
+	 *  could follow any reference in the call chain.  We need to
+	 *  resync to one of those tokens.  Note that FOLLOW(c)='^' and if
+	 *  we resync'd to that token, we'd consume until EOF.  We need to
+	 *  sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+	 *  In this case, for input "[]", LA(1) is in this set so we would
+	 *  not consume anything and after printing an error rule c would
+	 *  return normally.  It would not find the required '^' though.
+	 *  At this point, it gets a mismatched token error and throws an
+	 *  exception (since LA(1) is not in the viable following token
+	 *  set).  The rule exception handler tries to recover, but finds
+	 *  the same recovery set and doesn't consume anything.  Rule b
+	 *  exits normally returning to rule a.  Now it finds the ']' (and
+	 *  with the successful match exits errorRecovery mode).
+	 *
+	 *  So, you can see that the parser walks up the call chain looking
+	 *  for the token that was a member of the recovery set.
+	 *
+	 *  Errors are not generated in errorRecovery mode.
+	 *
+	 *  ANTLR's error recovery mechanism is based upon original ideas:
+	 *
+	 *  "Algorithms + Data Structures = Programs" by Niklaus Wirth
+	 *
+	 *  and
+	 *
+	 *  "A note on error recovery in recursive descent parsers":
+	 *  http://portal.acm.org/citation.cfm?id=947902.947905
+	 *
+	 *  Later, Josef Grosch had some good ideas:
+	 *
+	 *  "Efficient and Comfortable Error Recovery in Recursive Descent
+	 *  Parsers":
+	 *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+	 *
+	 *  Like Grosch I implemented local FOLLOW sets that are combined
+	 *  at run-time upon error to avoid overhead during parsing.
+	 */
+	protected BitSet computeErrorRecoverySet() {
+		return combineFollows(false);
+	}
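+	// Illustrative sketch: for the '[' b ']' grammar in the comment above, while
+	// rule c is active the follow stack holds (conceptually) one BitSet per call:
+	//
+	//   state.following = [ {<EOF>}, {']'}, {'^'} ]   // depths 0, 1, 2
+	//
+	// combineFollows(false) ORs them all together, so consumeUntil() in recover()
+	// stops at the first ']' or '^' (or whatever can follow the start rule).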
+
+	/** Compute the context-sensitive FOLLOW set for current rule.
+	 *  This is set of token types that can follow a specific rule
+	 *  reference given a specific call chain.  You get the set of
+	 *  viable tokens that can possibly come next (lookahead depth 1)
+	 *  given the current call chain.  Contrast this with the
+	 *  definition of plain FOLLOW for rule r:
+	 *
+	 *   FOLLOW(r)={x | S=&gt;*alpha r beta in G and x in FIRST(beta)}
+	 *
+	 *  where x in T* and alpha, beta in V*; T is the set of terminals and
+	 *  V is the set of terminals and nonterminals.  In other words,
+	 *  FOLLOW(r) is the set of all tokens that can possibly follow
+	 *  references to r in *any* sentential form (context).  At
+	 *  runtime, however, we know precisely which context applies as
+	 *  we have the call chain.  We may compute the exact (rather
+	 *  than covering superset) set of following tokens.
+	 *
+	 *  For example, consider grammar:
+	 *
+	 *  stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
+	 *       | "return" expr '.'
+	 *       ;
+	 *  expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
+	 *  atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
+	 *       | '(' expr ')'
+	 *       ;
+	 *
+	 *  The FOLLOW sets are all inclusive whereas context-sensitive
+	 *  FOLLOW sets are precisely what could follow a rule reference.
+	 *  For input "i=(3);", here is the derivation:
+	 *
+	 *  stat =&gt; ID '=' expr ';'
+	 *       =&gt; ID '=' atom ('+' atom)* ';'
+	 *       =&gt; ID '=' '(' expr ')' ('+' atom)* ';'
+	 *       =&gt; ID '=' '(' atom ')' ('+' atom)* ';'
+	 *       =&gt; ID '=' '(' INT ')' ('+' atom)* ';'
+	 *       =&gt; ID '=' '(' INT ')' ';'
+	 *
+	 *  At the "3" token, you'd have a call chain of
+	 *
+	 *    stat &rarr; expr &rarr; atom &rarr; expr &rarr; atom
+	 *
+	 *  What can follow that specific nested ref to atom?  Exactly ')'
+	 *  as you can see by looking at the derivation of this specific
+	 *  input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
+	 *
+	 *  You want the exact viable token set when recovering from a
+	 *  token mismatch.  Upon token mismatch, if LA(1) is member of
+	 *  the viable next token set, then you know there is most likely
+	 *  a missing token in the input stream.  "Insert" one by just not
+	 *  throwing an exception.
+	 */
+	protected BitSet computeContextSensitiveRuleFOLLOW() {
+		return combineFollows(true);
+	}
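+	// Illustrative sketch: at the "3" token in the derivation above, this exact
+	// set is just {')'}, whereas the static FOLLOW(atom) is {'+',')',';','.'}; e.g.
+	//
+	//   BitSet exact = computeContextSensitiveRuleFOLLOW(); // {')'} for that call chain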
+
+	// What does "exact" mean?  It seems to only add sets from above on the stack
+	// if EOR is in set i.  When it sees a set w/o EOR, it stops adding.
+	// Why would we ever want them all?  Maybe for no viable alt instead of
+	// a mismatched token?
+	protected BitSet combineFollows(boolean exact) {
+		int top = state._fsp;
+		BitSet followSet = new BitSet();
+		for (int i=top; i>=0; i--) {
+			BitSet localFollowSet = state.following[i];
+			/*
+			System.out.println("local follow depth "+i+"="+
+							   localFollowSet.toString(getTokenNames())+")");
+			 */
+			followSet.orInPlace(localFollowSet);
+			if ( exact ) {
+				// can we see end of rule?
+				if ( localFollowSet.member(Token.EOR_TOKEN_TYPE) ) {
+					// Only leave EOR in set if at top (start rule); this lets
+					// us know if have to include follow(start rule); i.e., EOF
+					if ( i>0 ) {
+						followSet.remove(Token.EOR_TOKEN_TYPE);
+					}
+				}
+				else { // can't see end of rule, quit
+					break;
+				}
+			}
+		}
+		return followSet;
+	}
+
+	/** Attempt to recover from a single missing or extra token.
+	 *
+	 *  EXTRA TOKEN
+	 *
+	 *  LA(1) is not what we are looking for.  If LA(2) has the right token,
+	 *  however, then assume LA(1) is some extra spurious token.  Delete it
+	 *  and use LA(2) as if we were doing a normal match(), which advances
+	 *  the input.
+	 *
+	 *  MISSING TOKEN
+	 *
+	 *  If the current token is consistent with what could come after
+	 *  ttype, then it is ok to "insert" the missing token, else throw
+	 *  an exception.  For example, input "i=(3;" is clearly missing the
+	 *  ')'.  When the parser returns from the nested call to expr, it
+	 *  will have call chain:
+	 *
+	 *    stat &rarr; expr &rarr; atom
+	 *
+	 *  and it will be trying to match the ')' at this point in the
+	 *  derivation:
+	 *
+	 *       =&gt; ID '=' '(' INT ')' ('+' atom)* ';'
+	 *                          ^
+	 *  match() will see that ';' doesn't match ')' and report a
+	 *  mismatched token error.  To recover, it sees that LA(1)==';'
+	 *  is in the set of tokens that can follow the ')' token
+	 *  reference in rule atom.  It can assume that you forgot the ')'.
+	 */
+	protected Object recoverFromMismatchedToken(IntStream input, int ttype, BitSet follow)
+		throws RecognitionException
+	{
+		RecognitionException e = null;
+		// if next token is what we are looking for then "delete" this token
+		if ( mismatchIsUnwantedToken(input, ttype) ) {
+			e = new UnwantedTokenException(ttype, input);
+			/*
+			System.err.println("recoverFromMismatchedToken deleting "+
+							   ((TokenStream)input).LT(1)+
+							   " since "+((TokenStream)input).LT(2)+" is what we want");
+			 */
+			beginResync();
+			input.consume(); // simply delete extra token
+			endResync();
+			reportError(e);  // report after consuming so AW sees the token in the exception
+			// we want to return the token we're actually matching
+			Object matchedSymbol = getCurrentInputSymbol(input);
+			input.consume(); // move past ttype token as if all were ok
+			return matchedSymbol;
+		}
+		// can't recover with single token deletion, try insertion
+		if ( mismatchIsMissingToken(input, follow) ) {
+			Object inserted = getMissingSymbol(input, e, ttype, follow);
+			e = new MissingTokenException(ttype, input, inserted);
+			reportError(e);  // report after inserting so AW sees the token in the exception
+			return inserted;
+		}
+		// even that didn't work; must throw the exception
+		e = new MismatchedTokenException(ttype, input);
+		throw e;
+	}
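+	// Illustrative sketch of how this method is reached; RPAREN and follow stand
+	// in for the token-type constant and FOLLOW set a generated parser would pass:
+	//
+	//   if ( input.LA(1) == RPAREN ) { input.consume(); }
+	//   else { matchedSymbol = recoverFromMismatchedToken(input, RPAREN, follow); }
+	//
+	// For "i=(3;" the insertion branch fires (';' can follow ')'), so a missing
+	// ')' is conjured up; for "i=(3));", when the parser later tries to match the
+	// final ';', the deletion branch fires because LA(2) is the ';' it wants.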
+
+	/** Not currently used */
+	public Object recoverFromMismatchedSet(IntStream input,
+										   RecognitionException e,
+										   BitSet follow)
+		throws RecognitionException
+	{
+		if ( mismatchIsMissingToken(input, follow) ) {
+			// System.out.println("missing token");
+			reportError(e);
+			// we don't know how to conjure up a token for sets yet
+			return getMissingSymbol(input, e, Token.INVALID_TOKEN_TYPE, follow);
+		}
+		// TODO do single token deletion like above for Token mismatch
+		throw e;
+	}
+
+	/** Match needs to return the current input symbol, which gets put
+	 *  into the label for the associated token ref; e.g., x=ID.  Token
+	 *  and tree parsers need to return different objects. Rather than test
+	 *  for input stream type or change the IntStream interface, I use
+	 *  a simple method to ask the recognizer to tell me what the current
+	 *  input symbol is.
+	 * 
+	 *  This is ignored for lexers.
+	 */
+	protected Object getCurrentInputSymbol(IntStream input) { return null; }
+
+	/** Conjure up a missing token during error recovery.
+	 *
+	 *  The recognizer attempts to recover from single missing
+	 *  symbols. But, actions might refer to that missing symbol.
+	 *  For example, x=ID {f($x);}. The action clearly assumes
+	 *  that there has been an identifier matched previously and that
+	 *  $x points at that token. If that token is missing, but
+	 *  the next token in the stream is what we want, we assume that
+	 *  this token is missing and we keep going. Because we
+	 *  have to return some token to replace the missing token,
+	 *  we have to conjure one up. This method gives the user control
+	 *  over the tokens returned for missing tokens. Mostly,
+	 *  you will want to create something special for identifier
+	 *  tokens. For literals such as '{' and ',', the default
+	 *  action in the parser or tree parser works. It simply creates
+	 *  a CommonToken of the appropriate type. The text will be the token.
+	 *  If you change what tokens must be created by the lexer,
+	 *  override this method to create the appropriate tokens.
+	 */
+	protected Object getMissingSymbol(IntStream input,
+									  RecognitionException e,
+									  int expectedTokenType,
+									  BitSet follow)
+	{
+		return null;
+	}
+
+	public void consumeUntil(IntStream input, int tokenType) {
+		//System.out.println("consumeUntil "+tokenType);
+		int ttype = input.LA(1);
+		while (ttype != Token.EOF && ttype != tokenType) {
+			input.consume();
+			ttype = input.LA(1);
+		}
+	}
+
+	/** Consume tokens until one matches the given token set */
+	public void consumeUntil(IntStream input, BitSet set) {
+		//System.out.println("consumeUntil("+set.toString(getTokenNames())+")");
+		int ttype = input.LA(1);
+		while (ttype != Token.EOF && !set.member(ttype) ) {
+			//System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
+			input.consume();
+			ttype = input.LA(1);
+		}
+	}
+
+	/** Push a rule's follow set using our own hardcoded stack */
+	protected void pushFollow(BitSet fset) {
+		if ( (state._fsp +1)>=state.following.length ) {
+			BitSet[] f = new BitSet[state.following.length*2];
+			System.arraycopy(state.following, 0, f, 0, state.following.length);
+			state.following = f;
+		}
+		state.following[++state._fsp] = fset;
+	}
+
+	/** Return List&lt;String&gt; of the rules in your parser instance
+	 *  leading up to a call to this method.  You could override if
+	 *  you want more details such as the file/line info of where
+	 *  in the parser java code a rule is invoked.
+	 *
+	 *  This is very useful for error messages and for context-sensitive
+	 *  error recovery.
+	 */
+	public List<String> getRuleInvocationStack() {
+		String parserClassName = getClass().getName();
+		return getRuleInvocationStack(new Throwable(), parserClassName);
+	}
+
+	/** A more general version of getRuleInvocationStack where you can
+	 *  pass in, for example, a RecognitionException to get its rule
+	 *  stack trace.  This routine is shared by all recognizers, hence
+	 *  static.
+	 *
+	 *  TODO: move to a utility class or something; weird having lexer call this
+	 */
+	public static List<String> getRuleInvocationStack(Throwable e,
+											  String recognizerClassName)
+	{
+		List<String> rules = new ArrayList<String>();
+		StackTraceElement[] stack = e.getStackTrace();
+		int i;
+		for (i=stack.length-1; i>=0; i--) {
+			StackTraceElement t = stack[i];
+			if ( t.getClassName().startsWith("org.antlr.runtime.") ) {
+				continue; // skip support code such as this method
+			}
+			if ( t.getMethodName().equals(NEXT_TOKEN_RULE_NAME) ) {
+				continue;
+			}
+			if ( !t.getClassName().equals(recognizerClassName) ) {
+				continue; // must not be part of this parser
+			}
+            rules.add(t.getMethodName());
+		}
+		return rules;
+	}
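+	// Illustrative sketch: an error handler can use the static form to report
+	// where in the grammar a problem occurred, e.g.
+	//
+	//   List<String> calls = getRuleInvocationStack(e, getClass().getName());
+	//   System.err.println("rule stack: " + calls);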
+
+    public int getBacktrackingLevel() { return state.backtracking; }
+
+    public void setBacktrackingLevel(int n) { state.backtracking = n; }
+
+    /** Return whether or not a backtracking attempt failed. */
+    public boolean failed() { return state.failed; }
+
+	/** Used to print out token names like ID during debugging and
+	 *  error reporting.  The generated parsers implement a method
+	 *  that overrides this to point to their String[] tokenNames.
+	 */
+	public String[] getTokenNames() {
+		return null;
+	}
+
+	/** For debugging and other purposes, might want the grammar name.
+	 *  Have ANTLR generate an implementation for this method.
+	 */
+	public String getGrammarFileName() {
+		return null;
+	}
+
+	public abstract String getSourceName();
+
+	/** A convenience method for use most often with template rewrites.
+	 *  Convert a List&lt;Token&gt; to List&lt;String&gt;
+	 */
+	public List<String> toStrings(List<? extends Token> tokens) {
+		if ( tokens==null ) return null;
+		List<String> strings = new ArrayList<String>(tokens.size());
+		for (int i=0; i<tokens.size(); i++) {
+			strings.add(tokens.get(i).getText());
+		}
+		return strings;
+	}
+
+	/** Given a rule number and a start token index number, return
+	 *  MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
+	 *  start index.  If this rule has parsed input starting from the
+	 *  start index before, then return where the rule stopped parsing.
+	 *  It returns the index of the last token matched by the rule.
+	 *
+	 *  For now we use a hashtable and just the slow Object-based one.
+	 *  Later, we can make a special one for ints and also one that
+	 *  tosses out data after we commit past input position i.
+	 */
+	public int getRuleMemoization(int ruleIndex, int ruleStartIndex) {
+		if ( state.ruleMemo[ruleIndex]==null ) {
+			state.ruleMemo[ruleIndex] = new HashMap<Integer, Integer>();
+		}
+		Integer stopIndexI =
+			state.ruleMemo[ruleIndex].get(ruleStartIndex);
+		if ( stopIndexI==null ) {
+			return MEMO_RULE_UNKNOWN;
+		}
+		return stopIndexI;
+	}
+
+	/** Has this rule already parsed input at the current index in the
+	 *  input stream?  Return the stop token index or MEMO_RULE_UNKNOWN.
+	 *  If we attempted but failed to parse properly before, return
+	 *  MEMO_RULE_FAILED.
+	 *
+	 *  This method has a side-effect: if we have seen this input for
+	 *  this rule and successfully parsed before, then seek ahead to
+	 *  1 past the stop token matched for this rule last time.
+	 */
+	public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+		int stopIndex = getRuleMemoization(ruleIndex, input.index());
+		if ( stopIndex==MEMO_RULE_UNKNOWN ) {
+			return false;
+		}
+		if ( stopIndex==MEMO_RULE_FAILED ) {
+			//System.out.println("rule "+ruleIndex+" will never succeed");
+			state.failed=true;
+		}
+		else {
+			//System.out.println("seen rule "+ruleIndex+" before; skipping ahead to @"+(stopIndex+1)+" failed="+state.failed);
+			input.seek(stopIndex+1); // jump to one past stop token
+		}
+		return true;
+	}
+
+	/** Record whether or not this rule parsed the input at this position
+	 *  successfully.  Use a standard java hashtable for now.
+	 */
+	public void memoize(IntStream input,
+						int ruleIndex,
+						int ruleStartIndex)
+	{
+		int stopTokenIndex = state.failed?MEMO_RULE_FAILED:input.index()-1;
+		if ( state.ruleMemo==null ) {
+			System.err.println("!!!!!!!!! memo array is null for "+ getGrammarFileName());
+		}
+		if ( ruleIndex >= state.ruleMemo.length ) {
+			System.err.println("!!!!!!!!! memo size is "+state.ruleMemo.length+", but rule index is "+ruleIndex);
+		}
+		if ( state.ruleMemo[ruleIndex]!=null ) {
+			state.ruleMemo[ruleIndex].put(ruleStartIndex, stopTokenIndex);
+		}
+	}
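+	// Illustrative sketch of the pattern a generated rule method typically follows
+	// when memoization is enabled (rule index 7 is an arbitrary example value):
+	//
+	//   int startIndex = input.index();
+	//   if ( state.backtracking > 0 && alreadyParsedRule(input, 7) ) return;
+	//   try {
+	//       // ... match the rule body ...
+	//   }
+	//   finally {
+	//       if ( state.backtracking > 0 ) memoize(input, 7, startIndex);
+	//   }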
+
+	/** Return how many rule/input-index pairs there are in total.
+	 *  TODO: this includes synpreds. :(
+	 */
+	public int getRuleMemoizationCacheSize() {
+		int n = 0;
+		for (int i = 0; state.ruleMemo!=null && i < state.ruleMemo.length; i++) {
+			Map<Integer, Integer> ruleMap = state.ruleMemo[i];
+			if ( ruleMap!=null ) {
+				n += ruleMap.size(); // how many input indexes are recorded?
+			}
+		}
+		return n;
+	}
+
+	public void traceIn(String ruleName, int ruleIndex, Object inputSymbol)  {
+		System.out.print("enter "+ruleName+" "+inputSymbol);
+		if ( state.backtracking>0 ) {
+			System.out.print(" backtracking="+state.backtracking);
+		}
+		System.out.println();
+	}
+
+	public void traceOut(String ruleName,
+						 int ruleIndex,
+						 Object inputSymbol)
+	{
+		System.out.print("exit "+ruleName+" "+inputSymbol);
+		if ( state.backtracking>0 ) {
+            System.out.print(" backtracking="+state.backtracking);
+            if ( state.failed ) System.out.print(" failed");
+            else System.out.print(" succeeded");
+        }
+		System.out.println();
+	}
+
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/BitSet.java b/runtime/Java/src/main/java/org/antlr/runtime/BitSet.java
new file mode 100644
index 0000000..5216518
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/BitSet.java
@@ -0,0 +1,328 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import java.util.List;
+
+/**A stripped-down version of org.antlr.misc.BitSet that is just
+ * good enough to handle runtime requirements such as FOLLOW sets
+ * for automatic error recovery.
+ */
+public class BitSet implements Cloneable {
+    protected final static int BITS = 64;    // number of bits / long
+    protected final static int LOG_BITS = 6; // 2^6 == 64
+
+    /* We will often need to do a mod operator (i mod nbits).  It
+     * turns out that, for powers of two, this mod operation is the
+     * same as (i & (nbits-1)).  Since mod is slow, we use a
+     * precomputed mod mask to do the mod instead.
+     */
+    protected final static int MOD_MASK = BITS - 1;
+
+    /** The actual data bits */
+    protected long bits[];
+
+    /** Construct a bitset of size one word (64 bits) */
+    public BitSet() {
+        this(BITS);
+    }
+
+    /** Construction from a static array of longs */
+    public BitSet(long[] bits_) {
+        bits = bits_;
+    }
+
+	/** Construction from a list of integers */
+	public BitSet(List<Integer> items) {
+		this();
+		for (int i = 0; i < items.size(); i++) {
+			Integer v = items.get(i);
+			add(v);
+		}
+	}
+
+    /** Construct a bitset given the size
+     * @param nbits The size of the bitset in bits
+     */
+    public BitSet(int nbits) {
+        bits = new long[((nbits - 1) >> LOG_BITS) + 1];
+    }
+
+	public static BitSet of(int el) {
+		BitSet s = new BitSet(el + 1);
+		s.add(el);
+		return s;
+	}
+
+	public static BitSet of(int a, int b) {
+		BitSet s = new BitSet(Math.max(a,b)+1);
+		s.add(a);
+		s.add(b);
+		return s;
+	}
+
+	public static BitSet of(int a, int b, int c) {
+		BitSet s = new BitSet();
+		s.add(a);
+		s.add(b);
+		s.add(c);
+		return s;
+	}
+
+	public static BitSet of(int a, int b, int c, int d) {
+		BitSet s = new BitSet();
+		s.add(a);
+		s.add(b);
+		s.add(c);
+		s.add(d);
+		return s;
+	}
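+	// Illustrative sketch: a small follow set built by hand with these factories
+	// (generated code usually uses the packed long[] constructor instead);
+	// RBRACK and CARET stand in for token-type constants:
+	//
+	//   BitSet follow = BitSet.of(RBRACK);
+	//   follow.orInPlace(BitSet.of(CARET));
+	//   boolean ok = follow.member(CARET);   // true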
+
+	/** return this | a in a new set */
+	public BitSet or(BitSet a) {
+		if ( a==null ) {
+			return this;
+		}
+		BitSet s = (BitSet)this.clone();
+		s.orInPlace(a);
+		return s;
+	}
+
+	/** or this element into this set (grow as necessary to accommodate) */
+	public void add(int el) {
+		int n = wordNumber(el);
+		if (n >= bits.length) {
+			growToInclude(el);
+		}
+		bits[n] |= bitMask(el);
+	}
+
+	/**
+	 * Grows the set to a larger number of bits.
+	 * @param bit element that must fit in set
+	 */
+	public void growToInclude(int bit) {
+		int newSize = Math.max(bits.length << 1, numWordsToHold(bit));
+		long newbits[] = new long[newSize];
+		System.arraycopy(bits, 0, newbits, 0, bits.length);
+		bits = newbits;
+	}
+
+	public void orInPlace(BitSet a) {
+		if ( a==null ) {
+			return;
+		}
+		// If this is smaller than a, grow this first
+		if (a.bits.length > bits.length) {
+			setSize(a.bits.length);
+		}
+		int min = Math.min(bits.length, a.bits.length);
+		for (int i = min - 1; i >= 0; i--) {
+			bits[i] |= a.bits[i];
+		}
+	}
+
+	/**
+	 * Sets the size of a set.
+	 * @param nwords how many words the new set should be
+	 */
+	private void setSize(int nwords) {
+		long newbits[] = new long[nwords];
+		int n = Math.min(nwords, bits.length);
+		System.arraycopy(bits, 0, newbits, 0, n);
+		bits = newbits;
+	}
+
+    private final static long bitMask(int bitNumber) {
+        int bitPosition = bitNumber & MOD_MASK; // bitNumber mod BITS
+        return 1L << bitPosition;
+    }
+
+	@Override
+    public Object clone() {
+        BitSet s;
+        try {
+            s = (BitSet)super.clone();
+            s.bits = new long[bits.length];
+            System.arraycopy(bits, 0, s.bits, 0, bits.length);
+        }
+        catch (CloneNotSupportedException e) {
+            throw new InternalError();
+        }
+        return s;
+    }
+
+    public int size() {
+        int deg = 0;
+        for (int i = bits.length - 1; i >= 0; i--) {
+            long word = bits[i];
+            if (word != 0L) {
+                for (int bit = BITS - 1; bit >= 0; bit--) {
+                    if ((word & (1L << bit)) != 0) {
+                        deg++;
+                    }
+                }
+            }
+        }
+        return deg;
+    }
+
+	@Override
+    public boolean equals(Object other) {
+        if ( other == null || !(other instanceof BitSet) ) {
+            return false;
+        }
+
+        BitSet otherSet = (BitSet)other;
+
+        int n = Math.min(this.bits.length, otherSet.bits.length);
+
+        // for any bits in common, compare
+        for (int i=0; i<n; i++) {
+            if (this.bits[i] != otherSet.bits[i]) {
+                return false;
+            }
+        }
+
+        // make sure any extra bits are off
+
+        if (this.bits.length > n) {
+            for (int i = n; i<this.bits.length; i++) {
+                if (this.bits[i] != 0) {
+                    return false;
+                }
+            }
+        }
+        else if (otherSet.bits.length > n) {
+            for (int i = n; i<otherSet.bits.length; i++) {
+                if (otherSet.bits[i] != 0) {
+                    return false;
+                }
+            }
+        }
+
+        return true;
+    }
+
+    public boolean member(int el) {
+		if ( el<0 ) {
+			return false;
+		}
+        int n = wordNumber(el);
+        if (n >= bits.length) return false;
+        return (bits[n] & bitMask(el)) != 0;
+    }
+
+	// remove this element from this set
+	public void remove(int el) {
+		int n = wordNumber(el);
+		if (n < bits.length) {
+			bits[n] &= ~bitMask(el);
+		}
+	}
+
+    public boolean isNil() {
+        for (int i = bits.length - 1; i >= 0; i--) {
+            if (bits[i] != 0) return false;
+        }
+        return true;
+    }
+
+    private final int numWordsToHold(int el) {
+        return (el >> LOG_BITS) + 1;
+    }
+
+    public int numBits() {
+        return bits.length << LOG_BITS; // num words * bits per word
+    }
+
+    /** Return how much space is being used by the bits array, not
+     *  how many bits are actually turned on.
+     */
+    public int lengthInLongWords() {
+        return bits.length;
+    }
+
+    /**Is this contained within a? */
+    /*
+	public boolean subset(BitSet a) {
+        if (a == null || !(a instanceof BitSet)) return false;
+        return this.and(a).equals(this);
+    }
+	*/
+
+    public int[] toArray() {
+        int[] elems = new int[size()];
+        int en = 0;
+        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+            if (member(i)) {
+                elems[en++] = i;
+            }
+        }
+        return elems;
+    }
+
+    public long[] toPackedArray() {
+        return bits;
+    }
+
+	private final static int wordNumber(int bit) {
+		return bit >> LOG_BITS; // bit / BITS
+	}
+
+	@Override
+	public String toString() {
+		return toString(null);
+	}
+
+	public String toString(String[] tokenNames) {
+		StringBuilder buf = new StringBuilder();
+		String separator = ",";
+		boolean havePrintedAnElement = false;
+		buf.append('{');
+
+		for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+			if (member(i)) {
+				if (i > 0 && havePrintedAnElement ) {
+					buf.append(separator);
+				}
+				if ( tokenNames!=null ) {
+					buf.append(tokenNames[i]);
+				}
+				else {
+					buf.append(i);
+				}
+				havePrintedAnElement = true;
+			}
+		}
+		buf.append('}');
+		return buf.toString();
+	}
+
+
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/BufferedTokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/BufferedTokenStream.java
new file mode 100644
index 0000000..90d218b
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/BufferedTokenStream.java
@@ -0,0 +1,289 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.runtime;
+
+import java.util.List;
+import java.util.ArrayList;
+import java.util.NoSuchElementException;
+
+/** Buffer all input tokens but do on-demand fetching of new tokens from
+ *  lexer. Useful when the parser or lexer has to set context/mode info before
+ *  proper lexing of future tokens. The ST template parser needs this,
+ *  for example, because it has to constantly flip back and forth between
+ *  inside/outside templates. E.g., {@code <names:{hi, <it>}>} has to parse names
+ *  as part of an expression but {@code "hi, <it>"} as a nested template.
+ *
+ *  You can't use this stream if you pass whitespace or other off-channel
+ *  tokens to the parser. The stream can't ignore off-channel tokens.
+ *  (UnbufferedTokenStream is the same way.)
+ *
+ *  This is not a subclass of UnbufferedTokenStream because I don't want
+ *  to confuse the small moving window of tokens it uses with the full buffer.
+ */
+public class BufferedTokenStream implements TokenStream {
+    protected TokenSource tokenSource;
+
+    /** Record every single token pulled from the source so we can reproduce
+     *  chunks of it later.  The buffer in LookaheadStream overlaps sometimes
+     *  as its moving window moves through the input.  This list captures
+     *  everything so we can access complete input text.
+     */
+    protected List<Token> tokens = new ArrayList<Token>(100);
+
+    /** Track the last mark() call result value for use in rewind(). */
+    protected int lastMarker;
+
+    /** The index into the tokens list of the current token (next token
+     *  to consume).  tokens[p] should be LT(1).  p=-1 indicates that we
+     *  need to initialize with the first token.  The constructor doesn't
+     *  fetch a token; the first call to LT(1) or similar does, and sets p=0.
+     */
+    protected int p = -1;
+
+	protected int range = -1; // how deep have we gone?
+
+    public BufferedTokenStream() {}
+
+    public BufferedTokenStream(TokenSource tokenSource) {
+        this.tokenSource = tokenSource;
+    }
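+    // Illustrative sketch of wiring a token source to this stream; MyLexer is a
+    // hypothetical generated lexer and ANTLRStringStream comes from this runtime:
+    //
+    //   BufferedTokenStream tokens =
+    //       new BufferedTokenStream(new MyLexer(new ANTLRStringStream("x = 1;")));
+    //   tokens.fill();                          // pull everything up to EOF
+    //   System.out.println(tokens.toString());  // reproduces the full input text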
+
+	@Override
+    public TokenSource getTokenSource() { return tokenSource; }
+
+	@Override
+	public int index() { return p; }
+
+	@Override
+	public int range() { return range; }
+
+	@Override
+    public int mark() {
+        if ( p == -1 ) setup();
+		lastMarker = index();
+		return lastMarker;
+	}
+
+	@Override
+	public void release(int marker) {
+		// no resources to release
+	}
+
+	@Override
+    public void rewind(int marker) {
+        seek(marker);
+    }
+
+	@Override
+    public void rewind() {
+        seek(lastMarker);
+    }
+
+    public void reset() {
+        p = 0;
+        lastMarker = 0;
+    }
+
+	@Override
+    public void seek(int index) { p = index; }
+
+	@Override
+    public int size() { return tokens.size(); }
+
+    /** Move the input pointer to the next incoming token.  The stream
+     *  must become active with LT(1) available.  consume() simply
+     *  moves the input pointer so that LT(1) points at the next
+     *  input symbol. Consume at least one token.
+     *
+     *  Subclasses such as CommonTokenStream also walk past any token not on
+     *  the channel the parser is listening to.
+     */
+	@Override
+    public void consume() {
+        if ( p == -1 ) setup();
+        p++;
+        sync(p);
+    }
+
+    /** Make sure index i in tokens has a token. */
+    protected void sync(int i) {
+        int n = i - tokens.size() + 1; // how many more elements do we need?
+        //System.out.println("sync("+i+") needs "+n);
+        if ( n > 0 ) fetch(n);
+    }
+
+    /** add n elements to buffer */
+    protected void fetch(int n) {
+        for (int i=1; i<=n; i++) {
+            Token t = tokenSource.nextToken();
+            t.setTokenIndex(tokens.size());
+            //System.out.println("adding "+t+" at index "+tokens.size());
+            tokens.add(t);
+            if ( t.getType()==Token.EOF ) break;
+        }
+    }
+
+	@Override
+    public Token get(int i) {
+        if ( i < 0 || i >= tokens.size() ) {
+            throw new NoSuchElementException("token index "+i+" out of range 0.."+(tokens.size()-1));
+        }
+        return tokens.get(i);
+    }
+
+	/** Get all tokens from start..stop inclusively */
+	public List<? extends Token> get(int start, int stop) {
+		if ( start<0 || stop<0 ) return null;
+		if ( p == -1 ) setup();
+		List<Token> subset = new ArrayList<Token>();
+		if ( stop>=tokens.size() ) stop = tokens.size()-1;
+		for (int i = start; i <= stop; i++) {
+			Token t = tokens.get(i);
+			if ( t.getType()==Token.EOF ) break;
+			subset.add(t);
+		}
+		return subset;
+	}
+
+	@Override
+	public int LA(int i) { return LT(i).getType(); }
+
+    protected Token LB(int k) {
+        if ( (p-k)<0 ) return null;
+        return tokens.get(p-k);
+    }
+
+	@Override
+    public Token LT(int k) {
+        if ( p == -1 ) setup();
+        if ( k==0 ) return null;
+        if ( k < 0 ) return LB(-k);
+
+		int i = p + k - 1;
+		sync(i);
+        if ( i >= tokens.size() ) { // return EOF token
+            // EOF must be last token
+            return tokens.get(tokens.size()-1);
+        }
+		if ( i>range ) range = i; 		
+        return tokens.get(i);
+    }
+
+    protected void setup() { sync(0); p = 0; }
+
+    /** Reset this token stream by setting its token source. */
+    public void setTokenSource(TokenSource tokenSource) {
+        this.tokenSource = tokenSource;
+        tokens.clear();
+        p = -1;
+    }
+    
+    public List<? extends Token> getTokens() { return tokens; }
+
+    public List<? extends Token> getTokens(int start, int stop) {
+        return getTokens(start, stop, (BitSet)null);
+    }
+
+    /** Given a start and stop index, return a List of all tokens in
+     *  the token type BitSet.  Return null if no tokens were found.  This
+     *  method looks at both on and off channel tokens.
+     */
+    public List<? extends Token> getTokens(int start, int stop, BitSet types) {
+        if ( p == -1 ) setup();
+        if ( stop>=tokens.size() ) stop=tokens.size()-1;
+        if ( start<0 ) start=0;
+        if ( start>stop ) return null;
+
+        // list = tokens[start:stop]:{Token t, t.getType() in types}
+        List<Token> filteredTokens = new ArrayList<Token>();
+        for (int i=start; i<=stop; i++) {
+            Token t = tokens.get(i);
+            if ( types==null || types.member(t.getType()) ) {
+                filteredTokens.add(t);
+            }
+        }
+        if ( filteredTokens.isEmpty() ) {
+            filteredTokens = null;
+        }
+        return filteredTokens;
+    }
+
+    public List<? extends Token> getTokens(int start, int stop, List<Integer> types) {
+        return getTokens(start,stop,new BitSet(types));
+    }
+
+    public List<? extends Token> getTokens(int start, int stop, int ttype) {
+        return getTokens(start,stop,BitSet.of(ttype));
+    }
+
+	@Override
+    public String getSourceName() {	return tokenSource.getSourceName();	}
+
+    /** Grab *all* tokens from stream and return string */
+	@Override
+    public String toString() {
+        if ( p == -1 ) setup();
+        fill();
+        return toString(0, tokens.size()-1);
+    }
+
+	@Override
+    public String toString(int start, int stop) {
+        if ( start<0 || stop<0 ) return null;
+        if ( p == -1 ) setup();
+        if ( stop>=tokens.size() ) stop = tokens.size()-1;
+        StringBuilder buf = new StringBuilder();
+        for (int i = start; i <= stop; i++) {
+            Token t = tokens.get(i);
+            if ( t.getType()==Token.EOF ) break;
+            buf.append(t.getText());
+        }
+        return buf.toString();
+    }
+
+	@Override
+    public String toString(Token start, Token stop) {
+        if ( start!=null && stop!=null ) {
+            return toString(start.getTokenIndex(), stop.getTokenIndex());
+        }
+        return null;
+    }
+
+    /** Get all tokens from lexer until EOF */
+    public void fill() {
+        if ( p == -1 ) setup();
+        if ( tokens.get(p).getType()==Token.EOF ) return;
+
+        int i = p+1;
+        sync(i);
+        while ( tokens.get(i).getType()!=Token.EOF ) {
+            i++;
+            sync(i);
+        }
+    }
+}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/CharStream.java b/runtime/Java/src/main/java/org/antlr/runtime/CharStream.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/CharStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/CharStream.java
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/CharStreamState.java b/runtime/Java/src/main/java/org/antlr/runtime/CharStreamState.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/CharStreamState.java
rename to runtime/Java/src/main/java/org/antlr/runtime/CharStreamState.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/ClassicToken.java b/runtime/Java/src/main/java/org/antlr/runtime/ClassicToken.java
new file mode 100644
index 0000000..b73734c
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ClassicToken.java
@@ -0,0 +1,156 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+/** A Token object like we'd use in ANTLR 2.x; has an actual string created
+ *  and associated with this object.  These objects are needed for imaginary
+ *  tree nodes that have payload objects.  We need to create a Token object
+ *  that has a string; the tree node will point at this token.  CommonToken
+ *  has indexes into a char stream and hence cannot be used to introduce
+ *  new strings.
+ */
+public class ClassicToken implements Token {
+	protected String text;
+	protected int type;
+	protected int line;
+	protected int charPositionInLine;
+	protected int channel=DEFAULT_CHANNEL;
+
+	/** What token number is this from 0..n-1 tokens */
+	protected int index;
+
+	public ClassicToken(int type) {
+		this.type = type;
+	}
+
+	public ClassicToken(Token oldToken) {
+		text = oldToken.getText();
+		type = oldToken.getType();
+		line = oldToken.getLine();
+		charPositionInLine = oldToken.getCharPositionInLine();
+		channel = oldToken.getChannel();
+	}
+
+	public ClassicToken(int type, String text) {
+		this.type = type;
+		this.text = text;
+	}
+
+	public ClassicToken(int type, String text, int channel) {
+		this.type = type;
+		this.text = text;
+		this.channel = channel;
+	}
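+	// Illustrative sketch: creating an imaginary token for a tree node that has
+	// no counterpart in the char stream; BLOCK stands in for an imaginary token type:
+	//
+	//   Token blockTok = new ClassicToken(BLOCK, "BLOCK");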
+
+	@Override
+	public int getType() {
+		return type;
+	}
+
+	@Override
+	public void setLine(int line) {
+		this.line = line;
+	}
+
+	@Override
+	public String getText() {
+		return text;
+	}
+
+	@Override
+	public void setText(String text) {
+		this.text = text;
+	}
+
+	@Override
+	public int getLine() {
+		return line;
+	}
+
+	@Override
+	public int getCharPositionInLine() {
+		return charPositionInLine;
+	}
+
+	@Override
+	public void setCharPositionInLine(int charPositionInLine) {
+		this.charPositionInLine = charPositionInLine;
+	}
+
+	@Override
+	public int getChannel() {
+		return channel;
+	}
+
+	@Override
+	public void setChannel(int channel) {
+		this.channel = channel;
+	}
+
+	@Override
+	public void setType(int type) {
+		this.type = type;
+	}
+
+	@Override
+	public int getTokenIndex() {
+		return index;
+	}
+
+	@Override
+	public void setTokenIndex(int index) {
+		this.index = index;
+	}
+
+	@Override
+	public CharStream getInputStream() {
+		return null;
+	}
+
+	@Override
+	public void setInputStream(CharStream input) {
+	}
+	
+	@Override
+	public String toString() {
+		String channelStr = "";
+		if ( channel>0 ) {
+			channelStr=",channel="+channel;
+		}
+		String txt = getText();
+		if ( txt!=null ) {
+			txt = txt.replaceAll("\n","\\\\n");
+			txt = txt.replaceAll("\r","\\\\r");
+			txt = txt.replaceAll("\t","\\\\t");
+		}
+		else {
+			txt = "<no text>";
+		}
+		return "[@"+getTokenIndex()+",'"+txt+"',<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+"]";
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/CommonToken.java b/runtime/Java/src/main/java/org/antlr/runtime/CommonToken.java
new file mode 100644
index 0000000..2647fe8
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/CommonToken.java
@@ -0,0 +1,206 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import java.io.Serializable;
+
+public class CommonToken implements Token, Serializable {
+	protected int type;
+	protected int line;
+	protected int charPositionInLine = -1; // set to invalid position
+	protected int channel=DEFAULT_CHANNEL;
+	protected transient CharStream input;
+
+	/** We need to be able to change the text once in a while.  If
+	 *  this is non-null, then getText should return this.  Note that
+	 *  start/stop are not affected by changing this.
+	  */
+	protected String text;
+
+	/** What token number is this from 0..n-1 tokens; &lt; 0 implies invalid index */
+	protected int index = -1;
+
+	/** The char position into the input buffer where this token starts */
+	protected int start;
+
+	/** The char position into the input buffer where this token stops */
+	protected int stop;
+
+	public CommonToken(int type) {
+		this.type = type;
+	}
+
+	public CommonToken(CharStream input, int type, int channel, int start, int stop) {
+		this.input = input;
+		this.type = type;
+		this.channel = channel;
+		this.start = start;
+		this.stop = stop;
+	}
+
+	public CommonToken(int type, String text) {
+		this.type = type;
+		this.channel = DEFAULT_CHANNEL;
+		this.text = text;
+	}
+
+	public CommonToken(Token oldToken) {
+		text = oldToken.getText();
+		type = oldToken.getType();
+		line = oldToken.getLine();
+		index = oldToken.getTokenIndex();
+		charPositionInLine = oldToken.getCharPositionInLine();
+		channel = oldToken.getChannel();
+        input = oldToken.getInputStream();
+		if ( oldToken instanceof CommonToken ) {
+			start = ((CommonToken)oldToken).start;
+			stop = ((CommonToken)oldToken).stop;
+		}
+	}
+
+	@Override
+	public int getType() {
+		return type;
+	}
+
+	@Override
+	public void setLine(int line) {
+		this.line = line;
+	}
+
+	@Override
+	public String getText() {
+		if ( text!=null ) {
+			return text;
+		}
+		if ( input==null ) {
+			return null;
+		}
+		int n = input.size();
+		if ( start<n && stop<n) {
+			return input.substring(start,stop);
+		}
+		else {
+			return "<EOF>";
+		}
+	}
+
+	/** Override the text for this token.  getText() will return this text
+	 *  rather than pulling from the buffer.  Note that this does not mean
+	 *  that start/stop indexes are not valid.  It means that the input
+	 *  was converted to a new string in the token object.
+	 */
+	@Override
+	public void setText(String text) {
+		this.text = text;
+	}
+
+	@Override
+	public int getLine() {
+		return line;
+	}
+
+	@Override
+	public int getCharPositionInLine() {
+		return charPositionInLine;
+	}
+
+	@Override
+	public void setCharPositionInLine(int charPositionInLine) {
+		this.charPositionInLine = charPositionInLine;
+	}
+
+	@Override
+	public int getChannel() {
+		return channel;
+	}
+
+	@Override
+	public void setChannel(int channel) {
+		this.channel = channel;
+	}
+
+	@Override
+	public void setType(int type) {
+		this.type = type;
+	}
+
+	public int getStartIndex() {
+		return start;
+	}
+
+	public void setStartIndex(int start) {
+		this.start = start;
+	}
+
+	public int getStopIndex() {
+		return stop;
+	}
+
+	public void setStopIndex(int stop) {
+		this.stop = stop;
+	}
+
+	@Override
+	public int getTokenIndex() {
+		return index;
+	}
+
+	@Override
+	public void setTokenIndex(int index) {
+		this.index = index;
+	}
+
+	@Override
+	public CharStream getInputStream() {
+		return input;
+	}
+
+	@Override
+	public void setInputStream(CharStream input) {
+		this.input = input;
+	}
+
+	@Override
+	public String toString() {
+		String channelStr = "";
+		if ( channel>0 ) {
+			channelStr=",channel="+channel;
+		}
+		String txt = getText();
+		if ( txt!=null ) {
+			txt = txt.replaceAll("\n","\\\\n");
+			txt = txt.replaceAll("\r","\\\\r");
+			txt = txt.replaceAll("\t","\\\\t");
+		}
+		else {
+			txt = "<no text>";
+		}
+		return "[@"+getTokenIndex()+","+start+":"+stop+"='"+txt+"',<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+"]";
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/CommonTokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/CommonTokenStream.java
new file mode 100644
index 0000000..99cfece
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/CommonTokenStream.java
@@ -0,0 +1,164 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.runtime;
+
+/** The most common stream of tokens where every token is buffered up
+ *  and tokens are filtered for a certain channel (the parser will only
+ *  see these tokens).
+ *
+ *  Even though it buffers all of the tokens, this token stream pulls tokens
+ *  from the token source on demand. In other words, until you ask for a
+ *  token using consume(), LT(), etc. the stream does not pull from the lexer.
+ *
+ *  The only difference between this stream and its BufferedTokenStream superclass
+ *  is that this stream knows how to ignore off-channel tokens. There may be
+ *  a performance advantage to using the superclass if you don't pass
+ *  whitespace and comments etc. to the parser on a hidden channel (i.e.,
+ *  you set $channel instead of calling skip() in lexer rules.)
+ *
+ *  @see org.antlr.runtime.UnbufferedTokenStream
+ *  @see org.antlr.runtime.BufferedTokenStream
+ */
+public class CommonTokenStream extends BufferedTokenStream {
+    /** Skip tokens on any channel but this one; this is how we skip whitespace... */
+    protected int channel = Token.DEFAULT_CHANNEL;
+
+    public CommonTokenStream() { }
+
+    public CommonTokenStream(TokenSource tokenSource) {
+        super(tokenSource);
+    }
+
+    public CommonTokenStream(TokenSource tokenSource, int channel) {
+        this(tokenSource);
+        this.channel = channel;
+    }
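+    // Illustrative sketch of the usual setup; MyLexer and MyParser are
+    // hypothetical generated classes:
+    //
+    //   MyLexer lexer = new MyLexer(new ANTLRStringStream("x = 1;"));
+    //   CommonTokenStream tokens = new CommonTokenStream(lexer);
+    //   MyParser parser = new MyParser(tokens);  // sees only DEFAULT_CHANNEL tokens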
+
+    /** Always leave p on an on-channel token. */
+	@Override
+    public void consume() {
+        if ( p == -1 ) setup();
+        p++;
+        sync(p);
+        while ( tokens.get(p).getChannel()!=channel ) {
+            p++;
+            sync(p);
+        }
+    }
+
+	@Override
+    protected Token LB(int k) {
+        if ( k==0 || (p-k)<0 ) return null;
+
+        int i = p;
+        int n = 1;
+        // find k good tokens looking backwards
+        while ( n<=k ) {
+            // skip off-channel tokens
+            i = skipOffTokenChannelsReverse(i-1);
+            n++;
+        }
+        if ( i<0 ) return null;
+        return tokens.get(i);
+    }
+
+	@Override
+    public Token LT(int k) {
+        //System.out.println("enter LT("+k+")");
+        if ( p == -1 ) setup();
+        if ( k == 0 ) return null;
+        if ( k < 0 ) return LB(-k);
+        int i = p;
+        int n = 1; // we know tokens[p] is a good one
+        // find k good tokens
+        while ( n<k ) {
+            // skip off-channel tokens
+            i = skipOffTokenChannels(i+1);
+            n++;
+        }
+		if ( i>range ) range = i;
+        return tokens.get(i);
+    }
+
+    /** Given a starting index, return the index of the first on-channel
+     *  token.
+     */
+    protected int skipOffTokenChannels(int i) {
+        sync(i);
+        while ( tokens.get(i).getChannel()!=channel ) { // also stops at EOF (it's onchannel)
+            i++;
+            sync(i);
+        }
+        return i;
+    }
+
+    protected int skipOffTokenChannelsReverse(int i) {
+        while ( i>=0 && tokens.get(i).getChannel()!=channel ) {
+            i--;
+        }
+        return i;
+    }
+
+	@Override
+	public void reset() {
+		super.reset();
+		p = skipOffTokenChannels(0);
+	}
+
+	@Override
+	protected void setup() {
+        p = 0;
+        sync(0);
+        int i = 0;
+        while ( tokens.get(i).getChannel()!=channel ) {
+            i++;
+            sync(i);
+        }
+        p = i;
+    }
+
+	/** Count EOF just once. */
+	public int getNumberOfOnChannelTokens() {
+		int n = 0;
+		fill();
+		for (int i = 0; i < tokens.size(); i++) {
+			Token t = tokens.get(i);
+			if ( t.getChannel()==channel ) n++;
+			if ( t.getType()==Token.EOF ) break;
+		}
+		return n;
+	}
+
+    /** Reset this token stream by setting its token source. */
+	@Override
+    public void setTokenSource(TokenSource tokenSource) {
+        super.setTokenSource(tokenSource);
+        channel = Token.DEFAULT_CHANNEL;
+    }
+}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/DFA.java b/runtime/Java/src/main/java/org/antlr/runtime/DFA.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/DFA.java
rename to runtime/Java/src/main/java/org/antlr/runtime/DFA.java
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/EarlyExitException.java b/runtime/Java/src/main/java/org/antlr/runtime/EarlyExitException.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/EarlyExitException.java
rename to runtime/Java/src/main/java/org/antlr/runtime/EarlyExitException.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/FailedPredicateException.java b/runtime/Java/src/main/java/org/antlr/runtime/FailedPredicateException.java
new file mode 100644
index 0000000..c072479
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/FailedPredicateException.java
@@ -0,0 +1,55 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+/** A semantic predicate failed during validation.  Validation of predicates
+ *  occurs during normal parsing of the alternative, just like matching a token.
+ *  Disambiguating predicate evaluation occurs when we hoist a predicate into
+ *  a prediction decision.
+ */
+public class FailedPredicateException extends RecognitionException {
+	public String ruleName;
+	public String predicateText;
+
+	/** Used for remote debugger deserialization */
+	public FailedPredicateException() {}
+
+	public FailedPredicateException(IntStream input,
+									String ruleName,
+									String predicateText)
+	{
+		super(input);
+		this.ruleName = ruleName;
+		this.predicateText = predicateText;
+	}
+
+	@Override
+	public String toString() {
+		return "FailedPredicateException("+ruleName+",{"+predicateText+"}?)";
+	}
+}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/IntStream.java b/runtime/Java/src/main/java/org/antlr/runtime/IntStream.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/IntStream.java
rename to runtime/Java/src/main/java/org/antlr/runtime/IntStream.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/LegacyCommonTokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/LegacyCommonTokenStream.java
new file mode 100644
index 0000000..2c84969
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/LegacyCommonTokenStream.java
@@ -0,0 +1,410 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import java.util.*;
+
+/** The most common stream of tokens is one where every token is buffered up
+ *  and tokens are prefiltered for a certain channel (the parser will only
+ *  see these tokens and cannot change the filter channel number during the
+ *  parse).
+ *
+ *  TODO: how to access the full token stream?  How to track all tokens matched per rule?
+ */
+public class LegacyCommonTokenStream implements TokenStream {
+    protected TokenSource tokenSource;
+
+	/** Record every single token pulled from the source so we can reproduce
+	 *  chunks of it later.
+	 */
+	protected List<Token> tokens;
+
+	/** Map&lt;tokentype, channel&gt; to override some Tokens' channel numbers */
+	protected Map<Integer, Integer> channelOverrideMap;
+
+	/** Set&lt;tokentype&gt;; discard any tokens with this type */
+	protected Set<Integer> discardSet;
+
+	/** Skip tokens on any channel but this one; this is how we skip whitespace... */
+	protected int channel = Token.DEFAULT_CHANNEL;
+
+	/** By default, track all incoming tokens */
+	protected boolean discardOffChannelTokens = false;
+
+	/** Track the last mark() call result value for use in rewind(). */
+	protected int lastMarker;
+
+	protected int range = -1; // how deep have we gone?	
+
+	/** The index into the tokens list of the current token (the next token
+     *  to consume).  p==-1 indicates that the buffer has not been filled yet.
+     */
+    protected int p = -1;
+
+	public LegacyCommonTokenStream() {
+		tokens = new ArrayList<Token>(500);
+	}
+
+	public LegacyCommonTokenStream(TokenSource tokenSource) {
+	    this();
+		this.tokenSource = tokenSource;
+	}
+
+	public LegacyCommonTokenStream(TokenSource tokenSource, int channel) {
+		this(tokenSource);
+		this.channel = channel;
+	}
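+
+	/* Wiring sketch (illustrative; "MyLexer", "MyParser", and rule "prog" are
+	 * hypothetical generated names): the usual pipeline buffers every token up
+	 * front and hands the parser only on-channel tokens.
+	 *
+	 *   CharStream chars = new ANTLRStringStream("x = 1;");
+	 *   LegacyCommonTokenStream tokens = new LegacyCommonTokenStream(new MyLexer(chars));
+	 *   MyParser parser = new MyParser(tokens);
+	 *   parser.prog();
+	 */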
+
+	/** Reset this token stream by setting its token source. */
+	public void setTokenSource(TokenSource tokenSource) {
+		this.tokenSource = tokenSource;
+		tokens.clear();
+		p = -1;
+		channel = Token.DEFAULT_CHANNEL;
+	}
+
+	/** Load all tokens from the token source and put them in the tokens list.
+	 *  This is done upon the first LT request because you might want to
+	 *  set some token type / channel overrides before filling the buffer.
+	 */
+	protected void fillBuffer() {
+		int index = 0;
+		Token t = tokenSource.nextToken();
+		while ( t!=null && t.getType()!=CharStream.EOF ) {
+			boolean discard = false;
+			// is there a channel override for token type?
+			if ( channelOverrideMap!=null ) {
+				Integer channelI = channelOverrideMap.get(t.getType());
+				if ( channelI!=null ) {
+					t.setChannel(channelI);
+				}
+			}
+			if ( discardSet!=null &&
+				 discardSet.contains(new Integer(t.getType())) )
+			{
+				discard = true;
+			}
+			else if ( discardOffChannelTokens && t.getChannel()!=this.channel ) {
+				discard = true;
+			}
+			if ( !discard )	{
+				t.setTokenIndex(index);
+				tokens.add(t);
+				index++;
+			}
+			t = tokenSource.nextToken();
+		}
+		// leave p pointing at first token on channel
+		p = 0;
+		p = skipOffTokenChannels(p);
+    }
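+
+	/* Because the buffer is filled lazily on the first LT()/LA() call, overrides
+	 * must be registered before the parse starts.  A sketch ("MyLexer" token
+	 * type names are hypothetical):
+	 *
+	 *   LegacyCommonTokenStream tokens = new LegacyCommonTokenStream(lexer);
+	 *   tokens.setTokenTypeChannel(MyLexer.WS, Token.HIDDEN_CHANNEL);
+	 *   tokens.discardTokenType(MyLexer.COMMENT);
+	 *   // the parser's first LT(1) triggers fillBuffer() with these in effect
+	 */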
+
+	/** Move the input pointer to the next incoming token.  The stream
+	 *  must become active with LT(1) available.  consume() simply
+	 *  moves the input pointer so that LT(1) points at the next
+	 *  input symbol. Consume at least one token.
+	 *
+	 *  Walk past any token not on the channel the parser is listening to.
+	 */
+	@Override
+	public void consume() {
+		if ( p<tokens.size() ) {
+            p++;
+			p = skipOffTokenChannels(p); // leave p on valid token
+        }
+    }
+
+	/** Given a starting index, return the index of the first on-channel
+	 *  token.
+	 */
+	protected int skipOffTokenChannels(int i) {
+		int n = tokens.size();
+		while ( i<n && tokens.get(i).getChannel()!=channel ) {
+			i++;
+		}
+		return i;
+	}
+
+	protected int skipOffTokenChannelsReverse(int i) {
+		while ( i>=0 && tokens.get(i).getChannel()!=channel ) {
+			i--;
+		}
+		return i;
+	}
+
+	/** A simple filter mechanism whereby you can tell this token stream
+	 *  to force all tokens of type ttype onto the given channel.  For example,
+	 *  when interpreting, we cannot exec actions, so we need to tell
+	 *  the stream to force all WS and NEWLINE onto a different, ignored
+	 *  channel.
+	 */
+	public void setTokenTypeChannel(int ttype, int channel) {
+		if ( channelOverrideMap==null ) {
+			channelOverrideMap = new HashMap<Integer, Integer>();
+		}
+        channelOverrideMap.put(ttype, channel);
+	}
+
+	public void discardTokenType(int ttype) {
+		if ( discardSet==null ) {
+			discardSet = new HashSet<Integer>();
+		}
+        discardSet.add(ttype);
+	}
+
+	public void discardOffChannelTokens(boolean discardOffChannelTokens) {
+		this.discardOffChannelTokens = discardOffChannelTokens;
+	}
+
+	public List<? extends Token> getTokens() {
+		if ( p == -1 ) {
+			fillBuffer();
+		}
+		return tokens;
+	}
+
+	public List<? extends Token> getTokens(int start, int stop) {
+		return getTokens(start, stop, (BitSet)null);
+	}
+
+	/** Given a start and stop index, return a List of all tokens whose
+	 *  type is in the token type BitSet.  Return null if no tokens were found.  This
+	 *  method looks at both on and off channel tokens.
+	 */
+	public List<? extends Token> getTokens(int start, int stop, BitSet types) {
+		if ( p == -1 ) {
+			fillBuffer();
+		}
+		if ( stop>=tokens.size() ) {
+			stop=tokens.size()-1;
+		}
+		if ( start<0 ) {
+			start=0;
+		}
+		if ( start>stop ) {
+			return null;
+		}
+
+		// list = tokens[start:stop]:{Token t, t.getType() in types}
+		List<Token> filteredTokens = new ArrayList<Token>();
+		for (int i=start; i<=stop; i++) {
+			Token t = tokens.get(i);
+			if ( types==null || types.member(t.getType()) ) {
+				filteredTokens.add(t);
+			}
+		}
+		if ( filteredTokens.isEmpty() ) {
+			filteredTokens = null;
+		}
+		return filteredTokens;
+	}
+
+	public List<? extends Token> getTokens(int start, int stop, List<Integer> types) {
+		return getTokens(start,stop,new BitSet(types));
+	}
+
+	public List<? extends Token> getTokens(int start, int stop, int ttype) {
+		return getTokens(start,stop,BitSet.of(ttype));
+	}
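+
+	/* Filtering sketch (indexes and "MyLexer.ID" are illustrative; assumes the
+	 * stream has already been filled by a parse or an LT() call): pull every ID
+	 * token, on or off channel, out of a region of the buffered stream.
+	 *
+	 *   List<? extends Token> ids = tokens.getTokens(0, tokens.size()-1, MyLexer.ID);
+	 *   if ( ids!=null ) {
+	 *       for (Token t : ids) System.out.println(t.getText());
+	 *   }
+	 */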
+
+	/** Get the kth token of lookahead from the current position, where k=1 is the
+	 *  first symbol of lookahead.
+	 */
+	@Override
+	public Token LT(int k) {
+		if ( p == -1 ) {
+			fillBuffer();
+		}
+		if ( k==0 ) {
+			return null;
+		}
+		if ( k<0 ) {
+			return LB(-k);
+		}
+		//System.out.print("LT(p="+p+","+k+")=");
+		if ( (p+k-1) >= tokens.size() ) {
+            return tokens.get(tokens.size()-1);
+		}
+		//System.out.println(tokens.get(p+k-1));
+		int i = p;
+		int n = 1;
+		// find k good tokens
+		while ( n<k ) {
+			// skip off-channel tokens
+			i = skipOffTokenChannels(i+1); // leave p on valid token
+			n++;
+		}
+		if ( i>=tokens.size() ) {
+            return tokens.get(tokens.size()-1); // must be EOF
+		}
+
+		if ( i>range ) range = i;
+        return tokens.get(i);
+    }
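+
+	/* Lookahead sketch: LT(1) is the next on-channel token to be consumed,
+	 * LT(2) the one after that, and LT(-1) the most recently consumed
+	 * on-channel token (null before anything has been consumed).
+	 *
+	 *   Token next  = tokens.LT(1);
+	 *   Token after = tokens.LT(2);
+	 *   Token prev  = tokens.LT(-1);
+	 */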
+
+	/** Look backwards k on-channel tokens. */
+	protected Token LB(int k) {
+		//System.out.print("LB(p="+p+","+k+") ");
+		if ( p == -1 ) {
+			fillBuffer();
+		}
+		if ( k==0 ) {
+			return null;
+		}
+		if ( (p-k)<0 ) {
+			return null;
+		}
+
+		int i = p;
+		int n = 1;
+		// find k good tokens looking backwards
+		while ( n<=k ) {
+			// skip off-channel tokens
+			i = skipOffTokenChannelsReverse(i-1); // leave p on valid token
+			n++;
+		}
+		if ( i<0 ) {
+			return null;
+		}
+		return tokens.get(i);
+	}
+
+	/** Return absolute token i; ignore which channel the tokens are on;
+	 *  that is, count all tokens not just on-channel tokens.
+	 */
+	@Override
+	public Token get(int i) {
+		return tokens.get(i);
+	}
+
+	/** Get all tokens from start..stop.  Note that, as implemented, stop is
+	 *  exclusive (List.subList semantics).
+	 */
+	public List<? extends Token> get(int start, int stop) {
+		if ( p == -1 ) fillBuffer();
+		if ( start<0 || stop<0 ) return null;
+		return tokens.subList(start, stop);
+	}
+
+	@Override
+	public int LA(int i) {
+        return LT(i).getType();
+    }
+
+	@Override
+    public int mark() {
+		if ( p == -1 ) {
+			fillBuffer();
+		}
+		lastMarker = index();
+		return lastMarker;
+	}
+
+	@Override
+	public void release(int marker) {
+		// no resources to release
+	}
+
+	@Override
+	public int size() {
+		return tokens.size();
+	}
+
+	@Override
+    public int index() {
+        return p;
+    }
+
+	@Override
+	public int range() {
+		return range;
+	}
+
+	@Override
+	public void rewind(int marker) {
+		seek(marker);
+	}
+
+	@Override
+	public void rewind() {
+		seek(lastMarker);
+	}
+
+	public void reset() {
+		p = 0;
+		lastMarker = 0;
+	}
+	
+	@Override
+	public void seek(int index) {
+		p = index;
+	}
+
+	@Override
+	public TokenSource getTokenSource() {
+		return tokenSource;
+	}
+
+	@Override
+	public String getSourceName() {
+		return getTokenSource().getSourceName();
+	}
+
+	@Override
+	public String toString() {
+		if ( p == -1 ) {
+			fillBuffer();
+		}
+		return toString(0, tokens.size()-1);
+	}
+
+	@Override
+	public String toString(int start, int stop) {
+		if ( start<0 || stop<0 ) {
+			return null;
+		}
+		if ( p == -1 ) {
+			fillBuffer();
+		}
+		if ( stop>=tokens.size() ) {
+			stop = tokens.size()-1;
+		}
+		StringBuilder buf = new StringBuilder();
+		for (int i = start; i <= stop; i++) {
+			Token t = tokens.get(i);
+			buf.append(t.getText());
+		}
+		return buf.toString();
+	}
+
+	@Override
+	public String toString(Token start, Token stop) {
+		if ( start!=null && stop!=null ) {
+			return toString(start.getTokenIndex(), stop.getTokenIndex());
+		}
+		return null;
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/Lexer.java b/runtime/Java/src/main/java/org/antlr/runtime/Lexer.java
new file mode 100644
index 0000000..66c005f
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/Lexer.java
@@ -0,0 +1,355 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+/** A lexer is a recognizer that draws input symbols from a character stream.
+ *  Lexer grammars result in a subclass of this object. A Lexer object
+ *  uses simplified match() and error recovery mechanisms in the interest
+ *  of speed.
+ */
+public abstract class Lexer extends BaseRecognizer implements TokenSource {
+	/** Where is the lexer drawing characters from? */
+	protected CharStream input;
+
+	public Lexer() {
+	}
+
+	public Lexer(CharStream input) {
+		this.input = input;
+	}
+
+	public Lexer(CharStream input, RecognizerSharedState state) {
+		super(state);
+		this.input = input;
+	}
+
+	@Override
+	public void reset() {
+		super.reset(); // reset all recognizer state variables
+		// reset Lexer state variables
+		if ( input!=null ) {
+			input.seek(0); // rewind the input
+		}
+		if ( state==null ) {
+			return; // no shared state work to do
+		}
+		state.token = null;
+		state.type = Token.INVALID_TOKEN_TYPE;
+		state.channel = Token.DEFAULT_CHANNEL;
+		state.tokenStartCharIndex = -1;
+		state.tokenStartCharPositionInLine = -1;
+		state.tokenStartLine = -1;
+		state.text = null;
+	}
+
+	/** Return a token from this source; i.e., match a token on the char
+	 *  stream.
+	 */
+	public Token nextToken() {
+		while (true) {
+			state.token = null;
+			state.channel = Token.DEFAULT_CHANNEL;
+			state.tokenStartCharIndex = input.index();
+			state.tokenStartCharPositionInLine = input.getCharPositionInLine();
+			state.tokenStartLine = input.getLine();
+			state.text = null;
+			if ( input.LA(1)==CharStream.EOF ) {
+				return getEOFToken();
+			}
+			try {
+				mTokens();
+				if ( state.token==null ) {
+					emit();
+				}
+				else if ( state.token==Token.SKIP_TOKEN ) {
+					continue;
+				}
+				return state.token;
+			}
+			catch (MismatchedRangeException re) {
+				reportError(re);
+				// matchRange() routine has already called recover()
+			}
+			catch (MismatchedTokenException re) {
+				reportError(re);
+				// match() routine has already called recover()
+			}
+			catch (RecognitionException re) {
+				reportError(re);
+				recover(re); // throw out current char and try again
+			}
+		}
+	}
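+
+	/* Driver sketch ("MyLexer" is a hypothetical generated lexer): pull tokens
+	 * until EOF without going through a token stream.
+	 *
+	 *   MyLexer lexer = new MyLexer(new ANTLRStringStream("a b c"));
+	 *   for (Token t = lexer.nextToken(); t.getType()!=Token.EOF; t = lexer.nextToken()) {
+	 *       System.out.println(t);
+	 *   }
+	 */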
+
+	/** Returns the EOF token by default; override this method if you need
+	 *  to return a custom token instead.
+	 */
+	public Token getEOFToken() {
+		Token eof = new CommonToken(input,Token.EOF,
+									Token.DEFAULT_CHANNEL,
+									input.index(),input.index());
+		eof.setLine(getLine());
+		eof.setCharPositionInLine(getCharPositionInLine());
+		return eof;
+	}
+
+	/** Instruct the lexer to skip creating a token for the current lexer rule
+	 *  and look for another token.  nextToken() knows to keep looking when
+	 *  a lexer rule finishes with token set to SKIP_TOKEN.  Recall that
+	 *  if token==null at end of any token rule, it creates one for you
+	 *  and emits it.
+	 */
+	public void skip() {
+		state.token = Token.SKIP_TOKEN;
+	}
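+
+	/* Typical use is from a lexer rule action in the grammar (grammar syntax,
+	 * not Java; the rule name is illustrative):
+	 *
+	 *   WS : (' '|'\t'|'\r'|'\n')+ { skip(); } ;
+	 *
+	 * nextToken() then discards the rule's result and scans for the next token.
+	 */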
+
+	/** This is the lexer entry point that sets instance var 'token' */
+	public abstract void mTokens() throws RecognitionException;
+
+	/** Set the char stream and reset the lexer */
+	public void setCharStream(CharStream input) {
+		this.input = null;
+		reset();
+		this.input = input;
+	}
+
+	public CharStream getCharStream() {
+		return this.input;
+	}
+
+	@Override
+	public String getSourceName() {
+		return input.getSourceName();
+	}
+
+	/** Currently does not support multiple emits per nextToken invocation
+	 *  for efficiency reasons.  Subclass and override this method and
+	 *  nextToken (to push tokens into a list and pull from that list rather
+	 *  than a single variable as this implementation does).
+	 */
+	public void emit(Token token) {
+		state.token = token;
+	}
+
+	/** The standard method called to automatically emit a token at the
+	 *  outermost lexical rule.  The token object should point into the
+	 *  char buffer start..stop.  If there is a text override in 'text',
+	 *  use that to set the token's text.  Override this method to emit
+	 *  custom Token objects.
+	 *
+	 *  If you are building trees, then you should also override
+	 *  Parser or TreeParser.getMissingSymbol().
+	 */
+	public Token emit() {
+		Token t = new CommonToken(input, state.type, state.channel, state.tokenStartCharIndex, getCharIndex()-1);
+		t.setLine(state.tokenStartLine);
+		t.setText(state.text);
+		t.setCharPositionInLine(state.tokenStartCharPositionInLine);
+		emit(t);
+		return t;
+	}
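+
+	/* Override sketch ("MyToken" and its constructor are hypothetical): emit
+	 * custom token objects instead of CommonToken.
+	 *
+	 *   @Override
+	 *   public Token emit() {
+	 *       MyToken t = new MyToken(input, state.type, state.channel,
+	 *                               state.tokenStartCharIndex, getCharIndex()-1);
+	 *       t.setLine(state.tokenStartLine);
+	 *       t.setCharPositionInLine(state.tokenStartCharPositionInLine);
+	 *       emit(t);
+	 *       return t;
+	 *   }
+	 */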
+
+	public void match(String s) throws MismatchedTokenException {
+		int i = 0;
+		while ( i<s.length() ) {
+			if ( input.LA(1)!=s.charAt(i) ) {
+				if ( state.backtracking>0 ) {
+					state.failed = true;
+					return;
+				}
+				MismatchedTokenException mte =
+					new MismatchedTokenException(s.charAt(i), input);
+				recover(mte);
+				throw mte;
+			}
+			i++;
+			input.consume();
+			state.failed = false;
+		}
+	}
+
+	public void matchAny() {
+		input.consume();
+	}
+
+	public void match(int c) throws MismatchedTokenException {
+		if ( input.LA(1)!=c ) {
+			if ( state.backtracking>0 ) {
+				state.failed = true;
+				return;
+			}
+			MismatchedTokenException mte =
+				new MismatchedTokenException(c, input);
+			recover(mte);  // don't really recover; just consume in lexer
+			throw mte;
+		}
+		input.consume();
+		state.failed = false;
+	}
+
+	public void matchRange(int a, int b)
+		throws MismatchedRangeException
+	{
+		if ( input.LA(1)<a || input.LA(1)>b ) {
+			if ( state.backtracking>0 ) {
+				state.failed = true;
+				return;
+			}
+			MismatchedRangeException mre =
+				new MismatchedRangeException(a,b,input);
+			recover(mre);
+			throw mre;
+		}
+		input.consume();
+		state.failed = false;
+	}
+
+	public int getLine() {
+		return input.getLine();
+	}
+
+	public int getCharPositionInLine() {
+		return input.getCharPositionInLine();
+	}
+
+	/** What is the index of the current character of lookahead? */
+	public int getCharIndex() {
+		return input.index();
+	}
+
+	/** Return the text matched so far for the current token or any
+	 *  text override.
+	 */
+	public String getText() {
+		if ( state.text!=null ) {
+			return state.text;
+		}
+		return input.substring(state.tokenStartCharIndex,getCharIndex()-1);
+	}
+
+	/** Set the complete text of this token; it wipes any previous
+	 *  changes to the text.
+	 */
+	public void setText(String text) {
+		state.text = text;
+	}
+
+	@Override
+	public void reportError(RecognitionException e) {
+		/** TODO: not thought about recovery in lexer yet.
+		 *
+		// if we've already reported an error and have not matched a token
+		// yet successfully, don't report any errors.
+		if ( errorRecovery ) {
+			//System.err.print("[SPURIOUS] ");
+			return;
+		}
+		errorRecovery = true;
+		 */
+
+		displayRecognitionError(this.getTokenNames(), e);
+	}
+
+	@Override
+	public String getErrorMessage(RecognitionException e, String[] tokenNames) {
+		String msg;
+		if ( e instanceof MismatchedTokenException ) {
+			MismatchedTokenException mte = (MismatchedTokenException)e;
+			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting "+getCharErrorDisplay(mte.expecting);
+		}
+		else if ( e instanceof NoViableAltException ) {
+			NoViableAltException nvae = (NoViableAltException)e;
+			// for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
+			// and "(decision="+nvae.decisionNumber+") and
+			// "state "+nvae.stateNumber
+			msg = "no viable alternative at character "+getCharErrorDisplay(e.c);
+		}
+		else if ( e instanceof EarlyExitException ) {
+			EarlyExitException eee = (EarlyExitException)e;
+			// for development, can add "(decision="+eee.decisionNumber+")"
+			msg = "required (...)+ loop did not match anything at character "+getCharErrorDisplay(e.c);
+		}
+		else if ( e instanceof MismatchedNotSetException ) {
+			MismatchedNotSetException mse = (MismatchedNotSetException)e;
+			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+mse.expecting;
+		}
+		else if ( e instanceof MismatchedSetException ) {
+			MismatchedSetException mse = (MismatchedSetException)e;
+			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+mse.expecting;
+		}
+		else if ( e instanceof MismatchedRangeException ) {
+			MismatchedRangeException mre = (MismatchedRangeException)e;
+			msg = "mismatched character "+getCharErrorDisplay(e.c)+" expecting set "+
+				  getCharErrorDisplay(mre.a)+".."+getCharErrorDisplay(mre.b);
+		}
+		else {
+			msg = super.getErrorMessage(e, tokenNames);
+		}
+		return msg;
+	}
+
+	public String getCharErrorDisplay(int c) {
+		String s = String.valueOf((char)c);
+		switch ( c ) {
+			case Token.EOF :
+				s = "<EOF>";
+				break;
+			case '\n' :
+				s = "\\n";
+				break;
+			case '\t' :
+				s = "\\t";
+				break;
+			case '\r' :
+				s = "\\r";
+				break;
+		}
+		return "'"+s+"'";
+	}
+
+	/** A lexer can normally match any char in its vocabulary after matching
+	 *  a token, so do the easy thing and just kill a character and hope
+	 *  it all works out.  You can instead use the rule invocation stack
+	 *  to do sophisticated error recovery if you are in a fragment rule.
+	 */
+	public void recover(RecognitionException re) {
+		//System.out.println("consuming char "+(char)input.LA(1)+" during recovery");
+		//re.printStackTrace();
+		input.consume();
+	}
+
+	public void traceIn(String ruleName, int ruleIndex)  {
+		String inputSymbol = ((char)input.LT(1))+" line="+getLine()+":"+getCharPositionInLine();
+		super.traceIn(ruleName, ruleIndex, inputSymbol);
+	}
+
+	public void traceOut(String ruleName, int ruleIndex)  {
+		String inputSymbol = ((char)input.LT(1))+" line="+getLine()+":"+getCharPositionInLine();
+		super.traceOut(ruleName, ruleIndex, inputSymbol);
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/MismatchedNotSetException.java b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedNotSetException.java
new file mode 100644
index 0000000..62f14ba
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedNotSetException.java
@@ -0,0 +1,42 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+public class MismatchedNotSetException extends MismatchedSetException {
+	/** Used for remote debugger deserialization */
+	public MismatchedNotSetException() {;}
+
+	public MismatchedNotSetException(BitSet expecting, IntStream input) {
+		super(expecting, input);
+	}
+
+	@Override
+	public String toString() {
+		return "MismatchedNotSetException("+getUnexpectedType()+"!="+expecting+")";
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/MismatchedRangeException.java b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedRangeException.java
new file mode 100644
index 0000000..f6c02c6
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedRangeException.java
@@ -0,0 +1,46 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+public class MismatchedRangeException extends RecognitionException {
+	public int a,b;
+
+	/** Used for remote debugger deserialization */
+	public MismatchedRangeException() {;}
+
+	public MismatchedRangeException(int a, int b, IntStream input) {
+		super(input);
+		this.a = a;
+		this.b = b;
+	}
+
+	@Override
+	public String toString() {
+		return "MismatchedNotSetException("+getUnexpectedType()+" not in ["+a+","+b+"])";
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/MismatchedSetException.java b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedSetException.java
new file mode 100644
index 0000000..36bd47b
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedSetException.java
@@ -0,0 +1,45 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+public class MismatchedSetException extends RecognitionException {
+	public BitSet expecting;
+
+	/** Used for remote debugger deserialization */
+	public MismatchedSetException() {;}
+
+	public MismatchedSetException(BitSet expecting, IntStream input) {
+		super(input);
+		this.expecting = expecting;
+	}
+
+	@Override
+	public String toString() {
+		return "MismatchedSetException("+getUnexpectedType()+"!="+expecting+")";
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTokenException.java b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTokenException.java
new file mode 100644
index 0000000..4af2269
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTokenException.java
@@ -0,0 +1,46 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+/** A mismatched char or Token or tree node */
+public class MismatchedTokenException extends RecognitionException {
+	public int expecting = Token.INVALID_TOKEN_TYPE;
+
+	/** Used for remote debugger deserialization */
+	public MismatchedTokenException() {;}
+
+	public MismatchedTokenException(int expecting, IntStream input) {
+		super(input);
+		this.expecting = expecting;
+	}
+
+	@Override
+	public String toString() {
+		return "MismatchedTokenException("+getUnexpectedType()+"!="+expecting+")";
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTreeNodeException.java b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTreeNodeException.java
new file mode 100644
index 0000000..66403d0
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MismatchedTreeNodeException.java
@@ -0,0 +1,49 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import org.antlr.runtime.tree.TreeNodeStream;
+
+/** A mismatched tree node during tree parsing; the tree-parser analog of a
+ *  MismatchedTokenException.
+ */
+public class MismatchedTreeNodeException extends RecognitionException {
+	public int expecting;
+
+	public MismatchedTreeNodeException() {
+	}
+
+	public MismatchedTreeNodeException(int expecting, TreeNodeStream input) {
+		super(input);
+		this.expecting = expecting;
+	}
+
+	@Override
+	public String toString() {
+		return "MismatchedTreeNodeException("+getUnexpectedType()+"!="+expecting+")";
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/MissingTokenException.java b/runtime/Java/src/main/java/org/antlr/runtime/MissingTokenException.java
new file mode 100644
index 0000000..5b44e06
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/MissingTokenException.java
@@ -0,0 +1,57 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+/** We were expecting a token but it was not found.  The current token
+ *  is actually what we wanted next.  Used for tree node errors too.
+ */
+public class MissingTokenException extends MismatchedTokenException {
+	public Object inserted;
+	/** Used for remote debugger deserialization */
+	public MissingTokenException() {;}
+
+	public MissingTokenException(int expecting, IntStream input, Object inserted) {
+		super(expecting, input);
+		this.inserted = inserted;
+	}
+
+	public int getMissingType() {
+		return expecting;
+	}
+
+	@Override
+	public String toString() {
+		if ( inserted!=null && token!=null ) {
+			return "MissingTokenException(inserted "+inserted+" at "+token.getText()+")";
+		}
+		if ( token!=null ) {
+			return "MissingTokenException(at "+token.getText()+")";
+		}
+		return "MissingTokenException";
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/NoViableAltException.java b/runtime/Java/src/main/java/org/antlr/runtime/NoViableAltException.java
new file mode 100644
index 0000000..29513a3
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/NoViableAltException.java
@@ -0,0 +1,58 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+public class NoViableAltException extends RecognitionException {
+	public String grammarDecisionDescription;
+	public int decisionNumber;
+	public int stateNumber;
+
+	/** Used for remote debugger deserialization */
+	public NoViableAltException() {;}
+	
+	public NoViableAltException(String grammarDecisionDescription,
+								int decisionNumber,
+								int stateNumber,
+								IntStream input)
+	{
+		super(input);
+		this.grammarDecisionDescription = grammarDecisionDescription;
+		this.decisionNumber = decisionNumber;
+		this.stateNumber = stateNumber;
+	}
+
+	@Override
+	public String toString() {
+		if ( input instanceof CharStream ) {
+			return "NoViableAltException('"+(char)getUnexpectedType()+"'@["+grammarDecisionDescription+"])";
+		}
+		else {
+			return "NoViableAltException("+getUnexpectedType()+"@["+grammarDecisionDescription+"])";
+		}
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/Parser.java b/runtime/Java/src/main/java/org/antlr/runtime/Parser.java
new file mode 100644
index 0000000..779828f
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/Parser.java
@@ -0,0 +1,103 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+/** A parser for TokenStreams.  "parser grammars" result in a subclass
+ *  of this.
+ */
+public class Parser extends BaseRecognizer {
+	public TokenStream input;
+
+	public Parser(TokenStream input) {
+		super(); // highlight that we go to super to set state object
+		setTokenStream(input);
+    }
+
+	public Parser(TokenStream input, RecognizerSharedState state) {
+		super(state); // share the state object with another parser
+		this.input = input;
+    }
+
+	@Override
+	public void reset() {
+		super.reset(); // reset all recognizer state variables
+		if ( input!=null ) {
+			input.seek(0); // rewind the input
+		}
+	}
+
+	@Override
+	protected Object getCurrentInputSymbol(IntStream input) {
+		return ((TokenStream)input).LT(1);
+	}
+
+	@Override
+	protected Object getMissingSymbol(IntStream input,
+									  RecognitionException e,
+									  int expectedTokenType,
+									  BitSet follow)
+	{
+		String tokenText;
+		if ( expectedTokenType==Token.EOF ) tokenText = "<missing EOF>";
+		else tokenText = "<missing "+getTokenNames()[expectedTokenType]+">";
+		CommonToken t = new CommonToken(expectedTokenType, tokenText);
+		Token current = ((TokenStream)input).LT(1);
+		if ( current.getType() == Token.EOF ) {
+			current = ((TokenStream)input).LT(-1);
+		}
+		t.line = current.getLine();
+		t.charPositionInLine = current.getCharPositionInLine();
+		t.channel = DEFAULT_TOKEN_CHANNEL;
+		t.input = current.getInputStream();
+		return t;
+	}
+
+	/** Set the token stream and reset the parser */
+	public void setTokenStream(TokenStream input) {
+		this.input = null;
+		reset();
+		this.input = input;
+	}
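+
+	/* Reuse sketch ("MyParser" and rule "prog" are hypothetical generated names):
+	 * a parser can be pointed at a fresh stream between parses.
+	 *
+	 *   MyParser parser = new MyParser(firstTokens);
+	 *   parser.prog();
+	 *   parser.setTokenStream(secondTokens);  // resets recognizer state, then swaps input
+	 *   parser.prog();
+	 */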
+
+    public TokenStream getTokenStream() {
+		return input;
+	}
+
+	@Override
+	public String getSourceName() {
+		return input.getSourceName();
+	}
+
+	public void traceIn(String ruleName, int ruleIndex)  {
+		super.traceIn(ruleName, ruleIndex, input.LT(1));
+	}
+
+	public void traceOut(String ruleName, int ruleIndex)  {
+		super.traceOut(ruleName, ruleIndex, input.LT(1));
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/ParserRuleReturnScope.java b/runtime/Java/src/main/java/org/antlr/runtime/ParserRuleReturnScope.java
new file mode 100644
index 0000000..1ecb78a
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/ParserRuleReturnScope.java
@@ -0,0 +1,57 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+/** Rules that return more than a single value must return an object
+ *  containing all the values.  Besides the properties defined in
+ *  RuleLabelScope.predefinedRulePropertiesScope there may be user-defined
+ *  return values.  This class simply defines the minimum properties that
+ *  are always defined and methods to access the others that might be
+ *  available depending on output option such as template and tree.
+ *
+ *  Note that text is not an actual property of the return value; it is computed
+ *  from start and stop using the input stream's toString() method.  I
+ *  could add a ctor to this so that we can pass in and store the input
+ *  stream, but I'm not sure we want to do that.  It would seem to be undefined
+ *  to get the .text property anyway if the rule matches tokens from multiple
+ *  input streams.
+ *
+ *  I do not use getters for fields of objects that are used simply to
+ *  group values such as this aggregate.  The getters/setters are there to
+ *  satisfy the superclass interface.
+ */
+public class ParserRuleReturnScope extends RuleReturnScope {
+	public Token start, stop;
+	@Override
+	public Object getStart() { return start; }
+	@Override
+	public Object getStop() { return stop; }
+
+	@Override
+	public Object getTree() { return null; }
+}
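+
+/* Usage sketch ("parser", "tokens", and rule "expr" are hypothetical): the
+ * matched text is recovered from the token stream via start/stop rather than
+ * being stored on the scope.
+ *
+ *   ParserRuleReturnScope r = parser.expr();
+ *   String matched = tokens.toString(r.start, r.stop);
+ */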
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/RecognitionException.java b/runtime/Java/src/main/java/org/antlr/runtime/RecognitionException.java
new file mode 100644
index 0000000..c3f811d
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/RecognitionException.java
@@ -0,0 +1,196 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import org.antlr.runtime.tree.*;
+
+/** The root of the ANTLR exception hierarchy.
+ *
+ *  To avoid English-only error messages and to generally make things
+ *  as flexible as possible, these exceptions are not created with strings,
+ *  but rather the information necessary to generate an error.  Then
+ *  the various reporting methods in Parser and Lexer can be overridden
+ *  to generate a localized error message.  For example, MismatchedToken
+ *  exceptions are built with the expected token type.
+ *  So, don't expect getMessage() to return anything.
+ *
+ *  Note that as of Java 1.4, you can access the stack trace, which means
+ *  that you can compute the complete trace of rules from the start symbol.
+ *  This gives you considerable context information with which to generate
+ *  useful error messages.
+ *
+ *  ANTLR generates code that throws exceptions upon recognition error and
+ *  also generates code to catch these exceptions in each rule.  If you
+ *  want to quit upon first error, you can turn off the automatic error
+ *  handling mechanism using rulecatch action, but you still need to
+ *  override methods mismatch and recoverFromMismatchSet.
+ *
+ *  In general, the recognition exceptions can track where in a grammar a
+ *  problem occurred and/or what was the expected input.  While the parser
+ *  knows its state (such as the current input symbol and line info), that
+ *  state can change before the exception is reported, so the current token index
+ *  is computed and stored at exception time.  From this info, you can
+ *  perhaps print an entire line of input not just a single token, for example.
+ *  Better to just say the recognizer had a problem and then let the parser
+ *  figure out a fancy report.
+ */
+public class RecognitionException extends Exception {
+	/** What input stream did the error occur in? */
+	public transient IntStream input;
+
+	/** What is the index of the token/char we were looking at when the error occurred? */
+	public int index;
+
+	/** The current Token when an error occurred.  Since not all streams
+	 *  can retrieve the ith Token, we have to track the Token object.
+	 *  For parsers.  Even when it's a tree parser, token might be set.
+	 */
+	public Token token;
+
+	/** If this is a tree parser exception, node is set to the node with
+	 *  the problem.
+	 */
+	public Object node;
+
+	/** The current char when an error occurred. For lexers. */
+	public int c;
+
+	/** Track the line at which the error occurred in case this is
+	 *  generated from a lexer.  We need to track this since the
+	 *  unexpected char doesn't carry the line info.
+	 */
+	public int line;
+
+	public int charPositionInLine;
+
+	/** If you are parsing a tree node stream, you will encounter some
+	 *  imaginary nodes w/o line/col info.  We now search backwards looking
+	 *  for most recent token with line/col info, but notify getErrorHeader()
+	 *  that info is approximate.
+	 */
+	public boolean approximateLineInfo;
+
+	/** Used for remote debugger deserialization */
+	public RecognitionException() {
+	}
+
+	public RecognitionException(IntStream input) {
+		this.input = input;
+		this.index = input.index();
+		if ( input instanceof TokenStream ) {
+			this.token = ((TokenStream)input).LT(1);
+			this.line = token.getLine();
+			this.charPositionInLine = token.getCharPositionInLine();
+		}
+		if ( input instanceof TreeNodeStream ) {
+			extractInformationFromTreeNodeStream(input);
+		}
+		else if ( input instanceof CharStream ) {
+			this.c = input.LA(1);
+			this.line = ((CharStream)input).getLine();
+			this.charPositionInLine = ((CharStream)input).getCharPositionInLine();
+		}
+		else {
+			this.c = input.LA(1);
+		}
+	}
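+
+	/* Reporting sketch: the recognizer, not this exception, renders messages, so
+	 * localization or custom formatting is done by overriding the recognizer's
+	 * reporting hook (shown here as it might look in a generated parser subclass):
+	 *
+	 *   @Override
+	 *   public void displayRecognitionError(String[] tokenNames, RecognitionException e) {
+	 *       System.err.println(getErrorHeader(e) + " " + getErrorMessage(e, tokenNames));
+	 *   }
+	 */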
+
+	protected void extractInformationFromTreeNodeStream(IntStream input) {
+		TreeNodeStream nodes = (TreeNodeStream)input;
+
+		this.node = nodes.LT(1);
+
+		Object positionNode = null;
+		if (nodes instanceof PositionTrackingStream) {
+			positionNode = ((PositionTrackingStream<?>)nodes).getKnownPositionElement(false);
+			if (positionNode == null) {
+				positionNode = ((PositionTrackingStream<?>)nodes).getKnownPositionElement(true);
+				this.approximateLineInfo = positionNode != null;
+			}
+		}
+
+		TreeAdaptor adaptor = nodes.getTreeAdaptor();
+		Token payload = adaptor.getToken(positionNode != null ? positionNode : this.node);
+		if ( payload!=null ) {
+			this.token = payload;
+			if ( payload.getLine()<= 0 ) {
+				// imaginary node; no line/pos info; scan backwards
+				int i = -1;
+				Object priorNode = nodes.LT(i);
+				while ( priorNode!=null ) {
+					Token priorPayload = adaptor.getToken(priorNode);
+					if ( priorPayload!=null && priorPayload.getLine()>0 ) {
+						// we found the most recent real line / pos info
+						this.line = priorPayload.getLine();
+						this.charPositionInLine = priorPayload.getCharPositionInLine();
+						this.approximateLineInfo = true;
+						break;
+					}
+
+					--i;
+					try {
+						priorNode = nodes.LT(i);
+					} catch (UnsupportedOperationException ex) {
+						priorNode = null;
+					}
+				}
+			}
+			else { // node created from real token
+				this.line = payload.getLine();
+				this.charPositionInLine = payload.getCharPositionInLine();
+			}
+		}
+		else if ( this.node instanceof Tree) {
+			this.line = ((Tree)this.node).getLine();
+			this.charPositionInLine = ((Tree)this.node).getCharPositionInLine();
+			if ( this.node instanceof CommonTree) {
+				this.token = ((CommonTree)this.node).token;
+			}
+		}
+		else {
+			int type = adaptor.getType(this.node);
+			String text = adaptor.getText(this.node);
+			this.token = new CommonToken(type, text);
+		}
+	}
+
+	/** Return the token type or char of the unexpected input element */
+	public int getUnexpectedType() {
+		if ( input instanceof TokenStream ) {
+			return token.getType();
+		}
+		else if ( input instanceof TreeNodeStream ) {
+			TreeNodeStream nodes = (TreeNodeStream)input;
+			TreeAdaptor adaptor = nodes.getTreeAdaptor();
+			return adaptor.getType(node);
+		}
+		else {
+			return c;
+		}
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/RecognizerSharedState.java b/runtime/Java/src/main/java/org/antlr/runtime/RecognizerSharedState.java
new file mode 100644
index 0000000..c6cd1a4
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/RecognizerSharedState.java
@@ -0,0 +1,146 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import java.util.Map;
+
+/** The set of fields needed by an abstract recognizer to recognize input
+ *  and recover from errors etc...  As a separate state object, it can be
+ *  shared among multiple grammars; e.g., when one grammar imports another.
+ *
+ *  These fields are publicly visible but the actual state pointer per
+ *  parser is protected.
+ */
+public class RecognizerSharedState {
+	/** Track the set of token types that can follow any rule invocation.
+	 *  Stack grows upwards.  When it hits the max, it grows 2x in size
+	 *  and keeps going.
+	 */
+	public BitSet[] following = new BitSet[BaseRecognizer.INITIAL_FOLLOW_STACK_SIZE];
+	public int _fsp = -1;
+
+	/** This is true when we see an error and before having successfully
+	 *  matched a token.  Prevents generation of more than one error message
+	 *  per error.
+	 */
+	public boolean errorRecovery = false;
+
+	/** The index into the input stream where the last error occurred.
+	 * 	This is used to prevent infinite loops where an error is found
+	 *  but no token is consumed during recovery...another error is found,
+	 *  ad nauseam.  This is a failsafe mechanism to guarantee that at least
+	 *  one token/tree node is consumed for two errors.
+	 */
+	public int lastErrorIndex = -1;
+
+	/** In lieu of a return value, this indicates that a rule or token
+	 *  has failed to match.  Reset to false upon valid token match.
+	 */
+	public boolean failed = false;
+
+	/** Did the recognizer encounter a syntax error?  Track how many. */
+	public int syntaxErrors = 0;
+
+	/** If 0, no backtracking is going on.  Safe to exec actions etc...
+	 *  If &gt;0 then it's the level of backtracking.
+	 */
+	public int backtracking = 0;
+
+	/** An array[size num rules] of Map&lt;Integer,Integer&gt; that tracks
+	 *  the stop token index for each rule.  ruleMemo[ruleIndex] is
+	 *  the memoization table for ruleIndex.  For key ruleStartIndex, you
+	 *  get back the stop token for associated rule or MEMO_RULE_FAILED.
+	 *
+	 *  This is only used if rule memoization is on (which it is by default).
+	 */
+	public Map<Integer, Integer>[] ruleMemo;
+
+
+	// LEXER FIELDS (must be in same state object to avoid casting
+	//               constantly in generated code and Lexer object) :(
+
+
+	/** The goal of all lexer rules/methods is to create a token object.
+	 *  This is an instance variable as multiple rules may collaborate to
+	 *  create a single token.  nextToken will return this object after
+	 *  matching lexer rule(s).  If you subclass to allow multiple token
+	 *  emissions, then set this to the last token to be matched or
+	 *  something nonnull so that the auto token emit mechanism will not
+	 *  emit another token.
+	 */
+    public Token token;
+
+	/** What character index in the stream did the current token start at?
+	 *  Needed, for example, to get the text for current token.  Set at
+	 *  the start of nextToken.
+ 	 */
+	public int tokenStartCharIndex = -1;
+
+	/** The line on which the first character of the token resides */
+	public int tokenStartLine;
+
+	/** The character position of first character within the line */
+	public int tokenStartCharPositionInLine;
+
+	/** The channel number for the current token */
+	public int channel;
+
+	/** The token type for the current token */
+	public int type;
+
+	/** You can set the text for the current token to override what is in
+	 *  the input char buffer.  Use setText() or can set this instance var.
+ 	 */
+	public String text;
+
+    public RecognizerSharedState() {}
+    
+	@SuppressWarnings("unchecked")
+    public RecognizerSharedState(RecognizerSharedState state) {
+        if ( this.following.length < state.following.length ) {
+            this.following = new BitSet[state.following.length];
+        }
+        System.arraycopy(state.following, 0, this.following, 0, state.following.length);
+        this._fsp = state._fsp;
+        this.errorRecovery = state.errorRecovery;
+        this.lastErrorIndex = state.lastErrorIndex;
+        this.failed = state.failed;
+        this.syntaxErrors = state.syntaxErrors;
+        this.backtracking = state.backtracking;
+        if ( state.ruleMemo!=null ) {
+            this.ruleMemo = (Map<Integer, Integer>[])new Map<?, ?>[state.ruleMemo.length];
+            System.arraycopy(state.ruleMemo, 0, this.ruleMemo, 0, state.ruleMemo.length);
+        }
+        this.token = state.token;
+        this.tokenStartCharIndex = state.tokenStartCharIndex;
+        this.tokenStartCharPositionInLine = state.tokenStartCharPositionInLine;
+        this.channel = state.channel;
+        this.type = state.type;
+        this.text = state.text;
+    }
+}
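The copy constructor above copies the follow-set stack into its own array and carries over the per-rule memoization table by reference. A minimal sketch using only the fields defined in this class:

    RecognizerSharedState original = new RecognizerSharedState();
    original.backtracking = 1;
    original._fsp = 0;
    // the copy gets its own following[] array; ruleMemo (if set) is copied entry-by-reference
    RecognizerSharedState copy = new RecognizerSharedState(original);
    assert copy.backtracking == 1 && copy._fsp == 0;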
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/RuleReturnScope.java b/runtime/Java/src/main/java/org/antlr/runtime/RuleReturnScope.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/RuleReturnScope.java
rename to runtime/Java/src/main/java/org/antlr/runtime/RuleReturnScope.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/SerializedGrammar.java b/runtime/Java/src/main/java/org/antlr/runtime/SerializedGrammar.java
new file mode 100644
index 0000000..7ccfad3
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/SerializedGrammar.java
@@ -0,0 +1,209 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.runtime;
+
+import java.io.IOException;
+import java.io.FileInputStream;
+import java.io.BufferedInputStream;
+import java.io.DataInputStream;
+import java.util.List;
+import java.util.ArrayList;
+
+public class SerializedGrammar {
+    public static final String COOKIE = "$ANTLR";
+    public static final int FORMAT_VERSION = 1;
+    //public static org.antlr.tool.Grammar gr; // TESTING ONLY; remove later
+
+    public String name;
+    public char type; // in {l, p, t, c}
+    public List<? extends Rule> rules;
+
+    protected class Rule {
+        String name;
+        Block block;
+        public Rule(String name, Block block) {
+            this.name = name;
+            this.block = block;
+        }
+		@Override
+        public String toString() {
+            return name+":"+block;
+        }
+    }
+
+	protected abstract class Node {
+		@Override
+		public abstract String toString();
+	}
+
+    protected class Block extends Node {
+        List[] alts;
+        public Block(List[] alts) {
+            this.alts = alts;
+        }
+		@Override
+        public String toString() {
+            StringBuilder buf = new StringBuilder();
+            buf.append("(");
+            for (int i = 0; i < alts.length; i++) {
+                List<?> alt = alts[i];
+                if ( i>0 ) buf.append("|");
+                buf.append(alt.toString());
+            }
+            buf.append(")");
+            return buf.toString();
+        }
+    }
+
+    protected class TokenRef extends Node {
+        int ttype;
+        public TokenRef(int ttype) { this.ttype = ttype; }
+		@Override
+        public String toString() { return String.valueOf(ttype); }
+    }
+
+    protected class RuleRef extends Node {
+        int ruleIndex;
+        public RuleRef(int ruleIndex) { this.ruleIndex = ruleIndex; }
+		@Override
+        public String toString() { return String.valueOf(ruleIndex); }
+    }
+
+    public SerializedGrammar(String filename) throws IOException {
+        System.out.println("loading "+filename);
+        FileInputStream fis = new FileInputStream(filename);
+        BufferedInputStream bos = new BufferedInputStream(fis);
+        DataInputStream in = new DataInputStream(bos);
+        readFile(in);
+        in.close();
+    }
+
+    protected void readFile(DataInputStream in) throws IOException {
+        String cookie = readString(in); // get $ANTLR
+        if ( !cookie.equals(COOKIE) ) throw new IOException("not a serialized grammar file");
+        int version = in.readByte();
+        char grammarType = (char)in.readByte();
+        this.type = grammarType;
+        String grammarName = readString(in);
+        this.name = grammarName;
+        System.out.println(grammarType+" grammar "+grammarName);
+        int numRules = in.readShort();
+        System.out.println("num rules = "+numRules);
+        rules = readRules(in, numRules);
+    }
+
+    protected List<? extends Rule> readRules(DataInputStream in, int numRules) throws IOException {
+        List<Rule> rules = new ArrayList<Rule>();
+        for (int i=0; i<numRules; i++) {
+            Rule r = readRule(in);
+            rules.add(r);
+        }
+        return rules;
+    }
+
+    protected Rule readRule(DataInputStream in) throws IOException {
+        byte R = in.readByte();
+        if ( R!='R' ) throw new IOException("missing R on start of rule");
+        String name = readString(in);
+        System.out.println("rule: "+name);
+        byte B = in.readByte();
+        Block b = readBlock(in);
+        byte period = in.readByte();
+        if ( period!='.' ) throw new IOException("missing . on end of rule");
+        return new Rule(name, b);
+    }
+
+    protected Block readBlock(DataInputStream in) throws IOException {
+        int nalts = in.readShort();
+		@SuppressWarnings("unchecked")
+        List<Node>[] alts = (List<Node>[])new List<?>[nalts];
+        //System.out.println("enter block n="+nalts);
+        for (int i=0; i<nalts; i++) {
+            List<Node> alt = readAlt(in);
+            alts[i] = alt;
+        }
+        //System.out.println("exit block");
+        return new Block(alts);
+    }
+
+    protected List<Node> readAlt(DataInputStream in) throws IOException {
+        List<Node> alt = new ArrayList<Node>();
+        byte A = in.readByte();
+        if ( A!='A' ) throw new IOException("missing A on start of alt");
+        byte cmd = in.readByte();
+        while ( cmd!=';' ) {
+            switch (cmd) {
+                case 't' :
+                    int ttype = in.readShort();
+                    alt.add(new TokenRef(ttype));
+                    //System.out.println("read token "+gr.getTokenDisplayName(ttype));
+                    break;
+                case 'r' :
+                    int ruleIndex = in.readShort();
+                    alt.add(new RuleRef(ruleIndex));
+                    //System.out.println("read rule "+gr.getRuleName(ruleIndex));
+                    break;
+                case '.' : // wildcard
+                    break;
+                case '-' : // range
+                    int from = in.readChar();
+                    int to = in.readChar();
+                    break;
+                case '~' : // not
+                    int notThisTokenType = in.readShort();
+                    break;
+                case 'B' : // nested block
+                    Block b = readBlock(in);
+                    alt.add(b);
+                    break;
+            }
+            cmd = in.readByte();
+        }
+        //System.out.println("exit alt");
+        return alt;
+    }
+
+    protected String readString(DataInputStream in) throws IOException {
+        byte c = in.readByte();
+        StringBuilder buf = new StringBuilder();
+        while ( c!=';' ) {
+            buf.append((char)c);
+            c = in.readByte();
+        }
+        return buf.toString();
+    }
+
+	@Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder();
+        buf.append(type).append(" grammar ").append(name);
+        buf.append(rules);
+        return buf.toString();
+    }
+}
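A minimal sketch of loading a serialized grammar with the class above; the file name T.g.ser is hypothetical, and the constructor prints the grammar header and each rule name as it reads the file:

    import java.io.IOException;
    import org.antlr.runtime.SerializedGrammar;

    public class LoadSerializedGrammar {
        public static void main(String[] args) throws IOException {
            SerializedGrammar sg = new SerializedGrammar("T.g.ser"); // hypothetical input file
            System.out.println(sg); // type, name, and rule list via toString()
        }
    }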
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/Token.java b/runtime/Java/src/main/java/org/antlr/runtime/Token.java
new file mode 100644
index 0000000..1073276
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/Token.java
@@ -0,0 +1,90 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+public interface Token {
+	public static final int EOR_TOKEN_TYPE = 1;
+
+	/** imaginary tree navigation type; traverse "get child" link */
+	public static final int DOWN = 2;
+	/** imaginary tree navigation type; finish with a child list */
+	public static final int UP = 3;
+
+	public static final int MIN_TOKEN_TYPE = UP+1;
+
+    public static final int EOF = CharStream.EOF;
+
+	public static final int INVALID_TOKEN_TYPE = 0;
+	public static final Token INVALID_TOKEN = new CommonToken(INVALID_TOKEN_TYPE);
+
+	/** In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
+	 *  will avoid creating a token for this symbol and try to fetch another.
+	 */
+	public static final Token SKIP_TOKEN = new CommonToken(INVALID_TOKEN_TYPE);
+
+	/** All tokens go to the parser (unless skip() is called in that rule)
+	 *  on a particular "channel".  The parser tunes to a particular channel
+	 *  so that whitespace etc... can go to the parser on a "hidden" channel.
+	 */
+	public static final int DEFAULT_CHANNEL = 0;
+	
+	/** Anything on a channel other than DEFAULT_CHANNEL is not parsed
+	 *  by the parser.
+	 */
+	public static final int HIDDEN_CHANNEL = 99;
+
+	/** Get the text of the token */
+	public String getText();
+	public void setText(String text);
+
+	public int getType();
+	public void setType(int ttype);
+	/**  The line number on which this token was matched; line=1..n */
+	public int getLine();
+    public void setLine(int line);
+
+	/** The index of the first character relative to the beginning of the line 0..n-1 */
+	public int getCharPositionInLine();
+	public void setCharPositionInLine(int pos);
+
+	public int getChannel();
+	public void setChannel(int channel);
+
+	/** An index from 0..n-1 of the token object in the input stream.
+	 *  This must be valid in order to use the ANTLRWorks debugger.
+	 */
+	public int getTokenIndex();
+	public void setTokenIndex(int index);
+
+	/** From what character stream was this token created?  You don't have to
+	 *  implement but it's nice to know where a Token comes from if you have
+	 *  include files etc... on the input.
+	 */
+	public CharStream getInputStream();
+	public void setInputStream(CharStream input);
+}
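A short sketch of the channel constants above, assuming the CommonToken(int, String) constructor provided elsewhere in this runtime; WS is a hypothetical token type:

    int WS = Token.MIN_TOKEN_TYPE;        // hypothetical whitespace token type (first non-reserved type)
    CommonToken ws = new CommonToken(WS, "   ");
    ws.setChannel(Token.HIDDEN_CHANNEL);  // a parser tuned to DEFAULT_CHANNEL will not see this token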
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/TokenRewriteStream.java b/runtime/Java/src/main/java/org/antlr/runtime/TokenRewriteStream.java
new file mode 100644
index 0000000..f444210
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/TokenRewriteStream.java
@@ -0,0 +1,594 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import java.util.*;
+
+/** Useful for dumping out the input stream after doing some
+ *  augmentation or other manipulations.
+ *
+ *  You can insert stuff, replace, and delete chunks.  Note that the
+ *  operations are done lazily--only if you convert the buffer to a
+ *  String.  This is very efficient because you are not moving data around
+ *  all the time.  As the buffer of tokens is converted to strings, the
+ *  toString() method(s) check to see if there is an operation at the
+ *  current index.  If so, the operation is done and then normal String
+ *  rendering continues on the buffer.  This is like having multiple Turing
+ *  machine instruction streams (programs) operating on a single input tape. :)
+ *
+ *  Since the operations are done lazily at toString-time, operations do not
+ *  screw up the token index values.  That is, an insert operation at token
+ *  index i does not change the index values for tokens i+1..n-1.
+ *
+ *  Because operations never actually alter the buffer, you may always get
+ *  the original token stream back without undoing anything.  Since
+ *  the instructions are queued up, you can easily simulate transactions and
+ *  roll back any changes if there is an error just by removing instructions.
+ *  For example,
+ *
+ *   CharStream input = new ANTLRFileStream("input");
+ *   TLexer lex = new TLexer(input);
+ *   TokenRewriteStream tokens = new TokenRewriteStream(lex);
+ *   T parser = new T(tokens);
+ *   parser.startRule();
+ *
+ * 	 Then in the rules, you can execute
+ *      Token t,u;
+ *      ...
+ *      input.insertAfter(t, "text to put after t");
+ * 		input.insertAfter(u, "text after u");
+ * 		System.out.println(tokens.toString());
+ *
+ *  Actually, you have to cast the 'input' to a TokenRewriteStream. :(
+ *
+ *  You can also have multiple "instruction streams" and get multiple
+ *  rewrites from a single pass over the input.  Just name the instruction
+ *  streams and use that name again when printing the buffer.  This could be
+ *  useful for generating a C file and also its header file--all from the
+ *  same buffer:
+ *
+ *      tokens.insertAfter("pass1", t, "text to put after t");}
+ * 		tokens.insertAfter("pass2", u, "text after u");}
+ * 		System.out.println(tokens.toString("pass1"));
+ * 		System.out.println(tokens.toString("pass2"));
+ *
+ *  If you don't use named rewrite streams, a "default" stream is used as
+ *  the first example shows.
+ */
+public class TokenRewriteStream extends CommonTokenStream {
+	public static final String DEFAULT_PROGRAM_NAME = "default";
+    public static final int PROGRAM_INIT_SIZE = 100;
+	public static final int MIN_TOKEN_INDEX = 0;
+
+	// Define the rewrite operation hierarchy
+
+	public class RewriteOperation {
+        /** What index into rewrites List are we? */
+        protected int instructionIndex;
+        /** Token buffer index. */
+        protected int index;
+		protected Object text;
+
+		protected RewriteOperation(int index) {
+			this.index = index;
+		}
+
+		protected RewriteOperation(int index, Object text) {
+			this.index = index;
+			this.text = text;
+		}
+		/** Execute the rewrite operation by possibly adding to the buffer.
+		 *  Return the index of the next token to operate on.
+		 */
+		public int execute(StringBuffer buf) {
+			return index;
+		}
+		@Override
+		public String toString() {
+			String opName = getClass().getName();
+			int $index = opName.indexOf('$');
+			opName = opName.substring($index+1, opName.length());
+			return "<"+opName+"@"+tokens.get(index)+
+				   ":\""+text+"\">";
+		}
+	}
+
+	class InsertBeforeOp extends RewriteOperation {
+		public InsertBeforeOp(int index, Object text) {
+			super(index,text);
+		}
+		@Override
+		public int execute(StringBuffer buf) {
+			buf.append(text);
+			if ( tokens.get(index).getType()!=Token.EOF ) {
+				buf.append(tokens.get(index).getText());
+			}
+			return index+1;
+		}
+	}
+
+	/** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+	 *  instructions.
+	 */
+	class ReplaceOp extends RewriteOperation {
+		protected int lastIndex;
+		public ReplaceOp(int from, int to, Object text) {
+			super(from,text);
+			lastIndex = to;
+		}
+		@Override
+		public int execute(StringBuffer buf) {
+			if ( text!=null ) {
+				buf.append(text);
+			}
+			return lastIndex+1;
+		}
+		@Override
+		public String toString() {
+			if ( text==null ) {
+				return "<DeleteOp@"+tokens.get(index)+
+					   ".."+tokens.get(lastIndex)+">";
+			}
+			return "<ReplaceOp@"+tokens.get(index)+
+				   ".."+tokens.get(lastIndex)+":\""+text+"\">";
+		}
+	}
+
+	/** You may have multiple, named streams of rewrite operations.
+	 *  I'm calling these things "programs."
+	 *  Maps String (name) &rarr; rewrite (List)
+	 */
+	protected Map<String, List<RewriteOperation>> programs = null;
+
+	/** Map String (program name) &rarr; Integer index */
+	protected Map<String, Integer> lastRewriteTokenIndexes = null;
+
+	public TokenRewriteStream() {
+		init();
+	}
+
+	protected void init() {
+		programs = new HashMap<String, List<RewriteOperation>>();
+		programs.put(DEFAULT_PROGRAM_NAME, new ArrayList<RewriteOperation>(PROGRAM_INIT_SIZE));
+		lastRewriteTokenIndexes = new HashMap<String, Integer>();
+	}
+
+	public TokenRewriteStream(TokenSource tokenSource) {
+	    super(tokenSource);
+		init();
+	}
+
+	public TokenRewriteStream(TokenSource tokenSource, int channel) {
+		super(tokenSource, channel);
+		init();
+	}
+
+	public void rollback(int instructionIndex) {
+		rollback(DEFAULT_PROGRAM_NAME, instructionIndex);
+	}
+
+	/** Rollback the instruction stream for a program so that
+	 *  the indicated instruction (via instructionIndex) is no
+	 *  longer in the stream.  UNTESTED!
+	 */
+	public void rollback(String programName, int instructionIndex) {
+		List<RewriteOperation> is = programs.get(programName);
+		if ( is!=null ) {
+			programs.put(programName, is.subList(MIN_TOKEN_INDEX,instructionIndex));
+		}
+	}
+
+	public void deleteProgram() {
+		deleteProgram(DEFAULT_PROGRAM_NAME);
+	}
+
+	/** Reset the program so that no instructions exist */
+	public void deleteProgram(String programName) {
+		rollback(programName, MIN_TOKEN_INDEX);
+	}
+
+	public void insertAfter(Token t, Object text) {
+		insertAfter(DEFAULT_PROGRAM_NAME, t, text);
+	}
+
+	public void insertAfter(int index, Object text) {
+		insertAfter(DEFAULT_PROGRAM_NAME, index, text);
+	}
+
+	public void insertAfter(String programName, Token t, Object text) {
+		insertAfter(programName,t.getTokenIndex(), text);
+	}
+
+	public void insertAfter(String programName, int index, Object text) {
+		// to insert after, just insert before next index (even if past end)
+		insertBefore(programName,index+1, text);
+	}
+
+	public void insertBefore(Token t, Object text) {
+		insertBefore(DEFAULT_PROGRAM_NAME, t, text);
+	}
+
+	public void insertBefore(int index, Object text) {
+		insertBefore(DEFAULT_PROGRAM_NAME, index, text);
+	}
+
+	public void insertBefore(String programName, Token t, Object text) {
+		insertBefore(programName, t.getTokenIndex(), text);
+	}
+
+	public void insertBefore(String programName, int index, Object text) {
+		RewriteOperation op = new InsertBeforeOp(index,text);
+		List<? super RewriteOperation> rewrites = getProgram(programName);
+        op.instructionIndex = rewrites.size();
+        rewrites.add(op);		
+	}
+
+	public void replace(int index, Object text) {
+		replace(DEFAULT_PROGRAM_NAME, index, index, text);
+	}
+
+	public void replace(int from, int to, Object text) {
+		replace(DEFAULT_PROGRAM_NAME, from, to, text);
+	}
+
+	public void replace(Token indexT, Object text) {
+		replace(DEFAULT_PROGRAM_NAME, indexT, indexT, text);
+	}
+
+	public void replace(Token from, Token to, Object text) {
+		replace(DEFAULT_PROGRAM_NAME, from, to, text);
+	}
+
+	public void replace(String programName, int from, int to, Object text) {
+		if ( from > to || from<0 || to<0 || to >= tokens.size() ) {
+			throw new IllegalArgumentException("replace: range invalid: "+from+".."+to+"(size="+tokens.size()+")");
+		}
+		RewriteOperation op = new ReplaceOp(from, to, text);
+		List<? super RewriteOperation> rewrites = getProgram(programName);
+        op.instructionIndex = rewrites.size();
+        rewrites.add(op);
+	}
+
+	public void replace(String programName, Token from, Token to, Object text) {
+		replace(programName,
+				from.getTokenIndex(),
+				to.getTokenIndex(),
+				text);
+	}
+
+	public void delete(int index) {
+		delete(DEFAULT_PROGRAM_NAME, index, index);
+	}
+
+	public void delete(int from, int to) {
+		delete(DEFAULT_PROGRAM_NAME, from, to);
+	}
+
+	public void delete(Token indexT) {
+		delete(DEFAULT_PROGRAM_NAME, indexT, indexT);
+	}
+
+	public void delete(Token from, Token to) {
+		delete(DEFAULT_PROGRAM_NAME, from, to);
+	}
+
+	public void delete(String programName, int from, int to) {
+		replace(programName,from,to,null);
+	}
+
+	public void delete(String programName, Token from, Token to) {
+		replace(programName,from,to,null);
+	}
+
+	public int getLastRewriteTokenIndex() {
+		return getLastRewriteTokenIndex(DEFAULT_PROGRAM_NAME);
+	}
+
+	protected int getLastRewriteTokenIndex(String programName) {
+		Integer I = lastRewriteTokenIndexes.get(programName);
+		if ( I==null ) {
+			return -1;
+		}
+		return I;
+	}
+
+	protected void setLastRewriteTokenIndex(String programName, int i) {
+		lastRewriteTokenIndexes.put(programName, i);
+	}
+
+	protected List<RewriteOperation> getProgram(String name) {
+		List<RewriteOperation> is = programs.get(name);
+		if ( is==null ) {
+			is = initializeProgram(name);
+		}
+		return is;
+	}
+
+	private List<RewriteOperation> initializeProgram(String name) {
+		List<RewriteOperation> is = new ArrayList<RewriteOperation>(PROGRAM_INIT_SIZE);
+		programs.put(name, is);
+		return is;
+	}
+
+	public String toOriginalString() {
+        fill();
+		return toOriginalString(MIN_TOKEN_INDEX, size()-1);
+	}
+
+	public String toOriginalString(int start, int end) {
+		StringBuilder buf = new StringBuilder();
+		for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i<tokens.size(); i++) {
+			if ( get(i).getType()!=Token.EOF ) buf.append(get(i).getText());
+		}
+		return buf.toString();
+	}
+
+	@Override
+	public String toString() {
+        fill();
+		return toString(MIN_TOKEN_INDEX, size()-1);
+	}
+
+	public String toString(String programName) {
+        fill();
+		return toString(programName, MIN_TOKEN_INDEX, size()-1);
+	}
+
+	@Override
+	public String toString(int start, int end) {
+		return toString(DEFAULT_PROGRAM_NAME, start, end);
+	}
+
+	public String toString(String programName, int start, int end) {
+		List<RewriteOperation> rewrites = programs.get(programName);
+
+        // ensure start/end are in range
+        if ( end>tokens.size()-1 ) end = tokens.size()-1;
+        if ( start<0 ) start = 0;
+
+        if ( rewrites==null || rewrites.isEmpty() ) {
+			return toOriginalString(start,end); // no instructions to execute
+		}
+		StringBuffer buf = new StringBuffer();
+
+		// First, optimize instruction stream
+		Map<Integer, ? extends RewriteOperation> indexToOp = reduceToSingleOperationPerIndex(rewrites);
+
+        // Walk buffer, executing instructions and emitting tokens
+        int i = start;
+        while ( i <= end && i < tokens.size() ) {
+			RewriteOperation op = indexToOp.get(i);
+			indexToOp.remove(i); // remove so any left have index size-1
+			Token t = tokens.get(i);
+			if ( op==null ) {
+				// no operation at that index, just dump token
+				if ( t.getType()!=Token.EOF ) buf.append(t.getText());
+				i++; // move to next token
+			}
+			else {
+				i = op.execute(buf); // execute operation and skip
+			}
+		}
+
+        // include stuff after end if it's last index in buffer
+        // So, if they did an insertAfter(lastValidIndex, "foo"), include
+        // foo if end==lastValidIndex.
+        if ( end==tokens.size()-1 ) {
+            // Scan any remaining operations after last token
+            // should be included (they will be inserts).
+            for (RewriteOperation op : indexToOp.values()) {
+                if ( op.index >= tokens.size()-1 ) buf.append(op.text);
+            }
+        }
+        return buf.toString();
+	}
+
+	/** We need to combine operations and report invalid operations (like
+	 *  overlapping replaces that are not completely nested).  Inserts to
+	 *  same index need to be combined etc...   Here are the cases:
+	 *
+	 *  I.i.u I.j.v								leave alone, nonoverlapping
+	 *  I.i.u I.i.v								combine: Iivu
+	 *
+	 *  R.i-j.u R.x-y.v	| i-j in x-y			delete first R
+	 *  R.i-j.u R.i-j.v							delete first R
+	 *  R.i-j.u R.x-y.v	| x-y in i-j			ERROR
+	 *  R.i-j.u R.x-y.v	| boundaries overlap	ERROR
+	 *
+	 *  Delete special case of replace (text==null):
+	 *  D.i-j.u D.x-y.v	| boundaries overlap	combine to min(left)..max(right)
+	 *
+	 *  I.i.u R.x-y.v | i in (x+1)-y			delete I (since insert before
+	 *											we're not deleting i)
+	 *  I.i.u R.x-y.v | i not in (x+1)-y		leave alone, nonoverlapping
+	 *  R.x-y.v I.i.u | i in x-y				ERROR
+	 *  R.x-y.v I.x.u 							R.x-y.uv (combine, delete I)
+	 *  R.x-y.v I.i.u | i not in x-y			leave alone, nonoverlapping
+	 *
+	 *  I.i.u = insert u before op @ index i
+	 *  R.x-y.u = replace x-y indexed tokens with u
+	 *
+	 *  First we need to examine replaces.  For any replace op:
+	 *
+	 * 		1. wipe out any insertions before op within that range.
+	 *		2. Drop any replace op before that is contained completely within
+	 *         that range.
+	 *		3. Throw exception upon boundary overlap with any previous replace.
+	 *
+	 *  Then we can deal with inserts:
+	 *
+	 * 		1. for any inserts to same index, combine even if not adjacent.
+	 * 		2. for any prior replace with same left boundary, combine this
+	 *         insert with replace and delete this replace.
+	 * 		3. throw exception if index in same range as previous replace
+	 *
+	 *  Don't actually delete; make op null in list. Easier to walk list.
+	 *  Later we can throw as we add to index &rarr; op map.
+	 *
+	 *  Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+	 *  inserted stuff would be before the replace range.  But, if you
+	 *  add tokens in front of a method body '{' and then delete the method
+	 *  body, I think the stuff before the '{' you added should disappear too.
+	 *
+	 *  Return a map from token index to operation.
+	 */
+	protected Map<Integer, ? extends RewriteOperation> reduceToSingleOperationPerIndex(List<? extends RewriteOperation> rewrites) {
+//		System.out.println("rewrites="+rewrites);
+
+		// WALK REPLACES
+		for (int i = 0; i < rewrites.size(); i++) {
+			RewriteOperation op = rewrites.get(i);
+			if ( op==null ) continue;
+			if ( !(op instanceof ReplaceOp) ) continue;
+			ReplaceOp rop = (ReplaceOp)rewrites.get(i);
+			// Wipe prior inserts within range
+			List<? extends InsertBeforeOp> inserts = getKindOfOps(rewrites, InsertBeforeOp.class, i);
+			for (int j = 0; j < inserts.size(); j++) {
+				InsertBeforeOp iop = inserts.get(j);
+				if ( iop.index == rop.index ) {
+					// E.g., insert before 2, delete 2..2; update replace
+					// text to include insert before, kill insert
+					rewrites.set(iop.instructionIndex, null);
+					rop.text = iop.text.toString() + (rop.text!=null?rop.text.toString():"");
+				}
+				else if ( iop.index > rop.index && iop.index <= rop.lastIndex ) {
+                    // delete insert as it's a no-op.
+                    rewrites.set(iop.instructionIndex, null);
+				}
+			}
+			// Drop any prior replaces contained within
+			List<? extends ReplaceOp> prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
+			for (int j = 0; j < prevReplaces.size(); j++) {
+				ReplaceOp prevRop = prevReplaces.get(j);
+				if ( prevRop.index>=rop.index && prevRop.lastIndex <= rop.lastIndex ) {
+                    // delete replace as it's a no-op.
+                    rewrites.set(prevRop.instructionIndex, null);
+					continue;
+				}
+				// throw exception unless disjoint or identical
+				boolean disjoint =
+					prevRop.lastIndex<rop.index || prevRop.index > rop.lastIndex;
+				boolean same =
+					prevRop.index==rop.index && prevRop.lastIndex==rop.lastIndex;
+				// Delete special case of replace (text==null):
+				// D.i-j.u D.x-y.v	| boundaries overlap	combine to min(left)..max(right)
+				if ( prevRop.text==null && rop.text==null && !disjoint ) {
+					//System.out.println("overlapping deletes: "+prevRop+", "+rop);
+					rewrites.set(prevRop.instructionIndex, null); // kill first delete
+					rop.index = Math.min(prevRop.index, rop.index);
+					rop.lastIndex = Math.max(prevRop.lastIndex, rop.lastIndex);
+					System.out.println("new rop "+rop);
+				}
+				else if ( !disjoint && !same ) {
+					throw new IllegalArgumentException("replace op boundaries of "+rop+
+													   " overlap with previous "+prevRop);
+				}
+			}
+		}
+
+		// WALK INSERTS
+		for (int i = 0; i < rewrites.size(); i++) {
+			RewriteOperation op = rewrites.get(i);
+			if ( op==null ) continue;
+			if ( !(op instanceof InsertBeforeOp) ) continue;
+			InsertBeforeOp iop = (InsertBeforeOp)rewrites.get(i);
+			// combine current insert with prior if any at same index
+			List<? extends InsertBeforeOp> prevInserts = getKindOfOps(rewrites, InsertBeforeOp.class, i);
+			for (int j = 0; j < prevInserts.size(); j++) {
+				InsertBeforeOp prevIop = prevInserts.get(j);
+				if ( prevIop.index == iop.index ) { // combine objects
+					// convert to strings...we're in process of toString'ing
+					// whole token buffer so no lazy eval issue with any templates
+					iop.text = catOpText(iop.text,prevIop.text);
+                    // delete redundant prior insert
+                    rewrites.set(prevIop.instructionIndex, null);
+				}
+			}
+			// look for replaces where iop.index is in range; error
+			List<? extends ReplaceOp> prevReplaces = getKindOfOps(rewrites, ReplaceOp.class, i);
+			for (int j = 0; j < prevReplaces.size(); j++) {
+				ReplaceOp rop = prevReplaces.get(j);
+				if ( iop.index == rop.index ) {
+					rop.text = catOpText(iop.text,rop.text);
+					rewrites.set(i, null);  // delete current insert
+					continue;
+				}
+				if ( iop.index >= rop.index && iop.index <= rop.lastIndex ) {
+					throw new IllegalArgumentException("insert op "+iop+
+													   " within boundaries of previous "+rop);
+				}
+			}
+		}
+		// System.out.println("rewrites after="+rewrites);
+		Map<Integer, RewriteOperation> m = new HashMap<Integer, RewriteOperation>();
+		for (int i = 0; i < rewrites.size(); i++) {
+			RewriteOperation op = rewrites.get(i);
+			if ( op==null ) continue; // ignore deleted ops
+			if ( m.get(op.index)!=null ) {
+				throw new Error("should only be one op per index");
+			}
+			m.put(op.index, op);
+		}
+		//System.out.println("index to op: "+m);
+		return m;
+	}
+
+	protected String catOpText(Object a, Object b) {
+		String x = "";
+		String y = "";
+		if ( a!=null ) x = a.toString();
+		if ( b!=null ) y = b.toString();
+		return x+y;
+	}
+	protected <T extends RewriteOperation> List<? extends T> getKindOfOps(List<? extends RewriteOperation> rewrites, Class<T> kind) {
+		return getKindOfOps(rewrites, kind, rewrites.size());
+	}
+
+    /** Get all operations before an index of a particular kind */
+    protected <T extends RewriteOperation> List<? extends T> getKindOfOps(List<? extends RewriteOperation> rewrites, Class<T> kind, int before) {
+		List<T> ops = new ArrayList<T>();
+		for (int i=0; i<before && i<rewrites.size(); i++) {
+			RewriteOperation op = rewrites.get(i);
+			if ( op==null ) continue; // ignore deleted
+			if ( kind.isInstance(op) ) ops.add(kind.cast(op));
+		}		
+		return ops;
+	}
+
+	public String toDebugString() {
+		return toDebugString(MIN_TOKEN_INDEX, size()-1);
+	}
+
+	public String toDebugString(int start, int end) {
+		StringBuilder buf = new StringBuilder();
+		for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i<tokens.size(); i++) {
+			buf.append(get(i));
+		}
+		return buf.toString();
+	}
+}
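Putting the class comment's fragments together, a sketch of the lazy rewrite workflow; TLexer, TParser and startRule are hypothetical generated classes, while CharStream and ANTLRStringStream come from elsewhere in this runtime:

    CharStream input = new ANTLRStringStream("int x;");
    TLexer lex = new TLexer(input);                   // hypothetical generated lexer
    TokenRewriteStream tokens = new TokenRewriteStream(lex);
    TParser parser = new TParser(tokens);             // hypothetical generated parser
    parser.startRule();
    tokens.insertBefore(0, "/* edited */ ");          // queue an instruction; token indexes stay valid
    tokens.replace(2, "y");                           // replace the token at index 2
    System.out.println(tokens.toString());            // instructions execute lazily, here
    System.out.println(tokens.toOriginalString());    // the unmodified input is still recoverable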
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/TokenSource.java b/runtime/Java/src/main/java/org/antlr/runtime/TokenSource.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/TokenSource.java
rename to runtime/Java/src/main/java/org/antlr/runtime/TokenSource.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/TokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/TokenStream.java
new file mode 100644
index 0000000..94b25d4
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/TokenStream.java
@@ -0,0 +1,73 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+/** A stream of tokens accessing tokens from a TokenSource */
+public interface TokenStream extends IntStream {
+    /** Get Token at current input pointer + i ahead where i=1 is next Token.
+	 *  i&lt;0 indicates tokens in the past.  So -1 is previous token and -2 is
+	 *  two tokens ago. LT(0) is undefined.  For i&gt;=n, return Token.EOFToken.
+	 *  Return null for LT(0) and any index that results in an absolute address
+	 *  that is negative.
+	 */
+    public Token LT(int k);
+
+	/** How far ahead has the stream been asked to look?  The return
+	 *  value is a valid index from 0..n-1.
+	 */
+	int range();
+	
+	/** Get a token at an absolute index i; 0..n-1.  This is really only
+	 *  needed for profiling and debugging and token stream rewriting.
+	 *  If you don't want to buffer up tokens, then this method makes no
+	 *  sense for you.  Naturally you can't use the rewrite stream feature.
+	 *  I believe DebugTokenStream can easily be altered to not use
+	 *  this method, removing the dependency.
+	 */
+	public Token get(int i);
+
+	/** Where is this stream pulling tokens from?  This is not the name, but
+	 *  the object that provides Token objects.
+	 */
+	public TokenSource getTokenSource();
+
+	/** Return the text of all tokens from start to stop, inclusive.
+	 *  If the stream does not buffer all the tokens then it can just
+	 *  return "" or null;  Users should not access $ruleLabel.text in
+	 *  an action of course in that case.
+	 */
+	public String toString(int start, int stop);
+
+	/** Because the user is not required to use a token with an index stored
+	 *  in it, we must provide a means for two token objects themselves to
+	 *  indicate the start/end location.  Most often this will just delegate
+	 *  to the other toString(int,int).  This is also parallel with
+	 *  the TreeNodeStream.toString(Object,Object).
+	 */
+	public String toString(Token start, Token stop);
+}
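A brief sketch of the lookahead contract, using CommonTokenStream (which implements this interface); lexer stands in for any hypothetical TokenSource:

    TokenStream tokens = new CommonTokenStream(lexer); // lexer: a hypothetical TokenSource
    Token ahead = tokens.LT(1);           // next token, not yet consumed
    Token previous = tokens.LT(-1);       // most recently consumed token
    String slice = tokens.toString(0, 3); // text of tokens 0..3 when the stream buffers them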
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/UnbufferedTokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/UnbufferedTokenStream.java
new file mode 100644
index 0000000..d9ae3d5
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/UnbufferedTokenStream.java
@@ -0,0 +1,87 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+import org.antlr.runtime.misc.LookaheadStream;
+
+/** A token stream that pulls tokens from the token source on demand and
+ *  without tracking a complete buffer of the tokens. This stream buffers
+ *  the minimum number of tokens possible.  It's the same as
+ *  OnDemandTokenStream except that OnDemandTokenStream buffers all tokens.
+ *
+ *  You can't use this stream if you pass whitespace or other off-channel
+ *  tokens to the parser. The stream can't ignore off-channel tokens.
+ * 
+ *  You can only look backwards 1 token: LT(-1).
+ *
+ *  Use this when you need to read from a socket or other infinite stream.
+ *
+ *  @see BufferedTokenStream
+ *  @see CommonTokenStream
+ */
+public class UnbufferedTokenStream extends LookaheadStream<Token> implements TokenStream {
+	protected TokenSource tokenSource;
+    protected int tokenIndex = 0; // simple counter to set token index in tokens
+
+    /** Skip tokens on any channel but this one; this is how we skip whitespace... */
+    protected int channel = Token.DEFAULT_CHANNEL;
+
+	public UnbufferedTokenStream(TokenSource tokenSource) {
+		this.tokenSource = tokenSource;
+	}
+
+	@Override
+	public Token nextElement() {
+		Token t = tokenSource.nextToken();
+        t.setTokenIndex(tokenIndex++);
+		return t;
+	}
+
+	@Override
+    public boolean isEOF(Token o) { return o.getType() == Token.EOF; }    
+
+	@Override
+	public TokenSource getTokenSource() { return tokenSource; }
+
+	@Override
+	public String toString(int start, int stop) { return "n/a"; }
+
+	@Override
+	public String toString(Token start, Token stop) { return "n/a"; }
+
+	@Override
+    public int LA(int i) { return LT(i).getType(); }
+
+	@Override
+    public Token get(int i) {
+        throw new UnsupportedOperationException("Absolute token indexes are meaningless in an unbuffered stream");
+    }
+
+	@Override
+	public String getSourceName() {	return tokenSource.getSourceName();	}
+}
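A sketch of the intended use over an unbounded input, per the class comment; TLexer, TParser and startRule are hypothetical generated classes, the lexer is assumed to skip() whitespace rather than hide it on another channel, and IOException handling is omitted:

    CharStream chars = new ANTLRInputStream(socket.getInputStream()); // socket: a hypothetical java.net.Socket
    TLexer lex = new TLexer(chars);
    UnbufferedTokenStream tokens = new UnbufferedTokenStream(lex);
    TParser parser = new TParser(tokens);
    parser.startRule();
    // tokens.get(5) would throw UnsupportedOperationException: no absolute indexing without a buffer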
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/UnwantedTokenException.java b/runtime/Java/src/main/java/org/antlr/runtime/UnwantedTokenException.java
new file mode 100644
index 0000000..62ccabc
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/UnwantedTokenException.java
@@ -0,0 +1,54 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime;
+
+/** An extra token while parsing a TokenStream */
+public class UnwantedTokenException extends MismatchedTokenException {
+	/** Used for remote debugger deserialization */
+	public UnwantedTokenException() {;}
+
+	public UnwantedTokenException(int expecting, IntStream input) {
+		super(expecting, input);
+	}
+
+	public Token getUnexpectedToken() {
+		return token;
+	}
+
+	@Override
+	public String toString() {
+		String exp = ", expected "+expecting;
+		if ( expecting==Token.INVALID_TOKEN_TYPE ) {
+			exp = "";
+		}
+		if ( token==null ) {
+			return "UnwantedTokenException(found="+null+exp+")";
+		}
+		return "UnwantedTokenException(found="+token.getText()+exp+")";
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/BlankDebugEventListener.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/BlankDebugEventListener.java
new file mode 100755
index 0000000..053f1d0
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/BlankDebugEventListener.java
@@ -0,0 +1,77 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+
+/** A blank listener that does nothing; useful for real classes so
+ *  they don't have to have lots of blank methods and are less
+ *  sensitive to updates to the debug interface.
+ */
+public class BlankDebugEventListener implements DebugEventListener {
+	@Override public void enterRule(String grammarFileName, String ruleName) {}
+	@Override public void exitRule(String grammarFileName, String ruleName) {}
+	@Override public void enterAlt(int alt) {}
+	@Override public void enterSubRule(int decisionNumber) {}
+	@Override public void exitSubRule(int decisionNumber) {}
+	@Override public void enterDecision(int decisionNumber, boolean couldBacktrack) {}
+	@Override public void exitDecision(int decisionNumber) {}
+	@Override public void location(int line, int pos) {}
+	@Override public void consumeToken(Token token) {}
+	@Override public void consumeHiddenToken(Token token) {}
+	@Override public void LT(int i, Token t) {}
+	@Override public void mark(int i) {}
+	@Override public void rewind(int i) {}
+	@Override public void rewind() {}
+	@Override public void beginBacktrack(int level) {}
+	@Override public void endBacktrack(int level, boolean successful) {}
+	@Override public void recognitionException(RecognitionException e) {}
+	@Override public void beginResync() {}
+	@Override public void endResync() {}
+	@Override public void semanticPredicate(boolean result, String predicate) {}
+	@Override public void commence() {}
+	@Override public void terminate() {}
+
+	// Tree parsing stuff
+
+	@Override public void consumeNode(Object t) {}
+	@Override public void LT(int i, Object t) {}
+
+	// AST Stuff
+
+	@Override public void nilNode(Object t) {}
+	@Override public void errorNode(Object t) {}
+	@Override public void createNode(Object t) {}
+	@Override public void createNode(Object node, Token token) {}
+	@Override public void becomeRoot(Object newRoot, Object oldRoot) {}
+	@Override public void addChild(Object root, Object child) {}
+	@Override public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {}
+}
+
+
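Because every event is already a no-op, a concrete listener only overrides what it needs; a minimal sketch:

    // Hypothetical listener that only reports rule entry; all other events stay no-ops.
    class RuleTracer extends BlankDebugEventListener {
        @Override
        public void enterRule(String grammarFileName, String ruleName) {
            System.out.println("enter " + ruleName + " (" + grammarFileName + ")");
        }
    }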
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventHub.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventHub.java
new file mode 100644
index 0000000..593e092
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventHub.java
@@ -0,0 +1,323 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.RecognitionException;
+
+import java.util.List;
+import java.util.ArrayList;
+
+/** Broadcast debug events to multiple listeners.  Lets you debug and still
+ *  use the event mechanism to build parse trees etc...  Not thread-safe.
+ *  Don't add events in one thread while parser fires events in another.
+ * 
+ *  @see DebugEventRepeater
+ */
+public class DebugEventHub implements DebugEventListener {
+	protected List<DebugEventListener> listeners = new ArrayList<DebugEventListener>();
+
+	public DebugEventHub(DebugEventListener listener) {
+		listeners.add(listener);
+	}
+
+	public DebugEventHub(DebugEventListener a, DebugEventListener b) {
+		listeners.add(a);
+		listeners.add(b);
+	}
+
+	/** Add another listener to broadcast events to.  Not thread-safe.
+	 *  Don't add events in one thread while parser fires events in another.
+	 */
+	public void addListener(DebugEventListener listener) {
+		listeners.add(listener);
+	}
+	
+	/* To avoid a mess like this:
+		public void enterRule(final String ruleName) {
+			broadcast(new Code(){
+				public void exec(DebugEventListener listener) {listener.enterRule(ruleName);}}
+				);
+		}
+		I am dup'ing the for-loop in each.  Where are Java closures!? blech!
+	 */
+
+	@Override
+	public void enterRule(String grammarFileName, String ruleName) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.enterRule(grammarFileName,ruleName);
+		}
+	}
+
+	@Override
+	public void exitRule(String grammarFileName, String ruleName) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.exitRule(grammarFileName, ruleName);
+		}
+	}
+
+	@Override
+	public void enterAlt(int alt) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.enterAlt(alt);
+		}
+	}
+
+	@Override
+	public void enterSubRule(int decisionNumber) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.enterSubRule(decisionNumber);
+		}
+	}
+
+	@Override
+	public void exitSubRule(int decisionNumber) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.exitSubRule(decisionNumber);
+		}
+	}
+
+	@Override
+	public void enterDecision(int decisionNumber, boolean couldBacktrack) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.enterDecision(decisionNumber, couldBacktrack);
+		}
+	}
+
+	@Override
+	public void exitDecision(int decisionNumber) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.exitDecision(decisionNumber);
+		}
+	}
+
+	@Override
+	public void location(int line, int pos) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.location(line, pos);
+		}
+	}
+
+	@Override
+	public void consumeToken(Token token) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.consumeToken(token);
+		}
+	}
+
+	@Override
+	public void consumeHiddenToken(Token token) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.consumeHiddenToken(token);
+		}
+	}
+
+	@Override
+	public void LT(int index, Token t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.LT(index, t);
+		}
+	}
+
+	@Override
+	public void mark(int index) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.mark(index);
+		}
+	}
+
+	@Override
+	public void rewind(int index) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.rewind(index);
+		}
+	}
+
+	@Override
+	public void rewind() {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.rewind();
+		}
+	}
+
+	@Override
+	public void beginBacktrack(int level) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.beginBacktrack(level);
+		}
+	}
+
+	@Override
+	public void endBacktrack(int level, boolean successful) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.endBacktrack(level, successful);
+		}
+	}
+
+	@Override
+	public void recognitionException(RecognitionException e) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.recognitionException(e);
+		}
+	}
+
+	@Override
+	public void beginResync() {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.beginResync();
+		}
+	}
+
+	@Override
+	public void endResync() {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.endResync();
+		}
+	}
+
+	@Override
+	public void semanticPredicate(boolean result, String predicate) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.semanticPredicate(result, predicate);
+		}
+	}
+
+	@Override
+	public void commence() {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.commence();
+		}
+	}
+
+	@Override
+	public void terminate() {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.terminate();
+		}
+	}
+
+
+	// Tree parsing stuff
+
+	@Override
+	public void consumeNode(Object t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.consumeNode(t);
+		}
+	}
+
+	@Override
+	public void LT(int index, Object t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.LT(index, t);
+		}
+	}
+
+
+	// AST Stuff
+
+	@Override
+	public void nilNode(Object t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.nilNode(t);
+		}
+	}
+
+	@Override
+	public void errorNode(Object t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.errorNode(t);
+		}
+	}
+
+	@Override
+	public void createNode(Object t) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.createNode(t);
+		}
+	}
+
+	@Override
+	public void createNode(Object node, Token token) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.createNode(node, token);
+		}
+	}
+
+	@Override
+	public void becomeRoot(Object newRoot, Object oldRoot) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.becomeRoot(newRoot, oldRoot);
+		}
+	}
+
+	@Override
+	public void addChild(Object root, Object child) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.addChild(root, child);
+		}
+	}
+
+	@Override
+	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
+		for (int i = 0; i < listeners.size(); i++) {
+			DebugEventListener listener = listeners.get(i);
+			listener.setTokenBoundaries(t, tokenStartIndex, tokenStopIndex);
+		}
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventListener.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventListener.java
new file mode 100644
index 0000000..4b4a1c5
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventListener.java
@@ -0,0 +1,323 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+
+/** All debugging events that a recognizer can trigger.
+ *
+ *  I did not create a separate AST debugging interface as it would create
+ *  lots of extra classes and DebugParser has a dbg var defined, which makes
+ *  it hard to change to ASTDebugEventListener.  I looked hard at this issue
+ *  and it is easier to understand as one monolithic event interface for all
+ *  possible events.  Hopefully, adding ST debugging stuff won't be bad.  Leave
+ *  for future. 4/26/2006.
+ */
+public interface DebugEventListener {
+	/** Moved to version 2 for v3.1: added grammar name to enter/exit Rule */
+	public static final String PROTOCOL_VERSION = "2";
+	
+	/** serialized version of true */
+	public static final int TRUE = 1;
+	public static final int FALSE = 0;
+
+	/** The parser has just entered a rule.  No decision has been made about
+	 *  which alt is predicted.  This is fired AFTER init actions have been
+	 *  executed.  Attributes are defined and available etc...
+	 *  The grammarFileName allows composite grammars to jump around among
+	 *  multiple grammar files.
+	 */
+	public void enterRule(String grammarFileName, String ruleName);
+
+	/** Because rules can have lots of alternatives, it is very useful to
+	 *  know which alt you are entering.  This is 1..n for n alts.
+	 */
+	public void enterAlt(int alt);
+
+	/** This is the last thing executed before leaving a rule.  It is
+	 *  executed even if an exception is thrown.  This is triggered after
+	 *  error reporting and recovery have occurred (unless the exception is
+	 *  not caught in this rule).  This implies an "exitAlt" event.
+	 *  The grammarFileName allows composite grammars to jump around among
+	 *  multiple grammar files.
+	 */
+	public void exitRule(String grammarFileName, String ruleName);
+
+	/** Track entry into any (...) subrule or other EBNF construct */
+	public void enterSubRule(int decisionNumber);
+
+	public void exitSubRule(int decisionNumber);
+
+	/** Every decision, fixed k or arbitrary, has an enter/exit event
+	 *  so that a GUI can easily track what LT/consume events are
+	 *  associated with prediction.  You will see a single enter/exit
+	 *  subrule but multiple enter/exit decision events, one for each
+	 *  loop iteration.
+	 */
+	public void enterDecision(int decisionNumber, boolean couldBacktrack);
+
+	public void exitDecision(int decisionNumber);
+
+	/** An input token was consumed; matched by any kind of element.
+	 *  Trigger after the token was matched by things like match(), matchAny().
+	 */
+	public void consumeToken(Token t);
+
+	/** An off-channel input token was consumed.
+	 *  Trigger after the token was matched by things like match(), matchAny().
+	 *  (unless of course the hidden token is the first thing in the input stream).
+	 */
+	public void consumeHiddenToken(Token t);
+
+	/** Somebody (anybody) looked ahead.  Note that this actually gets
+	 *  triggered by both LA and LT calls.  The debugger will want to know
+	 *  which Token object was examined.  Like consumeToken, this indicates
+	 *  what token was seen at that depth.  A remote debugger cannot look
+	 *  ahead into a file it doesn't have so LT events must pass the token
+	 *  even if the info is redundant.
+	 */
+	public void LT(int i, Token t);
+
+	/** The parser is going to look arbitrarily ahead; mark this location,
+	 *  the token stream's marker is sent in case you need it.
+	 */
+	public void mark(int marker);
+
+	/** After an arbitrarily long lookahead as with a cyclic DFA (or with
+	 *  any backtrack), this informs the debugger that stream should be
+	 *  rewound to the position associated with marker.
+	 */
+	public void rewind(int marker);
+
+	/** Rewind to the input position of the last marker.
+	 *  Used currently only after a cyclic DFA and just
+	 *  before starting a sem/syn predicate to get the
+	 *  input position back to the start of the decision.
+	 *  Do not "pop" the marker off the state.  mark(i)
+	 *  and rewind(i) should balance still.
+	 */
+	public void rewind();
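+	// Illustrative mark/rewind pairing (names are generic, as produced by a debug
+	// token or node stream):
+	//     int m = input.mark();     // fires the mark(m) event
+	//     ... speculative lookahead ...
+	//     input.rewind(m);          // fires the rewind(m) event; mark and rewind stay balanced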
+
+	public void beginBacktrack(int level);
+
+	public void endBacktrack(int level, boolean successful);
+
+	/** To watch a parser move through the grammar, the parser needs to
+	 *  inform the debugger what line/charPos it is passing in the grammar.
+	 *  For now, this does not know how to switch from one grammar to the
+	 *  other and back for island grammars etc...
+	 *
+	 *  This should also allow breakpoints because the debugger can stop
+	 *  the parser whenever it hits this line/pos.
+	 */
+	public void location(int line, int pos);
+
+	/** A recognition exception occurred such as NoViableAltException.  I made
+	 *  this a generic event so that I can alter the exception hierarchy later
+	 *  without having to alter all the debug objects.
+	 *
+	 *  Upon error, the stack of enter rule/subrule must be properly unwound.
+	 *  If a no-viable-alt occurs, it is within an enter/exit decision, which
+	 *  also must be unwound.  Even the rewind for each mark must be issued.
+	 *  In the Java target this is pretty easy using try/finally, if a bit
+	 *  ugly in the generated code.  The rewind is generated in DFA.predict()
+	 *  actually so no code needs to be generated for that.  For languages
+	 *  w/o this "finally" feature (C++?), the target implementor will have
+	 *  to build an event stack or something.
+	 *
+	 *  Across a socket for remote debugging, only the RecognitionException
+	 *  data fields are transmitted.  The token object or whatever that
+	 *  caused the problem was the last object referenced by LT.  The
+	 *  immediately preceding LT event should hold the unexpected Token or
+	 *  char.
+	 *
+	 *  Here is a sample event trace for grammar:
+	 *
+	 *  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
+     *    | D
+     *    ;
+     *
+	 *  The sequence for this rule (with no viable alt in the subrule) for
+	 *  input 'c c' (there are 3 tokens) is:
+	 *
+	 *		commence
+	 *		LT(1)
+	 *		enterRule b
+	 *		location 7 1
+	 *		enter decision 3
+	 *		LT(1)
+	 *		exit decision 3
+	 *		enterAlt1
+	 *		location 7 5
+	 *		LT(1)
+	 *		consumeToken [c/&lt;4&gt;,1:0]
+	 *		location 7 7
+	 *		enterSubRule 2
+	 *		enter decision 2
+	 *		LT(1)
+	 *		LT(1)
+	 *		recognitionException NoViableAltException 2 1 2
+	 *		exit decision 2
+	 *		exitSubRule 2
+	 *		beginResync
+	 *		LT(1)
+	 *		consumeToken [c/&lt;4&gt;,1:1]
+	 *		LT(1)
+	 *		endResync
+	 *		LT(-1)
+	 *		exitRule b
+	 *		terminate
+	 */
+	public void recognitionException(RecognitionException e);
+
+	/** Indicates the recognizer is about to consume tokens to resynchronize
+	 *  the parser.  Any consume events from here until the recovered event
+	 *  are not part of the parse--they are dead tokens.
+	 */
+	public void beginResync();
+
+	/** Indicates that the recognizer has finished consuming tokens in order
+	 *  to resynchronize.  There may be multiple beginResync/endResync pairs
+	 *  before the recognizer comes out of errorRecovery mode (in which
+	 *  multiple errors are suppressed).  This will be useful
+	 *  in a GUI where you probably want to grey out tokens that are consumed
+	 *  but not matched to anything in the grammar.  Anything between
+	 *  a beginResync/endResync pair was tossed out by the parser.
+	 */
+	public void endResync();
+
+	/** A semantic predicate was evaluated with this result and action text */
+	public void semanticPredicate(boolean result, String predicate);
+
+	/** Announce that parsing has begun.  Not technically useful except for
+	 *  sending events over a socket.  A GUI for example will launch a thread
+	 *  to connect and communicate with a remote parser.  The thread will want
+	 *  to notify the GUI when a connection is made.  ANTLR parsers
+	 *  trigger this upon entry to the first rule (the ruleLevel is used to
+	 *  figure this out).
+	 */
+	public void commence();
+
+	/** Parsing is over, successfully or not.  Mostly useful for telling
+	 *  remote debugging listeners that it's time to quit.  When the rule
+	 *  invocation level goes to zero at the end of a rule, we are done
+	 *  parsing.
+	 */
+	public void terminate();
+
+
+	// T r e e  P a r s i n g
+
+	/** Input for a tree parser is an AST, but we know nothing for sure
+	 *  about a node except its type and text (obtained from the adaptor).
+	 *  This is the analog of the consumeToken method.  Again, the ID is
+	 *  usually the node's hashCode, so it only works if hashCode is not
+	 *  overridden.  If the type is UP or DOWN, then
+	 *  the ID is not really meaningful as it's fixed--there is
+	 *  just one UP node and one DOWN navigation node.
+	 * @param t
+	 */
+	public void consumeNode(Object t);
+
+	/** The tree parser looked ahead.  If the type is UP or DOWN,
+	 *  then the ID is not really meaningful as it's fixed--there is
+	 *  just one UP node and one DOWN navigation node.
+	 */
+	public void LT(int i, Object t);
+
+
+	// A S T  E v e n t s
+
+	/** A nil was created (even nil nodes have a unique ID...
+	 *  they are not "null" per se).  As of 4/28/2006, this
+	 *  seems to be uniquely triggered when starting a new subtree
+	 *  such as when entering a subrule in automatic mode and when
+	 *  building a tree in rewrite mode.
+     *
+ 	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID is set.
+	 */
+	public void nilNode(Object t);
+
+	/** Upon syntax error, recognizers bracket the error with an error node
+	 *  if they are building ASTs.
+	 * @param t
+	 */
+	public void errorNode(Object t);
+
+	/** Announce a new node built from token elements such as type etc...
+	 * 
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID, type, text are
+	 *  set.
+	 */
+	public void createNode(Object t);
+
+	/** Announce a new node built from an existing token.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only node.ID and token.tokenIndex
+	 *  are set.
+	 */
+	public void createNode(Object node, Token token);
+
+	/** Make a node the new root of an existing root.
+	 *
+	 *  Note: the newRootID parameter is possibly different
+	 *  from the TreeAdaptor.becomeRoot() newRoot parameter.
+	 *  In our case, it will always be the result of calling
+	 *  TreeAdaptor.becomeRoot() and not root_n or whatever.
+	 *
+	 *  The listener should assume that this event occurs
+	 *  only when the current subrule (or rule) subtree is
+	 *  being reset to newRootID.
+	 * 
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only IDs are set.
+	 *
+	 *  @see org.antlr.runtime.tree.TreeAdaptor#becomeRoot
+	 */
+	public void becomeRoot(Object newRoot, Object oldRoot);
+
+	/** Make childID a child of rootID.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only IDs are set.
+	 * 
+	 *  @see org.antlr.runtime.tree.TreeAdaptor#addChild
+	 */
+	public void addChild(Object root, Object child);
+
+	/** Set the token start/stop token index for a subtree root or node.
+	 *
+	 *  If you are receiving this event over a socket via
+	 *  RemoteDebugEventSocketListener then only t.ID is set.
+	 */
+	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex);
+}
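+
+// Illustrative note (not part of this file): rather than implementing every method of
+// this interface, listeners typically extend a helper in this package such as
+// BlankDebugEventListener or DebugEventRepeater and override only what they need, e.g.:
+//
+//     DebugEventListener tracer = new BlankDebugEventListener() {
+//         @Override
+//         public void enterRule(String grammarFileName, String ruleName) {
+//             System.out.println("enter " + ruleName);
+//         }
+//     };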
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventRepeater.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventRepeater.java
new file mode 100644
index 0000000..92af59b
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventRepeater.java
@@ -0,0 +1,88 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.RecognitionException;
+
+/** A simple event repeater (proxy) that delegates all functionality to the
+ *  listener sent into the ctor.  Useful if you want to listen in on a few
+ *  debug events w/o interrupting the debugger.  Just subclass the repeater
+ *  and override the methods you want to listen in on.  Remember to call
+ *  the method in this class so the event will continue on to the original
+ *  recipient.
+ *
+ *  @see DebugEventHub
+ */
+public class DebugEventRepeater implements DebugEventListener {
+	protected DebugEventListener listener;
+
+	public DebugEventRepeater(DebugEventListener listener) {
+		this.listener = listener;
+	}
+	
+	@Override public void enterRule(String grammarFileName, String ruleName) { listener.enterRule(grammarFileName, ruleName); }
+	@Override public void exitRule(String grammarFileName, String ruleName) { listener.exitRule(grammarFileName, ruleName); }
+	@Override public void enterAlt(int alt) { listener.enterAlt(alt); }
+	@Override public void enterSubRule(int decisionNumber) { listener.enterSubRule(decisionNumber); }
+	@Override public void exitSubRule(int decisionNumber) { listener.exitSubRule(decisionNumber); }
+	@Override public void enterDecision(int decisionNumber, boolean couldBacktrack) { listener.enterDecision(decisionNumber, couldBacktrack); }
+	@Override public void exitDecision(int decisionNumber) { listener.exitDecision(decisionNumber); }
+	@Override public void location(int line, int pos) { listener.location(line, pos); }
+	@Override public void consumeToken(Token token) { listener.consumeToken(token); }
+	@Override public void consumeHiddenToken(Token token) { listener.consumeHiddenToken(token); }
+	@Override public void LT(int i, Token t) { listener.LT(i, t); }
+	@Override public void mark(int i) { listener.mark(i); }
+	@Override public void rewind(int i) { listener.rewind(i); }
+	@Override public void rewind() { listener.rewind(); }
+	@Override public void beginBacktrack(int level) { listener.beginBacktrack(level); }
+	@Override public void endBacktrack(int level, boolean successful) { listener.endBacktrack(level, successful); }
+	@Override public void recognitionException(RecognitionException e) { listener.recognitionException(e); }
+	@Override public void beginResync() { listener.beginResync(); }
+	@Override public void endResync() { listener.endResync(); }
+	@Override public void semanticPredicate(boolean result, String predicate) { listener.semanticPredicate(result, predicate); }
+	@Override public void commence() { listener.commence(); }
+	@Override public void terminate() { listener.terminate(); }
+
+	// Tree parsing stuff
+
+	@Override public void consumeNode(Object t) { listener.consumeNode(t); }
+	@Override public void LT(int i, Object t) { listener.LT(i, t); }
+
+	// AST Stuff
+
+	@Override public void nilNode(Object t) { listener.nilNode(t); }
+	@Override public void errorNode(Object t) { listener.errorNode(t); }
+	@Override public void createNode(Object t) { listener.createNode(t); }
+	@Override public void createNode(Object node, Token token) { listener.createNode(node, token); }
+	@Override public void becomeRoot(Object newRoot, Object oldRoot) { listener.becomeRoot(newRoot, oldRoot); }
+	@Override public void addChild(Object root, Object child) { listener.addChild(root, child); }
+	@Override public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
+		listener.setTokenBoundaries(t, tokenStartIndex, tokenStopIndex);
+	}
+}
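+
+// Illustrative usage sketch (hypothetical subclass, not part of the runtime): listen in
+// on one event and forward everything, per the class comment above.
+//
+//     class TokenCountingRepeater extends DebugEventRepeater {
+//         int count = 0;
+//         TokenCountingRepeater(DebugEventListener listener) { super(listener); }
+//         @Override public void consumeToken(Token token) {
+//             count++;                    // observe the event
+//             super.consumeToken(token);  // then pass it on to the original listener
+//         }
+//     }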
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventSocketProxy.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventSocketProxy.java
new file mode 100644
index 0000000..8590368
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugEventSocketProxy.java
@@ -0,0 +1,385 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.BaseRecognizer;
+import org.antlr.runtime.tree.TreeAdaptor;
+
+import java.io.*;
+import java.net.ServerSocket;
+import java.net.Socket;
+
+/** A proxy debug event listener that forwards events over a socket to
+ *  a debugger (or any other listener) using a simple text-based protocol;
+ *  one event per line.  ANTLRWorks listens on server socket with a
+ *  RemoteDebugEventSocketListener instance.  These two objects must therefore
+ *  be kept in sync.  New events must be handled on both sides of socket.
+ */
+public class DebugEventSocketProxy extends BlankDebugEventListener {
+	public static final int DEFAULT_DEBUGGER_PORT = 49100; // was 49153
+	protected int port = DEFAULT_DEBUGGER_PORT;
+	protected ServerSocket serverSocket;
+	protected Socket socket;
+	protected String grammarFileName;
+	protected PrintWriter out;
+	protected BufferedReader in;
+
+	/** Who am I debugging? */
+	protected BaseRecognizer recognizer;
+
+	/** Almost certainly the recognizer will have an adaptor set, but
+	 *  we don't know how to cast it (Parser or TreeParser) to get
+	 *  the adaptor field.  It must be set via the constructor. :(
+	 */
+	protected TreeAdaptor adaptor;
+
+	public DebugEventSocketProxy(BaseRecognizer recognizer, TreeAdaptor adaptor) {
+		this(recognizer, DEFAULT_DEBUGGER_PORT, adaptor);
+	}
+
+	public DebugEventSocketProxy(BaseRecognizer recognizer, int port, TreeAdaptor adaptor) {
+		this.grammarFileName = recognizer.getGrammarFileName();
+		this.adaptor = adaptor;
+		this.port = port;
+	}
+
+	public void handshake() throws IOException {
+		if ( serverSocket==null ) {
+			serverSocket = new ServerSocket(port);
+			socket = serverSocket.accept();
+			socket.setTcpNoDelay(true);
+			OutputStream os = socket.getOutputStream();
+			OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
+			out = new PrintWriter(new BufferedWriter(osw));
+			InputStream is = socket.getInputStream();
+			InputStreamReader isr = new InputStreamReader(is, "UTF8");
+			in = new BufferedReader(isr);
+			out.println("ANTLR "+ DebugEventListener.PROTOCOL_VERSION);
+			out.println("grammar \""+ grammarFileName);
+			out.flush();
+			ack();
+		}
+	}
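+	// Handshake as seen on the wire (illustrative; <grammarFileName> is a placeholder):
+	// the proxy writes two header lines and then ack() blocks on a single reply line
+	// from the remote listener before any events flow.
+	//     ANTLR 2
+	//     grammar "<grammarFileName>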
+
+	@Override
+	public void commence() {
+		// don't bother sending event; listener will trigger upon connection
+	}
+
+	@Override
+	public void terminate() {
+		transmit("terminate");
+		out.close();
+		try {
+			socket.close();
+		}
+		catch (IOException ioe) {
+			ioe.printStackTrace(System.err);
+		}
+	}
+
+	protected void ack() {
+		try {
+			in.readLine();
+		}
+		catch (IOException ioe) {
+			ioe.printStackTrace(System.err);
+		}
+	}
+
+	protected void transmit(String event) {
+		out.println(event);
+		out.flush();
+		ack();
+	}
+
+	@Override
+	public void enterRule(String grammarFileName, String ruleName) {
+		transmit("enterRule\t"+grammarFileName+"\t"+ruleName);
+	}
+
+	@Override
+	public void enterAlt(int alt) {
+		transmit("enterAlt\t"+alt);
+	}
+
+	@Override
+	public void exitRule(String grammarFileName, String ruleName) {
+		transmit("exitRule\t"+grammarFileName+"\t"+ruleName);
+	}
+
+	@Override
+	public void enterSubRule(int decisionNumber) {
+		transmit("enterSubRule\t"+decisionNumber);
+	}
+
+	@Override
+	public void exitSubRule(int decisionNumber) {
+		transmit("exitSubRule\t"+decisionNumber);
+	}
+
+	@Override
+	public void enterDecision(int decisionNumber, boolean couldBacktrack) {
+		transmit("enterDecision\t"+decisionNumber+"\t"+couldBacktrack);
+	}
+
+	@Override
+	public void exitDecision(int decisionNumber) {
+		transmit("exitDecision\t"+decisionNumber);
+	}
+
+	@Override
+	public void consumeToken(Token t) {
+		String buf = serializeToken(t);
+		transmit("consumeToken\t"+buf);
+	}
+
+	@Override
+	public void consumeHiddenToken(Token t) {
+		String buf = serializeToken(t);
+		transmit("consumeHiddenToken\t"+buf);
+	}
+
+	@Override
+	public void LT(int i, Token t) {
+        if(t != null)
+            transmit("LT\t"+i+"\t"+serializeToken(t));
+	}
+
+	@Override
+	public void mark(int i) {
+		transmit("mark\t"+i);
+	}
+
+	@Override
+	public void rewind(int i) {
+		transmit("rewind\t"+i);
+	}
+
+	@Override
+	public void rewind() {
+		transmit("rewind");
+	}
+
+	@Override
+	public void beginBacktrack(int level) {
+		transmit("beginBacktrack\t"+level);
+	}
+
+	@Override
+	public void endBacktrack(int level, boolean successful) {
+		transmit("endBacktrack\t"+level+"\t"+(successful?TRUE:FALSE));
+	}
+
+	@Override
+	public void location(int line, int pos) {
+		transmit("location\t"+line+"\t"+pos);
+	}
+
+	@Override
+	public void recognitionException(RecognitionException e) {
+		StringBuilder buf = new StringBuilder(50);
+		buf.append("exception\t");
+		buf.append(e.getClass().getName());
+		// dump only the data common to all exceptions for now
+		buf.append("\t");
+		buf.append(e.index);
+		buf.append("\t");
+		buf.append(e.line);
+		buf.append("\t");
+		buf.append(e.charPositionInLine);
+		transmit(buf.toString());
+	}
+
+	@Override
+	public void beginResync() {
+		transmit("beginResync");
+	}
+
+	@Override
+	public void endResync() {
+		transmit("endResync");
+	}
+
+	@Override
+	public void semanticPredicate(boolean result, String predicate) {
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("semanticPredicate\t");
+		buf.append(result);
+		serializeText(buf, predicate);
+		transmit(buf.toString());
+	}
+
+	// A S T  P a r s i n g  E v e n t s
+
+	@Override
+	public void consumeNode(Object t) {
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("consumeNode");
+		serializeNode(buf, t);
+		transmit(buf.toString());
+	}
+
+	@Override
+	public void LT(int i, Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("LN\t"); // lookahead node; distinguish from LT in protocol
+		buf.append(i);
+		serializeNode(buf, t);
+		transmit(buf.toString());
+	}
+
+	protected void serializeNode(StringBuffer buf, Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		buf.append("\t");
+		buf.append(ID);
+		buf.append("\t");
+		buf.append(type);
+		Token token = adaptor.getToken(t);
+		int line = -1;
+		int pos = -1;
+		if ( token!=null ) {
+			line = token.getLine();
+			pos = token.getCharPositionInLine();
+		}
+		buf.append("\t");
+		buf.append(line);
+		buf.append("\t");
+		buf.append(pos);
+		int tokenIndex = adaptor.getTokenStartIndex(t);
+		buf.append("\t");
+		buf.append(tokenIndex);
+		serializeText(buf, text);
+	}
+
+
+	// A S T  E v e n t s
+
+	@Override
+	public void nilNode(Object t) {
+		int ID = adaptor.getUniqueID(t);
+		transmit("nilNode\t"+ID);
+	}
+
+	@Override
+	public void errorNode(Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = t.toString();
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("errorNode\t");
+		buf.append(ID);
+		buf.append("\t");
+		buf.append(Token.INVALID_TOKEN_TYPE);
+		serializeText(buf, text);
+		transmit(buf.toString());
+	}
+
+	@Override
+	public void createNode(Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		StringBuffer buf = new StringBuffer(50);
+		buf.append("createNodeFromTokenElements\t");
+		buf.append(ID);
+		buf.append("\t");
+		buf.append(type);
+		serializeText(buf, text);
+		transmit(buf.toString());
+	}
+
+	@Override
+	public void createNode(Object node, Token token) {
+		int ID = adaptor.getUniqueID(node);
+		int tokenIndex = token.getTokenIndex();
+		transmit("createNode\t"+ID+"\t"+tokenIndex);
+	}
+
+	@Override
+	public void becomeRoot(Object newRoot, Object oldRoot) {
+		int newRootID = adaptor.getUniqueID(newRoot);
+		int oldRootID = adaptor.getUniqueID(oldRoot);
+		transmit("becomeRoot\t"+newRootID+"\t"+oldRootID);
+	}
+
+	@Override
+	public void addChild(Object root, Object child) {
+		int rootID = adaptor.getUniqueID(root);
+		int childID = adaptor.getUniqueID(child);
+		transmit("addChild\t"+rootID+"\t"+childID);
+	}
+
+	@Override
+	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
+		int ID = adaptor.getUniqueID(t);
+		transmit("setTokenBoundaries\t"+ID+"\t"+tokenStartIndex+"\t"+tokenStopIndex);
+	}
+
+
+    // support
+
+    public void setTreeAdaptor(TreeAdaptor adaptor) { this.adaptor = adaptor; }
+    public TreeAdaptor getTreeAdaptor() { return adaptor; }
+
+    protected String serializeToken(Token t) {
+        StringBuffer buf = new StringBuffer(50);
+        buf.append(t.getTokenIndex()); buf.append('\t');
+        buf.append(t.getType()); buf.append('\t');
+        buf.append(t.getChannel()); buf.append('\t');
+        buf.append(t.getLine()); buf.append('\t');
+		buf.append(t.getCharPositionInLine());
+		serializeText(buf, t.getText());
+		return buf.toString();
+	}
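+	// Serialized token layout (illustrative values): index, type, channel, line and
+	// character position separated by tabs, followed by the quoted, escaped text:
+	//     3	4	0	1	0	"c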
+
+	protected void serializeText(StringBuffer buf, String text) {
+		buf.append("\t\"");
+		if ( text==null ) {
+			text = "";
+		}
+		// escape \n and \r so the text for a token appears to exist on one line
+		// this escape is slow but easy to understand
+		text = escapeNewlines(text);
+		buf.append(text);
+	}
+
+	protected String escapeNewlines(String txt) {
+		txt = txt.replaceAll("%","%25");   // escape all escape char ;)
+		txt = txt.replaceAll("\n","%0A");  // escape \n
+		txt = txt.replaceAll("\r","%0D");  // escape \r
+		return txt;
+	}
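+	// Worked example: escapeNewlines("a\nb%c") first rewrites '%' to "%25" ("a\nb%25c"),
+	// then '\n' to "%0A", yielding "a%0Ab%25c".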
+}
+
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugParser.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugParser.java
new file mode 100644
index 0000000..042609f
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugParser.java
@@ -0,0 +1,101 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.*;
+
+import java.io.IOException;
+
+public class DebugParser extends Parser {
+	/** Who to notify when events in the parser occur. */
+	protected DebugEventListener dbg = null;
+
+	/** Used to differentiate between fixed lookahead and cyclic DFA decisions
+	 *  while profiling.
+ 	 */
+	public boolean isCyclicDecision = false;
+
+	/** Create a normal parser except wrap the token stream in a debug
+	 *  proxy that fires consume events.
+	 */
+	public DebugParser(TokenStream input, DebugEventListener dbg, RecognizerSharedState state) {
+		super(input instanceof DebugTokenStream?input:new DebugTokenStream(input,dbg), state);
+		setDebugListener(dbg);
+	}
+
+	public DebugParser(TokenStream input, RecognizerSharedState state) {
+		super(input instanceof DebugTokenStream?input:new DebugTokenStream(input,null), state);
+	}
+
+	public DebugParser(TokenStream input, DebugEventListener dbg) {
+		this(input instanceof DebugTokenStream?input:new DebugTokenStream(input,dbg), dbg, null);
+	}
+
+	/** Provide a new debug event listener for this parser.  Notify the
+	 *  input stream too that it should send events to this listener.
+	 */
+	public void setDebugListener(DebugEventListener dbg) {
+		if ( input instanceof DebugTokenStream ) {
+			((DebugTokenStream)input).setDebugListener(dbg);
+		}
+		this.dbg = dbg;
+	}
+
+	public DebugEventListener getDebugListener() {
+		return dbg;
+	}
+
+	public void reportError(IOException e) {
+		System.err.println(e);
+		e.printStackTrace(System.err);
+	}
+
+	@Override
+	public void beginResync() {
+		dbg.beginResync();
+	}
+
+	@Override
+	public void endResync() {
+		dbg.endResync();
+	}
+
+	public void beginBacktrack(int level) {
+		dbg.beginBacktrack(level);
+	}
+
+	public void endBacktrack(int level, boolean successful) {
+		dbg.endBacktrack(level,successful);		
+	}
+
+	@Override
+	public void reportError(RecognitionException e) {
+		super.reportError(e);
+		dbg.recognitionException(e);
+	}
+}
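+
+// Illustrative wiring sketch (parsers generated with -debug do the equivalent
+// internally; "parser" and "adaptor" are placeholders):
+//
+//     DebugEventSocketProxy proxy = new DebugEventSocketProxy(parser, adaptor);
+//     parser.setDebugListener(proxy);
+//     proxy.handshake();  // throws IOException; blocks until ANTLRWorks (or another
+//                         // remote listener) connects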
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTokenStream.java
new file mode 100644
index 0000000..8fec032
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTokenStream.java
@@ -0,0 +1,171 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.*;
+
+public class DebugTokenStream implements TokenStream {
+	protected DebugEventListener dbg;
+	public TokenStream input;
+	protected boolean initialStreamState = true;
+
+	/** Track the last mark() call result value for use in rewind(). */
+	protected int lastMarker;
+
+	public DebugTokenStream(TokenStream input, DebugEventListener dbg) {
+		this.input = input;
+		setDebugListener(dbg);
+		// force TokenStream to get at least first valid token
+		// so we know if there are any hidden tokens first in the stream
+		input.LT(1);
+	}
+
+	public void setDebugListener(DebugEventListener dbg) {
+		this.dbg = dbg;
+	}
+
+	@Override
+	public void consume() {
+		if ( initialStreamState ) {
+			consumeInitialHiddenTokens();
+		}
+		int a = input.index();
+		Token t = input.LT(1);
+		input.consume();
+		int b = input.index();
+		dbg.consumeToken(t);
+		if ( b>a+1 ) {
+			// then we consumed more than one token; must be off channel tokens
+			for (int i=a+1; i<b; i++) {
+				dbg.consumeHiddenToken(input.get(i));
+			}
+		}
+	}
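+	// Example (illustrative): with off-channel whitespace and input "a  b", consuming
+	// 'a' fires consumeToken(a) followed by consumeHiddenToken(ws) for the whitespace
+	// skipped on the way to 'b'.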
+
+	/* consume all initial off-channel tokens */
+	protected void consumeInitialHiddenTokens() {
+		int firstOnChannelTokenIndex = input.index();
+		for (int i=0; i<firstOnChannelTokenIndex; i++) {
+			dbg.consumeHiddenToken(input.get(i));
+		}
+		initialStreamState = false;
+	}
+
+	@Override
+	public Token LT(int i) {
+		if ( initialStreamState ) {
+			consumeInitialHiddenTokens();
+		}
+		dbg.LT(i, input.LT(i));
+		return input.LT(i);
+	}
+
+	@Override
+	public int LA(int i) {
+		if ( initialStreamState ) {
+			consumeInitialHiddenTokens();
+		}
+		dbg.LT(i, input.LT(i));
+		return input.LA(i);
+	}
+
+	@Override
+	public Token get(int i) {
+		return input.get(i);
+	}
+
+	@Override
+	public int mark() {
+		lastMarker = input.mark();
+		dbg.mark(lastMarker);
+		return lastMarker;
+	}
+
+	@Override
+	public int index() {
+		return input.index();
+	}
+
+	@Override
+	public int range() {
+		return input.range();
+	}
+
+	@Override
+	public void rewind(int marker) {
+		dbg.rewind(marker);
+		input.rewind(marker);
+	}
+
+	@Override
+	public void rewind() {
+		dbg.rewind();
+		input.rewind(lastMarker);
+	}
+
+	@Override
+	public void release(int marker) {
+	}
+
+	@Override
+	public void seek(int index) {
+		// TODO: implement seek in dbg interface
+		// db.seek(index);
+		input.seek(index);
+	}
+
+	@Override
+	public int size() {
+		return input.size();
+	}
+
+	@Override
+	public TokenSource getTokenSource() {
+		return input.getTokenSource();
+	}
+
+	@Override
+	public String getSourceName() {
+		return getTokenSource().getSourceName();
+	}
+
+	@Override
+	public String toString() {
+		return input.toString();
+	}
+
+	@Override
+	public String toString(int start, int stop) {
+		return input.toString(start,stop);
+	}
+
+	@Override
+	public String toString(Token start, Token stop) {
+		return input.toString(start,stop);
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java
new file mode 100644
index 0000000..bc1be66
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeAdaptor.java
@@ -0,0 +1,281 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.tree.TreeAdaptor;
+
+/** A TreeAdaptor proxy that fires debugging events to a DebugEventListener
+ *  delegate and uses the TreeAdaptor delegate to do the actual work.  All
+ *  AST events are triggered by this adaptor; no code gen changes are needed
+ *  in generated rules.  Debugging events are triggered *after* invoking
+ *  tree adaptor routines.
+ *
+ *  Trees created with actions in rewrite actions like "-&gt; ^(ADD {foo} {bar})"
+ *  cannot be tracked as they might not use the adaptor to create foo, bar.
+ *  The debug listener has to deal with tree node IDs for which it did
+ *  not see a createNode event.  A single &lt;unknown&gt; node is sufficient even
+ *  if it represents a whole tree.
+ */
+public class DebugTreeAdaptor implements TreeAdaptor {
+	protected DebugEventListener dbg;
+	protected TreeAdaptor adaptor;
+
+	public DebugTreeAdaptor(DebugEventListener dbg, TreeAdaptor adaptor) {
+		this.dbg = dbg;
+		this.adaptor = adaptor;
+	}
+
+	@Override
+	public Object create(Token payload) {
+		if ( payload.getTokenIndex() < 0 ) {
+			// could be token conjured up during error recovery
+			return create(payload.getType(), payload.getText());
+		}
+		Object node = adaptor.create(payload);
+		dbg.createNode(node, payload);
+		return node;
+	}
+
+	@Override
+	public Object errorNode(TokenStream input, Token start, Token stop,
+							RecognitionException e)
+	{
+		Object node = adaptor.errorNode(input, start, stop, e);
+		if ( node!=null ) {
+			dbg.errorNode(node);
+		}
+		return node;
+	}
+
+	@Override
+	public Object dupTree(Object tree) {
+		Object t = adaptor.dupTree(tree);
+		// walk the tree and emit create and add child events
+		// to simulate what dupTree has done. dupTree does not call this debug
+		// adapter so I must simulate.
+		simulateTreeConstruction(t);
+		return t;
+	}
+
+	/** ^(A B C): emit create A, create B, add child, ...*/
+	protected void simulateTreeConstruction(Object t) {
+		dbg.createNode(t);
+		int n = adaptor.getChildCount(t);
+		for (int i=0; i<n; i++) {
+			Object child = adaptor.getChild(t, i);
+			simulateTreeConstruction(child);
+			dbg.addChild(t, child);
+		}
+	}
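+	// For ^(A B C) the simulated event order is: createNode(A), createNode(B),
+	// addChild(A, B), createNode(C), addChild(A, C).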
+
+	@Override
+	public Object dupNode(Object treeNode) {
+		Object d = adaptor.dupNode(treeNode);
+		dbg.createNode(d);
+		return d;
+	}
+
+	@Override
+	public Object nil() {
+		Object node = adaptor.nil();
+		dbg.nilNode(node);
+		return node;
+	}
+
+	@Override
+	public boolean isNil(Object tree) {
+		return adaptor.isNil(tree);
+	}
+
+	@Override
+	public void addChild(Object t, Object child) {
+		if ( t==null || child==null ) {
+			return;
+		}
+		adaptor.addChild(t,child);
+		dbg.addChild(t, child);
+	}
+
+	@Override
+	public Object becomeRoot(Object newRoot, Object oldRoot) {
+		Object n = adaptor.becomeRoot(newRoot, oldRoot);
+		dbg.becomeRoot(newRoot, oldRoot);
+		return n;
+	}
+
+	@Override
+	public Object rulePostProcessing(Object root) {
+		return adaptor.rulePostProcessing(root);
+	}
+
+	public void addChild(Object t, Token child) {
+		Object n = this.create(child);
+		this.addChild(t, n);
+	}
+
+	@Override
+	public Object becomeRoot(Token newRoot, Object oldRoot) {
+		Object n = this.create(newRoot);
+		adaptor.becomeRoot(n, oldRoot);
+		dbg.becomeRoot(newRoot, oldRoot);
+		return n;
+	}
+
+	@Override
+	public Object create(int tokenType, Token fromToken) {
+		Object node = adaptor.create(tokenType, fromToken);
+		dbg.createNode(node);
+		return node;
+	}
+
+	@Override
+	public Object create(int tokenType, Token fromToken, String text) {
+		Object node = adaptor.create(tokenType, fromToken, text);
+		dbg.createNode(node);
+		return node;
+	}
+
+	@Override
+	public Object create(int tokenType, String text) {
+		Object node = adaptor.create(tokenType, text);
+		dbg.createNode(node);
+		return node;
+	}
+
+	@Override
+	public int getType(Object t) {
+		return adaptor.getType(t);
+	}
+
+	@Override
+	public void setType(Object t, int type) {
+		adaptor.setType(t, type);
+	}
+
+	@Override
+	public String getText(Object t) {
+		return adaptor.getText(t);
+	}
+
+	@Override
+	public void setText(Object t, String text) {
+		adaptor.setText(t, text);
+	}
+
+	@Override
+	public Token getToken(Object t) {
+		return adaptor.getToken(t);
+	}
+
+	@Override
+	public void setTokenBoundaries(Object t, Token startToken, Token stopToken) {
+		adaptor.setTokenBoundaries(t, startToken, stopToken);
+		if ( t!=null && startToken!=null && stopToken!=null ) {
+			dbg.setTokenBoundaries(
+				t, startToken.getTokenIndex(),
+				stopToken.getTokenIndex());
+		}
+	}
+
+	@Override
+	public int getTokenStartIndex(Object t) {
+		return adaptor.getTokenStartIndex(t);
+	}
+
+	@Override
+	public int getTokenStopIndex(Object t) {
+		return adaptor.getTokenStopIndex(t);
+	}
+
+	@Override
+	public Object getChild(Object t, int i) {
+		return adaptor.getChild(t, i);
+	}
+
+	@Override
+	public void setChild(Object t, int i, Object child) {
+		adaptor.setChild(t, i, child);
+	}
+
+	@Override
+	public Object deleteChild(Object t, int i) {
+		return adaptor.deleteChild(t, i);
+	}
+
+	@Override
+	public int getChildCount(Object t) {
+		return adaptor.getChildCount(t);
+	}
+
+	@Override
+	public int getUniqueID(Object node) {
+		return adaptor.getUniqueID(node);
+	}
+
+	@Override
+	public Object getParent(Object t) {
+		return adaptor.getParent(t);
+	}
+
+	@Override
+	public int getChildIndex(Object t) {
+		return adaptor.getChildIndex(t);
+	}
+
+	@Override
+	public void setParent(Object t, Object parent) {
+		adaptor.setParent(t, parent);
+	}
+
+	@Override
+	public void setChildIndex(Object t, int index) {
+		adaptor.setChildIndex(t, index);
+	}
+
+	@Override
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
+		adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t);
+	}
+
+	// support
+
+	public DebugEventListener getDebugListener() {
+		return dbg;
+	}
+
+	public void setDebugListener(DebugEventListener dbg) {
+		this.dbg = dbg;
+	}
+
+	public TreeAdaptor getTreeAdaptor() {
+		return adaptor;
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeNodeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeNodeStream.java
new file mode 100644
index 0000000..6b5f976
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeNodeStream.java
@@ -0,0 +1,174 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.tree.TreeAdaptor;
+import org.antlr.runtime.tree.TreeNodeStream;
+import org.antlr.runtime.TokenStream;
+
+/** Debug any tree node stream.  The constructor accepts the stream
+ *  and a debug listener.  As node stream calls come in, debug events
+ *  are triggered.
+ */
+public class DebugTreeNodeStream implements TreeNodeStream {
+	protected DebugEventListener dbg;
+	protected TreeAdaptor adaptor;
+	protected TreeNodeStream input;
+	protected boolean initialStreamState = true;
+
+	/** Track the last mark() call result value for use in rewind(). */
+	protected int lastMarker;
+
+	public DebugTreeNodeStream(TreeNodeStream input,
+							   DebugEventListener dbg)
+	{
+		this.input = input;
+		this.adaptor = input.getTreeAdaptor();
+		this.input.setUniqueNavigationNodes(true);
+		setDebugListener(dbg);
+	}
+
+	public void setDebugListener(DebugEventListener dbg) {
+		this.dbg = dbg;
+	}
+
+	@Override
+	public TreeAdaptor getTreeAdaptor() {
+		return adaptor;
+	}
+
+	@Override
+	public void consume() {
+		Object node = input.LT(1);
+		input.consume();
+		dbg.consumeNode(node);
+	}
+
+	@Override
+	public Object get(int i) {
+		return input.get(i);
+	}
+
+	@Override
+	public Object LT(int i) {
+		Object node = input.LT(i);
+		int ID = adaptor.getUniqueID(node);
+		String text = adaptor.getText(node);
+		int type = adaptor.getType(node);
+		dbg.LT(i, node);
+		return node;
+	}
+
+	@Override
+	public int LA(int i) {
+		Object node = input.LT(i);
+		int ID = adaptor.getUniqueID(node);
+		String text = adaptor.getText(node);
+		int type = adaptor.getType(node);
+		dbg.LT(i, node);
+		return type;
+	}
+
+	@Override
+	public int mark() {
+		lastMarker = input.mark();
+		dbg.mark(lastMarker);
+		return lastMarker;
+	}
+
+	@Override
+	public int index() {
+		return input.index();
+	}
+
+	@Override
+	public void rewind(int marker) {
+		dbg.rewind(marker);
+		input.rewind(marker);
+	}
+
+	@Override
+	public void rewind() {
+		dbg.rewind();
+		input.rewind(lastMarker);
+	}
+
+	@Override
+	public void release(int marker) {
+	}
+
+	@Override
+	public void seek(int index) {
+		// TODO: implement seek in dbg interface
+		// db.seek(index);
+		input.seek(index);
+	}
+
+	@Override
+	public int size() {
+		return input.size();
+	}
+
+	@Override
+    public void reset() { ; }
+
+	@Override
+    public Object getTreeSource() {
+		return input;
+	}
+
+	@Override
+	public String getSourceName() {
+		return getTokenStream().getSourceName();
+	}
+
+	@Override
+	public TokenStream getTokenStream() {
+		return input.getTokenStream();
+	}
+
+	/** It is normally this object that instructs the node stream to
+	 *  create unique nav nodes, but to satisfy the interface, we have to
+	 *  define it.  It might be better to ignore the parameter, but
+	 *  there might be a use for it later, so I'll leave it.
+	 */
+	@Override
+	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) {
+		input.setUniqueNavigationNodes(uniqueNavigationNodes);
+	}
+
+	@Override
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
+		input.replaceChildren(parent, startChildIndex, stopChildIndex, t);
+	}
+
+	@Override
+	public String toString(Object start, Object stop) {
+		return input.toString(start,stop);
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeParser.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeParser.java
new file mode 100644
index 0000000..f4445ac
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/DebugTreeParser.java
@@ -0,0 +1,113 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.TreeNodeStream;
+import org.antlr.runtime.tree.TreeParser;
+
+import java.io.IOException;
+
+public class DebugTreeParser extends TreeParser {
+	/** Who to notify when events in the parser occur. */
+	protected DebugEventListener dbg = null;
+
+	/** Used to differentiate between fixed lookahead and cyclic DFA decisions
+	 *  while profiling.
+	 */
+	public boolean isCyclicDecision = false;
+
+	/** Create a normal tree parser except wrap the tree node stream in a
+	 *  debug proxy that fires consume events.
+	 */
+	public DebugTreeParser(TreeNodeStream input, DebugEventListener dbg, RecognizerSharedState state) {
+		super(input instanceof DebugTreeNodeStream?input:new DebugTreeNodeStream(input,dbg), state);
+		setDebugListener(dbg);
+	}
+
+	public DebugTreeParser(TreeNodeStream input, RecognizerSharedState state) {
+		super(input instanceof DebugTreeNodeStream?input:new DebugTreeNodeStream(input,null), state);
+	}
+
+	public DebugTreeParser(TreeNodeStream input, DebugEventListener dbg) {
+		this(input instanceof DebugTreeNodeStream?input:new DebugTreeNodeStream(input,dbg), dbg, null);
+	}
+
+	/** Provide a new debug event listener for this parser.  Notify the
+	 *  input stream too that it should send events to this listener.
+	 */
+	public void setDebugListener(DebugEventListener dbg) {
+		if ( input instanceof DebugTreeNodeStream ) {
+			((DebugTreeNodeStream)input).setDebugListener(dbg);
+		}
+		this.dbg = dbg;
+	}
+
+	public DebugEventListener getDebugListener() {
+		return dbg;
+	}
+
+	public void reportError(IOException e) {
+		System.err.println(e);
+		e.printStackTrace(System.err);
+	}
+
+	@Override
+	public void reportError(RecognitionException e) {
+		dbg.recognitionException(e);
+	}
+
+	@Override
+	protected Object getMissingSymbol(IntStream input,
+									  RecognitionException e,
+									  int expectedTokenType,
+									  BitSet follow)
+	{
+		Object o = super.getMissingSymbol(input, e, expectedTokenType, follow);
+		dbg.consumeNode(o);
+		return o;
+	}
+
+	@Override
+	public void beginResync() {
+		dbg.beginResync();
+	}
+
+	@Override
+	public void endResync() {
+		dbg.endResync();
+	}
+
+	public void beginBacktrack(int level) {
+		dbg.beginBacktrack(level);
+	}
+
+	public void endBacktrack(int level, boolean successful) {
+		dbg.endBacktrack(level,successful);		
+	}
+}
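As a hedged usage sketch: tree walkers generated with ANTLR's -debug option extend DebugTreeParser, so the constructors above are normally invoked by generated code rather than by hand. MyTreeWalker and startRule below are placeholder names for such a generated walker and its entry rule; the only behavior taken from the code above is that a plain TreeNodeStream handed to the constructor is silently wrapped in a DebugTreeNodeStream.

    // Hypothetical generated class and rule names; illustrative only.
    CommonTreeNodeStream nodes = new CommonTreeNodeStream(ast);
    DebugEventListener listener = new BlankDebugEventListener() {
        @Override
        public void recognitionException(RecognitionException e) {
            System.err.println("walker error: " + e);
        }
    };
    MyTreeWalker walker = new MyTreeWalker(nodes, listener); // stream is wrapped automatically
    walker.startRule();                                      // hypothetical entry rule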
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/ParseTreeBuilder.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/ParseTreeBuilder.java
new file mode 100644
index 0000000..ac62cad
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/ParseTreeBuilder.java
@@ -0,0 +1,116 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.ParseTree;
+
+import java.util.Stack;
+import java.util.ArrayList;
+import java.util.List;
+
+/** This parser listener tracks rule entry/exit and token matches
+ *  to build a simple parse tree using ParseTree nodes.
+ */
+public class ParseTreeBuilder extends BlankDebugEventListener {
+	public static final String EPSILON_PAYLOAD = "<epsilon>";
+	
+	Stack<ParseTree> callStack = new Stack<ParseTree>();
+	List<Token> hiddenTokens = new ArrayList<Token>();
+	int backtracking = 0;
+
+	public ParseTreeBuilder(String grammarName) {
+		ParseTree root = create("<grammar "+grammarName+">");
+		callStack.push(root);
+	}
+
+	public ParseTree getTree() {
+		return callStack.elementAt(0);
+	}
+
+	/** What kind of node to create.  You might want to override this,
+	 *  so node creation is factored out here.
+	 */
+	public ParseTree create(Object payload) {
+		return new ParseTree(payload);
+	}
+
+	public ParseTree epsilonNode() {
+		return create(EPSILON_PAYLOAD);
+	}
+
+	/** While backtracking or in a cyclic DFA decision, don't add nodes to the tree */
+	@Override
+	public void enterDecision(int d, boolean couldBacktrack) { backtracking++; }
+	@Override
+	public void exitDecision(int i) { backtracking--; }
+
+	@Override
+	public void enterRule(String filename, String ruleName) {
+		if ( backtracking>0 ) return;
+		ParseTree parentRuleNode = callStack.peek();
+		ParseTree ruleNode = create(ruleName);
+		parentRuleNode.addChild(ruleNode);
+		callStack.push(ruleNode);
+	}
+
+	@Override
+	public void exitRule(String filename, String ruleName) {
+		if ( backtracking>0 ) return;
+		ParseTree ruleNode = callStack.peek();
+		if ( ruleNode.getChildCount()==0 ) {
+			ruleNode.addChild(epsilonNode());
+		}
+		callStack.pop();		
+	}
+
+	@Override
+	public void consumeToken(Token token) {
+		if ( backtracking>0 ) return;
+		ParseTree ruleNode = callStack.peek();
+		ParseTree elementNode = create(token);
+		elementNode.hiddenTokens = this.hiddenTokens;
+		this.hiddenTokens = new ArrayList<Token>();
+		ruleNode.addChild(elementNode);
+	}
+
+	@Override
+	public void consumeHiddenToken(Token token) {
+		if ( backtracking>0 ) return;
+		hiddenTokens.add(token);
+	}
+
+	@Override
+	public void recognitionException(RecognitionException e) {
+		if ( backtracking>0 ) return;
+		ParseTree ruleNode = callStack.peek();
+		ParseTree errorNode = create(e);
+		ruleNode.addChild(errorNode);
+	}
+}
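A short, hypothetical usage sketch: ParseTreeBuilder is attached as the debug listener of a parser generated with -debug; after the entry rule runs, getTree() returns the root created in the constructor and toStringTree() renders it. MyLexer, MyParser and startRule are placeholder names, not part of this change.

    CommonTokenStream tokens = new CommonTokenStream(new MyLexer(input)); // hypothetical lexer
    ParseTreeBuilder builder = new ParseTreeBuilder("MyGrammar");
    MyParser parser = new MyParser(tokens, builder);  // hypothetical -debug parser taking a listener
    parser.startRule();                               // hypothetical entry rule
    System.out.println(builder.getTree().toStringTree());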
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/Profiler.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/Profiler.java
new file mode 100644
index 0000000..d76e28f
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/Profiler.java
@@ -0,0 +1,747 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.*;
+import org.antlr.runtime.misc.DoubleKeyMap;
+
+import java.util.*;
+
+/** Using the debug event interface, track what is happening in the parser
+ *  and record statistics about the runtime.
+ */
+public class Profiler extends BlankDebugEventListener {
+	public static final String DATA_SEP = "\t";
+	public static final String newline = System.getProperty("line.separator");
+
+	static boolean dump = false;
+
+	public static class ProfileStats {
+		public String Version;
+		public String name;
+		public int numRuleInvocations;
+		public int numUniqueRulesInvoked;
+		public int numDecisionEvents;
+		public int numDecisionsCovered;
+		public int numDecisionsThatPotentiallyBacktrack;
+		public int numDecisionsThatDoBacktrack;
+		public int maxRuleInvocationDepth;
+		public float avgkPerDecisionEvent;
+		public float avgkPerBacktrackingDecisionEvent;
+		public float averageDecisionPercentBacktracks;
+		public int numBacktrackOccurrences; // doesn't count gated DFA edges
+
+		public int numFixedDecisions;
+		public int minDecisionMaxFixedLookaheads;
+		public int maxDecisionMaxFixedLookaheads;
+		public int avgDecisionMaxFixedLookaheads;
+		public int stddevDecisionMaxFixedLookaheads;
+		public int numCyclicDecisions;
+		public int minDecisionMaxCyclicLookaheads;
+		public int maxDecisionMaxCyclicLookaheads;
+		public int avgDecisionMaxCyclicLookaheads;
+		public int stddevDecisionMaxCyclicLookaheads;
+//		int Stats.min(toArray(decisionMaxSynPredLookaheads);
+//		int Stats.max(toArray(decisionMaxSynPredLookaheads);
+//		int Stats.avg(toArray(decisionMaxSynPredLookaheads);
+//		int Stats.stddev(toArray(decisionMaxSynPredLookaheads);
+		public int numSemanticPredicates;
+		public int numTokens;
+		public int numHiddenTokens;
+		public int numCharsMatched;
+		public int numHiddenCharsMatched;
+		public int numReportedErrors;
+		public int numMemoizationCacheHits;
+		public int numMemoizationCacheMisses;
+		public int numGuessingRuleInvocations;
+		public int numMemoizationCacheEntries;
+	}
+
+	public static class DecisionDescriptor {
+		public int decision;
+		public String fileName;
+		public String ruleName;
+		public int line;
+		public int pos;
+		public boolean couldBacktrack;
+
+		public int n;
+		public float avgk; // avg across all decision events
+		public int maxk;
+		public int numBacktrackOccurrences;
+		public int numSemPredEvals;
+	}
+
+	// all about a specific exec of a single decision
+	public static class DecisionEvent {
+		public DecisionDescriptor decision;
+		public int startIndex;
+		public int k;
+		public boolean backtracks; // doesn't count gated DFA edges
+		public boolean evalSemPred;
+		public long startTime;
+		public long stopTime;
+		public int numMemoizationCacheHits;
+		public int numMemoizationCacheMisses;
+	}
+
+	/** Because the stats format may change, record a version so that later
+	 *  computations remain consistent with the recorded data.
+	 */
+	public static final String Version = "3";
+	public static final String RUNTIME_STATS_FILENAME = "runtime.stats";
+
+	/** Ack, should not store parser; can't do remote stuff.  Well, we pass
+	 *  input stream around too so I guess it's ok.
+	 */
+	public DebugParser parser = null;
+
+	// working variables
+
+	protected int ruleLevel = 0;
+	//protected int decisionLevel = 0;
+	protected Token lastRealTokenTouchedInDecision;
+	protected Set<String> uniqueRules = new HashSet<String>();
+	protected Stack<String> currentGrammarFileName = new Stack<String>();
+	protected Stack<String> currentRuleName = new Stack<String>();
+	protected Stack<Integer> currentLine = new Stack<Integer>();
+	protected Stack<Integer> currentPos = new Stack<Integer>();
+
+	// Vector<DecisionStats>
+	//protected Vector decisions = new Vector(200); // need setSize
+	protected DoubleKeyMap<String,Integer, DecisionDescriptor> decisions =
+		new DoubleKeyMap<String,Integer, DecisionDescriptor>();
+
+	// Record a DecisionData for each decision we hit while parsing
+	protected List<DecisionEvent> decisionEvents = new ArrayList<DecisionEvent>();
+	protected Stack<DecisionEvent> decisionStack = new Stack<DecisionEvent>();
+
+	protected int backtrackDepth;
+	
+	ProfileStats stats = new ProfileStats();
+
+	public Profiler() {
+	}
+
+	public Profiler(DebugParser parser) {
+		this.parser = parser;
+	}
+
+	@Override
+	public void enterRule(String grammarFileName, String ruleName) {
+//		System.out.println("enterRule "+grammarFileName+":"+ruleName);
+		ruleLevel++;
+		stats.numRuleInvocations++;
+		uniqueRules.add(grammarFileName+":"+ruleName);
+		stats.maxRuleInvocationDepth = Math.max(stats.maxRuleInvocationDepth, ruleLevel);
+		currentGrammarFileName.push( grammarFileName );
+		currentRuleName.push( ruleName );
+	}
+
+	@Override
+	public void exitRule(String grammarFileName, String ruleName) {
+		ruleLevel--;
+		currentGrammarFileName.pop();
+		currentRuleName.pop();
+	}
+
+	/** Track memoization; this is not part of the standard debug interface
+	 *  but is triggered by profiling.  Code gen inserts an override in the
+	 *  recognizer that calls this method.  Called from alreadyParsedRule().
+	 */
+	public void examineRuleMemoization(IntStream input,
+									   int ruleIndex,
+									   int stopIndex, // index or MEMO_RULE_UNKNOWN...
+									   String ruleName)
+	{
+		if (dump) System.out.println("examine memo "+ruleName+" at "+input.index()+": "+stopIndex);
+		if ( stopIndex==BaseRecognizer.MEMO_RULE_UNKNOWN ) {
+			//System.out.println("rule "+ruleIndex+" missed @ "+input.index());
+			stats.numMemoizationCacheMisses++;
+			stats.numGuessingRuleInvocations++; // we'll have to enter
+			currentDecision().numMemoizationCacheMisses++;
+		}
+		else {
+			// regardless of rule success/failure, if in cache, we have a cache hit
+			//System.out.println("rule "+ruleIndex+" hit @ "+input.index());
+			stats.numMemoizationCacheHits++;
+			currentDecision().numMemoizationCacheHits++;
+		}
+	}
+
+	/** Warning: doesn't track success/failure, just that a memoization entry was recorded */
+	public void memoize(IntStream input,
+						int ruleIndex,
+						int ruleStartIndex,
+						String ruleName)
+	{
+		// count how many entries go into table
+		if (dump) System.out.println("memoize "+ruleName);
+		stats.numMemoizationCacheEntries++;
+	}
+
+	@Override
+	public void location(int line, int pos) {
+		currentLine.push(line);
+		currentPos.push(pos);
+	}
+
+	@Override
+	public void enterDecision(int decisionNumber, boolean couldBacktrack) {
+		lastRealTokenTouchedInDecision = null;
+		stats.numDecisionEvents++;
+		int startingLookaheadIndex = parser.getTokenStream().index();
+		TokenStream input = parser.getTokenStream();
+		if ( dump ) System.out.println("enterDecision canBacktrack="+couldBacktrack+" "+ decisionNumber +
+						   " backtrack depth " + backtrackDepth +
+						   " @ " + input.get(input.index()) +
+						   " rule " +locationDescription());
+		String g = currentGrammarFileName.peek();
+		DecisionDescriptor descriptor = decisions.get(g, decisionNumber);
+		if ( descriptor == null ) {
+			descriptor = new DecisionDescriptor();
+			decisions.put(g, decisionNumber, descriptor);
+			descriptor.decision = decisionNumber;
+			descriptor.fileName = currentGrammarFileName.peek();
+			descriptor.ruleName = currentRuleName.peek();
+			descriptor.line = currentLine.peek();
+			descriptor.pos = currentPos.peek();
+			descriptor.couldBacktrack = couldBacktrack;
+		}
+		descriptor.n++;
+
+		DecisionEvent d = new DecisionEvent();
+		decisionStack.push(d);
+		d.decision = descriptor;
+		d.startTime = System.currentTimeMillis();
+		d.startIndex = startingLookaheadIndex;
+	}
+
+	@Override
+	public void exitDecision(int decisionNumber) {
+		DecisionEvent d = decisionStack.pop();
+		d.stopTime = System.currentTimeMillis();
+
+		int lastTokenIndex = lastRealTokenTouchedInDecision.getTokenIndex();
+		int numHidden = getNumberOfHiddenTokens(d.startIndex, lastTokenIndex);
+		int depth = lastTokenIndex - d.startIndex - numHidden + 1; // +1 counts consuming start token as 1
+		d.k = depth;
+		d.decision.maxk = Math.max(d.decision.maxk, depth);
+
+		if (dump) System.out.println("exitDecision "+decisionNumber+" in "+d.decision.ruleName+
+						   " lookahead "+d.k +" max token "+lastRealTokenTouchedInDecision);
+		decisionEvents.add(d); // done with decision; track all
+	}
+
+	@Override
+	public void consumeToken(Token token) {
+		if (dump) System.out.println("consume token "+token);
+		if ( !inDecision() ) {
+			stats.numTokens++;
+			return;
+		}
+		if ( lastRealTokenTouchedInDecision==null ||
+			 lastRealTokenTouchedInDecision.getTokenIndex() < token.getTokenIndex() )
+		{
+			lastRealTokenTouchedInDecision = token;
+		}
+		DecisionEvent d = currentDecision();
+		// compute lookahead depth
+		int thisRefIndex = token.getTokenIndex();
+		int numHidden = getNumberOfHiddenTokens(d.startIndex, thisRefIndex);
+		int depth = thisRefIndex - d.startIndex - numHidden + 1; // +1 counts consuming start token as 1
+		//d.maxk = Math.max(d.maxk, depth);
+		if (dump) System.out.println("consume "+thisRefIndex+" "+depth+" tokens ahead in "+
+						   d.decision.ruleName+"-"+d.decision.decision+" start index "+d.startIndex);		
+	}
+
+	/** The parser is in a decision if the decision depth &gt; 0.  This
+	 *  works for backtracking also, which can have nested decisions.
+	 */
+	public boolean inDecision() {
+		return decisionStack.size()>0;
+	}
+
+	@Override
+	public void consumeHiddenToken(Token token) {
+		//System.out.println("consume hidden token "+token);
+		if ( !inDecision() ) stats.numHiddenTokens++;
+	}
+
+	/** Track lookahead references while inside a decision (fixed or cyclic).
+	 */
+	@Override
+	public void LT(int i, Token t) {
+		if ( inDecision() && i>0 ) {
+			DecisionEvent d = currentDecision();
+			if (dump) System.out.println("LT("+i+")="+t+" index "+t.getTokenIndex()+" relative to "+d.decision.ruleName+"-"+
+							   d.decision.decision+" start index "+d.startIndex);
+			if ( lastRealTokenTouchedInDecision==null ||
+				 lastRealTokenTouchedInDecision.getTokenIndex() < t.getTokenIndex() )
+			{
+				lastRealTokenTouchedInDecision = t;
+				if (dump) System.out.println("set last token "+lastRealTokenTouchedInDecision);
+			}
+			// get starting index off stack
+//			int stackTop = lookaheadStack.size()-1;
+//			Integer startingIndex = (Integer)lookaheadStack.get(stackTop);
+//			// compute lookahead depth
+//			int thisRefIndex = parser.getTokenStream().index();
+//			int numHidden =
+//				getNumberOfHiddenTokens(startingIndex.intValue(), thisRefIndex);
+//			int depth = i + thisRefIndex - startingIndex.intValue() - numHidden;
+//			/*
+//			System.out.println("LT("+i+") @ index "+thisRefIndex+" is depth "+depth+
+//				" max is "+maxLookaheadInCurrentDecision);
+//			*/
+//			if ( depth>maxLookaheadInCurrentDecision ) {
+//				maxLookaheadInCurrentDecision = depth;
+//			}
+//			d.maxk = currentDecision()/
+		}
+	}
+
+	/** Track backtracking decisions.  You'll see a fixed or cyclic decision
+	 *  and then a backtrack.
+	 *
+	 * 		enter rule
+	 * 		...
+	 * 		enter decision
+	 * 		LA and possibly consumes (for cyclic DFAs)
+	 * 		begin backtrack level
+	 * 		mark m
+	 * 		rewind m
+	 * 		end backtrack level, success
+	 * 		exit decision
+	 * 		...
+	 * 		exit rule
+	 */
+	@Override
+	public void beginBacktrack(int level) {
+		if (dump) System.out.println("enter backtrack "+level);
+		backtrackDepth++;
+		DecisionEvent e = currentDecision();
+		if ( e.decision.couldBacktrack ) {
+			stats.numBacktrackOccurrences++;
+			e.decision.numBacktrackOccurrences++;
+			e.backtracks = true;
+		}
+	}
+
+	/** Successful or not, track how much lookahead synpreds use */
+	@Override
+	public void endBacktrack(int level, boolean successful) {
+		if (dump) System.out.println("exit backtrack "+level+": "+successful);
+		backtrackDepth--;		
+	}
+
+	@Override
+	public void mark(int i) {
+		if (dump) System.out.println("mark "+i);
+	}
+
+	@Override
+	public void rewind(int i) {
+		if (dump) System.out.println("rewind "+i);
+	}
+
+	@Override
+	public void rewind() {
+		if (dump) System.out.println("rewind");
+	}
+
+	protected DecisionEvent currentDecision() {
+		return decisionStack.peek();
+	}
+
+	@Override
+	public void recognitionException(RecognitionException e) {
+		stats.numReportedErrors++;
+	}
+
+	@Override
+	public void semanticPredicate(boolean result, String predicate) {
+		stats.numSemanticPredicates++;
+		if ( inDecision() ) {
+			DecisionEvent d = currentDecision();
+			d.evalSemPred = true;
+			d.decision.numSemPredEvals++;
+			if (dump) System.out.println("eval "+predicate+" in "+d.decision.ruleName+"-"+
+							   d.decision.decision);
+		}
+	}
+
+	@Override
+	public void terminate() {
+		for (DecisionEvent e : decisionEvents) {
+			//System.out.println("decision "+e.decision.decision+": k="+e.k);
+			e.decision.avgk += e.k;
+			stats.avgkPerDecisionEvent += e.k;
+			if ( e.backtracks ) { // doesn't count gated syn preds on DFA edges
+				stats.avgkPerBacktrackingDecisionEvent += e.k;
+			}
+		}
+		stats.averageDecisionPercentBacktracks = 0.0f;
+		for (DecisionDescriptor d : decisions.values()) {
+			stats.numDecisionsCovered++;
+			d.avgk /= (double)d.n;
+			if ( d.couldBacktrack ) {
+				stats.numDecisionsThatPotentiallyBacktrack++;
+				float percentBacktracks = d.numBacktrackOccurrences / (float)d.n;
+				//System.out.println("dec "+d.decision+" backtracks "+percentBacktracks*100+"%");
+				stats.averageDecisionPercentBacktracks += percentBacktracks;
+			}
+			// ignore rules that backtrack along gated DFA edges
+			if ( d.numBacktrackOccurrences > 0 ) {
+				stats.numDecisionsThatDoBacktrack++;
+			}
+		}
+		stats.averageDecisionPercentBacktracks /= stats.numDecisionsThatPotentiallyBacktrack;
+		stats.averageDecisionPercentBacktracks *= 100; // it's a percentage
+		stats.avgkPerDecisionEvent /= stats.numDecisionEvents;
+		stats.avgkPerBacktrackingDecisionEvent /= (double)stats.numBacktrackOccurrences;
+
+		System.err.println(toString());
+		System.err.println(getDecisionStatsDump());
+
+//		String stats = toNotifyString();
+//		try {
+//			Stats.writeReport(RUNTIME_STATS_FILENAME,stats);
+//		}
+//		catch (IOException ioe) {
+//			System.err.println(ioe);
+//			ioe.printStackTrace(System.err);
+//		}
+	}
+
+	public void setParser(DebugParser parser) {
+		this.parser = parser;
+	}
+
+	// R E P O R T I N G
+
+	public String toNotifyString() {
+		StringBuilder buf = new StringBuilder();
+		buf.append(Version);
+		buf.append('\t');
+		buf.append(parser.getClass().getName());
+//		buf.append('\t');
+//		buf.append(numRuleInvocations);
+//		buf.append('\t');
+//		buf.append(maxRuleInvocationDepth);
+//		buf.append('\t');
+//		buf.append(numFixedDecisions);
+//		buf.append('\t');
+//		buf.append(Stats.min(decisionMaxFixedLookaheads));
+//		buf.append('\t');
+//		buf.append(Stats.max(decisionMaxFixedLookaheads));
+//		buf.append('\t');
+//		buf.append(Stats.avg(decisionMaxFixedLookaheads));
+//		buf.append('\t');
+//		buf.append(Stats.stddev(decisionMaxFixedLookaheads));
+//		buf.append('\t');
+//		buf.append(numCyclicDecisions);
+//		buf.append('\t');
+//		buf.append(Stats.min(decisionMaxCyclicLookaheads));
+//		buf.append('\t');
+//		buf.append(Stats.max(decisionMaxCyclicLookaheads));
+//		buf.append('\t');
+//		buf.append(Stats.avg(decisionMaxCyclicLookaheads));
+//		buf.append('\t');
+//		buf.append(Stats.stddev(decisionMaxCyclicLookaheads));
+//		buf.append('\t');
+//		buf.append(numBacktrackDecisions);
+//		buf.append('\t');
+//		buf.append(Stats.min(toArray(decisionMaxSynPredLookaheads)));
+//		buf.append('\t');
+//		buf.append(Stats.max(toArray(decisionMaxSynPredLookaheads)));
+//		buf.append('\t');
+//		buf.append(Stats.avg(toArray(decisionMaxSynPredLookaheads)));
+//		buf.append('\t');
+//		buf.append(Stats.stddev(toArray(decisionMaxSynPredLookaheads)));
+//		buf.append('\t');
+//		buf.append(numSemanticPredicates);
+//		buf.append('\t');
+//		buf.append(parser.getTokenStream().size());
+//		buf.append('\t');
+//		buf.append(numHiddenTokens);
+//		buf.append('\t');
+//		buf.append(numCharsMatched);
+//		buf.append('\t');
+//		buf.append(numHiddenCharsMatched);
+//		buf.append('\t');
+//		buf.append(numberReportedErrors);
+//		buf.append('\t');
+//		buf.append(numMemoizationCacheHits);
+//		buf.append('\t');
+//		buf.append(numMemoizationCacheMisses);
+//		buf.append('\t');
+//		buf.append(numGuessingRuleInvocations);
+//		buf.append('\t');
+//		buf.append(numMemoizationCacheEntries);
+		return buf.toString();
+	}
+
+	@Override
+	public String toString() {
+		return toString(getReport());
+	}
+
+	public ProfileStats getReport() {
+//		TokenStream input = parser.getTokenStream();
+//		for (int i=0; i<input.size()&& lastRealTokenTouchedInDecision !=null&&i<= lastRealTokenTouchedInDecision.getTokenIndex(); i++) {
+//			Token t = input.get(i);
+//			if ( t.getChannel()!=Token.DEFAULT_CHANNEL ) {
+//				stats.numHiddenTokens++;
+//				stats.numHiddenCharsMatched += t.getText().length();
+//			}
+//		}
+		stats.Version = Version;
+		stats.name = parser.getClass().getName();
+		stats.numUniqueRulesInvoked = uniqueRules.size();
+		//stats.numCharsMatched = lastTokenConsumed.getStopIndex() + 1;
+		return stats;
+	}
+
+	public DoubleKeyMap<String, Integer, DecisionDescriptor> getDecisionStats() {
+		return decisions;
+	}
+
+	public List<DecisionEvent> getDecisionEvents() {
+		return decisionEvents;
+	}
+
+	public static String toString(ProfileStats stats) {
+		StringBuilder buf = new StringBuilder();
+		buf.append("ANTLR Runtime Report; Profile Version ");
+		buf.append(stats.Version);
+		buf.append(newline);
+		buf.append("parser name ");
+		buf.append(stats.name);
+		buf.append(newline);
+		buf.append("Number of rule invocations ");
+		buf.append(stats.numRuleInvocations);
+		buf.append(newline);
+		buf.append("Number of unique rules visited ");
+		buf.append(stats.numUniqueRulesInvoked);
+		buf.append(newline);
+		buf.append("Number of decision events ");
+		buf.append(stats.numDecisionEvents);
+		buf.append(newline);
+		buf.append("Overall average k per decision event ");
+		buf.append(stats.avgkPerDecisionEvent);
+		buf.append(newline);
+		buf.append("Number of backtracking occurrences (can be multiple per decision) ");
+		buf.append(stats.numBacktrackOccurrences);
+		buf.append(newline);
+		buf.append("Overall average k per decision event that backtracks ");
+		buf.append(stats.avgkPerBacktrackingDecisionEvent);
+		buf.append(newline);
+		buf.append("Number of rule invocations while backtracking ");
+		buf.append(stats.numGuessingRuleInvocations);
+		buf.append(newline);
+		buf.append("num decisions that potentially backtrack ");
+		buf.append(stats.numDecisionsThatPotentiallyBacktrack);
+		buf.append(newline);
+		buf.append("num decisions that do backtrack ");
+		buf.append(stats.numDecisionsThatDoBacktrack);
+		buf.append(newline);
+		buf.append("num decisions that potentially backtrack but don't ");
+		buf.append(stats.numDecisionsThatPotentiallyBacktrack - stats.numDecisionsThatDoBacktrack);
+		buf.append(newline);
+		buf.append("average % of time a potentially backtracking decision backtracks ");
+		buf.append(stats.averageDecisionPercentBacktracks);
+		buf.append(newline);
+		buf.append("num unique decisions covered ");
+		buf.append(stats.numDecisionsCovered);
+		buf.append(newline);
+		buf.append("max rule invocation nesting depth ");
+		buf.append(stats.maxRuleInvocationDepth);
+		buf.append(newline);
+
+//		buf.append("number of fixed lookahead decisions ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("min lookahead used in a fixed lookahead decision ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("max lookahead used in a fixed lookahead decision ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("average lookahead depth used in fixed lookahead decisions ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("standard deviation of depth used in fixed lookahead decisions ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("number of arbitrary lookahead decisions ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("min lookahead used in an arbitrary lookahead decision ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("max lookahead used in an arbitrary lookahead decision ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("average lookahead depth used in arbitrary lookahead decisions ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("standard deviation of depth used in arbitrary lookahead decisions ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("number of evaluated syntactic predicates ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("min lookahead used in a syntactic predicate ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("max lookahead used in a syntactic predicate ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("average lookahead depth used in syntactic predicates ");
+//		buf.append();
+//		buf.append('\n');
+//		buf.append("standard deviation of depth used in syntactic predicates ");
+//		buf.append();
+//		buf.append('\n');
+		buf.append("rule memoization cache size ");
+		buf.append(stats.numMemoizationCacheEntries);
+		buf.append(newline);
+		buf.append("number of rule memoization cache hits ");
+		buf.append(stats.numMemoizationCacheHits);
+		buf.append(newline);
+		buf.append("number of rule memoization cache misses ");
+		buf.append(stats.numMemoizationCacheMisses);
+		buf.append(newline);
+//		buf.append("number of evaluated semantic predicates ");
+//		buf.append();
+//		buf.append(newline);
+		buf.append("number of tokens ");
+		buf.append(stats.numTokens);
+		buf.append(newline);
+		buf.append("number of hidden tokens ");
+		buf.append(stats.numHiddenTokens);
+		buf.append(newline);
+		buf.append("number of char ");
+		buf.append(stats.numCharsMatched);
+		buf.append(newline);
+		buf.append("number of hidden char ");
+		buf.append(stats.numHiddenCharsMatched);
+		buf.append(newline);
+		buf.append("number of syntax errors ");
+		buf.append(stats.numReportedErrors);
+		buf.append(newline);
+		return buf.toString();
+	}
+
+	public String getDecisionStatsDump() {
+		StringBuilder buf = new StringBuilder();
+		buf.append("location");
+		buf.append(DATA_SEP);
+		buf.append("n");
+		buf.append(DATA_SEP);
+		buf.append("avgk");
+		buf.append(DATA_SEP);
+		buf.append("maxk");
+		buf.append(DATA_SEP);
+		buf.append("synpred");
+		buf.append(DATA_SEP);
+		buf.append("sempred");
+		buf.append(DATA_SEP);
+		buf.append("canbacktrack");
+		buf.append("\n");
+		for (String fileName : decisions.keySet()) {
+			for (int d : decisions.keySet(fileName)) {
+				DecisionDescriptor s = decisions.get(fileName, d);
+				buf.append(s.decision);
+				buf.append("@");
+				buf.append(locationDescription(s.fileName,s.ruleName,s.line,s.pos)); // decision number
+				buf.append(DATA_SEP);
+				buf.append(s.n);
+				buf.append(DATA_SEP);
+				buf.append(String.format("%.2f",s.avgk));
+				buf.append(DATA_SEP);
+				buf.append(s.maxk);
+				buf.append(DATA_SEP);
+				buf.append(s.numBacktrackOccurrences);
+				buf.append(DATA_SEP);
+				buf.append(s.numSemPredEvals);
+				buf.append(DATA_SEP);
+				buf.append(s.couldBacktrack ?"1":"0");
+				buf.append(newline);
+			}
+		}
+		return buf.toString();
+	}
+
+	protected int[] trim(int[] X, int n) {
+		if ( n<X.length ) {
+			int[] trimmed = new int[n];
+			System.arraycopy(X,0,trimmed,0,n);
+			X = trimmed;
+		}
+		return X;
+	}
+
+	protected int[] toArray(List<Integer> a) {
+		int[] x = new int[a.size()];
+		for (int i = 0; i < a.size(); i++) {
+			Integer I = a.get(i);
+			x[i] = I;
+		}
+		return x;
+	}
+
+	/** Get num hidden tokens between i..j inclusive */
+	public int getNumberOfHiddenTokens(int i, int j) {
+		int n = 0;
+		TokenStream input = parser.getTokenStream();
+		for (int ti = i; ti<input.size() && ti <= j; ti++) {
+			Token t = input.get(ti);
+			if ( t.getChannel()!=Token.DEFAULT_CHANNEL ) {
+				n++;
+			}
+		}
+		return n;
+	}
+
+	protected String locationDescription() {
+		return locationDescription(
+			currentGrammarFileName.peek(),
+			currentRuleName.peek(),
+			currentLine.peek(),
+			currentPos.peek());
+	}
+
+	protected String locationDescription(String file, String rule, int line, int pos) {
+		return file+":"+line+":"+pos+"(" + rule + ")";
+	}
+}
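For orientation, a hypothetical sketch of how the profiler above is driven. With the -profile option the generated parser performs this wiring itself; here it is spelled out manually. MyParser and startRule are placeholder names; setParser(), getReport() and getDecisionStatsDump() are the methods defined above, and terminate() finalizes the averages and prints the report when the terminate debug event fires.

    Profiler profiler = new Profiler();
    MyParser parser = new MyParser(tokens, profiler);  // hypothetical -debug/-profile parser
    profiler.setParser(parser);
    parser.startRule();                                // hypothetical entry rule
    Profiler.ProfileStats stats = profiler.getReport();
    System.out.println("decision events: " + stats.numDecisionEvents);
    System.out.println(profiler.getDecisionStatsDump()); // tab-separated per-decision table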
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
new file mode 100644
index 0000000..a04fd04
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/RemoteDebugEventSocketListener.java
@@ -0,0 +1,557 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.tree.BaseTree;
+import org.antlr.runtime.tree.Tree;
+
+import java.io.*;
+import java.net.ConnectException;
+import java.net.Socket;
+import java.util.StringTokenizer;
+
+public class RemoteDebugEventSocketListener implements Runnable {
+	static final int MAX_EVENT_ELEMENTS = 8;
+	DebugEventListener listener;
+	String machine;
+	int port;
+	Socket channel = null;
+	PrintWriter out;
+	BufferedReader in;
+	String event;
+	/** Version of ANTLR (dictates events) */
+	public String version;
+	public String grammarFileName;
+	/** Track the last token index we saw during a consume.  If it is the
+	 *  same as the previous one, set a flag indicating that the token
+	 *  indexes are invalid.
+	 */
+	int previousTokenIndex = -1;
+	boolean tokenIndexesInvalid = false;
+
+	public static class ProxyToken implements Token {
+		int index;
+		int type;
+		int channel;
+		int line;
+		int charPos;
+		String text;
+		public ProxyToken(int index) { this.index = index; }		
+		public ProxyToken(int index, int type, int channel,
+						  int line, int charPos, String text)
+		{
+			this.index = index;
+			this.type = type;
+			this.channel = channel;
+			this.line = line;
+			this.charPos = charPos;
+			this.text = text;
+		}
+
+		@Override
+		public String getText() {
+			return text;
+		}
+
+		@Override
+		public void setText(String text) {
+			this.text = text;
+		}
+
+		@Override
+		public int getType() {
+			return type;
+		}
+
+		@Override
+		public void setType(int ttype) {
+			this.type = ttype;
+		}
+
+		@Override
+		public int getLine() {
+			return line;
+		}
+
+		@Override
+		public void setLine(int line) {
+			this.line = line;
+		}
+
+		@Override
+		public int getCharPositionInLine() {
+			return charPos;
+		}
+
+		@Override
+		public void setCharPositionInLine(int pos) {
+			this.charPos = pos;
+		}
+
+		@Override
+		public int getChannel() {
+			return channel;
+		}
+
+		@Override
+		public void setChannel(int channel) {
+			this.channel = channel;
+		}
+
+		@Override
+		public int getTokenIndex() {
+			return index;
+		}
+
+		@Override
+		public void setTokenIndex(int index) {
+			this.index = index;
+		}
+
+		@Override
+		public CharStream getInputStream() {
+			return null;
+		}
+
+		@Override
+		public void setInputStream(CharStream input) {
+		}
+
+		@Override
+		public String toString() {
+			String channelStr = "";
+			if ( channel!=Token.DEFAULT_CHANNEL ) {
+				channelStr=",channel="+channel;
+			}
+			return "["+getText()+"/<"+type+">"+channelStr+","+line+":"+getCharPositionInLine()+",@"+index+"]";
+		}
+	}
+
+	public static class ProxyTree extends BaseTree {
+		public int ID;
+		public int type;
+		public int line = 0;
+		public int charPos = -1;
+		public int tokenIndex = -1;
+		public String text;
+		
+		public ProxyTree(int ID, int type, int line, int charPos, int tokenIndex, String text) {
+			this.ID = ID;
+			this.type = type;
+			this.line = line;
+			this.charPos = charPos;
+			this.tokenIndex = tokenIndex;
+			this.text = text;
+		}
+
+		public ProxyTree(int ID) { this.ID = ID; }
+
+		@Override public int getTokenStartIndex() { return tokenIndex; }
+		@Override public void setTokenStartIndex(int index) {	}
+		@Override public int getTokenStopIndex() { return 0; }
+		@Override public void setTokenStopIndex(int index) { }
+		@Override public Tree dupNode() {	return null; }
+		@Override public int getType() { return type; }
+		@Override public String getText() { return text; }
+		@Override public String toString() {
+			return "fix this";
+		}
+	}
+
+	public RemoteDebugEventSocketListener(DebugEventListener listener,
+										  String machine,
+										  int port) throws IOException
+	{
+		this.listener = listener;
+		this.machine = machine;
+		this.port = port;
+
+        if( !openConnection() ) {
+            throw new ConnectException();
+        }
+	}
+
+	protected void eventHandler() {
+		try {
+			handshake();
+			event = in.readLine();
+			while ( event!=null ) {
+				dispatch(event);
+				ack();
+				event = in.readLine();
+			}
+		}
+		catch (Exception e) {
+			System.err.println(e);
+			e.printStackTrace(System.err);
+		}
+		finally {
+            closeConnection();
+		}
+	}
+
+    protected boolean openConnection() {
+        boolean success = false;
+        try {
+            channel = new Socket(machine, port);
+            channel.setTcpNoDelay(true);
+			OutputStream os = channel.getOutputStream();
+			OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
+			out = new PrintWriter(new BufferedWriter(osw));
+			InputStream is = channel.getInputStream();
+			InputStreamReader isr = new InputStreamReader(is, "UTF8");
+			in = new BufferedReader(isr);
+            success = true;
+        } catch(Exception e) {
+            System.err.println(e);
+        }
+        return success;
+    }
+
+    protected void closeConnection() {
+        try {
+            in.close(); in = null;
+            out.close(); out = null;
+            channel.close(); channel=null;
+        }
+        catch (Exception e) {
+            System.err.println(e);
+            e.printStackTrace(System.err);
+        }
+        finally {
+            if ( in!=null ) {
+                try {in.close();} catch (IOException ioe) {
+                    System.err.println(ioe);
+                }
+            }
+            if ( out!=null ) {
+                out.close();
+            }
+            if ( channel!=null ) {
+                try {channel.close();} catch (IOException ioe) {
+                    System.err.println(ioe);
+                }
+            }
+        }
+
+    }
+
+	protected void handshake() throws IOException {
+		String antlrLine = in.readLine();
+		String[] antlrElements = getEventElements(antlrLine);
+		version = antlrElements[1];
+		String grammarLine = in.readLine();
+		String[] grammarElements = getEventElements(grammarLine);
+		grammarFileName = grammarElements[1];
+		ack();
+		listener.commence(); // inform listener after handshake
+	}
+
+	protected void ack() {
+        out.println("ack");
+		out.flush();
+	}
+
+	protected void dispatch(String line) {
+        //System.out.println("event: "+line);
+        String[] elements = getEventElements(line);
+		if ( elements==null || elements[0]==null ) {
+			System.err.println("unknown debug event: "+line);
+			return;
+		}
+		if ( elements[0].equals("enterRule") ) {
+			listener.enterRule(elements[1], elements[2]);
+		}
+		else if ( elements[0].equals("exitRule") ) {
+			listener.exitRule(elements[1], elements[2]);
+		}
+		else if ( elements[0].equals("enterAlt") ) {
+			listener.enterAlt(Integer.parseInt(elements[1]));
+		}
+		else if ( elements[0].equals("enterSubRule") ) {
+			listener.enterSubRule(Integer.parseInt(elements[1]));
+		}
+		else if ( elements[0].equals("exitSubRule") ) {
+			listener.exitSubRule(Integer.parseInt(elements[1]));
+		}
+		else if ( elements[0].equals("enterDecision") ) {
+			listener.enterDecision(Integer.parseInt(elements[1]), elements[2].equals("true"));
+		}
+		else if ( elements[0].equals("exitDecision") ) {
+			listener.exitDecision(Integer.parseInt(elements[1]));
+		}
+		else if ( elements[0].equals("location") ) {
+			listener.location(Integer.parseInt(elements[1]),
+							  Integer.parseInt(elements[2]));
+		}
+		else if ( elements[0].equals("consumeToken") ) {
+			ProxyToken t = deserializeToken(elements, 1);
+			if ( t.getTokenIndex() == previousTokenIndex ) {
+				tokenIndexesInvalid = true;
+			}
+			previousTokenIndex = t.getTokenIndex();
+			listener.consumeToken(t);
+		}
+		else if ( elements[0].equals("consumeHiddenToken") ) {
+			ProxyToken t = deserializeToken(elements, 1);
+			if ( t.getTokenIndex() == previousTokenIndex ) {
+				tokenIndexesInvalid = true;
+			}
+			previousTokenIndex = t.getTokenIndex();
+			listener.consumeHiddenToken(t);
+		}
+		else if ( elements[0].equals("LT") ) {
+			Token t = deserializeToken(elements, 2);
+			listener.LT(Integer.parseInt(elements[1]), t);
+		}
+		else if ( elements[0].equals("mark") ) {
+			listener.mark(Integer.parseInt(elements[1]));
+		}
+		else if ( elements[0].equals("rewind") ) {
+			if ( elements[1]!=null ) {
+				listener.rewind(Integer.parseInt(elements[1]));
+			}
+			else {
+				listener.rewind();
+			}
+		}
+		else if ( elements[0].equals("beginBacktrack") ) {
+			listener.beginBacktrack(Integer.parseInt(elements[1]));
+		}
+		else if ( elements[0].equals("endBacktrack") ) {
+			int level = Integer.parseInt(elements[1]);
+			int successI = Integer.parseInt(elements[2]);
+			listener.endBacktrack(level, successI==DebugEventListener.TRUE);
+		}
+		else if ( elements[0].equals("exception") ) {
+			String excName = elements[1];
+			String indexS = elements[2];
+			String lineS = elements[3];
+			String posS = elements[4];
+			Class<? extends RecognitionException> excClass;
+			try {
+				excClass = Class.forName(excName).asSubclass(RecognitionException.class);
+				RecognitionException e = excClass.newInstance();
+				e.index = Integer.parseInt(indexS);
+				e.line = Integer.parseInt(lineS);
+				e.charPositionInLine = Integer.parseInt(posS);
+				listener.recognitionException(e);
+			}
+			catch (ClassNotFoundException cnfe) {
+				System.err.println("can't find class "+cnfe);
+				cnfe.printStackTrace(System.err);
+			}
+			catch (InstantiationException ie) {
+				System.err.println("can't instantiate class "+ie);
+				ie.printStackTrace(System.err);
+			}
+			catch (IllegalAccessException iae) {
+				System.err.println("can't access class "+iae);
+				iae.printStackTrace(System.err);
+			}
+		}
+		else if ( elements[0].equals("beginResync") ) {
+			listener.beginResync();
+		}
+		else if ( elements[0].equals("endResync") ) {
+			listener.endResync();
+		}
+		else if ( elements[0].equals("terminate") ) {
+			listener.terminate();
+		}
+		else if ( elements[0].equals("semanticPredicate") ) {
+			Boolean result = Boolean.valueOf(elements[1]);
+			String predicateText = elements[2];
+			predicateText = unEscapeNewlines(predicateText);
+			listener.semanticPredicate(result,
+									   predicateText);
+		}
+		else if ( elements[0].equals("consumeNode") ) {
+			ProxyTree node = deserializeNode(elements, 1);
+			listener.consumeNode(node);
+		}
+		else if ( elements[0].equals("LN") ) {
+			int i = Integer.parseInt(elements[1]);
+			ProxyTree node = deserializeNode(elements, 2);
+			listener.LT(i, node);
+		}
+		else if ( elements[0].equals("createNodeFromTokenElements") ) {
+			int ID = Integer.parseInt(elements[1]);
+			int type = Integer.parseInt(elements[2]);
+			String text = elements[3];
+			text = unEscapeNewlines(text);
+			ProxyTree node = new ProxyTree(ID, type, -1, -1, -1, text);
+			listener.createNode(node);
+		}
+		else if ( elements[0].equals("createNode") ) {
+			int ID = Integer.parseInt(elements[1]);
+			int tokenIndex = Integer.parseInt(elements[2]);
+			// create dummy node/token filled with ID, tokenIndex
+			ProxyTree node = new ProxyTree(ID);
+			ProxyToken token = new ProxyToken(tokenIndex);
+			listener.createNode(node, token);
+		}
+		else if ( elements[0].equals("nilNode") ) {
+			int ID = Integer.parseInt(elements[1]);
+			ProxyTree node = new ProxyTree(ID);
+			listener.nilNode(node);
+		}
+		else if ( elements[0].equals("errorNode") ) {
+			// TODO: do we need a special tree here?
+			int ID = Integer.parseInt(elements[1]);
+			int type = Integer.parseInt(elements[2]);
+			String text = elements[3];
+			text = unEscapeNewlines(text);
+			ProxyTree node = new ProxyTree(ID, type, -1, -1, -1, text);
+			listener.errorNode(node);
+		}
+		else if ( elements[0].equals("becomeRoot") ) {
+			int newRootID = Integer.parseInt(elements[1]);
+			int oldRootID = Integer.parseInt(elements[2]);
+			ProxyTree newRoot = new ProxyTree(newRootID);
+			ProxyTree oldRoot = new ProxyTree(oldRootID);
+			listener.becomeRoot(newRoot, oldRoot);
+		}
+		else if ( elements[0].equals("addChild") ) {
+			int rootID = Integer.parseInt(elements[1]);
+			int childID = Integer.parseInt(elements[2]);
+			ProxyTree root = new ProxyTree(rootID);
+			ProxyTree child = new ProxyTree(childID);
+			listener.addChild(root, child);
+		}
+		else if ( elements[0].equals("setTokenBoundaries") ) {
+			int ID = Integer.parseInt(elements[1]);
+			ProxyTree node = new ProxyTree(ID);
+			listener.setTokenBoundaries(
+				node,
+				Integer.parseInt(elements[2]),
+				Integer.parseInt(elements[3]));
+		}
+		else {
+			System.err.println("unknown debug event: "+line);
+		}
+	}
+
+	protected ProxyTree deserializeNode(String[] elements, int offset) {
+		int ID = Integer.parseInt(elements[offset+0]);
+		int type = Integer.parseInt(elements[offset+1]);
+		int tokenLine = Integer.parseInt(elements[offset+2]);
+		int charPositionInLine = Integer.parseInt(elements[offset+3]);
+		int tokenIndex = Integer.parseInt(elements[offset+4]);
+		String text = elements[offset+5];
+		text = unEscapeNewlines(text);
+		return new ProxyTree(ID, type, tokenLine, charPositionInLine, tokenIndex, text);
+	}
+
+	protected ProxyToken deserializeToken(String[] elements,
+										  int offset)
+	{
+		String indexS = elements[offset+0];
+		String typeS = elements[offset+1];
+		String channelS = elements[offset+2];
+		String lineS = elements[offset+3];
+		String posS = elements[offset+4];
+		String text = elements[offset+5];
+		text = unEscapeNewlines(text);
+		int index = Integer.parseInt(indexS);
+		ProxyToken t =
+			new ProxyToken(index,
+						   Integer.parseInt(typeS),
+						   Integer.parseInt(channelS),
+						   Integer.parseInt(lineS),
+						   Integer.parseInt(posS),
+						   text);
+		return t;
+	}
+
+	/** Create a thread to listen to the remote running recognizer */
+	public void start() {
+		Thread t = new Thread(this);
+		t.start();
+	}
+
+	@Override
+	public void run() {
+		eventHandler();
+	}
+
+	// M i s c
+
+	public String[] getEventElements(String event) {
+		if ( event==null ) {
+			return null;
+		}
+		String[] elements = new String[MAX_EVENT_ELEMENTS];
+		String str = null; // a string element if present (must be last)
+		try {
+			int firstQuoteIndex = event.indexOf('"');
+			if ( firstQuoteIndex>=0 ) {
+				// treat specially; the event has a string argument like "a comment\n
+				// Note that the string is terminated by \n, not by a closing quote,
+				// which makes it easier to parse.
+				String eventWithoutString = event.substring(0,firstQuoteIndex);
+				str = event.substring(firstQuoteIndex+1,event.length());
+				event = eventWithoutString;
+			}
+			StringTokenizer st = new StringTokenizer(event, "\t", false);
+			int i = 0;
+			while ( st.hasMoreTokens() ) {
+				if ( i>=MAX_EVENT_ELEMENTS ) {
+					// ErrorManager.internalError("event has more than "+MAX_EVENT_ELEMENTS+" args: "+event);
+					return elements;
+				}
+				elements[i] = st.nextToken();
+				i++;
+			}
+			if ( str!=null ) {
+				elements[i] = str;
+			}
+		}
+		catch (Exception e) {
+			e.printStackTrace(System.err);
+		}
+		return elements;
+	}
+
+	protected String unEscapeNewlines(String txt) {
+		// this unescape is slow but easy to understand
+		txt = txt.replaceAll("%0A","\n");  // unescape \n
+		txt = txt.replaceAll("%0D","\r");  // unescape \r
+		txt = txt.replaceAll("%25","%");   // undo escaped escape chars
+		return txt;
+	}
+
+	public boolean tokenIndexesAreInvalid() {
+		return false;
+		//return tokenIndexesInvalid;
+	}
+
+}
+
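A hedged wiring sketch for the socket listener above: it connects to a recognizer that publishes its debug events over a socket (on the recognizer side this is normally done with a DebugEventSocketProxy; the host and port used below are assumptions, not values from this file). Each newline-terminated, tab-separated event line read from the socket is parsed by getEventElements() and dispatched to the local DebugEventListener.

    // The constructor throws IOException if the connection cannot be opened.
    RemoteDebugEventSocketListener remote = new RemoteDebugEventSocketListener(
        new BlankDebugEventListener() {
            @Override
            public void enterRule(String grammarFileName, String ruleName) {
                System.out.println("remote enterRule " + ruleName);
            }
        },
        "localhost", 49100);   // assumed host/port of the remote recognizer
    remote.start();            // spawns a thread: handshake, then dispatch each event line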
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/TraceDebugEventListener.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/TraceDebugEventListener.java
new file mode 100644
index 0000000..a405d35
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/TraceDebugEventListener.java
@@ -0,0 +1,107 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.TreeAdaptor;
+
+/** Print out (most of) the events... Useful for debugging, testing... */
+public class TraceDebugEventListener extends BlankDebugEventListener {
+	TreeAdaptor adaptor;
+
+	public TraceDebugEventListener(TreeAdaptor adaptor) {
+		this.adaptor = adaptor;
+	}
+
+	public void enterRule(String ruleName) { System.out.println("enterRule "+ruleName); }
+	public void exitRule(String ruleName) { System.out.println("exitRule "+ruleName); }
+	@Override
+	public void enterSubRule(int decisionNumber) { System.out.println("enterSubRule"); }
+	@Override
+	public void exitSubRule(int decisionNumber) { System.out.println("exitSubRule"); }
+	@Override
+	public void location(int line, int pos) {System.out.println("location "+line+":"+pos);}
+
+	// Tree parsing stuff
+
+	@Override
+	public void consumeNode(Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		System.out.println("consumeNode "+ID+" "+text+" "+type);
+	}
+
+	@Override
+	public void LT(int i, Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		System.out.println("LT "+i+" "+ID+" "+text+" "+type);
+	}
+
+
+	// AST stuff
+	@Override
+	public void nilNode(Object t) {System.out.println("nilNode "+adaptor.getUniqueID(t));}
+
+	@Override
+	public void createNode(Object t) {
+		int ID = adaptor.getUniqueID(t);
+		String text = adaptor.getText(t);
+		int type = adaptor.getType(t);
+		System.out.println("create "+ID+": "+text+", "+type);
+	}
+
+	@Override
+	public void createNode(Object node, Token token) {
+		int ID = adaptor.getUniqueID(node);
+		String text = adaptor.getText(node);
+		int tokenIndex = token.getTokenIndex();
+		System.out.println("create "+ID+": "+tokenIndex);
+	}
+
+	@Override
+	public void becomeRoot(Object newRoot, Object oldRoot) {
+		System.out.println("becomeRoot "+adaptor.getUniqueID(newRoot)+", "+
+						   adaptor.getUniqueID(oldRoot));
+	}
+
+	@Override
+	public void addChild(Object root, Object child) {
+		System.out.println("addChild "+adaptor.getUniqueID(root)+", "+
+						   adaptor.getUniqueID(child));
+	}
+
+	@Override
+	public void setTokenBoundaries(Object t, int tokenStartIndex, int tokenStopIndex) {
+		System.out.println("setTokenBoundaries "+adaptor.getUniqueID(t)+", "+
+						   tokenStartIndex+", "+tokenStopIndex);
+	}
+}
+
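One more hypothetical sketch: because the trace listener above needs a TreeAdaptor to compute node IDs and text, it is handed the same adaptor the walker uses. MyTreeWalker and startRule are placeholder names; CommonTreeAdaptor and CommonTreeNodeStream are the stock runtime classes.

    CommonTreeAdaptor adaptor = new CommonTreeAdaptor();
    TraceDebugEventListener tracer = new TraceDebugEventListener(adaptor);
    CommonTreeNodeStream nodes = new CommonTreeNodeStream(adaptor, ast);
    MyTreeWalker walker = new MyTreeWalker(nodes, tracer); // hypothetical generated walker
    walker.startRule();                                    // prints consumeNode/LT/createNode/... lines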
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/debug/Tracer.java b/runtime/Java/src/main/java/org/antlr/runtime/debug/Tracer.java
new file mode 100644
index 0000000..f9da2a1
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/debug/Tracer.java
@@ -0,0 +1,65 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.debug;
+
+import org.antlr.runtime.IntStream;
+import org.antlr.runtime.TokenStream;
+
+/** The default tracer mimics the traceParser behavior of ANTLR 2.x.
+ *  Because it consumes the parser's debugging events, you cannot debug
+ *  and trace at the same time.
+ */
+public class Tracer extends BlankDebugEventListener {
+	public IntStream input;
+	protected int level = 0;
+
+	public Tracer(IntStream input) {
+		this.input = input;
+	}
+
+	public void enterRule(String ruleName) {
+		for (int i=1; i<=level; i++) {System.out.print(" ");}
+		System.out.println("> "+ruleName+" lookahead(1)="+getInputSymbol(1));
+		level++;
+	}
+
+	public void exitRule(String ruleName) {
+		level--;
+		for (int i=1; i<=level; i++) {System.out.print(" ");}
+		System.out.println("< "+ruleName+" lookahead(1)="+getInputSymbol(1));
+	}
+
+	public Object getInputSymbol(int k) {
+		if ( input instanceof TokenStream ) {
+			return ((TokenStream)input).LT(k);
+		}
+		return (char) input.LA(k);
+	}
+}
+
+
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/DoubleKeyMap.java b/runtime/Java/src/main/java/org/antlr/runtime/misc/DoubleKeyMap.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/DoubleKeyMap.java
rename to runtime/Java/src/main/java/org/antlr/runtime/misc/DoubleKeyMap.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/misc/FastQueue.java b/runtime/Java/src/main/java/org/antlr/runtime/misc/FastQueue.java
new file mode 100644
index 0000000..aec154d
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/misc/FastQueue.java
@@ -0,0 +1,102 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.misc;
+
+import java.util.List;
+import java.util.ArrayList;
+import java.util.NoSuchElementException;
+
+/** A queue that can dequeue and get(i) in O(1) and grow arbitrarily large.
+ *  A linked list is fast at dequeue but slow at get(i).  An array is
+ *  the reverse.  This is O(1) for both operations.
+ *
+ *  List grows until you dequeue last element at end of buffer. Then
+ *  it resets to start filling at 0 again.  If adds/removes are balanced, the
+ *  buffer will not grow too large.
+ *
+ *  No iterator stuff as that's not how we'll use it.
+ */
+public class FastQueue<T> {
+    /** dynamically-sized buffer of elements */
+    protected List<T> data = new ArrayList<T>();
+    /** index of next element to fill */
+    protected int p = 0;
+    protected int range = -1; // how deep have we gone?
+
+    public void reset() { clear(); }
+    public void clear() { p = 0; data.clear(); }
+
+    /** Get and remove first element in queue */
+    public T remove() {
+        T o = elementAt(0);
+        p++;
+        // have we hit end of buffer?
+        if ( p == data.size() ) {
+            // if so, it's an opportunity to start filling at index 0 again
+            clear(); // size goes to 0, but retains memory
+        }
+        return o;
+    }
+
+    public void add(T o) { data.add(o); }
+
+    public int size() { return data.size() - p; }
+
+    public int range() { return range; }
+
+    public T head() { return elementAt(0); }
+
+    /**
+     * Return element {@code i} elements ahead of current element. {@code i==0}
+     * gets current element. This is not an absolute index into {@link #data}
+     * since {@code p} defines the start of the real list.
+     */
+    public T elementAt(int i) {
+        int absIndex = p + i;
+        if ( absIndex >= data.size() ) {
+            throw new NoSuchElementException("queue index "+ absIndex +" > last index "+(data.size()-1));
+        }
+        if ( absIndex < 0 ) {
+            throw new NoSuchElementException("queue index "+ absIndex +" < 0");
+        }
+        if ( absIndex>range ) range = absIndex;
+        return data.get(absIndex);
+    }
+
+    /** Return string of current buffer contents; non-destructive */
+    @Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder();
+        int n = size();
+        for (int i=0; i<n; i++) {
+            buf.append(elementAt(i));
+            if ( (i+1)<n ) buf.append(" ");
+        }
+        return buf.toString();
+    }
+}
\ No newline at end of file
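A minimal usage sketch of the FastQueue contract documented above (O(1) dequeue via remove() and O(1) access relative to the head via elementAt()). This is illustrative only and not part of the patch; the FastQueueDemo class name and the sample values are invented, and it assumes the runtime above is on the classpath.

import org.antlr.runtime.misc.FastQueue;

public class FastQueueDemo {
    public static void main(String[] args) {
        FastQueue<String> q = new FastQueue<String>();
        q.add("a"); q.add("b"); q.add("c");
        System.out.println(q.elementAt(1)); // "b": O(1) access relative to the head pointer p
        System.out.println(q.remove());     // "a": O(1) dequeue, just bumps p
        System.out.println(q.size());       // 2: size is data.size() - p
        q.remove(); q.remove();             // removing the last buffered element resets the buffer to fill at 0 again
        System.out.println(q.size());       // 0
    }
}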
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/IntArray.java b/runtime/Java/src/main/java/org/antlr/runtime/misc/IntArray.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/IntArray.java
rename to runtime/Java/src/main/java/org/antlr/runtime/misc/IntArray.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/misc/LookaheadStream.java b/runtime/Java/src/main/java/org/antlr/runtime/misc/LookaheadStream.java
new file mode 100644
index 0000000..02c92e6
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/misc/LookaheadStream.java
@@ -0,0 +1,208 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.misc;
+
+/**
+ * A lookahead queue that knows how to mark/release locations in the buffer for
+ * backtracking purposes. Any markers force the {@link FastQueue} superclass to
+ * keep all elements until no more markers remain; then it can reset to avoid growing a
+ * huge buffer.
+ */
+public abstract class LookaheadStream<T> extends FastQueue<T> {
+    public static final int UNINITIALIZED_EOF_ELEMENT_INDEX = Integer.MAX_VALUE;
+
+    /** Absolute token index. It's the index of the symbol about to be
+	 *  read via {@code LT(1)}. Goes from 0 to numtokens.
+     */
+    protected int currentElementIndex = 0;
+
+    /**
+     * This is the {@code LT(-1)} element for the first element in {@link #data}.
+     */
+    protected T prevElement;
+
+    /** Track the object returned by nextElement upon end of stream;
+     *  return it later when they ask for LT past the end of input.
+     */
+    public T eof = null;
+
+    /** Track the last mark() call result value for use in rewind(). */
+    protected int lastMarker;
+
+    /** tracks how deep mark() calls are nested */
+    protected int markDepth = 0;
+
+	@Override
+    public void reset() {
+        super.reset();
+        currentElementIndex = 0;
+        p = 0;
+        prevElement = null;
+    }
+    
+    /** Implement nextElement to supply a stream of elements to this
+     *  lookahead buffer.  Return EOF upon end of the stream we're pulling from.
+     *
+     * @see #isEOF
+     */
+    public abstract T nextElement();
+
+    public abstract boolean isEOF(T o);
+
+    /**
+     * Get and remove first element in queue; override
+     * {@link FastQueue#remove()}; it's the same, just checks for backtracking.
+     */
+	@Override
+    public T remove() {
+        T o = elementAt(0);
+        p++;
+        // have we hit end of buffer and not backtracking?
+        if ( p == data.size() && markDepth==0 ) {
+            prevElement = o;
+            // if so, it's an opportunity to start filling at index 0 again
+            clear(); // size goes to 0, but retains memory
+        }
+        return o;
+    }
+
+    /** Make sure we have at least one element to remove, even if EOF */
+    public void consume() {
+        syncAhead(1);
+        remove();
+        currentElementIndex++;
+    }
+
+    /** Make sure we have 'need' elements from current position p. Last valid
+     *  p index is data.size()-1.  p+need-1 is the data index 'need' elements
+     *  ahead.  If we need 1 element, (p+1-1)==p must be &lt; data.size().
+     */
+    protected void syncAhead(int need) {
+        int n = (p+need-1) - data.size() + 1; // how many more elements we need?
+        if ( n > 0 ) fill(n);                 // out of elements?
+    }
+
+    /** add n elements to buffer */
+    public void fill(int n) {
+        for (int i=1; i<=n; i++) {
+            T o = nextElement();
+            if ( isEOF(o) ) eof = o;
+            data.add(o);
+        }
+    }
+
+    /** Size of entire stream is unknown; we only know buffer size from FastQueue. */
+	@Override
+    public int size() { throw new UnsupportedOperationException("streams are of unknown size"); }
+
+    public T LT(int k) {
+		if ( k==0 ) {
+			return null;
+		}
+		if ( k<0 ) return LB(-k);
+		//System.out.print("LT(p="+p+","+k+")=");
+        syncAhead(k);
+        if ( (p+k-1) > data.size() ) return eof;
+        return elementAt(k-1);
+	}
+
+    public int index() { return currentElementIndex; }
+
+	public int mark() {
+        markDepth++;
+        lastMarker = p; // track where we are in buffer not absolute token index
+        return lastMarker;
+	}
+
+	public void release(int marker) {
+		// no resources to release
+	}
+
+	public void rewind(int marker) {
+		markDepth--;
+		int delta = p - marker;
+		currentElementIndex -= delta;
+		p = marker;
+	}
+
+	public void rewind() {
+		// rewind but do not release marker
+		int delta = p - lastMarker;
+		currentElementIndex -= delta;
+		p = lastMarker;
+	}
+
+	/**
+	 * Seek to a 0-indexed absolute token index. Normally used to seek backwards
+	 * in the buffer. Does not force loading of nodes.
+	 * <p>
+	 * To preserve backward compatibility, this method allows seeking past the
+	 * end of the currently buffered data. In this case, the input pointer will
+	 * be moved but the data will only actually be loaded upon the next call to
+	 * {@link #consume} or {@link #LT} for {@code k>0}.</p>
+	 *
+	 * @throws IllegalArgumentException if {@code index} is less than 0
+	 * @throws UnsupportedOperationException if {@code index} lies before the
+	 * beginning of the moving window buffer
+	 * ({@code index < }{@link #currentElementIndex currentElementIndex}<code> - </code>{@link #p p}).
+	 */
+    public void seek(int index) {
+        if (index < 0) {
+            throw new IllegalArgumentException("can't seek before the beginning of the input");
+        }
+
+        int delta = currentElementIndex - index;
+        if (p - delta < 0) {
+            throw new UnsupportedOperationException("can't seek before the beginning of this stream's buffer");
+        }
+
+        p -= delta;
+        currentElementIndex = index;
+    }
+
+    protected T LB(int k) {
+        assert k > 0;
+
+        int index = p - k;
+        if (index == -1) {
+            return prevElement;
+        }
+
+        // if k>0 then we know index < data.size(). avoid the double-check for
+        // performance.
+        if (index >= 0 /*&& index < data.size()*/) {
+            return data.get(index);
+        }
+
+        if (index < -1) {
+            throw new UnsupportedOperationException("can't look more than one token before the beginning of this stream's buffer");
+        }
+
+        throw new UnsupportedOperationException("can't look past the end of this stream's buffer using LB(int)");
+    }
+}
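A toy subclass, just to exercise the LT/mark/rewind behavior documented above. WordStream and its EOF_TOKEN sentinel are invented for this sketch and are not part of the runtime; nextElement() must keep returning the same EOF sentinel after the input is exhausted, since fill() may be asked for more elements.

import org.antlr.runtime.misc.LookaheadStream;

public class WordStream extends LookaheadStream<String> {
    static final String EOF_TOKEN = "<EOF>";   // single shared sentinel, compared by identity
    private final String[] words;
    private int next = 0;

    public WordStream(String... words) { this.words = words; }

    @Override public String nextElement() {
        return next < words.length ? words[next++] : EOF_TOKEN;
    }

    @Override public boolean isEOF(String o) { return o == EOF_TOKEN; }

    public static void main(String[] args) {
        WordStream s = new WordStream("x", "y", "z");
        System.out.println(s.LT(1));   // x
        int m = s.mark();              // markers pin the buffer so remove() cannot reset it
        s.consume(); s.consume();
        System.out.println(s.LT(1));   // z
        s.rewind(m);                   // back to where mark() was taken
        System.out.println(s.LT(1));   // x again
    }
}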
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/Stats.java b/runtime/Java/src/main/java/org/antlr/runtime/misc/Stats.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/misc/Stats.java
rename to runtime/Java/src/main/java/org/antlr/runtime/misc/Stats.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTree.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTree.java
new file mode 100644
index 0000000..81620a0
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTree.java
@@ -0,0 +1,401 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** A generic tree implementation with no payload.  You must subclass to
+ *  actually have any user data.  ANTLR v3 uses a list of children approach
+ *  instead of the child-sibling approach in v2.  A flat tree (a list) is
+ *  an empty node whose children represent the list.  An empty, but
+ *  non-null node is called "nil".
+ */
+public abstract class BaseTree implements Tree {
+	protected List<Object> children;
+
+	public BaseTree() {
+	}
+
+	/** Creating a new node from an existing node does nothing for BaseTree
+	 *  as there are no fields other than the children list, which cannot
+	 *  be copied as the children are not considered part of this node.
+	 */
+	public BaseTree(Tree node) {
+	}
+
+	@Override
+	public Tree getChild(int i) {
+		if ( children==null || i>=children.size() ) {
+			return null;
+		}
+		return (Tree)children.get(i);
+	}
+
+	/** Get the children internal List; note that if you directly mess with
+	 *  the list, do so at your own risk.
+	 */
+	public List<? extends Object> getChildren() {
+		return children;
+	}
+
+	public Tree getFirstChildWithType(int type) {
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			Tree t = (Tree) children.get(i);
+			if ( t.getType()==type ) {
+				return t;
+			}
+		}
+		return null;
+	}
+
+	@Override
+	public int getChildCount() {
+		if ( children==null ) {
+			return 0;
+		}
+		return children.size();
+	}
+
+	/** Add t as child of this node.
+	 *
+	 *  Warning: if this node has no children, but t does and t.isNil(),
+	 *  then this routine moves t's children to this node via
+	 *  this.children = t.children; i.e., without copying the list.
+	 */
+	@Override
+	public void addChild(Tree t) {
+		//System.out.println("add child "+t.toStringTree()+" "+this.toStringTree());
+		//System.out.println("existing children: "+children);
+		if ( t==null ) {
+			return; // do nothing upon addChild(null)
+		}
+		BaseTree childTree = (BaseTree)t;
+		if ( childTree.isNil() ) { // t is an empty node possibly with children
+			if ( this.children!=null && this.children == childTree.children ) {
+				throw new RuntimeException("attempt to add child list to itself");
+			}
+			// just add all of childTree's children to this
+			if ( childTree.children!=null ) {
+				if ( this.children!=null ) { // must copy, this has children already
+					int n = childTree.children.size();
+					for (int i = 0; i < n; i++) {
+						Tree c = (Tree)childTree.children.get(i);
+						this.children.add(c);
+						// handle double-link stuff for each child of nil root
+						c.setParent(this);
+						c.setChildIndex(children.size()-1);
+					}
+				}
+				else {
+					// no children for this but t has children; just set pointer
+					// call general freshener routine
+					this.children = childTree.children;
+					this.freshenParentAndChildIndexes();
+				}
+			}
+		}
+		else { // child is not nil (don't care about children)
+			if ( children==null ) {
+				children = createChildrenList(); // create children list on demand
+			}
+			children.add(t);
+			childTree.setParent(this);
+			childTree.setChildIndex(children.size()-1);
+		}
+		// System.out.println("now children are: "+children);
+	}
+
+	/** Add all elements of kids list as children of this node */
+	public void addChildren(List<? extends Tree> kids) {
+		for (int i = 0; i < kids.size(); i++) {
+			Tree t = kids.get(i);
+			addChild(t);
+		}
+	}
+
+	@Override
+	public void setChild(int i, Tree t) {
+		if ( t==null ) {
+			return;
+		}
+		if ( t.isNil() ) {
+			throw new IllegalArgumentException("Can't set single child to a list");
+		}
+		if ( children==null ) {
+			children = createChildrenList();
+		}
+		children.set(i, t);
+		t.setParent(this);
+		t.setChildIndex(i);
+	}
+
+	/** Insert child t at child position i (0..n-1) by shifting children
+		i..n-1 to the right one position. Sets parent / indexes properly
+	 	but does NOT collapse nil-rooted t's that come in here like addChild does.
+	 */
+	public void insertChild(int i, Object t) {
+		if (i < 0 || i > getChildCount()) {
+			throw new IndexOutOfBoundsException(i+" out of range");
+		}
+
+		if (children == null) {
+			children = createChildrenList();
+		}
+
+		children.add(i, t);
+		// walk others to increment their child indexes
+		// set index, parent of this one too
+		this.freshenParentAndChildIndexes(i);
+	}
+
+	@Override
+	public Object deleteChild(int i) {
+		if ( children==null ) {
+			return null;
+		}
+		Tree killed = (Tree)children.remove(i);
+		// walk rest and decrement their child indexes
+		this.freshenParentAndChildIndexes(i);
+		return killed;
+	}
+
+	/** Delete children from start to stop and replace with t even if t is
+	 *  a list (nil-root tree).  num of children can increase or decrease.
+	 *  For huge child lists, inserting children can force walking rest of
+	 *  children to set their childindex; could be slow.
+	 */
+	@Override
+	public void replaceChildren(int startChildIndex, int stopChildIndex, Object t) {
+		/*
+		System.out.println("replaceChildren "+startChildIndex+", "+stopChildIndex+
+						   " with "+((BaseTree)t).toStringTree());
+		System.out.println("in="+toStringTree());
+		*/
+		if ( children==null ) {
+			throw new IllegalArgumentException("indexes invalid; no children in list");
+		}
+		int replacingHowMany = stopChildIndex - startChildIndex + 1;
+		int replacingWithHowMany;
+		BaseTree newTree = (BaseTree)t;
+		List<Object> newChildren;
+		// normalize to a list of children to add: newChildren
+		if ( newTree.isNil() ) {
+			newChildren = newTree.children;
+		}
+		else {
+			newChildren = new ArrayList<Object>(1);
+			newChildren.add(newTree);
+		}
+		replacingWithHowMany = newChildren.size();
+		int numNewChildren = newChildren.size();
+		int delta = replacingHowMany - replacingWithHowMany;
+		// if same number of nodes, do direct replace
+		if ( delta == 0 ) {
+			int j = 0; // index into new children
+			for (int i=startChildIndex; i<=stopChildIndex; i++) {
+				BaseTree child = (BaseTree)newChildren.get(j);
+				children.set(i, child);
+				child.setParent(this);
+				child.setChildIndex(i);
+                j++;
+            }
+		}
+		else if ( delta > 0 ) { // fewer new nodes than there were
+			// set children and then delete extra
+			for (int j=0; j<numNewChildren; j++) {
+				children.set(startChildIndex+j, newChildren.get(j));
+			}
+			int indexToDelete = startChildIndex+numNewChildren;
+			for (int c=indexToDelete; c<=stopChildIndex; c++) {
+				// delete same index, shifting everybody down each time
+				children.remove(indexToDelete);
+			}
+			freshenParentAndChildIndexes(startChildIndex);
+		}
+		else { // more new nodes than were there before
+			// fill in as many children as we can (replacingHowMany) w/o moving data
+			for (int j=0; j<replacingHowMany; j++) {
+				children.set(startChildIndex+j, newChildren.get(j));
+			}
+			int numToInsert = replacingWithHowMany-replacingHowMany;
+			for (int j=replacingHowMany; j<replacingWithHowMany; j++) {
+				children.add(startChildIndex+j, newChildren.get(j));
+			}
+			freshenParentAndChildIndexes(startChildIndex);
+		}
+		//System.out.println("out="+toStringTree());
+	}
+
+	/** Override in a subclass to change the impl of children list */
+	protected List<Object> createChildrenList() {
+		return new ArrayList<Object>();
+	}
+
+	@Override
+	public boolean isNil() {
+		return false;
+	}
+
+	/** Set the parent and child index values for all children of this node */
+	@Override
+	public void freshenParentAndChildIndexes() {
+		freshenParentAndChildIndexes(0);
+	}
+
+	public void freshenParentAndChildIndexes(int offset) {
+		int n = getChildCount();
+		for (int c = offset; c < n; c++) {
+			Tree child = getChild(c);
+			child.setChildIndex(c);
+			child.setParent(this);
+		}
+	}
+
+	public void freshenParentAndChildIndexesDeeply() {
+		freshenParentAndChildIndexesDeeply(0);
+	}
+
+	public void freshenParentAndChildIndexesDeeply(int offset) {
+		int n = getChildCount();
+		for (int c = offset; c < n; c++) {
+			BaseTree child = (BaseTree)getChild(c);
+			child.setChildIndex(c);
+			child.setParent(this);
+			child.freshenParentAndChildIndexesDeeply();
+		}
+	}
+
+	public void sanityCheckParentAndChildIndexes() {
+		sanityCheckParentAndChildIndexes(null, -1);
+	}
+
+	public void sanityCheckParentAndChildIndexes(Tree parent, int i) {
+		if ( parent!=this.getParent() ) {
+			throw new IllegalStateException("parents don't match; expected "+parent+" found "+this.getParent());
+		}
+		if ( i!=this.getChildIndex() ) {
+			throw new IllegalStateException("child indexes don't match; expected "+i+" found "+this.getChildIndex());
+		}
+		int n = this.getChildCount();
+		for (int c = 0; c < n; c++) {
+			CommonTree child = (CommonTree)this.getChild(c);
+			child.sanityCheckParentAndChildIndexes(this, c);
+		}
+	}
+
+	/** BaseTree doesn't track child indexes. */
+	@Override
+	public int getChildIndex() {
+		return 0;
+	}
+	@Override
+	public void setChildIndex(int index) {
+	}
+
+	/** BaseTree doesn't track parent pointers. */
+	@Override
+	public Tree getParent() {
+		return null;
+	}
+
+	@Override
+    public void setParent(Tree t) {
+	}
+
+    /** Walk upwards looking for ancestor with this token type. */
+	@Override
+    public boolean hasAncestor(int ttype) { return getAncestor(ttype)!=null; }
+
+    /** Walk upwards and get first ancestor with this token type. */
+	@Override
+    public Tree getAncestor(int ttype) {
+        Tree t = this;
+        t = t.getParent();
+        while ( t!=null ) {
+            if ( t.getType()==ttype ) return t;
+            t = t.getParent();
+        }
+        return null;
+    }
+
+    /** Return a list of all ancestors of this node.  The first node of
+     *  list is the root and the last is the parent of this node.
+     */
+	@Override
+    public List<? extends Tree> getAncestors() {
+        if ( getParent()==null ) return null;
+        List<Tree> ancestors = new ArrayList<Tree>();
+        Tree t = this;
+        t = t.getParent();
+        while ( t!=null ) {
+            ancestors.add(0, t); // insert at start
+            t = t.getParent();
+        }
+        return ancestors;
+    }
+
+    /** Print out a whole tree not just a node */
+	@Override
+    public String toStringTree() {
+		if ( children==null || children.isEmpty() ) {
+			return this.toString();
+		}
+		StringBuilder buf = new StringBuilder();
+		if ( !isNil() ) {
+			buf.append("(");
+			buf.append(this.toString());
+			buf.append(' ');
+		}
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			Tree t = (Tree)children.get(i);
+			if ( i>0 ) {
+				buf.append(' ');
+			}
+			buf.append(t.toStringTree());
+		}
+		if ( !isNil() ) {
+			buf.append(")");
+		}
+		return buf.toString();
+	}
+
+	@Override
+    public int getLine() {
+		return 0;
+	}
+
+	@Override
+	public int getCharPositionInLine() {
+		return 0;
+	}
+
+	/** Override to say how a node (not a tree) should look as text */
+	@Override
+	public abstract String toString();
+}
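A short illustration of the nil-root flattening rule documented on addChild() above, using the concrete CommonTree and CommonToken classes added elsewhere in this change. The token type value 1 and the BaseTreeDemo class name are arbitrary choices for the sketch.

import org.antlr.runtime.CommonToken;
import org.antlr.runtime.tree.CommonTree;

public class BaseTreeDemo {
    public static void main(String[] args) {
        CommonTree root = new CommonTree(new CommonToken(1, "root"));
        CommonTree list = new CommonTree();                     // token==null, so isNil()==true: a flat "list" node
        list.addChild(new CommonTree(new CommonToken(1, "a")));
        list.addChild(new CommonTree(new CommonToken(1, "b")));
        root.addChild(list);                                    // nil root collapses: a and b become root's children
        System.out.println(root.toStringTree());                // (root a b)
    }
}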
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTreeAdaptor.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTreeAdaptor.java
new file mode 100644
index 0000000..486682e
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/BaseTreeAdaptor.java
@@ -0,0 +1,299 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+import org.antlr.runtime.RecognitionException;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/** A TreeAdaptor that works with any Tree implementation. */
+public abstract class BaseTreeAdaptor implements TreeAdaptor {
+	/** System.identityHashCode() is not always unique; we have to
+	 *  track ourselves.  That's ok, it's only for debugging, though it's
+	 *  expensive: we have to create a hashtable with all tree nodes in it.
+	 */
+	protected Map<Object, Integer> treeToUniqueIDMap;
+	protected int uniqueNodeID = 1;
+
+	@Override
+	public Object nil() {
+		return create(null);
+	}
+
+	/** Create a tree node that holds the start and stop tokens associated
+	 *  with an error.
+	 *
+	 *  If you specify your own kind of tree nodes, you will likely have to
+	 *  override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
+	 *  if there is no token payload, but you might have to set the token
+	 *  type for a different node type.
+     *
+     *  You don't have to subclass CommonErrorNode; you will likely need to
+     *  subclass your own tree node class to avoid a class cast exception.
+	 */
+	@Override
+	public Object errorNode(TokenStream input, Token start, Token stop,
+							RecognitionException e)
+	{
+		CommonErrorNode t = new CommonErrorNode(input, start, stop, e);
+		//System.out.println("returning error node '"+t+"' @index="+input.index());
+		return t;
+	}
+
+	@Override
+	public boolean isNil(Object tree) {
+		return ((Tree)tree).isNil();
+	}
+
+	@Override
+	public Object dupTree(Object tree) {
+		return dupTree(tree, null);
+	}
+
+	/** This is generic in the sense that it will work with any kind of
+	 *  tree (not just the Tree interface).  It invokes the adaptor routines,
+	 *  not the tree node routines, to do the construction.
+	 */
+	public Object dupTree(Object t, Object parent) {
+		if ( t==null ) {
+			return null;
+		}
+		Object newTree = dupNode(t);
+		// ensure new subtree root has parent/child index set
+		setChildIndex(newTree, getChildIndex(t)); // same index in new tree
+		setParent(newTree, parent);
+		int n = getChildCount(t);
+		for (int i = 0; i < n; i++) {
+			Object child = getChild(t, i);
+			Object newSubTree = dupTree(child, t);
+			addChild(newTree, newSubTree);
+		}
+		return newTree;
+	}
+
+	/** Add a child to the tree t.  If child is a flat tree (a list), make all
+	 *  elements in the list children of t.  Warning: if t has no children, but
+	 *  child does and child isNil, then you can decide it is ok to move children
+	 *  to t via t.children = child.children; i.e., without copying the array.
+	 *  Just make sure that this is consistent with how the user will build
+	 *  ASTs.
+	 */
+	@Override
+	public void addChild(Object t, Object child) {
+		if ( t!=null && child!=null ) {
+			((Tree)t).addChild((Tree)child);
+		}
+	}
+
+	/** If oldRoot is a nil root, just copy or move the children to newRoot.
+	 *  If not a nil root, make oldRoot a child of newRoot.
+	 *
+	 *    old=^(nil a b c), new=r yields ^(r a b c)
+	 *    old=^(a b c), new=r yields ^(r ^(a b c))
+	 *
+	 *  If newRoot is a nil-rooted single child tree, use the single
+	 *  child as the new root node.
+	 *
+	 *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
+	 *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
+	 *
+	 *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
+	 *
+	 *    old=null, new=r yields r
+	 *    old=null, new=^(nil r) yields ^(nil r)
+	 *
+	 *  Return newRoot.  Throw an exception if newRoot is not a
+	 *  simple node or nil root with a single child node--it must be a root
+	 *  node.  If newRoot is ^(nil x) return x as newRoot.
+	 *
+	 *  Be advised that it's ok for newRoot to point at oldRoot's
+	 *  children; i.e., you don't have to copy the list.  We are
+	 *  constructing these nodes so we should have this control for
+	 *  efficiency.
+	 */
+	@Override
+	public Object becomeRoot(Object newRoot, Object oldRoot) {
+        //System.out.println("becomeroot new "+newRoot.toString()+" old "+oldRoot);
+        Tree newRootTree = (Tree)newRoot;
+		Tree oldRootTree = (Tree)oldRoot;
+		if ( oldRoot==null ) {
+			return newRoot;
+		}
+		// handle ^(nil real-node)
+		if ( newRootTree.isNil() ) {
+            int nc = newRootTree.getChildCount();
+            if ( nc==1 ) newRootTree = newRootTree.getChild(0);
+            else if ( nc >1 ) {
+				// TODO: make tree run time exceptions hierarchy
+				throw new RuntimeException("more than one node as root (TODO: make exception hierarchy)");
+			}
+        }
+		// add oldRoot to newRoot; addChild takes care of case where oldRoot
+		// is a flat list (i.e., nil-rooted tree).  All children of oldRoot
+		// are added to newRoot.
+		newRootTree.addChild(oldRootTree);
+		return newRootTree;
+	}
+
+	/** Transform ^(nil x) to x and nil to null */
+	@Override
+	public Object rulePostProcessing(Object root) {
+		//System.out.println("rulePostProcessing: "+((Tree)root).toStringTree());
+		Tree r = (Tree)root;
+		if ( r!=null && r.isNil() ) {
+			if ( r.getChildCount()==0 ) {
+				r = null;
+			}
+			else if ( r.getChildCount()==1 ) {
+				r = r.getChild(0);
+				// whoever invokes rule will set parent and child index
+				r.setParent(null);
+				r.setChildIndex(-1);
+			}
+		}
+		return r;
+	}
+
+	@Override
+	public Object becomeRoot(Token newRoot, Object oldRoot) {
+		return becomeRoot(create(newRoot), oldRoot);
+	}
+
+	@Override
+	public Object create(int tokenType, Token fromToken) {
+		fromToken = createToken(fromToken);
+		//((ClassicToken)fromToken).setType(tokenType);
+		fromToken.setType(tokenType);
+		Tree t = (Tree)create(fromToken);
+		return t;
+	}
+
+	@Override
+	public Object create(int tokenType, Token fromToken, String text) {
+        if (fromToken == null) return create(tokenType, text);
+		fromToken = createToken(fromToken);
+		fromToken.setType(tokenType);
+		fromToken.setText(text);
+		Tree t = (Tree)create(fromToken);
+		return t;
+	}
+
+	@Override
+	public Object create(int tokenType, String text) {
+		Token fromToken = createToken(tokenType, text);
+		Tree t = (Tree)create(fromToken);
+		return t;
+	}
+
+	@Override
+	public int getType(Object t) {
+		return ((Tree)t).getType();
+	}
+
+	@Override
+	public void setType(Object t, int type) {
+		throw new NoSuchMethodError("don't know enough about Tree node");
+	}
+
+	@Override
+	public String getText(Object t) {
+		return ((Tree)t).getText();
+	}
+
+	@Override
+	public void setText(Object t, String text) {
+		throw new NoSuchMethodError("don't know enough about Tree node");
+	}
+
+	@Override
+	public Object getChild(Object t, int i) {
+		return ((Tree)t).getChild(i);
+	}
+
+	@Override
+	public void setChild(Object t, int i, Object child) {
+		((Tree)t).setChild(i, (Tree)child);
+	}
+
+	@Override
+	public Object deleteChild(Object t, int i) {
+		return ((Tree)t).deleteChild(i);
+	}
+
+	@Override
+	public int getChildCount(Object t) {
+		return ((Tree)t).getChildCount();
+	}
+
+	@Override
+	public int getUniqueID(Object node) {
+		if ( treeToUniqueIDMap==null ) {
+			 treeToUniqueIDMap = new HashMap<Object, Integer>();
+		}
+		Integer prevID = treeToUniqueIDMap.get(node);
+		if ( prevID!=null ) {
+			return prevID;
+		}
+		int ID = uniqueNodeID;
+		treeToUniqueIDMap.put(node, ID);
+		uniqueNodeID++;
+		return ID;
+		// GC makes these nonunique:
+		// return System.identityHashCode(node);
+	}
+
+	/** Tell me how to create a token for use with imaginary token nodes.
+	 *  For example, there is probably no input symbol associated with imaginary
+	 *  token DECL, but you need to create it as a payload or whatever for
+	 *  the DECL node as in ^(DECL type ID).
+	 *
+	 *  If you care what the token payload object's type is, you should
+	 *  override this method and any other createToken variant.
+	 */
+	public abstract Token createToken(int tokenType, String text);
+
+	/** Tell me how to create a token for use with imaginary token nodes.
+	 *  For example, there is probably no input symbol associated with imaginary
+	 *  token DECL, but you need to create it as a payload or whatever for
+	 *  the DECL node as in ^(DECL type ID).
+	 *
+	 *  This is a variant of createToken where the new token is derived from
+	 *  an actual real input token.  Typically this is for converting '{'
+	 *  tokens to BLOCK etc...  You'll see
+	 *
+	 *    r : lc='{' ID+ '}' -&gt; ^(BLOCK[$lc] ID+) ;
+	 *
+	 *  If you care what the token payload object's type is, you should
+	 *  override this method and any other createToken variant.
+	 */
+	public abstract Token createToken(Token fromToken);
+}
+
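A hand-driven sketch of what generated tree-construction code does with these adaptor methods (nil, addChild, becomeRoot, rulePostProcessing), building ^(+ 3 4). It uses the CommonTreeAdaptor added later in this change; the token type values 4 and 5 and the AdaptorDemo class name are placeholders for the sketch.

import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.CommonTreeAdaptor;
import org.antlr.runtime.tree.TreeAdaptor;

public class AdaptorDemo {
    public static void main(String[] args) {
        TreeAdaptor adaptor = new CommonTreeAdaptor();
        Object root = adaptor.nil();                              // ^(nil ...) work-in-progress root
        adaptor.addChild(root, adaptor.create(5, "3"));
        adaptor.addChild(root, adaptor.create(5, "4"));
        root = adaptor.becomeRoot(adaptor.create(4, "+"), root);  // old nil root's children hang off "+"
        root = adaptor.rulePostProcessing(root);                  // ^(nil x) -> x, nil -> null
        System.out.println(((CommonTree) root).toStringTree());   // (+ 3 4)
    }
}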
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/BufferedTreeNodeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/BufferedTreeNodeStream.java
new file mode 100644
index 0000000..879f2fe
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/BufferedTreeNodeStream.java
@@ -0,0 +1,500 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+import org.antlr.runtime.misc.IntArray;
+import java.util.*;
+
+/** A buffered stream of tree nodes.  Nodes can be from a tree of ANY kind.
+ *
+ *  This node stream sucks all nodes out of the tree specified in
+ *  the constructor during construction and makes pointers into
+ *  the tree using an array of Object pointers. The stream necessarily
+ *  includes pointers to DOWN and UP and EOF nodes.
+ *
+ *  This stream knows how to mark/release for backtracking.
+ *
+ *  This stream is most suitable for tree interpreters that need to
+ *  jump around a lot or for tree parsers requiring speed (at cost of memory).
+ *  There is some duplicated functionality here with UnBufferedTreeNodeStream
+ *  but just in bookkeeping, not tree walking etc...
+ *
+ *  TARGET DEVELOPERS:
+ *
+ *  This is the old CommonTreeNodeStream that buffered up the entire node stream.
+ *  There is no real need to implement it, as the new CommonTreeNodeStream is much
+ *  better and covers what we need.
+ *
+ *  @see CommonTreeNodeStream
+ */
+public class BufferedTreeNodeStream implements TreeNodeStream {
+	public static final int DEFAULT_INITIAL_BUFFER_SIZE = 100;
+	public static final int INITIAL_CALL_STACK_SIZE = 10;
+
+    protected class StreamIterator implements Iterator<Object> {
+		int i = 0;
+		@Override
+		public boolean hasNext() {
+			return i<nodes.size();
+		}
+
+		@Override
+		public Object next() {
+			int current = i;
+			i++;
+			if ( current < nodes.size() ) {
+				return nodes.get(current);
+			}
+			return eof;
+		}
+
+		@Override
+		public void remove() {
+			throw new RuntimeException("cannot remove nodes from stream");
+		}
+	}
+
+	// all these navigation nodes are shared and hence they
+	// cannot contain any line/column info
+
+	protected Object down;
+	protected Object up;
+	protected Object eof;
+
+	/** The complete mapping from stream index to tree node.
+	 *  This buffer includes pointers to DOWN, UP, and EOF nodes.
+	 *  It is built upon ctor invocation.  The elements are type
+	 *  Object as we don't know what the trees look like.
+	 *
+	 *  Load upon first need of the buffer so we can set token types
+	 *  of interest for reverseIndexing.  Slows us down a wee bit to
+	 *  do all of the if p==-1 testing everywhere though.
+	 */
+	protected List<Object> nodes;
+
+	/** Pull nodes from which tree? */
+	protected Object root;
+
+	/** If this tree (root) was created from a token stream, track it. */
+	protected TokenStream tokens;
+
+	/** What tree adaptor was used to build these trees */
+	TreeAdaptor adaptor;
+
+	/** Reuse same DOWN, UP navigation nodes unless this is true */
+	protected boolean uniqueNavigationNodes = false;
+
+	/** The index into the nodes list of the current node (next node
+	 *  to consume).  If -1, nodes array not filled yet.
+	 */
+	protected int p = -1;
+
+	/** Track the last mark() call result value for use in rewind(). */
+	protected int lastMarker;
+
+	/** Stack of indexes used for push/pop calls */
+	protected IntArray calls;
+
+	public BufferedTreeNodeStream(Object tree) {
+		this(new CommonTreeAdaptor(), tree);
+	}
+
+	public BufferedTreeNodeStream(TreeAdaptor adaptor, Object tree) {
+		this(adaptor, tree, DEFAULT_INITIAL_BUFFER_SIZE);
+	}
+
+	public BufferedTreeNodeStream(TreeAdaptor adaptor, Object tree, int initialBufferSize) {
+		this.root = tree;
+		this.adaptor = adaptor;
+		nodes = new ArrayList<Object>(initialBufferSize);
+		down = adaptor.create(Token.DOWN, "DOWN");
+		up = adaptor.create(Token.UP, "UP");
+		eof = adaptor.create(Token.EOF, "EOF");
+	}
+
+	/** Walk tree with depth-first-search and fill nodes buffer.
+	 *  Don't do DOWN, UP nodes if it's a list (t.isNil() is true).
+	 */
+	protected void fillBuffer() {
+		fillBuffer(root);
+		//System.out.println("revIndex="+tokenTypeToStreamIndexesMap);
+		p = 0; // buffer of nodes initialized now
+	}
+
+	public void fillBuffer(Object t) {
+		boolean nil = adaptor.isNil(t);
+		if ( !nil ) {
+			nodes.add(t); // add this node
+		}
+		// add DOWN node if t has children
+		int n = adaptor.getChildCount(t);
+		if ( !nil && n>0 ) {
+			addNavigationNode(Token.DOWN);
+		}
+		// and now add all its children
+		for (int c=0; c<n; c++) {
+			Object child = adaptor.getChild(t,c);
+			fillBuffer(child);
+		}
+		// add UP node if t has children
+		if ( !nil && n>0 ) {
+			addNavigationNode(Token.UP);
+		}
+	}
+
+	/** What is the stream index for node? 0..n-1
+	 *  Return -1 if node not found.
+	 */
+	protected int getNodeIndex(Object node) {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		for (int i = 0; i < nodes.size(); i++) {
+			Object t = nodes.get(i);
+			if ( t==node ) {
+				return i;
+			}
+		}
+		return -1;
+	}
+
+	/** As we flatten the tree, we use UP, DOWN nodes to represent
+	 *  the tree structure.  When debugging we need unique nodes
+	 *  so instantiate new ones when uniqueNavigationNodes is true.
+	 */
+	protected void addNavigationNode(final int ttype) {
+		Object navNode;
+		if ( ttype==Token.DOWN ) {
+			if ( hasUniqueNavigationNodes() ) {
+				navNode = adaptor.create(Token.DOWN, "DOWN");
+			}
+			else {
+				navNode = down;
+			}
+		}
+		else {
+			if ( hasUniqueNavigationNodes() ) {
+				navNode = adaptor.create(Token.UP, "UP");
+			}
+			else {
+				navNode = up;
+			}
+		}
+		nodes.add(navNode);
+	}
+
+	@Override
+	public Object get(int i) {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		return nodes.get(i);
+	}
+
+	@Override
+	public Object LT(int k) {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		if ( k==0 ) {
+			return null;
+		}
+		if ( k<0 ) {
+			return LB(-k);
+		}
+		//System.out.print("LT(p="+p+","+k+")=");
+		if ( (p+k-1) >= nodes.size() ) {
+			return eof;
+		}
+		return nodes.get(p+k-1);
+	}
+
+	public Object getCurrentSymbol() { return LT(1); }
+
+/*
+	public Object getLastTreeNode() {
+		int i = index();
+		if ( i>=size() ) {
+			i--; // if at EOF, have to start one back
+		}
+		System.out.println("start last node: "+i+" size=="+nodes.size());
+		while ( i>=0 &&
+			(adaptor.getType(get(i))==Token.EOF ||
+			 adaptor.getType(get(i))==Token.UP ||
+			 adaptor.getType(get(i))==Token.DOWN) )
+		{
+			i--;
+		}
+		System.out.println("stop at node: "+i+" "+nodes.get(i));
+		return nodes.get(i);
+	}
+*/
+	
+	/** Look backwards k nodes */
+	protected Object LB(int k) {
+		if ( k==0 ) {
+			return null;
+		}
+		if ( (p-k)<0 ) {
+			return null;
+		}
+		return nodes.get(p-k);
+	}
+
+	@Override
+	public Object getTreeSource() {
+		return root;
+	}
+
+	@Override
+	public String getSourceName() {
+		return getTokenStream().getSourceName();
+	}
+
+	@Override
+	public TokenStream getTokenStream() {
+		return tokens;
+	}
+
+	public void setTokenStream(TokenStream tokens) {
+		this.tokens = tokens;
+	}
+
+	@Override
+	public TreeAdaptor getTreeAdaptor() {
+		return adaptor;
+	}
+
+	public void setTreeAdaptor(TreeAdaptor adaptor) {
+		this.adaptor = adaptor;
+	}
+
+	public boolean hasUniqueNavigationNodes() {
+		return uniqueNavigationNodes;
+	}
+
+	@Override
+	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) {
+		this.uniqueNavigationNodes = uniqueNavigationNodes;
+	}
+
+	@Override
+	public void consume() {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		p++;
+	}
+
+	@Override
+	public int LA(int i) {
+		return adaptor.getType(LT(i));
+	}
+
+	@Override
+	public int mark() {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		lastMarker = index();
+		return lastMarker;
+	}
+
+	@Override
+	public void release(int marker) {
+		// no resources to release
+	}
+
+	@Override
+	public int index() {
+		return p;
+	}
+
+	@Override
+	public void rewind(int marker) {
+		seek(marker);
+	}
+
+	@Override
+	public void rewind() {
+		seek(lastMarker);
+	}
+
+	@Override
+	public void seek(int index) {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		p = index;
+	}
+
+	/** Make stream jump to a new location, saving old location.
+	 *  Switch back with pop().
+	 */
+	public void push(int index) {
+		if ( calls==null ) {
+			calls = new IntArray();
+		}
+		calls.push(p); // save current index
+		seek(index);
+	}
+
+	/** Seek back to previous index saved during last push() call.
+	 *  Return top of stack (return index).
+	 */
+	public int pop() {
+		int ret = calls.pop();
+		seek(ret);
+		return ret;
+	}
+
+	@Override
+	public void reset() {
+		p = 0;
+		lastMarker = 0;
+        if (calls != null) {
+            calls.clear();
+        }
+    }
+
+	@Override
+	public int size() {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		return nodes.size();
+	}
+
+	public Iterator<Object> iterator() {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		return new StreamIterator();
+	}
+
+	// TREE REWRITE INTERFACE
+
+	@Override
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
+		if ( parent!=null ) {
+			adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t);
+		}
+	}
+
+	/** Used for testing, just return the token type stream */
+	public String toTokenTypeString() {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		StringBuilder buf = new StringBuilder();
+		for (int i = 0; i < nodes.size(); i++) {
+			Object t = nodes.get(i);
+			buf.append(" ");
+			buf.append(adaptor.getType(t));
+		}
+		return buf.toString();
+	}
+
+	/** Debugging */
+	public String toTokenString(int start, int stop) {
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		StringBuilder buf = new StringBuilder();
+		for (int i = start; i < nodes.size() && i <= stop; i++) {
+			Object t = nodes.get(i);
+			buf.append(" ");
+			buf.append(adaptor.getToken(t));
+		}
+		return buf.toString();
+	}
+
+	@Override
+	public String toString(Object start, Object stop) {
+		System.out.println("toString");
+		if ( start==null || stop==null ) {
+			return null;
+		}
+		if ( p==-1 ) {
+			fillBuffer();
+		}
+		//System.out.println("stop: "+stop);
+		if ( start instanceof CommonTree )
+			System.out.print("toString: "+((CommonTree)start).getToken()+", ");
+		else
+			System.out.println(start);
+		if ( stop instanceof CommonTree )
+			System.out.println(((CommonTree)stop).getToken());
+		else
+			System.out.println(stop);
+		// if we have the token stream, use that to dump text in order
+		if ( tokens!=null ) {
+			int beginTokenIndex = adaptor.getTokenStartIndex(start);
+			int endTokenIndex = adaptor.getTokenStopIndex(stop);
+			// if it's a tree, use start/stop index from start node
+			// else use token range from start/stop nodes
+			if ( adaptor.getType(stop)==Token.UP ) {
+				endTokenIndex = adaptor.getTokenStopIndex(start);
+			}
+			else if ( adaptor.getType(stop)==Token.EOF ) {
+				endTokenIndex = size()-2; // don't use EOF
+			}
+			return tokens.toString(beginTokenIndex, endTokenIndex);
+		}
+		// walk nodes looking for start
+		Object t;
+		int i = 0;
+		for (; i < nodes.size(); i++) {
+			t = nodes.get(i);
+			if ( t==start ) {
+				break;
+			}
+		}
+		// now walk until we see stop, filling string buffer with text
+		StringBuilder buf = new StringBuilder();
+		t = nodes.get(i);
+		while ( t!=stop ) {
+			String text = adaptor.getText(t);
+			if ( text==null ) {
+				text = " "+String.valueOf(adaptor.getType(t));
+			}
+			buf.append(text);
+			i++;
+			t = nodes.get(i);
+		}
+		// include stop node too
+		String text = adaptor.getText(stop);
+		if ( text==null ) {
+			text = " "+String.valueOf(adaptor.getType(stop));
+		}
+		buf.append(text);
+		return buf.toString();
+	}
+}
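A quick look at the DOWN/UP flattening described in the class comment above, again using CommonTree and CommonToken from this change. The token types 4 and 5 and the NodeStreamDemo class name are placeholders; DOWN (2) and UP (3) are the runtime's own navigation token types.

import org.antlr.runtime.CommonToken;
import org.antlr.runtime.tree.BufferedTreeNodeStream;
import org.antlr.runtime.tree.CommonTree;

public class NodeStreamDemo {
    public static void main(String[] args) {
        CommonTree plus = new CommonTree(new CommonToken(4, "+"));
        plus.addChild(new CommonTree(new CommonToken(5, "3")));
        plus.addChild(new CommonTree(new CommonToken(5, "4")));
        BufferedTreeNodeStream nodes = new BufferedTreeNodeStream(plus);
        // root, DOWN, children, UP: prints the types " 4 2 5 5 3"
        System.out.println(nodes.toTokenTypeString());
        System.out.println(nodes.LA(1)); // 4: type of the root node, nothing consumed yet
    }
}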
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonErrorNode.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonErrorNode.java
new file mode 100644
index 0000000..b3fb375
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonErrorNode.java
@@ -0,0 +1,112 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.*;
+
+/** A node representing an erroneous token range in the token stream */
+public class CommonErrorNode extends CommonTree {
+	public IntStream input;
+	public Token start;
+	public Token stop;
+	public RecognitionException trappedException;
+
+	public CommonErrorNode(TokenStream input, Token start, Token stop,
+						   RecognitionException e)
+	{
+		//System.out.println("start: "+start+", stop: "+stop);
+		if ( stop==null ||
+			 (stop.getTokenIndex() < start.getTokenIndex() &&
+			  stop.getType()!=Token.EOF) )
+		{
+			// sometimes resync does not consume a token (when LT(1) is
+			// in the follow set), so stop will be one token to the left of start; adjust.
+			// Also handle case where start is the first token and no token
+			// is consumed during recovery; LT(-1) will return null.
+			stop = start;
+		}
+		this.input = input;
+		this.start = start;
+		this.stop = stop;
+		this.trappedException = e;
+	}
+
+	@Override
+	public boolean isNil() {
+		return false;
+	}
+
+	@Override
+	public int getType() {
+		return Token.INVALID_TOKEN_TYPE;
+	}
+
+	@Override
+	public String getText() {
+		String badText;
+		if ( start instanceof Token ) {
+			int i = start.getTokenIndex();
+			int j = stop.getTokenIndex();
+			if ( stop.getType() == Token.EOF ) {
+				j = ((TokenStream)input).size();
+			}
+			badText = ((TokenStream)input).toString(i, j);
+		}
+		else if ( start instanceof Tree ) {
+			badText = ((TreeNodeStream)input).toString(start, stop);
+		}
+		else {
+			// people should subclass if they alter the tree type so this
+			// next one is for sure correct.
+			badText = "<unknown>";
+		}
+		return badText;
+	}
+
+	@Override
+	public String toString() {
+		if ( trappedException instanceof MissingTokenException ) {
+			return "<missing type: "+
+				   ((MissingTokenException)trappedException).getMissingType()+
+				   ">";
+		}
+		else if ( trappedException instanceof UnwantedTokenException ) {
+			return "<extraneous: "+
+				   ((UnwantedTokenException)trappedException).getUnexpectedToken()+
+				   ", resync="+getText()+">";
+		}
+		else if ( trappedException instanceof MismatchedTokenException ) {
+			return "<mismatched token: "+trappedException.token+", resync="+getText()+">";
+		}
+		else if ( trappedException instanceof NoViableAltException ) {
+			return "<unexpected: "+trappedException.token+
+				   ", resync="+getText()+">";
+		}
+		return "<error: "+getText()+">";
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTree.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTree.java
new file mode 100644
index 0000000..82353c8
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTree.java
@@ -0,0 +1,200 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+/** A tree node that is a wrapper for a Token object.  After the 3.0 release,
+ *  while building the tree rewrite machinery, it became clear that computing
+ *  parent and child indexes is very difficult and cumbersome.  Better to
+ *  spend the space in every tree node.  If you don't want these extra
+ *  fields, it's easy to cut them out in your own BaseTree subclass.
+ */
+public class CommonTree extends BaseTree {
+	/** A single token is the payload */
+	public Token token;
+
+	/** What token indexes bracket all tokens associated with this node
+	 *  and below?
+	 */
+	protected int startIndex=-1, stopIndex=-1;
+
+	/** Who is the parent node of this node; if null, implies node is root */
+	public CommonTree parent;
+
+	/** What index is this node in the child list? Range: 0..n-1 */
+	public int childIndex = -1;
+
+	public CommonTree() { }
+	
+	public CommonTree(CommonTree node) {
+		super(node);
+		this.token = node.token;
+		this.startIndex = node.startIndex;
+		this.stopIndex = node.stopIndex;
+	}
+
+	public CommonTree(Token t) {
+		this.token = t;
+	}
+
+	public Token getToken() {
+		return token;
+	}
+
+	@Override
+	public Tree dupNode() {
+		return new CommonTree(this);
+	}
+
+	@Override
+	public boolean isNil() {
+		return token==null;
+	}
+
+	@Override
+	public int getType() {
+		if ( token==null ) {
+			return Token.INVALID_TOKEN_TYPE;
+		}
+		return token.getType();
+	}
+
+	@Override
+	public String getText() {
+		if ( token==null ) {
+			return null;
+		}
+		return token.getText();
+	}
+
+	@Override
+	public int getLine() {
+		if ( token==null || token.getLine()==0 ) {
+			if ( getChildCount()>0 ) {
+				return getChild(0).getLine();
+			}
+			return 0;
+		}
+		return token.getLine();
+	}
+
+	@Override
+	public int getCharPositionInLine() {
+		if ( token==null || token.getCharPositionInLine()==-1 ) {
+			if ( getChildCount()>0 ) {
+				return getChild(0).getCharPositionInLine();
+			}
+			return 0;
+		}
+		return token.getCharPositionInLine();
+	}
+
+	@Override
+	public int getTokenStartIndex() {
+		if ( startIndex==-1 && token!=null ) {
+			return token.getTokenIndex();
+		}
+		return startIndex;
+	}
+
+	@Override
+	public void setTokenStartIndex(int index) {
+		startIndex = index;
+	}
+
+	@Override
+	public int getTokenStopIndex() {
+		if ( stopIndex==-1 && token!=null ) {
+			return token.getTokenIndex();
+		}
+		return stopIndex;
+	}
+
+	@Override
+	public void setTokenStopIndex(int index) {
+		stopIndex = index;
+	}
+
+    /** For every node in this subtree, make sure its start/stop token
+     *  indexes are set.  Walk depth first, visit bottom up.  Only updates nodes
+     *  with at least one token index &lt; 0.
+     */
+    public void setUnknownTokenBoundaries() {
+        if ( children==null ) {
+            if ( startIndex<0 || stopIndex<0 ) {
+                startIndex = stopIndex = token.getTokenIndex();
+            }
+            return;
+        }
+        for (int i=0; i<children.size(); i++) {
+            ((CommonTree)children.get(i)).setUnknownTokenBoundaries();
+        }
+        if ( startIndex>=0 && stopIndex>=0 ) return; // already set
+        if ( children.size() > 0 ) {
+            CommonTree firstChild = (CommonTree)children.get(0);
+            CommonTree lastChild = (CommonTree)children.get(children.size()-1);
+            startIndex = firstChild.getTokenStartIndex();
+            stopIndex = lastChild.getTokenStopIndex();
+        }
+    }
+
+	@Override
+	public int getChildIndex() {
+		return childIndex;
+	}
+
+	@Override
+	public Tree getParent() {
+		return parent;
+	}
+
+	@Override
+	public void setParent(Tree t) {
+		this.parent = (CommonTree)t;
+	}
+
+	@Override
+	public void setChildIndex(int index) {
+		this.childIndex = index;
+	}
+
+	@Override
+	public String toString() {
+		if ( isNil() ) {
+			return "nil";
+		}
+		if ( getType()==Token.INVALID_TOKEN_TYPE ) {
+			return "<errornode>";
+		}
+		if ( token==null ) {
+			return null;
+		}
+		return token.getText();
+	}
+}
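
A minimal sketch of how CommonTree nodes can be assembled by hand outside a
generated parser, assuming hypothetical token types PLUS and INT (the
CommonToken(int,String) constructor is the one used by createToken below):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;

    public class CommonTreeSketch {
        static final int PLUS = 4, INT = 5;   // hypothetical token types

        public static void main(String[] args) {
            // Each node wraps a single Token payload.
            CommonTree root = new CommonTree(new CommonToken(PLUS, "+"));
            root.addChild(new CommonTree(new CommonToken(INT, "3")));
            root.addChild(new CommonTree(new CommonToken(INT, "4")));
            System.out.println(root.toStringTree());   // prints (+ 3 4)
        }
    }

addChild and toStringTree come from BaseTree, which CommonTree extends; in this
version of the runtime addChild also fills in the parent and childIndex fields
described above.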
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeAdaptor.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeAdaptor.java
new file mode 100644
index 0000000..c4f1661
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeAdaptor.java
@@ -0,0 +1,185 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+
+/** A TreeAdaptor that works with any Tree implementation.  It really just
+ *  provides factory methods; all the work is done by BaseTreeAdaptor.
+ *  If you would like to have different tokens created than CommonToken
+ *  objects, you need to override this and then set the parser tree adaptor to
+ *  use your subclass.
+ *
+ *  To get your parser to build nodes of a different type, override
+ *  create(Token), errorNode(), and to be safe, YourTreeClass.dupNode().
+ *  dupNode is called to duplicate nodes during rewrite operations.
+ */
+public class CommonTreeAdaptor extends BaseTreeAdaptor {
+	/** Duplicate a node.  This is part of the factory;
+	 *	override if you want another kind of node to be built.
+	 *
+	 *  I could use reflection to prevent having to override this
+	 *  but reflection is slow.
+	 */
+	@Override
+	public Object dupNode(Object t) {
+		if ( t==null ) return null;
+		return ((Tree)t).dupNode();
+	}
+
+	@Override
+	public Object create(Token payload) {
+		return new CommonTree(payload);
+	}
+
+	/** Tell me how to create a token for use with imaginary token nodes.
+	 *  For example, there is probably no input symbol associated with imaginary
+	 *  token DECL, but you need to create it as a payload or whatever for
+	 *  the DECL node as in ^(DECL type ID).
+	 *
+	 *  If you care what the token payload objects' type is, you should
+	 *  override this method and any other createToken variant.
+	 */
+	@Override
+	public Token createToken(int tokenType, String text) {
+		return new CommonToken(tokenType, text);
+	}
+
+	/** Tell me how to create a token for use with imaginary token nodes.
+	 *  For example, there is probably no input symbol associated with imaginary
+	 *  token DECL, but you need to create it as a payload or whatever for
+	 *  the DECL node as in ^(DECL type ID).
+	 *
+	 *  This is a variant of createToken where the new token is derived from
+	 *  an actual real input token.  Typically this is for converting '{'
+	 *  tokens to BLOCK etc...  You'll see
+	 *
+	 *    r : lc='{' ID+ '}' -&gt; ^(BLOCK[$lc] ID+) ;
+	 *
+	 *  If you care what the token payload objects' type is, you should
+	 *  override this method and any other createToken variant.
+	 */
+	@Override
+	public Token createToken(Token fromToken) {
+		return new CommonToken(fromToken);
+	}
+
+	/** Track start/stop token for subtree root created for a rule.
+	 *  Only works with Tree nodes.  For rules that match nothing,
+	 *  seems like this will yield start=i and stop=i-1 in a nil node.
+	 *  Might be useful info so I'll not force to be i..i.
+	 */
+	@Override
+	public void setTokenBoundaries(Object t, Token startToken, Token stopToken) {
+		if ( t==null ) return;
+		int start = 0;
+		int stop = 0;
+		if ( startToken!=null ) start = startToken.getTokenIndex();
+		if ( stopToken!=null ) stop = stopToken.getTokenIndex();
+		((Tree)t).setTokenStartIndex(start);
+		((Tree)t).setTokenStopIndex(stop);
+	}
+
+	@Override
+	public int getTokenStartIndex(Object t) {
+		if ( t==null ) return -1;
+		return ((Tree)t).getTokenStartIndex();
+	}
+
+	@Override
+	public int getTokenStopIndex(Object t) {
+		if ( t==null ) return -1;
+		return ((Tree)t).getTokenStopIndex();
+	}
+
+	@Override
+	public String getText(Object t) {
+		if ( t==null ) return null;
+		return ((Tree)t).getText();
+	}
+
+	@Override
+    public int getType(Object t) {
+		if ( t==null ) return Token.INVALID_TOKEN_TYPE;
+		return ((Tree)t).getType();
+	}
+
+	/** What is the Token associated with this node?  If
+	 *  you are not using CommonTree, then you must
+	 *  override this in your own adaptor.
+	 */
+	@Override
+	public Token getToken(Object t) {
+		if ( t instanceof CommonTree ) {
+			return ((CommonTree)t).getToken();
+		}
+		return null; // no idea what to do
+	}
+
+	@Override
+	public Object getChild(Object t, int i) {
+		if ( t==null ) return null;
+        return ((Tree)t).getChild(i);
+    }
+
+	@Override
+    public int getChildCount(Object t) {
+		if ( t==null ) return 0;
+        return ((Tree)t).getChildCount();
+    }
+
+	@Override
+	public Object getParent(Object t) {
+		if ( t==null ) return null;
+        return ((Tree)t).getParent();
+	}
+
+	@Override
+	public void setParent(Object t, Object parent) {
+        if ( t!=null ) ((Tree)t).setParent((Tree)parent);
+	}
+
+	@Override
+	public int getChildIndex(Object t) {
+        if ( t==null ) return 0;
+		return ((Tree)t).getChildIndex();
+	}
+
+	@Override
+	public void setChildIndex(Object t, int index) {
+        if ( t!=null ) ((Tree)t).setChildIndex(index);
+	}
+
+	@Override
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
+		if ( parent!=null ) {
+			((Tree)parent).replaceChildren(startChildIndex, stopChildIndex, t);
+		}
+	}
+}
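
As the class comment says, building nodes of a different type means overriding
create(Token), and dupNode() on the node class itself. A rough sketch, with a
hypothetical EvalNode type carrying one extra field:

    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.Tree;

    // Hypothetical node type with an extra payload field.
    class EvalNode extends CommonTree {
        double value;
        EvalNode(Token t) { super(t); }
        EvalNode(EvalNode other) { super(other); this.value = other.value; }
        @Override
        public Tree dupNode() { return new EvalNode(this); }   // keep duplicates of the same type
    }

    class EvalNodeAdaptor extends CommonTreeAdaptor {
        @Override
        public Object create(Token payload) { return new EvalNode(payload); }
    }

A generated AST-building parser would then be pointed at the adaptor with
something like parser.setTreeAdaptor(new EvalNodeAdaptor()); overriding
errorNode() as well is advisable if error nodes should also be EvalNode
instances.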
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeNodeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeNodeStream.java
new file mode 100644
index 0000000..29b9546
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/CommonTreeNodeStream.java
@@ -0,0 +1,246 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+import org.antlr.runtime.misc.LookaheadStream;
+import org.antlr.runtime.misc.IntArray;
+
+public class CommonTreeNodeStream extends LookaheadStream<Object> implements TreeNodeStream, PositionTrackingStream<Object> {
+	public static final int DEFAULT_INITIAL_BUFFER_SIZE = 100;
+	public static final int INITIAL_CALL_STACK_SIZE = 10;
+
+	/** Pull nodes from which tree? */
+	protected Object root;
+
+	/** If this tree (root) was created from a {@link TokenStream}, track it. */
+	protected TokenStream tokens;
+
+	/** What {@link TreeAdaptor} was used to build these trees */
+	TreeAdaptor adaptor;
+
+    /** The {@link TreeIterator} we are using. */
+    protected TreeIterator it;
+
+    /** Stack of indexes used for push/pop calls. */
+    protected IntArray calls;
+
+    /** Treat {@code (nil A B C)} trees like flat {@code A B C} streams */
+    protected boolean hasNilRoot = false;
+
+    /** Tracks tree depth.  Level=0 means we're at root node level. */
+    protected int level = 0;
+
+	/**
+	 * Tracks the last node before the start of {@link #data} which contains
+	 * position information to provide information for error reporting. This is
+	 * tracked in addition to {@link #prevElement} which may or may not contain
+	 * position information.
+	 *
+	 * @see #hasPositionInformation
+	 * @see RecognitionException#extractInformationFromTreeNodeStream
+	 */
+	protected Object previousLocationElement;
+
+	public CommonTreeNodeStream(Object tree) {
+		this(new CommonTreeAdaptor(), tree);
+	}
+
+	public CommonTreeNodeStream(TreeAdaptor adaptor, Object tree) {
+		this.root = tree;
+		this.adaptor = adaptor;
+        it = new TreeIterator(adaptor,root);
+	}
+
+	@Override
+    public void reset() {
+        super.reset();
+        it.reset();
+        hasNilRoot = false;
+        level = 0;
+		previousLocationElement = null;
+        if ( calls != null ) calls.clear();
+    }
+
+    /** Pull elements from tree iterator.  Track tree level 0..max_level.
+     *  If nil rooted tree, don't give initial nil and DOWN nor final UP.
+     */
+	@Override
+    public Object nextElement() {
+        Object t = it.next();
+        //System.out.println("pulled "+adaptor.getType(t));
+        if ( t == it.up ) {
+            level--;
+            if ( level==0 && hasNilRoot ) return it.next(); // don't give last UP; get EOF
+        }
+        else if ( t == it.down ) level++;
+        if ( level==0 && adaptor.isNil(t) ) { // if nil root, scarf nil, DOWN
+            hasNilRoot = true;
+            t = it.next(); // t is now DOWN, so get first real node next
+            level++;
+            t = it.next();
+        }
+        return t;
+    }
+
+	@Override
+	public Object remove() {
+		Object result = super.remove();
+		if (p == 0 && hasPositionInformation(prevElement)) {
+			previousLocationElement = prevElement;
+		}
+
+		return result;
+	}
+
+	@Override
+    public boolean isEOF(Object o) { return adaptor.getType(o) == Token.EOF; }
+
+	@Override
+    public void setUniqueNavigationNodes(boolean uniqueNavigationNodes) { }
+
+	@Override
+	public Object getTreeSource() {	return root; }
+
+	@Override
+	public String getSourceName() { return getTokenStream().getSourceName(); }
+
+	@Override
+	public TokenStream getTokenStream() { return tokens; }
+
+	public void setTokenStream(TokenStream tokens) { this.tokens = tokens; }
+
+	@Override
+	public TreeAdaptor getTreeAdaptor() { return adaptor; }
+
+	public void setTreeAdaptor(TreeAdaptor adaptor) { this.adaptor = adaptor; }
+
+	@Override
+    public Object get(int i) {
+        throw new UnsupportedOperationException("Absolute node indexes are meaningless in an unbuffered stream");
+    }
+
+	@Override
+    public int LA(int i) { return adaptor.getType(LT(i)); }
+
+    /** Make stream jump to a new location, saving old location.
+     *  Switch back with pop().
+     */
+    public void push(int index) {
+        if ( calls==null ) {
+            calls = new IntArray();
+        }
+        calls.push(p); // save current index
+        seek(index);
+    }
+
+    /** Seek back to previous index saved during last {@link #push} call.
+     *  Return top of stack (return index).
+     */
+    public int pop() {
+        int ret = calls.pop();
+        seek(ret);
+        return ret;
+    }
+
+	/**
+	 * Returns an element containing position information. If {@code allowApproximateLocation} is {@code false}, then
+	 * this method will return the {@code LT(1)} element if it contains position information, and otherwise return {@code null}.
+	 * If {@code allowApproximateLocation} is {@code true}, then this method will return the last known element containing position information.
+	 *
+	 * @see #hasPositionInformation
+	 */
+	@Override
+	public Object getKnownPositionElement(boolean allowApproximateLocation) {
+		Object node = data.get(p);
+		if (hasPositionInformation(node)) {
+			return node;
+		}
+
+		if (!allowApproximateLocation) {
+			return null;
+		}
+
+		for (int index = p - 1; index >= 0; index--) {
+			node = data.get(index);
+			if (hasPositionInformation(node)) {
+				return node;
+			}
+		}
+
+		return previousLocationElement;
+	}
+
+	@Override
+	public boolean hasPositionInformation(Object node) {
+		Token token = adaptor.getToken(node);
+		if (token == null) {
+			return false;
+		}
+
+		if (token.getLine() <= 0) {
+			return false;
+		}
+
+		return true;
+	}
+
+	// TREE REWRITE INTERFACE
+
+	@Override
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t) {
+		if ( parent!=null ) {
+			adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t);
+		}
+	}
+
+	@Override
+	public String toString(Object start, Object stop) {
+        // we'll have to walk from start to stop in tree; we're not keeping
+        // a complete node stream buffer
+        return "n/a";
+	}
+
+    /** For debugging; destructive: moves tree iterator to end. */
+    public String toTokenTypeString() {
+        reset();
+		StringBuilder buf = new StringBuilder();
+        Object o = LT(1);
+        int type = adaptor.getType(o);
+        while ( type!=Token.EOF ) {
+            buf.append(" ");
+            buf.append(type);
+            consume();
+            o = LT(1);
+            type = adaptor.getType(o);
+		}
+		return buf.toString();
+    }
+}
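
A short sketch of flattening a tree into this node stream and walking it; the
DOWN/UP navigation nodes mentioned in nextElement() appear around each child
list. Token types are again hypothetical:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.CommonTreeNodeStream;

    public class NodeStreamSketch {
        static final int PLUS = 4, INT = 5;   // hypothetical token types

        public static void main(String[] args) {
            CommonTree root = new CommonTree(new CommonToken(PLUS, "+"));
            root.addChild(new CommonTree(new CommonToken(INT, "3")));
            root.addChild(new CommonTree(new CommonToken(INT, "4")));

            CommonTreeNodeStream nodes =
                new CommonTreeNodeStream(new CommonTreeAdaptor(), root);
            // Pull nodes until EOF, printing each node and its token type.
            while (nodes.LA(1) != Token.EOF) {
                System.out.println(nodes.LT(1) + " (type " + nodes.LA(1) + ")");
                nodes.consume();
            }
        }
    }

In a generated tree grammar the same stream would typically be handed to the
tree parser constructor, usually after setTokenStream(...) so that
getSourceName() can report where the tokens came from.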
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java
new file mode 100644
index 0000000..9d2e6f7
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/DOTTreeGenerator.java
@@ -0,0 +1,224 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.stringtemplate.StringTemplate;
+
+import java.util.HashMap;
+
+/** A utility class to generate DOT diagrams (graphviz) from
+ *  arbitrary trees.  You can pass in your own templates and
+ *  can pass in any kind of tree or use the Tree interface methods.
+ *  I wanted this in a separate class so that you don't have to include
+ *  ST just to use the org.antlr.runtime.tree.* package.
+ *  This is a set of non-static methods so you can subclass
+ *  to override.  For example, here is an invocation:
+ *
+ *      CharStream input = new ANTLRInputStream(System.in);
+ *      TLexer lex = new TLexer(input);
+ *      CommonTokenStream tokens = new CommonTokenStream(lex);
+ *      TParser parser = new TParser(tokens);
+ *      TParser.e_return r = parser.e();
+ *      Tree t = (Tree)r.tree;
+ *      System.out.println(t.toStringTree());
+ *      DOTTreeGenerator gen = new DOTTreeGenerator();
+ *      StringTemplate st = gen.toDOT(t);
+ *      System.out.println(st);
+ */
+public class DOTTreeGenerator {
+
+	public static StringTemplate _treeST =
+		new StringTemplate(
+			"digraph {\n\n" +
+			"\tordering=out;\n" +
+			"\tranksep=.4;\n" +
+			"\tbgcolor=\"lightgrey\"; node [shape=box, fixedsize=false, fontsize=12, fontname=\"Helvetica-bold\", fontcolor=\"blue\"\n" +
+			"\t\twidth=.25, height=.25, color=\"black\", fillcolor=\"white\", style=\"filled, solid, bold\"];\n" +
+			"\tedge [arrowsize=.5, color=\"black\", style=\"bold\"]\n\n" +
+			"  $nodes$\n" +
+			"  $edges$\n" +
+			"}\n");
+
+	public static StringTemplate _nodeST =
+			new StringTemplate("$name$ [label=\"$text$\"];\n");
+
+	public static StringTemplate _edgeST =
+			new StringTemplate("$parent$ -> $child$ // \"$parentText$\" -> \"$childText$\"\n");
+
+	/** Track node to number mapping so we can get proper node name back */
+	HashMap<Object, Integer> nodeToNumberMap = new HashMap<Object, Integer>();
+
+	/** Track node number so we can get unique node names */
+	int nodeNumber = 0;
+
+	public StringTemplate toDOT(Object tree,
+								TreeAdaptor adaptor,
+								StringTemplate _treeST,
+								StringTemplate _edgeST)
+	{
+		StringTemplate treeST = _treeST.getInstanceOf();
+		nodeNumber = 0;
+		toDOTDefineNodes(tree, adaptor, treeST);
+		nodeNumber = 0;
+		toDOTDefineEdges(tree, adaptor, treeST);
+		/*
+		if ( adaptor.getChildCount(tree)==0 ) {
+            // single node, don't do edge.
+            treeST.add("nodes", adaptor.getText(tree));
+        }
+        */
+		return treeST;
+	}
+
+	public StringTemplate toDOT(Object tree,
+								TreeAdaptor adaptor)
+	{
+		return toDOT(tree, adaptor, _treeST, _edgeST);
+	}
+
+	/** Generate DOT (graphviz) for a whole tree not just a node.
+	 *  For example, 3+4*5 should generate:
+	 *
+	 * digraph {
+	 *   node [shape=plaintext, fixedsize=true, fontsize=11, fontname="Courier",
+	 *         width=.4, height=.2];
+	 *   edge [arrowsize=.7]
+	 *   "+"-&gt;3
+	 *   "+"-&gt;"*"
+	 *   "*"-&gt;4
+	 *   "*"-&gt;5
+	 * }
+	 *
+	 * Return the ST not a string in case people want to alter.
+	 *
+	 * Takes a Tree interface object.
+	 */
+	public StringTemplate toDOT(Tree tree) {
+		return toDOT(tree, new CommonTreeAdaptor());
+	}
+
+	protected void toDOTDefineNodes(Object tree,
+									TreeAdaptor adaptor,
+									StringTemplate treeST)
+	{
+		if ( tree==null ) {
+			return;
+		}
+		int n = adaptor.getChildCount(tree);
+		if ( n==0 ) {
+			// must have already dumped as child from previous
+			// invocation; do nothing
+			return;
+		}
+
+		// define parent node
+		StringTemplate parentNodeST = getNodeST(adaptor, tree);
+		treeST.setAttribute("nodes", parentNodeST);
+
+		// for each child, do a "<unique-name> [label=text]" node def
+		for (int i = 0; i < n; i++) {
+			Object child = adaptor.getChild(tree, i);
+			StringTemplate nodeST = getNodeST(adaptor, child);
+			treeST.setAttribute("nodes", nodeST);
+			toDOTDefineNodes(child, adaptor, treeST);
+		}
+	}
+
+	protected void toDOTDefineEdges(Object tree,
+									TreeAdaptor adaptor,
+									StringTemplate treeST)
+	{
+		if ( tree==null ) {
+			return;
+		}
+		int n = adaptor.getChildCount(tree);
+		if ( n==0 ) {
+			// must have already dumped as child from previous
+			// invocation; do nothing
+			return;
+		}
+
+		String parentName = "n"+getNodeNumber(tree);
+
+		// for each child, do a parent -> child edge using unique node names
+		String parentText = adaptor.getText(tree);
+		for (int i = 0; i < n; i++) {
+			Object child = adaptor.getChild(tree, i);
+			String childText = adaptor.getText(child);
+			String childName = "n"+getNodeNumber(child);
+			StringTemplate edgeST = _edgeST.getInstanceOf();
+			edgeST.setAttribute("parent", parentName);
+			edgeST.setAttribute("child", childName);
+			edgeST.setAttribute("parentText", fixString(parentText));
+			edgeST.setAttribute("childText", fixString(childText));
+			treeST.setAttribute("edges", edgeST);
+			toDOTDefineEdges(child, adaptor, treeST);
+		}
+	}
+
+	protected StringTemplate getNodeST(TreeAdaptor adaptor, Object t) {
+		String text = adaptor.getText(t);
+		StringTemplate nodeST = _nodeST.getInstanceOf();
+		String uniqueName = "n"+getNodeNumber(t);
+		nodeST.setAttribute("name", uniqueName);
+
+		nodeST.setAttribute("text", fixString(text));
+		return nodeST;
+	}
+
+	protected int getNodeNumber(Object t) {
+		Integer nI = nodeToNumberMap.get(t);
+		if ( nI!=null ) {
+			return nI;
+		}
+		else {
+			nodeToNumberMap.put(t, nodeNumber);
+			nodeNumber++;
+			return nodeNumber-1;
+		}
+	}
+
+    protected String fixString(String in)
+    {
+        String text = in;
+
+        if (text!=null) {
+
+            text = text.replaceAll("\"", "\\\\\"");
+            text = text.replaceAll("\\t", "    ");
+            text = text.replaceAll("\\n", "\\\\n");
+            text = text.replaceAll("\\r", "\\\\r");
+            if  (text.length() > 20)    {
+                text = text.substring(0, 8) + "..." + text.substring(text.length()-8);
+            }
+
+        }
+
+        return text;
+    }
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/ParseTree.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/ParseTree.java
new file mode 100644
index 0000000..fd30891
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/ParseTree.java
@@ -0,0 +1,127 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+import java.util.List;
+
+/** A record of the rules used to match a token sequence.  The tokens
+ *  end up as the leaves of this tree and rule nodes are the interior nodes.
+ *  This really adds no functionality; it is just an alias for CommonTree
+ *  that is more meaningful (specific) and holds a String to display for a node.
+ */
+public class ParseTree extends BaseTree {
+	public Object payload;
+	public List<Token> hiddenTokens;
+
+	public ParseTree(Object label) {
+		this.payload = label;
+	}
+
+	@Override
+	public Tree dupNode() {
+		return null;
+	}
+
+	@Override
+	public int getType() {
+		return 0;
+	}
+
+	@Override
+	public String getText() {
+		return toString();
+	}
+
+	@Override
+	public int getTokenStartIndex() {
+		return 0;
+	}
+
+	@Override
+	public void setTokenStartIndex(int index) {
+	}
+
+	@Override
+	public int getTokenStopIndex() {
+		return 0;
+	}
+
+	@Override
+	public void setTokenStopIndex(int index) {
+	}
+
+	@Override
+	public String toString() {
+		if ( payload instanceof Token ) {
+			Token t = (Token)payload;
+			if ( t.getType() == Token.EOF ) {
+				return "<EOF>";
+			}
+			return t.getText();
+		}
+		return payload.toString();
+	}
+
+	/** Emit a token and all hidden nodes before it.  The EOF node holds all
+	 *  hidden tokens after the last real token.
+	 */
+	public String toStringWithHiddenTokens() {
+		StringBuilder buf = new StringBuilder();
+		if ( hiddenTokens!=null ) {
+			for (int i = 0; i < hiddenTokens.size(); i++) {
+				Token hidden = hiddenTokens.get(i);
+				buf.append(hidden.getText());
+			}
+		}
+		String nodeText = this.toString();
+		if ( !nodeText.equals("<EOF>") ) buf.append(nodeText);
+		return buf.toString();
+	}
+
+	/** Print out the leaves of this tree, which means printing original
+	 *  input back out.
+	 */
+	public String toInputString() {
+		StringBuffer buf = new StringBuffer();
+		_toStringLeaves(buf);
+		return buf.toString();
+	}
+
+	public void _toStringLeaves(StringBuffer buf) {
+		if ( payload instanceof Token ) { // leaf node token?
+			buf.append(this.toStringWithHiddenTokens());
+			return;
+		}
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			ParseTree t = (ParseTree)children.get(i);
+			t._toStringLeaves(buf);
+		}
+	}
+}
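
A tiny sketch of the intended shape: rule names become interior ParseTree
nodes (String payloads) and matched tokens become the leaves. INT is a
hypothetical token type:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.ParseTree;

    public class ParseTreeSketch {
        static final int INT = 5;   // hypothetical token type

        public static void main(String[] args) {
            ParseTree expr = new ParseTree("expr");                    // rule node
            expr.addChild(new ParseTree(new CommonToken(INT, "42")));  // token leaf
            System.out.println(expr.toStringTree());   // prints (expr 42)
        }
    }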
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/PositionTrackingStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/PositionTrackingStream.java
new file mode 100644
index 0000000..233548a
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/PositionTrackingStream.java
@@ -0,0 +1,57 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2012 Terence Parr
+ Copyright (c) 2012 Sam Harwell
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+/**
+ *
+ * @author Sam Harwell
+ */
+public interface PositionTrackingStream<T> {
+
+	/**
+	 * Returns an element containing concrete information about the current
+	 * position in the stream.
+	 *
+	 * @param allowApproximateLocation if {@code false}, this method returns
+	 * {@code null} if an element containing exact information about the current
+	 * position is not available
+	 */
+	T getKnownPositionElement(boolean allowApproximateLocation);
+
+	/**
+	 * Determines if the specified {@code element} contains concrete position
+	 * information.
+	 *
+	 * @param element the element to check
+	 * @return {@code true} if {@code element} contains concrete position
+	 * information, otherwise {@code false}
+	 */
+	boolean hasPositionInformation(T element);
+
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteCardinalityException.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteCardinalityException.java
new file mode 100644
index 0000000..3cd6c8e
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteCardinalityException.java
@@ -0,0 +1,48 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+/** Base class for all exceptions thrown during AST rewrite construction.
+ *  This signifies a case where the cardinalities of two or more elements
+ *  in a subrule differ: (ID INT)+ where |ID|!=|INT|
+ */
+public class RewriteCardinalityException extends RuntimeException {
+	public String elementDescription;
+
+	public RewriteCardinalityException(String elementDescription) {
+		this.elementDescription = elementDescription;
+	}
+
+	@Override
+	public String getMessage() {
+		if ( elementDescription!=null ) {
+			return elementDescription;
+		}
+		return null;
+	}
+}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEarlyExitException.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEarlyExitException.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEarlyExitException.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEarlyExitException.java
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEmptyStreamException.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEmptyStreamException.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEmptyStreamException.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteEmptyStreamException.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleElementStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleElementStream.java
new file mode 100644
index 0000000..bcbaeae
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleElementStream.java
@@ -0,0 +1,211 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/** A generic list of elements tracked in an alternative to be used in
+ *  a -&gt; rewrite rule.  We need to subclass to fill in the next() method,
+ *  which returns either an AST node wrapped around a token payload or
+ *  an existing subtree.
+ *
+ *  Once you start next()ing, do not try to add more elements.  It will
+ *  break the cursor tracking I believe.
+ *
+ *  @see org.antlr.runtime.tree.RewriteRuleSubtreeStream
+ *  @see org.antlr.runtime.tree.RewriteRuleTokenStream
+ *
+ *  TODO: add mechanism to detect/puke on modification after reading from stream
+ */
+public abstract class RewriteRuleElementStream {
+	/** Cursor 0..n-1.  If singleElement!=null, cursor is 0 until you next(),
+	 *  which bumps it to 1 meaning no more elements.
+	 */
+	protected int cursor = 0;
+
+	/** Track single elements w/o creating a list.  Upon 2nd add, alloc list */
+	protected Object singleElement;
+
+	/** The list of tokens or subtrees we are tracking */
+	protected List<Object> elements;
+
+	/** Once a node / subtree has been used in a stream, it must be dup'd
+	 *  from then on.  Streams are reset after subrules so that the streams
+	 *  can be reused in future subrules.  So, reset must set a dirty bit.
+	 *  If dirty, then next() always returns a dup.
+	 *
+	 *  I wanted to use "naughty bit" here, but couldn't think of a way
+	 *  to use "naughty".
+	 *
+	 *  TODO: unused?
+	 */
+	protected boolean dirty = false;
+
+	/** The element or stream description; usually has name of the token or
+	 *  rule reference that this list tracks.  Can include rulename too, but
+	 *  the exception would track that info.
+	 */
+	protected String elementDescription;
+	protected TreeAdaptor adaptor;
+
+	public RewriteRuleElementStream(TreeAdaptor adaptor, String elementDescription) {
+		this.elementDescription = elementDescription;
+		this.adaptor = adaptor;
+	}
+
+	/** Create a stream with one element */
+	@SuppressWarnings("OverridableMethodCallInConstructor")
+	public RewriteRuleElementStream(TreeAdaptor adaptor,
+									String elementDescription,
+									Object oneElement)
+	{
+		this(adaptor, elementDescription);
+		add(oneElement);
+	}
+
+	/** Create a stream, but feed off an existing list */
+	public RewriteRuleElementStream(TreeAdaptor adaptor,
+									String elementDescription,
+									List<Object> elements)
+	{
+		this(adaptor, elementDescription);
+		this.singleElement = null;
+		this.elements = elements;
+	}
+
+	/** Reset the condition of this stream so that it appears we have
+	 *  not consumed any of its elements.  Elements themselves are untouched.
+	 *  Once we reset the stream, any future use will need duplicates.  Set
+	 *  the dirty bit.
+	 */
+	public void reset() {
+		cursor = 0;
+		dirty = true;
+	}
+
+	public void add(Object el) {
+		//System.out.println("add '"+elementDescription+"' is "+el);
+		if ( el==null ) {
+			return;
+		}
+		if ( elements!=null ) { // if in list, just add
+			elements.add(el);
+			return;
+		}
+		if ( singleElement == null ) { // no elements yet, track w/o list
+			singleElement = el;
+			return;
+		}
+		// adding 2nd element, move to list
+		elements = new ArrayList<Object>(5);
+		elements.add(singleElement);
+		singleElement = null;
+		elements.add(el);
+	}
+
+	/** Return the next element in the stream.  If out of elements, throw
+	 *  an exception unless size()==1.  If size is 1, then return elements[0].
+	 *  Return a duplicate node/subtree if stream is out of elements and
+	 *  size==1.  If we've already used the element, dup (dirty bit set).
+	 */
+	public Object nextTree() {
+		int n = size();
+		if ( dirty || (cursor>=n && n==1) ) {
+			// if out of elements and size is 1, dup
+			Object el = _next();
+			return dup(el);
+		}
+		// test size above then fetch
+		Object el = _next();
+		return el;
+	}
+
+	/** do the work of getting the next element, making sure that it's
+	 *  a tree node or subtree.  Deal with the optimization of single-
+	 *  element list versus list of size &gt; 1.  Throw an exception
+	 *  if the stream is empty or we're out of elements and size&gt;1.
+	 *  protected so you can override in a subclass if necessary.
+	 */
+	protected Object _next() {
+		int n = size();
+		if ( n ==0 ) {
+			throw new RewriteEmptyStreamException(elementDescription);
+		}
+		if ( cursor>= n) { // out of elements?
+			if ( n ==1 ) {  // if size is 1, it's ok; return and we'll dup
+				return toTree(singleElement);
+			}
+			// out of elements and size was not 1, so we can't dup
+			throw new RewriteCardinalityException(elementDescription);
+		}
+		// we have elements
+		if ( singleElement!=null ) {
+			cursor++; // move cursor even for single element list
+			return toTree(singleElement);
+		}
+		// must have more than one in list, pull from elements
+		Object o = toTree(elements.get(cursor));
+		cursor++;
+		return o;
+	}
+
+	/** When constructing trees, sometimes we need to dup a token or AST
+	 * 	subtree.  Dup'ing a token means just creating another AST node
+	 *  around it.  For trees, you must call the adaptor.dupTree() unless
+	 *  the element is for a tree root; then it must be a node dup.
+	 */
+	protected abstract Object dup(Object el);
+
+	/** Ensure stream emits trees; tokens must be converted to AST nodes.
+	 *  AST nodes can be passed through unmolested.
+	 */
+	protected Object toTree(Object el) {
+		return el;
+	}
+
+	public boolean hasNext() {
+		 return (singleElement != null && cursor < 1) ||
+			   (elements!=null && cursor < elements.size());
+	}
+
+	public int size() {
+		int n = 0;
+		if ( singleElement != null ) {
+			n = 1;
+		}
+		if ( elements!=null ) {
+			return elements.size();
+		}
+		return n;
+	}
+
+	public String getDescription() {
+		return elementDescription;
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleNodeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleNodeStream.java
new file mode 100644
index 0000000..e41b745
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleNodeStream.java
@@ -0,0 +1,72 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import java.util.List;
+
+/** Queues up nodes matched on left side of -&gt; in a tree parser. This is
+ *  the analog of RewriteRuleTokenStream for normal parsers. 
+ */
+public class RewriteRuleNodeStream extends RewriteRuleElementStream {
+
+	public RewriteRuleNodeStream(TreeAdaptor adaptor, String elementDescription) {
+		super(adaptor, elementDescription);
+	}
+
+	/** Create a stream with one element */
+	public RewriteRuleNodeStream(TreeAdaptor adaptor,
+								 String elementDescription,
+								 Object oneElement)
+	{
+		super(adaptor, elementDescription, oneElement);
+	}
+
+	/** Create a stream, but feed off an existing list */
+	public RewriteRuleNodeStream(TreeAdaptor adaptor,
+								 String elementDescription,
+								 List<Object> elements)
+	{
+		super(adaptor, elementDescription, elements);
+	}
+
+	public Object nextNode() {
+		return _next();
+	}
+
+	@Override
+	protected Object toTree(Object el) {
+		return adaptor.dupNode(el);
+	}
+
+	@Override
+	protected Object dup(Object el) {
+		// we dup every node, so we don't have to worry about calling dup; we
+		// short-circuited next() so it never calls dup.
+		throw new UnsupportedOperationException("dup can't be called for a node stream.");
+	}
+}
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
new file mode 100644
index 0000000..2c8ac80
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleSubtreeStream.java
@@ -0,0 +1,89 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import java.util.List;
+
+public class RewriteRuleSubtreeStream extends RewriteRuleElementStream {
+
+	public RewriteRuleSubtreeStream(TreeAdaptor adaptor, String elementDescription) {
+		super(adaptor, elementDescription);
+	}
+
+	/** Create a stream with one element */
+	public RewriteRuleSubtreeStream(TreeAdaptor adaptor,
+									String elementDescription,
+									Object oneElement)
+	{
+		super(adaptor, elementDescription, oneElement);
+	}
+
+	/** Create a stream, but feed off an existing list */
+	public RewriteRuleSubtreeStream(TreeAdaptor adaptor,
+									String elementDescription,
+									List<Object> elements)
+	{
+		super(adaptor, elementDescription, elements);
+	}
+
+	/** Treat next element as a single node even if it's a subtree.
+	 *  This is used instead of next() when the result has to be a
+	 *  tree root node.  Also prevents us from duplicating recently-added
+	 *  children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
+	 *  must dup the type node, but ID has been added.
+	 *
+	 *  Referencing a rule result twice is ok; dup entire tree as
+	 *  we can't be adding trees as root; e.g., expr expr.
+	 *
+	 *  Hideous code duplication here with super.next().  Can't think of
+	 *  a proper way to refactor.  This needs to always call dup node
+	 *  and super.next() doesn't know which to call: dup node or dup tree.
+	 */
+	public Object nextNode() {
+		//System.out.println("nextNode: elements="+elements+", singleElement="+((Tree)singleElement).toStringTree());
+		int n = size();
+		if ( dirty || (cursor>=n && n==1) ) {
+			// if out of elements and size is 1, dup (at most a single node
+			// since this is for making root nodes).
+			Object el = _next();
+			return adaptor.dupNode(el);
+		}
+		// test size above then fetch
+		Object tree = _next();
+		while (adaptor.isNil(tree) && adaptor.getChildCount(tree) == 1)
+			tree = adaptor.getChild(tree, 0);
+		//System.out.println("_next="+((Tree)tree).toStringTree());
+		Object el = adaptor.dupNode(tree); // dup just the root (want node here)
+		return el;
+	}
+
+	@Override
+	protected Object dup(Object el) {
+		return adaptor.dupTree(el);
+	}
+}
\ No newline at end of file
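
A sketch of the dup-on-exhaustion behavior described in the base class: once a
single-element subtree stream has been read, further nextTree() calls hand back
duplicates made with adaptor.dupTree(). INT is a hypothetical token type:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.RewriteRuleSubtreeStream;

    public class SubtreeStreamSketch {
        static final int INT = 5;   // hypothetical token type

        public static void main(String[] args) {
            CommonTreeAdaptor adaptor = new CommonTreeAdaptor();
            CommonTree expr = new CommonTree(new CommonToken(INT, "42"));
            RewriteRuleSubtreeStream exprs =
                new RewriteRuleSubtreeStream(adaptor, "rule expr", expr);

            Object first  = exprs.nextTree();      // the original subtree
            Object second = exprs.nextTree();      // a duplicate via adaptor.dupTree()
            System.out.println(first == second);   // prints false
        }
    }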
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleTokenStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleTokenStream.java
new file mode 100644
index 0000000..41ce04b
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/RewriteRuleTokenStream.java
@@ -0,0 +1,78 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+import java.util.List;
+
+public class RewriteRuleTokenStream extends RewriteRuleElementStream {
+
+	public RewriteRuleTokenStream(TreeAdaptor adaptor, String elementDescription) {
+		super(adaptor, elementDescription);
+	}
+
+	/** Create a stream with one element */
+	public RewriteRuleTokenStream(TreeAdaptor adaptor,
+								  String elementDescription,
+								  Object oneElement)
+	{
+		super(adaptor, elementDescription, oneElement);
+	}
+
+	/** Create a stream, but feed off an existing list */
+	public RewriteRuleTokenStream(TreeAdaptor adaptor,
+								  String elementDescription,
+								  List<Object> elements)
+	{
+		super(adaptor, elementDescription, elements);
+	}
+
+	/** Get next token from stream and make a node for it */
+	public Object nextNode() {
+		Token t = (Token)_next();
+		return adaptor.create(t);
+	}
+
+	public Token nextToken() {
+		return (Token)_next();
+	}
+
+	/** Don't convert to a tree unless they explicitly call nextTree.
+	 *  This way we can do hetero tree nodes in rewrite.
+	 */
+	@Override
+	protected Object toTree(Object el) {
+		return el;
+	}
+
+	@Override
+	protected Object dup(Object el) {
+		throw new UnsupportedOperationException("dup can't be called for a token stream.");
+	}
+}
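
A sketch of how a rewrite token stream queues tokens matched on the left of ->
and turns each one into a fresh node during the rewrite; ID is a hypothetical
token type:

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.RewriteRuleTokenStream;

    public class TokenStreamSketch {
        static final int ID = 4;   // hypothetical token type

        public static void main(String[] args) {
            CommonTreeAdaptor adaptor = new CommonTreeAdaptor();
            RewriteRuleTokenStream ids = new RewriteRuleTokenStream(adaptor, "token ID");
            ids.add(new CommonToken(ID, "x"));
            ids.add(new CommonToken(ID, "y"));

            // Each nextNode() wraps the next queued Token in a new CommonTree.
            while (ids.hasNext()) {
                System.out.println(adaptor.getText(ids.nextNode()));   // x, then y
            }
        }
    }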
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/Tree.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/Tree.java
new file mode 100644
index 0000000..a79283d
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/Tree.java
@@ -0,0 +1,128 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+import java.util.List;
+
+/** What does a tree look like?  ANTLR has a number of support classes
+ *  such as CommonTreeNodeStream that work on these kinds of trees.  You
+ *  don't have to make your trees implement this interface, but if you do,
+ *  you'll be able to use more support code.
+ *
+ *  NOTE: When constructing trees, ANTLR can build any kind of tree; it can
+ *  even use Token objects as trees if you add a child list to your tokens.
+ *
+ *  This is a tree node without any payload; just navigation and factory stuff.
+ */
+public interface Tree {
+	public static final Tree INVALID_NODE = new CommonTree(Token.INVALID_TOKEN);
+
+	Tree getChild(int i);
+
+	int getChildCount();
+
+	// Tree tracks parent and child index now > 3.0
+
+	public Tree getParent();
+
+	public void setParent(Tree t);
+
+    /** Is there a node above with token type ttype? */
+    public boolean hasAncestor(int ttype);
+
+    /** Walk upwards and get first ancestor with this token type. */
+    public Tree getAncestor(int ttype);
+
+    /** Return a list of all ancestors of this node.  The first node of
+     *  the list is the root and the last is the parent of this node.
+     */
+    public List<?> getAncestors();
+
+    /** This node is what child index? 0..n-1 */
+	public int getChildIndex();
+
+	public void setChildIndex(int index);
+
+	/** Set the parent and child index values for all children */
+	public void freshenParentAndChildIndexes();
+
+	/** Add t as a child to this node.  If t is null, do nothing.  If t
+	 *  is nil, add all of t's children to this node's children.
+	 */
+	void addChild(Tree t);
+
+	/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
+	public void setChild(int i, Tree t);
+
+	public Object deleteChild(int i);
+
+	/** Delete children from start to stop and replace them with t, even if t
+	 *  is a list (nil-rooted tree).  The number of children can increase or
+	 *  decrease.  For huge child lists, inserting children can force walking
+	 *  the rest of the children to set their child index; this could be slow.
+	 */
+	public void replaceChildren(int startChildIndex, int stopChildIndex, Object t);	
+
+	/** Indicates the node is a nil node but may still have children, meaning
+	 *  the tree is a flat list.
+	 */
+	boolean isNil();
+
+	/**  What is the smallest token index (indexing from 0) for this node
+	 *   and its children?
+	 */
+	int getTokenStartIndex();
+
+	void setTokenStartIndex(int index);
+
+	/**  What is the largest token index (indexing from 0) for this node
+	 *   and its children?
+	 */
+	int getTokenStopIndex();
+
+	void setTokenStopIndex(int index);
+
+	Tree dupNode();
+
+	/** Return a token type; needed for tree parsing */
+	int getType();
+
+	String getText();
+
+	/** In case we don't have a token payload, what is the line for errors? */
+	int getLine();
+
+	int getCharPositionInLine();
+
+	String toStringTree();
+
+	@Override
+	String toString();
+}
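
For orientation, a minimal sketch of how this Tree interface is typically exercised through the runtime's CommonTree/CommonToken implementations. The token types 4 and 5 are arbitrary example values (at or above Token.MIN_TOKEN_TYPE), not anything defined by the grammar files in this change.

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.Tree;

    public class TreeDemo {
        public static void main(String[] args) {
            int PLUS = 4, INT = 5;  // arbitrary example token types (>= Token.MIN_TOKEN_TYPE)

            // build (+ 3 4) by hand using the CommonTree implementation of Tree
            Tree root = new CommonTree(new CommonToken(PLUS, "+"));
            root.addChild(new CommonTree(new CommonToken(INT, "3")));
            root.addChild(new CommonTree(new CommonToken(INT, "4")));

            System.out.println(root.toStringTree());                   // (+ 3 4)
            System.out.println(root.getChildCount());                  // 2
            System.out.println(root.getChild(0).getText());            // 3
            System.out.println(root.getChild(0).getParent() == root);  // true
        }
    }
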
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeAdaptor.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeAdaptor.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeAdaptor.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreeAdaptor.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeFilter.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeFilter.java
new file mode 100644
index 0000000..4699610
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeFilter.java
@@ -0,0 +1,139 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.RecognizerSharedState;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.TokenStream;
+
+/**
+ Cut-n-paste from material I'm not using in the book anymore (edit later
+ to make sense):
+
+ Now, how are we going to test these tree patterns against every
+subtree in our original tree?  In what order should we visit nodes?
+For this application, it turns out we need a simple ``apply once''
+rule application strategy and a ``down then up'' tree traversal
+strategy.  Let's look at rule application first.
+
+As we visit each node, we need to see if any of our patterns match. If
+a pattern matches, we execute the associated tree rewrite and move on
+to the next node. In other words, we only look for a single rule
+application opportunity (we'll see below that we sometimes need to
+repeatedly apply rules). The following method applies a rule in a @cl
+TreeParser (derived from a tree grammar) to a tree:
+
+Here is where we referenced code/walking/patterns/TreePatternMatcher.java
+
+It uses reflection to look up the appropriate rule within the generated
+tree parser class (@cl Simplify in this case). Most of the time, the
+rule will not match the tree.  To avoid issuing syntax errors and
+attempting error recovery, it bumps up the backtracking level.  Upon
+failure, the invoked rule immediately returns. If you don't plan on
+using this technique in your own ANTLR-based application, don't sweat
+the details. This method boils down to ``call a rule to match a tree,
+executing any embedded actions and rewrite rules.''
+
+At this point, we know how to define tree grammar rules and how to
+apply them to a particular subtree. The final piece of the tree
+pattern matcher is the actual tree traversal. We have to get the
+correct node visitation order.  In particular, we need to perform the
+scalar-vector multiply transformation on the way down (preorder) and
+we need to reduce multiply-by-zero subtrees on the way up (postorder).
+
+To implement a top-down visitor, we do a depth first walk of the tree,
+executing an action in the preorder position. To get a bottom-up
+visitor, we execute an action in the postorder position.  ANTLR
+provides a standard @cl TreeVisitor class with a depth first search @v
+visit method. That method executes either a @m pre or @m post method
+or both. In our case, we need to call @m applyOnce in both. On the way
+down, we'll look for @r vmult patterns. On the way up,
+we'll look for @r mult0 patterns.
+ */
+public class TreeFilter extends TreeParser {
+    public interface fptr {
+        public void rule() throws RecognitionException;
+    }
+
+    protected TokenStream originalTokenStream;
+    protected TreeAdaptor originalAdaptor;
+
+    public TreeFilter(TreeNodeStream input) {
+        this(input, new RecognizerSharedState());
+    }
+    public TreeFilter(TreeNodeStream input, RecognizerSharedState state) {
+        super(input, state);
+        originalAdaptor = input.getTreeAdaptor();
+        originalTokenStream = input.getTokenStream();
+    }
+
+    public void applyOnce(Object t, fptr whichRule) {
+        if ( t==null ) return;
+        try {
+            // share TreeParser object but not parsing-related state
+            state = new RecognizerSharedState();
+            input = new CommonTreeNodeStream(originalAdaptor, t);
+            ((CommonTreeNodeStream)input).setTokenStream(originalTokenStream);
+            setBacktrackingLevel(1);
+            whichRule.rule();
+            setBacktrackingLevel(0);
+        }
+        catch (RecognitionException e) { ; }
+    }
+
+    public void downup(Object t) {
+        TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor());
+        TreeVisitorAction actions = new TreeVisitorAction() {
+			@Override
+            public Object pre(Object t)  { applyOnce(t, topdown_fptr); return t; }
+			@Override
+            public Object post(Object t) { applyOnce(t, bottomup_fptr); return t; }
+        };
+        v.visit(t, actions);
+    }
+        
+    fptr topdown_fptr = new fptr() {
+		@Override
+        public void rule() throws RecognitionException {
+            topdown();
+        }
+    };
+
+    fptr bottomup_fptr = new fptr() {
+		@Override
+        public void rule() throws RecognitionException {
+            bottomup();
+        }
+    };
+
+    // methods the downup strategy uses to do the up and down rules.
+    // to override, just define tree grammar rule topdown and turn on
+    // filter=true.
+    public void topdown() throws RecognitionException {;}
+    public void bottomup() throws RecognitionException {;}
+}
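
Normally ANTLR generates a TreeFilter subclass from a tree grammar declared with filter=true, and its topdown()/bottomup() rules do the pattern matching described in the comment above. As a rough, hand-written sketch of the same control flow (the class name and the printing "rule" are invented for illustration only):

    import org.antlr.runtime.RecognitionException;
    import org.antlr.runtime.tree.TreeFilter;
    import org.antlr.runtime.tree.TreeNodeStream;

    // Hypothetical hand-written filter; a real one would be generated from a
    // tree grammar declaring options { filter = true; }.
    class PrintEveryNode extends TreeFilter {
        PrintEveryNode(TreeNodeStream input) { super(input); }

        @Override
        public void topdown() throws RecognitionException {
            // applyOnce() repoints 'input' at the current subtree before calling us
            Object node = input.LT(1);
            System.out.println(input.getTreeAdaptor().getText(node));
        }
    }

Driving it over a tree built elsewhere would look like new PrintEveryNode(new CommonTreeNodeStream(tree)).downup(tree); the visitor calls topdown() on the way down at every node.
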
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeIterator.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeIterator.java
new file mode 100644
index 0000000..b1d891e
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeIterator.java
@@ -0,0 +1,134 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+import org.antlr.runtime.misc.FastQueue;
+
+import java.util.Iterator;
+
+/** Return a node stream from a doubly-linked tree whose nodes
+ *  know what child index they are.  No remove() is supported.
+ *
+ *  Emits navigation nodes (DOWN, UP, and EOF) to show the tree structure.
+ */
+public class TreeIterator implements Iterator<Object> {
+    protected TreeAdaptor adaptor;
+    protected Object root;
+    protected Object tree;
+    protected boolean firstTime = true;
+
+    // navigation nodes to return during walk and at end
+    public Object up;
+    public Object down;
+    public Object eof;
+
+    /** If we emit UP/DOWN nodes, we need to spit out multiple nodes per
+     *  next() call.
+     */
+    protected FastQueue<Object> nodes;
+
+    public TreeIterator(Object tree) {
+        this(new CommonTreeAdaptor(),tree);
+    }
+
+    public TreeIterator(TreeAdaptor adaptor, Object tree) {
+        this.adaptor = adaptor;
+        this.tree = tree;
+        this.root = tree;
+        nodes = new FastQueue<Object>();
+        down = adaptor.create(Token.DOWN, "DOWN");
+        up = adaptor.create(Token.UP, "UP");
+        eof = adaptor.create(Token.EOF, "EOF");
+    }
+
+    public void reset() {
+        firstTime = true;
+        tree = root;
+        nodes.clear();
+    }
+
+	@Override
+    public boolean hasNext() {
+        if ( firstTime ) return root!=null;
+        if ( nodes!=null && nodes.size()>0 ) return true;
+        if ( tree==null ) return false;
+        if ( adaptor.getChildCount(tree)>0 ) return true;
+        return adaptor.getParent(tree)!=null; // back at root?
+    }
+
+	@Override
+    public Object next() {
+        if ( firstTime ) { // initial condition
+            firstTime = false;
+            if ( adaptor.getChildCount(tree)==0 ) { // single node tree (special)
+                nodes.add(eof);
+                return tree;
+            }
+            return tree;
+        }
+        // if any queued up, use those first
+        if ( nodes!=null && nodes.size()>0 ) return nodes.remove();
+
+        // no nodes left?
+        if ( tree==null ) return eof;
+
+        // next node will be child 0 if any children
+        if ( adaptor.getChildCount(tree)>0 ) {
+            tree = adaptor.getChild(tree, 0);
+            nodes.add(tree); // real node is next after DOWN
+            return down;
+        }
+        // if no children, look for next sibling of tree or ancestor
+        Object parent = adaptor.getParent(tree);
+        // while we're out of siblings, keep popping back up towards root
+        while ( parent!=null &&
+                adaptor.getChildIndex(tree)+1 >= adaptor.getChildCount(parent) )
+        {
+            nodes.add(up); // we're moving back up
+            tree = parent;
+            parent = adaptor.getParent(tree);
+        }
+        // no nodes left?
+        if ( parent==null ) {
+            tree = null; // back at root? nothing left then
+            nodes.add(eof); // add to queue, might have UP nodes in there
+            return nodes.remove();
+        }
+
+        // must have found a node with an unvisited sibling
+        // move to it and return it
+        int nextSiblingIndex = adaptor.getChildIndex(tree) + 1;
+        tree = adaptor.getChild(parent, nextSiblingIndex);
+        nodes.add(tree); // add to queue, might have UP nodes in there
+        return nodes.remove();
+    }
+
+	@Override
+    public void remove() { throw new UnsupportedOperationException(); }
+}
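
A small sketch of the flattened walk this iterator produces; the token types 4 and 5 are arbitrary example values.

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.TreeIterator;

    public class TreeIteratorDemo {
        public static void main(String[] args) {
            CommonTree plus = new CommonTree(new CommonToken(4, "+"));  // 4, 5: example token types
            plus.addChild(new CommonTree(new CommonToken(5, "3")));
            plus.addChild(new CommonTree(new CommonToken(5, "4")));

            TreeIterator it = new TreeIterator(plus);
            StringBuilder walk = new StringBuilder();
            while (it.hasNext()) {
                walk.append(it.next()).append(' ');
            }
            System.out.println(walk);  // + DOWN 3 4 UP EOF
        }
    }
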
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeNodeStream.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeNodeStream.java
new file mode 100644
index 0000000..4e8cb55
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeNodeStream.java
@@ -0,0 +1,113 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.IntStream;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenStream;
+
+/** A stream of tree nodes, accessing nodes from a tree of some kind */
+public interface TreeNodeStream extends IntStream {
+	/** Get a tree node at an absolute index {@code i}; 0..n-1.
+	 *  If you don't want to buffer up nodes, then this method makes no
+	 *  sense for you.
+	 */
+	public Object get(int i);
+
+	/**
+	 * Get tree node at current input pointer + {@code k} ahead where
+	 * {@code k==1} is next node. {@code k<0} indicates nodes in the past. So
+	 * {@code LT(-1)} is previous node, but implementations are not required to
+	 * provide results for {@code k < -1}. {@code LT(0)} is undefined. For
+	 * {@code k<=n}, return {@code null}. Return {@code null} for {@code LT(0)}
+	 * and any index that results in an absolute address that is negative.
+	 * <p>
+	 * This is analogous to {@link TokenStream#LT}, but this returns a tree node
+	 * instead of a {@link Token}. Makes code generation identical for both
+	 * parser and tree grammars.</p>
+	 */
+	public Object LT(int k);
+
+	/** Where is this stream pulling nodes from?  This is not the name, but
+	 *  the object that provides node objects.
+	 */
+	public Object getTreeSource();
+
+	/**
+	 * If the tree associated with this stream was created from a
+	 * {@link TokenStream}, you can specify it here. Used to do rule
+	 * {@code $text} attribute in tree parser. Optional unless you use tree
+	 * parser rule {@code $text} attribute or {@code output=template} and
+	 * {@code rewrite=true} options.
+	 */
+	public TokenStream getTokenStream();
+
+	/** What adaptor can tell me how to interpret/navigate nodes and
+	 *  trees.  E.g., get text of a node.
+	 */
+	public TreeAdaptor getTreeAdaptor();
+
+	/**
+	 * As we flatten the tree, we use {@link Token#UP}, {@link Token#DOWN} nodes
+	 * to represent the tree structure. When debugging we need unique nodes so
+	 * we have to instantiate new ones. When doing normal tree parsing, it's
+	 * slow and a waste of memory to create unique navigation nodes. Default
+	 * should be {@code false}.
+	 */
+	public void setUniqueNavigationNodes(boolean uniqueNavigationNodes);
+
+    /** Reset the tree node stream in such a way that it acts like
+     *  a freshly constructed stream.
+     */
+    public void reset();
+
+	/**
+	 * Return the text of all nodes from {@code start} to {@code stop},
+	 * inclusive. If the stream does not buffer all the nodes then it can still
+	 * walk recursively from start until stop. You can always return
+	 * {@code null} or {@code ""} too, but in that case users should not,
+	 * of course, access {@code $ruleLabel.text} in an action.
+	 */
+	public String toString(Object start, Object stop);
+
+	// REWRITING TREES (used by tree parser)
+
+	/**
+	 * Replace children of {@code parent} from index {@code startChildIndex} to
+	 * {@code stopChildIndex} with {@code t}, which might be a list. Number of
+	 * children may be different after this call. The stream is notified because
+	 * it is walking the tree and might need to know you are monkeying with the
+	 * underlying tree. Also, it might be able to modify the node stream to
+	 * avoid restreaming for future phases.
+	 * <p>
+	 * If {@code parent} is {@code null}, don't do anything; must be at root of
+	 * overall tree. Can't replace whatever points to the parent externally. Do
+	 * nothing.</p>
+	 */
+	public void replaceChildren(Object parent, int startChildIndex, int stopChildIndex, Object t);
+}
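
CommonTreeNodeStream is the usual implementation of this interface; a short sketch of what the flattened node stream looks like from a consumer's point of view (token types 4 and 5 are arbitrary example values):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeNodeStream;

    public class TreeNodeStreamDemo {
        public static void main(String[] args) {
            CommonTree plus = new CommonTree(new CommonToken(4, "+"));
            plus.addChild(new CommonTree(new CommonToken(5, "3")));
            plus.addChild(new CommonTree(new CommonToken(5, "4")));

            // serialized form of the tree is: + DOWN 3 4 UP
            CommonTreeNodeStream nodes = new CommonTreeNodeStream(plus);
            System.out.println(nodes.LT(1));                                  // the '+' node
            nodes.consume();
            System.out.println(nodes.LA(1) == Token.DOWN);                    // true: structure marker
            System.out.println(nodes.getTreeAdaptor().getText(nodes.LT(2)));  // 3
        }
    }
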
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeParser.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeParser.java
new file mode 100644
index 0000000..0eaf954
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeParser.java
@@ -0,0 +1,246 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.*;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/** A parser for a stream of tree nodes.  "Tree grammars" result in a subclass
+ *  of this.  All the error reporting and recovery is shared with Parser via
+ *  the BaseRecognizer superclass.
+ */
+public class TreeParser extends BaseRecognizer {
+	public static final int DOWN = Token.DOWN;
+	public static final int UP = Token.UP;
+
+    // precompiled regex used by inContext
+    static String dotdot = ".*[^.]\\.\\.[^.].*";
+    static String doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*";
+    static Pattern dotdotPattern = Pattern.compile(dotdot);
+    static Pattern doubleEtcPattern = Pattern.compile(doubleEtc);
+
+	protected TreeNodeStream input;
+
+	public TreeParser(TreeNodeStream input) {
+		super(); // highlight that we go to super to set state object
+		setTreeNodeStream(input);
+	}
+
+	public TreeParser(TreeNodeStream input, RecognizerSharedState state) {
+		super(state); // share the state object with another parser
+		setTreeNodeStream(input);
+    }
+
+	@Override
+	public void reset() {
+		super.reset(); // reset all recognizer state variables
+		if ( input!=null ) {
+			input.seek(0); // rewind the input
+		}
+	}
+
+	/** Set the input stream */
+	public void setTreeNodeStream(TreeNodeStream input) {
+		this.input = input;
+	}
+
+	public TreeNodeStream getTreeNodeStream() {
+		return input;
+	}
+
+	@Override
+	public String getSourceName() {
+		return input.getSourceName();
+	}
+
+	@Override
+	protected Object getCurrentInputSymbol(IntStream input) {
+		return ((TreeNodeStream)input).LT(1);
+	}
+
+	@Override
+	protected Object getMissingSymbol(IntStream input,
+									  RecognitionException e,
+									  int expectedTokenType,
+									  BitSet follow)
+	{
+		String tokenText =
+			"<missing "+getTokenNames()[expectedTokenType]+">";
+        TreeAdaptor adaptor = ((TreeNodeStream)e.input).getTreeAdaptor();
+        return adaptor.create(new CommonToken(expectedTokenType, tokenText));
+	}
+
+    /** Matching '.' in a tree parser has special meaning: skip the node, or
+	 *  the entire subtree if the node has children.  If it has children,
+	 *  scan until the corresponding UP node.
+	 */
+	@Override
+	public void matchAny(IntStream ignore) { // ignore stream, copy of input
+		state.errorRecovery = false;
+		state.failed = false;
+		Object look = input.LT(1);
+		if ( input.getTreeAdaptor().getChildCount(look)==0 ) {
+			input.consume(); // not subtree, consume 1 node and return
+			return;
+		}
+		// current node is a subtree, skip to corresponding UP.
+		// must count nesting level to get right UP
+		int level=0;
+		int tokenType = input.getTreeAdaptor().getType(look);
+		while ( tokenType!=Token.EOF && !(tokenType==UP && level==0) ) {
+			input.consume();
+			look = input.LT(1);
+			tokenType = input.getTreeAdaptor().getType(look);
+			if ( tokenType == DOWN ) {
+				level++;
+			}
+			else if ( tokenType == UP ) {
+				level--;
+			}
+		}
+		input.consume(); // consume UP
+	}
+
+    /** We have DOWN/UP nodes in the stream that have no line info, so we
+	 *  override; plus we want to alter the exception type.  Don't try to
+	 *  recover from tree parser errors inline...
+     */
+	@Override
+    protected Object recoverFromMismatchedToken(IntStream input,
+                                                int ttype,
+                                                BitSet follow)
+        throws RecognitionException
+    {
+        throw new MismatchedTreeNodeException(ttype, (TreeNodeStream)input);
+    }
+
+    /** Prefix the error message with the grammar name; the message is
+	 *  always intended for the programmer, because the parser (not the
+	 *  user) built the input tree.
+	 */
+	@Override
+	public String getErrorHeader(RecognitionException e) {
+		return getGrammarFileName()+": node from "+
+			   (e.approximateLineInfo?"after ":"")+"line "+e.line+":"+e.charPositionInLine;
+	}
+
+	/** Tree parsers parse nodes that usually have a token object as
+	 *  payload. Set the exception token and do the default behavior.
+	 */
+	@Override
+	public String getErrorMessage(RecognitionException e, String[] tokenNames) {
+		if ( this instanceof TreeParser ) {
+			TreeAdaptor adaptor = ((TreeNodeStream)e.input).getTreeAdaptor();
+			e.token = adaptor.getToken(e.node);
+			if ( e.token==null ) { // could be an UP/DOWN node
+				e.token = new CommonToken(adaptor.getType(e.node),
+										  adaptor.getText(e.node));
+			}
+		}
+		return super.getErrorMessage(e, tokenNames);
+	}
+
+	/** Check whether the current node in the input has the given context,
+	 *  i.e., a sequence of nodes towards the root of the tree.  For example,
+	 *  context "MULT" means my parent must be MULT.  "CLASS VARDEF" says the
+	 *  current node must be a child of a VARDEF whose parent is a CLASS node.
+	 *  You can use "..." to mean zero-or-more nodes.  "METHOD ... VARDEF"
+	 *  means my parent is VARDEF and somewhere above that is a METHOD node.
+	 *  The first node in the context is not necessarily the root.  The context
+	 *  matcher stops matching and returns true when it runs out of context.
+	 *  There is no way to force the first node to be the root.
+	 */
+	public boolean inContext(String context) {
+		return inContext(input.getTreeAdaptor(), getTokenNames(), input.LT(1), context);
+	}
+
+	/** The worker for inContext.  It's static and full of parameters for
+	 *  testing purposes.
+	 */
+	public static boolean inContext(TreeAdaptor adaptor,
+									String[] tokenNames,
+									Object t,
+									String context)
+	{
+		Matcher dotdotMatcher = dotdotPattern.matcher(context);
+		Matcher doubleEtcMatcher = doubleEtcPattern.matcher(context);
+		if ( dotdotMatcher.find() ) { // don't allow "..", must be "..."
+			throw new IllegalArgumentException("invalid syntax: ..");
+		}
+		if ( doubleEtcMatcher.find() ) { // don't allow double "..."
+			throw new IllegalArgumentException("invalid syntax: ... ...");
+		}
+		context = context.replaceAll("\\.\\.\\.", " ... "); // ensure spaces around ...
+		context = context.trim();
+		String[] nodes = context.split("\\s+");
+		int ni = nodes.length-1;
+		t = adaptor.getParent(t);
+		while ( ni>=0 && t!=null ) {
+			if ( nodes[ni].equals("...") ) {
+				// walk upwards until we see nodes[ni-1] then continue walking
+				if ( ni==0 ) return true; // ... at start is no-op
+				String goal = nodes[ni-1];
+				Object ancestor = getAncestor(adaptor, tokenNames, t, goal);
+				if ( ancestor==null ) return false;
+				t = ancestor;
+				ni--;
+			}
+			String name = tokenNames[adaptor.getType(t)];
+			if ( !name.equals(nodes[ni]) ) {
+				//System.err.println("not matched: "+nodes[ni]+" at "+t);
+				return false;
+			}
+			// advance to parent and to previous element in context node list
+			ni--;
+			t = adaptor.getParent(t);
+		}
+
+		if ( t==null && ni>=0 ) return false; // at root but more nodes to match
+		return true;
+	}
+
+	/** Helper for static inContext */
+	protected static Object getAncestor(TreeAdaptor adaptor, String[] tokenNames, Object t, String goal) {
+		while ( t!=null ) {
+			String name = tokenNames[adaptor.getType(t)];
+			if ( name.equals(goal) ) return t;
+			t = adaptor.getParent(t);
+		}
+		return null;
+	}
+
+	public void traceIn(String ruleName, int ruleIndex)  {
+		super.traceIn(ruleName, ruleIndex, input.LT(1));
+	}
+
+	public void traceOut(String ruleName, int ruleIndex)  {
+		super.traceOut(ruleName, ruleIndex, input.LT(1));
+	}
+}
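
The inContext() contract documented above can be exercised directly through its static worker; a sketch using an invented token vocabulary (names and types here are example values only, with real types starting at Token.MIN_TOKEN_TYPE):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeParser;

    public class InContextDemo {
        public static void main(String[] args) {
            // index == token type; entries 0-3 are placeholders below MIN_TOKEN_TYPE
            String[] tokenNames = { "<invalid>", "<EOR>", "<DOWN>", "<UP>", "CLASS", "VARDEF", "ID" };
            int CLASS = 4, VARDEF = 5, ID = 6;

            CommonTree klass  = new CommonTree(new CommonToken(CLASS, "class"));
            CommonTree vardef = new CommonTree(new CommonToken(VARDEF, "vardef"));
            CommonTree id     = new CommonTree(new CommonToken(ID, "x"));
            klass.addChild(vardef);
            vardef.addChild(id);

            // true: x's parent is a VARDEF and somewhere above that is a CLASS
            System.out.println(
                TreeParser.inContext(new CommonTreeAdaptor(), tokenNames, id, "CLASS ... VARDEF"));
        }
    }
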
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternLexer.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternLexer.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternLexer.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternLexer.java
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternParser.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternParser.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternParser.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreePatternParser.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRewriter.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRewriter.java
new file mode 100644
index 0000000..f77bf23
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRewriter.java
@@ -0,0 +1,124 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.RecognizerSharedState;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.TokenStream;
+
+public class TreeRewriter extends TreeParser {
+    public interface fptr {
+        public Object rule() throws RecognitionException;
+    }
+
+    protected boolean showTransformations = false;
+
+    protected TokenStream originalTokenStream;
+    protected TreeAdaptor originalAdaptor;
+    
+    public TreeRewriter(TreeNodeStream input) {
+        this(input, new RecognizerSharedState());
+    }
+    public TreeRewriter(TreeNodeStream input, RecognizerSharedState state) {
+        super(input, state);
+        originalAdaptor = input.getTreeAdaptor();
+        originalTokenStream = input.getTokenStream();        
+    }
+
+    public Object applyOnce(Object t, fptr whichRule) {
+        if ( t==null ) return null;
+        try {
+            // share TreeParser object but not parsing-related state
+            state = new RecognizerSharedState();
+            input = new CommonTreeNodeStream(originalAdaptor, t);
+            ((CommonTreeNodeStream)input).setTokenStream(originalTokenStream);
+            setBacktrackingLevel(1);
+            TreeRuleReturnScope r = (TreeRuleReturnScope)whichRule.rule();
+            setBacktrackingLevel(0);
+            if ( failed() ) return t;
+            if ( showTransformations &&
+                 r!=null && !t.equals(r.getTree()) && r.getTree()!=null )
+            {
+                reportTransformation(t, r.getTree());
+            }
+            if ( r!=null && r.getTree()!=null ) return r.getTree();
+            else return t;
+        }
+        catch (RecognitionException e) { ; }
+        return t;
+    }
+
+    public Object applyRepeatedly(Object t, fptr whichRule) {
+        boolean treeChanged = true;
+        while ( treeChanged ) {
+            Object u = applyOnce(t, whichRule);
+            treeChanged = !t.equals(u);
+            t = u;
+        }
+        return t;
+    }
+
+    public Object downup(Object t) { return downup(t, false); }
+
+    public Object downup(Object t, boolean showTransformations) {
+        this.showTransformations = showTransformations;
+        TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor());
+        TreeVisitorAction actions = new TreeVisitorAction() {
+			@Override
+            public Object pre(Object t)  { return applyOnce(t, topdown_fptr); }
+			@Override
+            public Object post(Object t) { return applyRepeatedly(t, bottomup_ftpr); }
+        };
+        t = v.visit(t, actions);
+        return t;
+    }
+
+    /** Override this if you need transformation tracing to go somewhere
+     *  other than stdout or if you're not using Tree-derived trees.
+     */
+    public void reportTransformation(Object oldTree, Object newTree) {
+        System.out.println(((Tree)oldTree).toStringTree()+" -> "+
+                           ((Tree)newTree).toStringTree());
+    }
+
+    fptr topdown_fptr = new fptr() {
+		@Override
+        public Object rule() throws RecognitionException { return topdown(); }
+    };
+    
+    fptr bottomup_ftpr = new fptr() {
+		@Override
+        public Object rule() throws RecognitionException { return bottomup(); }
+    };
+
+    // methods the downup strategy uses to do the up and down rules.
+    // to override, just define tree grammar rule topdown and turn on
+    // filter=true.
+    public Object topdown() throws RecognitionException { return null; }
+    public Object bottomup() throws RecognitionException { return null; }
+}
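
As with TreeFilter, the topdown()/bottomup() bodies are normally generated from a tree grammar (filter=true, output=AST), and the rule return scopes carry the replacement subtree. A hand-written approximation of a single bottom-up rewrite, with MULT as an arbitrary example token type:

    import org.antlr.runtime.RecognitionException;
    import org.antlr.runtime.tree.TreeAdaptor;
    import org.antlr.runtime.tree.TreeNodeStream;
    import org.antlr.runtime.tree.TreeRewriter;
    import org.antlr.runtime.tree.TreeRuleReturnScope;

    // Hypothetical hand-written pass that rewrites (MULT x 0) -> 0 on the way up.
    class ZeroFolder extends TreeRewriter {
        static final int MULT = 4;  // arbitrary example token type

        // generated rules return scopes whose getTree() carries the rewritten subtree
        static class Result extends TreeRuleReturnScope {
            Object tree;
            @Override public Object getTree() { return tree; }
        }

        ZeroFolder(TreeNodeStream input) { super(input); }

        @Override
        public Object bottomup() throws RecognitionException {
            TreeAdaptor a = input.getTreeAdaptor();
            Object t = input.LT(1);  // root of the subtree applyOnce() handed us
            if (a.getType(t) == MULT && a.getChildCount(t) == 2
                    && "0".equals(a.getText(a.getChild(t, 1)))) {
                Result r = new Result();
                r.tree = a.getChild(t, 1);  // replace the whole subtree with the 0 node
                return r;
            }
            return null;  // no match; downup() keeps the original subtree
        }
    }

Driving it would look like Object reduced = new ZeroFolder(new CommonTreeNodeStream(ast)).downup(ast); the TreeVisitor inside downup() splices each returned subtree back into the parent.
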
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRuleReturnScope.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRuleReturnScope.java
new file mode 100644
index 0000000..ae8b3c2
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeRuleReturnScope.java
@@ -0,0 +1,44 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.ParserRuleReturnScope;
+import org.antlr.runtime.RuleReturnScope;
+import org.antlr.runtime.Token;
+
+/** This is identical to {@link ParserRuleReturnScope} except that
+ *  the start property is a tree node, not a {@link Token} object,
+ *  when you are parsing trees.  To be generic, the tree node type
+ *  has to be {@link Object}.
+ */
+public class TreeRuleReturnScope extends RuleReturnScope {
+	/** First node or root node of tree matched for this rule. */
+	public Object start;
+	@Override
+	public Object getStart() { return start; }	
+}
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitor.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitor.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitor.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitor.java
diff --git a/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitorAction.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitorAction.java
similarity index 100%
rename from antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitorAction.java
rename to runtime/Java/src/main/java/org/antlr/runtime/tree/TreeVisitorAction.java
diff --git a/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeWizard.java b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeWizard.java
new file mode 100644
index 0000000..a89112d
--- /dev/null
+++ b/runtime/Java/src/main/java/org/antlr/runtime/tree/TreeWizard.java
@@ -0,0 +1,537 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.Token;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/** Build and navigate trees with this object.  It must know the token names,
+ *  so you have to pass in a map or an array of token names (from which
+ *  this class can build the map); i.e., token DECL means nothing unless the
+ *  class can translate it to a token type.
+ *
+ *  In order to create nodes and navigate, this class needs a TreeAdaptor.
+ *
+ *  This class can build a token type &rarr; node index for repeated use or for
+ *  iterating over the various nodes with a particular type.
+ *
+ *  This class works in conjunction with the TreeAdaptor rather than moving
+ *  all this functionality into the adaptor.  An adaptor helps build and
+ *  navigate trees using methods.  This class helps you do it with string
+ *  patterns like "(A B C)".  You can create a tree from that pattern or
+ *  match subtrees against it.
+ */
+public class TreeWizard {
+	protected TreeAdaptor adaptor;
+	protected Map<String, Integer> tokenNameToTypeMap;
+
+	public interface ContextVisitor {
+		// TODO: should this be called visit or something else?
+		public void visit(Object t, Object parent, int childIndex, Map<String, Object> labels);
+	}
+
+	public static abstract class Visitor implements ContextVisitor {
+		@Override
+		public void visit(Object t, Object parent, int childIndex, Map<String, Object> labels) {
+			visit(t);
+		}
+		public abstract void visit(Object t);
+	}
+
+	/** When using %label:TOKENNAME in a tree for parse(), we must
+	 *  track the label.
+	 */
+	public static class TreePattern extends CommonTree {
+		public String label;
+		public boolean hasTextArg;
+		public TreePattern(Token payload) {
+			super(payload);
+		}
+		@Override
+		public String toString() {
+			if ( label!=null ) {
+				return "%"+label+":"+super.toString();
+			}
+			else {
+				return super.toString();				
+			}
+		}
+	}
+
+	public static class WildcardTreePattern extends TreePattern {
+		public WildcardTreePattern(Token payload) {
+			super(payload);
+		}
+	}
+
+	/** This adaptor creates TreePattern objects for use during scan() */
+	public static class TreePatternTreeAdaptor extends CommonTreeAdaptor {
+		@Override
+		public Object create(Token payload) {
+			return new TreePattern(payload);
+		}
+	}
+
+	// TODO: build indexes for the wizard
+
+	/** During fillBuffer(), we can make a reverse index from a set
+	 *  of token types of interest to the list of indexes into the
+	 *  node stream.  This lets us convert a node pointer to a
+	 *  stream index semi-efficiently for a list of interesting
+	 *  nodes such as function definition nodes (you'll want to seek
+	 *  to their bodies for an interpreter).  Also useful for doing
+	 *  dynamic searches; i.e., go find me all PLUS nodes.
+	protected Map tokenTypeToStreamIndexesMap;
+
+	/** If tokenTypesToReverseIndex set to INDEX_ALL then indexing
+	 *  occurs for all token types.
+	public static final Set INDEX_ALL = new HashSet();
+
+	/** A set of token types user would like to index for faster lookup.
+	 *  If this is INDEX_ALL, then all token types are tracked.  If null,
+	 *  then none are indexed.
+	protected Set tokenTypesToReverseIndex = null;
+	*/
+
+	public TreeWizard(TreeAdaptor adaptor) {
+		this.adaptor = adaptor;
+	}
+
+	public TreeWizard(TreeAdaptor adaptor, Map<String, Integer> tokenNameToTypeMap) {
+		this.adaptor = adaptor;
+		this.tokenNameToTypeMap = tokenNameToTypeMap;
+	}
+
+	public TreeWizard(TreeAdaptor adaptor, String[] tokenNames) {
+		this.adaptor = adaptor;
+		this.tokenNameToTypeMap = computeTokenTypes(tokenNames);
+	}
+
+	public TreeWizard(String[] tokenNames) {
+		this(new CommonTreeAdaptor(), tokenNames);
+	}
+
+	/** Compute a Map&lt;String, Integer&gt; that is an inverted index of
+	 *  tokenNames (which maps int token types to names).
+	 */
+	public Map<String, Integer> computeTokenTypes(String[] tokenNames) {
+		Map<String, Integer> m = new HashMap<String, Integer>();
+		if ( tokenNames==null ) {
+			return m;
+		}
+		for (int ttype = Token.MIN_TOKEN_TYPE; ttype < tokenNames.length; ttype++) {
+			String name = tokenNames[ttype];
+			m.put(name, ttype);
+		}
+		return m;
+	}
+
+	/** Using the map of token names to token types, return the type. */
+	public int getTokenType(String tokenName) {
+	 	if ( tokenNameToTypeMap==null ) {
+			 return Token.INVALID_TOKEN_TYPE;
+		 }
+		Integer ttypeI = tokenNameToTypeMap.get(tokenName);
+		if ( ttypeI!=null ) {
+			return ttypeI;
+		}
+		return Token.INVALID_TOKEN_TYPE;
+	}
+
+	/** Walk the entire tree and make a node name to nodes mapping.
+	 *  For now, use recursion but later nonrecursive version may be
+	 *  more efficient.  Returns Map&lt;Integer, List&gt; where the List is
+	 *  of your AST node type.  The Integer is the token type of the node.
+	 *
+	 *  TODO: save this index so that find and visit are faster
+	 */
+	public Map<Integer, List<Object>> index(Object t) {
+		Map<Integer, List<Object>> m = new HashMap<Integer, List<Object>>();
+		_index(t, m);
+		return m;
+	}
+
+	/** Do the work for index */
+	protected void _index(Object t, Map<Integer, List<Object>> m) {
+		if ( t==null ) {
+			return;
+		}
+		int ttype = adaptor.getType(t);
+		List<Object> elements = m.get(ttype);
+		if ( elements==null ) {
+			elements = new ArrayList<Object>();
+			m.put(ttype, elements);
+		}
+		elements.add(t);
+		int n = adaptor.getChildCount(t);
+		for (int i=0; i<n; i++) {
+			Object child = adaptor.getChild(t, i);
+			_index(child, m);
+		}
+	}
+
+	/** Return a List of tree nodes with token type ttype */
+	public List<? extends Object> find(Object t, int ttype) {
+		final List<Object> nodes = new ArrayList<Object>();
+		visit(t, ttype, new TreeWizard.Visitor() {
+			@Override
+			public void visit(Object t) {
+				nodes.add(t);
+			}
+		});
+		return nodes;
+	}
+
+	/** Return a List of subtrees matching pattern. */
+	public List<? extends Object> find(Object t, String pattern) {
+		final List<Object> subtrees = new ArrayList<Object>();
+		// Create a TreePattern from the pattern
+		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
+		TreePatternParser parser =
+			new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
+		final TreePattern tpattern = (TreePattern)parser.pattern();
+		// don't allow invalid patterns
+		if ( tpattern==null ||
+			 tpattern.isNil() ||
+			 tpattern.getClass()==WildcardTreePattern.class )
+		{
+			return null;
+		}
+		int rootTokenType = tpattern.getType();
+		visit(t, rootTokenType, new TreeWizard.ContextVisitor() {
+			@Override
+			public void visit(Object t, Object parent, int childIndex, Map labels) {
+				if ( _parse(t, tpattern, null) ) {
+					subtrees.add(t);
+				}
+			}
+		});
+		return subtrees;
+	}
+
+	public Object findFirst(Object t, int ttype) {
+		return null;
+	}
+
+	public Object findFirst(Object t, String pattern) {
+		return null;
+	}
+
+	/** Visit every ttype node in t, invoking the visitor.  This is a quicker
+	 *  version of the general visit(t, pattern) method.  The labels arg
+	 *  of the visitor action method is never set (it's null) since using
+	 *  a token type rather than a pattern doesn't let us set a label.
+	 */
+	public void visit(Object t, int ttype, ContextVisitor visitor) {
+		_visit(t, null, 0, ttype, visitor);
+	}
+
+	/** Do the recursive work for visit */
+	protected void _visit(Object t, Object parent, int childIndex, int ttype, ContextVisitor visitor) {
+		if ( t==null ) {
+			return;
+		}
+		if ( adaptor.getType(t)==ttype ) {
+			visitor.visit(t, parent, childIndex, null);
+		}
+		int n = adaptor.getChildCount(t);
+		for (int i=0; i<n; i++) {
+			Object child = adaptor.getChild(t, i);
+			_visit(child, t, i, ttype, visitor);
+		}
+	}
+
+	/** For all subtrees that match the pattern, execute the visit action.
+	 *  The implementation uses the root node of the pattern in combination
+	 *  with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
+	 *  Patterns with wildcard roots are also not allowed.
+	 */
+	public void visit(Object t, final String pattern, final ContextVisitor visitor) {
+		// Create a TreePattern from the pattern
+		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
+		TreePatternParser parser =
+			new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
+		final TreePattern tpattern = (TreePattern)parser.pattern();
+		// don't allow invalid patterns
+		if ( tpattern==null ||
+			 tpattern.isNil() ||
+			 tpattern.getClass()==WildcardTreePattern.class )
+		{
+			return;
+		}
+		final Map<String, Object> labels = new HashMap<String, Object>(); // reused for each _parse
+		int rootTokenType = tpattern.getType();
+		visit(t, rootTokenType, new TreeWizard.ContextVisitor() {
+			@Override
+			public void visit(Object t, Object parent, int childIndex, Map<String, Object> unusedlabels) {
+				// the unusedlabels arg is null as visit on token type doesn't set.
+				labels.clear();
+				if ( _parse(t, tpattern, labels) ) {
+					visitor.visit(t, parent, childIndex, labels);
+				}
+			}
+		});
+	}
+
+	/** Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
+	 *  on the various nodes and '.' (dot) as the node/subtree wildcard,
+	 *  return true if the pattern matches and fill the labels Map with
+	 *  the labels pointing at the appropriate nodes.  Return false if
+	 *  the pattern is malformed or the tree does not match.
+	 *
+	 *  If a node specifies a text arg in pattern, then that must match
+	 *  for that node in t.
+	 *
+	 *  TODO: what's a better way to indicate bad pattern? Exceptions are a hassle 
+	 */
+	public boolean parse(Object t, String pattern, Map<String, Object> labels) {
+		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
+		TreePatternParser parser =
+			new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
+		TreePattern tpattern = (TreePattern)parser.pattern();
+		/*
+		System.out.println("t="+((Tree)t).toStringTree());
+		System.out.println("scant="+tpattern.toStringTree());
+		*/
+		boolean matched = _parse(t, tpattern, labels);
+		return matched;
+	}
+
+	public boolean parse(Object t, String pattern) {
+		return parse(t, pattern, null);
+	}
+
+	/** Do the work for parse. Check to see if the t2 pattern fits the
+	 *  structure and token types in t1.  Check text if the pattern has
+	 *  text arguments on nodes.  Fill labels map with pointers to nodes
+	 *  in tree matched against nodes in pattern with labels.
+	 */
+	protected boolean _parse(Object t1, TreePattern tpattern, Map<String, Object> labels) {
+		// make sure both are non-null
+		if ( t1==null || tpattern==null ) {
+			return false;
+		}
+		// check roots (wildcard matches anything)
+		if ( tpattern.getClass() != WildcardTreePattern.class ) {
+			if ( adaptor.getType(t1) != tpattern.getType() ) return false;
+            // if pattern has text, check node text
+			if ( tpattern.hasTextArg && !adaptor.getText(t1).equals(tpattern.getText()) ) {
+				return false;
+			}
+		}
+		if ( tpattern.label!=null && labels!=null ) {
+			// map label in pattern to node in t1
+			labels.put(tpattern.label, t1);
+		}
+		// check children
+		int n1 = adaptor.getChildCount(t1);
+		int n2 = tpattern.getChildCount();
+		if ( n1 != n2 ) {
+			return false;
+		}
+		for (int i=0; i<n1; i++) {
+			Object child1 = adaptor.getChild(t1, i);
+			TreePattern child2 = (TreePattern)tpattern.getChild(i);
+			if ( !_parse(child1, child2, labels) ) {
+				return false;
+			}
+		}
+		return true;
+	}
+
+	/** Create a tree or node from the indicated tree pattern that closely
+	 *  follows ANTLR tree grammar tree element syntax:
+	 *
+	 * 		(root child1 ... child2).
+	 *
+	 *  You can also just pass in a node: ID
+	 * 
+	 *  Any node can have a text argument: ID[foo]
+	 *  (notice there are no quotes around foo--it's clear it's a string).
+	 *
+	 *  nil is a special name meaning "give me a nil node".  Useful for
+	 *  making lists: (nil A B C) is a list of A B C.
+ 	 */
+	public Object create(String pattern) {
+		TreePatternLexer tokenizer = new TreePatternLexer(pattern);
+		TreePatternParser parser = new TreePatternParser(tokenizer, this, adaptor);
+		Object t = parser.pattern();
+		return t;
+	}
+
+	/** Compare t1 and t2; return true if token types, text, and structure match exactly.
+	 *  The trees are examined in their entirety so that (A B) does not match
+	 *  (A B C) nor (A (B C)).
+	 *  TODO: allow them to pass in a comparator
+	 *  TODO: have a version that is nonstatic so it can use instance adaptor
+	 *
+	 *  I cannot rely on the tree node's equals() implementation as I make
+	 *  no constraints at all on the node types nor interface etc... 
+	 */
+	public static boolean equals(Object t1, Object t2, TreeAdaptor adaptor) {
+		return _equals(t1, t2, adaptor);
+	}
+
+	/** Compare type, structure, and text of two trees, assuming adaptor in
+	 *  this instance of a TreeWizard.
+	 */
+	public boolean equals(Object t1, Object t2) {
+		return _equals(t1, t2, adaptor);
+	}
+
+	protected static boolean _equals(Object t1, Object t2, TreeAdaptor adaptor) {
+		// make sure both are non-null
+		if ( t1==null || t2==null ) {
+			return false;
+		}
+		// check roots
+		if ( adaptor.getType(t1) != adaptor.getType(t2) ) {
+			return false;
+		}
+		if ( !adaptor.getText(t1).equals(adaptor.getText(t2)) ) {
+			return false;
+		}
+		// check children
+		int n1 = adaptor.getChildCount(t1);
+		int n2 = adaptor.getChildCount(t2);
+		if ( n1 != n2 ) {
+			return false;
+		}
+		for (int i=0; i<n1; i++) {
+			Object child1 = adaptor.getChild(t1, i);
+			Object child2 = adaptor.getChild(t2, i);
+			if ( !_equals(child1, child2, adaptor) ) {
+				return false;
+			}
+		}
+		return true;
+	}
+
+	// TODO: next stuff taken from CommonTreeNodeStream
+	
+		/** Given a node, add this to the reverse index tokenTypeToStreamIndexesMap.
+	 *  You can override this method to alter how indexing occurs.  The
+	 *  default is to create a
+	 *
+	 *    Map<Integer token type,ArrayList<Integer stream index>>
+	 *
+	 *  This data structure allows you to find all nodes with type INT in order.
+	 *
+	 *  If you really need to find a node of type, say, FUNC quickly then perhaps
+	 *
+	 *    Map<Integer token type,Map<Object tree node,Integer stream index>>
+	 *
+	 *  would be better for you.  The interior maps map a tree node to
+	 *  the index so you don't have to search linearly for a specific node.
+	 *
+	 *  If you change this method, you will likely need to change
+	 *  getNodeIndex(), which extracts information.
+	protected void fillReverseIndex(Object node, int streamIndex) {
+		//System.out.println("revIndex "+node+"@"+streamIndex);
+		if ( tokenTypesToReverseIndex==null ) {
+			return; // no indexing if this is empty (nothing of interest)
+		}
+		if ( tokenTypeToStreamIndexesMap==null ) {
+			tokenTypeToStreamIndexesMap = new HashMap(); // first indexing op
+		}
+		int tokenType = adaptor.getType(node);
+		Integer tokenTypeI = new Integer(tokenType);
+		if ( !(tokenTypesToReverseIndex==INDEX_ALL ||
+			   tokenTypesToReverseIndex.contains(tokenTypeI)) )
+		{
+			return; // tokenType not of interest
+		}
+		Integer streamIndexI = new Integer(streamIndex);
+		ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
+		if ( indexes==null ) {
+			indexes = new ArrayList(); // no list yet for this token type
+			indexes.add(streamIndexI); // not there yet, add
+			tokenTypeToStreamIndexesMap.put(tokenTypeI, indexes);
+		}
+		else {
+			if ( !indexes.contains(streamIndexI) ) {
+				indexes.add(streamIndexI); // not there yet, add
+			}
+		}
+	}
+
+	/** Track the indicated token type in the reverse index.  Call this
+	 *  repeatedly for each type or use variant with Set argument to
+	 *  set all at once.
+	 * @param tokenType
+	public void reverseIndex(int tokenType) {
+		if ( tokenTypesToReverseIndex==null ) {
+			tokenTypesToReverseIndex = new HashSet();
+		}
+		else if ( tokenTypesToReverseIndex==INDEX_ALL ) {
+			return;
+		}
+		tokenTypesToReverseIndex.add(new Integer(tokenType));
+	}
+
+	/** Track the indicated token types in the reverse index. Set
+	 *  to INDEX_ALL to track all token types.
+	public void reverseIndex(Set tokenTypes) {
+		tokenTypesToReverseIndex = tokenTypes;
+	}
+
+	/** Given a node pointer, return its index into the node stream.
+	 *  This is not its Token stream index.  If there is no reverse map
+	 *  from node to stream index or the map does not contain entries
+	 *  for node's token type, a linear search of entire stream is used.
+	 *
+	 *  Return -1 if exact node pointer not in stream.
+	public int getNodeIndex(Object node) {
+		//System.out.println("get "+node);
+		if ( tokenTypeToStreamIndexesMap==null ) {
+			return getNodeIndexLinearly(node);
+		}
+		int tokenType = adaptor.getType(node);
+		Integer tokenTypeI = new Integer(tokenType);
+		ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
+		if ( indexes==null ) {
+			//System.out.println("found linearly; stream index = "+getNodeIndexLinearly(node));
+			return getNodeIndexLinearly(node);
+		}
+		for (int i = 0; i < indexes.size(); i++) {
+			Integer streamIndexI = (Integer)indexes.get(i);
+			Object n = get(streamIndexI.intValue());
+			if ( n==node ) {
+				//System.out.println("found in index; stream index = "+streamIndexI);
+				return streamIndexI.intValue(); // found it!
+			}
+		}
+		return -1;
+	}
+
+	*/
+}
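
Note: the commented-out block above describes a reverse index from token type to stream
positions (Map<Integer token type, ArrayList<Integer stream index>>). As a minimal
illustration of that data structure only, here is a hypothetical standalone class using
plain java.util collections; it is not part of this change and does not touch the ANTLR
runtime API. fillReverseIndex() would correspond to add(), and getNodeIndex() would walk
indexesOf(type) checking node identity before falling back to a linear scan.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class TokenTypeIndex {
        // token type -> stream indexes of nodes with that type, in stream order
        private final Map<Integer, List<Integer>> typeToIndexes =
                new HashMap<Integer, List<Integer>>();

        /** Record that the node at streamIndex has the given token type. */
        void add(int tokenType, int streamIndex) {
            List<Integer> indexes = typeToIndexes.get(tokenType);
            if (indexes == null) {
                indexes = new ArrayList<Integer>();
                typeToIndexes.put(tokenType, indexes);
            }
            if (!indexes.contains(streamIndex)) { // avoid duplicates, as in fillReverseIndex()
                indexes.add(streamIndex);
            }
        }

        /** All stream indexes holding nodes of tokenType, in order; empty if none. */
        List<Integer> indexesOf(int tokenType) {
            List<Integer> indexes = typeToIndexes.get(tokenType);
            return indexes != null ? indexes : new ArrayList<Integer>();
        }
    }
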
diff --git a/runtime/Java/src/test/java/org/antlr/runtime/TestLookaheadStream.java b/runtime/Java/src/test/java/org/antlr/runtime/TestLookaheadStream.java
new file mode 100644
index 0000000..13c8add
--- /dev/null
+++ b/runtime/Java/src/test/java/org/antlr/runtime/TestLookaheadStream.java
@@ -0,0 +1,46 @@
+package org.antlr.runtime;
+
+import junit.framework.TestCase;
+
+public class TestLookaheadStream extends TestCase {
+
+  public void testSeek() {
+    UnbufferedTokenStream stream = new UnbufferedTokenStream(createTokenSource());
+
+    stream.consume();
+    assertEquals(0, stream.LA(-1));
+    assertEquals(1, stream.LA(1));
+
+    stream.mark();
+
+    stream.consume();
+    assertEquals(1, stream.LA(-1));
+    assertEquals(2, stream.LA(1));
+
+    int index = stream.index();
+    stream.rewind();
+    assertEquals(0, stream.LA(-1));
+    assertEquals(1, stream.LA(1));
+
+    stream.seek(index);
+    assertEquals(1, stream.LA(-1));
+    assertEquals(2, stream.LA(1));
+  }
+
+  private TokenSource createTokenSource() {
+    return new TokenSource() {
+      int count = 0;
+
+      @Override
+      public Token nextToken() {
+        return new CommonToken(count++);
+      }
+
+      @Override
+      public String getSourceName() {
+        return "test";
+      }
+    };
+
+  }
+}
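
Note: for reference, the lookahead contract this new test exercises (LA(-1) is the type of
the last consumed token, LA(1) the type of the next one; mark()/rewind() restore the consume
position) can be shown with a small standalone sketch. This is a hypothetical example, not
part of the change; it uses only the runtime calls already present in the test, plus
Token.EOF_TOKEN to end a fixed token list.

    import java.util.Arrays;
    import java.util.Iterator;

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.TokenSource;
    import org.antlr.runtime.UnbufferedTokenStream;

    public class LookaheadContractSketch {
      public static void main(String[] args) {
        final Iterator<Token> tokens = Arrays.<Token>asList(
            new CommonToken(10), new CommonToken(11), new CommonToken(12)).iterator();

        TokenSource source = new TokenSource() {
          @Override
          public Token nextToken() {
            // Report EOF once the fixed list is exhausted.
            return tokens.hasNext() ? tokens.next() : Token.EOF_TOKEN;
          }

          @Override
          public String getSourceName() {
            return "sketch";
          }
        };

        UnbufferedTokenStream stream = new UnbufferedTokenStream(source);
        stream.consume();                   // consume type 10
        stream.mark();                      // remember this position
        stream.consume();                   // consume type 11
        System.out.println(stream.LA(-1));  // 11 (last consumed)
        System.out.println(stream.LA(1));   // 12 (next to consume)
        stream.rewind();                    // back to the marked position
        System.out.println(stream.LA(1));   // 11 again
      }
    }
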
diff --git a/antlr-3.4/runtime/JavaScript/AUTHORS b/runtime/JavaScript/AUTHORS
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/AUTHORS
rename to runtime/JavaScript/AUTHORS
diff --git a/antlr-3.4/runtime/JavaScript/ChangeLog b/runtime/JavaScript/ChangeLog
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/ChangeLog
rename to runtime/JavaScript/ChangeLog
diff --git a/antlr-3.4/runtime/JavaScript/build/README b/runtime/JavaScript/build/README
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/build/README
rename to runtime/JavaScript/build/README
diff --git a/antlr-3.4/runtime/JavaScript/build/antlr3.properties b/runtime/JavaScript/build/antlr3.properties
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/build/antlr3.properties
rename to runtime/JavaScript/build/antlr3.properties
diff --git a/antlr-3.4/runtime/JavaScript/build/build.xml b/runtime/JavaScript/build/build.xml
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/build/build.xml
rename to runtime/JavaScript/build/build.xml
diff --git a/antlr-3.4/runtime/JavaScript/build/license.txt b/runtime/JavaScript/build/license.txt
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/build/license.txt
rename to runtime/JavaScript/build/license.txt
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr.js b/runtime/JavaScript/src/org/antlr.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr.js
rename to runtime/JavaScript/src/org/antlr.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/ANTLRFileStream.js b/runtime/JavaScript/src/org/antlr/runtime/ANTLRFileStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/ANTLRFileStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/ANTLRFileStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/ANTLRStringStream.js b/runtime/JavaScript/src/org/antlr/runtime/ANTLRStringStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/ANTLRStringStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/ANTLRStringStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/BaseRecognizer.js b/runtime/JavaScript/src/org/antlr/runtime/BaseRecognizer.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/BaseRecognizer.js
rename to runtime/JavaScript/src/org/antlr/runtime/BaseRecognizer.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/BitSet.js b/runtime/JavaScript/src/org/antlr/runtime/BitSet.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/BitSet.js
rename to runtime/JavaScript/src/org/antlr/runtime/BitSet.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/CharStream.js b/runtime/JavaScript/src/org/antlr/runtime/CharStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/CharStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/CharStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/CommonToken.js b/runtime/JavaScript/src/org/antlr/runtime/CommonToken.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/CommonToken.js
rename to runtime/JavaScript/src/org/antlr/runtime/CommonToken.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/CommonTokenStream.js b/runtime/JavaScript/src/org/antlr/runtime/CommonTokenStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/CommonTokenStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/CommonTokenStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/DFA.js b/runtime/JavaScript/src/org/antlr/runtime/DFA.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/DFA.js
rename to runtime/JavaScript/src/org/antlr/runtime/DFA.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/EarlyExitException.js b/runtime/JavaScript/src/org/antlr/runtime/EarlyExitException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/EarlyExitException.js
rename to runtime/JavaScript/src/org/antlr/runtime/EarlyExitException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/FailedPredicateException.js b/runtime/JavaScript/src/org/antlr/runtime/FailedPredicateException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/FailedPredicateException.js
rename to runtime/JavaScript/src/org/antlr/runtime/FailedPredicateException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/IndexOutOfBoundsExceptions.js b/runtime/JavaScript/src/org/antlr/runtime/IndexOutOfBoundsExceptions.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/IndexOutOfBoundsExceptions.js
rename to runtime/JavaScript/src/org/antlr/runtime/IndexOutOfBoundsExceptions.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/Lexer.js b/runtime/JavaScript/src/org/antlr/runtime/Lexer.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/Lexer.js
rename to runtime/JavaScript/src/org/antlr/runtime/Lexer.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MismatchedNotSetException.js b/runtime/JavaScript/src/org/antlr/runtime/MismatchedNotSetException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MismatchedNotSetException.js
rename to runtime/JavaScript/src/org/antlr/runtime/MismatchedNotSetException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MismatchedRangeExceptions.js b/runtime/JavaScript/src/org/antlr/runtime/MismatchedRangeExceptions.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MismatchedRangeExceptions.js
rename to runtime/JavaScript/src/org/antlr/runtime/MismatchedRangeExceptions.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MismatchedSetException.js b/runtime/JavaScript/src/org/antlr/runtime/MismatchedSetException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MismatchedSetException.js
rename to runtime/JavaScript/src/org/antlr/runtime/MismatchedSetException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MismatchedTokenException.js b/runtime/JavaScript/src/org/antlr/runtime/MismatchedTokenException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MismatchedTokenException.js
rename to runtime/JavaScript/src/org/antlr/runtime/MismatchedTokenException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MismatchedTreeNodeException.js b/runtime/JavaScript/src/org/antlr/runtime/MismatchedTreeNodeException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MismatchedTreeNodeException.js
rename to runtime/JavaScript/src/org/antlr/runtime/MismatchedTreeNodeException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MissingTokenException.js b/runtime/JavaScript/src/org/antlr/runtime/MissingTokenException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/MissingTokenException.js
rename to runtime/JavaScript/src/org/antlr/runtime/MissingTokenException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/NoViableAltException.js b/runtime/JavaScript/src/org/antlr/runtime/NoViableAltException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/NoViableAltException.js
rename to runtime/JavaScript/src/org/antlr/runtime/NoViableAltException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/Parser.js b/runtime/JavaScript/src/org/antlr/runtime/Parser.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/Parser.js
rename to runtime/JavaScript/src/org/antlr/runtime/Parser.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/ParserRuleReturnScope.js b/runtime/JavaScript/src/org/antlr/runtime/ParserRuleReturnScope.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/ParserRuleReturnScope.js
rename to runtime/JavaScript/src/org/antlr/runtime/ParserRuleReturnScope.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/RecognitionException.js b/runtime/JavaScript/src/org/antlr/runtime/RecognitionException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/RecognitionException.js
rename to runtime/JavaScript/src/org/antlr/runtime/RecognitionException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/RecognizerSharedState.js b/runtime/JavaScript/src/org/antlr/runtime/RecognizerSharedState.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/RecognizerSharedState.js
rename to runtime/JavaScript/src/org/antlr/runtime/RecognizerSharedState.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/Token.js b/runtime/JavaScript/src/org/antlr/runtime/Token.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/Token.js
rename to runtime/JavaScript/src/org/antlr/runtime/Token.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/TokenRewriteStream.js b/runtime/JavaScript/src/org/antlr/runtime/TokenRewriteStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/TokenRewriteStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/TokenRewriteStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/UnwantedTokenException.js b/runtime/JavaScript/src/org/antlr/runtime/UnwantedTokenException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/UnwantedTokenException.js
rename to runtime/JavaScript/src/org/antlr/runtime/UnwantedTokenException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/BaseTree.js b/runtime/JavaScript/src/org/antlr/runtime/tree/BaseTree.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/BaseTree.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/BaseTree.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/BaseTreeAdaptor.js b/runtime/JavaScript/src/org/antlr/runtime/tree/BaseTreeAdaptor.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/BaseTreeAdaptor.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/BaseTreeAdaptor.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/CommonErrorNode.js b/runtime/JavaScript/src/org/antlr/runtime/tree/CommonErrorNode.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/CommonErrorNode.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/CommonErrorNode.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/CommonTree.js b/runtime/JavaScript/src/org/antlr/runtime/tree/CommonTree.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/CommonTree.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/CommonTree.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/CommonTreeAdaptor.js b/runtime/JavaScript/src/org/antlr/runtime/tree/CommonTreeAdaptor.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/CommonTreeAdaptor.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/CommonTreeAdaptor.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/CommonTreeNodeStream.js b/runtime/JavaScript/src/org/antlr/runtime/tree/CommonTreeNodeStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/CommonTreeNodeStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/CommonTreeNodeStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteCardinalityException.js b/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteCardinalityException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteCardinalityException.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/RewriteCardinalityException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteEarlyExitException.js b/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteEarlyExitException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteEarlyExitException.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/RewriteEarlyExitException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteEmptyStreamException.js b/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteEmptyStreamException.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteEmptyStreamException.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/RewriteEmptyStreamException.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleElementStream.js b/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleElementStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleElementStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleElementStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleNodeStream.js b/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleNodeStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleNodeStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleNodeStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.js b/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleSubtreeStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleTokenStream.js b/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleTokenStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleTokenStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/RewriteRuleTokenStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/Tree.js b/runtime/JavaScript/src/org/antlr/runtime/tree/Tree.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/Tree.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/Tree.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/TreeNodeStream.js b/runtime/JavaScript/src/org/antlr/runtime/tree/TreeNodeStream.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/TreeNodeStream.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/TreeNodeStream.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/TreeParser.js b/runtime/JavaScript/src/org/antlr/runtime/tree/TreeParser.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/TreeParser.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/TreeParser.js
diff --git a/antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/TreeRuleReturnScope.js b/runtime/JavaScript/src/org/antlr/runtime/tree/TreeRuleReturnScope.js
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/src/org/antlr/runtime/tree/TreeRuleReturnScope.js
rename to runtime/JavaScript/src/org/antlr/runtime/tree/TreeRuleReturnScope.js
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/Python.g b/runtime/JavaScript/tests/functional/Python.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/Python.g
rename to runtime/JavaScript/tests/functional/Python.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/all.html b/runtime/JavaScript/tests/functional/all.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/all.html
rename to runtime/JavaScript/tests/functional/all.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/rhino-python.extensions b/runtime/JavaScript/tests/functional/rhino-python.extensions
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/rhino-python.extensions
rename to runtime/JavaScript/tests/functional/rhino-python.extensions
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/rhino-python.input b/runtime/JavaScript/tests/functional/rhino-python.input
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/rhino-python.input
rename to runtime/JavaScript/tests/functional/rhino-python.input
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/rhino-python.output b/runtime/JavaScript/tests/functional/rhino-python.output
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/rhino-python.output
rename to runtime/JavaScript/tests/functional/rhino-python.output
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/rhino-python.prog b/runtime/JavaScript/tests/functional/rhino-python.prog
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/rhino-python.prog
rename to runtime/JavaScript/tests/functional/rhino-python.prog
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t001lexer.g b/runtime/JavaScript/tests/functional/t001lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t001lexer.g
rename to runtime/JavaScript/tests/functional/t001lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t001lexer.html b/runtime/JavaScript/tests/functional/t001lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t001lexer.html
rename to runtime/JavaScript/tests/functional/t001lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t002lexer.g b/runtime/JavaScript/tests/functional/t002lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t002lexer.g
rename to runtime/JavaScript/tests/functional/t002lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t002lexer.html b/runtime/JavaScript/tests/functional/t002lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t002lexer.html
rename to runtime/JavaScript/tests/functional/t002lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t003lexer.g b/runtime/JavaScript/tests/functional/t003lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t003lexer.g
rename to runtime/JavaScript/tests/functional/t003lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t003lexer.html b/runtime/JavaScript/tests/functional/t003lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t003lexer.html
rename to runtime/JavaScript/tests/functional/t003lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t004lexer.g b/runtime/JavaScript/tests/functional/t004lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t004lexer.g
rename to runtime/JavaScript/tests/functional/t004lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t004lexer.html b/runtime/JavaScript/tests/functional/t004lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t004lexer.html
rename to runtime/JavaScript/tests/functional/t004lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t005lexer.g b/runtime/JavaScript/tests/functional/t005lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t005lexer.g
rename to runtime/JavaScript/tests/functional/t005lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t005lexer.html b/runtime/JavaScript/tests/functional/t005lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t005lexer.html
rename to runtime/JavaScript/tests/functional/t005lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t006lexer.g b/runtime/JavaScript/tests/functional/t006lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t006lexer.g
rename to runtime/JavaScript/tests/functional/t006lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t006lexer.html b/runtime/JavaScript/tests/functional/t006lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t006lexer.html
rename to runtime/JavaScript/tests/functional/t006lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t007lexer.g b/runtime/JavaScript/tests/functional/t007lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t007lexer.g
rename to runtime/JavaScript/tests/functional/t007lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t007lexer.html b/runtime/JavaScript/tests/functional/t007lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t007lexer.html
rename to runtime/JavaScript/tests/functional/t007lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t008lexer.g b/runtime/JavaScript/tests/functional/t008lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t008lexer.g
rename to runtime/JavaScript/tests/functional/t008lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t008lexer.html b/runtime/JavaScript/tests/functional/t008lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t008lexer.html
rename to runtime/JavaScript/tests/functional/t008lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t009lexer.g b/runtime/JavaScript/tests/functional/t009lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t009lexer.g
rename to runtime/JavaScript/tests/functional/t009lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t009lexer.html b/runtime/JavaScript/tests/functional/t009lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t009lexer.html
rename to runtime/JavaScript/tests/functional/t009lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t010lexer.g b/runtime/JavaScript/tests/functional/t010lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t010lexer.g
rename to runtime/JavaScript/tests/functional/t010lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t010lexer.html b/runtime/JavaScript/tests/functional/t010lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t010lexer.html
rename to runtime/JavaScript/tests/functional/t010lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t011lexer.g b/runtime/JavaScript/tests/functional/t011lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t011lexer.g
rename to runtime/JavaScript/tests/functional/t011lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t011lexer.html b/runtime/JavaScript/tests/functional/t011lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t011lexer.html
rename to runtime/JavaScript/tests/functional/t011lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t012lexerXML.g b/runtime/JavaScript/tests/functional/t012lexerXML.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t012lexerXML.g
rename to runtime/JavaScript/tests/functional/t012lexerXML.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t012lexerXML.html b/runtime/JavaScript/tests/functional/t012lexerXML.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t012lexerXML.html
rename to runtime/JavaScript/tests/functional/t012lexerXML.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t013parser.g b/runtime/JavaScript/tests/functional/t013parser.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t013parser.g
rename to runtime/JavaScript/tests/functional/t013parser.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t013parser.html b/runtime/JavaScript/tests/functional/t013parser.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t013parser.html
rename to runtime/JavaScript/tests/functional/t013parser.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t014parser.g b/runtime/JavaScript/tests/functional/t014parser.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t014parser.g
rename to runtime/JavaScript/tests/functional/t014parser.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t014parser.html b/runtime/JavaScript/tests/functional/t014parser.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t014parser.html
rename to runtime/JavaScript/tests/functional/t014parser.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t015calc.g b/runtime/JavaScript/tests/functional/t015calc.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t015calc.g
rename to runtime/JavaScript/tests/functional/t015calc.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t015calc.html b/runtime/JavaScript/tests/functional/t015calc.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t015calc.html
rename to runtime/JavaScript/tests/functional/t015calc.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t016actions.g b/runtime/JavaScript/tests/functional/t016actions.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t016actions.g
rename to runtime/JavaScript/tests/functional/t016actions.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t016actions.html b/runtime/JavaScript/tests/functional/t016actions.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t016actions.html
rename to runtime/JavaScript/tests/functional/t016actions.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t017parser.g b/runtime/JavaScript/tests/functional/t017parser.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t017parser.g
rename to runtime/JavaScript/tests/functional/t017parser.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t017parser.html b/runtime/JavaScript/tests/functional/t017parser.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t017parser.html
rename to runtime/JavaScript/tests/functional/t017parser.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t018llstar.g b/runtime/JavaScript/tests/functional/t018llstar.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t018llstar.g
rename to runtime/JavaScript/tests/functional/t018llstar.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t018llstar.html b/runtime/JavaScript/tests/functional/t018llstar.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t018llstar.html
rename to runtime/JavaScript/tests/functional/t018llstar.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t019lexer.g b/runtime/JavaScript/tests/functional/t019lexer.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t019lexer.g
rename to runtime/JavaScript/tests/functional/t019lexer.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t019lexer.html b/runtime/JavaScript/tests/functional/t019lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t019lexer.html
rename to runtime/JavaScript/tests/functional/t019lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t020fuzzy.g b/runtime/JavaScript/tests/functional/t020fuzzy.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t020fuzzy.g
rename to runtime/JavaScript/tests/functional/t020fuzzy.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t020fuzzy.html b/runtime/JavaScript/tests/functional/t020fuzzy.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t020fuzzy.html
rename to runtime/JavaScript/tests/functional/t020fuzzy.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t021hoist.g b/runtime/JavaScript/tests/functional/t021hoist.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t021hoist.g
rename to runtime/JavaScript/tests/functional/t021hoist.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t021hoist.html b/runtime/JavaScript/tests/functional/t021hoist.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t021hoist.html
rename to runtime/JavaScript/tests/functional/t021hoist.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t022scopes.g b/runtime/JavaScript/tests/functional/t022scopes.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t022scopes.g
rename to runtime/JavaScript/tests/functional/t022scopes.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t022scopes.html b/runtime/JavaScript/tests/functional/t022scopes.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t022scopes.html
rename to runtime/JavaScript/tests/functional/t022scopes.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t023scopes.g b/runtime/JavaScript/tests/functional/t023scopes.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t023scopes.g
rename to runtime/JavaScript/tests/functional/t023scopes.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t023scopes.html b/runtime/JavaScript/tests/functional/t023scopes.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t023scopes.html
rename to runtime/JavaScript/tests/functional/t023scopes.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t024finally.g b/runtime/JavaScript/tests/functional/t024finally.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t024finally.g
rename to runtime/JavaScript/tests/functional/t024finally.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t024finally.html b/runtime/JavaScript/tests/functional/t024finally.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t024finally.html
rename to runtime/JavaScript/tests/functional/t024finally.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t025lexerRulePropertyRef.g b/runtime/JavaScript/tests/functional/t025lexerRulePropertyRef.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t025lexerRulePropertyRef.g
rename to runtime/JavaScript/tests/functional/t025lexerRulePropertyRef.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t025lexerRulePropertyRef.html b/runtime/JavaScript/tests/functional/t025lexerRulePropertyRef.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t025lexerRulePropertyRef.html
rename to runtime/JavaScript/tests/functional/t025lexerRulePropertyRef.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t026actions.g b/runtime/JavaScript/tests/functional/t026actions.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t026actions.g
rename to runtime/JavaScript/tests/functional/t026actions.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t026actions.html b/runtime/JavaScript/tests/functional/t026actions.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t026actions.html
rename to runtime/JavaScript/tests/functional/t026actions.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t027eof.g b/runtime/JavaScript/tests/functional/t027eof.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t027eof.g
rename to runtime/JavaScript/tests/functional/t027eof.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t027eof.html b/runtime/JavaScript/tests/functional/t027eof.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t027eof.html
rename to runtime/JavaScript/tests/functional/t027eof.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t029synpredgate.g b/runtime/JavaScript/tests/functional/t029synpredgate.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t029synpredgate.g
rename to runtime/JavaScript/tests/functional/t029synpredgate.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t029synpredgate.html b/runtime/JavaScript/tests/functional/t029synpredgate.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t029synpredgate.html
rename to runtime/JavaScript/tests/functional/t029synpredgate.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t030specialStates.g b/runtime/JavaScript/tests/functional/t030specialStates.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t030specialStates.g
rename to runtime/JavaScript/tests/functional/t030specialStates.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t030specialStates.html b/runtime/JavaScript/tests/functional/t030specialStates.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t030specialStates.html
rename to runtime/JavaScript/tests/functional/t030specialStates.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t031emptyAlt.g b/runtime/JavaScript/tests/functional/t031emptyAlt.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t031emptyAlt.g
rename to runtime/JavaScript/tests/functional/t031emptyAlt.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t031emptyAlt.html b/runtime/JavaScript/tests/functional/t031emptyAlt.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t031emptyAlt.html
rename to runtime/JavaScript/tests/functional/t031emptyAlt.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t032subrulePredict.g b/runtime/JavaScript/tests/functional/t032subrulePredict.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t032subrulePredict.g
rename to runtime/JavaScript/tests/functional/t032subrulePredict.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t032subrulePredict.html b/runtime/JavaScript/tests/functional/t032subrulePredict.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t032subrulePredict.html
rename to runtime/JavaScript/tests/functional/t032subrulePredict.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t033backtracking.g b/runtime/JavaScript/tests/functional/t033backtracking.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t033backtracking.g
rename to runtime/JavaScript/tests/functional/t033backtracking.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t033backtracking.html b/runtime/JavaScript/tests/functional/t033backtracking.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t033backtracking.html
rename to runtime/JavaScript/tests/functional/t033backtracking.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t034tokenLabelPropertyRef.g b/runtime/JavaScript/tests/functional/t034tokenLabelPropertyRef.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t034tokenLabelPropertyRef.g
rename to runtime/JavaScript/tests/functional/t034tokenLabelPropertyRef.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t034tokenLabelPropertyRef.html b/runtime/JavaScript/tests/functional/t034tokenLabelPropertyRef.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t034tokenLabelPropertyRef.html
rename to runtime/JavaScript/tests/functional/t034tokenLabelPropertyRef.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t035ruleLabelPropertyRef.g b/runtime/JavaScript/tests/functional/t035ruleLabelPropertyRef.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t035ruleLabelPropertyRef.g
rename to runtime/JavaScript/tests/functional/t035ruleLabelPropertyRef.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t035ruleLabelPropertyRef.html b/runtime/JavaScript/tests/functional/t035ruleLabelPropertyRef.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t035ruleLabelPropertyRef.html
rename to runtime/JavaScript/tests/functional/t035ruleLabelPropertyRef.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t036multipleReturnValues.g b/runtime/JavaScript/tests/functional/t036multipleReturnValues.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t036multipleReturnValues.g
rename to runtime/JavaScript/tests/functional/t036multipleReturnValues.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t036multipleReturnValues.html b/runtime/JavaScript/tests/functional/t036multipleReturnValues.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t036multipleReturnValues.html
rename to runtime/JavaScript/tests/functional/t036multipleReturnValues.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t037rulePropertyRef.g b/runtime/JavaScript/tests/functional/t037rulePropertyRef.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t037rulePropertyRef.g
rename to runtime/JavaScript/tests/functional/t037rulePropertyRef.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t037rulePropertyRef.html b/runtime/JavaScript/tests/functional/t037rulePropertyRef.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t037rulePropertyRef.html
rename to runtime/JavaScript/tests/functional/t037rulePropertyRef.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t038lexerRuleLabel.g b/runtime/JavaScript/tests/functional/t038lexerRuleLabel.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t038lexerRuleLabel.g
rename to runtime/JavaScript/tests/functional/t038lexerRuleLabel.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t038lexerRuleLabel.html b/runtime/JavaScript/tests/functional/t038lexerRuleLabel.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t038lexerRuleLabel.html
rename to runtime/JavaScript/tests/functional/t038lexerRuleLabel.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t039labels.g b/runtime/JavaScript/tests/functional/t039labels.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t039labels.g
rename to runtime/JavaScript/tests/functional/t039labels.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t039labels.html b/runtime/JavaScript/tests/functional/t039labels.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t039labels.html
rename to runtime/JavaScript/tests/functional/t039labels.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t040bug80.g b/runtime/JavaScript/tests/functional/t040bug80.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t040bug80.g
rename to runtime/JavaScript/tests/functional/t040bug80.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t040bug80.html b/runtime/JavaScript/tests/functional/t040bug80.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t040bug80.html
rename to runtime/JavaScript/tests/functional/t040bug80.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t041parameters.g b/runtime/JavaScript/tests/functional/t041parameters.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t041parameters.g
rename to runtime/JavaScript/tests/functional/t041parameters.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t041parameters.html b/runtime/JavaScript/tests/functional/t041parameters.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t041parameters.html
rename to runtime/JavaScript/tests/functional/t041parameters.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t042ast.g b/runtime/JavaScript/tests/functional/t042ast.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t042ast.g
rename to runtime/JavaScript/tests/functional/t042ast.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t042ast.html b/runtime/JavaScript/tests/functional/t042ast.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t042ast.html
rename to runtime/JavaScript/tests/functional/t042ast.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t043synpred.g b/runtime/JavaScript/tests/functional/t043synpred.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t043synpred.g
rename to runtime/JavaScript/tests/functional/t043synpred.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t043synpred.html b/runtime/JavaScript/tests/functional/t043synpred.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t043synpred.html
rename to runtime/JavaScript/tests/functional/t043synpred.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t044trace.g b/runtime/JavaScript/tests/functional/t044trace.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t044trace.g
rename to runtime/JavaScript/tests/functional/t044trace.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t044trace.html b/runtime/JavaScript/tests/functional/t044trace.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t044trace.html
rename to runtime/JavaScript/tests/functional/t044trace.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t045dfabug.g b/runtime/JavaScript/tests/functional/t045dfabug.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t045dfabug.g
rename to runtime/JavaScript/tests/functional/t045dfabug.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t045dfabug.html b/runtime/JavaScript/tests/functional/t045dfabug.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t045dfabug.html
rename to runtime/JavaScript/tests/functional/t045dfabug.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t046rewrite.g b/runtime/JavaScript/tests/functional/t046rewrite.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t046rewrite.g
rename to runtime/JavaScript/tests/functional/t046rewrite.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t046rewrite.html b/runtime/JavaScript/tests/functional/t046rewrite.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t046rewrite.html
rename to runtime/JavaScript/tests/functional/t046rewrite.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t047treeparser.g b/runtime/JavaScript/tests/functional/t047treeparser.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t047treeparser.g
rename to runtime/JavaScript/tests/functional/t047treeparser.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t047treeparser.html b/runtime/JavaScript/tests/functional/t047treeparser.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t047treeparser.html
rename to runtime/JavaScript/tests/functional/t047treeparser.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t047treeparserWalker.g b/runtime/JavaScript/tests/functional/t047treeparserWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t047treeparserWalker.g
rename to runtime/JavaScript/tests/functional/t047treeparserWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t048rewrite.g b/runtime/JavaScript/tests/functional/t048rewrite.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t048rewrite.g
rename to runtime/JavaScript/tests/functional/t048rewrite.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t048rewrite.html b/runtime/JavaScript/tests/functional/t048rewrite.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t048rewrite.html
rename to runtime/JavaScript/tests/functional/t048rewrite.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparser.html b/runtime/JavaScript/tests/functional/t049treeparser.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparser.html
rename to runtime/JavaScript/tests/functional/t049treeparser.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparsera.g b/runtime/JavaScript/tests/functional/t049treeparsera.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparsera.g
rename to runtime/JavaScript/tests/functional/t049treeparsera.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparseraWalker.g b/runtime/JavaScript/tests/functional/t049treeparseraWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparseraWalker.g
rename to runtime/JavaScript/tests/functional/t049treeparseraWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserb.g b/runtime/JavaScript/tests/functional/t049treeparserb.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserb.g
rename to runtime/JavaScript/tests/functional/t049treeparserb.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserbWalker.g b/runtime/JavaScript/tests/functional/t049treeparserbWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserbWalker.g
rename to runtime/JavaScript/tests/functional/t049treeparserbWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserc.g b/runtime/JavaScript/tests/functional/t049treeparserc.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserc.g
rename to runtime/JavaScript/tests/functional/t049treeparserc.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparsercWalker.g b/runtime/JavaScript/tests/functional/t049treeparsercWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparsercWalker.g
rename to runtime/JavaScript/tests/functional/t049treeparsercWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserd.g b/runtime/JavaScript/tests/functional/t049treeparserd.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserd.g
rename to runtime/JavaScript/tests/functional/t049treeparserd.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserdWalker.g b/runtime/JavaScript/tests/functional/t049treeparserdWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserdWalker.g
rename to runtime/JavaScript/tests/functional/t049treeparserdWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparsere.g b/runtime/JavaScript/tests/functional/t049treeparsere.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparsere.g
rename to runtime/JavaScript/tests/functional/t049treeparsere.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparsereWalker.g b/runtime/JavaScript/tests/functional/t049treeparsereWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparsereWalker.g
rename to runtime/JavaScript/tests/functional/t049treeparsereWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserf.g b/runtime/JavaScript/tests/functional/t049treeparserf.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserf.g
rename to runtime/JavaScript/tests/functional/t049treeparserf.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserfWalker.g b/runtime/JavaScript/tests/functional/t049treeparserfWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserfWalker.g
rename to runtime/JavaScript/tests/functional/t049treeparserfWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserg.g b/runtime/JavaScript/tests/functional/t049treeparserg.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserg.g
rename to runtime/JavaScript/tests/functional/t049treeparserg.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparsergWalker.g b/runtime/JavaScript/tests/functional/t049treeparsergWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparsergWalker.g
rename to runtime/JavaScript/tests/functional/t049treeparsergWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserh.g b/runtime/JavaScript/tests/functional/t049treeparserh.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserh.g
rename to runtime/JavaScript/tests/functional/t049treeparserh.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserhWalker.g b/runtime/JavaScript/tests/functional/t049treeparserhWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparserhWalker.g
rename to runtime/JavaScript/tests/functional/t049treeparserhWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparseri.g b/runtime/JavaScript/tests/functional/t049treeparseri.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparseri.g
rename to runtime/JavaScript/tests/functional/t049treeparseri.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t049treeparseriWalker.g b/runtime/JavaScript/tests/functional/t049treeparseriWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t049treeparseriWalker.g
rename to runtime/JavaScript/tests/functional/t049treeparseriWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteAST.html b/runtime/JavaScript/tests/functional/t051treeRewriteAST.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteAST.html
rename to runtime/JavaScript/tests/functional/t051treeRewriteAST.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTa.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTa.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTa.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTa.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTaWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTaWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTaWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTaWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTaa.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTaa.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTaa.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTaa.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTaaWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTaaWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTaaWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTaaWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTab.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTab.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTab.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTab.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTabWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTabWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTabWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTabWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTac.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTac.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTac.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTac.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTacWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTacWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTacWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTacWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTb.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTb.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTb.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTb.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTbWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTbWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTbWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTbWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTc.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTc.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTc.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTc.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTcWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTcWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTcWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTcWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTd.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTd.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTd.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTd.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTdWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTdWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTdWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTdWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTe.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTe.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTe.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTe.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTeWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTeWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTeWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTeWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTf.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTf.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTf.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTf.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTfWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTfWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTfWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTfWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTg.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTg.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTg.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTg.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTgWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTgWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTgWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTgWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTh.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTh.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTh.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTh.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASThWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASThWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASThWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASThWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTi.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTi.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTi.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTi.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTiWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTiWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTiWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTiWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTj.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTj.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTj.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTj.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTjWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTjWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTjWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTjWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTk.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTk.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTk.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTk.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTkWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTkWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTkWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTkWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTl.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTl.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTl.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTl.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTlWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTlWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTlWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTlWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTm.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTm.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTm.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTm.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTmWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTmWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTmWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTmWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTn.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTn.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTn.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTn.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTnWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTnWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTnWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTnWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTo.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTo.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTo.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTo.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASToWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASToWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASToWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASToWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTp.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTp.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTp.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTp.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTpWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTpWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTpWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTpWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTq.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTq.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTq.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTq.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTqWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTqWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTqWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTqWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTr.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTr.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTr.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTr.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTrWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTrWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTrWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTrWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTs.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTs.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTs.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTs.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTsWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTsWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTsWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTsWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTt.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTt.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTt.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTt.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTtWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTtWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTtWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTtWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTu.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTu.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTu.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTu.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTuWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTuWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTuWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTuWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTv.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTv.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTv.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTv.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTvWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTvWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTvWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTvWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTw.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTw.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTw.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTw.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTwWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTwWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTwWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTwWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTx.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTx.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTx.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTx.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTxWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTxWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTxWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTxWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTy.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTy.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTy.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTy.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTyWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTyWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTyWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTyWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTz.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTz.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTz.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTz.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTzWalker.g b/runtime/JavaScript/tests/functional/t051treeRewriteASTzWalker.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t051treeRewriteASTzWalker.g
rename to runtime/JavaScript/tests/functional/t051treeRewriteASTzWalker.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052import.html b/runtime/JavaScript/tests/functional/t052import.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052import.html
rename to runtime/JavaScript/tests/functional/t052import.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importM1.g b/runtime/JavaScript/tests/functional/t052importM1.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importM1.g
rename to runtime/JavaScript/tests/functional/t052importM1.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importM2.g b/runtime/JavaScript/tests/functional/t052importM2.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importM2.g
rename to runtime/JavaScript/tests/functional/t052importM2.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importM3.g b/runtime/JavaScript/tests/functional/t052importM3.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importM3.g
rename to runtime/JavaScript/tests/functional/t052importM3.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importM4.g b/runtime/JavaScript/tests/functional/t052importM4.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importM4.g
rename to runtime/JavaScript/tests/functional/t052importM4.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importM5.g b/runtime/JavaScript/tests/functional/t052importM5.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importM5.g
rename to runtime/JavaScript/tests/functional/t052importM5.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importM6.g b/runtime/JavaScript/tests/functional/t052importM6.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importM6.g
rename to runtime/JavaScript/tests/functional/t052importM6.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importM7.g b/runtime/JavaScript/tests/functional/t052importM7.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importM7.g
rename to runtime/JavaScript/tests/functional/t052importM7.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importM8.g b/runtime/JavaScript/tests/functional/t052importM8.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importM8.g
rename to runtime/JavaScript/tests/functional/t052importM8.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importS1.g b/runtime/JavaScript/tests/functional/t052importS1.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importS1.g
rename to runtime/JavaScript/tests/functional/t052importS1.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importS2.g b/runtime/JavaScript/tests/functional/t052importS2.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importS2.g
rename to runtime/JavaScript/tests/functional/t052importS2.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importS3.g b/runtime/JavaScript/tests/functional/t052importS3.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importS3.g
rename to runtime/JavaScript/tests/functional/t052importS3.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importS4.g b/runtime/JavaScript/tests/functional/t052importS4.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importS4.g
rename to runtime/JavaScript/tests/functional/t052importS4.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importS5.g b/runtime/JavaScript/tests/functional/t052importS5.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importS5.g
rename to runtime/JavaScript/tests/functional/t052importS5.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importS6.g b/runtime/JavaScript/tests/functional/t052importS6.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importS6.g
rename to runtime/JavaScript/tests/functional/t052importS6.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importS7.g b/runtime/JavaScript/tests/functional/t052importS7.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importS7.g
rename to runtime/JavaScript/tests/functional/t052importS7.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importS8.g b/runtime/JavaScript/tests/functional/t052importS8.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importS8.g
rename to runtime/JavaScript/tests/functional/t052importS8.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importT4.g b/runtime/JavaScript/tests/functional/t052importT4.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importT4.g
rename to runtime/JavaScript/tests/functional/t052importT4.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t052importT5.g b/runtime/JavaScript/tests/functional/t052importT5.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t052importT5.g
rename to runtime/JavaScript/tests/functional/t052importT5.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053hetero.html b/runtime/JavaScript/tests/functional/t053hetero.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053hetero.html
rename to runtime/JavaScript/tests/functional/t053hetero.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT.g b/runtime/JavaScript/tests/functional/t053heteroT.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT.g
rename to runtime/JavaScript/tests/functional/t053heteroT.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT1.g b/runtime/JavaScript/tests/functional/t053heteroT1.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT1.g
rename to runtime/JavaScript/tests/functional/t053heteroT1.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT10.g b/runtime/JavaScript/tests/functional/t053heteroT10.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT10.g
rename to runtime/JavaScript/tests/functional/t053heteroT10.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT11.g b/runtime/JavaScript/tests/functional/t053heteroT11.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT11.g
rename to runtime/JavaScript/tests/functional/t053heteroT11.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT12.g b/runtime/JavaScript/tests/functional/t053heteroT12.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT12.g
rename to runtime/JavaScript/tests/functional/t053heteroT12.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT13.g b/runtime/JavaScript/tests/functional/t053heteroT13.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT13.g
rename to runtime/JavaScript/tests/functional/t053heteroT13.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT14.g b/runtime/JavaScript/tests/functional/t053heteroT14.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT14.g
rename to runtime/JavaScript/tests/functional/t053heteroT14.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT15.g b/runtime/JavaScript/tests/functional/t053heteroT15.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT15.g
rename to runtime/JavaScript/tests/functional/t053heteroT15.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT16.g b/runtime/JavaScript/tests/functional/t053heteroT16.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT16.g
rename to runtime/JavaScript/tests/functional/t053heteroT16.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT17.g b/runtime/JavaScript/tests/functional/t053heteroT17.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT17.g
rename to runtime/JavaScript/tests/functional/t053heteroT17.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT18.g b/runtime/JavaScript/tests/functional/t053heteroT18.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT18.g
rename to runtime/JavaScript/tests/functional/t053heteroT18.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT2.g b/runtime/JavaScript/tests/functional/t053heteroT2.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT2.g
rename to runtime/JavaScript/tests/functional/t053heteroT2.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT3.g b/runtime/JavaScript/tests/functional/t053heteroT3.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT3.g
rename to runtime/JavaScript/tests/functional/t053heteroT3.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT4.g b/runtime/JavaScript/tests/functional/t053heteroT4.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT4.g
rename to runtime/JavaScript/tests/functional/t053heteroT4.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT5.g b/runtime/JavaScript/tests/functional/t053heteroT5.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT5.g
rename to runtime/JavaScript/tests/functional/t053heteroT5.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT6.g b/runtime/JavaScript/tests/functional/t053heteroT6.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT6.g
rename to runtime/JavaScript/tests/functional/t053heteroT6.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT7.g b/runtime/JavaScript/tests/functional/t053heteroT7.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT7.g
rename to runtime/JavaScript/tests/functional/t053heteroT7.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT8.g b/runtime/JavaScript/tests/functional/t053heteroT8.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT8.g
rename to runtime/JavaScript/tests/functional/t053heteroT8.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT9.g b/runtime/JavaScript/tests/functional/t053heteroT9.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroT9.g
rename to runtime/JavaScript/tests/functional/t053heteroT9.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP.g b/runtime/JavaScript/tests/functional/t053heteroTP.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP.g
rename to runtime/JavaScript/tests/functional/t053heteroTP.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP13.g b/runtime/JavaScript/tests/functional/t053heteroTP13.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP13.g
rename to runtime/JavaScript/tests/functional/t053heteroTP13.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP14.g b/runtime/JavaScript/tests/functional/t053heteroTP14.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP14.g
rename to runtime/JavaScript/tests/functional/t053heteroTP14.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP15.g b/runtime/JavaScript/tests/functional/t053heteroTP15.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP15.g
rename to runtime/JavaScript/tests/functional/t053heteroTP15.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP16.g b/runtime/JavaScript/tests/functional/t053heteroTP16.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP16.g
rename to runtime/JavaScript/tests/functional/t053heteroTP16.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP17.g b/runtime/JavaScript/tests/functional/t053heteroTP17.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP17.g
rename to runtime/JavaScript/tests/functional/t053heteroTP17.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP18.g b/runtime/JavaScript/tests/functional/t053heteroTP18.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t053heteroTP18.g
rename to runtime/JavaScript/tests/functional/t053heteroTP18.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer.html b/runtime/JavaScript/tests/functional/t056lexer.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer.html
rename to runtime/JavaScript/tests/functional/t056lexer.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer1.g b/runtime/JavaScript/tests/functional/t056lexer1.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer1.g
rename to runtime/JavaScript/tests/functional/t056lexer1.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer10.g b/runtime/JavaScript/tests/functional/t056lexer10.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer10.g
rename to runtime/JavaScript/tests/functional/t056lexer10.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer11.g b/runtime/JavaScript/tests/functional/t056lexer11.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer11.g
rename to runtime/JavaScript/tests/functional/t056lexer11.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer12.g b/runtime/JavaScript/tests/functional/t056lexer12.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer12.g
rename to runtime/JavaScript/tests/functional/t056lexer12.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer13.g b/runtime/JavaScript/tests/functional/t056lexer13.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer13.g
rename to runtime/JavaScript/tests/functional/t056lexer13.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer14.g b/runtime/JavaScript/tests/functional/t056lexer14.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer14.g
rename to runtime/JavaScript/tests/functional/t056lexer14.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer15.g b/runtime/JavaScript/tests/functional/t056lexer15.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer15.g
rename to runtime/JavaScript/tests/functional/t056lexer15.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer2.g b/runtime/JavaScript/tests/functional/t056lexer2.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer2.g
rename to runtime/JavaScript/tests/functional/t056lexer2.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer3.g b/runtime/JavaScript/tests/functional/t056lexer3.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer3.g
rename to runtime/JavaScript/tests/functional/t056lexer3.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer4.g b/runtime/JavaScript/tests/functional/t056lexer4.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer4.g
rename to runtime/JavaScript/tests/functional/t056lexer4.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer5.g b/runtime/JavaScript/tests/functional/t056lexer5.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer5.g
rename to runtime/JavaScript/tests/functional/t056lexer5.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer6.g b/runtime/JavaScript/tests/functional/t056lexer6.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer6.g
rename to runtime/JavaScript/tests/functional/t056lexer6.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer7.g b/runtime/JavaScript/tests/functional/t056lexer7.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer7.g
rename to runtime/JavaScript/tests/functional/t056lexer7.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer8.g b/runtime/JavaScript/tests/functional/t056lexer8.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer8.g
rename to runtime/JavaScript/tests/functional/t056lexer8.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t056lexer9.g b/runtime/JavaScript/tests/functional/t056lexer9.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t056lexer9.g
rename to runtime/JavaScript/tests/functional/t056lexer9.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST.html b/runtime/JavaScript/tests/functional/t057autoAST.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST.html
rename to runtime/JavaScript/tests/functional/t057autoAST.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST1.g b/runtime/JavaScript/tests/functional/t057autoAST1.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST1.g
rename to runtime/JavaScript/tests/functional/t057autoAST1.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST10.g b/runtime/JavaScript/tests/functional/t057autoAST10.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST10.g
rename to runtime/JavaScript/tests/functional/t057autoAST10.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST11.g b/runtime/JavaScript/tests/functional/t057autoAST11.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST11.g
rename to runtime/JavaScript/tests/functional/t057autoAST11.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST12.g b/runtime/JavaScript/tests/functional/t057autoAST12.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST12.g
rename to runtime/JavaScript/tests/functional/t057autoAST12.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST13.g b/runtime/JavaScript/tests/functional/t057autoAST13.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST13.g
rename to runtime/JavaScript/tests/functional/t057autoAST13.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST14.g b/runtime/JavaScript/tests/functional/t057autoAST14.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST14.g
rename to runtime/JavaScript/tests/functional/t057autoAST14.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST15.g b/runtime/JavaScript/tests/functional/t057autoAST15.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST15.g
rename to runtime/JavaScript/tests/functional/t057autoAST15.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST16.g b/runtime/JavaScript/tests/functional/t057autoAST16.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST16.g
rename to runtime/JavaScript/tests/functional/t057autoAST16.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST17.g b/runtime/JavaScript/tests/functional/t057autoAST17.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST17.g
rename to runtime/JavaScript/tests/functional/t057autoAST17.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST18.g b/runtime/JavaScript/tests/functional/t057autoAST18.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST18.g
rename to runtime/JavaScript/tests/functional/t057autoAST18.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST19.g b/runtime/JavaScript/tests/functional/t057autoAST19.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST19.g
rename to runtime/JavaScript/tests/functional/t057autoAST19.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST2.g b/runtime/JavaScript/tests/functional/t057autoAST2.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST2.g
rename to runtime/JavaScript/tests/functional/t057autoAST2.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST20.g b/runtime/JavaScript/tests/functional/t057autoAST20.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST20.g
rename to runtime/JavaScript/tests/functional/t057autoAST20.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST21.g b/runtime/JavaScript/tests/functional/t057autoAST21.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST21.g
rename to runtime/JavaScript/tests/functional/t057autoAST21.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST22.g b/runtime/JavaScript/tests/functional/t057autoAST22.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST22.g
rename to runtime/JavaScript/tests/functional/t057autoAST22.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST23.g b/runtime/JavaScript/tests/functional/t057autoAST23.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST23.g
rename to runtime/JavaScript/tests/functional/t057autoAST23.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST24.g b/runtime/JavaScript/tests/functional/t057autoAST24.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST24.g
rename to runtime/JavaScript/tests/functional/t057autoAST24.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST25.g b/runtime/JavaScript/tests/functional/t057autoAST25.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST25.g
rename to runtime/JavaScript/tests/functional/t057autoAST25.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST26.g b/runtime/JavaScript/tests/functional/t057autoAST26.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST26.g
rename to runtime/JavaScript/tests/functional/t057autoAST26.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST27.g b/runtime/JavaScript/tests/functional/t057autoAST27.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST27.g
rename to runtime/JavaScript/tests/functional/t057autoAST27.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST28.g b/runtime/JavaScript/tests/functional/t057autoAST28.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST28.g
rename to runtime/JavaScript/tests/functional/t057autoAST28.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST29.g b/runtime/JavaScript/tests/functional/t057autoAST29.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST29.g
rename to runtime/JavaScript/tests/functional/t057autoAST29.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST3.g b/runtime/JavaScript/tests/functional/t057autoAST3.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST3.g
rename to runtime/JavaScript/tests/functional/t057autoAST3.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST30.g b/runtime/JavaScript/tests/functional/t057autoAST30.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST30.g
rename to runtime/JavaScript/tests/functional/t057autoAST30.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST31.g b/runtime/JavaScript/tests/functional/t057autoAST31.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST31.g
rename to runtime/JavaScript/tests/functional/t057autoAST31.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST32.g b/runtime/JavaScript/tests/functional/t057autoAST32.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST32.g
rename to runtime/JavaScript/tests/functional/t057autoAST32.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST33.g b/runtime/JavaScript/tests/functional/t057autoAST33.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST33.g
rename to runtime/JavaScript/tests/functional/t057autoAST33.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST34.g b/runtime/JavaScript/tests/functional/t057autoAST34.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST34.g
rename to runtime/JavaScript/tests/functional/t057autoAST34.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST35.g b/runtime/JavaScript/tests/functional/t057autoAST35.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST35.g
rename to runtime/JavaScript/tests/functional/t057autoAST35.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST36.g b/runtime/JavaScript/tests/functional/t057autoAST36.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST36.g
rename to runtime/JavaScript/tests/functional/t057autoAST36.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST37.g b/runtime/JavaScript/tests/functional/t057autoAST37.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST37.g
rename to runtime/JavaScript/tests/functional/t057autoAST37.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST38.g b/runtime/JavaScript/tests/functional/t057autoAST38.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST38.g
rename to runtime/JavaScript/tests/functional/t057autoAST38.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST39.g b/runtime/JavaScript/tests/functional/t057autoAST39.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST39.g
rename to runtime/JavaScript/tests/functional/t057autoAST39.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST4.g b/runtime/JavaScript/tests/functional/t057autoAST4.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST4.g
rename to runtime/JavaScript/tests/functional/t057autoAST4.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST40.g b/runtime/JavaScript/tests/functional/t057autoAST40.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST40.g
rename to runtime/JavaScript/tests/functional/t057autoAST40.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST41.g b/runtime/JavaScript/tests/functional/t057autoAST41.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST41.g
rename to runtime/JavaScript/tests/functional/t057autoAST41.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST42.g b/runtime/JavaScript/tests/functional/t057autoAST42.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST42.g
rename to runtime/JavaScript/tests/functional/t057autoAST42.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST43.g b/runtime/JavaScript/tests/functional/t057autoAST43.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST43.g
rename to runtime/JavaScript/tests/functional/t057autoAST43.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST44.g b/runtime/JavaScript/tests/functional/t057autoAST44.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST44.g
rename to runtime/JavaScript/tests/functional/t057autoAST44.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST45.g b/runtime/JavaScript/tests/functional/t057autoAST45.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST45.g
rename to runtime/JavaScript/tests/functional/t057autoAST45.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST46.g b/runtime/JavaScript/tests/functional/t057autoAST46.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST46.g
rename to runtime/JavaScript/tests/functional/t057autoAST46.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST47.g b/runtime/JavaScript/tests/functional/t057autoAST47.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST47.g
rename to runtime/JavaScript/tests/functional/t057autoAST47.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST48.g b/runtime/JavaScript/tests/functional/t057autoAST48.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST48.g
rename to runtime/JavaScript/tests/functional/t057autoAST48.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST49.g b/runtime/JavaScript/tests/functional/t057autoAST49.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST49.g
rename to runtime/JavaScript/tests/functional/t057autoAST49.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST5.g b/runtime/JavaScript/tests/functional/t057autoAST5.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST5.g
rename to runtime/JavaScript/tests/functional/t057autoAST5.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST50.g b/runtime/JavaScript/tests/functional/t057autoAST50.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST50.g
rename to runtime/JavaScript/tests/functional/t057autoAST50.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST51.g b/runtime/JavaScript/tests/functional/t057autoAST51.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST51.g
rename to runtime/JavaScript/tests/functional/t057autoAST51.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST52.g b/runtime/JavaScript/tests/functional/t057autoAST52.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST52.g
rename to runtime/JavaScript/tests/functional/t057autoAST52.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST53.g b/runtime/JavaScript/tests/functional/t057autoAST53.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST53.g
rename to runtime/JavaScript/tests/functional/t057autoAST53.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST54.g b/runtime/JavaScript/tests/functional/t057autoAST54.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST54.g
rename to runtime/JavaScript/tests/functional/t057autoAST54.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST6.g b/runtime/JavaScript/tests/functional/t057autoAST6.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST6.g
rename to runtime/JavaScript/tests/functional/t057autoAST6.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST7.g b/runtime/JavaScript/tests/functional/t057autoAST7.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST7.g
rename to runtime/JavaScript/tests/functional/t057autoAST7.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST8.g b/runtime/JavaScript/tests/functional/t057autoAST8.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST8.g
rename to runtime/JavaScript/tests/functional/t057autoAST8.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST9.g b/runtime/JavaScript/tests/functional/t057autoAST9.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t057autoAST9.g
rename to runtime/JavaScript/tests/functional/t057autoAST9.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST.html b/runtime/JavaScript/tests/functional/t058rewriteAST.html
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST.html
rename to runtime/JavaScript/tests/functional/t058rewriteAST.html
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST1.g b/runtime/JavaScript/tests/functional/t058rewriteAST1.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST1.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST1.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST10.g b/runtime/JavaScript/tests/functional/t058rewriteAST10.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST10.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST10.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST11.g b/runtime/JavaScript/tests/functional/t058rewriteAST11.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST11.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST11.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST12.g b/runtime/JavaScript/tests/functional/t058rewriteAST12.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST12.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST12.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST13.g b/runtime/JavaScript/tests/functional/t058rewriteAST13.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST13.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST13.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST14.g b/runtime/JavaScript/tests/functional/t058rewriteAST14.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST14.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST14.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST15.g b/runtime/JavaScript/tests/functional/t058rewriteAST15.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST15.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST15.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST16.g b/runtime/JavaScript/tests/functional/t058rewriteAST16.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST16.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST16.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST17.g b/runtime/JavaScript/tests/functional/t058rewriteAST17.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST17.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST17.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST18.g b/runtime/JavaScript/tests/functional/t058rewriteAST18.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST18.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST18.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST19.g b/runtime/JavaScript/tests/functional/t058rewriteAST19.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST19.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST19.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST2.g b/runtime/JavaScript/tests/functional/t058rewriteAST2.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST2.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST2.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST20.g b/runtime/JavaScript/tests/functional/t058rewriteAST20.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST20.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST20.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST21.g b/runtime/JavaScript/tests/functional/t058rewriteAST21.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST21.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST21.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST22.g b/runtime/JavaScript/tests/functional/t058rewriteAST22.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST22.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST22.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST23.g b/runtime/JavaScript/tests/functional/t058rewriteAST23.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST23.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST23.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST24.g b/runtime/JavaScript/tests/functional/t058rewriteAST24.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST24.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST24.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST25.g b/runtime/JavaScript/tests/functional/t058rewriteAST25.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST25.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST25.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST26.g b/runtime/JavaScript/tests/functional/t058rewriteAST26.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST26.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST26.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST27.g b/runtime/JavaScript/tests/functional/t058rewriteAST27.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST27.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST27.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST28.g b/runtime/JavaScript/tests/functional/t058rewriteAST28.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST28.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST28.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST29.g b/runtime/JavaScript/tests/functional/t058rewriteAST29.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST29.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST29.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST3.g b/runtime/JavaScript/tests/functional/t058rewriteAST3.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST3.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST3.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST30.g b/runtime/JavaScript/tests/functional/t058rewriteAST30.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST30.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST30.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST31.g b/runtime/JavaScript/tests/functional/t058rewriteAST31.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST31.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST31.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST32.g b/runtime/JavaScript/tests/functional/t058rewriteAST32.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST32.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST32.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST33.g b/runtime/JavaScript/tests/functional/t058rewriteAST33.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST33.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST33.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST34.g b/runtime/JavaScript/tests/functional/t058rewriteAST34.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST34.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST34.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST35.g b/runtime/JavaScript/tests/functional/t058rewriteAST35.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST35.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST35.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST36.g b/runtime/JavaScript/tests/functional/t058rewriteAST36.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST36.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST36.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST37.g b/runtime/JavaScript/tests/functional/t058rewriteAST37.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST37.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST37.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST38.g b/runtime/JavaScript/tests/functional/t058rewriteAST38.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST38.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST38.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST39.g b/runtime/JavaScript/tests/functional/t058rewriteAST39.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST39.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST39.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST4.g b/runtime/JavaScript/tests/functional/t058rewriteAST4.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST4.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST4.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST40.g b/runtime/JavaScript/tests/functional/t058rewriteAST40.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST40.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST40.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST41.g b/runtime/JavaScript/tests/functional/t058rewriteAST41.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST41.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST41.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST42.g b/runtime/JavaScript/tests/functional/t058rewriteAST42.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST42.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST42.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST43.g b/runtime/JavaScript/tests/functional/t058rewriteAST43.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST43.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST43.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST44.g b/runtime/JavaScript/tests/functional/t058rewriteAST44.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST44.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST44.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST45.g b/runtime/JavaScript/tests/functional/t058rewriteAST45.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST45.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST45.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST46.g b/runtime/JavaScript/tests/functional/t058rewriteAST46.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST46.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST46.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST47.g b/runtime/JavaScript/tests/functional/t058rewriteAST47.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST47.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST47.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST48.g b/runtime/JavaScript/tests/functional/t058rewriteAST48.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST48.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST48.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST49.g b/runtime/JavaScript/tests/functional/t058rewriteAST49.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST49.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST49.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST5.g b/runtime/JavaScript/tests/functional/t058rewriteAST5.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST5.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST5.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST50.g b/runtime/JavaScript/tests/functional/t058rewriteAST50.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST50.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST50.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST51.g b/runtime/JavaScript/tests/functional/t058rewriteAST51.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST51.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST51.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST52.g b/runtime/JavaScript/tests/functional/t058rewriteAST52.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST52.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST52.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST53.g b/runtime/JavaScript/tests/functional/t058rewriteAST53.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST53.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST53.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST54.g b/runtime/JavaScript/tests/functional/t058rewriteAST54.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST54.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST54.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST55.g b/runtime/JavaScript/tests/functional/t058rewriteAST55.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST55.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST55.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST56.g b/runtime/JavaScript/tests/functional/t058rewriteAST56.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST56.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST56.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST57.g b/runtime/JavaScript/tests/functional/t058rewriteAST57.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST57.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST57.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST58.g b/runtime/JavaScript/tests/functional/t058rewriteAST58.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST58.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST58.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST59.g b/runtime/JavaScript/tests/functional/t058rewriteAST59.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST59.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST59.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST6.g b/runtime/JavaScript/tests/functional/t058rewriteAST6.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST6.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST6.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST60.g b/runtime/JavaScript/tests/functional/t058rewriteAST60.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST60.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST60.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST61.g b/runtime/JavaScript/tests/functional/t058rewriteAST61.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST61.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST61.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST62.g b/runtime/JavaScript/tests/functional/t058rewriteAST62.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST62.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST62.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST63.g b/runtime/JavaScript/tests/functional/t058rewriteAST63.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST63.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST63.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST64.g b/runtime/JavaScript/tests/functional/t058rewriteAST64.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST64.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST64.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST65.g b/runtime/JavaScript/tests/functional/t058rewriteAST65.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST65.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST65.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST66.g b/runtime/JavaScript/tests/functional/t058rewriteAST66.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST66.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST66.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST67.g b/runtime/JavaScript/tests/functional/t058rewriteAST67.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST67.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST67.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST68.g b/runtime/JavaScript/tests/functional/t058rewriteAST68.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST68.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST68.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST69.g b/runtime/JavaScript/tests/functional/t058rewriteAST69.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST69.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST69.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST7.g b/runtime/JavaScript/tests/functional/t058rewriteAST7.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST7.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST7.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST70.g b/runtime/JavaScript/tests/functional/t058rewriteAST70.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST70.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST70.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST71.g b/runtime/JavaScript/tests/functional/t058rewriteAST71.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST71.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST71.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST72.g b/runtime/JavaScript/tests/functional/t058rewriteAST72.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST72.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST72.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST73.g b/runtime/JavaScript/tests/functional/t058rewriteAST73.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST73.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST73.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST74.g b/runtime/JavaScript/tests/functional/t058rewriteAST74.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST74.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST74.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST75.g b/runtime/JavaScript/tests/functional/t058rewriteAST75.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST75.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST75.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST76.g b/runtime/JavaScript/tests/functional/t058rewriteAST76.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST76.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST76.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST77.g b/runtime/JavaScript/tests/functional/t058rewriteAST77.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST77.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST77.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST78.g b/runtime/JavaScript/tests/functional/t058rewriteAST78.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST78.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST78.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST79.g b/runtime/JavaScript/tests/functional/t058rewriteAST79.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST79.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST79.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST8.g b/runtime/JavaScript/tests/functional/t058rewriteAST8.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST8.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST8.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST80.g b/runtime/JavaScript/tests/functional/t058rewriteAST80.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST80.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST80.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST81.g b/runtime/JavaScript/tests/functional/t058rewriteAST81.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST81.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST81.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST82.g b/runtime/JavaScript/tests/functional/t058rewriteAST82.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST82.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST82.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST83.g b/runtime/JavaScript/tests/functional/t058rewriteAST83.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST83.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST83.g
diff --git a/antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST9.g b/runtime/JavaScript/tests/functional/t058rewriteAST9.g
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/tests/functional/t058rewriteAST9.g
rename to runtime/JavaScript/tests/functional/t058rewriteAST9.g
diff --git a/antlr-3.4/runtime/JavaScript/third/antcontrib.properties b/runtime/JavaScript/third/antcontrib.properties
similarity index 100%
rename from antlr-3.4/runtime/JavaScript/third/antcontrib.properties
rename to runtime/JavaScript/third/antcontrib.properties
diff --git a/runtime/ObjC/ANTLR.framework.zip b/runtime/ObjC/ANTLR.framework.zip
new file mode 100644
index 0000000..fae9dbf
--- /dev/null
+++ b/runtime/ObjC/ANTLR.framework.zip
Binary files differ
diff --git a/runtime/ObjC/ANTLR.framework/ANTLR b/runtime/ObjC/ANTLR.framework/ANTLR
new file mode 100755
index 0000000..9d93d63
--- /dev/null
+++ b/runtime/ObjC/ANTLR.framework/ANTLR
@@ -0,0 +1 @@
+Versions/Current/ANTLR
\ No newline at end of file
diff --git a/runtime/ObjC/ANTLR.framework/Headers b/runtime/ObjC/ANTLR.framework/Headers
new file mode 100755
index 0000000..a177d2a
--- /dev/null
+++ b/runtime/ObjC/ANTLR.framework/Headers
@@ -0,0 +1 @@
+Versions/Current/Headers
\ No newline at end of file
diff --git a/runtime/ObjC/ANTLR.framework/Resources b/runtime/ObjC/ANTLR.framework/Resources
new file mode 100755
index 0000000..953ee36
--- /dev/null
+++ b/runtime/ObjC/ANTLR.framework/Resources
@@ -0,0 +1 @@
+Versions/Current/Resources
\ No newline at end of file
diff --git a/runtime/ObjC/ANTLR.framework/Versions/Current b/runtime/ObjC/ANTLR.framework/Versions/Current
new file mode 100755
index 0000000..8c7e5a6
--- /dev/null
+++ b/runtime/ObjC/ANTLR.framework/Versions/Current
@@ -0,0 +1 @@
+A
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/ACBTree.h b/runtime/ObjC/Framework/ACBTree.h
new file mode 100644
index 0000000..3783cc9
--- /dev/null
+++ b/runtime/ObjC/Framework/ACBTree.h
@@ -0,0 +1,105 @@
+//
+//  ACBTree.h
+//  ST4
+//
+//  Created by Alan Condit on 4/18/11.
+//  Copyright 2011 Alan Condit. All rights reserved.
+//
+
+typedef enum {
+    BTNODE,
+    LEAF
+} NodeType;
+
+#import <Foundation/Foundation.h>
+
+@class AMutableDictionary;
+
+#define BTNODESIZE 11
+#define BTHNODESIZE ((BTNODESIZE-1)/2)
+#define BTKeySize  38
+#if defined FAILURE
+#undef FAILURE
+#endif
+#define FAILURE -1
+#if defined SUCCESS
+#undef SUCCESS
+#endif
+#define SUCCESS 0
+
+@interface ACBKey : NSObject {
+    NSInteger recnum;               /*  record number                   */
+    __strong NSString *key;         /*  key pointer id                  */
+    char      kstr[BTKeySize];      /*  key entry                       */
+}
+
+@property (assign) NSInteger recnum;
+@property (retain) NSString *key;
+
++ (ACBKey *)newKey;
++ (ACBKey *)newKeyWithKStr:(NSString *)aKey;
+- (id) init;
+- (id) initWithKStr:(NSString *)aKey;
+- (void)dealloc;
+- (NSString *) description;
+@end
+
+@interface ACBTree : NSObject {
+    __strong AMutableDictionary *dict;  /* The dictionary that this node belongs to */
+    __strong ACBTree *lnode;            /* pointer to left node            */
+    __strong ACBTree *rnode;            /* pointer to right node           */
+    __strong ACBKey  **keys;            /* pointer to keys                 */
+    __strong ACBTree **btNodes;         /* pointers to btNodes             */
+    __strong ACBKey  *keyArray[BTNODESIZE];
+    __strong ACBTree *btNodeArray[BTNODESIZE];
+    NSInteger lnodeid;                  /* nodeid of left node             */
+    NSInteger rnodeid;                  /* nodeid of right node            */
+    NSInteger nodeid;                   /* node id                         */
+    NSInteger nodeType;                 /* 1 = node, 2 = leaf, -1 = unused */
+    NSInteger numkeys;                  /* number of active entries        */
+    NSInteger numrecs;                  /* number of records               */
+    NSInteger updtd;                    /* modified since update flag      */
+    NSInteger keylen;                   /* length of key                   */
+    NSInteger kidx;
+}
+
+@property (retain) AMutableDictionary *dict;
+@property (retain) ACBTree  *lnode;
+@property (retain) ACBTree  *rnode;
+@property (assign) ACBKey   **keys;
+@property (assign) ACBTree  **btNodes;
+@property (assign) NSInteger lnodeid;
+@property (assign) NSInteger rnodeid;
+@property (assign) NSInteger nodeid;
+@property (assign) NSInteger nodeType;
+@property (assign) NSInteger numkeys;
+@property (assign) NSInteger numrecs;
+@property (assign) NSInteger updtd;
+@property (assign) NSInteger keylen;
+@property (assign) NSInteger kidx;
+
++ (ACBTree *) newNodeWithDictionary:(AMutableDictionary *)theDict;
+
+- (id)initWithDictionary:(AMutableDictionary *)theDict;
+- (void)dealloc;
+
+- (ACBTree *)createnode:(ACBKey *)kp0;
+- (ACBTree *)deletekey:(NSString *)dkey;
+- (ACBTree *)insertkey:(ACBKey *)ikp value:(id)value;
+- (ACBKey *)internaldelete:(ACBKey *)dkp;
+- (ACBTree *) internalinsert:(ACBKey *)key value:(id)value split:(NSInteger *)h;
+- (ACBTree *) insert:(ACBKey *)key value:(id)value index:(NSInteger)hi split:(NSInteger *)h;
+- (NSInteger)delfrmnode:(ACBKey *)ikp;
+- (NSInteger)insinnode:(ACBKey *)key value:(id)value;
+- (void)mergenode:(NSInteger)i;
+- (ACBTree *)splitnode:(NSInteger)idx;
+- (ACBTree *)search:(id)key;
+- (NSInteger)searchnode:(id)key match:(BOOL)match;
+- (void)borrowleft:(NSInteger)i;
+- (void)borrowright:(NSInteger)i;
+- (void)rotateleft:(NSInteger)j;
+- (void)rotateright:(NSInteger)j;
+- (NSInteger) keyWalkLeaves;
+- (NSInteger) objectWalkLeaves;
+- (NSString *) description;
+@end
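+
+/* Typical usage (an illustrative sketch; variable names are examples only):
+ *
+ *     AMutableDictionary *d = [AMutableDictionary newDictionary];
+ *     [d setObject:aValue forKey:@"ident"];   // NSString keys are wrapped in ACBKey
+ *     id v = [d objectForKey:@"ident"];       // nil when the key is absent
+ *
+ * ACBTree nodes are normally created and balanced by AMutableDictionary
+ * rather than used directly.
+ */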
diff --git a/runtime/ObjC/Framework/ACBTree.m b/runtime/ObjC/Framework/ACBTree.m
new file mode 100644
index 0000000..fd7f1b7
--- /dev/null
+++ b/runtime/ObjC/Framework/ACBTree.m
@@ -0,0 +1,747 @@
+//
+//  ACBTree.m
+//  ST4
+//
+//  Created by Alan Condit on 4/18/11.
+//  Copyright 2011 Alan Condit. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+#import "ACBTree.h"
+#import "AMutableDictionary.h"
+#import "RuntimeException.h"
+
+@class AMutableDictionary;
+
+@implementation ACBKey
+
+static NSInteger RECNUM = 0;
+
+@synthesize recnum;
+@synthesize key;
+
++ (ACBKey *)newKey
+{
+    return [[ACBKey alloc] init];
+}
+
++ (ACBKey *)newKeyWithKStr:(NSString *)aKey
+{
+    return [[ACBKey alloc] initWithKStr:(NSString *)aKey];
+}
+
+- (id) init
+{
+    self =[super init];
+    if ( self != nil ) {
+        recnum = RECNUM++;
+    }
+    return self;
+}
+
+- (id) initWithKStr:(NSString *)aKey
+{
+    self =[super init];
+    if ( self != nil ) {
+        NSInteger len;
+        recnum = RECNUM++;
+        key = aKey;
+        len = [aKey length];
+        if ( len >= BTKeySize ) {
+            len = BTKeySize - 1;
+        }
+        strncpy( kstr, [aKey cStringUsingEncoding:NSASCIIStringEncoding], len);
+        kstr[len] = '\0';
+    }
+    return self;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in ACBKey" );
+#endif
+    [super dealloc];
+}
+
+- (NSString *) description
+{
+    return [NSString stringWithFormat:@"len =%02d\nrecnum=%04d\nkey=%@\n", [key length], recnum, key];
+}
+
+@end
+
+@implementation ACBTree
+
+@synthesize dict;
+@synthesize lnode;
+@synthesize rnode;
+@synthesize keys;
+@synthesize btNodes;
+@synthesize lnodeid;
+@synthesize rnodeid;
+@synthesize nodeid;
+@synthesize nodeType;
+@synthesize numkeys;
+@synthesize numrecs;
+@synthesize updtd;
+@synthesize keylen;
+@synthesize kidx;
+
++ (ACBTree *) newNodeWithDictionary:(AMutableDictionary *)theDict
+{
+    return [[ACBTree alloc] initWithDictionary:theDict];
+}
+
+- (id)initWithDictionary:(AMutableDictionary *)theDict
+{
+    self = [super init];
+    if (self) {
+        // Initialization code here.
+        dict = theDict;
+        nodeid = theDict.nxt_nodeid++;
+        keys = keyArray;
+        btNodes = btNodeArray;
+        if ( nodeid == 0 ) {
+            numkeys = 0;
+        }
+    }
+    
+    return self;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in ACBTree" );
+#endif
+    [super dealloc];
+}
+
+- (ACBTree *)createnode:(ACBKey *)kp
+{
+    ACBTree *tmp;
+    
+    tmp = [ACBTree newNodeWithDictionary:dict];
+    tmp.nodeType = nodeType;
+    tmp.lnode = self;
+    tmp.rnode = self.rnode;
+    self.rnode = tmp;
+    //tmp.btNodes[0] = self;
+    //tmp.keys[0] = kp;
+    tmp.updtd = YES;
+    tmp.numrecs = ((nodeType == LEAF)?1:numrecs);
+    updtd = YES;
+    tmp.numkeys = 1;
+    [tmp retain];
+    return(tmp);
+}
+
+- (ACBTree *)deletekey:(NSString *)dkey
+{
+    ACBKey /* *del, */ *dkp;
+    ACBTree *told, *sNode;
+    BOOL mustRelease = NO;
+
+    if ( [dkey isKindOfClass:[NSString class]] ) {
+        dkp = [ACBKey newKeyWithKStr:dkey];
+        mustRelease = YES;
+    }
+    else if ( [dkey isKindOfClass:[ACBKey class]] )
+        dkp = (ACBKey *)dkey;
+    else
+        @throw [IllegalArgumentException newException:[NSString stringWithFormat:@"Don't understand this key:\"%@\"", dkey]];
+    sNode = [self search:dkp.key];
+    if ( sNode == nil || [sNode searchnode:dkp.key match:YES] == FAILURE ) {
+        if ( mustRelease ) [dkp release];
+        return(self);
+    }
+    told = dict.root;
+    /* del = */[self internaldelete:dkp];
+    
+    /*  check for shrink at the root  */
+    if ( numkeys == 1 && nodeType != LEAF ) {
+        told = btNodes[0];
+        told.nodeid = 1;
+        told.updtd = YES;
+        dict.root = told;
+    }
+#ifdef DONTUSENOMO
+    if (debug == 'd') [self printtree];
+#endif
+    if ( mustRelease ) [dkp release];
+    return(told);
+}
+
+/** insertkey is the insertion entry point.
+ *  It calls internalinsert, which determines whether the key already
+ *  exists in the tree and performs the insertion if it does not, then
+ *  handles growth at the root and returns the node to be updated.
+ */
+- (ACBTree *)insertkey:(ACBKey *)kp value:(id)value
+{
+    ACBTree *tnew, *q;
+    NSInteger h, nodeNum;
+    
+    tnew = self;
+    q = [self internalinsert:kp value:value split:&h];
+    /*  check for growth at the root  */
+    if ( q != nil ) {
+        tnew = [[ACBTree newNodeWithDictionary:dict] retain];
+        tnew.nodeType = BTNODE;
+        nodeNum = tnew.nodeid;
+        tnew.nodeid = 0;
+        self.nodeid = nodeNum;
+        [tnew insert:self.keys[numkeys-1] value:self index:0 split:&h];
+        [tnew insert:q.keys[q.numkeys-1] value:q index:1 split:&h];
+        tnew.numrecs = self.numrecs + q.numrecs;
+        tnew.lnodeid = self.nodeid;
+        tnew.rnodeid = self.rnodeid;
+        self.rnodeid = tnew.nodeid;
+        tnew.lnode = self;
+        tnew.rnode = self.rnode;
+        self.rnode = tnew;
+        /* affected by nodeid swap */
+        // newnode.lnodeid = tnew.btNodes[0].nodeid;
+    }
+    //dict.root = t;
+    //l.reccnt++;
+    return(tnew);
+}
+
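+/** SEARCH -- descend from this node to the leaf whose key range covers kstr.
+ *  Returns the leaf containing an exact match for kstr, or nil when the
+ *  key is not present in the tree.
+ */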
+- (ACBTree *)search:(NSString *)kstr
+{
+    NSInteger i, ret;
+    NSInteger srchlvl = 0;
+    ACBTree *t;
+
+    t = self;
+    if ( self.numkeys == 0 && self.nodeType == LEAF )
+        return nil;
+    while (t != nil) {
+        for (i = 0; i < t.numkeys; i++) {
+            ret = [t.keys[i].key compare:kstr];
+            if ( ret >= 0 ) {
+                if ( t.nodeType == LEAF ) {
+                    if ( ret == 0 ) return (t);    /* node containing keyentry found */
+                    else return nil;
+                }
+                else {
+                    break;
+                }
+            }
+        }
+        srchlvl++;
+        if ( t.nodeType == BTNODE ) t = t.btNodes[i];
+        else {
+            t = nil;
+        }
+    }
+    return(nil);          /* entry not found */
+}
+
+/** SEARCHNODE
+ *  calling parameters --
+ *      kstr  -- key string to search for.
+ *      match -- YES for an exact match, NO for an insertion position.
+ *  returns -- i
+ *      i == FAILURE when an exact match is required but does not exist.
+ *      i == numkeys if no existing insertion branch was found.
+ *      otherwise i == insertion branch.
+ */
+- (NSInteger)searchnode:(NSString *)kstr match:(BOOL)match
+{
+    NSInteger i, ret;
+    for ( i = 0; i < numkeys; i++ ) {
+        ret = [keys[i].key compare:kstr];
+        if ( ret >= 0 ) {         /* key node found */
+            if ( ret == 0 && match == NO ) {
+                return FAILURE;
+            }
+            else if ( ret > 0 &&  match == YES ) {
+                return FAILURE;
+            }
+            break;
+        }
+    }
+    if ( i == numkeys && match == YES ) {
+        i = FAILURE;
+    }
+    return(i);
+}
+
+- (ACBKey *)internaldelete:(ACBKey *)dkp
+{
+    NSInteger i, nkey;
+    __strong ACBKey *del = nil;
+    ACBTree *tsb;
+    NSInteger srchlvl = 0;
+    
+    /* find deletion branch */
+    if ( self.nodeType != LEAF ) {
+        srchlvl++;
+        /* search for end of tree */
+        i = [self searchnode:dkp.key match:NO];
+        del = [btNodes[i] internaldelete:dkp];
+        srchlvl--;
+        /* if not LEAF propagate back high key    */
+        tsb = btNodes[i];
+        nkey = tsb.numkeys - 1;
+    }
+    /***  the bottom of the tree has been reached       ***/
+    else {                   /* set up deletion ptrs      */
+        if ( [self delfrmnode:dkp] == SUCCESS ) {
+            if ( numkeys < BTHNODESIZE+1 ) {
+                del = dkp;
+            }
+            else {
+                del = nil;
+            }
+            dkp.recnum = nodeid;
+            return(del);
+        }
+    }
+    /***       indicate deletion to be done            ***/
+    if ( del != nil ) {
+        /*** the key in "del" has to be deleted from in present node ***/
+        if ( btNodes[i].numkeys >= BTHNODESIZE+1 ) {
+            /* node does not need balancing */
+            del = nil;
+            self.keys[i] = tsb.keys[nkey];
+        }
+        else {                         /* node requires balancing */
+            if ( i == 0 ) {
+                [self rotateright:0];
+                self.btNodes[0] = tsb;
+            } else if ( i < numkeys-1 ) {     /* look to the right first */
+                if ( self.btNodes[i+1].numkeys > BTHNODESIZE+1 ) {  /* carry from right */
+                    [self borrowright:i];
+                }
+                else {           /* merge present node with right node */
+                    [self mergenode:i];
+                }
+            }
+            else {                      /* look to the left */
+                if ( i > 0 ) {          /* carry or merge with left node */
+                    if ( self.btNodes[i-1].numkeys > BTHNODESIZE+1 ) { /* carry from left */
+                        [self borrowleft:i];
+                    }
+                    else { /*** merge present node with left node ***/
+                        i--;
+                        [self mergenode:i];
+                        tsb = self.btNodes[i];
+                    }
+                }
+            }
+        self.keys[i] = tsb.keys[nkey];
+        }
+    }
+    numrecs--;
+    updtd = TRUE;
+    return(del);
+}
+
+/** Search key kp on the B-tree rooted at this node; if the key is already
+ *  present, return the node containing it.  Otherwise insert an item with
+ *  key kp into the tree.  If a node splits, the emerging right-hand node
+ *  is returned so the caller can link it in; h = "tree has become higher"
+ */
+- (ACBTree *) internalinsert:(ACBKey *)kp value:(id)value split:(NSInteger *)h
+{
+    /* search key ins on node t^; h = false  */
+    NSInteger i, ret;
+    ACBTree *q, *tmp;
+    
+    for (i = 0; i < numkeys; i++) {
+        ret = [keys[i].key compare:kp.key];
+        if ( ret >= 0 ) {
+            if ( nodeType == LEAF && ret == 0 ) return (self);    /* node containing keyentry found */
+            break;
+        }
+    }
+    if ( nodeType == LEAF ) { /*  key goes in this node  */
+        q = [self insert:kp value:value index:i split:h];
+    }
+    else  { /* nodeType == BTNODE */
+        /*  key is not on this node  */
+        q = [self.btNodes[i] internalinsert:kp value:value split:h];
+        if ( *h ) {
+            [self insert:kp value:q index:i split:h];
+        }
+        else {
+            self.numrecs++;
+        }
+        tmp = self.btNodes[numkeys-1];
+        keys[numkeys-1] = tmp.keys[tmp.numkeys-1];
+        if ( i != numkeys-1 ) {
+            tmp = self.btNodes[i];
+            keys[i] = tmp.keys[tmp.numkeys-1];
+        }
+        updtd = YES;
+    } /* search */
+    return q;
+}
+
+/** Do the actual insertion or split and insert
+ *  insert key to the right of t.keys[hi] 
+ */
+- (ACBTree *) insert:(ACBKey *)kp value:(id)value index:(NSInteger)hi split:(NSInteger *)h
+{
+    ACBTree *b;
+    
+    if ( numkeys < BTNODESIZE ) {
+        *h = NO;
+        [self rotateright:hi];
+        keys[hi] = kp;
+        btNodes[hi] = value;
+        numrecs++;
+        numkeys++;
+        updtd = YES;
+        //[kp retain];
+        return nil;
+    }
+    else { /*  node t is full; split it and assign the emerging ACBKey to olditem  */
+        b = [self splitnode:hi];
+        if ( hi <= BTHNODESIZE ) {              /* insert key in left page */
+            [self rotateright:hi];
+            keys[hi] = kp;
+            btNodes[hi] = value;
+            numrecs++;
+            numkeys++;
+        }
+        else {                                  /* insert key in right page */
+            hi -= BTHNODESIZE;
+            if ( b.rnode == nil ) hi--;
+            [b rotateright:hi];
+            b.keys[hi] = kp;
+            b.btNodes[hi] = value;
+            b.numrecs++;
+            b.numkeys++;
+        }
+        numkeys = b.numkeys = BTHNODESIZE+1;
+        b.updtd = updtd = YES;
+    }
+    return b;
+} /* insert */
+
+- (void)borrowleft:(NSInteger)i
+{
+    ACBTree *t0, *t1;
+    NSInteger nkey;
+    
+    t0 = btNodes[i];
+    t1 = btNodes[i-1];
+    nkey = t1.numkeys-1;
+    [t0 insinnode:t1.keys[nkey] value:t1.btNodes[nkey]];
+    [t1 delfrmnode:t1.keys[nkey]];
+    nkey--;
+    keys[i-1] = t1.keys[nkey];
+    keys[i-1].recnum = t1.nodeid;
+}
+
+- (void)borrowright:(NSInteger)i
+{
+    ACBTree *t0, *t1;
+    NSInteger nkey;
+    
+    t0 = btNodes[i];
+    t1 = btNodes[i+1];
+    [t0 insinnode:t1.keys[0] value:t1.btNodes[0]];
+    [t1 delfrmnode:t1.keys[0]];
+    nkey = t0.numkeys - 1;
+    keys[i] = t0.keys[nkey];
+    keys[i].recnum = t0.nodeid;
+}
+
+- (NSInteger)delfrmnode:(ACBKey *)ikp
+{
+    NSInteger j;
+    
+    j = [self searchnode:ikp.key match:YES];
+    if (j == FAILURE) {
+        return(FAILURE);
+    }
+    ACBKey *k0 = nil;
+    ACBTree *n0 = nil;
+    if ( self.nodeType == LEAF ) {
+        k0 = self.keys[j];
+        n0 = self.btNodes[j];
+    }
+    [self rotateleft:j];
+    self.numkeys--;
+    numrecs -= ((self.nodeType == LEAF)?1:btNodes[j].numrecs);
+    if ( k0 ) [k0 release];
+    if ( n0 ) [n0 release];
+    updtd = TRUE;
+    return(SUCCESS);
+}
+
+- (NSInteger)insinnode:(ACBKey *)ikp value:(id)value
+{
+    NSInteger j;
+    
+    j = [self searchnode:ikp.key match:NO];
+    [self rotateright:j];
+    keys[j] = ikp;
+    btNodes[j] = value;
+    numkeys++;
+    if ( nodeType == LEAF ) {
+        numrecs++;
+    }
+    else {
+        numrecs += btNodes[j].numrecs;
+    }
+    updtd = TRUE;
+    return(j);
+}
+
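+/** MERGENODE -- merge child node i with its right sibling (i+1):
+ *  move the sibling's keys and node pointers into node i, take over the
+ *  sibling's right link, then drop the sibling's entry from this node.
+ */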
+- (void)mergenode:(NSInteger)i
+{
+    ACBTree *t0, *t1, *tr;
+    NSInteger j, k, nkeys;
+    
+    t0 = btNodes[i];
+    t1 = btNodes[i+1];
+    /*** move keys and pointers from
+     t1 node to t0 node           ***/
+    for (j=t0.numkeys, k=0; j < BTNODESIZE && k < t1.numkeys; j++, k++) {
+        t0.keys[j] = t1.keys[k];
+        t0.btNodes[j] = t1.btNodes[k];
+        t0.numkeys++;
+    }
+    t0.numrecs += t1.numrecs;
+    t0.rnode = t1.rnode;
+    t0.rnodeid = t1.rnodeid;
+    t0.updtd = YES;
+    nkeys = t0.numkeys - 1;
+    keys[i] = t0.keys[nkeys]; /* update key to point to new high key */
+    [self rotateleft:i+1]; /* copy over the keys and nodes */
+    
+    t1.nodeType = -1;
+    if (t1.rnodeid != 0xffff && i < numkeys - 2) {
+        tr = btNodes[i+1];
+        tr.lnodeid = t0.nodeid;
+        tr.lnode = t0;
+        tr.updtd = YES;
+    }
+    self.numkeys--;
+    updtd = YES;
+}
+
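+/** SPLITNODE -- split this full node around insertion index idx:
+ *  allocate a new right sibling, move the upper keys, node pointers and
+ *  their record counts into it, and return the new node.
+ */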
+- (ACBTree *)splitnode:(NSInteger)idx
+{
+    ACBTree *t1;
+    NSInteger j, k;
+    
+    k = (idx <= BTHNODESIZE) ? BTHNODESIZE : BTHNODESIZE+1;
+    /*** create new node ***/
+    // checknode(l, t, k);
+    t1 = [ACBTree newNodeWithDictionary:dict];
+    t1.nodeType = nodeType;
+    t1.rnode = self.rnode;
+    self.rnode = t1;
+    t1.lnode = self;
+    self.updtd = t1.updtd = YES;
+    /*** move keys and pointers ***/
+    NSInteger i = 0;
+    for (j = k; j < BTNODESIZE; j++, i++ ) {
+        t1.keys[i] = keys[j];
+        t1.btNodes[i] = btNodes[j];
+        t1.numrecs += ((nodeType == LEAF) ? 1 : btNodes[j].numrecs);
+        numrecs     -= ((nodeType == LEAF) ? 1 : btNodes[j].numrecs);
+        keys[j] = nil;
+        btNodes[j] = nil;
+    }
+    t1.numkeys  = BTNODESIZE-k;
+    self.numkeys = k;
+    return(t1);
+}
+
+#ifdef DONTUSENOMO
+freetree(l, t)
+FIDB *l;
+ACBTree *t;
+{
+    ACBTree *tmp;
+    NSInteger i;
+    
+    if (dict.root == nil) return(SUCCESS);
+    if (t.nodeid == 1) {
+        srchlvl = 0;
+    }
+    else srchlvl++;
+    for (i = 0; i < t.numkeys; i++) {
+        tmp = t.btNodes[i];
+        if (tmp != nil) {
+            if (tmp.nodeType == LEAF) {
+                free(tmp);    /* free the leaf */
+                if (tmp == l.rrnode) {
+                    l.rrnode = nil;
+                }
+                t.btNodes[i] = nil;
+                l.chknode.nods_inuse--;
+                /*              putpage(l, l.chknode, 0);
+                 */
+            }
+            else {
+                freetree(l, tmp); /* continue up the tree */
+                srchlvl--;        /* decrement the srchlvl on return */
+            }
+        }
+    }
+    free(t); /* free the node entered with */
+    if (t == l.rrnode) {
+        l.rrnode = nil;
+    }
+    l.chknode.nods_inuse--;
+    /*     putpage(l, l.chknode, 0);
+     */
+    t = nil;
+}
+
+- (void) notfound:(ACBKey *)kp
+{
+    /* error routine to perform if entry was expected and not found */
+}
+
+- (void)printtree:(ACBTree *)t
+{
+    BYTE *str;
+    NSInteger i, j;
+    NSUInteger *pdate, *ptime;
+    
+    syslst = stdprn;
+    if ( t.nodeid == 1 ) {
+        srchlvl = 0;
+    }
+    else srchlvl++;
+    for (j = 0; j < t.numkeys; j++) {
+        checknode(l, t, j);
+        if ( t.btNodes[j] != nil ) [self printtree:t.btNodes[j]];
+    }
+    NSLog(@"Nodeid = %d, nodeType = %s, numkeys = %d, numrecs = %d\n",
+          t.nodeid, (t.nodeType == BTNODE)?@"NODE":@"LEAF", t.numkeys, t.numrecs);
+    NSLog(@"Left nodeid = %d, Right nodeid = %d\n", t.lnodeid, t.rnodeid);
+    for (i = 0; i < t.numkeys; i++) {
+        NSLog(@"     t.keys[%d] recnum = %d, keyval = %@",
+              i, t.keys[i].recnum, t.keys[i]);
+        str = t.keys[i].kstr;
+        pdate = (NSUInteger *) (str + 6);
+        ptime = (NSUInteger *) (str + 8);
+        NSLog(@" date = %04.4x,  time = %04.4x\n",
+              *pdate, *ptime);
+    }
+}
+
+- (BOOL)puttree:(ACBTree *)t
+{
+    NSInteger i;
+    if (t.nodeType != LEAF) {
+        for (i = 0; i < t.numkeys; i++) {
+            if ( t.btNodes[i] != nil ) puttree(l, t.btNodes[i]);
+        }
+    }
+    if ( t.updtd ) {
+        putnode(l, t, t.nodeid);
+        return(YES);
+    }
+    return(NO);
+}
+
+#endif
+
+/** ROTATELEFT -- rotate keys from right to the left
+ *  starting at position j
+ */
+- (void)rotateleft:(NSInteger)j
+{
+    while ( j+1 < numkeys ) {
+        keys[j] = keys[j+1];
+        btNodes[j] = btNodes[j+1];
+        j++;
+    }
+}
+
+/** ROTATERIGHT -- rotate keys to the right by 1 position
+ *  starting at the last key down to position j.
+ */
+- (void)rotateright:(NSInteger)j
+{
+    NSInteger k;
+    
+    for ( k = numkeys; k > j; k-- ) {
+        keys[k] = keys[k-1];
+        btNodes[k] = btNodes[k-1];
+    }
+    keys[j] = nil;
+    btNodes[j] = nil;
+}
+
+- (NSInteger) keyWalkLeaves
+{
+    NSInteger i, idx = 0;
+    NSInteger keycnt;
+    ACBTree *t;
+
+    if ( self != dict.root ) {
+        return 0; // maybe I need to throw an exception here
+    }
+    t = self;
+    self.dict.data = [[NSMutableData dataWithLength:(numrecs * sizeof(id))] retain];
+    self.dict.ptrBuffer = [self.dict.data mutableBytes];
+    while ( t != nil && t.nodeType != LEAF ) {
+        t = t.btNodes[0];
+    }
+    do {
+        keycnt = t.numkeys;
+        for ( i = 0; i < keycnt; i++ ) {
+            if ( t.btNodes[i] != nil ) {
+                dict.ptrBuffer[idx++] = (id) t.keys[i].key;
+            }
+        }
+        t = t.rnode;
+    } while ( t != nil );
+    return( idx );
+}
+
+- (NSInteger) objectWalkLeaves
+{
+    NSInteger i, idx = 0;
+    NSInteger keycnt;
+    ACBTree *t;
+    
+    if ( self != dict.root ) {
+        return 0; // maybe I need to throw an exception here
+    }
+    t = self;
+    self.dict.data = [[NSMutableData dataWithLength:(numrecs * sizeof(id))] retain];
+    self.dict.ptrBuffer = [self.dict.data mutableBytes];
+    while ( t != nil && t.nodeType != LEAF ) {
+        t = t.btNodes[0];
+    }
+    do {
+        keycnt = t.numkeys;
+        for ( i = 0; i < keycnt; i++ ) {
+            if ( t.btNodes[i] != nil ) {
+                dict.ptrBuffer[idx++] = (id) t.btNodes[i];
+            }
+        }
+        t = t.rnode;
+    } while ( t != nil );
+    return( idx );
+}
+
+- (NSString *) description
+{
+    NSMutableString *str = [NSMutableString stringWithCapacity:16];
+    NSInteger i;
+    for (i = 0; i < numkeys; i++ ) {
+        [str appendString:[NSString stringWithFormat:@"key[%d]=%@", i, [keys[i] description]]];
+    }
+    for (i = 0; i < numkeys; i++ ) {
+        [str appendString:[NSString stringWithFormat:@"btnodes[%d]=%@\n", i, [btNodes[i] description]]];
+    }
+    return str;
+}
+
+@end
diff --git a/runtime/ObjC/Framework/ACNumber.h b/runtime/ObjC/Framework/ACNumber.h
new file mode 100644
index 0000000..7861b88
--- /dev/null
+++ b/runtime/ObjC/Framework/ACNumber.h
@@ -0,0 +1,47 @@
+//
+//  ACNumber.h
+//  ST4
+//
+//  Created by Alan Condit on 3/19/12.
+//  Copyright 2012 Alan Condit. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+
+@interface ACNumber : NSObject {
+    
+    union {
+        BOOL b;
+        char c;
+        double d;
+        NSInteger i;
+    } u;
+    
+    BOOL fBOOL   :  1;
+    BOOL fChar   :  1;
+    BOOL fDouble :  1;
+    BOOL fNSInt  :  1;
+}
+
++ (ACNumber *)numberWithBool:(BOOL)aBool;
++ (ACNumber *)numberWithChar:(char)aChar;
++ (ACNumber *)numberWithDouble:(double)aDouble;
++ (ACNumber *)numberWithInt:(NSInteger)anInt;
++ (ACNumber *)numberWithInteger:(NSInteger)anInt;
+
+- (ACNumber *)initWithBool:(BOOL)aBool;
+- (ACNumber *)initWithChar:(char)aChar;
+- (ACNumber *)initWithDouble:(double)aDouble;
+- (ACNumber *)initWithInteger:(NSInteger)anInt;
+
+- (BOOL)boolValue;
+- (char)charValue;
+- (double)doubleValue;
+- (NSInteger)intValue;
+- (NSInteger)integerValue;
+- (NSInteger)inc;
+- (NSInteger)add:(NSInteger)anInt;
+- (NSString *)description;
+
+@end
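+
+/* Illustrative sketch of the intended use (variable names are examples only):
+ *
+ *     ACNumber *n = [ACNumber numberWithInteger:41];
+ *     [n inc];                               // n now holds 42
+ *     NSInteger i = [n integerValue];        // 42
+ *     ACNumber *b = [ACNumber numberWithBool:YES];
+ *     NSLog(@"%@ %@", n, b);                 // "42 true" via -description
+ */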
diff --git a/runtime/ObjC/Framework/ACNumber.m b/runtime/ObjC/Framework/ACNumber.m
new file mode 100644
index 0000000..989f81a
--- /dev/null
+++ b/runtime/ObjC/Framework/ACNumber.m
@@ -0,0 +1,170 @@
+//
+//  ACNumber.m
+//  ST4
+//
+//  Created by Alan Condit on 3/19/12.
+//  Copyright 2012 Alan Condit. All rights reserved.
+//
+
+#import "ACNumber.h"
+
+
+@implementation ACNumber
+
++ (ACNumber *)numberWithBool:(BOOL)aBool
+{
+    return [[ACNumber alloc] initWithBool:aBool];
+}
+
++ (ACNumber *)numberWithChar:(char)aChar
+{
+    return [[ACNumber alloc] initWithChar:aChar];
+}
+
++ (ACNumber *)numberWithDouble:(double)aDouble
+{
+    return [[ACNumber alloc] initWithDouble:aDouble];
+}
+
++ (ACNumber *)numberWithInt:(NSInteger)anInt
+{
+    return [[ACNumber alloc] initWithInteger:anInt];
+}
+
++ (ACNumber *)numberWithInteger:(NSInteger)anInt
+{
+    return [[ACNumber alloc] initWithInteger:anInt];
+}
+
+
+- (id)init
+{
+    self = [super init];
+    if (self) {
+        // Initialization code here.
+    }
+    
+    return self;
+}
+
+- (ACNumber *)initWithBool:(BOOL)aBool
+{
+    self = [super init];
+    if ( self != nil ) {
+        fBOOL = YES;
+        fChar = NO;
+        fDouble = NO;
+        fNSInt = NO;
+        u.b = aBool;
+    }
+    return self;
+}
+
+- (ACNumber *)initWithChar:(char)aChar
+{
+    self = [super init];
+    if ( self != nil ) {
+        fBOOL = NO;
+        fChar = YES;
+        fDouble = NO;
+        fNSInt = NO;
+        u.c = aChar;
+    }
+    return self;
+}
+
+- (ACNumber *)initWithDouble:(double)aDouble
+{
+    self = [super init];
+    if ( self != nil ) {
+        fBOOL = NO;
+        fChar = NO;
+        fDouble = YES;
+        fNSInt = NO;
+        u.d = aDouble;
+    }
+    return self;
+}
+
+- (ACNumber *)initWithInteger:(NSInteger)anInt
+{
+    self = [super init];
+    if ( self != nil ) {
+        fBOOL = NO;
+        fChar = NO;
+        fDouble = NO;
+        fNSInt = YES;
+        u.i = anInt;
+    }
+    return self;
+}
+
+- (void)dealloc
+{
+    [super dealloc];
+}
+
+- (BOOL)boolValue
+{
+    if (fBOOL)
+        return u.b;
+    else
+        return NO;
+}
+
+- (char)charValue
+{
+    if (fChar)
+        return u.c;
+    else
+        return (char)-1;
+}
+
+- (double)doubleValue
+{
+    if (fDouble)
+        return u.d;
+    else
+        return 0.0;
+}
+
+- (NSInteger)intValue
+{
+    if (fNSInt)
+        return u.i;
+    else
+        return -1;
+}
+
+- (NSInteger)integerValue
+{
+    if (fNSInt)
+        return u.i;
+    else
+        return -1;
+}
+
+- (NSInteger)inc
+{
+    return (u.i+=1);
+}
+
+- (NSInteger)add:(NSInteger)anInt
+{
+    return (u.i+=anInt);
+}
+
+- (NSString *)description
+{
+    if (fBOOL)
+        return (u.b == YES) ? @"true" : @"false"; 
+    else if (fChar)
+        return [NSString stringWithFormat:@"%c", u.c];
+    else if (fNSInt)
+        return [NSString stringWithFormat:@"%ld", (long)u.i];
+    else if (fDouble)
+        return [NSString stringWithFormat:@"%f", u.d];
+    return @"ACNumber not valid";
+}
+
+@end
diff --git a/runtime/ObjC/Framework/AMutableArray.h b/runtime/ObjC/Framework/AMutableArray.h
new file mode 100644
index 0000000..45197b6
--- /dev/null
+++ b/runtime/ObjC/Framework/AMutableArray.h
@@ -0,0 +1,50 @@
+//
+//  AMutableArray.h
+//  a_ST4
+//
+//  Created by Alan Condit on 3/12/11.
+//  Copyright 2011 Alan's MachineWorks. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+#import "ArrayIterator.h"
+
+@class ArrayIterator;
+
+@interface AMutableArray : NSMutableArray {
+    NSInteger BuffSize;
+    NSInteger count;
+    __strong NSMutableData *buffer;
+    __strong id *ptrBuffer;
+}
+
++ (id) newArray;
++ (id) arrayWithCapacity:(NSInteger)size;
+
+- (id) init;
+- (id) initWithCapacity:(NSInteger)size;
+- (id) copyWithZone:(NSZone *)aZone;
+
+- (void) addObject:(id)anObject;
+- (void) addObjectsFromArray:(NSArray *)anArray;
+- (id) objectAtIndex:(NSInteger)anIdx;
+- (void) insertObject:(id)anObject atIndex:(NSInteger)anIdx;
+- (void) removeAllObjects;
+- (void) removeLastObject;
+- (void) removeObjectAtIndex:(NSInteger)idx;
+- (void) replaceObjectAtIndex:(NSInteger)idx withObject:(id)obj;
+- (NSInteger) count;
+- (void)setCount:(NSInteger)cnt;
+//- (NSUInteger)countByEnumeratingWithState:(NSFastEnumerationState *)state objects:(id *)stackbuf count:(NSUInteger)len;
+- (NSArray *) allObjects;
+- (ArrayIterator *) objectEnumerator;
+- (void) ensureCapacity:(NSInteger) index;
+- (NSString *) description;
+- (NSString *) toString;
+
+@property (assign) NSInteger BuffSize;
+@property (assign, getter=count, setter=setCount:) NSInteger count;
+@property (retain) NSMutableData *buffer;
+@property (assign) id *ptrBuffer;
+
+@end
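+
+/* Illustrative sketch (variable names are examples only):
+ *
+ *     AMutableArray *a = [AMutableArray arrayWithCapacity:10];
+ *     [a addObject:obj1];
+ *     [a insertObject:obj2 atIndex:0];
+ *     for (id obj in a) { ... }   // NSFastEnumeration is implemented in the .m
+ *     [a removeLastObject];
+ */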
diff --git a/runtime/ObjC/Framework/AMutableArray.m b/runtime/ObjC/Framework/AMutableArray.m
new file mode 100644
index 0000000..640ed37
--- /dev/null
+++ b/runtime/ObjC/Framework/AMutableArray.m
@@ -0,0 +1,300 @@
+//
+//  AMutableArray.m
+//  a_ST4
+//
+//  Created by Alan Condit on 3/12/11.
+//  Copyright 2011 Alan's MachineWorks. All rights reserved.
+//
+#import "AMutableArray.h"
+#import "ArrayIterator.h"
+
+#define BUFFSIZE 25
+
+@implementation AMutableArray
+
+@synthesize BuffSize;
+@synthesize buffer;
+@synthesize ptrBuffer;
+//@synthesize count;
+
+
++ (id) newArray
+{
+    return [[AMutableArray alloc] init];
+}
+
++ (id) arrayWithCapacity:(NSInteger)size
+{
+    return [[AMutableArray alloc] initWithCapacity:size];
+}
+
+- (id) init
+{
+    self=[super init];
+    if ( self != nil ) {
+        BuffSize = BUFFSIZE;
+        buffer = [[NSMutableData dataWithLength:(BuffSize * sizeof(id))] retain];
+        ptrBuffer = (id *)[buffer mutableBytes];
+        for( int idx = 0; idx < BuffSize; idx++ ) {
+            ptrBuffer[idx] = nil;
+        }
+    }
+    return self;
+}
+
+- (id) initWithCapacity:(NSInteger)len
+{
+    self=[super init];
+    if ( self != nil ) {
+        BuffSize = (len >= BUFFSIZE) ? len : BUFFSIZE;
+        buffer = [[NSMutableData dataWithLength:(BuffSize * sizeof(id))] retain];
+        ptrBuffer = (id *)[buffer mutableBytes];
+        for( int idx = 0; idx < BuffSize; idx++ ) {
+            ptrBuffer[idx] = nil;
+        }
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in AMutableArray" );
+#endif
+    if ( count ) [self removeAllObjects];
+    if ( buffer ) [buffer release];
+    [super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    AMutableArray *copy;
+    
+    copy = [[[self class] allocWithZone:aZone] init];
+    if ( buffer ) {
+        copy.buffer = [buffer copyWithZone:aZone];
+    }
+    copy.ptrBuffer = [copy.buffer mutableBytes];
+    copy.count = count;
+    copy.BuffSize = BuffSize;
+    return copy;
+}
+
+- (void) addObject:(id)anObject
+{
+    if ( anObject == nil ) anObject = [NSNull null];
+    [anObject retain];
+	[self ensureCapacity:count];
+	ptrBuffer[count++] = anObject;
+}
+
+- (void) addObjectsFromArray:(NSArray *)otherArray
+{
+    NSInteger cnt, i;
+    id tmp;
+    cnt = [otherArray count];
+    [self ensureCapacity:count+cnt];
+    for( i = 0; i < cnt; i++) {
+        tmp = [otherArray objectAtIndex:i];
+        [self addObject:tmp];
+    }
+    return;
+}
+
+- (id) objectAtIndex:(NSInteger)anIdx
+{
+    id obj;
+    if ( anIdx < 0 || anIdx >= count ) {
+        @throw [NSException exceptionWithName:NSRangeException
+                                       reason:[NSString stringWithFormat:@"Attempt to retrieve objectAtIndex %d past end", anIdx]
+                                     userInfo:nil];
+        return nil;
+    }
+    ptrBuffer = [buffer mutableBytes];
+    obj = ptrBuffer[anIdx];
+    if ( obj == [NSNull null] ) {
+        obj = nil;
+    }
+    return obj;
+}
+
+- (void) insertObject:(id)anObject atIndex:(NSInteger)anIdx
+{
+    if ( anObject == nil ) anObject = [NSNull null];
+    if ( anObject == nil ) {
+        @throw [NSException exceptionWithName:NSInvalidArgumentException reason:@"Attempt to insert nil objectAtIndex" userInfo:nil];
+    }
+    if ( anIdx < 0 || anIdx > count ) {
+        @throw [NSException exceptionWithName:NSRangeException reason:@"Attempt to insertObjectAtIndex past end" userInfo:nil];
+    }
+    if ( count == BuffSize ) {
+        [self ensureCapacity:count];
+    }
+    if ( anIdx < count ) {
+        for (int i = count; i > anIdx; i--) {
+            ptrBuffer[i] = ptrBuffer[i-1];
+        }
+    }
+    ptrBuffer[anIdx] = [anObject retain];
+    count++;
+}
+
+- (void) removeObjectAtIndex:(NSInteger)idx
+{
+    id tmp;
+    if (idx < 0 || idx >= count) {
+        @throw [NSException exceptionWithName:NSRangeException reason:@"Attempt to insert removeObjectAtIndex past end" userInfo:nil];
+    }
+    else if (count) {
+        tmp = ptrBuffer[idx];
+        if ( tmp ) [tmp release];
+        for (int i = idx; i < count; i++) {
+            ptrBuffer[i] = ptrBuffer[i+1];
+        }
+        count--;
+    }
+}
+
+- (void) removeLastObject
+{
+    id tmp;
+    if (count == 0) {
+        @throw [NSException exceptionWithName:NSRangeException reason:@"Attempt to removeLastObject from 0" userInfo:nil];
+    }
+    count--;
+    tmp = ptrBuffer[count];
+    if ( tmp ) [tmp release];
+    ptrBuffer[count] = nil;
+}
+
+- (void)removeAllObjects
+{
+    id tmp;
+    if (count == 0) {
+        @throw [NSException exceptionWithName:NSRangeException reason:@"Attempt to removeAllObjects from 0" userInfo:nil];
+    }
+    int i;
+    for ( i = 0; i < BuffSize; i++ ) {
+        if (i < count) {
+            tmp = ptrBuffer[i];
+            if ( tmp ) [tmp release];
+        }
+        ptrBuffer[i] = nil;
+    }
+    count = 0;
+}
+
+- (void) replaceObjectAtIndex:(NSInteger)idx withObject:(id)obj
+{
+    id tmp;
+    if ( obj == nil ) {
+        obj = [NSNull null];
+    }
+    if ( idx < 0 || idx >= count ) {
+        @throw [NSException exceptionWithName:NSRangeException reason:@"Attempt to replace object past end" userInfo:nil];
+    }
+    if ( count ) {
+        [obj retain];
+        tmp = ptrBuffer[idx];
+        if ( tmp ) [tmp release];
+        ptrBuffer[idx] = obj;
+    }
+}
+
+- (NSInteger) count
+{
+    return count;
+}
+
+- (void) setCount:(NSInteger)cnt
+{
+    count = cnt;
+}
+
+- (NSArray *) allObjects
+{
+    return [NSArray arrayWithObjects:ptrBuffer count:count];
+}
+
+- (ArrayIterator *) objectEnumerator
+{
+    return [ArrayIterator newIterator:[self allObjects]];
+}
+
+// This is where all the magic happens.
+// You have two choices when implementing this method:
+// 1) Use the stack based array provided by stackbuf. If you do this, then you must respect the value of 'len'.
+// 2) Return your own array of objects. If you do this, return the full length of the array returned until you run out of objects, then return 0. For example, a linked-array implementation may return each array in order until you iterate through all arrays.
+// In either case, state->itemsPtr MUST be a valid array (non-nil). This sample takes approach #1, using stackbuf to store results.
+- (NSUInteger)countByEnumeratingWithState:(NSFastEnumerationState *)state objects:(id *)stackbuf count:(NSUInteger)len
+{
+    NSUInteger cnt = 0;
+    // This is the initialization condition, so we'll do one-time setup here.
+    // Ensure that you never set state->state back to 0, or use another method to detect initialization
+    // (such as using one of the values of state->extra).
+    if (state->state == 0) {
+        // We are not tracking mutations, so we'll set state->mutationsPtr to point into one of our extra values,
+        // since these values are not otherwise used by the protocol.
+        // If your class was mutable, you may choose to use an internal variable that is updated when the class is mutated.
+        // state->mutationsPtr MUST NOT be NULL.
+        state->mutationsPtr = &state->extra[0];
+    }
+    // Now we provide items, which we track with state->state, and determine if we have finished iterating.
+    if (state->state < self.count) {
+        // Set state->itemsPtr to the provided buffer.
+        // Alternate implementations may set state->itemsPtr to an internal C array of objects.
+        // state->itemsPtr MUST NOT be NULL.
+        state->itemsPtr = stackbuf;
+        // Fill in the stack array, either until we've provided all items from the list
+        // or until we've provided as many items as the stack based buffer will hold.
+        while((state->state < self.count) && (cnt < len)) {
+            // For this sample, we generate the contents on the fly.
+            // A real implementation would likely just be copying objects from internal storage.
+            stackbuf[cnt++] = ptrBuffer[state->state++];
+        }
+        // state->state = ((cnt < len)? cnt : len);
+    }
+    else
+    {
+        // We've already provided all our items, so we signal we are done by returning 0.
+        cnt = 0;
+    }
+    return cnt;
+}
+
+- (NSString *) description
+{
+    NSMutableString *str;
+    NSInteger idx, cnt;
+    id tmp;
+    cnt = [self count];
+    str = [NSMutableString stringWithCapacity:30];
+    [str appendString:@"["];
+    for (idx = 0; idx < cnt; idx++ ) {
+        tmp = [self objectAtIndex:idx];
+        [str appendString:((tmp == nil) ? @"nil" : [tmp description])];
+    }
+    [str appendString:@"]"];
+    return str;
+}
+
+- (NSString *) toString
+{
+    return [self description];
+}
+
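+/** Grow the backing NSMutableData when 'index' would run past it:
+ *  double the current capacity (or jump to index+1 if that is larger)
+ *  and refresh ptrBuffer, which may move after the resize.
+ */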
+- (void) ensureCapacity:(NSInteger) index
+{
+	if ((index * sizeof(id)) >= [buffer length])
+	{
+		NSInteger newSize = ([buffer length] / sizeof(id)) * 2;
+		if (index > newSize) {
+			newSize = index + 1;
+		}
+        BuffSize = newSize;
+		[buffer setLength:(BuffSize * sizeof(id))];
+        ptrBuffer = [buffer mutableBytes];
+	}
+}
+
+@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/AMutableDictionary.h b/runtime/ObjC/Framework/AMutableDictionary.h
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/AMutableDictionary.h
rename to runtime/ObjC/Framework/AMutableDictionary.h
diff --git a/runtime/ObjC/Framework/AMutableDictionary.m b/runtime/ObjC/Framework/AMutableDictionary.m
new file mode 100644
index 0000000..0243d51
--- /dev/null
+++ b/runtime/ObjC/Framework/AMutableDictionary.m
@@ -0,0 +1,270 @@
+//
+//  AMutableDictionary.m
+//  ST4
+//
+//  Created by Alan Condit on 4/18/11.
+//  Copyright 2011 Alan Condit. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+#import "AMutableDictionary.h"
+#import "ACBTree.h"
+
+@implementation AMutableDictionary
+
+@synthesize root;
+@synthesize nodes_av;
+@synthesize nodes_inuse;
+@synthesize nxt_nodeid;
+//@synthesize count;
+@synthesize data;
+@synthesize ptrBuffer;
+
++ (AMutableDictionary *) newDictionary
+{
+    return [[AMutableDictionary alloc] init];
+}
+
+/** dictionaryWithCapacity
+ *  capacity is meaningless to ACBTree because
+ *  capacity is automatically increased
+ */
++ (AMutableDictionary *) dictionaryWithCapacity
+{
+    return [[AMutableDictionary alloc] init];
+}
+
+- (id)init
+{
+    self = [super init];
+    if (self) {
+        // Initialization code here.
+        nxt_nodeid = 0;
+        count = 0;
+        root = [ACBTree newNodeWithDictionary:self];
+        root.nodeType = LEAF;
+        root.numrecs = 0;
+        root.updtd = NO;
+        root.lnodeid = 1;
+        root.lnode = nil;
+        root.rnodeid = 0xffff;
+        root.rnode = nil;
+    }
+    return self;
+}
+
+/** initWithCapacity
+ *  capacity is meaningless to ACBTree because
+ *  capacity is automatically increased
+ */
+- (id) initWithCapacity:(NSUInteger)numItems
+{
+    // the capacity hint is ignored (see the comment above); setup is identical to -init
+    return [self init];
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in AMutableDictionary" );
+#endif
+    if ( data ) [data release];
+    if ( root ) [root release];
+    [super dealloc];
+}
+
+- (id) objectForKey:(id)aKey
+{
+    id obj = nil;
+    ACBTree *node;
+    ACBKey *kp;
+    NSInteger ret;
+    BOOL mustRelease = NO;
+
+    if ( [aKey isKindOfClass:[NSString class]] ) {
+        kp = [ACBKey newKeyWithKStr:aKey];
+        mustRelease = YES;
+    }
+    else if ( [aKey isKindOfClass:[ACBKey class]] ) {
+        kp = aKey;
+        //ACBKey *akey = [ACBKey newKey:aKey];
+    }
+    else {
+        // not a key type we know how to handle
+        @throw [NSException exceptionWithName:NSInvalidArgumentException
+                                       reason:[NSString stringWithFormat:@"What kind of key is this? %@", aKey]
+                                     userInfo:nil];
+    }
+    node = [root search:kp.key];
+    if ( node != nil ) {
+        ret = [node searchnode:kp.key match:YES];
+        if ( ret >= 0 && ret < node.numkeys ) {
+            obj = node.btNodes[ret];
+            if ( obj == [NSNull null] ) {
+                obj = nil;
+            }
+        }
+    }
+    if ( mustRelease ) [kp release];
+    return obj;
+}
+
+- (void) setObject:(id)obj forKey:(id)aKey
+{
+    ACBKey *kp;
+    BOOL mustRelease = NO;
+    if ( [aKey isKindOfClass:[NSString class]] ) {
+        kp = [ACBKey newKeyWithKStr:aKey];
+        mustRelease = YES;
+    }
+    else if ( [aKey isKindOfClass:[ACBKey class]] ) {
+        kp = (ACBKey *)aKey;
+    }
+    else {
+        @throw [NSException exceptionWithName:NSInvalidArgumentException
+                                       reason:[NSString stringWithFormat:@"What kind of key is this? %@", aKey]
+                                     userInfo:nil];
+    }
+    if ( [root search:kp.key] == nil ) {
+        if ( obj == nil ) {
+            obj = [NSNull null];
+        }
+        root = [root insertkey:kp value:obj];
+        [kp retain];
+        [obj retain];
+        kp.recnum = count++;
+    }
+    else {
+        if ( mustRelease ) [kp release];
+        @throw [NSException exceptionWithName:NSInvalidArgumentException reason:@"key already exists" userInfo:nil];
+    }
+    return;
+}
+
+- (BOOL) isEqual:(id)object
+{
+    return [super isEqual:object];
+}
+
+- (void) removeObjectForKey:(id)aKey
+{
+    if ( [root deletekey:aKey] == SUCCESS )
+        count--;
+}
+
+- (NSUInteger) count
+{
+    return count;
+}
+
+- (NSArray *) allKeys
+{
+    NSUInteger cnt = [root keyWalkLeaves];
+    return [NSArray arrayWithObjects:ptrBuffer count:cnt];
+}
+
+- (NSArray *) allValues
+{
+    NSUInteger cnt = [root objectWalkLeaves];
+    return [NSArray arrayWithObjects:ptrBuffer count:cnt];
+}
+
+- (ArrayIterator *) keyEnumerator
+{
+    return [ArrayIterator newIterator:[self allKeys]];
+}
+
+- (ArrayIterator *) objectEnumerator
+{
+    return [ArrayIterator newIterator:[self allValues]];
+}
+
+// This is where all the magic happens.
+// You have two choices when implementing this method:
+// 1) Use the stack based array provided by stackbuf. If you do this, then you must respect the value of 'len'.
+// 2) Return your own array of objects. If you do this, return the full length of the array returned until you run out of objects, then return 0. For example, a linked-array implementation may return each array in order until you iterate through all arrays.
+// In either case, state->itemsPtr MUST be a valid array (non-nil). This sample takes approach #1, using stackbuf to store results.
+- (NSUInteger)countByEnumeratingWithState:(NSFastEnumerationState *)state objects:(id *)stackbuf count:(NSUInteger)len
+{
+    NSUInteger cnt = 0;
+    // This is the initialization condition, so we'll do one-time setup here.
+    // Ensure that you never set state->state back to 0, or use another method to detect initialization
+    // (such as using one of the values of state->extra).
+    if (state->state == 0) {
+        // We are not tracking mutations, so we'll set state->mutationsPtr to point into one of our extra values,
+        // since these values are not otherwise used by the protocol.
+        // If your class was mutable, you may choose to use an internal variable that is updated when the class is mutated.
+        // state->mutationsPtr MUST NOT be NULL.
+        state->mutationsPtr = &state->extra[0];
+        [self.root objectWalkLeaves];
+    }
+    // Now we provide items, which we track with state->state, and determine if we have finished iterating.
+    if (state->state < self.count) {
+        // Set state->itemsPtr to the provided buffer.
+        // Alternate implementations may set state->itemsPtr to an internal C array of objects.
+        // state->itemsPtr MUST NOT be NULL.
+        state->itemsPtr = stackbuf;
+        // Fill in the stack array, either until we've provided all items from the list
+        // or until we've provided as many items as the stack based buffer will hold.
+        while((state->state < self.count) && (cnt < len)) {
+            // For this sample, we generate the contents on the fly.
+            // A real implementation would likely just be copying objects from internal storage.
+            stackbuf[cnt++] = ptrBuffer[state->state++];
+        }
+        // state->state = ((cnt < len)? cnt : len);
+    }
+    else
+    {
+        // We've already provided all our items, so we signal we are done by returning 0.
+        cnt = 0;
+    }
+    return cnt;
+}
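+// Usage sketch (illustrative only): this implementation fills ptrBuffer via
+// [root objectWalkLeaves], the same walk -allValues performs, so fast enumeration
+// of an AMutableDictionary appears to yield the stored values rather than the
+// keys (unlike NSDictionary, which enumerates keys). To walk key/value pairs
+// explicitly, use the methods defined above:
+//
+//     for (id key in [aDict allKeys]) {        // aDict is an AMutableDictionary
+//         id value = [aDict objectForKey:key];
+//         NSLog(@"%@ -> %@", key, value);
+//     }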
+
+- (void) clear
+{
+    if ( count ) [self removeAllObjects];
+}
+
+- (void) removeAllObjects
+{
+    if ( [self count] > 0 ) {
+        // root = [ACBTree newNodeWithDictionary:self];
+        NSArray *list = [self allKeys];
+        for ( NSInteger i = [self count] - 1; i >= 0; i-- ) {
+            [self removeObjectForKey:[list objectAtIndex:i]];
+        }
+        root.nodeid = 0;
+        nxt_nodeid = 1;
+    }
+}
+
+- (NSInteger) nextNodeId
+{
+    return nxt_nodeid++;
+}
+
+- (NSArray *) toKeyArray
+{
+    return nil;
+}
+
+- (NSArray *) toValueArray
+{
+    return nil;
+}
+
+@end
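+// Usage sketch (illustrative only, based on the methods defined above):
+//
+//     AMutableDictionary *dict = [AMutableDictionary newDictionary];
+//     [dict setObject:@"value" forKey:@"key"];   // NSString keys are wrapped in an ACBKey internally
+//     id v = [dict objectForKey:@"key"];         // returns nil when the key is absent
+//
+// Note that, unlike NSMutableDictionary, -setObject:forKey: throws
+// NSInvalidArgumentException if the key already exists, so an existing entry
+// must be removed with -removeObjectForKey: before its value can be replaced.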
diff --git a/runtime/ObjC/Framework/ANTLR.h b/runtime/ObjC/Framework/ANTLR.h
new file mode 100644
index 0000000..048cdd6
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLR.h
@@ -0,0 +1,118 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke (c) 2011 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <ANTLR/ACNumber.h>
+#import <ANTLR/ACBTree.h>
+#import <ANTLR/AMutableArray.h>
+#import <ANTLR/AMutableDictionary.h>
+#import <ANTLR/ANTLRBitSet.h>
+#import <ANTLR/ANTLRError.h>
+#import <ANTLR/ANTLRFileStream.h>
+#import <ANTLR/ANTLRInputStream.h>
+#import <ANTLR/ANTLRReaderStream.h>
+#import <ANTLR/ANTLRStringStream.h>
+#import <ANTLR/ArrayIterator.h>
+#import <ANTLR/BaseMapElement.h>
+#import <ANTLR/BaseRecognizer.h>
+#import <ANTLR/BaseStack.h>
+#import <ANTLR/BaseTree.h>
+#import <ANTLR/BaseTreeAdaptor.h>
+#import <ANTLR/BufferedTokenStream.h>
+#import <ANTLR/BufferedTreeNodeStream.h>
+#import <ANTLR/CharStream.h>
+#import <ANTLR/CharStreamState.h>
+#import <ANTLR/CommonErrorNode.h>
+#import <ANTLR/CommonToken.h>
+#import <ANTLR/CommonTokenStream.h>
+#import <ANTLR/CommonTree.h>
+#import <ANTLR/CommonTreeAdaptor.h>
+#import <ANTLR/CommonTreeNodeStream.h>
+#import <ANTLR/DFA.h>
+#import <ANTLR/Debug.h>
+#import <ANTLR/DebugEventSocketProxy.h>
+#import <ANTLR/DebugEventListener.h>
+#import <ANTLR/DebugParser.h>
+#import <ANTLR/DebugTokenStream.h>
+#import <ANTLR/DebugTreeAdaptor.h>
+#import <ANTLR/DebugTreeNodeStream.h>
+#import <ANTLR/DebugTreeParser.h>
+#import <ANTLR/DoubleKeyMap.h>
+#import <ANTLR/EarlyExitException.h>
+#import <ANTLR/Entry.h>
+#import <ANTLR/FailedPredicateException.h>
+#import <ANTLR/FastQueue.h>
+#import <ANTLR/HashMap.h>
+#import <ANTLR/HashRule.h>
+#import <ANTLR/IntArray.h>
+#import <ANTLR/IntStream.h>
+#import <ANTLR/Lexer.h>
+#import <ANTLR/LexerRuleReturnScope.h>
+#import <ANTLR/LinkBase.h>
+#import <ANTLR/LinkedHashMap.h>
+#import <ANTLR/LinkedList.h>
+#import <ANTLR/LookaheadStream.h>
+#import <ANTLR/MapElement.h>
+#import <ANTLR/Map.h>
+#import <ANTLR/MismatchedNotSetException.h>
+#import <ANTLR/MismatchedRangeException.h>
+#import <ANTLR/MismatchedSetException.h>
+#import <ANTLR/MismatchedTokenException.h>
+#import <ANTLR/MismatchedTreeNodeException.h>
+#import <ANTLR/MissingTokenException.h>
+#import <ANTLR/NodeMapElement.h>
+#import <ANTLR/NoViableAltException.h>
+#import <ANTLR/Parser.h>
+#import <ANTLR/ParserRuleReturnScope.h>
+#import <ANTLR/PtrBuffer.h>
+#import <ANTLR/RecognitionException.h>
+#import <ANTLR/RecognizerSharedState.h>
+#import <ANTLR/RewriteRuleElementStream.h>
+#import <ANTLR/RewriteRuleNodeStream.h>
+#import <ANTLR/RewriteRuleSubtreeStream.h>
+#import <ANTLR/RewriteRuleTokenStream.h>
+#import <ANTLR/RuleMemo.h>
+#import <ANTLR/RuleStack.h>
+#import <ANTLR/RuleReturnScope.h>
+#import <ANTLR/RuntimeException.h>
+#import <ANTLR/StreamEnumerator.h>
+#import <ANTLR/SymbolStack.h>
+#import <ANTLR/Token+DebuggerSupport.h>
+#import <ANTLR/Token.h>
+#import <ANTLR/TokenRewriteStream.h>
+#import <ANTLR/TokenSource.h>
+#import <ANTLR/TokenStream.h>
+#import <ANTLR/Tree.h>
+#import <ANTLR/TreeAdaptor.h>
+#import <ANTLR/TreeException.h>
+#import <ANTLR/TreeIterator.h>
+#import <ANTLR/TreeNodeStream.h>
+#import <ANTLR/TreeParser.h>
+#import <ANTLR/TreeRuleReturnScope.h>
+#import <ANTLR/UnbufferedTokenStream.h>
+//#import <ANTLR/UnbufferedCommonTreeNodeStream.h>
+//#import <ANTLR/UnbufferedCommonTreeNodeStreamState.h>
+#import <ANTLR/UniqueIDMap.h>
+#import <ANTLR/UnwantedTokenException.h>
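+// Usage note (illustrative): generated parsers/lexers and client code can pull in
+// the entire runtime with this single umbrella import, assuming the ANTLR
+// framework is linked, instead of importing each header individually:
+//
+//     #import <ANTLR/ANTLR.h>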
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR.xcodeproj/acondit.mode1v3 b/runtime/ObjC/Framework/ANTLR.xcodeproj/acondit.mode1v3
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR.xcodeproj/acondit.mode1v3
rename to runtime/ObjC/Framework/ANTLR.xcodeproj/acondit.mode1v3
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR.xcodeproj/acondit.pbxuser b/runtime/ObjC/Framework/ANTLR.xcodeproj/acondit.pbxuser
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR.xcodeproj/acondit.pbxuser
rename to runtime/ObjC/Framework/ANTLR.xcodeproj/acondit.pbxuser
diff --git a/runtime/ObjC/Framework/ANTLR.xcodeproj/project.pbxproj b/runtime/ObjC/Framework/ANTLR.xcodeproj/project.pbxproj
new file mode 100644
index 0000000..3f837da
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLR.xcodeproj/project.pbxproj
@@ -0,0 +1,6358 @@
+// !$*UTF8*$!
+{
+	archiveVersion = 1;
+	classes = {
+	};
+	objectVersion = 46;
+	objects = {
+
+/* Begin PBXAggregateTarget section */
+		F762873F0B71519B006AA7EF /* Regenerate all examples */ = {
+			isa = PBXAggregateTarget;
+			buildConfigurationList = F76287400B7151B9006AA7EF /* Build configuration list for PBXAggregateTarget "Regenerate all examples" */;
+			buildPhases = (
+			);
+			dependencies = (
+				F76287A70B7157C2006AA7EF /* PBXTargetDependency */,
+				F762874C0B715417006AA7EF /* PBXTargetDependency */,
+				F76287AB0B7157C2006AA7EF /* PBXTargetDependency */,
+				F79EFB140C5845A300ABAB3D /* PBXTargetDependency */,
+				F76287A90B7157C2006AA7EF /* PBXTargetDependency */,
+				1A0F347112EA43BA00496BB8 /* PBXTargetDependency */,
+				F76287AD0B7157C2006AA7EF /* PBXTargetDependency */,
+				F76287AF0B7157C2006AA7EF /* PBXTargetDependency */,
+				1A0F347312EA43BA00496BB8 /* PBXTargetDependency */,
+				1A0F347512EA43BA00496BB8 /* PBXTargetDependency */,
+			);
+			name = "Regenerate all examples";
+			productName = Untitled;
+		};
+/* End PBXAggregateTarget section */
+
+/* Begin PBXBuildFile section */
+		1A01BD9312EB5A6000428792 /* Simplifier.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A0F347F12EA444500496BB8 /* Simplifier.m */; };
+		1A0F345E12EA42D800496BB8 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		1A0F345F12EA42D800496BB8 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
+		1A0F346012EA42D800496BB8 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
+		1A0F346D12EA434F00496BB8 /* Main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A0F343012EA411F00496BB8 /* Main.m */; };
+		1A0F348212EA444500496BB8 /* PolyLexer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A0F347A12EA444500496BB8 /* PolyLexer.h */; };
+		1A0F348412EA444500496BB8 /* PolyParser.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A0F347C12EA444500496BB8 /* PolyParser.h */; };
+		1A0F348612EA444500496BB8 /* Simplifier.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A0F347E12EA444500496BB8 /* Simplifier.h */; };
+		1A0F348912EA444500496BB8 /* PolyLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A0F347B12EA444500496BB8 /* PolyLexer.m */; };
+		1A0F348A12EA444500496BB8 /* PolyParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A0F347D12EA444500496BB8 /* PolyParser.m */; };
+		1A10050611B8796D0022B434 /* BufferedTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A18EF5511B8028D0006186A /* BufferedTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A10050711B8796E0022B434 /* ANTLRBitSet.h in Headers */ = {isa = PBXBuildFile; fileRef = F7F218EE097AFB1A000472E9 /* ANTLRBitSet.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A10050811B879A40022B434 /* FastQueue.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1CCC9011B6FD39002E5F53 /* FastQueue.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A10050911B879A80022B434 /* FailedPredicateException.h in Headers */ = {isa = PBXBuildFile; fileRef = F738D1730B07AEAA001813C4 /* FailedPredicateException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A10050B11B879B80022B434 /* IntArray.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1CCC9211B6FD39002E5F53 /* IntArray.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A100ABB11E604FE006ABF94 /* HashRule.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A100AB911E604FE006ABF94 /* HashRule.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A100ABC11E604FE006ABF94 /* HashRule.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A100ABA11E604FE006ABF94 /* HashRule.m */; };
+		1A12110311D3A62B00F27B38 /* CommonTokenTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2311D2BE4F000C72FC /* CommonTokenTest.m */; };
+		1A12117911D3B45C00F27B38 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		1A12117A11D3B47000F27B38 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
+		1A12117B11D3B47000F27B38 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
+		1A1211D711D3BF6800F27B38 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		1A1211D811D3BF6800F27B38 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
+		1A1211D911D3BF6800F27B38 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
+		1A1211DE11D3BFC900F27B38 /* ANTLRStringStreamTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2011D2BE4F000C72FC /* ANTLRStringStreamTest.m */; };
+		1A12122B11D3C93500F27B38 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A12122A11D3C93500F27B38 /* ANTLR.framework */; };
+		1A12122C11D3C93500F27B38 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
+		1A12122D11D3C93500F27B38 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
+		1A12126211D3CA0100F27B38 /* FastQueueTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1811D2BE4F000C72FC /* FastQueueTest.m */; };
+		1A1212E211D3F55500F27B38 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		1A1212E311D3F55500F27B38 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
+		1A1212E411D3F55500F27B38 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
+		1A1212E711D3F59300F27B38 /* IntArrayTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1A11D2BE4F000C72FC /* IntArrayTest.m */; };
+		1A12131211D3F7DC00F27B38 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		1A12131311D3F7DC00F27B38 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
+		1A12131411D3F7DC00F27B38 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
+		1A12131711D3F80500F27B38 /* CommonTreeTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2611D2BE4F000C72FC /* CommonTreeTest.m */; };
+		1A12C95911B89F62008C9BED /* ANTLRBitSet.m in Sources */ = {isa = PBXBuildFile; fileRef = F7F218EF097AFB1A000472E9 /* ANTLRBitSet.m */; };
+		1A12C95A11B89F64008C9BED /* BufferedTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A18EF5611B8028D0006186A /* BufferedTokenStream.m */; };
+		1A12C95B11B89F65008C9BED /* CommonToken.m in Sources */ = {isa = PBXBuildFile; fileRef = F777660409DC5CF400517181 /* CommonToken.m */; };
+		1A12C95C11B89F67008C9BED /* CommonTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F700ED950A5FF2A5005D0757 /* CommonTokenStream.m */; };
+		1A12C95D11B89F68008C9BED /* CommonTree.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C58E90AB3911D00282574 /* CommonTree.m */; };
+		1A12C95E11B89F69008C9BED /* CommonTreeAdaptor.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C59A60AB4F20A00282574 /* CommonTreeAdaptor.m */; };
+		1A12C95F11B89F6A008C9BED /* CommonTreeNodeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F76AA98F0CEA515A00AF044C /* CommonTreeNodeStream.m */; };
+		1A12C96011B89F6B008C9BED /* DebugEventListener.h in Headers */ = {isa = PBXBuildFile; fileRef = F7CECD7D0B1E5C370054CC3B /* DebugEventListener.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A12C96111B89F6F008C9BED /* Lexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7A4099209659BFB002CC781 /* Lexer.m */; };
+		1A12C96211B89F70008C9BED /* LexerRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = F7009ADA0A1BE4AE002EDD5D /* LexerRuleReturnScope.m */; };
+		1A12C96311B89F76008C9BED /* LookaheadStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1CCCAA11B724B2002E5F53 /* LookaheadStream.m */; };
+		1A12C96411B89F76008C9BED /* MismatchedRangeException.m in Sources */ = {isa = PBXBuildFile; fileRef = F7037CEF0A0582FC0070435D /* MismatchedRangeException.m */; };
+		1A12C96511B89F77008C9BED /* MismatchedSetException.m in Sources */ = {isa = PBXBuildFile; fileRef = F70380BB0A07FA0D0070435D /* MismatchedSetException.m */; };
+		1A12C96611B89F78008C9BED /* MismatchedTokenException.m in Sources */ = {isa = PBXBuildFile; fileRef = F777668109DC719C00517181 /* MismatchedTokenException.m */; };
+		1A12C96711B89F7A008C9BED /* MismatchedTreeNodeException.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C5D610AB63E0B00282574 /* MismatchedTreeNodeException.m */; };
+		1A12C96811B89F7B008C9BED /* NoViableAltException.m in Sources */ = {isa = PBXBuildFile; fileRef = F79D598A0A0E51AB00EA3CEE /* NoViableAltException.m */; };
+		1A12C96911B89F7E008C9BED /* Parser.m in Sources */ = {isa = PBXBuildFile; fileRef = F700E8FA0A5FAD21005D0757 /* Parser.m */; };
+		1A12C96A11B89F7F008C9BED /* ParserRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C5ECD0AB7E5A500282574 /* ParserRuleReturnScope.m */; };
+		1A12C96B11B89F80008C9BED /* RecognitionException.m in Sources */ = {isa = PBXBuildFile; fileRef = F777669209DC72D600517181 /* RecognitionException.m */; };
+		1A12C96C11B89F82008C9BED /* RecognizerSharedState.m in Sources */ = {isa = PBXBuildFile; fileRef = F7B1E5AD0CD7CF1900CE136E /* RecognizerSharedState.m */; };
+		1A12C96D11B89F83008C9BED /* RewriteRuleElementStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F71325870C4A060900B99F2D /* RewriteRuleElementStream.m */; };
+		1A12C96E11B89F84008C9BED /* RewriteRuleSubtreeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F70B11BC0C4C2B6400C3ECE0 /* RewriteRuleSubtreeStream.m */; };
+		1A12C96F11B89F85008C9BED /* RewriteRuleTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F70B11C20C4C2B7900C3ECE0 /* RewriteRuleTokenStream.m */; };
+		1A12C97011B89F87008C9BED /* ANTLRStringStream.m in Sources */ = {isa = PBXBuildFile; fileRef = F70AA7C609AA339900C3FD5E /* ANTLRStringStream.m */; };
+		1A12C97111B89F8B008C9BED /* CharStreamState.m in Sources */ = {isa = PBXBuildFile; fileRef = F70AA7CE09AA379300C3FD5E /* CharStreamState.m */; };
+		1A12C97211B89F8C008C9BED /* Token+DebuggerSupport.m in Sources */ = {isa = PBXBuildFile; fileRef = F77744040B234A3400D1F89B /* Token+DebuggerSupport.m */; };
+		1A12C97311B89F8E008C9BED /* TreeException.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D1760B07AEAA001813C4 /* TreeException.m */; };
+		1A12C97411B89F90008C9BED /* TreeParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C5D550AB63C1D00282574 /* TreeParser.m */; };
+		1A16B13C11C66492002860C7 /* LinkBase.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A16B13A11C66492002860C7 /* LinkBase.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A16B13D11C66492002860C7 /* LinkBase.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A16B13B11C66492002860C7 /* LinkBase.m */; };
+		1A1702FE11C05D4800F6978A /* HashMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1702FC11C05D4800F6978A /* HashMap.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A1702FF11C05D4800F6978A /* HashMap.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1702FD11C05D4800F6978A /* HashMap.m */; };
+		1A1BCDBB11CB01E60051A1EC /* RuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1BCDB911CB01E60051A1EC /* RuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A1BCDBC11CB01E60051A1EC /* RuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1BCDBA11CB01E60051A1EC /* RuleReturnScope.m */; };
+		1A1BCDCF11CB0B3D0051A1EC /* TreeRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1BCDCD11CB0B3D0051A1EC /* TreeRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A1BCDD011CB0B3D0051A1EC /* TreeRuleReturnScope.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1BCDCE11CB0B3D0051A1EC /* TreeRuleReturnScope.m */; };
+		1A1BCE2A11CB1A3E0051A1EC /* TreeRewriter.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1BCE2811CB1A3E0051A1EC /* TreeRewriter.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A1BCE2B11CB1A3E0051A1EC /* TreeRewriter.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1BCE2911CB1A3E0051A1EC /* TreeRewriter.m */; };
+		1A1CCCAB11B724B2002E5F53 /* LookaheadStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1CCCA911B724B2002E5F53 /* LookaheadStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A1CCCC811B727B5002E5F53 /* ANTLRError.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1CCCC711B727B5002E5F53 /* ANTLRError.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A1D465B11BE73B2001575F3 /* BaseTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1D465911BE73B2001575F3 /* BaseTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A1D465C11BE73B2001575F3 /* BaseTreeAdaptor.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1D465A11BE73B2001575F3 /* BaseTreeAdaptor.m */; };
+		1A1D467011BE75C0001575F3 /* MapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1D466E11BE75C0001575F3 /* MapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A1D467111BE75C0001575F3 /* MapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1D466F11BE75C0001575F3 /* MapElement.m */; };
+		1A1D467C11BE8E5A001575F3 /* CommonErrorNode.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1D467A11BE8E5A001575F3 /* CommonErrorNode.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A1D467D11BE8E5A001575F3 /* CommonErrorNode.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1D467B11BE8E5A001575F3 /* CommonErrorNode.m */; };
+		1A20C56512D6267500C2072A /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F700E86A0A5FA34D005D0757 /* main.m */; };
+		1A26329511C53578000DCDD4 /* MismatchedNotSetException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A26329311C53578000DCDD4 /* MismatchedNotSetException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A26329611C53578000DCDD4 /* MismatchedNotSetException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A26329411C53578000DCDD4 /* MismatchedNotSetException.m */; };
+		1A270BF911C1451200DCC8F3 /* TreeIterator.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A270BF711C1451200DCC8F3 /* TreeIterator.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A270BFA11C1451200DCC8F3 /* TreeIterator.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A270BF811C1451200DCC8F3 /* TreeIterator.m */; };
+		1A2D217511E4F57C00DFE328 /* UniqueIDMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A2D217311E4F57C00DFE328 /* UniqueIDMap.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A2D217611E4F57C00DFE328 /* UniqueIDMap.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A2D217411E4F57C00DFE328 /* UniqueIDMap.m */; };
+		1A2D218611E502DE00DFE328 /* NodeMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A2D218411E502DE00DFE328 /* NodeMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A2D218711E502DE00DFE328 /* NodeMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A2D218511E502DE00DFE328 /* NodeMapElement.m */; };
+		1A348B5811D2BF1C000C72FC /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		1A348BA511D2C6A0000C72FC /* ANTLRBitSetTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1D11D2BE4F000C72FC /* ANTLRBitSetTest.m */; };
+		1A348BA811D2C6AD000C72FC /* CommonTokenTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2311D2BE4F000C72FC /* CommonTokenTest.m */; };
+		1A348BAB11D2C6B8000C72FC /* CommonTreeTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2611D2BE4F000C72FC /* CommonTreeTest.m */; };
+		1A348BAE11D2C6C6000C72FC /* FastQueueTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1811D2BE4F000C72FC /* FastQueueTest.m */; };
+		1A348BAF11D2C6D3000C72FC /* IntArrayTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1A11D2BE4F000C72FC /* IntArrayTest.m */; };
+		1A348BB211D2C6E3000C72FC /* ANTLRStringStreamTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B2011D2BE4F000C72FC /* ANTLRStringStreamTest.m */; };
+		1A348BB611D2C711000C72FC /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
+		1A348BB811D2C711000C72FC /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
+		1A348BF211D2D0E0000C72FC /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
+		1A348BF311D2D0E0000C72FC /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
+		1A348BF411D2D0E7000C72FC /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		1A348C0611D2D22B000C72FC /* ANTLRBitSetTest.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A348B1D11D2BE4F000C72FC /* ANTLRBitSetTest.m */; };
+		1A3A08E611E213C500D5EE26 /* BaseStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A3A08E411E213C500D5EE26 /* BaseStack.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A3A08E711E213C500D5EE26 /* BaseStack.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A3A08E511E213C500D5EE26 /* BaseStack.m */; };
+		1A3A08EA11E213E100D5EE26 /* SymbolStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A3A08E811E213E100D5EE26 /* SymbolStack.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A3A08EB11E213E100D5EE26 /* SymbolStack.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A3A08E911E213E100D5EE26 /* SymbolStack.m */; };
+		1A3A09BE11E235BD00D5EE26 /* antlr3.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A3A09BD11E235BD00D5EE26 /* antlr3.h */; };
+		1A406B5612E8F2ED005EF037 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F72C5E560AB7E41000282574 /* main.m */; };
+		1A45657711C922BE0082F421 /* RuleMemo.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A45657511C922BE0082F421 /* RuleMemo.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A45657811C922BE0082F421 /* RuleMemo.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A45657611C922BE0082F421 /* RuleMemo.m */; };
+		1A45658911C9270D0082F421 /* BaseMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A45658711C9270D0082F421 /* BaseMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A45658A11C9270D0082F421 /* BaseMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A45658811C9270D0082F421 /* BaseMapElement.m */; };
+		1A4A851211CBCE3E00E4BF1B /* TreeVisitor.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A4A851011CBCE3E00E4BF1B /* TreeVisitor.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A4A851311CBCE3E00E4BF1B /* TreeVisitor.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A4A851111CBCE3E00E4BF1B /* TreeVisitor.m */; };
+		1A4A851811CBCE5500E4BF1B /* TreeVisitorAction.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A4A851611CBCE5500E4BF1B /* TreeVisitorAction.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A4A851911CBCE5500E4BF1B /* TreeVisitorAction.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A4A851711CBCE5500E4BF1B /* TreeVisitorAction.m */; };
+		1A4A851E11CBCF3700E4BF1B /* TreeWizard.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A4A851C11CBCF3700E4BF1B /* TreeWizard.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A4A851F11CBCF3700E4BF1B /* TreeWizard.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A4A851D11CBCF3700E4BF1B /* TreeWizard.m */; };
+		1A4D5AD611B55A45001C9482 /* BaseTree.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A4D5AD411B55A45001C9482 /* BaseTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A5EA50B11CFE7CE00E8932F /* Map.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A5EA50911CFE7CE00E8932F /* Map.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A5EA50C11CFE7CE00E8932F /* Map.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A5EA50A11CFE7CE00E8932F /* Map.m */; };
+		1A65B7D811B9532A00FD8754 /* BufferedTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A65B7D611B9532A00FD8754 /* BufferedTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A65B7D911B9532A00FD8754 /* BufferedTreeNodeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A65B7D711B9532A00FD8754 /* BufferedTreeNodeStream.m */; };
+		1A67885211B87ABA00A11EEC /* BaseTree.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A4D5AD511B55A45001C9482 /* BaseTree.m */; };
+		1A67885311B87AC400A11EEC /* CharStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F70AA7B509AA2B8800C3FD5E /* CharStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A67885411B87AEA00A11EEC /* FastQueue.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1CCC9111B6FD39002E5F53 /* FastQueue.m */; };
+		1A67885511B87AEF00A11EEC /* IntArray.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1CCC9311B6FD39002E5F53 /* IntArray.m */; };
+		1A6788FC11B893E100A11EEC /* BaseRecognizer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7A4098C09659BF3002CC781 /* BaseRecognizer.m */; };
+		1A6C451711BF4EE00039788A /* MissingTokenException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6C451511BF4EE00039788A /* MissingTokenException.m */; };
+		1A6C452811BF50A40039788A /* UnwantedTokenException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6C452611BF50A40039788A /* UnwantedTokenException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A6C452911BF50A40039788A /* UnwantedTokenException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A6C452711BF50A40039788A /* UnwantedTokenException.m */; };
+		1A75BF5911D6B3FD0096C6F5 /* MissingTokenException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A6C451411BF4EE00039788A /* MissingTokenException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A75BFBA11D6C2B10096C6F5 /* DFA.m in Sources */ = {isa = PBXBuildFile; fileRef = F7754E3E0A5C0A0500D0873A /* DFA.m */; };
+		1A77EE9312E6A57C007F323A /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
+		1A77EE9412E6A57C007F323A /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB711D2C711000C72FC /* CoreFoundation.framework */; };
+		1A77EE9712E6A594007F323A /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		1A86B91B11EB9F6300C67A03 /* ParseTree.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A86B91911EB9F6300C67A03 /* ParseTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A86B91C11EB9F6300C67A03 /* ParseTree.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A86B91A11EB9F6300C67A03 /* ParseTree.m */; };
+		1A86BACF11EC1CD000C67A03 /* UnbufferedTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A86BACD11EC1CD000C67A03 /* UnbufferedTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A86BAD011EC1CD000C67A03 /* UnbufferedTokenStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A86BACE11EC1CD000C67A03 /* UnbufferedTokenStream.m */; };
+		1A8ABFC611BA9B960038DBB0 /* CharStreamState.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A8ABFC511BA9B960038DBB0 /* CharStreamState.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A8AC00C11BAEC710038DBB0 /* RuntimeException.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A8AC00A11BAEC710038DBB0 /* RuntimeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1A8AC00D11BAEC710038DBB0 /* RuntimeException.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A8AC00B11BAEC710038DBB0 /* RuntimeException.m */; };
+		1AAC202C11CC621A00CF56D1 /* TreePatternLexer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AAC202A11CC621A00CF56D1 /* TreePatternLexer.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1AAC202D11CC621A00CF56D1 /* TreePatternLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AAC202B11CC621A00CF56D1 /* TreePatternLexer.m */; };
+		1AAC20A511CC790300CF56D1 /* TreePatternParser.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AAC20A311CC790300CF56D1 /* TreePatternParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1AAC20A611CC790300CF56D1 /* TreePatternParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AAC20A411CC790300CF56D1 /* TreePatternParser.m */; };
+		1AB4A54211B995290076E91A /* EarlyExitException.m in Sources */ = {isa = PBXBuildFile; fileRef = F700E61A0A5F66EC005D0757 /* EarlyExitException.m */; };
+		1AB4A54311B9952A0076E91A /* FailedPredicateException.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D1740B07AEAA001813C4 /* FailedPredicateException.m */; };
+		1AB4A59111B9A0DA0076E91A /* StreamEnumerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AB4A58F11B9A0DA0076E91A /* StreamEnumerator.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1AB4A59211B9A0DA0076E91A /* StreamEnumerator.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AB4A59011B9A0DA0076E91A /* StreamEnumerator.m */; };
+		1AB5F47711E3869D00E065B0 /* RuleMapElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AB5F47511E3869D00E065B0 /* RuleMapElement.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1AB5F47811E3869D00E065B0 /* RuleMapElement.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AB5F47611E3869D00E065B0 /* RuleMapElement.m */; };
+		1AB5F51E11E3BE2E00E065B0 /* PtrBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AB5F51C11E3BE2E00E065B0 /* PtrBuffer.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1AB5F51F11E3BE2E00E065B0 /* PtrBuffer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AB5F51D11E3BE2E00E065B0 /* PtrBuffer.m */; };
+		1AC5AC9E12E7BEFE00DF0C58 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		1AC5AC9F12E7BEFE00DF0C58 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
+		1AC5ACA112E7BEFE00DF0C58 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
+		1AC5ACAD12E7BF4E00DF0C58 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AC5AC8112E7BC9100DF0C58 /* main.m */; };
+		1AC5ACD612E7C05800DF0C58 /* LangLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AC5ACC912E7C03C00DF0C58 /* LangLexer.m */; };
+		1AC5ACE612E7CE4700DF0C58 /* LangParser.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AC5ACCB12E7C03C00DF0C58 /* LangParser.m */; };
+		1AC5ACE712E7CE4C00DF0C58 /* LangLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AC5ACC912E7C03C00DF0C58 /* LangLexer.m */; };
+		1AC5ACE812E7CE5100DF0C58 /* LangDumpDecl.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AC5ACC612E7C03C00DF0C58 /* LangDumpDecl.m */; };
+		1ADB66F112E74341007C1661 /* FuzzyLexer.h in Headers */ = {isa = PBXBuildFile; fileRef = 1ADB66F012E74341007C1661 /* FuzzyLexer.h */; };
+		1ADB67BA12E74E82007C1661 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1A348BB511D2C711000C72FC /* Cocoa.framework */; };
+		1AE8A96C11D9227A00D36FD6 /* RuleStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AE8A96A11D9227A00D36FD6 /* RuleStack.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1AE8A96D11D9227A00D36FD6 /* RuleStack.m in Sources */ = {isa = PBXBuildFile; fileRef = 1AE8A96B11D9227A00D36FD6 /* RuleStack.m */; };
+		1AEECE1511E7EB3C00554AAF /* TokenRewriteStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A1FFC5911CD12A400FBB452 /* TokenRewriteStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		1AEECE1611E7EB3D00554AAF /* TokenRewriteStream.m in Sources */ = {isa = PBXBuildFile; fileRef = 1A1FFC5A11CD12A400FBB452 /* TokenRewriteStream.m */; };
+		BF7D9B531519363200B58218 /* ACBTree.h in Headers */ = {isa = PBXBuildFile; fileRef = BF7D9B511519363200B58218 /* ACBTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		BF7D9B541519363200B58218 /* ACBTree.m in Sources */ = {isa = PBXBuildFile; fileRef = BF7D9B521519363200B58218 /* ACBTree.m */; };
+		BF7D9B571519367800B58218 /* AMutableArray.h in Headers */ = {isa = PBXBuildFile; fileRef = BF7D9B551519367800B58218 /* AMutableArray.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		BF7D9B581519367800B58218 /* AMutableArray.m in Sources */ = {isa = PBXBuildFile; fileRef = BF7D9B561519367800B58218 /* AMutableArray.m */; };
+		BF7D9B5B1519368C00B58218 /* AMutableDictionary.h in Headers */ = {isa = PBXBuildFile; fileRef = BF7D9B591519368C00B58218 /* AMutableDictionary.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		BF7D9B5C1519368C00B58218 /* AMutableDictionary.m in Sources */ = {isa = PBXBuildFile; fileRef = BF7D9B5A1519368C00B58218 /* AMutableDictionary.m */; };
+		BF7D9B5F151936B600B58218 /* DoubleKeyMap.h in Headers */ = {isa = PBXBuildFile; fileRef = BF7D9B5D151936B500B58218 /* DoubleKeyMap.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		BF7D9B60151936B600B58218 /* DoubleKeyMap.m in Sources */ = {isa = PBXBuildFile; fileRef = BF7D9B5E151936B600B58218 /* DoubleKeyMap.m */; };
+		BF7D9B63151936E700B58218 /* ANTLRFileStream.h in Headers */ = {isa = PBXBuildFile; fileRef = BF7D9B61151936E700B58218 /* ANTLRFileStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		BF7D9B64151936E700B58218 /* ANTLRFileStream.m in Sources */ = {isa = PBXBuildFile; fileRef = BF7D9B62151936E700B58218 /* ANTLRFileStream.m */; };
+		BF7D9B67151936FC00B58218 /* ANTLRInputStream.h in Headers */ = {isa = PBXBuildFile; fileRef = BF7D9B65151936FB00B58218 /* ANTLRInputStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		BF7D9B68151936FC00B58218 /* ANTLRInputStream.m in Sources */ = {isa = PBXBuildFile; fileRef = BF7D9B66151936FB00B58218 /* ANTLRInputStream.m */; };
+		BF7D9B6B1519371200B58218 /* ANTLRReaderStream.h in Headers */ = {isa = PBXBuildFile; fileRef = BF7D9B691519371100B58218 /* ANTLRReaderStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		BF7D9B6C1519371200B58218 /* ANTLRReaderStream.m in Sources */ = {isa = PBXBuildFile; fileRef = BF7D9B6A1519371200B58218 /* ANTLRReaderStream.m */; };
+		BF7D9B6F1519373700B58218 /* RewriteRuleNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = BF7D9B6D1519373600B58218 /* RewriteRuleNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		BF7D9B701519373700B58218 /* RewriteRuleNodeStream.m in Sources */ = {isa = PBXBuildFile; fileRef = BF7D9B6E1519373600B58218 /* RewriteRuleNodeStream.m */; };
+		BF7D9B731519375200B58218 /* ArrayIterator.h in Headers */ = {isa = PBXBuildFile; fileRef = BF7D9B711519375200B58218 /* ArrayIterator.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		BF7D9B741519375200B58218 /* ArrayIterator.m in Sources */ = {isa = PBXBuildFile; fileRef = BF7D9B721519375200B58218 /* ArrayIterator.m */; };
+		F7009ADB0A1BE4AE002EDD5D /* LexerRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = F7009AD90A1BE4AE002EDD5D /* LexerRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F700E61B0A5F66EC005D0757 /* EarlyExitException.h in Headers */ = {isa = PBXBuildFile; fileRef = F700E6190A5F66EC005D0757 /* EarlyExitException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F700E8FB0A5FAD21005D0757 /* Parser.h in Headers */ = {isa = PBXBuildFile; fileRef = F700E8F90A5FAD21005D0757 /* Parser.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F700EC670A5FDF0D005D0757 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		F700ECA40A5FDF1A005D0757 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
+		F700ECA50A5FDF1A005D0757 /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
+		F700ECD90A5FE19A005D0757 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
+		F700ECDA0A5FE19A005D0757 /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
+		F700ED7F0A5FF17C005D0757 /* TokenSource.h in Headers */ = {isa = PBXBuildFile; fileRef = F700ED7E0A5FF17C005D0757 /* TokenSource.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F700ED960A5FF2A5005D0757 /* CommonTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F700ED940A5FF2A5005D0757 /* CommonTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7037CF00A0582FC0070435D /* MismatchedRangeException.h in Headers */ = {isa = PBXBuildFile; fileRef = F7037CEE0A0582FC0070435D /* MismatchedRangeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7037EA60A05AFD70070435D /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		F70380BC0A07FA0D0070435D /* MismatchedSetException.h in Headers */ = {isa = PBXBuildFile; fileRef = F70380BA0A07FA0D0070435D /* MismatchedSetException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7048FF80B07D05400D2F326 /* TestLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7048FF70B07D05400D2F326 /* TestLexer.m */; };
+		F7048FF90B07D05800D2F326 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F7E983940A0D6A5F00F16093 /* main.m */; };
+		F70AA7A609AA2A6900C3FD5E /* ANTLR.h in Headers */ = {isa = PBXBuildFile; fileRef = F70AA7A509AA2A6900C3FD5E /* ANTLR.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F70AA7AF09AA2AC000C3FD5E /* IntStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F70AA7AD09AA2AC000C3FD5E /* IntStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F70AA7C709AA339900C3FD5E /* ANTLRStringStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F70AA7C509AA339900C3FD5E /* ANTLRStringStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F70B11BD0C4C2B6400C3ECE0 /* RewriteRuleSubtreeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F70B11BB0C4C2B6400C3ECE0 /* RewriteRuleSubtreeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F70B11C30C4C2B7900C3ECE0 /* RewriteRuleTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F70B11C10C4C2B7900C3ECE0 /* RewriteRuleTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F71325880C4A060900B99F2D /* RewriteRuleElementStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F71325860C4A060900B99F2D /* RewriteRuleElementStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F72C58EA0AB3911D00282574 /* CommonTree.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C58E80AB3911D00282574 /* CommonTree.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F72C59A70AB4F20A00282574 /* CommonTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C59A50AB4F20A00282574 /* CommonTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F72C5B840AB52AD300282574 /* TreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C5B820AB52AD300282574 /* TreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F72C5D560AB63C1D00282574 /* TreeParser.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C5D540AB63C1D00282574 /* TreeParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F72C5D620AB63E0B00282574 /* MismatchedTreeNodeException.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C5D600AB63E0B00282574 /* MismatchedTreeNodeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F72C5E620AB7E4C900282574 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		F72C5E630AB7E4C900282574 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
+		F72C5E650AB7E4C900282574 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
+		F72C5ECE0AB7E5A500282574 /* ParserRuleReturnScope.h in Headers */ = {isa = PBXBuildFile; fileRef = F72C5ECC0AB7E5A500282574 /* ParserRuleReturnScope.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F738D1790B07AEAA001813C4 /* TreeException.h in Headers */ = {isa = PBXBuildFile; fileRef = F738D1750B07AEAA001813C4 /* TreeException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F738D1FC0B07B1BD001813C4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F7DD07800A7B67A7006A006C /* main.m */; };
+		F738D20D0B07B265001813C4 /* SymbolTableParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D2010B07B1CE001813C4 /* SymbolTableParser.m */; };
+		F738D20E0B07B266001813C4 /* SymbolTableLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D1FF0B07B1CE001813C4 /* SymbolTableLexer.m */; };
+		F738D2120B07B32D001813C4 /* T.g in Sources */ = {isa = PBXBuildFile; fileRef = F7DD05E40A7B14BE006A006C /* T.g */; };
+		F738D2220B07B39F001813C4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F7DD05E70A7B1572006A006C /* main.m */; };
+		F738D3190B07BDB7001813C4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F700ECE70A5FE25D005D0757 /* main.m */; };
+		F738D3610B07C105001813C4 /* CombinedLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D35E0B07C105001813C4 /* CombinedLexer.m */; };
+		F738D3620B07C105001813C4 /* CombinedParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D3600B07C105001813C4 /* CombinedParser.m */; };
+		F738D37E0B07C3BD001813C4 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F79D56C00A0E287500EA3CEE /* main.m */; };
+		F73E2B740A9CFE6A005D6267 /* Tree.h in Headers */ = {isa = PBXBuildFile; fileRef = F73E2B720A9CFE6A005D6267 /* Tree.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F73E2B7C0A9D0AFC005D6267 /* TreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = F73E2B7A0A9D0AFC005D6267 /* TreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F741D0830B381E720024DF3F /* SimpleCWalker.m in Sources */ = {isa = PBXBuildFile; fileRef = F741D0650B3812D40024DF3F /* SimpleCWalker.m */; };
+		F741D0840B381E730024DF3F /* SimpleCParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F7715D310AC9DE9E00ED984D /* SimpleCParser.m */; };
+		F741D08E0B381EA90024DF3F /* SimpleCLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7715D2F0AC9DE9E00ED984D /* SimpleCLexer.m */; };
+		F7492F5D09C016A200B25E30 /* BaseRecognizer.h in Headers */ = {isa = PBXBuildFile; fileRef = F7A4098B09659BF3002CC781 /* BaseRecognizer.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F76287130B714E77006AA7EF /* TLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7DD06E80A7B1700006A006C /* TLexer.m */; };
+		F76287140B714E78006AA7EF /* TParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D2240B07B3BC001813C4 /* TParser.m */; };
+		F76287150B714E82006AA7EF /* SimpleCParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D2510B07B842001813C4 /* SimpleCParser.m */; };
+		F76287160B714E83006AA7EF /* SimpleCLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F738D2810B07B9B6001813C4 /* SimpleCLexer.m */; };
+		F76287170B714EA9006AA7EF /* FuzzyLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F72B8D0B0AD01DCB0013F1E2 /* FuzzyLexer.m */; };
+		F763D4490A666D3D0061CD35 /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		F763D51E0A66765B0061CD35 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
+		F76AA9900CEA515A00AF044C /* CommonTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F76AA98E0CEA515A00AF044C /* CommonTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7754E3F0A5C0A0500D0873A /* DFA.h in Headers */ = {isa = PBXBuildFile; fileRef = F7754E3D0A5C0A0500D0873A /* DFA.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F77744050B234A3400D1F89B /* Token+DebuggerSupport.h in Headers */ = {isa = PBXBuildFile; fileRef = F77744030B234A3400D1F89B /* Token+DebuggerSupport.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F77747560B23A70600D1F89B /* Debug.h in Headers */ = {isa = PBXBuildFile; fileRef = F77747550B23A70600D1F89B /* Debug.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F77765CC09DC583000517181 /* Token.h in Headers */ = {isa = PBXBuildFile; fileRef = F77765CA09DC583000517181 /* Token.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F777660509DC5CF400517181 /* CommonToken.h in Headers */ = {isa = PBXBuildFile; fileRef = F777660309DC5CF400517181 /* CommonToken.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F777668209DC719C00517181 /* MismatchedTokenException.h in Headers */ = {isa = PBXBuildFile; fileRef = F777668009DC719C00517181 /* MismatchedTokenException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F777669309DC72D600517181 /* RecognitionException.h in Headers */ = {isa = PBXBuildFile; fileRef = F777669109DC72D600517181 /* RecognitionException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F77766AF09DD53E800517181 /* TokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F77766AE09DD53E800517181 /* TokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F777678E09DD618000517181 /* Lexer.h in Headers */ = {isa = PBXBuildFile; fileRef = F7A4099109659BFB002CC781 /* Lexer.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F79D598B0A0E51AB00EA3CEE /* NoViableAltException.h in Headers */ = {isa = PBXBuildFile; fileRef = F79D59890A0E51AB00EA3CEE /* NoViableAltException.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F79D5AF60A0E634900EA3CEE /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		F79D5AF70A0E634A00EA3CEE /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
+		F79D5AF80A0E634A00EA3CEE /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
+		F7B1E5B00CD7CF1900CE136E /* RecognizerSharedState.h in Headers */ = {isa = PBXBuildFile; fileRef = F7B1E5AC0CD7CF1900CE136E /* RecognizerSharedState.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7CD47650C64D24C00FF933A /* TreeRewriteLexer.m in Sources */ = {isa = PBXBuildFile; fileRef = F7CD46360C64BB7300FF933A /* TreeRewriteLexer.m */; };
+		F7CD47660C64D24D00FF933A /* TreeRewriteParser.m in Sources */ = {isa = PBXBuildFile; fileRef = F7CD46380C64BB7300FF933A /* TreeRewriteParser.m */; };
+		F7CD47670C64D24D00FF933A /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = F7CD45FC0C64BA4B00FF933A /* main.m */; };
+		F7CD48670C64D88800FF933A /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		F7CD48680C64D88800FF933A /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
+		F7CD486A0C64D88800FF933A /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
+		F7DD06040A7B1663006A006C /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
+		F7DD06070A7B1664006A006C /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
+		F7DD06300A7B1665006A006C /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
+		F7DD06C50A7B1691006A006C /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		F7DD074C0A7B6656006A006C /* ANTLR.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */; };
+		F7DD074D0A7B665C006A006C /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
+		F7DD074E0A7B665D006A006C /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
+		F7DD074F0A7B665D006A006C /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */; };
+		F7E261160B1E44320013F640 /* DebugParser.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E261140B1E44320013F640 /* DebugParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7E2611A0B1E443D0013F640 /* DebugTreeParser.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E261180B1E443C0013F640 /* DebugTreeParser.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7E261200B1E44E80013F640 /* DebugTokenStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E2611E0B1E44E80013F640 /* DebugTokenStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7E261240B1E44FA0013F640 /* DebugTreeNodeStream.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E261220B1E44FA0013F640 /* DebugTreeNodeStream.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7E261280B1E45070013F640 /* DebugTreeAdaptor.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E261260B1E45070013F640 /* DebugTreeAdaptor.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7E261390B1E45580013F640 /* DebugEventSocketProxy.h in Headers */ = {isa = PBXBuildFile; fileRef = F7E261370B1E45580013F640 /* DebugEventSocketProxy.h */; settings = {ATTRIBUTES = (Public, ); }; };
+		F7E985580A0D865E00F16093 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
+		F7E985590A0D866000F16093 /* FuzzyLexer.h in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */; };
+		F7F4E9BA0A6E8B110092D087 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0867D69BFE84028FC02AAC07 /* Foundation.framework */; };
+/* End PBXBuildFile section */
+
+/* Begin PBXBuildRule section */
+		1A0F346112EA42D800496BB8 /* PBXBuildRule */ = {
+			isa = PBXBuildRule;
+			compilerSpec = com.apple.compilers.proxy.script;
+			filePatterns = .g.m;
+			fileType = pattern.proxy;
+			isEditable = 1;
+			outputFiles = (
+				$1.m,
+				$1.h,
+			);
+			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
+		};
+		1A994CF212A84FD3001853FF /* PBXBuildRule */ = {
+			isa = PBXBuildRule;
+			compilerSpec = com.apple.compilers.proxy.script;
+			filePatterns = .g.m;
+			fileType = pattern.proxy;
+			isEditable = 1;
+			outputFiles = (
+				$1.h,
+				$1.m,
+			);
+			script = "/usr/bin/java -jar antlr-3.3.1.jar $1.g$2";
+		};
+		1A994D3E12A858E1001853FF /* PBXBuildRule */ = {
+			isa = PBXBuildRule;
+			compilerSpec = com.apple.compilers.proxy.script;
+			filePatterns = .g.m;
+			fileType = pattern.proxy;
+			isEditable = 1;
+			outputFiles = (
+				$1.m,
+				$1.h,
+			);
+			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
+		};
+		1A994D4F12A85987001853FF /* PBXBuildRule */ = {
+			isa = PBXBuildRule;
+			compilerSpec = com.apple.compilers.proxy.script;
+			filePatterns = .g.m;
+			fileType = pattern.proxy;
+			isEditable = 1;
+			outputFiles = (
+				$1.h,
+				$1.m,
+			);
+			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
+		};
+		1A994D8512A85ABE001853FF /* PBXBuildRule */ = {
+			isa = PBXBuildRule;
+			compilerSpec = com.apple.compilers.proxy.script;
+			filePatterns = .g.m;
+			fileType = pattern.proxy;
+			isEditable = 1;
+			outputFiles = (
+				$1.h,
+				$1.m,
+			);
+			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1";
+		};
+		1A994DC612A85BFC001853FF /* PBXBuildRule */ = {
+			isa = PBXBuildRule;
+			compilerSpec = com.apple.compilers.proxy.script;
+			filePatterns = .g.m;
+			fileType = pattern.proxy;
+			isEditable = 1;
+			outputFiles = (
+				$1.h,
+				$1.m,
+			);
+			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
+		};
+		1A994DC712A85BFC001853FF /* PBXBuildRule */ = {
+			isa = PBXBuildRule;
+			compilerSpec = com.apple.compilers.proxy.script;
+			filePatterns = .g.m;
+			fileType = pattern.proxy;
+			isEditable = 1;
+			outputFiles = (
+				$1.h,
+				$1.m,
+			);
+			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
+		};
+		1A994DC812A85BFC001853FF /* PBXBuildRule */ = {
+			isa = PBXBuildRule;
+			compilerSpec = com.apple.compilers.proxy.script;
+			filePatterns = .g.m;
+			fileType = pattern.proxy;
+			isEditable = 1;
+			outputFiles = (
+				$1.h,
+				$1.m,
+			);
+			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
+		};
+		1A994DC912A85BFC001853FF /* PBXBuildRule */ = {
+			isa = PBXBuildRule;
+			compilerSpec = com.apple.compilers.proxy.script;
+			filePatterns = .g.m;
+			fileType = pattern.proxy;
+			isEditable = 1;
+			outputFiles = (
+				$1.h,
+				$1.m,
+			);
+			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
+		};
+		1AC5ACA212E7BEFE00DF0C58 /* PBXBuildRule */ = {
+			isa = PBXBuildRule;
+			compilerSpec = com.apple.compilers.proxy.script;
+			filePatterns = .g.m;
+			fileType = pattern.proxy;
+			isEditable = 1;
+			outputFiles = (
+				$1.m,
+				$1.h,
+			);
+			script = "/usr/bin/java -jar /Library/Java/Extensions/antlr-3.3.1.jar $1.g";
+		};
+/* End PBXBuildRule section */
+
+/* Begin PBXContainerItemProxy section */
+		1A0F347012EA43BA00496BB8 /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 1A0F343B12EA425700496BB8;
+			remoteInfo = "Regenerate polydiff";
+		};
+		1A0F347212EA43BA00496BB8 /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 1AC5AC9312E7BE0400DF0C58;
+			remoteInfo = "Regenerate treeparser";
+		};
+		1A0F347412EA43BA00496BB8 /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = F7CD47610C64D23800FF933A;
+			remoteInfo = "Regenerate treerewrite";
+		};
+		1A12134411D3FDA500F27B38 /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 1A348BEB11D2D0A1000C72FC;
+			remoteInfo = ANTLRBitsetTest;
+		};
+		1A12134611D3FDA500F27B38 /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 1A1210FA11D3A5D900F27B38;
+			remoteInfo = ANTLRCommonTokenTest;
+		};
+		1A12134811D3FDA500F27B38 /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 1A12130B11D3F7CD00F27B38;
+			remoteInfo = ANTLRCommonTreeTest;
+		};
+		1A12134A11D3FDA500F27B38 /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 1A12122311D3C92400F27B38;
+			remoteInfo = ANTLRFastQueueTest;
+		};
+		1A12134C11D3FDA500F27B38 /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 1A1212DB11D3F53600F27B38;
+			remoteInfo = ANTLRIntArrayTest;
+		};
+		1A12134E11D3FDA500F27B38 /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 1A1211D011D3BF4600F27B38;
+			remoteInfo = ANTLRStringStreamTest;
+		};
+		F762874B0B715417006AA7EF /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = F76287450B7151E3006AA7EF;
+			remoteInfo = "Regenerate fuzzy";
+		};
+		F76287A60B7157C2006AA7EF /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = F762877E0B71559C006AA7EF;
+			remoteInfo = "Regenerate combined";
+		};
+		F76287A80B7157C2006AA7EF /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = F76287820B71559F006AA7EF;
+			remoteInfo = "Regenerate LL-star";
+		};
+		F76287AA0B7157C2006AA7EF /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = F76287860B7155A2006AA7EF;
+			remoteInfo = "Regenerate hoistedPredicates";
+		};
+		F76287AC0B7157C2006AA7EF /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = F762878A0B7155AB006AA7EF;
+			remoteInfo = "Regenerate scopes";
+		};
+		F76287AE0B7157C2006AA7EF /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = F762878E0B7155AF006AA7EF;
+			remoteInfo = "Regenerate simplec tree";
+		};
+		F79EFB130C5845A300ABAB3D /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = F76287780B71557E006AA7EF;
+			remoteInfo = "Regenerate lexertest-simple";
+		};
+/* End PBXContainerItemProxy section */
+
+/* Begin PBXCopyFilesBuildPhase section */
+		F706A5710A0EC357008999AB /* CopyFiles */ = {
+			isa = PBXCopyFilesBuildPhase;
+			buildActionMask = 2147483647;
+			dstPath = "";
+			dstSubfolderSpec = 16;
+			files = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+/* End PBXCopyFilesBuildPhase section */
+
+/* Begin PBXFileReference section */
+		0867D69BFE84028FC02AAC07 /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = /System/Library/Frameworks/Foundation.framework; sourceTree = "<absolute>"; };
+		089C1667FE841158C02AAC07 /* English */ = {isa = PBXFileReference; fileEncoding = 10; lastKnownFileType = text.plist.strings; name = English; path = English.lproj/InfoPlist.strings; sourceTree = "<group>"; };
+		1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = FuzzyLexer.h; path = /System/Library/Frameworks/Cocoa.framework; sourceTree = "<absolute>"; };
+		1A0F342D12EA411F00496BB8 /* files */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = files; sourceTree = "<group>"; };
+		1A0F342E12EA411F00496BB8 /* input */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
+		1A0F343012EA411F00496BB8 /* Main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Main.m; sourceTree = "<group>"; };
+		1A0F343112EA411F00496BB8 /* output */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
+		1A0F343212EA411F00496BB8 /* Poly.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Poly.g; sourceTree = "<group>"; };
+		1A0F343312EA411F00496BB8 /* PolyDifferentiator.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = PolyDifferentiator.g; sourceTree = "<group>"; };
+		1A0F343412EA411F00496BB8 /* PolyPrinter.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = PolyPrinter.g; sourceTree = "<group>"; };
+		1A0F343512EA411F00496BB8 /* Simplifier.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Simplifier.g; sourceTree = "<group>"; };
+		1A0F346612EA42D800496BB8 /* polydiff */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = polydiff; sourceTree = BUILT_PRODUCTS_DIR; };
+		1A0F347812EA444500496BB8 /* Poly.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Poly.tokens; sourceTree = "<group>"; };
+		1A0F347912EA444500496BB8 /* PolyDifferentiator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = PolyDifferentiator.m; sourceTree = "<group>"; };
+		1A0F347A12EA444500496BB8 /* PolyLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PolyLexer.h; sourceTree = "<group>"; };
+		1A0F347B12EA444500496BB8 /* PolyLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = PolyLexer.m; sourceTree = "<group>"; };
+		1A0F347C12EA444500496BB8 /* PolyParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PolyParser.h; sourceTree = "<group>"; };
+		1A0F347D12EA444500496BB8 /* PolyParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = PolyParser.m; sourceTree = "<group>"; };
+		1A0F347E12EA444500496BB8 /* Simplifier.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Simplifier.h; sourceTree = "<group>"; };
+		1A0F347F12EA444500496BB8 /* Simplifier.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Simplifier.m; sourceTree = "<group>"; };
+		1A0F348012EA444500496BB8 /* Simplifier.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Simplifier.tokens; sourceTree = "<group>"; };
+		1A100AB911E604FE006ABF94 /* HashRule.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HashRule.h; sourceTree = "<group>"; };
+		1A100ABA11E604FE006ABF94 /* HashRule.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = HashRule.m; sourceTree = "<group>"; };
+		1A1210FB11D3A5D900F27B38 /* ANTLRCommonTokenTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRCommonTokenTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
+		1A1210FC11D3A5DA00F27B38 /* ANTLRCommonTokenTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRCommonTokenTest-Info.plist"; sourceTree = "<group>"; };
+		1A1211D111D3BF4700F27B38 /* ANTLRStringStreamTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRStringStreamTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
+		1A1211D211D3BF4700F27B38 /* ANTLRStringStreamTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRStringStreamTest-Info.plist"; sourceTree = "<group>"; };
+		1A12122411D3C92400F27B38 /* ANTLRFastQueueTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRFastQueueTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
+		1A12122511D3C92400F27B38 /* ANTLRFastQueueTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRFastQueueTest-Info.plist"; sourceTree = "<group>"; };
+		1A12122A11D3C93500F27B38 /* ANTLR.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = ANTLR.framework; path = Library/Frameworks/ANTLR.framework; sourceTree = SDKROOT; };
+		1A1212DC11D3F53600F27B38 /* ANTLRIntArrayTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRIntArrayTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
+		1A1212DD11D3F53600F27B38 /* ANTLRIntArrayTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRIntArrayTest-Info.plist"; sourceTree = "<group>"; };
+		1A12130C11D3F7CD00F27B38 /* ANTLRCommonTreeTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRCommonTreeTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
+		1A12130D11D3F7CD00F27B38 /* ANTLRCommonTreeTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRCommonTreeTest-Info.plist"; sourceTree = "<group>"; };
+		1A16B13A11C66492002860C7 /* LinkBase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LinkBase.h; sourceTree = "<group>"; };
+		1A16B13B11C66492002860C7 /* LinkBase.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LinkBase.m; sourceTree = "<group>"; };
+		1A1702FC11C05D4800F6978A /* HashMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HashMap.h; sourceTree = "<group>"; };
+		1A1702FD11C05D4800F6978A /* HashMap.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = HashMap.m; sourceTree = "<group>"; };
+		1A18EF5511B8028D0006186A /* BufferedTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BufferedTokenStream.h; sourceTree = "<group>"; };
+		1A18EF5611B8028D0006186A /* BufferedTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = BufferedTokenStream.m; sourceTree = "<group>"; };
+		1A1BCDB911CB01E60051A1EC /* RuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RuleReturnScope.h; sourceTree = "<group>"; };
+		1A1BCDBA11CB01E60051A1EC /* RuleReturnScope.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RuleReturnScope.m; sourceTree = "<group>"; };
+		1A1BCDCD11CB0B3D0051A1EC /* TreeRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeRuleReturnScope.h; sourceTree = "<group>"; };
+		1A1BCDCE11CB0B3D0051A1EC /* TreeRuleReturnScope.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeRuleReturnScope.m; sourceTree = "<group>"; };
+		1A1BCE2811CB1A3E0051A1EC /* TreeRewriter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeRewriter.h; sourceTree = "<group>"; };
+		1A1BCE2911CB1A3E0051A1EC /* TreeRewriter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeRewriter.m; sourceTree = "<group>"; };
+		1A1CCC9011B6FD39002E5F53 /* FastQueue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FastQueue.h; sourceTree = "<group>"; };
+		1A1CCC9111B6FD39002E5F53 /* FastQueue.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = FastQueue.m; sourceTree = "<group>"; };
+		1A1CCC9211B6FD39002E5F53 /* IntArray.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntArray.h; sourceTree = "<group>"; };
+		1A1CCC9311B6FD39002E5F53 /* IntArray.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = IntArray.m; sourceTree = "<group>"; };
+		1A1CCCA911B724B2002E5F53 /* LookaheadStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LookaheadStream.h; sourceTree = "<group>"; };
+		1A1CCCAA11B724B2002E5F53 /* LookaheadStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LookaheadStream.m; sourceTree = "<group>"; };
+		1A1CCCC711B727B5002E5F53 /* ANTLRError.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRError.h; sourceTree = "<group>"; };
+		1A1D465911BE73B2001575F3 /* BaseTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BaseTreeAdaptor.h; sourceTree = "<group>"; };
+		1A1D465A11BE73B2001575F3 /* BaseTreeAdaptor.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = BaseTreeAdaptor.m; sourceTree = "<group>"; };
+		1A1D466E11BE75C0001575F3 /* MapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MapElement.h; sourceTree = "<group>"; };
+		1A1D466F11BE75C0001575F3 /* MapElement.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MapElement.m; sourceTree = "<group>"; };
+		1A1D467A11BE8E5A001575F3 /* CommonErrorNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CommonErrorNode.h; sourceTree = "<group>"; };
+		1A1D467B11BE8E5A001575F3 /* CommonErrorNode.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CommonErrorNode.m; sourceTree = "<group>"; };
+		1A1FFC5911CD12A400FBB452 /* TokenRewriteStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TokenRewriteStream.h; sourceTree = "<group>"; };
+		1A1FFC5A11CD12A400FBB452 /* TokenRewriteStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TokenRewriteStream.m; sourceTree = "<group>"; };
+		1A26329311C53578000DCDD4 /* MismatchedNotSetException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MismatchedNotSetException.h; sourceTree = "<group>"; };
+		1A26329411C53578000DCDD4 /* MismatchedNotSetException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MismatchedNotSetException.m; sourceTree = "<group>"; };
+		1A270BF711C1451200DCC8F3 /* TreeIterator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeIterator.h; sourceTree = "<group>"; };
+		1A270BF811C1451200DCC8F3 /* TreeIterator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeIterator.m; sourceTree = "<group>"; };
+		1A2B096312E797DE00A75133 /* TestRewriteRuleTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = TestRewriteRuleTokenStream.m; path = test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.m; sourceTree = "<group>"; };
+		1A2B096612E797F600A75133 /* TestRewriteRuleTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = TestRewriteRuleTokenStream.h; path = test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.h; sourceTree = "<group>"; };
+		1A2D217311E4F57C00DFE328 /* UniqueIDMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UniqueIDMap.h; sourceTree = "<group>"; };
+		1A2D217411E4F57C00DFE328 /* UniqueIDMap.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = UniqueIDMap.m; sourceTree = "<group>"; };
+		1A2D218411E502DE00DFE328 /* NodeMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NodeMapElement.h; sourceTree = "<group>"; };
+		1A2D218511E502DE00DFE328 /* NodeMapElement.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = NodeMapElement.m; sourceTree = "<group>"; };
+		1A348B1711D2BE4F000C72FC /* FastQueueTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FastQueueTest.h; sourceTree = "<group>"; };
+		1A348B1811D2BE4F000C72FC /* FastQueueTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = FastQueueTest.m; sourceTree = "<group>"; };
+		1A348B1911D2BE4F000C72FC /* IntArrayTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntArrayTest.h; sourceTree = "<group>"; };
+		1A348B1A11D2BE4F000C72FC /* IntArrayTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = IntArrayTest.m; sourceTree = "<group>"; };
+		1A348B1C11D2BE4F000C72FC /* ANTLRBitSetTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBitSetTest.h; sourceTree = "<group>"; };
+		1A348B1D11D2BE4F000C72FC /* ANTLRBitSetTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBitSetTest.m; sourceTree = "<group>"; };
+		1A348B1F11D2BE4F000C72FC /* ANTLRStringStreamTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRStringStreamTest.h; sourceTree = "<group>"; };
+		1A348B2011D2BE4F000C72FC /* ANTLRStringStreamTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRStringStreamTest.m; sourceTree = "<group>"; };
+		1A348B2211D2BE4F000C72FC /* CommonTokenTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CommonTokenTest.h; sourceTree = "<group>"; };
+		1A348B2311D2BE4F000C72FC /* CommonTokenTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CommonTokenTest.m; sourceTree = "<group>"; };
+		1A348B2511D2BE4F000C72FC /* CommonTreeTest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CommonTreeTest.h; sourceTree = "<group>"; };
+		1A348B2611D2BE4F000C72FC /* CommonTreeTest.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CommonTreeTest.m; sourceTree = "<group>"; };
+		1A348B4E11D2BEE8000C72FC /* Test.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = Test.octest; sourceTree = BUILT_PRODUCTS_DIR; };
+		1A348B4F11D2BEE8000C72FC /* Test-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "Test-Info.plist"; sourceTree = "<group>"; };
+		1A348BB511D2C711000C72FC /* Cocoa.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Cocoa.framework; path = System/Library/Frameworks/Cocoa.framework; sourceTree = SDKROOT; };
+		1A348BB711D2C711000C72FC /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = System/Library/Frameworks/CoreFoundation.framework; sourceTree = SDKROOT; };
+		1A348BEC11D2D0A1000C72FC /* ANTLRBitsetTest.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = ANTLRBitsetTest.octest; sourceTree = BUILT_PRODUCTS_DIR; };
+		1A348BED11D2D0A1000C72FC /* ANTLRBitsetTest-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "ANTLRBitsetTest-Info.plist"; sourceTree = "<group>"; };
+		1A3A08E411E213C500D5EE26 /* BaseStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BaseStack.h; sourceTree = "<group>"; };
+		1A3A08E511E213C500D5EE26 /* BaseStack.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = BaseStack.m; sourceTree = "<group>"; };
+		1A3A08E811E213E100D5EE26 /* SymbolStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SymbolStack.h; sourceTree = "<group>"; };
+		1A3A08E911E213E100D5EE26 /* SymbolStack.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SymbolStack.m; sourceTree = "<group>"; };
+		1A3A09BD11E235BD00D5EE26 /* antlr3.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = antlr3.h; sourceTree = "<group>"; };
+		1A45657511C922BE0082F421 /* RuleMemo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RuleMemo.h; sourceTree = "<group>"; };
+		1A45657611C922BE0082F421 /* RuleMemo.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RuleMemo.m; sourceTree = "<group>"; };
+		1A45658711C9270D0082F421 /* BaseMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BaseMapElement.h; sourceTree = "<group>"; };
+		1A45658811C9270D0082F421 /* BaseMapElement.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = BaseMapElement.m; sourceTree = "<group>"; };
+		1A4A851011CBCE3E00E4BF1B /* TreeVisitor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeVisitor.h; sourceTree = "<group>"; };
+		1A4A851111CBCE3E00E4BF1B /* TreeVisitor.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeVisitor.m; sourceTree = "<group>"; };
+		1A4A851611CBCE5500E4BF1B /* TreeVisitorAction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeVisitorAction.h; sourceTree = "<group>"; };
+		1A4A851711CBCE5500E4BF1B /* TreeVisitorAction.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeVisitorAction.m; sourceTree = "<group>"; };
+		1A4A851C11CBCF3700E4BF1B /* TreeWizard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeWizard.h; sourceTree = "<group>"; };
+		1A4A851D11CBCF3700E4BF1B /* TreeWizard.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeWizard.m; sourceTree = "<group>"; };
+		1A4D5AD411B55A45001C9482 /* BaseTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BaseTree.h; sourceTree = "<group>"; };
+		1A4D5AD511B55A45001C9482 /* BaseTree.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = BaseTree.m; sourceTree = "<group>"; };
+		1A5EA50911CFE7CE00E8932F /* Map.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Map.h; sourceTree = "<group>"; };
+		1A5EA50A11CFE7CE00E8932F /* Map.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Map.m; sourceTree = "<group>"; };
+		1A65B7D611B9532A00FD8754 /* BufferedTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BufferedTreeNodeStream.h; sourceTree = "<group>"; };
+		1A65B7D711B9532A00FD8754 /* BufferedTreeNodeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = BufferedTreeNodeStream.m; sourceTree = "<group>"; };
+		1A6C451411BF4EE00039788A /* MissingTokenException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MissingTokenException.h; sourceTree = "<group>"; };
+		1A6C451511BF4EE00039788A /* MissingTokenException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MissingTokenException.m; sourceTree = "<group>"; };
+		1A6C452611BF50A40039788A /* UnwantedTokenException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UnwantedTokenException.h; sourceTree = "<group>"; };
+		1A6C452711BF50A40039788A /* UnwantedTokenException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = UnwantedTokenException.m; sourceTree = "<group>"; };
+		1A77EE8912E6A552007F323A /* TreeRewriteRuleTokenStream.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = TreeRewriteRuleTokenStream.octest; sourceTree = BUILT_PRODUCTS_DIR; };
+		1A77EE8A12E6A552007F323A /* TreeRewriteRuleTokenStream-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "TreeRewriteRuleTokenStream-Info.plist"; sourceTree = "<group>"; };
+		1A86B91911EB9F6300C67A03 /* ParseTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ParseTree.h; sourceTree = "<group>"; };
+		1A86B91A11EB9F6300C67A03 /* ParseTree.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ParseTree.m; sourceTree = "<group>"; };
+		1A86BACD11EC1CD000C67A03 /* UnbufferedTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UnbufferedTokenStream.h; sourceTree = "<group>"; };
+		1A86BACE11EC1CD000C67A03 /* UnbufferedTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = UnbufferedTokenStream.m; sourceTree = "<group>"; };
+		1A8ABFC511BA9B960038DBB0 /* CharStreamState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CharStreamState.h; sourceTree = "<group>"; };
+		1A8AC00A11BAEC710038DBB0 /* RuntimeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RuntimeException.h; sourceTree = "<group>"; };
+		1A8AC00B11BAEC710038DBB0 /* RuntimeException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RuntimeException.m; sourceTree = "<group>"; };
+		1A994CE412A84F3E001853FF /* SimpleC__.gl */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleC__.gl; sourceTree = "<group>"; };
+		1A9CBD2411C9979600DA8FEF /* UnbufferedCommonTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UnbufferedCommonTreeNodeStream.h; sourceTree = "<group>"; };
+		1A9CBD2511C9979600DA8FEF /* UnbufferedCommonTreeNodeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = UnbufferedCommonTreeNodeStream.m; sourceTree = "<group>"; };
+		1A9CBD2611C9979600DA8FEF /* UnbufferedCommonTreeNodeStreamState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UnbufferedCommonTreeNodeStreamState.h; sourceTree = "<group>"; };
+		1A9CBD2711C9979600DA8FEF /* UnbufferedCommonTreeNodeStreamState.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = UnbufferedCommonTreeNodeStreamState.m; sourceTree = "<group>"; };
+		1AAC202A11CC621A00CF56D1 /* TreePatternLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreePatternLexer.h; sourceTree = "<group>"; };
+		1AAC202B11CC621A00CF56D1 /* TreePatternLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreePatternLexer.m; sourceTree = "<group>"; };
+		1AAC20A311CC790300CF56D1 /* TreePatternParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreePatternParser.h; sourceTree = "<group>"; };
+		1AAC20A411CC790300CF56D1 /* TreePatternParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreePatternParser.m; sourceTree = "<group>"; };
+		1AB4A58F11B9A0DA0076E91A /* StreamEnumerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StreamEnumerator.h; sourceTree = "<group>"; };
+		1AB4A59011B9A0DA0076E91A /* StreamEnumerator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = StreamEnumerator.m; sourceTree = "<group>"; };
+		1AB5F47511E3869D00E065B0 /* RuleMapElement.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RuleMapElement.h; sourceTree = "<group>"; };
+		1AB5F47611E3869D00E065B0 /* RuleMapElement.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RuleMapElement.m; sourceTree = "<group>"; };
+		1AB5F51C11E3BE2E00E065B0 /* PtrBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PtrBuffer.h; sourceTree = "<group>"; };
+		1AB5F51D11E3BE2E00E065B0 /* PtrBuffer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = PtrBuffer.m; sourceTree = "<group>"; };
+		1AC5AC7212E7BBB600DF0C58 /* files */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = files; path = treeparser/files; sourceTree = "<group>"; };
+		1AC5AC7312E7BBB600DF0C58 /* input */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = input; path = treeparser/input; sourceTree = "<group>"; };
+		1AC5AC7412E7BBB600DF0C58 /* Lang.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = Lang.g; path = treeparser/Lang.g; sourceTree = "<group>"; };
+		1AC5AC7512E7BBB600DF0C58 /* LangDumpDecl.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = LangDumpDecl.g; path = treeparser/LangDumpDecl.g; sourceTree = "<group>"; };
+		1AC5AC7712E7BBB600DF0C58 /* output */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = output; path = treeparser/output; sourceTree = "<group>"; };
+		1AC5AC7812E7BBB600DF0C58 /* README.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = README.txt; path = treeparser/README.txt; sourceTree = "<group>"; };
+		1AC5AC8112E7BC9100DF0C58 /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = main.m; path = treeparser/main.m; sourceTree = "<group>"; };
+		1AC5ACA712E7BEFE00DF0C58 /* treeparser */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = treeparser; sourceTree = BUILT_PRODUCTS_DIR; };
+		1AC5ACC412E7C03C00DF0C58 /* Lang.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = Lang.tokens; path = treeparser/Lang.tokens; sourceTree = "<group>"; };
+		1AC5ACC512E7C03C00DF0C58 /* LangDumpDecl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LangDumpDecl.h; path = treeparser/LangDumpDecl.h; sourceTree = "<group>"; };
+		1AC5ACC612E7C03C00DF0C58 /* LangDumpDecl.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = LangDumpDecl.m; path = treeparser/LangDumpDecl.m; sourceTree = "<group>"; };
+		1AC5ACC712E7C03C00DF0C58 /* LangDumpDecl.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = LangDumpDecl.tokens; path = treeparser/LangDumpDecl.tokens; sourceTree = "<group>"; };
+		1AC5ACC812E7C03C00DF0C58 /* LangLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LangLexer.h; path = treeparser/LangLexer.h; sourceTree = "<group>"; };
+		1AC5ACC912E7C03C00DF0C58 /* LangLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = LangLexer.m; path = treeparser/LangLexer.m; sourceTree = "<group>"; };
+		1AC5ACCA12E7C03C00DF0C58 /* LangParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LangParser.h; path = treeparser/LangParser.h; sourceTree = "<group>"; };
+		1AC5ACCB12E7C03C00DF0C58 /* LangParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = LangParser.m; path = treeparser/LangParser.m; sourceTree = "<group>"; };
+		1ADB66F012E74341007C1661 /* FuzzyLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FuzzyLexer.h; sourceTree = "<group>"; };
+		1ADE21F012E505D700E8A95C /* SimpleC.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleC.g; sourceTree = "<group>"; };
+		1AE8A96A11D9227A00D36FD6 /* RuleStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RuleStack.h; sourceTree = "<group>"; };
+		1AE8A96B11D9227A00D36FD6 /* RuleStack.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RuleStack.m; sourceTree = "<group>"; };
+		32DBCF5E0370ADEE00C91783 /* ANTLR_Prefix.pch */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLR_Prefix.pch; sourceTree = "<group>"; };
+		8DC2EF5A0486A6940098B216 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist; path = Info.plist; sourceTree = "<group>"; };
+		8DC2EF5B0486A6940098B216 /* ANTLR.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = ANTLR.framework; sourceTree = BUILT_PRODUCTS_DIR; };
+		BF7D9B511519363200B58218 /* ACBTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ACBTree.h; sourceTree = "<group>"; };
+		BF7D9B521519363200B58218 /* ACBTree.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ACBTree.m; sourceTree = "<group>"; };
+		BF7D9B551519367800B58218 /* AMutableArray.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AMutableArray.h; sourceTree = "<group>"; };
+		BF7D9B561519367800B58218 /* AMutableArray.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = AMutableArray.m; sourceTree = "<group>"; };
+		BF7D9B591519368C00B58218 /* AMutableDictionary.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AMutableDictionary.h; sourceTree = "<group>"; };
+		BF7D9B5A1519368C00B58218 /* AMutableDictionary.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = AMutableDictionary.m; sourceTree = "<group>"; };
+		BF7D9B5D151936B500B58218 /* DoubleKeyMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DoubleKeyMap.h; sourceTree = "<group>"; };
+		BF7D9B5E151936B600B58218 /* DoubleKeyMap.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = DoubleKeyMap.m; sourceTree = "<group>"; };
+		BF7D9B61151936E700B58218 /* ANTLRFileStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRFileStream.h; sourceTree = "<group>"; };
+		BF7D9B62151936E700B58218 /* ANTLRFileStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRFileStream.m; sourceTree = "<group>"; };
+		BF7D9B65151936FB00B58218 /* ANTLRInputStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRInputStream.h; sourceTree = "<group>"; };
+		BF7D9B66151936FB00B58218 /* ANTLRInputStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRInputStream.m; sourceTree = "<group>"; };
+		BF7D9B691519371100B58218 /* ANTLRReaderStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRReaderStream.h; sourceTree = "<group>"; };
+		BF7D9B6A1519371200B58218 /* ANTLRReaderStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRReaderStream.m; sourceTree = "<group>"; };
+		BF7D9B6D1519373600B58218 /* RewriteRuleNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RewriteRuleNodeStream.h; sourceTree = "<group>"; };
+		BF7D9B6E1519373600B58218 /* RewriteRuleNodeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RewriteRuleNodeStream.m; sourceTree = "<group>"; };
+		BF7D9B711519375200B58218 /* ArrayIterator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ArrayIterator.h; sourceTree = "<group>"; };
+		BF7D9B721519375200B58218 /* ArrayIterator.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ArrayIterator.m; sourceTree = "<group>"; };
+		F7009AD90A1BE4AE002EDD5D /* LexerRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LexerRuleReturnScope.h; sourceTree = "<group>"; };
+		F7009ADA0A1BE4AE002EDD5D /* LexerRuleReturnScope.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = LexerRuleReturnScope.m; sourceTree = "<group>"; };
+		F700E6190A5F66EC005D0757 /* EarlyExitException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = EarlyExitException.h; sourceTree = "<group>"; };
+		F700E61A0A5F66EC005D0757 /* EarlyExitException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = EarlyExitException.m; sourceTree = "<group>"; };
+		F700E85E0A5FA2DE005D0757 /* Combined.g */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; name = Combined.g; path = combined/Combined.g; sourceTree = "<group>"; };
+		F700E8640A5FA31D005D0757 /* combined */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = combined; sourceTree = BUILT_PRODUCTS_DIR; };
+		F700E86A0A5FA34D005D0757 /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = main.m; path = combined/main.m; sourceTree = "<group>"; };
+		F700E8F90A5FAD21005D0757 /* Parser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Parser.h; sourceTree = "<group>"; };
+		F700E8FA0A5FAD21005D0757 /* Parser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Parser.m; sourceTree = "<group>"; };
+		F700ECCF0A5FE176005D0757 /* input */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
+		F700ECD00A5FE176005D0757 /* output */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
+		F700ECD70A5FE186005D0757 /* LL-star */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "LL-star"; sourceTree = BUILT_PRODUCTS_DIR; };
+		F700ECE70A5FE25D005D0757 /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
+		F700ED7E0A5FF17C005D0757 /* TokenSource.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TokenSource.h; sourceTree = "<group>"; };
+		F700ED940A5FF2A5005D0757 /* CommonTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CommonTokenStream.h; sourceTree = "<group>"; };
+		F700ED950A5FF2A5005D0757 /* CommonTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CommonTokenStream.m; sourceTree = "<group>"; };
+		F7037CEE0A0582FC0070435D /* MismatchedRangeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MismatchedRangeException.h; sourceTree = "<group>"; };
+		F7037CEF0A0582FC0070435D /* MismatchedRangeException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MismatchedRangeException.m; sourceTree = "<group>"; };
+		F7037EA00A05AFB60070435D /* lexertest-simple */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "lexertest-simple"; sourceTree = BUILT_PRODUCTS_DIR; };
+		F7037EBD0A05B06B0070435D /* TestLexer.g */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = TestLexer.g; sourceTree = "<group>"; };
+		F70380BA0A07FA0D0070435D /* MismatchedSetException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MismatchedSetException.h; sourceTree = "<group>"; };
+		F70380BB0A07FA0D0070435D /* MismatchedSetException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MismatchedSetException.m; sourceTree = "<group>"; };
+		F7048FF50B07D05400D2F326 /* Test.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Test.tokens; sourceTree = "<group>"; };
+		F7048FF60B07D05400D2F326 /* TestLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TestLexer.h; sourceTree = "<group>"; };
+		F7048FF70B07D05400D2F326 /* TestLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TestLexer.m; sourceTree = "<group>"; };
+		F706A55B0A0EC307008999AB /* input */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
+		F70AA7A509AA2A6900C3FD5E /* ANTLR.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLR.h; sourceTree = "<group>"; };
+		F70AA7AD09AA2AC000C3FD5E /* IntStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IntStream.h; sourceTree = "<group>"; };
+		F70AA7B509AA2B8800C3FD5E /* CharStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CharStream.h; sourceTree = "<group>"; };
+		F70AA7C509AA339900C3FD5E /* ANTLRStringStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRStringStream.h; sourceTree = "<group>"; };
+		F70AA7C609AA339900C3FD5E /* ANTLRStringStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRStringStream.m; sourceTree = "<group>"; };
+		F70AA7CE09AA379300C3FD5E /* CharStreamState.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CharStreamState.m; sourceTree = "<group>"; };
+		F70B11BB0C4C2B6400C3ECE0 /* RewriteRuleSubtreeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RewriteRuleSubtreeStream.h; sourceTree = "<group>"; };
+		F70B11BC0C4C2B6400C3ECE0 /* RewriteRuleSubtreeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RewriteRuleSubtreeStream.m; sourceTree = "<group>"; };
+		F70B11C10C4C2B7900C3ECE0 /* RewriteRuleTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RewriteRuleTokenStream.h; sourceTree = "<group>"; };
+		F70B11C20C4C2B7900C3ECE0 /* RewriteRuleTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RewriteRuleTokenStream.m; sourceTree = "<group>"; };
+		F70BB390098E5BB80054FEF8 /* SenTestingKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = SenTestingKit.framework; path = Library/Frameworks/SenTestingKit.framework; sourceTree = DEVELOPER_DIR; };
+		F71325860C4A060900B99F2D /* RewriteRuleElementStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RewriteRuleElementStream.h; sourceTree = "<group>"; };
+		F71325870C4A060900B99F2D /* RewriteRuleElementStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RewriteRuleElementStream.m; sourceTree = "<group>"; };
+		F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = /System/Library/Frameworks/CoreFoundation.framework; sourceTree = "<absolute>"; };
+		F72B8CFA0AD01D380013F1E2 /* Fuzzy.g */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = Fuzzy.g; sourceTree = "<group>"; };
+		F72B8D090AD01DCB0013F1E2 /* Fuzzy.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Fuzzy.tokens; sourceTree = "<group>"; };
+		F72B8D0B0AD01DCB0013F1E2 /* FuzzyLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = FuzzyLexer.m; sourceTree = "<group>"; };
+		F72C58E80AB3911D00282574 /* CommonTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CommonTree.h; sourceTree = "<group>"; };
+		F72C58E90AB3911D00282574 /* CommonTree.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CommonTree.m; sourceTree = "<group>"; };
+		F72C59A50AB4F20A00282574 /* CommonTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CommonTreeAdaptor.h; sourceTree = "<group>"; };
+		F72C59A60AB4F20A00282574 /* CommonTreeAdaptor.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CommonTreeAdaptor.m; sourceTree = "<group>"; };
+		F72C5B820AB52AD300282574 /* TreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeNodeStream.h; sourceTree = "<group>"; };
+		F72C5D540AB63C1D00282574 /* TreeParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeParser.h; sourceTree = "<group>"; };
+		F72C5D550AB63C1D00282574 /* TreeParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeParser.m; sourceTree = "<group>"; };
+		F72C5D600AB63E0B00282574 /* MismatchedTreeNodeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MismatchedTreeNodeException.h; sourceTree = "<group>"; };
+		F72C5D610AB63E0B00282574 /* MismatchedTreeNodeException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MismatchedTreeNodeException.m; sourceTree = "<group>"; };
+		F72C5E2F0AB7529C00282574 /* input */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
+		F72C5E310AB7529C00282574 /* output */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
+		F72C5E560AB7E41000282574 /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
+		F72C5E690AB7E4C900282574 /* simplectree */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = simplectree; sourceTree = BUILT_PRODUCTS_DIR; };
+		F72C5ECC0AB7E5A500282574 /* ParserRuleReturnScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ParserRuleReturnScope.h; sourceTree = "<group>"; };
+		F72C5ECD0AB7E5A500282574 /* ParserRuleReturnScope.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ParserRuleReturnScope.m; sourceTree = "<group>"; };
+		F738D1730B07AEAA001813C4 /* FailedPredicateException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FailedPredicateException.h; sourceTree = "<group>"; };
+		F738D1740B07AEAA001813C4 /* FailedPredicateException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = FailedPredicateException.m; sourceTree = "<group>"; };
+		F738D1750B07AEAA001813C4 /* TreeException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeException.h; sourceTree = "<group>"; };
+		F738D1760B07AEAA001813C4 /* TreeException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeException.m; sourceTree = "<group>"; };
+		F738D1FD0B07B1CE001813C4 /* SymbolTable.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SymbolTable.tokens; sourceTree = "<group>"; };
+		F738D1FE0B07B1CE001813C4 /* SymbolTableLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SymbolTableLexer.h; sourceTree = "<group>"; };
+		F738D1FF0B07B1CE001813C4 /* SymbolTableLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SymbolTableLexer.m; sourceTree = "<group>"; };
+		F738D2000B07B1CE001813C4 /* SymbolTableParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SymbolTableParser.h; sourceTree = "<group>"; };
+		F738D2010B07B1CE001813C4 /* SymbolTableParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SymbolTableParser.m; sourceTree = "<group>"; };
+		F738D2230B07B3BC001813C4 /* TParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TParser.h; sourceTree = "<group>"; };
+		F738D2240B07B3BC001813C4 /* TParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TParser.m; sourceTree = "<group>"; };
+		F738D2510B07B842001813C4 /* SimpleCParser.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = SimpleCParser.m; sourceTree = "<group>"; };
+		F738D27F0B07B9B6001813C4 /* SimpleC.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleC.tokens; sourceTree = "<group>"; };
+		F738D2800B07B9B6001813C4 /* SimpleCLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SimpleCLexer.h; sourceTree = "<group>"; };
+		F738D2810B07B9B6001813C4 /* SimpleCLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SimpleCLexer.m; sourceTree = "<group>"; };
+		F738D2820B07B9B6001813C4 /* SimpleCParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SimpleCParser.h; sourceTree = "<group>"; };
+		F738D35C0B07C105001813C4 /* Combined.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = Combined.tokens; path = combined/Combined.tokens; sourceTree = "<group>"; };
+		F738D35D0B07C105001813C4 /* CombinedLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = CombinedLexer.h; path = combined/CombinedLexer.h; sourceTree = "<group>"; };
+		F738D35E0B07C105001813C4 /* CombinedLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = CombinedLexer.m; path = combined/CombinedLexer.m; sourceTree = "<group>"; };
+		F738D35F0B07C105001813C4 /* CombinedParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = CombinedParser.h; path = combined/CombinedParser.h; sourceTree = "<group>"; };
+		F738D3600B07C105001813C4 /* CombinedParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = CombinedParser.m; path = combined/CombinedParser.m; sourceTree = "<group>"; };
+		F73E2B720A9CFE6A005D6267 /* Tree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Tree.h; sourceTree = "<group>"; };
+		F73E2B7A0A9D0AFC005D6267 /* TreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeAdaptor.h; sourceTree = "<group>"; };
+		F741D0640B3812D40024DF3F /* SimpleCWalker.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SimpleCWalker.h; sourceTree = "<group>"; };
+		F741D0650B3812D40024DF3F /* SimpleCWalker.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SimpleCWalker.m; sourceTree = "<group>"; };
+		F762879C0B71578D006AA7EF /* README.rtf */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.rtf; path = README.rtf; sourceTree = "<group>"; };
+		F76AA98E0CEA515A00AF044C /* CommonTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CommonTreeNodeStream.h; sourceTree = "<group>"; };
+		F76AA98F0CEA515A00AF044C /* CommonTreeNodeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CommonTreeNodeStream.m; sourceTree = "<group>"; };
+		F7715D1A0AC9DCE400ED984D /* SimpleC.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleC.g; sourceTree = "<group>"; };
+		F7715D1B0AC9DCE500ED984D /* SimpleCWalker.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleCWalker.g; sourceTree = "<group>"; };
+		F7715D1C0AC9DDD800ED984D /* SimpleC.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleC.tokens; sourceTree = "<group>"; };
+		F7715D1D0AC9DDD800ED984D /* SimpleCWalker.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SimpleCWalker.tokens; sourceTree = "<group>"; };
+		F7715D2E0AC9DE9E00ED984D /* SimpleCLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SimpleCLexer.h; sourceTree = "<group>"; };
+		F7715D2F0AC9DE9E00ED984D /* SimpleCLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SimpleCLexer.m; sourceTree = "<group>"; };
+		F7715D300AC9DE9E00ED984D /* SimpleCParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SimpleCParser.h; sourceTree = "<group>"; };
+		F7715D310AC9DE9E00ED984D /* SimpleCParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SimpleCParser.m; sourceTree = "<group>"; };
+		F7754E3D0A5C0A0500D0873A /* DFA.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DFA.h; sourceTree = "<group>"; };
+		F7754E3E0A5C0A0500D0873A /* DFA.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = DFA.m; sourceTree = "<group>"; };
+		F77744030B234A3400D1F89B /* Token+DebuggerSupport.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "Token+DebuggerSupport.h"; sourceTree = "<group>"; };
+		F77744040B234A3400D1F89B /* Token+DebuggerSupport.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "Token+DebuggerSupport.m"; sourceTree = "<group>"; };
+		F77747550B23A70600D1F89B /* Debug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Debug.h; sourceTree = "<group>"; };
+		F77765CA09DC583000517181 /* Token.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Token.h; sourceTree = "<group>"; };
+		F777660309DC5CF400517181 /* CommonToken.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CommonToken.h; sourceTree = "<group>"; };
+		F777660409DC5CF400517181 /* CommonToken.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CommonToken.m; sourceTree = "<group>"; };
+		F777668009DC719C00517181 /* MismatchedTokenException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MismatchedTokenException.h; sourceTree = "<group>"; };
+		F777668109DC719C00517181 /* MismatchedTokenException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MismatchedTokenException.m; sourceTree = "<group>"; };
+		F777669109DC72D600517181 /* RecognitionException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RecognitionException.h; sourceTree = "<group>"; };
+		F777669209DC72D600517181 /* RecognitionException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RecognitionException.m; sourceTree = "<group>"; };
+		F77766AE09DD53E800517181 /* TokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TokenStream.h; sourceTree = "<group>"; };
+		F79D56600A0E23A400EA3CEE /* fuzzy */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = fuzzy; sourceTree = BUILT_PRODUCTS_DIR; };
+		F79D56C00A0E287500EA3CEE /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
+		F79D59890A0E51AB00EA3CEE /* NoViableAltException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NoViableAltException.h; sourceTree = "<group>"; };
+		F79D598A0A0E51AB00EA3CEE /* NoViableAltException.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = NoViableAltException.m; sourceTree = "<group>"; };
+		F7A4098B09659BF3002CC781 /* BaseRecognizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BaseRecognizer.h; sourceTree = "<group>"; };
+		F7A4098C09659BF3002CC781 /* BaseRecognizer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = BaseRecognizer.m; sourceTree = "<group>"; };
+		F7A4099109659BFB002CC781 /* Lexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Lexer.h; sourceTree = "<group>"; };
+		F7A4099209659BFB002CC781 /* Lexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Lexer.m; sourceTree = "<group>"; };
+		F7B1E5AC0CD7CF1900CE136E /* RecognizerSharedState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RecognizerSharedState.h; sourceTree = "<group>"; };
+		F7B1E5AD0CD7CF1900CE136E /* RecognizerSharedState.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RecognizerSharedState.m; sourceTree = "<group>"; };
+		F7CD45FC0C64BA4B00FF933A /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
+		F7CD45FD0C64BA4B00FF933A /* TreeRewrite.g */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = TreeRewrite.g; sourceTree = "<group>"; };
+		F7CD46340C64BB7300FF933A /* TreeRewrite.tokens */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = TreeRewrite.tokens; sourceTree = "<group>"; };
+		F7CD46350C64BB7300FF933A /* TreeRewriteLexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeRewriteLexer.h; sourceTree = "<group>"; };
+		F7CD46360C64BB7300FF933A /* TreeRewriteLexer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeRewriteLexer.m; sourceTree = "<group>"; };
+		F7CD46370C64BB7300FF933A /* TreeRewriteParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TreeRewriteParser.h; sourceTree = "<group>"; };
+		F7CD46380C64BB7300FF933A /* TreeRewriteParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TreeRewriteParser.m; sourceTree = "<group>"; };
+		F7CD475D0C64D22800FF933A /* treerewrite */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = treerewrite; sourceTree = BUILT_PRODUCTS_DIR; };
+		F7CECD7D0B1E5C370054CC3B /* DebugEventListener.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DebugEventListener.h; sourceTree = "<group>"; };
+		F7DD05E20A7B14BE006A006C /* input */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
+		F7DD05E30A7B14BE006A006C /* output */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
+		F7DD05E40A7B14BE006A006C /* T.g */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = T.g; sourceTree = "<group>"; };
+		F7DD05E70A7B1572006A006C /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
+		F7DD05EE0A7B15E1006A006C /* hoistedPredicates */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = hoistedPredicates; sourceTree = BUILT_PRODUCTS_DIR; };
+		F7DD06E70A7B1700006A006C /* TLexer.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = TLexer.h; sourceTree = "<group>"; };
+		F7DD06E80A7B1700006A006C /* TLexer.m */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.objc; path = TLexer.m; sourceTree = "<group>"; };
+		F7DD073C0A7B660A006A006C /* input */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = input; sourceTree = "<group>"; };
+		F7DD073D0A7B660A006A006C /* output */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = output; sourceTree = "<group>"; };
+		F7DD073E0A7B660A006A006C /* SymbolTable.g */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = text; path = SymbolTable.g; sourceTree = "<group>"; };
+		F7DD07440A7B6618006A006C /* scopes */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = scopes; sourceTree = BUILT_PRODUCTS_DIR; };
+		F7DD07800A7B67A7006A006C /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
+		F7E261140B1E44320013F640 /* DebugParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DebugParser.h; sourceTree = "<group>"; };
+		F7E261150B1E44320013F640 /* DebugParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = DebugParser.m; sourceTree = "<group>"; };
+		F7E261180B1E443C0013F640 /* DebugTreeParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DebugTreeParser.h; sourceTree = "<group>"; };
+		F7E261190B1E443C0013F640 /* DebugTreeParser.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = DebugTreeParser.m; sourceTree = "<group>"; };
+		F7E2611E0B1E44E80013F640 /* DebugTokenStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DebugTokenStream.h; sourceTree = "<group>"; };
+		F7E2611F0B1E44E80013F640 /* DebugTokenStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = DebugTokenStream.m; sourceTree = "<group>"; };
+		F7E261220B1E44FA0013F640 /* DebugTreeNodeStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DebugTreeNodeStream.h; sourceTree = "<group>"; };
+		F7E261230B1E44FA0013F640 /* DebugTreeNodeStream.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = DebugTreeNodeStream.m; sourceTree = "<group>"; };
+		F7E261260B1E45070013F640 /* DebugTreeAdaptor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DebugTreeAdaptor.h; sourceTree = "<group>"; };
+		F7E261270B1E45070013F640 /* DebugTreeAdaptor.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = DebugTreeAdaptor.m; sourceTree = "<group>"; };
+		F7E261370B1E45580013F640 /* DebugEventSocketProxy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DebugEventSocketProxy.h; sourceTree = "<group>"; };
+		F7E261380B1E45580013F640 /* DebugEventSocketProxy.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = DebugEventSocketProxy.m; sourceTree = "<group>"; };
+		F7E983940A0D6A5F00F16093 /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
+		F7EFFC8B0D164E2C008EE57E /* CHANGES.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = CHANGES.txt; sourceTree = "<group>"; };
+		F7F218EE097AFB1A000472E9 /* ANTLRBitSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ANTLRBitSet.h; sourceTree = "<group>"; };
+		F7F218EF097AFB1A000472E9 /* ANTLRBitSet.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ANTLRBitSet.m; sourceTree = "<group>"; };
+/* End PBXFileReference section */
+
+/* Begin PBXFrameworksBuildPhase section */
+		1A0F345D12EA42D800496BB8 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A0F345E12EA42D800496BB8 /* ANTLR.framework in Frameworks */,
+				1A0F345F12EA42D800496BB8 /* Foundation.framework in Frameworks */,
+				1A0F346012EA42D800496BB8 /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A1210F811D3A5D900F27B38 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A12117911D3B45C00F27B38 /* ANTLR.framework in Frameworks */,
+				1A12117A11D3B47000F27B38 /* Cocoa.framework in Frameworks */,
+				1A12117B11D3B47000F27B38 /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A1211CE11D3BF4600F27B38 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A1211D711D3BF6800F27B38 /* ANTLR.framework in Frameworks */,
+				1A1211D811D3BF6800F27B38 /* Cocoa.framework in Frameworks */,
+				1A1211D911D3BF6800F27B38 /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A12122111D3C92400F27B38 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A12122B11D3C93500F27B38 /* ANTLR.framework in Frameworks */,
+				1A12122C11D3C93500F27B38 /* Cocoa.framework in Frameworks */,
+				1A12122D11D3C93500F27B38 /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A1212D911D3F53600F27B38 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A1212E211D3F55500F27B38 /* ANTLR.framework in Frameworks */,
+				1A1212E311D3F55500F27B38 /* Cocoa.framework in Frameworks */,
+				1A1212E411D3F55500F27B38 /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A12130911D3F7CD00F27B38 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A12131211D3F7DC00F27B38 /* ANTLR.framework in Frameworks */,
+				1A12131311D3F7DC00F27B38 /* Cocoa.framework in Frameworks */,
+				1A12131411D3F7DC00F27B38 /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A348B4B11D2BEE8000C72FC /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A348B5811D2BF1C000C72FC /* ANTLR.framework in Frameworks */,
+				1A348BB611D2C711000C72FC /* Cocoa.framework in Frameworks */,
+				1A348BB811D2C711000C72FC /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A348BE911D2D0A1000C72FC /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A348BF211D2D0E0000C72FC /* Cocoa.framework in Frameworks */,
+				1A348BF311D2D0E0000C72FC /* CoreFoundation.framework in Frameworks */,
+				1A348BF411D2D0E7000C72FC /* ANTLR.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A77EE8612E6A552007F323A /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A77EE9312E6A57C007F323A /* Cocoa.framework in Frameworks */,
+				1A77EE9412E6A57C007F323A /* CoreFoundation.framework in Frameworks */,
+				1A77EE9712E6A594007F323A /* ANTLR.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1AC5AC9D12E7BEFE00DF0C58 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1AC5AC9E12E7BEFE00DF0C58 /* ANTLR.framework in Frameworks */,
+				1AC5AC9F12E7BEFE00DF0C58 /* Foundation.framework in Frameworks */,
+				1AC5ACA112E7BEFE00DF0C58 /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		8DC2EF560486A6940098B216 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F763D51E0A66765B0061CD35 /* CoreFoundation.framework in Frameworks */,
+				1ADB67BA12E74E82007C1661 /* Cocoa.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F700E8620A5FA31D005D0757 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F700ECA40A5FDF1A005D0757 /* CoreFoundation.framework in Frameworks */,
+				F700ECA50A5FDF1A005D0757 /* FuzzyLexer.h in Frameworks */,
+				F700EC670A5FDF0D005D0757 /* ANTLR.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F700ECD50A5FE186005D0757 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F7F4E9BA0A6E8B110092D087 /* Foundation.framework in Frameworks */,
+				F763D4490A666D3D0061CD35 /* ANTLR.framework in Frameworks */,
+				F700ECD90A5FE19A005D0757 /* CoreFoundation.framework in Frameworks */,
+				F700ECDA0A5FE19A005D0757 /* FuzzyLexer.h in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F7037E9E0A05AFB60070435D /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F7037EA60A05AFD70070435D /* ANTLR.framework in Frameworks */,
+				F7E985580A0D865E00F16093 /* Foundation.framework in Frameworks */,
+				F7E985590A0D866000F16093 /* FuzzyLexer.h in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F72C5E610AB7E4C900282574 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F72C5E620AB7E4C900282574 /* ANTLR.framework in Frameworks */,
+				F72C5E630AB7E4C900282574 /* Foundation.framework in Frameworks */,
+				F72C5E650AB7E4C900282574 /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F79D565E0A0E23A400EA3CEE /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F79D5AF60A0E634900EA3CEE /* ANTLR.framework in Frameworks */,
+				F79D5AF70A0E634A00EA3CEE /* Foundation.framework in Frameworks */,
+				F79D5AF80A0E634A00EA3CEE /* FuzzyLexer.h in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F7CD475B0C64D22800FF933A /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F7CD48670C64D88800FF933A /* ANTLR.framework in Frameworks */,
+				F7CD48680C64D88800FF933A /* Foundation.framework in Frameworks */,
+				F7CD486A0C64D88800FF933A /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F7DD05EC0A7B15E1006A006C /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F7DD06040A7B1663006A006C /* FuzzyLexer.h in Frameworks */,
+				F7DD06070A7B1664006A006C /* CoreFoundation.framework in Frameworks */,
+				F7DD06300A7B1665006A006C /* Foundation.framework in Frameworks */,
+				F7DD06C50A7B1691006A006C /* ANTLR.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F7DD07420A7B6618006A006C /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F7DD074C0A7B6656006A006C /* ANTLR.framework in Frameworks */,
+				F7DD074D0A7B665C006A006C /* Foundation.framework in Frameworks */,
+				F7DD074E0A7B665D006A006C /* FuzzyLexer.h in Frameworks */,
+				F7DD074F0A7B665D006A006C /* CoreFoundation.framework in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+/* End PBXFrameworksBuildPhase section */
+
+/* Begin PBXGroup section */
+		034768DFFF38A50411DB9C8B /* Products */ = {
+			isa = PBXGroup;
+			children = (
+				8DC2EF5B0486A6940098B216 /* ANTLR.framework */,
+				F7037EA00A05AFB60070435D /* lexertest-simple */,
+				F79D56600A0E23A400EA3CEE /* fuzzy */,
+				F700E8640A5FA31D005D0757 /* combined */,
+				F700ECD70A5FE186005D0757 /* LL-star */,
+				F7DD05EE0A7B15E1006A006C /* hoistedPredicates */,
+				F7DD07440A7B6618006A006C /* scopes */,
+				F72C5E690AB7E4C900282574 /* simplectree */,
+				F7CD475D0C64D22800FF933A /* treerewrite */,
+				1A348B4E11D2BEE8000C72FC /* Test.octest */,
+				1A348BEC11D2D0A1000C72FC /* ANTLRBitsetTest.octest */,
+				1A1210FB11D3A5D900F27B38 /* ANTLRCommonTokenTest.octest */,
+				1A1211D111D3BF4700F27B38 /* ANTLRStringStreamTest.octest */,
+				1A12122411D3C92400F27B38 /* ANTLRFastQueueTest.octest */,
+				1A1212DC11D3F53600F27B38 /* ANTLRIntArrayTest.octest */,
+				1A12130C11D3F7CD00F27B38 /* ANTLRCommonTreeTest.octest */,
+				1A77EE8912E6A552007F323A /* TreeRewriteRuleTokenStream.octest */,
+				1AC5ACA712E7BEFE00DF0C58 /* treeparser */,
+				1A0F346612EA42D800496BB8 /* polydiff */,
+			);
+			name = Products;
+			sourceTree = "<group>";
+		};
+		0867D691FE84028FC02AAC07 /* ANTLR */ = {
+			isa = PBXGroup;
+			children = (
+				F762879C0B71578D006AA7EF /* README.rtf */,
+				F7EFFC8B0D164E2C008EE57E /* CHANGES.txt */,
+				08FB77AEFE84172EC02AAC07 /* Classes */,
+				F7037EBB0A05B06B0070435D /* examples */,
+				32C88DFF0371C24200C91783 /* Other Sources */,
+				089C1665FE841158C02AAC07 /* Resources */,
+				0867D69AFE84028FC02AAC07 /* External Frameworks and Libraries */,
+				034768DFFF38A50411DB9C8B /* Products */,
+				1A348B1411D2BE4F000C72FC /* test */,
+				1A348B4F11D2BEE8000C72FC /* Test-Info.plist */,
+				1A348BB511D2C711000C72FC /* Cocoa.framework */,
+				1A348BB711D2C711000C72FC /* CoreFoundation.framework */,
+				1A348BED11D2D0A1000C72FC /* ANTLRBitsetTest-Info.plist */,
+				1A1210FC11D3A5DA00F27B38 /* ANTLRCommonTokenTest-Info.plist */,
+				1A1211D211D3BF4700F27B38 /* ANTLRStringStreamTest-Info.plist */,
+				1A12122511D3C92400F27B38 /* ANTLRFastQueueTest-Info.plist */,
+				1A12122A11D3C93500F27B38 /* ANTLR.framework */,
+				1A1212DD11D3F53600F27B38 /* ANTLRIntArrayTest-Info.plist */,
+				1A12130D11D3F7CD00F27B38 /* ANTLRCommonTreeTest-Info.plist */,
+				1A77EE8A12E6A552007F323A /* TreeRewriteRuleTokenStream-Info.plist */,
+			);
+			name = ANTLR;
+			sourceTree = "<group>";
+		};
+		0867D69AFE84028FC02AAC07 /* External Frameworks and Libraries */ = {
+			isa = PBXGroup;
+			children = (
+				F70BB390098E5BB80054FEF8 /* SenTestingKit.framework */,
+				1058C7B0FEA5585E11CA2CBB /* Linked Frameworks */,
+				1058C7B2FEA5585E11CA2CBB /* Other Frameworks */,
+			);
+			name = "External Frameworks and Libraries";
+			sourceTree = "<group>";
+		};
+		089C1665FE841158C02AAC07 /* Resources */ = {
+			isa = PBXGroup;
+			children = (
+				8DC2EF5A0486A6940098B216 /* Info.plist */,
+				089C1666FE841158C02AAC07 /* InfoPlist.strings */,
+			);
+			name = Resources;
+			sourceTree = "<group>";
+		};
+		08FB77AEFE84172EC02AAC07 /* Classes */ = {
+			isa = PBXGroup;
+			children = (
+				F7E2610F0B1E43E60013F640 /* Debugging */,
+				F7A40951096597D2002CC781 /* DFA */,
+				F7A4094C096597C4002CC781 /* Exceptions */,
+				F7F218EB097AFB0C000472E9 /* Misc */,
+				F7A4098809659BE5002CC781 /* Recognizer */,
+				1A1BCDC011CB04D20051A1EC /* Rules */,
+				F70AA7AA09AA2AAB00C3FD5E /* Streams */,
+				F7492F8D09C0171900B25E30 /* Tokens */,
+				F73E2B590A9CF83A005D6267 /* Trees */,
+			);
+			name = Classes;
+			sourceTree = "<group>";
+		};
+		1058C7B0FEA5585E11CA2CBB /* Linked Frameworks */ = {
+			isa = PBXGroup;
+			children = (
+				F71F6B8F09A81E6F003221F4 /* CoreFoundation.framework */,
+				1058C7B1FEA5585E11CA2CBB /* FuzzyLexer.h */,
+			);
+			name = "Linked Frameworks";
+			sourceTree = "<group>";
+		};
+		1058C7B2FEA5585E11CA2CBB /* Other Frameworks */ = {
+			isa = PBXGroup;
+			children = (
+				0867D69BFE84028FC02AAC07 /* Foundation.framework */,
+			);
+			name = "Other Frameworks";
+			sourceTree = "<group>";
+		};
+		1A0F342C12EA411F00496BB8 /* polydiff */ = {
+			isa = PBXGroup;
+			children = (
+				1A0F347812EA444500496BB8 /* Poly.tokens */,
+				1A0F347912EA444500496BB8 /* PolyDifferentiator.m */,
+				1A0F347A12EA444500496BB8 /* PolyLexer.h */,
+				1A0F347B12EA444500496BB8 /* PolyLexer.m */,
+				1A0F347C12EA444500496BB8 /* PolyParser.h */,
+				1A0F347D12EA444500496BB8 /* PolyParser.m */,
+				1A0F347E12EA444500496BB8 /* Simplifier.h */,
+				1A0F347F12EA444500496BB8 /* Simplifier.m */,
+				1A0F348012EA444500496BB8 /* Simplifier.tokens */,
+				1A0F342D12EA411F00496BB8 /* files */,
+				1A0F342E12EA411F00496BB8 /* input */,
+				1A0F343012EA411F00496BB8 /* Main.m */,
+				1A0F343112EA411F00496BB8 /* output */,
+				1A0F343212EA411F00496BB8 /* Poly.g */,
+				1A0F343312EA411F00496BB8 /* PolyDifferentiator.g */,
+				1A0F343412EA411F00496BB8 /* PolyPrinter.g */,
+				1A0F343512EA411F00496BB8 /* Simplifier.g */,
+			);
+			path = polydiff;
+			sourceTree = "<group>";
+		};
+		1A1BCDC011CB04D20051A1EC /* Rules */ = {
+			isa = PBXGroup;
+			children = (
+				1AB5F47511E3869D00E065B0 /* RuleMapElement.h */,
+				1AB5F47611E3869D00E065B0 /* RuleMapElement.m */,
+				F72C5ECC0AB7E5A500282574 /* ParserRuleReturnScope.h */,
+				F72C5ECD0AB7E5A500282574 /* ParserRuleReturnScope.m */,
+				1A1BCDB911CB01E60051A1EC /* RuleReturnScope.h */,
+				1A1BCDBA11CB01E60051A1EC /* RuleReturnScope.m */,
+				1A1BCDCD11CB0B3D0051A1EC /* TreeRuleReturnScope.h */,
+				1A1BCDCE11CB0B3D0051A1EC /* TreeRuleReturnScope.m */,
+			);
+			name = Rules;
+			sourceTree = "<group>";
+		};
+		1A348B1411D2BE4F000C72FC /* test */ = {
+			isa = PBXGroup;
+			children = (
+				1A348B1511D2BE4F000C72FC /* runtime */,
+			);
+			path = test;
+			sourceTree = "<group>";
+		};
+		1A348B1511D2BE4F000C72FC /* runtime */ = {
+			isa = PBXGroup;
+			children = (
+				1A348B1611D2BE4F000C72FC /* misc */,
+				1A348B1B11D2BE4F000C72FC /* sets */,
+				1A348B1E11D2BE4F000C72FC /* stream */,
+				1A348B2111D2BE4F000C72FC /* token */,
+				1A348B2411D2BE4F000C72FC /* tree */,
+				1A77EE1912E6A03B007F323A /* RewriteRule */,
+			);
+			path = runtime;
+			sourceTree = "<group>";
+		};
+		1A348B1611D2BE4F000C72FC /* misc */ = {
+			isa = PBXGroup;
+			children = (
+				1A348B1711D2BE4F000C72FC /* FastQueueTest.h */,
+				1A348B1811D2BE4F000C72FC /* FastQueueTest.m */,
+				1A348B1911D2BE4F000C72FC /* IntArrayTest.h */,
+				1A348B1A11D2BE4F000C72FC /* IntArrayTest.m */,
+			);
+			path = misc;
+			sourceTree = "<group>";
+		};
+		1A348B1B11D2BE4F000C72FC /* sets */ = {
+			isa = PBXGroup;
+			children = (
+				1A348B1C11D2BE4F000C72FC /* ANTLRBitSetTest.h */,
+				1A348B1D11D2BE4F000C72FC /* ANTLRBitSetTest.m */,
+			);
+			path = sets;
+			sourceTree = "<group>";
+		};
+		1A348B1E11D2BE4F000C72FC /* stream */ = {
+			isa = PBXGroup;
+			children = (
+				1A348B1F11D2BE4F000C72FC /* ANTLRStringStreamTest.h */,
+				1A348B2011D2BE4F000C72FC /* ANTLRStringStreamTest.m */,
+			);
+			path = stream;
+			sourceTree = "<group>";
+		};
+		1A348B2111D2BE4F000C72FC /* token */ = {
+			isa = PBXGroup;
+			children = (
+				1A348B2211D2BE4F000C72FC /* CommonTokenTest.h */,
+				1A348B2311D2BE4F000C72FC /* CommonTokenTest.m */,
+			);
+			path = token;
+			sourceTree = "<group>";
+		};
+		1A348B2411D2BE4F000C72FC /* tree */ = {
+			isa = PBXGroup;
+			children = (
+				1A348B2511D2BE4F000C72FC /* CommonTreeTest.h */,
+				1A348B2611D2BE4F000C72FC /* CommonTreeTest.m */,
+			);
+			path = tree;
+			sourceTree = "<group>";
+		};
+		1A77EE1912E6A03B007F323A /* RewriteRule */ = {
+			isa = PBXGroup;
+			children = (
+				1A2B096312E797DE00A75133 /* TestRewriteRuleTokenStream.m */,
+				1A2B096612E797F600A75133 /* TestRewriteRuleTokenStream.h */,
+			);
+			name = RewriteRule;
+			path = ../..;
+			sourceTree = "<group>";
+		};
+		1AC5AC6D12E7BB7600DF0C58 /* treeparser */ = {
+			isa = PBXGroup;
+			children = (
+				1AC5ACC412E7C03C00DF0C58 /* Lang.tokens */,
+				1AC5ACC512E7C03C00DF0C58 /* LangDumpDecl.h */,
+				1AC5ACC612E7C03C00DF0C58 /* LangDumpDecl.m */,
+				1AC5ACC712E7C03C00DF0C58 /* LangDumpDecl.tokens */,
+				1AC5ACC812E7C03C00DF0C58 /* LangLexer.h */,
+				1AC5ACC912E7C03C00DF0C58 /* LangLexer.m */,
+				1AC5ACCA12E7C03C00DF0C58 /* LangParser.h */,
+				1AC5ACCB12E7C03C00DF0C58 /* LangParser.m */,
+				1AC5AC7212E7BBB600DF0C58 /* files */,
+				1AC5AC7312E7BBB600DF0C58 /* input */,
+				1AC5AC7412E7BBB600DF0C58 /* Lang.g */,
+				1AC5AC7512E7BBB600DF0C58 /* LangDumpDecl.g */,
+				1AC5AC7712E7BBB600DF0C58 /* output */,
+				1AC5AC7812E7BBB600DF0C58 /* README.txt */,
+				1AC5AC8112E7BC9100DF0C58 /* main.m */,
+			);
+			name = treeparser;
+			sourceTree = "<group>";
+		};
+		32C88DFF0371C24200C91783 /* Other Sources */ = {
+			isa = PBXGroup;
+			children = (
+				1A3A09BD11E235BD00D5EE26 /* antlr3.h */,
+				1A1CCCC711B727B5002E5F53 /* ANTLRError.h */,
+				32DBCF5E0370ADEE00C91783 /* ANTLR_Prefix.pch */,
+				F70AA7A509AA2A6900C3FD5E /* ANTLR.h */,
+				F77747550B23A70600D1F89B /* Debug.h */,
+			);
+			name = "Other Sources";
+			sourceTree = "<group>";
+		};
+		F700E85D0A5FA2C0005D0757 /* combined */ = {
+			isa = PBXGroup;
+			children = (
+				F738D35C0B07C105001813C4 /* Combined.tokens */,
+				F738D35D0B07C105001813C4 /* CombinedLexer.h */,
+				F738D35E0B07C105001813C4 /* CombinedLexer.m */,
+				F738D35F0B07C105001813C4 /* CombinedParser.h */,
+				F738D3600B07C105001813C4 /* CombinedParser.m */,
+				F700E85E0A5FA2DE005D0757 /* Combined.g */,
+				F700E86A0A5FA34D005D0757 /* main.m */,
+			);
+			name = combined;
+			sourceTree = "<group>";
+		};
+		F700ECCE0A5FE176005D0757 /* LL-star */ = {
+			isa = PBXGroup;
+			children = (
+				1ADE21F012E505D700E8A95C /* SimpleC.g */,
+				F738D27F0B07B9B6001813C4 /* SimpleC.tokens */,
+				F738D2800B07B9B6001813C4 /* SimpleCLexer.h */,
+				F738D2810B07B9B6001813C4 /* SimpleCLexer.m */,
+				F738D2820B07B9B6001813C4 /* SimpleCParser.h */,
+				F738D2510B07B842001813C4 /* SimpleCParser.m */,
+				F700ECCF0A5FE176005D0757 /* input */,
+				F700ECD00A5FE176005D0757 /* output */,
+				1A994CE412A84F3E001853FF /* SimpleC__.gl */,
+				F700ECE70A5FE25D005D0757 /* main.m */,
+			);
+			path = "LL-star";
+			sourceTree = "<group>";
+		};
+		F7037EBB0A05B06B0070435D /* examples */ = {
+			isa = PBXGroup;
+			children = (
+				F700E85D0A5FA2C0005D0757 /* combined */,
+				F79D56590A0E238100EA3CEE /* fuzzy */,
+				F7DD05E10A7B14BE006A006C /* hoistedPredicates */,
+				F7037EBC0A05B06B0070435D /* lexertest-simple */,
+				F700ECCE0A5FE176005D0757 /* LL-star */,
+				1A0F342C12EA411F00496BB8 /* polydiff */,
+				F7DD073B0A7B660A006A006C /* scopes */,
+				F72C5E2D0AB7529C00282574 /* simplecTreeParser */,
+				1AC5AC6D12E7BB7600DF0C58 /* treeparser */,
+				F7CD45FB0C64BA4B00FF933A /* treerewrite */,
+			);
+			path = examples;
+			sourceTree = "<group>";
+		};
+		F7037EBC0A05B06B0070435D /* lexertest-simple */ = {
+			isa = PBXGroup;
+			children = (
+				F7048FF50B07D05400D2F326 /* Test.tokens */,
+				F7048FF60B07D05400D2F326 /* TestLexer.h */,
+				F7048FF70B07D05400D2F326 /* TestLexer.m */,
+				F7037EBD0A05B06B0070435D /* TestLexer.g */,
+				F7E983940A0D6A5F00F16093 /* main.m */,
+			);
+			path = "lexertest-simple";
+			sourceTree = "<group>";
+		};
+		F70AA7AA09AA2AAB00C3FD5E /* Streams */ = {
+			isa = PBXGroup;
+			children = (
+				F71325850C4A05DC00B99F2D /* Trees */,
+				F70AA7B509AA2B8800C3FD5E /* CharStream.h */,
+				1A18EF5511B8028D0006186A /* BufferedTokenStream.h */,
+				1A18EF5611B8028D0006186A /* BufferedTokenStream.m */,
+				F700ED940A5FF2A5005D0757 /* CommonTokenStream.h */,
+				F700ED950A5FF2A5005D0757 /* CommonTokenStream.m */,
+				F70AA7AD09AA2AC000C3FD5E /* IntStream.h */,
+				1A1CCCA911B724B2002E5F53 /* LookaheadStream.h */,
+				1A1CCCAA11B724B2002E5F53 /* LookaheadStream.m */,
+				1AB4A58F11B9A0DA0076E91A /* StreamEnumerator.h */,
+				1AB4A59011B9A0DA0076E91A /* StreamEnumerator.m */,
+				BF7D9B65151936FB00B58218 /* ANTLRInputStream.h */,
+				BF7D9B66151936FB00B58218 /* ANTLRInputStream.m */,
+				BF7D9B61151936E700B58218 /* ANTLRFileStream.h */,
+				BF7D9B62151936E700B58218 /* ANTLRFileStream.m */,
+				BF7D9B691519371100B58218 /* ANTLRReaderStream.h */,
+				BF7D9B6A1519371200B58218 /* ANTLRReaderStream.m */,
+				F70AA7C509AA339900C3FD5E /* ANTLRStringStream.h */,
+				F70AA7C609AA339900C3FD5E /* ANTLRStringStream.m */,
+				BF7D9B6D1519373600B58218 /* RewriteRuleNodeStream.h */,
+				BF7D9B6E1519373600B58218 /* RewriteRuleNodeStream.m */,
+				F700ED7E0A5FF17C005D0757 /* TokenSource.h */,
+				F77766AE09DD53E800517181 /* TokenStream.h */,
+				1A1FFC5911CD12A400FBB452 /* TokenRewriteStream.h */,
+				1A1FFC5A11CD12A400FBB452 /* TokenRewriteStream.m */,
+				1A86BACD11EC1CD000C67A03 /* UnbufferedTokenStream.h */,
+				1A86BACE11EC1CD000C67A03 /* UnbufferedTokenStream.m */,
+			);
+			name = Streams;
+			sourceTree = "<group>";
+		};
+		F71325850C4A05DC00B99F2D /* Trees */ = {
+			isa = PBXGroup;
+			children = (
+				1A9CBD2411C9979600DA8FEF /* UnbufferedCommonTreeNodeStream.h */,
+				1A9CBD2511C9979600DA8FEF /* UnbufferedCommonTreeNodeStream.m */,
+				1A9CBD2611C9979600DA8FEF /* UnbufferedCommonTreeNodeStreamState.h */,
+				1A9CBD2711C9979600DA8FEF /* UnbufferedCommonTreeNodeStreamState.m */,
+				F72C5B820AB52AD300282574 /* TreeNodeStream.h */,
+				1A65B7D611B9532A00FD8754 /* BufferedTreeNodeStream.h */,
+				1A65B7D711B9532A00FD8754 /* BufferedTreeNodeStream.m */,
+				F76AA98E0CEA515A00AF044C /* CommonTreeNodeStream.h */,
+				F76AA98F0CEA515A00AF044C /* CommonTreeNodeStream.m */,
+				F71325860C4A060900B99F2D /* RewriteRuleElementStream.h */,
+				F71325870C4A060900B99F2D /* RewriteRuleElementStream.m */,
+				F70B11BB0C4C2B6400C3ECE0 /* RewriteRuleSubtreeStream.h */,
+				F70B11BC0C4C2B6400C3ECE0 /* RewriteRuleSubtreeStream.m */,
+				F70B11C10C4C2B7900C3ECE0 /* RewriteRuleTokenStream.h */,
+				F70B11C20C4C2B7900C3ECE0 /* RewriteRuleTokenStream.m */,
+			);
+			name = Trees;
+			sourceTree = "<group>";
+		};
+		F72C5E2D0AB7529C00282574 /* simplecTreeParser */ = {
+			isa = PBXGroup;
+			children = (
+				F7715D2E0AC9DE9E00ED984D /* SimpleCLexer.h */,
+				F7715D2F0AC9DE9E00ED984D /* SimpleCLexer.m */,
+				F7715D300AC9DE9E00ED984D /* SimpleCParser.h */,
+				F7715D310AC9DE9E00ED984D /* SimpleCParser.m */,
+				F741D0640B3812D40024DF3F /* SimpleCWalker.h */,
+				F741D0650B3812D40024DF3F /* SimpleCWalker.m */,
+				F72C5E2F0AB7529C00282574 /* input */,
+				F72C5E310AB7529C00282574 /* output */,
+				F7715D1C0AC9DDD800ED984D /* SimpleC.tokens */,
+				F7715D1D0AC9DDD800ED984D /* SimpleCWalker.tokens */,
+				F7715D1A0AC9DCE400ED984D /* SimpleC.g */,
+				F7715D1B0AC9DCE500ED984D /* SimpleCWalker.g */,
+				F72C5E560AB7E41000282574 /* main.m */,
+			);
+			path = simplecTreeParser;
+			sourceTree = "<group>";
+		};
+		F73E2B590A9CF83A005D6267 /* Trees */ = {
+			isa = PBXGroup;
+			children = (
+				BF7D9B511519363200B58218 /* ACBTree.h */,
+				BF7D9B521519363200B58218 /* ACBTree.m */,
+				1A1D465911BE73B2001575F3 /* BaseTreeAdaptor.h */,
+				1A1D465A11BE73B2001575F3 /* BaseTreeAdaptor.m */,
+				1A4D5AD411B55A45001C9482 /* BaseTree.h */,
+				1A4D5AD511B55A45001C9482 /* BaseTree.m */,
+				1A8ABFC511BA9B960038DBB0 /* CharStreamState.h */,
+				F70AA7CE09AA379300C3FD5E /* CharStreamState.m */,
+				F72C58E80AB3911D00282574 /* CommonTree.h */,
+				F72C58E90AB3911D00282574 /* CommonTree.m */,
+				F72C59A50AB4F20A00282574 /* CommonTreeAdaptor.h */,
+				F72C59A60AB4F20A00282574 /* CommonTreeAdaptor.m */,
+				F73E2B720A9CFE6A005D6267 /* Tree.h */,
+				F73E2B7A0A9D0AFC005D6267 /* TreeAdaptor.h */,
+				1A270BF711C1451200DCC8F3 /* TreeIterator.h */,
+				1A270BF811C1451200DCC8F3 /* TreeIterator.m */,
+				1AAC202A11CC621A00CF56D1 /* TreePatternLexer.h */,
+				1AAC202B11CC621A00CF56D1 /* TreePatternLexer.m */,
+				1AAC20A311CC790300CF56D1 /* TreePatternParser.h */,
+				1AAC20A411CC790300CF56D1 /* TreePatternParser.m */,
+				1A1BCE2811CB1A3E0051A1EC /* TreeRewriter.h */,
+				1A1BCE2911CB1A3E0051A1EC /* TreeRewriter.m */,
+				1A4A851011CBCE3E00E4BF1B /* TreeVisitor.h */,
+				1A4A851111CBCE3E00E4BF1B /* TreeVisitor.m */,
+				1A4A851611CBCE5500E4BF1B /* TreeVisitorAction.h */,
+				1A4A851711CBCE5500E4BF1B /* TreeVisitorAction.m */,
+				1A4A851C11CBCF3700E4BF1B /* TreeWizard.h */,
+				1A4A851D11CBCF3700E4BF1B /* TreeWizard.m */,
+			);
+			name = Trees;
+			sourceTree = "<group>";
+		};
+		F7492F8D09C0171900B25E30 /* Tokens */ = {
+			isa = PBXGroup;
+			children = (
+				F77765CA09DC583000517181 /* Token.h */,
+				F777660309DC5CF400517181 /* CommonToken.h */,
+				F777660409DC5CF400517181 /* CommonToken.m */,
+			);
+			name = Tokens;
+			sourceTree = "<group>";
+		};
+		F77744070B234A3B00D1F89B /* Debugging Categories */ = {
+			isa = PBXGroup;
+			children = (
+				F77744030B234A3400D1F89B /* Token+DebuggerSupport.h */,
+				F77744040B234A3400D1F89B /* Token+DebuggerSupport.m */,
+			);
+			name = "Debugging Categories";
+			sourceTree = "<group>";
+		};
+		F79D56590A0E238100EA3CEE /* fuzzy */ = {
+			isa = PBXGroup;
+			children = (
+				1ADB66F012E74341007C1661 /* FuzzyLexer.h */,
+				F72B8D090AD01DCB0013F1E2 /* Fuzzy.tokens */,
+				F72B8CFA0AD01D380013F1E2 /* Fuzzy.g */,
+				F72B8D0B0AD01DCB0013F1E2 /* FuzzyLexer.m */,
+				F706A55B0A0EC307008999AB /* input */,
+				F79D56C00A0E287500EA3CEE /* main.m */,
+			);
+			path = fuzzy;
+			sourceTree = "<group>";
+		};
+		F7A4094C096597C4002CC781 /* Exceptions */ = {
+			isa = PBXGroup;
+			children = (
+				F738D1730B07AEAA001813C4 /* FailedPredicateException.h */,
+				F738D1740B07AEAA001813C4 /* FailedPredicateException.m */,
+				1A26329311C53578000DCDD4 /* MismatchedNotSetException.h */,
+				1A26329411C53578000DCDD4 /* MismatchedNotSetException.m */,
+				F7037CEE0A0582FC0070435D /* MismatchedRangeException.h */,
+				F7037CEF0A0582FC0070435D /* MismatchedRangeException.m */,
+				F70380BA0A07FA0D0070435D /* MismatchedSetException.h */,
+				F70380BB0A07FA0D0070435D /* MismatchedSetException.m */,
+				F777668009DC719C00517181 /* MismatchedTokenException.h */,
+				F777668109DC719C00517181 /* MismatchedTokenException.m */,
+				F72C5D600AB63E0B00282574 /* MismatchedTreeNodeException.h */,
+				F72C5D610AB63E0B00282574 /* MismatchedTreeNodeException.m */,
+				1A6C451411BF4EE00039788A /* MissingTokenException.h */,
+				1A6C451511BF4EE00039788A /* MissingTokenException.m */,
+				1A8AC00A11BAEC710038DBB0 /* RuntimeException.h */,
+				1A8AC00B11BAEC710038DBB0 /* RuntimeException.m */,
+				F79D59890A0E51AB00EA3CEE /* NoViableAltException.h */,
+				F79D598A0A0E51AB00EA3CEE /* NoViableAltException.m */,
+				F777669109DC72D600517181 /* RecognitionException.h */,
+				F777669209DC72D600517181 /* RecognitionException.m */,
+				F700E6190A5F66EC005D0757 /* EarlyExitException.h */,
+				F700E61A0A5F66EC005D0757 /* EarlyExitException.m */,
+				F738D1750B07AEAA001813C4 /* TreeException.h */,
+				F738D1760B07AEAA001813C4 /* TreeException.m */,
+				1A6C452611BF50A40039788A /* UnwantedTokenException.h */,
+				1A6C452711BF50A40039788A /* UnwantedTokenException.m */,
+			);
+			name = Exceptions;
+			sourceTree = "<group>";
+		};
+		F7A40951096597D2002CC781 /* DFA */ = {
+			isa = PBXGroup;
+			children = (
+				F7754E3D0A5C0A0500D0873A /* DFA.h */,
+				F7754E3E0A5C0A0500D0873A /* DFA.m */,
+			);
+			name = DFA;
+			sourceTree = "<group>";
+		};
+		F7A4098809659BE5002CC781 /* Recognizer */ = {
+			isa = PBXGroup;
+			children = (
+				F7A4098B09659BF3002CC781 /* BaseRecognizer.h */,
+				F7A4098C09659BF3002CC781 /* BaseRecognizer.m */,
+				F7B1E5AC0CD7CF1900CE136E /* RecognizerSharedState.h */,
+				F7B1E5AD0CD7CF1900CE136E /* RecognizerSharedState.m */,
+				F7A4099109659BFB002CC781 /* Lexer.h */,
+				F7A4099209659BFB002CC781 /* Lexer.m */,
+				F7009AD90A1BE4AE002EDD5D /* LexerRuleReturnScope.h */,
+				F7009ADA0A1BE4AE002EDD5D /* LexerRuleReturnScope.m */,
+				F700E8F90A5FAD21005D0757 /* Parser.h */,
+				F700E8FA0A5FAD21005D0757 /* Parser.m */,
+				F72C5D540AB63C1D00282574 /* TreeParser.h */,
+				F72C5D550AB63C1D00282574 /* TreeParser.m */,
+				1A86B91911EB9F6300C67A03 /* ParseTree.h */,
+				1A86B91A11EB9F6300C67A03 /* ParseTree.m */,
+			);
+			name = Recognizer;
+			sourceTree = "<group>";
+		};
+		F7CD45FB0C64BA4B00FF933A /* treerewrite */ = {
+			isa = PBXGroup;
+			children = (
+				F7CD46340C64BB7300FF933A /* TreeRewrite.tokens */,
+				F7CD46350C64BB7300FF933A /* TreeRewriteLexer.h */,
+				F7CD46360C64BB7300FF933A /* TreeRewriteLexer.m */,
+				F7CD46370C64BB7300FF933A /* TreeRewriteParser.h */,
+				F7CD46380C64BB7300FF933A /* TreeRewriteParser.m */,
+				F7CD45FC0C64BA4B00FF933A /* main.m */,
+				F7CD45FD0C64BA4B00FF933A /* TreeRewrite.g */,
+			);
+			path = treerewrite;
+			sourceTree = "<group>";
+		};
+		F7DD05E10A7B14BE006A006C /* hoistedPredicates */ = {
+			isa = PBXGroup;
+			children = (
+				F738D2230B07B3BC001813C4 /* TParser.h */,
+				F738D2240B07B3BC001813C4 /* TParser.m */,
+				F7DD06E70A7B1700006A006C /* TLexer.h */,
+				F7DD06E80A7B1700006A006C /* TLexer.m */,
+				F7DD05E20A7B14BE006A006C /* input */,
+				F7DD05E30A7B14BE006A006C /* output */,
+				F7DD05E40A7B14BE006A006C /* T.g */,
+				F7DD05E70A7B1572006A006C /* main.m */,
+			);
+			path = hoistedPredicates;
+			sourceTree = "<group>";
+		};
+		F7DD073B0A7B660A006A006C /* scopes */ = {
+			isa = PBXGroup;
+			children = (
+				F738D1FD0B07B1CE001813C4 /* SymbolTable.tokens */,
+				F738D1FE0B07B1CE001813C4 /* SymbolTableLexer.h */,
+				F738D1FF0B07B1CE001813C4 /* SymbolTableLexer.m */,
+				F738D2000B07B1CE001813C4 /* SymbolTableParser.h */,
+				F738D2010B07B1CE001813C4 /* SymbolTableParser.m */,
+				F7DD073C0A7B660A006A006C /* input */,
+				F7DD073D0A7B660A006A006C /* output */,
+				F7DD073E0A7B660A006A006C /* SymbolTable.g */,
+				F7DD07800A7B67A7006A006C /* main.m */,
+			);
+			path = scopes;
+			sourceTree = "<group>";
+		};
+		F7E2610F0B1E43E60013F640 /* Debugging */ = {
+			isa = PBXGroup;
+			children = (
+				F77744070B234A3B00D1F89B /* Debugging Categories */,
+				F7CECD7D0B1E5C370054CC3B /* DebugEventListener.h */,
+				F7E261370B1E45580013F640 /* DebugEventSocketProxy.h */,
+				F7E261380B1E45580013F640 /* DebugEventSocketProxy.m */,
+				F7E261140B1E44320013F640 /* DebugParser.h */,
+				F7E261150B1E44320013F640 /* DebugParser.m */,
+				F7E2611E0B1E44E80013F640 /* DebugTokenStream.h */,
+				F7E2611F0B1E44E80013F640 /* DebugTokenStream.m */,
+				F7E261180B1E443C0013F640 /* DebugTreeParser.h */,
+				F7E261190B1E443C0013F640 /* DebugTreeParser.m */,
+				F7E261220B1E44FA0013F640 /* DebugTreeNodeStream.h */,
+				F7E261230B1E44FA0013F640 /* DebugTreeNodeStream.m */,
+				F7E261260B1E45070013F640 /* DebugTreeAdaptor.h */,
+				F7E261270B1E45070013F640 /* DebugTreeAdaptor.m */,
+			);
+			name = Debugging;
+			sourceTree = "<group>";
+		};
+		F7F218EB097AFB0C000472E9 /* Misc */ = {
+			isa = PBXGroup;
+			children = (
+				BF7D9B551519367800B58218 /* AMutableArray.h */,
+				BF7D9B561519367800B58218 /* AMutableArray.m */,
+				BF7D9B591519368C00B58218 /* AMutableDictionary.h */,
+				BF7D9B5A1519368C00B58218 /* AMutableDictionary.m */,
+				BF7D9B711519375200B58218 /* ArrayIterator.h */,
+				BF7D9B721519375200B58218 /* ArrayIterator.m */,
+				BF7D9B5D151936B500B58218 /* DoubleKeyMap.h */,
+				BF7D9B5E151936B600B58218 /* DoubleKeyMap.m */,
+				1A100AB911E604FE006ABF94 /* HashRule.h */,
+				1A100ABA11E604FE006ABF94 /* HashRule.m */,
+				1A2D218411E502DE00DFE328 /* NodeMapElement.h */,
+				1A2D218511E502DE00DFE328 /* NodeMapElement.m */,
+				1AB5F51C11E3BE2E00E065B0 /* PtrBuffer.h */,
+				1AB5F51D11E3BE2E00E065B0 /* PtrBuffer.m */,
+				1A3A08E811E213E100D5EE26 /* SymbolStack.h */,
+				1A3A08E911E213E100D5EE26 /* SymbolStack.m */,
+				1A3A08E411E213C500D5EE26 /* BaseStack.h */,
+				1A3A08E511E213C500D5EE26 /* BaseStack.m */,
+				1A45658711C9270D0082F421 /* BaseMapElement.h */,
+				1A45658811C9270D0082F421 /* BaseMapElement.m */,
+				F7F218EE097AFB1A000472E9 /* ANTLRBitSet.h */,
+				F7F218EF097AFB1A000472E9 /* ANTLRBitSet.m */,
+				1A1D467A11BE8E5A001575F3 /* CommonErrorNode.h */,
+				1A1D467B11BE8E5A001575F3 /* CommonErrorNode.m */,
+				1A1CCC9011B6FD39002E5F53 /* FastQueue.h */,
+				1A1CCC9111B6FD39002E5F53 /* FastQueue.m */,
+				1A1702FC11C05D4800F6978A /* HashMap.h */,
+				1A1702FD11C05D4800F6978A /* HashMap.m */,
+				1A1CCC9211B6FD39002E5F53 /* IntArray.h */,
+				1A1CCC9311B6FD39002E5F53 /* IntArray.m */,
+				1A16B13A11C66492002860C7 /* LinkBase.h */,
+				1A16B13B11C66492002860C7 /* LinkBase.m */,
+				1A1D466E11BE75C0001575F3 /* MapElement.h */,
+				1A1D466F11BE75C0001575F3 /* MapElement.m */,
+				1A5EA50911CFE7CE00E8932F /* Map.h */,
+				1A5EA50A11CFE7CE00E8932F /* Map.m */,
+				1A45657511C922BE0082F421 /* RuleMemo.h */,
+				1A45657611C922BE0082F421 /* RuleMemo.m */,
+				1AE8A96A11D9227A00D36FD6 /* RuleStack.h */,
+				1AE8A96B11D9227A00D36FD6 /* RuleStack.m */,
+				1A2D217311E4F57C00DFE328 /* UniqueIDMap.h */,
+				1A2D217411E4F57C00DFE328 /* UniqueIDMap.m */,
+			);
+			name = Misc;
+			sourceTree = "<group>";
+		};
+/* End PBXGroup section */
+
+/* Begin PBXHeadersBuildPhase section */
+		8DC2EF500486A6940098B216 /* Headers */ = {
+			isa = PBXHeadersBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F70AA7A609AA2A6900C3FD5E /* ANTLR.h in Headers */,
+				1A45658911C9270D0082F421 /* BaseMapElement.h in Headers */,
+				F7492F5D09C016A200B25E30 /* BaseRecognizer.h in Headers */,
+				1A4D5AD611B55A45001C9482 /* BaseTree.h in Headers */,
+				1A1D465B11BE73B2001575F3 /* BaseTreeAdaptor.h in Headers */,
+				1A10050711B8796E0022B434 /* ANTLRBitSet.h in Headers */,
+				1A10050611B8796D0022B434 /* BufferedTokenStream.h in Headers */,
+				1A65B7D811B9532A00FD8754 /* BufferedTreeNodeStream.h in Headers */,
+				1A67885311B87AC400A11EEC /* CharStream.h in Headers */,
+				1A8ABFC611BA9B960038DBB0 /* CharStreamState.h in Headers */,
+				1A1D467C11BE8E5A001575F3 /* CommonErrorNode.h in Headers */,
+				F777660509DC5CF400517181 /* CommonToken.h in Headers */,
+				F700ED960A5FF2A5005D0757 /* CommonTokenStream.h in Headers */,
+				F72C58EA0AB3911D00282574 /* CommonTree.h in Headers */,
+				F76AA9900CEA515A00AF044C /* CommonTreeNodeStream.h in Headers */,
+				F72C59A70AB4F20A00282574 /* CommonTreeAdaptor.h in Headers */,
+				1A12C96011B89F6B008C9BED /* DebugEventListener.h in Headers */,
+				F7E261390B1E45580013F640 /* DebugEventSocketProxy.h in Headers */,
+				F7E261160B1E44320013F640 /* DebugParser.h in Headers */,
+				F7E261200B1E44E80013F640 /* DebugTokenStream.h in Headers */,
+				F7E261240B1E44FA0013F640 /* DebugTreeNodeStream.h in Headers */,
+				F7E261280B1E45070013F640 /* DebugTreeAdaptor.h in Headers */,
+				F7E2611A0B1E443D0013F640 /* DebugTreeParser.h in Headers */,
+				F77747560B23A70600D1F89B /* Debug.h in Headers */,
+				F7754E3F0A5C0A0500D0873A /* DFA.h in Headers */,
+				F700E61B0A5F66EC005D0757 /* EarlyExitException.h in Headers */,
+				1A1CCCC811B727B5002E5F53 /* ANTLRError.h in Headers */,
+				1A10050911B879A80022B434 /* FailedPredicateException.h in Headers */,
+				1A10050811B879A40022B434 /* FastQueue.h in Headers */,
+				1A1702FE11C05D4800F6978A /* HashMap.h in Headers */,
+				1A10050B11B879B80022B434 /* IntArray.h in Headers */,
+				F70AA7AF09AA2AC000C3FD5E /* IntStream.h in Headers */,
+				F777678E09DD618000517181 /* Lexer.h in Headers */,
+				F7009ADB0A1BE4AE002EDD5D /* LexerRuleReturnScope.h in Headers */,
+				1A16B13C11C66492002860C7 /* LinkBase.h in Headers */,
+				1A1CCCAB11B724B2002E5F53 /* LookaheadStream.h in Headers */,
+				1A1D467011BE75C0001575F3 /* MapElement.h in Headers */,
+				1A5EA50B11CFE7CE00E8932F /* Map.h in Headers */,
+				1A26329511C53578000DCDD4 /* MismatchedNotSetException.h in Headers */,
+				F7037CF00A0582FC0070435D /* MismatchedRangeException.h in Headers */,
+				F70380BC0A07FA0D0070435D /* MismatchedSetException.h in Headers */,
+				F777668209DC719C00517181 /* MismatchedTokenException.h in Headers */,
+				F72C5D620AB63E0B00282574 /* MismatchedTreeNodeException.h in Headers */,
+				1A75BF5911D6B3FD0096C6F5 /* MissingTokenException.h in Headers */,
+				F79D598B0A0E51AB00EA3CEE /* NoViableAltException.h in Headers */,
+				F700E8FB0A5FAD21005D0757 /* Parser.h in Headers */,
+				F72C5ECE0AB7E5A500282574 /* ParserRuleReturnScope.h in Headers */,
+				F777669309DC72D600517181 /* RecognitionException.h in Headers */,
+				F7B1E5B00CD7CF1900CE136E /* RecognizerSharedState.h in Headers */,
+				F71325880C4A060900B99F2D /* RewriteRuleElementStream.h in Headers */,
+				F70B11BD0C4C2B6400C3ECE0 /* RewriteRuleSubtreeStream.h in Headers */,
+				F70B11C30C4C2B7900C3ECE0 /* RewriteRuleTokenStream.h in Headers */,
+				1A45657711C922BE0082F421 /* RuleMemo.h in Headers */,
+				1A1BCDBB11CB01E60051A1EC /* RuleReturnScope.h in Headers */,
+				1A8AC00C11BAEC710038DBB0 /* RuntimeException.h in Headers */,
+				1AB4A59111B9A0DA0076E91A /* StreamEnumerator.h in Headers */,
+				F70AA7C709AA339900C3FD5E /* ANTLRStringStream.h in Headers */,
+				F77765CC09DC583000517181 /* Token.h in Headers */,
+				F77766AF09DD53E800517181 /* TokenStream.h in Headers */,
+				F77744050B234A3400D1F89B /* Token+DebuggerSupport.h in Headers */,
+				F700ED7F0A5FF17C005D0757 /* TokenSource.h in Headers */,
+				F73E2B740A9CFE6A005D6267 /* Tree.h in Headers */,
+				F73E2B7C0A9D0AFC005D6267 /* TreeAdaptor.h in Headers */,
+				F738D1790B07AEAA001813C4 /* TreeException.h in Headers */,
+				1A270BF911C1451200DCC8F3 /* TreeIterator.h in Headers */,
+				F72C5B840AB52AD300282574 /* TreeNodeStream.h in Headers */,
+				F72C5D560AB63C1D00282574 /* TreeParser.h in Headers */,
+				1AAC202C11CC621A00CF56D1 /* TreePatternLexer.h in Headers */,
+				1AAC20A511CC790300CF56D1 /* TreePatternParser.h in Headers */,
+				1A1BCDCF11CB0B3D0051A1EC /* TreeRuleReturnScope.h in Headers */,
+				1A1BCE2A11CB1A3E0051A1EC /* TreeRewriter.h in Headers */,
+				1A4A851211CBCE3E00E4BF1B /* TreeVisitor.h in Headers */,
+				1A4A851811CBCE5500E4BF1B /* TreeVisitorAction.h in Headers */,
+				1A4A851E11CBCF3700E4BF1B /* TreeWizard.h in Headers */,
+				1A6C452811BF50A40039788A /* UnwantedTokenException.h in Headers */,
+				1AE8A96C11D9227A00D36FD6 /* RuleStack.h in Headers */,
+				1A3A08E611E213C500D5EE26 /* BaseStack.h in Headers */,
+				1A3A08EA11E213E100D5EE26 /* SymbolStack.h in Headers */,
+				1A3A09BE11E235BD00D5EE26 /* antlr3.h in Headers */,
+				1AB5F47711E3869D00E065B0 /* RuleMapElement.h in Headers */,
+				1AB5F51E11E3BE2E00E065B0 /* PtrBuffer.h in Headers */,
+				1A2D217511E4F57C00DFE328 /* UniqueIDMap.h in Headers */,
+				1A2D218611E502DE00DFE328 /* NodeMapElement.h in Headers */,
+				1A100ABB11E604FE006ABF94 /* HashRule.h in Headers */,
+				1AEECE1511E7EB3C00554AAF /* TokenRewriteStream.h in Headers */,
+				1A86B91B11EB9F6300C67A03 /* ParseTree.h in Headers */,
+				1A86BACF11EC1CD000C67A03 /* UnbufferedTokenStream.h in Headers */,
+				1ADB66F112E74341007C1661 /* FuzzyLexer.h in Headers */,
+				1A0F348212EA444500496BB8 /* PolyLexer.h in Headers */,
+				1A0F348412EA444500496BB8 /* PolyParser.h in Headers */,
+				1A0F348612EA444500496BB8 /* Simplifier.h in Headers */,
+				BF7D9B531519363200B58218 /* ACBTree.h in Headers */,
+				BF7D9B571519367800B58218 /* AMutableArray.h in Headers */,
+				BF7D9B5B1519368C00B58218 /* AMutableDictionary.h in Headers */,
+				BF7D9B5F151936B600B58218 /* DoubleKeyMap.h in Headers */,
+				BF7D9B63151936E700B58218 /* ANTLRFileStream.h in Headers */,
+				BF7D9B67151936FC00B58218 /* ANTLRInputStream.h in Headers */,
+				BF7D9B6B1519371200B58218 /* ANTLRReaderStream.h in Headers */,
+				BF7D9B6F1519373700B58218 /* RewriteRuleNodeStream.h in Headers */,
+				BF7D9B731519375200B58218 /* ArrayIterator.h in Headers */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+/* End PBXHeadersBuildPhase section */
+
+/* Begin PBXLegacyTarget section */
+		1A0F343B12EA425700496BB8 /* Regenerate polydiff */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar Poly.g PolyDifferentiator.g Simplifier.g PolyPrinter.g";
+			buildConfigurationList = 1A0F343C12EA425700496BB8 /* Build configuration list for PBXLegacyTarget "Regenerate polydiff" */;
+			buildPhases = (
+			);
+			buildToolPath = /usr/bin/java;
+			buildWorkingDirectory = "$(PROJECT_DIR)/examples/polydiff";
+			dependencies = (
+			);
+			name = "Regenerate polydiff";
+			passBuildSettingsInEnvironment = 1;
+			productName = Untitled;
+		};
+		1AC5AC9312E7BE0400DF0C58 /* Regenerate treeparser */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar Lang.g LangDumpDecl.g";
+			buildConfigurationList = 1AC5AC9412E7BE0400DF0C58 /* Build configuration list for PBXLegacyTarget "Regenerate treeparser" */;
+			buildPhases = (
+			);
+			buildToolPath = /usr/bin/java;
+			buildWorkingDirectory = "$(PROJECT_DIR)/examples/treeparser";
+			dependencies = (
+			);
+			name = "Regenerate treeparser";
+			passBuildSettingsInEnvironment = 1;
+			productName = Untitled;
+		};
+		F76287450B7151E3006AA7EF /* Regenerate fuzzy */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar Fuzzy.g";
+			buildConfigurationList = F76287460B715201006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate fuzzy" */;
+			buildPhases = (
+			);
+			buildToolPath = /usr/bin/java;
+			buildWorkingDirectory = "$(PROJECT_DIR)/examples/fuzzy";
+			dependencies = (
+			);
+			name = "Regenerate fuzzy";
+			passBuildSettingsInEnvironment = 1;
+			productName = Untitled;
+		};
+		F76287780B71557E006AA7EF /* Regenerate lexertest-simple */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar TestLexer.g";
+			buildConfigurationList = F76287790B71557E006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate lexertest-simple" */;
+			buildPhases = (
+			);
+			buildToolPath = /usr/bin/java;
+			buildWorkingDirectory = "$(PROJECT_DIR)/examples/lexertest-simple";
+			dependencies = (
+			);
+			name = "Regenerate lexertest-simple";
+			passBuildSettingsInEnvironment = 1;
+			productName = Untitled;
+		};
+		F762877E0B71559C006AA7EF /* Regenerate combined */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar Combined.g";
+			buildConfigurationList = F762877F0B71559C006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate combined" */;
+			buildPhases = (
+			);
+			buildToolPath = /usr/bin/java;
+			buildWorkingDirectory = "$(PROJECT_DIR)/examples/combined";
+			dependencies = (
+			);
+			name = "Regenerate combined";
+			passBuildSettingsInEnvironment = 1;
+			productName = Untitled;
+		};
+		F76287820B71559F006AA7EF /* Regenerate LL-star */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar  SimpleC.g";
+			buildConfigurationList = F76287830B71559F006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate LL-star" */;
+			buildPhases = (
+			);
+			buildToolPath = /usr/bin/java;
+			buildWorkingDirectory = "$(PROJECT_DIR)/examples/LL-star";
+			dependencies = (
+			);
+			name = "Regenerate LL-star";
+			passBuildSettingsInEnvironment = 1;
+			productName = Untitled;
+		};
+		F76287860B7155A2006AA7EF /* Regenerate hoistedPredicates */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar  T.g";
+			buildConfigurationList = F76287870B7155A2006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate hoistedPredicates" */;
+			buildPhases = (
+			);
+			buildToolPath = /usr/bin/java;
+			buildWorkingDirectory = "$(PROJECT_DIR)/examples/hoistedPredicates";
+			dependencies = (
+			);
+			name = "Regenerate hoistedPredicates";
+			passBuildSettingsInEnvironment = 1;
+			productName = Untitled;
+		};
+		F762878A0B7155AB006AA7EF /* Regenerate scopes */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar  SymbolTable.g";
+			buildConfigurationList = F762878B0B7155AB006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate scopes" */;
+			buildPhases = (
+			);
+			buildToolPath = /usr/bin/java;
+			buildWorkingDirectory = "$(PROJECT_DIR)/examples/scopes";
+			dependencies = (
+			);
+			name = "Regenerate scopes";
+			passBuildSettingsInEnvironment = 1;
+			productName = Untitled;
+		};
+		F762878E0B7155AF006AA7EF /* Regenerate simplectree */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar SimpleC.g SimpleCWalker.g";
+			buildConfigurationList = F762878F0B7155AF006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate simplectree" */;
+			buildPhases = (
+			);
+			buildToolPath = /usr/bin/java;
+			buildWorkingDirectory = "$(PROJECT_DIR)/examples/simplecTreeParser";
+			dependencies = (
+			);
+			name = "Regenerate simplectree";
+			passBuildSettingsInEnvironment = 1;
+			productName = Untitled;
+		};
+		F7CD47610C64D23800FF933A /* Regenerate treerewrite */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "-jar /Library/Java/Extensions/antlr-3.3.1.jar TreeRewrite.g";
+			buildConfigurationList = F7CD47620C64D23800FF933A /* Build configuration list for PBXLegacyTarget "Regenerate treerewrite" */;
+			buildPhases = (
+			);
+			buildToolPath = /usr/bin/java;
+			buildWorkingDirectory = "$(PROJECT_DIR)/examples/treerewrite";
+			dependencies = (
+			);
+			name = "Regenerate treerewrite";
+			passBuildSettingsInEnvironment = 1;
+			productName = Untitled;
+		};
+/* End PBXLegacyTarget section */
+
+/* Begin PBXNativeTarget section */
+		1A0F345712EA42D800496BB8 /* polydiff */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1A0F346212EA42D800496BB8 /* Build configuration list for PBXNativeTarget "polydiff" */;
+			buildPhases = (
+				1A0F345812EA42D800496BB8 /* Sources */,
+				1A0F345D12EA42D800496BB8 /* Frameworks */,
+			);
+			buildRules = (
+				1A0F346112EA42D800496BB8 /* PBXBuildRule */,
+			);
+			dependencies = (
+			);
+			name = polydiff;
+			productName = treerewrite;
+			productReference = 1A0F346612EA42D800496BB8 /* polydiff */;
+			productType = "com.apple.product-type.tool";
+		};
+		1A1210FA11D3A5D900F27B38 /* ANTLRCommonTokenTest */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1A12110011D3A5DB00F27B38 /* Build configuration list for PBXNativeTarget "ANTLRCommonTokenTest" */;
+			buildPhases = (
+				1A1210F611D3A5D900F27B38 /* Resources */,
+				1A1210F711D3A5D900F27B38 /* Sources */,
+				1A1210F811D3A5D900F27B38 /* Frameworks */,
+				1A1210F911D3A5D900F27B38 /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = ANTLRCommonTokenTest;
+			productName = ANTLRCommonTokenTest;
+			productReference = 1A1210FB11D3A5D900F27B38 /* ANTLRCommonTokenTest.octest */;
+			productType = "com.apple.product-type.bundle";
+		};
+		1A1211D011D3BF4600F27B38 /* ANTLRStringStreamTest */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1A1211D611D3BF4800F27B38 /* Build configuration list for PBXNativeTarget "ANTLRStringStreamTest" */;
+			buildPhases = (
+				1A1211CC11D3BF4600F27B38 /* Resources */,
+				1A1211CD11D3BF4600F27B38 /* Sources */,
+				1A1211CE11D3BF4600F27B38 /* Frameworks */,
+				1A1211CF11D3BF4600F27B38 /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = ANTLRStringStreamTest;
+			productName = ANTLRStringStreamTest;
+			productReference = 1A1211D111D3BF4700F27B38 /* ANTLRStringStreamTest.octest */;
+			productType = "com.apple.product-type.bundle";
+		};
+		1A12122311D3C92400F27B38 /* ANTLRFastQueueTest */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1A12122911D3C92500F27B38 /* Build configuration list for PBXNativeTarget "ANTLRFastQueueTest" */;
+			buildPhases = (
+				1A12121F11D3C92400F27B38 /* Resources */,
+				1A12122011D3C92400F27B38 /* Sources */,
+				1A12122111D3C92400F27B38 /* Frameworks */,
+				1A12122211D3C92400F27B38 /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = ANTLRFastQueueTest;
+			productName = ANTLRFastQueueTest;
+			productReference = 1A12122411D3C92400F27B38 /* ANTLRFastQueueTest.octest */;
+			productType = "com.apple.product-type.bundle";
+		};
+		1A1212DB11D3F53600F27B38 /* ANTLRIntArrayTest */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1A1212E111D3F53700F27B38 /* Build configuration list for PBXNativeTarget "ANTLRIntArrayTest" */;
+			buildPhases = (
+				1A1212D711D3F53600F27B38 /* Resources */,
+				1A1212D811D3F53600F27B38 /* Sources */,
+				1A1212D911D3F53600F27B38 /* Frameworks */,
+				1A1212DA11D3F53600F27B38 /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = ANTLRIntArrayTest;
+			productName = ANTLRIntArrayTest;
+			productReference = 1A1212DC11D3F53600F27B38 /* ANTLRIntArrayTest.octest */;
+			productType = "com.apple.product-type.bundle";
+		};
+		1A12130B11D3F7CD00F27B38 /* ANTLRCommonTreeTest */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1A12131111D3F7CE00F27B38 /* Build configuration list for PBXNativeTarget "ANTLRCommonTreeTest" */;
+			buildPhases = (
+				1A12130711D3F7CD00F27B38 /* Resources */,
+				1A12130811D3F7CD00F27B38 /* Sources */,
+				1A12130911D3F7CD00F27B38 /* Frameworks */,
+				1A12130A11D3F7CD00F27B38 /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = ANTLRCommonTreeTest;
+			productName = ANTLRCommonTreeTest;
+			productReference = 1A12130C11D3F7CD00F27B38 /* ANTLRCommonTreeTest.octest */;
+			productType = "com.apple.product-type.bundle";
+		};
+		1A348B4D11D2BEE8000C72FC /* Test */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1A348B5311D2BEE9000C72FC /* Build configuration list for PBXNativeTarget "Test" */;
+			buildPhases = (
+				1A348B4911D2BEE8000C72FC /* Resources */,
+				1A348B4A11D2BEE8000C72FC /* Sources */,
+				1A348B4B11D2BEE8000C72FC /* Frameworks */,
+				1A348B4C11D2BEE8000C72FC /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+				1A12134511D3FDA500F27B38 /* PBXTargetDependency */,
+				1A12134711D3FDA500F27B38 /* PBXTargetDependency */,
+				1A12134911D3FDA500F27B38 /* PBXTargetDependency */,
+				1A12134B11D3FDA500F27B38 /* PBXTargetDependency */,
+				1A12134D11D3FDA500F27B38 /* PBXTargetDependency */,
+				1A12134F11D3FDA500F27B38 /* PBXTargetDependency */,
+			);
+			name = Test;
+			productName = Test;
+			productReference = 1A348B4E11D2BEE8000C72FC /* Test.octest */;
+			productType = "com.apple.product-type.bundle";
+		};
+		1A348BEB11D2D0A1000C72FC /* ANTLRBitsetTest */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1A348BF111D2D0A2000C72FC /* Build configuration list for PBXNativeTarget "ANTLRBitsetTest" */;
+			buildPhases = (
+				1A348BE711D2D0A1000C72FC /* Resources */,
+				1A348BE811D2D0A1000C72FC /* Sources */,
+				1A348BE911D2D0A1000C72FC /* Frameworks */,
+				1A348BEA11D2D0A1000C72FC /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = ANTLRBitsetTest;
+			productName = ANTLRBitsetTest;
+			productReference = 1A348BEC11D2D0A1000C72FC /* ANTLRBitsetTest.octest */;
+			productType = "com.apple.product-type.bundle";
+		};
+		1A77EE8812E6A552007F323A /* TreeRewriteRuleTokenStream */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1A77EE8E12E6A553007F323A /* Build configuration list for PBXNativeTarget "TreeRewriteRuleTokenStream" */;
+			buildPhases = (
+				1A77EE8412E6A552007F323A /* Resources */,
+				1A77EE8512E6A552007F323A /* Sources */,
+				1A77EE8612E6A552007F323A /* Frameworks */,
+				1A77EE8712E6A552007F323A /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = TreeRewriteRuleTokenStream;
+			productName = TreeRewriteRuleTokenStream;
+			productReference = 1A77EE8912E6A552007F323A /* TreeRewriteRuleTokenStream.octest */;
+			productType = "com.apple.product-type.bundle";
+		};
+		1AC5AC9812E7BEFE00DF0C58 /* treeparser */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1AC5ACA312E7BEFE00DF0C58 /* Build configuration list for PBXNativeTarget "treeparser" */;
+			buildPhases = (
+				1AC5AC9912E7BEFE00DF0C58 /* Sources */,
+				1AC5AC9D12E7BEFE00DF0C58 /* Frameworks */,
+			);
+			buildRules = (
+				1AC5ACA212E7BEFE00DF0C58 /* PBXBuildRule */,
+			);
+			dependencies = (
+			);
+			name = treeparser;
+			productName = treerewrite;
+			productReference = 1AC5ACA712E7BEFE00DF0C58 /* treeparser */;
+			productType = "com.apple.product-type.tool";
+		};
+		8DC2EF4F0486A6940098B216 /* ANTLR */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 1DEB91AD08733DA50010E9CD /* Build configuration list for PBXNativeTarget "ANTLR" */;
+			buildPhases = (
+				8DC2EF500486A6940098B216 /* Headers */,
+				8DC2EF540486A6940098B216 /* Sources */,
+				8DC2EF560486A6940098B216 /* Frameworks */,
+				1A994CC412A84A46001853FF /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = ANTLR;
+			productInstallPath = "$(HOME)/Library/Frameworks";
+			productName = ANTLR;
+			productReference = 8DC2EF5B0486A6940098B216 /* ANTLR.framework */;
+			productType = "com.apple.product-type.framework";
+		};
+		F700E8630A5FA31D005D0757 /* combined */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = F700E86C0A5FA34D005D0757 /* Build configuration list for PBXNativeTarget "combined" */;
+			buildPhases = (
+				F700E8610A5FA31D005D0757 /* Sources */,
+				F700E8620A5FA31D005D0757 /* Frameworks */,
+			);
+			buildRules = (
+				1A994DC612A85BFC001853FF /* PBXBuildRule */,
+			);
+			dependencies = (
+			);
+			name = combined;
+			productName = combined;
+			productReference = F700E8640A5FA31D005D0757 /* combined */;
+			productType = "com.apple.product-type.tool";
+		};
+		F700ECD60A5FE186005D0757 /* LL-star */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = F700ECDC0A5FE1BF005D0757 /* Build configuration list for PBXNativeTarget "LL-star" */;
+			buildPhases = (
+				F700ECD40A5FE186005D0757 /* Sources */,
+				F700ECD50A5FE186005D0757 /* Frameworks */,
+			);
+			buildRules = (
+				1A994CF212A84FD3001853FF /* PBXBuildRule */,
+			);
+			dependencies = (
+			);
+			name = "LL-star";
+			productName = "LL-star";
+			productReference = F700ECD70A5FE186005D0757 /* LL-star */;
+			productType = "com.apple.product-type.tool";
+		};
+		F7037E9F0A05AFB60070435D /* lexertest-simple */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = F7037EB80A05AFEF0070435D /* Build configuration list for PBXNativeTarget "lexertest-simple" */;
+			buildPhases = (
+				F7037E9D0A05AFB60070435D /* Sources */,
+				F7037E9E0A05AFB60070435D /* Frameworks */,
+			);
+			buildRules = (
+				1A994DC912A85BFC001853FF /* PBXBuildRule */,
+			);
+			dependencies = (
+			);
+			name = "lexertest-simple";
+			productName = "lexertest-simple";
+			productReference = F7037EA00A05AFB60070435D /* lexertest-simple */;
+			productType = "com.apple.product-type.tool";
+		};
+		F72C5E5A0AB7E4C900282574 /* simplectree */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = F72C5E660AB7E4C900282574 /* Build configuration list for PBXNativeTarget "simplectree" */;
+			buildPhases = (
+				F72C5E5D0AB7E4C900282574 /* Sources */,
+				F72C5E610AB7E4C900282574 /* Frameworks */,
+			);
+			buildRules = (
+				1A994D4F12A85987001853FF /* PBXBuildRule */,
+			);
+			dependencies = (
+			);
+			name = simplectree;
+			productName = scopes;
+			productReference = F72C5E690AB7E4C900282574 /* simplectree */;
+			productType = "com.apple.product-type.tool";
+		};
+		F79D565F0A0E23A400EA3CEE /* fuzzy */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = F79D566F0A0E23D600EA3CEE /* Build configuration list for PBXNativeTarget "fuzzy" */;
+			buildPhases = (
+				F79D565D0A0E23A400EA3CEE /* Sources */,
+				F79D565E0A0E23A400EA3CEE /* Frameworks */,
+				F706A5710A0EC357008999AB /* CopyFiles */,
+			);
+			buildRules = (
+				1A994DC712A85BFC001853FF /* PBXBuildRule */,
+			);
+			dependencies = (
+			);
+			name = fuzzy;
+			productName = fuzzy;
+			productReference = F79D56600A0E23A400EA3CEE /* fuzzy */;
+			productType = "com.apple.product-type.tool";
+		};
+		F7CD475C0C64D22800FF933A /* treerewrite */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = F7CD477C0C64D27000FF933A /* Build configuration list for PBXNativeTarget "treerewrite" */;
+			buildPhases = (
+				F7CD475A0C64D22800FF933A /* Sources */,
+				F7CD475B0C64D22800FF933A /* Frameworks */,
+			);
+			buildRules = (
+				1A994D3E12A858E1001853FF /* PBXBuildRule */,
+			);
+			dependencies = (
+			);
+			name = treerewrite;
+			productName = treerewrite;
+			productReference = F7CD475D0C64D22800FF933A /* treerewrite */;
+			productType = "com.apple.product-type.tool";
+		};
+		F7DD05ED0A7B15E1006A006C /* hoistedPredicates */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = F7DD05F00A7B1640006A006C /* Build configuration list for PBXNativeTarget "hoistedPredicates" */;
+			buildPhases = (
+				F7DD05EB0A7B15E1006A006C /* Sources */,
+				F7DD05EC0A7B15E1006A006C /* Frameworks */,
+			);
+			buildRules = (
+				1A994DC812A85BFC001853FF /* PBXBuildRule */,
+			);
+			dependencies = (
+			);
+			name = hoistedPredicates;
+			productName = hoistedPredicates;
+			productReference = F7DD05EE0A7B15E1006A006C /* hoistedPredicates */;
+			productType = "com.apple.product-type.tool";
+		};
+		F7DD07430A7B6618006A006C /* scopes */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = F7DD07790A7B6682006A006C /* Build configuration list for PBXNativeTarget "scopes" */;
+			buildPhases = (
+				F7DD07410A7B6618006A006C /* Sources */,
+				F7DD07420A7B6618006A006C /* Frameworks */,
+			);
+			buildRules = (
+				1A994D8512A85ABE001853FF /* PBXBuildRule */,
+			);
+			dependencies = (
+			);
+			name = scopes;
+			productName = scopes;
+			productReference = F7DD07440A7B6618006A006C /* scopes */;
+			productType = "com.apple.product-type.tool";
+		};
+/* End PBXNativeTarget section */
+
+/* Begin PBXProject section */
+		0867D690FE84028FC02AAC07 /* Project object */ = {
+			isa = PBXProject;
+			buildConfigurationList = 1DEB91B108733DA50010E9CD /* Build configuration list for PBXProject "ANTLR" */;
+			compatibilityVersion = "Xcode 3.2";
+			developmentRegion = English;
+			hasScannedForEncodings = 1;
+			knownRegions = (
+				English,
+				Japanese,
+				French,
+				German,
+			);
+			mainGroup = 0867D691FE84028FC02AAC07 /* ANTLR */;
+			productRefGroup = 034768DFFF38A50411DB9C8B /* Products */;
+			projectDirPath = "";
+			projectRoot = "";
+			targets = (
+				8DC2EF4F0486A6940098B216 /* ANTLR */,
+				1A348B4D11D2BEE8000C72FC /* Test */,
+				1A348BEB11D2D0A1000C72FC /* ANTLRBitsetTest */,
+				1A1210FA11D3A5D900F27B38 /* ANTLRCommonTokenTest */,
+				1A12130B11D3F7CD00F27B38 /* ANTLRCommonTreeTest */,
+				1A12122311D3C92400F27B38 /* ANTLRFastQueueTest */,
+				1A1212DB11D3F53600F27B38 /* ANTLRIntArrayTest */,
+				1A1211D011D3BF4600F27B38 /* ANTLRStringStreamTest */,
+				1A77EE8812E6A552007F323A /* TreeRewriteRuleTokenStream */,
+				F762873F0B71519B006AA7EF /* Regenerate all examples */,
+				F762877E0B71559C006AA7EF /* Regenerate combined */,
+				F700E8630A5FA31D005D0757 /* combined */,
+				F76287450B7151E3006AA7EF /* Regenerate fuzzy */,
+				F79D565F0A0E23A400EA3CEE /* fuzzy */,
+				F76287860B7155A2006AA7EF /* Regenerate hoistedPredicates */,
+				F7DD05ED0A7B15E1006A006C /* hoistedPredicates */,
+				F76287780B71557E006AA7EF /* Regenerate lexertest-simple */,
+				F7037E9F0A05AFB60070435D /* lexertest-simple */,
+				F76287820B71559F006AA7EF /* Regenerate LL-star */,
+				F700ECD60A5FE186005D0757 /* LL-star */,
+				1A0F343B12EA425700496BB8 /* Regenerate polydiff */,
+				1A0F345712EA42D800496BB8 /* polydiff */,
+				F762878A0B7155AB006AA7EF /* Regenerate scopes */,
+				F7DD07430A7B6618006A006C /* scopes */,
+				F762878E0B7155AF006AA7EF /* Regenerate simplectree */,
+				F72C5E5A0AB7E4C900282574 /* simplectree */,
+				1AC5AC9312E7BE0400DF0C58 /* Regenerate treeparser */,
+				1AC5AC9812E7BEFE00DF0C58 /* treeparser */,
+				F7CD47610C64D23800FF933A /* Regenerate treerewrite */,
+				F7CD475C0C64D22800FF933A /* treerewrite */,
+			);
+		};
+/* End PBXProject section */
+
+/* Begin PBXResourcesBuildPhase section */
+		1A1210F611D3A5D900F27B38 /* Resources */ = {
+			isa = PBXResourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A1211CC11D3BF4600F27B38 /* Resources */ = {
+			isa = PBXResourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A12121F11D3C92400F27B38 /* Resources */ = {
+			isa = PBXResourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A1212D711D3F53600F27B38 /* Resources */ = {
+			isa = PBXResourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A12130711D3F7CD00F27B38 /* Resources */ = {
+			isa = PBXResourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A348B4911D2BEE8000C72FC /* Resources */ = {
+			isa = PBXResourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A348BE711D2D0A1000C72FC /* Resources */ = {
+			isa = PBXResourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A77EE8412E6A552007F323A /* Resources */ = {
+			isa = PBXResourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+/* End PBXResourcesBuildPhase section */
+
+/* Begin PBXShellScriptBuildPhase section */
+		1A1210F911D3A5D900F27B38 /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
+		};
+		1A1211CF11D3BF4600F27B38 /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
+		};
+		1A12122211D3C92400F27B38 /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
+		};
+		1A1212DA11D3F53600F27B38 /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
+		};
+		1A12130A11D3F7CD00F27B38 /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
+		};
+		1A348B4C11D2BEE8000C72FC /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
+		};
+		1A348BEA11D2D0A1000C72FC /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
+		};
+		1A77EE8712E6A552007F323A /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "# Run the unit tests in this test bundle.\n\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\"\n";
+		};
+		1A994CC412A84A46001853FF /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "";
+		};
+/* End PBXShellScriptBuildPhase section */
+
+/* Begin PBXSourcesBuildPhase section */
+		1A0F345812EA42D800496BB8 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A0F346D12EA434F00496BB8 /* Main.m in Sources */,
+				1A0F348912EA444500496BB8 /* PolyLexer.m in Sources */,
+				1A0F348A12EA444500496BB8 /* PolyParser.m in Sources */,
+				1A01BD9312EB5A6000428792 /* Simplifier.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A1210F711D3A5D900F27B38 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A12110311D3A62B00F27B38 /* CommonTokenTest.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A1211CD11D3BF4600F27B38 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A1211DE11D3BFC900F27B38 /* ANTLRStringStreamTest.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A12122011D3C92400F27B38 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A12126211D3CA0100F27B38 /* FastQueueTest.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A1212D811D3F53600F27B38 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A1212E711D3F59300F27B38 /* IntArrayTest.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A12130811D3F7CD00F27B38 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A12131711D3F80500F27B38 /* CommonTreeTest.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A348B4A11D2BEE8000C72FC /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A348BA511D2C6A0000C72FC /* ANTLRBitSetTest.m in Sources */,
+				1A348BA811D2C6AD000C72FC /* CommonTokenTest.m in Sources */,
+				1A348BAB11D2C6B8000C72FC /* CommonTreeTest.m in Sources */,
+				1A348BAE11D2C6C6000C72FC /* FastQueueTest.m in Sources */,
+				1A348BAF11D2C6D3000C72FC /* IntArrayTest.m in Sources */,
+				1A348BB211D2C6E3000C72FC /* ANTLRStringStreamTest.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A348BE811D2D0A1000C72FC /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A348C0611D2D22B000C72FC /* ANTLRBitSetTest.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1A77EE8512E6A552007F323A /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		1AC5AC9912E7BEFE00DF0C58 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1AC5ACAD12E7BF4E00DF0C58 /* main.m in Sources */,
+				1AC5ACE612E7CE4700DF0C58 /* LangParser.m in Sources */,
+				1AC5ACE712E7CE4C00DF0C58 /* LangLexer.m in Sources */,
+				1AC5ACE812E7CE5100DF0C58 /* LangDumpDecl.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		8DC2EF540486A6940098B216 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A67885211B87ABA00A11EEC /* BaseTree.m in Sources */,
+				1A67885411B87AEA00A11EEC /* FastQueue.m in Sources */,
+				1A67885511B87AEF00A11EEC /* IntArray.m in Sources */,
+				1A6788FC11B893E100A11EEC /* BaseRecognizer.m in Sources */,
+				1A12C95911B89F62008C9BED /* ANTLRBitSet.m in Sources */,
+				1A12C95A11B89F64008C9BED /* BufferedTokenStream.m in Sources */,
+				1A12C95B11B89F65008C9BED /* CommonToken.m in Sources */,
+				1A12C95C11B89F67008C9BED /* CommonTokenStream.m in Sources */,
+				1A12C95D11B89F68008C9BED /* CommonTree.m in Sources */,
+				1A12C95E11B89F69008C9BED /* CommonTreeAdaptor.m in Sources */,
+				1A12C95F11B89F6A008C9BED /* CommonTreeNodeStream.m in Sources */,
+				1A12C96111B89F6F008C9BED /* Lexer.m in Sources */,
+				1A12C96211B89F70008C9BED /* LexerRuleReturnScope.m in Sources */,
+				1A12C96311B89F76008C9BED /* LookaheadStream.m in Sources */,
+				1A12C96411B89F76008C9BED /* MismatchedRangeException.m in Sources */,
+				1A12C96511B89F77008C9BED /* MismatchedSetException.m in Sources */,
+				1A12C96611B89F78008C9BED /* MismatchedTokenException.m in Sources */,
+				1A12C96711B89F7A008C9BED /* MismatchedTreeNodeException.m in Sources */,
+				1A12C96811B89F7B008C9BED /* NoViableAltException.m in Sources */,
+				1A12C96911B89F7E008C9BED /* Parser.m in Sources */,
+				1A12C96A11B89F7F008C9BED /* ParserRuleReturnScope.m in Sources */,
+				1A12C96B11B89F80008C9BED /* RecognitionException.m in Sources */,
+				1A12C96C11B89F82008C9BED /* RecognizerSharedState.m in Sources */,
+				1A12C96D11B89F83008C9BED /* RewriteRuleElementStream.m in Sources */,
+				1A12C96E11B89F84008C9BED /* RewriteRuleSubtreeStream.m in Sources */,
+				1A12C96F11B89F85008C9BED /* RewriteRuleTokenStream.m in Sources */,
+				1A12C97011B89F87008C9BED /* ANTLRStringStream.m in Sources */,
+				1A12C97111B89F8B008C9BED /* CharStreamState.m in Sources */,
+				1A12C97211B89F8C008C9BED /* Token+DebuggerSupport.m in Sources */,
+				1A12C97311B89F8E008C9BED /* TreeException.m in Sources */,
+				1A12C97411B89F90008C9BED /* TreeParser.m in Sources */,
+				1A65B7D911B9532A00FD8754 /* BufferedTreeNodeStream.m in Sources */,
+				1AB4A54211B995290076E91A /* EarlyExitException.m in Sources */,
+				1AB4A54311B9952A0076E91A /* FailedPredicateException.m in Sources */,
+				1AB4A59211B9A0DA0076E91A /* StreamEnumerator.m in Sources */,
+				1A8AC00D11BAEC710038DBB0 /* RuntimeException.m in Sources */,
+				1A1D465C11BE73B2001575F3 /* BaseTreeAdaptor.m in Sources */,
+				1A1D467111BE75C0001575F3 /* MapElement.m in Sources */,
+				1A1D467D11BE8E5A001575F3 /* CommonErrorNode.m in Sources */,
+				1A6C451711BF4EE00039788A /* MissingTokenException.m in Sources */,
+				1A6C452911BF50A40039788A /* UnwantedTokenException.m in Sources */,
+				1A1702FF11C05D4800F6978A /* HashMap.m in Sources */,
+				1A270BFA11C1451200DCC8F3 /* TreeIterator.m in Sources */,
+				1A26329611C53578000DCDD4 /* MismatchedNotSetException.m in Sources */,
+				1A16B13D11C66492002860C7 /* LinkBase.m in Sources */,
+				1A45657811C922BE0082F421 /* RuleMemo.m in Sources */,
+				1A45658A11C9270D0082F421 /* BaseMapElement.m in Sources */,
+				1A1BCDBC11CB01E60051A1EC /* RuleReturnScope.m in Sources */,
+				1A1BCDD011CB0B3D0051A1EC /* TreeRuleReturnScope.m in Sources */,
+				1A1BCE2B11CB1A3E0051A1EC /* TreeRewriter.m in Sources */,
+				1A4A851311CBCE3E00E4BF1B /* TreeVisitor.m in Sources */,
+				1A4A851911CBCE5500E4BF1B /* TreeVisitorAction.m in Sources */,
+				1A4A851F11CBCF3700E4BF1B /* TreeWizard.m in Sources */,
+				1AAC202D11CC621A00CF56D1 /* TreePatternLexer.m in Sources */,
+				1AAC20A611CC790300CF56D1 /* TreePatternParser.m in Sources */,
+				1A5EA50C11CFE7CE00E8932F /* Map.m in Sources */,
+				1A75BFBA11D6C2B10096C6F5 /* DFA.m in Sources */,
+				1AE8A96D11D9227A00D36FD6 /* RuleStack.m in Sources */,
+				1A3A08E711E213C500D5EE26 /* BaseStack.m in Sources */,
+				1A3A08EB11E213E100D5EE26 /* SymbolStack.m in Sources */,
+				1AB5F47811E3869D00E065B0 /* RuleMapElement.m in Sources */,
+				1AB5F51F11E3BE2E00E065B0 /* PtrBuffer.m in Sources */,
+				1A2D217611E4F57C00DFE328 /* UniqueIDMap.m in Sources */,
+				1A2D218711E502DE00DFE328 /* NodeMapElement.m in Sources */,
+				1A100ABC11E604FE006ABF94 /* HashRule.m in Sources */,
+				1AEECE1611E7EB3D00554AAF /* TokenRewriteStream.m in Sources */,
+				1A86B91C11EB9F6300C67A03 /* ParseTree.m in Sources */,
+				1A86BAD011EC1CD000C67A03 /* UnbufferedTokenStream.m in Sources */,
+				BF7D9B541519363200B58218 /* ACBTree.m in Sources */,
+				BF7D9B581519367800B58218 /* AMutableArray.m in Sources */,
+				BF7D9B5C1519368C00B58218 /* AMutableDictionary.m in Sources */,
+				BF7D9B60151936B600B58218 /* DoubleKeyMap.m in Sources */,
+				BF7D9B64151936E700B58218 /* ANTLRFileStream.m in Sources */,
+				BF7D9B68151936FC00B58218 /* ANTLRInputStream.m in Sources */,
+				BF7D9B6C1519371200B58218 /* ANTLRReaderStream.m in Sources */,
+				BF7D9B701519373700B58218 /* RewriteRuleNodeStream.m in Sources */,
+				BF7D9B741519375200B58218 /* ArrayIterator.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F700E8610A5FA31D005D0757 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F738D3610B07C105001813C4 /* CombinedLexer.m in Sources */,
+				F738D3620B07C105001813C4 /* CombinedParser.m in Sources */,
+				1A20C56512D6267500C2072A /* main.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F700ECD40A5FE186005D0757 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F738D3190B07BDB7001813C4 /* main.m in Sources */,
+				F76287150B714E82006AA7EF /* SimpleCParser.m in Sources */,
+				F76287160B714E83006AA7EF /* SimpleCLexer.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F7037E9D0A05AFB60070435D /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F7048FF80B07D05400D2F326 /* TestLexer.m in Sources */,
+				F7048FF90B07D05800D2F326 /* main.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F72C5E5D0AB7E4C900282574 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				1A406B5612E8F2ED005EF037 /* main.m in Sources */,
+				F741D0830B381E720024DF3F /* SimpleCWalker.m in Sources */,
+				F741D0840B381E730024DF3F /* SimpleCParser.m in Sources */,
+				F741D08E0B381EA90024DF3F /* SimpleCLexer.m in Sources */,
+				1AC5ACD612E7C05800DF0C58 /* LangLexer.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F79D565D0A0E23A400EA3CEE /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F738D37E0B07C3BD001813C4 /* main.m in Sources */,
+				F76287170B714EA9006AA7EF /* FuzzyLexer.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F7CD475A0C64D22800FF933A /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F7CD47650C64D24C00FF933A /* TreeRewriteLexer.m in Sources */,
+				F7CD47660C64D24D00FF933A /* TreeRewriteParser.m in Sources */,
+				F7CD47670C64D24D00FF933A /* main.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F7DD05EB0A7B15E1006A006C /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F738D2120B07B32D001813C4 /* T.g in Sources */,
+				F738D2220B07B39F001813C4 /* main.m in Sources */,
+				F76287130B714E77006AA7EF /* TLexer.m in Sources */,
+				F76287140B714E78006AA7EF /* TParser.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		F7DD07410A7B6618006A006C /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				F738D1FC0B07B1BD001813C4 /* main.m in Sources */,
+				F738D20D0B07B265001813C4 /* SymbolTableParser.m in Sources */,
+				F738D20E0B07B266001813C4 /* SymbolTableLexer.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+/* End PBXSourcesBuildPhase section */
+
+/* Begin PBXTargetDependency section */
+		1A0F347112EA43BA00496BB8 /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 1A0F343B12EA425700496BB8 /* Regenerate polydiff */;
+			targetProxy = 1A0F347012EA43BA00496BB8 /* PBXContainerItemProxy */;
+		};
+		1A0F347312EA43BA00496BB8 /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 1AC5AC9312E7BE0400DF0C58 /* Regenerate treeparser */;
+			targetProxy = 1A0F347212EA43BA00496BB8 /* PBXContainerItemProxy */;
+		};
+		1A0F347512EA43BA00496BB8 /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = F7CD47610C64D23800FF933A /* Regenerate treerewrite */;
+			targetProxy = 1A0F347412EA43BA00496BB8 /* PBXContainerItemProxy */;
+		};
+		1A12134511D3FDA500F27B38 /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 1A348BEB11D2D0A1000C72FC /* ANTLRBitsetTest */;
+			targetProxy = 1A12134411D3FDA500F27B38 /* PBXContainerItemProxy */;
+		};
+		1A12134711D3FDA500F27B38 /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 1A1210FA11D3A5D900F27B38 /* ANTLRCommonTokenTest */;
+			targetProxy = 1A12134611D3FDA500F27B38 /* PBXContainerItemProxy */;
+		};
+		1A12134911D3FDA500F27B38 /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 1A12130B11D3F7CD00F27B38 /* ANTLRCommonTreeTest */;
+			targetProxy = 1A12134811D3FDA500F27B38 /* PBXContainerItemProxy */;
+		};
+		1A12134B11D3FDA500F27B38 /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 1A12122311D3C92400F27B38 /* ANTLRFastQueueTest */;
+			targetProxy = 1A12134A11D3FDA500F27B38 /* PBXContainerItemProxy */;
+		};
+		1A12134D11D3FDA500F27B38 /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 1A1212DB11D3F53600F27B38 /* ANTLRIntArrayTest */;
+			targetProxy = 1A12134C11D3FDA500F27B38 /* PBXContainerItemProxy */;
+		};
+		1A12134F11D3FDA500F27B38 /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 1A1211D011D3BF4600F27B38 /* ANTLRStringStreamTest */;
+			targetProxy = 1A12134E11D3FDA500F27B38 /* PBXContainerItemProxy */;
+		};
+		F762874C0B715417006AA7EF /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = F76287450B7151E3006AA7EF /* Regenerate fuzzy */;
+			targetProxy = F762874B0B715417006AA7EF /* PBXContainerItemProxy */;
+		};
+		F76287A70B7157C2006AA7EF /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = F762877E0B71559C006AA7EF /* Regenerate combined */;
+			targetProxy = F76287A60B7157C2006AA7EF /* PBXContainerItemProxy */;
+		};
+		F76287A90B7157C2006AA7EF /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = F76287820B71559F006AA7EF /* Regenerate LL-star */;
+			targetProxy = F76287A80B7157C2006AA7EF /* PBXContainerItemProxy */;
+		};
+		F76287AB0B7157C2006AA7EF /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = F76287860B7155A2006AA7EF /* Regenerate hoistedPredicates */;
+			targetProxy = F76287AA0B7157C2006AA7EF /* PBXContainerItemProxy */;
+		};
+		F76287AD0B7157C2006AA7EF /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = F762878A0B7155AB006AA7EF /* Regenerate scopes */;
+			targetProxy = F76287AC0B7157C2006AA7EF /* PBXContainerItemProxy */;
+		};
+		F76287AF0B7157C2006AA7EF /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = F762878E0B7155AF006AA7EF /* Regenerate simplectree */;
+			targetProxy = F76287AE0B7157C2006AA7EF /* PBXContainerItemProxy */;
+		};
+		F79EFB140C5845A300ABAB3D /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = F76287780B71557E006AA7EF /* Regenerate lexertest-simple */;
+			targetProxy = F79EFB130C5845A300ABAB3D /* PBXContainerItemProxy */;
+		};
+/* End PBXTargetDependency section */
+
+/* Begin PBXVariantGroup section */
+		089C1666FE841158C02AAC07 /* InfoPlist.strings */ = {
+			isa = PBXVariantGroup;
+			children = (
+				089C1667FE841158C02AAC07 /* English */,
+			);
+			name = InfoPlist.strings;
+			sourceTree = "<group>";
+		};
+/* End PBXVariantGroup section */
+
+/* Begin XCBuildConfiguration section */
+		1A0F343D12EA425700496BB8 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = polydiff;
+			};
+			name = Debug;
+		};
+		1A0F343E12EA425700496BB8 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = treerewrite;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1A0F343F12EA425700496BB8 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = treerewrite;
+			};
+			name = Release;
+		};
+		1A0F346312EA42D800496BB8 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
+				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
+				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
+				ALTERNATE_PERMISSIONS_FILES = "";
+				ALWAYS_SEARCH_USER_PATHS = YES;
+				ANTLR_DEBUG = YES;
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/polydiff";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/polydiff";
+				ARCHS = "$(NATIVE_ARCH)";
+				BUILD_VARIANTS = normal;
+				BUNDLE_LOADER = "";
+				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
+				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
+				COPYING_PRESERVES_HFS_DATA = NO;
+				COPY_PHASE_STRIP = NO;
+				CURRENT_PROJECT_VERSION = "";
+				DEAD_CODE_STRIPPING = NO;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				DEPLOYMENT_LOCATION = NO;
+				DEPLOYMENT_POSTPROCESSING = NO;
+				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
+				DYLIB_COMPATIBILITY_VERSION = "";
+				DYLIB_CURRENT_VERSION = "";
+				EXECUTABLE_EXTENSION = "";
+				EXECUTABLE_PREFIX = "";
+				EXPORTED_SYMBOLS_FILE = "";
+				FRAMEWORK_SEARCH_PATHS = "";
+				FRAMEWORK_VERSION = A;
+				GCC_ALTIVEC_EXTENSIONS = NO;
+				GCC_AUTO_VECTORIZATION = NO;
+				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
+				GCC_CW_ASM_SYNTAX = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = full;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_ASM_KEYWORD = YES;
+				GCC_ENABLE_CPP_EXCEPTIONS = YES;
+				GCC_ENABLE_CPP_RTTI = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = supported;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SSE3_EXTENSIONS = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_ENABLE_TRIGRAPHS = NO;
+				GCC_FAST_MATH = NO;
+				GCC_FAST_OBJC_DISPATCH = NO;
+				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
+				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
+				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
+				GCC_INPUT_FILETYPE = automatic;
+				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
+				GCC_MODEL_PPC64 = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_NO_COMMON_BLOCKS = NO;
+				GCC_OBJC_CALL_CXX_CDTORS = NO;
+				GCC_ONE_BYTE_BOOL = NO;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_PREPROCESSOR_DEFINITIONS = "";
+				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
+				GCC_REUSE_STRINGS = YES;
+				GCC_SHORT_ENUMS = NO;
+				GCC_STRICT_ALIASING = NO;
+				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
+				GCC_THREADSAFE_STATICS = YES;
+				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
+				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
+				GCC_UNROLL_LOOPS = NO;
+				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
+				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
+				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
+				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
+				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
+				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
+				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
+				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
+				GCC_WARN_MISSING_PARENTHESES = NO;
+				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
+				GCC_WARN_PEDANTIC = NO;
+				GCC_WARN_SHADOW = NO;
+				GCC_WARN_SIGN_COMPARE = NO;
+				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = NO;
+				GCC_WARN_UNKNOWN_PRAGMAS = NO;
+				GCC_WARN_UNUSED_FUNCTION = NO;
+				GCC_WARN_UNUSED_LABEL = NO;
+				GCC_WARN_UNUSED_PARAMETER = NO;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				GENERATE_MASTER_OBJECT_FILE = NO;
+				GENERATE_PKGINFO_FILE = NO;
+				GENERATE_PROFILING_CODE = NO;
+				HEADER_SEARCH_PATHS = "";
+				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
+				INFOPLIST_FILE = "";
+				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
+				INFOPLIST_PREFIX_HEADER = "";
+				INFOPLIST_PREPROCESS = NO;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
+				INIT_ROUTINE = "";
+				INSTALL_GROUP = "$(GROUP)";
+				INSTALL_MODE_FLAG = "a-w,a+rX";
+				INSTALL_OWNER = "$(USER)";
+				INSTALL_PATH = "$(HOME)/bin";
+				KEEP_PRIVATE_EXTERNS = NO;
+				LIBRARY_SEARCH_PATHS = "";
+				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
+				LINKER_DISPLAYS_MANGLED_NAMES = NO;
+				LINK_WITH_STANDARD_LIBRARIES = YES;
+				MACH_O_TYPE = "";
+				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
+				OBJROOT = Build/Intermediates;
+				ONLY_ACTIVE_ARCH = YES;
+				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
+				OTHER_CFLAGS = "";
+				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
+				PRELINK_FLAGS = "";
+				PRELINK_LIBS = "";
+				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
+				PRIVATE_HEADERS_FOLDER_PATH = "";
+				PRODUCT_NAME = polydiff;
+				PUBLIC_HEADERS_FOLDER_PATH = "";
+				REZ_SEARCH_PATHS = "";
+				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
+				SDKROOT = macosx10.6;
+				SECTORDER_FLAGS = "";
+				SEPARATE_STRIP = NO;
+				SEPARATE_SYMBOL_EDIT = NO;
+				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
+				SKIP_INSTALL = NO;
+				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
+				STRIPFLAGS = "";
+				STRIP_INSTALLED_PRODUCT = "";
+				STRIP_STYLE = all;
+				SYMROOT = Build;
+				TEST_HOST = "";
+				TEST_RIG = "";
+				UNEXPORTED_SYMBOLS_FILE = "";
+				USER_HEADER_SEARCH_PATHS = "";
+				VERSIONING_SYSTEM = "";
+				WARNING_CFLAGS = "";
+				WARNING_LDFLAGS = "";
+				WRAPPER_EXTENSION = "";
+				ZERO_LINK = NO;
+			};
+			name = Debug;
+		};
+		1A0F346412EA42D800496BB8 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
+				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
+				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
+				ALTERNATE_PERMISSIONS_FILES = "";
+				ALWAYS_SEARCH_USER_PATHS = YES;
+				ANTLR_DEBUG = YES;
+				ANTLR_EXTRA_JAVA_ARGS = "/Users/acondit/Projects/idea/antlr3/classes:/Users/acondit/Projects/idea/stringtemplate/classes:/Library/Java/Extensions/antlr-3.3.1.jar";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/treerewrite";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/treerewrite";
+				ARCHS = "$(NATIVE_ARCH)";
+				BUILD_VARIANTS = normal;
+				BUNDLE_LOADER = "";
+				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
+				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
+				COPYING_PRESERVES_HFS_DATA = NO;
+				COPY_PHASE_STRIP = NO;
+				CURRENT_PROJECT_VERSION = "";
+				DEAD_CODE_STRIPPING = NO;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				DEPLOYMENT_LOCATION = NO;
+				DEPLOYMENT_POSTPROCESSING = NO;
+				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
+				DYLIB_COMPATIBILITY_VERSION = "";
+				DYLIB_CURRENT_VERSION = "";
+				EXECUTABLE_EXTENSION = "";
+				EXECUTABLE_PREFIX = "";
+				EXPORTED_SYMBOLS_FILE = "";
+				FRAMEWORK_SEARCH_PATHS = "";
+				FRAMEWORK_VERSION = A;
+				GCC_ALTIVEC_EXTENSIONS = NO;
+				GCC_AUTO_VECTORIZATION = NO;
+				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
+				GCC_CW_ASM_SYNTAX = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = full;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_ASM_KEYWORD = YES;
+				GCC_ENABLE_CPP_EXCEPTIONS = YES;
+				GCC_ENABLE_CPP_RTTI = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SSE3_EXTENSIONS = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_ENABLE_TRIGRAPHS = NO;
+				GCC_FAST_MATH = NO;
+				GCC_FAST_OBJC_DISPATCH = NO;
+				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
+				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
+				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
+				GCC_INPUT_FILETYPE = automatic;
+				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
+				GCC_MODEL_PPC64 = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_NO_COMMON_BLOCKS = NO;
+				GCC_OBJC_CALL_CXX_CDTORS = NO;
+				GCC_ONE_BYTE_BOOL = NO;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_PREPROCESSOR_DEFINITIONS = "";
+				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
+				GCC_REUSE_STRINGS = YES;
+				GCC_SHORT_ENUMS = NO;
+				GCC_STRICT_ALIASING = NO;
+				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
+				GCC_THREADSAFE_STATICS = YES;
+				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
+				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
+				GCC_UNROLL_LOOPS = NO;
+				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
+				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
+				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
+				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
+				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
+				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
+				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
+				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
+				GCC_WARN_MISSING_PARENTHESES = NO;
+				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
+				GCC_WARN_PEDANTIC = NO;
+				GCC_WARN_SHADOW = NO;
+				GCC_WARN_SIGN_COMPARE = NO;
+				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = NO;
+				GCC_WARN_UNKNOWN_PRAGMAS = NO;
+				GCC_WARN_UNUSED_FUNCTION = NO;
+				GCC_WARN_UNUSED_LABEL = NO;
+				GCC_WARN_UNUSED_PARAMETER = NO;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				GENERATE_MASTER_OBJECT_FILE = NO;
+				GENERATE_PKGINFO_FILE = NO;
+				GENERATE_PROFILING_CODE = NO;
+				HEADER_SEARCH_PATHS = "";
+				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
+				INFOPLIST_FILE = "";
+				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
+				INFOPLIST_PREFIX_HEADER = "";
+				INFOPLIST_PREPROCESS = NO;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
+				INIT_ROUTINE = "";
+				INSTALL_GROUP = "$(GROUP)";
+				INSTALL_MODE_FLAG = "a-w,a+rX";
+				INSTALL_OWNER = "$(USER)";
+				INSTALL_PATH = "$(HOME)/bin";
+				KEEP_PRIVATE_EXTERNS = NO;
+				LIBRARY_SEARCH_PATHS = "";
+				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
+				LINKER_DISPLAYS_MANGLED_NAMES = NO;
+				LINK_WITH_STANDARD_LIBRARIES = YES;
+				MACH_O_TYPE = "";
+				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
+				OBJROOT = /Users/acondit/Projects/Intermediates;
+				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
+				OTHER_CFLAGS = "";
+				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
+				PRELINK_FLAGS = "";
+				PRELINK_LIBS = "";
+				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
+				PRIVATE_HEADERS_FOLDER_PATH = "";
+				PRODUCT_NAME = treerewrite;
+				PUBLIC_HEADERS_FOLDER_PATH = "";
+				REZ_SEARCH_PATHS = "";
+				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
+				SDKROOT = macosx10.4;
+				SECTORDER_FLAGS = "";
+				SEPARATE_STRIP = NO;
+				SEPARATE_SYMBOL_EDIT = NO;
+				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
+				SKIP_INSTALL = NO;
+				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
+				STRIPFLAGS = "";
+				STRIP_INSTALLED_PRODUCT = "";
+				STRIP_STYLE = all;
+				SYMROOT = /Users/acondit/Projects/Antlr/Build;
+				TEST_HOST = "";
+				TEST_RIG = "";
+				UNEXPORTED_SYMBOLS_FILE = "";
+				USER_HEADER_SEARCH_PATHS = "";
+				VERSIONING_SYSTEM = "";
+				WARNING_CFLAGS = "";
+				WARNING_LDFLAGS = "";
+				WRAPPER_EXTENSION = "";
+				ZERO_LINK = NO;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1A0F346512EA42D800496BB8 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
+				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
+				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
+				ALTERNATE_PERMISSIONS_FILES = "";
+				ALWAYS_SEARCH_USER_PATHS = YES;
+				ARCHS = (
+					ppc,
+					i386,
+				);
+				BUILD_VARIANTS = normal;
+				BUNDLE_LOADER = "";
+				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
+				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
+				COPYING_PRESERVES_HFS_DATA = NO;
+				COPY_PHASE_STRIP = YES;
+				CURRENT_PROJECT_VERSION = "";
+				DEAD_CODE_STRIPPING = NO;
+				DEBUG_INFORMATION_FORMAT = stabs;
+				DEPLOYMENT_LOCATION = NO;
+				DEPLOYMENT_POSTPROCESSING = NO;
+				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
+				DYLIB_COMPATIBILITY_VERSION = "";
+				DYLIB_CURRENT_VERSION = "";
+				EXECUTABLE_EXTENSION = "";
+				EXECUTABLE_PREFIX = "";
+				EXPORTED_SYMBOLS_FILE = "";
+				FRAMEWORK_SEARCH_PATHS = "";
+				FRAMEWORK_VERSION = A;
+				GCC_ALTIVEC_EXTENSIONS = NO;
+				GCC_AUTO_VECTORIZATION = NO;
+				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
+				GCC_CW_ASM_SYNTAX = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = default;
+				GCC_DYNAMIC_NO_PIC = YES;
+				GCC_ENABLE_ASM_KEYWORD = YES;
+				GCC_ENABLE_CPP_EXCEPTIONS = YES;
+				GCC_ENABLE_CPP_RTTI = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SSE3_EXTENSIONS = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = YES;
+				GCC_ENABLE_TRIGRAPHS = NO;
+				GCC_FAST_MATH = NO;
+				GCC_FAST_OBJC_DISPATCH = NO;
+				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
+				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
+				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
+				GCC_INPUT_FILETYPE = automatic;
+				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
+				GCC_MODEL_PPC64 = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_NO_COMMON_BLOCKS = NO;
+				GCC_OBJC_CALL_CXX_CDTORS = NO;
+				GCC_ONE_BYTE_BOOL = NO;
+				GCC_OPTIMIZATION_LEVEL = s;
+				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_PREPROCESSOR_DEFINITIONS = "";
+				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
+				GCC_REUSE_STRINGS = YES;
+				GCC_SHORT_ENUMS = NO;
+				GCC_STRICT_ALIASING = NO;
+				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
+				GCC_THREADSAFE_STATICS = YES;
+				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
+				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
+				GCC_UNROLL_LOOPS = NO;
+				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
+				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
+				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
+				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
+				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
+				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
+				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
+				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
+				GCC_WARN_MISSING_PARENTHESES = NO;
+				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
+				GCC_WARN_PEDANTIC = NO;
+				GCC_WARN_SHADOW = YES;
+				GCC_WARN_SIGN_COMPARE = YES;
+				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = YES;
+				GCC_WARN_UNKNOWN_PRAGMAS = NO;
+				GCC_WARN_UNUSED_FUNCTION = NO;
+				GCC_WARN_UNUSED_LABEL = NO;
+				GCC_WARN_UNUSED_PARAMETER = YES;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				GENERATE_MASTER_OBJECT_FILE = NO;
+				GENERATE_PKGINFO_FILE = NO;
+				GENERATE_PROFILING_CODE = NO;
+				HEADER_SEARCH_PATHS = "";
+				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
+				INFOPLIST_FILE = "";
+				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
+				INFOPLIST_PREFIX_HEADER = "";
+				INFOPLIST_PREPROCESS = NO;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
+				INIT_ROUTINE = "";
+				INSTALL_GROUP = "$(GROUP)";
+				INSTALL_MODE_FLAG = "a-w,a+rX";
+				INSTALL_OWNER = "$(USER)";
+				INSTALL_PATH = "$(HOME)/bin";
+				KEEP_PRIVATE_EXTERNS = NO;
+				LIBRARY_SEARCH_PATHS = "";
+				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
+				LINKER_DISPLAYS_MANGLED_NAMES = NO;
+				LINK_WITH_STANDARD_LIBRARIES = YES;
+				MACH_O_TYPE = "";
+				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
+				OBJROOT = /Users/kroepke/Projects/Intermediates;
+				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
+				OTHER_CFLAGS = "";
+				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
+				PRELINK_FLAGS = "";
+				PRELINK_LIBS = "";
+				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
+				PRIVATE_HEADERS_FOLDER_PATH = "";
+				PRODUCT_NAME = treerewrite;
+				PUBLIC_HEADERS_FOLDER_PATH = "";
+				REZ_SEARCH_PATHS = "";
+				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
+				SDKROOT = macosx10.4;
+				SECTORDER_FLAGS = "";
+				SEPARATE_STRIP = NO;
+				SEPARATE_SYMBOL_EDIT = NO;
+				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
+				SKIP_INSTALL = NO;
+				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
+				STRIPFLAGS = "";
+				STRIP_INSTALLED_PRODUCT = "";
+				STRIP_STYLE = all;
+				SYMROOT = /Users/kroepke/Projects/Build;
+				TEST_HOST = "";
+				TEST_RIG = "";
+				UNEXPORTED_SYMBOLS_FILE = "";
+				USER_HEADER_SEARCH_PATHS = "";
+				VERSIONING_SYSTEM = "";
+				WARNING_CFLAGS = "";
+				WARNING_LDFLAGS = "";
+				WRAPPER_EXTENSION = "";
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		1A1210FD11D3A5DB00F27B38 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = required;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 1;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRCommonTokenTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRCommonTokenTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = Debug;
+		};
+		1A1210FE11D3A5DB00F27B38 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRCommonTokenTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRCommonTokenTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1A1210FF11D3A5DB00F27B38 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = YES;
+				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRCommonTokenTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRCommonTokenTest;
+				WRAPPER_EXTENSION = octest;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		1A1211D311D3BF4700F27B38 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = required;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 1;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRStringStreamTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRStringStreamTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = Debug;
+		};
+		1A1211D411D3BF4700F27B38 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRStringStreamTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRStringStreamTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1A1211D511D3BF4700F27B38 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = YES;
+				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRStringStreamTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRStringStreamTest;
+				WRAPPER_EXTENSION = octest;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		1A12122611D3C92500F27B38 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = required;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 1;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRFastQueueTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRFastQueueTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = Debug;
+		};
+		1A12122711D3C92500F27B38 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRFastQueueTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRFastQueueTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1A12122811D3C92500F27B38 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = YES;
+				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRFastQueueTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRFastQueueTest;
+				WRAPPER_EXTENSION = octest;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		1A1212DE11D3F53700F27B38 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = required;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 1;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRIntArrayTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRIntArrayTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = Debug;
+		};
+		1A1212DF11D3F53700F27B38 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRIntArrayTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRIntArrayTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1A1212E011D3F53700F27B38 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = YES;
+				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRIntArrayTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRIntArrayTest;
+				WRAPPER_EXTENSION = octest;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		1A12130E11D3F7CE00F27B38 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = required;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 1;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRCommonTreeTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRCommonTreeTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = Debug;
+		};
+		1A12130F11D3F7CE00F27B38 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRCommonTreeTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRCommonTreeTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1A12131011D3F7CE00F27B38 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = YES;
+				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRCommonTreeTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRCommonTreeTest;
+				WRAPPER_EXTENSION = octest;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		1A348B5011D2BEE8000C72FC /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = required;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 1;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "Test-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = Test;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = Debug;
+		};
+		1A348B5111D2BEE8000C72FC /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "Test-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = Test;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1A348B5211D2BEE8000C72FC /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = YES;
+				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "Test-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = Test;
+				WRAPPER_EXTENSION = octest;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		1A348BEE11D2D0A2000C72FC /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = required;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 1;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRBitsetTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRBitsetTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = Debug;
+		};
+		1A348BEF11D2D0A2000C72FC /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRBitsetTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRBitsetTest;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1A348BF011D2D0A2000C72FC /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = YES;
+				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "ANTLRBitsetTest-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = ANTLRBitsetTest;
+				WRAPPER_EXTENSION = octest;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		1A77EE8B12E6A552007F323A /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = required;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 1;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "TreeRewriteRuleTokenStream-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = TreeRewriteRuleTokenStream;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = Debug;
+		};
+		1A77EE8C12E6A552007F323A /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "TreeRewriteRuleTokenStream-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = TreeRewriteRuleTokenStream;
+				WRAPPER_EXTENSION = octest;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1A77EE8D12E6A552007F323A /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				COPY_PHASE_STRIP = YES;
+				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+				FRAMEWORK_SEARCH_PATHS = "$(DEVELOPER_LIBRARY_DIR)/Frameworks";
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/Cocoa.framework/Headers/Cocoa.h";
+				INFOPLIST_FILE = "TreeRewriteRuleTokenStream-Info.plist";
+				INSTALL_PATH = "$(USER_LIBRARY_DIR)/Bundles";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Cocoa,
+					"-framework",
+					SenTestingKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = TreeRewriteRuleTokenStream;
+				WRAPPER_EXTENSION = octest;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		1AC5AC9512E7BE0400DF0C58 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = treeparser;
+			};
+			name = Debug;
+		};
+		1AC5AC9612E7BE0400DF0C58 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = treerewrite;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1AC5AC9712E7BE0400DF0C58 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = treerewrite;
+			};
+			name = Release;
+		};
+		1AC5ACA412E7BEFE00DF0C58 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
+				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
+				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
+				ALTERNATE_PERMISSIONS_FILES = "";
+				ALWAYS_SEARCH_USER_PATHS = YES;
+				ANTLR_DEBUG = YES;
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/treerewrite";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/treerewrite";
+				ARCHS = "$(NATIVE_ARCH)";
+				BUILD_VARIANTS = normal;
+				BUNDLE_LOADER = "";
+				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
+				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
+				COPYING_PRESERVES_HFS_DATA = NO;
+				COPY_PHASE_STRIP = NO;
+				CURRENT_PROJECT_VERSION = "";
+				DEAD_CODE_STRIPPING = NO;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				DEPLOYMENT_LOCATION = NO;
+				DEPLOYMENT_POSTPROCESSING = NO;
+				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
+				DYLIB_COMPATIBILITY_VERSION = "";
+				DYLIB_CURRENT_VERSION = "";
+				EXECUTABLE_EXTENSION = "";
+				EXECUTABLE_PREFIX = "";
+				EXPORTED_SYMBOLS_FILE = "";
+				FRAMEWORK_SEARCH_PATHS = "";
+				FRAMEWORK_VERSION = A;
+				GCC_ALTIVEC_EXTENSIONS = NO;
+				GCC_AUTO_VECTORIZATION = NO;
+				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
+				GCC_CW_ASM_SYNTAX = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = full;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_ASM_KEYWORD = YES;
+				GCC_ENABLE_CPP_EXCEPTIONS = YES;
+				GCC_ENABLE_CPP_RTTI = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = supported;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SSE3_EXTENSIONS = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_ENABLE_TRIGRAPHS = NO;
+				GCC_FAST_MATH = NO;
+				GCC_FAST_OBJC_DISPATCH = NO;
+				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
+				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
+				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
+				GCC_INPUT_FILETYPE = automatic;
+				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
+				GCC_MODEL_PPC64 = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_NO_COMMON_BLOCKS = NO;
+				GCC_OBJC_CALL_CXX_CDTORS = NO;
+				GCC_ONE_BYTE_BOOL = NO;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_PREPROCESSOR_DEFINITIONS = "";
+				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
+				GCC_REUSE_STRINGS = YES;
+				GCC_SHORT_ENUMS = NO;
+				GCC_STRICT_ALIASING = NO;
+				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
+				GCC_THREADSAFE_STATICS = YES;
+				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
+				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
+				GCC_UNROLL_LOOPS = NO;
+				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
+				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
+				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
+				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
+				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
+				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
+				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
+				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
+				GCC_WARN_MISSING_PARENTHESES = NO;
+				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
+				GCC_WARN_PEDANTIC = NO;
+				GCC_WARN_SHADOW = NO;
+				GCC_WARN_SIGN_COMPARE = NO;
+				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = NO;
+				GCC_WARN_UNKNOWN_PRAGMAS = NO;
+				GCC_WARN_UNUSED_FUNCTION = NO;
+				GCC_WARN_UNUSED_LABEL = NO;
+				GCC_WARN_UNUSED_PARAMETER = NO;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				GENERATE_MASTER_OBJECT_FILE = NO;
+				GENERATE_PKGINFO_FILE = NO;
+				GENERATE_PROFILING_CODE = NO;
+				HEADER_SEARCH_PATHS = "";
+				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
+				INFOPLIST_FILE = "";
+				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
+				INFOPLIST_PREFIX_HEADER = "";
+				INFOPLIST_PREPROCESS = NO;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
+				INIT_ROUTINE = "";
+				INSTALL_GROUP = "$(GROUP)";
+				INSTALL_MODE_FLAG = "a-w,a+rX";
+				INSTALL_OWNER = "$(USER)";
+				INSTALL_PATH = "$(HOME)/bin";
+				KEEP_PRIVATE_EXTERNS = NO;
+				LIBRARY_SEARCH_PATHS = "";
+				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
+				LINKER_DISPLAYS_MANGLED_NAMES = NO;
+				LINK_WITH_STANDARD_LIBRARIES = YES;
+				MACH_O_TYPE = "";
+				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
+				OBJROOT = Build/Intermediates;
+				ONLY_ACTIVE_ARCH = YES;
+				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
+				OTHER_CFLAGS = "";
+				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
+				PRELINK_FLAGS = "";
+				PRELINK_LIBS = "";
+				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
+				PRIVATE_HEADERS_FOLDER_PATH = "";
+				PRODUCT_NAME = treeparser;
+				PUBLIC_HEADERS_FOLDER_PATH = "";
+				REZ_SEARCH_PATHS = "";
+				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
+				SDKROOT = macosx10.6;
+				SECTORDER_FLAGS = "";
+				SEPARATE_STRIP = NO;
+				SEPARATE_SYMBOL_EDIT = NO;
+				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
+				SKIP_INSTALL = NO;
+				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
+				STRIPFLAGS = "";
+				STRIP_INSTALLED_PRODUCT = "";
+				STRIP_STYLE = all;
+				SYMROOT = Build;
+				TEST_HOST = "";
+				TEST_RIG = "";
+				UNEXPORTED_SYMBOLS_FILE = "";
+				USER_HEADER_SEARCH_PATHS = "";
+				VERSIONING_SYSTEM = "";
+				WARNING_CFLAGS = "";
+				WARNING_LDFLAGS = "";
+				WRAPPER_EXTENSION = "";
+				ZERO_LINK = NO;
+			};
+			name = Debug;
+		};
+		1AC5ACA512E7BEFE00DF0C58 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
+				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
+				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
+				ALTERNATE_PERMISSIONS_FILES = "";
+				ALWAYS_SEARCH_USER_PATHS = YES;
+				ANTLR_DEBUG = YES;
+				ANTLR_EXTRA_JAVA_ARGS = "/Users/acondit/Projects/idea/antlr3/classes:/Users/acondit/Projects/idea/stringtemplate/classes:/Library/Java/Extensions/antlr-3.3.1.jar";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/treerewrite";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/treerewrite";
+				ARCHS = "$(NATIVE_ARCH)";
+				BUILD_VARIANTS = normal;
+				BUNDLE_LOADER = "";
+				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
+				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
+				COPYING_PRESERVES_HFS_DATA = NO;
+				COPY_PHASE_STRIP = NO;
+				CURRENT_PROJECT_VERSION = "";
+				DEAD_CODE_STRIPPING = NO;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				DEPLOYMENT_LOCATION = NO;
+				DEPLOYMENT_POSTPROCESSING = NO;
+				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
+				DYLIB_COMPATIBILITY_VERSION = "";
+				DYLIB_CURRENT_VERSION = "";
+				EXECUTABLE_EXTENSION = "";
+				EXECUTABLE_PREFIX = "";
+				EXPORTED_SYMBOLS_FILE = "";
+				FRAMEWORK_SEARCH_PATHS = "";
+				FRAMEWORK_VERSION = A;
+				GCC_ALTIVEC_EXTENSIONS = NO;
+				GCC_AUTO_VECTORIZATION = NO;
+				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
+				GCC_CW_ASM_SYNTAX = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = full;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_ASM_KEYWORD = YES;
+				GCC_ENABLE_CPP_EXCEPTIONS = YES;
+				GCC_ENABLE_CPP_RTTI = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SSE3_EXTENSIONS = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_ENABLE_TRIGRAPHS = NO;
+				GCC_FAST_MATH = NO;
+				GCC_FAST_OBJC_DISPATCH = NO;
+				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
+				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
+				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
+				GCC_INPUT_FILETYPE = automatic;
+				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
+				GCC_MODEL_PPC64 = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_NO_COMMON_BLOCKS = NO;
+				GCC_OBJC_CALL_CXX_CDTORS = NO;
+				GCC_ONE_BYTE_BOOL = NO;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_PREPROCESSOR_DEFINITIONS = "";
+				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
+				GCC_REUSE_STRINGS = YES;
+				GCC_SHORT_ENUMS = NO;
+				GCC_STRICT_ALIASING = NO;
+				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
+				GCC_THREADSAFE_STATICS = YES;
+				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
+				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
+				GCC_UNROLL_LOOPS = NO;
+				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
+				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
+				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
+				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
+				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
+				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
+				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
+				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
+				GCC_WARN_MISSING_PARENTHESES = NO;
+				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
+				GCC_WARN_PEDANTIC = NO;
+				GCC_WARN_SHADOW = NO;
+				GCC_WARN_SIGN_COMPARE = NO;
+				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = NO;
+				GCC_WARN_UNKNOWN_PRAGMAS = NO;
+				GCC_WARN_UNUSED_FUNCTION = NO;
+				GCC_WARN_UNUSED_LABEL = NO;
+				GCC_WARN_UNUSED_PARAMETER = NO;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				GENERATE_MASTER_OBJECT_FILE = NO;
+				GENERATE_PKGINFO_FILE = NO;
+				GENERATE_PROFILING_CODE = NO;
+				HEADER_SEARCH_PATHS = "";
+				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
+				INFOPLIST_FILE = "";
+				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
+				INFOPLIST_PREFIX_HEADER = "";
+				INFOPLIST_PREPROCESS = NO;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
+				INIT_ROUTINE = "";
+				INSTALL_GROUP = "$(GROUP)";
+				INSTALL_MODE_FLAG = "a-w,a+rX";
+				INSTALL_OWNER = "$(USER)";
+				INSTALL_PATH = "$(HOME)/bin";
+				KEEP_PRIVATE_EXTERNS = NO;
+				LIBRARY_SEARCH_PATHS = "";
+				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
+				LINKER_DISPLAYS_MANGLED_NAMES = NO;
+				LINK_WITH_STANDARD_LIBRARIES = YES;
+				MACH_O_TYPE = "";
+				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
+				OBJROOT = /Users/acondit/Projects/Intermediates;
+				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
+				OTHER_CFLAGS = "";
+				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
+				PRELINK_FLAGS = "";
+				PRELINK_LIBS = "";
+				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
+				PRIVATE_HEADERS_FOLDER_PATH = "";
+				PRODUCT_NAME = treerewrite;
+				PUBLIC_HEADERS_FOLDER_PATH = "";
+				REZ_SEARCH_PATHS = "";
+				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
+				SDKROOT = macosx10.4;
+				SECTORDER_FLAGS = "";
+				SEPARATE_STRIP = NO;
+				SEPARATE_SYMBOL_EDIT = NO;
+				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
+				SKIP_INSTALL = NO;
+				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
+				STRIPFLAGS = "";
+				STRIP_INSTALLED_PRODUCT = "";
+				STRIP_STYLE = all;
+				SYMROOT = /Users/acondit/Projects/Antlr/Build;
+				TEST_HOST = "";
+				TEST_RIG = "";
+				UNEXPORTED_SYMBOLS_FILE = "";
+				USER_HEADER_SEARCH_PATHS = "";
+				VERSIONING_SYSTEM = "";
+				WARNING_CFLAGS = "";
+				WARNING_LDFLAGS = "";
+				WRAPPER_EXTENSION = "";
+				ZERO_LINK = NO;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		1AC5ACA612E7BEFE00DF0C58 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
+				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
+				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
+				ALTERNATE_PERMISSIONS_FILES = "";
+				ALWAYS_SEARCH_USER_PATHS = YES;
+				ARCHS = (
+					ppc,
+					i386,
+				);
+				BUILD_VARIANTS = normal;
+				BUNDLE_LOADER = "";
+				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
+				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
+				COPYING_PRESERVES_HFS_DATA = NO;
+				COPY_PHASE_STRIP = YES;
+				CURRENT_PROJECT_VERSION = "";
+				DEAD_CODE_STRIPPING = NO;
+				DEBUG_INFORMATION_FORMAT = stabs;
+				DEPLOYMENT_LOCATION = NO;
+				DEPLOYMENT_POSTPROCESSING = NO;
+				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
+				DYLIB_COMPATIBILITY_VERSION = "";
+				DYLIB_CURRENT_VERSION = "";
+				EXECUTABLE_EXTENSION = "";
+				EXECUTABLE_PREFIX = "";
+				EXPORTED_SYMBOLS_FILE = "";
+				FRAMEWORK_SEARCH_PATHS = "";
+				FRAMEWORK_VERSION = A;
+				GCC_ALTIVEC_EXTENSIONS = NO;
+				GCC_AUTO_VECTORIZATION = NO;
+				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
+				GCC_CW_ASM_SYNTAX = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = default;
+				GCC_DYNAMIC_NO_PIC = YES;
+				GCC_ENABLE_ASM_KEYWORD = YES;
+				GCC_ENABLE_CPP_EXCEPTIONS = YES;
+				GCC_ENABLE_CPP_RTTI = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SSE3_EXTENSIONS = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = YES;
+				GCC_ENABLE_TRIGRAPHS = NO;
+				GCC_FAST_MATH = NO;
+				GCC_FAST_OBJC_DISPATCH = NO;
+				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
+				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
+				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
+				GCC_INPUT_FILETYPE = automatic;
+				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
+				GCC_MODEL_PPC64 = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_NO_COMMON_BLOCKS = NO;
+				GCC_OBJC_CALL_CXX_CDTORS = NO;
+				GCC_ONE_BYTE_BOOL = NO;
+				GCC_OPTIMIZATION_LEVEL = s;
+				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_PREPROCESSOR_DEFINITIONS = "";
+				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
+				GCC_REUSE_STRINGS = YES;
+				GCC_SHORT_ENUMS = NO;
+				GCC_STRICT_ALIASING = NO;
+				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
+				GCC_THREADSAFE_STATICS = YES;
+				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
+				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
+				GCC_UNROLL_LOOPS = NO;
+				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
+				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
+				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
+				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
+				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
+				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
+				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
+				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
+				GCC_WARN_MISSING_PARENTHESES = NO;
+				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
+				GCC_WARN_PEDANTIC = NO;
+				GCC_WARN_SHADOW = YES;
+				GCC_WARN_SIGN_COMPARE = YES;
+				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = YES;
+				GCC_WARN_UNKNOWN_PRAGMAS = NO;
+				GCC_WARN_UNUSED_FUNCTION = NO;
+				GCC_WARN_UNUSED_LABEL = NO;
+				GCC_WARN_UNUSED_PARAMETER = YES;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				GENERATE_MASTER_OBJECT_FILE = NO;
+				GENERATE_PKGINFO_FILE = NO;
+				GENERATE_PROFILING_CODE = NO;
+				HEADER_SEARCH_PATHS = "";
+				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
+				INFOPLIST_FILE = "";
+				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
+				INFOPLIST_PREFIX_HEADER = "";
+				INFOPLIST_PREPROCESS = NO;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
+				INIT_ROUTINE = "";
+				INSTALL_GROUP = "$(GROUP)";
+				INSTALL_MODE_FLAG = "a-w,a+rX";
+				INSTALL_OWNER = "$(USER)";
+				INSTALL_PATH = "$(HOME)/bin";
+				KEEP_PRIVATE_EXTERNS = NO;
+				LIBRARY_SEARCH_PATHS = "";
+				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
+				LINKER_DISPLAYS_MANGLED_NAMES = NO;
+				LINK_WITH_STANDARD_LIBRARIES = YES;
+				MACH_O_TYPE = "";
+				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
+				OBJROOT = /Users/kroepke/Projects/Intermediates;
+				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
+				OTHER_CFLAGS = "";
+				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
+				PRELINK_FLAGS = "";
+				PRELINK_LIBS = "";
+				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
+				PRIVATE_HEADERS_FOLDER_PATH = "";
+				PRODUCT_NAME = treerewrite;
+				PUBLIC_HEADERS_FOLDER_PATH = "";
+				REZ_SEARCH_PATHS = "";
+				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
+				SDKROOT = macosx10.4;
+				SECTORDER_FLAGS = "";
+				SEPARATE_STRIP = NO;
+				SEPARATE_SYMBOL_EDIT = NO;
+				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
+				SKIP_INSTALL = NO;
+				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
+				STRIPFLAGS = "";
+				STRIP_INSTALLED_PRODUCT = "";
+				STRIP_STYLE = all;
+				SYMROOT = /Users/kroepke/Projects/Build;
+				TEST_HOST = "";
+				TEST_RIG = "";
+				UNEXPORTED_SYMBOLS_FILE = "";
+				USER_HEADER_SEARCH_PATHS = "";
+				VERSIONING_SYSTEM = "";
+				WARNING_CFLAGS = "";
+				WARNING_LDFLAGS = "";
+				WRAPPER_EXTENSION = "";
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		1DEB91AE08733DA50010E9CD /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_JAVA_ARGS = "/Library/Java/Extensions/antlr-2.7.7.jar:/Users/acondit/IdeaProjects/antlr3/out/production/antlr3:/Users/acondit/IdeaProjects/antlr3/out/production/stringtemplate";
+				ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
+				COPY_PHASE_STRIP = NO;
+				CURRENT_PROJECT_VERSION = 1;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				DEPLOYMENT_LOCATION = NO;
+				DYLIB_COMPATIBILITY_VERSION = 1;
+				DYLIB_CURRENT_VERSION = 1;
+				FRAMEWORK_VERSION = A;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = full;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_MODEL_TUNING = G4;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = ANTLR_Prefix.pch;
+				GCC_WARN_UNINITIALIZED_AUTOS = NO;
+				INFOPLIST_FILE = Info.plist;
+				INSTALL_PATH = "$(HOME)/Library/Frameworks";
+				PRODUCT_NAME = ANTLR;
+				VERSIONING_SYSTEM = "apple-generic";
+				WRAPPER_EXTENSION = framework;
+				ZERO_LINK = NO;
+			};
+			name = Debug;
+		};
+		1DEB91AF08733DA50010E9CD /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
+				BUILD_VARIANTS = (
+					normal,
+					debug,
+				);
+				CURRENT_PROJECT_VERSION = 1;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				DYLIB_COMPATIBILITY_VERSION = 1;
+				DYLIB_CURRENT_VERSION = 1;
+				FRAMEWORK_VERSION = A;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = ANTLR_Prefix.pch;
+				INFOPLIST_FILE = Info.plist;
+				INSTALL_PATH = "$(HOME)/Library/Frameworks";
+				PRODUCT_NAME = ANTLR;
+				VERSIONING_SYSTEM = "apple-generic";
+				WRAPPER_EXTENSION = framework;
+			};
+			name = Release;
+		};
+		1DEB91B208733DA50010E9CD /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_ARGS = "";
+				ANTLR_EXTRA_JAVA_ARGS = "";
+				ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = full;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = supported;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_OPTIMIZATION_LEVEL = 1;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_SHADOW = NO;
+				GCC_WARN_SIGN_COMPARE = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = YES;
+				GCC_WARN_UNUSED_PARAMETER = NO;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				PREBINDING = NO;
+				SDKROOT = macosx10.6;
+			};
+			name = Debug;
+		};
+		1DEB91B308733DA50010E9CD /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_ARGS = "";
+				ANTLR_EXTRA_JAVA_ARGS = "";
+				ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_SHADOW = YES;
+				GCC_WARN_SIGN_COMPARE = YES;
+				GCC_WARN_UNINITIALIZED_AUTOS = YES;
+				GCC_WARN_UNUSED_PARAMETER = YES;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				PREBINDING = NO;
+				SDKROOT = macosx10.6;
+			};
+			name = Release;
+		};
+		F700E86D0A5FA34D005D0757 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/combined";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/combined";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = YES;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 1;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				ONLY_ACTIVE_ARCH = YES;
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = combined;
+				ZERO_LINK = YES;
+			};
+			name = Debug;
+		};
+		F700E86E0A5FA34D005D0757 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				COPY_PHASE_STRIP = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = combined;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		F700ECDD0A5FE1BF005D0757 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_DEBUG = YES;
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/LL-star";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/LL-star";
+				ANTLR_X_DEBUG_ST = NO;
+				COPY_PHASE_STRIP = NO;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G4;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = "LL-star";
+				ZERO_LINK = NO;
+			};
+			name = Debug;
+		};
+		F700ECDE0A5FE1BF005D0757 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				COPY_PHASE_STRIP = YES;
+				DEBUG_INFORMATION_FORMAT = stabs;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = "LL-star";
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		F7037EB90A05AFEF0070435D /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/lexertest-simple";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/lexertest-simple";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = YES;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = Build;
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = "lexertest-simple";
+				ZERO_LINK = NO;
+			};
+			name = Debug;
+		};
+		F7037EBA0A05AFEF0070435D /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				COPY_PHASE_STRIP = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = "lexertest-simple";
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		F72C5E670AB7E4C900282574 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_DEBUG = YES;
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/simpleCTreeParser";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/simpleCTreeParser";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_WARN_UNINITIALIZED_AUTOS = NO;
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = simplectree;
+				ZERO_LINK = NO;
+			};
+			name = Debug;
+		};
+		F72C5E680AB7E4C900282574 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				COPY_PHASE_STRIP = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = simplectree;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		F76287410B7151B9006AA7EF /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/Users/acondit/source/antlr_src/code/antlr/out/production/antlr3:/Users/acondit/source/antlr_src/code/antlr/out/production/stringtemplate";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				PRODUCT_NAME = Untitled;
+			};
+			name = Debug;
+		};
+		F76287420B7151B9006AA7EF /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_JAVA_ARGS = "ANTLR_EXTRA_JAVA_ARGS = /usr/share/java/antlr-2.7.7.jar:/Users/acondit/source/antlr_src/code/antlr/out/production/antlr3:/Users/acondit/source/antlr_src/code/antlr/out/production/stringtemplate\n";
+				COPY_PHASE_STRIP = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				PRODUCT_NAME = Untitled;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		F76287470B715201006AA7EF /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = fuzzy;
+			};
+			name = Debug;
+		};
+		F76287480B715201006AA7EF /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = fuzzy;
+			};
+			name = Release;
+		};
+		F762877A0B71557E006AA7EF /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = "lexertest-simple";
+			};
+			name = Debug;
+		};
+		F762877B0B71557E006AA7EF /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = fuzzy;
+			};
+			name = Release;
+		};
+		F76287800B71559C006AA7EF /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = combined;
+			};
+			name = Debug;
+		};
+		F76287810B71559C006AA7EF /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = combined;
+			};
+			name = Release;
+		};
+		F76287840B71559F006AA7EF /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = "LL-star";
+			};
+			name = Debug;
+		};
+		F76287850B71559F006AA7EF /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = "LL-star";
+			};
+			name = Release;
+		};
+		F76287880B7155A2006AA7EF /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = hoistedPredicates;
+			};
+			name = Debug;
+		};
+		F76287890B7155A2006AA7EF /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = hoistedPredicates;
+			};
+			name = Release;
+		};
+		F762878C0B7155AB006AA7EF /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = scopes;
+			};
+			name = Debug;
+		};
+		F762878D0B7155AB006AA7EF /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = scopes;
+			};
+			name = Release;
+		};
+		F76287900B7155AF006AA7EF /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				BUILD_SETTING = NO;
+				GRAMMAR_SETTING = NO;
+				PRODUCT_NAME = simplectree;
+			};
+			name = Debug;
+		};
+		F76287910B7155AF006AA7EF /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = "simplec tree";
+			};
+			name = Release;
+		};
+		F79D56700A0E23D600EA3CEE /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/fuzzy";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/fuzzy";
+				ANTLR_TRACE = NO;
+				ANTLR_X_DEBUG_ST = NO;
+				COPY_PHASE_STRIP = NO;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G4;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = fuzzy;
+				ZERO_LINK = NO;
+			};
+			name = Debug;
+		};
+		F79D56710A0E23D600EA3CEE /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				COPY_PHASE_STRIP = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = fuzzy;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		F7C562300CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_ARGS = "";
+				ANTLR_EXTRA_JAVA_ARGS = "";
+				ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = full;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_SHADOW = NO;
+				GCC_WARN_SIGN_COMPARE = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = YES;
+				GCC_WARN_UNUSED_PARAMETER = NO;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				PREBINDING = NO;
+				SDKROOT = macosx10.6;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562310CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
+				COPY_PHASE_STRIP = NO;
+				CURRENT_PROJECT_VERSION = 1;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				DEPLOYMENT_LOCATION = NO;
+				DYLIB_COMPATIBILITY_VERSION = 1;
+				DYLIB_CURRENT_VERSION = 1;
+				FRAMEWORK_VERSION = A;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = full;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_MODEL_TUNING = G4;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = ANTLR_Prefix.pch;
+				GCC_WARN_UNINITIALIZED_AUTOS = NO;
+				INFOPLIST_FILE = Info.plist;
+				INSTALL_PATH = "$(HOME)/Library/Frameworks";
+				PRODUCT_NAME = ANTLR;
+				VERSIONING_SYSTEM = "apple-generic";
+				WRAPPER_EXTENSION = framework;
+				ZERO_LINK = NO;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562330CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/Users/acondit/source/antlr_src/code/antlr/out/production/antlr3:/Users/acondit/source/antlr_src/code/antlr/out/production/stringtemplate";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				PRODUCT_NAME = Untitled;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562340CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/lexertest-simple";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/lexertest-simple";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = YES;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = "lexertest-simple";
+				ZERO_LINK = NO;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562350CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = "lexertest-simple";
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562360CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/fuzzy";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/fuzzy";
+				ANTLR_TRACE = NO;
+				ANTLR_X_DEBUG_ST = NO;
+				COPY_PHASE_STRIP = NO;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G4;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = fuzzy;
+				ZERO_LINK = NO;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562370CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = fuzzy;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562380CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/combined";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/combined";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = YES;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = combined;
+				ZERO_LINK = YES;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562390CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = combined;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C5623A0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_DEBUG = YES;
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/LL-star";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/LL-star";
+				ANTLR_X_DEBUG_ST = NO;
+				COPY_PHASE_STRIP = NO;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G4;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = "LL-star";
+				ZERO_LINK = NO;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C5623B0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = "LL-star";
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C5623C0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/hoistedPredicates";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/hoistedPredicates";
+				ARCHS = "$(NATIVE_ARCH)";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = YES;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = hoistedPredicates;
+				ZERO_LINK = NO;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C5623D0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = hoistedPredicates;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C5623E0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/scopes";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/scopes";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = YES;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = scopes;
+				ZERO_LINK = NO;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C5623F0CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = scopes;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562400CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_DEBUG = YES;
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/simpleCTreeParser";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/simpleCTreeParser";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_WARN_UNINITIALIZED_AUTOS = NO;
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = simplectree;
+				ZERO_LINK = NO;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562410CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = "simplec tree";
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562420CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
+				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
+				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
+				ALTERNATE_PERMISSIONS_FILES = "";
+				ALWAYS_SEARCH_USER_PATHS = YES;
+				ANTLR_DEBUG = YES;
+				ANTLR_EXTRA_JAVA_ARGS = "/Users/acondit/Projects/idea/antlr3/classes:/Users/acondit/Projects/idea/stringtemplate/classes:/Library/Java/Extensions/antlr-3.3.1.jar";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/treerewrite";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/treerewrite";
+				ARCHS = "$(NATIVE_ARCH)";
+				BUILD_VARIANTS = normal;
+				BUNDLE_LOADER = "";
+				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
+				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
+				COPYING_PRESERVES_HFS_DATA = NO;
+				COPY_PHASE_STRIP = NO;
+				CURRENT_PROJECT_VERSION = "";
+				DEAD_CODE_STRIPPING = NO;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				DEPLOYMENT_LOCATION = NO;
+				DEPLOYMENT_POSTPROCESSING = NO;
+				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
+				DYLIB_COMPATIBILITY_VERSION = "";
+				DYLIB_CURRENT_VERSION = "";
+				EXECUTABLE_EXTENSION = "";
+				EXECUTABLE_PREFIX = "";
+				EXPORTED_SYMBOLS_FILE = "";
+				FRAMEWORK_SEARCH_PATHS = "";
+				FRAMEWORK_VERSION = A;
+				GCC_ALTIVEC_EXTENSIONS = NO;
+				GCC_AUTO_VECTORIZATION = NO;
+				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
+				GCC_CW_ASM_SYNTAX = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = full;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_ASM_KEYWORD = YES;
+				GCC_ENABLE_CPP_EXCEPTIONS = YES;
+				GCC_ENABLE_CPP_RTTI = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SSE3_EXTENSIONS = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_ENABLE_TRIGRAPHS = NO;
+				GCC_FAST_MATH = NO;
+				GCC_FAST_OBJC_DISPATCH = NO;
+				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
+				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
+				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
+				GCC_INPUT_FILETYPE = automatic;
+				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
+				GCC_MODEL_PPC64 = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_NO_COMMON_BLOCKS = NO;
+				GCC_OBJC_CALL_CXX_CDTORS = NO;
+				GCC_ONE_BYTE_BOOL = NO;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_PREPROCESSOR_DEFINITIONS = "";
+				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
+				GCC_REUSE_STRINGS = YES;
+				GCC_SHORT_ENUMS = NO;
+				GCC_STRICT_ALIASING = NO;
+				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
+				GCC_THREADSAFE_STATICS = YES;
+				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
+				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
+				GCC_UNROLL_LOOPS = NO;
+				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
+				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
+				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
+				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
+				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
+				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
+				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
+				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
+				GCC_WARN_MISSING_PARENTHESES = NO;
+				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
+				GCC_WARN_PEDANTIC = NO;
+				GCC_WARN_SHADOW = NO;
+				GCC_WARN_SIGN_COMPARE = NO;
+				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = NO;
+				GCC_WARN_UNKNOWN_PRAGMAS = NO;
+				GCC_WARN_UNUSED_FUNCTION = NO;
+				GCC_WARN_UNUSED_LABEL = NO;
+				GCC_WARN_UNUSED_PARAMETER = NO;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				GENERATE_MASTER_OBJECT_FILE = NO;
+				GENERATE_PKGINFO_FILE = NO;
+				GENERATE_PROFILING_CODE = NO;
+				HEADER_SEARCH_PATHS = "";
+				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
+				INFOPLIST_FILE = "";
+				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
+				INFOPLIST_PREFIX_HEADER = "";
+				INFOPLIST_PREPROCESS = NO;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
+				INIT_ROUTINE = "";
+				INSTALL_GROUP = "$(GROUP)";
+				INSTALL_MODE_FLAG = "a-w,a+rX";
+				INSTALL_OWNER = "$(USER)";
+				INSTALL_PATH = "$(HOME)/bin";
+				KEEP_PRIVATE_EXTERNS = NO;
+				LIBRARY_SEARCH_PATHS = "";
+				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
+				LINKER_DISPLAYS_MANGLED_NAMES = NO;
+				LINK_WITH_STANDARD_LIBRARIES = YES;
+				MACH_O_TYPE = "";
+				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
+				OBJROOT = /Users/acondit/Projects/Intermediates;
+				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
+				OTHER_CFLAGS = "";
+				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
+				PRELINK_FLAGS = "";
+				PRELINK_LIBS = "";
+				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
+				PRIVATE_HEADERS_FOLDER_PATH = "";
+				PRODUCT_NAME = treerewrite;
+				PUBLIC_HEADERS_FOLDER_PATH = "";
+				REZ_SEARCH_PATHS = "";
+				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
+				SDKROOT = macosx10.4;
+				SECTORDER_FLAGS = "";
+				SEPARATE_STRIP = NO;
+				SEPARATE_SYMBOL_EDIT = NO;
+				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
+				SKIP_INSTALL = NO;
+				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
+				STRIPFLAGS = "";
+				STRIP_INSTALLED_PRODUCT = "";
+				STRIP_STYLE = all;
+				SYMROOT = /Users/acondit/Projects/Antlr/Build;
+				TEST_HOST = "";
+				TEST_RIG = "";
+				UNEXPORTED_SYMBOLS_FILE = "";
+				USER_HEADER_SEARCH_PATHS = "";
+				VERSIONING_SYSTEM = "";
+				WARNING_CFLAGS = "";
+				WARNING_LDFLAGS = "";
+				WRAPPER_EXTENSION = "";
+				ZERO_LINK = NO;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7C562430CD513D400727DB0 /* Debug with StringTemplate Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = treerewrite;
+			};
+			name = "Debug with StringTemplate Debug";
+		};
+		F7CD47630C64D23800FF933A /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = treerewrite;
+			};
+			name = Debug;
+		};
+		F7CD47640C64D23800FF933A /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				PRODUCT_NAME = treerewrite;
+			};
+			name = Release;
+		};
+		F7CD477D0C64D27000FF933A /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
+				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
+				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
+				ALTERNATE_PERMISSIONS_FILES = "";
+				ALWAYS_SEARCH_USER_PATHS = YES;
+				ANTLR_DEBUG = YES;
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/treerewrite";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/treerewrite";
+				ARCHS = "$(NATIVE_ARCH)";
+				BUILD_VARIANTS = normal;
+				BUNDLE_LOADER = "";
+				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
+				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
+				COPYING_PRESERVES_HFS_DATA = NO;
+				COPY_PHASE_STRIP = NO;
+				CURRENT_PROJECT_VERSION = "";
+				DEAD_CODE_STRIPPING = NO;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				DEPLOYMENT_LOCATION = NO;
+				DEPLOYMENT_POSTPROCESSING = NO;
+				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
+				DYLIB_COMPATIBILITY_VERSION = "";
+				DYLIB_CURRENT_VERSION = "";
+				EXECUTABLE_EXTENSION = "";
+				EXECUTABLE_PREFIX = "";
+				EXPORTED_SYMBOLS_FILE = "";
+				FRAMEWORK_SEARCH_PATHS = "";
+				FRAMEWORK_VERSION = A;
+				GCC_ALTIVEC_EXTENSIONS = NO;
+				GCC_AUTO_VECTORIZATION = NO;
+				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
+				GCC_CW_ASM_SYNTAX = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = full;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_ASM_KEYWORD = YES;
+				GCC_ENABLE_CPP_EXCEPTIONS = YES;
+				GCC_ENABLE_CPP_RTTI = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_OBJC_GC = supported;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SSE3_EXTENSIONS = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_ENABLE_TRIGRAPHS = NO;
+				GCC_FAST_MATH = NO;
+				GCC_FAST_OBJC_DISPATCH = NO;
+				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
+				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
+				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
+				GCC_INPUT_FILETYPE = automatic;
+				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
+				GCC_MODEL_PPC64 = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_NO_COMMON_BLOCKS = NO;
+				GCC_OBJC_CALL_CXX_CDTORS = NO;
+				GCC_ONE_BYTE_BOOL = NO;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_PREPROCESSOR_DEFINITIONS = "";
+				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
+				GCC_REUSE_STRINGS = YES;
+				GCC_SHORT_ENUMS = NO;
+				GCC_STRICT_ALIASING = NO;
+				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
+				GCC_THREADSAFE_STATICS = YES;
+				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
+				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
+				GCC_UNROLL_LOOPS = NO;
+				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
+				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
+				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = NO;
+				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
+				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
+				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
+				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
+				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
+				GCC_WARN_MISSING_PARENTHESES = NO;
+				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
+				GCC_WARN_PEDANTIC = NO;
+				GCC_WARN_SHADOW = NO;
+				GCC_WARN_SIGN_COMPARE = NO;
+				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = NO;
+				GCC_WARN_UNKNOWN_PRAGMAS = NO;
+				GCC_WARN_UNUSED_FUNCTION = NO;
+				GCC_WARN_UNUSED_LABEL = NO;
+				GCC_WARN_UNUSED_PARAMETER = NO;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				GENERATE_MASTER_OBJECT_FILE = NO;
+				GENERATE_PKGINFO_FILE = NO;
+				GENERATE_PROFILING_CODE = NO;
+				HEADER_SEARCH_PATHS = "";
+				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
+				INFOPLIST_FILE = "";
+				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
+				INFOPLIST_PREFIX_HEADER = "";
+				INFOPLIST_PREPROCESS = NO;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
+				INIT_ROUTINE = "";
+				INSTALL_GROUP = "$(GROUP)";
+				INSTALL_MODE_FLAG = "a-w,a+rX";
+				INSTALL_OWNER = "$(USER)";
+				INSTALL_PATH = "$(HOME)/bin";
+				KEEP_PRIVATE_EXTERNS = NO;
+				LIBRARY_SEARCH_PATHS = "";
+				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
+				LINKER_DISPLAYS_MANGLED_NAMES = NO;
+				LINK_WITH_STANDARD_LIBRARIES = YES;
+				MACH_O_TYPE = "";
+				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
+				OBJROOT = Build/Intermediates;
+				ONLY_ACTIVE_ARCH = YES;
+				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
+				OTHER_CFLAGS = "";
+				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
+				PRELINK_FLAGS = "";
+				PRELINK_LIBS = "";
+				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
+				PRIVATE_HEADERS_FOLDER_PATH = "";
+				PRODUCT_NAME = treerewrite;
+				PUBLIC_HEADERS_FOLDER_PATH = "";
+				REZ_SEARCH_PATHS = "";
+				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
+				SDKROOT = macosx10.6;
+				SECTORDER_FLAGS = "";
+				SEPARATE_STRIP = NO;
+				SEPARATE_SYMBOL_EDIT = NO;
+				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
+				SKIP_INSTALL = NO;
+				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
+				STRIPFLAGS = "";
+				STRIP_INSTALLED_PRODUCT = "";
+				STRIP_STYLE = all;
+				SYMROOT = Build;
+				TEST_HOST = "";
+				TEST_RIG = "";
+				UNEXPORTED_SYMBOLS_FILE = "";
+				USER_HEADER_SEARCH_PATHS = "";
+				VERSIONING_SYSTEM = "";
+				WARNING_CFLAGS = "";
+				WARNING_LDFLAGS = "";
+				WRAPPER_EXTENSION = "";
+				ZERO_LINK = NO;
+			};
+			name = Debug;
+		};
+		F7CD477E0C64D27000FF933A /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALTERNATE_GROUP = "$(INSTALL_GROUP)";
+				ALTERNATE_MODE = "$(INSTALL_MODE_FLAG)";
+				ALTERNATE_OWNER = "$(INSTALL_OWNER)";
+				ALTERNATE_PERMISSIONS_FILES = "";
+				ALWAYS_SEARCH_USER_PATHS = YES;
+				ARCHS = (
+					ppc,
+					i386,
+				);
+				BUILD_VARIANTS = normal;
+				BUNDLE_LOADER = "";
+				CONFIGURATION_BUILD_DIR = "$(BUILD_DIR)/$(CONFIGURATION)";
+				CONFIGURATION_TEMP_DIR = "$(PROJECT_TEMP_DIR)/$(CONFIGURATION)";
+				COPYING_PRESERVES_HFS_DATA = NO;
+				COPY_PHASE_STRIP = YES;
+				CURRENT_PROJECT_VERSION = "";
+				DEAD_CODE_STRIPPING = NO;
+				DEBUG_INFORMATION_FORMAT = stabs;
+				DEPLOYMENT_LOCATION = NO;
+				DEPLOYMENT_POSTPROCESSING = NO;
+				DSTROOT = "/tmp/$(PROJECT_NAME).dst";
+				DYLIB_COMPATIBILITY_VERSION = "";
+				DYLIB_CURRENT_VERSION = "";
+				EXECUTABLE_EXTENSION = "";
+				EXECUTABLE_PREFIX = "";
+				EXPORTED_SYMBOLS_FILE = "";
+				FRAMEWORK_SEARCH_PATHS = "";
+				FRAMEWORK_VERSION = A;
+				GCC_ALTIVEC_EXTENSIONS = NO;
+				GCC_AUTO_VECTORIZATION = NO;
+				GCC_CHAR_IS_UNSIGNED_CHAR = NO;
+				GCC_CW_ASM_SYNTAX = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DEBUGGING_SYMBOLS = default;
+				GCC_DYNAMIC_NO_PIC = YES;
+				GCC_ENABLE_ASM_KEYWORD = YES;
+				GCC_ENABLE_CPP_EXCEPTIONS = YES;
+				GCC_ENABLE_CPP_RTTI = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+				GCC_ENABLE_PASCAL_STRINGS = YES;
+				GCC_ENABLE_SSE3_EXTENSIONS = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = YES;
+				GCC_ENABLE_TRIGRAPHS = NO;
+				GCC_FAST_MATH = NO;
+				GCC_FAST_OBJC_DISPATCH = NO;
+				GCC_FEEDBACK_DIRECTED_OPTIMIZATION = Off;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_GENERATE_TEST_COVERAGE_FILES = NO;
+				GCC_INCREASE_PRECOMPILED_HEADER_SHARING = NO;
+				GCC_INLINES_ARE_PRIVATE_EXTERN = YES;
+				GCC_INPUT_FILETYPE = automatic;
+				GCC_INSTRUMENT_PROGRAM_FLOW_ARCS = NO;
+				GCC_MODEL_PPC64 = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_NO_COMMON_BLOCKS = NO;
+				GCC_OBJC_CALL_CXX_CDTORS = NO;
+				GCC_ONE_BYTE_BOOL = NO;
+				GCC_OPTIMIZATION_LEVEL = s;
+				GCC_PFE_FILE_C_DIALECTS = "c objective-c c++ objective-c++";
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				GCC_PREPROCESSOR_DEFINITIONS = "";
+				GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS = "";
+				GCC_REUSE_STRINGS = YES;
+				GCC_SHORT_ENUMS = NO;
+				GCC_STRICT_ALIASING = NO;
+				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
+				GCC_THREADSAFE_STATICS = YES;
+				GCC_TREAT_NONCONFORMANT_CODE_ERRORS_AS_WARNINGS = NO;
+				GCC_TREAT_WARNINGS_AS_ERRORS = NO;
+				GCC_UNROLL_LOOPS = NO;
+				GCC_USE_GCC3_PFE_SUPPORT = "$(USE_GCC3_PFE_SUPPORT)";
+				GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
+				GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = YES;
+				GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
+				GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+				GCC_WARN_ABOUT_POINTER_SIGNEDNESS = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES;
+				GCC_WARN_ALLOW_INCOMPLETE_PROTOCOL = YES;
+				GCC_WARN_CHECK_SWITCH_STATEMENTS = YES;
+				GCC_WARN_EFFECTIVE_CPLUSPLUS_VIOLATIONS = NO;
+				GCC_WARN_FOUR_CHARACTER_CONSTANTS = NO;
+				GCC_WARN_HIDDEN_VIRTUAL_FUNCTIONS = NO;
+				GCC_WARN_INHIBIT_ALL_WARNINGS = NO;
+				GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = NO;
+				GCC_WARN_MISSING_PARENTHESES = NO;
+				GCC_WARN_NON_VIRTUAL_DESTRUCTOR = NO;
+				GCC_WARN_PEDANTIC = NO;
+				GCC_WARN_SHADOW = YES;
+				GCC_WARN_SIGN_COMPARE = YES;
+				GCC_WARN_TYPECHECK_CALLS_TO_PRINTF = NO;
+				GCC_WARN_UNINITIALIZED_AUTOS = YES;
+				GCC_WARN_UNKNOWN_PRAGMAS = NO;
+				GCC_WARN_UNUSED_FUNCTION = NO;
+				GCC_WARN_UNUSED_LABEL = NO;
+				GCC_WARN_UNUSED_PARAMETER = YES;
+				GCC_WARN_UNUSED_VALUE = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				GENERATE_MASTER_OBJECT_FILE = NO;
+				GENERATE_PKGINFO_FILE = NO;
+				GENERATE_PROFILING_CODE = NO;
+				HEADER_SEARCH_PATHS = "";
+				INFOPLIST_EXPAND_BUILD_SETTINGS = YES;
+				INFOPLIST_FILE = "";
+				INFOPLIST_OTHER_PREPROCESSOR_FLAGS = "";
+				INFOPLIST_PREFIX_HEADER = "";
+				INFOPLIST_PREPROCESS = NO;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = "";
+				INIT_ROUTINE = "";
+				INSTALL_GROUP = "$(GROUP)";
+				INSTALL_MODE_FLAG = "a-w,a+rX";
+				INSTALL_OWNER = "$(USER)";
+				INSTALL_PATH = "$(HOME)/bin";
+				KEEP_PRIVATE_EXTERNS = NO;
+				LIBRARY_SEARCH_PATHS = "";
+				LINKER_DISPLAYS_FILES_FOR_UNDEFINED_SYMBOLS = YES;
+				LINKER_DISPLAYS_MANGLED_NAMES = NO;
+				LINK_WITH_STANDARD_LIBRARIES = YES;
+				MACH_O_TYPE = "";
+				MACOSX_DEPLOYMENT_TARGET = "$(inherited)";
+				OBJROOT = /Users/kroepke/Projects/Intermediates;
+				ONLY_LINK_ESSENTIAL_SYMBOLS = NO;
+				OTHER_CFLAGS = "";
+				OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRECOMPS_INCLUDE_HEADERS_FROM_BUILT_PRODUCTS_DIR = YES;
+				PRELINK_FLAGS = "";
+				PRELINK_LIBS = "";
+				PRESERVE_DEAD_CODE_INITS_AND_TERMS = NO;
+				PRIVATE_HEADERS_FOLDER_PATH = "";
+				PRODUCT_NAME = treerewrite;
+				PUBLIC_HEADERS_FOLDER_PATH = "";
+				REZ_SEARCH_PATHS = "";
+				SCAN_ALL_SOURCE_FILES_FOR_INCLUDES = NO;
+				SDKROOT = macosx10.4;
+				SECTORDER_FLAGS = "";
+				SEPARATE_STRIP = NO;
+				SEPARATE_SYMBOL_EDIT = NO;
+				SHARED_PRECOMPS_DIR = "$(CACHE_ROOT)/SharedPrecompiledHeaders";
+				SKIP_INSTALL = NO;
+				STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
+				STRIPFLAGS = "";
+				STRIP_INSTALLED_PRODUCT = "";
+				STRIP_STYLE = all;
+				SYMROOT = /Users/kroepke/Projects/Build;
+				TEST_HOST = "";
+				TEST_RIG = "";
+				UNEXPORTED_SYMBOLS_FILE = "";
+				USER_HEADER_SEARCH_PATHS = "";
+				VERSIONING_SYSTEM = "";
+				WARNING_CFLAGS = "";
+				WARNING_LDFLAGS = "";
+				WRAPPER_EXTENSION = "";
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		F7DD05F10A7B1640006A006C /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/hoistedPredicates";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/hoistedPredicates";
+				ARCHS = "$(NATIVE_ARCH)";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = YES;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = hoistedPredicates;
+				ZERO_LINK = NO;
+			};
+			name = Debug;
+		};
+		F7DD05F20A7B1640006A006C /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ARCHS = "$(NATIVE_ARCH)";
+				COPY_PHASE_STRIP = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_ENABLE_SYMBOL_SEPARATION = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = hoistedPredicates;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+		F7DD077A0A7B6682006A006C /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ANTLR_EXTRA_JAVA_ARGS = "/usr/share/java/antlr-2.7.7.jar:/usr/share/java/antlr3/out/production/antlr3:/usr/share/java/antlr3/out/production/stringtemplate";
+				ANTLR_FORCE_OUT_DIR = "$(PROJECT_DIR)/examples/scopes";
+				ANTLR_LIB_DIR = "$(PROJECT_DIR)/examples/scopes";
+				COPY_PHASE_STRIP = NO;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_ENABLE_FIX_AND_CONTINUE = YES;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_MODEL_TUNING = G5;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = scopes;
+				ZERO_LINK = NO;
+			};
+			name = Debug;
+		};
+		F7DD077B0A7B6682006A006C /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				COPY_PHASE_STRIP = YES;
+				GCC_ENABLE_FIX_AND_CONTINUE = NO;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = NO;
+				GCC_MODEL_TUNING = G5;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "$(SYSTEM_LIBRARY_DIR)/Frameworks/AppKit.framework/Headers/AppKit.h";
+				INSTALL_PATH = "$(HOME)/bin";
+				OTHER_LDFLAGS = (
+					"-framework",
+					Foundation,
+					"-framework",
+					AppKit,
+				);
+				PREBINDING = NO;
+				PRODUCT_NAME = scopes;
+				ZERO_LINK = NO;
+			};
+			name = Release;
+		};
+/* End XCBuildConfiguration section */
+
+/* Begin XCConfigurationList section */
+		1A0F343C12EA425700496BB8 /* Build configuration list for PBXLegacyTarget "Regenerate polydiff" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1A0F343D12EA425700496BB8 /* Debug */,
+				1A0F343E12EA425700496BB8 /* Debug with StringTemplate Debug */,
+				1A0F343F12EA425700496BB8 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1A0F346212EA42D800496BB8 /* Build configuration list for PBXNativeTarget "polydiff" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1A0F346312EA42D800496BB8 /* Debug */,
+				1A0F346412EA42D800496BB8 /* Debug with StringTemplate Debug */,
+				1A0F346512EA42D800496BB8 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1A12110011D3A5DB00F27B38 /* Build configuration list for PBXNativeTarget "ANTLRCommonTokenTest" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1A1210FD11D3A5DB00F27B38 /* Debug */,
+				1A1210FE11D3A5DB00F27B38 /* Debug with StringTemplate Debug */,
+				1A1210FF11D3A5DB00F27B38 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1A1211D611D3BF4800F27B38 /* Build configuration list for PBXNativeTarget "ANTLRStringStreamTest" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1A1211D311D3BF4700F27B38 /* Debug */,
+				1A1211D411D3BF4700F27B38 /* Debug with StringTemplate Debug */,
+				1A1211D511D3BF4700F27B38 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1A12122911D3C92500F27B38 /* Build configuration list for PBXNativeTarget "ANTLRFastQueueTest" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1A12122611D3C92500F27B38 /* Debug */,
+				1A12122711D3C92500F27B38 /* Debug with StringTemplate Debug */,
+				1A12122811D3C92500F27B38 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1A1212E111D3F53700F27B38 /* Build configuration list for PBXNativeTarget "ANTLRIntArrayTest" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1A1212DE11D3F53700F27B38 /* Debug */,
+				1A1212DF11D3F53700F27B38 /* Debug with StringTemplate Debug */,
+				1A1212E011D3F53700F27B38 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1A12131111D3F7CE00F27B38 /* Build configuration list for PBXNativeTarget "ANTLRCommonTreeTest" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1A12130E11D3F7CE00F27B38 /* Debug */,
+				1A12130F11D3F7CE00F27B38 /* Debug with StringTemplate Debug */,
+				1A12131011D3F7CE00F27B38 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1A348B5311D2BEE9000C72FC /* Build configuration list for PBXNativeTarget "Test" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1A348B5011D2BEE8000C72FC /* Debug */,
+				1A348B5111D2BEE8000C72FC /* Debug with StringTemplate Debug */,
+				1A348B5211D2BEE8000C72FC /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1A348BF111D2D0A2000C72FC /* Build configuration list for PBXNativeTarget "ANTLRBitsetTest" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1A348BEE11D2D0A2000C72FC /* Debug */,
+				1A348BEF11D2D0A2000C72FC /* Debug with StringTemplate Debug */,
+				1A348BF011D2D0A2000C72FC /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1A77EE8E12E6A553007F323A /* Build configuration list for PBXNativeTarget "TreeRewriteRuleTokenStream" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1A77EE8B12E6A552007F323A /* Debug */,
+				1A77EE8C12E6A552007F323A /* Debug with StringTemplate Debug */,
+				1A77EE8D12E6A552007F323A /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1AC5AC9412E7BE0400DF0C58 /* Build configuration list for PBXLegacyTarget "Regenerate treeparser" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1AC5AC9512E7BE0400DF0C58 /* Debug */,
+				1AC5AC9612E7BE0400DF0C58 /* Debug with StringTemplate Debug */,
+				1AC5AC9712E7BE0400DF0C58 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1AC5ACA312E7BEFE00DF0C58 /* Build configuration list for PBXNativeTarget "treeparser" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1AC5ACA412E7BEFE00DF0C58 /* Debug */,
+				1AC5ACA512E7BEFE00DF0C58 /* Debug with StringTemplate Debug */,
+				1AC5ACA612E7BEFE00DF0C58 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1DEB91AD08733DA50010E9CD /* Build configuration list for PBXNativeTarget "ANTLR" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1DEB91AE08733DA50010E9CD /* Debug */,
+				F7C562310CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				1DEB91AF08733DA50010E9CD /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		1DEB91B108733DA50010E9CD /* Build configuration list for PBXProject "ANTLR" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				1DEB91B208733DA50010E9CD /* Debug */,
+				F7C562300CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				1DEB91B308733DA50010E9CD /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F700E86C0A5FA34D005D0757 /* Build configuration list for PBXNativeTarget "combined" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F700E86D0A5FA34D005D0757 /* Debug */,
+				F7C562380CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F700E86E0A5FA34D005D0757 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F700ECDC0A5FE1BF005D0757 /* Build configuration list for PBXNativeTarget "LL-star" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F700ECDD0A5FE1BF005D0757 /* Debug */,
+				F7C5623A0CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F700ECDE0A5FE1BF005D0757 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F7037EB80A05AFEF0070435D /* Build configuration list for PBXNativeTarget "lexertest-simple" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F7037EB90A05AFEF0070435D /* Debug */,
+				F7C562340CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F7037EBA0A05AFEF0070435D /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F72C5E660AB7E4C900282574 /* Build configuration list for PBXNativeTarget "simplectree" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F72C5E670AB7E4C900282574 /* Debug */,
+				F7C562400CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F72C5E680AB7E4C900282574 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F76287400B7151B9006AA7EF /* Build configuration list for PBXAggregateTarget "Regenerate all examples" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F76287410B7151B9006AA7EF /* Debug */,
+				F7C562330CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F76287420B7151B9006AA7EF /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F76287460B715201006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate fuzzy" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F76287470B715201006AA7EF /* Debug */,
+				F7C562370CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F76287480B715201006AA7EF /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F76287790B71557E006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate lexertest-simple" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F762877A0B71557E006AA7EF /* Debug */,
+				F7C562350CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F762877B0B71557E006AA7EF /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F762877F0B71559C006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate combined" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F76287800B71559C006AA7EF /* Debug */,
+				F7C562390CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F76287810B71559C006AA7EF /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F76287830B71559F006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate LL-star" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F76287840B71559F006AA7EF /* Debug */,
+				F7C5623B0CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F76287850B71559F006AA7EF /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F76287870B7155A2006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate hoistedPredicates" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F76287880B7155A2006AA7EF /* Debug */,
+				F7C5623D0CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F76287890B7155A2006AA7EF /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F762878B0B7155AB006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate scopes" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F762878C0B7155AB006AA7EF /* Debug */,
+				F7C5623F0CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F762878D0B7155AB006AA7EF /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F762878F0B7155AF006AA7EF /* Build configuration list for PBXLegacyTarget "Regenerate simplectree" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F76287900B7155AF006AA7EF /* Debug */,
+				F7C562410CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F76287910B7155AF006AA7EF /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F79D566F0A0E23D600EA3CEE /* Build configuration list for PBXNativeTarget "fuzzy" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F79D56700A0E23D600EA3CEE /* Debug */,
+				F7C562360CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F79D56710A0E23D600EA3CEE /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F7CD47620C64D23800FF933A /* Build configuration list for PBXLegacyTarget "Regenerate treerewrite" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F7CD47630C64D23800FF933A /* Debug */,
+				F7C562430CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F7CD47640C64D23800FF933A /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F7CD477C0C64D27000FF933A /* Build configuration list for PBXNativeTarget "treerewrite" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F7CD477D0C64D27000FF933A /* Debug */,
+				F7C562420CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F7CD477E0C64D27000FF933A /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F7DD05F00A7B1640006A006C /* Build configuration list for PBXNativeTarget "hoistedPredicates" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F7DD05F10A7B1640006A006C /* Debug */,
+				F7C5623C0CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F7DD05F20A7B1640006A006C /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+		F7DD07790A7B6682006A006C /* Build configuration list for PBXNativeTarget "scopes" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				F7DD077A0A7B6682006A006C /* Debug */,
+				F7C5623E0CD513D400727DB0 /* Debug with StringTemplate Debug */,
+				F7DD077B0A7B6682006A006C /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Debug;
+		};
+/* End XCConfigurationList section */
+	};
+	rootObject = 0867D690FE84028FC02AAC07 /* Project object */;
+}
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/.DS_Store b/runtime/ObjC/Framework/ANTLR/.DS_Store
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/.DS_Store
rename to runtime/ObjC/Framework/ANTLR/.DS_Store
Binary files differ
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/contents.xcworkspacedata
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/contents.xcworkspacedata
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/contents.xcworkspacedata
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/WorkspaceSettings.xcsettings b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/WorkspaceSettings.xcsettings
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/WorkspaceSettings.xcsettings
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/WorkspaceSettings.xcsettings
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/xcdebugger/Expressions.xcexplist b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/xcdebugger/Expressions.xcexplist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/xcdebugger/Expressions.xcexplist
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/project.xcworkspace/xcuserdata/acondit.xcuserdatad/xcdebugger/Expressions.xcexplist
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/ANTLRTests.xcscheme b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/ANTLRTests.xcscheme
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/ANTLRTests.xcscheme
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/ANTLRTests.xcscheme
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/combined.xcscheme b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/combined.xcscheme
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/combined.xcscheme
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/combined.xcscheme
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/hoistedPredicates.xcscheme b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/hoistedPredicates.xcscheme
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/hoistedPredicates.xcscheme
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/hoistedPredicates.xcscheme
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/lexertest-simple.xcscheme b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/lexertest-simple.xcscheme
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/lexertest-simple.xcscheme
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/lexertest-simple.xcscheme
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/polydiff.xcscheme b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/polydiff.xcscheme
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/polydiff.xcscheme
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/polydiff.xcscheme
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/scopes.xcscheme b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/scopes.xcscheme
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/scopes.xcscheme
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/scopes.xcscheme
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/simplecTreeParser.xcscheme b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/simplecTreeParser.xcscheme
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/simplecTreeParser.xcscheme
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/simplecTreeParser.xcscheme
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/treeparser.xcscheme b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/treeparser.xcscheme
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/treeparser.xcscheme
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/treeparser.xcscheme
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/treerewrite.xcscheme b/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/treerewrite.xcscheme
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/treerewrite.xcscheme
rename to runtime/ObjC/Framework/ANTLR/ANTLR.xcodeproj/xcuserdata/acondit.xcuserdatad/xcschemes/treerewrite.xcscheme
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR/ANTLR-Info.plist b/runtime/ObjC/Framework/ANTLR/ANTLR/ANTLR-Info.plist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR/ANTLR-Info.plist
rename to runtime/ObjC/Framework/ANTLR/ANTLR/ANTLR-Info.plist
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR/ANTLR-Prefix.pch b/runtime/ObjC/Framework/ANTLR/ANTLR/ANTLR-Prefix.pch
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR/ANTLR-Prefix.pch
rename to runtime/ObjC/Framework/ANTLR/ANTLR/ANTLR-Prefix.pch
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR/en.lproj/InfoPlist.strings b/runtime/ObjC/Framework/ANTLR/ANTLR/en.lproj/InfoPlist.strings
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLR/en.lproj/InfoPlist.strings
rename to runtime/ObjC/Framework/ANTLR/ANTLR/en.lproj/InfoPlist.strings
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/en.lproj/InfoPlist.strings b/runtime/ObjC/Framework/ANTLR/ANTLRTests/en.lproj/InfoPlist.strings
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR/ANTLRTests/en.lproj/InfoPlist.strings
rename to runtime/ObjC/Framework/ANTLR/ANTLRTests/en.lproj/InfoPlist.strings
diff --git a/runtime/ObjC/Framework/ANTLRBitSet.h b/runtime/ObjC/Framework/ANTLRBitSet.h
new file mode 100644
index 0000000..140e94d
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRBitSet.h
@@ -0,0 +1,95 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+#import <CoreFoundation/CoreFoundation.h>
+#import "Token.h"
+#import "AMutableArray.h"
+
+#define BITS (sizeof(NSUInteger) * 8)
+#define LOG_BITS ((sizeof(NSUInteger)==8)?6:5)
+
+// A simple wrapper around CoreFoundation bit vectors to shield the rest of the implementation
+// from the specifics of the BitVector initialization and query functions.
+// This is fast, so there is no need to reinvent the wheel just yet.
+
+@interface ANTLRBitSet : NSObject < NSMutableCopying > {
+	__strong CFMutableBitVectorRef bitVector;
+}
+
+#pragma mark Class Methods
+
++ (ANTLRBitSet *) newBitSet;
++ (ANTLRBitSet *) newBitSetWithType:(TokenType)type;
+/** Construct an ANTLRBitSet of the given size
+ * @param nbits The size of the ANTLRBitSet in bits
+ */
++ (ANTLRBitSet *) newBitSetWithNBits:(NSUInteger)nbits;
++ (ANTLRBitSet *) newBitSetWithArray:(AMutableArray *)types;
++ (ANTLRBitSet *) newBitSetWithBits:(const unsigned long long *)theBits Count:(NSUInteger)longCount;
+
++ (ANTLRBitSet *) of:(NSUInteger)el;
++ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b;
++ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c;
++ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c And4:(NSUInteger)d;
+
+#pragma mark Initializer
+
+- (ANTLRBitSet *) init;
+- (ANTLRBitSet *) initWithType:(TokenType)type;
+- (ANTLRBitSet *) initWithNBits:(NSUInteger)nbits;
+- (ANTLRBitSet *) initWithBitVector:(CFMutableBitVectorRef)theBitVector;
+- (ANTLRBitSet *) initWithBits:(const unsigned long long *)theBits Count:(NSUInteger)theCount;
+- (ANTLRBitSet *) initWithArrayOfBits:(NSArray *)theArray;
+
+#pragma mark Operations
+- (ANTLRBitSet *) or:(ANTLRBitSet *) aBitSet;
+- (void) orInPlace:(ANTLRBitSet *) aBitSet;
+- (void) add:(NSUInteger) bit;
+- (void) remove:(NSUInteger) bit;
+- (void) setAllBits:(BOOL) aState;
+
+- (NSInteger) numBits;
+- (NSUInteger) size;
+- (void) setSize:(NSUInteger) noOfWords;
+
+#pragma mark Informational
+- (unsigned long long) bitMask:(NSUInteger) bitNumber;
+- (BOOL) member:(NSUInteger)bitNumber;
+- (BOOL) isNil;
+- (NSString *) description;
+- (NSString *) toString;
+
+#pragma mark NSCopying support
+
+- (id) mutableCopyWithZone:(NSZone *) theZone;
+
+
+//private
+- (CFMutableBitVectorRef) _bitVector;
+@property (getter=_bitVector) CFMutableBitVectorRef bitVector;
+@end
diff --git a/runtime/ObjC/Framework/ANTLRBitSet.m b/runtime/ObjC/Framework/ANTLRBitSet.m
new file mode 100644
index 0000000..46c02c6
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRBitSet.m
@@ -0,0 +1,326 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "ANTLRBitSet.h"
+
+@implementation ANTLRBitSet
+#pragma mark Class Methods
+
++ (ANTLRBitSet *) newBitSet
+{
+    return [[ANTLRBitSet alloc] init];
+}
+
++ (ANTLRBitSet *) newBitSetWithType:(TokenType)type
+{
+    return [[ANTLRBitSet alloc] initWithType:type];
+}
+
+/** Construct an ANTLRBitSet of the given size
+ * @param nbits The size of the ANTLRBitSet in bits
+ */
++ (ANTLRBitSet *) newBitSetWithNBits:(NSUInteger)nbits
+{
+    return [[ANTLRBitSet alloc] initWithNBits:nbits];
+}
+
++ (ANTLRBitSet *) newBitSetWithArray:(AMutableArray *)types
+{
+    return [[ANTLRBitSet alloc] initWithArrayOfBits:types];
+}
+
++ (ANTLRBitSet *) newBitSetWithBits:(const unsigned long long *)theBits Count:(NSUInteger)longCount
+{
+    return [[ANTLRBitSet alloc] initWithBits:theBits Count:longCount];
+}
+
+
++ (ANTLRBitSet *) of:(NSUInteger) el
+{
+    ANTLRBitSet *s = [ANTLRBitSet newBitSetWithNBits:(el + 1)];
+    [s add:el];
+    return s;
+}
+
++ (ANTLRBitSet *) of:(NSUInteger) a And2:(NSUInteger) b
+{
+    NSInteger c = (((a>b)?a:b)+1);
+    ANTLRBitSet *s = [ANTLRBitSet newBitSetWithNBits:c];
+    [s add:a];
+    [s add:b];
+    return s;
+}
+
++ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c
+{
+    NSUInteger d = ((a>b)?a:b);
+    d = ((c>d)?c:d)+1;
+    ANTLRBitSet *s = [ANTLRBitSet newBitSetWithNBits:d];
+    [s add:a];
+    [s add:b];
+    [s add:c];
+    return s;
+}
+
++ (ANTLRBitSet *) of:(NSUInteger)a And2:(NSUInteger)b And3:(NSUInteger)c And4:(NSUInteger)d
+{
+    NSUInteger e = ((a>b)?a:b);
+    NSUInteger f = ((c>d)?c:d);
+    e = ((e>f)?e:f)+1;
+    ANTLRBitSet *s = [ANTLRBitSet newBitSetWithNBits:e];
+    [s add:a];
+    [s add:b];
+    [s add:c];
+    [s add:d];
+    return s;
+}
+
+// initializer
+#pragma mark Initializer
+
+- (ANTLRBitSet *) init
+{
+	if ((self = [super init]) != nil) {
+		bitVector = CFBitVectorCreateMutable(kCFAllocatorDefault,0);
+	}
+	return self;
+}
+
+- (ANTLRBitSet *) initWithType:(TokenType)type
+{
+	if ((self = [super init]) != nil) {
+		bitVector = CFBitVectorCreateMutable(kCFAllocatorDefault,0);
+        if ((CFIndex)type >= CFBitVectorGetCount(bitVector))
+            CFBitVectorSetCount(bitVector, type+1);
+        CFBitVectorSetBitAtIndex(bitVector, type, 1);
+	}
+	return self;
+}
+
+- (ANTLRBitSet *) initWithNBits:(NSUInteger)nbits
+{
+	if ((self = [super init]) != nil) {
+        bitVector = CFBitVectorCreateMutable(kCFAllocatorDefault,0);
+        CFBitVectorSetCount( bitVector, nbits );
+	}
+	return self;
+}
+
+- (ANTLRBitSet *) initWithBitVector:(CFMutableBitVectorRef)theBitVector
+{
+	if ((self = [super init]) != nil) {
+		bitVector = theBitVector;
+	}
+	return self;
+}
+
+// Initialize the bit vector with a constant array of ulonglongs like ANTLR generates.
+// The big-endian swap is currently commented out below, so bits are taken in host order.
+- (ANTLRBitSet *) initWithBits:(const unsigned long long *)theBits Count:(NSUInteger)longCount
+{
+	if ((self = [super init]) != nil) {
+		unsigned int longNo;
+//        unsigned long long swappedBits = 0LL;
+		CFIndex bitIdx;
+        bitVector = CFBitVectorCreateMutable ( kCFAllocatorDefault, 0 );
+		CFBitVectorSetCount( bitVector, sizeof(unsigned long long)*8*longCount );
+
+		for (longNo = 0; longNo < longCount; longNo++) {
+			for (bitIdx = 0; bitIdx < (CFIndex)sizeof(unsigned long long)*8; bitIdx++) {
+//				swappedBits = CFSwapInt64HostToBig(theBits[longNo]);
+//				if (swappedBits & (1LL << bitIdx)) {
+				if (theBits[longNo] & (1LL << bitIdx)) {
+					CFBitVectorSetBitAtIndex(bitVector, bitIdx+(longNo*(sizeof(unsigned long long)*8)), 1);
+				}
+			}
+		}
+	}
+	return self;
+}
+
+// Initialize bit vector with an array of anything. Just test the boolValue and set the corresponding bit.
+// Note: This is big-endian!
+- (ANTLRBitSet *) initWithArrayOfBits:(NSArray *)theArray
+{
+	if ((self = [super init]) != nil) {
+        bitVector = CFBitVectorCreateMutable ( kCFAllocatorDefault, 0 );
+		id value;
+		int bit = 0;
+		for (value in theArray) {
+			if ([value boolValue] == YES) {
+                [self add:bit];
+				//CFBitVectorSetBitAtIndex(bitVector, bit, 1);
+			}
+			bit++;
+		}
+	}
+	return self;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in ANTLRBitSet" );
+#endif
+	CFRelease(bitVector);
+	[super dealloc];
+}
+
+	// operations
+#pragma mark Operations
+// return a copy of (self|aBitSet)
+- (ANTLRBitSet *) or:(ANTLRBitSet *) aBitSet
+{
+	ANTLRBitSet *bitsetCopy = [self mutableCopyWithZone:nil];
+	[bitsetCopy orInPlace:aBitSet];
+	return bitsetCopy;
+}
+
+// perform a bitwise OR operation in place by changing underlying bit vector, growing it if necessary
+- (void) orInPlace:(ANTLRBitSet *) aBitSet
+{
+	CFIndex selfCnt = CFBitVectorGetCount(bitVector);
+	CFMutableBitVectorRef otherBitVector = [aBitSet _bitVector];
+	CFIndex otherCnt = CFBitVectorGetCount(otherBitVector);
+	CFIndex maxBitCnt = selfCnt > otherCnt ? selfCnt : otherCnt;
+	CFBitVectorSetCount(bitVector,maxBitCnt);		// be sure to grow the CFBitVector manually!
+	
+	CFIndex currIdx;
+	for (currIdx = 0; currIdx < maxBitCnt; currIdx++) {
+		if (CFBitVectorGetBitAtIndex(bitVector, currIdx) | CFBitVectorGetBitAtIndex(otherBitVector, currIdx)) {
+			CFBitVectorSetBitAtIndex(bitVector, currIdx, 1);
+		}
+	}
+}
+
+// set a bit, grow the bit vector if necessary
+- (void) add:(NSUInteger) bit
+{
+	if ((CFIndex)bit >= CFBitVectorGetCount(bitVector))
+		CFBitVectorSetCount(bitVector, bit+1);
+	CFBitVectorSetBitAtIndex(bitVector, bit, 1);
+}
+
+// unset a bit
+- (void) remove:(NSUInteger) bit
+{
+	CFBitVectorSetBitAtIndex(bitVector, bit, 0);
+}
+
+- (void) setAllBits:(BOOL) aState
+{
+    for( NSInteger bit=0; bit < CFBitVectorGetCount(bitVector); bit++ ) {
+        CFBitVectorSetBitAtIndex(bitVector, bit, aState);
+    }
+}
+
+// returns the number of bits in the bit vector.
+- (NSInteger) numBits
+{
+    // return CFBitVectorGetCount(bitVector);
+    return CFBitVectorGetCountOfBit(bitVector, CFRangeMake(0, CFBitVectorGetCount(bitVector)), 1);
+}
+
+// returns the number of bits in the bit vector.
+- (NSUInteger) size
+{
+    return CFBitVectorGetCount(bitVector);
+}
+
+- (void) setSize:(NSUInteger) nBits
+{
+    CFBitVectorSetCount( bitVector, nBits );
+}
+
+#pragma mark Informational
+// return a bitmask representation of this bitvector for easy operations
+- (unsigned long long) bitMask:(NSUInteger) bitNumber
+{
+	return 1LL << bitNumber;
+}
+
+// test a bit (no pun intended)
+- (BOOL) member:(NSUInteger) bitNumber
+{
+	return CFBitVectorGetBitAtIndex(bitVector,bitNumber) ? YES : NO;
+}
+
+// are all bits off?
+- (BOOL) isNil
+{
+	return ((CFBitVectorGetCountOfBit(bitVector, CFRangeMake(0,CFBitVectorGetCount(bitVector)), 1) == 0) ? YES : NO);
+}
+
+// debugging aid. GDB invokes this automagically
+// return a string representation of the bit vector, indicating by their bitnumber which bits are set
+- (NSString *) description
+{
+	CFIndex length = CFBitVectorGetCount(bitVector);
+	CFIndex currBit;
+	NSMutableString *descString = [NSMutableString  stringWithString:@"{"];
+	BOOL haveInsertedBit = NO;
+	for (currBit = 0; currBit < length; currBit++) {
+		if ( CFBitVectorGetBitAtIndex(bitVector, currBit) ) {
+			if (haveInsertedBit) {
+				[descString appendString:@","];
+			}
+			[descString appendFormat:@"%ld", (long)currBit];
+			haveInsertedBit = YES;
+		}
+	}
+	[descString appendString:@"}"];
+	return descString;
+}
+
+// return a string representation of the bit vector, indicating by their bitnumber which bits are set
+- (NSString *) toString
+{
+	
+	return [self description];
+}
+
+	// NSCopying
+#pragma mark NSCopying support
+
+- (id) mutableCopyWithZone:(NSZone *) theZone
+{
+	ANTLRBitSet *newBitSet = [[ANTLRBitSet allocWithZone:theZone] initWithBitVector:CFBitVectorCreateMutableCopy(kCFAllocatorDefault,0,bitVector)];
+	return newBitSet;
+}
+
+- (CFMutableBitVectorRef) _bitVector
+{
+	return bitVector;
+}
+
+@synthesize bitVector;
+@end
+
+NSInteger max(NSInteger a, NSInteger b)
+{
+    return (a>b)?a:b;
+}
+
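
For reference, here is a minimal usage sketch of the ANTLRBitSet API introduced above. It is illustrative only: it assumes manual retain/release (as the surrounding runtime code does), and the token-type values 3, 4, 7, 9 and 12 are arbitrary placeholders rather than constants defined by this patch.

    #import "ANTLRBitSet.h"

    static void bitSetDemo(void)
    {
        // build two sets from hypothetical token-type numbers
        ANTLRBitSet *follow = [ANTLRBitSet of:4 And2:7 And3:9];
        ANTLRBitSet *extra  = [ANTLRBitSet of:12];

        // non-destructive union, then an in-place union
        ANTLRBitSet *merged = [follow or:extra];
        ANTLRBitSet *three  = [ANTLRBitSet of:3];
        [merged orInPlace:three];

        // membership test and the debug description, e.g. {3,4,7,9,12}
        if ( [merged member:7] )
            NSLog(@"%@", [merged description]);

        [three release];
        [merged release];
        [extra release];
        [follow release];
    }
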
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRBitsetTest-Info.plist b/runtime/ObjC/Framework/ANTLRBitsetTest-Info.plist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLRBitsetTest-Info.plist
rename to runtime/ObjC/Framework/ANTLRBitsetTest-Info.plist
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTokenTest-Info.plist b/runtime/ObjC/Framework/ANTLRCommonTokenTest-Info.plist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTokenTest-Info.plist
rename to runtime/ObjC/Framework/ANTLRCommonTokenTest-Info.plist
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeTest-Info.plist b/runtime/ObjC/Framework/ANTLRCommonTreeTest-Info.plist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLRCommonTreeTest-Info.plist
rename to runtime/ObjC/Framework/ANTLRCommonTreeTest-Info.plist
diff --git a/runtime/ObjC/Framework/ANTLRError.h b/runtime/ObjC/Framework/ANTLRError.h
new file mode 100644
index 0000000..5d974e4
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRError.h
@@ -0,0 +1,35 @@
+//
+//  ANTLRError.h
+//  ANTLR
+//
+//  Created by Ian Michell on 30/03/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+// [The "BSD licence"]
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+
+#define ANTLRErrorDomain @"ANTLRError"
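
The header above only defines the ANTLRErrorDomain string. A caller that wants to surface runtime failures as NSError objects could use it as sketched below; the error code constant and helper are hypothetical, not something this patch defines.

    #import "ANTLRError.h"

    // hypothetical code value; only the domain comes from ANTLRError.h
    static const NSInteger kANTLRErrorFileNotFound = 1;

    static NSError *antlrFileError(NSString *path)
    {
        NSDictionary *info = [NSDictionary dictionaryWithObject:path
                                                         forKey:NSFilePathErrorKey];
        return [NSError errorWithDomain:ANTLRErrorDomain
                                   code:kANTLRErrorFileNotFound
                               userInfo:info];
    }
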
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRFastQueueTest-Info.plist b/runtime/ObjC/Framework/ANTLRFastQueueTest-Info.plist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLRFastQueueTest-Info.plist
rename to runtime/ObjC/Framework/ANTLRFastQueueTest-Info.plist
diff --git a/runtime/ObjC/Framework/ANTLRFileStream.h b/runtime/ObjC/Framework/ANTLRFileStream.h
new file mode 100644
index 0000000..a4a931d
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRFileStream.h
@@ -0,0 +1,50 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#import "ANTLRStringStream.h"
+
+/** This is a char buffer stream that is loaded from a file
+ *  all at once when you construct the object.  This looks very
+ *  much like an ANTLRReader or ANTLRInputStream, but it's a special case
+ *  since we know the exact size of the object to load.  We can avoid lots
+ *  of data copying. 
+ */
+@interface ANTLRFileStream : ANTLRStringStream {
+	__strong NSString *fileName;
+}
+
+
++ (id) newANTLRFileStream:(NSString*) fileName;
++ (id) newANTLRFileStream:(NSString *)aFileName encoding:(NSStringEncoding)encoding;
+- (id) init:(NSString *) aFileName;
+- (id) init:(NSString *) aFileName encoding:(NSStringEncoding)encoding;
+- (void) load:(NSString *)fileName encoding:(NSStringEncoding)encoding;
+- (NSString *) getSourceName;
+
+@property (retain) NSString *fileName;
+
+@end
diff --git a/runtime/ObjC/Framework/ANTLRFileStream.m b/runtime/ObjC/Framework/ANTLRFileStream.m
new file mode 100644
index 0000000..bbb6fb2
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRFileStream.m
@@ -0,0 +1,109 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+ 
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+ 
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** This is a char buffer stream that is loaded from a file
+ *  all at once when you construct the object.  This looks very
+ *  much like an ANTLRReader or ANTLRInputStream, but it's a special case
+ *  since we know the exact size of the object to load.  We can avoid lots
+ *  of data copying. 
+ */
+
+#import "ANTLRFileStream.h"
+
+@implementation ANTLRFileStream
+
+@synthesize fileName;
+
++ (id) newANTLRFileStream:(NSString*)fileName
+{
+    return [[ANTLRFileStream alloc] init:fileName];
+}
+
++ (id) newANTLRFileStream:(NSString *)aFileName encoding:(NSStringEncoding)encoding
+{
+    return [[ANTLRFileStream alloc] init:aFileName encoding:encoding];
+}
+
+- (id) init:(NSString *)aFileName
+{
+    self = [super init];
+    if ( self != nil ) {
+        fileName = aFileName;
+        [self load:aFileName encoding:NSUTF8StringEncoding];
+    }
+    return self;
+}
+
+- (id) init:(NSString *) aFileName encoding:(NSStringEncoding)encoding
+{
+    self = [super init];
+    if ( self != nil ) {
+        fileName = aFileName;
+        [self load:aFileName encoding:encoding];
+    }
+    return self;
+}
+
+- (NSString *) getSourceName
+{
+    return fileName;
+}
+
+- (void) load:(NSString *)aFileName encoding:(NSStringEncoding)encoding
+{
+    if ( aFileName==nil ) {
+        return;
+    }
+    NSError *error;
+    NSData *retData = nil;
+    NSFileHandle *fh;
+    @try {
+        NSString *fn = [aFileName stringByStandardizingPath];
+        NSURL *f = [NSURL fileURLWithPath:fn];
+        fh = [NSFileHandle fileHandleForReadingFromURL:f error:&error];
+        if ( fh==nil ) {
+            return;
+        }
+        NSInteger numRead = 0;
+        NSInteger p1 = 0;
+        retData = [fh readDataToEndOfFile];
+        numRead = [retData length];
+        NSLog( @"read %ld chars; p was %ld is now %ld", (long)numRead, (long)p1, (long)(p1+numRead) );
+        p1 += numRead;
+        n = p1;
+        // honor the encoding argument rather than assuming ASCII
+        data = [[NSString alloc] initWithData:retData encoding:encoding];
+        NSLog( @"n=%ld", (long)n );
+    }
+    @finally {
+        [fh closeFile];
+    }
+}
+
+@end
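
A short sketch of how the new ANTLRFileStream is meant to be used: the whole file is read during construction, after which the object behaves like any other ANTLRStringStream. The UTF-8 choice and the idea of handing the stream to a generated lexer are assumptions on the caller's side, not requirements of this class.

    #import "ANTLRFileStream.h"

    static void loadGrammarInput(NSString *path)
    {
        ANTLRFileStream *input =
            [ANTLRFileStream newANTLRFileStream:path encoding:NSUTF8StringEncoding];
        NSLog(@"loaded %@ (%ld chars)", [input getSourceName], (long)[input size]);
        // ... a generated lexer would normally consume `input` here ...
        [input release];
    }
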
diff --git a/runtime/ObjC/Framework/ANTLRInputStream.h b/runtime/ObjC/Framework/ANTLRInputStream.h
new file mode 100644
index 0000000..5127cdb
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRInputStream.h
@@ -0,0 +1,31 @@
+//
+//  ANTLRInputStream.h
+//  ANTLR
+//
+//  Created by Alan Condit on 2/21/11.
+//  Copyright 2011 Alan's MachineWorks. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+#import "ANTLRReaderStream.h"
+
+@interface ANTLRInputStream : ANTLRReaderStream {
+    NSStringEncoding encoding;
+}
+
+@property (assign) NSStringEncoding encoding;
+
++ (id) newANTLRInputStream;
++ (id) newANTLRInputStream:(NSInputStream *)anInput;
++ (id) newANTLRInputStream:(NSInputStream *)anInput size:(NSInteger)theSize;
++ (id) newANTLRInputStream:(NSInputStream *)anInput encoding:(NSStringEncoding)theEncoding;
++ (id) newANTLRInputStream:(NSInputStream *)anInput
+                      size:(NSInteger)theSize
+            readBufferSize:(NSInteger)theRBSize
+                  encoding:(NSStringEncoding)theEncoding;
+- (id) init;
+- (id) initWithInput:(NSInputStream *)anInput
+                size:(NSInteger)theSize
+      readBufferSize:(NSInteger)theRBSize
+            encoding:(NSStringEncoding)theEncoding;
+@end
diff --git a/runtime/ObjC/Framework/ANTLRInputStream.m b/runtime/ObjC/Framework/ANTLRInputStream.m
new file mode 100644
index 0000000..ca5bc4a
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRInputStream.m
@@ -0,0 +1,63 @@
+//
+//  ANTLRInputStream.m
+//  ANTLR
+//
+//  Created by Alan Condit on 2/21/11.
+//  Copyright 2011 Alan's MachineWorks. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+#import "ANTLRInputStream.h"
+
+
+@implementation ANTLRInputStream
+
+@synthesize encoding;
+
++ (id) newANTLRInputStream
+{
+    return [[ANTLRInputStream alloc] init];
+}
+
++ (id) newANTLRInputStream:(NSInputStream *)anInput
+{
+    return [[ANTLRInputStream alloc] initWithInput:anInput size:ANTLRReaderStream.INITIAL_BUFFER_SIZE readBufferSize:ANTLRReaderStream.READ_BUFFER_SIZE encoding:NSASCIIStringEncoding];
+}
+
++ (id) newANTLRInputStream:(NSInputStream *)anInput size:(NSInteger)theSize
+{
+    return [[ANTLRInputStream alloc] initWithInput:anInput size:theSize readBufferSize:ANTLRReaderStream.READ_BUFFER_SIZE encoding:NSASCIIStringEncoding];
+}
+
++ (id) newANTLRInputStream:(NSInputStream *)anInput encoding:(NSStringEncoding)theEncoding
+{
+    return [[ANTLRInputStream alloc] initWithInput:anInput size:ANTLRReaderStream.INITIAL_BUFFER_SIZE readBufferSize:ANTLRReaderStream.READ_BUFFER_SIZE encoding:theEncoding];
+}
+
++ (id) newANTLRInputStream:(NSInputStream *)anInput
+                      size:(NSInteger)theSize
+            readBufferSize:(NSInteger)theRBSize
+                  encoding:(NSStringEncoding)theEncoding
+{
+    return [[ANTLRInputStream alloc] initWithInput:anInput size:theSize readBufferSize:theRBSize encoding:theEncoding];
+}
+
+- (id) init
+{
+    self = [super init];
+    return self;
+}
+
+- (id) initWithInput:(NSInputStream *)anInput
+                size:(NSInteger)theSize
+      readBufferSize:(NSInteger)theRBSize
+            encoding:(NSStringEncoding)theEncoding
+{
+    self = [super initWithReader:anInput size:theSize readBufferSize:theRBSize];
+    if ( self != nil ) {
+        encoding = theEncoding;
+        //[self load:theSize readBufferSize:theRBSize]; // load called in super class
+    }
+    return self;
+}
+
+@end
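
A minimal sketch of driving the new ANTLRInputStream from an NSInputStream. Buffer sizes fall back to the ANTLRReaderStream defaults; the file path and encoding are placeholder choices, and the helper function is not part of the patch.

    #import "ANTLRInputStream.h"

    static void peekFirstChar(NSString *path)
    {
        NSInputStream *raw = [NSInputStream inputStreamWithFileAtPath:path];
        ANTLRInputStream *input =
            [ANTLRInputStream newANTLRInputStream:raw encoding:NSUTF8StringEncoding];
        // LA:1 is the one-character lookahead inherited from ANTLRStringStream
        NSLog(@"first char: %ld", (long)[input LA:1]);
        [input release];
    }
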
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRIntArrayTest-Info.plist b/runtime/ObjC/Framework/ANTLRIntArrayTest-Info.plist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLRIntArrayTest-Info.plist
rename to runtime/ObjC/Framework/ANTLRIntArrayTest-Info.plist
diff --git a/runtime/ObjC/Framework/ANTLRReaderStream.h b/runtime/ObjC/Framework/ANTLRReaderStream.h
new file mode 100644
index 0000000..4187fe8
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRReaderStream.h
@@ -0,0 +1,38 @@
+//
+//  ANTLRReaderStream.h
+//  ANTLR
+//
+//  Created by Alan Condit on 2/21/11.
+//  Copyright 2011 Alan's MachineWorks. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+#import "ANTLRStringStream.h"
+
+@interface ANTLRReaderStream : ANTLRStringStream {
+    NSInputStream *is;
+    NSInteger size;
+    NSInteger rbSize;
+    //NSData *data; /* ANTLRStringStream has NSString *data */
+    NSInteger p1;
+}
+
+@property (retain) NSInputStream *is;
+@property (assign) NSInteger size;
+@property (assign) NSInteger rbSize;
+//@property (retain) NSData *data;
+
++ (NSInteger) READ_BUFFER_SIZE;
++ (NSInteger) INITIAL_BUFFER_SIZE;
+
++ (id) newANTLRReaderStream;
++ (id) newANTLRReaderStream:(NSInputStream *)r;
++ (id) newANTLRReaderStream:(NSInputStream *)r size:(NSInteger)aSize;
++ (id) newANTLRReaderStream:(NSInputStream *)r size:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize;
+- (id) initWithReader:(NSInputStream *)r size:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize;
+- (void) load:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize;
+- (void) setUpStreamForFile;
+- (void) stream:(NSStream *)stream handleEvent:(NSStreamEvent)eventCode;
+- (void) close;
+
+@end
diff --git a/runtime/ObjC/Framework/ANTLRReaderStream.m b/runtime/ObjC/Framework/ANTLRReaderStream.m
new file mode 100644
index 0000000..de86e13
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRReaderStream.m
@@ -0,0 +1,164 @@
+//
+//  ANTLRReaderStream.m
+//  ANTLR
+//
+//  Created by Alan Condit on 2/21/11.
+//  Copyright 2011 Alan's MachineWorks. All rights reserved.
+//
+
+#import "ANTLRReaderStream.h"
+#import "ACNumber.h"
+
+@implementation ANTLRReaderStream
+
+@synthesize is;
+@synthesize size;
+@synthesize rbSize;
+
+static NSInteger READ_BUFFER_SIZE = 1024;
+static NSInteger INITIAL_BUFFER_SIZE = 1024;
+
++ (NSInteger) READ_BUFFER_SIZE
+{
+    return READ_BUFFER_SIZE;
+}
+
++ (NSInteger) INITIAL_BUFFER_SIZE
+{
+    return INITIAL_BUFFER_SIZE;
+}
+
++ (id) newANTLRReaderStream
+{
+    return [[ANTLRReaderStream alloc] init];
+}
+
++ (id) newANTLRReaderStream:(NSInputStream *)r
+{
+    return [[ANTLRReaderStream alloc] initWithReader:r size:INITIAL_BUFFER_SIZE readBufferSize:READ_BUFFER_SIZE];
+}
+
++ (id) newANTLRReaderStream:(NSInputStream *)r size:(NSInteger)aSize
+{
+    return [[ANTLRReaderStream alloc] initWithReader:r size:aSize readBufferSize:READ_BUFFER_SIZE];
+}
+
++ (id) newANTLRReaderStream:(NSInputStream *)r size:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize
+{
+//    load(r, aSize, aReadChunkSize);
+    return [[ANTLRReaderStream alloc] initWithReader:r size:aSize readBufferSize:aReadChunkSize];
+}
+
+- (id) init
+{
+	self = [super init];
+	if ( self != nil ) {
+        p1 = n;  // p1 picks up the inherited ANTLRStringStream position
+        is = nil;
+        rbSize = READ_BUFFER_SIZE;
+        size = INITIAL_BUFFER_SIZE;
+    }
+    return self;
+}
+
+- (id) initWithReader:(NSInputStream *)r size:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize
+{
+	self = [super init];
+	if ( self != nil ) {
+        p1 = n;  // p1 picks up the inherited ANTLRStringStream position
+        is = r;
+        size = aSize;
+        rbSize = aReadChunkSize;
+        [is open];
+//        [self setUpStreamForFile];
+        if ( [is hasBytesAvailable] ) {
+            [self load:aSize readBufferSize:aReadChunkSize];
+        }
+    }
+    return self;
+}
+
+- (void) load:(NSInteger)aSize readBufferSize:(NSInteger)aReadChunkSize
+{
+    NSMutableData *retData = nil;
+    uint8_t buf[1024];
+    if ( is==nil ) {
+        return;
+    }
+    if ( aSize<=0 ) {
+        aSize = INITIAL_BUFFER_SIZE;
+    }
+    if ( aReadChunkSize<=0 ) {
+        aReadChunkSize = READ_BUFFER_SIZE;
+    }
+    @try {
+        NSInteger numRead = 0;
+        // never ask for more bytes than the local buffer can hold
+        NSInteger chunk = (aReadChunkSize < (NSInteger)sizeof(buf)) ? aReadChunkSize : (NSInteger)sizeof(buf);
+        numRead = [is read:buf maxLength:chunk];
+        if ( numRead < 0 ) numRead = 0;
+        retData = [NSMutableData dataWithCapacity:numRead];
+        [retData appendBytes:(const void *)buf length:numRead];
+        NSLog( @"read %ld chars; p was %ld is now %ld", (long)numRead, (long)p1, (long)(p1+numRead) );
+        p1 += numRead;
+        n = p1;
+        data = [[NSString alloc] initWithData:retData encoding:NSASCIIStringEncoding];
+        NSLog( @"n=%ld\n", (long)n );
+    }
+    @finally {
+        [self close];
+    }
+}
+
+- (void)setUpStreamForFile
+{
+    // iStream is NSInputStream instance variable
+//    if ( is == nil )
+//        is = [[NSInputStream alloc] initWithFileAtPath:path];
+    [is setDelegate:self];
+    [is scheduleInRunLoop:[NSRunLoop currentRunLoop]
+                       forMode:NSDefaultRunLoopMode];
+    [is open];
+}
+
+- (void)stream:(NSStream *)stream handleEvent:(NSStreamEvent)eventCode
+{
+    NSMutableData *myData = nil;
+    ACNumber *bytesRead = [ACNumber numberWithInteger:0];
+    uint8_t buf[1024];
+    switch(eventCode) {
+        case NSStreamEventHasBytesAvailable:
+        {
+            if(!myData) {
+                myData = [[NSMutableData data] retain];
+            }
+            NSInteger len = 0;
+            len = [(NSInputStream *)stream read:buf maxLength:sizeof(buf)];
+            if(len > 0) {
+                [myData appendBytes:(const void *)buf length:len];
+                // bytesRead (a local ACNumber here) accumulates the total bytes read
+                bytesRead = [ACNumber numberWithInteger:[bytesRead integerValue]+len];
+                data = [[NSString alloc] initWithData:myData encoding:NSASCIIStringEncoding];
+            } else {
+                NSLog(@"no buffer!");
+            }
+            break;
+        }
+        case NSStreamEventEndEncountered:
+        {
+            [stream close];
+            [stream removeFromRunLoop:[NSRunLoop currentRunLoop]
+                              forMode:NSDefaultRunLoopMode];
+            [stream release];
+            stream = nil; // stream is ivar, so reinit it
+            break;
+        }
+        // continued
+    }
+}
+
+- (void) close
+{
+    [is close];
+    is = nil;
+}
+
+@end
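
For illustration, a small sketch of the ANTLRReaderStream added above. The initializer opens the input stream and reads an initial chunk of data, so this is only suitable as shown for small inputs; the helper function and its name are assumptions, not part of the patch.

    #import "ANTLRReaderStream.h"

    static void dumpStream(NSInputStream *source)
    {
        // the initializer opens `source` and loads an initial chunk of data
        ANTLRReaderStream *rs = [ANTLRReaderStream newANTLRReaderStream:source];
        while ( [rs LA:1] != CharStreamEOF ) {
            [rs consume];
        }
        NSLog(@"consumed %ld chars", (long)rs.index);
        [rs release];
    }
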
diff --git a/runtime/ObjC/Framework/ANTLRStringStream.h b/runtime/ObjC/Framework/ANTLRStringStream.h
new file mode 100644
index 0000000..061a4b1
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRStringStream.h
@@ -0,0 +1,114 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+#import "CharStream.h"
+#import "CharStreamState.h"
+#import "PtrBuffer.h"
+
+@interface ANTLRStringStream : NSObject < CharStream > {
+	NSString *data;
+	NSInteger n;
+	NSInteger index;
+	NSUInteger line;
+	NSUInteger charPositionInLine;
+	NSInteger markDepth;
+	PtrBuffer *markers;
+	NSInteger lastMarker;
+	NSString *name;
+    CharStreamState *charState;
+}
+
++ newANTLRStringStream;
+
++ newANTLRStringStream:(NSString *)aString;
+
++ newANTLRStringStream:(char *)myData Count:(NSInteger)numBytes;
+
+- (id) init;
+
+// this initializer copies the string
+- (id) initWithString:(NSString *) theString;
+
+// This is the preferred constructor as no data is copied
+- (id) initWithStringNoCopy:(NSString *) theString;
+
+- (id) initWithData:(char *)myData Count:(NSInteger)numBytes;
+
+- (void) dealloc;
+
+- (id) copyWithZone:(NSZone *)aZone;
+
+// reset the stream's state, but keep the data to feed off
+- (void) reset;
+// consume one character from the stream
+- (void) consume;
+
+// look ahead i characters
+- (NSInteger) LA:(NSInteger) i;
+- (NSInteger) LT:(NSInteger) i;
+
+// total length of the input data
+- (NSInteger) size;
+
+// seek and rewind in the stream
+- (NSInteger) mark;
+- (void) rewind:(NSInteger) marker;
+- (void) rewind;
+- (void) release:(NSInteger) marker;
+- (void) seek:(NSInteger) index;
+
+// provide the streams data (e.g. for tokens using indices)
+- (NSString *) substring:(NSInteger)startIndex To:(NSInteger)stopIndex;
+- (NSString *) substringWithRange:(NSRange) theRange;
+
+- (NSUInteger) getLine;
+- (NSUInteger) getCharPositionInLine;
+- (void) setLine:(NSUInteger) aLine;
+- (void) setCharPositionInLine:(NSUInteger) pos;
+
+- (PtrBuffer *)getMarkers;
+- (void) setMarkers:(PtrBuffer *)aMarkerList;
+
+- (NSString *)getSourceName;
+
+- (NSString *)toString;
+
+// accessors to the raw data of this stream
+
+@property (retain) NSString *data;
+@property (assign) NSInteger index;
+@property (assign) NSInteger n;
+@property (assign) NSUInteger line;
+@property (assign) NSUInteger charPositionInLine;
+@property (assign) NSInteger markDepth;
+@property (retain) PtrBuffer *markers;
+@property (assign) NSInteger lastMarker;
+@property (retain) NSString *name;
+@property (retain) CharStreamState *charState;
+
+@end
diff --git a/runtime/ObjC/Framework/ANTLRStringStream.m b/runtime/ObjC/Framework/ANTLRStringStream.m
new file mode 100644
index 0000000..9010b4a
--- /dev/null
+++ b/runtime/ObjC/Framework/ANTLRStringStream.m
@@ -0,0 +1,407 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import "ANTLRStringStream.h"
+
+extern NSInteger debug;
+
+@implementation ANTLRStringStream
+
+@synthesize data;
+@synthesize n;
+@synthesize index;
+@synthesize line;
+@synthesize charPositionInLine;
+@synthesize markDepth;
+@synthesize markers;
+@synthesize lastMarker;
+@synthesize name;
+@synthesize charState;
+
++ newANTLRStringStream
+{
+    return [[ANTLRStringStream alloc] init];
+}
+
++ newANTLRStringStream:(NSString *)aString;
+{
+    return [[ANTLRStringStream alloc] initWithString:aString];
+}
+
+
++ newANTLRStringStream:(char *)myData Count:(NSInteger)numBytes;
+{
+    return [[ANTLRStringStream alloc] initWithData:myData Count:numBytes];
+}
+
+
+- (id) init
+{
+	if ((self = [super init]) != nil) {
+        n = 0;
+        index = 0;
+        line = 1;
+        charPositionInLine = 0;
+        markDepth = 0;
+		markers = [PtrBuffer newPtrBufferWithLen:10];
+        [markers retain];
+        [markers addObject:[NSNull null]]; // ANTLR generates code that assumes markers to be 1-based,
+        charState = [[CharStreamState newCharStreamState] retain];
+	}
+	return self;
+}
+
+- (id) initWithString:(NSString *) theString
+{
+	if ((self = [super init]) != nil) {
+		//[self setData:[NSString stringWithString:theString]];
+        data = [theString retain];
+        n = [data length];
+        index = 0;
+        line = 1;
+        charPositionInLine = 0;
+        markDepth = 0;
+		markers = [[PtrBuffer newPtrBufferWithLen:10] retain];
+        [markers addObject:[NSNull null]]; // ANTLR generates code that assumes markers to be 1-based,
+        charState = [[CharStreamState newCharStreamState] retain];
+	}
+	return self;
+}
+
+- (id) initWithStringNoCopy:(NSString *) theString
+{
+	if ((self = [super init]) != nil) {
+		//[self setData:theString];
+        data = [theString retain];
+        n = [data length];
+        index = 0;
+        line = 1;
+        charPositionInLine = 0;
+        markDepth = 0;
+		markers = [PtrBuffer newPtrBufferWithLen:100];
+        [markers retain];
+        [markers addObject:[NSNull null]]; // ANTLR generates code that assumes markers to be 1-based,
+        charState = [[CharStreamState newCharStreamState] retain];
+	}
+	return self;
+}
+
+- (id) initWithData:(char *)myData Count:(NSInteger)numBytes
+{
+    if ((self = [super init]) != nil) {
+        data = [[NSString stringWithCString:myData encoding:NSASCIIStringEncoding] retain];  // retained to balance the release in dealloc
+        n = numBytes;
+        index = 0;
+        line = 1;
+        charPositionInLine = 0;
+        markDepth = 0;
+		markers = [PtrBuffer newPtrBufferWithLen:100];
+        [markers retain];
+        [markers addObject:[NSNull null]]; // ANTLR generates code that assumes markers to be 1-based,
+        charState = [[CharStreamState newCharStreamState] retain];
+    }
+    return( self );
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in ANTLRStringStream" );
+#endif
+    if ( markers && [markers count] ) {
+        [markers removeAllObjects];
+        [markers release];
+        markers = nil;
+    }
+    if ( data ) {
+        [data release];
+        data = nil;
+    }
+	[super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    ANTLRStringStream *copy;
+	
+    copy = [[[self class] allocWithZone:aZone] init];
+    //    copy = [super copyWithZone:aZone]; // allocation occurs here
+    if ( data != nil )
+        copy.data = [self.data copyWithZone:aZone];
+    copy.n = n;
+    copy.index = index;
+    copy.line = line;
+    copy.charPositionInLine = charPositionInLine;
+    copy.markDepth = markDepth;
+    if ( markers != nil )
+        copy.markers = [markers copyWithZone:nil];
+    copy.lastMarker = lastMarker;
+    if ( name != nil )
+        copy.name = [self.name copyWithZone:aZone];
+    return copy;
+}
+
+// reset the streams charState
+// the streams content is not reset!
+- (void) reset
+{
+	index = 0;
+	line = 1;
+	charPositionInLine = 0;
+	markDepth = 0;
+    if ( markers && [markers count] )
+        [markers removeAllObjects];
+    [markers addObject:[NSNull null]];  // ANTLR generates code that assumes markers to be 1-based,
+                                        // thus the initial null in the array!
+}
+
+// read one character off the stream, tracking line numbers and character positions
+// automatically.
+// Override this in subclasses if you want to avoid the overhead of automatic line/pos
+// handling. Do not call super in that case.
+- (void) consume 
+{
+	if ( index < n ) {
+		charPositionInLine++;
+		if ( [data characterAtIndex:index] == '\n' ) {
+			line++;
+			charPositionInLine=0;
+		}
+		index++;
+	}
+}
+
+// implement the lookahead method used in lexers
+- (NSInteger) LA:(NSInteger) i 
+{
+    NSInteger c;
+    if ( i == 0 )
+        return 0; // undefined
+    if ( i < 0 ) {
+        i++;
+        if ( index+i-1 < 0 ) {
+		    return CharStreamEOF;
+		}
+	}
+    if ( (index+i-1) >= n ) {
+		return CharStreamEOF;
+	}
+    c = [data characterAtIndex:index+i-1];
+	return (NSInteger)c;
+}
+
+- (NSInteger) LT:(NSInteger)i
+{
+    return [self LA:i];
+}
+
+- (NSInteger) size 
+{
+	return n;
+}
+
+// push the current charState of the stream onto a stack
+// returns the depth of the stack, to be used as a marker to rewind the stream.
+// Note: markers are 1-based!
+- (NSInteger) mark 
+{
+    if (debug > 1) NSLog(@"mark entry -- markers=%x, markDepth=%d\n", (int)markers, markDepth);
+    if ( markers == nil ) {
+        markers = [PtrBuffer newPtrBufferWithLen:100];
+		[markers addObject:[NSNull null]]; // ANTLR generates code that assumes markers to be 1-based,
+        markDepth = markers.ptr;
+    }
+    markDepth++;
+	CharStreamState *State = nil;
+	if ( (markDepth) >= [markers count] ) {
+        if ( markDepth > 1 ) {
+            State = [CharStreamState newCharStreamState];
+            [State retain];
+        }
+        if ( markDepth == 1 )
+            State = charState;
+		[markers insertObject:State atIndex:markDepth];
+        if (debug > 1) NSLog(@"mark save State %x at %d, index=%d, line=%d, charPositionInLine=%d\n", (NSUInteger)State, markDepth, State.index, State.line, State.charPositionInLine);
+	}
+	else {
+        if (debug > 1) NSLog(@"mark retrieve markers=%x markDepth=%d\n", (NSUInteger)markers, markDepth);
+        State = [markers objectAtIndex:markDepth];
+        [State retain];
+        State = (CharStreamState *)[markers objectAtIndex:markDepth];
+        if (debug > 1) NSLog(@"mark retrieve charState %x from %d, index=%d, line=%d, charPositionInLine=%d\n", (NSUInteger)State, markDepth, State.index, State.line, State.charPositionInLine);
+	}
+    State.index = index;
+	State.line = line;
+	State.charPositionInLine = charPositionInLine;
+	lastMarker = markDepth;
+    if (debug > 1) NSLog(@"mark exit -- markers=%x, charState=%x, index=%d, line=%d, charPositionInLine=%d\n", (NSUInteger)markers, (NSUInteger)State, State.index, State.line, State.charPositionInLine);
+	return markDepth;
+}
+
+- (void) rewind:(NSInteger) marker 
+{
+    CharStreamState *State;
+    if (debug > 1) NSLog(@"rewind entry -- markers=%x marker=%d\n", (NSUInteger)markers, marker);
+    if ( marker == 1 )
+        State = charState;
+    else
+        State = (CharStreamState *)[markers objectAtIndex:marker];
+    if (debug > 1) NSLog(@"rewind entry -- marker=%d charState=%x, index=%d, line=%d, charPositionInLine=%d\n", marker, (NSUInteger)charState, charState.index, charState.line, charState.charPositionInLine);
+	// restore stream charState
+	[self seek:State.index];
+	line = State.line;
+	charPositionInLine = State.charPositionInLine;
+	[self release:marker];
+    if (debug > 1) NSLog(@"rewind exit -- marker=%d charState=%x, index=%d, line=%d, charPositionInLine=%d\n", marker, (NSUInteger)charState, charState.index, charState.line, charState.charPositionInLine);
+}
+
+- (void) rewind
+{
+	[self rewind:lastMarker];
+}
+
+// remove stream states on top of 'marker' from the marker stack
+// returns the new markDepth of the stack.
+// Note: unfortunate naming for Objective-C, but to keep close to the Java target this is named release:
+- (void) release:(NSInteger) marker 
+{
+	// unwind any other markers made after marker and release marker
+	markDepth = marker;
+	markDepth--;
+    if (debug > 1) NSLog(@"release:marker= %d, markDepth = %d\n", marker, markDepth);
+}
+
+// when seeking forward we must handle character position and line numbers.
+// seeking backward already has the correct line information on the markers stack, 
+// so we just take it from there.
+- (void) seek:(NSInteger) anIndex 
+{
+    if (debug > 1) NSLog(@"seek entry -- seekIndex=%d index=%d\n", anIndex, index);
+	if ( anIndex <= index ) {
+		index = anIndex; // just jump; don't update stream charState (line, ...)
+        if (debug > 1) NSLog(@"seek exit return -- index=%d index=%d\n", anIndex, index);
+		return;
+	}
+	// seek forward, consume until index hits anIndex
+	while ( index < anIndex ) {
+		[self consume];
+	}
+    if (debug > 1) NSLog(@"seek exit end -- index=%d index=%d\n", anIndex, index);
+}
+
+// get a substring from our raw data.
+- (NSString *) substring:(NSInteger)startIndex To:(NSInteger)stopIndex 
+{
+    NSRange theRange = NSMakeRange(startIndex, stopIndex-startIndex);
+	return [data substringWithRange:theRange];
+}
+
+// get a substring from our raw data.
+- (NSString *) substringWithRange:(NSRange) theRange 
+{
+	return [data substringWithRange:theRange];
+}
+
+
+- (NSUInteger) getLine
+{
+    return line;
+}
+
+- (NSUInteger) getCharPositionInLine
+{
+    return charPositionInLine;
+}
+
+- (void) setLine:(NSUInteger) aLine
+{
+    line = aLine;
+}
+
+- (void) setCharPositionInLine:(NSUInteger) pos
+{
+    charPositionInLine = pos;
+}
+
+- (PtrBuffer *)getMarkers
+{
+    return markers;
+}
+
+- (void) setMarkers:(PtrBuffer *)aMarkerList
+{
+    markers = aMarkerList;
+}
+
+- (NSString *)getSourceName
+{
+    return name;
+}
+
+- (void) setSourceName:(NSString *)aName
+{
+    if ( name != aName ) {
+        if ( name ) [name release];
+        if ( aName ) [aName retain];
+        name = aName;
+    }
+}
+
+
+- (CharStreamState *)getCharState
+{
+    return charState;
+}
+
+- (void) setCharState:(CharStreamState *)aCharState
+{
+    charState = aCharState;
+}
+
+- (NSString *)toString
+{
+    return [NSString stringWithString:data];
+}
+
+//---------------------------------------------------------- 
+//  data 
+//---------------------------------------------------------- 
+- (NSString *) getData
+{
+    return data; 
+}
+
+- (void) setData: (NSString *) aData
+{
+    if (data != aData) {
+        if ( data ) [data release];
+        data = [NSString stringWithString:aData];
+        [data retain];
+    }
+}
+
+@end
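
Since most of the other streams in this change inherit from ANTLRStringStream, here is a brief sketch of its mark/rewind protocol (markers are 1-based, as the comments above note). This is a usage illustration under manual retain/release, not part of the patch itself.

    #import "ANTLRStringStream.h"

    static void markAndRewind(void)
    {
        ANTLRStringStream *in = [ANTLRStringStream newANTLRStringStream:@"ab\ncd"];

        [in consume];                      // past 'a'
        NSInteger m = [in mark];           // remember this position
        [in consume];                      // past 'b'
        [in consume];                      // past '\n' -> line becomes 2
        NSLog(@"line after newline: %lu", (unsigned long)[in getLine]);

        [in rewind:m];                     // back to just after 'a'; line is 1 again
        NSLog(@"LA(1)=%c line=%lu", (char)[in LA:1], (unsigned long)[in getLine]);

        [in release];
    }
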
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRStringStreamTest-Info.plist b/runtime/ObjC/Framework/ANTLRStringStreamTest-Info.plist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLRStringStreamTest-Info.plist
rename to runtime/ObjC/Framework/ANTLRStringStreamTest-Info.plist
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLR_Prefix.pch b/runtime/ObjC/Framework/ANTLR_Prefix.pch
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLR_Prefix.pch
rename to runtime/ObjC/Framework/ANTLR_Prefix.pch
diff --git a/runtime/ObjC/Framework/ArrayIterator.h b/runtime/ObjC/Framework/ArrayIterator.h
new file mode 100644
index 0000000..3a0af07
--- /dev/null
+++ b/runtime/ObjC/Framework/ArrayIterator.h
@@ -0,0 +1,78 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2011 Terence Parr and Alan Condit
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#import <Foundation/Foundation.h>
+
+/**
+ * Iterator for an array so I don't have to copy the array to a List
+ * just to make it iterable.
+ */
+
+/*
+ * this is the state structure for FastEnumeration
+ typedef struct {
+ unsigned long state;
+ id *itemsPtr;
+ unsigned long *mutationsPtr;
+ unsigned long extra[5];
+ } NSFastEnumerationState;
+ */
+
+@interface ArrayIterator : NSObject {
+    
+    __strong id peekObj;
+    /**
+     * NSArrays are fixed size; precompute count.
+     */
+    NSInteger count;
+    NSInteger index;
+    __strong NSArray *anArray;
+    
+}
+
++ (ArrayIterator *) newIterator:(NSArray *)array;
++ (ArrayIterator *) newIteratorForDictKey:(NSDictionary *)dict;
++ (ArrayIterator *) newIteratorForDictObj:(NSDictionary *)dict;
+
+- (id) initWithArray:(NSArray *)array;
+- (id) initWithDictKey:(NSDictionary *)dict;
+- (id) initWithDictObj:(NSDictionary *)dict;
+
+- (BOOL) hasNext;
+- (id) nextObject;
+- (NSArray *)allObjects;
+- (void) removeObjectAtIndex:(NSInteger)idx;
+- (NSInteger) count;
+- (void) setCount:(NSInteger)cnt;
+- (void) dealloc;
+
+@property (retain) id peekObj;
+@property (assign, getter=count, setter=setCount:) NSInteger count;
+@property (assign) NSInteger index;
+@property (retain) NSArray *anArray;
+
+@end
diff --git a/runtime/ObjC/Framework/ArrayIterator.m b/runtime/ObjC/Framework/ArrayIterator.m
new file mode 100644
index 0000000..42d3f7a
--- /dev/null
+++ b/runtime/ObjC/Framework/ArrayIterator.m
@@ -0,0 +1,183 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2011 Terence Parr and Alan Condit
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#import "AMutableArray.h"
+#import "ArrayIterator.h"
+#import "RuntimeException.h"
+
+@class AMutableArray;
+
+@implementation ArrayIterator
+
+@synthesize peekObj;
+//@synthesize count;
+@synthesize index;
+@synthesize anArray;
+
+
++ (ArrayIterator *) newIterator:(NSArray *)array
+{
+    return [[ArrayIterator alloc] initWithArray:array];
+}
+
++ (ArrayIterator *) newIteratorForDictKey:(NSDictionary *)dict
+{
+    return [[ArrayIterator alloc] initWithDictKey:dict];
+}
+
++ (ArrayIterator *) newIteratorForDictObj:(NSDictionary *)dict
+{
+    return [[ArrayIterator alloc] initWithDictObj:dict];
+}
+
+- (id) initWithArray:(NSArray *)array
+{
+    self=[super init];
+    if ( self != nil ) {
+        if (![array isKindOfClass:[NSArray class]]) {
+                @throw [NSException exceptionWithName:NSInvalidArgumentException
+                                               reason:[NSString stringWithFormat:@"ArrayIterator expecting NSArray class but got %@", [array className]]
+                                             userInfo:nil];
+        }
+        anArray = [array retain];
+#ifdef DONTUSENOMO
+        for (int i = 0; i < [array count]; i++) {
+            [anArray addObject:[array objectAtIndex:i]];
+            count++;
+        }
+#endif
+        peekObj = nil;
+        count = [anArray count];
+        index = 0;
+    }
+    return self;
+}
+
+- (id) initWithDictKey:(NSDictionary *)dict
+{
+    self=[super init];
+    if ( self != nil ) {
+        if (![dict isKindOfClass:[NSDictionary class]]) {
+            @throw [NSException exceptionWithName:NSInvalidArgumentException
+                                           reason:[NSString stringWithFormat:@"ArrayIterator expecting NSDictionary class but got %@", [dict className]]
+                                         userInfo:nil];
+        }
+        anArray = [[[dict keyEnumerator] allObjects] retain];
+        peekObj = nil;
+        count = [anArray count];
+        index = 0;
+    }
+    return self;
+}
+
+- (id) initWithDictObj:(NSDictionary *)dict
+{
+    self=[super init];
+    if ( self != nil ) {
+        if (![dict isKindOfClass:[NSDictionary class]]) {
+            @throw [NSException exceptionWithName:NSInvalidArgumentException
+                                           reason:[NSString stringWithFormat:@"ArrayIterator expecting NSDictionary class but got %@", [dict className]]
+                                         userInfo:nil];
+        }
+        anArray = [[[dict objectEnumerator] allObjects] retain];
+        peekObj = nil;
+        count = [anArray count];
+        index = 0;
+    }
+    return self;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in ArrayIterator" );
+#endif
+    if ( anArray ) [anArray release];
+    [super dealloc];
+}
+
+- (BOOL) hasNext
+{
+    if ( peekObj == nil ) {
+        peekObj = [self nextObject];
+    }
+    return ((peekObj) ? YES : NO);
+}
+
+- (NSObject *) nextObject
+{
+    id obj = nil;
+    if ( peekObj ) {
+        obj = peekObj;
+        peekObj = nil;
+        return obj;
+    }
+    if ( index >= count ) {
+        return nil;
+    }
+    if ( anArray ) {
+        obj = [anArray objectAtIndex:index++];
+        if ( index >= count ) {
+            [anArray release];
+            anArray = nil;
+            index = 0;
+            count = 0;
+        }
+    }
+    return obj;
+}
+
+- (NSArray *) allObjects
+{
+    if ( (count <= 0 || index >= count) && peekObj == nil ) return nil;
+    AMutableArray *theArray = [AMutableArray arrayWithCapacity:count];
+    if (peekObj) {
+        [theArray addObject:peekObj];
+        peekObj = nil;
+    }
+    for (int i = index; i < count; i++) {
+        [theArray addObject:[anArray objectAtIndex:i]];
+    }
+    return [NSArray arrayWithArray:(NSArray *)theArray];
+}
+
+- (void) removeObjectAtIndex:(NSInteger)idx
+{
+    @throw [UnsupportedOperationException newException:@"Can't remove object from ArrayIterator"];
+}
+
+- (NSInteger) count
+{
+    // number of elements remaining in the iteration
+    return (count - index);
+}
+
+- (void) setCount:(NSInteger)cnt
+{
+    count = cnt;
+}
+
+@end
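+
+/* Usage sketch (illustrative only, not part of the runtime): the iterator is
+ * normally driven with hasNext/nextObject, mirroring java.util.Iterator.
+ * The variable names below are hypothetical.
+ *
+ *   ArrayIterator *it = [ArrayIterator newIterator:someArray];
+ *   while ( [it hasNext] ) {
+ *       id obj = [it nextObject];
+ *       NSLog(@"%@", obj);
+ *   }
+ */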
diff --git a/runtime/ObjC/Framework/BaseMapElement.h b/runtime/ObjC/Framework/BaseMapElement.h
new file mode 100644
index 0000000..defb659
--- /dev/null
+++ b/runtime/ObjC/Framework/BaseMapElement.h
@@ -0,0 +1,52 @@
+//
+//  BaseMapElement.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "LinkBase.h"
+#import "ACNumber.h"
+
+@interface BaseMapElement : LinkBase {
+    ACNumber *index;
+}
+
+@property (retain) ACNumber *index;
+
++ (id) newBaseMapElement;
++ (id) newBaseMapElementWithIndex:(ACNumber *)anIdx;
+- (id) init;
+- (id) initWithAnIndex:(ACNumber *)anIdx;
+
+- (id) copyWithZone:(NSZone *)aZone;
+
+- (NSInteger)count;
+- (NSInteger)size;
+
+@end
diff --git a/runtime/ObjC/Framework/BaseMapElement.m b/runtime/ObjC/Framework/BaseMapElement.m
new file mode 100644
index 0000000..2ff2172
--- /dev/null
+++ b/runtime/ObjC/Framework/BaseMapElement.m
@@ -0,0 +1,95 @@
+//
+//  BaseMapElement.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "BaseMapElement.h"
+
+
+@implementation BaseMapElement
+
+@synthesize index;
+
++ (BaseMapElement *)newBaseMapElement
+{
+    return [[BaseMapElement alloc] init];
+}
+
++ (BaseMapElement *)newBaseMapElementWithIndex:(ACNumber *)aNumber
+{
+    return [[BaseMapElement alloc] initWithAnIndex:(ACNumber *)aNumber];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil ) {
+        index = nil;
+    }
+    return (self);
+}
+
+- (id) initWithAnIndex:(ACNumber *)aNumber
+{
+    if ((self = [super init]) != nil ) {
+        index = aNumber;
+        if ( index ) [index retain];
+    }
+    return (self);
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in BaseMapElement" );
+#endif
+    if ( index ) [index release];
+    [super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    BaseMapElement *copy;
+    
+    copy = [super copyWithZone:aZone];
+    copy.index = index;
+    return( copy );
+}
+
+- (NSInteger)count
+{
+    return 1;
+}
+
+- (NSInteger)size
+{
+    return(  sizeof(index) );
+}
+
+@end
diff --git a/runtime/ObjC/Framework/BaseRecognizer.h b/runtime/ObjC/Framework/BaseRecognizer.h
new file mode 100644
index 0000000..e398063
--- /dev/null
+++ b/runtime/ObjC/Framework/BaseRecognizer.h
@@ -0,0 +1,179 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+
+#import "IntStream.h"
+#import "AMutableArray.h"
+
+// This is an abstract superclass for lexers and parsers.
+
+#define ANTLR_MEMO_RULE_FAILED -2
+#define ANTLR_MEMO_RULE_UNKNOWN -1
+#define ANTLR_INITIAL_FOLLOW_STACK_SIZE 100
+
+#import "MapElement.h"
+#import "ANTLRBitSet.h"
+#import "Token.h"
+#import "RecognizerSharedState.h"
+#import "RecognitionException.h"
+#import "MissingTokenException.h"
+#import "MismatchedTokenException.h"
+#import "MismatchedTreeNodeException.h"
+#import "UnwantedTokenException.h"
+#import "NoViableAltException.h"
+#import "EarlyExitException.h"
+#import "MismatchedSetException.h"
+#import "MismatchedNotSetException.h"
+#import "FailedPredicateException.h"
+
+@interface BaseRecognizer : NSObject {
+    __strong RecognizerSharedState *state;  // the state of this recognizer. Might be shared with other recognizers, e.g. in grammar import scenarios.
+    __strong NSString *grammarFileName;          // where the grammar came from; filled in by code generation
+    __strong NSString *sourceName;
+    __strong AMutableArray *tokenNames;
+}
+
++ (void) initialize;
+
++ (BaseRecognizer *) newBaseRecognizer;
++ (BaseRecognizer *) newBaseRecognizerWithRuleLen:(NSInteger)aLen;
++ (BaseRecognizer *) newBaseRecognizer:(RecognizerSharedState *)aState;
+
++ (AMutableArray *)getTokenNames;
++ (void)setTokenNames:(NSArray *)aTokNamArray;
++ (void)setGrammarFileName:(NSString *)aFileName;
+
+- (id) init;
+- (id) initWithLen:(NSInteger)aLen;
+- (id) initWithState:(RecognizerSharedState *)aState;
+
+- (void) dealloc;
+
+// simple accessors
+- (NSInteger) getBacktrackingLevel;
+- (void) setBacktrackingLevel:(NSInteger) level;
+
+- (BOOL) getFailed;
+- (void) setFailed: (BOOL) flag;
+
+- (RecognizerSharedState *) getState;
+- (void) setState:(RecognizerSharedState *) theState;
+
+// reset this recognizer - might be extended by codegeneration/grammar
+- (void) reset;
+
+/** Match needs to return the current input symbol, which gets put
+ *  into the label for the associated token ref; e.g., x=ID.  Token
+ *  and tree parsers need to return different objects. Rather than test
+ *  for input stream type or change the IntStream interface, I use
+ *  a simple method to ask the recognizer to tell me what the current
+ *  input symbol is.
+ * 
+ *  This is ignored for lexers.
+ */
+- (id) input;
+
+- (void)skip;
+
+// do actual matching of tokens/characters
+- (id) match:(id<IntStream>)anInput TokenType:(NSInteger)ttype Follow:(ANTLRBitSet *)follow;
+- (void) matchAny:(id<IntStream>)anInput;
+- (BOOL) mismatchIsUnwantedToken:(id<IntStream>)anInput TokenType:(NSInteger) ttype;
+- (BOOL) mismatchIsMissingToken:(id<IntStream>)anInput Follow:(ANTLRBitSet *)follow;
+
+// error reporting and recovery
+- (void) reportError:(RecognitionException *)e;
+- (void) displayRecognitionError:(AMutableArray *)theTokNams Exception:(RecognitionException *)e;
+- (NSString *)getErrorMessage:(RecognitionException *)e TokenNames:(AMutableArray *)theTokNams;
+- (NSInteger) getNumberOfSyntaxErrors;
+- (NSString *)getErrorHeader:(RecognitionException *)e;
+- (NSString *)getTokenErrorDisplay:(id<Token>)t;
+- (void) emitErrorMessage:(NSString *)msg;
+- (void) recover:(id<IntStream>)anInput Exception:(RecognitionException *)e;
+
+// begin hooks for debugger
+- (void) beginResync;
+- (void) endResync;
+// end hooks for debugger
+
+// compute the bitsets necessary to do matching and recovery
+- (ANTLRBitSet *)computeErrorRecoverySet;
+- (ANTLRBitSet *)computeContextSensitiveRuleFOLLOW;
+- (ANTLRBitSet *)combineFollows:(BOOL) exact;
+
+- (id<Token>) recoverFromMismatchedToken:(id<IntStream>)anInput 
+                                    TokenType:(NSInteger)ttype 
+                                       Follow:(ANTLRBitSet *)follow;
+                                    
+- (id<Token>)recoverFromMismatchedSet:(id<IntStream>)anInput
+                                    Exception:(RecognitionException *)e
+                                    Follow:(ANTLRBitSet *)follow;
+
+- (id) getCurrentInputSymbol:(id<IntStream>)anInput;
+- (id) getMissingSymbol:(id<IntStream>)anInput
+              Exception:(RecognitionException *)e
+              TokenType:(NSInteger) expectedTokenType
+                Follow:(ANTLRBitSet *)follow;
+
+// helper methods for recovery. try to resync somewhere
+- (void) consumeUntilTType:(id<IntStream>)anInput TokenType:(NSInteger)ttype;
+- (void) consumeUntilFollow:(id<IntStream>)anInput Follow:(ANTLRBitSet *)bitSet;
+- (void) pushFollow:(ANTLRBitSet *)fset;
+- (ANTLRBitSet *)popFollow;
+
+// to be used by the debugger to do reporting. maybe hook in incremental stuff here, too.
+- (AMutableArray *) getRuleInvocationStack;
+- (AMutableArray *) getRuleInvocationStack:(RecognitionException *)exception
+                                 Recognizer:(NSString *)recognizerClassName;
+
+- (AMutableArray *) getTokenNames;
+- (NSString *)getGrammarFileName;
+- (NSString *)getSourceName;
+- (AMutableArray *) toStrings:(NSArray *)tokens;
+// support for memoization
+- (NSInteger) getRuleMemoization:(NSInteger)ruleIndex StartIndex:(NSInteger)ruleStartIndex;
+- (BOOL) alreadyParsedRule:(id<IntStream>)anInput RuleIndex:(NSInteger)ruleIndex;
+- (void) memoize:(id<IntStream>)anInput
+         RuleIndex:(NSInteger)ruleIndex
+        StartIndex:(NSInteger)ruleStartIndex;
+- (NSInteger) getRuleMemoizationCacheSize;
+- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol;
+- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol;
+
+
+// support for syntactic predicates. these are called indirectly to support funky stuff in grammars,
+// like supplying selectors instead of writing code directly into the actions of the grammar.
+- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment;
+// stream:(id<IntStream>)anInput;
+
+@property (retain) RecognizerSharedState *state;
+@property (retain) NSString *grammarFileName;
+@property (retain) NSString *sourceName;
+@property (retain) AMutableArray *tokenNames;
+
+@end
diff --git a/runtime/ObjC/Framework/BaseRecognizer.m b/runtime/ObjC/Framework/BaseRecognizer.m
new file mode 100644
index 0000000..6caf795
--- /dev/null
+++ b/runtime/ObjC/Framework/BaseRecognizer.m
@@ -0,0 +1,1132 @@
+//
+//  BaseRecognizer.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "ACNumber.h"
+#import "BaseRecognizer.h"
+#import "HashRule.h"
+#import "RuleMemo.h"
+#import "CommonToken.h"
+#import "Map.h"
+#import "NoViableAltException.h"
+
+extern NSInteger debug;
+
+@implementation BaseRecognizer
+
+static AMutableArray *_tokenNames;
+static NSString *_grammarFileName;
+static NSString *NEXT_TOKEN_RULE_NAME;
+
+@synthesize state;
+@synthesize grammarFileName;
+//@synthesize failed;
+@synthesize sourceName;
+//@synthesize numberOfSyntaxErrors;
+@synthesize tokenNames;
+
++ (void) initialize
+{
+    NEXT_TOKEN_RULE_NAME = [NSString stringWithString:@"nextToken"];
+    [NEXT_TOKEN_RULE_NAME retain];
+}
+
++ (BaseRecognizer *) newBaseRecognizer
+{
+    return [[BaseRecognizer alloc] init];
+}
+
++ (BaseRecognizer *) newBaseRecognizerWithRuleLen:(NSInteger)aLen
+{
+    return [[BaseRecognizer alloc] initWithLen:aLen];
+}
+
++ (BaseRecognizer *) newBaseRecognizer:(RecognizerSharedState *)aState
+{
+	return [[BaseRecognizer alloc] initWithState:aState];
+}
+
++ (AMutableArray *)getTokenNames
+{
+    return _tokenNames;
+}
+
++ (void)setTokenNames:(AMutableArray *)theTokNams
+{
+    if ( _tokenNames != theTokNams ) {
+        if ( _tokenNames ) [_tokenNames release];
+        [theTokNams retain];
+    }
+    _tokenNames = theTokNams;
+}
+
++ (void)setGrammarFileName:(NSString *)aFileName
+{
+    if ( _grammarFileName != aFileName ) {
+        if ( _grammarFileName ) [_grammarFileName release];
+        [aFileName retain];
+        _grammarFileName = aFileName;
+    }
+}
+
+- (id) init
+{
+	if ((self = [super init]) != nil) {
+        if (state == nil) {
+            state = [[RecognizerSharedState newRecognizerSharedState] retain];
+        }
+        tokenNames = _tokenNames;
+        if ( tokenNames ) [tokenNames retain];
+        grammarFileName = _grammarFileName;
+        if ( grammarFileName ) [grammarFileName retain];
+        state._fsp = -1;
+        state.errorRecovery = NO;		// are we recovering?
+        state.lastErrorIndex = -1;
+        state.failed = NO;				// indicate that some match failed
+        state.syntaxErrors = 0;
+        state.backtracking = 0;			// the level of backtracking
+        state.tokenStartCharIndex = -1;
+	}
+	return self;
+}
+
+- (id) initWithLen:(NSInteger)aLen
+{
+	if ((self = [super init]) != nil) {
+        if (state == nil) {
+            state = [[RecognizerSharedState newRecognizerSharedStateWithRuleLen:aLen] retain];
+        }
+        tokenNames = _tokenNames;
+        if ( tokenNames ) [tokenNames retain];
+        grammarFileName = _grammarFileName;
+        if ( grammarFileName ) [grammarFileName retain];
+        state._fsp = -1;
+        state.errorRecovery = NO;		// are we recovering?
+        state.lastErrorIndex = -1;
+        state.failed = NO;				// indicate that some match failed
+        state.syntaxErrors = 0;
+        state.backtracking = 0;			// the level of backtracking
+        state.tokenStartCharIndex = -1;
+	}
+	return self;
+}
+
+- (id) initWithState:(RecognizerSharedState *)aState
+{
+	if ((self = [super init]) != nil) {
+		state = aState;
+        if (state == nil) {
+            state = [RecognizerSharedState newRecognizerSharedState];
+        }
+        [state retain];
+        tokenNames = _tokenNames;
+        if ( tokenNames ) [tokenNames retain];
+        grammarFileName = _grammarFileName;
+        if ( grammarFileName ) [grammarFileName retain];
+        state._fsp = -1;
+        state.errorRecovery = NO;		// are we recovering?
+        state.lastErrorIndex = -1;
+        state.failed = NO;				// indicate that some match failed
+        state.syntaxErrors = 0;
+        state.backtracking = 0;			// the level of backtracking
+        state.tokenStartCharIndex = -1;
+	}
+	return self;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in BaseRecognizer" );
+#endif
+	if ( grammarFileName ) [grammarFileName release];
+	if ( tokenNames ) [tokenNames release];
+	if ( state ) [state release];
+	[super dealloc];
+}
+
+// reset the recognizer to the initial state. does not touch the token source!
+// this can be extended by the grammar writer to reset custom ivars
+- (void) reset
+{
+    if ( state == nil )
+        return; 
+    if ( state.following != nil ) {
+        if ( [state.following count] )
+            [state.following removeAllObjects];
+    }
+    state._fsp = -1;
+    state.errorRecovery = NO;		// are we recovering?
+    state.lastErrorIndex = -1;
+    state.failed = NO;				// indicate that some match failed
+    state.syntaxErrors = 0;
+    state.backtracking = 0;			// the level of backtracking
+    state.tokenStartCharIndex = -1;
+    if ( state.ruleMemo != nil ) {
+        if ( [state.ruleMemo count] )
+            [state.ruleMemo removeAllObjects];
+    }
+}
+
+- (BOOL) getFailed
+{
+	return [state getFailed];
+}
+
+- (void) setFailed:(BOOL)flag
+{
+	[state setFailed:flag];
+}
+
+- (RecognizerSharedState *) getState
+{
+	return state;
+}
+
+- (void) setState:(RecognizerSharedState *) theState
+{
+	if (state != theState) {
+		if ( state ) [state release];
+		state = theState;
+		[state retain];
+	}
+}
+
+- (id)input
+{
+    return nil; // Must be overridden in the inheriting class
+}
+
+- (void)skip // override in inheriting class
+{
+    return;
+}
+
+-(id) match:(id<IntStream>)anInput TokenType:(NSInteger)ttype Follow:(ANTLRBitSet *)follow
+{
+	id matchedSymbol = [self getCurrentInputSymbol:anInput];
+	if ([anInput LA:1] == ttype) {
+		[anInput consume];
+		state.errorRecovery = NO;
+		state.failed = NO;
+		return matchedSymbol;
+	}
+	if (state.backtracking > 0) {
+		state.failed = YES;
+		return matchedSymbol;
+	}
+	matchedSymbol = [self recoverFromMismatchedToken:anInput TokenType:ttype Follow:follow];
+	return matchedSymbol;
+}
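+
+/* Generated-code sketch (illustrative, not part of the runtime): a token
+ * reference such as ID in a grammar rule typically becomes a call like the
+ * one below; ID and FOLLOW_ID_in_stat are hypothetical constants emitted by
+ * the code generator.
+ *
+ *   id<Token> idTok = [self match:input TokenType:ID Follow:FOLLOW_ID_in_stat];
+ *   if ( state.failed ) return;   // bail out quietly while backtracking
+ */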
+
+-(void) matchAny:(id<IntStream>)anInput
+{
+    state.errorRecovery = NO;
+    state.failed = NO;
+    [anInput consume];
+}
+
+-(BOOL) mismatchIsUnwantedToken:(id<IntStream>)anInput TokenType:(NSInteger)ttype
+{
+    return [anInput LA:2] == ttype;
+}
+
+-(BOOL) mismatchIsMissingToken:(id<IntStream>)anInput Follow:(ANTLRBitSet *) follow
+{
+    if ( follow == nil ) {
+        // we have no information about the follow; we can only consume
+        // a single token and hope for the best
+        return NO;
+    }
+    // compute what can follow this grammar element reference
+    if ( [follow member:TokenTypeEOR] ) {
+        ANTLRBitSet *viableTokensFollowingThisRule = [self computeContextSensitiveRuleFOLLOW];
+        follow = [follow or:viableTokensFollowingThisRule];
+        if ( state._fsp >= 0 ) { // remove EOR if we're not the start symbol
+            [follow remove:(TokenTypeEOR)];
+        }
+    }
+    // if current token is consistent with what could come after set
+    // then we know we're missing a token; error recovery is free to
+    // "insert" the missing token
+    
+    //System.out.println("viable tokens="+follow.toString(getTokenNames()));
+    //System.out.println("LT(1)="+((TokenStream)input).LT(1));
+    
+    // BitSet cannot handle negative numbers like -1 (EOF) so I leave EOR
+    // in the follow set to indicate that we can fall off the end of the
+    // start symbol (EOF can follow).
+    if ( [follow member:[anInput LA:1]] || [follow member:TokenTypeEOR] ) {
+        //System.out.println("LT(1)=="+((TokenStream)input).LT(1)+" is consistent with what follows; inserting...");
+        return YES;
+    }
+    return NO;
+}
+
+/** Report a recognition problem.
+ *
+ *  This method sets errorRecovery to indicate the parser is recovering
+ *  not parsing.  Once in recovery mode, no errors are generated.
+ *  To get out of recovery mode, the parser must successfully match
+ *  a token (after a resync).  So it will go:
+ *
+ * 		1. error occurs
+ * 		2. enter recovery mode, report error
+ * 		3. consume until token found in resynch set
+ * 		4. try to resume parsing
+ * 		5. next match() will reset errorRecovery mode
+ *
+ *  If you override, make sure to update syntaxErrors if you care about that.
+ */
+-(void) reportError:(RecognitionException *) e
+{
+    // if we've already reported an error and have not matched a token
+    // yet successfully, don't report any errors.
+    if ( state.errorRecovery ) {
+        //System.err.print("[SPURIOUS] ");
+        return;
+    }
+    state.syntaxErrors++; // don't count spurious
+    state.errorRecovery = YES;
+    
+    [self displayRecognitionError:[self getTokenNames] Exception:e];
+}
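+
+/* Override sketch (illustrative): a parser subclass that wants to collect
+ * diagnostics instead of only logging them can override reportError: and call
+ * super so the errorRecovery/syntaxErrors bookkeeping above stays intact.
+ * The errorMessages ivar is hypothetical.
+ *
+ *   - (void) reportError:(RecognitionException *)e
+ *   {
+ *       [super reportError:e];
+ *       [errorMessages addObject:[self getErrorMessage:e TokenNames:[self getTokenNames]]];
+ *   }
+ */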
+
+-(void) displayRecognitionError:(AMutableArray *)theTokNams Exception:(RecognitionException *)e
+{
+    NSString *hdr = [self getErrorHeader:e];
+    NSString *msg = [self getErrorMessage:e TokenNames:theTokNams];
+    [self emitErrorMessage:[NSString stringWithFormat:@" %@ %@", hdr, msg]];
+}
+
+/** What error message should be generated for the various
+ *  exception types?
+ *
+ *  Not very object-oriented code, but I like having all error message
+ *  generation within one method rather than spread among all of the
+ *  exception classes. This also makes it much easier for the exception
+ *  handling because the exception classes do not have to have pointers back
+ *  to this object to access utility routines and so on. Also, changing
+ *  the message for an exception type would be difficult because you
+ *  would have to subclass the exception, but then somehow get ANTLR
+ *  to make those kinds of exception objects instead of the default.
+ *  This looks weird, but trust me--it makes the most sense in terms
+ *  of flexibility.
+ *
+ *  For grammar debugging, you will want to override this to add
+ *  more information such as the stack frame with
+ *  getRuleInvocationStack(e, this.getClass().getName()) and,
+ *  for no viable alts, the decision description and state etc...
+ *
+ *  Override this to change the message generated for one or more
+ *  exception types.
+ */
+- (NSString *)getErrorMessage:(RecognitionException *)e TokenNames:(AMutableArray *)theTokNams
+{
+    // NSString *msg = [e getMessage];
+    NSString *msg;
+    if ( [e isKindOfClass:[UnwantedTokenException class]] ) {
+        UnwantedTokenException *ute = (UnwantedTokenException *)e;
+        NSString *tokenName=@"<unknown>";
+        if ( ute.expecting == TokenTypeEOF ) {
+            tokenName = @"EOF";
+        }
+        else {
+            tokenName = (NSString *)[theTokNams objectAtIndex:ute.expecting];
+        }
+        msg = [NSString stringWithFormat:@"extraneous input %@ expecting %@", [self getTokenErrorDisplay:[ute getUnexpectedToken]],
+               tokenName];
+    }
+    else if ( [e isKindOfClass:[MissingTokenException class] ] ) {
+        MissingTokenException *mte = (MissingTokenException *)e;
+        NSString *tokenName=@"<unknown>";
+        if ( mte.expecting== TokenTypeEOF ) {
+            tokenName = @"EOF";
+        }
+        else {
+            tokenName = [theTokNams objectAtIndex:mte.expecting];
+        }
+        msg = [NSString stringWithFormat:@"missing %@ at %@", tokenName, [self getTokenErrorDisplay:(e.token)] ];
+    }
+    else if ( [e isKindOfClass:[MismatchedTokenException class]] ) {
+        MismatchedTokenException *mte = (MismatchedTokenException *)e;
+        NSString *tokenName=@"<unknown>";
+        if ( mte.expecting== TokenTypeEOF ) {
+            tokenName = @"EOF";
+        }
+        else {
+            tokenName = [theTokNams objectAtIndex:mte.expecting];
+        }
+        msg = [NSString stringWithFormat:@"mismatched input %@ expecting %@",[self getTokenErrorDisplay:(e.token)], tokenName];
+    }
+    else if ( [e isKindOfClass:[MismatchedTreeNodeException class]] ) {
+        MismatchedTreeNodeException *mtne = (MismatchedTreeNodeException *)e;
+        NSString *tokenName=@"<unknown>";
+        if ( mtne.expecting==TokenTypeEOF ) {
+            tokenName = @"EOF";
+        }
+        else {
+            tokenName = [theTokNams objectAtIndex:mtne.expecting];
+        }
+        msg = [NSString stringWithFormat:@"mismatched tree node: %@ expecting %@", mtne.node, tokenName];
+    }
+    else if ( [e isKindOfClass:[NoViableAltException class]] ) {
+        //NoViableAltException *nvae = (NoViableAltException *)e;
+        // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
+        // and "(decision="+nvae.decisionNumber+") and
+        // "state "+nvae.stateNumber
+        //        msg = [NSString stringWithFormat:@"no viable alternative at input %@", [self getTokenErrorDisplay:e.token]];
+        msg = [NSString stringWithFormat:@"no viable alternative decision:%d state:%d at input %@", ((NoViableAltException *)e).stateNumber, ((NoViableAltException *)e).decisionNumber, [self getTokenErrorDisplay:e.token]];
+    }
+    else if ( [e isKindOfClass:[EarlyExitException class]] ) {
+        //EarlyExitException *eee = (EarlyExitException *)e;
+        // for development, can add "(decision="+eee.decisionNumber+")"
+        msg = [NSString stringWithFormat:@"required (...)+ loop did not match anything at input %@", [self getTokenErrorDisplay:e.token]];
+    }
+    else if ( [e isKindOfClass:[MismatchedSetException class]] ) {
+        MismatchedSetException *mse = (MismatchedSetException *)e;
+        msg = [NSString stringWithFormat:@"mismatched input %@ expecting set %@",
+               [self getTokenErrorDisplay:(e.token)],
+               mse.expecting];
+    }
+#pragma warning NotSet not yet implemented.
+    else if ( [e isKindOfClass:[MismatchedNotSetException class] ] ) {
+        MismatchedNotSetException *mse = (MismatchedNotSetException *)e;
+        msg = [NSString stringWithFormat:@"mismatched input %@ expecting set %@",
+               [self getTokenErrorDisplay:(e.token)],
+               mse.expecting];
+    }
+    else if ( [e isKindOfClass:[FailedPredicateException class]] ) {
+        FailedPredicateException *fpe = (FailedPredicateException *)e;
+        msg = [NSString stringWithFormat:@"rule %@ failed predicate: { %@ }?", fpe.ruleName, fpe.predicate];
+    }
+    else {
+        msg = [NSString stringWithFormat:@"Exception= %@\n", e.name];
+    }
+    return msg;
+}
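+
+/* Override sketch (illustrative): for grammar debugging, the comment above
+ * suggests adding the rule invocation stack to each message.  A minimal
+ * version, assuming the default message is still wanted as a prefix:
+ *
+ *   - (NSString *)getErrorMessage:(RecognitionException *)e TokenNames:(AMutableArray *)theTokNams
+ *   {
+ *       NSString *msg = [super getErrorMessage:e TokenNames:theTokNams];
+ *       AMutableArray *stack = [self getRuleInvocationStack:e Recognizer:[self className]];
+ *       return [NSString stringWithFormat:@"%@ (rule stack: %@)", msg, stack];
+ *   }
+ */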
+
+/** Get number of recognition errors (lexer, parser, tree parser).  Each
+ *  recognizer tracks its own number.  So parser and lexer each have
+ *  a separate count.  Does not count the spurious errors found between
+ *  an error and the next valid token match.
+ *
+ *  See also reportError()
+ */
+- (NSInteger) getNumberOfSyntaxErrors
+{
+    return state.syntaxErrors;
+}
+
+/** What is the error header, normally line/character position information? */
+- (NSString *)getErrorHeader:(RecognitionException *)e
+{
+    return [NSString stringWithFormat:@"line %d:%d", e.line, e.charPositionInLine];
+}
+
+/** How should a token be displayed in an error message? The default
+ *  is to display just the text, but during development you might
+ *  want to have a lot of information spit out.  Override in that case
+ *  to use t.toString() (which, for CommonToken, dumps everything about
+ *  the token). This is better than forcing you to override a method in
+ *  your token objects because you don't have to go modify your lexer
+ *  so that it creates a new Java type.
+ */
+- (NSString *)getTokenErrorDisplay:(id<Token>)t
+{
+    NSString *s = t.text;
+    if ( s == nil ) {
+        if ( t.type == TokenTypeEOF ) {
+            s = @"<EOF>";
+        }
+        else {
+            s = [NSString stringWithFormat:@"<%@>", t.type];
+        }
+    }
+    s = [s stringByReplacingOccurrencesOfString:@"\n" withString:@"\\\\n"];
+    s = [s stringByReplacingOccurrencesOfString:@"\r" withString:@"\\\\r"];
+    s = [s stringByReplacingOccurrencesOfString:@"\t" withString:@"\\\\t"];
+    return [NSString stringWithFormat:@"\'%@\'", s];
+}
+                                        
+/** Override this method to change where error messages go */
+- (void) emitErrorMessage:(NSString *) msg
+{
+//    System.err.println(msg);
+    NSLog(@"%@", msg);
+}
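+
+/* Override sketch (illustrative): redirecting diagnostics somewhere other
+ * than NSLog, e.g. stderr; purely an example, not part of the runtime.
+ *
+ *   - (void) emitErrorMessage:(NSString *)msg
+ *   {
+ *       fprintf(stderr, "%s\n", [msg UTF8String]);   // needs <stdio.h>
+ *   }
+ */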
+
+/** Recover from an error found on the input stream.  This is
+ *  for NoViableAlt and mismatched symbol exceptions.  If you enable
+ *  single token insertion and deletion, this will usually not
+ *  handle mismatched symbol exceptions but there could be a mismatched
+ *  token that the match() routine could not recover from.
+ */
+- (void)recover:(id<IntStream>)anInput Exception:(RecognitionException *)re
+{
+    if ( state.lastErrorIndex == anInput.index ) {
+        // uh oh, another error at same token index; must be a case
+        // where LT(1) is in the recovery token set so nothing is
+        // consumed; consume a single token so at least to prevent
+        // an infinite loop; this is a failsafe.
+        [anInput consume];
+    }
+    state.lastErrorIndex = anInput.index;
+    ANTLRBitSet *followSet = [self computeErrorRecoverySet];
+    [self beginResync];
+    [self consumeUntilFollow:anInput Follow:followSet];
+    [self endResync];
+}
+
+- (void) beginResync
+{
+    
+}
+
+- (void) endResync
+{
+    
+}
+                            
+/*  Compute the error recovery set for the current rule.  During
+ *  rule invocation, the parser pushes the set of tokens that can
+ *  follow that rule reference on the stack; this amounts to
+ *  computing FIRST of what follows the rule reference in the
+ *  enclosing rule. This local follow set only includes tokens
+ *  from within the rule; i.e., the FIRST computation done by
+ *  ANTLR stops at the end of a rule.
+ *
+ *  EXAMPLE
+ *
+ *  When you find a "no viable alt exception", the input is not
+ *  consistent with any of the alternatives for rule r.  The best
+ *  thing to do is to consume tokens until you see something that
+ *  can legally follow a call to r *or* any rule that called r.
+ *  You don't want the exact set of viable next tokens because the
+ *  input might just be missing a token--you might consume the
+ *  rest of the input looking for one of the missing tokens.
+ *
+ *  Consider grammar:
+ *
+ *  a : '[' b ']'
+ *    | '(' b ')'
+ *    ;
+ *  b : c '^' INT ;
+ *  c : ID
+ *    | INT
+ *    ;
+ *
+ *  At each rule invocation, the set of tokens that could follow
+ *  that rule is pushed on a stack.  Here are the various "local"
+ *  follow sets:
+ *
+ *  FOLLOW(b1_in_a) = FIRST(']') = ']'
+ *  FOLLOW(b2_in_a) = FIRST(')') = ')'
+ *  FOLLOW(c_in_b) = FIRST('^') = '^'
+ *
+ *  Upon erroneous input "[]", the call chain is
+ *
+ *  a -> b -> c
+ *
+ *  and, hence, the follow context stack is:
+ *
+ *  depth  local follow set     after call to rule
+ *    0         <EOF>                    a (from main())
+ *    1          ']'                     b
+ *    2          '^'                     c
+ *
+ *  Notice that ')' is not included, because b would have to have
+ *  been called from a different context in rule a for ')' to be
+ *  included.
+ *
+ *  For error recovery, we cannot consider FOLLOW(c)
+ *  (context-sensitive or otherwise).  We need the combined set of
+ *  all context-sensitive FOLLOW sets--the set of all tokens that
+ *  could follow any reference in the call chain.  We need to
+ *  resync to one of those tokens.  Note that FOLLOW(c)='^' and if
+ *  we resync'd to that token, we'd consume until EOF.  We need to
+ *  sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+ *  In this case, for input "[]", LA(1) is in this set so we would
+ *  not consume anything and after printing an error rule c would
+ *  return normally.  It would not find the required '^' though.
+ *  At this point, it gets a mismatched token error and throws an
+ *  exception (since LA(1) is not in the viable following token
+ *  set).  The rule exception handler tries to recover, but finds
+ *  the same recovery set and doesn't consume anything.  Rule b
+ *  exits normally returning to rule a.  Now it finds the ']' (and
+ *  with the successful match exits errorRecovery mode).
+ *
+ *  So, you can see that the parser walks up the call chain looking
+ *  for the token that was a member of the recovery set.
+ *
+ *  Errors are not generated in errorRecovery mode.
+ *
+ *  ANTLR's error recovery mechanism is based upon original ideas:
+ *
+ *  "Algorithms + Data Structures = Programs" by Niklaus Wirth
+ *
+ *  and
+ *
+ *  "A note on error recovery in recursive descent parsers":
+ *  http://portal.acm.org/citation.cfm?id=947902.947905
+ *
+ *  Later, Josef Grosch had some good ideas:
+ *
+ *  "Efficient and Comfortable Error Recovery in Recursive Descent
+ *  Parsers":
+ *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+ *
+ *  Like Grosch I implemented local FOLLOW sets that are combined
+ *  at run-time upon error to avoid overhead during parsing.
+ */
+- (ANTLRBitSet *) computeErrorRecoverySet
+{
+    return [self combineFollows:NO];
+}
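+
+/* Generated-code sketch (illustrative): the follow stack walked by
+ * combineFollows: is maintained by the generated rules themselves, which
+ * bracket each rule reference with pushFollow:/popFollow.  FOLLOW_b_in_a and
+ * the rule method b are hypothetical names for the grammar in the comment
+ * above.
+ *
+ *   [self pushFollow:FOLLOW_b_in_a];
+ *   [self b];            // parse the referenced rule
+ *   [self popFollow];
+ */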
+
+/** Compute the context-sensitive FOLLOW set for current rule.
+ *  This is set of token types that can follow a specific rule
+ *  reference given a specific call chain.  You get the set of
+ *  viable tokens that can possibly come next (lookahead depth 1)
+ *  given the current call chain.  Contrast this with the
+ *  definition of plain FOLLOW for rule r:
+ *
+ *   FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
+ *
+ *  where x in T* and alpha, beta in V*; T is set of terminals and
+ *  V is the set of terminals and nonterminals.  In other words,
+ *  FOLLOW(r) is the set of all tokens that can possibly follow
+ *  references to r in *any* sentential form (context).  At
+ *  runtime, however, we know precisely which context applies as
+ *  we have the call chain.  We may compute the exact (rather
+ *  than covering superset) set of following tokens.
+ *
+ *  For example, consider grammar:
+ *
+ *  stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
+ *       | "return" expr '.'
+ *       ;
+ *  expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
+ *  atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
+ *       | '(' expr ')'
+ *       ;
+ *
+ *  The FOLLOW sets are all inclusive whereas context-sensitive
+ *  FOLLOW sets are precisely what could follow a rule reference.
+ *  For input "i=(3);", here is the derivation:
+ *
+ *  stat => ID '=' expr ';'
+ *       => ID '=' atom ('+' atom)* ';'
+ *       => ID '=' '(' expr ')' ('+' atom)* ';'
+ *       => ID '=' '(' atom ')' ('+' atom)* ';'
+ *       => ID '=' '(' INT ')' ('+' atom)* ';'
+ *       => ID '=' '(' INT ')' ';'
+ *
+ *  At the "3" token, you'd have a call chain of
+ *
+ *    stat -> expr -> atom -> expr -> atom
+ *
+ *  What can follow that specific nested ref to atom?  Exactly ')'
+ *  as you can see by looking at the derivation of this specific
+ *  input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
+ *
+ *  You want the exact viable token set when recovering from a
+ *  token mismatch.  Upon token mismatch, if LA(1) is member of
+ *  the viable next token set, then you know there is most likely
+ *  a missing token in the input stream.  "Insert" one by just not
+ *  throwing an exception.
+ */
+- (ANTLRBitSet *)computeContextSensitiveRuleFOLLOW
+{
+    return [self combineFollows:YES];
+}
+
+// what is exact? it seems to only add sets from above on stack
+// if EOR is in set i.  When it sees a set w/o EOR, it stops adding.
+// Why would we ever want them all?  Maybe no viable alt instead of
+// mismatched token?
+- (ANTLRBitSet *)combineFollows:(BOOL) exact
+{
+    NSInteger top = state._fsp;
+    ANTLRBitSet *followSet = [[ANTLRBitSet newBitSet] retain];
+    for (int i = top; i >= 0; i--) {
+        ANTLRBitSet *localFollowSet = (ANTLRBitSet *)[state.following objectAtIndex:i];
+        /*
+         System.out.println("local follow depth "+i+"="+
+         localFollowSet.toString(getTokenNames())+")");
+         */
+        [followSet orInPlace:localFollowSet];
+        if ( exact ) {
+            // can we see end of rule?
+            if ( [localFollowSet member:TokenTypeEOR] ) {
+                // Only leave EOR in set if at top (start rule); this lets
+                // us know if have to include follow(start rule); i.e., EOF
+                if ( i > 0 ) {
+                    [followSet remove:TokenTypeEOR];
+                }
+            }
+            else { // can't see end of rule, quit
+                break;
+            }
+        }
+    }
+    return followSet;
+}
+
+/** Attempt to recover from a single missing or extra token.
+ *
+ *  EXTRA TOKEN
+ *
+ *  LA(1) is not what we are looking for.  If LA(2) has the right token,
+ *  however, then assume LA(1) is some extra spurious token.  Delete it
+ *  and LA(2) as if we were doing a normal match(), which advances the
+ *  input.
+ *
+ *  MISSING TOKEN
+ *
+ *  If current token is consistent with what could come after
+ *  ttype then it is ok to "insert" the missing token, else throw
+ *  exception For example, Input "i=(3;" is clearly missing the
+ *  ')'.  When the parser returns from the nested call to expr, it
+ *  will have call chain:
+ *
+ *    stat -> expr -> atom
+ *
+ *  and it will be trying to match the ')' at this point in the
+ *  derivation:
+ *
+ *       => ID '=' '(' INT ')' ('+' atom)* ';'
+ *                          ^
+ *  match() will see that ';' doesn't match ')' and report a
+ *  mismatched token error.  To recover, it sees that LA(1)==';'
+ *  is in the set of tokens that can follow the ')' token
+ *  reference in rule atom.  It can assume that you forgot the ')'.
+ */
+- (id<Token>)recoverFromMismatchedToken:(id<IntStream>)anInput
+                       TokenType:(NSInteger)ttype
+                          Follow:(ANTLRBitSet *)follow
+{
+    RecognitionException *e = nil;
+    // if next token is what we are looking for then "delete" this token
+    if ( [self mismatchIsUnwantedToken:anInput TokenType:ttype] ) {
+        e = [UnwantedTokenException newException:ttype Stream:anInput];
+        /*
+         System.err.println("recoverFromMismatchedToken deleting "+
+         ((TokenStream)input).LT(1)+
+         " since "+((TokenStream)input).LT(2)+" is what we want");
+         */
+        [self beginResync];
+        [anInput consume]; // simply delete extra token
+        [self endResync];
+        [self reportError:e];  // report after consuming so AW sees the token in the exception
+                         // we want to return the token we're actually matching
+        id matchedSymbol = [self getCurrentInputSymbol:anInput];
+        [anInput consume]; // move past ttype token as if all were ok
+        return matchedSymbol;
+    }
+    // can't recover with single token deletion, try insertion
+    if ( [self mismatchIsMissingToken:anInput Follow:follow] ) {
+        id<Token> inserted = [self getMissingSymbol:anInput Exception:e TokenType:ttype Follow:follow];
+        e = [MissingTokenException newException:ttype Stream:anInput With:inserted];
+        [self reportError:e];  // report after inserting so AW sees the token in the exception
+        return inserted;
+    }
+    // even that didn't work; must throw the exception
+    e = [MismatchedTokenException newException:ttype Stream:anInput];
+    @throw e;
+}
+
+/** Not currently used */
+-(id) recoverFromMismatchedSet:(id<IntStream>)anInput
+                     Exception:(RecognitionException *)e
+                        Follow:(ANTLRBitSet *) follow
+{
+    if ( [self mismatchIsMissingToken:anInput Follow:follow] ) {
+        // System.out.println("missing token");
+        [self reportError:e];
+        // we don't know how to conjure up a token for sets yet
+        return [self getMissingSymbol:anInput Exception:e TokenType:TokenTypeInvalid Follow:follow];
+    }
+    // TODO do single token deletion like above for Token mismatch
+    @throw e;
+}
+
+/** Match needs to return the current input symbol, which gets put
+ *  into the label for the associated token ref; e.g., x=ID.  Token
+ *  and tree parsers need to return different objects. Rather than test
+ *  for input stream type or change the IntStream interface, I use
+ *  a simple method to ask the recognizer to tell me what the current
+ *  input symbol is.
+ * 
+ *  This is ignored for lexers.
+ */
+- (id) getCurrentInputSymbol:(id<IntStream>)anInput
+{
+    return nil;
+}
+
+/** Conjure up a missing token during error recovery.
+ *
+ *  The recognizer attempts to recover from single missing
+ *  symbols. But, actions might refer to that missing symbol.
+ *  For example, x=ID {f($x);}. The action clearly assumes
+ *  that there has been an identifier matched previously and that
+ *  $x points at that token. If that token is missing, but
+ *  the next token in the stream is what we want we assume that
+ *  this token is missing and we keep going. Because we
+ *  have to return some token to replace the missing token,
+ *  we have to conjure one up. This method gives the user control
+ *  over the tokens returned for missing tokens. Mostly,
+ *  you will want to create something special for identifier
+ *  tokens. For literals such as '{' and ',', the default
+ *  action in the parser or tree parser works. It simply creates
+ *  a CommonToken of the appropriate type. The text will be the token.
+ *  If you change what tokens must be created by the lexer,
+ *  override this method to create the appropriate tokens.
+ */
+- (id)getMissingSymbol:(id<IntStream>)anInput
+             Exception:(RecognitionException *)e
+             TokenType:(NSInteger)expectedTokenType
+                Follow:(ANTLRBitSet *)follow
+{
+    return nil;
+}
+
+
+-(void) consumeUntilTType:(id<IntStream>)anInput TokenType:(NSInteger)tokenType
+{
+    //System.out.println("consumeUntil "+tokenType);
+    int ttype = [anInput LA:1];
+    while (ttype != TokenTypeEOF && ttype != tokenType) {
+        [anInput consume];
+        ttype = [anInput LA:1];
+    }
+}
+
+/** Consume tokens until one matches the given token set */
+-(void) consumeUntilFollow:(id<IntStream>)anInput Follow:(ANTLRBitSet *)set
+{
+    //System.out.println("consumeUntil("+set.toString(getTokenNames())+")");
+    int ttype = [anInput LA:1];
+    while (ttype != TokenTypeEOF && ![set member:ttype] ) {
+        //System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
+        [anInput consume];
+        ttype = [anInput LA:1];
+    }
+}
+
+/** Push a rule's follow set using our own hardcoded stack */
+- (void)pushFollow:(ANTLRBitSet *)fset
+{
+    if ( (state._fsp +1) >= [state.following count] ) {
+        //        AMutableArray *f = [AMutableArray arrayWithCapacity:[[state.following] count]*2];
+        //        System.arraycopy(state.following, 0, f, 0, state.following.length);
+        //        state.following = f;
+        [state.following addObject:fset];
+        [fset retain];
+        state._fsp++;
+    }
+    else {
+        [state.following replaceObjectAtIndex:++state._fsp withObject:fset];
+    }
+}
+
+- (ANTLRBitSet *)popFollow
+{
+    ANTLRBitSet *fset;
+
+    if ( state._fsp >= 0 && [state.following count] > 0 ) {
+        fset = [state.following objectAtIndex:state._fsp--];
+        [state.following removeLastObject];
+        return fset;
+    }
+    else {
+        NSLog( @"Attempted to pop a follow when none exists on the stack\n" );
+    }
+    return nil;
+}
+
+/** Return List<String> of the rules in your parser instance
+ *  leading up to a call to this method.  You could override if
+ *  you want more details such as the file/line info of where
+ *  in the parser java code a rule is invoked.
+ *
+ *  This is very useful for error messages and for context-sensitive
+ *  error recovery.
+ */
+- (AMutableArray *)getRuleInvocationStack
+{
+    NSString *parserClassName = [[self className] retain];
+    return [self getRuleInvocationStack:[RecognitionException newException] Recognizer:parserClassName];
+}
+
+/** A more general version of getRuleInvocationStack where you can
+ *  pass in, for example, a RecognitionException to get its rule
+ *  stack trace.  This routine is shared with all recognizers, hence,
+ *  static.
+ *
+ *  TODO: move to a utility class or something; weird having lexer call this
+ */
+- (AMutableArray *)getRuleInvocationStack:(RecognitionException *)e
+                                Recognizer:(NSString *)recognizerClassName
+{
+    // char *name;
+    AMutableArray *rules = [[AMutableArray arrayWithCapacity:20] retain];
+    NSArray *stack = [e callStackSymbols];
+    int i = 0;
+    for (i = [stack count]-1; i >= 0; i--) {
+        NSString *t = [stack objectAtIndex:i];
+        // NSLog(@"stack %d = %@\n", i, t);
+        if ( [t commonPrefixWithString:@"org.antlr.runtime." options:NSLiteralSearch] ) {
+            // id aClass = objc_getClass( [t UTF8String] );
+            continue; // skip support code such as this method
+        }
+        if ( [t isEqualTo:NEXT_TOKEN_RULE_NAME] ) {
+            // name = sel_getName(method_getName(method));
+            // NSString *aMethod = [NSString stringWithFormat:@"%s", name];
+            continue;
+        }
+        if ( ![t isEqualTo:recognizerClassName] ) {
+            // name = class_getName( [t UTF8String] );
+            continue; // must not be part of this parser
+        }
+        [rules addObject:t];
+    }
+#ifdef DONTUSEYET
+    StackTraceElement[] stack = e.getStackTrace();
+    int i = 0;
+    for (i=stack.length-1; i>=0; i--) {
+        StackTraceElement t = stack[i];
+        if ( [t getClassName().startsWith("org.antlr.runtime.") ) {
+            continue; // skip support code such as this method
+        }
+              if ( [[t getMethodName] equals:NEXT_TOKEN_RULE_NAME] ) {
+            continue;
+        }
+              if ( ![[t getClassName] equals:recognizerClassName] ) {
+            continue; // must not be part of this parser
+        }
+              [rules addObject:[t getMethodName]];
+    }
+#endif
+    [stack release];
+    return rules;
+}
+
+- (NSInteger) getBacktrackingLevel
+{
+    return [state getBacktracking];
+}
+      
+- (void) setBacktrackingLevel:(NSInteger)level
+{
+    [state setBacktracking:level];
+}
+      
+/** Used to print out token names like ID during debugging and
+ *  error reporting.  The generated parsers implement a method
+ *  that overrides this to point to their String[] tokenNames.
+ */
+- (NSArray *)getTokenNames
+{
+    return tokenNames;
+}
+
+/** For debugging and other purposes, might want the grammar name.
+ *  Have ANTLR generate an implementation for this method.
+ */
+- (NSString *)getGrammarFileName
+{
+    return grammarFileName;
+}
+
+- (NSString *)getSourceName
+{
+    return nil;
+}
+
+/** A convenience method for use most often with template rewrites.
+ *  Convert a List<Token> to List<String>
+ */
+- (AMutableArray *)toStrings:(AMutableArray *)tokens
+{
+    if ( tokens == nil )
+        return nil;
+    AMutableArray *strings = [AMutableArray arrayWithCapacity:[tokens count]];
+    id object;
+    NSInteger i = 0;
+    for (object in tokens) {
+        [strings addObject:[object text]];
+        i++;
+    }
+    return strings;
+}
+
+/** Given a rule number and a start token index number, return
+ *  ANTLR_MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
+ *  start index.  If this rule has parsed input starting from the
+ *  start index before, then return where the rule stopped parsing.
+ *  It returns the index of the last token matched by the rule.
+ *
+ *  For now we use a hashtable and just the slow Object-based one.
+ *  Later, we can make a special one for ints and also one that
+ *  tosses out data after we commit past input position i.
+ */
+- (NSInteger)getRuleMemoization:(NSInteger)ruleIndex StartIndex:(NSInteger)ruleStartIndex
+{
+    ACNumber *stopIndexI;
+    HashRule *aHashRule;
+    if ( (aHashRule = [state.ruleMemo objectAtIndex:ruleIndex]) == nil ) {
+        aHashRule = [HashRule newHashRuleWithLen:17];
+        [state.ruleMemo insertObject:aHashRule atIndex:ruleIndex];
+    }
+    stopIndexI = [aHashRule getRuleMemoStopIndex:ruleStartIndex];
+    if ( stopIndexI == nil ) {
+        return ANTLR_MEMO_RULE_UNKNOWN;
+    }
+    return [stopIndexI integerValue];
+}
+
+/** Has this rule already parsed input at the current index in the
+ *  input stream?  Return the stop token index or MEMO_RULE_UNKNOWN.
+ *  If we attempted but failed to parse properly before, return
+ *  MEMO_RULE_FAILED.
+ *
+ *  This method has a side-effect: if we have seen this input for
+ *  this rule and successfully parsed before, then seek ahead to
+ *  1 past the stop token matched for this rule last time.
+ */
+- (BOOL)alreadyParsedRule:(id<IntStream>)anInput RuleIndex:(NSInteger)ruleIndex
+{
+    NSInteger aStopIndex = [self getRuleMemoization:ruleIndex StartIndex:anInput.index];
+    if ( aStopIndex == ANTLR_MEMO_RULE_UNKNOWN ) {
+        // NSLog(@"rule %d not yet encountered\n", ruleIndex);
+        return NO;
+    }
+    if ( aStopIndex == ANTLR_MEMO_RULE_FAILED ) {
+        if (debug) NSLog(@"rule %d will never succeed\n", ruleIndex);
+        state.failed = YES;
+    }
+    else {
+        if (debug) NSLog(@"seen rule %d before; skipping ahead to %d failed = %@\n", ruleIndex, aStopIndex+1, state.failed?@"YES":@"NO");
+        [anInput seek:(aStopIndex+1)]; // jump to one past stop token
+    }
+    return YES;
+}
+      
+/** Record whether or not this rule parsed the input at this position
+ *  successfully.  Use a standard java hashtable for now.
+ */
+- (void)memoize:(id<IntStream>)anInput
+      RuleIndex:(NSInteger)ruleIndex
+     StartIndex:(NSInteger)ruleStartIndex
+{
+    RuleStack *aRuleStack;
+    NSInteger stopTokenIndex;
+
+    aRuleStack = state.ruleMemo;
+    stopTokenIndex = (state.failed ? ANTLR_MEMO_RULE_FAILED : (anInput.index-1));
+    if ( aRuleStack == nil ) {
+        if (debug) NSLog(@"!!!!!!!!! memo array is nil for %@", [self getGrammarFileName]);
+        return;
+    }
+    if ( ruleIndex >= [aRuleStack length] ) {
+        if (debug) NSLog(@"!!!!!!!!! memo size is %d, but rule index is %d", [state.ruleMemo length], ruleIndex);
+        return;
+    }
+    if ( [aRuleStack objectAtIndex:ruleIndex] != nil ) {
+        [aRuleStack putHashRuleAtRuleIndex:ruleIndex StartIndex:ruleStartIndex StopIndex:stopTokenIndex];
+    }
+    return;
+}
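+
+/* Generated-code sketch (illustrative): with memoization enabled, a rule
+ * method brackets its body roughly as follows; RULE_expr_INDEX and input are
+ * hypothetical names supplied by the code generator.
+ *
+ *   if ( state.backtracking > 0 && [self alreadyParsedRule:input RuleIndex:RULE_expr_INDEX] ) {
+ *       return;   // already seen this input for this rule; result (or failure) reused
+ *   }
+ *   NSInteger ruleStartIndex = input.index;
+ *   // ... match the rule body ...
+ *   if ( state.backtracking > 0 ) {
+ *       [self memoize:input RuleIndex:RULE_expr_INDEX StartIndex:ruleStartIndex];
+ *   }
+ */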
+   
+/** return how many rule/input-index pairs there are in total.
+ *  TODO: this includes synpreds. :(
+ */
+- (NSInteger)getRuleMemoizationCacheSize
+{
+    RuleStack *aRuleStack;
+    HashRule *aHashRule;
+
+    int aCnt = 0;
+    aRuleStack = state.ruleMemo;
+    for (NSUInteger i = 0; aRuleStack != nil && i < [aRuleStack length]; i++) {
+        aHashRule = [aRuleStack objectAtIndex:i];
+        if ( aHashRule != nil ) {
+            aCnt += [aHashRule count]; // how many input indexes are recorded?
+        }
+    }
+    return aCnt;
+}
+
+#pragma warning Have to fix traceIn and traceOut.
+- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol
+{
+    NSLog(@"enter %@ %@", ruleName, inputSymbol);
+    if ( state.backtracking > 0 ) {
+        NSLog(@" backtracking=%s", ((state.backtracking==YES)?"YES":"NO"));
+    }
+    NSLog(@"\n");
+}
+
+- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex Object:(id)inputSymbol
+{
+    NSLog(@"exit %@ -- %@", ruleName, inputSymbol);
+    if ( state.backtracking > 0 ) {
+        NSLog(@" backtracking=%s %s", state.backtracking?"YES":"NO", state.failed ? "failed":"succeeded");
+    }
+    NSLog(@"\n");
+}
+
+
+// call a syntactic predicate method using its selector. This way we can support arbitrary synpreds.
+- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment // stream:(id<IntStream>)input
+{
+    id<IntStream> input;
+
+    state.backtracking++;
+    // input = state.token.input;
+    input = self.input;
+    int start = [input mark];
+    @try {
+        [self performSelector:synpredFragment];
+    }
+    @catch (RecognitionException *re) {
+        NSLog(@"impossible synpred: %@", re.name);
+    }
+    BOOL success = (state.failed == NO);
+    [input rewind:start];
+    state.backtracking--;
+    state.failed = NO;
+    return success;
+}
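+
+/* Generated-code sketch (illustrative): a syntactic predicate gate in a
+ * generated rule calls this with the selector of its synpred fragment;
+ * synpred1_fragment is a hypothetical name.
+ *
+ *   if ( [self evaluateSyntacticPredicate:@selector(synpred1_fragment)] ) {
+ *       // take this alternative
+ *   }
+ */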
+              
+@end
+                               
diff --git a/runtime/ObjC/Framework/BaseStack.h b/runtime/ObjC/Framework/BaseStack.h
new file mode 100644
index 0000000..7c92536
--- /dev/null
+++ b/runtime/ObjC/Framework/BaseStack.h
@@ -0,0 +1,66 @@
+//
+//  BaseStack.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "PtrBuffer.h"
+
+//#define GLOBAL_SCOPE       0
+//#define LOCAL_SCOPE        1
+#define HASHSIZE         101
+#define HBUFSIZE      0x2000
+
+@interface BaseStack : PtrBuffer {
+	//RuleStack *fNext;
+    // TStringPool *fPool;
+    NSInteger LastHash;
+}
+
+//@property (copy) RuleStack *fNext;
+@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
+
+// Construction/Destruction
++ (BaseStack *)newBaseStack;
++ (BaseStack *)newBaseStackWithLen:(NSInteger)cnt;
+- (id)init;
+- (id)initWithLen:(NSInteger)cnt;
+- (void)dealloc;
+
+// Instance Methods
+- (id) copyWithZone:(NSZone *)aZone;
+
+- (NSUInteger)count;
+- (NSUInteger)size;
+/* clear -- reinitialize the maplist array */
+
+- (NSInteger)getLastHash;
+- (void)setLastHash:(NSInteger)aVal;
+
+@end
diff --git a/runtime/ObjC/Framework/BaseStack.m b/runtime/ObjC/Framework/BaseStack.m
new file mode 100644
index 0000000..c622060
--- /dev/null
+++ b/runtime/ObjC/Framework/BaseStack.m
@@ -0,0 +1,131 @@
+//
+//  BaseStack.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#define SUCCESS (0)
+#define FAILURE (-1)
+
+#import "BaseStack.h"
+#import "Tree.h"
+
+/*
+ * Start of BaseStack
+ */
+@implementation BaseStack
+
+@synthesize LastHash;
+
++(BaseStack *)newBaseStack
+{
+    return [[BaseStack alloc] init];
+}
+
++(BaseStack *)newBaseStackWithLen:(NSInteger)cnt
+{
+    return [[BaseStack alloc] initWithLen:cnt];
+}
+
+-(id)init
+{
+	self = [super initWithLen:HASHSIZE];
+	if ( self != nil ) {
+	}
+    return( self );
+}
+
+-(id)initWithLen:(NSInteger)cnt
+{
+	self = [super initWithLen:cnt];
+    if ( self != nil ) {
+	}
+    return( self );
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in BaseStack" );
+#endif
+	[super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    BaseStack *copy;
+    
+    copy = [super copyWithZone:aZone];
+    return copy;
+}
+
+- (NSUInteger)count
+{
+    NSUInteger aCnt = 0;
+    
+    for (int i = 0; i < BuffSize; i++) {
+        if (ptrBuffer[i] != nil) {
+            aCnt++;
+        }
+    }
+    return aCnt;
+}
+
+- (NSUInteger) size
+{
+    return BuffSize;
+}
+
+-(void)deleteBaseStack:(BaseStack *)np
+{
+    id tmp, rtmp;
+    NSInteger idx;
+    
+    if ( self.fNext != nil ) {
+        for( idx = 0; idx < BuffSize; idx++ ) {
+            tmp = (LinkBase *)ptrBuffer[idx];
+            while ( tmp ) {
+                rtmp = tmp;
+                tmp = [tmp getfNext];
+                [rtmp release];
+            }
+        }
+    }
+}
+
+- (NSInteger)getLastHash
+{
+    return LastHash;
+}
+
+- (void)setLastHash:(NSInteger)aVal
+{
+    LastHash = aVal;
+}
+
+@end
diff --git a/runtime/ObjC/Framework/BaseTree.h b/runtime/ObjC/Framework/BaseTree.h
new file mode 100755
index 0000000..07df439
--- /dev/null
+++ b/runtime/ObjC/Framework/BaseTree.h
@@ -0,0 +1,210 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "Tree.h"
+#import "CommonToken.h"
+#import "AMutableArray.h"
+
+@protocol BaseTree <Tree>
+
++ (id<BaseTree>) INVALID_NODE;
+
++ (id<BaseTree>) newTree;
++ (id<BaseTree>) newTree:(id<BaseTree>)node;
+
+- (id<BaseTree>) init;
+- (id<BaseTree>) initWith:(id<BaseTree>)node;
+
+- (id<BaseTree>) getChild:(NSUInteger)i;
+- (AMutableArray *)children;
+- (void) setChildren:(AMutableArray *)anArray;
+- (id<BaseTree>)getFirstChildWithType:(NSInteger)type;
+- (NSUInteger) getChildCount;
+
+// Add t as a child to this node.  If t is nil, do nothing.  If t
+//  is a nil (flat-list) node, add all children of t to this node's children.
+
+- (void) addChild:(id<BaseTree>) tree;
+- (void) addChildren:(NSArray *) theChildren;
+//- (void) removeAllChildren;
+
+- (void) setChild:(NSInteger) i With:(id<BaseTree>)t;
+- (id) deleteChild:(NSInteger) i;
+- (AMutableArray *) createChildrenList;
+- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
+// Indicates the node is a nil node but may still have children, meaning
+// the tree is a flat list.
+
+- (BOOL) isNil;
+- (NSInteger) getTokenStartIndex;
+- (void) setTokenStartIndex:(NSInteger) index;
+- (NSInteger) getTokenStopIndex;
+- (void) setTokenStopIndex:(NSInteger) index;
+
+- (void) freshenParentAndChildIndexes;
+- (void) freshenParentAndChildIndexes:(NSInteger) offset;
+- (void) sanityCheckParentAndChildIndexes;
+- (void) sanityCheckParentAndChildIndexes:(id<BaseTree>) parent At:(NSInteger) i;
+
+- (NSInteger) getChildIndex;
+- (void) setChildIndex:(NSInteger)i;
+
+- (id<BaseTree>)getAncestor:(NSInteger)ttype;
+- (AMutableArray *)getAncestors;
+
+#pragma mark Copying
+- (id) copyWithZone:(NSZone *)aZone;	// the children themselves are not copied here!
+- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
+- (id) deepCopyWithZone:(NSZone *)aZone;
+
+#pragma mark Tree Parser support
+- (NSInteger)type;
+- (NSString *)text;
+// In case we don't have a token payload, what is the line for errors?
+- (NSUInteger)line;
+- (NSUInteger)charPositionInLine;
+
+
+#pragma mark Informational
+- (NSString *) treeDescription;
+- (NSString *) description;
+
+- (NSString *) toString;
+- (NSString *) toStringTree;
+
+@property (retain) AMutableArray *children;
+@property (retain) NSException *anException;
+
+@end
+
+@interface BaseTree : NSObject <BaseTree>
+{
+	__strong AMutableArray *children;
+    __strong NSException *anException;
+}
+
++ (id<BaseTree>) INVALID_NODE;
++ (id<BaseTree>) newTree;
++ (id<BaseTree>) newTree:(id<BaseTree>)node;
+         
+- (id<BaseTree>) init;
+- (id<BaseTree>) initWith:(id<BaseTree>)node;
+
+- (id<BaseTree>) getChild:(NSUInteger)i;
+- (AMutableArray *)children;
+- (void) setChildren:(AMutableArray *)anArray;
+- (id<BaseTree>)getFirstChildWithType:(NSInteger)type;
+- (NSUInteger) getChildCount;
+
+//- (void) removeAllChildren;
+
+// Add t as a child to this node.  If t is nil, do nothing.  If t
+//  is a nil (flat-list) node, add all children of t to this node's children.
+
+- (void) addChild:(id<BaseTree>) tree;
+- (void) addChildren:(NSArray *) theChildren;
+
+- (void) setChild:(NSUInteger) i With:(id<BaseTree>)t;
+- (id) deleteChild:(NSUInteger) idx;
+- (AMutableArray *) createChildrenList;
+- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
+// Indicates the node is a nil node but may still have children, meaning
+	// the tree is a flat list.
+
+- (BOOL) isNil;
+- (NSInteger) getTokenStartIndex;
+- (void) setTokenStartIndex:(NSInteger) index;
+- (NSInteger) getTokenStopIndex;
+- (void) setTokenStopIndex:(NSInteger) index;
+
+- (void) freshenParentAndChildIndexes;
+- (void) freshenParentAndChildIndexes:(NSInteger) offset;
+- (void) sanityCheckParentAndChildIndexes;
+- (void) sanityCheckParentAndChildIndexes:(id<BaseTree>)parent At:(NSInteger) i;
+
+- (NSInteger) getChildIndex;
+- (void) setChildIndex:(NSInteger)i;
+
+- (BOOL) hasAncestor:(NSInteger) ttype;
+- (id<BaseTree>)getAncestor:(NSInteger)ttype;
+- (AMutableArray *)getAncestors;
+
+- (id) copyWithZone:(NSZone *)aZone;
+- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
+- (id) deepCopyWithZone:(NSZone *)aZone;
+
+	// Return a token type; needed for tree parsing
+- (NSInteger)type;
+- (NSString *)text;
+
+	// In case we don't have a token payload, what is the line for errors?
+- (NSUInteger)line;
+- (NSUInteger)charPositionInLine;
+- (void) setCharPositionInLine:(NSUInteger)pos;
+
+- (NSString *) treeDescription;
+- (NSString *) description;
+- (NSString *) toString;
+- (NSString *) toStringTree;
+
+@property (retain) AMutableArray *children;
+@property (retain) NSException *anException;
+
+@end
+
+@interface TreeNavigationNode : BaseTree {
+}
+- (id) init;
+- (id) copyWithZone:(NSZone *)aZone;
+@end
+
+@interface TreeNavigationNodeDown : TreeNavigationNode {
+}
++ (TreeNavigationNodeDown *) getNavigationNodeDown;
+- (id) init;
+- (NSInteger) tokenType;
+- (NSString *) description;
+@end
+
+@interface TreeNavigationNodeUp : TreeNavigationNode {
+}
++ (TreeNavigationNodeUp *) getNavigationNodeUp;
+- (id) init;
+- (NSInteger) tokenType;
+- (NSString *) description;
+@end
+
+@interface TreeNavigationNodeEOF : TreeNavigationNode {
+}
++ (TreeNavigationNodeEOF *) getNavigationNodeEOF;
+- (id) init;
+- (NSInteger) tokenType;
+- (NSString *) description;
+@end
+
+extern TreeNavigationNodeDown *navigationNodeDown;
+extern TreeNavigationNodeUp *navigationNodeUp;
+extern TreeNavigationNodeEOF *navigationNodeEOF;
diff --git a/runtime/ObjC/Framework/BaseTree.m b/runtime/ObjC/Framework/BaseTree.m
new file mode 100755
index 0000000..2e8448f
--- /dev/null
+++ b/runtime/ObjC/Framework/BaseTree.m
@@ -0,0 +1,621 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "BaseTree.h"
+#import "BaseTreeAdaptor.h"
+#import "Token.h"
+// TODO: this shouldn't be here...but needed for invalidNode
+#import "AMutableArray.h"
+#import "CommonTree.h"
+#import "RuntimeException.h"
+#import "ANTLRError.h"
+
+#pragma mark - Navigation Nodes
+TreeNavigationNodeDown *navigationNodeDown = nil;
+TreeNavigationNodeUp *navigationNodeUp = nil;
+TreeNavigationNodeEOF *navigationNodeEOF = nil;
+
+
+@implementation BaseTree
+
+static id<BaseTree> invalidNode = nil;
+
+#pragma mark Tree protocol conformance
+
++ (id<BaseTree>) INVALID_NODE
+{
+	if ( invalidNode == nil ) {
+		invalidNode = [[CommonTree alloc] initWithTokenType:TokenTypeInvalid];
+	}
+	return invalidNode;
+}
+
++ (id<BaseTree>) invalidNode
+{
+	if ( invalidNode == nil ) {
+		invalidNode = [[CommonTree alloc] initWithTokenType:TokenTypeInvalid];
+	}
+	return invalidNode;
+}
+
++ newTree
+{
+    return [[BaseTree alloc] init];
+}
+
+/** Creating a new node from an existing node does nothing for BaseTree,
+ *  as there are no fields other than the children list, which cannot
+ *  be copied because the children are not considered part of this node.
+ */
++ newTree:(id<BaseTree>) node
+{
+    return [[BaseTree alloc] initWith:(id<BaseTree>) node];
+}
+
+- (id) init
+{
+    self = [super init];
+    if ( self != nil ) {
+        children = nil;
+        return self;
+    }
+    return nil;
+}
+
+- (id) initWith:(id<BaseTree>)node
+{
+    self = [super init];
+    if ( self != nil ) {
+        // children = [[AMutableArray arrayWithCapacity:5] retain];
+        // [children addObject:node];
+        [self addChild:node];
+        return self;
+    }
+    return nil;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in BaseTree %x", (NSInteger)self );
+#endif
+	if ( children ) {
+#ifdef DEBUG_DEALLOC
+        NSLog( @"called dealloc children in BaseTree" );
+#endif
+        [children release];
+    }
+	[super dealloc];
+}
+
+- (id<BaseTree>) getChild:(NSUInteger)i
+{
+    if ( children == nil || i >= [children count] ) {
+        return nil;
+    }
+    return (id<BaseTree>)[children objectAtIndex:i];
+}
+
+/** Get the children internal List; note that if you directly mess with
+ *  the list, do so at your own risk.
+ */
+- (AMutableArray *) children
+{
+    return children;
+}
+
+- (void) setChildren:(AMutableArray *)anArray
+{
+    if ( children != anArray ) {
+        if ( children ) [children release];
+        if ( anArray ) [anArray retain];
+    }
+    children = anArray;
+}
+
+- (id<BaseTree>) getFirstChildWithType:(NSInteger) aType
+{
+    for (NSUInteger i = 0; children != nil && i < [children count]; i++) {
+        id<BaseTree> t = (id<BaseTree>) [children objectAtIndex:i];
+        if ( t.type == aType ) {
+            return t;
+        }
+    }	
+    return nil;
+}
+
+- (NSUInteger) getChildCount
+{
+    if ( children == nil ) {
+        return 0;
+    }
+    return [children count];
+}
+
+/** Add t as child of this node.
+ *
+ *  Warning: if t has no children, but child does
+ *  and child isNil then this routine moves children to t via
+ *  t.children = child.children; i.e., without copying the array.
+ */
+- (void) addChild:(id<BaseTree>) t
+{
+    //System.out.println("add child "+t.toStringTree()+" "+self.toStringTree());
+    //System.out.println("existing children: "+children);
+    if ( t == nil ) {
+        return; // do nothing upon addChild(nil)
+    }
+    if ( self == (BaseTree *)t )
+        @throw [IllegalArgumentException newException:@"BaseTree Can't add self to self as child"];        
+    id<BaseTree> childTree = (id<BaseTree>) t;
+    if ( [childTree isNil] ) { // t is an empty node possibly with children
+        if ( children != nil && children == childTree.children ) {
+            @throw [RuntimeException newException:@"BaseTree add child list to itself"];
+        }
+        // just add all of childTree's children to this
+        if ( childTree.children != nil ) {
+            if ( children != nil ) { // must copy, this has children already
+                int n = [childTree.children count];
+                for ( int i = 0; i < n; i++) {
+                    id<BaseTree> c = (id<BaseTree>)[childTree.children objectAtIndex:i];
+                    [children addObject:c];
+                    // handle double-link stuff for each child of nil root
+                    [c setParent:(id<BaseTree>)self];
+                    [c setChildIndex:[children count]-1];
+                }
+            }
+            else {
+                // no children for this but t has children; just set pointer
+                // call general freshener routine
+                children = childTree.children;
+                [self freshenParentAndChildIndexes];
+            }
+        }
+    }
+    else { // child is not nil (don't care about children)
+        if ( children == nil ) {
+            children = [[AMutableArray arrayWithCapacity:5] retain]; // create children list on demand
+        }
+        [children addObject:t];
+        [childTree setParent:(id<BaseTree>)self];
+        [childTree setChildIndex:[children count]-1];
+    }
+    // System.out.println("now children are: "+children);
+}
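+
+/* Illustrative sketch only: adding a nil (flat-list) node splices in that
+ * node's children rather than the node itself.  The nodes parent, a, flatList,
+ * b and c below are hypothetical.
+ *
+ *     // given: flatList isNil == YES and its children are b and c
+ *     [parent addChild:a];         // a becomes child 0
+ *     [parent addChild:flatList];  // b and c are spliced in as children 1 and 2;
+ *                                  // flatList itself is not added
+ */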
+
+/** Add all elements of kids list as children of this node */
+- (void) addChildren:(AMutableArray *) kids
+{
+    for (NSUInteger i = 0; i < [kids count]; i++) {
+        id<BaseTree> t = (id<BaseTree>) [kids objectAtIndex:i];
+        [self addChild:t];
+    }
+}
+
+- (void) setChild:(NSUInteger) i With:(id<BaseTree>)t
+{
+    if ( t == nil ) {
+        return;
+    }
+    if ( [t isNil] ) {
+        @throw [IllegalArgumentException newException:@"BaseTree Can't set single child to a list"];        
+    }
+    if ( children == nil ) {
+        children = [[AMutableArray arrayWithCapacity:5] retain];
+    }
+    if ([children count] > i ) {
+        [children replaceObjectAtIndex:i withObject:t];
+    }
+    else {
+        [children insertObject:t atIndex:i];
+    }
+    [t setParent:(id<BaseTree>)self];
+    [t setChildIndex:i];
+}
+
+- (id) deleteChild:(NSUInteger) idx
+{
+    if ( children == nil ) {
+        return nil;
+    }
+    id<BaseTree> killed = (id<BaseTree>)[children objectAtIndex:idx];
+    [children removeObjectAtIndex:idx];
+    // walk rest and decrement their child indexes
+    [self freshenParentAndChildIndexes:idx];
+    return killed;
+}
+
+/** Delete children from start to stop and replace with t even if t is
+ *  a list (nil-root Tree).  num of children can increase or decrease.
+ *  For huge child lists, inserting children can force walking rest of
+ *  children to set their childindex; could be slow.
+ */
+- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t
+{
+    /*
+     System.out.println("replaceChildren "+startChildIndex+", "+stopChildIndex+
+     " with "+((BaseTree)t).toStringTree());
+     System.out.println("in="+toStringTree());
+     */
+    if ( children == nil ) {
+        @throw [IllegalArgumentException newException:@"BaseTree Invalid Indexes; no children in list"];        
+    }
+    int replacingHowMany = stopChildIndex - startChildIndex + 1;
+    int replacingWithHowMany;
+    id<BaseTree> newTree = (id<BaseTree>) t;
+    AMutableArray *newChildren = nil;
+    // normalize to a list of children to add: newChildren
+    if ( [newTree isNil] ) {
+        newChildren = newTree.children;
+    }
+    else {
+        newChildren = [AMutableArray arrayWithCapacity:5];
+        [newChildren addObject:newTree];
+    }
+    replacingWithHowMany = [newChildren count];
+    int numNewChildren = [newChildren count];
+    int delta = replacingHowMany - replacingWithHowMany;
+    // if same number of nodes, do direct replace
+    if ( delta == 0 ) {
+        int j = 0; // index into new children
+        for (int i=startChildIndex; i <= stopChildIndex; i++) {
+            id<BaseTree> child = (id<BaseTree>)[newChildren objectAtIndex:j];
+            [children replaceObjectAtIndex:i withObject:(id)child];
+            [child setParent:(id<BaseTree>)self];
+            [child setChildIndex:i];
+            j++;
+        }
+    }
+    else if ( delta > 0 ) { // fewer new nodes than there were
+                            // set children and then delete extra
+        for (int j = 0; j < numNewChildren; j++) {
+            [children replaceObjectAtIndex:startChildIndex+j withObject:[newChildren objectAtIndex:j]];
+        }
+        int indexToDelete = startChildIndex+numNewChildren;
+        for (int c=indexToDelete; c<=stopChildIndex; c++) {
+            // delete same index, shifting everybody down each time
+            [children removeObjectAtIndex:indexToDelete];
+        }
+        [self freshenParentAndChildIndexes:startChildIndex];
+    }
+    else { // more new nodes than were there before
+           // fill in as many children as we can (replacingHowMany) w/o moving data
+        for (int j=0; j<replacingHowMany; j++) {
+            [children replaceObjectAtIndex:startChildIndex+j withObject:[newChildren objectAtIndex:j]];
+        }
+        //        int numToInsert = replacingWithHowMany-replacingHowMany;
+        for (int j=replacingHowMany; j<replacingWithHowMany; j++) {
+            [children insertObject:[newChildren objectAtIndex:j] atIndex:startChildIndex+j];
+        }
+        [self freshenParentAndChildIndexes:startChildIndex];
+    }
+    //System.out.println("out="+toStringTree());
+}
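+
+/* Illustrative sketch only: replacing a range of children with a single node
+ * or with a nil-rooted list.  The nodes below are hypothetical.
+ *
+ *     // parent has children [a b c d]
+ *     [parent replaceChildrenFrom:1 To:2 With:x];
+ *     // parent now has children [a x d]; had x been a nil-rooted list
+ *     // ^(nil y z), the result would be [a y z d]
+ */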
+
+/** Override in a subclass to change the impl of children list */
+- (AMutableArray *) createChildrenList
+{
+    return [AMutableArray arrayWithCapacity:5];
+}
+
+- (BOOL) isNil
+{
+    return NO;
+}
+
+/** Set the parent and child index values for all child of t */
+- (void) freshenParentAndChildIndexes
+{
+    [self freshenParentAndChildIndexes:0];
+}
+               
+- (void) freshenParentAndChildIndexes:(NSInteger) offset
+{
+    int n = [self getChildCount];
+    for (int i = offset; i < n; i++) {
+        id<BaseTree> child = (id<BaseTree>)[self getChild:i];
+        [child setChildIndex:i];
+        [child setParent:(id<BaseTree>)self];
+    }
+}
+               
+- (void) sanityCheckParentAndChildIndexes
+{
+    [self sanityCheckParentAndChildIndexes:nil At:-1];
+}
+               
+- (void) sanityCheckParentAndChildIndexes:(id<BaseTree>)aParent At:(NSInteger) i
+{
+    if ( aParent != [self getParent] ) {
+        @throw [IllegalStateException newException:[NSString stringWithFormat:@"parents don't match; expected %@ found %@", aParent, [self getParent]]];
+    }
+    if ( i != [self getChildIndex] ) {
+        @throw [IllegalStateException newException:[NSString stringWithFormat:@"child indexes don't match; expected %d found %d", i, [self getChildIndex]]];
+    }
+    int n = [self getChildCount];
+    for (int c = 0; c < n; c++) {
+        id<BaseTree> child = (id<BaseTree>)[self getChild:c];
+        [child sanityCheckParentAndChildIndexes:(id<BaseTree>)self At:c];
+    }
+}
+               
+/**  What is the smallest token index (indexing from 0) for this node
+ *   and its children?
+ */
+- (NSInteger) getTokenStartIndex
+{
+    return 0;
+}
+
+- (void) setTokenStartIndex:(NSInteger) anIndex
+{
+}
+
+/**  What is the largest token index (indexing from 0) for this node
+ *   and its children?
+ */
+- (NSInteger) getTokenStopIndex
+{
+    return 0;
+}
+
+- (void) setTokenStopIndex:(NSInteger) anIndex
+{
+}
+
+- (id<BaseTree>) dupNode
+{
+    return nil;
+}
+
+
+/** BaseTree doesn't track child indexes. */
+- (NSInteger) getChildIndex
+{
+    return 0;
+}
+
+- (void) setChildIndex:(NSInteger) anIndex
+{
+}
+
+/** BaseTree doesn't track parent pointers. */
+- (id<BaseTree>) getParent
+{
+    return nil;
+}
+
+- (void) setParent:(id<BaseTree>) t
+{
+}
+
+/** Walk upwards looking for ancestor with this token type. */
+- (BOOL) hasAncestor:(NSInteger) ttype
+{
+    return([self getAncestor:ttype] != nil);
+}
+
+/** Walk upwards and get first ancestor with this token type. */
+- (id<BaseTree>) getAncestor:(NSInteger) ttype
+{
+    id<BaseTree> t = (id<BaseTree>)self;
+    t = (id<BaseTree>)[t getParent];
+    while ( t != nil ) {
+        if ( t.type == ttype )
+            return t;
+        t = (id<BaseTree>)[t getParent];
+    }
+    return nil;
+}
+
+/** Return a list of all ancestors of this node.  The first node of
+ *  list is the root and the last is the parent of this node.
+ */
+- (AMutableArray *)getAncestors
+{
+    if ( [self getParent] == nil )
+        return nil;
+    AMutableArray *ancestors = [AMutableArray arrayWithCapacity:5];
+    id<BaseTree> t = (id<BaseTree>)self;
+    t = (id<BaseTree>)[t getParent];
+    while ( t != nil ) {
+        [ancestors insertObject:t atIndex:0]; // insert at start
+        t = (id<BaseTree>)[t getParent];
+    }
+    return ancestors;
+}
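+
+/* Illustrative sketch only, for a hypothetical chain root -> mid -> leaf:
+ *
+ *     [leaf getAncestors]          // returns [root, mid]: root first, parent last
+ *     [leaf hasAncestor:someType]  // YES iff some ancestor has token type someType
+ */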
+
+- (NSInteger)type
+{
+    return TokenTypeInvalid;
+}
+
+- (NSString *)text
+{
+    return nil;
+}
+
+- (NSUInteger)line
+{
+    return 0;
+}
+
+- (NSUInteger)charPositionInLine
+{
+    return 0;
+}
+
+- (void) setCharPositionInLine:(NSUInteger) pos
+{
+}
+
+#pragma mark Copying
+     
+     // the children themselves are not copied here!
+- (id) copyWithZone:(NSZone *)aZone
+{
+    id<BaseTree> theCopy = [[[self class] allocWithZone:aZone] init];
+    [theCopy addChildren:self.children];
+    return theCopy;
+}
+     
+- (id) deepCopy 					// performs a deepCopyWithZone: with the default zone
+{
+    return [self deepCopyWithZone:NULL];
+}
+     
+- (id) deepCopyWithZone:(NSZone *)aZone
+{
+    id<BaseTree> theCopy = [self copyWithZone:aZone];
+        
+    if ( [theCopy.children count] )
+        [theCopy.children removeAllObjects];
+    AMutableArray *childrenCopy = theCopy.children;
+    for (id loopItem in children) {
+        id<BaseTree> childCopy = [loopItem deepCopyWithZone:aZone];
+        [theCopy addChild:childCopy];
+    }
+    if ( childrenCopy ) [childrenCopy release];
+    return theCopy;
+}
+     
+- (NSString *) treeDescription
+{
+    if ( children == nil || [children count] == 0 ) {
+        return [self description];
+    }
+    NSMutableString *buf = [NSMutableString stringWithCapacity:[children count]];
+    if ( ![self isNil] ) {
+        [buf appendString:@"("];
+        [buf appendString:[self toString]];
+        [buf appendString:@" "];
+    }
+    for (int i = 0; children != nil && i < [children count]; i++) {
+        id<BaseTree> t = (id<BaseTree>)[children objectAtIndex:i];
+        if ( i > 0 ) {
+            [buf appendString:@" "];
+        }
+        [buf appendString:[(id<BaseTree>)t toStringTree]];
+    }
+    if ( ![self isNil] ) {
+        [buf appendString:@")"];
+    }
+    return buf;
+}
+
+/** Print out a whole tree not just a node */
+- (NSString *) toStringTree
+{
+    return [self treeDescription];
+}
+
+- (NSString *) description
+{
+    return nil;
+}
+
+/** Override to say how a node (not a tree) should look as text */
+- (NSString *) toString
+{
+    return nil;
+}
+
+@synthesize children;
+@synthesize anException;
+
+@end
+
+#pragma mark -
+
+@implementation TreeNavigationNode
+- (id)init
+{
+    self = (TreeNavigationNode *)[super init];
+    return self;
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+	return nil;
+}
+@end
+
+@implementation TreeNavigationNodeDown
++ (TreeNavigationNodeDown *) getNavigationNodeDown
+{
+    if ( navigationNodeDown == nil )
+        navigationNodeDown = [[TreeNavigationNodeDown alloc] init];
+    return navigationNodeDown;
+}
+
+- (id)init
+{
+    self = [super init];
+    return self;
+}
+
+- (NSInteger) tokenType { return TokenTypeDOWN; }
+- (NSString *) description { return @"DOWN"; }
+@end
+
+@implementation TreeNavigationNodeUp
++ (TreeNavigationNodeUp *) getNavigationNodeUp
+{
+    if ( navigationNodeUp == nil )
+        navigationNodeUp = [[TreeNavigationNodeUp alloc] init];
+    return navigationNodeUp;
+}
+
+
+- (id)init
+{
+    self = [super init];
+    return self;
+}
+
+- (NSInteger) tokenType { return TokenTypeUP; }
+- (NSString *) description { return @"UP"; }
+@end
+
+@implementation TreeNavigationNodeEOF
++ (TreeNavigationNodeEOF *) getNavigationNodeEOF
+{
+    if ( navigationNodeEOF == nil )
+        navigationNodeEOF = [[TreeNavigationNodeEOF alloc] init];
+    return navigationNodeEOF;
+}
+
+- (id)init
+{
+    self = [super init];
+    return self;
+}
+
+- (NSInteger) tokenType { return TokenTypeEOF; }
+- (NSString *) description { return @"EOF"; }
+
+@end
+
diff --git a/runtime/ObjC/Framework/BaseTreeAdaptor.h b/runtime/ObjC/Framework/BaseTreeAdaptor.h
new file mode 100644
index 0000000..c0d8442
--- /dev/null
+++ b/runtime/ObjC/Framework/BaseTreeAdaptor.h
@@ -0,0 +1,182 @@
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "TreeAdaptor.h"
+#import "CommonErrorNode.h"
+#import "UniqueIDMap.h"
+
+@interface BaseTreeAdaptor : NSObject <TreeAdaptor, NSCopying> {
+    UniqueIDMap *treeToUniqueIDMap;
+	NSInteger uniqueNodeID;
+}
+
+- (id) init;
+
+- (id) copyWithZone:(NSZone *)aZone;
+
+- (id) emptyNode;
+
+- (id) createNil;
+
+/** create tree node that holds the start and stop tokens associated
+ *  with an error.
+ *
+ *  If you specify your own kind of tree nodes, you will likely have to
+ *  override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
+ *  if no token payload but you might have to set token type for diff
+ *  node type.
+ *
+ *  You don't have to subclass CommonErrorNode; you will likely need to
+ *  subclass your own tree node class to avoid class cast exception.
+ */
+- (id) errorNode:(id<TokenStream>)anInput
+            From:(id<Token>)startToken
+              To:(id<Token>)stopToken
+       Exception:(NSException *) e;
+
+- (BOOL) isNil:(id<BaseTree>) aTree;
+
+- (id<BaseTree>)dupTree:(id<BaseTree>)aTree;
+
+/** This is generic in the sense that it will work with any kind of
+ *  tree (not just Tree interface).  It invokes the adaptor routines
+ *  not the tree node routines to do the construction.  
+ */
+- (id<BaseTree>)dupTree:(id<BaseTree>)aTree Parent:(id<BaseTree>)parent;
+- (id<BaseTree>)dupNode:(id<BaseTree>)aNode;
+/** Add a child to the tree t.  If child is a flat tree (a list), make all
+ *  in list children of t.  Warning: if t has no children, but child does
+ *  and child isNil then you can decide it is ok to move children to t via
+ *  t.children = child.children; i.e., without copying the array.  Just
+ *  make sure that this is consistent with how the user will build
+ *  ASTs.
+ */
+- (void) addChild:(id<BaseTree>)aChild toTree:(id<BaseTree>)aTree;
+
+/** If oldRoot is a nil root, just copy or move the children to newRoot.
+ *  If not a nil root, make oldRoot a child of newRoot.
+ *
+ *    old=^(nil a b c), new=r yields ^(r a b c)
+ *    old=^(a b c), new=r yields ^(r ^(a b c))
+ *
+ *  If newRoot is a nil-rooted single child tree, use the single
+ *  child as the new root node.
+ *
+ *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
+ *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
+ *
+ *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
+ *
+ *    old=null, new=r yields r
+ *    old=null, new=^(nil r) yields ^(nil r)
+ *
+ *  Return newRoot.  Throw an exception if newRoot is not a
+ *  simple node or nil root with a single child node--it must be a root
+ *  node.  If newRoot is ^(nil x) return x as newRoot.
+ *
+ *  Be advised that it's ok for newRoot to point at oldRoot's
+ *  children; i.e., you don't have to copy the list.  We are
+ *  constructing these nodes so we should have this control for
+ *  efficiency.
+ */
+- (id<BaseTree>)becomeRoot:(id<BaseTree>)aNewRoot old:(id<BaseTree>)oldRoot;
+
+/** Transform ^(nil x) to x and nil to null */
+- (id<BaseTree>)rulePostProcessing:(id<BaseTree>)aRoot;
+
+- (id<BaseTree>)becomeRootfromToken:(id<Token>)aNewRoot old:(id<BaseTree>)oldRoot;
+
+- (id<BaseTree>) create:(id<Token>)payload;
+- (id<BaseTree>) createTree:(NSInteger)aTType FromToken:(id<Token>)aFromToken;
+- (id<BaseTree>) createTree:(NSInteger)aTType FromToken:(id<Token>)aFromToken Text:(NSString *)theText;
+- (id<BaseTree>) createTree:(NSInteger)aTType Text:(NSString *)theText;
+
+- (NSInteger) getType:(id<BaseTree>)aTree;
+
+- (void) setType:(id<BaseTree>)aTree Type:(NSInteger)type;
+
+- (id<Token>)getToken:(CommonTree *)t;
+
+- (NSString *)getText:(CommonTree *)aTree;
+
+- (void) setText:(id<BaseTree>)aTree Text:(NSString *)theText;
+
+- (id<BaseTree>) getChild:(id<BaseTree>)aTree At:(NSInteger)i;
+
+- (void) setChild:(id<BaseTree>)aTree At:(NSInteger)index Child:(id<BaseTree>)aChild;
+
+- (id<BaseTree>) deleteChild:(id<BaseTree>)aTree Index:(NSInteger)index;
+
+- (NSInteger) getChildCount:(id<BaseTree>)aTree;
+
+- (id<BaseTree>) getParent:(id<BaseTree>) t;
+
+- (void) setParent:(id<BaseTree>)t With:(id<BaseTree>) parent;
+
+/** What index is this node in the child list? Range: 0..n-1
+ *  If your node type doesn't handle this, it's ok but the tree rewrites
+ *  in tree parsers need this functionality.
+ */
+- (NSInteger) getChildIndex:(id)t;
+- (void) setChildIndex:(id)t With:(NSInteger)index;
+
+- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t;
+
+- (NSInteger) getUniqueID:(id<BaseTree>)node;
+
+#ifdef DONTUSENOMO
+- (NSInteger) getUniqueID;
+
+- (void) setUniqueNodeID:(NSInteger)aUniqueNodeID;
+
+- (UniqueIDMap *)getTreeToUniqueIDMap;
+
+- (void) setTreeToUniqueIDMap:(UniqueIDMap *)aMapNode;
+#endif
+
+/** Tell me how to create a token for use with imaginary token nodes.
+ *  For example, there is probably no input symbol associated with imaginary
+ *  token DECL, but you need to create it as a payload or whatever for
+ *  the DECL node as in ^(DECL type ID).
+ *
+ *  This is a variant of createToken where the new token is derived from
+ *  an actual real input token.  Typically this is for converting '{'
+ *  tokens to BLOCK etc...  You'll see
+ *
+ *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
+ *
+ *  If you care what the token payload objects' type is, you should
+ *  override this method and any other createToken variant.
+ */
+- (id<Token>)createToken:(NSInteger)aTType Text:(NSString *)theText;
+
+- (id<Token>)createToken:(id<Token>)aFromToken;
+
+@property (retain) UniqueIDMap *treeToUniqueIDMap;
+@property (assign) NSInteger uniqueNodeID;
+
+@end
diff --git a/runtime/ObjC/Framework/BaseTreeAdaptor.m b/runtime/ObjC/Framework/BaseTreeAdaptor.m
new file mode 100644
index 0000000..1a2a3c3
--- /dev/null
+++ b/runtime/ObjC/Framework/BaseTreeAdaptor.m
@@ -0,0 +1,429 @@
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "BaseTreeAdaptor.h"
+#import "RuntimeException.h"
+#import "UniqueIDMap.h"
+#import "MapElement.h"
+#import "CommonTree.h"
+
+@implementation BaseTreeAdaptor
+
+@synthesize treeToUniqueIDMap;
+@synthesize uniqueNodeID;
+
++ (id<Tree>) newEmptyTree
+{
+    return [[CommonTree alloc] init];
+}
+
+- (id) init
+{
+    self = [super init];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    BaseTreeAdaptor *copy;
+    
+    copy = [[[self class] alloc] init];
+    if (treeToUniqueIDMap)
+        copy.treeToUniqueIDMap = [treeToUniqueIDMap copyWithZone:aZone];
+    copy.uniqueNodeID = uniqueNodeID;
+    return copy;
+}
+    
+
+- (id) createNil
+{
+    return [CommonTree newTreeWithToken:nil];
+}
+
+- (id) emptyNode
+{
+    return [CommonTree newTreeWithToken:nil];
+}
+
+/** create tree node that holds the start and stop tokens associated
+ *  with an error.
+ *
+ *  If you specify your own kind of tree nodes, you will likely have to
+ *  override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
+ *  if no token payload but you might have to set token type for diff
+ *  node type.
+ *
+ *  You don't have to subclass CommonErrorNode; you will likely need to
+ *  subclass your own tree node class to avoid class cast exception.
+ */
+- (id) errorNode:(id<TokenStream>)anInput
+            From:(id<Token>)startToken
+              To:(id<Token>)stopToken
+       Exception:(RecognitionException *) e
+{
+    //System.out.println("returning error node '"+t+"' @index="+anInput.index());
+    return [CommonErrorNode newCommonErrorNode:anInput
+                                                    From:startToken
+                                                      To:stopToken
+                                               Exception:e];
+}
+
+- (BOOL) isNil:(id) tree
+{
+    return [(id)tree isNil];
+}
+
+- (id)dupTree:(id)tree
+{
+    return [self dupTree:(id)tree Parent:nil];
+}
+
+/** This is generic in the sense that it will work with any kind of
+ *  tree (not just Tree interface).  It invokes the adaptor routines
+ *  not the tree node routines to do the construction.  
+ */
+- (id)dupTree:(id)t Parent:(id)parent
+{
+    if ( t==nil ) {
+        return nil;
+    }
+    id newTree = [self dupNode:t];
+    // ensure new subtree root has parent/child index set
+    [self setChildIndex:newTree With:[self getChildIndex:t]]; // same index in new tree
+    [self setParent:newTree With:parent];
+    NSInteger n = [self getChildCount:t];
+    for (NSInteger i = 0; i < n; i++) {
+        id child = [self getChild:t At:i];
+        id newSubTree = [self dupTree:child Parent:t];
+        [self addChild:newSubTree toTree:newTree];
+    }
+    return newTree;
+}
+
+- (id)dupNode:(id)aNode
+{
+    return aNode; // override for better results :>)
+}
+/** Add a child to the tree t.  If child is a flat tree (a list), make all
+ *  in list children of t.  Warning: if t has no children, but child does
+ *  and child isNil then you can decide it is ok to move children to t via
+ *  t.children = child.children; i.e., without copying the array.  Just
+ *  make sure that this is consistent with how the user will build
+ *  ASTs.
+ */
+- (void) addChild:(id)child toTree:(id)t
+{
+    if ( t != nil && child != nil ) {
+        [(id)t addChild:child];
+    }
+}
+
+/** If oldRoot is a nil root, just copy or move the children to newRoot.
+ *  If not a nil root, make oldRoot a child of newRoot.
+ *
+ *    old=^(nil a b c), new=r yields ^(r a b c)
+ *    old=^(a b c), new=r yields ^(r ^(a b c))
+ *
+ *  If newRoot is a nil-rooted single child tree, use the single
+ *  child as the new root node.
+ *
+ *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
+ *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
+ *
+ *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
+ *
+ *    old=null, new=r yields r
+ *    old=null, new=^(nil r) yields ^(nil r)
+ *
+ *  Return newRoot.  Throw an exception if newRoot is not a
+ *  simple node or nil root with a single child node--it must be a root
+ *  node.  If newRoot is ^(nil x) return x as newRoot.
+ *
+ *  Be advised that it's ok for newRoot to point at oldRoot's
+ *  children; i.e., you don't have to copy the list.  We are
+ *  constructing these nodes so we should have this control for
+ *  efficiency.
+ */
+- (id)becomeRoot:(id)newRoot old:(id)oldRoot
+{
+    if ( oldRoot == nil ) {
+        return newRoot;
+    }
+    //System.out.println("becomeroot new "+newRoot.toString()+" old "+oldRoot);
+    id newRootTree = (id)newRoot;
+    id oldRootTree = (id)oldRoot;
+    // handle ^(nil real-node)
+    if ( [newRootTree isNil] ) {
+        NSInteger nc = [newRootTree getChildCount];
+        if ( nc == 1 ) newRootTree = [(id)newRootTree getChild:0];
+        else if ( nc > 1 ) {
+            // TODO: make tree run time exceptions hierarchy
+            @throw [RuntimeException newException:NSStringFromClass([self class]) reason:@"more than one node as root (TODO: make exception hierarchy)"];
+        }
+    }
+    // add oldRoot to newRoot; addChild takes care of case where oldRoot
+    // is a flat list (i.e., nil-rooted tree).  All children of oldRoot
+    // are added to newRoot.
+    [newRootTree addChild:oldRootTree];
+    return newRootTree;
+}
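+
+/* Illustrative sketch only: becomeRoot:old: hoists a new root over an existing
+ * (possibly nil-rooted) subtree, mirroring the ^(...) rewrite notation in the
+ * comment above.  The adaptor, r and oldRoot below are hypothetical.
+ *
+ *     // oldRoot = ^(nil a b c), r = a plain node
+ *     id root = [adaptor becomeRoot:r old:oldRoot];
+ *     // root is now ^(r a b c)
+ */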
+
+/** Transform ^(nil x) to x and nil to null */
+- (id)rulePostProcessing:(id)root
+{
+    //System.out.println("rulePostProcessing: "+((Tree)root).toStringTree());
+    id r = (id)root;
+    if ( r != nil && [r isNil] ) {
+        if ( [r getChildCount] == 0 ) {
+            r = nil;
+        }
+        else if ( [r getChildCount] == 1 ) {
+            r = (id)[r getChild:0];
+            // whoever invokes rule will set parent and child index
+            [r setParent:nil];
+            [r setChildIndex:-1];
+        }
+    }
+    return r;
+}
+
+- (id)becomeRootfromToken:(id<Token>)newRoot old:(id)oldRoot
+{
+    return [self becomeRoot:(id)[self create:newRoot] old:oldRoot];
+}
+
+- (id) create:(id<Token>)aToken
+{
+    return [CommonTree newTreeWithToken:aToken];
+}
+
+- (id)createTree:(NSInteger)tokenType FromToken:(id<Token>)fromToken
+{
+    fromToken = [self createToken:fromToken];
+    //((ClassicToken)fromToken).setType(tokenType);
+    [fromToken setType:tokenType];
+    id t = [self create:fromToken];
+    return t;
+}
+
+- (id)createTree:(NSInteger)tokenType FromToken:(id<Token>)fromToken Text:(NSString *)text
+{
+    if (fromToken == nil)
+        return [self createTree:tokenType Text:text];
+    fromToken = [self createToken:fromToken];
+    [fromToken setType:tokenType];
+    [fromToken setText:text];
+    id t = [self create:fromToken];
+    return t;
+}
+
+- (id)createTree:(NSInteger)tokenType Text:(NSString *)text
+{
+    id<Token> fromToken = [self createToken:tokenType Text:text];
+    id t = (id)[self create:fromToken];
+    return t;
+}
+
+- (NSInteger) getType:(CommonTree *) t
+{
+    return [t type];
+}
+
+- (void) setType:(id)t Type:(NSInteger)type
+{
+    @throw [NoSuchElementException newException:@"don't know enough about Tree node"];
+}
+
+/** What is the Token associated with this node?  If
+ *  you are not using CommonTree, then you must
+ *  override this in your own adaptor.
+ */
+- (id<Token>) getToken:(CommonTree *) t
+{
+    if ( [t isKindOfClass:[CommonTree class]] ) {
+        return [t getToken];
+    }
+    return nil; // no idea what to do
+}
+
+- (NSString *)getText:(CommonTree *)t
+{
+    return [t text];
+}
+
+- (void) setText:(id)t Text:(NSString *)text
+{
+    @throw [NoSuchElementException newException:@"don't know enough about Tree node"];
+}
+
+- (id) getChild:(id)t At:(NSInteger)index
+{
+    return [(id)t getChild:index ];
+}
+
+- (void) setChild:(id)t At:(NSInteger)index Child:(id)child
+{
+    [(id)t setChild:index With:(id)child];
+}
+
+- (id) deleteChild:(id)t Index:(NSInteger)index
+{
+    return [(id)t deleteChild:index];
+}
+
+- (NSInteger) getChildCount:(id)t
+{
+    return [(id)t getChildCount];
+}
+
+- (id<BaseTree>) getParent:(id<BaseTree>) t
+{
+    if ( t == nil )
+        return nil;
+    return (id<BaseTree>)[t getParent];
+}
+
+- (void) setParent:(id<BaseTree>)t With:(id<BaseTree>) parent
+{
+    if ( t != nil )
+        [(id<BaseTree>) t setParent:(id<BaseTree>)parent];
+}
+
+/** What index is this node in the child list? Range: 0..n-1
+ *  If your node type doesn't handle this, it's ok but the tree rewrites
+ *  in tree parsers need this functionality.
+ */
+- (NSInteger) getChildIndex:(id)t
+{
+    return ((CommonTree *)t).childIndex;
+}
+
+- (void) setChildIndex:(id)t With:(NSInteger)index
+{
+    ((CommonTree *)t).childIndex = index;
+}
+
+- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t
+{
+    return;
+}
+
+- (NSInteger) getUniqueID:(id)node
+{
+    if ( treeToUniqueIDMap == nil ) {
+        treeToUniqueIDMap = [UniqueIDMap newUniqueIDMap];
+    }
+    ACNumber *prevID = [treeToUniqueIDMap getNode:node];
+    if ( prevID != nil ) {
+        return [prevID integerValue];
+    }
+    NSInteger anID = uniqueNodeID;
+    // MapElement *aMapNode = [MapElement newMapElementWithObj1:[ACNumber numberWithInteger:anID] Obj2:node];
+    [treeToUniqueIDMap putID:[ACNumber numberWithInteger:anID] Node:node];
+    uniqueNodeID++;
+    return anID;
+    // GCC makes these nonunique:
+    // return System.identityHashCode(node);
+}
+
+/** Tell me how to create a token for use with imaginary token nodes.
+ *  For example, there is probably no input symbol associated with imaginary
+ *  token DECL, but you need to create it as a payload or whatever for
+ *  the DECL node as in ^(DECL type ID).
+ *
+ *  If you care what the token payload objects' type is, you should
+ *  override this method and any other createToken variant.
+ */
+- (id<Token>) createToken:(NSInteger)aTType Text:(NSString *)text
+{
+    return nil;
+}
+
+/** Tell me how to create a token for use with imaginary token nodes.
+ *  For example, there is probably no input symbol associated with imaginary
+ *  token DECL, but you need to create it as a payload or whatever for
+ *  the DECL node as in ^(DECL type ID).
+ *
+ *  This is a variant of createToken where the new token is derived from
+ *  an actual real input token.  Typically this is for converting '{'
+ *  tokens to BLOCK etc...  You'll see
+ *
+ *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
+ *
+ *  If you care what the token payload objects' type is, you should
+ *  override this method and any other createToken variant.
+ */
+- (id<Token>) createToken:(id<Token>) fromToken
+{
+    return nil;
+}
+
+/** Track start/stop token for subtree root created for a rule.
+ *  Only works with Tree nodes.  For rules that match nothing,
+ *  seems like this will yield start=i and stop=i-1 in a nil node.
+ *  Might be useful info so I'll not force to be i..i.
+ */
+- (void) setTokenBoundaries:(id)aTree From:(id<Token>)startToken To:(id<Token>)stopToken
+{
+    return;
+}
+
+- (NSInteger) getTokenStartIndex:(id)aTree
+{
+    return -1;
+}
+
+- (NSInteger) getTokenStopIndex:(id)aTree
+{
+    return -1;
+}
+
+#ifdef DONTUSENOMO
+- (NSInteger)getUniqueID
+{
+    return uniqueNodeID;
+}
+
+- (void) setUniqueNodeID:(NSInteger)aUniqueNodeID
+{
+    uniqueNodeID = aUniqueNodeID;
+}
+
+- (UniqueIDMap *)getTreeToUniqueIDMap
+{
+    return treeToUniqueIDMap;
+}
+
+- (void) setTreeToUniqueIDMap:(UniqueIDMap *)aMapListNode
+{
+    treeToUniqueIDMap = aMapListNode;
+}
+
+#endif
+
+@end
diff --git a/runtime/ObjC/Framework/BufferedTokenStream.h b/runtime/ObjC/Framework/BufferedTokenStream.h
new file mode 100644
index 0000000..ca8c4fb
--- /dev/null
+++ b/runtime/ObjC/Framework/BufferedTokenStream.h
@@ -0,0 +1,102 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "TokenStream.h"
+#import "TokenSource.h"
+#import "ANTLRBitSet.h"
+#import "CommonToken.h"
+#import "AMutableArray.h"
+
+@interface BufferedTokenStream : NSObject <TokenStream> 
+{
+__strong id<TokenSource> tokenSource;
+    
+    /** Record every single token pulled from the source so we can reproduce
+     *  chunks of it later.  The buffer in LookaheadStream overlaps sometimes
+     *  as its moving window moves through the input.  This list captures
+     *  everything so we can access complete input text.
+     */
+__strong AMutableArray *tokens;
+    
+    /** Track the last mark() call result value for use in rewind(). */
+NSInteger lastMarker;
+    
+    /** The index into the tokens list of the current token (next token
+     *  to consume).  tokens[index] should be LT(1).  index=-1 indicates need
+     *  to initialize with first token.  The ctor doesn't get a token.
+     *  First call to LT(1) or whatever gets the first token and sets index=0;
+     */
+NSInteger index;
+    
+NSInteger range; // how deep have we gone?
+    
+}
+@property (retain, getter=getTokenSource,setter=setTokenSource:) id<TokenSource> tokenSource;
+@property (retain, getter=getTokens,setter=setTokens:) AMutableArray *tokens;
+@property (assign, getter=getLastMarker,setter=setLastMarker:) NSInteger lastMarker;
+@property (assign) NSInteger index;
+@property (assign, getter=getRange,setter=setRange:) NSInteger range;
+
++ (BufferedTokenStream *) newBufferedTokenStream;
++ (BufferedTokenStream *) newBufferedTokenStreamWith:(id<TokenSource>)aSource;
+- (id) initWithTokenSource:(id<TokenSource>)aSource;
+- (void)dealloc;
+- (id) copyWithZone:(NSZone *)aZone;
+- (NSUInteger)charPositionInLine;
+- (NSUInteger)line;
+- (NSInteger) getRange;
+- (void) setRange:(NSInteger)anInt;
+- (NSInteger) mark;
+- (void) release:(NSInteger) marker;
+- (void) rewind:(NSInteger) marker;
+- (void) rewind;
+- (void) reset;
+- (void) seek:(NSInteger) anIndex;
+- (NSInteger) size;
+- (void) consume;
+- (void) sync:(NSInteger) i;
+- (void) fetch:(NSInteger) n;
+- (id<Token>) getToken:(NSInteger) i;
+- (AMutableArray *)getFrom:(NSInteger)startIndex To:(NSInteger) stopIndex;
+- (NSInteger) LA:(NSInteger)i;
+- (id<Token>) LB:(NSInteger) k;
+- (id<Token>) LT:(NSInteger) k;
+- (void) setup;
+- (id<TokenSource>) getTokenSource;
+- (void) setTokenSource:(id<TokenSource>) aTokenSource;
+- (AMutableArray *)getTokens;
+- (NSString *) getSourceName;
+- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex;
+- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex With:(ANTLRBitSet *)types;
+- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithList:(AMutableArray *)types;
+- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithType:(NSInteger)ttype;
+- (NSString *) toString;
+- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex;
+- (NSString *) toStringFromToken:(id<Token>)startIndex ToToken:(id<Token>)stopIndex;
+- (void) fill;
+
+@end
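+
+// Usage sketch (illustrative only, not part of the runtime): given some object
+// conforming to TokenSource, here a hypothetical `lexer`, the buffered stream
+// can be created and walked with the declarations above:
+//
+//   BufferedTokenStream *stream = [BufferedTokenStream newBufferedTokenStreamWith:lexer];
+//   while ( [stream LA:1] != TokenTypeEOF ) {
+//       id<Token> t = [stream LT:1];   // current token, fetched lazily
+//       NSLog(@"%@", [t text]);
+//       [stream consume];
+//   }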
diff --git a/runtime/ObjC/Framework/BufferedTokenStream.m b/runtime/ObjC/Framework/BufferedTokenStream.m
new file mode 100644
index 0000000..17ed16e
--- /dev/null
+++ b/runtime/ObjC/Framework/BufferedTokenStream.m
@@ -0,0 +1,392 @@
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "BufferedTokenStream.h"
+#import "TokenSource.h"
+#import "CommonTreeAdaptor.h"
+#import "RuntimeException.h"
+
+extern NSInteger debug;
+
+@implementation BufferedTokenStream
+
+@synthesize tokenSource;
+@synthesize tokens;
+@synthesize lastMarker;
+@synthesize index;
+@synthesize range;
+
++ (BufferedTokenStream *) newBufferedTokenStream
+{
+    return [[BufferedTokenStream alloc] init];
+}
+
++ (BufferedTokenStream *) newBufferedTokenStreamWith:(id<TokenSource>)aSource
+{
+    return [[BufferedTokenStream alloc] initWithTokenSource:aSource];
+}
+
+- (BufferedTokenStream *) init
+{
+	if ((self = [super init]) != nil)
+	{
+        tokenSource = nil;
+        tokens = [[AMutableArray arrayWithCapacity:1000] retain];
+        index = -1;
+        range = -1;
+	}
+	return self;
+}
+
+-(id) initWithTokenSource:(id<TokenSource>)aSource
+{
+	if ((self = [super init]) != nil)
+	{
+        tokenSource = [aSource retain];
+        tokens = [[AMutableArray arrayWithCapacity:1000] retain];
+        index = -1;
+        range = -1;
+	}
+	return self;
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    BufferedTokenStream *copy;
+    
+    copy = [[[self class] allocWithZone:aZone] init];
+    copy.tokenSource = self.tokenSource;
+    if ( self.tokens )
+        copy.tokens = [tokens copyWithZone:aZone];
+    copy.lastMarker = self.lastMarker;
+    copy.index = self.index;
+    copy.range = self.range;
+    return copy;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in BufferedTokenStream" );
+#endif
+    if ( tokens ) [tokens release];
+    if ( tokenSource ) [tokenSource release];
+	[super dealloc];
+}
+
+- (NSUInteger)line
+{
+    return ((CommonToken *)[tokens objectAtIndex:index]).line;
+}
+
+- (NSUInteger)charPositionInLine
+{
+    return ((CommonToken *)[tokens objectAtIndex:index]).charPositionInLine;
+}
+
+- (id<TokenSource>) getTokenSource
+{
+    return tokenSource;
+}
+
+- (NSInteger) getRange
+{
+    return range;
+}
+
+- (void) setRange:(NSInteger)anInt
+{
+    range = anInt;
+}
+
+- (NSInteger) mark
+{
+    if ( index == -1 ) {
+        [self setup];
+//        [self fill];
+    }
+    lastMarker = self.index;
+    return lastMarker;
+}
+
+- (void) release:(NSInteger) marker
+{
+    // no resources to release
+}
+
+- (void) rewind:(NSInteger) marker
+{
+    [self seek:marker];
+}
+
+- (void) rewind
+{
+    [self seek:lastMarker];
+}
+
+- (void) reset
+{
+    index = 0;
+    lastMarker = 0;
+}
+
+- (void) seek:(NSInteger) anIndex
+{
+    index = anIndex;
+}
+
+- (NSInteger) size
+{
+    return [tokens count];
+}
+
+/** Move the input pointer to the next incoming token.  The stream
+ *  must become active with LT(1) available.  consume() simply
+ *  moves the input pointer so that LT(1) points at the next
+ *  input symbol. Consume at least one token.
+ *
+ *  Walk past any token not on the channel the parser is listening to.
+ */
+- (void) consume
+{
+    if ( index == -1 ) {
+        [self setup];
+//        [self fill];
+    }
+    index++;
+    [self sync:index];
+}
+
+/** Make sure index i in tokens has a token. */
+- (void) sync:(NSInteger) i
+{
+    // how many more elements do we need?
+    NSInteger n = (i - [tokens count]) + 1;
+    if (debug > 1) NSLog(@"[self sync:%d] needs %d\n", i, n);
+    if ( n > 0 )
+        [self fetch:n];
+}
+
+/** add n elements to buffer */
+- (void) fetch:(NSInteger)n
+{
+    for (NSInteger i=1; i <= n; i++) {
+        id<Token> t = [tokenSource nextToken];
+        [t setTokenIndex:[tokens count]];
+        if (debug > 1) NSLog(@"adding %@ at index %d\n", [t text], [tokens count]);
+        [tokens addObject:t];
+        if ( t.type == TokenTypeEOF )
+            break;
+    }
+}
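+
+// Note (illustrative): the buffer is filled lazily.  For example, with an
+// empty buffer a call such as
+//
+//   [self sync:4];
+//
+// computes n = (4 - 0) + 1 = 5 and calls [self fetch:5], pulling five tokens
+// from the token source (or fewer if EOF is reached first).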
+
+- (id<Token>) getToken:(NSInteger) i
+{
+    if ( i < 0 || i >= [tokens count] ) {
+        @throw [NoSuchElementException newException:[NSString stringWithFormat:@"token index %d out of range 0..%d", i, [tokens count]-1]];
+    }
+    return [tokens objectAtIndex:i];
+}
+
+/** Get all tokens from start..stop inclusively */
+- (AMutableArray *)getFrom:(NSInteger)startIndex To:(NSInteger)stopIndex
+{
+    if ( startIndex < 0 || stopIndex < 0 )
+        return nil;
+    if ( index == -1 ) {
+        [self setup];
+//        [self fill];
+    }
+    AMutableArray *subset = [AMutableArray arrayWithCapacity:5];
+    if ( stopIndex >= [tokens count] )
+        stopIndex = [tokens count]-1;
+    for (NSInteger i = startIndex; i <= stopIndex; i++) {
+        id<Token>t = [tokens objectAtIndex:i];
+        if ( t.type == TokenTypeEOF )
+            break;
+        [subset addObject:t];
+    }
+    return subset;
+}
+
+- (NSInteger) LA:(NSInteger)i
+{
+    return [[self LT:i] type];
+}
+
+- (id<Token>) LB:(NSInteger)k
+{
+    if ( (index - k) < 0 )
+        return nil;
+    return [tokens objectAtIndex:(index-k)];
+}
+
+- (id<Token>) LT:(NSInteger)k
+{
+    if ( index == -1 ) {
+        [self setup];
+//        [self fill];
+    }
+    if ( k == 0 )
+        return nil;
+    if ( k < 0 )
+        return [self LB:-k];
+    
+    NSInteger i = index + k - 1;
+    [self sync:i];
+    if ( i >= [tokens count] ) {
+        // return the EOF token; EOF must be the last token in the buffer
+        return [tokens objectAtIndex:([tokens count]-1)];
+    }
+    if ( i > range )
+        range = i;
+    return [tokens objectAtIndex:i];
+}
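+
+// Lookahead sketch (illustrative): LT(1) is the current, next-to-consume
+// token at tokens[index]; larger k looks further ahead, and negative k looks
+// backwards via LB:.
+//
+//   id<Token> current  = [self LT:1];    // tokens[index]
+//   id<Token> next     = [self LT:2];    // tokens[index+1], fetched on demand
+//   id<Token> previous = [self LT:-1];   // same as [self LB:1]
+//
+// Requests past the end of the buffer return the EOF token.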
+
+- (void) setup
+{
+    [self sync:0];
+    index = 0;
+}
+
+/** Reset this token stream by setting its token source. */
+- (void) setTokenSource:(id<TokenSource>) aTokenSource
+{
+    tokenSource = aTokenSource;
+    if ( [tokens count] )
+        [tokens removeAllObjects];
+    index = -1;
+}
+
+- (AMutableArray *)getTokens
+{
+    return tokens;
+}
+
+- (AMutableArray *)getTokensFrom:(NSInteger) startIndex To:(NSInteger) stopIndex
+{
+    return [self getTokensFrom:startIndex To:stopIndex With:(ANTLRBitSet *)nil];
+}
+
+/** Given a start and stop index, return an array of all tokens whose
+ *  type is in the token-type BitSet.  Return nil if no tokens were found.
+ *  This method looks at both on- and off-channel tokens.
+ */
+- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex With:(ANTLRBitSet *)types
+{
+    if ( index == -1 ) {
+        [self setup];
+//        [self fill];
+    }
+    if ( stopIndex >= [tokens count] )
+        stopIndex = [tokens count]-1;
+    if ( startIndex < 0 )
+        startIndex = 0;
+    if ( startIndex > stopIndex )
+        return nil;
+    
+    // list = tokens[start:stop]:{Token t, t.getType() in types}
+    AMutableArray *filteredTokens = [AMutableArray arrayWithCapacity:5];
+    for (NSInteger i = startIndex; i <= stopIndex; i++) {
+        id<Token>t = [tokens objectAtIndex:i];
+        if ( types == nil || [types member:t.type] ) {
+            [filteredTokens addObject:t];
+        }
+    }
+    if ( [filteredTokens count] == 0 ) {
+        filteredTokens = nil;
+    }
+    return filteredTokens;
+}
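+
+// Filtering sketch (illustrative): the BitSet overload above backs the
+// convenience methods declared below, e.g. collecting every token of a
+// single type among the first ten buffered tokens:
+//
+//   NSInteger myIdType = 4;  // placeholder token type from a generated grammar
+//   AMutableArray *hits = [self getTokensFrom:0 To:9 WithType:myIdType];
+//
+// nil is returned when nothing in the range matches.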
+
+- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithType:(NSInteger)ttype
+{
+    return [self getTokensFrom:startIndex To:stopIndex With:[ANTLRBitSet of:ttype]];
+}
+
+- (AMutableArray *)getTokensFrom:(NSInteger)startIndex To:(NSInteger)stopIndex WithList:(AMutableArray *)types
+{
+    return [self getTokensFrom:startIndex To:stopIndex With:[ANTLRBitSet newBitSetWithArray:types]];
+}
+            
+- (NSString *)getSourceName
+{
+    return [tokenSource getSourceName];
+}
+
+/** Grab *all* tokens from stream and return string */
+- (NSString *) toString
+{
+    if ( index == -1 ) {
+        [self setup];
+    }
+    [self fill];
+    return [self toStringFromStart:0 ToEnd:[tokens count]-1];
+}
+
+- (NSString *) toStringFromStart:(NSInteger)startIdx ToEnd:(NSInteger)stopIdx
+{
+    if ( startIdx < 0 || stopIdx < 0 )
+        return nil;
+    if ( index == -1 ) {
+        [self setup];
+    }
+    if ( stopIdx >= [tokens count] )
+        stopIdx = [tokens count]-1;
+    NSMutableString *buf = [NSMutableString stringWithCapacity:5];
+    for (NSInteger i = startIdx; i <= stopIdx; i++) {
+        id<Token>t = [tokens objectAtIndex:i];
+        if ( t.type == TokenTypeEOF )
+            break;
+        [buf appendString:[t text]];
+    }
+    return buf;
+}
+
+- (NSString *) toStringFromToken:(id<Token>)startToken ToToken:(id<Token>)stopToken
+{
+    if ( startToken != nil && stopToken != nil ) {
+        return [self toStringFromStart:[startToken getTokenIndex] ToEnd:[stopToken getTokenIndex]];
+    }
+    return nil;
+}
+
+/** Get all tokens from lexer until EOF */
+- (void) fill
+{
+    if ( index == -1 ) [self setup];
+    if ( [((CommonToken *)[tokens objectAtIndex:index]) type] == TokenTypeEOF )
+        return;
+    
+    NSInteger i = index+1;
+    [self sync:i];
+    while ( [((CommonToken *)[tokens objectAtIndex:i]) type] != TokenTypeEOF ) {
+        i++;
+        [self sync:i];
+    }
+}
+
+@end
diff --git a/runtime/ObjC/Framework/BufferedTreeNodeStream.h b/runtime/ObjC/Framework/BufferedTreeNodeStream.h
new file mode 100644
index 0000000..f2691f8
--- /dev/null
+++ b/runtime/ObjC/Framework/BufferedTreeNodeStream.h
@@ -0,0 +1,157 @@
+//
+//  BufferedTreeNodeStream.h
+//  ANTLR
+//
+// [The "BSD licence"]
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Tree.h"
+#import "CommonTreeAdaptor.h"
+#import "TokenStream.h"
+#import "CommonTreeNodeStream.h"
+#import "LookaheadStream.h"
+#import "TreeIterator.h"
+#import "IntArray.h"
+#import "AMutableArray.h"
+
+#define DEFAULT_INITIAL_BUFFER_SIZE 100
+#define INITIAL_CALL_STACK_SIZE 10
+
+#ifdef DONTUSENOMO
+@interface StreamIterator : TreeIterator
+{
+    NSInteger idx;
+    __strong BufferedTreeNodeStream *input;
+    __strong AMutableArray *nodes;
+}
+
++ (id) newStreamIterator:(BufferedTreeNodeStream *) theStream;
+
+- (id) initWithStream:(BufferedTreeNodeStream *) theStream;
+
+- (BOOL) hasNext;
+- (id) next;
+- (void) remove;
+@end
+#endif
+
+@interface BufferedTreeNodeStream : NSObject <TreeNodeStream> 
+{
+	id up;
+	id down;
+	id eof;
+	
+	AMutableArray *nodes;
+	
+	id root; // root node of the tree being streamed
+	
+	id<TokenStream> tokens;
+	CommonTreeAdaptor *adaptor;
+	
+	BOOL uniqueNavigationNodes;
+	NSInteger index;
+	NSInteger lastMarker;
+	IntArray *calls;
+	
+	NSEnumerator *e;
+    id currentSymbol;
+	
+}
+
+@property (retain, getter=getUp, setter=setUp:) id up;
+@property (retain, getter=getDown, setter=setDown:) id down;
+@property (retain, getter=eof, setter=setEof:) id eof;
+@property (retain, getter=getNodes, setter=setNodes:) AMutableArray *nodes;
+@property (retain, getter=getTreeSource, setter=setTreeSource:) id root;
+@property (retain, getter=getTokenStream, setter=setTokenStream:) id<TokenStream> tokens;
+@property (retain, getter=getAdaptor, setter=setAdaptor:) CommonTreeAdaptor *adaptor;
+@property (assign, getter=getUniqueNavigationNodes, setter=setUniqueNavigationNodes:) BOOL uniqueNavigationNodes;
+@property (assign) NSInteger index;
+@property (assign, getter=getLastMarker, setter=setLastMarker:) NSInteger lastMarker;
+@property (retain, getter=getCalls, setter=setCalls:) IntArray *calls;
+@property (retain, getter=getEnum, setter=setEnum:) NSEnumerator *e;
+@property (retain, getter=getCurrentSymbol, setter=setCurrentSymbol:) id currentSymbol;
+
++ (BufferedTreeNodeStream *) newBufferedTreeNodeStream:(CommonTree *)tree;
++ (BufferedTreeNodeStream *) newBufferedTreeNodeStream:(id<TreeAdaptor>)adaptor Tree:(CommonTree *)tree;
++ (BufferedTreeNodeStream *) newBufferedTreeNodeStream:(id<TreeAdaptor>)adaptor Tree:(CommonTree *)tree withBufferSize:(NSInteger)initialBufferSize;
+
+#pragma mark Constructor
+- (id) initWithTree:(CommonTree *)tree;
+- (id) initWithTreeAdaptor:(CommonTreeAdaptor *)anAdaptor Tree:(CommonTree *)tree;
+- (id) initWithTreeAdaptor:(CommonTreeAdaptor *)anAdaptor Tree:(CommonTree *)tree WithBufferSize:(NSInteger)bufferSize;
+
+- (void)dealloc;
+- (id) copyWithZone:(NSZone *)aZone;
+
+// protected methods. DO NOT USE
+#pragma mark Protected Methods
+- (void) fillBuffer;
+- (void) fillBufferWithTree:(CommonTree *) tree;
+- (NSInteger) getNodeIndex:(CommonTree *) node;
+- (void) addNavigationNode:(NSInteger) type;
+- (id) get:(NSUInteger) i;
+- (id) LT:(NSInteger) k;
+- (id) getCurrentSymbol;
+- (id) LB:(NSInteger) i;
+#pragma mark General Methods
+- (NSString *) getSourceName;
+
+- (id<TokenStream>) getTokenStream;
+- (void) setTokenStream:(id<TokenStream>) tokens;
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void) setTreeAdaptor:(id<TreeAdaptor>) anAdaptor;
+
+- (BOOL)getUniqueNavigationNodes;
+- (void) setUniqueNavigationNodes:(BOOL)aVal;
+
+- (void) consume;
+- (NSInteger) LA:(NSInteger) i;
+- (NSInteger) mark;
+- (void) release:(NSInteger) marker;
+- (void) rewind:(NSInteger) marker;
+- (void) rewind;
+- (void) seek:(NSInteger) idx;
+
+- (void) push:(NSInteger) i;
+- (NSInteger) pop;
+
+- (void) reset;
+- (NSUInteger) count;
+- (NSEnumerator *) objectEnumerator;
+- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
+
+- (NSString *) toTokenTypeString;
+- (NSString *) toTokenString:(NSInteger)aStart ToEnd:(NSInteger)aStop;
+- (NSString *) toStringFromNode:(id)aStart ToNode:(id)aStop;
+
+// getters and setters
+- (AMutableArray *) getNodes;
+- (id) eof;
+- (void)setEof:(id)anEOF;
+
+@end
diff --git a/runtime/ObjC/Framework/BufferedTreeNodeStream.m b/runtime/ObjC/Framework/BufferedTreeNodeStream.m
new file mode 100644
index 0000000..bf41026
--- /dev/null
+++ b/runtime/ObjC/Framework/BufferedTreeNodeStream.m
@@ -0,0 +1,556 @@
+//
+//  BufferedTreeNodeStream.m
+//  ANTLR
+//
+// [The "BSD licence"]
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "BufferedTreeNodeStream.h"
+#import "StreamEnumerator.h"
+#import "CommonTreeAdaptor.h"
+
+extern NSInteger debug;
+
+#ifdef DONTUSENOMO
+@implementation TreeStreamIterator
++ newTreeStreamIteratorWithNodes:(BufferedTreeNodeStream *)theStream
+{
+    return[[TreeStreamIterator alloc] initWithStream:theStream];
+}
+
+- (id) initWithStream:(BufferedTreeNodeStream *)theStream
+{
+    if ((self = [super init]) != nil) {
+        idx = 0;
+        input = theStream;
+        nodes = [theStream getNodes];
+    }
+    return self;
+}
+
+- (BOOL) hasNext
+{
+    return idx < [nodes count];
+}
+
+- (id) next
+{
+    NSInteger current = idx;
+    idx++;
+    if (current < [nodes count]) {
+    }
+    return [nodes getEof];
+}
+
+- (void) remove
+{
+	@throw [RuntimeException newException:@"cannot remove nodes from stream"];
+}
+
+@end
+#endif
+
+@implementation BufferedTreeNodeStream
+
+@synthesize up;
+@synthesize down;
+@synthesize eof;
+@synthesize nodes;
+@synthesize root;
+@synthesize tokens;
+@synthesize adaptor;
+@synthesize uniqueNavigationNodes;
+@synthesize index;
+@synthesize lastMarker;
+@synthesize calls;
+@synthesize e;
+@synthesize currentSymbol;
+
++ (BufferedTreeNodeStream *) newBufferedTreeNodeStream:(CommonTree *) aTree
+{
+    return [((BufferedTreeNodeStream *)[BufferedTreeNodeStream alloc]) initWithTree:(CommonTree *)aTree];
+}
+
++ (BufferedTreeNodeStream *) newBufferedTreeNodeStream:(id<TreeAdaptor>)adaptor Tree:(CommonTree *)aTree
+{
+    return [[BufferedTreeNodeStream alloc] initWithTreeAdaptor:adaptor Tree:(CommonTree *)aTree];
+}
+
++ (BufferedTreeNodeStream *) newBufferedTreeNodeStream:(id<TreeAdaptor>)adaptor Tree:(CommonTree *)aTree withBufferSize:(NSInteger)initialBufferSize
+{
+    return [[BufferedTreeNodeStream alloc] initWithTreeAdaptor:adaptor Tree:(CommonTree *)aTree WithBufferSize:initialBufferSize];
+}
+
+-(BufferedTreeNodeStream *) init
+{
+	self = [super init];
+	if (self) {
+		index = -1;
+		uniqueNavigationNodes = NO;
+        root = [[CommonTree alloc] init];
+        //		tokens = tree;
+        adaptor = [[[CommonTreeAdaptor alloc] init] retain];
+        nodes = [[AMutableArray arrayWithCapacity:DEFAULT_INITIAL_BUFFER_SIZE] retain];
+        down = [[adaptor createTree:TokenTypeDOWN Text:@"DOWN"] retain];
+        up = [[adaptor createTree:TokenTypeUP Text:@"UP"] retain];
+        eof = [[adaptor createTree:TokenTypeEOF Text:@"EOF"] retain];
+    }
+	return self;
+}
+
+- (BufferedTreeNodeStream *)initWithTree:(CommonTree *) aTree
+{
+	self = [super init];
+	if (self) {
+		index = -1;
+		uniqueNavigationNodes = NO;
+        root = aTree;
+        //		tokens = aTree;
+        adaptor = [[[CommonTreeAdaptor alloc] init] retain];
+        nodes = [[AMutableArray arrayWithCapacity:DEFAULT_INITIAL_BUFFER_SIZE] retain];
+        down = [[adaptor createTree:TokenTypeDOWN Text:@"DOWN"] retain];
+        up = [[adaptor createTree:TokenTypeUP Text:@"UP"] retain];
+        eof = [[adaptor createTree:TokenTypeEOF Text:@"EOF"] retain];
+    }
+	return self;
+}
+
+-(BufferedTreeNodeStream *) initWithTreeAdaptor:(CommonTreeAdaptor *)anAdaptor Tree:(CommonTree *)aTree
+{
+	self = [super init];
+	if (self) {
+		index = -1;
+		uniqueNavigationNodes = NO;
+        root = aTree;
+        //		tokens = aTree;
+        adaptor = [anAdaptor retain];
+        nodes = [[AMutableArray arrayWithCapacity:DEFAULT_INITIAL_BUFFER_SIZE] retain];
+        down = [[adaptor createTree:TokenTypeDOWN Text:@"DOWN"] retain];
+        up = [[adaptor createTree:TokenTypeUP Text:@"UP"] retain];
+        eof = [[adaptor createTree:TokenTypeEOF Text:@"EOF"] retain];
+    }
+	return self;
+}
+
+-(BufferedTreeNodeStream *) initWithTreeAdaptor:(CommonTreeAdaptor *)anAdaptor Tree:(CommonTree *)aTree WithBufferSize:(NSInteger)bufferSize
+{
+	self = [super init];
+	if (self) {
+        //		down = [adaptor createToken:TokenTypeDOWN withText:@"DOWN"];
+        //		up = [adaptor createToken:TokenTypeDOWN withText:@"UP"];
+        //		eof = [adaptor createToken:TokenTypeDOWN withText:@"EOF"];
+		index = -1;
+		uniqueNavigationNodes = NO;
+        root = aTree;
+        //		tokens = aTree;
+        adaptor = [anAdaptor retain];
+        nodes = [[AMutableArray arrayWithCapacity:bufferSize] retain];
+        down = [[adaptor createTree:TokenTypeDOWN Text:@"DOWN"] retain];
+        up = [[adaptor createTree:TokenTypeUP Text:@"UP"] retain];
+        eof = [[adaptor createTree:TokenTypeEOF Text:@"EOF"] retain];
+	}
+	return self;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in BufferedTreeNodeStream" );
+#endif
+    if ( adaptor ) [adaptor release];
+    if ( nodes ) [nodes release];
+    if ( root ) [root release];
+    if ( down ) [down release];
+    if ( up ) [up release];
+    if ( eof ) [eof release];
+	[super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    BufferedTreeNodeStream *copy;
+    
+    copy = [[[self class] allocWithZone:aZone] init];
+    if ( up )
+        copy.up = [up copyWithZone:aZone];
+    if ( down )
+        copy.down = [down copyWithZone:aZone];
+    if ( eof )
+        copy.eof = [eof copyWithZone:aZone];
+    if ( nodes )
+        copy.nodes = [nodes copyWithZone:aZone];
+    if ( root )
+        copy.root = [root copyWithZone:aZone];
+    if ( tokens )
+        copy.tokens = [tokens copyWithZone:aZone];
+    if ( adaptor )
+        copy.adaptor = [adaptor copyWithZone:aZone];
+    copy.uniqueNavigationNodes = self.uniqueNavigationNodes;
+    copy.index = self.index;
+    copy.lastMarker = self.lastMarker;
+    if ( calls )
+        copy.calls = [calls copyWithZone:aZone];
+    return copy;
+}
+
+// protected methods. DO NOT USE
+#pragma mark Protected Methods
+-(void) fillBuffer
+{
+	[self fillBufferWithTree:root];
+	// if (debug > 1) NSLog("revIndex=%@", tokenTypeToStreamIndexesMap);
+	index = 0; // buffer of nodes initialized now
+}
+
+-(void) fillBufferWithTree:(CommonTree *) aTree
+{
+	BOOL empty = [adaptor isNil:(id<BaseTree>)aTree];
+	if (!empty) {
+		[nodes addObject:aTree];
+	}
+	NSInteger n = [adaptor getChildCount:aTree];
+	if (!empty && n > 0) {
+		[self addNavigationNode:TokenTypeDOWN];
+	}
+	for (NSInteger c = 0; c < n; c++) {
+		id child = [adaptor getChild:aTree At:c];
+		[self fillBufferWithTree:child];
+	}
+	if (!empty && n > 0) {
+		[self addNavigationNode:TokenTypeUP];
+	}
+}
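+
+// Flattening sketch (illustrative): a tree is serialized depth-first with
+// imaginary DOWN/UP navigation nodes marking descent into and ascent out of
+// a node's children.  For a tree written as (+ 1 2), the nodes buffer
+// becomes, in order:
+//
+//   +  DOWN  1  2  UP
+//
+// A childless root produces no DOWN/UP pair, matching the n > 0 checks above.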
+
+-(NSInteger) getNodeIndex:(CommonTree *) node
+{
+	if (index == -1) {
+		[self fillBuffer];
+	}
+	for (NSUInteger i = 0; i < [nodes count]; i++) {
+		id t = [nodes objectAtIndex:i];
+		if (t == node) {
+			return i;
+		}
+	}
+	return -1;
+}
+
+-(void) addNavigationNode:(NSInteger) type
+{
+	id navNode = nil;
+	if (type == TokenTypeDOWN) {
+		if (self.uniqueNavigationNodes) {
+			navNode = [adaptor createToken:TokenTypeDOWN Text:@"DOWN"];
+		}
+		else {
+			navNode = down;
+		}
+
+	}
+	else {
+		if (self.uniqueNavigationNodes) {
+			navNode = [adaptor createToken:TokenTypeUP Text:@"UP"];
+		}
+		else {
+			navNode = up;
+		}
+	}
+	[nodes addObject:navNode];
+}
+
+-(id) get:(NSUInteger) i
+{
+	if (index == -1) {
+		[self fillBuffer];
+	}
+	return [nodes objectAtIndex:i];
+}
+
+-(id) LT:(NSInteger) k
+{
+	if (index == -1) {
+		[self fillBuffer];
+	}
+	if (k == 0) {
+		return nil;
+	}
+	if (k < 0) {
+		return [self LB:-k];
+	}
+	if ((index + k - 1) >= [nodes count]) {
+		return eof;
+	}
+	return [nodes objectAtIndex:(index + k - 1)];
+}
+
+-(id) getCurrentSymbol
+{
+	return [self LT:1];
+}
+
+-(id) LB:(NSInteger) k
+{
+	if (k == 0) {
+		return nil;
+	}
+	if ((index - k) < 0) {
+		return nil;
+	}
+	return [nodes objectAtIndex:(index - k)];
+}
+
+- (CommonTree *)getTreeSource
+{
+    return root;
+}
+
+-(NSString *)getSourceName
+{
+	return [[self getTokenStream] getSourceName];
+}
+
+- (id<TokenStream>)getTokenStream
+{
+    return tokens;
+}
+
+- (void) setTokenStream:(id<TokenStream>)newtokens
+{
+    tokens = newtokens;
+}
+
+- (id<TreeAdaptor>)getTreeAdaptor
+{
+    return adaptor;
+}
+
+- (void) setTreeAdaptor:(id<TreeAdaptor>)anAdaptor
+{
+    adaptor = anAdaptor;
+}
+
+- (BOOL)getUniqueNavigationNodes
+{
+    return uniqueNavigationNodes;
+}
+
+- (void) setUniqueNavigationNodes:(BOOL)aVal
+{
+    uniqueNavigationNodes = aVal;
+}
+
+-(void) consume
+{
+	if (index == -1) {
+		[self fillBuffer];
+	}
+	index++;
+}
+
+-(NSInteger) LA:(NSInteger) i
+{
+	return [adaptor getType:[self LT:i]];
+}
+
+-(NSInteger) mark
+{
+	if (index == -1) {
+		[self fillBuffer];
+	}
+	lastMarker = self.index;
+	return lastMarker;
+}
+
+-(void) release:(NSInteger) marker
+{
+	// do nothing
+}
+
+-(void) rewind:(NSInteger) marker
+{
+	[self seek:marker];
+}
+
+-(void) rewind
+{
+	[self seek:lastMarker];
+}
+
+-(void) seek:(NSInteger) i
+{
+	if (index == -1) {
+		[self fillBuffer];
+	}
+	index = i;
+}
+
+-(void) push:(NSInteger) i
+{
+	if (calls == nil) {
+		calls = [IntArray newArrayWithLen:INITIAL_CALL_STACK_SIZE];
+	}
+	[calls push:index];
+	[self seek:i];
+}
+
+-(NSInteger) pop
+{
+	NSInteger ret = [calls pop];
+	[self seek:ret];
+	return ret;
+}
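+
+// Call-stack sketch (illustrative): push:/pop let a tree walker jump into a
+// subtree and come back to where it left off:
+//
+//   [self push:someNodeIndex];   // remember the current index, seek to the subtree
+//   // ... match the subtree ...
+//   [self pop];                  // seek back to the remembered index
+//
+// someNodeIndex is a placeholder for an index previously obtained from the
+// stream, for example via getNodeIndex:.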
+
+-(void) reset
+{
+	index = 0;
+	lastMarker = 0;
+	if (calls != nil) {
+		[calls reset];
+	}
+}
+
+-(NSUInteger) count
+{
+	if (index == -1) {
+		[self fillBuffer];
+	}
+	return [nodes count];
+}
+
+-(NSUInteger) size
+{
+	return [self count];
+}
+
+-(NSEnumerator *) objectEnumerator
+{
+	if (e == nil) {
+		e = [[StreamEnumerator alloc] initWithNodes:nodes andEOF:eof];
+	}
+	return e;
+}
+
+-(void) replaceChildren:(CommonTree *) parent From:(NSInteger)startIdx To:(NSInteger)stopIdx With:(CommonTree *)aTree
+{
+	if (parent != nil) {
+		[adaptor replaceChildren:parent From:startIdx To:stopIdx With:aTree];
+	}
+}
+
+-(NSString *) toTokenTypeString
+{
+	if (index == -1)
+	{
+		[self fillBuffer];
+	}
+	NSMutableString *buf = [NSMutableString stringWithCapacity:10];
+	for (NSUInteger i= 0; i < [nodes count]; i++) {
+		CommonTree * aTree = (CommonTree *)[self get:i];
+		[buf appendFormat:@" %d", [adaptor getType:aTree]];
+	}
+	return buf;
+}
+
+-(NSString *) toTokenString:(NSInteger)aStart ToEnd:(NSInteger)aStop
+{
+	if (index == -1) {
+		[self fillBuffer];
+	}
+	NSMutableString *buf = [NSMutableString stringWithCapacity:10];
+	for (NSUInteger i = aStart; i < [nodes count] && i <= aStop; i++) {
+		CommonTree * t = (CommonTree *)[self get:i];
+		[buf appendFormat:@" %d", [adaptor getType:t]];
+	}
+	return buf;
+}
+
+-(NSString *) toStringFromNode:(id)aStart ToNode:(id)aStop
+{
+	if (aStart == nil || aStop == nil) {
+		return nil;
+	}
+	if (index == -1) {
+		[self fillBuffer];
+	}
+	
+	// if we have a token stream, use that to dump text in order
+	if ([self getTokenStream] != nil) {
+		NSInteger beginTokenIndex = [adaptor getTokenStartIndex:aStart];
+		NSInteger endTokenIndex = [adaptor getTokenStopIndex:aStop];
+		
+		if ([adaptor getType:aStop] == TokenTypeUP) {
+			endTokenIndex = [adaptor getTokenStopIndex:aStart];
+		}
+		else if ([adaptor getType:aStop] == TokenTypeEOF) {
+			endTokenIndex = [self count] - 2; //don't use EOF
+		}
+        return [tokens toStringFromStart:beginTokenIndex ToEnd:endTokenIndex];
+	}
+	// walk nodes looking for aStart
+	CommonTree * aTree = nil;
+	NSUInteger i = 0;
+	for (; i < [nodes count]; i++) {
+		aTree = [nodes objectAtIndex:i];
+		if (aTree == aStart) {
+			break;
+		}
+	}
+	NSMutableString *buf = [NSMutableString stringWithCapacity:10];
+	aTree = [nodes objectAtIndex:i]; // why?
+	while (aTree != aStop) {
+		NSString *text = [adaptor getText:aTree];
+		if (text == nil) {
+			text = [NSString stringWithFormat:@" %d", [adaptor getType:aTree]];
+		}
+		[buf appendString:text];
+		i++;
+		aTree = [nodes objectAtIndex:i];
+	}
+	NSString *text = [adaptor getText:aStop];
+	if (text == nil) {
+		text = [NSString stringWithFormat:@" %d", [adaptor getType:aStop]];
+	}
+	[buf appendString:text];
+	return buf;
+}
+
+// getters and setters
+- (AMutableArray *) getNodes
+{
+    return nodes;
+}
+
+- (id) eof
+{
+    return eof;
+}
+
+- (void) setEof:(id)theEOF
+{
+    eof = theEOF;
+}
+
+@end
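+
+// Usage sketch (illustrative only): given a CommonTree produced by a parser,
+// here a hypothetical `tree`, the buffered node stream can be walked much
+// like a token stream:
+//
+//   BufferedTreeNodeStream *nodes = [BufferedTreeNodeStream newBufferedTreeNodeStream:tree];
+//   while ( [nodes LA:1] != TokenTypeEOF ) {
+//       id node = [nodes LT:1];
+//       [nodes consume];
+//   }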
diff --git a/antlr-3.4/runtime/ObjC/Framework/CHANGES.txt b/runtime/ObjC/Framework/CHANGES.txt
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/CHANGES.txt
rename to runtime/ObjC/Framework/CHANGES.txt
diff --git a/runtime/ObjC/Framework/CharStream.h b/runtime/ObjC/Framework/CharStream.h
new file mode 100644
index 0000000..7463160
--- /dev/null
+++ b/runtime/ObjC/Framework/CharStream.h
@@ -0,0 +1,58 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "IntStream.h"
+
+#define	CharStreamEOF -1
+
+
+@protocol CharStream < IntStream >
+
+/** For infinite streams, you don't need this; primarily I'm providing
+ *  a useful interface for action code.  Just make sure actions don't
+ *  use this on streams that don't support it.
+ */
+- (NSString *) substringWithRange:(NSRange) theRange;
+
+/** Get the ith character of lookahead.  This is usually the same as
+ *  LA(i).  It will be used for labels in the generated lexer code.
+ *  I'd prefer to return a char here type-wise, but it's probably better
+ *  to be 32-bit clean and consistent with LA.
+ */
+- (NSInteger)LT:(NSInteger) i;
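+
+// Illustrative example: for the input "abc" positioned at the start,
+// [stream LT:1] and [stream LA:1] both return the character code for 'a',
+// and [stream LT:2] returns the code for 'b'.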
+
+// ANTLR tracks the line information automatically
+- (NSInteger)getLine;
+// Because this stream can rewind, we need to be able to reset the line
+- (void)setLine:(NSInteger)aLine;
+
+- (void)setCharPositionInLine:(NSInteger)pos;
+
+
+// The index of the character relative to the beginning of the line 0..n-1
+- (NSInteger)getCharPositionInLine;
+
+@end
diff --git a/runtime/ObjC/Framework/CharStreamState.h b/runtime/ObjC/Framework/CharStreamState.h
new file mode 100644
index 0000000..7e8ba17
--- /dev/null
+++ b/runtime/ObjC/Framework/CharStreamState.h
@@ -0,0 +1,49 @@
+//
+//  CharStreamState.h
+//  ANTLR
+//
+// [The "BSD licence"]
+// Copyright (c)  2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+
+
+@interface CharStreamState : NSObject
+{
+    NSInteger index;
+    NSUInteger line;
+    NSUInteger charPositionInLine;
+}
+
+@property (assign) NSInteger index;
+@property (assign) NSUInteger line;
+@property (assign) NSUInteger charPositionInLine;
+
++ newCharStreamState;
+
+- (id) init;
+
+@end
diff --git a/runtime/ObjC/Framework/CharStreamState.m b/runtime/ObjC/Framework/CharStreamState.m
new file mode 100755
index 0000000..0776f32
--- /dev/null
+++ b/runtime/ObjC/Framework/CharStreamState.m
@@ -0,0 +1,52 @@
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import "CharStreamState.h"
+
+
+@implementation CharStreamState
+
+@synthesize index;
+@synthesize line;
+@synthesize charPositionInLine;
+
++ newCharStreamState
+{
+    return [[CharStreamState alloc] init];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil) {
+        index = 0;
+        line = 1;
+        charPositionInLine = 0;
+    }
+    return self;
+}
+
+@end
diff --git a/runtime/ObjC/Framework/CommonErrorNode.h b/runtime/ObjC/Framework/CommonErrorNode.h
new file mode 100644
index 0000000..b36dba8
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonErrorNode.h
@@ -0,0 +1,67 @@
+//
+//  CommonErrorNode.h
+//  ANTLR
+//
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "CommonTree.h"
+#import "TokenStream.h"
+//#import "IntStream.h"
+//#import "Token.h"
+#import "UnWantedTokenException.h"
+
+@interface CommonErrorNode : CommonTree
+{
+    id<IntStream> input;
+    id<Token> startToken;
+    id<Token> stopToken;
+    RecognitionException *trappedException;
+}
+
++ (id) newCommonErrorNode:(id<TokenStream>)anInput
+                  From:(id<Token>)startToken
+                    To:(id<Token>)stopToken
+                     Exception:(RecognitionException *) e;
+
+- (id) initWithInput:(id<TokenStream>)anInput
+                From:(id<Token>)startToken
+                  To:(id<Token>)stopToken
+           Exception:(RecognitionException *) e;
+
+- (void)dealloc;
+- (BOOL) isNil;
+
+- (NSInteger)type;
+- (NSString *)text;
+- (NSString *)toString;
+
+@property (retain) id<IntStream> input;
+@property (retain) id<Token> startToken;
+@property (retain) id<Token> stopToken;
+@property (retain) RecognitionException *trappedException;
+@end
diff --git a/runtime/ObjC/Framework/CommonErrorNode.m b/runtime/ObjC/Framework/CommonErrorNode.m
new file mode 100644
index 0000000..7979a9e
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonErrorNode.m
@@ -0,0 +1,159 @@
+//
+//  CommonErrorNode.m
+//  ANTLR
+//
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "CommonErrorNode.h"
+#import "MissingTokenException.h"
+#import "NoViableAltException.h"
+#import "TreeNodeStream.h"
+#import "UnwantedTokenException.h"
+
+@implementation CommonErrorNode
+
++ (id) newCommonErrorNode:(id<TokenStream>)anInput
+                          From:(id<Token>)aStartToken
+                            To:(id<Token>)aStopToken
+                     Exception:(RecognitionException *) e
+{
+    return [[CommonErrorNode alloc] initWithInput:anInput From:aStartToken To:aStopToken Exception:e];
+}
+
+- (id) init
+{
+    self = [super init];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (id) initWithInput:(id<TokenStream>)anInput
+                From:(id<Token>)aStartToken
+                  To:(id<Token>)aStopToken
+           Exception:(RecognitionException *) e
+{
+    self = [super init];
+    if ( self != nil ) {
+        //System.out.println("aStartToken: "+aStartToken+", aStopToken: "+aStopToken);
+        if ( aStopToken == nil ||
+            ([aStopToken getTokenIndex] < [aStartToken getTokenIndex] &&
+             aStopToken.type != TokenTypeEOF) )
+        {
+            // sometimes resync does not consume a token (when LT(1) is in the
+            // follow set), so aStopToken will be one token to the left of
+            // aStartToken; adjust.  Also handle the case where aStartToken is
+            // the first token and no token is consumed during recovery;
+            // LT(-1) will return nil.
+            aStopToken = aStartToken;
+        }
+        input = anInput;
+        if ( input ) [input retain];
+        startToken = aStartToken;
+        if ( startToken ) [startToken retain];
+        stopToken = aStopToken;
+        if ( stopToken ) [stopToken retain];
+        trappedException = e;
+        if ( trappedException ) [trappedException retain];
+    }
+    return self;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in CommonErrorNode" );
+#endif
+    if ( input ) [input release];
+    if ( startToken ) [startToken release];
+    if ( stopToken ) [stopToken release];
+    if ( trappedException ) [trappedException release];
+	[super dealloc];
+}
+
+- (BOOL) isNil
+{
+    return NO;
+}
+
+- (NSInteger)type
+{
+    return TokenTypeInvalid;
+}
+
+- (NSString *)text
+{
+    NSString *badText = nil;
+    if ( [startToken conformsToProtocol:@protocol(Token)] ) {
+        // token-stream error: render the offending token range from the TokenStream
+        int i = [(id<Token>)startToken getTokenIndex];
+        int j = [(id<Token>)stopToken getTokenIndex];
+        if ( stopToken.type == TokenTypeEOF ) {
+            j = [(id<TokenStream>)input size];
+        }
+        badText = [(id<TokenStream>)input toStringFromStart:i ToEnd:j];
+    }
+    else if ( [startToken isKindOfClass:[CommonTree class]] ) {
+        // tree-node-stream error: render the node range instead
+        badText = [(id<TreeNodeStream>)input toStringFromNode:startToken ToNode:stopToken];
+    }
+    else {
+        // people should subclass if they alter the tree type, so this
+        // fallback is guaranteed to be correct
+        badText = @"<unknown>";
+    }
+    return badText;
+}
+
+- (NSString *)toString
+{
+    NSString *aString;
+    if ( [trappedException isKindOfClass:[MissingTokenException class]] ) {
+        aString = [NSString stringWithFormat:@"<missing type: %@ >",
+        [(MissingTokenException *)trappedException getMissingType]];
+        return aString;
+    }
+    else if ( [trappedException isKindOfClass:[UnwantedTokenException class]] ) {
+        aString = [NSString stringWithFormat:@"<extraneous: %@, resync=%@>",
+        [trappedException getUnexpectedToken],
+        [self text]];
+        return aString;
+    }
+    else if ( [trappedException isKindOfClass:[MismatchedTokenException class]] ) {
+        aString = [NSString stringWithFormat:@"<mismatched token: %@, resync=%@>", trappedException.token, [self text]];
+        return aString;
+    }
+    else if ( [trappedException isKindOfClass:[NoViableAltException class]] ) {
+        aString = [NSString stringWithFormat:@"<unexpected:  %@, resync=%@>", trappedException.token, [self text]];
+        return aString;
+    }
+    aString = [NSString stringWithFormat:@"<error: %@>",[self text]];
+    return aString;
+}
+
+@synthesize input;
+@synthesize startToken;
+@synthesize stopToken;
+@synthesize trappedException;
+@end
diff --git a/runtime/ObjC/Framework/CommonToken.h b/runtime/ObjC/Framework/CommonToken.h
new file mode 100644
index 0000000..16823da
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonToken.h
@@ -0,0 +1,141 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+#import "Token.h"
+#import "CharStream.h"
+
+@interface CommonToken : NSObject < Token > {
+	__strong NSString *text;
+	NSInteger type;
+	// information about the Token's position in the input stream
+	NSUInteger line;
+	NSUInteger charPositionInLine;
+	NSUInteger channel;
+	// this token's position in the TokenStream
+	NSInteger index;
+	
+	// indices into the CharStream to avoid copying the text
+	// can manually override the text by using -setText:
+	NSInteger startIndex;
+	NSInteger stopIndex;
+	// the actual input stream this token was found in
+	__strong id<CharStream> input;
+}
+
++ (void) initialize;
++ (NSInteger) DEFAULT_CHANNEL;
++ (id<Token>)INVALID_TOKEN;
++ (NSInteger) INVALID_TOKEN_TYPE;
++ (id<Token>) newToken;
++ (id<Token>) newToken:(id<CharStream>)anInput
+                       Type:(NSInteger)aTType
+                    Channel:(NSInteger)aChannel
+                      Start:(NSInteger)aStart
+                       Stop:(NSInteger)aStop;
++ (id<Token>) newToken:(TokenType)aType;
++ (id<Token>) newToken:(NSInteger)tokenType Text:(NSString *)tokenText;
++ (id<Token>) newTokenWithToken:(CommonToken *)fromToken;
++ (id<Token>) eofToken;
++ (id<Token>) skipToken;
++ (id<Token>) invalidToken;
++ (TokenChannel) defaultChannel;
+
+// designated initializer. This is used as the default way to initialize a Token in the generated code.
+- (id) init;
+- (id) initWithInput:(id<CharStream>)anInput
+                                Type:(NSInteger)aTType
+                             Channel:(NSInteger)aChannel
+                               Start:(NSInteger)theStart
+                                Stop:(NSInteger)theStop;
+- (id) initWithToken:(id<Token>)aToken;
+- (id) initWithType:(TokenType)aType;
+- (id) initWithType:(TokenType)aTType Text:(NSString *)tokenText;
+
+//---------------------------------------------------------- 
+//  text 
+//---------------------------------------------------------- 
+- (NSString *)text;
+- (void) setText:(NSString *)aText;
+
+//---------------------------------------------------------- 
+//  charPositionInLine 
+//---------------------------------------------------------- 
+- (NSUInteger) getCharPositionInLine;
+- (void) setCharPositionInLine:(NSUInteger)aCharPositionInLine;
+
+//---------------------------------------------------------- 
+//  line 
+//---------------------------------------------------------- 
+- (NSUInteger) getLine;
+- (void) setLine:(NSUInteger)aLine;
+
+//---------------------------------------------------------- 
+//  type 
+//---------------------------------------------------------- 
+- (NSInteger)type;
+- (void) setType:(NSInteger)aType;
+
+//---------------------------------------------------------- 
+//  channel 
+//---------------------------------------------------------- 
+- (NSUInteger)channel;
+- (void) setChannel:(NSUInteger)aChannel;
+
+//---------------------------------------------------------- 
+//  input 
+//---------------------------------------------------------- 
+- (id<CharStream>)input;
+- (void) setInput:(id<CharStream>)anInput;
+
+- (NSInteger)getStart;
+- (void) setStart: (NSInteger)aStart;
+
+- (NSInteger)getStop;
+- (void) setStop: (NSInteger) aStop;
+
+// the index of this Token into the TokenStream
+- (NSInteger)getTokenIndex;
+- (void) setTokenIndex:(NSInteger)aTokenIndex;
+
+// conform to NSCopying
+- (id) copyWithZone:(NSZone *)theZone;
+
+- (NSString *) description;
+- (NSString *) toString;
+
+@property (retain, getter = text, setter = setText:) NSString *text;
+@property (assign) NSInteger type;
+@property (assign, getter = line, setter = setLine:) NSUInteger line;
+@property (assign, getter=charPositionInLine, setter = setCharPositionInLine:) NSUInteger charPositionInLine;
+@property (assign) NSUInteger channel;
+@property (assign) NSInteger index;
+@property (assign, getter=getStart, setter=setStart:) NSInteger startIndex;
+@property (assign, getter=getStop, setter=setStop:) NSInteger stopIndex;
+@property (retain) id<CharStream> input;
+
+@end
diff --git a/runtime/ObjC/Framework/CommonToken.m b/runtime/ObjC/Framework/CommonToken.m
new file mode 100644
index 0000000..5b09acb
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonToken.m
@@ -0,0 +1,403 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import "CommonToken.h"
+
+static CommonToken *SKIP_TOKEN;
+static CommonToken *EOF_TOKEN;
+static CommonToken *INVALID_TOKEN;
+
+@implementation CommonToken
+
+    static NSInteger DEFAULT_CHANNEL = TokenChannelDefault;
+    static NSInteger INVALID_TOKEN_TYPE = TokenTypeInvalid;
+
+
+@synthesize text;
+@synthesize type;
+@synthesize line;
+@synthesize charPositionInLine;
+@synthesize channel;
+@synthesize index;
+@synthesize startIndex;
+@synthesize stopIndex;
+@synthesize input;
+
++ (void) initialize
+{
+    EOF_TOKEN = [CommonToken newToken:TokenTypeEOF Text:@"EOF"];
+    SKIP_TOKEN = [CommonToken newToken:TokenTypeInvalid Text:@"Skip"];
+    INVALID_TOKEN = [CommonToken newToken:TokenTypeInvalid Text:@"Invalid"];
+    [EOF_TOKEN retain];
+    [SKIP_TOKEN retain];
+    [INVALID_TOKEN retain];
+}
+
++ (CommonToken *)INVALID_TOKEN
+{
+    return INVALID_TOKEN;
+}
+
++ (NSInteger) DEFAULT_CHANNEL
+{
+    return DEFAULT_CHANNEL;
+}
+
++ (NSInteger) INVALID_TOKEN_TYPE
+{
+    return INVALID_TOKEN_TYPE;
+}
+
++ (CommonToken *) newToken
+{
+    return [[CommonToken alloc] init];
+}
+
++ (CommonToken *) newToken:(id<CharStream>)anInput Type:(NSInteger)aTType Channel:(NSInteger)aChannel Start:(NSInteger)aStart Stop:(NSInteger)aStop
+{
+    return [[CommonToken alloc] initWithInput:(id<CharStream>)anInput Type:(NSInteger)aTType Channel:(NSInteger)aChannel Start:(NSInteger)aStart Stop:(NSInteger)aStop];
+}
+
++ (CommonToken *) newToken:(TokenType)tokenType
+{
+    return( [[CommonToken alloc] initWithType:tokenType] );
+}
+
++ (CommonToken *) newToken:(NSInteger)tokenType Text:(NSString *)tokenText
+{
+    return( [[CommonToken alloc] initWithType:tokenType Text:tokenText] );
+}
+
++ (CommonToken *) newTokenWithToken:(CommonToken *)fromToken
+{
+    return( [[CommonToken alloc] initWithToken:fromToken] );
+}
+
+// return the singleton EOF Token 
++ (id<Token>) eofToken
+{
+    if (EOF_TOKEN == nil) {
+        EOF_TOKEN = [[CommonToken newToken:TokenTypeEOF Text:@"EOF"] retain];
+    }
+    return EOF_TOKEN;
+}
+
+// return the singleton skip Token 
++ (id<Token>) skipToken
+{
+    if (SKIP_TOKEN == nil) {
+        SKIP_TOKEN = [[CommonToken newToken:TokenTypeInvalid Text:@"Skip"] retain];
+    }
+    return SKIP_TOKEN;
+}
+
+// return the singleton invalid Token
++ (id<Token>) invalidToken
+{
+    if (INVALID_TOKEN == nil) {
+        INVALID_TOKEN = [[CommonToken newToken:TokenTypeInvalid Text:@"Invalid"] retain];
+    }
+    return INVALID_TOKEN;
+}
+
+// the default channel for this class of Tokens
++ (TokenChannel) defaultChannel
+{
+    return TokenChannelDefault;
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil) {
+        input = nil;
+        type = TokenTypeInvalid;
+        channel = TokenChannelDefault;
+        startIndex = 0;
+        stopIndex = 0;
+    }
+    return self;
+}
+
+// designated initializer
+- (id) initWithInput:(id<CharStream>)anInput
+                Type:(NSInteger)aTType
+             Channel:(NSInteger)aChannel
+               Start:(NSInteger)aStart
+                Stop:(NSInteger)aStop
+{
+    if ((self = [super init]) != nil) {
+        input = anInput;
+        if ( input ) [input retain];
+        type = aTType;
+        channel = aChannel;
+        startIndex = aStart;
+        stopIndex = aStop;
+        if (type == TokenTypeEOF)
+            text = @"EOF";
+        else
+            text = [input substringWithRange:NSMakeRange(startIndex, (stopIndex-startIndex)+1)];
+        if ( text ) [text retain];
+    }
+    return self;
+}
+
+- (id) initWithToken:(CommonToken *)oldToken
+{
+    if ((self = [super init]) != nil) {
+        text = [NSString stringWithString:oldToken.text];
+        if ( text ) [text retain];
+        type = oldToken.type;
+        line = oldToken.line;
+        index = oldToken.index;
+        charPositionInLine = oldToken.charPositionInLine;
+        channel = oldToken.channel;
+        input = oldToken.input;
+        if ( input ) [input retain];
+        if ( [oldToken isKindOfClass:[CommonToken class]] ) {
+            startIndex = oldToken.startIndex;
+            stopIndex = oldToken.stopIndex;
+        }
+    }
+    return self;
+}
+
+- (id) initWithType:(TokenType)aTType
+{
+    if ((self = [super init]) != nil) {
+        self.type = aTType;
+    }
+    return self;
+}
+
+- (id) initWithType:(TokenType)aTType Text:(NSString *)tokenText
+{
+    if ((self = [super init]) != nil) {
+        self.type = aTType;
+        self.text = [NSString stringWithString:tokenText];
+        if ( text ) [text retain];
+    }
+    return self;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in CommonToken" );
+#endif
+    if ( input ) [input release];
+    if ( text ) [text release];
+    [super dealloc];
+}
+
+// create a copy, including the text if available
+// the input stream is *not* copied!
+- (id) copyWithZone:(NSZone *)theZone
+{
+    CommonToken *copy = [[[self class] allocWithZone:theZone] init];
+    
+    if (text)
+        copy.text = [text copyWithZone:nil];
+    copy.type = type;
+    copy.line = line;
+    copy.charPositionInLine = charPositionInLine;
+    copy.channel = channel;
+    copy.index = index;
+    copy.startIndex = startIndex;
+    copy.stopIndex = stopIndex;
+    copy.input = input;
+    return copy;
+}
+
+
+//---------------------------------------------------------- 
+//  charPositionInLine 
+//---------------------------------------------------------- 
+- (NSUInteger) getCharPositionInLine
+{
+    return charPositionInLine;
+}
+
+- (void) setCharPositionInLine:(NSUInteger)aCharPositionInLine
+{
+    charPositionInLine = aCharPositionInLine;
+}
+
+//---------------------------------------------------------- 
+//  line 
+//---------------------------------------------------------- 
+- (NSUInteger) getLine
+{
+    return line;
+}
+
+- (void) setLine:(NSUInteger)aLine
+{
+    line = aLine;
+}
+
+//---------------------------------------------------------- 
+//  text 
+//---------------------------------------------------------- 
+- (NSString *) text
+{
+    if (text != nil) {
+        return text;
+    }
+    if (input == nil) {
+        return nil;
+    }
+    int n = [input size];
+    if ( startIndex < n && stopIndex < n) {
+        return [input substringWithRange:NSMakeRange(startIndex, (stopIndex-startIndex)+1)];
+    }
+    else {
+        return @"<EOF>";
+    }
+}
+
+- (void) setText:(NSString *)aText
+{
+    if (text != aText) {
+        if ( text ) [text release];
+        text = aText;
+        [text retain];
+    }
+}
+
+
+//---------------------------------------------------------- 
+//  type 
+//---------------------------------------------------------- 
+- (NSInteger)type
+{
+    return type;
+}
+
+- (void) setType:(NSInteger)aType
+{
+    type = aType;
+}
+
+//---------------------------------------------------------- 
+//  channel 
+//---------------------------------------------------------- 
+- (NSUInteger)channel
+{
+    return channel;
+}
+
+- (void) setChannel:(NSUInteger)aChannel
+{
+    channel = aChannel;
+}
+
+
+//---------------------------------------------------------- 
+//  input 
+//---------------------------------------------------------- 
+- (id<CharStream>) input
+{
+    return input; 
+}
+
+- (void) setInput: (id<CharStream>) anInput
+{
+    if (input != anInput) {
+        if ( input ) [input release];
+        [anInput retain];
+    }
+    input = anInput;
+}
+
+
+//---------------------------------------------------------- 
+//  start 
+//---------------------------------------------------------- 
+- (NSInteger) getStart
+{
+    return startIndex;
+}
+
+- (void) setStart: (NSInteger) aStart
+{
+    startIndex = aStart;
+}
+
+//---------------------------------------------------------- 
+//  stop 
+//---------------------------------------------------------- 
+- (NSInteger) getStop
+{
+    return stopIndex;
+}
+
+- (void) setStop: (NSInteger) aStop
+{
+    stopIndex = aStop;
+}
+
+//---------------------------------------------------------- 
+//  index 
+//---------------------------------------------------------- 
+- (NSInteger) getTokenIndex;
+{
+    return index;
+}
+
+- (void) setTokenIndex: (NSInteger) aTokenIndex;
+{
+    index = aTokenIndex;
+}
+
+
+// provide a textual representation for debugging
+- (NSString *) description
+{
+    NSString *channelStr;
+    NSMutableString *txtString;
+
+    channelStr = @"";
+    if ( channel > 0 ) {
+        channelStr = [NSString stringWithFormat:@",channel=%d\n", channel];
+    }
+    if ([self text] != nil) {
+        txtString = [NSMutableString stringWithString:[self text]];
+        [txtString replaceOccurrencesOfString:@"\n" withString:@"\\\\n" options:NSAnchoredSearch range:NSMakeRange(0, [txtString length])];
+        [txtString replaceOccurrencesOfString:@"\r" withString:@"\\\\r" options:NSAnchoredSearch range:NSMakeRange(0, [txtString length])];
+        [txtString replaceOccurrencesOfString:@"\t" withString:@"\\\\t" options:NSAnchoredSearch range:NSMakeRange(0, [txtString length])];
+    } else {
+        txtString = [NSMutableString stringWithString:@"<no text>"];
+    }
+    return [NSString stringWithFormat:@"[@%d, %d:%d='%@',<%d>%@,%d:%d]", index, startIndex, stopIndex, txtString, type, channelStr, line, charPositionInLine];
+}
+
+- (NSString *)toString
+{
+   return [self description];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/CommonTokenStream.h b/runtime/ObjC/Framework/CommonTokenStream.h
new file mode 100644
index 0000000..80c580e
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonTokenStream.h
@@ -0,0 +1,96 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "TokenStream.h"
+#import "Token.h"
+#import "CommonToken.h"
+#import "TokenSource.h"
+#import "ANTLRBitSet.h"
+#import "BufferedTokenStream.h"
+#import "AMutableDictionary.h"
+
+@interface CommonTokenStream : BufferedTokenStream < TokenStream >
+{
+	__strong AMutableDictionary *channelOverride;
+	NSUInteger channel;
+}
+
+@property (retain, getter=getChannelOverride,setter=setChannelOverride:) AMutableDictionary *channelOverride;
+@property (assign, getter=channel,setter=setChannel:) NSUInteger channel;
+
++ (CommonTokenStream *)newCommonTokenStream;
++ (CommonTokenStream *)newCommonTokenStreamWithTokenSource:(id<TokenSource>)theTokenSource;
++ (CommonTokenStream *)newCommonTokenStreamWithTokenSource:(id<TokenSource>)theTokenSource
+                                                               Channel:(NSUInteger)aChannel;
+
+- (id) init;
+- (id) initWithTokenSource:(id<TokenSource>)theTokenSource;
+- (id) initWithTokenSource:(id<TokenSource>)theTokenSource Channel:(NSUInteger)aChannel;
+
+- (void) consume;
+- (id<Token>) LB:(NSInteger)k;
+- (id<Token>) LT:(NSInteger)k;
+
+- (NSInteger) skipOffTokenChannels:(NSInteger) i;
+- (NSInteger) skipOffTokenChannelsReverse:(NSInteger) i;
+
+- (void)setup;
+- (void)reset;
+
+- (NSInteger) getNumberOfOnChannelTokens;
+
+// - (id<TokenSource>) getTokenSource;
+- (void) setTokenSource: (id<TokenSource>) aTokenSource;
+
+- (NSUInteger)channel;
+- (void)setChannel:(NSUInteger)aChannel;
+
+- (AMutableDictionary *)channelOverride;
+- (void)setChannelOverride:(AMutableDictionary *)anOverride;
+
+- (id) copyWithZone:(NSZone *)aZone;
+
+#ifdef DONTUSENOMO
+- (NSArray *) tokensInRange:(NSRange)aRange;
+- (NSArray *) tokensInRange:(NSRange)aRange inBitSet:(ANTLRBitSet *)aBitSet;
+- (NSArray *) tokensInRange:(NSRange)aRange withTypes:(NSArray *)tokenTypes;
+- (NSArray *) tokensInRange:(NSRange)aRange withType:(NSInteger)tokenType;
+
+- (id<Token>) getToken:(NSInteger)i;
+
+- (NSInteger) size;
+- (void) rewind;
+- (void) rewind:(NSInteger)marker;
+- (void) seek:(NSInteger)index;
+
+- (NSString *) toString;
+- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex;
+- (NSString *) toStringFromToken:(id<Token>)startToken ToToken:(id<Token>)stopToken;
+
+#endif
+
+@end
diff --git a/runtime/ObjC/Framework/CommonTokenStream.m b/runtime/ObjC/Framework/CommonTokenStream.m
new file mode 100644
index 0000000..53aac0e
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonTokenStream.m
@@ -0,0 +1,358 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "Token.h"
+#import "CommonTokenStream.h"
+
+
+@implementation CommonTokenStream
+
+@synthesize channelOverride;
+@synthesize channel;
+
+#pragma mark Initialization
+
++ (CommonTokenStream *)newCommonTokenStream
+{
+    return [[CommonTokenStream alloc] init];
+}
+
++ (CommonTokenStream *)newCommonTokenStreamWithTokenSource:(id<TokenSource>)theTokenSource
+{
+    return [[CommonTokenStream alloc] initWithTokenSource:(id<TokenSource>)theTokenSource];
+}
+
++ (CommonTokenStream *)newCommonTokenStreamWithTokenSource:(id<TokenSource>)theTokenSource Channel:(NSUInteger)aChannel
+{
+    return [[CommonTokenStream alloc] initWithTokenSource:(id<TokenSource>)theTokenSource Channel:aChannel];
+}
+
+- (id) init
+{
+	if ((self = [super init]) != nil) {
+		channelOverride = [[AMutableDictionary dictionaryWithCapacity:100] retain];
+		channel = TokenChannelDefault;
+	}
+	return self;
+}
+
+- (id) initWithTokenSource:(id<TokenSource>)theTokenSource
+{
+	if ((self = [super initWithTokenSource:theTokenSource]) != nil) {
+		channelOverride = [[AMutableDictionary dictionaryWithCapacity:100] retain];
+		channel = TokenChannelDefault;
+	}
+	return self;
+}
+
+- (id) initWithTokenSource:(id<TokenSource>)theTokenSource Channel:(NSUInteger)aChannel
+{
+	if ((self = [super initWithTokenSource:theTokenSource]) != nil) {
+		channelOverride = [[AMutableDictionary dictionaryWithCapacity:100] retain];
+		channel = aChannel;
+	}
+	return self;
+}
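+
+/* Usage sketch (illustrative; SimpleLexer is an assumed, grammar-generated
+ * lexer class and charStream an existing id<CharStream> -- neither is part of
+ * this class):
+ *
+ *   SimpleLexer *lexer = [SimpleLexer newSimpleLexerWithCharStream:charStream];
+ *   CommonTokenStream *tokens =
+ *       [CommonTokenStream newCommonTokenStreamWithTokenSource:lexer];
+ *   id<Token> first = [tokens LT:1];   // first on-channel token
+ */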
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in CommonTokenStream" );
+#endif
+	if ( channelOverride ) [channelOverride release];
+	if ( tokens ) [tokens release];
+	[self setTokenSource:nil];
+	[super dealloc];
+}
+
+/** Always leave index on an on-channel token. */
+- (void) consume
+{
+    if (index == -1) [self setup];
+    index++;
+    [self sync:index];
+    while ( ((CommonToken *)[tokens objectAtIndex:index]).channel != channel ) {
+		index++;
+		[self sync:index];
+	}
+}
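+
+/* Example of the invariant above (illustrative): with buffered tokens
+ *   [0]=ID(default channel)  [1]=WS(hidden channel)  [2]=ID(default channel)
+ * a consume while positioned at index 0 leaves index at 2, because the
+ * hidden-channel WS token at index 1 is skipped.
+ */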
+
+#pragma mark Lookahead
+
+- (id<Token>) LB:(NSInteger)k
+{
+	if ( k == 0 || (index-k) < 0 ) {
+		return nil;
+	}
+	int i = index;
+	int n = 1;
+    // find k good tokens looking backwards
+	while ( n <= k ) {
+		i = [self skipOffTokenChannelsReverse:i-1];
+		n++;
+	}
+	if ( i < 0 ) {
+		return nil;
+	}
+	return [tokens objectAtIndex:i];
+}
+
+- (id<Token>) LT:(NSInteger)k
+{
+	if ( index == -1 ) [self setup];
+	if ( k == 0 ) return nil;
+	if ( k < 0 ) return [self LB:-k];
+	int i = index;
+	int n = 1;
+	while ( n < k ) {
+		i = [self skipOffTokenChannels:i+1];
+		n++;
+	}
+//	if ( i >= (NSInteger)[tokens count] ) {
+//		return [CommonToken eofToken];
+//	}
+    if ( i > range ) range = i;
+	return [tokens objectAtIndex:i];
+}
+
+#pragma mark Channels & Skipping
+
+// walk forward from idx to the next token on this stream's channel, syncing the buffer as needed
+- (NSInteger) skipOffTokenChannels:(NSInteger) idx
+{
+    [self sync:idx];
+	while ( ((CommonToken *)[tokens objectAtIndex:idx]).channel != channel ) {
+		idx++;
+        [self sync:idx];
+	}
+	return idx;
+}
+
+// walk backwards from i to the previous token on this stream's channel, or -1 if none
+- (NSInteger) skipOffTokenChannelsReverse:(NSInteger) i
+{
+	while ( i >= 0 && ((CommonToken *)[tokens objectAtIndex:i]).channel != channel ) {
+		i--;
+	}
+	return i;
+}
+
+- (void) reset
+{
+    [super reset];
+    index = [self skipOffTokenChannels:0];
+}
+
+- (void) setup
+{
+    index = 0;
+    [self sync:0];
+    int i = 0;
+    while ( ((CommonToken *)[tokens objectAtIndex:i]).channel != channel ) {
+        i++;
+        [self sync:i];
+    }
+	// leave index pointing at first token on channel
+    index = i;
+}
+
+- (NSInteger) getNumberOfOnChannelTokens
+{
+    NSInteger n = 0;
+    [self fill];
+    for( int i = 0; i < [tokens count]; i++ ) {
+        CommonToken *t = [tokens objectAtIndex:i];
+        if ( t.channel == channel )
+            n++;
+        if ( t.type == TokenTypeEOF )
+            break;
+    }
+    return n;
+}
+
+/** Reset this token stream by setting its token source. */
+- (void) setTokenSource:(id<TokenSource>)aTokenSource
+{
+    [super setTokenSource:aTokenSource];
+    channel = TokenChannelDefault;
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    CommonTokenStream *copy;
+	
+    //    copy = [[[self class] allocWithZone:aZone] init];
+    copy = [super copyWithZone:aZone]; // allocation occurs in BaseTree
+    if ( self.channelOverride )
+        copy.channelOverride = [channelOverride copyWithZone:aZone];
+    copy.channel = channel;
+    return copy;
+}
+
+- (NSUInteger)channel
+{
+    return channel;
+}
+
+- (void)setChannel:(NSUInteger)aChannel
+{
+    channel = aChannel;
+}
+
+- (AMutableDictionary *)channelOverride
+{
+    return channelOverride;
+}
+
+- (void)setChannelOverride:(AMutableDictionary *)anOverride
+{
+    channelOverride = anOverride;
+}
+
+#ifdef DONTUSENOMO
+#pragma mark Token access
+
+- (NSArray *) tokensInRange:(NSRange)aRange
+{
+	return [tokens subarrayWithRange:aRange];
+}
+
+#pragma mark Accessors
+
+- (id<TokenSource>) getTokenSource
+{
+    return tokenSource; 
+}
+
+- (NSArray *) tokensInRange:(NSRange)aRange inBitSet:(ANTLRBitSet *)aBitSet
+{
+	unsigned int startIndex = aRange.location;
+	unsigned int stopIndex = aRange.location+aRange.length;
+	if ( index == -1 ) {
+		[self setup];
+	}
+	if (stopIndex >= [tokens count]) {
+		stopIndex = [tokens count] - 1;
+	}
+	AMutableArray *filteredTokens = [AMutableArray arrayWithCapacity:100];
+	unsigned int i=0;
+	for (i = startIndex; i<=stopIndex; i++) {
+		id<Token> token = [tokens objectAtIndex:i];
+		if (aBitSet == nil || [aBitSet member:token.type]) {
+			[filteredTokens addObject:token];
+		}
+	}
+	if ([filteredTokens count]) {
+		return filteredTokens;
+	} else {
+		[filteredTokens release];
+		return nil;
+	}
+}
+
+- (NSArray *) tokensInRange:(NSRange)aRange withTypes:(NSArray *)tokenTypes
+{
+	ANTLRBitSet *bits = [[ANTLRBitSet alloc] initWithArrayOfBits:tokenTypes];
+	NSArray *returnTokens = [[self tokensInRange:aRange inBitSet:bits] retain];
+	[bits release];
+	return returnTokens;
+}
+
+- (NSArray *) tokensInRange:(NSRange)aRange withType:(NSInteger)tokenType
+{
+	ANTLRBitSet *bits = [[ANTLRBitSet alloc] init];
+	[bits add:tokenType];
+	NSArray *returnTokens = [[self tokensInRange:aRange inBitSet:bits] retain];
+	[bits release];
+	return returnTokens;
+}
+
+- (id<Token>) getToken:(NSInteger)i
+{
+	return [tokens objectAtIndex:i];
+}
+
+- (NSInteger) size
+{
+	return [tokens count];
+}
+
+- (void) rewind
+{
+	[self seek:lastMarker];
+}
+
+- (void) rewind:(NSInteger)marker
+{
+	[self seek:marker];
+}
+
+- (void) seek:(NSInteger)anIndex
+{
+	index = anIndex;
+}
+#pragma mark toString routines
+
+- (NSString *) toString
+{
+	if ( index == -1 ) {
+		[self setup];
+	}
+	return [self toStringFromStart:0 ToEnd:[tokens count]];
+}
+
+- (NSString *) toStringFromStart:(NSInteger)startIdx ToEnd:(NSInteger) stopIdx
+{
+    NSMutableString *stringBuffer;
+    id<Token> t;
+
+    if ( startIdx < 0 || stopIdx < 0 ) {
+        return nil;
+    }
+    if ( index == -1 ) {
+        [self setup];
+    }
+    if ( stopIdx >= [tokens count] ) {
+        stopIdx = [tokens count]-1;
+    }
+    stringBuffer = [NSMutableString stringWithCapacity:30];
+    for (int i = startIdx; i <= stopIdx; i++) {
+        t = (id<Token>)[tokens objectAtIndex:i];
+        [stringBuffer appendString:[t text]];
+    }
+    return stringBuffer;
+}
+
+- (NSString *) toStringFromToken:(id<Token>)startToken ToToken:(id<Token>)stopToken
+{
+	if (startToken && stopToken) {
+		int startIdx = [startToken getTokenIndex];
+		int stopIdx = [stopToken getTokenIndex];
+		return [self toStringFromStart:startIdx ToEnd:stopIdx];
+	}
+	return nil;
+}
+#endif
+
+@end
diff --git a/runtime/ObjC/Framework/CommonTree.h b/runtime/ObjC/Framework/CommonTree.h
new file mode 100644
index 0000000..a5117d3
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonTree.h
@@ -0,0 +1,92 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "CommonToken.h"
+#import "BaseTree.h"
+
+@interface CommonTree : BaseTree <Tree> {
+	__strong CommonToken *token;
+	NSInteger startIndex;
+	NSInteger stopIndex;
+    __strong CommonTree *parent;
+    NSInteger childIndex;
+}
+
++ (CommonTree *) invalidNode;
++ (CommonTree *) newTree;
++ (CommonTree *) newTreeWithTree:(CommonTree *)aTree;
++ (CommonTree *) newTreeWithToken:(CommonToken *)aToken;
++ (CommonTree *) newTreeWithTokenType:(NSInteger)tokenType;
++ (CommonTree *) newTreeWithTokenType:(NSInteger)aTType Text:(NSString *)theText;
+
+- (id) init;
+- (id) initWithTreeNode:(CommonTree *)aNode;
+- (id) initWithToken:(CommonToken *)aToken;
+- (id) initWithTokenType:(NSInteger)aTokenType;
+- (id) initWithTokenType:(NSInteger)aTokenType Text:(NSString *)theText;
+
+- (id<BaseTree>) copyWithZone:(NSZone *)aZone;
+
+- (BOOL) isNil;
+
+- (CommonToken *) getToken;
+- (void) setToken:(CommonToken *)aToken;
+- (CommonToken *) dupNode;
+- (NSInteger)type;
+- (NSString *)text;
+- (NSUInteger)line;
+- (void) setLine:(NSUInteger)aLine;
+- (NSUInteger)charPositionInLine;
+- (void) setCharPositionInLine:(NSUInteger)pos;
+- (CommonTree *) getParent;
+- (void) setParent:(CommonTree *) t;
+
+#ifdef DONTUSENOMO
+- (NSString *) treeDescription;
+#endif
+- (NSString *) description;
+- (void) setUnknownTokenBoundaries;
+- (NSInteger) getTokenStartIndex;
+- (void) setTokenStartIndex: (NSInteger) aStartIndex;
+- (NSInteger) getTokenStopIndex;
+- (void) setTokenStopIndex: (NSInteger) aStopIndex;
+
+/*
+ @property (retain, getter=getCommonToken, setter=setCommonToken:) CommonToken *token;
+ @property (assign, getter=getTokenStartIndex, setter=setTokenStartIndex:) NSInteger startIndex;
+ @property (assign, getter=getTokenStopIndex, setter=setTokenStopIndex:) NSInteger stopIndex;
+ @property (retain, getter=getParent, setter=setParent:) id<BaseTree> parentparent;
+ @property (assign, getter=getChildIndex, setter=setChildIndex:) NSInteger childIndex;
+ */
+
+@property (retain) CommonToken *token;
+@property (assign) NSInteger startIndex;
+@property (assign) NSInteger stopIndex;
+@property (retain) CommonTree *parent;
+@property (assign) NSInteger childIndex;
+
+@end
diff --git a/runtime/ObjC/Framework/CommonTree.m b/runtime/ObjC/Framework/CommonTree.m
new file mode 100644
index 0000000..062fa20
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonTree.m
@@ -0,0 +1,345 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "CommonTree.h"
+
+
+@implementation CommonTree
+
++ (CommonTree *)INVALID_NODE
+{
+	return [[CommonTree alloc] initWithToken:[CommonToken invalidToken]];
+}
+
++ (CommonTree *)invalidNode
+{
+    // Had to cast to CommonTree * here, because GCC is dumb.
+	return [[CommonTree alloc] initWithToken:CommonToken.INVALID_TOKEN];
+}
+
++ (CommonTree *)newTree
+{
+    return [[CommonTree alloc] init];
+}
+
++ (CommonTree *)newTreeWithTree:(CommonTree *)aTree
+{
+    return [[CommonTree alloc] initWithTreeNode:aTree];
+}
+
++ (CommonTree *)newTreeWithToken:(id<Token>)aToken
+{
+	return [[CommonTree alloc] initWithToken:aToken];
+}
+
++ (CommonTree *)newTreeWithTokenType:(NSInteger)aTType
+{
+	return [[CommonTree alloc] initWithTokenType:(NSInteger)aTType];
+}
+
++ (CommonTree *)newTreeWithTokenType:(NSInteger)aTType Text:(NSString *)theText
+{
+	return [[CommonTree alloc] initWithTokenType:(NSInteger)aTType Text:theText];
+}
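+
+/* Usage sketch (illustrative only; PLUS and INT are assumed grammar token
+ * types, and addChild: is assumed to come from BaseTree):
+ *
+ *   CommonTree *root = [CommonTree newTreeWithTokenType:PLUS Text:@"+"];
+ *   [root addChild:[CommonTree newTreeWithTokenType:INT Text:@"1"]];
+ *   [root addChild:[CommonTree newTreeWithTokenType:INT Text:@"2"]];
+ *   // root now models the tree ^('+' 1 2)
+ */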
+
+- (id)init
+{
+	self = (CommonTree *)[super init];
+	if ( self != nil ) {
+        token = nil;
+		startIndex = -1;
+		stopIndex = -1;
+        parent = nil;
+        childIndex = -1;
+	}
+	return (CommonTree *)self;
+}
+
+- (id)initWithTreeNode:(CommonTree *)aNode
+{
+	self = (CommonTree *)[super init];
+	if ( self != nil ) {
+		token = aNode.token;
+        if ( token ) [token retain];
+		startIndex = aNode.startIndex;
+		stopIndex = aNode.stopIndex;
+        parent = nil;
+        childIndex = -1;
+	}
+	return self;
+}
+
+- (id)initWithToken:(id<Token>)aToken
+{
+	self = (CommonTree *)[super init];
+	if ( self != nil ) {
+		token = aToken;
+        if ( token ) [token retain];
+		startIndex = -1;
+		stopIndex = -1;
+        parent = nil;
+        childIndex = -1;
+	}
+	return self;
+}
+
+- (id)initWithTokenType:(NSInteger)aTokenType
+{
+	self = (CommonTree *)[super init];
+	if ( self != nil ) {
+		token = [[CommonToken newToken:aTokenType] retain];
+//		startIndex = token.startIndex;
+		startIndex = -1;
+//		stopIndex = token.stopIndex;
+		stopIndex = -1;
+        parent = nil;
+        childIndex = -1;
+	}
+	return self;
+}
+
+- (id) initWithTokenType:(NSInteger)aTokenType Text:(NSString *)theText
+{
+	self = (CommonTree *)[super init];
+	if ( self != nil ) {
+		token = [[CommonToken newToken:aTokenType Text:theText] retain];
+//		startIndex = token.startIndex;
+		startIndex = -1;
+//		stopIndex = token.stopIndex;
+		stopIndex = -1;
+        parent = nil;
+        childIndex = -1;
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+    if ( token ) {
+        [token release];
+        token = nil;
+    }
+    if ( parent ) {
+        [parent release];
+        parent = nil;
+    }
+	[super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    CommonTree *copy;
+	
+    //    copy = [[[self class] allocWithZone:aZone] init];
+    copy = [super copyWithZone:aZone]; // allocation occurs in BaseTree
+    if ( self.token )
+        copy.token = [self.token copyWithZone:aZone];
+    copy.startIndex = startIndex;
+    copy.stopIndex = stopIndex;
+    copy.parent = (CommonTree *)[self.parent copyWithZone:aZone];
+    copy.childIndex = childIndex;
+    return copy;
+}
+
+- (BOOL) isNil
+{
+	return token == nil;
+}
+
+- (CommonToken *) getToken
+{
+	return token;
+}
+
+- (void) setToken:(CommonToken *) aToken
+{
+	if ( token != aToken ) {
+		if ( token ) [token release];
+		[aToken retain];
+		token = aToken;
+	}
+}
+
+- (CommonTree *) dupNode
+{
+    return [CommonTree newTreeWithTree:self ];
+}
+
+- (NSInteger)type
+{
+	if (token)
+		return token.type;
+	return TokenTypeInvalid;
+}
+
+- (NSString *)text
+{
+	if (token)
+		return token.text;
+	return nil;
+}
+
+- (NSUInteger)line
+{
+	if (token)
+		return token.line;
+	return 0;
+}
+
+- (void) setLine:(NSUInteger)aLine
+{
+    if (token)
+        token.line = aLine;
+}
+
+- (NSUInteger)charPositionInLine
+{
+	if (token)
+		return token.charPositionInLine;
+	return 0;
+}
+
+- (void) setCharPositionInLine:(NSUInteger)pos
+{
+    if (token)
+        token.charPositionInLine = pos;
+}
+
+- (NSInteger) getTokenStartIndex
+{
+	if ( startIndex == -1 && token != nil ) {
+		return [token getTokenIndex];
+	}
+    return startIndex;
+}
+
+- (void) setTokenStartIndex: (NSInteger) aStartIndex
+{
+    startIndex = aStartIndex;
+}
+
+- (NSInteger) getTokenStopIndex
+{
+	if ( stopIndex == -1 && token != nil ) {
+		return [token getTokenIndex];
+	}
+    return stopIndex;
+}
+
+- (void) setTokenStopIndex: (NSInteger) aStopIndex
+{
+    stopIndex = aStopIndex;
+}
+
+#ifdef DONTUSENOMO
+- (NSString *) treeDescription
+{
+	if (children) {
+		NSMutableString *desc = [NSMutableString stringWithString:@"(^"];
+		[desc appendString:[self description]];
+		unsigned int childIdx;
+		for (childIdx = 0; childIdx < [children count]; childIdx++) {
+			[desc appendFormat:@"%@", [[children objectAtIndex:childIdx] treeDescription]];
+		}
+		[desc appendString:@")"];
+		return desc;
+	} else {
+		return [self description];
+	}
+}
+#endif
+
+/** For every node in this subtree, make sure its start/stop token
+ *  indexes are set.  Walk depth first, visit bottom up.  Only updates nodes
+ *  with at least one token index < 0.
+ */
+- (void) setUnknownTokenBoundaries
+{
+    if ( children == nil ) {
+        if ( startIndex < 0 || stopIndex < 0 ) {
+            startIndex = stopIndex = [token getTokenIndex];
+        }
+        return;
+    }
+    for (NSUInteger i=0; i < [children count]; i++) {
+        [[children objectAtIndex:i] setUnknownTokenBoundaries];
+    }
+    if ( startIndex >= 0 && stopIndex >= 0 )
+         return; // already set
+    if ( [children count] > 0 ) {
+        CommonTree *firstChild = (CommonTree *)[children objectAtIndex:0];
+        CommonTree *lastChild = (CommonTree *)[children objectAtIndex:[children count]-1];
+        startIndex = [firstChild getTokenStartIndex];
+        stopIndex = [lastChild getTokenStopIndex];
+    }
+}
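+
+/* Worked example for the method above (illustrative): given a parent node
+ * whose startIndex/stopIndex are still -1 and whose children already have
+ * boundaries 3..3 and 5..7, the bottom-up walk leaves the children alone
+ * (their indexes are >= 0) and then sets the parent to 3..7, the span from
+ * its first child's start to its last child's stop.
+ */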
+
+- (NSInteger) getChildIndex
+{
+    return childIndex;
+}
+
+- (CommonTree *) getParent
+{
+    return parent;
+}
+
+- (void) setParent:(CommonTree *) t
+{
+    parent = t;
+}
+
+- (void) setChildIndex:(NSInteger) anIndex
+{
+    childIndex = anIndex;
+}
+
+- (NSString *) description
+{
+    if ( [self isNil] ) {
+        return @"nil";
+    }
+    if ( [self type] == TokenTypeInvalid ) {
+        return @"<errornode>";
+    }
+    if ( token==nil ) {
+        return nil;
+    }
+    return token.text;
+}
+
+- (NSString *) toString
+{
+    return [self description];
+}
+
+@synthesize token;
+@synthesize startIndex;
+@synthesize stopIndex;
+@synthesize parent;
+@synthesize childIndex;
+
+@end
diff --git a/runtime/ObjC/Framework/CommonTreeAdaptor.h b/runtime/ObjC/Framework/CommonTreeAdaptor.h
new file mode 100644
index 0000000..8d775e8
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonTreeAdaptor.h
@@ -0,0 +1,65 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+#import "Tree.h"
+#import "CommonToken.h"
+#import "CommonTree.h"
+#import "BaseTreeAdaptor.h"
+
+@interface CommonTreeAdaptor : BaseTreeAdaptor {
+}
+
++ (CommonTree *) newEmptyTree;
++ (CommonTreeAdaptor *)newTreeAdaptor;
+- (id) init;
+- (CommonTree *)dupNode:(CommonTree *)t;   
+
+- (CommonTree *) create:(id<Token>) payload;
+//- (CommonTree *) createTree:(NSInteger)tokenType fromToken:(CommonToken *)aToken;
+//- (CommonTree *) createTree:(NSInteger)tokenType fromToken:(CommonToken *)aToken Text:(NSString *)text;
+- (id<Token>)createToken:(NSInteger)tokenType Text:(NSString *)text;
+- (id<Token>)createToken:(id<Token>)fromToken;
+- (void) setTokenBoundaries:(CommonTree *)t From:(id<Token>)startToken To:(id<Token>)stopToken;
+- (NSInteger)getTokenStartIndex:(CommonTree *)t;
+- (NSInteger)getTokenStopIndex:(CommonTree *)t;
+- (NSString *)getText:(CommonTree *)t;
+- (void)setText:(CommonTree *)t Text:(NSString *)text;
+- (NSInteger)getType:(CommonTree *)t;
+- (void) setType:(CommonTree *)t Type:(NSInteger)tokenType;
+- (id<Token>)getToken:(CommonTree *)t;
+- (CommonTree *)getChild:(CommonTree *)t At:(NSInteger)i;
+- (void) setChild:(CommonTree *)t At:(NSInteger)i Child:(CommonTree *)child;
+- (NSInteger)getChildCount:(CommonTree *)t;
+- (CommonTree *)getParent:(CommonTree *)t;
+- (void)setParent:(CommonTree *)t With:(CommonTree *)parent;
+- (NSInteger)getChildIndex:(CommonTree *)t;
+- (void)setChildIndex:(CommonTree *)t With:(NSInteger)index;
+- (void)replaceChildren:(CommonTree *)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(CommonTree *)t;
+- (id)copyWithZone:(NSZone *)zone;
+
+@end
diff --git a/runtime/ObjC/Framework/CommonTreeAdaptor.m b/runtime/ObjC/Framework/CommonTreeAdaptor.m
new file mode 100644
index 0000000..b88dbb0
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonTreeAdaptor.m
@@ -0,0 +1,240 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "CommonTreeAdaptor.h"
+
+@implementation CommonTreeAdaptor
+
++ (CommonTree *) newEmptyTree
+{
+    return [CommonTree newTree];
+}
+
++ (CommonTreeAdaptor *)newTreeAdaptor
+{
+    return [[CommonTreeAdaptor alloc] init];
+}
+
+- (id) init
+{
+    self = [super init];
+    if (self) {
+    }
+    return self;
+}
+
+/** Duplicate a node.  This is part of the factory;
+ *	override if you want another kind of node to be built.
+ *
+ *  I could use reflection to prevent having to override this
+ *  but reflection is slow.
+ */
+- (id) dupNode:(id<BaseTree>)t
+{
+    if ( t==nil )
+        return nil;
+    return [CommonTree newTreeWithTree:(CommonTree *)t];
+}
+
+/** Tell me how to create a token for use with imaginary token nodes.
+ *  For example, there is probably no input symbol associated with imaginary
+ *  token DECL, but you need to create it as a payload or whatever for
+ *  the DECL node as in ^(DECL type ID).
+ *
+ *  This is a variant of createToken where the new token is derived from
+ *  an actual real input token.  Typically this is for converting '{'
+ *  tokens to BLOCK etc...  You'll see
+ *
+ *    r : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
+ *
+ *  If you care what the token payload object's type is, you should
+ *  override this method and any other createToken variant.
+ */
+- (CommonTree *) create:(CommonToken *)aToken
+{
+    return [CommonTree newTreeWithToken:aToken];
+}
+
+/** Tell me how to create a token for use with imaginary token nodes.
+ *  For example, there is probably no input symbol associated with imaginary
+ *  token DECL, but you need to create it as a payload or whatever for
+ *  the DECL node as in ^(DECL type ID).
+ *
+ *  If you care what the token payload object's type is, you should
+ *  override this method and any other createToken variant.
+ */
+- (CommonTree *)createTree:(NSInteger)tokenType Text:(NSString *)text
+{
+    return [CommonTree newTreeWithTokenType:tokenType Text:text];
+}
+
+- (id<Token>)createToken:(NSInteger)tokenType Text:(NSString *)text
+{
+    id<Token> fromToken = [CommonToken newToken:tokenType Text:text];
+    return fromToken;
+}
+
+- (id<Token>)createToken:(id<Token>)fromToken
+{
+    return [CommonToken newTokenWithToken:(CommonToken *)fromToken];
+}
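+
+/* Usage sketch for imaginary tokens (illustrative; BLOCK is an assumed
+ * grammar token type, not defined here):
+ *
+ *   CommonTreeAdaptor *anAdaptor = [CommonTreeAdaptor newTreeAdaptor];
+ *   id<Token> blockTok = [anAdaptor createToken:BLOCK Text:@"BLOCK"];
+ *   CommonTree *blockNode = [anAdaptor create:blockTok];
+ *   // blockNode is the root payload for a ^(BLOCK ...) rewrite as described
+ *   // in the comment above
+ */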
+
+/** Track start/stop token for subtree root created for a rule.
+ *  Only works with Tree nodes.  For rules that match nothing,
+ *  seems like this will yield start=i and stop=i-1 in a nil node.
+ *  Might be useful info so I'll not force to be i..i.
+ */
+- (void) setTokenBoundaries:(id<BaseTree>)aTree From:(id<Token>)startToken To:(id<Token>)stopToken
+{
+    if ( aTree == nil )
+        return;
+    int startTokIdx = 0;
+    int stopTokIdx = 0;
+    if ( startToken != nil )
+        startTokIdx = [startToken getTokenIndex];
+    if ( stopToken != nil )
+        stopTokIdx = [stopToken getTokenIndex];
+    [(id<BaseTree>)aTree setTokenStartIndex:startTokIdx];
+    [(id<BaseTree>)aTree setTokenStopIndex:stopTokIdx];
+}
+
+- (NSInteger)getTokenStartIndex:(id<BaseTree>) t
+{
+    if ( t == nil )
+        return -1;
+    return [(id<BaseTree>)t getTokenStartIndex];
+}
+
+- (NSInteger)getTokenStopIndex:(id<BaseTree>) t
+{
+    if ( t == nil )
+        return -1;
+    return [(id<BaseTree>)t getTokenStopIndex];
+}
+
+- (NSString *)getText:(CommonTree *)t
+{
+    if ( t == nil )
+        return nil;
+    return t.token.text;
+}
+
+- (void)setText:(id<BaseTree>)t Text:(NSString *)text
+{
+    if ( t == nil )
+        return;
+    // a CommonTree's text comes from its token, so there is nothing to set here
+}
+
+- (NSInteger)getType:(CommonTree *)t
+{
+    if ( t==nil )
+        return TokenTypeInvalid;
+    return t.token.type;
+}
+
+- (void) setType:(id<BaseTree>)t Type:(NSInteger)tokenType
+{
+    if ( t==nil )
+        return;
+    // a CommonTree's type comes from its token, so there is nothing to set here
+}
+
+/** What is the Token associated with this node?  If
+ *  you are not using CommonTree, then you must
+ *  override this in your own adaptor.
+ */
+- (id<Token>) getToken:(CommonTree *) t
+{
+    if ( [t isKindOfClass:[CommonTree class]] ) {
+        return t.token;
+    }
+    return nil; // no idea what to do
+}
+
+- (id<BaseTree>) getChild:(id<BaseTree>)t At:(NSInteger)i
+{
+    if ( t == nil )
+        return nil;
+    return [(id<BaseTree>)t getChild:i];
+}
+
+- (void) setChild:(id<BaseTree>)t At:(NSInteger)i Child:(id<BaseTree>)child
+{
+    if ( t == nil )
+        return;
+    [(id<BaseTree>)t setChild:i With:child];
+}
+
+- (id) deleteChild:(id<BaseTree>)t Index:(NSInteger)anIndex
+{
+    return [t deleteChild:anIndex];
+}
+
+- (NSInteger) getChildCount:(id<BaseTree>) t
+{
+    if ( t == nil )
+        return 0;
+    return [(id<BaseTree>) t getChildCount];
+}
+
+- (id<BaseTree>) getParent:(id<BaseTree>) t
+{
+    if ( t == nil )
+        return nil;
+    return (id<BaseTree>)[t getParent];
+}
+
+- (void) setParent:(id<BaseTree>)t With:(id<BaseTree>) parent
+{
+    if ( t != nil )
+        [(id<BaseTree>) t setParent:(id<BaseTree>)parent];
+}
+
+- (NSInteger) getChildIndex:(id<BaseTree>) t
+{
+    if ( t == nil )
+        return 0;
+    return [(id<BaseTree>) t getChildIndex];
+}
+
+- (void) setChildIndex:(id<BaseTree>)t With:(NSInteger)anIndex
+{
+    if ( t!=nil )
+        [(id<BaseTree>)t setChildIndex:anIndex];
+}
+
+- (void) replaceChildren:(id<BaseTree>)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id<BaseTree>)t
+{
+    if ( parent != nil ) {
+        [(id<BaseTree>)parent replaceChildrenFrom:startChildIndex To:stopChildIndex With:t];
+    }
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    return [[[self class] allocWithZone:aZone] init];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/CommonTreeNodeStream.h b/runtime/ObjC/Framework/CommonTreeNodeStream.h
new file mode 100644
index 0000000..be5371f
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonTreeNodeStream.h
@@ -0,0 +1,120 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "CommonTree.h"
+#import "CommonTreeNodeStream.h"
+#import "LookaheadStream.h"
+#import "TreeNodeStream.h"
+#import "TreeIterator.h"
+#import "IntArray.h"
+
+@interface CommonTreeNodeStream : LookaheadStream <TreeNodeStream> {
+#define DEFAULT_INITIAL_BUFFER_SIZE 100
+#define INITIAL_CALL_STACK_SIZE 10
+    
+/** Pull nodes from which tree? */
+__strong id root;
+    
+/** If this tree (root) was created from a token stream, track it. */
+__strong id <TokenStream> tokens;
+    
+	/** What tree adaptor was used to build these trees */
+__strong CommonTreeAdaptor *adaptor;
+    
+/** The tree iterator we are using */
+__strong TreeIterator *it;
+    
+/** Stack of indexes used for push/pop calls */
+__strong IntArray *calls;    
+    
+/** A (nil A B C) tree is treated like a flat A B C stream */
+BOOL hasNilRoot;
+    
+/** Tracks tree depth.  Level=0 means we're at root node level. */
+NSInteger level;
+}
+@property (retain, getter=getRoot, setter=setRoot:) CommonTree *root;
+@property (retain, getter=getTokens,setter=setTokens:) id<TokenStream> tokens;
+@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) CommonTreeAdaptor *adaptor;
+@property (assign, getter=getLevel, setter=setLevel:) NSInteger level;
+
++ (CommonTreeNodeStream *) newCommonTreeNodeStream:(CommonTree *)theTree;
++ (CommonTreeNodeStream *) newCommonTreeNodeStream:(id<TreeAdaptor>)anAdaptor Tree:(CommonTree *)theTree;
+
+- (id) initWithTree:(CommonTree *)theTree;
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)adaptor Tree:(CommonTree *)theTree;
+    
+- (void) reset;
+    
+    /** Pull elements from the tree iterator.  Track tree level 0..max_level.
+     *  If the tree is nil-rooted, don't emit the initial nil and DOWN, nor the final UP.
+     */
+- (id) nextElement;
+    
+- (BOOL) isEOF:(id<BaseTree>) obj;
+- (void) setUniqueNavigationNodes:(BOOL) uniqueNavigationNodes;
+    
+- (id) getTreeSource;
+    
+- (NSString *) getSourceName;
+    
+- (id<TokenStream>) getTokenStream;
+    
+- (void) setTokenStream:(id<TokenStream>) tokens;
+    
+- (CommonTreeAdaptor *) getTreeAdaptor;
+    
+- (void) setTreeAdaptor:(CommonTreeAdaptor *) adaptor;
+    
+- (CommonTree *)get:(NSInteger) i;
+
+- (NSInteger) LA:(NSInteger) i;
+    
+    /** Make stream jump to a new location, saving old location.
+     *  Switch back with pop().
+     */
+- (void) push:(NSInteger) index;
+    
+    /** Seek back to previous index saved during last push() call.
+     *  Return top of stack (return index).
+     */
+- (NSInteger) pop;
+    
+// TREE REWRITE INTERFACE
+    
+- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
+    
+- (NSString *) toStringFromNode:(id<BaseTree>)startNode ToNode:(id<BaseTree>)stopNode;
+
+/** For debugging; destructive: moves tree iterator to end. */
+- (NSString *) toTokenTypeString;
+
+@property (retain) TreeIterator *it;
+@property (retain) IntArray *calls;
+@property BOOL hasNilRoot;
+@end
diff --git a/runtime/ObjC/Framework/CommonTreeNodeStream.m b/runtime/ObjC/Framework/CommonTreeNodeStream.m
new file mode 100644
index 0000000..b195f5d
--- /dev/null
+++ b/runtime/ObjC/Framework/CommonTreeNodeStream.m
@@ -0,0 +1,249 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "CommonTreeNodeStream.h"
+#import "TokenStream.h"
+#import "IntStream.h"
+#import "CharStream.h"
+#import "AMutableArray.h"
+#import "CommonTreeAdaptor.h"
+
+#ifndef DEBUG_DEALLOC
+#define DEBUG_DEALLOC
+#endif
+
+@implementation CommonTreeNodeStream
+
+@synthesize root;
+@synthesize tokens;
+@synthesize adaptor;
+@synthesize level;
+
++ (CommonTreeNodeStream *) newCommonTreeNodeStream:(CommonTree *)theTree
+{
+    return [[CommonTreeNodeStream alloc] initWithTree:theTree];
+}
+
++ (CommonTreeNodeStream *) newCommonTreeNodeStream:(id<TreeAdaptor>)anAdaptor Tree:(CommonTree *)theTree
+{
+    return [[CommonTreeNodeStream alloc] initWithTreeAdaptor:anAdaptor Tree:theTree];
+}
+
+- (id) initWithTree:(CommonTree *)theTree
+{
+    if ((self = [super init]) != nil ) {
+        adaptor = [[CommonTreeAdaptor newTreeAdaptor] retain];
+        root = [theTree retain];
+        navigationNodeEOF = [[adaptor createTree:TokenTypeEOF Text:@"EOF"] retain]; // set EOF
+        it = [[TreeIterator newANTRLTreeIteratorWithAdaptor:adaptor andTree:root] retain];
+        calls = [[IntArray newArrayWithLen:INITIAL_CALL_STACK_SIZE] retain];
+        /** A (nil A B C) tree is treated like a flat A B C stream */
+        hasNilRoot = NO;
+        level = 0;
+    }
+    return self;
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)anAdaptor Tree:(CommonTree *)theTree
+{
+    if ((self = [super init]) != nil ) {
+        adaptor = [anAdaptor retain];
+        root = [theTree retain];
+        navigationNodeEOF = [[adaptor createTree:TokenTypeEOF Text:@"EOF"] retain]; // set EOF
+        //    it = [root objectEnumerator];
+        it = [[TreeIterator newANTRLTreeIteratorWithAdaptor:adaptor andTree:root] retain];
+        calls = [[IntArray newArrayWithLen:INITIAL_CALL_STACK_SIZE] retain];
+        /** A (nil A B C) tree is treated like a flat A B C stream */
+        hasNilRoot = NO;
+        level = 0;
+    }
+    //    eof = [self isEOF]; // make sure tree iterator returns the EOF we want
+    return self;
+}
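+
+/* Usage sketch (illustrative; SimpleWalker is an assumed, grammar-generated
+ * tree grammar class, and tree/tokenStream come from a previous parse -- none
+ * of these names are part of this class):
+ *
+ *   CommonTreeNodeStream *nodes =
+ *       [CommonTreeNodeStream newCommonTreeNodeStream:tree];
+ *   [nodes setTokenStream:tokenStream];   // optional, keeps token text handy
+ *   SimpleWalker *walker = [SimpleWalker newSimpleWalker:nodes];
+ */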
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in CommonTreeNodeStream" );
+#endif
+    if ( root ) [root release];
+    if ( tokens ) [tokens release];
+    if ( adaptor ) [adaptor release];
+    if ( it ) [it release];
+    if ( calls ) [calls release];    
+    [super dealloc];
+}
+
+- (void) reset
+{
+    [super reset];
+    [it reset];
+    hasNilRoot = false;
+    level = 0;
+    if ( calls != nil )
+        [calls reset];  // [calls clear]; // in Java
+}
+
+/** Pull elements from the tree iterator.  Track tree level 0..max_level.
+ *  If the tree is nil-rooted, don't emit the initial nil and DOWN, nor the final UP.
+ */
+- (id) nextElement
+{
+    id t = [it nextObject];
+    //System.out.println("pulled "+adaptor.getType(t));
+    if ( t == [it up] ) {
+        level--;
+        if ( level==0 && hasNilRoot ) return [it nextObject]; // don't give last UP; get EOF
+    }
+    else if ( t == [it down] )
+        level++;
+    if ( level == 0 && [adaptor isNil:t] ) { // if nil root, scarf nil, DOWN
+        hasNilRoot = true;
+        t = [it nextObject]; // t is now DOWN, so get first real node next
+        level++;
+        t = [it nextObject];
+    }
+    return t;
+}
+
+- (BOOL) isEOF:(id<BaseTree>) aTree
+{
+    return [adaptor getType:(CommonTree *)aTree] == TokenTypeEOF;
+}
+
+- (void) setUniqueNavigationNodes:(BOOL) uniqueNavigationNodes
+{
+}
+
+- (id) getTreeSource
+{
+    return root;
+}
+
+- (NSString *) getSourceName
+{
+    return [[self getTokenStream] getSourceName];
+}
+
+- (id<TokenStream>) getTokenStream
+{
+    return tokens;
+}
+
+- (void) setTokenStream:(id<TokenStream>)theTokens
+{
+    if ( tokens != theTokens ) {
+        if ( tokens ) [tokens release];
+        [theTokens retain];
+    }
+    tokens = theTokens;
+}
+
+- (CommonTreeAdaptor *) getTreeAdaptor
+{
+    return adaptor;
+}
+
+- (void) setTreeAdaptor:(CommonTreeAdaptor *) anAdaptor
+{
+    if ( adaptor != anAdaptor ) {
+        if ( adaptor ) [adaptor release];
+        [anAdaptor retain];
+    }
+    adaptor = anAdaptor;
+}
+
+- (CommonTree *)getNode:(NSInteger) i
+{
+    @throw [RuntimeException newException:@"Absolute node indexes are meaningless in an unbuffered stream"];
+    return nil;
+}
+
+- (NSInteger) LA:(NSInteger) i
+{
+    return [adaptor getType:[self LT:i]];
+}
+
+/** Make stream jump to a new location, saving old location.
+ *  Switch back with pop().
+ */
+- (void) push:(NSInteger) anIndex
+{
+    if ( calls == nil ) {
+        calls = [[IntArray newArrayWithLen:INITIAL_CALL_STACK_SIZE] retain];
+    }
+    [calls push:p]; // save current anIndex
+    [self seek:anIndex];
+}
+
+/** Seek back to previous anIndex saved during last push() call.
+ *  Return top of stack (return anIndex).
+ */
+- (NSInteger) pop
+{
+    int ret = [calls pop];
+    [self seek:ret];
+    return ret;
+}    
+
+// TREE REWRITE INTERFACE
+
+- (void) replaceChildren:(id) parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) aTree
+{
+    if ( parent != nil ) {
+        [adaptor replaceChildren:parent From:startChildIndex To:stopChildIndex With:aTree];
+    }
+}
+
+- (NSString *) toStringFromNode:(id<BaseTree>)startNode ToNode:(id<BaseTree>)stopNode
+{
+    // we'll have to walk from start to stop in tree; we're not keeping
+    // a complete node stream buffer
+    return @"n/a";
+}
+
+/** For debugging; destructive: moves tree iterator to end. */
+- (NSString *) toTokenTypeString
+{
+    [self reset];
+    NSMutableString *buf = [NSMutableString stringWithCapacity:5];
+    id obj = [self LT:1];
+    NSInteger type = [adaptor getType:obj];
+    while ( type != TokenTypeEOF ) {
+        [buf appendString:@" "];
+        [buf appendString:[NSString stringWithFormat:@"%d", type]];
+        [self consume];
+        obj = [self LT:1];
+        type = [adaptor getType:obj];
+    }
+    return buf;
+}
+
+@synthesize it;
+@synthesize calls;
+@synthesize hasNilRoot;
+@end
+
diff --git a/runtime/ObjC/Framework/DFA.h b/runtime/ObjC/Framework/DFA.h
new file mode 100644
index 0000000..508e1ff
--- /dev/null
+++ b/runtime/ObjC/Framework/DFA.h
@@ -0,0 +1,84 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "BaseRecognizer.h"
+#import "CharStream.h"
+#import "NoViableAltException.h"
+
+@interface DFA : NSObject {
+	// the tables are set by subclasses to their own static versions.
+	const NSInteger *eot;
+	const NSInteger *eof;
+	const unichar *min;
+	const unichar *max;
+	const NSInteger *accept;
+	const NSInteger *special;
+	const NSInteger **transition;
+	
+	__strong BaseRecognizer *recognizer;
+	NSInteger decisionNumber;
+    NSInteger len;
+}
+
+- (id) initWithRecognizer:(id) theRecognizer;
+// simulate the DFA using the static tables and predict an alternative
+- (NSInteger) predict:(id<CharStream>)anInput;
+- (void) noViableAlt:(NSInteger)state Stream:(id<IntStream>)anInput;
+
+- (NSInteger) specialStateTransition:(NSInteger)state Stream:(id<IntStream>)anInput;
+// - (NSInteger) specialStateTransition:(NSInteger) state;
+//- (unichar) specialTransition:(unichar) state symbol:(NSInteger) symbol;
+
+// hook for debugger support
+- (void) error:(NoViableAltException *)nvae;
+
+- (NSString *) description;
+- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment;
+
++ (void) setIsEmittingDebugInfo:(BOOL) shouldEmitDebugInfo;
+
+- (NSInteger *) unpackEncodedString:(NSString *)encodedString;
+- (short *) unpackEncodedStringToUnsignedChars:(NSString *)encodedString;
+- (NSInteger)getDecision;
+- (void)setDecision:(NSInteger)aDecision;
+
+- (BaseRecognizer *)getRecognizer;
+- (void)setRecognizer:(BaseRecognizer *)aRecognizer;
+- (NSInteger)length;
+
+@property const NSInteger *eot;
+@property const NSInteger *eof;
+@property const unichar *min;
+@property const unichar *max;
+@property const NSInteger *accept;
+@property const NSInteger *special;
+@property const NSInteger **transition;
+
+@property (retain, getter=getRecognizer,setter=setRecognizer:) BaseRecognizer *recognizer;
+@property (assign, getter=getDecision,setter=setDecision:) NSInteger decisionNumber;
+@property (assign, getter=getLen,setter=setLen:) NSInteger len;
+@end
diff --git a/runtime/ObjC/Framework/DFA.m b/runtime/ObjC/Framework/DFA.m
new file mode 100644
index 0000000..d0957e8
--- /dev/null
+++ b/runtime/ObjC/Framework/DFA.m
@@ -0,0 +1,262 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "DFA.h"
+#import <Token.h>
+#import <NoViableAltException.h>
+
+NSInteger debug = 0;
+
+@implementation DFA
+@synthesize recognizer;
+@synthesize decisionNumber;
+@synthesize len;
+
+- (id) initWithRecognizer:(BaseRecognizer *) theRecognizer
+{
+	if ((self = [super init]) != nil) {
+		recognizer = theRecognizer;
+        [recognizer retain];
+        debug = 0;
+	}
+	return self;
+}
+
+// using the tables ANTLR generates for the DFA based prediction this method simulates the DFA
+// and returns the prediction of the alternative to be used.
+- (NSInteger) predict:(id<IntStream>)input
+{
+    if ( debug > 2 ) {
+        NSLog(@"Enter DFA.predict for decision %d", decisionNumber);
+    }
+	int aMark = [input mark];
+	int s = 0;
+	@try {
+		while (YES) {
+			if ( debug > 2 )
+                NSLog(@"DFA %d state %d LA(1)='%c'(%x)", decisionNumber, s, (unichar)[input LA:1], [input LA:1]);
+			NSInteger specialState = special[s];
+			if (specialState >= 0) {
+				// this state is special in that it has some code associated with it. we cannot do this in a pure DFA so
+				// we signal the caller accordingly.
+				if ( debug > 2 ) {
+                    NSLog(@"DFA %d state %d is special state %d", decisionNumber, s, specialState);
+                }
+				s = [self specialStateTransition:specialState Stream:input];
+                if ( debug > 2 ) {
+                    NSLog(@"DFA %d returns from special state %d to %d", decisionNumber, specialState, s);
+                }
+                if (s == -1 ) {
+                    [self noViableAlt:s Stream:input];
+                    return 0;
+                }
+				[input consume];
+				continue;
+			}
+			if (accept[s] >= 1) {  // if this is an accepting state return the prediction
+				if ( debug > 2 ) NSLog(@"accept; predict %d from state %d", accept[s], s);
+				return accept[s];
+			}
+			// based on the lookahead lookup the next transition, consume and do transition
+			// or signal that we have no viable alternative
+			NSInteger c = [input LA:1];
+			if ( (unichar)c >= min[s] && (unichar)c <= max[s]) {
+				int snext = transition[s][c-min[s]];
+				if (snext < 0) {
+                    // was in range but not a normal transition
+                    // must check EOT, which is like the else clause.
+                    // eot[s]>=0 indicates that an EOT edge goes to another
+                    // state.
+					if (eot[s] >= 0) {
+						if ( debug > 2 ) NSLog(@"EOT transition");
+						s = eot[s];
+						[input consume];
+                        // TODO: I had this as return accept[eot[s]]
+                        // which assumed here that the EOT edge always
+                        // went to an accept...faster to do this, but
+                        // what about predicated edges coming from EOT
+                        // target?
+						continue;
+					}
+					[self noViableAlt:s Stream:input];
+					return 0;
+				}
+				s = snext;
+				[input consume];
+				continue;
+			}
+			
+			if (eot[s] >= 0) {// EOT transition? we may still accept the input in the next state
+				if ( debug > 2 ) NSLog(@"EOT transition");
+				s = eot[s];
+				[input consume];
+				continue;
+			}
+			if ( c == TokenTypeEOF && eof[s] >= 0) {  // we are at EOF and may even accept the input.
+				if ( debug > 2 ) NSLog(@"accept via EOF; predict %d from %d", accept[eof[s]], eof[s]);
+				return accept[eof[s]];
+			}
+			if ( debug > 2 ) {
+                NSLog(@"no viable alt!\n");
+                NSLog(@"min[%d] = %d\n", s, min[s]);
+                NSLog(@"max[%d] = %d\n", s, max[s]);
+                NSLog(@"eot[%d] = %d\n", s, eot[s]);
+                NSLog(@"eof[%d] = %d\n", s, eof[s]);
+                for (NSInteger p = 0; p < self.len; p++) {
+                    NSLog(@"%d ", transition[s][p]);
+                }
+                NSLog(@"\n");
+            }
+			[self noViableAlt:s Stream:input];
+            return 0;
+		}
+	}
+	@finally {
+		[input rewind:aMark];
+	}
+	return 0; // silence warning
+}
+
+- (void) noViableAlt:(NSInteger)state Stream:(id<IntStream>)anInput
+{
+	if ([recognizer.state isBacktracking]) {
+		[recognizer.state setFailed:YES];
+		return;
+	}
+	NoViableAltException *nvae = [NoViableAltException newException:decisionNumber state:state stream:anInput];
+	[self error:nvae];
+	@throw nvae;
+}
+
+- (NSInteger) specialStateTransition:(NSInteger)state Stream:(id<IntStream>)anInput
+{
+    @throw [NoViableAltException newException:-1 state:state stream:anInput];
+	return -1;
+}
+
+- (void) error:(NoViableAltException *)nvae
+{
+	// empty, hook for debugger support
+}
+
+- (NSString *) description
+{
+	return @"subclass responsibility";
+}
+
+- (BOOL) evaluateSyntacticPredicate:(SEL)synpredFragment
+{
+	return [recognizer evaluateSyntacticPredicate:synpredFragment];
+}
+
++ (void) setIsEmittingDebugInfo:(BOOL) shouldEmitDebugInfo
+{
+	debug = shouldEmitDebugInfo;
+}
+
+/** Given a String that has a run-length-encoding of some unsigned shorts
+ *  like "\1\2\3\9", convert to short[] {2,9,9,9}.  We do this to avoid
+ *  static short[] which generates so much init code that the class won't
+ *  compile. :(
+ */
+- (NSInteger *) unpackEncodedString:(NSString *)encodedString
+{
+    // walk first to find how big it is.
+    int size = 0;
+    for (int i=0; i < [encodedString length]; i+=2) {
+        size += [encodedString characterAtIndex:i];
+    }
+    __strong NSInteger *data = (NSInteger *)calloc(size, sizeof(NSInteger));
+    int di = 0;
+    for (int i=0; i < [encodedString length]; i+=2) {
+        char n = [encodedString characterAtIndex:i];
+        char v = [encodedString characterAtIndex:i+1];
+        // add v n times to data
+        for (int j = 0; j < n; j++) {
+            data[di++] = v;
+        }
+    }
+    return data;
+}
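+// A brief caller sketch (illustrative; `table' is a hypothetical name): an
+// encoded string with the pairs (1,2)(3,9) decodes to {2, 9, 9, 9}, i.e. one
+// copy of 2 followed by three copies of 9.  The buffer comes from calloc(), so
+// the caller releases it with free() when finished:
+//
+//     NSInteger *table = [self unpackEncodedString:encodedString];
+//     // table[0] == 2, table[1] == table[2] == table[3] == 9
+//     free(table);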
+
+/** Hideous duplication of code, but I need different typed arrays out :( */
+- (short *) unpackEncodedStringToUnsignedChars:(NSString *)encodedString
+{
+    // walk first to find how big it is.
+    int size = 0;
+    for (int i=0; i < [encodedString length]; i+=2) {
+        size += [encodedString characterAtIndex:i];
+    }
+    __strong short *data = (short *)calloc(size, sizeof(short));
+    int di = 0;
+    for (int i=0; i < [encodedString length]; i+=2) {
+        char n = [encodedString characterAtIndex:i];
+        char v = [encodedString characterAtIndex:i+1];
+        // add v n times to data
+        for (int j = 0; j < n; j++) {
+            data[di++] = v;
+        }
+    }
+    return (short *)data;
+}
+
+- (NSInteger)getDecision
+{
+    return decisionNumber;
+}
+
+- (void)setDecision:(NSInteger)aDecision
+{
+    decisionNumber = aDecision;
+}
+
+- (BaseRecognizer *)getRecognizer
+{
+    return recognizer;
+}
+
+- (void)setRecognizer:(BaseRecognizer *)aRecognizer
+{
+    if ( recognizer != aRecognizer ) {
+        if ( recognizer ) [recognizer release];
+        [aRecognizer retain];
+    }
+    recognizer = aRecognizer;
+}
+
+- (NSInteger)length
+{
+    return len;
+}
+
+@synthesize eot;
+@synthesize eof;
+@synthesize min;
+@synthesize max;
+@synthesize accept;
+@synthesize special;
+@synthesize transition;
+@end
diff --git a/runtime/ObjC/Framework/Debug.h b/runtime/ObjC/Framework/Debug.h
new file mode 100644
index 0000000..9b3ccda
--- /dev/null
+++ b/runtime/ObjC/Framework/Debug.h
@@ -0,0 +1,33 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "DebugEventListener.h"
+#import "DebugEventSocketProxy.h"
+#import "DebugParser.h"
+#import "DebugTokenStream.h"
+#import "DebugTreeParser.h"
+#import "DebugTreeNodeStream.h"
+#import "DebugTreeAdaptor.h"
diff --git a/runtime/ObjC/Framework/DebugEventListener.h b/runtime/ObjC/Framework/DebugEventListener.h
new file mode 100644
index 0000000..11f94a8
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugEventListener.h
@@ -0,0 +1,275 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Token.h"
+#import "RecognitionException.h"
+
+@protocol DebugEventListener 
+
+#define DebugProtocolVersion 1
+
+/** The parser has just entered a rule.  No decision has been made about
+*  which alt is predicted.  This is fired AFTER init actions have been
+*  executed.  Attributes are defined and available etc...
+*/
+- (void) enterRule:(NSString *)ruleName;
+
+/** Because rules can have lots of alternatives, it is very useful to
+*  know which alt you are entering.  This is 1..n for n alts.
+*/
+- (void) enterAlt:(NSInteger)alt;
+
+/** This is the last thing executed before leaving a rule.  It is
+*  executed even if an exception is thrown.  This is triggered after
+*  error reporting and recovery have occurred (unless the exception is
+*  not caught in this rule).  This implies an "exitAlt" event.
+*/
+- (void) exitRule:(NSString *)ruleName;
+
+/** Track entry into any (...) subrule or other EBNF construct */
+- (void) enterSubRule:(NSInteger)decisionNumber;
+
+- (void) exitSubRule:(NSInteger)decisionNumber;
+
+/** Every decision, fixed k or arbitrary, has an enter/exit event
+*  so that a GUI can easily track what LT/consume events are
+*  associated with prediction.  You will see a single enter/exit
+*  subrule but multiple enter/exit decision events, one for each
+*  loop iteration.
+*/
+- (void) enterDecision:(NSInteger)decisionNumber;
+
+- (void) exitDecision:(NSInteger)decisionNumber;
+
+/** An input token was consumed; matched by any kind of element.
+*  Trigger after the token was matched by things like match(), matchAny().
+*/
+- (void) consumeToken:(id<Token>)t;
+
+/** An off-channel input token was consumed.
+*  Trigger after the token was matched by things like match(), matchAny().
+*  (unless of course the hidden token is the first thing in the input stream).
+*/
+- (void) consumeHiddenToken:(id<Token>)t;
+
+/** Somebody (anybody) looked ahead.  Note that this actually gets
+*  triggered by both LA and LT calls.  The debugger will want to know
+*  which Token object was examined.  Like consumeToken, this indicates
+*  what token was seen at that depth.  A remote debugger cannot look
+*  ahead into a file it doesn't have so LT events must pass the token
+*  even if the info is redundant.
+*/
+- (void) LT:(NSInteger)i foundToken:(id<Token>)t;
+
+/** The parser is going to look arbitrarily ahead; mark this location,
+*  the token stream's marker is sent in case you need it.
+*/
+- (void) mark:(NSInteger)marker;
+
+/** After an arbitrarily long lookahead as with a cyclic DFA (or with
+*  any backtrack), this informs the debugger that the stream should be
+*  rewound to the position associated with marker.
+*/
+- (void) rewind:(NSInteger)marker;
+
+/** Rewind to the input position of the last marker.
+*  Used currently only after a cyclic DFA and just
+*  before starting a sem/syn predicate to get the
+*  input position back to the start of the decision.
+*  Do not "pop" the marker off the state.  mark(i)
+*  and rewind(i) should balance still.
+*/
+- (void) rewind;
+
+- (void) beginBacktrack:(NSInteger)level;
+
+- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful;
+
+/** To watch a parser move through the grammar, the parser needs to
+*  inform the debugger what line/charPos it is passing in the grammar.
+*  For now, this does not know how to switch from one grammar to the
+*  other and back for island grammars etc...
+*
+*  This should also allow breakpoints because the debugger can stop
+*  the parser whenever it hits this line/pos.
+*/
+- (void) locationLine:(NSInteger)line column:(NSInteger)pos;
+
+/** A recognition exception occurred such as NoViableAltException.  I made
+*  this a generic event so that I can alter the exception hierarchy later
+*  without having to alter all the debug objects.
+*
+*  Upon error, the stack of enter rule/subrule must be properly unwound.
+*  If no viable alt occurs it is within an enter/exit decision, which
+*  also must be rewound.  Even the rewind for each mark must be unwound.
+*  In the Java target this is pretty easy using try/finally, if a bit
+*  ugly in the generated code.  The rewind is generated in DFA.predict()
+*  actually so no code needs to be generated for that.  For languages
+*  w/o this "finally" feature (C++?), the target implementor will have
+*  to build an event stack or something.
+*
+*  Across a socket for remote debugging, only the RecognitionException
+*  data fields are transmitted.  The token object or whatever that
+*  caused the problem was the last object referenced by LT.  The
+*  immediately preceding LT event should hold the unexpected Token or
+*  char.
+*
+*  Here is a sample event trace for grammar:
+*
+*  b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
+*    | D
+*    ;
+*
+*  The sequence for this rule (with no viable alt in the subrule) for
+*  input 'c c' (there are 3 tokens) is:
+*
+*		commence
+*		LT(1)
+*		enterRule b
+*		location 7 1
+*		enter decision 3
+*		LT(1)
+*		exit decision 3
+*		enterAlt1
+*		location 7 5
+*		LT(1)
+*		consumeToken [c/<4>,1:0]
+*		location 7 7
+*		enterSubRule 2
+*		enter decision 2
+*		LT(1)
+*		LT(1)
+*		recognitionException NoViableAltException 2 1 2
+*		exit decision 2
+*		exitSubRule 2
+*		beginResync
+*		LT(1)
+*		consumeToken [c/<4>,1:1]
+*		LT(1)
+*		endResync
+*		LT(-1)
+*		exitRule b
+*		terminate
+*/
+- (void) recognitionException:(RecognitionException *)e;
+
+/** Indicates the recognizer is about to consume tokens to resynchronize
+*  the parser.  Any consume events from here until the recovered event
+*  are not part of the parse--they are dead tokens.
+*/
+- (void) beginResync;
+
+/** Indicates that the recognizer has finished consuming tokens in order
+*  to resynchronize.  There may be multiple beginResync/endResync pairs
+*  before the recognizer comes out of errorRecovery mode (in which
+*  multiple errors are suppressed).  This will be useful
+*  in a GUI where you probably want to grey out tokens that are consumed
+*  but not matched to anything in the grammar.  Anything between
+*  a beginResync/endResync pair was tossed out by the parser.
+*/
+- (void) endResync;
+
+/** A semantic predicate was evaluated with this result and action text */
+- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result;
+
+/** Announce that parsing has begun.  Not technically useful except for
+*  sending events over a socket.  A GUI for example will launch a thread
+*  to connect and communicate with a remote parser.  The thread will want
+*  to notify the GUI when a connection is made.  ANTLR parsers
+*  trigger this upon entry to the first rule (the ruleLevel is used to
+*  figure this out).
+*/
+- (void) commence;
+
+/** Parsing is over; successfully or not.  Mostly useful for telling
+*  remote debugging listeners that it's time to quit.  When the rule
+*  invocation level goes to zero at the end of a rule, we are done
+*  parsing.
+*/
+- (void) terminate;
+
+
+// T r e e  P a r s i n g
+
+/** Input for a tree parser is an AST, but we know nothing for sure
+*  about a node except its type and text (obtained from the adaptor).
+*  This is the analog of the consumeToken method.  Again, the ID is
+*  the hashCode usually of the node so it only works if hashCode is
+*  not implemented.  If the type is UP or DOWN, then
+*  the ID is not really meaningful as it's fixed--there is
+*  just one UP node and one DOWN navigation node.
+*/
+- (void) consumeNode:(NSInteger)nodeHash ofType:(NSInteger)type text:(NSString *)text;
+
+/** The tree parser looked ahead.  If the type is UP or DOWN,
+*  then the ID is not really meaningful as it's fixed--there is
+*  just one UP node and one DOWN navigation node.
+*/
+- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
+
+
+// A S T  E v e n t s
+
+/** A nil was created (even nil nodes have a unique ID...
+*  they are not "null" per se).  As of 4/28/2006, this
+*  seems to be uniquely triggered when starting a new subtree
+*  such as when entering a subrule in automatic mode and when
+*  building a tree in rewrite mode.
+*/
+- (void) createNilNode:(unsigned)hash;
+
+/** Announce a new node built from text */
+- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type;
+
+/** Announce a new node built from an existing token */
+- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex;
+
+/** Make a node the new root of an existing root.  See
+*
+*  Note: the newRootID parameter is possibly different
+*  than the TreeAdaptor.becomeRoot() newRoot parameter.
+*  In our case, it will always be the result of calling
+*  TreeAdaptor.becomeRoot() and not root_n or whatever.
+*
+*  The listener should assume that this event occurs
+*  only when the current subrule (or rule) subtree is
+*  being reset to newRootID.
+*
+*/
+- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash;
+
+/** Make childID a child of rootID.
+*  @see org.antlr.runtime.tree.TreeAdaptor.addChild()
+*/
+- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash;
+
+/** Set the token start/stop token index for a subtree root or node */
+- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSUInteger)tokenStartIndex To:(NSUInteger)tokenStopIndex;
+
+- (void) waitForDebuggerConnection;
+
+@end
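+// A minimal conforming listener, sketched for illustration (the class name and
+// NSLog bodies are hypothetical, not part of the runtime).  Since no methods
+// are marked @optional, a real implementation must supply all of them:
+//
+//     @interface TraceListener : NSObject <DebugEventListener>
+//     @end
+//
+//     @implementation TraceListener
+//     - (void) enterRule:(NSString *)ruleName { NSLog(@"enterRule %@", ruleName); }
+//     - (void) exitRule:(NSString *)ruleName  { NSLog(@"exitRule %@", ruleName); }
+//     // ... remaining DebugEventListener methods, typically empty stubs ...
+//     @end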
diff --git a/runtime/ObjC/Framework/DebugEventSocketProxy.h b/runtime/ObjC/Framework/DebugEventSocketProxy.h
new file mode 100644
index 0000000..c40b3b4
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugEventSocketProxy.h
@@ -0,0 +1,112 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Parser.h"
+#import "DebugEventListener.h"
+#import <sys/socket.h>
+#import <netinet/in.h>
+#import <netinet/tcp.h>
+#include <arpa/inet.h>
+
+// default port for ANTLRWorks
+#define DEFAULT_DEBUGGER_PORT 49001
+
+@interface DebugEventSocketProxy : NSObject <DebugEventListener> {
+	int serverSocket;
+	
+	struct sockaddr debugger_sockaddr;
+	socklen_t debugger_socklen;
+	int debuggerSocket;
+	NSFileHandle *debuggerFH;
+	
+	NSString *grammarName;
+	int debuggerPort;
+}
+
+- (id) init;
+- (id) initWithGrammarName:(NSString *)aGrammarName debuggerPort:(NSInteger)aPort;
+- (void) waitForDebuggerConnection;
+- (void) waitForAck;
+- (void) sendToDebugger:(NSString *)message;
+- (void) sendToDebugger:(NSString *)message waitForResponse:(BOOL)wait;
+
+- (NSInteger) serverSocket;
+- (void) setServerSocket: (NSInteger) aServerSocket;
+
+- (NSInteger) debuggerSocket;
+- (void) setDebuggerSocket: (NSInteger) aDebuggerSocket;
+
+- (NSString *) grammarName;
+- (void) setGrammarName: (NSString *) aGrammarName;
+
+- (NSInteger) debuggerPort;
+- (void) setDebuggerPort: (NSInteger) aDebuggerPort;
+
+- (NSString *) escapeNewlines:(NSString *)aString;
+
+#pragma mark -
+
+#pragma mark DebugEventListener Protocol
+- (void) enterRule:(NSString *)ruleName;
+- (void) enterAlt:(NSInteger)alt;
+- (void) exitRule:(NSString *)ruleName;
+- (void) enterSubRule:(NSInteger)decisionNumber;
+- (void) exitSubRule:(NSInteger)decisionNumber;
+- (void) enterDecision:(NSInteger)decisionNumber;
+- (void) exitDecision:(NSInteger)decisionNumber;
+- (void) consumeToken:(id<Token>)t;
+- (void) consumeHiddenToken:(id<Token>)t;
+- (void) LT:(NSInteger)i foundToken:(id<Token>)t;
+- (void) mark:(NSInteger)marker;
+- (void) rewind:(NSInteger)marker;
+- (void) rewind;
+- (void) beginBacktrack:(NSInteger)level;
+- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful;
+- (void) locationLine:(NSInteger)line column:(NSInteger)pos;
+- (void) recognitionException:(RecognitionException *)e;
+- (void) beginResync;
+- (void) endResync;
+- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result;
+- (void) commence;
+- (void) terminate;
+
+
+#pragma mark Tree Parsing
+- (void) consumeNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
+- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text;
+
+
+#pragma mark AST Events
+
+- (void) createNilNode:(unsigned)hash;
+- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type;
+- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex;
+- (void) makeNode:(unsigned)newRootHash parentOf:(unsigned)oldRootHash;
+- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash;
+- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSInteger)tokenStartIndex To:(NSInteger)tokenStopIndex;
+
+@end
diff --git a/runtime/ObjC/Framework/DebugEventSocketProxy.m b/runtime/ObjC/Framework/DebugEventSocketProxy.m
new file mode 100644
index 0000000..4155da2
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugEventSocketProxy.m
@@ -0,0 +1,423 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "DebugEventSocketProxy.h"
+#import "Token+DebuggerSupport.h"
+#include <string.h>
+
+static NSData *newlineData = nil;
+static unsigned lengthOfUTF8Ack = 0;
+
+@implementation DebugEventSocketProxy
+
++ (void) initialize
+{
+	if (!newlineData) newlineData = [@"\n" dataUsingEncoding:NSUTF8StringEncoding];
+	if (!lengthOfUTF8Ack) lengthOfUTF8Ack = [[@"ack\n" dataUsingEncoding:NSUTF8StringEncoding] length];
+}
+
+- (id) init
+{
+	return [self initWithGrammarName:nil debuggerPort:DEFAULT_DEBUGGER_PORT];
+}
+
+- (id) initWithGrammarName:(NSString *)aGrammarName debuggerPort:(NSInteger)aPort
+{
+	self = [super init];
+	if (self) {
+		serverSocket = -1;
+		[self setGrammarName:aGrammarName];
+		if (aPort == -1) aPort = DEFAULT_DEBUGGER_PORT;
+		[self setDebuggerPort:aPort];
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+	if (serverSocket != -1) 
+		shutdown(serverSocket,SHUT_RDWR);
+	serverSocket = -1;
+	[debuggerFH release];
+    [self setGrammarName:nil];
+    [super dealloc];
+}
+
+/* Java stuff
+public void handshake() throws IOException {
+    if ( serverSocket==nil ) {
+        serverSocket = new ServerSocket(port);
+        socket = serverSocket.accept();
+        socket.setTcpNoDelay(true);
+        OutputStream os = socket.getOutputStream();
+        OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
+        out = new PrintWriter(new BufferedWriter(osw));
+        InputStream is = socket.getInputStream();
+        InputStreamReader isr = new InputStreamReader(is, "UTF8");
+        in = new BufferedReader(isr);
+        out.println("ANTLR "+ DebugEventListener.PROTOCOL_VERSION);
+        out.println("grammar \""+ grammarFileName);
+        out.flush();
+        ack();
+    }
+}
+
+- (void) commence
+{
+    // don't bother sending event; listener will trigger upon connection
+}
+
+- (void) terminate
+{
+    [self transmit:@"terminate";
+    [out close];
+    try {
+        [socket close];
+    }
+    catch (IOException *ioe) {
+        ioe.printStackTrace(System.err);
+    }
+}
+
+- (void) ack
+{
+    try {
+        in.readLine();
+    }
+    catch (IOException ioe) {
+        ioe.printStackTrace(System.err);
+    }
+}
+
+protected void transmit(String event) {
+    out.println(event);
+    out.flush();
+    ack();
+}
+*/
+
+- (void) waitForDebuggerConnection
+{
+	if (serverSocket == -1) {
+		serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+		
+		NSAssert1(serverSocket != -1, @"Failed to create debugger socket. %s", strerror(errno));
+		
+		int yes = 1;
+		setsockopt(serverSocket, SOL_SOCKET, SO_KEEPALIVE|SO_REUSEPORT|SO_REUSEADDR|TCP_NODELAY, (void *)&yes, sizeof(NSInteger));
+
+		struct sockaddr_in server_addr;
+		bzero(&server_addr, sizeof(struct sockaddr_in));
+		server_addr.sin_family = AF_INET;
+		server_addr.sin_port = htons([self debuggerPort]);
+		server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
+		NSAssert1( bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(struct sockaddr)) != -1, @"bind(2) failed. %s", strerror(errno));
+
+		NSAssert1(listen(serverSocket,50) == 0, @"listen(2) failed. %s", strerror(errno));
+		
+		NSLog(@"ANTLR waiting for debugger attach (grammar %@)", [self grammarName]);
+		
+		debuggerSocket = accept(serverSocket, &debugger_sockaddr, &debugger_socklen);
+		NSAssert1( debuggerSocket != -1, @"accept(2) failed. %s", strerror(errno));
+		
+		debuggerFH = [[NSFileHandle alloc] initWithFileDescriptor:debuggerSocket];
+		[self sendToDebugger:[NSString stringWithFormat:@"ANTLR %d", DebugProtocolVersion] waitForResponse:NO];
+		[self sendToDebugger:[NSString stringWithFormat:@"grammar \"%@", [self grammarName]] waitForResponse:NO];
+	}
+}
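+// Handshake sketch, as implied by the two sends above (the grammar name shown
+// is hypothetical): the version and grammar lines go out without waiting for a
+// reply, while every later event sent through sendToDebugger: blocks until the
+// debugger answers with "ack\n", e.g.
+//
+//     proxy -> debugger:  ANTLR 1
+//     proxy -> debugger:  grammar "T.g
+//     proxy -> debugger:  enterRule prog
+//     debugger -> proxy:  ack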
+
+- (void) waitForAck
+{
+	NSString *response;
+	@try {
+		NSData *newLine = [debuggerFH readDataOfLength:lengthOfUTF8Ack];
+		response = [[NSString alloc] initWithData:newLine encoding:NSUTF8StringEncoding];
+		if (![response isEqualToString:@"ack\n"]) @throw [NSException exceptionWithName:@"DebugEventSocketProxy" reason:@"illegal response from debugger" userInfo:nil];
+	}
+	@catch (NSException *e) {
+		NSLog(@"socket died or debugger misbehaved: %@ read <%@>", e, response);
+	}
+	@finally {
+		[response release];
+	}
+}
+
+- (void) sendToDebugger:(NSString *)message
+{
+	[self sendToDebugger:message waitForResponse:YES];
+}
+
+- (void) sendToDebugger:(NSString *)message waitForResponse:(BOOL)wait
+{
+	if (! debuggerFH ) return;
+	[debuggerFH writeData:[message dataUsingEncoding:NSUTF8StringEncoding]];
+	[debuggerFH writeData:newlineData];
+	if (wait) [self waitForAck];
+}
+
+- (NSInteger) serverSocket
+{
+    return serverSocket;
+}
+
+- (void) setServerSocket: (NSInteger) aServerSocket
+{
+    serverSocket = aServerSocket;
+}
+
+- (NSInteger) debuggerSocket
+{
+    return debuggerSocket;
+}
+
+- (void) setDebuggerSocket: (NSInteger) aDebuggerSocket
+{
+    debuggerSocket = aDebuggerSocket;
+}
+
+- (NSString *) grammarName
+{
+    return grammarName; 
+}
+
+- (void) setGrammarName: (NSString *) aGrammarName
+{
+    if (grammarName != aGrammarName) {
+        [aGrammarName retain];
+        [grammarName release];
+        grammarName = aGrammarName;
+    }
+}
+
+- (NSInteger) debuggerPort
+{
+    return debuggerPort;
+}
+
+- (void) setDebuggerPort: (NSInteger) aDebuggerPort
+{
+    debuggerPort = aDebuggerPort;
+}
+
+- (NSString *) escapeNewlines:(NSString *)aString
+{
+	NSMutableString *escapedText;
+	if (aString) {
+		escapedText = [NSMutableString stringWithString:aString];
+		NSRange wholeString = NSMakeRange(0,[escapedText length]);
+		[escapedText replaceOccurrencesOfString:@"%" withString:@"%25" options:0 range:wholeString];
+		[escapedText replaceOccurrencesOfString:@"\n" withString:@"%0A" options:0 range:wholeString];
+		[escapedText replaceOccurrencesOfString:@"\r" withString:@"%0D" options:0 range:wholeString];
+	} else {
+		escapedText = [NSMutableString stringWithString:@""];
+	}
+	return escapedText;
+}
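+// Worked example (illustrative): @"a%b\nc" becomes @"a%25b%0Ac", because "%" is
+// escaped first and the newline second, keeping each debugger event on a single
+// line of the wire protocol.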
+
+#pragma mark -
+
+#pragma mark DebugEventListener Protocol
+- (void) enterRule:(NSString *)ruleName
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"enterRule %@", ruleName]];
+}
+
+- (void) enterAlt:(NSInteger)alt
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"enterAlt %d", alt]]; 
+}
+
+- (void) exitRule:(NSString *)ruleName
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"exitRule %@", ruleName]];
+}
+
+- (void) enterSubRule:(NSInteger)decisionNumber
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"enterSubRule %d", decisionNumber]];
+}
+
+- (void) exitSubRule:(NSInteger)decisionNumber
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"exitSubRule %d", decisionNumber]];
+}
+
+- (void) enterDecision:(NSInteger)decisionNumber
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"enterDecision %d", decisionNumber]];
+}
+
+- (void) exitDecision:(NSInteger)decisionNumber
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"exitDecision %d", decisionNumber]];
+}
+
+- (void) consumeToken:(id<Token>)t
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"consumeToken %@", [self escapeNewlines:[t description]]]];
+}
+
+- (void) consumeHiddenToken:(id<Token>)t
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"consumeHiddenToken %@", [self escapeNewlines:[t description]]]];
+}
+
+- (void) LT:(NSInteger)i foundToken:(id<Token>)t
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"LT %d %@", i, [self escapeNewlines:[t description]]]];
+}
+
+- (void) mark:(NSInteger)marker
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"mark %d", marker]];
+}
+- (void) rewind:(NSInteger)marker
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"rewind %d", marker]];
+}
+
+- (void) rewind
+{
+	[self sendToDebugger:@"rewind"];
+}
+
+- (void) beginBacktrack:(NSInteger)level
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"beginBacktrack %d", level]];
+}
+
+- (void) endBacktrack:(NSInteger)level wasSuccessful:(BOOL)successful
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"endBacktrack %d %d", level, successful ? 1 : 0]];
+}
+
+- (void) locationLine:(NSInteger)line column:(NSInteger)pos
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"location %d %d", line, pos]];
+}
+
+- (void) recognitionException:(RecognitionException *)e
+{
+#warning TODO: recognition exceptions
+	// these must use the names of the corresponding Java exception classes, because ANTLRWorks recreates the exception
+	// objects on the Java side.
+	// Write categories for Objective-C exceptions to provide those names
+}
+
+- (void) beginResync
+{
+	[self sendToDebugger:@"beginResync"];
+}
+	
+- (void) endResync
+{
+	[self sendToDebugger:@"endResync"];
+}
+
+- (void) semanticPredicate:(NSString *)predicate matched:(BOOL)result
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"semanticPredicate %d %@", result?1:0, [self escapeNewlines:predicate]]];
+}
+
+- (void) commence
+{
+	// no need to send event
+}
+
+- (void) terminate
+{
+	[self sendToDebugger:@"terminate"];
+	@try {
+		[debuggerFH closeFile];
+	}
+	@finally {
+#warning TODO: make socket handling robust. too lazy now...
+		shutdown(serverSocket,SHUT_RDWR);
+		serverSocket = -1;
+	}
+}
+
+
+#pragma mark Tree Parsing
+- (void) consumeNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"consumeNode %u %d %@",
+		nodeHash,
+		type,
+		[self escapeNewlines:text]
+		]];
+}
+
+- (void) LT:(NSInteger)i foundNode:(unsigned)nodeHash ofType:(NSInteger)type text:(NSString *)text
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"LN %d %u %d %@",
+		i,
+		nodeHash,
+		type,
+		[self escapeNewlines:text]
+		]];
+}
+
+
+#pragma mark AST Events
+
+- (void) createNilNode:(unsigned)hash
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"nilNode %u", hash]];
+}
+
+- (void) createNode:(unsigned)hash text:(NSString *)text type:(NSInteger)type
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"createNodeFromToken %u %d %@", 
+		hash,
+		type,
+		[self escapeNewlines:text]
+		]];
+}
+
+- (void) createNode:(unsigned)hash fromTokenAtIndex:(NSInteger)tokenIndex
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"createNode %u %d", hash, tokenIndex]];
+}
+
+- (void) becomeRoot:(unsigned)newRootHash old:(unsigned)oldRootHash
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"becomeRoot %u %u", newRootHash, oldRootHash]];
+}
+
+- (void) addChild:(unsigned)childHash toTree:(unsigned)treeHash
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"addChild %u %u", treeHash, childHash]];
+}
+
+- (void) setTokenBoundariesForTree:(unsigned)nodeHash From:(NSInteger)tokenStartIndex To:(NSInteger)tokenStopIndex
+{
+	[self sendToDebugger:[NSString stringWithFormat:@"setTokenBoundaries %u %d %d", nodeHash, tokenStartIndex, tokenStopIndex]];
+}
+
+
+
+@end
diff --git a/runtime/ObjC/Framework/DebugParser.h b/runtime/ObjC/Framework/DebugParser.h
new file mode 100644
index 0000000..9f34d96
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugParser.h
@@ -0,0 +1,57 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Parser.h"
+#import "DebugEventSocketProxy.h"
+#import "DebugTokenStream.h"
+
+@interface DebugParser : Parser {
+	id<DebugEventListener> debugListener;
+}
+
++ (id) newDebugParser:(id<TokenStream>)theStream
+        debugListener:(id<DebugEventListener>)debugListener;
+
++ (id) newDebugParser:(id<TokenStream>)theStream
+                state:(RecognizerSharedState *)state;
+
++ (id) newDebugParser:(id<TokenStream>)theStream
+        debugListener:(id<DebugEventListener>)debugListener
+                state:(RecognizerSharedState *)state;
+
+- (id) initWithTokenStream:(id<TokenStream>)theStream;
+- (id) initWithTokenStream:(id<TokenStream>)theStream
+			  debuggerPort:(NSInteger)portNumber;
+// designated initializer
+- (id) initWithTokenStream:(id<TokenStream>)theStream
+			 debugListener:(id<DebugEventListener>)theDebugListener
+			  debuggerPort:(NSInteger)portNumber;
+
+- (id<DebugEventListener>) debugListener;
+- (void) setDebugListener: (id<DebugEventListener>) aDebugListener;
+
+@end
diff --git a/runtime/ObjC/Framework/DebugParser.m b/runtime/ObjC/Framework/DebugParser.m
new file mode 100644
index 0000000..d67e895
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugParser.m
@@ -0,0 +1,113 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "DebugParser.h"
+
+
+@implementation DebugParser
+
+- (id) initWithTokenStream:(id<TokenStream>)theStream
+{
+	return [self initWithTokenStream:theStream debugListener:nil debuggerPort:-1];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)theStream
+			  debuggerPort:(NSInteger)portNumber
+{
+	return [self initWithTokenStream:theStream debugListener:nil debuggerPort:portNumber];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)theStream
+			 debugListener:(id<DebugEventListener>)theDebugListener
+			  debuggerPort:(NSInteger)portNumber
+{
+	id<DebugEventListener,NSObject> debugger = nil;
+	id<TokenStream> tokenStream = nil;
+	if (theDebugListener) {
+		debugger = [(id<DebugEventListener,NSObject>)theDebugListener retain];
+		debugger = theDebugListener;
+	} else {
+		debugger = [[DebugEventSocketProxy alloc] initWithGrammarName:[self grammarFileName] debuggerPort:portNumber];
+	}
+	if (theStream && ![theStream isKindOfClass:[DebugTokenStream class]]) {
+		tokenStream = [[DebugTokenStream alloc] initWithTokenStream:theStream debugListener:debugger];
+	} else {
+		tokenStream = [theStream retain];
+		tokenStream = theStream;
+	}
+	self = [super initWithTokenStream:tokenStream];
+	if (self) {
+		[self setDebugListener:debugger];
+		[debugger release];
+		[tokenStream release];
+		[debugListener waitForDebuggerConnection];
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+    [self setDebugListener: nil];
+    [super dealloc];
+}
+
+- (id<DebugEventListener>) debugListener
+{
+    return debugListener; 
+}
+
+- (void) setDebugListener: (id<DebugEventListener>) aDebugListener
+{
+    if (debugListener != aDebugListener) {
+        [(id<DebugEventListener,NSObject>)aDebugListener retain];
+        [(id<DebugEventListener,NSObject>)debugListener release];
+        debugListener = aDebugListener;
+    }
+}
+
+#pragma mark -
+#pragma mark Overrides
+
+- (void) beginResync
+{
+	[debugListener beginResync];
+}
+
+- (void) endResync
+{
+	[debugListener endResync];
+}
+- (void)beginBacktracking:(NSInteger)level
+{
+	[debugListener beginBacktrack:level];
+}
+
+- (void)endBacktracking:(NSInteger)level wasSuccessful:(BOOL)successful
+{
+	[debugListener endBacktrack:level wasSuccessful:successful];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/DebugTokenStream.h b/runtime/ObjC/Framework/DebugTokenStream.h
new file mode 100644
index 0000000..6a30491
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugTokenStream.h
@@ -0,0 +1,62 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Parser.h"
+#import "TokenStream.h"
+#import "TokenSource.h"
+#import "DebugTokenStream.h"
+#import "DebugEventListener.h"
+
+@interface DebugTokenStream : NSObject <TokenStream>
+{
+	id<DebugEventListener> debugListener;
+	id<TokenStream> input;
+	BOOL initialStreamState;
+    NSInteger lastMarker;
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)theStream debugListener:(id<DebugEventListener>)debugger;
+
+- (id<DebugEventListener>) debugListener;
+- (void) setDebugListener: (id<DebugEventListener>) aDebugListener;
+
+- (id<TokenStream>) input;
+- (void) setInput:(id<TokenStream>)aTokenStream;
+
+- (void) consume;
+- (id<Token>) getToken:(NSInteger)index;
+- (NSInteger) getIndex;
+- (void) release:(NSInteger)marker;
+- (void) seek:(NSInteger)index;
+- (NSInteger) size;
+- (id<TokenSource>) getTokenSource;
+- (NSString *) getSourceName;
+- (NSString *) toString;
+- (NSString *) toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop;
+- (NSString *) toStringFromToken:(CommonToken *)startToken ToToken:(CommonToken *)stopToken;
+
+@end
diff --git a/runtime/ObjC/Framework/DebugTokenStream.m b/runtime/ObjC/Framework/DebugTokenStream.m
new file mode 100644
index 0000000..27790a1
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugTokenStream.m
@@ -0,0 +1,204 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "DebugTokenStream.h"
+
+
+@implementation DebugTokenStream
+
+
+- (id) initWithTokenStream:(id<TokenStream>)theStream debugListener:(id<DebugEventListener>)debugger
+{
+	self = [super init];
+	if (self) {
+		[self setDebugListener:debugger];
+		[self setInput:theStream];
+		[self.input LT:1];	// force reading first on-channel token
+		initialStreamState = YES;
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+    [self setDebugListener:nil];
+    self.input = nil;
+    [super dealloc];
+}
+
+
+- (id<DebugEventListener>) debugListener
+{
+    return debugListener; 
+}
+
+- (void) setDebugListener: (id<DebugEventListener>) aDebugListener
+{
+    if (debugListener != aDebugListener) {
+        [(id<DebugEventListener,NSObject>)aDebugListener retain];
+        [(id<DebugEventListener,NSObject>)debugListener release];
+        debugListener = aDebugListener;
+    }
+}
+
+- (id<TokenStream>) input
+{
+    return input; 
+}
+
+- (void) setInput: (id<TokenStream>) aTokenStream
+{
+    if (input != aTokenStream) {
+        if ( input ) [input release];
+        input = aTokenStream;
+        [input retain];
+    }
+}
+
+- (void) consumeInitialHiddenTokens
+{
+	int firstIdx = input.index;
+	for (int i = 0; i<firstIdx; i++)
+		[debugListener consumeHiddenToken:[input getToken:i]];
+	initialStreamState = NO;
+}
+
+#pragma mark -
+#pragma mark Proxy implementation
+
+// anything else that doesn't have a debugger event associated with it is simply
+// forwarded to the actual token stream
+- (void) forwardInvocation:(NSInvocation *)anInvocation
+{
+	[anInvocation invokeWithTarget:self.input];
+}
+
+- (void) consume
+{
+	if ( initialStreamState )
+		[self consumeInitialHiddenTokens];
+	int a = input.index;
+	id<Token> token = [input LT:1];
+	[input consume];
+	int b = input.index;
+	[debugListener consumeToken:token];
+	if (b > a+1) // must have consumed hidden tokens
+		for (int i = a+1; i < b; i++)
+			[debugListener consumeHiddenToken:[input getToken:i]];
+}
+
+- (NSInteger) mark
+{
+	lastMarker = [input mark];
+	[debugListener mark:lastMarker];
+	return lastMarker;
+}
+
+- (void) rewind
+{
+	[debugListener rewind];
+	[input rewind];
+}
+
+- (void) rewind:(NSInteger)marker
+{
+	[debugListener rewind:marker];
+	[input rewind:marker];
+}
+
+- (id<Token>) LT:(NSInteger)k
+{
+	if ( initialStreamState )
+		[self consumeInitialHiddenTokens];
+	[debugListener LT:k foundToken:[input LT:k]];
+	return [input LT:k];
+}
+
+- (NSInteger) LA:(NSInteger)k
+{
+	if ( initialStreamState )
+		[self consumeInitialHiddenTokens];
+	[debugListener LT:k foundToken:[input LT:k]];
+	return [input LA:k];
+}
+
+- (id<Token>) getToken:(NSInteger)i
+{
+    return [input getToken:i];
+}
+
+- (NSInteger) getIndex
+{
+    return input.index;
+}
+
+- (void) release:(NSInteger) marker
+{
+}
+
+- (void) seek:(NSInteger)index
+{
+    // TODO: implement seek in dbg interface
+    // db.seek(index);
+    [input seek:index];
+}
+
+- (NSInteger) size
+{
+    return [input size];
+}
+
+- (id<TokenSource>) getTokenSource
+{
+    return [input getTokenSource];
+}
+
+- (NSString *) getSourceName
+{
+    return [[input getTokenSource] getSourceName];
+}
+
+- (NSString *) description
+{
+    return [input toString];
+}
+
+- (NSString *) toString
+{
+    return [input toString];
+}
+
+- (NSString *) toStringFromStart:(NSInteger)startIndex ToEnd:(NSInteger)stopIndex
+{
+    return [input toStringFromStart:startIndex ToEnd:stopIndex];
+}
+
+- (NSString *) toStringFromToken:(CommonToken *)startToken ToToken:(CommonToken *)stopToken
+{
+    return [input toStringFromStart:startToken.startIndex ToEnd:stopToken.stopIndex];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/DebugTreeAdaptor.h b/runtime/ObjC/Framework/DebugTreeAdaptor.h
new file mode 100644
index 0000000..ae1a0d6
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugTreeAdaptor.h
@@ -0,0 +1,45 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Parser.h"
+#import "CommonTreeAdaptor.h"
+#import "DebugEventListener.h"
+
+@interface DebugTreeAdaptor : BaseTreeAdaptor {
+	id<DebugEventListener> debugListener;
+	CommonTreeAdaptor *treeAdaptor;
+}
+
+- (id) initWithTreeAdaptor:(CommonTreeAdaptor *)aTreeAdaptor debugListener:(id<DebugEventListener>)aDebugListener;
+
+- (id<DebugEventListener>)debugListener;
+- (void) setDebugListener:(id<DebugEventListener>)aDebugListener;
+
+- (CommonTreeAdaptor *) getTreeAdaptor;
+- (void) setTreeAdaptor:(CommonTreeAdaptor *)aTreeAdaptor;
+
+@end
diff --git a/runtime/ObjC/Framework/DebugTreeAdaptor.m b/runtime/ObjC/Framework/DebugTreeAdaptor.m
new file mode 100644
index 0000000..5ca8df1
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugTreeAdaptor.m
@@ -0,0 +1,229 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "DebugTreeAdaptor.h"
+
+
+@implementation DebugTreeAdaptor
+
+
+- (id) initWithTreeAdaptor:(CommonTreeAdaptor *)aTreeAdaptor debugListener:(id<DebugEventListener>)aDebugListener
+{
+	self = [super init];
+	if (self) {
+		[self setDebugListener:aDebugListener];
+		[self setTreeAdaptor:aTreeAdaptor];
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+    [self setDebugListener: nil];
+    [self setTreeAdaptor: nil];
+    [super dealloc];
+}
+
+- (id<DebugEventListener>) debugListener
+{
+    return debugListener; 
+}
+
+- (void) setDebugListener: (id<DebugEventListener>) aDebugListener
+{
+    if (debugListener != aDebugListener) {
+        [(id<TreeAdaptor,NSObject>)aDebugListener retain];
+        [(id<TreeAdaptor,NSObject>)debugListener release];
+        debugListener = aDebugListener;
+    }
+}
+
+- (CommonTreeAdaptor *) getTreeAdaptor
+{
+    return treeAdaptor; 
+}
+
+- (void) setTreeAdaptor: (CommonTreeAdaptor *) aTreeAdaptor
+{
+    if (treeAdaptor != aTreeAdaptor) {
+        [aTreeAdaptor retain];
+        [treeAdaptor release];
+        treeAdaptor = aTreeAdaptor;
+    }
+}
+
+#pragma mark -
+#pragma mark Proxy implementation
+
+// Anything else that doesn't have a debugger event associated with it is simply
+// forwarded to the underlying tree adaptor.
+- (void) forwardInvocation:(NSInvocation *)anInvocation
+{
+	[anInvocation invokeWithTarget:[self getTreeAdaptor]];
+}
+
+#pragma mark -
+
+#pragma mark Construction
+
+- (id<BaseTree>) newTreeWithToken:(id<Token>) payload
+{
+	id<BaseTree> newTree = [CommonTree newTreeWithToken:payload];
+	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] fromTokenAtIndex:[payload getTokenIndex]];
+	return newTree;
+}
+
+- (id<BaseTree>) emptyTree
+{
+	id<BaseTree> newTree = [treeAdaptor newEmptyTree];
+	[debugListener createNilNode:[treeAdaptor uniqueIdForTree:newTree]];
+	return newTree;
+}
+
+/*	We don't have debug events for those:
+ - (id) copyNode:(id<BaseTree>)aNode
+{
+}
+- (id) copyTree:(id<BaseTree>)aTree
+{
+}
+*/
+
+- (void) addChild:(id<BaseTree>)child toTree:(id<BaseTree>)aTree
+{
+	[treeAdaptor addChild:child toTree:aTree];
+	[debugListener addChild:[treeAdaptor uniqueIdForTree:child] toTree:[self uniqueIdForTree:aTree]];
+}
+
+- (id<BaseTree>) becomeRoot:(id<BaseTree>)newRoot old:(id<BaseTree>)oldRoot
+{
+	id<BaseTree> newTree = [treeAdaptor becomeRoot:newRoot old:oldRoot];
+	[debugListener becomeRoot:[treeAdaptor uniqueIdForTree:newTree] old:[self uniqueIdForTree:oldRoot]];
+	return newTree;
+}
+
+/* handled by forwardInvocation: 
+- (NSUInteger) uniqueIdForTree:(id<BaseTree>)aNode
+{
+}
+*/
+
+#pragma mark Rewrite Rules
+
+ - (void) addTokenAsChild:(id<Token>)child toTree:(id<BaseTree>)aTree
+{
+	id<BaseTree> newChild = [self newTreeWithToken:child];
+	[self addChild:newChild toTree:aTree];
+}
+
+- (id<BaseTree>) makeToken:(id<Token>)newRoot parentOf:(id<BaseTree>)oldRoot
+{
+	id<BaseTree> newNode = [self newTreeWithToken:newRoot];
+	return [self becomeRoot:newNode old:oldRoot];
+}
+
+- (id<BaseTree>) newTreeWithTokenType:(NSInteger)tokenType
+{
+	id<BaseTree> newTree = [treeAdaptor newTreeWithTokenType:tokenType];
+	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] text:nil type:tokenType];
+	return newTree;
+}
+
+- (id<BaseTree>) newTreeWithTokenType:(NSInteger)tokenType text:(NSString *)tokenText
+{
+	id<BaseTree> newTree = [treeAdaptor newTreeWithTokenType:tokenType text:tokenText];
+	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] text:tokenText type:tokenType];
+	return newTree;
+}
+- (id<BaseTree>) newTreeWithToken:(id<Token>)fromToken tokenType:(NSInteger)tokenType
+{
+	id<BaseTree> newTree = [treeAdaptor newTreeWithToken:fromToken tokenType:tokenType];
+	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] text:fromToken.text type:tokenType];
+	return newTree;
+}
+
+- (id<BaseTree>) newTreeWithToken:(id<Token>)fromToken tokenType:(NSInteger)tokenType text:(NSString *)tokenText
+{
+	id<BaseTree> newTree = [treeAdaptor newTreeWithToken:fromToken tokenType:tokenType text:tokenText];
+	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] text:tokenText type:tokenType];
+	return newTree;
+}
+
+- (id<BaseTree>) newTreeWithToken:(id<Token>)fromToken text:(NSString *)tokenText
+{
+	id<BaseTree> newTree = [treeAdaptor newTreeWithToken:fromToken text:tokenText];
+	[debugListener createNode:[treeAdaptor uniqueIdForTree:newTree] text:tokenText type:fromToken.type];
+	return newTree;
+}
+
+#pragma mark Content
+
+/* handled by forwardInvocation:
+- (NSInteger) tokenTypeForNode:(id<BaseTree>)aNode
+{
+}
+ 
+- (void) setTokenType:(NSInteger)tokenType forNode:(id)aNode
+{
+}
+
+- (NSString *) textForNode:(id<BaseTree>)aNode
+{
+}
+ 
+- (void) setText:(NSString *)tokenText forNode:(id<BaseTree>)aNode
+{
+}
+*/
+- (void) setBoundariesForTree:(id<BaseTree>)aTree fromToken:(id<Token>)startToken toToken:(id<Token>)stopToken
+{
+	[treeAdaptor setBoundariesForTree:aTree fromToken:startToken toToken:stopToken];
+	if (aTree && startToken && stopToken) {
+		[debugListener setTokenBoundariesForTree:[aTree hash] From:[startToken getTokenIndex] To:[stopToken getTokenIndex]];
+	}
+}
+/* handled by forwardInvocation:
+- (NSInteger) tokenStartIndexForTree:(id<BaseTree>)aTree
+{
+}
+ 
+- (NSInteger) tokenStopIndexForTree:(id<BaseTree>)aTree
+{
+}
+*/
+
+#pragma mark Navigation / Tree Parsing
+/* handled by forwardInvocation:
+- (id<BaseTree>) childForNode:(id<BaseTree>) aNode atIndex:(NSInteger) i
+{
+}
+ 
+- (NSInteger) childCountForTree:(id<BaseTree>) aTree
+{
+}
+*/
+
+@end
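A hypothetical wiring sketch (not part of the patch; `baseAdaptor` and `listener` are assumed to already exist as a CommonTreeAdaptor and an id<DebugEventListener>):

DebugTreeAdaptor *dbgAdaptor =
    [[DebugTreeAdaptor alloc] initWithTreeAdaptor:baseAdaptor debugListener:listener];
// every tree the parser now builds through dbgAdaptor also reports
// createNode/addChild/becomeRoot events to the listener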
diff --git a/runtime/ObjC/Framework/DebugTreeNodeStream.h b/runtime/ObjC/Framework/DebugTreeNodeStream.h
new file mode 100644
index 0000000..4262505
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugTreeNodeStream.h
@@ -0,0 +1,67 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "DebugEventListener.h"
+#import "TreeAdaptor.h"
+#import "TreeNodeStream.h"
+
+@interface DebugTreeNodeStream : NSObject <TreeNodeStream> {
+	id<DebugEventListener> debugListener;
+	id<TreeAdaptor> treeAdaptor;
+	id<TreeNodeStream> input;
+	BOOL initialStreamState;
+}
+
+- (id) initWithTreeNodeStream:(id<TreeNodeStream>)theStream debugListener:(id<DebugEventListener>)debugger;
+
+- (id<DebugEventListener>) debugListener;
+- (void) setDebugListener: (id<DebugEventListener>) aDebugListener;
+
+- (id<TreeNodeStream>) input;
+- (void) setInput: (id<TreeNodeStream>) aTreeNodeStream;
+
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void) setTreeAdaptor: (id<TreeAdaptor>) aTreeAdaptor;
+
+#pragma mark TreeNodeStream conformance
+
+- (id) LT:(NSInteger)k;
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void) setUniqueNavigationNodes:(BOOL)flag;
+
+#pragma mark IntStream conformance
+- (void) consume;
+- (NSInteger) LA:(NSUInteger) i;
+- (NSUInteger) mark;
+- (NSUInteger) getIndex;
+- (void) rewind:(NSUInteger) marker;
+- (void) rewind;
+- (void) release:(NSUInteger) marker;
+- (void) seek:(NSUInteger) index;
+- (NSUInteger) size;
+
+@end
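A rough usage sketch of the declarations above (assumed setup: `nodes` is an existing id<TreeNodeStream>, `listener` an existing id<DebugEventListener>):

DebugTreeNodeStream *dbgNodes =
    [[DebugTreeNodeStream alloc] initWithTreeNodeStream:nodes debugListener:listener];
// dbgNodes behaves like the wrapped stream, but LT:/consume additionally emit
// node events describing everything the tree parser looks at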
diff --git a/runtime/ObjC/Framework/DebugTreeNodeStream.m b/runtime/ObjC/Framework/DebugTreeNodeStream.m
new file mode 100644
index 0000000..c923f45
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugTreeNodeStream.m
@@ -0,0 +1,175 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "DebugTreeNodeStream.h"
+
+
+@implementation DebugTreeNodeStream
+
+- (id) initWithTreeNodeStream:(id<TreeNodeStream>)theStream debugListener:(id<DebugEventListener>)debugger
+{
+	self = [super init];
+	if (self) {
+		[self setDebugListener:debugger];
+		[self setTreeAdaptor:[theStream treeAdaptor]];
+		[self setInput:theStream];
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+    [self setDebugListener: nil];
+    [self setTreeAdaptor: nil];
+    input = nil;
+    [super dealloc];
+}
+
+- (id<DebugEventListener>) debugListener
+{
+    return debugListener; 
+}
+
+- (void) setDebugListener: (id<DebugEventListener>) aDebugListener
+{
+    if (debugListener != aDebugListener) {
+        [(id<DebugEventListener,NSObject>)aDebugListener retain];
+        [(id<DebugEventListener,NSObject>)debugListener release];
+        debugListener = aDebugListener;
+    }
+}
+
+
+- (id<TreeAdaptor>) getTreeAdaptor
+{
+    return treeAdaptor; 
+}
+
+- (void) setTreeAdaptor: (id<TreeAdaptor>) aTreeAdaptor
+{
+    if (treeAdaptor != aTreeAdaptor) {
+        [(id<TreeAdaptor,NSObject>)aTreeAdaptor retain];
+        [(id<TreeAdaptor,NSObject>)treeAdaptor release];
+        treeAdaptor = aTreeAdaptor;
+    }
+}
+
+
+- (id<TreeNodeStream>) input
+{
+    return input; 
+}
+
+- (void) setInput:(id<TreeNodeStream>) aTreeNodeStream
+{
+    if (input != aTreeNodeStream) {
+        [(id<TreeNodeStream,NSObject>)aTreeNodeStream retain];
+        [input release];
+        input = aTreeNodeStream;
+    }
+}
+
+
+#pragma mark TreeNodeStream conformance
+
+- (id) LT:(NSInteger)k
+{
+	id node = [input LT:k];
+	unsigned hash = [treeAdaptor uniqueIdForTree:node];
+	NSString *text = [treeAdaptor textForNode:node];
+	int type = [treeAdaptor tokenTypeForNode:node];
+	[debugListener LT:k foundNode:hash ofType:type text:text];
+	return node;
+}
+
+- (void) setUniqueNavigationNodes:(BOOL)flag
+{
+	[input setUniqueNavigationNodes:flag];
+}
+
+#pragma mark IntStream conformance
+- (void) consume
+{
+	id node = [input LT:1];
+	[input consume];
+	unsigned hash = [treeAdaptor uniqueIdForTree:node];
+	NSString *theText = [treeAdaptor textForNode:node];
+	int aType = [treeAdaptor tokenTypeForNode:node];
+	[debugListener consumeNode:hash ofType:aType text:theText];
+}
+
+- (NSInteger) LA:(NSUInteger) i
+{
+	id<BaseTree> node = [self LT:i];
+	return node.type;
+}
+
+- (NSUInteger) mark
+{
+	unsigned lastMarker = [input mark];
+	[debugListener mark:lastMarker];
+	return lastMarker;
+}
+
+- (NSUInteger) getIndex
+{
+	return input.index;
+}
+
+- (void) rewind:(NSUInteger) marker
+{
+	[input rewind:marker];
+	[debugListener rewind:marker];
+}
+
+- (void) rewind
+{
+	[input rewind];
+	[debugListener rewind];
+}
+
+- (void) release:(NSUInteger) marker
+{
+	[input release:marker];
+}
+
+- (void) seek:(NSUInteger) index
+{
+	[input seek:index];
+	// todo: seek missing in debug protocol
+}
+
+- (NSUInteger) size
+{
+	return [input size];
+}
+
+- (NSString *) toStringFromToken:(id)startNode ToToken:(id)stopNode
+{
+    return [input toStringFromToken:(id<Token>)startNode ToToken:(id<Token>)stopNode];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/DebugTreeParser.h b/runtime/ObjC/Framework/DebugTreeParser.h
new file mode 100644
index 0000000..0c6a186
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugTreeParser.h
@@ -0,0 +1,52 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "TreeParser.h"
+#import "DebugEventSocketProxy.h"
+#import "DebugTreeNodeStream.h"
+
+@interface DebugTreeParser : TreeParser {
+	id<DebugEventListener> debugListener;
+}
+
+- (id) initWithTreeNodeStream:(id<TreeNodeStream>)theStream;
+- (id) initWithTreeNodeStream:(id<TreeNodeStream>)theStream
+				 debuggerPort:(NSInteger)portNumber;
+	// designated initializer
+- (id) initWithTreeNodeStream:(id<TreeNodeStream>)theStream
+				debugListener:(id<DebugEventListener>)theDebugListener
+				 debuggerPort:(NSInteger)portNumber;
+
+- (id<DebugEventListener>) debugListener;
+- (void) setDebugListener: (id<DebugEventListener>) aDebugListener;
+
+- (void) recoverFromMismatchedToken:(id<IntStream>)inputStream 
+						  exception:(NSException *)e 
+						  tokenType:(TokenType)ttype 
+							 follow:(ANTLRBitSet *)follow;
+
+@end
diff --git a/runtime/ObjC/Framework/DebugTreeParser.m b/runtime/ObjC/Framework/DebugTreeParser.m
new file mode 100644
index 0000000..70b8ac1
--- /dev/null
+++ b/runtime/ObjC/Framework/DebugTreeParser.m
@@ -0,0 +1,128 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "DebugTreeParser.h"
+
+
+@implementation DebugTreeParser
+
+- (id) initWithTreeNodeStream:(id<TreeNodeStream>)theStream
+{
+	return [self initWithTreeNodeStream:theStream debugListener:nil debuggerPort:-1];
+}
+
+- (id) initWithTreeNodeStream:(id<TreeNodeStream>)theStream
+				 debuggerPort:(NSInteger)portNumber
+{
+	return [self initWithTreeNodeStream:theStream debugListener:nil debuggerPort:portNumber];
+}
+
+- (id) initWithTreeNodeStream:(id<TreeNodeStream>)theStream
+				debugListener:(id<DebugEventListener>)theDebugListener
+				 debuggerPort:(NSInteger)portNumber
+{
+	id<DebugEventListener,NSObject> debugger = nil;
+	id<TreeNodeStream> treeNodeStream = nil;
+	if (theDebugListener) {
+		debugger = (id<DebugEventListener>)theDebugListener;
+	} else {
+		debugger = [[DebugEventSocketProxy alloc] initWithGrammarName:[self grammarFileName] debuggerPort:portNumber];
+	}
+	if (theStream && ![theStream isKindOfClass:[DebugTreeNodeStream class]]) {
+		treeNodeStream = [[DebugTreeNodeStream alloc] initWithTreeNodeStream:theStream debugListener:debugger];
+	} else {
+		treeNodeStream = theStream;
+	}
+	self = [super initWithStream:treeNodeStream];
+	if ( self ) {
+		[self setDebugListener:debugger];
+		//[debugger release];
+		//[treeNodeStream release];
+		[debugListener waitForDebuggerConnection];
+	}
+	return self;
+}
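As a hedged example of how this initializer is typically reached (sketch only; `nodes` is assumed to exist, 49100 is an arbitrary port, and the exact connection behaviour belongs to DebugEventSocketProxy):

DebugTreeParser *walker =
    [[DebugTreeParser alloc] initWithTreeNodeStream:nodes debuggerPort:49100];
// no listener was supplied, so a DebugEventSocketProxy is created on port 49100,
// the node stream is wrapped in a DebugTreeNodeStream, and
// waitForDebuggerConnection is invoked so a remote debugger can attach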
+
+- (void) dealloc
+{
+    [self setDebugListener: nil];
+    [super dealloc];
+}
+
+- (id<DebugEventListener>) debugListener
+{
+    return debugListener; 
+}
+
+- (void) setDebugListener: (id<DebugEventListener>) aDebugListener
+{
+    if (debugListener != aDebugListener) {
+        [(id<DebugEventListener,NSObject>)aDebugListener retain];
+        [(id<DebugEventListener,NSObject>)debugListener release];
+        debugListener = aDebugListener;
+    }
+}
+
+#pragma mark -
+#pragma mark Overrides
+
+- (void) beginResync
+{
+	[debugListener beginResync];
+}
+
+- (void) endResync
+{
+	[debugListener endResync];
+}
+- (void)beginBacktracking:(NSInteger)level
+{
+	[debugListener beginBacktrack:level];
+}
+
+- (void)endBacktracking:(NSInteger)level wasSuccessful:(BOOL)successful
+{
+	[debugListener endBacktrack:level wasSuccessful:successful];
+}
+
+- (void) recoverFromMismatchedToken:(id<IntStream>)inputStream 
+						  exception:(NSException *)e 
+						  tokenType:(TokenType)ttype 
+							 follow:(ANTLRBitSet *)follow
+{
+#warning TODO: recoverFromMismatchedToken in debugger
+	[self recoverFromMismatchedToken:inputStream exception:e follow:follow];
+}
+
+- (void) recoverFromMismatchedSet:(id<IntStream>)inputStream
+						exception:(NSException *)e
+						   follow:(ANTLRBitSet *)follow
+{
+#warning TODO: recoverFromMismatchedSet in debugger
+	[super recoverFromMismatchedSet:inputStream];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/DoubleKeyMap.h b/runtime/ObjC/Framework/DoubleKeyMap.h
new file mode 100644
index 0000000..bbaa351
--- /dev/null
+++ b/runtime/ObjC/Framework/DoubleKeyMap.h
@@ -0,0 +1,28 @@
+
+#import "AMutableArray.h"
+#import "AMutableDictionary.h"
+#import "LinkBase.h"
+/**
+ * Sometimes we need to map a key to a value, but the key is two pieces of data.
+ * This nested hash table avoids building a combined key object (and the
+ * allocation that goes with it) on every map access.
+ */
+
+@class AMutableArray;
+
+@interface DoubleKeyMap : LinkBase {
+    AMutableDictionary *data;
+}
+
+- (id) init;
+- (id) setObject:(id)v forKey1:(id)k1 forKey2:(id)k2;
+- (id) objectForKey1:(id)k1 forKey2:(id)k2;
+- (AMutableDictionary *) objectForKey:(id)k1;
+- (NSArray *) valuesForKey:(id)k1;
+- (NSArray *) allKeys1;
+- (AMutableArray *) allKeys2:(id)k1;
+- (NSArray *) values;
+
+@property (retain) AMutableDictionary *data;
+
+@end
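A minimal usage sketch of the two-level lookup described above (illustrative only, not part of the patch):

DoubleKeyMap *map = [[DoubleKeyMap alloc] init];
[map setObject:@"value" forKey1:@"ruleName" forKey2:@"label"];
id v = [map objectForKey1:@"ruleName" forKey2:@"label"];   // @"value"
// no combined "ruleName/label" key object ever has to be built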
diff --git a/runtime/ObjC/Framework/DoubleKeyMap.m b/runtime/ObjC/Framework/DoubleKeyMap.m
new file mode 100644
index 0000000..5d01f6f
--- /dev/null
+++ b/runtime/ObjC/Framework/DoubleKeyMap.m
@@ -0,0 +1,101 @@
+#import "DoubleKeyMap.h"
+
+@implementation DoubleKeyMap
+
+- (id) init
+{
+    self = [super init];
+    if ( self  != nil ) {
+        data = [[AMutableDictionary dictionaryWithCapacity:30] retain];
+    }
+    return self;
+}
+
+- (id) setObject:(id)v forKey1:(id)k1 forKey2:(id)k2
+{
+    AMutableDictionary *data2 = [data objectForKey:k1];
+    id prev = nil;
+    if ( data2 == nil ) {
+        data2 = [AMutableDictionary dictionaryWithCapacity:30];
+        [data setObject:data2 forKey:k1];
+    }
+    else {
+        prev = [data2 objectForKey:k2];
+    }
+    [data2 setObject:v forKey:k2];
+    return prev;
+}
+
+- (id) objectForKey1:(id)k1 forKey2:(id)k2
+{
+    AMutableDictionary *data2 = [data objectForKey:k1];
+    if ( data2 == nil )
+        return nil;
+    return [data2 objectForKey:k2];
+}
+
+- (AMutableDictionary *) objectForKey:(id)k1
+{
+    return [data objectForKey:k1];
+}
+
+
+/**
+ * Get all values associated with primary key
+ */
+- (NSArray *) valuesForKey:(id)k1
+{
+    AMutableDictionary *data2 = [data objectForKey:k1];
+    if ( data2 == nil )
+        return nil;
+    return [data2 allValues];
+}
+
+
+/**
+ * get all primary keys
+ */
+- (NSArray *) allKeys1
+{
+    return [data allKeys];
+}
+
+
+/**
+ * get all secondary keys associated with a primary key
+ */
+- (NSArray *) allKeys2:(id)k1
+{
+    AMutableDictionary * data2 = [data objectForKey:k1];
+    if ( data2 == nil )
+        return nil;
+    return [data2 allKeys];
+}
+
+- (AMutableArray *) values
+{
+//    HashMap *s = [[HashMap newHashMapWithLen:30];
+    AMutableArray *s = [AMutableArray arrayWithCapacity:30];
+    
+    for (AMutableDictionary *k2 in [data allValues]) {
+        
+        for ( NSString *v in [k2 allValues]) {
+            [s addObject:v];
+        }
+        
+    }
+    
+    return s;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in CommonToken" );
+#endif
+    [data release];
+    [super dealloc];
+}
+
+@synthesize data;
+@end
diff --git a/runtime/ObjC/Framework/EarlyExitException.h b/runtime/ObjC/Framework/EarlyExitException.h
new file mode 100644
index 0000000..0849f13
--- /dev/null
+++ b/runtime/ObjC/Framework/EarlyExitException.h
@@ -0,0 +1,39 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RecognitionException.h"
+
+@interface EarlyExitException : RecognitionException {
+	int decisionNumber;
+}
+
++ (EarlyExitException *) newException:(id<IntStream>)anInputStream decisionNumber:(NSInteger)aDecisionNumber;
+- (id) initWithStream:(id<IntStream>)anInputStream decisionNumber:(NSInteger) aDecisionNumber;
+
+@property int decisionNumber;
+@end
+
diff --git a/runtime/ObjC/Framework/EarlyExitException.m b/runtime/ObjC/Framework/EarlyExitException.m
new file mode 100644
index 0000000..bf6fc1b
--- /dev/null
+++ b/runtime/ObjC/Framework/EarlyExitException.m
@@ -0,0 +1,54 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "EarlyExitException.h"
+
+
+@implementation EarlyExitException
+
++ (EarlyExitException *) newException:(id<IntStream>) anInputStream decisionNumber:(NSInteger) aDecisionNumber
+{
+	return [[self alloc] initWithStream:anInputStream decisionNumber:aDecisionNumber];
+}
+
+- (id) initWithStream:(id<IntStream>)anInputStream decisionNumber:(NSInteger) aDecisionNumber
+{
+	if ((self = [super initWithStream:anInputStream]) != nil) {
+		decisionNumber = aDecisionNumber;
+	}
+	return self;
+}
+
+- (NSString *) description
+{
+	NSMutableString *desc = (NSMutableString *)[super description];
+	[desc appendFormat:@" decision:%d", decisionNumber];
+	return desc;
+}
+
+@synthesize decisionNumber;
+@end
+
diff --git a/antlr-3.4/runtime/ObjC/Framework/English.lproj/InfoPlist.strings b/runtime/ObjC/Framework/English.lproj/InfoPlist.strings
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/English.lproj/InfoPlist.strings
rename to runtime/ObjC/Framework/English.lproj/InfoPlist.strings
Binary files differ
diff --git a/runtime/ObjC/Framework/Entry.h b/runtime/ObjC/Framework/Entry.h
new file mode 100644
index 0000000..eabbba2
--- /dev/null
+++ b/runtime/ObjC/Framework/Entry.h
@@ -0,0 +1,46 @@
+
+@class HashTable;
+
+/**
+ * HashTable entry.
+ */
+
+@interface HTEntry : NSObject {
+    HTEntry *next;
+    NSInteger hash;
+    NSString *key;
+    id value;
+}
+
+@property(nonatomic, retain) HTEntry  *next;
+@property(assign)           NSInteger  hash;
+@property(nonatomic, retain) NSString *key;
+@property(nonatomic, retain)        id value;
+
++ (HTEntry *)newEntry:(NSInteger)h key:(NSString *)k value:(id)v next:(HTEntry *) n;
+- (id) init:(NSInteger)h key:(NSString *)k value:(id)v next:(HTEntry *)n;
+- (id) copyWithZone:(NSZone *)zone;
+- (void) setValue:(id)newValue;
+- (BOOL) isEqualTo:(id)o;
+- (NSInteger) hash;
+- (NSString *) description;
+@end
+
+/**
+ * LinkedMap entry.
+ */
+
+@interface LMNode : NSObject {
+    LMNode *next;
+    LMNode *prev;
+    id item;
+}
+
+@property(nonatomic, retain) LMNode *next;
+@property(nonatomic, retain) LMNode *prev;
+@property(nonatomic, retain)      id item;
+
++ (LMNode *) newNode:(LMNode *)aPrev element:(id)anElement next:(LMNode *)aNext;
+- (id) init:(LMNode *)aPrev element:(id)anElement next:(LMNode *)aNext;
+@end
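For illustration only (not part of the patch), a bucket collision chain of two entries could be built like this:

HTEntry *second = [HTEntry newEntry:42 key:@"b" value:@"beta" next:nil];
HTEntry *first  = [HTEntry newEntry:17 key:@"a" value:@"alpha" next:second];
// first.next == second; hash-bucket lookups walk this linked chain on collisions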
+
diff --git a/runtime/ObjC/Framework/Entry.m b/runtime/ObjC/Framework/Entry.m
new file mode 100644
index 0000000..f42190e
--- /dev/null
+++ b/runtime/ObjC/Framework/Entry.m
@@ -0,0 +1,110 @@
+#import "Entry.h"
+#import <Foundation/Foundation.h>
+#import "RuntimeException.h"
+
+@implementation HTEntry
+
+@synthesize next;
+@synthesize hash;
+@synthesize key;
+@synthesize value;
+
++ (id) newEntry:(int)aHash key:(NSString *)aKey value:(id)aValue next:(HTEntry *)aNext
+{
+    return [[HTEntry alloc] init:aHash key:aKey value:aValue next:aNext];
+}
+
+- (id) init:(int)aHash key:(NSString *)aKey value:(id)aValue next:(HTEntry *)aNext
+{
+    if ( (self = [super init]) != nil) {
+        next  = aNext;
+        hash  = aHash;
+        key   = aKey;
+        value = aValue;
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [next release];
+    [key release];
+    [value release];
+    [super dealloc];
+}
+
+
+- (id) copyWithZone:(NSZone *)zone
+{
+    HTEntry *copy = [[HTEntry allocWithZone:zone] init:hash key:key value:value next:next];
+    copy.next  = next;
+    copy.hash  = hash;
+    copy.key   = key;
+    copy.value = value;
+    //    return [[[HTEntry allocWithZone:zone] init:hash key:key value:value next:(next == nil ? nil : (HTEntry *)[next copyWithZone])] autorelease];
+    return copy;
+}
+
+- (void) setValue:(id)aValue
+{
+    if (aValue == nil)
+        @throw [[[NullPointerException alloc] init] autorelease];
+    //    id oldValue = value;
+    value = aValue;
+    //    return oldValue;
+}
+
+- (BOOL) isEqualTo:(id)o
+{
+/*
+    if (!([o conformsToProtocol:@protocol(HTEntry)]))
+        return NO;
+ */
+    HTEntry *e = (HTEntry *)o;
+    return (key == nil ? e.key == nil : [key isEqualTo:e.key]) && (value == nil ? e.value == nil : [value isEqualTo:e.value]);
+}
+
+- (int) hash
+{
+    return hash ^ (value == nil ? 0 : [value hash]);
+}
+
+- (NSString *) description
+{
+    return [NSString stringWithFormat:@"%@ = %@",[key description], [value description]];
+}
+
+@end
+
+@implementation LMNode
+
+@synthesize next;
+@synthesize prev;
+@synthesize item;
+
++ (LMNode *) newNode:(LMNode *)aPrev element:(id)anElement next:(LMNode *)aNext
+{
+    return [[LMNode alloc] init:aPrev element:anElement next:aNext];
+}
+
+- (id) init:(LMNode *)aPrev element:(id)anElement next:(LMNode *)aNext
+{
+    self = [super init];
+    if (self) {
+        item = anElement;
+        next = aNext;
+        prev = aPrev;
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [item release];
+    [next release];
+    [prev release];
+    [super dealloc];
+}
+
+@end
+
diff --git a/runtime/ObjC/Framework/FailedPredicateException.h b/runtime/ObjC/Framework/FailedPredicateException.h
new file mode 100644
index 0000000..05184bc
--- /dev/null
+++ b/runtime/ObjC/Framework/FailedPredicateException.h
@@ -0,0 +1,50 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RecognitionException.h"
+
+
+@interface FailedPredicateException : RecognitionException
+{
+	NSString *predicate;
+	NSString *ruleName;
+}
+
+@property (retain) NSString *predicate;
+@property (retain) NSString *ruleName;
+
++ (FailedPredicateException *) newException:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<IntStream>)theStream;
+- (FailedPredicateException *) initWithRuleName:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<IntStream>)theStream;
+
+#ifdef DONTUSEYET
+- (NSString *) getPredicate;
+- (void) setPredicate:(NSString *)thePredicate;
+- (NSString *) getRuleName;
+- (void) setRuleName:(NSString *)theRuleName;
+#endif
+
+@end
diff --git a/runtime/ObjC/Framework/FailedPredicateException.m b/runtime/ObjC/Framework/FailedPredicateException.m
new file mode 100644
index 0000000..02f9f42
--- /dev/null
+++ b/runtime/ObjC/Framework/FailedPredicateException.m
@@ -0,0 +1,96 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "FailedPredicateException.h"
+
+
+@implementation FailedPredicateException
+
+@synthesize predicate;
+@synthesize ruleName;
+
++ (FailedPredicateException *) newException:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<IntStream>)theStream
+{
+	return [[FailedPredicateException alloc] initWithRuleName:theRuleName predicate:thePredicate stream:theStream];
+}
+
+- (FailedPredicateException *) initWithRuleName:(NSString *)theRuleName predicate:(NSString *)thePredicate stream:(id<IntStream>)theStream
+{
+	if ((self = [super initWithStream:theStream])) {
+		[self setPredicate:thePredicate];
+		[self setRuleName:theRuleName];
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in FailedPredicateException" );
+#endif
+	[self setPredicate:nil];
+	[self setRuleName:nil];
+	[super dealloc];
+}
+
+- (NSString *) description
+{
+	NSMutableString *desc = (NSMutableString *)[super description];
+	[desc appendFormat:@" rule: %@ predicate failed: %@", ruleName, predicate];
+	return desc;
+}
+
+#ifdef DONTUSEYET
+- (NSString *) getPredicate
+{
+	return predicate;
+}
+
+- (void) setPredicate:(NSString *)thePredicate
+{
+	if (thePredicate != predicate) {
+		[thePredicate retain];
+		if ( predicate ) [predicate release];
+		predicate = thePredicate;
+	}
+}
+
+- (NSString *) getRuleName
+{
+	return ruleName;
+}
+
+- (void) setRuleName:(NSString *)theRuleName
+{
+	if (theRuleName != ruleName) {
+		[theRuleName retain];
+		if ( ruleName ) [ruleName release];
+		ruleName = theRuleName;
+	}
+}
+#endif
+
+@end
diff --git a/runtime/ObjC/Framework/FastQueue.h b/runtime/ObjC/Framework/FastQueue.h
new file mode 100644
index 0000000..1ac6d1a
--- /dev/null
+++ b/runtime/ObjC/Framework/FastQueue.h
@@ -0,0 +1,64 @@
+//
+//  FastQueue.h
+//  ANTLR
+//
+//  Created by Ian Michell on 26/04/2010.
+// [The "BSD licence"]
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "AMutableArray.h"
+
+@interface FastQueue : NSObject <NSCopying>
+{
+    __strong AMutableArray *data;
+    NSUInteger p;
+    NSInteger range;
+}
+
+@property (retain) AMutableArray *data;
+@property (assign) NSUInteger p;
+@property (assign) NSInteger range;
+
++ (id) newFastQueue;
+
+- (id) init;
+
+- (id) copyWithZone:(NSZone *)aZone;
+
+- (void) reset;
+- (id) remove;
+- (void) addObject:(id) obj;
+- (NSUInteger) count;
+- (NSUInteger) size;
+- (NSInteger) range;
+- (id) head;
+- (id) objectAtIndex:(NSInteger) i;
+- (void) clear;
+- (NSString *) toString;
+- (NSString *) description;
+
+@end
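A small usage sketch (assumed, not part of the patch) of the FIFO behaviour declared above:

FastQueue *q = [FastQueue newFastQueue];
[q addObject:@"a"];
[q addObject:@"b"];
id head = [q remove];              // @"a"; the read pointer p advances instead of shifting data
NSUInteger remaining = [q size];   // 1; the backing array is cleared only once fully drained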
diff --git a/runtime/ObjC/Framework/FastQueue.m b/runtime/ObjC/Framework/FastQueue.m
new file mode 100644
index 0000000..c4c3dd7
--- /dev/null
+++ b/runtime/ObjC/Framework/FastQueue.m
@@ -0,0 +1,174 @@
+//
+//  FastQueue.m
+//  ANTLR
+//
+//  Created by Ian Michell on 26/04/2010.
+// [The "BSD licence"]
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "FastQueue.h"
+#import "ANTLRError.h"
+#import "RuntimeException.h"
+
+@implementation FastQueue
+
+//@synthesize pool;
+@synthesize data;
+@synthesize p;
+@synthesize range;
+
++ (id) newFastQueue
+{
+    return [[FastQueue alloc] init];
+}
+
+- (id) init
+{
+	self = [super init];
+	if ( self != nil ) {
+		data = [[AMutableArray arrayWithCapacity:100] retain];
+		p = 0;
+		range = -1;
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in FastQueue" );
+#endif
+	if ( data ) [data release];
+	[super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    FastQueue *copy;
+    
+    copy = [[[self class] allocWithZone:aZone] init];
+    copy.data = [data copyWithZone:nil];
+    copy.p = p;
+    copy.range = range;
+    return copy;
+}
+
+// FIXME: mirrors the Java runtime, where reset simply delegates to clear; it may be redundant here as well.
+- (void) reset
+{
+	[self clear];
+}
+
+- (void) clear
+{
+	p = 0;
+    if ( [data count] )
+        [data removeAllObjects];
+}
+
+- (id) remove
+{
+	id obj = [self objectAtIndex:0];
+	p++;
+	// check to see if we have hit the end of the buffer
+	if ( p == [data count] ) {
+		// if we have, then we need to clear it out
+		[self clear];
+	}
+	return obj;
+}
+
+- (void) addObject:(id) obj
+{
+    [data addObject:obj];
+}
+
+- (NSUInteger) count
+{
+	return [data count];
+}
+
+- (NSUInteger) size
+{
+	return [data count] - p;
+}
+
+- (NSInteger) range
+{
+    return range;
+}
+
+- (id) head
+{
+	return [self objectAtIndex:0];
+}
+
+- (id) objectAtIndex:(NSInteger) i
+{
+    NSInteger absIndex;
+
+    absIndex = p + i;
+	if ( absIndex >= [data count] ) {
+		@throw [NoSuchElementException newException:[NSString stringWithFormat:@"queue index %d > last index %d", absIndex, [data count]-1]];
+	}
+	if ( absIndex < 0 ) {
+	    @throw [NoSuchElementException newException:[NSString stringWithFormat:@"queue index %d < 0", absIndex]];
+	}
+	if ( absIndex > range ) range = absIndex;
+	return [data objectAtIndex:absIndex];
+}
+
+- (NSString *) toString
+{
+    return [self description];
+}
+
+- (NSString *) description
+{
+	NSMutableString *buf = [NSMutableString stringWithCapacity:30];
+	NSInteger n = [self size];
+	for (NSInteger i = 0; i < n; i++) {
+		[buf appendString:[[self objectAtIndex:i] description]];
+		if ((i + 1) < n) {
+			[buf appendString:@" "];
+		}
+	}
+	return buf;
+}
+
+#ifdef DONTUSENOMO
+- (NSAutoreleasePool *)getPool
+{
+    return pool;
+}
+
+- (void)setPool:(NSAutoreleasePool *)aPool
+{
+    pool = aPool;
+}
+#endif
+
+@end
diff --git a/runtime/ObjC/Framework/HashMap.h b/runtime/ObjC/Framework/HashMap.h
new file mode 100644
index 0000000..eb9ae55
--- /dev/null
+++ b/runtime/ObjC/Framework/HashMap.h
@@ -0,0 +1,326 @@
+//
+//  HashMap.h
+//  ANTLR
+//
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "AMutableArray.h"
+#import "AMutableDictionary.h"
+#import "ArrayIterator.h"
+#import "LinkBase.h"
+#import "MapElement.h"
+#import "PtrBuffer.h"
+
+#define GLOBAL_SCOPE       0
+#define LOCAL_SCOPE        1
+#define HASHSIZE         101
+#define HBUFSIZE      0x2000
+
+@class HashMap;
+
+/**
+ * HashMap entry.
+ */
+
+@interface HMEntry : NSObject {
+    HMEntry  *next;
+    NSInteger hash;
+    NSString *key;
+    id value;
+}
+
+@property(nonatomic, retain) HMEntry  *next;
+@property(assign)            NSInteger  hash;
+@property(nonatomic, retain) NSString *key;
+@property(nonatomic, retain) id value;
+
++ (HMEntry *)newEntry:(NSInteger)h key:(NSString *)k value:(id)v next:(HMEntry *) n;
+- (id) init:(NSInteger)h key:(NSString *)k value:(id)v next:(HMEntry *)n;
+- (void) setValue:(id)newValue;
+- (BOOL) isEqualTo:(id)o;
+- (NSInteger) hashCode;
+- (NSString *) description;
+- (void) recordAccess:(HashMap *)m;
+- (void) recordRemoval:(HashMap *)m;
+@end
+
+@interface HashIterator : ArrayIterator {
+    HMEntry  *next;
+    NSInteger expectedModCount;
+    NSInteger idx;
+    HMEntry  *current;
+    HashMap  *hm;
+}
+
++ (HashIterator *) newIterator:(HashMap *)aHM;
+
+- (id) init:(HashMap *)aHM;
+- (BOOL) hasNext;
+- (HMEntry *) next;
+- (void) remove;
+@end
+
+@interface HMEntryIterator : HashIterator
+{
+}
+
++ (HMEntryIterator *)newIterator:(HashMap *)aHM;
+
+- (id) init:(HashMap *)aHM;
+- (HMEntry *) next;
+@end
+
+@interface HMValueIterator : HashIterator
+{
+}
+
++ (HMValueIterator *)newIterator:(HashMap *)aHM;
+
+- (id) init:(HashMap *)aHM;
+- (id) next;
+@end
+
+@interface HMKeyIterator : HashIterator
+{
+}
+
++ (HMKeyIterator *)newIterator:(HashMap *)aHM;
+
+- (id) init:(HashMap *)aHM;
+- (NSString *) next;
+@end
+
+@interface HMKeySet : NSSet
+{
+    HashMap *hm;
+    AMutableArray *anArray;
+}
+
+@property (retain) HashMap *hm;
+@property (retain) AMutableArray *anArray;
+
++ (HMKeySet *)newKeySet:(HashMap *)aHM;
+
+- (id) init:(HashMap *)aHM;
+- (HashIterator *) iterator;
+- (NSUInteger) count;
+- (BOOL) contains:(id)o;
+- (BOOL) remove:(id)o;
+- (void) clear;
+- (AMutableArray *)toArray;
+@end
+
+@interface Values : PtrBuffer
+{
+    HashMap *hm;
+    AMutableArray *anArray;
+}
+
+@property (retain) HashMap *hm;
+@property (retain) AMutableArray *anArray;
+
++ (Values *)newValueSet:(HashMap *)aHM;
+
+- (id) init:(HashMap *)aHM;
+- (HashIterator *) iterator;
+- (NSUInteger) count;
+- (BOOL) contains:(id)o;
+- (void) clear;
+- (AMutableArray *)toArray;
+@end
+
+@interface HMEntrySet : NSSet
+{
+    HashMap *hm;
+    AMutableArray *anArray;
+}
+
+@property (retain) HashMap *hm;
+@property (retain) AMutableArray *anArray;
+
++ (HMEntrySet *)newEntrySet:(HashMap *)aHM;
+
+- (id) init:(HashMap *)aHM;
+- (HashIterator *) iterator;
+- (BOOL) contains:(id)o;
+- (BOOL) remove:(id)o;
+- (NSUInteger) count;
+- (void) clear;
+- (NSArray *)toArray;
+@end
+
+@interface HashMap : LinkBase {
+    //    TStringPool *fPool;
+    NSInteger Scope;
+    NSInteger LastHash;
+    NSInteger BuffSize;
+    NSInteger Capacity;
+    /**
+     * The number of key-value mappings contained in this map.
+     */
+    NSUInteger count;
+    NSUInteger ptr;
+    __strong NSMutableData *buffer;
+    __strong MapElement **ptrBuffer;
+    NSInteger mode;
+    /**
+     * The table, resized as necessary. Length MUST Always be a power of two.
+     */
+//    AMutableArray *table;
+    
+    /**
+     * The next size value at which to resize (capacity * load factor).
+     * @serial
+     */
+    NSInteger threshold;
+    
+    /**
+     * The load factor for the hash table.
+     * 
+     * @serial
+     */
+    float loadFactor;
+    /**
+     * The number of times this HashMap has been structurally modified.
+     * Structural modifications are those that change the number of mappings in
+     * the HashMap or otherwise modify its internal structure (e.g.,
+     * rehash).  This field is used to make iterators on Collection-views of
+     * the HashMap fail-fast.  (See ConcurrentModificationException).
+     */
+    NSInteger modCount;
+    HMEntrySet *entrySet;
+    BOOL empty;
+    HMKeySet *keySet;
+    Values *values;
+}
+
+//@property (copy) TStringPool *fPool;
+@property (getter=getScope, setter=setScope:) NSInteger Scope;
+@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
+
+@property (getter=getMode,setter=setMode:) NSInteger mode;
+@property (assign) NSInteger BuffSize;
+@property (assign) NSInteger Capacity;
+@property (getter=getCount, setter=setCount:) NSUInteger count;
+@property (assign) NSUInteger ptr;
+@property (retain, getter=getBuffer, setter=setBuffer:) NSMutableData *buffer;
+@property (assign, getter=getPtrBuffer, setter=setPtrBuffer:) MapElement **ptrBuffer;
+@property (assign) NSInteger threshold;
+@property (assign) float loadFactor;
+@property (assign) NSInteger modCount;
+@property (retain) HMEntrySet *entrySet;
+@property (nonatomic, readonly) BOOL empty;
+@property (retain) HMKeySet *keySet;
+@property (retain) Values *values;
+
+// Construction/Destruction
++ (id) newHashMap;
++ (id) newHashMap:(NSInteger)anInitialCapacity loadFactor:(float)loadFactor;
++ (id) newHashMap:(NSInteger)anInitialCapacity;
++ (id) newHashMapWithLen:(NSInteger)aBuffSize;
+- (id) init;
+- (id) initWithLen:(NSInteger)aBuffSize;
+- (id) init:(NSInteger)anInitialCapacity;
+- (id) init:(NSInteger)anInitialCapacity loadFactor:(float)loadFactor;
+- (id) initWithM:(HashMap *)m;
+- (void)dealloc;
+- (HashMap *)PushScope:( HashMap **)map;
+- (HashMap *)PopScope:( HashMap **)map;
+
+- (NSUInteger)count;
+- (NSInteger)size;
+
+// Instance Methods
+/*    form hash value for string s */
+- (NSInteger)hash:(NSString *)s;
+- (NSInteger)hashInt:(NSInteger)anInt;
+- (NSInteger) indexFor:(NSInteger)h length:(NSInteger)length;
+/*   look for s in ptrBuffer  */
+- (HashMap *)findscope:(NSInteger)level;
+/*   look for s in ptrBuffer  */
+- (id)lookup:(NSString *)s Scope:(NSInteger)scope;
+/*   look for s in ptrBuffer  */
+- (id)install:(MapElement *)sym Scope:(NSInteger)scope;
+/*   look for s in ptrBuffer  */
+- (void)deleteHashMap:(MapElement *)np;
+- (NSInteger)RemoveSym:(NSString *)s;
+- (void)delete_chain:(MapElement *)np;
+#ifdef DONTUSEYET
+- (int)bld_symtab:(KW_TABLE *)toknams;
+#endif
+- (MapElement **)getptrBuffer;
+- (MapElement *)getptrBufferEntry:(NSInteger)idx;
+- (void)setptrBuffer:(MapElement *)np Index:(NSInteger)idx;
+- (NSInteger)getScope;
+- (void)setScope:(NSInteger)i;
+- (MapElement *)getTType:(NSString *)name;
+- (MapElement *)getNameInList:(NSInteger)ttype;
+- (void)putNode:(NSString *)name TokenType:(NSInteger)ttype;
+- (NSInteger)getMode;
+- (void)setMode:(NSInteger)aMode;
+- (void) insertObject:(id)aRule atIndex:(NSInteger)idx;
+- (id) objectAtIndex:(NSInteger)idx;
+- (void) setObject:(id)aRule atIndex:(NSInteger)idx;
+- (void)addObject:(id)anObject;
+- (MapElement *) getName:(NSString *)aName;
+- (void) putName:(NSString *)name Node:(id)aNode;
+
+- (NSEnumerator *)objectEnumerator;
+- (BOOL) hasNext;
+- (MapElement *)nextObject;
+
+- (NSUInteger) count;
+- (id) get:(NSString *)key;
+- (id) getForNullKey;
+- (BOOL) containsKey:(NSString *)key;
+- (HMEntry *) getEntry:(NSString *)key;
+- (id) put:(NSString *)key value:(id)value;
+- (id) putForNullKey:(id)value;
+- (void) putForCreate:(NSString *)key value:(id)value;
+- (void) putAllForCreate:(HashMap *)m;
+- (void) resize:(NSInteger)newCapacity;
+- (void) transfer:(NSArray *)newTable;
+- (void) putAll:(HashMap *)m;
+- (id) remove:(NSString *)key;
+- (HMEntry *) removeEntryForKey:(NSString *)key;
+- (HMEntry *) removeMapping:(id)o;
+- (void) clear;
+- (BOOL) containsValue:(id)value;
+- (id) copyWithZone:(NSZone *)zone;
+- (NSString *) description;
+- (void) addEntry:(NSInteger)hash key:(NSString *)key value:(id)value bucketIndex:(NSInteger)bucketIndex;
+- (void) createEntry:(NSInteger)hash key:(NSString *)key value:(id)value bucketIndex:(NSInteger)bucketIndex;
+- (HMKeyIterator *) newKeyIterator;
+- (HMValueIterator *) newValueIterator;
+- (HMEntryIterator *) newEntryIterator;
+- (HMKeySet *) keySet;
+- (Values *) values;
+- (HMEntrySet *) entrySet;
+- (NSInteger) capacity;
+- (float) loadFactor;
+
+@end
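+
+/* Usage sketch (not part of the runtime API surface; a minimal, hedged example
+ * of the dictionary-style methods declared above, assuming the MRC memory
+ * management used throughout this framework):
+ *
+ *     HashMap *map = [HashMap newHashMap];   // default capacity and load factor
+ *     [map put:@"type" value:@"INT"];        // returns the previous value or nil
+ *     id v = [map get:@"type"];              // @"INT"
+ *     BOOL has = [map containsKey:@"type"];  // YES
+ *     [map remove:@"type"];                  // returns the removed value
+ *     NSUInteger n = [map count];            // number of mappings
+ *     [map release];
+ */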
diff --git a/runtime/ObjC/Framework/HashMap.m b/runtime/ObjC/Framework/HashMap.m
new file mode 100644
index 0000000..2755953
--- /dev/null
+++ b/runtime/ObjC/Framework/HashMap.m
@@ -0,0 +1,1786 @@
+//
+//  HashMap.m
+//  ANTLR
+//
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#define SUCCESS (0)
+#define FAILURE (-1)
+
+#import "HashMap.h"
+#import "AMutableArray.h"
+#import "RuntimeException.h"
+
+extern NSInteger max(NSInteger a, NSInteger b);
+
+static NSInteger itIndex;
+
+@implementation HMEntry
+
+@synthesize next;
+@synthesize hash;
+@synthesize key;
+@synthesize value;
+
+/**
+ * Creates new entry.
+ */
++ (HMEntry *)newEntry:(NSInteger)h key:(NSString *)k value:(id)v next:(HMEntry *) n
+{
+    return [[HMEntry alloc] init:h key:k value:v next:n];
+}
+
+- (id) init:(NSInteger)h key:(NSString *)k value:(id)v next:(HMEntry *)n
+{
+    self = [super init];
+    if ( self ) {
+        value = v;
+        next = n;
+        key = k;
+        hash = h;
+    }
+    return self;
+}
+
+- (void) setValue:(id)newValue
+{
+    value = newValue;
+    //    return oldValue;
+}
+
+- (BOOL) isEqualTo:(id)o
+{
+    /*
+     if (!([o conformsToProtocol:@protocol(HMEntry)]))
+     return NO;
+     */
+    HMEntry *e = (HMEntry *)o;
+    NSString *k1 = [self key];
+    NSString *k2 = [e key];
+    if (k1 == k2 || (k1 != nil && [k1 isEqualTo:k2])) {
+        id v1 = [self value];
+        id v2 = [e value];
+        if (v1 == v2 || (v1 != nil && [v1 isEqualTo:v2]))
+            return YES;
+    }
+    return NO;
+}
+
+- (NSInteger) hashCode
+{
+    return (key == nil ? 0 : [key hash]) ^ (value == nil ? 0 : [value hash]);
+}
+
+- (NSString *) description
+{
+    return [NSString stringWithFormat:@"%@ = %@",[key description], [value description]];
+}
+
+
+/**
+ * This method is invoked whenever the value in an entry is
+ * overwritten by an invocation of put(k,v) for a key k that's already
+ * in the HashMap.
+ */
+- (void) recordAccess:(HashMap *)m
+{
+}
+
+
+/**
+ * This method is invoked whenever the entry is
+ * removed from the table.
+ */
+- (void) recordRemoval:(HashMap *)m
+{
+}
+
+- (void) dealloc
+{
+    [key release];
+    [value release];
+    [next release];
+    [super dealloc];
+}
+
+@end
+
+@implementation HashIterator
+
++ (HashIterator *)newIterator:(HashMap *)aHM
+{
+    return [[HashIterator alloc] init:aHM];
+}
+
+- (id) init:(HashMap *)aHM
+{
+    self = [super init];
+    if ( self ) {
+        hm = aHM;
+        expectedModCount = hm.modCount;
+        if ( hm.count > 0 ) {
+            // advance to the first non-empty bucket
+            while ( idx < [hm capacity] ) {
+                next = (HMEntry *)hm.ptrBuffer[idx++];
+                if ( next != nil )
+                    break;
+            }
+        }
+    }
+    return self;
+}
+
+- (BOOL) hasNext
+{
+    return next != nil;
+}
+
+- (HMEntry *) next
+{
+//    if (hm.modCount != expectedModCount)
+//        @throw [[ConcurrentModificationException alloc] init];
+    HMEntry *e = next;
+    if (e == nil)
+        @throw [[NoSuchElementException alloc] init];
+    if ((next = e.next) == nil) {
+        // advance to the next non-empty bucket
+        while ( idx < [hm capacity] ) {
+            next = (HMEntry *)hm.ptrBuffer[idx++];
+            if ( next != nil )
+                break;
+        }
+    }
+    current = e;
+    return e;
+}
+
+- (void) remove
+{
+    if (current == nil)
+        @throw [[IllegalStateException alloc] init];
+//    if (modCount != expectedModCount)
+//        @throw [[ConcurrentModificationException alloc] init];
+    NSString *k = current.key;
+    current = nil;
+    [hm removeEntryForKey:k];
+    expectedModCount = hm.modCount;
+}
+
+- (void) dealloc
+{
+    [next release];
+    [current release];
+    [super dealloc];
+}
+
+@end
+
+@implementation HMValueIterator
+
++ (HMValueIterator *)newIterator:(HashMap *)aHM
+{
+    return [[HMValueIterator alloc] init:aHM];
+}
+
+- (id) init:(HashMap *)aHM
+{
+    self = [super init:aHM];
+    if ( self ) {
+    }
+    return self;
+}
+
+- (id) next
+{
+    return [super next].value;
+}
+
+@end
+
+@implementation HMKeyIterator
+
++ (HMKeyIterator *)newIterator:(HashMap *)aHM
+{
+    return [[HMKeyIterator alloc] init:aHM];
+}
+
+- (id) init:(HashMap *)aHM
+{
+    self = [super init:aHM];
+    if ( self ) {
+    }
+    return self;
+}
+
+- (NSString *) next
+{
+    return [super next].key;
+}
+
+@end
+
+@implementation HMEntryIterator
+
++ (HMEntryIterator *)newIterator:(HashMap *)aHM
+{
+    return [[HMEntryIterator alloc] init:aHM];
+}
+
+- (id) init:(HashMap *)aHM
+{
+    self = [super init:aHM];
+    if ( self ) {
+    }
+    return self;
+}
+
+- (HMEntry *) next
+{
+    return [super next];
+}
+
+@end
+
+@implementation HMKeySet
+
+@synthesize hm;
+@synthesize anArray;
+
++ (HMKeySet *)newKeySet:(HashMap *)aHM
+{
+    return [[HMKeySet alloc] init:(HashMap *)aHM];
+}
+
+- (id) init:(HashMap *)aHM
+{
+    self = [super init];
+    if ( self ) {
+        hm = aHM;
+        anArray = [[AMutableArray arrayWithCapacity:16] retain];
+        HMKeyIterator *it = [hm newKeyIterator];
+        while ( [it hasNext] ) {
+            NSString *aKey = [it next];
+            [anArray addObject:aKey];
+        }
+    }
+    return self;
+}
+
+- (HashIterator *) iterator
+{
+    return [HMKeyIterator newIterator:hm];
+}
+
+- (NSUInteger) count
+{
+    return hm.count;
+}
+
+- (BOOL) contains:(id)o
+{
+    return [hm containsKey:o];
+}
+
+- (BOOL) remove:(id)o
+{
+    return [hm removeEntryForKey:o] != nil;
+}
+
+- (void) clear {
+    [hm clear];
+}
+
+- (AMutableArray *)toArray
+{
+    return anArray;
+}
+
+@end
+
+@implementation Values
+
+@synthesize hm;
+@synthesize anArray;
+
++ (Values *)newValueSet:(HashMap *)aHM
+{
+    return [[Values alloc] init:aHM];
+}
+
+- (id) init:(HashMap *)aHM
+{
+    self = [super init];
+    if ( self ) {
+        hm = aHM;
+        anArray = [[AMutableArray arrayWithCapacity:16] retain];
+        HMValueIterator *it = [hm newValueIterator];
+        while ( [it hasNext] ) {
+            id aValue = [it next];
+            [anArray addObject:aValue];
+        }
+    }
+    return self;    
+}
+
+- (ArrayIterator *) iterator
+{
+    return [HMValueIterator newIterator:hm];
+}
+
+- (NSUInteger) count
+{
+    return hm.count;
+}
+
+- (BOOL) contains:(id)o
+{
+    return [hm containsValue:o];
+}
+
+- (void) clear {
+    [hm clear];
+}
+
+- (AMutableArray *)toArray
+{
+    return anArray;
+}
+
+@end
+
+@implementation HMEntrySet
+
+@synthesize hm;
+@synthesize anArray;
+
++ (HMEntrySet *)newEntrySet:(HashMap *)aHM
+{
+    return [[HMEntrySet alloc] init:aHM];
+}
+
+- (id) init:(HashMap *)aHM
+{
+    self = [super init];
+    if ( self ) {
+        hm = aHM;
+        anArray = [[AMutableArray arrayWithCapacity:16] retain];
+        HMEntryIterator *it = [hm newEntryIterator];
+        while ( [it hasNext] ) {
+            HMEntry *entry = [it next];
+            [anArray addObject:entry];
+        }
+    }
+    return self;
+}
+
+- (HashIterator *) iterator
+{
+    return [HMEntryIterator newIterator:hm];
+}
+
+- (BOOL) contains:(id)o
+{
+/*
+    if (!([o conformsToProtocol:@protocol(HMEntry)]))
+        return NO;
+ */
+    HMEntry *e = (HMEntry *)o;
+    HMEntry *candidate = [hm getEntry:e.key];
+    return candidate != nil && [candidate isEqualTo:e];
+}
+
+- (BOOL) remove:(id)o
+{
+    return [hm removeMapping:o] != nil;
+}
+
+- (NSUInteger) count
+{
+    return hm.count;
+}
+
+- (void) clear
+{
+    [hm clear];
+}
+
+- (NSArray *)toArray
+{
+    return anArray;
+}
+
+@end
+
+/**
+ * The default initial capacity - MUST be a power of two.
+ */
+NSInteger const DEFAULT_INITIAL_CAPACITY = 16;
+
+/**
+ * The maximum capacity, used if a higher value is implicitly specified
+ * by either of the constructors with arguments.
+ * MUST be a power of two <= 1<<30.
+ */
+NSInteger const MAXIMUM_CAPACITY = 1 << 30;
+
+/**
+ * The load factor used when none specified in constructor.
+ */
+float const DEFAULT_LOAD_FACTOR = 0.75f;
+//long const serialVersionUID = 362498820763181265L;
+
+/*
+ * Start of HashMap
+ */
+@implementation HashMap
+
+@synthesize Scope;
+@synthesize LastHash;
+@synthesize BuffSize;
+@synthesize Capacity;
+@synthesize count;
+@synthesize ptr;
+@synthesize ptrBuffer;
+@synthesize buffer;
+@synthesize threshold;
+@synthesize loadFactor;
+@synthesize modCount;
+@synthesize entrySet;
+@synthesize empty;
+@synthesize keySet;
+@synthesize values;
+
++(id)newHashMap
+{
+    return [[HashMap alloc] init];
+}
+
++(id)newHashMapWithLen:(NSInteger)aBuffSize
+{
+    return [[HashMap alloc] initWithLen:aBuffSize];
+}
+
++ (id) newHashMap:(NSInteger)initialCapacity
+{
+    return [[HashMap alloc] init:initialCapacity loadFactor:DEFAULT_LOAD_FACTOR];
+}
+
++ (id) newHashMap:(NSInteger)initialCapacity loadFactor:(float)aLoadFactor
+{
+    return [[HashMap alloc] init:initialCapacity loadFactor:aLoadFactor];
+}
+
+/**
+ * Constructs an empty <tt>HashMap</tt> with the default initial capacity
+ * (16) and the default load factor (0.75).
+ */
+- (id) init
+{
+    NSInteger idx;
+
+    self = [super init];
+    if ( self ) {
+        entrySet = nil;
+        loadFactor = DEFAULT_LOAD_FACTOR;
+        threshold = (NSInteger)(DEFAULT_INITIAL_CAPACITY * DEFAULT_LOAD_FACTOR);
+        count = 0;
+        BuffSize = HASHSIZE;
+        NSInteger capacity = 1;
+        
+        while (capacity < BuffSize)
+            capacity <<= 1;
+        
+        BuffSize = capacity;
+        Capacity = capacity;
+        fNext = nil;
+        Scope = 0;
+        ptr = 0;
+        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
+        ptrBuffer = (MapElement **) [buffer mutableBytes];
+        if ( fNext != nil ) {
+            Scope = ((HashMap *)fNext)->Scope+1;
+            for( idx = 0; idx < BuffSize; idx++ ) {
+                ptrBuffer[idx] = ((HashMap *)fNext)->ptrBuffer[idx];
+            }
+        }
+        mode = 0;
+        keySet = nil;
+        values = nil;
+   }
+    return self;
+}
+
+-(id)initWithLen:(NSInteger)aBuffSize
+{
+    NSInteger idx;
+    
+    self = [super init];
+    if ( self ) {
+        fNext = nil;
+        entrySet = nil;
+        loadFactor = DEFAULT_LOAD_FACTOR;
+        threshold = (NSInteger)(DEFAULT_INITIAL_CAPACITY * DEFAULT_LOAD_FACTOR);
+        count = 0;
+        BuffSize = aBuffSize;
+        NSInteger capacity = 1;
+        
+        while (capacity < BuffSize)
+            capacity <<= 1;
+        
+        BuffSize = capacity * sizeof(id);
+        Capacity = capacity;
+        Scope = 0;
+        ptr = 0;
+        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize] retain];
+        ptrBuffer = (MapElement **) [buffer mutableBytes];
+        if ( fNext != nil ) {
+            Scope = ((HashMap *)fNext)->Scope+1;
+            for( idx = 0; idx < Capacity; idx++ ) {
+                ptrBuffer[idx] = ((HashMap *)fNext)->ptrBuffer[idx];
+            }
+        }
+        mode = 0;
+        keySet = nil;
+        values = nil;
+    }
+    return( self );
+}
+
+/**
+ * Constructs an empty <tt>HashMap</tt> with the specified initial
+ * capacity and load factor.
+ * 
+ * @param  initialCapacity the initial capacity
+ * @param  loadFactor      the load factor
+ * @throws IllegalArgumentException if the initial capacity is negative
+ * or the load factor is nonpositive
+ */
+- (id) init:(NSInteger)initialCapacity loadFactor:(float)aLoadFactor
+{
+    self = [super init];
+    if ( self ) {
+        entrySet = nil;
+        if (initialCapacity < 0)
+            @throw [[IllegalArgumentException alloc] init:[NSString stringWithFormat:@"Illegal initial capacity: %ld", (long)initialCapacity]];
+        if (initialCapacity > MAXIMUM_CAPACITY)
+            initialCapacity = MAXIMUM_CAPACITY;
+        if (aLoadFactor <= 0 /* || [Float isNaN:loadFactor] */)
+            @throw [[IllegalArgumentException alloc] init:[NSString stringWithFormat:@"Illegal load factor: %f", aLoadFactor]];
+        NSInteger capacity = 1;
+        
+        while (capacity < initialCapacity)
+            capacity <<= 1;
+        
+        count = 0;
+        BuffSize = capacity * sizeof(id);
+        Capacity = capacity;
+        loadFactor = aLoadFactor;
+        threshold = (NSInteger)(capacity * loadFactor);
+//        ptrBuffer = [AMutableArray arrayWithCapacity:initialCapacity];
+//        [self init];
+        keySet = nil;
+        values = nil;
+        Scope = 0;
+        ptr = 0;
+        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize] retain];
+        ptrBuffer = (MapElement **) [buffer mutableBytes];
+    }
+    return self;
+}
+
+
+/**
+ * Constructs an empty <tt>HashMap</tt> with the specified initial
+ * capacity and the default load factor (0.75).
+ * 
+ * @param  initialCapacity the initial capacity.
+ * @throws IllegalArgumentException if the initial capacity is negative.
+ */
+- (id) init:(NSInteger)anInitialCapacity
+{
+    self = [super init];
+    if ( self ) {
+        entrySet = nil;
+        NSInteger initialCapacity = anInitialCapacity;
+        if (initialCapacity > MAXIMUM_CAPACITY)
+            initialCapacity = MAXIMUM_CAPACITY;
+        NSInteger capacity = 1;
+        while (capacity < initialCapacity)
+            capacity <<= 1;
+        count = 0;
+        BuffSize = capacity * sizeof(id);
+        Capacity = capacity;
+        loadFactor = DEFAULT_LOAD_FACTOR;
+        threshold = (NSInteger)(capacity * loadFactor);
+        keySet = nil;
+        values = nil;
+        Scope = 0;
+        ptr = 0;
+        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize] retain];
+        ptrBuffer = (MapElement **) [buffer mutableBytes];
+    }
+    return self;
+}
+
+/**
+ * Constructs a new <tt>HashMap</tt> with the same mappings as the
+ * specified <tt>Map</tt>.  The <tt>HashMap</tt> is created with
+ * default load factor (0.75) and an initial capacity sufficient to
+ * hold the mappings in the specified <tt>Map</tt>.
+ * 
+ * @param   m the map whose mappings are to be placed in this map
+ * @throws  NullPointerException if the specified map is null
+ */
+- (id) initWithM:(HashMap *)m
+{
+    self = [super init];
+    self = [self init:(NSInteger)max((([m count] / DEFAULT_LOAD_FACTOR) + 1), DEFAULT_INITIAL_CAPACITY) loadFactor:DEFAULT_LOAD_FACTOR];
+    if ( self ) {
+        entrySet = nil;
+        NSInteger initialCapacity = max((([m count] / DEFAULT_LOAD_FACTOR) + 1), DEFAULT_INITIAL_CAPACITY);
+        if (initialCapacity > MAXIMUM_CAPACITY)
+            initialCapacity = MAXIMUM_CAPACITY;
+        NSInteger capacity = 1;
+        while (capacity < initialCapacity)
+            capacity <<= 1;
+        count = 0;
+        BuffSize = capacity * sizeof(id);
+        Capacity = capacity;
+        loadFactor = DEFAULT_LOAD_FACTOR;
+        threshold = (NSInteger)(capacity * loadFactor);
+        keySet = nil;
+        values = nil;
+        Scope = 0;
+        ptr = 0;
+        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize] retain];
+        ptrBuffer = (MapElement **) [buffer mutableBytes];
+        [self putAllForCreate:m];
+    }
+    return self;
+}
+
+-(void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in HashMap" );
+#endif
+    MapElement *tmp, *rtmp;
+    NSInteger idx;
+
+    if ( self.fNext != nil ) {
+        for( idx = 0; idx < Capacity; idx++ ) {
+            tmp = ptrBuffer[idx];
+            while ( tmp && tmp != [((HashMap *)fNext) getptrBufferEntry:idx] ) {
+                rtmp = tmp;
+                // tmp = [tmp getfNext];
+                tmp = (MapElement *)tmp.fNext;
+                [rtmp release];
+            }
+        }
+    }
+    if ( buffer ) [buffer release];
+#ifdef DONTUSEYET
+    [ptrBuffer release];
+    [entrySet release];
+#endif
+    if ( keySet ) [keySet release];
+    if ( values ) [values release];
+    [super dealloc];
+}
+
+- (NSUInteger)count
+{
+/*
+    NSUInteger aCnt = 0;
+    
+    for (NSUInteger i = 0; i < Capacity; i++) {
+        if ( ptrBuffer[i] != nil ) {
+            aCnt++;
+        }
+    }
+    return aCnt;
+ */
+    return count;
+}
+                          
+- (NSInteger) size
+{
+    NSInteger aSize = 0;
+    
+    for (NSInteger i = 0; i < Capacity; i++) {
+        if ( ptrBuffer[i] != nil ) {
+            aSize += sizeof(id);
+        }
+    }
+    return aSize;
+}
+                                  
+                                  
+-(void)deleteHashMap:(MapElement *)np
+{
+    MapElement *tmp, *rtmp;
+    NSInteger idx;
+    
+    if ( self.fNext != nil ) {
+        for( idx = 0; idx < Capacity; idx++ ) {
+            tmp = ptrBuffer[idx];
+            while ( tmp && tmp != (LinkBase *)[((HashMap *)fNext) getptrBufferEntry:idx] ) {
+                rtmp = tmp;
+                tmp = [tmp getfNext];
+                [rtmp release];
+            }
+        }
+    }
+}
+
+-(HashMap *)PushScope:(HashMap **)map
+{
+    NSInteger idx;
+    HashMap *htmp;
+    
+    htmp = [HashMap newHashMap];
+    if ( *map != nil ) {
+        ((HashMap *)htmp)->fNext = *map;
+        [htmp setScope:[((HashMap *)htmp->fNext) getScope]+1];
+        for( idx = 0; idx < Capacity; idx++ ) {
+            htmp->ptrBuffer[idx] = ((HashMap *)htmp->fNext)->ptrBuffer[idx];
+        }
+    }
+    //    gScopeLevel++;
+    *map = htmp;
+    return( htmp );
+}
+
+-(HashMap *)PopScope:(HashMap **)map
+{
+    NSInteger idx;
+    MapElement *tmp;
+    HashMap *htmp;
+    
+    htmp = *map;
+    if ( (*map)->fNext != nil ) {
+        *map = (HashMap *)htmp->fNext;
+        for( idx = 0; idx < Capacity; idx++ ) {
+            if ( htmp->ptrBuffer[idx] == nil ||
+                htmp->ptrBuffer[idx] == (*map)->ptrBuffer[idx] ) {
+                break;
+            }
+            tmp = htmp->ptrBuffer[idx];
+            /*
+             * must deal with parms, locals and labels at some point
+             * can not forget the debuggers
+             */
+            htmp->ptrBuffer[idx] = [tmp getfNext];
+            [tmp release];
+        }
+        *map = (HashMap *)htmp->fNext;
+        //        gScopeLevel--;
+    }
+    return( htmp );
+}
+
+#ifdef USERDOC
+/*
+ *  HASH        hash entry to get idx to table
+ *  NSInteger hash( HashMap *self, char *s );
+ *
+ *     Inputs:  char *s             string to find
+ *
+ *     Returns: NSInteger                 hashed value
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(NSInteger)hash:(NSString *)s       /*    form hash value for string s */
+{
+    NSInteger hashval;
+    const char *tmp;
+    
+    tmp = [s cStringUsingEncoding:NSASCIIStringEncoding];
+    for( hashval = 0; *tmp != '\0'; )
+        hashval += *tmp++;
+    self->LastHash = hashval % Capacity;
+    return( self->LastHash );
+}
+
+/**
+ * Applies a supplemental hash function to a given hashCode, which
+ * defends against poor-quality hash functions.  This is critical
+ * because HashMap uses power-of-two length hash tables, which
+ * otherwise encounter collisions for hashCodes that do not differ
+ * in lower bits. Note: Null keys always map to hash 0, thus idx 0.
+ */
+- (NSInteger) hashInt:(NSInteger) h
+{
+    // This function ensures that hashCodes that differ only by
+    // constant multiples at each bit position have a bounded
+    // number of collisions (approximately 8 at default load factor).
+    h ^= (h >> 20) ^ (h >> 12);
+    return h ^ (h >> 7) ^ (h >> 4);
+}
+
+/**
+ * Returns idx for hash code h.
+ */
+- (NSInteger) indexFor:(NSInteger)h length:(NSInteger)length
+{
+    return h & (length - 1);
+}
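+
+/* Worked example (illustrative only, not part of the runtime API): with a
+ * power-of-two capacity of 16, indexFor: simply masks the supplementally
+ * hashed code with length-1:
+ *
+ *     NSInteger h   = [map hashInt:[@"type" hash]];   // spread high bits into low bits
+ *     NSInteger idx = [map indexFor:h length:16];     // equivalent to h & 0x0F
+ *
+ * The variable `map` is a hypothetical HashMap instance used only for this note.
+ */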
+
+#ifdef USERDOC
+/*
+ *  FINDSCOPE  search hashed list for entry
+ *  HashMap *findscope( HashMap *self, NSInteger scope );
+ *
+ *     Inputs:  NSInteger       scope -- scope level to find
+ *
+ *     Returns: HashMap   pointer to ptrBuffer of proper scope level
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(HashMap *)findscope:(NSInteger)scope
+{
+    if ( self->Scope == scope ) {
+        return( self );
+    }
+    else if ( fNext ) {
+        return( [((HashMap *)fNext) findscope:scope] );
+    }
+    return( nil );              /*   not found      */
+}
+
+#ifdef USERDOC
+/*
+ *  LOOKUP  search hashed list for entry
+ *  MapElement *lookup( HashMap *self, char *s, NSInteger scope );
+ *
+ *     Inputs:  char     *s          string to find
+ *
+ *     Returns: MapElement  *           pointer to entry
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(id)lookup:(NSString *)s Scope:(NSInteger)scope
+{
+    MapElement *np;
+    
+    for( np = self->ptrBuffer[[self hash:s]]; np != nil; np = [np getfNext] ) {
+        if ( [s isEqualToString:[np getName]] ) {
+            return( np );        /*   found it       */
+        }
+    }
+    return( nil );              /*   not found      */
+}
+
+#ifdef USERDOC
+/*
+ *  INSTALL search hashed list for entry
+ *  NSInteger install( HashMap *self, MapElement *sym, NSInteger scope );
+ *
+ *     Inputs:  MapElement    *sym   -- symbol ptr to install
+ *              NSInteger         scope -- level to find
+ *
+ *     Returns: Boolean     TRUE   if installed
+ *                          FALSE  if already in table
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(MapElement *)install:(MapElement *)sym Scope:(NSInteger)scope
+{
+    MapElement *np;
+    
+    np = [self lookup:[sym getName] Scope:scope ];
+    if ( np == nil ) {
+        [sym retain];
+        [sym setFNext:self->ptrBuffer[ self->LastHash ]];
+        self->ptrBuffer[ self->LastHash ] = sym;
+        return( self->ptrBuffer[ self->LastHash ] );
+    }
+    return( nil );            /*   not found      */
+}
+
+#ifdef USERDOC
+/*
+ *  RemoveSym  search hashed list for entry
+ *  NSInteger RemoveSym( HashMap *self, char *s );
+ *
+ *     Inputs:  char     *s          string to find
+ *
+ *     Returns: NSInteger      indicator of SUCCESS OR FAILURE
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(NSInteger)RemoveSym:(NSString *)s
+{
+    MapElement *np, *prev;
+    NSInteger idx;
+    
+    idx = [self hash:s];
+    for ( prev = nil, np = self->ptrBuffer[idx]; np != nil; prev = np, np = [np getfNext] ) {
+        if ( [s isEqualToString:[np getName]] ) {
+            /* unlink the entry from the chain before releasing it */
+            if ( prev == nil )
+                self->ptrBuffer[idx] = [np getfNext];
+            else
+                [prev setFNext:[np getfNext]];
+            [np release];
+            return( SUCCESS );            /* report SUCCESS     */
+        }
+    }
+    return( FAILURE );                    /*   not found      */
+}
+
+-(void)delete_chain:(MapElement *)np
+{
+    if ( [np getfNext] != nil )
+        [self delete_chain:[np getfNext]];
+    [np release];
+}
+
+#ifdef DONTUSEYET
+-(NSInteger)bld_symtab:(KW_TABLE *)toknams
+{
+    NSInteger i;
+    MapElement *np;
+    
+    for( i = 0; *(toknams[i].name) != '\0'; i++ ) {
+        // install symbol in ptrBuffer
+        np = [MapElement newMapElement:[NSString stringWithFormat:@"%s", toknams[i].name]];
+        //        np->fType = toknams[i].toknum;
+        [self install:np Scope:0];
+    }
+    return( SUCCESS );
+}
+#endif
+
+-(MapElement *)getptrBufferEntry:(NSInteger)idx
+{
+    return( ptrBuffer[idx] );
+}
+
+-(MapElement **)getptrBuffer
+{
+    return( ptrBuffer );
+}
+
+-(void)setptrBuffer:(MapElement *)np Index:(NSInteger)idx
+{
+    if ( idx < Capacity ) {
+        [np retain];
+        ptrBuffer[idx] = np;
+    }
+}
+
+-(NSInteger)getScope
+{
+    return( Scope );
+}
+
+-(void)setScope:(NSInteger)i
+{
+    Scope = i;
+}
+
+- (MapElement *)getTType:(NSString *)name
+{
+    return [self lookup:name Scope:0];
+}
+
+/*
+ * works only for map lists indexed by TokenNumber rather than by name
+ */
+- (MapElement *)getNameInList:(NSInteger)ttype
+{
+    MapElement *np;
+    NSInteger aTType;
+
+    aTType = ttype % Capacity;
+    for( np = self->ptrBuffer[aTType]; np != nil; np = [np getfNext] ) {
+        if ( [(ACNumber *)np.node integerValue] == ttype ) {
+            return( np );        /*   found it       */
+        }
+    }
+    return( nil );              /*   not found      */
+}
+
+- (MapElement *)getName:(NSString *)name
+{
+    return [self lookup:name Scope:0]; /*  nil if not found      */    
+}
+
+- (void)putNode:(NSString *)name TokenType:(NSInteger)ttype
+{
+    MapElement *np;
+    
+    // install symbol in ptrBuffer
+    np = [MapElement newMapElementWithName:[NSString stringWithString:name] Type:ttype];
+    //        np->fType = toknams[i].toknum;
+    [self install:np Scope:0];
+}
+
+- (NSInteger)getMode
+{
+    return mode;
+}
+
+- (void)setMode:(NSInteger)aMode
+{
+    mode = aMode;
+}
+
+- (void) addObject:(id)aRule
+{
+    NSInteger idx;
+
+    idx = [self count];
+    if ( idx >= Capacity ) {
+        idx %= Capacity;
+    }
+    ptrBuffer[idx] = aRule;
+}
+
+/* this may have to handle linking into the chain
+ */
+- (void) insertObject:(id)aRule atIndex:(NSInteger)idx
+{
+    if ( idx >= Capacity ) {
+        idx %= Capacity;
+    }
+    if ( aRule != ptrBuffer[idx] ) {
+        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
+        [aRule retain];
+    }
+    ptrBuffer[idx] = aRule;
+}
+
+- (id)objectAtIndex:(NSInteger)idx
+{
+    if ( idx >= Capacity ) {
+        idx %= Capacity;
+    }
+    return ptrBuffer[idx];
+}
+
+/**
+ * Returns <tt>true</tt> if this map contains no key-value mappings.
+ * 
+ * @return <tt>true</tt> if this map contains no key-value mappings
+ */
+- (BOOL) empty
+{
+    return count == 0;
+}
+
+/**
+ * Offloaded version of get() to look up null keys.  Null keys map
+ * to idx 0.  This null case is split out into separate methods
+ * for the sake of performance in the two most commonly used
+ * operations (get and put), but incorporated with conditionals in
+ * others.
+ */
+- (id) getForNullKey
+{
+    
+    for (HMEntry *e = (HMEntry *)ptrBuffer[0]; e != nil; e = e.next) {
+        if (e.key == nil)
+            return e.value;
+    }
+    
+    return nil;
+}
+
+/**
+ * Returns the value to which the specified key is mapped,
+ * or {@code null} if this map contains no mapping for the key.
+ * 
+ * <p>More formally, if this map contains a mapping from a key
+ * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
+ * key.equals(k))}, then this method returns {@code v}; otherwise
+ * it returns {@code null}.  (There can be at most one such mapping.)
+ * 
+ * <p>A return value of {@code null} does not <i>necessarily</i>
+ * indicate that the map contains no mapping for the key; it's also
+ * possible that the map explicitly maps the key to {@code null}.
+ * The {@link #containsKey containsKey} operation may be used to
+ * distinguish these two cases.
+ * 
+ * @see #put(Object, Object)
+ */
+- (id) get:(NSString *)key
+{
+    if (key == nil)
+        return [self getForNullKey];
+    //    NSInteger hash = [self hashInt:[self hash:key]];
+    NSInteger hash = [self hashInt:[key hash]];
+    
+    for (HMEntry *e = (HMEntry *)ptrBuffer[[self indexFor:hash length:[self capacity]]]; e != nil; e = e.next) {
+        NSString *k;
+        if (e.hash == hash && ((k = e.key) == key || [key isEqualTo:k]))
+            return e.value;
+    }
+    
+    return nil;
+}
+
+
+/**
+ * Returns <tt>true</tt> if this map contains a mapping for the
+ * specified key.
+ * 
+ * @param   key   The key whose presence in this map is to be tested
+ * @return <tt>true</tt> if this map contains a mapping for the specified
+ * key.
+ */
+- (BOOL) containsKey:(NSString *)key
+{
+    return [self getEntry:key] != nil;
+}
+
+/**
+ * Returns the entry associated with the specified key in the
+ * HashMap.  Returns null if the HashMap contains no mapping
+ * for the key.
+ */
+- (HMEntry *) getEntry:(NSString *)key
+{
+    //    NSInteger hash = (key == nil) ? 0 : [self hashInt:[self hash:key]];
+    NSInteger hash = (key == nil) ? 0 : [self hashInt:[key hash]];
+    
+    for (HMEntry *e = (HMEntry *)ptrBuffer[[self indexFor:hash length:Capacity]]; e != nil; e = e.next) {
+        NSString *k;
+        if (e.hash == hash && ((k = e.key) == key || (key != nil && [key isEqualTo:k])))
+            return e;
+    }
+    
+    return nil;
+}
+
+
+/**
+ * Associates the specified value with the specified key in this map.
+ * If the map previously contained a mapping for the key, the old
+ * value is replaced.
+ * 
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @return the previous value associated with <tt>key</tt>, or
+ * <tt>null</tt> if there was no mapping for <tt>key</tt>.
+ * (A <tt>null</tt> return can also indicate that the map
+ * previously associated <tt>null</tt> with <tt>key</tt>.)
+ */
+- (id) put:(NSString *)key value:(id)value
+{
+    if (key == nil)
+        return [self putForNullKey:value];
+//    NSInteger hash = [self hashInt:[self hash:key]];
+    NSInteger hash = [self hashInt:[key hash]];
+    NSInteger i = [self indexFor:hash length:[self capacity]];
+    
+    for (HMEntry *e = (HMEntry *)ptrBuffer[i]; e != nil; e = e.next) {
+        NSString *k;
+        if (e.hash == hash && ((k = e.key) == key || [key isEqualTo:k])) {
+            id oldValue = e.value;
+            e.value = value;
+            [e recordAccess:self];
+            return oldValue;
+        }
+    }
+    
+    modCount++;
+    [self addEntry:hash key:key value:value bucketIndex:i];
+    return nil;
+}
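+
+/* Usage sketch for put:/get: semantics (hedged, illustrative only):
+ *
+ *     id old = [map put:@"k" value:@"v1"];   // old == nil, new entry added
+ *     old    = [map put:@"k" value:@"v2"];   // old == @"v1", value replaced in place
+ *     id cur = [map get:@"k"];               // cur == @"v2"
+ *
+ * A nil return from put: can mean either "no previous mapping" or "key mapped
+ * to nil"; containsKey: distinguishes the two, as documented above.  `map` is
+ * a hypothetical HashMap instance used only for this note.
+ */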
+
+
+/**
+ * Offloaded version of put for null keys
+ */
+- (id) putForNullKey:(id)value
+{
+    
+    for (HMEntry *e = (HMEntry *)ptrBuffer[0]; e != nil; e = e.next) {
+        if (e.key == nil) {
+            id oldValue = e.value;
+            e.value = value;
+            [e recordAccess:self];
+            return oldValue;
+        }
+    }
+    
+    modCount++;
+    [self addEntry:0 key:nil value:value bucketIndex:0];
+    return nil;
+}
+
+/**
+ * This method is used instead of put by constructors and
+ * pseudoconstructors (clone, readObject).  It does not resize the table,
+ * check for comodification, etc.  It calls createEntry rather than
+ * addEntry.
+ */
+- (void) putForCreate:(NSString *)key value:(id)value
+{
+    NSInteger hash = (key == nil) ? 0 : [self hashInt:[key hash]];   // keep consistent with put:/get:
+    NSInteger i = [self indexFor:hash length:[self capacity]];
+    
+    for (HMEntry *e = (HMEntry *)ptrBuffer[i]; e != nil; e = e.next) {
+        NSString *k;
+        if (e.hash == hash && ((k = e.key) == key || (key != nil && [key isEqualTo:k]))) {
+            e.value = value;
+            return;
+        }
+    }
+    
+    [self createEntry:hash key:key value:value bucketIndex:i];
+}
+
+- (void) putAllForCreate:(HashMap *)m
+{
+    
+    for (HMEntry *e in [m entrySet])
+        [self putForCreate:[e key] value:[e value]];
+    
+}
+
+/**
+ * Rehashes the contents of this map into a new array with a
+ * larger capacity.  This method is called automatically when the
+ * number of keys in this map reaches its threshold.
+ * 
+ * If current capacity is MAXIMUM_CAPACITY, this method does not
+ * resize the map, but sets threshold to Integer.MAX_VALUE.
+ * This has the effect of preventing future calls.
+ * 
+ * @param newCapacity the new capacity, MUST be a power of two;
+ * must be greater than current capacity unless current
+ * capacity is MAXIMUM_CAPACITY (in which case value
+ * is irrelevant).
+ */
+- (void) resize:(NSInteger)newCapacity
+{
+//    NSArray * oldTable = ptrBuffer;
+    NSInteger oldCapacity = Capacity;
+    if (oldCapacity == MAXIMUM_CAPACITY) {
+        threshold = NSIntegerMax;
+        return;
+    }
+//    NSArray * newTable = [NSArray array];
+//    [self transfer:newTable];
+    BuffSize = newCapacity * sizeof(id);
+    Capacity = newCapacity;
+    [buffer setLength:BuffSize];
+    ptrBuffer = (MapElement **) [buffer mutableBytes];
+    threshold = (NSInteger)(newCapacity * loadFactor);
+}
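+
+/* Worked example of the growth arithmetic (illustrative, assuming the default
+ * load factor of 0.75): a map created with capacity 16 has threshold
+ * 16 * 0.75 = 12; when the 13th entry is added, addEntry: doubles the
+ * capacity to 32 and the new threshold becomes 32 * 0.75 = 24.  Note that
+ * resize: only enlarges the bucket buffer here; rehashing of existing chains
+ * (transfer:) is currently commented out above.
+ */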
+
+
+/**
+ * Transfers all entries from current table to newTable.
+ */
+- (void) transfer:(AMutableArray *)newTable
+{
+    NSInteger newCapacity = [newTable count];
+    
+    for (NSInteger j = 0; j < [self capacity]; j++) {
+        HMEntry *e = (HMEntry *)ptrBuffer[j];
+        if (e != nil) {
+            ptrBuffer[j] = nil;
+            
+            do {
+                HMEntry *next = e.next;
+                NSInteger i = [self indexFor:e.hash length:newCapacity];
+                e.next = [newTable objectAtIndex:i];
+                [newTable replaceObjectAtIndex:i withObject:e];
+                e = next;
+            }
+            while (e != nil);
+        }
+    }
+    
+}
+
+
+/**
+ * Copies all of the mappings from the specified map to this map.
+ * These mappings will replace any mappings that this map had for
+ * any of the keys currently in the specified map.
+ * 
+ * @param m mappings to be stored in this map
+ * @throws NullPointerException if the specified map is null
+ */
+- (void) putAll:(HashMap *)m
+{
+    NSInteger numKeysToBeAdded = [m count];
+    if (numKeysToBeAdded == 0)
+        return;
+    if (numKeysToBeAdded > threshold) {
+        NSInteger targetCapacity = (NSInteger)(numKeysToBeAdded / loadFactor + 1);
+        if (targetCapacity > MAXIMUM_CAPACITY)
+            targetCapacity = MAXIMUM_CAPACITY;
+        NSInteger newCapacity = Capacity;
+        
+        while (newCapacity < targetCapacity)
+            newCapacity <<= 1;
+        
+        if (newCapacity > Capacity)
+            [self resize:newCapacity];
+    }
+    
+    for (HMEntry *e in [m entrySet])
+        [self put:[e key] value:[e value]];
+    
+}
+
+/**
+ * Removes the mapping for the specified key from this map if present.
+ * 
+ * @param  key key whose mapping is to be removed from the map
+ * @return the previous value associated with <tt>key</tt>, or
+ * <tt>null</tt> if there was no mapping for <tt>key</tt>.
+ * (A <tt>null</tt> return can also indicate that the map
+ * previously associated <tt>null</tt> with <tt>key</tt>.)
+ */
+- (id) remove:(NSString *)key
+{
+    HMEntry *e = [self removeEntryForKey:key];
+    return (e == nil ? nil : e.value);
+}
+
+
+/**
+ * Removes and returns the entry associated with the specified key
+ * in the HashMap.  Returns null if the HashMap contains no mapping
+ * for this key.
+ */
+- (HMEntry *) removeEntryForKey:(NSString *)key
+{
+    NSInteger hash = (key == nil) ? 0 : [self hashInt:[key hash]];   // keep consistent with put:/get:
+    NSInteger i = [self indexFor:hash length:Capacity];
+    HMEntry *prev = (HMEntry *)ptrBuffer[i];
+    HMEntry *e = prev;
+    
+    while (e != nil) {
+        HMEntry *next = e.next;
+        NSString *k;
+        if (e.hash == hash && ((k = e.key) == key || (key != nil && [key isEqualTo:k]))) {
+            modCount++;
+            count--;
+            if (prev == e)
+                ptrBuffer[i] = (id) next;
+            else
+                prev.next = next;
+            [e recordRemoval:self];
+            return e;
+        }
+        prev = e;
+        e = next;
+    }
+    
+    return e;
+}
+
+/**
+ * Special version of remove for EntrySet.
+ */
+- (HMEntry *) removeMapping:(id)o
+{
+//    if (!([o conformsToProtocol:@protocol(HMEntry)]))
+//        return nil;
+    HMEntry *entry = (HMEntry *)o;
+    NSString *key = entry.key;
+    NSInteger hash = (key == nil) ? 0 : [self hashInt:[key hash]];   // keep consistent with put:/get:
+    NSInteger i = [self indexFor:hash length:Capacity];
+    HMEntry *prev = (HMEntry *)ptrBuffer[i];
+    HMEntry *e = prev;
+    
+    while (e != nil) {
+        HMEntry *next = e.next;
+        if (e.hash == hash && [e isEqualTo:entry]) {
+            modCount++;
+            count--;
+            if (prev == e)
+                ptrBuffer[i] = (id)next;
+            else
+                prev.next = next;
+            [e recordRemoval:self];
+            return e;
+        }
+        prev = e;
+        e = next;
+    }
+    
+    return e;
+}
+
+/**
+ * Removes all of the mappings from this map.
+ * The map will be empty after this call returns.
+ */
+- (void) clear
+{
+    modCount++;
+    id tmp;
+    
+    for (NSInteger i = 0; i < Capacity; i++) {
+        tmp = ptrBuffer[i];
+        if ( tmp ) {
+            [tmp release];
+        }
+        ptrBuffer[i] = nil;
+    }
+    count = 0;
+}
+
+
+/**
+ * Special-case code for containsValue with null argument
+ */
+- (BOOL) containsNullValue
+{
+    for (NSInteger i = 0; i < Capacity; i++)
+        
+        for (HMEntry *e = (HMEntry *)ptrBuffer[i]; e != nil; e = e.next)
+            if (e.value == nil)
+                return YES;
+    return NO;
+}
+
+/**
+ * Returns <tt>true</tt> if this map maps one or more keys to the
+ * specified value.
+ * 
+ * @param value value whose presence in this map is to be tested
+ * @return <tt>true</tt> if this map maps one or more keys to the
+ * specified value
+ */
+- (BOOL) containsValue:(id)value
+{
+    if (value == nil)
+        return [self containsNullValue];
+    
+    for (NSInteger i = 0; i < Capacity; i++)
+        
+        for (HMEntry *e = (HMEntry *)ptrBuffer[i]; e != nil; e = e.next)
+            if ([value isEqualTo:e.value])
+                return YES;
+    
+    
+    return NO;
+}
+
+/**
+ * Returns a shallow copy of this <tt>HashMap</tt> instance: the keys and
+ * values themselves are not cloned.
+ * 
+ * @return a shallow copy of this map
+ */
+- (id) copyWithZone:(NSZone *)zone
+{
+    HashMap *result = nil;
+    
+    //    @try {
+    result = [[HashMap allocWithZone:zone] init:Capacity loadFactor:loadFactor];
+//        result = (HashMap *)[super copyWithZone:zone];
+//    }
+//    @catch (CloneNotSupportedException * e) {
+//    }
+    // give the copy its own bucket buffer; sharing ptrBuffer would alias this map
+    result.entrySet = nil;
+    //    result.modCount = 0;
+    //    result.count = 0;
+    //    [result init];
+    [result putAllForCreate:self];
+    result.count = count;
+    result.threshold = threshold;
+    result.loadFactor = loadFactor;
+    result.modCount = modCount;
+    result.entrySet = entrySet;
+    return result;
+}
+
+
+/**
+ * Returns a string representation of this map.  The string representation
+ * consists of a list of key-value mappings in the order returned by the
+ * map's <tt>entrySet</tt> view's iterator, enclosed in braces
+ * (<tt>"{}"</tt>).  Adjacent mappings are separated by the characters
+ * <tt>", "</tt> (comma and space).  Each key-value mapping is rendered as
+ * the key followed by an equals sign (<tt>"="</tt>) followed by the
+ * associated value.  Keys and values are converted to strings as by
+ * {@link String#valueOf(Object)}.
+ *
+ * @return a string representation of this map
+ */
+- (NSString *)description
+{
+    HashIterator *it = [[self entrySet] iterator];
+    if (![it hasNext])
+        return @"{}";
+    
+    NSMutableString *sb = [NSMutableString stringWithCapacity:40];
+    [sb appendString:@"{"];
+    while ( YES ) {
+        HMEntry *e = [it next];
+        NSString *key = e.key;
+        id value = e.value;
+        [sb appendFormat:@"%@=%@", (key == self ? @"[self Map]" : key), (value == self ? @"[self Map]" : value)];
+        if ( ![it hasNext] ) {
+            [sb appendString:@"}"];
+            return sb;
+        }
+        [sb appendString:@", "];
+    }
+}
+
+/**
+ * Adds a new entry with the specified key, value and hash code to
+ * the specified bucket.  It is the responsibility of this
+ * method to resize the table if appropriate.
+ * 
+ * Subclass overrides this to alter the behavior of put method.
+ */
+- (void) addEntry:(NSInteger)hash key:(NSString *)key value:(id)value bucketIndex:(NSInteger)bucketIndex
+{
+    HMEntry *e = (HMEntry *)ptrBuffer[bucketIndex];
+    ptrBuffer[bucketIndex] = [[HMEntry alloc] init:hash key:key value:value next:e];
+    if (count++ >= threshold)
+        [self resize:2 * Capacity];
+}
+
+/**
+ * Like addEntry except that this version is used when creating entries
+ * as part of Map construction or "pseudo-construction" (cloning,
+ * deserialization).  This version needn't worry about resizing the table.
+ * 
+ * Subclass overrides this to alter the behavior of HashMap(Map),
+ * clone, and readObject.
+ */
+- (void) createEntry:(NSInteger)hash key:(NSString *)key value:(id)value bucketIndex:(NSInteger)bucketIndex
+{
+    HMEntry *e = (HMEntry *)ptrBuffer[bucketIndex];
+    ptrBuffer[bucketIndex] = [[HMEntry alloc] init:hash key:key value:value next:e];
+    count++;
+}
+
+- (HMKeyIterator *) newKeyIterator
+{
+    return [HMKeyIterator newIterator:self];
+}
+
+- (HMValueIterator *) newValueIterator
+{
+    return [HMValueIterator newIterator:self];
+}
+
+- (HMEntryIterator *) newEntryIterator
+{
+    return [HMEntryIterator newIterator:self];
+}
+
+
+/**
+ * Returns a {@link Set} view of the keys contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa.  If the map is modified
+ * while an iteration over the set is in progress (except through
+ * the iterator's own <tt>remove</tt> operation), the results of
+ * the iteration are undefined.  The set supports element removal,
+ * which removes the corresponding mapping from the map, via the
+ * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
+ * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
+ * operations.  It does not support the <tt>add</tt> or <tt>addAll</tt>
+ * operations.
+ */
+- (HMKeySet *) keySet
+{
+    HMKeySet *ks = keySet;
+    return (ks != nil ? ks : (keySet = [HMKeySet newKeySet:self]));
+}
+
+
+/**
+ * Returns a {@link Collection} view of the values contained in this map.
+ * The collection is backed by the map, so changes to the map are
+ * reflected in the collection, and vice-versa.  If the map is
+ * modified while an iteration over the collection is in progress
+ * (except through the iterator's own <tt>remove</tt> operation),
+ * the results of the iteration are undefined.  The collection
+ * supports element removal, which removes the corresponding
+ * mapping from the map, via the <tt>Iterator.remove</tt>,
+ * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
+ * <tt>retainAll</tt> and <tt>clear</tt> operations.  It does not
+ * support the <tt>add</tt> or <tt>addAll</tt> operations.
+ */
+- (Values *) values
+{
+    Values *vs = values;
+    return (vs != nil ? vs : (values = [Values newValueSet:self]));
+}
+
+
+/**
+ * Returns a {@link Set} view of the mappings contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa.  If the map is modified
+ * while an iteration over the set is in progress (except through
+ * the iterator's own <tt>remove</tt> operation, or through the
+ * <tt>setValue</tt> operation on a map entry returned by the
+ * iterator) the results of the iteration are undefined.  The set
+ * supports element removal, which removes the corresponding
+ * mapping from the map, via the <tt>Iterator.remove</tt>,
+ * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
+ * <tt>clear</tt> operations.  It does not support the
+ * <tt>add</tt> or <tt>addAll</tt> operations.
+ * 
+ * @return a set view of the mappings contained in this map
+ */
+- (HMEntrySet *) entrySet0
+{
+    HMEntrySet *es = entrySet;
+    return es != nil ? es : (entrySet = [HMEntrySet newEntrySet:self]);
+}
+
+- (HMEntrySet *) entrySet
+{
+    return [self entrySet0];
+}
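+
+/* Iteration sketch (hedged, illustrative only): entries can be walked either
+ * through the cached keySet/values/entrySet views or directly through the
+ * entry iterator:
+ *
+ *     HMEntryIterator *it = [map newEntryIterator];
+ *     while ( [it hasNext] ) {
+ *         HMEntry *e = [it next];
+ *         NSLog(@"%@ = %@", e.key, e.value);
+ *     }
+ *
+ * `map` is a hypothetical HashMap instance used only for this note.
+ */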
+
+
+/**
+ * Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
+ * serialize it).
+ * 
+ * @serialData The <i>capacity</i> of the HashMap (the length of the
+ * bucket array) is emitted (NSInteger), followed by the
+ * <i>count</i> (an NSInteger, the number of key-value
+ * mappings), followed by the key (Object) and value (Object)
+ * for each key-value mapping.  The key-value mappings are
+ * emitted in no particular order.
+ */
+- (void) writeObject:(NSOutputStream *)s
+{
+/*
+    NSEnumerator * i = (count > 0) ? [[self entrySet0] iterator] : nil;
+    [s defaultWriteObject];
+    [s writeInt:[buffer length]];
+    [s writeInt:count];
+    if (i != nil) {
+        while ([i hasNext]) {
+            HMEntry *e = [i nextObject];
+            [s writeObject:[e key]];
+            [s writeObject:[e value]];
+        }
+        
+    }
+ */
+}
+
+
+/**
+ * Reconstitute the <tt>HashMap</tt> instance from a stream (i.e.,
+ * deserialize it).
+ */
+- (void) readObject:(NSInputStream *)s
+{
+/*
+    [s defaultReadObject];
+    NSInteger numBuckets = [s readInt];
+    ptrBuffer = [NSArray array];
+    [self init];
+    NSInteger count = [s readInt];
+    
+    for (NSInteger i = 0; i < count; i++) {
+        NSString * key = (NSString *)[s readObject];
+        id value = (id)[s readObject];
+        [self putForCreate:key value:value];
+    }
+ */
+}
+
+- (NSInteger) capacity
+{
+    return Capacity;
+}
+
+- (float) loadFactor
+{
+    return loadFactor;
+}
+
+/* this will never link into the chain
+ */
+- (void) setObject:(id)aRule atIndex:(NSInteger)idx
+{
+    if ( idx >= Capacity ) {
+        idx %= Capacity;
+    }
+    if ( aRule != ptrBuffer[idx] ) {
+        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
+        [aRule retain];
+    }
+    ptrBuffer[idx] = aRule;
+}
+
+- (void)putName:(NSString *)name Node:(id)aNode
+{
+    MapElement *np;
+    
+    np = [self lookup:name Scope:0 ];
+    if ( np == nil ) {
+        np = [MapElement newMapElementWithName:name Node:aNode];
+        [np retain];
+        /* link the new element at the head of the chain; releasing the old
+         * head here would leave np.fNext dangling */
+        np.fNext = ptrBuffer[ LastHash ];
+        ptrBuffer[ LastHash ] = np;
+    }
+    return;    
+}
+
+- (NSEnumerator *)objectEnumerator
+{
+#pragma mark FIXME: no real enumerator is constructed yet
+    NSEnumerator *anEnumerator = nil;
+
+    itIndex = 0;
+    return anEnumerator;
+}
+
+- (BOOL)hasNext
+{
+    if (self && [self count] < Capacity-1) {
+        return YES;
+    }
+    return NO;
+}
+
+- (MapElement *)nextObject
+{
+    if (self && itIndex < Capacity-1) {
+        return ptrBuffer[itIndex++];
+    }
+    return nil;
+}
+
+@end
+
diff --git a/runtime/ObjC/Framework/HashRule.h b/runtime/ObjC/Framework/HashRule.h
new file mode 100644
index 0000000..94b0abd
--- /dev/null
+++ b/runtime/ObjC/Framework/HashRule.h
@@ -0,0 +1,71 @@
+//
+//  HashRule.h
+//  ANTLR
+//
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "ACNumber.h"
+#import "RuleMemo.h"
+#import "PtrBuffer.h"
+
+#define GLOBAL_SCOPE       0
+#define LOCAL_SCOPE        1
+#define HASHSIZE         101
+#define HBUFSIZE      0x2000
+
+@interface HashRule : PtrBuffer {
+    //    TStringPool *fPool;
+    NSInteger LastHash;
+    NSInteger mode;
+}
+
+// Construction/Destruction
++ (id)newHashRule;
++ (id)newHashRuleWithLen:(NSInteger)aBuffSize;
+- (id)init;
+- (id)initWithLen:(NSInteger)aBuffSize;
+- (void)dealloc;
+
+- (NSInteger)count;
+- (NSInteger)length;
+- (NSInteger)size;
+
+// Instance Methods
+- (void)deleteHashRule:(RuleMemo *)np;
+- (void)delete_chain:(RuleMemo *)np;
+- (RuleMemo **)getPtrBuffer;
+- (void)setPtrBuffer:(RuleMemo **)np;
+- (ACNumber *)getRuleMemoStopIndex:(NSInteger)aStartIndex;
+- (void)putRuleMemoAtStartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex;
+- (NSInteger)getMode;
+- (void)setMode:(NSInteger)aMode;
+- (void) insertObject:(RuleMemo *)aRule atIndex:(NSInteger)Index;
+- (RuleMemo *) objectAtIndex:(NSInteger)Index;
+
+@property (getter=getLastHash, setter=setLastHash:) NSInteger LastHash;
+@property (getter=getMode,setter=setMode:) NSInteger mode;
+@end
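+
+/* Usage sketch (hedged, illustrative only): HashRule is a memoization table
+ * keyed by rule start index.  A recognizer can record that a rule starting at
+ * token index 5 stopped at token index 9, then query the stop index later:
+ *
+ *     HashRule *memo = [HashRule newHashRule];
+ *     [memo putRuleMemoAtStartIndex:5 StopIndex:9];
+ *     ACNumber *stop = [memo getRuleMemoStopIndex:5];   // expected integerValue of 9
+ *     [memo release];
+ */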
diff --git a/runtime/ObjC/Framework/HashRule.m b/runtime/ObjC/Framework/HashRule.m
new file mode 100644
index 0000000..e453ba1
--- /dev/null
+++ b/runtime/ObjC/Framework/HashRule.m
@@ -0,0 +1,279 @@
+//
+//  HashRule.m
+//  ANTLR
+//
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#define SUCCESS (0)
+#define FAILURE (-1)
+#define ANTLR_MEMO_RULE_UNKNOWN -1
+
+#import "HashRule.h"
+
+/*
+ * Start of HashRule
+ */
+@implementation HashRule
+
+@synthesize LastHash;
+
++(id)newHashRule
+{
+    return [[HashRule alloc] init];
+}
+
++(id)newHashRuleWithLen:(NSInteger)aBuffSize
+{
+    return [[HashRule alloc] initWithLen:aBuffSize];
+}
+
+-(id)init
+{
+    self = [super initWithLen:HASHSIZE];
+    if ( self != nil ) {
+    }
+    return( self );
+}
+
+-(id)initWithLen:(NSInteger)aBuffSize
+{
+    self = [super initWithLen:aBuffSize];
+    if ( self != nil ) {
+        mode = 0;
+    }
+    return( self );
+}
+
+-(void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in HashRule" );
+#endif
+    RuleMemo *tmp, *rtmp;
+    int Index;
+    
+    if ( self.fNext != nil ) {
+        for( Index = 0; Index < BuffSize; Index++ ) {
+            tmp = ptrBuffer[Index];
+            while ( tmp && tmp != ptrBuffer[Index] ) {
+                rtmp = tmp;
+                if ([tmp isKindOfClass:[LinkBase class]])
+                    tmp = (RuleMemo *)tmp.fNext;
+                else
+                    tmp = nil;
+                [rtmp release];
+            }
+        }
+    }
+    [super dealloc];
+}
+
+- (NSInteger)count
+{
+    NSInteger aCnt = 0;
+    
+    for (int i = 0; i < BuffSize; i++) {
+        if ( ptrBuffer[i] != nil ) {
+            aCnt++;
+        }
+    }
+    return aCnt;
+}
+                          
+- (NSInteger) length
+{
+    return BuffSize;
+}
+
+- (NSInteger) size
+{
+    NSInteger aSize = 0;
+    
+    for (int i = 0; i < BuffSize; i++) {
+        if ( ptrBuffer[i] != nil ) {
+            aSize += sizeof(id);
+        }
+    }
+    return aSize;
+}
+                                  
+                                  
+-(void)deleteHashRule:(RuleMemo *)np
+{
+    RuleMemo *tmp, *rtmp;
+    int Index;
+    
+    if ( self.fNext != nil ) {
+        for( Index = 0; Index < BuffSize; Index++ ) {
+            tmp = ptrBuffer[Index];
+            while ( tmp && tmp != ptrBuffer[Index ] ) {
+                rtmp = tmp;
+                if ([tmp isKindOfClass:[LinkBase class]])
+                    tmp = (RuleMemo *)tmp.fNext;
+                else
+                    tmp = nil;
+                [rtmp release];
+            }
+        }
+    }
+}
+
+-(void)delete_chain:(RuleMemo *)np
+{
+    if ( np.fNext != nil )
+        [self delete_chain:np.fNext];
+    [np release];
+}
+
+-(RuleMemo **)getPtrBuffer
+{
+    return( ptrBuffer );
+}
+
+-(void)setPtrBuffer:(RuleMemo **)np
+{
+    ptrBuffer = np;
+}
+
+- (ACNumber *)getRuleMemoStopIndex:(NSInteger)aStartIndex
+{
+    RuleMemo *aRule;
+    ACNumber *stopIndex;
+    NSInteger anIndex;
+    
+    anIndex = ( aStartIndex >= BuffSize ) ? aStartIndex % BuffSize : aStartIndex;
+    if ((aRule = ptrBuffer[anIndex]) == nil) {
+        return nil;
+    }
+    stopIndex = [aRule getStopIndex:aStartIndex];
+    return stopIndex;
+}
+
+- (void)putRuleMemo:(RuleMemo *)aRule AtStartIndex:(NSInteger)aStartIndex
+{
+    NSInteger anIndex;
+    
+    anIndex = (aStartIndex >= BuffSize) ? aStartIndex % BuffSize : aStartIndex;
+    if ( ptrBuffer[anIndex] == nil ) {
+        ptrBuffer[anIndex] = aRule;
+        [aRule retain];
+    }
+    else {
+        do {
+            if ( [aRule.startIndex integerValue] == aStartIndex ) {
+                [aRule setStartIndex:aRule.stopIndex];
+                return;
+            }
+            aRule = aRule.fNext;
+        } while ( aRule != nil );
+    }
+}
+
+- (void)putRuleMemoAtStartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex
+{
+    RuleMemo *aRule, *newRule;
+    NSInteger anIndex;
+    NSInteger aMatchIndex;
+
+    anIndex = (aStartIndex >= BuffSize) ? aStartIndex % BuffSize : aStartIndex;
+    aRule = ptrBuffer[anIndex];
+    if ( aRule == nil ) {
+        aRule = [RuleMemo newRuleMemoWithStartIndex:[ACNumber numberWithInteger:aStartIndex]
+                                                    StopIndex:[ACNumber numberWithInteger:aStopIndex]];
+        [aRule retain];
+        ptrBuffer[anIndex] = aRule;
+    }
+    else {
+        aMatchIndex = [aRule.startIndex integerValue];
+        if ( aStartIndex > aMatchIndex ) {
+            // the new memo belongs ahead of the current bucket head; link it in front
+            newRule = [RuleMemo newRuleMemoWithStartIndex:[ACNumber numberWithInteger:aStartIndex]
+                                                StopIndex:[ACNumber numberWithInteger:aStopIndex]];
+            [newRule retain];
+            newRule.fNext = ptrBuffer[anIndex];
+            ptrBuffer[anIndex] = newRule;
+            return;
+        }
+        while (aRule.fNext != nil) {
+            aMatchIndex = [((RuleMemo *)aRule.fNext).startIndex integerValue];
+            if ( aStartIndex > aMatchIndex ) {
+                newRule = [RuleMemo newRuleMemoWithStartIndex:[ACNumber numberWithInteger:aStartIndex]
+                                                              StopIndex:[ACNumber numberWithInteger:aStopIndex]];
+                [newRule retain];
+                newRule.fNext = aRule.fNext;
+                aRule.fNext = newRule;
+                return;
+            }
+            if ( aMatchIndex == aStartIndex ) {
+                [aRule setStartIndex:aRule.stopIndex];
+                return;
+            }
+            aRule = aRule.fNext;
+        }
+    }
+}
+
+- (NSInteger)getLastHash
+{
+    return LastHash;
+}
+
+- (void)setLastHash:(NSInteger)aHash
+{
+    LastHash = aHash;
+}
+
+- (NSInteger)getMode
+{
+    return mode;
+}
+
+- (void)setMode:(NSInteger)aMode
+{
+    mode = aMode;
+}
+
+- (void) insertObject:(RuleMemo *)aRule atIndex:(NSInteger)anIndex
+{
+    NSInteger Index;
+    
+    Index = ( anIndex >= BuffSize ) ? anIndex % BuffSize : anIndex;
+    if (aRule != ptrBuffer[Index]) {
+        if ( ptrBuffer[Index] ) [ptrBuffer[Index] release];
+        [aRule retain];
+    }
+    ptrBuffer[Index] = aRule;
+}
+
+- (RuleMemo *)objectAtIndex:(NSInteger)anIndex
+{
+    NSInteger anIdx;
+
+    anIdx = ( anIndex >= BuffSize ) ? anIndex % BuffSize : anIndex;
+    return ptrBuffer[anIdx];
+}
+
+
+@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/Info.plist b/runtime/ObjC/Framework/Info.plist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/Info.plist
rename to runtime/ObjC/Framework/Info.plist
diff --git a/runtime/ObjC/Framework/IntArray.h b/runtime/ObjC/Framework/IntArray.h
new file mode 100644
index 0000000..72e29f2
--- /dev/null
+++ b/runtime/ObjC/Framework/IntArray.h
@@ -0,0 +1,74 @@
+//
+//  IntArray.h
+//  ANTLR
+//
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+
+#define ANTLR_INT_ARRAY_INITIAL_SIZE 10
+
+@interface IntArray : NSObject 
+{
+    NSUInteger BuffSize;
+    NSUInteger count;
+    NSInteger idx;
+    NSMutableData *buffer;
+    __strong NSInteger *intBuffer;
+    BOOL SPARSE;
+}
+
++ (IntArray *)newArray;
++ (IntArray *)newArrayWithLen:(NSUInteger)aLen;
+
+- (id) init;
+- (id) initWithLen:(NSUInteger)aLen;
+
+- (void) dealloc;
+
+- (id) copyWithZone:(NSZone *)aZone;
+
+- (void) addInteger:(NSInteger) value;
+- (NSInteger) pop;
+- (void) push:(NSInteger) value;
+- (NSInteger) integerAtIndex:(NSUInteger) index;
+- (void) insertInteger:(NSInteger)anInteger AtIndex:(NSUInteger) anIndex;
+- (NSInteger)removeIntegerAtIndex:(NSUInteger) anIndex;
+- (void)replaceInteger:(NSInteger)aValue AtIndex:(NSUInteger)anIndex;
+- (void) reset;
+
+- (NSUInteger) count;
+- (NSUInteger) size;
+- (void) ensureCapacity:(NSUInteger) anIndex;
+
+@property (assign) NSUInteger BuffSize;
+@property (assign) NSUInteger count;
+@property (assign) NSInteger idx;
+@property (retain) NSMutableData *buffer;
+@property (assign) NSInteger *intBuffer;
+@property (assign) BOOL SPARSE;
+
+@end
diff --git a/runtime/ObjC/Framework/IntArray.m b/runtime/ObjC/Framework/IntArray.m
new file mode 100644
index 0000000..0c01d1f
--- /dev/null
+++ b/runtime/ObjC/Framework/IntArray.m
@@ -0,0 +1,199 @@
+//
+//  IntArray.m
+//  ANTLR
+//
+//  Created by Ian Michell on 27/04/2010.
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "IntArray.h"
+#import "RuntimeException.h"
+
+@implementation IntArray
+
+@synthesize BuffSize;
+@synthesize count;
+@synthesize idx;
+@synthesize buffer;
+@synthesize intBuffer;
+@synthesize SPARSE;
+
++ (IntArray *)newArray
+{
+    return [[IntArray alloc] init];
+}
+
++ (IntArray *)newArrayWithLen:(NSUInteger)aLen
+{
+    return [[IntArray alloc] initWithLen:aLen];
+}
+
+- (id)init
+{
+    self = [super init];
+    if ( self != nil ) {
+        BuffSize  = (ANTLR_INT_ARRAY_INITIAL_SIZE * (sizeof(NSInteger)/sizeof(id)));
+        count = 0;
+        idx = -1;
+        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
+        intBuffer = (NSInteger *)[buffer mutableBytes];
+        SPARSE = NO;
+    }
+    return self;
+}
+
+- (id)initWithLen:(NSUInteger)aLen
+{
+    self = [super init];
+    if ( self != nil ) {
+        BuffSize  = aLen;    // honor the requested initial capacity
+        count = 0;
+        idx = -1;
+        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
+        intBuffer = (NSInteger *)[buffer mutableBytes];
+        SPARSE = NO;
+    }
+    return self;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in IntArray" );
+#endif
+    if ( buffer ) [buffer release];
+    [super dealloc];
+}
+
+- (id)copyWithZone:(NSZone *)aZone
+{
+    IntArray *copy;
+    
+    copy = [[[self class] alloc] initWithLen:BuffSize];
+    NSInteger anIndex;
+    // copy only the occupied slots; addInteger: maintains idx and count in the copy
+    for ( anIndex = 0; anIndex <= idx; anIndex++ ) {
+        [copy addInteger:intBuffer[anIndex]];
+    }
+    return copy;
+}
+
+- (NSUInteger)count
+{
+    return count;
+}
+
+// FIXME: the Java runtime returns the element count here; this returns the occupied size in bytes instead.
+- (NSUInteger)size
+{
+    if ( count > 0 )
+        return ( count * sizeof(NSInteger));
+    return 0;
+}
+
+- (void)addInteger:(NSInteger) value
+{
+    [self ensureCapacity:idx+1];
+    intBuffer[++idx] = (NSInteger) value;
+    count++;
+}
+
+- (NSInteger)pop
+{
+    if ( idx < 0 ) {
+        @throw [IllegalArgumentException newException:[NSString stringWithFormat:@"Nothing to pop, count = %d", count]];
+    }
+    NSInteger value = (NSInteger) intBuffer[idx--];
+    count--;
+    return value;
+}
+
+- (void)push:(NSInteger)aValue
+{
+    [self addInteger:aValue];
+}
+
+- (NSInteger)integerAtIndex:(NSUInteger) anIndex
+{
+    if ( SPARSE==NO  && anIndex > idx ) {
+        @throw [IllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than count %d", anIndex, count]];
+    }
+    else if ( SPARSE == YES && anIndex >= BuffSize ) {
+        @throw [IllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than BuffSize %d", anIndex, BuffSize]];
+    }
+    return intBuffer[anIndex];
+}
+
+- (void)insertInteger:(NSInteger)aValue AtIndex:(NSUInteger)anIndex
+{
+    [self replaceInteger:aValue AtIndex:anIndex];
+    count++;
+}
+
+- (NSInteger)removeIntegerAtIndex:(NSUInteger) anIndex
+{
+    if ( SPARSE==NO && anIndex > idx ) {
+        @throw [IllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than count %d", anIndex, count]];
+    } else if ( SPARSE==YES && anIndex >= BuffSize ) {
+        @throw [IllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than BuffSize %d", anIndex, BuffSize]];
+    }
+    count--;
+    return intBuffer[anIndex];
+}
+
+- (void)replaceInteger:(NSInteger)aValue AtIndex:(NSUInteger)anIndex
+{
+    if ( SPARSE == NO && anIndex > idx ) {
+        @throw [IllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than count %d", anIndex, count]];
+    }
+    else if ( SPARSE == YES && anIndex >= BuffSize ) {
+        @throw [IllegalArgumentException newException:[NSString stringWithFormat:@"Index %d must be less than BuffSize %d", anIndex, BuffSize]];
+    }
+    intBuffer[anIndex] = aValue;
+}
+
+-(void) reset
+{
+    count = 0;
+    idx = -1;
+}
+
+- (void) ensureCapacity:(NSUInteger) anIndex
+{
+    if ( (anIndex * sizeof(NSUInteger)) >= [buffer length] )
+    {
+        NSUInteger newSize = ([buffer length] / sizeof(NSInteger)) * 2;
+        if (anIndex > newSize) {
+            newSize = anIndex + 1;
+        }
+        BuffSize = newSize;
+        [buffer setLength:(BuffSize * sizeof(NSUInteger))];
+        intBuffer = (NSInteger *)[buffer mutableBytes];
+    }
+}
+
+@end
+
diff --git a/runtime/ObjC/Framework/IntStream.h b/runtime/ObjC/Framework/IntStream.h
new file mode 100644
index 0000000..08444a7
--- /dev/null
+++ b/runtime/ObjC/Framework/IntStream.h
@@ -0,0 +1,102 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DEBUG_DEALLOC
+#define DEBUG_DEALLOC
+#endif
+
+#import <Foundation/Foundation.h>
+
+@protocol IntStream < NSObject, NSCopying >
+
+- (void) consume;
+
+// Get the unichar at current input pointer + i ahead, where i=1 is the next character.
+// Returned as an NSInteger so that CharStreamEOF (-1) fits in the data range.
+- (NSInteger) LA:(NSInteger) i;
+
+// Tell the stream to start buffering if it hasn't already.  Return
+// current input position, index(), or some other marker so that
+// when passed to rewind() you get back to the same spot.
+// rewind(mark()) should not affect the input cursor.
+// TODO: problem in that the lexer stream returns not an index but some other marker
+
+- (NSInteger) mark;
+
+// Return the current input symbol index 0..n where n indicates the
+// last symbol has been read.
+
+- (NSInteger) index;
+
+// Reset the stream so that next call to index would return marker.
+// The marker will usually be -index but it doesn't have to be.  It's
+// just a marker to indicate what state the stream was in.  This is
+// essentially calling -release: and -seek:.  If there are markers
+// created after this marker argument, this routine must unroll them
+// like a stack.  Assume the state the stream was in when this marker
+// was created.
+
+- (void) rewind;
+- (void) rewind:(NSInteger) marker;
+
+// You may want to commit to a backtrack but don't want to force the
+// stream to keep bookkeeping objects around for a marker that is
+// no longer necessary.  This will have the same behavior as
+// rewind() except it releases resources without the backward seek.
+
+- (void) release:(NSInteger) marker;
+
+// Set the input cursor to the position indicated by index.  This is
+// normally used to seek ahead in the input stream.  No buffering is
+// required to do this unless you know your stream will use seek to
+// move backwards such as when backtracking.
+// This is different from rewind in its multi-directional
+// requirement and in that its argument is strictly an input cursor (index).
+//
+// For char streams, seeking forward must update the stream state such
+// as line number.  For seeking backwards, you will be presumably
+// backtracking using the mark/rewind mechanism that restores state and
+// so this method does not need to update state when seeking backwards.
+//
+// Currently, this method is only used for efficient backtracking, but
+// in the future it may be used for incremental parsing.
+
+- (void) seek:(NSInteger) anIndex;
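+
+/* Illustrative usage sketch (not from the original sources).  It shows the
+ * mark/rewind/release/seek contract described above for a hypothetical,
+ * already-created id<IntStream> named `stream`; the speculativeMatch method
+ * is likewise hypothetical.
+ *
+ *   NSInteger marker = [stream mark];   // remember the current position
+ *   BOOL ok = [self speculativeMatch];  // consumes some symbols speculatively
+ *   if ( !ok ) {
+ *       [stream rewind:marker];         // undo the speculative reads
+ *   } else {
+ *       [stream release:marker];        // commit; drop the marker bookkeeping
+ *   }
+ *   // [stream seek:anIndex] would instead jump the cursor directly, without
+ *   // the marker bookkeeping that mark/rewind maintain.
+ */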
+
+/** Only makes sense for streams that buffer everything up probably, but
+ *  might be useful to display the entire stream or for testing.  This
+ *  value includes a single EOF.
+ */
+- (NSUInteger) size;
+/** Where are you getting symbols from?  Normally, implementations will
+ *  pass the buck all the way to the lexer who can ask its input stream
+ *  for the file name or whatever.
+ */
+- (NSString *)getSourceName;
+
+//@property (assign) NSInteger index;
+//@property (assign) NSUInteger line;
+//@property (assign) NSUInteger charPositionInLine;
+
+
+@end
diff --git a/runtime/ObjC/Framework/Lexer.h b/runtime/ObjC/Framework/Lexer.h
new file mode 100644
index 0000000..9fa13c8
--- /dev/null
+++ b/runtime/ObjC/Framework/Lexer.h
@@ -0,0 +1,90 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+#import "TokenSource.h"
+#import "BaseRecognizer.h"
+#import "RecognizerSharedState.h"
+#import "CharStream.h"
+#import "Token.h"
+#import "CommonToken.h"
+#import "RecognitionException.h"
+#import "MismatchedTokenException.h"
+#import "MismatchedRangeException.h"
+
+@interface Lexer : BaseRecognizer <TokenSource> {
+	id<CharStream> input;      ///< The character stream we pull tokens out of.
+	NSUInteger ruleNestingLevel;
+}
+
+@property (retain, getter=input, setter=setInput:) id<CharStream> input;
+@property (getter=getRuleNestingLevel, setter=setRuleNestingLevel:) NSUInteger ruleNestingLevel;
+
+#pragma mark Initializer
+- (id) initWithCharStream:(id<CharStream>) anInput;
+- (id) initWithCharStream:(id<CharStream>)anInput State:(RecognizerSharedState *)state;
+
+- (id) copyWithZone:(NSZone *)zone;
+
+- (void) reset;
+
+// - (RecognizerSharedState *) state;
+
+#pragma mark Tokens
+- (id<Token>)getToken;
+- (void) setToken: (id<Token>) aToken;
+- (id<Token>) nextToken;
+- (void) mTokens;		// abstract, defined in generated sources
+- (void) skip;
+- (id<CharStream>) input;
+- (void) setInput:(id<CharStream>)aCharStream;
+
+- (void) emit;
+- (void) emit:(id<Token>)aToken;
+
+#pragma mark Matching
+- (void) matchString:(NSString *)aString;
+- (void) matchAny;
+- (void) matchChar:(unichar) aChar;
+- (void) matchRangeFromChar:(unichar)fromChar to:(unichar)toChar;
+
+#pragma mark Informational
+- (NSUInteger) line;
+- (NSUInteger) charPositionInLine;
+- (NSInteger) index;
+- (NSString *) text;
+- (void) setText:(NSString *) theText;
+
+// error handling
+- (void) reportError:(RecognitionException *)e;
+- (NSString *)getErrorMessage:(RecognitionException *)e TokenNames:(AMutableArray *)tokenNames;
+- (NSString *)getCharErrorDisplay:(NSInteger)c;
+- (void) recover:(RecognitionException *)e;
+- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex;
+- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex;
+
+@end
diff --git a/runtime/ObjC/Framework/Lexer.m b/runtime/ObjC/Framework/Lexer.m
new file mode 100644
index 0000000..4b5e440
--- /dev/null
+++ b/runtime/ObjC/Framework/Lexer.m
@@ -0,0 +1,437 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <ANTLR/antlr.h>
+#import "Lexer.h"
+
+@implementation Lexer
+
+@synthesize input;
+@synthesize ruleNestingLevel;
+#pragma mark Initializer
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithState:[[RecognizerSharedState alloc] init]];
+    if ( self != nil ) {
+        input = [anInput retain];
+        if (state.token != nil)
+            [((CommonToken *)state.token) setInput:anInput];
+        ruleNestingLevel = 0;
+    }
+    return self;
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput State:(RecognizerSharedState *)aState
+{
+    self = [super initWithState:aState];
+    if ( self != nil ) {
+        input = [anInput retain];
+        if (state.token != nil)
+            [((CommonToken *)state.token) setInput:anInput];
+        ruleNestingLevel = 0;
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    if ( input ) [input release];
+    [super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    Lexer *copy;
+    
+    copy = [[[self class] allocWithZone:aZone] init];
+    //    copy = [super copyWithZone:aZone]; // allocation occurs here
+    if ( input != nil )
+        copy.input = input;
+    copy.ruleNestingLevel = ruleNestingLevel;
+    return copy;
+}
+
+- (void) reset
+{
+    [super reset]; // reset all recognizer state variables
+                   // then clear the Lexer-specific state variables
+    if ( input != nil ) {
+        [input seek:0]; // rewind the input
+    }
+    if ( state == nil ) {
+        return; // no shared state work to do
+    }
+    state.token = nil;
+    state.type = CommonToken.INVALID_TOKEN_TYPE;
+    state.channel = CommonToken.DEFAULT_CHANNEL;
+    state.tokenStartCharIndex = -1;
+    state.tokenStartCharPositionInLine = -1;
+    state.tokenStartLine = -1;
+    state.text = nil;
+}
+
+// token stuff
+#pragma mark Tokens
+
+- (id<Token>)getToken
+{
+    return [state getToken]; 
+}
+
+- (void) setToken: (id<Token>) aToken
+{
+    if (state.token != aToken) {
+        [aToken retain];
+        state.token = aToken;
+    }
+}
+
+
+// this method may be overridden in the generated lexer if we generate a filtering lexer.
+- (id<Token>) nextToken
+{
+    while (YES) {
+        [self setToken:nil];
+        state.channel = CommonToken.DEFAULT_CHANNEL;
+        state.tokenStartCharIndex = input.index;
+        state.tokenStartCharPositionInLine = input.getCharPositionInLine;
+        state.tokenStartLine = input.getLine;
+        state.text = nil;
+        
+        // [self setText:[self text]];
+        if ([input LA:1] == CharStreamEOF) {
+            CommonToken *eof = [CommonToken newToken:input
+                                                          Type:TokenTypeEOF
+                                                       Channel:CommonToken.DEFAULT_CHANNEL
+                                                         Start:input.index
+                                                          Stop:input.index];
+            [eof setLine:input.getLine];
+            [eof setCharPositionInLine:input.getCharPositionInLine];
+            return eof;
+        }
+        @try {
+            [self mTokens];
+            // SEL aMethod = @selector(mTokens);
+            // [[self class] instancesRespondToSelector:aMethod];
+            if ( state.token == nil)
+                [self emit];
+            else if ( state.token == [CommonToken skipToken] ) {
+                continue;
+            }
+            return state.token;
+        }
+        @catch (MismatchedRangeException *re) {
+            [self reportError:re];
+            // [self recover:re];
+        }
+        @catch (MismatchedTokenException *re) {
+            [self reportError:re];
+            // [self recover:re];
+        }
+        @catch (RecognitionException *re) {
+            [self reportError:re];
+            [self recover:re];
+        }
+    }
+}
+
+- (void) mTokens
+{   // abstract, defined in generated source as a starting point for matching
+    [self doesNotRecognizeSelector:_cmd];
+}
+
+- (void) skip
+{
+    state.token = [CommonToken skipToken];
+}
+
+- (id<CharStream>) input
+{
+    return input; 
+}
+
+- (void) setInput:(id<CharStream>) anInput
+{
+    if ( anInput != input ) {
+        if ( input ) [input release];
+    }
+    input = nil;
+    [self reset];
+    input = anInput;
+    [input retain];
+}
+
+/** Currently does not support multiple emits per nextToken invocation
+ *  for efficiency reasons.  Subclass and override this method and
+ *  nextToken to push tokens onto a list and pull from that list, rather
+ *  than using the single variable this implementation does (an
+ *  illustrative sketch follows the two emit methods below).
+ */
+- (void) emit:(id<Token>)aToken
+{
+    state.token = aToken;
+}
+
+/** The standard method called to automatically emit a token at the
+ *  outermost lexical rule.  The token object should point into the
+ *  char buffer start..stop.  If there is a text override in 'text',
+ *  use that to set the token's text.  Override this method to emit
+ *  custom Token objects.
+ *
+ *  If you are building trees, then you should also override
+ *  Parser or TreeParser.getMissingSymbol().
+ */
+- (void) emit
+{
+    id<Token> aToken = [CommonToken newToken:input
+                                                  Type:state.type
+                                               Channel:state.channel
+                                                 Start:state.tokenStartCharIndex
+                                                  Stop:input.index-1];
+    aToken.text = [self text];
+    [aToken setCharPositionInLine:state.tokenStartCharPositionInLine];
+    [aToken setLine:state.tokenStartLine];
+    [aToken retain];
+    [self emit:aToken];
+    // [aToken release];
+}
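+
+/* Illustrative sketch, not part of the original sources: the comments above
+ * suggest queuing tokens when a rule needs to emit more than one token.  A
+ * minimal subclass could look like this; MyQueuingLexer and tokenQueue are
+ * hypothetical names, and tokenQueue is assumed to be created (and released)
+ * in the subclass's initializer and dealloc, omitted here.
+ *
+ *   @interface MyQueuingLexer : Lexer {
+ *       NSMutableArray *tokenQueue;
+ *   }
+ *   @end
+ *
+ *   @implementation MyQueuingLexer
+ *
+ *   - (void) emit:(id<Token>)aToken
+ *   {
+ *       [super emit:aToken];            // keep state.token current
+ *       [tokenQueue addObject:aToken];  // remember every emitted token
+ *   }
+ *
+ *   - (id<Token>) nextToken
+ *   {
+ *       if ( [tokenQueue count] > 0 ) { // drain previously queued tokens first
+ *           id<Token> t = [[tokenQueue objectAtIndex:0] retain];
+ *           [tokenQueue removeObjectAtIndex:0];
+ *           return [t autorelease];
+ *       }
+ *       return [super nextToken];       // runs mTokens and fills the queue via emit:
+ *   }
+ *
+ *   @end
+ */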
+
+// matching
+#pragma mark Matching
+- (void) matchString:(NSString *)aString
+{
+    unichar c;
+    unsigned int i = 0;
+    unsigned int stringLength = [aString length];
+    while ( i < stringLength ) {
+        c = [input LA:1];
+        if ( c != [aString characterAtIndex:i] ) {
+            if ([state getBacktracking] > 0) {
+                state.failed = YES;
+                return;
+            }
+            MismatchedTokenException *mte = [MismatchedTokenException newExceptionChar:[aString characterAtIndex:i] Stream:input];
+            mte.c = c;
+            [self recover:mte];
+            @throw mte;
+        }
+        i++;
+        [input consume];
+        state.failed = NO;
+    }
+}
+
+- (void) matchAny
+{
+    [input consume];
+}
+
+- (void) matchChar:(unichar) aChar
+{
+    // TODO: -LA: is returning an int because it sometimes is used in the generated parser to compare lookahead with a tokentype.
+    //       try to change all those occurrences to -LT: if possible (i.e. if ANTLR can be made to generate LA only for lexer code)
+    unichar charLA;
+    charLA = [input LA:1];
+    if ( charLA != aChar) {
+        if ([state getBacktracking] > 0) {
+            state.failed = YES;
+            return;
+        }
+        MismatchedTokenException  *mte = [MismatchedTokenException newExceptionChar:aChar Stream:input];
+        mte.c = charLA;
+        [self recover:mte];
+        @throw mte;
+    }
+    [input consume];
+    state.failed = NO;
+}
+
+- (void) matchRangeFromChar:(unichar)fromChar to:(unichar)toChar
+{
+    unichar charLA = (unichar)[input LA:1];
+    if ( charLA < fromChar || charLA > toChar ) {
+        if ([state getBacktracking] > 0) {
+            state.failed = YES;
+            return;
+        }
+        MismatchedRangeException  *mre = [MismatchedRangeException
+                    newException:NSMakeRange((NSUInteger)fromChar,(NSUInteger)toChar)
+                               stream:input];
+        mre.c = charLA;
+        [self recover:mre];
+        @throw mre;
+    }       
+    [input consume];
+    state.failed = NO;
+}
+
+    // info
+#pragma mark Informational
+
+- (NSUInteger) line
+{
+    return input.getLine;
+}
+
+- (NSUInteger) charPositionInLine
+{
+    return input.getCharPositionInLine;
+}
+
+- (NSInteger) index
+{
+    return 0;
+}
+
+- (NSString *) text
+{
+    if (state.text != nil) {
+        return state.text;
+    }
+    return [input substringWithRange:NSMakeRange(state.tokenStartCharIndex, input.index-state.tokenStartCharIndex)];
+}
+
+- (void) setText:(NSString *) theText
+{
+    state.text = theText;
+}
+
+    // error handling
+- (void) reportError:(RecognitionException *)e
+{
+    /** TODO: not thought about recovery in lexer yet.
+     *
+     // if we've already reported an error and have not matched a token
+     // yet successfully, don't report any errors.
+     if ( errorRecovery ) {
+     //System.err.print("[SPURIOUS] ");
+     return;
+     }
+     errorRecovery = true;
+     */
+    
+    [self displayRecognitionError:[self getTokenNames] Exception:e];
+}
+
+- (NSString *)getErrorMessage:(RecognitionException *)e TokenNames:(AMutableArray *)tokenNames
+{
+/*    NSString *msg = [NSString stringWithFormat:@"Gotta fix getErrorMessage in Lexer.m--%@\n",
+                     e.name];
+ */
+    NSString *msg = nil;
+    if ( [e isKindOfClass:[MismatchedTokenException class]] ) {
+        MismatchedTokenException *mte = (MismatchedTokenException *)e;
+        msg = [NSString stringWithFormat:@"mismatched character \"%@\" expecting \"%@\"",
+            [self getCharErrorDisplay:mte.c], [self getCharErrorDisplay:mte.expectingChar]];
+    }
+    else if ( [e isKindOfClass:[NoViableAltException class]] ) {
+        NoViableAltException *nvae = (NoViableAltException *)e;
+        // for development, can add "decision=<<"+nvae.grammarDecisionDescription+">>"
+        // and "(decision="+nvae.decisionNumber+") and
+        // "state "+nvae.stateNumber
+        msg = [NSString stringWithFormat:@"no viable alternative decision:%d state:%d at character \"%@\"",
+               nvae.decisionNumber, nvae.stateNumber, [self getCharErrorDisplay:(nvae.c)]];
+    }
+    else if ( [e isKindOfClass:[EarlyExitException class]] ) {
+        EarlyExitException *eee = (EarlyExitException *)e;
+        // for development, can add "(decision="+eee.decisionNumber+")"
+        msg = [NSString stringWithFormat:@"required (...)+ loop did not match anything at character \"%@\"",
+               [self getCharErrorDisplay:(eee.c)]];
+    }
+    else if ( [e isKindOfClass:[MismatchedNotSetException class]] ) {
+        MismatchedNotSetException *mse = (MismatchedNotSetException *)e;
+        msg = [NSString stringWithFormat:@"mismatched character \"%@\"  expecting set \"%@\"",
+               [self getCharErrorDisplay:(mse.c)], mse.expecting];
+    }
+    else if ( [e isKindOfClass:[MismatchedSetException class]] ) {
+        MismatchedSetException *mse = (MismatchedSetException *)e;
+        msg = [NSString stringWithFormat:@"mismatched character \"%@\" expecting set \"%@\"",
+               [self getCharErrorDisplay:(mse.c)], mse.expecting];
+    }
+    else if ( [e isKindOfClass:[MismatchedRangeException class]] ) {
+        MismatchedRangeException *mre = (MismatchedRangeException *)e;
+        msg = [NSString stringWithFormat:@"mismatched character \"%@\" \"%@..%@\"",
+               [self getCharErrorDisplay:(mre.c)], [self getCharErrorDisplay:(mre.range.location)],
+               [self getCharErrorDisplay:(mre.range.location+mre.range.length-1)]];
+    }
+    else {
+        msg = [super getErrorMessage:e TokenNames:[self getTokenNames]];
+    }
+    return msg;
+}
+
+- (NSString *)getCharErrorDisplay:(NSInteger)c
+{
+    NSString *s;
+    switch ( c ) {
+        case 0:
+            s = @"char=<nil>";
+            break;
+        case TokenTypeEOF :
+        case 65535:
+            s = @"<EOF>";
+            break;
+        case '\n' :
+            s = @"\\n";
+            break;
+        case '\t' :
+            s = @"\\t";
+            break;
+        case '\r' :
+            s = @"\\r";
+            break;
+        default:
+            s = [NSString stringWithFormat:@"%c", (char)c];
+            break;
+    }
+    return s;
+}
+
+/** A lexer can normally match any char in its vocabulary after matching
+ *  a token, so do the easy thing and just kill a character and hope
+ *  it all works out.  You can instead use the rule invocation stack
+ *  to do sophisticated error recovery if you are in a fragment rule.
+ */
+- (void)recover:(RecognitionException *)re
+{
+    //System.out.println("consuming char "+(char)input.LA(1)+" during recovery");
+    //re.printStackTrace();
+    [input consume];
+}
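+
+/* Illustrative sketch, not part of the original sources: as the comment above
+ * notes, a subclass can recover more aggressively than "kill one character".
+ * This hypothetical override skips ahead to the next newline (or EOF) before
+ * resuming; it relies only on the input ivar and CharStreamEOF used above.
+ *
+ *   - (void) recover:(RecognitionException *)re
+ *   {
+ *       NSInteger c = [input LA:1];
+ *       while ( c != CharStreamEOF && c != '\n' ) {
+ *           [input consume];
+ *           c = [input LA:1];
+ *       }
+ *   }
+ */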
+
+- (void)traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex
+{
+    NSString *inputSymbol = [NSString stringWithFormat:@"%c line=%d:%d\n", [input LT:1], input.getLine, input.getCharPositionInLine];
+    [super traceIn:ruleName Index:ruleIndex Object:inputSymbol];
+}
+
+- (void)traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex
+{
+    NSString *inputSymbol = [NSString stringWithFormat:@"%c line=%d:%d\n", [input LT:1], input.getLine, input.getCharPositionInLine];
+    [super traceOut:ruleName Index:ruleIndex Object:inputSymbol];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/LexerRuleReturnScope.h b/runtime/ObjC/Framework/LexerRuleReturnScope.h
new file mode 100644
index 0000000..08f7eef
--- /dev/null
+++ b/runtime/ObjC/Framework/LexerRuleReturnScope.h
@@ -0,0 +1,43 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+
+@interface LexerRuleReturnScope : NSObject {
+	NSInteger start;
+	NSInteger stopToken;
+}
+
+- (NSInteger) getStart;
+- (void) setStart: (NSInteger) aStart;
+
+- (NSInteger) getStop;
+- (void) setStop: (NSInteger) aStop;
+
+@property (assign, getter=getStart, setter=setStart:) NSInteger start;
+@property (getter=getStop,setter=setStop:) NSInteger stopToken;
+
+@end
diff --git a/runtime/ObjC/Framework/LexerRuleReturnScope.m b/runtime/ObjC/Framework/LexerRuleReturnScope.m
new file mode 100644
index 0000000..9bb4226
--- /dev/null
+++ b/runtime/ObjC/Framework/LexerRuleReturnScope.m
@@ -0,0 +1,62 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "LexerRuleReturnScope.h"
+
+
+@implementation LexerRuleReturnScope
+
+@synthesize start;
+
+//---------------------------------------------------------- 
+//  start 
+//---------------------------------------------------------- 
+- (NSInteger) getStart
+{
+    return start;
+}
+
+- (void) setStart: (NSInteger) aStart
+{
+    start = aStart;
+}
+
+//---------------------------------------------------------- 
+//  stop 
+//---------------------------------------------------------- 
+- (NSInteger) getStop
+{
+    return stopToken;
+}
+
+- (void) setStop: (NSInteger) aStop
+{
+    stopToken = aStop;
+}
+
+
+
+@end
diff --git a/runtime/ObjC/Framework/LexerState.h b/runtime/ObjC/Framework/LexerState.h
new file mode 100644
index 0000000..7399502
--- /dev/null
+++ b/runtime/ObjC/Framework/LexerState.h
@@ -0,0 +1,57 @@
+// [The "BSD licence"]
+// Copyright (c) 2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Token.h"
+#import "RecognizerSharedState.h"
+
+@interface LexerState : RecognizerSharedState {
+}
+
+- (void) reset;
+
+- (id<Token>) getToken;
+- (void) setToken:(id<Token>) theToken;
+
+- (NSUInteger) getTokenType;
+- (void) setTokenType:(NSUInteger) theTokenType;
+
+- (NSUInteger) channel;
+- (void) setChannel:(NSUInteger) theChannel;
+
+- (NSUInteger) getTokenStartLine;
+- (void) setTokenStartLine:(NSUInteger) theTokenStartLine;
+
+- (NSUInteger) getTokenCharPositionInLine;
+- (void) setTokenCharPositionInLine:(NSUInteger) theCharPosition;
+
+- (NSInteger) getTokenStartCharIndex;
+- (void) setTokenStartCharIndex:(NSInteger) theTokenStartCharIndex;
+
+- (NSString *) text;
+- (void) setText:(NSString *) theText;
+
+@end
diff --git a/runtime/ObjC/Framework/LexerState.m b/runtime/ObjC/Framework/LexerState.m
new file mode 100644
index 0000000..84ac36f
--- /dev/null
+++ b/runtime/ObjC/Framework/LexerState.m
@@ -0,0 +1,139 @@
+// [The "BSD licence"]
+// Copyright (c) 2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "LexerState.h"
+
+
+@implementation LexerState
+
+- (id) init
+{
+	self = [super init];
+	if (self) {
+		[self reset];
+	}
+	return self;
+}
+
+- (void) reset
+{
+	[self setToken:nil];
+	type = 0;				
+	channel = 0;				
+	tokenStartLine = 0;		
+	tokenStartCharPositionInLine = 0;
+	tokenStartCharIndex = -1;    
+	[self setText:nil];
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in LexerState" );
+#endif
+	[self setText:nil];
+	[self setToken:nil];
+	[super dealloc];
+}
+
+- (id<Token>) getToken
+{
+	return token;
+}
+
+- (void) setToken:(id<Token>) theToken
+{
+	if (theToken != token) {
+		if ( token ) [token release];
+		token = [theToken retain];
+	}
+}
+
+
+- (NSUInteger) getTokenType
+{
+	return type;
+}
+
+- (void) setTokenType:(NSUInteger) theTokenType
+{
+	type = theTokenType;
+}
+
+- (NSUInteger)channel
+{
+	return channel;
+}
+
+- (void) setChannel:(NSUInteger) theChannel
+{
+	channel = theChannel;
+}
+
+- (NSUInteger) getTokenStartLine
+{
+	return tokenStartLine;
+}
+
+- (void) setTokenStartLine:(NSUInteger) theTokenStartLine
+{
+	tokenStartLine = theTokenStartLine;
+}
+
+- (NSUInteger) getTokenCharPositionInLine
+{
+	return tokenStartCharPositionInLine;
+}
+
+- (void) setTokenCharPositionInLine:(NSUInteger) theCharPosition
+{
+	tokenStartCharPositionInLine = theCharPosition;
+}
+
+- (NSInteger) getTokenStartCharIndex
+{
+	return tokenStartCharIndex;
+}
+
+- (void) setTokenStartCharIndex:(NSInteger) theTokenStartCharIndex
+{
+	tokenStartCharIndex = theTokenStartCharIndex;
+}
+
+- (NSString *) text
+{
+	return text;
+}
+
+- (void) setText:(NSString *) theText
+{
+	if (text != theText) {
+		if ( text ) [text release];
+		text = [theText retain];
+	}
+}
+
+@end
diff --git a/runtime/ObjC/Framework/LinkBase.h b/runtime/ObjC/Framework/LinkBase.h
new file mode 100644
index 0000000..760a493
--- /dev/null
+++ b/runtime/ObjC/Framework/LinkBase.h
@@ -0,0 +1,80 @@
+//
+//  LinkBase.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/14/10.
+//  [The "BSD licence"]
+//  Copyright (c) 2010 Alan Condit
+//  All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+
+#ifndef DEBUG_DEALLOC
+#define DEBUG_DEALLOC
+#endif
+
+@protocol LinkList <NSObject>
+
++ (id<LinkList>)newLinkBase;
++ (id<LinkList>)newLinkBase:(id<LinkList>)np Prev:(id<LinkList>)pp;
+
+- (void) dealloc;
+
+- (id<LinkList>) append:(id<LinkList>)node;
+- (id<LinkList>) insert:(id<LinkList>)node;
+
+- (id<LinkList>) getfNext;
+- (void) setFNext:(id<LinkList>)np;
+- (id<LinkList>)getfPrev;
+- (void) setFPrev:(id<LinkList>)pp;
+
+@property (retain) id<LinkList> fPrev;
+@property (retain) id<LinkList> fNext;
+@end
+
+@interface LinkBase : NSObject <LinkList> {
+	id<LinkList> fPrev;
+	id<LinkList> fNext;
+}
+
+@property (retain) id<LinkList> fPrev;
+@property (retain) id<LinkList> fNext;
+
++ (id<LinkList>)newLinkBase;
++ (id<LinkList>)newLinkBase:(id<LinkList>)np Prev:(id<LinkList>)pp;
+- (id<LinkList>)init;
+- (id<LinkList>)initWithPtr:(id)np Prev:(id)pp;
+- (void)dealloc;
+
+- (id) copyWithZone:(NSZone *)aZone;
+
+- (id<LinkList>)append:(id<LinkList>)node;
+- (id<LinkList>)insert:(id<LinkList>)node;
+
+- (id<LinkList>)getfNext;
+- (void)setfNext:(id<LinkList>) np;
+- (id<LinkList>)getfPrev;
+- (void)setfPrev:(id<LinkList>) pp;
+@end
diff --git a/runtime/ObjC/Framework/LinkBase.m b/runtime/ObjC/Framework/LinkBase.m
new file mode 100644
index 0000000..ded5169
--- /dev/null
+++ b/runtime/ObjC/Framework/LinkBase.m
@@ -0,0 +1,127 @@
+//
+//  LinkBase.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/14/10.
+//  [The "BSD licence"]
+//  Copyright (c) 2010 Alan Condit
+//  All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "LinkBase.h"
+
+@implementation LinkBase
+
+@synthesize fPrev;
+@synthesize fNext;
+
++(id<LinkList>)newLinkBase
+{
+	return [[LinkBase alloc] init];
+}
+
++(id<LinkList>)newLinkBase:(id<LinkList>)np Prev:(id<LinkList>)pp
+{
+	return [[LinkBase alloc] initWithPtr:np Prev:pp];
+}
+
+-(id<LinkList>)init
+{
+	if ((self = [super init]) != nil) {
+		fNext = nil;
+		fPrev = nil;
+	}
+	return(self);
+}
+
+-(id<LinkList>)initWithPtr:(id<LinkList>)np Prev:(id<LinkList>)pp
+{
+	if ((self = [super init]) != nil) {
+		fNext = np;
+		fPrev = pp;
+	}
+	return(self);
+}
+
+-(void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in LinkBase" );
+#endif
+	if (fNext) [fNext release];
+	if (fPrev) [fPrev release];
+	[super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    LinkBase *copy;
+    
+    copy = [[self class] allocWithZone:aZone];
+    copy.fPrev = fPrev;
+    copy.fNext = fNext;
+    return( copy );
+}
+
+-(id<LinkList>)append:(id<LinkList>)node
+{
+	node.fPrev = (id<LinkList>)self;
+	node.fNext = (id<LinkList>)self.fNext;
+	if (node.fNext != nil)
+        node.fNext.fPrev = node;
+    self.fNext = node;
+    return( node );
+}
+
+-(id<LinkList>)insert:(id<LinkList>)node
+{
+	node.fNext = self;
+	node.fPrev = self.fPrev;
+    if (node.fPrev != nil) 
+        node.fPrev.fNext = node;
+	self.fPrev = node;
+	return( node );
+}
+
+-(id<LinkList>)getfNext
+{
+	return(fNext);
+}
+
+-(void)setfNext:(id<LinkList>)np
+{
+	fNext = np;
+}
+
+-(id<LinkList>)getfPrev
+{
+	return(fPrev);
+}
+
+-(void)setfPrev:(id<LinkList>)pp
+{
+	fPrev = pp;
+}
+
+@end
diff --git a/runtime/ObjC/Framework/LinkedHashMap.h b/runtime/ObjC/Framework/LinkedHashMap.h
new file mode 100644
index 0000000..224ea0c
--- /dev/null
+++ b/runtime/ObjC/Framework/LinkedHashMap.h
@@ -0,0 +1,240 @@
+#import "HashMap.h"
+/**
+ * <p>Hash table and linked list implementation of the <tt>Map</tt> interface,
+ * with predictable iteration order.  This implementation differs from
+ * <tt>HashMap</tt> in that it maintains a doubly-linked list running through
+ * all of its entries.  This linked list defines the iteration ordering,
+ * which is normally the order in which keys were inserted into the map
+ * (<i>insertion-order</i>).  Note that insertion order is not affected
+ * if a key is <i>re-inserted</i> into the map.  (A key <tt>k</tt> is
+ * reinserted into a map <tt>m</tt> if <tt>m.put(k, v)</tt> is invoked when
+ * <tt>m.containsKey(k)</tt> would return <tt>true</tt> immediately prior to
+ * the invocation.)
+ * 
+ * <p>This implementation spares its clients from the unspecified, generally
+ * chaotic ordering provided by {@link HashMap} (and {@link Hashtable}),
+ * without incurring the increased cost associated with {@link TreeMap}.  It
+ * can be used to produce a copy of a map that has the same order as the
+ * original, regardless of the original map's implementation:
+ * <pre>
+ * void foo(Map m) {
+ * Map copy = new LinkedHashMap(m);
+ * ...
+ * }
+ * </pre>
+ * This technique is particularly useful if a module takes a map on input,
+ * copies it, and later returns results whose order is determined by that of
+ * the copy.  (Clients generally appreciate having things returned in the same
+ * order they were presented.)
+ * 
+ * <p>A special {@link #LinkedHashMap(NSInteger,float,boolean) constructor} is
+ * provided to create a linked hash map whose order of iteration is the order
+ * in which its entries were last accessed, from least-recently accessed to
+ * most-recently (<i>access-order</i>).  This kind of map is well-suited to
+ * building LRU caches.  Invoking the <tt>put</tt> or <tt>get</tt> method
+ * results in an access to the corresponding entry (assuming it exists after
+ * the invocation completes).  The <tt>putAll</tt> method generates one entry
+ * access for each mapping in the specified map, in the order that key-value
+ * mappings are provided by the specified map's entry set iterator.  <i>No
+ * other methods generate entry accesses.</i> In particular, operations on
+ * collection-views do <i>not</i> affect the order of iteration of the backing
+ * map.
+ * 
+ * <p>The {@link #removeEldestEntry(Map.Entry)} method may be overridden to
+ * impose a policy for removing stale mappings automatically when new mappings
+ * are added to the map.
+ * 
+ * <p>This class provides all of the optional <tt>Map</tt> operations, and
+ * permits null elements.  Like <tt>HashMap</tt>, it provides constant-time
+ * performance for the basic operations (<tt>add</tt>, <tt>contains</tt> and
+ * <tt>remove</tt>), assuming the hash function disperses elements
+ * properly among the buckets.  Performance is likely to be just slightly
+ * below that of <tt>HashMap</tt>, due to the added expense of maintaining the
+ * linked list, with one exception: Iteration over the collection-views
+ * of a <tt>LinkedHashMap</tt> requires time proportional to the <i>size</i>
+ * of the map, regardless of its capacity.  Iteration over a <tt>HashMap</tt>
+ * is likely to be more expensive, requiring time proportional to its
+ * <i>capacity</i>.
+ * 
+ * <p>A linked hash map has two parameters that affect its performance:
+ * <i>initial capacity</i> and <i>load factor</i>.  They are defined precisely
+ * as for <tt>HashMap</tt>.  Note, however, that the penalty for choosing an
+ * excessively high value for initial capacity is less severe for this class
+ * than for <tt>HashMap</tt>, as iteration times for this class are unaffected
+ * by capacity.
+ * 
+ * <p><strong>Note that this implementation is not synchronized.</strong>
+ * If multiple threads access a linked hash map concurrently, and at least
+ * one of the threads modifies the map structurally, it <em>must</em> be
+ * synchronized externally.  This is typically accomplished by
+ * synchronizing on some object that naturally encapsulates the map.
+ * 
+ * If no such object exists, the map should be "wrapped" using the
+ * {@link Collections#synchronizedMap Collections.synchronizedMap}
+ * method.  This is best done at creation time, to prevent accidental
+ * unsynchronized access to the map:<pre>
+ * Map m = Collections.synchronizedMap(new LinkedHashMap(...));</pre>
+ * 
+ * A structural modification is any operation that adds or deletes one or more
+ * mappings or, in the case of access-ordered linked hash maps, affects
+ * iteration order.  In insertion-ordered linked hash maps, merely changing
+ * the value associated with a key that is already contained in the map is not
+ * a structural modification.  (<strong>In access-ordered linked hash maps,
+ * merely querying the map with <tt>get</tt> is a structural
+ * modification.</strong>)
+ * 
+ * <p>The iterators returned by the <tt>iterator</tt> method of the collections
+ * returned by all of this class's collection view methods are
+ * <em>fail-fast</em>: if the map is structurally modified at any time after
+ * the iterator is created, in any way except through the iterator's own
+ * <tt>remove</tt> method, the iterator will throw a {@link
+ * ConcurrentModificationException}.  Thus, in the face of concurrent
+ * modification, the iterator fails quickly and cleanly, rather than risking
+ * arbitrary, non-deterministic behavior at an undetermined time in the future.
+ * 
+ * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
+ * as it is, generally speaking, impossible to make any hard guarantees in the
+ * presence of unsynchronized concurrent modification.  Fail-fast iterators
+ * throw <tt>ConcurrentModificationException</tt> on a best-effort basis.
+ * Therefore, it would be wrong to write a program that depended on this
+ * exception for its correctness:   <i>the fail-fast behavior of iterators
+ * should be used only to detect bugs.</i>
+ * 
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ * 
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ * 
+ * @author  Josh Bloch
+ * @see     Object#hashCode()
+ * @see     Collection
+ * @see     Map
+ * @see     HashMap
+ * @see     TreeMap
+ * @see     Hashtable
+ * @since   1.4
+ */
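+
+/*
+ * Usage sketch (illustrative only, not part of the runtime): an access-ordered
+ * map keeps its least-recently used entry at the front of the linked list, so
+ * a plain get: reorders entries; this is the basis of an LRU cache.  Only the
+ * selectors declared in this header are assumed, and the variable names are
+ * hypothetical.
+ *
+ *   LinkedHashMap *cache = [LinkedHashMap newLinkedHashMap:16
+ *                                               loadFactor:0.75
+ *                                              accessOrder:YES];
+ *   id hit = [cache get:@"recentKey"];  // moves "recentKey" to the newest end
+ */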
+@class LinkedHashMap;
+
+/**
+ * LinkedHashMap entry.
+ */
+
+@interface LHMEntry : HMEntry
+{
+    LHMEntry *before;
+    LHMEntry *after;
+    BOOL accessOrder;
+}
+
+@property (retain) LHMEntry *before;
+@property (retain) LHMEntry *after;
+@property (assign) BOOL accessOrder;
+
+- (id) newEntry:(NSInteger)aHash key:(NSString *)aKey value:(id)aValue next:(LHMEntry *)aNext;
+
+- (id) init:(NSInteger)hash key:(NSString *)key value:(id)value next:(LHMEntry *)next;
+- (void) recordAccess:(LinkedHashMap *)m;
+- (void) recordRemoval:(LinkedHashMap *)m;
+
+@end
+
+/**
+ * LinkedHashMapIterator.
+ */
+
+@interface LinkedHashIterator : HashIterator
+{
+    LHMEntry *nextEntry;
+    LHMEntry *lastReturned;
+    LinkedHashMap *lhm;
+}
+
+@property (retain) LHMEntry *nextEntry;
+@property (retain) LHMEntry *lastReturned;
+@property (retain) LinkedHashMap *lhm;
+
++ (LinkedHashIterator *) newIterator:(LinkedHashMap *)aLHM;
+
+- (id) init:(LinkedHashMap *)aLHM;
+- (BOOL) hasNext;
+- (void) remove;
+- (LHMEntry *) nextEntry;
+@end
+
+@interface LHMEntryIterator : LinkedHashIterator
+{
+}
+
++ (LHMEntryIterator *)newIterator:(LinkedHashMap *)aHM;
+
+- (id) init:(LinkedHashMap *)aHM;
+- (LHMEntry *) next;
+@end
+
+@interface LHMKeyIterator : LinkedHashIterator
+{
+}
+
++ (LHMKeyIterator *)newIterator:(LinkedHashMap *)aHM;
+
+- (id) init:(LinkedHashMap *)aHM;
+- (NSString *) next;
+@end
+
+@interface LHMValueIterator : LinkedHashIterator
+{
+}
+
++ (LHMValueIterator *)newIterator:(LinkedHashMap *)aHM;
+
+- (id) init:(LinkedHashMap *)aHM;
+- (id) next;
+@end
+
+
+@interface LinkedHashMap : HashMap
+{
+    
+    /**
+     * The head of the doubly linked list.
+     */
+    LHMEntry *header;
+    /**
+     * The iteration ordering method for this linked hash map: <tt>true</tt>
+     * for access-order, <tt>false</tt> for insertion-order.
+     * 
+     * @serial
+     */
+    BOOL accessOrder;
+    
+}
+
+@property (retain) LHMEntry *header;
+@property (assign) BOOL accessOrder;
+
++ (id) newLinkedHashMap:(NSInteger)anInitialCapacity;
++ (id) newLinkedHashMap:(NSInteger)anInitialCapacity
+             loadFactor:(float)loadFactor;
++ (id) newLinkedHashMap:(NSInteger)anInitialCapacity
+             loadFactor:(float)loadFactor
+            accessOrder:(BOOL)anAccessOrder;
+
+- (id) init:(NSInteger)initialCapacity loadFactor:(float)loadFactor accessOrder:(BOOL)accessOrder;
+- (id) init:(NSInteger)initialCapacity loadFactor:(float)loadFactor;
+- (id) init:(NSInteger)initialCapacity;
+- (id) init;
+- (id) initWithM:(AMutableDictionary *)m;
+- (void) transfer:(AMutableArray *)newTable;
+- (BOOL) containsValue:(NSObject *)value;
+- (id) get:(NSString *)key;
+- (void) clear;
+- (LHMEntryIterator *) newEntryIterator;
+- (LHMKeyIterator *) newKeyIterator;
+- (LHMValueIterator *) newValueIterator;
+- (void) addEntry:(NSInteger)hash key:(NSString *)key value:(id)value bucketIndex:(NSInteger)bucketIndex;
+- (void) createEntry:(NSInteger)hash key:(NSString *)key value:(id)value bucketIndex:(NSInteger)bucketIndex;
+- (BOOL) removeEldestEntry:(LHMEntry *)eldest;
+@end
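+
+/*
+ * Iteration sketch (illustrative only): entries come back in the order kept by
+ * the internal linked list, i.e. insertion order unless accessOrder is YES.
+ * The variable names are hypothetical; the selectors are declared above.
+ *
+ *   LHMEntryIterator *it = [map newEntryIterator];
+ *   while ( [it hasNext] ) {
+ *       LHMEntry *e = [it next];
+ *       NSLog(@"key=%@ value=%@", e.key, e.value);
+ *   }
+ */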
diff --git a/runtime/ObjC/Framework/LinkedHashMap.m b/runtime/ObjC/Framework/LinkedHashMap.m
new file mode 100644
index 0000000..b0d5f6f
--- /dev/null
+++ b/runtime/ObjC/Framework/LinkedHashMap.m
@@ -0,0 +1,513 @@
+#import <Foundation/Foundation.h>
+#import "AMutableArray.h"
+#import "LinkedHashMap.h"
+#import "RuntimeException.h"
+
+extern NSInteger const DEFAULT_INITIAL_CAPACITY;
+extern float const DEFAULT_LOAD_FACTOR;
+
+@implementation LHMEntry
+
+@synthesize before;
+@synthesize after;
+@synthesize accessOrder;
+
+- (id) newEntry:(NSInteger)aHash key:(NSString *)aKey value:(id)aValue next:(LHMEntry *)aNext
+{
+    return [[LHMEntry alloc] init:aHash key:aKey value:aValue next:aNext];
+}
+
+- (id) init:(NSInteger)aHash key:(NSString *)aKey value:(id)aValue next:(LHMEntry *)aNext
+{
+    self = [super init:aHash key:aKey value:aValue next:aNext];
+    if (self) {
+    }
+    return self;
+}
+
+
+- (void) dealloc
+{
+    [before release];
+    [after release];
+    [super dealloc];
+}
+
+/**
+ * Removes this entry from the linked list.
+ */
+- (void) removeEntry
+{
+    before.after = after;
+    after.before = before;
+}
+
+
+/**
+ * Inserts this entry before the specified existing entry in the list.
+ */
+- (void) addBefore:(LHMEntry *)existingEntry
+{
+    after = [existingEntry retain];
+    before = [existingEntry.before retain];
+    before.after = [self retain];
+    after.before = [self retain];
+}
+
+
+/**
+ * This method is invoked by the superclass whenever the value
+ * of a pre-existing entry is read by Map.get or modified by Map.set.
+ * If the enclosing Map is access-ordered, it moves the entry
+ * to the end of the list; otherwise, it does nothing.
+ */
+- (void) recordAccess:(LinkedHashMap *)m
+{
+    LinkedHashMap *lhm = (LinkedHashMap *)m;
+    if (lhm.accessOrder) {
+        lhm.modCount++;
+        [self removeEntry];
+        [self addBefore:lhm.header];
+    }
+}
+
+- (void) recordRemoval:(LinkedHashMap *)m
+{
+    [self removeEntry];
+}
+
+@end
+
+@implementation LinkedHashIterator
+
+@synthesize nextEntry;
+@synthesize lastReturned;
+@synthesize lhm;
+
++ (LinkedHashIterator *) newIterator:(LinkedHashMap *)aLHM
+{
+    return [[LinkedHashIterator alloc] init:aLHM];
+}
+
+- (id) init:(LinkedHashMap *)aLHM
+{
+    self = [super init];
+    if ( self ) {
+        lhm = aLHM;
+        nextEntry = lhm.header.after;
+        lastReturned = nil;
+        expectedModCount = lhm.modCount;
+/*
+        AMutableArray *a = [AMutableArray arrayWithCapacity:lhm.Capacity];
+        LHMEntry *tmp = lhm.header.after;
+        while ( tmp != lhm.header ) {
+            [a addObject:tmp];
+            tmp = tmp.after;
+        }
+        anArray = [NSArray arrayWithArray:a];
+ */
+    }
+    return self;
+}
+
+- (BOOL) hasNext
+{
+    return nextEntry != lhm.header;
+}
+
+- (void) remove
+{
+    if (lastReturned == nil)
+        @throw [[IllegalStateException newException] autorelease];
+    if (lhm.modCount != expectedModCount)
+        @throw [[ConcurrentModificationException newException:@"Unexpected modCount"] autorelease];
+    [lhm remove:(NSString *)(lastReturned.key)];
+    lastReturned = nil;
+    expectedModCount = lhm.modCount;
+}
+
+- (LHMEntry *) nextEntry
+{
+    if (lhm.modCount != expectedModCount)
+        @throw [[ConcurrentModificationException newException:@"Unexpected modCount"] autorelease];
+    if (nextEntry == lhm.header)
+        @throw [[[NoSuchElementException alloc] init] autorelease];
+    LHMEntry * e = lastReturned = nextEntry;
+    nextEntry = e.after;
+    return e;
+}
+
+- (void) dealloc
+{
+    [nextEntry release];
+    [lastReturned release];
+    [super dealloc];
+}
+
+@end
+
+@implementation LHMKeyIterator
++ (LHMKeyIterator *)newIterator:(LinkedHashMap *)aLHM
+{
+    return [[LHMKeyIterator alloc] init:aLHM];
+}
+
+- (id) init:(LinkedHashMap *)aLHM
+{
+    self = [super init:aLHM];
+    if ( self ) {
+    }
+    return self;
+}
+
+- (NSString *) next
+{
+    return [self nextEntry].key;
+}
+
+@end
+
+@implementation LHMValueIterator
++ (LHMValueIterator *)newIterator:(LinkedHashMap *)aLHM
+{
+    return [[LHMValueIterator alloc] init:aLHM];
+}
+
+- (id) init:(LinkedHashMap *)aLHM
+{
+    self = [super init:aLHM];
+    if ( self ) {
+    }
+    return self;
+}
+
+- (id) next
+{
+    return [self nextEntry].value;
+}
+
+@end
+
+@implementation LHMEntryIterator
++ (LHMEntryIterator *)newIterator:(LinkedHashMap *)aLHM
+{
+    return [[LHMEntryIterator alloc] init:aLHM];
+}
+
+- (id) init:(LinkedHashMap *)aLHM
+{
+    self = [super init:aLHM];
+    if ( self ) {
+    }
+    return self;
+}
+
+- (LHMEntry *) next
+{
+    return [self nextEntry];
+}
+
+@end
+
+//long const serialVersionUID = 3801124242820219131L;
+
+@implementation LinkedHashMap
+
+@synthesize header;
+@synthesize accessOrder;
+
+/**
+ * Constructs an empty <tt>LinkedHashMap</tt> instance with the specified
+ * initial capacity, load factor and ordering mode.
+ * 
+ * @param  initialCapacity the initial capacity
+ * @param  loadFactor      the load factor
+ * @param  accessOrder     the ordering mode - <tt>true</tt> for
+ * access-order, <tt>false</tt> for insertion-order
+ * @throws IllegalArgumentException if the initial capacity is negative
+ * or the load factor is nonpositive
+ */
++ (id) newLinkedHashMap:(NSInteger)anInitialCapacity
+             loadFactor:(float)loadFactor
+            accessOrder:(BOOL)anAccessOrder
+{
+    return [[LinkedHashMap alloc] init:anInitialCapacity
+                            loadFactor:loadFactor
+                           accessOrder:(BOOL)anAccessOrder];
+}
+
++ (id) newLinkedHashMap:(NSInteger)anInitialCapacity loadFactor:(float)loadFactor
+{
+    return [[LinkedHashMap alloc] init:anInitialCapacity loadFactor:loadFactor];
+}
+
++ (id) newLinkedHashMap:(NSInteger)anInitialCapacity
+{
+    return [[LinkedHashMap alloc] init:anInitialCapacity loadFactor:DEFAULT_LOAD_FACTOR];
+}
+
+/**
+ * Constructs an empty <tt>LinkedHashMap</tt> instance with the
+ * specified initial capacity, load factor and ordering mode.
+ * 
+ * @param  initialCapacity the initial capacity
+ * @param  loadFactor      the load factor
+ * @param  accessOrder     the ordering mode - <tt>true</tt> for
+ * access-order, <tt>false</tt> for insertion-order
+ * @throws IllegalArgumentException if the initial capacity is negative
+ * or the load factor is nonpositive
+ */
+- (id) init:(NSInteger)anInitialCapacity loadFactor:(float)aLoadFactor accessOrder:(BOOL)anAccessOrder
+{
+    self = [super init:anInitialCapacity loadFactor:aLoadFactor];
+    if ( self ) {
+        accessOrder = anAccessOrder;
+        header = [[[LHMEntry alloc] init:-1 key:nil value:nil next:nil] retain];
+        header.before = header.after = header;
+    }
+    return self;
+}
+
+- (id) init:(NSInteger)anInitialCapacity loadFactor:(float)aLoadFactor
+{
+    self = [super init:anInitialCapacity loadFactor:aLoadFactor];
+    if ( self ) {
+        accessOrder = NO;
+        header = [[[LHMEntry alloc] init:-1 key:nil value:nil next:nil] retain];
+        header.before = header.after = header;
+    }
+    return self;
+}
+
+/**
+ * Constructs an empty insertion-ordered <tt>LinkedHashMap</tt> instance
+ * with the specified initial capacity and a default load factor (0.75).
+ * 
+ * @param  initialCapacity the initial capacity
+ * @throws IllegalArgumentException if the initial capacity is negative
+ */
+- (id) init:(NSInteger)initialCapacity
+{
+    self = [super init:initialCapacity loadFactor:DEFAULT_LOAD_FACTOR];
+    if ( self ) {
+        accessOrder = NO;
+        header = [[[LHMEntry alloc] init:-1 key:nil value:nil next:nil] retain];
+        header.before = header.after = header;
+    }
+    return self;
+}
+
+/**
+ * Constructs an insertion-ordered <tt>LinkedHashMap</tt> instance with
+ * the same mappings as the specified map.  The <tt>LinkedHashMap</tt>
+ * instance is created with a default load factor (0.75) and an initial
+ * capacity sufficient to hold the mappings in the specified map.
+ * 
+ * @param  m the map whose mappings are to be placed in this map
+ * @throws NullPointerException if the specified map is null
+ */
+- (id) initWithM:(LinkedHashMap *)m
+{
+    self = [super initWithM:m];
+    if ( self ) {
+        accessOrder = NO;
+        header = [[[LHMEntry alloc] init:-1 key:nil value:nil next:nil] retain];
+        header.before = header.after = header;
+    }
+    return self;
+}
+
+/**
+ * Constructs an empty insertion-ordered <tt>LinkedHashMap</tt> instance
+ * with the default initial capacity (16) and load factor (0.75).
+ */
+- (id) init
+{
+    self = [super init];
+    if ( self ) {
+        accessOrder = NO;
+        header = [[[LHMEntry alloc] init:-1 key:nil value:nil next:nil] retain];
+        header.before = header.after = header;
+    }
+    return self;
+}
+
+
+/**
+ * Transfers all entries to new table array.  This method is called
+ * by superclass resize.  It is overridden for performance, as it is
+ * faster to iterate using our linked list.
+ */
+- (void) transfer:(AMutableArray *)newTable
+{
+    NSInteger newCapacity = [newTable count];
+    
+    for (LHMEntry * e = header.after; e != header; e = e.after) {
+        NSInteger index = [self indexFor:e.hash length:newCapacity];
+        e.next = [newTable objectAtIndex:index];
+        [newTable replaceObjectAtIndex:index withObject:e];
+    }
+    
+}
+
+/**
+ * Returns <tt>true</tt> if this map maps one or more keys to the
+ * specified value.
+ * 
+ * @param value value whose presence in this map is to be tested
+ * @return <tt>true</tt> if this map maps one or more keys to the
+ * specified value
+ */
+- (BOOL) containsValue:(id)value
+{
+    if (value == nil) {
+        
+        for (LHMEntry * e = header.after; e != header; e = e.after)
+            if (e.value == nil)
+                return YES;
+        
+    }
+    else {
+        
+        for (LHMEntry * e = header.after; e != header; e = e.after)
+            if ([value isEqualTo:e.value])
+                return YES;
+        
+    }
+    return NO;
+}
+
+/**
+ * Returns the value to which the specified key is mapped,
+ * or {@code null} if this map contains no mapping for the key.
+ * 
+ * <p>More formally, if this map contains a mapping from a key
+ * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
+ * key.equals(k))}, then this method returns {@code v}; otherwise
+ * it returns {@code null}.  (There can be at most one such mapping.)
+ * 
+ * <p>A return value of {@code null} does not <i>necessarily</i>
+ * indicate that the map contains no mapping for the key; it's also
+ * possible that the map explicitly maps the key to {@code null}.
+ * The {@link #containsKey containsKey} operation may be used to
+ * distinguish these two cases.
+ */
+- (id) get:(NSString *)aKey
+{
+    LHMEntry * e = (LHMEntry *)[self getEntry:aKey];
+    if (e == nil)
+        return nil;
+    [e recordAccess:self];
+    return e.value;
+}
+
+
+/**
+ * Removes all of the mappings from this map.
+ * The map will be empty after this call returns.
+ */
+- (void) clear
+{
+    [super clear];
+    header.before = header.after = header;
+}
+
+- (void) dealloc {
+    [header release];
+    [super dealloc];
+}
+
+- (LHMEntryIterator *) newEntryIterator
+{
+    return [LHMEntryIterator newIterator:self];
+}
+
+- (LHMKeyIterator *) newKeyIterator
+{
+    return [LHMKeyIterator newIterator:self];
+}
+
+- (LHMValueIterator *) newValueIterator
+{
+    return [LHMValueIterator newIterator:self];
+}
+
+
+/**
+ * This override alters behavior of superclass put method. It causes newly
+ * allocated entry to get inserted at the end of the linked list and
+ * removes the eldest entry if appropriate.
+ */
+- (void) addEntry:(NSInteger)aHash key:(NSString *)aKey value:(id)aValue bucketIndex:(NSInteger)aBucketIndex
+{
+    [self createEntry:aHash key:aKey value:aValue bucketIndex:aBucketIndex];
+    LHMEntry * eldest = header.after;
+    if ([self removeEldestEntry:eldest]) {
+        [self removeEntryForKey:eldest.key];
+    }
+    else {
+        if (count >= threshold)
+            [self resize:2 * [buffer length]];
+    }
+}
+
+
+/**
+ * This override differs from addEntry in that it doesn't resize the
+ * table or remove the eldest entry.
+ */
+- (void) createEntry:(NSInteger)aHash key:(NSString *)aKey value:(id)aValue bucketIndex:(NSInteger)bucketIndex
+{
+    LHMEntry *old = (LHMEntry *)ptrBuffer[bucketIndex];
+    LHMEntry *e = [[[LHMEntry alloc] init:aHash key:aKey value:aValue next:old] retain];
+    ptrBuffer[bucketIndex] = (id)e;
+    [e addBefore:header];
+    count++;
+}
+
+
+/**
+ * Returns <tt>true</tt> if this map should remove its eldest entry.
+ * This method is invoked by <tt>put</tt> and <tt>putAll</tt> after
+ * inserting a new entry into the map.  It provides the implementor
+ * with the opportunity to remove the eldest entry each time a new one
+ * is added.  This is useful if the map represents a cache: it allows
+ * the map to reduce memory consumption by deleting stale entries.
+ * 
+ * <p>Sample use: this override will allow the map to grow up to 100
+ * entries and then delete the eldest entry each time a new entry is
+ * added, maintaining a steady state of 100 entries.
+ * <pre>
+ * private static final NSInteger MAX_ENTRIES = 100;
+ * 
+ * protected boolean removeEldestEntry(Map.LHMEntry eldest) {
+ *     return count() > MAX_ENTRIES;
+ * }
+ * </pre>
+ * 
+ * <p>This method typically does not modify the map in any way,
+ * instead allowing the map to modify itself as directed by its
+ * return value.  It <i>is</i> permitted for this method to modify
+ * the map directly, but if it does so, it <i>must</i> return
+ * <tt>false</tt> (indicating that the map should not attempt any
+ * further modification).  The effects of returning <tt>true</tt>
+ * after modifying the map from within this method are unspecified.
+ * 
+ * <p>This implementation merely returns <tt>false</tt> (so that this
+ * map acts like a normal map - the eldest element is never removed).
+ * 
+ * @param    eldest The least recently inserted entry in the map, or if
+ * this is an access-ordered map, the least recently accessed
+ * entry.  This is the entry that will be removed if this
+ * method returns <tt>true</tt>.  If the map was empty prior
+ * to the <tt>put</tt> or <tt>putAll</tt> invocation resulting
+ * in this invocation, this will be the entry that was just
+ * inserted; in other words, if the map contains a single
+ * entry, the eldest entry is also the newest.
+ * @return   <tt>true</tt> if the eldest entry should be removed
+ * from the map; <tt>false</tt> if it should be retained.
+ */
+- (BOOL) removeEldestEntry:(LHMEntry *)eldest
+{
+    return NO;
+}
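+
+/*
+ * Sketch of the override described above (illustrative, not part of the
+ * runtime): a hypothetical subclass caps the map at 100 entries by evicting
+ * the eldest entry whenever a new one is added.  It assumes the inherited
+ * entry count is visible to subclasses.
+ *
+ *   @implementation LRUCache   // hypothetical LinkedHashMap subclass
+ *   - (BOOL) removeEldestEntry:(LHMEntry *)eldest
+ *   {
+ *       return count > 100;
+ *   }
+ *   @end
+ */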
+
+@end
diff --git a/runtime/ObjC/Framework/LinkedList.h b/runtime/ObjC/Framework/LinkedList.h
new file mode 100644
index 0000000..65de874
--- /dev/null
+++ b/runtime/ObjC/Framework/LinkedList.h
@@ -0,0 +1,189 @@
+#import "ArrayIterator.h"
+
+@class LinkedList;
+
+/**
+ * LinkedList entry.
+ */
+
+@interface LLNode : NSObject
+{
+    LLNode *next;
+    LLNode *prev;
+    id item;
+}
+
+@property(retain) LLNode *next;
+@property(retain) LLNode *prev;
+@property(retain)      id item;
+
++ (LLNode *) newNode:(LLNode *)aPrev element:(id)anElement next:(LLNode *)aNext;
+
+- (id) init:(LLNode *)aPrev element:(id)anElement next:(LLNode *)aNext;
+- (void) dealloc;
+@end
+
+@interface ListIterator : ArrayIterator {
+    LLNode * lastReturned;
+    LLNode * next;
+    NSInteger nextIndex;
+    NSInteger expectedModCount;
+    LinkedList *ll;
+}
+
++ (ListIterator *) newIterator:(LinkedList *)anLL;
++ (ListIterator *) newIterator:(LinkedList *)anLL withIndex:(NSInteger)anIndex;
+
+- (id) init:(LinkedList *)anLL withIndex:(NSInteger)anIndex;
+- (BOOL) hasNext;
+- (LLNode *) next;
+- (BOOL) hasPrevious;
+- (LLNode *) previous;
+- (NSInteger) nextIndex;
+- (NSInteger) previousIndex;
+- (void) remove;
+- (void) set:(LLNode *)e;
+- (void) add:(LLNode *)e;
+- (void) checkForComodification;
+@end
+
+/**
+ * Adapter to provide descending iterators via ListItr.previous
+ */
+
+@interface DescendingIterator : ListIterator {
+}
+
++ (DescendingIterator *) newIterator:(LinkedList *)anLL;
+- (id) init:(LinkedList *)anLL;
+- (BOOL) hasNext;
+- (LLNode *) next;
+- (void) remove;
+- (void) dealloc;
+@end
+
+/**
+ * Doubly-linked list implementation of the {@code List} and {@code Deque}
+ * interfaces.  Implements all optional list operations, and permits all
+ * elements (including {@code null}).
+ * 
+ * <p>All of the operations perform as could be expected for a doubly-linked
+ * list.  Operations that index into the list will traverse the list from
+ * the beginning or the end, whichever is closer to the specified index.
+ * 
+ * <p><strong>Note that this implementation is not synchronized.</strong>
+ * If multiple threads access a linked list concurrently, and at least
+ * one of the threads modifies the list structurally, it <i>must</i> be
+ * synchronized externally.  (A structural modification is any operation
+ * that adds or deletes one or more elements; merely setting the value of
+ * an element is not a structural modification.)  This is typically
+ * accomplished by synchronizing on some object that naturally
+ * encapsulates the list.
+ * 
+ * If no such object exists, the list should be "wrapped" using the
+ * {@link Collections#synchronizedList Collections.synchronizedList}
+ * method.  This is best done at creation time, to prevent accidental
+ * unsynchronized access to the list:<pre>
+ * List list = Collections.synchronizedList(new LinkedList(...));</pre>
+ * 
+ * <p>The iterators returned by this class's {@code iterator} and
+ * {@code listIterator} methods are <i>fail-fast</i>: if the list is
+ * structurally modified at any time after the iterator is created, in
+ * any way except through the Iterator's own {@code remove} or
+ * {@code add} methods, the iterator will throw a {@link
+ * ConcurrentModificationException}.  Thus, in the face of concurrent
+ * modification, the iterator fails quickly and cleanly, rather than
+ * risking arbitrary, non-deterministic behavior at an undetermined
+ * time in the future.
+ * 
+ * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
+ * as it is, generally speaking, impossible to make any hard guarantees in the
+ * presence of unsynchronized concurrent modification.  Fail-fast iterators
+ * throw {@code ConcurrentModificationException} on a best-effort basis.
+ * Therefore, it would be wrong to write a program that depended on this
+ * exception for its correctness:   <i>the fail-fast behavior of iterators
+ * should be used only to detect bugs.</i>
+ * 
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ * 
+ * @author  Josh Bloch
+ * @see     List
+ * @see     ArrayList
+ * @since 1.2
+ * @param <E> the type of elements held in this collection
+ */
+
+@interface LinkedList : NSObject {
+    /**
+     * Pointer to first node.
+     * Invariant: (first == null && last == null) ||
+     * (first.prev == null && first.item != null)
+     */
+    LLNode *first;
+    
+    /**
+     * Pointer to last node.
+     * Invariant: (first == null && last == null) ||
+     * (last.next == null && last.item != null)
+     */
+    LLNode *last;
+    NSInteger count;
+    NSInteger modCount;
+}
+
+@property(nonatomic, retain) LLNode *first;
+@property(nonatomic, retain) LLNode *last;
+@property(assign) NSInteger count;
+@property(assign) NSInteger modCount;
+
++ (LinkedList *)newLinkedList;
++ (LinkedList *)newLinkedList:(NSArray *)c;
+
+- (id) init;
+- (id) initWithC:(NSArray *)c;
+- (void) linkLast:(LLNode *)e;
+- (void) linkBefore:(LLNode *)e succ:(LLNode *)succ;
+- (LLNode *) unlink:(LLNode *)x;
+- (LLNode *) removeFirst;
+- (LLNode *) removeLast;
+- (void) addFirst:(LLNode *)e;
+- (void) addLast:(LLNode *)e;
+- (BOOL) contains:(id)o;
+- (NSInteger) count;
+- (BOOL) add:(LLNode *)e;
+- (BOOL) remove:(id)o;
+- (BOOL) addAll:(NSArray *)c;
+- (BOOL) addAll:(NSInteger)index c:(NSArray *)c;
+- (void) clear;
+- (LLNode *) get:(NSInteger)index;
+- (LLNode *) set:(NSInteger)index element:(LLNode *)element;
+- (void) add:(NSInteger)index element:(LLNode *)element;
+- (LLNode *) removeIdx:(NSInteger)index;
+- (void) checkElementIndex:(NSInteger)index;
+- (void) checkPositionIndex:(NSInteger)index;
+- (LLNode *) node:(NSInteger)index;
+- (NSInteger) indexOf:(id)o;
+- (NSInteger) lastIndexOf:(id)o;
+- (LLNode *) peek;
+- (LLNode *) element;
+- (LLNode *) poll;
+- (LLNode *) remove;
+- (BOOL) offer:(LLNode *)e;
+- (BOOL) offerFirst:(LLNode *)e;
+- (BOOL) offerLast:(LLNode *)e;
+- (LLNode *) peekFirst;
+- (LLNode *) peekLast;
+- (LLNode *) pollFirst;
+- (LLNode *) pollLast;
+- (void) push:(LLNode *)e;
+- (LLNode *) pop;
+- (BOOL) removeFirstOccurrence:(id)o;
+- (BOOL) removeLastOccurrence:(id)o;
+- (ListIterator *) listIterator:(NSInteger)index;
+- (NSEnumerator *) descendingIterator;
+- (id) copyWithZone:(NSZone *)zone;
+- (NSArray *) toArray;
+- (NSArray *) toArray:(NSArray *)a;
+@end
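+
+/*
+ * Usage sketch (illustrative only): the same list serves as a FIFO queue via
+ * offer/poll or as a LIFO stack via push/pop, all declared above.  Although
+ * the declared parameter type is LLNode *, the implementation wraps whatever
+ * object it is handed in a fresh node, so plain elements are shown here.
+ *
+ *   LinkedList *q = [LinkedList newLinkedList];
+ *   [q offer:@"first"];            // enqueue at the tail
+ *   [q offer:@"second"];
+ *   id head = [q poll];            // dequeues "first"
+ *
+ *   LinkedList *stack = [LinkedList newLinkedList];
+ *   [stack push:@"bottom"];
+ *   [stack push:@"top"];
+ *   id top = [stack pop];          // returns "top"
+ */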
diff --git a/runtime/ObjC/Framework/LinkedList.m b/runtime/ObjC/Framework/LinkedList.m
new file mode 100644
index 0000000..8a8d39d
--- /dev/null
+++ b/runtime/ObjC/Framework/LinkedList.m
@@ -0,0 +1,1256 @@
+#import "LinkedList.h"
+#import <Foundation/Foundation.h>
+#import "AMutableArray.h"
+#import "RuntimeException.h"
+
+@implementation LLNode
+
+@synthesize next;
+@synthesize prev;
+@synthesize item;
+
++ (LLNode *) newNode:(LLNode *)aPrev element:(id)anElement next:(LLNode *)aNext
+{
+    return [[LLNode alloc] init:aPrev element:anElement next:aNext];
+}
+
+- (id) init:(LLNode *)aPrev element:(id)anElement next:(LLNode *)aNext
+{
+    self = [super init];
+    if (self) {
+        item = anElement;
+        next = aNext;
+        prev = aPrev;
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [item release];
+    [next release];
+    [prev release];
+    [super dealloc];
+}
+
+@end
+
+@implementation ListIterator
+
++ (ListIterator *) newIterator:(LinkedList *)anLL
+{
+    return [[ListIterator alloc] init:anLL withIndex:0];
+}
+
++ (ListIterator *) newIterator:(LinkedList *)anLL withIndex:(NSInteger)anIndex
+{
+    return [[ListIterator alloc] init:anLL withIndex:anIndex];
+}
+
+- (id) init:(LinkedList *)anLL withIndex:(NSInteger)anIndex
+{
+    self = [super init];
+    if ( self ) {
+        ll = anLL;
+        index = anIndex;
+        lastReturned = nil;
+        expectedModCount = ll.modCount;
+        next = (index == [ll count]) ? nil : [ll node:anIndex];
+        nextIndex = index;
+    }
+    return self;
+}
+
+- (BOOL) hasNext
+{
+    return nextIndex < [ll count];
+}
+
+- (id) next
+{
+    [self checkForComodification];
+    if (![self hasNext])
+        @throw [[[NoSuchElementException alloc] init] autorelease];
+    lastReturned = next;
+    next = next.next;
+    nextIndex++;
+    return lastReturned.item;
+}
+
+- (BOOL) hasPrevious
+{
+    return nextIndex > 0;
+}
+
+- (id) previous
+{
+    [self checkForComodification];
+    if (![self hasPrevious])
+        @throw [[[NoSuchElementException alloc] init] autorelease];
+    lastReturned = next = (next == nil) ? ll.last : next.prev;
+    nextIndex--;
+    return lastReturned.item;
+}
+
+- (NSInteger) nextIndex
+{
+    return nextIndex;
+}
+
+- (NSInteger) previousIndex
+{
+    return nextIndex - 1;
+}
+
+- (void) remove
+{
+    [self checkForComodification];
+    if (lastReturned == nil)
+        @throw [[[IllegalStateException alloc] init] autorelease];
+    LLNode *lastNext = lastReturned.next;
+    [ll unlink:lastReturned];
+    if (next == lastReturned)
+        next = lastNext;
+    else
+        nextIndex--;
+    lastReturned = nil;
+    expectedModCount++;
+}
+
+- (void) set:(id)e
+{
+    if (lastReturned == nil)
+        @throw [[[IllegalStateException alloc] init] autorelease];
+    [self checkForComodification];
+    lastReturned.item = e;
+}
+
+- (void) add:(id)e
+{
+    [self checkForComodification];
+    lastReturned = nil;
+    if (next == nil)
+        [ll linkLast:e];
+    else
+        [ll linkBefore:e succ:next];
+    nextIndex++;
+    expectedModCount++;
+}
+
+- (void) checkForComodification
+{
+    if (ll.modCount != expectedModCount)
+        @throw [[[ConcurrentModificationException alloc] init] autorelease];
+}
+
+- (void) dealloc
+{
+    [lastReturned release];
+    [next release];
+    [super dealloc];
+}
+
+@end
+
+@implementation DescendingIterator
+
++ (DescendingIterator *)newIterator:(LinkedList *)anLL
+{
+    return [[DescendingIterator alloc] init:anLL];
+}
+
+- (id) init:(LinkedList *)anLL
+{
+    self = [super init:anLL withIndex:[anLL count]];
+    if ( self ) {
+        
+    }
+    return self;
+}
+
+- (BOOL) hasNext
+{
+    return [self hasPrevious];
+}
+
+- (id) next
+{
+    return [self previous];
+}
+
+- (void) remove
+{
+    [super remove];
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+@end
+
+//long const serialVersionUID = 876323262645176354L;
+
+@implementation LinkedList
+
+@synthesize first;
+@synthesize last;
+@synthesize count;
+@synthesize modCount;
+
++ (LinkedList *)newLinkedList
+{
+    return [[LinkedList alloc] init];
+}
+
++ (LinkedList *)newLinkedList:(NSArray *)c
+{
+    return [[LinkedList alloc] initWithC:c];
+}
+
+/**
+ * Constructs an empty list.
+ */
+- (id) init
+{
+    self = [super init];
+    if ( self ) {
+        count = 0;
+    }
+    return self;
+}
+
+
+/**
+ * Constructs a list containing the elements of the specified
+ * collection, in the order they are returned by the collection's
+ * iterator.
+ * 
+ * @param  c the collection whose elements are to be placed into this list
+ * @throws NullPointerException if the specified collection is null
+ */
+- (id) initWithC:(NSArray *)c
+{
+    self = [super init];
+    if ( self ) {
+        count = 0;
+        [self addAll:c];
+    }
+    return self;
+}
+
+
+- (void) dealloc
+{
+    [first release];
+    [last release];
+    [super dealloc];
+}
+
+/**
+ * Links e as first element.
+ */
+- (void) linkFirst:(id)e
+{
+    LLNode *f = first;
+    LLNode *newNode = [[LLNode newNode:nil element:e next:f] autorelease];
+    first = newNode;
+    if (f == nil)
+        last = newNode;
+    else
+        f.prev = newNode;
+    count++;
+    modCount++;
+}
+
+
+/**
+ * Links e as last element.
+ */
+- (void) linkLast:(id)e
+{
+    LLNode *l = last;
+    LLNode *newNode = [[LLNode newNode:l element:e next:nil] autorelease];
+    last = newNode;
+    if (l == nil)
+        first = newNode;
+    else
+        l.next = newNode;
+    count++;
+    modCount++;
+}
+
+
+/**
+ * Inserts element e before non-null LLNode succ.
+ */
+- (void) linkBefore:(id)e succ:(LLNode *)succ
+{
+    LLNode *pred = succ.prev;
+    LLNode *newNode = [[LLNode newNode:pred element:e next:succ] autorelease];
+    succ.prev = newNode;
+    if (pred == nil)
+        first = newNode;
+    else
+        pred.next = newNode;
+    count++;
+    modCount++;
+}
+
+
+/**
+ * Unlinks non-null first node f.
+ */
+- (id) unlinkFirst:(LLNode *)f
+{
+    id element = f.item;
+    LLNode *next = f.next;
+    f.item = nil;
+    f.next = nil;
+    first = next;
+    if (next == nil)
+        last = nil;
+    else
+        next.prev = nil;
+    count--;
+    modCount++;
+    return element;
+}
+
+
+/**
+ * Unlinks non-null last node l.
+ */
+- (id) unlinkLast:(LLNode *)l
+{
+    id element = l.item;
+    LLNode *prev = l.prev;
+    l.item = nil;
+    l.prev = nil;
+    last = prev;
+    if (prev == nil)
+        first = nil;
+    else
+        prev.next = nil;
+    count--;
+    modCount++;
+    return element;
+}
+
+
+/**
+ * Unlinks non-null node x.
+ */
+- (LLNode *) unlink:(LLNode *)x
+{
+    id element = x.item;
+    LLNode *next = x.next;
+    LLNode *prev = x.prev;
+    if (prev == nil) {
+        first = next;
+    }
+    else {
+        prev.next = next;
+        x.prev = nil;
+    }
+    if (next == nil) {
+        last = prev;
+    }
+    else {
+        next.prev = prev;
+        x.next = nil;
+    }
+    x.item = nil;
+    count--;
+    modCount++;
+    return element;
+}
+
+
+/**
+ * Returns the first element in this list.
+ * 
+ * @return the first element in this list
+ * @throws NoSuchElementException if this list is empty
+ */
+- (LLNode *) first
+{
+    LLNode *f = first;
+    if (f == nil)
+        @throw [[[NoSuchElementException alloc] init] autorelease];
+    return f.item;
+}
+
+
+/**
+ * Returns the last element in this list.
+ * 
+ * @return the last element in this list
+ * @throws NoSuchElementException if this list is empty
+ */
+- (LLNode *) last
+{
+    LLNode *l = last;
+    if (l == nil)
+        @throw [[[NoSuchElementException alloc] init] autorelease];
+    return l.item;
+}
+
+
+/**
+ * Removes and returns the first element from this list.
+ * 
+ * @return the first element from this list
+ * @throws NoSuchElementException if this list is empty
+ */
+- (LLNode *) removeFirst
+{
+    LLNode *f = first;
+    if (f == nil)
+        @throw [[[NoSuchElementException alloc] init] autorelease];
+    return [self unlinkFirst:f];
+}
+
+
+/**
+ * Removes and returns the last element from this list.
+ * 
+ * @return the last element from this list
+ * @throws NoSuchElementException if this list is empty
+ */
+- (LLNode *) removeLast
+{
+    LLNode *l = last;
+    if (l == nil)
+        @throw [[[NoSuchElementException alloc] init] autorelease];
+    return [self unlinkLast:l];
+}
+
+
+/**
+ * Inserts the specified element at the beginning of this list.
+ * 
+ * @param e the element to add
+ */
+- (void) addFirst:(LLNode *)e
+{
+    [self linkFirst:e];
+}
+
+
+/**
+ * Appends the specified element to the end of this list.
+ * 
+ * <p>This method is equivalent to {@link #add}.
+ * 
+ * @param e the element to add
+ */
+- (void) addLast:(LLNode *)e
+{
+    [self linkLast:e];
+}
+
+
+/**
+ * Returns {@code true} if this list contains the specified element.
+ * More formally, returns {@code true} if and only if this list contains
+ * at least one element {@code e} such that
+ * <tt>(o==null&nbsp;?&nbsp;e==null&nbsp;:&nbsp;o.equals(e))</tt>.
+ * 
+ * @param o element whose presence in this list is to be tested
+ * @return {@code true} if this list contains the specified element
+ */
+- (BOOL) contains:(id)o
+{
+    return [self indexOf:o] != -1;
+}
+
+
+/**
+ * Returns the number of elements in this list.
+ * 
+ * @return the number of elements in this list
+ */
+- (NSInteger) count
+{
+    return count;
+}
+
+
+/**
+ * Appends the specified element to the end of this list.
+ * 
+ * <p>This method is equivalent to {@link #addLast}.
+ * 
+ * @param e element to be appended to this list
+ * @return {@code true} (as specified by {@link Collection#add})
+ */
+- (BOOL) add:(LLNode *)e
+{
+    [self linkLast:e];
+    return YES;
+}
+
+
+/**
+ * Removes the first occurrence of the specified element from this list,
+ * if it is present.  If this list does not contain the element, it is
+ * unchanged.  More formally, removes the element with the lowest index
+ * {@code i} such that
+ * <tt>(o==null&nbsp;?&nbsp;get(i)==null&nbsp;:&nbsp;o.equals(get(i)))</tt>
+ * (if such an element exists).  Returns {@code true} if this list
+ * contained the specified element (or equivalently, if this list
+ * changed as a result of the call).
+ * 
+ * @param o element to be removed from this list, if present
+ * @return {@code true} if this list contained the specified element
+ */
+- (BOOL) remove:(id)o
+{
+    if (o == nil) {
+        
+        for (LLNode *x = first; x != nil; x = x.next) {
+            if (x.item == nil) {
+                [self unlink:x];
+                return YES;
+            }
+        }
+        
+    }
+    else {
+        
+        for (LLNode *x = first; x != nil; x = x.next) {
+            if ([o isEqualTo:x.item]) {
+                [self unlink:x];
+                return YES;
+            }
+        }
+        
+    }
+    return NO;
+}
+
+
+/**
+ * Appends all of the elements in the specified collection to the end of
+ * this list, in the order that they are returned by the specified
+ * collection's iterator.  The behavior of this operation is undefined if
+ * the specified collection is modified while the operation is in
+ * progress.  (Note that this will occur if the specified collection is
+ * this list, and it's nonempty.)
+ * 
+ * @param c collection containing elements to be added to this list
+ * @return {@code true} if this list changed as a result of the call
+ * @throws NullPointerException if the specified collection is null
+ */
+- (BOOL) addAll:(NSArray *)c
+{
+    return [self addAll:count c:c];
+}
+
+
+/**
+ * Inserts all of the elements in the specified collection into this
+ * list, starting at the specified position.  Shifts the element
+ * currently at that position (if any) and any subsequent elements to
+ * the right (increases their indices).  The new elements will appear
+ * in the list in the order that they are returned by the
+ * specified collection's iterator.
+ * 
+ * @param index index at which to insert the first element
+ * from the specified collection
+ * @param c collection containing elements to be added to this list
+ * @return {@code true} if this list changed as a result of the call
+ * @throws IndexOutOfBoundsException {@inheritDoc}
+ * @throws NullPointerException if the specified collection is null
+ */
+- (BOOL) addAll:(NSInteger)index c:(NSArray *)c
+{
+    [self checkPositionIndex:index];
+    AMutableArray *a = [AMutableArray arrayWithArray:c];
+    NSInteger numNew = [a count];
+    if (numNew == 0)
+        return NO;
+    LLNode *pred, *succ;
+    if (index == count) {
+        succ = nil;
+        pred = last;
+    }
+    else {
+        succ = [self node:index];
+        pred = succ.prev;
+    }
+    
+    for (id o in a) {
+        id e = (id)o;
+        LLNode *newNode = [[LLNode newNode:pred element:e next:nil] autorelease];
+        if (pred == nil)
+            first = newNode;
+        else
+            pred.next = newNode;
+        pred = newNode;
+    }
+    
+    if (succ == nil) {
+        last = pred;
+    }
+    else {
+        pred.next = succ;
+        succ.prev = pred;
+    }
+    count += numNew;
+    modCount++;
+    return YES;
+}
+
+
+/**
+ * Removes all of the elements from this list.
+ * The list will be empty after this call returns.
+ */
+- (void) clear
+{
+    
+    for (LLNode *x = first; x != nil; ) {
+        LLNode *next = x.next;
+        x.item = nil;
+        x.next = nil;
+        x.prev = nil;
+        x = next;
+    }
+    
+    first = last = nil;
+    count = 0;
+    modCount++;
+}
+
+
+/**
+ * Returns the element at the specified position in this list.
+ * 
+ * @param index index of the element to return
+ * @return the element at the specified position in this list
+ * @throws IndexOutOfBoundsException {@inheritDoc}
+ */
+- (id) get:(NSInteger)index
+{
+    [self checkElementIndex:index];
+    return [self node:index].item;
+}
+
+
+/**
+ * Replaces the element at the specified position in this list with the
+ * specified element.
+ * 
+ * @param index index of the element to replace
+ * @param element element to be stored at the specified position
+ * @return the element previously at the specified position
+ * @throws IndexOutOfBoundsException {@inheritDoc}
+ */
+- (id) set:(NSInteger)index element:(id)element
+{
+    [self checkElementIndex:index];
+    LLNode *x = [self node:index];
+    id oldVal = x.item;
+    x.item = element;
+    return oldVal;
+}
+
+
+/**
+ * Inserts the specified element at the specified position in this list.
+ * Shifts the element currently at that position (if any) and any
+ * subsequent elements to the right (adds one to their indices).
+ * 
+ * @param index index at which the specified element is to be inserted
+ * @param element element to be inserted
+ * @throws IndexOutOfBoundsException {@inheritDoc}
+ */
+- (void) add:(NSInteger)index element:(LLNode *)element
+{
+    [self checkPositionIndex:index];
+    if (index == count)
+        [self linkLast:element];
+    else
+        [self linkBefore:element succ:[self node:index]];
+}
+
+
+/**
+ * Removes the element at the specified position in this list.  Shifts any
+ * subsequent elements to the left (subtracts one from their indices).
+ * Returns the element that was removed from the list.
+ * 
+ * @param index the index of the element to be removed
+ * @return the element previously at the specified position
+ * @throws IndexOutOfBoundsException {@inheritDoc}
+ */
+- (LLNode *) removeIdx:(NSInteger)index
+{
+    [self checkElementIndex:index];
+    return [self unlink:[self node:index]];
+}
+
+
+/**
+ * Tells if the argument is the index of an existing element.
+ */
+- (BOOL) isElementIndex:(NSInteger)index
+{
+    return index >= 0 && index < count;
+}
+
+
+/**
+ * Tells if the argument is the index of a valid position for an
+ * iterator or an add operation.
+ */
+- (BOOL) isPositionIndex:(NSInteger)index
+{
+    return index >= 0 && index <= count;
+}
+
+
+/**
+ * Constructs an IndexOutOfBoundsException detail message.
+ * Of the many possible refactorings of the error handling code,
+ * this "outlining" performs best with both server and client VMs.
+ */
+- (NSString *) outOfBoundsMsg:(NSInteger)index
+{
+    return [NSString stringWithFormat:@"Index: %ld, Size: %ld", (long)index, (long)count];
+}
+
+- (void) checkElementIndex:(NSInteger)index
+{
+    if (![self isElementIndex:index])
+        @throw [[IndexOutOfBoundsException newException:[self outOfBoundsMsg:index]] autorelease];
+}
+
+- (void) checkPositionIndex:(NSInteger)index
+{
+    if (![self isPositionIndex:index])
+        @throw [[IndexOutOfBoundsException newException:[self outOfBoundsMsg:index]] autorelease];
+}
+
+
+/**
+ * Returns the (non-null) LLNode at the specified element index.
+ */
+- (LLNode *) node:(NSInteger)index
+{
+    if (index < (count >> 1)) {
+        LLNode *x = first;
+        
+        for (NSInteger i = 0; i < index; i++)
+            x = x.next;
+        
+        return x;
+    }
+    else {
+        LLNode *x = last;
+        
+        for (NSInteger i = count - 1; i > index; i--)
+            x = x.prev;
+        
+        return x;
+    }
+}
+
+
+/**
+ * Returns the index of the first occurrence of the specified element
+ * in this list, or -1 if this list does not contain the element.
+ * More formally, returns the lowest index {@code i} such that
+ * <tt>(o==null&nbsp;?&nbsp;get(i)==null&nbsp;:&nbsp;o.equals(get(i)))</tt>,
+ * or -1 if there is no such index.
+ * 
+ * @param o element to search for
+ * @return the index of the first occurrence of the specified element in
+ * this list, or -1 if this list does not contain the element
+ */
+- (NSInteger) indexOf:(id)o
+{
+    NSInteger index = 0;
+    if (o == nil) {
+        
+        for (LLNode *x = first; x != nil; x = x.next) {
+            if (x.item == nil)
+                return index;
+            index++;
+        }
+        
+    }
+    else {
+        
+        for (LLNode *x = first; x != nil; x = x.next) {
+            if ([o isEqualTo:x.item])
+                return index;
+            index++;
+        }
+        
+    }
+    return -1;
+}
+
+
+/**
+ * Returns the index of the last occurrence of the specified element
+ * in this list, or -1 if this list does not contain the element.
+ * More formally, returns the highest index {@code i} such that
+ * <tt>(o==null&nbsp;?&nbsp;get(i)==null&nbsp;:&nbsp;o.equals(get(i)))</tt>,
+ * or -1 if there is no such index.
+ * 
+ * @param o element to search for
+ * @return the index of the last occurrence of the specified element in
+ * this list, or -1 if this list does not contain the element
+ */
+- (NSInteger) lastIndexOf:(id)o
+{
+    NSInteger index = count;
+    if (o == nil) {
+        
+        for (LLNode *x = last; x != nil; x = x.prev) {
+            index--;
+            if (x.item == nil)
+                return index;
+        }
+        
+    }
+    else {
+        
+        for (LLNode *x = last; x != nil; x = x.prev) {
+            index--;
+            if ([o isEqualTo:x.item])
+                return index;
+        }
+        
+    }
+    return -1;
+}
+
+
+/**
+ * Retrieves, but does not remove, the head (first element) of this list.
+ * 
+ * @return the head of this list, or {@code null} if this list is empty
+ * @since 1.5
+ */
+- (LLNode *) peek
+{
+    LLNode *f = first;
+    return (f == nil) ? nil : f.item;
+}
+
+
+/**
+ * Retrieves, but does not remove, the head (first element) of this list.
+ * 
+ * @return the head of this list
+ * @throws NoSuchElementException if this list is empty
+ * @since 1.5
+ */
+- (LLNode *) element
+{
+    return [self first];
+}
+
+
+/**
+ * Retrieves and removes the head (first element) of this list.
+ * 
+ * @return the head of this list, or {@code null} if this list is empty
+ * @since 1.5
+ */
+- (LLNode *) poll
+{
+    LLNode *f = first;
+    return (f == nil) ? nil : [self unlinkFirst:f];
+}
+
+
+/**
+ * Retrieves and removes the head (first element) of this list.
+ * 
+ * @return the head of this list
+ * @throws NoSuchElementException if this list is empty
+ * @since 1.5
+ */
+- (LLNode *) remove
+{
+    return [self removeFirst];
+}
+
+
+/**
+ * Adds the specified element as the tail (last element) of this list.
+ * 
+ * @param e the element to add
+ * @return {@code true} (as specified by {@link Queue#offer})
+ * @since 1.5
+ */
+- (BOOL) offer:(LLNode *)e
+{
+    return [self add:e];
+}
+
+
+/**
+ * Inserts the specified element at the front of this list.
+ * 
+ * @param e the element to insert
+ * @return {@code true} (as specified by {@link Deque#offerFirst})
+ * @since 1.6
+ */
+- (BOOL) offerFirst:(LLNode *)e
+{
+    [self addFirst:e];
+    return YES;
+}
+
+
+/**
+ * Inserts the specified element at the end of this list.
+ * 
+ * @param e the element to insert
+ * @return {@code true} (as specified by {@link Deque#offerLast})
+ * @since 1.6
+ */
+- (BOOL) offerLast:(LLNode *)e
+{
+    [self addLast:e];
+    return YES;
+}
+
+
+/**
+ * Retrieves, but does not remove, the first element of this list,
+ * or returns {@code null} if this list is empty.
+ * 
+ * @return the first element of this list, or {@code null}
+ * if this list is empty
+ * @since 1.6
+ */
+- (LLNode *) peekFirst
+{
+    LLNode *f = first;
+    return (f == nil) ? nil : f.item;
+}
+
+
+/**
+ * Retrieves, but does not remove, the last element of this list,
+ * or returns {@code null} if this list is empty.
+ * 
+ * @return the last element of this list, or {@code null}
+ * if this list is empty
+ * @since 1.6
+ */
+- (LLNode *) peekLast
+{
+    LLNode *l = last;
+    return (l == nil) ? nil : l.item;
+}
+
+
+/**
+ * Retrieves and removes the first element of this list,
+ * or returns {@code null} if this list is empty.
+ * 
+ * @return the first element of this list, or {@code null} if
+ * this list is empty
+ * @since 1.6
+ */
+- (LLNode *) pollFirst
+{
+    LLNode *f = first;
+    return (f == nil) ? nil : [self unlinkFirst:f];
+}
+
+
+/**
+ * Retrieves and removes the last element of this list,
+ * or returns {@code null} if this list is empty.
+ * 
+ * @return the last element of this list, or {@code null} if
+ * this list is empty
+ * @since 1.6
+ */
+- (LLNode *) pollLast
+{
+    LLNode *l = last;
+    return (l == nil) ? nil : [self unlinkLast:l];
+}
+
+
+/**
+ * Pushes an element onto the stack represented by this list.  In other
+ * words, inserts the element at the front of this list.
+ * 
+ * <p>This method is equivalent to {@link #addFirst}.
+ * 
+ * @param e the element to push
+ * @since 1.6
+ */
+- (void) push:(LLNode *)e
+{
+    [self addFirst:e];
+}
+
+
+/**
+ * Pops an element from the stack represented by this list.  In other
+ * words, removes and returns the first element of this list.
+ * 
+ * <p>This method is equivalent to {@link #removeFirst()}.
+ * 
+ * @return the element at the front of this list (which is the top
+ * of the stack represented by this list)
+ * @throws NoSuchElementException if this list is empty
+ * @since 1.6
+ */
+- (LLNode *) pop
+{
+    return [self removeFirst];
+}
+
+
+/**
+ * Removes the first occurrence of the specified element in this
+ * list (when traversing the list from head to tail).  If the list
+ * does not contain the element, it is unchanged.
+ * 
+ * @param o element to be removed from this list, if present
+ * @return {@code true} if the list contained the specified element
+ * @since 1.6
+ */
+- (BOOL) removeFirstOccurrence:(id)o
+{
+    return [self remove:o];
+}
+
+
+/**
+ * Removes the last occurrence of the specified element in this
+ * list (when traversing the list from head to tail).  If the list
+ * does not contain the element, it is unchanged.
+ * 
+ * @param o element to be removed from this list, if present
+ * @return {@code true} if the list contained the specified element
+ * @since 1.6
+ */
+- (BOOL) removeLastOccurrence:(id)o
+{
+    if (o == nil) {
+        
+        for (LLNode *x = last; x != nil; x = x.prev) {
+            if (x.item == nil) {
+                [self unlink:x];
+                return YES;
+            }
+        }
+        
+    }
+    else {
+        
+        for (LLNode *x = last; x != nil; x = x.prev) {
+            if ([o isEqualTo:x.item]) {
+                [self unlink:x];
+                return YES;
+            }
+        }
+        
+    }
+    return NO;
+}
+
+
+/**
+ * Returns a list-iterator of the elements in this list (in proper
+ * sequence), starting at the specified position in the list.
+ * Obeys the general contract of {@code List.listIterator(NSInteger)}.<p>
+ * 
+ * The list-iterator is <i>fail-fast</i>: if the list is structurally
+ * modified at any time after the Iterator is created, in any way except
+ * through the list-iterator's own {@code remove} or {@code add}
+ * methods, the list-iterator will throw a
+ * {@code ConcurrentModificationException}.  Thus, in the face of
+ * concurrent modification, the iterator fails quickly and cleanly, rather
+ * than risking arbitrary, non-deterministic behavior at an undetermined
+ * time in the future.
+ * 
+ * @param index index of the first element to be returned from the
+ * list-iterator (by a call to {@code next})
+ * @return a ListIterator of the elements in this list (in proper
+ * sequence), starting at the specified position in the list
+ * @throws IndexOutOfBoundsException {@inheritDoc}
+ * @see List#listIterator(NSInteger)
+ */
+- (ListIterator *) listIterator:(NSInteger)index
+{
+    [self checkPositionIndex:index];
+    return [[ListIterator newIterator:self withIndex:index] autorelease];
+}
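+
+/*
+ * Iteration sketch (illustrative only): walking a list with the fail-fast
+ * iterator described above.  Removing through the iterator itself is allowed;
+ * any other structural change during the walk raises a
+ * ConcurrentModificationException.  The names are hypothetical.
+ *
+ *   ListIterator *it = [list listIterator:0];
+ *   while ( [it hasNext] ) {
+ *       id item = [it next];
+ *       if ( [item isEqualTo:@"stale"] )
+ *           [it remove];           // keeps expectedModCount in sync
+ *   }
+ */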
+
+
+/**
+ * @since 1.6
+ */
+- (NSEnumerator *) descendingIterator
+{
+    return [[DescendingIterator newIterator:self] autorelease];
+}
+
+/*
+- (LinkedList *) superClone:(NSZone *)zone
+{
+    
+    @try {
+        return (LinkedList *)[super copyWithZone:zone];
+    }
+    @catch (CloneNotSupportedException * e) {
+        @throw [[NSException exceptionWithName:@"InternalException" reason:@"Attempted to Clone non-cloneable List" userInfo:nil] autorelease];
+    }
+}
+*/
+
+/**
+ * Returns a shallow copy of this {@code LinkedList}. (The elements
+ * themselves are not cloned.)
+ * 
+ * @return a shallow copy of this {@code LinkedList} instance
+ */
+- (id) copyWithZone:(NSZone *)zone
+{
+    LinkedList *clone = [[LinkedList allocWithZone:zone] init];
+    clone.first = nil;
+    clone.last = nil;
+    clone.count = 0;
+    clone.modCount = 0;
+    
+    // Relink each element into a fresh node chain; the element objects
+    // themselves are shared, giving the shallow copy described above.
+    for (LLNode *x = first; x != nil; x = x.next)
+        [clone add:x.item];
+    
+    return clone;
+}
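+
+/*
+ * Copy sketch (illustrative only): the shallow copy shares the element objects
+ * but owns its own node chain, so structural changes to one list do not affect
+ * the other.  The variable names are hypothetical.
+ *
+ *   LinkedList *snapshot = [list copyWithZone:nil];
+ *   [snapshot removeFirst];        // the original list is untouched
+ */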
+
+
+/**
+ * Returns an array containing all of the elements in this list
+ * in proper sequence (from first to last element).
+ * 
+ * <p>The returned array will be "safe" in that no references to it are
+ * maintained by this list.  (In other words, this method must allocate
+ * a new array).  The caller is thus free to modify the returned array.
+ * 
+ * <p>This method acts as bridge between array-based and collection-based
+ * APIs.
+ * 
+ * @return an array containing all of the elements in this list
+ * in proper sequence
+ */
+- (NSArray *) toArray
+{
+    AMutableArray *result = [AMutableArray arrayWithCapacity:10];
+    
+    for (LLNode *x = first; x != nil; x = x.next)
+        [result addObject:x.item];
+    
+    return result;
+}
+
+
+/**
+ * Returns an array containing all of the elements in this list in
+ * proper sequence (from first to last element); the runtime type of
+ * the returned array is that of the specified array.  If the list fits
+ * in the specified array, it is returned therein.  Otherwise, a new
+ * array is allocated with the runtime type of the specified array and
+ * the size of this list.
+ * 
+ * <p>If the list fits in the specified array with room to spare (i.e.,
+ * the array has more elements than the list), the element in the array
+ * immediately following the end of the list is set to {@code null}.
+ * (This is useful in determining the length of the list <i>only</i> if
+ * the caller knows that the list does not contain any null elements.)
+ * 
+ * <p>Like the {@link #toArray()} method, this method acts as bridge between
+ * array-based and collection-based APIs.  Further, this method allows
+ * precise control over the runtime type of the output array, and may,
+ * under certain circumstances, be used to save allocation costs.
+ * 
+ * <p>Suppose {@code x} is a list known to contain only strings.
+ * The following code can be used to dump the list into a newly
+ * allocated array of {@code String}:
+ * 
+ * <pre>
+ * String[] y = x.toArray(new String[0]);</pre>
+ * 
+ * Note that {@code toArray(new Object[0])} is identical in function to
+ * {@code toArray()}.
+ * 
+ * @param a the array into which the elements of the list are to
+ * be stored, if it is big enough; otherwise, a new array of the
+ * same runtime type is allocated for this purpose.
+ * @return an array containing the elements of the list
+ * @throws ArrayStoreException if the runtime type of the specified array
+ * is not a supertype of the runtime type of every element in
+ * this list
+ * @throws NullPointerException if the specified array is null
+ */
+- (NSArray *) toArray:(AMutableArray *)a
+{
+    AMutableArray *result = a;
+    NSInteger i = 0;
+    
+    // Copy the list elements in order, overwriting existing slots and
+    // appending once the target array runs out of room.
+    for (LLNode *x = first; x != nil; x = x.next) {
+        if ( i < [result count] )
+            [result replaceObjectAtIndex:i withObject:x.item];
+        else
+            [result addObject:x.item];
+        i++;
+    }
+    
+    // An NSArray cannot hold nil, so NSNull marks the slot just past the end
+    // of the list, as described in the comment above.
+    if ([result count] > count)
+        [result replaceObjectAtIndex:count withObject:[NSNull null]];
+    return a;
+}
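+
+/*
+ * Snapshot sketch (illustrative only): toArray copies the elements into a new
+ * array, so later mutations of the list do not affect the snapshot.
+ *
+ *   NSArray *elements = [list toArray];
+ *   for (id e in elements)
+ *       NSLog(@"%@", e);
+ */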
+
+
+/**
+ * Saves the state of this {@code LinkedList} instance to a stream
+ * (that is, serializes it).
+ * 
+ * @serialData The size of the list (the number of elements it
+ * contains) is emitted (NSInteger), followed by all of its
+ * elements (each an Object) in the proper order.
+ */
+- (void) writeObject:(NSOutputStream *)s
+{
+/*
+    [s defaultWriteObject];
+    [s writeInt:count];
+    
+    for (LLNode *x = first; x != nil; x = x.next)
+        [s writeObject:x.item];
+ */
+}
+
+
+/**
+ * Reconstitutes this {@code LinkedList} instance from a stream
+ * (that is, deserializes it).
+ */
+- (void) readObject:(NSInputStream *)s
+{
+/*
+    [s defaultReadObject];
+    NSInteger len = [s readInt];
+    
+    for (NSInteger i = 0; i < len; i++)
+        [self linkLast:(id)[s readObject]];
+ */
+}
+
+@end
diff --git a/runtime/ObjC/Framework/LookaheadStream.h b/runtime/ObjC/Framework/LookaheadStream.h
new file mode 100644
index 0000000..b600996
--- /dev/null
+++ b/runtime/ObjC/Framework/LookaheadStream.h
@@ -0,0 +1,77 @@
+//
+//  LookaheadStream.h
+//  ANTLR
+//
+//  Created by Ian Michell on 26/04/2010.
+//  [The "BSD licence"]
+//  Copyright (c) 2010 Ian Michell 2010 Alan Condit
+//  All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "FastQueue.h"
+
+#define UNITIALIZED_EOF_ELEMENT_INDEX NSIntegerMax
+
+@interface LookaheadStream : FastQueue
+{
+    NSInteger index;
+	NSInteger eofElementIndex;
+	NSInteger lastMarker;
+	NSInteger markDepth;
+	id prevElement;
+	id eof;
+}
+
+@property (readwrite, retain, getter=getEof, setter=setEof:) id eof;
+@property (assign) NSInteger index;
+@property (assign, getter=getEofElementIndex, setter=setEofElementIndex:) NSInteger eofElementIndex;
+@property (assign, getter=getLastMarker, setter=setLastMarker:) NSInteger lastMarker;
+@property (assign, getter=getMarkDepth, setter=setMarkDepth:) NSInteger markDepth;
+@property (retain) id prevElement;
+
+- (id) initWithEOF:(id) obj;
+- (id) nextElement;
+- (id) remove;
+- (void) consume;
+- (void) sync:(NSInteger) need;
+- (void) fill:(NSInteger) n;
+- (id) LT:(NSInteger) i;
+- (id) LB:(NSInteger) i;
+- (id) getCurrentSymbol;
+- (NSInteger) mark;
+- (void) release:(NSInteger) marker;
+- (void) rewind:(NSInteger) marker;
+- (void) rewind;
+- (void) seek:(NSInteger) i;
+- (id) getEof;
+- (void) setEof:(id) anID;
+- (NSInteger) getEofElementIndex;
+- (void) setEofElementIndex:(NSInteger) anInt;
+- (NSInteger) getLastMarker;
+- (void) setLastMarker:(NSInteger) anInt;
+- (NSInteger) getMarkDepth;
+- (void) setMarkDepth:(NSInteger) anInt;
+
+@end
diff --git a/runtime/ObjC/Framework/LookaheadStream.m b/runtime/ObjC/Framework/LookaheadStream.m
new file mode 100644
index 0000000..a0f6314
--- /dev/null
+++ b/runtime/ObjC/Framework/LookaheadStream.m
@@ -0,0 +1,229 @@
+//
+//  LookaheadStream.m
+//  ANTLR
+//
+//  Created by Ian Michell on 26/04/2010.
+// [The "BSD licence"]
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "LookaheadStream.h"
+#import "ANTLRError.h"
+#import "RecognitionException.h"
+#import "CommonToken.h"
+#import "RuntimeException.h"
+
+@implementation LookaheadStream
+
+@synthesize eof;
+@synthesize index;
+@synthesize eofElementIndex;
+@synthesize lastMarker;
+@synthesize markDepth;
+@synthesize prevElement;
+
+-(id) init
+{
+	self = [super init];
+	if ( self != nil ) {
+        eof = [[CommonToken eofToken] retain];
+		eofElementIndex = UNITIALIZED_EOF_ELEMENT_INDEX;
+		markDepth = 0;
+        index = 0;
+	}
+	return self;
+}
+
+-(id) initWithEOF:(id)obj
+{
+	if ((self = [super init]) != nil) {
+		self.eof = obj;
+        if ( self.eof ) [self.eof retain];
+	}
+	return self;
+}
+
+- (void) reset
+{
+	[super reset];
+    index = 0;
+    p = 0;
+    prevElement = nil;
+	eofElementIndex = UNITIALIZED_EOF_ELEMENT_INDEX;
+}
+
+-(id) nextElement
+{
+//	[self doesNotRecognizeSelector:_cmd];
+	return nil;
+}
+
+- (id) remove
+{
+    id obj = [self objectAtIndex:0];
+    p++;
+    // have we hit end of buffer and not backtracking?
+    if ( p == [data count] && markDepth==0 ) {
+        // if so, it's an opportunity to start filling at index 0 again
+        [self clear]; // size goes to 0, but retains memory
+    }
+    [obj release];
+    return obj;
+}
+
+-(void) consume
+{
+	[self sync:1];
+	prevElement = [self remove];
+    index++;
+}
+
+-(void) sync:(NSInteger) need
+{
+	NSInteger n = (p + need - 1) - [data count] + 1;
+	if ( n > 0 ) {
+		[self fill:n];
+	}
+}
+
+-(void) fill:(NSInteger) n
+{
+    id obj;
+	for (NSInteger i = 1; i <= n; i++) {
+		obj = [self nextElement];
+		if ( obj == eof ) {
+			[data addObject:self.eof];
+			eofElementIndex = [data count] - 1;
+		}
+		else {
+			[data addObject:obj];
+		}
+	}
+}
+
+-(NSUInteger) count
+{
+	@throw [NSException exceptionWithName:@"UnsupportedOperationException" reason:@"Streams have no defined size" userInfo:nil];
+}
+
+-(id) LT:(NSInteger) k
+{
+	if (k == 0) {
+		return nil;
+	}
+	if (k < 0) {
+		return [self LB:-k];
+	}
+	if ((p + k - 1) >= eofElementIndex) {
+		return self.eof;
+	}
+	[self sync:k];
+	return [self objectAtIndex:(k - 1)];
+}
+
+-(id) LB:(NSInteger) k
+{
+	if (k == 1) {
+		return prevElement;
+	}
+	@throw [NoSuchElementException newException:@"can't look backwards more than one token in this stream"];
+}
+
+-(id) getCurrentSymbol
+{
+	return [self LT:1];
+}
+
+-(NSInteger) mark
+{
+	markDepth++;
+	lastMarker = p;
+	return lastMarker;
+}
+
+-(void) release:(NSInteger) marker
+{
+	// no resources to release
+}
+
+-(void) rewind:(NSInteger) marker
+{
+	markDepth--;
+	[self seek:marker];
+//    if (marker == 0) [self reset];
+}
+
+-(void) rewind
+{
+	[self seek:lastMarker];
+//    if (lastMarker == 0) [self reset];
+}
+
+-(void) seek:(NSInteger) anIndex
+{
+	p = anIndex;
+}
+
+- (id) getEof
+{
+    return eof;
+}
+
+- (void) setEof:(id) anID
+{
+    eof = anID;
+}
+
+- (NSInteger) getEofElementIndex
+{
+    return eofElementIndex;
+}
+
+- (void) setEofElementIndex:(NSInteger) anInt
+{
+    eofElementIndex = anInt;
+}
+
+- (NSInteger) getLastMarker
+{
+    return lastMarker;
+}
+
+- (void) setLastMarker:(NSInteger) anInt
+{
+    lastMarker = anInt;
+}
+
+- (NSInteger) getMarkDepth
+{
+    return markDepth;
+}
+
+- (void) setMarkDepth:(NSInteger) anInt
+{
+    markDepth = anInt;
+}
+
+@end
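LookaheadStream is effectively abstract: -nextElement returns nil above and is meant to
be overridden by a concrete stream that produces elements and hands back the designated
eof object when it runs out. A hedged sketch under that assumption (TokenArrayStream and
its ivars are hypothetical names, not part of the runtime):

    @interface TokenArrayStream : LookaheadStream {
        NSMutableArray *tokens;   // elements to serve, in order
        NSInteger nextIndex;      // position of the next element to hand out
    }
    @end

    @implementation TokenArrayStream
    - (id) nextElement
    {
        if ( nextIndex >= (NSInteger)[tokens count] )
            return self.eof;                       // must be the same eof object that fill: compares against
        return [tokens objectAtIndex:nextIndex++];
    }
    @end

    // Typical driving pattern once such a stream exists:
    //   id la1 = [stream LT:1];       // peek at the next element without consuming it
    //   NSInteger m = [stream mark];  // remember the current position
    //   [stream consume];             // advance past LT:1
    //   [stream rewind:m];            // back up to the mark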
diff --git a/runtime/ObjC/Framework/Map.h b/runtime/ObjC/Framework/Map.h
new file mode 100644
index 0000000..1356814
--- /dev/null
+++ b/runtime/ObjC/Framework/Map.h
@@ -0,0 +1,82 @@
+//
+//  Map.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/9/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "PtrBuffer.h"
+#import "MapElement.h"
+
+//#define GLOBAL_SCOPE      0
+//#define LOCAL_SCOPE       1
+#define HASHSIZE            101
+#define HBUFSIZE            0x2000
+
+@interface Map : PtrBuffer {
+	//Map *fNext; // found in superclass
+    // TStringPool *fPool;
+    NSInteger lastHash;
+}
+
+//@property (copy) Map *fNext;
+@property (getter=getLastHash, setter=setLastHash:) NSInteger lastHash;
+
+// Contruction/Destruction
++ (id)newMap;
++ (id)newMapWithLen:(NSInteger)aHashSize;
+
+- (id)init;
+- (id)initWithLen:(NSInteger)cnt;
+- (void)dealloc;
+// Instance Methods
+- (NSInteger)count;
+- (NSInteger)length;
+- (NSInteger)size;
+/* clear -- reinitialize the maplist array */
+- (void) clear;
+/* form hash value for string s */
+-(NSInteger)hash:(NSString *)s;
+/*   look for s in ptrBuffer  */
+-(id)lookup:(NSString *)s;
+/* look for s in ptrBuffer  */
+-(id)install:(MapElement *)sym;
+/*
+ * delete entry from list
+ */
+- (void)deleteMap:(MapElement *)np;
+- (NSInteger)RemoveSym:(NSString *)s;
+- (void)delete_chain:(MapElement *)np;
+- (MapElement *)getTType:(NSString *)name;
+- (MapElement *)getName:(NSInteger)ttype;
+- (NSInteger)getNode:(MapElement *)aNode;
+- (void)putNode:(NSInteger)aTType Node:(id)aNode;
+- (void)putName:(NSString *)name TType:(NSInteger)ttype;
+- (void)putName:(NSString *)name Node:(id)aNode;
+
+@end
diff --git a/runtime/ObjC/Framework/Map.m b/runtime/ObjC/Framework/Map.m
new file mode 100644
index 0000000..a9dc88f
--- /dev/null
+++ b/runtime/ObjC/Framework/Map.m
@@ -0,0 +1,362 @@
+//
+//  Map.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/9/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#define SUCCESS (0)
+#define FAILURE (-1)
+
+#import "Map.h"
+#import "BaseTree.h"
+
+/*
+ * Start of Map
+ */
+@implementation Map
+
+@synthesize lastHash;
+
++(id)newMap
+{
+    return [[Map alloc] init];
+}
+
++(id)newMapWithLen:(NSInteger)aBuffSize
+{
+    return [[Map alloc] initWithLen:aBuffSize];
+}
+
+-(id)init
+{
+    NSInteger idx;
+    
+	self = [super initWithLen:HASHSIZE];
+    if ( self != nil ) {
+		fNext = nil;
+        for( idx = 0; idx < HASHSIZE; idx++ ) {
+            ptrBuffer[idx] = nil;
+        }
+	}
+    return( self );
+}
+
+-(id)initWithLen:(NSInteger)aBuffSize
+{
+	self = [super initWithLen:aBuffSize];
+    if ( self != nil ) {
+	}
+    return( self );
+}
+
+-(void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in Map" );
+#endif
+    MapElement *tmp, *rtmp;
+    NSInteger idx;
+	
+    if ( self.fNext != nil ) {
+        for( idx = 0; idx < BuffSize; idx++ ) {
+            tmp = ptrBuffer[idx];
+            while ( tmp ) {
+                rtmp = tmp;
+                tmp = (MapElement *)tmp.fNext;
+                [rtmp release];
+            }
+        }
+    }
+	[super dealloc];
+}
+
+-(void)deleteMap:(MapElement *)np
+{
+    MapElement *tmp, *rtmp;
+    NSInteger idx;
+    
+    if ( self.fNext != nil ) {
+        for( idx = 0; idx < BuffSize; idx++ ) {
+            tmp = ptrBuffer[idx];
+            while ( tmp ) {
+                rtmp = tmp;
+                tmp = [tmp getfNext];
+                [rtmp release];
+            }
+        }
+    }
+}
+
+- (void)clear
+{
+    MapElement *tmp, *rtmp;
+    NSInteger idx;
+
+    for( idx = 0; idx < BuffSize; idx++ ) {
+        tmp = ptrBuffer[idx];
+        while ( tmp ) {
+            rtmp = tmp;
+            tmp = [tmp getfNext];
+            [rtmp release];    /* release; never call dealloc directly */
+        }
+        ptrBuffer[idx] = nil;
+    }
+}
+
+- (NSInteger)count
+{
+    NSInteger aCnt = 0;
+    
+    for (int i = 0; i < BuffSize; i++) {
+        if (ptrBuffer[i] != nil) {
+            aCnt++;
+        }
+    }
+    return aCnt;
+}
+
+- (NSInteger)length
+{
+    return BuffSize;
+}
+
+- (NSInteger)size
+{
+    MapElement *anElement;
+    NSInteger aSize = 0;
+    
+    for (int i = 0; i < BuffSize; i++) {
+        if ((anElement = ptrBuffer[i]) != nil) {
+            aSize += (NSInteger)[anElement size];
+        }
+    }
+    return aSize;
+}
+                          
+#ifdef USERDOC
+/*
+ *  HASH        hash entry to get index to table
+ *  NSInteger hash( Map *self, char *s );
+ *
+ *     Inputs:  NSString *s         string to find
+ *
+ *     Returns: NSInteger                 hashed value
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(NSInteger)hash:(NSString *)s       /*    form hash value for string s */
+{
+	NSInteger hashval;
+	const char *tmp;
+    
+	tmp = [s cStringUsingEncoding:NSASCIIStringEncoding];
+	for( hashval = 0; *tmp != '\0'; )
+        hashval += *tmp++;
+	self->lastHash = hashval % HASHSIZE;
+	return( self->lastHash );
+}
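+
+/* Worked example of the hash above (assuming ASCII input): for the key @"ID",
+ * 'I' (73) + 'D' (68) sums to 141, and 141 % HASHSIZE (101) = 40, so the
+ * corresponding MapElement chains into ptrBuffer[40]. */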
+
+#ifdef USERDOC
+/*
+ *  LOOKUP  search hashed list for entry
+ *  MapElement *lookup:(NSString *)s;
+ *
+ *     Inputs:  NSString  *s       string to find
+ *
+ *     Returns: MapElement  *        pointer to entry
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(id)lookup:(NSString *)s
+{
+    MapElement *np;
+    
+    for( np = self->ptrBuffer[[self hash:s]]; np != nil; np = [np getfNext] ) {
+        if ( [s isEqualToString:[np getName]] ) {
+            return( np );        /*   found it       */
+        }
+    }
+    return( nil );              /*   not found      */
+}
+
+#ifdef USERDOC
+/*
+ *  INSTALL search hashed list for entry
+ *  NSInteger install( Map *self, MapElement *sym );
+ *
+ *     Inputs:  MapElement    *sym   -- symbol ptr to install
+ *              NSInteger         scope -- level to find
+ *
+ *     Returns: Boolean     TRUE   if installed
+ *                          FALSE  if already in table
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(MapElement *)install:(MapElement *)sym
+{
+    MapElement *np;
+    
+    np = [self lookup:[sym getName]];
+    if ( np == nil ) {
+        [sym setFNext:ptrBuffer[ lastHash ]];
+        ptrBuffer[ lastHash ] = sym;
+        [sym retain];
+        return( ptrBuffer[ lastHash ] );
+    }
+    return( nil );            /*  already in table  */
+}
+
+#ifdef USERDOC
+/*
+ *  RemoveSym  search hashed list for entry
+ *  NSInteger RemoveSym( Map *self, char *s );
+ *
+ *     Inputs:  char     *s          string to find
+ *
+ *     Returns: NSInteger      indicator of SUCCESS OR FAILURE
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(NSInteger)RemoveSym:(NSString *)s
+{
+    MapElement *np, *prev;
+    NSInteger idx;
+    
+    idx = [self hash:s];
+    for ( prev = nil, np = self->ptrBuffer[idx]; np != nil; prev = np, np = [np getfNext] ) {
+        if ( [s isEqualToString:[np getName]] ) {
+            if ( prev == nil )
+                self->ptrBuffer[idx] = [np getfNext];   /* unlink the head of the chain */
+            else
+                [prev setFNext:[np getfNext]];          /* unlink an interior entry */
+            [np release];
+            return( SUCCESS );            /* report SUCCESS     */
+        }
+    }
+    return( FAILURE );                    /*   not found      */
+}
+
+-(void)delete_chain:(MapElement *)np
+{
+    if ( [np getfNext] != nil )
+		[self delete_chain:[np getfNext]];
+	[np release];
+}
+
+#ifdef DONTUSEYET
+-(NSInteger)bld_symtab:(KW_TABLE *)toknams
+{
+    NSInteger i;
+    MapElement *np;
+    
+    for( i = 0; *(toknams[i].name) != '\0'; i++ ) {
+        // install symbol in ptrBuffer
+        np = [MapElement newMapElement:[NSString stringWithFormat:@"%s", toknams[i].name]];
+        //        np->fType = toknams[i].toknum;
+        [self install:np Scope:0];
+    }
+    return( SUCCESS );
+}
+#endif
+
+/*
+ * works only for a maplist indexed by TokenNumber rather than by name
+ */
+- (MapElement *)getName:(NSInteger)ttype
+{
+    MapElement *np;
+    NSInteger aTType;
+
+    aTType = ttype % HASHSIZE;
+    for( np = self->ptrBuffer[aTType]; np != nil; np = [np getfNext] ) {
+        if ( [(ACNumber *)np.node integerValue] == ttype ) {
+            return( np );        /*   found it       */
+        }
+    }
+    return( nil );              /*   not found      */
+}
+
+- (NSInteger)getNode:(id<BaseTree>)aNode
+{
+    MapElement *np;
+    NSInteger idx;
+
+    idx = [(id<BaseTree>)aNode type];
+    idx %= HASHSIZE;
+    np = ptrBuffer[idx];
+    return( [(ACNumber *)np.node integerValue] );
+}
+
+- (MapElement *)getTType:(NSString *)name
+{
+    return [self lookup:name];
+}
+
+// create node and install node in ptrBuffer
+- (void)putName:(NSString *)name TType:(NSInteger)ttype
+{
+    MapElement *np;
+    
+    np = [MapElement newMapElementWithName:[NSString stringWithString:name] Type:ttype];
+    [self install:np];
+}
+
+// create node and install node in ptrBuffer
+- (void)putName:(NSString *)name Node:(id)aNode
+{
+    MapElement *np, *np1;
+    NSInteger idx;
+    
+    idx = [self hash:name];
+    np1 = [MapElement newMapElementWithName:[NSString stringWithString:name] Type:idx];
+    np = [self lookup:name];
+    if ( np == nil ) {
+        [np1 setFNext:self->ptrBuffer[ self->lastHash ]];
+        self->ptrBuffer[ self->lastHash ] = np1;
+        [np1 retain];
+    }
+    else {
+        // ptrBuffer[idx] = np;
+    }
+    return;
+}
+
+// create node and install node in ptrBuffer
+- (void)putNode:(NSInteger)aTType Node:(id)aNode
+{
+    MapElement *np;
+    NSInteger ttype;
+    
+    ttype = aTType % HASHSIZE;
+    np = [MapElement newMapElementWithNode:ttype Node:(id)aNode];
+    ptrBuffer[ttype] = np;
+}
+
+@end
diff --git a/runtime/ObjC/Framework/MapElement.h b/runtime/ObjC/Framework/MapElement.h
new file mode 100644
index 0000000..fe8d0a3
--- /dev/null
+++ b/runtime/ObjC/Framework/MapElement.h
@@ -0,0 +1,66 @@
+//
+//  MapElement.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/8/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "ACNumber.h"
+#import "BaseMapElement.h"
+
+@interface MapElement : BaseMapElement {
+    NSString *name;
+    id        node;
+}
+@property (retain, getter=getName, setter=setName:) NSString *name;
+@property (retain, getter=getNode, setter=setNode:) id node;
+
++ (id) newMapElement;
++ (id) newMapElementWithName:(NSString *)aName Type:(NSInteger)aTType;
++ (id) newMapElementWithNode:(NSInteger)aTType Node:(id)aNode;
++ (id) newMapElementWithName:(NSString *)aName Node:(id)aNode;
++ (id) newMapElementWithObj1:(id)anObj1 Obj2:(id)anObj2;
+- (id) init;
+- (id) initWithName:(NSString *)aName Type:(NSInteger)aTType;
+- (id) initWithNode:(NSInteger)aTType Node:(id)aNode;
+- (id) initWithName:(NSString *)aName Node:(id)aNode;
+- (id) initWithObj1:(id)anObj1 Obj2:(id)anObj2;
+
+- (id) copyWithZone:(NSZone *)aZone;
+
+- (NSInteger) count;
+- (NSInteger) size;
+- (NSString *)getName;
+- (void)setName:(NSString *)aName;
+- (id)getNode;
+- (void)setNode:(id)aNode;
+- (void)putNode:(id)aNode;
+- (void)putNode:(id)aNode With:(NSInteger)uniqueID;
+//- (void)setObject:(id)aNode atIndex:anIndex;
+
+@end
diff --git a/runtime/ObjC/Framework/MapElement.m b/runtime/ObjC/Framework/MapElement.m
new file mode 100644
index 0000000..2b05224
--- /dev/null
+++ b/runtime/ObjC/Framework/MapElement.m
@@ -0,0 +1,207 @@
+//
+//  MapElement.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/8/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "MapElement.h"
+
+
+@implementation MapElement
+
+@synthesize name;
+@synthesize node;
+
++ (id) newMapElement
+{
+    return [[MapElement alloc] init];
+}
+
++ (id) newMapElementWithName:(NSString *)aName Type:(NSInteger)aTType
+{
+    return [[MapElement alloc] initWithName:aName Type:aTType];
+}
+
++ (id) newMapElementWithNode:(NSInteger)aTType Node:(id)aNode
+{
+    return [[MapElement alloc] initWithNode:aTType Node:aNode];
+}
+
++ (id) newMapElementWithName:(NSString *)aName Node:(id)aNode
+{
+    return [[MapElement alloc] initWithName:aName Node:aNode];
+}
+
++ (id) newMapElementWithObj1:(id)anObj1 Obj2:(id)anObj2
+{
+    return [[MapElement alloc] initWithObj1:anObj1 Obj2:anObj2];
+}
+
+- (id) init
+{
+    self = [super init];
+    if ( self != nil ) {
+        index = nil;
+        name  = nil;
+    }
+    return self;
+}
+
+- (id) initWithName:(NSString *)aName Type:(NSInteger)aTType
+{
+    self = [super init];
+    if ( self != nil ) {
+        index = [[ACNumber numberWithInteger: aTType] retain];
+        name  = [[NSString stringWithString:aName] retain];
+    }
+    return self;
+}
+
+- (id) initWithNode:(NSInteger)aTType Node:(id)aNode
+{
+    self = [super initWithAnIndex:[ACNumber numberWithInteger:aTType]];
+    if ( self != nil ) {
+        node  = aNode;
+        if ( node ) [node retain];
+    }
+    return self;
+}
+
+- (id) initWithName:(NSString *)aName Node:(id)aNode
+{
+    self = [super init];
+    if ( self != nil ) {
+        name  = [[NSString stringWithString:aName] retain];
+        node = aNode;
+        if ( node ) [node retain];
+    }
+    return self;
+}
+
+- (id) initWithObj1:(id)anIndex Obj2:(id)aNode
+{
+    self = [super initWithAnIndex:anIndex];
+    if ( self != nil ) {
+        node = aNode;
+        if ( node ) [node retain];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in MapElement" );
+#endif
+    if ( name ) [name release];
+    if ( node ) [node release];
+    [super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    MapElement *copy;
+
+    copy = [super copyWithZone:aZone];
+    if (name) copy.name = name;
+    if (node) copy.node = node;
+    return( copy );
+}
+
+- (NSInteger) count
+{
+    NSInteger aCnt = 0;
+    if (name != nil) aCnt++;
+    if (node != nil) aCnt++;
+    return aCnt;
+}
+
+- (NSInteger)size
+{
+    NSInteger aSize = 0;
+    if ( name ) aSize += sizeof(id);
+    if ( node ) aSize += sizeof(id);
+    return aSize;
+}
+
+
+- (NSString *)getName
+{
+    return name;
+}
+
+- (void)setName:(NSString *)aName
+{
+    if ( aName != name ) {
+        if ( name ) [name release];
+        [aName retain];
+    }
+    name = aName;
+}
+
+- (id)getNode
+{
+    return node;
+}
+
+- (void)setNode:(id)aNode
+{
+    if ( aNode != node ) {
+        if ( node ) [node release];
+        [aNode retain];
+    }
+    node = aNode;
+}
+
+- (void)putNode:(id)aNode
+{
+    index = ((MapElement *)aNode).index;
+    if (((MapElement *)aNode).name) {
+        name = [((MapElement *)aNode).name retain];
+        node = nil;
+    }
+    if (((MapElement *)aNode).node) {
+        name = nil;
+        node = [((MapElement *)aNode).node retain];
+    }
+}
+
+- (void)putNode:(id)aNode With:(NSInteger)uniqueID
+{
+    index = ((MapElement *)aNode).index;
+    if (((MapElement *)aNode).name) {
+        name = [((MapElement *)aNode).name retain];
+        node = nil;
+    }
+    if (((MapElement *)aNode).node) {
+        name = nil;
+        node = [((MapElement *)aNode).node retain];
+    }
+}
+
+@end
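A hedged usage sketch tying Map and MapElement together (aMap is a throwaway local name
used only for illustration; every message shown is declared in Map.h or MapElement.h above):

    Map *aMap = [Map newMap];
    [aMap putName:@"ID" TType:17];              // builds a MapElement for "ID" and installs it under hash("ID")
    MapElement *elem = [aMap getTType:@"ID"];   // getTType: is just lookup: by name
    NSLog(@"found %@", [elem getName]);         // logs "found ID" once the entry is installed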
diff --git a/runtime/ObjC/Framework/MismatchedNotSetException.h b/runtime/ObjC/Framework/MismatchedNotSetException.h
new file mode 100644
index 0000000..84453b4
--- /dev/null
+++ b/runtime/ObjC/Framework/MismatchedNotSetException.h
@@ -0,0 +1,51 @@
+//
+//  MismatchedNotSetException.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/13/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RecognitionException.h"
+#import "ANTLRBitSet.h"
+
+@interface MismatchedNotSetException : RecognitionException
+{
+    NSString *expecting;
+}
+@property (retain, getter=getExpecting, setter=setExpecting:) NSString *expecting;
+
++ (MismatchedNotSetException *)newException;
++ (MismatchedNotSetException *)newException:(id<IntStream>)anInput
+                                                               Follow:(NSString *)expected;
+
+- (id) init;
+- (id) initWithStream:(id<IntStream>)anInput Follow:(NSString *)expected;
+
+- (NSString *)toString;
+
+@end
diff --git a/runtime/ObjC/Framework/MismatchedNotSetException.m b/runtime/ObjC/Framework/MismatchedNotSetException.m
new file mode 100644
index 0000000..069c0f6
--- /dev/null
+++ b/runtime/ObjC/Framework/MismatchedNotSetException.m
@@ -0,0 +1,69 @@
+//
+//  MismatchedNotSetException.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/13/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "MismatchedNotSetException.h"
+
+@implementation MismatchedNotSetException
+
+@synthesize expecting;
+
++ (MismatchedNotSetException *)newException
+{
+    return [[MismatchedNotSetException alloc] init];
+}
+
++ (MismatchedNotSetException *)newException:(id<IntStream>)anInput
+                                                               Follow:(NSString *)expected
+{
+    return [[MismatchedNotSetException alloc] initWithStream:anInput Follow:(NSString *)expected];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil ) {
+    }
+    return(self);
+}
+
+- (id) initWithStream:(id<IntStream>)anInput Follow:(NSString *)expected
+{
+    if ((self = [super initWithStream:anInput]) != nil ) {
+        expecting = expected;
+    }
+    return(self);
+}
+
+- (NSString *)toString
+{
+    return [NSString stringWithFormat:@"MismatchedNotSetException( %d != %@ )", [self unexpectedType], expecting];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/MismatchedRangeException.h b/runtime/ObjC/Framework/MismatchedRangeException.h
new file mode 100644
index 0000000..dc3425d
--- /dev/null
+++ b/runtime/ObjC/Framework/MismatchedRangeException.h
@@ -0,0 +1,42 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RecognitionException.h"
+
+@protocol IntStream;
+
+
+@interface MismatchedRangeException : RecognitionException {
+	NSRange range;
+}
+
+@property (assign) NSRange range;
+
++ (id) newException:(NSRange) aRange stream:(id<IntStream>) theInput;
+- (id) initWithRange:(NSRange) aRange stream:(id<IntStream>) theInput;
+
+@end
diff --git a/runtime/ObjC/Framework/MismatchedRangeException.m b/runtime/ObjC/Framework/MismatchedRangeException.m
new file mode 100644
index 0000000..2fbad2b
--- /dev/null
+++ b/runtime/ObjC/Framework/MismatchedRangeException.m
@@ -0,0 +1,55 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import "MismatchedRangeException.h"
+
+
+@implementation MismatchedRangeException
+
+@synthesize range;
+
++ (id) newException:(NSRange) aRange stream:(id<IntStream>) theInput
+{
+	return [[MismatchedRangeException alloc] initWithRange:aRange stream:theInput];
+}
+
+- (id) initWithRange:(NSRange) aRange stream:(id<IntStream>) theInput
+{
+	if ((self = [super initWithStream:theInput]) != nil) {
+		range = aRange;
+	}
+	return self;
+}
+
+- (NSString *) description
+{
+	NSMutableString *desc = (NSMutableString *)[super description];
+	[desc appendFormat:@" range:%@", NSStringFromRange(range)];
+	return desc;
+}
+
+@end
diff --git a/runtime/ObjC/Framework/MismatchedSetException.h b/runtime/ObjC/Framework/MismatchedSetException.h
new file mode 100644
index 0000000..980717b
--- /dev/null
+++ b/runtime/ObjC/Framework/MismatchedSetException.h
@@ -0,0 +1,44 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+#import "RecognitionException.h"
+
+@interface MismatchedSetException : RecognitionException {
+	NSString *expecting;
+}
+
+@property (retain, getter=getExpecting, setter=setExpecting:) NSString *expecting;
+
++ (id) newException:(NSString *) theExpectedSet stream:(id<IntStream>) theStream;
+- (id) initWithSet:(NSString *) theExpectedSet stream:(id<IntStream>) theStream;
+
+- (NSString *) getExpecting;
+- (void) setExpecting: (NSString *) anExpectedSet;
+
+
+@end
diff --git a/runtime/ObjC/Framework/MismatchedSetException.m b/runtime/ObjC/Framework/MismatchedSetException.m
new file mode 100644
index 0000000..243aaaa
--- /dev/null
+++ b/runtime/ObjC/Framework/MismatchedSetException.m
@@ -0,0 +1,79 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "MismatchedSetException.h"
+
+
+@implementation MismatchedSetException
+
+@synthesize expecting;
+
++ (id) newException:(NSString *) theExpectedSet stream:(id<IntStream>) theStream
+{
+	return [[MismatchedSetException alloc] initWithSet:theExpectedSet stream:theStream];
+}
+
+- (id) initWithSet:(NSString *) theExpectedSet stream:(id<IntStream>) theStream
+{
+	if ((self = [super initWithStream:theStream]) != nil) {
+		[self setExpecting:theExpectedSet];
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+	[self setExpecting:nil];
+	[super dealloc];
+}
+
+- (NSString *) description
+{
+	NSMutableString *desc =(NSMutableString *)[super description];
+	[desc appendFormat:@" set:%@", expecting];
+	return desc;
+}
+
+
+//---------------------------------------------------------- 
+//  expectedSet 
+//---------------------------------------------------------- 
+- (NSString *) getExpecting
+{
+    return expecting; 
+}
+
+- (void) setExpecting: (NSString *) anExpectedSet
+{
+    if ( expecting != anExpectedSet ) {
+        if ( expecting ) [expecting release];
+        [anExpectedSet retain];
+        expecting = anExpectedSet;
+    }
+}
+
+
+@end
diff --git a/runtime/ObjC/Framework/MismatchedTokenException.h b/runtime/ObjC/Framework/MismatchedTokenException.h
new file mode 100644
index 0000000..792aa4d
--- /dev/null
+++ b/runtime/ObjC/Framework/MismatchedTokenException.h
@@ -0,0 +1,58 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+#import "RecognitionException.h"
+#import "ANTLRBitSet.h"
+
+@protocol IntStream;
+
+@interface MismatchedTokenException : RecognitionException {
+	NSInteger expecting;
+	unichar expectingChar;
+	BOOL isTokenType;
+}
+
+@property (assign, getter=getExpecting, setter=setExpecting:) NSInteger expecting;
+@property (assign, getter=getExpectingChar, setter=setExpectingChar:) unichar expectingChar;
+@property (assign, getter=getIsTokenType, setter=setIsTokenType:) BOOL isTokenType;
+
++ (id) newException:(NSInteger)expectedTokenType Stream:(id<IntStream>)anInput;
++ (id) newExceptionMissing:(NSInteger)expectedTokenType
+                                        Stream:(id<IntStream>)anInput
+                                         Token:(id<Token>)inserted;
++ (id) newExceptionChar:(unichar)expectedCharacter Stream:(id<IntStream>)anInput;
++ (id) newExceptionStream:(id<IntStream>)anInput
+                                    Exception:(NSException *)e
+                                       Follow:(ANTLRBitSet *)follow;
+- (id) initWithTokenType:(NSInteger)expectedTokenType Stream:(id<IntStream>)anInput;
+-(id) initWithTokenType:(NSInteger)expectedTokenType
+                 Stream:(id<IntStream>)anInput
+                  Token:(id<Token>)inserted;
+- (id) initWithCharacter:(unichar)expectedCharacter Stream:(id<IntStream>)anInput;
+
+@end
diff --git a/runtime/ObjC/Framework/MismatchedTokenException.m b/runtime/ObjC/Framework/MismatchedTokenException.m
new file mode 100644
index 0000000..716ea73
--- /dev/null
+++ b/runtime/ObjC/Framework/MismatchedTokenException.m
@@ -0,0 +1,99 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "MismatchedTokenException.h"
+
+
+@implementation MismatchedTokenException
+
+@synthesize expecting;
+@synthesize expectingChar;
+@synthesize isTokenType;
+
+
++ (id) newException:(NSInteger)expectedTokenType Stream:(id<IntStream>)anInput
+{
+	return [[MismatchedTokenException alloc] initWithTokenType:expectedTokenType Stream:anInput];
+}
+
++ (id) newExceptionMissing:(NSInteger)expectedTokenType
+                                        Stream:(id<IntStream>)anInput
+                                         Token:(id<Token>)inserted
+{
+	return [[MismatchedTokenException alloc] initWithTokenType:expectedTokenType Stream:anInput Token:inserted];
+}
+
++ (id) newExceptionChar:(unichar) expectedCharacter Stream:(id<IntStream>)anInput
+{
+	return [[MismatchedTokenException alloc] initWithCharacter:expectedCharacter Stream:anInput];
+}
+
++ (id) newExceptionStream:(id<IntStream>)anInput Exception:(NSException *)e Follow:(ANTLRBitSet *) follow
+{
+	return [[MismatchedTokenException alloc] initWithStream:anInput];
+}
+
+-(id) initWithTokenType:(NSInteger)expectedTokenType Stream:(id<IntStream>)anInput
+{
+	if ((self = [super initWithStream:anInput]) != nil) {
+		expecting = expectedTokenType;
+		isTokenType = YES;
+	}
+	return self;
+}
+
+-(id) initWithTokenType:(NSInteger)expectedTokenType
+                 Stream:(id<IntStream>)anInput
+                  Token:(id<Token>)inserted
+{
+	if ((self = [super initWithStream:anInput]) != nil) {
+		expecting = expectedTokenType;
+		isTokenType = YES;
+	}
+	return self;
+}
+
+- (id) initWithCharacter:(unichar) expectedCharacter Stream:(id<IntStream>)anInput
+{
+	if ((self = [super initWithStream:anInput]) != nil) {
+		expectingChar = expectedCharacter;
+		isTokenType = NO;
+	}
+	return self;
+}
+
+- (NSString *) description
+{
+	NSMutableString *desc = (NSMutableString *)[super description];
+	if (isTokenType) {
+		[desc appendFormat:@" expected:%d got:%d", expecting, [self unexpectedType]];
+	} else {
+		[desc appendFormat:@" expected:%C got:%C", (unichar)expectingChar, (unichar)self.c];
+	}
+	return desc;
+}
+
+@end
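A hedged sketch of how a recognizer might build and report this exception (input stands
for whatever id<IntStream> the recognizer is currently reading; the token type 17 is an
arbitrary illustrative value):

    MismatchedTokenException *mte = [MismatchedTokenException newException:17 Stream:input];
    NSLog(@"%@", [mte description]);   // base description plus " expected:17 got:<actual type>"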
diff --git a/runtime/ObjC/Framework/MismatchedTreeNodeException.h b/runtime/ObjC/Framework/MismatchedTreeNodeException.h
new file mode 100644
index 0000000..4cb1c91
--- /dev/null
+++ b/runtime/ObjC/Framework/MismatchedTreeNodeException.h
@@ -0,0 +1,42 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RecognitionException.h"
+
+@protocol IntStream;
+
+@interface MismatchedTreeNodeException : RecognitionException {
+	NSInteger expecting;
+}
+
+@property (getter=getExpecting, setter=setExpecting:) NSInteger expecting;
+
++ (id) newException:(NSInteger)expectedTokenType Stream:(id<IntStream>)anInput;
+- (id) initWithTokenType:(NSInteger) expectedTokenType Stream:(id<IntStream>)anInput;
+
+
+@end
diff --git a/runtime/ObjC/Framework/MismatchedTreeNodeException.m b/runtime/ObjC/Framework/MismatchedTreeNodeException.m
new file mode 100644
index 0000000..55addc3
--- /dev/null
+++ b/runtime/ObjC/Framework/MismatchedTreeNodeException.m
@@ -0,0 +1,54 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "MismatchedTreeNodeException.h"
+
+
+@implementation MismatchedTreeNodeException
+
+@synthesize expecting;
+
++ (id) newException:(NSInteger)expectedTokenType Stream:(id<IntStream>)anInput
+{
+	return [[MismatchedTreeNodeException alloc] initWithTokenType:expectedTokenType Stream:anInput];
+}
+
+-(id) initWithTokenType:(NSInteger)expectedTokenType Stream:(id<IntStream>)anInput
+{
+	if ((self = [super initWithStream:anInput]) != nil) {
+		expecting = expectedTokenType;
+	}
+	return self;
+}
+
+- (NSString *) description
+{
+	NSMutableString *desc = (NSMutableString *)[super description];
+	[desc appendFormat:@" expected:%d got:%d", expecting, [self unexpectedType]];
+	return desc;
+}
+
+@end
diff --git a/runtime/ObjC/Framework/MissingTokenException.h b/runtime/ObjC/Framework/MissingTokenException.h
new file mode 100644
index 0000000..1f94005
--- /dev/null
+++ b/runtime/ObjC/Framework/MissingTokenException.h
@@ -0,0 +1,52 @@
+//
+//  MissingTokenException.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/8/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "MismatchedTokenException.h"
+#import "Token.h"
+
+@interface MissingTokenException : MismatchedTokenException {
+    id<Token> inserted;
+}
+/** Used for remote debugger deserialization */
++ (id) newException;
++ (id) newException:(NSInteger)expected
+             Stream:(id<IntStream>)anInput
+               With:(id<Token>)insertedToken;
+- (id) init;
+- (id) init:(NSInteger)expected Stream:(id<IntStream>)anInput With:(id<Token>)insertedToken;
+
+- (NSInteger) getMissingType;
+
+- (NSString *)toString;
+
+@property (retain) id<Token> inserted;
+@end
diff --git a/runtime/ObjC/Framework/MissingTokenException.m b/runtime/ObjC/Framework/MissingTokenException.m
new file mode 100644
index 0000000..d2a7878
--- /dev/null
+++ b/runtime/ObjC/Framework/MissingTokenException.m
@@ -0,0 +1,83 @@
+//
+//  MissingTokenException.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/8/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "MissingTokenException.h"
+
+
+@implementation MissingTokenException
+/** Used for remote debugger deserialization */
++ (id) newException
+{
+    return [[MissingTokenException alloc] init];
+}
+
++ (id) newException:(NSInteger)expected
+             Stream:(id<IntStream>)anInput
+               With:(id<Token>)insertedToken
+{
+    return [[MissingTokenException alloc] init:expected Stream:anInput With:insertedToken];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil) {
+    }
+    return self;
+}
+
+- (id) init:(NSInteger)expected Stream:(id<IntStream>)anInput With:(id<Token>)insertedToken
+{
+    if ((self = [super initWithStream:anInput]) != nil) {
+        expecting = expected;
+        input = anInput;
+        inserted = [insertedToken retain];   // property is declared retain; take ownership explicitly under MRC
+    }
+    return self;
+}
+
+- (NSInteger) getMissingType
+{
+    return expecting;
+}
+
+- (NSString *)toString
+{
+    if ( inserted != nil && token != nil ) {
+        return [NSString stringWithFormat:@"MissingTokenException(inserted %@ at %@)", inserted, token.text];
+    }
+    if ( token!=nil ) {
+        return [NSString stringWithFormat:@"MissingTokenException(at %@)", token.text ];
+    }
+    return @"MissingTokenException";
+}
+
+@synthesize inserted;
+@end
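+// A minimal usage sketch; `expectedType`, `tokens` and `insertedTok` are
+// placeholders for a grammar token type, the recognizer's id<IntStream>
+// and the token fabricated during error recovery:
+//
+//   MissingTokenException *mte =
+//       [MissingTokenException newException:expectedType Stream:tokens With:insertedTok];
+//   NSLog(@"%@ (missing type %ld)", [mte toString], (long)[mte getMissingType]);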
diff --git a/runtime/ObjC/Framework/NoViableAltException.h b/runtime/ObjC/Framework/NoViableAltException.h
new file mode 100644
index 0000000..47fb1c9
--- /dev/null
+++ b/runtime/ObjC/Framework/NoViableAltException.h
@@ -0,0 +1,45 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RecognitionException.h"
+#import "IntStream.h"
+
+@interface NoViableAltException : RecognitionException {
+	NSInteger decisionNumber;
+	NSInteger stateNumber;
+}
+
++ (NoViableAltException *) newException:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<IntStream>)theStream;
+- (NoViableAltException *) initWithDecision:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<IntStream>)theStream;
+
+- (void)setDecisionNumber:(NSInteger)decisionNumber;
+- (void)setStateNumber:(NSInteger)stateNumber;
+
+
+@property (getter=decisionNumber,setter=setDecisionNumber:) NSInteger decisionNumber;
+@property (getter=stateNumber,setter=setStateNumber:) NSInteger stateNumber;
+@end
diff --git a/runtime/ObjC/Framework/NoViableAltException.m b/runtime/ObjC/Framework/NoViableAltException.m
new file mode 100644
index 0000000..5f64b0b
--- /dev/null
+++ b/runtime/ObjC/Framework/NoViableAltException.m
@@ -0,0 +1,83 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import "NoViableAltException.h"
+
+
+@implementation NoViableAltException
+
+
++ (NoViableAltException *) newException:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<IntStream>)theStream
+{
+	return [[self alloc] initWithDecision:theDecisionNumber state:theStateNumber stream:theStream];
+}
+
+
+- (NoViableAltException *) initWithDecision:(NSInteger)theDecisionNumber state:(NSInteger)theStateNumber stream:(id<IntStream>)theStream
+{
+	if ((self = [super initWithStream:theStream]) != nil) {
+		decisionNumber = theDecisionNumber;
+		stateNumber = theStateNumber;
+	}
+	return self;
+}
+
+- (NSString *) description
+{
+	NSMutableString *desc = (NSMutableString *)[super description];
+	[desc appendFormat:@" decision:%d state:%d", decisionNumber, stateNumber];
+	return desc;
+}
+
+//---------------------------------------------------------- 
+//  decisionNumber 
+//---------------------------------------------------------- 
+- (NSInteger) decisionNumber
+{
+    return decisionNumber;
+}
+
+- (void) setDecisionNumber: (NSInteger) aDecisionNumber
+{
+    decisionNumber = aDecisionNumber;
+}
+
+//---------------------------------------------------------- 
+//  stateNumber 
+//---------------------------------------------------------- 
+- (NSInteger) stateNumber
+{
+    return stateNumber;
+}
+
+- (void) setStateNumber: (NSInteger) aStateNumber
+{
+    stateNumber = aStateNumber;
+}
+
+
+@end
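+// A minimal usage sketch; `input` stands in for the recognizer's
+// id<IntStream>, and 3/7 are example decision and DFA state numbers:
+//
+//   NoViableAltException *nvae =
+//       [NoViableAltException newException:3 state:7 stream:input];
+//   NSLog(@"%@", [nvae description]);   // description appends " decision:3 state:7"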
diff --git a/runtime/ObjC/Framework/NodeMapElement.h b/runtime/ObjC/Framework/NodeMapElement.h
new file mode 100644
index 0000000..523e457
--- /dev/null
+++ b/runtime/ObjC/Framework/NodeMapElement.h
@@ -0,0 +1,56 @@
+//
+//  NodeMapElement.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "BaseMapElement.h"
+#import "BaseTree.h"
+
+@interface NodeMapElement : BaseMapElement {
+    id<BaseTree> node;
+}
+
+@property (retain, getter=getNode, setter=setNode:) id<BaseTree> node;
+
++ (void)initialize;
+
++ (id) newNodeMapElement;
++ (id) newNodeMapElementWithIndex:(id)anIndex Node:(id<BaseTree>)aNode;
+- (id) init;
+- (id) initWithAnIndex:(id)anIndex Node:(id)aNode;
+
+- (id) copyWithZone:(NSZone *)aZone;
+
+- (id<BaseTree>)getNode;
+- (void)setNode:(id<BaseTree>)aNode;
+
+- (NSInteger)size;
+
+@end
diff --git a/runtime/ObjC/Framework/NodeMapElement.m b/runtime/ObjC/Framework/NodeMapElement.m
new file mode 100644
index 0000000..64c9fc0
--- /dev/null
+++ b/runtime/ObjC/Framework/NodeMapElement.m
@@ -0,0 +1,108 @@
+//
+//  NodeMapElement.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "NodeMapElement.h"
+
+static NSInteger _aUniqueID;
+
+@implementation NodeMapElement
+
+@synthesize node;
+
++ (void)initialize
+{
+    _aUniqueID = 0;
+}
+
++ (NodeMapElement *)newNodeMapElement
+{
+    return [[NodeMapElement alloc] init];
+}
+
++ (NodeMapElement *)newNodeMapElementWithIndex:(id)anIndex Node:(id<BaseTree>)aNode
+{
+    return [[NodeMapElement alloc] initWithAnIndex:anIndex Node:aNode];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil ) {
+        index = nil;
+        node = nil;
+    }
+    return (self);
+}
+
+- (id) initWithAnIndex:(id)anIndex Node:(id)aNode
+{
+    self = [super initWithAnIndex:anIndex];
+    if ( self ) {
+        if ( aNode != node ) {
+            if ( node ) [node release];
+            [aNode retain];
+        }
+        node = aNode;
+    }
+    return (self);
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    NodeMapElement *copy;
+    
+    copy = [super copyWithZone:aZone];
+    copy.node = node;
+    return( copy );
+}
+
+- (id<BaseTree>)getNode
+{
+    return node;
+}
+
+- (void)setNode:(id<BaseTree>)aNode
+{
+    if ( aNode != node ) {
+        if ( node ) [node release];
+        [aNode retain];
+    }
+    node = aNode;
+}
+
+- (NSInteger)size
+{
+    NSInteger aSize = 0;
+    if (node != nil) aSize += sizeof(id);
+    if (index != nil) aSize += sizeof(id);
+    return( aSize );
+}
+
+@end
diff --git a/runtime/ObjC/Framework/ParseTree.h b/runtime/ObjC/Framework/ParseTree.h
new file mode 100644
index 0000000..3f23287
--- /dev/null
+++ b/runtime/ObjC/Framework/ParseTree.h
@@ -0,0 +1,64 @@
+//
+//  ParseTree.h
+//  ANTLR
+//
+//  Created by Alan Condit on 7/12/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "BaseTree.h"
+#import "CommonToken.h"
+#import "AMutableArray.h"
+
+/** A record of the rules used to match a token sequence.  The tokens
+ *  end up as the leaves of this tree and rule nodes are the interior nodes.
+ *  This really adds no functionality, it is just an alias for CommonTree
+ *  that is more meaningful (specific) and holds a String to display for a node.
+ */
+@interface ParseTree : BaseTree <BaseTree> {
+	__strong id<Token> payload;
+	__strong AMutableArray *hiddenTokens;
+}
++ (id<BaseTree>)newParseTree:(id<Token>)label;
+- (id)initWithLabel:(id<Token>)label;
+
+- (id<BaseTree>)dupNode;
+- (NSInteger)type;
+- (NSString *)text;
+- (NSInteger)getTokenStartIndex;
+- (void)setTokenStartIndex:(NSInteger)index;
+- (NSInteger)getTokenStopIndex;
+- (void)setTokenStopIndex:(NSInteger)index;
+- (NSString *)description;
+- (NSString *)toString;
+- (NSString *)toStringWithHiddenTokens;
+- (NSString *)toInputString;
+- (void)_toStringLeaves:(NSMutableString *)buf;
+
+@property (retain) id<Token> payload;
+@property (retain) AMutableArray *hiddenTokens;
+@end
diff --git a/runtime/ObjC/Framework/ParseTree.m b/runtime/ObjC/Framework/ParseTree.m
new file mode 100644
index 0000000..8a33972
--- /dev/null
+++ b/runtime/ObjC/Framework/ParseTree.m
@@ -0,0 +1,149 @@
+//
+//  ParseTree.m
+//  ANTLR
+//
+//  Created by Alan Condit on 7/12/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "ParseTree.h"
+
+/** A record of the rules used to match a token sequence.  The tokens
+ *  end up as the leaves of this tree and rule nodes are the interior nodes.
+ *  This really adds no functionality, it is just an alias for CommonTree
+ *  that is more meaningful (specific) and holds a String to display for a node.
+ */
+@implementation ParseTree
++ (ParseTree *)newParseTree:(id<Token>)label
+{
+    return [[ParseTree alloc] initWithLabel:label];
+}
+    
+- (id)initWithLabel:(id<Token>)label
+{
+    self = [super init];
+    if ( self != nil) {
+        payload = [label retain];
+    }
+    return self;
+}
+
+- (id<BaseTree>)dupNode
+{
+    return nil;
+}
+    
+- (NSInteger)type
+{
+    return 0;
+}
+    
+- (NSString *)text
+{
+    return [self toString];
+}
+    
+- (NSInteger)getTokenStartIndex
+{
+    return 0;
+}
+    
+- (void)setTokenStartIndex:(NSInteger)anIndex
+{
+}
+    
+- (NSInteger)getTokenStopIndex
+{
+    return 0;
+}
+    
+- (void)setTokenStopIndex:(NSInteger)anIndex
+{
+}
+
+- (NSString *)description
+{
+    if ( [payload isKindOfClass:[CommonToken class]] ) {
+        id<Token> t = (id<Token>)payload;
+        if ( t.type == TokenTypeEOF ) {
+            return @"<EOF>";
+        }
+        return [t text];
+    }
+    return [payload description];
+}
+    
+- (NSString *)toString
+{
+    return [self description];
+}
+    
+/** Emit a token and all hidden nodes before it.  The EOF node holds all
+ *  hidden tokens after the last real token.
+ */
+- (NSString *)toStringWithHiddenTokens
+{
+    NSMutableString *buf = [NSMutableString stringWithCapacity:25];
+    if ( hiddenTokens!=nil ) {
+        for (NSUInteger i = 0; i < [hiddenTokens count]; i++) {
+            id<Token>  hidden = (id<Token> ) [hiddenTokens objectAtIndex:i];
+            [buf appendString:[hidden text]];
+        }
+    }
+    NSString *nodeText = [self toString];
+    if ( ![nodeText isEqualToString:@"<EOF>"] )
+        [buf appendString:nodeText];
+    return buf;
+}
+    
+/** Print out the leaves of this tree, which means printing original
+ *  input back out.
+ */
+- (NSString *)toInputString
+{
+    NSMutableString *buf = [NSMutableString stringWithCapacity:25];
+    [self _toStringLeaves:buf];
+    return buf;
+}
+    
+- (void)_toStringLeaves:(NSMutableString *)buf
+{
+    if ( [payload isKindOfClass:[CommonToken class]] ) { // leaf node token?
+        [buf appendString:[self toStringWithHiddenTokens]];
+        return;
+    }
+    for (int i = 0; children!=nil && i < [children count]; i++) {
+        ParseTree *t = (ParseTree *) [children objectAtIndex:i];
+        [t _toStringLeaves:buf];
+    }
+}
+    
+@synthesize payload;
+@synthesize hiddenTokens;
+//@synthesize children;
+//@synthesize anException;
+
+@end
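+// A minimal usage sketch; `tok` stands in for an id<Token> produced by a
+// lexer and is not defined in this file:
+//
+//   ParseTree *leaf = [ParseTree newParseTree:tok];
+//   NSLog(@"%@", [leaf text]);            // the token's text, or "<EOF>" at end of input
+//   NSLog(@"%@", [leaf toInputString]);   // prints the leaves back as the original input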
diff --git a/runtime/ObjC/Framework/Parser.h b/runtime/ObjC/Framework/Parser.h
new file mode 100644
index 0000000..38723d9
--- /dev/null
+++ b/runtime/ObjC/Framework/Parser.h
@@ -0,0 +1,59 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+#import "BaseRecognizer.h"
+#import "CommonToken.h"
+#import "TokenStream.h"
+
+@interface Parser : BaseRecognizer {
+	id<TokenStream> input;
+}
++ (Parser *)newParser:(id<TokenStream>)anInput;
++ (Parser *)newParser:(id<TokenStream>)anInput State:(RecognizerSharedState *)aState;
+
+- (id) initWithTokenStream:(id<TokenStream>)theStream;
+- (id) initWithTokenStream:(id<TokenStream>)theStream State:(RecognizerSharedState *)aState;
+
+- (id<TokenStream>) input;
+- (void) setInput: (id<TokenStream>) anInput;
+
+- (void) reset;
+
+- (id) getCurrentInputSymbol:(id<TokenStream>)anInput;
+- (CommonToken *)getMissingSymbol:(id<TokenStream>)input
+                             Exception:(RecognitionException *)e
+                                 TType:(NSInteger)expectedTokenType
+                                BitSet:(ANTLRBitSet *)follow;
+- (void) setTokenStream:(id<TokenStream>)anInput;
+- (id<TokenStream>)getTokenStream;
+- (NSString *)getSourceName;
+
+- (void) traceIn:(NSString *)ruleName Index:(int)ruleIndex;
+- (void) traceOut:(NSString *)ruleName Index:(NSInteger) ruleIndex;
+
+@end
diff --git a/runtime/ObjC/Framework/Parser.m b/runtime/ObjC/Framework/Parser.m
new file mode 100644
index 0000000..baa214c
--- /dev/null
+++ b/runtime/ObjC/Framework/Parser.m
@@ -0,0 +1,148 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "Parser.h"
+
+
+@implementation Parser
+
++ (Parser *)newParser:(id<TokenStream>)anInput
+{
+    return [[Parser alloc] initWithTokenStream:anInput];
+}
+
++ (Parser *)newParser:(id<TokenStream>)anInput State:(RecognizerSharedState *)aState
+{
+    return [[Parser alloc] initWithTokenStream:anInput State:aState];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)theStream
+{
+    if ((self = [super init]) != nil) {
+        [self setInput:theStream];
+    }
+    return self;
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)theStream State:(RecognizerSharedState *)aState
+{
+    if ((self = [super initWithState:aState]) != nil) {
+        [self setInput:theStream];
+    }
+    return self;
+}
+
+- (void) reset
+{
+    [super reset]; // reset all recognizer state variables
+    if ( input!=nil ) {
+        [input seek:0]; // rewind the input
+    }
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in Parser" );
+#endif
+    [input release];
+	[super dealloc];
+}
+
+//---------------------------------------------------------- 
+//  input 
+//---------------------------------------------------------- 
+- (id<TokenStream>) input
+{
+    return input; 
+}
+
+- (void) setInput: (id<TokenStream>) anInput
+{
+    if (input != anInput) {
+        if ( input ) [input release];
+        [anInput retain];
+    }
+    input = anInput;
+}
+
+- (id) getCurrentInputSymbol:(id<TokenStream>)anInput
+{
+    state.token = [input LT:1];
+    return state.token;
+}
+
+- (CommonToken *)getMissingSymbol:(id<TokenStream>)anInput
+                             Exception:(RecognitionException *)e
+                                 TType:(NSInteger)expectedTokenType
+                                BitSet:(ANTLRBitSet *)follow
+{
+    NSString *tokenText = nil;
+    if ( expectedTokenType == TokenTypeEOF )
+        tokenText = @"<missing EOF>";
+    else
+        tokenText = [NSString stringWithFormat:@"<missing %@>\n",[[BaseRecognizer getTokenNames] objectAtIndex:expectedTokenType]];
+    CommonToken *t = [[CommonToken newToken:expectedTokenType Text:tokenText] retain];
+    CommonToken *current = [anInput LT:1];
+    if ( current.type == TokenTypeEOF ) {
+        current = [anInput LT:-1];
+    }
+    t.line = current.line;
+    t.charPositionInLine = current.charPositionInLine;
+    t.channel = TokenChannelDefault;
+    t.input = current.input;
+    return t;
+}
+
+/** Set the token stream and reset the parser */
+- (void) setTokenStream:(id<TokenStream>)anInput
+{
+    [self setInput:nil];    // drop (and release) any previous stream
+    [self reset];
+    [self setInput:anInput];
+}
+
+- (id<TokenStream>)getTokenStream
+{
+    return input;
+}
+
+- (NSString *)getSourceName
+{
+    return [input getSourceName];
+}
+
+- (void) traceIn:(NSString *)ruleName Index:(int)ruleIndex
+{
+    [super traceIn:ruleName Index:ruleIndex Object:[input LT:1]];
+}
+
+- (void) traceOut:(NSString *)ruleName Index:(NSInteger) ruleIndex
+{
+    [super traceOut:ruleName Index:ruleIndex Object:[input LT:1]];
+}
+
+@end
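+// A minimal usage sketch; `tokens` stands in for an id<TokenStream> built
+// from a generated lexer and is not defined here:
+//
+//   Parser *p = [Parser newParser:tokens];
+//   NSLog(@"parsing %@", [p getSourceName]);
+//   [p reset];   // rewinds the token stream via [input seek:0]
+//   // generated parser subclasses add one method per grammar rule on top of this base class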
diff --git a/runtime/ObjC/Framework/ParserRuleReturnScope.h b/runtime/ObjC/Framework/ParserRuleReturnScope.h
new file mode 100644
index 0000000..4691074
--- /dev/null
+++ b/runtime/ObjC/Framework/ParserRuleReturnScope.h
@@ -0,0 +1,46 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Token.h"
+#import "RuleReturnScope.h"
+
+@interface ParserRuleReturnScope : RuleReturnScope {
+	id<Token> start;
+	id<Token> stopToken;
+}
+@property (retain, getter=getStart, setter=setStart:) id<Token> start;
+@property (retain, getter=getStop, setter=setStop:)   id<Token> stopToken;
+
+- (id<Token>) getStart;
+- (void) setStart: (id<Token>) aStart;
+
+- (id<Token>) getStop;
+- (void) setStop: (id<Token>) aStop;
+
+- (id) copyWithZone:(NSZone *)theZone;
+
+@end
diff --git a/runtime/ObjC/Framework/ParserRuleReturnScope.m b/runtime/ObjC/Framework/ParserRuleReturnScope.m
new file mode 100644
index 0000000..de71a81
--- /dev/null
+++ b/runtime/ObjC/Framework/ParserRuleReturnScope.m
@@ -0,0 +1,80 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "ParserRuleReturnScope.h"
+
+
+@implementation ParserRuleReturnScope
+
+@synthesize start;
+@synthesize stopToken;
+
+- (void) dealloc
+{
+    [self setStart:nil];
+    [self setStop:nil];
+    [super dealloc];
+}
+
+- (id<Token>) getStart
+{
+    return start; 
+}
+
+- (void) setStart: (id<Token>) aStart
+{
+    if (start != aStart) {
+        [aStart retain];
+        if ( start ) [start release];
+        start = aStart;
+    }
+}
+
+- (id<Token>) getStop
+{
+    return stopToken; 
+}
+
+- (void) setStop: (id<Token>) aStop
+{
+    if (stopToken != aStop) {
+        [aStop retain];
+        if ( stopToken ) [stopToken release];
+        stopToken = aStop;
+    }
+}
+
+// create a copy of the return scope;
+// the start and stop tokens are shared with the original, not duplicated
+- (id) copyWithZone:(NSZone *)theZone
+{
+    ParserRuleReturnScope *copy = [super copyWithZone:theZone];
+    copy.start = start;
+    copy.stopToken = stopToken;
+    return copy;
+}
+
+@end
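+// A minimal usage sketch; `startTok` and `stopTok` stand in for the first
+// and last id<Token> consumed by a rule:
+//
+//   ParserRuleReturnScope *retval = [[ParserRuleReturnScope alloc] init];
+//   [retval setStart:startTok];
+//   [retval setStop:stopTok];
+//   NSLog(@"rule matched %@ .. %@", [[retval getStart] text], [[retval getStop] text]);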
diff --git a/runtime/ObjC/Framework/PtrBuffer.h b/runtime/ObjC/Framework/PtrBuffer.h
new file mode 100644
index 0000000..bc49e88
--- /dev/null
+++ b/runtime/ObjC/Framework/PtrBuffer.h
@@ -0,0 +1,93 @@
+//
+//  PtrBuffer.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/9/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "LinkBase.h"
+
+//#define GLOBAL_SCOPE       0
+//#define LOCAL_SCOPE        1
+#define BUFFSIZE         101
+
+@interface PtrBuffer : LinkBase {
+    NSUInteger BuffSize;
+    NSUInteger count;
+    NSUInteger ptr;
+    __strong NSMutableData *buffer;
+    __strong id *ptrBuffer;
+}
+
+@property (getter=getBuffSize, setter=setBuffSize:) NSUInteger BuffSize;
+@property (getter=getCount, setter=setCount:) NSUInteger count;
+@property (getter=getPtr, setter=setPtr:) NSUInteger ptr;
+@property (retain, getter=getBuffer, setter=setBuffer:) NSMutableData *buffer;
+@property (assign, getter=getPtrBuffer, setter=setPtrBuffer:) id *ptrBuffer;
+
+// Construction/Destruction
++(PtrBuffer *)newPtrBuffer;
++(PtrBuffer *)newPtrBufferWithLen:(NSInteger)cnt;
+-(id)init;
+-(id)initWithLen:(NSUInteger)cnt;
+-(void)dealloc;
+
+// Instance Methods
+- (id) copyWithZone:(NSZone *)aZone;
+/* clear -- reinitialize the maplist array */
+- (void) clear;
+
+- (NSUInteger)count;
+- (NSUInteger)length;
+- (NSUInteger)size;
+
+- (NSMutableData *)getBuffer;
+- (void)setBuffer:(NSMutableData *)np;
+- (NSUInteger)getCount;
+- (void)setCount:(NSUInteger)aCount;
+- (id *)getPtrBuffer;
+- (void)setPtrBuffer:(id *)np;
+- (NSUInteger)getPtr;
+- (void)setPtr:(NSUInteger)np;
+
+- (void) push:(id) v;
+- (id) pop;
+- (id) peek;
+
+- (void) addObject:(id) v;
+- (void) addObjectsFromArray:(PtrBuffer *)anArray;
+- (void) insertObject:(id)aRule atIndex:(NSUInteger)idx;
+- (id)   objectAtIndex:(NSUInteger)idx;
+- (void) removeAllObjects;
+- (void)removeObjectAtIndex:(NSInteger)idx;
+
+- (void) ensureCapacity:(NSUInteger) index;
+- (NSString *) description;
+- (NSString *) toString;
+
+@end
diff --git a/runtime/ObjC/Framework/PtrBuffer.m b/runtime/ObjC/Framework/PtrBuffer.m
new file mode 100644
index 0000000..c1fc4cb
--- /dev/null
+++ b/runtime/ObjC/Framework/PtrBuffer.m
@@ -0,0 +1,353 @@
+//
+//  PtrBuffer.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/9/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#define SUCCESS (0)
+#define FAILURE (-1)
+
+#import "PtrBuffer.h"
+#import "Tree.h"
+
+/*
+ * Start of PtrBuffer
+ */
+@implementation PtrBuffer
+
+@synthesize BuffSize;
+@synthesize buffer;
+@synthesize ptrBuffer;
+@synthesize count;
+@synthesize ptr;
+
++(PtrBuffer *)newPtrBuffer
+{
+    return [[PtrBuffer alloc] init];
+}
+
++(PtrBuffer *)newPtrBufferWithLen:(NSInteger)cnt
+{
+    return [[PtrBuffer alloc] initWithLen:cnt];
+}
+
+-(id)init
+{
+    NSUInteger idx;
+    
+    self = [super init];
+    if ( self != nil ) {
+        BuffSize  = BUFFSIZE;
+        ptr = 0;
+        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
+        ptrBuffer = (id *) [buffer mutableBytes];
+        for( idx = 0; idx < BuffSize; idx++ ) {
+            ptrBuffer[idx] = nil;
+        }
+        count = 0;
+    }
+    return( self );
+}
+
+-(id)initWithLen:(NSUInteger)cnt
+{
+    NSUInteger idx;
+    
+    self = [super init];
+    if ( self != nil ) {
+        BuffSize  = cnt;
+        ptr = 0;
+        buffer = [[NSMutableData dataWithLength:(NSUInteger)BuffSize * sizeof(id)] retain];
+        ptrBuffer = (id *)[buffer mutableBytes];
+        for( idx = 0; idx < BuffSize; idx++ ) {
+            ptrBuffer[idx] = nil;
+        }
+        count = 0;
+    }
+    return( self );
+}
+
+-(void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in PtrBuffer" );
+#endif
+    LinkBase *tmp, *rtmp;
+    NSInteger idx;
+    
+    if ( self.fNext != nil ) {
+        for( idx = 0; idx < BuffSize; idx++ ) {
+            tmp = ptrBuffer[idx];
+            while ( tmp ) {
+                rtmp = tmp;
+                if ([tmp isKindOfClass:[LinkBase class]])
+                    tmp = (id)tmp.fNext;
+                else
+                    tmp = nil;
+                [rtmp release];
+            }
+        }
+    }
+    [buffer release];
+    [super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    PtrBuffer *copy;
+    
+    copy = [[[self class] allocWithZone:aZone] init];
+    if ( buffer )
+        copy.buffer = [buffer copyWithZone:aZone];
+    copy.ptrBuffer = ptrBuffer;
+    copy.ptr = ptr;
+    return copy;
+}
+
+- (void)clear
+{
+    LinkBase *tmp, *rtmp;
+    NSInteger idx;
+
+    for( idx = 0; idx < BuffSize; idx++ ) {
+        tmp = ptrBuffer[idx];
+        while ( tmp ) {
+            rtmp = tmp;
+            if ([tmp isKindOfClass:[LinkBase class]])
+                tmp = (id)tmp.fNext;
+            else
+                tmp = nil;
+            [rtmp release];   // release, never call dealloc directly
+        }
+        ptrBuffer[idx] = nil;
+    }
+    count = 0;
+}
+
+- (NSMutableData *)getBuffer
+{
+    return( buffer );
+}
+
+- (void)setBuffer:(NSMutableData *)np
+{
+    buffer = np;
+}
+
+- (NSUInteger)getCount
+{
+    return( count );
+}
+
+- (void)setCount:(NSUInteger)aCount
+{
+    count = aCount;
+}
+
+- (id *)getPtrBuffer
+{
+    return( ptrBuffer );
+}
+
+- (void)setPtrBuffer:(id *)np
+{
+    ptrBuffer = np;
+}
+
+- (NSUInteger)getPtr
+{
+    return( ptr );
+}
+
+- (void)setPtr:(NSUInteger)aPtr
+{
+    ptr = aPtr;
+}
+
+- (void) addObject:(id) v
+{
+    [self ensureCapacity:ptr];
+    if ( v ) [v retain];
+    ptrBuffer[ptr++] = v;
+    count++;
+}
+
+- (void) push:(id) v
+{
+    if ( ptr >= BuffSize - 1 ) {
+        [self ensureCapacity:ptr];
+    }
+    if ( v ) [v retain];
+    ptrBuffer[ptr++] = v;
+    count++;
+}
+
+- (id) pop
+{
+    id v = nil;
+    if ( ptr > 0 ) {
+        v = ptrBuffer[--ptr];
+        ptrBuffer[ptr] = nil;
+        count--;
+        if ( v ) [v autorelease];   // hand back a live object; the buffer gives up its retain
+    }
+    return v;
+}
+
+- (id) peek
+{
+    id v = nil;
+    if ( ptr > 0 ) {
+        v = ptrBuffer[ptr-1];
+    }
+    return v;
+}
+
+- (NSUInteger)count
+{
+#ifdef DONTUSENOMO
+    int cnt = 0;
+    
+    for (NSInteger i = 0; i < BuffSize; i++ ) {
+        if ( ptrBuffer[i] != nil ) {
+            cnt++;
+        }
+    }
+    if ( cnt != count ) count = cnt;
+#endif
+    return count;
+}
+
+- (NSUInteger)length
+{
+    return BuffSize;
+}
+
+- (NSUInteger)size
+{
+    NSUInteger aSize = 0;
+    for (int i = 0; i < BuffSize; i++ ) {
+        if (ptrBuffer[i] != nil) {
+            aSize += sizeof(id);
+        }
+    }
+    return aSize;
+}
+
+- (void) insertObject:(id)aRule atIndex:(NSUInteger)idx
+{
+    if ( idx >= BuffSize ) {
+        [self ensureCapacity:idx];
+    }
+    if ( aRule != ptrBuffer[idx] ) {
+        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
+        if ( aRule ) [aRule retain];
+    }
+    ptrBuffer[idx] = aRule;
+    count++;
+}
+
+- (id)objectAtIndex:(NSUInteger)idx
+{
+    if ( idx < BuffSize ) {
+        return ptrBuffer[idx];
+    }
+    return nil;
+}
+
+- (void)addObjectsFromArray:(PtrBuffer *)anArray
+{
+    NSInteger cnt, i;
+    cnt = [anArray count];
+    for( i = 0; i < cnt; i++) {
+        id tmp = [anArray objectAtIndex:i];
+        // insertObject:atIndex: retains the object and bumps count itself,
+        // so no extra retain or count adjustment is needed here
+        [self insertObject:tmp atIndex:i];
+    }
+    return;
+}
+
+- (void)removeAllObjects
+{
+    int i;
+    for ( i = 0; i < BuffSize; i++ ) {
+        if ( ptrBuffer[i] ) [ptrBuffer[i] release];
+        ptrBuffer[i] = nil;
+    }
+    count = 0;
+    ptr = 0;
+}
+
+- (void)removeObjectAtIndex:(NSInteger)idx
+{
+    int i;
+    if ( idx >= 0 && idx < count ) {
+        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
+        for ( i = idx; i < count-1; i++ ) {
+            ptrBuffer[i] = ptrBuffer[i+1];
+        }
+        ptrBuffer[i] = nil;
+        count--;
+    }
+}
+
+- (void) ensureCapacity:(NSUInteger) anIndex
+{
+    if ((anIndex * sizeof(id)) >= [buffer length])
+    {
+        NSInteger newSize = ([buffer length] / sizeof(id)) * 2;
+        if (anIndex > newSize) {
+            newSize = anIndex + 1;
+        }
+        BuffSize = newSize;
+        [buffer setLength:(BuffSize * sizeof(id))];
+        ptrBuffer = [buffer mutableBytes];
+    }
+}
+
+- (NSString *) description
+{
+    NSMutableString *str;
+    NSInteger idx, cnt;
+    cnt = [self count];
+    str = [NSMutableString stringWithCapacity:30];
+    [str appendString:@"["];
+    for (idx = 0; idx < cnt; idx++ ) {
+        [str appendString:[[self objectAtIndex:idx] description]];
+    }
+    [str appendString:@"]"];
+    return str;
+}
+
+- (NSString *) toString
+{
+    return [self description];
+}
+
+@end
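+// A minimal usage sketch treating the buffer as a stack; `globalScope` and
+// `localScope` are placeholder objects. Pushed objects are retained by the
+// buffer and released again on pop/removeAllObjects:
+//
+//   PtrBuffer *scopes = [PtrBuffer newPtrBufferWithLen:16];
+//   [scopes push:globalScope];
+//   [scopes push:localScope];
+//   id top = [scopes peek];   // localScope, still owned by the buffer
+//   [scopes pop];
+//   NSLog(@"%lu scope(s) left", (unsigned long)[scopes count]);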
diff --git a/runtime/ObjC/Framework/PtrStack.h b/runtime/ObjC/Framework/PtrStack.h
new file mode 100644
index 0000000..cb4e7e3
--- /dev/null
+++ b/runtime/ObjC/Framework/PtrStack.h
@@ -0,0 +1,52 @@
+//
+//  PtrStack.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/9/10.
+//  Copyright 2010 Alan's MachineWorks. All rights reserved.
+//ptrBuffer
+
+#import <Foundation/Foundation.h>
+#import "ACNumber.h"
+#import "BaseStack.h"
+#import "RuleMemo.h"
+
+//#define GLOBAL_SCOPE       0
+//#define LOCAL_SCOPE        1
+#define HASHSIZE         101
+#define HBUFSIZE      0x2000
+
+@interface PtrStack : BaseStack {
+	//PtrStack *fNext;
+    // TStringPool *fPool;
+}
+
+//@property (copy) PtrStack *fNext;
+//@property (copy) TStringPool *fPool;
+
+// Construction/Destruction
++ (PtrStack *)newPtrStack;
++ (PtrStack *)newPtrStack:(NSInteger)cnt;
+- (id)init;
+- (id)initWithLen:(NSInteger)aLen;
+- (void)dealloc;
+
+// Instance Methods
+- (id) copyWithZone:(NSZone *)aZone;
+/* clear -- reinitialize the maplist array */
+
+#ifdef DONTUSENOMO
+/* form hash value for string s */
+- (NSInteger)hash:(NSString *)s;
+/*   look for s in ptrBuffer  */
+- (id)lookup:(NSString *)s;
+/* look for s in ptrBuffer  */
+- (id)install:(id)sym;
+#endif
+
+#ifdef DONTUSENOMO
+- (id)getTType:(NSString *)name;
+- (id)getName:(NSInteger)ttype;
+#endif
+
+@end
diff --git a/runtime/ObjC/Framework/PtrStack.m b/runtime/ObjC/Framework/PtrStack.m
new file mode 100644
index 0000000..260e5c6
--- /dev/null
+++ b/runtime/ObjC/Framework/PtrStack.m
@@ -0,0 +1,191 @@
+//
+//  PtrStack.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/9/10.
+//  Copyright 2010 Alan's MachineWorks. All rights reserved.
+//
+#define SUCCESS (0)
+#define FAILURE (-1)
+
+#import "PtrStack.h"
+#import "Tree.h"
+
+/*
+ * Start of PtrStack
+ */
+@implementation PtrStack
+
++(PtrStack *)newPtrStack
+{
+    return [[PtrStack alloc] init];
+}
+
++(PtrStack *)newPtrStack:(NSInteger)cnt
+{
+    return [[PtrStack alloc] initWithLen:cnt];
+}
+
+-(id)init
+{
+	self = [super initWithLen:HASHSIZE];
+	if ( self != nil ) {
+	}
+    return( self );
+}
+
+-(id)initWithLen:(NSInteger)cnt
+{
+	self = [super initWithLen:cnt];
+	if ( self != nil ) {
+	}
+    return( self );
+}
+
+-(void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in PtrStack" );
+#endif
+	[super dealloc];
+}
+
+-(void)deletePtrStack:(PtrStack *)np
+{
+    LinkBase *tmp, *rtmp;
+    NSInteger idx;
+    
+    if ( self.fNext != nil ) {
+        for( idx = 0; idx < BuffSize; idx++ ) {
+            tmp = ptrBuffer[idx];
+            while ( tmp ) {
+                rtmp = tmp;
+                tmp = [tmp getfNext];
+                [rtmp release];
+            }
+        }
+    }
+}
+
+#ifdef DONTUSENOMO
+#ifdef USERDOC
+/*
+ *  HASH        hash entry to get index to table
+ *  NSInteger hash( PtrStack *self, char *s );
+ *
+ *     Inputs:  NSString *s         string to find
+ *
+ *     Returns: NSInteger                 hashed value
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(NSInteger)hash:(NSString *)s       /*    form hash value for string s */
+{
+	NSInteger hashval;
+	const char *tmp;
+    
+	tmp = [s cStringUsingEncoding:NSASCIIStringEncoding];
+	for( hashval = 0; *tmp != '\0'; )
+        hashval += *tmp++;
+	LastHash = hashval % HashSize;
+	return( LastHash );
+}
+
+#ifdef USERDOC
+/*
+ *  LOOKUP  search hashed list for entry
+ *  id lookup:(NSString *)s;
+ *
+ *     Inputs:  NSString  *s       string to find
+ *
+ *     Returns: RuleMemo  *        pointer to entry
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(id)lookup:(NSString *)s
+{
+    LinkBase *np;
+    
+    for( np = ptrBuffer[[self hash:s]]; np != nil; np = [np getfNext] ) {
+        if ( [s isEqualToString:[np getName]] ) {
+            return( np );        /*   found it       */
+        }
+    }
+    return( nil );              /*   not found      */
+}
+
+#ifdef USERDOC
+/*
+ *  INSTALL search hashed list for entry
+ *  NSInteger install( PtrStack *self, id sym );
+ *
+ *     Inputs:  RuleMemo    *sym   -- symbol ptr to install
+ *              NSInteger         scope -- level to find
+ *
+ *     Returns: Boolean     TRUE   if installed
+ *                          FALSE  if already in table
+ *
+ *  Last Revision 9/03/90
+ */
+#endif
+-(id)install:(id)sym
+{
+    LinkBase *np;
+    
+    np = [self lookup:[sym getName]];
+    if ( np == nil ) {
+        [sym setFNext:ptrBuffer[ LastHash ]];
+        ptrBuffer[ LastHash ] = [sym retain];
+        return( ptrBuffer[ LastHash ] );
+    }
+    return( nil );            /*   not found      */
+}
+#endif
+
+-(id)getptrBufferEntry:(NSInteger)idx
+{
+	return( ptrBuffer[idx] );
+}
+
+-(id *)getptrBuffer
+{
+	return( ptrBuffer );
+}
+
+-(void)setptrBuffer:(id *)np
+{
+    ptrBuffer = np;
+}
+
+#ifdef DONTUSENOMO
+/*
+ * works only for maplist indexed not by name but by TokenNumber
+ */
+- (id)getName:(NSInteger)ttype
+{
+    id np;
+    NSInteger aTType;
+
+    aTType = ttype % HashSize;
+    for( np = ptrBuffer[ttype]; np != nil; np = [np getfNext] ) {
+        if ( np.index == ttype ) {
+            return( np );        /*   found it       */
+        }
+    }
+    return( nil );              /*   not found      */
+}
+
+- (id)getTType:(NSString *)name
+{
+    return [self lookup:name];
+}
+#endif
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    return [super copyWithZone:aZone];
+}
+
+@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/README.rtf b/runtime/ObjC/Framework/README.rtf
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/README.rtf
rename to runtime/ObjC/Framework/README.rtf
diff --git a/runtime/ObjC/Framework/RecognitionException.h b/runtime/ObjC/Framework/RecognitionException.h
new file mode 100644
index 0000000..8919812
--- /dev/null
+++ b/runtime/ObjC/Framework/RecognitionException.h
@@ -0,0 +1,78 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+#import "RuntimeException.h"
+#import "Token.h"
+#import "IntStream.h"
+#import "BaseTree.h"
+
+@interface RecognitionException : RuntimeException {
+	id<IntStream> input;
+	NSInteger index;
+	id<Token> token;
+	id<BaseTree> node;
+	unichar c;
+	NSUInteger line;
+	NSUInteger charPositionInLine;
+	BOOL approximateLineInfo;
+}
+
+@property (retain, getter=getStream, setter=setStream:) id<IntStream> input;
+@property (assign) NSInteger index;
+@property (retain, getter=getToken, setter=setToken:) id<Token> token;
+@property (retain, getter=getNode, setter=setNode:) id<BaseTree> node;
+@property (assign) unichar c;
+@property (assign) NSUInteger line;
+@property (assign) NSUInteger charPositionInLine;
+@property (assign) BOOL approximateLineInfo;
+
++ (id) newException;
++ (id) newException:(id<IntStream>) anInputStream; 
+- (id) init;
+- (id) initWithStream:(id<IntStream>)anInputStream;
+- (id) initWithStream:(id<IntStream>)anInputStream reason:(NSString *)aReason;
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+- (void) extractInformationFromTreeNodeStream:(id<IntStream>)input;
+
+- (NSInteger) unexpectedType;
+- (id<Token>)getUnexpectedToken;
+
+- (id<IntStream>) getStream;
+- (void) setStream: (id<IntStream>) aStream;
+
+- (id<Token>) getToken;
+- (void) setToken: (id<Token>) aToken;
+
+- (id<BaseTree>) getNode;
+- (void) setNode: (id<BaseTree>) aNode;
+
+- (NSString *)getMessage;
+
+
+@end
diff --git a/runtime/ObjC/Framework/RecognitionException.m b/runtime/ObjC/Framework/RecognitionException.m
new file mode 100644
index 0000000..d6430b7
--- /dev/null
+++ b/runtime/ObjC/Framework/RecognitionException.m
@@ -0,0 +1,282 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "Foundation/NSObjCRuntime.h"
+#import "RecognitionException.h"
+#import "TokenStream.h"
+#import "TreeNodeStream.h"
+#import "BufferedTokenStream.h"
+
+@implementation RecognitionException
+
+@synthesize input;
+@synthesize index;
+@synthesize token;
+@synthesize node;
+@synthesize c;
+@synthesize line;
+@synthesize charPositionInLine;
+@synthesize approximateLineInfo;
+
++ (id) newException
+{
+	return [[RecognitionException alloc] init];
+}
+
++ (id) newException:(id<IntStream>) anInputStream
+{
+	return [[RecognitionException alloc] initWithStream:anInputStream];
+}
+
++ (id) newException:(id<IntStream>) anInputStream reason:(NSString *)aReason
+{
+	return [[RecognitionException alloc] initWithStream:anInputStream reason:aReason];
+}
+
+- (id) init
+{
+	self = [super initWithName:@"Recognition Exception" reason:@"Recognition Exception" userInfo:nil];
+	if ( self != nil ) {
+	}
+	return self;
+}
+
+- (id) initWithStream:(id<IntStream>)anInputStream reason:(NSString *)aReason
+{
+	self = [super initWithName:NSStringFromClass([self class]) reason:aReason userInfo:nil];
+	if ( self != nil ) {
+		[self setStream:anInputStream];
+		index = input.index;
+		
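+		// Populate location info according to the kind of stream being read: token
+		// streams give us the offending Token, char streams the current character plus
+		// line/column, and tree node streams a node whose payload token (if any)
+		// carries the position.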
+		Class inputClass = [input class];
+		if ([inputClass conformsToProtocol:@protocol(TokenStream)]) {
+			[self setToken:[(id<TokenStream>)input LT:1]];
+			line = token.line;
+			charPositionInLine = token.charPositionInLine;
+		} else if ([inputClass conformsToProtocol:@protocol(CharStream)]) {
+			c = (unichar)[input LA:1];
+			line = ((id<CharStream>)input).getLine;
+			charPositionInLine = ((id<CharStream>)input).getCharPositionInLine;
+		} else if ([inputClass conformsToProtocol:@protocol(TreeNodeStream)]) {
+			[self setNode:[(id<TreeNodeStream>)input LT:1]];
+			line = [node line];
+			charPositionInLine = [node charPositionInLine];
+		} else {
+			c = (unichar)[input LA:1];
+		}
+	}
+	return self;
+}
+
+- (id) initWithStream:(id<IntStream>)anInputStream
+{
+	self = [super initWithName:NSStringFromClass([self class]) reason:@"Runtime Exception" userInfo:nil];
+	if ( self != nil ) {
+        self.input = anInputStream;
+        self.index = input.index;
+        if ( [anInputStream isKindOfClass:[BufferedTokenStream class]] ) {
+            self.token = [(id<TokenStream>)anInputStream LT:1];
+            self.line = [token line];
+            self.charPositionInLine = [token charPositionInLine];
+           if ( [input conformsToProtocol:objc_getProtocol("TreeNodeStream")] ) {
+               [self extractInformationFromTreeNodeStream:anInputStream];
+           }
+           else if ( [[anInputStream class] instancesRespondToSelector:@selector(LA:)] ) {
+               c = [anInputStream LA:1];
+               if ( [[anInputStream class] instancesRespondToSelector:@selector(getLine)] )
+                   line = [anInputStream getLine];
+               if ( [[anInputStream class] instancesRespondToSelector:@selector(getCharPositionInLine)] )
+                   charPositionInLine = [anInputStream getCharPositionInLine];
+           }
+           else {
+               c = [anInputStream LA:1];
+           }
+        }
+	}
+	return self;
+}
+
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+	self = [super initWithName:aName reason:aReason userInfo:aUserInfo];
+	if ( self != nil ) {
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in RecognitionException" );
+#endif
+	if ( input ) [input release];
+	if ( token ) [token release];
+	if ( node ) [node release];
+	[super dealloc];
+}
+
+- (void) extractInformationFromTreeNodeStream:(id<TreeNodeStream>)anInput
+{
+    id<TreeNodeStream> nodes = anInput;
+    node = [nodes LT:1];
+    id<TreeAdaptor> adaptor = [nodes getTreeAdaptor];
+    id<Token> payload = [adaptor getToken:node];
+    if ( payload != nil ) {
+        token = payload;
+        if ( payload.line <= 0 ) {
+            // imaginary node; no line/pos info; scan backwards
+            int i = -1;
+            id priorNode = [nodes LT:i];
+            while ( priorNode != nil ) {
+                id<Token> priorPayload = [adaptor getToken:priorNode];
+                if ( priorPayload!=nil && priorPayload.line > 0 ) {
+                    // we found the most recent real line / pos info
+                    line = priorPayload.line;
+                    charPositionInLine = priorPayload.charPositionInLine;
+                    approximateLineInfo = YES;
+                    break;
+                }
+                --i;
+                priorNode = [nodes LT:i];
+            }
+        }
+        else { // node created from real token
+            line = payload.line;
+            charPositionInLine = payload.charPositionInLine;
+        }
+    }
+    else if ( [self.node isKindOfClass:[CommonTree class]] ) {
+        line = ((id<Tree>)node).line;
+        charPositionInLine = ((id<Tree>)node).charPositionInLine;
+        if ( [node isMemberOfClass:[CommonTree class]]) {
+            token = ((CommonTree *)node).token;
+        }
+    }
+    else {
+        NSInteger type = [adaptor getType:node];
+        NSString *text = [adaptor getText:node];
+        self.token = [CommonToken newToken:type Text:text];
+    }
+}
+
+- (NSInteger) unexpectedType
+{
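+	// prefer the mismatched token's type, then the offending tree node's type, and
+	// fall back to the raw character for lexer errors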
+	if (token) {
+		return token.type;
+    } else if (node) {
+        return [node type];
+	} else {
+		return c;
+	}
+}
+
+- (id<Token>)getUnexpectedToken
+{
+    return token;
+}
+
+- (NSString *) description
+{
+	//NSMutableString *desc = [[NSMutableString alloc] initWithString:NSStringFromClass([self class])];
+	NSMutableString *desc = [NSMutableString stringWithString:[self className]];
+	if (token) {
+		[desc appendFormat:@" token:%@", token];
+	} else if (node) {
+		[desc appendFormat:@" node:%@", node];
+	} else {
+		[desc appendFormat:@" char:%c", c];
+	}
+	[desc appendFormat:@" line:%lu position:%lu", (unsigned long)line, (unsigned long)charPositionInLine];
+	return desc;
+}
+
+//---------------------------------------------------------- 
+//  input 
+//---------------------------------------------------------- 
+- (id<IntStream>) getStream
+{
+    return input; 
+}
+
+- (void) setStream: (id<IntStream>) aStream
+{
+    if ( input != aStream ) {
+        if ( input ) [input release];
+        if ( aStream ) [aStream retain];
+        input = aStream;
+    }
+}
+
+//---------------------------------------------------------- 
+//  token 
+//---------------------------------------------------------- 
+- (id<Token>) getToken
+{
+    return token; 
+}
+
+- (void) setToken: (id<Token>) aToken
+{
+    if (token != aToken) {
+        if ( token ) [token release];
+        if ( aToken ) [aToken retain];
+        token = aToken;
+    }
+}
+
+//---------------------------------------------------------- 
+//  node 
+//---------------------------------------------------------- 
+- (id<BaseTree>) getNode
+{
+    return node; 
+}
+
+- (void) setNode: (id<BaseTree>) aNode
+{
+    if (node != aNode) {
+        if ( node ) [node release];
+        if ( aNode ) [aNode retain];
+        node = aNode;
+    }
+}
+
+- (NSString *)getMessage
+{
+    return @"Fix getMessage in RecognitionException";
+}
+
+- (NSUInteger)charPositionInLine
+{
+    return charPositionInLine;
+}
+
+- (void)setCharPositionInLine:(NSUInteger)aPos
+{
+    charPositionInLine = aPos;
+}
+
+@end
diff --git a/runtime/ObjC/Framework/RecognizerSharedState.h b/runtime/ObjC/Framework/RecognizerSharedState.h
new file mode 100755
index 0000000..72af752
--- /dev/null
+++ b/runtime/ObjC/Framework/RecognizerSharedState.h
@@ -0,0 +1,117 @@
+// [The "BSD licence"]
+// Copyright (c) 2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Token.h"
+#import "ANTLRBitSet.h"
+#import "RuleStack.h"
+#import "AMutableArray.h"
+
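+/** Mirrors the Java runtime's RecognizerSharedState: the state that needs to be
+ *  shared between a recognizer and any delegate grammars, i.e. error-recovery flags,
+ *  the FOLLOW-set stack, rule memoization, and the fields a lexer uses while it is
+ *  building its current token.
+ */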
+@interface RecognizerSharedState : NSObject {
+	__strong AMutableArray *following;  // a stack of FOLLOW bitsets used for context sensitive prediction and recovery
+    NSInteger _fsp;                     // Follow stack pointer
+	BOOL errorRecovery;                 // are we recovering?
+	NSInteger lastErrorIndex;
+	BOOL failed;                        // indicate that some match failed
+    NSInteger syntaxErrors;
+	NSInteger backtracking;             // the level of backtracking
+	__strong RuleStack *ruleMemo;	// store previous results of matching rules so we don't have to do it again. Hook in incremental stuff here, too.
+
+	__strong id<Token> token;
+	NSInteger  tokenStartCharIndex;
+	NSUInteger tokenStartLine;
+	NSUInteger tokenStartCharPositionInLine;
+	NSUInteger channel;
+	NSUInteger type;
+	NSString   *text;
+}
+
+@property (retain, getter=getFollowing, setter=setFollowing:) AMutableArray *following;
+@property (assign) NSInteger _fsp;
+@property (assign) BOOL errorRecovery;
+@property (assign) NSInteger lastErrorIndex;
+@property (assign, getter=getFailed, setter=setFailed:) BOOL failed;
+@property (assign) NSInteger syntaxErrors;
+@property (assign, getter=getBacktracking, setter=setBacktracking:) NSInteger backtracking;
+@property (retain, getter=getRuleMemo, setter=setRuleMemo:) RuleStack *ruleMemo;
+@property (copy, getter=getToken, setter=setToken:) id<Token> token;
+@property (getter=type,setter=setType:) NSUInteger type;
+@property (getter=channel,setter=setChannel:) NSUInteger channel;
+@property (getter=getTokenStartLine,setter=setTokenStartLine:) NSUInteger tokenStartLine;
+@property (getter=charPositionInLine,setter=setCharPositionInLine:) NSUInteger tokenStartCharPositionInLine;
+@property (getter=getTokenStartCharIndex,setter=setTokenStartCharIndex:) NSInteger tokenStartCharIndex;
+@property (retain, getter=text, setter=setText:) NSString *text;
+
++ (RecognizerSharedState *) newRecognizerSharedState;
++ (RecognizerSharedState *) newRecognizerSharedStateWithRuleLen:(NSInteger)aLen;
++ (RecognizerSharedState *) newRecognizerSharedState:(RecognizerSharedState *)aState;
+
+- (id) init;
+- (id) initWithRuleLen:(NSInteger)aLen;
+- (id) initWithState:(RecognizerSharedState *)state;
+
+- (id<Token>) getToken;
+- (void) setToken:(id<Token>) theToken;
+
+- (NSUInteger)type;
+- (void) setType:(NSUInteger) theTokenType;
+
+- (NSUInteger)channel;
+- (void) setChannel:(NSUInteger) theChannel;
+
+- (NSUInteger) getTokenStartLine;
+- (void) setTokenStartLine:(NSUInteger) theTokenStartLine;
+
+- (NSUInteger) charPositionInLine;
+- (void) setCharPositionInLine:(NSUInteger) theCharPosition;
+
+- (NSInteger) getTokenStartCharIndex;
+- (void) setTokenStartCharIndex:(NSInteger) theTokenStartCharIndex;
+
+- (NSString *)text;
+- (void) setText:(NSString *) theText;
+
+
+- (AMutableArray *) getFollowing;
+- (void)setFollowing:(AMutableArray *)aFollow;
+- (RuleStack *) getRuleMemo;
+- (void)setRuleMemo:(RuleStack *)aRuleMemo;
+- (BOOL) isErrorRecovery;
+- (void) setIsErrorRecovery: (BOOL) flag;
+
+- (BOOL) getFailed;
+- (void) setFailed: (BOOL) flag;
+
+- (NSInteger)  getBacktracking;
+- (void) setBacktracking:(NSInteger) value;
+- (void) increaseBacktracking;
+- (void) decreaseBacktracking;
+- (BOOL) isBacktracking;
+
+- (NSInteger) lastErrorIndex;
+- (void) setLastErrorIndex:(NSInteger) value;
+
+@end
diff --git a/runtime/ObjC/Framework/RecognizerSharedState.m b/runtime/ObjC/Framework/RecognizerSharedState.m
new file mode 100755
index 0000000..f964668
--- /dev/null
+++ b/runtime/ObjC/Framework/RecognizerSharedState.m
@@ -0,0 +1,331 @@
+// [The "BSD licence"]
+// Copyright (c) 2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "RecognizerSharedState.h"
+#import "CharStream.h"
+#import "CommonToken.h"
+#import "MismatchedTokenException.h"
+#import "MismatchedRangeException.h"
+
+@implementation RecognizerSharedState
+
+@synthesize following;
+@synthesize _fsp;
+@synthesize errorRecovery;
+@synthesize lastErrorIndex;
+@synthesize failed;
+@synthesize syntaxErrors;
+@synthesize backtracking;
+@synthesize ruleMemo;
+@synthesize token;
+@synthesize type;
+@synthesize channel;
+@synthesize tokenStartLine;
+@synthesize tokenStartCharPositionInLine;
+@synthesize tokenStartCharIndex;
+@synthesize text;
+
++ (RecognizerSharedState *) newRecognizerSharedState
+{
+    return [[RecognizerSharedState alloc] init];    // alloc/init already hands back a +1 reference
+}
+
++ (RecognizerSharedState *) newRecognizerSharedStateWithRuleLen:(NSInteger)aLen
+{
+    return [[RecognizerSharedState alloc] initWithRuleLen:aLen];
+}
+
++ (RecognizerSharedState *) newRecognizerSharedState:(RecognizerSharedState *)aState
+{
+    return [[RecognizerSharedState alloc] initWithState:aState];
+}
+
+- (id) init
+{
+    HashRule *aHashRule;
+	if ((self = [super init]) != nil ) {
+        following = [[AMutableArray arrayWithCapacity:10] retain];
+        _fsp = -1;
+        errorRecovery = NO;			// are we recovering?
+        lastErrorIndex = -1;
+        failed = NO;				// indicate that some match failed
+        syntaxErrors = 0;
+        backtracking = 0;			// the level of backtracking
+        tokenStartCharIndex = -1;
+        tokenStartLine = 0;
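+        // pre-fill the memoization stack with one hash table per rule slot; 200 is a
+        // default capacity used when the caller does not supply the real rule count
+        // (see initWithRuleLen:)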
+        int cnt = 200;
+		ruleMemo = [[RuleStack newRuleStack:cnt] retain];
+        for (int i = 0; i < cnt; i++ ) {
+            aHashRule = [[HashRule newHashRuleWithLen:17] retain];
+            [ruleMemo addObject:aHashRule];
+        }
+#ifdef DONTUSEYET
+        token = state.token;
+        tokenStartCharIndex = state.tokenStartCharIndex;
+        tokenStartCharPositionInLine = state.tokenStartCharPositionInLine;
+        channel = state.channel;
+        type = state.type;
+        text = state.text;
+#endif
+	}
+	return self;
+}
+
+- (id) initWithRuleLen:(NSInteger)aLen
+{
+    HashRule *aHashRule;
+	if ((self = [super init]) != nil ) {
+        following = [[AMutableArray arrayWithCapacity:10] retain];
+        _fsp = -1;
+        errorRecovery = NO;			// are we recovering?
+        lastErrorIndex = -1;
+        failed = NO;				// indicate that some match failed
+        syntaxErrors = 0;
+        backtracking = 0;			// the level of backtracking
+        tokenStartCharIndex = -1;
+        tokenStartLine = 0;
+		ruleMemo = [[RuleStack newRuleStack:aLen] retain];
+        for (int i = 0; i < aLen; i++ ) {
+            aHashRule = [[HashRule newHashRuleWithLen:17] retain];
+            [ruleMemo addObject:aHashRule];
+        }
+#ifdef DONTUSEYET
+        token = state.token;
+        tokenStartCharIndex = state.tokenStartCharIndex;
+        tokenStartCharPositionInLine = state.tokenStartCharPositionInLine;
+        channel = state.channel;
+        type = state.type;
+        text = state.text;
+#endif
+	}
+	return self;
+}
+
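+/** Initialise this state from an existing one, copying the error-recovery flags,
+ *  memoization tables and in-progress token fields of another recognizer.
+ */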
+- (id) initWithState:(RecognizerSharedState *)aState
+{
+    HashRule *aHashRule;
+    if ( [following count] < [aState.following count] ) {
+        //        following = new BitSet[state.following.size];
+    }
+    [following setArray:aState.following];
+    _fsp = aState._fsp;
+    errorRecovery = aState.errorRecovery;
+    lastErrorIndex = aState.lastErrorIndex;
+    failed = aState.failed;
+    syntaxErrors = aState.syntaxErrors;
+    backtracking = aState.backtracking;
+    if ( aState.ruleMemo == nil ) {
+        int cnt = 200;
+        ruleMemo = [[RuleStack newRuleStack:cnt] retain];
+        for (int i = 0; i < cnt; i++ ) {
+            aHashRule = [[HashRule newHashRuleWithLen:17] retain];
+            [ruleMemo addObject:aHashRule];
+        }
+    }
+    else {
+        ruleMemo = aState.ruleMemo;
+        if ( [ruleMemo count] == 0 ) {
+            int cnt = [ruleMemo length];
+            for (int i = 0; i < cnt; i++ ) {
+                [ruleMemo addObject:[[HashRule newHashRuleWithLen:17] retain]];
+            }
+        }
+        else {
+            [ruleMemo addObjectsFromArray:aState.ruleMemo];
+        }
+    }
+    token = aState.token;
+    tokenStartCharIndex = aState.tokenStartCharIndex;
+    tokenStartCharPositionInLine = aState.tokenStartCharPositionInLine;
+    tokenStartLine = aState.tokenStartLine;
+    channel = aState.channel;
+    type = aState.type;
+    text = aState.text;
+    return( self );
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in RecognizerSharedState" );
+#endif
+    if ( token ) [token release];
+	if ( following ) [following release];
+	if ( ruleMemo ) [ruleMemo release];
+	[super dealloc];
+}
+
+// token stuff
+#pragma mark Tokens
+
+- (id<Token>)getToken
+{
+    return token; 
+}
+
+- (void) setToken: (id<Token>) aToken
+{
+    if (token != aToken) {
+        [aToken retain];
+        if ( token ) [token release];
+        token = aToken;
+    }
+}
+
+- (NSUInteger)channel
+{
+    return channel;
+}
+
+- (void) setChannel:(NSUInteger) theChannel
+{
+    channel = theChannel;
+}
+
+- (NSUInteger) getTokenStartLine
+{
+    return tokenStartLine;
+}
+
+- (void) setTokenStartLine:(NSUInteger) theTokenStartLine
+{
+    tokenStartLine = theTokenStartLine;
+}
+
+- (NSUInteger) charPositionInLine
+{
+    return tokenStartCharPositionInLine;
+}
+
+- (void) setCharPositionInLine:(NSUInteger) theCharPosition
+{
+    tokenStartCharPositionInLine = theCharPosition;
+}
+
+- (NSInteger) getTokenStartCharIndex
+{
+    return tokenStartCharIndex;
+}
+
+- (void) setTokenStartCharIndex:(NSInteger) theTokenStartCharIndex
+{
+    tokenStartCharIndex = theTokenStartCharIndex;
+}
+
+// error handling
+- (void) reportError:(RecognitionException *)e
+{
+	NSLog(@"%@", e.name);
+}
+
+- (AMutableArray *) getFollowing
+{
+	return following;
+}
+
+- (void)setFollowing:(AMutableArray *)aFollow
+{
+    if ( following != aFollow ) {
+        if ( following ) [following release];
+        [aFollow retain];
+    }
+    following = aFollow;
+}
+
+- (RuleStack *) getRuleMemo
+{
+	return ruleMemo;
+}
+
+- (void)setRuleMemo:(RuleStack *)aRuleMemo
+{
+    if ( ruleMemo != aRuleMemo ) {
+        if ( ruleMemo ) [ruleMemo release];
+        [aRuleMemo retain];
+    }
+    ruleMemo = aRuleMemo;
+}
+
+- (BOOL) isErrorRecovery
+{
+	return errorRecovery;
+}
+
+- (void) setIsErrorRecovery: (BOOL) flag
+{
+	errorRecovery = flag;
+}
+
+
+- (BOOL) getFailed
+{
+	return failed;
+}
+
+- (void) setFailed:(BOOL)flag
+{
+	failed = flag;
+}
+
+
+- (NSInteger) backtracking
+{
+	return backtracking;
+}
+
+- (void) setBacktracking:(NSInteger) value
+{
+	backtracking = value;
+}
+
+- (void) increaseBacktracking
+{
+	backtracking++;
+}
+
+- (void) decreaseBacktracking
+{
+	backtracking--;
+}
+
+- (BOOL) isBacktracking
+{
+	return backtracking > 0;
+}
+
+
+- (NSInteger) lastErrorIndex
+{
+    return lastErrorIndex;
+}
+
+- (void) setLastErrorIndex:(NSInteger) value
+{
+	lastErrorIndex = value;
+}
+
+
+@end
diff --git a/runtime/ObjC/Framework/RewriteRuleElementStream.h b/runtime/ObjC/Framework/RewriteRuleElementStream.h
new file mode 100644
index 0000000..367b4c0
--- /dev/null
+++ b/runtime/ObjC/Framework/RewriteRuleElementStream.h
@@ -0,0 +1,82 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "TreeAdaptor.h"
+
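+/** Base class of the RewriteRuleTokenStream / RewriteRuleSubtreeStream /
+ *  RewriteRuleNodeStream helpers that generated code uses on the right-hand side of
+ *  rewrite rules ( ... -> ^(...) ).  A stream holds either a single element or a
+ *  list of elements; once it has been reset (dirty == YES) every element handed out
+ *  is duplicated, so the same rewrite template can be expanded more than once.
+ */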
+// TODO: this should be separated into stream and enumerator classes
+@interface RewriteRuleElementStream : NSObject {
+    NSInteger cursor;
+    BOOL dirty;        ///< indicates whether the stream should return copies of its elements, set to true after a call to -reset
+    BOOL isSingleElement;
+    id singleElement;
+    __strong AMutableArray *elements;
+    
+    __strong NSString *elementDescription;
+    __strong id<TreeAdaptor> treeAdaptor;
+}
+
+@property (assign) NSInteger cursor;
+@property (assign) BOOL dirty;
+@property (assign) BOOL isSingleElement;
+@property (assign) id singleElement;
+@property (assign) AMutableArray *elements;
+@property (assign) NSString *elementDescription;
+@property (retain) id<TreeAdaptor> treeAdaptor;
+
++ (RewriteRuleElementStream*) newRewriteRuleElementStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                        description:(NSString *)anElementDescription;
++ (RewriteRuleElementStream*) newRewriteRuleElementStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                        description:(NSString *)anElementDescription
+                                                            element:(id)anElement;
++ (RewriteRuleElementStream*) newRewriteRuleElementStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                        description:(NSString *)anElementDescription
+                                                           elements:(NSArray *)theElements;
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
+
+- (void)reset;
+
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void) setTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor;
+
+- (void) addElement:(id)anElement;
+- (NSInteger) size;
+ 
+- (BOOL) hasNext;
+- (id<BaseTree>) nextTree;
+- (id<BaseTree>) _next;       // internal: TODO: redesign if necessary. maybe delegate
+
+- (id) copyElement:(id)element;
+- (id) toTree:(id)element;
+
+- (NSString *) getDescription;
+- (void) setDescription:(NSString *)description;
+
+@end
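+
+// Sketch of how generated code typically drives one of the concrete subclasses
+// (the adaptor call names below are illustrative, not guaranteed selectors):
+//
+//     RewriteRuleSubtreeStream *stream_expr =
+//         [RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+//                                               description:@"rule expr"];
+//     [stream_expr addElement:exprTree];
+//     while ( [stream_expr hasNext] ) {
+//         [treeAdaptor addChild:[stream_expr nextTree] toTree:root];
+//     }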
+
diff --git a/runtime/ObjC/Framework/RewriteRuleElementStream.m b/runtime/ObjC/Framework/RewriteRuleElementStream.m
new file mode 100644
index 0000000..66a4004
--- /dev/null
+++ b/runtime/ObjC/Framework/RewriteRuleElementStream.m
@@ -0,0 +1,258 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "RewriteRuleElementStream.h"
+
+@implementation RewriteRuleElementStream
+
+@synthesize cursor;
+@synthesize dirty;
+@synthesize isSingleElement;
+@synthesize singleElement;
+@synthesize elements;
+@synthesize elementDescription;
+@synthesize treeAdaptor;
+
++ (RewriteRuleElementStream *) newRewriteRuleElementStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                         description:(NSString *)anElementDescription
+{
+    return [[RewriteRuleElementStream alloc] initWithTreeAdaptor:aTreeAdaptor
+                                                          description:anElementDescription];
+}
+
++ (RewriteRuleElementStream *) newRewriteRuleElementStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                         description:(NSString *)anElementDescription
+                                                             element:(id)anElement
+{
+    return [[RewriteRuleElementStream alloc] initWithTreeAdaptor:aTreeAdaptor
+                                                          description:anElementDescription
+                                                              element:anElement];
+}
+
++ (RewriteRuleElementStream *) newRewriteRuleElementStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                         description:(NSString *)anElementDescription
+                                                            elements:(NSArray *)theElements;
+{
+    return [[RewriteRuleElementStream alloc] initWithTreeAdaptor:aTreeAdaptor
+                                                          description:anElementDescription
+                                                             elements:theElements];
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription
+{
+    if ((self = [super init]) != nil) {
+        cursor = 0;
+        dirty = NO;
+        [self setDescription:anElementDescription];
+        [self setTreeAdaptor:aTreeAdaptor];
+        dirty = NO;
+        isSingleElement = YES;
+        singleElement = nil;
+        elements = nil;
+    }
+    return self;
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement
+{
+    if ((self = [super init]) != nil) {
+        cursor = 0;
+        dirty = NO;
+        [self setDescription:anElementDescription];
+        [self setTreeAdaptor:aTreeAdaptor];
+        dirty = NO;
+        isSingleElement = YES;
+        singleElement = nil;
+        elements = nil;
+        [self addElement:anElement];
+    }
+    return self;
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements
+{
+    self = [super init];
+    if (self) {
+        cursor = 0;
+        dirty = NO;
+        [self setDescription:anElementDescription];
+        [self setTreeAdaptor:aTreeAdaptor];
+        dirty = NO;
+        singleElement = nil;
+        isSingleElement = NO;
+        elements = [[AMutableArray arrayWithArray:theElements] retain];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in RewriteRuleElementStream" );
+#endif
+    if ( singleElement && isSingleElement ) [singleElement release];
+    else if ( elements && !isSingleElement ) [elements release];
+    [self setDescription:nil];
+    [self setTreeAdaptor:nil];
+    [super dealloc];
+}
+
+- (void)reset
+{
+    cursor = 0;
+    dirty = YES;
+}
+
+- (id<TreeAdaptor>) getTreeAdaptor
+{
+    return treeAdaptor;
+}
+
+- (void) setTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor
+{
+    if (treeAdaptor != aTreeAdaptor) {
+        if ( treeAdaptor ) [treeAdaptor release];
+        treeAdaptor = aTreeAdaptor;
+        [treeAdaptor retain];
+    }
+}
+
+- (void) addElement: (id)anElement
+{
+    if (anElement == nil)
+        return;
+    if (elements != nil) {
+        [elements addObject:anElement];
+        return;
+        }
+    if (singleElement == nil) {
+        // keep the single element directly; no array is allocated until a second element arrives
+        singleElement = [anElement retain];
+        return;
+    }
+    isSingleElement = NO;
+    elements = [[AMutableArray arrayWithCapacity:5] retain];
+    [elements addObject:singleElement];
+    [singleElement release]; singleElement = nil;   // the array holds its own reference now; balance the earlier retain
+    [elements addObject:anElement];
+}
+
+- (void) setElement: (id)anElement
+{
+    if (anElement == nil)
+        return;
+    if (elements != nil) {
+        [elements addObject:anElement];
+        return;
+        }
+    if (singleElement == nil) {
+        // keep the single element directly; no array is allocated until a second element arrives
+        singleElement = [anElement retain];
+        return;
+    }
+    isSingleElement = NO;
+    elements = [[AMutableArray arrayWithCapacity:5] retain];
+    [elements addObject:singleElement];
+    [singleElement release]; singleElement = nil;   // the array holds its own reference now; balance the earlier retain
+    [elements addObject:anElement];
+}
+
+- (id<BaseTree>) nextTree
+{
+    NSInteger n = [self size];
+    if ( dirty || (cursor >= n && n == 1) ) {
+        // if out of elements and size is 1, dup
+        id element = [self _next];
+        return [self copyElement:element];
+    }
+    // test size above then fetch
+    id element = [self _next];
+    return element;
+}
+
+- (id) _next       // internal: TODO: redesign if necessary. maybe delegate
+{
+    NSInteger n = [self size];
+    if (n == 0) {
+        @throw [NSException exceptionWithName:@"RewriteEmptyStreamException" reason:nil userInfo:nil];// TODO: fill in real exception
+    }
+    if ( cursor >= n ) {
+        if ( n == 1 ) {
+            return [self toTree:singleElement]; // will be dup'ed in -next
+        }
+        @throw [NSException exceptionWithName:@"RewriteCardinalityException" reason:nil userInfo:nil];// TODO: fill in real exception
+    }
+    if (singleElement != nil) {
+        cursor++;
+        return [self toTree:singleElement];
+    }
+    id el = [elements objectAtIndex:cursor];
+    cursor++;
+    return [self toTree:el];
+}
+
+- (BOOL) hasNext
+{
+    return (singleElement != nil && cursor < 1) ||
+            (elements != nil && cursor < [elements count]);
+}
+
+- (NSInteger) size
+{
+    NSInteger n = 0;
+    if (singleElement != nil)
+        n = 1;
+    if (elements != nil)
+        return [elements count];
+    return n;
+}
+
+- (id) copyElement:(id)element
+{
+    [self doesNotRecognizeSelector:_cmd];   // subclass responsibility
+    return nil;
+}
+
+- (id<BaseTree>) toTree:(id)element
+{
+    return element;
+}
+
+- (NSString *) getDescription
+{
+    return elementDescription;
+}
+
+- (void) setDescription:(NSString *) description
+{
+    if ( description != nil && description != elementDescription ) {
+        if (elementDescription != nil) [elementDescription release];
+        elementDescription = [NSString stringWithString:description];
+        [elementDescription retain];
+    }
+}
+
+@end
diff --git a/runtime/ObjC/Framework/RewriteRuleNodeStream.h b/runtime/ObjC/Framework/RewriteRuleNodeStream.h
new file mode 100755
index 0000000..5792530
--- /dev/null
+++ b/runtime/ObjC/Framework/RewriteRuleNodeStream.h
@@ -0,0 +1,46 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RewriteRuleElementStream.h"
+
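+/** A RewriteRuleElementStream whose elements are tree nodes: nextNode hands out the
+ *  next node, duplicating it through the tree adaptor whenever the stream is being
+ *  re-used, so the same node instance is never inserted into the result tree twice.
+ */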
+@interface RewriteRuleNodeStream : RewriteRuleElementStream {
+
+}
+
++ (RewriteRuleNodeStream *) newRewriteRuleNodeStream:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
++ (RewriteRuleNodeStream *) newRewriteRuleNodeStream:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
++ (RewriteRuleNodeStream *) newRewriteRuleNode:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
+
+- (id) nextNode;
+- (id) toTree:(id<BaseTree>)element;
+- (id) dup:(id)element;
+
+@end
diff --git a/runtime/ObjC/Framework/RewriteRuleNodeStream.m b/runtime/ObjC/Framework/RewriteRuleNodeStream.m
new file mode 100755
index 0000000..9db28ee
--- /dev/null
+++ b/runtime/ObjC/Framework/RewriteRuleNodeStream.m
@@ -0,0 +1,74 @@
+//
+//  RewriteRuleNodeStream.m
+//  ANTLR
+//
+//  Created by Kay Röpke on 7/16/07.
+//  Copyright 2007 classDump. All rights reserved.
+//
+
+#import "RewriteRuleNodeStream.h"
+#import "RuntimeException.h"
+
+@implementation RewriteRuleNodeStream
+
++ (RewriteRuleNodeStream*) newRewriteRuleNodeStream:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription
+{
+    return [[RewriteRuleNodeStream alloc] initWithTreeAdaptor:aTreeAdaptor description:anElementDescription];
+}
+
++ (RewriteRuleNodeStream*) newRewriteRuleNodeStream:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement
+{
+    return [[RewriteRuleNodeStream alloc] initWithTreeAdaptor:aTreeAdaptor description:anElementDescription element:anElement];
+}
+
++ (RewriteRuleNodeStream*) newRewriteRuleNode:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements
+{
+    return [[RewriteRuleNodeStream alloc] initWithTreeAdaptor:aTreeAdaptor description:anElementDescription elements:theElements];
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription
+{
+    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription]) != nil) {
+        dirty = NO;
+        isSingleElement = YES;
+    }
+    return self;
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement
+{
+    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription element:anElement]) != nil) {
+        dirty = NO;
+    }
+    return self;
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements
+{
+    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription elements:theElements]) != nil) {
+        dirty = NO;
+    }
+    return self;
+}
+
+
+- (id) nextNode
+{
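+    // if the stream was reset (dirty) or its single element is being handed out
+    // again, return a fresh copy of the node rather than the same instance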
+    if (dirty || (cursor >= [self size] && [self size] == 1))
+        return [treeAdaptor dupNode:[self _next]];
+    else 
+        return [self _next];
+}
+
+- (id<BaseTree>) toTree:(id<BaseTree>)element
+{
+    return [treeAdaptor dupNode:element];
+}
+
+- (id) dup:(id)element
+{
+    // the Java runtime throws UnsupportedOperationException here; this port duplicates the whole tree instead
+    return [treeAdaptor dupTree:element];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/RewriteRuleSubtreeStream.h b/runtime/ObjC/Framework/RewriteRuleSubtreeStream.h
new file mode 100644
index 0000000..81ccd07
--- /dev/null
+++ b/runtime/ObjC/Framework/RewriteRuleSubtreeStream.h
@@ -0,0 +1,50 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RewriteRuleElementStream.h"
+
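+/** A RewriteRuleElementStream whose elements are whole subtrees: nextNode duplicates
+ *  just the root node when the stream is being re-used, while dup: copies an entire
+ *  subtree through the tree adaptor.
+ */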
+@interface RewriteRuleSubtreeStream : RewriteRuleElementStream {
+
+}
+
++ (RewriteRuleSubtreeStream *) newRewriteRuleSubtreeStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                         description:(NSString *)anElementDescription;
++ (RewriteRuleSubtreeStream *) newRewriteRuleSubtreeStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                         description:(NSString *)anElementDescription
+                                                             element:(id)anElement;
++ (RewriteRuleSubtreeStream *) newRewriteRuleSubtreeStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                         description:(NSString *)anElementDescription
+                                                            elements:(NSArray *)theElements;
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription;
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement;
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements;
+
+- (id) nextNode;
+- (id) dup:(id)element;
+
+@end
diff --git a/runtime/ObjC/Framework/RewriteRuleSubtreeStream.m b/runtime/ObjC/Framework/RewriteRuleSubtreeStream.m
new file mode 100644
index 0000000..6100b91
--- /dev/null
+++ b/runtime/ObjC/Framework/RewriteRuleSubtreeStream.m
@@ -0,0 +1,101 @@
+//
+//  RewriteRuleSubtreeStream.m
+//  ANTLR
+//
+//  Created by Kay Röpke on 7/16/07.
+// [The "BSD licence"]
+// Copyright (c) 2007 Kay Röpke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "RewriteRuleSubtreeStream.h"
+
+
+@implementation RewriteRuleSubtreeStream
+
++ (RewriteRuleSubtreeStream*) newRewriteRuleSubtreeStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                        description:(NSString *)anElementDescription;
+{
+    return [[RewriteRuleSubtreeStream alloc] initWithTreeAdaptor:aTreeAdaptor
+                                                          description:anElementDescription];
+}
+
++ (RewriteRuleSubtreeStream*) newRewriteRuleSubtreeStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                        description:(NSString *)anElementDescription
+                                                            element:(id)anElement;
+{
+    return [[RewriteRuleSubtreeStream alloc] initWithTreeAdaptor:aTreeAdaptor
+                                                          description:anElementDescription
+                                                              element:anElement];
+}
+
++ (RewriteRuleSubtreeStream*) newRewriteRuleSubtreeStream:(id<TreeAdaptor>)aTreeAdaptor
+                                                        description:(NSString *)anElementDescription
+                                                           elements:(NSArray *)theElements;
+{
+    return [[RewriteRuleSubtreeStream alloc] initWithTreeAdaptor:aTreeAdaptor
+                                                          description:anElementDescription
+                                                             elements:theElements];
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription
+{
+    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription]) != nil) {
+        dirty = NO;
+        isSingleElement = YES;
+    }
+    return self;
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription element:(id)anElement
+{
+    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription element:anElement]) != nil) {
+        dirty = NO;
+    }
+    return self;
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor description:(NSString *)anElementDescription elements:(NSArray *)theElements
+{
+    if ((self = [super initWithTreeAdaptor:aTreeAdaptor description:anElementDescription elements:theElements]) != nil) {
+        dirty = NO;
+    }
+    return self;
+}
+
+
+- (id) nextNode
+{
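+    // a re-used or reset stream must not hand out the same root node twice, so
+    // duplicate it through the adaptor in that case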
+    if (dirty || (cursor >= [self size] && [self size] == 1))
+        return [treeAdaptor dupNode:[self _next]];
+    else 
+        return [self _next];
+}
+
+- (id) dup:(id)element
+{
+    return [treeAdaptor dupTree:element];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/RewriteRuleTokenStream.h b/runtime/ObjC/Framework/RewriteRuleTokenStream.h
new file mode 100644
index 0000000..320b792
--- /dev/null
+++ b/runtime/ObjC/Framework/RewriteRuleTokenStream.h
@@ -0,0 +1,66 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RewriteRuleElementStream.h"
+
+
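+/** A RewriteRuleElementStream whose elements are tokens: nextNode wraps the next
+ *  token in a freshly created tree node, while nextToken returns the token itself.
+ *  Tokens are never copied, which is why copyElement: is unsupported here.
+ */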
+@interface RewriteRuleTokenStream : RewriteRuleElementStream {
+
+}
+
++ (id) newRewriteRuleTokenStream:(id<TreeAdaptor>)anAdaptor
+                          description:(NSString *)elementDescription;
+/** Create a stream with one element */
++ (id) newRewriteRuleTokenStream:(id<TreeAdaptor>)adaptor
+                          description:(NSString *)elementDescription
+                              element:(id) oneElement;
+/** Create a stream, but feed off an existing list */
++ (id) newRewriteRuleTokenStream:(id<TreeAdaptor>)adaptor
+                          description:(NSString *)elementDescription
+                             elements:(AMutableArray *)elements;
+
+- (id) init;
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)anAdaptor
+               description:(NSString *)aDescription;
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)anAdaptor 
+               description:(NSString *)aDescription
+                   element:(id)element;
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)anAdaptor
+               description:(NSString *)aDescription
+                  elements:(AMutableArray *)elements;
+                               
+/** Get next token from stream and make a node for it */
+- (id) nextNode;
+
+- (id) nextToken;
+
+/** Don't convert to a tree unless they explicitly call nextTree.
+ *  This way we can do hetero tree nodes in rewrite.
+ */
+- (id<BaseTree>) toTree:(id<Token>)element;
+
+@end
diff --git a/runtime/ObjC/Framework/RewriteRuleTokenStream.m b/runtime/ObjC/Framework/RewriteRuleTokenStream.m
new file mode 100644
index 0000000..137b457
--- /dev/null
+++ b/runtime/ObjC/Framework/RewriteRuleTokenStream.m
@@ -0,0 +1,128 @@
+//
+//  RewriteRuleTokenStream.m
+//  ANTLR
+//
+//  Created by Kay Röpke on 7/16/07.
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "RewriteRuleTokenStream.h"
+#import "RuntimeException.h"
+#import "HashMap.h"
+#import "MapElement.h"
+
+@implementation RewriteRuleTokenStream
+
++ (id) newRewriteRuleTokenStream:(id<TreeAdaptor>)anAdaptor
+                          description:(NSString *)elementDescription
+{
+    return [[RewriteRuleTokenStream alloc] initWithTreeAdaptor:anAdaptor
+                                                        description:elementDescription];
+}
+
+/** Create a stream with one element */
++ (id) newRewriteRuleTokenStream:(id<TreeAdaptor>)adaptor
+                          description:(NSString *)elementDescription
+                              element:(id) oneElement
+{
+    return [[RewriteRuleTokenStream alloc] initWithTreeAdaptor:adaptor
+                                                        description:elementDescription
+                                                            element:oneElement];
+}
+
+/** Create a stream, but feed off an existing list */
++ (id) newRewriteRuleTokenStream:(id<TreeAdaptor>)adaptor
+                          description:(NSString *)elementDescription
+                             elements:(AMutableArray *)elements
+{
+    return [[RewriteRuleTokenStream alloc] initWithTreeAdaptor:adaptor
+                                                        description:elementDescription
+                                                           elements:elements];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil ) {
+    }
+    return self;
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)anAdaptor
+               description:(NSString *)aDescription
+{
+    if ((self = [super initWithTreeAdaptor:anAdaptor
+                               description:aDescription]) != nil ) {
+    }
+    return self;
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)anAdaptor
+               description:(NSString *)aDescription
+                   element:(id)anElement
+{
+    if ((self = [super initWithTreeAdaptor:anAdaptor
+                               description:aDescription
+                                   element:anElement]) != nil ) {
+    }
+    return self;
+}
+
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>)anAdaptor
+               description:(NSString *)aDescription
+                  elements:(AMutableArray *)elementList
+{
+    if ((self = [super initWithTreeAdaptor:anAdaptor
+                               description:aDescription
+                                  elements:elementList]) != nil ) {
+    }
+    return self;
+}
+
+- (id<BaseTree>) nextNode
+{
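+    // build a new tree node around the next token; the token itself is not copied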
+    id<Token> t = (id<Token>)[self _next];
+    return [treeAdaptor create:t];
+}
+
+- (id) nextToken
+{
+    return [self _next];
+}
+
+/** Don't convert to a tree unless they explicitly call nextTree.
+ *  This way we can do hetero tree nodes in rewrite.
+ */
+- (id<BaseTree>) toTree:(id<Token>)element
+{
+    return (id<BaseTree>)element;
+}
+
+- (id) copyElement:(id)element
+{
+    @throw [RuntimeException newException:@"copy can't be called for a token stream."];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/RuleMapElement.h b/runtime/ObjC/Framework/RuleMapElement.h
new file mode 100644
index 0000000..415d711
--- /dev/null
+++ b/runtime/ObjC/Framework/RuleMapElement.h
@@ -0,0 +1,55 @@
+//
+//  RuleMapElement.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "BaseMapElement.h"
+
+@interface RuleMapElement : BaseMapElement {
+    ACNumber *ruleNum;
+}
+
+@property (retain, getter=getRuleNum, setter=setRuleNum:) ACNumber *ruleNum;
+
++ (RuleMapElement *) newRuleMapElement;
++ (RuleMapElement *) newRuleMapElementWithIndex:(ACNumber *)anIdx;
++ (RuleMapElement *) newRuleMapElementWithIndex:(ACNumber *)anIdx RuleNum:(ACNumber *)aRuleNum;
+- (id) init;
+- (id) initWithAnIndex:(ACNumber *)anIdx;
+- (id) initWithAnIndex:(ACNumber *)anIdx RuleNum:(ACNumber *)aRuleNum;
+
+- (id) copyWithZone:(NSZone *)aZone;
+
+- (ACNumber *)getRuleNum;
+- (void)setRuleNum:(ACNumber *)aRuleNum;
+
+- (NSInteger)size;
+
+@end
diff --git a/runtime/ObjC/Framework/RuleMapElement.m b/runtime/ObjC/Framework/RuleMapElement.m
new file mode 100644
index 0000000..aedb996
--- /dev/null
+++ b/runtime/ObjC/Framework/RuleMapElement.m
@@ -0,0 +1,112 @@
+//
+//  RuleMapElement.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "ACNumber.h"
+#import "RuleMapElement.h"
+
+
+@implementation RuleMapElement
+
+@synthesize ruleNum;
+
++ (RuleMapElement *)newRuleMapElement
+{
+    return [[RuleMapElement alloc] init];
+}
+
++ (RuleMapElement *)newRuleMapElementWithIndex:(ACNumber *)aNumber
+{
+    return [[RuleMapElement alloc] initWithAnIndex:(ACNumber *)aNumber];
+}
+
++ (RuleMapElement *)newRuleMapElementWithIndex:(ACNumber *)aNumber RuleNum:(ACNumber *)aRuleNum
+{
+    return [[RuleMapElement alloc] initWithAnIndex:aNumber RuleNum:aRuleNum];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil ) {
+        index = nil;
+        ruleNum = nil;
+    }
+    return (self);
+}
+
+- (id) initWithAnIndex:(ACNumber *)aNumber
+{
+    if ((self = [super initWithAnIndex:aNumber]) != nil ) {
+        ruleNum = nil;
+    }
+    return (self);
+}
+
+- (id) initWithAnIndex:(ACNumber *)aNumber RuleNum:(ACNumber *)aRuleNum
+{
+    if ((self = [super initWithAnIndex:aNumber]) != nil ) {
+        [aRuleNum retain];
+        ruleNum = aRuleNum;
+    }
+    return (self);
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    RuleMapElement *copy;
+    
+    copy = [super copyWithZone:aZone];
+    copy.ruleNum = ruleNum;
+    return( copy );
+}
+
+- (id)getRuleNum
+{
+    return ruleNum;
+}
+
+- (void)setRuleNum:(id)aRuleNum
+{
+    if ( aRuleNum != ruleNum ) {
+        if ( ruleNum ) [ruleNum release];
+        [aRuleNum retain];
+    }
+    ruleNum = aRuleNum;
+}
+
+- (NSInteger)size
+{
+    NSInteger aSize = 0;
+    if (ruleNum != nil) aSize++;
+    if (index != nil) aSize++;
+    return( aSize );
+}
+
+@end
diff --git a/runtime/ObjC/Framework/RuleMemo.h b/runtime/ObjC/Framework/RuleMemo.h
new file mode 100644
index 0000000..40e6efc
--- /dev/null
+++ b/runtime/ObjC/Framework/RuleMemo.h
@@ -0,0 +1,61 @@
+//
+//  RuleMemo.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "ACNumber.h"
+#import "LinkBase.h"
+
+@interface RuleMemo : LinkBase {
+    ACNumber *startIndex;
+    ACNumber *stopIndex;
+}
+
+@property (retain, getter=getStartIndex, setter=setStartIndex:) ACNumber *startIndex;
+@property (retain, getter=getStopIndex, setter=setStopIndex:) ACNumber *stopIndex;
+
++ (RuleMemo *)newRuleMemo;
++ (RuleMemo *)newRuleMemoWithStartIndex:(ACNumber *)aStartIndex StopIndex:(ACNumber *)aStopIndex;
+
+- (id) init;
+- (id) initWithStartIndex:(ACNumber *)aStartIndex StopIndex:(ACNumber *)aStopIndex;
+
+- (NSInteger)count;
+- (NSInteger)size;
+
+- (RuleMemo *)getRuleWithStartIndex:(NSInteger)aStartIndex;
+- (ACNumber *)getStartIndex:(NSInteger)aStartIndex;
+- (ACNumber *)getStopIndex:(NSInteger)aStartIndex;
+- (ACNumber *)getStartIndex;
+- (void)setStartIndex:(ACNumber *)aStartIndex;
+- (ACNumber *)getStopIndex;
+- (void)setStopIndex:(ACNumber *)aStopIndex;
+
+@end
diff --git a/runtime/ObjC/Framework/RuleMemo.m b/runtime/ObjC/Framework/RuleMemo.m
new file mode 100644
index 0000000..7665158
--- /dev/null
+++ b/runtime/ObjC/Framework/RuleMemo.m
@@ -0,0 +1,158 @@
+//
+//  RuleMemo.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/16/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "RuleMemo.h"
+
+
+@implementation RuleMemo
+
+@synthesize startIndex;
+@synthesize stopIndex;
+
++ (RuleMemo *)newRuleMemo
+{
+    return [[RuleMemo alloc] init];
+}
+
++ (RuleMemo *)newRuleMemoWithStartIndex:(ACNumber *)anIndex StopIndex:(ACNumber *)aStopIndex
+{
+    return [[RuleMemo alloc] initWithStartIndex:anIndex StopIndex:aStopIndex];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil ) {
+        startIndex = nil;
+        stopIndex = nil;
+    }
+    return (self);
+}
+
+- (id) initWithStartIndex:(ACNumber *)aStartIndex StopIndex:(ACNumber *)aStopIndex
+{
+    if ((self = [super init]) != nil ) {
+        [aStartIndex retain];
+        startIndex = aStartIndex;
+        [aStopIndex retain];
+        stopIndex = aStopIndex;
+    }
+    return (self);
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    RuleMemo *copy;
+    
+    copy = [super copyWithZone:aZone];
+    copy.startIndex = startIndex;
+    copy.stopIndex = stopIndex;
+    return( copy );
+}
+
+- (NSInteger)count
+{
+    NSInteger aCnt = 0;
+    
+    if (startIndex != nil) aCnt++;
+    if (stopIndex != nil) aCnt++;
+    return aCnt;
+}
+
+- (NSInteger) size
+{
+    return (2 * sizeof(id));
+}
+
+- (RuleMemo *)getRuleWithStartIndex:(NSInteger)aStartIndex
+{
+    RuleMemo *aMatchMemo = self;
+    do {
+        if (aStartIndex == [aMatchMemo.startIndex integerValue] ) {
+            return aMatchMemo;
+        }
+        aMatchMemo = aMatchMemo.fNext;
+    } while ( aMatchMemo != nil );
+    return nil;
+}
+
+- (ACNumber *)getStartIndex:(NSInteger)aStartIndex
+{
+    RuleMemo *aMatchMemo = self;
+    do {
+        if (aStartIndex == [aMatchMemo.startIndex integerValue] ) {
+            return aMatchMemo.startIndex;
+        }
+        aMatchMemo = aMatchMemo.fNext;
+    } while ( aMatchMemo != nil );
+    return nil;
+}
+
+- (ACNumber *)getStopIndex:(NSInteger)aStartIndex
+{
+    RuleMemo *aMatchMemo = self;
+    do {
+        if (aStartIndex == [aMatchMemo.startIndex integerValue] ) {
+            return aMatchMemo.stopIndex;
+        }
+        aMatchMemo = aMatchMemo.fNext;
+    } while ( aMatchMemo != nil );
+    return nil;
+}
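+
+// Note (descriptive, not part of the original source): RuleMemo entries are
+// chained through the fNext link inherited from LinkBase, so the lookup
+// methods above walk the chain until a memo whose startIndex matches the
+// requested token index is found. Illustrative query, where memoChain is a
+// hypothetical head-of-chain RuleMemo and 42 a hypothetical token index:
+//
+//   ACNumber *stop = [memoChain getStopIndex:42];  // nil if nothing memoized at index 42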
+
+- (ACNumber *)getStartIndex
+{
+    return startIndex;
+}
+
+- (void)setStartIndex:(ACNumber *)aStartIndex
+{
+    if ( aStartIndex != startIndex ) {
+        if ( startIndex ) [startIndex release];
+        [aStartIndex retain];
+    }
+    startIndex = aStartIndex;
+}
+
+- (ACNumber *)getStopIndex
+{
+    return stopIndex;
+}
+
+- (void)setStopIndex:(ACNumber *)aStopIndex
+{
+    if ( aStopIndex != stopIndex ) {
+        if ( stopIndex ) [stopIndex release];
+        [aStopIndex retain];
+    }
+    stopIndex = aStopIndex;
+}
+
+@end
diff --git a/runtime/ObjC/Framework/RuleReturnScope.h b/runtime/ObjC/Framework/RuleReturnScope.h
new file mode 100644
index 0000000..163107d
--- /dev/null
+++ b/runtime/ObjC/Framework/RuleReturnScope.h
@@ -0,0 +1,55 @@
+//
+//  RuleReturnScope.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/17/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Token.h"
+
+@interface RuleReturnScope : NSObject <NSCopying> {
+
+}
+
+/** Return the start token or tree */
+- (id<Token>) getStart;
+
+/** Return the stop token or tree */
+- (id<Token>) getStop;
+
+/** Has a value potentially if output=AST; */
+- (id) getTree;
+
+/** Has a value potentially if output=template. Don't use the StringTemplate
+ *  type here, as that would introduce a dependency on the ST library.
+ */
+- (id) getTemplate;
+
+- (id) copyWithZone:(NSZone *)theZone;
+
+@end
diff --git a/runtime/ObjC/Framework/RuleReturnScope.m b/runtime/ObjC/Framework/RuleReturnScope.m
new file mode 100644
index 0000000..1271bc1
--- /dev/null
+++ b/runtime/ObjC/Framework/RuleReturnScope.m
@@ -0,0 +1,71 @@
+//
+//  RuleReturnScope.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/17/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "RuleReturnScope.h"
+
+
+@implementation RuleReturnScope
+
+/** Return the start token or tree */
+- (id) getStart
+{
+    return nil;
+}
+
+/** Return the stop token or tree */
+- (id) getStop
+{
+    return nil;
+}
+
+/** Has a value potentially if output=AST; */
+- (id) getTree
+{
+    return nil;
+}
+
+/** Has a value potentially if output=template. Don't use the StringTemplate
+ *  type here, as that would introduce a dependency on the ST library.
+ */
+- (id) getTemplate
+{
+    return nil;
+}
+
+// Create a copy of this return scope. The base class carries no state of its
+// own, so subclasses are responsible for copying their fields.
+- (id) copyWithZone:(NSZone *)theZone
+{
+    RuleReturnScope *copy = [[[self class] allocWithZone:theZone] init];
+    return copy;
+}
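+
+// Note (descriptive, not part of the original source): this class is an
+// abstract base with no state of its own; the accessors above intentionally
+// return nil and are overridden by the concrete rule-return scopes that add
+// start/stop and tree fields as needed.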
+
+@end
diff --git a/runtime/ObjC/Framework/RuleStack.h b/runtime/ObjC/Framework/RuleStack.h
new file mode 100644
index 0000000..a0f6235
--- /dev/null
+++ b/runtime/ObjC/Framework/RuleStack.h
@@ -0,0 +1,63 @@
+//
+//  RuleStack.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/9/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "BaseStack.h"
+#import "HashRule.h"
+
+//#define GLOBAL_SCOPE       0
+//#define LOCAL_SCOPE        1
+#define HASHSIZE         101
+#define HBUFSIZE      0x2000
+
+@interface RuleStack : BaseStack {
+}
+
+// Construction/Destruction
++(RuleStack *)newRuleStack;
++(RuleStack *)newRuleStack:(NSInteger)cnt;
+-(id)init;
+-(id)initWithLen:(NSInteger)cnt;
+-(void)dealloc;
+
+// Instance Methods
+- (id) copyWithZone:(NSZone *)aZone;
+/* clear -- reinitialize the maplist array */
+
+- (NSInteger)count;
+- (NSInteger)size;
+
+- (HashRule *) pop;
+
+- (void) insertObject:(HashRule *)aHashRule atIndex:(NSInteger)idx;
+- (HashRule *)objectAtIndex:(NSInteger)idx;
+- (void)putHashRuleAtRuleIndex:(NSInteger)aRuleIndex StartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex;
+@end
diff --git a/runtime/ObjC/Framework/RuleStack.m b/runtime/ObjC/Framework/RuleStack.m
new file mode 100644
index 0000000..52c3889
--- /dev/null
+++ b/runtime/ObjC/Framework/RuleStack.m
@@ -0,0 +1,152 @@
+//
+//  RuleStack.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/9/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#define SUCCESS (0)
+#define FAILURE (-1)
+
+extern NSInteger debug;
+
+#import "RuleStack.h"
+#import "ACNumber.h"
+#import "RuleMemo.h"
+#import "Tree.h"
+
+/*
+ * Start of RuleStack
+ */
+@implementation RuleStack
+
++ (RuleStack *)newRuleStack
+{
+    return [[RuleStack alloc] init];
+}
+
++ (RuleStack *)newRuleStack:(NSInteger)cnt
+{
+    return [[RuleStack alloc] initWithLen:cnt];
+}
+
+- (id)init
+{
+	if ((self = [super init]) != nil) {
+	}
+    return( self );
+}
+
+- (id)initWithLen:(NSInteger)cnt
+{
+	if ((self = [super initWithLen:cnt]) != nil) {
+	}
+    return( self );
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in RuleStack" );
+#endif
+	[super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    return [super copyWithZone:aZone];
+}
+
+- (NSInteger)count
+{
+    RuleMemo *anElement;
+    NSInteger aCnt = 0;
+    for( int i = 0; i < BuffSize; i++ ) {
+        if ((anElement = ptrBuffer[i]) != nil)
+            aCnt++;
+    }
+    return aCnt;
+}
+
+- (NSInteger)size
+{
+    RuleMemo *anElement;
+    NSInteger aSize = 0;
+    for( int i = 0; i < BuffSize; i++ ) {
+        if ((anElement = ptrBuffer[i]) != nil) {
+            aSize++;
+        }
+    }
+    return aSize;
+}
+
+- (HashRule *)pop
+{
+    return (HashRule *)[super pop];
+}
+
+- (void) insertObject:(HashRule *)aRule atIndex:(NSInteger)idx
+{
+    if ( idx >= BuffSize ) {
+        if ( debug > 2 ) NSLog( @"In RuleStack attempting to insert aRule at Index %ld, but Buffer is only %ld long\n", (long)idx, (long)BuffSize );
+        [self ensureCapacity:idx];
+    }
+    if ( aRule != ptrBuffer[idx] ) {
+        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
+        [aRule retain];
+    }
+    ptrBuffer[idx] = aRule;
+}
+
+- (HashRule *)objectAtIndex:(NSInteger)idx
+{
+    if (idx < BuffSize) {
+        return ptrBuffer[idx];
+    }
+    return nil;
+}
+
+- (void)putHashRuleAtRuleIndex:(NSInteger)aRuleIndex StartIndex:(NSInteger)aStartIndex StopIndex:(NSInteger)aStopIndex
+{
+    HashRule *aHashRule;
+    RuleMemo *aRuleMemo;
+
+    if (aRuleIndex >= BuffSize) {
+        if ( debug ) NSLog( @"putHashRuleAtRuleIndex attempting to insert aRule at Index %ld, but Buffer is only %ld long\n", (long)aRuleIndex, (long)BuffSize );
+        [self ensureCapacity:aRuleIndex];
+    }
+    if ((aHashRule = ptrBuffer[aRuleIndex]) == nil) {
+        aHashRule = [[HashRule newHashRuleWithLen:17] retain];
+        ptrBuffer[aRuleIndex] = aHashRule;
+    }
+    if (( aRuleMemo = [aHashRule objectAtIndex:aStartIndex] ) == nil ) {
+        aRuleMemo = [[RuleMemo newRuleMemo] retain];
+        [aHashRule insertObject:aRuleMemo atIndex:aStartIndex];
+    }
+    [aRuleMemo setStartIndex:[ACNumber numberWithInteger:aStartIndex]];
+    [aRuleMemo setStopIndex:[ACNumber numberWithInteger:aStopIndex]];
+}
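+
+// Memoization sketch (illustrative, not part of the original source;
+// ruleMemoStack is a hypothetical RuleStack instance and the rule and token
+// indexes are made up): ptrBuffer[ruleIndex] holds a HashRule keyed by token
+// start index, and each entry is a RuleMemo recording where that rule
+// invocation previously stopped.
+//
+//   [ruleMemoStack putHashRuleAtRuleIndex:7 StartIndex:42 StopIndex:57];
+//   RuleMemo *memo = [[ruleMemoStack objectAtIndex:7] objectAtIndex:42];
+//   NSInteger stop = [[memo getStopIndex] integerValue];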
+
+@end
diff --git a/runtime/ObjC/Framework/RuntimeException.h b/runtime/ObjC/Framework/RuntimeException.h
new file mode 100644
index 0000000..1a26e0a
--- /dev/null
+++ b/runtime/ObjC/Framework/RuntimeException.h
@@ -0,0 +1,160 @@
+//
+//  RuntimeException.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/5/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+
+@interface RuntimeException : NSException {
+}
+
++ (RuntimeException *) newException;
++ (RuntimeException *) newException:(NSString *)aReason;
++ (RuntimeException *) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
++ (RuntimeException *) newException:(NSString *)aName reason:(NSString *)aReason;
++ (RuntimeException *) newException:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+- (id) init;
+- (id) init:(NSString *)aReason;
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason;
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+- (NSString *) Description;
+- (id) stackTrace:(NSException *)e;
+
+@end
+
+@interface CloneNotSupportedException : RuntimeException {
+}
+
++ (id) newException;
++ (id) newException:(NSString *)aReason;
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+- (id) init;
+- (id) init:(NSString *)aReason;
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+@end
+
+@interface ConcurrentModificationException : RuntimeException {
+}
+
++ (id) newException:(NSString *)aReason;
+
+- (id) init;
+- (id) init:(NSString *)aReason;
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+@end
+
+@interface IllegalArgumentException : RuntimeException {
+}
+
++ (id) newException;
++ (id) newException:(NSString *)aReason;
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+- (id) init;
+- (id) init:(NSString *)aReason;
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+@end
+
+@interface IllegalStateException : RuntimeException {
+}
+
++ (id) newException;
++ (id) newException:(NSString *)aReason;
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+- (id) init;
+- (id)init:(NSString *)aReason;
+- (id)init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+@end
+
+@interface IndexOutOfBoundsException : RuntimeException {
+}
+
++ (id) newException;
++ (id) newException:(NSString *)aReason;
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+- (id) init;
+- (id)init:(NSString *)aReason;
+- (id)init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+@end
+
+@interface NoSuchElementException : RuntimeException {
+}
+
++ (id) newException;
++ (id) newException:(NSString *)aReason;
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+- (id) init;
+- (id) init:(NSString *)aReason;
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+@end
+
+@interface NullPointerException : RuntimeException {
+}
+
++ (id) newException;
++ (id) newException:(NSString *)aReason;
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+- (id) init;
+- (id) init:(NSString *)aReason;
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+@end
+
+@interface RewriteEarlyExitException : RuntimeException {
+}
+
++ (id) newException;
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo;
+
+@end
+
+@interface UnsupportedOperationException : RuntimeException {
+}
+
++ (id) newException:(NSString *)aReason;
+
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason;
+- (id) initWithName:(NSString *)aMsg reason:(NSString *)aCause userInfo:(NSDictionary *)userInfo;
+
+@end
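+
+// Usage sketch (illustrative, not part of the original header): these classes
+// mirror the unchecked exceptions of the Java runtime so translated code can
+// use ordinary Objective-C exception handling, e.g.
+//
+//   @try {
+//       @throw [IndexOutOfBoundsException newException:@"index out of range"];
+//   }
+//   @catch (RuntimeException *e) {
+//       NSLog(@"%@", [e Description]);
+//   }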
+
diff --git a/runtime/ObjC/Framework/RuntimeException.m b/runtime/ObjC/Framework/RuntimeException.m
new file mode 100644
index 0000000..25e17d6
--- /dev/null
+++ b/runtime/ObjC/Framework/RuntimeException.m
@@ -0,0 +1,427 @@
+//
+//  RuntimeException.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/5/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "RuntimeException.h"
+
+
+@implementation RuntimeException
+
++ (id) newException
+{
+    return [[RuntimeException alloc] init];
+}
+
++ (id) newException:(NSString *)aReason
+{
+    return [[RuntimeException alloc] init:aReason];
+}
+
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    return [[RuntimeException alloc] init:aReason userInfo:aUserInfo];
+}
+
++ (id) newException:(NSString *)aName reason:(NSString *)aReason
+{
+    return [[RuntimeException alloc] initWithName:aName reason:aReason];
+}
+
++ (id) newException:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    return [[RuntimeException alloc] initWithName:aName reason:aReason userInfo:aUserInfo];
+}
+
+
+- (id) init
+{
+    self = [super initWithName:@"RuntimeException" reason:@"UnknownException" userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason
+{
+    self = [super initWithName:(NSString *)@"RuntimeException" reason:(NSString *)aReason userInfo:(NSDictionary *)nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:@"RuntimeException" reason:aReason userInfo:aUserInfo];
+    return(self);
+}
+
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason
+{
+    self = [super initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)nil];
+    return(self);
+}
+
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo];
+    return(self);
+}
+
+- (NSString *) Description
+{
+    return [super reason];
+}
+
+- (id) stackTrace:(NSException *)e
+{
+    NSArray *addrs = [e callStackReturnAddresses];
+    NSArray *trace = [e callStackSymbols];
+    
+    for (NSString *traceStr in trace) {
+        NSLog( @"%@", traceStr);
+        // TODO: remove special after testing
+        if ([traceStr hasPrefix:@"main("] > 0)
+            return traceStr;
+        if (![traceStr hasPrefix:@"org.stringtemplate"])
+            return traceStr;
+    }
+    return trace;    
+}
+
+@end
+
+@implementation CloneNotSupportedException
+
++ (id) newException
+{
+    return [[CloneNotSupportedException alloc] init];
+}
+
++ (id) newException:(NSString *)aReason
+{
+    return [[CloneNotSupportedException alloc] init:aReason];
+}
+
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    return [[CloneNotSupportedException alloc] init:aReason userInfo:aUserInfo];
+}
+
+- (id) init
+{
+    self = [super initWithName:@"CloneNotSupportedException" reason:@"Attempted to clone non-cloneable object" userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason
+{
+    self = [super initWithName:@"CloneNotSupportedException" reason:(NSString *)aReason userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:@"CloneNotSupportedException" reason:aReason userInfo:aUserInfo];
+    return(self);
+}
+
+@end
+
+@implementation ConcurrentModificationException
+
++ (id) newException
+{
+    return [[ConcurrentModificationException alloc] init];
+}
+
++ (id) newException:(NSString *)aReason
+{
+    return [[ConcurrentModificationException alloc] init:aReason];
+}
+
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    return [[ConcurrentModificationException alloc] init:aReason userInfo:aUserInfo];
+}
+
+- (id) init
+{
+    self = [super initWithName:@"ConcurrentModificationException" reason:@"UnknownException" userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason
+{
+    self = [super initWithName:@"ConcurrentModificationException" reason:(NSString *)aReason userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:@"ConcurrentModificationException" reason:aReason userInfo:aUserInfo];
+    return(self);
+}
+
+@end
+
+@implementation IllegalArgumentException
+
++ (id) newException
+{
+    return [[IllegalArgumentException alloc] init];
+}
+
++ (id) newException:(NSString *)aReason
+{
+    return [[IllegalArgumentException alloc] init:aReason];
+}
+
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    return [[IllegalArgumentException alloc] init:aReason userInfo:aUserInfo];
+}
+
+- (id) init
+{
+    self = [super initWithName:@"IllegalArgumentException" reason:@"IllegalArgumentException" userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason
+{
+    self = [super initWithName:@"IllegalArgumentException" reason:(NSString *)aReason userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:@"IllegalArgumentException" reason:aReason userInfo:aUserInfo];
+    return(self);
+}
+
+@end
+
+@implementation IllegalStateException
+
++ (id) newException
+{
+    return [[IllegalStateException alloc] init];
+}
+
++ (id) newException:(NSString *)aReason
+{
+    return [[IllegalStateException alloc] init:aReason];
+}
+
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    return [[IllegalStateException alloc] init:aReason userInfo:aUserInfo];
+}
+
+- (id) init
+{
+    self = [super initWithName:@"IllegalStateException" reason:@"IllegalStateException" userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason
+{
+    self = [super initWithName:@"IllegalStateException" reason:(NSString *)aReason userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:@"IllegalStateException" reason:aReason userInfo:aUserInfo];
+    return(self);
+}
+
+@end
+
+@implementation IndexOutOfBoundsException
+
++ (id) newException
+{
+    return [[IndexOutOfBoundsException alloc] init];
+}
+
++ (id) newException:(NSString *)aReason
+{
+    return [[IndexOutOfBoundsException alloc] init:aReason];
+}
+
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    return [[IndexOutOfBoundsException alloc] init:aReason userInfo:aUserInfo];
+}
+
+- (id) init
+{
+    self = [super initWithName:@"IndexOutOfBoundsException" reason:@"IndexOutOfBoundsException" userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason
+{
+    self = [super initWithName:@"IndexOutOfBoundsException" reason:(NSString *)aReason userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:@"IndexOutOfBoundsException" reason:aReason userInfo:aUserInfo];
+    return(self);
+}
+
+@end
+
+@implementation NoSuchElementException
+
++ (id) newException
+{
+    return [[NoSuchElementException alloc] init];
+}
+
++ (id) newException:(NSString *)aReason
+{
+    return [[NoSuchElementException alloc] init:aReason];
+}
+
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    return [[NoSuchElementException alloc] init:aReason userInfo:(NSDictionary *)aUserInfo];
+}
+
+- (id) init
+{
+    self = [super initWithName:@"NoSuchElementException" reason:@"UnknownException" userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason
+{
+    self = [super initWithName:@"NoSuchElementException" reason:(NSString *)aReason userInfo:(NSDictionary *)nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:@"NoSuchElementException" reason:aReason userInfo:aUserInfo];
+    return(self);
+}
+
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:aName reason:aReason userInfo:aUserInfo];
+    return(self);
+}
+
+@end
+
+@implementation NullPointerException
+
++ (id) newException
+{
+    return [[NullPointerException alloc] init];
+}
+
++ (id) newException:(NSString *)aReason
+{
+    return [[NullPointerException alloc] init:aReason];
+}
+
++ (id) newException:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    return [[NullPointerException alloc] init:aReason userInfo:(NSDictionary *)aUserInfo];
+}
+
+- (id) init
+{
+    self = [super initWithName:@"NullPointerException" reason:@"UnknownException" userInfo:nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason
+{
+    self = [super initWithName:@"NullPointerException" reason:(NSString *)aReason userInfo:(NSDictionary *)nil];
+    return(self);
+}
+
+- (id) init:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:@"NullPointerException" reason:aReason userInfo:aUserInfo];
+    return(self);
+}
+
+@end
+
+@implementation RewriteEarlyExitException
+
++ (id) newException
+{
+	return [[self alloc] init];
+}
+
+- (id) init
+{
+	self = [super initWithName:@"RewriteEarlyExitException" reason:nil userInfo:nil];
+	return self;
+}
+
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)aUserInfo
+{
+    self = [super initWithName:aName reason:aReason userInfo:aUserInfo];
+    return(self);
+}
+
+- (NSString *) description
+{
+	return [self name];
+}
+
+@end
+
+@implementation UnsupportedOperationException
+
++ (id) newException:(NSString *)aReason
+{
+    return [[UnsupportedOperationException alloc] initWithName:@"Unsupported Operation Exception" reason:aReason userInfo:nil];
+}
+
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason
+{
+    self=[super initWithName:aName reason:aReason userInfo:nil];
+    return self;
+}
+
+- (id) initWithName:(NSString *)aName reason:(NSString *)aReason userInfo:(NSDictionary *)userInfo
+{
+    self=[super initWithName:aName reason:aReason userInfo:userInfo];
+    return self;
+}
+
+@end
+
diff --git a/runtime/ObjC/Framework/StreamEnumerator.h b/runtime/ObjC/Framework/StreamEnumerator.h
new file mode 100644
index 0000000..9461cb3
--- /dev/null
+++ b/runtime/ObjC/Framework/StreamEnumerator.h
@@ -0,0 +1,48 @@
+//
+//  StreamEnumerator.h
+//  ANTLR
+//
+//  Created by Ian Michell on 29/04/2010.
+// [The "BSD licence"]
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "AMutableArray.h"
+
+@interface StreamEnumerator : NSEnumerator 
+{
+	NSInteger i;
+	id eof;
+	AMutableArray *nodes;
+}
+
+-(id) initWithNodes:(AMutableArray *) n andEOF:(id) obj;
+-(BOOL) hasNext;
+
+@property NSInteger i;
+@property (retain) id eof;
+@property (retain) AMutableArray *nodes;
+@end
diff --git a/runtime/ObjC/Framework/StreamEnumerator.m b/runtime/ObjC/Framework/StreamEnumerator.m
new file mode 100644
index 0000000..eac54b5
--- /dev/null
+++ b/runtime/ObjC/Framework/StreamEnumerator.m
@@ -0,0 +1,77 @@
+//
+//  StreamEnumerator.m
+//  ANTLR
+//
+//  Created by Ian Michell on 29/04/2010.
+// [The "BSD licence"]
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "StreamEnumerator.h"
+
+
+@implementation StreamEnumerator
+
+-(id) init
+{
+	self = [super init];
+	if (self)
+	{
+		i = 0;
+	}
+	return self;
+}
+
+-(id) initWithNodes:(AMutableArray *) n andEOF:(id) obj
+{
+	self = [self init];
+	if (self)
+	{
+		nodes = n;
+		eof = obj;
+	}
+	return self;
+}
+
+-(BOOL) hasNext
+{
+	return i < [nodes count];
+}
+
+-(id) nextObject
+{
+	NSUInteger current = i;
+	i++;
+	if (current < [nodes count])
+	{
+		return [nodes objectAtIndex:current];
+	}
+	return eof;
+}
+
+@synthesize i;
+@synthesize eof;
+@synthesize nodes;
+@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/ANTLRStringStreamState.h b/runtime/ObjC/Framework/StringStreamState.h
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/ANTLRStringStreamState.h
rename to runtime/ObjC/Framework/StringStreamState.h
diff --git a/runtime/ObjC/Framework/SymbolStack.h b/runtime/ObjC/Framework/SymbolStack.h
new file mode 100644
index 0000000..77a6959
--- /dev/null
+++ b/runtime/ObjC/Framework/SymbolStack.h
@@ -0,0 +1,75 @@
+//
+//  SymbolStack.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/9/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "BaseStack.h"
+// #import "SymbolScope.h"
+
+//#define GLOBAL_SCOPE       0
+//#define LOCAL_SCOPE        1
+#define HASHSIZE         101
+#define HBUFSIZE      0x2000
+
+@interface SymbolsScope : NSObject
+{
+    
+}
+
++ (SymbolsScope *)newSymbolsScope;
+
+- (id)init;
+@end
+
+
+@interface SymbolStack : BaseStack {
+}
+
+// Construction/Destruction
++(SymbolStack *)newSymbolStack;
++(SymbolStack *)newSymbolStackWithLen:(NSInteger)cnt;
+-(id)init;
+-(id)initWithLen:(NSInteger)cnt;
+-(void)dealloc;
+
+// Instance Methods
+- (id) copyWithZone:(NSZone *)aZone;
+/* clear -- reinitialize the maplist array */
+
+-(SymbolsScope *)getHashMapEntry:(NSInteger)idx;
+
+-(SymbolsScope **)getHashMap;
+
+-(SymbolsScope *) pop;
+
+- (void) insertObject:(SymbolsScope *)aScope atIndex:(NSInteger)idx;
+- (SymbolsScope *)objectAtIndex:(NSInteger)idx;
+
+@end
diff --git a/runtime/ObjC/Framework/SymbolStack.m b/runtime/ObjC/Framework/SymbolStack.m
new file mode 100644
index 0000000..f1a7f6e
--- /dev/null
+++ b/runtime/ObjC/Framework/SymbolStack.m
@@ -0,0 +1,126 @@
+//
+//  SymbolStack.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/9/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#define SUCCESS (0)
+#define FAILURE (-1)
+
+#import "SymbolStack.h"
+#import "Tree.h"
+
+
+@implementation SymbolsScope
+
++ (SymbolsScope *)newSymbolsScope
+{
+    return( [[SymbolsScope alloc] init] );
+}
+
+- (id)init
+{
+    if ((self = [super init]) != nil) {
+    }
+    return (self);
+}
+
+@end
+
+/*
+ * Start of SymbolStack
+ */
+@implementation SymbolStack
+
++(SymbolStack *)newSymbolStack
+{
+    return [[SymbolStack alloc] initWithLen:30];
+}
+
++(SymbolStack *)newSymbolStackWithLen:(NSInteger)cnt
+{
+    return [[SymbolStack alloc] initWithLen:cnt];
+}
+
+-(id)init
+{
+	if ((self = [super init]) != nil) {
+	}
+    return( self );
+}
+
+-(id)initWithLen:(NSInteger)cnt
+{
+	if ((self = [super initWithLen:cnt]) != nil) {
+	}
+    return( self );
+}
+
+-(void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in SymbolStack" );
+#endif
+	[super dealloc];
+}
+
+- (id) copyWithZone:(NSZone *)aZone
+{
+    return [super copyWithZone:aZone];
+}
+
+-(SymbolsScope *)getHashMapEntry:(NSInteger)idx
+{
+	return( (SymbolsScope *)[super objectAtIndex:idx] );
+}
+
+-(SymbolsScope **)getHashMap
+{
+	return( (SymbolsScope **)ptrBuffer );
+}
+
+-(SymbolsScope *) pop
+{
+    return (SymbolsScope *)[super pop];
+}
+
+- (void) insertObject:(SymbolsScope *)aRule atIndex:(NSInteger)idx
+{
+    if ( aRule != ptrBuffer[idx] ) {
+        if ( ptrBuffer[idx] ) [ptrBuffer[idx] release];
+        [aRule retain];
+    }
+    ptrBuffer[idx] = aRule;
+}
+
+- (SymbolsScope *)objectAtIndex:(NSInteger)idx
+{
+    return (SymbolsScope *)[super objectAtIndex:idx];
+}
+
+@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/Test-Info.plist b/runtime/ObjC/Framework/Test-Info.plist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/Test-Info.plist
rename to runtime/ObjC/Framework/Test-Info.plist
diff --git a/antlr-3.4/runtime/ObjC/Framework/Tests-Info.plist b/runtime/ObjC/Framework/Tests-Info.plist
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/Tests-Info.plist
rename to runtime/ObjC/Framework/Tests-Info.plist
diff --git a/runtime/ObjC/Framework/Token+DebuggerSupport.h b/runtime/ObjC/Framework/Token+DebuggerSupport.h
new file mode 100644
index 0000000..c8aa28a
--- /dev/null
+++ b/runtime/ObjC/Framework/Token+DebuggerSupport.h
@@ -0,0 +1,41 @@
+//
+//  Token+DebuggerSupport.h
+//  ANTLR
+//
+//  Created by Kay Röpke on 03.12.2006.
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#import <Foundation/Foundation.h>
+#import "Token.h"
+#import "CommonToken.h"
+
+@interface CommonToken(DebuggerSupport)
+
+- (NSString *)debuggerDescription;
+
+@end
diff --git a/runtime/ObjC/Framework/Token+DebuggerSupport.m b/runtime/ObjC/Framework/Token+DebuggerSupport.m
new file mode 100644
index 0000000..8178fb8
--- /dev/null
+++ b/runtime/ObjC/Framework/Token+DebuggerSupport.m
@@ -0,0 +1,61 @@
+//
+//  Token+DebuggerSupport.m
+//  ANTLR
+//
+//  Created by Kay Röpke on 03.12.2006.
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "Token+DebuggerSupport.h"
+
+
+@implementation CommonToken(DebuggerSupport)
+
+- (NSString *)debuggerDescription
+{
+	NSString *_text = self.text;
+	NSMutableString *escapedText;
+	if (_text) {
+		// -copyWithZone: yields an immutable string; a mutable copy is needed
+		// for the in-place replacements below.
+		escapedText = [_text mutableCopy];
+		// Recompute the range each time: "%25" etc. make the string longer.
+		[escapedText replaceOccurrencesOfString:@"%" withString:@"%25" options:0 range:NSMakeRange(0, [escapedText length])];
+		[escapedText replaceOccurrencesOfString:@"\n" withString:@"%0A" options:0 range:NSMakeRange(0, [escapedText length])];
+		[escapedText replaceOccurrencesOfString:@"\r" withString:@"%0D" options:0 range:NSMakeRange(0, [escapedText length])];
+	} else {
+		escapedText = [NSMutableString stringWithString:@""];
+	}
+	// format is tokenIndex, type, channel, line, col, (escaped)text
+	return [NSString stringWithFormat:@"%u %d %u %u %u \"%@", 
+		[self getTokenIndex],
+		self.type,
+		self.channel,
+		self.line,
+		self.charPositionInLine,
+		escapedText
+		];
+}
+
+@end
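[Editorial sketch, not part of the patch] The category above serializes a token for the remote debugger as one line: token index, type, channel, line, column, then a double quote and the escaped text; the original code leaves the quote unclosed, which appears intentional since the text simply runs to the end of the line. A rough illustration of that format with made-up values (index 14, type 4, default channel, line 3, column 7, text "foo\nbar"):

    NSString *debugLine = [NSString stringWithFormat:@"%u %d %u %u %u \"%@",
                           14U, 4, 0U, 3U, 7U, @"foo%0Abar"];
    // debugLine is now: 14 4 0 3 7 "foo%0Abar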
diff --git a/runtime/ObjC/Framework/Token.h b/runtime/ObjC/Framework/Token.h
new file mode 100644
index 0000000..ea1e523
--- /dev/null
+++ b/runtime/ObjC/Framework/Token.h
@@ -0,0 +1,89 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+
+#ifndef DEBUG_DEALLOC
+#define DEBUG_DEALLOC
+#endif
+
+typedef enum {
+    TokenTypeEOF = -1,
+    TokenTypeInvalid,
+    TokenTypeEOR,
+    TokenTypeDOWN,
+    TokenTypeUP,
+    TokenTypeMIN
+} TokenType;
+
+typedef enum {
+    TokenChannelDefault = 0,
+    TokenChannelHidden = 99
+} TokenChannel;
+
+#define HIDDEN 99
+
+@protocol Token < NSObject, NSCopying >
+
+@property (retain, getter = text, setter = setText:) NSString *text;
+@property (assign) NSInteger type;
+@property (assign) NSUInteger line;
+@property (assign) NSUInteger charPositionInLine;
+
+// The singleton eofToken instance.
++ (id<Token>) eofToken;
+// The default channel for this class of Tokens
++ (TokenChannel) defaultChannel;
+
+// Provide hooks to explicitly set the text, as opposed to deriving it from indices into the CharStream.
+- (NSString *) text;
+- (void) setText:(NSString *)theText;
+
+- (NSInteger)type;
+- (void) setType: (NSInteger) aType;
+
+// ANTLR v3 provides automatic line and position tracking. Subclasses need not
+// override these if they do not want to store line/position information.
+- (NSUInteger)line;
+- (void) setLine: (NSUInteger) aLine;
+
+- (NSUInteger)charPositionInLine;
+- (void) setCharPositionInLine:(NSUInteger)aCharPositionInLine;
+
+// Explicitly change the channel this Token is on. The default parser implementation
+// only sees tokens on the default channel.
+// A common idiom is to put whitespace tokens on the hidden channel (99).
+- (NSUInteger)channel;
+- (void) setChannel: (NSUInteger) aChannel;
+
+// the index of this Token into the TokenStream
+- (NSInteger) getTokenIndex;
+- (void) setTokenIndex: (NSInteger) aTokenIndex;
+- (NSString *)toString;
+
+@end
+
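[Editorial sketch, not part of the patch] A quick way to poke at the Token protocol declared above, assuming the CommonToken class referenced elsewhere in this patch adopts it (that conformance is an assumption here, not something this hunk shows):

    #import "CommonToken.h"

    id<Token> t = [CommonToken eofToken];
    NSLog(@"type=%ld channel=%lu line=%lu col=%lu",
          (long)[t type], (unsigned long)[t channel],
          (unsigned long)[t line], (unsigned long)[t charPositionInLine]);
    // Moving a token to TokenChannelHidden (the HIDDEN/99 define) is the usual
    // way to hide whitespace from the parser, per the comment above.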
diff --git a/runtime/ObjC/Framework/TokenRewriteStream.h b/runtime/ObjC/Framework/TokenRewriteStream.h
new file mode 100644
index 0000000..846e4a2
--- /dev/null
+++ b/runtime/ObjC/Framework/TokenRewriteStream.h
@@ -0,0 +1,170 @@
+//
+//  TokenRewriteStream.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/19/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "CommonTokenStream.h"
+#import "LinkBase.h"
+#import "HashMap.h"
+#import "MapElement.h"
+#import "TokenSource.h"
+
+// Define the rewrite operation hierarchy
+
+@interface RewriteOperation : CommonTokenStream
+{
+/** What rwIndex into rewrites List are we? */
+NSInteger instructionIndex;
+/** Token buffer rwIndex. */
+NSInteger rwIndex;
+NSString *text;
+}
+
+@property (getter=getInstructionIndex, setter=setInstructionIndex:) NSInteger instructionIndex;
+@property (assign) NSInteger rwIndex;
+@property (retain, getter=text, setter=setText:) NSString *text;
+
++ (RewriteOperation *) newRewriteOperation:(NSInteger)anIndex Text:(NSString *)text;
+
+- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText;
+
+/** Execute the rewrite operation by possibly adding to the buffer.
+ *  Return the rwIndex of the next token to operate on.
+ */
+- (NSInteger) execute:(NSString *)buf;
+
+- (NSString *)toString;
+- (NSInteger) indexOf:(char)aChar inString:(NSString *)aString;
+@end
+
+@interface ANTLRInsertBeforeOp : RewriteOperation {
+}
+
++ (ANTLRInsertBeforeOp *) newANTLRInsertBeforeOp:(NSInteger)anIndex Text:(NSString *)theText;
+- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText;
+
+@end
+
+/** I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+ *  instructions.
+ */
+@interface ANTLRReplaceOp : RewriteOperation {
+    NSInteger lastIndex;
+}
+
+@property (assign) NSInteger lastIndex;
+
++ (ANTLRReplaceOp *) newANTLRReplaceOp:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString*)theText;
+- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
+
+- (NSInteger) execute:(NSString *)buf;
+- (NSString *)toString;
+
+@end
+
+@interface ANTLRDeleteOp : ANTLRReplaceOp {
+}
++ (ANTLRDeleteOp *) newANTLRDeleteOp:(NSInteger)from ToIndex:(NSInteger)to;
+
+- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to;
+
+- (NSString *)toString;
+
+@end
+
+
+@interface TokenRewriteStream : CommonTokenStream {
+/** You may have multiple, named streams of rewrite operations.
+ *  I'm calling these things "programs."
+ *  Maps String (name) -> rewrite (List)
+ */
+HashMap *programs;
+
+/** Map String (program name) -> Integer rwIndex */
+HashMap *lastRewriteTokenIndexes;
+}
+
+@property (retain, getter=getPrograms, setter=setPrograms:) HashMap *programs;
+@property (retain, getter=getLastRewriteTokenIndexes, setter=setLastRewriteTokenIndexes:) HashMap *lastRewriteTokenIndexes;
+
++ (TokenRewriteStream *)newTokenRewriteStream;
++ (TokenRewriteStream *)newTokenRewriteStream:(id<TokenSource>) aTokenSource;
++ (TokenRewriteStream *)newTokenRewriteStream:(id<TokenSource>) aTokenSource Channel:(NSInteger)aChannel;
+
+- (id) init;
+- (id)initWithTokenSource:(id<TokenSource>)aTokenSource;
+- (id)initWithTokenSource:(id<TokenSource>)aTokenSource Channel:(NSInteger)aChannel;
+
+- (HashMap *)getPrograms;
+- (void)setPrograms:(HashMap *)aProgList;
+
+- (void) rollback:(NSInteger)instructionIndex;
+- (void) rollback:(NSString *)programName Index:(NSInteger)anInstructionIndex;
+- (void) deleteProgram;
+- (void) deleteProgram:(NSString *)programName;
+- (void) insertAfterToken:(id<Token>)t Text:(NSString *)theText;
+- (void) insertAfterIndex:(NSInteger)anIndex Text:(NSString *)theText;
+- (void) insertAfterProgNam:(NSString *)programName Index:(NSInteger)anIndex Text:(NSString *)theText;
+
+
+- (void) insertBeforeToken:(id<Token>)t Text:(NSString *)theText;
+- (void) insertBeforeIndex:(NSInteger)anIndex Text:(NSString *)theText;
+- (void) insertBeforeProgName:(NSString *)programName Index:(NSInteger)anIndex Text:(NSString *)theText;
+- (void) replaceFromIndex:(NSInteger)anIndex Text:(NSString *)theText;
+- (void) replaceFromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
+- (void) replaceFromToken:(id<Token>)indexT Text:(NSString *)theText;
+- (void) replaceFromToken:(id<Token>)from ToToken:(id<Token>)to Text:(NSString *)theText;
+- (void) replaceProgNam:(NSString *)programName Token:(id<Token>)from Token:(id<Token>)to Text:(NSString *)theText;
+- (void) replaceProgNam:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText;
+- (void) delete:(NSInteger)anIndex;
+- (void) delete:(NSInteger)from ToIndex:(NSInteger)to;
+- (void) deleteToken:(id<Token>)indexT;
+- (void) deleteFromToken:(id<Token>)from ToToken:(id<Token>)to;
+- (void) delete:(NSString *)programName FromToken:(id<Token>)from ToToken:(id<Token>)to;
+- (void) delete:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to;
+- (NSInteger)getLastRewriteTokenIndex;
+- (NSInteger)getLastRewriteTokenIndex:(NSString *)programName;
+- (void)setLastRewriteTokenIndex:(NSString *)programName Index:(NSInteger)anInt;
+- (HashMap *) getProgram:(NSString *)name;
+- (HashMap *) initializeProgram:(NSString *)name;
+- (NSString *)toOriginalString;
+- (NSString *)toOriginalString:(NSInteger)start End:(NSInteger)end;
+- (NSString *)toString;
+- (NSString *)toString:(NSString *)programName;
+- (NSString *)toStringFromStart:(NSInteger)start ToEnd:(NSInteger)end;
+- (NSString *)toString:(NSString *)programName FromStart:(NSInteger)start ToEnd:(NSInteger)end;
+- (HashMap *)reduceToSingleOperationPerIndex:(HashMap *)rewrites;
+- (HashMap *)getKindOfOps:(HashMap *)rewrites KindOfClass:(Class)kind;
+- (HashMap *)getKindOfOps:(HashMap *)rewrites KindOfClass:(Class)kind Index:(NSInteger)before;
+- (NSString *)catOpText:(id)a PrevText:(id)b;
+- (NSMutableString *)toDebugString;
+- (NSMutableString *)toDebugStringFromStart:(NSInteger)start ToEnd:(NSInteger)end;
+                    
+@end
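[Editorial sketch, not part of the patch] The intended workflow for the default rewrite program, using only methods declared in this header. Edits are buffered per program and applied lazily; nothing changes until toString is asked for. The function below takes any id<TokenSource> (normally an ANTLR-generated lexer); the token indices are illustrative.

    static NSString *rewriteExample(id<TokenSource> lexer)
    {
        TokenRewriteStream *tokens = [TokenRewriteStream newTokenRewriteStream:lexer];
        // ... a parser generated for the grammar would normally consume `tokens` here ...
        [tokens insertBeforeIndex:0 Text:@"/* generated */ "];   // queue an insert
        [tokens replaceFromIndex:5 ToIndex:7 Text:@"newName"];   // queue a replace
        [tokens delete:10 ToIndex:12];                           // queue a delete
        NSLog(@"untouched input: %@", [tokens toOriginalString]);
        return [tokens toString];  // original token text with the queued edits applied
    }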
diff --git a/runtime/ObjC/Framework/TokenRewriteStream.m b/runtime/ObjC/Framework/TokenRewriteStream.m
new file mode 100644
index 0000000..7a77bfa
--- /dev/null
+++ b/runtime/ObjC/Framework/TokenRewriteStream.m
@@ -0,0 +1,692 @@
+//
+//  TokenRewriteStream.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/19/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TokenRewriteStream.h"
+#import "RuntimeException.h"
+
+static NSString *DEFAULT_PROGRAM_NAME = @"default";
+static NSInteger PROGRAM_INIT_SIZE = 100;
+static NSInteger MIN_TOKEN_INDEX = 0;
+
+extern NSInteger debug;
+
+// Define the rewrite operation hierarchy
+
+@implementation RewriteOperation
+
+@synthesize instructionIndex;
+@synthesize rwIndex;
+@synthesize text;
+
++ (RewriteOperation *) newRewriteOperation:(NSInteger)anIndex Text:(NSString *)theText
+{
+    return [[RewriteOperation alloc] initWithIndex:anIndex Text:theText];
+}
+    
+- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText
+{
+    if ((self = [super init]) != nil) {
+        rwIndex = anIndex;
+        text = theText;
+    }
+    return self;
+}
+
+/** Execute the rewrite operation by possibly adding to the buffer.
+ *  Return the rwIndex of the next token to operate on.
+ */
+- (NSInteger) execute:(NSString *)buf
+{
+    return rwIndex;
+}
+    
+- (NSString *)toString
+{
+    NSString *opName = [self className];
+    NSInteger dollarIndex = [self indexOf:'$' inString:opName];
+    // Strip any inner-class style "Outer$Inner" prefix; -1 (not found) keeps the whole name.
+    opName = [opName substringFromIndex:dollarIndex+1];
+    return [NSString stringWithFormat:@"<%@%ld:\"%@\">", opName, (long)rwIndex, text];
+}
+
+- (NSInteger) indexOf:(char)aChar inString:(NSString *)aString
+{
+    char indexedChar;
+
+    for( int i = 0; i < [aString length]; i++ ) {
+        indexedChar = [aString characterAtIndex:i];
+        if (indexedChar == aChar) {
+            return i;
+        }
+    }
+    return -1;
+}
+                                                    
+@end
+
+@implementation ANTLRInsertBeforeOp
+
++ (ANTLRInsertBeforeOp *) newANTLRInsertBeforeOp:(NSInteger) anIndex Text:(NSString *)theText
+{
+    return [[ANTLRInsertBeforeOp alloc] initWithIndex:anIndex Text:theText];
+}
+
+- (id) initWithIndex:(NSInteger)anIndex Text:(NSString *)theText
+{
+    if ((self = [super initWithIndex:anIndex Text:theText]) != nil) {
+        rwIndex = anIndex;
+        text = theText;
+    }
+    return self;
+}
+
+
+- (NSInteger) execute:(NSMutableString *)buf
+{
+    [buf appendString:text];
+    if ( ((CommonToken *)[tokens objectAtIndex:rwIndex]).type != TokenTypeEOF ) {
+        [buf appendString:[[tokens objectAtIndex:rwIndex] text]];
+    }
+    return rwIndex+1;
+}
+
+@end
+     
+/** I'm going to try replacing range from x..y with (y-x)+1 ANTLRReplaceOp
+ *  instructions.
+ */
+@implementation ANTLRReplaceOp
+
+@synthesize lastIndex;
+
++ (ANTLRReplaceOp *) newANTLRReplaceOp:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString*)theText
+{
+    return [[ANTLRReplaceOp alloc] initWithIndex:from ToIndex:to Text:theText];
+}
+
+- (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText
+{
+    if ((self = [super initWithIndex:from Text:theText]) != nil) {
+        lastIndex = to;
+    }
+    return self;
+}
+ 
+ 
+- (NSInteger) execute:(NSMutableString *)buf
+{
+    if ( text != nil ) {
+        [buf appendString:text];
+    }
+    return lastIndex+1;
+}
+
+- (NSString *)toString
+{
+    return [NSString stringWithFormat:@"<ANTLRReplaceOp@ %d..%d :>%@\n", rwIndex, lastIndex, text];
+}
+
+@end
+
+@implementation ANTLRDeleteOp
+
++ (ANTLRDeleteOp *) newANTLRDeleteOp:(NSInteger)from ToIndex:(NSInteger)to
+{
+    // super(from To:to, null);
+    return [[ANTLRDeleteOp alloc] initWithIndex:from ToIndex:to];
+}
+
+ - (id) initWithIndex:(NSInteger)from ToIndex:(NSInteger)to
+{
+    if ((self = [super initWithIndex:from ToIndex:to Text:nil]) != nil) {
+        lastIndex = to;
+    }
+    return self;
+}
+     
+- (NSString *)toString
+{
+    return [NSString stringWithFormat:@"<DeleteOp@ %d..%d\n",  rwIndex, lastIndex];
+}
+
+@end
+
+
+@implementation TokenRewriteStream
+
+@synthesize programs;
+@synthesize lastRewriteTokenIndexes;
+
++ (TokenRewriteStream *)newTokenRewriteStream
+{
+    return [[TokenRewriteStream alloc] init];
+}
+
++ (TokenRewriteStream *)newTokenRewriteStream:(id<TokenSource>) aTokenSource
+{
+    return [[TokenRewriteStream alloc] initWithTokenSource:aTokenSource];
+}
+
++ (TokenRewriteStream *)newTokenRewriteStream:(id<TokenSource>) aTokenSource Channel:(NSInteger)aChannel
+{
+    return [[TokenRewriteStream alloc] initWithTokenSource:aTokenSource Channel:aChannel];
+}
+ 
+- (id) init
+{
+    if ((self = [super init]) != nil) {
+        programs = [HashMap newHashMap];
+        [programs addObject:[MapElement newMapElementWithName:DEFAULT_PROGRAM_NAME Node:[HashMap newHashMapWithLen:PROGRAM_INIT_SIZE]]];
+        lastRewriteTokenIndexes = [HashMap newHashMap];
+    }
+    return self;
+}
+ 
+- (id)initWithTokenSource:(id<TokenSource>)aTokenSource
+{
+    if ((self = [super init]) != nil) {
+        programs = [HashMap newHashMap];
+        [programs addObject:[MapElement newMapElementWithName:DEFAULT_PROGRAM_NAME Node:[HashMap newHashMapWithLen:PROGRAM_INIT_SIZE]]];
+        lastRewriteTokenIndexes = [HashMap newHashMap];
+        tokenSource = aTokenSource;
+    }
+    return self;
+}
+
+- (id)initWithTokenSource:(id<TokenSource>)aTokenSource Channel:(NSInteger)aChannel
+{
+    if ((self = [super init]) != nil) {
+        programs = [HashMap newHashMap];
+        [programs addObject:[MapElement newMapElementWithName:DEFAULT_PROGRAM_NAME Node:[HashMap newHashMapWithLen:PROGRAM_INIT_SIZE]]];
+        lastRewriteTokenIndexes = [HashMap newHashMap];
+        tokenSource = aTokenSource;
+        channel = aChannel;
+    }
+    return self;
+}
+ 
+- (HashMap *)getPrograms
+{
+    return programs;
+}
+ 
+- (void)setPrograms:(HashMap *)aProgList
+{
+    programs = aProgList;
+}
+
+- (void) rollback:(NSInteger)instructionIndex
+{
+    [self rollback:DEFAULT_PROGRAM_NAME Index:instructionIndex];
+}
+
+/** Rollback the instruction stream for a program so that
+ *  the indicated instruction (via instructionIndex) is no
+ *  longer in the stream.  UNTESTED!
+ */
+- (void) rollback:(NSString *)programName Index:(NSInteger)anInstructionIndex
+{
+    id object;
+    HashMap *is;
+
+    //    AMutableArray *is = [programs get(programName)];
+    is = [self getPrograms];
+    object = [is getName:programName];
+    if ( is != nil ) {
+#pragma warning this has to be fixed
+        [programs insertObject:programName  atIndex:anInstructionIndex];
+    }
+}
+
+- (void) deleteProgram
+{
+    [self deleteProgram:DEFAULT_PROGRAM_NAME];
+}
+
+/** Reset the program so that no instructions exist */
+- (void) deleteProgram:(NSString *)programName
+{
+    [self rollback:programName Index:MIN_TOKEN_INDEX];
+}
+
+- (void) insertAfterToken:(id<Token>)t Text:(NSString *)theText
+{
+    [self insertAfterProgNam:DEFAULT_PROGRAM_NAME Index:[t getTokenIndex] Text:theText];
+}
+
+- (void) insertAfterIndex:(NSInteger)anIndex Text:(NSString *)theText
+{
+    [self insertAfterProgNam:DEFAULT_PROGRAM_NAME Index:anIndex Text:theText];
+}
+
+- (void) insertAfterProgNam:(NSString *)programName Index:(NSInteger)anIndex Text:(NSString *)theText
+{
+    // to insert after, just insert before next rwIndex (even if past end)
+    [self insertBeforeProgName:programName Index:anIndex+1 Text:theText];
+    //addToSortedRewriteList(programName, new InsertAfterOp(rwIndex,text));
+}
+
+- (void) insertBeforeToken:(id<Token>)t Text:(NSString *)theText
+{
+    [self insertBeforeProgName:DEFAULT_PROGRAM_NAME Index:[t getTokenIndex] Text:theText];
+}
+
+- (void) insertBeforeIndex:(NSInteger)anIndex Text:(NSString *)theText
+{
+    [self insertBeforeProgName:DEFAULT_PROGRAM_NAME Index:anIndex Text:theText];
+}
+
+- (void) insertBeforeProgName:(NSString *)programName Index:(NSInteger)rwIndex Text:(NSString *)theText
+{
+    //addToSortedRewriteList(programName, new ANTLRInsertBeforeOp(rwIndex,text));
+    RewriteOperation *op = [ANTLRInsertBeforeOp newANTLRInsertBeforeOp:rwIndex Text:theText];
+    HashMap *rewrites = [self getProgram:programName];
+    op.instructionIndex = [rewrites count];
+    [rewrites addObject:op];		
+}
+
+- (void) replaceFromIndex:(NSInteger)anIndex Text:(NSString *)theText
+{
+    [self replaceProgNam:DEFAULT_PROGRAM_NAME FromIndex:anIndex ToIndex:anIndex Text:theText];
+}
+
+- (void) replaceFromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText
+{
+    [self replaceProgNam:DEFAULT_PROGRAM_NAME FromIndex:from ToIndex:to Text:theText];
+}
+
+- (void) replaceFromToken:(id<Token>)anIndexT Text:(NSString *)theText
+{
+    [self replaceProgNam:DEFAULT_PROGRAM_NAME FromIndex:[anIndexT getTokenIndex] ToIndex:[anIndexT getTokenIndex] Text:theText];
+}
+
+- (void) replaceFromToken:(id<Token>)from ToToken:(id<Token>)to Text:(NSString *)theText
+{
+    [self replaceProgNam:DEFAULT_PROGRAM_NAME FromIndex:[from getTokenIndex] ToIndex:[to getTokenIndex] Text:theText];
+}
+
+- (void) replaceProgNam:(NSString *)programName Token:(id<Token>)from Token:(id<Token>)to Text:(NSString *)theText
+{
+    [self replaceProgNam:programName FromIndex:[from getTokenIndex] ToIndex:[to getTokenIndex] Text:theText];
+}
+                         
+- (void) replaceProgNam:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to Text:(NSString *)theText
+{
+    if ( from > to || from < 0 || to < 0 || to >= [tokens count] ) {
+        @throw [IllegalArgumentException newException:[NSString stringWithFormat:@"replace: range invalid: %d..%d size=%d\n", from, to, [tokens count]]];
+    }
+    RewriteOperation *op = [ANTLRReplaceOp newANTLRReplaceOp:from ToIndex:to Text:theText];
+    // Look up the rewrite program itself (mirrors insertBeforeProgName:), not the
+    // last-rewrite-index map.
+    HashMap *rewrites = [self getProgram:programName];
+    op.instructionIndex = [rewrites count];
+    [rewrites addObject:op];
+}
+
+- (void) delete:(NSInteger)anIndex
+{
+    [self delete:DEFAULT_PROGRAM_NAME FromIndex:anIndex ToIndex:anIndex];
+}
+
+- (void) delete:(NSInteger)from ToIndex:(NSInteger)to
+{
+    [self delete:DEFAULT_PROGRAM_NAME FromIndex:from ToIndex:to];
+}
+
+- (void) deleteToken:(id<Token>)anIndexT
+{
+    [self delete:DEFAULT_PROGRAM_NAME FromIndex:[anIndexT getTokenIndex] ToIndex:[anIndexT getTokenIndex]];
+}
+
+- (void) deleteFromToken:(id<Token>)from ToToken:(id<Token>)to
+{
+    [self delete:DEFAULT_PROGRAM_NAME FromIndex:[from getTokenIndex] ToIndex:[to getTokenIndex]];
+}
+
+- (void) delete:(NSString *)programName FromToken:(id<Token>)from ToToken:(id<Token>)to
+{
+    [self replaceProgNam:programName FromIndex:[from getTokenIndex] ToIndex:[to getTokenIndex] Text:nil];
+}
+
+- (void) delete:(NSString *)programName FromIndex:(NSInteger)from ToIndex:(NSInteger)to
+{
+    [self replaceProgNam:programName FromIndex:from ToIndex:to Text:nil];
+}
+
+- (NSInteger)getLastRewriteTokenIndex
+{
+    return [self getLastRewriteTokenIndex:DEFAULT_PROGRAM_NAME];
+}
+
+- (NSInteger)getLastRewriteTokenIndex:(NSString *)programName
+{
+#pragma warning fix this to look up the hashed name
+    NSInteger anInt = -1;
+    MapElement *node = [lastRewriteTokenIndexes lookup:programName Scope:0];
+    if ( node != nil ) {
+        anInt = [lastRewriteTokenIndexes hash:programName];
+    }
+    return anInt;
+}
+
+- (void)setLastRewriteTokenIndex:(NSString *)programName Index:(NSInteger)anInt
+{
+    [lastRewriteTokenIndexes insertObject:programName atIndex:anInt];
+}
+
+-(HashMap *) getProgram:(NSString *)name
+{
+   HashMap *is = (HashMap *)[programs getName:name];
+    if ( is == nil ) {
+        is = [self initializeProgram:name];
+    }
+    return is;
+}
+
+-(HashMap *) initializeProgram:(NSString *)name
+{
+    HashMap *is = [HashMap newHashMapWithLen:PROGRAM_INIT_SIZE];
+    [is putName:name Node:nil];
+    return is;
+}
+
+- (NSString *)toOriginalString
+{
+    [super fill];
+    return [self toOriginalString:MIN_TOKEN_INDEX End:[tokens count]-1];
+}
+
+- (NSString *)toOriginalString:(NSInteger)start End:(NSInteger)end
+{
+    NSMutableString *buf = [NSMutableString stringWithCapacity:100];
+    for (int i = start; i >= MIN_TOKEN_INDEX && i <= end && i < [tokens count]; i++) {
+        // Check the token itself (not the rewrite-index map) and skip EOF.
+        if ( [((CommonToken *)[tokens objectAtIndex:i]) type] != TokenTypeEOF )
+            [buf appendString:[[tokens objectAtIndex:i] text]];
+    }
+    return [NSString stringWithString:buf];
+}
+
+- (NSString *)toString
+{
+    [super fill];
+    return [self toStringFromStart:MIN_TOKEN_INDEX ToEnd:[tokens count]-1];
+}
+
+- (NSString *)toString:(NSString *)programName
+{
+    [super fill];
+    return [self toString:programName FromStart:MIN_TOKEN_INDEX ToEnd:[[programs objectAtIndex:MIN_TOKEN_INDEX] count]-1];
+}
+
+- (NSString *)toStringFromStart:(NSInteger)start ToEnd:(NSInteger)end
+{
+    return [self toString:DEFAULT_PROGRAM_NAME FromStart:start ToEnd:end];
+}
+
+- (NSString *)toString:(NSString *)programName FromStart:(NSInteger)start ToEnd:(NSInteger)end
+{
+    HashMap *rewrites = (HashMap *)[programs getName:programName];
+    
+    // ensure start/end are in range
+    if ( end > [tokens count]-1 ) end = [tokens count]-1;
+    if ( start < 0 )
+        start = 0;
+    
+    if ( rewrites == nil || [rewrites count] == 0 ) {
+        return [self toOriginalString:start End:end]; // no instructions to execute
+    }
+    NSMutableString *buf = [NSMutableString stringWithCapacity:100];
+    
+    // First, optimize instruction stream
+    HashMap *indexToOp = [self reduceToSingleOperationPerIndex:rewrites];
+    
+    // Walk buffer, executing instructions and emitting tokens
+    int i = start;
+    while ( i <= end && i < [tokens count] ) {
+        RewriteOperation *op = (RewriteOperation *)[indexToOp objectAtIndex:i];
+        [indexToOp setObject:nil atIndex:i]; // remove so any left have rwIndex size-1
+        id<Token>t = (id<Token>) [tokens objectAtIndex:i];
+        if ( op == nil ) {
+            // no operation at that rwIndex, just dump token
+            if ( t.type != TokenTypeEOF )
+                [buf appendString:t.text];
+            i++; // move to next token
+        }
+        else {
+            i = [op execute:buf]; // execute operation and skip
+        }
+    }
+    
+    // include stuff after end if it's last rwIndex in buffer
+    // So, if they did an insertAfter(lastValidIndex, "foo"), include
+    // foo if end==lastValidIndex.
+    //if ( end == [tokens size]-1 ) {
+    if ( end == [tokens count]-1 ) {
+        // Scan any remaining operations after last token
+        // should be included (they will be inserts).
+        int i2 = 0;
+        while ( i2 < [indexToOp count] ) {
+            RewriteOperation *op = [indexToOp objectAtIndex:i2];
+            if ( op != nil && op.rwIndex >= [tokens count]-1 ) {
+                [buf appendString:op.text];
+            }
+            i2++; // was missing: without it this loop never terminates
+        }
+    }
+    return [NSString stringWithString:buf];
+}
+
+/** We need to combine operations and report invalid operations (like
+ *  overlapping replaces that are not completely nested).  Inserts to
+ *  same rwIndex need to be combined etc...   Here are the cases:
+ *
+ *  I.i.u I.j.v								leave alone, nonoverlapping
+ *  I.i.u I.i.v								combine: Iivu
+ *
+ *  R.i-j.u R.x-y.v	| i-j in x-y			delete first R
+ *  R.i-j.u R.i-j.v							delete first R
+ *  R.i-j.u R.x-y.v	| x-y in i-j			ERROR
+ *  R.i-j.u R.x-y.v	| boundaries overlap	ERROR
+ *
+ *  I.i.u R.x-y.v | i in x-y				delete I
+ *  I.i.u R.x-y.v | i not in x-y			leave alone, nonoverlapping
+ *  R.x-y.v I.i.u | i in x-y				ERROR
+ *  R.x-y.v I.x.u 							R.x-y.uv (combine, delete I)
+ *  R.x-y.v I.i.u | i not in x-y			leave alone, nonoverlapping
+ *
+ *  I.i.u = insert u before op @ rwIndex i
+ *  R.x-y.u = replace x-y indexed tokens with u
+ *
+ *  First we need to examine replaces.  For any replace op:
+ *
+ * 		1. wipe out any insertions before op within that range.
+ *		2. Drop any replace op before that is contained completely within
+ *         that range.
+ *		3. Throw exception upon boundary overlap with any previous replace.
+ *
+ *  Then we can deal with inserts:
+ *
+ * 		1. for any inserts to same rwIndex, combine even if not adjacent.
+ * 		2. for any prior replace with same left boundary, combine this
+ *         insert with replace and delete this replace.
+ * 		3. throw exception if rwIndex in same range as previous replace
+ *
+ *  Don't actually delete; make op null in list. Easier to walk list.
+ *  Later we can throw as we add to rwIndex -> op map.
+ *
+ *  Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+ *  inserted stuff would be before the replace range.  But, if you
+ *  add tokens in front of a method body '{' and then delete the method
+ *  body, I think the stuff before the '{' you added should disappear too.
+ *
+ *  Return a map from token rwIndex to operation.
+ */
+- (HashMap *)reduceToSingleOperationPerIndex:(HashMap *)rewrites
+{
+    //System.out.println("rewrites="+rewrites);
+    if (debug > 1) NSLog(@"rewrites=%@\n", [rewrites getName:DEFAULT_PROGRAM_NAME]);
+    // WALK REPLACES
+    for (int i = 0; i < [rewrites count]; i++) {
+        RewriteOperation *op = (RewriteOperation *)[rewrites objectAtIndex:i];
+        if ( op==nil )
+            continue;
+        if ( ![op isKindOfClass:[ANTLRReplaceOp class]] )
+            continue;
+        ANTLRReplaceOp *rop = (ANTLRReplaceOp *)[rewrites objectAtIndex:i];
+        // Wipe prior inserts within range
+        //List inserts = getKindOfOps(rewrites, ANTLRInsertBeforeOp.class, i);
+        HashMap *inserts = [self getKindOfOps:rewrites KindOfClass:[ANTLRInsertBeforeOp class] Index:i];
+        for (int j = 0; j < [inserts count]; j++) {
+            ANTLRInsertBeforeOp *iop = (ANTLRInsertBeforeOp *)[inserts objectAtIndex:j];
+            if ( iop.rwIndex >= rop.rwIndex && iop.rwIndex <= rop.lastIndex ) {
+                // delete insert as it's a no-op.
+                [rewrites setObject:nil atIndex:iop.instructionIndex];
+            }
+        }
+        // Drop any prior replaces contained within
+        HashMap *prevReplaces = [self getKindOfOps:rewrites KindOfClass:[ANTLRReplaceOp class] Index:i];
+        for (int j = 0; j < [prevReplaces count]; j++) {
+            ANTLRReplaceOp *prevRop = (ANTLRReplaceOp *) [prevReplaces objectAtIndex:j];
+            if ( prevRop.rwIndex>=rop.rwIndex && prevRop.lastIndex <= rop.lastIndex ) {
+                // delete replace as it's a no-op.
+                [rewrites setObject:nil atIndex:prevRop.instructionIndex];
+                continue;
+            }
+            // throw exception unless disjoint or identical
+            BOOL disjoint = prevRop.lastIndex<rop.rwIndex || prevRop.rwIndex > rop.lastIndex;
+            BOOL same = prevRop.rwIndex==rop.rwIndex && prevRop.lastIndex==rop.lastIndex;
+            if ( !disjoint && !same ) {
+                @throw [IllegalArgumentException newException:
+                        [NSString stringWithFormat:@"replace op boundaries of %@, overlap with previous %@\n", rop, prevRop]];
+            }
+        }
+    }
+    
+    // WALK INSERTS
+    for (int i = 0; i < [rewrites count]; i++) {
+        RewriteOperation *op = (RewriteOperation *)[rewrites objectAtIndex:i];
+        if ( op == nil )
+            continue;
+        if ( ![op isKindOfClass:[ANTLRInsertBeforeOp class]] )
+            continue;
+        ANTLRInsertBeforeOp *iop = (ANTLRInsertBeforeOp *)[rewrites objectAtIndex:i];
+        // combine current insert with prior if any at same rwIndex
+        HashMap *prevInserts = (HashMap *)[self getKindOfOps:rewrites KindOfClass:[ANTLRInsertBeforeOp class] Index:i];
+        for (int j = 0; j < [prevInserts count]; j++) {
+            ANTLRInsertBeforeOp *prevIop = (ANTLRInsertBeforeOp *) [prevInserts objectAtIndex:j];
+            if ( prevIop.rwIndex == iop.rwIndex ) { // combine objects
+                                                // convert to strings...we're in process of toString'ing
+                                                // whole token buffer so no lazy eval issue with any templates
+                iop.text = [self catOpText:iop.text PrevText:prevIop.text];
+                // delete redundant prior insert
+                [rewrites setObject:nil atIndex:prevIop.instructionIndex];
+            }
+        }
+        // look for replaces where iop.rwIndex is in range; error
+        HashMap *prevReplaces = (HashMap *)[self getKindOfOps:rewrites KindOfClass:[ANTLRReplaceOp class] Index:i];
+        for (int j = 0; j < [prevReplaces count]; j++) {
+            ANTLRReplaceOp *rop = (ANTLRReplaceOp *) [prevReplaces objectAtIndex:j];
+            if ( iop.rwIndex == rop.rwIndex ) {
+                rop.text = [self catOpText:iop.text PrevText:rop.text];
+                [rewrites setObject:nil atIndex:i];  // delete current insert
+                continue;
+            }
+            if ( iop.rwIndex >= rop.rwIndex && iop.rwIndex <= rop.lastIndex ) {
+                @throw [IllegalArgumentException newException:[NSString stringWithFormat:@"insert op %@ within boundaries of previous %@", iop, rop]];
+            }
+        }
+    }
+    // System.out.println("rewrites after="+rewrites);
+    HashMap *m = [HashMap newHashMapWithLen:15];
+    for (int i = 0; i < [rewrites count]; i++) {
+        RewriteOperation *op = (RewriteOperation *)[rewrites objectAtIndex:i];
+        if ( op == nil )
+            continue; // ignore deleted ops
+        if ( [m objectAtIndex:op.rwIndex] != nil ) {
+            @throw [RuntimeException newException:@"should only be one op per rwIndex\n"];
+        }
+        //[m put(new Integer(op.rwIndex), op);
+        [m setObject:op atIndex:op.rwIndex];
+    }
+    //System.out.println("rwIndex to op: "+m);
+    if (debug > 1) NSLog(@"rwIndex to op %@\n", m);
+    return m;
+}
+
+- (NSString *)catOpText:(id)a PrevText:(id)b
+{
+    NSString *x = @"";
+    NSString *y = @"";
+    if ( a != nil )
+        x = [a toString];
+    if ( b != nil )
+        y = [b toString];
+    return [NSString stringWithFormat:@"%@%@",x, y];
+}
+
+- (HashMap *)getKindOfOps:(HashMap *)rewrites KindOfClass:(Class)kind
+{
+    return [self getKindOfOps:rewrites KindOfClass:kind Index:[rewrites count]];
+}
+
+/** Get all operations before an rwIndex of a particular kind */
+- (HashMap *)getKindOfOps:(HashMap *)rewrites KindOfClass:(Class)kind Index:(NSInteger)before
+{
+    HashMap *ops = [HashMap newHashMapWithLen:15];
+    for (int i = 0; i < before && i < [rewrites count]; i++) {
+        RewriteOperation *op = (RewriteOperation *)[rewrites objectAtIndex:i];
+        if ( op == nil )
+            continue; // ignore deleted
+        if ( [op isKindOfClass:(Class)kind] )
+            [ops addObject:op];
+    }		
+    return ops;
+}
+
+- (NSMutableString *)toDebugString
+{
+    return [self toDebugStringFromStart:MIN_TOKEN_INDEX ToEnd:[tokens count]-1];
+}
+
+- (NSMutableString *)toDebugStringFromStart:(NSInteger)start ToEnd:(NSInteger)end
+{
+    NSMutableString *buf = [NSMutableString stringWithCapacity:100];
+    for (int i = start; i >= MIN_TOKEN_INDEX && i <= end && i < [tokens count]; i++) {
+        [buf appendString:[[tokens objectAtIndex:i] text]];
+    }
+    return buf; // buf is already the NSMutableString this method returns
+}
+
+@end
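[Editorial sketch, not part of the patch] The combining rules documented above reduceToSingleOperationPerIndex:, restated through the public API. Here `tokens` stands for a TokenRewriteStream a parser has already run over; the index values are illustrative.

    // Two inserts queued at the same index get merged into a single insert there.
    [tokens insertBeforeIndex:4 Text:@"A"];
    [tokens insertBeforeIndex:4 Text:@"B"];

    // A later replace that fully contains an earlier one subsumes it: the
    // 12..15 op is dropped as a no-op when the program is reduced.
    [tokens replaceFromIndex:12 ToIndex:15 Text:@"inner"];
    [tokens replaceFromIndex:10 ToIndex:20 Text:@"outer"];

    // Replaces that overlap without nesting (say, a later 18..25 against the
    // 10..20 above) raise an IllegalArgumentException at this point instead.
    NSString *result = [tokens toString];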
diff --git a/runtime/ObjC/Framework/TokenSource.h b/runtime/ObjC/Framework/TokenSource.h
new file mode 100644
index 0000000..ff46d4c
--- /dev/null
+++ b/runtime/ObjC/Framework/TokenSource.h
@@ -0,0 +1,38 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import "Token.h"
+
+// Anything that responds to -nextToken can be treated as a lexer.
+// For instance, this can be a flex lexer, a hand-written lexer, or even a proxy
+// for a remotely running token source (a database, another lexer, whatever).
+@protocol TokenSource <NSObject, NSCopying>
+
+- (id<Token>) nextToken;
+- (NSString *)getSourceName;
+
+@end
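[Editorial sketch, not part of the patch] The comment above says anything that answers -nextToken can act as a lexer; a minimal pass-through proxy makes that concrete. The class name is invented for illustration, and the manual retain/release style follows the rest of this runtime.

    #import "TokenSource.h"

    @interface ForwardingTokenSource : NSObject <TokenSource> {
        id<TokenSource> inner;
    }
    - (id) initWithSource:(id<TokenSource>)aSource;
    @end

    @implementation ForwardingTokenSource

    - (id) initWithSource:(id<TokenSource>)aSource
    {
        if ((self = [super init]) != nil) {
            inner = [aSource retain];
        }
        return self;
    }

    - (void) dealloc
    {
        [inner release];
        [super dealloc];
    }

    - (id<Token>) nextToken
    {
        // A real filter could drop, duplicate, or rewrite tokens here.
        return [inner nextToken];
    }

    - (NSString *) getSourceName
    {
        return [inner getSourceName];
    }

    - (id) copyWithZone:(NSZone *)aZone
    {
        return [[ForwardingTokenSource allocWithZone:aZone] initWithSource:inner];
    }

    @end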
diff --git a/runtime/ObjC/Framework/TokenStream.h b/runtime/ObjC/Framework/TokenStream.h
new file mode 100644
index 0000000..a8c9cf1
--- /dev/null
+++ b/runtime/ObjC/Framework/TokenStream.h
@@ -0,0 +1,62 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import "IntStream.h"
+#import "Token.h"
+
+@protocol TokenStream < IntStream >
+
+// Get the Token at the current input pointer + i ahead, where i=1 is the next Token.
+// i<0 indicates tokens in the past: -1 is the previous token and -2 is two tokens
+// ago. LT:0 is undefined. For i>=n, return the EOF token. Return nil for LT:0 and
+// for any index that resolves to a negative absolute position.
+
+- (id<Token>) LT:(NSInteger) i;
+
+- (id<Token>) getToken:(NSUInteger) i;
+
+- (id) getTokenSource;
+
+- (NSString *) toString;
+/** Return the text of all tokens from start to stop, inclusive.
+ *  If the stream does not buffer all the tokens, it can just return @"" or nil;
+ *  users should of course not access $ruleLabel.text in an action in that case.
+ */
+- (NSString *)toStringFromStart:(NSInteger)startIdx ToEnd:(NSInteger)stopIdx;
+
+/** Because the user is not required to use a token with an index stored
+ *  in it, we must provide a means for two token objects themselves to
+ *  indicate the start/end location.  Most often this will just delegate
+ *  to the other toString(int,int).  This is also parallel with
+ *  the TreeNodeStream.toString(Object,Object).
+ */
+- (NSString *) toStringFromToken:(id<Token>)startToken ToToken:(id<Token>)stopToken;
+
+
+@end
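[Editorial sketch, not part of the patch] The LT: contract in the comment above, written out so the lookahead directions are concrete; works against any id<TokenStream>, e.g. the CommonTokenStream used elsewhere in this patch.

    static void logLookahead(id<TokenStream> stream)
    {
        id<Token> next = [stream LT:1];    // the token the parser would consume next
        id<Token> prev = [stream LT:-1];   // the most recently consumed token
        // LT:0 is undefined, and lookahead past the end yields the EOF token.
        NSLog(@"next=%@ prev=%@", [next text], [prev text]);
    }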
diff --git a/runtime/ObjC/Framework/Tree.h b/runtime/ObjC/Framework/Tree.h
new file mode 100644
index 0000000..c398b22
--- /dev/null
+++ b/runtime/ObjC/Framework/Tree.h
@@ -0,0 +1,129 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DEBUG_DEALLOC
+#define DEBUG_DEALLOC
+#endif
+
+#import "AMutableArray.h"
+
+@protocol Tree < NSObject, NSCopying >
+
+//+ (id<Tree>) invalidNode;
+
+- (id<Tree>) getChild:(NSUInteger)index;
+- (NSUInteger) getChildCount;
+
+// As of ANTLR v3.0, Tree tracks its parent and its child index.
+
+- (id<Tree>)getParent;
+
+- (void) setParent:(id<Tree>)t;
+
+/** Is there a node above with token type ttype? */
+- (BOOL) hasAncestor:(NSInteger)ttype;
+
+/** Walk upwards and get first ancestor with this token type. */
+- (id<Tree>) getAncestor:(NSInteger) ttype;
+
+/** Return a list of all ancestors of this node.  The first node of
+ *  list is the root and the last is the parent of this node.
+ */
+- (AMutableArray *) getAncestors;
+
+/** This node is what child index? 0..n-1 */
+- (NSInteger) getChildIndex;
+
+- (void) setChildIndex:(NSInteger) index;
+
+/** Set the parent and child index values for all children */
+- (void) freshenParentAndChildIndexes;
+
+/** Add t as a child to this node.  If t is nil (no node), do nothing.  If t
+ *  is a nil node (the root of a flat list), add all of t's children to this node's children.
+ */
+- (void) addChild:(id<Tree>) t;
+
+/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
+- (void) setChild:(NSInteger)i With:(id<Tree>) t;
+
+- (id) deleteChild:(NSInteger) i;
+
+/** Delete children from start to stop and replace with t even if t is
+ *  a list (nil-root tree).  The number of children can increase or decrease.
+ *  For huge child lists, inserting children can force walking the rest of the
+ *  children to set their child index; this could be slow.
+ */
+- (void) replaceChildrenFrom:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t;	
+
+- (NSArray *) children;
+// Add each element of theChildren as a child of this node.
+
+- (void) addChildren:(NSArray *) theChildren;
+//- (void) removeAllChildren;
+
+// Indicates the node is a nil node but may still have children, meaning
+// the tree is a flat list.
+
+- (BOOL) isNil;
+
+/**  What is the smallest token index (indexing from 0) for this node
+ *   and its children?
+ */
+- (NSInteger) getTokenStartIndex;
+
+- (void) setTokenStartIndex:(NSInteger) index;
+
+/**  What is the largest token index (indexing from 0) for this node
+ *   and its children?
+ */
+- (NSInteger) getTokenStopIndex;
+- (void) setTokenStopIndex:(NSInteger) index;
+
+- (id<Tree>) dupNode;
+
+- (NSString *) toString;
+
+#pragma mark Copying
+- (id) copyWithZone:(NSZone *)aZone;	// the children themselves are not copied here!
+- (id) deepCopy;					// performs a deepCopyWithZone: with the default zone
+- (id) deepCopyWithZone:(NSZone *)aZone;
+
+#pragma mark Tree Parser support
+- (NSInteger)type;
+- (NSString *)text;
+// In case we don't have a token payload, what is the line for errors?
+- (NSUInteger)line;
+- (NSUInteger)charPositionInLine;
+- (void) setCharPositionInLine:(NSUInteger)pos;
+
+#pragma mark Informational
+- (NSString *) treeDescription;
+- (NSString *) description;
+
+@end
+
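[Editorial sketch, not part of the patch] A depth-first dump over the Tree protocol above, using only messages declared in this header; handy for checking the parent/child-index bookkeeping the comments describe.

    static void dumpTree(id<Tree> t, NSUInteger depth)
    {
        NSMutableString *pad = [NSMutableString string];
        for (NSUInteger d = 0; d < depth; d++)
            [pad appendString:@"  "];
        NSLog(@"%@%@", pad, [t toString]);
        for (NSUInteger i = 0; i < [t getChildCount]; i++)
            dumpTree([t getChild:i], depth + 1);
    }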
diff --git a/runtime/ObjC/Framework/Tree.m b/runtime/ObjC/Framework/Tree.m
new file mode 100644
index 0000000..25c02dd
--- /dev/null
+++ b/runtime/ObjC/Framework/Tree.m
@@ -0,0 +1,149 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "Tree.h"
+#import "Token.h"
+// TODO: this shouldn't be here...but needed for invalidNode
+#import "CommonTree.h"
+
+@implementation Tree
+
+@synthesize isEmpty;
+@synthesize isEmptyNode;
+@synthesize invalidNode;
+@synthesize children;
+
+#pragma mark Tree protocol conformance
+
++ (id<Tree>) invalidNode
+{
+	static id<Tree> invalidNode = nil;
+	if (!invalidNode) {
+		invalidNode = [[CommonTree alloc] initWithTokenType:TokenTypeInvalid];
+	}
+	return invalidNode;
+}
+
+- (id<Tree>) init
+{
+	self = [super init];
+	if ( self != nil ) {
+		isEmptyNode = NO;
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+	[super dealloc];
+}
+
+- (id<Tree>) getChild:(NSUInteger) index
+{
+	return nil;
+}
+
+- (NSUInteger) getChildCount
+{
+	return 0;
+}
+
+- (NSArray *) getChildren
+{
+	return nil;
+}
+
+	// Add tree as a child to this node.  If tree is nil, do nothing.  If tree
+	// is an empty node, add all children of tree to our children.
+
+- (void) addChild:(id<Tree>) tree
+{
+}
+
+- (void) addChildren:(NSArray *) theChildren
+{
+}
+
+- (void) removeAllChildren
+{
+}
+
+	// Indicates the node is an empty node but may still have children, meaning
+	// the tree is a flat list.
+
+- (BOOL) isEmpty
+{
+	return isEmptyNode;
+}
+
+- (void) setIsEmpty:(BOOL)emptyFlag
+{
+	isEmptyNode = emptyFlag;
+}
+
+#pragma mark Tree abstract base class
+
+	// Return a token type; needed for tree parsing
+- (NSInteger) getType
+{
+	return 0;
+}
+
+- (NSString *) getText
+{
+	return [self description];
+}
+
+	// In case we don't have a token payload, what is the line for errors?
+- (NSInteger) getLine
+{
+	return 0;
+}
+
+- (NSInteger) getCharPositionInLine
+{
+	return 0;
+}
+
+- (NSString *) treeDescription
+{
+	return @"";
+}
+
+- (NSString *) description
+{
+	return @"";
+}
+
+- (void) _createChildrenList
+{
+	if ( children == nil )
+		children = [[NSMutableArray alloc] init];
+}
+
+@end
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/TreeAdaptor.h b/runtime/ObjC/Framework/TreeAdaptor.h
new file mode 100644
index 0000000..fd4b792
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeAdaptor.h
@@ -0,0 +1,157 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "Token.h"
+#import "BaseTree.h"
+#import "TokenStream.h"
+
+#pragma warning tree/node diction is broken.
+
+@protocol TreeAdaptor <NSObject, NSCopying>
+
+#pragma mark Construction
+
+#pragma mark TreeAdaptor implementation
+- (id)dupNode:(id)aNode;	// copies just the node
+- (id)dupTree:(id)aTree;	// copies the entire subtree, recursively
+
+/** Return a nil node (an empty but non-null node) that can hold
+ *  a list of elements as its children.  If you want a flat tree (a list),
+ *  use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
+ */
+- (id) emptyNode;
+
+/** Return a tree node representing an error.  This node records the
+ *  tokens consumed during error recovery.  The start token indicates the
+ *  input symbol at which the error was detected.  The stop token indicates
+ *  the last symbol consumed during recovery.
+ *
+ *  You must specify the input stream so that the erroneous text can
+ *  be packaged up in the error node.  The exception could be useful
+ *  to some applications; the default implementation stores a pointer to it in
+ *  the CommonErrorNode.
+ *
+ *  This only makes sense during token parsing, not tree parsing.
+ *  Tree parsing should happen only when parsing and tree construction
+ *  succeed.
+ */
+- (id) errorNode:(id<TokenStream>)anInput
+            From:(id<Token>)aStartToken
+              To:(id<Token>)aStopToken
+       Exception:(NSException *) e;
+
+/** Is tree considered a nil node used to make lists of child nodes? */
+- (BOOL) isNil:(id)aTree;
+
+
+- (void) addChild:(id)child toTree:(id)aTree;
+
+/** If oldRoot is a nil root, just copy or move the children to newRoot.
+ *  If not a nil root, make oldRoot a child of newRoot.
+ *
+ *    old=^(nil a b c), new=r yields ^(r a b c)
+ *    old=^(a b c), new=r yields ^(r ^(a b c))
+ *
+ *  If newRoot is a nil-rooted single child tree, use the single
+ *  child as the new root node.
+ *
+ *    old=^(nil a b c), new=^(nil r) yields ^(r a b c)
+ *    old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
+ *
+ *  If oldRoot was null, it's ok, just return newRoot (even if isNil).
+ *
+ *    old=null, new=r yields r
+ *    old=null, new=^(nil r) yields ^(nil r)
+ *
+ *  Return newRoot.  Throw an exception if newRoot is not a
+ *  simple node or nil root with a single child node--it must be a root
+ *  node.  If newRoot is ^(nil x) return x as newRoot.
+ *
+ *  Be advised that it's ok for newRoot to point at oldRoot's
+ *  children; i.e., you don't have to copy the list.  We are
+ *  constructing these nodes so we should have this control for
+ *  efficiency.
+ */
+- (id) becomeRoot:(id)newRoot old:(id)oldRoot;
+
+- (id) rulePostProcessing:(id)root;
+
+#pragma mark Rewrite Rules
+                           
+- (NSUInteger) getUniqueID:(id)aNode;
+
+- (id) create:(id<Token>)payload;
+- (id) createTree:(NSInteger)tokenType FromToken:(id<Token>)fromToken;
+- (id) createTree:(NSInteger)tokenType FromToken:(id<Token>)fromToken Text:(NSString *)text;
+- (id) createTree:(NSInteger)tokenType Text:(NSString *)text;
+
+#pragma mark Content
+
+- (id)dupNode:(id)aNode;
+- (id)dupTree:(id)aTree;
+
+- (NSInteger) getType:(id)aNode;
+- (void) setType:(id)aNode Type:(NSInteger)tokenType;
+
+- (NSString *) getText:(id)aNode;
+- (void) setText:(id)aNode Text:(NSString *)tokenText;
+
+- (id<Token>) getToken:(id)t;
+
+- (void) setTokenBoundaries:(id)aTree From:(id<Token>)startToken To:(id<Token>)stopToken;
+- (NSInteger) getTokenStartIndex:(id)aTree;
+- (NSInteger) getTokenStopIndex:(id)aTree;
+
+#pragma mark Navigation / Tree Parsing
+
+/** Get a child 0..n-1 node */
+- (id) getChild:(id)aNode At:(NSInteger) i;
+/** Set ith child (0..n-1) to t; t must be non-null and non-nil node */
+- (void) setChild:(id)aTree At:(NSInteger)index Child:(id)child;
+/** Remove ith child and shift children down from right. */
+- (id) deleteChild:(id)t Index:(NSInteger)index;
+
+/** How many children?  If 0, then this is a leaf node */
+- (NSInteger) getChildCount:(id) aTree;
+
+/** Who is the parent node of this node; if null, implies node is root.
+ *  If your node type doesn't handle this, it's ok but the tree rewrites
+ *  in tree parsers need this functionality.
+ */
+- (id)getParent:(id)t;
+- (void) setParent:(id)t With:(id)parent;
+
+/** What index is this node in the child list? Range: 0..n-1
+ *  If your node type doesn't handle this, it's ok but the tree rewrites
+ *  in tree parsers need this functionality.
+ */
+- (NSInteger) getChildIndex:(id)t;
+- (void) setChildIndex:(id)t With:(NSInteger)index;
+
+- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id)t;
+
+@end
+
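[Editorial sketch, not part of the patch] The ^(nil ...) and becomeRoot:old: shapes described in the comments above, expressed through the protocol. `adaptor` can be any concrete id<TreeAdaptor>; a, b and r are nodes it created earlier.

    static id buildRootedList(id<TreeAdaptor> adaptor, id a, id b, id r)
    {
        id list = [adaptor emptyNode];              // the nil node: ^(nil)
        [adaptor addChild:a toTree:list];           // ^(nil a)
        [adaptor addChild:b toTree:list];           // ^(nil a b) -- a flat list
        // Hoist r over the nil root: old=^(nil a b), new=r  =>  ^(r a b)
        return [adaptor becomeRoot:r old:list];
    }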
diff --git a/runtime/ObjC/Framework/TreeAdaptor.m b/runtime/ObjC/Framework/TreeAdaptor.m
new file mode 100644
index 0000000..fe837ee
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeAdaptor.m
@@ -0,0 +1,238 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TreeAdaptor.h"
+#import "TreeException.h"
+#import "BaseTree.h"
+
+@implementation TreeAdaptor
+
+
++ (id) newEmptyTree
+{
+	return [TreeAdaptor newTreeWithToken:nil];
+}
+
++ (id) newAdaptor
+{
+    return [[TreeAdaptor alloc] init];
+}
+
+- (id) init
+{
+    self = [super init];
+    return self;
+}
+
+- (id) initWithPayload:(id<Token>)payload
+{
+    self = [super init];
+    return self;
+}
+
+#pragma mark Rewrite Rules
+
+/** Create a tree node from a Token object; for CommonTree type trees,
+ *  the token just becomes the payload.  This is the most
+ *  common create call.
+ *
+ *  Override if you want another kind of node to be built.
+ */
+- (id) create:(id<Token>) payload
+{
+    return nil;
+}
+
+/** Create a new node derived from a token, with a new token type.
+ *  This is invoked from an imaginary node ref on right side of a
+ *  rewrite rule as IMAG[$tokenLabel].
+ *
+ *  This should invoke createToken(Token).
+ */
+- (id) createTree:(NSInteger)tokenType fromToken:(id<Token>)fromToken
+{
+	id<Token> newToken = [self createToken:fromToken];
+	[newToken setType:tokenType];
+    
+	id newTree = [self create:newToken];
+	[newToken release];
+	return newTree;
+}
+
+/** Create a new node derived from a token, with a new token type.
+ *  This is invoked from an imaginary node ref on right side of a
+ *  rewrite rule as IMAG[$tokenLabel].
+ *
+ *  This should invoke createToken(Token).
+ */
+- (id) createTree:(NSInteger)tokenType fromToken:(id<Token>)fromToken text:(NSString *)tokenText
+{
+	id<Token> newToken = [self createToken:fromToken];
+	[newToken setText:tokenText];
+	
+	id newTree = [self create:newToken];
+	[newToken release];
+	return newTree;
+}
+
+/** Create a new node derived from a token, with a new token type.
+ *  This is invoked from an imaginary node ref on right side of a
+ *  rewrite rule as IMAG["IMAG"].
+ *
+ *  This should invoke createToken(int,String).
+ */
+- (id) createTree:(NSInteger)tokenType text:(NSString *)tokenText
+{
+	id<Token> newToken = [self createToken:tokenType text:tokenText];
+	
+	id newTree = [self create:newToken];
+	[newToken release];
+	return newTree;
+}
+
+- (id) copyNode:(id)aNode
+{
+	return [aNode copyWithZone:nil];	// not -copy: to silence warnings
+}
+
+- (id) copyTree:(id)aTree
+{
+	return [aTree deepCopy];
+}
+
+
+- (void) addChild:(id)child toTree:(id)aTree
+{
+	[aTree addChild:child];
+}
+
+- (id) makeNode:(id)newRoot parentOf:(id)oldRoot
+{
+	id newRootNode = newRoot;
+
+	if (oldRoot == nil)
+		return newRootNode;
+    // handles ^(nil real-node) case
+	if ([newRootNode isNil]) {
+		if ([newRootNode getChildCount] > 1) {
+#warning TODO: Find a way to get the current input stream here!
+			@throw [TreeException exceptionWithOldRoot:oldRoot newRoot:newRootNode stream:nil];
+		}
+#warning TODO: double check memory management with respect to code generation
+		// remove the empty node, placing its sole child in its role.
+		id tmpRootNode = [[newRootNode childAtIndex:0] retain];
+		[newRootNode release];
+		newRootNode = tmpRootNode;		
+	}
+	// the handling of an empty node at the root of oldRoot happens in addChild:
+	[newRootNode addChild:oldRoot];
+    // this release relies on the fact that the ANTLR code generator always assigns the return value of this method
+    // to the variable originally holding oldRoot. If we don't release we leak the reference.
+    // FIXME: this is totally non-obvious. maybe do it in calling code by comparing pointers and conditionally releasing
+    // the old object
+    [oldRoot release];
+    
+    // what happens to newRootNode's retain count? Should we be autoreleasing this one? Probably.
+	return [newRootNode retain];
+}
+
+
+- (id) postProcessTree:(id)aTree
+{
+	id processedNode = aTree;
+	if (aTree != nil && [aTree isNil] != NO && [aTree getChildCount] == 1) {
+		processedNode = [aTree childAtIndex:0];
+	}
+	return processedNode;
+}
+
+
+- (NSUInteger) uniqueIdForTree:(id)aNode
+{
+	// TODO: is hash appropriate here?
+	return [aNode hash];
+}
+
+
+#pragma mark Content
+
+- (NSInteger) tokenTypeForNode:(id)aNode
+{
+	return [aNode getType];
+}
+
+- (void) setTokenType:(NSInteger)tokenType forNode:(id)aNode
+{
+	// currently unimplemented
+}
+
+
+- (NSString *) textForNode:(id)aNode
+{
+	return [aNode getText];
+}
+
+- (void) setText:(NSString *)tokenText forNode:(id)aNode
+{
+	// currently unimplemented
+}
+
+
+#pragma mark Navigation / Tree Parsing
+
+- (id) childForNode:(id) aNode atIndex:(NSInteger) i
+{
+	// currently unimplemented
+	return nil;
+}
+
+- (NSInteger) childCountForTree:(id) aTree
+{
+	// currently unimplemented
+	return 0;
+}
+
+#pragma mark Subclass Responsibilities
+
+- (void) setBoundariesForTree:(id)aTree fromToken:(id<Token>)startToken toToken:(id<Token>)stopToken
+{
+	// subclass responsibility
+}
+
+- (NSInteger) tokenStartIndexForTree:(id)aTree
+{
+	// subclass responsibility
+	return 0;
+}
+
+- (NSInteger) tokenStopIndexForTree:(id)aTree
+{
+	// subclass responsibility
+	return 0;
+}
+
+
+@end
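The create: comment in the file above marks the intended override point for custom node types. The following is only a hedged illustration of that idea; MyNodeAdaptor, MyNode, and newMyNodeWithToken: are hypothetical names, not part of this runtime:

    @interface MyNodeAdaptor : TreeAdaptor
    @end

    @implementation MyNodeAdaptor

    - (id) create:(id<Token>)payload
    {
        // wrap the token in an application-specific node type instead of a CommonTree
        return [MyNode newMyNodeWithToken:payload];
    }

    @end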
diff --git a/runtime/ObjC/Framework/TreeException.h b/runtime/ObjC/Framework/TreeException.h
new file mode 100644
index 0000000..739e205
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeException.h
@@ -0,0 +1,42 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "Tree.h"
+#import "RecognitionException.h"
+
+@interface TreeException : RecognitionException {
+	id<BaseTree> oldRoot;
+	id<BaseTree> newRoot;
+}
+
++ (id) newException:(id<BaseTree>)theOldRoot newRoot:(id<BaseTree>)theNewRoot stream:(id<IntStream>)aStream;
+- (id) initWithOldRoot:(id<BaseTree>)theOldRoot newRoot:(id<BaseTree>)theNewRoot stream:(id<IntStream>)aStream;
+
+- (void) setOldRoot:(id<BaseTree>)aTree;
+- (void) setNewRoot:(id<BaseTree>)aTree;
+
+@end
diff --git a/runtime/ObjC/Framework/TreeException.m b/runtime/ObjC/Framework/TreeException.m
new file mode 100644
index 0000000..3188f85
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeException.m
@@ -0,0 +1,85 @@
+//
+//  TreeException.m
+//  ANTLR
+//
+//  Created by Kay Röpke on 24.10.2006.
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import "TreeException.h"
+
+
+@implementation TreeException
+
++ (id) newException:(id<BaseTree>)theOldRoot newRoot:(id<BaseTree>)theNewRoot stream:(id<IntStream>)aStream
+{
+	return [[TreeException alloc] initWithOldRoot:theOldRoot newRoot:theNewRoot stream:aStream];
+}
+
+- (id) initWithOldRoot:(id<BaseTree>)theOldRoot newRoot:(id<BaseTree>)theNewRoot stream:(id<IntStream>)aStream
+{
+	if ((self = [super initWithStream:aStream reason:@"The new root has more than one child. Cannot make it the root node."]) != nil ) {
+		[self setOldRoot:theOldRoot];
+		[self setNewRoot:theNewRoot];
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in TreeException" );
+#endif
+	[self setOldRoot:nil];
+	[self setNewRoot:nil];
+	[super dealloc];
+}
+
+- (void) setNewRoot:(id<BaseTree>)aTree
+{
+	if (newRoot != aTree) {
+		[aTree retain];
+		if ( newRoot ) [newRoot release];
+		newRoot = aTree;
+	}
+}
+
+- (void) setOldRoot:(id<BaseTree>)aTree
+{
+	if (oldRoot != aTree) {
+		[aTree retain];
+		if ( oldRoot ) [oldRoot release];
+		oldRoot = aTree;
+	}
+}
+
+- (NSString *) description
+{
+	 return [NSMutableString stringWithFormat:@"%@ old root: <%@> new root: <%@>", [super description], [oldRoot treeDescription], [newRoot treeDescription]];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/TreeFilter.h b/runtime/ObjC/Framework/TreeFilter.h
new file mode 100644
index 0000000..950e573
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeFilter.h
@@ -0,0 +1,203 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Java Stuff
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.RecognizerSharedState;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.TokenStream;
+*/
+
+/**
+ Cut-n-paste from material I'm not using in the book anymore (edit later
+ to make sense):
+
+ Now, how are we going to test these tree patterns against every
+subtree in our original tree?  In what order should we visit nodes?
+For this application, it turns out we need a simple ``apply once''
+rule application strategy and a ``down then up'' tree traversal
+strategy.  Let's look at rule application first.
+
+As we visit each node, we need to see if any of our patterns match. If
+a pattern matches, we execute the associated tree rewrite and move on
+to the next node. In other words, we only look for a single rule
+application opportunity (we'll see below that we sometimes need to
+repeatedly apply rules). The following method applies a rule in a @cl
+TreeParser (derived from a tree grammar) to a tree:
+
+here is where we referenced code/walking/patterns/TreePatternMatcher.java
+
+It uses reflection to lookup the appropriate rule within the generated
+tree parser class (@cl Simplify in this case). Most of the time, the
+rule will not match the tree.  To avoid issuing syntax errors and
+attempting error recovery, it bumps up the backtracking level.  Upon
+failure, the invoked rule immediately returns. If you don't plan on
+using this technique in your own ANTLR-based application, don't sweat
+the details. This method boils down to ``call a rule to match a tree,
+executing any embedded actions and rewrite rules.''
+
+At this point, we know how to define tree grammar rules and how to
+apply them to a particular subtree. The final piece of the tree
+pattern matcher is the actual tree traversal. We have to get the
+correct node visitation order.  In particular, we need to perform the
+scalar-vector multiply transformation on the way down (preorder) and
+we need to reduce multiply-by-zero subtrees on the way up (postorder).
+
+To implement a top-down visitor, we do a depth first walk of the tree,
+executing an action in the preorder position. To get a bottom-up
+visitor, we execute an action in the postorder position.  ANTLR
+provides a standard @cl TreeVisitor class with a depth first search @v
+visit method. That method executes either a @m pre or @m post method
+or both. In our case, we need to call @m applyOnce in both. On the way
+down, we'll look for @r vmult patterns. On the way up,
+we'll look for @r mult0 patterns.
+ */
+
+/*  Java Stuff
+public class TreeFilter extends TreeParser {
+    public interface fptr {
+        public void rule() throws RecognitionException;
+    }
+
+    protected TokenStream originalTokenStream;
+    protected TreeAdaptor originalAdaptor;
+
+    public TreeFilter(TreeNodeStream input) {
+        this(input, new RecognizerSharedState());
+    }
+    public TreeFilter(TreeNodeStream input, RecognizerSharedState state) {
+        super(input, state);
+        originalAdaptor = input.getTreeAdaptor();
+        originalTokenStream = input.getTokenStream();
+    }
+
+    public void applyOnce(Object t, fptr whichRule) {
+        if ( t==null ) return;
+        try {
+            // share TreeParser object but not parsing-related state
+            state = new RecognizerSharedState();
+            input = new CommonTreeNodeStream(originalAdaptor, t);
+            ((CommonTreeNodeStream)input).setTokenStream(originalTokenStream);
+            setBacktrackingLevel(1);
+            whichRule.rule();
+            setBacktrackingLevel(0);
+        }
+        catch (RecognitionException e) { ; }
+    }
+
+    public void downup(Object t) {
+        TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor());
+        TreeVisitorAction actions = new TreeVisitorAction() {
+            public Object pre(Object t)  { applyOnce(t, topdown_fptr); return t; }
+            public Object post(Object t) { applyOnce(t, bottomup_fptr); return t; }
+        };
+        v.visit(t, actions);
+    }
+        
+    fptr topdown_fptr = new fptr() {
+        public void rule() throws RecognitionException {
+            topdown();
+        }
+    };
+
+    fptr bottomup_fptr = new fptr() {
+        public void rule() throws RecognitionException {
+            bottomup();
+        }
+    };
+
+    // methods the downup strategy uses to do the up and down rules.
+    // to override, just define tree grammar rule topdown and turn on
+    // filter=true.
+    public void topdown() throws RecognitionException {;}
+    public void bottomup() throws RecognitionException {;}
+}
+*/
+
+#import "RecognizerSharedState.h"
+#import "TokenStream.h"
+#import "TreeAdaptor.h"
+#import "TreeNodeStream.h"
+#import "TreeParser.h"
+#import "TreeVisitor.h"
+#import "TreeVisitorAction.h"
+
+@class TreeFilter;
+
+@interface fptr : NSObject {
+    SEL whichRule;
+    TreeFilter *treeFilter;
+}
+
+@property (assign) SEL whichRule;
+@property (assign) TreeFilter *treeFilter;
+
++ (fptr *) newfptr:(TreeFilter *)aTreeFilter Rule:(SEL) aRule;
+
+- (fptr *) init:(TreeFilter *)aTreeFilter Rule:(SEL)aRule;
+
+- (void) rule;
+
+@end
+
+@interface TreeFilter : TreeParser {
+    id<TokenStream> originalTokenStream;
+    id<TreeAdaptor> originalAdaptor;
+    fptr *topdown_fptr;
+    fptr *bottomup_fptr;
+}
+
++ (id) newTreeFilter:(id<TreeNodeStream>)input;
+
++ (id) newTreeFilter:(id<TreeNodeStream>)input State:(RecognizerSharedState *)state;
+    
+- (id) initWithStream:(id<TreeNodeStream>)anInput State:(RecognizerSharedState *)aState;
+
+- (void) applyOnce:(id<BaseTree>)t rule:(fptr *)whichRule;
+    
+- (void) downup:(id<BaseTree>)t;
+
+- (void) settopdown_fptr;
+- (void) setbottomdown_fptr;
+    
+    // methods the downup strategy uses to do the up and down rules.
+    // to override, just define tree grammar rule topdown and turn on
+    // filter=true.
+- (void) topdown;
+- (void) bottomup;
+
+@property (retain) id<TokenStream> originalTokenStream;
+@property (retain) id<TreeAdaptor> originalAdaptor;
+@property (retain, setter=settopdown_fptr:) fptr *topdown_fptr;
+@property (retain, setter=setbottomdown_fptr:) fptr *bottomup_fptr;
+
+@end
+// end TreeFilter.h
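As a hedged usage sketch only (not part of the patch): a tree grammar compiled with filter=true yields a TreeFilter subclass that is driven through downup:. MySimplifier and its factory method are hypothetical generated names, and tree is assumed to be a CommonTree built by an earlier parse:

    id<TreeAdaptor> adaptor = [CommonTreeAdaptor newTreeAdaptor];
    CommonTreeNodeStream *nodes = [CommonTreeNodeStream newCommonTreeNodeStream:adaptor Tree:tree];
    MySimplifier *reducer = [MySimplifier newMySimplifier:nodes];  // generated TreeFilter subclass (assumed)
    [reducer downup:tree];  // topdown rules fire pre-order, bottomup rules post-order via applyOnce:rule: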
diff --git a/runtime/ObjC/Framework/TreeFilter.m b/runtime/ObjC/Framework/TreeFilter.m
new file mode 100644
index 0000000..9730ad0
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeFilter.m
@@ -0,0 +1,258 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+     derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Java Stuff
+package org.antlr.runtime.tree;
+
+import org.antlr.runtime.RecognizerSharedState;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.TokenStream;
+*/
+
+/**
+ Cut-n-paste from material I'm not using in the book anymore (edit later
+ to make sense):
+
+ Now, how are we going to test these tree patterns against every
+subtree in our original tree?  In what order should we visit nodes?
+For this application, it turns out we need a simple ``apply once''
+rule application strategy and a ``down then up'' tree traversal
+strategy.  Let's look at rule application first.
+
+As we visit each node, we need to see if any of our patterns match. If
+a pattern matches, we execute the associated tree rewrite and move on
+to the next node. In other words, we only look for a single rule
+application opportunity (we'll see below that we sometimes need to
+repeatedly apply rules). The following method applies a rule in a @cl
+TreeParser (derived from a tree grammar) to a tree:
+
+here is where we referenced code/walking/patterns/TreePatternMatcher.java
+
+It uses reflection to lookup the appropriate rule within the generated
+tree parser class (@cl Simplify in this case). Most of the time, the
+rule will not match the tree.  To avoid issuing syntax errors and
+attempting error recovery, it bumps up the backtracking level.  Upon
+failure, the invoked rule immediately returns. If you don't plan on
+using this technique in your own ANTLR-based application, don't sweat
+the details. This method boils down to ``call a rule to match a tree,
+executing any embedded actions and rewrite rules.''
+
+At this point, we know how to define tree grammar rules and how to
+apply them to a particular subtree. The final piece of the tree
+pattern matcher is the actual tree traversal. We have to get the
+correct node visitation order.  In particular, we need to perform the
+scalar-vector multiply transformation on the way down (preorder) and
+we need to reduce multiply-by-zero subtrees on the way up (postorder).
+
+To implement a top-down visitor, we do a depth first walk of the tree,
+executing an action in the preorder position. To get a bottom-up
+visitor, we execute an action in the postorder position.  ANTLR
+provides a standard @cl TreeVisitor class with a depth first search @v
+visit method. That method executes either a @m pre or @m post method
+or both. In our case, we need to call @m applyOnce in both. On the way
+down, we'll look for @r vmult patterns. On the way up,
+we'll look for @r mult0 patterns.
+ */
+
+/*  Java Stuff
+public class TreeFilter extends TreeParser {
+    public interface fptr {
+        public void rule() throws RecognitionException;
+    }
+
+    protected TokenStream originalTokenStream;
+    protected TreeAdaptor originalAdaptor;
+
+    public TreeFilter(TreeNodeStream input) {
+        this(input, new RecognizerSharedState());
+    }
+    public TreeFilter(TreeNodeStream input, RecognizerSharedState state) {
+        super(input, state);
+        originalAdaptor = input.getTreeAdaptor();
+        originalTokenStream = input.getTokenStream();
+    }
+
+    public void applyOnce(Object t, fptr whichRule) {
+        if ( t==null ) return;
+        try {
+            // share TreeParser object but not parsing-related state
+            state = new RecognizerSharedState();
+            input = new CommonTreeNodeStream(originalAdaptor, t);
+            ((CommonTreeNodeStream)input).setTokenStream(originalTokenStream);
+            setBacktrackingLevel(1);
+            whichRule.rule();
+            setBacktrackingLevel(0);
+        }
+        catch (RecognitionException e) { ; }
+    }
+
+    public void downup(Object t) {
+        TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor());
+        TreeVisitorAction actions = new TreeVisitorAction() {
+            public Object pre(Object t)  { applyOnce(t, topdown_fptr); return t; }
+            public Object post(Object t) { applyOnce(t, bottomup_fptr); return t; }
+        };
+        v.visit(t, actions);
+    }
+        
+    fptr topdown_fptr = new fptr() {
+        public void rule() throws RecognitionException {
+            topdown();
+        }
+    };
+
+    fptr bottomup_fptr = new fptr() {
+        public void rule() throws RecognitionException {
+            bottomup();
+        }
+    };
+
+    // methods the downup strategy uses to do the up and down rules.
+    // to override, just define tree grammar rule topdown and turn on
+    // filter=true.
+    public void topdown() throws RecognitionException {;}
+    public void bottomup() throws RecognitionException {;}
+}
+
+*/
+
+#import "CommonTreeNodeStream.h"
+#import "TreeFilter.h"
+
+@class TreeFilter;
+
+@implementation fptr
+
++ (fptr *) newfptr:(TreeFilter *)aTreeFilter Rule:(SEL) aRule
+{
+    // forward both arguments so whichRule and treeFilter are actually initialized
+    return [[fptr alloc] init:aTreeFilter Rule:aRule];
+}
+
+- (fptr *) init:(TreeFilter *)aTreeFilter Rule:(SEL)aRule
+{
+    if ( (self = [super init]) != nil ) {
+        whichRule = aRule;
+        treeFilter = aTreeFilter;
+    }
+    return self;
+}
+
+- (void) rule
+{
+    if ( [treeFilter respondsToSelector:whichRule] ) {
+        [treeFilter performSelector:whichRule];
+    }
+    return;
+}
+
+@synthesize whichRule;
+@synthesize treeFilter;
+
+@end
+
+@implementation TreeFilter
+
++ (TreeFilter *) newTreeFilter:(id<TreeNodeStream>)anInput
+{
+        
+    return [[TreeFilter alloc] initWithStream:anInput State:[RecognizerSharedState newRecognizerSharedState]];
+}
+
++ (TreeFilter *) newTreeFilter:(id<TreeNodeStream>)anInput State:(RecognizerSharedState *) aState
+{
+    return [[TreeFilter alloc] initWithStream:anInput State:aState];
+}
+
+- (id) initWithStream:(id<TreeNodeStream>)anInput State:(RecognizerSharedState *)aState
+{
+    if (( self = [super initWithStream:anInput State:aState]) != nil ) {
+        originalAdaptor = [anInput getTreeAdaptor];
+        originalTokenStream = [anInput getTokenStream];
+        topdown_fptr = nil;
+        bottomup_fptr = nil;
+    }
+    return self;
+}
+    
+- (void) applyOnce:(id<BaseTree>)t rule:(fptr *)whichRule
+{
+    if ( t == nil ) return;
+    @try {
+        // share TreeParser object but not parsing-related state
+        state = [RecognizerSharedState newRecognizerSharedState];
+        input = [CommonTreeNodeStream newCommonTreeNodeStream:originalAdaptor Tree:(CommonTree *)t];
+        [(CommonTreeNodeStream *)input setTokenStream:originalTokenStream];
+        [self setBacktrackingLevel:1];
+        [whichRule rule];
+        [self setBacktrackingLevel:0];
+    }
+    @catch (RecognitionException *e) { ; }
+}
+    
+- (void) downup:(id<BaseTree>) t
+{
+    TreeVisitor *v = [TreeVisitor newTreeVisitor:[CommonTreeAdaptor newTreeAdaptor]];
+    TreeVisitorAction *actions = [TreeVisitorActionFiltered newTreeVisitorActionFiltered:self 
+                                                                                   RuleD:topdown_fptr
+                                                                                   RuleU:bottomup_fptr];
+    [v visit:t Action:actions];
+}
+    
+- (void) settopdown_fptr
+{
+    SEL aRule = @selector(topdown);
+    topdown_fptr =  [fptr newfptr:self Rule:aRule];
+}
+- (void) setbottomdown_fptr
+{
+    SEL aRule = @selector(bottomup);
+    bottomup_fptr =  [fptr newfptr:self Rule:aRule];
+}
+
+    // methods the downup strategy uses to do the up and down rules.
+    // to override, just define tree grammar rule topdown and turn on
+    // filter=true.
+- (void) topdown
+{
+    return;
+}
+    
+- (void) bottomup
+{
+    return;
+}
+
+@synthesize originalTokenStream;
+@synthesize originalAdaptor;
+@synthesize topdown_fptr;
+@synthesize bottomup_fptr;
+
+@end
+
+// end TreeFilter.m
+
diff --git a/runtime/ObjC/Framework/TreeIterator.h b/runtime/ObjC/Framework/TreeIterator.h
new file mode 100644
index 0000000..36976fc
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeIterator.h
@@ -0,0 +1,72 @@
+//
+//  TreeIterator.h
+//  ANTLR
+//
+//  Created by Ian Michell on 26/04/2010.
+// [The "BSD licence"]
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "CommonTreeAdaptor.h"
+#import "FastQueue.h"
+#import "TreeAdaptor.h"
+#import "Tree.h"
+
+@interface TreeIterator : NSObject 
+{
+	BOOL firstTime;
+	__strong id<TreeAdaptor> adaptor;
+	__strong id<BaseTree> root;
+	__strong id<BaseTree> tree;
+	
+	__strong FastQueue *nodes;
+	__strong id<BaseTree> up;
+	__strong id<BaseTree> down;
+	__strong id<BaseTree> eof;
+}
+
+@property BOOL firstTime;
+@property(retain) id<TreeAdaptor> adaptor;
+@property(retain) id<BaseTree> root;
+@property(retain) id<BaseTree> tree;
+@property(retain) FastQueue *nodes;
+@property(retain, readwrite) id<BaseTree> up;
+@property(retain, readwrite) id<BaseTree> down;
+@property(retain, readwrite) id<BaseTree> eof;
+
++ (TreeIterator *) newANTRLTreeIterator;
++ (TreeIterator *) newANTRLTreeIteratorWithAdaptor:(CommonTreeAdaptor *)adaptor
+                                                andTree:(id<BaseTree>)tree;
+- (id) init;
+- (id) initWithTree:(id<BaseTree>) t;
+- (id) initWithTreeAdaptor:(id<TreeAdaptor>) a andTree:(id<BaseTree>) t;
+
+- (void) reset;
+- (BOOL) hasNext;
+- (id) nextObject;
+- (NSArray *) allObjects;
+
+@end
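A hedged sketch (not part of the patch) of walking a subtree such as ^(PLUS a b) with this iterator; tree is assumed to be a CommonTree, and the DOWN/UP/EOF navigation nodes come from the adaptor:

    TreeIterator *it = [TreeIterator newANTRLTreeIteratorWithAdaptor:[CommonTreeAdaptor newTreeAdaptor]
                                                             andTree:tree];
    while ( [it hasNext] ) {
        id node = [it nextObject];  // yields PLUS, DOWN, a, b, UP, then EOF
        NSLog(@"%@", node);
    }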
diff --git a/runtime/ObjC/Framework/TreeIterator.m b/runtime/ObjC/Framework/TreeIterator.m
new file mode 100644
index 0000000..a79f97f
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeIterator.m
@@ -0,0 +1,202 @@
+//
+//  TreeIterator.m
+//  ANTLR
+//
+//  Created by Ian Michell on 26/04/2010.
+// Copyright (c) 2010 Ian Michell 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import "TreeIterator.h"
+#import "CommonTreeAdaptor.h"
+
+@implementation TreeIterator
+
++ (TreeIterator *) newANTRLTreeIterator
+{
+    return [[TreeIterator alloc] init];
+}
+
++ (TreeIterator *) newANTRLTreeIteratorWithAdaptor:(CommonTreeAdaptor *)adaptor
+                                                andTree:(id<BaseTree>)tree
+{
+    return [[TreeIterator alloc] initWithTreeAdaptor:adaptor andTree:tree];
+}
+
+- (id) init
+{
+    self = [super init];
+    if ( self != nil ) {
+        firstTime = YES;
+        // assume a default CommonTreeAdaptor so the DOWN/UP/EOF navigation nodes below can be created
+        adaptor = [[CommonTreeAdaptor newTreeAdaptor] retain];
+        nodes = [[FastQueue newFastQueue] retain];
+        down = [[adaptor createTree:TokenTypeDOWN Text:@"DOWN"] retain];
+        up = [[adaptor createTree:TokenTypeUP Text:@"UP"] retain];
+        eof = [[adaptor createTree:TokenTypeEOF Text:@"EOF"] retain];
+        tree = eof;
+        root = eof;
+    }
+    return self;
+}
+
+-(id) initWithTree:(id<BaseTree>) t
+{
+    self = [super init];
+    if ( self != nil ) {
+        firstTime = YES;
+        adaptor = [[CommonTreeAdaptor newTreeAdaptor] retain];
+        tree = [t retain];
+        root = t;
+        nodes = [[FastQueue newFastQueue] retain];
+        down = [[adaptor createTree:TokenTypeDOWN Text:@"DOWN"] retain];
+        up = [[adaptor createTree:TokenTypeUP Text:@"UP"] retain];
+        eof = [[adaptor createTree:TokenTypeEOF Text:@"EOF"] retain];
+    }
+    return self;
+}
+
+-(id) initWithTreeAdaptor:(id<TreeAdaptor>)a andTree:(id<BaseTree>)t
+{
+    self = [super init];
+    if ( self != nil ) {
+        firstTime = YES;
+        adaptor = [a retain];
+        tree = [t retain];
+        root = t;
+        nodes = [[FastQueue newFastQueue] retain];
+        down = [[adaptor createTree:TokenTypeDOWN Text:@"DOWN"] retain];
+        up = [[adaptor createTree:TokenTypeUP Text:@"UP"] retain];
+        eof = [[adaptor createTree:TokenTypeEOF Text:@"EOF"] retain];
+    }
+    return self;
+}
+
+- (void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in TreeIterator" );
+#endif
+    if ( adaptor ) [adaptor release];
+    if ( nodes ) [nodes release];
+    if ( tree && tree != eof ) [tree release];
+    if ( root && root != eof && root != tree ) [root release];
+    if ( down ) [down release];    
+    if ( up ) [up release];    
+    if ( eof ) [eof release];    
+    [super dealloc];
+}
+
+- (void)reset
+{
+    firstTime = YES;
+    tree = root;
+    [nodes clear];
+}
+
+-(BOOL) hasNext
+{
+    if ( firstTime ) {
+        return root != nil;
+    }
+    if ( nodes && [nodes size] > 0) {
+        return YES;
+    }
+    if ( tree == nil ) {
+        return NO;
+    }
+    if ( [adaptor getChildCount:tree] > 0 ) {
+        return YES;
+    }
+    return [adaptor getParent:tree] != nil;
+}
+
+-(id) nextObject
+{
+    // is this the first time we are using this method?
+    if ( firstTime ) {
+        firstTime = NO;
+        if ( [adaptor getChildCount:tree] == 0 ) {
+            [nodes addObject:eof];
+            return tree;
+        }
+        return tree;
+    }
+    // do we have any objects queued up?
+    if ( nodes && [nodes size] > 0 ) {
+        return [nodes remove];
+    }
+    // no nodes left?
+    if ( tree == nil ) {
+        return eof;
+    }
+    if ( [adaptor getChildCount:tree] > 0 ) {
+        tree = [adaptor getChild:tree At:0];
+        [nodes addObject:tree]; // real node is next after down
+        return self.down;
+    }
+    // if no children, look for next sibling of ancestor
+    id<BaseTree> parent = [adaptor getParent:tree];
+    while (parent != nil && ([adaptor getChildIndex:tree] + 1) >= [adaptor getChildCount:parent]) {
+        [nodes addObject:up];
+        tree = parent;
+        parent = [adaptor getParent:tree];
+    }
+    if ( parent == nil ) {
+        tree = nil;
+        [nodes addObject:self.eof];
+        return [nodes remove];
+    }
+    // must have found a node with an unvisited sibling
+    // move to it and return it
+    NSInteger nextSiblingIndex = [adaptor getChildIndex:tree] + 1;
+    tree = [adaptor getChild:parent At:nextSiblingIndex];
+    [nodes addObject:tree];
+    return [nodes remove];
+}
+
+-(NSArray *) allObjects
+{
+    AMutableArray *array = [AMutableArray arrayWithCapacity:10];
+    while ( [self hasNext] ) {
+        [array addObject:[self nextObject]];
+    }
+    return array;
+}
+
+- (void)remove
+{
+    @throw [RuntimeException newException:@"UnsupportedOperationException"];
+}
+
+@synthesize firstTime;
+@synthesize adaptor;
+@synthesize root;
+@synthesize tree;
+@synthesize nodes;
+
+@synthesize up;
+@synthesize down;
+@synthesize eof;
+
+@end
diff --git a/runtime/ObjC/Framework/TreeNodeStream.h b/runtime/ObjC/Framework/TreeNodeStream.h
new file mode 100644
index 0000000..944a5e0
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeNodeStream.h
@@ -0,0 +1,103 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Foundation/Foundation.h>
+#import "IntStream.h"
+#import "CharStream.h"
+#import "TokenStream.h"
+#import "CommonTree.h"
+#import "CommonTreeAdaptor.h"
+
+@protocol TreeNodeStream < IntStream > 
+
+- (id) initWithTree:(CommonTree *)theTree;
+
+/** Get a tree node at an absolute index i; 0..n-1.
+ *  If you don't want to buffer up nodes, then this method makes no
+ *  sense for you.
+ */
+- (id) get:(NSInteger) idx;
+/** Get tree node at current input pointer + i ahead where i=1 is next node.
+ *  i<0 indicates nodes in the past.  So LT(-1) is previous node, but
+ *  implementations are not required to provide results for k < -1.
+ *  LT(0) is undefined.  For i>=n, return null.
+ *  Return null for LT(0) and any index that results in an absolute address
+ *  that is negative.
+ *
+ *  This is analogous to the LT() method of the TokenStream, but this
+ *  returns a tree node instead of a token.  Makes code gen identical
+ *  for both parser and tree grammars. :)
+ */
+- (id) LT:(NSInteger)k;
+/** Where is this stream pulling nodes from?  This is not the name, but
+ *  the object that provides node objects.
+ */
+- (id) getTreeSource;
+/** If the tree associated with this stream was created from a TokenStream,
+ *  you can specify it here.  Used to do rule $text attribute in tree
+ *  parser.  Optional unless you use tree parser rule text attribute
+ *  or output=template and rewrite=true options.
+ */
+- (id<TokenStream>) getTokenStream; 
+/** What adaptor can tell me how to interpret/navigate nodes and
+ *  trees.  E.g., get text of a node.
+ */
+- (id<TreeAdaptor>) getTreeAdaptor;
+/** As we flatten the tree, we use UP, DOWN nodes to represent
+ *  the tree structure.  When debugging we need unique nodes
+ *  so we have to instantiate new ones.  When doing normal tree
+ *  parsing, it's slow and a waste of memory to create unique
+ *  navigation nodes.  Default should be false;
+ */
+- (void) setUniqueNavigationNodes:(BOOL)flag;
+/** Reset the tree node stream in such a way that it acts like
+ *  a freshly constructed stream.
+ */
+- (void) reset;
+
+/** Return the text of all nodes from start to stop, inclusive.
+ *  If the stream does not buffer all the nodes then it can still
+ *  walk recursively from start until stop.  You can always return
+ *  null or "" too, but users should not access $ruleLabel.text in
+ *  an action of course in that case.
+ */
+- (NSString *) toStringFromNode:(id)startNode ToNode:(id)stopNode;
+
+/** Replace from start to stop child index of parent with t, which might
+ *  be a list.  Number of children may be different
+ *  after this call.  The stream is notified because it is walking the
+ *  tree and might need to know you are monkeying with the underlying
+ *  tree.  Also, it might be able to modify the node stream to avoid
+ *  restreaming for future phases.
+ *
+ *  If parent is null, don't do anything; must be at root of overall tree.
+ *  Can't replace whatever points to the parent externally.  Do nothing.
+ */
+- (void) replaceChildren:(id)parent From:(NSInteger)startChildIndex To:(NSInteger)stopChildIndex With:(id) t;
+    
+
+@end
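Restating the LT: contract above as a hedged snippet; nodes stands for any concrete id<TreeNodeStream> and is an assumption for illustration:

    id current   = [nodes LT:1];   // next node to be consumed
    id following = [nodes LT:2];   // one node further ahead
    id previous  = [nodes LT:-1];  // most recently consumed node; k < -1 need not be supported
    [nodes consume];               // from IntStream: advance, so LT:1 now answers what LT:2 did before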
diff --git a/runtime/ObjC/Framework/TreeParser.h b/runtime/ObjC/Framework/TreeParser.h
new file mode 100644
index 0000000..0f98fd6
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeParser.h
@@ -0,0 +1,87 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "BaseRecognizer.h"
+#import "TreeNodeStream.h"
+#import "CommonTreeAdaptor.h"
+#import "MismatchedTreeNodeException.h"
+
+@interface TreeParser : BaseRecognizer {
+	id<TreeNodeStream> input;
+}
+
+@property (retain, getter=input, setter=setInput:) id<TreeNodeStream> input;
+
++ (id) newTreeParser:(id<TreeNodeStream>)anInput;
++ (id) newTreeParser:(id<TreeNodeStream>)anInput State:(RecognizerSharedState *)state;
+
+- (id) initWithStream:(id<TreeNodeStream>)theInput;
+- (id) initWithStream:(id<TreeNodeStream>)theInput
+                State:(RecognizerSharedState *)state;
+
+
+- (id<TreeNodeStream>)input;
+- (void) setInput:(id<TreeNodeStream>)anInput;
+
+- (void) setTreeNodeStream:(id<TreeNodeStream>) anInput;
+- (id<TreeNodeStream>) getTreeNodeStream;
+
+- (NSString *)getSourceName;
+
+- (id) getCurrentInputSymbol:(id<IntStream>) anInput;
+
+- (id) getMissingSymbol:(id<IntStream>)input
+              Exception:(RecognitionException *) e
+          ExpectedToken:(NSInteger) expectedTokenType
+                 BitSet:(ANTLRBitSet *)follow;
+
+/** Matching '.' in a tree parser has special meaning: skip the node, or
+ *  the entire subtree if the node has children, scanning until the
+ *  corresponding UP node.
+ */
+- (void) matchAny:(id<IntStream>)ignore;
+
+/** We have DOWN/UP nodes in the stream that have no line info, so override;
+ *  we also want to alter the exception type.  Don't try to recover
+ *  from tree parser errors inline...
+ */
+- (id) recoverFromMismatchedToken:(id<IntStream>)anInput
+                             Type:(NSInteger)ttype
+                           Follow:(ANTLRBitSet *)follow;
+
+/** Prefix the error message with the grammar name, because the message is
+ *  always intended for the programmer; the parser, not the user, built
+ *  the input tree.
+ */
+- (NSString *)getErrorHeader:(RecognitionException *)e;
+
+- (NSString *)getErrorMessage:(RecognitionException *)e TokenNames:(AMutableArray *) tokenNames;
+
+- (void) traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex;
+- (void) traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex;
+
+@end
diff --git a/runtime/ObjC/Framework/TreeParser.m b/runtime/ObjC/Framework/TreeParser.m
new file mode 100644
index 0000000..949771f
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeParser.m
@@ -0,0 +1,192 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TreeParser.h"
+
+@implementation TreeParser
+
+@synthesize input;
+
++ (id) newTreeParser:(id<TreeNodeStream>)anInput
+{
+    return [[TreeParser alloc] initWithStream:anInput];
+}
+
++ (id) newTreeParser:(id<TreeNodeStream>)anInput State:(RecognizerSharedState *)theState
+{
+    return [[TreeParser alloc] initWithStream:anInput State:theState];
+}
+
+- (id) initWithStream:(id<TreeNodeStream>)theInput
+{
+	if ((self = [super init]) != nil) {
+		[self setInput:theInput];
+	}
+	return self;
+}
+
+- (id) initWithStream:(id<TreeNodeStream>)theInput State:(RecognizerSharedState *)theState
+{
+	if ((self = [super init]) != nil) {
+		[self setInput:theInput];
+        state = theState;
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in TreeParser" );
+#endif
+	if ( input ) [input release];
+	[super dealloc];
+}
+
+- (void) reset
+{
+    [super reset]; // reset all recognizer state variables
+    if ( input != nil ) {
+        [input seek:0]; // rewind the input
+    }
+}
+
+- (void) mismatch:(id<IntStream>)aStream tokenType:(TokenType)aTType follow:(ANTLRBitSet *)aBitset
+{
+	MismatchedTreeNodeException *mte = [MismatchedTreeNodeException newException:aTType Stream:aStream];
+    [mte setNode:[((id<TreeNodeStream>)aStream) LT:1]];
+	[self recoverFromMismatchedToken:aStream Type:aTType Follow:aBitset];
+}
+
+- (void) setTreeNodeStream:(id<TreeNodeStream>) anInput
+{
+    input = anInput;
+}
+
+- (id<TreeNodeStream>) getTreeNodeStream
+{
+    return input;
+}
+
+- (NSString *)getSourceName
+{
+    return [input getSourceName];
+}
+
+- (id) getCurrentInputSymbol:(id<IntStream>) anInput
+{
+    return [(id<TreeNodeStream>)anInput LT:1];
+}
+
+- (id) getMissingSymbol:(id<IntStream>)anInput
+              Exception:(RecognitionException *)e
+          ExpectedToken:(NSInteger)expectedTokenType
+                 BitSet:(ANTLRBitSet *)follow
+{
+    NSString *tokenText =[NSString stringWithFormat:@"<missing %@ %d>", [self getTokenNames], expectedTokenType];
+    //id<TreeAdaptor> anAdaptor = (id<TreeAdaptor>)[((id<TreeNodeStream>)e.input) getTreeAdaptor];
+    //return [anAdaptor createToken:expectedTokenType Text:tokenText];
+    return [CommonToken newToken:expectedTokenType Text:tokenText];
+}
+
+/** Matching '.' in a tree parser has special meaning: skip the node, or
+ *  the entire subtree if the node has children, scanning until the
+ *  corresponding UP node.
+ */
+- (void) matchAny:(id<IntStream>)ignore
+{ // ignore stream, copy of input
+    state.errorRecovery = NO;
+    state.failed = NO;
+    id look = [input LT:1];
+    if ( [((CommonTreeAdaptor *)[input getTreeAdaptor]) getChildCount:look] == 0) {
+        [input consume]; // not subtree, consume 1 node and return
+        return;
+    }
+    // current node is a subtree, skip to corresponding UP.
+    // must count nesting level to get right UP
+    int level=0;
+    int tokenType = [((id<TreeAdaptor>)[input getTreeAdaptor]) getType:look];
+    while ( tokenType != TokenTypeEOF && !( tokenType == TokenTypeUP && level == 0) ) {
+        [input consume];
+        look = [input LT:1];
+        tokenType = [((id<TreeAdaptor>)[input getTreeAdaptor]) getType:look];
+        if ( tokenType == TokenTypeDOWN ) {
+            level++;
+        }
+        else if ( tokenType == TokenTypeUP ) {
+            level--;
+        }
+    }
+    [input consume]; // consume UP
+}
+
+/** We have DOWN/UP nodes in the stream that have no line info, so override;
+ *  we also want to alter the exception type.  Don't try to recover
+ *  from tree parser errors inline...
+ */
+- (id) recoverFromMismatchedToken:(id<IntStream>)anInput Type:(NSInteger)ttype Follow:(ANTLRBitSet *)follow
+{
+    @throw [MismatchedTreeNodeException newException:ttype Stream:anInput];
+}
+
+/** Prefix the error message with the grammar name, because the message is
+ *  always intended for the programmer; the parser, not the user, built
+ *  the input tree.
+ */
+- (NSString *)getErrorHeader:(RecognitionException *)e
+{
+     return [NSString stringWithFormat:@"%@: node after line %@:%@",
+            [self getGrammarFileName], e.line, e.charPositionInLine];
+}
+
+/** Tree parsers parse nodes, which usually have a token object as
+ *  payload. Set the exception token and do the default behavior.
+ */
+- (NSString *)getErrorMessage:(RecognitionException *)e  TokenNames:(AMutableArray *) theTokNams
+{
+    if ( [self isKindOfClass:[TreeParser class]] ) {
+        CommonTreeAdaptor *adaptor = (CommonTreeAdaptor *)[((id<TreeNodeStream>)e.input) getTreeAdaptor];
+        e.token = [adaptor getToken:((CommonTree *)e.node)];
+        if ( e.token == nil ) { // could be an UP/DOWN node
+            e.token = [CommonToken newToken:[adaptor getType:(CommonTree *)e.node]
+                                                        Text:[adaptor getText:(CommonTree *)e.node]];
+        }
+    }
+    return [super getErrorMessage:e TokenNames:theTokNams];
+}
+
+- (void) traceIn:(NSString *)ruleName Index:(NSInteger)ruleIndex
+{
+    [super traceIn:ruleName Index:ruleIndex Object:[input LT:1]];
+}
+
+- (void) traceOut:(NSString *)ruleName Index:(NSInteger)ruleIndex
+{
+    [super traceOut:ruleName Index:ruleIndex  Object:[input LT:1]];
+}
+
+
+@end
diff --git a/runtime/ObjC/Framework/TreePatternLexer.h b/runtime/ObjC/Framework/TreePatternLexer.h
new file mode 100644
index 0000000..e5b4754
--- /dev/null
+++ b/runtime/ObjC/Framework/TreePatternLexer.h
@@ -0,0 +1,89 @@
+//
+//  TreePatternLexer.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/18/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+
+typedef enum {
+	LexerTokenTypeEOF = -1,
+	LexerTokenTypeInvalid,
+	LexerTokenTypeBEGIN,
+	LexerTokenTypeEND,
+	LexerTokenTypeID,
+	LexerTokenTypeARG,
+	LexerTokenTypePERCENT,
+	LexerTokenTypeCOLON,
+	LexerTokenTypeDOT,
+} LexerTokenType;
+
+
+@interface TreePatternLexer : NSObject {
+
+/** The tree pattern to lex like "(A B C)" */
+NSString *pattern;
+    
+/** Index into input string */
+NSInteger p;
+    
+/** Current char */
+NSInteger c;
+    
+/** How long is the pattern in char? */
+NSInteger n;
+    
+/** Set when token type is ID or ARG (name mimics Java's StreamTokenizer) */
+NSMutableData *sval;
+__strong char *data;
+    
+BOOL error;
+
+}
+
+@property (retain) NSString *pattern;
+@property (assign) NSInteger p;
+@property (assign) NSInteger c;
+@property (assign) NSInteger n;
+@property (retain, getter=getSval, setter=setSval:) NSMutableData *sval;
+@property (assign) char *data;
+@property (assign) BOOL error;
+
++ (TreePatternLexer *)newTreePatternLexer:(NSString *)aPattern;
+- (id) init;
+- (id) initWithPattern:(NSString *)aPattern;
+
+- (void) dealloc;
+- (NSInteger) nextToken;
+- (void) consume;
+- (NSString *)toString;
+
+- (NSMutableData *)getSval;
+- (void) setSval:(NSMutableData *)aSval;
+
+@end
diff --git a/runtime/ObjC/Framework/TreePatternLexer.m b/runtime/ObjC/Framework/TreePatternLexer.m
new file mode 100644
index 0000000..2eafae3
--- /dev/null
+++ b/runtime/ObjC/Framework/TreePatternLexer.m
@@ -0,0 +1,191 @@
+//
+//  TreePatternLexer.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/18/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TreePatternLexer.h"
+
+@implementation TreePatternLexer
+
+@synthesize pattern;
+@synthesize p;
+@synthesize c;
+@synthesize n;
+@synthesize sval;
+@synthesize data;
+@synthesize error;
+
++ (TreePatternLexer *)newTreePatternLexer:(NSString *)aPattern
+{
+    return [[TreePatternLexer alloc] initWithPattern:aPattern];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil ) {
+        p = -1;
+        n = 0;
+        error = NO;
+        sval = [[NSMutableData dataWithLength:1000] retain];
+        data = [sval mutableBytes];
+        pattern = @"";
+        n = [pattern length];
+        if ( pattern ) [pattern retain];
+        [self consume];
+    }
+    return self;
+}
+
+- (id) initWithPattern:(NSString *)aPattern
+{
+    if ((self = [super init]) != nil ) {
+        p = -1;
+        n = 0;
+        error = NO;
+        sval = [[NSMutableData dataWithLength:1000] retain];
+        data = [sval mutableBytes];
+        pattern = [aPattern retain];
+        n = [pattern length];
+        [self consume];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in TreePatternLexer" );
+#endif
+	if ( pattern ) [pattern release];
+	if ( sval ) [sval release];
+	[super dealloc];
+}
+
+- (NSInteger) nextToken
+{
+    n = 0; // reset sval length, but reuse the buffer (n serves as the write index into data below)
+    while ( c != LexerTokenTypeEOF ) {
+        if ( c==' ' || c=='\n' || c=='\r' || c=='\t' ) {
+            [self consume];
+            continue;
+        }
+        if ( (c>='a' && c<='z') || (c>='A' && c<='Z') || c=='_' ) {
+            data[n++] = (char)c;
+            [self consume];
+            while ( (c>='a' && c<='z') || (c>='A' && c<='Z') ||
+                   (c>='0' && c<='9') || c=='_' )
+            {
+                data[n++] = (char)c;
+                [self consume];
+            }
+            return LexerTokenTypeID;
+        }
+        if ( c == '(' ) {
+            [self consume];
+            return LexerTokenTypeBEGIN;
+        }
+        if ( c==')' ) {
+            [self consume];
+            return LexerTokenTypeEND;
+        }
+        if ( c=='%' ) {
+            [self consume];
+            return LexerTokenTypePERCENT;
+        }
+        if ( c==':' ) {
+            [self consume];
+            return LexerTokenTypeCOLON;
+        }
+        if ( c=='.' ) {
+            [self consume];
+            return LexerTokenTypeDOT;
+        }
+        if ( c=='[' ) { // grab [x] as a string, returning x
+            [self consume];
+            while ( c!=']' ) {
+                if ( c=='\\' ) {
+                    [self consume];
+                    if ( c!=']' ) {
+                        data[n++] = (char)'\\';
+                    }
+                    data[n++] = (char)c;
+                }
+                else {
+                    data[n++] = (char)c;
+                }
+                [self consume];
+            }
+            [self consume];
+            return LexerTokenTypeARG;
+        }
+        [self consume];
+        error = true;
+        return LexerTokenTypeEOF;
+    }
+    return LexerTokenTypeEOF;
+}
+
+- (void) consume
+{
+    p++;
+    if ( p >= n ) {
+        c = LexerTokenTypeEOF;
+    }
+    else {
+        c = [pattern characterAtIndex:p];
+    }
+}
+
+- (NSString *)toString
+{
+    char buf[100];
+
+    NSInteger idx = 0;
+    for( NSInteger i = p; i < n; i++ ){
+        buf[idx++] = data[i];
+    }
+    buf[idx] = '\0';
+    return [NSString stringWithFormat:@"%s", buf];
+}
+
+- (NSMutableData *)getSval
+{
+    return sval;
+}
+
+- (void)setSval:(NSMutableData *)aSval
+{
+    if ( sval != aSval ) {
+        if ( sval ) [sval release];
+        [aSval retain];
+    }
+    sval = aSval;
+}
+
+@end
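+
+#ifdef DONTUSENOMO
+// Illustrative usage sketch (not part of the upstream sources): drive the lexer over a
+// pattern string and walk the token types it produces.
+static void exampleLexTreePattern(void)
+{
+    TreePatternLexer *lexer = [TreePatternLexer newTreePatternLexer:@"(A B[foo] C)"];
+    NSInteger ttype = [lexer nextToken];
+    while ( ttype != LexerTokenTypeEOF ) {
+        // BEGIN, ID, ARG, END, ...; for ID/ARG the characters accumulate in sval/data
+        NSLog(@"token type = %ld", (long)ttype);
+        ttype = [lexer nextToken];
+    }
+    [lexer release];
+}
+#endif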
diff --git a/runtime/ObjC/Framework/TreePatternParser.h b/runtime/ObjC/Framework/TreePatternParser.h
new file mode 100644
index 0000000..0b7b90b
--- /dev/null
+++ b/runtime/ObjC/Framework/TreePatternParser.h
@@ -0,0 +1,63 @@
+//
+//  TreePatternParser.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/18/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "TreePatternLexer.h"
+#import "TreeWizard.h"
+#import "TreeAdaptor.h"
+
+@interface TreePatternParser : NSObject {
+
+TreePatternLexer *tokenizer;
+NSInteger ttype;
+TreeWizard *wizard;
+id<TreeAdaptor> adaptor;
+    
+}
+
++ (TreePatternParser *)newTreePatternParser:(TreePatternLexer *)aTokenizer
+                                               Wizard:(TreeWizard *)aWizard
+                                              Adaptor:(id<TreeAdaptor>)anAdaptor;
+- (id) init;
+- (id) initWithTokenizer:(TreePatternLexer *)tokenizer
+                  Wizard:(TreeWizard *)aWizard
+                 Adaptor:(id<TreeAdaptor>)anAdaptor;
+
+- (void) dealloc;
+- (id<BaseTree>) pattern;
+- (id<BaseTree>) parseTree;
+- (id<BaseTree>) parseNode;
+
+@property (retain) TreePatternLexer *tokenizer;
+@property NSInteger ttype;
+@property (retain) TreeWizard *wizard;
+@property (retain) id<TreeAdaptor> adaptor;
+@end
diff --git a/runtime/ObjC/Framework/TreePatternParser.m b/runtime/ObjC/Framework/TreePatternParser.m
new file mode 100644
index 0000000..2afbd5f
--- /dev/null
+++ b/runtime/ObjC/Framework/TreePatternParser.m
@@ -0,0 +1,197 @@
+//
+//  TreePatternParser.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/18/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TreePatternParser.h"
+#import "TreePatternLexer.h"
+
+@implementation TreePatternParser
+
++ (TreePatternParser *)newTreePatternParser:(TreePatternLexer *)aTokenizer
+                                               Wizard:(TreeWizard *)aWizard
+                                              Adaptor:(id<TreeAdaptor>)anAdaptor
+{
+    return [[TreePatternParser alloc] initWithTokenizer:aTokenizer Wizard:aWizard Adaptor:anAdaptor];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil) {
+        //tokenizer = aTokenizer;
+        //wizard = aWizard;
+        //adaptor = anAdaptor;
+        //ttype = [tokenizer nextToken]; // kickstart
+    }
+    return self;
+}
+
+- (id) initWithTokenizer:(TreePatternLexer *)aTokenizer
+                  Wizard:(TreeWizard *)aWizard
+                 Adaptor:(id<TreeAdaptor>)anAdaptor
+{
+    if ((self = [super init]) != nil) {
+        adaptor = anAdaptor;
+        if ( adaptor ) [adaptor retain];
+        tokenizer = aTokenizer;
+        if ( tokenizer ) [tokenizer retain];
+        wizard = aWizard;
+        if ( wizard ) [wizard retain];
+        ttype = [aTokenizer nextToken]; // kickstart
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in TreePatternParser" );
+#endif
+	if ( adaptor ) [adaptor release];
+	if ( tokenizer ) [tokenizer release];
+	if ( wizard ) [wizard release];
+	[super dealloc];
+}
+
+- (id<BaseTree>)pattern
+{
+    if ( ttype==LexerTokenTypeBEGIN ) {
+        return [self parseTree];
+    }
+    else if ( ttype==LexerTokenTypeID ) {
+        id<BaseTree> node = [self parseNode];
+        if ( ttype==LexerTokenTypeEOF ) {
+            return node;
+        }
+        return nil; // extra junk on end
+    }
+    return nil;
+}
+
+- (id<BaseTree>) parseTree
+{
+    if ( ttype != LexerTokenTypeBEGIN ) {
+        @throw [RuntimeException newException:@"no BEGIN"];
+    }
+    ttype = [tokenizer nextToken];
+    id<BaseTree> root = [self parseNode];
+    if ( root==nil ) {
+        return nil;
+    }
+    while ( ttype==LexerTokenTypeBEGIN  ||
+           ttype==LexerTokenTypeID      ||
+           ttype==LexerTokenTypePERCENT ||
+           ttype==LexerTokenTypeDOT )
+    {
+        if ( ttype==LexerTokenTypeBEGIN ) {
+            id<BaseTree> subtree = [self parseTree];
+            [adaptor addChild:subtree toTree:root];
+        }
+        else {
+            id<BaseTree> child = [self parseNode];
+            if ( child == nil ) {
+                return nil;
+            }
+            [adaptor addChild:child toTree:root];
+        }
+    }
+    if ( ttype != LexerTokenTypeEND ) {
+        @throw [RuntimeException newException:@"no END"];
+    }
+    ttype = [tokenizer nextToken];
+    return root;
+}
+
+- (id<BaseTree>) parseNode
+{
+    // "%label:" prefix
+    NSString *label = nil;
+    TreePattern *node;
+    if ( ttype == LexerTokenTypePERCENT ) {
+        ttype = [tokenizer nextToken];
+        if ( ttype != LexerTokenTypeID ) {
+            return nil;
+        }
+        label = [tokenizer toString];
+        ttype = [tokenizer nextToken];
+        if ( ttype != LexerTokenTypeCOLON ) {
+            return nil;
+        }
+        ttype = [tokenizer nextToken]; // move to ID following colon
+    }
+    
+    // Wildcard?
+    if ( ttype == LexerTokenTypeDOT ) {
+        ttype = [tokenizer nextToken];
+        id<Token> wildcardPayload = [CommonToken newToken:0 Text:@"."];
+        node = [ANTLRWildcardTreePattern newANTLRWildcardTreePattern:wildcardPayload];
+        if ( label != nil ) {
+            node.label = label;
+        }
+        return node;
+    }
+    
+    // "ID" or "ID[arg]"
+    if ( ttype != LexerTokenTypeID ) {
+        return nil;
+    }
+    NSString *tokenName = [tokenizer toString];
+    ttype = [tokenizer nextToken];
+    if ( [tokenName isEqualToString:@"nil"] ) {
+        return [adaptor emptyNode];
+    }
+    NSString *text = tokenName;
+    // check for arg
+    NSString *arg = nil;
+    if ( ttype == LexerTokenTypeARG ) {
+        arg = [tokenizer toString];
+        text = arg;
+        ttype = [tokenizer nextToken];
+    }
+    
+    // create node
+    int treeNodeType = [wizard getTokenType:tokenName];
+    if ( treeNodeType==TokenTypeInvalid ) {
+        return nil;
+    }
+    node = [adaptor createTree:treeNodeType Text:text];
+    if ( label!=nil && [node class] == [TreePattern class] ) {
+        ((TreePattern *)node).label = label;
+    }
+    if ( arg!=nil && [node class] == [TreePattern class] ) {
+        ((TreePattern *)node).hasTextArg = YES;
+    }
+    return node;
+}
+
+@synthesize tokenizer;
+@synthesize ttype;
+@synthesize wizard;
+@synthesize adaptor;
+@end
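+
+#ifdef DONTUSENOMO
+// Illustrative usage sketch (not part of the upstream sources): turn a pattern string such
+// as @"(PLUS INT INT)" into a tree.  tokenNames is assumed to be the token-name table of
+// the recognizer that produced the trees being matched.
+static id<BaseTree> exampleParsePattern(NSString *patternText, NSArray *tokenNames)
+{
+    id<TreeAdaptor> adaptor = [CommonTreeAdaptor newTreeAdaptor];
+    TreeWizard *wizard = [TreeWizard newTreeWizard:adaptor TokenNames:tokenNames];
+    TreePatternLexer *lexer = [TreePatternLexer newTreePatternLexer:patternText];
+    TreePatternParser *parser = [TreePatternParser newTreePatternParser:lexer
+                                                                  Wizard:wizard
+                                                                 Adaptor:adaptor];
+    return [parser pattern]; // nil when the pattern is malformed or has trailing junk
+}
+#endif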
diff --git a/runtime/ObjC/Framework/TreeRewriter.h b/runtime/ObjC/Framework/TreeRewriter.h
new file mode 100644
index 0000000..fa3a884
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeRewriter.h
@@ -0,0 +1,78 @@
+//
+//  TreeRewriter.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/17/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "TreeParser.h"
+
+@interface ANTLRfptr : NSObject {
+    id  actor;
+    SEL ruleSEL;
+}
+
++ (ANTLRfptr *)newANTLRfptrWithRule:(SEL)aRuleAction withObject:(id)anObject;
+-initWithRule:(SEL)ruleAction withObject:(id)anObject;
+
+- (id)rule;
+
+@property (retain) id  actor;
+@property SEL ruleSEL;
+@end
+
+@interface TreeRewriter : TreeParser {
+    BOOL showTransformations;
+    id<TokenStream> originalTokenStream;
+    id<TreeAdaptor> originalAdaptor;
+    ANTLRfptr *rule;
+    ANTLRfptr *topdown_fptr;
+    ANTLRfptr *bottomup_ftpr;
+}
+
++ (TreeRewriter *) newTreeRewriter:(id<TreeNodeStream>)anInput;
++ (TreeRewriter *) newTreeRewriter:(id<TreeNodeStream>)anInput State:(RecognizerSharedState *)aState;
+- (id)initWithStream:(id<TreeNodeStream>)anInput;
+- (id)initWithStream:(id<TreeNodeStream>)anInput State:(RecognizerSharedState *)aState;
+- (id) applyOnce:(CommonTree *)t Rule:(ANTLRfptr *)whichRule;
+- (id) applyRepeatedly:(CommonTree *)t Rule:(ANTLRfptr *)whichRule;
+- (id) downup:(CommonTree *)t;
+- (id) pre:(CommonTree *)t;
+- (id) post:(CommonTree *)t;
+- (id) downup:(CommonTree *)t XForm:(BOOL)aShowTransformations;
+- (void)reportTransformation:(CommonTree *)oldTree Tree:(CommonTree *)newTree;
+- (id) topdown_fptr;
+- (id) bottomup_ftpr;
+- (id) topdown;
+- (id) bottomup;
+
+@property BOOL showTransformations;
+@property (retain) id<TokenStream> originalTokenStream;
+@property (retain) id<TreeAdaptor> originalAdaptor;
+@property (retain) ANTLRfptr *rule;
+@end
diff --git a/runtime/ObjC/Framework/TreeRewriter.m b/runtime/ObjC/Framework/TreeRewriter.m
new file mode 100644
index 0000000..c5ea12f
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeRewriter.m
@@ -0,0 +1,250 @@
+//
+//  TreeRewriter.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/17/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TreeRewriter.h"
+#import "CommonTreeNodeStream.h"
+#import "TreeRuleReturnScope.h"
+#import "CommonTreeAdaptor.h"
+#import "TreeVisitor.h"
+
+@implementation ANTLRfptr
+
++ (ANTLRfptr *)newANTLRfptrWithRule:(SEL)aRuleAction withObject:(id)anObject
+{
+    return [[ANTLRfptr alloc] initWithRule:aRuleAction withObject:(id)anObject];
+}
+
+-initWithRule:(SEL)aRuleAction withObject:(id)anObject
+{
+    if ((self = [super init]) != nil) {
+        actor = anObject;
+        ruleSEL = aRuleAction;
+    }
+    return self;
+}
+
+- (id)rule
+{
+	if ( [actor respondsToSelector:ruleSEL] )
+		return [actor performSelector:ruleSEL];
+    else
+        @throw [RuntimeException newException:@"Unknown Rewrite exception"];
+    return nil;
+}
+
+@synthesize actor;
+@synthesize ruleSEL;
+@end
+
+@implementation TreeRewriter
+
++ (TreeRewriter *) newTreeRewriter:(id<TreeNodeStream>)anInput
+{
+    return [[TreeRewriter alloc] initWithStream:anInput State:[RecognizerSharedState newRecognizerSharedState]];
+}
+
++ (TreeRewriter *) newTreeRewriter:(id<TreeNodeStream>)anInput State:(RecognizerSharedState *)aState
+{
+    return [[TreeRewriter alloc] initWithStream:anInput State:aState];
+}
+
+- (id)initWithStream:(id<TreeNodeStream>)anInput
+{
+    SEL aRuleSel;
+
+    if ((self = [super initWithStream:anInput]) != nil) {
+        showTransformations = NO;
+        state = [[RecognizerSharedState newRecognizerSharedState] retain];
+        originalAdaptor = [input getTreeAdaptor];
+        if ( originalAdaptor ) [originalAdaptor retain];
+        originalTokenStream = [input getTokenStream];        
+        if ( originalTokenStream ) [originalTokenStream retain];
+        aRuleSel = @selector(topdown);
+        topdown_fptr = [ANTLRfptr newANTLRfptrWithRule:(SEL)aRuleSel withObject:self];
+        aRuleSel = @selector(bottomup);
+        bottomup_ftpr = [ANTLRfptr newANTLRfptrWithRule:(SEL)aRuleSel withObject:self];        
+    }
+    return self;
+}
+
+- (id)initWithStream:(id<TreeNodeStream>)anInput State:(RecognizerSharedState *)aState
+{
+    SEL aRuleSel;
+    
+    if ((self = [super initWithStream:anInput]) != nil) {
+        showTransformations = NO;
+        state = aState;
+        if ( state ) [state retain];
+        originalAdaptor = [input getTreeAdaptor];
+        if ( originalAdaptor ) [originalAdaptor retain];
+        originalTokenStream = [input getTokenStream];        
+        if ( originalTokenStream ) [originalTokenStream retain];
+        aRuleSel = @selector(topdown);
+        topdown_fptr = [ANTLRfptr newANTLRfptrWithRule:(SEL)aRuleSel withObject:self];
+        aRuleSel = @selector(bottomup);
+        bottomup_ftpr = [ANTLRfptr newANTLRfptrWithRule:(SEL)aRuleSel withObject:self];        
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in TreeRewriter" );
+#endif
+	if ( state ) [state release];
+	if ( originalAdaptor ) [originalAdaptor release];
+	if ( originalTokenStream ) [originalTokenStream release];
+	[super dealloc];
+}
+
+- (id) applyOnce:(CommonTree *)t Rule:(ANTLRfptr *)whichRule
+{
+    if ( t == nil ) return nil;
+    @try {
+        // share TreeParser object but not parsing-related state
+        state = [RecognizerSharedState newRecognizerSharedState];
+        input = [CommonTreeNodeStream newCommonTreeNodeStream:(CommonTreeAdaptor *)originalAdaptor Tree:t];
+        [(CommonTreeNodeStream *)input setTokenStream:originalTokenStream];
+        [self setBacktrackingLevel:1];
+        TreeRuleReturnScope *r = [(ANTLRfptr *)whichRule rule];
+        [self setBacktrackingLevel:0];
+        if ( [self getFailed] )
+            return t;
+        if ( showTransformations &&
+            r != nil && !(t == r.start) && r.start != nil ) {
+            [self reportTransformation:t Tree:r.start];
+        }
+        if ( r != nil && r.start != nil )
+            return r.start;
+        else
+            return t;
+    }
+    @catch (RecognitionException *e) {
+        return t;
+    }
+    return t;
+}
+
+- (id) applyRepeatedly:(CommonTree *)t Rule:(ANTLRfptr *)whichRule
+{
+    BOOL treeChanged = true;
+    while ( treeChanged ) {
+        TreeRewriter *u = [self applyOnce:t Rule:whichRule];
+        treeChanged = !(t == u);
+        t = u;
+    }
+    return t;
+}
+
+- (id) downup:(CommonTree *)t
+{
+    return [self downup:t XForm:NO];
+}
+
+- (id) pre:(CommonTree *)t
+{
+    return [self applyOnce:t Rule:topdown_fptr];
+}
+
+- (id)post:(CommonTree *)t
+{
+    return [self applyRepeatedly:t Rule:bottomup_ftpr];
+}
+
+#ifdef DONTUSENOMO
+public Object downup(Object t, boolean showTransformations) {
+    this.showTransformations = showTransformations;
+    TreeVisitor v = new TreeVisitor(new CommonTreeAdaptor());
+    TreeVisitorAction actions = new TreeVisitorAction() {
+        public Object pre(Object t)  { return applyOnce(t, topdown_fptr); }
+        public Object post(Object t) { return applyRepeatedly(t, bottomup_ftpr); }
+    };
+    t = v.visit(t, actions);
+    return t;
+}
+#endif
+
+- (id) downup:(CommonTree *)t XForm:(BOOL)aShowTransformations
+{
+    showTransformations = aShowTransformations;
+    TreeVisitor *v = [TreeVisitor newTreeVisitor:[[originalAdaptor class] newTreeAdaptor]];
+    TreeVisitorAction *actions = [TreeVisitorAction newTreeVisitorAction];
+    {
+        //public Object pre(Object t)  { return applyOnce(t, topdown_fptr); }
+        [self pre:t];
+        //public Object post(Object t) { return applyRepeatedly(t, bottomup_ftpr); }
+        [self post:t];
+    };
+    t = [v visit:t Action:actions];
+    return t;
+}
+
+/** Override this if you need transformation tracing to go somewhere
+ *  other than stdout or if you're not using Tree-derived trees.
+ */
+- (void)reportTransformation:(CommonTree *)oldTree Tree:(CommonTree *)newTree
+{
+    //System.out.println(((Tree)oldTree).toStringTree()+" -> "+ ((Tree)newTree).toStringTree());
+}
+
+- (id)topdown_fptr
+{
+    return [self topdown];
+}
+
+- (id)bottomup_ftpr
+{
+    return [self bottomup];
+}
+
+// methods the downup strategy uses to do the up and down rules.
+// to override, just define tree grammar rule topdown and turn on
+// filter=true.
+- (id) topdown
+// @throws RecognitionException
+{
+    @throw [RecognitionException newException:@"TopDown exception"];
+    return nil;
+}
+
+- (id) bottomup
+//@throws RecognitionException
+{
+    @throw [RecognitionException newException:@"BottomUp exception"];
+    return nil;
+}
+
+@synthesize showTransformations;
+@synthesize originalTokenStream;
+@synthesize originalAdaptor;
+@synthesize rule;
+@end
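+
+#ifdef DONTUSENOMO
+// Illustrative usage sketch (not part of the upstream sources): a tree grammar compiled
+// with filter=true generates a TreeRewriter subclass whose topdown/bottomup rules rewrite
+// subtrees in place.  MyRewriter below stands in for such a generated class.
+//
+//   CommonTreeNodeStream *nodes =
+//       [CommonTreeNodeStream newCommonTreeNodeStream:[CommonTreeAdaptor newTreeAdaptor] Tree:tree];
+//   MyRewriter *rewriter = [MyRewriter newMyRewriter:nodes];
+//   CommonTree *result = [rewriter downup:tree XForm:YES]; // YES traces each transformation
+#endif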
diff --git a/runtime/ObjC/Framework/TreeRuleReturnScope.h b/runtime/ObjC/Framework/TreeRuleReturnScope.h
new file mode 100644
index 0000000..88e8a0e
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeRuleReturnScope.h
@@ -0,0 +1,52 @@
+//
+//  TreeRuleReturnScope.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/17/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RuleReturnScope.h"
+#import "CommonTree.h"
+
+@interface TreeRuleReturnScope : RuleReturnScope {
+    CommonTree *start;
+}
+
+/** First node or root node of tree matched for this rule. */
+@property (retain, getter=getStart, setter=setStart:) CommonTree *start;
+
++ (id) newReturnScope;
+- (id) init;
+- (void) dealloc;
+- (CommonTree *)getStart;
+- (void)setStart:(CommonTree *)aStart;
+
+- (id) copyWithZone:(NSZone *)theZone;
+
+@end
diff --git a/runtime/ObjC/Framework/TreeRuleReturnScope.m b/runtime/ObjC/Framework/TreeRuleReturnScope.m
new file mode 100644
index 0000000..8539d81
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeRuleReturnScope.m
@@ -0,0 +1,81 @@
+//
+//  TreeRuleReturnScope.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/17/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TreeRuleReturnScope.h"
+
+
+@implementation TreeRuleReturnScope
+@synthesize start;
+
++ (id) newReturnScope
+{
+    return [[TreeRuleReturnScope alloc] init];
+}
+
+- (id) init
+{
+    self = [super init];
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in TreeRuleReturnScope" );
+#endif
+	if ( start ) [start release];
+	[super dealloc];
+}
+
+- (CommonTree *)getStart
+{
+    return start;
+}	
+
+- (void)setStart:(CommonTree *)aStart
+{
+    if ( start != aStart ) {
+        if ( start ) [start release];
+        [aStart retain];
+    }
+    start = aStart;
+}	
+
+// create a copy, including the text if available
+// the input stream is *not* copied!
+- (id) copyWithZone:(NSZone *)theZone
+{
+    TreeRuleReturnScope *copy = [super copyWithZone:theZone];
+    copy.start = start;
+    return copy;
+}
+
+@end
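+
+#ifdef DONTUSENOMO
+// Illustrative usage sketch (not part of the upstream sources): tree grammar rules hand one
+// of these scopes back to the caller, which reads the matched subtree off its start property
+// (this is what TreeRewriter's applyOnce:Rule: does with the rule result).
+//
+//   TreeRuleReturnScope *r = [myTreeParser someRule];  // hypothetical generated rule method
+//   CommonTree *matched = r.start;                     // root of the subtree the rule matched
+#endif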
diff --git a/runtime/ObjC/Framework/TreeVisitor.h b/runtime/ObjC/Framework/TreeVisitor.h
new file mode 100644
index 0000000..7516a08
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeVisitor.h
@@ -0,0 +1,47 @@
+//
+//  TreeVisitor.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/18/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "TreeAdaptor.h"
+#import "TreeVisitorAction.h"
+
+@interface TreeVisitor : NSObject {
+   id<TreeAdaptor> adaptor;
+}
++ (TreeVisitor *)newTreeVisitor:(id<TreeAdaptor>) anAdaptor;
++ (TreeVisitor *)newTreeVisitor;
+- (id)init;
+- (id)initWithAdaptor:(id<TreeAdaptor>)anAdaptor;
+- (void) dealloc;
+- (id<BaseTree>)visit:(id<BaseTree>)t Action:(TreeVisitorAction *)action;
+
+@property (retain) id<TreeAdaptor> adaptor;
+@end
diff --git a/runtime/ObjC/Framework/TreeVisitor.m b/runtime/ObjC/Framework/TreeVisitor.m
new file mode 100644
index 0000000..fea76c7
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeVisitor.m
@@ -0,0 +1,103 @@
+//
+//  TreeVisitor.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/18/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TreeVisitor.h"
+#import "CommonTreeAdaptor.h"
+
+@implementation TreeVisitor
+
++ (TreeVisitor *)newTreeVisitor:(id<TreeAdaptor>)anAdaptor
+{
+    return [[TreeVisitor alloc] initWithAdaptor:anAdaptor];
+}
+
++ (TreeVisitor *)newTreeVisitor
+{
+    return [[TreeVisitor alloc] init];
+}
+
+
+- (id)init
+{
+    if ((self = [super init]) != nil) {
+        adaptor = [[CommonTreeAdaptor newTreeAdaptor] retain];
+    }
+    return self;
+}
+
+- (id)initWithAdaptor:(id<TreeAdaptor>)anAdaptor
+{
+    if ((self = [super init]) != nil) {
+        adaptor = [anAdaptor retain];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in TreeVisitor" );
+#endif
+    if ( adaptor ) [adaptor release];
+    [super dealloc];
+}
+
+/** Visit every node in tree t and trigger an action for each node
+ *  before/after having visited all of its children.
+ *  Execute both actions even if t has no children.
+ *  If a child visit yields a new child, it can update its
+ *  parent's child list or just return the new child.  The
+ *  child update code works even if the child visit alters its parent
+ *  and returns the new tree.
+ *
+ *  Return result of applying post action to this node.
+ */
+- (id<BaseTree>)visit:(id<BaseTree>)t Action:(TreeVisitorAction *)action
+{
+    // System.out.println("visit "+((Tree)t).toStringTree());
+    BOOL isNil = [adaptor isNil:t];
+    if ( action != nil && !isNil ) {
+        t = [action pre:(id<BaseTree>)t]; // if rewritten, walk children of new t
+    }
+    for (int i=0; i < [adaptor getChildCount:t]; i++) {
+        id<BaseTree> child = [adaptor getChild:t At:i];
+        id<BaseTree> visitResult = [self visit:child Action:action];
+        id<BaseTree> childAfterVisit = [adaptor getChild:t At:i];
+        if ( visitResult !=  childAfterVisit ) { // result & child differ?
+            [adaptor setChild:t At:i Child:visitResult];
+        }
+    }
+    if ( action != nil && !isNil ) t = [action post:(id<BaseTree>)t];
+    return t;
+}
+
+@synthesize adaptor;
+@end
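+
+#ifdef DONTUSENOMO
+// Illustrative usage sketch (not part of the upstream sources): walk a tree depth-first,
+// firing the action's pre/post selectors around each node's children.  In this port the
+// selectors are sent to the TreeVisitorAction object itself, so they must be selectors it
+// responds to; -description is used here purely as a stand-in.
+static id<BaseTree> exampleVisitTree(CommonTree *tree)
+{
+    TreeVisitor *visitor = [TreeVisitor newTreeVisitor:[CommonTreeAdaptor newTreeAdaptor]];
+    TreeVisitorAction *action = [TreeVisitorAction newTreeVisitorAction];
+    [action setPreAction:@selector(description)];
+    [action setPostAction:@selector(description)];
+    return [visitor visit:(id<BaseTree>)tree Action:action];
+}
+#endif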
diff --git a/runtime/ObjC/Framework/TreeVisitorAction.h b/runtime/ObjC/Framework/TreeVisitorAction.h
new file mode 100644
index 0000000..9c72dad
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeVisitorAction.h
@@ -0,0 +1,94 @@
+//
+//  TreeVisitorAction.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/18/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "BaseTree.h"
+
+@interface TreeVisitorAction : NSObject
+{
+    SEL preAction;
+    SEL postAction;
+
+}
+
+@property (assign, setter=setPreAction:) SEL preAction;
+@property (assign, setter=setPostAction:) SEL postAction;
+
++ (TreeVisitorAction *)newTreeVisitorAction;
+- (id) init;
+
+- (void)setPreAction:(SEL)anAction;
+- (void)setPostAction:(SEL)anAction;
+
+/** Execute an action before visiting children of t.  Return t or
+ *  a rewritten t.  It is up to the visitor to decide what to do
+ *  with the return value.  Children of returned value will be
+ *  visited if using TreeVisitor.visit().
+ */
+- (id<BaseTree>)pre:(id<BaseTree>) t;
+
+/** Execute an action after visiting children of t.  Return t or
+ *  a rewritten t.  It is up to the visitor to decide what to do
+ *  with the return value.
+ */
+- (id<BaseTree>)post:(id<BaseTree>) t;
+
+@end
+
+@class TreeFilter;
+@class fptr;
+
+@interface TreeVisitorActionFiltered : TreeVisitorAction
+{
+    TreeFilter *aTFilter;
+    fptr *TDRule;
+    fptr *BURule;
+}
+
+@property (assign, setter=setATFilter:) TreeFilter *aTFilter;
+
++ (TreeVisitorAction *)newTreeVisitorActionFiltered:(TreeFilter *)aFilter RuleD:(fptr *)aTDRule RuleU:(fptr *)aBURule;
+- (id) initWithFilter:(TreeFilter *)aFilter RuleD:(fptr *)aTDRule RuleU:(fptr *)aBURule;
+
+/** Execute an action before visiting children of t.  Return t or
+ *  a rewritten t.  It is up to the visitor to decide what to do
+ *  with the return value.  Children of returned value will be
+ *  visited if using TreeVisitor.visit().
+ */
+- (id<BaseTree>)pre:(id<BaseTree>) t;
+
+/** Execute an action after visiting children of t.  Return t or
+ *  a rewritten t.  It is up to the visitor to decide what to do
+ *  with the return value.
+ */
+- (id<BaseTree>)post:(id<BaseTree>) t;
+
+@end
diff --git a/runtime/ObjC/Framework/TreeVisitorAction.m b/runtime/ObjC/Framework/TreeVisitorAction.m
new file mode 100644
index 0000000..33c1bc7
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeVisitorAction.m
@@ -0,0 +1,140 @@
+//
+//  TreeVisitorAction.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/18/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TreeVisitorAction.h"
+
+
+@implementation TreeVisitorAction
+
++ (TreeVisitorAction *)newTreeVisitorAction
+{
+    return [[TreeVisitorAction alloc] init];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil ) {
+        preAction = nil;
+        postAction = nil;
+    }
+    return self;
+}
+
+- (void)setPreAction:(SEL)anAction
+{
+    preAction = anAction;
+}
+
+- (void)setPostAction:(SEL)anAction
+{
+    postAction = anAction;
+}
+
+/** Execute an action before visiting children of t.  Return t or
+ *  a rewritten t.  It is up to the visitor to decide what to do
+ *  with the return value.  Children of returned value will be
+ *  visited if using TreeVisitor.visit().
+ */
+- (id<BaseTree>)pre:(id<BaseTree>) t
+{
+    if ( (preAction != nil ) && ( [self respondsToSelector:preAction] )) {
+        [self performSelector:preAction];
+        return t;
+    }
+    return nil;
+}
+
+/** Execute an action after visiting children of t.  Return t or
+ *  a rewritten t.  It is up to the visitor to decide what to do
+ *  with the return value.
+ */
+- (id<BaseTree>)post:(id<BaseTree>) t
+{
+    if ( (postAction != nil ) && ( [self respondsToSelector:postAction] )) {
+        [self performSelector:postAction];
+        return t;
+    }
+    return nil;
+}
+
+@synthesize preAction;
+@synthesize postAction;
+
+@end
+
+@implementation TreeVisitorActionFiltered
+
++ (TreeVisitorAction *)newTreeVisitorActionFiltered:(TreeFilter *)aFilter
+                                              RuleD:(fptr *)aTDRule
+                                              RuleU:(fptr *)aBURule
+{
+    return [[TreeVisitorActionFiltered alloc] initWithFilter:aFilter RuleD:aTDRule RuleU:aBURule];
+}
+
+- (id) initWithFilter:(TreeFilter *)aFilter
+                RuleD:(fptr *)aTDRule
+                RuleU:(fptr *)aBURule
+{
+    if (( self = [super init] ) != nil ) {
+        aTFilter = aFilter;
+        TDRule = aTDRule;
+        BURule = aBURule;
+    }
+    return self;
+}
+
+/** Execute an action before visiting children of t.  Return t or
+ *  a rewritten t.  It is up to the visitor to decide what to do
+ *  with the return value.  Children of returned value will be
+ *  visited if using TreeVisitor.visit().
+ */
+- (id<BaseTree>)pre:(id<BaseTree>) t
+{
+    [aTFilter applyOnce:t rule:(fptr *)TDRule];
+    return t;
+}
+
+/** Execute an action after visiting children of t.  Return t or
+ *  a rewritten t.  It is up to the visitor to decide what to do
+ *  with the return value.
+ */
+- (id<BaseTree>)post:(id<BaseTree>) t
+{
+    [aTFilter applyOnce:t rule:(fptr *)BURule];
+    return t;
+}
+
+
+
+@synthesize aTFilter;
+
+@end
+
diff --git a/runtime/ObjC/Framework/TreeWizard.h b/runtime/ObjC/Framework/TreeWizard.h
new file mode 100644
index 0000000..2965ed4
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeWizard.h
@@ -0,0 +1,136 @@
+//
+//  TreeWizard.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/18/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "CommonTreeAdaptor.h"
+#import "CommonTree.h"
+#import "MapElement.h"
+#import "Map.h"
+#import "AMutableArray.h"
+
+@class ANTLRVisitor;
+
+@protocol ANTLRContextVisitor <NSObject>
+// TODO: should this be called visit or something else?
+- (void) visit:(CommonTree *)t Parent:(CommonTree *)parent ChildIndex:(NSInteger)childIndex Map:(Map *)labels;
+
+@end
+
+@interface ANTLRVisitor : NSObject <ANTLRContextVisitor> {
+    NSInteger action;
+    id actor;
+    id object1;
+    id object2;
+}
++ (ANTLRVisitor *)newANTLRVisitor:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2;
+- (id) initWithAction:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2;
+
+- (void) visit:(CommonTree *)t;
+- (void) visit:(CommonTree *)t Parent:(CommonTree *)parent ChildIndex:(NSInteger)childIndex Map:(Map *)labels;
+
+@property NSInteger action;
+@property (retain) id actor;
+@property (retain) id object1;
+@property (retain) id object2;
+@end
+
+/** When using %label:TOKENNAME in a tree for parse(), we must
+ *  track the label.
+ */
+@interface TreePattern : CommonTree {
+    NSString *label;
+    BOOL      hasTextArg;
+}
+@property (retain, getter=getLabel, setter=setLabel:) NSString *label;
+@property (assign, getter=getHasTextArg, setter=setHasTextArg:) BOOL hasTextArg;
+
++ (CommonTree *)newTreePattern:(id<Token>)payload;
+
+- (id) initWithToken:(id<Token>)payload;
+- (NSString *)toString;
+@end
+
+@interface ANTLRWildcardTreePattern : TreePattern {
+}
+
++ (ANTLRWildcardTreePattern *)newANTLRWildcardTreePattern:(id<Token>)payload;
+- (id) initWithToken:(id<Token>)payload;
+@end
+
+/** This adaptor creates TreePattern objects for use during scan() */
+@interface TreePatternTreeAdaptor : CommonTreeAdaptor {
+}
++ (TreePatternTreeAdaptor *)newTreeAdaptor;
+- (id) init;
+- (CommonTree *)createTreePattern:(id<Token>)payload;
+
+@end
+
+@interface TreeWizard : NSObject {
+	id<TreeAdaptor> adaptor;
+	Map *tokenNameToTypeMap;
+}
++ (TreeWizard *) newTreeWizard:(id<TreeAdaptor>)anAdaptor;
++ (TreeWizard *)newTreeWizard:(id<TreeAdaptor>)adaptor Map:(Map *)aTokenNameToTypeMap;
++ (TreeWizard *)newTreeWizard:(id<TreeAdaptor>)adaptor TokenNames:(NSArray *)theTokNams;
++ (TreeWizard *)newTreeWizardWithTokenNames:(NSArray *)theTokNams;
+- (id) init;
+- (id) initWithAdaptor:(id<TreeAdaptor>)adaptor;
+- (id) initWithAdaptor:(id<TreeAdaptor>)adaptor Map:(Map *)tokenNameToTypeMap;
+- (id) initWithTokenNames:(NSArray *)theTokNams;
+- (id) initWithTokenNames:(id<TreeAdaptor>)anAdaptor TokenNames:(NSArray *)theTokNams;
+- (void) dealloc;
+- (Map *)computeTokenTypes:(NSArray *)theTokNams;
+- (NSInteger)getTokenType:(NSString *)tokenName;
+- (Map *)index:(CommonTree *)t;
+- (void) _index:(CommonTree *)t Map:(Map *)m;
+- (AMutableArray *)find:(CommonTree *) t Pattern:(NSString *)pattern;
+- (TreeWizard *)findFirst:(CommonTree *) t Type:(NSInteger)ttype;
+- (TreeWizard *)findFirst:(CommonTree *) t Pattern:(NSString *)pattern;
+- (void) visit:(CommonTree *)t Type:(NSInteger)ttype Visitor:(ANTLRVisitor *)visitor;
+- (void) _visit:(CommonTree *)t
+         Parent:(CommonTree *)parent
+     ChildIndex:(NSInteger)childIndex
+           Type:(NSInteger)ttype
+        Visitor:(ANTLRVisitor *)visitor;
+- (void)visit:(CommonTree *)t Pattern:(NSString *)pattern Visitor:(ANTLRVisitor *)visitor;
+- (BOOL)parse:(CommonTree *)t Pattern:(NSString *)pattern Map:(Map *)labels;
+- (BOOL) parse:(CommonTree *) t Pattern:(NSString *)pattern;
+- (BOOL) _parse:(CommonTree *)t1 Pattern:(CommonTree *)tpattern Map:(Map *)labels;
+- (CommonTree *) createTree:(NSString *)pattern;
+- (BOOL)equals:(id)t1 O2:(id)t2 Adaptor:(id<TreeAdaptor>)anAdaptor;
+- (BOOL)equals:(id)t1 O2:(id)t2;
+- (BOOL) _equals:(id)t1 O2:(id)t2 Adaptor:(id<TreeAdaptor>)anAdaptor;
+
+@property (retain) id<TreeAdaptor> adaptor;
+@property (retain) Map *tokenNameToTypeMap;
+@end
+
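+#ifdef DONTUSENOMO
+// Illustrative usage sketch (not part of the upstream sources): typical wizard queries
+// against a parse tree, given the recognizer's token-name table.
+//
+//   NSArray *tokNames = ...;  // token names from the generated recognizer
+//   TreeWizard *wiz = [TreeWizard newTreeWizard:[CommonTreeAdaptor newTreeAdaptor]
+//                                    TokenNames:tokNames];
+//   AMutableArray *plusNodes = [wiz find:tree Pattern:@"(PLUS . .)"];  // every PLUS subtree
+//   BOOL matches = [wiz parse:tree Pattern:@"(PLUS INT INT)"];         // structural match test
+//   CommonTree *expr = [wiz createTree:@"(PLUS INT[3] INT[4])"];       // build a tree from a pattern
+#endif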
diff --git a/runtime/ObjC/Framework/TreeWizard.m b/runtime/ObjC/Framework/TreeWizard.m
new file mode 100644
index 0000000..e796e5c
--- /dev/null
+++ b/runtime/ObjC/Framework/TreeWizard.m
@@ -0,0 +1,735 @@
+//
+//  TreeWizard.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/18/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TreeWizard.h"
+#import "TreePatternLexer.h"
+#import "TreePatternParser.h"
+#import "IntArray.h"
+
+@implementation ANTLRVisitor
+
++ (ANTLRVisitor *)newANTLRVisitor:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2
+{
+    return [[ANTLRVisitor alloc] initWithAction:anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2];
+}
+
+- (id) initWithAction:(NSInteger)anAction Actor:(id)anActor Object:(id)anObject1 Object:(id)anObject2
+{
+    if ((self = [super init]) != nil) {
+        action = anAction;
+        actor = anActor;
+        if ( actor ) [actor retain];
+        object1 = anObject1;
+        if ( object1 ) [object1 retain];
+        object2 = anObject2;
+        if ( object2 ) [object2 retain];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in ANTLRVisitor" );
+#endif
+    if ( actor ) [actor release];
+    if ( object1 ) [object1 release];
+    if ( object2 ) [object2 release];
+    [super dealloc];
+}
+
+- (void) visit:(CommonTree *)t Parent:(CommonTree *)parent ChildIndex:(NSInteger)childIndex Map:(Map *)labels
+{
+    switch (action) {
+        case 0:
+            [(Map *)object2 /* labels */ clear];
+            if ( [(TreeWizard *)actor _parse:t Pattern:object1/* tpattern */ Map:object2 /* labels */] ) {
+                [self visit:t Parent:parent ChildIndex:childIndex Map:object2 /* labels */];
+            }
+            break;
+        case 1:
+            if ( [(TreeWizard *)actor _parse:t Pattern:object1/* tpattern */ Map:nil] ) {
+                [(AMutableArray *)object2/* subtrees */ addObject:t];
+            }
+            break;
+    }
+    // [self visit:t];
+    return;
+}
+
+- (void) visit:(CommonTree *)t
+{
+    [object1 addObject:t];
+    return;
+}
+
+@synthesize action;
+@synthesize actor;
+@synthesize object1;
+@synthesize object2;
+@end
+
+/** When using %label:TOKENNAME in a tree for parse(), we must
+ *  track the label.
+ */
+@implementation TreePattern
+
+@synthesize label;
+@synthesize hasTextArg;
+
++ (CommonTree *)newTreePattern:(id<Token>)payload
+{
+    return (CommonTree *)[[TreePattern alloc] initWithToken:payload];
+}
+
+- (id) initWithToken:(id<Token>)payload
+{
+    self = [super initWithToken:payload];
+    if ( self != nil ) {
+    }
+    return (CommonTree *)self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in TreePattern" );
+#endif
+    if ( label ) [label release];
+    [super dealloc];
+}
+
+- (NSString *)toString
+{
+    if ( label != nil ) {
+        return [NSString stringWithFormat:@"\% %@ : %@", label, [super toString]];
+    }
+    else {
+        return [super toString];				
+    }
+}
+
+@end
+
+@implementation ANTLRWildcardTreePattern
+
++ (ANTLRWildcardTreePattern *)newANTLRWildcardTreePattern:(id<Token>)payload
+{
+    return(ANTLRWildcardTreePattern *)[[ANTLRWildcardTreePattern alloc] initWithToken:(id<Token>)payload];
+}
+
+- (id) initWithToken:(id<Token>)payload
+{
+    self = [super initWithToken:payload];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+@end
+
+/** This adaptor creates TreePattern objects for use during scan() */
+@implementation TreePatternTreeAdaptor
+
++ (TreePatternTreeAdaptor *)newTreeAdaptor
+{
+    return [[TreePatternTreeAdaptor alloc] init];
+}
+
+- (id) init
+{
+    self = [super init];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (CommonTree *)createTreePattern:(id<Token>)payload
+{
+    return (CommonTree *)[super create:payload];
+}
+          
+@end
+
+@implementation TreeWizard
+
+// TODO: build indexes for the wizard
+
+/** During fillBuffer(), we can make a reverse index from a set
+ *  of token types of interest to the list of indexes into the
+ *  node stream.  This lets us convert a node pointer to a
+ *  stream index semi-efficiently for a list of interesting
+ *  nodes such as function definition nodes (you'll want to seek
+ *  to their bodies for an interpreter).  Also useful for doing
+ *  dynamic searches; i.e., go find me all PLUS nodes.
+ protected Map tokenTypeToStreamIndexesMap;
+ 
+ ** If tokenTypesToReverseIndex set to INDEX_ALL then indexing
+ *  occurs for all token types.
+ public static final Set INDEX_ALL = new HashSet();
+ 
+ ** A set of token types user would like to index for faster lookup.
+ *  If this is INDEX_ALL, then all token types are tracked.  If nil,
+ *  then none are indexed.
+ protected Set tokenTypesToReverseIndex = nil;
+ */
+
++ (TreeWizard *) newTreeWizard:(id<TreeAdaptor>)anAdaptor
+{
+    return [[TreeWizard alloc] initWithAdaptor:anAdaptor];
+}
+
++ (TreeWizard *)newTreeWizard:(id<TreeAdaptor>)anAdaptor Map:(Map *)aTokenNameToTypeMap
+{
+    return [[TreeWizard alloc] initWithAdaptor:anAdaptor Map:aTokenNameToTypeMap];
+}
+
++ (TreeWizard *)newTreeWizard:(id<TreeAdaptor>)anAdaptor TokenNames:(NSArray *)theTokNams
+{
+    return [[TreeWizard alloc] initWithTokenNames:anAdaptor TokenNames:theTokNams];
+}
+
++ (TreeWizard *)newTreeWizardWithTokenNames:(NSArray *)theTokNams
+{
+    return [[TreeWizard alloc] initWithTokenNames:theTokNams];
+}
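+
+/* Usage sketch (not part of the runtime): building a wizard from a token-name
+ * table.  The grammar and the `tokenNames' array below are hypothetical and
+ * only illustrate the call; any NSArray of token names from a generated
+ * recognizer will do.
+ *
+ *   NSArray *tokenNames = [NSArray arrayWithObjects:@"<invalid>", @"<EOR>",
+ *                          @"<DOWN>", @"<UP>", @"ASSIGN", @"ID", @"INT", nil];
+ *   TreeWizard *wiz = [TreeWizard newTreeWizard:[CommonTreeAdaptor newTreeAdaptor]
+ *                                    TokenNames:tokenNames];
+ */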
+
+- (id) init
+{
+    if ((self = [super init]) != nil) {
+    }
+    return self;
+}
+
+- (id) initWithAdaptor:(id<TreeAdaptor>)anAdaptor
+{
+    if ((self = [super init]) != nil) {
+        adaptor = anAdaptor;
+        if ( adaptor ) [adaptor retain];
+    }
+    return self;
+}
+            
+- (id) initWithAdaptor:(id<TreeAdaptor>)anAdaptor Map:(Map *)aTokenNameToTypeMap
+{
+    if ((self = [super init]) != nil) {
+        adaptor = anAdaptor;
+        if ( adaptor ) [adaptor retain];
+        tokenNameToTypeMap = aTokenNameToTypeMap;
+   }
+    return self;
+}
+
+- (id) initWithTokenNames:(NSArray *)theTokNams
+{
+    if ((self = [super init]) != nil) {
+#pragma warning Fix initWithTokenNames.
+        // adaptor = anAdaptor;
+        //tokenNameToTypeMap = aTokenNameToTypeMap;
+        tokenNameToTypeMap = [[self computeTokenTypes:theTokNams] retain];
+    }
+    return self;
+}
+             
+- (id) initWithTokenNames:(id<TreeAdaptor>)anAdaptor TokenNames:(NSArray *)theTokNams
+{
+    if ((self = [super init]) != nil) {
+        adaptor = anAdaptor;
+        if ( adaptor ) [adaptor retain];
+        // tokenNameToTypeMap = aTokenNameToTypeMap;
+        tokenNameToTypeMap = [[self computeTokenTypes:theTokNams] retain];
+    }
+    return self;
+}
+            
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in TreePatternTreeAdaptor" );
+#endif
+    if ( adaptor ) [adaptor release];
+    if ( tokenNameToTypeMap ) [tokenNameToTypeMap release];
+    [super dealloc];
+}
+
+/** Compute a Map<String, Integer> that is an inverted index of
+ *  tokenNames (which maps int token types to names).
+ */
+- (Map *)computeTokenTypes:(NSArray *)theTokNams
+{
+    Map *m = [Map newMap];
+    if ( theTokNams == nil ) {
+        return m;
+    }
+    for (int ttype = TokenTypeMIN; ttype < [theTokNams count]; ttype++) {
+        NSString *name = (NSString *) [theTokNams objectAtIndex:ttype];
+        [m putName:name TType:ttype];
+    }
+    return m;
+}
+
+/** Using the map of token names to token types, return the type. */
+- (NSInteger)getTokenType:(NSString *)tokenName
+{
+    if ( tokenNameToTypeMap == nil ) {
+        return TokenTypeInvalid;
+    }
+    NSInteger aTType = (NSInteger)[tokenNameToTypeMap getTType:tokenName];
+    if ( aTType != -1 ) {
+        return aTType;
+    }
+    return TokenTypeInvalid;
+}
+
+/** Walk the entire tree and make a node name to nodes mapping.
+ *  For now, use recursion but later nonrecursive version may be
+ *  more efficient.  Returns Map<Integer, List> where the List is
+ *  of your AST node type.  The Integer is the token type of the node.
+ *
+ *  TODO: save this index so that find and visit are faster
+ */
+- (Map *)index:(CommonTree *)t
+{
+    Map *m = [Map newMap];
+    [self _index:t Map:m];
+    return m;
+}
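+
+/* Usage sketch (assumes the wizard `wiz' and a parsed tree `t' from the sketch
+ * above; INT is a hypothetical token type constant from the caller's grammar):
+ *
+ *   Map *byType = [wiz index:t];
+ *   id intNodes = [byType getName:INT];   // all nodes whose token type is INT
+ */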
+
+/** Do the work for index */
+- (void) _index:(CommonTree *)t Map:(Map *)m
+{
+    if ( t==nil ) {
+        return;
+    }
+#pragma warning Fix _index use of Map.
+    NSInteger ttype = [adaptor getType:t];
+    Map *elements = (Map *)[m getName:ttype];
+    if ( elements == nil ) {
+        elements = [Map newMapWithLen:100];
+        [m putNode:ttype Node:elements];
+    }
+    [elements addObject:t];
+    int n = [adaptor getChildCount:t];
+    for (int i=0; i<n; i++) {
+        CommonTree * child = [adaptor getChild:t At:i];
+        [self _index:child Map:m];
+    }
+}
+
+/** Return a List of tree nodes with token type ttype */
+- (AMutableArray *)find:(CommonTree *)t Type:(NSInteger)ttype
+{
+#ifdef DONTUSENOMO
+    final List nodes = new ArrayList();
+    visit(t, ttype, new TreeWizard.Visitor() {
+        public void visit(Object t) {
+            [nodes addObject t];
+        }
+    } );
+#endif
+    AMutableArray *nodes = [AMutableArray arrayWithCapacity:100];
+    ANTLRVisitor *contextVisitor = [ANTLRVisitor newANTLRVisitor:3 Actor:self Object:(id)nodes Object:nil];
+    [self visit:t Type:ttype Visitor:contextVisitor];
+    return nodes;
+}
+
+/** Return a List of subtrees matching pattern. */
+- (AMutableArray *)find:(CommonTree *)t Pattern:(NSString *)pattern
+{
+    AMutableArray *subtrees = [AMutableArray arrayWithCapacity:100];
+    // Create a TreePattern from the pattern
+    TreePatternLexer *tokenizer = [TreePatternLexer newTreePatternLexer:pattern];
+    TreePatternParser *parser = [TreePatternParser newTreePatternParser:tokenizer
+                                                                                     Wizard:self
+                                                                                    Adaptor:[TreePatternTreeAdaptor newTreeAdaptor]];
+    CommonTree *tpattern = (CommonTree *)[parser pattern];
+    // don't allow invalid patterns
+    if ( tpattern == nil ||
+        [tpattern isNil] ||
+        [tpattern class] == [ANTLRWildcardTreePattern class] )
+    {
+        return nil;
+    }
+    int rootTokenType = [tpattern type];
+#ifdef DONTUSENOMO
+    visit(t, rootTokenType, new TreeWizard.ContextVisitor() {
+        public void visit(Object t, Object parent, int childIndex, Map labels) {
+            if ( _parse(t, tpattern, null) ) {
+                subtrees.add(t);
+            }
+        }
+    } );
+#endif
+    ANTLRVisitor *contextVisitor = [ANTLRVisitor newANTLRVisitor:1 Actor:self Object:tpattern Object:subtrees];
+    [self visit:t Type:rootTokenType Visitor:contextVisitor];
+    return subtrees;
+}
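+
+/* Usage sketch (PLUS/ASSIGN/ID are hypothetical tokens from the caller's
+ * grammar):
+ *
+ *   AMutableArray *plusNodes = [wiz find:t Type:PLUS];
+ *   AMutableArray *assigns   = [wiz find:t Pattern:@"(ASSIGN ID .)"];
+ *
+ * find:Pattern: returns nil for malformed or wildcard-rooted patterns.
+ */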
+
+- (TreeWizard *)findFirst:(CommonTree *) t Type:(NSInteger)ttype
+{
+    return nil;
+}
+
+- (TreeWizard *)findFirst:(CommonTree *) t Pattern:(NSString *)pattern
+{
+    return nil;
+}
+
+/** Visit every ttype node in t, invoking the visitor.  This is a quicker
+ *  version of the general visit(t, pattern) method.  The labels arg
+ *  of the visitor action method is never set (it's nil) since using
+ *  a token type rather than a pattern doesn't let us set a label.
+ */
+- (void) visit:(CommonTree *)t Type:(NSInteger)ttype Visitor:(ANTLRVisitor *)visitor
+{
+    [self _visit:t Parent:nil ChildIndex:0 Type:ttype Visitor:visitor];
+}
+
+/** Do the recursive work for visit */
+- (void) _visit:(CommonTree *)t
+         Parent:(CommonTree *)parent
+     ChildIndex:(NSInteger)childIndex
+           Type:(NSInteger)ttype
+        Visitor:(ANTLRVisitor *)visitor
+{
+    if ( t == nil ) {
+        return;
+    }
+    if ( [adaptor getType:t] == ttype ) {
+        [visitor visit:t Parent:parent ChildIndex:childIndex Map:nil];
+    }
+    int n = [adaptor getChildCount:t];
+    for (int i=0; i<n; i++) {
+        CommonTree * child = [adaptor getChild:t At:i];
+        [self _visit:child Parent:t ChildIndex:i Type:ttype Visitor:visitor];
+    }
+}
+
+/** For all subtrees that match the pattern, execute the visit action.
+ *  The implementation uses the root node of the pattern in combination
+ *  with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
+ *  Patterns with wildcard roots are also not allowed.
+ */
+- (void)visit:(CommonTree *)t Pattern:(NSString *)pattern Visitor:(ANTLRVisitor *)visitor
+{
+    // Create a TreePattern from the pattern
+    TreePatternLexer *tokenizer = [TreePatternLexer newTreePatternLexer:pattern];
+    TreePatternParser *parser =
+    [TreePatternParser newTreePatternParser:tokenizer Wizard:self Adaptor:[TreePatternTreeAdaptor newTreeAdaptor]];
+    CommonTree *tpattern = [parser pattern];
+    // don't allow invalid patterns
+    if ( tpattern == nil ||
+        [tpattern isNil] ||
+        [tpattern class] == [ANTLRWildcardTreePattern class] )
+    {
+        return;
+    }
+    Map *labels = [Map newMap]; // reused for each _parse
+    int rootTokenType = [tpattern type];
+#pragma warning This is another one of those screwy nested constructs that I have to figure out
+#ifdef DONTUSENOMO
+    visit(t, rootTokenType, new TreeWizard.ContextVisitor() {
+        public void visit(Object t, Object parent, int childIndex, Map unusedlabels) {
+            // the unusedlabels arg is null as visit on token type doesn't set.
+            labels.clear();
+            if ( _parse(t, tpattern, labels) ) {
+                visitor.visit(t, parent, childIndex, labels);
+            }
+        }
+    });
+#endif
+    ANTLRVisitor *contextVisitor = [ANTLRVisitor newANTLRVisitor:0 Actor:self Object:tpattern Object:labels];
+    [self visit:t Type:rootTokenType Visitor:contextVisitor];
+}
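+
+/* Usage sketch (hypothetical grammar tokens; `aVisitor' stands in for a
+ * caller-supplied ANTLRVisitor):
+ *
+ *   [wiz visit:t Pattern:@"(ASSIGN %lhs:ID %rhs:.)" Visitor:aVisitor];
+ *
+ * Each matching ASSIGN subtree is meant to be reported together with a labels
+ * Map binding %lhs and %rhs to the corresponding nodes.
+ */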
+
+/** Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
+ *  on the various nodes and '.' (dot) as the node/subtree wildcard,
+ *  return true if the pattern matches and fill the labels Map with
+ *  the labels pointing at the appropriate nodes.  Return false if
+ *  the pattern is malformed or the tree does not match.
+ *
+ *  If a node specifies a text arg in pattern, then that must match
+ *  for that node in t.
+ *
+ *  TODO: what's a better way to indicate bad pattern? Exceptions are a hassle 
+ */
+- (BOOL)parse:(CommonTree *)t Pattern:(NSString *)pattern Map:(Map *)labels
+{
+#ifdef DONTUSENOMO
+    TreePatternLexer tokenizer = new TreePatternLexer(pattern);
+    TreePatternParser parser =
+    new TreePatternParser(tokenizer, this, new TreePatternTreeAdaptor());
+    TreePattern tpattern = (TreePattern)parser.pattern();
+    /*
+     System.out.println("t="+((Tree)t).toStringTree());
+     System.out.println("scant="+tpattern.toStringTree());
+     */
+    boolean matched = _parse(t, tpattern, labels);
+    return matched;
+#endif
+    TreePatternLexer *tokenizer = [TreePatternLexer newTreePatternLexer:pattern];
+    TreePatternParser *parser = [TreePatternParser newTreePatternParser:tokenizer
+                                                                                Wizard:self
+                                                                               Adaptor:[TreePatternTreeAdaptor newTreeAdaptor]];
+    CommonTree *tpattern = [parser pattern];
+    /*
+     System.out.println("t="+((Tree)t).toStringTree());
+     System.out.println("scant="+tpattern.toStringTree());
+     */
+    //BOOL matched = [self _parse:t Pattern:tpattern Map:labels];
+    //return matched;
+    return [self _parse:t Pattern:tpattern Map:labels];
+}
+
+- (BOOL) parse:(CommonTree *)t Pattern:(NSString *)pattern
+{
+    return [self parse:t Pattern:pattern Map:nil];
+}
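+
+/* Usage sketch (ASSIGN/ID are hypothetical token names): on a successful
+ * match the labels Map ends up with the %lhs and %rhs nodes bound by name.
+ *
+ *   Map *labels = [Map newMap];
+ *   if ( [wiz parse:t Pattern:@"(ASSIGN %lhs:ID %rhs:.)" Map:labels] ) {
+ *       // look up the nodes bound to "lhs"/"rhs" in labels as needed
+ *   }
+ */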
+
+/** Do the work for parse. Check to see if the t2 pattern fits the
+ *  structure and token types in t1.  Check text if the pattern has
+ *  text arguments on nodes.  Fill labels map with pointers to nodes
+ *  in tree matched against nodes in pattern with labels.
+ */
+- (BOOL) _parse:(CommonTree *)t1 Pattern:(CommonTree *)aTPattern Map:(Map *)labels
+{
+    TreePattern *tpattern;
+    // make sure both are non-nil
+    if ( t1 == nil || aTPattern == nil ) {
+        return NO;
+    }
+    // the pattern root is always a TreePattern (ANTLRWildcardTreePattern is a subclass), so cast once up front
+    tpattern = (TreePattern *)aTPattern;
+    // check roots (wildcard matches anything)
+    if ( [tpattern class] != [ANTLRWildcardTreePattern class] ) {
+        if ( [adaptor getType:t1] != [tpattern type] )
+            return NO;
+        // if pattern has text, check node text
+        if ( tpattern.hasTextArg && ![[adaptor getText:t1] isEqualToString:[tpattern text]] ) {
+            return NO;
+        }
+    }
+    if ( tpattern.label != nil && labels!=nil ) {
+        // map label in pattern to node in t1
+        [labels putName:tpattern.label Node:t1];
+    }
+    // check children
+    int n1 = [adaptor getChildCount:t1];
+    int n2 = [tpattern getChildCount];
+    if ( n1 != n2 ) {
+        return NO;
+    }
+    for (int i=0; i<n1; i++) {
+        CommonTree * child1 = [adaptor getChild:t1 At:i];
+        CommonTree *child2 = (CommonTree *)[tpattern getChild:i];
+        if ( ![self _parse:child1 Pattern:child2 Map:labels] ) {
+            return NO;
+        }
+    }
+    return YES;
+}
+
+/** Create a tree or node from the indicated tree pattern that closely
+ *  follows ANTLR tree grammar tree element syntax:
+ *
+ * 		(root child1 ... child2).
+ *
+ *  You can also just pass in a node: ID
+ * 
+ *  Any node can have a text argument: ID[foo]
+ *  (notice there are no quotes around foo--it's clear it's a string).
+ *
+ *  nil is a special name meaning "give me a nil node".  Useful for
+ *  making lists: (nil A B C) is a list of A B C.
+ */
+- (CommonTree *) createTree:(NSString *)pattern
+{
+    TreePatternLexer *tokenizer = [TreePatternLexer newTreePatternLexer:pattern];
+    TreePatternParser *parser = [TreePatternParser newTreePatternParser:tokenizer Wizard:self Adaptor:adaptor];
+    CommonTree * t = [parser pattern];
+    return t;
+}
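+
+/* Usage sketch (PLUS/INT and A/B/C are hypothetical token names):
+ *
+ *   CommonTree *add  = [wiz createTree:@"(PLUS INT[3] INT[4])"];
+ *   CommonTree *list = [wiz createTree:@"(nil A B C)"];   // flat list, nil root
+ */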
+
+/** Compare t1 and t2; return true if token types/text, structure match exactly.
+ *  The trees are examined in their entirety so that (A B) does not match
+ *  (A B C) nor (A (B C)). 
+ // TODO: allow them to pass in a comparator
+ *  TODO: have a version that is nonstatic so it can use instance adaptor
+ *
+ *  I cannot rely on the tree node's equals() implementation as I make
+ *  no constraints at all on the node types nor interface etc... 
+ */
+- (BOOL)equals:(id)t1 O2:(id)t2 Adaptor:(id<TreeAdaptor>)anAdaptor
+{
+    return [self _equals:t1 O2:t2 Adaptor:anAdaptor];
+}
+
+/** Compare type, structure, and text of two trees, assuming adaptor in
+ *  this instance of a TreeWizard.
+ */
+- (BOOL)equals:(id)t1 O2:(id)t2
+{
+    return [self _equals:t1 O2:t2 Adaptor:adaptor];
+}
+
+- (BOOL) _equals:(id)t1 O2:(id)t2 Adaptor:(id<TreeAdaptor>)anAdaptor
+{
+    // make sure both are non-nil
+    if ( t1==nil || t2==nil ) {
+        return NO;
+    }
+    // check roots
+    if ( [anAdaptor getType:t1] != [anAdaptor getType:t2] ) {
+        return NO;
+    }
+    if ( ![[anAdaptor getText:t1] isEqualTo:[anAdaptor getText:t2]] ) {
+        return NO;
+    }
+    // check children
+    NSInteger n1 = [anAdaptor getChildCount:t1];
+    NSInteger n2 = [anAdaptor getChildCount:t2];
+    if ( n1 != n2 ) {
+        return NO;
+    }
+    for (int i=0; i<n1; i++) {
+        CommonTree * child1 = [anAdaptor getChild:t1 At:i];
+        CommonTree * child2 = [anAdaptor getChild:t2 At:i];
+        if ( ![self _equals:child1 O2:child2 Adaptor:anAdaptor] ) {
+            return NO;
+        }
+    }
+    return YES;
+}
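+
+/* Usage sketch: two trees built from the same pattern compare equal because
+ * token types, text and structure all match.
+ *
+ *   CommonTree *a = [wiz createTree:@"(PLUS INT[3] INT[4])"];
+ *   CommonTree *b = [wiz createTree:@"(PLUS INT[3] INT[4])"];
+ *   BOOL same = [wiz equals:a O2:b];   // YES
+ */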
+
+// TODO: next stuff taken from CommonTreeNodeStream
+
+/** Given a node, add this to the reverse index tokenTypeToStreamIndexesMap.
+ *  You can override this method to alter how indexing occurs.  The
+ *  default is to create a
+ *
+ *    Map<Integer token type,ArrayList<Integer stream index>>
+ *
+ *  This data structure allows you to find all nodes with type INT in order.
+ *
+ *  If you really need to find a node of type, say, FUNC quickly then perhaps
+ *
+ *    Map<Integer token type, Map<Object tree node, Integer stream index>>
+ *
+ *  would be better for you.  The interior maps map a tree node to
+ *  the index so you don't have to search linearly for a specific node.
+ *
+ *  If you change this method, you will likely need to change
+ *  getNodeIndex(), which extracts information.
+- (void)fillReverseIndex:(CommonTree *)node Index:(NSInteger)streamIndex
+{
+    //System.out.println("revIndex "+node+"@"+streamIndex);
+    if ( tokenTypesToReverseIndex == nil ) {
+        return; // no indexing if this is empty (nothing of interest)
+    }
+    if ( tokenTypeToStreamIndexesMap == nil ) {
+        tokenTypeToStreamIndexesMap = [Map newMap]; // first indexing op
+    }
+    int tokenType = [adaptor getType:node];
+    Integer tokenTypeI = new Integer(tokenType);
+    if ( !(tokenTypesToReverseIndex == INDEX_ALL ||
+            [tokenTypesToReverseIndex contains:tokenTypeI]) ) {
+        return; // tokenType not of interest
+    }
+    NSInteger streamIndexI = streamIndex;
+    AMutableArray *indexes = (AMutableArray *)[tokenTypeToStreamIndexesMap objectAtIndex:tokenTypeI];
+    if ( indexes==nil ) {
+        indexes = [AMutableArray arrayWithCapacity:100]; // no list yet for this token type
+        indexes.add(streamIndexI); // not there yet, add
+        [tokenTypeToStreamIndexesMap put:tokenTypeI Idexes:indexes];
+    }
+    else {
+        if ( ![indexes contains:streamIndexI] ) {
+            [indexes add:streamIndexI]; // not there yet, add
+        }
+    }
+}
+ 
+ ** Track the indicated token type in the reverse index.  Call this
+ *  repeatedly for each type or use variant with Set argument to
+ *  set all at once.
+ * @param tokenType
+public void reverseIndex:(NSInteger)tokenType
+{
+    if ( tokenTypesToReverseIndex == nil ) {
+        tokenTypesToReverseIndex = [Map newMap];
+    }
+    else if ( tokenTypesToReverseIndex == INDEX_ALL ) {
+        return;
+    }
+    tokenTypesToReverseIndex.add(new Integer(tokenType));
+}
+ 
+** Track the indicated token types in the reverse index. Set
+ *  to INDEX_ALL to track all token types.
+public void reverseIndex(Set tokenTypes) {
+    tokenTypesToReverseIndex = tokenTypes;
+}
+ 
+ ** Given a node pointer, return its index into the node stream.
+ *  This is not its Token stream index.  If there is no reverse map
+ *  from node to stream index or the map does not contain entries
+ *  for node's token type, a linear search of entire stream is used.
+ *
+ *  Return -1 if exact node pointer not in stream.
+public int getNodeIndex(Object node) {
+    //System.out.println("get "+node);
+    if ( tokenTypeToStreamIndexesMap==nil ) {
+        return getNodeIndexLinearly(node);
+    }
+    int tokenType = adaptor.getType(node);
+    Integer tokenTypeI = new Integer(tokenType);
+    ArrayList indexes = (ArrayList)tokenTypeToStreamIndexesMap.get(tokenTypeI);
+    if ( indexes==nil ) {
+        //System.out.println("found linearly; stream index = "+getNodeIndexLinearly(node));
+        return getNodeIndexLinearly(node);
+    }
+    for (int i = 0; i < indexes.size(); i++) {
+        Integer streamIndexI = (Integer)indexes.get(i);
+        Object n = get(streamIndexI.intValue());
+        if ( n==node ) {
+            //System.out.println("found in index; stream index = "+streamIndexI);
+            return streamIndexI.intValue(); // found it!
+        }
+    }
+    return -1;
+}
+ 
+*/
+
+@synthesize adaptor;
+@synthesize tokenNameToTypeMap;
+@end
diff --git a/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStream.h b/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStream.h
new file mode 100644
index 0000000..fd66ac7
--- /dev/null
+++ b/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStream.h
@@ -0,0 +1,122 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import <Cocoa/Cocoa.h>
+#import "TreeNodeStream.h"
+#import "CommonTokenStream.h"
+#import "CommonTree.h"
+#import "CommonTreeAdaptor.h"
+
+@interface ANTLRUnbufferedCommonTreeNodeStream : NSObject < TreeNodeStream > {
+
+	BOOL shouldUseUniqueNavigationNodes;
+
+	CommonTree *root;
+	CommonTree *currentNode;
+	CommonTree *previousNode;
+
+	id<TreeAdaptor> treeAdaptor;
+	
+	id<TokenStream> tokenStream;
+	
+	NSMutableArray *nodeStack;
+	NSMutableArray *indexStack;
+	PtrBuffer *markers;
+	NSInteger lastMarker;
+	
+	NSInteger currentChildIndex;
+	NSInteger absoluteNodeIndex;
+	
+	NSMutableArray *lookahead;
+	NSUInteger head;
+	NSUInteger tail;
+}
+
+@property (retain, getter=getRoot, setter=setRoot:) CommonTree *root;
+@property (retain, getter=getCurrentNode, setter=setCurrentNode:) CommonTree *currentNode;
+@property (retain, getter=getPreviousNode, setter=setPreviousNode:) CommonTree *previousNode;
+@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<TreeAdaptor> treeAdaptor;
+@property (retain, getter=getTokenStream, setter=setTokenStream:) id<TokenStream> tokenStream;
+@property (retain, getter=getNodeStack, setter=setNodeStack:) NSMutableArray *nodeStack;
+@property (retain, getter=getIndexStack, setter=setIndexStack:) NSMutableArray *indexStack;
+@property (retain, getter=getMarkers, setter=setMarkers:) PtrBuffer *markers;
+@property (assign, getter=getLastMarker, setter=setLastMarker:) NSInteger lastMarker;
+@property (assign, getter=getCurrentChildIndex, setter=setCurrentChildIndex:) NSInteger currentChildIndex;
+@property (assign, getter=getAbsoluteNodeIndex, setter=setAbsoluteNodeIndex:) NSInteger absoluteNodeIndex;
+@property (retain, getter=getLookahead, setter=setLookahead:) NSMutableArray *lookahead;
+@property (assign, getter=getHead, setter=setHead:) NSUInteger head;
+@property (assign, getter=getTail, setter=setTail:) NSUInteger tail;
+
+- (id) initWithTree:(CommonTree *)theTree;
+- (id) initWithTree:(CommonTree *)theTree treeAdaptor:(CommonTreeAdaptor *)theAdaptor;
+
+- (void) reset;
+
+#pragma mark ANTLRTreeNodeStream conformance
+
+- (id) LT:(NSInteger)k;
+- (id) treeSource;
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void)setTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor;
+- (id<TokenStream>) getTokenStream;
+- (void) setTokenStream:(id<TokenStream>)aTokenStream;	///< Added by subclass, not in protocol
+- (void) setUsesUniqueNavigationNodes:(BOOL)flag;
+
+- (id) nodeAtIndex:(NSUInteger) idx;
+
+- (NSString *) toString;
+- (NSString *) toStringWithRange:(NSRange) aRange;
+- (NSString *) toStringFromNode:(id)startNode toNode:(id)stopNode;
+
+#pragma mark ANTLRIntStream conformance
+- (void) consume;
+- (NSInteger) LA:(NSUInteger) i;
+- (NSUInteger) mark;
+- (NSUInteger) getIndex;
+- (void) rewind:(NSUInteger) marker;
+- (void) rewind;
+- (void) release:(NSUInteger) marker;
+- (void) seek:(NSUInteger) index;
+- (NSUInteger) size;
+
+#pragma mark Lookahead Handling
+- (void) addLookahead:(id<BaseTree>)aNode;
+- (NSUInteger) lookaheadSize;
+- (void) fillBufferWithLookahead:(NSInteger)k;
+- (id) nextObject;
+
+#pragma mark Node visiting
+- (CommonTree *) handleRootNode;
+- (CommonTree *) visitChild:(NSInteger)childNumber;
+- (void) walkBackToMostRecentNodeWithUnvisitedChildren;
+- (void) addNavigationNodeWithType:(NSInteger)tokenType;
+
+#pragma mark Accessors
+- (CommonTree *) root;
+- (void) setRoot: (CommonTree *) aRoot;
+
+@end
diff --git a/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStream.m b/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStream.m
new file mode 100644
index 0000000..8c76966
--- /dev/null
+++ b/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStream.m
@@ -0,0 +1,432 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#import "UnbufferedCommonTreeNodeStream.h"
+#import "UnbufferedCommonTreeNodeStreamState.h"
+#import "BaseTree.h"
+#import "Token.h"
+
+#define INITIAL_LOOKAHEAD_BUFFER_SIZE 5
+@implementation ANTLRUnbufferedCommonTreeNodeStream
+
+@synthesize root;
+@synthesize currentNode;
+@synthesize previousNode;
+@synthesize treeAdaptor;
+@synthesize tokenStream;
+@synthesize nodeStack;
+@synthesize indexStack;
+@synthesize markers;
+@synthesize lastMarker;
+@synthesize currentChildIndex;
+@synthesize absoluteNodeIndex;
+@synthesize lookahead;
+@synthesize head;
+@synthesize tail;
+
+- (id) initWithTree:(CommonTree *)theTree
+{
+	return [self initWithTree:theTree treeAdaptor:nil];
+}
+
+- (id) initWithTree:(CommonTree *)theTree treeAdaptor:(CommonTreeAdaptor *)theAdaptor
+{
+	if ((self = [super init]) != nil) {
+		[self setRoot:theTree];
+		if ( theAdaptor == nil ) 
+			[self setTreeAdaptor:[CommonTreeAdaptor newTreeAdaptor]];
+		else
+			[self setTreeAdaptor:theAdaptor];
+		nodeStack = [[NSMutableArray arrayWithCapacity:5] retain];
+		indexStack = [[NSMutableArray arrayWithCapacity:5] retain];
+		markers = [[PtrBuffer newPtrBufferWithLen:100] retain];
+        // [markers insertObject:[NSNull null] atIndex:0];	// markers is one based - maybe fix this later
+		lookahead = [NSMutableArray arrayWithCapacity:INITIAL_LOOKAHEAD_BUFFER_SIZE];	// lookahead is filled with [NSNull null] in -reset
+        [lookahead retain];
+		[self reset];
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+	[self setRoot:nil];
+	[self setTreeAdaptor:nil];
+	
+	[nodeStack release];	nodeStack = nil;
+	[indexStack release];	indexStack = nil;
+	[markers release];		markers = nil;
+	[lookahead release];	lookahead = nil;
+	
+	[super dealloc];
+}
+
+- (void) reset
+{
+	currentNode = root;
+	previousNode = nil;
+	currentChildIndex = -1;
+	absoluteNodeIndex = -1;
+	head = tail = 0;
+	[nodeStack removeAllObjects];
+	[indexStack removeAllObjects];
+	[markers removeAllObjects];
+    // [markers insertObject:[NSNull null] atIndex:0];	// markers is one based - maybe fix this later
+	[lookahead removeAllObjects];
+	// TODO: this is not ideal, but works for now. optimize later
+	int i;
+	for (i = 0; i < INITIAL_LOOKAHEAD_BUFFER_SIZE; i++)
+		[lookahead addObject:[NSNull null]];
+}
+
+
+#pragma mark ANTLRTreeNodeStream conformance
+
+- (id) LT:(NSInteger)k
+{
+	if (k == -1)
+		return previousNode;
+	if (k < 0)
+		@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-LT: looking back more than one node unsupported for unbuffered streams" userInfo:nil];
+	if (k == 0)
+		return BaseTree.INVALID_NODE;
+	[self fillBufferWithLookahead:k];
+	return [lookahead objectAtIndex:(head+k-1) % [lookahead count]];
+}
+
+- (id) treeSource
+{
+	return [self root];
+}
+
+- (id<TreeAdaptor>) getTreeAdaptor
+{
+	return treeAdaptor;
+}
+
+- (void)setTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor
+{
+    if (treeAdaptor != aTreeAdaptor) {
+        [aTreeAdaptor retain];
+        [treeAdaptor release];
+        treeAdaptor = aTreeAdaptor;
+    }
+}
+
+- (id<TokenStream>) getTokenStream
+{
+	return tokenStream;
+}
+
+- (void) setTokenStream:(id<TokenStream>)aTokenStream
+{
+	if (tokenStream != aTokenStream) {
+		[tokenStream release];
+		[aTokenStream retain];
+		tokenStream = aTokenStream;
+	}
+}
+
+- (void) setUsesUniqueNavigationNodes:(BOOL)flag
+{
+	shouldUseUniqueNavigationNodes = flag;
+}
+
+- (id) nodeAtIndex:(NSUInteger) idx
+{
+	@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-nodeAtIndex: unsupported for unbuffered streams" userInfo:nil];
+}
+
+- (NSString *) toString
+{
+	@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-toString unsupported for unbuffered streams" userInfo:nil];
+}
+
+- (NSString *) toStringWithRange:(NSRange) aRange
+{
+	@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-toString: unsupported for unbuffered streams" userInfo:nil];
+}
+
+- (NSString *) toStringFromNode:(id)startNode toNode:(id)stopNode
+{
+	@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-toStringFromNode:toNode: unsupported for unbuffered streams" userInfo:nil];
+}
+
+#pragma mark ANTLRIntStream conformance
+
+- (void) consume
+{
+	[self fillBufferWithLookahead:1];
+	absoluteNodeIndex++;
+	previousNode = [lookahead objectAtIndex:head];
+	head = (head+1) % [lookahead count];
+}
+
+- (NSInteger) LA:(NSUInteger) i
+{
+	CommonTree *node = [self LT:i];
+	if (!node) 
+		return TokenTypeInvalid;
+	int ttype = [node getType];
+	return ttype;
+}
+
+- (NSUInteger) mark
+{
+	ANTLRUnbufferedCommonTreeNodeStreamState *state = [[[ANTLRUnbufferedCommonTreeNodeStreamState alloc] init] retain];
+	[state setCurrentNode:currentNode];
+	[state setPreviousNode:previousNode];
+	[state setIndexStackSize:[indexStack count]];
+	[state setNodeStackSize:[nodeStack count]];
+	[state setCurrentChildIndex:currentChildIndex];
+	[state setAbsoluteNodeIndex:absoluteNodeIndex];
+	unsigned int lookaheadSize = [self lookaheadSize];
+	unsigned int k;
+	for ( k = 0; k < lookaheadSize; k++) {
+		[state addToLookahead:[self LT:k+1]];
+	}
+	[markers addObject:state];
+	//[state release];
+	return [markers count];
+}
+
+- (NSUInteger) getIndex
+{
+	return absoluteNodeIndex + 1;
+}
+
+- (void) rewind:(NSUInteger) marker
+{
+	if ( [markers count] < marker ) {
+		return;
+	}
+	ANTLRUnbufferedCommonTreeNodeStreamState *state = [markers objectAtIndex:marker];
+	[markers removeObjectAtIndex:marker];
+
+	absoluteNodeIndex = [state absoluteNodeIndex];
+	currentChildIndex = [state currentChildIndex];
+	currentNode = [state currentNode];
+	previousNode = [state previousNode];
+	// drop node and index stacks back to old size
+	[nodeStack removeObjectsInRange:NSMakeRange([state nodeStackSize], [nodeStack count]-[state nodeStackSize])];
+	[indexStack removeObjectsInRange:NSMakeRange([state indexStackSize], [indexStack count]-[state indexStackSize])];
+	
+	head = tail = 0; // wack lookahead buffer and then refill
+	[lookahead release];
+	lookahead = [[NSMutableArray alloc] initWithArray:[state lookahead]];
+	tail = [lookahead count];
+	// make some room after the restored lookahead, so that the above line is not a bug ;)
+	// this also ensures that a subsequent -addLookahead: will not immediately need to resize the buffer
+	[lookahead addObjectsFromArray:[NSArray arrayWithObjects:[NSNull null], [NSNull null], [NSNull null], [NSNull null], [NSNull null], nil]];
+}
+
+- (void) rewind
+{
+	[self rewind:[markers count]];
+}
+
+- (void) release:(NSUInteger) marker
+{
+	@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-release: unsupported for unbuffered streams" userInfo:nil];
+}
+
+- (void) seek:(NSUInteger) anIndex
+{
+	if ( anIndex < [self getIndex] )
+		@throw [NSException exceptionWithName:@"ANTLRTreeException" reason:@"-seek: backwards unsupported for unbuffered streams" userInfo:nil];
+	while ( [self getIndex] < anIndex ) {
+		[self consume];
+	}
+}
+
+- (NSUInteger) size
+{
+	return absoluteNodeIndex + 1;	// not entirely correct, but cheap.
+}
+
+
+#pragma mark Lookahead Handling
+- (void) addLookahead:(id<BaseTree>)aNode
+{
+	[lookahead replaceObjectAtIndex:tail withObject:aNode];
+	tail = (tail+1) % [lookahead count];
+	
+	if ( tail == head ) {
+		NSMutableArray *newLookahead = [[[NSMutableArray alloc] initWithCapacity:[lookahead count]*2] retain];
+		
+		NSRange headRange = NSMakeRange(head, [lookahead count]-head);
+		NSRange tailRange = NSMakeRange(0, tail);
+		
+		[newLookahead addObjectsFromArray:[lookahead objectsAtIndexes:[NSIndexSet indexSetWithIndexesInRange:headRange]]];
+		[newLookahead addObjectsFromArray:[lookahead objectsAtIndexes:[NSIndexSet indexSetWithIndexesInRange:tailRange]]];
+		
+		unsigned int i;
+		unsigned int lookaheadCount = [newLookahead count];
+		for (i = 0; i < lookaheadCount; i++)
+			[newLookahead addObject:[NSNull null]];
+		[lookahead release];
+		lookahead = newLookahead;
+		
+		head = 0;
+		tail = lookaheadCount;	// tail is the location the _next_ lookahead node will end up in, not the last element's idx itself!
+	}
+	
+}
+
+- (NSUInteger) lookaheadSize
+{
+	return tail < head
+		? ([lookahead count] - head + tail) 
+		: (tail - head);
+}
+
+- (void) fillBufferWithLookahead:(NSInteger)k
+{
+	unsigned int n = [self lookaheadSize];
+	unsigned int i;
+	id lookaheadObject = self; // any valid object would do.
+	for (i=1; i <= k-n && lookaheadObject != nil; i++) {
+		lookaheadObject = [self nextObject];
+	}
+}
+
+- (id) nextObject
+{
+	// NOTE: this could/should go into an NSEnumerator subclass for treenode streams.
+	if (currentNode == nil) {
+        if ( navigationNodeEOF == nil ) {
+            navigationNodeEOF = [[TreeNavigationNodeEOF alloc] init];
+        }
+		[self addLookahead:navigationNodeEOF];
+		return nil;
+	}
+	if (currentChildIndex == -1) {
+		return [self handleRootNode];
+	}
+	if (currentChildIndex < (NSInteger)[currentNode getChildCount]) {
+		return [self visitChild:currentChildIndex];
+	}
+	[self walkBackToMostRecentNodeWithUnvisitedChildren];
+	if (currentNode != nil) {
+		return [self visitChild:currentChildIndex];
+	}
+	
+	return nil;
+}	
+
+#pragma mark Node visiting
+- (CommonTree *) handleRootNode
+{
+	CommonTree *node = currentNode;
+	currentChildIndex = 0;
+	if ([node isNil]) {
+		node = [self visitChild:currentChildIndex];
+	} else {
+		[self addLookahead:node];
+		if ([currentNode getChildCount] == 0) {
+			currentNode = nil;
+		}
+	}
+	return node;
+}
+
+- (CommonTree *) visitChild:(NSInteger)childNumber
+{
+	CommonTree *node = nil;
+	
+	[nodeStack addObject:currentNode];
+	[indexStack addObject:[NSNumber numberWithInt:childNumber]];
+	if (childNumber == 0 && ![currentNode isNil])
+		[self addNavigationNodeWithType:TokenTypeDOWN];
+
+	currentNode = [currentNode getChild:childNumber];
+	currentChildIndex = 0;
+	node = currentNode;  // record node to return
+	[self addLookahead:node];
+	[self walkBackToMostRecentNodeWithUnvisitedChildren];
+	return node;
+}
+
+- (void) walkBackToMostRecentNodeWithUnvisitedChildren
+{
+	while (currentNode != nil && currentChildIndex >= (NSInteger)[currentNode getChildCount])
+	{
+		currentNode = (CommonTree *)[nodeStack lastObject];
+		[nodeStack removeLastObject];
+		currentChildIndex = [(NSNumber *)[indexStack lastObject] intValue];
+		[indexStack removeLastObject];
+		currentChildIndex++; // move to next child
+		if (currentChildIndex >= (NSInteger)[currentNode getChildCount]) {
+			if (![currentNode isNil]) {
+				[self addNavigationNodeWithType:TokenTypeUP];
+			}
+			if (currentNode == root) { // we done yet?
+				currentNode = nil;
+			}
+		}
+	}
+	
+}
+
+- (void) addNavigationNodeWithType:(NSInteger)tokenType
+{
+	// TODO: this currently ignores shouldUseUniqueNavigationNodes.
+	switch (tokenType) {
+		case TokenTypeDOWN: {
+            if (navigationNodeDown == nil) {
+                navigationNodeDown = [[TreeNavigationNodeDown alloc] init];
+            }
+			[self addLookahead:navigationNodeDown];
+			break;
+		}
+		case TokenTypeUP: {
+            if (navigationNodeUp == nil) {
+                navigationNodeUp = [[TreeNavigationNodeUp alloc] init];
+            }
+			[self addLookahead:navigationNodeUp];
+			break;
+		}
+	}
+}
+
+#pragma mark Accessors
+- (CommonTree *) root
+{
+    return root; 
+}
+
+- (void) setRoot: (CommonTree *) aRoot
+{
+    if (root != aRoot) {
+        [aRoot retain];
+        [root release];
+        root = aRoot;
+    }
+}
+
+@end
+
diff --git a/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStreamState.h b/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStreamState.h
new file mode 100644
index 0000000..280242b
--- /dev/null
+++ b/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStreamState.h
@@ -0,0 +1,66 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Cocoa/Cocoa.h>
+#import "CommonTree.h"
+
+@interface ANTLRUnbufferedCommonTreeNodeStreamState : NSObject {
+	CommonTree *currentNode;
+	CommonTree *previousNode;
+
+	int currentChildIndex;
+	int absoluteNodeIndex;
+	unsigned int nodeStackSize;
+	unsigned int indexStackSize;
+	
+	NSMutableArray *lookahead;
+}
+
+- (CommonTree *) currentNode;
+- (void) setCurrentNode: (CommonTree *) aCurrentNode;
+
+- (CommonTree *) previousNode;
+- (void) setPreviousNode: (CommonTree *) aPreviousNode;
+
+- (NSInteger) currentChildIndex;
+- (void) setCurrentChildIndex: (NSInteger) aCurrentChildIndex;
+
+- (NSInteger) absoluteNodeIndex;
+- (void) setAbsoluteNodeIndex: (NSInteger) anAbsoluteNodeIndex;
+
+- (NSUInteger) nodeStackSize;
+- (void) setNodeStackSize: (NSUInteger) aNodeStackSize;
+
+- (NSUInteger) indexStackSize;
+- (void) setIndexStackSize: (NSUInteger) anIndexStackSize;
+
+- (NSMutableArray *) lookahead;
+- (void) setLookahead: (NSMutableArray *) aLookahead;
+
+- (void) addToLookahead: (id)lookaheadObject;
+- (void) removeFromLookahead: (id)lookaheadObject;
+
+@end
diff --git a/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStreamState.m b/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStreamState.m
new file mode 100644
index 0000000..e5fa5ad
--- /dev/null
+++ b/runtime/ObjC/Framework/UnbufferedCommonTreeNodeStreamState.m
@@ -0,0 +1,140 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "UnbufferedCommonTreeNodeStreamState.h"
+
+
+@implementation ANTLRUnbufferedCommonTreeNodeStreamState
+
+- (id) init
+{
+	if ((self = [super init]) != nil) {
+		lookahead = [[NSMutableArray alloc] init];
+	}
+	return self;
+}
+
+- (void) dealloc
+{
+	[self setLookahead:nil];
+	[self setCurrentNode:nil];
+	[self setPreviousNode:nil];
+	[super dealloc];
+}
+
+- (CommonTree *) currentNode
+{
+    return currentNode; 
+}
+
+- (void) setCurrentNode: (CommonTree *) aCurrentNode
+{
+    if (currentNode != aCurrentNode) {
+        [aCurrentNode retain];
+        [currentNode release];
+        currentNode = aCurrentNode;
+    }
+}
+
+- (CommonTree *) previousNode
+{
+    return previousNode; 
+}
+
+- (void) setPreviousNode: (CommonTree *) aPreviousNode
+{
+    if (previousNode != aPreviousNode) {
+        [aPreviousNode retain];
+        [previousNode release];
+        previousNode = aPreviousNode;
+    }
+}
+
+- (NSInteger) currentChildIndex
+{
+    return currentChildIndex;
+}
+
+- (void) setCurrentChildIndex: (NSInteger) aCurrentChildIndex
+{
+    currentChildIndex = aCurrentChildIndex;
+}
+
+- (NSInteger) absoluteNodeIndex
+{
+    return absoluteNodeIndex;
+}
+
+- (void) setAbsoluteNodeIndex: (NSInteger) anAbsoluteNodeIndex
+{
+    absoluteNodeIndex = anAbsoluteNodeIndex;
+}
+
+- (NSUInteger) nodeStackSize
+{
+    return nodeStackSize;
+}
+
+- (void) setNodeStackSize: (NSUInteger) aNodeStackSize
+{
+    nodeStackSize = aNodeStackSize;
+}
+
+- (NSUInteger) indexStackSize
+{
+    return indexStackSize;
+}
+
+- (void) setIndexStackSize: (NSUInteger) anIndexStackSize
+{
+    indexStackSize = anIndexStackSize;
+}
+
+- (NSMutableArray *) lookahead
+{
+    return lookahead; 
+}
+
+- (void) setLookahead: (NSMutableArray *) aLookahead
+{
+    if (lookahead != aLookahead) {
+        [aLookahead retain];
+        [lookahead release];
+        lookahead = aLookahead;
+    }
+}
+
+- (void) addToLookahead: (id)lookaheadObject
+{
+    [[self lookahead] addObject: lookaheadObject];
+}
+- (void) removeFromLookahead: (id)lookaheadObject
+{
+    [[self lookahead] removeObject: lookaheadObject];
+}
+
+
+@end
diff --git a/runtime/ObjC/Framework/UnbufferedTokenStream.h b/runtime/ObjC/Framework/UnbufferedTokenStream.h
new file mode 100644
index 0000000..755c8bd
--- /dev/null
+++ b/runtime/ObjC/Framework/UnbufferedTokenStream.h
@@ -0,0 +1,62 @@
+//
+//  UnbufferedTokenStream.h
+//  ANTLR
+//
+//  Created by Alan Condit on 7/12/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "RuntimeException.h"
+#import "TokenSource.h"
+#import "LookaheadStream.h"
+#import "Token.h"
+
+@interface UnbufferedTokenStream : LookaheadStream {
+	id<TokenSource> tokenSource;
+    NSInteger tokenIndex; // simple counter to set token index in tokens
+    NSInteger channel;
+}
+
+@property (retain, getter=getTokenSource, setter=setTokenSource:) id<TokenSource> tokenSource;
+@property (getter=getTokenIndex, setter=setTokenIndex:) NSInteger tokenIndex;
+@property (getter=channel, setter=setChannel:) NSInteger channel;
+
++ (UnbufferedTokenStream *)newUnbufferedTokenStream:(id<TokenSource>)aTokenSource;
+- (id) init;
+- (id) initWithTokenSource:(id<TokenSource>)aTokenSource;
+
+- (id<Token>)nextElement;
+- (BOOL)isEOF:(id<Token>) aToken;
+- (id<TokenSource>)getTokenSource;
+- (NSString *)toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop;
+- (NSString *)toStringFromToken:(id<Token>)aStart ToEnd:(id<Token>)aStop;
+- (NSInteger)LA:(NSInteger)anIdx;
+- (id<Token>)objectAtIndex:(NSInteger)anIdx;
+- (NSString *)getSourceName;
+
+
+@end
diff --git a/runtime/ObjC/Framework/UnbufferedTokenStream.m b/runtime/ObjC/Framework/UnbufferedTokenStream.m
new file mode 100644
index 0000000..8f755d4
--- /dev/null
+++ b/runtime/ObjC/Framework/UnbufferedTokenStream.m
@@ -0,0 +1,118 @@
+//
+//  UnbufferedTokenStream.m
+//  ANTLR
+//
+//  Created by Alan Condit on 7/12/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "UnbufferedTokenStream.h"
+
+@implementation UnbufferedTokenStream
+
+@synthesize tokenSource;
+@synthesize tokenIndex;
+@synthesize channel;
+
++ (UnbufferedTokenStream *)newUnbufferedTokenStream:(id<TokenSource>)aTokenSource
+{
+    return [[UnbufferedTokenStream alloc] initWithTokenSource:aTokenSource];
+}
+
+- (id) init
+{
+    if ((self = [super init]) != nil) {
+        tokenSource = nil;
+        tokenIndex = 0;
+        channel = TokenChannelDefault;
+    }
+    return self;
+}
+
+- (id) initWithTokenSource:(id<TokenSource>)aTokenSource
+{
+    if ((self = [super init]) != nil) {
+        tokenSource = aTokenSource;
+        if ( tokenSource ) [tokenSource retain];
+        tokenIndex = 0;
+        channel = TokenChannelDefault;
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in UnbufferedTokenStream" );
+#endif
+    if ( tokenSource ) [tokenSource release];
+    [super dealloc];
+}
+
+- (id<Token>)nextElement
+{
+    id<Token> t = [tokenSource nextToken];
+    [t setTokenIndex:tokenIndex++];
+    return t;
+}
+
+- (BOOL)isEOF:(id<Token>)aToken
+{
+    return (aToken.type == TokenTypeEOF);
+}    
+
+- (id<TokenSource>)getTokenSource
+{
+    return tokenSource;
+}
+
+- (NSString *)toStringFromStart:(NSInteger)aStart ToEnd:(NSInteger)aStop
+{
+    return @"n/a";
+}
+
+- (NSString *)toStringFromToken:(id<Token>)aStart ToEnd:(id<Token>)aStop
+{
+    return @"n/a";
+}
+
+- (NSInteger)LA:(NSInteger)anIdx
+{
+    return [[self LT:anIdx] type];
+}
+
+- (id<Token>)objectAtIndex:(NSInteger)anIdx
+{
+    @throw [RuntimeException newException:@"Absolute token indexes are meaningless in an unbuffered stream"];
+}
+
+- (NSString *)getSourceName
+{
+    return [tokenSource getSourceName];
+}
+
+
+@end
diff --git a/runtime/ObjC/Framework/UniqueIDMap.h b/runtime/ObjC/Framework/UniqueIDMap.h
new file mode 100644
index 0000000..3efc0fd
--- /dev/null
+++ b/runtime/ObjC/Framework/UniqueIDMap.h
@@ -0,0 +1,64 @@
+//
+//  UniqueIDMap.h
+//  ANTLR
+//
+//  Created by Alan Condit on 7/7/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "PtrBuffer.h"
+#import "NodeMapElement.h"
+
+#define SUCCESS             0
+#define FAILURE             -1
+#define HASHSIZE            101
+#define HBUFSIZE            0x2000
+
+@interface UniqueIDMap : PtrBuffer {
+    NSInteger lastHash;
+}
+
+@property (getter=getLastHash, setter=setLastHash:) NSInteger lastHash;
+
++ (id)newUniqueIDMap;
++ (id)newUniqueIDMapWithLen:(NSInteger)aHashSize;
+
+- (id)init;
+- (id)initWithLen:(NSInteger)cnt;
+- (void)dealloc;
+// Instance Methods
+- (NSInteger)count;
+- (NSInteger)size;
+/* clear -- reinitialize the maplist array */
+- (void) clear;
+
+- (void)deleteUniqueIDMap:(NodeMapElement *)np;
+- (void)delete_chain:(NodeMapElement *)np;
+- (id)getNode:(id<BaseTree>)aNode;
+- (void)putID:(id)anID Node:(id<BaseTree>)aNode;
+
+@end
diff --git a/runtime/ObjC/Framework/UniqueIDMap.m b/runtime/ObjC/Framework/UniqueIDMap.m
new file mode 100644
index 0000000..daafbd0
--- /dev/null
+++ b/runtime/ObjC/Framework/UniqueIDMap.m
@@ -0,0 +1,184 @@
+//
+//  UniqueIDMap.m
+//  ANTLR
+//
+//  Created by Alan Condit on 7/7/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "UniqueIDMap.h"
+#import "Tree.h"
+
+@implementation UniqueIDMap
+@synthesize lastHash;
+
++(id)newUniqueIDMap
+{
+    UniqueIDMap *aNewUniqueIDMap;
+    
+    aNewUniqueIDMap = [[UniqueIDMap alloc] init];
+	return( aNewUniqueIDMap );
+}
+
++(id)newUniqueIDMapWithLen:(NSInteger)aBuffSize
+{
+    UniqueIDMap *aNewUniqueIDMap;
+    
+    aNewUniqueIDMap = [[UniqueIDMap alloc] initWithLen:aBuffSize];
+	return( aNewUniqueIDMap );
+}
+
+-(id)init
+{
+    NSInteger idx;
+    
+	if ((self = [super initWithLen:HASHSIZE]) != nil) {
+		fNext = nil;
+        for( idx = 0; idx < HASHSIZE; idx++ ) {
+            ptrBuffer[idx] = nil;
+        }
+	}
+    return( self );
+}
+
+-(id)initWithLen:(NSInteger)aBuffSize
+{
+	if ((self = [super initWithLen:aBuffSize]) != nil) {
+	}
+    return( self );
+}
+
+-(void)dealloc
+{
+#ifdef DEBUG_DEALLOC
+    NSLog( @"called dealloc in UniqueIDMap" );
+#endif
+    NodeMapElement *tmp, *rtmp;
+    NSInteger idx;
+	
+    if ( self.fNext != nil ) {
+        for( idx = 0; idx < HASHSIZE; idx++ ) {
+            tmp = ptrBuffer[idx];
+            while ( tmp ) {
+                rtmp = tmp;
+                tmp = (NodeMapElement *)tmp.fNext;
+                [rtmp release];
+            }
+        }
+    }
+	[super dealloc];
+}
+
+-(void)deleteUniqueIDMap:(NodeMapElement *)np
+{
+    NodeMapElement *tmp, *rtmp;
+    NSInteger idx;
+    
+    if ( self.fNext != nil ) {
+        for( idx = 0; idx < HASHSIZE; idx++ ) {
+            tmp = ptrBuffer[idx];
+            while ( tmp ) {
+                rtmp = tmp;
+                tmp = tmp.fNext;
+                [rtmp release];
+            }
+        }
+    }
+}
+
+- (void)clear
+{
+    NodeMapElement *tmp, *rtmp;
+    NSInteger idx;
+    
+    for( idx = 0; idx < HASHSIZE; idx++ ) {
+        tmp = ptrBuffer[idx];
+        while ( tmp ) {
+            rtmp = tmp;
+            tmp = [tmp getfNext];
+            [rtmp release];
+        }
+        ptrBuffer[idx] = nil;
+    }
+}
+
+- (NSInteger)count
+{
+    id anElement;
+    NSInteger aCnt = 0;
+    
+    for (int i = 0; i < BuffSize; i++) {
+        if ((anElement = ptrBuffer[i]) != nil) {
+            aCnt += (NSInteger)[anElement count];
+        }
+    }
+    return aCnt;
+}
+
+- (NSInteger)size
+{
+    return BuffSize;
+}
+
+-(void)delete_chain:(NodeMapElement *)np
+{
+    if ( np.fNext != nil )
+		[self delete_chain:np.fNext];
+	[np release];
+}
+
+- (id)getNode:(id<BaseTree>)aNode
+{
+    NodeMapElement *np;
+    NSInteger idx;
+    
+    // hash into the same bucket range that putID:Node: uses; otherwise entries stored
+    // for token types >= HASHSIZE could never be found (or could index past the buffer)
+    idx = [(id<BaseTree>)aNode type];
+    idx %= HASHSIZE;
+    np = ptrBuffer[idx];
+    while ( np != nil ) {
+        if (np.node == aNode) {
+            return( np.index );
+        }
+        np = np.fNext;
+    }
+    return( nil );
+}
+
+- (void)putID:(id)anID Node:(id<BaseTree>)aNode
+{
+    NodeMapElement *np, *np1;
+    NSInteger idx;
+    
+    idx = [(id<BaseTree>)aNode type];
+    idx %= HASHSIZE;
+    np = [[NodeMapElement newNodeMapElementWithIndex:anID Node:aNode] retain];
+    np1 = ptrBuffer[idx];
+    np.fNext = np1;
+    ptrBuffer[idx] = np;
+    return;
+}
+
+
+@end
diff --git a/runtime/ObjC/Framework/UnwantedTokenException.h b/runtime/ObjC/Framework/UnwantedTokenException.h
new file mode 100644
index 0000000..202b28c
--- /dev/null
+++ b/runtime/ObjC/Framework/UnwantedTokenException.h
@@ -0,0 +1,47 @@
+//
+//  UnwantedTokenException.h
+//  ANTLR
+//
+//  Created by Alan Condit on 6/8/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <Foundation/Foundation.h>
+#import "MismatchedTokenException.h"
+
+@interface UnwantedTokenException : MismatchedTokenException {
+
+}
++ (UnwantedTokenException *)newException;
++ (UnwantedTokenException *)newException:(NSInteger)expected Stream:(id<IntStream>)anInput;
+
+- (id) init;
+- (id) initWithStream:(id<IntStream>)anInput And:(NSInteger)expected;
+- (id<Token>)getUnexpectedToken;
+- (NSString *)toString;
+
+@end
diff --git a/runtime/ObjC/Framework/UnwantedTokenException.m b/runtime/ObjC/Framework/UnwantedTokenException.m
new file mode 100644
index 0000000..8a9f50d
--- /dev/null
+++ b/runtime/ObjC/Framework/UnwantedTokenException.m
@@ -0,0 +1,80 @@
+//
+//  UnwantedTokenException.m
+//  ANTLR
+//
+//  Created by Alan Condit on 6/8/10.
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "UnwantedTokenException.h"
+
+@implementation UnwantedTokenException : MismatchedTokenException
+	/** Used for remote debugger deserialization */
++ (UnwantedTokenException *)newException
+{
+    return [[UnwantedTokenException alloc] init];
+}
+    
++ (UnwantedTokenException *)newException:(NSInteger)expected Stream:(id<IntStream>)anInput
+{
+    return [[UnwantedTokenException alloc] initWithStream:anInput And:expected];
+}
+
+- (id) init
+{
+    self = [super initWithStream:input];
+    if (self) {
+    }
+    return self;
+}
+     
+- (id) initWithStream:(id<IntStream>)anInput And:(NSInteger)expected
+{
+    self = [super initWithStream:anInput];
+    if (self) {
+        expecting = expected;
+    }
+    return self;
+}
+    
+- (id<Token>)getUnexpectedToken
+{
+    return token;
+}
+    
+- (NSString *)toString
+{
+    NSString *exp1 = [NSString stringWithFormat:@", expected %ld", (long)expecting];
+    if ( expecting == TokenTypeInvalid ) {
+        exp1 = @"";
+    }
+    if ( token==nil ) {
+        return [NSString stringWithFormat:@"UnwantedTokenException(found=nil%@)", exp1];
+    }
+    return [NSString stringWithFormat:@"UnwantedTokenException(found=%@%@)", token.text, exp1];
+}
+
+@end
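+
+// Usage sketch (illustrative, not part of the runtime): this exception is typically
+// raised by the recognizer's error recovery when an extra token can simply be deleted,
+// but it can also be built directly for tests. `tokens` and the expected type 11 are
+// placeholders; only the constructor and toString are declared in this file.
+//
+//     UnwantedTokenException *ute = [UnwantedTokenException newException:11 Stream:tokens];
+//     NSLog(@"%@", [ute toString]);  // e.g. "UnwantedTokenException(found=nil, expected 11)"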
diff --git a/runtime/ObjC/Framework/antlr3.h b/runtime/ObjC/Framework/antlr3.h
new file mode 100644
index 0000000..80bc08b
--- /dev/null
+++ b/runtime/ObjC/Framework/antlr3.h
@@ -0,0 +1,118 @@
+// [The "BSD licence"]
+// Copyright (c) 2006-2007 Kay Roepke 2010 Alan Condit
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <ANTLR/ACNumber.h>
+#import <ANTLR/ACBTree.h>
+#import <ANTLR/AMutableArray.h>
+#import <ANTLR/AMutableDictionary.h>
+#import <ANTLR/ANTLRBitSet.h>
+#import <ANTLR/ANTLRError.h>
+#import <ANTLR/ANTLRFileStream.h>
+#import <ANTLR/ANTLRInputStream.h>
+#import <ANTLR/ANTLRReaderStream.h>
+#import <ANTLR/ANTLRStringStream.h>
+#import <ANTLR/ArrayIterator.h>
+#import <ANTLR/BaseMapElement.h>
+#import <ANTLR/BaseRecognizer.h>
+#import <ANTLR/BaseStack.h>
+#import <ANTLR/BaseTree.h>
+#import <ANTLR/BaseTreeAdaptor.h>
+#import <ANTLR/BufferedTokenStream.h>
+#import <ANTLR/BufferedTreeNodeStream.h>
+#import <ANTLR/CharStream.h>
+#import <ANTLR/CharStreamState.h>
+#import <ANTLR/CommonErrorNode.h>
+#import <ANTLR/CommonToken.h>
+#import <ANTLR/CommonTokenStream.h>
+#import <ANTLR/CommonTree.h>
+#import <ANTLR/CommonTreeAdaptor.h>
+#import <ANTLR/CommonTreeNodeStream.h>
+#import <ANTLR/DFA.h>
+#import <ANTLR/Debug.h>
+#import <ANTLR/DebugEventSocketProxy.h>
+#import <ANTLR/DebugEventListener.h>
+#import <ANTLR/DebugParser.h>
+#import <ANTLR/DebugTokenStream.h>
+#import <ANTLR/DebugTreeAdaptor.h>
+#import <ANTLR/DebugTreeNodeStream.h>
+#import <ANTLR/DebugTreeParser.h>
+#import <ANTLR/DoubleKeyMap.h>
+#import <ANTLR/EarlyExitException.h>
+#import <ANTLR/Entry.h>
+#import <ANTLR/FailedPredicateException.h>
+#import <ANTLR/FastQueue.h>
+#import <ANTLR/HashMap.h>
+#import <ANTLR/HashRule.h>
+#import <ANTLR/IntArray.h>
+#import <ANTLR/IntStream.h>
+#import <ANTLR/Lexer.h>
+#import <ANTLR/LexerRuleReturnScope.h>
+#import <ANTLR/LinkBase.h>
+#import <ANTLR/LinkedHashMap.h>
+#import <ANTLR/LinkedList.h>
+#import <ANTLR/LookaheadStream.h>
+#import <ANTLR/MapElement.h>
+#import <ANTLR/Map.h>
+#import <ANTLR/MismatchedNotSetException.h>
+#import <ANTLR/MismatchedRangeException.h>
+#import <ANTLR/MismatchedSetException.h>
+#import <ANTLR/MismatchedTokenException.h>
+#import <ANTLR/MismatchedTreeNodeException.h>
+#import <ANTLR/MissingTokenException.h>
+#import <ANTLR/NodeMapElement.h>
+#import <ANTLR/NoViableAltException.h>
+#import <ANTLR/Parser.h>
+#import <ANTLR/ParserRuleReturnScope.h>
+#import <ANTLR/PtrBuffer.h>
+#import <ANTLR/RecognitionException.h>
+#import <ANTLR/RecognizerSharedState.h>
+#import <ANTLR/RewriteRuleElementStream.h>
+#import <ANTLR/RewriteRuleNodeStream.h>
+#import <ANTLR/RewriteRuleSubtreeStream.h>
+#import <ANTLR/RewriteRuleTokenStream.h>
+#import <ANTLR/RuleMemo.h>
+#import <ANTLR/RuleStack.h>
+#import <ANTLR/RuleReturnScope.h>
+#import <ANTLR/RuntimeException.h>
+#import <ANTLR/StreamEnumerator.h>
+#import <ANTLR/SymbolStack.h>
+#import <ANTLR/Token+DebuggerSupport.h>
+#import <ANTLR/Token.h>
+#import <ANTLR/TokenRewriteStream.h>
+#import <ANTLR/TokenSource.h>
+#import <ANTLR/TokenStream.h>
+#import <ANTLR/Tree.h>
+#import <ANTLR/TreeAdaptor.h>
+#import <ANTLR/TreeException.h>
+#import <ANTLR/TreeIterator.h>
+#import <ANTLR/TreeNodeStream.h>
+#import <ANTLR/TreeParser.h>
+#import <ANTLR/TreeRuleReturnScope.h>
+#import <ANTLR/UnbufferedTokenStream.h>
+//#import <ANTLR/UnbufferedCommonTreeNodeStream.h>
+//#import <ANTLR/UnbufferedCommonTreeNodeStreamState.h>
+#import <ANTLR/UniqueIDMap.h>
+#import <ANTLR/UnwantedTokenException.h>
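+
+// Client code normally imports this single umbrella header instead of the individual
+// class headers above, e.g.
+//
+//     #import <ANTLR/antlr3.h>
+//
+// which brings the whole ObjC runtime (streams, token types, tree classes and the
+// exception hierarchy) into scope for a generated lexer or parser.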
diff --git a/runtime/ObjC/Framework/examples/LL-star/SimpleC.tokens b/runtime/ObjC/Framework/examples/LL-star/SimpleC.tokens
new file mode 100644
index 0000000..1d3555d
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/LL-star/SimpleC.tokens
@@ -0,0 +1,31 @@
+T__7=7
+T__8=8
+T__9=9
+T__10=10
+T__11=11
+T__12=12
+T__13=13
+T__14=14
+T__15=15
+T__16=16
+T__17=17
+T__18=18
+T__19=19
+T__20=20
+ID=4
+INT=5
+WS=6
+'('=7
+')'=8
+'+'=9
+','=10
+';'=11
+'<'=12
+'='=13
+'=='=14
+'char'=15
+'for'=16
+'int'=17
+'void'=18
+'{'=19
+'}'=20
diff --git a/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.h b/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.h
new file mode 100644
index 0000000..832311d
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.h
@@ -0,0 +1,67 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g 2012-02-16 17:39:19
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* Start cyclicDFAInterface */
+
+#pragma mark Rule return scopes Interface start
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__7 7
+#define T__8 8
+#define T__9 9
+#define T__10 10
+#define T__11 11
+#define T__12 12
+#define T__13 13
+#define T__14 14
+#define T__15 15
+#define T__16 16
+#define T__17 17
+#define T__18 18
+#define T__19 19
+#define T__20 20
+#define ID 4
+#define INT 5
+#define WS 6
+/* interface lexer class */
+@interface SimpleCLexer : Lexer { // line 283
+/* ObjC start of actions.lexer.memVars */
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (SimpleCLexer *)newSimpleCLexerWithCharStream:(id<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+/* ObjC end actions.lexer.methodsDecl */
+- (void) mT__7 ; 
+- (void) mT__8 ; 
+- (void) mT__9 ; 
+- (void) mT__10 ; 
+- (void) mT__11 ; 
+- (void) mT__12 ; 
+- (void) mT__13 ; 
+- (void) mT__14 ; 
+- (void) mT__15 ; 
+- (void) mT__16 ; 
+- (void) mT__17 ; 
+- (void) mT__18 ; 
+- (void) mT__19 ; 
+- (void) mT__20 ; 
+- (void) mID ; 
+- (void) mINT ; 
+- (void) mWS ; 
+- (void) mTokens ; 
+
+@end /* end of SimpleCLexer interface */
+
diff --git a/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.m b/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.m
new file mode 100644
index 0000000..47a7f5a
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/LL-star/SimpleCLexer.m
@@ -0,0 +1,1213 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g
+ *     -                            On : 2012-02-16 17:39:19
+ *     -                 for the lexer : SimpleCLexerLexer
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g 2012-02-16 17:39:19
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "SimpleCLexer.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+/** As per Terence: No returns for lexer rules! */
+@implementation SimpleCLexer // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (SimpleCLexer *)newSimpleCLexerWithCharStream:(id<CharStream>)anInput
+{
+    return [[SimpleCLexer alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:18+1]];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+
+/* Start of Rules */
+// $ANTLR start "T__7"
+- (void) mT__7
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__7;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:7:6: ( '(' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:7:8: '(' // alt
+        {
+
+
+        [self matchChar:'(']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__7" */
+// $ANTLR start "T__8"
+- (void) mT__8
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__8;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:8:6: ( ')' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:8:8: ')' // alt
+        {
+
+
+        [self matchChar:')']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__8" */
+// $ANTLR start "T__9"
+- (void) mT__9
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__9;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:9:6: ( '+' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:9:8: '+' // alt
+        {
+
+
+        [self matchChar:'+']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__9" */
+// $ANTLR start "T__10"
+- (void) mT__10
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__10;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:10:7: ( ',' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:10:9: ',' // alt
+        {
+
+
+        [self matchChar:',']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__10" */
+// $ANTLR start "T__11"
+- (void) mT__11
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__11;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:11:7: ( ';' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:11:9: ';' // alt
+        {
+
+
+        [self matchChar:';']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__11" */
+// $ANTLR start "T__12"
+- (void) mT__12
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__12;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:12:7: ( '<' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:12:9: '<' // alt
+        {
+
+
+        [self matchChar:'<']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__12" */
+// $ANTLR start "T__13"
+- (void) mT__13
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__13;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:13:7: ( '=' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:13:9: '=' // alt
+        {
+
+
+        [self matchChar:'=']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__13" */
+// $ANTLR start "T__14"
+- (void) mT__14
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__14;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:14:7: ( '==' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:14:9: '==' // alt
+        {
+
+
+        [self matchString:@"=="]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__14" */
+// $ANTLR start "T__15"
+- (void) mT__15
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__15;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:15:7: ( 'char' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:15:9: 'char' // alt
+        {
+
+
+        [self matchString:@"char"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__15" */
+// $ANTLR start "T__16"
+- (void) mT__16
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__16;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:16:7: ( 'for' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:16:9: 'for' // alt
+        {
+
+
+        [self matchString:@"for"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__16" */
+// $ANTLR start "T__17"
+- (void) mT__17
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__17;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:17:7: ( 'int' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:17:9: 'int' // alt
+        {
+
+
+        [self matchString:@"int"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__17" */
+// $ANTLR start "T__18"
+- (void) mT__18
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__18;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:18:7: ( 'void' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:18:9: 'void' // alt
+        {
+
+
+        [self matchString:@"void"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__18" */
+// $ANTLR start "T__19"
+- (void) mT__19
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__19;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:19:7: ( '{' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:19:9: '{' // alt
+        {
+
+
+        [self matchChar:'{']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__19" */
+// $ANTLR start "T__20"
+- (void) mT__20
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__20;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:20:7: ( '}' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:20:9: '}' // alt
+        {
+
+
+        [self matchChar:'}']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__20" */
+// $ANTLR start "ID"
+- (void) mID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = ID;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:94:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:94:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* // alt
+        {
+
+        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+            [input consume];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            [self recover:mse];
+            @throw mse;
+        }
+
+         
+
+        do {
+            NSInteger alt1=2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= '0' && LA1_0 <= '9')||(LA1_0 >= 'A' && LA1_0 <= 'Z')||LA1_0=='_'||(LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop1;
+            }
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "ID" */
+// $ANTLR start "INT"
+- (void) mINT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = INT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:97:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:97:7: ( '0' .. '9' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:97:7: ( '0' .. '9' )+ // positiveClosureBlock
+        NSInteger cnt2 = 0;
+        do {
+            NSInteger alt2 = 2;
+            NSInteger LA2_0 = [input LA:1];
+            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
+                alt2=1;
+            }
+
+
+            switch (alt2) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt2 >= 1 )
+                        goto loop2;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:2];
+                    @throw eee;
+            }
+            cnt2++;
+        } while (YES);
+        loop2: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "INT" */
+// $ANTLR start "WS"
+- (void) mWS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = WS;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:100:5: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:100:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:100:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // positiveClosureBlock
+        NSInteger cnt3 = 0;
+        do {
+            NSInteger alt3 = 2;
+            NSInteger LA3_0 = [input LA:1];
+            if ( ((LA3_0 >= '\t' && LA3_0 <= '\n')||LA3_0=='\r'||LA3_0==' ') ) {
+                alt3=1;
+            }
+
+
+            switch (alt3) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == '\r'||[input LA:1] == ' ') {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt3 >= 1 )
+                        goto loop3;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:3];
+                    @throw eee;
+            }
+            cnt3++;
+        } while (YES);
+        loop3: ;
+
+         
+
+         _channel=HIDDEN; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "WS" */
+- (void) mTokens
+{
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:8: ( T__7 | T__8 | T__9 | T__10 | T__11 | T__12 | T__13 | T__14 | T__15 | T__16 | T__17 | T__18 | T__19 | T__20 | ID | INT | WS ) //ruleblock
+    NSInteger alt4=17;
+    unichar charLA4 = [input LA:1];
+    switch (charLA4) {
+        case '(': ;
+            {
+            alt4=1;
+            }
+            break;
+        case ')': ;
+            {
+            alt4=2;
+            }
+            break;
+        case '+': ;
+            {
+            alt4=3;
+            }
+            break;
+        case ',': ;
+            {
+            alt4=4;
+            }
+            break;
+        case ';': ;
+            {
+            alt4=5;
+            }
+            break;
+        case '<': ;
+            {
+            alt4=6;
+            }
+            break;
+        case '=': ;
+            {
+            NSInteger LA4_7 = [input LA:2];
+
+            if ( (LA4_7=='=') ) {
+                alt4=8;
+            }
+            else {
+                alt4 = 7;
+            }
+            }
+            break;
+        case 'c': ;
+            {
+            NSInteger LA4_8 = [input LA:2];
+
+            if ( (LA4_8=='h') ) {
+                NSInteger LA4_19 = [input LA:3];
+
+                if ( (LA4_19=='a') ) {
+                    NSInteger LA4_23 = [input LA:4];
+
+                    if ( (LA4_23=='r') ) {
+                        NSInteger LA4_27 = [input LA:5];
+
+                        if ( ((LA4_27 >= '0' && LA4_27 <= '9')||(LA4_27 >= 'A' && LA4_27 <= 'Z')||LA4_27=='_'||(LA4_27 >= 'a' && LA4_27 <= 'z')) ) {
+                            alt4=15;
+                        }
+                        else {
+                            alt4 = 9;
+                        }
+                    }
+                    else {
+                        alt4 = 15;
+                    }
+                }
+                else {
+                    alt4 = 15;
+                }
+            }
+            else {
+                alt4 = 15;
+            }
+            }
+            break;
+        case 'f': ;
+            {
+            NSInteger LA4_9 = [input LA:2];
+
+            if ( (LA4_9=='o') ) {
+                NSInteger LA4_20 = [input LA:3];
+
+                if ( (LA4_20=='r') ) {
+                    NSInteger LA4_24 = [input LA:4];
+
+                    if ( ((LA4_24 >= '0' && LA4_24 <= '9')||(LA4_24 >= 'A' && LA4_24 <= 'Z')||LA4_24=='_'||(LA4_24 >= 'a' && LA4_24 <= 'z')) ) {
+                        alt4=15;
+                    }
+                    else {
+                        alt4 = 10;
+                    }
+                }
+                else {
+                    alt4 = 15;
+                }
+            }
+            else {
+                alt4 = 15;
+            }
+            }
+            break;
+        case 'i': ;
+            {
+            NSInteger LA4_10 = [input LA:2];
+
+            if ( (LA4_10=='n') ) {
+                NSInteger LA4_21 = [input LA:3];
+
+                if ( (LA4_21=='t') ) {
+                    NSInteger LA4_25 = [input LA:4];
+
+                    if ( ((LA4_25 >= '0' && LA4_25 <= '9')||(LA4_25 >= 'A' && LA4_25 <= 'Z')||LA4_25=='_'||(LA4_25 >= 'a' && LA4_25 <= 'z')) ) {
+                        alt4=15;
+                    }
+                    else {
+                        alt4 = 11;
+                    }
+                }
+                else {
+                    alt4 = 15;
+                }
+            }
+            else {
+                alt4 = 15;
+            }
+            }
+            break;
+        case 'v': ;
+            {
+            NSInteger LA4_11 = [input LA:2];
+
+            if ( (LA4_11=='o') ) {
+                NSInteger LA4_22 = [input LA:3];
+
+                if ( (LA4_22=='i') ) {
+                    NSInteger LA4_26 = [input LA:4];
+
+                    if ( (LA4_26=='d') ) {
+                        NSInteger LA4_30 = [input LA:5];
+
+                        if ( ((LA4_30 >= '0' && LA4_30 <= '9')||(LA4_30 >= 'A' && LA4_30 <= 'Z')||LA4_30=='_'||(LA4_30 >= 'a' && LA4_30 <= 'z')) ) {
+                            alt4=15;
+                        }
+                        else {
+                            alt4 = 12;
+                        }
+                    }
+                    else {
+                        alt4 = 15;
+                    }
+                }
+                else {
+                    alt4 = 15;
+                }
+            }
+            else {
+                alt4 = 15;
+            }
+            }
+            break;
+        case '{': ;
+            {
+            alt4=13;
+            }
+            break;
+        case '}': ;
+            {
+            alt4=14;
+            }
+            break;
+        case 'A': ;
+        case 'B': ;
+        case 'C': ;
+        case 'D': ;
+        case 'E': ;
+        case 'F': ;
+        case 'G': ;
+        case 'H': ;
+        case 'I': ;
+        case 'J': ;
+        case 'K': ;
+        case 'L': ;
+        case 'M': ;
+        case 'N': ;
+        case 'O': ;
+        case 'P': ;
+        case 'Q': ;
+        case 'R': ;
+        case 'S': ;
+        case 'T': ;
+        case 'U': ;
+        case 'V': ;
+        case 'W': ;
+        case 'X': ;
+        case 'Y': ;
+        case 'Z': ;
+        case '_': ;
+        case 'a': ;
+        case 'b': ;
+        case 'd': ;
+        case 'e': ;
+        case 'g': ;
+        case 'h': ;
+        case 'j': ;
+        case 'k': ;
+        case 'l': ;
+        case 'm': ;
+        case 'n': ;
+        case 'o': ;
+        case 'p': ;
+        case 'q': ;
+        case 'r': ;
+        case 's': ;
+        case 't': ;
+        case 'u': ;
+        case 'w': ;
+        case 'x': ;
+        case 'y': ;
+        case 'z': ;
+            {
+            alt4=15;
+            }
+            break;
+        case '0': ;
+        case '1': ;
+        case '2': ;
+        case '3': ;
+        case '4': ;
+        case '5': ;
+        case '6': ;
+        case '7': ;
+        case '8': ;
+        case '9': ;
+            {
+            alt4=16;
+            }
+            break;
+        case '\t': ;
+        case '\n': ;
+        case '\r': ;
+        case ' ': ;
+            {
+            alt4=17;
+            }
+            break;
+
+    default: ;
+        NoViableAltException *nvae = [NoViableAltException newException:4 state:0 stream:input];
+        nvae.c = charLA4;
+        @throw nvae;
+
+    }
+
+    switch (alt4) {
+        case 1 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:10: T__7 // alt
+            {
+
+
+            [self mT__7]; 
+
+
+             
+            }
+            break;
+        case 2 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:15: T__8 // alt
+            {
+
+
+            [self mT__8]; 
+
+
+             
+            }
+            break;
+        case 3 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:20: T__9 // alt
+            {
+
+
+            [self mT__9]; 
+
+
+             
+            }
+            break;
+        case 4 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:25: T__10 // alt
+            {
+
+
+            [self mT__10]; 
+
+
+             
+            }
+            break;
+        case 5 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:31: T__11 // alt
+            {
+
+
+            [self mT__11]; 
+
+
+             
+            }
+            break;
+        case 6 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:37: T__12 // alt
+            {
+
+
+            [self mT__12]; 
+
+
+             
+            }
+            break;
+        case 7 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:43: T__13 // alt
+            {
+
+
+            [self mT__13]; 
+
+
+             
+            }
+            break;
+        case 8 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:49: T__14 // alt
+            {
+
+
+            [self mT__14]; 
+
+
+             
+            }
+            break;
+        case 9 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:55: T__15 // alt
+            {
+
+
+            [self mT__15]; 
+
+
+             
+            }
+            break;
+        case 10 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:61: T__16 // alt
+            {
+
+
+            [self mT__16]; 
+
+
+             
+            }
+            break;
+        case 11 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:67: T__17 // alt
+            {
+
+
+            [self mT__17]; 
+
+
+             
+            }
+            break;
+        case 12 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:73: T__18 // alt
+            {
+
+
+            [self mT__18]; 
+
+
+             
+            }
+            break;
+        case 13 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:79: T__19 // alt
+            {
+
+
+            [self mT__19]; 
+
+
+             
+            }
+            break;
+        case 14 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:85: T__20 // alt
+            {
+
+
+            [self mT__20]; 
+
+
+             
+            }
+            break;
+        case 15 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:91: ID // alt
+            {
+
+
+            [self mID]; 
+
+
+             
+            }
+            break;
+        case 16 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:94: INT // alt
+            {
+
+
+            [self mINT]; 
+
+
+             
+            }
+            break;
+        case 17 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:1:98: WS // alt
+            {
+
+
+            [self mWS]; 
+
+
+             
+            }
+            break;
+
+    }
+
+}
+
+@end /* end of SimpleCLexer implementation line 397 */
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.h b/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.h
new file mode 100644
index 0000000..108113c
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.h
@@ -0,0 +1,105 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g 2012-02-16 17:39:18
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* parserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Cyclic DFA interface start DFA2
+@interface DFA2 : DFA {
+}
++ (DFA2 *) newDFA2WithRecognizer:(BaseRecognizer *)theRecognizer;
+- initWithRecognizer:(BaseRecognizer *)recognizer;
+@end /* end of DFA2 interface  */
+
+#pragma mark Cyclic DFA interface end DFA2
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__7 7
+#define T__8 8
+#define T__9 9
+#define T__10 10
+#define T__11 11
+#define T__12 12
+#define T__13 13
+#define T__14 14
+#define T__15 15
+#define T__16 16
+#define T__17 17
+#define T__18 18
+#define T__19 19
+#define T__20 20
+#define ID 4
+#define INT 5
+#define WS 6
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+
+/* Interface grammar class */
+@interface SimpleCParser  : Parser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* ObjC end of memVars */
+
+DFA2 *dfa2;
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newSimpleCParser:(id<TokenStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* ObjC end of methodsDecl */
+
+- (void)program; 
+- (void)declaration; 
+- (void)variable; 
+- (void)declarator; 
+- (NSString *)functionHeader; 
+- (void)formalParameter; 
+- (void)type; 
+- (void)block; 
+- (void)stat; 
+- (void)forStat; 
+- (void)assignStat; 
+- (void)expr; 
+- (void)condExpr; 
+- (void)aexpr; 
+- (void)atom; 
+
+
+@end /* end of SimpleCParser interface */
+
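+
+// Driver sketch (illustrative; the real example target ships its own main): wires the
+// generated lexer and parser together and invokes the start rule. The input string and
+// the ANTLRStringStream/CommonTokenStream constructor names are assumptions based on the
+// runtime's newXxx convention; newSimpleCLexerWithCharStream:, newSimpleCParser: and
+// program are declared in the generated headers.
+//
+//     ANTLRStringStream *src    = [ANTLRStringStream newANTLRStringStream:@"int x; void foo() { x = 1; }"];
+//     SimpleCLexer *lexer       = [SimpleCLexer newSimpleCLexerWithCharStream:src];
+//     CommonTokenStream *tokens = [CommonTokenStream newCommonTokenStreamWithTokenSource:lexer];
+//     SimpleCParser *parser     = [SimpleCParser newSimpleCParser:tokens];
+//     [parser program];   // start rule; syntax errors are reported through the standard handler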
diff --git a/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.m b/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.m
new file mode 100644
index 0000000..da605a8
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/LL-star/SimpleCParser.m
@@ -0,0 +1,1541 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g
+ *     -                            On : 2012-02-16 17:39:18
+ *     -                for the parser : SimpleCParserParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g 2012-02-16 17:39:18
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "SimpleCParser.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+#pragma mark Cyclic DFA implementation start DFA2
+
+@implementation DFA2
+const static NSInteger dfa2_eot[13] =
+    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
+const static NSInteger dfa2_eof[13] =
+    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
+const static unichar dfa2_min[13] =
+    {4,4,7,4,-1,4,11,8,-1,-1,4,4,8};
+const static unichar dfa2_max[13] =
+    {18,4,11,18,-1,4,19,10,-1,-1,18,4,10};
+const static NSInteger dfa2_accept[13] =
+    {-1,-1,-1,-1,1,-1,-1,-1,2,3,-1,-1,-1};
+const static NSInteger dfa2_special[13] =
+    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
+
+/** Used when there is no transition table entry for a particular state */
+#define dfa2_T_empty	    nil
+
+const static NSInteger dfa2_T0[] =
+{
+     3, -1, -1, -1, 4
+};
+const static NSInteger dfa2_T1[] =
+{
+     6, -1, 10
+};
+const static NSInteger dfa2_T2[] =
+{
+     8, -1, -1, -1, -1, -1, -1, -1, 9
+};
+const static NSInteger dfa2_T3[] =
+{
+     2
+};
+const static NSInteger dfa2_T4[] =
+{
+     1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1
+};
+const static NSInteger dfa2_T5[] =
+{
+     7
+};
+const static NSInteger dfa2_T6[] =
+{
+     11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 11, -1, 11, 11
+};
+const static NSInteger dfa2_T7[] =
+{
+     12
+};
+const static NSInteger dfa2_T8[] =
+{
+     5, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, 5, -1, 5, 5
+};
+
+
+const static NSInteger *dfa2_transition[] =
+{
+    dfa2_T4, dfa2_T3, dfa2_T0, dfa2_T8, nil, dfa2_T5, dfa2_T2, dfa2_T1, 
+    nil, nil, dfa2_T6, dfa2_T7, dfa2_T1
+};
+
+//const static NSInteger dfa2_transition[] = {};
+
++ (DFA2 *) newDFA2WithRecognizer:(BaseRecognizer *)aRecognizer
+{
+    return [[[DFA2 alloc] initWithRecognizer:aRecognizer] retain];
+}
+
+- (id) initWithRecognizer:(BaseRecognizer *) theRecognizer
+{
+    self = [super initWithRecognizer:theRecognizer];
+    if ( self != nil ) {
+        decisionNumber = 2;
+        eot = dfa2_eot;
+        eof = dfa2_eof;
+        min = dfa2_min;
+        max = dfa2_max;
+        accept = dfa2_accept;
+        special = dfa2_special;
+        transition = dfa2_transition;
+/*
+        if (!(transition = calloc(13, sizeof(void*)))) {
+            [self release];
+            return nil;
+        }
+        len = 13;
+        transition[0] = dfa2_transition4;
+        transition[1] = dfa2_transition3;
+        transition[2] = dfa2_transition0;
+        transition[3] = dfa2_transition8;
+
+        transition[4] = dfa2_transition5;
+        transition[5] = dfa2_transition2;
+        transition[6] = dfa2_transition1;
+
+
+        transition[7] = dfa2_transition6;
+        transition[8] = dfa2_transition7;
+        transition[9] = dfa2_transition1;
+ */
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    // transition points at the static dfa2_transition table (the calloc'd copy is
+    // commented out above), so it must not be passed to free()
+    [super dealloc];
+}
+
+- (NSString *) description
+{
+    return @"20:1: declaration : ( variable | functionHeader ';' | functionHeader block );";
+}
+
+
+@end /* end DFA2 implementation */
+
+#pragma mark Cyclic DFA implementation end DFA2
+
+
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_declaration_in_program28;
+static const unsigned long long FOLLOW_declaration_in_program28_data[] = { 0x0000000000068012LL};
+static ANTLRBitSet *FOLLOW_variable_in_declaration50;
+static const unsigned long long FOLLOW_variable_in_declaration50_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_functionHeader_in_declaration60;
+static const unsigned long long FOLLOW_functionHeader_in_declaration60_data[] = { 0x0000000000000800LL};
+static ANTLRBitSet *FOLLOW_11_in_declaration62;
+static const unsigned long long FOLLOW_11_in_declaration62_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_functionHeader_in_declaration75;
+static const unsigned long long FOLLOW_functionHeader_in_declaration75_data[] = { 0x0000000000080000LL};
+static ANTLRBitSet *FOLLOW_block_in_declaration77;
+static const unsigned long long FOLLOW_block_in_declaration77_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_type_in_variable99;
+static const unsigned long long FOLLOW_type_in_variable99_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_declarator_in_variable101;
+static const unsigned long long FOLLOW_declarator_in_variable101_data[] = { 0x0000000000000800LL};
+static ANTLRBitSet *FOLLOW_11_in_variable103;
+static const unsigned long long FOLLOW_11_in_variable103_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_ID_in_declarator122;
+static const unsigned long long FOLLOW_ID_in_declarator122_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_type_in_functionHeader151;
+static const unsigned long long FOLLOW_type_in_functionHeader151_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_ID_in_functionHeader153;
+static const unsigned long long FOLLOW_ID_in_functionHeader153_data[] = { 0x0000000000000080LL};
+static ANTLRBitSet *FOLLOW_7_in_functionHeader155;
+static const unsigned long long FOLLOW_7_in_functionHeader155_data[] = { 0x0000000000068110LL};
+static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader159;
+static const unsigned long long FOLLOW_formalParameter_in_functionHeader159_data[] = { 0x0000000000000500LL};
+static ANTLRBitSet *FOLLOW_10_in_functionHeader163;
+static const unsigned long long FOLLOW_10_in_functionHeader163_data[] = { 0x0000000000068010LL};
+static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader165;
+static const unsigned long long FOLLOW_formalParameter_in_functionHeader165_data[] = { 0x0000000000000500LL};
+static ANTLRBitSet *FOLLOW_8_in_functionHeader173;
+static const unsigned long long FOLLOW_8_in_functionHeader173_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_type_in_formalParameter195;
+static const unsigned long long FOLLOW_type_in_formalParameter195_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_declarator_in_formalParameter197;
+static const unsigned long long FOLLOW_declarator_in_formalParameter197_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_19_in_block286;
+static const unsigned long long FOLLOW_19_in_block286_data[] = { 0x00000000001F88B0LL};
+static ANTLRBitSet *FOLLOW_variable_in_block300;
+static const unsigned long long FOLLOW_variable_in_block300_data[] = { 0x00000000001F88B0LL};
+static ANTLRBitSet *FOLLOW_stat_in_block315;
+static const unsigned long long FOLLOW_stat_in_block315_data[] = { 0x00000000001908B0LL};
+static ANTLRBitSet *FOLLOW_20_in_block326;
+static const unsigned long long FOLLOW_20_in_block326_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_forStat_in_stat338;
+static const unsigned long long FOLLOW_forStat_in_stat338_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_expr_in_stat346;
+static const unsigned long long FOLLOW_expr_in_stat346_data[] = { 0x0000000000000800LL};
+static ANTLRBitSet *FOLLOW_11_in_stat348;
+static const unsigned long long FOLLOW_11_in_stat348_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_block_in_stat362;
+static const unsigned long long FOLLOW_block_in_stat362_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_assignStat_in_stat370;
+static const unsigned long long FOLLOW_assignStat_in_stat370_data[] = { 0x0000000000000800LL};
+static ANTLRBitSet *FOLLOW_11_in_stat372;
+static const unsigned long long FOLLOW_11_in_stat372_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_11_in_stat380;
+static const unsigned long long FOLLOW_11_in_stat380_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_16_in_forStat399;
+static const unsigned long long FOLLOW_16_in_forStat399_data[] = { 0x0000000000000080LL};
+static ANTLRBitSet *FOLLOW_7_in_forStat401;
+static const unsigned long long FOLLOW_7_in_forStat401_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_assignStat_in_forStat403;
+static const unsigned long long FOLLOW_assignStat_in_forStat403_data[] = { 0x0000000000000800LL};
+static ANTLRBitSet *FOLLOW_11_in_forStat405;
+static const unsigned long long FOLLOW_11_in_forStat405_data[] = { 0x00000000000000B0LL};
+static ANTLRBitSet *FOLLOW_expr_in_forStat407;
+static const unsigned long long FOLLOW_expr_in_forStat407_data[] = { 0x0000000000000800LL};
+static ANTLRBitSet *FOLLOW_11_in_forStat409;
+static const unsigned long long FOLLOW_11_in_forStat409_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_assignStat_in_forStat411;
+static const unsigned long long FOLLOW_assignStat_in_forStat411_data[] = { 0x0000000000000100LL};
+static ANTLRBitSet *FOLLOW_8_in_forStat413;
+static const unsigned long long FOLLOW_8_in_forStat413_data[] = { 0x0000000000080000LL};
+static ANTLRBitSet *FOLLOW_block_in_forStat415;
+static const unsigned long long FOLLOW_block_in_forStat415_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_ID_in_assignStat442;
+static const unsigned long long FOLLOW_ID_in_assignStat442_data[] = { 0x0000000000002000LL};
+static ANTLRBitSet *FOLLOW_13_in_assignStat444;
+static const unsigned long long FOLLOW_13_in_assignStat444_data[] = { 0x00000000000000B0LL};
+static ANTLRBitSet *FOLLOW_expr_in_assignStat446;
+static const unsigned long long FOLLOW_expr_in_assignStat446_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_condExpr_in_expr468;
+static const unsigned long long FOLLOW_condExpr_in_expr468_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_aexpr_in_condExpr487;
+static const unsigned long long FOLLOW_aexpr_in_condExpr487_data[] = { 0x0000000000005002LL};
+static ANTLRBitSet *FOLLOW_set_in_condExpr491;
+static const unsigned long long FOLLOW_set_in_condExpr491_data[] = { 0x00000000000000B0LL};
+static ANTLRBitSet *FOLLOW_aexpr_in_condExpr499;
+static const unsigned long long FOLLOW_aexpr_in_condExpr499_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_atom_in_aexpr521;
+static const unsigned long long FOLLOW_atom_in_aexpr521_data[] = { 0x0000000000000202LL};
+static ANTLRBitSet *FOLLOW_9_in_aexpr525;
+static const unsigned long long FOLLOW_9_in_aexpr525_data[] = { 0x00000000000000B0LL};
+static ANTLRBitSet *FOLLOW_atom_in_aexpr527;
+static const unsigned long long FOLLOW_atom_in_aexpr527_data[] = { 0x0000000000000202LL};
+static ANTLRBitSet *FOLLOW_ID_in_atom547;
+static const unsigned long long FOLLOW_ID_in_atom547_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_INT_in_atom561;
+static const unsigned long long FOLLOW_INT_in_atom561_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_7_in_atom575;
+static const unsigned long long FOLLOW_7_in_atom575_data[] = { 0x00000000000000B0LL};
+static ANTLRBitSet *FOLLOW_expr_in_atom577;
+static const unsigned long long FOLLOW_expr_in_atom577_data[] = { 0x0000000000000100LL};
+static ANTLRBitSet *FOLLOW_8_in_atom579;
+static const unsigned long long FOLLOW_8_in_atom579_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+
+@implementation SimpleCParser  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_declaration_in_program28 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declaration_in_program28_data Count:(NSUInteger)1] retain];
+    FOLLOW_variable_in_declaration50 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_declaration50_data Count:(NSUInteger)1] retain];
+    FOLLOW_functionHeader_in_declaration60 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration60_data Count:(NSUInteger)1] retain];
+    FOLLOW_11_in_declaration62 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_declaration62_data Count:(NSUInteger)1] retain];
+    FOLLOW_functionHeader_in_declaration75 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration75_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_declaration77 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_declaration77_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_variable99 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_variable99_data Count:(NSUInteger)1] retain];
+    FOLLOW_declarator_in_variable101 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_variable101_data Count:(NSUInteger)1] retain];
+    FOLLOW_11_in_variable103 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_variable103_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_declarator122 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_declarator122_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_functionHeader151 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_functionHeader151_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_functionHeader153 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_functionHeader153_data Count:(NSUInteger)1] retain];
+    FOLLOW_7_in_functionHeader155 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_functionHeader155_data Count:(NSUInteger)1] retain];
+    FOLLOW_formalParameter_in_functionHeader159 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader159_data Count:(NSUInteger)1] retain];
+    FOLLOW_10_in_functionHeader163 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_10_in_functionHeader163_data Count:(NSUInteger)1] retain];
+    FOLLOW_formalParameter_in_functionHeader165 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader165_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_functionHeader173 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_functionHeader173_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_formalParameter195 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_formalParameter195_data Count:(NSUInteger)1] retain];
+    FOLLOW_declarator_in_formalParameter197 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_formalParameter197_data Count:(NSUInteger)1] retain];
+    FOLLOW_19_in_block286 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_19_in_block286_data Count:(NSUInteger)1] retain];
+    FOLLOW_variable_in_block300 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_block300_data Count:(NSUInteger)1] retain];
+    FOLLOW_stat_in_block315 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block315_data Count:(NSUInteger)1] retain];
+    FOLLOW_20_in_block326 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_20_in_block326_data Count:(NSUInteger)1] retain];
+    FOLLOW_forStat_in_stat338 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_forStat_in_stat338_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_stat346 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_stat346_data Count:(NSUInteger)1] retain];
+    FOLLOW_11_in_stat348 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_stat348_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_stat362 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat362_data Count:(NSUInteger)1] retain];
+    FOLLOW_assignStat_in_stat370 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_stat370_data Count:(NSUInteger)1] retain];
+    FOLLOW_11_in_stat372 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_stat372_data Count:(NSUInteger)1] retain];
+    FOLLOW_11_in_stat380 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_stat380_data Count:(NSUInteger)1] retain];
+    FOLLOW_16_in_forStat399 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_16_in_forStat399_data Count:(NSUInteger)1] retain];
+    FOLLOW_7_in_forStat401 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_forStat401_data Count:(NSUInteger)1] retain];
+    FOLLOW_assignStat_in_forStat403 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_forStat403_data Count:(NSUInteger)1] retain];
+    FOLLOW_11_in_forStat405 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_forStat405_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_forStat407 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat407_data Count:(NSUInteger)1] retain];
+    FOLLOW_11_in_forStat409 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_forStat409_data Count:(NSUInteger)1] retain];
+    FOLLOW_assignStat_in_forStat411 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_forStat411_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_forStat413 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_forStat413_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_forStat415 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_forStat415_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_assignStat442 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_assignStat442_data Count:(NSUInteger)1] retain];
+    FOLLOW_13_in_assignStat444 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_13_in_assignStat444_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_assignStat446 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_assignStat446_data Count:(NSUInteger)1] retain];
+    FOLLOW_condExpr_in_expr468 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_condExpr_in_expr468_data Count:(NSUInteger)1] retain];
+    FOLLOW_aexpr_in_condExpr487 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_aexpr_in_condExpr487_data Count:(NSUInteger)1] retain];
+    FOLLOW_set_in_condExpr491 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_set_in_condExpr491_data Count:(NSUInteger)1] retain];
+    FOLLOW_aexpr_in_condExpr499 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_aexpr_in_condExpr499_data Count:(NSUInteger)1] retain];
+    FOLLOW_atom_in_aexpr521 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_aexpr521_data Count:(NSUInteger)1] retain];
+    FOLLOW_9_in_aexpr525 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_aexpr525_data Count:(NSUInteger)1] retain];
+    FOLLOW_atom_in_aexpr527 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_aexpr527_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_atom547 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_atom547_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_atom561 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_atom561_data Count:(NSUInteger)1] retain];
+    FOLLOW_7_in_atom575 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_atom575_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_atom577 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_atom577_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_atom579 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_atom579_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"ID", @"INT", @"WS", @"'('", @"')'", @"'+'", @"','", @"';'", @"'<'", @"'='", 
+ @"'=='", @"'char'", @"'for'", @"'int'", @"'void'", @"'{'", @"'}'", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g"];
+}
+
++ (SimpleCParser *)newSimpleCParser:(id<TokenStream>)aStream
+{
+    return [[SimpleCParser alloc] initWithTokenStream:aStream];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)aStream
+{
+    self = [super initWithTokenStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:15+1] retain]];
+    if ( self != nil ) {
+        dfa2 = [DFA2 newDFA2WithRecognizer:self];
+        /* start of actions-actionScope-init */
+        /* start of init */
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [dfa2 release];
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start program
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:7:1: program : ( declaration )+ ;
+ */
+- (void) program
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:8:5: ( ( declaration )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:8:9: ( declaration )+ // alt
+        {
+
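+        // ( declaration )+ : keep matching while the lookahead still predicts
+        // 'declaration'; exiting with zero iterations throws an EarlyExitException.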
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:8:9: ( declaration )+ // positiveClosureBlock
+        NSInteger cnt1 = 0;
+        do {
+            NSInteger alt1 = 2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( (LA1_0==ID||LA1_0==15||(LA1_0 >= 17 && LA1_0 <= 18)) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:8:9: declaration // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_declaration_in_program28];
+                    [self declaration];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt1 >= 1 )
+                        goto loop1;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:1];
+                    @throw eee;
+            }
+            cnt1++;
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end program */
+
+/*
+ * $ANTLR start declaration
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:20:1: declaration : ( variable | functionHeader ';' | functionHeader block );
+ */
+- (void) declaration
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+        NSString * functionHeader1 = nil ;
+
+        NSString * functionHeader2 = nil ;
+
+
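+        // 'variable' and the two 'functionHeader' alternatives share the same
+        // 'type ID' prefix, so no fixed lookahead separates them; the cyclic DFA2
+        // generated for this decision scans ahead (LL(*)) to predict the alternative.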
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:21:5: ( variable | functionHeader ';' | functionHeader block ) //ruleblock
+        NSInteger alt2=3;
+        alt2 = [dfa2 predict:input];
+        switch (alt2) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:21:9: variable // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_variable_in_declaration50];
+                [self variable];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:22:9: functionHeader ';' // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_functionHeader_in_declaration60];
+                functionHeader1 = [self functionHeader];
+
+                [self popFollow];
+
+
+                 
+                [self match:input TokenType:11 Follow:FOLLOW_11_in_declaration62]; 
+                 
+
+                 NSLog(@"%@ is a declaration\n", functionHeader1
+                ); 
+
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:24:9: functionHeader block // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_functionHeader_in_declaration75];
+                functionHeader2 = [self functionHeader];
+
+                [self popFollow];
+
+
+                 
+                /* ruleRef */
+                [self pushFollow:FOLLOW_block_in_declaration77];
+                [self block];
+
+                [self popFollow];
+
+
+                 
+
+                 NSLog(@"%@ is a definition\n", functionHeader2
+                ); 
+
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end declaration */
+
+/*
+ * $ANTLR start variable
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:28:1: variable : type declarator ';' ;
+ */
+- (void) variable
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:29:5: ( type declarator ';' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:29:9: type declarator ';' // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_type_in_variable99];
+        [self type];
+
+        [self popFollow];
+
+
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_declarator_in_variable101];
+        [self declarator];
+
+        [self popFollow];
+
+
+         
+        [self match:input TokenType:11 Follow:FOLLOW_11_in_variable103]; 
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end variable */
+
+/*
+ * $ANTLR start declarator
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:32:1: declarator : ID ;
+ */
+- (void) declarator
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:33:5: ( ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:33:9: ID // alt
+        {
+
+        [self match:input TokenType:ID Follow:FOLLOW_ID_in_declarator122]; 
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end declarator */
+
+/*
+ * $ANTLR start functionHeader
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:36:1: functionHeader returns [NSString *name] : type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' ;
+ */
+- (NSString *) functionHeader
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+    NSString * name = nil ;
+
+
+
+        name=nil; // for now you must init here rather than in 'returns'
+
+    @try {
+        /* ruleLabelDefs entry */
+        CommonToken *ID3 = nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:40:5: ( type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:40:9: type ID '(' ( formalParameter ( ',' formalParameter )* )? ')' // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_type_in_functionHeader151];
+        [self type];
+
+        [self popFollow];
+
+
+         
+        ID3=(CommonToken *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_functionHeader153]; 
+         
+        [self match:input TokenType:7 Follow:FOLLOW_7_in_functionHeader155]; 
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:40:21: ( formalParameter ( ',' formalParameter )* )? // block
+        NSInteger alt4=2;
+        NSInteger LA4_0 = [input LA:1];
+
+        if ( (LA4_0==ID||LA4_0==15||(LA4_0 >= 17 && LA4_0 <= 18)) ) {
+            alt4=1;
+        }
+        switch (alt4) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:40:23: formalParameter ( ',' formalParameter )* // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_formalParameter_in_functionHeader159];
+                [self formalParameter];
+
+                [self popFollow];
+
+
+                 
+
+                do {
+                    NSInteger alt3=2;
+                    NSInteger LA3_0 = [input LA:1];
+                    if ( (LA3_0==10) ) {
+                        alt3=1;
+                    }
+
+
+                    switch (alt3) {
+                        case 1 : ;
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:40:41: ',' formalParameter // alt
+                            {
+
+                            [self match:input TokenType:10 Follow:FOLLOW_10_in_functionHeader163]; 
+                             
+                            /* ruleRef */
+                            [self pushFollow:FOLLOW_formalParameter_in_functionHeader165];
+                            [self formalParameter];
+
+                            [self popFollow];
+
+
+                             
+                            }
+                            break;
+
+                        default :
+                            goto loop3;
+                    }
+                } while (YES);
+                loop3: ;
+
+                 
+                }
+                break;
+
+        }
+
+         
+        [self match:input TokenType:8 Follow:FOLLOW_8_in_functionHeader173]; 
+         
+
+        name =  (ID3!=nil?ID3.text:nil);
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return name;
+}
+/* $ANTLR end functionHeader */
+
+/*
+ * $ANTLR start formalParameter
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:44:1: formalParameter : type declarator ;
+ */
+- (void) formalParameter
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:45:5: ( type declarator ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:45:9: type declarator // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_type_in_formalParameter195];
+        [self type];
+
+        [self popFollow];
+
+
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_declarator_in_formalParameter197];
+        [self declarator];
+
+        [self popFollow];
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end formalParameter */
+
+/*
+ * $ANTLR start type
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:48:1: type : ( 'int' | 'char' | 'void' | ID );
+ */
+- (void) type
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:49:5: ( 'int' | 'char' | 'void' | ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g: // alt
+        {
+
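+        // All four alternatives ('int' | 'char' | 'void' | ID) are single tokens,
+        // so the generator collapses them into one set match instead of a switch.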
+        if ([input LA:1] == ID||[input LA:1] == 15||(([input LA:1] >= 17) && ([input LA:1] <= 18))) {
+            [input consume];
+            [state setIsErrorRecovery:NO];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            @throw mse;
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end type */
+
+/*
+ * $ANTLR start block
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:55:1: block : '{' ( variable )* ( stat )* '}' ;
+ */
+- (void) block
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:56:5: ( '{' ( variable )* ( stat )* '}' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:56:9: '{' ( variable )* ( stat )* '}' // alt
+        {
+
+        [self match:input TokenType:19 Follow:FOLLOW_19_in_block286]; 
+         
+
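+        // A leading type keyword always starts a local variable declaration; a
+        // leading ID does only when the next token is another ID (type declarator),
+        // otherwise control falls through to the stat* loop below.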
+        do {
+            NSInteger alt5=2;
+            NSInteger LA5_0 = [input LA:1];
+            if ( (LA5_0==ID) ) {
+                NSInteger LA5_2 = [input LA:2];
+                if ( (LA5_2==ID) ) {
+                    alt5=1;
+                }
+
+
+            }
+            else if ( (LA5_0==15||(LA5_0 >= 17 && LA5_0 <= 18)) ) {
+                alt5=1;
+            }
+
+
+            switch (alt5) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:57:13: variable // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_variable_in_block300];
+                    [self variable];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop5;
+            }
+        } while (YES);
+        loop5: ;
+
+         
+
+        do {
+            NSInteger alt6=2;
+            NSInteger LA6_0 = [input LA:1];
+            if ( ((LA6_0 >= ID && LA6_0 <= INT)||LA6_0==7||LA6_0==11||LA6_0==16||LA6_0==19) ) {
+                alt6=1;
+            }
+
+
+            switch (alt6) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:58:13: stat // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_stat_in_block315];
+                    [self stat];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop6;
+            }
+        } while (YES);
+        loop6: ;
+
+         
+        [self match:input TokenType:20 Follow:FOLLOW_20_in_block326]; 
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end block */
+
+/*
+ * $ANTLR start stat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:62:1: stat : ( forStat | expr ';' | block | assignStat ';' | ';' );
+ */
+- (void) stat
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
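+        // A statement starting with ID needs LA(2): ID '=' selects the assignStat
+        // alternative, while any other follower means an expression statement.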
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:62:5: ( forStat | expr ';' | block | assignStat ';' | ';' ) //ruleblock
+        NSInteger alt7=5;
+        unichar charLA7 = [input LA:1];
+        switch (charLA7) {
+            case 16: ;
+                {
+                alt7=1;
+                }
+                break;
+            case ID: ;
+                {
+                NSInteger LA7_2 = [input LA:2];
+
+                if ( (LA7_2==13) ) {
+                    alt7=4;
+                }
+                else if ( (LA7_2==9||(LA7_2 >= 11 && LA7_2 <= 12)||LA7_2==14) ) {
+                    alt7=2;
+                }
+                else {
+                    NoViableAltException *nvae = [NoViableAltException newException:7 state:2 stream:input];
+                    nvae.c = LA7_2;
+                    @throw nvae;
+
+                }
+                }
+                break;
+            case INT: ;
+            case 7: ;
+                {
+                alt7=2;
+                }
+                break;
+            case 19: ;
+                {
+                alt7=3;
+                }
+                break;
+            case 11: ;
+                {
+                alt7=5;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:7 state:0 stream:input];
+            nvae.c = charLA7;
+            @throw nvae;
+
+        }
+
+        switch (alt7) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:62:7: forStat // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_forStat_in_stat338];
+                [self forStat];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:63:7: expr ';' // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_expr_in_stat346];
+                [self expr];
+
+                [self popFollow];
+
+
+                 
+                [self match:input TokenType:11 Follow:FOLLOW_11_in_stat348]; 
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:64:7: block // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_block_in_stat362];
+                [self block];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+            case 4 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:65:7: assignStat ';' // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_assignStat_in_stat370];
+                [self assignStat];
+
+                [self popFollow];
+
+
+                 
+                [self match:input TokenType:11 Follow:FOLLOW_11_in_stat372]; 
+                 
+                }
+                break;
+            case 5 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:66:7: ';' // alt
+                {
+
+                [self match:input TokenType:11 Follow:FOLLOW_11_in_stat380]; 
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end stat */
+
+/*
+ * $ANTLR start forStat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:69:1: forStat : 'for' '(' assignStat ';' expr ';' assignStat ')' block ;
+ */
+- (void) forStat
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:70:5: ( 'for' '(' assignStat ';' expr ';' assignStat ')' block ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:70:9: 'for' '(' assignStat ';' expr ';' assignStat ')' block // alt
+        {
+
+        [self match:input TokenType:16 Follow:FOLLOW_16_in_forStat399]; 
+         
+        [self match:input TokenType:7 Follow:FOLLOW_7_in_forStat401]; 
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_assignStat_in_forStat403];
+        [self assignStat];
+
+        [self popFollow];
+
+
+         
+        [self match:input TokenType:11 Follow:FOLLOW_11_in_forStat405]; 
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_expr_in_forStat407];
+        [self expr];
+
+        [self popFollow];
+
+
+         
+        [self match:input TokenType:11 Follow:FOLLOW_11_in_forStat409]; 
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_assignStat_in_forStat411];
+        [self assignStat];
+
+        [self popFollow];
+
+
+         
+        [self match:input TokenType:8 Follow:FOLLOW_8_in_forStat413]; 
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_block_in_forStat415];
+        [self block];
+
+        [self popFollow];
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end forStat */
+
+/*
+ * $ANTLR start assignStat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:73:1: assignStat : ID '=' expr ;
+ */
+- (void) assignStat
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:74:5: ( ID '=' expr ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:74:9: ID '=' expr // alt
+        {
+
+        [self match:input TokenType:ID Follow:FOLLOW_ID_in_assignStat442]; 
+         
+        [self match:input TokenType:13 Follow:FOLLOW_13_in_assignStat444]; 
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_expr_in_assignStat446];
+        [self expr];
+
+        [self popFollow];
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end assignStat */
+
+/*
+ * $ANTLR start expr
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:77:1: expr : condExpr ;
+ */
+- (void) expr
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:77:5: ( condExpr ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:77:9: condExpr // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_condExpr_in_expr468];
+        [self condExpr];
+
+        [self popFollow];
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end expr */
+
+/*
+ * $ANTLR start condExpr
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:80:1: condExpr : aexpr ( ( '==' | '<' ) aexpr )? ;
+ */
+- (void) condExpr
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:81:5: ( aexpr ( ( '==' | '<' ) aexpr )? ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:81:9: aexpr ( ( '==' | '<' ) aexpr )? // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_aexpr_in_condExpr487];
+        [self aexpr];
+
+        [self popFollow];
+
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:81:15: ( ( '==' | '<' ) aexpr )? // block
+        NSInteger alt8=2;
+        NSInteger LA8_0 = [input LA:1];
+
+        if ( (LA8_0==12||LA8_0==14) ) {
+            alt8=1;
+        }
+        switch (alt8) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:81:17: ( '==' | '<' ) aexpr // alt
+                {
+
+                if ([input LA:1] == 12||[input LA:1] == 14) {
+                    [input consume];
+                    [state setIsErrorRecovery:NO];
+                } else {
+                    MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                    @throw mse;
+                }
+
+                 
+                /* ruleRef */
+                [self pushFollow:FOLLOW_aexpr_in_condExpr499];
+                [self aexpr];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end condExpr */
+
+/*
+ * $ANTLR start aexpr
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:84:1: aexpr : atom ( '+' atom )* ;
+ */
+- (void) aexpr
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:85:5: ( atom ( '+' atom )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:85:9: atom ( '+' atom )* // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_atom_in_aexpr521];
+        [self atom];
+
+        [self popFollow];
+
+
+         
+
+        do {
+            NSInteger alt9=2;
+            NSInteger LA9_0 = [input LA:1];
+            if ( (LA9_0==9) ) {
+                alt9=1;
+            }
+
+
+            switch (alt9) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:85:16: '+' atom // alt
+                    {
+
+                    [self match:input TokenType:9 Follow:FOLLOW_9_in_aexpr525]; 
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_atom_in_aexpr527];
+                    [self atom];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop9;
+            }
+        } while (YES);
+        loop9: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end aexpr */
+
+/*
+ * $ANTLR start atom
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:88:1: atom : ( ID | INT | '(' expr ')' );
+ */
+- (void) atom
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:89:5: ( ID | INT | '(' expr ')' ) //ruleblock
+        NSInteger alt10=3;
+        unichar charLA10 = [input LA:1];
+        switch (charLA10) {
+            case ID: ;
+                {
+                alt10=1;
+                }
+                break;
+            case INT: ;
+                {
+                alt10=2;
+                }
+                break;
+            case 7: ;
+                {
+                alt10=3;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:10 state:0 stream:input];
+            nvae.c = charLA10;
+            @throw nvae;
+
+        }
+
+        switch (alt10) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:89:7: ID // alt
+                {
+
+                [self match:input TokenType:ID Follow:FOLLOW_ID_in_atom547]; 
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:90:7: INT // alt
+                {
+
+                [self match:input TokenType:INT Follow:FOLLOW_INT_in_atom561]; 
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/SimpleC.g:91:7: '(' expr ')' // alt
+                {
+
+                [self match:input TokenType:7 Follow:FOLLOW_7_in_atom575]; 
+                 
+                /* ruleRef */
+                [self pushFollow:FOLLOW_expr_in_atom577];
+                [self expr];
+
+                [self popFollow];
+
+
+                 
+                [self match:input TokenType:8 Follow:FOLLOW_8_in_atom579]; 
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end atom */
+/* ObjC end rules */
+
+@end /* end of SimpleCParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleC__.gl b/runtime/ObjC/Framework/examples/LL-star/SimpleC__.gl
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/LL-star/SimpleC__.gl
rename to runtime/ObjC/Framework/examples/LL-star/SimpleC__.gl
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/input b/runtime/ObjC/Framework/examples/LL-star/input
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/LL-star/input
rename to runtime/ObjC/Framework/examples/LL-star/input
diff --git a/runtime/ObjC/Framework/examples/LL-star/main.m b/runtime/ObjC/Framework/examples/LL-star/main.m
new file mode 100644
index 0000000..2ab47d3
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/LL-star/main.m
@@ -0,0 +1,32 @@
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+#import "SimpleCLexer.h"
+#import "SimpleCParser.h"
+
+int main()
+{
+    NSError *error;
+	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+
+	NSString *string = [NSString stringWithContentsOfFile:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/LL-star/input" encoding:NSASCIIStringEncoding error:&error];
+	NSLog(@"input is: %@", string);
+	ANTLRStringStream *stream = [[ANTLRStringStream alloc] initWithStringNoCopy:string];
+	SimpleCLexer *lexer = [[SimpleCLexer alloc] initWithCharStream:stream];
+
+//	CommonToken *currentToken;
+//	while ((currentToken = [lexer nextToken]) && currentToken.type != TokenTypeEOF) {
+//		NSLog(@"%@", [currentToken toString]);
+//	}
+	
+	CommonTokenStream *tokens = [[CommonTokenStream alloc] initWithTokenSource:lexer];
+	SimpleCParser *parser = [[SimpleCParser alloc] initWithTokenStream:tokens];
+	[parser program];
+
+	[lexer release];
+	[stream release];
+	[tokens release];
+	[parser release];
+
+	[pool release];
+	return 0;
+}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/output b/runtime/ObjC/Framework/examples/LL-star/output
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/LL-star/output
rename to runtime/ObjC/Framework/examples/LL-star/output
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/LL-star/simplec.g b/runtime/ObjC/Framework/examples/LL-star/simplec.g
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/LL-star/simplec.g
rename to runtime/ObjC/Framework/examples/LL-star/simplec.g
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/combined/Combined.g b/runtime/ObjC/Framework/examples/combined/Combined.g
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/combined/Combined.g
rename to runtime/ObjC/Framework/examples/combined/Combined.g
diff --git a/runtime/ObjC/Framework/examples/combined/Combined.tokens b/runtime/ObjC/Framework/examples/combined/Combined.tokens
new file mode 100644
index 0000000..343392f
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/combined/Combined.tokens
@@ -0,0 +1,3 @@
+ID=4
+INT=5
+WS=6
diff --git a/runtime/ObjC/Framework/examples/combined/CombinedLexer.h b/runtime/ObjC/Framework/examples/combined/CombinedLexer.h
new file mode 100644
index 0000000..6c2b67a
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/combined/CombinedLexer.h
@@ -0,0 +1,39 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g 2012-02-16 17:33:49
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* Start cyclicDFAInterface */
+
+#pragma mark Rule return scopes Interface start
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define ID 4
+#define INT 5
+#define WS 6
+/* interface lexer class */
+@interface CombinedLexer : Lexer { // line 283
+/* ObjC start of actions.lexer.memVars */
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (CombinedLexer *)newCombinedLexerWithCharStream:(id<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+/* ObjC end actions.lexer.methodsDecl */
+- (void) mID ; 
+- (void) mINT ; 
+- (void) mWS ; 
+- (void) mTokens ; 
+
+@end /* end of CombinedLexer interface */
+
diff --git a/runtime/ObjC/Framework/examples/combined/CombinedLexer.m b/runtime/ObjC/Framework/examples/combined/CombinedLexer.m
new file mode 100644
index 0000000..473e077
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/combined/CombinedLexer.m
@@ -0,0 +1,412 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g
+ *     -                            On : 2012-02-16 17:33:49
+ *     -                 for the lexer : CombinedLexerLexer
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g 2012-02-16 17:33:49
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "CombinedLexer.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+/** As per Terence: No returns for lexer rules! */
+@implementation CombinedLexer // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (CombinedLexer *)newCombinedLexerWithCharStream:(id<CharStream>)anInput
+{
+    return [[CombinedLexer alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:4+1]];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+
+/* Start of Rules */
+// $ANTLR start "ID"
+- (void) mID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = ID;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:14:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:14:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* // alt
+        {
+
+        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+            [input consume];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            [self recover:mse];
+            @throw mse;
+        }
+
+         
+
+        do {
+            NSInteger alt1=2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= '0' && LA1_0 <= '9')||(LA1_0 >= 'A' && LA1_0 <= 'Z')||LA1_0=='_'||(LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop1;
+            }
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "ID" */
+// $ANTLR start "INT"
+- (void) mINT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = INT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:17:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:17:9: ( '0' .. '9' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:17:9: ( '0' .. '9' )+ // positiveClosureBlock
+        NSInteger cnt2 = 0;
+        do {
+            NSInteger alt2 = 2;
+            NSInteger LA2_0 = [input LA:1];
+            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
+                alt2=1;
+            }
+
+
+            switch (alt2) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt2 >= 1 )
+                        goto loop2;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:2];
+                    @throw eee;
+            }
+            cnt2++;
+        } while (YES);
+        loop2: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "INT" */
+// $ANTLR start "WS"
+- (void) mWS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = WS;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:20:5: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:20:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:20:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // positiveClosureBlock
+        NSInteger cnt3 = 0;
+        do {
+            NSInteger alt3 = 2;
+            NSInteger LA3_0 = [input LA:1];
+            if ( ((LA3_0 >= '\t' && LA3_0 <= '\n')||LA3_0=='\r'||LA3_0==' ') ) {
+                alt3=1;
+            }
+
+
+            switch (alt3) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == '\r'||[input LA:1] == ' ') {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt3 >= 1 )
+                        goto loop3;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:3];
+                    @throw eee;
+            }
+            cnt3++;
+        } while (YES);
+        loop3: ;
+
+         
+
+         _channel=99; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "WS" */
+- (void) mTokens
+{
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:1:8: ( ID | INT | WS ) //ruleblock
+    NSInteger alt4=3;
+    unichar charLA4 = [input LA:1];
+    switch (charLA4) {
+        case 'A': ;
+        case 'B': ;
+        case 'C': ;
+        case 'D': ;
+        case 'E': ;
+        case 'F': ;
+        case 'G': ;
+        case 'H': ;
+        case 'I': ;
+        case 'J': ;
+        case 'K': ;
+        case 'L': ;
+        case 'M': ;
+        case 'N': ;
+        case 'O': ;
+        case 'P': ;
+        case 'Q': ;
+        case 'R': ;
+        case 'S': ;
+        case 'T': ;
+        case 'U': ;
+        case 'V': ;
+        case 'W': ;
+        case 'X': ;
+        case 'Y': ;
+        case 'Z': ;
+        case '_': ;
+        case 'a': ;
+        case 'b': ;
+        case 'c': ;
+        case 'd': ;
+        case 'e': ;
+        case 'f': ;
+        case 'g': ;
+        case 'h': ;
+        case 'i': ;
+        case 'j': ;
+        case 'k': ;
+        case 'l': ;
+        case 'm': ;
+        case 'n': ;
+        case 'o': ;
+        case 'p': ;
+        case 'q': ;
+        case 'r': ;
+        case 's': ;
+        case 't': ;
+        case 'u': ;
+        case 'v': ;
+        case 'w': ;
+        case 'x': ;
+        case 'y': ;
+        case 'z': ;
+            {
+            alt4=1;
+            }
+            break;
+        case '0': ;
+        case '1': ;
+        case '2': ;
+        case '3': ;
+        case '4': ;
+        case '5': ;
+        case '6': ;
+        case '7': ;
+        case '8': ;
+        case '9': ;
+            {
+            alt4=2;
+            }
+            break;
+        case '\t': ;
+        case '\n': ;
+        case '\r': ;
+        case ' ': ;
+            {
+            alt4=3;
+            }
+            break;
+
+    default: ;
+        NoViableAltException *nvae = [NoViableAltException newException:4 state:0 stream:input];
+        nvae.c = charLA4;
+        @throw nvae;
+
+    }
+
+    switch (alt4) {
+        case 1 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:1:10: ID // alt
+            {
+
+
+            [self mID]; 
+
+
+             
+            }
+            break;
+        case 2 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:1:13: INT // alt
+            {
+
+
+            [self mINT]; 
+
+
+             
+            }
+            break;
+        case 3 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:1:17: WS // alt
+            {
+
+
+            [self mWS]; 
+
+
+             
+            }
+            break;
+
+    }
+
+}
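+/* Note (editorial, not generated output): mTokens is the dispatch method invoked by the
+   runtime's nextToken loop. It inspects a single character of lookahead and routes to
+   mID (letters or '_'), mINT (digits) or mWS (space, tab, CR, LF); any other character
+   raises NoViableAltException. */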
+
+@end /* end of CombinedLexer implementation line 397 */
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/examples/combined/CombinedParser.h b/runtime/ObjC/Framework/examples/combined/CombinedParser.h
new file mode 100644
index 0000000..6627eed
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/combined/CombinedParser.h
@@ -0,0 +1,68 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g 2012-02-16 17:33:49
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* parserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define ID 4
+#define INT 5
+#define WS 6
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+
+/* Interface grammar class */
+@interface CombinedParser  : Parser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* ObjC end of memVars */
+
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newCombinedParser:(id<TokenStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* ObjC end of methodsDecl */
+
+- (void)stat; 
+- (void)identifier; 
+
+
+@end /* end of CombinedParser interface */
+
diff --git a/runtime/ObjC/Framework/examples/combined/CombinedParser.m b/runtime/ObjC/Framework/examples/combined/CombinedParser.m
new file mode 100644
index 0000000..355a7e7
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/combined/CombinedParser.m
@@ -0,0 +1,202 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g
+ *     -                            On : 2012-02-16 17:33:49
+ *     -                for the parser : CombinedParserParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g 2012-02-16 17:33:49
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "CombinedParser.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_identifier_in_stat20;
+static const unsigned long long FOLLOW_identifier_in_stat20_data[] = { 0x0000000000000012LL};
+static ANTLRBitSet *FOLLOW_ID_in_identifier35;
+static const unsigned long long FOLLOW_ID_in_identifier35_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+
+@implementation CombinedParser  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_identifier_in_stat20 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_identifier_in_stat20_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_identifier35 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_identifier35_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"ID", @"INT", @"WS", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g"];
+}
+
++ (CombinedParser *)newCombinedParser:(id<TokenStream>)aStream
+{
+    return [[CombinedParser alloc] initWithTokenStream:aStream];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)aStream
+{
+    self = [super initWithTokenStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:2+1] retain]];
+    if ( self != nil ) {
+        /* start of actions-actionScope-init */
+        /* start of init */
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start stat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:7:1: stat : ( identifier )+ ;
+ */
+- (void) stat
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:7:5: ( ( identifier )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:7:7: ( identifier )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:7:7: ( identifier )+ // positiveClosureBlock
+        NSInteger cnt1 = 0;
+        do {
+            NSInteger alt1 = 2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( (LA1_0==ID) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:7:7: identifier // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_identifier_in_stat20];
+                    [self identifier];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt1 >= 1 )
+                        goto loop1;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:1];
+                    @throw eee;
+            }
+            cnt1++;
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end stat */
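+/* Note (editorial, not generated output): the FOLLOW_* bitsets pushed around the
+   identifier subrule via pushFollow:/popFollow describe what may legally follow the
+   invoked rule; the base recognizer consults them for single-token insertion/deletion
+   error recovery inside match:TokenType:Follow:. */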
+
+/*
+ * $ANTLR start identifier
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:9:1: identifier : ID ;
+ */
+- (void) identifier
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:10:5: ( ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/combined/Combined.g:10:7: ID // alt
+        {
+
+        [self match:input TokenType:ID Follow:FOLLOW_ID_in_identifier35]; 
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end identifier */
+/* ObjC end rules */
+
+@end /* end of CombinedParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/combined/Combined__.gl b/runtime/ObjC/Framework/examples/combined/Combined__.gl
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/combined/Combined__.gl
rename to runtime/ObjC/Framework/examples/combined/Combined__.gl
diff --git a/runtime/ObjC/Framework/examples/combined/main.m b/runtime/ObjC/Framework/examples/combined/main.m
new file mode 100644
index 0000000..fc48224
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/combined/main.m
@@ -0,0 +1,23 @@
+#import <Foundation/Foundation.h>
+#import "CombinedLexer.h"
+#import <ANTLR/ANTLR.h>
+
+int main(int argc, const char * argv[])
+{
+    NSLog(@"starting combined\n");
+	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+	NSString *string = @"xyyyyaxyyyyb";
+	NSLog(@"%@", string);
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:string];
+	CombinedLexer *lexer = [CombinedLexer newCombinedLexerWithCharStream:stream];
+	id<Token> currentToken;
+	while ((currentToken = [lexer nextToken]) && currentToken.type != TokenTypeEOF) {
+		NSLog(@"%@", currentToken);
+	}
+	[lexer release];
+	[stream release];
+	
+	[pool release];
+    NSLog(@"exiting combined\n");
+	return 0;
+}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g b/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g
rename to runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g
diff --git a/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.h b/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.h
new file mode 100644
index 0000000..cfb8eae
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.h
@@ -0,0 +1,87 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g 2012-02-16 17:34:08
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* Start cyclicDFAInterface */
+
+#pragma mark Rule return scopes Interface start
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define ARG 4
+#define CALL 5
+#define CHAR 6
+#define CLASS 7
+#define COMMENT 8
+#define ESC 9
+#define FIELD 10
+#define ID 11
+#define IMPORT 12
+#define METHOD 13
+#define QID 14
+#define QIDStar 15
+#define RETURN 16
+#define SL_COMMENT 17
+#define STAT 18
+#define STRING 19
+#define TYPE 20
+#define WS 21
+/* interface lexer class */
+@interface Fuzzy : Lexer { // line 283
+SEL synpred9_FuzzySelector;
+SEL synpred2_FuzzySelector;
+SEL synpred7_FuzzySelector;
+SEL synpred4_FuzzySelector;
+SEL synpred8_FuzzySelector;
+SEL synpred6_FuzzySelector;
+SEL synpred5_FuzzySelector;
+SEL synpred3_FuzzySelector;
+SEL synpred1_FuzzySelector;
+/* ObjC start of actions.lexer.memVars */
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (Fuzzy *)newFuzzyWithCharStream:(id<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+/* ObjC end actions.lexer.methodsDecl */
+- (void) mIMPORT ; 
+- (void) mRETURN ; 
+- (void) mCLASS ; 
+- (void) mMETHOD ; 
+- (void) mFIELD ; 
+- (void) mSTAT ; 
+- (void) mCALL ; 
+- (void) mCOMMENT ; 
+- (void) mSL_COMMENT ; 
+- (void) mSTRING ; 
+- (void) mCHAR ; 
+- (void) mWS ; 
+- (void) mQID ; 
+- (void) mQIDStar ; 
+- (void) mTYPE ; 
+- (void) mARG ; 
+- (void) mID ; 
+- (void) mESC ; 
+- (void) mTokens ; 
+- (void) synpred1_Fuzzy_fragment ; 
+- (void) synpred2_Fuzzy_fragment ; 
+- (void) synpred3_Fuzzy_fragment ; 
+- (void) synpred4_Fuzzy_fragment ; 
+- (void) synpred5_Fuzzy_fragment ; 
+- (void) synpred6_Fuzzy_fragment ; 
+- (void) synpred7_Fuzzy_fragment ; 
+- (void) synpred8_Fuzzy_fragment ; 
+- (void) synpred9_Fuzzy_fragment ; 
+
+@end /* end of Fuzzy interface */
+
diff --git a/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.m b/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.m
new file mode 100644
index 0000000..151974c
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.m
@@ -0,0 +1,2575 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g
+ *     -                            On : 2012-02-16 17:34:08
+ *     -                 for the lexer : FuzzyLexer
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g 2012-02-16 17:34:08
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "Fuzzy.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+/** As per Terence: No returns for lexer rules! */
+@implementation Fuzzy // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (Fuzzy *)newFuzzyWithCharStream:(id<CharStream>)anInput
+{
+    return [[Fuzzy alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:30+1]];
+    if ( self != nil ) {
+        /* Assign the selector ivars declared in Fuzzy.h; declaring fresh local
+           SEL variables here would shadow those ivars and leave them NULL. */
+        synpred9_FuzzySelector = @selector(synpred9_Fuzzy_fragment);
+        synpred2_FuzzySelector = @selector(synpred2_Fuzzy_fragment);
+        synpred7_FuzzySelector = @selector(synpred7_Fuzzy_fragment);
+        synpred4_FuzzySelector = @selector(synpred4_Fuzzy_fragment);
+        synpred8_FuzzySelector = @selector(synpred8_Fuzzy_fragment);
+        synpred6_FuzzySelector = @selector(synpred6_Fuzzy_fragment);
+        synpred5_FuzzySelector = @selector(synpred5_Fuzzy_fragment);
+        synpred3_FuzzySelector = @selector(synpred3_Fuzzy_fragment);
+        synpred1_FuzzySelector = @selector(synpred1_Fuzzy_fragment);
+
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+
+- (id<Token>) nextToken
+{
+    while (YES) {
+        if ( [input LA:1] == CharStreamEOF ) {
+            return [CommonToken eofToken];
+        }
+        state.token = nil;
+        state.channel = TokenChannelDefault;
+        state.tokenStartCharIndex = input.index;
+        state.tokenStartCharPositionInLine = [input getCharPositionInLine];
+        state.tokenStartLine = [input getLine];
+        state.text = nil;
+        @try {
+            NSInteger m = [input mark];
+            state.backtracking = 1; /* means we won't throw slow exception */
+            state.failed = NO;
+            [self mTokens];
+            state.backtracking = 0;
+            /* mTokens backtracks with synpred at backtracking==2
+               and we set the synpredgate to allow actions at level 1. */
+            if ( state.failed ) {
+                [input rewind:m];
+                [input consume]; /* advance one char and try again */
+            } else {
+                [self emit];
+                return state.token;
+            }
+        }
+        @catch (RecognitionException *re) {
+            // shouldn't happen in backtracking mode, but...
+            [self reportError:re];
+            [self recover:re];
+        }
+    }
+}
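+/* Illustrative only (not part of the generated file): a minimal driver for this
+   filter-mode lexer, using the runtime classes already seen in these examples
+   (ANTLRStringStream, id<Token>) and assuming a `source` NSString. Each nextToken
+   call either returns the next fuzzy match or silently consumes one character
+   and retries, as implemented above.
+
+       ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:source];
+       Fuzzy *lexer = [Fuzzy newFuzzyWithCharStream:stream];
+       id<Token> t;
+       while ((t = [lexer nextToken]) && t.type != TokenTypeEOF) {
+           NSLog(@"%@", t);
+       }
+*/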
+
+- (void)memoize:(id<IntStream>)anInput
+      RuleIndex:(NSInteger)ruleIndex
+     StartIndex:(NSInteger)ruleStartIndex
+{
+    if ( state.backtracking > 1 ) [super memoize:anInput RuleIndex:ruleIndex StartIndex:ruleStartIndex];
+}
+
+- (BOOL)alreadyParsedRule:(id<IntStream>)anInput RuleIndex:(NSInteger)ruleIndex
+{
+    if ( state.backtracking > 1 ) return [super alreadyParsedRule:anInput RuleIndex:ruleIndex];
+    return NO;
+}
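+/* Note (editorial, not generated output): memoization is only consulted above
+   backtracking level 1 because level 1 is the ordinary filter-mode attempt made by
+   nextToken; only the nested syntactic predicates (level 2 and higher) benefit from
+   remembering rules that already failed at a given input position. */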
+/* Start of Rules */
+// $ANTLR start "IMPORT"
+- (void) mIMPORT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = IMPORT;
+        NSInteger _channel = TokenChannelDefault;
+        CommonToken *name=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:5:2: ( 'import' WS name= QIDStar ( WS )? ';' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:5:4: 'import' WS name= QIDStar ( WS )? ';' // alt
+        {
+
+
+        [self matchString:@"import"]; if ( state.failed ) return ;
+
+
+         
+
+        [self mWS]; if ( state.failed ) return ;
+
+
+         
+
+        NSInteger nameStart31 = input.index;
+        [self mQIDStar]; if ( state.failed ) return ;
+
+        name = [[CommonToken newToken:input Type:TokenTypeInvalid Channel:TokenChannelDefault Start:nameStart31 Stop:input.index-1] retain];
+        name.line = self.line;
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:5:29: ( WS )? // block
+        NSInteger alt1=2;
+        NSInteger LA1_0 = [input LA:1];
+
+        if ( ((LA1_0 >= '\t' && LA1_0 <= '\n')||LA1_0==' ') ) {
+            alt1=1;
+        }
+        switch (alt1) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:5:29: WS // alt
+                {
+
+
+                [self mWS]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+
+        [self matchChar:';']; if ( state.failed ) return ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "IMPORT" */
+// $ANTLR start "RETURN"
+- (void) mRETURN
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = RETURN;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:10:2: ( 'return' ( options {greedy=false; } : . )* ';' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:10:4: 'return' ( options {greedy=false; } : . )* ';' // alt
+        {
+
+
+        [self matchString:@"return"]; if ( state.failed ) return ;
+
+
+         
+
+        do {
+            NSInteger alt2=2;
+            NSInteger LA2_0 = [input LA:1];
+            if ( (LA2_0==';') ) {
+                alt2=2;
+            }
+            else if ( ((LA2_0 >= 0x0000 && LA2_0 <= ':')||(LA2_0 >= '<' && LA2_0 <= 0xFFFF)) ) {
+                alt2=1;
+            }
+
+
+            switch (alt2) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:10:38: . // alt
+                    {
+
+                    [self matchAny]; if ( state.failed ) return ;
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop2;
+            }
+        } while (YES);
+        loop2: ;
+
+         
+
+        [self matchChar:';']; if ( state.failed ) return ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "RETURN" */
+// $ANTLR start "CLASS"
+- (void) mCLASS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = CLASS;
+        NSInteger _channel = TokenChannelDefault;
+        CommonToken *name=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:14:2: ( 'class' WS name= ID ( WS )? ( 'extends' WS QID ( WS )? )? ( 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:14:4: 'class' WS name= ID ( WS )? ( 'extends' WS QID ( WS )? )? ( 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' // alt
+        {
+
+
+        [self matchString:@"class"]; if ( state.failed ) return ;
+
+
+         
+
+        [self mWS]; if ( state.failed ) return ;
+
+
+         
+
+        NSInteger nameStart81 = input.index;
+        [self mID]; if ( state.failed ) return ;
+
+        name = [[CommonToken newToken:input Type:TokenTypeInvalid Channel:TokenChannelDefault Start:nameStart81 Stop:input.index-1] retain];
+        name.line = self.line;
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:14:23: ( WS )? // block
+        NSInteger alt3=2;
+        NSInteger LA3_0 = [input LA:1];
+
+        if ( ((LA3_0 >= '\t' && LA3_0 <= '\n')||LA3_0==' ') ) {
+            alt3=1;
+        }
+        switch (alt3) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:14:23: WS // alt
+                {
+
+
+                [self mWS]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:14:27: ( 'extends' WS QID ( WS )? )? // block
+        NSInteger alt5=2;
+        NSInteger LA5_0 = [input LA:1];
+
+        if ( (LA5_0=='e') ) {
+            alt5=1;
+        }
+        switch (alt5) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:14:28: 'extends' WS QID ( WS )? // alt
+                {
+
+
+                [self matchString:@"extends"]; if ( state.failed ) return ;
+
+
+                 
+
+                [self mWS]; if ( state.failed ) return ;
+
+
+                 
+
+                [self mQID]; if ( state.failed ) return ;
+
+
+                 
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:14:45: ( WS )? // block
+                NSInteger alt4=2;
+                NSInteger LA4_0 = [input LA:1];
+
+                if ( ((LA4_0 >= '\t' && LA4_0 <= '\n')||LA4_0==' ') ) {
+                    alt4=1;
+                }
+                switch (alt4) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:14:45: WS // alt
+                        {
+
+
+                        [self mWS]; if ( state.failed ) return ;
+
+
+                         
+                        }
+                        break;
+
+                }
+
+                 
+                }
+                break;
+
+        }
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:15:3: ( 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? // block
+        NSInteger alt10=2;
+        NSInteger LA10_0 = [input LA:1];
+
+        if ( (LA10_0=='i') ) {
+            alt10=1;
+        }
+        switch (alt10) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:15:4: 'implements' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* // alt
+                {
+
+
+                [self matchString:@"implements"]; if ( state.failed ) return ;
+
+
+                 
+
+                [self mWS]; if ( state.failed ) return ;
+
+
+                 
+
+                [self mQID]; if ( state.failed ) return ;
+
+
+                 
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:15:24: ( WS )? // block
+                NSInteger alt6=2;
+                NSInteger LA6_0 = [input LA:1];
+
+                if ( ((LA6_0 >= '\t' && LA6_0 <= '\n')||LA6_0==' ') ) {
+                    alt6=1;
+                }
+                switch (alt6) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:15:24: WS // alt
+                        {
+
+
+                        [self mWS]; if ( state.failed ) return ;
+
+
+                         
+                        }
+                        break;
+
+                }
+
+                 
+
+                do {
+                    NSInteger alt9=2;
+                    NSInteger LA9_0 = [input LA:1];
+                    if ( (LA9_0==',') ) {
+                        alt9=1;
+                    }
+
+
+                    switch (alt9) {
+                        case 1 : ;
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:15:29: ',' ( WS )? QID ( WS )? // alt
+                            {
+
+
+                            [self matchChar:',']; if ( state.failed ) return ;
+
+                             
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:15:33: ( WS )? // block
+                            NSInteger alt7=2;
+                            NSInteger LA7_0 = [input LA:1];
+
+                            if ( ((LA7_0 >= '\t' && LA7_0 <= '\n')||LA7_0==' ') ) {
+                                alt7=1;
+                            }
+                            switch (alt7) {
+                                case 1 : ;
+                                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:15:33: WS // alt
+                                    {
+
+
+                                    [self mWS]; if ( state.failed ) return ;
+
+
+                                     
+                                    }
+                                    break;
+
+                            }
+
+                             
+
+                            [self mQID]; if ( state.failed ) return ;
+
+
+                             
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:15:41: ( WS )? // block
+                            NSInteger alt8=2;
+                            NSInteger LA8_0 = [input LA:1];
+
+                            if ( ((LA8_0 >= '\t' && LA8_0 <= '\n')||LA8_0==' ') ) {
+                                alt8=1;
+                            }
+                            switch (alt8) {
+                                case 1 : ;
+                                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:15:41: WS // alt
+                                    {
+
+
+                                    [self mWS]; if ( state.failed ) return ;
+
+
+                                     
+                                    }
+                                    break;
+
+                            }
+
+                             
+                            }
+                            break;
+
+                        default :
+                            goto loop9;
+                    }
+                } while (YES);
+                loop9: ;
+
+                 
+                }
+                break;
+
+        }
+
+         
+
+        [self matchChar:'{']; if ( state.failed ) return ;
+
+         
+
+        if ( state.backtracking == 1 ) {
+            NSLog(@"found class %@", (name!=nil?name.text:nil));
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "CLASS" */
+// $ANTLR start "METHOD"
+- (void) mMETHOD
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = METHOD;
+        NSInteger _channel = TokenChannelDefault;
+        CommonToken *name=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:5: ( TYPE WS name= ID ( WS )? '(' ( ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* )? ')' ( WS )? ( 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:9: TYPE WS name= ID ( WS )? '(' ( ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* )? ')' ( WS )? ( 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? '{' // alt
+        {
+
+
+        [self mTYPE]; if ( state.failed ) return ;
+
+
+         
+
+        [self mWS]; if ( state.failed ) return ;
+
+
+         
+
+        NSInteger nameStart158 = input.index;
+        [self mID]; if ( state.failed ) return ;
+
+        name = [[CommonToken newToken:input Type:TokenTypeInvalid Channel:TokenChannelDefault Start:nameStart158 Stop:input.index-1] retain];
+        name.line = self.line;
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:25: ( WS )? // block
+        NSInteger alt11=2;
+        NSInteger LA11_0 = [input LA:1];
+
+        if ( ((LA11_0 >= '\t' && LA11_0 <= '\n')||LA11_0==' ') ) {
+            alt11=1;
+        }
+        switch (alt11) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:25: WS // alt
+                {
+
+
+                [self mWS]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+
+        [self matchChar:'(']; if ( state.failed ) return ;
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:33: ( ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* )? // block
+        NSInteger alt16=2;
+        NSInteger LA16_0 = [input LA:1];
+
+        if ( ((LA16_0 >= 'A' && LA16_0 <= 'Z')||LA16_0=='_'||(LA16_0 >= 'a' && LA16_0 <= 'z')) ) {
+            alt16=1;
+        }
+        switch (alt16) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:35: ARG ( WS )? ( ',' ( WS )? ARG ( WS )? )* // alt
+                {
+
+
+                [self mARG]; if ( state.failed ) return ;
+
+
+                 
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:39: ( WS )? // block
+                NSInteger alt12=2;
+                NSInteger LA12_0 = [input LA:1];
+
+                if ( ((LA12_0 >= '\t' && LA12_0 <= '\n')||LA12_0==' ') ) {
+                    alt12=1;
+                }
+                switch (alt12) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:39: WS // alt
+                        {
+
+
+                        [self mWS]; if ( state.failed ) return ;
+
+
+                         
+                        }
+                        break;
+
+                }
+
+                 
+
+                do {
+                    NSInteger alt15=2;
+                    NSInteger LA15_0 = [input LA:1];
+                    if ( (LA15_0==',') ) {
+                        alt15=1;
+                    }
+
+
+                    switch (alt15) {
+                        case 1 : ;
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:44: ',' ( WS )? ARG ( WS )? // alt
+                            {
+
+
+                            [self matchChar:',']; if ( state.failed ) return ;
+
+                             
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:48: ( WS )? // block
+                            NSInteger alt13=2;
+                            NSInteger LA13_0 = [input LA:1];
+
+                            if ( ((LA13_0 >= '\t' && LA13_0 <= '\n')||LA13_0==' ') ) {
+                                alt13=1;
+                            }
+                            switch (alt13) {
+                                case 1 : ;
+                                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:48: WS // alt
+                                    {
+
+
+                                    [self mWS]; if ( state.failed ) return ;
+
+
+                                     
+                                    }
+                                    break;
+
+                            }
+
+                             
+
+                            [self mARG]; if ( state.failed ) return ;
+
+
+                             
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:56: ( WS )? // block
+                            NSInteger alt14=2;
+                            NSInteger LA14_0 = [input LA:1];
+
+                            if ( ((LA14_0 >= '\t' && LA14_0 <= '\n')||LA14_0==' ') ) {
+                                alt14=1;
+                            }
+                            switch (alt14) {
+                                case 1 : ;
+                                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:56: WS // alt
+                                    {
+
+
+                                    [self mWS]; if ( state.failed ) return ;
+
+
+                                     
+                                    }
+                                    break;
+
+                            }
+
+                             
+                            }
+                            break;
+
+                        default :
+                            goto loop15;
+                    }
+                } while (YES);
+                loop15: ;
+
+                 
+                }
+                break;
+
+        }
+
+         
+
+        [self matchChar:')']; if ( state.failed ) return ;
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:69: ( WS )? // block
+        NSInteger alt17=2;
+        NSInteger LA17_0 = [input LA:1];
+
+        if ( ((LA17_0 >= '\t' && LA17_0 <= '\n')||LA17_0==' ') ) {
+            alt17=1;
+        }
+        switch (alt17) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:20:69: WS // alt
+                {
+
+
+                [self mWS]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:21:8: ( 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* )? // block
+        NSInteger alt22=2;
+        NSInteger LA22_0 = [input LA:1];
+
+        if ( (LA22_0=='t') ) {
+            alt22=1;
+        }
+        switch (alt22) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:21:9: 'throws' WS QID ( WS )? ( ',' ( WS )? QID ( WS )? )* // alt
+                {
+
+
+                [self matchString:@"throws"]; if ( state.failed ) return ;
+
+
+                 
+
+                [self mWS]; if ( state.failed ) return ;
+
+
+                 
+
+                [self mQID]; if ( state.failed ) return ;
+
+
+                 
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:21:25: ( WS )? // block
+                NSInteger alt18=2;
+                NSInteger LA18_0 = [input LA:1];
+
+                if ( ((LA18_0 >= '\t' && LA18_0 <= '\n')||LA18_0==' ') ) {
+                    alt18=1;
+                }
+                switch (alt18) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:21:25: WS // alt
+                        {
+
+
+                        [self mWS]; if ( state.failed ) return ;
+
+
+                         
+                        }
+                        break;
+
+                }
+
+                 
+
+                do {
+                    NSInteger alt21=2;
+                    NSInteger LA21_0 = [input LA:1];
+                    if ( (LA21_0==',') ) {
+                        alt21=1;
+                    }
+
+
+                    switch (alt21) {
+                        case 1 : ;
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:21:30: ',' ( WS )? QID ( WS )? // alt
+                            {
+
+
+                            [self matchChar:',']; if ( state.failed ) return ;
+
+                             
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:21:34: ( WS )? // block
+                            NSInteger alt19=2;
+                            NSInteger LA19_0 = [input LA:1];
+
+                            if ( ((LA19_0 >= '\t' && LA19_0 <= '\n')||LA19_0==' ') ) {
+                                alt19=1;
+                            }
+                            switch (alt19) {
+                                case 1 : ;
+                                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:21:34: WS // alt
+                                    {
+
+
+                                    [self mWS]; if ( state.failed ) return ;
+
+
+                                     
+                                    }
+                                    break;
+
+                            }
+
+                             
+
+                            [self mQID]; if ( state.failed ) return ;
+
+
+                             
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:21:42: ( WS )? // block
+                            NSInteger alt20=2;
+                            NSInteger LA20_0 = [input LA:1];
+
+                            if ( ((LA20_0 >= '\t' && LA20_0 <= '\n')||LA20_0==' ') ) {
+                                alt20=1;
+                            }
+                            switch (alt20) {
+                                case 1 : ;
+                                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:21:42: WS // alt
+                                    {
+
+
+                                    [self mWS]; if ( state.failed ) return ;
+
+
+                                     
+                                    }
+                                    break;
+
+                            }
+
+                             
+                            }
+                            break;
+
+                        default :
+                            goto loop21;
+                    }
+                } while (YES);
+                loop21: ;
+
+                 
+                }
+                break;
+
+        }
+
+         
+
+        [self matchChar:'{']; if ( state.failed ) return ;
+
+         
+
+        if ( state.backtracking == 1 ) {
+            NSLog(@"found method %@", (name!=nil?name.text:nil));
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "METHOD" */
+// $ANTLR start "FIELD"
+- (void) mFIELD
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = FIELD;
+        NSInteger _channel = TokenChannelDefault;
+        CommonToken *name=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:26:5: ( TYPE WS name= ID ( '[]' )? ( WS )? ( ';' | '=' ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:26:9: TYPE WS name= ID ( '[]' )? ( WS )? ( ';' | '=' ) // alt
+        {
+
+
+        [self mTYPE]; if ( state.failed ) return ;
+
+
+         
+
+        [self mWS]; if ( state.failed ) return ;
+
+
+         
+
+        NSInteger nameStart261 = input.index;
+        [self mID]; if ( state.failed ) return ;
+
+        name = [[CommonToken newToken:input Type:TokenTypeInvalid Channel:TokenChannelDefault Start:nameStart261 Stop:input.index-1] retain];
+        name.line = self.line;
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:26:25: ( '[]' )? // block
+        NSInteger alt23=2;
+        NSInteger LA23_0 = [input LA:1];
+
+        if ( (LA23_0=='[') ) {
+            alt23=1;
+        }
+        switch (alt23) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:26:25: '[]' // alt
+                {
+
+
+                [self matchString:@"[]"]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:26:31: ( WS )? // block
+        NSInteger alt24=2;
+        NSInteger LA24_0 = [input LA:1];
+
+        if ( ((LA24_0 >= '\t' && LA24_0 <= '\n')||LA24_0==' ') ) {
+            alt24=1;
+        }
+        switch (alt24) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:26:31: WS // alt
+                {
+
+
+                [self mWS]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+        if ([input LA:1] == ';'||[input LA:1] == '=') {
+            [input consume];
+            state.failed = NO;
+
+        } else {
+            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            [self recover:mse];
+            @throw mse;
+        }
+
+         
+
+        if ( state.backtracking == 1 ) {
+            NSLog(@"found var %@", (name!=nil?name.text:nil));
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "FIELD" */
+// $ANTLR start "STAT"
+- (void) mSTAT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = STAT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:30:5: ( ( 'if' | 'while' | 'switch' | 'for' ) ( WS )? '(' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:30:7: ( 'if' | 'while' | 'switch' | 'for' ) ( WS )? '(' // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:30:7: ( 'if' | 'while' | 'switch' | 'for' ) // block
+        NSInteger alt25=4;
+        unichar charLA25 = [input LA:1];
+        switch (charLA25) {
+            case 'i': ;
+                {
+                alt25=1;
+                }
+                break;
+            case 'w': ;
+                {
+                alt25=2;
+                }
+                break;
+            case 's': ;
+                {
+                alt25=3;
+                }
+                break;
+            case 'f': ;
+                {
+                alt25=4;
+                }
+                break;
+
+        default: ;
+            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+            NoViableAltException *nvae = [NoViableAltException newException:25 state:0 stream:input];
+            nvae.c = charLA25;
+            @throw nvae;
+
+        }
+
+        switch (alt25) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:30:8: 'if' // alt
+                {
+
+
+                [self matchString:@"if"]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:30:13: 'while' // alt
+                {
+
+
+                [self matchString:@"while"]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:30:21: 'switch' // alt
+                {
+
+
+                [self matchString:@"switch"]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+            case 4 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:30:30: 'for' // alt
+                {
+
+
+                [self matchString:@"for"]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:30:37: ( WS )? // block
+        NSInteger alt26=2;
+        NSInteger LA26_0 = [input LA:1];
+
+        if ( ((LA26_0 >= '\t' && LA26_0 <= '\n')||LA26_0==' ') ) {
+            alt26=1;
+        }
+        switch (alt26) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:30:37: WS // alt
+                {
+
+
+                [self mWS]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+
+        [self matchChar:'(']; if ( state.failed ) return ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "STAT" */
+// $ANTLR start "CALL"
+- (void) mCALL
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = CALL;
+        NSInteger _channel = TokenChannelDefault;
+        CommonToken *name=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:33:5: (name= QID ( WS )? '(' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:33:9: name= QID ( WS )? '(' // alt
+        {
+
+
+        NSInteger nameStart326 = input.index;
+        [self mQID]; if ( state.failed ) return ;
+
+        name = [[CommonToken newToken:input Type:TokenTypeInvalid Channel:TokenChannelDefault Start:nameStart326 Stop:input.index-1] retain];
+        name.line = self.line;
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:33:18: ( WS )? // block
+        NSInteger alt27=2;
+        NSInteger LA27_0 = [input LA:1];
+
+        if ( ((LA27_0 >= '\t' && LA27_0 <= '\n')||LA27_0==' ') ) {
+            alt27=1;
+        }
+        switch (alt27) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:33:18: WS // alt
+                {
+
+
+                [self mWS]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+
+        [self matchChar:'(']; if ( state.failed ) return ;
+
+         
+
+        if ( state.backtracking == 1 ) {
+            /*ignore if this/super */ NSLog(@"found call %@",(name!=nil?name.text:nil));
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "CALL" */
+// $ANTLR start "COMMENT"
+- (void) mCOMMENT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = COMMENT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:38:5: ( '/*' ( options {greedy=false; } : . )* '*/' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:38:9: '/*' ( options {greedy=false; } : . )* '*/' // alt
+        {
+
+
+        [self matchString:@"/*"]; if ( state.failed ) return ;
+
+
+         
+
+        do {
+            NSInteger alt28=2;
+            NSInteger LA28_0 = [input LA:1];
+            if ( (LA28_0=='*') ) {
+                NSInteger LA28_1 = [input LA:2];
+                if ( (LA28_1=='/') ) {
+                    alt28=2;
+                }
+                else if ( ((LA28_1 >= 0x0000 && LA28_1 <= '.')||(LA28_1 >= '0' && LA28_1 <= 0xFFFF)) ) {
+                    alt28=1;
+                }
+
+
+            }
+            else if ( ((LA28_0 >= 0x0000 && LA28_0 <= ')')||(LA28_0 >= '+' && LA28_0 <= 0xFFFF)) ) {
+                alt28=1;
+            }
+
+
+            switch (alt28) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:38:41: . // alt
+                    {
+
+                    [self matchAny]; if ( state.failed ) return ;
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop28;
+            }
+        } while (YES);
+        loop28: ;
+
+         
+
+        [self matchString:@"*/"]; if ( state.failed ) return ;
+
+
+         
+
+        if ( state.backtracking == 1 ) {
+            NSLog(@"found comment %@", [self text]);
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "COMMENT" */
+// $ANTLR start "SL_COMMENT"
+- (void) mSL_COMMENT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = SL_COMMENT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:43:5: ( '//' ( options {greedy=false; } : . )* '\\n' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:43:9: '//' ( options {greedy=false; } : . )* '\\n' // alt
+        {
+
+
+        [self matchString:@"//"]; if ( state.failed ) return ;
+
+
+         
+
+        do {
+            NSInteger alt29=2;
+            NSInteger LA29_0 = [input LA:1];
+            if ( (LA29_0=='\n') ) {
+                alt29=2;
+            }
+            else if ( ((LA29_0 >= 0x0000 && LA29_0 <= '\t')||(LA29_0 >= 0x000B && LA29_0 <= 0xFFFF)) ) {
+                alt29=1;
+            }
+
+
+            switch (alt29) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:43:41: . // alt
+                    {
+
+                    [self matchAny]; if ( state.failed ) return ;
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop29;
+            }
+        } while (YES);
+        loop29: ;
+
+         
+
+        [self matchChar:'\n']; if ( state.failed ) return ;
+
+         
+
+        if ( state.backtracking == 1 ) {
+            NSLog(@"found // comment %@", [self text]);
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "SL_COMMENT" */
+// $ANTLR start "STRING"
+- (void) mSTRING
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = STRING;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:48:2: ( '\"' ( options {greedy=false; } : ESC | . )* '\"' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:48:4: '\"' ( options {greedy=false; } : ESC | . )* '\"' // alt
+        {
+
+
+        [self matchChar:'"']; if ( state.failed ) return ;
+
+         
+
+        do {
+            NSInteger alt30=3;
+            NSInteger LA30_0 = [input LA:1];
+            if ( (LA30_0=='"') ) {
+                alt30=3;
+            }
+            else if ( (LA30_0=='\\') ) {
+                NSInteger LA30_2 = [input LA:2];
+                if ( (LA30_2=='"') ) {
+                    alt30=1;
+                }
+                else if ( (LA30_2=='\\') ) {
+                    alt30=1;
+                }
+                else if ( (LA30_2=='\'') ) {
+                    alt30=1;
+                }
+                else if ( ((LA30_2 >= 0x0000 && LA30_2 <= '!')||(LA30_2 >= '#' && LA30_2 <= '&')||(LA30_2 >= '(' && LA30_2 <= '[')||(LA30_2 >= ']' && LA30_2 <= 0xFFFF)) ) {
+                    alt30=2;
+                }
+
+
+            }
+            else if ( ((LA30_0 >= 0x0000 && LA30_0 <= '!')||(LA30_0 >= '#' && LA30_0 <= '[')||(LA30_0 >= ']' && LA30_0 <= 0xFFFF)) ) {
+                alt30=2;
+            }
+
+
+            switch (alt30) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:48:34: ESC // alt
+                    {
+
+
+                    [self mESC]; if ( state.failed ) return ;
+
+
+                     
+                    }
+                    break;
+                case 2 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:48:40: . // alt
+                    {
+
+                    [self matchAny]; if ( state.failed ) return ;
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop30;
+            }
+        } while (YES);
+        loop30: ;
+
+         
+
+        [self matchChar:'"']; if ( state.failed ) return ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "STRING" */
+// $ANTLR start "CHAR"
+- (void) mCHAR
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = CHAR;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:52:2: ( '\\'' ( options {greedy=false; } : ESC | . )* '\\'' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:52:4: '\\'' ( options {greedy=false; } : ESC | . )* '\\'' // alt
+        {
+
+
+        [self matchChar:'\'']; if ( state.failed ) return ;
+
+         
+
+        do {
+            NSInteger alt31=3;
+            NSInteger LA31_0 = [input LA:1];
+            if ( (LA31_0=='\'') ) {
+                alt31=3;
+            }
+            else if ( (LA31_0=='\\') ) {
+                NSInteger LA31_2 = [input LA:2];
+                if ( (LA31_2=='\'') ) {
+                    alt31=1;
+                }
+                else if ( (LA31_2=='\\') ) {
+                    alt31=1;
+                }
+                else if ( (LA31_2=='"') ) {
+                    alt31=1;
+                }
+                else if ( ((LA31_2 >= 0x0000 && LA31_2 <= '!')||(LA31_2 >= '#' && LA31_2 <= '&')||(LA31_2 >= '(' && LA31_2 <= '[')||(LA31_2 >= ']' && LA31_2 <= 0xFFFF)) ) {
+                    alt31=2;
+                }
+
+
+            }
+            else if ( ((LA31_0 >= 0x0000 && LA31_0 <= '&')||(LA31_0 >= '(' && LA31_0 <= '[')||(LA31_0 >= ']' && LA31_0 <= 0xFFFF)) ) {
+                alt31=2;
+            }
+
+
+            switch (alt31) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:52:35: ESC // alt
+                    {
+
+
+                    [self mESC]; if ( state.failed ) return ;
+
+
+                     
+                    }
+                    break;
+                case 2 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:52:41: . // alt
+                    {
+
+                    [self matchAny]; if ( state.failed ) return ;
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop31;
+            }
+        } while (YES);
+        loop31: ;
+
+         
+
+        [self matchChar:'\'']; if ( state.failed ) return ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "CHAR" */
+// $ANTLR start "WS"
+- (void) mWS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = WS;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:55:5: ( ( ' ' | '\\t' | '\\n' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:55:9: ( ' ' | '\\t' | '\\n' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:55:9: ( ' ' | '\\t' | '\\n' )+ // positiveClosureBlock
+        NSInteger cnt32 = 0;
+        do {
+            NSInteger alt32 = 2;
+            NSInteger LA32_0 = [input LA:1];
+            if ( ((LA32_0 >= '\t' && LA32_0 <= '\n')||LA32_0==' ') ) {
+                alt32=1;
+            }
+
+
+            switch (alt32) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == ' ') {
+                        [input consume];
+                        state.failed = NO;
+
+                    } else {
+                        if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt32 >= 1 )
+                        goto loop32;
+                    if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:32];
+                    @throw eee;
+            }
+            cnt32++;
+        } while (YES);
+        loop32: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "WS" */
+// $ANTLR start "QID"
+- (void) mQID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:59:5: ( ID ( '.' ID )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:59:7: ID ( '.' ID )* // alt
+        {
+
+
+        [self mID]; if ( state.failed ) return ;
+
+
+         
+
+        do {
+            NSInteger alt33=2;
+            NSInteger LA33_0 = [input LA:1];
+            if ( (LA33_0=='.') ) {
+                alt33=1;
+            }
+
+
+            switch (alt33) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:59:11: '.' ID // alt
+                    {
+
+
+                    [self matchChar:'.']; if ( state.failed ) return ;
+
+                     
+
+                    [self mID]; if ( state.failed ) return ;
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop33;
+            }
+        } while (YES);
+        loop33: ;
+
+         
+        }
+
+
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "QID" */
+// $ANTLR start "QIDStar"
+- (void) mQIDStar
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:68:2: ( ID ( '.' ID )* ( '.*' )? ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:68:4: ID ( '.' ID )* ( '.*' )? // alt
+        {
+
+
+        [self mID]; if ( state.failed ) return ;
+
+
+         
+
+        do {
+            NSInteger alt34=2;
+            NSInteger LA34_0 = [input LA:1];
+            if ( (LA34_0=='.') ) {
+                NSInteger LA34_1 = [input LA:2];
+                if ( ((LA34_1 >= 'A' && LA34_1 <= 'Z')||LA34_1=='_'||(LA34_1 >= 'a' && LA34_1 <= 'z')) ) {
+                    alt34=1;
+                }
+
+
+            }
+
+
+            switch (alt34) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:68:8: '.' ID // alt
+                    {
+
+
+                    [self matchChar:'.']; if ( state.failed ) return ;
+
+                     
+
+                    [self mID]; if ( state.failed ) return ;
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop34;
+            }
+        } while (YES);
+        loop34: ;
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:68:17: ( '.*' )? // block
+        NSInteger alt35=2;
+        NSInteger LA35_0 = [input LA:1];
+
+        if ( (LA35_0=='.') ) {
+            alt35=1;
+        }
+        switch (alt35) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:68:17: '.*' // alt
+                {
+
+
+                [self matchString:@".*"]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+        }
+
+
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "QIDStar" */
+// $ANTLR start "TYPE"
+- (void) mTYPE
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:72:5: ( QID ( '[]' )? ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:72:9: QID ( '[]' )? // alt
+        {
+
+
+        [self mQID]; if ( state.failed ) return ;
+
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:72:13: ( '[]' )? // block
+        NSInteger alt36=2;
+        NSInteger LA36_0 = [input LA:1];
+
+        if ( (LA36_0=='[') ) {
+            alt36=1;
+        }
+        switch (alt36) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:72:13: '[]' // alt
+                {
+
+
+                [self matchString:@"[]"]; if ( state.failed ) return ;
+
+
+                 
+                }
+                break;
+
+        }
+
+         
+        }
+
+
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "TYPE" */
+// $ANTLR start "ARG"
+- (void) mARG
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:76:5: ( TYPE WS ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:76:9: TYPE WS ID // alt
+        {
+
+
+        [self mTYPE]; if ( state.failed ) return ;
+
+
+         
+
+        [self mWS]; if ( state.failed ) return ;
+
+
+         
+
+        [self mID]; if ( state.failed ) return ;
+
+
+         
+        }
+
+
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "ARG" */
+// $ANTLR start "ID"
+- (void) mID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:80:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:80:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '_' | '0' .. '9' )* // alt
+        {
+
+        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+            [input consume];
+            state.failed = NO;
+
+        } else {
+            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            [self recover:mse];
+            @throw mse;
+        }
+
+         
+
+        do {
+            NSInteger alt37=2;
+            NSInteger LA37_0 = [input LA:1];
+            if ( ((LA37_0 >= '0' && LA37_0 <= '9')||(LA37_0 >= 'A' && LA37_0 <= 'Z')||LA37_0=='_'||(LA37_0 >= 'a' && LA37_0 <= 'z')) ) {
+                alt37=1;
+            }
+
+
+            switch (alt37) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+                        [input consume];
+                        state.failed = NO;
+
+                    } else {
+                        if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop37;
+            }
+        } while (YES);
+        loop37: ;
+
+         
+        }
+
+
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "ID" */
+// $ANTLR start "ESC"
+- (void) mESC
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:84:5: ( '\\\\' ( '\"' | '\\'' | '\\\\' ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:84:7: '\\\\' ( '\"' | '\\'' | '\\\\' ) // alt
+        {
+
+
+        [self matchChar:'\\']; if ( state.failed ) return ;
+
+         
+        if ([input LA:1] == '"'||[input LA:1] == '\''||[input LA:1] == '\\') {
+            [input consume];
+            state.failed = NO;
+
+        } else {
+            if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            [self recover:mse];
+            @throw mse;
+        }
+
+         
+        }
+
+
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "ESC" */
+- (void) mTokens
+{
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:39: ( IMPORT | RETURN | CLASS | METHOD | FIELD | STAT | CALL | COMMENT | SL_COMMENT | STRING | CHAR | WS ) //ruleblock
+    NSInteger alt38=12;
+    unichar charLA38 = [input LA:1];
+    switch (charLA38) {
+        case 'i': ;
+            {
+            NSInteger LA38_1 = [input LA:2];
+
+            if ( ([self evaluateSyntacticPredicate:@selector(synpred1_Fuzzy_fragment)]) ) {
+                alt38=1;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) ) {
+                alt38=4;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) ) {
+                alt38=5;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred6_Fuzzy_fragment)]) ) {
+                alt38=6;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) ) {
+                alt38=7;
+            }
+            else {
+                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+                NoViableAltException *nvae = [NoViableAltException newException:38 state:1 stream:input];
+                nvae.c = LA38_1;
+                @throw nvae;
+
+            }
+            }
+            break;
+        case 'r': ;
+            {
+            NSInteger LA38_7 = [input LA:2];
+
+            if ( ([self evaluateSyntacticPredicate:@selector(synpred2_Fuzzy_fragment)]) ) {
+                alt38=2;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) ) {
+                alt38=4;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) ) {
+                alt38=5;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) ) {
+                alt38=7;
+            }
+            else {
+                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+                NoViableAltException *nvae = [NoViableAltException newException:38 state:7 stream:input];
+                nvae.c = LA38_7;
+                @throw nvae;
+
+            }
+            }
+            break;
+        case 'c': ;
+            {
+            NSInteger LA38_9 = [input LA:2];
+
+            if ( ([self evaluateSyntacticPredicate:@selector(synpred3_Fuzzy_fragment)]) ) {
+                alt38=3;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) ) {
+                alt38=4;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) ) {
+                alt38=5;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) ) {
+                alt38=7;
+            }
+            else {
+                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+                NoViableAltException *nvae = [NoViableAltException newException:38 state:9 stream:input];
+                nvae.c = LA38_9;
+                @throw nvae;
+
+            }
+            }
+            break;
+        case 'f': ;
+        case 's': ;
+        case 'w': ;
+            {
+            NSInteger LA38_11 = [input LA:2];
+
+            if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) ) {
+                alt38=4;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) ) {
+                alt38=5;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred6_Fuzzy_fragment)]) ) {
+                alt38=6;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) ) {
+                alt38=7;
+            }
+            else {
+                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+                NoViableAltException *nvae = [NoViableAltException newException:38 state:11 stream:input];
+                nvae.c = LA38_11;
+                @throw nvae;
+
+            }
+            }
+            break;
+        case 'A': ;
+        case 'B': ;
+        case 'C': ;
+        case 'D': ;
+        case 'E': ;
+        case 'F': ;
+        case 'G': ;
+        case 'H': ;
+        case 'I': ;
+        case 'J': ;
+        case 'K': ;
+        case 'L': ;
+        case 'M': ;
+        case 'N': ;
+        case 'O': ;
+        case 'P': ;
+        case 'Q': ;
+        case 'R': ;
+        case 'S': ;
+        case 'T': ;
+        case 'U': ;
+        case 'V': ;
+        case 'W': ;
+        case 'X': ;
+        case 'Y': ;
+        case 'Z': ;
+        case '_': ;
+        case 'a': ;
+        case 'b': ;
+        case 'd': ;
+        case 'e': ;
+        case 'g': ;
+        case 'h': ;
+        case 'j': ;
+        case 'k': ;
+        case 'l': ;
+        case 'm': ;
+        case 'n': ;
+        case 'o': ;
+        case 'p': ;
+        case 'q': ;
+        case 't': ;
+        case 'u': ;
+        case 'v': ;
+        case 'x': ;
+        case 'y': ;
+        case 'z': ;
+            {
+            NSInteger LA38_12 = [input LA:2];
+
+            if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Fuzzy_fragment)]) ) {
+                alt38=4;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred5_Fuzzy_fragment)]) ) {
+                alt38=5;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred7_Fuzzy_fragment)]) ) {
+                alt38=7;
+            }
+            else {
+                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+                NoViableAltException *nvae = [NoViableAltException newException:38 state:12 stream:input];
+                nvae.c = LA38_12;
+                @throw nvae;
+
+            }
+            }
+            break;
+        case '/': ;
+            {
+            NSInteger LA38_13 = [input LA:2];
+
+            if ( ([self evaluateSyntacticPredicate:@selector(synpred8_Fuzzy_fragment)]) ) {
+                alt38=8;
+            }
+            else if ( ([self evaluateSyntacticPredicate:@selector(synpred9_Fuzzy_fragment)]) ) {
+                alt38=9;
+            }
+            else {
+                if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+                NoViableAltException *nvae = [NoViableAltException newException:38 state:13 stream:input];
+                nvae.c = LA38_13;
+                @throw nvae;
+
+            }
+            }
+            break;
+        case '"': ;
+            {
+            alt38=10;
+            }
+            break;
+        case '\'': ;
+            {
+            alt38=11;
+            }
+            break;
+        case '\t': ;
+        case '\n': ;
+        case ' ': ;
+            {
+            alt38=12;
+            }
+            break;
+
+    default: ;
+        if ( state.backtracking > 0 ) { state.failed = YES; return ; }
+
+        NoViableAltException *nvae = [NoViableAltException newException:38 state:0 stream:input];
+        nvae.c = charLA38;
+        @throw nvae;
+
+    }
+
+    switch (alt38) {
+        case 1 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:41: IMPORT // alt
+            {
+
+
+            [self mIMPORT]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 2 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:48: RETURN // alt
+            {
+
+
+            [self mRETURN]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 3 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:55: CLASS // alt
+            {
+
+
+            [self mCLASS]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 4 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:61: METHOD // alt
+            {
+
+
+            [self mMETHOD]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 5 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:68: FIELD // alt
+            {
+
+
+            [self mFIELD]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 6 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:74: STAT // alt
+            {
+
+
+            [self mSTAT]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 7 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:79: CALL // alt
+            {
+
+
+            [self mCALL]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 8 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:84: COMMENT // alt
+            {
+
+
+            [self mCOMMENT]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 9 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:92: SL_COMMENT // alt
+            {
+
+
+            [self mSL_COMMENT]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 10 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:103: STRING // alt
+            {
+
+
+            [self mSTRING]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 11 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:110: CHAR // alt
+            {
+
+
+            [self mCHAR]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+        case 12 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:115: WS // alt
+            {
+
+
+            [self mWS]; if ( state.failed ) return ;
+
+
+             
+            }
+            break;
+
+    }
+
+}
+// $ANTLR start synpred1_Fuzzy_fragment
+- (void) synpred1_Fuzzy_fragment
+{
+    /* ruleLabelDefs entry */
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:41: ( IMPORT ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:41: IMPORT // alt
+    {
+
+
+    [self mIMPORT]; if ( state.failed ) return ;
+
+
+     
+    }
+
+} // $ANTLR end synpred1_Fuzzy_fragment
+// $ANTLR start synpred2_Fuzzy_fragment
+- (void) synpred2_Fuzzy_fragment
+{
+    /* ruleLabelDefs entry */
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:48: ( RETURN ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:48: RETURN // alt
+    {
+
+
+    [self mRETURN]; if ( state.failed ) return ;
+
+
+     
+    }
+
+} // $ANTLR end synpred2_Fuzzy_fragment
+// $ANTLR start synpred3_Fuzzy_fragment
+- (void) synpred3_Fuzzy_fragment
+{
+    /* ruleLabelDefs entry */
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:55: ( CLASS ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:55: CLASS // alt
+    {
+
+
+    [self mCLASS]; if ( state.failed ) return ;
+
+
+     
+    }
+
+} // $ANTLR end synpred3_Fuzzy_fragment
+// $ANTLR start synpred4_Fuzzy_fragment
+- (void) synpred4_Fuzzy_fragment
+{
+    /* ruleLabelDefs entry */
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:61: ( METHOD ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:61: METHOD // alt
+    {
+
+
+    [self mMETHOD]; if ( state.failed ) return ;
+
+
+     
+    }
+
+} // $ANTLR end synpred4_Fuzzy_fragment
+// $ANTLR start synpred5_Fuzzy_fragment
+- (void) synpred5_Fuzzy_fragment
+{
+    /* ruleLabelDefs entry */
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:68: ( FIELD ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:68: FIELD // alt
+    {
+
+
+    [self mFIELD]; if ( state.failed ) return ;
+
+
+     
+    }
+
+} // $ANTLR end synpred5_Fuzzy_fragment
+// $ANTLR start synpred6_Fuzzy_fragment
+- (void) synpred6_Fuzzy_fragment
+{
+    /* ruleLabelDefs entry */
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:74: ( STAT ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:74: STAT // alt
+    {
+
+
+    [self mSTAT]; if ( state.failed ) return ;
+
+
+     
+    }
+
+} // $ANTLR end synpred6_Fuzzy_fragment
+// $ANTLR start synpred7_Fuzzy_fragment
+- (void) synpred7_Fuzzy_fragment
+{
+    /* ruleLabelDefs entry */
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:79: ( CALL ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:79: CALL // alt
+    {
+
+
+    [self mCALL]; if ( state.failed ) return ;
+
+
+     
+    }
+
+} // $ANTLR end synpred7_Fuzzy_fragment
+// $ANTLR start synpred8_Fuzzy_fragment
+- (void) synpred8_Fuzzy_fragment
+{
+    /* ruleLabelDefs entry */
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:84: ( COMMENT ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:84: COMMENT // alt
+    {
+
+
+    [self mCOMMENT]; if ( state.failed ) return ;
+
+
+     
+    }
+
+} // $ANTLR end synpred8_Fuzzy_fragment
+// $ANTLR start synpred9_Fuzzy_fragment
+- (void) synpred9_Fuzzy_fragment
+{
+    /* ruleLabelDefs entry */
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:92: ( SL_COMMENT ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.g:1:92: SL_COMMENT // alt
+    {
+
+
+    [self mSL_COMMENT]; if ( state.failed ) return ;
+
+
+     
+    }
+
+} // $ANTLR end synpred9_Fuzzy_fragment
+
+@end /* end of Fuzzy implementation line 397 */
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.tokens b/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.tokens
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/Fuzzy.tokens
rename to runtime/ObjC/Framework/examples/fuzzy/Fuzzy.tokens
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/FuzzyLexer.h b/runtime/ObjC/Framework/examples/fuzzy/FuzzyLexer.h
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/FuzzyLexer.h
rename to runtime/ObjC/Framework/examples/fuzzy/FuzzyLexer.h
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/input b/runtime/ObjC/Framework/examples/fuzzy/input
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/fuzzy/input
rename to runtime/ObjC/Framework/examples/fuzzy/input
diff --git a/runtime/ObjC/Framework/examples/fuzzy/main.m b/runtime/ObjC/Framework/examples/fuzzy/main.m
new file mode 100644
index 0000000..aff7fc1
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/fuzzy/main.m
@@ -0,0 +1,26 @@
+#import <Foundation/Foundation.h>
+#import "Fuzzy.h"
+#import <ANTLR/ANTLR.h>
+
+int main(int argc, const char * argv[])
+{
+    NSError *error = nil;
+	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+	NSString *input = [NSString stringWithContentsOfFile:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/fuzzy/input"  encoding:NSASCIIStringEncoding error:&error];
+	NSLog(@"%@", input);
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:input];
+	Fuzzy *lex = [Fuzzy newFuzzyWithCharStream:stream];
+	CommonTokenStream *tokens = [CommonTokenStream newCommonTokenStreamWithTokenSource:lex];
+	NSLog(@"%@", [tokens toString]);
+
+	id<Token> currentToken;
+	while ((currentToken = [lex nextToken]) && currentToken.type != TokenTypeEOF) {
+		NSLog(@"### %@", [currentToken toString]);
+	}
+
+	[lex release];
+	[stream release];
+	
+	[pool release];
+	return 0;
+}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/T.g b/runtime/ObjC/Framework/examples/hoistedPredicates/T.g
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/T.g
rename to runtime/ObjC/Framework/examples/hoistedPredicates/T.g
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/T.tokens b/runtime/ObjC/Framework/examples/hoistedPredicates/T.tokens
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/T.tokens
rename to runtime/ObjC/Framework/examples/hoistedPredicates/T.tokens
diff --git a/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.h b/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.h
new file mode 100644
index 0000000..5b93d1d
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.h
@@ -0,0 +1,41 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g 2012-02-16 17:34:26
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* Start cyclicDFAInterface */
+
+#pragma mark Rule return scopes Interface start
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__7 7
+#define ID 4
+#define INT 5
+#define WS 6
+/* interface lexer class */
+@interface TLexer : Lexer { // line 283
+/* ObjC start of actions.lexer.memVars */
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (TLexer *)newTLexerWithCharStream:(id<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+/* ObjC end actions.lexer.methodsDecl */
+- (void) mT__7 ; 
+- (void) mID ; 
+- (void) mINT ; 
+- (void) mWS ; 
+- (void) mTokens ; 
+
+@end /* end of TLexer interface */
+
diff --git a/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.m b/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.m
new file mode 100644
index 0000000..a19b3df
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/hoistedPredicates/TLexer.m
@@ -0,0 +1,490 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g
+ *     -                            On : 2012-02-16 17:34:26
+ *     -                 for the lexer : TLexerLexer
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g 2012-02-16 17:34:26
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "TLexer.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+/** As per Terence: No returns for lexer rules! */
+@implementation TLexer // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (TLexer *)newTLexerWithCharStream:(id<CharStream>)anInput
+{
+    return [[TLexer alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:5+1]];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+
+/* Start of Rules */
+// $ANTLR start "T__7"
+- (void) mT__7
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__7;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:7:6: ( 'enum' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:7:8: 'enum' // alt
+        {
+
+
+        [self matchString:@"enum"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__7" */
+// $ANTLR start "ID"
+- (void) mID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = ID;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:37:5: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:37:9: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* // alt
+        {
+
+        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+            [input consume];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            [self recover:mse];
+            @throw mse;
+        }
+
+         
+
+        do {
+            NSInteger alt1=2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= '0' && LA1_0 <= '9')||(LA1_0 >= 'A' && LA1_0 <= 'Z')||LA1_0=='_'||(LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop1;
+            }
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "ID" */
+// $ANTLR start "INT"
+- (void) mINT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = INT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:40:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:40:7: ( '0' .. '9' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:40:7: ( '0' .. '9' )+ // positiveClosureBlock
+        NSInteger cnt2 = 0;
+        do {
+            NSInteger alt2 = 2;
+            NSInteger LA2_0 = [input LA:1];
+            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
+                alt2=1;
+            }
+
+
+            switch (alt2) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt2 >= 1 )
+                        goto loop2;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:2];
+                    @throw eee;
+            }
+            cnt2++;
+        } while (YES);
+        loop2: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "INT" */
+// $ANTLR start "WS"
+- (void) mWS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = WS;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:43:5: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:43:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:43:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // positiveClosureBlock
+        NSInteger cnt3 = 0;
+        do {
+            NSInteger alt3 = 2;
+            NSInteger LA3_0 = [input LA:1];
+            if ( ((LA3_0 >= '\t' && LA3_0 <= '\n')||LA3_0=='\r'||LA3_0==' ') ) {
+                alt3=1;
+            }
+
+
+            switch (alt3) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == '\r'||[input LA:1] == ' ') {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt3 >= 1 )
+                        goto loop3;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:3];
+                    @throw eee;
+            }
+            cnt3++;
+        } while (YES);
+        loop3: ;
+
+         
+
+         _channel=99; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "WS" */
+- (void) mTokens
+{
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:1:8: ( T__7 | ID | INT | WS ) //ruleblock
+    NSInteger alt4=4;
+    unichar charLA4 = [input LA:1];
+    switch (charLA4) {
+        case 'e': ;
+            {
+            NSInteger LA4_1 = [input LA:2];
+
+            if ( (LA4_1=='n') ) {
+                NSInteger LA4_5 = [input LA:3];
+
+                if ( (LA4_5=='u') ) {
+                    NSInteger LA4_6 = [input LA:4];
+
+                    if ( (LA4_6=='m') ) {
+                        NSInteger LA4_7 = [input LA:5];
+
+                        if ( ((LA4_7 >= '0' && LA4_7 <= '9')||(LA4_7 >= 'A' && LA4_7 <= 'Z')||LA4_7=='_'||(LA4_7 >= 'a' && LA4_7 <= 'z')) ) {
+                            alt4=2;
+                        }
+                        else {
+                            alt4 = 1;
+                        }
+                    }
+                    else {
+                        alt4 = 2;
+                    }
+                }
+                else {
+                    alt4 = 2;
+                }
+            }
+            else {
+                alt4 = 2;
+            }
+            }
+            break;
+        case 'A': ;
+        case 'B': ;
+        case 'C': ;
+        case 'D': ;
+        case 'E': ;
+        case 'F': ;
+        case 'G': ;
+        case 'H': ;
+        case 'I': ;
+        case 'J': ;
+        case 'K': ;
+        case 'L': ;
+        case 'M': ;
+        case 'N': ;
+        case 'O': ;
+        case 'P': ;
+        case 'Q': ;
+        case 'R': ;
+        case 'S': ;
+        case 'T': ;
+        case 'U': ;
+        case 'V': ;
+        case 'W': ;
+        case 'X': ;
+        case 'Y': ;
+        case 'Z': ;
+        case '_': ;
+        case 'a': ;
+        case 'b': ;
+        case 'c': ;
+        case 'd': ;
+        case 'f': ;
+        case 'g': ;
+        case 'h': ;
+        case 'i': ;
+        case 'j': ;
+        case 'k': ;
+        case 'l': ;
+        case 'm': ;
+        case 'n': ;
+        case 'o': ;
+        case 'p': ;
+        case 'q': ;
+        case 'r': ;
+        case 's': ;
+        case 't': ;
+        case 'u': ;
+        case 'v': ;
+        case 'w': ;
+        case 'x': ;
+        case 'y': ;
+        case 'z': ;
+            {
+            alt4=2;
+            }
+            break;
+        case '0': ;
+        case '1': ;
+        case '2': ;
+        case '3': ;
+        case '4': ;
+        case '5': ;
+        case '6': ;
+        case '7': ;
+        case '8': ;
+        case '9': ;
+            {
+            alt4=3;
+            }
+            break;
+        case '\t': ;
+        case '\n': ;
+        case '\r': ;
+        case ' ': ;
+            {
+            alt4=4;
+            }
+            break;
+
+    default: ;
+        NoViableAltException *nvae = [NoViableAltException newException:4 state:0 stream:input];
+        nvae.c = charLA4;
+        @throw nvae;
+
+    }
+
+    switch (alt4) {
+        case 1 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:1:10: T__7 // alt
+            {
+
+
+            [self mT__7]; 
+
+
+             
+            }
+            break;
+        case 2 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:1:15: ID // alt
+            {
+
+
+            [self mID]; 
+
+
+             
+            }
+            break;
+        case 3 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:1:18: INT // alt
+            {
+
+
+            [self mINT]; 
+
+
+             
+            }
+            break;
+        case 4 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:1:22: WS // alt
+            {
+
+
+            [self mWS]; 
+
+
+             
+            }
+            break;
+
+    }
+
+}
+
+@end /* end of TLexer implementation line 397 */
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.h b/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.h
new file mode 100644
index 0000000..ddc2b48
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.h
@@ -0,0 +1,83 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g 2012-02-16 17:34:26
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* parserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__7 7
+#define ID 4
+#define INT 5
+#define WS 6
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+
+/* Interface grammar class */
+@interface TParser  : Parser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+
+
+/* ObjC start of actions.(actionScope).memVars */
+
+/* With this true, enum is seen as a keyword.  False, it's an identifier */
+BOOL enableEnum;
+
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* ObjC end of memVars */
+
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newTParser:(id<TokenStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* ObjC end of methodsDecl */
+
+- (void)stat; 
+- (void)identifier; 
+- (void)enumAsKeyword; 
+- (void)enumAsID; 
+
+
+@end /* end of TParser interface */
+
+/** Demonstrates how semantic predicates get hoisted out of the rule in 
+ *  which they are found and used in other decisions.  This grammar illustrates
+ *  how predicates can be used to distinguish between enum as a keyword and
+ *  an ID *dynamically*. :)
+
+ * Run "java org.antlr.Tool -dfa t.g" to generate DOT (graphviz) files.  See
+ * the T_dec-1.dot file to see the predicates in action.
+ */
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.m b/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.m
new file mode 100644
index 0000000..2aae318
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/hoistedPredicates/TParser.m
@@ -0,0 +1,366 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g
+ *     -                            On : 2012-02-16 17:34:26
+ *     -                for the parser : TParserParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g 2012-02-16 17:34:26
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "TParser.h"
+/* ----------------------------------------- */
+
+/** Demonstrates how semantic predicates get hoisted out of the rule in 
+ *  which they are found and used in other decisions.  This grammar illustrates
+ *  how predicates can be used to distinguish between enum as a keyword and
+ *  an ID *dynamically*. :)
+
+ * Run "java org.antlr.Tool -dfa t.g" to generate DOT (graphviz) files.  See
+ * the T_dec-1.dot file to see the predicates in action.
+ */
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_identifier_in_stat34;
+static const unsigned long long FOLLOW_identifier_in_stat34_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_enumAsKeyword_in_stat47;
+static const unsigned long long FOLLOW_enumAsKeyword_in_stat47_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_ID_in_identifier66;
+static const unsigned long long FOLLOW_ID_in_identifier66_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_enumAsID_in_identifier74;
+static const unsigned long long FOLLOW_enumAsID_in_identifier74_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_7_in_enumAsKeyword89;
+static const unsigned long long FOLLOW_7_in_enumAsKeyword89_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_7_in_enumAsID100;
+static const unsigned long long FOLLOW_7_in_enumAsID100_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+
+@implementation TParser  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_identifier_in_stat34 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_identifier_in_stat34_data Count:(NSUInteger)1] retain];
+    FOLLOW_enumAsKeyword_in_stat47 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_enumAsKeyword_in_stat47_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_identifier66 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_identifier66_data Count:(NSUInteger)1] retain];
+    FOLLOW_enumAsID_in_identifier74 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_enumAsID_in_identifier74_data Count:(NSUInteger)1] retain];
+    FOLLOW_7_in_enumAsKeyword89 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_enumAsKeyword89_data Count:(NSUInteger)1] retain];
+    FOLLOW_7_in_enumAsID100 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_enumAsID100_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"ID", @"INT", @"WS", @"'enum'", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g"];
+}
+
++ (TParser *)newTParser:(id<TokenStream>)aStream
+{
+    return [[TParser alloc] initWithTokenStream:aStream];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)aStream
+{
+    self = [super initWithTokenStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:4+1] retain]];
+    if ( self != nil ) {
+        /* start of actions-actionScope-init */
+
+        enableEnum = NO;
+
+        /* start of init */
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start stat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:24:1: stat : ( identifier | enumAsKeyword );
+ */
+- (void) stat
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:24:5: ( identifier | enumAsKeyword ) //ruleblock
+        NSInteger alt1=2;
+        NSInteger LA1_0 = [input LA:1];
+
+        if ( (LA1_0==ID) ) {
+            alt1=1;
+        }
+        else if ( (LA1_0==7) ) {
+            NSInteger LA1_2 = [input LA:2];
+
+            if ( ((!enableEnum)) ) {
+                alt1=1;
+            }
+            else if ( ((enableEnum)) ) {
+                alt1=2;
+            }
+            else {
+                NoViableAltException *nvae = [NoViableAltException newException:1 state:2 stream:input];
+                nvae.c = LA1_2;
+                @throw nvae;
+
+            }
+        }
+        else {
+            NoViableAltException *nvae = [NoViableAltException newException:1 state:0 stream:input];
+            nvae.c = LA1_0;
+            @throw nvae;
+
+        }
+        switch (alt1) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:24:7: identifier // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_identifier_in_stat34];
+                [self identifier];
+
+                [self popFollow];
+
+
+                 
+
+                NSLog(@"enum is an ID");
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:25:7: enumAsKeyword // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_enumAsKeyword_in_stat47];
+                [self enumAsKeyword];
+
+                [self popFollow];
+
+
+                 
+
+                NSLog(@"enum is a keyword");
+
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end stat */
+
+/*
+ * $ANTLR start identifier
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:28:1: identifier : ( ID | enumAsID );
+ */
+- (void) identifier
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:29:5: ( ID | enumAsID ) //ruleblock
+        NSInteger alt2=2;
+        NSInteger LA2_0 = [input LA:1];
+
+        if ( (LA2_0==ID) ) {
+            alt2=1;
+        }
+        else if ( (LA2_0==7) ) {
+            alt2=2;
+        }
+        else {
+            NoViableAltException *nvae = [NoViableAltException newException:2 state:0 stream:input];
+            nvae.c = LA2_0;
+            @throw nvae;
+
+        }
+        switch (alt2) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:29:7: ID // alt
+                {
+
+                [self match:input TokenType:ID Follow:FOLLOW_ID_in_identifier66]; 
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:30:7: enumAsID // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_enumAsID_in_identifier74];
+                [self enumAsID];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end identifier */
+
+/*
+ * $ANTLR start enumAsKeyword
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:33:1: enumAsKeyword :{...}? 'enum' ;
+ */
+- (void) enumAsKeyword
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:33:15: ({...}? 'enum' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:33:17: {...}? 'enum' // alt
+        {
+
+        if ( !((enableEnum)) ) {
+            @throw [FailedPredicateException newException:@"enumAsKeyword" predicate:@"enableEnum" stream:input];
+        }
+         
+        [self match:input TokenType:7 Follow:FOLLOW_7_in_enumAsKeyword89]; 
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end enumAsKeyword */
+
+/*
+ * $ANTLR start enumAsID
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:35:1: enumAsID :{...}? 'enum' ;
+ */
+- (void) enumAsID
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:35:10: ({...}? 'enum' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/T.g:35:12: {...}? 'enum' // alt
+        {
+
+        if ( !((!enableEnum)) ) {
+            @throw [FailedPredicateException newException:@"enumAsID" predicate:@"!enableEnum" stream:input];
+        }
+         
+        [self match:input TokenType:7 Follow:FOLLOW_7_in_enumAsID100]; 
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end enumAsID */
+/* ObjC end rules */
+
+@end /* end of TParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/input b/runtime/ObjC/Framework/examples/hoistedPredicates/input
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/input
rename to runtime/ObjC/Framework/examples/hoistedPredicates/input
diff --git a/runtime/ObjC/Framework/examples/hoistedPredicates/main.m b/runtime/ObjC/Framework/examples/hoistedPredicates/main.m
new file mode 100644
index 0000000..f71be5f
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/hoistedPredicates/main.m
@@ -0,0 +1,30 @@
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+#import "TLexer.h"
+#import "TParser.h"
+
+int main() {
+    NSError *error;
+	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+	
+	NSString *string = [NSString stringWithContentsOfFile:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/hoistedPredicates/input" encoding:NSASCIIStringEncoding error:&error];
+	NSLog(@"input is : %@", string);
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:string];
+	TLexer *lexer = [TLexer newTLexerWithCharStream:stream];
+	
+	//	Token *currentToken;
+	//	while ((currentToken = [lexer nextToken]) && [currentToken type] != TokenTypeEOF) {
+	//		NSLog(@"%@", currentToken);
+	//	}
+	
+	CommonTokenStream *tokenStream = [CommonTokenStream newCommonTokenStreamWithTokenSource:lexer];
+	TParser *parser = [[TParser alloc] initWithTokenStream:tokenStream];
+	[parser stat];
+	[lexer release];
+	[stream release];
+	[tokenStream release];
+	[parser release];
+	
+	[pool release];
+	return 0;
+}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/output b/runtime/ObjC/Framework/examples/hoistedPredicates/output
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/hoistedPredicates/output
rename to runtime/ObjC/Framework/examples/hoistedPredicates/output
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/Test.tokens b/runtime/ObjC/Framework/examples/lexertest-simple/Test.tokens
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/Test.tokens
rename to runtime/ObjC/Framework/examples/lexertest-simple/Test.tokens
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g b/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g
rename to runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g
diff --git a/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.h b/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.h
new file mode 100644
index 0000000..ab4ce16
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.h
@@ -0,0 +1,39 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g 2012-02-16 17:36:38
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* Start cyclicDFAInterface */
+
+#pragma mark Rule return scopes Interface start
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define DIGIT 4
+#define ID 5
+#define LETTER 6
+/* interface lexer class */
+@interface TestLexer : Lexer { // line 283
+/* ObjC start of actions.lexer.memVars */
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (TestLexer *)newTestLexerWithCharStream:(id<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+/* ObjC end actions.lexer.methodsDecl */
+- (void) mID ; 
+- (void) mDIGIT ; 
+- (void) mLETTER ; 
+- (void) mTokens ; 
+
+@end /* end of TestLexer interface */
+
diff --git a/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.m b/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.m
new file mode 100644
index 0000000..8ef23fe
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.m
@@ -0,0 +1,218 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g
+ *     -                            On : 2012-02-16 17:36:38
+ *     -                 for the lexer : TestLexerLexer
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g 2012-02-16 17:36:38
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "TestLexer.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+/** As per Terence: No returns for lexer rules! */
+@implementation TestLexer // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (TestLexer *)newTestLexerWithCharStream:(id<CharStream>)anInput
+{
+    return [[TestLexer alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:4+1]];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+
+/* Start of Rules */
+// $ANTLR start "ID"
+- (void) mID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = ID;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g:8:4: ( LETTER ( LETTER | DIGIT )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g:8:6: LETTER ( LETTER | DIGIT )* // alt
+        {
+
+
+        [self mLETTER]; 
+
+
+         
+
+        do {
+            NSInteger alt1=2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= '0' && LA1_0 <= '9')||(LA1_0 >= 'A' && LA1_0 <= 'Z')||(LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop1;
+            }
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "ID" */
+// $ANTLR start "DIGIT"
+- (void) mDIGIT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g:11:16: ( '0' .. '9' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g: // alt
+        {
+
+        if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
+            [input consume];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            [self recover:mse];
+            @throw mse;
+        }
+
+         
+        }
+
+
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "DIGIT" */
+// $ANTLR start "LETTER"
+- (void) mLETTER
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g:15:2: ( 'a' .. 'z' | 'A' .. 'Z' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g: // alt
+        {
+
+        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+            [input consume];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            [self recover:mse];
+            @throw mse;
+        }
+
+         
+        }
+
+
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "LETTER" */
+- (void) mTokens
+{
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g:1:8: ( ID ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.g:1:10: ID // alt
+    {
+
+
+    [self mID]; 
+
+
+     
+    }
+
+
+}
+
+@end /* end of TestLexer implementation line 397 */
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.tokens b/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.tokens
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.tokens
rename to runtime/ObjC/Framework/examples/lexertest-simple/TestLexer.tokens
diff --git a/runtime/ObjC/Framework/examples/lexertest-simple/main.m b/runtime/ObjC/Framework/examples/lexertest-simple/main.m
new file mode 100644
index 0000000..709f440
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/lexertest-simple/main.m
@@ -0,0 +1,23 @@
+#import <Foundation/Foundation.h>
+#import "TestLexer.h"
+#import <ANTLR/ANTLR.h>
+#import <unistd.h>
+
+int main(int argc, const char * argv[])
+{
+	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+	
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"abB9Cdd44"];
+	TestLexer *lexer = [[TestLexer alloc] initWithCharStream:stream];
+	id<Token> currentToken;
+	while ((currentToken = [[lexer nextToken] retain]) && currentToken.type != TokenTypeEOF) {
+		NSLog(@"%@", currentToken);
+	}
+	[lexer release];
+	[stream release];
+	
+	[pool release];
+    // sleep for objectalloc
+    // while (1) sleep(60);
+	return 0;
+}
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/examples/polydiff/Poly.g b/runtime/ObjC/Framework/examples/polydiff/Poly.g
new file mode 100644
index 0000000..30ad7c3
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/Poly.g
@@ -0,0 +1,27 @@
+grammar Poly;
+options {
+    output=AST;
+    language=ObjC;
+    }
+tokens { MULT; } // imaginary token
+
+poly: term ('+'^ term)*
+    ;
+
+term: INT ID  -> ^(MULT[@"*"] INT ID)
+    | INT exp -> ^(MULT[@"*"] INT exp)
+    | exp
+    | INT
+	| ID
+    ;
+
+exp : ID '^'^ INT
+    ;
+    
+ID  returns [NSString *value]
+    : 'a'..'z'+ ;
+
+INT  returns [NSString *value]
+    : '0'..'9'+ ;
+
+WS	: (' '|'\t'|'\r'|'\n')+ { $channel=HIDDEN; } ;
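For orientation: the '^' root operators and the -> rewrites in Poly.g build operator-rooted ASTs rather than flat token lists. As an illustrative example (not taken from the example's input file), a polynomial such as 2x^3+4x would parse to a tree of roughly the shape (+ (* 2 (^ x 3)) (* 4 x)) in ANTLR's string-tree notation, with the imaginary MULT token printed here as '*'.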
diff --git a/runtime/ObjC/Framework/examples/polydiff/Poly.tokens b/runtime/ObjC/Framework/examples/polydiff/Poly.tokens
new file mode 100644
index 0000000..c711b35
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/Poly.tokens
@@ -0,0 +1,8 @@
+T__8=8
+T__9=9
+ID=4
+INT=5
+MULT=6
+WS=7
+'+'=8
+'^'=9
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g b/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g
new file mode 100644
index 0000000..642c511
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g
@@ -0,0 +1,26 @@
+tree grammar PolyDifferentiator;
+options {
+	tokenVocab=Poly;
+    language=ObjC;
+	ASTLabelType=CommonTree;
+	output=AST;
+//	rewrite=true; // works either in rewrite or normal mode
+}
+
+poly:	^('+' poly poly)
+	|	^(MULT INT ID)		-> INT
+	|	^(MULT c=INT ^('^' ID e=INT))
+		{
+		NSString *c2 = [NSString stringWithFormat:@"\%d", $c.int*$e.int];
+		NSString *e2 = [NSString stringWithFormat:@"\%d", $e.int-1];
+		}
+							-> ^(MULT[@"*"] INT[c2] ^('^' ID INT[e2]))
+	|	^('^' ID e=INT)
+		{
+		NSString *c2 = [NSString stringWithFormat:@"\%d", $e.int];
+		NSString *e2 = [NSString stringWithFormat:@"\%d", $e.int-1];
+		}
+							-> ^(MULT[@"*"] INT[c2] ^('^' ID INT[e2]))
+	|	INT					-> INT[@"0"]
+	|	ID					-> INT[@"1"]
+	;
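The rewrites in PolyDifferentiator.g differentiate a polynomial term by term: ^('+' poly poly) recurses into both operands, ^(MULT INT ID) (a linear term c*x) reduces to its coefficient INT, the power-rule cases compute c2 = c*e and e2 = e-1 (or c2 = e for a bare x^e), a constant INT becomes INT["0"], and a lone ID becomes INT["1"]. As an illustrative worked check (not from the source), 3x^4, i.e. ^(MULT 3 ^('^' x 4)), rewrites to ^(MULT 12 ^('^' x 3)), which is 12x^3.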
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.h b/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.h
new file mode 100644
index 0000000..68949c3
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.h
@@ -0,0 +1,107 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g 2012-02-16 18:10:53
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* treeParserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__8 8
+#define T__9 9
+#define ID 4
+#define INT 5
+#define MULT 6
+#define WS 7
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+/* returnScopeInterface PolyDifferentiator_poly_return */
+@interface PolyDifferentiator_poly_return : TreeRuleReturnScope { /* returnScopeInterface line 1838 */
+/* ASTTreeParser returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (PolyDifferentiator_poly_return *)newPolyDifferentiator_poly_return;
+/* this is start of set and get methods */
+/* ASTTreeParser returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+
+/* Interface grammar class */
+@interface PolyDifferentiator  : TreeParser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* AST parserHeaderFile.memVars */
+NSInteger ruleLevel;
+NSArray *ruleNames;
+  /* AST super.memVars */
+/* AST parserMemVars */
+id<TreeAdaptor> treeAdaptor;   /* AST parserMemVars */
+/* ObjC end of memVars */
+
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* AST parserHeaderFile.properties */
+  /* AST super.properties */
+/* AST parserProperties */
+@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<TreeAdaptor> treeAdaptor;   /* AST parserproperties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newPolyDifferentiator:(id<TreeNodeStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* AST parserHeaderFile.methodsDecl */
+  /* AST super.methodsDecl */
+/* AST parserMethodsDecl */
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void) setTreeAdaptor:(id<TreeAdaptor>)theTreeAdaptor;   /* AST parsermethodsDecl */
+/* ObjC end of methodsDecl */
+
+- (PolyDifferentiator_poly_return *)poly; 
+
+
+@end /* end of PolyDifferentiator interface */
+
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.m b/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.m
new file mode 100644
index 0000000..7b90e62
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.m
@@ -0,0 +1,791 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g
+ *     -                            On : 2012-02-16 18:10:53
+ *     -           for the tree parser : PolyDifferentiatorTreeParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g 2012-02-16 18:10:53
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "PolyDifferentiator.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_8_in_poly44;
+static const unsigned long long FOLLOW_8_in_poly44_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly46;
+static const unsigned long long FOLLOW_poly_in_poly46_data[] = { 0x0000000000000370LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly48;
+static const unsigned long long FOLLOW_poly_in_poly48_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_MULT_in_poly55;
+static const unsigned long long FOLLOW_MULT_in_poly55_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly57;
+static const unsigned long long FOLLOW_INT_in_poly57_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_ID_in_poly59;
+static const unsigned long long FOLLOW_ID_in_poly59_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_MULT_in_poly71;
+static const unsigned long long FOLLOW_MULT_in_poly71_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly75;
+static const unsigned long long FOLLOW_INT_in_poly75_data[] = { 0x0000000000000200LL};
+static ANTLRBitSet *FOLLOW_9_in_poly78;
+static const unsigned long long FOLLOW_9_in_poly78_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_ID_in_poly80;
+static const unsigned long long FOLLOW_ID_in_poly80_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly84;
+static const unsigned long long FOLLOW_INT_in_poly84_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_9_in_poly122;
+static const unsigned long long FOLLOW_9_in_poly122_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_ID_in_poly124;
+static const unsigned long long FOLLOW_ID_in_poly124_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly128;
+static const unsigned long long FOLLOW_INT_in_poly128_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly164;
+static const unsigned long long FOLLOW_INT_in_poly164_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_ID_in_poly178;
+static const unsigned long long FOLLOW_ID_in_poly178_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+@implementation PolyDifferentiator_poly_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (PolyDifferentiator_poly_return *)newPolyDifferentiator_poly_return
+{
+return [[[PolyDifferentiator_poly_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+
+
+@implementation PolyDifferentiator  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+/* AST genericParser.synthesize */
+/* AST parserProperties */
+@synthesize treeAdaptor;
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_8_in_poly44 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_poly44_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly46 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly46_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly48 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly48_data Count:(NSUInteger)1] retain];
+    FOLLOW_MULT_in_poly55 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_MULT_in_poly55_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly57 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly57_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_poly59 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_poly59_data Count:(NSUInteger)1] retain];
+    FOLLOW_MULT_in_poly71 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_MULT_in_poly71_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly75 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly75_data Count:(NSUInteger)1] retain];
+    FOLLOW_9_in_poly78 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_poly78_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_poly80 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_poly80_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly84 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly84_data Count:(NSUInteger)1] retain];
+    FOLLOW_9_in_poly122 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_poly122_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_poly124 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_poly124_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly128 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly128_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly164 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly164_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_poly178 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_poly178_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"ID", @"INT", @"MULT", @"WS", @"'+'", @"'^'", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g"];
+}
+
++ (PolyDifferentiator *)newPolyDifferentiator:(id<TreeNodeStream>)aStream
+{
+    return [[PolyDifferentiator alloc] initWithStream:aStream];
+}
+
+- (id) initWithStream:(id<TreeNodeStream>)aStream
+{
+    self = [super initWithStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:1+1] retain]];
+    if ( self != nil ) {
+        /* start of actions-actionScope-init */
+        /* start of init */
+        /* AST genericParser.init */
+        [self setTreeAdaptor:[[CommonTreeAdaptor newTreeAdaptor] retain]];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    /* AST genericParser.dealloc */
+    [self setTreeAdaptor:nil];
+
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* AST genericParser.methods */
+/* AST parserMethods */
+- (id<TreeAdaptor>) getTreeAdaptor
+{
+	return treeAdaptor;
+}
+
+- (void) setTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor
+{
+	if (aTreeAdaptor != treeAdaptor) {
+		treeAdaptor = aTreeAdaptor;
+	}
+}
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start poly
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:10:1: poly : ( ^( '+' poly poly ) | ^( MULT INT ID ) -> INT | ^( MULT c= INT ^( '^' ID e= INT ) ) -> ^( MULT[@\"*\"] INT[c2] ^( '^' ID INT[e2] ) ) | ^( '^' ID e= INT ) -> ^( MULT[@\"*\"] INT[c2] ^( '^' ID INT[e2] ) ) | INT -> INT[@\"0\"] | ID -> INT[@\"1\"] );
+ */
+- (PolyDifferentiator_poly_return *) poly
+{
+    /* ruleScopeSetUp */
+
+    /* ASTTreeParser ruleDeclarations */
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    PolyDifferentiator_poly_return * retval = [PolyDifferentiator_poly_return newPolyDifferentiator_poly_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    CommonTree *_first_0 = nil;
+    CommonTree *_last = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonTree *c = nil;
+        CommonTree *e = nil;
+        CommonTree *char_literal1 = nil;
+        CommonTree *MULT4 = nil;
+        CommonTree *INT5 = nil;
+        CommonTree *ID6 = nil;
+        CommonTree *MULT7 = nil;
+        CommonTree *char_literal8 = nil;
+        CommonTree *ID9 = nil;
+        CommonTree *char_literal10 = nil;
+        CommonTree *ID11 = nil;
+        CommonTree *INT12 = nil;
+        CommonTree *ID13 = nil;PolyDifferentiator_poly_return * poly2 = nil ;
+
+        PolyDifferentiator_poly_return * poly3 = nil ;
+
+
+        CommonTree *c_tree=nil;
+        CommonTree *e_tree=nil;
+        CommonTree *char_literal1_tree=nil;
+        CommonTree *MULT4_tree=nil;
+        CommonTree *INT5_tree=nil;
+        CommonTree *ID6_tree=nil;
+        CommonTree *MULT7_tree=nil;
+        CommonTree *char_literal8_tree=nil;
+        CommonTree *ID9_tree=nil;
+        CommonTree *char_literal10_tree=nil;
+        CommonTree *ID11_tree=nil;
+        CommonTree *INT12_tree=nil;
+        CommonTree *ID13_tree=nil;
+        RewriteRuleTokenStream *stream_INT =
+            [[RewriteRuleNodeStream newRewriteRuleNodeStream:treeAdaptor
+                                                             description:@"token INT"] retain];
+        RewriteRuleTokenStream *stream_MULT =
+            [[RewriteRuleNodeStream newRewriteRuleNodeStream:treeAdaptor
+                                                             description:@"token MULT"] retain];
+        RewriteRuleTokenStream *stream_ID =
+            [[RewriteRuleNodeStream newRewriteRuleNodeStream:treeAdaptor
+                                                             description:@"token ID"] retain];
+        RewriteRuleTokenStream *stream_9 =
+            [[RewriteRuleNodeStream newRewriteRuleNodeStream:treeAdaptor
+                                                             description:@"token 9"] retain];
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:10:5: ( ^( '+' poly poly ) | ^( MULT INT ID ) -> INT | ^( MULT c= INT ^( '^' ID e= INT ) ) -> ^( MULT[@\"*\"] INT[c2] ^( '^' ID INT[e2] ) ) | ^( '^' ID e= INT ) -> ^( MULT[@\"*\"] INT[c2] ^( '^' ID INT[e2] ) ) | INT -> INT[@\"0\"] | ID -> INT[@\"1\"] ) //ruleblock
+        NSInteger alt1=6;
+        unichar charLA1 = [input LA:1];
+        switch (charLA1) {
+            case 8: ;
+                {
+                alt1=1;
+                }
+                break;
+            case MULT: ;
+                {
+                NSInteger LA1_2 = [input LA:2];
+
+                if ( (LA1_2==DOWN) ) {
+                    NSInteger LA1_6 = [input LA:3];
+
+                    if ( (LA1_6==INT) ) {
+                        NSInteger LA1_7 = [input LA:4];
+
+                        if ( (LA1_7==ID) ) {
+                            alt1=2;
+                        }
+                        else if ( (LA1_7==9) ) {
+                            alt1=3;
+                        }
+                        else {
+                            NoViableAltException *nvae = [NoViableAltException newException:1 state:7 stream:input];
+                            nvae.c = LA1_7;
+                            @throw nvae;
+
+                        }
+                    }
+                    else {
+                        NoViableAltException *nvae = [NoViableAltException newException:1 state:6 stream:input];
+                        nvae.c = LA1_6;
+                        @throw nvae;
+
+                    }
+                }
+                else {
+                    NoViableAltException *nvae = [NoViableAltException newException:1 state:2 stream:input];
+                    nvae.c = LA1_2;
+                    @throw nvae;
+
+                }
+                }
+                break;
+            case 9: ;
+                {
+                alt1=4;
+                }
+                break;
+            case INT: ;
+                {
+                alt1=5;
+                }
+                break;
+            case ID: ;
+                {
+                alt1=6;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:1 state:0 stream:input];
+            nvae.c = charLA1;
+            @throw nvae;
+
+        }
+
+        switch (alt1) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:10:7: ^( '+' poly poly ) // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_1 = _last;
+                CommonTree *_first_1 = nil;
+                CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefRuleRoot */
+                _last = (CommonTree *)[input LT:1];
+                char_literal1=(CommonTree *)[self match:input TokenType:8 Follow:FOLLOW_8_in_poly44]; 
+                char_literal1_tree = (CommonTree *)[treeAdaptor dupNode:char_literal1];
+
+
+                root_1 = (CommonTree *)[treeAdaptor becomeRoot:char_literal1_tree old:root_1];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; 
+
+                /* ASTTreeParser ruleRef */
+                _last = (CommonTree *)[input LT:1];
+                /* ruleRef */
+                [self pushFollow:FOLLOW_poly_in_poly46];
+                poly2 = [self poly];
+
+                [self popFollow];
+
+
+                    [treeAdaptor addChild:poly2.tree toTree:root_1];
+
+                 
+                /* ASTTreeParser ruleRef */
+                _last = (CommonTree *)[input LT:1];
+                /* ruleRef */
+                [self pushFollow:FOLLOW_poly_in_poly48];
+                poly3 = [self poly];
+
+                [self popFollow];
+
+
+                    [treeAdaptor addChild:poly3.tree toTree:root_1];
+
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; 
+                [treeAdaptor addChild:root_1 toTree:root_0];
+                _last = _save_last_1;
+                }
+
+                 
+                /* ASTTreeParser noRewrite */
+
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:11:4: ^( MULT INT ID ) // alt
+                {
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_1 = _last;
+                CommonTree *_first_1 = nil;
+                CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                MULT4=(CommonTree *)[self match:input TokenType:MULT Follow:FOLLOW_MULT_in_poly55];  
+                    [stream_MULT addElement:MULT4];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; 
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                INT5=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly57];  
+                    [stream_INT addElement:INT5];
+
+                 
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                ID6=(CommonTree *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_poly59];  
+                    [stream_ID addElement:ID6];
+
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; 
+                [treeAdaptor addChild:root_1 toTree:root_0];
+                _last = _save_last_1;
+                }
+
+                 
+                // AST REWRITE
+                // elements: INT
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 11:20: -> INT
+                {
+                     // TODO: args: 
+                    [treeAdaptor addChild:/* ASTTreeParser createRewriteNodeFromElement */
+                    [stream_INT nextNode]
+                     toTree:root_0];
+
+                }
+
+
+                retval.tree = root_0;
+
+
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:12:4: ^( MULT c= INT ^( '^' ID e= INT ) ) // alt
+                {
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_1 = _last;
+                CommonTree *_first_1 = nil;
+                CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                MULT7=(CommonTree *)[self match:input TokenType:MULT Follow:FOLLOW_MULT_in_poly71];  
+                    [stream_MULT addElement:MULT7];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; 
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                c=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly75];  
+                    [stream_INT addElement:c];
+
+                 
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_2 = _last;
+                CommonTree *_first_2 = nil;
+                CommonTree *root_2 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                char_literal8=(CommonTree *)[self match:input TokenType:9 Follow:FOLLOW_9_in_poly78];  
+                    [stream_9 addElement:char_literal8];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; 
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                ID9=(CommonTree *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_poly80];  
+                    [stream_ID addElement:ID9];
+
+                 
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                e=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly84];  
+                    [stream_INT addElement:e];
+
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; 
+                [treeAdaptor addChild:root_2 toTree:root_1];
+                _last = _save_last_2;
+                }
+
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; 
+                [treeAdaptor addChild:root_1 toTree:root_0];
+                _last = _save_last_1;
+                }
+
+                 
+
+
+                		NSString *c2 = [NSString stringWithFormat:@"%d", (c!=nil?[c.text integerValue]:0)*(e!=nil?[e.text integerValue]:0)];
+                		NSString *e2 = [NSString stringWithFormat:@"%d", (e!=nil?[e.text integerValue]:0)-1];
+                		
+
+                 
+                // AST REWRITE
+                // elements: INT, 9, INT, ID, MULT
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 17:8: -> ^( MULT[@\"*\"] INT[c2] ^( '^' ID INT[e2] ) )
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:17:11: ^( MULT[@\"*\"] INT[c2] ^( '^' ID INT[e2] ) )
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                                [[treeAdaptor createTree:MULT FromToken:@"*" Text:@"MULT"] retain]
+                         old:root_1];
+
+                        [treeAdaptor addChild:
+                                [[treeAdaptor createTree:INT FromToken:c2 Text:@"INT"] retain]
+                         toTree:root_1];
+
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:17:32: ^( '^' ID INT[e2] )
+                        {
+                            CommonTree *root_2 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                            root_2 = (CommonTree *)[treeAdaptor becomeRoot:/* ASTTreeParser createRewriteNodeFromElement */
+                            [stream_9 nextNode]
+                             old:root_2];
+
+                             // TODO: args: 
+                            [treeAdaptor addChild:/* ASTTreeParser createRewriteNodeFromElement */
+                            [stream_ID nextNode]
+                             toTree:root_2];
+
+                            [treeAdaptor addChild:
+                                    [[treeAdaptor createTree:INT FromToken:e2 Text:@"INT"] retain]
+                             toTree:root_2];
+
+                            [treeAdaptor addChild:root_2 toTree:root_1];
+                        }
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+
+                }
+                break;
+            case 4 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:18:4: ^( '^' ID e= INT ) // alt
+                {
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_1 = _last;
+                CommonTree *_first_1 = nil;
+                CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                char_literal10=(CommonTree *)[self match:input TokenType:9 Follow:FOLLOW_9_in_poly122];  
+                    [stream_9 addElement:char_literal10];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; 
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                ID11=(CommonTree *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_poly124];  
+                    [stream_ID addElement:ID11];
+
+                 
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                e=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly128];  
+                    [stream_INT addElement:e];
+
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; 
+                [treeAdaptor addChild:root_1 toTree:root_0];
+                _last = _save_last_1;
+                }
+
+                 
+
+
+                		NSString *c2 = [NSString stringWithFormat:@"%d", (e!=nil?[e.text integerValue]:0)];
+                		NSString *e2 = [NSString stringWithFormat:@"%d", (e!=nil?[e.text integerValue]:0)-1];
+                		
+
+                 
+                // AST REWRITE
+                // elements: INT, 9, INT, ID
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 23:8: -> ^( MULT[@\"*\"] INT[c2] ^( '^' ID INT[e2] ) )
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:23:11: ^( MULT[@\"*\"] INT[c2] ^( '^' ID INT[e2] ) )
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                                [[treeAdaptor createTree:MULT FromToken:@"*" Text:@"MULT"] retain]
+                         old:root_1];
+
+                        [treeAdaptor addChild:
+                                [[treeAdaptor createTree:INT FromToken:c2 Text:@"INT"] retain]
+                         toTree:root_1];
+
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:23:32: ^( '^' ID INT[e2] )
+                        {
+                            CommonTree *root_2 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                            root_2 = (CommonTree *)[treeAdaptor becomeRoot:/* ASTTreeParser createRewriteNodeFromElement */
+                            [stream_9 nextNode]
+                             old:root_2];
+
+                             // TODO: args: 
+                            [treeAdaptor addChild:/* ASTTreeParser createRewriteNodeFromElement */
+                            [stream_ID nextNode]
+                             toTree:root_2];
+
+                            [treeAdaptor addChild:
+                                    [[treeAdaptor createTree:INT FromToken:e2 Text:@"INT"] retain]
+                             toTree:root_2];
+
+                            [treeAdaptor addChild:root_2 toTree:root_1];
+                        }
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+
+                }
+                break;
+            case 5 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:24:4: INT // alt
+                {
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                INT12=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly164];  
+                    [stream_INT addElement:INT12];
+
+                 
+                // AST REWRITE
+                // elements: INT
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 24:12: -> INT[@\"0\"]
+                {
+                    [treeAdaptor addChild:
+                            [[treeAdaptor createTree:INT FromToken:@"0" Text:@"INT"] retain]
+                     toTree:root_0];
+
+                }
+
+
+                retval.tree = root_0;
+
+
+                }
+                break;
+            case 6 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.g:25:4: ID // alt
+                {
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                ID13=(CommonTree *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_poly178];  
+                    [stream_ID addElement:ID13];
+
+                 
+                // AST REWRITE
+                // elements: 
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 25:11: -> INT[@\"1\"]
+                {
+                    [treeAdaptor addChild:
+                            [[treeAdaptor createTree:INT FromToken:@"1" Text:@"INT"] retain]
+                     toTree:root_0];
+
+                }
+
+
+                retval.tree = root_0;
+
+
+                }
+                break;
+
+        }
+        /* ASTTreeParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+
+        [stream_INT release];
+        [stream_MULT release];
+        [stream_ID release];
+        [stream_9 release];
+
+        retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end poly */
+/* ObjC end rules */
+
+@end /* end of PolyDifferentiator implementation line 692 */
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.tokens b/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.tokens
new file mode 100644
index 0000000..c711b35
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyDifferentiator.tokens
@@ -0,0 +1,8 @@
+T__8=8
+T__9=9
+ID=4
+INT=5
+MULT=6
+WS=7
+'+'=8
+'^'=9
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyLexer.h b/runtime/ObjC/Framework/examples/polydiff/PolyLexer.h
new file mode 100644
index 0000000..9eb1d64
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyLexer.h
@@ -0,0 +1,44 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g 2012-02-16 18:10:11
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* Start cyclicDFAInterface */
+
+#pragma mark Rule return scopes Interface start
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__8 8
+#define T__9 9
+#define ID 4
+#define INT 5
+#define MULT 6
+#define WS 7
+/* interface lexer class */
+@interface PolyLexer : Lexer { // line 283
+/* ObjC start of actions.lexer.memVars */
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (PolyLexer *)newPolyLexerWithCharStream:(id<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+/* ObjC end actions.lexer.methodsDecl */
+- (void) mT__8 ; 
+- (void) mT__9 ; 
+- (NSString *) mID ; 
+- (NSString *) mINT ; 
+- (void) mWS ; 
+- (void) mTokens ; 
+
+@end /* end of PolyLexer interface */
+
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyLexer.m b/runtime/ObjC/Framework/examples/polydiff/PolyLexer.m
new file mode 100644
index 0000000..1be0324
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyLexer.m
@@ -0,0 +1,486 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g
+ *     -                            On : 2012-02-16 18:10:11
+ *     -                 for the lexer : PolyLexerLexer
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g 2012-02-16 18:10:11
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "PolyLexer.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+/** As per Terence: No returns for lexer rules! */
+@implementation PolyLexer // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (PolyLexer *)newPolyLexerWithCharStream:(id<CharStream>)anInput
+{
+    return [[PolyLexer alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:6+1]];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+
+/* Start of Rules */
+// $ANTLR start "T__8"
+- (void) mT__8
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__8;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:7:6: ( '+' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:7:8: '+' // alt
+        {
+
+
+        [self matchChar:'+']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__8" */
+// $ANTLR start "T__9"
+- (void) mT__9
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__9;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:8:6: ( '^' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:8:8: '^' // alt
+        {
+
+
+        [self matchChar:'^']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__9" */
+// $ANTLR start "ID"
+- (void) mID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+    NSString * value = nil ;
+
+
+    @try {
+        NSInteger _type = ID;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:22:5: ( ( 'a' .. 'z' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:22:7: ( 'a' .. 'z' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:22:7: ( 'a' .. 'z' )+ // positiveClosureBlock
+        NSInteger cnt1 = 0;
+        do {
+            NSInteger alt1 = 2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g: // alt
+                    {
+
+                    if ((([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt1 >= 1 )
+                        goto loop1;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:1];
+                    @throw eee;
+            }
+            cnt1++;
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "ID" */
+// $ANTLR start "INT"
+- (void) mINT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+    NSString * value = nil ;
+
+
+    @try {
+        NSInteger _type = INT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:25:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:25:7: ( '0' .. '9' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:25:7: ( '0' .. '9' )+ // positiveClosureBlock
+        NSInteger cnt2 = 0;
+        do {
+            NSInteger alt2 = 2;
+            NSInteger LA2_0 = [input LA:1];
+            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
+                alt2=1;
+            }
+
+
+            switch (alt2) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt2 >= 1 )
+                        goto loop2;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:2];
+                    @throw eee;
+            }
+            cnt2++;
+        } while (YES);
+        loop2: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "INT" */
+// $ANTLR start "WS"
+- (void) mWS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = WS;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:27:4: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:27:6: ( ' ' | '\\t' | '\\r' | '\\n' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:27:6: ( ' ' | '\\t' | '\\r' | '\\n' )+ // positiveClosureBlock
+        NSInteger cnt3 = 0;
+        do {
+            NSInteger alt3 = 2;
+            NSInteger LA3_0 = [input LA:1];
+            if ( ((LA3_0 >= '\t' && LA3_0 <= '\n')||LA3_0=='\r'||LA3_0==' ') ) {
+                alt3=1;
+            }
+
+
+            switch (alt3) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == '\r'||[input LA:1] == ' ') {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt3 >= 1 )
+                        goto loop3;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:3];
+                    @throw eee;
+            }
+            cnt3++;
+        } while (YES);
+        loop3: ;
+
+         
+
+         _channel=HIDDEN; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "WS" */
+- (void) mTokens
+{
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:1:8: ( T__8 | T__9 | ID | INT | WS ) //ruleblock
+    NSInteger alt4=5;
+    unichar charLA4 = [input LA:1];
+    switch (charLA4) {
+        case '+': ;
+            {
+            alt4=1;
+            }
+            break;
+        case '^': ;
+            {
+            alt4=2;
+            }
+            break;
+        case 'a': ;
+        case 'b': ;
+        case 'c': ;
+        case 'd': ;
+        case 'e': ;
+        case 'f': ;
+        case 'g': ;
+        case 'h': ;
+        case 'i': ;
+        case 'j': ;
+        case 'k': ;
+        case 'l': ;
+        case 'm': ;
+        case 'n': ;
+        case 'o': ;
+        case 'p': ;
+        case 'q': ;
+        case 'r': ;
+        case 's': ;
+        case 't': ;
+        case 'u': ;
+        case 'v': ;
+        case 'w': ;
+        case 'x': ;
+        case 'y': ;
+        case 'z': ;
+            {
+            alt4=3;
+            }
+            break;
+        case '0': ;
+        case '1': ;
+        case '2': ;
+        case '3': ;
+        case '4': ;
+        case '5': ;
+        case '6': ;
+        case '7': ;
+        case '8': ;
+        case '9': ;
+            {
+            alt4=4;
+            }
+            break;
+        case '\t': ;
+        case '\n': ;
+        case '\r': ;
+        case ' ': ;
+            {
+            alt4=5;
+            }
+            break;
+
+    default: ;
+        NoViableAltException *nvae = [NoViableAltException newException:4 state:0 stream:input];
+        nvae.c = charLA4;
+        @throw nvae;
+
+    }
+
+    switch (alt4) {
+        case 1 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:1:10: T__8 // alt
+            {
+
+
+            [self mT__8]; 
+
+
+             
+            }
+            break;
+        case 2 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:1:15: T__9 // alt
+            {
+
+
+            [self mT__9]; 
+
+
+             
+            }
+            break;
+        case 3 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:1:20: ID // alt
+            {
+
+
+            [self mID]; 
+
+
+             
+            }
+            break;
+        case 4 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:1:23: INT // alt
+            {
+
+
+            [self mINT]; 
+
+
+             
+            }
+            break;
+        case 5 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:1:27: WS // alt
+            {
+
+
+            [self mWS]; 
+
+
+             
+            }
+            break;
+
+    }
+
+}
+
+@end /* end of PolyLexer implementation line 397 */
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyParser.h b/runtime/ObjC/Framework/examples/polydiff/PolyParser.h
new file mode 100644
index 0000000..9fed63a
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyParser.h
@@ -0,0 +1,156 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g 2012-02-16 18:10:10
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* parserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__8 8
+#define T__9 9
+#define ID 4
+#define INT 5
+#define MULT 6
+#define WS 7
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+/* returnScopeInterface PolyParser_poly_return */
+@interface PolyParser_poly_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (PolyParser_poly_return *)newPolyParser_poly_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface PolyParser_term_return */
+@interface PolyParser_term_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (PolyParser_term_return *)newPolyParser_term_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface PolyParser_exp_return */
+@interface PolyParser_exp_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (PolyParser_exp_return *)newPolyParser_exp_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+
+/* Interface grammar class */
+@interface PolyParser  : Parser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* AST parserHeaderFile.memVars */
+NSInteger ruleLevel;
+NSArray *ruleNames;
+  /* AST super.memVars */
+/* AST parserMemVars */
+id<TreeAdaptor> treeAdaptor;   /* AST parserMemVars */
+/* ObjC end of memVars */
+
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* AST parserHeaderFile.properties */
+  /* AST super.properties */
+/* AST parserProperties */
+@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<TreeAdaptor> treeAdaptor;   /* AST parserproperties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newPolyParser:(id<TokenStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* AST parserHeaderFile.methodsDecl */
+  /* AST super.methodsDecl */
+/* AST parserMethodsDecl */
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void) setTreeAdaptor:(id<TreeAdaptor>)theTreeAdaptor;   /* AST parsermethodsDecl */
+/* ObjC end of methodsDecl */
+
+- (PolyParser_poly_return *)poly; 
+- (PolyParser_term_return *)term; 
+- (PolyParser_exp_return *)exp; 
+
+
+@end /* end of PolyParser interface */
+
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyParser.m b/runtime/ObjC/Framework/examples/polydiff/PolyParser.m
new file mode 100644
index 0000000..4b80147
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyParser.m
@@ -0,0 +1,757 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g
+ *     -                            On : 2012-02-16 18:10:10
+ *     -                for the parser : PolyParserParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g 2012-02-16 18:10:10
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "PolyParser.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_term_in_poly43;
+static const unsigned long long FOLLOW_term_in_poly43_data[] = { 0x0000000000000102LL};
+static ANTLRBitSet *FOLLOW_8_in_poly46;
+static const unsigned long long FOLLOW_8_in_poly46_data[] = { 0x0000000000000030LL};
+static ANTLRBitSet *FOLLOW_term_in_poly49;
+static const unsigned long long FOLLOW_term_in_poly49_data[] = { 0x0000000000000102LL};
+static ANTLRBitSet *FOLLOW_INT_in_term63;
+static const unsigned long long FOLLOW_INT_in_term63_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_ID_in_term65;
+static const unsigned long long FOLLOW_ID_in_term65_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_INT_in_term85;
+static const unsigned long long FOLLOW_INT_in_term85_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_exp_in_term87;
+static const unsigned long long FOLLOW_exp_in_term87_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_exp_in_term106;
+static const unsigned long long FOLLOW_exp_in_term106_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_INT_in_term114;
+static const unsigned long long FOLLOW_INT_in_term114_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_ID_in_term119;
+static const unsigned long long FOLLOW_ID_in_term119_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_ID_in_exp132;
+static const unsigned long long FOLLOW_ID_in_exp132_data[] = { 0x0000000000000200LL};
+static ANTLRBitSet *FOLLOW_9_in_exp134;
+static const unsigned long long FOLLOW_9_in_exp134_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_INT_in_exp137;
+static const unsigned long long FOLLOW_INT_in_exp137_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+@implementation PolyParser_poly_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (PolyParser_poly_return *)newPolyParser_poly_return
+{
+return [[[PolyParser_poly_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation PolyParser_term_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (PolyParser_term_return *)newPolyParser_term_return
+{
+return [[[PolyParser_term_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation PolyParser_exp_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (PolyParser_exp_return *)newPolyParser_exp_return
+{
+return [[[PolyParser_exp_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+
+
+@implementation PolyParser  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+/* AST genericParser.synthesize */
+/* AST parserProperties */
+@synthesize treeAdaptor;
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_term_in_poly43 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_term_in_poly43_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_poly46 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_poly46_data Count:(NSUInteger)1] retain];
+    FOLLOW_term_in_poly49 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_term_in_poly49_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_term63 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_term63_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_term65 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_term65_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_term85 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_term85_data Count:(NSUInteger)1] retain];
+    FOLLOW_exp_in_term87 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_exp_in_term87_data Count:(NSUInteger)1] retain];
+    FOLLOW_exp_in_term106 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_exp_in_term106_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_term114 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_term114_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_term119 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_term119_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_exp132 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_exp132_data Count:(NSUInteger)1] retain];
+    FOLLOW_9_in_exp134 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_exp134_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_exp137 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_exp137_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"ID", @"INT", @"MULT", @"WS", @"'+'", @"'^'", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g"];
+}
+
++ (PolyParser *)newPolyParser:(id<TokenStream>)aStream
+{
+    return [[PolyParser alloc] initWithTokenStream:aStream];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)aStream
+{
+    self = [super initWithTokenStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:3+1] retain]];
+    if ( self != nil ) {
+        /* start of actions-actionScope-init */
+        /* start of init */
+        /* AST genericParser.init */
+        [self setTreeAdaptor:[[CommonTreeAdaptor newTreeAdaptor] retain]];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    /* AST genericParser.dealloc */
+    [self setTreeAdaptor:nil];
+
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* AST genericParser.methods */
+/* AST parserMethods */
+- (id<TreeAdaptor>) getTreeAdaptor
+{
+	return treeAdaptor;
+}
+
+- (void) setTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor
+{
+	if (aTreeAdaptor != treeAdaptor) {
+		treeAdaptor = aTreeAdaptor;
+	}
+}
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start poly
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:8:1: poly : term ( '+' ^ term )* ;
+ */
+- (PolyParser_poly_return *) poly
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    PolyParser_poly_return * retval = [PolyParser_poly_return newPolyParser_poly_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *char_literal2 = nil;PolyParser_term_return * term1 = nil ;
+
+        PolyParser_term_return * term3 = nil ;
+
+
+        CommonTree *char_literal2_tree=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:8:5: ( term ( '+' ^ term )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:8:7: term ( '+' ^ term )* // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        /* ASTParser ruleRef */
+        /* ruleRef */
+        [self pushFollow:FOLLOW_term_in_poly43];
+        term1 = [self term];
+
+        [self popFollow];
+
+
+        [treeAdaptor addChild:[term1 getTree] toTree:root_0];
+         
+
+        do {
+            NSInteger alt1=2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( (LA1_0==8) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:8:13: '+' ^ term // alt
+                    {
+
+                    char_literal2=(CommonToken *)[self match:input TokenType:8 Follow:FOLLOW_8_in_poly46]; 
+                    char_literal2_tree = /* ASTParser createNodeFromToken */
+                    (CommonTree *)[[treeAdaptor create:char_literal2] retain]
+                    ;
+                    root_0 = (CommonTree *)[treeAdaptor becomeRoot:char_literal2_tree old:root_0];
+
+                     
+                    /* ASTParser ruleRef */
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_term_in_poly49];
+                    term3 = [self term];
+
+                    [self popFollow];
+
+
+                    [treeAdaptor addChild:[term3 getTree] toTree:root_0];
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop1;
+            }
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end poly */
+
+/*
+ * $ANTLR start term
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:11:1: term : ( INT ID -> ^( MULT[@\"*\"] INT ID ) | INT exp -> ^( MULT[@\"*\"] INT exp ) | exp | INT | ID );
+ */
+- (PolyParser_term_return *) term
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    PolyParser_term_return * retval = [PolyParser_term_return newPolyParser_term_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *INT4 = nil;
+        CommonToken *ID5 = nil;
+        CommonToken *INT6 = nil;
+        CommonToken *INT9 = nil;
+        CommonToken *ID10 = nil;PolyParser_exp_return * exp7 = nil ;
+
+        PolyParser_exp_return * exp8 = nil ;
+
+
+        CommonTree *INT4_tree=nil;
+        CommonTree *ID5_tree=nil;
+        CommonTree *INT6_tree=nil;
+        CommonTree *INT9_tree=nil;
+        CommonTree *ID10_tree=nil;
+        RewriteRuleTokenStream *stream_INT =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token INT"] retain];
+        RewriteRuleTokenStream *stream_ID =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token ID"] retain];
+        RewriteRuleSubtreeStream *stream_exp =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule exp"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:11:5: ( INT ID -> ^( MULT[@\"*\"] INT ID ) | INT exp -> ^( MULT[@\"*\"] INT exp ) | exp | INT | ID ) //ruleblock
+        NSInteger alt2=5;
+        NSInteger LA2_0 = [input LA:1];
+
+        if ( (LA2_0==INT) ) {
+            NSInteger LA2_1 = [input LA:2];
+
+            if ( (LA2_1==ID) ) {
+                NSInteger LA2_3 = [input LA:3];
+
+                if ( (LA2_3==9) ) {
+                    alt2=2;
+                }
+                else if ( (LA2_3==EOF||LA2_3==8) ) {
+                    alt2=1;
+                }
+                else {
+                    NoViableAltException *nvae = [NoViableAltException newException:2 state:3 stream:input];
+                    nvae.c = LA2_3;
+                    @throw nvae;
+
+                }
+            }
+            else if ( (LA2_1==EOF||LA2_1==8) ) {
+                alt2=4;
+            }
+            else {
+                NoViableAltException *nvae = [NoViableAltException newException:2 state:1 stream:input];
+                nvae.c = LA2_1;
+                @throw nvae;
+
+            }
+        }
+        else if ( (LA2_0==ID) ) {
+            NSInteger LA2_2 = [input LA:2];
+
+            if ( (LA2_2==9) ) {
+                alt2=3;
+            }
+            else if ( (LA2_2==EOF||LA2_2==8) ) {
+                alt2=5;
+            }
+            else {
+                NoViableAltException *nvae = [NoViableAltException newException:2 state:2 stream:input];
+                nvae.c = LA2_2;
+                @throw nvae;
+
+            }
+        }
+        else {
+            NoViableAltException *nvae = [NoViableAltException newException:2 state:0 stream:input];
+            nvae.c = LA2_0;
+            @throw nvae;
+
+        }
+        switch (alt2) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:11:7: INT ID // alt
+                {
+
+                INT4=(CommonToken *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_term63];  
+                    [stream_INT addElement:INT4];
+
+                 
+                ID5=(CommonToken *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_term65];  
+                    [stream_ID addElement:ID5];
+
+                 
+                // AST REWRITE
+                // elements: ID, INT
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 11:15: -> ^( MULT[@\"*\"] INT ID )
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:11:18: ^( MULT[@\"*\"] INT ID )
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                                [[treeAdaptor createTree:MULT FromToken:@"*" Text:@"MULT"] retain]
+                         old:root_1];
+
+                         // TODO: args: 
+                        [treeAdaptor addChild:
+                                    [stream_INT nextNode]
+                         toTree:root_1];
+
+                         // TODO: args: 
+                        [treeAdaptor addChild:
+                                    [stream_ID nextNode]
+                         toTree:root_1];
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:12:7: INT exp // alt
+                {
+
+                INT6=(CommonToken *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_term85];  
+                    [stream_INT addElement:INT6];
+
+                 
+                /* ruleRef */
+                [self pushFollow:FOLLOW_exp_in_term87];
+                exp7 = [self exp];
+
+                [self popFollow];
+
+
+                [stream_exp addElement:[exp7 getTree]];
+                 
+                // AST REWRITE
+                // elements: exp, INT
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 12:15: -> ^( MULT[@\"*\"] INT exp )
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:12:18: ^( MULT[@\"*\"] INT exp )
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                                [[treeAdaptor createTree:MULT FromToken:@"*" Text:@"MULT"] retain]
+                         old:root_1];
+
+                         // TODO: args: 
+                        [treeAdaptor addChild:
+                                    [stream_INT nextNode]
+                         toTree:root_1];
+
+                        [treeAdaptor addChild:[stream_exp nextTree] toTree:root_1];
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:13:7: exp // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTParser ruleRef */
+                /* ruleRef */
+                [self pushFollow:FOLLOW_exp_in_term106];
+                exp8 = [self exp];
+
+                [self popFollow];
+
+
+                [treeAdaptor addChild:[exp8 getTree] toTree:root_0];
+                 
+                }
+                break;
+            case 4 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:14:7: INT // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTParser tokenRef */
+                INT9=(CommonToken *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_term114]; 
+                INT9_tree = /* ASTParser createNodeFromToken */
+                (CommonTree *)[[treeAdaptor create:INT9] retain]
+                ;
+                [treeAdaptor addChild:INT9_tree  toTree:root_0];
+
+                 
+                }
+                break;
+            case 5 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:15:4: ID // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTParser tokenRef */
+                ID10=(CommonToken *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_term119]; 
+                ID10_tree = /* ASTParser createNodeFromToken */
+                (CommonTree *)[[treeAdaptor create:ID10] retain]
+                ;
+                [treeAdaptor addChild:ID10_tree  toTree:root_0];
+
+                 
+                }
+                break;
+
+        }
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_INT release];
+        [stream_ID release];
+        [stream_exp release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end term */
+
+/*
+ * $ANTLR start exp
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:18:1: exp : ID '^' ^ INT ;
+ */
+- (PolyParser_exp_return *) exp
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    PolyParser_exp_return * retval = [PolyParser_exp_return newPolyParser_exp_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *ID11 = nil;
+        CommonToken *char_literal12 = nil;
+        CommonToken *INT13 = nil;
+
+        CommonTree *ID11_tree=nil;
+        CommonTree *char_literal12_tree=nil;
+        CommonTree *INT13_tree=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:18:5: ( ID '^' ^ INT ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Poly.g:18:7: ID '^' ^ INT // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        /* ASTParser tokenRef */
+        ID11=(CommonToken *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_exp132]; 
+        ID11_tree = /* ASTParser createNodeFromToken */
+        (CommonTree *)[[treeAdaptor create:ID11] retain]
+        ;
+        [treeAdaptor addChild:ID11_tree  toTree:root_0];
+
+         
+        char_literal12=(CommonToken *)[self match:input TokenType:9 Follow:FOLLOW_9_in_exp134]; 
+        char_literal12_tree = /* ASTParser createNodeFromToken */
+        (CommonTree *)[[treeAdaptor create:char_literal12] retain]
+        ;
+        root_0 = (CommonTree *)[treeAdaptor becomeRoot:char_literal12_tree old:root_0];
+
+         
+        /* ASTParser tokenRef */
+        INT13=(CommonToken *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_exp137]; 
+        INT13_tree = /* ASTParser createNodeFromToken */
+        (CommonTree *)[[treeAdaptor create:INT13] retain]
+        ;
+        [treeAdaptor addChild:INT13_tree  toTree:root_0];
+
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end exp */
+/* ObjC end rules */
+
+@end /* end of PolyParser implementation line 692 */
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.g b/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.g
new file mode 100644
index 0000000..cdb3168
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.g
@@ -0,0 +1,14 @@
+tree grammar PolyPrinter;
+options {
+	tokenVocab=Poly;
+    language=ObjC;
+	ASTLabelType=CommonTree;
+	output=template;
+}
+
+poly:	^('+'  a=poly b=poly)	-> template(a={$a.st} b:b={$b.st}) "<a>+<b>"
+	|	^(MULT a=poly b=poly)	-> template(a={$a.st} b:b={$b.st}) "<a><b>"
+	|	^('^'  a=poly b=poly)	-> template(a={$a.st} b:b={$b.st}) "<a>^<b>"
+	|	INT						-> {%{$INT.text}}
+	|	ID						-> {%{$ID.text}}
+	;
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.h b/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.h
new file mode 100644
index 0000000..3e59f89
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.h
@@ -0,0 +1,74 @@
+// $ANTLR 3.3.1-SNAPSHOT Jan 30, 2011 08:28:24 PolyPrinter.g 2011-01-30 08:45:32
+
+/* =============================================================================
+ * Standard antlr3 OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+#import <ST4/ST.h>
+
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* treeParserHeaderFile */
+
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__8 8
+#define T__9 9
+#define MULT 4
+#define INT 5
+#define ID 6
+#define WS 7
+#pragma mark Dynamic Global Scopes
+#pragma mark Dynamic Rule Scopes
+#pragma mark Rule Return Scopes start
+/* returnScopeInterface */
+@interface PolyPrinter_poly_return :TreeRuleReturnScope { /* returnScopeInterface line 1838 */
+ST *st; /* start of memVars() */
+}
+ /* start properties */
++ (PolyPrinter_poly_return *)newPolyPrinter_poly_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsdecl */
+- (id) getTemplate;  /* methodsDecl */
+@end /* end of returnScopeInterface interface */
+
+#pragma mark Rule return scopes end
+@interface PolyPrinter : TreeParser { /* line 572 */
+// start of globalAttributeScopeMemVar
+
+
+// start of action-actionScope-memVars
+// start of ruleAttributeScopeMemVar
+
+
+// Start of memVars
+
+ }
+
+// start of action-actionScope-methodsDecl
++ (id) newPolyPrinter:(id<TreeNodeStream>)aStream;
+
+
+
+- (PolyPrinter_poly_return *)poly; 
+
+
+@end /* end of PolyPrinter interface */
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.m b/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.m
new file mode 100644
index 0000000..f0af6ef
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.m
@@ -0,0 +1,390 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.3.1-SNAPSHOT Jan 30, 2011 08:28:24
+ *
+ *     -  From the grammar source file : PolyPrinter.g
+ *     -                            On : 2011-01-30 08:45:32
+ *     -           for the tree parser : PolyPrinterTreeParser *
+ * Editing it, at least manually, is not wise. 
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// [The "BSD licence"]
+// Copyright (c) 2010 Alan Condit
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// $ANTLR 3.3.1-SNAPSHOT Jan 30, 2011 08:28:24 PolyPrinter.g 2011-01-30 08:45:32
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import <ST4/ST.h>
+#import <ST4/STGroup.h>
+#import "PolyPrinter.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+
+/* =============================================================================
+ * Start of recognizer
+ */
+
+
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_8_in_poly43;
+static const unsigned long long FOLLOW_8_in_poly43_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly48;
+static const unsigned long long FOLLOW_poly_in_poly48_data[] = { 0x0000000000000370LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly52;
+static const unsigned long long FOLLOW_poly_in_poly52_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_MULT_in_poly74;
+static const unsigned long long FOLLOW_MULT_in_poly74_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly78;
+static const unsigned long long FOLLOW_poly_in_poly78_data[] = { 0x0000000000000370LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly82;
+static const unsigned long long FOLLOW_poly_in_poly82_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_9_in_poly104;
+static const unsigned long long FOLLOW_9_in_poly104_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly109;
+static const unsigned long long FOLLOW_poly_in_poly109_data[] = { 0x0000000000000370LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly113;
+static const unsigned long long FOLLOW_poly_in_poly113_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly134;
+static const unsigned long long FOLLOW_INT_in_poly134_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_ID_in_poly148;
+static const unsigned long long FOLLOW_ID_in_poly148_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global Scopes
+
+#pragma mark Dynamic Rule Scopes
+
+#pragma mark Rule return scopes start
+@implementation PolyPrinter_poly_return /* returnScope */
+ /* start of synthesize -- OBJC-Line 1837 */
++ (PolyPrinter_poly_return *)newPolyPrinter_poly_return
+{
+    return [[[PolyPrinter_poly_return alloc] init] retain];
+}
+
+- (id) getTemplate { return st; }
+
+//public StringTemplate st;
+//public Object getTemplate() { return st; }
+//public String toString() { return st==null?null:st.toString(); }
+
+@end /* end of returnScope implementation */
+
+
+
+@implementation PolyPrinter  // line 637
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_8_in_poly43 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_poly43_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly48 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly48_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly52 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly52_data Count:(NSUInteger)1] retain];
+    FOLLOW_MULT_in_poly74 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_MULT_in_poly74_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly78 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly78_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly82 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly82_data Count:(NSUInteger)1] retain];
+    FOLLOW_9_in_poly104 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_poly104_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly109 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly109_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly113 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly113_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly134 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly134_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_poly148 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_poly148_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[NSArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"MULT", @"INT", @"ID", @"WS", @"'+'", @"'^'", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"PolyPrinter.g"];
+}
+
++ (PolyPrinter *)newPolyPrinter:(id<TreeNodeStream>)aStream
+{
+
+    return [[PolyPrinter alloc] initWithStream:aStream];
+
+}
+
+
+- (id) initWithStream:(id<TreeNodeStream>)aStream
+{
+    if ((self = [super initWithStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:1+1] retain]]) != nil) {
+
+
+        /* start of actions-actionScope-init */
+        /* start of init */
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* members */
+ 
+
+/* start actions.actionScope.methods */
+/* start methods() */
+/*protected StringTemplateGroup templateLib = new StringTemplateGroup("PolyPrinterTemplates", AngleBracketTemplateLexer.class); */
+static STGroup *templateLib = nil; /* a message send is not a constant initializer at file scope; created lazily in getTemplateLib */
+
+//public void setTemplateLib(StringTemplateGroup templateLib) {
+//  this.templateLib = templateLib;
+//}
+//public StringTemplateGroup getTemplateLib() {
+//  return templateLib;
+//}
+- (void) setTemplateLib:(STGroup *)aTemplateLib { templateLib = aTemplateLib; } 
+- (STGroup *)getTemplateLib { if ( templateLib == nil ) templateLib = [STGroup newSTGroup]; return templateLib; } 
+/** allows convenient multi-value initialization:
+ *  "new STAttrMap().put(...).put(...)"
+ *  (Java-target helper kept for reference only; see the note after the
+ *  commented-out class below for the pattern used by this ObjC file)
+ */
+/*
+public static class STAttrMap extends HashMap {
+  public STAttrMap put(String attrName, Object value) {
+    super.put(attrName, value);
+    return this;
+  }
+  public STAttrMap put(String attrName, int value) {
+    super.put(attrName, new Integer(value));
+    return this;
+  }
+}
+ */
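+/* Editorial note (not generated code): the STAttrMap helper above is not
+ * ported to ObjC; the template rewrites in the rule below build their result
+ * directly with the ST4 runtime imported at the top of this file, roughly:
+ *
+ *   retval.st = [ST newST:@"<a>+<b>"];
+ *   [retval.st add:@"a" value:(a != nil ? [a getTemplate] : nil)];
+ *   [retval.st add:@"b" value:(b != nil ? [b getTemplate] : nil)];
+ *
+ * getTemplate is the accessor declared on PolyPrinter_poly_return earlier in
+ * this file; ST and its add:value: method come from the ST4 headers.
+ */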
+// start rules
+/*
+ * $ANTLR start poly
+ * PolyPrinter.g:9:1: poly : ( ^( '+' a= poly b= poly ) -> template(a=$a.stb=$b.st) \"<a>+<b>\" | ^( MULT a= poly b= poly ) -> template(a=$a.stb=$b.st) \"<a><b>\" | ^( '^' a= poly b= poly ) -> template(a=$a.stb=$b.st) \"<a>^<b>\" | INT -> {%{$INT.text}} | ID -> {%{$ID.text}});
+ */
+- (PolyPrinter_poly_return *) poly
+{
+    /* ruleScopeSetUp */
+
+    PolyPrinter_poly_return * retval = [PolyPrinter_poly_return newPolyPrinter_poly_return];
+    [retval setStart:[input LT:1]];
+
+    @try {
+        CommonTree *INT1 = nil;
+        CommonTree *ID2 = nil;
+        PolyPrinter_poly_return * a = nil;
+
+        PolyPrinter_poly_return * b = nil;
+
+
+        // PolyPrinter.g:9:5: ( ^( '+' a= poly b= poly ) -> template(a=$a.stb=$b.st) \"<a>+<b>\" | ^( MULT a= poly b= poly ) -> template(a=$a.stb=$b.st) \"<a><b>\" | ^( '^' a= poly b= poly ) -> template(a=$a.stb=$b.st) \"<a>^<b>\" | INT -> {%{$INT.text}} | ID -> {%{$ID.text}}) //ruleblock
+        NSInteger alt1=5;
+        switch ([input LA:1]) {
+            case 8: ;
+                {
+                alt1=1;
+                }
+                break;
+            case MULT: ;
+                {
+                alt1=2;
+                }
+                break;
+            case 9: ;
+                {
+                alt1=3;
+                }
+                break;
+            case INT: ;
+                {
+                alt1=4;
+                }
+                break;
+            case ID: ;
+                {
+                alt1=5;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:1 state:0 stream:input];
+            @throw nvae;
+        }
+
+        switch (alt1) {
+            case 1 : ;
+                // PolyPrinter.g:9:7: ^( '+' a= poly b= poly ) // alt
+                {
+                [self match:input TokenType:8 Follow:FOLLOW_8_in_poly43]; 
+
+                    [self match:input TokenType:DOWN Follow:nil]; 
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_poly_in_poly48];
+                    a = [self poly];
+
+                    [self popFollow];
+
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_poly_in_poly52];
+                    b = [self poly];
+
+                    [self popFollow];
+
+
+
+                    [self match:input TokenType:UP Follow:nil]; 
+
+
+                // TEMPLATE REWRITE
+                // 9:29: -> template(a=$a.stb=$b.st) \"<a>+<b>\"
+                {
+                    /* ST4 translation of the generated Java rewrite */
+                    retval.st = [ST newST:@"<a>+<b>"];
+                    [retval.st add:@"a" value:(a != nil ? [a getTemplate] : nil)];
+                    [retval.st add:@"b" value:(b != nil ? [b getTemplate] : nil)];
+                }
+
+
+                }
+                break;
+            case 2 : ;
+                // PolyPrinter.g:10:4: ^( MULT a= poly b= poly ) // alt
+                {
+                [self match:input TokenType:MULT Follow:FOLLOW_MULT_in_poly74]; 
+
+                    [self match:input TokenType:DOWN Follow:nil]; 
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_poly_in_poly78];
+                    a = [self poly];
+
+                    [self popFollow];
+
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_poly_in_poly82];
+                    b = [self poly];
+
+                    [self popFollow];
+
+
+
+                    [self match:input TokenType:UP Follow:nil]; 
+
+
+                // TEMPLATE REWRITE
+                // 10:26: -> template(a=$a.stb=$b.st) \"<a><b>\"
+                {
+                    /* ST4 translation of the generated Java rewrite */
+                    retval.st = [ST newST:@"<a><b>"];
+                    [retval.st add:@"a" value:(a != nil ? [a getTemplate] : nil)];
+                    [retval.st add:@"b" value:(b != nil ? [b getTemplate] : nil)];
+                }
+
+
+                }
+                break;
+            case 3 : ;
+                // PolyPrinter.g:11:4: ^( '^' a= poly b= poly ) // alt
+                {
+                [self match:input TokenType:9 Follow:FOLLOW_9_in_poly104]; 
+
+                    [self match:input TokenType:DOWN Follow:nil]; 
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_poly_in_poly109];
+                    a = [self poly];
+
+                    [self popFollow];
+
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_poly_in_poly113];
+                    b = [self poly];
+
+                    [self popFollow];
+
+
+
+                    [self match:input TokenType:UP Follow:nil]; 
+
+
+                // TEMPLATE REWRITE
+                // 11:26: -> template(a=$a.stb=$b.st) \"<a>^<b>\"
+                {
+                    retval.st = [ST newST:@"<a>^<b>"];
+                    [retval.st add:@"a" value:(a != nil ? [a getTemplate] : nil)];
+                    [retval.st add:@"b" value:(b != nil ? [b getTemplate] : nil)];
+                }
+
+
+                }
+                break;
+            case 4 : ;
+                // PolyPrinter.g:12:4: INT // alt
+                {
+                INT1=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly134]; 
+
+
+                // TEMPLATE REWRITE
+                // 12:13: -> {%{$INT.text}}
+                {
+                    retval.st = [ST newST:(INT1!=nil?INT1.text:nil)];
+                }
+
+
+                }
+                break;
+            case 5 : ;
+                // PolyPrinter.g:13:4: ID // alt
+                {
+                ID2=(CommonTree *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_poly148]; 
+
+
+                // TEMPLATE REWRITE
+                // 13:12: -> {%{$ID.text}}
+                {
+                    retval.st = [ST newST:(ID2 != nil ? ID2.text : nil)];
+                }
+
+
+                }
+                break;
+
+        }
+        // token+rule list labels
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+    @finally {
+    }
+    return retval;
+}
+/* $ANTLR end poly */
+
+@end /* end of PolyPrinter implementation line 692 */
+
+
+/* End of code
+ * =============================================================================
+ */
diff --git a/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.tokens b/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.tokens
new file mode 100644
index 0000000..6b157bd
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/PolyPrinter.tokens
@@ -0,0 +1,8 @@
+T__8=8
+T__9=9
+MULT=4
+INT=5
+ID=6
+WS=7
+'+'=8
+'^'=9
diff --git a/runtime/ObjC/Framework/examples/polydiff/Simplifier.g b/runtime/ObjC/Framework/examples/polydiff/Simplifier.g
new file mode 100644
index 0000000..e927e90
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/Simplifier.g
@@ -0,0 +1,37 @@
+tree grammar Simplifier;
+options {
+	tokenVocab=Poly;
+    language=ObjC;
+	ASTLabelType=CommonTree;
+	output=AST;
+	backtrack=true;
+//	rewrite=true; // works either in rewrite or normal mode
+}
+
+/** Match some common patterns that we can reduce via identity
+ *  definitions.  Since this is only run once, it will not be
+ *  perfect.  To be correct we'd need to keep feeding the result back
+ *  through this pass until nothing changes (a fixpoint); see the driver
+ *  sketch in the comment at the end of this grammar.
+ */
+poly:	^('+' a=INT b=INT)	-> INT[[NSString stringWithFormat:@"\%d", ($a.int+$b.int)\]]
+
+	|	^('+' ^('+' a=INT p=poly) b=INT)
+							-> ^('+' $p INT[[NSString stringWithFormat:@"\%d", ($a.int+$b.int)\]])
+	
+	|	^('+' ^('+' p=poly a=INT) b=INT)
+							-> ^('+' $p INT[[NSString stringWithFormat:@"\%d", ($a.int+$b.int)\]])
+	
+	|	^('+' p=poly q=poly)-> { [[$p.tree toStringTree] isEqualToString:@"0"] }? $q
+							-> { [[$q.tree toStringTree] isEqualToString:@"0"] }? $p
+							-> ^('+' $p $q)
+
+	|	^(MULT INT poly)	-> {$INT.int==1}? poly
+							-> ^(MULT INT poly)
+
+	|	^('^' ID e=INT)		-> {$e.int==1}? ID
+							-> {$e.int==0}? INT[@"1"]
+							-> ^('^' ID INT)
+
+	|	INT
+	|	ID
+	;
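+
+/* Editorial sketch (not generated): as the comment above notes, one pass is
+ * not enough; a driver would have to re-run this tree parser until the tree
+ * stops changing.  A minimal fixpoint loop, assuming a hypothetical
+ * treeStreamFor() helper that wraps a CommonTree in an id<TreeNodeStream>;
+ * everything else uses only methods declared in Simplifier.h:
+ *
+ *   CommonTree *t = ...;                      // AST produced by the Poly parser
+ *   NSString *before;
+ *   NSString *after = [t toStringTree];
+ *   do {
+ *       before = after;
+ *       Simplifier *simp = [Simplifier newSimplifier:treeStreamFor(t)];
+ *       Simplifier_poly_return *r = [simp poly];
+ *       t = [r getTree];
+ *       after = [t toStringTree];
+ *   } while ( ![after isEqualToString:before] );  // stop once a pass changes nothing
+ */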
diff --git a/runtime/ObjC/Framework/examples/polydiff/Simplifier.h b/runtime/ObjC/Framework/examples/polydiff/Simplifier.h
new file mode 100644
index 0000000..b3fc9df
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/Simplifier.h
@@ -0,0 +1,203 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g 2012-02-16 18:11:30
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* treeParserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__8 8
+#define T__9 9
+#define ID 4
+#define INT 5
+#define MULT 6
+#define WS 7
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+/* returnScopeInterface Simplifier_poly_return */
+@interface Simplifier_poly_return : TreeRuleReturnScope { /* returnScopeInterface line 1838 */
+/* ASTTreeParser returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (Simplifier_poly_return *)newSimplifier_poly_return;
+/* this is start of set and get methods */
+/* ASTTreeParser returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface Simplifier_synpred1_Simplifier_return */
+@interface Simplifier_synpred1_Simplifier_return : TreeRuleReturnScope { /* returnScopeInterface line 1838 */
+/* ASTTreeParser returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (Simplifier_synpred1_Simplifier_return *)newSimplifier_synpred1_Simplifier_return;
+/* this is start of set and get methods */
+/* ASTTreeParser returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface Simplifier_synpred2_Simplifier_return */
+@interface Simplifier_synpred2_Simplifier_return : TreeRuleReturnScope { /* returnScopeInterface line 1838 */
+/* ASTTreeParser returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (Simplifier_synpred2_Simplifier_return *)newSimplifier_synpred2_Simplifier_return;
+/* this is start of set and get methods */
+/* ASTTreeParser returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface Simplifier_synpred3_Simplifier_return */
+@interface Simplifier_synpred3_Simplifier_return : TreeRuleReturnScope { /* returnScopeInterface line 1838 */
+/* ASTTreeParser returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (Simplifier_synpred3_Simplifier_return *)newSimplifier_synpred3_Simplifier_return;
+/* this is start of set and get methods */
+/* ASTTreeParser returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface Simplifier_synpred4_Simplifier_return */
+@interface Simplifier_synpred4_Simplifier_return : TreeRuleReturnScope { /* returnScopeInterface line 1838 */
+/* ASTTreeParser returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (Simplifier_synpred4_Simplifier_return *)newSimplifier_synpred4_Simplifier_return;
+/* this is start of set and get methods */
+/* ASTTreeParser returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+
+/* Interface grammar class */
+@interface Simplifier  : TreeParser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* AST parserHeaderFile.memVars */
+NSInteger ruleLevel;
+NSArray *ruleNames;
+  /* AST super.memVars */
+/* AST parserMemVars */
+id<TreeAdaptor> treeAdaptor;   /* AST parserMemVars */
+/* ObjC end of memVars */
+
+SEL synpred2_SimplifierSelector;
+SEL synpred1_SimplifierSelector;
+SEL synpred4_SimplifierSelector;
+SEL synpred3_SimplifierSelector;
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* AST parserHeaderFile.properties */
+  /* AST super.properties */
+/* AST parserProperties */
+@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<TreeAdaptor> treeAdaptor;   /* AST parserproperties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newSimplifier:(id<TreeNodeStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* AST parserHeaderFile.methodsDecl */
+  /* AST super.methodsDecl */
+/* AST parserMethodsDecl */
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void) setTreeAdaptor:(id<TreeAdaptor>)theTreeAdaptor;   /* AST parsermethodsDecl */
+/* ObjC end of methodsDecl */
+
+- (Simplifier_poly_return *)poly; 
+- (void)synpred1_Simplifier_fragment; 
+- (void)synpred2_Simplifier_fragment; 
+- (void)synpred3_Simplifier_fragment; 
+- (void)synpred4_Simplifier_fragment; 
+
+
+@end /* end of Simplifier interface */
+
diff --git a/runtime/ObjC/Framework/examples/polydiff/Simplifier.m b/runtime/ObjC/Framework/examples/polydiff/Simplifier.m
new file mode 100644
index 0000000..9474c94
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/Simplifier.m
@@ -0,0 +1,1397 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g
+ *     -                            On : 2012-02-16 18:11:30
+ *     -           for the tree parser : SimplifierTreeParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g 2012-02-16 18:11:30
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "Simplifier.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_8_in_poly52;
+static const unsigned long long FOLLOW_8_in_poly52_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly56;
+static const unsigned long long FOLLOW_INT_in_poly56_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly60;
+static const unsigned long long FOLLOW_INT_in_poly60_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_8_in_poly73;
+static const unsigned long long FOLLOW_8_in_poly73_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_8_in_poly76;
+static const unsigned long long FOLLOW_8_in_poly76_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly80;
+static const unsigned long long FOLLOW_INT_in_poly80_data[] = { 0x0000000000000370LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly84;
+static const unsigned long long FOLLOW_poly_in_poly84_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly89;
+static const unsigned long long FOLLOW_INT_in_poly89_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_8_in_poly117;
+static const unsigned long long FOLLOW_8_in_poly117_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_8_in_poly120;
+static const unsigned long long FOLLOW_8_in_poly120_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly124;
+static const unsigned long long FOLLOW_poly_in_poly124_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly128;
+static const unsigned long long FOLLOW_INT_in_poly128_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly133;
+static const unsigned long long FOLLOW_INT_in_poly133_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_8_in_poly161;
+static const unsigned long long FOLLOW_8_in_poly161_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly165;
+static const unsigned long long FOLLOW_poly_in_poly165_data[] = { 0x0000000000000370LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly169;
+static const unsigned long long FOLLOW_poly_in_poly169_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_MULT_in_poly216;
+static const unsigned long long FOLLOW_MULT_in_poly216_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly218;
+static const unsigned long long FOLLOW_INT_in_poly218_data[] = { 0x0000000000000370LL};
+static ANTLRBitSet *FOLLOW_poly_in_poly220;
+static const unsigned long long FOLLOW_poly_in_poly220_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_9_in_poly251;
+static const unsigned long long FOLLOW_9_in_poly251_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_ID_in_poly253;
+static const unsigned long long FOLLOW_ID_in_poly253_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly257;
+static const unsigned long long FOLLOW_INT_in_poly257_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_INT_in_poly302;
+static const unsigned long long FOLLOW_INT_in_poly302_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_ID_in_poly307;
+static const unsigned long long FOLLOW_ID_in_poly307_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_8_in_synpred1_Simplifier52;
+static const unsigned long long FOLLOW_8_in_synpred1_Simplifier52_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_INT_in_synpred1_Simplifier56;
+static const unsigned long long FOLLOW_INT_in_synpred1_Simplifier56_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_INT_in_synpred1_Simplifier60;
+static const unsigned long long FOLLOW_INT_in_synpred1_Simplifier60_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_8_in_synpred2_Simplifier73;
+static const unsigned long long FOLLOW_8_in_synpred2_Simplifier73_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_8_in_synpred2_Simplifier76;
+static const unsigned long long FOLLOW_8_in_synpred2_Simplifier76_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_INT_in_synpred2_Simplifier80;
+static const unsigned long long FOLLOW_INT_in_synpred2_Simplifier80_data[] = { 0x0000000000000370LL};
+static ANTLRBitSet *FOLLOW_poly_in_synpred2_Simplifier84;
+static const unsigned long long FOLLOW_poly_in_synpred2_Simplifier84_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_INT_in_synpred2_Simplifier89;
+static const unsigned long long FOLLOW_INT_in_synpred2_Simplifier89_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_8_in_synpred3_Simplifier117;
+static const unsigned long long FOLLOW_8_in_synpred3_Simplifier117_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_8_in_synpred3_Simplifier120;
+static const unsigned long long FOLLOW_8_in_synpred3_Simplifier120_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_poly_in_synpred3_Simplifier124;
+static const unsigned long long FOLLOW_poly_in_synpred3_Simplifier124_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_INT_in_synpred3_Simplifier128;
+static const unsigned long long FOLLOW_INT_in_synpred3_Simplifier128_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_INT_in_synpred3_Simplifier133;
+static const unsigned long long FOLLOW_INT_in_synpred3_Simplifier133_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_8_in_synpred4_Simplifier161;
+static const unsigned long long FOLLOW_8_in_synpred4_Simplifier161_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_poly_in_synpred4_Simplifier165;
+static const unsigned long long FOLLOW_poly_in_synpred4_Simplifier165_data[] = { 0x0000000000000370LL};
+static ANTLRBitSet *FOLLOW_poly_in_synpred4_Simplifier169;
+static const unsigned long long FOLLOW_poly_in_synpred4_Simplifier169_data[] = { 0x0000000000000008LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+@implementation Simplifier_poly_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (Simplifier_poly_return *)newSimplifier_poly_return
+{
+return [[[Simplifier_poly_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation Simplifier_synpred1_Simplifier_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (Simplifier_synpred1_Simplifier_return *)newSimplifier_synpred1_Simplifier_return
+{
+return [[[Simplifier_synpred1_Simplifier_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation Simplifier_synpred2_Simplifier_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (Simplifier_synpred2_Simplifier_return *)newSimplifier_synpred2_Simplifier_return
+{
+return [[[Simplifier_synpred2_Simplifier_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation Simplifier_synpred3_Simplifier_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (Simplifier_synpred3_Simplifier_return *)newSimplifier_synpred3_Simplifier_return
+{
+return [[[Simplifier_synpred3_Simplifier_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation Simplifier_synpred4_Simplifier_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (Simplifier_synpred4_Simplifier_return *)newSimplifier_synpred4_Simplifier_return
+{
+return [[[Simplifier_synpred4_Simplifier_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+
+
+@implementation Simplifier  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+/* AST genericParser.synthesize */
+/* AST parserProperties */
+@synthesize treeAdaptor;
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_8_in_poly52 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_poly52_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly56 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly56_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly60 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly60_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_poly73 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_poly73_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_poly76 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_poly76_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly80 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly80_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly84 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly84_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly89 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly89_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_poly117 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_poly117_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_poly120 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_poly120_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly124 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly124_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly128 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly128_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly133 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly133_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_poly161 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_poly161_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly165 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly165_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly169 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly169_data Count:(NSUInteger)1] retain];
+    FOLLOW_MULT_in_poly216 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_MULT_in_poly216_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly218 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly218_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_poly220 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_poly220_data Count:(NSUInteger)1] retain];
+    FOLLOW_9_in_poly251 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_poly251_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_poly253 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_poly253_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly257 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly257_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_poly302 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_poly302_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_poly307 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_poly307_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_synpred1_Simplifier52 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_synpred1_Simplifier52_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_synpred1_Simplifier56 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_synpred1_Simplifier56_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_synpred1_Simplifier60 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_synpred1_Simplifier60_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_synpred2_Simplifier73 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_synpred2_Simplifier73_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_synpred2_Simplifier76 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_synpred2_Simplifier76_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_synpred2_Simplifier80 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_synpred2_Simplifier80_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_synpred2_Simplifier84 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_synpred2_Simplifier84_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_synpred2_Simplifier89 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_synpred2_Simplifier89_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_synpred3_Simplifier117 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_synpred3_Simplifier117_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_synpred3_Simplifier120 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_synpred3_Simplifier120_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_synpred3_Simplifier124 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_synpred3_Simplifier124_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_synpred3_Simplifier128 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_synpred3_Simplifier128_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_synpred3_Simplifier133 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_synpred3_Simplifier133_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_synpred4_Simplifier161 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_synpred4_Simplifier161_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_synpred4_Simplifier165 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_synpred4_Simplifier165_data Count:(NSUInteger)1] retain];
+    FOLLOW_poly_in_synpred4_Simplifier169 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_poly_in_synpred4_Simplifier169_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"ID", @"INT", @"MULT", @"WS", @"'+'", @"'^'", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g"];
+    SEL synpred2_SimplifierSelector = @selector(synpred2_Simplifier_fragment);
+    SEL synpred1_SimplifierSelector = @selector(synpred1_Simplifier_fragment);
+    SEL synpred4_SimplifierSelector = @selector(synpred4_Simplifier_fragment);
+    SEL synpred3_SimplifierSelector = @selector(synpred3_Simplifier_fragment);
+
+}
+
++ (Simplifier *)newSimplifier:(id<TreeNodeStream>)aStream
+{
+    return [[Simplifier alloc] initWithStream:aStream];
+}
+
+- (id) initWithStream:(id<TreeNodeStream>)aStream
+{
+    self = [super initWithStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:8+1] retain]];
+    if ( self != nil ) {
+        /* start of actions-actionScope-init */
+        /* start of init */
+        /* AST genericParser.init */
+        [self setTreeAdaptor:[[CommonTreeAdaptor newTreeAdaptor] retain]];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    /* AST genericParser.dealloc */
+    [self setTreeAdaptor:nil];
+
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* AST genericParser.methods */
+/* AST parserMethods */
+- (id<TreeAdaptor>) getTreeAdaptor
+{
+	return treeAdaptor;
+}
+
+- (void) setTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor
+{
+	if (aTreeAdaptor != treeAdaptor) {
+		treeAdaptor = aTreeAdaptor;
+	}
+}
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start poly
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:16:1: poly : ( ^( '+' a= INT b= INT ) -> INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]] | ^( '+' ^( '+' a= INT p= poly ) b= INT ) -> ^( '+' $p INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]] ) | ^( '+' ^( '+' p= poly a= INT ) b= INT ) -> ^( '+' $p INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]] ) | ^( '+' p= poly q= poly ) -> { [[$p.tree toStringTree] isEqualToString:@\"0\"] }? $q -> { [[$q.tree toStringTree] isEqualToString:@\"0\"] }? $p -> ^( '+' $p $q) | ^( MULT INT poly ) -> {$INT.int==1}? poly -> ^( MULT INT poly ) | ^( '^' ID e= INT ) -> {$e.int==1}? ID -> {$e.int==0}? INT[@\"1\"] -> ^( '^' ID INT ) | INT | ID );
+ */
+- (Simplifier_poly_return *) poly
+{
+    /* ruleScopeSetUp */
+
+    /* ASTTreeParser ruleDeclarations */
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    Simplifier_poly_return * retval = [Simplifier_poly_return newSimplifier_poly_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    CommonTree *_first_0 = nil;
+    CommonTree *_last = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonTree *a = nil;
+        CommonTree *b = nil;
+        CommonTree *e = nil;
+        CommonTree *char_literal1 = nil;
+        CommonTree *char_literal2 = nil;
+        CommonTree *char_literal3 = nil;
+        CommonTree *char_literal4 = nil;
+        CommonTree *char_literal5 = nil;
+        CommonTree *char_literal6 = nil;
+        CommonTree *MULT7 = nil;
+        CommonTree *INT8 = nil;
+        CommonTree *char_literal10 = nil;
+        CommonTree *ID11 = nil;
+        CommonTree *INT12 = nil;
+        CommonTree *ID13 = nil;
+        Simplifier_poly_return * p = nil ;
+
+        Simplifier_poly_return * q = nil ;
+
+        Simplifier_poly_return * poly9 = nil ;
+
+
+        CommonTree *a_tree=nil;
+        CommonTree *b_tree=nil;
+        CommonTree *e_tree=nil;
+        CommonTree *char_literal1_tree=nil;
+        CommonTree *char_literal2_tree=nil;
+        CommonTree *char_literal3_tree=nil;
+        CommonTree *char_literal4_tree=nil;
+        CommonTree *char_literal5_tree=nil;
+        CommonTree *char_literal6_tree=nil;
+        CommonTree *MULT7_tree=nil;
+        CommonTree *INT8_tree=nil;
+        CommonTree *char_literal10_tree=nil;
+        CommonTree *ID11_tree=nil;
+        CommonTree *INT12_tree=nil;
+        CommonTree *ID13_tree=nil;
+        RewriteRuleTokenStream *stream_INT =
+            [[RewriteRuleNodeStream newRewriteRuleNodeStream:treeAdaptor
+                                                             description:@"token INT"] retain];
+        RewriteRuleTokenStream *stream_MULT =
+            [[RewriteRuleNodeStream newRewriteRuleNodeStream:treeAdaptor
+                                                             description:@"token MULT"] retain];
+        RewriteRuleTokenStream *stream_ID =
+            [[RewriteRuleNodeStream newRewriteRuleNodeStream:treeAdaptor
+                                                             description:@"token ID"] retain];
+        RewriteRuleTokenStream *stream_9 =
+            [[RewriteRuleNodeStream newRewriteRuleNodeStream:treeAdaptor
+                                                             description:@"token 9"] retain];
+        RewriteRuleTokenStream *stream_8 =
+            [[RewriteRuleNodeStream newRewriteRuleNodeStream:treeAdaptor
+                                                             description:@"token 8"] retain];
+        RewriteRuleSubtreeStream *stream_poly =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule poly"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:16:5: ( ^( '+' a= INT b= INT ) -> INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]] | ^( '+' ^( '+' a= INT p= poly ) b= INT ) -> ^( '+' $p INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]] ) | ^( '+' ^( '+' p= poly a= INT ) b= INT ) -> ^( '+' $p INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]] ) | ^( '+' p= poly q= poly ) -> { [[$p.tree toStringTree] isEqualToString:@\"0\"] }? $q -> { [[$q.tree toStringTree] isEqualToString:@\"0\"] }? $p -> ^( '+' $p $q) | ^( MULT INT poly ) -> {$INT.int==1}? poly -> ^( MULT INT poly ) | ^( '^' ID e= INT ) -> {$e.int==1}? ID -> {$e.int==0}? INT[@\"1\"] -> ^( '^' ID INT ) | INT | ID ) //ruleblock
+        NSInteger alt1=8;
+        unichar charLA1 = [input LA:1];
+        switch (charLA1) {
+            case 8: ;
+                {
+                NSInteger LA1_1 = [input LA:2];
+
+                if ( ([self evaluateSyntacticPredicate:@selector(synpred1_Simplifier_fragment)]) ) {
+                    alt1=1;
+                }
+                else if ( ([self evaluateSyntacticPredicate:@selector(synpred2_Simplifier_fragment)]) ) {
+                    alt1=2;
+                }
+                else if ( ([self evaluateSyntacticPredicate:@selector(synpred3_Simplifier_fragment)]) ) {
+                    alt1=3;
+                }
+                else if ( ([self evaluateSyntacticPredicate:@selector(synpred4_Simplifier_fragment)]) ) {
+                    alt1=4;
+                }
+                else {
+                    if ( state.backtracking > 0 ) { state.failed = YES; return retval; }
+
+                    NoViableAltException *nvae = [NoViableAltException newException:1 state:1 stream:input];
+                    nvae.c = LA1_1;
+                    @throw nvae;
+
+                }
+                }
+                break;
+            case MULT: ;
+                {
+                alt1=5;
+                }
+                break;
+            case 9: ;
+                {
+                alt1=6;
+                }
+                break;
+            case INT: ;
+                {
+                alt1=7;
+                }
+                break;
+            case ID: ;
+                {
+                alt1=8;
+                }
+                break;
+
+        default: ;
+            if ( state.backtracking > 0 ) { state.failed = YES; return retval; }
+
+            NoViableAltException *nvae = [NoViableAltException newException:1 state:0 stream:input];
+            nvae.c = charLA1;
+            @throw nvae;
+
+        }
+
+        switch (alt1) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:16:7: ^( '+' a= INT b= INT ) // alt
+                {
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_1 = _last;
+                CommonTree *_first_1 = nil;
+                CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                char_literal1=(CommonTree *)[self match:input TokenType:8 Follow:FOLLOW_8_in_poly52]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_8 addElement:char_literal1];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; if ( state.failed ) return retval;
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                a=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly56]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_INT addElement:a];
+
+                 
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                b=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly60]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_INT addElement:b];
+
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; if ( state.failed ) return retval;
+                [treeAdaptor addChild:root_1 toTree:root_0];
+                _last = _save_last_1;
+                }
+
+                 
+                // AST REWRITE
+                // elements: INT
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                if ( state.backtracking == 0 ) {
+
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 16:26: -> INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]]
+                {
+                    [treeAdaptor addChild:
+                            [[treeAdaptor createTree:INT FromToken:[NSString stringWithFormat:@"%d", ((a!=nil?[a.text integerValue]:0)+(b!=nil?[b.text integerValue]:0))] Text:@"INT"] retain]
+                     toTree:root_0];
+
+                }
+
+
+                retval.tree = root_0;
+
+                }
+
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:18:4: ^( '+' ^( '+' a= INT p= poly ) b= INT ) // alt
+                {
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_1 = _last;
+                CommonTree *_first_1 = nil;
+                CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                char_literal2=(CommonTree *)[self match:input TokenType:8 Follow:FOLLOW_8_in_poly73]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_8 addElement:char_literal2];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; if ( state.failed ) return retval;
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_2 = _last;
+                CommonTree *_first_2 = nil;
+                CommonTree *root_2 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                char_literal3=(CommonTree *)[self match:input TokenType:8 Follow:FOLLOW_8_in_poly76]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_8 addElement:char_literal3];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; if ( state.failed ) return retval;
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                a=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly80]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_INT addElement:a];
+
+                 
+                /* ASTTreeParser ruleRefTrack */
+                _last = (CommonTree *)[input LT:1];
+                /* ruleRef */
+                [self pushFollow:FOLLOW_poly_in_poly84];
+                p = [self poly];
+
+                [self popFollow];
+                if ( state.failed ) return retval;
+
+                if ( state.backtracking == 0 ) 
+                [stream_poly addElement:[p getTree]];
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; if ( state.failed ) return retval;
+                [treeAdaptor addChild:root_2 toTree:root_1];
+                _last = _save_last_2;
+                }
+
+                 
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                b=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly89]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_INT addElement:b];
+
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; if ( state.failed ) return retval;
+                [treeAdaptor addChild:root_1 toTree:root_0];
+                _last = _save_last_1;
+                }
+
+                 
+                // AST REWRITE
+                // elements: INT, p, 8
+                // token labels: 
+                // rule labels: retval, p
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                if ( state.backtracking == 0 ) {
+
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+                RewriteRuleSubtreeStream *stream_p =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token p" element:p!=nil?[p getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 19:8: -> ^( '+' $p INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]] )
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:19:11: ^( '+' $p INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]] )
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:/* ASTTreeParser createRewriteNodeFromElement */
+                        [stream_8 nextNode]
+                         old:root_1];
+
+                        [treeAdaptor addChild:[stream_p nextTree] toTree:root_1];
+
+                        [treeAdaptor addChild:
+                                [[treeAdaptor createTree:INT FromToken:[NSString stringWithFormat:@"%d", ((a!=nil?[a.text integerValue]:0)+(b!=nil?[b.text integerValue]:0))] Text:@"INT"] retain]
+                         toTree:root_1];
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+                }
+
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:21:4: ^( '+' ^( '+' p= poly a= INT ) b= INT ) // alt
+                {
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_1 = _last;
+                CommonTree *_first_1 = nil;
+                CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                char_literal4=(CommonTree *)[self match:input TokenType:8 Follow:FOLLOW_8_in_poly117]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_8 addElement:char_literal4];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; if ( state.failed ) return retval;
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_2 = _last;
+                CommonTree *_first_2 = nil;
+                CommonTree *root_2 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                char_literal5=(CommonTree *)[self match:input TokenType:8 Follow:FOLLOW_8_in_poly120]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_8 addElement:char_literal5];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; if ( state.failed ) return retval;
+
+                /* ASTTreeParser ruleRefTrack */
+                _last = (CommonTree *)[input LT:1];
+                /* ruleRef */
+                [self pushFollow:FOLLOW_poly_in_poly124];
+                p = [self poly];
+
+                [self popFollow];
+                if ( state.failed ) return retval;
+
+                if ( state.backtracking == 0 ) 
+                [stream_poly addElement:[p getTree]];
+                 
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                a=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly128]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_INT addElement:a];
+
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; if ( state.failed ) return retval;
+                [treeAdaptor addChild:root_2 toTree:root_1];
+                _last = _save_last_2;
+                }
+
+                 
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                b=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly133]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_INT addElement:b];
+
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; if ( state.failed ) return retval;
+                [treeAdaptor addChild:root_1 toTree:root_0];
+                _last = _save_last_1;
+                }
+
+                 
+                // AST REWRITE
+                // elements: INT, 8, p
+                // token labels: 
+                // rule labels: retval, p
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                if ( state.backtracking == 0 ) {
+
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+                RewriteRuleSubtreeStream *stream_p =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token p" element:p!=nil?[p getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 22:8: -> ^( '+' $p INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]] )
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:22:11: ^( '+' $p INT[[NSString stringWithFormat:@\"\\%d\", ($a.int+$b.int)]] )
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:/* ASTTreeParser createRewriteNodeFromElement */
+                        [stream_8 nextNode]
+                         old:root_1];
+
+                        [treeAdaptor addChild:[stream_p nextTree] toTree:root_1];
+
+                        [treeAdaptor addChild:
+                                [[treeAdaptor createTree:INT FromToken:[NSString stringWithFormat:@"%d", ((a!=nil?[a.text integerValue]:0)+(b!=nil?[b.text integerValue]:0))] Text:@"INT"] retain]
+                         toTree:root_1];
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+                }
+
+                }
+                break;
+            case 4 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:24:4: ^( '+' p= poly q= poly ) // alt
+                {
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_1 = _last;
+                CommonTree *_first_1 = nil;
+                CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                char_literal6=(CommonTree *)[self match:input TokenType:8 Follow:FOLLOW_8_in_poly161]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_8 addElement:char_literal6];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; if ( state.failed ) return retval;
+
+                /* ASTTreeParser ruleRefTrack */
+                _last = (CommonTree *)[input LT:1];
+                /* ruleRef */
+                [self pushFollow:FOLLOW_poly_in_poly165];
+                p = [self poly];
+
+                [self popFollow];
+                if ( state.failed ) return retval;
+
+                if ( state.backtracking == 0 ) 
+                [stream_poly addElement:[p getTree]];
+                 
+                /* ASTTreeParser ruleRefTrack */
+                _last = (CommonTree *)[input LT:1];
+                /* ruleRef */
+                [self pushFollow:FOLLOW_poly_in_poly169];
+                q = [self poly];
+
+                [self popFollow];
+                if ( state.failed ) return retval;
+
+                if ( state.backtracking == 0 ) 
+                [stream_poly addElement:[q getTree]];
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; if ( state.failed ) return retval;
+                [treeAdaptor addChild:root_1 toTree:root_0];
+                _last = _save_last_1;
+                }
+
+                 
+                // AST REWRITE
+                // elements: 8, p, q, p, q
+                // token labels: 
+                // rule labels: retval, q, p
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                if ( state.backtracking == 0 ) {
+
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+                RewriteRuleSubtreeStream *stream_q =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token q" element:q!=nil?[q getTree]:nil] retain];
+                RewriteRuleSubtreeStream *stream_p =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token p" element:p!=nil?[p getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 24:24: -> { [[$p.tree toStringTree] isEqualToString:@\"0\"] }? $q
+                if ( [[(p!=nil?((CommonTree *)p.tree):nil) toStringTree] isEqualToString:@"0"] ) {
+                    [treeAdaptor addChild:[stream_q nextTree] toTree:root_0];
+
+                }
+
+                else // 25:8: -> { [[$q.tree toStringTree] isEqualToString:@\"0\"] }? $p
+                if ( [[(q!=nil?((CommonTree *)q.tree):nil) toStringTree] isEqualToString:@"0"] ) {
+                    [treeAdaptor addChild:[stream_p nextTree] toTree:root_0];
+
+                }
+
+                else // 26:8: -> ^( '+' $p $q)
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:26:11: ^( '+' $p $q)
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:/* ASTTreeParser createRewriteNodeFromElement */
+                        [stream_8 nextNode]
+                         old:root_1];
+
+                        [treeAdaptor addChild:[stream_p nextTree] toTree:root_1];
+
+                        [treeAdaptor addChild:[stream_q nextTree] toTree:root_1];
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+                }
+
+                }
+                break;
+            case 5 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:28:4: ^( MULT INT poly ) // alt
+                {
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_1 = _last;
+                CommonTree *_first_1 = nil;
+                CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                MULT7=(CommonTree *)[self match:input TokenType:MULT Follow:FOLLOW_MULT_in_poly216]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_MULT addElement:MULT7];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; if ( state.failed ) return retval;
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                INT8=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly218]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_INT addElement:INT8];
+
+                 
+                /* ASTTreeParser ruleRefTrack */
+                _last = (CommonTree *)[input LT:1];
+                /* ruleRef */
+                [self pushFollow:FOLLOW_poly_in_poly220];
+                poly9 = [self poly];
+
+                [self popFollow];
+                if ( state.failed ) return retval;
+
+                if ( state.backtracking == 0 ) 
+                [stream_poly addElement:[poly9 getTree]];
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; if ( state.failed ) return retval;
+                [treeAdaptor addChild:root_1 toTree:root_0];
+                _last = _save_last_1;
+                }
+
+                 
+                // AST REWRITE
+                // elements: INT, poly, poly, MULT
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                if ( state.backtracking == 0 ) {
+
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 28:21: -> {$INT.int==1}? poly
+                if ((INT8!=nil?[INT8.text integerValue]:0)==1) {
+                    [treeAdaptor addChild:[stream_poly nextTree] toTree:root_0];
+
+                }
+
+                else // 29:8: -> ^( MULT INT poly )
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:29:11: ^( MULT INT poly )
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:/* ASTTreeParser createRewriteNodeFromElement */
+                        [stream_MULT nextNode]
+                         old:root_1];
+
+                         // TODO: args: 
+                        [treeAdaptor addChild:/* ASTTreeParser createRewriteNodeFromElement */
+                        [stream_INT nextNode]
+                         toTree:root_1];
+
+                        [treeAdaptor addChild:[stream_poly nextTree] toTree:root_1];
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+                }
+
+                }
+                break;
+            case 6 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:31:4: ^( '^' ID e= INT ) // alt
+                {
+
+                /* ASTTreeParser tree */
+                _last = (CommonTree *)[input LT:1];
+                {
+                CommonTree *_save_last_1 = _last;
+                CommonTree *_first_1 = nil;
+                CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                char_literal10=(CommonTree *)[self match:input TokenType:9 Follow:FOLLOW_9_in_poly251]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_9 addElement:char_literal10];
+
+                 
+                [self match:input TokenType:TokenTypeDOWN Follow:nil]; if ( state.failed ) return retval;
+
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                ID11=(CommonTree *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_poly253]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_ID addElement:ID11];
+
+                 
+                /* ASTTreeParser tokenRefBang */
+                _last = (CommonTree *)[input LT:1];
+                e=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly257]; if ( state.failed ) return retval; 
+                if ( state.backtracking == 0 ) [stream_INT addElement:e];
+
+                 
+                [self match:input TokenType:TokenTypeUP Follow:nil]; if ( state.failed ) return retval;
+                [treeAdaptor addChild:root_1 toTree:root_0];
+                _last = _save_last_1;
+                }
+
+                 
+                // AST REWRITE
+                // elements: INT, ID, INT, 9, ID
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                if ( state.backtracking == 0 ) {
+
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 31:21: -> {$e.int==1}? ID
+                if ((e!=nil?[e.text integerValue]:0)==1) {
+                     // TODO: args: 
+                    [treeAdaptor addChild:/* ASTTreeParser createRewriteNodeFromElement */
+                    [stream_ID nextNode]
+                     toTree:root_0];
+
+                }
+
+                else // 32:8: -> {$e.int==0}? INT[@\"1\"]
+                if ((e!=nil?[e.text integerValue]:0)==0) {
+                    [treeAdaptor addChild:
+                            [[treeAdaptor createTree:INT FromToken:@"1" Text:@"INT"] retain]
+                     toTree:root_0];
+
+                }
+
+                else // 33:8: -> ^( '^' ID INT )
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:33:11: ^( '^' ID INT )
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:/* ASTTreeParser createRewriteNodeFromElement */
+                        [stream_9 nextNode]
+                         old:root_1];
+
+                         // TODO: args: 
+                        [treeAdaptor addChild:/* ASTTreeParser createRewriteNodeFromElement */
+                        [stream_ID nextNode]
+                         toTree:root_1];
+
+                         // TODO: args: 
+                        [treeAdaptor addChild:/* ASTTreeParser createRewriteNodeFromElement */
+                        [stream_INT nextNode]
+                         toTree:root_1];
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+                }
+
+                }
+                break;
+            case 7 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:35:4: INT // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTTreeParser tokenRef */
+                _last = (CommonTree *)[input LT:1];
+                INT12=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_poly302]; if ( state.failed ) return retval;
+                if ( state.backtracking == 0 ) {
+                    INT12_tree = (CommonTree *)[treeAdaptor dupNode:INT12];
+
+
+                    [treeAdaptor addChild:INT12_tree toTree:root_0];
+                }
+
+                 
+                /* ASTTreeParser noRewrite */
+                if ( state.backtracking == 0 ) {
+                }
+                }
+                break;
+            case 8 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:36:4: ID // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTTreeParser tokenRef */
+                _last = (CommonTree *)[input LT:1];
+                ID13=(CommonTree *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_poly307]; if ( state.failed ) return retval;
+                if ( state.backtracking == 0 ) {
+                    ID13_tree = (CommonTree *)[treeAdaptor dupNode:ID13];
+
+
+                    [treeAdaptor addChild:ID13_tree toTree:root_0];
+                }
+
+                 
+                /* ASTTreeParser noRewrite */
+                if ( state.backtracking == 0 ) {
+                }
+                }
+                break;
+
+        }
+        /* ASTTreeParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+
+        [stream_INT release];
+        [stream_MULT release];
+        [stream_ID release];
+        [stream_9 release];
+        [stream_8 release];
+        [stream_poly release];
+
+        if ( state.backtracking == 0 ) {
+
+        retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+        }
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end poly */
+// $ANTLR start synpred1_Simplifier_fragment
+- (void) synpred1_Simplifier_fragment
+{
+    /* ruleLabelDefs entry */
+    CommonTree *a = nil;
+    CommonTree *b = nil;
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:16:7: ( ^( '+' a= INT b= INT ) ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:16:7: ^( '+' a= INT b= INT ) // alt
+    {
+
+
+    [self match:input TokenType:8 Follow:FOLLOW_8_in_synpred1_Simplifier52]; if ( state.failed ) return ;
+     
+        [self match:input TokenType:DOWN Follow:nil]; if ( state.failed ) return ;
+
+        a=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_synpred1_Simplifier56]; if ( state.failed ) return ;
+         
+        b=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_synpred1_Simplifier60]; if ( state.failed ) return ;
+         
+        [self match:input TokenType:UP Follow:nil]; if ( state.failed ) return ;
+
+     
+    }
+
+} // $ANTLR end synpred1_Simplifier_fragment
+// $ANTLR start synpred2_Simplifier_fragment
+- (void) synpred2_Simplifier_fragment
+{
+    /* ruleLabelDefs entry */
+    CommonTree *a = nil;
+    CommonTree *b = nil; Simplifier_poly_return *p = nil;
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:18:4: ( ^( '+' ^( '+' a= INT p= poly ) b= INT ) ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:18:4: ^( '+' ^( '+' a= INT p= poly ) b= INT ) // alt
+    {
+
+
+    [self match:input TokenType:8 Follow:FOLLOW_8_in_synpred2_Simplifier73]; if ( state.failed ) return ;
+     
+        [self match:input TokenType:DOWN Follow:nil]; if ( state.failed ) return ;
+
+
+        [self match:input TokenType:8 Follow:FOLLOW_8_in_synpred2_Simplifier76]; if ( state.failed ) return ;
+         
+            [self match:input TokenType:DOWN Follow:nil]; if ( state.failed ) return ;
+
+            a=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_synpred2_Simplifier80]; if ( state.failed ) return ;
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_poly_in_synpred2_Simplifier84];
+            p = [self poly];
+
+            [self popFollow];
+            if ( state.failed ) return ;
+
+             
+            [self match:input TokenType:UP Follow:nil]; if ( state.failed ) return ;
+
+         
+        b=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_synpred2_Simplifier89]; if ( state.failed ) return ;
+         
+        [self match:input TokenType:UP Follow:nil]; if ( state.failed ) return ;
+
+     
+    }
+
+} // $ANTLR end synpred2_Simplifier_fragment
+// $ANTLR start synpred3_Simplifier_fragment
+- (void) synpred3_Simplifier_fragment
+{
+    /* ruleLabelDefs entry */
+    CommonTree *a = nil;
+    CommonTree *b = nil; Simplifier_poly_return *p = nil;
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:21:4: ( ^( '+' ^( '+' p= poly a= INT ) b= INT ) ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:21:4: ^( '+' ^( '+' p= poly a= INT ) b= INT ) // alt
+    {
+
+
+    [self match:input TokenType:8 Follow:FOLLOW_8_in_synpred3_Simplifier117]; if ( state.failed ) return ;
+     
+        [self match:input TokenType:DOWN Follow:nil]; if ( state.failed ) return ;
+
+
+        [self match:input TokenType:8 Follow:FOLLOW_8_in_synpred3_Simplifier120]; if ( state.failed ) return ;
+         
+            [self match:input TokenType:DOWN Follow:nil]; if ( state.failed ) return ;
+
+            /* ruleRef */
+            [self pushFollow:FOLLOW_poly_in_synpred3_Simplifier124];
+            p = [self poly];
+
+            [self popFollow];
+            if ( state.failed ) return ;
+
+             
+            a=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_synpred3_Simplifier128]; if ( state.failed ) return ;
+             
+            [self match:input TokenType:UP Follow:nil]; if ( state.failed ) return ;
+
+         
+        b=(CommonTree *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_synpred3_Simplifier133]; if ( state.failed ) return ;
+         
+        [self match:input TokenType:UP Follow:nil]; if ( state.failed ) return ;
+
+     
+    }
+
+} // $ANTLR end synpred3_Simplifier_fragment
+// $ANTLR start synpred4_Simplifier_fragment
+- (void) synpred4_Simplifier_fragment
+{
+    /* ruleLabelDefs entry */
+    Simplifier_poly_return * p = nil ;
+
+    Simplifier_poly_return * q = nil ;
+
+
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:24:4: ( ^( '+' p= poly q= poly ) ) // ruleBlockSingleAlt
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/polydiff/Simplifier.g:24:4: ^( '+' p= poly q= poly ) // alt
+    {
+
+
+    [self match:input TokenType:8 Follow:FOLLOW_8_in_synpred4_Simplifier161]; if ( state.failed ) return ;
+     
+        [self match:input TokenType:DOWN Follow:nil]; if ( state.failed ) return ;
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_poly_in_synpred4_Simplifier165];
+        p = [self poly];
+
+        [self popFollow];
+        if ( state.failed ) return ;
+
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_poly_in_synpred4_Simplifier169];
+        q = [self poly];
+
+        [self popFollow];
+        if ( state.failed ) return ;
+
+         
+        [self match:input TokenType:UP Follow:nil]; if ( state.failed ) return ;
+
+     
+    }
+
+} // $ANTLR end synpred4_Simplifier_fragment
+/* ObjC end rules */
+
+@end /* end of Simplifier implementation line 692 */
diff --git a/runtime/ObjC/Framework/examples/polydiff/Simplifier.tokens b/runtime/ObjC/Framework/examples/polydiff/Simplifier.tokens
new file mode 100644
index 0000000..c711b35
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/Simplifier.tokens
@@ -0,0 +1,8 @@
+T__8=8
+T__9=9
+ID=4
+INT=5
+MULT=6
+WS=7
+'+'=8
+'^'=9
diff --git a/runtime/ObjC/Framework/examples/polydiff/files b/runtime/ObjC/Framework/examples/polydiff/files
new file mode 100644
index 0000000..f061d0c
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/files
@@ -0,0 +1,7 @@
+Poly.g
+PolyDifferentiator.g
+Simplifier.g
+PolyPrinter.g
+Main.java
+input
+output
diff --git a/runtime/ObjC/Framework/examples/polydiff/input b/runtime/ObjC/Framework/examples/polydiff/input
new file mode 100644
index 0000000..530a4c8
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/input
@@ -0,0 +1 @@
+2x^3 + x^5 + 4x + 10x + 8x + x + 2
diff --git a/runtime/ObjC/Framework/examples/polydiff/main.m b/runtime/ObjC/Framework/examples/polydiff/main.m
new file mode 100644
index 0000000..082ccd2
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/main.m
@@ -0,0 +1,50 @@
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+#import "PolyLexer.h"
+#import "PolyParser.h"
+// #import "PolyDifferentiator.h"
+// #import "PolyPrinter.h"
+// #import "Simplifier.h"
+
+
+int main(int argc, const char *argv[])
+{
+    NSError *error = nil;
+    NSLog(@"starting polydiff\n");
+	NSString *input = [NSString stringWithContentsOfFile:@"../../examples/polydiff/input"  encoding:NSASCIIStringEncoding error:&error];
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:input];
+	NSLog(@"%@", input);
+
+// BUILD AST
+    PolyLexer *lex = [PolyLexer newPolyLexerWithCharStream:stream];
+    CommonTokenStream *tokens = [CommonTokenStream newCommonTokenStreamWithTokenSource:lex];
+    PolyParser *parser = [PolyParser newPolyParser:tokens];
+    PolyParser_poly_return *r = [parser poly];
+    NSLog(@"tree=%@", [r.tree toStringTree]);
+
+#ifdef DONTUSENOMO
+// DIFFERENTIATE
+    CommonTreeNodeStream *nodes = [CommonTreeNodeStream newCommonTreeNodeStream:r.tree];
+    [nodes setTokenStream:tokens];
+    PolyDifferentiator *differ = [PolyDifferentiator newPolyDifferentiator:nodes];
+    PolyDifferentiator_poly_return *r2 = [differ poly];
+    NSLog("d/dx=%@", [r2.tree toStringTree]);
+
+// SIMPLIFY / NORMALIZE
+    nodes = [CommonTreeNodeStream newCommonTreeNodeStream:r2.tree];
+    [nodes setTokenStream:tokens];
+    Simplifier *reducer = [Simplifier newSimplifier:nodes];
+    Simplifier_poly_return *r3 = [reducer poly];
+    NSLog("simplified=%@", [r3.tree toStringTree]);
+
+// CONVERT BACK TO POLYNOMIAL
+    nodes = [CommonTreeNodeStream newCommonTreeNodeStream:r3.tree];
+    [nodes setTokenStream:tokens];
+    PolyPrinter *printer = [PolyPrinter newPolyPrinter:nodes];
+    PolyPrinter_poly_return *r4 = [printer poly];
+    NSLog(@"%@", [r4.st toString]);
+#endif
+
+    NSLog(@"exiting PolyDiff\n");
+    return 0;
+}
diff --git a/runtime/ObjC/Framework/examples/polydiff/output b/runtime/ObjC/Framework/examples/polydiff/output
new file mode 100644
index 0000000..e525030
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/polydiff/output
@@ -0,0 +1,4 @@
+tree=(+ (+ (+ (+ (+ (+ (* 2 (^ x 3)) (^ x 5)) (* 4 x)) (* 10 x)) (* 8 x)) x) 2)
+d/dx=(+ (+ (+ (+ (+ (+ (* 6 (^ x 2)) (* 5 (^ x 4))) 4) 10) 8) 1) 0)
+simplified=(+ (+ (+ (+ (* 6 (^ x 2)) (* 5 (^ x 4))) 4) 18) 1)
+6x^2+5x^4+4+18+1
diff --git a/runtime/ObjC/Framework/examples/scopes/SymbolTable.g b/runtime/ObjC/Framework/examples/scopes/SymbolTable.g
new file mode 100644
index 0000000..db37fc8
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/scopes/SymbolTable.g
@@ -0,0 +1,75 @@
+grammar SymbolTable;
+
+/* Scope of symbol names.  Both globals and block rules need to push a new
+ * symbol table upon entry and they must use the same stack.  So, I must
+ * define a global scope and say that globals and block use this by saying
+ * 'scope Symbols;' in those rule definitions.
+ */
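+
+/* A minimal sketch (restating the pattern used by the 'globals' and 'block'
+ * rules below, not new behavior): a rule that declares 'scope Symbols;'
+ * pushes a fresh entry on the shared stack and seeds it in its @init block:
+ *     $Symbols::names = [PtrBuffer newPtrBufferWithLen:10];
+ * Any rule nested beneath it can then record a symbol in the innermost scope:
+ *     [$Symbols::names addObject:$ID];
+ */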
+
+options {
+	language=ObjC;
+}
+
+scope Symbols {
+  PtrBuffer *names;
+}
+
+@memVars {
+int level;
+}
+
+@init {
+level = 0;
+}
+
+prog
+// scope Symbols;
+    :   globals (method)*
+    ;
+
+globals
+scope Symbols;
+@init {
+    level++;
+    $Symbols::names = [PtrBuffer newPtrBufferWithLen:10];
+}
+    :   (decl)*
+        {
+            NSLog( @"globals: \%@", [$Symbols::names toString] );
+            level--;
+        }
+    ;
+
+method
+    :   'method' ID '(' ')' block
+    ;
+
+block
+scope Symbols;
+@init {
+    level++;
+    $Symbols::names = [PtrBuffer newPtrBufferWithLen:10];
+}
+    :   '{' (decl)* (stat)* '}'
+        {
+            NSLog( @"level \%d symbols: \%@", level, [$Symbols::names toString] );
+            level--;
+        }
+    ;
+
+stat:   ID '=' INT ';'
+    |   block
+    ;
+
+decl:   'int' ID ';'
+        {[$Symbols::names addObject:$ID];} // add to current symbol table
+    ;
+
+ID  :   ('a'..'z')+
+    ;
+
+INT :   ('0'..'9')+
+    ;
+
+WS  :   (' '|'\n'|'\r')+ {$channel=HIDDEN;}
+    ;
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTable.tokens b/runtime/ObjC/Framework/examples/scopes/SymbolTable.tokens
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTable.tokens
rename to runtime/ObjC/Framework/examples/scopes/SymbolTable.tokens
diff --git a/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.h b/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.h
new file mode 100644
index 0000000..b5c76ec
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.h
@@ -0,0 +1,55 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g 2012-02-16 17:50:30
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* Start cyclicDFAInterface */
+
+#pragma mark Rule return scopes Interface start
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__7 7
+#define T__8 8
+#define T__9 9
+#define T__10 10
+#define T__11 11
+#define T__12 12
+#define T__13 13
+#define T__14 14
+#define ID 4
+#define INT 5
+#define WS 6
+/* interface lexer class */
+@interface SymbolTableLexer : Lexer { // line 283
+/* ObjC start of actions.lexer.memVars */
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (SymbolTableLexer *)newSymbolTableLexerWithCharStream:(id<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+/* ObjC end actions.lexer.methodsDecl */
+- (void) mT__7 ; 
+- (void) mT__8 ; 
+- (void) mT__9 ; 
+- (void) mT__10 ; 
+- (void) mT__11 ; 
+- (void) mT__12 ; 
+- (void) mT__13 ; 
+- (void) mT__14 ; 
+- (void) mID ; 
+- (void) mINT ; 
+- (void) mWS ; 
+- (void) mTokens ; 
+
+@end /* end of SymbolTableLexer interface */
+
diff --git a/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.m b/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.m
new file mode 100644
index 0000000..d32d85c
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/scopes/SymbolTableLexer.m
@@ -0,0 +1,844 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g
+ *     -                            On : 2012-02-16 17:50:30
+ *     -                 for the lexer : SymbolTableLexerLexer
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g 2012-02-16 17:50:30
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "SymbolTableLexer.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+/** As per Terence: No returns for lexer rules! */
+@implementation SymbolTableLexer // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (SymbolTableLexer *)newSymbolTableLexerWithCharStream:(id<CharStream>)anInput
+{
+    return [[SymbolTableLexer alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:12+1]];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+
+/* Start of Rules */
+// $ANTLR start "T__7"
+- (void) mT__7
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__7;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:7:6: ( '(' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:7:8: '(' // alt
+        {
+
+
+        [self matchChar:'(']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__7" */
+// $ANTLR start "T__8"
+- (void) mT__8
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__8;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:8:6: ( ')' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:8:8: ')' // alt
+        {
+
+
+        [self matchChar:')']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__8" */
+// $ANTLR start "T__9"
+- (void) mT__9
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__9;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:9:6: ( ';' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:9:8: ';' // alt
+        {
+
+
+        [self matchChar:';']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__9" */
+// $ANTLR start "T__10"
+- (void) mT__10
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__10;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:10:7: ( '=' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:10:9: '=' // alt
+        {
+
+
+        [self matchChar:'=']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__10" */
+// $ANTLR start "T__11"
+- (void) mT__11
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__11;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:11:7: ( 'int' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:11:9: 'int' // alt
+        {
+
+
+        [self matchString:@"int"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__11" */
+// $ANTLR start "T__12"
+- (void) mT__12
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__12;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:12:7: ( 'method' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:12:9: 'method' // alt
+        {
+
+
+        [self matchString:@"method"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__12" */
+// $ANTLR start "T__13"
+- (void) mT__13
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__13;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:13:7: ( '{' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:13:9: '{' // alt
+        {
+
+
+        [self matchChar:'{']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__13" */
+// $ANTLR start "T__14"
+- (void) mT__14
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__14;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:14:7: ( '}' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:14:9: '}' // alt
+        {
+
+
+        [self matchChar:'}']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__14" */
+// $ANTLR start "ID"
+- (void) mID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = ID;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:68:5: ( ( 'a' .. 'z' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:68:9: ( 'a' .. 'z' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:68:9: ( 'a' .. 'z' )+ // positiveClosureBlock
+        NSInteger cnt1 = 0;
+        do {
+            NSInteger alt1 = 2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g: // alt
+                    {
+
+                    if ((([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt1 >= 1 )
+                        goto loop1;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:1];
+                    @throw eee;
+            }
+            cnt1++;
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "ID" */
+// $ANTLR start "INT"
+- (void) mINT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = INT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:71:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:71:9: ( '0' .. '9' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:71:9: ( '0' .. '9' )+ // positiveClosureBlock
+        NSInteger cnt2 = 0;
+        do {
+            NSInteger alt2 = 2;
+            NSInteger LA2_0 = [input LA:1];
+            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
+                alt2=1;
+            }
+
+
+            switch (alt2) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt2 >= 1 )
+                        goto loop2;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:2];
+                    @throw eee;
+            }
+            cnt2++;
+        } while (YES);
+        loop2: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "INT" */
+// $ANTLR start "WS"
+- (void) mWS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = WS;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:74:5: ( ( ' ' | '\\n' | '\\r' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:74:9: ( ' ' | '\\n' | '\\r' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:74:9: ( ' ' | '\\n' | '\\r' )+ // positiveClosureBlock
+        NSInteger cnt3 = 0;
+        do {
+            NSInteger alt3 = 2;
+            NSInteger LA3_0 = [input LA:1];
+            if ( (LA3_0=='\n'||LA3_0=='\r'||LA3_0==' ') ) {
+                alt3=1;
+            }
+
+
+            switch (alt3) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g: // alt
+                    {
+
+                    if ([input LA:1] == '\n'||[input LA:1] == '\r'||[input LA:1] == ' ') {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt3 >= 1 )
+                        goto loop3;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:3];
+                    @throw eee;
+            }
+            cnt3++;
+        } while (YES);
+        loop3: ;
+
+         
+
+        _channel=HIDDEN;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "WS" */
+- (void) mTokens
+{
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:8: ( T__7 | T__8 | T__9 | T__10 | T__11 | T__12 | T__13 | T__14 | ID | INT | WS ) //ruleblock
+    NSInteger alt4=11;
+    unichar charLA4 = [input LA:1];
+    switch (charLA4) {
+        case '(': ;
+            {
+            alt4=1;
+            }
+            break;
+        case ')': ;
+            {
+            alt4=2;
+            }
+            break;
+        case ';': ;
+            {
+            alt4=3;
+            }
+            break;
+        case '=': ;
+            {
+            alt4=4;
+            }
+            break;
+        case 'i': ;
+            {
+            NSInteger LA4_5 = [input LA:2];
+
+            if ( (LA4_5=='n') ) {
+                NSInteger LA4_12 = [input LA:3];
+
+                if ( (LA4_12=='t') ) {
+                    NSInteger LA4_14 = [input LA:4];
+
+                    if ( ((LA4_14 >= 'a' && LA4_14 <= 'z')) ) {
+                        alt4=9;
+                    }
+                    else {
+                        alt4 = 5;
+                    }
+                }
+                else {
+                    alt4 = 9;
+                }
+            }
+            else {
+                alt4 = 9;
+            }
+            }
+            break;
+        case 'm': ;
+            {
+            NSInteger LA4_6 = [input LA:2];
+
+            if ( (LA4_6=='e') ) {
+                NSInteger LA4_13 = [input LA:3];
+
+                if ( (LA4_13=='t') ) {
+                    NSInteger LA4_15 = [input LA:4];
+
+                    if ( (LA4_15=='h') ) {
+                        NSInteger LA4_17 = [input LA:5];
+
+                        if ( (LA4_17=='o') ) {
+                            NSInteger LA4_18 = [input LA:6];
+
+                            if ( (LA4_18=='d') ) {
+                                NSInteger LA4_19 = [input LA:7];
+
+                                if ( ((LA4_19 >= 'a' && LA4_19 <= 'z')) ) {
+                                    alt4=9;
+                                }
+                                else {
+                                    alt4 = 6;
+                                }
+                            }
+                            else {
+                                alt4 = 9;
+                            }
+                        }
+                        else {
+                            alt4 = 9;
+                        }
+                    }
+                    else {
+                        alt4 = 9;
+                    }
+                }
+                else {
+                    alt4 = 9;
+                }
+            }
+            else {
+                alt4 = 9;
+            }
+            }
+            break;
+        case '{': ;
+            {
+            alt4=7;
+            }
+            break;
+        case '}': ;
+            {
+            alt4=8;
+            }
+            break;
+        case 'a': ;
+        case 'b': ;
+        case 'c': ;
+        case 'd': ;
+        case 'e': ;
+        case 'f': ;
+        case 'g': ;
+        case 'h': ;
+        case 'j': ;
+        case 'k': ;
+        case 'l': ;
+        case 'n': ;
+        case 'o': ;
+        case 'p': ;
+        case 'q': ;
+        case 'r': ;
+        case 's': ;
+        case 't': ;
+        case 'u': ;
+        case 'v': ;
+        case 'w': ;
+        case 'x': ;
+        case 'y': ;
+        case 'z': ;
+            {
+            alt4=9;
+            }
+            break;
+        case '0': ;
+        case '1': ;
+        case '2': ;
+        case '3': ;
+        case '4': ;
+        case '5': ;
+        case '6': ;
+        case '7': ;
+        case '8': ;
+        case '9': ;
+            {
+            alt4=10;
+            }
+            break;
+        case '\n': ;
+        case '\r': ;
+        case ' ': ;
+            {
+            alt4=11;
+            }
+            break;
+
+    default: ;
+        NoViableAltException *nvae = [NoViableAltException newException:4 state:0 stream:input];
+        nvae.c = charLA4;
+        @throw nvae;
+
+    }
+
+    switch (alt4) {
+        case 1 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:10: T__7 // alt
+            {
+
+
+            [self mT__7]; 
+
+
+             
+            }
+            break;
+        case 2 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:15: T__8 // alt
+            {
+
+
+            [self mT__8]; 
+
+
+             
+            }
+            break;
+        case 3 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:20: T__9 // alt
+            {
+
+
+            [self mT__9]; 
+
+
+             
+            }
+            break;
+        case 4 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:25: T__10 // alt
+            {
+
+
+            [self mT__10]; 
+
+
+             
+            }
+            break;
+        case 5 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:31: T__11 // alt
+            {
+
+
+            [self mT__11]; 
+
+
+             
+            }
+            break;
+        case 6 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:37: T__12 // alt
+            {
+
+
+            [self mT__12]; 
+
+
+             
+            }
+            break;
+        case 7 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:43: T__13 // alt
+            {
+
+
+            [self mT__13]; 
+
+
+             
+            }
+            break;
+        case 8 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:49: T__14 // alt
+            {
+
+
+            [self mT__14]; 
+
+
+             
+            }
+            break;
+        case 9 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:55: ID // alt
+            {
+
+
+            [self mID]; 
+
+
+             
+            }
+            break;
+        case 10 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:58: INT // alt
+            {
+
+
+            [self mINT]; 
+
+
+             
+            }
+            break;
+        case 11 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:1:62: WS // alt
+            {
+
+
+            [self mWS]; 
+
+
+             
+            }
+            break;
+
+    }
+
+}
+
+@end /* end of SymbolTableLexer implementation line 397 */
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.h b/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.h
new file mode 100644
index 0000000..afd118a
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.h
@@ -0,0 +1,101 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g 2012-02-16 17:50:30
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* parserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__7 7
+#define T__8 8
+#define T__9 9
+#define T__10 10
+#define T__11 11
+#define T__12 12
+#define T__13 13
+#define T__14 14
+#define ID 4
+#define INT 5
+#define WS 6
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+/* globalAttributeScopeInterface */
+@interface Symbols_Scope : SymbolsScope {
+PtrBuffer * names;
+ }
+/* start of globalAttributeScopeInterface properties */
+@property (assign, getter=getnames, setter=setnames:) PtrBuffer * names;
+/* end globalAttributeScopeInterface properties */
++ (Symbols_Scope *)newSymbols_Scope;
+- (id) init;
+/* start of globalAttributeScopeInterface methodsDecl */
+- (PtrBuffer *)getnames;
+- (void)setnames:(PtrBuffer *)aVal;
+/* End of globalAttributeScopeInterface methodsDecl */
+@end /* end of Symbols_Scope interface */
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+
+/* Interface grammar class */
+@interface SymbolTableParser  : Parser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+/* globalAttributeScopeMemVar */
+SymbolStack *Symbols_stack;
+Symbols_Scope *Symbols_scope;
+
+
+/* ObjC start of actions.(actionScope).memVars */
+
+int level;
+
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* ObjC end of memVars */
+
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newSymbolTableParser:(id<TokenStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* ObjC end of methodsDecl */
+
+- (void)prog; 
+- (void)globals; 
+- (void)method; 
+- (void)block; 
+- (void)stat; 
+- (void)decl; 
+
+
+@end /* end of SymbolTableParser interface */
+
diff --git a/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.m b/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.m
new file mode 100644
index 0000000..2f144f8
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/scopes/SymbolTableParser.m
@@ -0,0 +1,630 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g
+ *     -                            On : 2012-02-16 17:50:30
+ *     -                for the parser : SymbolTableParserParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g 2012-02-16 17:50:30
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "SymbolTableParser.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_globals_in_prog50;
+static const unsigned long long FOLLOW_globals_in_prog50_data[] = { 0x0000000000001002LL};
+static ANTLRBitSet *FOLLOW_method_in_prog53;
+static const unsigned long long FOLLOW_method_in_prog53_data[] = { 0x0000000000001002LL};
+static ANTLRBitSet *FOLLOW_decl_in_globals85;
+static const unsigned long long FOLLOW_decl_in_globals85_data[] = { 0x0000000000000802LL};
+static ANTLRBitSet *FOLLOW_12_in_method116;
+static const unsigned long long FOLLOW_12_in_method116_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_ID_in_method118;
+static const unsigned long long FOLLOW_ID_in_method118_data[] = { 0x0000000000000080LL};
+static ANTLRBitSet *FOLLOW_7_in_method120;
+static const unsigned long long FOLLOW_7_in_method120_data[] = { 0x0000000000000100LL};
+static ANTLRBitSet *FOLLOW_8_in_method122;
+static const unsigned long long FOLLOW_8_in_method122_data[] = { 0x0000000000002000LL};
+static ANTLRBitSet *FOLLOW_block_in_method124;
+static const unsigned long long FOLLOW_block_in_method124_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_13_in_block153;
+static const unsigned long long FOLLOW_13_in_block153_data[] = { 0x0000000000006810LL};
+static ANTLRBitSet *FOLLOW_decl_in_block156;
+static const unsigned long long FOLLOW_decl_in_block156_data[] = { 0x0000000000006810LL};
+static ANTLRBitSet *FOLLOW_stat_in_block161;
+static const unsigned long long FOLLOW_stat_in_block161_data[] = { 0x0000000000006010LL};
+static ANTLRBitSet *FOLLOW_14_in_block165;
+static const unsigned long long FOLLOW_14_in_block165_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_ID_in_stat189;
+static const unsigned long long FOLLOW_ID_in_stat189_data[] = { 0x0000000000000400LL};
+static ANTLRBitSet *FOLLOW_10_in_stat191;
+static const unsigned long long FOLLOW_10_in_stat191_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_INT_in_stat193;
+static const unsigned long long FOLLOW_INT_in_stat193_data[] = { 0x0000000000000200LL};
+static ANTLRBitSet *FOLLOW_9_in_stat195;
+static const unsigned long long FOLLOW_9_in_stat195_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_block_in_stat205;
+static const unsigned long long FOLLOW_block_in_stat205_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_11_in_decl219;
+static const unsigned long long FOLLOW_11_in_decl219_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_ID_in_decl221;
+static const unsigned long long FOLLOW_ID_in_decl221_data[] = { 0x0000000000000200LL};
+static ANTLRBitSet *FOLLOW_9_in_decl223;
+static const unsigned long long FOLLOW_9_in_decl223_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+@implementation Symbols_Scope  /* globalAttributeScopeImplementation */
+/* start of synthesize -- OBJC-Line 1750 */
+@synthesize names; 
+
++ (Symbols_Scope *)newSymbols_Scope
+{
+return [[Symbols_Scope alloc] init];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* start of iterate get and set functions */
+- (PtrBuffer *)getnames { return( names ); }
+- (void)setnames:(PtrBuffer *)aVal { names = aVal; }
+
+/* End of iterate get and set functions */
+@end /* end of Symbols_Scope implementation */
+
+
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+
+@implementation SymbolTableParser  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+static SymbolStack *Symbols_stack;
+ 
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_globals_in_prog50 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_globals_in_prog50_data Count:(NSUInteger)1] retain];
+    FOLLOW_method_in_prog53 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_method_in_prog53_data Count:(NSUInteger)1] retain];
+    FOLLOW_decl_in_globals85 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_decl_in_globals85_data Count:(NSUInteger)1] retain];
+    FOLLOW_12_in_method116 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_12_in_method116_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_method118 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_method118_data Count:(NSUInteger)1] retain];
+    FOLLOW_7_in_method120 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_7_in_method120_data Count:(NSUInteger)1] retain];
+    FOLLOW_8_in_method122 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_8_in_method122_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_method124 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_method124_data Count:(NSUInteger)1] retain];
+    FOLLOW_13_in_block153 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_13_in_block153_data Count:(NSUInteger)1] retain];
+    FOLLOW_decl_in_block156 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_decl_in_block156_data Count:(NSUInteger)1] retain];
+    FOLLOW_stat_in_block161 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block161_data Count:(NSUInteger)1] retain];
+    FOLLOW_14_in_block165 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_14_in_block165_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_stat189 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_stat189_data Count:(NSUInteger)1] retain];
+    FOLLOW_10_in_stat191 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_10_in_stat191_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_stat193 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_stat193_data Count:(NSUInteger)1] retain];
+    FOLLOW_9_in_stat195 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_stat195_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_stat205 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat205_data Count:(NSUInteger)1] retain];
+    FOLLOW_11_in_decl219 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_11_in_decl219_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_decl221 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_decl221_data Count:(NSUInteger)1] retain];
+    FOLLOW_9_in_decl223 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_9_in_decl223_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"ID", @"INT", @"WS", @"'('", @"')'", @"';'", @"'='", @"'int'", @"'method'", 
+ @"'{'", @"'}'", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g"];
+}
+
++ (SymbolTableParser *)newSymbolTableParser:(id<TokenStream>)aStream
+{
+    return [[SymbolTableParser alloc] initWithTokenStream:aStream];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)aStream
+{
+    self = [super initWithTokenStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:6+1] retain]];
+    if ( self != nil ) {
+        /* globalAttributeScopeInit */
+
+        Symbols_scope = [Symbols_Scope newSymbols_Scope];
+
+        Symbols_stack = [SymbolStack newSymbolStackWithLen:30];
+
+        /* start of actions-actionScope-init */
+
+        level = 0;
+
+        /* start of init */
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [Symbols_stack release];
+     
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start prog
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:25:1: prog : globals ( method )* ;
+ */
+- (void) prog
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:27:5: ( globals ( method )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:27:9: globals ( method )* // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_globals_in_prog50];
+        [self globals];
+
+        [self popFollow];
+
+
+         
+
+        do {
+            NSInteger alt1=2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( (LA1_0==12) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:27:18: method // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_method_in_prog53];
+                    [self method];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop1;
+            }
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end prog */
+
+/*
+ * $ANTLR start globals
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:30:1: globals : ( decl )* ;
+ */
+- (void) globals
+{
+    /* ruleScopeSetUp */
+    [Symbols_stack push:[Symbols_Scope newSymbols_Scope]];
+
+    /* ruleDeclarations */
+
+
+        level++;
+        /* scopeSetAttributeRef */((Symbols_Scope *)[Symbols_stack peek]).names =  [PtrBuffer newPtrBufferWithLen:10];
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:36:5: ( ( decl )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:36:9: ( decl )* // alt
+        {
+
+
+        do {
+            NSInteger alt2=2;
+            NSInteger LA2_0 = [input LA:1];
+            if ( (LA2_0==11) ) {
+                alt2=1;
+            }
+
+
+            switch (alt2) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:36:10: decl // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_decl_in_globals85];
+                    [self decl];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop2;
+            }
+        } while (YES);
+        loop2: ;
+
+         
+
+
+                    NSLog( @"globals: %@", [((Symbols_Scope *)[Symbols_stack peek]).names toString] );
+                    level--;
+                
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+        [Symbols_stack pop];
+
+    }
+    return ;
+}
+/* $ANTLR end globals */
+
+/*
+ * $ANTLR start method
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:43:1: method : 'method' ID '(' ')' block ;
+ */
+- (void) method
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:44:5: ( 'method' ID '(' ')' block ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:44:9: 'method' ID '(' ')' block // alt
+        {
+
+        [self match:input TokenType:12 Follow:FOLLOW_12_in_method116]; 
+         
+        [self match:input TokenType:ID Follow:FOLLOW_ID_in_method118]; 
+         
+        [self match:input TokenType:7 Follow:FOLLOW_7_in_method120]; 
+         
+        [self match:input TokenType:8 Follow:FOLLOW_8_in_method122]; 
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_block_in_method124];
+        [self block];
+
+        [self popFollow];
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end method */
+
+/*
+ * $ANTLR start block
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:47:1: block : '{' ( decl )* ( stat )* '}' ;
+ */
+- (void) block
+{
+    /* ruleScopeSetUp */
+    [Symbols_stack push:[Symbols_Scope newSymbols_Scope]];
+
+    /* ruleDeclarations */
+
+
+        level++;
+        /* scopeSetAttributeRef */((Symbols_Scope *)[Symbols_stack peek]).names =  [PtrBuffer newPtrBufferWithLen:10];
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:53:5: ( '{' ( decl )* ( stat )* '}' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:53:9: '{' ( decl )* ( stat )* '}' // alt
+        {
+
+        [self match:input TokenType:13 Follow:FOLLOW_13_in_block153]; 
+         
+
+        do {
+            NSInteger alt3=2;
+            NSInteger LA3_0 = [input LA:1];
+            if ( (LA3_0==11) ) {
+                alt3=1;
+            }
+
+
+            switch (alt3) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:53:14: decl // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_decl_in_block156];
+                    [self decl];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop3;
+            }
+        } while (YES);
+        loop3: ;
+
+         
+
+        do {
+            NSInteger alt4=2;
+            NSInteger LA4_0 = [input LA:1];
+            if ( (LA4_0==ID||LA4_0==13) ) {
+                alt4=1;
+            }
+
+
+            switch (alt4) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:53:22: stat // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_stat_in_block161];
+                    [self stat];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop4;
+            }
+        } while (YES);
+        loop4: ;
+
+         
+        [self match:input TokenType:14 Follow:FOLLOW_14_in_block165]; 
+         
+
+
+                    NSLog( @"level %d symbols: %@", level, [((Symbols_Scope *)[Symbols_stack peek]).names toString] );
+                    level--;
+                
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+        [Symbols_stack pop];
+
+    }
+    return ;
+}
+/* $ANTLR end block */
+
+/*
+ * $ANTLR start stat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:60:1: stat : ( ID '=' INT ';' | block );
+ */
+- (void) stat
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:60:5: ( ID '=' INT ';' | block ) //ruleblock
+        NSInteger alt5=2;
+        NSInteger LA5_0 = [input LA:1];
+
+        if ( (LA5_0==ID) ) {
+            alt5=1;
+        }
+        else if ( (LA5_0==13) ) {
+            alt5=2;
+        }
+        else {
+            NoViableAltException *nvae = [NoViableAltException newException:5 state:0 stream:input];
+            nvae.c = LA5_0;
+            @throw nvae;
+
+        }
+        switch (alt5) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:60:9: ID '=' INT ';' // alt
+                {
+
+                [self match:input TokenType:ID Follow:FOLLOW_ID_in_stat189]; 
+                 
+                [self match:input TokenType:10 Follow:FOLLOW_10_in_stat191]; 
+                 
+                [self match:input TokenType:INT Follow:FOLLOW_INT_in_stat193]; 
+                 
+                [self match:input TokenType:9 Follow:FOLLOW_9_in_stat195]; 
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:61:9: block // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_block_in_stat205];
+                [self block];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end stat */
+
+/*
+ * $ANTLR start decl
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:64:1: decl : 'int' ID ';' ;
+ */
+- (void) decl
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+        CommonToken *ID1 = nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:64:5: ( 'int' ID ';' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/SymbolTable.g:64:9: 'int' ID ';' // alt
+        {
+
+        [self match:input TokenType:11 Follow:FOLLOW_11_in_decl219]; 
+         
+        ID1=(CommonToken *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_decl221]; 
+         
+        [self match:input TokenType:9 Follow:FOLLOW_9_in_decl223]; 
+         
+
+        [((Symbols_Scope *)[Symbols_stack peek]).names addObject:ID1];
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end decl */
+/* ObjC end rules */
+
+@end /* end of SymbolTableParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTable__.gl b/runtime/ObjC/Framework/examples/scopes/SymbolTable__.gl
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/scopes/SymbolTable__.gl
rename to runtime/ObjC/Framework/examples/scopes/SymbolTable__.gl
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/input b/runtime/ObjC/Framework/examples/scopes/input
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/scopes/input
rename to runtime/ObjC/Framework/examples/scopes/input
diff --git a/runtime/ObjC/Framework/examples/scopes/main.m b/runtime/ObjC/Framework/examples/scopes/main.m
new file mode 100644
index 0000000..f7700fa
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/scopes/main.m
@@ -0,0 +1,32 @@
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+#import "SymbolTableLexer.h"
+#import "SymbolTableParser.h"
+
+int main()
+{
+    NSError *error;
+	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+	
+	NSString *string = [NSString stringWithContentsOfFile:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/scopes/input" encoding:NSASCIIStringEncoding error:&error];
+	NSLog(@"input is : %@", string);
+	ANTLRStringStream *stream = [[ANTLRStringStream alloc] initWithStringNoCopy:string];
+	SymbolTableLexer *lexer = [[SymbolTableLexer alloc] initWithCharStream:stream];
+	
+//	CommonToken *currentToken;
+//	while ((currentToken = [lexer nextToken]) && currentToken.type != TokenTypeEOF) {
+//		NSLog(@"%@", currentToken);
+//	}
+	
+	CommonTokenStream *tokens = [[CommonTokenStream alloc] initWithTokenSource:lexer];
+	SymbolTableParser *parser = [[SymbolTableParser alloc] initWithTokenStream:tokens];
+	[parser prog];
+
+	[lexer release];
+	[stream release];
+	[tokens release];
+	[parser release];
+	
+	[pool release];
+	return 0;
+}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/scopes/output b/runtime/ObjC/Framework/examples/scopes/output
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/scopes/output
rename to runtime/ObjC/Framework/examples/scopes/output
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g
rename to runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.tokens b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.tokens
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.tokens
rename to runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.tokens
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.h b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.h
new file mode 100644
index 0000000..e9e2d2a
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.h
@@ -0,0 +1,73 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g 2012-02-16 17:40:52
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* Start cyclicDFAInterface */
+
+#pragma mark Rule return scopes Interface start
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define ARG_DEF 4
+#define BLOCK 5
+#define FUNC_DECL 6
+#define FUNC_DEF 7
+#define FUNC_HDR 8
+#define K_CHAR 9
+#define K_COMMA 10
+#define K_EQ 11
+#define K_EQEQ 12
+#define K_FOR 13
+#define K_ID 14
+#define K_INT 15
+#define K_INT_TYPE 16
+#define K_LCURLY 17
+#define K_LCURVE 18
+#define K_LT 19
+#define K_PLUS 20
+#define K_RCURLY 21
+#define K_RCURVE 22
+#define K_SEMICOLON 23
+#define K_VOID 24
+#define VAR_DEF 25
+#define WS 26
+/* interface lexer class */
+@interface SimpleCLexer : Lexer { // line 283
+/* ObjC start of actions.lexer.memVars */
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (SimpleCLexer *)newSimpleCLexerWithCharStream:(id<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+/* ObjC end actions.lexer.methodsDecl */
+- (void) mK_FOR ; 
+- (void) mK_CHAR ; 
+- (void) mK_INT_TYPE ; 
+- (void) mK_VOID ; 
+- (void) mK_ID ; 
+- (void) mK_INT ; 
+- (void) mK_LCURVE ; 
+- (void) mK_RCURVE ; 
+- (void) mK_PLUS ; 
+- (void) mK_COMMA ; 
+- (void) mK_SEMICOLON ; 
+- (void) mK_LT ; 
+- (void) mK_EQ ; 
+- (void) mK_EQEQ ; 
+- (void) mK_LCURLY ; 
+- (void) mK_RCURLY ; 
+- (void) mWS ; 
+- (void) mTokens ; 
+
+@end /* end of SimpleCLexer interface */
+
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCLexer.java b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.java
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCLexer.java
rename to runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.java
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.m b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.m
new file mode 100644
index 0000000..3f20dff
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCLexer.m
@@ -0,0 +1,1224 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g
+ *     -                            On : 2012-02-16 17:40:52
+ *     -                 for the lexer : SimpleCLexerLexer
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g 2012-02-16 17:40:52
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "SimpleCLexer.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+/** As per Terence: No returns for lexer rules! */
+@implementation SimpleCLexer // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (SimpleCLexer *)newSimpleCLexerWithCharStream:(id<CharStream>)anInput
+{
+    return [[SimpleCLexer alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:18+1]];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+
+/* Start of Rules */
+// $ANTLR start "K_FOR"
+- (void) mK_FOR
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_FOR;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:91:7: ( 'for' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:91:9: 'for' // alt
+        {
+
+
+        [self matchString:@"for"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_FOR" */
+// $ANTLR start "K_CHAR"
+- (void) mK_CHAR
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_CHAR;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:92:7: ( 'char' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:92:9: 'char' // alt
+        {
+
+
+        [self matchString:@"char"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_CHAR" */
+// $ANTLR start "K_INT_TYPE"
+- (void) mK_INT_TYPE
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_INT_TYPE;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:93:12: ( 'int' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:93:14: 'int' // alt
+        {
+
+
+        [self matchString:@"int"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_INT_TYPE" */
+// $ANTLR start "K_VOID"
+- (void) mK_VOID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_VOID;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:94:7: ( 'void' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:94:9: 'void' // alt
+        {
+
+
+        [self matchString:@"void"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_VOID" */
+// $ANTLR start "K_ID"
+- (void) mK_ID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_ID;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:96:7: ( ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:96:11: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )* // alt
+        {
+
+        if ((([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+            [input consume];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            [self recover:mse];
+            @throw mse;
+        }
+
+         
+
+        do {
+            NSInteger alt1=2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= '0' && LA1_0 <= '9')||(LA1_0 >= 'A' && LA1_0 <= 'Z')||LA1_0=='_'||(LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))||(([input LA:1] >= 'A') && ([input LA:1] <= 'Z'))||[input LA:1] == '_'||(([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop1;
+            }
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_ID" */
+// $ANTLR start "K_INT"
+- (void) mK_INT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_INT;
+        NSInteger _channel = TokenChannelDefault;
+        CommonToken *anInt=nil;
+        AMutableArray *list_anInt=nil; 
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:99:7: ( (anInt+= ( '0' .. '9' ) )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:99:9: (anInt+= ( '0' .. '9' ) )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:99:14: (anInt+= ( '0' .. '9' ) )+ // positiveClosureBlock
+        NSInteger cnt2 = 0;
+        do {
+            NSInteger alt2 = 2;
+            NSInteger LA2_0 = [input LA:1];
+            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
+                alt2=1;
+            }
+
+
+            switch (alt2) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:99:14: anInt+= ( '0' .. '9' ) // alt
+                    {
+
+
+                    anInt = [input LA:1];
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        mse.c = anInt;
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt2 >= 1 )
+                        goto loop2;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:2];
+                    @throw eee;
+            }
+            cnt2++;
+        } while (YES);
+        loop2: ;
+
+         
+
+        NSLog(@"%@", list_anInt);
+
+         
+        }
+
+        /* token+rule list labels */
+        [list_anInt release];
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_INT" */
+// $ANTLR start "K_LCURVE"
+- (void) mK_LCURVE
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_LCURVE;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:102:10: ( '(' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:102:12: '(' // alt
+        {
+
+
+        [self matchChar:'(']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_LCURVE" */
+// $ANTLR start "K_RCURVE"
+- (void) mK_RCURVE
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_RCURVE;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:103:10: ( ')' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:103:12: ')' // alt
+        {
+
+
+        [self matchChar:')']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_RCURVE" */
+// $ANTLR start "K_PLUS"
+- (void) mK_PLUS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_PLUS;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:104:8: ( '+' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:104:10: '+' // alt
+        {
+
+
+        [self matchChar:'+']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_PLUS" */
+// $ANTLR start "K_COMMA"
+- (void) mK_COMMA
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_COMMA;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:105:9: ( ',' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:105:11: ',' // alt
+        {
+
+
+        [self matchChar:',']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_COMMA" */
+// $ANTLR start "K_SEMICOLON"
+- (void) mK_SEMICOLON
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_SEMICOLON;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:106:13: ( ';' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:106:15: ';' // alt
+        {
+
+
+        [self matchChar:';']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_SEMICOLON" */
+// $ANTLR start "K_LT"
+- (void) mK_LT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_LT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:107:8: ( '<' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:107:10: '<' // alt
+        {
+
+
+        [self matchChar:'<']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_LT" */
+// $ANTLR start "K_EQ"
+- (void) mK_EQ
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_EQ;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:108:8: ( '=' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:108:10: '=' // alt
+        {
+
+
+        [self matchChar:'=']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_EQ" */
+// $ANTLR start "K_EQEQ"
+- (void) mK_EQEQ
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_EQEQ;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:109:8: ( '==' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:109:10: '==' // alt
+        {
+
+
+        [self matchString:@"=="]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_EQEQ" */
+// $ANTLR start "K_LCURLY"
+- (void) mK_LCURLY
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_LCURLY;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:110:10: ( '{' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:110:12: '{' // alt
+        {
+
+
+        [self matchChar:'{']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_LCURLY" */
+// $ANTLR start "K_RCURLY"
+- (void) mK_RCURLY
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = K_RCURLY;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:111:10: ( '}' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:111:12: '}' // alt
+        {
+
+
+        [self matchChar:'}']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "K_RCURLY" */
+// $ANTLR start "WS"
+- (void) mWS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = WS;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:113:5: ( ( ' ' | '\\t' | '\\r' | '\\n' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:113:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:113:9: ( ' ' | '\\t' | '\\r' | '\\n' )+ // positiveClosureBlock
+        NSInteger cnt3 = 0;
+        do {
+            NSInteger alt3 = 2;
+            NSInteger LA3_0 = [input LA:1];
+            if ( ((LA3_0 >= '\t' && LA3_0 <= '\n')||LA3_0=='\r'||LA3_0==' ') ) {
+                alt3=1;
+            }
+
+
+            switch (alt3) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '\t') && ([input LA:1] <= '\n'))||[input LA:1] == '\r'||[input LA:1] == ' ') {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt3 >= 1 )
+                        goto loop3;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:3];
+                    @throw eee;
+            }
+            cnt3++;
+        } while (YES);
+        loop3: ;
+
+         
+
+         _channel=HIDDEN; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "WS" */
+- (void) mTokens
+{
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:8: ( K_FOR | K_CHAR | K_INT_TYPE | K_VOID | K_ID | K_INT | K_LCURVE | K_RCURVE | K_PLUS | K_COMMA | K_SEMICOLON | K_LT | K_EQ | K_EQEQ | K_LCURLY | K_RCURLY | WS ) //ruleblock
+    NSInteger alt4=17;
+    unichar charLA4 = [input LA:1];
+    switch (charLA4) {
+        case 'f': ;
+            {
+            NSInteger LA4_1 = [input LA:2];
+
+            if ( (LA4_1=='o') ) {
+                NSInteger LA4_17 = [input LA:3];
+
+                if ( (LA4_17=='r') ) {
+                    NSInteger LA4_23 = [input LA:4];
+
+                    if ( ((LA4_23 >= '0' && LA4_23 <= '9')||(LA4_23 >= 'A' && LA4_23 <= 'Z')||LA4_23=='_'||(LA4_23 >= 'a' && LA4_23 <= 'z')) ) {
+                        alt4=5;
+                    }
+                    else {
+                        alt4 = 1;
+                    }
+                }
+                else {
+                    alt4 = 5;
+                }
+            }
+            else {
+                alt4 = 5;
+            }
+            }
+            break;
+        case 'c': ;
+            {
+            NSInteger LA4_2 = [input LA:2];
+
+            if ( (LA4_2=='h') ) {
+                NSInteger LA4_18 = [input LA:3];
+
+                if ( (LA4_18=='a') ) {
+                    NSInteger LA4_24 = [input LA:4];
+
+                    if ( (LA4_24=='r') ) {
+                        NSInteger LA4_28 = [input LA:5];
+
+                        if ( ((LA4_28 >= '0' && LA4_28 <= '9')||(LA4_28 >= 'A' && LA4_28 <= 'Z')||LA4_28=='_'||(LA4_28 >= 'a' && LA4_28 <= 'z')) ) {
+                            alt4=5;
+                        }
+                        else {
+                            alt4 = 2;
+                        }
+                    }
+                    else {
+                        alt4 = 5;
+                    }
+                }
+                else {
+                    alt4 = 5;
+                }
+            }
+            else {
+                alt4 = 5;
+            }
+            }
+            break;
+        case 'i': ;
+            {
+            NSInteger LA4_3 = [input LA:2];
+
+            if ( (LA4_3=='n') ) {
+                NSInteger LA4_19 = [input LA:3];
+
+                if ( (LA4_19=='t') ) {
+                    NSInteger LA4_25 = [input LA:4];
+
+                    if ( ((LA4_25 >= '0' && LA4_25 <= '9')||(LA4_25 >= 'A' && LA4_25 <= 'Z')||LA4_25=='_'||(LA4_25 >= 'a' && LA4_25 <= 'z')) ) {
+                        alt4=5;
+                    }
+                    else {
+                        alt4 = 3;
+                    }
+                }
+                else {
+                    alt4 = 5;
+                }
+            }
+            else {
+                alt4 = 5;
+            }
+            }
+            break;
+        case 'v': ;
+            {
+            NSInteger LA4_4 = [input LA:2];
+
+            if ( (LA4_4=='o') ) {
+                NSInteger LA4_20 = [input LA:3];
+
+                if ( (LA4_20=='i') ) {
+                    NSInteger LA4_26 = [input LA:4];
+
+                    if ( (LA4_26=='d') ) {
+                        NSInteger LA4_30 = [input LA:5];
+
+                        if ( ((LA4_30 >= '0' && LA4_30 <= '9')||(LA4_30 >= 'A' && LA4_30 <= 'Z')||LA4_30=='_'||(LA4_30 >= 'a' && LA4_30 <= 'z')) ) {
+                            alt4=5;
+                        }
+                        else {
+                            alt4 = 4;
+                        }
+                    }
+                    else {
+                        alt4 = 5;
+                    }
+                }
+                else {
+                    alt4 = 5;
+                }
+            }
+            else {
+                alt4 = 5;
+            }
+            }
+            break;
+        case 'A': ;
+        case 'B': ;
+        case 'C': ;
+        case 'D': ;
+        case 'E': ;
+        case 'F': ;
+        case 'G': ;
+        case 'H': ;
+        case 'I': ;
+        case 'J': ;
+        case 'K': ;
+        case 'L': ;
+        case 'M': ;
+        case 'N': ;
+        case 'O': ;
+        case 'P': ;
+        case 'Q': ;
+        case 'R': ;
+        case 'S': ;
+        case 'T': ;
+        case 'U': ;
+        case 'V': ;
+        case 'W': ;
+        case 'X': ;
+        case 'Y': ;
+        case 'Z': ;
+        case '_': ;
+        case 'a': ;
+        case 'b': ;
+        case 'd': ;
+        case 'e': ;
+        case 'g': ;
+        case 'h': ;
+        case 'j': ;
+        case 'k': ;
+        case 'l': ;
+        case 'm': ;
+        case 'n': ;
+        case 'o': ;
+        case 'p': ;
+        case 'q': ;
+        case 'r': ;
+        case 's': ;
+        case 't': ;
+        case 'u': ;
+        case 'w': ;
+        case 'x': ;
+        case 'y': ;
+        case 'z': ;
+            {
+            alt4=5;
+            }
+            break;
+        case '0': ;
+        case '1': ;
+        case '2': ;
+        case '3': ;
+        case '4': ;
+        case '5': ;
+        case '6': ;
+        case '7': ;
+        case '8': ;
+        case '9': ;
+            {
+            alt4=6;
+            }
+            break;
+        case '(': ;
+            {
+            alt4=7;
+            }
+            break;
+        case ')': ;
+            {
+            alt4=8;
+            }
+            break;
+        case '+': ;
+            {
+            alt4=9;
+            }
+            break;
+        case ',': ;
+            {
+            alt4=10;
+            }
+            break;
+        case ';': ;
+            {
+            alt4=11;
+            }
+            break;
+        case '<': ;
+            {
+            alt4=12;
+            }
+            break;
+        case '=': ;
+            {
+            NSInteger LA4_13 = [input LA:2];
+
+            if ( (LA4_13=='=') ) {
+                alt4=14;
+            }
+            else {
+                alt4 = 13;
+            }
+            }
+            break;
+        case '{': ;
+            {
+            alt4=15;
+            }
+            break;
+        case '}': ;
+            {
+            alt4=16;
+            }
+            break;
+        case '\t': ;
+        case '\n': ;
+        case '\r': ;
+        case ' ': ;
+            {
+            alt4=17;
+            }
+            break;
+
+    default: ;
+        NoViableAltException *nvae = [NoViableAltException newException:4 state:0 stream:input];
+        nvae.c = charLA4;
+        @throw nvae;
+
+    }
+
+    switch (alt4) {
+        case 1 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:10: K_FOR // alt
+            {
+
+
+            [self mK_FOR]; 
+
+
+             
+            }
+            break;
+        case 2 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:16: K_CHAR // alt
+            {
+
+
+            [self mK_CHAR]; 
+
+
+             
+            }
+            break;
+        case 3 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:23: K_INT_TYPE // alt
+            {
+            [self mK_INT_TYPE];
+            }
+            break;
+        case 4 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:34: K_VOID // alt
+            {
+            [self mK_VOID];
+            }
+            break;
+        case 5 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:41: K_ID // alt
+            {
+            [self mK_ID];
+            }
+            break;
+        case 6 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:46: K_INT // alt
+            {
+            [self mK_INT];
+            }
+            break;
+        case 7 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:52: K_LCURVE // alt
+            {
+            [self mK_LCURVE];
+            }
+            break;
+        case 8 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:61: K_RCURVE // alt
+            {
+            [self mK_RCURVE];
+            }
+            break;
+        case 9 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:70: K_PLUS // alt
+            {
+            [self mK_PLUS];
+            }
+            break;
+        case 10 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:77: K_COMMA // alt
+            {
+            [self mK_COMMA];
+            }
+            break;
+        case 11 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:85: K_SEMICOLON // alt
+            {
+            [self mK_SEMICOLON];
+            }
+            break;
+        case 12 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:97: K_LT // alt
+            {
+            [self mK_LT];
+            }
+            break;
+        case 13 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:102: K_EQ // alt
+            {
+            [self mK_EQ];
+            }
+            break;
+        case 14 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:107: K_EQEQ // alt
+            {
+            [self mK_EQEQ];
+            }
+            break;
+        case 15 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:114: K_LCURLY // alt
+            {
+            [self mK_LCURLY];
+            }
+            break;
+        case 16 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:123: K_RCURLY // alt
+            {
+            [self mK_RCURLY];
+            }
+            break;
+        case 17 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:1:132: WS // alt
+            {
+            [self mWS];
+            }
+            break;
+
+    }
+
+}
+
+@end /* end of SimpleCLexer implementation line 397 */
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.h b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.h
new file mode 100644
index 0000000..31f40ee
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.h
@@ -0,0 +1,471 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g 2012-02-16 17:40:52
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* parserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Cyclic DFA interface start DFA2
+@interface DFA2 : DFA {
+}
++ (DFA2 *) newDFA2WithRecognizer:(BaseRecognizer *)theRecognizer;
+- initWithRecognizer:(BaseRecognizer *)recognizer;
+@end /* end of DFA2 interface  */
+
+#pragma mark Cyclic DFA interface end DFA2
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define ARG_DEF 4
+#define BLOCK 5
+#define FUNC_DECL 6
+#define FUNC_DEF 7
+#define FUNC_HDR 8
+#define K_CHAR 9
+#define K_COMMA 10
+#define K_EQ 11
+#define K_EQEQ 12
+#define K_FOR 13
+#define K_ID 14
+#define K_INT 15
+#define K_INT_TYPE 16
+#define K_LCURLY 17
+#define K_LCURVE 18
+#define K_LT 19
+#define K_PLUS 20
+#define K_RCURLY 21
+#define K_RCURVE 22
+#define K_SEMICOLON 23
+#define K_VOID 24
+#define VAR_DEF 25
+#define WS 26
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+/* returnScopeInterface SimpleCParser_program_return */
+@interface SimpleCParser_program_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_program_return *)newSimpleCParser_program_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_declaration_return */
+@interface SimpleCParser_declaration_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_declaration_return *)newSimpleCParser_declaration_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_variable_return */
+@interface SimpleCParser_variable_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_variable_return *)newSimpleCParser_variable_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_declarator_return */
+@interface SimpleCParser_declarator_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_declarator_return *)newSimpleCParser_declarator_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_functionHeader_return */
+@interface SimpleCParser_functionHeader_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_functionHeader_return *)newSimpleCParser_functionHeader_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_formalParameter_return */
+@interface SimpleCParser_formalParameter_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_formalParameter_return *)newSimpleCParser_formalParameter_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_type_return */
+@interface SimpleCParser_type_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_type_return *)newSimpleCParser_type_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_block_return */
+@interface SimpleCParser_block_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_block_return *)newSimpleCParser_block_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_stat_return */
+@interface SimpleCParser_stat_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_stat_return *)newSimpleCParser_stat_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_forStat_return */
+@interface SimpleCParser_forStat_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_forStat_return *)newSimpleCParser_forStat_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_assignStat_return */
+@interface SimpleCParser_assignStat_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_assignStat_return *)newSimpleCParser_assignStat_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_expr_return */
+@interface SimpleCParser_expr_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_expr_return *)newSimpleCParser_expr_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_condExpr_return */
+@interface SimpleCParser_condExpr_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_condExpr_return *)newSimpleCParser_condExpr_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_aexpr_return */
+@interface SimpleCParser_aexpr_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_aexpr_return *)newSimpleCParser_aexpr_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface SimpleCParser_atom_return */
+@interface SimpleCParser_atom_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (SimpleCParser_atom_return *)newSimpleCParser_atom_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+
+/* Interface grammar class */
+@interface SimpleCParser  : Parser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* AST parserHeaderFile.memVars */
+NSInteger ruleLevel;
+NSArray *ruleNames;
+  /* AST super.memVars */
+/* AST parserMemVars */
+id<TreeAdaptor> treeAdaptor;   /* AST parserMemVars */
+/* ObjC end of memVars */
+
+DFA2 *dfa2;
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* AST parserHeaderFile.properties */
+  /* AST super.properties */
+/* AST parserProperties */
+@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<TreeAdaptor> treeAdaptor;   /* AST parserproperties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newSimpleCParser:(id<TokenStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* AST parserHeaderFile.methodsDecl */
+  /* AST super.methodsDecl */
+/* AST parserMethodsDecl */
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void) setTreeAdaptor:(id<TreeAdaptor>)theTreeAdaptor;   /* AST parsermethodsDecl */
+/* ObjC end of methodsDecl */
+
+- (SimpleCParser_program_return *)program; 
+- (SimpleCParser_declaration_return *)declaration; 
+- (SimpleCParser_variable_return *)variable; 
+- (SimpleCParser_declarator_return *)declarator; 
+- (SimpleCParser_functionHeader_return *)functionHeader; 
+- (SimpleCParser_formalParameter_return *)formalParameter; 
+- (SimpleCParser_type_return *)type; 
+- (SimpleCParser_block_return *)block; 
+- (SimpleCParser_stat_return *)stat; 
+- (SimpleCParser_forStat_return *)forStat; 
+- (SimpleCParser_assignStat_return *)assignStat; 
+- (SimpleCParser_expr_return *)expr; 
+- (SimpleCParser_condExpr_return *)condExpr; 
+- (SimpleCParser_aexpr_return *)aexpr; 
+- (SimpleCParser_atom_return *)atom; 
+
+
+@end /* end of SimpleCParser interface */
+
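For orientation, a minimal usage sketch of the interface declared in SimpleCParser.h above. It is not part of the generated sources; it relies only on declarations visible in that header (+newSimpleCParser:, -program, and the -getTree accessor of the rule return scope), and it assumes the caller has already produced an id<TokenStream> (for example from the SimpleCLexer in this example), since the lexer and token-stream constructors are not shown in this hunk. Retain/release housekeeping is elided for brevity.

    #import <Foundation/Foundation.h>
    #import <ANTLR/ANTLR.h>
    #import "SimpleCParser.h"

    /* Hypothetical helper for illustration only; not generated code. */
    static CommonTree *SimpleCParseProgram(id<TokenStream> tokens)
    {
        /* +newSimpleCParser: and -program are declared in SimpleCParser.h above. */
        SimpleCParser *parser = [SimpleCParser newSimpleCParser:tokens];
        SimpleCParser_program_return *ret = [parser program];
        /* Each rule return scope exposes its AST through -getTree (CommonTree *). */
        return [ret getTree];
    }

The same pattern applies to the other rule entry points (-declaration, -expr, and so on), each of which returns its own *_return scope carrying the subtree for that rule.
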
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCParser.java b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.java
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCParser.java
rename to runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.java
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.m b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.m
new file mode 100644
index 0000000..a8935a3
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCParser.m
@@ -0,0 +1,3144 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g
+ *     -                            On : 2012-02-16 17:40:52
+ *     -                for the parser : SimpleCParserParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g 2012-02-16 17:40:52
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "SimpleCParser.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+#pragma mark Cyclic DFA implementation start DFA2
+
+@implementation DFA2
+const static NSInteger dfa2_eot[13] =
+    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
+const static NSInteger dfa2_eof[13] =
+    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
+const static unichar dfa2_min[13] =
+    {9,14,18,9,-1,14,17,10,-1,-1,9,14,10};
+const static unichar dfa2_max[13] =
+    {24,14,23,24,-1,14,23,22,-1,-1,24,14,22};
+const static NSInteger dfa2_accept[13] =
+    {-1,-1,-1,-1,1,-1,-1,-1,2,3,-1,-1,-1};
+const static NSInteger dfa2_special[13] =
+    {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
+
+/** Used when there is no transition table entry for a particular state */
+#define dfa2_T_empty	    nil
+
+const static NSInteger dfa2_T0[] =
+{
+     9, -1, -1, -1, -1, -1, 8
+};
+const static NSInteger dfa2_T1[] =
+{
+     10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6
+};
+const static NSInteger dfa2_T2[] =
+{
+     5, -1, -1, -1, -1, 5, -1, 5, -1, -1, -1, -1, -1, 6, -1, 5
+};
+const static NSInteger dfa2_T3[] =
+{
+     11, -1, -1, -1, -1, 11, -1, 11, -1, -1, -1, -1, -1, -1, -1, 11
+};
+const static NSInteger dfa2_T4[] =
+{
+     3, -1, -1, -1, -1, 4
+};
+const static NSInteger dfa2_T5[] =
+{
+     1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1
+};
+const static NSInteger dfa2_T6[] =
+{
+     7
+};
+const static NSInteger dfa2_T7[] =
+{
+     12
+};
+const static NSInteger dfa2_T8[] =
+{
+     2
+};
+
+
+const static NSInteger *dfa2_transition[] =
+{
+    dfa2_T5, dfa2_T8, dfa2_T4, dfa2_T2, nil, dfa2_T6, dfa2_T0, dfa2_T1, 
+    nil, nil, dfa2_T3, dfa2_T7, dfa2_T1
+};
+
+//const static NSInteger dfa2_transition[] = {};
+
++ (DFA2 *) newDFA2WithRecognizer:(BaseRecognizer *)aRecognizer
+{
+    return [[[DFA2 alloc] initWithRecognizer:aRecognizer] retain];
+}
+
+- (id) initWithRecognizer:(BaseRecognizer *) theRecognizer
+{
+    self = [super initWithRecognizer:theRecognizer];
+    if ( self != nil ) {
+        decisionNumber = 2;
+        eot = dfa2_eot;
+        eof = dfa2_eof;
+        min = dfa2_min;
+        max = dfa2_max;
+        accept = dfa2_accept;
+        special = dfa2_special;
+        transition = dfa2_transition;
+/*
+        if (!(transition = calloc(13, sizeof(void*)))) {
+            [self release];
+            return nil;
+        }
+        len = 13;
+        transition[0] = dfa2_transition5;
+        transition[1] = dfa2_transition8;
+        transition[2] = dfa2_transition4;
+        transition[3] = dfa2_transition2;
+
+        transition[4] = dfa2_transition6;
+        transition[5] = dfa2_transition0;
+        transition[6] = dfa2_transition1;
+
+
+        transition[7] = dfa2_transition3;
+        transition[8] = dfa2_transition7;
+        transition[9] = dfa2_transition1;
+ */
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    free(transition);
+    [super dealloc];
+}
+
+- (NSString *) description
+{
+    return @"20:1: declaration : ( variable | functionHeader K_SEMICOLON -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) );";
+}
+
+
+@end /* end DFA2 implementation */
+
+#pragma mark Cyclic DFA implementation end DFA2
+
+
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_declaration_in_program85;
+static const unsigned long long FOLLOW_declaration_in_program85_data[] = { 0x0000000001014202LL};
+static ANTLRBitSet *FOLLOW_variable_in_declaration105;
+static const unsigned long long FOLLOW_variable_in_declaration105_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_functionHeader_in_declaration115;
+static const unsigned long long FOLLOW_functionHeader_in_declaration115_data[] = { 0x0000000000800000LL};
+static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_declaration117;
+static const unsigned long long FOLLOW_K_SEMICOLON_in_declaration117_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_functionHeader_in_declaration135;
+static const unsigned long long FOLLOW_functionHeader_in_declaration135_data[] = { 0x0000000000020000LL};
+static ANTLRBitSet *FOLLOW_block_in_declaration137;
+static const unsigned long long FOLLOW_block_in_declaration137_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_type_in_variable166;
+static const unsigned long long FOLLOW_type_in_variable166_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_declarator_in_variable168;
+static const unsigned long long FOLLOW_declarator_in_variable168_data[] = { 0x0000000000800000LL};
+static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_variable170;
+static const unsigned long long FOLLOW_K_SEMICOLON_in_variable170_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_K_ID_in_declarator199;
+static const unsigned long long FOLLOW_K_ID_in_declarator199_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_type_in_functionHeader219;
+static const unsigned long long FOLLOW_type_in_functionHeader219_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_K_ID_in_functionHeader221;
+static const unsigned long long FOLLOW_K_ID_in_functionHeader221_data[] = { 0x0000000000040000LL};
+static ANTLRBitSet *FOLLOW_K_LCURVE_in_functionHeader223;
+static const unsigned long long FOLLOW_K_LCURVE_in_functionHeader223_data[] = { 0x0000000001414200LL};
+static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader227;
+static const unsigned long long FOLLOW_formalParameter_in_functionHeader227_data[] = { 0x0000000000400400LL};
+static ANTLRBitSet *FOLLOW_K_COMMA_in_functionHeader231;
+static const unsigned long long FOLLOW_K_COMMA_in_functionHeader231_data[] = { 0x0000000001014200LL};
+static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader233;
+static const unsigned long long FOLLOW_formalParameter_in_functionHeader233_data[] = { 0x0000000000400400LL};
+static ANTLRBitSet *FOLLOW_K_RCURVE_in_functionHeader241;
+static const unsigned long long FOLLOW_K_RCURVE_in_functionHeader241_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_type_in_formalParameter281;
+static const unsigned long long FOLLOW_type_in_formalParameter281_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_declarator_in_formalParameter283;
+static const unsigned long long FOLLOW_declarator_in_formalParameter283_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_K_LCURLY_in_block376;
+static const unsigned long long FOLLOW_K_LCURLY_in_block376_data[] = { 0x0000000001A7E200LL};
+static ANTLRBitSet *FOLLOW_variable_in_block390;
+static const unsigned long long FOLLOW_variable_in_block390_data[] = { 0x0000000001A7E200LL};
+static ANTLRBitSet *FOLLOW_stat_in_block405;
+static const unsigned long long FOLLOW_stat_in_block405_data[] = { 0x0000000000A6E000LL};
+static ANTLRBitSet *FOLLOW_K_RCURLY_in_block416;
+static const unsigned long long FOLLOW_K_RCURLY_in_block416_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_forStat_in_stat449;
+static const unsigned long long FOLLOW_forStat_in_stat449_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_expr_in_stat457;
+static const unsigned long long FOLLOW_expr_in_stat457_data[] = { 0x0000000000800000LL};
+static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_stat459;
+static const unsigned long long FOLLOW_K_SEMICOLON_in_stat459_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_block_in_stat468;
+static const unsigned long long FOLLOW_block_in_stat468_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_assignStat_in_stat476;
+static const unsigned long long FOLLOW_assignStat_in_stat476_data[] = { 0x0000000000800000LL};
+static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_stat478;
+static const unsigned long long FOLLOW_K_SEMICOLON_in_stat478_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_stat487;
+static const unsigned long long FOLLOW_K_SEMICOLON_in_stat487_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_K_FOR_in_forStat507;
+static const unsigned long long FOLLOW_K_FOR_in_forStat507_data[] = { 0x0000000000040000LL};
+static ANTLRBitSet *FOLLOW_K_LCURVE_in_forStat509;
+static const unsigned long long FOLLOW_K_LCURVE_in_forStat509_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_assignStat_in_forStat513;
+static const unsigned long long FOLLOW_assignStat_in_forStat513_data[] = { 0x0000000000800000LL};
+static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_forStat515;
+static const unsigned long long FOLLOW_K_SEMICOLON_in_forStat515_data[] = { 0x000000000004C000LL};
+static ANTLRBitSet *FOLLOW_expr_in_forStat517;
+static const unsigned long long FOLLOW_expr_in_forStat517_data[] = { 0x0000000000800000LL};
+static ANTLRBitSet *FOLLOW_K_SEMICOLON_in_forStat519;
+static const unsigned long long FOLLOW_K_SEMICOLON_in_forStat519_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_assignStat_in_forStat523;
+static const unsigned long long FOLLOW_assignStat_in_forStat523_data[] = { 0x0000000000400000LL};
+static ANTLRBitSet *FOLLOW_K_RCURVE_in_forStat525;
+static const unsigned long long FOLLOW_K_RCURVE_in_forStat525_data[] = { 0x0000000000020000LL};
+static ANTLRBitSet *FOLLOW_block_in_forStat527;
+static const unsigned long long FOLLOW_block_in_forStat527_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_K_ID_in_assignStat570;
+static const unsigned long long FOLLOW_K_ID_in_assignStat570_data[] = { 0x0000000000000800LL};
+static ANTLRBitSet *FOLLOW_K_EQ_in_assignStat572;
+static const unsigned long long FOLLOW_K_EQ_in_assignStat572_data[] = { 0x000000000004C000LL};
+static ANTLRBitSet *FOLLOW_expr_in_assignStat574;
+static const unsigned long long FOLLOW_expr_in_assignStat574_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_condExpr_in_expr598;
+static const unsigned long long FOLLOW_condExpr_in_expr598_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_aexpr_in_condExpr617;
+static const unsigned long long FOLLOW_aexpr_in_condExpr617_data[] = { 0x0000000000081002LL};
+static ANTLRBitSet *FOLLOW_K_EQEQ_in_condExpr622;
+static const unsigned long long FOLLOW_K_EQEQ_in_condExpr622_data[] = { 0x000000000004C000LL};
+static ANTLRBitSet *FOLLOW_K_LT_in_condExpr627;
+static const unsigned long long FOLLOW_K_LT_in_condExpr627_data[] = { 0x000000000004C000LL};
+static ANTLRBitSet *FOLLOW_aexpr_in_condExpr631;
+static const unsigned long long FOLLOW_aexpr_in_condExpr631_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_atom_in_aexpr653;
+static const unsigned long long FOLLOW_atom_in_aexpr653_data[] = { 0x0000000000100002LL};
+static ANTLRBitSet *FOLLOW_K_PLUS_in_aexpr657;
+static const unsigned long long FOLLOW_K_PLUS_in_aexpr657_data[] = { 0x000000000004C000LL};
+static ANTLRBitSet *FOLLOW_atom_in_aexpr660;
+static const unsigned long long FOLLOW_atom_in_aexpr660_data[] = { 0x0000000000100002LL};
+static ANTLRBitSet *FOLLOW_K_ID_in_atom680;
+static const unsigned long long FOLLOW_K_ID_in_atom680_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_K_INT_in_atom694;
+static const unsigned long long FOLLOW_K_INT_in_atom694_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_K_LCURVE_in_atom708;
+static const unsigned long long FOLLOW_K_LCURVE_in_atom708_data[] = { 0x000000000004C000LL};
+static ANTLRBitSet *FOLLOW_expr_in_atom710;
+static const unsigned long long FOLLOW_expr_in_atom710_data[] = { 0x0000000000400000LL};
+static ANTLRBitSet *FOLLOW_K_RCURVE_in_atom712;
+static const unsigned long long FOLLOW_K_RCURVE_in_atom712_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+@implementation SimpleCParser_program_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_program_return *)newSimpleCParser_program_return
+{
+return [[[SimpleCParser_program_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_declaration_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_declaration_return *)newSimpleCParser_declaration_return
+{
+return [[[SimpleCParser_declaration_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_variable_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_variable_return *)newSimpleCParser_variable_return
+{
+return [[[SimpleCParser_variable_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_declarator_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_declarator_return *)newSimpleCParser_declarator_return
+{
+return [[[SimpleCParser_declarator_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_functionHeader_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_functionHeader_return *)newSimpleCParser_functionHeader_return
+{
+return [[[SimpleCParser_functionHeader_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_formalParameter_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_formalParameter_return *)newSimpleCParser_formalParameter_return
+{
+return [[[SimpleCParser_formalParameter_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_type_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_type_return *)newSimpleCParser_type_return
+{
+return [[[SimpleCParser_type_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_block_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_block_return *)newSimpleCParser_block_return
+{
+return [[[SimpleCParser_block_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_stat_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_stat_return *)newSimpleCParser_stat_return
+{
+return [[[SimpleCParser_stat_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_forStat_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_forStat_return *)newSimpleCParser_forStat_return
+{
+return [[[SimpleCParser_forStat_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_assignStat_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_assignStat_return *)newSimpleCParser_assignStat_return
+{
+return [[[SimpleCParser_assignStat_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_expr_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_expr_return *)newSimpleCParser_expr_return
+{
+return [[[SimpleCParser_expr_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_condExpr_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_condExpr_return *)newSimpleCParser_condExpr_return
+{
+return [[[SimpleCParser_condExpr_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_aexpr_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_aexpr_return *)newSimpleCParser_aexpr_return
+{
+return [[[SimpleCParser_aexpr_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation SimpleCParser_atom_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCParser_atom_return *)newSimpleCParser_atom_return
+{
+return [[[SimpleCParser_atom_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+
+
+@implementation SimpleCParser  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+/* AST genericParser.synthesize */
+/* AST parserProperties */
+@synthesize treeAdaptor;
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_declaration_in_program85 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declaration_in_program85_data Count:(NSUInteger)1] retain];
+    FOLLOW_variable_in_declaration105 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_declaration105_data Count:(NSUInteger)1] retain];
+    FOLLOW_functionHeader_in_declaration115 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration115_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_SEMICOLON_in_declaration117 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_declaration117_data Count:(NSUInteger)1] retain];
+    FOLLOW_functionHeader_in_declaration135 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration135_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_declaration137 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_declaration137_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_variable166 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_variable166_data Count:(NSUInteger)1] retain];
+    FOLLOW_declarator_in_variable168 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_variable168_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_SEMICOLON_in_variable170 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_variable170_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_ID_in_declarator199 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_declarator199_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_functionHeader219 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_functionHeader219_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_ID_in_functionHeader221 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_functionHeader221_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_LCURVE_in_functionHeader223 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_LCURVE_in_functionHeader223_data Count:(NSUInteger)1] retain];
+    FOLLOW_formalParameter_in_functionHeader227 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader227_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_COMMA_in_functionHeader231 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_COMMA_in_functionHeader231_data Count:(NSUInteger)1] retain];
+    FOLLOW_formalParameter_in_functionHeader233 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader233_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_RCURVE_in_functionHeader241 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_RCURVE_in_functionHeader241_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_formalParameter281 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_formalParameter281_data Count:(NSUInteger)1] retain];
+    FOLLOW_declarator_in_formalParameter283 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_formalParameter283_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_LCURLY_in_block376 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_LCURLY_in_block376_data Count:(NSUInteger)1] retain];
+    FOLLOW_variable_in_block390 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_block390_data Count:(NSUInteger)1] retain];
+    FOLLOW_stat_in_block405 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block405_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_RCURLY_in_block416 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_RCURLY_in_block416_data Count:(NSUInteger)1] retain];
+    FOLLOW_forStat_in_stat449 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_forStat_in_stat449_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_stat457 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_stat457_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_SEMICOLON_in_stat459 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_stat459_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_stat468 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat468_data Count:(NSUInteger)1] retain];
+    FOLLOW_assignStat_in_stat476 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_stat476_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_SEMICOLON_in_stat478 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_stat478_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_SEMICOLON_in_stat487 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_stat487_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_FOR_in_forStat507 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_FOR_in_forStat507_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_LCURVE_in_forStat509 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_LCURVE_in_forStat509_data Count:(NSUInteger)1] retain];
+    FOLLOW_assignStat_in_forStat513 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_forStat513_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_SEMICOLON_in_forStat515 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_forStat515_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_forStat517 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat517_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_SEMICOLON_in_forStat519 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_SEMICOLON_in_forStat519_data Count:(NSUInteger)1] retain];
+    FOLLOW_assignStat_in_forStat523 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_assignStat_in_forStat523_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_RCURVE_in_forStat525 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_RCURVE_in_forStat525_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_forStat527 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_forStat527_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_ID_in_assignStat570 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_assignStat570_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_EQ_in_assignStat572 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQ_in_assignStat572_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_assignStat574 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_assignStat574_data Count:(NSUInteger)1] retain];
+    FOLLOW_condExpr_in_expr598 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_condExpr_in_expr598_data Count:(NSUInteger)1] retain];
+    FOLLOW_aexpr_in_condExpr617 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_aexpr_in_condExpr617_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_EQEQ_in_condExpr622 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQEQ_in_condExpr622_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_LT_in_condExpr627 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_LT_in_condExpr627_data Count:(NSUInteger)1] retain];
+    FOLLOW_aexpr_in_condExpr631 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_aexpr_in_condExpr631_data Count:(NSUInteger)1] retain];
+    FOLLOW_atom_in_aexpr653 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_aexpr653_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_PLUS_in_aexpr657 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_PLUS_in_aexpr657_data Count:(NSUInteger)1] retain];
+    FOLLOW_atom_in_aexpr660 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_aexpr660_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_ID_in_atom680 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_atom680_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_INT_in_atom694 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_INT_in_atom694_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_LCURVE_in_atom708 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_LCURVE_in_atom708_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_atom710 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_atom710_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_RCURVE_in_atom712 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_RCURVE_in_atom712_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"ARG_DEF", @"BLOCK", @"FUNC_DECL", @"FUNC_DEF", @"FUNC_HDR", @"K_CHAR", 
+ @"K_COMMA", @"K_EQ", @"K_EQEQ", @"K_FOR", @"K_ID", @"K_INT", @"K_INT_TYPE", 
+ @"K_LCURLY", @"K_LCURVE", @"K_LT", @"K_PLUS", @"K_RCURLY", @"K_RCURVE", 
+ @"K_SEMICOLON", @"K_VOID", @"VAR_DEF", @"WS", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g"];
+}
+
++ (SimpleCParser *)newSimpleCParser:(id<TokenStream>)aStream
+{
+    return [[SimpleCParser alloc] initWithTokenStream:aStream];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)aStream
+{
+    self = [super initWithTokenStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:15+1] retain]];
+    if ( self != nil ) {
+        dfa2 = [DFA2 newDFA2WithRecognizer:self];
+        /* start of actions-actionScope-init */
+        /* start of init */
+        /* AST genericParser.init */
+        [self setTreeAdaptor:[[CommonTreeAdaptor newTreeAdaptor] retain]];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [dfa2 release];
+    /* AST genericParser.dealloc */
+    [self setTreeAdaptor:nil];
+
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* AST genericParser.methods */
+/* AST parserMethods */
+- (id<TreeAdaptor>) getTreeAdaptor
+{
+	return treeAdaptor;
+}
+
+- (void) setTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor
+{
+	if (aTreeAdaptor != treeAdaptor) {
+		treeAdaptor = aTreeAdaptor;
+	}
+}
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start program
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:16:1: program : ( declaration )+ ;
+ */
+- (SimpleCParser_program_return *) program
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_program_return * retval = [SimpleCParser_program_return newSimpleCParser_program_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        SimpleCParser_declaration_return * declaration1 = nil ;
+
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:17:5: ( ( declaration )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:17:9: ( declaration )+ // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:17:9: ( declaration )+ // positiveClosureBlock
+        NSInteger cnt1 = 0;
+        do {
+            NSInteger alt1 = 2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( (LA1_0==K_CHAR||LA1_0==K_ID||LA1_0==K_INT_TYPE||LA1_0==K_VOID) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:17:9: declaration // alt
+                    {
+
+                    /* ASTParser ruleRef */
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_declaration_in_program85];
+                    declaration1 = [self declaration];
+
+                    [self popFollow];
+
+
+                    [treeAdaptor addChild:[declaration1 getTree] toTree:root_0];
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt1 >= 1 )
+                        goto loop1;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:1];
+                    @throw eee;
+            }
+            cnt1++;
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end program */
+
+/*
+ * $ANTLR start declaration
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:20:1: declaration : ( variable | functionHeader K_SEMICOLON -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) );
+ */
+- (SimpleCParser_declaration_return *) declaration
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_declaration_return * retval = [SimpleCParser_declaration_return newSimpleCParser_declaration_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *K_SEMICOLON4 = nil;SimpleCParser_variable_return * variable2 = nil ;
+
+        SimpleCParser_functionHeader_return * functionHeader3 = nil ;
+
+        SimpleCParser_functionHeader_return * functionHeader5 = nil ;
+
+        SimpleCParser_block_return * block6 = nil ;
+
+
+        CommonTree *K_SEMICOLON4_tree=nil;
+        RewriteRuleTokenStream *stream_K_SEMICOLON =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_SEMICOLON"] retain];
+        RewriteRuleSubtreeStream *stream_functionHeader =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule functionHeader"] retain];
+        RewriteRuleSubtreeStream *stream_block =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule block"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:21:5: ( variable | functionHeader K_SEMICOLON -> ^( FUNC_DECL functionHeader ) | functionHeader block -> ^( FUNC_DEF functionHeader block ) ) //ruleblock
+        NSInteger alt2=3;
+        alt2 = [dfa2 predict:input];
+        switch (alt2) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:21:9: variable // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTParser ruleRef */
+                /* ruleRef */
+                [self pushFollow:FOLLOW_variable_in_declaration105];
+                variable2 = [self variable];
+
+                [self popFollow];
+
+
+                [treeAdaptor addChild:[variable2 getTree] toTree:root_0];
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:22:9: functionHeader K_SEMICOLON // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_functionHeader_in_declaration115];
+                functionHeader3 = [self functionHeader];
+
+                [self popFollow];
+
+
+                [stream_functionHeader addElement:[functionHeader3 getTree]];
+                 
+                K_SEMICOLON4=(CommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_declaration117];  
+                    [stream_K_SEMICOLON addElement:K_SEMICOLON4];
+
+                 
+                // AST REWRITE
+                // elements: functionHeader
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 22:36: -> ^( FUNC_DECL functionHeader )
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:22:39: ^( FUNC_DECL functionHeader )
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                                [[treeAdaptor createTree:FUNC_DECL Text:@"FUNC_DECL"] retain]
+                         old:root_1];
+
+                        [treeAdaptor addChild:[stream_functionHeader nextTree] toTree:root_1];
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:23:9: functionHeader block // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_functionHeader_in_declaration135];
+                functionHeader5 = [self functionHeader];
+
+                [self popFollow];
+
+
+                [stream_functionHeader addElement:[functionHeader5 getTree]];
+                 
+                /* ruleRef */
+                [self pushFollow:FOLLOW_block_in_declaration137];
+                block6 = [self block];
+
+                [self popFollow];
+
+
+                [stream_block addElement:[block6 getTree]];
+                 
+                // AST REWRITE
+                // elements: functionHeader, block
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 23:30: -> ^( FUNC_DEF functionHeader block )
+                {
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:23:33: ^( FUNC_DEF functionHeader block )
+                    {
+                        CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                        root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                                [[treeAdaptor createTree:FUNC_DEF Text:@"FUNC_DEF"] retain]
+                         old:root_1];
+
+                        [treeAdaptor addChild:[stream_functionHeader nextTree] toTree:root_1];
+
+                        [treeAdaptor addChild:[stream_block nextTree] toTree:root_1];
+
+                        [treeAdaptor addChild:root_1 toTree:root_0];
+                    }
+
+                }
+
+
+                retval.tree = root_0;
+
+
+                }
+                break;
+
+        }
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_K_SEMICOLON release];
+        [stream_functionHeader release];
+        [stream_block release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end declaration */
+
+/*
+ * $ANTLR start variable
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:26:1: variable : type declarator K_SEMICOLON -> ^( VAR_DEF type declarator ) ;
+ */
+- (SimpleCParser_variable_return *) variable
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_variable_return * retval = [SimpleCParser_variable_return newSimpleCParser_variable_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *K_SEMICOLON9 = nil;SimpleCParser_type_return * type7 = nil ;
+
+        SimpleCParser_declarator_return * declarator8 = nil ;
+
+
+        CommonTree *K_SEMICOLON9_tree=nil;
+        RewriteRuleTokenStream *stream_K_SEMICOLON =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_SEMICOLON"] retain];
+        RewriteRuleSubtreeStream *stream_declarator =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule declarator"] retain];
+        RewriteRuleSubtreeStream *stream_type =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule type"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:27:5: ( type declarator K_SEMICOLON -> ^( VAR_DEF type declarator ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:27:9: type declarator K_SEMICOLON // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_type_in_variable166];
+        type7 = [self type];
+
+        [self popFollow];
+
+
+        [stream_type addElement:[type7 getTree]];
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_declarator_in_variable168];
+        declarator8 = [self declarator];
+
+        [self popFollow];
+
+
+        [stream_declarator addElement:[declarator8 getTree]];
+         
+        K_SEMICOLON9=(CommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_variable170];  
+            [stream_K_SEMICOLON addElement:K_SEMICOLON9];
+
+         
+        // AST REWRITE
+        // elements: declarator, type
+        // token labels: 
+        // rule labels: retval
+        // token list labels: 
+        // rule list labels: 
+        // wildcard labels: 
+        retval.tree = root_0;
+
+        RewriteRuleSubtreeStream *stream_retval =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+        // 27:37: -> ^( VAR_DEF type declarator )
+        {
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:27:40: ^( VAR_DEF type declarator )
+            {
+                CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                        [[treeAdaptor createTree:VAR_DEF Text:@"VAR_DEF"] retain]
+                 old:root_1];
+
+                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
+
+                [treeAdaptor addChild:[stream_declarator nextTree] toTree:root_1];
+
+                [treeAdaptor addChild:root_1 toTree:root_0];
+            }
+
+        }
+
+
+        retval.tree = root_0;
+
+
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_K_SEMICOLON release];
+        [stream_declarator release];
+        [stream_type release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end variable */
+
+/*
+ * $ANTLR start declarator
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:30:1: declarator : K_ID ;
+ */
+- (SimpleCParser_declarator_return *) declarator
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_declarator_return * retval = [SimpleCParser_declarator_return newSimpleCParser_declarator_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *K_ID10 = nil;
+
+        CommonTree *K_ID10_tree=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:31:5: ( K_ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:31:9: K_ID // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        /* ASTParser tokenRef */
+        K_ID10=(CommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_declarator199]; 
+        K_ID10_tree = /* ASTParser createNodeFromToken */
+        (CommonTree *)[[treeAdaptor create:K_ID10] retain]
+        ;
+        [treeAdaptor addChild:K_ID10_tree  toTree:root_0];
+
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end declarator */
+
+/*
+ * $ANTLR start functionHeader
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:34:1: functionHeader : type K_ID K_LCURVE ( formalParameter ( K_COMMA formalParameter )* )? K_RCURVE -> ^( FUNC_HDR type K_ID ( formalParameter )+ ) ;
+ */
+- (SimpleCParser_functionHeader_return *) functionHeader
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_functionHeader_return * retval = [SimpleCParser_functionHeader_return newSimpleCParser_functionHeader_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *K_ID12 = nil;
+        CommonToken *K_LCURVE13 = nil;
+        CommonToken *K_COMMA15 = nil;
+        CommonToken *K_RCURVE17 = nil;SimpleCParser_type_return * type11 = nil ;
+
+        SimpleCParser_formalParameter_return * formalParameter14 = nil ;
+
+        SimpleCParser_formalParameter_return * formalParameter16 = nil ;
+
+
+        CommonTree *K_ID12_tree=nil;
+        CommonTree *K_LCURVE13_tree=nil;
+        CommonTree *K_COMMA15_tree=nil;
+        CommonTree *K_RCURVE17_tree=nil;
+        RewriteRuleTokenStream *stream_K_ID =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_ID"] retain];
+        RewriteRuleTokenStream *stream_K_LCURVE =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_LCURVE"] retain];
+        RewriteRuleTokenStream *stream_K_RCURVE =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_RCURVE"] retain];
+        RewriteRuleTokenStream *stream_K_COMMA =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_COMMA"] retain];
+        RewriteRuleSubtreeStream *stream_formalParameter =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule formalParameter"] retain];
+        RewriteRuleSubtreeStream *stream_type =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule type"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:35:5: ( type K_ID K_LCURVE ( formalParameter ( K_COMMA formalParameter )* )? K_RCURVE -> ^( FUNC_HDR type K_ID ( formalParameter )+ ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:35:9: type K_ID K_LCURVE ( formalParameter ( K_COMMA formalParameter )* )? K_RCURVE // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_type_in_functionHeader219];
+        type11 = [self type];
+
+        [self popFollow];
+
+
+        [stream_type addElement:[type11 getTree]];
+         
+        K_ID12=(CommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_functionHeader221];  
+            [stream_K_ID addElement:K_ID12];
+
+         
+        K_LCURVE13=(CommonToken *)[self match:input TokenType:K_LCURVE Follow:FOLLOW_K_LCURVE_in_functionHeader223];  
+            [stream_K_LCURVE addElement:K_LCURVE13];
+
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:35:28: ( formalParameter ( K_COMMA formalParameter )* )? // block
+        NSInteger alt4=2;
+        NSInteger LA4_0 = [input LA:1];
+
+        if ( (LA4_0==K_CHAR||LA4_0==K_ID||LA4_0==K_INT_TYPE||LA4_0==K_VOID) ) {
+            alt4=1;
+        }
+        switch (alt4) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:35:30: formalParameter ( K_COMMA formalParameter )* // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_formalParameter_in_functionHeader227];
+                formalParameter14 = [self formalParameter];
+
+                [self popFollow];
+
+
+                [stream_formalParameter addElement:[formalParameter14 getTree]];
+                 
+
+                do {
+                    NSInteger alt3=2;
+                    NSInteger LA3_0 = [input LA:1];
+                    if ( (LA3_0==K_COMMA) ) {
+                        alt3=1;
+                    }
+
+
+                    switch (alt3) {
+                        case 1 : ;
+                            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:35:48: K_COMMA formalParameter // alt
+                            {
+
+                            K_COMMA15=(CommonToken *)[self match:input TokenType:K_COMMA Follow:FOLLOW_K_COMMA_in_functionHeader231];  
+                                [stream_K_COMMA addElement:K_COMMA15];
+
+                             
+                            /* ruleRef */
+                            [self pushFollow:FOLLOW_formalParameter_in_functionHeader233];
+                            formalParameter16 = [self formalParameter];
+
+                            [self popFollow];
+
+
+                            [stream_formalParameter addElement:[formalParameter16 getTree]];
+                             
+                            }
+                            break;
+
+                        default :
+                            goto loop3;
+                    }
+                } while (YES);
+                loop3: ;
+
+                 
+                }
+                break;
+
+        }
+
+         
+        K_RCURVE17=(CommonToken *)[self match:input TokenType:K_RCURVE Follow:FOLLOW_K_RCURVE_in_functionHeader241];  
+            [stream_K_RCURVE addElement:K_RCURVE17];
+
+         
+        // AST REWRITE
+        // elements: K_ID, type, formalParameter
+        // token labels: 
+        // rule labels: retval
+        // token list labels: 
+        // rule list labels: 
+        // wildcard labels: 
+        retval.tree = root_0;
+
+        RewriteRuleSubtreeStream *stream_retval =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+        // 36:9: -> ^( FUNC_HDR type K_ID ( formalParameter )+ )
+        {
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:36:12: ^( FUNC_HDR type K_ID ( formalParameter )+ )
+            {
+                CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                        [[treeAdaptor createTree:FUNC_HDR Text:@"FUNC_HDR"] retain]
+                 old:root_1];
+
+                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
+
+                 // TODO: args: 
+                [treeAdaptor addChild:
+                            [stream_K_ID nextNode]
+                 toTree:root_1];
+
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:36:33: ( formalParameter )+
+                {
+                if ( !([stream_formalParameter hasNext]) ) {
+                    @throw [RewriteEarlyExitException newException];
+                }
+                while ( [stream_formalParameter hasNext] ) {
+                    [treeAdaptor addChild:[stream_formalParameter nextTree] toTree:root_1];
+
+                }
+                [stream_formalParameter reset];
+
+                }
+                [treeAdaptor addChild:root_1 toTree:root_0];
+            }
+
+        }
+
+
+        retval.tree = root_0;
+
+
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_K_ID release];
+        [stream_K_LCURVE release];
+        [stream_K_RCURVE release];
+        [stream_K_COMMA release];
+        [stream_formalParameter release];
+        [stream_type release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end functionHeader */
+
+/*
+ * $ANTLR start formalParameter
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:39:1: formalParameter : type declarator -> ^( ARG_DEF type declarator ) ;
+ */
+- (SimpleCParser_formalParameter_return *) formalParameter
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_formalParameter_return * retval = [SimpleCParser_formalParameter_return newSimpleCParser_formalParameter_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        SimpleCParser_type_return * type18 = nil ;
+
+        SimpleCParser_declarator_return * declarator19 = nil ;
+
+
+        RewriteRuleSubtreeStream *stream_declarator =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule declarator"] retain];
+        RewriteRuleSubtreeStream *stream_type =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule type"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:40:5: ( type declarator -> ^( ARG_DEF type declarator ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:40:9: type declarator // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_type_in_formalParameter281];
+        type18 = [self type];
+
+        [self popFollow];
+
+
+        [stream_type addElement:[type18 getTree]];
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_declarator_in_formalParameter283];
+        declarator19 = [self declarator];
+
+        [self popFollow];
+
+
+        [stream_declarator addElement:[declarator19 getTree]];
+         
+        // AST REWRITE
+        // elements: type, declarator
+        // token labels: 
+        // rule labels: retval
+        // token list labels: 
+        // rule list labels: 
+        // wildcard labels: 
+        retval.tree = root_0;
+
+        RewriteRuleSubtreeStream *stream_retval =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+        // 40:25: -> ^( ARG_DEF type declarator )
+        {
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:40:28: ^( ARG_DEF type declarator )
+            {
+                CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                        [[treeAdaptor createTree:ARG_DEF Text:@"ARG_DEF"] retain]
+                 old:root_1];
+
+                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
+
+                [treeAdaptor addChild:[stream_declarator nextTree] toTree:root_1];
+
+                [treeAdaptor addChild:root_1 toTree:root_0];
+            }
+
+        }
+
+
+        retval.tree = root_0;
+
+
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_declarator release];
+        [stream_type release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end formalParameter */
+
+/*
+ * $ANTLR start type
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:43:1: type : ( K_INT_TYPE | K_CHAR | K_VOID | K_ID );
+ */
+- (SimpleCParser_type_return *) type
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_type_return * retval = [SimpleCParser_type_return newSimpleCParser_type_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *set20 = nil;
+
+        CommonTree *set20_tree=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:44:5: ( K_INT_TYPE | K_CHAR | K_VOID | K_ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g: // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        /* ASTParser matchRuleBlockSet */
+        /* ASTParser matchSet */
+        set20 = (CommonToken *)[input LT:1]; /* matchSet */
+
+        if ([input LA:1] == K_CHAR||[input LA:1] == K_ID||[input LA:1] == K_INT_TYPE||[input LA:1] == K_VOID) {
+            [input consume];
+            [treeAdaptor addChild:/* ASTParser createNodeFromToken */
+            (CommonTree *)[[treeAdaptor create:set20] retain]
+             toTree:root_0 ];
+            [state setIsErrorRecovery:NO];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            @throw mse;
+        }
+
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end type */
+
+/*
+ * $ANTLR start block
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:50:1: block : lc= K_LCURLY ( variable )* ( stat )* K_RCURLY -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* ) ;
+ */
+- (SimpleCParser_block_return *) block
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_block_return * retval = [SimpleCParser_block_return newSimpleCParser_block_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *lc = nil;
+        CommonToken *K_RCURLY23 = nil;SimpleCParser_variable_return * variable21 = nil ;
+
+        SimpleCParser_stat_return * stat22 = nil ;
+
+
+        CommonTree *lc_tree=nil;
+        CommonTree *K_RCURLY23_tree=nil;
+        RewriteRuleTokenStream *stream_K_LCURLY =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_LCURLY"] retain];
+        RewriteRuleTokenStream *stream_K_RCURLY =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_RCURLY"] retain];
+        RewriteRuleSubtreeStream *stream_variable =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule variable"] retain];
+        RewriteRuleSubtreeStream *stream_stat =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule stat"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:51:5: (lc= K_LCURLY ( variable )* ( stat )* K_RCURLY -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:51:9: lc= K_LCURLY ( variable )* ( stat )* K_RCURLY // alt
+        {
+
+        lc=(CommonToken *)[self match:input TokenType:K_LCURLY Follow:FOLLOW_K_LCURLY_in_block376];  
+            [stream_K_LCURLY addElement:lc];
+
+         
+
+        do {
+            NSInteger alt5=2;
+            NSInteger LA5_0 = [input LA:1];
+            if ( (LA5_0==K_ID) ) {
+                NSInteger LA5_2 = [input LA:2];
+                if ( (LA5_2==K_ID) ) {
+                    alt5=1;
+                }
+
+
+            }
+            else if ( (LA5_0==K_CHAR||LA5_0==K_INT_TYPE||LA5_0==K_VOID) ) {
+                alt5=1;
+            }
+
+
+            switch (alt5) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:52:13: variable // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_variable_in_block390];
+                    variable21 = [self variable];
+
+                    [self popFollow];
+
+
+                    [stream_variable addElement:[variable21 getTree]];
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop5;
+            }
+        } while (YES);
+        loop5: ;
+
+         
+
+        do {
+            NSInteger alt6=2;
+            NSInteger LA6_0 = [input LA:1];
+            if ( ((LA6_0 >= K_FOR && LA6_0 <= K_INT)||(LA6_0 >= K_LCURLY && LA6_0 <= K_LCURVE)||LA6_0==K_SEMICOLON) ) {
+                alt6=1;
+            }
+
+
+            switch (alt6) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:53:13: stat // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_stat_in_block405];
+                    stat22 = [self stat];
+
+                    [self popFollow];
+
+
+                    [stream_stat addElement:[stat22 getTree]];
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop6;
+            }
+        } while (YES);
+        loop6: ;
+
+         
+        K_RCURLY23=(CommonToken *)[self match:input TokenType:K_RCURLY Follow:FOLLOW_K_RCURLY_in_block416];  
+            [stream_K_RCURLY addElement:K_RCURLY23];
+
+         
+        // AST REWRITE
+        // elements: stat, variable
+        // token labels: 
+        // rule labels: retval
+        // token list labels: 
+        // rule list labels: 
+        // wildcard labels: 
+        retval.tree = root_0;
+
+        RewriteRuleSubtreeStream *stream_retval =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+        // 55:9: -> ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* )
+        {
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:55:12: ^( BLOCK[$lc,@\"BLOCK\"] ( variable )* ( stat )* )
+            {
+                CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                        [[treeAdaptor createTree:BLOCK FromToken:lc Text:@"BLOCK"] retain]
+                 old:root_1];
+
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:55:34: ( variable )*
+                while ( [stream_variable hasNext] ) {
+                    [treeAdaptor addChild:[stream_variable nextTree] toTree:root_1];
+
+                }
+                [stream_variable reset];
+
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:55:44: ( stat )*
+                while ( [stream_stat hasNext] ) {
+                    [treeAdaptor addChild:[stream_stat nextTree] toTree:root_1];
+
+                }
+                [stream_stat reset];
+
+                [treeAdaptor addChild:root_1 toTree:root_0];
+            }
+
+        }
+
+
+        retval.tree = root_0;
+
+
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_K_LCURLY release];
+        [stream_K_RCURLY release];
+        [stream_variable release];
+        [stream_stat release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end block */
+
+/*
+ * $ANTLR start stat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:58:1: stat : ( forStat | expr K_SEMICOLON !| block | assignStat K_SEMICOLON !| K_SEMICOLON !);
+ */
+- (SimpleCParser_stat_return *) stat
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_stat_return * retval = [SimpleCParser_stat_return newSimpleCParser_stat_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *K_SEMICOLON26 = nil;
+        CommonToken *K_SEMICOLON29 = nil;
+        CommonToken *K_SEMICOLON30 = nil;SimpleCParser_forStat_return * forStat24 = nil ;
+
+        SimpleCParser_expr_return * expr25 = nil ;
+
+        SimpleCParser_block_return * block27 = nil ;
+
+        SimpleCParser_assignStat_return * assignStat28 = nil ;
+
+
+        CommonTree *K_SEMICOLON26_tree=nil;
+        CommonTree *K_SEMICOLON29_tree=nil;
+        CommonTree *K_SEMICOLON30_tree=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:58:5: ( forStat | expr K_SEMICOLON !| block | assignStat K_SEMICOLON !| K_SEMICOLON !) //ruleblock
+        NSInteger alt7=5;
+        unichar charLA7 = [input LA:1];
+        switch (charLA7) {
+            case K_FOR: ;
+                {
+                alt7=1;
+                }
+                break;
+            case K_ID: ;
+                {
+                NSInteger LA7_2 = [input LA:2];
+
+                if ( (LA7_2==K_EQ) ) {
+                    alt7=4;
+                }
+                else if ( (LA7_2==K_EQEQ||(LA7_2 >= K_LT && LA7_2 <= K_PLUS)||LA7_2==K_SEMICOLON) ) {
+                    alt7=2;
+                }
+                else {
+                    NoViableAltException *nvae = [NoViableAltException newException:7 state:2 stream:input];
+                    nvae.c = LA7_2;
+                    @throw nvae;
+
+                }
+                }
+                break;
+            case K_INT: ;
+            case K_LCURVE: ;
+                {
+                alt7=2;
+                }
+                break;
+            case K_LCURLY: ;
+                {
+                alt7=3;
+                }
+                break;
+            case K_SEMICOLON: ;
+                {
+                alt7=5;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:7 state:0 stream:input];
+            nvae.c = charLA7;
+            @throw nvae;
+
+        }
+
+        switch (alt7) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:58:7: forStat // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTParser ruleRef */
+                /* ruleRef */
+                [self pushFollow:FOLLOW_forStat_in_stat449];
+                forStat24 = [self forStat];
+
+                [self popFollow];
+
+
+                [treeAdaptor addChild:[forStat24 getTree] toTree:root_0];
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:59:7: expr K_SEMICOLON ! // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTParser ruleRef */
+                /* ruleRef */
+                [self pushFollow:FOLLOW_expr_in_stat457];
+                expr25 = [self expr];
+
+                [self popFollow];
+
+
+                [treeAdaptor addChild:[expr25 getTree] toTree:root_0];
+                 
+                K_SEMICOLON26=(CommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_stat459]; 
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:60:7: block // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTParser ruleRef */
+                /* ruleRef */
+                [self pushFollow:FOLLOW_block_in_stat468];
+                block27 = [self block];
+
+                [self popFollow];
+
+
+                [treeAdaptor addChild:[block27 getTree] toTree:root_0];
+                 
+                }
+                break;
+            case 4 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:61:7: assignStat K_SEMICOLON ! // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTParser ruleRef */
+                /* ruleRef */
+                [self pushFollow:FOLLOW_assignStat_in_stat476];
+                assignStat28 = [self assignStat];
+
+                [self popFollow];
+
+
+                [treeAdaptor addChild:[assignStat28 getTree] toTree:root_0];
+                 
+                K_SEMICOLON29=(CommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_stat478]; 
+                 
+                }
+                break;
+            case 5 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:62:7: K_SEMICOLON ! // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                K_SEMICOLON30=(CommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_stat487]; 
+                 
+                }
+                break;
+
+        }
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end stat */
+
+/*
+ * $ANTLR start forStat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:65:1: forStat : K_FOR K_LCURVE start= assignStat K_SEMICOLON expr K_SEMICOLON next= assignStat K_RCURVE block -> ^( K_FOR $start expr $next block ) ;
+ */
+- (SimpleCParser_forStat_return *) forStat
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_forStat_return * retval = [SimpleCParser_forStat_return newSimpleCParser_forStat_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *K_FOR31 = nil;
+        CommonToken *K_LCURVE32 = nil;
+        CommonToken *K_SEMICOLON33 = nil;
+        CommonToken *K_SEMICOLON35 = nil;
+        CommonToken *K_RCURVE36 = nil;SimpleCParser_assignStat_return * start = nil ;
+
+        SimpleCParser_assignStat_return * next = nil ;
+
+        SimpleCParser_expr_return * expr34 = nil ;
+
+        SimpleCParser_block_return * block37 = nil ;
+
+
+        CommonTree *K_FOR31_tree=nil;
+        CommonTree *K_LCURVE32_tree=nil;
+        CommonTree *K_SEMICOLON33_tree=nil;
+        CommonTree *K_SEMICOLON35_tree=nil;
+        CommonTree *K_RCURVE36_tree=nil;
+        RewriteRuleTokenStream *stream_K_LCURVE =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_LCURVE"] retain];
+        RewriteRuleTokenStream *stream_K_RCURVE =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_RCURVE"] retain];
+        RewriteRuleTokenStream *stream_K_SEMICOLON =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_SEMICOLON"] retain];
+        RewriteRuleTokenStream *stream_K_FOR =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_FOR"] retain];
+        RewriteRuleSubtreeStream *stream_assignStat =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule assignStat"] retain];
+        RewriteRuleSubtreeStream *stream_block =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule block"] retain];
+        RewriteRuleSubtreeStream *stream_expr =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule expr"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:66:5: ( K_FOR K_LCURVE start= assignStat K_SEMICOLON expr K_SEMICOLON next= assignStat K_RCURVE block -> ^( K_FOR $start expr $next block ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:66:9: K_FOR K_LCURVE start= assignStat K_SEMICOLON expr K_SEMICOLON next= assignStat K_RCURVE block // alt
+        {
+
+        K_FOR31=(CommonToken *)[self match:input TokenType:K_FOR Follow:FOLLOW_K_FOR_in_forStat507];  
+            [stream_K_FOR addElement:K_FOR31];
+
+         
+        K_LCURVE32=(CommonToken *)[self match:input TokenType:K_LCURVE Follow:FOLLOW_K_LCURVE_in_forStat509];  
+            [stream_K_LCURVE addElement:K_LCURVE32];
+
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_assignStat_in_forStat513];
+        start = [self assignStat];
+
+        [self popFollow];
+
+
+        [stream_assignStat addElement:[start getTree]];
+         
+        K_SEMICOLON33=(CommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_forStat515];  
+            [stream_K_SEMICOLON addElement:K_SEMICOLON33];
+
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_expr_in_forStat517];
+        expr34 = [self expr];
+
+        [self popFollow];
+
+
+        [stream_expr addElement:[expr34 getTree]];
+         
+        K_SEMICOLON35=(CommonToken *)[self match:input TokenType:K_SEMICOLON Follow:FOLLOW_K_SEMICOLON_in_forStat519];  
+            [stream_K_SEMICOLON addElement:K_SEMICOLON35];
+
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_assignStat_in_forStat523];
+        next = [self assignStat];
+
+        [self popFollow];
+
+
+        [stream_assignStat addElement:[next getTree]];
+         
+        K_RCURVE36=(CommonToken *)[self match:input TokenType:K_RCURVE Follow:FOLLOW_K_RCURVE_in_forStat525];  
+            [stream_K_RCURVE addElement:K_RCURVE36];
+
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_block_in_forStat527];
+        block37 = [self block];
+
+        [self popFollow];
+
+
+        [stream_block addElement:[block37 getTree]];
+         
+        // AST REWRITE
+        // elements: block, start, K_FOR, next, expr
+        // token labels: 
+        // rule labels: retval, start, next
+        // token list labels: 
+        // rule list labels: 
+        // wildcard labels: 
+        retval.tree = root_0;
+
+        RewriteRuleSubtreeStream *stream_retval =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+        RewriteRuleSubtreeStream *stream_start =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                description:@"token start" element:start!=nil?[start getTree]:nil] retain];
+        RewriteRuleSubtreeStream *stream_next =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                description:@"token next" element:next!=nil?[next getTree]:nil] retain];
+
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+        // 67:9: -> ^( K_FOR $start expr $next block )
+        {
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:67:12: ^( K_FOR $start expr $next block )
+            {
+                CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                            [stream_K_FOR nextNode]
+                 old:root_1];
+
+                [treeAdaptor addChild:[stream_start nextTree] toTree:root_1];
+
+                [treeAdaptor addChild:[stream_expr nextTree] toTree:root_1];
+
+                [treeAdaptor addChild:[stream_next nextTree] toTree:root_1];
+
+                [treeAdaptor addChild:[stream_block nextTree] toTree:root_1];
+
+                [treeAdaptor addChild:root_1 toTree:root_0];
+            }
+
+        }
+
+
+        retval.tree = root_0;
+
+
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_K_LCURVE release];
+        [stream_K_RCURVE release];
+        [stream_K_SEMICOLON release];
+        [stream_K_FOR release];
+        [stream_assignStat release];
+        [stream_block release];
+        [stream_expr release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end forStat */
+
+/*
+ * $ANTLR start assignStat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:70:1: assignStat : K_ID K_EQ expr -> ^( K_EQ K_ID expr ) ;
+ */
+- (SimpleCParser_assignStat_return *) assignStat
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_assignStat_return * retval = [SimpleCParser_assignStat_return newSimpleCParser_assignStat_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *K_ID38 = nil;
+        CommonToken *K_EQ39 = nil;SimpleCParser_expr_return * expr40 = nil ;
+
+
+        CommonTree *K_ID38_tree=nil;
+        CommonTree *K_EQ39_tree=nil;
+        RewriteRuleTokenStream *stream_K_ID =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_ID"] retain];
+        RewriteRuleTokenStream *stream_K_EQ =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_EQ"] retain];
+        RewriteRuleSubtreeStream *stream_expr =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule expr"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:71:5: ( K_ID K_EQ expr -> ^( K_EQ K_ID expr ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:71:9: K_ID K_EQ expr // alt
+        {
+
+        K_ID38=(CommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_assignStat570];  
+            [stream_K_ID addElement:K_ID38];
+
+         
+        K_EQ39=(CommonToken *)[self match:input TokenType:K_EQ Follow:FOLLOW_K_EQ_in_assignStat572];  
+            [stream_K_EQ addElement:K_EQ39];
+
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_expr_in_assignStat574];
+        expr40 = [self expr];
+
+        [self popFollow];
+
+
+        [stream_expr addElement:[expr40 getTree]];
+         
+        // AST REWRITE
+        // elements: expr, K_ID, K_EQ
+        // token labels: 
+        // rule labels: retval
+        // token list labels: 
+        // rule list labels: 
+        // wildcard labels: 
+        retval.tree = root_0;
+
+        RewriteRuleSubtreeStream *stream_retval =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+        // 71:24: -> ^( K_EQ K_ID expr )
+        {
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:71:27: ^( K_EQ K_ID expr )
+            {
+                CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                            [stream_K_EQ nextNode]
+                 old:root_1];
+
+                 // TODO: args: 
+                [treeAdaptor addChild:
+                            [stream_K_ID nextNode]
+                 toTree:root_1];
+
+                [treeAdaptor addChild:[stream_expr nextTree] toTree:root_1];
+
+                [treeAdaptor addChild:root_1 toTree:root_0];
+            }
+
+        }
+
+
+        retval.tree = root_0;
+
+
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_K_ID release];
+        [stream_K_EQ release];
+        [stream_expr release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end assignStat */
+
+/*
+ * $ANTLR start expr
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:74:1: expr : condExpr ;
+ */
+- (SimpleCParser_expr_return *) expr
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_expr_return * retval = [SimpleCParser_expr_return newSimpleCParser_expr_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        SimpleCParser_condExpr_return * condExpr41 = nil ;
+
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:74:5: ( condExpr ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:74:9: condExpr // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        /* ASTParser ruleRef */
+        /* ruleRef */
+        [self pushFollow:FOLLOW_condExpr_in_expr598];
+        condExpr41 = [self condExpr];
+
+        [self popFollow];
+
+
+        [treeAdaptor addChild:[condExpr41 getTree] toTree:root_0];
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end expr */
+
+/*
+ * $ANTLR start condExpr
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:77:1: condExpr : aexpr ( ( K_EQEQ ^| K_LT ^) aexpr )? ;
+ */
+- (SimpleCParser_condExpr_return *) condExpr
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_condExpr_return * retval = [SimpleCParser_condExpr_return newSimpleCParser_condExpr_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *K_EQEQ43 = nil;
+        CommonToken *K_LT44 = nil;
+        SimpleCParser_aexpr_return * aexpr42 = nil ;
+
+        SimpleCParser_aexpr_return * aexpr45 = nil ;
+
+
+        CommonTree *K_EQEQ43_tree=nil;
+        CommonTree *K_LT44_tree=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:78:5: ( aexpr ( ( K_EQEQ ^| K_LT ^) aexpr )? ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:78:9: aexpr ( ( K_EQEQ ^| K_LT ^) aexpr )? // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        /* ASTParser ruleRef */
+        /* ruleRef */
+        [self pushFollow:FOLLOW_aexpr_in_condExpr617];
+        aexpr42 = [self aexpr];
+
+        [self popFollow];
+
+
+        [treeAdaptor addChild:[aexpr42 getTree] toTree:root_0];
+         
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:78:15: ( ( K_EQEQ ^| K_LT ^) aexpr )? // block
+        NSInteger alt9=2;
+        NSInteger LA9_0 = [input LA:1];
+
+        if ( (LA9_0==K_EQEQ||LA9_0==K_LT) ) {
+            alt9=1;
+        }
+        switch (alt9) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:78:17: ( K_EQEQ ^| K_LT ^) aexpr // alt
+                {
+
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:78:17: ( K_EQEQ ^| K_LT ^) // block
+                NSInteger alt8=2;
+                NSInteger LA8_0 = [input LA:1];
+
+                if ( (LA8_0==K_EQEQ) ) {
+                    alt8=1;
+                }
+                else if ( (LA8_0==K_LT) ) {
+                    alt8=2;
+                }
+                else {
+                    NoViableAltException *nvae = [NoViableAltException newException:8 state:0 stream:input];
+                    nvae.c = LA8_0;
+                    @throw nvae;
+
+                }
+                switch (alt8) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:78:18: K_EQEQ ^ // alt
+                        {
+
+                        K_EQEQ43=(CommonToken *)[self match:input TokenType:K_EQEQ Follow:FOLLOW_K_EQEQ_in_condExpr622]; 
+                        K_EQEQ43_tree = /* ASTParser createNodeFromToken */
+                        (CommonTree *)[[treeAdaptor create:K_EQEQ43] retain]
+                        ;
+                        root_0 = (CommonTree *)[treeAdaptor becomeRoot:K_EQEQ43_tree old:root_0];
+
+                         
+                        }
+                        break;
+                    case 2 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:78:28: K_LT ^ // alt
+                        {
+
+                        K_LT44=(CommonToken *)[self match:input TokenType:K_LT Follow:FOLLOW_K_LT_in_condExpr627]; 
+                        K_LT44_tree = /* ASTParser createNodeFromToken */
+                        (CommonTree *)[[treeAdaptor create:K_LT44] retain]
+                        ;
+                        root_0 = (CommonTree *)[treeAdaptor becomeRoot:K_LT44_tree old:root_0];
+
+                         
+                        }
+                        break;
+
+                }
+
+                 
+                /* ASTParser ruleRef */
+                /* ruleRef */
+                [self pushFollow:FOLLOW_aexpr_in_condExpr631];
+                aexpr45 = [self aexpr];
+
+                [self popFollow];
+
+
+                [treeAdaptor addChild:[aexpr45 getTree] toTree:root_0];
+                 
+                }
+                break;
+
+        }
+
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end condExpr */
+
+/*
+ * $ANTLR start aexpr
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:81:1: aexpr : atom ( K_PLUS ^ atom )* ;
+ */
+- (SimpleCParser_aexpr_return *) aexpr
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_aexpr_return * retval = [SimpleCParser_aexpr_return newSimpleCParser_aexpr_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *K_PLUS47 = nil;
+        SimpleCParser_atom_return * atom46 = nil ;
+
+        SimpleCParser_atom_return * atom48 = nil ;
+
+
+        CommonTree *K_PLUS47_tree=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:82:5: ( atom ( K_PLUS ^ atom )* ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:82:9: atom ( K_PLUS ^ atom )* // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        /* ASTParser ruleRef */
+        /* ruleRef */
+        [self pushFollow:FOLLOW_atom_in_aexpr653];
+        atom46 = [self atom];
+
+        [self popFollow];
+
+
+        [treeAdaptor addChild:[atom46 getTree] toTree:root_0];
+         
+
+        do {
+            NSInteger alt10=2;
+            NSInteger LA10_0 = [input LA:1];
+            if ( (LA10_0==K_PLUS) ) {
+                alt10=1;
+            }
+
+
+            switch (alt10) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:82:16: K_PLUS ^ atom // alt
+                    {
+
+                    K_PLUS47=(CommonToken *)[self match:input TokenType:K_PLUS Follow:FOLLOW_K_PLUS_in_aexpr657]; 
+                    K_PLUS47_tree = /* ASTParser createNodeFromToken */
+                    (CommonTree *)[[treeAdaptor create:K_PLUS47] retain]
+                    ;
+                    root_0 = (CommonTree *)[treeAdaptor becomeRoot:K_PLUS47_tree old:root_0];
+
+                     
+                    /* ASTParser ruleRef */
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_atom_in_aexpr660];
+                    atom48 = [self atom];
+
+                    [self popFollow];
+
+
+                    [treeAdaptor addChild:[atom48 getTree] toTree:root_0];
+                     
+                    }
+                    break;
+
+                default :
+                    goto loop10;
+            }
+        } while (YES);
+        loop10: ;
+
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end aexpr */
+
+/*
+ * $ANTLR start atom
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:85:1: atom : ( K_ID | K_INT | K_LCURVE expr K_RCURVE -> expr );
+ */
+- (SimpleCParser_atom_return *) atom
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    SimpleCParser_atom_return * retval = [SimpleCParser_atom_return newSimpleCParser_atom_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *K_ID49 = nil;
+        CommonToken *K_INT50 = nil;
+        CommonToken *K_LCURVE51 = nil;
+        CommonToken *K_RCURVE53 = nil;
+        SimpleCParser_expr_return * expr52 = nil ;
+
+
+        CommonTree *K_ID49_tree=nil;
+        CommonTree *K_INT50_tree=nil;
+        CommonTree *K_LCURVE51_tree=nil;
+        CommonTree *K_RCURVE53_tree=nil;
+        RewriteRuleTokenStream *stream_K_LCURVE =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_LCURVE"] retain];
+        RewriteRuleTokenStream *stream_K_RCURVE =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token K_RCURVE"] retain];
+        RewriteRuleSubtreeStream *stream_expr =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule expr"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:86:5: ( K_ID | K_INT | K_LCURVE expr K_RCURVE -> expr ) //ruleblock
+        NSInteger alt11=3;
+        unichar charLA11 = [input LA:1];
+        switch (charLA11) {
+            case K_ID: ;
+                {
+                alt11=1;
+                }
+                break;
+            case K_INT: ;
+                {
+                alt11=2;
+                }
+                break;
+            case K_LCURVE: ;
+                {
+                alt11=3;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:11 state:0 stream:input];
+            nvae.c = charLA11;
+            @throw nvae;
+
+        }
+
+        switch (alt11) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:86:7: K_ID // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTParser tokenRef */
+                K_ID49=(CommonToken *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_atom680]; 
+                K_ID49_tree = /* ASTParser createNodeFromToken */
+                (CommonTree *)[[treeAdaptor create:K_ID49] retain]
+                ;
+                [treeAdaptor addChild:K_ID49_tree  toTree:root_0];
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:87:7: K_INT // alt
+                {
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+                /* ASTParser tokenRef */
+                K_INT50=(CommonToken *)[self match:input TokenType:K_INT Follow:FOLLOW_K_INT_in_atom694]; 
+                K_INT50_tree = /* ASTParser createNodeFromToken */
+                (CommonTree *)[[treeAdaptor create:K_INT50] retain]
+                ;
+                [treeAdaptor addChild:K_INT50_tree  toTree:root_0];
+
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.g:88:7: K_LCURVE expr K_RCURVE // alt
+                {
+
+                K_LCURVE51=(CommonToken *)[self match:input TokenType:K_LCURVE Follow:FOLLOW_K_LCURVE_in_atom708];  
+                    [stream_K_LCURVE addElement:K_LCURVE51];
+
+                 
+                /* ruleRef */
+                [self pushFollow:FOLLOW_expr_in_atom710];
+                expr52 = [self expr];
+
+                [self popFollow];
+
+
+                [stream_expr addElement:[expr52 getTree]];
+                 
+                K_RCURVE53=(CommonToken *)[self match:input TokenType:K_RCURVE Follow:FOLLOW_K_RCURVE_in_atom712];  
+                    [stream_K_RCURVE addElement:K_RCURVE53];
+
+                 
+                // AST REWRITE
+                // elements: expr
+                // token labels: 
+                // rule labels: retval
+                // token list labels: 
+                // rule list labels: 
+                // wildcard labels: 
+                retval.tree = root_0;
+
+                RewriteRuleSubtreeStream *stream_retval =
+                    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                        description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+                root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+                // 88:30: -> expr
+                {
+                    [treeAdaptor addChild:[stream_expr nextTree] toTree:root_0];
+
+                }
+
+
+                retval.tree = root_0;
+
+
+                }
+                break;
+
+        }
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_K_LCURVE release];
+        [stream_K_RCURVE release];
+        [stream_expr release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end atom */
+/* ObjC end rules */
+
+@end /* end of SimpleCParser implementation line 692 */
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g
new file mode 100644
index 0000000..a3a6293
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g
@@ -0,0 +1,69 @@
+tree grammar SimpleCTP;
+options {
+    tokenVocab = SimpleC;
+    language = ObjC;
+    ASTLabelType = ANTLRCommonTree;
+}
+
+scope Symbols
+{
+CommonTree *tree;
+}
+
+program
+    :   declaration+
+    ;
+
+declaration
+    :   variable
+    |   ^(FUNC_DECL functionHeader)
+    |   ^(FUNC_DEF functionHeader block)
+    ;
+
+variable
+    :   ^(VAR_DEF type declarator)
+    ;
+
+declarator
+    :   K_ID 
+    ;
+
+functionHeader
+    :   ^(FUNC_HDR type K_ID formalParameter+)
+    ;
+
+formalParameter
+    :   ^(ARG_DEF type declarator)
+    ;
+
+type
+    :   K_INT_TYPE
+    |   K_CHAR  
+    |   K_VOID
+    |   K_ID        
+    ;
+
+block
+    :   ^(BLOCK variable* stat*)
+    ;
+
+stat: forStat
+    | expr
+    | block
+    ;
+
+forStat
+    :   ^(K_FOR expr expr expr block)
+    ;
+
+expr:   ^(K_EQEQ expr expr)
+    |   ^(K_LT expr expr)
+    |   ^(K_PLUS expr expr)
+    |   ^(K_EQ K_ID e=expr) { NSLog(@"assigning \%@ to variable \%@", $e.text, $K_ID.text); }
+    |   atom
+    ;
+
+atom
+    : K_ID      
+    | K_INT      
+    ; 
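+
+// Note on how the pieces fit together: this tree grammar walks the AST built
+// by the SimpleC.g parser grammar. Each tree pattern here mirrors a rewrite
+// in that parser (for example, ^(K_EQ K_ID e=expr) above matches the subtree
+// produced by assignStat's "-> ^( K_EQ K_ID expr )" rewrite), and the
+// "tokenVocab = SimpleC" option keeps the token type numbers of the two
+// grammars in sync.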
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.h b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.h
new file mode 100644
index 0000000..e543b4c
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.h
@@ -0,0 +1,133 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g 2012-02-16 17:41:10
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* treeParserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define ARG_DEF 4
+#define BLOCK 5
+#define FUNC_DECL 6
+#define FUNC_DEF 7
+#define FUNC_HDR 8
+#define K_CHAR 9
+#define K_COMMA 10
+#define K_EQ 11
+#define K_EQEQ 12
+#define K_FOR 13
+#define K_ID 14
+#define K_INT 15
+#define K_INT_TYPE 16
+#define K_LCURLY 17
+#define K_LCURVE 18
+#define K_LT 19
+#define K_PLUS 20
+#define K_RCURLY 21
+#define K_RCURVE 22
+#define K_SEMICOLON 23
+#define K_VOID 24
+#define VAR_DEF 25
+#define WS 26
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+/* globalAttributeScopeInterface */
+@interface Symbols_Scope : SymbolsScope {
+ANTLRCommonTree * tree;
+ }
+/* start of globalAttributeScopeInterface properties */
+@property (assign, getter=gettree, setter=settree:) ANTLRCommonTree * tree;
+/* end globalAttributeScopeInterface properties */
++ (Symbols_Scope *)newSymbols_Scope;
+- (id) init;
+/* start of globalAttributeScopeInterface methodsDecl */
+- (ANTLRCommonTree *)gettree;
+- (void)settree:(ANTLRCommonTree *)aVal;
+/* End of globalAttributeScopeInterface methodsDecl */
+@end /* end of Symbols_Scope interface */
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+/* returnScopeInterface SimpleCTP_expr_return */
+@interface SimpleCTP_expr_return : TreeRuleReturnScope { /* returnScopeInterface line 1838 */
+ /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+
+
+/* start of method declarations */
+
++ (SimpleCTP_expr_return *)newSimpleCTP_expr_return;
+/* this is start of set and get methods */
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+
+/* Interface grammar class */
+@interface SimpleCTP  : TreeParser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+/* globalAttributeScopeMemVar */
+SymbolStack *Symbols_stack;
+Symbols_Scope *Symbols_scope;
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* ObjC end of memVars */
+
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newSimpleCTP:(id<TreeNodeStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* ObjC end of methodsDecl */
+
+- (void)program; 
+- (void)declaration; 
+- (void)variable; 
+- (void)declarator; 
+- (void)functionHeader; 
+- (void)formalParameter; 
+- (void)type; 
+- (void)block; 
+- (void)stat; 
+- (void)forStat; 
+- (SimpleCTP_expr_return *)expr; 
+- (void)atom; 
+
+
+@end /* end of SimpleCTP interface */
+
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.java b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.java
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output1/SimpleCTP.java
rename to runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.java
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.m b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.m
new file mode 100644
index 0000000..de4a62c
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.m
@@ -0,0 +1,1304 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g
+ *     -                            On : 2012-02-16 17:41:10
+ *     -           for the tree parser : SimpleCTPTreeParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
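+
+/* Regeneration note: assuming an ANTLR 3.x build that includes the ObjC
+ * target (the jar name below is illustrative), a command along the lines of
+ *
+ *     java -cp antlr-3.4-complete.jar org.antlr.Tool SimpleCTP.g
+ *
+ * rewrites SimpleCTP.h and SimpleCTP.m from the grammar, so hand edits to
+ * this file do not survive regeneration.
+ */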
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g 2012-02-16 17:41:10
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "SimpleCTP.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_declaration_in_program56;
+static const unsigned long long FOLLOW_declaration_in_program56_data[] = { 0x00000000020000C2LL};
+static ANTLRBitSet *FOLLOW_variable_in_declaration76;
+static const unsigned long long FOLLOW_variable_in_declaration76_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_FUNC_DECL_in_declaration87;
+static const unsigned long long FOLLOW_FUNC_DECL_in_declaration87_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_functionHeader_in_declaration89;
+static const unsigned long long FOLLOW_functionHeader_in_declaration89_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_FUNC_DEF_in_declaration101;
+static const unsigned long long FOLLOW_FUNC_DEF_in_declaration101_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_functionHeader_in_declaration103;
+static const unsigned long long FOLLOW_functionHeader_in_declaration103_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_block_in_declaration105;
+static const unsigned long long FOLLOW_block_in_declaration105_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_VAR_DEF_in_variable126;
+static const unsigned long long FOLLOW_VAR_DEF_in_variable126_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_type_in_variable128;
+static const unsigned long long FOLLOW_type_in_variable128_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_declarator_in_variable130;
+static const unsigned long long FOLLOW_declarator_in_variable130_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_K_ID_in_declarator150;
+static const unsigned long long FOLLOW_K_ID_in_declarator150_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_FUNC_HDR_in_functionHeader171;
+static const unsigned long long FOLLOW_FUNC_HDR_in_functionHeader171_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_type_in_functionHeader173;
+static const unsigned long long FOLLOW_type_in_functionHeader173_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_K_ID_in_functionHeader175;
+static const unsigned long long FOLLOW_K_ID_in_functionHeader175_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader177;
+static const unsigned long long FOLLOW_formalParameter_in_functionHeader177_data[] = { 0x0000000000000018LL};
+static ANTLRBitSet *FOLLOW_ARG_DEF_in_formalParameter199;
+static const unsigned long long FOLLOW_ARG_DEF_in_formalParameter199_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_type_in_formalParameter201;
+static const unsigned long long FOLLOW_type_in_formalParameter201_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_declarator_in_formalParameter203;
+static const unsigned long long FOLLOW_declarator_in_formalParameter203_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_BLOCK_in_block283;
+static const unsigned long long FOLLOW_BLOCK_in_block283_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_variable_in_block285;
+static const unsigned long long FOLLOW_variable_in_block285_data[] = { 0x000000000218F828LL};
+static ANTLRBitSet *FOLLOW_stat_in_block288;
+static const unsigned long long FOLLOW_stat_in_block288_data[] = { 0x000000000018F828LL};
+static ANTLRBitSet *FOLLOW_forStat_in_stat302;
+static const unsigned long long FOLLOW_forStat_in_stat302_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_expr_in_stat310;
+static const unsigned long long FOLLOW_expr_in_stat310_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_block_in_stat318;
+static const unsigned long long FOLLOW_block_in_stat318_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_K_FOR_in_forStat338;
+static const unsigned long long FOLLOW_K_FOR_in_forStat338_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_expr_in_forStat340;
+static const unsigned long long FOLLOW_expr_in_forStat340_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_forStat342;
+static const unsigned long long FOLLOW_expr_in_forStat342_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_forStat344;
+static const unsigned long long FOLLOW_expr_in_forStat344_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_block_in_forStat346;
+static const unsigned long long FOLLOW_block_in_forStat346_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_K_EQEQ_in_expr362;
+static const unsigned long long FOLLOW_K_EQEQ_in_expr362_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr364;
+static const unsigned long long FOLLOW_expr_in_expr364_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr366;
+static const unsigned long long FOLLOW_expr_in_expr366_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_K_LT_in_expr378;
+static const unsigned long long FOLLOW_K_LT_in_expr378_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr380;
+static const unsigned long long FOLLOW_expr_in_expr380_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr382;
+static const unsigned long long FOLLOW_expr_in_expr382_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_K_PLUS_in_expr394;
+static const unsigned long long FOLLOW_K_PLUS_in_expr394_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr396;
+static const unsigned long long FOLLOW_expr_in_expr396_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr398;
+static const unsigned long long FOLLOW_expr_in_expr398_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_K_EQ_in_expr410;
+static const unsigned long long FOLLOW_K_EQ_in_expr410_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_K_ID_in_expr412;
+static const unsigned long long FOLLOW_K_ID_in_expr412_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr416;
+static const unsigned long long FOLLOW_expr_in_expr416_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_atom_in_expr429;
+static const unsigned long long FOLLOW_atom_in_expr429_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+@implementation Symbols_Scope  /* globalAttributeScopeImplementation */
+/* start of synthesize -- OBJC-Line 1750 */
+@synthesize tree; 
+
++ (Symbols_Scope *)newSymbols_Scope
+{
+return [[Symbols_Scope alloc] init];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* start of iterate get and set functions */
+- (ANTLRCommonTree *)gettree { return( tree ); }
+- (void)settree:(ANTLRCommonTree *)aVal { tree = aVal; }
+
+/* End of iterate get and set functions */
+@end /* end of Symbols_Scope implementation */
+
+
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+@implementation SimpleCTP_expr_return /* returnScopeImplementation */
+ /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCTP_expr_return *)newSimpleCTP_expr_return
+{
+return [[[SimpleCTP_expr_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+
+
+@end /* end of returnScope implementation */
+
+
+
+@implementation SimpleCTP  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+static SymbolStack *_stack;  /* global attribute scope stack emitted by the generator */
+ 
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_declaration_in_program56 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declaration_in_program56_data Count:(NSUInteger)1] retain];
+    FOLLOW_variable_in_declaration76 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_declaration76_data Count:(NSUInteger)1] retain];
+    FOLLOW_FUNC_DECL_in_declaration87 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_DECL_in_declaration87_data Count:(NSUInteger)1] retain];
+    FOLLOW_functionHeader_in_declaration89 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration89_data Count:(NSUInteger)1] retain];
+    FOLLOW_FUNC_DEF_in_declaration101 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_DEF_in_declaration101_data Count:(NSUInteger)1] retain];
+    FOLLOW_functionHeader_in_declaration103 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration103_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_declaration105 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_declaration105_data Count:(NSUInteger)1] retain];
+    FOLLOW_VAR_DEF_in_variable126 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_VAR_DEF_in_variable126_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_variable128 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_variable128_data Count:(NSUInteger)1] retain];
+    FOLLOW_declarator_in_variable130 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_variable130_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_ID_in_declarator150 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_declarator150_data Count:(NSUInteger)1] retain];
+    FOLLOW_FUNC_HDR_in_functionHeader171 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_HDR_in_functionHeader171_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_functionHeader173 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_functionHeader173_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_ID_in_functionHeader175 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_functionHeader175_data Count:(NSUInteger)1] retain];
+    FOLLOW_formalParameter_in_functionHeader177 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader177_data Count:(NSUInteger)1] retain];
+    FOLLOW_ARG_DEF_in_formalParameter199 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ARG_DEF_in_formalParameter199_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_formalParameter201 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_formalParameter201_data Count:(NSUInteger)1] retain];
+    FOLLOW_declarator_in_formalParameter203 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_formalParameter203_data Count:(NSUInteger)1] retain];
+    FOLLOW_BLOCK_in_block283 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_BLOCK_in_block283_data Count:(NSUInteger)1] retain];
+    FOLLOW_variable_in_block285 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_block285_data Count:(NSUInteger)1] retain];
+    FOLLOW_stat_in_block288 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block288_data Count:(NSUInteger)1] retain];
+    FOLLOW_forStat_in_stat302 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_forStat_in_stat302_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_stat310 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_stat310_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_stat318 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat318_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_FOR_in_forStat338 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_FOR_in_forStat338_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_forStat340 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat340_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_forStat342 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat342_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_forStat344 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat344_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_forStat346 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_forStat346_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_EQEQ_in_expr362 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQEQ_in_expr362_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr364 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr364_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr366 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr366_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_LT_in_expr378 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_LT_in_expr378_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr380 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr380_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr382 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr382_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_PLUS_in_expr394 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_PLUS_in_expr394_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr396 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr396_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr398 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr398_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_EQ_in_expr410 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQ_in_expr410_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_ID_in_expr412 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_expr412_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr416 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr416_data Count:(NSUInteger)1] retain];
+    FOLLOW_atom_in_expr429 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_expr429_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"ARG_DEF", @"BLOCK", @"FUNC_DECL", @"FUNC_DEF", @"FUNC_HDR", @"K_CHAR", 
+ @"K_COMMA", @"K_EQ", @"K_EQEQ", @"K_FOR", @"K_ID", @"K_INT", @"K_INT_TYPE", 
+ @"K_LCURLY", @"K_LCURVE", @"K_LT", @"K_PLUS", @"K_RCURLY", @"K_RCURVE", 
+ @"K_SEMICOLON", @"K_VOID", @"VAR_DEF", @"WS", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g"];
+}
+
++ (SimpleCTP *)newSimpleCTP:(id<TreeNodeStream>)aStream
+{
+    return [[SimpleCTP alloc] initWithStream:aStream];
+}
+
+- (id) initWithStream:(id<TreeNodeStream>)aStream
+{
+    self = [super initWithStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:12+1] retain]];
+    if ( self != nil ) {
+        /* globalAttributeScopeInit */
+
+        Symbols_scope = [Symbols_Scope newSymbols_Scope];
+
+        Symbols_stack = [SymbolStack newSymbolStackWithLen:30];
+
+        /* start of actions-actionScope-init */
+        /* start of init */
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [Symbols_stack release];
+     
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start program
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:13:1: program : ( declaration )+ ;
+ */
+- (void) program
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:14:5: ( ( declaration )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:14:9: ( declaration )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:14:9: ( declaration )+ // positiveClosureBlock
+        NSInteger cnt1 = 0;
+        do {
+            NSInteger alt1 = 2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= FUNC_DECL && LA1_0 <= FUNC_DEF)||LA1_0==VAR_DEF) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:14:9: declaration // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_declaration_in_program56];
+                    [self declaration];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt1 >= 1 )
+                        goto loop1;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:1];
+                    @throw eee;
+            }
+            cnt1++;
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end program */
+
+/*
+ * $ANTLR start declaration
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:17:1: declaration : ( variable | ^( FUNC_DECL functionHeader ) | ^( FUNC_DEF functionHeader block ) );
+ */
+- (void) declaration
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:18:5: ( variable | ^( FUNC_DECL functionHeader ) | ^( FUNC_DEF functionHeader block ) ) //ruleblock
+        NSInteger alt2=3;
+        unichar charLA2 = [input LA:1];
+        switch (charLA2) {
+            case VAR_DEF: ;
+                {
+                alt2=1;
+                }
+                break;
+            case FUNC_DECL: ;
+                {
+                alt2=2;
+                }
+                break;
+            case FUNC_DEF: ;
+                {
+                alt2=3;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:2 state:0 stream:input];
+            nvae.c = charLA2;
+            @throw nvae;
+
+        }
+
+        switch (alt2) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:18:9: variable // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_variable_in_declaration76];
+                [self variable];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:19:9: ^( FUNC_DECL functionHeader ) // alt
+                {
+
+
+                [self match:input TokenType:FUNC_DECL Follow:FOLLOW_FUNC_DECL_in_declaration87]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_functionHeader_in_declaration89];
+                    [self functionHeader];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:20:9: ^( FUNC_DEF functionHeader block ) // alt
+                {
+
+
+                [self match:input TokenType:FUNC_DEF Follow:FOLLOW_FUNC_DEF_in_declaration101]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_functionHeader_in_declaration103];
+                    [self functionHeader];
+
+                    [self popFollow];
+
+
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_block_in_declaration105];
+                    [self block];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end declaration */
+
+/*
+ * $ANTLR start variable
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:23:1: variable : ^( VAR_DEF type declarator ) ;
+ */
+- (void) variable
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:24:5: ( ^( VAR_DEF type declarator ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:24:9: ^( VAR_DEF type declarator ) // alt
+        {
+
+
+        [self match:input TokenType:VAR_DEF Follow:FOLLOW_VAR_DEF_in_variable126]; 
+         
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+            /* ruleRef */
+            [self pushFollow:FOLLOW_type_in_variable128];
+            [self type];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_declarator_in_variable130];
+            [self declarator];
+
+            [self popFollow];
+
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end variable */
+
+/*
+ * $ANTLR start declarator
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:27:1: declarator : K_ID ;
+ */
+- (void) declarator
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:28:5: ( K_ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:28:9: K_ID // alt
+        {
+
+        [self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_declarator150]; 
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end declarator */
+
+/*
+ * $ANTLR start functionHeader
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:31:1: functionHeader : ^( FUNC_HDR type K_ID ( formalParameter )+ ) ;
+ */
+- (void) functionHeader
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:32:5: ( ^( FUNC_HDR type K_ID ( formalParameter )+ ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:32:9: ^( FUNC_HDR type K_ID ( formalParameter )+ ) // alt
+        {
+
+
+        [self match:input TokenType:FUNC_HDR Follow:FOLLOW_FUNC_HDR_in_functionHeader171]; 
+         
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+            /* ruleRef */
+            [self pushFollow:FOLLOW_type_in_functionHeader173];
+            [self type];
+
+            [self popFollow];
+
+
+             
+            [self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_functionHeader175]; 
+             
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:32:30: ( formalParameter )+ // positiveClosureBlock
+            NSInteger cnt3 = 0;
+            do {
+                NSInteger alt3 = 2;
+                NSInteger LA3_0 = [input LA:1];
+                if ( (LA3_0==ARG_DEF) ) {
+                    alt3=1;
+                }
+
+
+                switch (alt3) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:32:30: formalParameter // alt
+                        {
+
+                        /* ruleRef */
+                        [self pushFollow:FOLLOW_formalParameter_in_functionHeader177];
+                        [self formalParameter];
+
+                        [self popFollow];
+
+
+                         
+                        }
+                        break;
+
+                    default :
+                        if ( cnt3 >= 1 )
+                            goto loop3;
+                        EarlyExitException *eee =
+                            [EarlyExitException newException:input decisionNumber:3];
+                        @throw eee;
+                }
+                cnt3++;
+            } while (YES);
+            loop3: ;
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end functionHeader */
+
+/*
+ * $ANTLR start formalParameter
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:35:1: formalParameter : ^( ARG_DEF type declarator ) ;
+ */
+- (void) formalParameter
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:36:5: ( ^( ARG_DEF type declarator ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:36:9: ^( ARG_DEF type declarator ) // alt
+        {
+
+
+        [self match:input TokenType:ARG_DEF Follow:FOLLOW_ARG_DEF_in_formalParameter199]; 
+         
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+            /* ruleRef */
+            [self pushFollow:FOLLOW_type_in_formalParameter201];
+            [self type];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_declarator_in_formalParameter203];
+            [self declarator];
+
+            [self popFollow];
+
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end formalParameter */
+
+/*
+ * $ANTLR start type
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:39:1: type : ( K_INT_TYPE | K_CHAR | K_VOID | K_ID );
+ */
+- (void) type
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:40:5: ( K_INT_TYPE | K_CHAR | K_VOID | K_ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g: // alt
+        {
+
+        if ([input LA:1] == K_CHAR||[input LA:1] == K_ID||[input LA:1] == K_INT_TYPE||[input LA:1] == K_VOID) {
+            [input consume];
+            [state setIsErrorRecovery:NO];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            @throw mse;
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end type */
+
+/*
+ * $ANTLR start block
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:46:1: block : ^( BLOCK ( variable )* ( stat )* ) ;
+ */
+- (void) block
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:47:5: ( ^( BLOCK ( variable )* ( stat )* ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:47:9: ^( BLOCK ( variable )* ( stat )* ) // alt
+        {
+
+
+        [self match:input TokenType:BLOCK Follow:FOLLOW_BLOCK_in_block283]; 
+         
+        if ( [input LA:1] == DOWN ) {
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+
+            do {
+                NSInteger alt4=2;
+                NSInteger LA4_0 = [input LA:1];
+                if ( (LA4_0==VAR_DEF) ) {
+                    alt4=1;
+                }
+
+
+                switch (alt4) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:47:17: variable // alt
+                        {
+
+                        /* ruleRef */
+                        [self pushFollow:FOLLOW_variable_in_block285];
+                        [self variable];
+
+                        [self popFollow];
+
+
+                         
+                        }
+                        break;
+
+                    default :
+                        goto loop4;
+                }
+            } while (YES);
+            loop4: ;
+
+             
+
+            do {
+                NSInteger alt5=2;
+                NSInteger LA5_0 = [input LA:1];
+                if ( (LA5_0==BLOCK||(LA5_0 >= K_EQ && LA5_0 <= K_INT)||(LA5_0 >= K_LT && LA5_0 <= K_PLUS)) ) {
+                    alt5=1;
+                }
+
+
+                switch (alt5) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:47:27: stat // alt
+                        {
+
+                        /* ruleRef */
+                        [self pushFollow:FOLLOW_stat_in_block288];
+                        [self stat];
+
+                        [self popFollow];
+
+
+                         
+                        }
+                        break;
+
+                    default :
+                        goto loop5;
+                }
+            } while (YES);
+            loop5: ;
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end block */
+
+/*
+ * $ANTLR start stat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:50:1: stat : ( forStat | expr | block );
+ */
+- (void) stat
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:50:5: ( forStat | expr | block ) //ruleblock
+        NSInteger alt6=3;
+        unichar charLA6 = [input LA:1];
+        switch (charLA6) {
+            case K_FOR: ;
+                {
+                alt6=1;
+                }
+                break;
+            case K_EQ: ;
+            case K_EQEQ: ;
+            case K_ID: ;
+            case K_INT: ;
+            case K_LT: ;
+            case K_PLUS: ;
+                {
+                alt6=2;
+                }
+                break;
+            case BLOCK: ;
+                {
+                alt6=3;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:6 state:0 stream:input];
+            nvae.c = charLA6;
+            @throw nvae;
+
+        }
+
+        switch (alt6) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:50:7: forStat // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_forStat_in_stat302];
+                [self forStat];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:51:7: expr // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_expr_in_stat310];
+                [self expr];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:52:7: block // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_block_in_stat318];
+                [self block];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end stat */
+
+/*
+ * $ANTLR start forStat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:55:1: forStat : ^( K_FOR expr expr expr block ) ;
+ */
+- (void) forStat
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:56:5: ( ^( K_FOR expr expr expr block ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:56:9: ^( K_FOR expr expr expr block ) // alt
+        {
+
+
+        [self match:input TokenType:K_FOR Follow:FOLLOW_K_FOR_in_forStat338]; 
+         
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+            /* ruleRef */
+            [self pushFollow:FOLLOW_expr_in_forStat340];
+            [self expr];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_expr_in_forStat342];
+            [self expr];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_expr_in_forStat344];
+            [self expr];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_block_in_forStat346];
+            [self block];
+
+            [self popFollow];
+
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end forStat */
+
+/*
+ * $ANTLR start expr
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:59:1: expr : ( ^( K_EQEQ expr expr ) | ^( K_LT expr expr ) | ^( K_PLUS expr expr ) | ^( K_EQ K_ID e= expr ) | atom );
+ */
+- (SimpleCTP_expr_return *) expr
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+    SimpleCTP_expr_return * retval = [SimpleCTP_expr_return newSimpleCTP_expr_return];
+    [retval setStart:[input LT:1]];
+
+
+    @try {
+        /* ruleLabelDefs entry */
+        ANTLRCommonTree *K_ID1 = nil;SimpleCTP_expr_return * e = nil ;
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:59:5: ( ^( K_EQEQ expr expr ) | ^( K_LT expr expr ) | ^( K_PLUS expr expr ) | ^( K_EQ K_ID e= expr ) | atom ) //ruleblock
+        NSInteger alt7=5;
+        unichar charLA7 = [input LA:1];
+        switch (charLA7) {
+            case K_EQEQ: ;
+                {
+                alt7=1;
+                }
+                break;
+            case K_LT: ;
+                {
+                alt7=2;
+                }
+                break;
+            case K_PLUS: ;
+                {
+                alt7=3;
+                }
+                break;
+            case K_EQ: ;
+                {
+                alt7=4;
+                }
+                break;
+            case K_ID: ;
+            case K_INT: ;
+                {
+                alt7=5;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:7 state:0 stream:input];
+            nvae.c = charLA7;
+            @throw nvae;
+
+        }
+
+        switch (alt7) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:59:9: ^( K_EQEQ expr expr ) // alt
+                {
+
+
+                [self match:input TokenType:K_EQEQ Follow:FOLLOW_K_EQEQ_in_expr362]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr364];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr366];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:60:9: ^( K_LT expr expr ) // alt
+                {
+
+
+                [self match:input TokenType:K_LT Follow:FOLLOW_K_LT_in_expr378]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr380];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr382];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:61:9: ^( K_PLUS expr expr ) // alt
+                {
+
+
+                [self match:input TokenType:K_PLUS Follow:FOLLOW_K_PLUS_in_expr394]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr396];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr398];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+                }
+                break;
+            case 4 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:62:9: ^( K_EQ K_ID e= expr ) // alt
+                {
+
+
+                [self match:input TokenType:K_EQ Follow:FOLLOW_K_EQ_in_expr410]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    K_ID1=(ANTLRCommonTree *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_expr412]; 
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr416];
+                    e = [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+
+                 NSLog(@"assigning %@ to variable %@", (e!=nil?[[input getTokenStream] toStringFromStart:[[input getTreeAdaptor] getTokenStartIndex:[e getStart]]ToEnd:[[input getTreeAdaptor] getTokenStopIndex:[e getStart]]]:0), (K_ID1!=nil?K_ID1.text:nil)); 
+
+                 
+                }
+                break;
+            case 5 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:63:9: atom // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_atom_in_expr429];
+                [self atom];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end expr */
+
+/*
+ * $ANTLR start atom
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:66:1: atom : ( K_ID | K_INT );
+ */
+- (void) atom
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g:67:5: ( K_ID | K_INT ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.g: // alt
+        {
+
+        if ((([input LA:1] >= K_ID) && ([input LA:1] <= K_INT))) {
+            [input consume];
+            [state setIsErrorRecovery:NO];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            @throw mse;
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end atom */
+/* ObjC end rules */
+
+@end /* end of SimpleCTP implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.tokens b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.tokens
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.tokens
rename to runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCTP.tokens
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g
new file mode 100644
index 0000000..b6d6474
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g
@@ -0,0 +1,69 @@
+tree grammar SimpleCWalker;
+options {
+    tokenVocab = SimpleC;
+	language = ObjC;
+	ASTLabelType = CommonTree;
+}
+
+scope Symbols
+{
+CommonTree *tree;
+}
+
+program
+    :   declaration+
+    ;
+
+declaration
+    :   variable
+    |   ^(FUNC_DECL functionHeader)
+    |   ^(FUNC_DEF functionHeader block)
+    ;
+
+variable
+    :   ^(VAR_DEF type declarator)
+    ;
+
+declarator
+    :   K_ID 
+    ;
+
+functionHeader
+    :   ^(FUNC_HDR type K_ID formalParameter+)
+    ;
+
+formalParameter
+    :   ^(ARG_DEF type declarator)
+    ;
+
+type
+    :   K_INT_TYPE
+    |   K_CHAR  
+    |   K_VOID
+    |   K_ID        
+    ;
+
+block
+    :   ^(BLOCK variable* stat*)
+    ;
+
+stat: forStat
+    | expr
+    | block
+    ;
+
+forStat
+    :   ^(K_FOR expr expr expr block)
+    ;
+
+expr:   ^(K_EQEQ expr expr)
+    |   ^(K_LT expr expr)
+    |   ^(K_PLUS expr expr)
+    |   ^(K_EQ K_ID e=expr) { NSLog(@"assigning \%@ to variable \%@", $e.text, $K_ID.text); }
+    |   atom
+    ;
+
+atom
+    : K_ID      
+    | K_INT      
+    ; 
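The tree grammar above is the source from which the SimpleCWalker.h/.m files added below were generated: every ^( ... ) pattern becomes a rule method that matches the tree root, a DOWN token, the children, and an UP token. A rough sketch of how such a walker is typically driven once the companion SimpleC parser has built the AST follows; only newSimpleCWalker: and program are taken from the generated header below, while the parser object, getTree, and the CommonTreeNodeStream factory selector are assumptions about the surrounding example and runtime:

    // Sketch only; identifiers marked "assumed" are not shown in this diff.
    SimpleCParser_program_return *r = [parser program];                 // parser: assumed SimpleCParser instance
    CommonTreeNodeStream *nodes =
        [CommonTreeNodeStream newCommonTreeNodeStream:[r getTree]];     // assumed runtime factory selector
    SimpleCWalker *walker = [SimpleCWalker newSimpleCWalker:nodes];     // declared in SimpleCWalker.h below
    [walker program];   // walks ^(FUNC_DEF ...), ^(BLOCK ...) etc. and fires the NSLog action in expr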
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.h b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.h
new file mode 100644
index 0000000..649efdb
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.h
@@ -0,0 +1,133 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g 2012-02-16 17:56:35
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* treeParserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define ARG_DEF 4
+#define BLOCK 5
+#define FUNC_DECL 6
+#define FUNC_DEF 7
+#define FUNC_HDR 8
+#define K_CHAR 9
+#define K_COMMA 10
+#define K_EQ 11
+#define K_EQEQ 12
+#define K_FOR 13
+#define K_ID 14
+#define K_INT 15
+#define K_INT_TYPE 16
+#define K_LCURLY 17
+#define K_LCURVE 18
+#define K_LT 19
+#define K_PLUS 20
+#define K_RCURLY 21
+#define K_RCURVE 22
+#define K_SEMICOLON 23
+#define K_VOID 24
+#define VAR_DEF 25
+#define WS 26
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+/* globalAttributeScopeInterface */
+@interface Symbols_Scope : SymbolsScope {
+CommonTree * tree;
+ }
+/* start of globalAttributeScopeInterface properties */
+@property (assign, getter=gettree, setter=settree:) CommonTree * tree;
+/* end globalAttributeScopeInterface properties */
++ (Symbols_Scope *)newSymbols_Scope;
+- (id) init;
+/* start of globalAttributeScopeInterface methodsDecl */
+- (CommonTree *)gettree;
+- (void)settree:(CommonTree *)aVal;
+/* End of globalAttributeScopeInterface methodsDecl */
+@end /* end of Symbols_Scope interface */
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+/* returnScopeInterface SimpleCWalker_expr_return */
+@interface SimpleCWalker_expr_return : TreeRuleReturnScope { /* returnScopeInterface line 1838 */
+ /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+
+
+/* start of method declarations */
+
++ (SimpleCWalker_expr_return *)newSimpleCWalker_expr_return;
+/* this is start of set and get methods */
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+
+/* Interface grammar class */
+@interface SimpleCWalker  : TreeParser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+/* globalAttributeScopeMemVar */
+SymbolStack *Symbols_stack;
+Symbols_Scope *Symbols_scope;
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* ObjC end of memVars */
+
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newSimpleCWalker:(id<TreeNodeStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* ObjC end of methodsDecl */
+
+- (void)program; 
+- (void)declaration; 
+- (void)variable; 
+- (void)declarator; 
+- (void)functionHeader; 
+- (void)formalParameter; 
+- (void)type; 
+- (void)block; 
+- (void)stat; 
+- (void)forStat; 
+- (SimpleCWalker_expr_return *)expr; 
+- (void)atom; 
+
+
+@end /* end of SimpleCWalker interface */
+
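The scope Symbols { CommonTree *tree; } declaration in the grammar is what produces the Symbols_Scope class and the SymbolStack/Symbols_scope members declared in this header; the walker's initWithStream: further down in the diff allocates one of each. A minimal sketch of the generated accessors, where someTree stands in for any CommonTree built by the parser and is not defined in this diff:

    Symbols_Scope *scope = [Symbols_Scope newSymbols_Scope];
    [scope settree:someTree];                  // plain assignment, no retain (the property is declared assign)
    CommonTree *remembered = [scope gettree];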
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.m b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.m
new file mode 100644
index 0000000..c7d4cd4
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.m
@@ -0,0 +1,1304 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g
+ *     -                            On : 2012-02-16 17:56:35
+ *     -           for the tree parser : SimpleCWalkerTreeParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g 2012-02-16 17:56:35
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "SimpleCWalker.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_declaration_in_program56;
+static const unsigned long long FOLLOW_declaration_in_program56_data[] = { 0x00000000020000C2LL};
+static ANTLRBitSet *FOLLOW_variable_in_declaration76;
+static const unsigned long long FOLLOW_variable_in_declaration76_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_FUNC_DECL_in_declaration87;
+static const unsigned long long FOLLOW_FUNC_DECL_in_declaration87_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_functionHeader_in_declaration89;
+static const unsigned long long FOLLOW_functionHeader_in_declaration89_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_FUNC_DEF_in_declaration101;
+static const unsigned long long FOLLOW_FUNC_DEF_in_declaration101_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_functionHeader_in_declaration103;
+static const unsigned long long FOLLOW_functionHeader_in_declaration103_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_block_in_declaration105;
+static const unsigned long long FOLLOW_block_in_declaration105_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_VAR_DEF_in_variable126;
+static const unsigned long long FOLLOW_VAR_DEF_in_variable126_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_type_in_variable128;
+static const unsigned long long FOLLOW_type_in_variable128_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_declarator_in_variable130;
+static const unsigned long long FOLLOW_declarator_in_variable130_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_K_ID_in_declarator150;
+static const unsigned long long FOLLOW_K_ID_in_declarator150_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_FUNC_HDR_in_functionHeader171;
+static const unsigned long long FOLLOW_FUNC_HDR_in_functionHeader171_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_type_in_functionHeader173;
+static const unsigned long long FOLLOW_type_in_functionHeader173_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_K_ID_in_functionHeader175;
+static const unsigned long long FOLLOW_K_ID_in_functionHeader175_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_formalParameter_in_functionHeader177;
+static const unsigned long long FOLLOW_formalParameter_in_functionHeader177_data[] = { 0x0000000000000018LL};
+static ANTLRBitSet *FOLLOW_ARG_DEF_in_formalParameter199;
+static const unsigned long long FOLLOW_ARG_DEF_in_formalParameter199_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_type_in_formalParameter201;
+static const unsigned long long FOLLOW_type_in_formalParameter201_data[] = { 0x0000000000004000LL};
+static ANTLRBitSet *FOLLOW_declarator_in_formalParameter203;
+static const unsigned long long FOLLOW_declarator_in_formalParameter203_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_BLOCK_in_block283;
+static const unsigned long long FOLLOW_BLOCK_in_block283_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_variable_in_block285;
+static const unsigned long long FOLLOW_variable_in_block285_data[] = { 0x000000000218F828LL};
+static ANTLRBitSet *FOLLOW_stat_in_block288;
+static const unsigned long long FOLLOW_stat_in_block288_data[] = { 0x000000000018F828LL};
+static ANTLRBitSet *FOLLOW_forStat_in_stat302;
+static const unsigned long long FOLLOW_forStat_in_stat302_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_expr_in_stat310;
+static const unsigned long long FOLLOW_expr_in_stat310_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_block_in_stat318;
+static const unsigned long long FOLLOW_block_in_stat318_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_K_FOR_in_forStat338;
+static const unsigned long long FOLLOW_K_FOR_in_forStat338_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_expr_in_forStat340;
+static const unsigned long long FOLLOW_expr_in_forStat340_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_forStat342;
+static const unsigned long long FOLLOW_expr_in_forStat342_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_forStat344;
+static const unsigned long long FOLLOW_expr_in_forStat344_data[] = { 0x0000000000000020LL};
+static ANTLRBitSet *FOLLOW_block_in_forStat346;
+static const unsigned long long FOLLOW_block_in_forStat346_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_K_EQEQ_in_expr362;
+static const unsigned long long FOLLOW_K_EQEQ_in_expr362_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr364;
+static const unsigned long long FOLLOW_expr_in_expr364_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr366;
+static const unsigned long long FOLLOW_expr_in_expr366_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_K_LT_in_expr378;
+static const unsigned long long FOLLOW_K_LT_in_expr378_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr380;
+static const unsigned long long FOLLOW_expr_in_expr380_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr382;
+static const unsigned long long FOLLOW_expr_in_expr382_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_K_PLUS_in_expr394;
+static const unsigned long long FOLLOW_K_PLUS_in_expr394_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr396;
+static const unsigned long long FOLLOW_expr_in_expr396_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr398;
+static const unsigned long long FOLLOW_expr_in_expr398_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_K_EQ_in_expr410;
+static const unsigned long long FOLLOW_K_EQ_in_expr410_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_K_ID_in_expr412;
+static const unsigned long long FOLLOW_K_ID_in_expr412_data[] = { 0x000000000018D800LL};
+static ANTLRBitSet *FOLLOW_expr_in_expr416;
+static const unsigned long long FOLLOW_expr_in_expr416_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_atom_in_expr429;
+static const unsigned long long FOLLOW_atom_in_expr429_data[] = { 0x0000000000000002LL};
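/* The FOLLOW_* words above encode follow sets with bit index == token type
 * (token values are the #defines in SimpleCWalker.h; DOWN/UP come from the enum).
 * For example FOLLOW_declaration_in_program56_data = 0x00000000020000C2 sets bits
 * 1, 6, 7 and 25, i.e. <EOR>, FUNC_DECL, FUNC_DEF and VAR_DEF: exactly the tokens
 * that may follow one declaration inside declaration+. A minimal check of that
 * decoding (plain C, also valid Objective-C):
 *
 *     const unsigned long long follow = 0x00000000020000C2ULL;
 *     for (int tok = 0; tok < 64; tok++)
 *         if (follow & (1ULL << tok))
 *             printf("token type %d may follow 'declaration'\n", tok);   // prints 1, 6, 7, 25
 */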
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+@implementation Symbols_Scope  /* globalAttributeScopeImplementation */
+/* start of synthesize -- OBJC-Line 1750 */
+@synthesize tree; 
+
++ (Symbols_Scope *)newSymbols_Scope
+{
+return [[Symbols_Scope alloc] init];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* start of iterate get and set functions */
+- (CommonTree *)gettree { return( tree ); }
+- (void)settree:(CommonTree *)aVal { tree = aVal; }
+
+/* End of iterate get and set functions */
+@end /* end of Symbols_Scope implementation */
+
+
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+@implementation SimpleCWalker_expr_return /* returnScopeImplementation */
+ /* start of synthesize -- OBJC-Line 1837 */
++ (SimpleCWalker_expr_return *)newSimpleCWalker_expr_return
+{
+return [[[SimpleCWalker_expr_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+
+
+@end /* end of returnScope implementation */
+
+
+
+@implementation SimpleCWalker  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+static int _stack;  /* unused placeholder emitted by the ObjC code generator */
+ 
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_declaration_in_program56 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declaration_in_program56_data Count:(NSUInteger)1] retain];
+    FOLLOW_variable_in_declaration76 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_declaration76_data Count:(NSUInteger)1] retain];
+    FOLLOW_FUNC_DECL_in_declaration87 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_DECL_in_declaration87_data Count:(NSUInteger)1] retain];
+    FOLLOW_functionHeader_in_declaration89 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration89_data Count:(NSUInteger)1] retain];
+    FOLLOW_FUNC_DEF_in_declaration101 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_DEF_in_declaration101_data Count:(NSUInteger)1] retain];
+    FOLLOW_functionHeader_in_declaration103 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_functionHeader_in_declaration103_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_declaration105 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_declaration105_data Count:(NSUInteger)1] retain];
+    FOLLOW_VAR_DEF_in_variable126 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_VAR_DEF_in_variable126_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_variable128 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_variable128_data Count:(NSUInteger)1] retain];
+    FOLLOW_declarator_in_variable130 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_variable130_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_ID_in_declarator150 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_declarator150_data Count:(NSUInteger)1] retain];
+    FOLLOW_FUNC_HDR_in_functionHeader171 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_FUNC_HDR_in_functionHeader171_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_functionHeader173 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_functionHeader173_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_ID_in_functionHeader175 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_functionHeader175_data Count:(NSUInteger)1] retain];
+    FOLLOW_formalParameter_in_functionHeader177 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_formalParameter_in_functionHeader177_data Count:(NSUInteger)1] retain];
+    FOLLOW_ARG_DEF_in_formalParameter199 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ARG_DEF_in_formalParameter199_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_formalParameter201 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_formalParameter201_data Count:(NSUInteger)1] retain];
+    FOLLOW_declarator_in_formalParameter203 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_formalParameter203_data Count:(NSUInteger)1] retain];
+    FOLLOW_BLOCK_in_block283 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_BLOCK_in_block283_data Count:(NSUInteger)1] retain];
+    FOLLOW_variable_in_block285 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_variable_in_block285_data Count:(NSUInteger)1] retain];
+    FOLLOW_stat_in_block288 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_stat_in_block288_data Count:(NSUInteger)1] retain];
+    FOLLOW_forStat_in_stat302 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_forStat_in_stat302_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_stat310 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_stat310_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_stat318 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_stat318_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_FOR_in_forStat338 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_FOR_in_forStat338_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_forStat340 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat340_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_forStat342 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat342_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_forStat344 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_forStat344_data Count:(NSUInteger)1] retain];
+    FOLLOW_block_in_forStat346 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_block_in_forStat346_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_EQEQ_in_expr362 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQEQ_in_expr362_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr364 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr364_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr366 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr366_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_LT_in_expr378 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_LT_in_expr378_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr380 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr380_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr382 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr382_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_PLUS_in_expr394 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_PLUS_in_expr394_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr396 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr396_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr398 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr398_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_EQ_in_expr410 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_EQ_in_expr410_data Count:(NSUInteger)1] retain];
+    FOLLOW_K_ID_in_expr412 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_K_ID_in_expr412_data Count:(NSUInteger)1] retain];
+    FOLLOW_expr_in_expr416 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_expr_in_expr416_data Count:(NSUInteger)1] retain];
+    FOLLOW_atom_in_expr429 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_atom_in_expr429_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"ARG_DEF", @"BLOCK", @"FUNC_DECL", @"FUNC_DEF", @"FUNC_HDR", @"K_CHAR", 
+ @"K_COMMA", @"K_EQ", @"K_EQEQ", @"K_FOR", @"K_ID", @"K_INT", @"K_INT_TYPE", 
+ @"K_LCURLY", @"K_LCURVE", @"K_LT", @"K_PLUS", @"K_RCURLY", @"K_RCURVE", 
+ @"K_SEMICOLON", @"K_VOID", @"VAR_DEF", @"WS", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g"];
+}
+
++ (SimpleCWalker *)newSimpleCWalker:(id<TreeNodeStream>)aStream
+{
+    return [[SimpleCWalker alloc] initWithStream:aStream];
+}
+
+- (id) initWithStream:(id<TreeNodeStream>)aStream
+{
+    self = [super initWithStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:12+1] retain]];
+    if ( self != nil ) {
+        /* globalAttributeScopeInit */
+
+        Symbols_scope = [Symbols_Scope newSymbols_Scope];
+
+        Symbols_stack = [SymbolStack newSymbolStackWithLen:30];
+
+        /* start of actions-actionScope-init */
+        /* start of init */
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [Symbols_stack release];
+     
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start program
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:13:1: program : ( declaration )+ ;
+ */
+- (void) program
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:14:5: ( ( declaration )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:14:9: ( declaration )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:14:9: ( declaration )+ // positiveClosureBlock
+        NSInteger cnt1 = 0;
+        do {
+            NSInteger alt1 = 2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= FUNC_DECL && LA1_0 <= FUNC_DEF)||LA1_0==VAR_DEF) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:14:9: declaration // alt
+                    {
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_declaration_in_program56];
+                    [self declaration];
+
+                    [self popFollow];
+
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt1 >= 1 )
+                        goto loop1;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:1];
+                    @throw eee;
+            }
+            cnt1++;
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end program */
+
+/*
+ * $ANTLR start declaration
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:17:1: declaration : ( variable | ^( FUNC_DECL functionHeader ) | ^( FUNC_DEF functionHeader block ) );
+ */
+- (void) declaration
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:18:5: ( variable | ^( FUNC_DECL functionHeader ) | ^( FUNC_DEF functionHeader block ) ) //ruleblock
+        NSInteger alt2=3;
+        unichar charLA2 = [input LA:1];
+        switch (charLA2) {
+            case VAR_DEF: ;
+                {
+                alt2=1;
+                }
+                break;
+            case FUNC_DECL: ;
+                {
+                alt2=2;
+                }
+                break;
+            case FUNC_DEF: ;
+                {
+                alt2=3;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:2 state:0 stream:input];
+            nvae.c = charLA2;
+            @throw nvae;
+
+        }
+
+        switch (alt2) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:18:9: variable // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_variable_in_declaration76];
+                [self variable];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:19:9: ^( FUNC_DECL functionHeader ) // alt
+                {
+
+
+                [self match:input TokenType:FUNC_DECL Follow:FOLLOW_FUNC_DECL_in_declaration87]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_functionHeader_in_declaration89];
+                    [self functionHeader];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:20:9: ^( FUNC_DEF functionHeader block ) // alt
+                {
+
+
+                [self match:input TokenType:FUNC_DEF Follow:FOLLOW_FUNC_DEF_in_declaration101]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_functionHeader_in_declaration103];
+                    [self functionHeader];
+
+                    [self popFollow];
+
+
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_block_in_declaration105];
+                    [self block];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end declaration */
+
+/*
+ * $ANTLR start variable
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:23:1: variable : ^( VAR_DEF type declarator ) ;
+ */
+- (void) variable
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:24:5: ( ^( VAR_DEF type declarator ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:24:9: ^( VAR_DEF type declarator ) // alt
+        {
+
+
+        [self match:input TokenType:VAR_DEF Follow:FOLLOW_VAR_DEF_in_variable126]; 
+         
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+            /* ruleRef */
+            [self pushFollow:FOLLOW_type_in_variable128];
+            [self type];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_declarator_in_variable130];
+            [self declarator];
+
+            [self popFollow];
+
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end variable */
+
+/*
+ * $ANTLR start declarator
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:27:1: declarator : K_ID ;
+ */
+- (void) declarator
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:28:5: ( K_ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:28:9: K_ID // alt
+        {
+
+        [self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_declarator150]; 
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end declarator */
+
+/*
+ * $ANTLR start functionHeader
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:31:1: functionHeader : ^( FUNC_HDR type K_ID ( formalParameter )+ ) ;
+ */
+- (void) functionHeader
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:32:5: ( ^( FUNC_HDR type K_ID ( formalParameter )+ ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:32:9: ^( FUNC_HDR type K_ID ( formalParameter )+ ) // alt
+        {
+
+
+        [self match:input TokenType:FUNC_HDR Follow:FOLLOW_FUNC_HDR_in_functionHeader171]; 
+         
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+            /* ruleRef */
+            [self pushFollow:FOLLOW_type_in_functionHeader173];
+            [self type];
+
+            [self popFollow];
+
+
+             
+            [self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_functionHeader175]; 
+             
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:32:30: ( formalParameter )+ // positiveClosureBlock
+            NSInteger cnt3 = 0;
+            do {
+                NSInteger alt3 = 2;
+                NSInteger LA3_0 = [input LA:1];
+                if ( (LA3_0==ARG_DEF) ) {
+                    alt3=1;
+                }
+
+
+                switch (alt3) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:32:30: formalParameter // alt
+                        {
+
+                        /* ruleRef */
+                        [self pushFollow:FOLLOW_formalParameter_in_functionHeader177];
+                        [self formalParameter];
+
+                        [self popFollow];
+
+
+                         
+                        }
+                        break;
+
+                    default :
+                        if ( cnt3 >= 1 )
+                            goto loop3;
+                        EarlyExitException *eee =
+                            [EarlyExitException newException:input decisionNumber:3];
+                        @throw eee;
+                }
+                cnt3++;
+            } while (YES);
+            loop3: ;
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end functionHeader */
+
+/*
+ * $ANTLR start formalParameter
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:35:1: formalParameter : ^( ARG_DEF type declarator ) ;
+ */
+- (void) formalParameter
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:36:5: ( ^( ARG_DEF type declarator ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:36:9: ^( ARG_DEF type declarator ) // alt
+        {
+
+
+        [self match:input TokenType:ARG_DEF Follow:FOLLOW_ARG_DEF_in_formalParameter199]; 
+         
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+            /* ruleRef */
+            [self pushFollow:FOLLOW_type_in_formalParameter201];
+            [self type];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_declarator_in_formalParameter203];
+            [self declarator];
+
+            [self popFollow];
+
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end formalParameter */
+
+/*
+ * $ANTLR start type
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:39:1: type : ( K_INT_TYPE | K_CHAR | K_VOID | K_ID );
+ */
+- (void) type
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:40:5: ( K_INT_TYPE | K_CHAR | K_VOID | K_ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g: // alt
+        {
+
+        if ([input LA:1] == K_CHAR||[input LA:1] == K_ID||[input LA:1] == K_INT_TYPE||[input LA:1] == K_VOID) {
+            [input consume];
+            [state setIsErrorRecovery:NO];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            @throw mse;
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end type */
+
+/*
+ * $ANTLR start block
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:46:1: block : ^( BLOCK ( variable )* ( stat )* ) ;
+ */
+- (void) block
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:47:5: ( ^( BLOCK ( variable )* ( stat )* ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:47:9: ^( BLOCK ( variable )* ( stat )* ) // alt
+        {
+
+
+        [self match:input TokenType:BLOCK Follow:FOLLOW_BLOCK_in_block283]; 
+         
+        if ( [input LA:1] == DOWN ) {
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+
+            do {
+                NSInteger alt4=2;
+                NSInteger LA4_0 = [input LA:1];
+                if ( (LA4_0==VAR_DEF) ) {
+                    alt4=1;
+                }
+
+
+                switch (alt4) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:47:17: variable // alt
+                        {
+
+                        /* ruleRef */
+                        [self pushFollow:FOLLOW_variable_in_block285];
+                        [self variable];
+
+                        [self popFollow];
+
+
+                         
+                        }
+                        break;
+
+                    default :
+                        goto loop4;
+                }
+            } while (YES);
+            loop4: ;
+
+             
+
+            do {
+                NSInteger alt5=2;
+                NSInteger LA5_0 = [input LA:1];
+                if ( (LA5_0==BLOCK||(LA5_0 >= K_EQ && LA5_0 <= K_INT)||(LA5_0 >= K_LT && LA5_0 <= K_PLUS)) ) {
+                    alt5=1;
+                }
+
+
+                switch (alt5) {
+                    case 1 : ;
+                        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:47:27: stat // alt
+                        {
+
+                        /* ruleRef */
+                        [self pushFollow:FOLLOW_stat_in_block288];
+                        [self stat];
+
+                        [self popFollow];
+
+
+                         
+                        }
+                        break;
+
+                    default :
+                        goto loop5;
+                }
+            } while (YES);
+            loop5: ;
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+        }
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end block */
+
+/*
+ * $ANTLR start stat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:50:1: stat : ( forStat | expr | block );
+ */
+- (void) stat
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:50:5: ( forStat | expr | block ) //ruleblock
+        NSInteger alt6=3;
+        unichar charLA6 = [input LA:1];
+        switch (charLA6) {
+            case K_FOR: ;
+                {
+                alt6=1;
+                }
+                break;
+            case K_EQ: ;
+            case K_EQEQ: ;
+            case K_ID: ;
+            case K_INT: ;
+            case K_LT: ;
+            case K_PLUS: ;
+                {
+                alt6=2;
+                }
+                break;
+            case BLOCK: ;
+                {
+                alt6=3;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:6 state:0 stream:input];
+            nvae.c = charLA6;
+            @throw nvae;
+
+        }
+
+        switch (alt6) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:50:7: forStat // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_forStat_in_stat302];
+                [self forStat];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:51:7: expr // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_expr_in_stat310];
+                [self expr];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:52:7: block // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_block_in_stat318];
+                [self block];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end stat */
+
+/*
+ * $ANTLR start forStat
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:55:1: forStat : ^( K_FOR expr expr expr block ) ;
+ */
+- (void) forStat
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:56:5: ( ^( K_FOR expr expr expr block ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:56:9: ^( K_FOR expr expr expr block ) // alt
+        {
+
+
+        [self match:input TokenType:K_FOR Follow:FOLLOW_K_FOR_in_forStat338]; 
+         
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+            /* ruleRef */
+            [self pushFollow:FOLLOW_expr_in_forStat340];
+            [self expr];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_expr_in_forStat342];
+            [self expr];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_expr_in_forStat344];
+            [self expr];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_block_in_forStat346];
+            [self block];
+
+            [self popFollow];
+
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end forStat */
+
+/*
+ * $ANTLR start expr
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:59:1: expr : ( ^( K_EQEQ expr expr ) | ^( K_LT expr expr ) | ^( K_PLUS expr expr ) | ^( K_EQ K_ID e= expr ) | atom );
+ */
+- (SimpleCWalker_expr_return *) expr
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+    SimpleCWalker_expr_return * retval = [SimpleCWalker_expr_return newSimpleCWalker_expr_return];
+    [retval setStart:[input LT:1]];
+
+
+    @try {
+        /* ruleLabelDefs entry */
+        CommonTree *K_ID1 = nil;SimpleCWalker_expr_return * e = nil ;
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:59:5: ( ^( K_EQEQ expr expr ) | ^( K_LT expr expr ) | ^( K_PLUS expr expr ) | ^( K_EQ K_ID e= expr ) | atom ) //ruleblock
+        NSInteger alt7=5;
+        unichar charLA7 = [input LA:1];
+        switch (charLA7) {
+            case K_EQEQ: ;
+                {
+                alt7=1;
+                }
+                break;
+            case K_LT: ;
+                {
+                alt7=2;
+                }
+                break;
+            case K_PLUS: ;
+                {
+                alt7=3;
+                }
+                break;
+            case K_EQ: ;
+                {
+                alt7=4;
+                }
+                break;
+            case K_ID: ;
+            case K_INT: ;
+                {
+                alt7=5;
+                }
+                break;
+
+        default: ;
+            NoViableAltException *nvae = [NoViableAltException newException:7 state:0 stream:input];
+            nvae.c = charLA7;
+            @throw nvae;
+
+        }
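+        // (Added commentary, not generated) The switch above is the LL(1) decision for rule
+        // 'expr': the token type of the current tree node (LA(1)) selects one of the five
+        // alternatives; any other token type raises the NoViableAltException built above.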
+
+        switch (alt7) {
+            case 1 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:59:9: ^( K_EQEQ expr expr ) // alt
+                {
+
+
+                [self match:input TokenType:K_EQEQ Follow:FOLLOW_K_EQEQ_in_expr362]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr364];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr366];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+                }
+                break;
+            case 2 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:60:9: ^( K_LT expr expr ) // alt
+                {
+
+
+                [self match:input TokenType:K_LT Follow:FOLLOW_K_LT_in_expr378]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr380];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr382];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+                }
+                break;
+            case 3 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:61:9: ^( K_PLUS expr expr ) // alt
+                {
+
+
+                [self match:input TokenType:K_PLUS Follow:FOLLOW_K_PLUS_in_expr394]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr396];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr398];
+                    [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+                }
+                break;
+            case 4 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:62:9: ^( K_EQ K_ID e= expr ) // alt
+                {
+
+
+                [self match:input TokenType:K_EQ Follow:FOLLOW_K_EQ_in_expr410]; 
+                 
+                    [self match:input TokenType:DOWN Follow:nil]; 
+
+                    K_ID1=(CommonTree *)[self match:input TokenType:K_ID Follow:FOLLOW_K_ID_in_expr412]; 
+                     
+                    /* ruleRef */
+                    [self pushFollow:FOLLOW_expr_in_expr416];
+                    e = [self expr];
+
+                    [self popFollow];
+
+
+                     
+                    [self match:input TokenType:UP Follow:nil]; 
+
+                 
+
+                 NSLog(@"assigning %@ to variable %@", (e!=nil?[[input getTokenStream] toStringFromStart:[[input getTreeAdaptor] getTokenStartIndex:[e getStart]]ToEnd:[[input getTreeAdaptor] getTokenStopIndex:[e getStart]]]:0), (K_ID1!=nil?K_ID1.text:nil)); 
+
+                 
+                }
+                break;
+            case 5 : ;
+                // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:63:9: atom // alt
+                {
+
+                /* ruleRef */
+                [self pushFollow:FOLLOW_atom_in_expr429];
+                [self atom];
+
+                [self popFollow];
+
+
+                 
+                }
+                break;
+
+        }
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end expr */
+
+/*
+ * $ANTLR start atom
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:66:1: atom : ( K_ID | K_INT );
+ */
+- (void) atom
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g:67:5: ( K_ID | K_INT ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.g: // alt
+        {
+
+        if ((([input LA:1] >= K_ID) && ([input LA:1] <= K_INT))) {
+            [input consume];
+            [state setIsErrorRecovery:NO];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            @throw mse;
+        }
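+        // (Added commentary, not generated) 'atom : ( K_ID | K_INT )' is compiled to a set
+        // match: any node whose token type lies in the contiguous K_ID..K_INT range is
+        // consumed, and anything else raises MismatchedSetException.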
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end atom */
+/* ObjC end rules */
+
+@end /* end of SimpleCWalker implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.tokens b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.tokens
similarity index 100%
copy from antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC.tokens
copy to runtime/ObjC/Framework/examples/simplecTreeParser/SimpleCWalker.tokens
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC__.gl b/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC__.gl
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC__.gl
rename to runtime/ObjC/Framework/examples/simplecTreeParser/SimpleC__.gl
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/input b/runtime/ObjC/Framework/examples/simplecTreeParser/input
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/input
rename to runtime/ObjC/Framework/examples/simplecTreeParser/input
diff --git a/runtime/ObjC/Framework/examples/simplecTreeParser/main.m b/runtime/ObjC/Framework/examples/simplecTreeParser/main.m
new file mode 100644
index 0000000..bf3ea27
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/simplecTreeParser/main.m
@@ -0,0 +1,84 @@
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+#import "SimpleCLexer.h"
+#import "SimpleCParser.h"
+#import "SimpleCWalker.h"
+#import "stdio.h"
+#include <unistd.h>
+
+int main(int argc, const char * argv[]) {
+    NSError *anError;
+	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+    char *inp = "/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/simplecTreeParser/input";
+    
+/*
+    if (argc < 2) {
+        NSLog(@"provide the input file, please");
+        return 1;
+    }
+ */
+	
+	// simply read in the input file in one gulp
+	NSString *string = [NSString stringWithContentsOfFile:[NSString stringWithCString:inp encoding:NSASCIIStringEncoding] encoding:NSASCIIStringEncoding error:&anError];
+	NSLog(@"input is : %@", string);
+
+	// Create a stream over the input so the lexer can seek back and forth. We don't copy the string,
+	// because we make sure it will not go away.
+	// If the string were coming from a volatile source, say a text field, we could opt to copy it.
+	// That way we could do the parsing in a different thread and still let the user edit the original string.
+	// But here we do it the simple way.
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:string];
+	
+	// Actually create the lexer, feeding off of the character stream.
+	SimpleCLexer *lexer = [SimpleCLexer newSimpleCLexerWithCharStream:stream];
+	
+	// For fun, you could print all tokens the lexer recognized, but we can only do it once. After that
+	// we would need to reset the lexer, and lex again.
+//    id<Token> currentToken;
+//    while ((currentToken = [lexer nextToken]) && [currentToken type] != TokenTypeEOF) {
+//        NSLog(@"%@", currentToken);
+//    }
+//	  [lexer reset];
+	
+	// Since the parser needs to scan back and forth over the tokens, we put them into a stream, too.
+	CommonTokenStream *tokenStream = [CommonTokenStream newCommonTokenStreamWithTokenSource:lexer];
+
+	// Construct a parser and feed it the token stream.
+	SimpleCParser *parser = [[SimpleCParser alloc] initWithTokenStream:tokenStream];
+	
+	// We start the parsing process by calling a parser rule. In theory you can call any parser rule here,
+	// but it obviously has to match the input token stream. Otherwise parsing would fail.
+	// Also watch out for internal dependencies in your grammar (e.g. you use a symbol table that's only
+	// initialized when you call a specific parser rule).
+	// This is a simple example, so we just call the top-most rule 'program'.
+	// Since we want to parse the AST the parser builds, we just ask the returned object for that.
+	CommonTree *program_tree = [[parser program] getTree];
+
+    NSLog(@"Reached end of first parse\n");
+	// Print the matched tree as a Lisp-style string
+	NSLog(@"tree: %@", [program_tree treeDescription]);
+	
+	// Create a new tree node stream that's feeding off of the root node (thus seeing the whole tree)
+	CommonTreeNodeStream *treeStream = [CommonTreeNodeStream newCommonTreeNodeStream:program_tree];
+	// tell the TreeNodeStream where the tokens originally came from, so we can retrieve arbitrary tokens and their text.
+	[treeStream setTokenStream:tokenStream];
+	
+	// Create the treeparser instance, passing it the stream of nodes
+	SimpleCWalker *walker = [[SimpleCWalker alloc] initWithStream:treeStream];
+	// As with parsers, you can invoke any treeparser rule here.
+	[walker program];
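+
+	// Note (added commentary, not generated): the walker's output comes from the actions in
+	// SimpleCWalker.g; for example, the assignment alternative of its 'expr' rule logs
+	// "assigning <expression text> to variable <id>" for every ^( K_EQ ... ) subtree it visits.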
+
+	// Whew, done. Release everything that we are responsible for.
+	[lexer release];
+	[stream release];
+	[tokenStream release];
+	[parser release];
+	[treeStream release];
+	[walker release];
+
+	[pool release];
+
+    // use this for ObjectAlloc on Tiger
+    //while(1) sleep(5);
+	return 0;
+}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output b/runtime/ObjC/Framework/examples/simplecTreeParser/output
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/simplecTreeParser/output
rename to runtime/ObjC/Framework/examples/simplecTreeParser/output
diff --git a/runtime/ObjC/Framework/examples/treeparser/Lang.g b/runtime/ObjC/Framework/examples/treeparser/Lang.g
new file mode 100755
index 0000000..b2a5f0f
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treeparser/Lang.g
@@ -0,0 +1,22 @@
+grammar Lang;
+options {
+	output=AST;
+	language = ObjC;
+	ASTLabelType=CommonTree;
+}
+
+tokens {DECL;} // an imaginary node
+
+start : decl ;
+
+decl : type ID ';' -> ^(DECL type ID)
+     ;
+type : INTTYPE  // automatic tree construction builds a node for this rule
+     | FLOATTYPE
+     ;
+
+INTTYPE : 'int' ;
+FLOATTYPE : 'float' ;
+ID : 'a'..'z'+ ;
+INT : '0'..'9'+ ;
+WS : (' '|'\n') {$channel=HIDDEN;} ;
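+
+// Illustration (added commentary, not part of the original example): for the input "int a;",
+// the rewrite rule in 'decl' builds the imaginary-root tree ^(DECL type ID), printed in
+// Lisp style as (DECL int a); the LangDumpDecl tree grammar then walks that tree.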
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/Lang.tokens b/runtime/ObjC/Framework/examples/treeparser/Lang.tokens
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/treeparser/Lang.tokens
rename to runtime/ObjC/Framework/examples/treeparser/Lang.tokens
diff --git a/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g b/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g
new file mode 100755
index 0000000..5524f12
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g
@@ -0,0 +1,17 @@
+tree grammar LangDumpDecl;
+options {
+    tokenVocab=Lang;
+	language = ObjC;
+    ASTLabelType = CommonTree;
+}
+
+decl : ^(DECL type declarator)
+       // label.start, label.start, label.text
+       { NSLog(@"int \%@", $declarator.text);}
+     ;
+
+type : INTTYPE ;
+
+declarator
+     : ID
+     ;
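+
+// Illustration (added commentary, not part of the original example): walking the tree
+// (DECL int a) produced by Lang.g, the 'decl' rule above matches ^(DECL type declarator)
+// and its action logs "int a" via $declarator.text.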
diff --git a/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.h b/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.h
new file mode 100644
index 0000000..67e4836
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.h
@@ -0,0 +1,90 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g 2012-02-16 17:59:08
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* treeParserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__10 10
+#define DECL 4
+#define FLOATTYPE 5
+#define ID 6
+#define INT 7
+#define INTTYPE 8
+#define WS 9
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+/* returnScopeInterface LangDumpDecl_declarator_return */
+@interface LangDumpDecl_declarator_return : TreeRuleReturnScope { /* returnScopeInterface line 1838 */
+ /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+
+
+/* start of method declarations */
+
++ (LangDumpDecl_declarator_return *)newLangDumpDecl_declarator_return;
+/* this is start of set and get methods */
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+
+/* Interface grammar class */
+@interface LangDumpDecl  : TreeParser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* ObjC end of memVars */
+
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newLangDumpDecl:(id<TreeNodeStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* ObjC end of methodsDecl */
+
+- (void)decl; 
+- (void)type; 
+- (LangDumpDecl_declarator_return *)declarator; 
+
+
+@end /* end of LangDumpDecl interface */
+
diff --git a/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.m b/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.m
new file mode 100644
index 0000000..cd0eac5
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.m
@@ -0,0 +1,261 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g
+ *     -                            On : 2012-02-16 17:59:08
+ *     -           for the tree parser : LangDumpDeclTreeParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g 2012-02-16 17:59:08
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "LangDumpDecl.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_DECL_in_decl45;
+static const unsigned long long FOLLOW_DECL_in_decl45_data[] = { 0x0000000000000004LL};
+static ANTLRBitSet *FOLLOW_type_in_decl47;
+static const unsigned long long FOLLOW_type_in_decl47_data[] = { 0x0000000000000040LL};
+static ANTLRBitSet *FOLLOW_declarator_in_decl49;
+static const unsigned long long FOLLOW_declarator_in_decl49_data[] = { 0x0000000000000008LL};
+static ANTLRBitSet *FOLLOW_INTTYPE_in_type81;
+static const unsigned long long FOLLOW_INTTYPE_in_type81_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_ID_in_declarator95;
+static const unsigned long long FOLLOW_ID_in_declarator95_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+@implementation LangDumpDecl_declarator_return /* returnScopeImplementation */
+ /* start of synthesize -- OBJC-Line 1837 */
++ (LangDumpDecl_declarator_return *)newLangDumpDecl_declarator_return
+{
+return [[[LangDumpDecl_declarator_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+
+
+@end /* end of returnScope implementation */
+
+
+
+@implementation LangDumpDecl  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_DECL_in_decl45 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_DECL_in_decl45_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_decl47 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_decl47_data Count:(NSUInteger)1] retain];
+    FOLLOW_declarator_in_decl49 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_declarator_in_decl49_data Count:(NSUInteger)1] retain];
+    FOLLOW_INTTYPE_in_type81 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INTTYPE_in_type81_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_declarator95 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_declarator95_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"DECL", @"FLOATTYPE", @"ID", @"INT", @"INTTYPE", @"WS", @"';'", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g"];
+}
+
++ (LangDumpDecl *)newLangDumpDecl:(id<TreeNodeStream>)aStream
+{
+    return [[LangDumpDecl alloc] initWithStream:aStream];
+}
+
+- (id) initWithStream:(id<TreeNodeStream>)aStream
+{
+    self = [super initWithStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:3+1] retain]];
+    if ( self != nil ) {
+        /* start of actions-actionScope-init */
+        /* start of init */
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start decl
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g:8:1: decl : ^( DECL type declarator ) ;
+ */
+- (void) decl
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+        LangDumpDecl_declarator_return * declarator1 = nil ;
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g:8:6: ( ^( DECL type declarator ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g:8:8: ^( DECL type declarator ) // alt
+        {
+
+
+        [self match:input TokenType:DECL Follow:FOLLOW_DECL_in_decl45]; 
+         
+            [self match:input TokenType:DOWN Follow:nil]; 
+
+            /* ruleRef */
+            [self pushFollow:FOLLOW_type_in_decl47];
+            [self type];
+
+            [self popFollow];
+
+
+             
+            /* ruleRef */
+            [self pushFollow:FOLLOW_declarator_in_decl49];
+            declarator1 = [self declarator];
+
+            [self popFollow];
+
+
+             
+            [self match:input TokenType:UP Follow:nil]; 
+
+         
+
+         NSLog(@"int %@", (declarator1!=nil?[[input getTokenStream] toStringFromStart:[[input getTreeAdaptor] getTokenStartIndex:[declarator1 getStart]]ToEnd:[[input getTreeAdaptor] getTokenStopIndex:[declarator1 getStart]]]:0));
+
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end decl */
+
+/*
+ * $ANTLR start type
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g:13:1: type : INTTYPE ;
+ */
+- (void) type
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g:13:6: ( INTTYPE ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g:13:8: INTTYPE // alt
+        {
+
+        [self match:input TokenType:INTTYPE Follow:FOLLOW_INTTYPE_in_type81]; 
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return ;
+}
+/* $ANTLR end type */
+
+/*
+ * $ANTLR start declarator
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g:15:1: declarator : ID ;
+ */
+- (LangDumpDecl_declarator_return *) declarator
+{
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+    LangDumpDecl_declarator_return * retval = [LangDumpDecl_declarator_return newLangDumpDecl_declarator_return];
+    [retval setStart:[input LT:1]];
+
+
+    @try {
+        /* ruleLabelDefs entry */
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g:16:6: ( ID ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.g:16:8: ID // alt
+        {
+
+        [self match:input TokenType:ID Follow:FOLLOW_ID_in_declarator95]; 
+         
+        }
+
+        /* token+rule list labels */
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end declarator */
+/* ObjC end rules */
+
+@end /* end of LangDumpDecl implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.tokens b/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.tokens
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.tokens
rename to runtime/ObjC/Framework/examples/treeparser/LangDumpDecl.tokens
diff --git a/runtime/ObjC/Framework/examples/treeparser/LangLexer.h b/runtime/ObjC/Framework/examples/treeparser/LangLexer.h
new file mode 100644
index 0000000..9905e66
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treeparser/LangLexer.h
@@ -0,0 +1,46 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g 2012-02-16 17:58:54
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* Start cyclicDFAInterface */
+
+#pragma mark Rule return scopes Interface start
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__10 10
+#define DECL 4
+#define FLOATTYPE 5
+#define ID 6
+#define INT 7
+#define INTTYPE 8
+#define WS 9
+/* interface lexer class */
+@interface LangLexer : Lexer { // line 283
+/* ObjC start of actions.lexer.memVars */
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (LangLexer *)newLangLexerWithCharStream:(id<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+/* ObjC end actions.lexer.methodsDecl */
+- (void) mT__10 ; 
+- (void) mINTTYPE ; 
+- (void) mFLOATTYPE ; 
+- (void) mID ; 
+- (void) mINT ; 
+- (void) mWS ; 
+- (void) mTokens ; 
+
+@end /* end of LangLexer interface */
+
diff --git a/runtime/ObjC/Framework/examples/treeparser/LangLexer.m b/runtime/ObjC/Framework/examples/treeparser/LangLexer.m
new file mode 100644
index 0000000..bba84cd
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treeparser/LangLexer.m
@@ -0,0 +1,556 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g
+ *     -                            On : 2012-02-16 17:58:54
+ *     -                 for the lexer : LangLexerLexer
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g 2012-02-16 17:58:54
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "LangLexer.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+/** As per Terence: No returns for lexer rules! */
+@implementation LangLexer // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (LangLexer *)newLangLexerWithCharStream:(id<CharStream>)anInput
+{
+    return [[LangLexer alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:7+1]];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+
+/* Start of Rules */
+// $ANTLR start "T__10"
+- (void) mT__10
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = T__10;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:7:7: ( ';' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:7:9: ';' // alt
+        {
+
+
+        [self matchChar:';']; 
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "T__10" */
+// $ANTLR start "INTTYPE"
+- (void) mINTTYPE
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = INTTYPE;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:18:9: ( 'int' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:18:11: 'int' // alt
+        {
+
+
+        [self matchString:@"int"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "INTTYPE" */
+// $ANTLR start "FLOATTYPE"
+- (void) mFLOATTYPE
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = FLOATTYPE;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:19:11: ( 'float' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:19:13: 'float' // alt
+        {
+
+
+        [self matchString:@"float"]; 
+
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "FLOATTYPE" */
+// $ANTLR start "ID"
+- (void) mID
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = ID;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:20:4: ( ( 'a' .. 'z' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:20:6: ( 'a' .. 'z' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:20:6: ( 'a' .. 'z' )+ // positiveClosureBlock
+        NSInteger cnt1 = 0;
+        do {
+            NSInteger alt1 = 2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= 'a' && LA1_0 <= 'z')) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g: // alt
+                    {
+
+                    if ((([input LA:1] >= 'a') && ([input LA:1] <= 'z'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt1 >= 1 )
+                        goto loop1;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:1];
+                    @throw eee;
+            }
+            cnt1++;
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "ID" */
+// $ANTLR start "INT"
+- (void) mINT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = INT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:21:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:21:7: ( '0' .. '9' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:21:7: ( '0' .. '9' )+ // positiveClosureBlock
+        NSInteger cnt2 = 0;
+        do {
+            NSInteger alt2 = 2;
+            NSInteger LA2_0 = [input LA:1];
+            if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
+                alt2=1;
+            }
+
+
+            switch (alt2) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt2 >= 1 )
+                        goto loop2;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:2];
+                    @throw eee;
+            }
+            cnt2++;
+        } while (YES);
+        loop2: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "INT" */
+// $ANTLR start "WS"
+- (void) mWS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = WS;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:22:4: ( ( ' ' | '\\n' ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:22:6: ( ' ' | '\\n' ) // alt
+        {
+
+        if ([input LA:1] == '\n'||[input LA:1] == ' ') {
+            [input consume];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            [self recover:mse];
+            @throw mse;
+        }
+
+         
+
+        _channel=HIDDEN;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "WS" */
+- (void) mTokens
+{
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:1:8: ( T__10 | INTTYPE | FLOATTYPE | ID | INT | WS ) //ruleblock
+    NSInteger alt3=6;
+    unichar charLA3 = [input LA:1];
+    switch (charLA3) {
+        case ';': ;
+            {
+            alt3=1;
+            }
+            break;
+        case 'i': ;
+            {
+            NSInteger LA3_2 = [input LA:2];
+
+            if ( (LA3_2=='n') ) {
+                NSInteger LA3_7 = [input LA:3];
+
+                if ( (LA3_7=='t') ) {
+                    NSInteger LA3_9 = [input LA:4];
+
+                    if ( ((LA3_9 >= 'a' && LA3_9 <= 'z')) ) {
+                        alt3=4;
+                    }
+                    else {
+                        alt3 = 2;
+                    }
+                }
+                else {
+                    alt3 = 4;
+                }
+            }
+            else {
+                alt3 = 4;
+            }
+            }
+            break;
+        case 'f': ;
+            {
+            NSInteger LA3_3 = [input LA:2];
+
+            if ( (LA3_3=='l') ) {
+                NSInteger LA3_8 = [input LA:3];
+
+                if ( (LA3_8=='o') ) {
+                    NSInteger LA3_10 = [input LA:4];
+
+                    if ( (LA3_10=='a') ) {
+                        NSInteger LA3_12 = [input LA:5];
+
+                        if ( (LA3_12=='t') ) {
+                            NSInteger LA3_13 = [input LA:6];
+
+                            if ( ((LA3_13 >= 'a' && LA3_13 <= 'z')) ) {
+                                alt3=4;
+                            }
+                            else {
+                                alt3 = 3;
+                            }
+                        }
+                        else {
+                            alt3 = 4;
+                        }
+                    }
+                    else {
+                        alt3 = 4;
+                    }
+                }
+                else {
+                    alt3 = 4;
+                }
+            }
+            else {
+                alt3 = 4;
+            }
+            }
+            break;
+        case 'a': ;
+        case 'b': ;
+        case 'c': ;
+        case 'd': ;
+        case 'e': ;
+        case 'g': ;
+        case 'h': ;
+        case 'j': ;
+        case 'k': ;
+        case 'l': ;
+        case 'm': ;
+        case 'n': ;
+        case 'o': ;
+        case 'p': ;
+        case 'q': ;
+        case 'r': ;
+        case 's': ;
+        case 't': ;
+        case 'u': ;
+        case 'v': ;
+        case 'w': ;
+        case 'x': ;
+        case 'y': ;
+        case 'z': ;
+            {
+            alt3=4;
+            }
+            break;
+        case '0': ;
+        case '1': ;
+        case '2': ;
+        case '3': ;
+        case '4': ;
+        case '5': ;
+        case '6': ;
+        case '7': ;
+        case '8': ;
+        case '9': ;
+            {
+            alt3=5;
+            }
+            break;
+        case '\n': ;
+        case ' ': ;
+            {
+            alt3=6;
+            }
+            break;
+
+    default: ;
+        NoViableAltException *nvae = [NoViableAltException newException:3 state:0 stream:input];
+        nvae.c = charLA3;
+        @throw nvae;
+
+    }
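+    // (Added commentary, not generated) The nested lookahead above disambiguates the keywords
+    // 'int' and 'float' from plain IDs: the keyword token is chosen only when the keyword is
+    // not followed by another lowercase letter; otherwise the ID alternative wins.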
+
+    switch (alt3) {
+        case 1 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:1:10: T__10 // alt
+            {
+
+
+            [self mT__10]; 
+
+
+             
+            }
+            break;
+        case 2 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:1:16: INTTYPE // alt
+            {
+
+
+            [self mINTTYPE]; 
+
+
+             
+            }
+            break;
+        case 3 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:1:24: FLOATTYPE // alt
+            {
+
+
+            [self mFLOATTYPE]; 
+
+
+             
+            }
+            break;
+        case 4 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:1:34: ID // alt
+            {
+
+
+            [self mID]; 
+
+
+             
+            }
+            break;
+        case 5 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:1:37: INT // alt
+            {
+
+
+            [self mINT]; 
+
+
+             
+            }
+            break;
+        case 6 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:1:41: WS // alt
+            {
+
+
+            [self mWS]; 
+
+
+             
+            }
+            break;
+
+    }
+
+}
+
+@end /* end of LangLexer implementation line 397 */
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/examples/treeparser/LangParser.h b/runtime/ObjC/Framework/examples/treeparser/LangParser.h
new file mode 100644
index 0000000..62efb21
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treeparser/LangParser.h
@@ -0,0 +1,157 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g 2012-02-16 17:58:54
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* parserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define T__10 10
+#define DECL 4
+#define FLOATTYPE 5
+#define ID 6
+#define INT 7
+#define INTTYPE 8
+#define WS 9
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+/* returnScopeInterface LangParser_start_return */
+@interface LangParser_start_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (LangParser_start_return *)newLangParser_start_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface LangParser_decl_return */
+@interface LangParser_decl_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (LangParser_decl_return *)newLangParser_decl_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface LangParser_type_return */
+@interface LangParser_type_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (LangParser_type_return *)newLangParser_type_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+
+/* Interface grammar class */
+@interface LangParser  : Parser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* AST parserHeaderFile.memVars */
+NSInteger ruleLevel;
+NSArray *ruleNames;
+  /* AST super.memVars */
+/* AST parserMemVars */
+id<TreeAdaptor> treeAdaptor;   /* AST parserMemVars */
+/* ObjC end of memVars */
+
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* AST parserHeaderFile.properties */
+  /* AST super.properties */
+/* AST parserProperties */
+@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<TreeAdaptor> treeAdaptor;   /* AST parserproperties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newLangParser:(id<TokenStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* AST parserHeaderFile.methodsDecl */
+  /* AST super.methodsDecl */
+/* AST parserMethodsDecl */
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void) setTreeAdaptor:(id<TreeAdaptor>)theTreeAdaptor;   /* AST parsermethodsDecl */
+/* ObjC end of methodsDecl */
+
+- (LangParser_start_return *)start; 
+- (LangParser_decl_return *)decl; 
+- (LangParser_type_return *)type; 
+
+
+@end /* end of LangParser interface */
+
diff --git a/runtime/ObjC/Framework/examples/treeparser/LangParser.m b/runtime/ObjC/Framework/examples/treeparser/LangParser.m
new file mode 100644
index 0000000..e879b2b
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treeparser/LangParser.m
@@ -0,0 +1,503 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g
+ *     -                            On : 2012-02-16 17:58:54
+ *     -                for the parser : LangParserParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g 2012-02-16 17:58:54
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "LangParser.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_decl_in_start41;
+static const unsigned long long FOLLOW_decl_in_start41_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_type_in_decl50;
+static const unsigned long long FOLLOW_type_in_decl50_data[] = { 0x0000000000000040LL};
+static ANTLRBitSet *FOLLOW_ID_in_decl52;
+static const unsigned long long FOLLOW_ID_in_decl52_data[] = { 0x0000000000000400LL};
+static ANTLRBitSet *FOLLOW_10_in_decl54;
+static const unsigned long long FOLLOW_10_in_decl54_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+@implementation LangParser_start_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (LangParser_start_return *)newLangParser_start_return
+{
+return [[[LangParser_start_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation LangParser_decl_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (LangParser_decl_return *)newLangParser_decl_return
+{
+return [[[LangParser_decl_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation LangParser_type_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (LangParser_type_return *)newLangParser_type_return
+{
+return [[[LangParser_type_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+
+
+@implementation LangParser  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+/* AST genericParser.synthesize */
+/* AST parserProperties */
+@synthesize treeAdaptor;
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_decl_in_start41 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_decl_in_start41_data Count:(NSUInteger)1] retain];
+    FOLLOW_type_in_decl50 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_type_in_decl50_data Count:(NSUInteger)1] retain];
+    FOLLOW_ID_in_decl52 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_ID_in_decl52_data Count:(NSUInteger)1] retain];
+    FOLLOW_10_in_decl54 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_10_in_decl54_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"DECL", @"FLOATTYPE", @"ID", @"INT", @"INTTYPE", @"WS", @"';'", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g"];
+}
+
++ (LangParser *)newLangParser:(id<TokenStream>)aStream
+{
+    return [[LangParser alloc] initWithTokenStream:aStream];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)aStream
+{
+    self = [super initWithTokenStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:3+1] retain]];
+    if ( self != nil ) {
+        /* start of actions-actionScope-init */
+        /* start of init */
+        /* AST genericParser.init */
+        [self setTreeAdaptor:[[CommonTreeAdaptor newTreeAdaptor] retain]];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    /* AST genericParser.dealloc */
+    [self setTreeAdaptor:nil];
+
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* AST genericParser.methods */
+/* AST parserMethods */
+- (id<TreeAdaptor>) getTreeAdaptor
+{
+	return treeAdaptor;
+}
+
+- (void) setTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor
+{
+	if (aTreeAdaptor != treeAdaptor) {
+		treeAdaptor = aTreeAdaptor;
+	}
+}
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start start
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:10:1: start : decl ;
+ */
+- (LangParser_start_return *) start
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    LangParser_start_return * retval = [LangParser_start_return newLangParser_start_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        LangParser_decl_return * decl1 = nil ;
+
+
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:10:7: ( decl ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:10:9: decl // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        /* ASTParser ruleRef */
+        /* ruleRef */
+        [self pushFollow:FOLLOW_decl_in_start41];
+        decl1 = [self decl];
+
+        [self popFollow];
+
+
+        [treeAdaptor addChild:[decl1 getTree] toTree:root_0];
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end start */
+
+/*
+ * $ANTLR start decl
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:12:1: decl : type ID ';' -> ^( DECL type ID ) ;
+ */
+- (LangParser_decl_return *) decl
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    LangParser_decl_return * retval = [LangParser_decl_return newLangParser_decl_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *ID3 = nil;
+        CommonToken *char_literal4 = nil;LangParser_type_return * type2 = nil ;
+
+
+        CommonTree *ID3_tree=nil;
+        CommonTree *char_literal4_tree=nil;
+        RewriteRuleTokenStream *stream_10 =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token 10"] retain];
+        RewriteRuleTokenStream *stream_ID =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token ID"] retain];
+        RewriteRuleSubtreeStream *stream_type =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule type"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:12:6: ( type ID ';' -> ^( DECL type ID ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:12:8: type ID ';' // alt
+        {
+
+        /* ruleRef */
+        [self pushFollow:FOLLOW_type_in_decl50];
+        type2 = [self type];
+
+        [self popFollow];
+
+
+        [stream_type addElement:[type2 getTree]];
+         
+        ID3=(CommonToken *)[self match:input TokenType:ID Follow:FOLLOW_ID_in_decl52];  
+            [stream_ID addElement:ID3];
+
+         
+        char_literal4=(CommonToken *)[self match:input TokenType:10 Follow:FOLLOW_10_in_decl54];  
+            [stream_10 addElement:char_literal4];
+
+         
+        // AST REWRITE
+        // elements: type, ID
+        // token labels: 
+        // rule labels: retval
+        // token list labels: 
+        // rule list labels: 
+        // wildcard labels: 
+        retval.tree = root_0;
+
+        RewriteRuleSubtreeStream *stream_retval =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+        // 12:20: -> ^( DECL type ID )
+        {
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:12:23: ^( DECL type ID )
+            {
+                CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                root_1 = (CommonTree *)[treeAdaptor becomeRoot:
+                        [[treeAdaptor createTree:DECL Text:@"DECL"] retain]
+                 old:root_1];
+
+                [treeAdaptor addChild:[stream_type nextTree] toTree:root_1];
+
+                 // TODO: args: 
+                [treeAdaptor addChild:
+                            [stream_ID nextNode]
+                 toTree:root_1];
+
+                [treeAdaptor addChild:root_1 toTree:root_0];
+            }
+
+        }
+
+
+        retval.tree = root_0;
+
+
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_10 release];
+        [stream_ID release];
+        [stream_type release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end decl */
+
+/*
+ * $ANTLR start type
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:14:1: type : ( INTTYPE | FLOATTYPE );
+ */
+- (LangParser_type_return *) type
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    LangParser_type_return * retval = [LangParser_type_return newLangParser_type_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *set5 = nil;
+
+        CommonTree *set5_tree=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g:14:6: ( INTTYPE | FLOATTYPE ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/Lang.g: // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        /* ASTParser matchRuleBlockSet */
+        /* ASTParser matchSet */
+        set5 = (CommonToken *)[input LT:1]; /* matchSet */
+
+        if ([input LA:1] == FLOATTYPE||[input LA:1] == INTTYPE) {
+            [input consume];
+            [treeAdaptor addChild:/* ASTParser createNodeFromToken */
+            (CommonTree *)[[treeAdaptor create:set5] retain]
+             toTree:root_0 ];
+            [state setIsErrorRecovery:NO];
+        } else {
+            MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+            @throw mse;
+        }
+
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end type */
+/* ObjC end rules */
+
+@end /* end of LangParser implementation line 692 */
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/Main.java b/runtime/ObjC/Framework/examples/treeparser/Main.java
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/treeparser/Main.java
rename to runtime/ObjC/Framework/examples/treeparser/Main.java
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/README.txt b/runtime/ObjC/Framework/examples/treeparser/README.txt
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/treeparser/README.txt
rename to runtime/ObjC/Framework/examples/treeparser/README.txt
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/files b/runtime/ObjC/Framework/examples/treeparser/files
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/treeparser/files
rename to runtime/ObjC/Framework/examples/treeparser/files
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/input b/runtime/ObjC/Framework/examples/treeparser/input
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/treeparser/input
rename to runtime/ObjC/Framework/examples/treeparser/input
diff --git a/runtime/ObjC/Framework/examples/treeparser/main.m b/runtime/ObjC/Framework/examples/treeparser/main.m
new file mode 100644
index 0000000..610c76a
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treeparser/main.m
@@ -0,0 +1,55 @@
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+#import "LangLexer.h"
+#import "LangParser.h"
+#import "LangDumpDecl.h"
+#import "stdio.h"
+#include <unistd.h>
+
+/*
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.*;
+
+public class Main {
+	public static void main(String[] args) throws Exception {
+		CharStream input = new ANTLRFileStream(args[0]);
+		LangLexer lex = new LangLexer(input);
+		CommonTokenStream tokens = new CommonTokenStream(lex);
+		LangParser parser = new LangParser(tokens);
+		//LangParser.decl_return r = parser.decl();
+		LangParser.start_return r = parser.start();
+		System.out.println("tree: "+((Tree)r.tree).toStringTree());
+		CommonTree r0 = ((CommonTree)r.tree);
+        
+		CommonTreeNodeStream nodes = new CommonTreeNodeStream(r0);
+		nodes.setTokenStream(tokens);
+		LangDumpDecl walker = new LangDumpDecl(nodes);
+		walker.decl();
+	}
+}
+*/
+
+int main(int argc, const char * argv[])
+{
+    NSError *error;
+    NSLog(@"starting treeparser\n");
+    NSString *dir = @"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treeparser/input";
+	NSString *string = [NSString stringWithContentsOfFile:dir  encoding:NSASCIIStringEncoding error:&error];
+	NSLog(@"input = %@", string);
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:string];
+	LangLexer *lex = [LangLexer newLangLexerWithCharStream:stream];
+    CommonTokenStream *tokens = [CommonTokenStream newCommonTokenStreamWithTokenSource:lex];
+    LangParser *parser = [LangParser newLangParser:tokens];
+//    LangParser_decl_return *r = [parser decl];
+    LangParser_start_return *r = [parser start];
+    NSLog( @"tree: %@", [r.tree toStringTree]);
+    CommonTree *r0 = [r getTree];
+    
+    CommonTreeNodeStream *nodes = [CommonTreeNodeStream newCommonTreeNodeStream:r0];
+    [nodes setTokenStream:tokens];
+    LangDumpDecl *walker = [LangDumpDecl newLangDumpDecl:nodes];
+    [walker decl];
+
+    NSLog(@"exiting treeparser\n");
+	return 0;
+}
\ No newline at end of file
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treeparser/output b/runtime/ObjC/Framework/examples/treeparser/output
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/treeparser/output
rename to runtime/ObjC/Framework/examples/treeparser/output
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g b/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g
rename to runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g
diff --git a/antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.tokens b/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.tokens
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.tokens
rename to runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.tokens
diff --git a/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.h b/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.h
new file mode 100644
index 0000000..8595371
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.h
@@ -0,0 +1,37 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g 2012-02-16 17:42:35
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* Start cyclicDFAInterface */
+
+#pragma mark Rule return scopes Interface start
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define INT 4
+#define WS 5
+/* interface lexer class */
+@interface TreeRewriteLexer : Lexer { // line 283
+/* ObjC start of actions.lexer.memVars */
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (TreeRewriteLexer *)newTreeRewriteLexerWithCharStream:(id<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+/* ObjC end actions.lexer.methodsDecl */
+- (void) mINT ; 
+- (void) mWS ; 
+- (void) mTokens ; 
+
+@end /* end of TreeRewriteLexer interface */
+
diff --git a/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.m b/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.m
new file mode 100644
index 0000000..136f01f
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteLexer.m
@@ -0,0 +1,219 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g
+ *     -                            On : 2012-02-16 17:42:35
+ *     -                 for the lexer : TreeRewriteLexerLexer
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g 2012-02-16 17:42:35
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "TreeRewriteLexer.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+/** As per Terence: No returns for lexer rules! */
+@implementation TreeRewriteLexer // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (TreeRewriteLexer *)newTreeRewriteLexerWithCharStream:(id<CharStream>)anInput
+{
+    return [[TreeRewriteLexer alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:3+1]];
+    if ( self != nil ) {
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+/* ObjC end methods() */
+
+/* Start of Rules */
+// $ANTLR start "INT"
+- (void) mINT
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = INT;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:15:5: ( ( '0' .. '9' )+ ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:15:7: ( '0' .. '9' )+ // alt
+        {
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:15:7: ( '0' .. '9' )+ // positiveClosureBlock
+        NSInteger cnt1 = 0;
+        do {
+            NSInteger alt1 = 2;
+            NSInteger LA1_0 = [input LA:1];
+            if ( ((LA1_0 >= '0' && LA1_0 <= '9')) ) {
+                alt1=1;
+            }
+
+
+            switch (alt1) {
+                case 1 : ;
+                    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g: // alt
+                    {
+
+                    if ((([input LA:1] >= '0') && ([input LA:1] <= '9'))) {
+                        [input consume];
+                    } else {
+                        MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+                        [self recover:mse];
+                        @throw mse;
+                    }
+
+                     
+                    }
+                    break;
+
+                default :
+                    if ( cnt1 >= 1 )
+                        goto loop1;
+                    EarlyExitException *eee =
+                        [EarlyExitException newException:input decisionNumber:1];
+                    @throw eee;
+            }
+            cnt1++;
+        } while (YES);
+        loop1: ;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "INT" */
+// $ANTLR start "WS"
+- (void) mWS
+{
+    //
+    /* ruleScopeSetUp */
+
+    /* ruleDeclarations */
+
+    @try {
+        NSInteger _type = WS;
+        NSInteger _channel = TokenChannelDefault;
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:18:5: ( ' ' ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:18:9: ' ' // alt
+        {
+
+
+        [self matchChar:' ']; 
+
+         
+
+        _channel=HIDDEN;
+
+         
+        }
+
+        /* token+rule list labels */
+
+        state.type = _type;
+        state.channel = _channel;
+    }
+    @finally {
+        //
+        /* ruleScopeCleanUp */
+
+    }
+    return;
+}
+/* $ANTLR end "WS" */
+- (void) mTokens
+{
+    // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:1:8: ( INT | WS ) //ruleblock
+    NSInteger alt2=2;
+    NSInteger LA2_0 = [input LA:1];
+
+    if ( ((LA2_0 >= '0' && LA2_0 <= '9')) ) {
+        alt2=1;
+    }
+    else if ( (LA2_0==' ') ) {
+        alt2=2;
+    }
+    else {
+        NoViableAltException *nvae = [NoViableAltException newException:2 state:0 stream:input];
+        nvae.c = LA2_0;
+        @throw nvae;
+
+    }
+    switch (alt2) {
+        case 1 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:1:10: INT // alt
+            {
+
+
+            [self mINT]; 
+
+
+             
+            }
+            break;
+        case 2 : ;
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:1:14: WS // alt
+            {
+
+
+            [self mWS]; 
+
+
+             
+            }
+            break;
+
+    }
+
+}
+
+@end /* end of TreeRewriteLexer implementation line 397 */
\ No newline at end of file
diff --git a/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.h b/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.h
new file mode 100644
index 0000000..3139f47
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.h
@@ -0,0 +1,128 @@
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g 2012-02-16 17:42:35
+
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+
+/* parserHeaderFile */
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+#pragma mark Tokens
+#ifdef EOF
+#undef EOF
+#endif
+#define EOF -1
+#define INT 4
+#define WS 5
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+#pragma mark Rule Return Scopes returnScopeInterface
+/* returnScopeInterface TreeRewriteParser_rule_return */
+@interface TreeRewriteParser_rule_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (TreeRewriteParser_rule_return *)newTreeRewriteParser_rule_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+/* returnScopeInterface TreeRewriteParser_subrule_return */
+@interface TreeRewriteParser_subrule_return : ParserRuleReturnScope { /* returnScopeInterface line 1838 */
+/* AST returnScopeInterface.memVars */
+CommonTree *tree; /* ObjC start of memVars() */
+
+}
+/* start property declarations */
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) CommonTree *tree;
+
+/* start of method declarations */
+
++ (TreeRewriteParser_subrule_return *)newTreeRewriteParser_subrule_return;
+/* this is start of set and get methods */
+/* AST returnScopeInterface.methodsDecl */
+- (CommonTree *)getTree;
+
+- (void) setTree:(CommonTree *)aTree;
+  /* methodsDecl */
+
+@end /* end of returnScopeInterface interface */
+
+
+
+/* Interface grammar class */
+@interface TreeRewriteParser  : Parser { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+
+
+/* ObjC start of actions.(actionScope).memVars */
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+/* AST parserHeaderFile.memVars */
+NSInteger ruleLevel;
+NSArray *ruleNames;
+  /* AST super.memVars */
+/* AST parserMemVars */
+id<TreeAdaptor> treeAdaptor;   /* AST parserMemVars */
+/* ObjC end of memVars */
+
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+/* AST parserHeaderFile.properties */
+  /* AST super.properties */
+/* AST parserProperties */
+@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id<TreeAdaptor> treeAdaptor;   /* AST parserproperties */
+/* ObjC end of properties */
+
++ (void) initialize;
++ (id) newTreeRewriteParser:(id<TokenStream>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+/* AST parserHeaderFile.methodsDecl */
+  /* AST super.methodsDecl */
+/* AST parserMethodsDecl */
+- (id<TreeAdaptor>) getTreeAdaptor;
+- (void) setTreeAdaptor:(id<TreeAdaptor>)theTreeAdaptor;   /* AST parsermethodsDecl */
+/* ObjC end of methodsDecl */
+
+- (TreeRewriteParser_rule_return *)rule; 
+- (TreeRewriteParser_subrule_return *)subrule; 
+
+
+@end /* end of TreeRewriteParser interface */
+
diff --git a/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.m b/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.m
new file mode 100644
index 0000000..b4d635d
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treerewrite/TreeRewriteParser.m
@@ -0,0 +1,371 @@
+/** \file
+ *  This OBJC source file was generated by $ANTLR version 3.4
+ *
+ *     -  From the grammar source file : /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g
+ *     -                            On : 2012-02-16 17:42:35
+ *     -                for the parser : TreeRewriteParserParser
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+*/
+// $ANTLR 3.4 /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g 2012-02-16 17:42:35
+
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "TreeRewriteParser.h"
+/* ----------------------------------------- */
+
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+
+#pragma mark Bitsets
+static ANTLRBitSet *FOLLOW_INT_in_rule26;
+static const unsigned long long FOLLOW_INT_in_rule26_data[] = { 0x0000000000000010LL};
+static ANTLRBitSet *FOLLOW_subrule_in_rule28;
+static const unsigned long long FOLLOW_subrule_in_rule28_data[] = { 0x0000000000000002LL};
+static ANTLRBitSet *FOLLOW_INT_in_subrule53;
+static const unsigned long long FOLLOW_INT_in_subrule53_data[] = { 0x0000000000000002LL};
+
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+@implementation TreeRewriteParser_rule_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (TreeRewriteParser_rule_return *)newTreeRewriteParser_rule_return
+{
+return [[[TreeRewriteParser_rule_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+@implementation TreeRewriteParser_subrule_return /* returnScopeImplementation */
+/* AST returnScope.synthesize */
+@synthesize tree; /* start of synthesize -- OBJC-Line 1837 */
++ (TreeRewriteParser_subrule_return *)newTreeRewriteParser_subrule_return
+{
+return [[[TreeRewriteParser_subrule_return alloc] init] retain];
+}
+
+- (id) init
+{
+self = [super init];
+return self;
+}
+
+/* AST returnScope.methods */
+- (CommonTree *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(CommonTree *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+
+@end /* end of returnScope implementation */
+
+
+
+@implementation TreeRewriteParser  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+/* ObjC start synthesize() */
+/* AST genericParser.synthesize */
+/* AST parserProperties */
+@synthesize treeAdaptor;
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    FOLLOW_INT_in_rule26 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_rule26_data Count:(NSUInteger)1] retain];
+    FOLLOW_subrule_in_rule28 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_subrule_in_rule28_data Count:(NSUInteger)1] retain];
+    FOLLOW_INT_in_subrule53 = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)FOLLOW_INT_in_subrule53_data Count:(NSUInteger)1] retain];
+
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"<invalid>", @"<EOR>", @"<DOWN>", @"<UP>", 
+ @"INT", @"WS", nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"/Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g"];
+}
+
++ (TreeRewriteParser *)newTreeRewriteParser:(id<TokenStream>)aStream
+{
+    return [[TreeRewriteParser alloc] initWithTokenStream:aStream];
+}
+
+- (id) initWithTokenStream:(id<TokenStream>)aStream
+{
+    self = [super initWithTokenStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:2+1] retain]];
+    if ( self != nil ) {
+        /* start of actions-actionScope-init */
+        /* start of init */
+        /* AST genericParser.init */
+        [self setTreeAdaptor:[[CommonTreeAdaptor newTreeAdaptor] retain]];
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    /* AST genericParser.dealloc */
+    [self setTreeAdaptor:nil];
+
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+/* AST genericParser.methods */
+/* AST parserMethods */
+- (id<TreeAdaptor>) getTreeAdaptor
+{
+	return treeAdaptor;
+}
+
+- (void) setTreeAdaptor:(id<TreeAdaptor>)aTreeAdaptor
+{
+	if (aTreeAdaptor != treeAdaptor) {
+		treeAdaptor = aTreeAdaptor;
+	}
+}
+/* ObjC end methods() */
+/* ObjC start rules */
+/*
+ * $ANTLR start rule
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:8:1: rule : INT subrule -> ^( subrule INT ) ;
+ */
+- (TreeRewriteParser_rule_return *) rule
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    TreeRewriteParser_rule_return * retval = [TreeRewriteParser_rule_return newTreeRewriteParser_rule_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *INT1 = nil;TreeRewriteParser_subrule_return * subrule2 = nil ;
+
+
+        CommonTree *INT1_tree=nil;
+        RewriteRuleTokenStream *stream_INT =
+            [[RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"token INT"] retain];
+        RewriteRuleSubtreeStream *stream_subrule =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                                description:@"rule subrule"] retain];
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:8:5: ( INT subrule -> ^( subrule INT ) ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:8:7: INT subrule // alt
+        {
+
+        INT1=(CommonToken *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_rule26];  
+            [stream_INT addElement:INT1];
+
+         
+        /* ruleRef */
+        [self pushFollow:FOLLOW_subrule_in_rule28];
+        subrule2 = [self subrule];
+
+        [self popFollow];
+
+
+        [stream_subrule addElement:[subrule2 getTree]];
+         
+        // AST REWRITE
+        // elements: subrule, INT
+        // token labels: 
+        // rule labels: retval
+        // token list labels: 
+        // rule list labels: 
+        // wildcard labels: 
+        retval.tree = root_0;
+
+        RewriteRuleSubtreeStream *stream_retval =
+            [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                description:@"token retval" element:retval!=nil?[retval getTree]:nil] retain];
+
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+        // 8:19: -> ^( subrule INT )
+        {
+            // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:8:22: ^( subrule INT )
+            {
+                CommonTree *root_1 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+                root_1 = (CommonTree *)[treeAdaptor becomeRoot:(id<Tree>)[stream_subrule nextNode] old:root_1];
+
+                 // TODO: args: 
+                [treeAdaptor addChild:
+                            [stream_INT nextNode]
+                 toTree:root_1];
+
+                [treeAdaptor addChild:root_1 toTree:root_0];
+            }
+
+        }
+
+
+        retval.tree = root_0;
+
+
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+        [stream_INT release];
+        [stream_subrule release];
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end rule */
+
+/*
+ * $ANTLR start subrule
+ * /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:11:1: subrule : INT ;
+ */
+- (TreeRewriteParser_subrule_return *) subrule
+{
+    /* ruleScopeSetUp */
+
+    /* AST ruleDeclarations */
+    /* ruleDeclarations */
+    TreeRewriteParser_subrule_return * retval = [TreeRewriteParser_subrule_return newTreeRewriteParser_subrule_return];
+    [retval setStart:[input LT:1]];
+
+
+    CommonTree *root_0 = nil;
+
+    @try {
+        /* AST ruleLabelDefs */
+        /* ruleLabelDefs entry */
+        CommonToken *INT3 = nil;
+
+        CommonTree *INT3_tree=nil;
+
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:12:5: ( INT ) // ruleBlockSingleAlt
+        // /Users/acondit/source/antlr/code/antlr3/runtime/ObjC/Framework/examples/treerewrite/TreeRewrite.g:12:9: INT // alt
+        {
+        root_0 = (CommonTree *)[[[treeAdaptor class] newEmptyTree] retain];
+
+
+
+        /* ASTParser tokenRef */
+        INT3=(CommonToken *)[self match:input TokenType:INT Follow:FOLLOW_INT_in_subrule53]; 
+        INT3_tree = /* ASTParser createNodeFromToken */
+        (CommonTree *)[[treeAdaptor create:INT3] retain]
+        ;
+        [treeAdaptor addChild:INT3_tree  toTree:root_0];
+
+         
+        }
+
+        /* ASTParser ruleCleanUp */
+        /* AST ruleCleanUp */
+        /* token+rule list labels */
+        [retval setStop:[input LT:-1]];
+
+
+
+            retval.tree = (CommonTree *)[treeAdaptor rulePostProcessing:root_0];
+            [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+
+    }
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        /* ASTParser rule.setErrorReturnValue */
+        retval.tree = (CommonTree *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+
+    }
+
+    @finally {
+        /* ruleScopeCleanUp */
+
+    }
+    return retval;
+}
+/* $ANTLR end subrule */
+/* ObjC end rules */
+
+@end /* end of TreeRewriteParser implementation line 692 */
diff --git a/runtime/ObjC/Framework/examples/treerewrite/main.m b/runtime/ObjC/Framework/examples/treerewrite/main.m
new file mode 100644
index 0000000..6b0062b
--- /dev/null
+++ b/runtime/ObjC/Framework/examples/treerewrite/main.m
@@ -0,0 +1,38 @@
+#import <Foundation/Foundation.h>
+#import <ANTLR/ANTLR.h>
+#import "TreeRewriteLexer.h"
+#import "TreeRewriteParser.h"
+//#import "stdio.h"
+//#include <unistd.h>
+
+int main() {
+	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"23 42"];
+	TreeRewriteLexer *lexer = [TreeRewriteLexer newTreeRewriteLexerWithCharStream:stream];
+	
+//    id<Token> currentToken;
+//    while ((currentToken = [lexer nextToken]) && [currentToken type] != TokenTypeEOF) {
+//        NSLog(@"%@", currentToken);
+//    }
+	
+	CommonTokenStream *tokenStream = [CommonTokenStream newCommonTokenStreamWithTokenSource:lexer];
+	TreeRewriteParser *parser = [[TreeRewriteParser alloc] initWithTokenStream:tokenStream];
+	CommonTree *rule_tree = [[parser rule] getTree];
+	NSLog(@"tree: %@", [rule_tree treeDescription]);
+//	CommonTreeNodeStream *treeStream = [[CommonTreeNodeStream alloc] initWithTree:program_tree];
+//	SimpleCTP *walker = [[SimpleCTP alloc] initWithTreeNodeStream:treeStream];
+//	[walker program];
+
+	[lexer release];
+	[stream release];
+	[tokenStream release];
+	[parser release];
+//	[treeStream release];
+//	[walker release];
+
+	[pool release];
+    // sleep for objectalloc
+    // while(1) sleep(60);
+	return 0;
+}
\ No newline at end of file
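The commented-out walker hookup in the main above follows the same pattern as the treeparser example earlier in this patch: wrap the parser's result tree in a CommonTreeNodeStream and hand it to a generated tree grammar. A sketch of that follow-up step, reusing the rule_tree and tokenStream variables from the main above; TreeRewriteWalker and its constructor are hypothetical names, since no tree grammar for TreeRewrite.g is part of this change:

    // Hypothetical tree-grammar class; only the CommonTreeNodeStream calls below
    // are taken from the treeparser example elsewhere in this patch.
    CommonTreeNodeStream *nodes = [CommonTreeNodeStream newCommonTreeNodeStream:rule_tree];
    [nodes setTokenStream:tokenStream];
    TreeRewriteWalker *walker = [TreeRewriteWalker newTreeRewriteWalker:nodes];  // hypothetical
    [walker rule];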
diff --git a/antlr-3.4/runtime/ObjC/Framework/filelist.java b/runtime/ObjC/Framework/filelist.java
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/filelist.java
rename to runtime/ObjC/Framework/filelist.java
diff --git a/antlr-3.4/runtime/ObjC/Framework/filelist.objc b/runtime/ObjC/Framework/filelist.objc
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/filelist.objc
rename to runtime/ObjC/Framework/filelist.objc
diff --git a/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.h b/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.h
new file mode 100755
index 0000000..5764f59
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.h
@@ -0,0 +1,51 @@
+// [The "BSD licence"]
+// Copyright (c) 2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import <SenTestingKit/SenTestingKit.h>
+#import "CommonTreeAdaptor.h"
+#import "RewriteRuleTokenStream.h"
+#import "CommonToken.h"
+
+@interface TestRewriteRuleTokenStream : SenTestCase {
+    CommonTreeAdaptor *treeAdaptor;
+    RewriteRuleTokenStream *stream;
+    
+    CommonToken *token1;
+    CommonToken *token2;
+    CommonToken *token3;
+    CommonToken *token4;
+}
+
+- (void) setUp;
+- (void) tearDown;
+//- (void) test01EmptyRewriteStream;
+- (void) test02RewriteStreamCount;
+- (void) test03SingleElement;
+- (void) test04SingleElementDup;
+- (void) test05MultipleElements;
+- (void) test06MultipleElementsAfterReset;
+
+@end
diff --git a/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.m b/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.m
new file mode 100755
index 0000000..7cd1fac
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/TestRewriteRuleTokenStream/TestRewriteRuleTokenStream.m
@@ -0,0 +1,201 @@
+// [The "BSD licence"]
+// Copyright (c) 2007 Kay Roepke
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#import "TestRewriteRuleTokenStream.h"
+#import "RewriteRuleTokenStream.h"
+#import "CommonTreeAdaptor.h"
+#import "CommonToken.h"
+
+@implementation TestRewriteRuleTokenStream
+
+- (void) setUp
+{
+    treeAdaptor = [CommonTreeAdaptor newTreeAdaptor];
+    stream = [RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                          description:@"rewrite rule token stream"];
+    token1 = [CommonToken newToken:5];
+    token2 = [CommonToken newToken:6];
+    token3 = [CommonToken newToken:7];
+    token4 = [CommonToken newToken:8];
+    [token1 setText:@"token 1"];
+    [token2 setText:@"token 2"];
+    [token3 setText:@"token 3"];
+    [token4 setText:@"token 4"];
+}
+
+- (void) tearDown
+{
+    [token1 release]; token1 = nil;
+    [token2 release]; token2 = nil;
+    [token3 release]; token3 = nil;
+    [token4 release]; token4 = nil;
+    
+    [treeAdaptor release]; treeAdaptor = nil;
+    [stream release]; stream = nil;
+}
+
+- (void) test01EmptyRewriteStream
+{
+    treeAdaptor = [CommonTreeAdaptor newTreeAdaptor];
+    stream = [RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"rewrite rule token stream"];
+    STAssertFalse([stream hasNext], @"-(BOOL)hasNext should be NO, but isn't");
+    STAssertThrows([stream nextToken], @"-next on empty stream should throw exception, but doesn't");
+}
+
+- (void) test02RewriteStreamCount
+{
+    treeAdaptor = [CommonTreeAdaptor newTreeAdaptor];
+    stream = [RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"rewrite rule token stream"];
+    token1 = [CommonToken newToken:5];
+    token2 = [CommonToken newToken:6];
+    [token1 setText:@"token 1"];
+    [token2 setText:@"token 2"];
+    STAssertTrue([stream size] == 0,
+                 @"empty stream should have count==0");
+    [stream addElement:token1];
+    STAssertTrue([stream size] == 1,
+                 @"single element stream should have count==1");
+    [stream addElement:token2];
+    STAssertTrue([stream size] == 2,
+                 @"multi-element stream should have count==2");
+
+}
+
+- (void) test03SingleElement
+{
+    treeAdaptor = [CommonTreeAdaptor newTreeAdaptor];
+    stream = [RewriteRuleTokenStream newRewriteRuleTokenStream:treeAdaptor
+                                                             description:@"rewrite rule token stream"];
+    token1 = [CommonToken newToken:5];
+    token2 = [CommonToken newToken:6];
+    token3 = [CommonToken newToken:7];
+    token4 = [CommonToken newToken:8];
+    [token1 setText:@"token 1"];
+    [token2 setText:@"token 2"];
+    [token3 setText:@"token 3"];
+    [token4 setText:@"token 4"];
+    [stream addElement:token1];
+    STAssertTrue([stream hasNext], @"-hasNext should be YES, but isn't");
+    CommonTree *tree = [stream nextNode];
+    STAssertEqualObjects([tree getToken], token1, @"returned token from stream should be token1, but isn't");
+}
+
+- (void) test04SingleElementDup
+{
+    [stream addElement:token1];
+    CommonTree *tree1, *tree2;
+    STAssertNoThrow(tree1 = [stream nextNode],
+                    @"stream iteration should not throw exception"
+                    );
+    STAssertNoThrow(tree2 = [stream nextNode],
+                    @"stream iteration past element count (single element) should not throw exception"
+                    );
+    STAssertEqualObjects([tree1 getToken], [tree2 getToken],
+                         @"tokens should be the same");
+    STAssertFalse(tree1 == tree2, 
+                         @"trees should be different, but aren't");
+}
+
+- (void) test05MultipleElements
+{
+    [stream addElement:token1];
+    [stream addElement:token2];
+    [stream addElement:token3];
+    CommonTree *tree1, *tree2, *tree3, *tree4;
+    STAssertNoThrow(tree1 = [stream nextNode],
+                    @"stream iteration should not throw exception"
+                    );
+    STAssertEqualObjects([tree1 getToken], token1,
+                         @"[tree1 token] should be equal to token1"
+                         );
+    STAssertNoThrow(tree2 = [stream nextNode],
+                    @"stream iteration should not throw exception"
+                    );
+    STAssertEqualObjects([tree2 getToken], token2,
+                         @"[tree2 token] should be equal to token2"
+                         );
+    STAssertNoThrow(tree3 = [stream nextNode],
+                    @"stream iteration should not throw exception"
+                    );
+    STAssertEqualObjects([tree3 getToken], token3,
+                         @"[tree3 token] should be equal to token3"
+                         );
+    STAssertThrows(tree4 = [stream nextNode],
+                    @"iterating beyond end of stream should throw an exception"
+                    );
+}
+
+- (void) test06MultipleElementsAfterReset
+{
+    [stream addElement:token1];
+    [stream addElement:token2];
+    [stream addElement:token3];
+    CommonTree *tree1, *tree2, *tree3;
+    
+    // consume the stream completely
+    STAssertNoThrow(tree1 = [stream nextNode],
+                    @"stream iteration should not throw exception"
+                    );
+    STAssertEqualObjects([tree1 getToken], token1,
+                         @"[tree1 token] should be equal to token1"
+                         );
+    STAssertNoThrow(tree2 = [stream nextNode],
+                    @"stream iteration should not throw exception"
+                    );
+    STAssertEqualObjects([tree2 getToken], token2,
+                         @"[tree2 token] should be equal to token2"
+                         );
+    STAssertNoThrow(tree3 = [stream nextNode],
+                    @"stream iteration should not throw exception"
+                    );
+    
+    [stream reset]; // after resetting the stream it should dup
+    
+    CommonTree *tree1Dup, *tree2Dup, *tree3Dup;
+
+    STAssertNoThrow(tree1Dup = [stream nextNode],
+                    @"stream iteration should not throw exception"
+                    );
+    STAssertTrue(tree1 != tree1Dup,
+                 @"tree1Dup should be a new node, not the same object as tree1"
+                 );
+    STAssertNoThrow(tree2Dup = [stream nextNode],
+                    @"stream iteration should not throw exception"
+                    );
+    STAssertTrue(tree2 != tree2Dup,
+                 @"tree2Dup should be a new node, not the same object as tree2"
+                 );
+    STAssertNoThrow(tree3Dup = [stream nextNode],
+                    @"stream iteration should not throw exception"
+                    );
+    STAssertTrue(tree3 != tree3Dup,
+                 @"tree3Dup should be a new node, not the same object as tree3"
+                 );
+}
+
+@end
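Read together, these tests pin down the intended RewriteRuleTokenStream life cycle: queue tokens with addElement:, pull freshly created tree nodes with nextNode, and reset to replay the stream. A condensed usage sketch, limited to the calls exercised in this test class (the token type 5 and the text are arbitrary example values, and the imports are assumed to match the test above):

    CommonTreeAdaptor *adaptor = [CommonTreeAdaptor newTreeAdaptor];
    RewriteRuleTokenStream *stream =
        [RewriteRuleTokenStream newRewriteRuleTokenStream:adaptor
                                              description:@"token ID"];
    CommonToken *tok = [CommonToken newToken:5];   // arbitrary token type
    [tok setText:@"x"];
    [stream addElement:tok];              // queue the token for a later rewrite
    CommonTree *node = [stream nextNode]; // wraps the token in a new tree node
    [stream reset];                       // rewind; nextNode then duplicates again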
diff --git a/runtime/ObjC/Framework/test/runtime/misc/FastQueueTest.h b/runtime/ObjC/Framework/test/runtime/misc/FastQueueTest.h
new file mode 100644
index 0000000..3000a59
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/misc/FastQueueTest.h
@@ -0,0 +1,24 @@
+//
+//  ANTLRFastQueueTest.h
+//  ANTLR
+//
+//  Created by Ian Michell on 13/05/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+#import <SenTestingKit/SenTestingKit.h>
+
+
+@interface FastQueueTest : SenTestCase {
+
+}
+
+-(void) testInit;
+-(void) testAddAndGet;
+-(void) testInvalidElementIndex;
+-(void) testHead;
+-(void) testClear;
+-(void) testDescription;
+-(void) testRemove;
+
+@end
diff --git a/runtime/ObjC/Framework/test/runtime/misc/FastQueueTest.m b/runtime/ObjC/Framework/test/runtime/misc/FastQueueTest.m
new file mode 100644
index 0000000..c37898a
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/misc/FastQueueTest.m
@@ -0,0 +1,103 @@
+//
+//  FastQueueTest.m
+//  ANTLR
+//
+//  Created by Ian Michell on 13/05/2010.
+//  Copyright 2010 Ian Michell and Alan Condit. All rights reserved.
+//
+
+#import "FastQueueTest.h"
+#import "FastQueue.h"
+#import "ANTLRError.h"
+#import "RuntimeException.h"
+
+@implementation FastQueueTest
+
+-(void) testInit
+{
+	FastQueue *queue = [[FastQueue newFastQueue] retain];
+	STAssertNotNil(queue, @"Queue was not created and was nil");
+	[queue release];
+}
+
+-(void) testAddAndGet
+{
+	FastQueue *queue = [[FastQueue newFastQueue] retain];
+	STAssertNotNil(queue, @"Queue was not created and was nil");
+	[queue addObject:@"My String"];
+	STAssertTrue([[queue objectAtIndex:0] isKindOfClass:[NSString class]], @"First object is not a NSString");
+	STAssertEquals([queue objectAtIndex:0], @"My String", @"Object at index zero is invalid");
+	STAssertTrue([queue size] == 1, @"Queue is the wrong size: %d", [queue size]);
+	[queue release];
+}
+
+-(void) testInvalidElementIndex
+{
+    //RuntimeException *NoSuchElementException = [NoSuchElementException newException:@"No such element exception"];
+    id retVal;
+	FastQueue *queue = [[FastQueue newFastQueue] retain];
+	STAssertNotNil(queue, @"Queue was not created and was nil");
+	@try 
+	{
+		retVal = [queue objectAtIndex:100];
+	}
+	@catch (NoSuchElementException *e) 
+	{
+		STAssertTrue([[e name] isEqualTo:@"NoSuchElementException"], @"Exception was not type: NoSuchElementException -- %@", [e name]);
+		return;
+	}
+	STFail(@"Exception NoSuchElementException was not thrown -- %@", [retVal name]);
+    [queue release];
+}
+
+-(void) testHead
+{
+	FastQueue *queue = [[FastQueue newFastQueue] retain];
+	STAssertNotNil(queue, @"Queue was not created and was nil");
+	[queue addObject:@"Item 1"];
+	[queue addObject:@"Item 2"];
+	[queue addObject:@"Item 3"];
+	id head = [queue head];
+	STAssertNotNil(head, @"Object returned from head is nil");
+	STAssertEquals(head, @"Item 1", @"Object returned was not first item in");
+	[queue release];
+}
+
+-(void) testClear
+{
+	FastQueue *queue = [[FastQueue newFastQueue] retain];
+	STAssertNotNil(queue, @"Queue was not created and was nil");
+	[queue addObject:@"Item 1"];
+	[queue addObject:@"Item 2"];
+	[queue addObject:@"Item 3"];
+	STAssertTrue([queue size] == 3, @"Queue was too small, was: %d expected 3", [queue size]);
+	[queue reset];
+	STAssertTrue([queue size] == 0, @"Queue is not empty, it's still %d", [queue size]);
+	[queue release];
+}
+
+-(void) testDescription
+{
+	FastQueue *queue = [[FastQueue newFastQueue] retain];
+	STAssertNotNil(queue, @"Queue was not created and was nil");
+	[queue addObject:@"My"];
+	[queue addObject:@"String"];
+	STAssertTrue([[queue description] isEqualToString:@"My String"], @"Queue description was not right, got: \"%@\" expected: \"My String\"", [queue description]);
+	[queue release];
+}
+
+-(void) testRemove
+{
+	FastQueue *queue = [[FastQueue newFastQueue] retain];
+	STAssertNotNil(queue, @"Queue was not created and was nil");
+	[queue addObject:@"My"];
+	[queue addObject:@"String"];
+	STAssertTrue([queue size] == 2, @"Queue not the correct size, was: %d expected 2", [queue size]);
+	[queue remove];
+	STAssertTrue([queue size] == 1, @"Queue not the correct size, was %d expected 1", [queue size]);
+	[queue remove]; // test that the queue is reset when we remove the last object...
+	STAssertTrue([queue size] == 0, @"Queue was not reset, when we hit the buffer, was still %d", [queue size]);
+	[queue release];
+}
+
+@end
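The FastQueue behaviour covered by these tests amounts to a plain FIFO with index-based peeking; a minimal usage sketch restricted to the calls that appear above:

    FastQueue *queue = [FastQueue newFastQueue];
    [queue addObject:@"Item 1"];
    [queue addObject:@"Item 2"];
    id head   = [queue head];             // oldest element, here @"Item 1"
    id second = [queue objectAtIndex:1];  // peek by index without removing
    [queue remove];                       // drop the oldest element
    [queue reset];                        // empty the queue; size returns to 0
    [queue release];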
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/IntArrayTest.h b/runtime/ObjC/Framework/test/runtime/misc/IntArrayTest.h
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/IntArrayTest.h
rename to runtime/ObjC/Framework/test/runtime/misc/IntArrayTest.h
diff --git a/runtime/ObjC/Framework/test/runtime/misc/IntArrayTest.m b/runtime/ObjC/Framework/test/runtime/misc/IntArrayTest.m
new file mode 100644
index 0000000..6d1e300
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/misc/IntArrayTest.m
@@ -0,0 +1,47 @@
+//
+//  IntArrayTest.m
+//  ANTLR
+//
+//  Created by Ian Michell on 13/05/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+#import "IntArrayTest.h"
+#import "IntArray.h"
+
+@implementation IntArrayTest
+
+-(void) testAdd
+{
+	IntArray *intArray = [IntArray newArrayWithLen:10];
+	[intArray addInteger:1];
+	STAssertTrue([intArray count] == 1, @"Int array should be of size 1");
+	STAssertTrue([intArray integerAtIndex:0] == 1, @"First item in int array should be 1");
+	[intArray release];
+}
+
+-(void) testPushPop
+{
+	IntArray *intArray = [IntArray newArrayWithLen:10];
+	for (NSInteger i = 0; i < 10; i++)
+	{
+		[intArray push:i + 1];
+	}
+	NSInteger popped = [intArray pop];
+	NSLog(@"Popped value: %d", popped);
+	STAssertTrue(popped == 10, @"Pop should pull the last element out, which should be 10 was: %d", popped);
+	[intArray release];
+}
+
+-(void) testClearAndAdd
+{
+	IntArray *intArray = [IntArray newArrayWithLen:10];
+	[intArray addInteger:1];
+	STAssertTrue([intArray count] == 1, @"Int array should be of size 1");
+	STAssertTrue([intArray integerAtIndex:0] == 1, @"First item in int array should be 1");
+	[intArray reset];
+	STAssertTrue([intArray count] == 0, @"Array size should be 0");
+	[intArray release];
+}
+
+@end
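Likewise, the IntArray tests exercise both the array-style and the stack-style entry points; a short sketch using only the methods that appear in this test class:

    IntArray *ints = [IntArray newArrayWithLen:10];
    [ints addInteger:1];                 // count is now 1
    NSInteger first = [ints integerAtIndex:0];
    [ints push:42];
    NSInteger top = [ints pop];          // returns the most recently pushed value (42)
    [ints reset];                        // count drops back to 0
    [ints release];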
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/TestDictionary.h b/runtime/ObjC/Framework/test/runtime/misc/TestDictionary.h
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/TestDictionary.h
rename to runtime/ObjC/Framework/test/runtime/misc/TestDictionary.h
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/TestDictionary.m b/runtime/ObjC/Framework/test/runtime/misc/TestDictionary.m
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/test/runtime/misc/TestDictionary.m
rename to runtime/ObjC/Framework/test/runtime/misc/TestDictionary.m
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/recognizer/ANTLRRecognizerTest.h b/runtime/ObjC/Framework/test/runtime/recognizer/RecognizerTest.h
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/test/runtime/recognizer/ANTLRRecognizerTest.h
rename to runtime/ObjC/Framework/test/runtime/recognizer/RecognizerTest.h
diff --git a/runtime/ObjC/Framework/test/runtime/recognizer/RecognizerTest.m b/runtime/ObjC/Framework/test/runtime/recognizer/RecognizerTest.m
new file mode 100755
index 0000000..fb86952
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/recognizer/RecognizerTest.m
@@ -0,0 +1,14 @@
+//
+//  RecognizerTest.m
+//  ANTLR
+//
+//  Created by Ian Michell on 02/07/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+#import "RecognizerTest.h"
+
+
+@implementation ANTLRRecognizerTest
+
+@end
diff --git a/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.h b/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.h
new file mode 100644
index 0000000..c77a210
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.h
@@ -0,0 +1,25 @@
+//
+//  ANTLRBitSetTest.h
+//  ANTLR
+//
+//  Created by Ian Michell on 13/05/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+#import <SenTestingKit/SenTestingKit.h>
+
+@interface ANTLRBitSetTest : SenTestCase 
+{
+	
+}
+
+-(void) testWithBitData;
+-(void) testWithBitArray;
+-(void) testAdd;
+-(void) testRemove;
+-(void) testCopyBitSet;
+-(void) testOr;
+-(void) testOrInPlace;
+-(void) testDescription;
+
+@end
diff --git a/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.m b/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.m
new file mode 100644
index 0000000..feec9e9
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/sets/ANTLRBitSetTest.m
@@ -0,0 +1,129 @@
+//
+//  ANTLRBitSetTest.m
+//  ANTLR
+//
+//  Created by Ian Michell on 13/05/2010.
+//  Copyright 2010 Ian Michell and Alan Condit. All rights reserved.
+//
+
+#import "ANTLRBitSetTest.h"
+#import "ANTLRBitSet.h"
+#import "ACNumber.h"
+#import <CoreFoundation/CoreFoundation.h>
+#import <CoreFoundation/CFBitVector.h>
+
+@implementation ANTLRBitSetTest
+
+-(void) testWithBitData
+{
+	static const unsigned long long bitData[] = {3LL, 1LL};
+	ANTLRBitSet *bitSet = [ANTLRBitSet newBitSetWithBits:bitData Count:2];
+    CFIndex actual = (CFIndex)[bitSet numBits];
+    CFIndex expected = 3;
+	
+    STAssertEquals(actual, expected, @"There should be three bits set in bitvector. But I have %d", actual);
+	[bitSet release];
+}
+
+-(void) testWithBitArray
+{
+	AMutableArray *bits = [AMutableArray arrayWithCapacity:10];
+	[bits addObject:[ACNumber numberWithBool:YES]];
+	[bits addObject:[ACNumber numberWithBool:YES]];
+	[bits addObject:[ACNumber numberWithBool:NO]];
+	[bits addObject:[ACNumber numberWithBool:YES]];
+	[bits addObject:[ACNumber numberWithBool:NO]];
+	[bits addObject:[ACNumber numberWithBool:YES]];
+	STAssertTrue([[bits objectAtIndex:0] boolValue], @"Value at index 0 was not true");
+	STAssertTrue([[bits objectAtIndex:1] boolValue], @"Value at index 1 was not true");
+	STAssertFalse([[bits objectAtIndex:2] boolValue], @"Value at index 2 was not false");
+	STAssertTrue([[bits objectAtIndex:3] boolValue], @"Value at index 3 was not true");
+	STAssertFalse([[bits objectAtIndex:4] boolValue], @"Value at index 4 was not false");
+	STAssertTrue([[bits objectAtIndex:5] boolValue], @"Value at index 5 was not true");
+	ANTLRBitSet *bitSet = [ANTLRBitSet newBitSetWithArray:bits];
+	CFIndex actual = (CFIndex)[bitSet numBits];
+	CFIndex expected = 4;
+	STAssertEquals(actual, expected, @"There should be four bits set in bitvector. But I have %d", actual);
+	[bitSet release];
+}
+
+-(void) testAdd
+{
+
+	ANTLRBitSet *bitSet = [ANTLRBitSet newBitSet];
+	[bitSet add:1];
+	[bitSet add:2];
+	[bitSet add:3];
+	CFIndex actual = (CFIndex)[bitSet numBits];
+	CFIndex expected = 3;
+	STAssertEquals(actual, expected, @"There should be three bits set in bitvector. But I have %d", actual);
+	[bitSet release];
+}
+
+-(void) testRemove
+{
+	ANTLRBitSet *bitSet = [ANTLRBitSet newBitSet];
+	[bitSet add:1];
+	CFIndex actual = (CFIndex)[bitSet numBits];
+	CFIndex expected = 1;
+	STAssertTrue(actual == expected, @"Bitset was not of size 1");
+	STAssertTrue([bitSet member:1], @"Bit at index 1 is not a member...");
+	[bitSet remove:1];
+	actual = [bitSet numBits];
+	STAssertTrue(actual == 0, @"Bitset was not empty");
+	STAssertFalse([bitSet member:1], @"Bit at index 1 is a member...");
+	STAssertTrue([bitSet isNil], @"There was at least one bit on...");
+}
+
+-(void) testCopyBitSet
+{
+	static const unsigned long long bitData[] = {3LL, 1LL};
+	ANTLRBitSet *bitSet = [ANTLRBitSet newBitSetWithBits:bitData Count:2];
+	ANTLRBitSet *copy = [bitSet mutableCopyWithZone:nil];
+	CFIndex actual = (CFIndex)[copy numBits];
+	STAssertEquals(actual, (CFIndex)[bitSet numBits], @"There should be three bits set in bitvector. But I have %d", [copy numBits]);
+	[bitSet release];
+}
+
+-(void) testOr
+{
+	static const unsigned long long bitData[] = {3LL, 1LL};
+	ANTLRBitSet *bitSet = [ANTLRBitSet newBitSetWithBits:bitData Count:2];
+	
+	static const unsigned long long otherData[] = {5LL, 3LL, 1LL};
+	ANTLRBitSet *otherBitSet = [ANTLRBitSet newBitSetWithBits:otherData Count:3];
+	
+	ANTLRBitSet *c = [bitSet or:otherBitSet];
+	STAssertTrue([c size] == [otherBitSet size], @"c should be the same as otherBitSet");
+}
+
+-(void) testOrInPlace
+{
+    
+	ANTLRBitSet *bitSet = [ANTLRBitSet newBitSet];
+	[bitSet add:1];
+	[bitSet add:2];
+	[bitSet add:16];
+	CFIndex actual = (CFIndex)[bitSet numBits];
+	CFIndex expected = 3;
+	STAssertEquals(actual, expected, @"There should be three bits set in bitvector. But I have %d", actual);
+	ANTLRBitSet *followSet = [ANTLRBitSet newBitSet];
+    [followSet orInPlace:bitSet];
+	actual = (CFIndex)[followSet numBits];
+	expected = 3;
+    NSLog( @"%@\n", [followSet description] );
+	STAssertEquals(actual, expected, @"There should be three bits set in bitvector. But I have %d", actual);
+	[bitSet release];
+	[followSet release];
+}
+
+-(void) testDescription
+{
+	ANTLRBitSet *bitSet = [ANTLRBitSet newBitSet];
+	[bitSet add:1];
+	[bitSet add:2];
+	NSMutableString *aDescription = (NSMutableString *)[bitSet description];
+	STAssertTrue([aDescription isEqualToString:@"{1,2}"], @"Description was not right, expected '{1,2}' got: %@", aDescription);
+}
+
+@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/stream/ANTLRStringStreamTest.h b/runtime/ObjC/Framework/test/runtime/stream/ANTLRStringStreamTest.h
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/test/runtime/stream/ANTLRStringStreamTest.h
rename to runtime/ObjC/Framework/test/runtime/stream/ANTLRStringStreamTest.h
diff --git a/runtime/ObjC/Framework/test/runtime/stream/ANTLRStringStreamTest.m b/runtime/ObjC/Framework/test/runtime/stream/ANTLRStringStreamTest.m
new file mode 100644
index 0000000..48b137a
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/stream/ANTLRStringStreamTest.m
@@ -0,0 +1,108 @@
+//
+//  ANTLRStringStreamTest.m
+//  ANTLR
+//
+//  Created by Ian Michell on 12/05/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+#import "ANTLRStringStreamTest.h"
+#import "CharStream.h"
+#import "ANTLRStringStream.h"
+#import "ANTLRError.h"
+
+@implementation ANTLRStringStreamTest
+
+-(void) testInitWithInput
+{
+	NSString *input = @"This is a string used for ANTLRStringStream input ;)";
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:input];
+	NSString *subString = [stream substring:0 To:10];
+	NSLog(@"The first ten chars are '%@'", subString);
+	STAssertTrue([@"This is a " isEqualToString:subString], @"The strings do not match");
+	[stream release];
+}
+
+-(void) testConsumeAndReset
+{
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"This is a string used for input"];
+	[stream consume];
+	STAssertTrue(stream.index > 0, @"Index should be greater than 0 after consume");
+	[stream reset];
+	STAssertTrue(stream.index == 0, @"Index should be 0 after reset");
+	[stream release];
+}
+
+-(void) testConsumeWithNewLine
+{
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"This is a string\nused for input"];
+	while (stream.index < [stream size] && stream.line == 1)
+	{
+		[stream consume];
+	}
+	STAssertTrue(stream.line == 2, @"Line number is incorrect, should be 2, was %d!", stream.line);
+	STAssertTrue(stream.charPositionInLine == 0, @"Char position in line should be 0, it was: %d!", stream.charPositionInLine);
+	[stream release];
+}
+
+-(void) testLAEOF
+{
+    NSInteger i;
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"This is a string\nused for input"];
+	BOOL eofFound = NO;
+	for (i = 1; i <= [stream size]+1; i++) {
+		NSInteger r = [stream LA:i];
+		if (r == (NSInteger)CharStreamEOF) {
+			eofFound = YES;
+            break;
+		}
+	}
+	STAssertTrue(eofFound, @"EOF was not found in stream, Length = %d, index = %d, i = %d", [stream size], stream.index, i);
+	[stream release];
+}
+
+-(void) testLTEOF
+{
+    NSInteger i;
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"This is a string\nused for input"];
+	BOOL eofFound = NO;
+	for ( i = 1; i <= [stream size]+1; i++) {
+		NSInteger r = [stream LT:i];
+		if (r == (NSInteger)CharStreamEOF) {
+			eofFound = YES;
+            break;
+		}
+	}
+	STAssertTrue(eofFound, @"EOF was not found in stream, Length = %d, index = %d, i = %d", [stream size], stream.index, i);
+	[stream release];
+}
+
+-(void) testSeek
+{
+	ANTLRStringStream *stream =[ANTLRStringStream newANTLRStringStream:@"This is a string used for input"];
+	[stream seek:10];
+	STAssertTrue(stream.index == 10, @"Index should be 10");
+	// Get char 10 which is s (with 0 being T)
+	STAssertTrue([stream LA:1] > -1 && (char)[stream LA:1] == 's', @"Char returned should be s");
+	[stream release];
+}
+
+-(void) testSeekMarkAndRewind
+{
+	ANTLRStringStream *stream =[ANTLRStringStream newANTLRStringStream:@"This is a string used for input"];
+	[stream mark];
+	[stream seek:10];
+	STAssertTrue(stream.index == 10, @"Index should be 10");
+	[stream rewind];
+	STAssertTrue(stream.index == 0, @"Index should be 0");
+	[stream seek:5];
+	STAssertTrue(stream.index == 5, @"Index should be 5");
+	[stream mark]; // make a new marker to test a branch.
+	[stream seek:10];
+	STAssertTrue(stream.index == 10, @"Index should be 10");
+	[stream rewind]; // should be marked to 5.
+	STAssertTrue(stream.index == 5, @"Index should be 5");
+	[stream release];
+}
+
+@end
diff --git a/runtime/ObjC/Framework/test/runtime/token/CommonTokenTest.h b/runtime/ObjC/Framework/test/runtime/token/CommonTokenTest.h
new file mode 100644
index 0000000..3d82917
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/token/CommonTokenTest.h
@@ -0,0 +1,25 @@
+//
+//  CommonTokenTest.h
+//  ANTLR
+//
+//  Created by Ian Michell on 25/05/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+#import <SenTestingKit/SenTestingKit.h>
+
+
+@interface CommonTokenTest : SenTestCase 
+{
+
+}
+
+-(void) test01InitAndRelease;
+-(void) test02GetEOFToken;
+-(void) test03InitWithTokenType;
+-(void) test04InitWithTokenTypeAndText;
+-(void) test05InitWithCharStream;
+-(void) test06InitWithToken;
+-(void) test07TokenDescription;
+
+@end
diff --git a/runtime/ObjC/Framework/test/runtime/token/CommonTokenTest.m b/runtime/ObjC/Framework/test/runtime/token/CommonTokenTest.m
new file mode 100644
index 0000000..20a187f
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/token/CommonTokenTest.m
@@ -0,0 +1,98 @@
+//
+//  CommonTokenTest.m
+//  ANTLR
+//
+//  Created by Ian Michell on 25/05/2010.
+//  Copyright 2010 Ian Michell and Alan Condit. All rights reserved.
+//
+
+#import "CommonTokenTest.h"
+#import "CommonToken.h"
+#import "ANTLRStringStream.h"
+
+@implementation CommonTokenTest
+
+-(void) test01InitAndRelease
+{
+	CommonToken *token = [[CommonToken newToken] retain];
+	STAssertNotNil(token, @"Token was nil");
+	[token release];
+}
+
+-(void) test02GetEOFToken
+{
+	CommonToken *token = [[CommonToken eofToken] retain];
+	STAssertNotNil(token, @"Token was nil");
+	STAssertEquals(token.type, (NSInteger)TokenTypeEOF, @"Token was not of type TokenTypeEOF");
+	[token release];
+}
+
+-(void) test03InitWithTokenType
+{
+	CommonToken *token = [[CommonToken newToken:TokenTypeUP] retain];
+	token.text = @"<UP>";
+	STAssertNotNil(token, @"Token was nil");
+	STAssertEquals(token.type, (NSInteger)TokenTypeUP, @"Token was not of type TokenTypeUP");
+	STAssertNotNil(token.text, @"Token text was nil, was expecting <UP>");
+	STAssertTrue([token.text isEqualToString:@"<UP>"], @"Token text was not <UP> was instead: %@", token.text);
+	[token release];
+}
+
+-(void) test04InitWithTokenTypeAndText
+{
+	CommonToken *token = [[CommonToken newToken:TokenTypeUP Text:@"<UP>"] retain];
+	STAssertNotNil(token, @"Token was nil");
+	STAssertEquals(token.type, (NSInteger)TokenTypeUP, @"Token was not of type TokenTypeUP");
+	STAssertNotNil(token.text, @"Token text was nil, was expecting <UP>");
+	STAssertTrue([token.text isEqualToString:@"<UP>"], @"Token text was not <UP> was instead: %@", token.text);
+	[token release];
+}
+
+-(void) test05InitWithCharStream
+{
+	ANTLRStringStream *stream = [[ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"] retain];
+	CommonToken *token = [[CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5] retain];
+	STAssertNotNil(token, @"Token was nil");
+	STAssertEquals(token.type, (NSInteger)555, @"Token was not of type 555"); // Nice random type number
+	STAssertNotNil(token.text, @"Token text was nil, was expecting ||");
+	STAssertTrue([token.text isEqualToString:@"||"], @"Token text was not || was instead: %@", token.text);
+	[token release];
+    [stream release];
+}
+
+-(void) test06InitWithToken
+{
+	ANTLRStringStream *stream = [[ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"] retain];
+	CommonToken *token = [[CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5] retain];
+	STAssertNotNil(token, @"Token was nil");
+	STAssertEquals(token.type, (NSInteger)555, @"Token was not of type 555"); // Nice random type number
+	STAssertNotNil(token.text, @"Token text was nil, was expecting ||");
+	STAssertTrue([token.text isEqualToString:@"||"], @"Token text was not || was instead: %@", token.text);
+	
+	CommonToken *newToken = [[CommonToken newTokenWithToken:token] retain];
+	STAssertNotNil(newToken, @"New token is nil!");
+	STAssertEquals(newToken.type, token.type, @"Tokens types do not match %d:%d!", newToken.type, token.type);
+	STAssertEquals(newToken.line, token.line, @"Token lines do not match!");
+	STAssertEquals(newToken.index, token.index, @"Token indexes do not match");
+	STAssertEquals(newToken.channel, token.channel, @"Token channels are not the same");
+	STAssertEquals(newToken.charPositionInLine, token.charPositionInLine, @"Token char positions in lines do not match");
+	STAssertEquals(newToken.startIndex, token.startIndex, @"Token start positions do not match");
+	STAssertEquals(newToken.stopIndex, token.stopIndex, @"Token stop positions do not match");
+	STAssertTrue([newToken.text isEqualToString:token.text], @"Token text does not match!");
+	[token release];
+	[newToken release];
+    [stream release];
+}
+
+-(void) test07TokenDescription
+{
+    NSString *aDescription;
+	ANTLRStringStream *stream = [[ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"] retain];
+	CommonToken *token = [[CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5] retain];
+    aDescription = [token description];
+	STAssertTrue([aDescription isEqualToString:@"[@0, 4:5='||',<555>,0:0]"], @"String description for token is not correct! got %@", aDescription);
+    [token release];
+    [stream release];
+}
+
+@end
diff --git a/antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonErrorNodeTest.h b/runtime/ObjC/Framework/test/runtime/tree/CommonErrorNodeTest.h
similarity index 100%
rename from antlr-3.4/runtime/ObjC/Framework/test/runtime/tree/ANTLRCommonErrorNodeTest.h
rename to runtime/ObjC/Framework/test/runtime/tree/CommonErrorNodeTest.h
diff --git a/runtime/ObjC/Framework/test/runtime/tree/CommonErrorNodeTest.m b/runtime/ObjC/Framework/test/runtime/tree/CommonErrorNodeTest.m
new file mode 100755
index 0000000..679646d
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/tree/CommonErrorNodeTest.m
@@ -0,0 +1,14 @@
+//
+//  CommonErrorNodeTest.m
+//  ANTLR
+//
+//  Created by Ian Michell on 10/06/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+#import "CommonErrorNodeTest.h"
+
+
+@implementation ANTLRCommonErrorNodeTest
+
+@end
diff --git a/runtime/ObjC/Framework/test/runtime/tree/CommonTreeAdaptorTest.h b/runtime/ObjC/Framework/test/runtime/tree/CommonTreeAdaptorTest.h
new file mode 100755
index 0000000..85c0493
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/tree/CommonTreeAdaptorTest.h
@@ -0,0 +1,16 @@
+//
+//  CommonTreeAdaptorTest.h
+//  ANTLR
+//
+//  Created by Ian Michell on 10/06/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+#import <SenTestingKit/SenTestingKit.h>
+
+
+@interface CommonTreeAdaptorTest : SenTestCase {
+
+}
+
+@end
diff --git a/runtime/ObjC/Framework/test/runtime/tree/CommonTreeAdaptorTest.m b/runtime/ObjC/Framework/test/runtime/tree/CommonTreeAdaptorTest.m
new file mode 100755
index 0000000..f5bf007
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/tree/CommonTreeAdaptorTest.m
@@ -0,0 +1,14 @@
+//
+//  CommonTreeAdaptorTest.m
+//  ANTLR
+//
+//  Created by Ian Michell on 10/06/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+#import "CommonTreeAdaptorTest.h"
+
+
+@implementation CommonTreeAdaptorTest
+
+@end
diff --git a/runtime/ObjC/Framework/test/runtime/tree/CommonTreeTest.h b/runtime/ObjC/Framework/test/runtime/tree/CommonTreeTest.h
new file mode 100644
index 0000000..2e2f10d
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/tree/CommonTreeTest.h
@@ -0,0 +1,42 @@
+//
+//  CommonTreeTest.h
+//  ANTLR
+//
+//  Created by Ian Michell on 26/05/2010.
+//  Copyright 2010 Ian Michell and Alan Condit. All rights reserved.
+//
+
+#import <SenTestingKit/SenTestingKit.h>
+
+
+@interface CommonTreeTest : SenTestCase 
+{
+}
+
+-(void) test01InitAndRelease;
+-(void) test02InitWithTree;
+-(void) test03WithToken;
+-(void) test04InvalidTreeNode;
+-(void) test05InitWithCommonTreeNode;
+-(void) test06CopyTree;
+-(void) test07Description;
+-(void) test08Text;
+-(void) test09AddChild;
+-(void) test10AddChildren;
+-(void) test11AddSelfAsChild;
+-(void) test12AddEmptyChildWithNoChildren;
+-(void) test13AddEmptyChildWithChildren;
+-(void) test14ChildAtIndex;
+-(void) test15SetChildAtIndex;
+-(void) test16GetAncestor;
+-(void) test17FirstChildWithType;
+-(void) test18SanityCheckParentAndChildIndexesForParentTree;
+-(void) test19DeleteChild;
+-(void) test20TreeDescriptions;
+-(void) test21ReplaceChildrenAtIndexWithNoChildren;
+-(void) test22ReplaceChildrenAtIndex;
+-(void) test23ReplaceChildrenAtIndexWithChild;
+-(void) test24ReplacechildrenAtIndexWithLessChildren;
+-(void) test25ReplacechildrenAtIndexWithMoreChildren;
+
+@end
diff --git a/runtime/ObjC/Framework/test/runtime/tree/CommonTreeTest.m b/runtime/ObjC/Framework/test/runtime/tree/CommonTreeTest.m
new file mode 100644
index 0000000..4db2300
--- /dev/null
+++ b/runtime/ObjC/Framework/test/runtime/tree/CommonTreeTest.m
@@ -0,0 +1,555 @@
+//
+//  CommonTreeTest.m
+//  ANTLR
+//
+//  Created by Ian Michell on 26/05/2010.
+//  Copyright 2010 Ian Michell. All rights reserved.
+//
+
+#import <ANTLR/BaseTree.h>
+#import "CommonTreeTest.h"
+#import <ANTLR/ANTLRStringStream.h>
+#import <ANTLR/CommonTree.h>
+#import <ANTLR/CommonToken.h>
+#import <ANTLR/ANTLRError.h>
+#import <ANTLR/RuntimeException.h>
+
+@implementation CommonTreeTest
+
+-(void) test01InitAndRelease
+{
+	CommonTree *tree = [CommonTree newTree];
+	STAssertNotNil(tree, @"Tree was nil");
+	// FIXME: It doesn't do anything else, perhaps initWithTree should set something somewhere, java says no though...
+    return;
+}
+
+-(void) test02InitWithTree
+{
+	CommonTree *tree = [CommonTree newTree];
+	STAssertNotNil(tree, @"Tree was nil");
+    if (tree != nil)
+        STAssertEquals(tree.type, (NSInteger)TokenTypeInvalid, @"Tree should have an invalid token type, because it has no token");
+    // [tree release];
+    return;
+}
+
+-(void) test03WithToken
+{
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	token.line = 1;
+	token.charPositionInLine = 4;
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	STAssertNotNil(tree, @"Tree was nil");
+    if (tree != nil)
+        STAssertNotNil(tree.token, @"Tree with token was nil");
+    if (tree != nil && tree.token != nil) {
+        STAssertEquals((NSUInteger) tree.token.line, (NSUInteger)1, [NSString stringWithFormat:@"Tree should be at line 1, but was at %d", tree.token.line] );
+        STAssertEquals((NSUInteger) tree.token.charPositionInLine, (NSUInteger)4, [NSString stringWithFormat:@"Char position should be 4, but was at %d", tree.token.charPositionInLine]);
+        STAssertNotNil(((CommonToken *)tree.token).text, @"Tree with token with text was nil");
+    }
+    if (tree != nil && tree.token != nil && tree.token.text != nil)
+        STAssertTrue([tree.token.text isEqualToString:@"||"], @"Text was not ||");
+	//[tree release];
+    return;
+}
+
+-(void) test04InvalidTreeNode
+{
+	CommonTree *tree = [CommonTree newTreeWithToken:[CommonToken invalidToken]];
+	STAssertNotNil(tree, @"Tree was nil");
+	STAssertEquals(tree.token.type, (NSInteger)TokenTypeInvalid, @"Tree Token type was not TokenTypeInvalid");
+	//[tree release];
+    return;
+}
+
+-(void) test05InitWithCommonTreeNode
+{
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	STAssertNotNil(tree, @"Tree was nil");
+	STAssertNotNil(tree.token, @"Tree token was nil");
+	CommonTree *newTree = [CommonTree newTreeWithTree:tree];
+	STAssertNotNil(newTree, @"New tree was nil");
+	STAssertNotNil(newTree.token, @"New tree token was nil");
+	STAssertEquals(newTree.token, tree.token, @"Tokens did not match");
+	STAssertEquals(newTree.startIndex, tree.startIndex, @"Token start index did not match %d:%d", newTree.startIndex, tree.startIndex);
+	STAssertEquals(newTree.stopIndex, tree.stopIndex, @"Token stop index did not match %d:%d", newTree.stopIndex, tree.stopIndex);
+	//[stream release];
+	//[tree release];
+	//[newTree release];
+	//[token release];
+    return;
+}
+
+-(void) test06CopyTree
+{
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	STAssertNotNil(tree, @"Tree was nil");
+	CommonTree *newTree = (CommonTree *)[tree copyWithZone:nil];
+	STAssertTrue([newTree isKindOfClass:[CommonTree class]], @"Copied tree was not an CommonTree");
+	STAssertNotNil(newTree, @"New tree was nil");
+	// STAssertEquals(newTree.token, tree.token, @"Tokens did not match");
+	STAssertEquals(newTree.stopIndex, tree.stopIndex, @"Token stop index did not match");
+	STAssertEquals(newTree.startIndex, tree.startIndex, @"Token start index did not match");
+	//[stream release];
+	//[tree release];
+	//[newTree release];
+	// [token release];
+    return;
+}
+
+-(void) test07Description
+{
+    NSString *aString;
+	CommonTree *errorTree = [CommonTree invalidNode];
+	STAssertNotNil(errorTree, @"Error tree node is nil");
+    if (errorTree != nil) {
+        aString = [errorTree description];
+        STAssertNotNil( aString, @"errorTree description returned nil");
+        if (aString != nil)
+            STAssertTrue([aString isEqualToString:@"<errornode>"], @"Not a valid error node description %@", aString);
+    }
+	//[errorTree release];
+	
+	CommonTree *tree = [CommonTree newTreeWithTokenType:TokenTypeUP];
+	STAssertNotNil(tree, @"Tree is nil");
+    if (tree != nil)
+        STAssertNil([tree description], @"Tree description was not nil, was: %@", [tree description]);
+	//[tree release];
+	
+	tree = [CommonTree newTree];
+	STAssertNotNil(tree, @"Tree is nil");
+    if (tree != nil) {
+        aString = [tree description];
+        STAssertNotNil(aString, @"tree description returned nil");
+        if (aString != nil)
+            STAssertTrue([aString isEqualToString:@"nil"], @"Tree description was not 'nil', was: %@", aString);
+    }
+	//[tree release];
+	
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	tree = [CommonTree newTreeWithToken:token];
+	STAssertNotNil(tree, @"Tree node is nil");
+    aString = [tree description];
+    STAssertNotNil(aString, @"tree description returned nil");
+    if (aString != nil)
+        STAssertTrue([aString isEqualToString:@"||"], @"description was not || was instead %@", [tree description]);
+	//[tree release];
+    return;
+}
+
+-(void) test08Text
+{
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	STAssertNotNil(tree, @"Tree was nil");
+	STAssertTrue([tree.token.text isEqualToString:@"||"], @"Tree text was not valid, should have been || was %@", tree.token.text);
+	//[tree release];
+	
+	// test nil (for line coverage)
+	tree = [CommonTree newTree];
+	STAssertNotNil(tree, @"Tree was nil");
+	STAssertNil(tree.token.text, @"Tree text was not nil: %@", tree.token.text);
+    return;
+}
+
+-(void) test09AddChild
+{
+	// Create a new tree
+	CommonTree *parent = [CommonTree newTreeWithTokenType:555];
+    parent.token.line = 1;
+	parent.token.charPositionInLine = 1;
+	
+	// Child tree
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	token.line = 1;
+	token.charPositionInLine = 4;
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	
+	// Add a child to the parent tree
+	[parent addChild:tree];
+
+
+	STAssertNotNil(parent, @"parent was nil");
+    if (parent != nil)
+        STAssertNotNil(parent.token, @"parent was nil");
+	STAssertEquals((NSInteger)parent.token.line, (NSInteger)1, @"Tree should be at line 1 but is %d", parent.token.line);
+	STAssertEquals((NSInteger)parent.token.charPositionInLine, (NSInteger)1, @"Char position should be 1 but is %d", parent.token.charPositionInLine);
+	
+	STAssertEquals((NSInteger)[parent getChildCount], (NSInteger)1, @"There should be 1 child but there were %d", [parent getChildCount]);
+	STAssertEquals((NSInteger)[[parent getChild:0] getChildIndex], (NSInteger)0, @"Child index should be 0 was : %d", [[parent getChild:0] getChildIndex]);
+	STAssertEquals([[parent getChild:0] getParent], parent, @"Parent not set for child");
+	
+	//[parent release];
+    return;
+}
+
+-(void) test10AddChildren
+{
+	// Create a new tree
+	CommonTree *parent = [CommonTree newTree];
+	
+	// Child tree
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	token.line = 1;
+	token.charPositionInLine = 4;
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	
+	// Add a child to the parent tree
+	[parent addChild: tree];
+	
+	CommonTree *newParent = [CommonTree newTree];
+	[newParent addChildren:parent.children];
+	
+	STAssertEquals([newParent getChild:0], [parent getChild:0], @"Children did not match");
+    return;
+}
+
+-(void) test11AddSelfAsChild
+{
+	CommonTree *parent = [CommonTree newTree];
+	@try 
+	{
+		[parent addChild:parent];
+	}
+	@catch (NSException *e) 
+	{
+		STAssertTrue([[e name] isEqualToString:@"IllegalArgumentException"], @"Got wrong kind of exception! %@", [e name]);
+		//[parent release];
+		return;
+	}
+	STFail(@"Did not get an exception when adding an empty child!");
+    return;
+}
+
+-(void) test12AddEmptyChildWithNoChildren
+{
+	CommonTree *emptyChild = [CommonTree newTree];
+	CommonTree *parent = [CommonTree newTree];
+	[parent addChild:emptyChild];
+	STAssertEquals((NSInteger)[parent getChildCount], (NSInteger)0, @"There were supposed to be no children!");
+	//[parent release];
+	//[emptyChild release];
+    return;
+}
+
+-(void) test13AddEmptyChildWithChildren
+{
+	// Create a new tree
+	CommonTree *parent = [CommonTree newTree];
+	
+	// Child tree
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	token.line = 1;
+	token.charPositionInLine = 4;
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	
+	// Add a child to the parent tree
+	[parent addChild: tree];
+	
+	CommonTree *newParent = [CommonTree newTree];
+	[newParent addChild:parent];
+	
+	STAssertEquals((NSInteger)[newParent getChildCount], (NSInteger)1, @"Parent should only have 1 child: %d", [newParent getChildCount]);
+	STAssertEquals([newParent getChild:0], tree, @"Child was not the correct object.");
+	//[parent release];
+	//[newParent release];
+	//[tree release];
+    return;
+}
+
+-(void) test14ChildAtIndex
+{
+	// Create a new tree
+	CommonTree *parent = [CommonTree newTree];
+	
+	// Child tree
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	
+	// Add a child to the parent tree
+	[parent addChild: tree];
+	
+	STAssertEquals((NSInteger)[parent getChildCount], (NSInteger)1, @"There were either no children or more than 1: %d", [parent getChildCount]);
+	
+	CommonTree *child = [parent getChild:0];
+	STAssertNotNil(child, @"Child at index 0 should not be nil");
+	STAssertEquals(child, tree, @"Child and Original tree were not the same");
+	//[parent release];
+    return;
+}
+
+-(void) test15SetChildAtIndex
+{
+	CommonTree *parent = [CommonTree newTree];
+	
+	// Child tree
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	
+	
+	tree = [CommonTree newTreeWithTokenType:TokenTypeUP];
+	tree.token.text = @"<UP>";
+	[parent addChild:tree];
+	
+	STAssertTrue([parent getChild:0] == tree, @"Trees don't match");
+	[parent setChild:0 With:tree];
+	
+	CommonTree *child = [parent getChild:0];
+	STAssertTrue([parent getChildCount] == 1, @"There were either no children or more than 1: %d", [parent getChildCount]);
+	STAssertNotNil(child, @"Child at index 0 should not be nil");
+	STAssertEquals(child, tree, @"Child and Original tree were not the same");
+	//[parent release];
+    return;
+}
+
+-(void) test16GetAncestor
+{
+	CommonTree *parent = [CommonTree newTreeWithTokenType:TokenTypeUP];
+	parent.token.text = @"<UP>";
+	
+	CommonTree *down = [CommonTree newTreeWithTokenType:TokenTypeDOWN];
+	down.token.text = @"<DOWN>";
+	
+	[parent addChild:down];
+	
+	// Child tree
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	
+	[down addChild:tree];
+	STAssertTrue([tree hasAncestor:TokenTypeUP], @"Should have an ancestor of type TokenTypeUP");
+	
+	CommonTree *ancestor = [tree getAncestor:TokenTypeUP];
+	STAssertNotNil(ancestor, @"Ancestor should not be nil");
+	STAssertEquals(ancestor, parent, @"Ancestors do not match");
+	//[parent release];
+    return;
+}
+
+-(void) test17FirstChildWithType
+{
+	// Create a new tree
+	CommonTree *parent = [CommonTree newTree];
+	
+	CommonTree *up = [CommonTree newTreeWithTokenType:TokenTypeUP];
+	CommonTree *down = [CommonTree newTreeWithTokenType:TokenTypeDOWN];
+	
+	[parent addChild:up];
+	[parent addChild:down];
+	
+	CommonTree *found = (CommonTree *)[parent getFirstChildWithType:TokenTypeDOWN];
+	STAssertNotNil(found, @"Child with type DOWN should not be nil");
+    if (found != nil) {
+        STAssertNotNil(found.token, @"Child token with type DOWN should not be nil");
+        if (found.token != nil)
+            STAssertEquals((NSInteger)found.token.type, (NSInteger)TokenTypeDOWN, @"Token type was not correct, should be down!");
+    }
+	found = (CommonTree *)[parent getFirstChildWithType:TokenTypeUP];
+	STAssertNotNil(found, @"Child with type UP should not be nil");
+    if (found != nil) {
+        STAssertNotNil(found.token, @"Child token with type UP should not be nil");
+        if (found.token != nil)
+            STAssertEquals((NSInteger)found.token.type, (NSInteger)TokenTypeUP, @"Token type was not correct, should be up!");
+    }
+	//[parent release];
+    return;
+}
+
+-(void) test18SanityCheckParentAndChildIndexesForParentTree
+{
+	// Child tree
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	
+	CommonTree *parent = [CommonTree newTreeWithTokenType:555];
+	STAssertNotNil(tree, @"tree should not be nil");
+	@try 
+	{
+		[tree sanityCheckParentAndChildIndexes];
+	}
+	@catch (NSException * e) 
+	{
+		STFail(@"Exception was thrown and this is not what's right...");
+	}
+	
+	BOOL passed = NO;
+	@try 
+	{
+		[tree sanityCheckParentAndChildIndexes:parent At:0];
+	}
+	@catch (NSException * e) 
+	{
+		STAssertTrue([[e name] isEqualToString:@"IllegalStateException"], @"Exception was not an IllegalStateException but was %@", [e name]);
+		passed = YES;
+	}
+	if (!passed)
+	{
+		STFail(@"An exception should have been thrown");
+	}
+	
+	STAssertNotNil(parent, @"parent should not be nil");
+	[parent addChild:tree];
+	@try 
+	{
+		[tree sanityCheckParentAndChildIndexes:parent At:0];
+	}
+	@catch (NSException * e) 
+	{
+		STFail(@"No exception should have been thrown!");
+	}
+    return;
+}
+
+-(void) test19DeleteChild
+{
+	// Child tree
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	
+	CommonTree *parent = [CommonTree newTree];
+	[parent addChild:tree];
+	
+	CommonTree *deletedChild = [parent deleteChild:0];
+	STAssertEquals(deletedChild, tree, @"Children do not match!");
+	STAssertEquals((NSInteger)[parent getChildCount], (NSInteger)0, @"Child count should be zero!");
+    return;
+}
+
+-(void) test20TreeDescriptions
+{
+	// Child tree
+	ANTLRStringStream *stream = [ANTLRStringStream newANTLRStringStream:@"this||is||a||double||piped||separated||csv"];
+	CommonToken *token = [CommonToken newToken:stream Type:555 Channel:TokenChannelDefault Start:4 Stop:5];
+	CommonTree *tree = [CommonTree newTreeWithToken:token];
+	
+	// Description for tree
+	NSString *treeDesc = [tree treeDescription];
+    STAssertNotNil(treeDesc, @"Tree description should not be nil");
+    STAssertTrue([treeDesc isEqualToString:@"||"], @"Tree description was not || but rather %@", treeDesc);
+	
+	CommonTree *parent = [CommonTree newTree];
+	STAssertTrue([[parent treeDescription] isEqualToString:@"nil"], @"Tree description was not nil was %@", [parent treeDescription]);
+	[parent addChild:tree];
+	treeDesc = [parent treeDescription];
+	STAssertTrue([treeDesc isEqualToString:@"||"], @"Tree description was not || but was: %@", treeDesc);
+	
+	// Test non empty parent
+	CommonTree *down = [CommonTree newTreeWithTokenType:TokenTypeDOWN];
+	down.token.text = @"<DOWN>";
+	
+	[tree addChild:down];
+	treeDesc = [parent treeDescription];
+	STAssertTrue([treeDesc isEqualToString:@"(|| <DOWN>)"], @"Tree description was wrong expected (|| <DOWN>) but got: %@", treeDesc);
+    return;
+}
+
+-(void) test21ReplaceChildrenAtIndexWithNoChildren
+{
+	CommonTree *parent = [CommonTree newTree];
+	CommonTree *parent2 = [CommonTree newTree];
+	CommonTree *child = [CommonTree newTreeWithTokenType:TokenTypeDOWN];
+	child.token.text = @"<DOWN>";
+	[parent2 addChild:child];
+	@try 
+	{
+		[parent replaceChildrenFrom:1 To:2 With:parent2];
+	}
+	@catch (NSException *ex)
+	{
+		STAssertTrue([[ex name] isEqualToString:@"IllegalArgumentException"], @"Expected an illegal argument exception... Got instead: %@", [ex name]);
+		return;
+	}
+	STFail(@"Exception was not thrown when I tried to replace a child on a parent with no children");
+    return;
+}
+
+-(void) test22ReplaceChildrenAtIndex
+{
+	CommonTree *parent1 = [CommonTree newTree];
+	CommonTree *child1 = [CommonTree newTreeWithTokenType:TokenTypeUP];
+	[parent1 addChild:child1];
+	CommonTree *parent2 = [CommonTree newTree];
+	CommonTree *child2 = [CommonTree newTreeWithTokenType:TokenTypeDOWN];
+	child2.token.text = @"<DOWN>";
+	[parent2 addChild:child2];
+	
+	[parent2 replaceChildrenFrom:0 To:0 With:parent1];
+	
+	STAssertEquals([parent2 getChild:0], child1, @"Child for parent 2 should have been from parent 1");
+    return;
+}
+
+-(void) test23ReplaceChildrenAtIndexWithChild
+{
+	CommonTree *replacement = [CommonTree newTreeWithTokenType:TokenTypeUP];
+	replacement.token.text = @"<UP>";
+	CommonTree *parent = [CommonTree newTree];
+	CommonTree *child = [CommonTree newTreeWithTokenType:TokenTypeDOWN];
+	child.token.text = @"<DOWN>";
+	[parent addChild:child];
+	
+	[parent replaceChildrenFrom:0 To:0 With:replacement];
+	
+	STAssertTrue([parent getChild:0] == replacement, @"Children do not match");
+    return;
+}
+
+-(void) test24ReplacechildrenAtIndexWithLessChildren
+{
+	CommonTree *parent1 = [CommonTree newTree];
+	CommonTree *child1 = [CommonTree newTreeWithTokenType:TokenTypeUP];
+	[parent1 addChild:child1];
+	
+	CommonTree *parent2 = [CommonTree newTree];
+	
+	CommonTree *child2 = [CommonTree newTreeWithTokenType:TokenTypeEOF];
+	[parent2 addChild:child2];
+	
+	CommonTree *child3 = [CommonTree newTreeWithTokenType:TokenTypeDOWN];
+	child3.token.text = @"<DOWN>";
+	[parent2 addChild:child3];
+	
+	[parent2 replaceChildrenFrom:0 To:1 With:parent1];
+	STAssertEquals((NSInteger)[parent2 getChildCount], (NSInteger)1, @"Should have one child but has %d", [parent2 getChildCount]);
+	STAssertEquals([parent2 getChild:0], child1, @"Child for parent 2 should have been from parent 1");
+    return;
+}
+
+-(void) test25ReplacechildrenAtIndexWithMoreChildren
+{
+	CommonTree *parent1 = [CommonTree newTree];
+	CommonTree *child1 = [CommonTree newTreeWithTokenType:TokenTypeUP];
+	[parent1 addChild:child1];
+	CommonTree *child2 = [CommonTree newTreeWithTokenType:TokenTypeEOF];
+	[parent1 addChild:child2];
+	
+	CommonTree *parent2 = [CommonTree newTree];
+	
+	CommonTree *child3 = [CommonTree newTreeWithTokenType:TokenTypeDOWN];
+	child3.token.text = @"<DOWN>";
+	[parent2 addChild:child3];
+	
+	[parent2 replaceChildrenFrom:0 To:0 With:parent1];
+	STAssertEquals((NSInteger)[parent2 getChildCount], (NSInteger)2, @"Should have two children but has %d", [parent2 getChildCount]);
+	STAssertEquals([parent2 getChild:0], child1, @"Child for parent 2 should have been from parent 1");
+	STAssertEquals([parent2 getChild:1], child2, @"An extra child (child2) should be in the children collection");
+    return;
+}
+
+@end
diff --git a/runtime/ObjC/README b/runtime/ObjC/README
new file mode 100644
index 0000000..22ad627
--- /dev/null
+++ b/runtime/ObjC/README
@@ -0,0 +1,31 @@
+ANTLR version 3 supports target-language code generation for lexical
+analyzers and parsers. Objective-C was supported previously but had not
+been kept up to date for some time. This release builds on the work of
+Kay Roepke, Ian Michell, and Alan Condit.
+
+The project currently works well enough for me to compile my grammar and
+tree walker. I am sure it still has some bugs, but I have fixed all of
+the ones I have found so far.
+
+The project consists of an Objective-C runtime framework that must be
+installed in /Library/Frameworks.
+
+It also requires the installation of the String Template files to
+support the target language code generation. Hopefully, at some point
+they will be incorporated into the ANTLR release code, so that the
+individual user doesn't have to do anything but load the framework into
+the proper location. However, for now you need to create an ObjC
+directory in antlr-3.2/tool/src/main/resources/org/antlr/codegen/templates
+and then copy the ObjC ".stg" files to 
+antlr-3.2/tool/src/main/resources/org/antlr/codegen/templates/ObjC/*.
+
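+For example, the steps might look roughly like this (a sketch only; the
+source location of the ObjC ".stg" files and the exact antlr-3.2 checkout
+path are placeholders you will need to adjust for your setup):
+
+    mkdir -p antlr-3.2/tool/src/main/resources/org/antlr/codegen/templates/ObjC
+    cp /path/to/ObjC/templates/*.stg \
+       antlr-3.2/tool/src/main/resources/org/antlr/codegen/templates/ObjC/
+    cp -R ANTLR.framework /Library/Frameworks/
+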
+There is also a Java file, ObjCTarget.java, that goes in
+antlr-3.2/tool/src/main/java/org/antlr/codegen/ObjCTarget/Java.
+
+If you are using ANTLR 3.3, the code from here is included with the ANTLR
+tarball. You just need to copy ANTLR.framework to /Library/Frameworks.
+
+antlr3.4.1
+Feb. 22, 2012 -- I just uploaded a new binary (zipped) copy of the ANTLR.framework and antlr3.4.jar
+that has all of the renaming changes I made to match the Java source names, as well as
+fixes to the DFA transitions. This is antlr-3.4.1.jar.
\ No newline at end of file
diff --git a/antlr-3.4/runtime/Perl5/.p4ignore b/runtime/Perl5/.p4ignore
similarity index 100%
rename from antlr-3.4/runtime/Perl5/.p4ignore
rename to runtime/Perl5/.p4ignore
diff --git a/antlr-3.4/runtime/Perl5/Build.PL b/runtime/Perl5/Build.PL
similarity index 100%
rename from antlr-3.4/runtime/Perl5/Build.PL
rename to runtime/Perl5/Build.PL
diff --git a/antlr-3.4/runtime/Perl5/Changes b/runtime/Perl5/Changes
similarity index 100%
rename from antlr-3.4/runtime/Perl5/Changes
rename to runtime/Perl5/Changes
diff --git a/antlr-3.4/runtime/Perl5/INSTALL b/runtime/Perl5/INSTALL
similarity index 100%
rename from antlr-3.4/runtime/Perl5/INSTALL
rename to runtime/Perl5/INSTALL
diff --git a/antlr-3.4/runtime/Perl5/MANIFEST b/runtime/Perl5/MANIFEST
similarity index 100%
rename from antlr-3.4/runtime/Perl5/MANIFEST
rename to runtime/Perl5/MANIFEST
diff --git a/antlr-3.4/runtime/Perl5/MANIFEST.SKIP b/runtime/Perl5/MANIFEST.SKIP
similarity index 100%
rename from antlr-3.4/runtime/Perl5/MANIFEST.SKIP
rename to runtime/Perl5/MANIFEST.SKIP
diff --git a/antlr-3.4/runtime/Perl5/Makefile.PL b/runtime/Perl5/Makefile.PL
similarity index 100%
rename from antlr-3.4/runtime/Perl5/Makefile.PL
rename to runtime/Perl5/Makefile.PL
diff --git a/antlr-3.4/runtime/Perl5/README b/runtime/Perl5/README
similarity index 100%
rename from antlr-3.4/runtime/Perl5/README
rename to runtime/Perl5/README
diff --git a/antlr-3.4/runtime/Perl5/docs/design.pod b/runtime/Perl5/docs/design.pod
similarity index 100%
rename from antlr-3.4/runtime/Perl5/docs/design.pod
rename to runtime/Perl5/docs/design.pod
diff --git a/antlr-3.4/runtime/Perl5/examples/README b/runtime/Perl5/examples/README
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/README
rename to runtime/Perl5/examples/README
diff --git a/antlr-3.4/runtime/Perl5/examples/expr/Expr.g b/runtime/Perl5/examples/expr/Expr.g
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/expr/Expr.g
rename to runtime/Perl5/examples/expr/Expr.g
diff --git a/antlr-3.4/runtime/Perl5/examples/expr/expr.pl b/runtime/Perl5/examples/expr/expr.pl
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/expr/expr.pl
rename to runtime/Perl5/examples/expr/expr.pl
diff --git a/antlr-3.4/runtime/Perl5/examples/id/IDLexer.g b/runtime/Perl5/examples/id/IDLexer.g
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/id/IDLexer.g
rename to runtime/Perl5/examples/id/IDLexer.g
diff --git a/antlr-3.4/runtime/Perl5/examples/id/id.pl b/runtime/Perl5/examples/id/id.pl
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/id/id.pl
rename to runtime/Perl5/examples/id/id.pl
diff --git a/antlr-3.4/runtime/Perl5/examples/mexpr/MExpr.g b/runtime/Perl5/examples/mexpr/MExpr.g
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/mexpr/MExpr.g
rename to runtime/Perl5/examples/mexpr/MExpr.g
diff --git a/antlr-3.4/runtime/Perl5/examples/mexpr/mexpr.pl b/runtime/Perl5/examples/mexpr/mexpr.pl
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/mexpr/mexpr.pl
rename to runtime/Perl5/examples/mexpr/mexpr.pl
diff --git a/antlr-3.4/runtime/Perl5/examples/simplecalc/SimpleCalc.g b/runtime/Perl5/examples/simplecalc/SimpleCalc.g
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/simplecalc/SimpleCalc.g
rename to runtime/Perl5/examples/simplecalc/SimpleCalc.g
diff --git a/antlr-3.4/runtime/Perl5/examples/simplecalc/simplecalc.pl b/runtime/Perl5/examples/simplecalc/simplecalc.pl
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/simplecalc/simplecalc.pl
rename to runtime/Perl5/examples/simplecalc/simplecalc.pl
diff --git a/antlr-3.4/runtime/Perl5/examples/tweak/T.g b/runtime/Perl5/examples/tweak/T.g
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/tweak/T.g
rename to runtime/Perl5/examples/tweak/T.g
diff --git a/antlr-3.4/runtime/Perl5/examples/tweak/input b/runtime/Perl5/examples/tweak/input
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/tweak/input
rename to runtime/Perl5/examples/tweak/input
diff --git a/antlr-3.4/runtime/Perl5/examples/tweak/output b/runtime/Perl5/examples/tweak/output
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/tweak/output
rename to runtime/Perl5/examples/tweak/output
diff --git a/antlr-3.4/runtime/Perl5/examples/tweak/tweak.pl b/runtime/Perl5/examples/tweak/tweak.pl
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/tweak/tweak.pl
rename to runtime/Perl5/examples/tweak/tweak.pl
diff --git a/antlr-3.4/runtime/Perl5/examples/zero-one/T.g b/runtime/Perl5/examples/zero-one/T.g
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/zero-one/T.g
rename to runtime/Perl5/examples/zero-one/T.g
diff --git a/antlr-3.4/runtime/Perl5/examples/zero-one/t-error.pl b/runtime/Perl5/examples/zero-one/t-error.pl
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/zero-one/t-error.pl
rename to runtime/Perl5/examples/zero-one/t-error.pl
diff --git a/antlr-3.4/runtime/Perl5/examples/zero-one/t.pl b/runtime/Perl5/examples/zero-one/t.pl
similarity index 100%
rename from antlr-3.4/runtime/Perl5/examples/zero-one/t.pl
rename to runtime/Perl5/examples/zero-one/t.pl
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime.pm b/runtime/Perl5/lib/ANTLR/Runtime.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime.pm
rename to runtime/Perl5/lib/ANTLR/Runtime.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/ANTLRFileStream.pm b/runtime/Perl5/lib/ANTLR/Runtime/ANTLRFileStream.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/ANTLRFileStream.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/ANTLRFileStream.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/ANTLRStringStream.pm b/runtime/Perl5/lib/ANTLR/Runtime/ANTLRStringStream.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/ANTLRStringStream.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/ANTLRStringStream.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/BaseRecognizer.pm b/runtime/Perl5/lib/ANTLR/Runtime/BaseRecognizer.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/BaseRecognizer.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/BaseRecognizer.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/BitSet.pm b/runtime/Perl5/lib/ANTLR/Runtime/BitSet.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/BitSet.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/BitSet.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/CharStream.pm b/runtime/Perl5/lib/ANTLR/Runtime/CharStream.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/CharStream.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/CharStream.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/CharStreamState.pm b/runtime/Perl5/lib/ANTLR/Runtime/CharStreamState.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/CharStreamState.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/CharStreamState.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/CommonToken.pm b/runtime/Perl5/lib/ANTLR/Runtime/CommonToken.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/CommonToken.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/CommonToken.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/CommonTokenStream.pm b/runtime/Perl5/lib/ANTLR/Runtime/CommonTokenStream.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/CommonTokenStream.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/CommonTokenStream.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/DFA.pm b/runtime/Perl5/lib/ANTLR/Runtime/DFA.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/DFA.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/DFA.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/EarlyExitException.pm b/runtime/Perl5/lib/ANTLR/Runtime/EarlyExitException.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/EarlyExitException.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/EarlyExitException.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/Exception.pm b/runtime/Perl5/lib/ANTLR/Runtime/Exception.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/Exception.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/Exception.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/IntStream.pm b/runtime/Perl5/lib/ANTLR/Runtime/IntStream.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/IntStream.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/IntStream.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/Lexer.pm b/runtime/Perl5/lib/ANTLR/Runtime/Lexer.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/Lexer.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/Lexer.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/MismatchedSetException.pm b/runtime/Perl5/lib/ANTLR/Runtime/MismatchedSetException.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/MismatchedSetException.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/MismatchedSetException.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/MismatchedTokenException.pm b/runtime/Perl5/lib/ANTLR/Runtime/MismatchedTokenException.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/MismatchedTokenException.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/MismatchedTokenException.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/MissingTokenException.pm b/runtime/Perl5/lib/ANTLR/Runtime/MissingTokenException.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/MissingTokenException.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/MissingTokenException.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/NoViableAltException.pm b/runtime/Perl5/lib/ANTLR/Runtime/NoViableAltException.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/NoViableAltException.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/NoViableAltException.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/Parser.pm b/runtime/Perl5/lib/ANTLR/Runtime/Parser.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/Parser.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/Parser.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/ParserRuleReturnScope.pm b/runtime/Perl5/lib/ANTLR/Runtime/ParserRuleReturnScope.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/ParserRuleReturnScope.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/ParserRuleReturnScope.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/RecognitionException.pm b/runtime/Perl5/lib/ANTLR/Runtime/RecognitionException.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/RecognitionException.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/RecognitionException.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/RecognizerSharedState.pm b/runtime/Perl5/lib/ANTLR/Runtime/RecognizerSharedState.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/RecognizerSharedState.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/RecognizerSharedState.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/RuleReturnScope.pm b/runtime/Perl5/lib/ANTLR/Runtime/RuleReturnScope.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/RuleReturnScope.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/RuleReturnScope.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/Stream.pm b/runtime/Perl5/lib/ANTLR/Runtime/Stream.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/Stream.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/Stream.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/Token.pm b/runtime/Perl5/lib/ANTLR/Runtime/Token.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/Token.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/Token.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/TokenSource.pm b/runtime/Perl5/lib/ANTLR/Runtime/TokenSource.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/TokenSource.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/TokenSource.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/TokenStream.pm b/runtime/Perl5/lib/ANTLR/Runtime/TokenStream.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/TokenStream.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/TokenStream.pm
diff --git a/antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/UnwantedTokenException.pm b/runtime/Perl5/lib/ANTLR/Runtime/UnwantedTokenException.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/lib/ANTLR/Runtime/UnwantedTokenException.pm
rename to runtime/Perl5/lib/ANTLR/Runtime/UnwantedTokenException.pm
diff --git a/antlr-3.4/runtime/Perl5/port.yml b/runtime/Perl5/port.yml
similarity index 100%
rename from antlr-3.4/runtime/Perl5/port.yml
rename to runtime/Perl5/port.yml
diff --git a/antlr-3.4/runtime/Perl5/t/author/api.t b/runtime/Perl5/t/author/api.t
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/author/api.t
rename to runtime/Perl5/t/author/api.t
diff --git a/antlr-3.4/runtime/Perl5/t/author/perlcritic.t b/runtime/Perl5/t/author/perlcritic.t
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/author/perlcritic.t
rename to runtime/Perl5/t/author/perlcritic.t
diff --git a/antlr-3.4/runtime/Perl5/t/author/perlcriticrc b/runtime/Perl5/t/author/perlcriticrc
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/author/perlcriticrc
rename to runtime/Perl5/t/author/perlcriticrc
diff --git a/antlr-3.4/runtime/Perl5/t/author/pod-coverage.t b/runtime/Perl5/t/author/pod-coverage.t
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/author/pod-coverage.t
rename to runtime/Perl5/t/author/pod-coverage.t
diff --git a/antlr-3.4/runtime/Perl5/t/author/pod.t b/runtime/Perl5/t/author/pod.t
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/author/pod.t
rename to runtime/Perl5/t/author/pod.t
diff --git a/antlr-3.4/runtime/Perl5/t/classes.t b/runtime/Perl5/t/classes.t
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/classes.t
rename to runtime/Perl5/t/classes.t
diff --git a/antlr-3.4/runtime/Perl5/t/classes/Test/ANTLR/Runtime/ANTLRStringStream.pm b/runtime/Perl5/t/classes/Test/ANTLR/Runtime/ANTLRStringStream.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/classes/Test/ANTLR/Runtime/ANTLRStringStream.pm
rename to runtime/Perl5/t/classes/Test/ANTLR/Runtime/ANTLRStringStream.pm
diff --git a/antlr-3.4/runtime/Perl5/t/classes/Test/ANTLR/Runtime/BitSet.pm b/runtime/Perl5/t/classes/Test/ANTLR/Runtime/BitSet.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/classes/Test/ANTLR/Runtime/BitSet.pm
rename to runtime/Perl5/t/classes/Test/ANTLR/Runtime/BitSet.pm
diff --git a/antlr-3.4/runtime/Perl5/t/classes/Test/ANTLR/Runtime/CommonToken.pm b/runtime/Perl5/t/classes/Test/ANTLR/Runtime/CommonToken.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/classes/Test/ANTLR/Runtime/CommonToken.pm
rename to runtime/Perl5/t/classes/Test/ANTLR/Runtime/CommonToken.pm
diff --git a/antlr-3.4/runtime/Perl5/t/classes/Test/ANTLR/Runtime/Exception.pm b/runtime/Perl5/t/classes/Test/ANTLR/Runtime/Exception.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/classes/Test/ANTLR/Runtime/Exception.pm
rename to runtime/Perl5/t/classes/Test/ANTLR/Runtime/Exception.pm
diff --git a/antlr-3.4/runtime/Perl5/t/classes/Test/ANTLR/Runtime/Lexer.pm b/runtime/Perl5/t/classes/Test/ANTLR/Runtime/Lexer.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/classes/Test/ANTLR/Runtime/Lexer.pm
rename to runtime/Perl5/t/classes/Test/ANTLR/Runtime/Lexer.pm
diff --git a/antlr-3.4/runtime/Perl5/t/examples/expr.t b/runtime/Perl5/t/examples/expr.t
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/examples/expr.t
rename to runtime/Perl5/t/examples/expr.t
diff --git a/antlr-3.4/runtime/Perl5/t/examples/fig.t b/runtime/Perl5/t/examples/fig.t
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/examples/fig.t
rename to runtime/Perl5/t/examples/fig.t
diff --git a/antlr-3.4/runtime/Perl5/t/examples/simplecalc.t b/runtime/Perl5/t/examples/simplecalc.t
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/examples/simplecalc.t
rename to runtime/Perl5/t/examples/simplecalc.t
diff --git a/antlr-3.4/runtime/Perl5/t/lexer.t b/runtime/Perl5/t/lexer.t
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/lexer.t
rename to runtime/Perl5/t/lexer.t
diff --git a/antlr-3.4/runtime/Perl5/t/lib/ANTLR/Runtime/Test.pm b/runtime/Perl5/t/lib/ANTLR/Runtime/Test.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/lib/ANTLR/Runtime/Test.pm
rename to runtime/Perl5/t/lib/ANTLR/Runtime/Test.pm
diff --git a/antlr-3.4/runtime/Perl5/t/lib/My/Test/Class.pm b/runtime/Perl5/t/lib/My/Test/Class.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/lib/My/Test/Class.pm
rename to runtime/Perl5/t/lib/My/Test/Class.pm
diff --git a/antlr-3.4/runtime/Perl5/t/lib/My/Test/Class/Load.pm b/runtime/Perl5/t/lib/My/Test/Class/Load.pm
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/lib/My/Test/Class/Load.pm
rename to runtime/Perl5/t/lib/My/Test/Class/Load.pm
diff --git a/antlr-3.4/runtime/Perl5/t/version.t b/runtime/Perl5/t/version.t
similarity index 100%
rename from antlr-3.4/runtime/Perl5/t/version.t
rename to runtime/Perl5/t/version.t
diff --git a/antlr-3.4/runtime/Perl5/tools/antlr.bat b/runtime/Perl5/tools/antlr.bat
similarity index 100%
rename from antlr-3.4/runtime/Perl5/tools/antlr.bat
rename to runtime/Perl5/tools/antlr.bat
diff --git a/antlr-3.4/runtime/Perl5/tools/antlr.sh b/runtime/Perl5/tools/antlr.sh
similarity index 100%
rename from antlr-3.4/runtime/Perl5/tools/antlr.sh
rename to runtime/Perl5/tools/antlr.sh
diff --git a/antlr-3.4/runtime/Perl5/tools/port.pl b/runtime/Perl5/tools/port.pl
similarity index 100%
rename from antlr-3.4/runtime/Perl5/tools/port.pl
rename to runtime/Perl5/tools/port.pl
diff --git a/antlr-3.4/runtime/Python/AUTHORS b/runtime/Python/AUTHORS
similarity index 100%
rename from antlr-3.4/runtime/Python/AUTHORS
rename to runtime/Python/AUTHORS
diff --git a/antlr-3.4/runtime/Python/ChangeLog b/runtime/Python/ChangeLog
similarity index 100%
rename from antlr-3.4/runtime/Python/ChangeLog
rename to runtime/Python/ChangeLog
diff --git a/antlr-3.4/runtime/Python/LICENSE b/runtime/Python/LICENSE
similarity index 100%
rename from antlr-3.4/runtime/Python/LICENSE
rename to runtime/Python/LICENSE
diff --git a/antlr-3.4/runtime/Python/MANIFEST.in b/runtime/Python/MANIFEST.in
similarity index 100%
rename from antlr-3.4/runtime/Python/MANIFEST.in
rename to runtime/Python/MANIFEST.in
diff --git a/antlr-3.4/runtime/Python/README b/runtime/Python/README
similarity index 100%
rename from antlr-3.4/runtime/Python/README
rename to runtime/Python/README
diff --git a/antlr-3.4/runtime/Python/TODO b/runtime/Python/TODO
similarity index 100%
rename from antlr-3.4/runtime/Python/TODO
rename to runtime/Python/TODO
diff --git a/antlr-3.4/runtime/Python/antlr3/__init__.py b/runtime/Python/antlr3/__init__.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/__init__.py
rename to runtime/Python/antlr3/__init__.py
diff --git a/antlr-3.4/runtime/Python/antlr3/compat.py b/runtime/Python/antlr3/compat.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/compat.py
rename to runtime/Python/antlr3/compat.py
diff --git a/antlr-3.4/runtime/Python/antlr3/constants.py b/runtime/Python/antlr3/constants.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/constants.py
rename to runtime/Python/antlr3/constants.py
diff --git a/antlr-3.4/runtime/Python/antlr3/debug.py b/runtime/Python/antlr3/debug.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/debug.py
rename to runtime/Python/antlr3/debug.py
diff --git a/antlr-3.4/runtime/Python/antlr3/dfa.py b/runtime/Python/antlr3/dfa.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/dfa.py
rename to runtime/Python/antlr3/dfa.py
diff --git a/runtime/Python/antlr3/dottreegen.py b/runtime/Python/antlr3/dottreegen.py
new file mode 100644
index 0000000..41415b1
--- /dev/null
+++ b/runtime/Python/antlr3/dottreegen.py
@@ -0,0 +1,210 @@
+""" @package antlr3.dottreegenerator
+@brief ANTLR3 runtime package, tree module
+
+This module contains all support classes for AST construction and tree parsers.
+
+"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+# lots of docstrings are missing; don't complain for now...
+# pylint: disable-msg=C0111
+
+from antlr3.tree import CommonTreeAdaptor
+import stringtemplate3
+
+class DOTTreeGenerator(object):
+    """
+    A utility class to generate DOT diagrams (graphviz) from
+    arbitrary trees.  You can pass in your own templates and
+    can pass in any kind of tree or use Tree interface method.
+    """
+
+    _treeST = stringtemplate3.StringTemplate(
+        template=(
+        "digraph {\n" +
+        "  ordering=out;\n" +
+        "  ranksep=.4;\n" +
+        "  node [shape=plaintext, fixedsize=true, fontsize=11, fontname=\"Courier\",\n" +
+        "        width=.25, height=.25];\n" +
+        "  edge [arrowsize=.5]\n" +
+        "  $nodes$\n" +
+        "  $edges$\n" +
+        "}\n")
+        )
+
+    _nodeST = stringtemplate3.StringTemplate(
+        template="$name$ [label=\"$text$\"];\n"
+        )
+
+    _edgeST = stringtemplate3.StringTemplate(
+        template="$parent$ -> $child$ // \"$parentText$\" -> \"$childText$\"\n"
+        )
+
+    def __init__(self):
+        ## Track node to number mapping so we can get proper node name back
+        self.nodeToNumberMap = {}
+
+        ## Track node number so we can get unique node names
+        self.nodeNumber = 0
+
+
+    def toDOT(self, tree, adaptor=None, treeST=_treeST, edgeST=_edgeST):
+        if adaptor is None:
+            adaptor = CommonTreeAdaptor()
+
+        treeST = treeST.getInstanceOf()
+
+        self.nodeNumber = 0
+        self.toDOTDefineNodes(tree, adaptor, treeST)
+
+        self.nodeNumber = 0
+        self.toDOTDefineEdges(tree, adaptor, treeST, edgeST)
+        return treeST
+
+
+    def toDOTDefineNodes(self, tree, adaptor, treeST, knownNodes=None):
+        if knownNodes is None:
+            knownNodes = set()
+
+        if tree is None:
+            return
+
+        n = adaptor.getChildCount(tree)
+        if n == 0:
+            # must have already dumped as child from previous
+            # invocation; do nothing
+            return
+
+        # define parent node
+        number = self.getNodeNumber(tree)
+        if number not in knownNodes:
+            parentNodeST = self.getNodeST(adaptor, tree)
+            treeST.setAttribute("nodes", parentNodeST)
+            knownNodes.add(number)
+
+        # for each child, do a "<unique-name> [label=text]" node def
+        for i in range(n):
+            child = adaptor.getChild(tree, i)
+            
+            number = self.getNodeNumber(child)
+            if number not in knownNodes:
+                nodeST = self.getNodeST(adaptor, child)
+                treeST.setAttribute("nodes", nodeST)
+                knownNodes.add(number)
+
+            self.toDOTDefineNodes(child, adaptor, treeST, knownNodes)
+
+
+    def toDOTDefineEdges(self, tree, adaptor, treeST, edgeST):
+        if tree is None:
+            return
+
+        n = adaptor.getChildCount(tree)
+        if n == 0:
+            # must have already dumped as child from previous
+            # invocation; do nothing
+            return
+
+        parentName = "n%d" % self.getNodeNumber(tree)
+
+        # for each child, do a parent -> child edge using unique node names
+        parentText = adaptor.getText(tree)
+        for i in range(n):
+            child = adaptor.getChild(tree, i)
+            childText = adaptor.getText(child)
+            childName = "n%d" % self.getNodeNumber(child)
+            edgeST = edgeST.getInstanceOf()
+            edgeST.setAttribute("parent", parentName)
+            edgeST.setAttribute("child", childName)
+            edgeST.setAttribute("parentText", parentText)
+            edgeST.setAttribute("childText", childText)
+            treeST.setAttribute("edges", edgeST)
+            self.toDOTDefineEdges(child, adaptor, treeST, edgeST)
+
+
+    def getNodeST(self, adaptor, t):
+        text = adaptor.getText(t)
+        nodeST = self._nodeST.getInstanceOf()
+        uniqueName = "n%d" % self.getNodeNumber(t)
+        nodeST.setAttribute("name", uniqueName)
+        if text is not None:
+            text = text.replace('"', r'\"')
+        nodeST.setAttribute("text", text)
+        return nodeST
+
+
+    def getNodeNumber(self, t):
+        try:
+            return self.nodeToNumberMap[t]
+        except KeyError:
+            self.nodeToNumberMap[t] = self.nodeNumber
+            self.nodeNumber += 1
+            return self.nodeNumber - 1
+
+
+def toDOT(tree, adaptor=None, treeST=DOTTreeGenerator._treeST, edgeST=DOTTreeGenerator._edgeST):
+    """
+    Generate DOT (graphviz) for a whole tree not just a node.
+    For example, 3+4*5 should generate:
+
+    digraph {
+        node [shape=plaintext, fixedsize=true, fontsize=11, fontname="Courier",
+            width=.4, height=.2];
+        edge [arrowsize=.7]
+        "+"->3
+        "+"->"*"
+        "*"->4
+        "*"->5
+    }
+
+    Return the ST not a string in case people want to alter.
+
+    Takes a Tree interface object.
+
+    Example of invocation:
+
+        import antlr3
+        import antlr3.extras
+
+        input = antlr3.ANTLRInputStream(sys.stdin)
+        lex = TLexer(input)
+        tokens = antlr3.CommonTokenStream(lex)
+        parser = TParser(tokens)
+        tree = parser.e().tree
+        print tree.toStringTree()
+        st = antlr3.extras.toDOT(tree)
+        print st
+        
+    """
+
+    gen = DOTTreeGenerator()
+    return gen.toDOT(tree, adaptor, treeST, edgeST)
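
Editor's note: the module-level toDOT() above returns a StringTemplate rather than a string so callers can adjust attributes before rendering, and it requires the stringtemplate3 package. A minimal sketch of how it might be driven, assuming a grammar "T" has already been compiled by ANTLR 3 into TLexer/TParser (hypothetical names, not part of this patch) with output=AST and a start rule 'e':

    import sys

    import antlr3
    from antlr3.dottreegen import toDOT

    # TLexer and TParser are hypothetical generated classes; they are not
    # provided by the runtime itself.
    from TLexer import TLexer
    from TParser import TParser

    char_stream = antlr3.ANTLRStringStream(u"3+4*5")
    tokens = antlr3.CommonTokenStream(TLexer(char_stream))
    tree = TParser(tokens).e().tree      # 'e' is the assumed start rule

    st = toDOT(tree)                     # returns a StringTemplate, not a str
    sys.stdout.write(str(st))            # render the DOT text
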
diff --git a/antlr-3.4/runtime/Python/antlr3/exceptions.py b/runtime/Python/antlr3/exceptions.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/exceptions.py
rename to runtime/Python/antlr3/exceptions.py
diff --git a/antlr-3.4/runtime/Python/antlr3/extras.py b/runtime/Python/antlr3/extras.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/extras.py
rename to runtime/Python/antlr3/extras.py
diff --git a/antlr-3.4/runtime/Python/antlr3/main.py b/runtime/Python/antlr3/main.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/main.py
rename to runtime/Python/antlr3/main.py
diff --git a/antlr-3.4/runtime/Python/antlr3/recognizers.py b/runtime/Python/antlr3/recognizers.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/recognizers.py
rename to runtime/Python/antlr3/recognizers.py
diff --git a/runtime/Python/antlr3/streams.py b/runtime/Python/antlr3/streams.py
new file mode 100644
index 0000000..84016bd
--- /dev/null
+++ b/runtime/Python/antlr3/streams.py
@@ -0,0 +1,1522 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+import codecs
+from StringIO import StringIO
+
+from antlr3.constants import DEFAULT_CHANNEL, EOF
+from antlr3.tokens import Token, CommonToken
+
+
+############################################################################
+#
+# basic interfaces
+#   IntStream
+#    +- CharStream
+#    \- TokenStream
+#
+# subclasses must implement all methods
+#
+############################################################################
+
+class IntStream(object):
+    """
+    @brief Base interface for streams of integer values.
+
+    A simple stream of integers used when all I care about is the char
+    or token type sequence (such as interpretation).
+    """
+
+    def consume(self):
+        raise NotImplementedError
+
+
+    def LA(self, i):
+        """Get int at current input pointer + i ahead where i=1 is next int.
+
+        Negative indexes are allowed.  LA(-1) is previous token (token
+	just matched).  LA(-i) where i is before first token should
+	yield -1, invalid char / EOF.
+	"""
+
+        raise NotImplementedError
+
+
+    def mark(self):
+        """
+        Tell the stream to start buffering if it hasn't already.  Return
+        current input position, index(), or some other marker so that
+        when passed to rewind() you get back to the same spot.
+        rewind(mark()) should not affect the input cursor.  The Lexer
+        tracks line/col info as well as input index so its markers are
+        not pure input indexes.  Same for tree node streams.
+        """
+
+        raise NotImplementedError
+
+
+    def index(self):
+        """
+        Return the current input symbol index 0..n where n indicates the
+        last symbol has been read.  The index is the symbol about to be
+        read not the most recently read symbol.
+        """
+
+        raise NotImplementedError
+
+
+    def rewind(self, marker=None):
+        """
+        Reset the stream so that next call to index would return marker.
+        The marker will usually be index() but it doesn't have to be.  It's
+        just a marker to indicate what state the stream was in.  This is
+        essentially calling release() and seek().  If there are markers
+        created after this marker argument, this routine must unroll them
+        like a stack.  Assume the state the stream was in when this marker
+        was created.
+
+        If marker is None:
+        Rewind to the input position of the last marker.
+        Used currently only after a cyclic DFA and just
+        before starting a sem/syn predicate to get the
+        input position back to the start of the decision.
+        Do not "pop" the marker off the state.  mark(i)
+        and rewind(i) should balance still. It is
+        like invoking rewind(last marker) but it should not "pop"
+        the marker off.  It's like seek(last marker's input position).
+	"""
+
+        raise NotImplementedError
+
+
+    def release(self, marker=None):
+        """
+        You may want to commit to a backtrack but don't want to force the
+        stream to keep bookkeeping objects around for a marker that is
+        no longer necessary.  This will have the same behavior as
+        rewind() except it releases resources without the backward seek.
+        This must throw away resources for all markers back to the marker
+        argument.  So if you're nested 5 levels of mark(), and then release(2)
+        you have to release resources for depths 2..5.
+	"""
+
+        raise NotImplementedError
+
+
+    def seek(self, index):
+        """
+        Set the input cursor to the position indicated by index.  This is
+        normally used to seek ahead in the input stream.  No buffering is
+        required to do this unless you know your stream will use seek to
+        move backwards such as when backtracking.
+
+        This is different from rewind in its multi-directional
+        requirement and in that its argument is strictly an input cursor
+        (index).
+
+        For char streams, seeking forward must update the stream state such
+        as line number.  For seeking backwards, you will be presumably
+        backtracking using the mark/rewind mechanism that restores state and
+        so this method does not need to update state when seeking backwards.
+
+        Currently, this method is only used for efficient backtracking using
+        memoization, but in the future it may be used for incremental parsing.
+
+        The index is 0..n-1.  A seek to position i means that LA(1) will
+        return the ith symbol.  So, seeking to 0 means LA(1) will return the
+        first element in the stream.
+        """
+
+        raise NotImplementedError
+
+
+    def size(self):
+        """
+        Only makes sense for streams that buffer everything up probably, but
+        might be useful to display the entire stream or for testing.  This
+        value includes a single EOF.
+	"""
+
+        raise NotImplementedError
+
+
+    def getSourceName(self):
+        """
+        Where are you getting symbols from?  Normally, implementations will
+        pass the buck all the way to the lexer who can ask its input stream
+        for the file name or whatever.
+        """
+
+        raise NotImplementedError
+
+
+class CharStream(IntStream):
+    """
+    @brief A source of characters for an ANTLR lexer.
+
+    This is an abstract class that must be implemented by a subclass.
+
+    """
+
+    # pylint does not realize that this is an interface, too
+    #pylint: disable-msg=W0223
+
+    EOF = -1
+
+
+    def substring(self, start, stop):
+        """
+        For infinite streams, you don't need this; primarily I'm providing
+        a useful interface for action code.  Just make sure actions don't
+        use this on streams that don't support it.
+        """
+
+        raise NotImplementedError
+
+
+    def LT(self, i):
+        """
+        Get the ith character of lookahead.  This is the same usually as
+        LA(i).  This will be used for labels in the generated
+        lexer code.  I'd prefer to return a char here type-wise, but it's
+        probably better to be 32-bit clean and be consistent with LA.
+        """
+
+        raise NotImplementedError
+
+
+    def getLine(self):
+        """ANTLR tracks the line information automatically"""
+
+        raise NotImplementedError
+
+
+    def setLine(self, line):
+        """
+        Because this stream can rewind, we need to be able to reset the line
+        """
+
+        raise NotImplementedError
+
+
+    def getCharPositionInLine(self):
+        """
+        The index of the character relative to the beginning of the line 0..n-1
+        """
+
+        raise NotImplementedError
+
+
+    def setCharPositionInLine(self, pos):
+        raise NotImplementedError
+
+
+class TokenStream(IntStream):
+    """
+
+    @brief A stream of tokens accessing tokens from a TokenSource
+
+    This is an abstract class that must be implemented by a subclass.
+
+    """
+
+    # pylint does not realize that this is an interface, too
+    #pylint: disable-msg=W0223
+
+    def LT(self, k):
+        """
+        Get the Token at current input pointer + k ahead where k=1 is the next
+        Token.  k<0 indicates tokens in the past.  So -1 is the previous token
+        and -2 is two tokens ago. LT(0) is undefined.  For k>=n, return
+        Token.EOFToken.  Return None for LT(0) and any index that results in
+        an absolute address that is negative.
+	"""
+
+        raise NotImplementedError
+
+
+    def range(self):
+        """
+        How far ahead has the stream been asked to look?  The return
+        value is a valid index from 0..n-1.
+        """
+
+        raise NotImplementedError
+
+
+    def get(self, i):
+        """
+        Get a token at an absolute index i; 0..n-1.  This is really only
+        needed for profiling and debugging and token stream rewriting.
+        If you don't want to buffer up tokens, then this method makes no
+        sense for you.  Naturally you can't use the rewrite stream feature.
+        I believe DebugTokenStream can easily be altered to not use
+        this method, removing the dependency.
+        """
+
+        raise NotImplementedError
+
+
+    def getTokenSource(self):
+        """
+        Where is this stream pulling tokens from?  This is not the name, but
+        the object that provides Token objects.
+	"""
+
+        raise NotImplementedError
+
+
+    def toString(self, start=None, stop=None):
+        """
+        Return the text of all tokens from start to stop, inclusive.
+        If the stream does not buffer all the tokens then it can just
+        return "" or null;  Users should not access $ruleLabel.text in
+        an action of course in that case.
+
+        Because the user is not required to use a token with an index stored
+        in it, we must provide a means for two token objects themselves to
+        indicate the start/end location.  Most often this will just delegate
+        to the other toString(int,int).  This is also parallel with
+        the TreeNodeStream.toString(Object,Object).
+	"""
+
+        raise NotImplementedError
+
+
+############################################################################
+#
+# character streams for use in lexers
+#   CharStream
+#   \- ANTLRStringStream
+#
+############################################################################
+
+
+class ANTLRStringStream(CharStream):
+    """
+    @brief CharStream that pulls data from a unicode string.
+
+    A pretty quick CharStream that pulls all data from an array
+    directly.  Every method call counts in the lexer.
+
+    """
+
+
+    def __init__(self, data):
+        """
+        @param data This should be a unicode string holding the data you want
+           to parse. If you pass in a byte string, the Lexer will choke on
+           non-ascii data.
+
+        """
+
+        CharStream.__init__(self)
+
+  	# The data being scanned
+        self.strdata = unicode(data)
+        self.data = [ord(c) for c in self.strdata]
+
+	# How many characters are actually in the buffer
+        self.n = len(data)
+
+ 	# 0..n-1 index into string of next char
+        self.p = 0
+
+	# line number 1..n within the input
+        self.line = 1
+
+ 	# The index of the character relative to the beginning of the
+        # line 0..n-1
+        self.charPositionInLine = 0
+
+	# A list of CharStreamState objects that tracks the stream state
+        # values line, charPositionInLine, and p that can change as you
+        # move through the input stream.  Indexed from 0..markDepth-1.
+        self._markers = [ ]
+        self.lastMarker = None
+        self.markDepth = 0
+
+        # What is name or source of this char stream?
+        self.name = None
+
+
+    def reset(self):
+        """
+        Reset the stream so that it's in the same state it was
+        when the object was created *except* the data array is not
+        touched.
+        """
+
+        self.p = 0
+        self.line = 1
+        self.charPositionInLine = 0
+        self._markers = [ ]
+
+
+    def consume(self):
+        try:
+            if self.data[self.p] == 10: # \n
+                self.line += 1
+                self.charPositionInLine = 0
+            else:
+                self.charPositionInLine += 1
+
+            self.p += 1
+
+        except IndexError:
+            # happens when we have reached EOF and self.data[self.p] fails
+            # just do nothing
+            pass
+
+
+
+    def LA(self, i):
+        if i == 0:
+            return 0 # undefined
+
+        if i < 0:
+            i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
+
+        try:
+            return self.data[self.p+i-1]
+        except IndexError:
+            return EOF
+
+
+
+    def LT(self, i):
+        if i == 0:
+            return 0 # undefined
+
+        if i < 0:
+            i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
+
+        try:
+            return self.strdata[self.p+i-1]
+        except IndexError:
+            return EOF
+
+
+    def index(self):
+        """
+        Return the current input symbol index 0..n where n indicates the
+        last symbol has been read.  The index is the index of char to
+        be returned from LA(1).
+        """
+
+        return self.p
+
+
+    def size(self):
+        return self.n
+
+
+    def mark(self):
+        state = (self.p, self.line, self.charPositionInLine)
+        try:
+            self._markers[self.markDepth] = state
+        except IndexError:
+            self._markers.append(state)
+        self.markDepth += 1
+
+        self.lastMarker = self.markDepth
+
+        return self.lastMarker
+
+
+    def rewind(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+
+        p, line, charPositionInLine = self._markers[marker-1]
+
+        self.seek(p)
+        self.line = line
+        self.charPositionInLine = charPositionInLine
+        self.release(marker)
+
+
+    def release(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+
+        self.markDepth = marker-1
+
+
+    def seek(self, index):
+        """
+        consume() ahead until p==index; can't just set p=index as we must
+        update line and charPositionInLine.
+        """
+
+        if index <= self.p:
+            self.p = index # just jump; don't update stream state (line, ...)
+            return
+
+        # seek forward, consume until p hits index
+        while self.p < index:
+            self.consume()
+
+
+    def substring(self, start, stop):
+        return self.strdata[start:stop+1]
+
+
+    def getLine(self):
+        """Using setter/getter methods is deprecated. Use o.line instead."""
+        return self.line
+
+
+    def getCharPositionInLine(self):
+        """
+        Using setter/getter methods is deprecated. Use o.charPositionInLine
+        instead.
+        """
+        return self.charPositionInLine
+
+
+    def setLine(self, line):
+        """Using setter/getter methods is deprecated. Use o.line instead."""
+        self.line = line
+
+
+    def setCharPositionInLine(self, pos):
+        """
+        Using setter/getter methods is deprecated. Use o.charPositionInLine
+        instead.
+        """
+        self.charPositionInLine = pos
+
+
+    def getSourceName(self):
+        return self.name
+
+
+class ANTLRFileStream(ANTLRStringStream):
+    """
+    @brief CharStream that opens a file to read the data.
+
+    This is a char buffer stream that is loaded from a file
+    all at once when you construct the object.
+    """
+
+    def __init__(self, fileName, encoding=None):
+        """
+        @param fileName The path to the file to be opened. The file will be
+           opened with mode 'rb'.
+
+        @param encoding If you set the optional encoding argument, then the
+           data will be decoded on the fly.
+
+        """
+
+        self.fileName = fileName
+
+        fp = codecs.open(fileName, 'rb', encoding)
+        try:
+            data = fp.read()
+        finally:
+            fp.close()
+
+        ANTLRStringStream.__init__(self, data)
+
+
+    def getSourceName(self):
+        """Deprecated, access o.fileName directly."""
+
+        return self.fileName
+
+
+class ANTLRInputStream(ANTLRStringStream):
+    """
+    @brief CharStream that reads data from a file-like object.
+
+    This is a char buffer stream that is loaded from a file like object
+    all at once when you construct the object.
+
+    All input is consumed from the file, but it is not closed.
+    """
+
+    def __init__(self, file, encoding=None):
+        """
+        @param file A file-like object holding your input. Only the read()
+           method must be implemented.
+
+        @param encoding If you set the optional encoding argument, then the
+           data will be decoded on the fly.
+
+        """
+
+        if encoding is not None:
+            # wrap input in a decoding reader
+            reader = codecs.lookup(encoding)[2]
+            file = reader(file)
+
+        data = file.read()
+
+        ANTLRStringStream.__init__(self, data)
+
+
+# I guess the ANTLR prefix exists only to avoid a name clash with some Java
+# mumbojumbo. A plain "StringStream" looks better to me, which should be
+# the preferred name in Python.
+StringStream = ANTLRStringStream
+FileStream = ANTLRFileStream
+InputStream = ANTLRInputStream
+
+
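
Editor's note: as a quick illustration of the char stream classes above (a sketch, not part of the patch), feeding a unicode string to ANTLRStringStream and walking it with LA()/consume() exercises the line/column bookkeeping a lexer relies on. This snippet needs only the runtime itself:

    from antlr3.constants import EOF
    from antlr3.streams import ANTLRStringStream

    stream = ANTLRStringStream(u"ab\ncd")
    while stream.LA(1) != EOF:
        # line/charPositionInLine are maintained by consume()
        print stream.line, stream.charPositionInLine, chr(stream.LA(1))
        stream.consume()

ANTLRFileStream and ANTLRInputStream behave the same way once constructed; they only differ in where the data comes from.
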
+############################################################################
+#
+# Token streams
+#   TokenStream
+#   +- CommonTokenStream
+#   \- TokenRewriteStream
+#
+############################################################################
+
+
+class CommonTokenStream(TokenStream):
+    """
+    @brief The most common stream of tokens
+
+    The most common stream of tokens is one where every token is buffered up
+    and tokens are prefiltered for a certain channel (the parser will only
+    see these tokens and cannot change the filter channel number during the
+    parse).
+    """
+
+    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
+        """
+        @param tokenSource A TokenSource instance (usually a Lexer) to pull
+            the tokens from.
+
+        @param channel Skip tokens on any channel but this one; this is how we
+            skip whitespace...
+
+        """
+
+        TokenStream.__init__(self)
+
+        self.tokenSource = tokenSource
+
+	# Record every single token pulled from the source so we can reproduce
+        # chunks of it later.
+        self.tokens = []
+
+	# Map<tokentype, channel> to override some Tokens' channel numbers
+        self.channelOverrideMap = {}
+
+	# Set<tokentype>; discard any tokens with this type
+        self.discardSet = set()
+
+	# Skip tokens on any channel but this one; this is how we skip
+        # whitespace...
+        self.channel = channel
+
+	# By default, track all incoming tokens
+        self.discardOffChannelTokens = False
+
+	# The index into the tokens list of the current token (next token
+        # to consume).  p==-1 indicates that the tokens list is empty
+        self.p = -1
+
+        # Remember last marked position
+        self.lastMarker = None
+
+        # how deep have we gone?
+        self._range = -1
+
+
+    def makeEOFToken(self):
+        return self.tokenSource.makeEOFToken()
+
+
+    def setTokenSource(self, tokenSource):
+        """Reset this token stream by setting its token source."""
+
+        self.tokenSource = tokenSource
+        self.tokens = []
+        self.p = -1
+        self.channel = DEFAULT_CHANNEL
+
+
+    def reset(self):
+        self.p = 0
+        self.lastMarker = None
+
+
+    def fillBuffer(self):
+        """
+        Load all tokens from the token source and put in tokens.
+	This is done upon first LT request because you might want to
+        set some token type / channel overrides before filling buffer.
+        """
+
+
+        index = 0
+        t = self.tokenSource.nextToken()
+        while t is not None and t.type != EOF:
+            discard = False
+
+            if self.discardSet is not None and t.type in self.discardSet:
+                discard = True
+
+            elif self.discardOffChannelTokens and t.channel != self.channel:
+                discard = True
+
+            # is there a channel override for token type?
+            try:
+                overrideChannel = self.channelOverrideMap[t.type]
+
+            except KeyError:
+                # no override for this type
+                pass
+
+            else:
+                if overrideChannel == self.channel:
+                    t.channel = overrideChannel
+                else:
+                    discard = True
+
+            if not discard:
+                t.index = index
+                self.tokens.append(t)
+                index += 1
+
+            t = self.tokenSource.nextToken()
+
+        # leave p pointing at first token on channel
+        self.p = 0
+        self.p = self.skipOffTokenChannels(self.p)
+
+
+    def consume(self):
+        """
+        Move the input pointer to the next incoming token.  The stream
+        must become active with LT(1) available.  consume() simply
+        moves the input pointer so that LT(1) points at the next
+        input symbol. Consume at least one token.
+
+        Walk past any token not on the channel the parser is listening to.
+        """
+
+        if self.p < len(self.tokens):
+            self.p += 1
+
+            self.p = self.skipOffTokenChannels(self.p) # leave p on valid token
+
+
+    def skipOffTokenChannels(self, i):
+        """
+        Given a starting index, return the index of the first on-channel
+        token.
+        """
+
+        try:
+            while self.tokens[i].channel != self.channel:
+                i += 1
+        except IndexError:
+            # hit the end of token stream
+            pass
+
+        return i
+
+
+    def skipOffTokenChannelsReverse(self, i):
+        while i >= 0 and self.tokens[i].channel != self.channel:
+            i -= 1
+
+        return i
+
+
+    def setTokenTypeChannel(self, ttype, channel):
+        """
+        A simple filter mechanism whereby you can tell this token stream
+        to force all tokens of type ttype to be on channel.  For example,
+        when interpreting, we cannot exec actions so we need to tell
+        the stream to force all WS and NEWLINE to be a different, ignored
+        channel.
+	"""
+
+        self.channelOverrideMap[ttype] = channel
+
+
+    def discardTokenType(self, ttype):
+        self.discardSet.add(ttype)
+
+
+    def getTokens(self, start=None, stop=None, types=None):
+        """
+        Given a start and stop index, return a list of all tokens in
+        the token type set.  Return None if no tokens were found.  This
+        method looks at both on and off channel tokens.
+        """
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        if stop is None or stop > len(self.tokens):
+            stop = len(self.tokens)
+
+        if start is None or start < 0:
+            start = 0
+
+        if start > stop:
+            return None
+
+        if isinstance(types, (int, long)):
+            # called with a single type, wrap into set
+            types = set([types])
+
+        filteredTokens = [
+            token for token in self.tokens[start:stop]
+            if types is None or token.type in types
+            ]
+
+        if len(filteredTokens) == 0:
+            return None
+
+        return filteredTokens
+
+
+    def LT(self, k):
+        """
+        Get the kth token of lookahead from the current position, where k=1
+        is the first symbol of lookahead.
+        """
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        if k == 0:
+            return None
+
+        if k < 0:
+            return self.LB(-k)
+
+        i = self.p
+        n = 1
+        # find k good tokens
+        while n < k:
+            # skip off-channel tokens
+            i = self.skipOffTokenChannels(i+1) # leave p on valid token
+            n += 1
+
+        if i > self._range:
+            self._range = i
+
+        try:
+            return self.tokens[i]
+        except IndexError:
+            return self.makeEOFToken()
+
+
+    def LB(self, k):
+        """Look backwards k tokens on-channel tokens"""
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        if k == 0:
+            return None
+
+        if self.p - k < 0:
+            return None
+
+        i = self.p
+        n = 1
+        # find k good tokens looking backwards
+        while n <= k:
+            # skip off-channel tokens
+            i = self.skipOffTokenChannelsReverse(i-1) # leave p on valid token
+            n += 1
+
+        if i < 0:
+            return None
+
+        return self.tokens[i]
+
+
+    def get(self, i):
+        """
+        Return absolute token i; ignore which channel the tokens are on;
+        that is, count all tokens not just on-channel tokens.
+        """
+
+        return self.tokens[i]
+
+
+    def slice(self, start, stop):
+        if self.p == -1:
+            self.fillBuffer()
+
+        if start < 0 or stop < 0:
+            return None
+
+        return self.tokens[start:stop+1]
+
+
+    def LA(self, i):
+        return self.LT(i).type
+
+
+    def mark(self):
+        self.lastMarker = self.index()
+        return self.lastMarker
+
+
+    def release(self, marker=None):
+        # no resources to release
+        pass
+
+
+    def size(self):
+        return len(self.tokens)
+
+
+    def range(self):
+        return self._range
+
+
+    def index(self):
+        return self.p
+
+
+    def rewind(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+
+        self.seek(marker)
+
+
+    def seek(self, index):
+        self.p = index
+
+
+    def getTokenSource(self):
+        return self.tokenSource
+
+
+    def getSourceName(self):
+        return self.tokenSource.getSourceName()
+
+
+    def toString(self, start=None, stop=None):
+        if self.p == -1:
+            self.fillBuffer()
+
+        if start is None:
+            start = 0
+        elif not isinstance(start, int):
+            start = start.index
+
+        if stop is None:
+            stop = len(self.tokens) - 1
+        elif not isinstance(stop, int):
+            stop = stop.index
+
+        if stop >= len(self.tokens):
+            stop = len(self.tokens) - 1
+
+        return ''.join([t.text for t in self.tokens[start:stop+1]])
+
+
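
Editor's note: a hedged sketch of how CommonTokenStream is typically wired between a generated lexer and parser. TLexer/TParser and the startRule name are hypothetical generated artifacts, not provided by this patch:

    import antlr3

    # Hypothetical classes generated by ANTLR 3 from a grammar named "T";
    # any generated lexer/parser pair would do.
    from TLexer import TLexer
    from TParser import TParser

    char_stream = antlr3.ANTLRFileStream("input.txt", encoding="utf-8")
    tokens = antlr3.CommonTokenStream(TLexer(char_stream))
    parser = TParser(tokens)
    parser.startRule()                  # assumed start rule name

    # The stream keeps every token it pulled from the lexer:
    print tokens.toString()             # concatenated text of buffered tokens
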
+class RewriteOperation(object):
+    """@brief Internal helper class."""
+
+    def __init__(self, stream, index, text):
+        self.stream = stream
+
+        # What index into rewrites List are we?
+        self.instructionIndex = None
+
+        # Token buffer index.
+        self.index = index
+        self.text = text
+
+    def execute(self, buf):
+        """Execute the rewrite operation by possibly adding to the buffer.
+        Return the index of the next token to operate on.
+        """
+
+        return self.index
+
+    def toString(self):
+        opName = self.__class__.__name__
+        return '<%s@%d:"%s">' % (
+            opName, self.index, self.text)
+
+    __str__ = toString
+    __repr__ = toString
+
+
+class InsertBeforeOp(RewriteOperation):
+    """@brief Internal helper class."""
+
+    def execute(self, buf):
+        buf.write(self.text)
+        if self.stream.tokens[self.index].type != EOF:
+            buf.write(self.stream.tokens[self.index].text)
+        return self.index + 1
+
+
+class ReplaceOp(RewriteOperation):
+    """
+    @brief Internal helper class.
+
+    I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+    instructions.
+    """
+
+    def __init__(self, stream, first, last, text):
+        RewriteOperation.__init__(self, stream, first, text)
+        self.lastIndex = last
+
+
+    def execute(self, buf):
+        if self.text is not None:
+            buf.write(self.text)
+
+        return self.lastIndex + 1
+
+
+    def toString(self):
+        if self.text is None:
+            return '<DeleteOp@%d..%d>' % (self.index, self.lastIndex)
+
+        return '<ReplaceOp@%d..%d:"%s">' % (
+            self.index, self.lastIndex, self.text)
+
+    __str__ = toString
+    __repr__ = toString
+
+
+class TokenRewriteStream(CommonTokenStream):
+    """@brief CommonTokenStream that can be modified.
+
+    Useful for dumping out the input stream after doing some
+    augmentation or other manipulations.
+
+    You can insert stuff, replace, and delete chunks.  Note that the
+    operations are done lazily--only if you convert the buffer to a
+    String.  This is very efficient because you are not moving data around
+    all the time.  As the buffer of tokens is converted to strings, the
+    toString() method(s) check to see if there is an operation at the
+    current index.  If so, the operation is done and then normal String
+    rendering continues on the buffer.  This is like having multiple Turing
+    machine instruction streams (programs) operating on a single input tape. :)
+
+    Since the operations are done lazily at toString-time, operations do not
+    screw up the token index values.  That is, an insert operation at token
+    index i does not change the index values for tokens i+1..n-1.
+
+    Because operations never actually alter the buffer, you may always get
+    the original token stream back without undoing anything.  Since
+    the instructions are queued up, you can easily simulate transactions and
+    roll back any changes if there is an error just by removing instructions.
+    For example,
+
+     CharStream input = new ANTLRFileStream("input");
+     TLexer lex = new TLexer(input);
+     TokenRewriteStream tokens = new TokenRewriteStream(lex);
+     T parser = new T(tokens);
+     parser.startRule();
+
+     Then in the rules, you can execute
+        Token t,u;
+        ...
+        input.insertAfter(t, "text to put after t");
+        input.insertAfter(u, "text after u");
+        System.out.println(tokens.toString());
+
+    Actually, you have to cast the 'input' to a TokenRewriteStream. :(
+
+    You can also have multiple "instruction streams" and get multiple
+    rewrites from a single pass over the input.  Just name the instruction
+    streams and use that name again when printing the buffer.  This could be
+    useful for generating a C file and also its header file--all from the
+    same buffer:
+
+        tokens.insertAfter("pass1", t, "text to put after t");}
+        tokens.insertAfter("pass2", u, "text after u");}
+        System.out.println(tokens.toString("pass1"));
+        System.out.println(tokens.toString("pass2"));
+
+    If you don't use named rewrite streams, a "default" stream is used as
+    the first example shows.
+    """
+
+    DEFAULT_PROGRAM_NAME = "default"
+    MIN_TOKEN_INDEX = 0
+
+    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
+        CommonTokenStream.__init__(self, tokenSource, channel)
+
+        # You may have multiple, named streams of rewrite operations.
+        # I'm calling these things "programs."
+        #  Maps String (name) -> rewrite (List)
+        self.programs = {}
+        self.programs[self.DEFAULT_PROGRAM_NAME] = []
+
+ 	# Map String (program name) -> Integer index
+        self.lastRewriteTokenIndexes = {}
+
+
+    def rollback(self, *args):
+        """
+        Rollback the instruction stream for a program so that
+        the indicated instruction (via instructionIndex) is no
+        longer in the stream.  UNTESTED!
+        """
+
+        if len(args) == 2:
+            programName = args[0]
+            instructionIndex = args[1]
+        elif len(args) == 1:
+            programName = self.DEFAULT_PROGRAM_NAME
+            instructionIndex = args[0]
+        else:
+            raise TypeError("Invalid arguments")
+
+        p = self.programs.get(programName, None)
+        if p is not None:
+            self.programs[programName] = (
+                p[self.MIN_TOKEN_INDEX:instructionIndex])
+
+
+    def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME):
+        """Reset the program so that no instructions exist"""
+
+        self.rollback(programName, self.MIN_TOKEN_INDEX)
+
+
+    def insertAfter(self, *args):
+        if len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            index = args[0]
+            text = args[1]
+
+        elif len(args) == 3:
+            programName = args[0]
+            index = args[1]
+            text = args[2]
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        if isinstance(index, Token):
+            # index is a Token, grab the stream index from it
+            index = index.index
+
+        # to insert after, just insert before next index (even if past end)
+        self.insertBefore(programName, index+1, text)
+
+
+    def insertBefore(self, *args):
+        if len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            index = args[0]
+            text = args[1]
+
+        elif len(args) == 3:
+            programName = args[0]
+            index = args[1]
+            text = args[2]
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        if isinstance(index, Token):
+            # index is a Token, grab the stream index from it
+            index = index.index
+
+        op = InsertBeforeOp(self, index, text)
+        rewrites = self.getProgram(programName)
+        op.instructionIndex = len(rewrites)
+        rewrites.append(op)
+
+
+    def replace(self, *args):
+        if len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            first = args[0]
+            last = args[0]
+            text = args[1]
+
+        elif len(args) == 3:
+            programName = self.DEFAULT_PROGRAM_NAME
+            first = args[0]
+            last = args[1]
+            text = args[2]
+
+        elif len(args) == 4:
+            programName = args[0]
+            first = args[1]
+            last = args[2]
+            text = args[3]
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        if isinstance(first, Token):
+            # first is a Token, grab the stream index from it
+            first = first.index
+
+        if isinstance(last, Token):
+            # last is a Token, grab the stream index from it
+            last = last.index
+
+        if first > last or first < 0 or last < 0 or last >= len(self.tokens):
+            raise ValueError(
+                "replace: range invalid: %d..%d (size=%d)"
+                % (first, last, len(self.tokens)))
+
+        op = ReplaceOp(self, first, last, text)
+        rewrites = self.getProgram(programName)
+        op.instructionIndex = len(rewrites)
+        rewrites.append(op)
+
+
+    def delete(self, *args):
+        self.replace(*(list(args) + [None]))
+
+
+    def getLastRewriteTokenIndex(self, programName=DEFAULT_PROGRAM_NAME):
+        return self.lastRewriteTokenIndexes.get(programName, -1)
+
+
+    def setLastRewriteTokenIndex(self, programName, i):
+        self.lastRewriteTokenIndexes[programName] = i
+
+
+    def getProgram(self, name):
+        p = self.programs.get(name, None)
+        if p is None:
+            p = self.initializeProgram(name)
+
+        return p
+
+
+    def initializeProgram(self, name):
+        p = []
+        self.programs[name] = p
+        return p
+
+
+    def toOriginalString(self, start=None, end=None):
+        if self.p == -1:
+            self.fillBuffer()
+
+        if start is None:
+            start = self.MIN_TOKEN_INDEX
+        if end is None:
+            end = self.size() - 1
+
+        buf = StringIO()
+        i = start
+        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
+            if self.get(i).type != EOF:
+                buf.write(self.get(i).text)
+            i += 1
+
+        return buf.getvalue()
+
+
+    def toString(self, *args):
+        if self.p == -1:
+            self.fillBuffer()
+
+        if len(args) == 0:
+            programName = self.DEFAULT_PROGRAM_NAME
+            start = self.MIN_TOKEN_INDEX
+            end = self.size() - 1
+
+        elif len(args) == 1:
+            programName = args[0]
+            start = self.MIN_TOKEN_INDEX
+            end = self.size() - 1
+
+        elif len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            start = args[0]
+            end = args[1]
+
+        if start is None:
+            start = self.MIN_TOKEN_INDEX
+        elif not isinstance(start, int):
+            start = start.index
+
+        if end is None:
+            end = len(self.tokens) - 1
+        elif not isinstance(end, int):
+            end = end.index
+
+        # ensure start/end are in range
+        if end >= len(self.tokens):
+            end = len(self.tokens) - 1
+
+        if start < 0:
+            start = 0
+
+        rewrites = self.programs.get(programName)
+        if rewrites is None or len(rewrites) == 0:
+            # no instructions to execute
+            return self.toOriginalString(start, end)
+
+        buf = StringIO()
+
+        # First, optimize instruction stream
+        indexToOp = self.reduceToSingleOperationPerIndex(rewrites)
+
+        # Walk buffer, executing instructions and emitting tokens
+        i = start
+        while i <= end and i < len(self.tokens):
+            op = indexToOp.get(i)
+            # remove so any left have index size-1
+            try:
+                del indexToOp[i]
+            except KeyError:
+                pass
+
+            t = self.tokens[i]
+            if op is None:
+                # no operation at that index, just dump token
+                if t.type != EOF:
+                    buf.write(t.text)
+                i += 1 # move to next token
+
+            else:
+                i = op.execute(buf) # execute operation and skip
+
+        # include stuff after end if it's last index in buffer
+        # So, if they did an insertAfter(lastValidIndex, "foo"), include
+        # foo if end==lastValidIndex.
+        if end == len(self.tokens) - 1:
+            # Scan any remaining operations after last token
+            # should be included (they will be inserts).
+            for i in sorted(indexToOp.keys()):
+                op = indexToOp[i]
+                if op.index >= len(self.tokens)-1:
+                    buf.write(op.text)
+
+        return buf.getvalue()
+
+    __str__ = toString
+
+
+    def reduceToSingleOperationPerIndex(self, rewrites):
+        """
+        We need to combine operations and report invalid operations (like
+        overlapping replaces that are not completely nested).  Inserts to
+        same index need to be combined etc...   Here are the cases:
+
+        I.i.u I.j.v                           leave alone, nonoverlapping
+        I.i.u I.i.v                           combine: Iivu
+
+        R.i-j.u R.x-y.v | i-j in x-y          delete first R
+        R.i-j.u R.i-j.v                       delete first R
+        R.i-j.u R.x-y.v | x-y in i-j          ERROR
+        R.i-j.u R.x-y.v | boundaries overlap  ERROR
+
+        Delete special case of replace (text==null):
+        D.i-j.u D.x-y.v | boundaries overlap  combine to max(min)..max(right)
+
+        I.i.u R.x-y.v   | i in (x+1)-y        delete I (since insert before,
+                                              we're not deleting i)
+        I.i.u R.x-y.v   | i not in (x+1)-y    leave alone, nonoverlapping
+
+        R.x-y.v I.i.u   | i in x-y            ERROR
+        R.x-y.v I.x.u                         R.x-y.uv (combine, delete I)
+        R.x-y.v I.i.u   | i not in x-y        leave alone, nonoverlapping
+
+        I.i.u = insert u before op @ index i
+        R.x-y.u = replace x-y indexed tokens with u
+
+        First we need to examine replaces.  For any replace op:
+
+          1. wipe out any insertions before op within that range.
+          2. Drop any replace op before that is contained completely within
+             that range.
+          3. Throw exception upon boundary overlap with any previous replace.
+
+        Then we can deal with inserts:
+
+          1. for any inserts to same index, combine even if not adjacent.
+          2. for any prior replace with same left boundary, combine this
+             insert with replace and delete this replace.
+          3. throw exception if index in same range as previous replace
+
+        Don't actually delete; make op null in list. Easier to walk list.
+        Later we can throw as we add to index -> op map.
+
+        Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+        inserted stuff would be before the replace range.  But, if you
+        add tokens in front of a method body '{' and then delete the method
+        body, I think the stuff before the '{' you added should disappear too.
+
+        Return a map from token index to operation.
+        """
+
+        # WALK REPLACES
+        for i, rop in enumerate(rewrites):
+            if rop is None:
+                continue
+
+            if not isinstance(rop, ReplaceOp):
+                continue
+
+            # Wipe prior inserts within range
+            for j, iop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
+                if iop.index == rop.index:
+                    # E.g., insert before 2, delete 2..2; update replace
+                    # text to include insert before, kill insert
+                    rewrites[iop.instructionIndex] = None
+                    rop.text = self.catOpText(iop.text, rop.text)
+
+                elif iop.index > rop.index and iop.index <= rop.lastIndex:
+                    # delete insert as it's a no-op.
+                    rewrites[j] = None
+
+            # Drop any prior replaces contained within
+            for j, prevRop in self.getKindOfOps(rewrites, ReplaceOp, i):
+                if (prevRop.index >= rop.index
+                    and prevRop.lastIndex <= rop.lastIndex):
+                    # delete replace as it's a no-op.
+                    rewrites[j] = None
+                    continue
+
+                # throw exception unless disjoint or identical
+                disjoint = (prevRop.lastIndex < rop.index
+                            or prevRop.index > rop.lastIndex)
+                same = (prevRop.index == rop.index
+                        and prevRop.lastIndex == rop.lastIndex)
+
+                # Delete special case of replace (text==null):
+                # D.i-j.u D.x-y.v | boundaries overlap, combine to
+                # max(min)..max(right)
+                if prevRop.text is None and rop.text is None and not disjoint:
+                    # kill first delete
+                    rewrites[prevRop.instructionIndex] = None
+
+                    rop.index = min(prevRop.index, rop.index)
+                    rop.lastIndex = max(prevRop.lastIndex, rop.lastIndex)
+
+                elif not disjoint and not same:
+                    raise ValueError(
+                        "replace op boundaries of %s overlap with previous %s"
+                        % (rop, prevRop))
+
+        # WALK INSERTS
+        for i, iop in enumerate(rewrites):
+            if iop is None:
+                continue
+
+            if not isinstance(iop, InsertBeforeOp):
+                continue
+
+            # combine current insert with prior if any at same index
+            for j, prevIop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
+                if prevIop.index == iop.index: # combine objects
+                    # convert to strings...we're in process of toString'ing
+                    # whole token buffer so no lazy eval issue with any
+                    # templates
+                    iop.text = self.catOpText(iop.text, prevIop.text)
+                    # delete redundant prior insert
+                    rewrites[j] = None
+
+            # look for replaces where iop.index is in range; error
+            for j, rop in self.getKindOfOps(rewrites, ReplaceOp, i):
+                if iop.index == rop.index:
+                    rop.text = self.catOpText(iop.text, rop.text)
+                    # delete current insert
+                    rewrites[i] = None
+                    continue
+
+                if iop.index >= rop.index and iop.index <= rop.lastIndex:
+                    raise ValueError(
+                        "insert op %s within boundaries of previous %s"
+                        % (iop, rop))
+
+        m = {}
+        for i, op in enumerate(rewrites):
+            if op is None:
+                # ignore deleted ops
+                continue
+
+            assert op.index not in m, "should only be one op per index"
+            m[op.index] = op
+
+        return m
+
+
+    def catOpText(self, a, b):
+        x = ""
+        y = ""
+        if a is not None:
+            x = a
+        if b is not None:
+            y = b
+        return x + y
+
+
+    def getKindOfOps(self, rewrites, kind, before=None):
+        """Get all operations before an index of a particular kind."""
+
+        if before is None:
+            before = len(rewrites)
+        elif before > len(rewrites):
+            before = len(rewrites)
+
+        for i, op in enumerate(rewrites[:before]):
+            if op is None:
+                # ignore deleted
+                continue
+            if op.__class__ == kind:
+                yield i, op
+
+
+    def toDebugString(self, start=None, end=None):
+        if start is None:
+            start = self.MIN_TOKEN_INDEX
+        if end is None:
+            end = self.size() - 1
+
+        buf = StringIO()
+        i = start
+        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
+            buf.write(self.get(i))
+            i += 1
+
+        return buf.getvalue()
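
For illustration, the reduceToSingleOperationPerIndex logic above can be traced with a small
standalone sketch. The classes below are stand-ins, not the runtime's own InsertBeforeOp and
ReplaceOp, and the helper only implements the two folding rules visible in the code: an insert
at the same index as a replace is merged into the replace text, and an insert strictly inside a
replace range is dropped.

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Ins:               # stand-in for InsertBeforeOp
        index: int
        text: str

    @dataclass
    class Rep:               # stand-in for ReplaceOp; covers tokens index..lastIndex
        index: int
        lastIndex: int
        text: Optional[str] = None

    def fold_inserts(ops):
        """Fold Ins ops into overlapping Rep ops and return an index -> op map."""
        kept = []
        for op in ops:
            if isinstance(op, Ins):
                rep = next((r for r in kept
                            if isinstance(r, Rep) and r.index <= op.index <= r.lastIndex),
                           None)
                if rep is not None:
                    if op.index == rep.index:
                        # same index: the insert text goes in front of the replace text
                        rep.text = op.text + (rep.text or "")
                    # otherwise the insert falls inside the replaced range and is dropped
                    continue
            kept.append(op)
        return {op.index: op for op in kept}

    # The docstring's "I.2 R.2-2" case: insert before token 2, then delete tokens 2..2.
    print(fold_inserts([Rep(2, 2), Ins(2, "x")]))
    # {2: Rep(index=2, lastIndex=2, text='x')}

The real method additionally merges overlapping deletes and raises ValueError on genuinely
conflicting replace ranges, as shown above.
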
diff --git a/antlr-3.4/runtime/Python/antlr3/tokens.py b/runtime/Python/antlr3/tokens.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/tokens.py
rename to runtime/Python/antlr3/tokens.py
diff --git a/antlr-3.4/runtime/Python/antlr3/tree.py b/runtime/Python/antlr3/tree.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/tree.py
rename to runtime/Python/antlr3/tree.py
diff --git a/antlr-3.4/runtime/Python/antlr3/treewizard.py b/runtime/Python/antlr3/treewizard.py
similarity index 100%
rename from antlr-3.4/runtime/Python/antlr3/treewizard.py
rename to runtime/Python/antlr3/treewizard.py
diff --git a/antlr-3.4/runtime/Python/dist/antlr_python_runtime-3.4-py2.4.egg b/runtime/Python/dist/antlr_python_runtime-3.4-py2.4.egg
similarity index 100%
rename from antlr-3.4/runtime/Python/dist/antlr_python_runtime-3.4-py2.4.egg
rename to runtime/Python/dist/antlr_python_runtime-3.4-py2.4.egg
Binary files differ
diff --git a/antlr-3.4/runtime/Python/dist/antlr_python_runtime-3.4-py2.6.egg b/runtime/Python/dist/antlr_python_runtime-3.4-py2.6.egg
similarity index 100%
rename from antlr-3.4/runtime/Python/dist/antlr_python_runtime-3.4-py2.6.egg
rename to runtime/Python/dist/antlr_python_runtime-3.4-py2.6.egg
Binary files differ
diff --git a/antlr-3.4/runtime/Python/dist/antlr_python_runtime-3.4.tar.gz b/runtime/Python/dist/antlr_python_runtime-3.4.tar.gz
similarity index 100%
rename from antlr-3.4/runtime/Python/dist/antlr_python_runtime-3.4.tar.gz
rename to runtime/Python/dist/antlr_python_runtime-3.4.tar.gz
Binary files differ
diff --git a/antlr-3.4/runtime/Python/dist/antlr_python_runtime-3.4.zip b/runtime/Python/dist/antlr_python_runtime-3.4.zip
similarity index 100%
rename from antlr-3.4/runtime/Python/dist/antlr_python_runtime-3.4.zip
rename to runtime/Python/dist/antlr_python_runtime-3.4.zip
Binary files differ
diff --git a/antlr-3.4/runtime/Python/doxyfile b/runtime/Python/doxyfile
similarity index 100%
rename from antlr-3.4/runtime/Python/doxyfile
rename to runtime/Python/doxyfile
diff --git a/antlr-3.4/runtime/Python/ez_setup.py b/runtime/Python/ez_setup.py
similarity index 100%
rename from antlr-3.4/runtime/Python/ez_setup.py
rename to runtime/Python/ez_setup.py
diff --git a/antlr-3.4/runtime/Python/hudson-build.sh b/runtime/Python/hudson-build.sh
similarity index 100%
rename from antlr-3.4/runtime/Python/hudson-build.sh
rename to runtime/Python/hudson-build.sh
diff --git a/antlr-3.4/runtime/Python/mkdoxy.sh b/runtime/Python/mkdoxy.sh
similarity index 100%
rename from antlr-3.4/runtime/Python/mkdoxy.sh
rename to runtime/Python/mkdoxy.sh
diff --git a/antlr-3.4/runtime/Python/pylintrc b/runtime/Python/pylintrc
similarity index 100%
rename from antlr-3.4/runtime/Python/pylintrc
rename to runtime/Python/pylintrc
diff --git a/antlr-3.4/runtime/Python/setup.py b/runtime/Python/setup.py
similarity index 100%
rename from antlr-3.4/runtime/Python/setup.py
rename to runtime/Python/setup.py
diff --git a/antlr-3.4/runtime/Python/tests/t001lexer.g b/runtime/Python/tests/t001lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t001lexer.g
rename to runtime/Python/tests/t001lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t001lexer.py b/runtime/Python/tests/t001lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t001lexer.py
rename to runtime/Python/tests/t001lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t002lexer.g b/runtime/Python/tests/t002lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t002lexer.g
rename to runtime/Python/tests/t002lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t002lexer.py b/runtime/Python/tests/t002lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t002lexer.py
rename to runtime/Python/tests/t002lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t003lexer.g b/runtime/Python/tests/t003lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t003lexer.g
rename to runtime/Python/tests/t003lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t003lexer.py b/runtime/Python/tests/t003lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t003lexer.py
rename to runtime/Python/tests/t003lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t004lexer.g b/runtime/Python/tests/t004lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t004lexer.g
rename to runtime/Python/tests/t004lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t004lexer.py b/runtime/Python/tests/t004lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t004lexer.py
rename to runtime/Python/tests/t004lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t005lexer.g b/runtime/Python/tests/t005lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t005lexer.g
rename to runtime/Python/tests/t005lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t005lexer.py b/runtime/Python/tests/t005lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t005lexer.py
rename to runtime/Python/tests/t005lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t006lexer.g b/runtime/Python/tests/t006lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t006lexer.g
rename to runtime/Python/tests/t006lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t006lexer.py b/runtime/Python/tests/t006lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t006lexer.py
rename to runtime/Python/tests/t006lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t007lexer.g b/runtime/Python/tests/t007lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t007lexer.g
rename to runtime/Python/tests/t007lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t007lexer.py b/runtime/Python/tests/t007lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t007lexer.py
rename to runtime/Python/tests/t007lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t008lexer.g b/runtime/Python/tests/t008lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t008lexer.g
rename to runtime/Python/tests/t008lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t008lexer.py b/runtime/Python/tests/t008lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t008lexer.py
rename to runtime/Python/tests/t008lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t009lexer.g b/runtime/Python/tests/t009lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t009lexer.g
rename to runtime/Python/tests/t009lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t009lexer.py b/runtime/Python/tests/t009lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t009lexer.py
rename to runtime/Python/tests/t009lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t010lexer.g b/runtime/Python/tests/t010lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t010lexer.g
rename to runtime/Python/tests/t010lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t010lexer.py b/runtime/Python/tests/t010lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t010lexer.py
rename to runtime/Python/tests/t010lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t011lexer.g b/runtime/Python/tests/t011lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t011lexer.g
rename to runtime/Python/tests/t011lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t011lexer.py b/runtime/Python/tests/t011lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t011lexer.py
rename to runtime/Python/tests/t011lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXML.input b/runtime/Python/tests/t012lexerXML.input
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t012lexerXML.input
rename to runtime/Python/tests/t012lexerXML.input
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXML.output b/runtime/Python/tests/t012lexerXML.output
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t012lexerXML.output
rename to runtime/Python/tests/t012lexerXML.output
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXML.py b/runtime/Python/tests/t012lexerXML.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t012lexerXML.py
rename to runtime/Python/tests/t012lexerXML.py
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXMLLexer.g b/runtime/Python/tests/t012lexerXMLLexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t012lexerXMLLexer.g
rename to runtime/Python/tests/t012lexerXMLLexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t013parser.g b/runtime/Python/tests/t013parser.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t013parser.g
rename to runtime/Python/tests/t013parser.g
diff --git a/antlr-3.4/runtime/Python/tests/t013parser.py b/runtime/Python/tests/t013parser.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t013parser.py
rename to runtime/Python/tests/t013parser.py
diff --git a/antlr-3.4/runtime/Python/tests/t014parser.g b/runtime/Python/tests/t014parser.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t014parser.g
rename to runtime/Python/tests/t014parser.g
diff --git a/antlr-3.4/runtime/Python/tests/t014parser.py b/runtime/Python/tests/t014parser.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t014parser.py
rename to runtime/Python/tests/t014parser.py
diff --git a/antlr-3.4/runtime/Python/tests/t015calc.g b/runtime/Python/tests/t015calc.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t015calc.g
rename to runtime/Python/tests/t015calc.g
diff --git a/antlr-3.4/runtime/Python/tests/t015calc.py b/runtime/Python/tests/t015calc.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t015calc.py
rename to runtime/Python/tests/t015calc.py
diff --git a/antlr-3.4/runtime/Python/tests/t016actions.g b/runtime/Python/tests/t016actions.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t016actions.g
rename to runtime/Python/tests/t016actions.g
diff --git a/antlr-3.4/runtime/Python/tests/t016actions.py b/runtime/Python/tests/t016actions.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t016actions.py
rename to runtime/Python/tests/t016actions.py
diff --git a/antlr-3.4/runtime/Python/tests/t017parser.g b/runtime/Python/tests/t017parser.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t017parser.g
rename to runtime/Python/tests/t017parser.g
diff --git a/antlr-3.4/runtime/Python/tests/t017parser.py b/runtime/Python/tests/t017parser.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t017parser.py
rename to runtime/Python/tests/t017parser.py
diff --git a/antlr-3.4/runtime/Python/tests/t018llstar.g b/runtime/Python/tests/t018llstar.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t018llstar.g
rename to runtime/Python/tests/t018llstar.g
diff --git a/antlr-3.4/runtime/Python/tests/t018llstar.input b/runtime/Python/tests/t018llstar.input
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t018llstar.input
rename to runtime/Python/tests/t018llstar.input
diff --git a/antlr-3.4/runtime/Python/tests/t018llstar.output b/runtime/Python/tests/t018llstar.output
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t018llstar.output
rename to runtime/Python/tests/t018llstar.output
diff --git a/antlr-3.4/runtime/Python/tests/t018llstar.py b/runtime/Python/tests/t018llstar.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t018llstar.py
rename to runtime/Python/tests/t018llstar.py
diff --git a/antlr-3.4/runtime/Python/tests/t019lexer.g b/runtime/Python/tests/t019lexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t019lexer.g
rename to runtime/Python/tests/t019lexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t019lexer.input b/runtime/Python/tests/t019lexer.input
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t019lexer.input
rename to runtime/Python/tests/t019lexer.input
diff --git a/antlr-3.4/runtime/Python/tests/t019lexer.py b/runtime/Python/tests/t019lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t019lexer.py
rename to runtime/Python/tests/t019lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t020fuzzy.input b/runtime/Python/tests/t020fuzzy.input
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t020fuzzy.input
rename to runtime/Python/tests/t020fuzzy.input
diff --git a/antlr-3.4/runtime/Python/tests/t020fuzzy.output b/runtime/Python/tests/t020fuzzy.output
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t020fuzzy.output
rename to runtime/Python/tests/t020fuzzy.output
diff --git a/antlr-3.4/runtime/Python/tests/t020fuzzy.py b/runtime/Python/tests/t020fuzzy.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t020fuzzy.py
rename to runtime/Python/tests/t020fuzzy.py
diff --git a/antlr-3.4/runtime/Python/tests/t020fuzzyLexer.g b/runtime/Python/tests/t020fuzzyLexer.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t020fuzzyLexer.g
rename to runtime/Python/tests/t020fuzzyLexer.g
diff --git a/antlr-3.4/runtime/Python/tests/t021hoist.g b/runtime/Python/tests/t021hoist.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t021hoist.g
rename to runtime/Python/tests/t021hoist.g
diff --git a/antlr-3.4/runtime/Python/tests/t021hoist.py b/runtime/Python/tests/t021hoist.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t021hoist.py
rename to runtime/Python/tests/t021hoist.py
diff --git a/antlr-3.4/runtime/Python/tests/t022scopes.g b/runtime/Python/tests/t022scopes.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t022scopes.g
rename to runtime/Python/tests/t022scopes.g
diff --git a/antlr-3.4/runtime/Python/tests/t022scopes.py b/runtime/Python/tests/t022scopes.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t022scopes.py
rename to runtime/Python/tests/t022scopes.py
diff --git a/antlr-3.4/runtime/Python/tests/t023scopes.g b/runtime/Python/tests/t023scopes.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t023scopes.g
rename to runtime/Python/tests/t023scopes.g
diff --git a/antlr-3.4/runtime/Python/tests/t023scopes.py b/runtime/Python/tests/t023scopes.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t023scopes.py
rename to runtime/Python/tests/t023scopes.py
diff --git a/antlr-3.4/runtime/Python/tests/t024finally.g b/runtime/Python/tests/t024finally.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t024finally.g
rename to runtime/Python/tests/t024finally.g
diff --git a/antlr-3.4/runtime/Python/tests/t024finally.py b/runtime/Python/tests/t024finally.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t024finally.py
rename to runtime/Python/tests/t024finally.py
diff --git a/antlr-3.4/runtime/Python/tests/t025lexerRulePropertyRef.g b/runtime/Python/tests/t025lexerRulePropertyRef.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t025lexerRulePropertyRef.g
rename to runtime/Python/tests/t025lexerRulePropertyRef.g
diff --git a/antlr-3.4/runtime/Python/tests/t025lexerRulePropertyRef.py b/runtime/Python/tests/t025lexerRulePropertyRef.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t025lexerRulePropertyRef.py
rename to runtime/Python/tests/t025lexerRulePropertyRef.py
diff --git a/antlr-3.4/runtime/Python/tests/t026actions.g b/runtime/Python/tests/t026actions.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t026actions.g
rename to runtime/Python/tests/t026actions.g
diff --git a/antlr-3.4/runtime/Python/tests/t026actions.py b/runtime/Python/tests/t026actions.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t026actions.py
rename to runtime/Python/tests/t026actions.py
diff --git a/antlr-3.4/runtime/Python/tests/t027eof.g b/runtime/Python/tests/t027eof.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t027eof.g
rename to runtime/Python/tests/t027eof.g
diff --git a/antlr-3.4/runtime/Python/tests/t027eof.py b/runtime/Python/tests/t027eof.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t027eof.py
rename to runtime/Python/tests/t027eof.py
diff --git a/antlr-3.4/runtime/Python/tests/t028labelExpr.g.disabled b/runtime/Python/tests/t028labelExpr.g.disabled
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t028labelExpr.g.disabled
rename to runtime/Python/tests/t028labelExpr.g.disabled
diff --git a/antlr-3.4/runtime/Python/tests/t029synpredgate.g b/runtime/Python/tests/t029synpredgate.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t029synpredgate.g
rename to runtime/Python/tests/t029synpredgate.g
diff --git a/antlr-3.4/runtime/Python/tests/t029synpredgate.py b/runtime/Python/tests/t029synpredgate.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t029synpredgate.py
rename to runtime/Python/tests/t029synpredgate.py
diff --git a/antlr-3.4/runtime/Python/tests/t030specialStates.g b/runtime/Python/tests/t030specialStates.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t030specialStates.g
rename to runtime/Python/tests/t030specialStates.g
diff --git a/antlr-3.4/runtime/Python/tests/t030specialStates.py b/runtime/Python/tests/t030specialStates.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t030specialStates.py
rename to runtime/Python/tests/t030specialStates.py
diff --git a/antlr-3.4/runtime/Python/tests/t031emptyAlt.g b/runtime/Python/tests/t031emptyAlt.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t031emptyAlt.g
rename to runtime/Python/tests/t031emptyAlt.g
diff --git a/antlr-3.4/runtime/Python/tests/t031emptyAlt.py b/runtime/Python/tests/t031emptyAlt.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t031emptyAlt.py
rename to runtime/Python/tests/t031emptyAlt.py
diff --git a/antlr-3.4/runtime/Python/tests/t032subrulePredict.g b/runtime/Python/tests/t032subrulePredict.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t032subrulePredict.g
rename to runtime/Python/tests/t032subrulePredict.g
diff --git a/antlr-3.4/runtime/Python/tests/t032subrulePredict.py b/runtime/Python/tests/t032subrulePredict.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t032subrulePredict.py
rename to runtime/Python/tests/t032subrulePredict.py
diff --git a/antlr-3.4/runtime/Python/tests/t033backtracking.g b/runtime/Python/tests/t033backtracking.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t033backtracking.g
rename to runtime/Python/tests/t033backtracking.g
diff --git a/antlr-3.4/runtime/Python/tests/t033backtracking.py b/runtime/Python/tests/t033backtracking.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t033backtracking.py
rename to runtime/Python/tests/t033backtracking.py
diff --git a/antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.g b/runtime/Python/tests/t034tokenLabelPropertyRef.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.g
rename to runtime/Python/tests/t034tokenLabelPropertyRef.g
diff --git a/antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.py b/runtime/Python/tests/t034tokenLabelPropertyRef.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.py
rename to runtime/Python/tests/t034tokenLabelPropertyRef.py
diff --git a/antlr-3.4/runtime/Python/tests/t035ruleLabelPropertyRef.g b/runtime/Python/tests/t035ruleLabelPropertyRef.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t035ruleLabelPropertyRef.g
rename to runtime/Python/tests/t035ruleLabelPropertyRef.g
diff --git a/antlr-3.4/runtime/Python/tests/t035ruleLabelPropertyRef.py b/runtime/Python/tests/t035ruleLabelPropertyRef.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t035ruleLabelPropertyRef.py
rename to runtime/Python/tests/t035ruleLabelPropertyRef.py
diff --git a/antlr-3.4/runtime/Python/tests/t036multipleReturnValues.g b/runtime/Python/tests/t036multipleReturnValues.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t036multipleReturnValues.g
rename to runtime/Python/tests/t036multipleReturnValues.g
diff --git a/antlr-3.4/runtime/Python/tests/t036multipleReturnValues.py b/runtime/Python/tests/t036multipleReturnValues.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t036multipleReturnValues.py
rename to runtime/Python/tests/t036multipleReturnValues.py
diff --git a/antlr-3.4/runtime/Python/tests/t037rulePropertyRef.g b/runtime/Python/tests/t037rulePropertyRef.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t037rulePropertyRef.g
rename to runtime/Python/tests/t037rulePropertyRef.g
diff --git a/antlr-3.4/runtime/Python/tests/t037rulePropertyRef.py b/runtime/Python/tests/t037rulePropertyRef.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t037rulePropertyRef.py
rename to runtime/Python/tests/t037rulePropertyRef.py
diff --git a/antlr-3.4/runtime/Python/tests/t038lexerRuleLabel.g b/runtime/Python/tests/t038lexerRuleLabel.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t038lexerRuleLabel.g
rename to runtime/Python/tests/t038lexerRuleLabel.g
diff --git a/antlr-3.4/runtime/Python/tests/t038lexerRuleLabel.py b/runtime/Python/tests/t038lexerRuleLabel.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t038lexerRuleLabel.py
rename to runtime/Python/tests/t038lexerRuleLabel.py
diff --git a/antlr-3.4/runtime/Python/tests/t039labels.g b/runtime/Python/tests/t039labels.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t039labels.g
rename to runtime/Python/tests/t039labels.g
diff --git a/antlr-3.4/runtime/Python/tests/t039labels.py b/runtime/Python/tests/t039labels.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t039labels.py
rename to runtime/Python/tests/t039labels.py
diff --git a/antlr-3.4/runtime/Python/tests/t040bug80.g b/runtime/Python/tests/t040bug80.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t040bug80.g
rename to runtime/Python/tests/t040bug80.g
diff --git a/antlr-3.4/runtime/Python/tests/t040bug80.py b/runtime/Python/tests/t040bug80.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t040bug80.py
rename to runtime/Python/tests/t040bug80.py
diff --git a/antlr-3.4/runtime/Python/tests/t041parameters.g b/runtime/Python/tests/t041parameters.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t041parameters.g
rename to runtime/Python/tests/t041parameters.g
diff --git a/antlr-3.4/runtime/Python/tests/t041parameters.py b/runtime/Python/tests/t041parameters.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t041parameters.py
rename to runtime/Python/tests/t041parameters.py
diff --git a/antlr-3.4/runtime/Python/tests/t042ast.g b/runtime/Python/tests/t042ast.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t042ast.g
rename to runtime/Python/tests/t042ast.g
diff --git a/antlr-3.4/runtime/Python/tests/t042ast.py b/runtime/Python/tests/t042ast.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t042ast.py
rename to runtime/Python/tests/t042ast.py
diff --git a/antlr-3.4/runtime/Python/tests/t043synpred.g b/runtime/Python/tests/t043synpred.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t043synpred.g
rename to runtime/Python/tests/t043synpred.g
diff --git a/antlr-3.4/runtime/Python/tests/t043synpred.py b/runtime/Python/tests/t043synpred.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t043synpred.py
rename to runtime/Python/tests/t043synpred.py
diff --git a/antlr-3.4/runtime/Python/tests/t044trace.g b/runtime/Python/tests/t044trace.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t044trace.g
rename to runtime/Python/tests/t044trace.g
diff --git a/antlr-3.4/runtime/Python/tests/t044trace.py b/runtime/Python/tests/t044trace.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t044trace.py
rename to runtime/Python/tests/t044trace.py
diff --git a/antlr-3.4/runtime/Python/tests/t045dfabug.g b/runtime/Python/tests/t045dfabug.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t045dfabug.g
rename to runtime/Python/tests/t045dfabug.g
diff --git a/antlr-3.4/runtime/Python/tests/t045dfabug.py b/runtime/Python/tests/t045dfabug.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t045dfabug.py
rename to runtime/Python/tests/t045dfabug.py
diff --git a/antlr-3.4/runtime/Python/tests/t046rewrite.g b/runtime/Python/tests/t046rewrite.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t046rewrite.g
rename to runtime/Python/tests/t046rewrite.g
diff --git a/antlr-3.4/runtime/Python/tests/t046rewrite.py b/runtime/Python/tests/t046rewrite.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t046rewrite.py
rename to runtime/Python/tests/t046rewrite.py
diff --git a/antlr-3.4/runtime/Python/tests/t047treeparser.g b/runtime/Python/tests/t047treeparser.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t047treeparser.g
rename to runtime/Python/tests/t047treeparser.g
diff --git a/antlr-3.4/runtime/Python/tests/t047treeparser.py b/runtime/Python/tests/t047treeparser.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t047treeparser.py
rename to runtime/Python/tests/t047treeparser.py
diff --git a/antlr-3.4/runtime/Python/tests/t047treeparserWalker.g b/runtime/Python/tests/t047treeparserWalker.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t047treeparserWalker.g
rename to runtime/Python/tests/t047treeparserWalker.g
diff --git a/antlr-3.4/runtime/Python/tests/t048rewrite.g b/runtime/Python/tests/t048rewrite.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t048rewrite.g
rename to runtime/Python/tests/t048rewrite.g
diff --git a/antlr-3.4/runtime/Python/tests/t048rewrite.py b/runtime/Python/tests/t048rewrite.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t048rewrite.py
rename to runtime/Python/tests/t048rewrite.py
diff --git a/antlr-3.4/runtime/Python/tests/t048rewrite2.g b/runtime/Python/tests/t048rewrite2.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t048rewrite2.g
rename to runtime/Python/tests/t048rewrite2.g
diff --git a/antlr-3.4/runtime/Python/tests/t049treeparser.py b/runtime/Python/tests/t049treeparser.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t049treeparser.py
rename to runtime/Python/tests/t049treeparser.py
diff --git a/antlr-3.4/runtime/Python/tests/t050decorate.g b/runtime/Python/tests/t050decorate.g
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t050decorate.g
rename to runtime/Python/tests/t050decorate.g
diff --git a/antlr-3.4/runtime/Python/tests/t050decorate.py b/runtime/Python/tests/t050decorate.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t050decorate.py
rename to runtime/Python/tests/t050decorate.py
diff --git a/antlr-3.4/runtime/Python/tests/t051treeRewriteAST.py b/runtime/Python/tests/t051treeRewriteAST.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t051treeRewriteAST.py
rename to runtime/Python/tests/t051treeRewriteAST.py
diff --git a/antlr-3.4/runtime/Python/tests/t052import.py b/runtime/Python/tests/t052import.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t052import.py
rename to runtime/Python/tests/t052import.py
diff --git a/antlr-3.4/runtime/Python/tests/t053hetero.py b/runtime/Python/tests/t053hetero.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t053hetero.py
rename to runtime/Python/tests/t053hetero.py
diff --git a/antlr-3.4/runtime/Python/tests/t054main.py b/runtime/Python/tests/t054main.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t054main.py
rename to runtime/Python/tests/t054main.py
diff --git a/antlr-3.4/runtime/Python/tests/t055templates.py b/runtime/Python/tests/t055templates.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t055templates.py
rename to runtime/Python/tests/t055templates.py
diff --git a/antlr-3.4/runtime/Python/tests/t056lexer.py b/runtime/Python/tests/t056lexer.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t056lexer.py
rename to runtime/Python/tests/t056lexer.py
diff --git a/antlr-3.4/runtime/Python/tests/t057autoAST.py b/runtime/Python/tests/t057autoAST.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t057autoAST.py
rename to runtime/Python/tests/t057autoAST.py
diff --git a/antlr-3.4/runtime/Python/tests/t058rewriteAST.py b/runtime/Python/tests/t058rewriteAST.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t058rewriteAST.py
rename to runtime/Python/tests/t058rewriteAST.py
diff --git a/antlr-3.4/runtime/Python/tests/t059debug.py b/runtime/Python/tests/t059debug.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t059debug.py
rename to runtime/Python/tests/t059debug.py
diff --git a/antlr-3.4/runtime/Python/tests/t060leftrecursion.py b/runtime/Python/tests/t060leftrecursion.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/t060leftrecursion.py
rename to runtime/Python/tests/t060leftrecursion.py
diff --git a/antlr-3.4/runtime/Python/tests/testbase.py b/runtime/Python/tests/testbase.py
similarity index 100%
rename from antlr-3.4/runtime/Python/tests/testbase.py
rename to runtime/Python/tests/testbase.py
diff --git a/antlr-3.4/runtime/Python/unittests/testantlr3.py b/runtime/Python/unittests/testantlr3.py
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/testantlr3.py
rename to runtime/Python/unittests/testantlr3.py
diff --git a/antlr-3.4/runtime/Python/unittests/testbase.py b/runtime/Python/unittests/testbase.py
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/testbase.py
rename to runtime/Python/unittests/testbase.py
diff --git a/antlr-3.4/runtime/Python/unittests/testdfa.py b/runtime/Python/unittests/testdfa.py
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/testdfa.py
rename to runtime/Python/unittests/testdfa.py
diff --git a/antlr-3.4/runtime/Python/unittests/testdottreegen.py b/runtime/Python/unittests/testdottreegen.py
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/testdottreegen.py
rename to runtime/Python/unittests/testdottreegen.py
diff --git a/antlr-3.4/runtime/Python/unittests/testexceptions.py b/runtime/Python/unittests/testexceptions.py
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/testexceptions.py
rename to runtime/Python/unittests/testexceptions.py
diff --git a/antlr-3.4/runtime/Python/unittests/testrecognizers.py b/runtime/Python/unittests/testrecognizers.py
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/testrecognizers.py
rename to runtime/Python/unittests/testrecognizers.py
diff --git a/antlr-3.4/runtime/Python/unittests/teststreams.input1 b/runtime/Python/unittests/teststreams.input1
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/teststreams.input1
rename to runtime/Python/unittests/teststreams.input1
diff --git a/antlr-3.4/runtime/Python/unittests/teststreams.input2 b/runtime/Python/unittests/teststreams.input2
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/teststreams.input2
rename to runtime/Python/unittests/teststreams.input2
diff --git a/antlr-3.4/runtime/Python/unittests/teststreams.py b/runtime/Python/unittests/teststreams.py
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/teststreams.py
rename to runtime/Python/unittests/teststreams.py
diff --git a/antlr-3.4/runtime/Python/unittests/testtree.py b/runtime/Python/unittests/testtree.py
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/testtree.py
rename to runtime/Python/unittests/testtree.py
diff --git a/antlr-3.4/runtime/Python/unittests/testtreewizard.py b/runtime/Python/unittests/testtreewizard.py
similarity index 100%
rename from antlr-3.4/runtime/Python/unittests/testtreewizard.py
rename to runtime/Python/unittests/testtreewizard.py
diff --git a/antlr-3.4/runtime/Python/xmlrunner.py b/runtime/Python/xmlrunner.py
similarity index 100%
rename from antlr-3.4/runtime/Python/xmlrunner.py
rename to runtime/Python/xmlrunner.py
diff --git a/runtime/Python3/.gitignore b/runtime/Python3/.gitignore
new file mode 100644
index 0000000..1868f2a
--- /dev/null
+++ b/runtime/Python3/.gitignore
@@ -0,0 +1,4 @@
+.*.swp
+*~
+*.pyc
+*.gz
diff --git a/runtime/Python3/AUTHORS b/runtime/Python3/AUTHORS
new file mode 100644
index 0000000..5040e43
--- /dev/null
+++ b/runtime/Python3/AUTHORS
@@ -0,0 +1,6 @@
+Python target:
+Benjamin Niemann <pink at odahoda dot de>: Main developer of Python target.
+Clinton Roy <clinton.roy at gmail dot com>: AST templates and runtime.
+
+Python3 target:
+Benjamin S Wolf (http://github.com/Zannick): Converted Python target to Python3.
diff --git a/runtime/Python3/ChangeLog b/runtime/Python3/ChangeLog
new file mode 100644
index 0000000..ff5113f
--- /dev/null
+++ b/runtime/Python3/ChangeLog
@@ -0,0 +1,58 @@
+2012-06-26  Benjamin S Wolf  <jokeserver+antlr3@gmail.com>
+
+	Initial Python3 target, branched from the Python target by Benjamin
+	Niemann, with lots of code cleanup and minor refactoring.
+
+	* CodeGenerator.java, Python3.stg:
+	Generated code now uses set notation for setTest, rather than long
+	conditionals like "a == FOO or a == BAR or 10 <= a <= 12". This is
+	a (slight) performance improvement.
+
+	* tokens.py:
+	Token objects no longer have get/set methods for their attributes as I
+	switched them to use @property instead. The attributes should be accessed
+	directly.
+
+	* tokens.py, Python3.stg:
+	Fix a circular dependency in generated parsers, and give Token objects the
+	ability to return their typeName when asked for it. (The generated
+	recognizer gives Token the mapping from token type to type name.)
+
+2007-11-03  Benjamin Niemann  <pink@odahoda.de>
+
+	* PythonTarget.java, dfa.py, exceptions.py, recognizer.py, streams.py:
+	ANTLRStringStream.LA() now returns the character's ordinal and
+	generated lexers operate on integers. Also made various performance
+	tunings.
+
+2007-10-07  Benjamin Niemann  <pink@odahoda.de>
+
+	* main.py, Python.stg (outputFile): Added simple __main__ section to
+	generated code, so (simple) grammars can be executed as standalone
+	script.
+
+	* tree.py (RecognitionException.extractInformationFromTreeNodeStream),
+	exceptions.py (CommonTree): Small bugfixes.
+
+2007-09-30  Benjamin Niemann  <pink@odahoda.de>
+
+	* recognizers.py (TokenSource): Added iterator interface to TokenSource
+	class - and thus to Lexer.
+
+2007-06-27  Benjamin Niemann  <pink@odahoda.de>
+
+	* Python.stg (genericParser, parser, treeParser): Use correct @init
+	action block for tree parsers.
+
+2007-05-24  Benjamin Niemann  <pink@odahoda.de>
+
+	* Python.stg (rule): Added support for @decorate {...} action for
+	parser rules to add decorators to the rule method.
+
+2007-05-18  Benjamin Niemann  <pink@odahoda.de>
+
+	* Python.stg (isolatedLookaheadRangeTest, lookaheadRangeTest): 
+	Minor improvement of generated code (use '<lower> <= <LA> <= <upper>'
+	instead of '<LA> >= <lower> and <LA> <= <upper>').
+	
+
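
The two code-generation changes noted in the 2012-06-26 and 2007-05-18 entries are easiest to
see side by side. The snippet below is a hand-written illustration of the equivalent tests, not
actual generated code:

    LA1 = 11   # the lookahead value a generated recognizer would test

    # older style: a long boolean conditional
    old_test = LA1 == 5 or LA1 == 7 or (LA1 >= 10 and LA1 <= 12)

    # Python3 target style: set membership plus a chained range comparison
    new_test = LA1 in {5, 7} or 10 <= LA1 <= 12

    assert old_test == new_test
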
diff --git a/runtime/Python3/LICENSE b/runtime/Python3/LICENSE
new file mode 100644
index 0000000..66653dd
--- /dev/null
+++ b/runtime/Python3/LICENSE
@@ -0,0 +1,26 @@
+[The "BSD licence"]
+Copyright (c) 2003-2012 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/runtime/Python3/README b/runtime/Python3/README
new file mode 100644
index 0000000..821d4d7
--- /dev/null
+++ b/runtime/Python3/README
@@ -0,0 +1,81 @@
+1) ABOUT
+========
+
+This is the Python3 package 'antlr3', which is required to use parsers created
+by the ANTLR3 tool. See <http://www.antlr.org/> for more information about
+ANTLR3.
+
+
+2) STATUS
+=========
+
+The Python3 target for ANTLR3 is still in beta. Documentation is lacking, some
+bits of the code are not yet done, and some functionality has not been tested yet.
+Also the API might change a bit - it currently mimics the Java implementation,
+but it may be made a bit more pythonic here and there.
+
+WARNING: The runtime library is not compatible with recognizers generated by
+ANTLR versions preceding V3.4.x. If you are an application developer,
+then the suggested way to solve this is to package the correct runtime with
+your application. Installing the runtime in the global site-packages directory
+may not be a good idea.
+Sorry for the inconvenience.
+
+
+3) DOWNLOAD
+===========
+
+This runtime is part of the ANTLR distribution. The latest version can be found
+at <http://www.antlr.org/download.html>.
+
+If you are interested in the latest, most bleeding edge version, have a look at
+the git repository at <http://github.com/antlr/antlr3>.
+
+
+4) INSTALLATION
+===============
+
+Just like any other Python package:
+$ python3 setup.py install
+
+See <http://docs.python.org/inst/> for more information.
+
+
+5) DOCUMENTATION
+================
+
+Documentation (as far as it exists) can be found in the wiki
+<http://www.antlr.org/wiki/display/ANTLR3/Antlr3Python3Target>
+
+
+6) REPORTING BUGS
+=================
+
+Please file bug reports on github: <http://github.com/antlr/antlr3>.
+
+
+7) HACKING
+==========
+
+Only the runtime package can be found here. There are also some StringTemplate
+files in 'src/org/antlr/codegen/templates/Python3/' and some Java code in
+'src/org/antlr/codegen/Python3Target.java' (of the main ANTLR3 source
+distribution).
+
+If there are no directories 'tests' and 'unittests' in 'runtime/Python3', you
+should fetch the latest ANTLR3 version from the perforce depot. See section
+DOWNLOAD.
+You'll need Java and Ant in order to compile and use the tool.
+Be sure to properly set up your CLASSPATH.
+(FIXME: is there some generic information on how to build it yourself? I should
+point to it to avoid duplication.)
+
+You can then use the commands
+$ python3 setup.py unittest
+$ python3 setup.py functest
+to ensure that changes do not break existing behaviour.
+
+Please send patches as pull requests on github. For larger code contributions
+you'll have to sign the "Developer's Certificate of Origin", which can be
+found on <http://www.antlr.org/license.html> or use the feedback form at
+<http://www.antlr.org/misc/feedback>.
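
After "python3 setup.py install" the package can be smoke-tested without any generated
recognizer. The snippet below only relies on ANTLRStringStream behaviour described elsewhere in
this change (LA() returning the ordinal of the lookahead character), so treat it as a sketch
rather than canonical usage:

    import antlr3

    stream = antlr3.ANTLRStringStream("ab")
    print(chr(stream.LA(1)))   # 'a' -- LA() returns the character's ordinal
    stream.consume()
    print(chr(stream.LA(1)))   # 'b'
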
diff --git a/runtime/Python3/antlr3/__init__.py b/runtime/Python3/antlr3/__init__.py
new file mode 100644
index 0000000..73b215b
--- /dev/null
+++ b/runtime/Python3/antlr3/__init__.py
@@ -0,0 +1,152 @@
+""" @package antlr3
+@brief ANTLR3 runtime package
+
+This module contains all support classes, which are needed to use recognizers
+generated by ANTLR3.
+
+@mainpage
+
+\\note Please be warned that the line numbers in the API documentation do not
+match the real locations in the source code of the package. This is an
+unintended artifact of doxygen, which I could only convince to use the
+correct module names by concatenating all files from the package into a single
+module file...
+
+Here is a little overview of the most commonly used classes provided by
+this runtime:
+
+@section recognizers Recognizers
+
+These recognizers are base classes for the code that is generated by ANTLR3.
+
+- BaseRecognizer: Base class with common recognizer functionality.
+- Lexer: Base class for lexers.
+- Parser: Base class for parsers.
+- tree.TreeParser: Base class for %tree parsers.
+
+@section streams Streams
+
+Each recognizer pulls its input from one of the stream classes below. Streams
+handle stuff like buffering, look-ahead and seeking.
+
+A character stream is usually the first element in the pipeline of a typical
+ANTLR3 application. It is used as the input for a Lexer.
+
+- ANTLRStringStream: Reads from a string object. The input should be a unicode
+  object, or ANTLR3 will have trouble decoding non-ASCII data.
+- ANTLRFileStream: Opens a file and reads the contents, with optional character
+  decoding.
+- ANTLRInputStream: Reads the data from a file-like object, with optional
+  character decoding.
+
+A Parser needs a TokenStream as input (which in turn is usually fed by a
+Lexer):
+
+- CommonTokenStream: A basic and most commonly used TokenStream
+  implementation.
+- TokenRewriteStream: A modification of CommonTokenStream that allows the
+  stream to be altered (by the Parser). See the 'tweak' example for a usecase.
+
+And tree.TreeParser finally fetches its input from a tree.TreeNodeStream:
+
+- tree.CommonTreeNodeStream: A basic and most commonly used tree.TreeNodeStream
+  implementation.
+  
+
+@section tokenstrees Tokens and Trees
+
+A Lexer emits Token objects which are usually buffered by a TokenStream. A
+Parser can build a Tree, if the output=AST option has been set in the grammar.
+
+The runtime provides these Token implementations:
+
+- CommonToken: A basic and most commonly used Token implementation.
+- ClassicToken: A Token object as used in ANTLR 2.x, used for %tree
+  construction.
+
+Tree objects are wrappers for Token objects.
+
+- tree.CommonTree: A basic and most commonly used Tree implementation.
+
+A tree.TreeAdaptor is used by the parser to create tree.Tree objects for the
+input Token objects.
+
+- tree.CommonTreeAdaptor: A basic and most commonly used tree.TreeAdaptor
+  implementation.
+
+
+@section Exceptions
+
+A RecognitionException is raised when a recognizer encounters incorrect
+or unexpected input.
+
+- RecognitionException
+  - MismatchedRangeException
+  - MismatchedSetException
+    - MismatchedNotSetException
+    .
+  - MismatchedTokenException
+  - MismatchedTreeNodeException
+  - NoViableAltException
+  - EarlyExitException
+  - FailedPredicateException
+  .
+.
+
+A tree.RewriteCardinalityException is raised when the parser hits a
+cardinality mismatch during AST construction. Although this is basically a
+bug in your grammar, it can only be detected at runtime.
+
+- tree.RewriteCardinalityException
+  - tree.RewriteEarlyExitException
+  - tree.RewriteEmptyStreamException
+  .
+.
+
+"""
+
+# tree.RewriteRuleElementStream
+# tree.RewriteRuleSubtreeStream
+# tree.RewriteRuleTokenStream
+# CharStream
+# DFA
+# TokenSource
+
+# [The "BSD licence"]
+# Copyright (c) 2005-2012 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+__version__ = '3.4'
+
+# This runtime is compatible with generated parsers using the
+# API versions listed in constants.compatible_api_versions.
+# 'HEAD' is only used by unittests.
+
+from .constants import *
+from .dfa import *
+from .exceptions import *
+from .recognizers import *
+from .streams import *
+from .tokens import *
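
The recognizer pipeline sketched in the docstring above (character stream -> Lexer ->
CommonTokenStream -> Parser) is typically wired up as follows. MyGrammarLexer, MyGrammarParser
and the start rule prog are hypothetical names for code the ANTLR tool would generate from a
grammar; only the antlr3 classes come from this runtime:

    import antlr3
    # hypothetical modules generated by running the ANTLR tool on MyGrammar.g
    from MyGrammarLexer import MyGrammarLexer
    from MyGrammarParser import MyGrammarParser

    char_stream = antlr3.ANTLRStringStream("1 + 2\n")  # input for the lexer
    lexer = MyGrammarLexer(char_stream)                # emits Token objects
    tokens = antlr3.CommonTokenStream(lexer)           # buffers tokens for the parser
    parser = MyGrammarParser(tokens)
    result = parser.prog()                             # hypothetical start rule
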
diff --git a/runtime/Python3/antlr3/constants.py b/runtime/Python3/antlr3/constants.py
new file mode 100644
index 0000000..f0203ee
--- /dev/null
+++ b/runtime/Python3/antlr3/constants.py
@@ -0,0 +1,59 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2012 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+compatible_api_versions = ['HEAD', 1]
+
+EOF = -1
+
+## All tokens go to the parser (unless skip() is called in that rule)
+# on a particular "channel".  The parser tunes to a particular channel
+# so that whitespace etc... can go to the parser on a "hidden" channel.
+DEFAULT_CHANNEL = 0
+
+## Anything on a channel other than DEFAULT_CHANNEL is not parsed
+# by the parser.
+HIDDEN_CHANNEL = 99
+
+# Predefined token types
+EOR_TOKEN_TYPE = 1
+
+##
+# imaginary tree navigation type; traverse "get child" link
+DOWN = 2
+##
+# imaginary tree navigation type; finish with a child list
+UP = 3
+
+MIN_TOKEN_TYPE = UP + 1
+	
+INVALID_TOKEN_TYPE = 0
+
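
The channel constants above are easiest to picture with a tiny filter. The loop below
illustrates the "parser tunes to DEFAULT_CHANNEL" idea with plain (text, channel) tuples; it is
not the actual CommonTokenStream logic:

    from antlr3.constants import DEFAULT_CHANNEL, HIDDEN_CHANNEL

    # tokens as a lexer might emit them, with whitespace routed to the hidden channel
    tokens = [("x", DEFAULT_CHANNEL), (" ", HIDDEN_CHANNEL),
              ("=", DEFAULT_CHANNEL), (" ", HIDDEN_CHANNEL),
              ("1", DEFAULT_CHANNEL)]

    visible = [text for text, channel in tokens if channel == DEFAULT_CHANNEL]
    print(visible)   # ['x', '=', '1'] -- only default-channel tokens reach the parser
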
diff --git a/runtime/Python3/antlr3/debug.py b/runtime/Python3/antlr3/debug.py
new file mode 100644
index 0000000..c309a36
--- /dev/null
+++ b/runtime/Python3/antlr3/debug.py
@@ -0,0 +1,1134 @@
+# begin[licence]
+#
+#  [The "BSD licence"]
+#  Copyright (c) 2005-2012 Terence Parr
+#  All rights reserved.
+
+#  Redistribution and use in source and binary forms, with or without
+#  modification, are permitted provided that the following conditions
+#  are met:
+#  1. Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#  2. Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#  3. The name of the author may not be used to endorse or promote products
+#     derived from this software without specific prior written permission.
+
+#  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+#  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+#  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+#  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+#  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+#  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+#  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+import socket
+import sys
+from .constants import INVALID_TOKEN_TYPE
+from .exceptions import RecognitionException
+from .recognizers import Parser
+from .streams import TokenStream
+from .tokens import Token
+from .tree import CommonTreeAdaptor, TreeAdaptor, Tree
+
+class DebugParser(Parser):
+    def __init__(self, stream, state=None, dbg=None, *args, **kwargs):
+        # wrap token stream in DebugTokenStream (unless user already did so).
+        if not isinstance(stream, DebugTokenStream):
+            stream = DebugTokenStream(stream, dbg)
+
+        super().__init__(stream, state, *args, **kwargs)
+
+        # Who to notify when events in the parser occur.
+        self._dbg = None
+
+        self.setDebugListener(dbg)
+
+
+    def setDebugListener(self, dbg):
+        """Provide a new debug event listener for this parser.  Notify the
+        input stream too that it should send events to this listener.
+        """
+
+        if hasattr(self.input, 'dbg'):
+            self.input.dbg = dbg
+
+        self._dbg = dbg
+
+    def getDebugListener(self):
+        return self._dbg
+
+    dbg = property(getDebugListener, setDebugListener)
+
+
+    def beginResync(self):
+        self._dbg.beginResync()
+
+
+    def endResync(self):
+        self._dbg.endResync()
+
+
+    def beginBacktrack(self, level):
+        self._dbg.beginBacktrack(level)
+
+
+    def endBacktrack(self, level, successful):
+        self._dbg.endBacktrack(level, successful)
+
+
+    def reportError(self, exc):
+        Parser.reportError(self, exc)
+
+        if isinstance(exc, RecognitionException):
+            self._dbg.recognitionException(exc)
+
+
+class DebugTokenStream(TokenStream):
+    def __init__(self, input, dbg=None):
+        super().__init__()
+        self.input = input
+        self.initialStreamState = True
+        # Track the last mark() call result value for use in rewind().
+        self.lastMarker = None
+
+        self._dbg = None
+        self.setDebugListener(dbg)
+
+        # force TokenStream to get at least first valid token
+        # so we know if there are any hidden tokens first in the stream
+        self.input.LT(1)
+
+
+    def getDebugListener(self):
+        return self._dbg
+
+    def setDebugListener(self, dbg):
+        self._dbg = dbg
+
+    dbg = property(getDebugListener, setDebugListener)
+
+
+    def consume(self):
+        if self.initialStreamState:
+            self.consumeInitialHiddenTokens()
+
+        a = self.input.index()
+        t = self.input.LT(1)
+        self.input.consume()
+        b = self.input.index()
+        self._dbg.consumeToken(t)
+
+        if b > a + 1:
+            # then we consumed more than one token; must be off channel tokens
+            for idx in range(a + 1, b):
+                self._dbg.consumeHiddenToken(self.input.get(idx))
+
+
+    def consumeInitialHiddenTokens(self):
+        """consume all initial off-channel tokens"""
+
+        firstOnChannelTokenIndex = self.input.index()
+        for idx in range(firstOnChannelTokenIndex):
+            self._dbg.consumeHiddenToken(self.input.get(idx))
+
+        self.initialStreamState = False
+
+
+    def LT(self, i):
+        if self.initialStreamState:
+            self.consumeInitialHiddenTokens()
+
+        t = self.input.LT(i)
+        self._dbg.LT(i, t)
+        return t
+
+
+    def LA(self, i):
+        if self.initialStreamState:
+            self.consumeInitialHiddenTokens()
+
+        t = self.input.LT(i)
+        self._dbg.LT(i, t)
+        return t.type
+
+
+    def get(self, i):
+        return self.input.get(i)
+
+
+    def index(self):
+        return self.input.index()
+
+
+    def mark(self):
+        self.lastMarker = self.input.mark()
+        self._dbg.mark(self.lastMarker)
+        return self.lastMarker
+
+
+    def rewind(self, marker=None):
+        self._dbg.rewind(marker)
+        self.input.rewind(marker)
+
+
+    def release(self, marker):
+        pass
+
+
+    def seek(self, index):
+        # TODO: implement seek in dbg interface
+        # self._dbg.seek(index);
+        self.input.seek(index)
+
+
+    def size(self):
+        return self.input.size()
+
+
+    def getTokenSource(self):
+        return self.input.getTokenSource()
+
+
+    def getSourceName(self):
+        return self.getTokenSource().getSourceName()
+
+
+    def toString(self, start=None, stop=None):
+        return self.input.toString(start, stop)
+
+
+class DebugTreeAdaptor(TreeAdaptor):
+    """A TreeAdaptor proxy that fires debugging events to a DebugEventListener
+    delegate and uses the TreeAdaptor delegate to do the actual work.  All
+    AST events are triggered by this adaptor; no code gen changes are needed
+    in generated rules.  Debugging events are triggered *after* invoking
+    tree adaptor routines.
+
+    Trees created with actions in rewrite actions like "-> ^(ADD {foo} {bar})"
+    cannot be tracked as they might not use the adaptor to create foo, bar.
+    The debug listener has to deal with tree node IDs for which it did
+    not see a createNode event.  A single <unknown> node is sufficient even
+    if it represents a whole tree.
+    """
+
+    def __init__(self, dbg, adaptor):
+        super().__init__()
+        self.dbg = dbg
+        self.adaptor = adaptor
+
+
+    def createWithPayload(self, payload):
+        if payload.index < 0:
+            # could be token conjured up during error recovery
+            return self.createFromType(payload.type, payload.text)
+
+        node = self.adaptor.createWithPayload(payload)
+        self.dbg.createNode(node, payload)
+        return node
+
+    def createFromToken(self, tokenType, fromToken, text=None):
+        node = self.adaptor.createFromToken(tokenType, fromToken, text)
+        self.dbg.createNode(node)
+        return node
+
+    def createFromType(self, tokenType, text):
+        node = self.adaptor.createFromType(tokenType, text)
+        self.dbg.createNode(node)
+        return node
+
+
+    def errorNode(self, input, start, stop, exc):
+        node = self.adaptor.errorNode(input, start, stop, exc)
+        if node is not None:
+            self.dbg.errorNode(node)
+
+        return node
+
+
+    def dupTree(self, tree):
+        t = self.adaptor.dupTree(tree)
+        # walk the tree and emit create and add child events
+        # to simulate what dupTree has done. dupTree does not call this debug
+        # adapter so I must simulate.
+        self.simulateTreeConstruction(t)
+        return t
+
+
+    def simulateTreeConstruction(self, t):
+        """^(A B C): emit create A, create B, add child, ..."""
+        self.dbg.createNode(t)
+        for i in range(self.adaptor.getChildCount(t)):
+            child = self.adaptor.getChild(t, i)
+            self.simulateTreeConstruction(child)
+            self.dbg.addChild(t, child)
+
+
+    def dupNode(self, treeNode):
+        d = self.adaptor.dupNode(treeNode)
+        self.dbg.createNode(d)
+        return d
+
+
+    def nil(self):
+        node = self.adaptor.nil()
+        self.dbg.nilNode(node)
+        return node
+
+
+    def isNil(self, tree):
+        return self.adaptor.isNil(tree)
+
+
+    def addChild(self, t, child):
+        if isinstance(child, Token):
+            n = self.createWithPayload(child)
+            self.addChild(t, n)
+
+        else:
+            if t is None or child is None:
+                return
+
+            self.adaptor.addChild(t, child)
+            self.dbg.addChild(t, child)
+
+    def becomeRoot(self, newRoot, oldRoot):
+        if isinstance(newRoot, Token):
+            n = self.createWithPayload(newRoot)
+            self.adaptor.becomeRoot(n, oldRoot)
+        else:
+            n = self.adaptor.becomeRoot(newRoot, oldRoot)
+
+        self.dbg.becomeRoot(newRoot, oldRoot)
+        return n
+
+
+    def rulePostProcessing(self, root):
+        return self.adaptor.rulePostProcessing(root)
+
+
+    def getType(self, t):
+        return self.adaptor.getType(t)
+
+
+    def setType(self, t, type):
+        self.adaptor.setType(t, type)
+
+
+    def getText(self, t):
+        return self.adaptor.getText(t)
+
+
+    def setText(self, t, text):
+        self.adaptor.setText(t, text)
+
+
+    def getToken(self, t):
+        return self.adaptor.getToken(t)
+
+
+    def setTokenBoundaries(self, t, startToken, stopToken):
+        self.adaptor.setTokenBoundaries(t, startToken, stopToken)
+        if t and startToken and stopToken:
+            self.dbg.setTokenBoundaries(
+                t, startToken.index, stopToken.index)
+
+
+    def getTokenStartIndex(self, t):
+        return self.adaptor.getTokenStartIndex(t)
+
+
+    def getTokenStopIndex(self, t):
+        return self.adaptor.getTokenStopIndex(t)
+
+
+    def getChild(self, t, i):
+        return self.adaptor.getChild(t, i)
+
+
+    def setChild(self, t, i, child):
+        self.adaptor.setChild(t, i, child)
+
+
+    def deleteChild(self, t, i):
+        return self.adaptor.deleteChild(t, i)
+
+
+    def getChildCount(self, t):
+        return self.adaptor.getChildCount(t)
+
+
+    def getUniqueID(self, node):
+        return self.adaptor.getUniqueID(node)
+
+
+    def getParent(self, t):
+        return self.adaptor.getParent(t)
+
+
+    def getChildIndex(self, t):
+        return self.adaptor.getChildIndex(t)
+
+
+    def setParent(self, t, parent):
+        self.adaptor.setParent(t, parent)
+
+
+    def setChildIndex(self, t, index):
+        self.adaptor.setChildIndex(t, index)
+
+
+    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
+        self.adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t)
+
+
+    ## support
+
+    def getDebugListener(self):
+        return self.dbg
+
+    def setDebugListener(self, dbg):
+        self.dbg = dbg
+
+
+    def getTreeAdaptor(self):
+        return self.adaptor
+
+
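+# Usage sketch (editor's note, not part of the original runtime): generated
+# debug parsers install this wrapper themselves; by hand it would look
+# roughly like
+#
+#     adaptor = DebugTreeAdaptor(dbg=listener, adaptor=CommonTreeAdaptor())
+#     parser.setTreeAdaptor(adaptor)
+#
+# where "listener" is any DebugEventListener and "parser" an AST-building
+# generated parser (both hypothetical); CommonTreeAdaptor comes from
+# antlr3.tree.
+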
+
+class DebugEventListener(object):
+    """All debugging events that a recognizer can trigger.
+
+    I did not create a separate AST debugging interface as it would create
+    lots of extra classes and DebugParser has a dbg var defined, which makes
+    it hard to change to ASTDebugEventListener.  I looked hard at this issue
+    and it is easier to understand as one monolithic event interface for all
+    possible events.  Hopefully, adding ST debugging stuff won't be bad.  Leave
+    for future. 4/26/2006.
+    """
+
+    # Moved to version 2 for v3.1: added grammar name to enter/exit Rule
+    PROTOCOL_VERSION = "2"
+
+    def enterRule(self, grammarFileName, ruleName):
+        """The parser has just entered a rule. No decision has been made about
+        which alt is predicted.  This is fired AFTER init actions have been
+        executed.  Attributes are defined and available etc...
+        The grammarFileName allows composite grammars to jump around among
+        multiple grammar files.
+        """
+
+        pass
+
+
+    def enterAlt(self, alt):
+        """Because rules can have lots of alternatives, it is very useful to
+        know which alt you are entering.  This is 1..n for n alts.
+        """
+        pass
+
+
+    def exitRule(self, grammarFileName, ruleName):
+        """This is the last thing executed before leaving a rule.  It is
+        executed even if an exception is thrown.  This is triggered after
+        error reporting and recovery have occurred (unless the exception is
+        not caught in this rule).  This implies an "exitAlt" event.
+        The grammarFileName allows composite grammars to jump around among
+        multiple grammar files.
+        """
+        pass
+
+
+    def enterSubRule(self, decisionNumber):
+        """Track entry into any (...) subrule other EBNF construct"""
+        pass
+
+
+    def exitSubRule(self, decisionNumber):
+        pass
+
+
+    def enterDecision(self, decisionNumber, couldBacktrack):
+        """Every decision, fixed k or arbitrary, has an enter/exit event
+        so that a GUI can easily track what LT/consume events are
+        associated with prediction.  You will see a single enter/exit
+        subrule but multiple enter/exit decision events, one for each
+        loop iteration.
+        """
+        pass
+
+
+    def exitDecision(self, decisionNumber):
+        pass
+
+
+    def consumeToken(self, t):
+        """An input token was consumed; matched by any kind of element.
+        Trigger after the token was matched by things like match(), matchAny().
+        """
+        pass
+
+
+    def consumeHiddenToken(self, t):
+        """An off-channel input token was consumed.
+        Trigger after the token was matched by things like match(), matchAny().
+        (unless of course the hidden token is first stuff in the input stream).
+        """
+        pass
+
+
+    def LT(self, i, t):
+        """Somebody (anybody) looked ahead.  Note that this actually gets
+        triggered by both LA and LT calls.  The debugger will want to know
+        which Token object was examined.  Like consumeToken, this indicates
+        what token was seen at that depth.  A remote debugger cannot look
+        ahead into a file it doesn't have so LT events must pass the token
+        even if the info is redundant.
+        For tree parsers, if the type is UP or DOWN,
+        then the ID is not really meaningful as it's fixed--there is
+        just one UP node and one DOWN navigation node.
+        """
+        pass
+
+
+    def mark(self, marker):
+        """The parser is going to look arbitrarily ahead; mark this location,
+        the token stream's marker is sent in case you need it.
+        """
+        pass
+
+
+    def rewind(self, marker=None):
+        """After an arbitrairly long lookahead as with a cyclic DFA (or with
+        any backtrack), this informs the debugger that stream should be
+        rewound to the position associated with marker.
+
+        """
+        pass
+
+
+    def beginBacktrack(self, level):
+        pass
+
+
+    def endBacktrack(self, level, successful):
+        pass
+
+
+    def location(self, line, pos):
+        """To watch a parser move through the grammar, the parser needs to
+        inform the debugger what line/charPos it is passing in the grammar.
+        For now, this does not know how to switch from one grammar to the
+        other and back for island grammars etc...
+
+        This should also allow breakpoints because the debugger can stop
+        the parser whenever it hits this line/pos.
+        """
+        pass
+
+
+    def recognitionException(self, e):
+        """A recognition exception occurred such as NoViableAltException.  I made
+        this a generic event so that I can alter the exception hierachy later
+        without having to alter all the debug objects.
+
+        Upon error, the stack of enter rule/subrule must be properly unwound.
+        If no viable alt occurs it is within an enter/exit decision, which
+        also must be rewound.  Even the rewind for each mark must be unwount.
+        In the Java target this is pretty easy using try/finally, if a bit
+        ugly in the generated code.  The rewind is generated in DFA.predict()
+        actually so no code needs to be generated for that.  For languages
+        w/o this "finally" feature (C++?), the target implementor will have
+        to build an event stack or something.
+
+        Across a socket for remote debugging, only the RecognitionException
+        data fields are transmitted.  The token object or whatever that
+        caused the problem was the last object referenced by LT.  The
+        immediately preceding LT event should hold the unexpected Token or
+        char.
+
+        Here is a sample event trace for grammar:
+
+        b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
+          | D
+          ;
+
+        The sequence for this rule (with no viable alt in the subrule) for
+        input 'c c' (there are 3 tokens) is:
+
+                commence
+                LT(1)
+                enterRule b
+                location 7 1
+                enter decision 3
+                LT(1)
+                exit decision 3
+                enterAlt1
+                location 7 5
+                LT(1)
+                consumeToken [c/<4>,1:0]
+                location 7 7
+                enterSubRule 2
+                enter decision 2
+                LT(1)
+                LT(1)
+                recognitionException NoViableAltException 2 1 2
+                exit decision 2
+                exitSubRule 2
+                beginResync
+                LT(1)
+                consumeToken [c/<4>,1:1]
+                LT(1)
+                endResync
+                LT(-1)
+                exitRule b
+                terminate
+        """
+        pass
+
+
+    def beginResync(self):
+        """Indicates the recognizer is about to consume tokens to resynchronize
+        the parser.  Any consume events from here until the recovered event
+        are not part of the parse--they are dead tokens.
+        """
+        pass
+
+
+    def endResync(self):
+        """Indicates that the recognizer has finished consuming tokens in order
+        to resynchronize.  There may be multiple beginResync/endResync pairs
+        before the recognizer comes out of errorRecovery mode (in which
+        multiple errors are suppressed).  This will be useful
+        in a GUI where you probably want to grey out tokens that are consumed
+        but not matched to anything in the grammar.  Anything between
+        a beginResync/endResync pair was tossed out by the parser.
+        """
+        pass
+
+
+    def semanticPredicate(self, result, predicate):
+        """A semantic predicate was evaluate with this result and action text"""
+        pass
+
+
+    def commence(self):
+        """Announce that parsing has begun.  Not technically useful except for
+        sending events over a socket.  A GUI for example will launch a thread
+        to connect and communicate with a remote parser.  The thread will want
+        to notify the GUI when a connection is made.  ANTLR parsers
+        trigger this upon entry to the first rule (the ruleLevel is used to
+        figure this out).
+        """
+        pass
+
+
+    def terminate(self):
+        """Parsing is over; successfully or not.  Mostly useful for telling
+        remote debugging listeners that it's time to quit.  When the rule
+        invocation level goes to zero at the end of a rule, we are done
+        parsing.
+        """
+        pass
+
+
+    ## T r e e  P a r s i n g
+
+    def consumeNode(self, t):
+        """Input for a tree parser is an AST, but we know nothing for sure
+        about a node except its type and text (obtained from the adaptor).
+        This is the analog of the consumeToken method.  Again, the ID is
+        the hashCode usually of the node so it only works if hashCode is
+        not implemented.  If the type is UP or DOWN, then
+        the ID is not really meaningful as it's fixed--there is
+        just one UP node and one DOWN navigation node.
+        """
+        pass
+
+
+    ## A S T  E v e n t s
+
+    def nilNode(self, t):
+        """A nil was created (even nil nodes have a unique ID...
+        they are not "null" per se).  As of 4/28/2006, this
+        seems to be uniquely triggered when starting a new subtree
+        such as when entering a subrule in automatic mode and when
+        building a tree in rewrite mode.
+
+        If you are receiving this event over a socket via
+        RemoteDebugEventSocketListener then only t.ID is set.
+        """
+        pass
+
+
+    def errorNode(self, t):
+        """Upon syntax error, recognizers bracket the error with an error node
+        if they are building ASTs.
+        """
+        pass
+
+
+    def createNode(self, node, token=None):
+        """Announce a new node built from token elements such as type etc...
+
+        If you are receiving this event over a socket via
+        RemoteDebugEventSocketListener then only t.ID, type, text are
+        set.
+        """
+        pass
+
+
+    def becomeRoot(self, newRoot, oldRoot):
+        """Make a node the new root of an existing root.
+
+        Note: the newRootID parameter is possibly different
+        from the TreeAdaptor.becomeRoot() newRoot parameter.
+        In our case, it will always be the result of calling
+        TreeAdaptor.becomeRoot() and not root_n or whatever.
+
+        The listener should assume that this event occurs
+        only when the current subrule (or rule) subtree is
+        being reset to newRootID.
+
+        If you are receiving this event over a socket via
+        RemoteDebugEventSocketListener then only IDs are set.
+
+        @see antlr3.tree.TreeAdaptor.becomeRoot()
+        """
+        pass
+
+
+    def addChild(self, root, child):
+        """Make childID a child of rootID.
+
+        If you are receiving this event over a socket via
+        RemoteDebugEventSocketListener then only IDs are set.
+
+        @see antlr3.tree.TreeAdaptor.addChild()
+        """
+        pass
+
+
+    def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex):
+        """Set the token start/stop token index for a subtree root or node.
+
+        If you are receiving this event over a socket via
+        RemoteDebugEventSocketListener then only t.ID is set.
+        """
+        pass
+
+
+class BlankDebugEventListener(DebugEventListener):
+    """A blank listener that does nothing; useful for real classes so
+    they don't have to have lots of blank methods and are less
+    sensitive to updates to the debug interface.
+
+    Note: this class is identical to DebugEventListener and exists purely
+    for compatibility with Java.
+    """
+    pass
+
+
+class TraceDebugEventListener(DebugEventListener):
+    """A listener that simply records text representations of the events.
+
+    Useful for debugging the debugging facility ;)
+
+    Subclasses can override the record() method (which defaults to printing to
+    stdout) to record the events in a different way.
+    """
+
+    def __init__(self, adaptor=None):
+        super().__init__()
+
+        if adaptor is None:
+            adaptor = CommonTreeAdaptor()
+        self.adaptor = adaptor
+
+    def record(self, event):
+        sys.stdout.write(event + '\n')
+
+    def enterRule(self, grammarFileName, ruleName):
+        self.record("enterRule " + ruleName)
+
+    def exitRule(self, grammarFileName, ruleName):
+        self.record("exitRule " + ruleName)
+
+    def enterSubRule(self, decisionNumber):
+        self.record("enterSubRule")
+
+    def exitSubRule(self, decisionNumber):
+        self.record("exitSubRule")
+
+    def location(self, line, pos):
+        self.record("location {}:{}".format(line, pos))
+
+    ## Tree parsing stuff
+
+    def consumeNode(self, t):
+        self.record("consumeNode {} {} {}".format(
+                self.adaptor.getUniqueID(t),
+                self.adaptor.getText(t),
+                self.adaptor.getType(t)))
+
+    def LT(self, i, t):
+        self.record("LT {} {} {} {}".format(
+                i,
+                self.adaptor.getUniqueID(t),
+                self.adaptor.getText(t),
+                self.adaptor.getType(t)))
+
+
+    ## AST stuff
+    def nilNode(self, t):
+        self.record("nilNode {}".format(self.adaptor.getUniqueID(t)))
+
+    def createNode(self, t, token=None):
+        if token is None:
+            self.record("create {}: {}, {}".format(
+                    self.adaptor.getUniqueID(t),
+                    self.adaptor.getText(t),
+                    self.adaptor.getType(t)))
+
+        else:
+            self.record("create {}: {}".format(
+                    self.adaptor.getUniqueID(t),
+                    token.index))
+
+    def becomeRoot(self, newRoot, oldRoot):
+        self.record("becomeRoot {}, {}".format(
+                self.adaptor.getUniqueID(newRoot),
+                self.adaptor.getUniqueID(oldRoot)))
+
+    def addChild(self, root, child):
+        self.record("addChild {}, {}".format(
+                self.adaptor.getUniqueID(root),
+                self.adaptor.getUniqueID(child)))
+
+    def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex):
+        self.record("setTokenBoundaries {}, {}, {}".format(
+                self.adaptor.getUniqueID(t),
+                tokenStartIndex, tokenStopIndex))
+
+
+class RecordDebugEventListener(TraceDebugEventListener):
+    """A listener that records events as strings in an array."""
+
+    def __init__(self, adaptor=None):
+        super().__init__(adaptor)
+
+        self.events = []
+
+    def record(self, event):
+        self.events.append(event)
+
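+
+# Editor's sketch (not part of the original runtime and never called by it):
+# a minimal illustration of how RecordDebugEventListener accumulates event
+# strings.  The grammar file name and rule name are hypothetical.
+def _example_record_listener():
+    listener = RecordDebugEventListener()
+    listener.enterRule('T.g', 'expr')
+    listener.location(7, 1)
+    listener.exitRule('T.g', 'expr')
+    # listener.events is now:
+    #     ['enterRule expr', 'location 7:1', 'exitRule expr']
+    return listener.events
+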
+
+class DebugEventSocketProxy(DebugEventListener):
+    """A proxy debug event listener that forwards events over a socket to
+    a debugger (or any other listener) using a simple text-based protocol;
+    one event per line.  ANTLRWorks listens on a server socket with a
+    RemoteDebugEventSocketListener instance.  These two objects must therefore
+    be kept in sync; new events must be handled on both sides of the socket.
+    """
+
+    DEFAULT_DEBUGGER_PORT = 49100
+
+    def __init__(self, recognizer, adaptor=None, port=None, debug=None):
+        super().__init__()
+
+        self.grammarFileName = recognizer.getGrammarFileName()
+
+        # Almost certainly the recognizer will have adaptor set, but
+        # we don't know how to cast it (Parser or TreeParser) to get
+        # the adaptor field.  Must be set with a constructor. :(
+        self.adaptor = adaptor
+
+        self.port = port or self.DEFAULT_DEBUGGER_PORT
+
+        self.debug = debug
+
+        self.socket = None
+        self.connection = None
+        self.input = None
+        self.output = None
+
+
+    def log(self, msg):
+        if self.debug:
+            self.debug.write(msg + '\n')
+
+
+    def handshake(self):
+        if self.socket is None:
+            # create listening socket
+            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+            self.socket.bind(('', self.port))
+            self.socket.listen(1)
+            self.log("Waiting for incoming connection on port {}".format(self.port))
+
+            # wait for an incoming connection
+            self.connection, addr = self.socket.accept()
+            self.log("Accepted connection from {}:{}".format(addr[0], addr[1]))
+
+            self.connection.setblocking(1)
+            self.connection.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
+
+            self.output = self.connection.makefile('w', 1)
+            self.input = self.connection.makefile('r', 1)
+
+            self.write("ANTLR {}".format(self.PROTOCOL_VERSION))
+            self.write('grammar "{}"'.format(self.grammarFileName))
+            self.ack()
+
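+    # Editor's note on the wire format (illustrative, grammar name assumed):
+    # after the connection is accepted, the proxy sends two header lines and
+    # then waits for a single acknowledgement line from the remote listener:
+    #
+    #     ANTLR 2
+    #     grammar "T.g"
+    #
+    # Every subsequent event is transmitted the same way: one line, then one
+    # ack (see transmit()).
+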
+
+    def write(self, msg):
+        self.log("> {}".format(msg))
+        self.output.write("{}\n".format(msg))
+        self.output.flush()
+
+
+    def ack(self):
+        t = self.input.readline()
+        self.log("< {}".format(t.rstrip()))
+
+
+    def transmit(self, event):
+        self.write(event)
+        self.ack()
+
+
+    def commence(self):
+        # don't bother sending event; listener will trigger upon connection
+        pass
+
+
+    def terminate(self):
+        self.transmit("terminate")
+        self.output.close()
+        self.input.close()
+        self.connection.close()
+        self.socket.close()
+
+
+    def enterRule(self, grammarFileName, ruleName):
+        self.transmit("enterRule\t{}\t{}".format(grammarFileName, ruleName))
+
+
+    def enterAlt(self, alt):
+        self.transmit("enterAlt\t{}".format(alt))
+
+
+    def exitRule(self, grammarFileName, ruleName):
+        self.transmit("exitRule\t{}\t{}".format(grammarFileName, ruleName))
+
+
+    def enterSubRule(self, decisionNumber):
+        self.transmit("enterSubRule\t{}".format(decisionNumber))
+
+
+    def exitSubRule(self, decisionNumber):
+        self.transmit("exitSubRule\t{}".format(decisionNumber))
+
+
+    def enterDecision(self, decisionNumber, couldBacktrack):
+        self.transmit(
+            "enterDecision\t{}\t{:d}".format(decisionNumber, couldBacktrack))
+
+
+    def exitDecision(self, decisionNumber):
+        self.transmit("exitDecision\t{}".format(decisionNumber))
+
+
+    def consumeToken(self, t):
+        self.transmit("consumeToken\t{}".format(self.serializeToken(t)))
+
+
+    def consumeHiddenToken(self, t):
+        self.transmit("consumeHiddenToken\t{}".format(self.serializeToken(t)))
+
+
+    def LT(self, i, o):
+        if isinstance(o, Tree):
+            return self.LT_tree(i, o)
+        return self.LT_token(i, o)
+
+
+    def LT_token(self, i, t):
+        if t is not None:
+            self.transmit("LT\t{}\t{}".format(i, self.serializeToken(t)))
+
+
+    def mark(self, i):
+        self.transmit("mark\t{}".format(i))
+
+
+    def rewind(self, i=None):
+        if i is not None:
+            self.transmit("rewind\t{}".format(i))
+        else:
+            self.transmit("rewind")
+
+
+    def beginBacktrack(self, level):
+        self.transmit("beginBacktrack\t{}".format(level))
+
+
+    def endBacktrack(self, level, successful):
+        self.transmit("endBacktrack\t{}\t{}".format(
+                level, '1' if successful else '0'))
+
+
+    def location(self, line, pos):
+        self.transmit("location\t{}\t{}".format(line, pos))
+
+
+    def recognitionException(self, exc):
+        self.transmit('\t'.join([
+                    "exception",
+                    exc.__class__.__name__,
+                    str(int(exc.index)),
+                    str(int(exc.line)),
+                    str(int(exc.charPositionInLine))]))
+
+
+    def beginResync(self):
+        self.transmit("beginResync")
+
+
+    def endResync(self):
+        self.transmit("endResync")
+
+
+    def semanticPredicate(self, result, predicate):
+        self.transmit('\t'.join([
+                    "semanticPredicate",
+                    str(int(result)),
+                    self.escapeNewlines(predicate)]))
+
+    ## A S T  P a r s i n g  E v e n t s
+
+    def consumeNode(self, t):
+        FIXME(31)
+#         StringBuffer buf = new StringBuffer(50);
+#         buf.append("consumeNode");
+#         serializeNode(buf, t);
+#         transmit(buf.toString());
+
+
+    def LT_tree(self, i, t):
+        FIXME(34)
+#         int ID = adaptor.getUniqueID(t);
+#         String text = adaptor.getText(t);
+#         int type = adaptor.getType(t);
+#         StringBuffer buf = new StringBuffer(50);
+#         buf.append("LN\t"); // lookahead node; distinguish from LT in protocol
+#         buf.append(i);
+#         serializeNode(buf, t);
+#         transmit(buf.toString());
+
+
+    def serializeNode(self, buf, t):
+        FIXME(33)
+#         int ID = adaptor.getUniqueID(t);
+#         String text = adaptor.getText(t);
+#         int type = adaptor.getType(t);
+#         buf.append("\t");
+#         buf.append(ID);
+#         buf.append("\t");
+#         buf.append(type);
+#         Token token = adaptor.getToken(t);
+#         int line = -1;
+#         int pos = -1;
+#         if ( token!=null ) {
+#             line = token.getLine();
+#             pos = token.getCharPositionInLine();
+#             }
+#         buf.append("\t");
+#         buf.append(line);
+#         buf.append("\t");
+#         buf.append(pos);
+#         int tokenIndex = adaptor.getTokenStartIndex(t);
+#         buf.append("\t");
+#         buf.append(tokenIndex);
+#         serializeText(buf, text);
+
+
+    ## A S T  E v e n t s
+
+    def nilNode(self, t):
+        self.transmit("nilNode\t{}".format(self.adaptor.getUniqueID(t)))
+
+
+    def errorNode(self, t):
+        self.transmit('errorNode\t{}\t{}\t"{}'.format(
+             self.adaptor.getUniqueID(t),
+             INVALID_TOKEN_TYPE,
+             self.escapeNewlines(t.toString())))
+
+
+    def createNode(self, node, token=None):
+        if token is not None:
+            self.transmit("createNode\t{}\t{}".format(
+                    self.adaptor.getUniqueID(node),
+                    token.index))
+
+        else:
+            self.transmit('createNodeFromTokenElements\t{}\t{}\t"{}'.format(
+                    self.adaptor.getUniqueID(node),
+                    self.adaptor.getType(node),
+                    self.adaptor.getText(node)))
+
+
+    def becomeRoot(self, newRoot, oldRoot):
+        self.transmit("becomeRoot\t{}\t{}".format(
+                self.adaptor.getUniqueID(newRoot),
+                self.adaptor.getUniqueID(oldRoot)))
+
+
+    def addChild(self, root, child):
+        self.transmit("addChild\t{}\t{}".format(
+                self.adaptor.getUniqueID(root),
+                self.adaptor.getUniqueID(child)))
+
+
+    def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex):
+        self.transmit("setTokenBoundaries\t{}\t{}\t{}".format(
+                self.adaptor.getUniqueID(t),
+                tokenStartIndex, tokenStopIndex))
+
+
+
+    ## support
+
+    def setTreeAdaptor(self, adaptor):
+        self.adaptor = adaptor
+
+    def getTreeAdaptor(self):
+        return self.adaptor
+
+
+    def serializeToken(self, t):
+        buf = [str(int(t.index)),
+               str(int(t.type)),
+               str(int(t.channel)),
+               str(int(t.line or 0)),
+               str(int(t.charPositionInLine or 0)),
+               '"' + self.escapeNewlines(t.text)]
+        return '\t'.join(buf)
+
+
+    def escapeNewlines(self, txt):
+        if txt is None:
+            return ''
+
+        txt = txt.replace("%","%25")   # escape all escape char ;)
+        txt = txt.replace("\n","%0A")  # escape \n
+        txt = txt.replace("\r","%0D")  # escape \r
+        return txt
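+
+# Editor's note on the token serialization (illustrative): serializeToken()
+# joins index, type, channel, line and column with tabs and appends the
+# escaped text after an opening double quote, while escapeNewlines() uses a
+# %-based escape.  For a token with index 0, type 4, default channel, at
+# line 1, column 0 and text "c", the transmitted fragment would be
+#
+#     0\t4\t0\t1\t0\t"c
+#
+# and escapeNewlines('a%b\n') returns 'a%25b%0A'.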
diff --git a/runtime/Python3/antlr3/dfa.py b/runtime/Python3/antlr3/dfa.py
new file mode 100644
index 0000000..95ad15a
--- /dev/null
+++ b/runtime/Python3/antlr3/dfa.py
@@ -0,0 +1,195 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2012 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+from .constants import EOF
+from .exceptions import NoViableAltException, BacktrackingFailed
+
+
+class DFA(object):
+    """@brief A DFA implemented as a set of transition tables.
+
+    Any state that has a semantic predicate edge is special; those states
+    are generated with if-then-else structures in a specialStateTransition()
+    which is generated by cyclicDFA template.
+    
+    """
+    
+    def __init__(
+        self,
+        recognizer, decisionNumber,
+        eot, eof, min, max, accept, special, transition
+        ):
+        ## Which recognizer encloses this DFA?  Needed to check backtracking
+        self.recognizer = recognizer
+
+        self.decisionNumber = decisionNumber
+        self.eot = eot
+        self.eof = eof
+        self.min = min
+        self.max = max
+        self.accept = accept
+        self.special = special
+        self.transition = transition
+
+
+    def predict(self, input):
+        """
+        From the input stream, predict what alternative will succeed
+        using this DFA (representing the covering regular approximation
+        to the underlying CFL).  Return an alternative number 1..n.  Throw
+        an exception upon error.
+        """
+        mark = input.mark()
+        s = 0 # we always start at s0
+        try:
+            for _ in range(50000):
+                specialState = self.special[s]
+                if specialState >= 0:
+                    s = self.specialStateTransition(specialState, input)
+                    if s == -1:
+                        self.noViableAlt(s, input)
+                        return 0
+                    input.consume()
+                    continue
+
+                if self.accept[s] >= 1:
+                    return self.accept[s]
+
+                # look for a normal char transition
+                c = input.LA(1)
+
+                if c >= self.min[s] and c <= self.max[s]:
+                    # move to next state
+                    snext = self.transition[s][c-self.min[s]]
+                    
+                    if snext < 0:
+                        # was in range but not a normal transition
+                        # must check EOT, which is like the else clause.
+                        # eot[s]>=0 indicates that an EOT edge goes to another
+                        # state.
+                        if self.eot[s] >= 0: # EOT Transition to accept state?
+                            s = self.eot[s]
+                            input.consume()
+                            # TODO: I had this as return accept[eot[s]]
+                            # which assumed here that the EOT edge always
+                            # went to an accept...faster to do this, but
+                            # what about predicated edges coming from EOT
+                            # target?
+                            continue
+
+                        self.noViableAlt(s, input)
+                        return 0
+
+                    s = snext
+                    input.consume()
+                    continue
+
+                if self.eot[s] >= 0:
+                    s = self.eot[s]
+                    input.consume()
+                    continue
+
+                # EOF Transition to accept state?
+                if c == EOF and self.eof[s] >= 0:
+                    return self.accept[self.eof[s]]
+
+                # not in range and not EOF/EOT, must be invalid symbol
+                self.noViableAlt(s, input)
+                return 0
+
+            else:
+                raise RuntimeError("DFA bang!")
+            
+        finally:
+            input.rewind(mark)
+
+
+    def noViableAlt(self, s, input):
+        if self.recognizer._state.backtracking > 0:
+            raise BacktrackingFailed
+
+        nvae = NoViableAltException(
+            self.getDescription(),
+            self.decisionNumber,
+            s,
+            input
+            )
+
+        self.error(nvae)
+        raise nvae
+
+
+    def error(self, nvae):
+        """A hook for debugging interface"""
+        pass
+
+
+    def specialStateTransition(self, s, input):
+        return -1
+
+
+    def getDescription(self):
+        return "n/a"
+
+
+##     def specialTransition(self, state, symbol):
+##         return 0
+
+
+    @classmethod
+    def unpack(cls, string):
+        """@brief Unpack the runlength encoded table data.
+
+        Terence implemented packed table initializers, because Java has a
+        size restriction on .class files and the lookup tables can grow
+        pretty large. The generated JavaLexer.java of the Java.g example
+        would be about 15MB with uncompressed array initializers.
+
+        Python does not have any size restrictions, but the compilation of
+        such large source files seems to be pretty memory hungry. The memory
+        consumption of the Python process grew to >1.5GB when importing a
+        15MB lexer, eating all my swap space, and I was too impatient to see
+        if it could finish at all. With packed initializers that are unpacked
+        at import time of the lexer module, everything works like a charm.
+        
+        """
+        
+        ret = []
+        for i in range(0, len(string) - 1, 2):
+            (n, v) = ord(string[i]), ord(string[i + 1])
+
+            if v == 0xFFFF:
+                v = -1
+
+            ret += [v] * n
+
+        return ret
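+
+
+# Editor's sketch (not part of the original runtime and never called by it):
+# each (n, v) character pair expands to n copies of v, and the 0xFFFF
+# sentinel decodes to -1.
+def _example_unpack():
+    # Two copies of 5 followed by three copies of -1.
+    return DFA.unpack("\u0002\u0005\u0003\uffff")  # -> [5, 5, -1, -1, -1]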
diff --git a/runtime/Python3/antlr3/exceptions.py b/runtime/Python3/antlr3/exceptions.py
new file mode 100644
index 0000000..78ea441
--- /dev/null
+++ b/runtime/Python3/antlr3/exceptions.py
@@ -0,0 +1,364 @@
+"""ANTLR3 exception hierarchy"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2012 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+from .constants import INVALID_TOKEN_TYPE
+
+
+class BacktrackingFailed(Exception):
+    """@brief Raised to signal failed backtrack attempt"""
+
+    pass
+
+
+class RecognitionException(Exception):
+    """@brief The root of the ANTLR exception hierarchy.
+
+    To avoid English-only error messages and to generally make things
+    as flexible as possible, these exceptions are not created with strings,
+    but rather the information necessary to generate an error.  Then
+    the various reporting methods in Parser and Lexer can be overridden
+    to generate a localized error message.  For example, MismatchedToken
+    exceptions are built with the expected token type.
+    So, don't expect getMessage() to return anything.
+
+    Note that as of Java 1.4, you can access the stack trace, which means
+    that you can compute the complete trace of rules from the start symbol.
+    This gives you considerable context information with which to generate
+    useful error messages.
+
+    ANTLR generates code that throws exceptions upon recognition error and
+    also generates code to catch these exceptions in each rule.  If you
+    want to quit upon first error, you can turn off the automatic error
+    handling mechanism using the rulecatch action, but you still need to
+    override the mismatch and recoverFromMismatchSet methods.
+    
+    In general, the recognition exceptions can track where in a grammar a
+    problem occurred and/or what was the expected input.  While the parser
+    knows its state (such as current input symbol and line info), that
+    state can change before the exception is reported, so the current token
+    index is computed and stored at exception time.  From this info, you can
+    perhaps print an entire line of input, not just a single token, for example.
+    Better to just say the recognizer had a problem and then let the parser
+    figure out a fancy report.
+    
+    """
+
+    def __init__(self, input=None):
+        super().__init__()
+
+        # What input stream did the error occur in?
+        self.input = None
+
+        # What is index of token/char were we looking at when the error
+        # occurred?
+        self.index = None
+
+        # The current Token when an error occurred.  Since not all streams
+        # can retrieve the ith Token, we have to track the Token object.
+        # For parsers.  Even when it's a tree parser, token might be set.
+        self.token = None
+
+        # If this is a tree parser exception, node is set to the node with
+        # the problem.
+        self.node = None
+
+        # The current char when an error occurred. For lexers.
+        self.c = None
+
+        # Track the line at which the error occurred in case this is
+        # generated from a lexer.  We need to track this since the
+        # unexpected char doesn't carry the line info.
+        self.line = None
+
+        self.charPositionInLine = None
+
+        # If you are parsing a tree node stream, you will encounter some
+        # imaginary nodes w/o line/col info.  We now search backwards looking
+        # for most recent token with line/col info, but notify getErrorHeader()
+        # that info is approximate.
+        self.approximateLineInfo = False
+
+        
+        if input:
+            self.input = input
+            self.index = input.index()
+
+            # late import to avoid cyclic dependencies
+            from .streams import TokenStream, CharStream
+            from .tree import TreeNodeStream
+
+            if isinstance(self.input, TokenStream):
+                self.token = self.input.LT(1)
+                self.line = self.token.line
+                self.charPositionInLine = self.token.charPositionInLine
+
+            if isinstance(self.input, TreeNodeStream):
+                self.extractInformationFromTreeNodeStream(self.input)
+
+            else:
+                if isinstance(self.input, CharStream):
+                    self.c = self.input.LT(1)
+                    self.line = self.input.line
+                    self.charPositionInLine = self.input.charPositionInLine
+
+                else:
+                    self.c = self.input.LA(1)
+
+    def extractInformationFromTreeNodeStream(self, nodes):
+        from .tree import Tree, CommonTree
+        from .tokens import CommonToken
+        
+        self.node = nodes.LT(1)
+        adaptor = nodes.adaptor
+        payload = adaptor.getToken(self.node)
+        if payload:
+            self.token = payload
+            if payload.line <= 0:
+                # imaginary node; no line/pos info; scan backwards
+                i = -1
+                priorNode = nodes.LT(i)
+                while priorNode:
+                    priorPayload = adaptor.getToken(priorNode)
+                    if priorPayload and priorPayload.line > 0:
+                        # we found the most recent real line / pos info
+                        self.line = priorPayload.line
+                        self.charPositionInLine = priorPayload.charPositionInLine
+                        self.approximateLineInfo = True
+                        break
+                    
+                    i -= 1
+                    priorNode = nodes.LT(i)
+                    
+            else: # node created from real token
+                self.line = payload.line
+                self.charPositionInLine = payload.charPositionInLine
+                
+        elif isinstance(self.node, Tree):
+            self.line = self.node.line
+            self.charPositionInLine = self.node.charPositionInLine
+            if isinstance(self.node, CommonTree):
+                self.token = self.node.token
+
+        else:
+            type = adaptor.getType(self.node)
+            text = adaptor.getText(self.node)
+            self.token = CommonToken(type=type, text=text)
+
+     
+    def getUnexpectedType(self):
+        """Return the token type or char of the unexpected input element"""
+
+        from .streams import TokenStream
+        from .tree import TreeNodeStream
+
+        if isinstance(self.input, TokenStream):
+            return self.token.type
+
+        elif isinstance(self.input, TreeNodeStream):
+            adaptor = self.input.treeAdaptor
+            return adaptor.getType(self.node)
+
+        else:
+            return self.c
+
+    unexpectedType = property(getUnexpectedType)
+    
+
+class MismatchedTokenException(RecognitionException):
+    """@brief A mismatched char or Token or tree node."""
+    
+    def __init__(self, expecting, input):
+        super().__init__(input)
+        self.expecting = expecting
+        
+
+    def __str__(self):
+        return "MismatchedTokenException({!r}!={!r})".format(
+            self.getUnexpectedType(), self.expecting
+            )
+    __repr__ = __str__
+
+
+class UnwantedTokenException(MismatchedTokenException):
+    """An extra token while parsing a TokenStream"""
+
+    def getUnexpectedToken(self):
+        return self.token
+
+
+    def __str__(self):
+        exp = ", expected {}".format(self.expecting)
+        if self.expecting == INVALID_TOKEN_TYPE:
+            exp = ""
+
+        if not self.token:
+            return "UnwantedTokenException(found={}{})".format(None, exp)
+
+        return "UnwantedTokenException(found={}{})".format(self.token.text, exp)
+    __repr__ = __str__
+
+
+class MissingTokenException(MismatchedTokenException):
+    """
+    We were expecting a token but it was not found.  The current token
+    is actually what we wanted next.
+    """
+
+    def __init__(self, expecting, input, inserted):
+        super().__init__(expecting, input)
+
+        self.inserted = inserted
+
+
+    def getMissingType(self):
+        return self.expecting
+
+
+    def __str__(self):
+        if self.token:
+            if self.inserted:
+                return "MissingTokenException(inserted {!r} at {!r})".format(
+                    self.inserted, self.token.text)
+
+            return "MissingTokenException(at {!r})".format(self.token.text)
+
+        return "MissingTokenException"
+    __repr__ = __str__
+
+
+class MismatchedRangeException(RecognitionException):
+    """@brief The next token does not match a range of expected types."""
+
+    def __init__(self, a, b, input):
+        super().__init__(input)
+
+        self.a = a
+        self.b = b
+        
+
+    def __str__(self):
+        return "MismatchedRangeException({!r} not in [{!r}..{!r}])".format(
+            self.getUnexpectedType(), self.a, self.b
+            )
+    __repr__ = __str__
+    
+
+class MismatchedSetException(RecognitionException):
+    """@brief The next token does not match a set of expected types."""
+
+    def __init__(self, expecting, input):
+        super().__init__(input)
+
+        self.expecting = expecting
+        
+
+    def __str__(self):
+        return "MismatchedSetException({!r} not in {!r})".format(
+            self.getUnexpectedType(), self.expecting
+            )
+    __repr__ = __str__
+
+
+class MismatchedNotSetException(MismatchedSetException):
+    """@brief Used for remote debugger deserialization"""
+    
+    def __str__(self):
+        return "MismatchedNotSetException({!r}!={!r})".format(
+            self.getUnexpectedType(), self.expecting
+            )
+    __repr__ = __str__
+
+
+class NoViableAltException(RecognitionException):
+    """@brief Unable to decide which alternative to choose."""
+
+    def __init__(
+        self, grammarDecisionDescription, decisionNumber, stateNumber, input
+        ):
+        super().__init__(input)
+
+        self.grammarDecisionDescription = grammarDecisionDescription
+        self.decisionNumber = decisionNumber
+        self.stateNumber = stateNumber
+
+
+    def __str__(self):
+        return "NoViableAltException({!r}!=[{!r}])".format(
+            self.unexpectedType, self.grammarDecisionDescription
+            )
+    __repr__ = __str__
+    
+
+class EarlyExitException(RecognitionException):
+    """@brief The recognizer did not match anything for a (..)+ loop."""
+
+    def __init__(self, decisionNumber, input):
+        super().__init__(input)
+
+        self.decisionNumber = decisionNumber
+
+
+class FailedPredicateException(RecognitionException):
+    """@brief A semantic predicate failed during validation.
+
+    Validation of predicates occurs when normally parsing the alternative,
+    just like matching a token.
+    Disambiguating predicate evaluation occurs when we hoist a predicate into
+    a prediction decision.
+    """
+
+    def __init__(self, input, ruleName, predicateText):
+        super().__init__(input)
+        
+        self.ruleName = ruleName
+        self.predicateText = predicateText
+
+
+    def __str__(self):
+        return "FailedPredicateException({},{{{}}}?)".format(
+            self.ruleName, self.predicateText)
+    __repr__ = __str__
+    
+
+class MismatchedTreeNodeException(RecognitionException):
+    """@brief The next tree mode does not match the expected type."""
+
+    def __init__(self, expecting, input):
+        super().__init__(input)
+        
+        self.expecting = expecting
+
+    def __str__(self):
+        return "MismatchedTreeNodeException({!r}!={!r})".format(
+            self.getUnexpectedType(), self.expecting
+            )
+    __repr__ = __str__
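+
+
+# Minimal handling sketch (editor's note): application code typically catches
+# the base class and inspects the location and the unexpected element; the
+# "parser" object and its "prog" rule below are hypothetical.
+#
+#     try:
+#         parser.prog()
+#     except RecognitionException as exc:
+#         print("error at line", exc.line, "token type", exc.unexpectedType)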
diff --git a/runtime/Python3/antlr3/main.py b/runtime/Python3/antlr3/main.py
new file mode 100644
index 0000000..739726b
--- /dev/null
+++ b/runtime/Python3/antlr3/main.py
@@ -0,0 +1,234 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2012 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+
+import sys
+import argparse
+
+from .streams import ANTLRStringStream, ANTLRFileStream, \
+     ANTLRInputStream, CommonTokenStream
+from .tree import CommonTreeNodeStream
+
+
+class _Main(object):
+    def __init__(self):
+        self.stdin = sys.stdin
+        self.stdout = sys.stdout
+        self.stderr = sys.stderr
+
+
+    def parseArgs(self, argv):
+        argParser = argparse.ArgumentParser()
+        argParser.add_argument("--input")
+        argParser.add_argument("--interactive", "-i", action="store_true")
+        argParser.add_argument("--no-output", action="store_true")
+        argParser.add_argument("--profile", action="store_true")
+        argParser.add_argument("--hotshot", action="store_true")
+        argParser.add_argument("--port", type=int)
+        argParser.add_argument("--debug-socket", action='store_true')
+        argParser.add_argument("file", nargs='?')
+
+        self.setupArgs(argParser)
+
+        return argParser.parse_args(argv[1:])
+
+
+    def setupArgs(self, argParser):
+        pass
+
+
+    def execute(self, argv):
+        args = self.parseArgs(argv)
+
+        self.setUp(args)
+
+        if args.interactive:
+            while True:
+                try:
+                    input_str = input(">>> ")
+                except (EOFError, KeyboardInterrupt):
+                    self.stdout.write("\nBye.\n")
+                    break
+
+                inStream = ANTLRStringStream(input_str)
+                self.parseStream(args, inStream)
+
+        else:
+            if args.input:
+                inStream = ANTLRStringStream(args.input)
+
+            elif args.file and args.file != '-':
+                inStream = ANTLRFileStream(args.file)
+
+            else:
+                inStream = ANTLRInputStream(self.stdin)
+
+            if args.profile:
+                try:
+                    import cProfile as profile
+                except ImportError:
+                    import profile
+
+                profile.runctx(
+                    'self.parseStream(args, inStream)',
+                    globals(),
+                    locals(),
+                    'profile.dat'
+                    )
+
+                import pstats
+                stats = pstats.Stats('profile.dat')
+                stats.strip_dirs()
+                stats.sort_stats('time')
+                stats.print_stats(100)
+
+            elif args.hotshot:
+                import hotshot
+
+                profiler = hotshot.Profile('hotshot.dat')
+                profiler.runctx(
+                    'self.parseStream(args, inStream)',
+                    globals(),
+                    locals()
+                    )
+
+            else:
+                self.parseStream(args, inStream)
+
+
+    def setUp(self, args):
+        pass
+
+
+    def parseStream(self, args, inStream):
+        raise NotImplementedError
+
+
+    def write(self, args, text):
+        if not args.no_output:
+            self.stdout.write(text)
+
+
+    def writeln(self, args, text):
+        self.write(args, text + '\n')
+
+
+class LexerMain(_Main):
+    def __init__(self, lexerClass):
+        super().__init__()
+
+        self.lexerClass = lexerClass
+
+
+    def parseStream(self, args, inStream):
+        lexer = self.lexerClass(inStream)
+        for token in lexer:
+            self.writeln(args, str(token))
+
+
+class ParserMain(_Main):
+    def __init__(self, lexerClassName, parserClass):
+        super().__init__()
+
+        self.lexerClassName = lexerClassName
+        self.lexerClass = None
+        self.parserClass = parserClass
+
+
+    def setupArgs(self, argParser):
+        argParser.add_argument("--lexer", dest="lexerClass",
+                               default=self.lexerClassName)
+        argParser.add_argument("--rule", dest="parserRule")
+
+
+    def setUp(self, args):
+        lexerMod = __import__(args.lexerClass)
+        self.lexerClass = getattr(lexerMod, args.lexerClass)
+
+
+    def parseStream(self, args, inStream):
+        kwargs = {}
+        if args.port is not None:
+            kwargs['port'] = args.port
+        if args.debug_socket:
+            kwargs['debug_socket'] = sys.stderr
+
+        lexer = self.lexerClass(inStream)
+        tokenStream = CommonTokenStream(lexer)
+        parser = self.parserClass(tokenStream, **kwargs)
+        result = getattr(parser, args.parserRule)()
+        if result:
+            if hasattr(result, 'tree') and result.tree:
+                self.writeln(args, result.tree.toStringTree())
+            else:
+                self.writeln(args, repr(result))
+
+
+class WalkerMain(_Main):
+    def __init__(self, walkerClass):
+        super().__init__()
+
+        self.lexerClass = None
+        self.parserClass = None
+        self.walkerClass = walkerClass
+
+
+    def setupArgs(self, argParser):
+        argParser.add_argument("--lexer", dest="lexerClass")
+        argParser.add_argument("--parser", dest="parserClass")
+        argParser.add_argument("--parser-rule", dest="parserRule")
+        argParser.add_argument("--rule", dest="walkerRule")
+
+
+    def setUp(self, args):
+        lexerMod = __import__(args.lexerClass)
+        self.lexerClass = getattr(lexerMod, args.lexerClass)
+        parserMod = __import__(args.parserClass)
+        self.parserClass = getattr(parserMod, args.parserClass)
+
+
+    def parseStream(self, args, inStream):
+        lexer = self.lexerClass(inStream)
+        tokenStream = CommonTokenStream(lexer)
+        parser = self.parserClass(tokenStream)
+        result = getattr(parser, args.parserRule)()
+        if result:
+            assert hasattr(result, 'tree'), "Parser did not return an AST"
+            nodeStream = CommonTreeNodeStream(result.tree)
+            nodeStream.setTokenStream(tokenStream)
+            walker = self.walkerClass(nodeStream)
+            result = getattr(walker, args.walkerRule)()
+            if result:
+                if hasattr(result, 'tree'):
+                    self.writeln(args, result.tree.toStringTree())
+                else:
+                    self.writeln(args, repr(result))
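+
+
+# Usage sketch (editor's note): a generated parser module would typically
+# drive ParserMain like this; the "TLexer"/"TParser" names, the "prog" rule
+# and the input file are hypothetical.
+#
+#     from antlr3.main import ParserMain
+#     main = ParserMain('TLexer', TParser)
+#     main.execute(['TParser', '--rule', 'prog', 'input.txt'])
+#
+# ParserMain imports the module named by --lexer (default 'TLexer' here),
+# builds a CommonTokenStream from it and invokes the rule given by --rule on
+# the parser class.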
diff --git a/runtime/Python3/antlr3/recognizers.py b/runtime/Python3/antlr3/recognizers.py
new file mode 100644
index 0000000..3fdb593
--- /dev/null
+++ b/runtime/Python3/antlr3/recognizers.py
@@ -0,0 +1,1455 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2012 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+import sys
+import inspect
+
+from .constants import compatible_api_versions, DEFAULT_CHANNEL, \
+     HIDDEN_CHANNEL, EOF, EOR_TOKEN_TYPE, INVALID_TOKEN_TYPE
+from .exceptions import RecognitionException, MismatchedTokenException, \
+     MismatchedRangeException, MismatchedTreeNodeException, \
+     NoViableAltException, EarlyExitException, MismatchedSetException, \
+     MismatchedNotSetException, FailedPredicateException, \
+     BacktrackingFailed, UnwantedTokenException, MissingTokenException
+from .tokens import CommonToken, SKIP_TOKEN
+
+
+class RecognizerSharedState(object):
+    """
+    The set of fields needed by an abstract recognizer to recognize input
+    and recover from errors etc...  As a separate state object, it can be
+    shared among multiple grammars; e.g., when one grammar imports another.
+
+    These fields are publicly visible but the actual state pointer per
+    parser is protected.
+    """
+
+    def __init__(self):
+        # Track the set of token types that can follow any rule invocation.
+        # Stack grows upwards.
+        self.following = []
+
+        # This is true when we see an error and before having successfully
+        # matched a token.  Prevents generation of more than one error message
+        # per error.
+        self.errorRecovery = False
+
+        # The index into the input stream where the last error occurred.
+        # This is used to prevent infinite loops where an error is found
+        # but no token is consumed during recovery...another error is found,
+        # ad nauseam.  This is a failsafe mechanism to guarantee that at least
+        # one token/tree node is consumed for two errors.
+        self.lastErrorIndex = -1
+
+        # If 0, no backtracking is going on.  Safe to exec actions etc...
+        # If >0 then it's the level of backtracking.
+        self.backtracking = 0
+
+        # An array[size num rules] of (int -> int) dicts that tracks
+        # the stop token index for each rule.  ruleMemo[ruleIndex] is
+        # the memoization table for ruleIndex.  For key ruleStartIndex, you
+        # get back the stop token for associated rule or MEMO_RULE_FAILED.
+        #
+        # This is only used if rule memoization is on (which it is by default).
+        self.ruleMemo = None
+
+        ## Did the recognizer encounter a syntax error?  Track how many.
+        self.syntaxErrors = 0
+
+
+        # LEXER FIELDS (must be in same state object to avoid casting
+        # constantly in generated code and Lexer object) :(
+
+
+        ## The goal of all lexer rules/methods is to create a token object.
+        # This is an instance variable as multiple rules may collaborate to
+        # create a single token.  nextToken will return this object after
+        # matching lexer rule(s).  If you subclass to allow multiple token
+        # emissions, then set this to the last token to be matched or
+        # something nonnull so that the auto token emit mechanism will not
+        # emit another token.
+        self.token = None
+
+        ## What character index in the stream did the current token start at?
+        # Needed, for example, to get the text for current token.  Set at
+        # the start of nextToken.
+        self.tokenStartCharIndex = -1
+
+        ## The line on which the first character of the token resides
+        self.tokenStartLine = None
+
+        ## The character position of first character within the line
+        self.tokenStartCharPositionInLine = None
+
+        ## The channel number for the current token
+        self.channel = None
+
+        ## The token type for the current token
+        self.type = None
+
+        ## You can set the text for the current token to override what is in
+        # the input char buffer.  Use setText() or can set this instance var.
+        self.text = None
+
+
+class BaseRecognizer(object):
+    """
+    @brief Common recognizer functionality.
+
+    A generic recognizer that can handle recognizers generated from
+    lexer, parser, and tree grammars.  This is all the parsing
+    support code essentially; most of it is error recovery stuff and
+    backtracking.
+    """
+
+    MEMO_RULE_FAILED = -2
+    MEMO_RULE_UNKNOWN = -1
+
+    # copies from Token object for convenience in actions
+    DEFAULT_TOKEN_CHANNEL = DEFAULT_CHANNEL
+
+    # for convenience in actions
+    HIDDEN = HIDDEN_CHANNEL
+
+    # overridden by generated subclasses
+    grammarFileName = None
+    tokenNames = None
+
+    # The api_version attribute was introduced in 3.3. If it is not
+    # overridden in the generated recognizer, we assume a default of v0.
+    api_version = 0
+
+    def __init__(self, state=None):
+        # Input stream of the recognizer. Must be initialized by a subclass.
+        self.input = None
+
+        ## State of a lexer, parser, or tree parser are collected into a state
+        # object so the state can be shared.  This sharing is needed to
+        # have one grammar import others and share same error variables
+        # and other state variables.  It's a kind of explicit multiple
+        # inheritance via delegation of methods and shared state.
+        if state is None:
+            state = RecognizerSharedState()
+        self._state = state
+
+        if self.api_version not in compatible_api_versions:
+            raise RuntimeError(
+                "ANTLR version mismatch: "
+                "The recognizer has been generated with API V{}, "
+                "but this runtime does not support this."
+                .format(self.api_version))
+
+    # this one only exists to shut up pylint :(
+    def setInput(self, input):
+        self.input = input
+
+
+    def reset(self):
+        """
+        reset the parser's state; subclasses must rewind the input stream
+        """
+
+        # whack everything related to error recovery
+        if self._state is None:
+            # no shared state work to do
+            return
+
+        self._state.following = []
+        self._state.errorRecovery = False
+        self._state.lastErrorIndex = -1
+        self._state.syntaxErrors = 0
+        # whack everything related to backtracking and memoization
+        self._state.backtracking = 0
+        if self._state.ruleMemo is not None:
+            self._state.ruleMemo = {}
+
+
+    def match(self, input, ttype, follow):
+        """
+        Match current input symbol against ttype.  Attempt
+        single token insertion or deletion error recovery.  If
+        that fails, throw MismatchedTokenException.
+
+        To turn off single token insertion or deletion error
+        recovery, override recoverFromMismatchedToken() and have it
+        throw an exception. See TreeParser.recoverFromMismatchedToken().
+        This way any error in a rule will cause an exception and
+        immediate exit from rule.  Rule would recover by resynchronizing
+        to the set of symbols that can follow rule ref.
+        """
+
+        matchedSymbol = self.getCurrentInputSymbol(input)
+        if self.input.LA(1) == ttype:
+            self.input.consume()
+            self._state.errorRecovery = False
+            return matchedSymbol
+
+        if self._state.backtracking > 0:
+            # FIXME: need to return matchedSymbol here as well. damn!!
+            raise BacktrackingFailed
+
+        matchedSymbol = self.recoverFromMismatchedToken(input, ttype, follow)
+        return matchedSymbol
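+
+    # Call shape used by generated rule code (the token type ID and the follow
+    # set are hypothetical stand-ins here):
+    #
+    #   ID_FOLLOW = frozenset([COMMA, SEMI])          # what may follow this ref
+    #   tok = self.match(self.input, ID, ID_FOLLOW)   # recovers or raises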
+
+
+    def matchAny(self):
+        """Match the wildcard: in a symbol"""
+
+        self._state.errorRecovery = False
+        self.input.consume()
+
+
+    def mismatchIsUnwantedToken(self, input, ttype):
+        return input.LA(2) == ttype
+
+
+    def mismatchIsMissingToken(self, input, follow):
+        if follow is None:
+            # we have no information about the follow; we can only consume
+            # a single token and hope for the best
+            return False
+
+        # compute what can follow this grammar element reference
+        if EOR_TOKEN_TYPE in follow:
+            viableTokensFollowingThisRule = self.computeContextSensitiveRuleFOLLOW()
+            follow |= viableTokensFollowingThisRule
+
+            if len(self._state.following) > 0:
+                # remove EOR if we're not the start symbol
+                follow -= {EOR_TOKEN_TYPE}
+
+        # if current token is consistent with what could come after set
+        # then we know we're missing a token; error recovery is free to
+        # "insert" the missing token
+        if input.LA(1) in follow or EOR_TOKEN_TYPE in follow:
+            return True
+
+        return False
+
+
+    def reportError(self, e):
+        """Report a recognition problem.
+
+        This method sets errorRecovery to indicate the parser is recovering
+        not parsing.  Once in recovery mode, no errors are generated.
+        To get out of recovery mode, the parser must successfully match
+        a token (after a resync).  So it will go:
+
+        1. error occurs
+        2. enter recovery mode, report error
+        3. consume until token found in resync set
+        4. try to resume parsing
+        5. next match() will reset errorRecovery mode
+
+        If you override, make sure to update syntaxErrors if you care about
+        that.
+
+        """
+
+        # if we've already reported an error and have not matched a token
+        # yet successfully, don't report any errors.
+        if self._state.errorRecovery:
+            return
+
+        self._state.syntaxErrors += 1 # don't count spurious
+        self._state.errorRecovery = True
+
+        self.displayRecognitionError(e)
+
+
+    def displayRecognitionError(self, e):
+        hdr = self.getErrorHeader(e)
+        msg = self.getErrorMessage(e)
+        self.emitErrorMessage(hdr + " " + msg)
+
+
+    def getErrorMessage(self, e):
+        """
+        What error message should be generated for the various
+        exception types?
+
+        Not very object-oriented code, but I like having all error message
+        generation within one method rather than spread among all of the
+        exception classes. This also makes it much easier for the exception
+        handling because the exception classes do not have to have pointers back
+        to this object to access utility routines and so on. Also, changing
+        the message for an exception type would be difficult because you
+        would have to subclass the exception, but then somehow get ANTLR
+        to make those kinds of exception objects instead of the default.
+        This looks weird, but trust me--it makes the most sense in terms
+        of flexibility.
+
+        For grammar debugging, you will want to override this to add
+        more information such as the stack frame with
+        getRuleInvocationStack(e, this.getClass().getName()) and,
+        for no viable alts, the decision description and state etc...
+
+        Override this to change the message generated for one or more
+        exception types.
+        """
+
+        if isinstance(e, UnwantedTokenException):
+            if e.expecting == EOF:
+                tokenName = "EOF"
+            else:
+                tokenName = self.tokenNames[e.expecting]
+
+            msg = "extraneous input {} expecting {}".format(
+                self.getTokenErrorDisplay(e.getUnexpectedToken()),
+                tokenName
+                )
+
+        elif isinstance(e, MissingTokenException):
+            if e.expecting == EOF:
+                tokenName = "EOF"
+            else:
+                tokenName = self.tokenNames[e.expecting]
+
+            msg = "missing {} at {}".format(
+                tokenName, self.getTokenErrorDisplay(e.token)
+                )
+
+        elif isinstance(e, MismatchedTokenException):
+            if e.expecting == EOF:
+                tokenName = "EOF"
+            else:
+                tokenName = self.tokenNames[e.expecting]
+
+            msg = "mismatched input {} expecting {}".format(
+                self.getTokenErrorDisplay(e.token),
+                tokenName
+                )
+
+        elif isinstance(e, MismatchedTreeNodeException):
+            if e.expecting == EOF:
+                tokenName = "EOF"
+            else:
+                tokenName = self.tokenNames[e.expecting]
+
+            msg = "mismatched tree node: {} expecting {}".format(
+                e.node, tokenName)
+
+        elif isinstance(e, NoViableAltException):
+            msg = "no viable alternative at input {}".format(
+                self.getTokenErrorDisplay(e.token))
+
+        elif isinstance(e, EarlyExitException):
+            msg = "required (...)+ loop did not match anything at input {}".format(
+                self.getTokenErrorDisplay(e.token))
+
+        elif isinstance(e, MismatchedSetException):
+            msg = "mismatched input {} expecting set {!r}".format(
+                self.getTokenErrorDisplay(e.token),
+                e.expecting
+                )
+
+        elif isinstance(e, MismatchedNotSetException):
+            msg = "mismatched input {} expecting set {!r}".format(
+                self.getTokenErrorDisplay(e.token),
+                e.expecting
+                )
+
+        elif isinstance(e, FailedPredicateException):
+            msg = "rule {} failed predicate: {{{}}}?".format(
+                e.ruleName,
+                e.predicateText
+                )
+
+        else:
+            msg = str(e)
+
+        return msg
+
+
+    def getNumberOfSyntaxErrors(self):
+        """
+        Get number of recognition errors (lexer, parser, tree parser).  Each
+        recognizer tracks its own number, so the parser and lexer each have
+        a separate count.  It does not count the spurious errors found between
+        an error and the next valid token match.
+
+        See also reportError().
+        """
+        return self._state.syntaxErrors
+
+
+    def getErrorHeader(self, e):
+        """
+        What is the error header, normally line/character position information?
+        """
+
+        source_name = self.getSourceName()
+        if source_name is not None:
+            return "{} line {}:{}".format(source_name, e.line, e.charPositionInLine)
+        return "line {}:{}".format(e.line, e.charPositionInLine)
+
+
+    def getTokenErrorDisplay(self, t):
+        """
+        How should a token be displayed in an error message? The default
+        is to display just the text, but during development you might
+        want to have a lot of information spit out.  Override in that case
+        to use str(t) (which, for CommonToken, dumps everything about
+        the token). This is better than forcing you to override a method in
+        your token objects because you don't have to go modify your lexer
+        so that it creates a new token class.
+        """
+
+        s = t.text
+        if s is None:
+            if t.type == EOF:
+                s = "<EOF>"
+            else:
+                s = "<{}>".format(t.typeName)
+
+        return repr(s)
+
+
+    def emitErrorMessage(self, msg):
+        """Override this method to change where error messages go"""
+        sys.stderr.write(msg + '\n')
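+
+    # Override sketch: a subclass of a generated recognizer (TParser is a
+    # hypothetical ANTLR-generated parser) can collect messages instead of
+    # writing them to stderr:
+    #
+    #   class CollectingParser(TParser):
+    #       def __init__(self, *args, **kwargs):
+    #           super().__init__(*args, **kwargs)
+    #           self.errors = []
+    #       def emitErrorMessage(self, msg):
+    #           self.errors.append(msg)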
+
+
+    def recover(self, input, re):
+        """
+        Recover from an error found on the input stream.  This is
+        for NoViableAlt and mismatched symbol exceptions.  If you enable
+        single token insertion and deletion, this will usually not
+        handle mismatched symbol exceptions but there could be a mismatched
+        token that the match() routine could not recover from.
+        """
+
+        # PROBLEM? what if input stream is not the same as last time
+        # perhaps make lastErrorIndex a member of input
+        if self._state.lastErrorIndex == input.index():
+            # uh oh, another error at same token index; must be a case
+            # where LT(1) is in the recovery token set so nothing is
+            # consumed; consume a single token so at least to prevent
+            # an infinite loop; this is a failsafe.
+            input.consume()
+
+        self._state.lastErrorIndex = input.index()
+        followSet = self.computeErrorRecoverySet()
+
+        self.beginResync()
+        self.consumeUntil(input, followSet)
+        self.endResync()
+
+
+    def beginResync(self):
+        """
+        A hook to listen in on the token consumption during error recovery.
+        The DebugParser subclasses this to fire events to the listener.
+        """
+
+        pass
+
+
+    def endResync(self):
+        """
+        A hook to listen in on the token consumption during error recovery.
+        The DebugParser subclasses this to fire events to the listener.
+        """
+
+        pass
+
+
+    def computeErrorRecoverySet(self):
+        """
+        Compute the error recovery set for the current rule.  During
+        rule invocation, the parser pushes the set of tokens that can
+        follow that rule reference on the stack; this amounts to
+        computing FIRST of what follows the rule reference in the
+        enclosing rule. This local follow set only includes tokens
+        from within the rule; i.e., the FIRST computation done by
+        ANTLR stops at the end of a rule.
+
+        EXAMPLE
+
+        When you find a "no viable alt exception", the input is not
+        consistent with any of the alternatives for rule r.  The best
+        thing to do is to consume tokens until you see something that
+        can legally follow a call to r *or* any rule that called r.
+        You don't want the exact set of viable next tokens because the
+        input might just be missing a token--you might consume the
+        rest of the input looking for one of the missing tokens.
+
+        Consider grammar:
+
+        a : '[' b ']'
+          | '(' b ')'
+          ;
+        b : c '^' INT ;
+        c : ID
+          | INT
+          ;
+
+        At each rule invocation, the set of tokens that could follow
+        that rule is pushed on a stack.  Here are the various "local"
+        follow sets:
+
+        FOLLOW(b1_in_a) = FIRST(']') = ']'
+        FOLLOW(b2_in_a) = FIRST(')') = ')'
+        FOLLOW(c_in_b) = FIRST('^') = '^'
+
+        Upon erroneous input "[]", the call chain is
+
+        a -> b -> c
+
+        and, hence, the follow context stack is:
+
+        depth  local follow set     after call to rule
+          0         <EOF>                    a (from main())
+          1          ']'                     b
+          2          '^'                     c
+
+        Notice that ')' is not included, because b would have to have
+        been called from a different context in rule a for ')' to be
+        included.
+
+        For error recovery, we cannot consider FOLLOW(c)
+        (context-sensitive or otherwise).  We need the combined set of
+        all context-sensitive FOLLOW sets--the set of all tokens that
+        could follow any reference in the call chain.  We need to
+        resync to one of those tokens.  Note that FOLLOW(c)='^' and if
+        we resync'd to that token, we'd consume until EOF.  We need to
+        sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+        In this case, for input "[]", LA(1) is in this set so we would
+        not consume anything and after printing an error rule c would
+        return normally.  It would not find the required '^' though.
+        At this point, it gets a mismatched token error and throws an
+        exception (since LA(1) is not in the viable following token
+        set).  The rule exception handler tries to recover, but finds
+        the same recovery set and doesn't consume anything.  Rule b
+        exits normally returning to rule a.  Now it finds the ']' (and
+        with the successful match exits errorRecovery mode).
+
+        So, you can see that the parser walks up the call chain looking
+        for the token that was a member of the recovery set.
+
+        Errors are not generated in errorRecovery mode.
+
+        ANTLR's error recovery mechanism is based upon original ideas:
+
+        "Algorithms + Data Structures = Programs" by Niklaus Wirth
+
+        and
+
+        "A note on error recovery in recursive descent parsers":
+        http://portal.acm.org/citation.cfm?id=947902.947905
+
+        Later, Josef Grosch had some good ideas:
+
+        "Efficient and Comfortable Error Recovery in Recursive Descent
+        Parsers":
+        ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+
+        Like Grosch I implemented local FOLLOW sets that are combined
+        at run-time upon error to avoid overhead during parsing.
+        """
+
+        return self.combineFollows(False)
+
+
+    def computeContextSensitiveRuleFOLLOW(self):
+        """
+        Compute the context-sensitive FOLLOW set for current rule.
+        This is set of token types that can follow a specific rule
+        reference given a specific call chain.  You get the set of
+        viable tokens that can possibly come next (lookahead depth 1)
+        given the current call chain.  Contrast this with the
+        definition of plain FOLLOW for rule r:
+
+         FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
+
+        where x in T* and alpha, beta in V*; T is set of terminals and
+        V is the set of terminals and nonterminals.  In other words,
+        FOLLOW(r) is the set of all tokens that can possibly follow
+        references to r in *any* sentential form (context).  At
+        runtime, however, we know precisely which context applies as
+        we have the call chain.  We may compute the exact (rather
+        than covering superset) set of following tokens.
+
+        For example, consider grammar:
+
+        stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
+             | "return" expr '.'
+             ;
+        expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
+        atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
+             | '(' expr ')'
+             ;
+
+        The FOLLOW sets are all inclusive whereas context-sensitive
+        FOLLOW sets are precisely what could follow a rule reference.
+        For input "i=(3);", here is the derivation:
+
+        stat => ID '=' expr ';'
+             => ID '=' atom ('+' atom)* ';'
+             => ID '=' '(' expr ')' ('+' atom)* ';'
+             => ID '=' '(' atom ')' ('+' atom)* ';'
+             => ID '=' '(' INT ')' ('+' atom)* ';'
+             => ID '=' '(' INT ')' ';'
+
+        At the "3" token, you'd have a call chain of
+
+          stat -> expr -> atom -> expr -> atom
+
+        What can follow that specific nested ref to atom?  Exactly ')'
+        as you can see by looking at the derivation of this specific
+        input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
+
+        You want the exact viable token set when recovering from a
+        token mismatch.  Upon token mismatch, if LA(1) is member of
+        the viable next token set, then you know there is most likely
+        a missing token in the input stream.  "Insert" one by just not
+        throwing an exception.
+        """
+
+        return self.combineFollows(True)
+
+
+    def combineFollows(self, exact):
+        followSet = set()
+        for idx, localFollowSet in reversed(list(enumerate(self._state.following))):
+            followSet |= localFollowSet
+            if exact:
+                # can we see end of rule?
+                if EOR_TOKEN_TYPE in localFollowSet:
+                    # Only leave EOR in set if at top (start rule); this lets
+                    # us know if we have to include follow(start rule); i.e., EOF
+                    if idx > 0:
+                        followSet.remove(EOR_TOKEN_TYPE)
+
+                else:
+                    # can't see end of rule, quit
+                    break
+
+        return followSet
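+
+    # A small illustration of the combination on plain sets, mirroring the
+    # docstring example in computeErrorRecoverySet() (RBRACK and CARET are
+    # hypothetical token types standing in for ']' and '^'):
+    #
+    #   following = [{EOR_TOKEN_TYPE}, {RBRACK}, {CARET}]   # chain a -> b -> c
+    #   recovery = set()
+    #   for local in reversed(following):                   # walk up the chain
+    #       recovery |= local
+    #   # recovery == {EOR_TOKEN_TYPE, RBRACK, CARET}; resync to ']' or '^'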
+
+
+    def recoverFromMismatchedToken(self, input, ttype, follow):
+        """Attempt to recover from a single missing or extra token.
+
+        EXTRA TOKEN
+
+        LA(1) is not what we are looking for.  If LA(2) has the right token,
+        however, then assume LA(1) is some extra spurious token.  Delete it
+        and use LA(2) as if we were doing a normal match(), which advances
+        the input.
+
+        MISSING TOKEN
+
+        If current token is consistent with what could come after
+        ttype, then it is ok to 'insert' the missing token, else throw an
+        exception.  For example, input 'i=(3;' is clearly missing the
+        ')'.  When the parser returns from the nested call to expr, it
+        will have call chain:
+
+          stat -> expr -> atom
+
+        and it will be trying to match the ')' at this point in the
+        derivation:
+
+             => ID '=' '(' INT ')' ('+' atom)* ';'
+                                ^
+        match() will see that ';' doesn't match ')' and report a
+        mismatched token error.  To recover, it sees that LA(1)==';'
+        is in the set of tokens that can follow the ')' token
+        reference in rule atom.  It can assume that you forgot the ')'.
+        """
+
+        e = None
+
+        # if next token is what we are looking for then "delete" this token
+        if self.mismatchIsUnwantedToken(input, ttype):
+            e = UnwantedTokenException(ttype, input)
+
+            self.beginResync()
+            input.consume() # simply delete extra token
+            self.endResync()
+
+            # report after consuming so AW sees the token in the exception
+            self.reportError(e)
+
+            # we want to return the token we're actually matching
+            matchedSymbol = self.getCurrentInputSymbol(input)
+
+            # move past ttype token as if all were ok
+            input.consume()
+            return matchedSymbol
+
+        # can't recover with single token deletion, try insertion
+        if self.mismatchIsMissingToken(input, follow):
+            inserted = self.getMissingSymbol(input, e, ttype, follow)
+            e = MissingTokenException(ttype, input, inserted)
+
+            # report after inserting so AW sees the token in the exception
+            self.reportError(e)
+            return inserted
+
+        # even that didn't work; must throw the exception
+        e = MismatchedTokenException(ttype, input)
+        raise e
+
+
+    def recoverFromMismatchedSet(self, input, e, follow):
+        """Not currently used"""
+
+        if self.mismatchIsMissingToken(input, follow):
+            self.reportError(e)
+            # we don't know how to conjure up a token for sets yet
+            return self.getMissingSymbol(input, e, INVALID_TOKEN_TYPE, follow)
+
+        # TODO do single token deletion like above for Token mismatch
+        raise e
+
+
+    def getCurrentInputSymbol(self, input):
+        """
+        Match needs to return the current input symbol, which gets put
+        into the label for the associated token ref; e.g., x=ID.  Token
+        and tree parsers need to return different objects. Rather than test
+        for input stream type or change the IntStream interface, I use
+        a simple method to ask the recognizer to tell me what the current
+        input symbol is.
+
+        This is ignored for lexers.
+        """
+
+        return None
+
+
+    def getMissingSymbol(self, input, e, expectedTokenType, follow):
+        """Conjure up a missing token during error recovery.
+
+        The recognizer attempts to recover from single missing
+        symbols. But, actions might refer to that missing symbol.
+        For example, x=ID {f($x);}. The action clearly assumes
+        that there has been an identifier matched previously and that
+        $x points at that token. If that token is missing, but
+        the next token in the stream is what we want we assume that
+        this token is missing and we keep going. Because we
+        have to return some token to replace the missing token,
+        we have to conjure one up. This method gives the user control
+        over the tokens returned for missing tokens. Mostly,
+        you will want to create something special for identifier
+        tokens. For literals such as '{' and ',', the default
+        action in the parser or tree parser works. It simply creates
+        a CommonToken of the appropriate type. The text will be the token.
+        If you change what tokens must be created by the lexer,
+        override this method to create the appropriate tokens.
+        """
+
+        return None
+
+
+    def consumeUntil(self, input, tokenTypes):
+        """
+        Consume tokens until one matches the given token or token set
+
+        tokenTypes can be a single token type or a set of token types
+
+        """
+
+        if not isinstance(tokenTypes, (set, frozenset)):
+            tokenTypes = frozenset([tokenTypes])
+
+        ttype = input.LA(1)
+        while ttype != EOF and ttype not in tokenTypes:
+            input.consume()
+            ttype = input.LA(1)
+
+
+    def getRuleInvocationStack(self):
+        """
+        Return List<String> of the rules in your parser instance
+        leading up to a call to this method.  You could override if
+        you want more details such as the file/line info of where
+        in the parser java code a rule is invoked.
+
+        This is very useful for error messages and for context-sensitive
+        error recovery.
+
+        You must be careful if you subclass a generated recognizer.
+        The default implementation will only search the module of self
+        for rules, but the subclass will not contain any rules.
+        You probably want to override this method to look like
+
+        def getRuleInvocationStack(self):
+            return self._getRuleInvocationStack(<class>.__module__)
+
+        where <class> is the class of the generated recognizer, e.g.
+        the superclass of self.
+        """
+
+        return self._getRuleInvocationStack(self.__module__)
+
+
+    @classmethod
+    def _getRuleInvocationStack(cls, module):
+        """
+        A more general version of getRuleInvocationStack where you can
+        pass in, for example, a RecognitionException to get its rule
+        stack trace.  This routine is shared by all recognizers, hence
+        it is a classmethod.
+
+        TODO: move to a utility class or something; weird having lexer call
+        this
+        """
+
+        # mmmhhh,... perhaps look at the first argument
+        # (f_locals[co_varnames[0]]?) and test if it's a (sub)class of
+        # requested recognizer...
+
+        rules = []
+        for frame in reversed(inspect.stack()):
+            code = frame[0].f_code
+            codeMod = inspect.getmodule(code)
+            if codeMod is None:
+                continue
+
+            # skip frames not in requested module
+            if codeMod.__name__ != module:
+                continue
+
+            # skip some unwanted names
+            if code.co_name in ('nextToken', '<module>'):
+                continue
+
+            rules.append(code.co_name)
+
+        return rules
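+
+    # Concrete form of the override suggested in getRuleInvocationStack()'s
+    # docstring (TParser is a hypothetical ANTLR-generated parser):
+    #
+    #   class MyParser(TParser):
+    #       def getRuleInvocationStack(self):
+    #           return self._getRuleInvocationStack(TParser.__module__)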
+
+
+    def getBacktrackingLevel(self):
+        return self._state.backtracking
+
+    def setBacktrackingLevel(self, n):
+        self._state.backtracking = n
+
+
+    def getGrammarFileName(self):
+        """For debugging and other purposes, might want the grammar name.
+
+        Have ANTLR generate an implementation for this method.
+        """
+
+        return self.grammarFileName
+
+
+    def getSourceName(self):
+        raise NotImplementedError
+
+
+    def toStrings(self, tokens):
+        """A convenience method for use most often with template rewrites.
+
+        Convert a Token list to a str list.
+        """
+
+        if tokens is None:
+            return None
+
+        return [token.text for token in tokens]
+
+
+    def getRuleMemoization(self, ruleIndex, ruleStartIndex):
+        """
+        Given a rule number and a start token index number, return
+        MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
+        start index.  If this rule has parsed input starting from the
+        start index before, then return where the rule stopped parsing.
+        It returns the index of the last token matched by the rule.
+        """
+
+        if ruleIndex not in self._state.ruleMemo:
+            self._state.ruleMemo[ruleIndex] = {}
+
+        return self._state.ruleMemo[ruleIndex].get(
+            ruleStartIndex, self.MEMO_RULE_UNKNOWN
+            )
+
+
+    def alreadyParsedRule(self, input, ruleIndex):
+        """
+        Has this rule already parsed input at the current index in the
+        input stream?  Return the stop token index or MEMO_RULE_UNKNOWN.
+        If we attempted but failed to parse properly before, return
+        MEMO_RULE_FAILED.
+
+        This method has a side-effect: if we have seen this input for
+        this rule and successfully parsed before, then seek ahead to
+        1 past the stop token matched for this rule last time.
+        """
+
+        stopIndex = self.getRuleMemoization(ruleIndex, input.index())
+        if stopIndex == self.MEMO_RULE_UNKNOWN:
+            return False
+
+        if stopIndex == self.MEMO_RULE_FAILED:
+            raise BacktrackingFailed
+
+        else:
+            input.seek(stopIndex + 1)
+
+        return True
+
+
+    def memoize(self, input, ruleIndex, ruleStartIndex, success):
+        """
+        Record whether or not this rule parsed the input at this position
+        successfully.
+        """
+
+        if success:
+            stopTokenIndex = input.index() - 1
+        else:
+            stopTokenIndex = self.MEMO_RULE_FAILED
+
+        if ruleIndex in self._state.ruleMemo:
+            self._state.ruleMemo[ruleIndex][ruleStartIndex] = stopTokenIndex
+
+
+    def traceIn(self, ruleName, ruleIndex, inputSymbol):
+        sys.stdout.write("enter {} {}".format(ruleName, inputSymbol))
+
+        if self._state.backtracking > 0:
+            sys.stdout.write(" backtracking={}".format(self._state.backtracking))
+
+        sys.stdout.write('\n')
+
+
+    def traceOut(self, ruleName, ruleIndex, inputSymbol):
+        sys.stdout.write("exit {} {}".format(ruleName, inputSymbol))
+
+        if self._state.backtracking > 0:
+            sys.stdout.write(" backtracking={}".format(self._state.backtracking))
+
+        # mmmm... we use BacktrackingFailed exceptions now. So how could we
+        # get that information here?
+        #if self._state.failed:
+        #    sys.stdout.write(" failed")
+        #else:
+        #    sys.stdout.write(" succeeded")
+
+        sys.stdout.write('\n')
+
+
+class TokenSource(object):
+    """
+    @brief Abstract baseclass for token producers.
+
+    A source of tokens must provide a sequence of tokens via nextToken()
+    and also must reveal its source of characters; CommonToken's text is
+    computed from a CharStream; it only stores indices into the char stream.
+
+    Errors from the lexer are never passed to the parser.  Either you want
+    to keep going or you do not upon token recognition error.  If you do not
+    want to continue lexing then you do not want to continue parsing.  Just
+    throw an exception that is not a RecognitionException and it will naturally
+    propagate all the way out of the recognizers.  If you want to continue
+    lexing then you should not throw an exception to the parser--it has already
+    requested a token.  Keep lexing until you get a valid one.  Just report
+    errors and keep going, looking for a valid token.
+    """
+
+    def nextToken(self):
+        """Return a Token object from your input stream (usually a CharStream).
+
+        Do not fail/return upon lexing error; keep chewing on the characters
+        until you get a good one; errors are not passed through to the parser.
+        """
+
+        raise NotImplementedError
+
+
+    def __iter__(self):
+        """The TokenSource is an interator.
+
+        The iteration will not include the final EOF token, see also the note
+        for the __next__() method.
+
+        """
+
+        return self
+
+
+    def __next__(self):
+        """Return next token or raise StopIteration.
+
+        Note that this will raise StopIteration when hitting the EOF token,
+        so EOF will not be part of the iteration.
+
+        """
+
+        token = self.nextToken()
+        if token is None or token.type == EOF:
+            raise StopIteration
+        return token
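+
+# Usage sketch: any TokenSource (e.g. a generated lexer) is directly iterable;
+# iteration stops at EOF.  TLexer is hypothetical and ANTLRStringStream is
+# assumed from this runtime's streams module:
+#
+#   lexer = TLexer(ANTLRStringStream("a = 1;"))
+#   for token in lexer:
+#       print(token)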
+
+
+class Lexer(BaseRecognizer, TokenSource):
+    """
+    @brief Baseclass for generated lexer classes.
+
+    A lexer is a recognizer that draws input symbols from a character stream.
+    Lexer grammars result in a subclass of this object. A Lexer object
+    uses simplified match() and error recovery mechanisms in the interest
+    of speed.
+    """
+
+    def __init__(self, input, state=None):
+        BaseRecognizer.__init__(self, state)
+        TokenSource.__init__(self)
+
+        # Where is the lexer drawing characters from?
+        self.input = input
+
+
+    def reset(self):
+        super().reset() # reset all recognizer state variables
+
+        if self.input is not None:
+            # rewind the input
+            self.input.seek(0)
+
+        if self._state is None:
+            # no shared state work to do
+            return
+
+        # whack Lexer state variables
+        self._state.token = None
+        self._state.type = INVALID_TOKEN_TYPE
+        self._state.channel = DEFAULT_CHANNEL
+        self._state.tokenStartCharIndex = -1
+        self._state.tokenStartLine = -1
+        self._state.tokenStartCharPositionInLine = -1
+        self._state.text = None
+
+
+    def makeEOFToken(self):
+        eof = CommonToken(
+            type=EOF, channel=DEFAULT_CHANNEL,
+            input=self.input,
+            start=self.input.index(), stop=self.input.index())
+        eof.line = self.input.line
+        eof.charPositionInLine = self.input.charPositionInLine
+        return eof
+
+    def nextToken(self):
+        """
+        Return a token from this source; i.e., match a token on the char
+        stream.
+        """
+
+        while True:
+            self._state.token = None
+            self._state.channel = DEFAULT_CHANNEL
+            self._state.tokenStartCharIndex = self.input.index()
+            self._state.tokenStartCharPositionInLine = self.input.charPositionInLine
+            self._state.tokenStartLine = self.input.line
+            self._state.text = None
+            if self.input.LA(1) == EOF:
+                return self.makeEOFToken()
+
+            try:
+                self.mTokens()
+
+                if self._state.token is None:
+                    self.emit()
+
+                elif self._state.token == SKIP_TOKEN:
+                    continue
+
+                return self._state.token
+
+            except NoViableAltException as re:
+                self.reportError(re)
+                self.recover(re) # throw out current char and try again
+
+            except RecognitionException as re:
+                self.reportError(re)
+                # match() routine has already called recover()
+
+
+    def skip(self):
+        """
+        Instruct the lexer to skip creating a token for current lexer rule
+        and look for another token.  nextToken() knows to keep looking when
+        a lexer rule finishes with token set to SKIP_TOKEN.  Recall that
+        if token is None at the end of any token rule, it creates one for you
+        and emits it.
+        """
+
+        self._state.token = SKIP_TOKEN
+
+
+    def mTokens(self):
+        """This is the lexer entry point that sets instance var 'token'"""
+
+        # abstract method
+        raise NotImplementedError
+
+
+    def setCharStream(self, input):
+        """Set the char stream and reset the lexer"""
+        self.input = None
+        self.reset()
+        self.input = input
+
+
+    def getSourceName(self):
+        return self.input.getSourceName()
+
+
+    def emit(self, token=None):
+        """
+        The standard method called to automatically emit a token at the
+        outermost lexical rule.  The token object should point into the
+        char buffer start..stop.  If there is a text override in 'text',
+        use that to set the token's text.  Override this method to emit
+        custom Token objects.
+
+        If you are building trees, then you should also override
+        Parser or TreeParser.getMissingSymbol().
+        """
+
+        if token is None:
+            token = CommonToken(
+                input=self.input,
+                type=self._state.type,
+                channel=self._state.channel,
+                start=self._state.tokenStartCharIndex,
+                stop=self.getCharIndex()-1
+                )
+            token.line = self._state.tokenStartLine
+            token.text = self._state.text
+            token.charPositionInLine = self._state.tokenStartCharPositionInLine
+
+        self._state.token = token
+
+        return token
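+
+    # Override sketch (TLexer is a hypothetical ANTLR-generated lexer): reuse
+    # the default construction above and just observe each emitted token:
+    #
+    #   class TracingLexer(TLexer):
+    #       def emit(self, token=None):
+    #           token = super().emit(token)
+    #           sys.stderr.write("emitted: {}\n".format(token))
+    #           return token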
+
+
+    def match(self, s):
+        if isinstance(s, str):
+            for c in s:
+                if self.input.LA(1) != ord(c):
+                    if self._state.backtracking > 0:
+                        raise BacktrackingFailed
+
+                    mte = MismatchedTokenException(c, self.input)
+                    self.recover(mte)
+                    raise mte
+
+                self.input.consume()
+
+        else:
+            if self.input.LA(1) != s:
+                if self._state.backtracking > 0:
+                    raise BacktrackingFailed
+
+                mte = MismatchedTokenException(chr(s), self.input)
+                self.recover(mte) # don't really recover; just consume in lexer
+                raise mte
+
+            self.input.consume()
+
+
+    def matchAny(self):
+        self.input.consume()
+
+
+    def matchRange(self, a, b):
+        if self.input.LA(1) < a or self.input.LA(1) > b:
+            if self._state.backtracking > 0:
+                raise BacktrackingFailed
+
+            mre = MismatchedRangeException(chr(a), chr(b), self.input)
+            self.recover(mre)
+            raise mre
+
+        self.input.consume()
+
+
+    def getLine(self):
+        return self.input.line
+
+
+    def getCharPositionInLine(self):
+        return self.input.charPositionInLine
+
+
+    def getCharIndex(self):
+        """What is the index of the current character of lookahead?"""
+
+        return self.input.index()
+
+
+    def getText(self):
+        """
+        Return the text matched so far for the current token or any
+        text override.
+        """
+        if self._state.text is not None:
+            return self._state.text
+
+        return self.input.substring(
+            self._state.tokenStartCharIndex,
+            self.getCharIndex()-1
+            )
+
+
+    def setText(self, text):
+        """
+        Set the complete text of this token; it wipes any previous
+        changes to the text.
+        """
+        self._state.text = text
+
+
+    text = property(getText, setText)
+
+
+    def reportError(self, e):
+        ## TODO: not thought about recovery in lexer yet.
+
+        ## # if we've already reported an error and have not matched a token
+        ## # yet successfully, don't report any errors.
+        ## if self.errorRecovery:
+        ##     return
+        ##
+        ## self.errorRecovery = True
+
+        self.displayRecognitionError(e)
+
+
+    def getErrorMessage(self, e):
+        msg = None
+
+        if isinstance(e, MismatchedTokenException):
+            msg = "mismatched character {} expecting {}".format(
+                self.getCharErrorDisplay(e.c),
+                self.getCharErrorDisplay(e.expecting))
+
+        elif isinstance(e, NoViableAltException):
+            msg = "no viable alternative at character {}".format(
+                self.getCharErrorDisplay(e.c))
+
+        elif isinstance(e, EarlyExitException):
+            msg = "required (...)+ loop did not match anything at character {}".format(
+                self.getCharErrorDisplay(e.c))
+
+        elif isinstance(e, MismatchedNotSetException):
+            msg = "mismatched character {} expecting set {!r}".format(
+                self.getCharErrorDisplay(e.c),
+                e.expecting)
+
+        elif isinstance(e, MismatchedSetException):
+            msg = "mismatched character {} expecting set {!r}".format(
+                self.getCharErrorDisplay(e.c),
+                e.expecting)
+
+        elif isinstance(e, MismatchedRangeException):
+            msg = "mismatched character {} expecting set {}..{}".format(
+                self.getCharErrorDisplay(e.c),
+                self.getCharErrorDisplay(e.a),
+                self.getCharErrorDisplay(e.b))
+
+        else:
+            msg = super().getErrorMessage(e)
+
+        return msg
+
+
+    def getCharErrorDisplay(self, c):
+        if c == EOF:
+            c = '<EOF>'
+        return repr(c)
+
+
+    def recover(self, re):
+        """
+        A lexer can normally match any char in its vocabulary after matching
+        a token, so do the easy thing and just kill a character and hope
+        it all works out.  You can instead use the rule invocation stack
+        to do sophisticated error recovery if you are in a fragment rule.
+        """
+
+        self.input.consume()
+
+
+    def traceIn(self, ruleName, ruleIndex):
+        inputSymbol = "{} line={}:{}".format(self.input.LT(1),
+                                             self.getLine(),
+                                             self.getCharPositionInLine()
+                                             )
+
+        super().traceIn(ruleName, ruleIndex, inputSymbol)
+
+
+    def traceOut(self, ruleName, ruleIndex):
+        inputSymbol = "{} line={}:{}".format(self.input.LT(1),
+                                             self.getLine(),
+                                             self.getCharPositionInLine()
+                                             )
+
+        super().traceOut(ruleName, ruleIndex, inputSymbol)
+
+
+
+class Parser(BaseRecognizer):
+    """
+    @brief Baseclass for generated parser classes.
+    """
+
+    def __init__(self, lexer, state=None):
+        super().__init__(state)
+
+        self.input = lexer
+
+
+    def reset(self):
+        super().reset() # reset all recognizer state variables
+        if self.input is not None:
+            self.input.seek(0) # rewind the input
+
+
+    def getCurrentInputSymbol(self, input):
+        return input.LT(1)
+
+
+    def getMissingSymbol(self, input, e, expectedTokenType, follow):
+        if expectedTokenType == EOF:
+            tokenText = "<missing EOF>"
+        else:
+            tokenText = "<missing {}>".format(self.tokenNames[expectedTokenType])
+        t = CommonToken(type=expectedTokenType, text=tokenText)
+        current = input.LT(1)
+        if current.type == EOF:
+            current = input.LT(-1)
+
+        if current is not None:
+            t.line = current.line
+            t.charPositionInLine = current.charPositionInLine
+        t.channel = DEFAULT_CHANNEL
+        return t
+
+
+    def setTokenStream(self, input):
+        """Set the token stream and reset the parser"""
+
+        self.input = None
+        self.reset()
+        self.input = input
+
+
+    def getTokenStream(self):
+        return self.input
+
+
+    def getSourceName(self):
+        return self.input.getSourceName()
+
+
+    def traceIn(self, ruleName, ruleIndex):
+        super().traceIn(ruleName, ruleIndex, self.input.LT(1))
+
+
+    def traceOut(self, ruleName, ruleIndex):
+        super().traceOut(ruleName, ruleIndex, self.input.LT(1))
+
+
+class RuleReturnScope(object):
+    """
+    Rules can return start/stop info as well as possible trees and templates.
+    """
+
+    def getStart(self):
+        """Return the start token or tree."""
+        return None
+
+
+    def getStop(self):
+        """Return the stop token or tree."""
+        return None
+
+
+    def getTree(self):
+        """Has a value potentially if output=AST."""
+        return None
+
+
+    def getTemplate(self):
+        """Has a value potentially if output=template."""
+        return None
+
+
+class ParserRuleReturnScope(RuleReturnScope):
+    """
+    Rules that return more than a single value must return an object
+    containing all the values.  Besides the properties defined in
+    RuleLabelScope.predefinedRulePropertiesScope there may be user-defined
+    return values.  This class simply defines the minimum properties that
+    are always defined and methods to access the others that might be
+    available depending on output option such as template and tree.
+
+    Note text is not an actual property of the return value, it is computed
+    from start and stop using the input stream's toString() method.  I
+    could add a ctor to this so that we can pass in and store the input
+    stream, but I'm not sure we want to do that.  It would seem to be undefined
+    to get the .text property anyway if the rule matches tokens from multiple
+    input streams.
+
+    I do not use getters for fields of objects that are used simply to
+    group values such as this aggregate.  The getters/setters are there to
+    satisfy the superclass interface.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.start = None
+        self.stop = None
+        self.tree = None  # only used when output=AST
+
+
+    def getStart(self):
+        return self.start
+
+
+    def getStop(self):
+        return self.stop
+
+
+    def getTree(self):
+        return self.tree
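+
+# Usage sketch: a generated rule method returns an object derived from
+# ParserRuleReturnScope; start/stop/tree are the fields defined above
+# (TParser and the rule name stat are hypothetical):
+#
+#   ret = TParser(tokens).stat()
+#   first, last = ret.start, ret.stop   # boundary tokens of the match
+#   ast = ret.tree                      # populated only when output=AST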
diff --git a/runtime/Python3/antlr3/streams.py b/runtime/Python3/antlr3/streams.py
new file mode 100644
index 0000000..069755b
--- /dev/null
+++ b/runtime/Python3/antlr3/streams.py
@@ -0,0 +1,1460 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2012 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+from io import StringIO
+
+from .constants import DEFAULT_CHANNEL, EOF
+from .tokens import Token
+
+
+############################################################################
+#
+# basic interfaces
+#   IntStream
+#    +- CharStream
+#    \- TokenStream
+#
+# subclasses must implement all methods
+#
+############################################################################
+
+class IntStream(object):
+    """
+    @brief Base interface for streams of integer values.
+
+    A simple stream of integers used when all I care about is the char
+    or token type sequence (such as interpretation).
+    """
+
+    def consume(self):
+        raise NotImplementedError
+
+
+    def LA(self, i):
+        """Get int at current input pointer + i ahead where i=1 is next int.
+
+        Negative indexes are allowed.  LA(-1) is previous token (token
+        just matched).  LA(-i) where i is before first token should
+        yield -1, invalid char / EOF.
+        """
+
+        raise NotImplementedError
+
+
+    def mark(self):
+        """
+        Tell the stream to start buffering if it hasn't already.  Return
+        current input position, index(), or some other marker so that
+        when passed to rewind() you get back to the same spot.
+        rewind(mark()) should not affect the input cursor.  The Lexer
+        tracks line/col info as well as input index, so its markers are
+        not pure input indexes.  Same for tree node streams.
+        """
+
+        raise NotImplementedError
+
+
+    def index(self):
+        """
+        Return the current input symbol index 0..n where n indicates the
+        last symbol has been read.  The index is that of the symbol about to
+        be read, not the most recently read symbol.
+        """
+
+        raise NotImplementedError
+
+
+    def rewind(self, marker=None):
+        """
+        Reset the stream so that next call to index would return marker.
+        The marker will usually be index() but it doesn't have to be.  It's
+        just a marker to indicate what state the stream was in.  This is
+        essentially calling release() and seek().  If there are markers
+        created after this marker argument, this routine must unroll them
+        like a stack.  Assume the state the stream was in when this marker
+        was created.
+
+        If marker is None:
+        Rewind to the input position of the last marker.
+        Used currently only after a cyclic DFA and just
+        before starting a sem/syn predicate to get the
+        input position back to the start of the decision.
+        Do not "pop" the marker off the state.  mark(i)
+        and rewind(i) should balance still. It is
+        like invoking rewind(last marker) but it should not "pop"
+        the marker off.  It's like seek(last marker's input position).
+        """
+
+        raise NotImplementedError
+
+
+    def release(self, marker=None):
+        """
+        You may want to commit to a backtrack but don't want to force the
+        stream to keep bookkeeping objects around for a marker that is
+        no longer necessary.  This will have the same behavior as
+        rewind() except it releases resources without the backward seek.
+        This must throw away resources for all markers back to the marker
+        argument.  So if you're nested 5 levels of mark(), and then release(2)
+        you have to release resources for depths 2..5.
+        """
+
+        raise NotImplementedError
+
+
+    def seek(self, index):
+        """
+        Set the input cursor to the position indicated by index.  This is
+        normally used to seek ahead in the input stream.  No buffering is
+        required to do this unless you know your stream will use seek to
+        move backwards such as when backtracking.
+
+        This is different from rewind in its multi-directional
+        requirement and in that its argument is strictly an input cursor
+        (index).
+
+        For char streams, seeking forward must update the stream state such
+        as line number.  For seeking backwards, you will be presumably
+        backtracking using the mark/rewind mechanism that restores state and
+        so this method does not need to update state when seeking backwards.
+
+        Currently, this method is only used for efficient backtracking using
+        memoization, but in the future it may be used for incremental parsing.
+
+        The index is 0..n-1.  A seek to position i means that LA(1) will
+        return the ith symbol.  So, seeking to 0 means LA(1) will return the
+        first element in the stream.
+        """
+
+        raise NotImplementedError
+
+
+    def size(self):
+        """
+        Only makes sense for streams that buffer everything up probably, but
+        might be useful to display the entire stream or for testing.  This
+        value includes a single EOF.
+        """
+
+        raise NotImplementedError
+
+
+    def getSourceName(self):
+        """
+        Where are you getting symbols from?  Normally, implementations will
+        pass the buck all the way to the lexer who can ask its input stream
+        for the file name or whatever.
+        """
+
+        raise NotImplementedError
+
+
+class CharStream(IntStream):
+    """
+    @brief A source of characters for an ANTLR lexer.
+
+    This is an abstract class that must be implemented by a subclass.
+
+    """
+
+    # pylint does not realize that this is an interface, too
+    #pylint: disable-msg=W0223
+
+    EOF = -1
+
+    def __init__(self):
+        # line number 1..n within the input
+        self._line = 1
+
+        # The index of the character relative to the beginning of the
+        # line 0..n-1
+        self._charPositionInLine = 0
+
+
+    def substring(self, start, stop):
+        """
+        For infinite streams, you don't need this; primarily I'm providing
+        a useful interface for action code.  Just make sure actions don't
+        use this on streams that don't support it.
+        """
+
+        raise NotImplementedError
+
+
+    def LT(self, i):
+        """
+        Get the ith character of lookahead.  This is the same usually as
+        LA(i).  This will be used for labels in the generated
+        lexer code.  I'd prefer to return a char here type-wise, but it's
+        probably better to be 32-bit clean and be consistent with LA.
+        """
+
+        raise NotImplementedError
+
+
+    @property
+    def line(self):
+        """ANTLR tracks the line information automatically"""
+        return self._line
+
+    @line.setter
+    def line(self, value):
+        """
+        Because this stream can rewind, we need to be able to reset the line
+        """
+        self._line = value
+
+
+    @property
+    def charPositionInLine(self):
+        """
+        The index of the character relative to the beginning of the line 0..n-1
+        """
+        return self._charPositionInLine
+
+    @charPositionInLine.setter
+    def charPositionInLine(self, pos):
+        self._charPositionInLine = pos
+
+
+class TokenStream(IntStream):
+    """
+
+    @brief A stream of tokens accessing tokens from a TokenSource
+
+    This is an abstract class that must be implemented by a subclass.
+
+    """
+
+    # pylint does not realize that this is an interface, too
+    #pylint: disable-msg=W0223
+
+    def LT(self, k):
+        """
+        Get the Token at current input pointer + k ahead, where k=1 is the
+        next Token.  k<0 indicates tokens in the past; so -1 is the previous
+        token and -2 is two tokens ago.  LT(0) is undefined.  For k>=n,
+        return the EOF token.  Return None for LT(0) and any index that
+        results in an absolute address that is negative.
+        """
+
+        raise NotImplementedError
+
+
+    def range(self):
+        """
+        How far ahead has the stream been asked to look?  The return
+        value is a valid index from 0..n-1.
+        """
+
+        raise NotImplementedError
+
+
+    def get(self, i):
+        """
+        Get a token at an absolute index i; 0..n-1.  This is really only
+        needed for profiling and debugging and token stream rewriting.
+        If you don't want to buffer up tokens, then this method makes no
+        sense for you.  Naturally you can't use the rewrite stream feature.
+        I believe DebugTokenStream can easily be altered to not use
+        this method, removing the dependency.
+        """
+
+        raise NotImplementedError
+
+
+    def getTokenSource(self):
+        """
+        Where is this stream pulling tokens from?  This is not the name, but
+        the object that provides Token objects.
+        """
+
+        raise NotImplementedError
+
+
+    def toString(self, start=None, stop=None):
+        """
+        Return the text of all tokens from start to stop, inclusive.
+        If the stream does not buffer all the tokens then it can just
+        return "" or null;  Users should not access $ruleLabel.text in
+        an action of course in that case.
+
+        Because the user is not required to use a token with an index stored
+        in it, we must provide a means for two token objects themselves to
+        indicate the start/end location.  Most often this will just delegate
+        to the other toString(int,int).  This is also parallel with
+        the TreeNodeStream.toString(Object,Object).
+        """
+
+        raise NotImplementedError
+
+
+############################################################################
+#
+# character streams for use in lexers
+#   CharStream
+#   \- ANTLRStringStream
+#
+############################################################################
+
+
+class ANTLRStringStream(CharStream):
+    """
+    @brief CharStream that pulls data from a unicode string.
+
+    A pretty quick CharStream that pulls all data from an array
+    directly.  Every method call counts in the lexer.
+
+    """
+
+
+    def __init__(self, data):
+        """
+        @param data This should be a unicode string holding the data you want
+        to parse. If you pass in a byte string, the Lexer will choke on
+        non-ascii data.
+        """
+
+        super().__init__()
+
+        # The data being scanned
+        self.strdata = str(data)
+        self.data = [ord(c) for c in self.strdata]
+
+        # How many characters are actually in the buffer
+        self.n = len(data)
+
+        # 0..n-1 index into string of next char
+        self.p = 0
+
+        # A list of CharStreamState objects that tracks the stream state
+        # values line, charPositionInLine, and p that can change as you
+        # move through the input stream.  Indexed from 0..markDepth-1.
+        self._markers = [ ]
+        self.lastMarker = None
+        self.markDepth = 0
+
+        # What is name or source of this char stream?
+        self.name = None
+
+
+    def reset(self):
+        """
+        Reset the stream so that it's in the same state it was
+        when the object was created *except* the data array is not
+        touched.
+        """
+
+        self.p = 0
+        self._line = 1
+        self.charPositionInLine = 0
+        self._markers = [ ]
+        self.lastMarker = None
+        self.markDepth = 0
+
+
+    def consume(self):
+        if self.p < self.n:
+            if self.data[self.p] == 10: # ord('\n')
+                self._line += 1
+                self.charPositionInLine = 0
+            else:
+                self.charPositionInLine += 1
+
+            self.p += 1
+
+        # else we reached EOF
+        # just do nothing
+
+
+    def LA(self, i):
+        if i == 0:
+            return 0 # undefined
+
+        if i < 0:
+            i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
+
+        if self.p + i - 1 < self.n:
+            return self.data[self.p + i - 1]
+        else:
+            return EOF
+
+
+
+    def LT(self, i):
+        if i == 0:
+            return 0 # undefined
+
+        if i < 0:
+            i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
+
+        if self.p + i - 1 < self.n:
+            return self.strdata[self.p + i - 1]
+        else:
+            return EOF
+
+
+    def index(self):
+        """
+        Return the current input symbol index 0..n where n indicates the
+        last symbol has been read.  The index is the index of char to
+        be returned from LA(1).
+        """
+
+        return self.p
+
+
+    def size(self):
+        return self.n
+
+
+    def mark(self):
+        state = (self.p, self.line, self.charPositionInLine)
+        if self.markDepth < len(self._markers):
+            self._markers[self.markDepth] = state
+        else:
+            self._markers.append(state)
+        self.markDepth += 1
+
+        self.lastMarker = self.markDepth
+
+        return self.lastMarker
+
+
+    def rewind(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+
+        p, line, charPositionInLine = self._markers[marker - 1]
+
+        self.seek(p)
+        self._line = line
+        self.charPositionInLine = charPositionInLine
+        self.release(marker)
+
+
+    def release(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+
+        self.markDepth = marker - 1
+
+
+    def seek(self, index):
+        """
+        consume() ahead until p==index; can't just set p=index as we must
+        update line and charPositionInLine.
+        """
+
+        if index <= self.p:
+            self.p = index # just jump; don't update stream state (line, ...)
+            return
+
+        # seek forward, consume until p hits index
+        while self.p < index:
+            self.consume()
+
+
+    def substring(self, start, stop):
+        return self.strdata[start:stop + 1]
+
+
+    def getSourceName(self):
+        return self.name
+
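+# A minimal usage sketch (added here for illustration; not part of the
+# original runtime sources).  It shows how mark()/rewind() bracket
+# speculative scanning on an ANTLRStringStream; the input literal is
+# arbitrary:
+#
+#     from antlr3.constants import EOF
+#     from antlr3.streams import ANTLRStringStream
+#
+#     stream = ANTLRStringStream("ab\ncd")
+#     stream.consume()               # LA(1) is now ord('b')
+#     marker = stream.mark()         # snapshot p, line, charPositionInLine
+#     while stream.LA(1) not in (EOF, ord('c')):
+#         stream.consume()           # speculate ahead, crossing the newline
+#     stream.rewind(marker)          # back to LA(1) == ord('b'), line 1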
+
+class ANTLRFileStream(ANTLRStringStream):
+    """
+    @brief CharStream that opens a file to read the data.
+
+    This is a char buffer stream that is loaded from a file
+    all at once when you construct the object.
+    """
+
+    def __init__(self, fileName):
+        """
+        @param fileName The path to the file to be opened. The file will be
+           opened with mode 'r'.
+
+        """
+
+        self._fileName = fileName
+
+        with open(fileName, 'r') as fp:
+            super().__init__(fp.read())
+
+
+    @property
+    def fileName(self):
+        return self._fileName
+
+
+class ANTLRInputStream(ANTLRStringStream):
+    """
+    @brief CharStream that reads data from a file-like object.
+
+    This is a char buffer stream that is loaded from a file like object
+    all at once when you construct the object.
+
+    All input is consumed from the file, but it is not closed.
+    """
+
+    def __init__(self, file):
+        """
+        @param file A file-like object holding your input. Only the read()
+           method must be implemented.
+
+        """
+
+        data = file.read()
+
+        super().__init__(data)
+
+
+# I guess the ANTLR prefix exists only to avoid a name clash with some Java
+# mumbo jumbo. A plain "StringStream" looks better to me and should be
+# the preferred name in Python.
+StringStream = ANTLRStringStream
+FileStream = ANTLRFileStream
+InputStream = ANTLRInputStream
+
+
+############################################################################
+#
+# Token streams
+#   TokenStream
+#   +- CommonTokenStream
+#   \- TokenRewriteStream
+#
+############################################################################
+
+
+class CommonTokenStream(TokenStream):
+    """
+    @brief The most common stream of tokens
+
+    The most common stream of tokens is one where every token is buffered up
+    and tokens are prefiltered for a certain channel (the parser will only
+    see these tokens and cannot change the filter channel number during the
+    parse).
+    """
+
+    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
+        """
+        @param tokenSource A TokenSource instance (usually a Lexer) to pull
+            the tokens from.
+
+        @param channel Skip tokens on any channel but this one; this is how we
+            skip whitespace...
+
+        """
+
+        super().__init__()
+
+        self.tokenSource = tokenSource
+
+        # Record every single token pulled from the source so we can reproduce
+        # chunks of it later.
+        self.tokens = []
+
+        # Map<tokentype, channel> to override some Tokens' channel numbers
+        self.channelOverrideMap = {}
+
+        # Set<tokentype>; discard any tokens with this type
+        self.discardSet = set()
+
+        # Skip tokens on any channel but this one; this is how we skip
+        # whitespace...
+        self.channel = channel
+
+        # By default, track all incoming tokens
+        self.discardOffChannelTokens = False
+
+        # The index into the tokens list of the current token (next token
+        # to consume).  p==-1 indicates that the tokens list is empty
+        self.p = -1
+
+        # Remember last marked position
+        self.lastMarker = None
+
+        # how deep have we gone?
+        self._range = -1
+
+
+    def makeEOFToken(self):
+        return self.tokenSource.makeEOFToken()
+
+
+    def setTokenSource(self, tokenSource):
+        """Reset this token stream by setting its token source."""
+
+        self.tokenSource = tokenSource
+        self.tokens = []
+        self.p = -1
+        self.channel = DEFAULT_CHANNEL
+
+
+    def reset(self):
+        self.p = 0
+        self.lastMarker = None
+
+
+    def fillBuffer(self):
+        """
+        Load all tokens from the token source and put in tokens.
+        This is done upon first LT request because you might want to
+        set some token type / channel overrides before filling buffer.
+        """
+
+
+        index = 0
+        t = self.tokenSource.nextToken()
+        while t and t.type != EOF:
+            discard = False
+
+            if self.discardSet and t.type in self.discardSet:
+                discard = True
+
+            elif self.discardOffChannelTokens and t.channel != self.channel:
+                discard = True
+
+            # is there a channel override for token type?
+            if t.type in self.channelOverrideMap:
+                overrideChannel = self.channelOverrideMap[t.type]
+
+                if overrideChannel == self.channel:
+                    t.channel = overrideChannel
+                else:
+                    discard = True
+
+            if not discard:
+                t.index = index
+                self.tokens.append(t)
+                index += 1
+
+            t = self.tokenSource.nextToken()
+
+        # leave p pointing at first token on channel
+        self.p = 0
+        self.p = self.skipOffTokenChannels(self.p)
+
+
+    def consume(self):
+        """
+        Move the input pointer to the next incoming token.  The stream
+        must become active with LT(1) available.  consume() simply
+        moves the input pointer so that LT(1) points at the next
+        input symbol. Consume at least one token.
+
+        Walk past any token not on the channel the parser is listening to.
+        """
+
+        if self.p < len(self.tokens):
+            self.p += 1
+
+            self.p = self.skipOffTokenChannels(self.p) # leave p on valid token
+
+
+    def skipOffTokenChannels(self, i):
+        """
+        Given a starting index, return the index of the first on-channel
+        token.
+        """
+
+        n = len(self.tokens)
+        while i < n and self.tokens[i].channel != self.channel:
+            i += 1
+
+        return i
+
+
+    def skipOffTokenChannelsReverse(self, i):
+        while i >= 0 and self.tokens[i].channel != self.channel:
+            i -= 1
+
+        return i
+
+
+    def setTokenTypeChannel(self, ttype, channel):
+        """
+        A simple filter mechanism whereby you can tell this token stream
+        to force all tokens of type ttype to be on channel.  For example,
+        when interpreting, we cannot exec actions so we need to tell
+        the stream to force all WS and NEWLINE to be a different, ignored
+        channel.
+        """
+
+        self.channelOverrideMap[ttype] = channel
+
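+    # Illustrative sketch (an addition for this review; not in the original
+    # source).  To keep whitespace out of the parser's view without touching
+    # the grammar, force a (grammar-specific, hypothetical) WS token type
+    # onto the hidden channel before the buffer is filled; tokens of that
+    # type are then dropped while fillBuffer() runs:
+    #
+    #     from antlr3.constants import HIDDEN_CHANNEL
+    #
+    #     tokens.setTokenTypeChannel(WS, HIDDEN_CHANNEL)
+    #     tokens.fillBuffer()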
+
+    def discardTokenType(self, ttype):
+        self.discardSet.add(ttype)
+
+
+    def getTokens(self, start=None, stop=None, types=None):
+        """
+        Given a start and stop index, return a list of all tokens in
+        the token type set.  Return None if no tokens were found.  This
+        method looks at both on and off channel tokens.
+        """
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        if stop is None or stop > len(self.tokens):
+            stop = len(self.tokens)
+
+        if start is None or start < 0:
+            start = 0
+
+        if start > stop:
+            return None
+
+        if isinstance(types, int):
+            # called with a single type, wrap into set
+            types = set([types])
+
+        filteredTokens = [
+            token for token in self.tokens[start:stop]
+            if types is None or token.type in types
+            ]
+
+        if len(filteredTokens) == 0:
+            return None
+
+        return filteredTokens
+
+
+    def LT(self, k):
+        """
+        Get the kth token of lookahead from the current position, where k=1
+        is the first symbol of lookahead.
+        """
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        if k == 0:
+            return None
+
+        if k < 0:
+            return self.LB(-k)
+
+        i = self.p
+        n = 1
+        # find k good tokens
+        while n < k:
+            # skip off-channel tokens
+            i = self.skipOffTokenChannels(i + 1) # leave p on valid token
+            n += 1
+
+        if i > self._range:
+            self._range = i
+
+        if i < len(self.tokens):
+            return self.tokens[i]
+        else:
+            return self.makeEOFToken()
+
+
+    def LB(self, k):
+        """Look backwards k tokens on-channel tokens"""
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        if k == 0:
+            return None
+
+        if self.p - k < 0:
+            return None
+
+        i = self.p
+        n = 1
+        # find k good tokens looking backwards
+        while n <= k:
+            # skip off-channel tokens
+            i = self.skipOffTokenChannelsReverse(i - 1) # leave p on valid token
+            n += 1
+
+        if i < 0:
+            return None
+
+        return self.tokens[i]
+
+
+    def get(self, i):
+        """
+        Return absolute token i; ignore which channel the tokens are on;
+        that is, count all tokens not just on-channel tokens.
+        """
+
+        return self.tokens[i]
+
+
+    def slice(self, start, stop):
+        if self.p == -1:
+            self.fillBuffer()
+
+        if start < 0 or stop < 0:
+            return None
+
+        return self.tokens[start:stop + 1]
+
+
+    def LA(self, i):
+        return self.LT(i).type
+
+
+    def mark(self):
+        self.lastMarker = self.index()
+        return self.lastMarker
+
+
+    def release(self, marker=None):
+        # no resources to release
+        pass
+
+
+    def size(self):
+        return len(self.tokens)
+
+
+    def range(self):
+        return self._range
+
+
+    def index(self):
+        return self.p
+
+
+    def rewind(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+
+        self.seek(marker)
+
+
+    def seek(self, index):
+        self.p = index
+
+
+    def getTokenSource(self):
+        return self.tokenSource
+
+
+    def getSourceName(self):
+        return self.tokenSource.getSourceName()
+
+
+    def toString(self, start=None, stop=None):
+        """Returns a string of all tokens between start and stop (inclusive)."""
+        if self.p == -1:
+            self.fillBuffer()
+
+        if start is None:
+            start = 0
+        elif not isinstance(start, int):
+            start = start.index
+
+        if stop is None:
+            stop = len(self.tokens) - 1
+        elif not isinstance(stop, int):
+            stop = stop.index
+
+        if stop >= len(self.tokens):
+            stop = len(self.tokens) - 1
+
+        return ''.join([t.text for t in self.tokens[start:stop + 1]])
+
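+# Rough usage sketch (added for illustration; not part of the original
+# runtime).  "TLexer" stands in for a lexer class that ANTLR would generate
+# from some grammar named T:
+#
+#     lexer = TLexer(ANTLRStringStream("x = 1 ;"))
+#     tokens = CommonTokenStream(lexer)    # buffers all tokens at first LT()
+#     print(tokens.LT(1).text)             # next on-channel token's text
+#     tokens.consume()
+#     print(tokens.toString())             # concatenated text of the buffer
+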
+
+class RewriteOperation(object):
+    """@brief Internal helper class."""
+
+    def __init__(self, stream, index, text):
+        self.stream = stream
+
+        # What index into rewrites List are we?
+        self.instructionIndex = None
+
+        # Token buffer index.
+        self.index = index
+        self.text = text
+
+    def execute(self, buf):
+        """Execute the rewrite operation by possibly adding to the buffer.
+        Return the index of the next token to operate on.
+        """
+
+        return self.index
+
+    def toString(self):
+        opName = self.__class__.__name__
+        return '<{opName}@{0.index}:"{0.text}">'.format(self, opName=opName)
+
+    __str__ = toString
+    __repr__ = toString
+
+
+class InsertBeforeOp(RewriteOperation):
+    """@brief Internal helper class."""
+
+    def execute(self, buf):
+        buf.write(self.text)
+        if self.stream.tokens[self.index].type != EOF:
+            buf.write(self.stream.tokens[self.index].text)
+        return self.index + 1
+
+
+class ReplaceOp(RewriteOperation):
+    """
+    @brief Internal helper class.
+
+    I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+    instructions.
+    """
+
+    def __init__(self, stream, first, last, text):
+        super().__init__(stream, first, text)
+        self.lastIndex = last
+
+
+    def execute(self, buf):
+        if self.text is not None:
+            buf.write(self.text)
+
+        return self.lastIndex + 1
+
+
+    def toString(self):
+        if self.text is None:
+            return '<DeleteOp@{0.index}..{0.lastIndex}>'.format(self)
+
+        return '<ReplaceOp@{0.index}..{0.lastIndex}:"{0.text}">'.format(self)
+
+    __str__ = toString
+    __repr__ = toString
+
+
+class TokenRewriteStream(CommonTokenStream):
+    """@brief CommonTokenStream that can be modified.
+
+    Useful for dumping out the input stream after doing some
+    augmentation or other manipulations.
+
+    You can insert stuff, replace, and delete chunks.  Note that the
+    operations are done lazily--only if you convert the buffer to a
+    String.  This is very efficient because you are not moving data around
+    all the time.  As the buffer of tokens is converted to strings, the
+    toString() method(s) check to see if there is an operation at the
+    current index.  If so, the operation is done and then normal String
+    rendering continues on the buffer.  This is like having multiple Turing
+    machine instruction streams (programs) operating on a single input tape. :)
+
+    Since the operations are done lazily at toString-time, operations do not
+    screw up the token index values.  That is, an insert operation at token
+    index i does not change the index values for tokens i+1..n-1.
+
+    Because operations never actually alter the buffer, you may always get
+    the original token stream back without undoing anything.  Since
+    the instructions are queued up, you can easily simulate transactions and
+    roll back any changes if there is an error just by removing instructions.
+    For example,
+
+     CharStream input = new ANTLRFileStream("input");
+     TLexer lex = new TLexer(input);
+     TokenRewriteStream tokens = new TokenRewriteStream(lex);
+     T parser = new T(tokens);
+     parser.startRule();
+
+     Then in the rules, you can execute
+        Token t,u;
+        ...
+        input.insertAfter(t, "text to put after t");
+        input.insertAfter(u, "text after u");
+        System.out.println(tokens.toString());
+
+    Actually, you have to cast the 'input' to a TokenRewriteStream. :(
+
+    You can also have multiple "instruction streams" and get multiple
+    rewrites from a single pass over the input.  Just name the instruction
+    streams and use that name again when printing the buffer.  This could be
+    useful for generating a C file and also its header file--all from the
+    same buffer:
+
+        tokens.insertAfter("pass1", t, "text to put after t");}
+        tokens.insertAfter("pass2", u, "text after u");}
+        System.out.println(tokens.toString("pass1"));
+        System.out.println(tokens.toString("pass2"));
+
+    If you don't use named rewrite streams, a "default" stream is used as
+    the first example shows.
+    """
+
+    DEFAULT_PROGRAM_NAME = "default"
+    MIN_TOKEN_INDEX = 0
+
+    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
+        super().__init__(tokenSource, channel)
+
+        # You may have multiple, named streams of rewrite operations.
+        # I'm calling these things "programs."
+        #  Maps String (name) -> rewrite (List)
+        self.programs = {}
+        self.programs[self.DEFAULT_PROGRAM_NAME] = []
+
+        # Map String (program name) -> Integer index
+        self.lastRewriteTokenIndexes = {}
+
+
+    def rollback(self, *args):
+        """
+        Rollback the instruction stream for a program so that
+        the indicated instruction (via instructionIndex) is no
+        longer in the stream.  UNTESTED!
+        """
+
+        if len(args) == 2:
+            programName = args[0]
+            instructionIndex = args[1]
+        elif len(args) == 1:
+            programName = self.DEFAULT_PROGRAM_NAME
+            instructionIndex = args[0]
+        else:
+            raise TypeError("Invalid arguments")
+
+        p = self.programs.get(programName)
+        if p:
+            self.programs[programName] = (
+                p[self.MIN_TOKEN_INDEX:instructionIndex])
+
+
+    def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME):
+        """Reset the program so that no instructions exist"""
+
+        self.rollback(programName, self.MIN_TOKEN_INDEX)
+
+
+    def insertAfter(self, *args):
+        if len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            index = args[0]
+            text = args[1]
+
+        elif len(args) == 3:
+            programName = args[0]
+            index = args[1]
+            text = args[2]
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        if isinstance(index, Token):
+            # index is a Token, grab the stream index from it
+            index = index.index
+
+        # to insert after, just insert before next index (even if past end)
+        self.insertBefore(programName, index + 1, text)
+
+
+    def insertBefore(self, *args):
+        if len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            index = args[0]
+            text = args[1]
+
+        elif len(args) == 3:
+            programName = args[0]
+            index = args[1]
+            text = args[2]
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        if isinstance(index, Token):
+            # index is a Token, grab the stream index from it
+            index = index.index
+
+        op = InsertBeforeOp(self, index, text)
+        rewrites = self.getProgram(programName)
+        op.instructionIndex = len(rewrites)
+        rewrites.append(op)
+
+
+    def replace(self, *args):
+        if len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            first = args[0]
+            last = args[0]
+            text = args[1]
+
+        elif len(args) == 3:
+            programName = self.DEFAULT_PROGRAM_NAME
+            first = args[0]
+            last = args[1]
+            text = args[2]
+
+        elif len(args) == 4:
+            programName = args[0]
+            first = args[1]
+            last = args[2]
+            text = args[3]
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        if isinstance(first, Token):
+            # first is a Token, grab the stream index from it
+            first = first.index
+
+        if isinstance(last, Token):
+            # last is a Token, grab the stream index from it
+            last = last.index
+
+        if first > last or first < 0 or last < 0 or last >= len(self.tokens):
+            raise ValueError(
+                "replace: range invalid: {}..{} (size={})"
+                .format(first, last, len(self.tokens)))
+
+        op = ReplaceOp(self, first, last, text)
+        rewrites = self.getProgram(programName)
+        op.instructionIndex = len(rewrites)
+        rewrites.append(op)
+
+
+    def delete(self, *args):
+        self.replace(*(list(args) + [None]))
+
+
+    def getLastRewriteTokenIndex(self, programName=DEFAULT_PROGRAM_NAME):
+        return self.lastRewriteTokenIndexes.get(programName, -1)
+
+
+    def setLastRewriteTokenIndex(self, programName, i):
+        self.lastRewriteTokenIndexes[programName] = i
+
+
+    def getProgram(self, name):
+        p = self.programs.get(name)
+        if not p:
+            p = self.initializeProgram(name)
+
+        return p
+
+
+    def initializeProgram(self, name):
+        p = []
+        self.programs[name] = p
+        return p
+
+
+    def toOriginalString(self, start=None, end=None):
+        if self.p == -1:
+            self.fillBuffer()
+
+        if start is None:
+            start = self.MIN_TOKEN_INDEX
+        if end is None:
+            end = self.size() - 1
+
+        buf = StringIO()
+        i = start
+        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
+            if self.get(i).type != EOF:
+                buf.write(self.get(i).text)
+            i += 1
+
+        return buf.getvalue()
+
+
+    def toString(self, *args):
+        if self.p == -1:
+            self.fillBuffer()
+
+        if len(args) == 0:
+            programName = self.DEFAULT_PROGRAM_NAME
+            start = self.MIN_TOKEN_INDEX
+            end = self.size() - 1
+
+        elif len(args) == 1:
+            programName = args[0]
+            start = self.MIN_TOKEN_INDEX
+            end = self.size() - 1
+
+        elif len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            start = args[0]
+            end = args[1]
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        if start is None:
+            start = self.MIN_TOKEN_INDEX
+        elif not isinstance(start, int):
+            start = start.index
+
+        if end is None:
+            end = len(self.tokens) - 1
+        elif not isinstance(end, int):
+            end = end.index
+
+        # ensure start/end are in range
+        if end >= len(self.tokens):
+            end = len(self.tokens) - 1
+
+        if start < 0:
+            start = 0
+
+        rewrites = self.programs.get(programName)
+        if not rewrites:
+            # no instructions to execute
+            return self.toOriginalString(start, end)
+
+        buf = StringIO()
+
+        # First, optimize instruction stream
+        indexToOp = self.reduceToSingleOperationPerIndex(rewrites)
+
+        # Walk buffer, executing instructions and emitting tokens
+        i = start
+        while i <= end and i < len(self.tokens):
+            # remove so any left have index size-1
+            op = indexToOp.pop(i, None)
+
+            t = self.tokens[i]
+            if op is None:
+                # no operation at that index, just dump token
+                if t.type != EOF:
+                    buf.write(t.text)
+                i += 1 # move to next token
+
+            else:
+                i = op.execute(buf) # execute operation and skip
+
+        # include stuff after end if it's last index in buffer
+        # So, if they did an insertAfter(lastValidIndex, "foo"), include
+        # foo if end == lastValidIndex.
+        if end == len(self.tokens) - 1:
+            # Scan any remaining operations after last token
+            # should be included (they will be inserts).
+            for i, op in sorted(indexToOp.items()):
+                if op.index >= len(self.tokens) - 1:
+                    buf.write(op.text)
+
+        return buf.getvalue()
+
+    __str__ = toString
+
+
+    def reduceToSingleOperationPerIndex(self, rewrites):
+        """
+        We need to combine operations and report invalid operations (like
+        overlapping replaces that are not completely nested).  Inserts to
+        the same index need to be combined, etc.  Here are the cases (a
+        worked example follows this method):
+
+        I.i.u I.j.v                           leave alone, nonoverlapping
+        I.i.u I.i.v                           combine: Iivu
+
+        R.i-j.u R.x-y.v | i-j in x-y          delete first R
+        R.i-j.u R.i-j.v                       delete first R
+        R.i-j.u R.x-y.v | x-y in i-j          ERROR
+        R.i-j.u R.x-y.v | boundaries overlap  ERROR
+
+        Delete special case of replace (text==null):
+        D.i-j.u D.x-y.v | boundaries overlap  combine to
+                                              max(min)..max(right)
+
+        I.i.u R.x-y.v   | i in (x+1)-y        delete I (since insert before,
+                                              we're not deleting i)
+        I.i.u R.x-y.v   | i not in (x+1)-y    leave alone, nonoverlapping
+
+        R.x-y.v I.i.u   | i in x-y            ERROR
+        R.x-y.v I.x.u                         R.x-y.uv (combine, delete I)
+        R.x-y.v I.i.u   | i not in x-y        leave alone, nonoverlapping
+
+        I.i.u = insert u before op @ index i
+        R.x-y.u = replace x-y indexed tokens with u
+
+        First we need to examine replaces.  For any replace op:
+
+          1. wipe out any insertions before op within that range.
+          2. Drop any replace op before that is contained completely within
+             that range.
+          3. Throw exception upon boundary overlap with any previous replace.
+
+        Then we can deal with inserts:
+
+          1. for any inserts to same index, combine even if not adjacent.
+          2. for any prior replace with same left boundary, combine this
+             insert with replace and delete this replace.
+          3. throw exception if index in same range as previous replace
+
+        Don't actually delete; set the op to None in the list.  Easier to
+        walk the list.  Later we can throw as we add to the index -> op map.
+
+        Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+        inserted stuff would be before the replace range.  But, if you
+        add tokens in front of a method body '{' and then delete the method
+        body, I think the stuff before the '{' you added should disappear too.
+
+        Return a map from token index to operation.
+        """
+
+        # WALK REPLACES
+        for i, rop in enumerate(rewrites):
+            if not rop:
+                continue
+
+            if not isinstance(rop, ReplaceOp):
+                continue
+
+            # Wipe prior inserts within range
+            for j, iop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
+                if iop.index == rop.index:
+                    # E.g., insert before 2, delete 2..2; update replace
+                    # text to include insert before, kill insert
+                    rewrites[iop.instructionIndex] = None
+                    rop.text = self.catOpText(iop.text, rop.text)
+
+                elif iop.index > rop.index and iop.index <= rop.lastIndex:
+                    # delete insert as it's a no-op.
+                    rewrites[j] = None
+
+            # Drop any prior replaces contained within
+            for j, prevRop in self.getKindOfOps(rewrites, ReplaceOp, i):
+                if (prevRop.index >= rop.index
+                    and prevRop.lastIndex <= rop.lastIndex):
+                    # delete replace as it's a no-op.
+                    rewrites[j] = None
+                    continue
+
+                # throw exception unless disjoint or identical
+                disjoint = (prevRop.lastIndex < rop.index
+                            or prevRop.index > rop.lastIndex)
+                same = (prevRop.index == rop.index
+                        and prevRop.lastIndex == rop.lastIndex)
+
+                # Delete special case of replace (text==null):
+                # D.i-j.u D.x-y.v | boundaries overlap  combine to
+                # max(min)..max(right)
+                if prevRop.text is None and rop.text is None and not disjoint:
+                    # kill first delete
+                    rewrites[prevRop.instructionIndex] = None
+
+                    rop.index = min(prevRop.index, rop.index)
+                    rop.lastIndex = max(prevRop.lastIndex, rop.lastIndex)
+
+                elif not disjoint and not same:
+                    raise ValueError(
+                        "replace op boundaries of {} overlap with previous {}"
+                        .format(rop, prevRop))
+
+        # WALK INSERTS
+        for i, iop in enumerate(rewrites):
+            if iop is None:
+                continue
+
+            if not isinstance(iop, InsertBeforeOp):
+                continue
+
+            # combine current insert with prior if any at same index
+            for j, prevIop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
+                if prevIop.index == iop.index: # combine objects
+                    # convert to strings...we're in process of toString'ing
+                    # whole token buffer so no lazy eval issue with any
+                    # templates
+                    iop.text = self.catOpText(iop.text, prevIop.text)
+                    # delete redundant prior insert
+                    rewrites[j] = None
+
+            # look for replaces where iop.index is in range; error
+            for j, rop in self.getKindOfOps(rewrites, ReplaceOp, i):
+                if iop.index == rop.index:
+                    rop.text = self.catOpText(iop.text, rop.text)
+                    # delete current insert
+                    rewrites[i] = None
+                    continue
+
+                if iop.index >= rop.index and iop.index <= rop.lastIndex:
+                    raise ValueError(
+                        "insert op {} within boundaries of previous {}"
+                        .format(iop, rop))
+
+        m = {}
+        for i, op in enumerate(rewrites):
+            if op is None:
+                # ignore deleted ops
+                continue
+
+            assert op.index not in m, "should only be one op per index"
+            m[op.index] = op
+
+        return m
+
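+    # Worked example (an illustration added here; not part of the original
+    # source).  With the default program, the instruction sequence
+    #
+    #     tokens.insertBefore(2, "x")
+    #     tokens.replace(2, 4, "y")
+    #
+    # reduces to a single ReplaceOp over 2..4 with text "xy": the insert
+    # shares the replace's left boundary, so its text is folded into the
+    # replace and the insert itself is dropped.  toString() then emits
+    # tokens 0..1, the text "xy", and the tokens from index 5 onwards.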
+
+    def catOpText(self, a, b):
+        x = ""
+        y = ""
+        if a:
+            x = a
+        if b:
+            y = b
+        return x + y
+
+
+    def getKindOfOps(self, rewrites, kind, before=None):
+        """Get all operations before an index of a particular kind."""
+
+        if before is None:
+            before = len(rewrites)
+        elif before > len(rewrites):
+            before = len(rewrites)
+
+        for i, op in enumerate(rewrites[:before]):
+            # ignore deleted
+            if op and op.__class__ == kind:
+                yield i, op
+
+
+    def toDebugString(self, start=None, end=None):
+        if start is None:
+            start = self.MIN_TOKEN_INDEX
+        if end is None:
+            end = self.size() - 1
+
+        buf = StringIO()
+        i = start
+        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
+            buf.write(self.get(i))
+            i += 1
+
+        return buf.getvalue()
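+
+
+# End-to-end sketch for TokenRewriteStream (added for illustration; not part
+# of the original runtime).  "TLexer" and "TParser" stand in for classes that
+# ANTLR would generate from a grammar named T, source_text for the input
+# string, and t/u for Token objects captured inside the rules, mirroring the
+# example in the class docstring:
+#
+#     tokens = TokenRewriteStream(TLexer(ANTLRStringStream(source_text)))
+#     parser = TParser(tokens)
+#     parser.startRule()
+#     tokens.insertAfter(t, " /* after t */ ")
+#     tokens.insertBefore(u, " /* before u */ ")
+#     print(tokens.toString())    # original token text with the edits applied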
diff --git a/runtime/Python3/antlr3/tokens.py b/runtime/Python3/antlr3/tokens.py
new file mode 100644
index 0000000..31a2e93
--- /dev/null
+++ b/runtime/Python3/antlr3/tokens.py
@@ -0,0 +1,310 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2012 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+from .constants import DEFAULT_CHANNEL, EOF, INVALID_TOKEN_TYPE
+
+############################################################################
+#
+# basic token interface
+#
+############################################################################
+
+class Token(object):
+    """@brief Abstract token baseclass."""
+
+    TOKEN_NAMES_MAP = None
+
+    @classmethod
+    def registerTokenNamesMap(cls, tokenNamesMap):
+        """@brief Store a mapping from token type to token name.
+
+        This enables token.typeName to give something more meaningful
+        than, e.g., '6'.
+        """
+        cls.TOKEN_NAMES_MAP = tokenNamesMap
+        cls.TOKEN_NAMES_MAP[EOF] = "EOF"
+
+    def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
+                 index=-1, line=0, charPositionInLine=-1, input=None):
+        # We use -1 for index and charPositionInLine as an invalid index
+        self._type = type
+        self._channel = channel
+        self._text = text
+        self._index = index
+        self._line = line
+        self._charPositionInLine = charPositionInLine
+        self.input = input
+
+    # To override a property, you'll need to override both the getter and setter.
+    @property
+    def text(self):
+        return self._text
+
+    @text.setter
+    def text(self, value):
+        self._text = value
+
+
+    @property
+    def type(self):
+        return self._type
+
+    @type.setter
+    def type(self, value):
+        self._type = value
+
+    # For compatibility
+    def getType(self):
+        return self._type
+
+    @property
+    def typeName(self):
+        if self.TOKEN_NAMES_MAP:
+            return self.TOKEN_NAMES_MAP.get(self._type, "INVALID_TOKEN_TYPE")
+        else:
+            return str(self._type)
+
+    @property
+    def line(self):
+        """Lines are numbered 1..n."""
+        return self._line
+
+    @line.setter
+    def line(self, value):
+        self._line = value
+
+
+    @property
+    def charPositionInLine(self):
+        """Columns are numbered 0..n-1."""
+        return self._charPositionInLine
+
+    @charPositionInLine.setter
+    def charPositionInLine(self, pos):
+        self._charPositionInLine = pos
+
+
+    @property
+    def channel(self):
+        return self._channel
+
+    @channel.setter
+    def channel(self, value):
+        self._channel = value
+
+
+    @property
+    def index(self):
+        """
+        An index from 0..n-1 of the token object in the input stream.
+        This must be valid in order to use the ANTLRWorks debugger.
+        """
+        return self._index
+
+    @index.setter
+    def index(self, value):
+        self._index = value
+
+
+    def getInputStream(self):
+        """@brief From what character stream was this token created.
+
+        You don't have to implement but it's nice to know where a Token
+        comes from if you have include files etc... on the input."""
+
+        raise NotImplementedError
+
+    def setInputStream(self, input):
+        """@brief From what character stream was this token created.
+
+        You don't have to implement but it's nice to know where a Token
+        comes from if you have include files etc... on the input."""
+
+        raise NotImplementedError
+
+
+############################################################################
+#
+# token implementations
+#
+# Token
+# +- CommonToken
+# \- ClassicToken
+#
+############################################################################
+
+class CommonToken(Token):
+    """@brief Basic token implementation.
+
+    This implementation does not copy the text from the input stream upon
+    creation, but keeps start/stop pointers into the stream to avoid
+    unnecessary copy operations.
+
+    """
+
+    def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
+                 input=None, start=None, stop=None, oldToken=None):
+
+        if oldToken:
+            super().__init__(oldToken.type, oldToken.channel, oldToken.text,
+                             oldToken.index, oldToken.line,
+                             oldToken.charPositionInLine, oldToken.input)
+            if isinstance(oldToken, CommonToken):
+                self.start = oldToken.start
+                self.stop = oldToken.stop
+            else:
+                self.start = start
+                self.stop = stop
+
+        else:
+            super().__init__(type=type, channel=channel, input=input)
+
+            # We need to be able to change the text once in a while.  If
+            # this is non-null, then getText should return this.  Note that
+            # start/stop are not affected by changing this.
+            self._text = text
+
+            # The char position into the input buffer where this token starts
+            self.start = start
+
+            # The char position into the input buffer where this token stops
+            # This is the index of the last char, *not* the index after it!
+            self.stop = stop
+
+
+    @property
+    def text(self):
+        # Could be the empty string, and we want to return that.
+        if self._text is not None:
+            return self._text
+
+        if not self.input:
+            return None
+
+        if self.start < self.input.size() and self.stop < self.input.size():
+            return self.input.substring(self.start, self.stop)
+
+        return '<EOF>'
+
+    @text.setter
+    def text(self, value):
+        """
+        Override the text for this token.  getText() will return this text
+        rather than pulling from the buffer.  Note that this does not mean
+        that start/stop indexes are not valid.  It means that the input
+        was converted to a new string in the token object.
+        """
+        self._text = value
+
+
+    def getInputStream(self):
+        return self.input
+
+    def setInputStream(self, input):
+        self.input = input
+
+
+    def __str__(self):
+        if self.type == EOF:
+            return "<EOF>"
+
+        channelStr = ""
+        if self.channel > 0:
+            channelStr = ",channel=" + str(self.channel)
+
+        txt = self.text
+        if txt:
+            # Put 2 backslashes in front of each character
+            txt = txt.replace("\n", r"\\n")
+            txt = txt.replace("\r", r"\\r")
+            txt = txt.replace("\t", r"\\t")
+        else:
+            txt = "<no text>"
+
+        return ("[@{0.index},{0.start}:{0.stop}={txt!r},"
+                "<{0.typeName}>{channelStr},"
+                "{0.line}:{0.charPositionInLine}]"
+                .format(self, txt=txt, channelStr=channelStr))
+
+
+class ClassicToken(Token):
+    """@brief Alternative token implementation.
+
+    A Token object like we'd use in ANTLR 2.x; has an actual string created
+    and associated with this object.  These objects are needed for imaginary
+    tree nodes that have payload objects.  We need to create a Token object
+    that has a string; the tree node will point at this token.  CommonToken
+    has indexes into a char stream and hence cannot be used to introduce
+    new strings.
+    """
+
+    def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL,
+                 oldToken=None):
+        if oldToken:
+            super().__init__(type=oldToken.type, channel=oldToken.channel,
+                             text=oldToken.text, line=oldToken.line,
+                             charPositionInLine=oldToken.charPositionInLine)
+
+        else:
+            super().__init__(type=type, channel=channel, text=text,
+                             index=None, line=None, charPositionInLine=None)
+
+
+    def getInputStream(self):
+        return None
+
+    def setInputStream(self, input):
+        pass
+
+
+    def toString(self):
+        channelStr = ""
+        if self.channel > 0:
+            channelStr = ",channel=" + str(self.channel)
+
+        txt = self.text
+        if not txt:
+            txt = "<no text>"
+
+        return ("[@{0.index!r},{txt!r},<{0.type!r}>{channelStr},"
+                "{0.line!r}:{0.charPositionInLine!r}]"
+                .format(self, txt=txt, channelStr=channelStr))
+
+    __str__ = toString
+    __repr__ = toString
+
+
+INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
+
+# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
+# will avoid creating a token for this symbol and try to fetch another.
+SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
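+
+
+# Small illustrative sketch (added here; not part of the original runtime).
+# A CommonToken built over a char stream pulls its text lazily from
+# start..stop, while an explicitly assigned text overrides the buffer.
+# The token type 4 is an arbitrary made-up value:
+#
+#     from antlr3.streams import ANTLRStringStream
+#
+#     inp = ANTLRStringStream("hello world")
+#     tok = CommonToken(type=4, input=inp, start=0, stop=4)
+#     assert tok.text == "hello"    # sliced from the stream on demand
+#     tok.text = "HELLO"
+#     assert tok.text == "HELLO"    # the override wins from now on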
diff --git a/runtime/Python3/antlr3/tree.py b/runtime/Python3/antlr3/tree.py
new file mode 100644
index 0000000..0a3214b
--- /dev/null
+++ b/runtime/Python3/antlr3/tree.py
@@ -0,0 +1,2829 @@
+""" @package antlr3.tree
+@brief ANTLR3 runtime package, tree module
+
+This module contains all support classes for AST construction and tree parsers.
+
+"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2012 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+# lot's of docstrings are missing, don't complain for now...
+# pylint: disable-msg=C0111
+
+import re
+
+from antlr3.constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE
+from antlr3.recognizers import BaseRecognizer, RuleReturnScope
+from antlr3.streams import IntStream
+from antlr3.tokens import CommonToken, Token, INVALID_TOKEN
+from antlr3.exceptions import MismatchedTreeNodeException, \
+     MissingTokenException, UnwantedTokenException, MismatchedTokenException, \
+     NoViableAltException
+
+
+############################################################################
+#
+# tree related exceptions
+#
+############################################################################
+
+
+class RewriteCardinalityException(RuntimeError):
+    """
+    @brief Base class for all exceptions thrown during AST rewrite construction.
+
+    This signifies a case where the cardinality of two or more elements
+    in a subrule are different: (ID INT)+ where |ID|!=|INT|
+    """
+
+    def __init__(self, elementDescription):
+        RuntimeError.__init__(self, elementDescription)
+
+        self.elementDescription = elementDescription
+
+
+    def getMessage(self):
+        return self.elementDescription
+
+
+class RewriteEarlyExitException(RewriteCardinalityException):
+    """@brief No elements within a (...)+ in a rewrite rule"""
+
+    def __init__(self, elementDescription=None):
+        RewriteCardinalityException.__init__(self, elementDescription)
+
+
+class RewriteEmptyStreamException(RewriteCardinalityException):
+    """
+    @brief Ref to ID or expr but no tokens in ID stream or subtrees in expr stream
+    """
+
+    pass
+
+
+############################################################################
+#
+# basic Tree and TreeAdaptor interfaces
+#
+############################################################################
+
+class Tree(object):
+    """
+    @brief Abstract baseclass for tree nodes.
+
+    What does a tree look like?  ANTLR has a number of support classes
+    such as CommonTreeNodeStream that work on these kinds of trees.  You
+    don't have to make your trees implement this interface, but if you do,
+    you'll be able to use more support code.
+
+    NOTE: When constructing trees, ANTLR can build any kind of tree; it can
+    even use Token objects as trees if you add a child list to your tokens.
+
+    This is a tree node without any payload; just navigation and factory stuff.
+    """
+
+
+    def getChild(self, i):
+        raise NotImplementedError
+
+
+    def getChildCount(self):
+        raise NotImplementedError
+
+
+    def getParent(self):
+        """Tree tracks parent and child index now > 3.0"""
+
+        raise NotImplementedError
+
+    def setParent(self, t):
+        """Tree tracks parent and child index now > 3.0"""
+
+        raise NotImplementedError
+
+
+    def hasAncestor(self, ttype):
+        """Walk upwards looking for ancestor with this token type."""
+
+        raise NotImplementedError
+
+    def getAncestor(self, ttype):
+        """Walk upwards and get first ancestor with this token type."""
+
+        raise NotImplementedError
+
+    def getAncestors(self):
+        """Return a list of all ancestors of this node.
+
+        The first node of list is the root and the last is the parent of
+        this node.
+        """
+
+        raise NotImplementedError
+
+
+    def getChildIndex(self):
+        """This node is what child index? 0..n-1"""
+
+        raise NotImplementedError
+
+    def setChildIndex(self, index):
+        """This node is what child index? 0..n-1"""
+
+        raise NotImplementedError
+
+
+    def freshenParentAndChildIndexes(self):
+        """Set the parent and child index values for all children"""
+
+        raise NotImplementedError
+
+
+    def addChild(self, t):
+        """
+        Add t as a child to this node.  If t is null, do nothing.  If t
+        is nil, add all children of t to this' children.
+        """
+
+        raise NotImplementedError
+
+
+    def setChild(self, i, t):
+        """Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
+
+        raise NotImplementedError
+
+
+    def deleteChild(self, i):
+        raise NotImplementedError
+
+
+    def replaceChildren(self, startChildIndex, stopChildIndex, t):
+        """
+        Delete children from start to stop and replace with t even if t is
+        a list (nil-root tree).  num of children can increase or decrease.
+        For huge child lists, inserting children can force walking rest of
+        children to set their childindex; could be slow.
+        """
+
+        raise NotImplementedError
+
+
+    def isNil(self):
+        """
+        Indicates the node is a nil node but may still have children, meaning
+        the tree is a flat list.
+        """
+
+        raise NotImplementedError
+
+
+    def getTokenStartIndex(self):
+        """
+        What is the smallest token index (indexing from 0) for this node
+           and its children?
+        """
+
+        raise NotImplementedError
+
+
+    def setTokenStartIndex(self, index):
+        raise NotImplementedError
+
+
+    def getTokenStopIndex(self):
+        """
+        What is the largest token index (indexing from 0) for this node
+        and its children?
+        """
+
+        raise NotImplementedError
+
+
+    def setTokenStopIndex(self, index):
+        raise NotImplementedError
+
+
+    def dupNode(self):
+        raise NotImplementedError
+
+
+    def getType(self):
+        """Return a token type; needed for tree parsing."""
+
+        raise NotImplementedError
+
+
+    def getText(self):
+        raise NotImplementedError
+
+
+    def getLine(self):
+        """
+        In case we don't have a token payload, what is the line for errors?
+        """
+
+        raise NotImplementedError
+
+
+    def getCharPositionInLine(self):
+        raise NotImplementedError
+
+
+    def toStringTree(self):
+        raise NotImplementedError
+
+
+    def toString(self):
+        raise NotImplementedError
+
+
+
+class TreeAdaptor(object):
+    """
+    @brief Abstract baseclass for tree adaptors.
+
+    How to create and navigate trees.  Rather than have a separate factory
+    and adaptor, I've merged them.  Makes sense to encapsulate.
+
+    This takes the place of the tree construction code generated in the
+    generated code in 2.x and the ASTFactory.
+
+    I do not need to know the type of a tree at all so they are all
+    generic Objects.  This may increase the amount of typecasting needed. :(
+    """
+
+    # C o n s t r u c t i o n
+
+    def createWithPayload(self, payload):
+        """
+        Create a tree node from Token object; for CommonTree type trees,
+        then the token just becomes the payload.  This is the most
+        common create call.
+
+        Override if you want another kind of node to be built.
+        """
+
+        raise NotImplementedError
+
+
+    def dupNode(self, treeNode):
+        """Duplicate a single tree node.
+
+        Override if you want another kind of node to be built."""
+
+        raise NotImplementedError
+
+
+    def dupTree(self, tree):
+        """Duplicate tree recursively, using dupNode() for each node"""
+
+        raise NotImplementedError
+
+
+    def nil(self):
+        """
+        Return a nil node (an empty but non-null node) that can hold
+        a list of elements as its children.  If you want a flat tree (a list),
+        use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
+        """
+
+        raise NotImplementedError
+
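+    # Illustrative sketch, not part of the interface: with a concrete
+    # adaptor such as CommonTreeAdaptor (defined below), a flat list can
+    # be built like this (the token type values 4 and 5 are arbitrary
+    # example values):
+    #
+    #   adaptor = CommonTreeAdaptor()
+    #   t = adaptor.nil()
+    #   t.addChild(adaptor.createFromType(4, "x"))
+    #   t.addChild(adaptor.createFromType(5, "y"))
+    #   assert t.isNil() and t.getChildCount() == 2
+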
+
+    def errorNode(self, input, start, stop, exc):
+        """
+        Return a tree node representing an error.  This node records the
+        tokens consumed during error recovery.  The start token indicates the
+        input symbol at which the error was detected.  The stop token indicates
+        the last symbol consumed during recovery.
+
+        You must specify the input stream so that the erroneous text can
+        be packaged up in the error node.  The exception could be useful
+        to some applications; default implementation stores ptr to it in
+        the CommonErrorNode.
+
+        This only makes sense during token parsing, not tree parsing.
+        Tree parsing should happen only when parsing and tree construction
+        succeed.
+        """
+
+        raise NotImplementedError
+
+
+    def isNil(self, tree):
+        """Is tree considered a nil node used to make lists of child nodes?"""
+
+        raise NotImplementedError
+
+
+    def addChild(self, t, child):
+        """
+        Add a child to the tree t.  If child is a flat tree (a list), make all
+        in the list children of t.  Warning: if t has no children but child
+        does and child is nil, it is ok to move the children to t via
+        t.children = child.children, i.e., without copying the list.  Just
+        make sure that this is consistent with how the user will build
+        ASTs.  Do nothing if t or child is null.
+        """
+
+        raise NotImplementedError
+
+
+    def becomeRoot(self, newRoot, oldRoot):
+        """
+        If oldRoot is a nil root, just copy or move the children to newRoot.
+        If not a nil root, make oldRoot a child of newRoot.
+
+           old=^(nil a b c), new=r yields ^(r a b c)
+           old=^(a b c), new=r yields ^(r ^(a b c))
+
+        If newRoot is a nil-rooted single child tree, use the single
+        child as the new root node.
+
+           old=^(nil a b c), new=^(nil r) yields ^(r a b c)
+           old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
+
+        If oldRoot was null, it's ok, just return newRoot (even if isNil).
+
+           old=null, new=r yields r
+           old=null, new=^(nil r) yields ^(nil r)
+
+        Return newRoot.  Throw an exception if newRoot is not a
+        simple node or nil root with a single child node--it must be a root
+        node.  If newRoot is ^(nil x) return x as newRoot.
+
+        Be advised that it's ok for newRoot to point at oldRoot's
+        children; i.e., you don't have to copy the list.  We are
+        constructing these nodes so we should have this control for
+        efficiency.
+        """
+
+        raise NotImplementedError
+
+
+    def rulePostProcessing(self, root):
+        """
+        Given the root of the subtree created for this rule, post process
+        it to do any simplifications or whatever you want.  A required
+        behavior is to convert ^(nil singleSubtree) to singleSubtree
+        as the setting of start/stop indexes relies on a single non-nil root
+        for non-flat trees.
+
+        Flat trees such as for lists like "idlist : ID+ ;" are left alone
+        unless there is only one ID.  For a list, the start/stop indexes
+        are set in the nil node.
+
+        This method is executed after all rule tree construction and right
+        before setTokenBoundaries().
+        """
+
+        raise NotImplementedError
+
+
+    def getUniqueID(self, node):
+        """For identifying trees.
+
+        How to identify nodes so we can say "add node to a prior node"?
+        Even becomeRoot is an issue.  Use System.identityHashCode(node)
+        usually.
+        """
+
+        raise NotImplementedError
+
+
+    # R e w r i t e  R u l e s
+
+    def createFromToken(self, tokenType, fromToken, text=None):
+        """
+        Create a new node derived from a token, with a new token type and
+        (optionally) new text.
+
+        This is invoked from an imaginary node ref on right side of a
+        rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"].
+
+        This should invoke createToken(Token).
+        """
+
+        raise NotImplementedError
+
+
+    def createFromType(self, tokenType, text):
+        """Create a new node derived from a token, with a new token type.
+
+        This is invoked from an imaginary node ref on right side of a
+        rewrite rule as IMAG["IMAG"].
+
+        This should invoke createToken(int,String).
+        """
+
+        raise NotImplementedError
+
+
+    # C o n t e n t
+
+    def getType(self, t):
+        """For tree parsing, I need to know the token type of a node"""
+
+        raise NotImplementedError
+
+
+    def setType(self, t, type):
+        """Node constructors can set the type of a node"""
+
+        raise NotImplementedError
+
+
+    def getText(self, t):
+        raise NotImplementedError
+
+    def setText(self, t, text):
+        """Node constructors can set the text of a node"""
+
+        raise NotImplementedError
+
+
+    def getToken(self, t):
+        """Return the token object from which this node was created.
+
+        Currently used only for printing an error message.
+        The error display routine in BaseRecognizer needs to
+        display where in the input the error occurred.  If your
+        tree implementation does not store information that can
+        lead you to the token, you can create a token filled with
+        the appropriate information and pass that back.  See
+        BaseRecognizer.getErrorMessage().
+        """
+
+        raise NotImplementedError
+
+
+    def setTokenBoundaries(self, t, startToken, stopToken):
+        """
+        Where are the bounds in the input token stream for this node and
+        all children?  Each rule that creates AST nodes will call this
+        method right before returning.  Flat trees (i.e., lists) will
+        still usually have a nil root node just to hold the children list.
+        That node would contain the start/stop indexes then.
+        """
+
+        raise NotImplementedError
+
+
+    def getTokenStartIndex(self, t):
+        """
+        Get the token start index for this subtree; return -1 if no such index
+        """
+
+        raise NotImplementedError
+
+
+    def getTokenStopIndex(self, t):
+        """
+        Get the token stop index for this subtree; return -1 if no such index
+        """
+
+        raise NotImplementedError
+
+
+    # N a v i g a t i o n  /  T r e e  P a r s i n g
+
+    def getChild(self, t, i):
+        """Get a child 0..n-1 node"""
+
+        raise NotImplementedError
+
+
+    def setChild(self, t, i, child):
+        """Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
+
+        raise NotImplementedError
+
+
+    def deleteChild(self, t, i):
+        """Remove ith child and shift children down from right."""
+
+        raise NotImplementedError
+
+
+    def getChildCount(self, t):
+        """How many children?  If 0, then this is a leaf node"""
+
+        raise NotImplementedError
+
+
+    def getParent(self, t):
+        """
+        Who is the parent node of this node; if null, implies node is root.
+        If your node type doesn't handle this, it's ok but the tree rewrites
+        in tree parsers need this functionality.
+        """
+
+        raise NotImplementedError
+
+
+    def setParent(self, t, parent):
+        """
+        Who is the parent node of this node; if null, implies node is root.
+        If your node type doesn't handle this, it's ok but the tree rewrites
+        in tree parsers need this functionality.
+        """
+
+        raise NotImplementedError
+
+
+    def getChildIndex(self, t):
+        """
+        What index is this node in the child list? Range: 0..n-1
+        If your node type doesn't handle this, it's ok but the tree rewrites
+        in tree parsers need this functionality.
+        """
+
+        raise NotImplementedError
+
+
+    def setChildIndex(self, t, index):
+        """
+        What index is this node in the child list? Range: 0..n-1
+        If your node type doesn't handle this, it's ok but the tree rewrites
+        in tree parsers need this functionality.
+        """
+
+        raise NotImplementedError
+
+
+    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
+        """
+        Replace from start to stop child index of parent with t, which might
+        be a list.  Number of children may be different
+        after this call.
+
+        If parent is null, don't do anything; must be at root of overall tree.
+        Can't replace whatever points to the parent externally.  Do nothing.
+        """
+
+        raise NotImplementedError
+
+
+    # Misc
+
+    def create(self, *args):
+        """
+        Deprecated, use createWithPayload, createFromToken or createFromType.
+
+        This method only exists to mimic the Java interface of TreeAdaptor.
+
+        """
+
+        if len(args) == 1 and isinstance(args[0], Token):
+            # Object create(Token payload);
+##             warnings.warn(
+##                 "Using create() is deprecated, use createWithPayload()",
+##                 DeprecationWarning,
+##                 stacklevel=2
+##                 )
+            return self.createWithPayload(args[0])
+
+        if (len(args) == 2
+            and isinstance(args[0], int)
+            and isinstance(args[1], Token)):
+            # Object create(int tokenType, Token fromToken);
+##             warnings.warn(
+##                 "Using create() is deprecated, use createFromToken()",
+##                 DeprecationWarning,
+##                 stacklevel=2
+##                 )
+            return self.createFromToken(args[0], args[1])
+
+        if (len(args) == 3
+            and isinstance(args[0], int)
+            and isinstance(args[1], Token)
+            and isinstance(args[2], str)):
+            # Object create(int tokenType, Token fromToken, String text);
+##             warnings.warn(
+##                 "Using create() is deprecated, use createFromToken()",
+##                 DeprecationWarning,
+##                 stacklevel=2
+##                 )
+            return self.createFromToken(args[0], args[1], args[2])
+
+        if (len(args) == 2
+            and isinstance(args[0], int)
+            and isinstance(args[1], str)):
+            # Object create(int tokenType, String text);
+##             warnings.warn(
+##                 "Using create() is deprecated, use createFromType()",
+##                 DeprecationWarning,
+##                 stacklevel=2
+##                 )
+            return self.createFromType(args[0], args[1])
+
+        raise TypeError(
+            "No create method with this signature found: {}"
+            .format(', '.join(type(v).__name__ for v in args)))
+
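+    # Illustrative mapping, not part of the interface: the legacy create()
+    # overloads dispatch to the explicit factory methods, e.g. (token type
+    # 4 and the token/text values are arbitrary examples):
+    #
+    #   adaptor.create(token)            -> adaptor.createWithPayload(token)
+    #   adaptor.create(4, token)         -> adaptor.createFromToken(4, token)
+    #   adaptor.create(4, token, "txt")  -> adaptor.createFromToken(4, token, "txt")
+    #   adaptor.create(4, "txt")         -> adaptor.createFromType(4, "txt")
+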
+
+############################################################################
+#
+# base implementation of Tree and TreeAdaptor
+#
+# Tree
+# \- BaseTree
+#
+# TreeAdaptor
+# \- BaseTreeAdaptor
+#
+############################################################################
+
+
+class BaseTree(Tree):
+    """
+    @brief A generic tree implementation with no payload.
+
+    You must subclass to
+    actually have any user data.  ANTLR v3 uses a list of children approach
+    instead of the child-sibling approach in v2.  A flat tree (a list) is
+    an empty node whose children represent the list.  An empty, but
+    non-null node is called "nil".
+    """
+
+    # BaseTree is abstract, no need to complain about not implemented abstract
+    # methods
+    # pylint: disable-msg=W0223
+
+    def __init__(self, node=None):
+        """
+        Creating a new node from an existing node does nothing for BaseTree,
+        as there are no fields other than the children list, which cannot
+        be copied because the children are not considered part of this node.
+        """
+
+        super().__init__()
+        self.children = []
+        self.parent = None
+        self.childIndex = 0
+
+
+    def getChild(self, i):
+        try:
+            return self.children[i]
+        except IndexError:
+            return None
+
+
+    def getChildren(self):
+        """@brief Get the children internal List
+
+        Note that if you directly mess with
+        the list, do so at your own risk.
+        """
+
+        # FIXME: mark as deprecated
+        return self.children
+
+
+    def getFirstChildWithType(self, treeType):
+        for child in self.children:
+            if child.getType() == treeType:
+                return child
+
+        return None
+
+
+    def getChildCount(self):
+        return len(self.children)
+
+
+    def addChild(self, childTree):
+        Add childTree as a child of this node.
+
+        If childTree is None, do nothing.  If childTree is a nil node, all
+        of its children are appended to this node's children and
+        re-parented to this node.
+        """
+
+        # this implementation is much simpler and probably less efficient
+        # than the mumbo-jumbo that Ter did for the Java runtime.
+
+        if childTree is None:
+            return
+
+        if childTree.isNil():
+            # t is an empty node possibly with children
+
+            if self.children is childTree.children:
+                raise ValueError("attempt to add child list to itself")
+
+            # fix parent pointer and childIndex for new children
+            for idx, child in enumerate(childTree.children):
+                child.parent = self
+                child.childIndex = len(self.children) + idx
+
+            self.children += childTree.children
+
+        else:
+            # child is not nil (don't care about children)
+            self.children.append(childTree)
+            childTree.parent = self
+            childTree.childIndex = len(self.children) - 1
+
+
+    def addChildren(self, children):
+        """Add all elements of kids list as children of this node"""
+
+        self.children += children
+
+
+    def setChild(self, i, t):
+        if t is None:
+            return
+
+        if t.isNil():
+            raise ValueError("Can't set single child to a list")
+
+        self.children[i] = t
+        t.parent = self
+        t.childIndex = i
+
+
+    def deleteChild(self, i):
+        killed = self.children[i]
+
+        del self.children[i]
+
+        # walk rest and decrement their child indexes
+        for idx, child in enumerate(self.children[i:]):
+            child.childIndex = i + idx
+
+        return killed
+
+
+    def replaceChildren(self, startChildIndex, stopChildIndex, newTree):
+        """
+        Delete children from start to stop and replace with t even if t is
+        a list (nil-root tree).  num of children can increase or decrease.
+        For huge child lists, inserting children can force walking rest of
+        children to set their childindex; could be slow.
+        """
+
+        if (startChildIndex >= len(self.children)
+            or stopChildIndex >= len(self.children)):
+            raise IndexError("indexes invalid")
+
+        replacingHowMany = stopChildIndex - startChildIndex + 1
+
+        # normalize to a list of children to add: newChildren
+        if newTree.isNil():
+            newChildren = newTree.children
+
+        else:
+            newChildren = [newTree]
+
+        replacingWithHowMany = len(newChildren)
+        delta = replacingHowMany - replacingWithHowMany
+
+
+        if delta == 0:
+            # if same number of nodes, do direct replace
+            for idx, child in enumerate(newChildren):
+                self.children[idx + startChildIndex] = child
+                child.parent = self
+                child.childIndex = idx + startChildIndex
+
+        else:
+            # length of children changes...
+
+            # ...delete replaced segment...
+            del self.children[startChildIndex:stopChildIndex+1]
+
+            # ...insert new segment...
+            self.children[startChildIndex:startChildIndex] = newChildren
+
+            # ...and fix indices
+            self.freshenParentAndChildIndexes(startChildIndex)
+
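+    # Illustrative sketch, not part of the API: using the CommonTree /
+    # CommonTreeAdaptor classes defined below (token types 7 and 8 are
+    # arbitrary example values):
+    #
+    #   adaptor = CommonTreeAdaptor()
+    #   parent = adaptor.createFromType(7, "r")
+    #   for text in ("a", "b", "c"):
+    #       parent.addChild(adaptor.createFromType(8, text))
+    #   parent.replaceChildren(1, 2, adaptor.createFromType(8, "x"))
+    #   assert parent.toStringTree() == "(r a x)"
+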
+
+    def isNil(self):
+        return False
+
+
+    def freshenParentAndChildIndexes(self, offset=0):
+        for idx, child in enumerate(self.children[offset:]):
+            child.childIndex = idx + offset
+            child.parent = self
+
+
+    def sanityCheckParentAndChildIndexes(self, parent=None, i=-1):
+        if parent != self.parent:
+            raise ValueError(
+                "parents don't match; expected {!r} found {!r}"
+                .format(parent, self.parent))
+
+        if i != self.childIndex:
+            raise ValueError(
+                "child indexes don't match; expected {} found {}"
+                .format(i, self.childIndex))
+
+        for idx, child in enumerate(self.children):
+            child.sanityCheckParentAndChildIndexes(self, idx)
+
+
+    def getChildIndex(self):
+        """BaseTree doesn't track child indexes."""
+
+        return 0
+
+
+    def setChildIndex(self, index):
+        """BaseTree doesn't track child indexes."""
+
+        pass
+
+
+    def getParent(self):
+        """BaseTree doesn't track parent pointers."""
+
+        return None
+
+    def setParent(self, t):
+        """BaseTree doesn't track parent pointers."""
+
+        pass
+
+
+    def hasAncestor(self, ttype):
+        """Walk upwards looking for ancestor with this token type."""
+        return self.getAncestor(ttype) is not None
+
+    def getAncestor(self, ttype):
+        """Walk upwards and get first ancestor with this token type."""
+        t = self.getParent()
+        while t is not None:
+            if t.getType() == ttype:
+                return t
+            t = t.getParent()
+
+        return None
+
+    def getAncestors(self):
+        """Return a list of all ancestors of this node.
+
+        The first node of list is the root and the last is the parent of
+        this node.
+        """
+        if self.getParent() is None:
+            return None
+
+        ancestors = []
+        t = self.getParent()
+        while t is not None:
+            ancestors.insert(0, t) # insert at start
+            t = t.getParent()
+
+        return ancestors
+
+
+    def toStringTree(self):
+        """Print out a whole tree not just a node"""
+
+        if len(self.children) == 0:
+            return self.toString()
+
+        buf = []
+        if not self.isNil():
+            buf.append('(')
+            buf.append(self.toString())
+            buf.append(' ')
+
+        for i, child in enumerate(self.children):
+            if i > 0:
+                buf.append(' ')
+            buf.append(child.toStringTree())
+
+        if not self.isNil():
+            buf.append(')')
+
+        return ''.join(buf)
+
+
+    def getLine(self):
+        return 0
+
+
+    def getCharPositionInLine(self):
+        return 0
+
+
+    def toString(self):
+        """Override to say how a node (not a tree) should look as text"""
+
+        raise NotImplementedError
+
+
+
+class BaseTreeAdaptor(TreeAdaptor):
+    """
+    @brief A TreeAdaptor that works with any Tree implementation.
+    """
+
+    # BaseTreeAdaptor is abstract, no need to complain about not implemented
+    # abstract methods
+    # pylint: disable-msg=W0223
+
+    def nil(self):
+        return self.createWithPayload(None)
+
+
+    def errorNode(self, input, start, stop, exc):
+        """
+        Create a tree node that holds the start and stop tokens associated
+        with an error.
+
+        If you specify your own kind of tree nodes, you will likely have to
+        override this method.  CommonTree returns Token.INVALID_TOKEN_TYPE
+        if there is no token payload, but you might have to set the token
+        type for a different node type.
+
+        You don't have to subclass CommonErrorNode; you will likely need to
+        subclass your own tree node class to avoid class cast exceptions.
+        """
+
+        return CommonErrorNode(input, start, stop, exc)
+
+
+    def isNil(self, tree):
+        return tree.isNil()
+
+
+    def dupTree(self, t, parent=None):
+        """
+        This is generic in the sense that it will work with any kind of
+        tree (not just Tree interface).  It invokes the adaptor routines
+        not the tree node routines to do the construction.
+        """
+
+        if t is None:
+            return None
+
+        newTree = self.dupNode(t)
+
+        # ensure new subtree root has parent/child index set
+
+        # same index in new tree
+        self.setChildIndex(newTree, self.getChildIndex(t))
+
+        self.setParent(newTree, parent)
+
+        for i in range(self.getChildCount(t)):
+            child = self.getChild(t, i)
+            newSubTree = self.dupTree(child, t)
+            self.addChild(newTree, newSubTree)
+
+        return newTree
+
+
+    def addChild(self, tree, child):
+        """
+        Add a child to the tree t.  If child is a flat tree (a list), make all
+        in the list children of t.  Warning: if t has no children but child
+        does and child is nil, it is ok to move the children to t via
+        t.children = child.children, i.e., without copying the list.  Just
+        make sure that this is consistent with how the user will build
+        ASTs.
+        """
+
+        #if isinstance(child, Token):
+        #    child = self.createWithPayload(child)
+
+        if tree is not None and child is not None:
+            tree.addChild(child)
+
+
+    def becomeRoot(self, newRoot, oldRoot):
+        """
+        If oldRoot is a nil root, just copy or move the children to newRoot.
+        If not a nil root, make oldRoot a child of newRoot.
+
+          old=^(nil a b c), new=r yields ^(r a b c)
+          old=^(a b c), new=r yields ^(r ^(a b c))
+
+        If newRoot is a nil-rooted single child tree, use the single
+        child as the new root node.
+
+          old=^(nil a b c), new=^(nil r) yields ^(r a b c)
+          old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
+
+        If oldRoot was null, it's ok, just return newRoot (even if isNil).
+
+          old=null, new=r yields r
+          old=null, new=^(nil r) yields ^(nil r)
+
+        Return newRoot.  Throw an exception if newRoot is not a
+        simple node or nil root with a single child node--it must be a root
+        node.  If newRoot is ^(nil x) return x as newRoot.
+
+        Be advised that it's ok for newRoot to point at oldRoot's
+        children; i.e., you don't have to copy the list.  We are
+        constructing these nodes so we should have this control for
+        efficiency.
+        """
+
+        if isinstance(newRoot, Token):
+            newRoot = self.create(newRoot)
+
+        if oldRoot is None:
+            return newRoot
+
+        if not isinstance(newRoot, CommonTree):
+            newRoot = self.createWithPayload(newRoot)
+
+        # handle ^(nil real-node)
+        if newRoot.isNil():
+            nc = newRoot.getChildCount()
+            if nc == 1:
+                newRoot = newRoot.getChild(0)
+
+            elif nc > 1:
+                # TODO: make tree run time exceptions hierarchy
+                raise RuntimeError("more than one node as root")
+
+        # add oldRoot to newRoot; addChild takes care of case where oldRoot
+        # is a flat list (i.e., nil-rooted tree).  All children of oldRoot
+        # are added to newRoot.
+        newRoot.addChild(oldRoot)
+        return newRoot
+
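+    # Illustrative sketch, not part of the API (token types 7 and 8 are
+    # arbitrary example values): old=^(nil a b), new=r yields ^(r a b).
+    #
+    #   adaptor = CommonTreeAdaptor()
+    #   r = adaptor.createFromType(7, "r")
+    #   flat = adaptor.nil()
+    #   flat.addChild(adaptor.createFromType(8, "a"))
+    #   flat.addChild(adaptor.createFromType(8, "b"))
+    #   assert adaptor.becomeRoot(r, flat).toStringTree() == "(r a b)"
+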
+
+    def rulePostProcessing(self, root):
+        """Transform ^(nil x) to x and nil to null"""
+
+        if root is not None and root.isNil():
+            if root.getChildCount() == 0:
+                root = None
+
+            elif root.getChildCount() == 1:
+                root = root.getChild(0)
+                # whoever invokes rule will set parent and child index
+                root.setParent(None)
+                root.setChildIndex(-1)
+
+        return root
+
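+    # Illustrative sketch, not part of the API (token type 4 is an
+    # arbitrary example value): ^(nil x) collapses to x, and an empty nil
+    # node collapses to None.
+    #
+    #   adaptor = CommonTreeAdaptor()
+    #   single = adaptor.nil()
+    #   single.addChild(adaptor.createFromType(4, "x"))
+    #   assert adaptor.rulePostProcessing(single).getText() == "x"
+    #   assert adaptor.rulePostProcessing(adaptor.nil()) is None
+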
+
+    def createFromToken(self, tokenType, fromToken, text=None):
+        if fromToken is None:
+            return self.createFromType(tokenType, text)
+
+        assert isinstance(tokenType, int), type(tokenType).__name__
+        assert isinstance(fromToken, Token), type(fromToken).__name__
+        assert text is None or isinstance(text, str), type(text).__name__
+
+        fromToken = self.createToken(fromToken)
+        fromToken.type = tokenType
+        if text is not None:
+            fromToken.text = text
+        t = self.createWithPayload(fromToken)
+        return t
+
+
+    def createFromType(self, tokenType, text):
+        assert isinstance(tokenType, int), type(tokenType).__name__
+        assert isinstance(text, str) or text is None, type(text).__name__
+
+        fromToken = self.createToken(tokenType=tokenType, text=text)
+        t = self.createWithPayload(fromToken)
+        return t
+
+
+    def getType(self, t):
+        return t.getType()
+
+
+    def setType(self, t, type):
+        raise RuntimeError("don't know enough about Tree node")
+
+
+    def getText(self, t):
+        return t.getText()
+
+
+    def setText(self, t, text):
+        raise RuntimeError("don't know enough about Tree node")
+
+
+    def getChild(self, t, i):
+        return t.getChild(i)
+
+
+    def setChild(self, t, i, child):
+        t.setChild(i, child)
+
+
+    def deleteChild(self, t, i):
+        return t.deleteChild(i)
+
+
+    def getChildCount(self, t):
+        return t.getChildCount()
+
+
+    def getUniqueID(self, node):
+        return hash(node)
+
+
+    def createToken(self, fromToken=None, tokenType=None, text=None):
+        """
+        Tell me how to create a token for use with imaginary token nodes.
+        For example, there is probably no input symbol associated with imaginary
+        token DECL, but you need to create it as a payload or whatever for
+        the DECL node as in ^(DECL type ID).
+
+        If you care what the token payload objects' type is, you should
+        override this method and any other createToken variant.
+        """
+
+        raise NotImplementedError
+
+
+############################################################################
+#
+# common tree implementation
+#
+# Tree
+# \- BaseTree
+#    \- CommonTree
+#       \- CommonErrorNode
+#
+# TreeAdaptor
+# \- BaseTreeAdaptor
+#    \- CommonTreeAdaptor
+#
+############################################################################
+
+
+class CommonTree(BaseTree):
+    """@brief A tree node that is wrapper for a Token object.
+
+    After 3.0 release
+    while building tree rewrite stuff, it became clear that computing
+    parent and child index is very difficult and cumbersome.  Better to
+    spend the space in every tree node.  If you don't want these extra
+    fields, it's easy to cut them out in your own BaseTree subclass.
+
+    """
+
+    def __init__(self, payload):
+        BaseTree.__init__(self)
+
+        # What token indexes bracket all tokens associated with this node
+        # and below?
+        self.startIndex = -1
+        self.stopIndex = -1
+
+        # Who is the parent node of this node; if null, implies node is root
+        self.parent = None
+
+        # What index is this node in the child list? Range: 0..n-1
+        self.childIndex = -1
+
+        # A single token is the payload
+        if payload is None:
+            self.token = None
+
+        elif isinstance(payload, CommonTree):
+            self.token = payload.token
+            self.startIndex = payload.startIndex
+            self.stopIndex = payload.stopIndex
+
+        elif isinstance(payload, Token):
+            self.token = payload
+
+        else:
+            raise TypeError(type(payload).__name__)
+
+
+
+    def getToken(self):
+        return self.token
+
+
+    def dupNode(self):
+        return CommonTree(self)
+
+
+    def isNil(self):
+        return self.token is None
+
+
+    def getType(self):
+        if self.token is None:
+            return INVALID_TOKEN_TYPE
+
+        return self.token.type
+
+    type = property(getType)
+
+
+    def getText(self):
+        if self.token is None:
+            return None
+
+        return self.token.text
+
+    text = property(getText)
+
+
+    def getLine(self):
+        if self.token is None or self.token.line == 0:
+            if self.getChildCount():
+                return self.getChild(0).getLine()
+            else:
+                return 0
+
+        return self.token.line
+
+    line = property(getLine)
+
+
+    def getCharPositionInLine(self):
+        if self.token is None or self.token.charPositionInLine == -1:
+            if self.getChildCount():
+                return self.getChild(0).getCharPositionInLine()
+            else:
+                return 0
+
+        else:
+            return self.token.charPositionInLine
+
+    charPositionInLine = property(getCharPositionInLine)
+
+
+    def getTokenStartIndex(self):
+        if self.startIndex == -1 and self.token:
+            return self.token.index
+
+        return self.startIndex
+
+    def setTokenStartIndex(self, index):
+        self.startIndex = index
+
+    tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex)
+
+
+    def getTokenStopIndex(self):
+        if self.stopIndex == -1 and self.token:
+            return self.token.index
+
+        return self.stopIndex
+
+    def setTokenStopIndex(self, index):
+        self.stopIndex = index
+
+    tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex)
+
+
+    def setUnknownTokenBoundaries(self):
+        """For every node in this subtree, make sure it's start/stop token's
+        are set.  Walk depth first, visit bottom up.  Only updates nodes
+        with at least one token index < 0.
+        """
+
+        if not self.children:
+            # leaf node: take the boundaries from the payload token, if any
+            if self.token and (self.startIndex < 0 or self.stopIndex < 0):
+                self.startIndex = self.stopIndex = self.token.index
+
+            return
+
+        for child in self.children:
+            child.setUnknownTokenBoundaries()
+
+        if self.startIndex >= 0 and self.stopIndex >= 0:
+            # already set
+            return
+
+        if self.children:
+            firstChild = self.children[0]
+            lastChild = self.children[-1]
+            self.startIndex = firstChild.getTokenStartIndex()
+            self.stopIndex = lastChild.getTokenStopIndex()
+
+
+    def getChildIndex(self):
+        #FIXME: mark as deprecated
+        return self.childIndex
+
+
+    def setChildIndex(self, idx):
+        #FIXME: mark as deprecated
+        self.childIndex = idx
+
+
+    def getParent(self):
+        #FIXME: mark as deprecated
+        return self.parent
+
+
+    def setParent(self, t):
+        #FIXME: mark as deprecated
+        self.parent = t
+
+
+    def toString(self):
+        if self.isNil():
+            return "nil"
+
+        if self.getType() == INVALID_TOKEN_TYPE:
+            return "<errornode>"
+
+        return self.token.text
+
+    __str__ = toString
+
+
+
+    def toStringTree(self):
+        if not self.children:
+            return self.toString()
+
+        ret = ''
+        if not self.isNil():
+            ret += '({!s} '.format(self)
+
+        ret += ' '.join([child.toStringTree() for child in self.children])
+
+        if not self.isNil():
+            ret += ')'
+
+        return ret
+
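+    # Illustrative sketch, not part of the API: wrapping tokens directly
+    # (the token type values 5 and 6 are arbitrary example values):
+    #
+    #   plus = CommonTree(CommonToken(type=5, text="+"))
+    #   plus.addChild(CommonTree(CommonToken(type=6, text="1")))
+    #   plus.addChild(CommonTree(CommonToken(type=6, text="2")))
+    #   assert plus.toStringTree() == "(+ 1 2)"
+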
+
+INVALID_NODE = CommonTree(INVALID_TOKEN)
+
+
+class CommonErrorNode(CommonTree):
+    """A node representing erroneous token range in token stream"""
+
+    def __init__(self, input, start, stop, exc):
+        CommonTree.__init__(self, None)
+
+        if (stop is None or (stop.index < start.index and stop.type != EOF)):
+            # sometimes resync does not consume a token (when LT(1) is
+            # in the follow set), so stop will be one to the left of start; adjust.
+            # Also handle case where start is the first token and no token
+            # is consumed during recovery; LT(-1) will return null.
+            stop = start
+
+        self.input = input
+        self.start = start
+        self.stop = stop
+        self.trappedException = exc
+
+
+    def isNil(self):
+        return False
+
+
+    def getType(self):
+        return INVALID_TOKEN_TYPE
+
+
+    def getText(self):
+        if isinstance(self.start, Token):
+            i = self.start.index
+            j = self.stop.index
+            if self.stop.type == EOF:
+                j = self.input.size()
+
+            badText = self.input.toString(i, j)
+
+        elif isinstance(self.start, Tree):
+            badText = self.input.toString(self.start, self.stop)
+
+        else:
+            # people should subclass if they alter the tree type so this
+            # next one is for sure correct.
+            badText = "<unknown>"
+
+        return badText
+
+
+    def toString(self):
+        if isinstance(self.trappedException, MissingTokenException):
+            return ("<missing type: "
+                    + str(self.trappedException.getMissingType())
+                    + ">")
+
+        elif isinstance(self.trappedException, UnwantedTokenException):
+            return ("<extraneous: "
+                    + str(self.trappedException.getUnexpectedToken())
+                    + ", resync=" + self.getText() + ">")
+
+        elif isinstance(self.trappedException, MismatchedTokenException):
+            return ("<mismatched token: "
+                    + str(self.trappedException.token)
+                    + ", resync=" + self.getText() + ">")
+
+        elif isinstance(self.trappedException, NoViableAltException):
+            return ("<unexpected: "
+                    + str(self.trappedException.token)
+                    + ", resync=" + self.getText() + ">")
+
+        return "<error: "+self.getText()+">"
+
+    __str__ = toString
+
+
+class CommonTreeAdaptor(BaseTreeAdaptor):
+    """
+    @brief A TreeAdaptor that works with any Tree implementation.
+
+    It provides
+    really just factory methods; all the work is done by BaseTreeAdaptor.
+    If you would like to have different tokens created than ClassicToken
+    objects, you need to override this and then set the parser tree adaptor to
+    use your subclass.
+
+    To get your parser to build nodes of a different type, override
+    create(Token), errorNode(), and to be safe, YourTreeClass.dupNode().
+    dupNode is called to duplicate nodes during rewrite operations.
+    """
+
+    def dupNode(self, treeNode):
+        """
+        Duplicate a node.  This is part of the factory;
+        override if you want another kind of node to be built.
+
+        I could use reflection to prevent having to override this
+        but reflection is slow.
+        """
+
+        if treeNode is None:
+            return None
+
+        return treeNode.dupNode()
+
+
+    def createWithPayload(self, payload):
+        return CommonTree(payload)
+
+
+    def createToken(self, fromToken=None, tokenType=None, text=None):
+        """
+        Tell me how to create a token for use with imaginary token nodes.
+        For example, there is probably no input symbol associated with imaginary
+        token DECL, but you need to create it as a payload or whatever for
+        the DECL node as in ^(DECL type ID).
+
+        If you care what the token payload objects' type is, you should
+        override this method and any other createToken variant.
+        """
+
+        if fromToken is not None:
+            return CommonToken(oldToken=fromToken)
+
+        return CommonToken(type=tokenType, text=text)
+
+
+    def setTokenBoundaries(self, t, startToken, stopToken):
+        """
+        Track start/stop token for subtree root created for a rule.
+        Only works with Tree nodes.  For rules that match nothing,
+        seems like this will yield start=i and stop=i-1 in a nil node.
+        Might be useful info so I'll not force to be i..i.
+        """
+
+        if t is None:
+            return
+
+        start = 0
+        stop = 0
+
+        if startToken is not None:
+            start = startToken.index
+
+        if stopToken is not None:
+            stop = stopToken.index
+
+        t.setTokenStartIndex(start)
+        t.setTokenStopIndex(stop)
+
+
+    def getTokenStartIndex(self, t):
+        if t is None:
+            return -1
+        return t.getTokenStartIndex()
+
+
+    def getTokenStopIndex(self, t):
+        if t is None:
+            return -1
+        return t.getTokenStopIndex()
+
+
+    def getText(self, t):
+        if t is None:
+            return None
+        return t.text
+
+
+    def getType(self, t):
+        if t is None:
+            return INVALID_TOKEN_TYPE
+
+        return t.type
+
+
+    def getToken(self, t):
+        """
+        What is the Token associated with this node?  If
+        you are not using CommonTree, then you must
+        override this in your own adaptor.
+        """
+
+        if isinstance(t, CommonTree):
+            return t.getToken()
+
+        return None # no idea what to do
+
+
+    def getChild(self, t, i):
+        if t is None:
+            return None
+        return t.getChild(i)
+
+
+    def getChildCount(self, t):
+        if t is None:
+            return 0
+        return t.getChildCount()
+
+
+    def getParent(self, t):
+        return t.getParent()
+
+
+    def setParent(self, t, parent):
+        t.setParent(parent)
+
+
+    def getChildIndex(self, t):
+        if t is None:
+            return 0
+        return t.getChildIndex()
+
+
+    def setChildIndex(self, t, index):
+        t.setChildIndex(index)
+
+
+    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
+        if parent is not None:
+            parent.replaceChildren(startChildIndex, stopChildIndex, t)
+
+
+############################################################################
+#
+# streams
+#
+# TreeNodeStream
+# \- BaseTree
+#    \- CommonTree
+#
+# TreeAdaptor
+# \- BaseTreeAdaptor
+#    \- CommonTreeAdaptor
+#
+############################################################################
+
+
+
+class TreeNodeStream(IntStream):
+    """@brief A stream of tree nodes
+
+    It accesses nodes from a tree of some kind.
+    """
+
+    # TreeNodeStream is abstract, no need to complain about not implemented
+    # abstract methods
+    # pylint: disable-msg=W0223
+
+    def get(self, i):
+        """Get a tree node at an absolute index i; 0..n-1.
+        If you don't want to buffer up nodes, then this method makes no
+        sense for you.
+        """
+
+        raise NotImplementedError
+
+
+    def LT(self, k):
+        """
+        Get the tree node at the current input pointer + k ahead, where k=1
+        is the next node.  k<0 indicates nodes in the past; LT(-1) is the
+        previous node, but implementations are not required to provide
+        results for k < -1.  LT(0) is undefined.  For k>=n, return null.
+        Return null for LT(0) and any index that results in an absolute
+        address that is negative.
+
+        This is analogous to the LT() method of the TokenStream, but this
+        returns a tree node instead of a token.  Makes code gen identical
+        for both parser and tree grammars. :)
+        """
+
+        raise NotImplementedError
+
+
+    def getTreeSource(self):
+        """
+        Where is this stream pulling nodes from?  This is not the name, but
+        the object that provides node objects.
+        """
+
+        raise NotImplementedError
+
+
+    def getTokenStream(self):
+        """
+        If the tree associated with this stream was created from a TokenStream,
+        you can specify it here.  Used to do rule $text attribute in tree
+        parser.  Optional unless you use tree parser rule text attribute
+        or output=template and rewrite=true options.
+        """
+
+        raise NotImplementedError
+
+
+    def getTreeAdaptor(self):
+        """
+        What adaptor can tell me how to interpret/navigate nodes and
+        trees.  E.g., get text of a node.
+        """
+
+        raise NotImplementedError
+
+
+    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
+        """
+        As we flatten the tree, we use UP, DOWN nodes to represent
+        the tree structure.  When debugging we need unique nodes
+        so we have to instantiate new ones.  When doing normal tree
+        parsing, it's slow and a waste of memory to create unique
+        navigation nodes.  Default should be false.
+        """
+
+        raise NotImplementedError
+
+
+    def reset(self):
+        """
+        Reset the tree node stream in such a way that it acts like
+        a freshly constructed stream.
+        """
+
+        raise NotImplementedError
+
+
+    def toString(self, start, stop):
+        """
+        Return the text of all nodes from start to stop, inclusive.
+        If the stream does not buffer all the nodes then it can still
+        walk recursively from start until stop.  You can always return
+        null or "" too, but users should not access $ruleLabel.text in
+        an action of course in that case.
+        """
+
+        raise NotImplementedError
+
+
+    # REWRITING TREES (used by tree parser)
+    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
+        """
+        Replace from start to stop child index of parent with t, which might
+        be a list.  Number of children may be different
+        after this call.  The stream is notified because it is walking the
+        tree and might need to know you are monkeying with the underlying
+        tree.  Also, it might be able to modify the node stream to avoid
+        restreaming for future phases.
+
+        If parent is null, don't do anything; must be at root of overall tree.
+        Can't replace whatever points to the parent externally.  Do nothing.
+        """
+
+        raise NotImplementedError
+
+
+class CommonTreeNodeStream(TreeNodeStream):
+    """@brief A buffered stream of tree nodes.
+
+    Nodes can be from a tree of ANY kind.
+
+    This node stream sucks all nodes out of the tree specified in
+    the constructor during construction and makes pointers into
+    the tree using an array of Object pointers. The stream necessarily
+    includes pointers to DOWN and UP and EOF nodes.
+
+    This stream knows how to mark/release for backtracking.
+
+    This stream is most suitable for tree interpreters that need to
+    jump around a lot or for tree parsers requiring speed (at cost of memory).
+    There is some duplicated functionality here with UnBufferedTreeNodeStream
+    but just in bookkeeping, not tree walking etc...
+
+    @see UnBufferedTreeNodeStream
+    """
+
+    def __init__(self, *args):
+        TreeNodeStream.__init__(self)
+
+        if len(args) == 1:
+            adaptor = CommonTreeAdaptor()
+            tree = args[0]
+
+            nodes = None
+            down = None
+            up = None
+            eof = None
+
+        elif len(args) == 2:
+            adaptor = args[0]
+            tree = args[1]
+
+            nodes = None
+            down = None
+            up = None
+            eof = None
+
+        elif len(args) == 3:
+            parent = args[0]
+            start = args[1]
+            stop = args[2]
+
+            adaptor = parent.adaptor
+            tree = parent.root
+
+            nodes = parent.nodes[start:stop]
+            down = parent.down
+            up = parent.up
+            eof = parent.eof
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        # all these navigation nodes are shared and hence they
+        # cannot contain any line/column info
+        if down is not None:
+            self.down = down
+        else:
+            self.down = adaptor.createFromType(DOWN, "DOWN")
+
+        if up is not None:
+            self.up = up
+        else:
+            self.up = adaptor.createFromType(UP, "UP")
+
+        if eof is not None:
+            self.eof = eof
+        else:
+            self.eof = adaptor.createFromType(EOF, "EOF")
+
+        # The complete mapping from stream index to tree node.
+        # This buffer includes pointers to DOWN, UP, and EOF nodes.
+        # It is built upon ctor invocation.  The elements are of type
+        # Object as we don't know what the trees look like.
+
+        # Load upon first need of the buffer so we can set token types
+        # of interest for reverseIndexing.  Slows us down a wee bit to
+        # do all of the if p==-1 testing everywhere though.
+        if nodes is not None:
+            self.nodes = nodes
+        else:
+            self.nodes = []
+
+        # Pull nodes from which tree?
+        self.root = tree
+
+        # IF this tree (root) was created from a token stream, track it.
+        self.tokens = None
+
+        # What tree adaptor was used to build these trees
+        self.adaptor = adaptor
+
+        # Reuse same DOWN, UP navigation nodes unless this is true
+        self.uniqueNavigationNodes = False
+
+        # The index into the nodes list of the current node (next node
+        # to consume).  If -1, nodes array not filled yet.
+        self.p = -1
+
+        # Track the last mark() call result value for use in rewind().
+        self.lastMarker = None
+
+        # Stack of indexes used for push/pop calls
+        self.calls = []
+
+
+    def fillBuffer(self):
+        """Walk tree with depth-first-search and fill nodes buffer.
+        Don't add DOWN, UP nodes if it's a list (i.e. the root is nil).
+        """
+
+        self._fillBuffer(self.root)
+        self.p = 0 # buffer of nodes initialized now
+
+
+    def _fillBuffer(self, t):
+        nil = self.adaptor.isNil(t)
+
+        if not nil:
+            self.nodes.append(t) # add this node
+
+        # add DOWN node if t has children
+        n = self.adaptor.getChildCount(t)
+        if not nil and n > 0:
+            self.addNavigationNode(DOWN)
+
+        # and now add all its children
+        for c in range(n):
+            self._fillBuffer(self.adaptor.getChild(t, c))
+
+        # add UP node if t has children
+        if not nil and n > 0:
+            self.addNavigationNode(UP)
+
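+    # Illustrative sketch, not part of the API: flattening ^(r a) yields
+    # the node buffer [r, DOWN, a, UP] (token types 7 and 8 are arbitrary
+    # example values; DOWN/UP are the shared navigation nodes created in
+    # __init__):
+    #
+    #   adaptor = CommonTreeAdaptor()
+    #   root = adaptor.createFromType(7, "r")
+    #   root.addChild(adaptor.createFromType(8, "a"))
+    #   stream = CommonTreeNodeStream(root)
+    #   stream.fillBuffer()
+    #   assert [adaptor.getText(n) for n in stream.nodes] == ["r", "DOWN", "a", "UP"]
+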
+
+    def getNodeIndex(self, node):
+        """What is the stream index for node? 0..n-1
+        Return -1 if node not found.
+        """
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        for i, t in enumerate(self.nodes):
+            if t == node:
+                return i
+
+        return -1
+
+
+    def addNavigationNode(self, ttype):
+        """
+        As we flatten the tree, we use UP, DOWN nodes to represent
+        the tree structure.  When debugging we need unique nodes
+        so instantiate new ones when uniqueNavigationNodes is true.
+        """
+
+        navNode = None
+
+        if ttype == DOWN:
+            if self.hasUniqueNavigationNodes():
+                navNode = self.adaptor.createFromType(DOWN, "DOWN")
+
+            else:
+                navNode = self.down
+
+        else:
+            if self.hasUniqueNavigationNodes():
+                navNode = self.adaptor.createFromType(UP, "UP")
+
+            else:
+                navNode = self.up
+
+        self.nodes.append(navNode)
+
+
+    def get(self, i):
+        if self.p == -1:
+            self.fillBuffer()
+
+        return self.nodes[i]
+
+
+    def LT(self, k):
+        if self.p == -1:
+            self.fillBuffer()
+
+        if k == 0:
+            return None
+
+        if k < 0:
+            return self.LB(-k)
+
+        if self.p + k - 1 >= len(self.nodes):
+            return self.eof
+
+        return self.nodes[self.p + k - 1]
+
+
+    def getCurrentSymbol(self):
+        return self.LT(1)
+
+
+    def LB(self, k):
+        """Look backwards k nodes"""
+
+        if k == 0:
+            return None
+
+        if self.p - k < 0:
+            return None
+
+        return self.nodes[self.p - k]
+
+
+    def isEOF(self, obj):
+        return self.adaptor.getType(obj) == EOF
+
+
+    def getTreeSource(self):
+        return self.root
+
+
+    def getSourceName(self):
+        return self.getTokenStream().getSourceName()
+
+
+    def getTokenStream(self):
+        return self.tokens
+
+
+    def setTokenStream(self, tokens):
+        self.tokens = tokens
+
+
+    def getTreeAdaptor(self):
+        return self.adaptor
+
+
+    def hasUniqueNavigationNodes(self):
+        return self.uniqueNavigationNodes
+
+
+    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
+        self.uniqueNavigationNodes = uniqueNavigationNodes
+
+
+    def consume(self):
+        if self.p == -1:
+            self.fillBuffer()
+
+        self.p += 1
+
+
+    def LA(self, i):
+        return self.adaptor.getType(self.LT(i))
+
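+    # Illustrative sketch, not part of the API: a consumer loop over the
+    # flattened nodes, in the style of a generated tree parser (EOF is the
+    # module-level token type constant used for self.eof):
+    #
+    #   while stream.LA(1) != EOF:
+    #       node = stream.LT(1)
+    #       # ... inspect node ...
+    #       stream.consume()
+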
+
+    def mark(self):
+        if self.p == -1:
+            self.fillBuffer()
+
+
+        self.lastMarker = self.index()
+        return self.lastMarker
+
+
+    def release(self, marker=None):
+        # no resources to release
+
+        pass
+
+
+    def index(self):
+        return self.p
+
+
+    def rewind(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+
+        self.seek(marker)
+
+
+    def seek(self, index):
+        if self.p == -1:
+            self.fillBuffer()
+
+        self.p = index
+
+
+    def push(self, index):
+        """
+        Make stream jump to a new location, saving old location.
+        Switch back with pop().
+        """
+
+        self.calls.append(self.p) # save current index
+        self.seek(index)
+
+
+    def pop(self):
+        """
+        Seek back to previous index saved during last push() call.
+        Return top of stack (return index).
+        """
+
+        ret = self.calls.pop(-1)
+        self.seek(ret)
+        return ret
+
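+    # Illustrative sketch, not part of the API: push()/pop() are used by
+    # generated code to temporarily jump into a subtree and come back
+    # (the index 5 is an arbitrary example value):
+    #
+    #   stream.push(5)      # remember current position, seek to index 5
+    #   # ... walk the subtree starting at index 5 ...
+    #   stream.pop()        # resume at the remembered position
+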
+
+    def reset(self):
+        self.p = 0
+        self.lastMarker = 0
+        self.calls = []
+
+
+    def size(self):
+        if self.p == -1:
+            self.fillBuffer()
+
+        return len(self.nodes)
+
+
+    # TREE REWRITE INTERFACE
+
+    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
+        if parent is not None:
+            self.adaptor.replaceChildren(
+                parent, startChildIndex, stopChildIndex, t
+                )
+
+
+    def __str__(self):
+        """Used for testing, just return the token type stream"""
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        return ' '.join([str(self.adaptor.getType(node))
+                         for node in self.nodes
+                         ])
+
+
+    def toString(self, start, stop):
+        if start is None or stop is None:
+            return None
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        #System.out.println("stop: "+stop);
+        #if ( start instanceof CommonTree )
+        #    System.out.print("toString: "+((CommonTree)start).getToken()+", ");
+        #else
+        #    System.out.println(start);
+        #if ( stop instanceof CommonTree )
+        #    System.out.println(((CommonTree)stop).getToken());
+        #else
+        #    System.out.println(stop);
+
+        # if we have the token stream, use that to dump text in order
+        if self.tokens is not None:
+            beginTokenIndex = self.adaptor.getTokenStartIndex(start)
+            endTokenIndex = self.adaptor.getTokenStopIndex(stop)
+
+            # if it's a tree, use start/stop index from start node
+            # else use token range from start/stop nodes
+            if self.adaptor.getType(stop) == UP:
+                endTokenIndex = self.adaptor.getTokenStopIndex(start)
+
+            elif self.adaptor.getType(stop) == EOF:
+                endTokenIndex = self.size() -2 # don't use EOF
+
+            return self.tokens.toString(beginTokenIndex, endTokenIndex)
+
+        # walk nodes looking for start
+        i, t = 0, None
+        for i, t in enumerate(self.nodes):
+            if t == start:
+                break
+
+        # now walk until we see stop, filling string buffer with text
+        buf = []
+        t = self.nodes[i]
+        while t != stop:
+            text = self.adaptor.getText(t)
+            if text is None:
+                text = " " + self.adaptor.getType(t)
+
+            buf.append(text)
+            i += 1
+            t = self.nodes[i]
+
+        # include stop node too
+        text = self.adaptor.getText(stop)
+        if text is None:
+            text = " " +self.adaptor.getType(stop)
+
+        buf.append(text)
+
+        return ''.join(buf)
+
+
+    ## iterator interface
+    def __iter__(self):
+        if self.p == -1:
+            self.fillBuffer()
+
+        for node in self.nodes:
+            yield node
+
+
+#############################################################################
+#
+# tree parser
+#
+#############################################################################
+
+class TreeParser(BaseRecognizer):
+    """@brief Baseclass for generated tree parsers.
+
+    A parser for a stream of tree nodes.  "tree grammars" result in a subclass
+    of this.  All the error reporting and recovery is shared with Parser via
+    the BaseRecognizer superclass.
+    """
+
+    def __init__(self, input, state=None):
+        BaseRecognizer.__init__(self, state)
+
+        self.input = None
+        self.setTreeNodeStream(input)
+
+
+    def reset(self):
+        BaseRecognizer.reset(self) # reset all recognizer state variables
+        if self.input is not None:
+            self.input.seek(0) # rewind the input
+
+
+    def setTreeNodeStream(self, input):
+        """Set the input stream"""
+
+        self.input = input
+
+
+    def getTreeNodeStream(self):
+        return self.input
+
+
+    def getSourceName(self):
+        return self.input.getSourceName()
+
+
+    def getCurrentInputSymbol(self, input):
+        return input.LT(1)
+
+
+    def getMissingSymbol(self, input, e, expectedTokenType, follow):
+        tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"
+        adaptor = input.adaptor
+        return adaptor.createToken(
+            CommonToken(type=expectedTokenType, text=tokenText))
+
+
+    # precompiled regex used by inContext
+    dotdot = ".*[^.]\\.\\.[^.].*"
+    doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*"
+    dotdotPattern = re.compile(dotdot)
+    doubleEtcPattern = re.compile(doubleEtc)
+
+    def inContext(self, context, adaptor=None, tokenName=None, t=None):
+        """Check if current node in input has a context.
+
+        Context means sequence of nodes towards root of tree.  For example,
+        you might say context is "MULT" which means my parent must be MULT.
+        "CLASS VARDEF" says the current node must be a child of a VARDEF whose
+        parent is a CLASS node.  You can use "..." to mean zero-or-more nodes.
+        "METHOD ... VARDEF" means my parent is VARDEF and somewhere above
+        that is a METHOD node.  The first node in the context is not
+        necessarily the root.  The context matcher stops matching and returns
+        true when it runs out of context.  There is no way to force the first
+        node to be the root.
+        """
+
+        return self._inContext(
+            self.input.getTreeAdaptor(), self.tokenNames,
+            self.input.LT(1), context)
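+
+    # Illustrative sketch (hypothetical token names, not part of the original
+    # source): with the walker positioned on an ID node inside
+    # ^(CLASS ^(METHOD ^(VARDEF ID))):
+    #
+    #   self.inContext("VARDEF")             -> True   (parent is VARDEF)
+    #   self.inContext("METHOD ... VARDEF")  -> True   (parent is VARDEF, METHOD above it)
+    #   self.inContext("CLASS VARDEF")       -> False  (VARDEF's parent is METHOD)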
+
+    @classmethod
+    def _inContext(cls, adaptor, tokenNames, t, context):
+        """The worker for inContext.
+
+        It's static and full of parameters for testing purposes.
+        """
+
+        if cls.dotdotPattern.match(context):
+            # don't allow "..", must be "..."
+            raise ValueError("invalid syntax: ..")
+
+        if cls.doubleEtcPattern.match(context):
+            # don't allow double "..."
+            raise ValueError("invalid syntax: ... ...")
+
+        # ensure spaces around ...
+        context = context.replace("...", " ... ")
+        context = context.strip()
+        nodes = context.split()
+
+        ni = len(nodes) - 1
+        t = adaptor.getParent(t)
+        while ni >= 0 and t is not None:
+            if nodes[ni] == "...":
+                # walk upwards until we see nodes[ni-1] then continue walking
+                if ni == 0:
+                    # ... at start is no-op
+                    return True
+                goal = nodes[ni-1]
+                ancestor = cls._getAncestor(adaptor, tokenNames, t, goal)
+                if ancestor is None:
+                    return False
+                t = ancestor
+                ni -= 1
+
+            name = tokenNames[adaptor.getType(t)]
+            if name != nodes[ni]:
+                return False
+
+            # advance to parent and to previous element in context node list
+            ni -= 1
+            t = adaptor.getParent(t)
+
+        # at root but more nodes to match
+        if t is None and ni >= 0:
+            return False
+
+        return True
+
+    @staticmethod
+    def _getAncestor(adaptor, tokenNames, t, goal):
+        """Helper for static inContext."""
+        while t is not None:
+            name = tokenNames[adaptor.getType(t)]
+            if name == goal:
+                return t
+            t = adaptor.getParent(t)
+
+        return None
+
+
+    def matchAny(self):
+        """
+        Matching '.' in a tree parser has special meaning: skip the node, or
+        the entire tree if the node has children.  If there are children,
+        scan until the corresponding UP node.
+        """
+
+        self._state.errorRecovery = False
+
+        look = self.input.LT(1)
+        if self.input.getTreeAdaptor().getChildCount(look) == 0:
+            self.input.consume() # not subtree, consume 1 node and return
+            return
+
+        # current node is a subtree, skip to corresponding UP.
+        # must count nesting level to get right UP
+        level = 0
+        tokenType = self.input.getTreeAdaptor().getType(look)
+        while tokenType != EOF and not (tokenType == UP and level==0):
+            self.input.consume()
+            look = self.input.LT(1)
+            tokenType = self.input.getTreeAdaptor().getType(look)
+            if tokenType == DOWN:
+                level += 1
+
+            elif tokenType == UP:
+                level -= 1
+
+        self.input.consume() # consume UP
+
+
+    def mismatch(self, input, ttype, follow):
+        """
+        We have DOWN/UP nodes in the stream that have no line info, so
+        override.  We also want to alter the exception type.  Don't try to
+        recover from tree parser errors inline...
+        """
+
+        raise MismatchedTreeNodeException(ttype, input)
+
+
+    def getErrorHeader(self, e):
+        """
+        Prefix the error message with the grammar name; the message is
+        always intended for the programmer, because it was the parser,
+        not the user, that built the input tree.
+        """
+
+        return (self.getGrammarFileName() +
+                ": node from {}line {}:{}".format(
+                    "after " if e.approximateLineInfo else '',
+                    e.line,
+                    e.charPositionInLine))
+
+    def getErrorMessage(self, e):
+        """
+        Tree parsers parse nodes; nodes usually have a token object as
+        payload. Set the exception token and do the default behavior.
+        """
+
+        if isinstance(self, TreeParser):
+            adaptor = e.input.getTreeAdaptor()
+            e.token = adaptor.getToken(e.node)
+            if e.token is None: # could be an UP/DOWN node
+                e.token = CommonToken(
+                    type=adaptor.getType(e.node),
+                    text=adaptor.getText(e.node)
+                    )
+
+        return BaseRecognizer.getErrorMessage(self, e)
+
+
+    def traceIn(self, ruleName, ruleIndex):
+        BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))
+
+
+    def traceOut(self, ruleName, ruleIndex):
+        BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
+
+
+#############################################################################
+#
+# tree visitor
+#
+#############################################################################
+
+class TreeVisitor(object):
+    """Do a depth first walk of a tree, applying pre() and post() actions
+    as we go.
+    """
+
+    def __init__(self, adaptor=None):
+        if adaptor is not None:
+            self.adaptor = adaptor
+        else:
+            self.adaptor = CommonTreeAdaptor()
+
+    def visit(self, t, pre_action=None, post_action=None):
+        """Visit every node in tree t and trigger an action for each node
+        before/after having visited all of its children.  Bottom up walk.
+        Execute both actions even if t has no children.  Ignore return
+        results from transforming children since they will have altered
+        the child list of this node (their parent).  Return result of
+        applying post action to this node.
+
+        The Python version differs from the Java version by taking two
+        callables 'pre_action' and 'post_action' instead of a class instance
+        that wraps those methods. Those callables must accept a TreeNode as
+        their single argument and return the (potentially transformed or
+        replaced) TreeNode.
+        """
+
+        isNil = self.adaptor.isNil(t)
+        if pre_action is not None and not isNil:
+            # if rewritten, walk children of new t
+            t = pre_action(t)
+
+        idx = 0
+        while idx < self.adaptor.getChildCount(t):
+            child = self.adaptor.getChild(t, idx)
+            self.visit(child, pre_action, post_action)
+            idx += 1
+
+        if post_action is not None and not isNil:
+            t = post_action(t)
+
+        return t
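+
+    # Illustrative usage sketch (hypothetical 'tree' CommonTree instance, not
+    # part of the original source):
+    #
+    #   def pre(t):
+    #       print("entering", t)
+    #       return t    # must return the (possibly replaced) node
+    #
+    #   def post(t):
+    #       print("leaving", t)
+    #       return t
+    #
+    #   TreeVisitor().visit(tree, pre, post)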
+
+#############################################################################
+#
+# tree iterator
+#
+#############################################################################
+
+class TreeIterator(object):
+    """
+    Return a node stream from a doubly-linked tree whose nodes
+    know what child index they are.
+
+    Emit navigation nodes (DOWN, UP, and EOF) to show the tree structure.
+    """
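+
+    # Illustrative sketch (hypothetical tree, not part of the original
+    # source): iterating over the tree ^(A B ^(C D)) yields the node sequence
+    #   A, DOWN, B, C, DOWN, D, UP, UP, EOF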
+
+    def __init__(self, tree, adaptor=None):
+        if adaptor is None:
+            adaptor = CommonTreeAdaptor()
+
+        self.root = tree
+        self.adaptor = adaptor
+
+        self.first_time = True
+        self.tree = tree
+
+        # If we emit UP/DOWN nodes, we need to spit out multiple nodes per
+        # next() call.
+        self.nodes = []
+
+        # navigation nodes to return during walk and at end
+        self.down = adaptor.createFromType(DOWN, "DOWN")
+        self.up = adaptor.createFromType(UP, "UP")
+        self.eof = adaptor.createFromType(EOF, "EOF")
+
+
+    def reset(self):
+        self.first_time = True
+        self.tree = self.root
+        self.nodes = []
+
+
+    def __iter__(self):
+        return self
+
+
+    def has_next(self):
+        if self.first_time:
+            return self.root is not None
+
+        if len(self.nodes) > 0:
+            return True
+
+        if self.tree is None:
+            return False
+
+        if self.adaptor.getChildCount(self.tree) > 0:
+            return True
+
+        # back at root?
+        return self.adaptor.getParent(self.tree) is not None
+
+
+    def __next__(self):
+        if not self.has_next():
+            raise StopIteration
+
+        if self.first_time:
+            # initial condition
+            self.first_time = False
+            if self.adaptor.getChildCount(self.tree) == 0:
+                # single node tree (special)
+                self.nodes.append(self.eof)
+                return self.tree
+
+            return self.tree
+
+        # if any queued up, use those first
+        if len(self.nodes) > 0:
+            return self.nodes.pop(0)
+
+        # no nodes left?
+        if self.tree is None:
+            return self.eof
+
+        # next node will be child 0 if any children
+        if self.adaptor.getChildCount(self.tree) > 0:
+            self.tree = self.adaptor.getChild(self.tree, 0)
+            # real node is next after DOWN
+            self.nodes.append(self.tree)
+            return self.down
+
+        # if no children, look for next sibling of tree or ancestor
+        parent = self.adaptor.getParent(self.tree)
+        # while we're out of siblings, keep popping back up towards root
+        while (parent is not None
+               and self.adaptor.getChildIndex(self.tree)+1 >= self.adaptor.getChildCount(parent)):
+            # we're moving back up
+            self.nodes.append(self.up)
+            self.tree = parent
+            parent = self.adaptor.getParent(self.tree)
+
+        # no nodes left?
+        if parent is None:
+            self.tree = None # back at root? nothing left then
+            self.nodes.append(self.eof) # add to queue, might have UP nodes in there
+            return self.nodes.pop(0)
+
+        # must have found a node with an unvisited sibling
+        # move to it and return it
+        nextSiblingIndex = self.adaptor.getChildIndex(self.tree) + 1
+        self.tree = self.adaptor.getChild(parent, nextSiblingIndex)
+        self.nodes.append(self.tree) # add to queue, might have UP nodes in there
+        return self.nodes.pop(0)
+
+
+
+#############################################################################
+#
+# streams for rule rewriting
+#
+#############################################################################
+
+class RewriteRuleElementStream(object):
+    """@brief Internal helper class.
+
+    A generic list of elements tracked in an alternative to be used in
+    a -> rewrite rule.  We need to subclass to fill in the next() method,
+    which returns either an AST node wrapped around a token payload or
+    an existing subtree.
+
+    Once you start next()ing, do not try to add more elements.  It will
+    break the cursor tracking I believe.
+
+    @see org.antlr.runtime.tree.RewriteRuleSubtreeStream
+    @see org.antlr.runtime.tree.RewriteRuleTokenStream
+
+    TODO: add mechanism to detect/puke on modification after reading from
+    stream
+    """
+
+    def __init__(self, adaptor, elementDescription, elements=None):
+        # Cursor 0..n-1.  If singleElement!=null, cursor is 0 until you next(),
+        # which bumps it to 1 meaning no more elements.
+        self.cursor = 0
+
+        # Track single elements w/o creating a list.  Upon 2nd add, alloc list
+        self.singleElement = None
+
+        # The list of tokens or subtrees we are tracking
+        self.elements = None
+
+        # Once a node / subtree has been used in a stream, it must be dup'd
+        # from then on.  Streams are reset after subrules so that the streams
+        # can be reused in future subrules.  So, reset must set a dirty bit.
+        # If dirty, then next() always returns a dup.
+        self.dirty = False
+
+        # The element or stream description; usually has name of the token or
+        # rule reference that this list tracks.  Can include rulename too, but
+        # the exception would track that info.
+        self.elementDescription = elementDescription
+
+        self.adaptor = adaptor
+
+        if isinstance(elements, (list, tuple)):
+            # Create a stream, but feed off an existing list
+            self.singleElement = None
+            self.elements = elements
+
+        else:
+            # Create a stream with one element
+            self.add(elements)
+
+
+    def reset(self):
+        """
+        Reset the condition of this stream so that it appears we have
+        not consumed any of its elements.  Elements themselves are untouched.
+        Once we reset the stream, any future use will need duplicates.  Set
+        the dirty bit.
+        """
+
+        self.cursor = 0
+        self.dirty = True
+
+
+    def add(self, el):
+        if el is None:
+            return
+
+        if self.elements is not None: # if in list, just add
+            self.elements.append(el)
+            return
+
+        if self.singleElement is None: # no elements yet, track w/o list
+            self.singleElement = el
+            return
+
+        # adding 2nd element, move to list
+        self.elements = []
+        self.elements.append(self.singleElement)
+        self.singleElement = None
+        self.elements.append(el)
+
+
+    def nextTree(self):
+        """
+        Return the next element in the stream.  If out of elements, throw
+        an exception unless size()==1.  If size is 1, then return elements[0].
+
+        Return a duplicate node/subtree if stream is out of elements and
+        size==1. If we've already used the element, dup (dirty bit set).
+        """
+
+        if (self.dirty
+            or (self.cursor >= len(self) and len(self) == 1)
+            ):
+            # if out of elements and size is 1, dup
+            el = self._next()
+            return self.dup(el)
+
+        # test size above then fetch
+        el = self._next()
+        return el
+
+
+    def _next(self):
+        """
+        do the work of getting the next element, making sure that it's
+        a tree node or subtree.  Deal with the optimization of single-
+        element list versus list of size > 1.  Throw an exception
+        if the stream is empty or we're out of elements and size>1.
+        protected so you can override in a subclass if necessary.
+        """
+
+        if len(self) == 0:
+            raise RewriteEmptyStreamException(self.elementDescription)
+
+        if self.cursor >= len(self): # out of elements?
+            if len(self) == 1: # if size is 1, it's ok; return and we'll dup
+                return self.toTree(self.singleElement)
+
+            # out of elements and size was not 1, so we can't dup
+            raise RewriteCardinalityException(self.elementDescription)
+
+        # we have elements
+        if self.singleElement is not None:
+            self.cursor += 1 # move cursor even for single element list
+            return self.toTree(self.singleElement)
+
+        # must have more than one in list, pull from elements
+        o = self.toTree(self.elements[self.cursor])
+        self.cursor += 1
+        return o
+
+
+    def dup(self, el):
+        """
+        When constructing trees, sometimes we need to dup a token or AST
+        subtree.  Dup'ing a token means just creating another AST node
+        around it.  For trees, you must call the adaptor.dupTree() unless
+        the element is for a tree root; then it must be a node dup.
+        """
+
+        raise NotImplementedError
+
+
+    def toTree(self, el):
+        """
+        Ensure stream emits trees; tokens must be converted to AST nodes.
+        AST nodes can be passed through unmolested.
+        """
+
+        return el
+
+
+    def hasNext(self):
+        return ( (self.singleElement is not None and self.cursor < 1)
+                 or (self.elements is not None
+                     and self.cursor < len(self.elements)
+                     )
+                 )
+
+
+    def size(self):
+        if self.singleElement is not None:
+            return 1
+
+        if self.elements is not None:
+            return len(self.elements)
+
+        return 0
+
+    __len__ = size
+
+
+    def getDescription(self):
+        """Deprecated. Directly access elementDescription attribute"""
+
+        return self.elementDescription
+
+
+class RewriteRuleTokenStream(RewriteRuleElementStream):
+    """@brief Internal helper class."""
+
+    def toTree(self, el):
+        # Don't convert to a tree unless they explicitly call nextTree.
+        # This way we can do hetero tree nodes in rewrite.
+        return el
+
+
+    def nextNode(self):
+        t = self._next()
+        return self.adaptor.createWithPayload(t)
+
+
+    def nextToken(self):
+        return self._next()
+
+
+    def dup(self, el):
+        raise TypeError("dup can't be called for a token stream.")
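+
+    # Illustrative sketch (hypothetical 'adaptor', 'root', and token objects,
+    # not part of the original source); similar in spirit to what generated
+    # rewrite code does:
+    #
+    #   stream_ID = RewriteRuleTokenStream(adaptor, "token ID")
+    #   stream_ID.add(id_token)          # tracked without allocating a list
+    #   stream_ID.add(other_id_token)    # second add() switches to a list
+    #   while stream_ID.hasNext():
+    #       adaptor.addChild(root, stream_ID.nextNode())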
+
+
+class RewriteRuleSubtreeStream(RewriteRuleElementStream):
+    """@brief Internal helper class."""
+
+    def nextNode(self):
+        """
+        Treat next element as a single node even if it's a subtree.
+        This is used instead of next() when the result has to be a
+        tree root node.  Also prevents us from duplicating recently-added
+        children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
+        must dup the type node, but ID has been added.
+
+        Referencing a rule result twice is ok; dup entire tree as
+        we can't be adding trees as root; e.g., expr expr.
+
+        Hideous code duplication here with super.next().  Can't think of
+        a proper way to refactor.  This needs to always call dup node
+        and super.next() doesn't know which to call: dup node or dup tree.
+        """
+
+        if (self.dirty
+            or (self.cursor >= len(self) and len(self) == 1)
+            ):
+            # if out of elements and size is 1, dup (at most a single node
+            # since this is for making root nodes).
+            el = self._next()
+            return self.adaptor.dupNode(el)
+
+        # test size above then fetch
+        el = self._next()
+        while self.adaptor.isNil(el) and self.adaptor.getChildCount(el) == 1:
+            el = self.adaptor.getChild(el, 0)
+
+        # dup just the root (want node here)
+        return self.adaptor.dupNode(el)
+
+
+    def dup(self, el):
+        return self.adaptor.dupTree(el)
+
+
+
+class RewriteRuleNodeStream(RewriteRuleElementStream):
+    """
+    Queues up nodes matched on left side of -> in a tree parser. This is
+    the analog of RewriteRuleTokenStream for normal parsers.
+    """
+
+    def nextNode(self):
+        return self._next()
+
+
+    def toTree(self, el):
+        return self.adaptor.dupNode(el)
+
+
+    def dup(self, el):
+        # we dup every node, so don't have to worry about calling dup;
+        # short-circuited next() so it doesn't call.
+        raise TypeError("dup can't be called for a node stream.")
+
+
+class TreeRuleReturnScope(RuleReturnScope):
+    """
+    This is identical to the ParserRuleReturnScope except that
+    the start property is a tree node, not a Token object,
+    when you are parsing trees.  To be generic, the tree node types
+    have to be Object.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.start = None
+        self.tree = None
+
+
+    def getStart(self):
+        return self.start
+
+
+    def getTree(self):
+        return self.tree
diff --git a/runtime/Python3/antlr3/treewizard.py b/runtime/Python3/antlr3/treewizard.py
new file mode 100644
index 0000000..0fefe0f
--- /dev/null
+++ b/runtime/Python3/antlr3/treewizard.py
@@ -0,0 +1,619 @@
+""" @package antlr3.treewizard
+@brief ANTLR3 runtime package, treewizard module
+
+A utility module to create ASTs at runtime.
+See <http://www.antlr.org/wiki/display/~admin/2007/07/02/Exploring+Concept+of+TreeWizard>
+for an overview. Note that the API of the Python implementation is slightly
+different.
+
+"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2012 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+from .constants import INVALID_TOKEN_TYPE
+from .tokens import CommonToken
+from .tree import CommonTree, CommonTreeAdaptor
+
+
+def computeTokenTypes(tokenNames):
+    """
+    Compute a dict that is an inverted index of
+    tokenNames (which maps int token types to names).
+    """
+
+    if tokenNames:
+        return dict((name, type) for type, name in enumerate(tokenNames))
+
+    return {}
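+
+# For example, computeTokenTypes(['<invalid>', '<EOR>', '<DOWN>', '<UP>', 'ID'])
+# returns {'<invalid>': 0, '<EOR>': 1, '<DOWN>': 2, '<UP>': 3, 'ID': 4}.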
+
+
+## token types for pattern parser
+EOF = -1
+BEGIN = 1
+END = 2
+ID = 3
+ARG = 4
+PERCENT = 5
+COLON = 6
+DOT = 7
+
+class TreePatternLexer(object):
+    def __init__(self, pattern):
+        ## The tree pattern to lex like "(A B C)"
+        self.pattern = pattern
+
+        ## Index into input string
+        self.p = -1
+
+        ## Current char
+        self.c = None
+
+        ## How long is the pattern in chars?
+        self.n = len(pattern)
+
+        ## Set when token type is ID or ARG
+        self.sval = None
+
+        self.error = False
+
+        self.consume()
+
+
+    __idStartChar = frozenset(
+        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
+        )
+    __idChar = __idStartChar | frozenset('0123456789')
+
+    def nextToken(self):
+        self.sval = ""
+        while self.c != EOF:
+            if self.c in (' ', '\n', '\r', '\t'):
+                self.consume()
+                continue
+
+            if self.c in self.__idStartChar:
+                self.sval += self.c
+                self.consume()
+                while self.c in self.__idChar:
+                    self.sval += self.c
+                    self.consume()
+
+                return ID
+
+            if self.c == '(':
+                self.consume()
+                return BEGIN
+
+            if self.c == ')':
+                self.consume()
+                return END
+
+            if self.c == '%':
+                self.consume()
+                return PERCENT
+
+            if self.c == ':':
+                self.consume()
+                return COLON
+
+            if self.c == '.':
+                self.consume()
+                return DOT
+
+            if self.c == '[': # grab [x] as a string, returning x
+                self.consume()
+                while self.c != ']':
+                    if self.c == '\\':
+                        self.consume()
+                        if self.c != ']':
+                            self.sval += '\\'
+
+                        self.sval += self.c
+
+                    else:
+                        self.sval += self.c
+
+                    self.consume()
+
+                self.consume()
+                return ARG
+
+            self.consume()
+            self.error = True
+            return EOF
+
+        return EOF
+
+
+    def consume(self):
+        self.p += 1
+        if self.p >= self.n:
+            self.c = EOF
+
+        else:
+            self.c = self.pattern[self.p]
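+
+    # Illustrative sketch (not part of the original source): lexing the
+    # pattern "(ID[x] .)" yields the token sequence
+    #   BEGIN, ID (sval='ID'), ARG (sval='x'), DOT, END, EOF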
+
+
+class TreePatternParser(object):
+    def __init__(self, tokenizer, wizard, adaptor):
+        self.tokenizer = tokenizer
+        self.wizard = wizard
+        self.adaptor = adaptor
+        self.ttype = tokenizer.nextToken() # kickstart
+
+
+    def pattern(self):
+        if self.ttype == BEGIN:
+            return self.parseTree()
+
+        elif self.ttype == ID:
+            node = self.parseNode()
+            if self.ttype == EOF:
+                return node
+
+            return None # extra junk on end
+
+        return None
+
+
+    def parseTree(self):
+        if self.ttype != BEGIN:
+            return None
+
+        self.ttype = self.tokenizer.nextToken()
+        root = self.parseNode()
+        if root is None:
+            return None
+
+        while self.ttype in (BEGIN, ID, PERCENT, DOT):
+            if self.ttype == BEGIN:
+                subtree = self.parseTree()
+                self.adaptor.addChild(root, subtree)
+
+            else:
+                child = self.parseNode()
+                if child is None:
+                    return None
+
+                self.adaptor.addChild(root, child)
+
+        if self.ttype != END:
+            return None
+
+        self.ttype = self.tokenizer.nextToken()
+        return root
+
+
+    def parseNode(self):
+        # "%label:" prefix
+        label = None
+
+        if self.ttype == PERCENT:
+            self.ttype = self.tokenizer.nextToken()
+            if self.ttype != ID:
+                return None
+
+            label = self.tokenizer.sval
+            self.ttype = self.tokenizer.nextToken()
+            if self.ttype != COLON:
+                return None
+
+            self.ttype = self.tokenizer.nextToken() # move to ID following colon
+
+        # Wildcard?
+        if self.ttype == DOT:
+            self.ttype = self.tokenizer.nextToken()
+            wildcardPayload = CommonToken(0, ".")
+            node = WildcardTreePattern(wildcardPayload)
+            if label is not None:
+                node.label = label
+            return node
+
+        # "ID" or "ID[arg]"
+        if self.ttype != ID:
+            return None
+
+        tokenName = self.tokenizer.sval
+        self.ttype = self.tokenizer.nextToken()
+
+        if tokenName == "nil":
+            return self.adaptor.nil()
+
+        text = tokenName
+        # check for arg
+        arg = None
+        if self.ttype == ARG:
+            arg = self.tokenizer.sval
+            text = arg
+            self.ttype = self.tokenizer.nextToken()
+
+        # create node
+        treeNodeType = self.wizard.getTokenType(tokenName)
+        if treeNodeType == INVALID_TOKEN_TYPE:
+            return None
+
+        node = self.adaptor.createFromType(treeNodeType, text)
+        if label is not None and isinstance(node, TreePattern):
+            node.label = label
+
+        if arg is not None and isinstance(node, TreePattern):
+            node.hasTextArg = True
+
+        return node
+
+
+class TreePattern(CommonTree):
+    """
+    When using %label:TOKENNAME in a tree for parse(), we must
+    track the label.
+    """
+
+    def __init__(self, payload):
+        super().__init__(payload)
+
+        self.label = None
+        self.hasTextArg = None
+
+
+    def toString(self):
+        if self.label:
+            return '%' + self.label + ':' + super().toString()
+
+        else:
+            return super().toString()
+
+
+class WildcardTreePattern(TreePattern):
+    pass
+
+
+class TreePatternTreeAdaptor(CommonTreeAdaptor):
+    """This adaptor creates TreePattern objects for use during scan()"""
+
+    def createWithPayload(self, payload):
+        return TreePattern(payload)
+
+
+class TreeWizard(object):
+    """
+    Build and navigate trees with this object.  Must know about the names
+    of tokens so you have to pass in a map or array of token names (from which
+    this class can build the map).  I.e., Token DECL means nothing unless the
+    class can translate it to a token type.
+
+    In order to create nodes and navigate, this class needs a TreeAdaptor.
+
+    This class can build a token type -> node index for repeated use or for
+    iterating over the various nodes with a particular type.
+
+    This class works in conjunction with the TreeAdaptor rather than moving
+    all this functionality into the adaptor.  An adaptor helps build and
+    navigate trees using methods.  This class helps you do it with string
+    patterns like "(A B C)".  You can create a tree from that pattern or
+    match subtrees against it.
+    """
+
+    def __init__(self, adaptor=None, tokenNames=None, typeMap=None):
+        if adaptor is None:
+            self.adaptor = CommonTreeAdaptor()
+
+        else:
+            self.adaptor = adaptor
+
+        if typeMap is None:
+            self.tokenNameToTypeMap = computeTokenTypes(tokenNames)
+
+        else:
+            if tokenNames:
+                raise ValueError("Can't have both tokenNames and typeMap")
+
+            self.tokenNameToTypeMap = typeMap
+
+
+    def getTokenType(self, tokenName):
+        """Using the map of token names to token types, return the type."""
+
+        if tokenName in self.tokenNameToTypeMap:
+            return self.tokenNameToTypeMap[tokenName]
+        else:
+            return INVALID_TOKEN_TYPE
+
+
+    def create(self, pattern):
+        """
+        Create a tree or node from the indicated tree pattern that closely
+        follows ANTLR tree grammar tree element syntax:
+
+        (root child1 ... child2).
+
+        You can also just pass in a node: ID
+
+        Any node can have a text argument: ID[foo]
+        (notice there are no quotes around foo--it's clear it's a string).
+
+        nil is a special name meaning "give me a nil node".  Useful for
+        making lists: (nil A B C) is a list of A B C.
+        """
+
+        tokenizer = TreePatternLexer(pattern)
+        parser = TreePatternParser(tokenizer, self, self.adaptor)
+        return parser.pattern()
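+
+    # Illustrative sketch (hypothetical token names, not part of the original
+    # source):
+    #
+    #   wiz = TreeWizard(tokenNames=['<invalid>', '<EOR>', '<DOWN>', '<UP>',
+    #                                'ASSIGN', 'ID', 'INT'])
+    #   t = wiz.create("(ASSIGN ID[x] INT[3])")   # builds the tree ^(ASSIGN x 3)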
+
+
+    def index(self, tree):
+        """Walk the entire tree and make a node name to nodes mapping.
+
+        For now, use recursion but later nonrecursive version may be
+        more efficient.  Returns a dict int -> list where the list is
+        of your AST node type.  The int is the token type of the node.
+        """
+
+        m = {}
+        self._index(tree, m)
+        return m
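+
+    # Continuing the hypothetical create() sketch above: for the tree
+    # ^(ASSIGN x 3), index() returns
+    #   {4: [<ASSIGN node>], 5: [<x node>], 6: [<3 node>]}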
+
+
+    def _index(self, t, m):
+        """Do the work for index"""
+
+        if t is None:
+            return
+
+        ttype = self.adaptor.getType(t)
+        elements = m.get(ttype)
+        if elements is None:
+            m[ttype] = elements = []
+
+        elements.append(t)
+        for i in range(self.adaptor.getChildCount(t)):
+            child = self.adaptor.getChild(t, i)
+            self._index(child, m)
+
+
+    def find(self, tree, what):
+        """Return a list of matching tokens.
+
+        what may either be an integer specifying the token type to find or
+        a string with a pattern that must be matched.
+
+        """
+
+        if isinstance(what, int):
+            return self._findTokenType(tree, what)
+
+        elif isinstance(what, str):
+            return self._findPattern(tree, what)
+
+        else:
+            raise TypeError("'what' must be string or integer")
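+
+    # e.g. (same hypothetical wizard and tree): wiz.find(t, 5) returns every
+    # ID-typed node in t, while wiz.find(t, "ID[x]") returns the subtrees
+    # matching that pattern.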
+
+
+    def _findTokenType(self, t, ttype):
+        """Return a List of tree nodes with token type ttype"""
+
+        nodes = []
+
+        def visitor(tree, parent, childIndex, labels):
+            nodes.append(tree)
+
+        self.visit(t, ttype, visitor)
+
+        return nodes
+
+
+    def _findPattern(self, t, pattern):
+        """Return a List of subtrees matching pattern."""
+
+        subtrees = []
+
+        # Create a TreePattern from the pattern
+        tokenizer = TreePatternLexer(pattern)
+        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
+        tpattern = parser.pattern()
+
+        # don't allow invalid patterns
+        if (tpattern is None or tpattern.isNil()
+            or isinstance(tpattern, WildcardTreePattern)):
+            return None
+
+        rootTokenType = tpattern.getType()
+
+        def visitor(tree, parent, childIndex, label):
+            if self._parse(tree, tpattern, None):
+                subtrees.append(tree)
+
+        self.visit(t, rootTokenType, visitor)
+
+        return subtrees
+
+
+    def visit(self, tree, what, visitor):
+        """Visit every node in tree matching what, invoking the visitor.
+
+        If what is a string, it is parsed as a pattern and only matching
+        subtrees will be visited.
+        The implementation uses the root node of the pattern in combination
+        with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
+        Patterns with wildcard roots are also not allowed.
+
+        If what is an integer, it is used as a token type and visit will match
+        all nodes of that type (this is faster than the pattern match).
+        The labels arg of the visitor action method is never set (it's None)
+        since using a token type rather than a pattern doesn't let us set a
+        label.
+        """
+
+        if isinstance(what, int):
+            self._visitType(tree, None, 0, what, visitor)
+
+        elif isinstance(what, str):
+            self._visitPattern(tree, what, visitor)
+
+        else:
+            raise TypeError("'what' must be string or integer")
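+
+    # Illustrative sketch (same hypothetical wizard and tree):
+    #
+    #   def show(node, parent, child_index, labels):
+    #       print(node, labels)
+    #
+    #   wiz.visit(t, 5, show)                      # every ID-typed node, labels is None
+    #   wiz.visit(t, "(ASSIGN %lhs:ID .)", show)   # pattern match, labels dict filled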
+
+
+    def _visitType(self, t, parent, childIndex, ttype, visitor):
+        """Do the recursive work for visit"""
+
+        if t is None:
+            return
+
+        if self.adaptor.getType(t) == ttype:
+            visitor(t, parent, childIndex, None)
+
+        for i in range(self.adaptor.getChildCount(t)):
+            child = self.adaptor.getChild(t, i)
+            self._visitType(child, t, i, ttype, visitor)
+
+
+    def _visitPattern(self, tree, pattern, visitor):
+        """
+        For all subtrees that match the pattern, execute the visit action.
+        """
+
+        # Create a TreePattern from the pattern
+        tokenizer = TreePatternLexer(pattern)
+        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
+        tpattern = parser.pattern()
+
+        # don't allow invalid patterns
+        if (tpattern is None or tpattern.isNil()
+            or isinstance(tpattern, WildcardTreePattern)):
+            return
+
+        rootTokenType = tpattern.getType()
+
+        def rootvisitor(tree, parent, childIndex, labels):
+            labels = {}
+            if self._parse(tree, tpattern, labels):
+                visitor(tree, parent, childIndex, labels)
+
+        self.visit(tree, rootTokenType, rootvisitor)
+
+
+    def parse(self, t, pattern, labels=None):
+        """
+        Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
+        on the various nodes and '.' (dot) as the node/subtree wildcard,
+        return true if the pattern matches and fill the labels Map with
+        the labels pointing at the appropriate nodes.  Return false if
+        the pattern is malformed or the tree does not match.
+
+        If a node specifies a text arg in pattern, then that must match
+        for that node in t.
+        """
+
+        tokenizer = TreePatternLexer(pattern)
+        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
+        tpattern = parser.pattern()
+
+        return self._parse(t, tpattern, labels)
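+
+    # Illustrative sketch (same hypothetical wizard and tree ^(ASSIGN x 3)):
+    #
+    #   labels = {}
+    #   if wiz.parse(t, "(ASSIGN %lhs:ID %rhs:.)", labels):
+    #       labels['lhs'].getText()   # -> 'x'
+    #       labels['rhs'].getText()   # -> '3'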
+
+
+    def _parse(self, t1, tpattern, labels):
+        """
+        Do the work for parse. Check to see if the tpattern fits the
+        structure and token types in t1.  Check text if the pattern has
+        text arguments on nodes.  Fill labels map with pointers to nodes
+        in tree matched against nodes in pattern with labels.
+        """
+
+        # make sure both are non-null
+        if t1 is None or tpattern is None:
+            return False
+
+        # check roots (wildcard matches anything)
+        if not isinstance(tpattern, WildcardTreePattern):
+            if self.adaptor.getType(t1) != tpattern.getType():
+                return False
+
+            # if pattern has text, check node text
+            if (tpattern.hasTextArg
+                and self.adaptor.getText(t1) != tpattern.getText()):
+                return False
+
+        if tpattern.label is not None and labels is not None:
+            # map label in pattern to node in t1
+            labels[tpattern.label] = t1
+
+        # check children
+        n1 = self.adaptor.getChildCount(t1)
+        n2 = tpattern.getChildCount()
+        if n1 != n2:
+            return False
+
+        for i in range(n1):
+            child1 = self.adaptor.getChild(t1, i)
+            child2 = tpattern.getChild(i)
+            if not self._parse(child1, child2, labels):
+                return False
+
+        return True
+
+
+    def equals(self, t1, t2, adaptor=None):
+        """
+        Compare t1 and t2; return true if token types/text, structure match
+        exactly.
+        The trees are examined in their entirety so that (A B) does not match
+        (A B C) nor (A (B C)).
+        """
+
+        if adaptor is None:
+            adaptor = self.adaptor
+
+        return self._equals(t1, t2, adaptor)
+
+
+    def _equals(self, t1, t2, adaptor):
+        # make sure both are non-null
+        if t1 is None or t2 is None:
+            return False
+
+        # check roots
+        if adaptor.getType(t1) != adaptor.getType(t2):
+            return False
+
+        if adaptor.getText(t1) != adaptor.getText(t2):
+            return False
+
+        # check children
+        n1 = adaptor.getChildCount(t1)
+        n2 = adaptor.getChildCount(t2)
+        if n1 != n2:
+            return False
+
+        for i in range(n1):
+            child1 = adaptor.getChild(t1, i)
+            child2 = adaptor.getChild(t2, i)
+            if not self._equals(child1, child2, adaptor):
+                return False
+
+        return True
diff --git a/runtime/Python3/doxyfile b/runtime/Python3/doxyfile
new file mode 100644
index 0000000..9f15919
--- /dev/null
+++ b/runtime/Python3/doxyfile
@@ -0,0 +1,270 @@
+# -*- mode: doxymacs -*-
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+DOXYFILE_ENCODING      = UTF-8
+PROJECT_NAME           = "ANTLR Python3 API"
+PROJECT_NUMBER         = 3.3
+OUTPUT_DIRECTORY       = api
+CREATE_SUBDIRS         = NO
+OUTPUT_LANGUAGE        = English
+BRIEF_MEMBER_DESC      = YES
+REPEAT_BRIEF           = YES
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+ALWAYS_DETAILED_SEC    = YES
+INLINE_INHERITED_MEMB  = NO
+FULL_PATH_NAMES        = YES
+STRIP_FROM_PATH        = build/doc/
+STRIP_FROM_INC_PATH    = 
+SHORT_NAMES            = NO
+JAVADOC_AUTOBRIEF      = NO
+MULTILINE_CPP_IS_BRIEF = NO
+DETAILS_AT_TOP         = NO
+INHERIT_DOCS           = YES
+SEPARATE_MEMBER_PAGES  = NO
+TAB_SIZE               = 8
+ALIASES                = 
+OPTIMIZE_OUTPUT_FOR_C  = NO
+OPTIMIZE_OUTPUT_JAVA   = YES
+BUILTIN_STL_SUPPORT    = NO
+CPP_CLI_SUPPORT        = NO
+DISTRIBUTE_GROUP_DOC   = NO
+SUBGROUPING            = YES
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+EXTRACT_ALL            = YES
+EXTRACT_PRIVATE        = YES
+EXTRACT_STATIC         = YES
+EXTRACT_LOCAL_CLASSES  = YES
+EXTRACT_LOCAL_METHODS  = NO
+HIDE_UNDOC_MEMBERS     = NO
+HIDE_UNDOC_CLASSES     = NO
+HIDE_FRIEND_COMPOUNDS  = NO
+HIDE_IN_BODY_DOCS      = NO
+INTERNAL_DOCS          = NO
+CASE_SENSE_NAMES       = NO
+HIDE_SCOPE_NAMES       = NO
+SHOW_INCLUDE_FILES     = YES
+INLINE_INFO            = YES
+SORT_MEMBER_DOCS       = YES
+SORT_BRIEF_DOCS        = NO
+SORT_BY_SCOPE_NAME     = NO
+GENERATE_TODOLIST      = YES
+GENERATE_TESTLIST      = NO
+GENERATE_BUGLIST       = NO
+GENERATE_DEPRECATEDLIST= NO
+ENABLED_SECTIONS       = 
+MAX_INITIALIZER_LINES  = 30
+SHOW_USED_FILES        = YES
+SHOW_DIRECTORIES       = NO
+FILE_VERSION_FILTER    = 
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+QUIET                  = NO
+WARNINGS               = YES
+WARN_IF_UNDOCUMENTED   = YES
+WARN_IF_DOC_ERROR      = YES
+WARN_NO_PARAMDOC       = NO
+WARN_FORMAT            = "$file:$line: $text"
+WARN_LOGFILE           = 
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+INPUT                  = build/doc
+INPUT_ENCODING         = UTF-8
+FILE_PATTERNS          = *.c \
+                         *.cc \
+                         *.cxx \
+                         *.cpp \
+                         *.c++ \
+                         *.d \
+                         *.java \
+                         *.ii \
+                         *.ixx \
+                         *.ipp \
+                         *.i++ \
+                         *.inl \
+                         *.h \
+                         *.hh \
+                         *.hxx \
+                         *.hpp \
+                         *.h++ \
+                         *.idl \
+                         *.odl \
+                         *.cs \
+                         *.php \
+                         *.php3 \
+                         *.inc \
+                         *.m \
+                         *.mm \
+                         *.dox \
+                         *.py
+RECURSIVE              = YES
+EXCLUDE                = build/doc/antlr3/__init__.py
+EXCLUDE_SYMLINKS       = NO
+EXCLUDE_PATTERNS       = 
+EXCLUDE_SYMBOLS        = dfa exceptions recognizers streams tokens constants
+EXAMPLE_PATH           = 
+EXAMPLE_PATTERNS       = *
+EXAMPLE_RECURSIVE      = NO
+IMAGE_PATH             = 
+INPUT_FILTER           = 
+FILTER_PATTERNS        = 
+FILTER_SOURCE_FILES    = NO
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+SOURCE_BROWSER         = YES
+INLINE_SOURCES         = NO
+STRIP_CODE_COMMENTS    = YES
+REFERENCED_BY_RELATION = NO
+REFERENCES_RELATION    = NO
+REFERENCES_LINK_SOURCE = YES
+USE_HTAGS              = NO
+VERBATIM_HEADERS       = YES
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+ALPHABETICAL_INDEX     = NO
+COLS_IN_ALPHA_INDEX    = 5
+IGNORE_PREFIX          = 
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+GENERATE_HTML          = YES
+HTML_OUTPUT            = .
+HTML_FILE_EXTENSION    = .html
+HTML_HEADER            = 
+HTML_FOOTER            = 
+HTML_STYLESHEET        = 
+HTML_ALIGN_MEMBERS     = YES
+GENERATE_HTMLHELP      = NO
+CHM_FILE               = 
+HHC_LOCATION           = 
+GENERATE_CHI           = NO
+BINARY_TOC             = NO
+TOC_EXPAND             = NO
+DISABLE_INDEX          = NO
+ENUM_VALUES_PER_LINE   = 4
+GENERATE_TREEVIEW      = NO
+TREEVIEW_WIDTH         = 250
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+GENERATE_LATEX         = NO
+LATEX_OUTPUT           = latex
+LATEX_CMD_NAME         = latex
+MAKEINDEX_CMD_NAME     = makeindex
+COMPACT_LATEX          = NO
+PAPER_TYPE             = a4wide
+EXTRA_PACKAGES         = 
+LATEX_HEADER           = 
+PDF_HYPERLINKS         = NO
+USE_PDFLATEX           = YES
+LATEX_BATCHMODE        = NO
+LATEX_HIDE_INDICES     = NO
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+GENERATE_RTF           = NO
+RTF_OUTPUT             = rtf
+COMPACT_RTF            = NO
+RTF_HYPERLINKS         = NO
+RTF_STYLESHEET_FILE    = 
+RTF_EXTENSIONS_FILE    = 
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+GENERATE_MAN           = NO
+MAN_OUTPUT             = man
+MAN_EXTENSION          = .3
+MAN_LINKS              = NO
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+GENERATE_XML           = NO
+XML_OUTPUT             = xml
+XML_SCHEMA             = 
+XML_DTD                = 
+XML_PROGRAMLISTING     = YES
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+GENERATE_AUTOGEN_DEF   = NO
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+GENERATE_PERLMOD       = NO
+PERLMOD_LATEX          = NO
+PERLMOD_PRETTY         = YES
+PERLMOD_MAKEVAR_PREFIX = 
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+ENABLE_PREPROCESSING   = YES
+MACRO_EXPANSION        = YES
+EXPAND_ONLY_PREDEF     = NO
+SEARCH_INCLUDES        = YES
+INCLUDE_PATH           = 
+INCLUDE_FILE_PATTERNS  = 
+PREDEFINED             = DOXYGEN_SHOULD_SKIP_THIS
+EXPAND_AS_DEFINED      = 
+SKIP_FUNCTION_MACROS   = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+TAGFILES               = 
+GENERATE_TAGFILE       = 
+ALLEXTERNALS           = NO
+EXTERNAL_GROUPS        = YES
+PERL_PATH              = /usr/bin/perl
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+CLASS_DIAGRAMS         = NO
+MSCGEN_PATH            = 
+HIDE_UNDOC_RELATIONS   = YES
+HAVE_DOT               = YES
+CLASS_GRAPH            = YES
+COLLABORATION_GRAPH    = YES
+GROUP_GRAPHS           = YES
+UML_LOOK               = NO
+TEMPLATE_RELATIONS     = NO
+INCLUDE_GRAPH          = YES
+INCLUDED_BY_GRAPH      = YES
+CALL_GRAPH             = NO
+CALLER_GRAPH           = NO
+GRAPHICAL_HIERARCHY    = YES
+DIRECTORY_GRAPH        = YES
+DOT_IMAGE_FORMAT       = png
+DOT_PATH               = 
+DOTFILE_DIRS           = 
+DOT_GRAPH_MAX_NODES    = 50
+DOT_TRANSPARENT        = NO
+DOT_MULTI_TARGETS      = NO
+GENERATE_LEGEND        = YES
+DOT_CLEANUP            = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine   
+#---------------------------------------------------------------------------
+SEARCHENGINE           = NO
+
+
+#---------------------------------------------------------------------------
+# doxypy integration
+#---------------------------------------------------------------------------
+FILTER_SOURCE_FILES = YES
+INPUT_FILTER = "python doxypy.py"
diff --git a/runtime/Python3/ez_setup.py b/runtime/Python3/ez_setup.py
new file mode 100644
index 0000000..3ea2e66
--- /dev/null
+++ b/runtime/Python3/ez_setup.py
@@ -0,0 +1,485 @@
+#!python
+"""Bootstrap distribute installation
+
+If you want to use setuptools in your package's setup.py, just include this
+file in the same directory with it, and add this to the top of your setup.py::
+
+    from distribute_setup import use_setuptools
+    use_setuptools()
+
+If you want to require a specific version of setuptools, set a download
+mirror, or use an alternate download directory, you can do so by supplying
+the appropriate options to ``use_setuptools()``.
+
+This file can also be run as a script to install or upgrade setuptools.
+"""
+import os
+import sys
+import time
+import fnmatch
+import tempfile
+import tarfile
+from distutils import log
+
+try:
+    from site import USER_SITE
+except ImportError:
+    USER_SITE = None
+
+try:
+    import subprocess
+
+    def _python_cmd(*args):
+        args = (sys.executable,) + args
+        return subprocess.call(args) == 0
+
+except ImportError:
+    # will be used for python 2.3
+    def _python_cmd(*args):
+        args = (sys.executable,) + args
+        # quoting arguments if windows
+        if sys.platform == 'win32':
+            def quote(arg):
+                if ' ' in arg:
+                    return '"%s"' % arg
+                return arg
+            args = [quote(arg) for arg in args]
+        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
+
+DEFAULT_VERSION = "0.6.14"
+DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
+SETUPTOOLS_FAKED_VERSION = "0.6c11"
+
+SETUPTOOLS_PKG_INFO = """\
+Metadata-Version: 1.0
+Name: setuptools
+Version: %s
+Summary: xxxx
+Home-page: xxx
+Author: xxx
+Author-email: xxx
+License: xxx
+Description: xxx
+""" % SETUPTOOLS_FAKED_VERSION
+
+
+def _install(tarball):
+    # extracting the tarball
+    tmpdir = tempfile.mkdtemp()
+    log.warn('Extracting in %s', tmpdir)
+    old_wd = os.getcwd()
+    try:
+        os.chdir(tmpdir)
+        tar = tarfile.open(tarball)
+        _extractall(tar)
+        tar.close()
+
+        # going in the directory
+        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+        os.chdir(subdir)
+        log.warn('Now working in %s', subdir)
+
+        # installing
+        log.warn('Installing Distribute')
+        if not _python_cmd('setup.py', 'install'):
+            log.warn('Something went wrong during the installation.')
+            log.warn('See the error message above.')
+    finally:
+        os.chdir(old_wd)
+
+
+def _build_egg(egg, tarball, to_dir):
+    # extracting the tarball
+    tmpdir = tempfile.mkdtemp()
+    log.warn('Extracting in %s', tmpdir)
+    old_wd = os.getcwd()
+    try:
+        os.chdir(tmpdir)
+        tar = tarfile.open(tarball)
+        _extractall(tar)
+        tar.close()
+
+        # going in the directory
+        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+        os.chdir(subdir)
+        log.warn('Now working in %s', subdir)
+
+        # building an egg
+        log.warn('Building a Distribute egg in %s', to_dir)
+        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+
+    finally:
+        os.chdir(old_wd)
+    # returning the result
+    log.warn(egg)
+    if not os.path.exists(egg):
+        raise IOError('Could not build the egg.')
+
+
+def _do_download(version, download_base, to_dir, download_delay):
+    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
+                       % (version, sys.version_info[0], sys.version_info[1]))
+    if not os.path.exists(egg):
+        tarball = download_setuptools(version, download_base,
+                                      to_dir, download_delay)
+        _build_egg(egg, tarball, to_dir)
+    sys.path.insert(0, egg)
+    import setuptools
+    setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+                   to_dir=os.curdir, download_delay=15, no_fake=True):
+    # making sure we use the absolute path
+    to_dir = os.path.abspath(to_dir)
+    was_imported = 'pkg_resources' in sys.modules or \
+        'setuptools' in sys.modules
+    try:
+        try:
+            import pkg_resources
+            if not hasattr(pkg_resources, '_distribute'):
+                if not no_fake:
+                    _fake_setuptools()
+                raise ImportError
+        except ImportError:
+            return _do_download(version, download_base, to_dir, download_delay)
+        try:
+            pkg_resources.require("distribute>="+version)
+            return
+        except pkg_resources.VersionConflict:
+            e = sys.exc_info()[1]
+            if was_imported:
+                sys.stderr.write(
+                "The required version of distribute (>=%s) is not available,\n"
+                "and can't be installed while this script is running. Please\n"
+                "install a more recent version first, using\n"
+                "'easy_install -U distribute'."
+                "\n\n(Currently using %r)\n" % (version, e.args[0]))
+                sys.exit(2)
+            else:
+                del pkg_resources, sys.modules['pkg_resources']    # reload ok
+                return _do_download(version, download_base, to_dir,
+                                    download_delay)
+        except pkg_resources.DistributionNotFound:
+            return _do_download(version, download_base, to_dir,
+                                download_delay)
+    finally:
+        if not no_fake:
+            _create_fake_setuptools_pkg_info(to_dir)
+
+def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+                        to_dir=os.curdir, delay=15):
+    """Download distribute from a specified location and return its filename
+
+    `version` should be a valid distribute version number that is available
+    as an egg for download under the `download_base` URL (which should end
+    with a '/'). `to_dir` is the directory where the egg will be downloaded.
+    `delay` is the number of seconds to pause before an actual download
+    attempt.
+    """
+    # making sure we use the absolute path
+    to_dir = os.path.abspath(to_dir)
+    try:
+        from urllib.request import urlopen
+    except ImportError:
+        from urllib2 import urlopen
+    tgz_name = "distribute-%s.tar.gz" % version
+    url = download_base + tgz_name
+    saveto = os.path.join(to_dir, tgz_name)
+    src = dst = None
+    if not os.path.exists(saveto):  # Avoid repeated downloads
+        try:
+            log.warn("Downloading %s", url)
+            src = urlopen(url)
+            # Read/write all in one block, so we don't create a corrupt file
+            # if the download is interrupted.
+            data = src.read()
+            dst = open(saveto, "wb")
+            dst.write(data)
+        finally:
+            if src:
+                src.close()
+            if dst:
+                dst.close()
+    return os.path.realpath(saveto)
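+
+# Illustrative sketch (the helper below is hypothetical and never called by
+# this script): a setup.py would normally call use_setuptools() before
+# importing setuptools, while download_setuptools() only fetches the tarball.
+def _example_bootstrap(tmp_dir=os.curdir):
+    # ensure distribute/setuptools is importable, without the download pause
+    use_setuptools(download_delay=0)
+    # also fetch the source tarball and return its path
+    return download_setuptools(to_dir=tmp_dir, delay=0)
+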
+
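+# Decorator: temporarily neutralizes setuptools' DirectorySandbox._violation
+# while the wrapped function runs, so the bootstrap can patch files that a
+# sandboxed easy_install run would otherwise refuse to touch.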
+def _no_sandbox(function):
+    def __no_sandbox(*args, **kw):
+        try:
+            from setuptools.sandbox import DirectorySandbox
+            if not hasattr(DirectorySandbox, '_old'):
+                def violation(*args):
+                    pass
+                DirectorySandbox._old = DirectorySandbox._violation
+                DirectorySandbox._violation = violation
+                patched = True
+            else:
+                patched = False
+        except ImportError:
+            patched = False
+
+        try:
+            return function(*args, **kw)
+        finally:
+            if patched:
+                DirectorySandbox._violation = DirectorySandbox._old
+                del DirectorySandbox._old
+
+    return __no_sandbox
+
+def _patch_file(path, content):
+    """Will backup the file then patch it"""
+    existing_content = open(path).read()
+    if existing_content == content:
+        # already patched
+        log.warn('Already patched.')
+        return False
+    log.warn('Patching...')
+    _rename_path(path)
+    f = open(path, 'w')
+    try:
+        f.write(content)
+    finally:
+        f.close()
+    return True
+
+_patch_file = _no_sandbox(_patch_file)
+
+def _same_content(path, content):
+    return open(path).read() == content
+
+def _rename_path(path):
+    new_name = path + '.OLD.%s' % time.time()
+    log.warn('Renaming %s into %s', path, new_name)
+    os.rename(path, new_name)
+    return new_name
+
+def _remove_flat_installation(placeholder):
+    if not os.path.isdir(placeholder):
+        log.warn('Unknown installation at %s', placeholder)
+        return False
+    found = False
+    for file in os.listdir(placeholder):
+        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
+            found = True
+            break
+    if not found:
+        log.warn('Could not locate setuptools*.egg-info')
+        return
+
+    log.warn('Removing elements out of the way...')
+    pkg_info = os.path.join(placeholder, file)
+    if os.path.isdir(pkg_info):
+        patched = _patch_egg_dir(pkg_info)
+    else:
+        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
+
+    if not patched:
+        log.warn('%s already patched.', pkg_info)
+        return False
+    # now let's move the files out of the way
+    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
+        element = os.path.join(placeholder, element)
+        if os.path.exists(element):
+            _rename_path(element)
+        else:
+            log.warn('Could not find the %s element of the '
+                     'Setuptools distribution', element)
+    return True
+
+_remove_flat_installation = _no_sandbox(_remove_flat_installation)
+
+def _after_install(dist):
+    log.warn('After install bootstrap.')
+    placeholder = dist.get_command_obj('install').install_purelib
+    _create_fake_setuptools_pkg_info(placeholder)
+
+def _create_fake_setuptools_pkg_info(placeholder):
+    if not placeholder or not os.path.exists(placeholder):
+        log.warn('Could not find the install location')
+        return
+    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
+    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
+            (SETUPTOOLS_FAKED_VERSION, pyver)
+    pkg_info = os.path.join(placeholder, setuptools_file)
+    if os.path.exists(pkg_info):
+        log.warn('%s already exists', pkg_info)
+        return
+
+    log.warn('Creating %s', pkg_info)
+    f = open(pkg_info, 'w')
+    try:
+        f.write(SETUPTOOLS_PKG_INFO)
+    finally:
+        f.close()
+
+    pth_file = os.path.join(placeholder, 'setuptools.pth')
+    log.warn('Creating %s', pth_file)
+    f = open(pth_file, 'w')
+    try:
+        f.write(os.path.join(os.curdir, setuptools_file))
+    finally:
+        f.close()
+
+_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+
+def _patch_egg_dir(path):
+    # let's check if it's already patched
+    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+    if os.path.exists(pkg_info):
+        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
+            log.warn('%s already patched.', pkg_info)
+            return False
+    _rename_path(path)
+    os.mkdir(path)
+    os.mkdir(os.path.join(path, 'EGG-INFO'))
+    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+    f = open(pkg_info, 'w')
+    try:
+        f.write(SETUPTOOLS_PKG_INFO)
+    finally:
+        f.close()
+    return True
+
+_patch_egg_dir = _no_sandbox(_patch_egg_dir)
+
+def _before_install():
+    log.warn('Before install bootstrap.')
+    _fake_setuptools()
+
+
+def _under_prefix(location):
+    if 'install' not in sys.argv:
+        return True
+    args = sys.argv[sys.argv.index('install')+1:]
+    for index, arg in enumerate(args):
+        for option in ('--root', '--prefix'):
+            if arg.startswith('%s=' % option):
+                # split on the option itself so '--prefix=PATH' works too
+                top_dir = arg.split('%s=' % option, 1)[-1]
+                return location.startswith(top_dir)
+            elif arg == option:
+                if len(args) > index:
+                    top_dir = args[index+1]
+                    return location.startswith(top_dir)
+        if arg == '--user' and USER_SITE is not None:
+            return location.startswith(USER_SITE)
+    return True
+
+
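+# Locate an already-installed setuptools, overwrite its metadata with the
+# faked SETUPTOOLS_PKG_INFO (egg directory or flat installation), and relaunch
+# the current process so the replacement takes effect.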
+def _fake_setuptools():
+    log.warn('Scanning installed packages')
+    try:
+        import pkg_resources
+    except ImportError:
+        # we're cool
+        log.warn('Setuptools or Distribute does not seem to be installed.')
+        return
+    ws = pkg_resources.working_set
+    try:
+        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
+                                  replacement=False))
+    except TypeError:
+        # old distribute API
+        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+
+    if setuptools_dist is None:
+        log.warn('No setuptools distribution found')
+        return
+    # detecting if it was already faked
+    setuptools_location = setuptools_dist.location
+    log.warn('Setuptools installation detected at %s', setuptools_location)
+
+    # if --root or --prefix was provided, and if
+    # setuptools is not located in them, we don't patch it
+    if not _under_prefix(setuptools_location):
+        log.warn('Not patching, --root or --prefix is installing Distribute'
+                 ' in another location')
+        return
+
+    # let's see if it's an egg
+    if not setuptools_location.endswith('.egg'):
+        log.warn('Non-egg installation')
+        res = _remove_flat_installation(setuptools_location)
+        if not res:
+            return
+    else:
+        log.warn('Egg installation')
+        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
+        if (os.path.exists(pkg_info) and
+            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
+            log.warn('Already patched.')
+            return
+        log.warn('Patching...')
+        # let's create a fake egg replacing setuptools one
+        res = _patch_egg_dir(setuptools_location)
+        if not res:
+            return
+    log.warn('Patching done.')
+    _relaunch()
+
+
+def _relaunch():
+    log.warn('Relaunching...')
+    # we have to relaunch the process
+    # pip marker to avoid a relaunch bug
+    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
+        sys.argv[0] = 'setup.py'
+    args = [sys.executable] + sys.argv
+    sys.exit(subprocess.call(args))
+
+
+def _extractall(self, path=".", members=None):
+    """Extract all members from the archive to the current working
+       directory and set owner, modification time and permissions on
+       directories afterwards. `path' specifies a different directory
+       to extract to. `members' is optional and must be a subset of the
+       list returned by getmembers().
+    """
+    import copy
+    import operator
+    from tarfile import ExtractError
+    directories = []
+
+    if members is None:
+        members = self
+
+    for tarinfo in members:
+        if tarinfo.isdir():
+            # Extract directories with a safe mode.
+            directories.append(tarinfo)
+            tarinfo = copy.copy(tarinfo)
+            tarinfo.mode = 448 # decimal for oct 0700
+        self.extract(tarinfo, path)
+
+    # Reverse sort directories.
+    if sys.version_info < (2, 4):
+        def sorter(dir1, dir2):
+            return cmp(dir1.name, dir2.name)
+        directories.sort(sorter)
+        directories.reverse()
+    else:
+        directories.sort(key=operator.attrgetter('name'), reverse=True)
+
+    # Set correct owner, mtime and filemode on directories.
+    for tarinfo in directories:
+        dirpath = os.path.join(path, tarinfo.name)
+        try:
+            self.chown(tarinfo, dirpath)
+            self.utime(tarinfo, dirpath)
+            self.chmod(tarinfo, dirpath)
+        except ExtractError:
+            e = sys.exc_info()[1]
+            if self.errorlevel > 1:
+                raise
+            else:
+                self._dbg(1, "tarfile: %s" % e)
+
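+
+# Illustrative sketch (hypothetical helper, not used by this script): since
+# _extractall() takes the archive as an explicit first argument, it can be
+# applied directly to an open TarFile object.
+def _example_extract(tarball_path, dest="."):
+    import tarfile
+    tar = tarfile.open(tarball_path)
+    try:
+        _extractall(tar, path=dest)
+    finally:
+        tar.close()
+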
+
+def main(argv, version=DEFAULT_VERSION):
+    """Install or upgrade setuptools and EasyInstall"""
+    tarball = download_setuptools()
+    _install(tarball)
+
+
+if __name__ == '__main__':
+    main(sys.argv[1:])
diff --git a/antlr-3.4/runtime/Python/mkdoxy.sh b/runtime/Python3/mkdoxy.sh
similarity index 100%
copy from antlr-3.4/runtime/Python/mkdoxy.sh
copy to runtime/Python3/mkdoxy.sh
diff --git a/runtime/Python3/pylintrc b/runtime/Python3/pylintrc
new file mode 100644
index 0000000..1ded626
--- /dev/null
+++ b/runtime/Python3/pylintrc
@@ -0,0 +1,253 @@
+# lint Python modules using external checkers
+
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add <file or directory> to the black list. It should be a base name, not a
+# path. You may set this option multiple times.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once).
+# W0622: Redefining built-in '...'
+# C0103: Invalid name
+# R0904: Too many public methods
+# R0201: Method could be a function
+# C0302: Too many lines in a module
+# R0902: Too many instance attributes
+# R0913: Too many arguments
+# R0912: Too many branches
+# R0903: Too few public methods
+# C0111: Missing docstring
+# W0403: Relative import
+# W0401: Wildcard import
+# W0142: */** magic
+disable=W0622,C0103,R0904,R0201,C0302,R0902,R0913,R0912,R0903,C0111,W0403,W0401,W0142
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html
+output-format=text
+
+# Include message's id in output
+include-ids=yes
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors, warning, statement, which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
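+# (Worked example, assuming 100 analyzed statements with 2 errors, 3 warnings
+# and no refactor/convention messages: 10.0 - ((5*2 + 3) / 100.0) * 10 = 8.70)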
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,apply,input
+
+# Regular expression which should only match correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression which should only match correct module level names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression which should only match correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression which should only match correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Regular expression which should only match functions or classes name which do
+# not require a docstring
+no-docstring-rgx=__.*__
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=80
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the beginning of the name of dummy variables
+# (i.e. not used).
+dummy-variables-rgx=_|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branchs=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
diff --git a/runtime/Python3/setup.py b/runtime/Python3/setup.py
new file mode 100644
index 0000000..92d9ac4
--- /dev/null
+++ b/runtime/Python3/setup.py
@@ -0,0 +1,289 @@
+
+import sys
+if sys.version_info < (3, 2):
+    print('This antlr3 module requires Python 3.2 or later. You can '
+          'download Python 3 from\nhttps://python.org/, '
+          'or visit http://www.antlr.org/ for the Python target.')
+    sys.exit(1)
+
+# bootstrapping setuptools
+import ez_setup
+ez_setup.use_setuptools()
+
+import os
+import textwrap
+from distutils.errors import DistutilsError, DistutilsFileError
+from distutils.command.clean import clean as _clean
+from distutils.cmd import Command
+from setuptools import setup
+from distutils import log
+
+
+class clean(_clean):
+    """Also cleanup local temp files."""
+
+    def run(self):
+        _clean.run(self)
+
+        import fnmatch
+
+        # kill temporary files
+        patterns = [
+            # generic tempfiles
+            '*~', '*.bak', '*.pyc',
+
+            # tempfiles generated by ANTLR runs
+            't[0-9]*Lexer.py', 't[0-9]*Parser.py',
+            '*.tokens', '*__.g',
+            ]
+
+        for path in ('antlr3', 'unittests', 'tests'):
+            path = os.path.join(os.path.dirname(__file__), path)
+            if os.path.isdir(path):
+                for root, dirs, files in os.walk(path, topdown=True):
+                    graveyard = []
+                    for pat in patterns:
+                        graveyard.extend(fnmatch.filter(files, pat))
+
+                    for name in graveyard:
+                        filePath = os.path.join(root, name)
+
+                        try:
+                            log.info("removing '%s'", filePath)
+                            os.unlink(filePath)
+                        except OSError as exc:
+                            log.warn(
+                                "Failed to delete '%s': %s",
+                                filePath, exc
+                                )
+
+
+class TestError(DistutilsError):
+    pass
+
+
+# Note: the command's class name appears verbatim in the --help output:
+# ...
+# Options for 'CmdUnitTest' command
+# ...
+# so a plain lowercase name is used instead; the stdlib unittest module is
+# only imported locally inside run(), so the shadowing is harmless.
+class unittest(Command):
+    """Run unit tests for package"""
+
+    description = "run unit tests for package"
+
+    user_options = []
+    boolean_options = []
+
+    def initialize_options(self):
+        pass
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        testDir = os.path.join(os.path.dirname(__file__), 'unittests')
+        if not os.path.isdir(testDir):
+            raise DistutilsFileError(
+                "There is no 'unittests' directory. Did you fetch the "
+                "development version?",
+                )
+
+        import glob
+        import imp
+        import unittest
+        import traceback
+        import io
+
+        suite = unittest.TestSuite()
+        loadFailures = []
+
+        # collect tests from all unittests/test*.py files
+        testFiles = []
+        for testPath in glob.glob(os.path.join(testDir, 'test*.py')):
+            testFiles.append(testPath)
+
+        testFiles.sort()
+        for testPath in testFiles:
+            testID = os.path.basename(testPath)[:-3]
+
+            try:
+                modFile, modPathname, modDescription \
+                         = imp.find_module(testID, [testDir])
+
+                testMod = imp.load_module(
+                    testID, modFile, modPathname, modDescription
+                    )
+
+                suite.addTests(
+                    unittest.defaultTestLoader.loadTestsFromModule(testMod)
+                    )
+
+            except Exception:
+                buf = io.StringIO()
+                traceback.print_exc(file=buf)
+
+                loadFailures.append(
+                    (os.path.basename(testPath), buf.getvalue())
+                    )
+
+        runner = unittest.TextTestRunner(verbosity=2)
+        result = runner.run(suite)
+
+        for testName, error in loadFailures:
+            sys.stderr.write('\n' + '='*70 + '\n')
+            sys.stderr.write(
+                "Failed to load test module {}\n".format(testName)
+                )
+            sys.stderr.write(error)
+            sys.stderr.write('\n')
+
+        if not result.wasSuccessful() or loadFailures:
+            raise TestError(
+                "Unit test suite failed!",
+                )
+
+
+class functest(Command):
+    """Run functional tests for package"""
+
+    description = "run functional tests for package"
+
+    user_options = [
+        ('testcase=', None,
+         "testcase to run [default: run all]"),
+        ('antlr-version=', None,
+         "ANTLR version to use [default: HEAD (in ../../build)]"),
+        ('antlr-jar=', None,
+         "Explicit path to an antlr jar (overrides --antlr-version)"),
+        ]
+
+    boolean_options = []
+
+    def initialize_options(self):
+        self.testcase = None
+        self.antlr_version = 'HEAD'
+        self.antlr_jar = None
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        import glob
+        import imp
+        import unittest
+        import traceback
+        import io
+
+        testDir = os.path.join(os.path.dirname(__file__), 'tests')
+        if not os.path.isdir(testDir):
+            raise DistutilsFileError(
+                "There is not 'tests' directory. Did you fetch the "
+                "development version?",
+                )
+
+        # make sure, relative imports from testcases work
+        sys.path.insert(0, testDir)
+
+        rootDir = os.path.abspath(
+            os.path.join(os.path.dirname(__file__), '..', '..'))
+
+        if self.antlr_jar is not None:
+            classpath = [self.antlr_jar]
+        elif self.antlr_version == 'HEAD':
+            classpath = [
+                os.path.join(rootDir, 'tool', 'target', 'classes'),
+                os.path.join(rootDir, 'runtime', 'Java', 'target', 'classes')
+                ]
+        else:
+            classpath = [
+                os.path.join(rootDir, 'archive',
+                             'antlr-{}.jar'.format(self.antlr_version))
+                ]
+
+        classpath.extend([
+            os.path.join(rootDir, 'lib', 'antlr-3.4.1-SNAPSHOT.jar'),
+            os.path.join(rootDir, 'lib', 'antlr-runtime-3.4.jar'),
+            os.path.join(rootDir, 'lib', 'ST-4.0.5.jar'),
+            ])
+        os.environ['CLASSPATH'] = ':'.join(classpath)
+
+        os.environ['ANTLRVERSION'] = self.antlr_version
+
+        suite = unittest.TestSuite()
+        loadFailures = []
+
+        # collect tests from all tests/t*.py files
+        testFiles = []
+        test_glob = 't[0-9][0-9][0-9]*.py'
+        for testPath in glob.glob(os.path.join(testDir, test_glob)):
+            if testPath.endswith('Lexer.py') or testPath.endswith('Parser.py'):
+                continue
+
+            # if a single testcase has been selected, filter out all other
+            # tests
+            if (self.testcase is not None
+                and not os.path.basename(testPath)[:-3].startswith(self.testcase)):
+                continue
+
+            testFiles.append(testPath)
+
+        testFiles.sort()
+        for testPath in testFiles:
+            testID = os.path.basename(testPath)[:-3]
+
+            try:
+                modFile, modPathname, modDescription \
+                         = imp.find_module(testID, [testDir])
+
+                testMod = imp.load_module(
+                    testID, modFile, modPathname, modDescription)
+
+                suite.addTests(
+                    unittest.defaultTestLoader.loadTestsFromModule(testMod))
+
+            except Exception:
+                buf = io.StringIO()
+                traceback.print_exc(file=buf)
+
+                loadFailures.append(
+                    (os.path.basename(testPath), buf.getvalue()))
+
+        runner = unittest.TextTestRunner(verbosity=2)
+
+        result = runner.run(suite)
+
+        for testName, error in loadFailures:
+            sys.stderr.write('\n' + '='*70 + '\n')
+            sys.stderr.write(
+                "Failed to load test module {}\n".format(testName)
+                )
+            sys.stderr.write(error)
+            sys.stderr.write('\n')
+
+        if not result.wasSuccessful() or loadFailures:
+            raise TestError(
+                "Functional test suite failed!",
+                )
+
+
+setup(name='antlr_python3_runtime',
+      version='3.4',
+      packages=['antlr3'],
+
+      author="Benjamin S Wolf",
+      author_email="jokeserver+antlr3@gmail.com",
+      url="http://www.antlr.org/",
+      download_url="http://www.antlr.org/download.html",
+      license="BSD",
+      description="Runtime package for ANTLR3",
+      long_description=textwrap.dedent('''\
+      This is the runtime package for ANTLR3, which is required to use parsers
+      generated by ANTLR3.
+      '''),
+      cmdclass={'unittest': unittest,
+                'functest': functest,
+                'clean': clean
+                },
+      )
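+
+# Typical invocations of the commands registered above (the test case name,
+# version and jar path are examples only):
+#
+#   python setup.py unittest
+#   python setup.py functest --testcase=t004
+#   python setup.py functest --antlr-version=3.4
+#   python setup.py functest --antlr-jar=/path/to/antlr.jar
+#   python setup.py clean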
diff --git a/runtime/Python3/tests/t001lexer.g b/runtime/Python3/tests/t001lexer.g
new file mode 100644
index 0000000..c363316
--- /dev/null
+++ b/runtime/Python3/tests/t001lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t001lexer;
+options {
+  language = Python3;
+}
+
+ZERO: '0';
diff --git a/runtime/Python3/tests/t001lexer.py b/runtime/Python3/tests/t001lexer.py
new file mode 100644
index 0000000..9450e8e
--- /dev/null
+++ b/runtime/Python3/tests/t001lexer.py
@@ -0,0 +1,57 @@
+import antlr3
+import testbase
+import unittest
+
+class t001lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        stream = antlr3.StringStream('0')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.ZERO)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+        
+
+    def testIteratorInterface(self):
+        stream = antlr3.StringStream('0')
+        lexer = self.getLexer(stream)
+
+        types = [token.type for token in lexer]
+
+        self.assertEqual(types, [self.lexerModule.ZERO])
+        
+
+    def testMalformedInput(self):
+        stream = antlr3.StringStream('1')
+        lexer = self.getLexer(stream)
+
+        try:
+            token = lexer.nextToken()
+            self.fail()
+
+        except antlr3.MismatchedTokenException as exc:
+            self.assertEqual(exc.expecting, '0')
+            self.assertEqual(exc.unexpectedType, '1')
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t002lexer.g b/runtime/Python3/tests/t002lexer.g
new file mode 100644
index 0000000..f794d9b
--- /dev/null
+++ b/runtime/Python3/tests/t002lexer.g
@@ -0,0 +1,7 @@
+lexer grammar t002lexer;
+options {
+  language = Python3;
+}
+
+ZERO: '0';
+ONE: '1';
diff --git a/runtime/Python3/tests/t002lexer.py b/runtime/Python3/tests/t002lexer.py
new file mode 100644
index 0000000..37824ba
--- /dev/null
+++ b/runtime/Python3/tests/t002lexer.py
@@ -0,0 +1,50 @@
+import antlr3
+import testbase
+import unittest
+
+class t002lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        stream = antlr3.StringStream('01')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.ZERO)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.ONE)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+        
+
+    def testMalformedInput(self):
+        stream = antlr3.StringStream('2')
+        lexer = self.getLexer(stream)
+
+        try:
+            token = lexer.nextToken()
+            self.fail()
+
+        except antlr3.NoViableAltException as exc:
+            self.assertEqual(exc.unexpectedType, '2')
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t003lexer.g b/runtime/Python3/tests/t003lexer.g
new file mode 100644
index 0000000..22253d2
--- /dev/null
+++ b/runtime/Python3/tests/t003lexer.g
@@ -0,0 +1,8 @@
+lexer grammar t003lexer;
+options {
+  language = Python3;
+}
+
+ZERO: '0';
+ONE: '1';
+FOOZE: 'fooze';
diff --git a/runtime/Python3/tests/t003lexer.py b/runtime/Python3/tests/t003lexer.py
new file mode 100644
index 0000000..da9421f
--- /dev/null
+++ b/runtime/Python3/tests/t003lexer.py
@@ -0,0 +1,53 @@
+import antlr3
+import testbase
+import unittest
+
+class t003lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        stream = antlr3.StringStream('0fooze1')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.ZERO)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOOZE)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.ONE)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+        
+
+    def testMalformedInput(self):
+        stream = antlr3.StringStream('2')
+        lexer = self.getLexer(stream)
+
+        try:
+            token = lexer.nextToken()
+            self.fail()
+
+        except antlr3.NoViableAltException as exc:
+            self.assertEqual(exc.unexpectedType, '2')
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t004lexer.g b/runtime/Python3/tests/t004lexer.g
new file mode 100644
index 0000000..4a08d43
--- /dev/null
+++ b/runtime/Python3/tests/t004lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t004lexer;
+options {
+  language = Python3;
+}
+
+FOO: 'f' 'o'*;
diff --git a/runtime/Python3/tests/t004lexer.py b/runtime/Python3/tests/t004lexer.py
new file mode 100644
index 0000000..633427e
--- /dev/null
+++ b/runtime/Python3/tests/t004lexer.py
@@ -0,0 +1,70 @@
+import antlr3
+import testbase
+import unittest
+
+class t004lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        stream = antlr3.StringStream('ffofoofooo')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 0)
+        self.assertEqual(token.stop, 0)
+        self.assertEqual(token.text, 'f')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 1)
+        self.assertEqual(token.stop, 2)
+        self.assertEqual(token.text, 'fo')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 3)
+        self.assertEqual(token.stop, 5)
+        self.assertEqual(token.text, 'foo')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 6)
+        self.assertEqual(token.stop, 9)
+        self.assertEqual(token.text, 'fooo')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+        
+
+    def testMalformedInput(self):
+        stream = antlr3.StringStream('2')
+        lexer = self.getLexer(stream)
+
+        try:
+            token = lexer.nextToken()
+            self.fail()
+
+        except antlr3.MismatchedTokenException as exc:
+            self.assertEqual(exc.expecting, 'f')
+            self.assertEqual(exc.unexpectedType, '2')
+            
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/runtime/Python3/tests/t005lexer.g b/runtime/Python3/tests/t005lexer.g
new file mode 100644
index 0000000..247a344
--- /dev/null
+++ b/runtime/Python3/tests/t005lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t005lexer;
+options {
+  language = Python3;
+}
+
+FOO: 'f' 'o'+;
diff --git a/runtime/Python3/tests/t005lexer.py b/runtime/Python3/tests/t005lexer.py
new file mode 100644
index 0000000..e5ee165
--- /dev/null
+++ b/runtime/Python3/tests/t005lexer.py
@@ -0,0 +1,75 @@
+import antlr3
+import testbase
+import unittest
+
+class t005lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        stream = antlr3.StringStream('fofoofooo')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 0)
+        self.assertEqual(token.stop, 1)
+        self.assertEqual(token.text, 'fo')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 2)
+        self.assertEqual(token.stop, 4)
+        self.assertEqual(token.text, 'foo')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 5)
+        self.assertEqual(token.stop, 8)
+        self.assertEqual(token.text, 'fooo')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+        
+
+    def testMalformedInput1(self):
+        stream = antlr3.StringStream('2')
+        lexer = self.getLexer(stream)
+
+        try:
+            token = lexer.nextToken()
+            self.fail()
+
+        except antlr3.MismatchedTokenException as exc:
+            self.assertEqual(exc.expecting, 'f')
+            self.assertEqual(exc.unexpectedType, '2')
+
+
+    def testMalformedInput2(self):
+        stream = antlr3.StringStream('f')
+        lexer = self.getLexer(stream)
+
+        try:
+            token = lexer.nextToken()
+            self.fail()
+
+        except antlr3.EarlyExitException as exc:
+            self.assertEqual(exc.unexpectedType, antlr3.EOF)
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t006lexer.g b/runtime/Python3/tests/t006lexer.g
new file mode 100644
index 0000000..b7f4f4a
--- /dev/null
+++ b/runtime/Python3/tests/t006lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t006lexer;
+options {
+  language = Python3;
+}
+
+FOO: 'f' ('o' | 'a')*;
diff --git a/runtime/Python3/tests/t006lexer.py b/runtime/Python3/tests/t006lexer.py
new file mode 100644
index 0000000..daa5d29
--- /dev/null
+++ b/runtime/Python3/tests/t006lexer.py
@@ -0,0 +1,61 @@
+import antlr3
+import testbase
+import unittest
+
+class t006lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        stream = antlr3.StringStream('fofaaooa')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 0)
+        self.assertEqual(token.stop, 1)
+        self.assertEqual(token.text, 'fo')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 2)
+        self.assertEqual(token.stop, 7)
+        self.assertEqual(token.text, 'faaooa')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+
+
+    def testMalformedInput(self):
+        stream = antlr3.StringStream('fofoaooaoa2')
+        lexer = self.getLexer(stream)
+
+        lexer.nextToken()
+        lexer.nextToken()
+        try:
+            token = lexer.nextToken()
+            self.fail(token)
+
+        except antlr3.MismatchedTokenException as exc:
+            self.assertEqual(exc.expecting, 'f')
+            self.assertEqual(exc.unexpectedType, '2')
+            self.assertEqual(exc.charPositionInLine, 10)
+            self.assertEqual(exc.line, 1)
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t007lexer.g b/runtime/Python3/tests/t007lexer.g
new file mode 100644
index 0000000..e55b4b7
--- /dev/null
+++ b/runtime/Python3/tests/t007lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t007lexer;
+options {
+  language = Python3;
+}
+
+FOO: 'f' ('o' | 'a' 'b'+)*;
diff --git a/runtime/Python3/tests/t007lexer.py b/runtime/Python3/tests/t007lexer.py
new file mode 100644
index 0000000..02abb77
--- /dev/null
+++ b/runtime/Python3/tests/t007lexer.py
@@ -0,0 +1,59 @@
+import antlr3
+import testbase
+import unittest
+
+class t007lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        stream = antlr3.StringStream('fofababbooabb')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 0)
+        self.assertEqual(token.stop, 1)
+        self.assertEqual(token.text, 'fo')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 2)
+        self.assertEqual(token.stop, 12)
+        self.assertEqual(token.text, 'fababbooabb')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+
+
+    def testMalformedInput(self):
+        stream = antlr3.StringStream('foaboao')
+        lexer = self.getLexer(stream)
+
+        try:
+            token = lexer.nextToken()
+            self.fail(token)
+
+        except antlr3.EarlyExitException as exc:
+            self.assertEqual(exc.unexpectedType, 'o')
+            self.assertEqual(exc.charPositionInLine, 6)
+            self.assertEqual(exc.line, 1)
+            
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/runtime/Python3/tests/t008lexer.g b/runtime/Python3/tests/t008lexer.g
new file mode 100644
index 0000000..2a7904e
--- /dev/null
+++ b/runtime/Python3/tests/t008lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t008lexer;
+options {
+  language = Python3;
+}
+
+FOO: 'f' 'a'?;
diff --git a/runtime/Python3/tests/t008lexer.py b/runtime/Python3/tests/t008lexer.py
new file mode 100644
index 0000000..f3b1ed9
--- /dev/null
+++ b/runtime/Python3/tests/t008lexer.py
@@ -0,0 +1,66 @@
+import antlr3
+import testbase
+import unittest
+
+class t008lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        stream = antlr3.StringStream('ffaf')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 0)
+        self.assertEqual(token.stop, 0)
+        self.assertEqual(token.text, 'f')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 1)
+        self.assertEqual(token.stop, 2)
+        self.assertEqual(token.text, 'fa')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.FOO)
+        self.assertEqual(token.start, 3)
+        self.assertEqual(token.stop, 3)
+        self.assertEqual(token.text, 'f')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+
+
+    def testMalformedInput(self):
+        stream = antlr3.StringStream('fafb')
+        lexer = self.getLexer(stream)
+
+        lexer.nextToken()
+        lexer.nextToken()
+        try:
+            token = lexer.nextToken()
+            self.fail(token)
+
+        except antlr3.MismatchedTokenException as exc:
+            self.assertEqual(exc.unexpectedType, 'b')
+            self.assertEqual(exc.charPositionInLine, 3)
+            self.assertEqual(exc.line, 1)
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t009lexer.g b/runtime/Python3/tests/t009lexer.g
new file mode 100644
index 0000000..a04b5b4
--- /dev/null
+++ b/runtime/Python3/tests/t009lexer.g
@@ -0,0 +1,6 @@
+lexer grammar t009lexer;
+options {
+  language = Python3;
+}
+
+DIGIT: '0' .. '9';
diff --git a/runtime/Python3/tests/t009lexer.py b/runtime/Python3/tests/t009lexer.py
new file mode 100644
index 0000000..bf60bce
--- /dev/null
+++ b/runtime/Python3/tests/t009lexer.py
@@ -0,0 +1,67 @@
+import antlr3
+import testbase
+import unittest
+
+class t009lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+
+
+    def testValid(self):
+        stream = antlr3.StringStream('085')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.DIGIT)
+        self.assertEqual(token.start, 0)
+        self.assertEqual(token.stop, 0)
+        self.assertEqual(token.text, '0')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.DIGIT)
+        self.assertEqual(token.start, 1)
+        self.assertEqual(token.stop, 1)
+        self.assertEqual(token.text, '8')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.DIGIT)
+        self.assertEqual(token.start, 2)
+        self.assertEqual(token.stop, 2)
+        self.assertEqual(token.text, '5')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+
+
+    def testMalformedInput(self):
+        stream = antlr3.StringStream('2a')
+        lexer = self.getLexer(stream)
+
+        lexer.nextToken()
+        try:
+            token = lexer.nextToken()
+            self.fail(token)
+
+        except antlr3.MismatchedSetException as exc:
+            # TODO: This should provide more useful information
+            self.assertIsNone(exc.expecting)
+            self.assertEqual(exc.unexpectedType, 'a')
+            self.assertEqual(exc.charPositionInLine, 1)
+            self.assertEqual(exc.line, 1)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t010lexer.g b/runtime/Python3/tests/t010lexer.g
new file mode 100644
index 0000000..3a7524d
--- /dev/null
+++ b/runtime/Python3/tests/t010lexer.g
@@ -0,0 +1,7 @@
+lexer grammar t010lexer;
+options {
+  language = Python3;
+}
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
+WS: (' ' | '\n')+;
diff --git a/runtime/Python3/tests/t010lexer.py b/runtime/Python3/tests/t010lexer.py
new file mode 100644
index 0000000..9cedea3
--- /dev/null
+++ b/runtime/Python3/tests/t010lexer.py
@@ -0,0 +1,78 @@
+import antlr3
+import testbase
+import unittest
+
+class t010lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.IDENTIFIER)
+        self.assertEqual(token.start, 0)
+        self.assertEqual(token.stop, 5)
+        self.assertEqual(token.text, 'foobar')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.WS)
+        self.assertEqual(token.start, 6)
+        self.assertEqual(token.stop, 6)
+        self.assertEqual(token.text, ' ')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.IDENTIFIER)
+        self.assertEqual(token.start, 7)
+        self.assertEqual(token.stop, 11)
+        self.assertEqual(token.text, '_Ab98')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.WS)
+        self.assertEqual(token.start, 12)
+        self.assertEqual(token.stop, 14)
+        self.assertEqual(token.text, ' \n ')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.IDENTIFIER)
+        self.assertEqual(token.start, 15)
+        self.assertEqual(token.stop, 20)
+        self.assertEqual(token.text, 'A12sdf')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+
+
+    def testMalformedInput(self):
+        stream = antlr3.StringStream('a-b')
+        lexer = self.getLexer(stream)
+
+        lexer.nextToken()
+        try:
+            token = lexer.nextToken()
+            self.fail(token)
+
+        except antlr3.NoViableAltException as exc:
+            self.assertEqual(exc.unexpectedType, '-')
+            self.assertEqual(exc.charPositionInLine, 1)
+            self.assertEqual(exc.line, 1)
+
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t011lexer.g b/runtime/Python3/tests/t011lexer.g
new file mode 100644
index 0000000..17d01ea
--- /dev/null
+++ b/runtime/Python3/tests/t011lexer.g
@@ -0,0 +1,19 @@
+lexer grammar t011lexer;
+options {
+  language = Python3;
+}
+
+IDENTIFIER: 
+        ('a'..'z'|'A'..'Z'|'_') 
+        ('a'..'z'
+        |'A'..'Z'
+        |'0'..'9'
+        |'_'
+            { 
+              print("Underscore")
+              print("foo")
+            }
+        )*
+    ;
+
+WS: (' ' | '\n')+;
diff --git a/runtime/Python3/tests/t011lexer.py b/runtime/Python3/tests/t011lexer.py
new file mode 100644
index 0000000..b417826
--- /dev/null
+++ b/runtime/Python3/tests/t011lexer.py
@@ -0,0 +1,78 @@
+import antlr3
+import testbase
+import unittest
+
+class t011lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
+        lexer = self.getLexer(stream)
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.IDENTIFIER)
+        self.assertEqual(token.start, 0)
+        self.assertEqual(token.stop, 5)
+        self.assertEqual(token.text, 'foobar')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.WS)
+        self.assertEqual(token.start, 6)
+        self.assertEqual(token.stop, 6)
+        self.assertEqual(token.text, ' ')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.IDENTIFIER)
+        self.assertEqual(token.start, 7)
+        self.assertEqual(token.stop, 11)
+        self.assertEqual(token.text, '_Ab98')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.WS)
+        self.assertEqual(token.start, 12)
+        self.assertEqual(token.stop, 14)
+        self.assertEqual(token.text, ' \n ')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.IDENTIFIER)
+        self.assertEqual(token.start, 15)
+        self.assertEqual(token.stop, 20)
+        self.assertEqual(token.text, 'A12sdf')
+
+        token = lexer.nextToken()
+        self.assertEqual(token.type, self.lexerModule.EOF)
+
+
+    def testMalformedInput(self):
+        stream = antlr3.StringStream('a-b')
+        lexer = self.getLexer(stream)
+
+        lexer.nextToken()
+        try:
+            token = lexer.nextToken()
+            self.fail(token)
+
+        except antlr3.NoViableAltException as exc:
+            self.assertEqual(exc.unexpectedType, '-')
+            self.assertEqual(exc.charPositionInLine, 1)
+            self.assertEqual(exc.line, 1)
+
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXML.input b/runtime/Python3/tests/t012lexerXML.input
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t012lexerXML.input
copy to runtime/Python3/tests/t012lexerXML.input
diff --git a/antlr-3.4/runtime/Python/tests/t012lexerXML.output b/runtime/Python3/tests/t012lexerXML.output
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t012lexerXML.output
copy to runtime/Python3/tests/t012lexerXML.output
diff --git a/runtime/Python3/tests/t012lexerXML.py b/runtime/Python3/tests/t012lexerXML.py
new file mode 100644
index 0000000..40d67bb
--- /dev/null
+++ b/runtime/Python3/tests/t012lexerXML.py
@@ -0,0 +1,120 @@
+import antlr3
+import testbase
+import unittest
+import os
+import sys
+from io import StringIO
+import textwrap
+
+class t012lexerXML(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar('t012lexerXMLLexer.g')
+        
+        
+    def lexerClass(self, base):
+        class TLexer(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TLexer
+    
+        
+    def testValid(self):
+        inputPath = os.path.splitext(__file__)[0] + '.input'
+        with open(inputPath) as f:
+            data = f.read()
+        stream = antlr3.StringStream(data)
+        lexer = self.getLexer(stream)
+
+        while True:
+            token = lexer.nextToken()
+            if token.type == self.lexerModule.EOF:
+                break
+
+
+        output = lexer.outbuf.getvalue()
+
+        outputPath = os.path.splitext(__file__)[0] + '.output'
+
+        with open(outputPath) as f:
+            testOutput = f.read()
+
+        self.assertEqual(output, testOutput)
+
+
+    def testMalformedInput1(self):
+        input = textwrap.dedent("""\
+        <?xml version='1.0'?>
+        <document d>
+        </document>
+        """)
+
+        stream = antlr3.StringStream(input)
+        lexer = self.getLexer(stream)
+
+        try:
+            while True:
+                token = lexer.nextToken()
+                # Should raise NoViableAltException before hitting EOF
+                if token.type == antlr3.EOF:
+                    self.fail()
+
+        except antlr3.NoViableAltException as exc:
+            self.assertEqual(exc.unexpectedType, '>')
+            self.assertEqual(exc.charPositionInLine, 11)
+            self.assertEqual(exc.line, 2)
+
+
+    def testMalformedInput2(self):
+        input = textwrap.dedent("""\
+        <?tml version='1.0'?>
+        <document>
+        </document>
+        """)
+
+        stream = antlr3.StringStream(input)
+        lexer = self.getLexer(stream)
+
+        try:
+            while True:
+                token = lexer.nextToken()
+                # Should raise NoViableAltException before hitting EOF
+                if token.type == antlr3.EOF:
+                    self.fail()
+
+        except antlr3.MismatchedSetException as exc:
+            self.assertEqual(exc.unexpectedType, 't')
+            self.assertEqual(exc.charPositionInLine, 2)
+            self.assertEqual(exc.line, 1)
+
+
+    def testMalformedInput3(self):
+        input = textwrap.dedent("""\
+        <?xml version='1.0'?>
+        <docu ment attr="foo">
+        </document>
+        """)
+
+        stream = antlr3.StringStream(input)
+        lexer = self.getLexer(stream)
+
+        try:
+            while True:
+                token = lexer.nextToken()
+                # Should raise NoViableAltException before hitting EOF
+                if token.type == antlr3.EOF:
+                    self.fail()
+
+        except antlr3.NoViableAltException as exc:
+            self.assertEqual(exc.unexpectedType, 'a')
+            self.assertEqual(exc.charPositionInLine, 11)
+            self.assertEqual(exc.line, 2)
+
+            
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t012lexerXMLLexer.g b/runtime/Python3/tests/t012lexerXMLLexer.g
new file mode 100644
index 0000000..23e566a
--- /dev/null
+++ b/runtime/Python3/tests/t012lexerXMLLexer.g
@@ -0,0 +1,132 @@
+lexer grammar t012lexerXMLLexer;
+options {
+  language = Python3;
+}
+
+@header {
+from io import StringIO
+}
+
+@lexer::init {
+self.outbuf = StringIO()
+}
+
+@lexer::members {
+def output(self, line):
+    self.outbuf.write(line + "\n")
+}
+
+DOCUMENT
+    :  XMLDECL? WS? DOCTYPE? WS? ELEMENT WS? 
+    ;
+
+fragment DOCTYPE
+    :
+        '<!DOCTYPE' WS rootElementName=GENERIC_ID 
+        {self.output("ROOTELEMENT: "+rootElementName.text)}
+        WS
+        ( 
+            ( 'SYSTEM' WS sys1=VALUE
+                {self.output("SYSTEM: "+sys1.text)}
+                
+            | 'PUBLIC' WS pub=VALUE WS sys2=VALUE
+                {self.output("PUBLIC: "+pub.text)}
+                {self.output("SYSTEM: "+sys2.text)}   
+            )
+            ( WS )?
+        )?
+        ( dtd=INTERNAL_DTD
+            {self.output("INTERNAL DTD: "+dtd.text)}
+        )?
+		'>'
+	;
+
+fragment INTERNAL_DTD : '[' (options {greedy=false;} : .)* ']' ;
+
+fragment PI :
+        '<?' target=GENERIC_ID WS? 
+          {self.output("PI: "+target.text)}
+        ( ATTRIBUTE WS? )*  '?>'
+	;
+
+fragment XMLDECL :
+        '<?' ('x'|'X') ('m'|'M') ('l'|'L') WS? 
+          {self.output("XML declaration")}
+        ( ATTRIBUTE WS? )*  '?>'
+	;
+
+
+fragment ELEMENT
+    : ( START_TAG
+            (ELEMENT
+            | t=PCDATA
+                {self.output('PCDATA: "{}"'.format($t.text))}
+            | t=CDATA
+                {self.output('CDATA: "{}"'.format($t.text))}
+            | t=COMMENT
+                {self.output('Comment: "{}"'.format($t.text))}
+            | pi=PI
+            )*
+            END_TAG
+        | EMPTY_ELEMENT
+        )
+    ;
+
+fragment START_TAG 
+    : '<' WS? name=GENERIC_ID WS?
+          {self.output("Start Tag: "+name.text)}
+        ( ATTRIBUTE WS? )* '>'
+    ;
+
+fragment EMPTY_ELEMENT 
+    : '<' WS? name=GENERIC_ID WS?
+          {self.output("Empty Element: "+name.text)}
+        ( ATTRIBUTE WS? )* '/>'
+    ;
+
+fragment ATTRIBUTE 
+    : name=GENERIC_ID WS? '=' WS? value=VALUE
+        {self.output("Attr: {}={}".format(name.text, value.text))}
+    ;
+
+fragment END_TAG 
+    : '</' WS? name=GENERIC_ID WS? '>'
+        {self.output("End Tag: "+name.text)}
+    ;
+
+fragment COMMENT
+	:	'<!--' (options {greedy=false;} : .)* '-->'
+	;
+
+fragment CDATA
+	:	'<![CDATA[' (options {greedy=false;} : .)* ']]>'
+	;
+
+fragment PCDATA : (~'<')+ ; 
+
+fragment VALUE : 
+        ( '\"' (~'\"')* '\"'
+        | '\'' (~'\'')* '\''
+        )
+	;
+
+fragment GENERIC_ID 
+    : ( LETTER | '_' | ':') 
+        ( options {greedy=true;} : LETTER | '0'..'9' | '.' | '-' | '_' | ':' )*
+	;
+
+fragment LETTER
+	: 'a'..'z' 
+	| 'A'..'Z'
+	;
+
+fragment WS  :
+        (   ' '
+        |   '\t'
+        |  ( '\n'
+            |	'\r\n'
+            |	'\r'
+            )
+        )+
+    ;    
+
diff --git a/runtime/Python3/tests/t013parser.g b/runtime/Python3/tests/t013parser.g
new file mode 100644
index 0000000..bf97d77
--- /dev/null
+++ b/runtime/Python3/tests/t013parser.g
@@ -0,0 +1,23 @@
+grammar t013parser;
+options {
+  language = Python3;
+}
+
+@parser::init {
+self.identifiers = []
+self.reportedErrors = []
+}
+
+@parser::members {
+def foundIdentifier(self, name):
+    self.identifiers.append(name)
+
+def emitErrorMessage(self, msg):
+    self.reportedErrors.append(msg)
+}
+
+document:
+        t=IDENTIFIER {self.foundIdentifier($t.text)}
+        ;
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
diff --git a/runtime/Python3/tests/t013parser.py b/runtime/Python3/tests/t013parser.py
new file mode 100644
index 0000000..4562e36
--- /dev/null
+++ b/runtime/Python3/tests/t013parser.py
@@ -0,0 +1,35 @@
+import antlr3
+import testbase
+import unittest
+
+class t013parser(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def testValid(self):
+        cStream = antlr3.StringStream('foobar')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.document()
+
+        self.assertEqual(parser.reportedErrors, [])
+        self.assertEqual(parser.identifiers, ['foobar'])
+
+
+    def testMalformedInput1(self):
+        cStream = antlr3.StringStream('')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+
+        parser.document()
+
+        # FIXME: currently strings with formatted errors are collected
+        # can't check error locations yet
+        self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors)
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t014parser.g b/runtime/Python3/tests/t014parser.g
new file mode 100644
index 0000000..3d58d18
--- /dev/null
+++ b/runtime/Python3/tests/t014parser.g
@@ -0,0 +1,35 @@
+grammar t014parser;
+options {
+  language = Python3;
+}
+
+@parser::init {
+self.events = []
+self.reportedErrors = []
+}
+
+@parser::members {
+def emitErrorMessage(self, msg):
+    self.reportedErrors.append(msg)
+}
+        
+
+document:
+        ( declaration
+        | call
+        )*
+        EOF
+    ;
+
+declaration:
+        'var' t=IDENTIFIER ';'
+        {self.events.append(('decl', $t.text))}
+    ;
+
+call:
+        t=IDENTIFIER '(' ')' ';'
+        {self.events.append(('call', $t.text))}
+    ;
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
+WS:  (' '|'\r'|'\t'|'\n') {$channel=HIDDEN;};
diff --git a/runtime/Python3/tests/t014parser.py b/runtime/Python3/tests/t014parser.py
new file mode 100644
index 0000000..ae071d7
--- /dev/null
+++ b/runtime/Python3/tests/t014parser.py
@@ -0,0 +1,66 @@
+import antlr3
+import testbase
+import unittest
+
+class t014parser(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def testValid(self):
+        cStream = antlr3.StringStream('var foobar; gnarz(); var blupp; flupp ( ) ;')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.document()
+
+        self.assertEqual(parser.reportedErrors, [])
+        self.assertEqual(parser.events,
+                         [('decl', 'foobar'), ('call', 'gnarz'),
+                          ('decl', 'blupp'), ('call', 'flupp')])
+
+
+    def testMalformedInput1(self):
+        cStream = antlr3.StringStream('var; foo();')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+
+        parser.document()
+
+        # FIXME: currently strings with formatted errors are collected
+        # can't check error locations yet
+        self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors)
+        self.assertEqual(parser.events, [])
+
+
+    def testMalformedInput2(self):
+        cStream = antlr3.StringStream('var foobar(); gnarz();')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+
+        parser.document()
+
+        # FIXME: currently strings with formatted errors are collected
+        # can't check error locations yet
+        self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors)
+        self.assertEqual(parser.events, [('call', 'gnarz')])
+
+
+    def testMalformedInput3(self):
+        cStream = antlr3.StringStream('gnarz(; flupp();')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+
+        parser.document()
+
+        # FIXME: currently strings with formatted errors are collected
+        # can't check error locations yet
+        self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors)
+        self.assertEqual(parser.events, [('call', 'gnarz'), ('call', 'flupp')])
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t015calc.g b/runtime/Python3/tests/t015calc.g
new file mode 100644
index 0000000..54f17ec
--- /dev/null
+++ b/runtime/Python3/tests/t015calc.g
@@ -0,0 +1,54 @@
+grammar t015calc;
+options {
+  language = Python3;
+}
+
+@header {
+import math
+}
+
+@parser::init {
+self.reportedErrors = []
+}
+
+@parser::members {
+def emitErrorMessage(self, msg):
+    self.reportedErrors.append(msg)
+}
+
+evaluate returns [result]: r=expression {result = r};
+
+expression returns [result]: r=mult (
+    '+' r2=mult {r += r2}
+  | '-' r2=mult {r -= r2}
+  )* {result = r};
+
+mult returns [result]: r=log (
+    '*' r2=log {r *= r2}
+  | '/' r2=log {r /= r2}
+//  | '%' r2=log {r %= r2}
+  )* {result = r};
+
+log returns [result]: 'ln' r=exp {result = math.log(r)}
+    | r=exp {result = r}
+    ;
+
+exp returns [result]: r=atom ('^' r2=atom {r = math.pow(r,r2)} )? {result = r}
+    ;
+
+atom returns [result]:
+    n=INTEGER {result = int($n.text)}
+  | n=DECIMAL {result = float($n.text)} 
+  | '(' r=expression {result = r} ')'
+  | 'PI' {result = math.pi}
+  | 'E' {result = math.e}
+  ;
+
+INTEGER: DIGIT+;
+
+DECIMAL: DIGIT+ '.' DIGIT+;
+
+fragment
+DIGIT: '0'..'9';
+
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN};
diff --git a/runtime/Python3/tests/t015calc.py b/runtime/Python3/tests/t015calc.py
new file mode 100644
index 0000000..a7a5639
--- /dev/null
+++ b/runtime/Python3/tests/t015calc.py
@@ -0,0 +1,47 @@
+import antlr3
+import testbase
+import unittest
+
+class t015calc(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+
+
+    def _evaluate(self, expr, expected, errors=()):
+        cStream = antlr3.StringStream(expr)
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        result = parser.evaluate()
+        self.assertEqual(result, expected)
+        self.assertEqual(len(parser.reportedErrors), len(errors),
+                         parser.reportedErrors)
+
+
+    def testValid01(self):
+        self._evaluate("1 + 2", 3)
+
+
+    def testValid02(self):
+        self._evaluate("1 + 2 * 3", 7)
+
+
+    def testValid03(self):
+        self._evaluate("10 / 2", 5)
+
+
+    def testValid04(self):
+        self._evaluate("6 + 2*(3+1) - 4", 10)
+
+
+    def testMalformedInput(self):
+        self._evaluate("6 - (2*1", 4, ["mismatched token at pos 8"])
+        
+    # FIXME: most parse errors result in TypeErrors in action code, because
+    # rules return None, which is then added/multiplied... to integers.
+    # evaluate("6 - foo 2", 4, ["some error"])
+            
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/runtime/Python3/tests/t016actions.g b/runtime/Python3/tests/t016actions.g
new file mode 100644
index 0000000..f6def13
--- /dev/null
+++ b/runtime/Python3/tests/t016actions.g
@@ -0,0 +1,31 @@
+grammar t016actions;
+options {
+  language = Python3;
+}
+
+declaration returns [name]
+    :   functionHeader ';'
+        {$name = $functionHeader.name}
+    ;
+
+functionHeader returns [name]
+    :   type ID
+	{$name = $ID.text}
+    ;
+
+type
+    :   'int'   
+    |   'char'  
+    |   'void'
+    ;
+
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+    ;
+
+WS  :   (   ' '
+        |   '\t'
+        |   '\r'
+        |   '\n'
+        )+
+        {$channel=HIDDEN}
+    ;    
diff --git a/runtime/Python3/tests/t016actions.py b/runtime/Python3/tests/t016actions.py
new file mode 100644
index 0000000..60ea53a
--- /dev/null
+++ b/runtime/Python3/tests/t016actions.py
@@ -0,0 +1,20 @@
+import antlr3
+import testbase
+import unittest
+
+class t016actions(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def testValid(self):
+        cStream = antlr3.StringStream("int foo;")
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        name = parser.declaration()
+        self.assertEqual(name, 'foo')
+            
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t017parser.g b/runtime/Python3/tests/t017parser.g
new file mode 100644
index 0000000..20b4724
--- /dev/null
+++ b/runtime/Python3/tests/t017parser.g
@@ -0,0 +1,91 @@
+grammar t017parser;
+
+options {
+    language = Python3;
+}
+
+program
+    :   declaration+
+    ;
+
+declaration
+    :   variable
+    |   functionHeader ';'
+    |   functionHeader block
+    ;
+
+variable
+    :   type declarator ';'
+    ;
+
+declarator
+    :   ID 
+    ;
+
+functionHeader
+    :   type ID '(' ( formalParameter ( ',' formalParameter )* )? ')'
+    ;
+
+formalParameter
+    :   type declarator        
+    ;
+
+type
+    :   'int'   
+    |   'char'  
+    |   'void'
+    |   ID        
+    ;
+
+block
+    :   '{'
+            variable*
+            stat*
+        '}'
+    ;
+
+stat: forStat
+    | expr ';'      
+    | block
+    | assignStat ';'
+    | ';'
+    ;
+
+forStat
+    :   'for' '(' assignStat ';' expr ';' assignStat ')' block        
+    ;
+
+assignStat
+    :   ID '=' expr        
+    ;
+
+expr:   condExpr
+    ;
+
+condExpr
+    :   aexpr ( ('==' | '<') aexpr )?
+    ;
+
+aexpr
+    :   atom ( '+' atom )*
+    ;
+
+atom
+    : ID      
+    | INT      
+    | '(' expr ')'
+    ; 
+
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+    ;
+
+INT :	('0'..'9')+
+    ;
+
+WS  :   (   ' '
+        |   '\t'
+        |   '\r'
+        |   '\n'
+        )+
+        {$channel=HIDDEN}
+    ;    
diff --git a/runtime/Python3/tests/t017parser.py b/runtime/Python3/tests/t017parser.py
new file mode 100644
index 0000000..3add2ad
--- /dev/null
+++ b/runtime/Python3/tests/t017parser.py
@@ -0,0 +1,58 @@
+import antlr3
+import testbase
+import unittest
+
+class t017parser(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+    def parserClass(self, base):
+        class TestParser(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self.reportedErrors = []
+        
+
+            def emitErrorMessage(self, msg):
+                self.reportedErrors.append(msg)
+                
+        return TestParser
+
+
+    def testValid(self):
+        cStream = antlr3.StringStream("int foo;")
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.program()
+
+        self.assertEqual(parser.reportedErrors, [])
+
+
+    def testMalformedInput1(self):
+        cStream = antlr3.StringStream('int foo() { 1+2 }')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.program()
+
+        # FIXME: currently strings with formatted errors are collected
+        # can't check error locations yet
+        self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors)
+
+
+    def testMalformedInput2(self):
+        cStream = antlr3.StringStream('int foo() { 1+; 1+2 }')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.program()
+
+        # FIXME: currently strings with formatted errors are collected
+        # can't check error locations yet
+        self.assertEqual(len(parser.reportedErrors), 2, parser.reportedErrors)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t018llstar.g b/runtime/Python3/tests/t018llstar.g
new file mode 100644
index 0000000..40d8857
--- /dev/null
+++ b/runtime/Python3/tests/t018llstar.g
@@ -0,0 +1,111 @@
+grammar t018llstar;
+
+options {
+    language = Python3;
+}
+
+@header {
+from io import StringIO
+}
+
+@init {
+self.output = StringIO()
+}
+
+program
+    :   declaration+
+    ;
+
+/** In this rule, the functionHeader left prefix on the last two
+ *  alternatives is not LL(k) for a fixed k.  However, it is
+ *  LL(*).  The LL(*) algorithm simply scans ahead until it sees
+ *  either the ';' or the '{' of the block and then it picks
+ *  the appropriate alternative.  Lookahead can be arbitrarily
+ *  long in theory, but is <=10 in most cases.  Works great.
+ *  Use ANTLRWorks to see the lookahead use (step by Location)
+ *  and look for blue tokens in the input window pane. :)
+ */
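+/** For illustration (added example, not from the original grammar): given
+ *      int f(int a, char b);
+ *      int f(int a, char b) { }
+ *  both alternatives share the functionHeader prefix, so the decision is
+ *  only resolved by the ';' or '{' after the closing ')', which can be
+ *  arbitrarily far from where the decision starts.
+ */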
+declaration
+    :   variable
+    |   functionHeader ';'
+	{self.output.write($functionHeader.name+" is a declaration\n")}
+    |   functionHeader block
+	{self.output.write($functionHeader.name+" is a definition\n")}
+    ;
+
+variable
+    :   type declarator ';'
+    ;
+
+declarator
+    :   ID 
+    ;
+
+functionHeader returns [name]
+    :   type ID '(' ( formalParameter ( ',' formalParameter )* )? ')'
+	{$name = $ID.text}
+    ;
+
+formalParameter
+    :   type declarator        
+    ;
+
+type
+    :   'int'   
+    |   'char'  
+    |   'void'
+    |   ID        
+    ;
+
+block
+    :   '{'
+            variable*
+            stat*
+        '}'
+    ;
+
+stat: forStat
+    | expr ';'      
+    | block
+    | assignStat ';'
+    | ';'
+    ;
+
+forStat
+    :   'for' '(' assignStat ';' expr ';' assignStat ')' block        
+    ;
+
+assignStat
+    :   ID '=' expr        
+    ;
+
+expr:   condExpr
+    ;
+
+condExpr
+    :   aexpr ( ('==' | '<') aexpr )?
+    ;
+
+aexpr
+    :   atom ( '+' atom )*
+    ;
+
+atom
+    : ID      
+    | INT      
+    | '(' expr ')'
+    ; 
+
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+    ;
+
+INT :	('0'..'9')+
+    ;
+
+WS  :   (   ' '
+        |   '\t'
+        |   '\r'
+        |   '\n'
+        )+
+        {$channel=HIDDEN}
+    ;    
diff --git a/antlr-3.4/runtime/Python/tests/t018llstar.input b/runtime/Python3/tests/t018llstar.input
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t018llstar.input
copy to runtime/Python3/tests/t018llstar.input
diff --git a/antlr-3.4/runtime/Python/tests/t018llstar.output b/runtime/Python3/tests/t018llstar.output
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t018llstar.output
copy to runtime/Python3/tests/t018llstar.output
diff --git a/runtime/Python3/tests/t018llstar.py b/runtime/Python3/tests/t018llstar.py
new file mode 100644
index 0000000..9cc3e22
--- /dev/null
+++ b/runtime/Python3/tests/t018llstar.py
@@ -0,0 +1,31 @@
+import antlr3
+import testbase
+import unittest
+import os
+import sys
+from io import StringIO
+
+class t018llstar(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def testValid(self):
+        inputPath = os.path.splitext(__file__)[0] + '.input'
+        with open(inputPath) as f:
+            cStream = antlr3.StringStream(f.read())
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.program()
+
+        output = parser.output.getvalue()
+
+        outputPath = os.path.splitext(__file__)[0] + '.output'
+        with open(outputPath) as f:
+            testOutput = f.read()
+
+        self.assertEqual(output, testOutput)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t019lexer.g b/runtime/Python3/tests/t019lexer.g
new file mode 100644
index 0000000..0b986a0
--- /dev/null
+++ b/runtime/Python3/tests/t019lexer.g
@@ -0,0 +1,64 @@
+lexer grammar t019lexer;
+options {
+    language=Python3;
+    filter=true;
+}
+
+IMPORT
+	:	'import' WS name=QIDStar WS? ';'
+	;
+	
+/** Avoids having "return foo;" match as a field */
+RETURN
+	:	'return' (options {greedy=false;}:.)* ';'
+	;
+
+CLASS
+	:	'class' WS name=ID WS? ('extends' WS QID WS?)?
+		('implements' WS QID WS? (',' WS? QID WS?)*)? '{'
+	;
+	
+COMMENT
+    :   '/*' (options {greedy=false;} : . )* '*/'
+    ;
+
+STRING
+    :	'"' (options {greedy=false;}: ESC | .)* '"'
+	;
+
+CHAR
+	:	'\'' (options {greedy=false;}: ESC | .)* '\''
+	;
+
+WS  :   (' '|'\t'|'\n')+
+    ;
+
+fragment
+QID :	ID ('.' ID)*
+	;
+	
+/** QID cannot see beyond the end of the token, so using QID '.*'? somewhere
+ *  will never match the '.*': the k=1 lookahead on '.' in the QID loop makes
+ *  it keep looping.  I made this rule to compensate.
+ */
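+/** For illustration (added example, not from the original comment): given
+ *      import java.util.*;
+ *  QIDStar matches "java.util.*" as a whole, whereas a hypothetical
+ *  QID '.*'? would consume the final '.' inside the ('.' ID)* loop and
+ *  then fail on '*', so the '.*' suffix could never be matched.
+ */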
+fragment
+QIDStar
+	:	ID ('.' ID)* '.*'?
+	;
+
+fragment
+TYPE:   QID '[]'?
+    ;
+    
+fragment
+ARG :   TYPE WS ID
+    ;
+
+fragment
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+    ;
+
+fragment
+ESC	:	'\\' ('"'|'\''|'\\')
+	;
+
diff --git a/antlr-3.4/runtime/Python/tests/t019lexer.input b/runtime/Python3/tests/t019lexer.input
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t019lexer.input
copy to runtime/Python3/tests/t019lexer.input
diff --git a/runtime/Python3/tests/t019lexer.py b/runtime/Python3/tests/t019lexer.py
new file mode 100644
index 0000000..90c4fbb
--- /dev/null
+++ b/runtime/Python3/tests/t019lexer.py
@@ -0,0 +1,23 @@
+import os
+import antlr3
+import testbase
+import unittest
+
+class t019lexer(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def testValid(self):
+        inputPath = os.path.splitext(__file__)[0] + '.input'
+        with open(inputPath) as f:
+            stream = antlr3.StringStream(f.read())
+        lexer = self.getLexer(stream)
+
+        while True:
+            token = lexer.nextToken()
+            if token.type == antlr3.EOF:
+                break
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/antlr-3.4/runtime/Python/tests/t020fuzzy.input b/runtime/Python3/tests/t020fuzzy.input
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t020fuzzy.input
copy to runtime/Python3/tests/t020fuzzy.input
diff --git a/antlr-3.4/runtime/Python/tests/t020fuzzy.output b/runtime/Python3/tests/t020fuzzy.output
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t020fuzzy.output
copy to runtime/Python3/tests/t020fuzzy.output
diff --git a/runtime/Python3/tests/t020fuzzy.py b/runtime/Python3/tests/t020fuzzy.py
new file mode 100644
index 0000000..e43a12f
--- /dev/null
+++ b/runtime/Python3/tests/t020fuzzy.py
@@ -0,0 +1,35 @@
+import os
+import sys
+import antlr3
+import testbase
+import unittest
+from io import StringIO
+
+class t020fuzzy(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar('t020fuzzyLexer.g')
+        
+
+    def testValid(self):
+        inputPath = os.path.splitext(__file__)[0] + '.input'
+        with open(inputPath) as f:
+            stream = antlr3.StringStream(f.read())
+        lexer = self.getLexer(stream)
+
+        while True:
+            token = lexer.nextToken()
+            if token.type == antlr3.EOF:
+                break
+
+
+        output = lexer.output.getvalue()
+
+        outputPath = os.path.splitext(__file__)[0] + '.output'
+        with open(outputPath) as f:
+            testOutput = f.read()
+
+        self.assertEqual(output, testOutput)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t020fuzzyLexer.g b/runtime/Python3/tests/t020fuzzyLexer.g
new file mode 100644
index 0000000..f2aeaf5
--- /dev/null
+++ b/runtime/Python3/tests/t020fuzzyLexer.g
@@ -0,0 +1,96 @@
+lexer grammar t020fuzzyLexer;
+options {
+    language=Python3;
+    filter=true;
+}
+
+@header {
+from io import StringIO
+}
+
+@init {
+self.output = StringIO()
+}
+
+IMPORT
+	:	'import' WS name=QIDStar WS? ';'
+	;
+	
+/** Avoids having "return foo;" match as a field */
+RETURN
+	:	'return' (options {greedy=false;}:.)* ';'
+	;
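+/* For illustration (added example, not from the original grammar): without
+ * RETURN, the input "return foo;" would be picked up by the FIELD rule
+ * below (TYPE="return", name="foo") and reported as "found var foo".
+ */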
+
+CLASS
+	:	'class' WS name=ID WS? ('extends' WS QID WS?)?
+		('implements' WS QID WS? (',' WS? QID WS?)*)? '{'
+        {self.output.write("found class "+$name.text+"\n")}
+	;
+	
+METHOD
+    :   TYPE WS name=ID WS? '(' ( ARG WS? (',' WS? ARG WS?)* )? ')' WS? 
+       ('throws' WS QID WS? (',' WS? QID WS?)*)? '{'
+        {self.output.write("found method "+$name.text+"\n");}
+    ;
+
+FIELD
+    :   TYPE WS name=ID '[]'? WS? (';'|'=')
+        {self.output.write("found var "+$name.text+"\n");}
+    ;
+
+STAT:	('if'|'while'|'switch'|'for') WS? '(' ;
+	
+CALL
+    :   name=QID WS? '('
+        {self.output.write("found call "+$name.text+"\n");}
+    ;
+
+COMMENT
+    :   '/*' (options {greedy=false;} : . )* '*/'
+        {self.output.write("found comment "+self.getText()+"\n");}
+    ;
+
+SL_COMMENT
+    :   '//' (options {greedy=false;} : . )* '\n'
+        {self.output.write("found // comment "+self.getText()+"\n");}
+    ;
+	
+STRING
+	:	'"' (options {greedy=false;}: ESC | .)* '"'
+	;
+
+CHAR
+	:	'\'' (options {greedy=false;}: ESC | .)* '\''
+	;
+
+WS  :   (' '|'\t'|'\n')+
+    ;
+
+fragment
+QID :	ID ('.' ID)*
+	;
+	
+/** QID cannot see beyond the end of the token, so using QID '.*'? somewhere
+ *  will never match the '.*': the k=1 lookahead on '.' in the QID loop makes
+ *  it keep looping.  I made this rule to compensate.
+ */
+fragment
+QIDStar
+	:	ID ('.' ID)* '.*'?
+	;
+
+fragment
+TYPE:   QID '[]'?
+    ;
+    
+fragment
+ARG :   TYPE WS ID
+    ;
+
+fragment
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+    ;
+
+fragment
+ESC	:	'\\' ('"'|'\''|'\\')
+	;
diff --git a/runtime/Python3/tests/t021hoist.g b/runtime/Python3/tests/t021hoist.g
new file mode 100644
index 0000000..4b33c4f
--- /dev/null
+++ b/runtime/Python3/tests/t021hoist.g
@@ -0,0 +1,37 @@
+grammar t021hoist;
+options {
+    language=Python3;
+}
+
+/* When this is True, 'enum' is seen as a keyword.  When False, it's an identifier. */
+@parser::init {
+self.enableEnum = False
+}
+
+stat returns [enumIs]
+    : identifier    {enumIs = "ID"}
+    | enumAsKeyword {enumIs = "keyword"}
+    ;
+
+identifier
+    : ID
+    | enumAsID
+    ;
+
+enumAsKeyword : {self.enableEnum}? 'enum' ;
+
+enumAsID : {not self.enableEnum}? 'enum' ;
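+
+/* For illustration (added usage sketch, not part of the original grammar):
+ *     parser.enableEnum = True    # parser.stat() -> "keyword"
+ *     parser.enableEnum = False   # parser.stat() -> "ID"
+ * The predicates above are hoisted into the prediction decision for stat,
+ * which is what t021hoist.py exercises.
+ */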
+
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+    ;
+
+INT :	('0'..'9')+
+    ;
+
+WS  :   (   ' '
+        |   '\t'
+        |   '\r'
+        |   '\n'
+        )+
+        {$channel=HIDDEN}
+    ;    
diff --git a/runtime/Python3/tests/t021hoist.py b/runtime/Python3/tests/t021hoist.py
new file mode 100644
index 0000000..571a1de
--- /dev/null
+++ b/runtime/Python3/tests/t021hoist.py
@@ -0,0 +1,38 @@
+import os
+import sys
+import antlr3
+import testbase
+import unittest
+
+
+class t021hoist(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def testValid1(self):
+        cStream = antlr3.StringStream('enum')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.enableEnum = True
+        enumIs = parser.stat()
+
+        self.assertEqual(enumIs, 'keyword')
+
+
+    def testValid2(self):
+        cStream = antlr3.StringStream('enum')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.enableEnum = False
+        enumIs = parser.stat()
+
+        self.assertEqual(enumIs, 'ID')
+
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/runtime/Python3/tests/t022scopes.g b/runtime/Python3/tests/t022scopes.g
new file mode 100644
index 0000000..fffeac5
--- /dev/null
+++ b/runtime/Python3/tests/t022scopes.g
@@ -0,0 +1,127 @@
+grammar t022scopes;
+
+options {
+    language=Python3;
+}
+
+/* global scopes */
+
+scope aScope {
+names
+}
+
+a
+scope aScope;
+    :   {$aScope::names = [];} ID*
+    ;
+
+
+/* rule scopes, from the book, final beta, p.147 */
+
+b[v]
+scope {x}
+    : {$b::x = v;} b2
+    ;
+
+b2
+    : b3
+    ;
+
+b3 
+    : {$b::x}?=> ID // only visible if b was called with True
+    | NUM
+    ;
+
+
+/* rule scopes, from the book, final beta, p.148 */
+
+c returns [res]
+scope {
+    symbols
+}
+@init {
+    $c::symbols = set();
+}
+    : '{' c1* c2+ '}'
+        { $res = $c::symbols; }
+    ;
+
+c1
+    : 'int' ID {$c::symbols.add($ID.text)} ';'
+    ;
+
+c2
+    : ID '=' NUM ';'
+        {
+            if $ID.text not in $c::symbols:
+                raise RuntimeError($ID.text)
+        }
+    ;
+
+/* recursive rule scopes, from the book, final beta, p.150 */
+
+d returns [res]
+scope {
+    symbols
+}
+@init {
+    $d::symbols = set();
+}
+    : '{' d1* d2* '}'
+        { $res = $d::symbols; }
+    ;
+
+d1
+    : 'int' ID {$d::symbols.add($ID.text)} ';'
+    ;
+
+d2
+    : ID '=' NUM ';'
+        {
+            for s in reversed(range(len($d))):
+                if $ID.text in $d[s]::symbols:
+                    break
+            else:
+                raise RuntimeError($ID.text)
+        }
+    | d
+    ;
+
+/* recursive rule scopes, access bottom-most scope */
+
+e returns [res]
+scope {
+    a
+}
+@after {
+    $res = $e::a;
+}
+    : NUM { $e[0]::a = int($NUM.text); }
+    | '{' e '}'
+    ;
+
+
+/* recursive rule scopes, access with negative index */
+
+f returns [res]
+scope {
+    a
+}
+@after {
+    $res = $f::a;
+}
+    : NUM { $f[-2]::a = int($NUM.text); }
+    | '{' f '}'
+    ;
+
+
+/* tokens */
+
+ID  :   ('a'..'z')+
+    ;
+
+NUM :   ('0'..'9')+
+    ;
+
+WS  :   (' '|'\n'|'\r')+ {$channel=HIDDEN}
+    ;
diff --git a/runtime/Python3/tests/t022scopes.py b/runtime/Python3/tests/t022scopes.py
new file mode 100644
index 0000000..5dc1f2c
--- /dev/null
+++ b/runtime/Python3/tests/t022scopes.py
@@ -0,0 +1,159 @@
+import antlr3
+import testbase
+import unittest
+import textwrap
+
+
+class t022scopes(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def parserClass(self, base):
+        class TParser(base):
+            def emitErrorMessage(self, msg):
+                # report errors to /dev/null
+                pass
+
+            def reportError(self, re):
+                # no error recovery yet, just crash!
+                raise re
+
+        return TParser
+
+        
+    def testa1(self):
+        cStream = antlr3.StringStream('foobar')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.a()
+        
+
+    def testb1(self):
+        cStream = antlr3.StringStream('foobar')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+
+        self.assertRaises(antlr3.RecognitionException, parser.b, False)
+        
+
+    def testb2(self):
+        cStream = antlr3.StringStream('foobar')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.b(True)
+        
+
+    def testc1(self):
+        cStream = antlr3.StringStream(
+            textwrap.dedent('''\
+            {
+                int i;
+                int j;
+                i = 0;
+            }
+            '''))
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        symbols = parser.c()
+
+        self.assertEqual(
+            symbols,
+            set(['i', 'j'])
+            )
+        
+
+    def testc2(self):
+        cStream = antlr3.StringStream(
+            textwrap.dedent('''\
+            {
+                int i;
+                int j;
+                i = 0;
+                x = 4;
+            }
+            '''))
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+
+        self.assertRaisesRegex(RuntimeError, r'x', parser.c)
+
+
+    def testd1(self):
+        cStream = antlr3.StringStream(
+            textwrap.dedent('''\
+            {
+                int i;
+                int j;
+                i = 0;
+                {
+                    int i;
+                    int x;
+                    x = 5;
+                }
+            }
+            '''))
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        symbols = parser.d()
+
+        self.assertEqual(
+            symbols,
+            set(['i', 'j'])
+            )
+
+
+    def teste1(self):
+        cStream = antlr3.StringStream(
+            textwrap.dedent('''\
+            { { { { 12 } } } }
+            '''))
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        res = parser.e()
+
+        self.assertEqual(res, 12)
+
+
+    def testf1(self):
+        cStream = antlr3.StringStream(
+            textwrap.dedent('''\
+            { { { { 12 } } } }
+            '''))
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        res = parser.f()
+
+        self.assertIsNone(res)
+
+
+    def testf2(self):
+        cStream = antlr3.StringStream(
+            textwrap.dedent('''\
+            { { 12 } }
+            '''))
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        res = parser.f()
+
+        self.assertIsNone(res)
+
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t023scopes.g b/runtime/Python3/tests/t023scopes.g
new file mode 100644
index 0000000..bc94b8d
--- /dev/null
+++ b/runtime/Python3/tests/t023scopes.g
@@ -0,0 +1,18 @@
+grammar t023scopes;
+
+options {
+    language=Python3;
+}
+
+prog
+scope {
+name
+}
+    :   ID {$prog::name=$ID.text;}
+    ;
+
+ID  :   ('a'..'z')+
+    ;
+
+WS  :   (' '|'\n'|'\r')+ {$channel=HIDDEN}
+    ;
diff --git a/antlr-3.4/runtime/Python/tests/t023scopes.py b/runtime/Python3/tests/t023scopes.py
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t023scopes.py
copy to runtime/Python3/tests/t023scopes.py
diff --git a/runtime/Python3/tests/t024finally.g b/runtime/Python3/tests/t024finally.g
new file mode 100644
index 0000000..a744de3
--- /dev/null
+++ b/runtime/Python3/tests/t024finally.g
@@ -0,0 +1,19 @@
+grammar t024finally;
+
+options {
+    language=Python3;
+}
+
+prog returns [events]
+@init {events = []}
+@after {events.append('after')}
+    :   ID {raise RuntimeError}
+    ;
+    catch [RuntimeError] {events.append('catch')}
+    finally {events.append('finally')}
+
+ID  :   ('a'..'z')+
+    ;
+
+WS  :   (' '|'\n'|'\r')+ {$channel=HIDDEN}
+    ;
diff --git a/runtime/Python3/tests/t024finally.py b/runtime/Python3/tests/t024finally.py
new file mode 100644
index 0000000..24d0b71
--- /dev/null
+++ b/runtime/Python3/tests/t024finally.py
@@ -0,0 +1,23 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t024finally(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def testValid1(self):
+        cStream = antlr3.StringStream('foobar')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        events = parser.prog()
+
+        self.assertEqual(events, ['catch', 'finally'])
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/runtime/Python3/tests/t025lexerRulePropertyRef.g b/runtime/Python3/tests/t025lexerRulePropertyRef.g
new file mode 100644
index 0000000..0509375
--- /dev/null
+++ b/runtime/Python3/tests/t025lexerRulePropertyRef.g
@@ -0,0 +1,18 @@
+lexer grammar t025lexerRulePropertyRef;
+options {
+  language = Python3;
+}
+
+@lexer::init {
+self.properties = []
+}
+
+IDENTIFIER: 
+        ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+        {
+self.properties.append(
+    ($text, $type, $line, $pos, $index, $channel, $start, $stop)
+)
+        }
+    ;
+WS: (' ' | '\n')+;
diff --git a/runtime/Python3/tests/t025lexerRulePropertyRef.py b/runtime/Python3/tests/t025lexerRulePropertyRef.py
new file mode 100644
index 0000000..5b23c25
--- /dev/null
+++ b/runtime/Python3/tests/t025lexerRulePropertyRef.py
@@ -0,0 +1,54 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t025lexerRulePropertyRef(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def testValid1(self):
+        stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
+        lexer = self.getLexer(stream)
+
+        while True:
+            token = lexer.nextToken()
+            if token.type == antlr3.EOF:
+                break
+
+        self.assertEqual(len(lexer.properties), 3, lexer.properties)
+
+        text, type, line, pos, index, channel, start, stop = lexer.properties[0]
+        self.assertEqual(text, 'foobar', lexer.properties[0])
+        self.assertEqual(type, self.lexerModule.IDENTIFIER, lexer.properties[0])
+        self.assertEqual(line, 1, lexer.properties[0])
+        self.assertEqual(pos, 0, lexer.properties[0])
+        self.assertEqual(index, -1, lexer.properties[0])
+        self.assertEqual(channel, antlr3.DEFAULT_CHANNEL, lexer.properties[0])
+        self.assertEqual(start, 0, lexer.properties[0])
+        self.assertEqual(stop, 5, lexer.properties[0])
+
+        text, type, line, pos, index, channel, start, stop = lexer.properties[1]
+        self.assertEqual(text, '_Ab98', lexer.properties[1])
+        self.assertEqual(type, self.lexerModule.IDENTIFIER, lexer.properties[1])
+        self.assertEqual(line, 1, lexer.properties[1])
+        self.assertEqual(pos, 7, lexer.properties[1])
+        self.assertEqual(index, -1, lexer.properties[1])
+        self.assertEqual(channel, antlr3.DEFAULT_CHANNEL, lexer.properties[1])
+        self.assertEqual(start, 7, lexer.properties[1])
+        self.assertEqual(stop, 11, lexer.properties[1])
+
+        text, type, line, pos, index, channel, start, stop = lexer.properties[2]
+        self.assertEqual(text, 'A12sdf', lexer.properties[2])
+        self.assertEqual(type, self.lexerModule.IDENTIFIER, lexer.properties[2])
+        self.assertEqual(line, 2, lexer.properties[2])
+        self.assertEqual(pos, 1, lexer.properties[2])
+        self.assertEqual(index, -1, lexer.properties[2])
+        self.assertEqual(channel, antlr3.DEFAULT_CHANNEL, lexer.properties[2])
+        self.assertEqual(start, 15, lexer.properties[2])
+        self.assertEqual(stop, 20, lexer.properties[2])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t026actions.g b/runtime/Python3/tests/t026actions.g
new file mode 100644
index 0000000..124be34
--- /dev/null
+++ b/runtime/Python3/tests/t026actions.g
@@ -0,0 +1,39 @@
+grammar t026actions;
+options {
+  language = Python3;
+}
+
+@lexer::init {
+    self.foobar = 'attribute;'
+}
+
+prog
+@init {
+    self.capture('init;')
+}
+@after {
+    self.capture('after;')
+}
+    :   IDENTIFIER EOF
+    ;
+    catch [ RecognitionException as exc ] {
+        self.capture('catch;')
+        raise
+    }
+    finally {
+        self.capture('finally;')
+    }
+
+
+IDENTIFIER
+    : ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+        {
+            # a comment
+          self.capture('action;')
+            self.capture('{!r} {!r} {!r} {!r} {!r} {!r} {!r} {!r};'.format($text, $type, $line, $pos, $index, $channel, $start, $stop))
+            if True:
+                self.capture(self.foobar)
+        }
+    ;
+
+WS: (' ' | '\n')+;
diff --git a/runtime/Python3/tests/t026actions.py b/runtime/Python3/tests/t026actions.py
new file mode 100644
index 0000000..20dc88b
--- /dev/null
+++ b/runtime/Python3/tests/t026actions.py
@@ -0,0 +1,68 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t026actions(testbase.ANTLRTest):
+    def parserClass(self, base):
+        class TParser(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._errors = []
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def emitErrorMessage(self, msg):
+                self._errors.append(msg)
+
+            
+        return TParser
+
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._errors = []
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def emitErrorMessage(self, msg):
+                self._errors.append(msg)
+
+            
+        return TLexer
+
+
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def testValid1(self):
+        cStream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.prog()
+
+        self.assertEqual(
+            parser._output,
+            'init;after;finally;')
+        self.assertEqual(
+            lexer._output,
+            "action;'foobar' 4 1 0 -1 0 0 5;attribute;action;"
+            "'_Ab98' 4 1 7 -1 0 7 11;attribute;action;"
+            "'A12sdf' 4 2 1 -1 0 15 20;attribute;")
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t027eof.g b/runtime/Python3/tests/t027eof.g
new file mode 100644
index 0000000..5c633a2
--- /dev/null
+++ b/runtime/Python3/tests/t027eof.g
@@ -0,0 +1,8 @@
+lexer grammar t027eof;
+
+options {
+    language=Python3;
+}
+
+END: EOF;
+SPACE: ' ';
diff --git a/runtime/Python3/tests/t027eof.py b/runtime/Python3/tests/t027eof.py
new file mode 100644
index 0000000..cf543b5
--- /dev/null
+++ b/runtime/Python3/tests/t027eof.py
@@ -0,0 +1,25 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t027eof(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    @testbase.broken("That's not how EOF is supposed to be used", Exception)
+    def testValid1(self):
+        cStream = antlr3.StringStream(' ')
+        lexer = self.getLexer(cStream)
+        
+        tok = lexer.nextToken()
+        self.assertEqual(tok.type, self.lexerModule.SPACE, tok)
+        
+        tok = lexer.nextToken()
+        self.assertEqual(tok.type, self.lexerModule.END, tok)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/antlr-3.4/runtime/Python/tests/t028labelExpr.g.disabled b/runtime/Python3/tests/t028labelExpr.g.disabled
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t028labelExpr.g.disabled
copy to runtime/Python3/tests/t028labelExpr.g.disabled
diff --git a/runtime/Python3/tests/t029synpredgate.g b/runtime/Python3/tests/t029synpredgate.g
new file mode 100644
index 0000000..169892a
--- /dev/null
+++ b/runtime/Python3/tests/t029synpredgate.g
@@ -0,0 +1,16 @@
+lexer grammar t029synpredgate;
+options {
+  language = Python3;
+}
+
+FOO
+    : ('ab')=> A
+    | ('ac')=> B
+    ;
+
+fragment
+A: 'a';
+
+fragment
+B: 'a';
+
diff --git a/antlr-3.4/runtime/Python/tests/t029synpredgate.py b/runtime/Python3/tests/t029synpredgate.py
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t029synpredgate.py
copy to runtime/Python3/tests/t029synpredgate.py
diff --git a/runtime/Python3/tests/t030specialStates.g b/runtime/Python3/tests/t030specialStates.g
new file mode 100644
index 0000000..51451c4
--- /dev/null
+++ b/runtime/Python3/tests/t030specialStates.g
@@ -0,0 +1,26 @@
+grammar t030specialStates;
+options {
+  language = Python3;
+}
+
+@init {
+self.cond = True
+}
+
+@members {
+def recover(self, input, re):
+    # no error recovery yet, just crash!
+    raise re
+}
+
+r
+    : ( {self.cond}? NAME
+        | {not self.cond}? NAME WS+ NAME
+        )
+        ( WS+ NAME )?
+        EOF
+    ;
+
+NAME: ('a'..'z') ('a'..'z' | '0'..'9')+;
+NUMBER: ('0'..'9')+;
+WS: ' '+;
diff --git a/antlr-3.4/runtime/Python/tests/t030specialStates.py b/runtime/Python3/tests/t030specialStates.py
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t030specialStates.py
copy to runtime/Python3/tests/t030specialStates.py
diff --git a/runtime/Python3/tests/t031emptyAlt.g b/runtime/Python3/tests/t031emptyAlt.g
new file mode 100644
index 0000000..de7d46e
--- /dev/null
+++ b/runtime/Python3/tests/t031emptyAlt.g
@@ -0,0 +1,16 @@
+grammar t031emptyAlt;
+options {
+  language = Python3;
+}
+
+r
+    : NAME 
+        ( {self.cond}?=> WS+ NAME
+        | 
+        )
+        EOF
+    ;
+
+NAME: ('a'..'z') ('a'..'z' | '0'..'9')+;
+NUMBER: ('0'..'9')+;
+WS: ' '+;
diff --git a/antlr-3.4/runtime/Python/tests/t031emptyAlt.py b/runtime/Python3/tests/t031emptyAlt.py
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t031emptyAlt.py
copy to runtime/Python3/tests/t031emptyAlt.py
diff --git a/runtime/Python3/tests/t032subrulePredict.g b/runtime/Python3/tests/t032subrulePredict.g
new file mode 100644
index 0000000..557f51f
--- /dev/null
+++ b/runtime/Python3/tests/t032subrulePredict.g
@@ -0,0 +1,8 @@
+grammar t032subrulePredict;
+options {
+  language = Python3;
+}
+
+a: 'BEGIN' b WS+ 'END';
+b: ( WS+ 'A' )+;
+WS: ' ';
diff --git a/antlr-3.4/runtime/Python/tests/t032subrulePredict.py b/runtime/Python3/tests/t032subrulePredict.py
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t032subrulePredict.py
copy to runtime/Python3/tests/t032subrulePredict.py
diff --git a/runtime/Python3/tests/t033backtracking.g b/runtime/Python3/tests/t033backtracking.g
new file mode 100644
index 0000000..447fac3
--- /dev/null
+++ b/runtime/Python3/tests/t033backtracking.g
@@ -0,0 +1,515 @@
+grammar t033backtracking;
+options {
+    language=Python3;
+    backtrack=true;
+    memoize=true;
+    k=2;
+}
+
+scope Symbols {
+	types;
+}
+
+@members {
+    def isTypeName(self, name):
+        for scope in reversed(self.Symbols_stack):
+            if name in scope.types:
+                return True
+
+        return False
+
+}
+
+translation_unit
+scope Symbols; // entire file is a scope
+@init {
+  $Symbols::types = set()
+}
+	: external_declaration+
+	;
+
+/** Either a function definition or any other kind of C decl/def.
+ *  The LL(*) analysis algorithm fails to deal with this due to
+ *  recursion in the declarator rules.  I'm putting in a
+ *  manual predicate here so that we don't backtrack over
+ *  the entire function.  Further, you get better errors,
+ *  since errors within the function itself don't make it fail
+ *  to predict that it's a function.  Weird errors previously.
+ *  Remember: the goal is to avoid backtracking like the plague
+ *  because it makes debugging, actions, and errors harder.
+ *
+ *  Note that k=1 results in a much smaller predictor for the 
+ *  fixed lookahead; k=2 made a few extra thousand lines. ;)
+ *  I'll have to optimize that in the future.
+ */
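+/** For illustration (added example, not from the original comment): for
+ *      int f() { ...
+ *  the syntactic predicate succeeds as soon as it reaches the '{', so
+ *  function_definition is chosen without backtracking over the body;
+ *  for
+ *      int f();
+ *  the predicate fails at ';' and the declaration alternative is taken.
+ */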
+external_declaration
+options {k=1;}
+	: ( declaration_specifiers? declarator declaration* '{' )=> function_definition
+	| declaration
+	;
+
+function_definition
+scope Symbols; // put parameters and locals into same scope for now
+@init {
+  $Symbols::types = set()
+}
+	:	declaration_specifiers? declarator
+// 		(	declaration+ compound_statement	// K&R style
+// 		|	compound_statement				// ANSI style
+// 		)
+	;
+
+declaration
+scope {
+  isTypedef;
+}
+@init {
+  $declaration::isTypedef = False
+}
+	: 'typedef' declaration_specifiers? {$declaration::isTypedef = True}
+	  init_declarator_list ';' // special case, looking for typedef	
+	| declaration_specifiers init_declarator_list? ';'
+	;
+
+declaration_specifiers
+	:   (   storage_class_specifier
+		|   type_specifier
+        |   type_qualifier
+        )+
+	;
+
+init_declarator_list
+	: init_declarator (',' init_declarator)*
+	;
+
+init_declarator
+	: declarator //('=' initializer)?
+	;
+
+storage_class_specifier
+	: 'extern'
+	| 'static'
+	| 'auto'
+	| 'register'
+	;
+
+type_specifier
+	: 'void'
+	| 'char'
+	| 'short'
+	| 'int'
+	| 'long'
+	| 'float'
+	| 'double'
+	| 'signed'
+	| 'unsigned'
+// 	| struct_or_union_specifier
+// 	| enum_specifier
+	| type_id
+	;
+
+type_id
+    :   {self.isTypeName(self.input.LT(1).getText())}? IDENTIFIER
+//    	{System.out.println($IDENTIFIER.text+" is a type");}
+    ;
+
+// struct_or_union_specifier
+// options {k=3;}
+// scope Symbols; // structs are scopes
+// @init {
+//   $Symbols::types = set()
+// }
+// 	: struct_or_union IDENTIFIER? '{' struct_declaration_list '}'
+// 	| struct_or_union IDENTIFIER
+// 	;
+
+// struct_or_union
+// 	: 'struct'
+// 	| 'union'
+// 	;
+
+// struct_declaration_list
+// 	: struct_declaration+
+// 	;
+
+// struct_declaration
+// 	: specifier_qualifier_list struct_declarator_list ';'
+// 	;
+
+// specifier_qualifier_list
+// 	: ( type_qualifier | type_specifier )+
+// 	;
+
+// struct_declarator_list
+// 	: struct_declarator (',' struct_declarator)*
+// 	;
+
+// struct_declarator
+// 	: declarator (':' constant_expression)?
+// 	| ':' constant_expression
+// 	;
+
+// enum_specifier
+// options {k=3;}
+// 	: 'enum' '{' enumerator_list '}'
+// 	| 'enum' IDENTIFIER '{' enumerator_list '}'
+// 	| 'enum' IDENTIFIER
+// 	;
+
+// enumerator_list
+// 	: enumerator (',' enumerator)*
+// 	;
+
+// enumerator
+// 	: IDENTIFIER ('=' constant_expression)?
+// 	;
+
+type_qualifier
+	: 'const'
+	| 'volatile'
+	;
+
+declarator
+	: pointer? direct_declarator
+	| pointer
+	;
+
+direct_declarator
+	:   (	IDENTIFIER
+			{
+			if $declaration and $declaration::isTypedef:
+				$Symbols::types.add($IDENTIFIER.text)
+				print("define type "+$IDENTIFIER.text)
+			}
+		|	'(' declarator ')'
+		)
+        declarator_suffix*
+	;
+
+declarator_suffix
+	:   /*'[' constant_expression ']'
+    |*/   '[' ']'
+//     |   '(' parameter_type_list ')'
+//     |   '(' identifier_list ')'
+    |   '(' ')'
+	;
+
+pointer
+	: '*' type_qualifier+ pointer?
+	| '*' pointer
+	| '*'
+	;
+
+// parameter_type_list
+// 	: parameter_list (',' '...')?
+// 	;
+
+// parameter_list
+// 	: parameter_declaration (',' parameter_declaration)*
+// 	;
+
+// parameter_declaration
+// 	: declaration_specifiers (declarator|abstract_declarator)*
+// 	;
+
+// identifier_list
+// 	: IDENTIFIER (',' IDENTIFIER)*
+// 	;
+
+// type_name
+// 	: specifier_qualifier_list abstract_declarator?
+// 	;
+
+// abstract_declarator
+// 	: pointer direct_abstract_declarator?
+// 	| direct_abstract_declarator
+// 	;
+
+// direct_abstract_declarator
+// 	:	( '(' abstract_declarator ')' | abstract_declarator_suffix ) abstract_declarator_suffix*
+// 	;
+
+// abstract_declarator_suffix
+// 	:	'[' ']'
+// 	|	'[' constant_expression ']'
+// 	|	'(' ')'
+// 	|	'(' parameter_type_list ')'
+// 	;
+	
+// initializer
+// 	: assignment_expression
+// 	| '{' initializer_list ','? '}'
+// 	;
+
+// initializer_list
+// 	: initializer (',' initializer)*
+// 	;
+
+// // E x p r e s s i o n s
+
+// argument_expression_list
+// 	:   assignment_expression (',' assignment_expression)*
+// 	;
+
+// additive_expression
+// 	: (multiplicative_expression) ('+' multiplicative_expression | '-' multiplicative_expression)*
+// 	;
+
+// multiplicative_expression
+// 	: (cast_expression) ('*' cast_expression | '/' cast_expression | '%' cast_expression)*
+// 	;
+
+// cast_expression
+// 	: '(' type_name ')' cast_expression
+// 	| unary_expression
+// 	;
+
+// unary_expression
+// 	: postfix_expression
+// 	| '++' unary_expression
+// 	| '--' unary_expression
+// 	| unary_operator cast_expression
+// 	| 'sizeof' unary_expression
+// 	| 'sizeof' '(' type_name ')'
+// 	;
+
+// postfix_expression
+// 	:   primary_expression
+//         (   '[' expression ']'
+//         |   '(' ')'
+//         |   '(' argument_expression_list ')'
+//         |   '.' IDENTIFIER
+//         |   '*' IDENTIFIER
+//         |   '->' IDENTIFIER
+//         |   '++'
+//         |   '--'
+//         )*
+// 	;
+
+// unary_operator
+// 	: '&'
+// 	| '*'
+// 	| '+'
+// 	| '-'
+// 	| '~'
+// 	| '!'
+// 	;
+
+// primary_expression
+// 	: IDENTIFIER
+// 	| constant
+// 	| '(' expression ')'
+// 	;
+
+// constant
+//     :   HEX_LITERAL
+//     |   OCTAL_LITERAL
+//     |   DECIMAL_LITERAL
+//     |	CHARACTER_LITERAL
+// 	|	STRING_LITERAL
+//     |   FLOATING_POINT_LITERAL
+//     ;
+
+// /////
+
+// expression
+// 	: assignment_expression (',' assignment_expression)*
+// 	;
+
+// constant_expression
+// 	: conditional_expression
+// 	;
+
+// assignment_expression
+// 	: lvalue assignment_operator assignment_expression
+// 	| conditional_expression
+// 	;
+	
+// lvalue
+// 	:	unary_expression
+// 	;
+
+// assignment_operator
+// 	: '='
+// 	| '*='
+// 	| '/='
+// 	| '%='
+// 	| '+='
+// 	| '-='
+// 	| '<<='
+// 	| '>>='
+// 	| '&='
+// 	| '^='
+// 	| '|='
+// 	;
+
+// conditional_expression
+// 	: logical_or_expression ('?' expression ':' conditional_expression)?
+// 	;
+
+// logical_or_expression
+// 	: logical_and_expression ('||' logical_and_expression)*
+// 	;
+
+// logical_and_expression
+// 	: inclusive_or_expression ('&&' inclusive_or_expression)*
+// 	;
+
+// inclusive_or_expression
+// 	: exclusive_or_expression ('|' exclusive_or_expression)*
+// 	;
+
+// exclusive_or_expression
+// 	: and_expression ('^' and_expression)*
+// 	;
+
+// and_expression
+// 	: equality_expression ('&' equality_expression)*
+// 	;
+// equality_expression
+// 	: relational_expression (('=='|'!=') relational_expression)*
+// 	;
+
+// relational_expression
+// 	: shift_expression (('<'|'>'|'<='|'>=') shift_expression)*
+// 	;
+
+// shift_expression
+// 	: additive_expression (('<<'|'>>') additive_expression)*
+// 	;
+
+// // S t a t e m e n t s
+
+// statement
+// 	: labeled_statement
+// 	| compound_statement
+// 	| expression_statement
+// 	| selection_statement
+// 	| iteration_statement
+// 	| jump_statement
+// 	;
+
+// labeled_statement
+// 	: IDENTIFIER ':' statement
+// 	| 'case' constant_expression ':' statement
+// 	| 'default' ':' statement
+// 	;
+
+// compound_statement
+// scope Symbols; // blocks have a scope of symbols
+// @init {
+//   $Symbols::types = {}
+// }
+// 	: '{' declaration* statement_list? '}'
+// 	;
+
+// statement_list
+// 	: statement+
+// 	;
+
+// expression_statement
+// 	: ';'
+// 	| expression ';'
+// 	;
+
+// selection_statement
+// 	: 'if' '(' expression ')' statement (options {k=1; backtrack=false;}:'else' statement)?
+// 	| 'switch' '(' expression ')' statement
+// 	;
+
+// iteration_statement
+// 	: 'while' '(' expression ')' statement
+// 	| 'do' statement 'while' '(' expression ')' ';'
+// 	| 'for' '(' expression_statement expression_statement expression? ')' statement
+// 	;
+
+// jump_statement
+// 	: 'goto' IDENTIFIER ';'
+// 	| 'continue' ';'
+// 	| 'break' ';'
+// 	| 'return' ';'
+// 	| 'return' expression ';'
+// 	;
+
+IDENTIFIER
+	:	LETTER (LETTER|'0'..'9')*
+	;
+	
+fragment
+LETTER
+	:	'$'
+	|	'A'..'Z'
+	|	'a'..'z'
+	|	'_'
+	;
+
+CHARACTER_LITERAL
+    :   '\'' ( EscapeSequence | ~('\''|'\\') ) '\''
+    ;
+
+STRING_LITERAL
+    :  '"' ( EscapeSequence | ~('\\'|'"') )* '"'
+    ;
+
+HEX_LITERAL : '0' ('x'|'X') HexDigit+ IntegerTypeSuffix? ;
+
+DECIMAL_LITERAL : ('0' | '1'..'9' '0'..'9'*) IntegerTypeSuffix? ;
+
+OCTAL_LITERAL : '0' ('0'..'7')+ IntegerTypeSuffix? ;
+
+fragment
+HexDigit : ('0'..'9'|'a'..'f'|'A'..'F') ;
+
+fragment
+IntegerTypeSuffix
+	:	('u'|'U')? ('l'|'L')
+	|	('u'|'U')  ('l'|'L')?
+	;
+
+FLOATING_POINT_LITERAL
+    :   ('0'..'9')+ '.' ('0'..'9')* Exponent? FloatTypeSuffix?
+    |   '.' ('0'..'9')+ Exponent? FloatTypeSuffix?
+    |   ('0'..'9')+ Exponent FloatTypeSuffix?
+    |   ('0'..'9')+ Exponent? FloatTypeSuffix
+	;
+
+fragment
+Exponent : ('e'|'E') ('+'|'-')? ('0'..'9')+ ;
+
+fragment
+FloatTypeSuffix : ('f'|'F'|'d'|'D') ;
+
+fragment
+EscapeSequence
+    :   '\\' ('b'|'t'|'n'|'f'|'r'|'\"'|'\''|'\\')
+    |   OctalEscape
+    ;
+
+fragment
+OctalEscape
+    :   '\\' ('0'..'3') ('0'..'7') ('0'..'7')
+    |   '\\' ('0'..'7') ('0'..'7')
+    |   '\\' ('0'..'7')
+    ;
+
+fragment
+UnicodeEscape
+    :   '\\' 'u' HexDigit HexDigit HexDigit HexDigit
+    ;
+
+WS  :  (' '|'\r'|'\t'|'\u000C'|'\n') {$channel=HIDDEN;}
+    ;
+
+COMMENT
+    :   '/*' ( options {greedy=false;} : . )* '*/' {$channel=HIDDEN;}
+    ;
+
+LINE_COMMENT
+    : '//' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;}
+    ;
+
+// ignore #line info for now
+LINE_COMMAND 
+    : '#' ~('\n'|'\r')* '\r'? '\n' {$channel=HIDDEN;}
+    ;
+
diff --git a/antlr-3.4/runtime/Python/tests/t033backtracking.py b/runtime/Python3/tests/t033backtracking.py
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t033backtracking.py
copy to runtime/Python3/tests/t033backtracking.py
diff --git a/runtime/Python3/tests/t034tokenLabelPropertyRef.g b/runtime/Python3/tests/t034tokenLabelPropertyRef.g
new file mode 100644
index 0000000..5a0a35e
--- /dev/null
+++ b/runtime/Python3/tests/t034tokenLabelPropertyRef.g
@@ -0,0 +1,30 @@
+grammar t034tokenLabelPropertyRef;
+options {
+  language = Python3;
+}
+
+a: t=A
+        {
+            print($t.text)
+            print($t.type)
+            print($t.line)
+            print($t.pos)
+            print($t.channel)
+            print($t.index)
+            #print($t.tree)
+        }
+    ;
+
+A: 'a'..'z';
+
+WS  :
+        (   ' '
+        |   '\t'
+        |  ( '\n'
+            |	'\r\n'
+            |	'\r'
+            )
+        )+
+        { $channel = HIDDEN }
+    ;    
+
diff --git a/antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.py b/runtime/Python3/tests/t034tokenLabelPropertyRef.py
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t034tokenLabelPropertyRef.py
copy to runtime/Python3/tests/t034tokenLabelPropertyRef.py
diff --git a/runtime/Python3/tests/t035ruleLabelPropertyRef.g b/runtime/Python3/tests/t035ruleLabelPropertyRef.g
new file mode 100644
index 0000000..3725d34
--- /dev/null
+++ b/runtime/Python3/tests/t035ruleLabelPropertyRef.g
@@ -0,0 +1,16 @@
+grammar t035ruleLabelPropertyRef;
+options {
+  language = Python3;
+}
+
+a returns [bla]: t=b
+        {
+            $bla = $t.start, $t.stop, $t.text
+        }
+    ;
+
+b: A+;
+
+A: 'a'..'z';
+
+WS: ' '+  { $channel = HIDDEN };
diff --git a/runtime/Python3/tests/t035ruleLabelPropertyRef.py b/runtime/Python3/tests/t035ruleLabelPropertyRef.py
new file mode 100644
index 0000000..3347801
--- /dev/null
+++ b/runtime/Python3/tests/t035ruleLabelPropertyRef.py
@@ -0,0 +1,47 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t035ruleLabelPropertyRef(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+    
+        
+    def parserClass(self, base):
+        class TParser(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TParser
+    
+        
+    def testValid1(self):
+        cStream = antlr3.StringStream('   a a a a  ')
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        start, stop, text = parser.a()
+
+        # first token of rule b is the 2nd token (counting hidden tokens)
+        self.assertEqual(start.index, 1, start)
+
+        # last token of rule b is the 7th token (counting hidden tokens)
+        self.assertEqual(stop.index, 7, stop)
+
+        self.assertEqual(text, "a a a a")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t036multipleReturnValues.g b/runtime/Python3/tests/t036multipleReturnValues.g
new file mode 100644
index 0000000..a3fc8a3
--- /dev/null
+++ b/runtime/Python3/tests/t036multipleReturnValues.g
@@ -0,0 +1,25 @@
+grammar t036multipleReturnValues;
+options {
+  language = Python3;
+}
+
+a returns [foo, bar]: A
+        {
+            $foo = "foo";
+            $bar = "bar";
+        }
+    ;
+
+A: 'a'..'z';
+
+WS  :
+        (   ' '
+        |   '\t'
+        |  ( '\n'
+            |	'\r\n'
+            |	'\r'
+            )
+        )+
+        { $channel = HIDDEN }
+    ;    
+
diff --git a/runtime/Python3/tests/t036multipleReturnValues.py b/runtime/Python3/tests/t036multipleReturnValues.py
new file mode 100644
index 0000000..8dd65be
--- /dev/null
+++ b/runtime/Python3/tests/t036multipleReturnValues.py
@@ -0,0 +1,43 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t036multipleReturnValues(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+    
+        
+    def parserClass(self, base):
+        class TParser(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TParser
+    
+        
+    def testValid1(self):
+        cStream = antlr3.StringStream('   a')
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        ret = parser.a()
+        self.assertEqual(ret.foo, 'foo')
+        self.assertEqual(ret.bar, 'bar')
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+
diff --git a/runtime/Python3/tests/t037rulePropertyRef.g b/runtime/Python3/tests/t037rulePropertyRef.g
new file mode 100644
index 0000000..2069db1
--- /dev/null
+++ b/runtime/Python3/tests/t037rulePropertyRef.g
@@ -0,0 +1,15 @@
+grammar t037rulePropertyRef;
+options {
+  language = Python3;
+}
+
+a returns [bla]
+@after {
+    $bla = $start, $stop, $text
+}
+    : A+
+    ;
+
+A: 'a'..'z';
+
+WS: ' '+  { $channel = HIDDEN };
diff --git a/runtime/Python3/tests/t037rulePropertyRef.py b/runtime/Python3/tests/t037rulePropertyRef.py
new file mode 100644
index 0000000..bba4f3c
--- /dev/null
+++ b/runtime/Python3/tests/t037rulePropertyRef.py
@@ -0,0 +1,47 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t037rulePropertyRef(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+    
+        
+    def parserClass(self, base):
+        class TParser(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TParser
+    
+        
+    def testValid1(self):
+        cStream = antlr3.StringStream('   a a a a  ')
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        start, stop, text = parser.a().bla
+
+        # first token of rule a is the 2nd token (counting hidden tokens)
+        self.assertEqual(start.index, 1, start)
+        
+        # last token of rule a is the 7th token (counting hidden tokens)
+        self.assertEqual(stop.index, 7, stop)
+
+        self.assertEqual(text, "a a a a")
+
+
+if __name__ == '__main__':
+    unittest.main()
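The "counting hidden tokens" comments in the last two tests are easiest to follow with the token stream written out. The standalone sketch below does not use the antlr3 runtime; its toy tokenizer merely mirrors the A and WS rules to show why, for '   a a a a  ', the first letter sits at index 1 and the last at index 7 even though the WS tokens are hidden.

import re

# Toy tokenization of '   a a a a  ' mirroring the grammars above:
# A matches a single lowercase letter, WS matches a run of spaces and is
# sent to the hidden channel -- but it still occupies a token index.
text = '   a a a a  '
tokens = [(i, 'WS' if m.group().isspace() else 'A', m.group())
          for i, m in enumerate(re.finditer(r'[a-z]| +', text))]

for index, kind, value in tokens:
    print(index, kind, repr(value))

letter_indexes = [i for i, kind, _ in tokens if kind == 'A']
print('first A index:', letter_indexes[0])   # 1, as asserted for start.index
print('last A index:', letter_indexes[-1])   # 7, as asserted for stop.index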
diff --git a/runtime/Python3/tests/t038lexerRuleLabel.g b/runtime/Python3/tests/t038lexerRuleLabel.g
new file mode 100644
index 0000000..8a6967d
--- /dev/null
+++ b/runtime/Python3/tests/t038lexerRuleLabel.g
@@ -0,0 +1,28 @@
+lexer grammar t038lexerRuleLabel;
+options {
+  language = Python3;
+}
+
+A: 'a'..'z' WS '0'..'9'
+        {
+            print($WS)
+            print($WS.type)
+            print($WS.line)
+            print($WS.pos)
+            print($WS.channel)
+            print($WS.index)
+            print($WS.text)
+        }
+    ;
+
+fragment WS  :
+        (   ' '
+        |   '\t'
+        |  ( '\n'
+            |	'\r\n'
+            |	'\r'
+            )
+        )+
+        { $channel = HIDDEN }
+    ;    
+
diff --git a/runtime/Python3/tests/t038lexerRuleLabel.py b/runtime/Python3/tests/t038lexerRuleLabel.py
new file mode 100644
index 0000000..7b2e55a
--- /dev/null
+++ b/runtime/Python3/tests/t038lexerRuleLabel.py
@@ -0,0 +1,33 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t038lexerRuleLabel(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+    
+        
+    def testValid1(self):
+        cStream = antlr3.StringStream('a  2')
+
+        lexer = self.getLexer(cStream)
+
+        while True:
+            t = lexer.nextToken()
+            if t.type == antlr3.EOF:
+                break
+            print(t)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t039labels.g b/runtime/Python3/tests/t039labels.g
new file mode 100644
index 0000000..12aa649
--- /dev/null
+++ b/runtime/Python3/tests/t039labels.g
@@ -0,0 +1,18 @@
+grammar t039labels;
+options {
+  language = Python3;
+}
+
+a returns [l]
+    : ids+=A ( ',' ids+=(A|B) )* C D w=. ids+=. F EOF
+        { l = ($ids, $w) }
+    ;
+
+A: 'a'..'z';
+B: '0'..'9';
+C: a='A' { print($a) };
+D: a='FOOBAR' { print($a) };
+E: 'GNU' a=. { print($a) };
+F: 'BLARZ' a=EOF { print($a) };
+
+WS: ' '+  { $channel = HIDDEN };
diff --git a/runtime/Python3/tests/t039labels.py b/runtime/Python3/tests/t039labels.py
new file mode 100644
index 0000000..9744017
--- /dev/null
+++ b/runtime/Python3/tests/t039labels.py
@@ -0,0 +1,53 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t039labels(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+    
+        
+    def parserClass(self, base):
+        class TParser(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TParser
+    
+        
+    def testValid1(self):
+        cStream = antlr3.StringStream(
+            'a, b, c, 1, 2 A FOOBAR GNU1 A BLARZ'
+            )
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        ids, w = parser.a()
+
+        self.assertEqual(len(ids), 6, ids)
+        self.assertEqual(ids[0].text, 'a', ids[0])
+        self.assertEqual(ids[1].text, 'b', ids[1])
+        self.assertEqual(ids[2].text, 'c', ids[2])
+        self.assertEqual(ids[3].text, '1', ids[3])
+        self.assertEqual(ids[4].text, '2', ids[4])
+        self.assertEqual(ids[5].text, 'A', ids[5])
+
+        self.assertEqual(w.text, 'GNU1', w)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+
diff --git a/runtime/Python3/tests/t040bug80.g b/runtime/Python3/tests/t040bug80.g
new file mode 100644
index 0000000..dbd87c0
--- /dev/null
+++ b/runtime/Python3/tests/t040bug80.g
@@ -0,0 +1,13 @@
+lexer grammar t040bug80; 
+options {
+  language = Python3;
+}
+ 
+ID_LIKE
+    : 'defined' 
+    | {False}? Identifier 
+    | Identifier 
+    ; 
+ 
+fragment 
+Identifier: 'a'..'z'+ ; // with just 'a', output compiles 
diff --git a/runtime/Python3/tests/t040bug80.py b/runtime/Python3/tests/t040bug80.py
new file mode 100644
index 0000000..34c48b9
--- /dev/null
+++ b/runtime/Python3/tests/t040bug80.py
@@ -0,0 +1,33 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t040bug80(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+    
+        
+    def testValid1(self):
+        cStream = antlr3.StringStream('defined')
+        lexer = self.getLexer(cStream)
+        while True:
+            t = lexer.nextToken()
+            if t.type == antlr3.EOF:
+                break
+            print(t)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+
diff --git a/runtime/Python3/tests/t041parameters.g b/runtime/Python3/tests/t041parameters.g
new file mode 100644
index 0000000..44db5bf
--- /dev/null
+++ b/runtime/Python3/tests/t041parameters.g
@@ -0,0 +1,16 @@
+grammar t041parameters;
+options {
+  language = Python3;
+}
+
+a[arg1, arg2] returns [l]
+    : A+ EOF
+        { 
+            l = ($arg1, $arg2) 
+            $arg1 = "gnarz"
+        }
+    ;
+
+A: 'a'..'z';
+
+WS: ' '+  { $channel = HIDDEN };
diff --git a/runtime/Python3/tests/t041parameters.py b/runtime/Python3/tests/t041parameters.py
new file mode 100644
index 0000000..e4bc8c0
--- /dev/null
+++ b/runtime/Python3/tests/t041parameters.py
@@ -0,0 +1,45 @@
+import antlr3
+import testbase
+import unittest
+
+
+class t041parameters(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+    
+        
+    def parserClass(self, base):
+        class TParser(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TParser
+    
+        
+    def testValid1(self):
+        cStream = antlr3.StringStream('a a a')
+
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        r = parser.a('foo', 'bar')
+
+        self.assertEqual(r, ('foo', 'bar'))
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+
+
+
diff --git a/runtime/Python3/tests/t042ast.g b/runtime/Python3/tests/t042ast.g
new file mode 100644
index 0000000..5d2b9b9
--- /dev/null
+++ b/runtime/Python3/tests/t042ast.g
@@ -0,0 +1,353 @@
+grammar t042ast;
+options {
+    language = Python3;
+    output = AST;
+}
+
+tokens {
+    VARDEF;
+    FLOAT;
+    EXPR;
+    BLOCK;
+    VARIABLE;
+    FIELD;
+    CALL;
+    INDEX;
+    FIELDACCESS;
+}
+
+@init {
+self.flag = False
+}
+
+r1
+    : INT ('+'^ INT)*
+    ;
+
+r2
+    : 'assert'^ x=expression (':'! y=expression)? ';'!
+    ;
+
+r3
+    : 'if'^ expression s1=statement ('else'! s2=statement)?
+    ;
+
+r4
+    : 'while'^ expression statement
+    ;
+
+r5
+    : 'return'^ expression? ';'!
+    ;
+
+r6
+    : (INT|ID)+
+    ;
+
+r7
+    : INT -> 
+    ;
+
+r8
+    : 'var' ID ':' type -> ^('var' type ID) 
+    ;
+
+r9
+    : type ID ';' -> ^(VARDEF type ID) 
+    ;
+
+r10
+    : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text + ".0"))}
+    ;
+
+r11
+    : expression -> ^(EXPR expression)
+    | -> EXPR
+    ;
+
+r12
+    : ID (',' ID)* -> ID+
+    ;
+
+r13
+    : type ID (',' ID)* ';' -> ^(type ID+)
+    ;
+
+r14
+    :   expression? statement* type+
+        -> ^(EXPR expression? statement* type+)
+    ;
+
+r15
+    : INT -> INT INT
+    ;
+
+r16
+    : 'int' ID (',' ID)* -> ^('int' ID)+
+    ;
+
+r17
+    : 'for' '(' start=statement ';' expression ';' next=statement ')' statement
+        -> ^('for' $start expression $next statement)
+    ;
+
+r18
+    : t='for' -> ^(BLOCK)
+    ;
+
+r19
+    : t='for' -> ^(BLOCK[$t])
+    ;
+
+r20
+    : t='for' -> ^(BLOCK[$t,"FOR"])
+    ;
+
+r21
+    : t='for' -> BLOCK
+    ;
+
+r22
+    : t='for' -> BLOCK[$t]
+    ;
+
+r23
+    : t='for' -> BLOCK[$t,"FOR"]
+    ;
+
+r24
+    : r=statement expression -> ^($r expression)
+    ;
+
+r25
+    : r+=statement (',' r+=statement)+ expression -> ^($r expression)
+    ;
+
+r26
+    : r+=statement (',' r+=statement)+ -> ^(BLOCK $r+)
+    ;
+
+r27
+    : r=statement expression -> ^($r ^($r expression))
+    ;
+
+r28
+    : ('foo28a'|'foo28b') ->
+    ;
+
+r29
+    : (r+=statement)* -> ^(BLOCK $r+)
+    ;
+
+r30
+    : statement* -> ^(BLOCK statement?)
+    ;
+
+r31
+    : modifier type ID ('=' expression)? ';'
+        -> {self.flag == 0}? ^(VARDEF ID modifier* type expression?)
+        -> {self.flag == 1}? ^(VARIABLE ID modifier* type expression?)
+        ->                   ^(FIELD ID modifier* type expression?)
+    ;
+
+r32[which]
+  : ID INT -> {which==1}? ID
+           -> {which==2}? INT
+           -> // yield nothing as else-clause
+  ;
+
+r33
+    :   modifiers! statement
+    ;
+
+r34
+    :   modifiers! r34a[$modifiers.tree]
+    //|   modifiers! r33b[$modifiers.tree]
+    ;
+
+r34a[mod]
+    :   'class' ID ('extends' sup=type)?
+        ( 'implements' i+=type (',' i+=type)*)?
+        '{' statement* '}'
+        -> ^('class' ID {$mod} ^('extends' $sup)? ^('implements' $i+)? statement* )
+    ;
+
+r35
+    : '{' 'extends' (sup=type)? '}'
+        ->  ^('extends' $sup)?
+    ;
+
+r36
+    : 'if' '(' expression ')' s1=statement
+        ( 'else' s2=statement -> ^('if' ^(EXPR expression) $s1 $s2)
+        |                     -> ^('if' ^(EXPR expression) $s1)
+        )
+    ;
+
+r37
+    : (INT -> INT) ('+' i=INT -> ^('+' $r37 $i) )* 
+    ;
+
+r38
+    : INT ('+'^ INT)*
+    ;
+
+r39
+    : (primary->primary) // set return tree to just primary
+        ( '(' arg=expression ')'
+            -> ^(CALL $r39 $arg)
+        | '[' ie=expression ']'
+            -> ^(INDEX $r39 $ie)
+        | '.' p=primary
+            -> ^(FIELDACCESS $r39 $p)
+        )*
+    ;
+
+r40
+    : (INT -> INT) ( ('+' i+=INT)* -> ^('+' $r40 $i*) ) ';'
+    ;
+
+r41
+    : (INT -> INT) ( ('+' i=INT) -> ^($i $r41) )* ';'
+    ;
+
+r42
+    : ids+=ID (','! ids+=ID)*
+    ;
+
+r43 returns [res]
+    : ids+=ID! (','! ids+=ID!)* {$res = [id.text for id in $ids]}
+    ;
+
+r44
+    : ids+=ID^ (','! ids+=ID^)*
+    ;
+
+r45
+    : primary^
+    ;
+
+r46 returns [res]
+    : ids+=primary! (','! ids+=primary!)* {$res = [id.text for id in $ids]}
+    ;
+
+r47
+    : ids+=primary (','! ids+=primary)*
+    ;
+
+r48
+    : ids+=. (','! ids+=.)*
+    ;
+
+r49
+    : .^ ID
+    ;
+
+r50
+    : ID 
+        -> ^({CommonTree(CommonToken(type=FLOAT, text="1.0"))} ID)
+    ;
+
+/** templates tested:
+    tokenLabelPropertyRef_tree
+*/
+r51 returns [res]
+    : ID t=ID ID
+        { $res = $t.tree }
+    ;
+
+/** templates tested:
+    rulePropertyRef_tree
+*/
+r52 returns [res]
+@after {
+    $res = $tree
+}
+    : ID
+    ;
+
+/** templates tested:
+    ruleLabelPropertyRef_tree
+*/
+r53 returns [res]
+    : t=primary
+        { $res = $t.tree }
+    ;
+
+/** templates tested:
+    ruleSetPropertyRef_tree
+*/
+r54 returns [res]
+@after {
+    $tree = $t.tree;
+}
+    : ID t=expression ID
+    ;
+
+/** backtracking */
+r55
+options { backtrack=true; k=1; }
+    : (modifier+ INT)=> modifier+ expression
+    | modifier+ statement
+    ;
+
+
+/** templates tested:
+    rewriteTokenRef with len(args)>0
+*/
+r56
+    : t=ID* -> ID[$t,'foo']
+    ;
+
+/** templates tested:
+    rewriteTokenRefRoot with len(args)>0
+*/
+r57
+    : t=ID* -> ^(ID[$t,'foo'])
+    ;
+
+/** templates tested:
+    ???
+*/
+r58
+    : ({CommonTree(CommonToken(type=FLOAT, text="2.0"))})^
+    ;
+
+/** templates tested:
+    rewriteTokenListLabelRefRoot
+*/
+r59
+    : (t+=ID)+ statement -> ^($t statement)+
+    ;
+
+primary
+    : ID
+    ;
+
+expression
+    : r1
+    ;
+
+statement
+    : 'fooze'
+    | 'fooze2'
+    ;
+
+modifiers
+    : modifier+
+    ;
+
+modifier
+    : 'public'
+    | 'private'
+    ;
+
+type
+    : 'int'
+    | 'bool'
+    ;
+
+ID : 'a'..'z' + ;
+INT : '0'..'9' +;
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
+
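Rules such as r37, r38 and r39 above rebuild the tree produced so far as the left child of a fresh root on each loop iteration, which is why the tests further down expect '(+ (+ 1 2) 3)' for '1 + 2 + 3'. The fold below is a runtime-free sketch of that shape only; tuples stand in for CommonTree nodes and the helper names are invented.

from functools import reduce

def left_assoc(op, operands):
    # Start with the first operand as the tree, then, like ^('+' $r37 $i),
    # make the tree built so far the left child of a fresh op node.
    return reduce(lambda tree, nxt: (op, tree, nxt), operands[1:], operands[0])

def to_string_tree(node):
    if isinstance(node, tuple):
        return '(' + ' '.join(to_string_tree(child) for child in node) + ')'
    return str(node)

print(to_string_tree(left_assoc('+', [1, 2, 3])))   # (+ (+ 1 2) 3)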
diff --git a/runtime/Python3/tests/t042ast.py b/runtime/Python3/tests/t042ast.py
new file mode 100644
index 0000000..559d5f1
--- /dev/null
+++ b/runtime/Python3/tests/t042ast.py
@@ -0,0 +1,669 @@
+import unittest
+import textwrap
+import antlr3
+import testbase
+
+class t042ast(testbase.ANTLRTest):
+##     def lexerClass(self, base):
+##         class TLexer(base):
+##             def reportError(self, re):
+##                 # no error recovery yet, just crash!
+##                 raise re
+
+##         return TLexer
+    
+
+    def parserClass(self, base):
+        class TParser(base):
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TParser
+    
+
+    def parse(self, text, method, rArgs=(), **kwargs):
+        self.compileGrammar() #options='-trace')
+        
+        cStream = antlr3.StringStream(text)
+        self.lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(self.lexer)
+        self.parser = self.getParser(tStream)
+        
+        for attr, val in kwargs.items():
+            setattr(self.parser, attr, val)
+            
+        return getattr(self.parser, method)(*rArgs)
+
+    
+    def testR1(self):
+        r = self.parse("1 + 2", 'r1')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(+ 1 2)'
+            )
+
+
+    def testR2a(self):
+        r = self.parse("assert 2+3;", 'r2')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(assert (+ 2 3))'
+            )
+
+
+    def testR2b(self):
+        r = self.parse("assert 2+3 : 5;", 'r2')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(assert (+ 2 3) 5)'
+            )
+
+
+    def testR3a(self):
+        r = self.parse("if 1 fooze", 'r3')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(if 1 fooze)'
+            )
+
+
+    def testR3b(self):
+        r = self.parse("if 1 fooze else fooze", 'r3')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(if 1 fooze fooze)'
+            )
+
+
+    def testR4a(self):
+        r = self.parse("while 2 fooze", 'r4')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(while 2 fooze)'
+            )
+
+
+    def testR5a(self):
+        r = self.parse("return;", 'r5')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'return'
+            )
+
+
+    def testR5b(self):
+        r = self.parse("return 2+3;", 'r5')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(return (+ 2 3))'
+            )
+
+
+    def testR6a(self):
+        r = self.parse("3", 'r6')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '3'
+            )
+
+
+    def testR6b(self):
+        r = self.parse("3 a", 'r6')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '3 a'
+            )
+
+
+    def testR7(self):
+        r = self.parse("3", 'r7')
+        self.assertIsNone(r.tree)
+
+
+    def testR8(self):
+        r = self.parse("var foo:bool", 'r8')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(var bool foo)'
+            )
+
+
+    def testR9(self):
+        r = self.parse("int foo;", 'r9')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(VARDEF int foo)'
+            )
+
+
+    def testR10(self):
+        r = self.parse("10", 'r10')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '10.0'
+            )
+
+
+    def testR11a(self):
+        r = self.parse("1+2", 'r11')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(EXPR (+ 1 2))'
+            )
+
+
+    def testR11b(self):
+        r = self.parse("", 'r11')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'EXPR'
+            )
+
+
+    def testR12a(self):
+        r = self.parse("foo", 'r12')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'foo'
+            )
+
+
+    def testR12b(self):
+        r = self.parse("foo, bar, gnurz", 'r12')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'foo bar gnurz'
+            )
+
+
+    def testR13a(self):
+        r = self.parse("int foo;", 'r13')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(int foo)'
+            )
+
+
+    def testR13b(self):
+        r = self.parse("bool foo, bar, gnurz;", 'r13')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(bool foo bar gnurz)'
+            )
+
+
+    def testR14a(self):
+        r = self.parse("1+2 int", 'r14')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(EXPR (+ 1 2) int)'
+            )
+
+
+    def testR14b(self):
+        r = self.parse("1+2 int bool", 'r14')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(EXPR (+ 1 2) int bool)'
+            )
+
+
+    def testR14c(self):
+        r = self.parse("int bool", 'r14')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(EXPR int bool)'
+            )
+
+
+    def testR14d(self):
+        r = self.parse("fooze fooze int bool", 'r14')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(EXPR fooze fooze int bool)'
+            )
+
+
+    def testR14e(self):
+        r = self.parse("7+9 fooze fooze int bool", 'r14')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(EXPR (+ 7 9) fooze fooze int bool)'
+            )
+
+
+    def testR15(self):
+        r = self.parse("7", 'r15')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '7 7'
+            )
+
+
+    def testR16a(self):
+        r = self.parse("int foo", 'r16')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(int foo)'
+            )
+
+
+    def testR16b(self):
+        r = self.parse("int foo, bar, gnurz", 'r16')
+            
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(int foo) (int bar) (int gnurz)'
+            )
+
+
+    def testR17a(self):
+        r = self.parse("for ( fooze ; 1 + 2 ; fooze ) fooze", 'r17')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(for fooze (+ 1 2) fooze fooze)'
+            )
+
+
+    def testR18a(self):
+        r = self.parse("for", 'r18')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'BLOCK'
+            )
+
+
+    def testR19a(self):
+        r = self.parse("for", 'r19')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'for'
+            )
+
+
+    def testR20a(self):
+        r = self.parse("for", 'r20')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'FOR'
+            )
+
+
+    def testR21a(self):
+        r = self.parse("for", 'r21')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'BLOCK'
+            )
+
+
+    def testR22a(self):
+        r = self.parse("for", 'r22')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'for'
+            )
+
+
+    def testR23a(self):
+        r = self.parse("for", 'r23')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'FOR'
+            )
+
+
+    def testR24a(self):
+        r = self.parse("fooze 1 + 2", 'r24')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(fooze (+ 1 2))'
+            )
+
+
+    def testR25a(self):
+        r = self.parse("fooze, fooze2 1 + 2", 'r25')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(fooze (+ 1 2))'
+            )
+
+
+    def testR26a(self):
+        r = self.parse("fooze, fooze2", 'r26')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(BLOCK fooze fooze2)'
+            )
+
+
+    def testR27a(self):
+        r = self.parse("fooze 1 + 2", 'r27')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(fooze (fooze (+ 1 2)))'
+            )
+            
+
+    def testR28(self):
+        r = self.parse("foo28a", 'r28')
+        self.assertIsNone(r.tree)
+
+
+    def testR29(self):
+        self.assertRaises(RuntimeError, self.parse, "", 'r29')
+
+
+# FIXME: broken upstream?
+##     def testR30(self):
+##         try:
+##             r = self.parse("fooze fooze", 'r30')
+##             self.fail(r.tree.toStringTree())
+##         except RuntimeError:
+##             pass
+
+
+    def testR31a(self):
+        r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=0)
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(VARDEF gnurz public int (+ 1 2))'
+            )
+
+
+    def testR31b(self):
+        r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=1)
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(VARIABLE gnurz public int (+ 1 2))'
+            )
+
+
+    def testR31c(self):
+        r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=2)
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(FIELD gnurz public int (+ 1 2))'
+            )
+
+
+    def testR32a(self):
+        r = self.parse("gnurz 32", 'r32', [1], flag=2)
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'gnurz'
+            )
+
+
+    def testR32b(self):
+        r = self.parse("gnurz 32", 'r32', [2], flag=2)
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '32'
+            )
+
+
+    def testR32c(self):
+        r = self.parse("gnurz 32", 'r32', [3], flag=2)
+        self.assertIsNone(r.tree)
+
+
+    def testR33a(self):
+        r = self.parse("public private fooze", 'r33')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'fooze'
+            )
+
+
+    def testR34a(self):
+        r = self.parse("public class gnurz { fooze fooze2 }", 'r34')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(class gnurz public fooze fooze2)'
+            )
+
+
+    def testR34b(self):
+        r = self.parse("public class gnurz extends bool implements int, bool { fooze fooze2 }", 'r34')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(class gnurz public (extends bool) (implements int bool) fooze fooze2)'
+            )
+
+
+    def testR35(self):
+        self.assertRaises(RuntimeError, self.parse, "{ extends }", 'r35')
+
+
+    def testR36a(self):
+        r = self.parse("if ( 1 + 2 ) fooze", 'r36')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(if (EXPR (+ 1 2)) fooze)'
+            )
+
+
+    def testR36b(self):
+        r = self.parse("if ( 1 + 2 ) fooze else fooze2", 'r36')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(if (EXPR (+ 1 2)) fooze fooze2)'
+            )
+
+
+    def testR37(self):
+        r = self.parse("1 + 2 + 3", 'r37')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(+ (+ 1 2) 3)'
+            )
+
+
+    def testR38(self):
+        r = self.parse("1 + 2 + 3", 'r38')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(+ (+ 1 2) 3)'
+            )
+
+
+    def testR39a(self):
+        r = self.parse("gnurz[1]", 'r39')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(INDEX gnurz 1)'
+            )
+
+
+    def testR39b(self):
+        r = self.parse("gnurz(2)", 'r39')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(CALL gnurz 2)'
+            )
+
+
+    def testR39c(self):
+        r = self.parse("gnurz.gnarz", 'r39')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(FIELDACCESS gnurz gnarz)'
+            )
+
+
+    def testR39d(self):
+        r = self.parse("gnurz.gnarz.gnorz", 'r39')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(FIELDACCESS (FIELDACCESS gnurz gnarz) gnorz)'
+            )
+
+
+    def testR40(self):
+        r = self.parse("1 + 2 + 3;", 'r40')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(+ 1 2 3)'
+            )
+
+
+    def testR41(self):
+        r = self.parse("1 + 2 + 3;", 'r41')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(3 (2 1))'
+            )
+
+
+    def testR42(self):
+        r = self.parse("gnurz, gnarz, gnorz", 'r42')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'gnurz gnarz gnorz'
+            )
+
+
+    def testR43(self):
+        r = self.parse("gnurz, gnarz, gnorz", 'r43')
+        self.assertIsNone(r.tree)
+        self.assertEqual(
+            r.res,
+            ['gnurz', 'gnarz', 'gnorz']
+            )
+
+
+    def testR44(self):
+        r = self.parse("gnurz, gnarz, gnorz", 'r44')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(gnorz (gnarz gnurz))'
+            )
+
+
+    def testR45(self):
+        r = self.parse("gnurz", 'r45')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'gnurz'
+            )
+
+
+    def testR46(self):
+        r = self.parse("gnurz, gnarz, gnorz", 'r46')
+        self.assertIsNone(r.tree)
+        self.assertEqual(
+            r.res,
+            ['gnurz', 'gnarz', 'gnorz']
+            )
+
+
+    def testR47(self):
+        r = self.parse("gnurz, gnarz, gnorz", 'r47')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'gnurz gnarz gnorz'
+            )
+
+
+    def testR48(self):
+        r = self.parse("gnurz, gnarz, gnorz", 'r48')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'gnurz gnarz gnorz'
+            )
+
+
+    def testR49(self):
+        r = self.parse("gnurz gnorz", 'r49')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(gnurz gnorz)'
+            )
+
+
+    def testR50(self):
+        r = self.parse("gnurz", 'r50')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(1.0 gnurz)'
+            )
+
+
+    def testR51(self):
+        r = self.parse("gnurza gnurzb gnurzc", 'r51')
+        self.assertEqual(
+            r.res.toStringTree(),
+            'gnurzb'
+            )
+
+
+    def testR52(self):
+        r = self.parse("gnurz", 'r52')
+        self.assertEqual(
+            r.res.toStringTree(),
+            'gnurz'
+            )
+
+
+    def testR53(self):
+        r = self.parse("gnurz", 'r53')
+        self.assertEqual(
+            r.res.toStringTree(),
+            'gnurz'
+            )
+
+
+    def testR54(self):
+        r = self.parse("gnurza 1 + 2 gnurzb", 'r54')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(+ 1 2)'
+            )
+
+
+    def testR55a(self):
+        r = self.parse("public private 1 + 2", 'r55')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'public private (+ 1 2)'
+            )
+
+
+    def testR55b(self):
+        r = self.parse("public fooze", 'r55')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'public fooze'
+            )
+
+
+    def testR56(self):
+        r = self.parse("a b c d", 'r56')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'foo'
+            )
+
+
+    def testR57(self):
+        r = self.parse("a b c d", 'r57')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            'foo'
+            )
+
+
+    def testR59(self):
+        r = self.parse("a b c fooze", 'r59')
+        self.assertEqual(
+            r.tree.toStringTree(),
+            '(a fooze) (b fooze) (c fooze)'
+            )
+
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/runtime/Python3/tests/t043synpred.g b/runtime/Python3/tests/t043synpred.g
new file mode 100644
index 0000000..478b8be
--- /dev/null
+++ b/runtime/Python3/tests/t043synpred.g
@@ -0,0 +1,14 @@
+grammar t043synpred;
+options {
+  language = Python3;
+}
+
+a: ((s+ P)=> s+ b)? E;
+b: P 'foo';
+
+s: S;
+
+
+S: ' ';
+P: '+';
+E: '>';
diff --git a/antlr-3.4/runtime/Python/tests/t043synpred.py b/runtime/Python3/tests/t043synpred.py
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t043synpred.py
copy to runtime/Python3/tests/t043synpred.py
diff --git a/runtime/Python3/tests/t044trace.g b/runtime/Python3/tests/t044trace.g
new file mode 100644
index 0000000..e170bba
--- /dev/null
+++ b/runtime/Python3/tests/t044trace.g
@@ -0,0 +1,20 @@
+grammar t044trace;
+options {
+  language = Python3;
+}
+
+@init {
+    self._stack = None
+}
+
+a: '<' ((INT '+')=>b|c) '>';
+b: c ('+' c)*;
+c: INT 
+    {
+        if self._stack is None:
+            self._stack = self.getRuleInvocationStack()
+    }
+    ;
+
+INT: ('0'..'9')+;
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
diff --git a/runtime/Python3/tests/t044trace.py b/runtime/Python3/tests/t044trace.py
new file mode 100644
index 0000000..2d60b61
--- /dev/null
+++ b/runtime/Python3/tests/t044trace.py
@@ -0,0 +1,92 @@
+import antlr3
+import testbase
+import unittest
+
+
+class T(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar(options='-trace')
+
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self.traces = []
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+
+
+    def parserClass(self, base):
+        class TParser(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self.traces = []
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+            def getRuleInvocationStack(self):
+                return self._getRuleInvocationStack(base.__module__)
+
+        return TParser
+
+
+    def testTrace(self):
+        cStream = antlr3.StringStream('< 1 + 2 + 3 >')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.a()
+
+        self.assertEqual(
+            lexer.traces,
+            [ '>T__7', '<T__7', '>WS', '<WS', '>INT', '<INT', '>WS', '<WS',
+              '>T__6', '<T__6', '>WS', '<WS', '>INT', '<INT', '>WS', '<WS',
+              '>T__6', '<T__6', '>WS', '<WS', '>INT', '<INT', '>WS', '<WS',
+              '>T__8', '<T__8']
+            )
+
+        self.assertEqual(
+            parser.traces,
+            [ '>a', '>synpred1_t044trace_fragment', '<synpred1_t044trace_fragment', '>b', '>c',
+              '<c', '>c', '<c', '>c', '<c', '<b', '<a' ]
+            )
+
+
+    def testInvokationStack(self):
+        cStream = antlr3.StringStream('< 1 + 2 + 3 >')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.a()
+
+        self.assertEqual(parser._stack, ['a', 'b', 'c'])
+
+if __name__ == '__main__':
+    unittest.main()
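The traceIn/traceOut overrides in this test do nothing ANTLR-specific: they simply record '>rule' on entry and '<rule' on exit. As a hypothetical, runtime-free illustration of the same hook pattern, the sketch below wraps ordinary functions and produces markers in the same format the assertions above compare against.

import functools

traces = []

def traced(func):
    # Record '>name' on entry and '<name' on exit, like traceIn/traceOut above.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        traces.append('>' + func.__name__)
        try:
            return func(*args, **kwargs)
        finally:
            traces.append('<' + func.__name__)
    return wrapper

@traced
def c():
    pass

@traced
def b():
    c()
    c()

@traced
def a():
    b()

a()
print(traces)   # ['>a', '>b', '>c', '<c', '>c', '<c', '<b', '<a']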
diff --git a/runtime/Python3/tests/t045dfabug.g b/runtime/Python3/tests/t045dfabug.g
new file mode 100644
index 0000000..436aefa
--- /dev/null
+++ b/runtime/Python3/tests/t045dfabug.g
@@ -0,0 +1,32 @@
+grammar t045dfabug;
+options {
+    language = Python3;
+    output = AST;
+}
+
+
+// this rule used to generate an infinite loop in DFA.predict
+r
+options { backtrack=true; }
+    : (modifier+ INT)=> modifier+ expression
+    | modifier+ statement
+    ;
+
+expression
+    : INT '+' INT
+    ;
+
+statement
+    : 'fooze'
+    | 'fooze2'
+    ;
+
+modifier
+    : 'public'
+    | 'private'
+    ;
+
+ID : 'a'..'z' + ;
+INT : '0'..'9' +;
+WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
+
diff --git a/antlr-3.4/runtime/Python/tests/t045dfabug.py b/runtime/Python3/tests/t045dfabug.py
similarity index 100%
copy from antlr-3.4/runtime/Python/tests/t045dfabug.py
copy to runtime/Python3/tests/t045dfabug.py
diff --git a/runtime/Python3/tests/t046rewrite.g b/runtime/Python3/tests/t046rewrite.g
new file mode 100644
index 0000000..58e4071
--- /dev/null
+++ b/runtime/Python3/tests/t046rewrite.g
@@ -0,0 +1,54 @@
+grammar t046rewrite;
+options {
+    language=Python3;
+}
+
+program
+@init {
+    start = self.input.LT(1)
+}
+    :   method+
+        {
+        self.input.insertBefore(start,"public class Wrapper {\n")
+        self.input.insertAfter($method.stop, "\n}\n")
+        }
+    ;
+
+method
+    :   m='method' ID '(' ')' body
+        {self.input.replace($m, "public void");}
+    ; 
+
+body
+scope {
+    decls
+}
+@init {
+    $body::decls = set()
+}
+    :   lcurly='{' stat* '}'
+        {
+        for it in $body::decls:
+            self.input.insertAfter($lcurly, "\nint "+it+";")
+        }
+    ;
+
+stat:   ID '=' expr ';' {$body::decls.add($ID.text);}
+    ;
+
+expr:   mul ('+' mul)* 
+    ;
+
+mul :   atom ('*' atom)*
+    ;
+
+atom:   ID
+    |   INT
+    ;
+
+ID  :   ('a'..'z'|'A'..'Z')+ ;
+
+INT :   ('0'..'9')+ ;
+
+WS  :   (' '|'\t'|'\n')+ {$channel=HIDDEN;}
+    ;
diff --git a/runtime/Python3/tests/t046rewrite.py b/runtime/Python3/tests/t046rewrite.py
new file mode 100644
index 0000000..be1f4aa
--- /dev/null
+++ b/runtime/Python3/tests/t046rewrite.py
@@ -0,0 +1,52 @@
+import unittest
+import textwrap
+import antlr3
+import testbase
+
+class T(testbase.ANTLRTest):
+    def testRewrite(self):
+        self.compileGrammar()
+
+        input = textwrap.dedent(
+            '''\
+            method foo() {
+              i = 3;
+              k = i;
+              i = k*4;
+            }
+
+            method bar() {
+              j = i*2;
+            }
+            ''')
+        
+        cStream = antlr3.StringStream(input)
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.TokenRewriteStream(lexer)
+        parser = self.getParser(tStream)
+        parser.program()
+
+        expectedOutput = textwrap.dedent('''\
+        public class Wrapper {
+        public void foo() {
+        int k;
+        int i;
+          i = 3;
+          k = i;
+          i = k*4;
+        }
+
+        public void bar() {
+        int j;
+          j = i*2;
+        }
+        }
+
+        ''')
+
+        self.assertEqual(str(tStream), expectedOutput)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
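The embedded actions in t046rewrite.g and the expected output above depend on insertBefore/insertAfter/replace being recorded against original token positions and applied only when the stream is rendered, so later edits never invalidate the indices used by earlier ones. The class below is a plain-Python sketch of that deferred-edit idea, not the antlr3 TokenRewriteStream API; its names are invented for illustration.

class ToyRewriteBuffer:
    """Deferred edits keyed by original token index, applied only at render time."""

    def __init__(self, tokens):
        self.tokens = list(tokens)
        self.before = {}        # original index -> text inserted before that token
        self.after = {}         # original index -> text inserted after that token
        self.replacements = {}  # original index -> replacement text

    def insert_before(self, index, text):
        # Later inserts at the same index end up leftmost
        # (cf. the testCombineInserts case later in this diff).
        self.before[index] = text + self.before.get(index, '')

    def insert_after(self, index, text):
        self.after[index] = self.after.get(index, '') + text

    def replace(self, index, text):
        self.replacements[index] = text

    def render(self):
        # Original token positions are never shifted by earlier edits,
        # so every recorded operation still points at the token it meant.
        out = []
        for i, tok in enumerate(self.tokens):
            out.append(self.before.get(i, ''))
            out.append(self.replacements.get(i, tok))
            out.append(self.after.get(i, ''))
        return ''.join(out)


buf = ToyRewriteBuffer(['method', ' ', 'foo', '(', ')', ' ', '{', '}'])
buf.insert_before(0, 'public class Wrapper {\n')  # like insertBefore(start, ...)
buf.replace(0, 'public void')                     # like replace($m, "public void")
buf.insert_after(7, '\n}\n')                      # like insertAfter($method.stop, ...)
print(buf.render())
# public class Wrapper {
# public void foo() {}
# }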
diff --git a/runtime/Python3/tests/t047treeparser.g b/runtime/Python3/tests/t047treeparser.g
new file mode 100644
index 0000000..30cd25e
--- /dev/null
+++ b/runtime/Python3/tests/t047treeparser.g
@@ -0,0 +1,113 @@
+grammar t047treeparser;
+options {
+    language=Python3;
+    output=AST;
+}
+
+tokens {
+    VAR_DEF;
+    ARG_DEF;
+    FUNC_HDR;
+    FUNC_DECL;
+    FUNC_DEF;
+    BLOCK;
+}
+
+program
+    :   declaration+
+    ;
+
+declaration
+    :   variable
+    |   functionHeader ';' -> ^(FUNC_DECL functionHeader)
+    |   functionHeader block -> ^(FUNC_DEF functionHeader block)
+    ;
+
+variable
+    :   type declarator ';' -> ^(VAR_DEF type declarator)
+    ;
+
+declarator
+    :   ID 
+    ;
+
+functionHeader
+    :   type ID '(' ( formalParameter ( ',' formalParameter )* )? ')'
+        -> ^(FUNC_HDR type ID formalParameter+)
+    ;
+
+formalParameter
+    :   type declarator -> ^(ARG_DEF type declarator)
+    ;
+
+type
+    :   'int'   
+    |   'char'  
+    |   'void'
+    |   ID        
+    ;
+
+block
+    :   lc='{'
+            variable*
+            stat*
+        '}'
+        -> ^(BLOCK[$lc,"BLOCK"] variable* stat*)
+    ;
+
+stat: forStat
+    | expr ';'!
+    | block
+    | assignStat ';'!
+    | ';'!
+    ;
+
+forStat
+    :   'for' '(' start=assignStat ';' expr ';' next=assignStat ')' block
+        -> ^('for' $start expr $next block)
+    ;
+
+assignStat
+    :   ID EQ expr -> ^(EQ ID expr)
+    ;
+
+expr:   condExpr
+    ;
+
+condExpr
+    :   aexpr ( ('=='^ | '<'^) aexpr )?
+    ;
+
+aexpr
+    :   atom ( '+'^ atom )*
+    ;
+
+atom
+    : ID      
+    | INT      
+    | '(' expr ')' -> expr
+    ; 
+
+FOR : 'for' ;
+INT_TYPE : 'int' ;
+CHAR: 'char';
+VOID: 'void';
+
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
+    ;
+
+INT :	('0'..'9')+
+    ;
+
+EQ   : '=' ;
+EQEQ : '==' ;
+LT   : '<' ;
+PLUS : '+' ;
+
+WS  :   (   ' '
+        |   '\t'
+        |   '\r'
+        |   '\n'
+        )+
+        { $channel=HIDDEN }
+    ;    
diff --git a/runtime/Python3/tests/t047treeparser.py b/runtime/Python3/tests/t047treeparser.py
new file mode 100644
index 0000000..5b866b2
--- /dev/null
+++ b/runtime/Python3/tests/t047treeparser.py
@@ -0,0 +1,122 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+
+class T(testbase.ANTLRTest):
+    def walkerClass(self, base):
+        class TWalker(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self.traces = []
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+            
+        return TWalker
+    
+
+    def setUp(self):
+        self.compileGrammar()
+        self.compileGrammar('t047treeparserWalker.g', options='-trace')
+
+        
+    def testWalker(self):
+        input = textwrap.dedent(
+            '''\
+            char c;
+            int x;
+
+            void bar(int x);
+
+            int foo(int y, char d) {
+              int i;
+              for (i=0; i<3; i=i+1) {
+                x=3;
+                y=5;
+              }
+            }
+            ''')
+        
+        cStream = antlr3.StringStream(input)
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        r = parser.program()
+
+        self.assertEqual(
+            r.tree.toStringTree(),
+            "(VAR_DEF char c) (VAR_DEF int x) (FUNC_DECL (FUNC_HDR void bar (ARG_DEF int x))) (FUNC_DEF (FUNC_HDR int foo (ARG_DEF int y) (ARG_DEF char d)) (BLOCK (VAR_DEF int i) (for (= i 0) (< i 3) (= i (+ i 1)) (BLOCK (= x 3) (= y 5)))))"
+            )
+        
+        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+        nodes.setTokenStream(tStream)
+        walker = self.getWalker(nodes)
+        walker.program()
+
+        # FIXME: need to crosscheck with Java target (compile walker with
+        # -trace option), if this is the real list. For now I'm happy that
+        # it does not crash ;)
+        self.assertEqual(
+            walker.traces,
+            [ '>program', '>declaration', '>variable', '>type', '<type',
+              '>declarator', '<declarator', '<variable', '<declaration',
+              '>declaration', '>variable', '>type', '<type', '>declarator',
+              '<declarator', '<variable', '<declaration', '>declaration',
+              '>functionHeader', '>type', '<type', '>formalParameter',
+              '>type', '<type', '>declarator', '<declarator',
+              '<formalParameter', '<functionHeader', '<declaration',
+              '>declaration', '>functionHeader', '>type', '<type',
+              '>formalParameter', '>type', '<type', '>declarator',
+              '<declarator', '<formalParameter', '>formalParameter', '>type',
+              '<type', '>declarator', '<declarator', '<formalParameter',
+              '<functionHeader', '>block', '>variable', '>type', '<type',
+              '>declarator', '<declarator', '<variable', '>stat', '>forStat',
+              '>expr', '>expr', '>atom', '<atom', '<expr', '<expr', '>expr',
+              '>expr', '>atom', '<atom', '<expr', '>expr', '>atom', '<atom',
+              '<expr', '<expr', '>expr', '>expr', '>expr', '>atom', '<atom',
+              '<expr', '>expr', '>atom', '<atom', '<expr', '<expr', '<expr',
+              '>block', '>stat', '>expr', '>expr', '>atom', '<atom', '<expr',
+              '<expr', '<stat', '>stat', '>expr', '>expr', '>atom', '<atom',
+              '<expr', '<expr', '<stat', '<block', '<forStat', '<stat',
+              '<block', '<declaration', '<program'
+              ]
+            )
+
+    def testRuleLabelPropertyRefText(self):
+        self.compileGrammar()
+        self.compileGrammar('t047treeparserWalker.g', options='-trace')
+
+        input = textwrap.dedent(
+            '''\
+            char c;
+            ''')
+        
+        cStream = antlr3.StringStream(input)
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        r = parser.variable()
+
+        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+        nodes.setTokenStream(tStream)
+        walker = self.getWalker(nodes)
+        r = walker.variable()
+
+        self.assertEqual(r, 'c')
+        
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t047treeparserWalker.g b/runtime/Python3/tests/t047treeparserWalker.g
new file mode 100644
index 0000000..e78b1df
--- /dev/null
+++ b/runtime/Python3/tests/t047treeparserWalker.g
@@ -0,0 +1,67 @@
+tree grammar t047treeparserWalker;
+options {
+    language=Python3;
+    tokenVocab=t047treeparser;
+    ASTLabelType=CommonTree;
+}
+
+program
+    :   declaration+
+    ;
+
+declaration
+    :   variable
+    |   ^(FUNC_DECL functionHeader)
+    |   ^(FUNC_DEF functionHeader block)
+    ;
+
+variable returns [res]
+    :   ^(VAR_DEF type declarator)
+        { 
+            $res = $declarator.text; 
+        }
+    ;
+
+declarator
+    :   ID 
+    ;
+
+functionHeader
+    :   ^(FUNC_HDR type ID formalParameter+)
+    ;
+
+formalParameter
+    :   ^(ARG_DEF type declarator)
+    ;
+
+type
+    :   'int'
+    |   'char'
+    |   'void'
+    |   ID        
+    ;
+
+block
+    :   ^(BLOCK variable* stat*)
+    ;
+
+stat: forStat
+    | expr
+    | block
+    ;
+
+forStat
+    :   ^('for' expr expr expr block)
+    ;
+
+expr:   ^(EQEQ expr expr)
+    |   ^(LT expr expr)
+    |   ^(PLUS expr expr)
+    |   ^(EQ ID expr)
+    |   atom
+    ;
+
+atom
+    : ID      
+    | INT      
+    ; 
diff --git a/runtime/Python3/tests/t048rewrite.g b/runtime/Python3/tests/t048rewrite.g
new file mode 100644
index 0000000..e15e76d
--- /dev/null
+++ b/runtime/Python3/tests/t048rewrite.g
@@ -0,0 +1,9 @@
+lexer grammar t048rewrite;
+options {
+    language=Python3;
+}
+
+A: 'a';
+B: 'b';
+C: 'c';
+
diff --git a/runtime/Python3/tests/t048rewrite.py b/runtime/Python3/tests/t048rewrite.py
new file mode 100644
index 0000000..76edbfe
--- /dev/null
+++ b/runtime/Python3/tests/t048rewrite.py
@@ -0,0 +1,474 @@
+"""Testsuite for TokenRewriteStream class."""
+
+# don't care about docstrings
+# pylint: disable-msg=C0111
+
+import unittest
+import antlr3
+import testbase
+
+class T1(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+
+
+    def _parse(self, input):
+        cStream = antlr3.StringStream(input)
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.TokenRewriteStream(lexer)
+        tStream.fillBuffer()
+
+        return tStream
+
+
+    def testInsertBeforeIndex0(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(0, "0")
+
+        result = tokens.toString()
+        expecting = "0abc"
+        self.assertEqual(result, expecting)
+
+
+    def testInsertAfterLastIndex(self):
+        tokens = self._parse("abc")
+        tokens.insertAfter(2, "x")
+
+        result = tokens.toString()
+        expecting = "abcx"
+        self.assertEqual(result, expecting)
+
+
+    def test2InsertBeforeAfterMiddleIndex(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(1, "x")
+        tokens.insertAfter(1, "x")
+
+        result = tokens.toString()
+        expecting = "axbxc"
+        self.assertEqual(result, expecting)
+
+
+    def testReplaceIndex0(self):
+        tokens = self._parse("abc")
+        tokens.replace(0, "x")
+
+        result = tokens.toString()
+        expecting = "xbc"
+        self.assertEqual(result, expecting)
+
+
+    def testReplaceLastIndex(self):
+        tokens = self._parse("abc")
+        tokens.replace(2, "x")
+
+        result = tokens.toString()
+        expecting = "abx"
+        self.assertEqual(result, expecting)
+
+
+    def testReplaceMiddleIndex(self):
+        tokens = self._parse("abc")
+        tokens.replace(1, "x")
+
+        result = tokens.toString()
+        expecting = "axc"
+        self.assertEqual(result, expecting)
+
+
+    def test2ReplaceMiddleIndex(self):
+        tokens = self._parse("abc")
+        tokens.replace(1, "x")
+        tokens.replace(1, "y")
+
+        result = tokens.toString()
+        expecting = "ayc"
+        self.assertEqual(result, expecting)
+
+
+    def test2ReplaceMiddleIndex1InsertBefore(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(0, "_")
+        tokens.replace(1, "x")
+        tokens.replace(1, "y")
+
+        result = tokens.toString()
+        expecting = "_ayc"
+        self.assertEqual(expecting, result)
+
+
+    def testReplaceThenDeleteMiddleIndex(self):
+        tokens = self._parse("abc")
+        tokens.replace(1, "x")
+        tokens.delete(1)
+
+        result = tokens.toString()
+        expecting = "ac"
+        self.assertEqual(result, expecting)
+
+
+    def testInsertInPriorReplace(self):
+        tokens = self._parse("abc")
+        tokens.replace(0, 2, "x")
+        tokens.insertBefore(1, "0")
+        self.assertRaisesRegex(
+            ValueError,
+            (r'insert op <InsertBeforeOp@1:"0"> within boundaries of '
+             r'previous <ReplaceOp@0\.\.2:"x">'),
+            tokens.toString)
+
+    def testInsertThenReplaceSameIndex(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(0, "0")
+        tokens.replace(0, "x")  # supersedes insert at 0
+
+        result = tokens.toString()
+        expecting = "0xbc"
+        self.assertEqual(result, expecting)
+
+
+    def test2InsertMiddleIndex(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(1, "x")
+        tokens.insertBefore(1, "y")
+
+        result = tokens.toString()
+        expecting = "ayxbc"
+        self.assertEqual(result, expecting)
+
+
+    def test2InsertThenReplaceIndex0(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(0, "x")
+        tokens.insertBefore(0, "y")
+        tokens.replace(0, "z")
+
+        result = tokens.toString()
+        expecting = "yxzbc"
+        self.assertEqual(result, expecting)
+
+
+    def testReplaceThenInsertBeforeLastIndex(self):
+        tokens = self._parse("abc")
+        tokens.replace(2, "x")
+        tokens.insertBefore(2, "y")
+
+        result = tokens.toString()
+        expecting = "abyx"
+        self.assertEqual(result, expecting)
+
+
+    def testInsertThenReplaceLastIndex(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(2, "y")
+        tokens.replace(2, "x")
+
+        result = tokens.toString()
+        expecting = "abyx"
+        self.assertEqual(result, expecting)
+
+
+    def testReplaceThenInsertAfterLastIndex(self):
+        tokens = self._parse("abc")
+        tokens.replace(2, "x")
+        tokens.insertAfter(2, "y")
+
+        result = tokens.toString()
+        expecting = "abxy"
+        self.assertEqual(result, expecting)
+
+
+    def testReplaceRangeThenInsertAtLeftEdge(self):
+        tokens = self._parse("abcccba")
+        tokens.replace(2, 4, "x")
+        tokens.insertBefore(2, "y")
+
+        result = tokens.toString()
+        expecting = "abyxba"
+        self.assertEqual(result, expecting)
+
+
+    def testReplaceRangeThenInsertAtRightEdge(self):
+        tokens = self._parse("abcccba")
+        tokens.replace(2, 4, "x")
+        tokens.insertBefore(4, "y") # within boundaries of the previous replace; raises
+
+        self.assertRaisesRegex(
+            ValueError,
+            (r'insert op <InsertBeforeOp@4:"y"> within boundaries of '
+             r'previous <ReplaceOp@2\.\.4:"x">'),
+            tokens.toString)
+
+
+    def testReplaceRangeThenInsertAfterRightEdge(self):
+        tokens = self._parse("abcccba")
+        tokens.replace(2, 4, "x")
+        tokens.insertAfter(4, "y")
+
+        result = tokens.toString()
+        expecting = "abxyba"
+        self.assertEqual(result, expecting)
+
+
+    def testReplaceAll(self):
+        tokens = self._parse("abcccba")
+        tokens.replace(0, 6, "x")
+
+        result = tokens.toString()
+        expecting = "x"
+        self.assertEqual(result, expecting)
+
+
+    def testReplaceSubsetThenFetch(self):
+        tokens = self._parse("abcccba")
+        tokens.replace(2, 4, "xyz")
+
+        result = tokens.toString(0, 6)
+        expecting = "abxyzba"
+        self.assertEqual(result, expecting)
+
+
+    def testReplaceThenReplaceSuperset(self):
+        tokens = self._parse("abcccba")
+        tokens.replace(2, 4, "xyz")
+        tokens.replace(3, 5, "foo") # overlaps, error
+
+        self.assertRaisesRegex(
+            ValueError,
+            (r'replace op boundaries of <ReplaceOp@3\.\.5:"foo"> overlap '
+             r'with previous <ReplaceOp@2\.\.4:"xyz">'),
+            tokens.toString)
+
+
+    def testReplaceThenReplaceLowerIndexedSuperset(self):
+        tokens = self._parse("abcccba")
+        tokens.replace(2, 4, "xyz")
+        tokens.replace(1, 3, "foo") # overlap, error
+
+        self.assertRaisesRegex(
+            ValueError,
+            (r'replace op boundaries of <ReplaceOp@1\.\.3:"foo"> overlap '
+             r'with previous <ReplaceOp@2\.\.4:"xyz">'),
+            tokens.toString)
+
+
+    def testReplaceSingleMiddleThenOverlappingSuperset(self):
+        tokens = self._parse("abcba")
+        tokens.replace(2, 2, "xyz")
+        tokens.replace(0, 3, "foo")
+
+        result = tokens.toString()
+        expecting = "fooa"
+        self.assertEqual(result, expecting)
+
+
+    def testCombineInserts(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(0, "x")
+        tokens.insertBefore(0, "y")
+        result = tokens.toString()
+        expecting = "yxabc"
+        self.assertEqual(expecting, result)
+
+
+    def testCombine3Inserts(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(1, "x")
+        tokens.insertBefore(0, "y")
+        tokens.insertBefore(1, "z")
+        result = tokens.toString()
+        expecting = "yazxbc"
+        self.assertEqual(expecting, result)
+
+
+    def testCombineInsertOnLeftWithReplace(self):
+        tokens = self._parse("abc")
+        tokens.replace(0, 2, "foo")
+        tokens.insertBefore(0, "z") # combine with left edge of rewrite
+        result = tokens.toString()
+        expecting = "zfoo"
+        self.assertEqual(expecting, result)
+
+
+    def testCombineInsertOnLeftWithDelete(self):
+        tokens = self._parse("abc")
+        tokens.delete(0, 2)
+        tokens.insertBefore(0, "z") # combine with left edge of rewrite
+        result = tokens.toString()
+        expecting = "z" # make sure combo is not znull
+        self.assertEqual(expecting, result)
+
+
+    def testDisjointInserts(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(1, "x")
+        tokens.insertBefore(2, "y")
+        tokens.insertBefore(0, "z")
+        result = tokens.toString()
+        expecting = "zaxbyc"
+        self.assertEqual(expecting, result)
+
+
+    def testOverlappingReplace(self):
+        tokens = self._parse("abcc")
+        tokens.replace(1, 2, "foo")
+        tokens.replace(0, 3, "bar") # wipes prior nested replace
+        result = tokens.toString()
+        expecting = "bar"
+        self.assertEqual(expecting, result)
+
+
+    def testOverlappingReplace2(self):
+        tokens = self._parse("abcc")
+        tokens.replace(0, 3, "bar")
+        tokens.replace(1, 2, "foo") # cannot split earlier replace
+
+        self.assertRaisesRegex(
+            ValueError,
+            (r'replace op boundaries of <ReplaceOp@1\.\.2:"foo"> overlap '
+             r'with previous <ReplaceOp@0\.\.3:"bar">'),
+            tokens.toString)
+
+
+    def testOverlappingReplace3(self):
+        tokens = self._parse("abcc")
+        tokens.replace(1, 2, "foo")
+        tokens.replace(0, 2, "bar") # wipes prior nested replace
+        result = tokens.toString()
+        expecting = "barc"
+        self.assertEqual(expecting, result)
+
+
+    def testOverlappingReplace4(self):
+        tokens = self._parse("abcc")
+        tokens.replace(1, 2, "foo")
+        tokens.replace(1, 3, "bar") # wipes prior nested replace
+        result = tokens.toString()
+        expecting = "abar"
+        self.assertEqual(expecting, result)
+
+
+    def testDropIdenticalReplace(self):
+        tokens = self._parse("abcc")
+        tokens.replace(1, 2, "foo")
+        tokens.replace(1, 2, "foo") # drop previous, identical
+        result = tokens.toString()
+        expecting = "afooc"
+        self.assertEqual(expecting, result)
+
+
+    def testDropPrevCoveredInsert(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(1, "foo")
+        tokens.replace(1, 2, "foo") # kill prev insert
+        result = tokens.toString()
+        expecting = "afoofoo"
+        self.assertEqual(expecting, result)
+
+
+    def testLeaveAloneDisjointInsert(self):
+        tokens = self._parse("abcc")
+        tokens.insertBefore(1, "x")
+        tokens.replace(2, 3, "foo")
+        result = tokens.toString()
+        expecting = "axbfoo"
+        self.assertEqual(expecting, result)
+
+
+    def testLeaveAloneDisjointInsert2(self):
+        tokens = self._parse("abcc")
+        tokens.replace(2, 3, "foo")
+        tokens.insertBefore(1, "x")
+        result = tokens.toString()
+        expecting = "axbfoo"
+        self.assertEqual(expecting, result)
+
+
+    def testInsertBeforeTokenThenDeleteThatToken(self):
+        tokens = self._parse("abc")
+        tokens.insertBefore(2, "y")
+        tokens.delete(2)
+        result = tokens.toString()
+        expecting = "aby"
+        self.assertEqual(expecting, result)
+
+
+class T2(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar('t048rewrite2.g')
+
+
+    def _parse(self, input):
+        cStream = antlr3.StringStream(input)
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.TokenRewriteStream(lexer)
+        tStream.fillBuffer()
+
+        return tStream
+
+
+    def testToStringStartStop(self):
+        # Tokens: 0123456789
+        # Input:  x = 3 * 0;
+        tokens = self._parse("x = 3 * 0;")
+        tokens.replace(4, 8, "0") # replace 3 * 0 with 0
+
+        result = tokens.toOriginalString()
+        expecting = "x = 3 * 0;"
+        self.assertEqual(expecting, result)
+
+        result = tokens.toString()
+        expecting = "x = 0;"
+        self.assertEqual(expecting, result)
+
+        result = tokens.toString(0, 9)
+        expecting = "x = 0;"
+        self.assertEqual(expecting, result)
+
+        result = tokens.toString(4, 8)
+        expecting = "0"
+        self.assertEqual(expecting, result)
+
+
+    def testToStringStartStop2(self):
+        # Tokens: 012345678901234567
+        # Input:  x = 3 * 0 + 2 * 0;
+        tokens = self._parse("x = 3 * 0 + 2 * 0;")
+
+        result = tokens.toOriginalString()
+        expecting = "x = 3 * 0 + 2 * 0;"
+        self.assertEqual(expecting, result)
+
+        tokens.replace(4, 8, "0") # replace 3 * 0 with 0
+        result = tokens.toString()
+        expecting = "x = 0 + 2 * 0;"
+        self.assertEqual(expecting, result)
+
+        result = tokens.toString(0, 17)
+        expecting = "x = 0 + 2 * 0;"
+        self.assertEqual(expecting, result)
+
+        result = tokens.toString(4, 8)
+        expecting = "0"
+        self.assertEqual(expecting, result)
+
+        result = tokens.toString(0, 8)
+        expecting = "x = 0"
+        self.assertEqual(expecting, result)
+
+        result = tokens.toString(12, 16)
+        expecting = "2 * 0"
+        self.assertEqual(expecting, result)
+
+        tokens.insertAfter(17, "// comment")
+        result = tokens.toString(12, 18)
+        expecting = "2 * 0;// comment"
+        self.assertEqual(expecting, result)
+
+        result = tokens.toString(0, 8) # try again after insert at end
+        expecting = "x = 0"
+        self.assertEqual(expecting, result)
+
+
+if __name__ == '__main__':
+    unittest.main()
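The tests above exercise the lazy-edit semantics of antlr3.TokenRewriteStream:
inserts and replaces are queued against token indexes and only applied when the
stream is rendered. A minimal standalone sketch of the same API, assuming the
lexer generated from t048rewrite.g is importable as t048rewriteLexer (the usual
ANTLR 3 naming convention):

    import antlr3
    from t048rewriteLexer import t048rewriteLexer

    stream = antlr3.StringStream("abc")
    tokens = antlr3.TokenRewriteStream(t048rewriteLexer(stream))
    tokens.fillBuffer()

    tokens.insertBefore(1, "x")       # queue an insert before token 1 ('b')
    tokens.replace(2, "y")            # queue a replace of token 2 ('c')
    print(tokens.toOriginalString())  # "abc"  -- the original text is untouched
    print(tokens.toString())          # "axby" -- edits are applied only on rendering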
diff --git a/runtime/Python3/tests/t048rewrite2.g b/runtime/Python3/tests/t048rewrite2.g
new file mode 100644
index 0000000..60178d7
--- /dev/null
+++ b/runtime/Python3/tests/t048rewrite2.g
@@ -0,0 +1,12 @@
+lexer grammar t048rewrite2;
+options {
+    language=Python3;
+}
+
+ID : 'a'..'z'+;
+INT : '0'..'9'+;
+SEMI : ';';
+PLUS : '+';
+MUL : '*';
+ASSIGN : '=';
+WS : ' '+;
diff --git a/runtime/Python3/tests/t049treeparser.py b/runtime/Python3/tests/t049treeparser.py
new file mode 100644
index 0000000..ec77618
--- /dev/null
+++ b/runtime/Python3/tests/t049treeparser.py
@@ -0,0 +1,477 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+
+class T(testbase.ANTLRTest):
+    def walkerClass(self, base):
+        class TWalker(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+            
+        return TWalker
+    
+
+    def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
+        lexerCls, parserCls = self.compileInlineGrammar(grammar)
+        walkerCls = self.compileInlineGrammar(treeGrammar)
+
+        cStream = antlr3.StringStream(input)
+        lexer = lexerCls(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = parserCls(tStream)
+        r = getattr(parser, grammarEntry)()
+        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+        nodes.setTokenStream(tStream)
+        walker = walkerCls(nodes)
+        getattr(walker, treeEntry)()
+
+        return walker._output
+    
+
+    def testFlatList(self):
+        grammar = textwrap.dedent(
+        r'''grammar T;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+        
+        treeGrammar = textwrap.dedent(
+        r'''tree grammar TP;
+        options {
+            language=Python3;
+            ASTLabelType=CommonTree;
+        }
+        a : ID INT
+            {self.capture("{}, {}".format($ID, $INT))}
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34"
+            )
+
+        self.assertEqual("abc, 34", found)
+        
+
+
+    def testSimpleTree(self):
+        grammar = textwrap.dedent(
+            r'''grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : ID INT -> ^(ID INT);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''tree grammar TP;
+            options {
+                language=Python3;
+                ASTLabelType=CommonTree;
+            }
+            a : ^(ID INT)
+                {self.capture(str($ID)+", "+str($INT))}
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34"
+            )
+            
+        self.assertEqual("abc, 34", found)
+
+
+    def testFlatVsTreeDecision(self):
+        grammar = textwrap.dedent(
+            r'''grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : b c ;
+            b : ID INT -> ^(ID INT);
+            c : ID INT;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\\n') {$channel=HIDDEN;} ;
+            ''')
+        
+        treeGrammar = textwrap.dedent(
+            r'''tree grammar TP;
+            options {
+                language=Python3;
+                ASTLabelType=CommonTree;
+            }
+            a : b b ;
+            b : ID INT    {self.capture(str($ID)+" "+str($INT)+'\n')}
+              | ^(ID INT) {self.capture("^("+str($ID)+" "+str($INT)+')');}
+              ;
+            ''')
+        
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a 1 b 2"
+            )
+        self.assertEqual("^(a 1)b 2\n", found)
+
+
+    def testFlatVsTreeDecision2(self):
+        grammar = textwrap.dedent(
+            r"""grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : b c ;
+            b : ID INT+ -> ^(ID INT+);
+            c : ID INT+;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        treeGrammar = textwrap.dedent(
+            r'''tree grammar TP;
+            options {
+                language=Python3;
+                ASTLabelType=CommonTree;
+            }
+            a : b b ;
+            b : ID INT+    {self.capture(str($ID)+" "+str($INT)+"\n")}
+              | ^(x=ID (y=INT)+) {self.capture("^("+str($x)+' '+str($y)+')')}
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a 1 2 3 b 4 5"
+            )
+        self.assertEqual("^(a 3)b 5\n", found)
+
+
+    def testCyclicDFALookahead(self):
+        grammar = textwrap.dedent(
+            r'''grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : ID INT+ PERIOD;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            SEMI : ';' ;
+            PERIOD : '.' ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''tree grammar TP;
+            options {
+                language=Python3;
+                ASTLabelType=CommonTree;
+            }
+            a : ID INT+ PERIOD {self.capture("alt 1")}
+              | ID INT+ SEMI   {self.capture("alt 2")}
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a 1 2 3."
+            )
+        self.assertEqual("alt 1", found)
+
+
+    def testNullableChildList(self):
+        grammar = textwrap.dedent(
+            r'''grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : ID INT? -> ^(ID INT?);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\\n') {$channel=HIDDEN;} ;
+            ''')
+        
+        treeGrammar = textwrap.dedent(
+            r'''tree grammar TP;
+            options {
+                language=Python3;
+                ASTLabelType=CommonTree;
+            }
+            a : ^(ID INT?)
+                {self.capture(str($ID))}
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc"
+            )
+        self.assertEqual("abc", found)
+
+
+    def testNullableChildList2(self):
+        grammar = textwrap.dedent(
+            r'''grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : ID INT? SEMI -> ^(ID INT?) SEMI ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            SEMI : ';' ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''tree grammar TP;
+            options {
+                language=Python3;
+                ASTLabelType=CommonTree;
+            }
+            a : ^(ID INT?) SEMI
+                {self.capture(str($ID))}
+              ;
+            ''')
+        
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc;"
+            )
+        self.assertEqual("abc", found)
+
+
+    def testNullableChildList3(self):
+        grammar = textwrap.dedent(
+            r'''grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : x=ID INT? (y=ID)? SEMI -> ^($x INT? $y?) SEMI ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            SEMI : ';' ;
+            WS : (' '|'\\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''tree grammar TP;
+            options {
+                language=Python3;
+                ASTLabelType=CommonTree;
+            }
+            a : ^(ID INT? b) SEMI
+                {self.capture(str($ID)+", "+str($b.text))}
+              ;
+            b : ID? ;
+            ''')
+        
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc def;"
+            )
+        self.assertEqual("abc, def", found)
+
+
+    def testActionsAfterRoot(self):
+        grammar = textwrap.dedent(
+            r'''grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : x=ID INT? SEMI -> ^($x INT?) ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            SEMI : ';' ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''tree grammar TP;
+            options {
+                language=Python3;
+                ASTLabelType=CommonTree;
+            }
+            a @init {x=0} : ^(ID {x=1} {x=2} INT?)
+                {self.capture(str($ID)+", "+str(x))}
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc;"
+            )
+        self.assertEqual("abc, 2", found)
+
+
+    def testWildcardLookahead(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID '+'^ INT;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            SEMI : ';' ;
+            PERIOD : '.' ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP; 
+            options {language=Python3; tokenVocab=T; ASTLabelType=CommonTree;}
+            a : ^('+' . INT) { self.capture("alt 1") }
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a + 2")
+        self.assertEqual("alt 1", found)
+
+
+    def testWildcardLookahead2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID '+'^ INT;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            SEMI : ';' ;
+            PERIOD : '.' ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3; tokenVocab=T; ASTLabelType=CommonTree;}
+            a : ^('+' . INT) { self.capture("alt 1") }
+              | ^('+' . .)   { self.capture("alt 2") }
+              ;
+            ''')
+
+        # Ambiguous upon '+' DOWN INT UP etc., but the ambiguity is harmless here.
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a + 2")
+        self.assertEqual("alt 1", found)
+
+
+    def testWildcardLookahead3(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID '+'^ INT;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            SEMI : ';' ;
+            PERIOD : '.' ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3; tokenVocab=T; ASTLabelType=CommonTree;}
+            a : ^('+' ID INT) { self.capture("alt 1") }
+              | ^('+' . .)   { self.capture("alt 2") }
+              ;
+            ''')
+
+        # Ambiguous upon '+' DOWN INT UP etc., but the ambiguity is harmless here.
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a + 2")
+        self.assertEqual("alt 1", found)
+
+
+    def testWildcardPlusLookahead(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID '+'^ INT;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            SEMI : ';' ;
+            PERIOD : '.' ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3; tokenVocab=T; ASTLabelType=CommonTree;}
+            a : ^('+' INT INT ) { self.capture("alt 1") }
+              | ^('+' .+)   { self.capture("alt 2") }
+              ;
+            ''')
+
+        # Ambiguous upon '+' DOWN INT UP etc., but the ambiguity is harmless here.
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a + 2")
+        self.assertEqual("alt 2", found)
+
+
+if __name__ == '__main__':
+    unittest.main()
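Each tree-parser test above drives the same pipeline through execTreeParser().
Outside the test harness it looks roughly like this (a sketch; TLexer, TParser
and TP stand for the classes ANTLR would generate from the inline grammars
named T and TP):

    import antlr3
    import antlr3.tree
    from TLexer import TLexer
    from TParser import TParser
    from TP import TP

    char_stream = antlr3.StringStream("abc 34")
    token_stream = antlr3.CommonTokenStream(TLexer(char_stream))
    parser = TParser(token_stream)
    r = parser.a()                                    # parse rule 'a', building the AST

    nodes = antlr3.tree.CommonTreeNodeStream(r.tree)  # expose the AST as a node stream
    nodes.setTokenStream(token_stream)
    walker = TP(nodes)
    walker.a()                                        # walk the tree with tree rule 'a'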
diff --git a/runtime/Python3/tests/t050decorate.g b/runtime/Python3/tests/t050decorate.g
new file mode 100644
index 0000000..50e54e7
--- /dev/null
+++ b/runtime/Python3/tests/t050decorate.g
@@ -0,0 +1,29 @@
+grammar t050decorate;
+options {
+  language = Python3;
+}
+
+@header {
+    def logme(func):
+        def decorated(self, *args, **kwargs):
+            self.events.append('before')
+            try:
+                return func(self, *args, **kwargs)
+            finally:
+                self.events.append('after')
+
+        return decorated
+}
+
+@parser::init {
+self.events = []
+}
+
+document
+@decorate {
+    @logme
+}
+    : IDENTIFIER
+    ;
+
+IDENTIFIER: ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*;
diff --git a/runtime/Python3/tests/t050decorate.py b/runtime/Python3/tests/t050decorate.py
new file mode 100644
index 0000000..b5337a6
--- /dev/null
+++ b/runtime/Python3/tests/t050decorate.py
@@ -0,0 +1,21 @@
+import antlr3
+import testbase
+import unittest
+
+class t050decorate(testbase.ANTLRTest):
+    def setUp(self):
+        self.compileGrammar()
+        
+        
+    def testValid(self):
+        cStream = antlr3.StringStream('foobar')
+        lexer = self.getLexer(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = self.getParser(tStream)
+        parser.document()
+
+        self.assertEqual(parser.events, ['before', 'after'])
+          
+
+if __name__ == '__main__':
+    unittest.main()
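The @decorate section in t050decorate.g wraps the generated rule method with
the logme decorator defined in the grammar's @header, which is why the test
expects parser.events == ['before', 'after']. Conceptually it behaves like this
plain-Python sketch (independent of ANTLR's actual code generation; Parser and
document are stand-ins for the generated classes):

    def logme(func):
        def decorated(self, *args, **kwargs):
            self.events.append('before')
            try:
                return func(self, *args, **kwargs)
            finally:
                self.events.append('after')
        return decorated

    class Parser:
        def __init__(self):
            self.events = []    # mirrors the @parser::init section

        @logme
        def document(self):     # stands in for the generated rule method
            pass

    p = Parser()
    p.document()
    assert p.events == ['before', 'after']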
diff --git a/runtime/Python3/tests/t051treeRewriteAST.py b/runtime/Python3/tests/t051treeRewriteAST.py
new file mode 100644
index 0000000..3c9ced6
--- /dev/null
+++ b/runtime/Python3/tests/t051treeRewriteAST.py
@@ -0,0 +1,1565 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+
+class T(testbase.ANTLRTest):
+    def walkerClass(self, base):
+        class TWalker(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+                self.buf = ""
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TWalker
+
+
+    def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
+        lexerCls, parserCls = self.compileInlineGrammar(grammar)
+        walkerCls = self.compileInlineGrammar(treeGrammar)
+
+        cStream = antlr3.StringStream(input)
+        lexer = lexerCls(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = parserCls(tStream)
+        r = getattr(parser, grammarEntry)()
+        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+        nodes.setTokenStream(tStream)
+        walker = walkerCls(nodes)
+        r = getattr(walker, treeEntry)()
+
+        if r.tree:
+            return r.tree.toStringTree()
+
+        return ""
+
+
+    def testFlatList(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T1;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP1;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T1;
+        }
+
+        a : ID INT -> INT ID;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34"
+            )
+
+        self.assertEqual("34 abc", found)
+
+
+    def testSimpleTree(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T2;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT -> ^(ID INT);
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP2;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T2;
+        }
+        a : ^(ID INT) -> ^(INT ID);
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34"
+            )
+
+        self.assertEqual("(34 abc)", found)
+
+
+    def testCombinedRewriteAndAuto(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T3;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT -> ^(ID INT) | INT ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP3;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T3;
+        }
+        a : ^(ID INT) -> ^(INT ID) | INT;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34"
+            )
+
+        self.assertEqual("(34 abc)", found)
+
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "34"
+            )
+
+        self.assertEqual("34", found)
+
+
+    def testAvoidDup(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T4;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP4;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T4;
+        }
+        a : ID -> ^(ID ID);
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc"
+            )
+
+        self.assertEqual("(abc abc)", found)
+
+
+    def testLoop(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T5;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID+ INT+ -> (^(ID INT))+ ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP5;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T5;
+        }
+        a : (^(ID INT))+ -> INT+ ID+;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a b c 3 4 5"
+            )
+
+        self.assertEqual("3 4 5 a b c", found)
+
+
+    def testAutoDup(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T6;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP6;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T6;
+        }
+        a : ID;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc"
+            )
+
+        self.assertEqual("abc", found)
+
+
+    def testAutoDupRule(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T7;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP7;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T7;
+        }
+        a : b c ;
+        b : ID ;
+        c : INT ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a 1"
+            )
+
+        self.assertEqual("a 1", found)
+
+
+    def testAutoWildcard(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3;output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+            a : ID .
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34")
+        self.assertEqual("abc 34", found)
+
+
+    def testAutoWildcard2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT -> ^(ID INT);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3;output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+            a : ^(ID .)
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34")
+        self.assertEqual("(abc 34)", found)
+
+
+    def testAutoWildcardWithLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3;output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+            a : ID c=.
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34")
+        self.assertEqual("abc 34", found)
+
+
+    def testAutoWildcardWithListLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3;output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+            a : ID c+=.
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34")
+        self.assertEqual("abc 34", found)
+
+
+    def testAutoDupMultiple(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T8;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID ID INT;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP8;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T8;
+        }
+        a : ID ID INT
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a b 3"
+            )
+
+        self.assertEqual("a b 3", found)
+
+
+    def testAutoDupTree(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T9;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT -> ^(ID INT);
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP9;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T9;
+        }
+        a : ^(ID INT)
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a 3"
+            )
+
+        self.assertEqual("(a 3)", found)
+
+
+    def testAutoDupTreeWithLabels(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T10;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT -> ^(ID INT);
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP10;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T10;
+        }
+        a : ^(x=ID y=INT)
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a 3"
+            )
+
+        self.assertEqual("(a 3)", found)
+
+
+    def testAutoDupTreeWithListLabels(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T11;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT -> ^(ID INT);
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP11;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T11;
+        }
+        a : ^(x+=ID y+=INT)
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a 3"
+            )
+
+        self.assertEqual("(a 3)", found)
+
+
+    def testAutoDupTreeWithRuleRoot(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T12;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT -> ^(ID INT);
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP12;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T12;
+        }
+        a : ^(b INT) ;
+        b : ID ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a 3"
+            )
+
+        self.assertEqual("(a 3)", found)
+
+
+    def testAutoDupTreeWithRuleRootAndLabels(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T13;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT -> ^(ID INT);
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP13;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T13;
+        }
+        a : ^(x=b INT) ;
+        b : ID ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a 3"
+            )
+
+        self.assertEqual("(a 3)", found)
+
+
+    def testAutoDupTreeWithRuleRootAndListLabels(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T14;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT -> ^(ID INT);
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP14;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T14;
+        }
+        a : ^(x+=b y+=c) ;
+        b : ID ;
+        c : INT ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a 3"
+            )
+
+        self.assertEqual("(a 3)", found)
+
+
+    def testAutoDupNestedTree(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T15;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : x=ID y=ID INT -> ^($x ^($y INT));
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP15;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T15;
+        }
+        a : ^(ID ^(ID INT))
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "a b 3"
+            )
+
+        self.assertEqual("(a (b 3))", found)
+
+
+    def testDelete(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T16;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP16;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T16;
+        }
+        a : ID ->
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc"
+            )
+
+        self.assertEqual("", found)
+
+    def testSetMatchNoRewrite(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : ID INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {
+                language=Python3;
+                output=AST;
+                ASTLabelType=CommonTree;
+                tokenVocab=T;
+            }
+            a : b INT;
+            b : ID | INT;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34"
+            )
+
+        self.assertEqual("abc 34", found)
+
+
+    def testSetOptionalMatchNoRewrite(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : ID INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {
+                language=Python3;
+                output=AST;
+                ASTLabelType=CommonTree;
+                tokenVocab=T;
+            }
+            a : (ID|INT)? INT ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34")
+
+        self.assertEqual("abc 34", found)
+
+
+    def testSetMatchNoRewriteLevel2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : x=ID INT -> ^($x INT);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {
+                language=Python3;
+                output=AST;
+                ASTLabelType=CommonTree;
+                tokenVocab=T;
+            }
+            a : ^(ID (ID | INT) ) ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34"
+            )
+
+        self.assertEqual("(abc 34)", found)
+
+
+    def testSetMatchNoRewriteLevel2Root(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : x=ID INT -> ^($x INT);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {
+                language=Python3;
+                output=AST;
+                ASTLabelType=CommonTree;
+                tokenVocab=T;
+            }
+            a : ^((ID | INT) INT) ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34"
+            )
+
+        self.assertEqual("(abc 34)", found)
+
+
+    ## REWRITE MODE
+
+    def testRewriteModeCombinedRewriteAndAuto(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T17;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT -> ^(ID INT) | INT ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP17;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T17;
+            rewrite=true;
+        }
+        a : ^(ID INT) -> ^(ID["ick"] INT)
+          | INT // leaves it alone, returning $a.start
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc 34"
+            )
+
+        self.assertEqual("(ick 34)", found)
+
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "34"
+            )
+
+        self.assertEqual("34", found)
+
+
+    def testRewriteModeFlatTree(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T18;
+            options {
+              language=Python3;
+              output=AST;
+            }
+            a : ID INT -> ID INT | INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP18;
+            options {
+              language=Python3;
+              output=AST;
+              ASTLabelType=CommonTree;
+              tokenVocab=T18;
+              rewrite=true;
+            }
+            s : ID a ;
+            a : INT -> INT["1"]
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34"
+            )
+        self.assertEqual("abc 1", found)
+
+
+    def testRewriteModeChainRuleFlatTree(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT -> ID INT | INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            s : a ;
+            a : b ;
+            b : ID INT -> INT ID
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34")
+        self.assertEqual("34 abc", found)
+
+
+    def testRewriteModeChainRuleTree(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT -> ^(ID INT) ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            s : a ;
+            a : b ; // a.tree must become b.tree
+            b : ^(ID INT) -> INT
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34")
+        self.assertEqual("34", found)
+
+
+    def testRewriteModeChainRuleTree2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT -> ^(ID INT) ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            tokens { X; }
+            s : a* b ; // only b contributes to tree, but it's after a*; s.tree = b.tree
+            a : X ;
+            b : ^(ID INT) -> INT
+              ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34")
+        self.assertEqual("34", found)
+
+
+    def testRewriteModeChainRuleTree3(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : 'boo' ID INT -> 'boo' ^(ID INT) ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            tokens { X; }
+            s : 'boo' a* b ; // don't reset s.tree to b.tree due to 'boo'
+            a : X ;
+            b : ^(ID INT) -> INT
+              ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "boo abc 34")
+        self.assertEqual("boo 34", found)
+
+
+    def testRewriteModeChainRuleTree4(self):
+        grammar = textwrap.dedent(
+            r"""
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            tokens { X; }
+            s : ^('boo' a* b) ; // don't reset s.tree to b.tree due to 'boo'
+            a : X ;
+            b : ^(ID INT) -> INT
+              ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "boo abc 34")
+        self.assertEqual("(boo 34)", found)
+
+
+    def testRewriteModeChainRuleTree5(self):
+        grammar = textwrap.dedent(
+            r"""
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            tokens { X; }
+            s : ^(a b) ; // s.tree is a.tree
+            a : 'boo' ;
+            b : ^(ID INT) -> INT
+              ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "boo abc 34")
+        self.assertEqual("(boo 34)", found)
+
+
+    def testRewriteOfRuleRef(self):
+        grammar = textwrap.dedent(
+            r"""
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT -> ID INT | INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            s : a -> a ;
+            a : ID INT -> ID INT ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34")
+        self.assertEqual("abc 34", found)
+
+
+    def testRewriteOfRuleRefRoot(self):
+        grammar = textwrap.dedent(
+            r"""
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT INT -> ^(INT ^(ID INT));
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            s : ^(a ^(ID INT)) -> a ;
+            a : INT ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 12 34")
+        # Emits the whole tree when you reference the root, since the
+        # generator cannot know whether you want the children or not; you
+        # might be returning a whole new tree.
+        self.assertEqual("(12 (abc 34))", found)
+
+
+    def testRewriteOfRuleRefRootLabeled(self):
+        grammar = textwrap.dedent(
+            r"""
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT INT -> ^(INT ^(ID INT));
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            s : ^(label=a ^(ID INT)) -> a ;
+            a : INT ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 12 34")
+        # Emits the whole tree when you reference the root, since the
+        # generator cannot know whether you want the children or not; you
+        # might be returning a whole new tree.
+        self.assertEqual("(12 (abc 34))", found)
+
+
+    def testRewriteOfRuleRefRootListLabeled(self):
+        grammar = textwrap.dedent(
+            r"""
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT INT -> ^(INT ^(ID INT));
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            s : ^(label+=a ^(ID INT)) -> a ;
+            a : INT ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 12 34")
+        # Emits the whole tree when you reference the root, since the
+        # generator cannot know whether you want the children or not; you
+        # might be returning a whole new tree.
+        self.assertEqual("(12 (abc 34))", found)
+
+
+    def testRewriteOfRuleRefChild(self):
+        grammar = textwrap.dedent(
+            r"""
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT -> ^(ID ^(INT INT));
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            s : ^(ID a) -> a ;
+            a : ^(INT INT) ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34")
+        self.assertEqual("(34 34)", found)
+
+
+    def testRewriteOfRuleRefLabel(self):
+        grammar = textwrap.dedent(
+            r"""
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT -> ^(ID ^(INT INT));
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            s : ^(ID label=a) -> a ;
+            a : ^(INT INT) ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34")
+        self.assertEqual("(34 34)", found)
+
+
+    def testRewriteOfRuleRefListLabel(self):
+        grammar = textwrap.dedent(
+            r"""
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT -> ^(ID ^(INT INT));
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        treeGrammar = textwrap.dedent(
+            r"""
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            s : ^(ID label+=a) -> a ;
+            a : ^(INT INT) ;
+            """)
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34")
+        self.assertEqual("(34 34)", found)
+
+
+
+    def testRewriteModeWithPredicatedRewrites(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T19;
+            options {
+              language=Python3;
+              output=AST;
+            }
+            a : ID INT -> ^(ID["root"] ^(ID INT)) | INT -> ^(ID["root"] INT) ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP19;
+            options {
+              language=Python3;
+              output=AST;
+              ASTLabelType=CommonTree;
+              tokenVocab=T19;
+              rewrite=true;
+            }
+            s : ^(ID a) { self.buf += $s.start.toStringTree() };
+            a : ^(ID INT) -> {True}? ^(ID["ick"] INT)
+                          -> INT
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34"
+            )
+
+        self.assertEqual("(root (ick 34))", found)
+
+
+    def testWildcardSingleNode(self):
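+        # The tree-grammar wildcard '.' matches any single node; labelling it
+        # (c=.) and rewriting with -> $c emits just that node, here the INT
+        # child "34" of the ^(ID INT) tree built by the parser.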
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : ID INT -> ^(ID["root"] INT);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {
+                language=Python3;
+                output=AST;
+                ASTLabelType=CommonTree;
+                tokenVocab=T;
+            }
+            s : ^(ID c=.) -> $c
+            ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34"
+            )
+
+        self.assertEqual("34", found)
+
+    def testWildcardUnlabeledSingleNode(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID INT -> ^(ID INT);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+            s : ^(ID .) -> ID
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 34")
+        self.assertEqual("abc", found)
+
+
+    def testWildcardGrabsSubtree(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID x=INT y=INT z=INT -> ^(ID["root"] ^($x $y $z));
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+            s : ^(ID c=.) -> $c
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 1 2 3")
+        self.assertEqual("(1 2 3)", found)
+
+
+    def testWildcardGrabsSubtree2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : ID x=INT y=INT z=INT -> ID ^($x $y $z);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+            s : ID c=. -> $c
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "abc 1 2 3")
+        self.assertEqual("(1 2 3)", found)
+
+
+    def testWildcardListLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST;}
+            a : INT INT INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T;}
+            s : (c+=.)+ -> $c+
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "1 2 3")
+        self.assertEqual("1 2 3", found)
+
+
+    def testWildcardListLabel2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree;}
+            a  : x=INT y=INT z=INT -> ^($x ^($y $z) ^($y $z));
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {language=Python3; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
+            s : ^(INT (c+=.)+) -> $c+
+              ;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 's',
+            "1 2 3")
+        self.assertEqual("(2 3) (2 3)", found)
+
+
+    def testRuleResultAsRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : ID '=' INT -> ^('=' ID INT);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            COLON : ':' ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {
+                language=Python3;
+                output=AST;
+                rewrite=true;
+                ASTLabelType=CommonTree;
+                tokenVocab=T;
+            }
+            a : ^(eq e1=ID e2=.) -> ^(eq $e2 $e1) ;
+            eq : '=' | ':' {pass} ;  // bug in set match: it doesn't add to the tree, so force a non-set alternative
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            "abc = 34")
+        self.assertEqual("(= 34 abc)", found)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t052import.py b/runtime/Python3/tests/t052import.py
new file mode 100644
index 0000000..d6de6ef
--- /dev/null
+++ b/runtime/Python3/tests/t052import.py
@@ -0,0 +1,431 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+import sys
+
+class T(testbase.ANTLRTest):
+    def setUp(self):
+        self.oldPath = sys.path[:]
+        sys.path.insert(0, self.baseDir)
+
+
+    def tearDown(self):
+        sys.path = self.oldPath
+
+
+    def parserClass(self, base):
+        class TParser(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TParser
+
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+
+
+    def execParser(self, grammar, grammarEntry, slaves, input):
+        for slave in slaves:
+            parserName = self.writeInlineGrammar(slave)[0]
+            # Slave parsers are imported as normal Python modules; to force a
+            # reload of the current version, purge the module from sys.modules.
+            if parserName + 'Parser' in sys.modules:
+                del sys.modules[parserName + 'Parser']
+
+        lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+        cStream = antlr3.StringStream(input)
+        lexer = lexerCls(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = parserCls(tStream)
+        getattr(parser, grammarEntry)()
+
+        return parser._output
+
+
+    def execLexer(self, grammar, slaves, input):
+        for slave in slaves:
+            parserName = self.writeInlineGrammar(slave)[0]
+            # Slave parsers are imported as normal Python modules; to force a
+            # reload of the current version, purge the module from sys.modules.
+            if parserName + 'Parser' in sys.modules:
+                del sys.modules[parserName + 'Parser']
+
+        lexerCls = self.compileInlineGrammar(grammar)
+
+        cStream = antlr3.StringStream(input)
+        lexer = lexerCls(cStream)
+
+        while True:
+            token = lexer.nextToken()
+            if token is None or token.type == antlr3.EOF:
+                break
+
+            lexer._output += token.text
+
+        return lexer._output
+
+
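+    # Note on the purge above: deleting the entry from sys.modules forces the
+    # next import of the freshly generated slave parser to pick up the new
+    # code.  A rough standalone equivalent (sketch only, using the generated
+    # module name S1Parser from the first test below) would be:
+    #
+    #   import importlib, sys
+    #   if 'S1Parser' in sys.modules:
+    #       importlib.reload(sys.modules['S1Parser'])
+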
+    def testDelegatorInvokesDelegateRule(self):
+        slave = textwrap.dedent(
+        r'''
+        parser grammar S1;
+        options {
+            language=Python3;
+        }
+        @members {
+            def capture(self, t):
+                self.gM1.capture(t)
+
+        }
+
+        a : B { self.capture("S.a") } ;
+        ''')
+
+        master = textwrap.dedent(
+        r'''
+        grammar M1;
+        options {
+            language=Python3;
+        }
+        import S1;
+        s : a ;
+        B : 'b' ; // defines B from inherited token space
+        WS : (' '|'\n') {self.skip()} ;
+        ''')
+
+        found = self.execParser(
+            master, 's',
+            slaves=[slave],
+            input="b"
+            )
+
+        self.assertEqual("S.a", found)
+
+
+    def testDelegatorInvokesDelegateRuleWithArgs(self):
+        slave = textwrap.dedent(
+        r'''
+        parser grammar S2;
+        options {
+            language=Python3;
+        }
+        @members {
+            def capture(self, t):
+                self.gM2.capture(t)
+        }
+        a[x] returns [y] : B {self.capture("S.a"); $y="1000";} ;
+        ''')
+
+        master = textwrap.dedent(
+        r'''
+        grammar M2;
+        options {
+            language=Python3;
+        }
+        import S2;
+        s : label=a[3] {self.capture($label.y);} ;
+        B : 'b' ; // defines B from inherited token space
+        WS : (' '|'\n') {self.skip()} ;
+        ''')
+
+        found = self.execParser(
+            master, 's',
+            slaves=[slave],
+            input="b"
+            )
+
+        self.assertEqual("S.a1000", found)
+
+
+    def testDelegatorAccessesDelegateMembers(self):
+        slave = textwrap.dedent(
+        r'''
+        parser grammar S3;
+        options {
+            language=Python3;
+        }
+        @members {
+            def capture(self, t):
+                self.gM3.capture(t)
+
+            def foo(self):
+                self.capture("foo")
+        }
+        a : B ;
+        ''')
+
+        master = textwrap.dedent(
+        r'''
+        grammar M3;        // uses no rules from the import
+        options {
+            language=Python3;
+        }
+        import S3;
+        s : 'b' {self.gS3.foo();} ; // gS3 is the import pointer
+        WS : (' '|'\n') {self.skip()} ;
+        ''')
+
+        found = self.execParser(
+            master, 's',
+            slaves=[slave],
+            input="b"
+            )
+
+        self.assertEqual("foo", found)
+
+
+    def testDelegatorInvokesFirstVersionOfDelegateRule(self):
+        slave = textwrap.dedent(
+        r'''
+        parser grammar S4;
+        options {
+            language=Python3;
+        }
+        @members {
+            def capture(self, t):
+                self.gM4.capture(t)
+        }
+        a : b {self.capture("S.a");} ;
+        b : B ;
+        ''')
+
+        slave2 = textwrap.dedent(
+        r'''
+        parser grammar T4;
+        options {
+            language=Python3;
+        }
+        @members {
+            def capture(self, t):
+                self.gM4.capture(t)
+        }
+        a : B {self.capture("T.a");} ; // hidden by S.a
+        ''')
+
+        master = textwrap.dedent(
+        r'''
+        grammar M4;
+        options {
+            language=Python3;
+        }
+        import S4,T4;
+        s : a ;
+        B : 'b' ;
+        WS : (' '|'\n') {self.skip()} ;
+        ''')
+
+        found = self.execParser(
+            master, 's',
+            slaves=[slave, slave2],
+            input="b"
+            )
+
+        self.assertEqual("S.a", found)
+
+
+    def testDelegatesSeeSameTokenType(self):
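+        # S5 and T5 declare the tokens A, B, C in different orders; the
+        # composite grammar M5 assigns the actual token types, so both
+        # delegates agree on the type of A and the input "aa" reaches S5.x
+        # followed by T5.y.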
+        slave = textwrap.dedent(
+        r'''
+        parser grammar S5; // A, B, C token type order
+        options {
+            language=Python3;
+        }
+        tokens { A; B; C; }
+        @members {
+            def capture(self, t):
+                self.gM5.capture(t)
+        }
+        x : A {self.capture("S.x ");} ;
+        ''')
+
+        slave2 = textwrap.dedent(
+        r'''
+        parser grammar T5;
+        options {
+            language=Python3;
+        }
+        tokens { C; B; A; } // reverse order
+        @members {
+            def capture(self, t):
+                self.gM5.capture(t)
+        }
+        y : A {self.capture("T.y");} ;
+        ''')
+
+        master = textwrap.dedent(
+        r'''
+        grammar M5;
+        options {
+            language=Python3;
+        }
+        import S5,T5;
+        s : x y ; // matches AA, which should be "aa"
+        B : 'b' ; // another order: B, A, C
+        A : 'a' ;
+        C : 'c' ;
+        WS : (' '|'\n') {self.skip()} ;
+        ''')
+
+        found = self.execParser(
+            master, 's',
+            slaves=[slave, slave2],
+            input="aa"
+            )
+
+        self.assertEqual("S.x T.y", found)
+
+
+    def testDelegatorRuleOverridesDelegate(self):
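+        # M6 redefines rule b, which overrides the imported S6.b; invoking the
+        # delegate rule a therefore matches 'c' through the delegator's b.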
+        slave = textwrap.dedent(
+        r'''
+        parser grammar S6;
+        options {
+            language=Python3;
+        }
+        @members {
+            def capture(self, t):
+                self.gM6.capture(t)
+        }
+        a : b {self.capture("S.a");} ;
+        b : B ;
+        ''')
+
+        master = textwrap.dedent(
+        r'''
+        grammar M6;
+        options {
+            language=Python3;
+        }
+        import S6;
+        b : 'b'|'c' ;
+        WS : (' '|'\n') {self.skip()} ;
+        ''')
+
+        found = self.execParser(
+            master, 'a',
+            slaves=[slave],
+            input="c"
+            )
+
+        self.assertEqual("S.a", found)
+
+
+    # LEXER INHERITANCE
+
+    def testLexerDelegatorInvokesDelegateRule(self):
+        slave = textwrap.dedent(
+        r'''
+        lexer grammar S7;
+        options {
+            language=Python3;
+        }
+        @members {
+            def capture(self, t):
+                self.gM7.capture(t)
+        }
+        A : 'a' {self.capture("S.A ");} ;
+        C : 'c' ;
+        ''')
+
+        master = textwrap.dedent(
+        r'''
+        lexer grammar M7;
+        options {
+            language=Python3;
+        }
+        import S7;
+        B : 'b' ;
+        WS : (' '|'\n') {self.skip()} ;
+        ''')
+
+        found = self.execLexer(
+            master,
+            slaves=[slave],
+            input="abc"
+            )
+
+        self.assertEqual("S.A abc", found)
+
+
+    def testLexerDelegatorRuleOverridesDelegate(self):
+        slave = textwrap.dedent(
+        r'''
+        lexer grammar S8;
+        options {
+            language=Python3;
+        }
+        @members {
+            def capture(self, t):
+                self.gM8.capture(t)
+        }
+        A : 'a' {self.capture("S.A")} ;
+        ''')
+
+        master = textwrap.dedent(
+        r'''
+        lexer grammar M8;
+        options {
+            language=Python3;
+        }
+        import S8;
+        A : 'a' {self.capture("M.A ");} ;
+        WS : (' '|'\n') {self.skip()} ;
+        ''')
+
+        found = self.execLexer(
+            master,
+            slaves=[slave],
+            input="a"
+            )
+
+        self.assertEqual("M.A a", found)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t053hetero.py b/runtime/Python3/tests/t053hetero.py
new file mode 100644
index 0000000..e85c038
--- /dev/null
+++ b/runtime/Python3/tests/t053hetero.py
@@ -0,0 +1,939 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+import sys
+
+class T(testbase.ANTLRTest):
+    def parserClass(self, base):
+        class TParser(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TParser
+
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+
+
+    def execParser(self, grammar, grammarEntry, input):
+        lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+        cStream = antlr3.StringStream(input)
+        lexer = lexerCls(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = parserCls(tStream)
+        r = getattr(parser, grammarEntry)()
+
+        if r:
+            return r.tree.toStringTree()
+
+        return ""
+
+
+    def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
+        lexerCls, parserCls = self.compileInlineGrammar(grammar)
+        walkerCls = self.compileInlineGrammar(treeGrammar)
+
+        cStream = antlr3.StringStream(input)
+        lexer = lexerCls(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = parserCls(tStream)
+        r = getattr(parser, grammarEntry)()
+        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+        nodes.setTokenStream(tStream)
+        walker = walkerCls(nodes)
+        r = getattr(walker, treeEntry)()
+
+        if r:
+            return r.tree.toStringTree()
+
+        return ""
+
+
+    # PARSERS -- AUTO AST
+
+    def testToken(self):
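+        # The <V> suffix on a token reference selects a heterogeneous AST node
+        # type: the parser builds a V (declared in @header) instead of the
+        # default CommonTree, so toStringTree() shows the "<V>" decoration.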
+        grammar = textwrap.dedent(
+        r'''
+        grammar T1;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : ID<V> ;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a"
+            )
+
+        self.assertEqual("a<V>", found)
+
+
+    def testTokenCommonTree(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : ID<CommonTree> ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a")
+
+        self.assertEqual("a", found)
+
+
+    def testTokenWithQualifiedType(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            @members {
+            class V(CommonTree):
+                def toString(self):
+                    return self.token.text + "<V>"
+                __str__ = toString
+            }
+            a : ID<TParser.V> ; // TParser.V is a qualified name
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a"
+            )
+
+        self.assertEqual("a<V>", found)
+
+
+    def testNamedType(self):
+        grammar = textwrap.dedent(
+            r"""
+            grammar $T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            @header {
+            class V(CommonTree):
+                def toString(self):
+                    return self.token.text + "<V>"
+                __str__ = toString
+            }
+            a : ID<node=V> ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            """)
+
+        found = self.execParser(grammar, 'a', input="a")
+        self.assertEqual("a<V>", found)
+
+
+    def testTokenWithLabel(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T2;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : x=ID<V> ;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a"
+            )
+
+        self.assertEqual("a<V>", found)
+
+
+    def testTokenWithListLabel(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T3;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : x+=ID<V> ;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a"
+            )
+
+        self.assertEqual("a<V>", found)
+
+
+    def testTokenRoot(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T4;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : ID<V>^ ;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a"
+            )
+
+        self.assertEqual("a<V>", found)
+
+
+    def testTokenRootWithListLabel(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T5;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : x+=ID<V>^ ;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a"
+            )
+
+        self.assertEqual("a<V>", found)
+
+
+    def testString(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T6;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : 'begin'<V> ;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="begin"
+            )
+
+        self.assertEqual("begin<V>", found)
+
+
+    def testStringRoot(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T7;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : 'begin'<V>^ ;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="begin"
+            )
+
+        self.assertEqual("begin<V>", found)
+
+
+    # PARSERS -- REWRITE AST
+
+    def testRewriteToken(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T8;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : ID -> ID<V> ;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a"
+            )
+
+        self.assertEqual("a<V>", found)
+
+
+    def testRewriteTokenWithArgs(self):
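+        # ID<V>[...] passes the bracketed values to the custom node class's
+        # constructor; V.__init__ below dispatches on the argument count to
+        # cover both the imaginary-node and the token-based forms.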
+        grammar = textwrap.dedent(
+        r'''
+        grammar T9;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def __init__(self, *args):
+                if len(args) == 4:
+                    ttype = args[0]
+                    x = args[1]
+                    y = args[2]
+                    z = args[3]
+                    token = CommonToken(type=ttype, text="")
+
+                elif len(args) == 3:
+                    ttype = args[0]
+                    token = args[1]
+                    x = args[2]
+                    y, z = 0, 0
+
+                else:
+                    raise TypeError("Invalid args {!r}".format(args))
+
+                super().__init__(token)
+                self.x = x
+                self.y = y
+                self.z = z
+
+            def toString(self):
+                txt = ""
+                if self.token:
+                    txt += self.token.text
+                txt +="<V>;{0.x}{0.y}{0.z}".format(self)
+                return txt
+            __str__ = toString
+
+        }
+        a : ID -> ID<V>[42,19,30] ID<V>[$ID,99];
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a"
+            )
+
+        self.assertEqual("<V>;421930 a<V>;9900", found)
+
+
+    def testRewriteTokenRoot(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T10;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : ID INT -> ^(ID<V> INT) ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a 2"
+            )
+
+        self.assertEqual("(a<V> 2)", found)
+
+
+    def testRewriteString(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T11;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : 'begin' -> 'begin'<V> ;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="begin"
+            )
+
+        self.assertEqual("begin<V>", found)
+
+
+    def testRewriteStringRoot(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T12;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        }
+        a : 'begin' INT -> ^('begin'<V> INT) ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="begin 2"
+            )
+
+        self.assertEqual("(begin<V> 2)", found)
+
+    def testRewriteRuleResults(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            tokens {LIST;}
+            @header {
+            class V(CommonTree):
+                def toString(self):
+                    return self.token.text + "<V>"
+                __str__ = toString
+
+            class W(CommonTree):
+                def __init__(self, tokenType, txt):
+                    super().__init__(
+                        CommonToken(type=tokenType, text=txt))
+
+                def toString(self):
+                    return self.token.text + "<W>"
+                __str__ = toString
+
+            }
+            a : id (',' id)* -> ^(LIST<W>["LIST"] id+);
+            id : ID -> ID<V>;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="a,b,c")
+
+        self.assertEqual("(LIST<W> a<V> b<V> c<V>)", found)
+
+    def testCopySemanticsWithHetero(self):
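+        # The ^(type ID)+ rewrite duplicates the single 'type' subtree for
+        # each ID; that duplication goes through dupNode(), which the custom
+        # class overrides so the copies stay V instances and keep the "<V>"
+        # rendering.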
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            @header {
+            class V(CommonTree):
+                def dupNode(self):
+                    return V(self)
+
+                def toString(self):
+                    return self.token.text + "<V>"
+                __str__ = toString
+
+            }
+            a : type ID (',' ID)* ';' -> ^(type ID)+;
+            type : 'int'<V> ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        found = self.execParser(
+            grammar, 'a',
+            input="int a, b, c;")
+        self.assertEqual("(int<V> a) (int<V> b) (int<V> c)", found)
+
+    # TREE PARSERS -- REWRITE AST
+
+    def testTreeParserRewriteFlatList(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T13;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP13;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T13;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        class W(CommonTree):
+            def toString(self):
+                return self.token.text + "<W>"
+            __str__ = toString
+
+        }
+        a : ID INT -> INT<V> ID<W>
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            input="abc 34"
+            )
+
+        self.assertEqual("34<V> abc<W>", found)
+
+
+    def testTreeParserRewriteTree(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T14;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID INT;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP14;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T14;
+        }
+        @header {
+        class V(CommonTree):
+            def toString(self):
+                return self.token.text + "<V>"
+            __str__ = toString
+
+        class W(CommonTree):
+            def toString(self):
+                return self.token.text + "<W>"
+            __str__ = toString
+
+        }
+        a : ID INT -> ^(INT<V> ID<W>)
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            input="abc 34"
+            )
+
+        self.assertEqual("(34<V> abc<W>)", found)
+
+
+    def testTreeParserRewriteImaginary(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T15;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP15;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T15;
+        }
+        tokens { ROOT; }
+        @header {
+        class V(CommonTree):
+            def __init__(self, tokenType):
+                super().__init__(CommonToken(tokenType))
+
+            def toString(self):
+                return tokenNames[self.token.type] + "<V>"
+            __str__ = toString
+
+
+        }
+        a : ID -> ROOT<V> ID
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            input="abc"
+            )
+
+        self.assertEqual("ROOT<V> abc", found)
+
+
+    def testTreeParserRewriteImaginaryWithArgs(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T16;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP16;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T16;
+        }
+        tokens { ROOT; }
+        @header {
+        class V(CommonTree):
+            def __init__(self, tokenType, x):
+                super().__init__(CommonToken(tokenType))
+                self.x = x
+
+            def toString(self):
+                return tokenNames[self.token.type] + "<V>;" + str(self.x)
+            __str__ = toString
+
+        }
+        a : ID -> ROOT<V>[42] ID
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            input="abc"
+            )
+
+        self.assertEqual("ROOT<V>;42 abc", found)
+
+
+    def testTreeParserRewriteImaginaryRoot(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T17;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP17;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T17;
+        }
+        tokens { ROOT; }
+        @header {
+        class V(CommonTree):
+            def __init__(self, tokenType):
+                super().__init__(CommonToken(tokenType))
+
+            def toString(self):
+                return tokenNames[self.token.type] + "<V>"
+            __str__ = toString
+
+        }
+        a : ID -> ^(ROOT<V> ID)
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            input="abc"
+            )
+
+        self.assertEqual("(ROOT<V> abc)", found)
+
+
+    def testTreeParserRewriteImaginaryFromReal(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T18;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ID ;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+;
+        WS : (' '|'\n') {$channel=HIDDEN;} ;
+        ''')
+
+        treeGrammar = textwrap.dedent(
+        r'''
+        tree grammar TP18;
+        options {
+            language=Python3;
+            output=AST;
+            ASTLabelType=CommonTree;
+            tokenVocab=T18;
+        }
+        tokens { ROOT; }
+        @header {
+        class V(CommonTree):
+            def __init__(self, tokenType, tree=None):
+                if tree is None:
+                    super().__init__(CommonToken(tokenType))
+                else:
+                    super().__init__(tree)
+                    self.token.type = tokenType
+
+            def toString(self):
+                return tokenNames[self.token.type]+"<V>@"+str(self.token.line)
+            __str__ = toString
+
+        }
+        a : ID -> ROOT<V>[$ID]
+          ;
+        ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            input="abc"
+            )
+
+        self.assertEqual("ROOT<V>@1", found)
+
+
+    def testTreeParserAutoHeteroAST(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {
+                language=Python3;
+                output=AST;
+            }
+            a : ID ';' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN;} ;
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''
+            tree grammar TP;
+            options {
+                language=Python3;
+                output=AST;
+                ASTLabelType=CommonTree;
+                tokenVocab=T;
+            }
+            tokens { ROOT; }
+            @header {
+            class V(CommonTree):
+                def toString(self):
+                    return CommonTree.toString(self) + "<V>"
+                __str__ = toString
+
+            }
+
+            a : ID<V> ';'<V>;
+            ''')
+
+        found = self.execTreeParser(
+            grammar, 'a',
+            treeGrammar, 'a',
+            input="abc;"
+            )
+
+        self.assertEqual("abc<V> ;<V>", found)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t054main.py b/runtime/Python3/tests/t054main.py
new file mode 100644
index 0000000..e81d253
--- /dev/null
+++ b/runtime/Python3/tests/t054main.py
@@ -0,0 +1,309 @@
+
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+import sys
+from io import StringIO
+
+class T(testbase.ANTLRTest):
+    def setUp(self):
+        self.oldPath = sys.path[:]
+        sys.path.insert(0, self.baseDir)
+
+
+    def tearDown(self):
+        sys.path = self.oldPath
+
+
+    def testOverrideMain(self):
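+        # The @main action replaces the module-level main() that the Python3
+        # target generates, so calling lexerMod.main below runs the override
+        # and raises the RuntimeError instead of lexing anything.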
+        grammar = textwrap.dedent(
+            r"""lexer grammar T3;
+            options {
+              language = Python3;
+              }
+
+            @main {
+            def main(argv):
+                raise RuntimeError("no")
+            }
+
+            ID: ('a'..'z' | '\u00c0'..'\u00ff')+;
+            WS: ' '+ { $channel = HIDDEN };
+            """)
+
+
+        stdout = StringIO()
+
+        lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
+        self.assertRaises(RuntimeError, lexerMod.main, ['lexer.py'])
+
+
+    def testLexerFromFile(self):
+        input = "foo bar"
+        inputPath = self.writeFile("input.txt", input)
+
+        grammar = textwrap.dedent(
+            r"""lexer grammar T1;
+            options {
+              language = Python3;
+              }
+
+            ID: 'a'..'z'+;
+            WS: ' '+ { $channel = HIDDEN };
+            """)
+
+
+        stdout = StringIO()
+
+        lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
+        lexerMod.main(
+            ['lexer.py', inputPath],
+            stdout=stdout
+            )
+
+        self.assertEqual(len(stdout.getvalue().splitlines()), 3)
+
+
+    def testLexerFromStdIO(self):
+        input = "foo bar"
+
+        grammar = textwrap.dedent(
+            r"""lexer grammar T2;
+            options {
+              language = Python3;
+              }
+
+            ID: 'a'..'z'+;
+            WS: ' '+ { $channel = HIDDEN };
+            """)
+
+
+        stdout = StringIO()
+
+        lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
+        lexerMod.main(
+            ['lexer.py'],
+            stdin=StringIO(input),
+            stdout=stdout
+            )
+
+        self.assertEqual(len(stdout.getvalue().splitlines()), 3)
+
+
+    def testLexerEncoding(self):
+        input = "föö bär"
+
+        grammar = textwrap.dedent(
+            r"""lexer grammar T3;
+            options {
+              language = Python3;
+              }
+
+            ID: ('a'..'z' | '\u00c0'..'\u00ff')+;
+            WS: ' '+ { $channel = HIDDEN };
+            """)
+
+
+        stdout = StringIO()
+
+        lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
+        lexerMod.main(
+            ['lexer.py'],
+            stdin=StringIO(input),
+            stdout=stdout
+            )
+
+        self.assertEqual(len(stdout.getvalue().splitlines()), 3)
+
+
+    def testCombined(self):
+        input = "foo bar"
+
+        grammar = textwrap.dedent(
+            r"""grammar T4;
+            options {
+              language = Python3;
+              }
+
+            r returns [res]: (ID)+ EOF { $res = $text };
+
+            ID: 'a'..'z'+;
+            WS: ' '+ { $channel = HIDDEN };
+            """)
+
+
+        stdout = StringIO()
+
+        lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
+        parserMod.main(
+            ['combined.py', '--rule', 'r'],
+            stdin=StringIO(input),
+            stdout=stdout
+            )
+
+        stdout = stdout.getvalue()
+        self.assertEqual(len(stdout.splitlines()), 1, stdout)
+
+
+    def testCombinedOutputAST(self):
+        input = "foo + bar"
+
+        grammar = textwrap.dedent(
+            r"""grammar T5;
+            options {
+              language = Python3;
+              output = AST;
+            }
+
+            r: ID OP^ ID EOF!;
+
+            ID: 'a'..'z'+;
+            OP: '+';
+            WS: ' '+ { $channel = HIDDEN };
+            """)
+
+
+        stdout = StringIO()
+
+        lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
+        parserMod.main(
+            ['combined.py', '--rule', 'r'],
+            stdin=StringIO(input),
+            stdout=stdout
+            )
+
+        stdout = stdout.getvalue().strip()
+        self.assertEqual(stdout, "(+ foo bar)")
+
+
+    def testTreeParser(self):
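+        # The generated walker module's main() presumably has to build the
+        # input tree first, hence the extra --parser/--parser-rule/--lexer
+        # options alongside --rule (the tree-walker start rule) further down.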
+        grammar = textwrap.dedent(
+            r'''grammar T6;
+            options {
+              language = Python3;
+              output = AST;
+            }
+
+            r: ID OP^ ID EOF!;
+
+            ID: 'a'..'z'+;
+            OP: '+';
+            WS: ' '+ { $channel = HIDDEN };
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''tree grammar T6Walker;
+            options {
+            language=Python3;
+            ASTLabelType=CommonTree;
+            tokenVocab=T6;
+            }
+            r returns [res]: ^(OP a=ID b=ID)
+              { $res = "{} {} {}".format($a.text, $OP.text, $b.text) }
+              ;
+            ''')
+
+        lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
+        walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True)
+
+        stdout = StringIO()
+        walkerMod.main(
+            ['walker.py', '--rule', 'r', '--parser', 'T6Parser', '--parser-rule', 'r', '--lexer', 'T6Lexer'],
+            stdin=StringIO("a+b"),
+            stdout=stdout
+            )
+
+        stdout = stdout.getvalue().strip()
+        self.assertEqual(stdout, "'a + b'")
+
+
+    def testTreeParserRewrite(self):
+        grammar = textwrap.dedent(
+            r'''grammar T7;
+            options {
+              language = Python3;
+              output = AST;
+            }
+
+            r: ID OP^ ID EOF!;
+
+            ID: 'a'..'z'+;
+            OP: '+';
+            WS: ' '+ { $channel = HIDDEN };
+            ''')
+
+        treeGrammar = textwrap.dedent(
+            r'''tree grammar T7Walker;
+            options {
+              language=Python3;
+              ASTLabelType=CommonTree;
+              tokenVocab=T7;
+              output=AST;
+            }
+            tokens {
+              ARG;
+            }
+            r: ^(OP a=ID b=ID) -> ^(OP ^(ARG ID) ^(ARG ID));
+            ''')
+
+        lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
+        walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True)
+
+        stdout = StringIO()
+        walkerMod.main(
+            ['walker.py', '--rule', 'r', '--parser', 'T7Parser', '--parser-rule', 'r', '--lexer', 'T7Lexer'],
+            stdin=StringIO("a+b"),
+            stdout=stdout
+            )
+
+        stdout = stdout.getvalue().strip()
+        self.assertEqual(stdout, "(+ (ARG a) (ARG b))")
+
+
+
+    def testGrammarImport(self):
+        slave = textwrap.dedent(
+            r'''
+            parser grammar T8S;
+            options {
+              language=Python3;
+            }
+
+            a : B;
+            ''')
+
+        parserName = self.writeInlineGrammar(slave)[0]
+        # Slave parsers are imported as normal Python modules; to force a
+        # reload of the current version, purge the module from sys.modules.
+        if parserName + 'Parser' in sys.modules:
+            del sys.modules[parserName+'Parser']
+
+        master = textwrap.dedent(
+            r'''
+            grammar T8M;
+            options {
+              language=Python3;
+            }
+            import T8S;
+            s returns [res]: a { $res = $a.text };
+            B : 'b' ; // defines B from inherited token space
+            WS : (' '|'\n') {self.skip()} ;
+            ''')
+
+        stdout = StringIO()
+
+        lexerMod, parserMod = self.compileInlineGrammar(master, returnModule=True)
+        parserMod.main(
+            ['import.py', '--rule', 's'],
+            stdin=StringIO("b"),
+            stdout=stdout
+            )
+
+        stdout = stdout.getvalue().strip()
+        self.assertEqual(stdout, "'b'")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t057autoAST.py b/runtime/Python3/tests/t057autoAST.py
new file mode 100644
index 0000000..63ce05a
--- /dev/null
+++ b/runtime/Python3/tests/t057autoAST.py
@@ -0,0 +1,1005 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+import sys
+
+class TestAutoAST(testbase.ANTLRTest):
+    def parserClass(self, base):
+        class TParser(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._errors = []
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def emitErrorMessage(self, msg):
+                self._errors.append(msg)
+
+
+        return TParser
+
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+
+
+    def execParser(self, grammar, grammarEntry, input, expectErrors=False):
+        lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+        cStream = antlr3.StringStream(input)
+        lexer = lexerCls(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = parserCls(tStream)
+        r = getattr(parser, grammarEntry)()
+
+        if not expectErrors:
+            self.assertEqual(len(parser._errors), 0, parser._errors)
+
+        result = ""
+
+        if r:
+            if hasattr(r, 'result'):
+                result += r.result
+
+            if r.tree:
+                result += r.tree.toStringTree()
+
+        if not expectErrors:
+            return result
+
+        else:
+            return result, parser._errors
+
+
+    def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
+        lexerCls, parserCls = self.compileInlineGrammar(grammar)
+        walkerCls = self.compileInlineGrammar(treeGrammar)
+
+        cStream = antlr3.StringStream(input)
+        lexer = lexerCls(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = parserCls(tStream)
+        r = getattr(parser, grammarEntry)()
+        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+        nodes.setTokenStream(tStream)
+        walker = walkerCls(nodes)
+        r = getattr(walker, treeEntry)()
+
+        if r:
+            return r.tree.toStringTree()
+
+        return ""
+
+
+    def testTokenList(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : ID INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN};
+            ''')
+
+        found = self.execParser(grammar, "a", "abc 34")
+        self.assertEqual("abc 34", found);
+
+
+    def testTokenListInSingleAltBlock(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : (ID INT) ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar,"a", "abc 34")
+        self.assertEqual("abc 34", found)
+
+
+    def testSimpleRootAtOuterLevel(self):
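+        # In auto-AST mode the ^ suffix makes the matched token the root of
+        # the subtree built so far, turning the flat "abc 34" into "(abc 34)".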
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : ID^ INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc 34")
+        self.assertEqual("(abc 34)", found)
+
+
+    def testSimpleRootAtOuterLevelReverse(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : INT ID^ ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "34 abc")
+        self.assertEqual("(abc 34)", found)
+
+
+    def testBang(self):
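+        # The ! suffix excludes a matched token from the AST, so only the
+        # unsuppressed ID and INT ("abc 4532") survive in the output tree.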
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT! ID! INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc 34 dag 4532")
+        self.assertEqual("abc 4532", found)
+
+
+    def testOptionalThenRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ( ID INT )? ID^ ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a 1 b")
+        self.assertEqual("(b a 1)", found)
+
+
+    def testLabeledStringRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : v='void'^ ID ';' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "void foo;")
+        self.assertEqual("(void foo ;)", found)
+
+
+    def testWildcard(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : v='void'^ . ';' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "void foo;")
+        self.assertEqual("(void foo ;)", found)
+
+
+    def testWildcardRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : v='void' .^ ';' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "void foo;")
+        self.assertEqual("(foo void ;)", found)
+
+
+    def testWildcardRootWithLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : v='void' x=.^ ';' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "void foo;")
+        self.assertEqual("(foo void ;)", found)
+
+
+    def testWildcardRootWithListLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : v='void' x=.^ ';' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "void foo;")
+        self.assertEqual("(foo void ;)", found)
+
+
+    def testWildcardBangWithListLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : v='void' x=.! ';' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "void foo;")
+        self.assertEqual("void ;", found)
+
+
+    def testRootRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID^ INT^ ID ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a 34 c")
+        self.assertEqual("(34 a c)", found)
+
+
+    def testRootRoot2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT^ ID^ ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a 34 c")
+        self.assertEqual("(c (34 a))", found)
+
+
+    def testRootThenRootInLoop(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID^ (INT '*'^ ID)+ ;
+            ID  : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a 34 * b 9 * c")
+        self.assertEqual("(* (* (a 34) b 9) c)", found)
+
+
+    def testNestedSubrule(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : 'void' (({pass}ID|INT) ID | 'null' ) ';' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "void a b;")
+        self.assertEqual("void a b ;", found)
+
+
+    def testInvokeRule(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a  : type ID ;
+            type : {pass}'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "int a")
+        self.assertEqual("int a", found)
+
+
+    def testInvokeRuleAsRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a  : type^ ID ;
+            type : {pass}'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "int a")
+        self.assertEqual("(int a)", found)
+
+
+    def testInvokeRuleAsRootWithLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a  : x=type^ ID ;
+            type : {pass}'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "int a")
+        self.assertEqual("(int a)", found)
+
+
+    def testInvokeRuleAsRootWithListLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a  : x+=type^ ID ;
+            type : {pass}'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "int a")
+        self.assertEqual("(int a)", found)
+
+
+    def testRuleRootInLoop(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID ('+'^ ID)* ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a+b+c+d")
+        self.assertEqual("(+ (+ (+ a b) c) d)", found)
+
+
+    def testRuleInvocationRuleRootInLoop(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID (op^ ID)* ;
+            op : {pass}'+' | '-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a+b+c-d")
+        self.assertEqual("(- (+ (+ a b) c) d)", found)
+
+
+    def testTailRecursion(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            s : a ;
+            a : atom ('exp'^ a)? ;
+            atom : INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "s", "3 exp 4 exp 5")
+        self.assertEqual("(exp 3 (exp 4 5))", found)
+
+
+    def testSet(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID|INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc")
+        self.assertEqual("abc", found)
+
+
+    def testSetRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ('+' | '-')^ ID ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "+abc")
+        self.assertEqual("(+ abc)", found)
+
+
+    @testbase.broken(
+        "FAILS until antlr.g rebuilt in v3", testbase.GrammarCompileError)
+    def testSetRootWithLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : x=('+' | '-')^ ID ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "+abc")
+        self.assertEqual("(+ abc)", found)
+
+
+    def testSetAsRuleRootInLoop(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID (('+'|'-')^ ID)* ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a+b-c")
+        self.assertEqual("(- (+ a b) c)", found)
+
+
+    def testNotSet(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ~ID '+' INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "34+2")
+        self.assertEqual("34 + 2", found)
+
+
+    def testNotSetWithLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : x=~ID '+' INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "34+2")
+        self.assertEqual("34 + 2", found)
+
+
+    def testNotSetWithListLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : x=~ID '+' INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "34+2")
+        self.assertEqual("34 + 2", found)
+
+
+    def testNotSetRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ~'+'^ INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "34 55")
+        self.assertEqual("(34 55)", found)
+
+
+    def testNotSetRootWithLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ~'+'^ INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "34 55")
+        self.assertEqual("(34 55)", found)
+
+
+    def testNotSetRootWithListLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ~'+'^ INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "34 55")
+        self.assertEqual("(34 55)", found)
+
+
+    def testNotSetRuleRootInLoop(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : INT (~INT^ INT)* ;
+            blort : '+' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "3+4+5")
+        self.assertEqual("(+ (+ 3 4) 5)", found)
+
+
+    @testbase.broken("FIXME: What happened to the semicolon?", AssertionError)
+    def testTokenLabelReuse(self):
+        # check for compilation problem due to multiple defines
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a returns [result] : id=ID id=ID {$result = "2nd id="+$id.text+";"} ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("2nd id=b;a b", found)
+
+
+    def testTokenLabelReuse2(self):
+        # check for compilation problem due to multiple defines
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a returns [result]: id=ID id=ID^ {$result = "2nd id="+$id.text+','} ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("2nd id=b,(b a)", found)
+
+
+    def testTokenListLabelReuse(self):
+        # check for compilation problem due to multiple defines
+        # make sure ids has both ID tokens
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a returns [result] : ids+=ID ids+=ID {$result = "id list=[{}],".format(",".join([t.text for t in $ids]))} ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        expecting = "id list=[a,b],a b"
+        self.assertEqual(expecting, found)
+
+
+    def testTokenListLabelReuse2(self):
+        # check for compilation problem due to multiple defines
+        # make sure ids has both ID tokens
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a returns [result] : ids+=ID^ ids+=ID {$result = "id list=[{}],".format(",".join([t.text for t in $ids]))} ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        expecting = "id list=[a,b],(a b)"
+        self.assertEqual(expecting, found)
+
+
+    def testTokenListLabelRuleRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : id+=ID^ ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a")
+        self.assertEqual("a", found)
+
+
+    def testTokenListLabelBang(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : id+=ID! ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a")
+        self.assertEqual("", found)
+
+
+    def testRuleListLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a returns [result]: x+=b x+=b {
+            t=$x[1]
+            $result = "2nd x="+t.toStringTree()+',';
+            };
+            b : ID;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("2nd x=b,a b", found)
+
+
+    def testRuleListLabelRuleRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a returns [result] : ( x+=b^ )+ {
+            $result = "x="+$x[1].toStringTree()+',';
+            } ;
+            b : ID;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("x=(b a),(b a)", found)
+
+
+    def testRuleListLabelBang(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a returns [result] : x+=b! x+=b {
+            $result = "1st x="+$x[0].toStringTree()+',';
+            } ;
+            b : ID;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("1st x=a,b", found)
+
+
+    def testComplicatedMelange(self):
+        # check for compilation problem
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : A b=B b=B c+=C c+=C D {s = $D.text} ;
+            A : 'a' ;
+            B : 'b' ;
+            C : 'c' ;
+            D : 'd' ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b b c c d")
+        self.assertEqual("a b b c c d", found)
+
+
+    def testReturnValueWithAST(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a returns [result] : ID b { $result = str($b.i) + '\n';} ;
+            b returns [i] : INT {$i=int($INT.text);} ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc 34")
+        self.assertEqual("34\nabc 34", found)
+
+
+    def testSetLoop(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options { language=Python3;output=AST; }
+            r : (INT|ID)+ ;
+            ID : 'a'..'z' + ;
+            INT : '0'..'9' +;
+            WS: (' ' | '\n' | '\\t')+ {$channel = HIDDEN};
+            ''')
+
+        found = self.execParser(grammar, "r", "abc 34 d")
+        self.assertEqual("abc 34 d", found)
+
+
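+    # The remaining tests check how syntax errors surface in the AST:
+    # recovery conjures tokens that print as "<missing X>", and
+    # unrecoverable input produces error nodes such as "<error: ...>",
+    # "<mismatched token: ...>" or "<unexpected: ...>".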
+    def testExtraTokenInSimpleDecl(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            decl : type^ ID '='! INT ';'! ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "decl", "int 34 x=1;",
+                                        expectErrors=True)
+        self.assertEqual(["line 1:4 extraneous input '34' expecting ID"],
+                         errors)
+        self.assertEqual("(int x 1)", found) # tree gets correct x and 1 tokens
+
+
+    def testMissingIDInSimpleDecl(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            tokens {EXPR;}
+            decl : type^ ID '='! INT ';'! ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "decl", "int =1;",
+                                        expectErrors=True)
+        self.assertEqual(["line 1:4 missing ID at '='"], errors)
+        self.assertEqual("(int <missing ID> 1)", found) # tree gets invented ID token
+
+
+    def testMissingSetInSimpleDecl(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            tokens {EXPR;}
+            decl : type^ ID '='! INT ';'! ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "decl", "x=1;",
+                                        expectErrors=True)
+        self.assertEqual(["line 1:0 mismatched input 'x' expecting set None"], errors)
+        self.assertEqual("(<error: x> x 1)", found) # tree gets invented ID token
+
+
+    def testMissingTokenGivesErrorNode(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : ID INT ; // follow is EOF
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "abc", expectErrors=True)
+        self.assertEqual(["line 1:3 missing INT at '<EOF>'"], errors)
+        self.assertEqual("abc <missing INT>", found)
+
+
+    def testMissingTokenGivesErrorNodeInInvokedRule(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : b ;
+            b : ID INT ; // follow should see EOF
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "abc", expectErrors=True)
+        self.assertEqual(["line 1:3 mismatched input '<EOF>' expecting INT"], errors)
+        self.assertEqual("<mismatched token: <EOF>, resync=abc>", found)
+
+
+    def testExtraTokenGivesErrorNode(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : b c ;
+            b : ID ;
+            c : INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "abc ick 34",
+                                        expectErrors=True)
+        self.assertEqual(["line 1:4 extraneous input 'ick' expecting INT"],
+                          errors)
+        self.assertEqual("abc 34", found)
+
+
+    def testMissingFirstTokenGivesErrorNode(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : ID INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
+        self.assertEqual(["line 1:0 missing ID at '34'"], errors)
+        self.assertEqual("<missing ID> 34", found)
+
+
+    def testMissingFirstTokenGivesErrorNode2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : b c ;
+            b : ID ;
+            c : INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
+
+        # finds an error at the first token, 34, and re-syncs.
+        # re-synchronizing does not consume a token because 34 follows
+        # ref to rule b (start of c). It then matches 34 in c.
+        self.assertEqual(["line 1:0 missing ID at '34'"], errors)
+        self.assertEqual("<missing ID> 34", found)
+
+
+    def testNoViableAltGivesErrorNode(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : b | c ;
+            b : ID ;
+            c : INT ;
+            ID : 'a'..'z'+ ;
+            S : '*' ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "*", expectErrors=True)
+        self.assertEqual(["line 1:0 no viable alternative at input '*'"],
+                         errors)
+        self.assertEqual("<unexpected: [@0,0:0='*',<S>,1:0], resync=*>",
+                         found)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t058rewriteAST.py b/runtime/Python3/tests/t058rewriteAST.py
new file mode 100644
index 0000000..bb59b50
--- /dev/null
+++ b/runtime/Python3/tests/t058rewriteAST.py
@@ -0,0 +1,1505 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import testbase
+import sys
+
+class TestRewriteAST(testbase.ANTLRTest):
+    def parserClass(self, base):
+        class TParser(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._errors = []
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def emitErrorMessage(self, msg):
+                self._errors.append(msg)
+
+
+        return TParser
+
+
+    def lexerClass(self, base):
+        class TLexer(base):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+
+                self._output = ""
+
+
+            def capture(self, t):
+                self._output += t
+
+
+            def traceIn(self, ruleName, ruleIndex):
+                self.traces.append('>'+ruleName)
+
+
+            def traceOut(self, ruleName, ruleIndex):
+                self.traces.append('<'+ruleName)
+
+
+            def recover(self, input, re):
+                # no error recovery yet, just crash!
+                raise
+
+        return TLexer
+
+
+    def execParser(self, grammar, grammarEntry, input, expectErrors=False):
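+        # Compile the inline grammar, lex and parse `input` starting at
+        # `grammarEntry`, and return the rule's declared return value (if
+        # any) followed by the AST rendered as a string; when expectErrors
+        # is True, the collected parser error messages are returned as well.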
+        lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+        cStream = antlr3.StringStream(input)
+        lexer = lexerCls(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = parserCls(tStream)
+        r = getattr(parser, grammarEntry)()
+
+        if not expectErrors:
+            self.assertEqual(len(parser._errors), 0, parser._errors)
+
+        result = ""
+
+        if r:
+            if hasattr(r, 'result'):
+                result += r.result
+
+            if r.tree:
+                result += r.tree.toStringTree()
+
+        if not expectErrors:
+            return result
+
+        else:
+            return result, parser._errors
+
+
+    def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
+        lexerCls, parserCls = self.compileInlineGrammar(grammar)
+        walkerCls = self.compileInlineGrammar(treeGrammar)
+
+        cStream = antlr3.StringStream(input)
+        lexer = lexerCls(cStream)
+        tStream = antlr3.CommonTokenStream(lexer)
+        parser = parserCls(tStream)
+        r = getattr(parser, grammarEntry)()
+        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
+        nodes.setTokenStream(tStream)
+        walker = walkerCls(nodes)
+        r = getattr(walker, treeEntry)()
+
+        if r:
+            return r.tree.toStringTree()
+
+        return ""
+
+
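+    # The tests below exercise ANTLR's tree-rewrite syntax: "-> X" replaces
+    # the default AST with the given template, "^(root child ...)" builds a
+    # tree with the first element as root, "?", "*" and "+" make template
+    # elements optional or repeated, and a bare "->" deletes the tree.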
+    def testDelete(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT -> ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc 34")
+        self.assertEqual("", found)
+
+
+    def testSingleToken(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID -> ID;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc")
+        self.assertEqual("abc", found)
+
+
+    def testSingleTokenToNewNode(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID -> ID["x"];
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc")
+        self.assertEqual("x", found)
+
+
+    def testSingleTokenToNewNodeRoot(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID -> ^(ID["x"] INT);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc")
+        self.assertEqual("(x INT)", found)
+
+
+    def testSingleTokenToNewNode2(self):
+        # Allow creation of new nodes w/o args.
+        grammar = textwrap.dedent(
+            r'''
+            grammar TT;
+            options {language=Python3;output=AST;}
+            a : ID -> ID[ ];
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc")
+        self.assertEqual("ID", found)
+
+
+    def testSingleCharLiteral(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : 'c' -> 'c';
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "c")
+        self.assertEqual("c", found)
+
+
+    def testSingleStringLiteral(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : 'ick' -> 'ick';
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "ick")
+        self.assertEqual("ick", found)
+
+
+    def testSingleRule(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : b -> b;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc")
+        self.assertEqual("abc", found)
+
+
+    def testReorderTokens(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT -> INT ID;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc 34")
+        self.assertEqual("34 abc", found)
+
+
+    def testReorderTokenAndRule(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : b INT -> INT b;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc 34")
+        self.assertEqual("34 abc", found)
+
+
+    def testTokenTree(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT -> ^(INT ID);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc 34")
+        self.assertEqual("(34 abc)", found)
+
+
+    def testTokenTreeAfterOtherStuff(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : 'void' ID INT -> 'void' ^(INT ID);
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "void abc 34")
+        self.assertEqual("void (34 abc)", found)
+
+
+    def testNestedTokenTreeWithOuterLoop(self):
+        # verify that ID and INT both iterate over outer index variable
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {DUH;}
+            a : ID INT ID INT -> ^( DUH ID ^( DUH INT) )+ ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a 1 b 2")
+        self.assertEqual("(DUH a (DUH 1)) (DUH b (DUH 2))", found)
+
+
+    def testOptionalSingleToken(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID -> ID? ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc")
+        self.assertEqual("abc", found)
+
+
+    def testClosureSingleToken(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID ID -> ID* ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("a b", found)
+
+
+    def testPositiveClosureSingleToken(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID ID -> ID+ ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("a b", found)
+
+
+    def testOptionalSingleRule(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : b -> b?;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc")
+        self.assertEqual("abc", found)
+
+
+    def testClosureSingleRule(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : b b -> b*;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("a b", found)
+
+
+    def testClosureOfLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : x+=b x+=b -> $x*;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("a b", found)
+
+
+    def testOptionalLabelNoListLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : (x=ID)? -> $x?;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a")
+        self.assertEqual("a", found)
+
+
+    def testPositiveClosureSingleRule(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : b b -> b+;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("a b", found)
+
+
+    def testSinglePredicateT(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID -> {True}? ID -> ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc")
+        self.assertEqual("abc", found)
+
+
+    def testSinglePredicateF(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID -> {False}? ID -> ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc")
+        self.assertEqual("", found)
+
+
+    def testMultiplePredicate(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT -> {False}? ID
+                       -> {True}? INT
+                       ->
+              ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a 2")
+        self.assertEqual("2", found)
+
+
+    def testMultiplePredicateTrees(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID INT -> {False}? ^(ID INT)
+                       -> {True}? ^(INT ID)
+                       -> ID
+              ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a 2")
+        self.assertEqual("(2 a)", found)
+
+
+    def testSimpleTree(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : op INT -> ^(op INT);
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "-34")
+        self.assertEqual("(- 34)", found)
+
+
+    def testSimpleTree2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : op INT -> ^(INT op);
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "+ 34")
+        self.assertEqual("(34 +)", found)
+
+
+    def testNestedTrees(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : 'var' (ID ':' type ';')+ -> ^('var' ^(':' ID type)+) ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "var a:int; b:float;")
+        self.assertEqual("(var (: a int) (: b float))", found)
+
+
+    def testImaginaryTokenCopy(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {VAR;}
+            a : ID (',' ID)*-> ^(VAR ID)+ ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a,b,c")
+        self.assertEqual("(VAR a) (VAR b) (VAR c)", found)
+
+
+    def testTokenUnreferencedOnLeftButDefined(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {VAR;}
+            a : b -> ID ;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a")
+        self.assertEqual("ID", found)
+
+
+    def testImaginaryTokenCopySetText(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {VAR;}
+            a : ID (',' ID)*-> ^(VAR["var"] ID)+ ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a,b,c")
+        self.assertEqual("(var a) (var b) (var c)", found)
+
+
+    def testImaginaryTokenNoCopyFromToken(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "{a b c}")
+        self.assertEqual("({ a b c)", found)
+
+
+    def testImaginaryTokenNoCopyFromTokenSetText(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : lc='{' ID+ '}' -> ^(BLOCK[$lc,"block"] ID+) ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "{a b c}")
+        self.assertEqual("(block a b c)", found)
+
+
+    def testMixedRewriteAndAutoAST(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : b b^ ; // 2nd b matches only an INT; can make it root
+            b : ID INT -> INT ID
+              | INT
+              ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a 1 2")
+        self.assertEqual("(2 1 a)", found)
+
+
+    def testSubruleWithRewrite(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : b b ;
+            b : (ID INT -> INT ID | INT INT -> INT+ )
+              ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a 1 2 3")
+        self.assertEqual("1 a 2 3", found)
+
+
+    def testSubruleWithRewrite2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {TYPE;}
+            a : b b ;
+            b : 'int'
+                ( ID -> ^(TYPE 'int' ID)
+                | ID '=' INT -> ^(TYPE 'int' ID INT)
+                )
+                ';'
+              ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "int a; int b=3;")
+        self.assertEqual("(TYPE int a) (TYPE int b 3)", found)
+
+
+    def testNestedRewriteShutsOffAutoAST(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : b b ;
+            b : ID ( ID (last=ID -> $last)+ ) ';' // get last ID
+              | INT // should still get auto AST construction
+              ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b c d; 42")
+        self.assertEqual("d 42", found)
+
+
+    def testRewriteActions(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : atom -> ^({self.adaptor.create(INT,"9")} atom) ;
+            atom : INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "3")
+        self.assertEqual("(9 3)", found)
+
+
+    def testRewriteActions2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : atom -> {self.adaptor.create(INT,"9")} atom ;
+            atom : INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "3")
+        self.assertEqual("9 3", found)
+
+
+    def testRefToOldValue(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : (atom -> atom) (op='+' r=atom -> ^($op $a $r) )* ;
+            atom : INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "3+4+5")
+        self.assertEqual("(+ (+ 3 4) 5)", found)
+
+
+    def testCopySemanticsForRules(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : atom -> ^(atom atom) ; // NOT CYCLE! (dup atom)
+            atom : INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "3")
+        self.assertEqual("(3 3)", found)
+
+
+    def testCopySemanticsForRules2(self):
+        # copy type as a root for each invocation of (...)+ in rewrite
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : type ID (',' ID)* ';' -> ^(type ID)+ ;
+            type : 'int' ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "int a,b,c;")
+        self.assertEqual("(int a) (int b) (int c)", found)
+
+
+    def testCopySemanticsForRules3(self):
+        # copy type *and* modifier even though it's optional
+        # for each invocation of (...)+ in rewrite
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ;
+            type : 'int' ;
+            modifier : 'public' ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "public int a,b,c;")
+        self.assertEqual("(int public a) (int public b) (int public c)", found)
+
+
+    def testCopySemanticsForRules3Double(self):
+        # copy type *and* modifier even though it's optional
+        # for each invocation of (...)+ in rewrite
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ^(type modifier? ID)+ ;
+            type : 'int' ;
+            modifier : 'public' ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "public int a,b,c;")
+        self.assertEqual("(int public a) (int public b) (int public c) (int public a) (int public b) (int public c)", found)
+
+
+    def testCopySemanticsForRules4(self):
+        # copy type *and* modifier even though it's optional
+        # for each invocation of (...)+ in rewrite
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {MOD;}
+            a : modifier? type ID (',' ID)* ';' -> ^(type ^(MOD modifier)? ID)+ ;
+            type : 'int' ;
+            modifier : 'public' ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "public int a,b,c;")
+        self.assertEqual("(int (MOD public) a) (int (MOD public) b) (int (MOD public) c)", found)
+
+
+    def testCopySemanticsLists(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {MOD;}
+            a : ID (',' ID)* ';' -> ID+ ID+ ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a,b,c;")
+        self.assertEqual("a b c a b c", found)
+
+
+    def testCopyRuleLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : x=b -> $x $x;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a")
+        self.assertEqual("a a", found)
+
+
+    def testCopyRuleLabel2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : x=b -> ^($x $x);
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a")
+        self.assertEqual("(a a)", found)
+
+
+    def testQueueingOfTokens(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : 'int' ID (',' ID)* ';' -> ^('int' ID+) ;
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "int a,b,c;")
+        self.assertEqual("(int a b c)", found)
+
+
+    def testCopyOfTokens(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : 'int' ID ';' -> 'int' ID 'int' ID ;
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "int a;")
+        self.assertEqual("int a int a", found)
+
+
+    def testTokenCopyInLoop(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : 'int' ID (',' ID)* ';' -> ^('int' ID)+ ;
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "int a,b,c;")
+        self.assertEqual("(int a) (int b) (int c)", found)
+
+
+    def testTokenCopyInLoopAgainstTwoOthers(self):
+        # must smear 'int' copies across as root of multiple trees
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : 'int' ID ':' INT (',' ID ':' INT)* ';' -> ^('int' ID INT)+ ;
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "int a:1,b:2,c:3;")
+        self.assertEqual("(int a 1) (int b 2) (int c 3)", found)
+
+
+    def testListRefdOneAtATime(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID+ -> ID ID ID ; // works if 3 input IDs
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b c")
+        self.assertEqual("a b c", found)
+
+
+    def testSplitListWithLabels(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {VAR;}
+            a : first=ID others+=ID* -> $first VAR $others+ ;
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b c")
+        self.assertEqual("a VAR b c", found)
+
+
+    def testComplicatedMelange(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : A A b=B B b=B c+=C C c+=C D {s=$D.text} -> A+ B+ C+ D ;
+            type : 'int' | 'float' ;
+            A : 'a' ;
+            B : 'b' ;
+            C : 'c' ;
+            D : 'd' ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a a b b b c c c d")
+        self.assertEqual("a a b b b c c c d", found)
+
+
+    def testRuleLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : x=b -> $x;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a")
+        self.assertEqual("a", found)
+
+
+    def testAmbiguousRule(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID a -> a | INT ;
+            ID : 'a'..'z'+ ;
+            INT: '0'..'9'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc 34")
+        self.assertEqual("34", found)
+
+
+    def testRuleListLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : x+=b x+=b -> $x+;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("a b", found)
+
+
+    def testRuleListLabel2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : x+=b x+=b -> $x $x*;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("a b", found)
+
+
+    def testOptional(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : x=b (y=b)? -> $x $y?;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a")
+        self.assertEqual("a", found)
+
+
+    def testOptional2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : x=ID (y=b)? -> $x $y?;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("a b", found)
+
+
+    def testOptional3(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : x=ID (y=b)? -> ($x $y)?;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("a b", found)
+
+
+    def testOptional4(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : x+=ID (y=b)? -> ($x $y)?;
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("a b", found)
+
+
+    def testOptional5(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : ID -> ID? ; // match an ID to optional ID
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a")
+        self.assertEqual("a", found)
+
+
+    def testArbitraryExprType(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : x+=b x+=b -> {CommonTree(None)};
+            b : ID ;
+            ID : 'a'..'z'+ ;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "a b")
+        self.assertEqual("", found)
+
+
+    def testSet(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a: (INT|ID)+ -> INT+ ID+ ;
+            INT: '0'..'9'+;
+            ID : 'a'..'z'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "2 a 34 de")
+        self.assertEqual("2 34 a de", found)
+
+
+    def testSet2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a: (INT|ID) -> INT? ID? ;
+            INT: '0'..'9'+;
+            ID : 'a'..'z'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "2")
+        self.assertEqual("2", found)
+
+
+    @testbase.broken("http://www.antlr.org:8888/browse/ANTLR-162",
+                     antlr3.tree.RewriteEmptyStreamException)
+    def testSetWithLabel(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : x=(INT|ID) -> $x ;
+            INT: '0'..'9'+;
+            ID : 'a'..'z'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "2")
+        self.assertEqual("2", found)
+
+
+    def testRewriteAction(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens { FLOAT; }
+            r
+                : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text+".0"))}
+                ;
+            INT : '0'..'9'+;
+            WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN};
+            ''')
+
+        found = self.execParser(grammar, "r", "25")
+        self.assertEqual("25.0", found)
+
+
+    def testOptionalSubruleWithoutRealElements(self):
+        # copy type *and* modifier even though it's optional
+        # for each invocation of (...)+ in rewrite
+        grammar = textwrap.dedent(
+            r"""
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {PARMS;}
+
+            modulo
+             : 'modulo' ID ('(' parms+ ')')? -> ^('modulo' ID ^(PARMS parms+)?)
+             ;
+            parms : '#'|ID;
+            ID : ('a'..'z' | 'A'..'Z')+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            """)
+
+        found = self.execParser(grammar, "modulo", "modulo abc (x y #)")
+        self.assertEqual("(modulo abc (PARMS x y #))", found)
+
+
+    ## C A R D I N A L I T Y  I S S U E S
+
+    def testCardinality(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            tokens {BLOCK;}
+            a : ID ID INT INT INT -> (ID INT)+;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        self.assertRaises(antlr3.tree.RewriteCardinalityException,
+                          self.execParser, grammar, "a", "a b 3 4 5")
+
+
+    def testCardinality2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID+ -> ID ID ID ; // only 2 input IDs
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        self.assertRaises(antlr3.tree.RewriteCardinalityException,
+                          self.execParser, grammar, "a", "a b")
+
+
+    def testCardinality3(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID? INT -> ID INT ;
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        self.assertRaises(antlr3.tree.RewriteEmptyStreamException,
+                          self.execParser, grammar, "a", "3")
+
+
+    def testLoopCardinality(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID? INT -> ID+ INT ;
+            op : '+'|'-' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        self.assertRaises(antlr3.tree.RewriteEarlyExitException,
+                          self.execParser, grammar, "a", "3")
+
+
+    def testWildcard(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar T;
+            options {language=Python3;output=AST;}
+            a : ID c=. -> $c;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found = self.execParser(grammar, "a", "abc 34")
+        self.assertEqual("34", found)
+
+
+    # E R R O R S
+
+    def testExtraTokenInSimpleDecl(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            tokens {EXPR;}
+            decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "decl", "int 34 x=1;",
+                                        expectErrors=True)
+        self.assertEqual(["line 1:4 extraneous input '34' expecting ID"],
+                         errors)
+        self.assertEqual("(EXPR int x 1)", found) # tree gets correct x and 1 tokens
+
+
+    #@testbase.broken("FIXME", AssertionError)
+    def testMissingIDInSimpleDecl(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            tokens {EXPR;}
+            decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "decl", "int =1;",
+                                        expectErrors=True)
+        self.assertEqual(["line 1:4 missing ID at '='"], errors)
+        self.assertEqual("(EXPR int <missing ID> 1)", found) # tree gets invented ID token
+
+
+    def testMissingSetInSimpleDecl(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            tokens {EXPR;}
+            decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;
+            type : 'int' | 'float' ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "decl", "x=1;",
+                                        expectErrors=True)
+        self.assertEqual(["line 1:0 mismatched input 'x' expecting set None"],
+                         errors)
+        self.assertEqual("(EXPR <error: x> x 1)", found) # tree gets an error node in place of the missing type
+
+
+    def testMissingTokenGivesErrorNode(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : ID INT -> ID INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "abc",
+                                        expectErrors=True)
+        self.assertEqual(["line 1:3 missing INT at '<EOF>'"], errors)
+        # doesn't do in-line recovery for sets (yet?)
+        self.assertEqual("abc <missing INT>", found)
+
+
+    def testExtraTokenGivesErrorNode(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : b c -> b c;
+            b : ID -> ID ;
+            c : INT -> INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "abc ick 34",
+                                        expectErrors=True)
+        self.assertEqual(["line 1:4 extraneous input 'ick' expecting INT"],
+                         errors)
+        self.assertEqual("abc 34", found)
+
+
+    #@testbase.broken("FIXME", AssertionError)
+    def testMissingFirstTokenGivesErrorNode(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : ID INT -> ID INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
+        self.assertEqual(["line 1:0 missing ID at '34'"], errors)
+        self.assertEqual("<missing ID> 34", found)
+
+
+    #@testbase.broken("FIXME", AssertionError)
+    def testMissingFirstTokenGivesErrorNode2(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : b c -> b c;
+            b : ID -> ID ;
+            c : INT -> INT ;
+            ID : 'a'..'z'+ ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
+        # finds an error at the first token, 34, and re-syncs.
+        # re-synchronizing does not consume a token because 34 follows
+        # ref to rule b (start of c). It then matches 34 in c.
+        self.assertEqual(["line 1:0 missing ID at '34'"], errors)
+        self.assertEqual("<missing ID> 34", found)
+
+
+    def testNoViableAltGivesErrorNode(self):
+        grammar = textwrap.dedent(
+            r'''
+            grammar foo;
+            options {language=Python3;output=AST;}
+            a : b -> b | c -> c;
+            b : ID -> ID ;
+            c : INT -> INT ;
+            ID : 'a'..'z'+ ;
+            S : '*' ;
+            INT : '0'..'9'+;
+            WS : (' '|'\n') {$channel=HIDDEN} ;
+            ''')
+
+        found, errors = self.execParser(grammar, "a", "*", expectErrors=True)
+        # no alternative of rule a is viable at the first token, '*', so the
+        # parser reports the error and returns an error node covering the
+        # resync'd token.
+        self.assertEqual(["line 1:0 no viable alternative at input '*'"],
+                         errors)
+        self.assertEqual("<unexpected: [@0,0:0='*',<S>,1:0], resync=*>",
+                         found)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t059debug.py b/runtime/Python3/tests/t059debug.py
new file mode 100644
index 0000000..8e129f7
--- /dev/null
+++ b/runtime/Python3/tests/t059debug.py
@@ -0,0 +1,787 @@
+import unittest
+import textwrap
+import antlr3
+import antlr3.tree
+import antlr3.debug
+import testbase
+import sys
+import threading
+import socket
+import errno
+import time
+
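+# The Debugger thread acts as the remote debug client: it connects to the
+# port on which the -debug generated parser listens and records every event
+# it receives, so the tests can assert on the exact event stream.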
+class Debugger(threading.Thread):
+    def __init__(self, port):
+        super().__init__()
+        self.events = []
+        self.success = False
+        self.port = port
+
+    def run(self):
+        # connect to the socket the debug parser listens on; retry for up to 10s
+        s = None
+        tstart = time.time()
+        while time.time() - tstart < 10:
+            try:
+                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                s.connect(('127.0.0.1', self.port))
+                break
+            except socket.error as exc:
+                if s:
+                    s.close()
+                if exc.args[0] != errno.ECONNREFUSED:
+                    raise
+                time.sleep(0.1)
+
+        if s is None:
+            self.events.append(['nosocket'])
+            return
+
+        s.setblocking(1)
+        s.settimeout(10.0)
+
+        output = s.makefile('w', 1)
+        input = s.makefile('r', 1)
+
+        try:
+            # handshake
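+            # the proxy first sends a protocol version line ("ANTLR 2") and a
+            # line naming the grammar; every line received afterwards is
+            # acknowledged with ACK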
+            l = input.readline().strip()
+            assert l == 'ANTLR 2'
+            l = input.readline().strip()
+            assert l.startswith('grammar "'), l
+
+            output.write('ACK\n')
+            output.flush()
+
+            while True:
+                event = input.readline().strip()
+                self.events.append(event.split('\t'))
+
+                output.write('ACK\n')
+                output.flush()
+
+                if event == 'terminate':
+                    self.success = True
+                    break
+
+        except socket.timeout:
+            self.events.append(['timeout'])
+        except socket.error as exc:
+            self.events.append(['socketerror', exc.args])
+        finally:
+            output.close()
+            input.close()
+            s.close()
+
+
+class T(testbase.ANTLRTest):
+    def execParser(self, grammar, grammarEntry, input, listener,
+                   parser_args={}):
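+        """Compile the grammar with -debug and run grammarEntry on input.
+
+        If listener is None, a socket Debugger thread is started on a fixed
+        port and returned after the parse so its recorded events can be
+        inspected; otherwise the given listener is attached to the parser
+        directly.
+        """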
+        if listener is None:
+            port = 49100
+            debugger = Debugger(port)
+            debugger.start()
+            # TODO(pink): install alarm, so it doesn't hang forever in case of a bug
+
+        else:
+            port = None
+
+        try:
+            lexerCls, parserCls = self.compileInlineGrammar(
+                grammar, options='-debug')
+
+            cStream = antlr3.StringStream(input)
+            lexer = lexerCls(cStream)
+            tStream = antlr3.CommonTokenStream(lexer)
+            parser = parserCls(tStream, dbg=listener, port=port, **parser_args)
+            getattr(parser, grammarEntry)()
+
+        finally:
+            if listener is None:
+                debugger.join()
+                return debugger
+
+    def testBasicParser(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : ID EOF;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        listener = antlr3.debug.RecordDebugEventListener()
+
+        self.execParser(
+            grammar, 'a',
+            input="a",
+            listener=listener)
+
+        # We only check that some LT events are present. How many is subject
+        # to change (at the time of writing there are two, which is one too
+        # many).
+        lt_events = [event for event in listener.events
+                     if event.startswith("LT ")]
+        self.assertNotEqual(lt_events, [])
+
+        # For the rest, filter out LT events to get a reliable test.
+        expected = ["enterRule a",
+                    "location 6:1",
+                    "location 6:5",
+                    "location 6:8",
+                    "location 6:11",
+                    "exitRule a"]
+        found = [event for event in listener.events
+                 if not event.startswith("LT ")]
+        self.assertListEqual(found, expected)
+
+    def testSocketProxy(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : ID EOF;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a",
+            listener=None)
+
+        self.assertTrue(debugger.success)
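+        # The numeric fields of the LT/consumeToken events appear to encode
+        # the serialized token as index, type, channel, line and column,
+        # followed by the quoted text (an inference from the values below).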
+        expected = [['enterRule', 'T.g', 'a'],
+                    ['location', '6', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '5'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+                    ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+                    ['consumeToken', '-1', '-1', '0', '1', '1', '"<EOF>'],
+                    ['location', '6', '11'],
+                    ['exitRule', 'T.g', 'a'],
+                    ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+    def testRecognitionException(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : ID EOF;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a b",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected = [['enterRule', 'T.g', 'a'],
+                    ['location', '6', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '5'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeHiddenToken', '1', '5', '99', '1', '1', '"'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '2', '4', '0', '1', '2', '"b'],
+                    ['LT', '1', '2', '4', '0', '1', '2', '"b'],
+                    ['LT', '2', '-1', '-1', '0', '1', '3', '"<EOF>'],
+                    ['LT', '1', '2', '4', '0', '1', '2', '"b'],
+                    ['LT', '1', '2', '4', '0', '1', '2', '"b'],
+                    ['beginResync'],
+                    ['consumeToken', '2', '4', '0', '1', '2', '"b'],
+                    ['endResync'],
+                    ['exception', 'UnwantedTokenException', '2', '1', '2'],
+                    ['LT', '1', '-1', '-1', '0', '1', '3', '"<EOF>'],
+                    ['consumeToken', '-1', '-1', '0', '1', '3', '"<EOF>'],
+                    ['location', '6', '11'],
+                    ['exitRule', 'T.g', 'a'],
+                    ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testSemPred(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : {True}? ID EOF;
+        ID : 'a'..'z'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected = [['enterRule', 'T.g', 'a'],
+                    ['location', '6', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '5'],
+                    ['semanticPredicate', '1', 'True'],
+                    ['location', '6', '13'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+                    ['location', '6', '16'],
+                    ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+                    ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+                    ['consumeToken', '-1', '-1', '0', '1', '1', '"<EOF>'],
+                    ['location', '6', '19'],
+                    ['exitRule', 'T.g', 'a'],
+                    ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testPositiveClosureBlock(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : ID ( ID | INT )+ EOF;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a 1 b c 3",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected = [['enterRule', 'T.g', 'a'],
+                    ['location', '6', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '5'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'],
+                    ['location', '6', '8'],
+                    ['enterSubRule', '1'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+                    ['exitDecision', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+                    ['consumeToken', '2', '5', '0', '1', '2', '"1'],
+                    ['consumeHiddenToken', '3', '6', '99', '1', '3', '"'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '4', '4', '0', '1', '4', '"b'],
+                    ['exitDecision', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '4', '4', '0', '1', '4', '"b'],
+                    ['consumeToken', '4', '4', '0', '1', '4', '"b'],
+                    ['consumeHiddenToken', '5', '6', '99', '1', '5', '"'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '6', '4', '0', '1', '6', '"c'],
+                    ['exitDecision', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '6', '4', '0', '1', '6', '"c'],
+                    ['consumeToken', '6', '4', '0', '1', '6', '"c'],
+                    ['consumeHiddenToken', '7', '6', '99', '1', '7', '"'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '8', '5', '0', '1', '8', '"3'],
+                    ['exitDecision', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '8', '5', '0', '1', '8', '"3'],
+                    ['consumeToken', '8', '5', '0', '1', '8', '"3'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+                    ['exitDecision', '1'],
+                    ['exitSubRule', '1'],
+                    ['location', '6', '22'],
+                    ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+                    ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+                    ['consumeToken', '-1', '-1', '0', '1', '9', '"<EOF>'],
+                    ['location', '6', '25'],
+                    ['exitRule', 'T.g', 'a'],
+                    ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testClosureBlock(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : ID ( ID | INT )* EOF;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a 1 b c 3",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected = [['enterRule', 'T.g', 'a'],
+                    ['location', '6', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '5'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'],
+                    ['location', '6', '8'],
+                    ['enterSubRule', '1'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+                    ['exitDecision', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+                    ['consumeToken', '2', '5', '0', '1', '2', '"1'],
+                    ['consumeHiddenToken', '3', '6', '99', '1', '3', '"'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '4', '4', '0', '1', '4', '"b'],
+                    ['exitDecision', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '4', '4', '0', '1', '4', '"b'],
+                    ['consumeToken', '4', '4', '0', '1', '4', '"b'],
+                    ['consumeHiddenToken', '5', '6', '99', '1', '5', '"'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '6', '4', '0', '1', '6', '"c'],
+                    ['exitDecision', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '6', '4', '0', '1', '6', '"c'],
+                    ['consumeToken', '6', '4', '0', '1', '6', '"c'],
+                    ['consumeHiddenToken', '7', '6', '99', '1', '7', '"'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '8', '5', '0', '1', '8', '"3'],
+                    ['exitDecision', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '8', '5', '0', '1', '8', '"3'],
+                    ['consumeToken', '8', '5', '0', '1', '8', '"3'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+                    ['exitDecision', '1'],
+                    ['exitSubRule', '1'],
+                    ['location', '6', '22'],
+                    ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+                    ['LT', '1', '-1', '-1', '0', '1', '9', '"<EOF>'],
+                    ['consumeToken', '-1', '-1', '0', '1', '9', '"<EOF>'],
+                    ['location', '6', '25'],
+                    ['exitRule', 'T.g', 'a'],
+                    ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testMismatchedSetException(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : ID ( ID | INT ) EOF;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected = [['enterRule', 'T.g', 'a'],
+                    ['location', '6', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '5'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+                    ['location', '6', '8'],
+                    ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+                    ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+                    ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+                    ['exception', 'MismatchedSetException', '1', '1', '1'],
+                    ['exception', 'MismatchedSetException', '1', '1', '1'],
+                    ['beginResync'],
+                    ['LT', '1', '-1', '-1', '0', '1', '1', '"<EOF>'],
+                    ['endResync'],
+                    ['location', '6', '24'],
+                    ['exitRule', 'T.g', 'a'],
+                    ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testBlock(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : ID ( b | c ) EOF;
+        b : ID;
+        c : INT;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a 1",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected =  [['enterRule', 'T.g', 'a'],
+                     ['location', '6', '1'],
+                     ['enterAlt', '1'],
+                     ['location', '6', '5'],
+                     ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                     ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                     ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+                     ['consumeHiddenToken', '1', '6', '99', '1', '1', '"'],
+                     ['location', '6', '8'],
+                     ['enterSubRule', '1'],
+                     ['enterDecision', '1', '0'],
+                     ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+                     ['exitDecision', '1'],
+                     ['enterAlt', '2'],
+                     ['location', '6', '14'],
+                     ['enterRule', 'T.g', 'c'],
+                     ['location', '8', '1'],
+                     ['enterAlt', '1'],
+                     ['location', '8', '5'],
+                     ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+                     ['LT', '1', '2', '5', '0', '1', '2', '"1'],
+                     ['consumeToken', '2', '5', '0', '1', '2', '"1'],
+                     ['location', '8', '8'],
+                     ['exitRule', 'T.g', 'c'],
+                     ['exitSubRule', '1'],
+                     ['location', '6', '18'],
+                     ['LT', '1', '-1', '-1', '0', '1', '3', '"<EOF>'],
+                     ['LT', '1', '-1', '-1', '0', '1', '3', '"<EOF>'],
+                     ['consumeToken', '-1', '-1', '0', '1', '3', '"<EOF>'],
+                     ['location', '6', '21'],
+                     ['exitRule', 'T.g', 'a'],
+                     ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testNoViableAlt(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : ID ( b | c ) EOF;
+        b : ID;
+        c : INT;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        BANG : '!' ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a !",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected =  [['enterRule', 'T.g', 'a'],
+                     ['location', '6', '1'],
+                     ['enterAlt', '1'],
+                     ['location', '6', '5'],
+                     ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+                     ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+                     ['consumeToken', '0', '5', '0', '1', '0', '"a'],
+                     ['consumeHiddenToken', '1', '7', '99', '1', '1', '"'],
+                     ['location', '6', '8'],
+                     ['enterSubRule', '1'],
+                     ['enterDecision', '1', '0'],
+                     ['LT', '1', '2', '4', '0', '1', '2', '"!'],
+                     ['LT', '1', '2', '4', '0', '1', '2', '"!'],
+                     ['LT', '1', '2', '4', '0', '1', '2', '"!'],
+                     ['exception', 'NoViableAltException', '2', '1', '2'],
+                     ['exitDecision', '1'],
+                     ['exitSubRule', '1'],
+                     ['exception', 'NoViableAltException', '2', '1', '2'],
+                     ['beginResync'],
+                     ['LT', '1', '2', '4', '0', '1', '2', '"!'],
+                     ['consumeToken', '2', '4', '0', '1', '2', '"!'],
+                     ['LT', '1', '-1', '-1', '0', '1', '3', '"<EOF>'],
+                     ['endResync'],
+                     ['location', '6', '21'],
+                     ['exitRule', 'T.g', 'a'],
+                     ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testRuleBlock(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : b | c;
+        b : ID;
+        c : INT;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="1",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected = [['enterRule', 'T.g', 'a'],
+                    ['location', '6', '1'],
+                    ['enterDecision', '1', '0'],
+                    ['LT', '1', '0', '5', '0', '1', '0', '"1'],
+                    ['exitDecision', '1'],
+                    ['enterAlt', '2'],
+                    ['location', '6', '9'],
+                    ['enterRule', 'T.g', 'c'],
+                    ['location', '8', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '8', '5'],
+                    ['LT', '1', '0', '5', '0', '1', '0', '"1'],
+                    ['LT', '1', '0', '5', '0', '1', '0', '"1'],
+                    ['consumeToken', '0', '5', '0', '1', '0', '"1'],
+                    ['location', '8', '8'],
+                    ['exitRule', 'T.g', 'c'],
+                    ['location', '6', '10'],
+                    ['exitRule', 'T.g', 'a'],
+                    ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testRuleBlockSingleAlt(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : b;
+        b : ID;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected = [['enterRule', 'T.g', 'a'],
+                    ['location', '6', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '5'],
+                    ['enterRule', 'T.g', 'b'],
+                    ['location', '7', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '7', '5'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+                    ['location', '7', '7'],
+                    ['exitRule', 'T.g', 'b'],
+                    ['location', '6', '6'],
+                    ['exitRule', 'T.g', 'a'],
+                    ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testBlockSingleAlt(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : ( b );
+        b : ID;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected = [['enterRule', 'T.g', 'a'],
+                    ['location', '6', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '5'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '7'],
+                    ['enterRule', 'T.g', 'b'],
+                    ['location', '7', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '7', '5'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['LT', '1', '0', '4', '0', '1', '0', '"a'],
+                    ['consumeToken', '0', '4', '0', '1', '0', '"a'],
+                    ['location', '7', '7'],
+                    ['exitRule', 'T.g', 'b'],
+                    ['location', '6', '10'],
+                    ['exitRule', 'T.g', 'a'],
+                    ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testDFA(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+        }
+        a : ( b | c ) EOF;
+        b : ID* INT;
+        c : ID+ BANG;
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        BANG : '!';
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        debugger = self.execParser(
+            grammar, 'a',
+            input="a!",
+            listener=None)
+
+        self.assertTrue(debugger.success)
+        expected = [['enterRule', 'T.g', 'a'],
+                    ['location', '6', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '6', '5'],
+                    ['enterSubRule', '1'],
+                    ['enterDecision', '1', '0'],
+                    ['mark', '0'],
+                    ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+                    ['consumeToken', '0', '5', '0', '1', '0', '"a'],
+                    ['LT', '1', '1', '4', '0', '1', '1', '"!'],
+                    ['consumeToken', '1', '4', '0', '1', '1', '"!'],
+                    ['rewind', '0'],
+                    ['exitDecision', '1'],
+                    ['enterAlt', '2'],
+                    ['location', '6', '11'],
+                    ['enterRule', 'T.g', 'c'],
+                    ['location', '8', '1'],
+                    ['enterAlt', '1'],
+                    ['location', '8', '5'],
+                    ['enterSubRule', '3'],
+                    ['enterDecision', '3', '0'],
+                    ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+                    ['exitDecision', '3'],
+                    ['enterAlt', '1'],
+                    ['location', '8', '5'],
+                    ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+                    ['LT', '1', '0', '5', '0', '1', '0', '"a'],
+                    ['consumeToken', '0', '5', '0', '1', '0', '"a'],
+                    ['enterDecision', '3', '0'],
+                    ['LT', '1', '1', '4', '0', '1', '1', '"!'],
+                    ['exitDecision', '3'],
+                    ['exitSubRule', '3'],
+                    ['location', '8', '9'],
+                    ['LT', '1', '1', '4', '0', '1', '1', '"!'],
+                    ['LT', '1', '1', '4', '0', '1', '1', '"!'],
+                    ['consumeToken', '1', '4', '0', '1', '1', '"!'],
+                    ['location', '8', '13'],
+                    ['exitRule', 'T.g', 'c'],
+                    ['exitSubRule', '1'],
+                    ['location', '6', '15'],
+                    ['LT', '1', '-1', '-1', '0', '1', '2', '"<EOF>'],
+                    ['LT', '1', '-1', '-1', '0', '1', '2', '"<EOF>'],
+                    ['consumeToken', '-1', '-1', '0', '1', '2', '"<EOF>'],
+                    ['location', '6', '18'],
+                    ['exitRule', 'T.g', 'a'],
+                    ['terminate']]
+
+        self.assertListEqual(debugger.events, expected)
+
+
+    def testBasicAST(self):
+        grammar = textwrap.dedent(
+        r'''
+        grammar T;
+        options {
+            language=Python3;
+            output=AST;
+        }
+        a : ( b | c ) EOF!;
+        b : ID* INT -> ^(INT ID*);
+        c : ID+ BANG -> ^(BANG ID+);
+        ID : 'a'..'z'+ ;
+        INT : '0'..'9'+ ;
+        BANG : '!';
+        WS : (' '|'\n') {$channel=HIDDEN} ;
+        ''')
+
+        listener = antlr3.debug.RecordDebugEventListener()
+
+        self.execParser(
+            grammar, 'a',
+            input="a!",
+            listener=listener)
+
+        # don't check output for now (too dynamic), I'm satisfied if it
+        # doesn't crash
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/t060leftrecursion.py b/runtime/Python3/tests/t060leftrecursion.py
new file mode 100644
index 0000000..05b5bc0
--- /dev/null
+++ b/runtime/Python3/tests/t060leftrecursion.py
@@ -0,0 +1,468 @@
+import unittest
+import re
+import textwrap
+import antlr3
+import testbase
+
+
+# Left-recursion resolution is not yet enabled in the tool.
+
+# class TestLeftRecursion(testbase.ANTLRTest):
+#     def parserClass(self, base):
+#         class TParser(base):
+#             def __init__(self, *args, **kwargs):
+#                 super().__init__(*args, **kwargs)
+
+#                 self._output = ""
+
+
+#             def capture(self, t):
+#                 self._output += str(t)
+
+
+#             def recover(self, input, re):
+#                 # no error recovery yet, just crash!
+#                 raise
+
+#         return TParser
+
+
+#     def execParser(self, grammar, grammarEntry, input):
+#         lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+#         cStream = antlr3.StringStream(input)
+#         lexer = lexerCls(cStream)
+#         tStream = antlr3.CommonTokenStream(lexer)
+#         parser = parserCls(tStream)
+#         getattr(parser, grammarEntry)()
+#         return parser._output
+
+
+#     def runTests(self, grammar, tests, grammarEntry):
+#         lexerCls, parserCls = self.compileInlineGrammar(grammar)
+
+#         build_ast = re.search(r'output\s*=\s*AST', grammar)
+
+#         for input, expecting in tests:
+#             cStream = antlr3.StringStream(input)
+#             lexer = lexerCls(cStream)
+#             tStream = antlr3.CommonTokenStream(lexer)
+#             parser = parserCls(tStream)
+#             r = getattr(parser, grammarEntry)()
+#             found = parser._output
+#             if build_ast:
+#               found += r.tree.toStringTree()
+
+#             self.assertEqual(
+#                 expecting, found,
+#                 "{!r} != {!r} (for input {!r})".format(expecting, found, input))
+
+
+#     def testSimple(self):
+#         grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#             }
+#             s : a { self.capture($a.text) } ;
+#             a : a ID
+#               | ID
+#               ;
+#             ID : 'a'..'z'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#         found = self.execParser(grammar, 's', 'a b c')
+#         expecting = "abc"
+#         self.assertEqual(expecting, found)
+
+
+#     def testSemPred(self):
+#         grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#             }
+#             s : a { self.capture($a.text) } ;
+#             a : a {True}? ID
+#               | ID
+#               ;
+#             ID : 'a'..'z'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#         found = self.execParser(grammar, "s", "a b c")
+#         expecting = "abc"
+#         self.assertEqual(expecting, found)
+
+#     def testTernaryExpr(self):
+#         grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#                 output=AST;
+#             }
+#             e : e '*'^ e
+#               | e '+'^ e
+#               | e '?'<assoc=right>^ e ':'! e
+#               | e '='<assoc=right>^ e
+#               | ID
+#               ;
+#             ID : 'a'..'z'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#         tests = [
+#             ("a", "a"),
+#             ("a+b", "(+ a b)"),
+#             ("a*b", "(* a b)"),
+#             ("a?b:c", "(? a b c)"),
+#             ("a=b=c", "(= a (= b c))"),
+#             ("a?b+c:d", "(? a (+ b c) d)"),
+#             ("a?b=c:d", "(? a (= b c) d)"),
+#             ("a? b?c:d : e", "(? a (? b c d) e)"),
+#             ("a?b: c?d:e", "(? a b (? c d e))"),
+#             ]
+#         self.runTests(grammar, tests, "e")
+
+
+#     def testDeclarationsUsingASTOperators(self):
+#         grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#                 output=AST;
+#             }
+#             declarator
+#                     : declarator '['^ e ']'!
+#                     | declarator '['^ ']'!
+#                     | declarator '('^ ')'!
+#                     | '*'^ declarator // binds less tight than suffixes
+#                     | '('! declarator ')'!
+#                     | ID
+#                     ;
+#             e : INT ;
+#             ID : 'a'..'z'+ ;
+#             INT : '0'..'9'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#         tests = [
+#             ("a", "a"),
+#             ("*a", "(* a)"),
+#             ("**a", "(* (* a))"),
+#             ("a[3]", "([ a 3)"),
+#             ("b[]", "([ b)"),
+#             ("(a)", "a"),
+#             ("a[]()", "(( ([ a))"),
+#             ("a[][]", "([ ([ a))"),
+#             ("*a[]", "(* ([ a))"),
+#             ("(*a)[]", "([ (* a))"),
+#             ]
+#         self.runTests(grammar, tests, "declarator")
+
+
+#     def testDeclarationsUsingRewriteOperators(self):
+#         grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#                 output=AST;
+#             }
+#             declarator
+#                     : declarator '[' e ']' -> ^('[' declarator e)
+#                     | declarator '[' ']' -> ^('[' declarator)
+#                     | declarator '(' ')' -> ^('(' declarator)
+#                     | '*' declarator -> ^('*' declarator)  // binds less tight than suffixes
+#                     | '(' declarator ')' -> declarator
+#                     | ID -> ID
+#                     ;
+#             e : INT ;
+#             ID : 'a'..'z'+ ;
+#             INT : '0'..'9'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#         tests = [
+#             ("a", "a"),
+#             ("*a", "(* a)"),
+#             ("**a", "(* (* a))"),
+#             ("a[3]", "([ a 3)"),
+#             ("b[]", "([ b)"),
+#             ("(a)", "a"),
+#             ("a[]()", "(( ([ a))"),
+#             ("a[][]", "([ ([ a))"),
+#             ("*a[]", "(* ([ a))"),
+#             ("(*a)[]", "([ (* a))"),
+#             ]
+#         self.runTests(grammar, tests, "declarator")
+
+
+#     def testExpressionsUsingASTOperators(self):
+#         grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#                 output=AST;
+#             }
+#             e : e '.'^ ID
+#               | e '.'^ 'this'
+#               | '-'^ e
+#               | e '*'^ e
+#               | e ('+'^|'-'^) e
+#               | INT
+#               | ID
+#               ;
+#             ID : 'a'..'z'+ ;
+#             INT : '0'..'9'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#         tests = [
+#             ("a", "a"),
+#             ("1", "1"),
+#             ("a+1", "(+ a 1)"),
+#             ("a*1", "(* a 1)"),
+#             ("a.b", "(. a b)"),
+#             ("a.this", "(. a this)"),
+#             ("a-b+c", "(+ (- a b) c)"),
+#             ("a+b*c", "(+ a (* b c))"),
+#             ("a.b+1", "(+ (. a b) 1)"),
+#             ("-a", "(- a)"),
+#             ("-a+b", "(+ (- a) b)"),
+#             ("-a.b", "(- (. a b))"),
+#             ]
+#         self.runTests(grammar, tests, "e")
+
+
+#     @testbase.broken(
+#         "Grammar compilation returns errors", testbase.GrammarCompileError)
+#     def testExpressionsUsingRewriteOperators(self):
+#         grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#                 output=AST;
+#             }
+#             e : e '.' ID                   -> ^('.' e ID)
+#               | e '.' 'this'               -> ^('.' e 'this')
+#               | '-' e                      -> ^('-' e)
+#               | e '*' b=e                  -> ^('*' e $b)
+#               | e (op='+'|op='-') b=e      -> ^($op e $b)
+#               | INT                        -> INT
+#               | ID                         -> ID
+#               ;
+#             ID : 'a'..'z'+ ;
+#             INT : '0'..'9'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#         tests = [
+#             ("a", "a"),
+#             ("1", "1"),
+#             ("a+1", "(+ a 1)"),
+#             ("a*1", "(* a 1)"),
+#             ("a.b", "(. a b)"),
+#             ("a.this", "(. a this)"),
+#             ("a+b*c", "(+ a (* b c))"),
+#             ("a.b+1", "(+ (. a b) 1)"),
+#             ("-a", "(- a)"),
+#             ("-a+b", "(+ (- a) b)"),
+#             ("-a.b", "(- (. a b))"),
+#             ]
+#         self.runTests(grammar, tests, "e")
+
+
+#     def testExpressionAssociativity(self):
+#         grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#                 output=AST;
+#             }
+#             e
+#               : e '.'^ ID
+#               | '-'^ e
+#               | e '^'<assoc=right>^ e
+#               | e '*'^ e
+#               | e ('+'^|'-'^) e
+#               | e ('='<assoc=right>^ |'+='<assoc=right>^) e
+#               | INT
+#               | ID
+#               ;
+#             ID : 'a'..'z'+ ;
+#             INT : '0'..'9'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#         tests = [
+#             ("a", "a"),
+#             ("1", "1"),
+#             ("a+1", "(+ a 1)"),
+#             ("a*1", "(* a 1)"),
+#             ("a.b", "(. a b)"),
+#             ("a-b+c", "(+ (- a b) c)"),
+#             ("a+b*c", "(+ a (* b c))"),
+#             ("a.b+1", "(+ (. a b) 1)"),
+#             ("-a", "(- a)"),
+#             ("-a+b", "(+ (- a) b)"),
+#             ("-a.b", "(- (. a b))"),
+#             ("a^b^c", "(^ a (^ b c))"),
+#             ("a=b=c", "(= a (= b c))"),
+#             ("a=b=c+d.e", "(= a (= b (+ c (. d e))))"),
+#             ]
+#         self.runTests(grammar, tests, "e")
+
+
+#     def testJavaExpressions(self):
+#       grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#                 output=AST;
+#             }
+#             expressionList
+#                 :   e (','! e)*
+#                 ;
+#             e   :   '('! e ')'!
+#                 |   'this'
+#                 |   'super'
+#                 |   INT
+#                 |   ID
+#                 |   type '.'^ 'class'
+#                 |   e '.'^ ID
+#                 |   e '.'^ 'this'
+#                 |   e '.'^ 'super' '('^ expressionList? ')'!
+#                 |   e '.'^ 'new'^ ID '('! expressionList? ')'!
+#                 |       'new'^ type ( '(' expressionList? ')'! | (options {k=1;}:'[' e ']'!)+) // ugly; simplified
+#                 |   e '['^ e ']'!
+#                 |   '('^ type ')'! e
+#                 |   e ('++'^ | '--'^)
+#                 |   e '('^ expressionList? ')'!
+#                 |   ('+'^|'-'^|'++'^|'--'^) e
+#                 |   ('~'^|'!'^) e
+#                 |   e ('*'^|'/'^|'%'^) e
+#                 |   e ('+'^|'-'^) e
+#                 |   e ('<'^ '<' | '>'^ '>' '>' | '>'^ '>') e
+#                 |   e ('<='^ | '>='^ | '>'^ | '<'^) e
+#                 |   e 'instanceof'^ e
+#                 |   e ('=='^ | '!='^) e
+#                 |   e '&'^ e
+#                 |   e '^'<assoc=right>^ e
+#                 |   e '|'^ e
+#                 |   e '&&'^ e
+#                 |   e '||'^ e
+#                 |   e '?' e ':' e
+#                 |   e ('='<assoc=right>^
+#                       |'+='<assoc=right>^
+#                       |'-='<assoc=right>^
+#                       |'*='<assoc=right>^
+#                       |'/='<assoc=right>^
+#                       |'&='<assoc=right>^
+#                       |'|='<assoc=right>^
+#                       |'^='<assoc=right>^
+#                       |'>>='<assoc=right>^
+#                       |'>>>='<assoc=right>^
+#                       |'<<='<assoc=right>^
+#                       |'%='<assoc=right>^) e
+#                 ;
+#             type: ID
+#                 | ID '['^ ']'!
+#                 | 'int'
+#                 | 'int' '['^ ']'!
+#                 ;
+#             ID : ('a'..'z'|'A'..'Z'|'_'|'$')+;
+#             INT : '0'..'9'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#       tests = [
+#           ("a", "a"),
+#           ("1", "1"),
+#           ("a+1", "(+ a 1)"),
+#           ("a*1", "(* a 1)"),
+#           ("a.b", "(. a b)"),
+#           ("a-b+c", "(+ (- a b) c)"),
+#           ("a+b*c", "(+ a (* b c))"),
+#           ("a.b+1", "(+ (. a b) 1)"),
+#           ("-a", "(- a)"),
+#           ("-a+b", "(+ (- a) b)"),
+#           ("-a.b", "(- (. a b))"),
+#           ("a^b^c", "(^ a (^ b c))"),
+#           ("a=b=c", "(= a (= b c))"),
+#           ("a=b=c+d.e", "(= a (= b (+ c (. d e))))"),
+#           ("a|b&c", "(| a (& b c))"),
+#           ("(a|b)&c", "(& (| a b) c)"),
+#           ("a > b", "(> a b)"),
+#           ("a >> b", "(> a b)"),  # text is from one token
+#           ("a < b", "(< a b)"),
+#           ("(T)x", "(( T x)"),
+#           ("new A().b", "(. (new A () b)"),
+#           ("(T)t.f()", "(( (( T (. t f)))"),
+#           ("a.f(x)==T.c", "(== (( (. a f) x) (. T c))"),
+#           ("a.f().g(x,1)", "(( (. (( (. a f)) g) x 1)"),
+#           ("new T[((n-1) * x) + 1]", "(new T [ (+ (* (- n 1) x) 1))"),
+#           ]
+#       self.runTests(grammar, tests, "e")
+
+
+#     def testReturnValueAndActions(self):
+#         grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#             }
+#             s : e { self.capture($e.v) } ;
+#             e returns [v, ignored]
+#               : e '*' b=e {$v *= $b.v;}
+#               | e '+' b=e {$v += $b.v;}
+#               | INT {$v = int($INT.text);}
+#               ;
+#             INT : '0'..'9'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#         tests = [
+#             ("4", "4"),
+#             ("1+2", "3")
+#             ]
+#         self.runTests(grammar, tests, "s")
+
+
+#     def testReturnValueAndActionsAndASTs(self):
+#         grammar = textwrap.dedent(
+#             r"""
+#             grammar T;
+#             options {
+#                 language=Python3;
+#                 output=AST;
+#             }
+#             s : e { self.capture("v={}, ".format($e.v)) } ;
+#             e returns [v, ignored]
+#               : e '*'^ b=e {$v *= $b.v;}
+#               | e '+'^ b=e {$v += $b.v;}
+#               | INT {$v = int($INT.text);}
+#               ;
+#             INT : '0'..'9'+ ;
+#             WS : (' '|'\n') {self.skip()} ;
+#             """)
+
+#         tests = [
+#             ("4", "v=4, 4"),
+#             ("1+2", "v=3, (+ 1 2)"),
+#             ]
+#         self.runTests(grammar, tests, "s")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/runtime/Python3/tests/testbase.py b/runtime/Python3/tests/testbase.py
new file mode 100644
index 0000000..b2a9223
--- /dev/null
+++ b/runtime/Python3/tests/testbase.py
@@ -0,0 +1,425 @@
+from distutils.errors import *
+import errno
+import glob
+import hashlib
+import imp
+import inspect
+import os
+import re
+import shutil
+import sys
+import tempfile
+import unittest
+
+import antlr3
+
+def unlink(path):
+    try:
+        os.unlink(path)
+    except OSError as exc:
+        if exc.errno != errno.ENOENT:
+            raise
+
+
+class GrammarCompileError(Exception):
+    """Grammar failed to compile."""
+    pass
+
+
+# At least on Mac OS X the tempdir (/tmp) is a symlink. It is sometimes
+# dereferenced, sometimes not, which breaks the inspect.getmodule() function.
+testbasedir = os.path.join(
+    os.path.realpath(tempfile.gettempdir()),
+    'antlr3-test')
+
+
+class BrokenTest(unittest.TestCase.failureException):
+    def __repr__(self):
+        name, reason = self.args
+        return '{}: {}: {} works now'.format(
+            self.__class__.__name__, name, reason)
+
+
+def broken(reason, *exceptions):
+    '''Marks a failing (or erroneous) test case that should eventually succeed.
+    If the test fails with an exception, list the exception type(s) in the arguments.'''
+    def wrapper(test_method):
+        def replacement(*args, **kwargs):
+            try:
+                test_method(*args, **kwargs)
+            except exceptions or unittest.TestCase.failureException:
+                pass
+            else:
+                raise BrokenTest(test_method.__name__, reason)
+        replacement.__doc__ = test_method.__doc__
+        replacement.__name__ = 'XXX_' + test_method.__name__
+        replacement.todo = reason
+        return replacement
+    return wrapper
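+
+# A hypothetical usage sketch (illustration only; test name and reason are
+# made up): the decorator is applied to a test method together with the
+# exception types it is expected to raise, e.g.
+#
+#     @broken("grammar not yet ported", GrammarCompileError)
+#     def testSomething(self):
+#         ...
+#
+# The wrapped test is reported as passing while it keeps failing, and raises
+# BrokenTest as soon as it unexpectedly starts to succeed.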
+
+
+dependencyCache = {}
+compileErrorCache = {}
+
+# setup java CLASSPATH
+if 'CLASSPATH' not in os.environ:
+    cp = []
+
+    baseDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
+    libDir = os.path.join(baseDir, 'lib')
+
+    jar = os.path.join(libDir, 'ST-4.0.5.jar')
+    if not os.path.isfile(jar):
+        raise DistutilsFileError(
+            "Missing file '{}'. Grab it from a distribution package.".format(jar)
+            )
+    cp.append(jar)
+
+    jar = os.path.join(libDir, 'antlr-3.4.1-SNAPSHOT.jar')
+    if not os.path.isfile(jar):
+        raise DistutilsFileError(
+            "Missing file '{}'. Grab it from a distribution package.".format(jar)
+            )
+    cp.append(jar)
+
+    jar = os.path.join(libDir, 'antlr-runtime-3.4.jar')
+    if not os.path.isfile(jar):
+        raise DistutilsFileError(
+            "Missing file '{}'. Grab it from a distribution package.".format(jar)
+            )
+    cp.append(jar)
+
+    cp.append(os.path.join(baseDir, 'runtime', 'Python', 'build'))
+
+    classpath = '-cp "' + ':'.join([os.path.abspath(p) for p in cp]) + '"'
+
+else:
+    classpath = ''
+
+
+class ANTLRTest(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self.moduleName = os.path.splitext(os.path.basename(sys.modules[self.__module__].__file__))[0]
+        self.className = self.__class__.__name__
+        self._baseDir = None
+
+        self.lexerModule = None
+        self.parserModule = None
+
+        self.grammarName = None
+        self.grammarType = None
+
+
+    @property
+    def baseDir(self):
+        if self._baseDir is None:
+            testName = 'unknownTest'
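+            # Walk up the call stack to find the test method that triggered
+            # this property; its name becomes the last component of the
+            # per-test working directory.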
+            for frame in inspect.stack():
+                code = frame[0].f_code
+                codeMod = inspect.getmodule(code)
+                if codeMod is None:
+                    continue
+
+                # skip frames not in requested module
+                if codeMod is not sys.modules[self.__module__]:
+                    continue
+
+                # skip some unwanted names
+                if code.co_name in ('nextToken', '<module>'):
+                    continue
+
+                if code.co_name.startswith('test'):
+                    testName = code.co_name
+                    break
+
+            self._baseDir = os.path.join(
+                testbasedir,
+                self.moduleName, self.className, testName)
+            if not os.path.isdir(self._baseDir):
+                os.makedirs(self._baseDir)
+
+        return self._baseDir
+
+
+    def _invokeantlr(self, dir, file, options, javaOptions=''):
+        cmd = 'cd {}; java {} {} org.antlr.Tool -o . {} {} 2>&1'.format(
+            dir, javaOptions, classpath, options, file
+            )
+        fp = os.popen(cmd)
+        output = ''
+        failed = False
+        for line in fp:
+            output += line
+
+            if line.startswith('error('):
+                failed = True
+
+        rc = fp.close()
+        if rc:
+            failed = True
+
+        if failed:
+            raise GrammarCompileError(
+                "Failed to compile grammar '{}':\n{}\n\n{}".format(file, cmd, output)
+                )
+
+
+    def compileGrammar(self, grammarName=None, options='', javaOptions=''):
+        if grammarName is None:
+            grammarName = self.moduleName + '.g'
+
+        self._baseDir = os.path.join(
+            testbasedir,
+            self.moduleName)
+        if not os.path.isdir(self._baseDir):
+            os.makedirs(self._baseDir)
+
+        if self.grammarName is None:
+            self.grammarName = os.path.splitext(grammarName)[0]
+
+        grammarPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), grammarName)
+
+        # get type and name from first grammar line
+        with open(grammarPath, 'r') as fp:
+            grammar = fp.read()
+        m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar, re.MULTILINE)
+        self.assertIsNotNone(m, grammar)
+        self.grammarType = m.group(2) or 'combined'
+
+        self.assertIn(self.grammarType, ('lexer', 'parser', 'tree', 'combined'))
+
+        # don't try to rebuild the grammar if it has already failed to compile
+        if grammarName in compileErrorCache:
+            return
+
+        try:
+        #     # get dependencies from antlr
+        #     if grammarName in dependencyCache:
+        #         dependencies = dependencyCache[grammarName]
+
+        #     else:
+        #         dependencies = []
+        #         cmd = ('cd %s; java %s %s org.antlr.Tool -o . -depend %s 2>&1'
+        #                % (self.baseDir, javaOptions, classpath, grammarPath))
+
+        #         output = ""
+        #         failed = False
+
+        #         fp = os.popen(cmd)
+        #         for line in fp:
+        #             output += line
+
+        #             if line.startswith('error('):
+        #                 failed = True
+        #             elif ':' in line:
+        #                 a, b = line.strip().split(':', 1)
+        #                 dependencies.append(
+        #                     (os.path.join(self.baseDir, a.strip()),
+        #                      [os.path.join(self.baseDir, b.strip())])
+        #                     )
+
+        #         rc = fp.close()
+        #         if rc is not None:
+        #             failed = True
+
+        #         if failed:
+        #             raise GrammarCompileError(
+        #                 "antlr -depend failed with code {} on grammar '{}':\n\n{}\n{}".format(
+        #                     rc, grammarName, cmd, output)
+        #                 )
+
+        #         # add dependencies to my .stg files
+        #         templateDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'tool', 'src', 'main', 'resources', 'org', 'antlr', 'codegen', 'templates', 'Python'))
+        #         templates = glob.glob(os.path.join(templateDir, '*.stg'))
+
+        #         for dst, src in dependencies:
+        #             src.extend(templates)
+
+        #         dependencyCache[grammarName] = dependencies
+
+        #     rebuild = False
+        #     for dest, sources in dependencies:
+        #         if not os.path.isfile(dest):
+        #             rebuild = True
+        #             break
+
+        #         for source in sources:
+        #             if os.path.getmtime(source) > os.path.getmtime(dest):
+        #                 rebuild = True
+        #                 break
+
+
+        #     if rebuild:
+        #         self._invokeantlr(self.baseDir, grammarPath, options, javaOptions)
+
+            self._invokeantlr(self.baseDir, grammarPath, options, javaOptions)
+
+        except:
+            # mark grammar as broken
+            compileErrorCache[grammarName] = True
+            raise
+
+
+    def lexerClass(self, base):
+        """Optionally build a subclass of generated lexer class"""
+
+        return base
+
+
+    def parserClass(self, base):
+        """Optionally build a subclass of generated parser class"""
+
+        return base
+
+
+    def walkerClass(self, base):
+        """Optionally build a subclass of generated walker class"""
+
+        return base
+
+
+    def __load_module(self, name):
+        modFile, modPathname, modDescription = imp.find_module(name, [self.baseDir])
+
+        with modFile:
+            return imp.load_module(name, modFile, modPathname, modDescription)
+
+
+    def getLexer(self, *args, **kwargs):
+        """Build lexer instance. Arguments are passed to lexer.__init__()."""
+
+        if self.grammarType == 'lexer':
+            self.lexerModule = self.__load_module(self.grammarName)
+            cls = getattr(self.lexerModule, self.grammarName)
+        else:
+            self.lexerModule = self.__load_module(self.grammarName + 'Lexer')
+            cls = getattr(self.lexerModule, self.grammarName + 'Lexer')
+
+        cls = self.lexerClass(cls)
+
+        lexer = cls(*args, **kwargs)
+
+        return lexer
+
+
+    def getParser(self, *args, **kwargs):
+        """Build parser instance. Arguments are passed to parser.__init__()."""
+
+        if self.grammarType == 'parser':
+            self.lexerModule = self.__load_module(self.grammarName)
+            cls = getattr(self.lexerModule, self.grammarName)
+        else:
+            self.parserModule = self.__load_module(self.grammarName + 'Parser')
+            cls = getattr(self.parserModule, self.grammarName + 'Parser')
+        cls = self.parserClass(cls)
+
+        parser = cls(*args, **kwargs)
+
+        return parser
+
+
+    def getWalker(self, *args, **kwargs):
+        """Build walker instance. Arguments are passed to walker.__init__()."""
+
+        self.walkerModule = self.__load_module(self.grammarName + 'Walker')
+        cls = getattr(self.walkerModule, self.grammarName + 'Walker')
+        cls = self.walkerClass(cls)
+
+        walker = cls(*args, **kwargs)
+
+        return walker
+
+
+    def writeInlineGrammar(self, grammar):
+        # Create a unique ID for this test and append it to the grammar name,
+        # to avoid class name reuse. This kinda sucks. Need to find a way so
+        # tests can use the same grammar name without messing up the namespace.
+        # Well, first I should figure out what the exact problem is...
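+        # For example (hypothetical id): a grammar declared as "grammar $T;"
+        # ends up as something like "grammar Tdeadbeef;", where "deadbeef"
+        # stands in for the last 8 hex digits of the MD5 of baseDir.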
+        id = hashlib.md5(self.baseDir.encode('utf-8')).hexdigest()[-8:]
+        grammar = grammar.replace('$TP', 'TP' + id)
+        grammar = grammar.replace('$T', 'T' + id)
+
+        # get type and name from first grammar line
+        m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar, re.MULTILINE)
+        self.assertIsNotNone(m, grammar)
+        grammarType = m.group(2) or 'combined'
+        grammarName = m.group(3)
+
+        self.assertIn(grammarType, ('lexer', 'parser', 'tree', 'combined'))
+
+        grammarPath = os.path.join(self.baseDir, grammarName + '.g')
+
+        # dump temp grammar file
+        with open(grammarPath, 'w') as fp:
+            fp.write(grammar)
+
+        return grammarName, grammarPath, grammarType
+
+
+    def writeFile(self, name, contents):
+        testDir = os.path.dirname(os.path.abspath(__file__))
+        path = os.path.join(self.baseDir, name)
+
+        with open(path, 'w') as fp:
+            fp.write(contents)
+
+        return path
+
+
+    def compileInlineGrammar(self, grammar, options='', javaOptions='',
+                             returnModule=False):
+        # write grammar file
+        grammarName, grammarPath, grammarType = self.writeInlineGrammar(grammar)
+
+        # compile it
+        self._invokeantlr(
+            os.path.dirname(grammarPath),
+            os.path.basename(grammarPath),
+            options,
+            javaOptions
+            )
+
+        if grammarType == 'combined':
+            lexerMod = self.__load_module(grammarName + 'Lexer')
+            parserMod = self.__load_module(grammarName + 'Parser')
+            if returnModule:
+                return lexerMod, parserMod
+
+            lexerCls = getattr(lexerMod, grammarName + 'Lexer')
+            lexerCls = self.lexerClass(lexerCls)
+            parserCls = getattr(parserMod, grammarName + 'Parser')
+            parserCls = self.parserClass(parserCls)
+
+            return lexerCls, parserCls
+
+        if grammarType == 'lexer':
+            lexerMod = self.__load_module(grammarName)
+            if returnModule:
+                return lexerMod
+
+            lexerCls = getattr(lexerMod, grammarName)
+            lexerCls = self.lexerClass(lexerCls)
+
+            return lexerCls
+
+        if grammarType == 'parser':
+            parserMod = self.__load_module(grammarName)
+            if returnModule:
+                return parserMod
+
+            parserCls = getattr(parserMod, grammarName)
+            parserCls = self.parserClass(parserCls)
+
+            return parserCls
+
+        if grammarType == 'tree':
+            walkerMod = self.__load_module(grammarName)
+            if returnModule:
+                return walkerMod
+
+            walkerCls = getattr(walkerMod, grammarName)
+            walkerCls = self.walkerClass(walkerCls)
+
+            return walkerCls
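+
+
+# Hypothetical usage sketch (illustration only, not part of the harness):
+# a test module would typically subclass ANTLRTest, compile a combined
+# grammar inline and drive the generated lexer/parser classes like this
+# (grammar and rule names below are made up):
+#
+#     class TFoo(testbase.ANTLRTest):
+#         def testValid(self):
+#             lexerCls, parserCls = self.compileInlineGrammar(
+#                 textwrap.dedent(r"""
+#                 grammar $T;
+#                 options { language=Python3; }
+#                 r  : 'hello' ID ;
+#                 ID : 'a'..'z'+ ;
+#                 WS : (' '|'\n') {self.skip()} ;
+#                 """))
+#
+#             cStream = antlr3.StringStream('hello world')
+#             lexer = lexerCls(cStream)
+#             tStream = antlr3.CommonTokenStream(lexer)
+#             parser = parserCls(tStream)
+#             parser.r()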
diff --git a/antlr-3.4/runtime/Python/unittests/testantlr3.py b/runtime/Python3/unittests/testantlr3.py
similarity index 100%
copy from antlr-3.4/runtime/Python/unittests/testantlr3.py
copy to runtime/Python3/unittests/testantlr3.py
diff --git a/runtime/Python3/unittests/testbase.py b/runtime/Python3/unittests/testbase.py
new file mode 100644
index 0000000..c39243e
--- /dev/null
+++ b/runtime/Python3/unittests/testbase.py
@@ -0,0 +1,27 @@
+import unittest
+
+class BrokenTest(unittest.TestCase.failureException):
+    def __repr__(self):
+        name, reason = self.args
+        return '{}: {}: {} works now'.format(
+            self.__class__.__name__, name, reason)
+
+
+def broken(reason, *exceptions):
+    '''Marks a failing (or erroneous) test case that should eventually succeed.
+    If the test fails with an exception, list the exception type(s) in the arguments.'''
+    def wrapper(test_method):
+        def replacement(*args, **kwargs):
+            try:
+                test_method(*args, **kwargs)
+            except exceptions or unittest.TestCase.failureException:
+                pass
+            else:
+                raise BrokenTest(test_method.__name__, reason)
+        replacement.__doc__ = test_method.__doc__
+        replacement.__name__ = 'XXX_' + test_method.__name__
+        replacement.todo = reason
+        return replacement
+    return wrapper
+
+
diff --git a/runtime/Python3/unittests/testdfa.py b/runtime/Python3/unittests/testdfa.py
new file mode 100644
index 0000000..7ae362d
--- /dev/null
+++ b/runtime/Python3/unittests/testdfa.py
@@ -0,0 +1,63 @@
+
+import unittest
+
+import antlr3
+
+
+class TestDFA(unittest.TestCase):
+    """Test case for the DFA class."""
+
+    def setUp(self):
+        """Set up the test fixture.
+
+        We need a Recognizer in order to instantiate a DFA.
+
+        """
+
+        class TRecognizer(antlr3.BaseRecognizer):
+            api_version = 'HEAD'
+
+        self.recog = TRecognizer()
+
+
+    def testInit(self):
+        """DFA.__init__()
+
+        Just a smoke test.
+
+        """
+
+        dfa = antlr3.DFA(
+            self.recog, 1,
+            eot=[],
+            eof=[],
+            min=[],
+            max=[],
+            accept=[],
+            special=[],
+            transition=[]
+            )
+
+
+    def testUnpack(self):
+        """DFA.unpack()"""
+
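+        # As far as I understand the runtime, the packed string is a
+        # run-length encoding: characters come in (count, value) pairs and
+        # '\uffff' stands for -1. E.g. "\1\3" expands to [3] and "\2\uffff"
+        # expands to [-1, -1], matching the start of the expected list below.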
+        self.assertEqual(
+            antlr3.DFA.unpack(
+            "\1\3\1\4\2\uffff\1\5\22\uffff\1\2\31\uffff\1\6\6\uffff"
+            "\32\6\4\uffff\1\6\1\uffff\32\6"
+            ),
+            [ 3, 4, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+              -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+              -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+              6, -1, -1, -1, -1, -1, -1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+              6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, -1, -1, -1, -1, 6, -1,
+              6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+              6, 6, 6, 6, 6
+              ]
+            )
+
+
+
+if __name__ == "__main__":
+    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
diff --git a/antlr-3.4/runtime/Python/unittests/testexceptions.py b/runtime/Python3/unittests/testexceptions.py
similarity index 100%
copy from antlr-3.4/runtime/Python/unittests/testexceptions.py
copy to runtime/Python3/unittests/testexceptions.py
diff --git a/runtime/Python3/unittests/testrecognizers.py b/runtime/Python3/unittests/testrecognizers.py
new file mode 100644
index 0000000..c30c06c
--- /dev/null
+++ b/runtime/Python3/unittests/testrecognizers.py
@@ -0,0 +1,67 @@
+import sys
+import unittest
+
+import antlr3
+
+
+class TestBaseRecognizer(unittest.TestCase):
+    """Tests for BaseRecognizer class"""
+
+    def testGetRuleInvocationStack(self):
+        """BaseRecognizer._getRuleInvocationStack()"""
+
+        rules = antlr3.BaseRecognizer._getRuleInvocationStack(__name__)
+        self.assertEqual(
+            rules,
+            ['testGetRuleInvocationStack']
+            )
+
+
+class TestTokenSource(unittest.TestCase):
+    """Test case for the antlr3.TokenSource class."""
+
+
+    def testIteratorInterface(self):
+        """TokenSource.next()"""
+
+        class TrivialToken(object):
+            def __init__(self, type):
+                self.type = type
+
+        class TestSource(antlr3.TokenSource):
+            def __init__(self):
+                self.tokens = [
+                    TrivialToken(1),
+                    TrivialToken(2),
+                    TrivialToken(3),
+                    TrivialToken(4),
+                    TrivialToken(antlr3.EOF),
+                    ]
+
+            def nextToken(self):
+                return self.tokens.pop(0)
+
+
+        src = TestSource()
+        tokens = []
+        for token in src:
+            tokens.append(token.type)
+
+        self.assertEqual(tokens, [1, 2, 3, 4])
+
+
+
+class TestLexer(unittest.TestCase):
+
+    def testInit(self):
+        """Lexer.__init__()"""
+
+        class TLexer(antlr3.Lexer):
+            api_version = 'HEAD'
+
+        stream = antlr3.StringStream('foo')
+        TLexer(stream)
+
+
+if __name__ == "__main__":
+    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
diff --git a/antlr-3.4/runtime/Python/unittests/teststreams.input1 b/runtime/Python3/unittests/teststreams.input1
similarity index 100%
copy from antlr-3.4/runtime/Python/unittests/teststreams.input1
copy to runtime/Python3/unittests/teststreams.input1
diff --git a/antlr-3.4/runtime/Python/unittests/teststreams.input2 b/runtime/Python3/unittests/teststreams.input2
similarity index 100%
copy from antlr-3.4/runtime/Python/unittests/teststreams.input2
copy to runtime/Python3/unittests/teststreams.input2
diff --git a/runtime/Python3/unittests/teststreams.py b/runtime/Python3/unittests/teststreams.py
new file mode 100644
index 0000000..957fffa
--- /dev/null
+++ b/runtime/Python3/unittests/teststreams.py
@@ -0,0 +1,659 @@
+
+from io import StringIO
+import os
+import unittest
+import antlr3
+
+
+class TestStringStream(unittest.TestCase):
+    """Test case for the StringStream class."""
+
+    def testSize(self):
+        """StringStream.size()"""
+
+        stream = antlr3.StringStream('foo')
+
+        self.assertEqual(stream.size(), 3)
+
+
+    def testIndex(self):
+        """StringStream.index()"""
+
+        stream = antlr3.StringStream('foo')
+
+        self.assertEqual(stream.index(), 0)
+
+
+    def testConsume(self):
+        """StringStream.consume()"""
+
+        stream = antlr3.StringStream('foo\nbar')
+
+        stream.consume() # f
+        self.assertEqual(stream.index(), 1)
+        self.assertEqual(stream.charPositionInLine, 1)
+        self.assertEqual(stream.line, 1)
+
+        stream.consume() # o
+        self.assertEqual(stream.index(), 2)
+        self.assertEqual(stream.charPositionInLine, 2)
+        self.assertEqual(stream.line, 1)
+
+        stream.consume() # o
+        self.assertEqual(stream.index(), 3)
+        self.assertEqual(stream.charPositionInLine, 3)
+        self.assertEqual(stream.line, 1)
+
+        stream.consume() # \n
+        self.assertEqual(stream.index(), 4)
+        self.assertEqual(stream.charPositionInLine, 0)
+        self.assertEqual(stream.line, 2)
+
+        stream.consume() # b
+        self.assertEqual(stream.index(), 5)
+        self.assertEqual(stream.charPositionInLine, 1)
+        self.assertEqual(stream.line, 2)
+
+        stream.consume() # a
+        self.assertEqual(stream.index(), 6)
+        self.assertEqual(stream.charPositionInLine, 2)
+        self.assertEqual(stream.line, 2)
+
+        stream.consume() # r
+        self.assertEqual(stream.index(), 7)
+        self.assertEqual(stream.charPositionInLine, 3)
+        self.assertEqual(stream.line, 2)
+
+        stream.consume() # EOF
+        self.assertEqual(stream.index(), 7)
+        self.assertEqual(stream.charPositionInLine, 3)
+        self.assertEqual(stream.line, 2)
+
+        stream.consume() # EOF
+        self.assertEqual(stream.index(), 7)
+        self.assertEqual(stream.charPositionInLine, 3)
+        self.assertEqual(stream.line, 2)
+
+
+    def testReset(self):
+        """StringStream.reset()"""
+
+        stream = antlr3.StringStream('foo')
+
+        stream.consume()
+        stream.consume()
+
+        stream.reset()
+        self.assertEqual(stream.index(), 0)
+        self.assertEqual(stream.line, 1)
+        self.assertEqual(stream.charPositionInLine, 0)
+        self.assertEqual(stream.LT(1), 'f')
+
+
+    def testLA(self):
+        """StringStream.LA()"""
+
+        stream = antlr3.StringStream('foo')
+
+        self.assertEqual(stream.LT(1), 'f')
+        self.assertEqual(stream.LT(2), 'o')
+        self.assertEqual(stream.LT(3), 'o')
+
+        stream.consume()
+        stream.consume()
+
+        self.assertEqual(stream.LT(1), 'o')
+        self.assertEqual(stream.LT(2), antlr3.EOF)
+        self.assertEqual(stream.LT(3), antlr3.EOF)
+
+
+    def testSubstring(self):
+        """StringStream.substring()"""
+
+        stream = antlr3.StringStream('foobar')
+
+        self.assertEqual(stream.substring(0, 0), 'f')
+        self.assertEqual(stream.substring(0, 1), 'fo')
+        self.assertEqual(stream.substring(0, 5), 'foobar')
+        self.assertEqual(stream.substring(3, 5), 'bar')
+
+
+    def testSeekForward(self):
+        """StringStream.seek(): forward"""
+
+        stream = antlr3.StringStream('foo\nbar')
+
+        stream.seek(4)
+
+        self.assertEqual(stream.index(), 4)
+        self.assertEqual(stream.line, 2)
+        self.assertEqual(stream.charPositionInLine, 0)
+        self.assertEqual(stream.LT(1), 'b')
+
+
+##     # not yet implemented
+##     def testSeekBackward(self):
+##         """StringStream.seek(): backward"""
+
+##         stream = antlr3.StringStream('foo\nbar')
+
+##         stream.seek(4)
+##         stream.seek(1)
+
+##         self.assertEqual(stream.index(), 1)
+##         self.assertEqual(stream.line, 1)
+##         self.assertEqual(stream.charPositionInLine, 1)
+##         self.assertEqual(stream.LA(1), 'o')
+
+
+    def testMark(self):
+        """StringStream.mark()"""
+
+        stream = antlr3.StringStream('foo\nbar')
+
+        stream.seek(4)
+
+        marker = stream.mark()
+        self.assertEqual(marker, 1)
+        self.assertEqual(stream.markDepth, 1)
+
+        stream.consume()
+        marker = stream.mark()
+        self.assertEqual(marker, 2)
+        self.assertEqual(stream.markDepth, 2)
+
+
+    def testReleaseLast(self):
+        """StringStream.release(): last marker"""
+
+        stream = antlr3.StringStream('foo\nbar')
+
+        stream.seek(4)
+        marker1 = stream.mark()
+
+        stream.consume()
+        marker2 = stream.mark()
+
+        stream.release()
+        self.assertEqual(stream.markDepth, 1)
+
+        # release same marker again, nothing has changed
+        stream.release()
+        self.assertEqual(stream.markDepth, 1)
+
+
+    def testReleaseNested(self):
+        """StringStream.release(): nested"""
+
+        stream = antlr3.StringStream('foo\nbar')
+
+        stream.seek(4)
+        marker1 = stream.mark()
+
+        stream.consume()
+        marker2 = stream.mark()
+
+        stream.consume()
+        marker3 = stream.mark()
+
+        stream.release(marker2)
+        self.assertEqual(stream.markDepth, 1)
+
+
+    def testRewindLast(self):
+        """StringStream.rewind(): last marker"""
+
+        stream = antlr3.StringStream('foo\nbar')
+
+        stream.seek(4)
+
+        marker = stream.mark()
+        stream.consume()
+        stream.consume()
+
+        stream.rewind()
+        self.assertEqual(stream.markDepth, 0)
+        self.assertEqual(stream.index(), 4)
+        self.assertEqual(stream.line, 2)
+        self.assertEqual(stream.charPositionInLine, 0)
+        self.assertEqual(stream.LT(1), 'b')
+
+
+    def testRewindNested(self):
+        """StringStream.rewind(): nested"""
+
+        stream = antlr3.StringStream('foo\nbar')
+
+        stream.seek(4)
+        marker1 = stream.mark()
+
+        stream.consume()
+        marker2 = stream.mark()
+
+        stream.consume()
+        marker3 = stream.mark()
+
+        stream.rewind(marker2)
+        self.assertEqual(stream.markDepth, 1)
+        self.assertEqual(stream.index(), 5)
+        self.assertEqual(stream.line, 2)
+        self.assertEqual(stream.charPositionInLine, 1)
+        self.assertEqual(stream.LT(1), 'a')
+
+
+class TestFileStream(unittest.TestCase):
+    """Test case for the FileStream class."""
+
+
+    def testNoEncoding(self):
+        path = os.path.join(os.path.dirname(__file__), 'teststreams.input1')
+
+        stream = antlr3.FileStream(path)
+
+        stream.seek(4)
+        marker1 = stream.mark()
+
+        stream.consume()
+        marker2 = stream.mark()
+
+        stream.consume()
+        marker3 = stream.mark()
+
+        stream.rewind(marker2)
+        self.assertEqual(stream.markDepth, 1)
+        self.assertEqual(stream.index(), 5)
+        self.assertEqual(stream.line, 2)
+        self.assertEqual(stream.charPositionInLine, 1)
+        self.assertEqual(stream.LT(1), 'a')
+        self.assertEqual(stream.LA(1), ord('a'))
+
+
+    def testEncoded(self):
+        path = os.path.join(os.path.dirname(__file__), 'teststreams.input2')
+
+        stream = antlr3.FileStream(path)
+
+        stream.seek(4)
+        marker1 = stream.mark()
+
+        stream.consume()
+        marker2 = stream.mark()
+
+        stream.consume()
+        marker3 = stream.mark()
+
+        stream.rewind(marker2)
+        self.assertEqual(stream.markDepth, 1)
+        self.assertEqual(stream.index(), 5)
+        self.assertEqual(stream.line, 2)
+        self.assertEqual(stream.charPositionInLine, 1)
+        self.assertEqual(stream.LT(1), 'ä')
+        self.assertEqual(stream.LA(1), ord('ä'))
+
+
+
+class TestInputStream(unittest.TestCase):
+    """Test case for the InputStream class."""
+
+    def testNoEncoding(self):
+        file = StringIO('foo\nbar')
+
+        stream = antlr3.InputStream(file)
+
+        stream.seek(4)
+        marker1 = stream.mark()
+
+        stream.consume()
+        marker2 = stream.mark()
+
+        stream.consume()
+        marker3 = stream.mark()
+
+        stream.rewind(marker2)
+        self.assertEqual(stream.markDepth, 1)
+        self.assertEqual(stream.index(), 5)
+        self.assertEqual(stream.line, 2)
+        self.assertEqual(stream.charPositionInLine, 1)
+        self.assertEqual(stream.LT(1), 'a')
+        self.assertEqual(stream.LA(1), ord('a'))
+
+
+    def testEncoded(self):
+        file = StringIO('foo\nbär')
+
+        stream = antlr3.InputStream(file)
+
+        stream.seek(4)
+        marker1 = stream.mark()
+
+        stream.consume()
+        marker2 = stream.mark()
+
+        stream.consume()
+        marker3 = stream.mark()
+
+        stream.rewind(marker2)
+        self.assertEqual(stream.markDepth, 1)
+        self.assertEqual(stream.index(), 5)
+        self.assertEqual(stream.line, 2)
+        self.assertEqual(stream.charPositionInLine, 1)
+        self.assertEqual(stream.LT(1), 'ä')
+        self.assertEqual(stream.LA(1), ord('ä'))
+
+
+class TestCommonTokenStream(unittest.TestCase):
+    """Test case for the CommonTokenStream class."""
+
+    def setUp(self):
+        """Set up the test fixture.
+
+        The constructor of CommonTokenStream needs a token source. This is a
+        simple mock class providing the nextToken() and makeEOFToken() methods.
+
+        """
+
+        class MockSource(object):
+            def __init__(self):
+                self.tokens = []
+
+            def makeEOFToken(self):
+                return antlr3.CommonToken(type=antlr3.EOF)
+
+            def nextToken(self):
+                if self.tokens:
+                    return self.tokens.pop(0)
+                return None
+
+        self.source = MockSource()
+
+
+    def testInit(self):
+        """CommonTokenStream.__init__()"""
+
+        stream = antlr3.CommonTokenStream(self.source)
+        self.assertEqual(stream.index(), -1)
+
+
+    def testSetTokenSource(self):
+        """CommonTokenStream.setTokenSource()"""
+
+        stream = antlr3.CommonTokenStream(None)
+        stream.setTokenSource(self.source)
+        self.assertEqual(stream.index(), -1)
+        self.assertEqual(stream.channel, antlr3.DEFAULT_CHANNEL)
+
+
+    def testLTEmptySource(self):
+        """CommonTokenStream.LT(): EOF (empty source)"""
+
+        stream = antlr3.CommonTokenStream(self.source)
+
+        lt1 = stream.LT(1)
+        self.assertEqual(lt1.type, antlr3.EOF)
+
+
+    def testLT1(self):
+        """CommonTokenStream.LT(1)"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+
+        lt1 = stream.LT(1)
+        self.assertEqual(lt1.type, 12)
+
+
+    def testLT1WithHidden(self):
+        """CommonTokenStream.LT(1): with hidden tokens"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+
+        lt1 = stream.LT(1)
+        self.assertEqual(lt1.type, 13)
+
+
+    def testLT2BeyondEnd(self):
+        """CommonTokenStream.LT(2): beyond end"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13, channel=antlr3.HIDDEN_CHANNEL)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+
+        lt1 = stream.LT(2)
+        self.assertEqual(lt1.type, antlr3.EOF)
+
+
+    # not yet implemented
+    def testLTNegative(self):
+        """CommonTokenStream.LT(-1): look back"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+        stream.fillBuffer()
+        stream.consume()
+
+        lt1 = stream.LT(-1)
+        self.assertEqual(lt1.type, 12)
+
+
+    def testLB1(self):
+        """CommonTokenStream.LB(1)"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+        stream.fillBuffer()
+        stream.consume()
+
+        self.assertEqual(stream.LB(1).type, 12)
+
+
+    def testLTZero(self):
+        """CommonTokenStream.LT(0)"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+
+        lt1 = stream.LT(0)
+        self.assertIsNone(lt1)
+
+
+    def testLBBeyondBegin(self):
+        """CommonTokenStream.LB(-1): beyond begin"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+        self.assertIsNone(stream.LB(1))
+
+        stream.consume()
+        stream.consume()
+        self.assertIsNone(stream.LB(3))
+
+
+    def testFillBuffer(self):
+        """CommonTokenStream.fillBuffer()"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=14)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=antlr3.EOF)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+        stream.fillBuffer()
+
+        self.assertEqual(len(stream.tokens), 3)
+        self.assertEqual(stream.tokens[0].type, 12)
+        self.assertEqual(stream.tokens[1].type, 13)
+        self.assertEqual(stream.tokens[2].type, 14)
+
+
+    def testConsume(self):
+        """CommonTokenStream.consume()"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=antlr3.EOF)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+        self.assertEqual(stream.LA(1), 12)
+
+        stream.consume()
+        self.assertEqual(stream.LA(1), 13)
+
+        stream.consume()
+        self.assertEqual(stream.LA(1), antlr3.EOF)
+
+        stream.consume()
+        self.assertEqual(stream.LA(1), antlr3.EOF)
+
+
+    def testSeek(self):
+        """CommonTokenStream.seek()"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=antlr3.EOF)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+        self.assertEqual(stream.LA(1), 12)
+
+        stream.seek(2)
+        self.assertEqual(stream.LA(1), antlr3.EOF)
+
+        stream.seek(0)
+        self.assertEqual(stream.LA(1), 12)
+
+
+    def testMarkRewind(self):
+        """CommonTokenStream.mark()/rewind()"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13)
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=antlr3.EOF)
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+        stream.fillBuffer()
+
+        stream.consume()
+        marker = stream.mark()
+
+        stream.consume()
+        stream.rewind(marker)
+
+        self.assertEqual(stream.LA(1), 13)
+
+
+    def testToString(self):
+        """CommonTokenStream.toString()"""
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=12, text="foo")
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=13, text="bar")
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=14, text="gnurz")
+            )
+
+        self.source.tokens.append(
+            antlr3.CommonToken(type=15, text="blarz")
+            )
+
+        stream = antlr3.CommonTokenStream(self.source)
+
+        self.assertEqual(stream.toString(), "foobargnurzblarz")
+        self.assertEqual(stream.toString(1, 2), "bargnurz")
+        self.assertEqual(stream.toString(stream.tokens[1], stream.tokens[-2]), "bargnurz")
+
+
+if __name__ == "__main__":
+    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
diff --git a/runtime/Python3/unittests/testtree.py b/runtime/Python3/unittests/testtree.py
new file mode 100644
index 0000000..83c3876
--- /dev/null
+++ b/runtime/Python3/unittests/testtree.py
@@ -0,0 +1,1334 @@
+
+from io import StringIO
+import os
+import unittest
+
+from antlr3.tree import (CommonTreeNodeStream, CommonTree, CommonTreeAdaptor,
+                         TreeParser, TreeVisitor, TreeIterator)
+from antlr3 import CommonToken, UP, DOWN, EOF
+from antlr3.treewizard import TreeWizard
+
+class TestTreeNodeStream(unittest.TestCase):
+    """Test case for the TreeNodeStream class."""
+
+    def setUp(self):
+        self.adaptor = CommonTreeAdaptor()
+
+
+    def newStream(self, t):
+        """Build a new stream; lets us override to test other streams."""
+        return CommonTreeNodeStream(t)
+
+
+    def testSingleNode(self):
+        t = CommonTree(CommonToken(101))
+
+        stream = self.newStream(t)
+        expecting = "101"
+        found = self.toNodesOnlyString(stream)
+        self.assertEqual(expecting, found)
+
+        expecting = "101"
+        found = str(stream)
+        self.assertEqual(expecting, found)
+
+
+    def testTwoChildrenOfNilRoot(self):
+        class V(CommonTree):
+            def __init__(self, token=None, ttype=None):
+                if token:
+                    self.token = token
+
+                elif ttype:
+                    self.token = CommonToken(type=ttype)
+
+
+            def __str__(self):
+                if self.token:
+                    txt = self.token.text
+                else:
+                    txt = ""
+
+                txt += "<V>"
+                return txt
+
+        root_0 = self.adaptor.nil()
+        t = V(ttype=101)
+        u = V(token=CommonToken(type=102, text="102"))
+        self.adaptor.addChild(root_0, t)
+        self.adaptor.addChild(root_0, u)
+        self.assertIsNone(root_0.parent)
+        self.assertEqual(-1, root_0.childIndex)
+        self.assertEqual(0, t.childIndex)
+        self.assertEqual(1, u.childIndex)
+
+
+    def test4Nodes(self):
+        # ^(101 ^(102 103) 104)
+        t = CommonTree(CommonToken(101))
+        t.addChild(CommonTree(CommonToken(102)))
+        t.getChild(0).addChild(CommonTree(CommonToken(103)))
+        t.addChild(CommonTree(CommonToken(104)))
+
+        stream = self.newStream(t)
+        expecting = "101 102 103 104"
+        found = self.toNodesOnlyString(stream)
+        self.assertEqual(expecting, found)
+
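+        # The full string form also includes the navigation nodes, rendered
+        # by their token types (DOWN is 2, UP is 3).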
+        expecting = "101 2 102 2 103 3 104 3"
+        found = str(stream)
+        self.assertEqual(expecting, found)
+
+
+    def testList(self):
+        root = CommonTree(None)
+
+        t = CommonTree(CommonToken(101))
+        t.addChild(CommonTree(CommonToken(102)))
+        t.getChild(0).addChild(CommonTree(CommonToken(103)))
+        t.addChild(CommonTree(CommonToken(104)))
+
+        u = CommonTree(CommonToken(105))
+
+        root.addChild(t)
+        root.addChild(u)
+
+        stream = CommonTreeNodeStream(root)
+        expecting = "101 102 103 104 105"
+        found = self.toNodesOnlyString(stream)
+        self.assertEqual(expecting, found)
+
+        expecting = "101 2 102 2 103 3 104 3 105"
+        found = str(stream)
+        self.assertEqual(expecting, found)
+
+
+    def testFlatList(self):
+        root = CommonTree(None)
+
+        root.addChild(CommonTree(CommonToken(101)))
+        root.addChild(CommonTree(CommonToken(102)))
+        root.addChild(CommonTree(CommonToken(103)))
+
+        stream = CommonTreeNodeStream(root)
+        expecting = "101 102 103"
+        found = self.toNodesOnlyString(stream)
+        self.assertEqual(expecting, found)
+
+        expecting = "101 102 103"
+        found = str(stream)
+        self.assertEqual(expecting, found)
+
+
+    def testListWithOneNode(self):
+        root = CommonTree(None)
+
+        root.addChild(CommonTree(CommonToken(101)))
+
+        stream = CommonTreeNodeStream(root)
+        expecting = "101"
+        found = self.toNodesOnlyString(stream)
+        self.assertEqual(expecting, found)
+
+        expecting = "101"
+        found = str(stream)
+        self.assertEqual(expecting, found)
+
+
+    def testAoverB(self):
+        t = CommonTree(CommonToken(101))
+        t.addChild(CommonTree(CommonToken(102)))
+
+        stream = self.newStream(t)
+        expecting = "101 102"
+        found = self.toNodesOnlyString(stream)
+        self.assertEqual(expecting, found)
+
+        expecting = "101 2 102 3"
+        found = str(stream)
+        self.assertEqual(expecting, found)
+
+
+    def testLT(self):
+        # ^(101 ^(102 103) 104)
+        t = CommonTree(CommonToken(101))
+        t.addChild(CommonTree(CommonToken(102)))
+        t.getChild(0).addChild(CommonTree(CommonToken(103)))
+        t.addChild(CommonTree(CommonToken(104)))
+
+        stream = self.newStream(t)
+        self.assertEqual(101, stream.LT(1).getType())
+        self.assertEqual(DOWN, stream.LT(2).getType())
+        self.assertEqual(102, stream.LT(3).getType())
+        self.assertEqual(DOWN, stream.LT(4).getType())
+        self.assertEqual(103, stream.LT(5).getType())
+        self.assertEqual(UP, stream.LT(6).getType())
+        self.assertEqual(104, stream.LT(7).getType())
+        self.assertEqual(UP, stream.LT(8).getType())
+        self.assertEqual(EOF, stream.LT(9).getType())
+        # check way ahead
+        self.assertEqual(EOF, stream.LT(100).getType())
+
+
+    def testMarkRewindEntire(self):
+        # ^(101 ^(102 103 ^(106 107) ) 104 105)
+        # stream has 7 real + 6 nav nodes
+        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r0.addChild(r1)
+        r1.addChild(CommonTree(CommonToken(103)))
+        r2 = CommonTree(CommonToken(106))
+        r2.addChild(CommonTree(CommonToken(107)))
+        r1.addChild(r2)
+        r0.addChild(CommonTree(CommonToken(104)))
+        r0.addChild(CommonTree(CommonToken(105)))
+
+        stream = CommonTreeNodeStream(r0)
+        m = stream.mark() # MARK
+        for _ in range(13): # consume til end
+            stream.LT(1)
+            stream.consume()
+
+        self.assertEqual(EOF, stream.LT(1).getType())
+        self.assertEqual(UP, stream.LT(-1).getType())  #TODO: remove?
+        stream.rewind(m)      # REWIND
+
+        # consume til end again :)
+        for _ in range(13): # consume til end
+            stream.LT(1)
+            stream.consume()
+
+        self.assertEqual(EOF, stream.LT(1).getType())
+        self.assertEqual(UP, stream.LT(-1).getType())  #TODO: remove?
+
+
+    def testMarkRewindInMiddle(self):
+        # ^(101 ^(102 103 ^(106 107) ) 104 105)
+        # stream has 7 real + 6 nav nodes
+        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r0.addChild(r1)
+        r1.addChild(CommonTree(CommonToken(103)))
+        r2 = CommonTree(CommonToken(106))
+        r2.addChild(CommonTree(CommonToken(107)))
+        r1.addChild(r2)
+        r0.addChild(CommonTree(CommonToken(104)))
+        r0.addChild(CommonTree(CommonToken(105)))
+
+        stream = CommonTreeNodeStream(r0)
+        for _ in range(7): # consume til middle
+            # print(stream.LT(1).getType())
+            stream.consume()
+
+        self.assertEqual(107, stream.LT(1).getType())
+        m = stream.mark() # MARK
+        stream.consume() # consume 107
+        stream.consume() # consume UP
+        stream.consume() # consume UP
+        stream.consume() # consume 104
+        stream.rewind(m)      # REWIND
+
+        self.assertEqual(107, stream.LT(1).getType())
+        stream.consume()
+        self.assertEqual(UP, stream.LT(1).getType())
+        stream.consume()
+        self.assertEqual(UP, stream.LT(1).getType())
+        stream.consume()
+        self.assertEqual(104, stream.LT(1).getType())
+        stream.consume()
+        # now we're past rewind position
+        self.assertEqual(105, stream.LT(1).getType())
+        stream.consume()
+        self.assertEqual(UP, stream.LT(1).getType())
+        stream.consume()
+        self.assertEqual(EOF, stream.LT(1).getType())
+        self.assertEqual(UP, stream.LT(-1).getType())
+
+
+    def testMarkRewindNested(self):
+        # ^(101 ^(102 103 ^(106 107) ) 104 105)
+        # stream has 7 real + 6 nav nodes
+        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r0.addChild(r1)
+        r1.addChild(CommonTree(CommonToken(103)))
+        r2 = CommonTree(CommonToken(106))
+        r2.addChild(CommonTree(CommonToken(107)))
+        r1.addChild(r2)
+        r0.addChild(CommonTree(CommonToken(104)))
+        r0.addChild(CommonTree(CommonToken(105)))
+
+        stream = CommonTreeNodeStream(r0)
+        m = stream.mark() # MARK at start
+        stream.consume() # consume 101
+        stream.consume() # consume DN
+        m2 = stream.mark() # MARK on 102
+        stream.consume() # consume 102
+        stream.consume() # consume DN
+        stream.consume() # consume 103
+        stream.consume() # consume 106
+        stream.rewind(m2)      # REWIND to 102
+        self.assertEqual(102, stream.LT(1).getType())
+        stream.consume()
+        self.assertEqual(DOWN, stream.LT(1).getType())
+        stream.consume()
+        # stop at 103 and rewind to start
+        stream.rewind(m) # REWIND to 101
+        self.assertEqual(101, stream.LT(1).getType())
+        stream.consume()
+        self.assertEqual(DOWN, stream.LT(1).getType())
+        stream.consume()
+        self.assertEqual(102, stream.LT(1).getType())
+        stream.consume()
+        self.assertEqual(DOWN, stream.LT(1).getType())
+
+
+    def testSeek(self):
+        # ^(101 ^(102 103 ^(106 107) ) 104 105)
+        # stream has 7 real + 6 nav nodes
+        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r0.addChild(r1)
+        r1.addChild(CommonTree(CommonToken(103)))
+        r2 = CommonTree(CommonToken(106))
+        r2.addChild(CommonTree(CommonToken(107)))
+        r1.addChild(r2)
+        r0.addChild(CommonTree(CommonToken(104)))
+        r0.addChild(CommonTree(CommonToken(105)))
+
+        stream = CommonTreeNodeStream(r0)
+        stream.consume() # consume 101
+        stream.consume() # consume DN
+        stream.consume() # consume 102
+        stream.seek(7)   # seek to 107
+        self.assertEqual(107, stream.LT(1).getType())
+        stream.consume() # consume 107
+        stream.consume() # consume UP
+        stream.consume() # consume UP
+        self.assertEqual(104, stream.LT(1).getType())
+
+
+    def testSeekFromStart(self):
+        # ^(101 ^(102 103 ^(106 107) ) 104 105)
+        # stream has 7 real + 6 nav nodes
+        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r0.addChild(r1)
+        r1.addChild(CommonTree(CommonToken(103)))
+        r2 = CommonTree(CommonToken(106))
+        r2.addChild(CommonTree(CommonToken(107)))
+        r1.addChild(r2)
+        r0.addChild(CommonTree(CommonToken(104)))
+        r0.addChild(CommonTree(CommonToken(105)))
+
+        stream = CommonTreeNodeStream(r0)
+        stream.seek(7)   # seek to 107
+        self.assertEqual(107, stream.LT(1).getType())
+        stream.consume() # consume 107
+        stream.consume() # consume UP
+        stream.consume() # consume UP
+        self.assertEqual(104, stream.LT(1).getType())
+
+
+    def testReset(self):
+        # ^(101 ^(102 103 ^(106 107) ) 104 105)
+        # stream has 7 real + 6 nav nodes
+        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r0.addChild(r1)
+        r1.addChild(CommonTree(CommonToken(103)))
+        r2 = CommonTree(CommonToken(106))
+        r2.addChild(CommonTree(CommonToken(107)))
+        r1.addChild(r2)
+        r0.addChild(CommonTree(CommonToken(104)))
+        r0.addChild(CommonTree(CommonToken(105)))
+
+        stream = CommonTreeNodeStream(r0)
+        v1 = self.toNodesOnlyString(stream) # scan all
+        stream.reset()
+        v2 = self.toNodesOnlyString(stream) # scan all
+        self.assertEqual(v1, v2)
+
+
+    def testIterator(self):
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r0.addChild(r1)
+        r1.addChild(CommonTree(CommonToken(103)))
+        r2 = CommonTree(CommonToken(106))
+        r2.addChild(CommonTree(CommonToken(107)))
+        r1.addChild(r2)
+        r0.addChild(CommonTree(CommonToken(104)))
+        r0.addChild(CommonTree(CommonToken(105)))
+
+        stream = CommonTreeNodeStream(r0)
+
+        expecting = [
+            101, DOWN, 102, DOWN, 103, 106, DOWN, 107, UP, UP, 104, 105, UP]
+        found = [t.type for t in stream]
+        self.assertEqual(expecting, found)
+
+
+    def toNodesOnlyString(self, nodes):
+        buf = []
+        for i in range(nodes.size()):
+            t = nodes.LT(i + 1)
+            type = nodes.getTreeAdaptor().getType(t)
+            if type not in {DOWN, UP}:
+                buf.append(str(type))
+
+        return ' '.join(buf)
+
+
+class TestCommonTreeNodeStream(unittest.TestCase):
+    """Test case for the CommonTreeNodeStream class."""
+
+    def testPushPop(self):
+        # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
+        # stream has 9 real + 8 nav nodes
+        # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r1.addChild(CommonTree(CommonToken(103)))
+        r0.addChild(r1)
+        r2 = CommonTree(CommonToken(104))
+        r2.addChild(CommonTree(CommonToken(105)))
+        r0.addChild(r2)
+        r3 = CommonTree(CommonToken(106))
+        r3.addChild(CommonTree(CommonToken(107)))
+        r0.addChild(r3)
+        r0.addChild(CommonTree(CommonToken(108)))
+        r0.addChild(CommonTree(CommonToken(109)))
+
+        stream = CommonTreeNodeStream(r0)
+        expecting = "101 2 102 2 103 3 104 2 105 3 106 2 107 3 108 109 3"
+        found = str(stream)
+        self.assertEqual(expecting, found)
+
+        # Assume we want to hit node 107 and then "call 102" then return
+
+        indexOf102 = 2
+        indexOf107 = 12
+        for _ in range(indexOf107):# consume til 107 node
+            stream.consume()
+
+        # CALL 102
+        self.assertEqual(107, stream.LT(1).getType())
+        stream.push(indexOf102)
+        self.assertEqual(102, stream.LT(1).getType())
+        stream.consume() # consume 102
+        self.assertEqual(DOWN, stream.LT(1).getType())
+        stream.consume() # consume DN
+        self.assertEqual(103, stream.LT(1).getType())
+        stream.consume() # consume 103
+        self.assertEqual(UP, stream.LT(1).getType())
+        # RETURN
+        stream.pop()
+        self.assertEqual(107, stream.LT(1).getType())
+
+
+    def testNestedPushPop(self):
+        # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
+        # stream has 9 real + 8 nav nodes
+        # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r1.addChild(CommonTree(CommonToken(103)))
+        r0.addChild(r1)
+        r2 = CommonTree(CommonToken(104))
+        r2.addChild(CommonTree(CommonToken(105)))
+        r0.addChild(r2)
+        r3 = CommonTree(CommonToken(106))
+        r3.addChild(CommonTree(CommonToken(107)))
+        r0.addChild(r3)
+        r0.addChild(CommonTree(CommonToken(108)))
+        r0.addChild(CommonTree(CommonToken(109)))
+
+        stream = CommonTreeNodeStream(r0)
+
+        # Assume we want to hit node 107 and then "call 102", which
+        # calls 104, then return
+
+        indexOf102 = 2
+        indexOf107 = 12
+        for _ in range(indexOf107): # consume til 107 node
+            stream.consume()
+
+        self.assertEqual(107, stream.LT(1).getType())
+        # CALL 102
+        stream.push(indexOf102)
+        self.assertEqual(102, stream.LT(1).getType())
+        stream.consume() # consume 102
+        self.assertEqual(DOWN, stream.LT(1).getType())
+        stream.consume() # consume DN
+        self.assertEqual(103, stream.LT(1).getType())
+        stream.consume() # consume 103
+
+        # CALL 104
+        indexOf104 = 6
+        stream.push(indexOf104)
+        self.assertEqual(104, stream.LT(1).getType())
+        stream.consume() # consume 104
+        self.assertEqual(DOWN, stream.LT(1).getType())
+        stream.consume() # consume DN
+        self.assertEqual(105, stream.LT(1).getType())
+        stream.consume() # consume 105
+        self.assertEqual(UP, stream.LT(1).getType())
+        # RETURN (to UP node in 102 subtree)
+        stream.pop()
+
+        self.assertEqual(UP, stream.LT(1).getType())
+        # RETURN (to empty stack)
+        stream.pop()
+        self.assertEqual(107, stream.LT(1).getType())
+
+
+    def testPushPopFromEOF(self):
+        # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
+        # stream has 9 real + 8 nav nodes
+        # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r1.addChild(CommonTree(CommonToken(103)))
+        r0.addChild(r1)
+        r2 = CommonTree(CommonToken(104))
+        r2.addChild(CommonTree(CommonToken(105)))
+        r0.addChild(r2)
+        r3 = CommonTree(CommonToken(106))
+        r3.addChild(CommonTree(CommonToken(107)))
+        r0.addChild(r3)
+        r0.addChild(CommonTree(CommonToken(108)))
+        r0.addChild(CommonTree(CommonToken(109)))
+
+        stream = CommonTreeNodeStream(r0)
+
+        while stream.LA(1) != EOF:
+            stream.consume()
+
+        indexOf102 = 2
+        indexOf104 = 6
+        self.assertEqual(EOF, stream.LT(1).getType())
+
+        # CALL 102
+        stream.push(indexOf102)
+        self.assertEqual(102, stream.LT(1).getType())
+        stream.consume() # consume 102
+        self.assertEqual(DOWN, stream.LT(1).getType())
+        stream.consume() # consume DN
+        self.assertEqual(103, stream.LT(1).getType())
+        stream.consume() # consume 103
+        self.assertEqual(UP, stream.LT(1).getType())
+        # RETURN (to empty stack)
+        stream.pop()
+        self.assertEqual(EOF, stream.LT(1).getType())
+
+        # CALL 104
+        stream.push(indexOf104)
+        self.assertEqual(104, stream.LT(1).getType())
+        stream.consume() # consume 104
+        self.assertEqual(DOWN, stream.LT(1).getType())
+        stream.consume() # consume DN
+        self.assertEqual(105, stream.LT(1).getType())
+        stream.consume() # consume 105
+        self.assertEqual(UP, stream.LT(1).getType())
+        # RETURN (to empty stack)
+        stream.pop()
+        self.assertEqual(EOF, stream.LT(1).getType())
+
+
+class TestCommonTree(unittest.TestCase):
+    """Test case for the CommonTree class."""
+
+    def setUp(self):
+        """Setup test fixure"""
+
+        self.adaptor = CommonTreeAdaptor()
+
+
+    def testSingleNode(self):
+        t = CommonTree(CommonToken(101))
+        self.assertIsNone(t.parent)
+        self.assertEqual(-1, t.childIndex)
+
+
+    def test4Nodes(self):
+        # ^(101 ^(102 103) 104)
+        r0 = CommonTree(CommonToken(101))
+        r0.addChild(CommonTree(CommonToken(102)))
+        r0.getChild(0).addChild(CommonTree(CommonToken(103)))
+        r0.addChild(CommonTree(CommonToken(104)))
+
+        self.assertIsNone(r0.parent)
+        self.assertEqual(-1, r0.childIndex)
+
+
+    def testList(self):
+        # ^(nil 101 102 103)
+        r0 = CommonTree(None)
+        c0 = CommonTree(CommonToken(101))
+        r0.addChild(c0)
+        c1 = CommonTree(CommonToken(102))
+        r0.addChild(c1)
+        c2 = CommonTree(CommonToken(103))
+        r0.addChild(c2)
+
+        self.assertIsNone(r0.parent)
+        self.assertEqual(-1, r0.childIndex)
+        self.assertEqual(r0, c0.parent)
+        self.assertEqual(0, c0.childIndex)
+        self.assertEqual(r0, c1.parent)
+        self.assertEqual(1, c1.childIndex)
+        self.assertEqual(r0, c2.parent)
+        self.assertEqual(2, c2.childIndex)
+
+
+    def testList2(self):
+        # Add child ^(nil 101 102 103) to root 5
+        # should pull 101 102 103 directly to become 5's child list
+        root = CommonTree(CommonToken(5))
+
+        # child tree
+        r0 = CommonTree(None)
+        c0 = CommonTree(CommonToken(101))
+        r0.addChild(c0)
+        c1 = CommonTree(CommonToken(102))
+        r0.addChild(c1)
+        c2 = CommonTree(CommonToken(103))
+        r0.addChild(c2)
+
+        root.addChild(r0)
+
+        self.assertIsNone(root.parent)
+        self.assertEqual(-1, root.childIndex)
+        # check children of root all point at root
+        self.assertEqual(root, c0.parent)
+        self.assertEqual(0, c0.childIndex)
+        self.assertEqual(root, c1.parent)
+        self.assertEqual(1, c1.childIndex)
+        self.assertEqual(root, c2.parent)
+        self.assertEqual(2, c2.childIndex)
+
+
+    def testAddListToExistChildren(self):
+        # Add child ^(nil 101 102 103) to root ^(5 6)
+        # should add 101 102 103 to end of 5's child list
+        root = CommonTree(CommonToken(5))
+        root.addChild(CommonTree(CommonToken(6)))
+
+        # child tree
+        r0 = CommonTree(None)
+        c0 = CommonTree(CommonToken(101))
+        r0.addChild(c0)
+        c1 = CommonTree(CommonToken(102))
+        r0.addChild(c1)
+        c2 = CommonTree(CommonToken(103))
+        r0.addChild(c2)
+
+        root.addChild(r0)
+
+        self.assertIsNone(root.parent)
+        self.assertEqual(-1, root.childIndex)
+        # check children of root all point at root
+        self.assertEqual(root, c0.parent)
+        self.assertEqual(1, c0.childIndex)
+        self.assertEqual(root, c1.parent)
+        self.assertEqual(2, c1.childIndex)
+        self.assertEqual(root, c2.parent)
+        self.assertEqual(3, c2.childIndex)
+
+
+    def testDupTree(self):
+        # ^(101 ^(102 103 ^(106 107) ) 104 105)
+        r0 = CommonTree(CommonToken(101))
+        r1 = CommonTree(CommonToken(102))
+        r0.addChild(r1)
+        r1.addChild(CommonTree(CommonToken(103)))
+        r2 = CommonTree(CommonToken(106))
+        r2.addChild(CommonTree(CommonToken(107)))
+        r1.addChild(r2)
+        r0.addChild(CommonTree(CommonToken(104)))
+        r0.addChild(CommonTree(CommonToken(105)))
+
+        dup = self.adaptor.dupTree(r0)
+
+        self.assertIsNone(dup.parent)
+        self.assertEqual(-1, dup.childIndex)
+        dup.sanityCheckParentAndChildIndexes()
+
+
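+    # becomeRoot(newRoot, oldRoot) makes oldRoot (or oldRoot's children, when
+    # oldRoot is a nil node) the children of newRoot; a nil newRoot with a
+    # single child collapses to that child.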
+    def testBecomeRoot(self):
+        # 5 becomes root of ^(nil 101 102 103)
+        newRoot = CommonTree(CommonToken(5))
+
+        oldRoot = CommonTree(None)
+        oldRoot.addChild(CommonTree(CommonToken(101)))
+        oldRoot.addChild(CommonTree(CommonToken(102)))
+        oldRoot.addChild(CommonTree(CommonToken(103)))
+
+        self.adaptor.becomeRoot(newRoot, oldRoot)
+        newRoot.sanityCheckParentAndChildIndexes()
+
+
+    def testBecomeRoot2(self):
+        # 5 becomes root of ^(101 102 103)
+        newRoot = CommonTree(CommonToken(5))
+
+        oldRoot = CommonTree(CommonToken(101))
+        oldRoot.addChild(CommonTree(CommonToken(102)))
+        oldRoot.addChild(CommonTree(CommonToken(103)))
+
+        self.adaptor.becomeRoot(newRoot, oldRoot)
+        newRoot.sanityCheckParentAndChildIndexes()
+
+
+    def testBecomeRoot3(self):
+        # ^(nil 5) becomes root of ^(nil 101 102 103)
+        newRoot = CommonTree(None)
+        newRoot.addChild(CommonTree(CommonToken(5)))
+
+        oldRoot = CommonTree(None)
+        oldRoot.addChild(CommonTree(CommonToken(101)))
+        oldRoot.addChild(CommonTree(CommonToken(102)))
+        oldRoot.addChild(CommonTree(CommonToken(103)))
+
+        self.adaptor.becomeRoot(newRoot, oldRoot)
+        newRoot.sanityCheckParentAndChildIndexes()
+
+
+    def testBecomeRoot5(self):
+        # ^(nil 5) becomes root of ^(101 102 103)
+        newRoot = CommonTree(None)
+        newRoot.addChild(CommonTree(CommonToken(5)))
+
+        oldRoot = CommonTree(CommonToken(101))
+        oldRoot.addChild(CommonTree(CommonToken(102)))
+        oldRoot.addChild(CommonTree(CommonToken(103)))
+
+        self.adaptor.becomeRoot(newRoot, oldRoot)
+        newRoot.sanityCheckParentAndChildIndexes()
+
+
+    def testBecomeRoot6(self):
+        # emulates construction of ^(5 6)
+        root_0 = self.adaptor.nil()
+        root_1 = self.adaptor.nil()
+        root_1 = self.adaptor.becomeRoot(CommonTree(CommonToken(5)), root_1)
+
+        self.adaptor.addChild(root_1, CommonTree(CommonToken(6)))
+
+        self.adaptor.addChild(root_0, root_1)
+
+        root_0.sanityCheckParentAndChildIndexes()
+
+
+    # Test replaceChildren
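+    # replaceChildren(startIndex, stopIndex, t) replaces the children in the
+    # inclusive index range; a nil-rooted t contributes its child list, while
+    # a plain node is inserted as a single child.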
+
+    def testReplaceWithNoChildren(self):
+        t = CommonTree(CommonToken(101))
+        newChild = CommonTree(CommonToken(5))
+        self.assertRaises(IndexError, t.replaceChildren, 0, 0, newChild)
+
+
+    def testReplaceWithOneChildren(self):
+        # assume token type 99 and use text
+        t = CommonTree(CommonToken(99, text="a"))
+        c0 = CommonTree(CommonToken(99, text="b"))
+        t.addChild(c0)
+
+        newChild = CommonTree(CommonToken(99, text="c"))
+        t.replaceChildren(0, 0, newChild)
+        expecting = "(a c)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+    def testReplaceInMiddle(self):
+        t = CommonTree(CommonToken(99, text="a"))
+        t.addChild(CommonTree(CommonToken(99, text="b")))
+        t.addChild(CommonTree(CommonToken(99, text="c"))) # index 1
+        t.addChild(CommonTree(CommonToken(99, text="d")))
+
+        newChild = CommonTree(CommonToken(99, text="x"))
+        t.replaceChildren(1, 1, newChild)
+        expecting = "(a b x d)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+    def testReplaceAtLeft(self):
+        t = CommonTree(CommonToken(99, text="a"))
+        t.addChild(CommonTree(CommonToken(99, text="b"))) # index 0
+        t.addChild(CommonTree(CommonToken(99, text="c")))
+        t.addChild(CommonTree(CommonToken(99, text="d")))
+
+        newChild = CommonTree(CommonToken(99, text="x"))
+        t.replaceChildren(0, 0, newChild)
+        expecting = "(a x c d)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+    def testReplaceAtRight(self):
+        t = CommonTree(CommonToken(99, text="a"))
+        t.addChild(CommonTree(CommonToken(99, text="b")))
+        t.addChild(CommonTree(CommonToken(99, text="c")))
+        t.addChild(CommonTree(CommonToken(99, text="d"))) # index 2
+
+        newChild = CommonTree(CommonToken(99, text="x"))
+        t.replaceChildren(2, 2, newChild)
+        expecting = "(a b c x)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+    def testReplaceOneWithTwoAtLeft(self):
+        t = CommonTree(CommonToken(99, text="a"))
+        t.addChild(CommonTree(CommonToken(99, text="b")))
+        t.addChild(CommonTree(CommonToken(99, text="c")))
+        t.addChild(CommonTree(CommonToken(99, text="d")))
+
+        newChildren = self.adaptor.nil()
+        newChildren.addChild(CommonTree(CommonToken(99, text="x")))
+        newChildren.addChild(CommonTree(CommonToken(99, text="y")))
+
+        t.replaceChildren(0, 0, newChildren)
+        expecting = "(a x y c d)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+    def testReplaceOneWithTwoAtRight(self):
+        t = CommonTree(CommonToken(99, text="a"))
+        t.addChild(CommonTree(CommonToken(99, text="b")))
+        t.addChild(CommonTree(CommonToken(99, text="c")))
+        t.addChild(CommonTree(CommonToken(99, text="d")))
+
+        newChildren = self.adaptor.nil()
+        newChildren.addChild(CommonTree(CommonToken(99, text="x")))
+        newChildren.addChild(CommonTree(CommonToken(99, text="y")))
+
+        t.replaceChildren(2, 2, newChildren)
+        expecting = "(a b c x y)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+    def testReplaceOneWithTwoInMiddle(self):
+        t = CommonTree(CommonToken(99, text="a"))
+        t.addChild(CommonTree(CommonToken(99, text="b")))
+        t.addChild(CommonTree(CommonToken(99, text="c")))
+        t.addChild(CommonTree(CommonToken(99, text="d")))
+
+        newChildren = self.adaptor.nil()
+        newChildren.addChild(CommonTree(CommonToken(99, text="x")))
+        newChildren.addChild(CommonTree(CommonToken(99, text="y")))
+
+        t.replaceChildren(1, 1, newChildren)
+        expecting = "(a b x y d)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+    def testReplaceTwoWithOneAtLeft(self):
+        t = CommonTree(CommonToken(99, text="a"))
+        t.addChild(CommonTree(CommonToken(99, text="b")))
+        t.addChild(CommonTree(CommonToken(99, text="c")))
+        t.addChild(CommonTree(CommonToken(99, text="d")))
+
+        newChild = CommonTree(CommonToken(99, text="x"))
+
+        t.replaceChildren(0, 1, newChild)
+        expecting = "(a x d)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+    def testReplaceTwoWithOneAtRight(self):
+        t = CommonTree(CommonToken(99, text="a"))
+        t.addChild(CommonTree(CommonToken(99, text="b")))
+        t.addChild(CommonTree(CommonToken(99, text="c")))
+        t.addChild(CommonTree(CommonToken(99, text="d")))
+
+        newChild = CommonTree(CommonToken(99, text="x"))
+
+        t.replaceChildren(1, 2, newChild)
+        expecting = "(a b x)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+    def testReplaceAllWithOne(self):
+        t = CommonTree(CommonToken(99, text="a"))
+        t.addChild(CommonTree(CommonToken(99, text="b")))
+        t.addChild(CommonTree(CommonToken(99, text="c")))
+        t.addChild(CommonTree(CommonToken(99, text="d")))
+
+        newChild = CommonTree(CommonToken(99, text="x"))
+
+        t.replaceChildren(0, 2, newChild)
+        expecting = "(a x)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+    def testReplaceAllWithTwo(self):
+        t = CommonTree(CommonToken(99, text="a"))
+        t.addChild(CommonTree(CommonToken(99, text="b")))
+        t.addChild(CommonTree(CommonToken(99, text="c")))
+        t.addChild(CommonTree(CommonToken(99, text="d")))
+
+        newChildren = self.adaptor.nil()
+        newChildren.addChild(CommonTree(CommonToken(99, text="x")))
+        newChildren.addChild(CommonTree(CommonToken(99, text="y")))
+
+        t.replaceChildren(0, 2, newChildren)
+        expecting = "(a x y)"
+        self.assertEqual(expecting, t.toStringTree())
+        t.sanityCheckParentAndChildIndexes()
+
+
+class TestTreeContext(unittest.TestCase):
+    """Test the TreeParser.inContext() method"""
+
+    tokenNames = [
+        "<invalid>", "<EOR>", "<DOWN>", "<UP>", "VEC", "ASSIGN", "PRINT",
+        "PLUS", "MULT", "DOT", "ID", "INT", "WS", "'['", "','", "']'"
+        ]
+
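+    # _inContext() walks a node's ancestor chain toward the root and matches
+    # it against a space-separated list of token names; "..." stands for any
+    # number of intervening ancestors.
+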
+    def testSimpleParent(self):
+        tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = True
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC")
+        self.assertEqual(expecting, found)
+
+
+    def testNoParent(self):
+        tree = "(PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(%x:PRINT (MULT ID (VEC INT INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = False
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC")
+        self.assertEqual(expecting, found)
+
+
+    def testParentWithWildcard(self):
+        tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = True
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ...")
+        self.assertEqual(expecting, found)
+
+
+    def testWildcardAtStartIgnored(self):
+        tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = True
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "...VEC")
+        self.assertEqual(expecting, found)
+
+
+    def testWildcardInBetween(self):
+        tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = True
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT...VEC")
+        self.assertEqual(expecting, found)
+
+
+    def testLotsOfWildcards(self):
+        tree = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = True
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "... PRINT ... VEC ...")
+        self.assertEqual(expecting, found)
+
+
+    def testDeep(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = True
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ...")
+        self.assertEqual(expecting, found)
+
+
+    def testDeepAndFindRoot(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = True
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT ...")
+        self.assertEqual(expecting, found)
+
+
+    def testDeepAndFindRoot2(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = True
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT ... VEC ...")
+        self.assertEqual(expecting, found)
+
+
+    def testChain(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = True
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT MULT VEC MULT")
+        self.assertEqual(expecting, found)
+
+
+    ## TEST INVALID CONTEXTS
+
+    def testNotParent(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = False
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC")
+        self.assertEqual(expecting, found)
+
+
+    def testMismatch(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = False
+        ## missing MULT
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT VEC MULT")
+        self.assertEqual(expecting, found)
+
+
+    def testMismatch2(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = False
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "PRINT VEC ...")
+        self.assertEqual(expecting, found)
+
+
+    def testMismatch3(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        expecting = False
+        found = TreeParser._inContext(adaptor, self.tokenNames, node, "VEC ... VEC MULT")
+        self.assertEqual(expecting, found)
+
+
+    def testDoubleEtc(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        self.assertRaisesRegex(
+            ValueError, r'invalid syntax: \.\.\. \.\.\.',
+            TreeParser._inContext, adaptor, self.tokenNames, node, "PRINT ... ... VEC")
+
+
+    def testDotDot(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        labels = {}
+        valid = wiz.parse(
+            t,
+            "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))",
+            labels)
+        self.assertTrue(valid)
+        node = labels.get("x")
+
+        self.assertRaisesRegex(
+            ValueError, r'invalid syntax: \.\.',
+            TreeParser._inContext, adaptor, self.tokenNames, node, "PRINT .. VEC")
+
+
+class TestTreeVisitor(unittest.TestCase):
+    """Test of the TreeVisitor class."""
+
+    tokenNames = [
+        "<invalid>", "<EOR>", "<DOWN>", "<UP>", "VEC", "ASSIGN", "PRINT",
+        "PLUS", "MULT", "DOT", "ID", "INT", "WS", "'['", "','", "']'"
+        ]
+
+    def testTreeVisitor(self):
+        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokenNames)
+        t = wiz.create(tree)
+
+        found = []
+        def pre(t):
+            found.append("pre({})".format(t))
+            return t
+        def post(t):
+            found.append("post({})".format(t))
+            return t
+
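+        # TreeVisitor.visit() walks the tree depth-first, calling pre() before
+        # visiting a node's children and post() afterwards.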
+        visitor = TreeVisitor(adaptor)
+        visitor.visit(t, pre, post)
+
+        expecting = [ "pre(PRINT)", "pre(MULT)", "pre(x)", "post(x)",
+                      "pre(VEC)", "pre(MULT)", "pre(9)", "post(9)", "pre(1)",
+                      "post(1)", "post(MULT)", "pre(2)", "post(2)", "pre(3)",
+                      "post(3)", "post(VEC)", "post(MULT)", "post(PRINT)" ]
+
+        self.assertEqual(expecting, found)
+
+
+class TestTreeIterator(unittest.TestCase):
+    tokens = [
+        "<invalid>", "<EOR>", "<DOWN>", "<UP>",
+        "A", "B", "C", "D", "E", "F", "G" ]
+
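+    # TreeIterator yields the nodes of a tree depth-first, emitting DOWN/UP
+    # navigation nodes around child lists and a trailing EOF node.
+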
+    def testNode(self):
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokens)
+        t = wiz.create("A")
+        it = TreeIterator(t)
+        expecting = "A EOF"
+        found = self.toString(it)
+        self.assertEqual(expecting, found)
+
+
+    def testFlatAB(self):
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokens)
+        t = wiz.create("(nil A B)")
+        it = TreeIterator(t)
+        expecting = "nil DOWN A B UP EOF"
+        found = self.toString(it)
+        self.assertEqual(expecting, found)
+
+
+    def testAB(self):
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokens)
+        t = wiz.create("(A B)")
+        it = TreeIterator(t)
+        expecting = "A DOWN B UP EOF"
+        found = self.toString(it)
+        self.assertEqual(expecting, found)
+
+
+    def testABC(self):
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokens)
+        t = wiz.create("(A B C)")
+        it = TreeIterator(t)
+        expecting = "A DOWN B C UP EOF"
+        found = self.toString(it)
+        self.assertEqual(expecting, found)
+
+
+    def testVerticalList(self):
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokens)
+        t = wiz.create("(A (B C))")
+        it = TreeIterator(t)
+        expecting = "A DOWN B DOWN C UP UP EOF"
+        found = self.toString(it)
+        self.assertEqual(expecting, found)
+
+
+    def testComplex(self):
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokens)
+        t = wiz.create("(A (B (C D E) F) G)")
+        it = TreeIterator(t)
+        expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF"
+        found = self.toString(it)
+        self.assertEqual(expecting, found)
+
+
+    def testReset(self):
+        adaptor = CommonTreeAdaptor()
+        wiz = TreeWizard(adaptor, self.tokens)
+        t = wiz.create("(A (B (C D E) F) G)")
+        it = TreeIterator(t)
+        expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF"
+        found = self.toString(it)
+        self.assertEqual(expecting, found)
+
+        it.reset()
+        expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF"
+        found = self.toString(it)
+        self.assertEqual(expecting, found)
+
+
+    def toString(self, it):
+        buf = []
+        for n in it:
+            buf.append(str(n))
+
+        return ' '.join(buf)
+
+
+if __name__ == "__main__":
+    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
diff --git a/runtime/Python3/unittests/testtreewizard.py b/runtime/Python3/unittests/testtreewizard.py
new file mode 100644
index 0000000..5ffaa4d
--- /dev/null
+++ b/runtime/Python3/unittests/testtreewizard.py
@@ -0,0 +1,689 @@
+
+from io import StringIO
+import os
+import unittest
+
+from antlr3.tree import CommonTreeAdaptor, CommonTree, INVALID_TOKEN_TYPE
+from antlr3.treewizard import TreeWizard, computeTokenTypes, \
+     TreePatternLexer, EOF, ID, BEGIN, END, PERCENT, COLON, DOT, ARG, \
+     TreePatternParser, \
+     TreePattern, WildcardTreePattern, TreePatternTreeAdaptor
+
+
+class TestComputeTokenTypes(unittest.TestCase):
+    """Test case for the computeTokenTypes function."""
+
+    def testNone(self):
+        """computeTokenTypes(None) -> {}"""
+
+        typeMap = computeTokenTypes(None)
+        self.assertIsInstance(typeMap, dict)
+        self.assertEqual(typeMap, {})
+
+
+    def testList(self):
+        """computeTokenTypes(['a', 'b']) -> { 'a': 0, 'b': 1 }"""
+
+        typeMap = computeTokenTypes(['a', 'b'])
+        self.assertIsInstance(typeMap, dict)
+        self.assertEqual(typeMap, { 'a': 0, 'b': 1 })
+
+
+class TestTreePatternLexer(unittest.TestCase):
+    """Test case for the TreePatternLexer class."""
+
+    def testBegin(self):
+        """TreePatternLexer(): '('"""
+
+        lexer = TreePatternLexer('(')
+        type = lexer.nextToken()
+        self.assertEqual(type, BEGIN)
+        self.assertEqual(lexer.sval, '')
+        self.assertFalse(lexer.error)
+
+
+    def testEnd(self):
+        """TreePatternLexer(): ')'"""
+
+        lexer = TreePatternLexer(')')
+        type = lexer.nextToken()
+        self.assertEqual(type, END)
+        self.assertEqual(lexer.sval, '')
+        self.assertFalse(lexer.error)
+
+
+    def testPercent(self):
+        """TreePatternLexer(): '%'"""
+
+        lexer = TreePatternLexer('%')
+        type = lexer.nextToken()
+        self.assertEqual(type, PERCENT)
+        self.assertEqual(lexer.sval, '')
+        self.assertFalse(lexer.error)
+
+
+    def testDot(self):
+        """TreePatternLexer(): '.'"""
+
+        lexer = TreePatternLexer('.')
+        type = lexer.nextToken()
+        self.assertEqual(type, DOT)
+        self.assertEqual(lexer.sval, '')
+        self.assertFalse(lexer.error)
+
+
+    def testColon(self):
+        """TreePatternLexer(): ':'"""
+
+        lexer = TreePatternLexer(':')
+        type = lexer.nextToken()
+        self.assertEqual(type, COLON)
+        self.assertEqual(lexer.sval, '')
+        self.assertFalse(lexer.error)
+
+
+    def testEOF(self):
+        """TreePatternLexer(): EOF"""
+
+        lexer = TreePatternLexer('  \n \r \t ')
+        type = lexer.nextToken()
+        self.assertEqual(type, EOF)
+        self.assertEqual(lexer.sval, '')
+        self.assertFalse(lexer.error)
+
+
+    def testID(self):
+        """TreePatternLexer(): ID"""
+
+        lexer = TreePatternLexer('_foo12_bar')
+        type = lexer.nextToken()
+        self.assertEqual(type, ID)
+        self.assertEqual(lexer.sval, '_foo12_bar')
+        self.assertFalse(lexer.error)
+
+
+    def testARG(self):
+        """TreePatternLexer(): ARG"""
+
+        lexer = TreePatternLexer(r'[ \]bla\n]')
+        type = lexer.nextToken()
+        self.assertEqual(type, ARG)
+        self.assertEqual(lexer.sval, r' ]bla\n')
+        self.assertFalse(lexer.error)
+
+
+    def testError(self):
+        """TreePatternLexer(): error"""
+
+        lexer = TreePatternLexer('1')
+        type = lexer.nextToken()
+        self.assertEqual(type, EOF)
+        self.assertEqual(lexer.sval, '')
+        self.assertTrue(lexer.error)
+
+
+class TestTreePatternParser(unittest.TestCase):
+    """Test case for the TreePatternParser class."""
+
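+    # Pattern syntax exercised below: "(A B)" is a tree with root A and child
+    # B, "ID[foo]" sets node text to "foo", "%label:" attaches a label, "."
+    # matches any single node, and "nil" denotes a nil (list) root.
+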
+    def setUp(self):
+        """Setup text fixure
+
+        We need a tree adaptor, use CommonTreeAdaptor.
+        And a constant list of token names.
+
+        """
+
+        self.adaptor = CommonTreeAdaptor()
+        self.tokens = [
+            "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR"
+            ]
+        self.wizard = TreeWizard(self.adaptor, tokenNames=self.tokens)
+
+
+    def testSingleNode(self):
+        """TreePatternParser: 'ID'"""
+        lexer = TreePatternLexer('ID')
+        parser = TreePatternParser(lexer, self.wizard, self.adaptor)
+        tree = parser.pattern()
+        self.assertIsInstance(tree, CommonTree)
+        self.assertEqual(tree.getType(), 10)
+        self.assertEqual(tree.getText(), 'ID')
+
+
+    def testSingleNodeWithArg(self):
+        """TreePatternParser: 'ID[foo]'"""
+        lexer = TreePatternLexer('ID[foo]')
+        parser = TreePatternParser(lexer, self.wizard, self.adaptor)
+        tree = parser.pattern()
+        self.assertIsInstance(tree, CommonTree)
+        self.assertEqual(tree.getType(), 10)
+        self.assertEqual(tree.getText(), 'foo')
+
+
+    def testSingleLevelTree(self):
+        """TreePatternParser: '(A B)'"""
+        lexer = TreePatternLexer('(A B)')
+        parser = TreePatternParser(lexer, self.wizard, self.adaptor)
+        tree = parser.pattern()
+        self.assertIsInstance(tree, CommonTree)
+        self.assertEqual(tree.getType(), 5)
+        self.assertEqual(tree.getText(), 'A')
+        self.assertEqual(tree.getChildCount(), 1)
+        self.assertEqual(tree.getChild(0).getType(), 6)
+        self.assertEqual(tree.getChild(0).getText(), 'B')
+
+
+    def testNil(self):
+        """TreePatternParser: 'nil'"""
+        lexer = TreePatternLexer('nil')
+        parser = TreePatternParser(lexer, self.wizard, self.adaptor)
+        tree = parser.pattern()
+        self.assertIsInstance(tree, CommonTree)
+        self.assertEqual(tree.getType(), 0)
+        self.assertIsNone(tree.getText())
+
+
+    def testWildcard(self):
+        """TreePatternParser: '(.)'"""
+        lexer = TreePatternLexer('(.)')
+        parser = TreePatternParser(lexer, self.wizard, self.adaptor)
+        tree = parser.pattern()
+        self.assertIsInstance(tree, WildcardTreePattern)
+
+
+    def testLabel(self):
+        """TreePatternParser: '(%a:A)'"""
+        lexer = TreePatternLexer('(%a:A)')
+        parser = TreePatternParser(lexer, self.wizard, TreePatternTreeAdaptor())
+        tree = parser.pattern()
+        self.assertIsInstance(tree, TreePattern)
+        self.assertEqual(tree.label, 'a')
+
+
+    def testError1(self):
+        """TreePatternParser: ')'"""
+        lexer = TreePatternLexer(')')
+        parser = TreePatternParser(lexer, self.wizard, self.adaptor)
+        tree = parser.pattern()
+        self.assertIsNone(tree)
+
+
+    def testError2(self):
+        """TreePatternParser: '()'"""
+        lexer = TreePatternLexer('()')
+        parser = TreePatternParser(lexer, self.wizard, self.adaptor)
+        tree = parser.pattern()
+        self.assertIsNone(tree)
+
+
+    def testError3(self):
+        """TreePatternParser: '(A ])'"""
+        lexer = TreePatternLexer('(A ])')
+        parser = TreePatternParser(lexer, self.wizard, self.adaptor)
+        tree = parser.pattern()
+        self.assertIsNone(tree)
+
+
+class TestTreeWizard(unittest.TestCase):
+    """Test case for the TreeWizard class."""
+
+    def setUp(self):
+        """Setup text fixure
+
+        We need a tree adaptor, use CommonTreeAdaptor.
+        And a constant list of token names.
+
+        """
+
+        self.adaptor = CommonTreeAdaptor()
+        self.tokens = [
+            "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR"
+            ]
+
+
+    def testInit(self):
+        """TreeWizard.__init__()"""
+
+        wiz = TreeWizard(
+            self.adaptor,
+            tokenNames=['a', 'b']
+            )
+
+        self.assertIs(wiz.adaptor, self.adaptor)
+        self.assertEqual(
+            wiz.tokenNameToTypeMap,
+            { 'a': 0, 'b': 1 }
+            )
+
+
+    def testGetTokenType(self):
+        """TreeWizard.getTokenType()"""
+
+        wiz = TreeWizard(
+            self.adaptor,
+            tokenNames=self.tokens
+            )
+
+        self.assertEqual(
+            wiz.getTokenType('A'),
+            5
+            )
+
+        self.assertEqual(
+            wiz.getTokenType('VAR'),
+            11
+            )
+
+        self.assertEqual(
+            wiz.getTokenType('invalid'),
+            INVALID_TOKEN_TYPE
+            )
+
+    def testSingleNode(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("ID")
+        found = t.toStringTree()
+        expecting = "ID"
+        self.assertEqual(expecting, found)
+
+
+    def testSingleNodeWithArg(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("ID[foo]")
+        found = t.toStringTree()
+        expecting = "foo"
+        self.assertEqual(expecting, found)
+
+
+    def testSingleNodeTree(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A)")
+        found = t.toStringTree()
+        expecting = "A"
+        self.assertEqual(expecting, found)
+
+
+    def testSingleLevelTree(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B C D)")
+        found = t.toStringTree()
+        expecting = "(A B C D)"
+        self.assertEqual(expecting, found)
+
+
+    def testListTree(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(nil A B C)")
+        found = t.toStringTree()
+        expecting = "A B C"
+        self.assertEqual(expecting, found)
+
+
+    def testInvalidListTree(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("A B C")
+        self.assertIsNone(t)
+
+
+    def testDoubleLevelTree(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A (B C) (B D) E)")
+        found = t.toStringTree()
+        expecting = "(A (B C) (B D) E)"
+        self.assertEqual(expecting, found)
+
+
+    def __simplifyIndexMap(self, indexMap):
+        return dict( # stringify nodes for easy comparing
+            (ttype, [str(node) for node in nodes])
+            for ttype, nodes in indexMap.items()
+            )
+
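+    # index() maps each token type found in the tree to the list of nodes of
+    # that type.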
+    def testSingleNodeIndex(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("ID")
+        indexMap = wiz.index(tree)
+        found = self.__simplifyIndexMap(indexMap)
+        expecting = { 10: ["ID"] }
+        self.assertEqual(expecting, found)
+
+
+    def testNoRepeatsIndex(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B C D)")
+        indexMap = wiz.index(tree)
+        found = self.__simplifyIndexMap(indexMap)
+        expecting = { 8:['D'], 6:['B'], 7:['C'], 5:['A'] }
+        self.assertEqual(expecting, found)
+
+
+    def testRepeatsIndex(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B (A C B) B D D)")
+        indexMap = wiz.index(tree)
+        found = self.__simplifyIndexMap(indexMap)
+        expecting = { 8: ['D', 'D'], 6: ['B', 'B', 'B'], 7: ['C'], 5: ['A', 'A'] }
+        self.assertEqual(expecting, found)
+
+
+    def testNoRepeatsVisit(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B C D)")
+
+        elements = []
+        def visitor(node, parent, childIndex, labels):
+            elements.append(str(node))
+
+        wiz.visit(tree, wiz.getTokenType("B"), visitor)
+
+        expecting = ['B']
+        self.assertEqual(expecting, elements)
+
+
+    def testNoRepeatsVisit2(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B (A C B) B D D)")
+
+        elements = []
+        def visitor(node, parent, childIndex, labels):
+            elements.append(str(node))
+
+        wiz.visit(tree, wiz.getTokenType("C"), visitor)
+
+        expecting = ['C']
+        self.assertEqual(expecting, elements)
+
+
+    def testRepeatsVisit(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B (A C B) B D D)")
+
+        elements = []
+        def visitor(node, parent, childIndex, labels):
+            elements.append(str(node))
+
+        wiz.visit(tree, wiz.getTokenType("B"), visitor)
+
+        expecting = ['B', 'B', 'B']
+        self.assertEqual(expecting, elements)
+
+
+    def testRepeatsVisit2(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B (A C B) B D D)")
+
+        elements = []
+        def visitor(node, parent, childIndex, labels):
+            elements.append(str(node))
+
+        wiz.visit(tree, wiz.getTokenType("A"), visitor)
+
+        expecting = ['A', 'A']
+        self.assertEqual(expecting, elements)
+
+
+    def testRepeatsVisitWithContext(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B (A C B) B D D)")
+
+        elements = []
+        def visitor(node, parent, childIndex, labels):
+            elements.append('{}@{}[{}]'.format(node, parent, childIndex))
+
+        wiz.visit(tree, wiz.getTokenType("B"), visitor)
+
+        expecting = ['B@A[0]', 'B@A[1]', 'B@A[2]']
+        self.assertEqual(expecting, elements)
+
+
+    def testRepeatsVisitWithNullParentAndContext(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B (A C B) B D D)")
+
+        elements = []
+        def visitor(node, parent, childIndex, labels):
+            elements.append(
+                '{}@{}[{}]'.format(
+                    node, parent or 'nil', childIndex)
+                )
+
+        wiz.visit(tree, wiz.getTokenType("A"), visitor)
+
+        expecting = ['A@nil[0]', 'A@A[1]']
+        self.assertEqual(expecting, elements)
+
+
+    def testVisitPattern(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B C (A B) D)")
+
+        elements = []
+        def visitor(node, parent, childIndex, labels):
+            elements.append(
+                str(node)
+                )
+
+        wiz.visit(tree, '(A B)', visitor)
+
+        expecting = ['A'] # shouldn't match overall root, just (A B)
+        self.assertEqual(expecting, elements)
+
+
+    def testVisitPatternMultiple(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B C (A B) (D (A B)))")
+
+        elements = []
+        def visitor(node, parent, childIndex, labels):
+            elements.append(
+                '{}@{}[{}]'.format(node, parent or 'nil', childIndex)
+                )
+
+        wiz.visit(tree, '(A B)', visitor)
+
+        expecting = ['A@A[2]', 'A@D[0]']
+        self.assertEqual(expecting, elements)
+
+
+    def testVisitPatternMultipleWithLabels(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        tree = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))")
+
+        elements = []
+        def visitor(node, parent, childIndex, labels):
+            elements.append(
+                '{}@{}[{}]{}&{}'.format(
+                    node,
+                    parent or 'nil',
+                    childIndex,
+                    labels['a'],
+                    labels['b'],
+                    )
+                )
+
+        wiz.visit(tree, '(%a:A %b:B)', visitor)
+
+        expecting = ['foo@A[2]foo&bar', 'big@D[0]big&dog']
+        self.assertEqual(expecting, elements)
+
+
+    def testParse(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B C)")
+        valid = wiz.parse(t, "(A B C)")
+        self.assertTrue(valid)
+
+
+    def testParseSingleNode(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("A")
+        valid = wiz.parse(t, "A")
+        self.assertTrue(valid)
+
+
+    def testParseSingleNodeFails(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("A")
+        valid = wiz.parse(t, "B")
+        self.assertFalse(valid)
+
+
+    def testParseFlatTree(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(nil A B C)")
+        valid = wiz.parse(t, "(nil A B C)")
+        self.assertTrue(valid)
+
+
+    def testParseFlatTreeFails(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(nil A B C)")
+        valid = wiz.parse(t, "(nil A B)")
+        self.assertFalse(valid)
+
+
+    def testParseFlatTreeFails2(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(nil A B C)")
+        valid = wiz.parse(t, "(nil A B A)")
+        self.assertFalse(valid)
+
+
+    def testWildcard(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B C)")
+        valid = wiz.parse(t, "(A . .)")
+        self.assertTrue(valid)
+
+
+    def testParseWithText(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B[foo] C[bar])")
+        # C pattern has no text arg so despite [bar] in t, no need
+        # to match text--check structure only.
+        valid = wiz.parse(t, "(A B[foo] C)")
+        self.assertTrue(valid)
+
+
+    def testParseWithText2(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B[T__32] (C (D E[a])))")
+        # Unlike testParseWithText, the parse result is not asserted here; the
+        # check below verifies that the [text] arguments are reflected in the
+        # constructed tree's string rendering.
+        valid = wiz.parse(t, "(A B[foo] C)")
+        self.assertEqual("(A T__32 (C (D a)))", t.toStringTree())
+
+
+    def testParseWithTextFails(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B C)")
+        valid = wiz.parse(t, "(A[foo] B C)")
+        self.assertFalse(valid) # fails
+
+
+    def testParseLabels(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B C)")
+        labels = {}
+        valid = wiz.parse(t, "(%a:A %b:B %c:C)", labels)
+        self.assertTrue(valid)
+        self.assertEqual("A", str(labels["a"]))
+        self.assertEqual("B", str(labels["b"]))
+        self.assertEqual("C", str(labels["c"]))
+
+
+    def testParseWithWildcardLabels(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B C)")
+        labels = {}
+        valid = wiz.parse(t, "(A %b:. %c:.)", labels)
+        self.assertTrue(valid)
+        self.assertEqual("B", str(labels["b"]))
+        self.assertEqual("C", str(labels["c"]))
+
+
+    def testParseLabelsAndTestText(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B[foo] C)")
+        labels = {}
+        valid = wiz.parse(t, "(%a:A %b:B[foo] %c:C)", labels)
+        self.assertTrue(valid)
+        self.assertEqual("A", str(labels["a"]))
+        self.assertEqual("foo", str(labels["b"]))
+        self.assertEqual("C", str(labels["c"]))
+
+
+    def testParseLabelsInNestedTree(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A (B C) (D E))")
+        labels = {}
+        valid = wiz.parse(t, "(%a:A (%b:B %c:C) (%d:D %e:E) )", labels)
+        self.assertTrue(valid)
+        self.assertEqual("A", str(labels["a"]))
+        self.assertEqual("B", str(labels["b"]))
+        self.assertEqual("C", str(labels["c"]))
+        self.assertEqual("D", str(labels["d"]))
+        self.assertEqual("E", str(labels["e"]))
+
+
+    def testEquals(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t1 = wiz.create("(A B C)")
+        t2 = wiz.create("(A B C)")
+        same = wiz.equals(t1, t2)
+        self.assertTrue(same)
+
+
+    def testEqualsWithText(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t1 = wiz.create("(A B[foo] C)")
+        t2 = wiz.create("(A B[foo] C)")
+        same = wiz.equals(t1, t2)
+        self.assertTrue(same)
+
+
+    def testEqualsWithMismatchedText(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t1 = wiz.create("(A B[foo] C)")
+        t2 = wiz.create("(A B C)")
+        same = wiz.equals(t1, t2)
+        self.assertFalse(same)
+
+
+    def testEqualsWithMismatchedList(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t1 = wiz.create("(A B C)")
+        t2 = wiz.create("(A B A)")
+        same = wiz.equals(t1, t2)
+        self.assertFalse(same)
+
+
+    def testEqualsWithMismatchedListLength(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t1 = wiz.create("(A B C)")
+        t2 = wiz.create("(A B)")
+        same = wiz.equals(t1, t2)
+        self.assertFalse(same)
+
+
+    def testFindPattern(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))")
+        subtrees = wiz.find(t, "(A B)")
+        found = [str(node) for node in subtrees]
+        expecting = ['foo', 'big']
+        self.assertEqual(expecting, found)
+
+
+    def testFindTokenType(self):
+        wiz = TreeWizard(self.adaptor, self.tokens)
+        t = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))")
+        subtrees = wiz.find(t, wiz.getTokenType('A'))
+        found = [str(node) for node in subtrees]
+        expecting = ['A', 'foo', 'big']
+        self.assertEqual(expecting, found)
+
+
+
+if __name__ == "__main__":
+    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
diff --git a/antlr-3.4/runtime/Ruby/ANTLR-LICENSE.txt b/runtime/Ruby/ANTLR-LICENSE.txt
similarity index 100%
rename from antlr-3.4/runtime/Ruby/ANTLR-LICENSE.txt
rename to runtime/Ruby/ANTLR-LICENSE.txt
diff --git a/antlr-3.4/runtime/Ruby/History.txt b/runtime/Ruby/History.txt
similarity index 100%
rename from antlr-3.4/runtime/Ruby/History.txt
rename to runtime/Ruby/History.txt
diff --git a/antlr-3.4/runtime/Ruby/README.txt b/runtime/Ruby/README.txt
similarity index 100%
rename from antlr-3.4/runtime/Ruby/README.txt
rename to runtime/Ruby/README.txt
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3.rb b/runtime/Ruby/lib/antlr3.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3.rb
rename to runtime/Ruby/lib/antlr3.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/constants.rb b/runtime/Ruby/lib/antlr3/constants.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/constants.rb
rename to runtime/Ruby/lib/antlr3/constants.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/debug.rb b/runtime/Ruby/lib/antlr3/debug.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/debug.rb
rename to runtime/Ruby/lib/antlr3/debug.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/debug/event-hub.rb b/runtime/Ruby/lib/antlr3/debug/event-hub.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/debug/event-hub.rb
rename to runtime/Ruby/lib/antlr3/debug/event-hub.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/debug/record-event-listener.rb b/runtime/Ruby/lib/antlr3/debug/record-event-listener.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/debug/record-event-listener.rb
rename to runtime/Ruby/lib/antlr3/debug/record-event-listener.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/debug/rule-tracer.rb b/runtime/Ruby/lib/antlr3/debug/rule-tracer.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/debug/rule-tracer.rb
rename to runtime/Ruby/lib/antlr3/debug/rule-tracer.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/debug/socket.rb b/runtime/Ruby/lib/antlr3/debug/socket.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/debug/socket.rb
rename to runtime/Ruby/lib/antlr3/debug/socket.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/debug/trace-event-listener.rb b/runtime/Ruby/lib/antlr3/debug/trace-event-listener.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/debug/trace-event-listener.rb
rename to runtime/Ruby/lib/antlr3/debug/trace-event-listener.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/dfa.rb b/runtime/Ruby/lib/antlr3/dfa.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/dfa.rb
rename to runtime/Ruby/lib/antlr3/dfa.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/dot.rb b/runtime/Ruby/lib/antlr3/dot.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/dot.rb
rename to runtime/Ruby/lib/antlr3/dot.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/error.rb b/runtime/Ruby/lib/antlr3/error.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/error.rb
rename to runtime/Ruby/lib/antlr3/error.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/main.rb b/runtime/Ruby/lib/antlr3/main.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/main.rb
rename to runtime/Ruby/lib/antlr3/main.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/modes/ast-builder.rb b/runtime/Ruby/lib/antlr3/modes/ast-builder.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/modes/ast-builder.rb
rename to runtime/Ruby/lib/antlr3/modes/ast-builder.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/modes/filter.rb b/runtime/Ruby/lib/antlr3/modes/filter.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/modes/filter.rb
rename to runtime/Ruby/lib/antlr3/modes/filter.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/profile.rb b/runtime/Ruby/lib/antlr3/profile.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/profile.rb
rename to runtime/Ruby/lib/antlr3/profile.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/recognizers.rb b/runtime/Ruby/lib/antlr3/recognizers.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/recognizers.rb
rename to runtime/Ruby/lib/antlr3/recognizers.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/streams.rb b/runtime/Ruby/lib/antlr3/streams.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/streams.rb
rename to runtime/Ruby/lib/antlr3/streams.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/streams/interactive.rb b/runtime/Ruby/lib/antlr3/streams/interactive.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/streams/interactive.rb
rename to runtime/Ruby/lib/antlr3/streams/interactive.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/streams/rewrite.rb b/runtime/Ruby/lib/antlr3/streams/rewrite.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/streams/rewrite.rb
rename to runtime/Ruby/lib/antlr3/streams/rewrite.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/task.rb b/runtime/Ruby/lib/antlr3/task.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/task.rb
rename to runtime/Ruby/lib/antlr3/task.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/template.rb b/runtime/Ruby/lib/antlr3/template.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/template.rb
rename to runtime/Ruby/lib/antlr3/template.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/template/group-file-lexer.rb b/runtime/Ruby/lib/antlr3/template/group-file-lexer.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/template/group-file-lexer.rb
rename to runtime/Ruby/lib/antlr3/template/group-file-lexer.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/template/group-file-parser.rb b/runtime/Ruby/lib/antlr3/template/group-file-parser.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/template/group-file-parser.rb
rename to runtime/Ruby/lib/antlr3/template/group-file-parser.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/template/group-file.rb b/runtime/Ruby/lib/antlr3/template/group-file.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/template/group-file.rb
rename to runtime/Ruby/lib/antlr3/template/group-file.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/template/parameter.rb b/runtime/Ruby/lib/antlr3/template/parameter.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/template/parameter.rb
rename to runtime/Ruby/lib/antlr3/template/parameter.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/test/call-stack.rb b/runtime/Ruby/lib/antlr3/test/call-stack.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/test/call-stack.rb
rename to runtime/Ruby/lib/antlr3/test/call-stack.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/test/core-extensions.rb b/runtime/Ruby/lib/antlr3/test/core-extensions.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/test/core-extensions.rb
rename to runtime/Ruby/lib/antlr3/test/core-extensions.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/test/functional.rb b/runtime/Ruby/lib/antlr3/test/functional.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/test/functional.rb
rename to runtime/Ruby/lib/antlr3/test/functional.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/test/grammar.rb b/runtime/Ruby/lib/antlr3/test/grammar.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/test/grammar.rb
rename to runtime/Ruby/lib/antlr3/test/grammar.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/token.rb b/runtime/Ruby/lib/antlr3/token.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/token.rb
rename to runtime/Ruby/lib/antlr3/token.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/tree.rb b/runtime/Ruby/lib/antlr3/tree.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/tree.rb
rename to runtime/Ruby/lib/antlr3/tree.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/tree/debug.rb b/runtime/Ruby/lib/antlr3/tree/debug.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/tree/debug.rb
rename to runtime/Ruby/lib/antlr3/tree/debug.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/tree/visitor.rb b/runtime/Ruby/lib/antlr3/tree/visitor.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/tree/visitor.rb
rename to runtime/Ruby/lib/antlr3/tree/visitor.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/tree/wizard.rb b/runtime/Ruby/lib/antlr3/tree/wizard.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/tree/wizard.rb
rename to runtime/Ruby/lib/antlr3/tree/wizard.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/util.rb b/runtime/Ruby/lib/antlr3/util.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/util.rb
rename to runtime/Ruby/lib/antlr3/util.rb
diff --git a/antlr-3.4/runtime/Ruby/lib/antlr3/version.rb b/runtime/Ruby/lib/antlr3/version.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/lib/antlr3/version.rb
rename to runtime/Ruby/lib/antlr3/version.rb
diff --git a/antlr-3.4/runtime/Ruby/rakefile b/runtime/Ruby/rakefile
similarity index 100%
rename from antlr-3.4/runtime/Ruby/rakefile
rename to runtime/Ruby/rakefile
diff --git a/antlr-3.4/runtime/Ruby/test/functional/ast-output/auto-ast.rb b/runtime/Ruby/test/functional/ast-output/auto-ast.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/ast-output/auto-ast.rb
rename to runtime/Ruby/test/functional/ast-output/auto-ast.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/ast-output/construction.rb b/runtime/Ruby/test/functional/ast-output/construction.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/ast-output/construction.rb
rename to runtime/Ruby/test/functional/ast-output/construction.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/ast-output/hetero-nodes.rb b/runtime/Ruby/test/functional/ast-output/hetero-nodes.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/ast-output/hetero-nodes.rb
rename to runtime/Ruby/test/functional/ast-output/hetero-nodes.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/ast-output/rewrites.rb b/runtime/Ruby/test/functional/ast-output/rewrites.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/ast-output/rewrites.rb
rename to runtime/Ruby/test/functional/ast-output/rewrites.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/ast-output/tree-rewrite.rb b/runtime/Ruby/test/functional/ast-output/tree-rewrite.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/ast-output/tree-rewrite.rb
rename to runtime/Ruby/test/functional/ast-output/tree-rewrite.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/debugging/debug-mode.rb b/runtime/Ruby/test/functional/debugging/debug-mode.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/debugging/debug-mode.rb
rename to runtime/Ruby/test/functional/debugging/debug-mode.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/debugging/profile-mode.rb b/runtime/Ruby/test/functional/debugging/profile-mode.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/debugging/profile-mode.rb
rename to runtime/Ruby/test/functional/debugging/profile-mode.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/debugging/rule-tracing.rb b/runtime/Ruby/test/functional/debugging/rule-tracing.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/debugging/rule-tracing.rb
rename to runtime/Ruby/test/functional/debugging/rule-tracing.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/delegation/import.rb b/runtime/Ruby/test/functional/delegation/import.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/delegation/import.rb
rename to runtime/Ruby/test/functional/delegation/import.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/lexer/basic.rb b/runtime/Ruby/test/functional/lexer/basic.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/lexer/basic.rb
rename to runtime/Ruby/test/functional/lexer/basic.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/lexer/filter-mode.rb b/runtime/Ruby/test/functional/lexer/filter-mode.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/lexer/filter-mode.rb
rename to runtime/Ruby/test/functional/lexer/filter-mode.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/lexer/nuances.rb b/runtime/Ruby/test/functional/lexer/nuances.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/lexer/nuances.rb
rename to runtime/Ruby/test/functional/lexer/nuances.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/lexer/properties.rb b/runtime/Ruby/test/functional/lexer/properties.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/lexer/properties.rb
rename to runtime/Ruby/test/functional/lexer/properties.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/lexer/syn-pred.rb b/runtime/Ruby/test/functional/lexer/syn-pred.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/lexer/syn-pred.rb
rename to runtime/Ruby/test/functional/lexer/syn-pred.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/lexer/xml.rb b/runtime/Ruby/test/functional/lexer/xml.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/lexer/xml.rb
rename to runtime/Ruby/test/functional/lexer/xml.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/main/main-scripts.rb b/runtime/Ruby/test/functional/main/main-scripts.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/main/main-scripts.rb
rename to runtime/Ruby/test/functional/main/main-scripts.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/parser/actions.rb b/runtime/Ruby/test/functional/parser/actions.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/parser/actions.rb
rename to runtime/Ruby/test/functional/parser/actions.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/parser/backtracking.rb b/runtime/Ruby/test/functional/parser/backtracking.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/parser/backtracking.rb
rename to runtime/Ruby/test/functional/parser/backtracking.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/parser/basic.rb b/runtime/Ruby/test/functional/parser/basic.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/parser/basic.rb
rename to runtime/Ruby/test/functional/parser/basic.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/parser/calc.rb b/runtime/Ruby/test/functional/parser/calc.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/parser/calc.rb
rename to runtime/Ruby/test/functional/parser/calc.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/parser/ll-star.rb b/runtime/Ruby/test/functional/parser/ll-star.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/parser/ll-star.rb
rename to runtime/Ruby/test/functional/parser/ll-star.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/parser/nuances.rb b/runtime/Ruby/test/functional/parser/nuances.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/parser/nuances.rb
rename to runtime/Ruby/test/functional/parser/nuances.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/parser/predicates.rb b/runtime/Ruby/test/functional/parser/predicates.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/parser/predicates.rb
rename to runtime/Ruby/test/functional/parser/predicates.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/parser/properties.rb b/runtime/Ruby/test/functional/parser/properties.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/parser/properties.rb
rename to runtime/Ruby/test/functional/parser/properties.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/parser/rule-methods.rb b/runtime/Ruby/test/functional/parser/rule-methods.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/parser/rule-methods.rb
rename to runtime/Ruby/test/functional/parser/rule-methods.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/parser/scopes.rb b/runtime/Ruby/test/functional/parser/scopes.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/parser/scopes.rb
rename to runtime/Ruby/test/functional/parser/scopes.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/template-output/template-output.rb b/runtime/Ruby/test/functional/template-output/template-output.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/template-output/template-output.rb
rename to runtime/Ruby/test/functional/template-output/template-output.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/token-rewrite/basic.rb b/runtime/Ruby/test/functional/token-rewrite/basic.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/token-rewrite/basic.rb
rename to runtime/Ruby/test/functional/token-rewrite/basic.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/token-rewrite/via-parser.rb b/runtime/Ruby/test/functional/token-rewrite/via-parser.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/token-rewrite/via-parser.rb
rename to runtime/Ruby/test/functional/token-rewrite/via-parser.rb
diff --git a/antlr-3.4/runtime/Ruby/test/functional/tree-parser/basic.rb b/runtime/Ruby/test/functional/tree-parser/basic.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/functional/tree-parser/basic.rb
rename to runtime/Ruby/test/functional/tree-parser/basic.rb
diff --git a/antlr-3.4/runtime/Ruby/test/unit/sample-input/file-stream-1 b/runtime/Ruby/test/unit/sample-input/file-stream-1
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/sample-input/file-stream-1
rename to runtime/Ruby/test/unit/sample-input/file-stream-1
diff --git a/antlr-3.4/runtime/Ruby/test/unit/sample-input/template-group b/runtime/Ruby/test/unit/sample-input/template-group
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/sample-input/template-group
rename to runtime/Ruby/test/unit/sample-input/template-group
diff --git a/antlr-3.4/runtime/Ruby/test/unit/sample-input/teststreams.input2 b/runtime/Ruby/test/unit/sample-input/teststreams.input2
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/sample-input/teststreams.input2
rename to runtime/Ruby/test/unit/sample-input/teststreams.input2
diff --git a/antlr-3.4/runtime/Ruby/test/unit/test-dfa.rb b/runtime/Ruby/test/unit/test-dfa.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/test-dfa.rb
rename to runtime/Ruby/test/unit/test-dfa.rb
diff --git a/antlr-3.4/runtime/Ruby/test/unit/test-exceptions.rb b/runtime/Ruby/test/unit/test-exceptions.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/test-exceptions.rb
rename to runtime/Ruby/test/unit/test-exceptions.rb
diff --git a/antlr-3.4/runtime/Ruby/test/unit/test-recognizers.rb b/runtime/Ruby/test/unit/test-recognizers.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/test-recognizers.rb
rename to runtime/Ruby/test/unit/test-recognizers.rb
diff --git a/antlr-3.4/runtime/Ruby/test/unit/test-scheme.rb b/runtime/Ruby/test/unit/test-scheme.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/test-scheme.rb
rename to runtime/Ruby/test/unit/test-scheme.rb
diff --git a/antlr-3.4/runtime/Ruby/test/unit/test-scope.rb b/runtime/Ruby/test/unit/test-scope.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/test-scope.rb
rename to runtime/Ruby/test/unit/test-scope.rb
diff --git a/antlr-3.4/runtime/Ruby/test/unit/test-streams.rb b/runtime/Ruby/test/unit/test-streams.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/test-streams.rb
rename to runtime/Ruby/test/unit/test-streams.rb
diff --git a/antlr-3.4/runtime/Ruby/test/unit/test-template.rb b/runtime/Ruby/test/unit/test-template.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/test-template.rb
rename to runtime/Ruby/test/unit/test-template.rb
diff --git a/antlr-3.4/runtime/Ruby/test/unit/test-tree-wizard.rb b/runtime/Ruby/test/unit/test-tree-wizard.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/test-tree-wizard.rb
rename to runtime/Ruby/test/unit/test-tree-wizard.rb
diff --git a/antlr-3.4/runtime/Ruby/test/unit/test-trees.rb b/runtime/Ruby/test/unit/test-trees.rb
similarity index 100%
rename from antlr-3.4/runtime/Ruby/test/unit/test-trees.rb
rename to runtime/Ruby/test/unit/test-trees.rb
diff --git a/tool/CHANGES.txt b/tool/CHANGES.txt
new file mode 100644
index 0000000..0770dcb
--- /dev/null
+++ b/tool/CHANGES.txt
@@ -0,0 +1,3564 @@
+ANTLR 3.5 Release
+January 4, 2013
+
+Terence Parr, parrt at cs usfca edu
+ANTLR project lead and supreme dictator for life
+University of San Francisco
+
+CHANGES
+
+January 4, 2013 -- release 3.5
+
+January 3, 2013
+
+* Improve error reporting and recovery for STRAY_BRACKET, fixes antlr/antlr3#42
+* Do not write output files if errors were reported, fixes antlr/antlr3#61
+* Fix AST operator on labeled set of terminals e.g. x=(A|B)^
+* Fix labeled set of terminals with rewrite e.g. x=(A|B) -> $x
+
+December 1, 2012
+
+* Added error msg for .. in parser
+
+September 17, 2012
+
+* Add Gokulakannan Somasundaram's C++ target based upon C target.
+  Use language=Cpp in options. It's a header-only library, runtime/Cpp/include,
+  so installation is not required.
+
+September 16, 2012
+
+* Python 3.3 target added by Benjamin Wolf based upon Python 2 target
+  https://github.com/antlr/antlr3/pull/23
+
+September 15, 2012
+
+* LookaheadStream bug fixes;
+  https://github.com/antlr/antlr3/pull/21
+
+* Pulled "Fix Python handling of syntactic predicates"
+  https://github.com/antlr/antlr3/pull/33
+
+July 15, 2012
+
+* GUnit improvements
+  https://github.com/antlr/antlr3/pull/27
+
+May 2012:
+
+* ANTLR3 update of ObjC runtime to go with latest ST4-ObjC
+  https://github.com/antlr/antlr3/pull/17
+
+August 9, 2012
+
+* Provide Lexer get end of file method so people can override it.
+
+November 25, 2011
+
+* stderr was not tested correctly in gunit examineExecResult
+
+October 27, 2011
+
+* Dieter Habelitz reported a bug in Java code gen with synpreds. Labels were
+  being carried from the grammar into synpreds but weren't typed properly (they
+  were "void x=null;" for x=ruleref labels).
+
+October 25, 2011
+
+* (Sam) Rule.setOption didn't do memoize option right.
+* (Sam) Fix labels in synpreds
+* (Sam) Fix input index for NoViableAltException during inline prediction
+* (Sam) Fix label aliasing errors in cases like (x=y|x=z)
+
+August 10, 2011
+
+* (Sam) fix stack overflow in semantic context analysis
+
+July 30, 2011
+
+* added range check to BaseTree.insertChild()
+
+July 18, 2011 -- release 3.4
+
+* Added tree method insertChild(int i, Object t).
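+
+  A minimal usage sketch (LIST and ID stand in for token-type constants from a
+  generated recognizer; they are placeholders here):
+
+    // import org.antlr.runtime.CommonToken;
+    // import org.antlr.runtime.tree.CommonTree;
+    CommonTree list = new CommonTree(new CommonToken(LIST, "LIST"));
+    list.addChild(new CommonTree(new CommonToken(ID, "b")));
+    list.insertChild(0, new CommonTree(new CommonToken(ID, "a")));  // "a" becomes child 0, "b" shifts right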
+
+July 14, 2011
+
+* Added BaseTree.freshenParentAndChildIndexesDeeply() to recursively
+  walk tree and set ptrs.
+
+July 6, 2011
+
+* reset() for token stream didn't skip initial off-channel tokens.
+
+July 5, 2011
+
+* Sam fixes rare infinite loop upon error in nextToken().
+* rewrites weren't pulled from syntactic predicates.
+
+June 29, 2011
+
+* Sam noticed CommonToken.getText() cached substring pulled from input, which
+  defeated purpose of pointing into input char array.  Altered to not cache.
+  Should reduce memory footprint.
+
+June 24, 2011
+
+* empty alts with actions didn't have EPSILON dummy alt node.
+
+June 19, 2011
+
+* Udo noticed that Parser.getMissingSymbol() didn't set the invented token's input
+
+June 8, 2011
+
+* Added inContext(String context) back into TreeParser.
+
+April 21, 2011
+
+* Updated for ST v4.0.2 (setting iterateAcrossValues = true as instance var)
+* Needed throws condition for delegatedRules.
+
+April 20, 2011 (Sam Harwell)
+
+* Implement the 'throwsSpec' feature of parser rules for the Java target
+* Improve algorithm for SemanticContext Boolean predicate reduction
+
+April 13, 2011
+
+* Unmangled region names in STViz hierarchy tree display.
+* Removed the conversion timeout mechanism again
+
+April 11, 2011
+
+* Added option -Xconversiontimeout back in.  Turns out we hit NFA conversion
+  time landmine occasionally with huge grammars; fails over to backtracking
+  (if turned on) if it can't make DFA.
+
+March 29 - April 10, 2011
+
+* Uses ST v4 now!!!  Massive change.  Only updated Java target so far.
+  Ripped out ST v3 usage to remove indirect dependency on ANTLR v2.
+
+March 28, 2011
+
+* Sam Harwell ported all v2 grammars to v3!
+
+March 3, 2011
+
+* left-recursion pattern off until all targets catch up
+
+* ANTLRCore.sti no longer used; removed from all targets.
+
+* Adding type, text terminal options
+
+* Replaced hetero arg with terminalOptions arg in all templates that reference hetero
+  (this is the class name / node type used for TOKEN<NODETYPE> references
+  in grammar).  Widespread but trivial changes to all targets.  hetero is
+  now terminalOptions.node.  Can also get terminalOptions.type and
+  terminalOptions.text from dictionary of options.
+
+* Fixed misspelling of license in BSD license headers
+
+March 3, 2011
+
+* Add tree, getTree() to ParserRuleReturnScope to do away with specific ones like:
+    public static class rewrite_template_args_return extends ParserRuleReturnScope {
+        CommonTree tree;
+        public Object getTree() { return tree; }
+    };
+  Removed these special classes when the only return value is the tree; kept them
+  if they have user-defined "returns" values.
+
+February 26, 2011
+
+* All finally {} have comment now to suppress warning.
+
+* removed ; from {;} blank method
+
+* Added @SuppressWarnings({"all"}) to front of each
+  generated class.
+
+* -print wasn't always showing ^ and ! in grammar
+
+* Added java-left-recur/Java.g example.
+
+* left-recursion pattern detection handles backtracking mode now
+
+February 25, 2011
+
+* -Xmaxinlinedfastates went to 60 from 10 for forcing prediction in left-
+  recursive expression rules to stay in rule; preds use a parameter.
+
+* Trees now know where they came from (start..stop tokens); todo: use this for better error handling.
+
+* Got immediate left-recursion working for rules. Added TestLeftRecursion.java
+
+February 21, 2011
+
+* Fixed http://www.antlr.org/jira/browse/ANTLR-437 thanks to Vladislav Kuzkokov.
+  added unit test.
+
+February 17, 2011
+
+* Add -language L option to override language=L option in grammar.  Same
+  grammar can yield multiple parsers in different languages now.
+
+February 10, 2011
+
+* Added method to generated Java code to return the array of delegates; e.g.,
+    import Sub1, Sub2;
+  yields:
+
+    public Parser[] getDelegates() {
+        return new Parser[] {gSub1, gSub2};
+    }
+
+January 25, 2011
+
+* Improve error messages for no viable alt parse exceptions
+
+January 20, 2011
+
+* TokenRewriteStream had dead code; also updated insertBefore followed by
+  replace or delete.  If input is abc and I did insertBefore(2,"y"), where
+  'c' is index 2, and then did delete(2), the previously defined functionality
+  was to ignore the insert.  That's weird; fixed to keep the insert.  Also,
+  delete is the special case of replace where text==null:
+  	  D.i-j.u D.x-y.v	| boundaries overlap => combine to max(min)..max(right)
+
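+  A minimal sketch of the new insertBefore-then-delete behavior (SimpleLexer is a
+  placeholder for any lexer that splits "abc" into one-character tokens):
+
+    // import org.antlr.runtime.*;
+    TokenRewriteStream tokens =
+        new TokenRewriteStream(new SimpleLexer(new ANTLRStringStream("abc")));
+    tokens.fill();                          // buffer all tokens
+    tokens.insertBefore(2, "y");            // queue an insert before 'c' (index 2)
+    tokens.delete(2);                       // delete 'c'; the queued insert is kept now
+    System.out.println(tokens.toString());  // "aby" (previously the insert was dropped)
+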
+December 12, 2010
+
+* Send couldBacktrack now to enterDecision in debug protocol
+
+December 4, 2010
+
+* TreeWizard ctor needed a default tree adaptor.
+
+November 29, 2010 -- ANTLR v3.3
+
+November 23, 2010
+
+* CodeGenerator.loadLanguageTarget is now static and available to load
+  targets so we can ask them questions during analysis.
+
+* Fixed and added unit test
+    http://www.antlr.org/jira/browse/ANTLR-370
+    http://www.antlr.org/jira/browse/ANTLR-375
+
+November 23, 2010
+
+* Added source name to syntax error msgs
+
+October 20, 2010
+
+Added boolean couldBacktrack to enterDecision in dbg interface. Breaks AW
+interface and other tools! [BREAKS BACKWARD COMPATIBILITY]
+
+October 17, 2010
+
+* Missing -trace in help msg
+
+November 22, 2010
+
+* Added GrammarAST: public int getCharPositionInLine() { return getColumn()-1; }
+  and Grammar.getHasDelegates() for C# guys
+
+October 16, 2010
+
+* Doesn't write profile data to file anymore; emits decision data to stderr
+
+October 14, 2010
+
+* Make OrderedHashSet have deterministic iteration
+
+July 20, 2010
+
+* greedy=true option shuts off nondeterminism warning.
+
+* code gen for AST and -profile didn't compile. had useless line:
+
+             proxy.setTreeAdaptor(adap);
+
+
+July 17, 2010
+
+* Removed conversion timeout failsafe; no longer needed.
+
+* Stats updated to be correct for -report.
+
+June 10, 2010
+
+* added toArray in OrderedHashSet to make addAll calls get same order for DFA edges and possibly code gen in some areas.
+
+June 5, 2010
+
+* Added -Xsavelexer
+
+May 24, 2010
+
+* lexerStringRef was missing elementIndex attribute. i='import' didn't work
+  in lexer.  Altered all target stg files.  Set in codegen.g
+
+* output=AST, rewrite=true for tree rewriters was broken. nextNode for subtree
+  streams didn't dup the node; it gave the whole tree back.
+
+March 17, 2010
+
+* Added MachineProbe class to make it easier to highlight ambig paths in
+  grammar.  More accurate than DecisionProbe; retrofitted from v4.
+
+February 20, 2010
+
+* added range to TokenStream and implementors:
+    /** How far ahead has the stream been asked to look?  The return
+     *  value is a valid index from 0..n-1.
+     */
+    int range();
+
+* added new method to get subset of tokens to buffered token streams:
+	public List get(int start, int stop);
+
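+  For example (sketch; tokens is a buffered token stream and tree a CommonTree
+  whose boundaries are set):
+
+    List slice = tokens.get(tree.getTokenStartIndex(), tree.getTokenStopIndex());
+    int maxLookedAt = tokens.range();  // highest token index the parser has inspected
+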
+February 15, 2010
+
+* Refs to other tokens in a lexer rule didn't get their line/charpos right.
+  Altered Java.stg.
+
+January 31, 2010
+
+* Creating a token from another token didn't copy the input stream in CommonToken.
+  Makes sense to copy it too; I don't think anybody relies on it being null after
+  a copy. We might want to know where the token came from.
+
+January 26, 2010
+
+* TreeParser.getMissingSymbol() uses CommonTree now instead of
+  adaptor.create()
+
+December 8, 2009
+
+* Instead of sharing Token.EOF_TOKEN, I'm now creating EOF tokens so I can set the char position for better error messages.
+
+December 5, 2009
+
+* Fixed bug in TreeVisitor when rewrites altered number of children. Thanks to Chris DiGiano.
+
+* Added new buffered on-demand streams: BufferedTokenStream. Renamed CommonTokenStream to LegacyCommonTokenStream and made the new CommonTokenStream a subclass of BufferedTokenStream.
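+
+  Existing code that builds a CommonTokenStream keeps working; to keep the old
+  fully-buffered behavior explicitly, swap in the legacy class (MyLexer and
+  input are placeholders for a generated lexer and its CharStream):
+
+    TokenStream onDemand = new CommonTokenStream(new MyLexer(input));        // buffers lazily now
+    TokenStream oldStyle = new LegacyCommonTokenStream(new MyLexer(input));  // old behavior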
+
+November 3, 2009
+
+* Added org.antlr.runtime.UnbufferedTokenStream. Was trivial and works!
+
+November 1, 2009
+
+* Couldn't properly reuse parser state; ctor reset the state; fixed.
+	Parser(TokenStream input, RecognizerSharedState state)
+
+* LookaheadStream<T> used some hardcoded Object return types for LT, etc...
+  uses T now.
+
+September 23, 2009 -- ANTLR v3.2
+
+September 21, 2009 [Jim Idle]
+
+* Added new options for tool invocation to control the points at which the code
+  generator tells the target code to use its equivalent of switch() instead of
+  inline ifs.
+      -Xmaxswitchcaselabels m don't generate switch() statements for dfas
+                              bigger  than m [300]
+      -Xminswitchalts m       don't generate switch() statements for dfas smaller
+                              than m [3]
+* Upgraded -X help output to include the new options and provide the default
+  settings, as well as units for those settings that need them (see the example
+  invocation at the end of this entry).
+
+* Changed the C target to override the defaults for the new settings to
+  generate the most optimizable C code from the modern C compiler's point of
+  view. This essentially means always using switch statements unless there
+  is absolutely no other option. C defaults are to use 1 for the minimum and
+  3000 for the maximum number of alts that trigger switch(). This results in
+  object code that is 30% smaller and up to 20% faster.
+
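+  For example, a tool invocation that just restates the Java defaults listed above:
+
+    java org.antlr.Tool -Xmaxswitchcaselabels 300 -Xminswitchalts 3 T.g
+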
+April 23, 2009
+
+* Added reset to TreeNodeStream interface.
+
+April 22, 2009
+
+* Fixed ANTLR-374.  Was caused by moved of grammars. %foo() stuff didn't work
+
+April 9, 2009
+
+* .g and .g3 file extensions work again.
+* introduced bug in 3.1.3: gives exception not error msg upon
+  missing .g file
+
+March 26, 2009
+
+* Made ctor in TreeRewriter and TreeFilter call this not super.
+
+March 21, 2009
+
+* Added ctor to RecognizerSharedState to allow cloning it.
+
+March 17, 2009 -- ANTLR v3.1.3
+
+* improved ANTLRv3.g to handle <...> element options
+
+March 15, 2009
+
+* Fixed ANTLR-389. Strip didn't ignore options in subrules; also seemed
+  to demand stdin.
+
+March 15, 2009
+
+* ANTLR always sorts the incoming grammar list by dependency.  For example,
+  if W.g depends on tokens from P.g then P.g is done first even if
+  W.g is mentioned first on the command line.  It does not ignore any files you
+  specify on the command line.  If you do *.g and that includes some
+  imported grammars, it will run ANTLR on them.
+
+* -make option prevents ANTLR from running on P.g if P older than
+  generated files.
+
+* Added org.antlr.tool.GrammarSpelunker to build a faster dependency
+  checker (what grammars depend on etc...).  Totally independent of any
+  ANTLR code; easy to pull into other tools.
+
+* Added org.antlr.misc.Graph, a general graph with nodes
+  containing an Object payload. It knows how to do a topological sort
+  on the nodes.
+
+March 10, 2009
+
+* Added associativity token option to support upcoming special expression
+  parsing. Added rule option strategy=precedence also
+
+March 1, 2009
+
+* Changed ANTLRWorks debug port from 49153 to 49100.  Apparently we changed the port in
+  ANTLRWorks to 49100 in 1.2 but forgot to do so in the ANTLR targets.
+
+START CHANGES FOR TREE FILTER MODE (pulled from dev branch)
+
+This feature will be announced in 3.2, but I am integrating it from my development branch into the mainline now so that target developers have a chance to implement it. We might release a 3.1.3 bug-fix release before 3.2.
+
+* CommonTreeNodeStream -> BufferedTreeNodeStream.  Now,
+  CommonTreeNodeStream is completely unbuffered unless you are
+  backtracking.  No longer making a list of all nodes before tree parsing.
+
+* Added tree grammar filter=true mode.
+
+  Altered templates:
+	Java.stg: added filterMode to genericParser and treeParser.
+	This required a change to ANTLRCore.sti
+	Defined a default parameter in treeParser to set the superclass
+	to TreeFilter for tree grammar with filter=true. It sets
+	superclass to TreeRewriter if filter=true and output=AST.
+  Other than that, I only had to change ANTLR itself a little bit.
+  Made filter mode valid for tree grammars and have it automatically set
+  the necessary elements: @synpredgate, backtrack=true, rewrite=true
+  (if output=AST).  Added error message for detecting conflicting
+  options.
+
+* Added misc.FastQueue and TestFastQueue:
+  A queue that can dequeue and get(i) in O(1) and grow arbitrarily large.
+  A linked list is fast at dequeue but slow at get(i).  An array is
+  the reverse.  This is O(1) for both operations.
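+
+  The idea, roughly (a sketch of the approach, not the actual FastQueue source;
+  the real class also releases consumed elements so the buffer doesn't grow forever):
+
+    class ArrayBackedQueue<T> {                  // backed by java.util.ArrayList
+        private final java.util.ArrayList<T> data = new java.util.ArrayList<T>();
+        private int head = 0;                    // index of next element to dequeue
+        public void add(T o) { data.add(o); }
+        public T get(int i)  { return data.get(head + i); }  // O(1)
+        public T remove()    { return data.get(head++); }    // O(1)
+        public int size()    { return data.size() - head; }
+    }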
+
+* Added tree.TreeIterator, a generator that walks a doubly linked tree.
+  The nodes must know what index they are. It's an Iterator but
+  remove() is not supported. Returns navigation nodes always:
+  UP, DOWN, EOF.
+
+* Added misc.LookaheadStream: A lookahead queue that knows how
+  to mark/release locations in the buffer for backtracking purposes.
+  I hope to use for both tree nodes and tokens.  Just implement
+  nextElement() to say how to get next node or token.
+
+END CHANGES FOR TREE FILTER MODE
+
+February 23, 2009 -- ANTLR v3.1.2
+
+February 18, 2009
+
+* Added org.antlr.tool.Strip (reads from file arg or stdin, emits to stdout)
+  to strip actions from a grammar.
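+
+  Example invocation (stripped grammar goes to stdout):
+
+    java org.antlr.tool.Strip T.g > T-noactions.g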
+
+February 4, 2009
+
+* Added CommonTree.setUnknownTokenBoundaries().  Sometimes we build trees
+  in a grammar and some of the token boundaries are not set properly.
+  This only matters if you want to print out the original text associated
+  with a subtree.  Check out this rule:
+
+	postfixExpression
+	    :   primary ('.'^ ID)*
+	    ;
+
+  For a.b.c, we get a '.' that does not have the token boundaries set.
+  ANTLR only sets token boundaries for subtrees returned from a rule.
+  SO, the overall '.' operator has the token boundaries set from 'a'
+  to 'c' tokens, but the lower '.' subtree does not get the boundaries
+  set (they are -1,-1).  Calling setUnknownTokenBoundaries() on
+  the returned tree sets the boundaries appropriately according to the
+  children's token boundaries.
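+
+  Usage sketch (ExprParser, parser, and tokens are placeholders for your generated
+  parser and its token stream):
+
+    ExprParser.postfixExpression_return r = parser.postfixExpression();
+    CommonTree tree = (CommonTree) r.getTree();
+    tree.setUnknownTokenBoundaries();                   // fill in the -1,-1 boundaries
+    CommonTree inner = (CommonTree) tree.getChild(0);   // the lower '.' subtree for a.b.c
+    String text = tokens.toString(inner.getTokenStartIndex(),
+                                  inner.getTokenStopIndex());  // "a.b"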
+
+January 22, 2009
+
+* fixed to be listeners.add(listener); in addListener() of DebugEventHub.java
+
+January 20, 2009
+
+* Removed runtime method: mismatch in BaseRecognizer and TreeParser.  Seems
+  to be unused.  Had to override method recoverFromMismatchedToken() in
+  TreeParser to get rid of single token insertion and deletion for
+  tree parsing because it makes no sense with all of the up-and-down nodes.
+
+* Changed JIRA port number from 8888 to no port spec (aka port 80) and all
+  refs to it in this file.
+
+* Changed BaseTree to Tree typecast in getChild and toStringTree() and
+  deleteChild() to make more generic.
+
+December 16, 2008
+
+* Added -verbose cmd-line option and turned off standard header
+  and list of read files.  Silent now without -verbose.
+
+November 24, 2008
+
+* null-ptr protected getParent and a few others.
+
+* Added new ctor to CommonTreeNodeStream for walking subtrees.  Avoids
+  having to make new serialized stream as it can reuse overall node stream
+  buffer.
+
+November 20, 2008
+
+* Updated BaseTest to isolate tests better.
+
+November 17, 2008
+
+* BaseTreeAdaptor.getType() was hosed; always gave 0.  Thanks to Sam Harwell.
+
+November 8, 2008
+
+* Added methods to BaseRecognizer:
+  public void setBacktrackingLevel(int n) { state.backtracking = n; }
+  /** Return whether or not a backtracking attempt failed. */
+  public boolean failed() { return state.failed; }
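+
+  E.g., a driver can attempt a rule speculatively (sketch only; assumes a grammar
+  generated with backtrack=true, a rule named expr, and elides exception handling):
+
+    int marker = tokens.mark();
+    parser.setBacktrackingLevel(1);   // actions are gated off while > 0
+    parser.expr();
+    boolean ok = !parser.failed();
+    parser.setBacktrackingLevel(0);
+    tokens.rewind(marker);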
+
+November 5, 2008
+
+* Tweaked traceIn/Out to say "fail/succeeded"
+
+* Bug in code gen for tree grammar wildcard list label x+=.
+
+* Use of backtrack=true anywhere in grammar causes backtracking sensitive
+  code to be generated.  Actions are gated etc...  Previously, that only
+  happened when a syntactic predicate appeared in a DFA.  But, we need
+  to gate actions when backtracking option is set even if no decision
+  is generated to support filtering of trees.
+
+October 25, 2008
+
+* Fixed debug event socket protocol to allow spaces in filenames.
+
+* Added TreeVisitor and TreeVisitorAction to org.antlr.runtime.tree.
+
+October 22, 2008
+
+* Added inContext() to TreeParser.  Very useful for predicating
+  tree grammar productions according to context (their parent list).
+  Added new TestTreeContext unit tests (15).
+
+    /** Check if current node in input has a context.  Context means sequence
+     *  of nodes towards root of tree.  For example, you might say context
+     *  is "MULT" which means my parent must be MULT.  "CLASS VARDEF" says
+     *  current node must be child of a VARDEF and whose parent is a CLASS node.
+     *  You can use "..." to mean zero-or-more nodes.  "METHOD ... VARDEF"
+     *  means my parent is VARDEF and somewhere above that is a METHOD node.
+     *  The first node in the context is not necessarily the root.  The context
+     *  matcher stops matching and returns true when it runs out of context.
+     *  There is no way to force the first node to be the root.
+     */
+    public boolean inContext(String context) {...}
+
+* Added 3 methods to Tree interface [BREAKS BACKWARD COMPATIBILITY]
+
+    /** Is there is a node above with token type ttype? */
+    public boolean hasAncestor(int ttype);
+
+    /** Walk upwards and get first ancestor with this token type. */
+    public Tree getAncestor(int ttype);
+
+    /** Return a list of all ancestors of this node.  The first node of
+     *  list is the root and the last is the parent of this node.
+     */
+    public List getAncestors();
+
+October 21, 2008
+
+* Updated unit tests to be correct for \uFFFE->\uFFFF change
+
+* Made . in tree grammar look like ^(. .*) to analysis, though ^(. foo)
+  is illegal (can't have . at root). Wildcard is subtree or node.
+  Fixed bugs:
+    http://www.antlr.org/browse/ANTLR-248
+    http://www.antlr.org/browse/ANTLR-344
+
+October 1, 2008 -- ANTLR v3.1.1
+
+September 8, 2008
+
+* Labels on tokens, rules carry into synpreds now so semantic predicates work.
+  This didn't work since labels were stripped in the synpred and they weren't
+  defined in the generated method.
+
+  a : x=A z=a {$x.text.equals($z.text)}? A
+    | y=A a A A
+    ;
+
+September 3, 2008
+
+* Made a REV static variable in Tool so that we can change the rev for
+  daily builds.
+
+* Made \uFFFF a valid character. Token types are 32-bit clean using -1
+  not 0x0000FFFF as -1 so it should be okay.  Label.java:
+    public static final int MIN_CHAR_VALUE = '\u0000';
+    public static final int MAX_CHAR_VALUE = '\uFFFF';
+
+August 30, 2008
+
+* Changed messages in en.stg so that TOKEN_NONDETERMINISM correctly
+  indicates when actions hid semantic predicates.
+
+August 15, 2008
+
+* Tweaked build properties and build.xml
+
+August 13, 2008
+
+* Fixed ANTLR-314; 3.1 introduced a problem with list labels +=
+
+August 12, 2008 -- ANTLR v3.1
+
+* Added JavaScript target
+
+August 7, 2008
+
+* An NFA target of an EOF predicate transition in a DFA caused an exception in
+  getPredicatesPerNonDeterministicAlt().
+
+* Kay Roepke found a nasty bug when debugging AST-constructing
+  composite recognizers.  If the input state was null to the constructor,
+  the super class constructor created a new parser state object.
+  Later, though, we passed the argument state, not this.state,
+  to the delegate constructors, forcing them to use a different
+  state object!  Changed state to this.state in Dbg.stg constructors.
+
+* Ack. messed up debug/AST.  Have to set proxy's tree adaptor; it's
+  a circular ref.  Just an ASTDbg.stg change.
+
+August 4, 2008
+
+* superClass works now for lexers
+
+* Made Grammar.defineNamedAction propagate header actions down to all
+  delegates if root grammar; regardless of lexer/parser scope.
+
+* Rejiggered AST templates to propagate changes to tree adaptor
+  for delegate grammars. Fixes ANTLR-302
+
+August 4, 2008
+
+* FOLLOW set computations altered constant FOLLOW bit sets.
+
+* Added (...) around all predicate evaluations.
+
+* Extra init code for tree parser nonrewrite mode removed.
+
+* Added empty child list check in becomeRoot
+
+August 3, 2008
+
+* Was using RuleReturnScope not Rulename_return for list labels in tree
+  parser.
+
+* Didn't set _last in tree parser for rule ref track stuff (rewrite=true)
+
+August 2, 2008
+
+* Benjamin found another rewrite engine bug.
+
+July 30, 2008
+
+* CommonTreeNodeStream / CommonTokenStream did not reset properly.
+
+July 29, 2008
+
+* Fixed another bug in TokenRewriteStream; didn't like inserts after end.
+
+July 28, 2008
+
+* Fixed bug in TokenRewriteStream.toString(start,stop); it ignored
+  parameters. ;)
+
+July 17, 2008
+
+* allow qualified type names in hetero <...> options like T<a.b.c.Node>
+
+July 5, 2008
+
+* treeLevel not set for setBlock alts; added unit test
+
+July 3, 2008
+
+* Fixed ANTLR-267. parse tree added nodes during backtracking and
+  cyclic DFAs.  tracks hidden tokens too now. Added toInputString() to
+  get text back including hidden tokens.  Shows <epsilon> for rules
+  that match nothing.
+
+June 26, 2008
+
+* Added gParent ptr that points to immediate parent grammar. E.g.,
+    // delegators
+    public MParser gM;
+    public M_S gS;
+    public M_S gParent = gS; // NEW
+
+* Grammar imports didn't set all the delegate pointers...lots of imported
+  grammars would cause a null ptr exception.  Fixes ANTLR-292.
+
+June 25, 2008
+
+* List labels in tree construction didn't always track the tree; sometimes
+  had a rule result structure.
+
+June 4, 2008
+
+* Improved unit testing so that each test suite executes and builds grammars
+  in a separate temporary directory. This means they can execute concurrently.
+  There also seemed to be a problem with my class path during execution. Moved
+  tmpdir to be ahead of the standard CLASSPATH.
+
+* By virtue of an improvement to StringTemplate, output newlines
+  in generated files should be normalized to whatever your host uses.
+
+June 3, 2008
+
+* Restrict legality of grammar options; for example you cannot use output option
+  in lexer anymore.
+
+June 2, 2008
+
+* Throw illegal arg exception upon invalid TokenRewriteStream ops. Rewrote
+  core of engine.  Slightly different operation.  Added many more unit tests.
+
+3.1b1 - May 20, 2008
+
+May 11, 2008
+
+* rewrite=true, output=AST for tree grammar was not working.  Altered trees were not
+  propagated back up the rule reference chain.  Required a number of mods to
+  ASTTreeParser.stg.  Added unit tests.
+
+May 10, 2008
+
+* [BACKWARD INCOMPATIBLE if you override match()]
+  I had turned off single token insertion and deletion because I could not figure
+  out how to work with trees and actions. Figured that out, so I turned it back on.
+  match() returns Object matched now (parser, tree parser) so we can set labels
+  on token refs properly after single token ins/del error recovery.  Allows actions
+  and tree construction to proceed normally even though we recover in the middle of
+  an alternative.  Added methods for conjuring up missing symbols: getMissingSymbol().
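+
+  The generated pattern now looks roughly like this (a sketch, not verbatim tool
+  output; the FOLLOW set name is illustrative):
+
+    Token id = (Token) match(input, ID, FOLLOW_ID_in_decl25);
+    // even if ID was missing and a token was conjured up during recovery,
+    // 'id' refers to that token, so later uses like id.getText() still work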
+
+* refactored BaseRecognizer error handling routines
+
+* Single token error recovery was not properly taking into consideration EOF.
+
+* ANTLR no longer tries to recover inline in tree parsers using single node deletion or insertion; it throws an exception instead.  Trees should be well formed as they are not created by users.
+
+* Added empty constructors to the exception classes that did not have them so that ANTLRWorks can create the exceptions.
+
+* Made debug tree adaptor deal with tokens conjured up during error recovery.
+
+* Removed extra location() debug element that was emitted.
+
+May 8, 2008
+
+* ANTLR didn't update line/col to DFA map for AW.
+
+May 6-7, 2008
+
+* Insufficiently covered (with semantic predicates) alt warnings are now emitted before
+  nondeterminisms so it's clear the nondeterminism is a result of insufficient preds.
+
+* Improved insufficiently covered alt warnings from:
+    warning(203): T.g:2:3: The following alternatives are insufficiently covered with predicates: 1
+  to:
+    warning(203): T.g:2:3: Input B is insufficiently covered with predicates at
+    locations: alt 1: line 3:15, alt 2: line 2:9
+
+* Improved nondeterminism warning to have:
+  Semantic predicates were present but were hidden by actions.
+parser grammar U;
+a : (A B)? ;
+b : X a {p1}? A B | Y a {a1} {p2}? A B | Z a ;
+
+To create the prediction DFA for the optional subrule in 'a', ANTLR must find all
+references to 'a' to determine what can follow. A B can follow 'a' in the first two
+alts of rule 'b'.  To resolve the conflict between matching A B immediately in the
+subrule and exiting rule 'a' to match it in 'b', ANTLR looks for predicates. In this
+case, there are two predicates that indicate the semantic context in which the
+surrounding alternatives are valid. The problem is that one of the predicates is
+hidden by an action.
+
+It took me 1.5 days, but I've finally gotten ANTLR to properly track the
+insufficiently covered alternatives. Further, I have gotten it to tell you precisely
+where the uncovered predicates are, even if they are simply hidden by actions. I have
+also updated all of the nondeterminism warnings so that they tell you if there was a
+predicate but one hidden by an action (this could be a separate condition from
+insufficiently covered predicates). Here are your messages from ANTLR:
+
+ANTLR Parser Generator  Version 3.1b1 (??)  1989-2007
+warning(203): U.g:2:5: Input such as "A B" is insufficiently covered with predicates at locations: alt 2: line 3:38 at B
+Semantic predicates were present but were hidden by actions.
+warning(200): U.g:2:5: Decision can match input such as "A B" using multiple alternatives: 1, 2
+As a result, alternative(s) 2 were disabled for that input
+Semantic predicates were present but were hidden by actions.
+
+* Fixed issue where
+r41
+   : (INT -> INT) ( ('+' i=INT) -> ^($i $r41) )* ';'
+   ;
+still warned about $r41 being ambig.
+
+* actions are now added to the NFA.
+
+* Fixed ANTLR-222.  ANTLR now ignores preds after actions.
+
+May 5, 2008
+
+* Fixed ANTLR-235 by backing out a change from 12-31-07.
+
+* Fixed ANTLR-249; I include semantic context again in closure busy signal.
+
+May 3, 2008
+
+* Fixed ANTLR-208.  Looks in library or in -o output path.  antlr -o foo T.g U.g where U needs T.tokens won't work unless we look in foo too.  fixed.
+
+* Refactored assign.types.g to move methods to a class called AssignTokenTypesBehavior.
+
+* Fixed ANTLR-207.  Lexers importing vocabs didn't see ';'=4 type aliases in .tokens.
+
+* Fixed ANTLR-228.  Couldn't use wildcard in alts with AST rewrites.
+
+May 2, 2008
+
+* Fixed ANTLR-230; can use \' now in action.
+
+* Scope attributes no longer have a stack depth check on front.  If you ref $r::a when r has not invoked you, then you get an exception not a default value.  Back to the way 3.0.1 worked.
+
+* $channel was a global variable in 3.0.1, unlike $type, which did not affect an invoking lexer rule.  Now it's local too.  Only $type and $channel are ever set with regularity.  Setting those should not affect an invoking lexer rule, so the following should work:
+
+  X : ID WS? '=' ID ;  // result is X on normal channel
+  WS : ' '+ {$channel = HIDDEN; } ;
+
+  STRING : '"' (ESC|.)* '"' ;  // result is STRING not ESC
+
+  FLOAT : INT '.' INT? ; // should be FLOAT
+  INT : Digit+ ;
+  fragment
+  Digit : '0'..'9' ;
+
+* Fixed bug in interpreter regarding (...)* loops
+
+May 1, 2008
+
+* Fixed ANTLR-202.  These now give warnings about ambig ref to $a.
+    a : ID a -> $a | INT ;
+  and
+    a : A a {$a.text} | B ;
+
+April 30, 2008
+
+* Fixed ANTLR-237. updated -depend to know about imported grammars.
+$ java org.antlr.Tool -depend -lib foo T.g
+  ANTLR Parser Generator  Version 3.1b1 (??)  1989-2007
+  T.g: foo/Java.g
+  TParser.java : T.g
+  T.tokens : T.g
+  TLexer.java : T.g
+  T_Java : T.g
+
+April 29, 2008
+
+* Fixed ANTLR-217; scope A,B,C; didn't work
+
+* Fixed ANTLR-224; ! or ^ on item in alt with rewrite gave exception
+
+* Added token options to terminals: ID<node=V; foo="Big bob"> etc...
+  node is default so you can do ID<V> for hetero tree types. most common.
+
+April 17, 2008
+
+* Use default msg if unknown recog type in getErrorMessage():
+	String msg = e.getMessage();
+
+April 14, 2008
+
+* %x.y = foo; was not working in @members section
+
+March 29, 2008
+
+* Import couldn't handle A imports B imports C.
+
+March 27, 2008
+
+* Added get/setInputStream to Token interface and affected classes.
+
+February 26, 2008
+
+* made fillBuffer public in CommonTreeNodeStream so we can add trees
+  to stream for interactive interpreters.
+
+February 14, 2008
+
+* Fixed a bug in the code generation where tree level 0 was used
+  no matter what to rewrite trees in tree grammars. added unit test
+
+* Fixed ANTLR-221. exceptions were generated when using
+  AST construction operators and no output=AST option.
+
+February 13, 2008
+
+* Improved error msgs for unreachable alts and tokens.
+
+February 11-12, 2008
+
+* Fixed ANTLR-219.
+  It looks like the AST construction code for sets was totally messed up.
+  This was for not only the new tree parser AST construction, but also
+  the regular tree construction for parsers. I had to introduce templates
+  in the ASTTreeParser.stg file to deal with this. added unit tests:
+  TestTreeGrammarRewriteAST.testSetMatchNoRewrite(),
+  testSetMatchNoRewriteLevel2(), testSetMatchNoRewriteLevel2Root().
+  Had to add template matchRuleBlockSet()
+  to differentiate between a regular set in one that is an entire rule.
+  If it is an entire rule, it has to set the return value, retval.tree.
+
+* Fixed ANTLR-220.
+  Made TreeAdaptor dupNode and dupTree events emit debugging events
+  so AW could see tree node duplications.
+
+February 4, 2008
+
+* BACKWARD INCOMPATIBILITY
+  Added getSourceName to IntStream and TokenSource interfaces and also the
+  BaseRecognizer.  Have to know where chars come from for error messages.
+  Widespread change, but a trivial one.
+
+January 17, 2008
+
+* Interpreter throws FailedPredicateException now when it sees a predicate;
+  before it was silently failing.  I'll make it work one of these days. ;)
+
+January 12, 2008
+
+* Copy ctor not copying start and stop for common token. Fixes ANTLR-212
+
+* Removed single token insertion and deletion for tokens, sets.
+  Required a change to the code generation for matchSet() template
+  and a tweak inside the BaseRecognizer.  To engage this again is easy,
+  just override mismatch() to call mismatchRecover(). I changed it to simply
+  throw an exception.
+
+* Added syntaxError recognizer state var so you can easily tell if
+  a recognizer failed.  Added getNumberOfSyntaxErrors() to recognizers.
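+
+  E.g. (prog is a placeholder for your start rule):
+
+    parser.prog();
+    if (parser.getNumberOfSyntaxErrors() > 0) {
+        // don't trust the result; report and bail out
+    }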
+
+* Added doc for the error node stuff:
+  http://www.antlr.org/wiki/display/ANTLR3/Tree+construction
+
+* Fixed ANTLR-193
+
+* Added recognizer methods to answer questions about current mismatched
+  token error.  Useful now since i don't automatically recover inline
+  to such errors (I throw exception):
+	mismatchIsUnwantedToken(IntStream input, int ttype)
+	mismatchIsMissingToken(IntStream input, BitSet follow)
+
+* Added UnwantedTokenException and MissingTokenException to make
+  match() problems more precise in case you want to catch differently.
+  Updated getErrorMessage() to be more precise.  Says:
+
+	line 2:9 missing EQ at '0'
+
+  now instead of
+
+	line 2:9 mismatched input '0' expecting EQ
+
+  Input "x=9 9;" gives
+
+	line 3:8 extraneous input '9' expecting ';'
+
+  When very confused, "x=9 for;", you still get old mismatched message:
+
+	line 3:8 extraneous input 'for' expecting ';'
+	line 3:11 mismatched input ';' expecting '('
+
+* Added unit tests to TestAutoAST and copied to TestRewriteAST with
+  suitable rewrites to ensure basic error node insertion works.
+
+January 11, 2008
+
+* Adding errorNode to TreeAdaptor and various debug
+  events/listeners.  Had to add new class runtime.tree.CommonErrorNode
+  to hold all the goodies: input stream, start/stop objects.
+
+* Tweaked CommonTree.getType() to return INVALID_TOKEN_TYPE
+  instead of literal 0 (same thing at moment though).
+
+* Updated ANTLRWorks to show error nodes in tree as much as I could; Jean
+  will get to rest of it.
+
+January 9-10, 2008
+
+* Continued work on debugging/profiling composite grammars.
+
+* Updated debug protocol for debugging composite grammars.  enter/exit
+  rule needs grammar to know when to flip display in AW.
+
+* Fixed ANTLR-209.  ANTLR consumed 2 not 1 char to recover in lexer.
+
+* Added two FAQs about lexer error handling instead of making changes to the
+  ANTLR runtime:
+  http://www.antlr.org/wiki/pages/viewpage.action?pageId=5341230
+  http://www.antlr.org/wiki/pages/viewpage.action?pageId=5341217
+
+January 1-8, 2008
+
+* Making debugging/profiling work with composite grammars.
+
+* Updated ANTLRWorks so it works still for noncomposite grammars.
+
+* two new examples: import and composite-java (the java example grammar
+  broken up into multiple pieces using import).
+
+* Worked on composite grammars.  Had to refactor a lot of code to make
+  ANTLR deal with one meta grammar made up of multiple grammars.  I
+  thought I had it sort of working back in August.  Yes, but barely. Lots
+  of work to do it seemed.  Lots of clean up work.  Many new unit tests
+  in TestCompositeGrammars.  Had to add new error messages warning about
+  conflicting tokens inherited from multiple grammars etc...
+
+    TOKEN_ALIAS_CONFLICT(arg,arg2) ::=
+      "cannot alias <arg>; string already assigned to <arg2>"
+    TOKEN_ALIAS_REASSIGNMENT(arg,arg2) ::=
+      "cannot alias <arg>; token name already assigned to <arg2>"
+    TOKEN_VOCAB_IN_DELEGATE(arg,arg2) ::=
+      "tokenVocab option ignored in imported grammar <arg>"
+    INVALID_IMPORT(arg,arg2) ::=
+      "<arg.grammarTypeString> grammar <arg.name> cannot import <arg2.grammarTypeString> grammar <arg2.name>"
+    IMPORTED_TOKENS_RULE_EMPTY(arg,arg2) ::=
+      "no lexer rules contributed to <arg> from imported grammar <arg2>"
+    IMPORT_NAME_CLASH(arg,arg2) ::=
+      "combined grammar <arg.name> and imported <arg2.grammarTypeString> grammar <arg2.name> both generate <arg2.recognizerName>; import ignored"
+
+  This stuff got really really complicated.  Syntactic predicate names even
+  had to be scoped per grammar so they don't conflict.
+
+* When using subrules like (atom->atom) to set result tree, it was not
+  properly setting result (early enough).  Future code got null for
+  $rule.tree.
+
+December 31, 2007
+
+* Added the start of a semantic predicate computation for LL(1) to
+  solve a problem with slow grammar analysis even with k=1 due to
+  predicates.  Then I realized the problem with that grammar was
+  elsewhere.  Semantic context really shouldn't be used when
+  preventing closure recomputation (May 2008 I discovered I was
+  wrong--you do need it).  The predicates became huge even though the
+  reduced value would be no different.  The analyzer seems faster now
+  that I am not testing predicate values all the time.  Further it may
+  terminate sooner just due to reduced closure recursion.
+
+* Moved FIRST/FOLLOW computations to a separate class LL1Analyzer to
+  tidy up.
+
+* ANTLR lexer allowed octal escapes, but they didn't work. ;)  Rather than
+  fix, I'm removing.  Use '\uxxxx' to get even 8 bit char values: \u00xx.
+
+December 29, 2007
+
+* Fixed ANTLR-206. I wasn't avoiding analyzing decisions in
+  left-recursive rules.
+
+* Had to add hetero arg to all tokenRef*() templates.  Added _last
+  local var to track last child so we can do replaceChildren() during
+  AST rewrite mode for tree grammars.  Should be useful later for .text
+  property.  Ack, hetero arg is on lots of templates. :(  Moved
+  ruleCleanUp() template into ASTTreeParser and ASTParser groups.
+
+* added noRewrite() template (to Java.stg) so we can insert code during
+  rewrite mode to return original tree if no rewrite.  Might be useful
+  for token rewrites later.  For templates too?
+
+* Had to add if !rewriteMode around tree construction in tree parser
+  templates.
+
+* Harald Muller pointed out that we need to use <initValue(attr.type)>
+  in our tests for null token/rule property references. For int types
+  we need 0 not null. (p!=null?p.line:0).  Changed scopeAttributeRef,
+  ruleLabelRef.  Also changed the known typed attributes like
+  lexerRuleLabelPropertyRef_line to yield 0 upon null rule ref to
+  be consistent with case when we don't know the type.  Fixes ANTLR-195.
+  Added testTypeOfGuardedAttributeRefIsCorrect test and reset expected
+  output for 13 tests that now "fail".
+
+December 28, 2007
+
+* added polydiff example (Java target)
+
+* added "int" property for token and lexer rule refs.  super convenient. E.g.,
+  a : b=INT {int x = $b.int;} ;
+
+December 27, 2007
+
+* Changed -Xnoinlinedfa to -Xmaxinlinedfastates m where m is
+  maximum number of states a DFA can have before ANTLR avoids
+  inlining it.  Instead, you get a table-based DFA.  This
+  effectively avoids some acyclic DFAs that still have many states
+  with multiple incident edges.  The combinatorial explosion smacks
+  of infinite loop.  Fixes ANTLR-130.
+
+* [...] are allowed in args now but ] must be escaped as \]. E.g.,
+  a[String[\] ick, int i] : ... ;
+  And calling a rule: foo[x[i\], 34]
+  Fixes ANTLR-140.
+
+* Fixed ANTLR-105.  Target.getTargetStringLiteralFromANTLRStringLiteral()
+  escaped " that were already escaped.
+
+* Targets can now specify how to encode int as char escape.  Moved
+  DFA.encodeIntAsCharEscape to Target.
+
+* Bug in runtime.DFA.  If a special state (one with predicate) failed, it
+  tried to continue (causing out of range exception due to state = -1)
+  instead of reporting error.
+
+* If -dfa with combined grammar T.g, builds T.dec-*.dot and TLexer.dec-*.dot
+
+* Fix ANTLR-165.
+  Generate TParser.java and TLexer.java from T.g if combined, else
+  use T.java as output regardless of type.
+  BACKWARD INCOMPATIBILITY since file names change.
+  I changed the examples-v3/java to be consistent.  Required XML.g ->
+  XMLLexer.java and fuzzy/Main.java change.
+
+* Fix ANTLR-169.  Deletes tmp lexer grammar file.
+
+December 25, 2007
+
+* Fixed ANTLR-111.  More unit tests in TestAttributes.
+
+December 25, 2007
+
+* Dangling states ("decision cannot distinguish between alternatives
+  for at least one input sequence") are now an error, not a warning.
+
+* Added sample input sequence that leads to dangling DFA state, one
+  that cannot reach an accept state.  ANTLR ran into a case where
+  the same input sequence reaches multiple locations in the NFA
+  (and so not nondeterministic), but analysis ran out of further
+  NFA states to look for more input.  Commonly at EOF target states.
+  Now says:
+
+  error(202): CS.g:248:95: the decision cannot distinguish between alternative(s) 1,2 for input such as "DOT IDENTIFIER EOF"
+
+  Also fixed bug where dangling states did not resolve to stop states.
+
+* Fixed ANTLR-123
+
+December 17-21, 2007
+
+* k=1 doesn't prevent backtracking anymore as in
+  (options {k=1;}:'else' statement)?
+  if backtrack=true for overall grammar.  Set to false in subrule.
+
+* Optimized the analysis engine for LL(1).  Doesn't attempt LL(*) unless
+  LL(1) fails.  If not LL(1) but autobacktracking but no other kind of
+  predicate, it also avoids LL(*).  This is only important for really
+  big 4000 line grammars etc...
+
+* Lots of code clean up
+
+December 16, 2007
+
+* Yet more Kay pair programming.  Saved yet more RAM; 15% by
+  whacking NFA configurations etc. in each DFA state after DFA construction.
+
+* Overall we drop from 2m49s to 1m11s for a huge 4000 line TSQL grammar
+  with k=*.  Only needs -Xconversiontimeout 2000 now not
+  -Xconversiontimeout 5000 too.  With k=1, it's 1m30s down to 40s.
+
+December 15, 2007
+
+* Working with Kay Roepke, we got about 15% speed improvement in
+  overall ANTLR exec time.  Memory footprint seems to be about 50%
+  smaller.
+
+December 13-14, 2007
+
+* I abort entire DFA construction now when I see recursion in > 1 alt.
+  Decision is non-LL(*) even if some pieces are LL(*).  Safer to bail
+  out and try with fixed k.  If user set fixed k then it continues because
+  analysis will eventually terminate for sure.  If a pred is encountered
+  and k=* and it's non-LL(*), it aborts and retries at k=1 but does NOT
+  emit an error.
+
+* Decided that recursion overflow while computing a lookahead DFA is
+  serious enough that I should bail out of entire DFA computation.
+  Previously analysis tried to keep going and made the rules about
+  how analysis worked more complicated.  Better to simply abort when
+  decision can't be computed with current max stack (-Xm option).
+  User can adjust or add predicate etc...  This is now an error
+  not a warning.
+
+* Recursion overflow and unreachable alts are now fatal errors; no code gen.
+  The decision will literally not work.
+
+* Cleaned up how DFA construction/analysis aborts due to non-LL(*) and
+  overflow etc...  Throws exceptions now, which cleans up a bunch of IF
+  checks etc...  Very nice now. Exceptions:
+	analysis/AnalysisRecursionOverflowException.java
+	analysis/AnalysisTimeoutException.java
+	analysis/NonLLStarDecisionException.java
+
+* ErrorManager.grammarWarning() counted them as errors not warnings.
+
+* Unreachable alt warnings are now errors.
+
+* The upshot of these changes is that I fixed ANTLR-178 and did
+  lots of refactoring of code handling analysis failure.
+
+December 11, 2007
+
+* Could not deal with spaces in arg lists, oddly enough:
+	grammar Bad;
+	a : A b["foo", $A.text] ;
+	b[String x, String y] : C ;
+
+October 28, 2007
+
+* Made ANTLR emit a better error message when it cannot write the
+  implicit lexer file from a combined grammar. Used to say "cannot open
+  file", now says "cannot write file" and gives backtrace.
+
+September 15, 2007
+
+* Added getCharStream to Lexer.
+
+September 10, 2007
+
+* Added {{...}} forced action executed even during backtracking.
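+
+  For example (a sketch; the rule and action are hypothetical):
+
+    r : A {{System.out.println("runs even while backtracking/guessing");}} B ;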
+
+September 9, 2007
+
+* r='string' in lexer got a duplicate label definition.
+
+August 21, 2007
+
+* $scope::variable refs now check for empty stack so that expr == null if
+  $scope has an empty stack. Works for $scope[...]::variable too.  Nice!
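+
+  For example (a sketch; the grammar, scope, and action are hypothetical):
+
+    grammar T;
+    scope Symbols { List names; }
+    stat : ID {List n = $Symbols::names;} ; // null if no rule pushed Symbols
+    ID   : 'a'..'z'+ ;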
+
+August 20, 2007
+
+* Added reset() to CommonTreeNodeStream, token stream too
+
+* Made refs to rule/token properties use ?: to avoid null ptr exception.
+  $label.st now is label!=null?label.st:null.  Updated TestAttributes.
+  This is useful not only for optional rule/token refs, but also during
+  error recovery.  If ID is not matched, $ID.text won't cause a null ptr.
+
+August 20, 2007
+*	Fixed ANTLR-177: hashCode/equals not consistent for label
+	Fixed bug where Rule was compared to string; introduced from dev branch
+
+August 15, 2007 -- Got rough draft of the grammar import working.
+                   Submit to dev and then integrate into mainline.
+
+	All file changes/additions:
+
+	README.txt	# edit
+	CHANGES.txt	# add
+	  Factored out the changes from the readme.
+
+	runtime/Java/src/org/antlr/runtime/BaseRecognizer.java	# edit
+	runtime/Java/src/org/antlr/runtime/DFA.java	# edit
+	runtime/Java/src/org/antlr/runtime/Lexer.java	# edit
+	runtime/Java/src/org/antlr/runtime/Parser.java	# edit
+	runtime/Java/src/org/antlr/runtime/debug/DebugParser.java	# edit
+	runtime/Java/src/org/antlr/runtime/tree/TreeParser.java	# edit
+	  Factored state fields into RecognizerSharedState
+	  object. You will see a lot of things like
+            state.errorRecovery = false;
+	runtime/Java/src/org/antlr/runtime/RecognizerSharedState.java	# add
+          Shares all recognizer state variables including lexer even though
+	  these are superfluous to parsers and tree parsers.  There
+	  was a casting issue that I could not resolve.
+
+	src/org/antlr/Tool.java	# edit
+	  Broke apart Grammar.setGrammarContent() into
+	  parseAndBuildAST() and analyzeGrammar() to make the grammar
+	  import work. I needed to be able to look at the trees for
+	  imported grammars before analyzing them and building DFA. Added
+	  use of the CompositeGrammar object and handling of multiple
+	  delegate grammars. Changed decision DFA DOT file names to
+	  include the grammar name.
+
+	src/org/antlr/analysis/DFA.java	# edit
+	  Just tweaked to use generics, updated a comment.
+
+	src/org/antlr/analysis/DecisionProbe.java	# edit
+	  Just tweaked to use generics.
+
+	src/org/antlr/analysis/NFA.java	# edit
+	  NFAs now span multiple grammars, so I moved the NFA state
+	  tracking to the composite grammar object.
+
+	src/org/antlr/analysis/NFAState.java	# edit
+	  Added some null checking and made a field public.
+
+	src/org/antlr/analysis/NFAToDFAConverter.java	# edit
+	  Changed a method call to directly access a field.
+
+	src/org/antlr/analysis/RuleClosureTransition.java	# edit
+	  Instead of using a rule index, which does not span multiple
+	  grammars, the transition object now tracks a pointer to
+	  the actual Rule definition object.
+
+	src/org/antlr/analysis/SemanticContext.java	# edit
+	  Tweaked to use a field instead of a method
+
+	src/org/antlr/codegen/ActionTranslator.g	# edit
+	src/org/antlr/codegen/ActionTranslatorLexer.java	# edit
+	  Tweaked to use the new runtime and its changed method names.
+
+	src/org/antlr/codegen/CodeGenerator.java	# edit
+	  Tweaked comments.
+
+	src/org/antlr/codegen/codegen.g	# edit
+	  Added import grammar syntax and altered rule atom to pass a
+	  scope around so that grammar.rule works.  Caution this
+	  feature is used internally by ANTLR and is not meant to be
+	  used by users at this point.
+
+	src/org/antlr/codegen/templates/ANTLRCore.sti	# edit
+	  Added scope to all ruleref template interfaces.
+
+	src/org/antlr/codegen/templates/Java/Java.stg	# edit
+	  Grammars can now import other grammars, which I implemented
+	  using a delegation pointer to the other grammar(s). So if
+	  grammar A imports grammars B and C, then the generated
+	  recognizer for A must have delegation pointers to BParser
+	  and CParser objects. These are now fields:
+
+	    // delegates
+	    <grammar.delegates:
+	     {g|public <g.name>Lexer <g:delegateName()>;}; separator="\n">
+
+          Also, B and C must have back pointers to the delegator so
+          that they can refer to rules that have been overridden.
+          This is a mechanism akin to static inheritance:
+
+	    // delegators
+	    <grammar.delegators:
+	     {g|public <g.name>Lexer <g:delegateName()>;}; separator="\n">
+
+	  This file also has a lot of changes so that state variables
+	  now are state.backtracking instead of the implied
+	  this.backtracking.
+
+	  The file also refers to grammar.delegatedRules attribute
+	  which is the list of Rule objects for which you must
+	  generate manual delegation.  This amounts to a stub whereby
+	  rule foo's method foo() simply calls X.foo() if foo is not
+	  defined inside the delegator.
+
+	  You will notice that the ruleref templates now take a scope
+	  so that I can have implicit rule Tokens referred to
+	  delegate.Tokens rule in a delegate grammar.  This is the way
+	  I do lexer grammar imports.
+
+	  I added a template called delegateName which uses the
+	  grammar name to compute a delegate name if the user does not
+	  specify a label in the import statement such as:
+
+	  import x=X;
+
+	  Oh, note that rule reference templates all receive a Rule
+	  object now instead of the simple rule name as the 'rule'
+	  attribute.  You will see me doing <rule.name> instead of
+	  <name> now.
+
+	src/org/antlr/codegen/templates/Java/Dbg.stg	# edit
+	  Changes mirroring the constructor and field stuff from
+	  Java.stg. Part of this is a cut and paste because of a bug
+	  in ST.
+
+	src/org/antlr/codegen/templates/Java/AST.stg	# edit
+	src/org/antlr/codegen/templates/Java/ASTParser.stg	# edit
+	src/org/antlr/codegen/templates/Java/ASTTreeParser.stg	# edit
+	  Just added the scope attribute.
+
+	src/org/antlr/test/BaseTest.java	# edit
+	  Added functionality to support testing composite grammars.
+	    execLexer()
+
+	src/org/antlr/test/TestAttributes.java	# edit
+	  Tweak to deal with shared recognizer state.
+
+	src/org/antlr/test/TestCompositeGrammars.java	# add
+	  Start of my unit tests.
+
+	src/org/antlr/tool/CompositeGrammar.java	# add
+	src/org/antlr/tool/CompositeGrammarTree.java	# add
+	  Tracks main grammar and all delegate grammars. Tracks unique
+	  NFA state numbers and unique token types. This keeps a tree
+	  of grammars computed from the import/delegation chain. When
+	  you want to look up a rule, it starts at the root of the
+	  tree and does a pre-order search to find the rule.
+
+	src/org/antlr/tool/ActionAnalysis.g	# edit
+	src/org/antlr/tool/ActionAnalysisLexer.java	# edit
+
+	src/org/antlr/tool/AttributeScope.java	# edit
+	  Updated to use generics in one place.
+
+	src/org/antlr/tool/DOTGenerator.java	# edit
+	  Updated to indicate when nonlocal rules are referenced.
+
+	src/org/antlr/tool/ErrorManager.java	# edit
+	  Added some error messages for import grammars; I need more.
+
+	src/org/antlr/tool/FASerializer.java	# edit
+	  Tweaked to use a field not method.
+
+	src/org/antlr/tool/Grammar.java	# edit
+	  This is where most of the meat is for the grammar import
+	  stuff as you can imagine.  I factored out the token type
+	  tracking into the CompositeGrammar object. I added code to
+	  the addArtificialMatchTokensRule method so that it includes
+	  references to all delegate lexer Tokens rules. Altered the
+	  rule lookup stuff so that it knows about delegate grammars.
+
+	src/org/antlr/tool/GrammarAST.java	# edit
+	src/org/antlr/tool/GrammarAnalysisAbortedMessage.java	# edit
+	src/org/antlr/tool/GrammarReport.java	# edit
+	src/org/antlr/tool/NonRegularDecisionMessage.java	# edit
+	  Made enclosing rule visible as field.
+
+	src/org/antlr/tool/GrammarSanity.java	# edit
+	  General cleanup and addition of generics.
+
+	src/org/antlr/tool/Interpreter.java	# edit
+	  Reference fields instead of methods.
+
+	src/org/antlr/tool/NFAFactory.java	# edit
+	  General cleanup and use of Rule object instead of rule
+	  index.
+
+	src/org/antlr/tool/NameSpaceChecker.java	# edit
+	  A little bit of cleanup and changes to use either the local
+	  or globally visible rule. Added code to check that scopes
+	  are valid on scoped rule references. Again, this is an
+	  internal feature, not to be used by users.
+
+	src/org/antlr/tool/RandomPhrase.java	# edit
+	  Tweaked.
+
+	src/org/antlr/tool/Rule.java	# edit
+	  Added field imported. Removed some unused methods by
+	  commenting them out. Made toString() more expressive.
+
+	src/org/antlr/tool/antlr.g	# edit
+	src/org/antlr/tool/antlr.print.g	# edit
+	src/org/antlr/tool/assign.types.g	# edit
+	src/org/antlr/tool/buildnfa.g	# edit
+	src/org/antlr/tool/define.g	# edit
+	  Added syntax for import statement.  assign.types.g is the
+	  grammar that invokes Grammar.importGrammar().
+
+	src/org/antlr/tool/templates/messages/languages/en.stg	# edit
+	  Added error messages.
+
+	Added
+
+	CHANGES.txt
+	runtime/Java/src/org/antlr/runtime/RecognizerSharedState.java
+	src/org/antlr/test/TestCompositeGrammars.java
+	src/org/antlr/tool/CompositeGrammar.java
+	src/org/antlr/tool/CompositeGrammarTree.java
+
+3.0.1 - August 13, 2007
+
+[See target pages on the wiki for more information on the non-Java targets]
+
+August 7, 2007
+
+* added escaping of double quotes in DOTTreeGenerator
+
+July 22, 2007
+
+* fixed dynamic scope implementation in lexers. They were not creating new scope
+  entries on the stack.  Unsupported feature!
+
+July 30, 2007
+
+* float return values were initialized to 0.0 not 0.0f in Java.
+
+July 28, 2007
+
+* Sam Ellis points out an init var bug in ANTLRReaderStream.
+
+July 27, 2007 (done in dev branch)
+
+* Moved token type index stuff from CommonTreeNodeStream to TreeWizard
+
+* Added getChildren to BaseTree.
+
+* Added heterogeneous tree functionality; rewrite for parser/tree parser
+  and auto AST constr. for parser.
+
+	org/antlr/runtime/tree/RewriteRuleElementStream.java
+	org/antlr/runtime/tree/RewriteRuleNodeStream.java
+	org/antlr/runtime/tree/RewriteRuleTokenStream.java
+		Renamed method next() and re-factored things to have more
+		specific methods: nextToken, nextNode, nextTree.
+
+	codegen/codegen.g
+		Updated to include new <NodeType> AST structure for
+		token references.  Pushed the hetero attribute into
+		all tokenRef* templates.
+	codegen/templates/Java/AST.stg
+		Factored out a few templates:
+			createImaginaryNode(tokenType,hetero,args)
+			createRewriteNodeFromElement(token,hetero,args)
+		Converted a lot of stream next() calls to more specific
+			nextToken, nextNode, nextTree per above.
+	codegen/templates/Java/ASTParser.stg
+		Added createNodeFromToken template and re-factored creation
+		sites to use that template.  Added hetero attribute.
+	codegen/templates/Java/ASTTreeParser.stg
+		Added createRewriteNodeFromElement template and re-factored.
+
+	test/TestHeteroAST.java
+		New file. Unit tests to test new hetero tree construction.
+	test/TestRewriteAST.java
+		Fixed test.  Nil single-node trees no longer return nil;
+		they return null.
+
+	tool/ErrorManager.java
+	tool/templates/messages/languages/en.stg
+		Added error message:
+		HETERO_ILLEGAL_IN_REWRITE_ALT(arg) ::=
+		  "alts with rewrites can't use heterogeneous types left of ->"
+
+	tool/antlr.g
+	tool/antlr.print.g
+	tool/assign.types.g
+	tool/buildnfa.g
+	tool/define.g
+		Added syntax for <NodeType> to token references.
+		Altered AST structure rippled through different phases.
+
+July 24, 2007
+
+* Deleted DoubleLinkTree.java; CommonTree does that now.
+
+July 23, 2007
+
+* template group outputFile; changed rewrite arg to rewriteMode.
+
+* added rewrite mode for tree parser build AST.
+
+July 22, 2007
+
+* Kay fixed dynamic scope implementation in lexers. They were not
+  creating new scope entries on the stack.  This is an UNSUPPORTED feature.
+
+* added getParent and getChildIndex to TreeAdaptor.  Added
+  implementation to CommonTree.  It's just too useful having those
+  parent and child indexes available for rewriting etc...  I tried 2x
+  to make an implementation of tree rewriting w/o this and the
+  constraints just made it too expensive and complicated.  Have to
+  update adaptors to set parent, child index values.  Updated Tree
+  interface and BaseTree also.  Should only affect target developers
+  not users.  Well, unless they impl Tree.
+
+* dupNode (via ctor) of CommonTree didn't copy start/stop token indexes.
+
+TARGET DEVELOPERS WARNING -- AST.stg split with some functionality
+                             going into ASTParser.stg then I added
+                             ASTTreeParser.stg.  CodeGenerator
+                             assumes new subgroups exist.
+
+July 20, 2007
+
+* Added AST construction for tree parsers including -> rewrite rules.
+  Rewrite mode (rewrite=true) alters the tree in place rather than
+  constructing a whole new tree.  Implementation notes:
+
+  org/antlr/runtime/tree/Tree.java
+	Add methods for parent and child index functionality.
+	Also added freshenParentAndChildIndexes() which you can use
+	to ensure that all double linking is set up right after you
+	manipulate the tree manually.  The setChild and related methods do
+	the proper thing so you shouldn't need this.
+	Added replaceChildren() to support tree rewrite mode in tree parsers
+  org/antlr/runtime/tree/BaseTree.java
+	Updated to set parent and child index stuff.  Added replaceChildren
+	method etc...  It still only has a list of children as sole field
+     	but calls methods that subclasses can choose to implement such as
+	CommonTree.
+  org/antlr/runtime/tree/CommonTree.java
+	Added parent and childIndex fields to doubly link.
+  org/antlr/runtime/tree/TreeAdaptor.java
+	Added methods for new parent and child index functionality.
+	Also added method for rewrite mode in tree parsers:
+	replaceChildren(Object parent, int startChildIndex,
+                        int stopChildIndex, Object t);
+	Added setChild and deleteChild methods
+  org/antlr/runtime/tree/BaseTreeAdaptor.java
+	Moved dupTree here from BaseTree.
+	Updated rulePostProcessing to deal with parent and child index.
+	Added setChild and deleteChild implementations
+  org/antlr/runtime/tree/CommonTreeAdaptor.java
+	Added methods to deal with the parent and child index for a node.
+
+  org/antlr/runtime/tree/CommonTreeNodeStream.java
+	Removed token type index and method fillReverseIndex etc...
+	Probably will move into the tree wizard in the future.
+	Changed call/seek stack implementation to use IntArray
+	Added replaceChildren interface.
+  org/antlr/runtime/tree/TreeNodeStream.java
+	Added replaceChildren.
+  org/antlr/runtime/tree/UnBufferedTreeNodeStream.java
+	Added replaceChildren method but no implementation
+
+  codegen/templates/ANTLRCore.sti
+	Changed rewrite to a better name: rewriteMode
+	Added tree level argument to alt, tree so that auto AST
+        construction can occur while recognizing in tree parsers.
+
+  codegen/templates/Java/AST.stg
+	Split template group: added two subclasses to handle different
+	functionality for normal parsing and tree parsing + AST
+	construction.  Tree parsers default behavior is to dup tree
+	not construct another.  Added ASTParser.stg and
+	ASTTreeParser.stg to handle auto AST construction during
+	recognition for the two different parser types.  I just copied
+	the token, rule, set, wildcard templates to the subclasses.
+	The rewrite templates are still in AST.stg. I factored out the
+	node creation so that the same rewrite templates can be used
+	for both parsing and tree parsing.
+
+  codegen/templates/Java/ASTParser.stg
+	The templates needed to build trees with auto construction
+	during parsing.
+  codegen/templates/Java/ASTTreeParser.stg
+	The templates needed to build trees with auto construction
+	during tree parsing.
+  codegen/templates/Java/Java.stg
+	genericParser now has rewriteElementType (Node or Token) so
+	that the rewrite streams know what kind of elements are inside
+	during rewrite rule tree construction.
+  codegen/templates/Java/ST.stg
+	rewrite attribute name change to rewriteMode
+
+  org/antlr/runtime/debug/DebugTreeAdaptor.java
+  org/antlr/runtime/debug/DebugTreeNodeStream.java
+	Updated to handle new interfaces
+
+  test/BaseTest.java
+	Added test rig update to handle AST construction by tree parsers.
+	All tree construction runs automatically test sanity of parent
+	and child indexes.
+  test/TestTreeGrammarRewriteAST.java
+  test/TestTreeNodeStream.java
+  test/TestTrees.java
+	new file; tests the new parent and child index stuff in trees.
+
+July 19, 2007
+
+* implemented new unique ID; GC was causing non unique hash codes.  Debugging
+  tree grammars was messing up.
+
+* got tree rewrites working in tree grammars.  It builds a completely new
+  tree from old tree; i.e., you get two trees in memory.  W/o a rewrite
+  rule, the input for that rule is duplicated and returned. -> w/o elements
+  to the right means don't return anything; i.e., delete.  Ooops...way
+  harder than I thought.  Real implementation notes above.
+
+INCOMPATIBILITY WARNING -- templates have changed; must regen output from
+                           grammars.  Runtime libraries have also changed.
+                           Debug event listener interface has changed also.
+
+July 17, 2007
+
+* Added line/charposition to node socket events and event dump so
+  we have more info during tree parsing.  Only works if your
+  tree adaptor returns a valid Token object from getToken(treenode)
+  with line/col set.  Refactored consumeNode/LN to use deserializeNode().
+
+* Fixed mismatched tree node exceptions; for imaginary nodes, it said
+  "missing null".  Now prints the token type we found.
+
+* Cleaned up exception stuff. MismatchedTreeNodeException was setting
+  line/col, but only RecognitionException should do that.
+
+* If an imaginary token gets a mismatch, there is no line info.  Now we
+  search backwards in the stream, if the input node stream supports it,
+  to find the last node with good line/col info. E.g.,
+
+ANTLRv3Tree.g: node from after line 156:72 mismatched tree node: EOA expecting <UP>
+
+  which used to be:
+
+ANTLRv3Tree.g: node from line 0:0 mismatched tree node: null expecting <UP>
+
+* mismatched tree node exceptions were not sent to the debug event stream.
+  Due to a type being slightly different on recoverFromMismatchedToken()
+  in DebugTreeParser.  Was calling BaseRecognizer version not subclass.
+  Now we get:
+
+  9459:   Recognition exception MismatchedTreeNodeException(0!=0)
+
+* List labels were not allowed as root nodes in tree rewrites like
+  ^($listlabel ...).  Had to add a template to AST.stg:
+
+  /** Gen ^($label ...) where label+=... */
+  rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+
+July 16, 2007
+
+* fixed nextNode in RewriteRuleSubtreeStream was dup'ing too much,
+  screwing up debug event stream.  Also there was a bug in how
+  the rewrite tree stream stuff decided to dup nodes.
+
+* fixed bug in LT for tree parsing; text was not transmitted properly;
+  only single words worked.
+
+* made the decision for a rule put its line/col on the colon, not the
+  first token of the first alt.
+
+* remote ProxyToken now emits token index for easier debugging when looking
+  at AW's event stream.  For example, the @5 here is the token index:
+
+  31	Consume hidden [ /<64>,channel=99,30:7, @5]
+
+* same is true for consume nodes now:
+
+  25586	Consume node [')'/, <44>, 4712040,@1749]	25
+
+  When debugging tree parsers, it helps to track errors when you know
+  what corresponding input symbol created this tree node.
+
+* Changed debug events associated with trees quite a bit.  Passes nodes around
+  now rather than text, type, unique IDs etc...  Mostly affects internal stuff.
+  Target developers will have some work in their runtime to do to match
+  this change. :(  BUT, there is only a slight tweak in the Dbg.stg
+  and ASTDbg.stg templates.
+  Interface just didn't make sense as is.  If you turn on debugging, and
+  want to track a node creation, you want the node pointer not its ID,
+  text, etc...
+  Added ProxyTree for passing across socket.  Has line/charpos and tokenIndex
+
+July 15, 2007
+
+* added null ptr protection in CommonTreeAdaptor.
+
+July 14, 2007
+
+* null child in TreeAdaptor does nothing now.  Changed interface and
+  implementation.  Changed DebugTreeAdaptor to not fire events on null add
+  as well.
+
+July 12, 2007
+
+* added get method for the line/col to DFA map in Grammar.java
+
+July 7, 2007
+
+* fixed wrong order of test for exceptions in Lexer.getErrorMessage()
+
+June 28, 2007
+
+* Added ability to set the port number in the constructor for the debug parser.
+
+June 5, 2007
+
+* Changed (hidden) option -verbose to -Xnfastates; this just prints out the
+  NFA states along each nondeterministic path for nondeterminism warnings.
+
+May 18, 2007
+
+* there were some dependencies with org.antlr.* that I removed from
+  org.antlr.runtime.*
+
+3.0 final - May 17, 2007
+
+May 14, 2007
+
+* Auto backtracking didn't work with ! and ^ suffixes on first element
+  of an alt.
+
+* Auto backtracking didn't work with an action as first element.
+
+May 10, 2007
+
+* turned off the warning about missing locale message files:
+ no such locale file org/antlr/tool/templates/messages/languages/ru.stg retrying with English locale
+
+May 5, 2007
+
+* moving org.antlr.runtime to runtime/Java/src/org/... Other target
+  source / libs are under runtime/targetname.
+
+May 4, 2007
+
+* You could not use arguments on a token reference that was a root in a
+  tree rewrite rule like -> ^(ID[args] ...).
+
+May 3, 2007
+
+* Fixed ANTLR-82.  Actions after the root were considered part of
+  an optional child.  They were not always executed.  Required a change
+  to the ANTLRCore.sti interface for tree() template.
+
+May 2, 2007
+
+* Fixed ANTLR-117. Wasn't building decisions properly for subrules in
+  syntactic predicates.
+
+April 22, 2007
+
+* Made build.xml ref all jars in antlr lib.  Thanks to Miguel Ping.
+
+* Fixed ANTLR-11
+
+* Now labels on ranges and such in lexer work properly.
+
+* ActionAnalysisLexer was in wrong package.
+
+April 21, 2007
+
+* Pushing a huge update that fixes:
+	http://www.antlr.org/browse/ANTLR-112
+	http://www.antlr.org/browse/ANTLR-110
+	http://www.antlr.org/browse/ANTLR-109
+	http://www.antlr.org/browse/ANTLR-103
+	http://www.antlr.org/browse/ANTLR-97
+	http://www.antlr.org/browse/ANTLR-113
+	http://www.antlr.org/browse/ANTLR-66
+	http://www.antlr.org/browse/ANTLR-98
+	http://www.antlr.org/browse/ANTLR-24
+	http://www.antlr.org/browse/ANTLR-114
+	http://www.antlr.org/browse/ANTLR-5
+	http://www.antlr.org/browse/ANTLR-6
+
+  Basically, I gutted the way AST rewrites work.  MUCH better.
+
+* Fixed lots of little label issues in the lexer.  Couldn't do x+=ID
+  in lexer, for example.  Fixed ANTLR-114, ANTLR-112
+
+* Isolated EOT transition in lexer generated dangling else clause.
+  Fixed ANTLR-113.
+
+April 17, 2007
+
+* Fixed a major problem with gated semantic predicates.  Added more
+  unit tests.
+
+* Fixed bug in cyclic DFA with syntactic predicates.  Wasn't rewinding
+  properly.  Further, mark() in token stream did not fill buffer so
+  when you rewound back to last marker index was -1 not 0.  At same time
+  I fixed ANTLR-103.  Syn preds evaluated only once now.
+
+* Altered code gen file writing so it writes directly to a file
+  instead of building a big string and then writing that out.  Should
+  be faster and much less memory intensive.
+
+* Fixed so antlr writes files to correct location again.  See:
+
+http://www.antlr.org/wiki/pages/viewpage.action?pageId=1862
+
+3.0b7 - April 12, 2007
+
+April 10, 2007
+
+* Allows -> {...} actions now when building ASTs.  Fixed ANTLR-14.
+
+* Allows ! on sets and wildcard now during output=AST option. Fixed ANTLR-17.
+
+* Fixed ANTLR-92 bug.  Couldn't use sets with -> tree construction.
+
+* No lexer rule for a token type is now a warning.
+
+* Fixed set labels in lexer; ANTLR-60 bug
+
+* Fixed problem with duplicate state variable definitions in switch-case
+
+April 9, 2007
+
+* Gated predicates didn't work properly in cyclic DFA.
+
+April 7, 2007
+
+* Couldn't have more than one set per rule it seems.  Fixed.
+
+April 3, 2007
+
+* Fixed a problem in my unused label optimization.  Added a new
+  pass over actions to examine them.
+
+* RuleReturnScope has this method back:
+  /** Has a value potentially if output=template; Don't use StringTemplate
+   *  type as it then causes a dependency with ST lib.
+   */
+  public Object getTemplate() { return null; }
+
+March 30, 2007
+
+* Fixed ANTLR-8.  Labels to rules w/o return values caused compile errors.
+
+* Fixed ANTLR-89; semantic predicates in lexer sometimes
+  caused exception in code gen.
+
+* Fixed ANTLR-36; remove runtime dependency with ST
+
+March 29, 2007
+
+* Over last few days, I've gutted how ANTLR handles sets of chars or
+  tokens.  I cleaned up a lot of stuff in the grammars and added lots
+  of unit tests.
+
+March 26, 2007
+
+* CommonTreeNodeStream didn't push correctly; couldn't handle very
+  deeply nested trees.
+
+* Fixed bug that E : 'a' 'b' ; made E be seen as an alias of 'a'.
+
+March 22, 2007
+
+* Working with Egor Ushakov from Sun Optimization / NetBeans team I
+  made all the Java lexer transition tables static w/o screwing up
+  ability to reference semantic predicates etc...  Only changed Java.stg
+
+* cached text string in CommonToken.getText(); saves on repeated calls;
+  Java mode.
+
+* made all generated methods final; saves a few percent speed according to
+  Egor Ushakov (Java only).
+
+* removed most assignments from each lexer rule and even the Lexer.emit()
+  call!  All done in nextToken now.  Saves on code gen size and a wee bit of
+  execution speed probably.  Variables became fields: type, channel, line,
+  etc... Now emit() needs no args even.  Again, Egor helped on this.
+
+March 17, 2007
+
+* Jonathan DeKlotz updated C# templates to be 3.0b6 current
+
+March 14, 2007
+
+* Manually-specified (...)=> force backtracking eval of that predicate.
+  backtracking=true mode does not however.  Added unit test.
+
+March 14, 2007
+
+* Fixed bug in lexer where ~T didn't compute the set from rule T.
+
+* Added -Xnoinlinedfa to make all DFAs use tables; no inline prediction with IFs
+
+* Fixed http://www.antlr.org/browse/ANTLR-80.
+  Sem pred states didn't define lookahead vars.
+
+* Fixed http://www.antlr.org/browse/ANTLR-91.
+  When forcing some acyclic DFA to be state tables, they broke.
+  Forcing all DFA to be state tables should give same results.
+
+March 12, 2007
+
+* setTokenSource in CommonTokenStream didn't clear tokens list.
+  setCharStream calls reset in Lexer.
+
+* Altered -depend.  No longer printing grammar files for multiple input
+  files with -depend.  Doesn't show T__.g temp file anymore. Added
+  TLexer.tokens.  Added .h files if defined.
+
+February 11, 2007
+
+* Added -depend command-line option that, instead of processing files,
+  shows you what files the input grammar(s) depend on and what files
+  they generate. For combined grammar T.g:
+
+  $ java org.antlr.Tool -depend T.g
+
+  You get:
+
+  TParser.java : T.g
+  T.tokens : T.g
+  T__.g : T.g
+
+  Now, assuming U.g is a tree grammar ref'd T's tokens:
+
+  $ java org.antlr.Tool -depend T.g U.g
+
+  TParser.java : T.g
+  T.tokens : T.g
+  T__.g : T.g
+  U.g: T.tokens
+  U.java : U.g
+  U.tokens : U.g
+
+  Handles spaces by escaping them.  Pays attention to -o, -fo and -lib.
+  Dir 'x y' is a valid dir in current dir.
+
+  $ java org.antlr.Tool -depend -lib /usr/local/lib -o 'x y' T.g U.g
+  x\ y/TParser.java : T.g
+  x\ y/T.tokens : T.g
+  x\ y/T__.g : T.g
+  U.g: /usr/local/lib/T.tokens
+  x\ y/U.java : U.g
+  x\ y/U.tokens : U.g
+
+  You have API access via org.antlr.tool.BuildDependencyGenerator class:
+  getGeneratedFileList(), getDependenciesFileList().  You can also access
+  the output template: getDependencies().  The file
+  org/antlr/tool/templates/depend.stg contains the template.  You can
+  modify as you want.  File objects go in so you can play with path etc...
+
+February 10, 2007
+
+* no more .gl files generated.  All .g all the time.
+
+* changed @finally to be @after and added a finally clause to the
+  exception stuff.  I also removed the superfluous "exception"
+  keyword.  Here's what the new syntax looks like:
+
+  a
+  @after { System.out.println("ick"); }
+    : 'a'
+    ;
+    catch[RecognitionException e] { System.out.println("foo"); }
+    catch[IOException e] { System.out.println("io"); }
+    finally { System.out.println("foobar"); }
+
+  @after executes after bookkeeping to set $rule.stop, $rule.tree but
+  before scopes pop and any memoization happens.  Dynamic scopes and
+  memoization are still in generated finally block because they must
+  exec even if error in rule.  The @after action and tree setting
+  stuff can technically be skipped upon syntax error in rule.  [Later
+  we might add something to finally to stick an ERROR token in the
+  tree and set the return value.]  Sequence goes: set $stop, $tree (if
+  any), @after (if any), pop scopes (if any), memoize (if needed),
+  grammar finally clause.  Last 3 are in generated code's finally
+  clause.
+
+3.0b6 - January 31, 2007
+
+January 30, 2007
+
+* Fixed bug in IntervalSet.and: it returned the same empty set all the time
+  rather than new empty set.  Code altered the same empty set.
+
+* Made analysis terminate faster upon a decision that takes too long;
+  it seemed to keep doing work for a while.  Refactored some names
+  and updated comments.  Also made it terminate when it realizes it's
+  non-LL(*) due to recursion.  Just added terminate conditions to the loop
+  in convert().
+
+* Sometimes fatal non-LL(*) messages didn't appear; instead you got
+  "antlr couldn't analyze", which is actually untrue.  I had the
+  order of some prints wrong in the DecisionProbe.
+
+* The code generator incorrectly detected when it could use a fixed,
+  acyclic inline DFA (i.e., using an IF).  Upon non-LL(*) decisions
+  with predicates, analysis made cyclic DFA.  But this prevents
+  the computation from detecting whether they are cyclic.  I just added
+  a protection in front of the acyclic DFA generator to skip it for
+  non-LL(*) decisions.  Updated comments.
+
+January 23, 2007
+
+* Made tree node streams use adaptor to create navigation nodes.
+  Thanks to Emond Papegaaij.
+
+January 22, 2007
+
+* Added lexer rule properties: start, stop
+
+January 1, 2007
+
+* analysis failsafe is back on; if a decision takes too long, it bails out
+  and uses k=1
+
+January 1, 2007
+
+* += labels for rules only work with an output option; previously the list
+  elements were the return value structs, but they are now either the tree
+  or StringTemplate return value.  You can now label different rules:
+  x+=a x+=b.
+
+December 30, 2006
+
+* Allow \" to work correctly in "..." template.
+
+December 28, 2006
+
+* errors that are now warnings: missing AST label type in trees.
+  Also "no start rule detected" is warning.
+
+* tree grammars also can do rewrite=true for output=template.
+  Only works for alts with single node or tree as alt elements.
+  If you are going to use $text in a tree grammar or do rewrite=true
+  for templates, you must use in your main:
+
+  nodes.setTokenStream(tokens);
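+
+  A rough driver sketch (TLexer/TParser/TWalker and rule prog are
+  hypothetical generated names):
+
+  CharStream input = new ANTLRFileStream("input.txt");
+  TLexer lexer = new TLexer(input);
+  CommonTokenStream tokens = new CommonTokenStream(lexer);
+  TParser parser = new TParser(tokens);
+  TParser.prog_return r = parser.prog();
+  CommonTreeNodeStream nodes = new CommonTreeNodeStream(r.tree);
+  nodes.setTokenStream(tokens);  // needed for $text / rewrite=true
+  TWalker walker = new TWalker(nodes);
+  walker.prog();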
+
+* You get a warning for tree grammars that do rewrite=true and
+  output=template and have -> for alts that are not simple nodes
+  or simple trees.  new unit tests in TestRewriteTemplates at end.
+
+December 27, 2006
+
+* Error message appears when you use -> in tree grammar with
+  output=template and rewrite=true for alt that is not simple
+  node or tree ref.
+
+* no more $stop attribute for tree parsers; meaningless/useless.
+  Removed from TreeRuleReturnScope also.
+
+* rule text attribute in tree parser must pull from token buffer.
+  Makes no sense otherwise.  Added getTokenStream to TreeNodeStream
+  so rule $text attr works.  CommonTreeNodeStream etc... now let
+  you set the token stream so you can access it later from the tree parser.
+  $text is not well-defined for rules like
+
+     slist : stat+ ;
+
+  because stat is not a single node nor rooted with a single node.
+  $slist.text will get only first stat.  I need to add a warning about
+  this...
+
+* Fixed http://www.antlr.org/browse/ANTLR-76 for Java.
+  Enhanced TokenRewriteStream so it accepts any object; converts
+  to string at last second.  Allows you to rewrite with StringTemplate
+  templates now :)
+
+* added rewrite option that makes -> template rewrites do replace ops for
+  TokenRewriteStream input stream.  In output=template and rewrite=true mode
+  same as before 'cept that the parser does
+
+    ((TokenRewriteStream)input).replace(
+	      ((Token)retval.start).getTokenIndex(),
+	      input.LT(-1).getTokenIndex(),
+	      retval.st);
+
+  after each rewrite so that the input stream is altered.  Later refs to
+  $text will have rewrites.  Here's a sample test program for grammar Rew.
+
+        FileReader groupFileR = new FileReader("Rew.stg");
+        StringTemplateGroup templates = new StringTemplateGroup(groupFileR);
+        ANTLRInputStream input = new ANTLRInputStream(System.in);
+        RewLexer lexer = new RewLexer(input);
+        TokenRewriteStream tokens = new TokenRewriteStream(lexer);
+        RewParser parser = new RewParser(tokens);
+        parser.setTemplateLib(templates);
+        parser.program();
+        System.out.println(tokens.toString());
+        groupFileR.close();
+
+December 26, 2006
+
+* BaseTree.dupTree didn't dup recursively.
+
+December 24, 2006
+
+* Cleaned up some comments and removed field treeNode
+  from MismatchedTreeNodeException class.  It is "node" in
+  RecognitionException.
+
+* Changed type from Object to BitSet for expecting fields in
+  MismatchedSetException and MismatchedNotSetException
+
+* Cleaned up error printing in lexers and the messages that it creates.
+
+* Added this to TreeAdaptor:
+	/** Return the token object from which this node was created.
+	 *  Currently used only for printing an error message.
+	 *  The error display routine in BaseRecognizer needs to
+	 *  display where in the input the error occurred. If your
+	 *  tree implementation does not store information that can
+	 *  lead you to the token, you can create a token filled with
+	 *  the appropriate information and pass that back.  See
+	 *  BaseRecognizer.getErrorMessage().
+	 */
+	public Token getToken(Object t);
+
+December 23, 2006
+
+* made BaseRecognizer.displayRecognitionError nonstatic so people can
+  override it. Not sure why it was static before.
+
+* Removed state/decision message that comes out of no
+  viable alternative exceptions, as that was too much.
+  removed the decision number from the early exit exception
+  also.  During development, you can simply override
+  displayRecognitionError from BaseRecognizer to add the stuff
+  back in if you want.
+
+* made output go to an output method you can override: emitErrorMessage()
+
+* general cleanup of the error emitting code in BaseRecognizer.  Lots
+  more stuff you can override: getErrorHeader, getTokenErrorDisplay,
+  emitErrorMessage, getErrorMessage.
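+
+  For example, a sketch that collects messages instead of printing them
+  (TParser is a hypothetical generated parser):
+
+  import org.antlr.runtime.*;
+  import java.util.*;
+
+  public class QuietParser extends TParser {
+      public List<String> errors = new ArrayList<String>();
+      public QuietParser(TokenStream input) { super(input); }
+      // all error text from BaseRecognizer funnels through here
+      public void emitErrorMessage(String msg) { errors.add(msg); }
+  }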
+
+December 22, 2006
+
+* Altered Tree.Parser.matchAny() so that it skips entire trees if
+  node has children otherwise skips one node.  Now this works to
+  skip entire body of function if single-rooted subtree:
+  ^(FUNC name=ID arg=ID .)
+
+* Added "reverse index" from node to stream index.  Override
+  fillReverseIndex() in CommonTreeNodeStream if you want to change.
+  Use getNodeIndex(node) to find stream index for a specific tree node.
+  See getNodeIndex(), reverseIndex(Set tokenTypes),
+  reverseIndex(int tokenType), fillReverseIndex().  The indexing
+  costs time and memory to fill, but pulling stuff out will be lots
+  faster as it can jump from a node ptr straight to a stream index.
+
+* Added TreeNodeStream.get(index) to make it easier for interpreters to
+  jump around in tree node stream.
+
+* New CommonTreeNodeStream buffers all nodes in stream for fast jumping
+  around.  It now has push/pop methods to invoke other locations in
+  the stream for building interpreters.
+
+* Moved CommonTreeNodeStream to UnBufferedTreeNodeStream and removed
+  Iterator implementation.  moved toNodesOnlyString() to TestTreeNodeStream
+
+* [BREAKS ANY TREE IMPLEMENTATION]
+  made CommonTreeNodeStream work with any tree node type.  TreeAdaptor
+  now implements isNil so must add; trivial, but does break back
+  compatibility.
+
+December 17, 2006
+
+* Added traceIn/Out methods to recognizers so that you can override them;
+  previously they were in-line print statements. The message has also
+  been slightly improved.
+
+* Factored BuildParseTree into debug package; cleaned stuff up. Fixed
+  unit tests.
+
+December 15, 2006
+
+* [BREAKS ANY TREE IMPLEMENTATION]
+  org.antlr.runtime.tree.Tree; needed to add get/set for token start/stop
+  index so CommonTreeAdaptor can assume Tree interface not CommonTree
+  implementation.  Otherwise, no way to create your own nodes that satisfy
+  Tree because CommonTreeAdaptor was doing
+
+	public int getTokenStartIndex(Object t) {
+		return ((CommonTree)t).startIndex;
+	}
+
+  Added to Tree:
+
+	/**  What is the smallest token index (indexing from 0) for this node
+	 *   and its children?
+	 */
+	int getTokenStartIndex();
+
+	void setTokenStartIndex(int index);
+
+	/**  What is the largest token index (indexing from 0) for this node
+	 *   and its children?
+	 */
+	int getTokenStopIndex();
+
+	void setTokenStopIndex(int index);
+
+December 13, 2006
+
+* Added org.antlr.runtime.tree.DOTTreeGenerator so you can generate DOT
+  diagrams easily from trees.
+
+	CharStream input = new ANTLRInputStream(System.in);
+	TLexer lex = new TLexer(input);
+	CommonTokenStream tokens = new CommonTokenStream(lex);
+	TParser parser = new TParser(tokens);
+	TParser.e_return r = parser.e();
+	Tree t = (Tree)r.tree;
+	System.out.println(t.toStringTree());
+	DOTTreeGenerator gen = new DOTTreeGenerator();
+	StringTemplate st = gen.toDOT(t);
+	System.out.println(st);
+
+* Changed the way mark()/rewind() work in CommonTreeNode stream to mirror
+  more flexible solution in ANTLRStringStream.  Forgot to set lastMarker
+  anyway.  Now you can rewind to non-most-recent marker.
+
+December 12, 2006
+
+* Temp lexers now end in .gl (T__.gl, for example)
+
+* TreeParser suffix no longer generated for tree grammars
+
+* Defined reset for lexer, parser, tree parser; rewinds the input stream also
+
+December 10, 2006
+
+* Made Grammar.abortNFAToDFAConversion() abort in middle of a DFA.
+
+December 9, 2006
+
+* fixed bug in OrderedHashSet.add().  It didn't track elements correctly.
+
+December 6, 2006
+
+* updated build.xml for future Ant compatibility, thanks to Matt Benson.
+
+* various tests in TestRewriteTemplate and TestSyntacticPredicateEvaluation
+  were using the old 'channel' vs. new '$channel' notation.
+  TestInterpretedParsing didn't pick up an earlier change to CommonToken.
+  Reported by Matt Benson.
+
+* fixed platform dependent test failures in TestTemplates, supplied by Matt
+  Benson.
+
+November 29, 2006
+
+*  optimized semantic predicate evaluation so that p||!p yields true.
+
+November 22, 2006
+
+* fixed bug that prevented var = $rule.some_retval from working in anything
+  but the first alternative of a rule or subrule.
+
+* attribute names containing digits were not allowed, this is now fixed,
+  allowing attributes like 'name1' but not '1name1'.
+
+November 19, 2006
+
+* Removed LeftRecursionMessage and apparatus because it seems that I check
+  for left recursion upfront before analysis and everything gets specified as
+  recursion cycles at this point.
+
+November 16, 2006
+
+* TokenRewriteStream.replace was not passing programName to next method.
+
+November 15, 2006
+
+* updated DOT files for DFA generation to make smaller circles.
+
+* made epsilon edges italics in the NFA diagrams.
+
+3.0b5 - November 15, 2006
+
+The biggest thing is that your grammar file names must match the grammar name
+inside (your generated class names will also be different) and we use
+$channel=HIDDEN now instead of channel=99 inside lexer actions.
+Should be compatible other than that.   Please look at complete list of
+changes.
+
+November 14, 2006
+
+* Force token index to be -1 for CommonIndex in case not set.
+
+November 11, 2006
+
+* getUniqueID for TreeAdaptor now uses identityHashCode instead of hashCode.
+
+November 10, 2006
+
+* No grammar nondeterminism warning now when wildcard '.' is final alt.
+  Examples:
+
+	a : A | B | . ;
+
+	A : 'a'
+	  | .
+	  ;
+
+	SL_COMMENT
+	    : '//' (options {greedy=false;} : .)* '\r'? '\n'
+	    ;
+
+	SL_COMMENT2
+	    : '//' (options {greedy=false;} : 'x'|.)* '\r'? '\n'
+	    ;
+
+
+November 8, 2006
+
+* Syntactic predicates did not get hoisted properly upon non-LL(*) decisions.
+  Other hoisting issues fixed.  Cleaned up code.
+
+* Removed the failsafe that checks to see if I'm spending too much time on a
+  single DFA; I don't think we need it anymore.
+
+November 3, 2006
+
+* $text, $line, etc... were not working in assignments. Fixed and added
+  test case.
+
+* $label.text translated to label.getText in lexer even if label was on a char
+
+November 2, 2006
+
+* Added error if you don't specify what the AST type is; actions in tree
+  grammar won't work without it.
+
+  $ cat x.g
+  tree grammar x;
+  a : ID {String s = $ID.text;} ;
+
+  ANTLR Parser Generator   Early Access Version 3.0b5 (??, 2006)  1989-2006
+  error: x.g:0:0: (152) tree grammar x has no ASTLabelType option
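+
+  The fix is to declare the node type via the ASTLabelType option, e.g.
+  (a sketch):
+
+  tree grammar x;
+  options { ASTLabelType=CommonTree; }
+  a : ID {String s = $ID.text;} ;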
+
+November 1, 2006
+
+* $text, $line, etc... were not working properly within lexer rule.
+
+October 32, 2006
+
+* Finally actions now execute before dynamic scopes are popped in the
+  rule. Previously it was not possible to access the rule's scoped variables
+  in a finally action.
+
+October 29, 2006
+
+* Altered ActionTranslator to emit errors on setting read-only attributes
+  such as $start, $stop, $text in a rule. Also forbid setting any attributes
+  in rules/tokens referenced by a label or name.
+  Setting dynamic scopes's attributes and your own parameter attributes
+  is legal.
+
+October 27, 2006
+
+* Altered how ANTLR figures out what decision is associated with which
+  block of grammar.  Makes ANTLRWorks correctly find DFA for a block.
+
+October 26, 2006
+
+* Fixed bug where EOT transitions led to no NFA configs in a DFA state,
+  yielding an error in DFA table generation.
+
+* renamed action.g to ActionTranslator.g
+  the ActionTranslator class is now called ActionTranslatorLexer, as ANTLR
+  generates this classname now. Fixed rest of codebase accordingly.
+
+* added rules recognizing setting of scopes' attributes to ActionTranslator.g;
+  the Objective-C target needed access to the right-hand side of the assignment
+  in order to generate correct code.
+
+* changed ANTLRCore.sti to reflect the new mandatory templates to support the
+  above, namely the scopeSetAttributeRef, returnSetAttributeRef and
+  ruleSetPropertyRef_* templates, with the exception of ruleSetPropertyRef_text;
+  we cannot set that attribute.
+
+October 19, 2006
+
+* Fixed 2 bugs in DFA conversion that caused exceptions.
+  Altered functionality of getMinElement so it ignores elements<0.
+
+October 18, 2006
+
+* moved resetStateNumbersToBeContiguous() to after issuing of warnings;
+  an internal error in that routine should make more sense as issues
+  with decision will appear first.
+
+* fixed a cut/paste bug I introduced when I fixed the EOF in min/max
+  bug. Briefly prevented the C grammar from working.
+
+October 17, 2006
+
+* Removed a failsafe that seems to be unnecessary, one that ensured the DFA
+  didn't get too big.  It was resulting in some failures in code generation
+  that led me on quite a strange debugging trip.
+
+October 16, 2006
+
+* Use channel=HIDDEN not channel=99 to put tokens on hidden channel.
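+
+  For example, in a lexer rule (a sketch):
+
+  WS : (' '|'\t'|'\r'|'\n')+ {$channel=HIDDEN;} ;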
+
+October 12, 2006
+
+* ANTLR now has a customizable message format for errors and warnings,
+  to make it easier to fulfill requirements by IDEs and such.
+  The format to be used can be specified via the '-message-format name'
+  command line switch. The default for name is 'antlr', also available
+  at the moment is 'gnu'. This is done via StringTemplate, for details
+  on the requirements look in org/antlr/tool/templates/messages/formats/
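+
+  For example, to get GNU-style messages (T.g is a placeholder grammar):
+
+  $ java org.antlr.Tool -message-format gnu T.g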
+
+* line numbers for lexers in combined grammars are now reported correctly.
+
+September 29, 2006
+
+* ANTLRReaderStream improperly checked for end of input.
+
+September 28, 2006
+
+* For ANTLRStringStream, LA(-1) was off by one...gave you LA(-2).
+
+3.0b4 - August 24, 2006
+
+* An error is now reported when a grammar has no rules; doesn't crash now.
+
+* Token is now an interface.
+
+* remove dependence on non runtime classes in runtime package.
+
+* The filename and grammar name must be the same, Foo in Foo.g.  Generates
+  FooParser, FooLexer, ...  Combined grammar Foo generates Foo$Lexer.g which
+  generates FooLexer.java.  Tree grammars generate FooTreeParser.java.
+
+August 24, 2006
+
+* added C# target to lib, codegen, templates
+
+August 11, 2006
+
+* added tree arg to navigation methods in treeadaptor
+
+August 07, 2006
+
+* fixed bug related to (a|)+ on end of lexer rules.  crashed instead
+  of warning.
+
+* added warning that interpreter doesn't do synpreds yet
+
+* allow different source of classloader:
+ClassLoader cl = Thread.currentThread().getContextClassLoader();
+if ( cl==null ) {
+    cl = this.getClass().getClassLoader();
+}
+
+
+July 26, 2006
+
+* compressed DFA edge tables significantly.  All edge tables are
+  unique. The transition table can reuse arrays.  They look like this now:
+
+     public static readonly short[] DFA30_transition0 =
+         new short[] { 46, 46, -1, 46, 46, -1, -1, -1, -1, -1, -1, -1,...};
+     public static readonly short[] DFA30_transition1 =
+         new short[] { 21 };
+     public static readonly short[][] DFA30_transition = {
+         DFA30_transition0,
+         DFA30_transition0,
+         DFA30_transition1,
+         ...
+     };
+
+* If you defined both a label like EQ and '=', sometimes the '=' was
+  used instead of the EQ label.
+
+* made headerFile template have same arg list as outputFile for consistency
+
+* outputFile, lexer, genericParser, parser, treeParser templates
+  reference cyclicDFAs attribute which was no longer used after I
+  started the new table-based DFA.  I made cyclicDFADescriptors
+  argument to outputFile and headerFile (only).  I think this is
+  correct as only OO languages will want the DFA in the recognizer.
+  At the top level, C and friends can use it.  Changed name to use
+  cyclicDFAs again as it's a better name probably.  Removed parameter
+  from the lexer, ...  For example, my parser template says this now:
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+* made all token ref token types go thru code gen's
+  getTokenTypeAsTargetLabel()
+
+* no more computing DFA transition tables for acyclic DFA.
+
+July 25, 2006
+
+* fixed a place where I was adding syn predicates into rewrite stuff.
+
+* turned off invalid token index warning in AW support; had a problem.
+
+* bad location event generated with -debug for synpreds in autobacktrack mode.
+
+July 24, 2006
+
+* changed runtime.DFA so that it treats all chars and token types as
+  char (unsigned 16 bit int).  -1 becomes '\uFFFF' then or 65535.
+
+* changed MAX_STATE_TRANSITIONS_FOR_TABLE to be 65534 by default
+  now. This means that all states can use a table to do transitions.
+
+* was not making synpreds on (C)* type loops with backtrack=true
+
+* was copying tree stuff and actions into synpreds with backtrack=true
+
+* was making synpreds on even single alt rules / blocks with backtrack=true
+
+3.0b3 - July 21, 2006
+
+* ANTLR fails to analyze complex decisions much less frequently.  It
+  turns out that the set of decisions for which ANTLR fails (times
+  out) is the same set (so far) of non-LL(*) decisions.  Moreover, I'm
+  able to detect this situation quickly and report rather than timing
+  out. Errors look like:
+
+  java.g:468:23: [fatal] rule concreteDimensions has non-LL(*)
+    decision due to recursive rule invocations in alts 1,2.  Resolve
+    by left-factoring or using syntactic predicates with fixed k
+    lookahead or use backtrack=true option.
+
+  This message only appears when k=*.
+
+* Shortened no viable alt messages to not include decision
+  description:
+
+[compilationUnit, declaration]: line 8:8 decision=<<67:1: declaration
+: ( ( fieldDeclaration )=> fieldDeclaration | ( methodDeclaration )=>
+methodDeclaration | ( constructorDeclaration )=>
+constructorDeclaration | ( classDeclaration )=> classDeclaration | (
+interfaceDeclaration )=> interfaceDeclaration | ( blockDeclaration )=>
+blockDeclaration | emptyDeclaration );>> state 3 (decision=14) no
+viable alt; token=[@1,184:187='java',<122>,8:8]
+
+  too long and hard to read.
+
+July 19, 2006
+
+* Code gen bug: states with no emanating edges were ignored by ST.
+  Now an empty list is used.
+
+* Added grammar parameter to recognizer templates so they can access
+  properties like getName(), ...
+
+July 10, 2006
+
+* Fixed the gated pred merged state bug.  Added unit test.
+
+* added new method to Target: getTokenTypeAsTargetLabel()
+
+July 7, 2006
+
+* I was doing an AND instead of OR in the gated predicate stuff.
+  Thanks to Stephen Kou!
+
+* Reduce op for combining predicates was insanely slow sometimes and
+  didn't actually work well.  Now it's fast and works.
+
+* There is a bug in merging of DFA stop states related to gated
+  preds...turned it off for now.
+
+3.0b2 - July 5, 2006
+
+July 5, 2006
+
+* token emission not properly protected in lexer filter mode.
+
+* EOT, EOT DFA state transition tables should be init'd to -1 (only
+  was doing this for compressed tables).  Fixed.
+
+* in trace mode, exit method not shown for memoized rules
+
+* added -Xmaxdfaedges to allow you to increase number of edges allowed
+  for a single DFA state before it becomes "special" and can't fit in
+  a simple table.
+
+* Bug in tables.  Shorts are signed, so min/max tables for DFA are now
+  char[].  Bizarre.
+
+July 3, 2006
+
+* Added a method to reset the tool error state for current thread.
+  See ErrorManager.java
+
+* [Got this working properly today] backtrack mode that lets you type
+  in any old crap and ANTLR will backtrack if it can't figure out what
+  you meant.  No errors are reported by antlr during analysis.  It
+  implicitly adds a syn pred in front of every production, using them
+  only if static grammar LL(*) analysis fails.  Syn pred code is not
+  generated if the pred is not used in a decision.
+
+  This is essentially a rapid prototyping mode.
+
+* Added backtracking report to the -report option
+
+* Added NFA->DFA conversion early termination report to the -report option
+
+* Added grammar level k and backtrack options to -report
+
+* Added a dozen unit tests to test autobacktrack NFA construction.
+
+* If you are using filter mode, you must manually use option
+  memoize=true now.
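+
+  For example (a sketch; the grammar name is hypothetical):
+
+  lexer grammar FuzzyT;
+  options { filter=true; memoize=true; }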
+
+July 2, 2006
+
+* Added k=* option so you can set k=2, for example, on whole grammar,
+  but an individual decision can be LL(*).
+
+* memoize option for grammars, rules, blocks.  Remove -nomemo cmd-line option
+
+* bug in DOT generator for DFA; fixed.
+
+* runtime.DFA reported errors even when backtracking
+
+July 1, 2006
+
+* Added -X option list to help
+
+* Syn preds were being hoisted into other rules, causing lots of extra
+  backtracking.
+
+June 29, 2006
+
+* unnecessary files removed during build.
+
+* Matt Benson updated build.xml
+
+* Detecting use of synpreds in analysis now instead of codegen.  In
+  this way, I can avoid analyzing decisions in synpreds for synpreds
+  not used in a DFA for a real rule.  This is used to optimize things
+  for backtrack option.
+
+* Code gen must add _fragment or whatever to end of pred name in
+  template synpredRule to avoid having ANTLR know anything about
+  method names.
+
+* Added -IdbgST option to emit ST delimiters at start/stop of all
+  templates spit out.
+
+June 28, 2006
+
+* Tweaked message when ANTLR cannot handle analysis.
+
+3.0b1 - June 27, 2006
+
+June 24, 2006
+
+* syn preds no longer generate little static classes; they also don't
+  generate a whole bunch of extra crap in the rules built to test syn
+  preds.  Removed GrammarFragmentPointer class from runtime.
+
+June 23-24, 2006
+
+* added output option to -report output.
+
+* added profiling info:
+  Number of rule invocations in "guessing" mode
+  number of rule memoization cache hits
+  number of rule memoization cache misses
+
+* made DFA DOT diagrams go left to right not top to bottom
+
+* I try to handle recursion overflow states now by resolving these states
+  with semantic/syntactic predicates if they exist.  The DFA is then
+  deterministic rather than simply resolving by choosing the first
+  nondeterministic alt.  I used to generate errors:
+
+~/tmp $ java org.antlr.Tool -dfa t.g
+ANTLR Parser Generator   Early Access Version 3.0b2 (July 5, 2006)  1989-2006
+t.g:2:5: Alternative 1: after matching input such as A A A A A decision cannot predict what comes next due to recursion overflow to b from b
+t.g:2:5: Alternative 2: after matching input such as A A A A A decision cannot predict what comes next due to recursion overflow to b from b
+
+  Now, it uses predicates if available and emits no warnings.
+
+* made sem preds share accept states.  Previously, multiple preds in a
+decision forked new accepts each time for each nondet state.
+
+June 19, 2006
+
+* Need parens around the prediction expressions in templates.
+
+* Referencing $ID.text in an action forced bad code gen in lexer rule ID.
+
+* Fixed a bug in how predicates are collected.  The definition of
+  "last predicated alternative" was incorrect in the analysis.  Further,
+  gated predicates incorrectly missed a case where an edge should become
+  true (a tautology).
+
+* Removed an unnecessary input.consume() reference in the runtime/DFA class.
+
+June 14, 2006
+
+* -> ($rulelabel)? didn't generate proper code for ASTs.
+
+* bug in code gen (did not compile)
+a : ID -> ID
+  | ID -> ID
+  ;
+Problem is repeated ref to ID from left side.  Juergen pointed this out.
+
+* use of tokenVocab with missing file yielded exception
+
+* (A|B)=> foo yielded an exception as (A|B) is a set not a block. Fixed.
+
+* Didn't set ID1= and INT1= for this alt:
+  | ^(ID INT+ {System.out.print(\"^(\"+$ID+\" \"+$INT+\")\");})
+
+* Fixed so repeated dangling state errors only occur once like:
+t.g:4:17: the decision cannot distinguish between alternative(s) 2,1 for at least one input sequence
+
+* tracking of rule elements was on (making list defs at start of
+  method) with templates instead of just with ASTs.  Turned off.
+
+* Doesn't crash when you give it a missing file now.
+
+* -report: add output info: how many LL(1) decisions.
+
+June 13, 2006
+
+* ^(ROOT ID?) Didn't work; nor did any other nullable child list such as
+  ^(ROOT ID* INT?).  Now, I check to see if child list is nullable using
+  Grammar.LOOK() and, if so, I generate an "IF lookahead is DOWN" gate
+  around the child list so the whole thing is optional.
+
+* Fixed a bug in LOOK that made it not look through nullable rules.
+
+* Using AST suffixes or -> rewrite syntax now gives an error w/o a grammar
+  output option.  Used to crash ;)
+
+* References to EOF ended up with improper -1 refs instead of EOF in output.
+
+* didn't warn of ambig ref to $expr in rewrite; fixed.
+list
+     :	'[' expr 'for' type ID 'in' expr ']'
+	-> comprehension(expr={$expr.st},type={},list={},i={})
+	;
+
+June 12, 2006
+
+* EOF works in the parser as a token name.
+
+* Rule b:(A B?)*; didn't display properly in AW due to the way ANTLR
+  generated NFA.
+
+* "scope x;" in a rule for unknown x gives no error.  Fixed.  Added unit test.
+
+* Label type for refs to start/stop in tree parser and other parsers were
+  not used.  Lots of casting.  Ick. Fixed.
+
+* couldn't refer to $tokenlabel in isolation, but we need to so we can test if
+  something was matched.  Fixed.
+
+* Lots of little bugs fixed in $x.y, %... translation due to new
+  action translator.
+
+* Improperly tracking block nesting level; result was that you couldn't
+  see $ID in action of rule "a : A+ | ID {Token t = $ID;} | C ;"
+
+* a : ID ID {$ID.text;} ; did not get a warning about ambiguous $ID ref.
+
+* No error was found on $COMMENT.text:
+
+COMMENT
+    :   '/*' (options {greedy=false;} : . )* '*/'
+        {System.out.println("found method "+$COMMENT.text);}
+    ;
+
+  $enclosinglexerrule scope does not exist.  Use text or setText() here.
+
+June 11, 2006
+
+* Single return values are initialized now to default or to your spec.
+
+* cleaned up input stream stuff.  Added ANTLRReaderStream, ANTLRInputStream
+  and refactored.  You can now specify encodings on ANTLRFileStream (and
+  ANTLRInputStream).
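+  For example, a minimal sketch (the file name and generated lexer class
+  here are hypothetical):
+
+    CharStream input = new ANTLRFileStream("Input.java", "UTF-8");
+    MyLexer lexer = new MyLexer(input);              // generated lexer
+    CommonTokenStream tokens = new CommonTokenStream(lexer);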
+
+* You can set text local var now in a lexer rule and token gets that text.
+  start/stop indexes are still set for the token.
+
+* Changed lexer slightly.  Calling a nonfragment rule from a
+  nonfragment rule does not set the overall token.
+
+June 10, 2006
+
+* Fixed bug where unnecessary escapes yield char==0 like '\{'.
+
+* Fixed analysis bug.  This grammar didn't report a recursion warning:
+x   : y X
+    | y Y
+    ;
+y   : L y R
+    | B
+    ;
+  The DFAState.equals() method was messed up.
+
+* Added @synpredgate {...} action so you can tell ANTLR how to gate actions
+  in/out during syntactic predicate evaluation.
+
+* Fuzzy parsing should be more efficient.  It should backtrack over a rule
+  and then rewind and do it again "with feeling" to exec actions.  It was
+  actually doing it 3x not 2x.
+
+June 9, 2006
+
+* Gutted and rebuilt the action translator for $x.y, $x::y, ...
+  Uses ANTLR v3 now for the first time inside v3 source. :)
+  ActionTranslator.java
+
+* Fixed a bug where referencing a return value on a rule didn't work
+  because later a ref to that rule's predefined properties didn't
+  properly force a return value struct to be built.  Added unit test.
+
+June 6, 2006
+
+* New DFA mechanisms.  Cyclic DFA are implemented as state tables,
+  encoded via strings as java cannot handle large static arrays :(
+  States with edges emanating that have predicates are specially
+  treated.  A method is generated to do these states.  The DFA
+  simulation routine uses the "special" array to figure out if the
+  state is special.  See March 25, 2006 entry for description:
+  http://www.antlr.org/blog/antlr3/codegen.tml.  analysis.DFA now has
+  all the state tables generated for code gen.  CyclicCodeGenerator.java
+  disappeared as it's unneeded code. :)
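+
+  Generated Java later follows this pattern (a sketch; the DFA name and the
+  packed table value are illustrative, not taken from a real grammar):
+
+    static final String DFA9_eotS = "\4\uffff";
+    static final short[] DFA9_eot = DFA.unpackEncodedString(DFA9_eotS);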
+
+* Internal general clean up of the DFA.states vs uniqueStates thing.
+  Fixed lookahead decisions no longer fill uniqueStates.  Waste of
+  time.  Also noted that when adding sem pred edges, I didn't check
+  for state reuse.  Fixed.
+
+June 4, 2006
+
+* When resolving ambig DFA states predicates, I did not add the new states
+  to the list of unique DFA states.  No observable effect on output except
+  that DFA state numbers were not always contiguous for predicated decisions.
+  I needed this fix for new DFA tables.
+
+3.0ea10 - June 2, 2006
+
+June 2, 2006
+
+* Improved grammar stats and added syntactic pred tracking.
+
+June 1, 2006
+
+* Due to a type mismatch, the DebugParser.recoverFromMismatchedToken()
+  method was not called.  Debug events for mismatched token error
+  notification were probably not sent to ANTLRWorks.
+
+* Added getBacktrackingLevel() for any recognizer; needed for profiler.
+
+* Only writes profiling data for antlr grammar analysis with -profile set
+
+* Major update and bug fix to (runtime) Profiler.
+
+May 27, 2006
+
+* Added Lexer.skip() to force the lexer to ignore the current token and look
+  for another; no token is created for the current rule and none is passed on
+  to the parser (or other consumer of the lexer).
+
+* Parsers are much faster now.  I removed use of java.util.Stack for pushing
+  follow sets and use a hardcoded array stack instead.  Dropped from
+  5900ms to 3900ms for parse+lex time parsing entire java 1.4.2 source.  Lex
+  time alone was about 1500ms.  Just looking at parse time, we get about 2x
+  speed improvement. :)
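+
+  A rough Java sketch of the idea (field and method names are illustrative,
+  not necessarily the exact runtime names):
+
+    protected BitSet[] following = new BitSet[100]; // hardcoded array stack
+    protected int followingStackPointer = -1;
+
+    protected void pushFollow(BitSet fset) {
+        if (followingStackPointer + 1 >= following.length) {
+            BitSet[] f = new BitSet[following.length * 2]; // grow on demand
+            System.arraycopy(following, 0, f, 0, following.length);
+            following = f;
+        }
+        following[++followingStackPointer] = fset;
+    }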
+
+May 26, 2006
+
+* Fixed NFA construction so it generates NFA for (A*)* such that ANTLRWorks
+  can display it properly.
+
+May 25, 2006
+
+* added abort method to Grammar so AW can terminate the conversion if it's
+  taking too long.
+
+May 24, 2006
+
+* added method to get left recursive rules from grammar without doing full
+  grammar analysis.
+
+* analysis, code gen not attempted if serious error (like
+  left-recursion or missing rule definition) occurred while reading
+  the grammar in and defining symbols.
+
+* added amazing optimization; reduces analysis time by 90% for java
+  grammar; simple IF statement addition!
+
+3.0ea9 - May 20, 2006
+
+* added global k value for grammar to limit lookahead for all decisions unless
+overridden in a particular decision.
+
+* added failsafe so that any decision taking longer than 2 seconds to create
+the DFA will fall back on k=1.  Use -ImaxtimeforDFA n (in ms) to set the time.
+
+* added an option (turned off for now) to use multiple threads to
+perform grammar analysis.  Not much help on a 2-CPU computer as
+garbage collection seems to peg the 2nd CPU already. :( Gotta wait for
+a 4 CPU box ;)
+
+* switched from #src to // $ANTLR src directive.
+
+* CommonTokenStream.getTokens() looked past end of buffer sometimes. fixed.
+
+* unicode literals didn't really work in DOT output and generated code. fixed.
+
+* fixed the unit test rig so it compiles nicely with Java 1.5
+
+* Added ant build.xml file (reads build.properties file)
+
+* predicates sometimes failed to compile/eval properly due to missing (...)
+  in IF expressions.  Forced (..)
+
+* (...)? with only one alt were not optimized.  Was:
+
+        // t.g:4:7: ( B )?
+        int alt1=2;
+        int LA1_0 = input.LA(1);
+        if ( LA1_0==B ) {
+            alt1=1;
+        }
+        else if ( LA1_0==-1 ) {
+            alt1=2;
+        }
+        else {
+            NoViableAltException nvae =
+                new NoViableAltException("4:7: ( B )?", 1, 0, input);
+            throw nvae;
+        }
+
+is now:
+
+        // t.g:4:7: ( B )?
+        int alt1=2;
+        int LA1_0 = input.LA(1);
+        if ( LA1_0==B ) {
+            alt1=1;
+        }
+
+  Smaller, faster and more readable.
+
+* Allow manual init of return values now:
+  functionHeader returns [int x=3*4, char (*f)()=null] : ... ;
+
+* Added optimization for DFAs that fixed a codegen bug with rules in lexer:
+   EQ			 : '=' ;
+   ASSIGNOP		 : '=' | '+=' ;
+  EQ is a subset of the other rule.  It did not give an error, which is
+  correct, but it generated bad code.
+
+* ANTLR was sending column not char position to ANTLRWorks.
+
+* Bug fix: location 0, 0 emitted for synpreds and empty alts.
+
+* debugging event handshake now sends the grammar file name.  Added getGrammarFileName() to recognizers.  Java.stg generates it:
+
+    public String getGrammarFileName() { return "<fileName>"; }
+
+* tree parsers can do arbitrary lookahead now including backtracking.  I
+  updated CommonTreeNodeStream.
+
+* added events for debugging tree parsers:
+
+	/** Input for a tree parser is an AST, but we know nothing for sure
+	 *  about a node except its type and text (obtained from the adaptor).
+	 *  This is the analog of the consumeToken method.  Again, the ID is
+	 *  the hashCode usually of the node so it only works if hashCode is
+	 *  not implemented.
+	 */
+	public void consumeNode(int ID, String text, int type);
+
+	/** The tree parser looked ahead */
+	public void LT(int i, int ID, String text, int type);
+
+	/** The tree parser has popped back up from the child list to the
+	 *  root node.
+	 */
+	public void goUp();
+
+	/** The tree parser has descended to the first child of the current
+	 *  root node.
+	 */
+	public void goDown();
+
+* Added DebugTreeNodeStream and DebugTreeParser classes
+
+* Added ctor because the debug tree node stream will need to ask questions about nodes and since nodes are just Object, it needs an adaptor to decode the nodes and get text/type info for the debugger.
+
+public CommonTreeNodeStream(TreeAdaptor adaptor, Tree tree);
+
+* added getter to TreeNodeStream:
+	public TreeAdaptor getTreeAdaptor();
+
+* Implemented getText/getType in CommonTreeAdaptor.
+
+* Added TraceDebugEventListener that can dump all events to stdout.
+
+* I broke down and made Tree implement getText
+
+* tree rewrites now gen location debug events.
+
+* added AST debug events to listener; added blank listener for convenience
+
+* updated debug events to send begin/end backtrack events for debugging
+
+* with a : (b->b) ('+' b -> ^(PLUS $a b))* ; you get b[0] each time as
+  there is no loop in rewrite rule itself.  Need to know context that
+  the -> is inside the rule and hence b means last value of b not all
+  values.
+
+* Bug in TokenRewriteStream; ops at indexes < start index blocked proper op.
+
+* Actions in ST rewrites "-> ({$op})()" were not translated
+
+* Added new action name:
+
+@rulecatch {
+catch (RecognitionException re) {
+    reportError(re);
+    recover(input,re);
+}
+catch (Throwable t) {
+    System.err.println(t);
+}
+}
+Overrides rule catch stuff.
+
+* Isolated $ refs caused exception
+
+3.0ea8 - March 11, 2006
+
+* added @finally {...} action like @init for rules.  Executes in
+  finally block (java target) after all other stuff like rule memoization.
+  No code changes needed; ST just refs a new action:
+      <ruleDescriptor.actions.finally>
+
+* hideous bug fixed: PLUS='+' didn't result in '+' rule in lexer
+
+* TokenRewriteStream didn't do toString() right when no rewrites had been done.
+
+* lexer errors in interpreter were not printed properly
+
+* bitsets are dumped in hex not decimal now for FOLLOW sets
+
+* /* epsilon */ is not printed now when printing out grammars with empty alts
+
+* Fixed another bug in tree rewrite stuff where it was checking that elements
+  had at least one element.  Strange...commented out for now to see if I can remember what's up.
+
+* Tree rewrites had problems when you didn't have x+=FOO variables.  Rules
+  like this work now:
+
+  a : (x=ID)? y=ID -> ($x $y)?;
+
+* filter=true for lexers turns on k=1 and backtracking for every token
+  alternative.  Put the rules in priority order.
+
+* added getLine() etc... to Tree to support better error reporting for
+  trees.  Added MismatchedTreeNodeException.
+
+* $templates::foo() is gone.  added % as special template symbol.
+  %foo(a={},b={},...) ctor (even shorter than $templates::foo(...))
+  %({name-expr})(a={},...) indirect template ctor reference
+
+  The above are parsed by antlr.g and translated by codegen.g
+  The following are parsed manually here:
+
+  %{string-expr} anonymous template from string expr
+  %{expr}.y = z; template attribute y of StringTemplate-typed expr to z
+  %x.y = z; set template attribute y of x (always set never get attr)
+            to z [languages like python without ';' must still use the
+            ';' which the code generator is free to remove during code gen]
+
+* -> ({expr})(a={},...) notation for indirect template rewrite.
+  expr is the name of the template.
+
+* $x[i]::y and $x[-i]::y notation for accessing absolute scope stack
+  indexes and relative negative scopes.  $x[-1]::y is the y attribute
+  of the previous scope (stack top - 1).
+
+* filter=true mode for lexers; can do this now...upon mismatch, just
+  consumes a char and tries again:
+lexer grammar FuzzyJava;
+options {filter=true;}
+
+FIELD
+    :   TYPE WS? name=ID WS? (';'|'=')
+        {System.out.println("found var "+$name.text);}
+    ;
+
+* refactored char streams so ANTLRFileStream is now a subclass of
+  ANTLRStringStream.
+
+* char streams for the lexer now allow nested backtracking in the lexer.
+
+* added TokenLabelType for lexer/parser for all token labels
+
+* line numbers for error messages were not updated properly in antlr.g
+  for strings, char literals and <<...>>
+
+* init action in lexer rules was before the type,start,line,... decls.
+
+* Tree grammars can now specify output; I've only tested output=template
+  though.
+
+* You can reference EOF now in the parser and lexer.  It's just token type
+  or char value -1.
+
+* Bug fix: $ID refs in the *lexer* were all messed up.  Cleaned up the
+  set of properties available...
+
+* Bug fix: .st not found in rule ref when rule has scope:
+field
+scope {
+	StringTemplate funcDef;
+}
+    :   ...
+	{$field::funcDef = $field.st;}
+    ;
+it gets field_stack.st instead
+
+* return in backtracking must return retval or null if return value.
+
+* $property within a rule now works like $text, $st, ...
+
+* AST/Template Rewrites were not gated by backtracking==0 so they
+  executed even when guessing.  Auto AST construction is now gated also.
+
+* CommonTokenStream was somehow returning tokens not text in toString()
+
+* added useful methods to runtime.BitSet and also to CommonToken so you can
+  update the text.  Added nice Token stream method:
+
+  /** Given a start and stop index, return a List of all tokens in
+   *  the token type BitSet.  Return null if no tokens were found.  This
+   *  method looks at both on and off channel tokens.
+   */
+  public List getTokens(int start, int stop, BitSet types);
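+
+  For example (a sketch; the token type ID and the index range are
+  illustrative):
+
+    BitSet types = BitSet.of(ID);   // org.antlr.runtime.BitSet
+    List idTokens = tokens.getTokens(0, tokens.size()-1, types);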
+
+* literals are now passed in the .tokens files so you can ref them in
+  tree parsers, for example.
+
+* added basic exception handling; no labels, just general catches:
+
+a : {;}A | B ;
+        exception
+                catch[RecognitionException re] {
+                        System.out.println("recog error");
+                }
+                catch[Exception e] {
+                        System.out.println("error");
+                }
+
+* Added method to TokenStream:
+  public String toString(Token start, Token stop);
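+
+  For example, to grab the original source text spanned by two tokens
+  (a sketch using a CommonTokenStream named tokens):
+
+    Token first = tokens.get(0);
+    Token last  = tokens.get(tokens.size()-1);
+    String matched = tokens.toString(first, last);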
+
+* antlr generates #src lines in lexer grammars generated from combined grammars
+  so error messages refer to original file.
+
+* lexers generated from combined grammars now use the original formatting.
+
+* predicates have $x.y stuff translated now.  Warning: predicates might be
+  hoisted out of context.
+
+* return values in return val structs are now public.
+
+* output=template with return values on rules was broken.  I assume return values with ASTs were broken too.  Fixed.
+
+3.0ea7 - December 14, 2005
+
+* Added -print option to print out grammar w/o actions
+
+* Renamed BaseParser to be BaseRecognizer and even made Lexer derive from
+  this; nice as it now shares backtracking support code.
+
+* Added syntactic predicates (...)=>.  See December 4, 2005 entry:
+
+  http://www.antlr.org/blog/antlr3/lookahead.tml
+
+  Note that we have a new option for turning off rule memoization during
+  backtracking:
+
+  -nomemo        when backtracking don't generate memoization code
+
+* Predicates are now tested in order that you specify the alts.  If you
+  leave the last alt "naked" (w/o pred), it will assume a true pred rather
+  than union of other preds.
+
+* Added gated predicates "{p}?=>" that literally turn off a production whereas
+disambiguating predicates are only hoisted into the predictor when syntax alone
+is not sufficient to uniquely predict alternatives.
+
+A : {p}?  => "a" ;
+B : {!p}? => ("a"|"b")+ ;
+
+* bug fixed related to predicates in predictor
+lexer grammar w;
+A : {p}? "a" ;
+B : {!p}? ("a"|"b")+ ;
+DFA is correct.  A state splits for input "a" on the pred.
+Generated code though was hosed.  No pred tests in prediction code!
+I added testLexerPreds() and others in TestSemanticPredicateEvaluation.java
+
+* added execAction template in case we want to do something in front of
+  each action execution or something.
+
+* left-recursive cycles from rules w/o decisions were not detected.
+
+* undefined lexer rules were not announced! fixed.
+
+* unreachable messages for Tokens rule now indicate rule name not alt. E.g.,
+
+  Ruby.lexer.g:24:1: The following token definitions are unreachable: IVAR
+
+* nondeterminism warnings improved for Tokens rule:
+
+Ruby.lexer.g:10:1: Multiple token rules can match input such as ""0".."9"": INT, FLOAT
+As a result, tokens(s) FLOAT were disabled for that input
+
+
+* DOT diagrams didn't show escaped char properly.
+
+* Char/string literals are now all 'abc' not "abc".
+
+* action syntax changed "@scope::actionname {action}" where scope defaults
+  to "parser" if parser grammar or combined grammar, "lexer" if lexer grammar,
+  and "treeparser" if tree grammar.  The code generation targets decide
+  what scopes are available.  Each "scope" yields a hashtable for use in
+  the output templates.  The scopes full of actions are sent to all output
+  file templates (currently headerFile and outputFile) as attribute actions.
+  Then you can reference <actions.scope> to get the map of actions associated
+  with scope and <actions.parser.header> to get the parser's header action
+  for example.  This should be very flexible.  The target should only have
+  to define which scopes are valid, but the action names should be variable
+  so we don't have to recompile ANTLR to add actions to code gen templates.
+
+  grammar T;
+  options {language=Java;}
+  @header { package foo; }
+  @parser::stuff { int i; } // names within scope not checked; target dependent
+  @members { int i; }
+  @lexer::header {head}
+  @lexer::members { int j; }
+  @headerfile::blort {...} // error: this target doesn't have headerfile
+  @treeparser::members {...} // error: this is not a tree parser
+  a
+  @init {int i;}
+    : ID
+    ;
+  ID : 'a'..'z';
+
+  For now, the Java target uses members and header as valid names.  Within a
+  rule, the init action name is valid.
+
+* changed $dynamicscope.value to $dynamicscope::value even if value is defined
+  in same rule such as $function::name where rule function defines name.
+
+* $dynamicscope gets you the stack
+
+* rule scopes go like this now:
+
+  rule
+  scope {...}
+  scope slist,Symbols;
+  	: ...
+	;
+
+* Created RuleReturnScope as a generic rule return value.  Makes it easier
+  to do this:
+    RuleReturnScope r = parser.program();
+    System.out.println(r.getTemplate().toString());
+
+* $template, $tree, $start, etc...
+
+* $r.x in current rule.  $r is ignored as fully-qualified name. $r.start works too
+
+* added warning about $r referring to both return value of rule and dynamic scope of rule
+
+* integrated StringTemplate in a very simple manner
+
+Syntax:
+-> template(arglist) "..."
+-> template(arglist) <<...>>
+-> namedTemplate(arglist)
+-> {free expression}
+-> // empty
+
+Predicate syntax:
+a : A B -> {p1}? foo(a={$A.text})
+        -> {p2}? foo(a={$B.text})
+        -> // return nothing
+
+An arg list is just a list of template attribute assignments to actions in curlies.
+
+There is a setTemplateLib() method for you to use with named template rewrites.
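+
+For example (a sketch; the group file name and generated parser class are
+hypothetical):
+
+    StringTemplateGroup templates =
+        new StringTemplateGroup(new FileReader("java.stg"),
+                                AngleBracketTemplateLexer.class);
+    MyParser parser = new MyParser(tokens);
+    parser.setTemplateLib(templates);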
+
+Use a new option:
+
+grammar t;
+options {output=template;}
+...
+
+This all should work for tree grammars too, but I'm still testing.
+
+* fixed bugs where strings were improperly escaped in exceptions, comments, etc.  For example, newlines came out as actual newlines rather than as the escaped version.
+
+3.0ea6 - November 13, 2005
+
+* turned off -debug/-profile, which was on by default
+
+* completely refactored the output templates; added some missing templates.
+
+* dramatically improved infinite recursion error messages (actually,
+  left-recursion was never even printed out before).
+
+* wasn't printing dangling state messages when it reanalyzes with k=1.
+
+* fixed a nasty bug in the analysis engine dealing with infinite recursion.
+  Spent all day thinking about it and cleaned up the code dramatically.
+  Bug fixed and software is more powerful and I understand it better! :)
+
+* improved verbose DFA nodes; organized by alt
+
+* got much better random phrase generation.  For example:
+
+ $ java org.antlr.tool.RandomPhrase simple.g program
+ int Ktcdn ';' method wh '(' ')' '{' return 5 ';' '}'
+
+* empty rules like "a : ;" generated code that didn't compile due to
+  try/catch for RecognitionException.  Generated code couldn't possibly
+  throw that exception.
+
+* when printing out a grammar, such as in comments in generated code,
+  ANTLR didn't print ast suffix stuff back out for literals.
+
+* This never exited loop:
+  DATA : (options {greedy=false;}: .* '\n' )* '\n' '.' ;
+  and now it works due to new default nongreedy .*  Also this works:
+  DATA : (options {greedy=false;}: .* '\n' )* '.' ;
+
+* Dot star ".*" syntax didn't work; in the lexer it is nongreedy by
+  default.  In the parser it is greedy but also k=1 by default.  Added
+  unit tests.  Added a blog entry to describe it.
+
+* ~T where T is the only token yielded an empty set but no error
+
+* Used to generate unreachable message here:
+
+  parser grammar t;
+  a : ID a
+    | ID
+    ;
+
+  z.g:3:11: The following alternatives are unreachable: 2
+
+  In fact it should really be an error; now it generates:
+
+  no start rule in grammar t (no rule can obviously be followed by EOF)
+
+  Per next change item, ANTLR cannot know that EOF follows rule 'a'.
+
+* added error message indicating that ANTLR can't figure out what your
+  start rule is.  Required to properly generate code in some cases.
+
+* validating semantic predicates now work (if they are false, they
+  throw a new FailedPredicateException).
+
+* two hideous bug fixes in the IntervalSet, which made analysis go wrong
+  in a few cases.  Thanks to Oliver Zeigermann for finding lots of bugs
+  and making suggested fixes (including the next two items)!
+
+* cyclic DFAs are now nonstatic and hence can access instance variables
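+
+  In the generated Java this shows up as a (non-static) inner class per
+  cyclic decision (a sketch of the pattern; the decision number is
+  illustrative):
+
+    protected DFA9 dfa9 = new DFA9(this);
+
+    class DFA9 extends DFA {
+        public DFA9(BaseRecognizer recognizer) {
+            this.recognizer = recognizer;
+            // decision number and transition tables are assigned here
+        }
+    }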
+
+* labels are now allowed on lexical elements (in the lexer)
+
+* added some internal debugging options
+
+* ~'a'* and ~('a')* were not working properly; refactored antlr.g grammar
+
+3.0ea5 - July 5, 2005
+
+* Using '\n' in a parser grammar resulted in a nonescaped version of '\n' in the token names table making compilation fail.  I fixed this by reorganizing/cleaning up portion of ANTLR that deals with literals.  See comment org.antlr.codegen.Target.
+
+* Target.getMaxCharValue() did not use the appropriate max value constant.
+
+* ALLCHAR was a constant when it should use the Target max value def.  set complement for wildcard also didn't use the Target def.  Generally cleaned up the max char value stuff.
+
+* Code gen didn't deal with ASTLabelType properly...I think even the 3.0ea7 example tree parser was broken! :(
+
+* Added a few more unit tests dealing with escaped literals
+
+3.0ea4 - June 29, 2005
+
+* tree parsers work; added CommonTreeNodeStream.  See simplecTreeParser
+  example in examples-v3 tarball.
+
+* added superClass and ASTLabelType options
+
+* refactored Parser to have a BaseParser and added TreeParser
+
+* bug fix: actions being dumped in description strings; compile errors
+  resulted
+
+3.0ea3 - June 23, 2005
+
+Enhancements
+
+* Automatic tree construction operators are in: ! ^ ^^
+
+* Tree construction rewrite rules are in
+	-> {pred1}? rewrite1
+	-> {pred2}? rewrite2
+	...
+	-> rewriteN
+
+  The rewrite rules may be elements like ID, expr, $label, {node expr}
+  and trees ^( <root> <children> ).  You can have (...)?, (...)*, (...)+
+  subrules as well.
+
+  You may have rewrites in subrules not just at outer level of rule, but
+  any -> rewrite forces auto AST construction off for that alternative
+  of that rule.
+
+  To avoid cycles, copy semantics are used:
+
+  r : INT -> INT INT ;
+
+  means make two new nodes from the same INT token.
+
+  Repeated references to a rule element imply a copy for at least one
+  tree:
+
+  a : atom -> ^(atom atom) ; // NOT CYCLE! (dup atom tree)
+
+* $ruleLabel.tree refers to tree created by matching the labeled element.
+
+* A description of the blocks/alts is generated as a comment in output code
+
+* A timestamp / signature is put at top of each generated code file
+
+3.0ea2 - June 12, 2005
+
+Bug fixes
+
+* Some error messages were missing the stackTrace parameter
+
+* Removed the file locking mechanism as it's not cross platform
+
+* Some absolute vs relative path name problems with writing output
+  files.  Rules are now more concrete.  -o option takes precedence
+  // -o /tmp /var/lib/t.g => /tmp/T.java
+  // -o subdir/output /usr/lib/t.g => subdir/output/T.java
+  // -o . /usr/lib/t.g => ./T.java
+  // -o /tmp subdir/t.g => /tmp/subdir/t.g
+  // If they didn't specify a -o dir so just write to location
+  // where grammar is, absolute or relative
+
+* does error checking on unknown option names now
+
+* Using just language code not locale name for error message file.  I.e.,
+  the default (and for any English speaking locale) is en.stg not en_US.stg
+  anymore.
+
+* The error manager now asks the Tool to panic rather than simply doing
+  a System.exit().
+
+* Lots of refactoring concerning grammar, rule, subrule options.  Now
+  detects invalid options.
+
+3.0ea1 - June 1, 2005
+
+Initial early access release
diff --git a/tool/LICENSE.txt b/tool/LICENSE.txt
new file mode 100644
index 0000000..a5216ef
--- /dev/null
+++ b/tool/LICENSE.txt
@@ -0,0 +1,26 @@
+[The "BSD license"]
+Copyright (c) 2013 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/antlr-3.4/tool/antlr.config b/tool/antlr.config
similarity index 100%
copy from antlr-3.4/tool/antlr.config
copy to tool/antlr.config
diff --git a/tool/pom.xml b/tool/pom.xml
new file mode 100644
index 0000000..cc5134d
--- /dev/null
+++ b/tool/pom.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>antlr</artifactId>
+    <packaging>jar</packaging>
+    <name>ANTLR 3 Tool</name>
+    <description>The ANTLR 3 tool.</description>
+
+  <!--
+
+    Inherit from the ANTLR master pom, which tells us what
+    version we are and allows us to inherit dependencies
+    and so on.
+
+    -->
+    <parent>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr-master</artifactId>
+        <version>3.5.2</version>
+    </parent>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr-runtime</artifactId>
+            <version>${project.version}</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>ST4</artifactId>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>stringtemplate</artifactId>
+            <scope>compile</scope>
+            <optional>true</optional>
+        </dependency>
+
+    </dependencies>
+  <!--
+
+    Tell Maven which other artifacts we need in order to
+    build, run and test the ANTLR Tool. The ANTLR Tool uses earlier versions
+    of ANTLR at runtime (for the moment), uses the current
+    released version of ANTLR String template, but obviously is
+    reliant on the latest snapshot of the runtime, which will either be
+    taken from the antlr-snapshot repository, or your local .m2
+    repository if you built and installed that locally.
+
+    -->
+
+
+    <build>
+
+        <plugins>
+
+            <plugin>
+                <groupId>org.antlr</groupId>
+                <artifactId>antlr3-maven-plugin</artifactId>
+                <version>3.5</version>
+                <configuration>
+                    <libDirectory>target/generated-sources/antlr/org/antlr/grammar/v3</libDirectory>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>antlr</goal>
+                        </goals>
+                    </execution>
+                </executions>
+
+            </plugin>
+
+        </plugins>
+
+
+    </build>
+</project>
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLR.g b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLR.g
new file mode 100644
index 0000000..a501458
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLR.g
@@ -0,0 +1,1372 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2011 Terence Parr
+ All rights reserved.
+
+ Grammar conversion to ANTLR v3:
+ Copyright (c) 2011 Sam Harwell
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+	notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+	derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Read in an ANTLR grammar and build an AST.  Try not to do
+ *  any actions, just build the tree.
+ *
+ *  The phases are:
+ *
+ *		antlr.g (this file)
+ *		assign.types.g
+ *		define.g
+ *		buildnfa.g
+ *		antlr.print.g (optional)
+ *		codegen.g
+ *
+ *  Terence Parr
+ *  University of San Francisco
+ *  2005
+ */
+
+grammar ANTLR;
+
+options
+{
+	language=Java;
+	output=AST;
+	ASTLabelType=GrammarAST;
+}
+
+tokens
+{
+	//OPTIONS='options';
+	//TOKENS='tokens';
+	LEXER='lexer';
+	PARSER='parser';
+	CATCH='catch';
+	FINALLY='finally';
+	GRAMMAR='grammar';
+	PRIVATE='private';
+	PROTECTED='protected';
+	PUBLIC='public';
+	RETURNS='returns';
+	THROWS='throws';
+	TREE='tree';
+
+	RULE;
+	PREC_RULE;
+	RECURSIVE_RULE_REF; // flip recursive RULE_REF to RECURSIVE_RULE_REF in prec rules
+	BLOCK;
+	OPTIONAL;
+	CLOSURE;
+	POSITIVE_CLOSURE;
+	SYNPRED;
+	RANGE;
+	CHAR_RANGE;
+	EPSILON;
+	ALT;
+	EOR;
+	EOB;
+	EOA; // end of alt
+	ID;
+	ARG;
+	ARGLIST;
+	RET;
+	LEXER_GRAMMAR;
+	PARSER_GRAMMAR;
+	TREE_GRAMMAR;
+	COMBINED_GRAMMAR;
+	INITACTION;
+	FORCED_ACTION; // {{...}} always exec even during syn preds
+	LABEL; // $x used in rewrite rules
+	TEMPLATE;
+	SCOPE='scope';
+	IMPORT='import';
+	GATED_SEMPRED; // {p}? =>
+	SYN_SEMPRED; // (...) =>   it's a manually-specified synpred converted to sempred
+	BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
+	FRAGMENT='fragment';
+	DOT;
+	REWRITES;
+}
+
+@lexer::header {
+package org.antlr.grammar.v3;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+}
+
+@parser::header {
+package org.antlr.grammar.v3;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarAST;
+import org.antlr.misc.IntSet;
+import org.antlr.tool.Rule;
+}
+
+@lexer::members {
+public boolean hasASTOperator = false;
+private String fileName;
+
+public String getFileName() {
+    return fileName;
+}
+
+public void setFileName(String value) {
+    fileName = value;
+}
+
+@Override
+public Token nextToken() {
+	Token token = super.nextToken();
+	while (token.getType() == STRAY_BRACKET) {
+		ErrorManager.syntaxError(
+			ErrorManager.MSG_SYNTAX_ERROR,
+			null,
+			token,
+			"antlr: dangling ']'? make sure to escape with \\]",
+			null);
+
+		// skip this token
+		token = super.nextToken();
+	}
+
+	return token;
+}
+}
+
+@parser::members {
+protected String currentRuleName = null;
+protected GrammarAST currentBlockAST = null;
+protected boolean atTreeRoot; // are we matching a tree root in tree grammar?
+
+public static ANTLRParser createParser(TokenStream input) {
+    ANTLRParser parser = new ANTLRParser(input);
+    parser.adaptor = new grammar_Adaptor(parser);
+    return parser;
+}
+
+private static class GrammarASTErrorNode extends GrammarAST {
+    public IntStream input;
+    public Token start;
+    public Token stop;
+    public RecognitionException trappedException;
+
+    public GrammarASTErrorNode(TokenStream input, Token start, Token stop, RecognitionException e) {
+        super(stop);
+        //Console.Out.WriteLine( "start: " + start + ", stop: " + stop );
+        if ( stop == null ||
+             ( stop.getTokenIndex() < start.getTokenIndex() &&
+              stop.getType() != Token.EOF) ) {
+            // sometimes resync does not consume a token (when LT(1) is
+            // in the follow set), so stop will be one to the left of start; adjust.
+            // Also handle case where start is the first token and no token
+            // is consumed during recovery; LT(-1) will return null.
+            stop = start;
+        }
+        this.input = input;
+        this.start = start;
+        this.stop = stop;
+        this.trappedException = e;
+    }
+
+    @Override
+    public boolean isNil() { return false; }
+
+    @Override
+    public String getText() {
+        String badText = null;
+        if (start != null) {
+            int i = start.getTokenIndex();
+            int j = stop.getTokenIndex();
+            if (stop.getType() == Token.EOF) {
+                j = input.size();
+            }
+            badText = ((TokenStream)input).toString(i, j);
+        } else {
+            // people should subclass if they alter the tree type so this
+            // next one is for sure correct.
+            badText = "<unknown>";
+        }
+        return badText;
+    }
+
+    @Override
+    public void setText(String value) { }
+
+    @Override
+    public int getType() { return Token.INVALID_TOKEN_TYPE; }
+
+    @Override
+    public void setType(int value) { }
+
+    @Override
+    public String toString()
+    {
+        if (trappedException instanceof MissingTokenException)
+        {
+            return "<missing type: " +
+                   ( (MissingTokenException)trappedException ).getMissingType() +
+                   ">";
+        } else if (trappedException instanceof UnwantedTokenException) {
+            return "<extraneous: " +
+                   ( (UnwantedTokenException)trappedException ).getUnexpectedToken() +
+                   ", resync=" + getText() + ">";
+        } else if (trappedException instanceof MismatchedTokenException) {
+            return "<mismatched token: " + trappedException.token + ", resync=" + getText() + ">";
+        } else if (trappedException instanceof NoViableAltException) {
+            return "<unexpected: " + trappedException.token +
+                   ", resync=" + getText() + ">";
+        }
+        return "<error: " + getText() + ">";
+    }
+}
+
+static class grammar_Adaptor extends CommonTreeAdaptor {
+    ANTLRParser _outer;
+
+    public grammar_Adaptor(ANTLRParser outer) {
+        _outer = outer;
+    }
+
+    @Override
+    public Object create(Token payload) {
+        GrammarAST t = new GrammarAST( payload );
+        if (_outer != null)
+            t.enclosingRuleName = _outer.currentRuleName;
+        return t;
+    }
+
+    @Override
+    public Object errorNode(TokenStream input, Token start, Token stop, RecognitionException e) {
+        GrammarAST t = new GrammarASTErrorNode(input, start, stop, e);
+        if (_outer != null)
+            t.enclosingRuleName = _outer.currentRuleName;
+        return t;
+    }
+}
+
+private Grammar grammar;
+private int grammarType;
+private String fileName;
+
+public Grammar getGrammar() {
+    return grammar;
+}
+
+public void setGrammar(Grammar value) {
+    grammar = value;
+}
+
+public int getGrammarType() {
+    return grammarType;
+}
+
+public void setGrammarType(int value) {
+    grammarType = value;
+}
+
+public String getFileName() {
+    return fileName;
+}
+
+public void setFileName(String value) {
+    fileName = value;
+}
+
+private final int LA(int i) { return input.LA( i ); }
+
+private final Token LT(int k) { return input.LT( k ); }
+
+/*partial void createTreeAdaptor(ref ITreeAdaptor adaptor)
+{
+    adaptor = new grammar_Adaptor(this);
+}*/
+
+protected GrammarAST setToBlockWithSet(GrammarAST b) {
+    /*
+     * alt = ^(ALT["ALT"] {b} EOA["EOA"])
+     * prefixWithSynpred( alt )
+     * return ^(BLOCK["BLOCK"] {alt} EOB["<end-of-block>"])
+     */
+    GrammarAST alt = (GrammarAST)adaptor.create(ALT, "ALT");
+    adaptor.addChild(alt, b);
+    adaptor.addChild(alt, adaptor.create(EOA, "<end-of-alt>"));
+
+    prefixWithSynPred(alt);
+
+    GrammarAST block = (GrammarAST)adaptor.create(BLOCK, b.getToken(), "BLOCK");
+    adaptor.addChild(block, alt);
+    adaptor.addChild(alt, adaptor.create(EOB, "<end-of-block>"));
+
+    return block;
+}
+
+/** Create a copy of the alt and make it into a BLOCK; all actions,
+ *  labels, tree operators, rewrites are removed.
+ */
+protected GrammarAST createBlockFromDupAlt(GrammarAST alt) {
+    /*
+     * ^(BLOCK["BLOCK"] {GrammarAST.dupTreeNoActions(alt)} EOB["<end-of-block>"])
+     */
+    GrammarAST nalt = GrammarAST.dupTreeNoActions(alt, null);
+
+    GrammarAST block = (GrammarAST)adaptor.create(BLOCK, alt.getToken(), "BLOCK");
+    adaptor.addChild( block, nalt );
+    adaptor.addChild( block, adaptor.create( EOB, "<end-of-block>" ) );
+
+    return block;
+}
+
+/** Rewrite alt to have a synpred as first element;
+ *  (xxx)=&gt;xxx
+ *  but only if they didn't specify one manually.
+ */
+protected void prefixWithSynPred( GrammarAST alt ) {
+    // if they want backtracking and it's not a lexer rule in combined grammar
+    String autoBacktrack = (String)grammar.getBlockOption( currentBlockAST, "backtrack" );
+    if ( autoBacktrack == null )
+    {
+        autoBacktrack = (String)grammar.getOption( "backtrack" );
+    }
+    if ( autoBacktrack != null && autoBacktrack.equals( "true" ) &&
+         !( grammarType == Grammar.COMBINED &&
+         Rule.getRuleType(currentRuleName) == Grammar.LEXER) &&
+         alt.getChild( 0 ).getType() != SYN_SEMPRED )
+    {
+        // duplicate alt and make a synpred block around that dup'd alt
+        GrammarAST synpredBlockAST = createBlockFromDupAlt( alt );
+
+        // Create a BACKTRACK_SEMPRED node as if user had typed this in
+        // Effectively we replace (xxx)=>xxx with {synpredxxx}? xxx
+        GrammarAST synpredAST = createSynSemPredFromBlock( synpredBlockAST,
+                                                          BACKTRACK_SEMPRED );
+
+        // insert BACKTRACK_SEMPRED as first element of alt
+        //synpredAST.getLastSibling().setNextSibling( alt.getFirstChild() );
+        //synpredAST.addChild( alt.getFirstChild() );
+        //alt.setFirstChild( synpredAST );
+        GrammarAST[] children = alt.getChildrenAsArray();
+        adaptor.setChild( alt, 0, synpredAST );
+        for ( int i = 0; i < children.length; i++ )
+        {
+            if ( i < children.length - 1 )
+                adaptor.setChild( alt, i + 1, children[i] );
+            else
+                adaptor.addChild( alt, children[i] );
+        }
+    }
+}
+
+protected GrammarAST createSynSemPredFromBlock( GrammarAST synpredBlockAST, int synpredTokenType ) {
+    // add grammar fragment to a list so we can make fake rules for them later.
+    String predName = grammar.defineSyntacticPredicate( synpredBlockAST, currentRuleName );
+    // convert (alpha)=> into {synpredN}? where N is some pred count
+    // during code gen we convert to function call with templates
+    String synpredinvoke = predName;
+    GrammarAST p = (GrammarAST)adaptor.create( synpredTokenType, synpredinvoke );
+    // track how many decisions have synpreds
+    grammar.blocksWithSynPreds.add( currentBlockAST );
+    return p;
+}
+
+public static GrammarAST createSimpleRuleAST( String name, GrammarAST block, boolean fragment ) {
+    TreeAdaptor adaptor = new grammar_Adaptor(null);
+
+    GrammarAST modifier = null;
+    if ( fragment )
+    {
+        modifier = (GrammarAST)adaptor.create( FRAGMENT, "fragment" );
+    }
+
+    /*
+     * EOBAST = block.getLastChild()
+     * ^(RULE[block,"rule"] ID["name"] {modifier} ARG["ARG"] RET["RET"] SCOPE["scope"] {block} EOR[EOBAST,"<end-of-rule>"])
+     */
+    GrammarAST rule = (GrammarAST)adaptor.create( RULE, block.getToken(), "rule" );
+
+    adaptor.addChild( rule, adaptor.create( ID, name ) );
+    if ( modifier != null )
+        adaptor.addChild( rule, modifier );
+    adaptor.addChild( rule, adaptor.create( ARG, "ARG" ) );
+    adaptor.addChild( rule, adaptor.create( RET, "RET" ) );
+    adaptor.addChild( rule, adaptor.create( SCOPE, "scope" ) );
+    adaptor.addChild( rule, block );
+    adaptor.addChild( rule, adaptor.create( EOR, block.getLastChild().getToken(), "<end-of-rule>" ) );
+
+    return rule;
+}
+
+@Override
+public void reportError(RecognitionException ex)
+{
+    //Token token = null;
+    //try
+    //{
+    //    token = LT( 1 );
+    //}
+    //catch ( TokenStreamException tse )
+    //{
+    //    ErrorManager.internalError( "can't get token???", tse );
+    //}
+    Token token = ex.token;
+    ErrorManager.syntaxError(
+        ErrorManager.MSG_SYNTAX_ERROR,
+        grammar,
+        token,
+        "antlr: " + ex.toString(),
+        ex );
+}
+
+public void cleanup( GrammarAST root )
+{
+    if ( grammarType == Grammar.LEXER )
+    {
+        String filter = (String)grammar.getOption( "filter" );
+        GrammarAST tokensRuleAST =
+            grammar.addArtificialMatchTokensRule(
+                root,
+                grammar.lexerRuleNamesInCombined,
+                grammar.getDelegateNames(),
+                filter != null && filter.equals( "true" ) );
+    }
+}
+}
+
+public
+grammar_![Grammar g]
+@init
+{
+	this.grammar = g;
+	Map<String, Object> opts;
+}
+@after
+{
+	cleanup( $tree );
+}
+	:	//hdr:headerSpec
+		( ACTION )?
+		( cmt=DOC_COMMENT  )?
+		gr=grammarType gid=id {grammar.setName($gid.text);} SEMI
+		(	optionsSpec {opts = $optionsSpec.opts; grammar.setOptions(opts, $optionsSpec.start);}
+		)?
+		(ig=delegateGrammars)?
+		(ts=tokensSpec)?
+		scopes=attrScopes
+		(a=actions)?
+		r=rules
+		EOF
+		-> ^($gr $gid $cmt? optionsSpec? $ig? $ts? $scopes? $a? $r)
+	;
+
+grammarType
+	:	(	'lexer'  gr='grammar' {grammarType=Grammar.LEXER; grammar.type = Grammar.LEXER;}       // pure lexer
+			-> LEXER_GRAMMAR[$gr]
+		|	'parser' gr='grammar' {grammarType=Grammar.PARSER; grammar.type = Grammar.PARSER;}     // pure parser
+			-> PARSER_GRAMMAR[$gr]
+		|	'tree'   gr='grammar' {grammarType=Grammar.TREE_PARSER; grammar.type = Grammar.TREE_PARSER;}  // a tree parser
+			-> TREE_GRAMMAR[$gr]
+		|			 gr='grammar' {grammarType=Grammar.COMBINED; grammar.type = Grammar.COMBINED;} // merged parser/lexer
+			-> COMBINED_GRAMMAR[$gr]
+		)
+	;
+
+actions
+	:	(action)+
+	;
+
+/** Match stuff like @parser::members {int i;} */
+action
+	:	AMPERSAND^ (actionScopeName COLON! COLON!)? id ACTION
+	;
+
+/** Sometimes the scope names will collide with keywords; allow them as
+ *  ids for action scopes.
+ */
+actionScopeName
+	:	id
+	|	l='lexer'
+		-> ID[$l]
+	|	p='parser'
+		-> ID[$p]
+	;
+
+optionsSpec returns [Map<String, Object> opts=new HashMap<String, Object>()]
+	:	OPTIONS^ (option[$opts] SEMI!)+ RCURLY!
+	;
+
+option[Map<String, Object> opts]
+	:	id ASSIGN^ optionValue
+		{
+			$opts.put($id.text, $optionValue.value);
+		}
+	;
+
+optionValue returns [Object value = null]
+	:	x=id			 {$value = $x.text;}
+	|	s=STRING_LITERAL {String vs = $s.text;
+						  // remove the quotes:
+						  $value=vs.substring(1,vs.length()-1);}
+	|	c=CHAR_LITERAL   {String vs = $c.text;
+						  // remove the quotes:
+						  $value=vs.substring(1,vs.length()-1);}
+	|	i=INT            {$value = Integer.parseInt($i.text);}
+	|	ss=STAR			 {$value = "*";} // used for k=*
+		-> STRING_LITERAL[$ss]
+//	|	cs:charSet       {value = #cs;} // return set AST in this case
+	;
+
+delegateGrammars
+	:	'import'^ delegateGrammar (COMMA! delegateGrammar)* SEMI!
+	;
+
+delegateGrammar
+	:	lab=id ASSIGN^ g=id {grammar.importGrammar($g.tree, $lab.text);}
+	|	g2=id               {grammar.importGrammar($g2.tree,null);}
+	;
+
+tokensSpec
+	:	TOKENS^
+			tokenSpec*
+		RCURLY!
+	;
+
+tokenSpec
+	:	TOKEN_REF ( ASSIGN^ (STRING_LITERAL|CHAR_LITERAL) )? SEMI!
+	;
+
+attrScopes
+	:	(attrScope)*
+	;
+
+attrScope
+	:	'scope'^ id ruleActions? ACTION
+	;
+
+rules
+	:	(	rule
+		)+
+	;
+
+public
+rule
+@init
+{
+	GrammarAST eob=null;
+	CommonToken start = (CommonToken)LT(1);
+	int startLine = LT(1).getLine();
+}
+	:
+	(	(	d=DOC_COMMENT
+		)?
+		(	p1='protected'	//{modifier=$p1.tree;}
+		|	p2='public'		//{modifier=$p2.tree;}
+		|	p3='private'	//{modifier=$p3.tree;}
+		|	p4='fragment'	//{modifier=$p4.tree;}
+		)?
+		ruleName=id
+		{
+			currentRuleName=$ruleName.text;
+			if ( grammarType==Grammar.LEXER && $p4==null )
+				grammar.lexerRuleNamesInCombined.add(currentRuleName);
+		}
+		( BANG )?
+		( aa=ARG_ACTION )?
+		( 'returns' rt=ARG_ACTION  )?
+		( throwsSpec )?
+		( optionsSpec )?
+		scopes=ruleScopeSpec
+		(ruleActions)?
+		COLON
+		ruleAltList[$optionsSpec.opts]
+		SEMI
+		( ex=exceptionGroup )?
+		->	^(	RULE[$ruleName.start, "rule"]
+				$ruleName
+				// the modifier will be 0 or one of the modifiers:
+				$p1? $p2? $p3? $p4?
+				^(ARG["ARG"] $aa?)
+				^(RET["RET"] $rt?)
+				throwsSpec?
+				optionsSpec?
+				$scopes
+				ruleActions?
+				ruleAltList
+				$ex?
+				EOR[$SEMI,"<end-of-rule>"])
+	)
+	{
+		$tree.setTreeEnclosingRuleNameDeeply(currentRuleName);
+		((GrammarAST)$tree.getChild(0)).setBlockOptions($optionsSpec.opts);
+	}
+	;
+
+ruleActions
+	:	(ruleAction)+
+	;
+
+/** Match stuff like @init {int i;} */
+ruleAction
+	:	AMPERSAND^ id ACTION
+	;
+
+throwsSpec
+	:	'throws'^ id ( COMMA! id )*
+	;
+
+ruleScopeSpec
+	:	( 'scope' ruleActions? ACTION )?
+		( 'scope' idList SEMI )*
+		-> ^(SCOPE[$start,"scope"] ruleActions? ACTION? idList*)
+	;
+
+ruleAltList[Map<String, Object> opts]
+@init
+{
+	GrammarAST blkRoot = null;
+	GrammarAST save = currentBlockAST;
+}
+	:	( -> BLOCK[input.LT(-1),"BLOCK"] )
+		{
+			blkRoot = (GrammarAST)$tree.getChild(0);
+			blkRoot.setBlockOptions($opts);
+			currentBlockAST = blkRoot;
+		}
+		(	a1=alternative r1=rewrite
+			{if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred($a1.tree);}
+			-> $a1 $r1?
+		)
+		(	(	OR a2=alternative r2=rewrite
+				{if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR)) prefixWithSynPred($a2.tree);}
+				-> $ruleAltList $a2 $r2?
+			)+
+		|
+		)
+		-> ^({blkRoot} $ruleAltList EOB["<end-of-block>"])
+	;
+finally { currentBlockAST = save; }
+
+/** Build #(BLOCK ( #(ALT ...) EOB )+ ) */
+block
+@init
+{
+	GrammarAST save = currentBlockAST;
+}
+	:	(	lp=LPAREN
+			-> BLOCK[$lp,"BLOCK"]
+		)
+		{currentBlockAST = (GrammarAST)$tree.getChild(0);}
+		(
+			// 2nd alt and optional branch ambig due to
+			// linear approx LL(2) issue.  COLON ACTION
+			// matched correctly in 2nd alt.
+			(optionsSpec {((GrammarAST)$tree.getChild(0)).setOptions(grammar,$optionsSpec.opts);})?
+			( ruleActions )?
+			COLON
+		|	ACTION COLON
+		)?
+
+		a=alternative r=rewrite
+		{
+			stream_alternative.add( $r.tree );
+			if ( LA(1)==OR || (LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR) )
+				prefixWithSynPred($a.tree);
+		}
+		(	OR a=alternative r=rewrite
+			{
+				stream_alternative.add( $r.tree );
+				if (LA(1)==OR||(LA(2)==QUESTION||LA(2)==PLUS||LA(2)==STAR))
+					prefixWithSynPred($a.tree);
+			}
+		)*
+
+		rp=RPAREN
+		-> ^($block optionsSpec? ruleActions? ACTION? alternative+ EOB[$rp,"<end-of-block>"])
+	;
+finally { currentBlockAST = save; }
+
+// ALT and EOA have indexes tracking start/stop of entire alt
+alternative
+	:	element+
+		-> ^(ALT[$start,"ALT"] element+ EOA[input.LT(-1),"<end-of-alt>"])
+	|	// epsilon alt
+		-> ^(ALT[$start,"ALT"] EPSILON[input.LT(-1),"epsilon"] EOA[input.LT(-1),"<end-of-alt>"])
+	;
+
+exceptionGroup
+	:	exceptionHandler+ finallyClause?
+	|	finallyClause
+	;
+
+exceptionHandler
+	:	'catch'^ ARG_ACTION ACTION
+	;
+
+finallyClause
+	:	'finally'^ ACTION
+	;
+
+element
+	:	elementNoOptionSpec
+	;
+
+elementNoOptionSpec
+@init
+{
+	IntSet elements=null;
+}
+	:	(	id (ASSIGN^|PLUS_ASSIGN^)
+			(	atom (sub=ebnfSuffix[root_0,false]! {root_0 = $sub.tree;})?
+			|	ebnf
+			)
+		|	a=atom
+			(	sub2=ebnfSuffix[$a.tree,false]! {root_0=$sub2.tree;}
+			)?
+		|	ebnf
+		|	FORCED_ACTION
+		|	ACTION
+		|	p=SEMPRED ( IMPLIES! {$p.setType(GATED_SEMPRED);} )?
+			{
+			grammar.blocksWithSemPreds.add(currentBlockAST);
+			}
+		|	t3=tree_
+		)
+	;
+
+atom
+	:	range (ROOT^|BANG^)?
+	|	(
+			// grammar.rule but ensure no spaces. "A . B" is not a qualified ref
+			// We do here rather than lexer so we can build a tree
+			({LT(1).getCharPositionInLine()+LT(1).getText().length()==LT(2).getCharPositionInLine()&&
+			 LT(2).getCharPositionInLine()+1==LT(3).getCharPositionInLine()}? id WILDCARD (terminal|ruleref)) =>
+			id w=WILDCARD^ (terminal|ruleref) {$w.setType(DOT);}
+		|	terminal
+		|	ruleref
+		)
+	|	notSet (ROOT^|BANG^)?
+	;
+
+ruleref
+	:	RULE_REF^ ARG_ACTION? (ROOT^|BANG^)?
+	;
+
+notSet
+	:	NOT^
+		(	notTerminal
+		|	block
+		)
+	;
+
+treeRoot
+@init{atTreeRoot=true;}
+@after{atTreeRoot=false;}
+	:	id (ASSIGN^|PLUS_ASSIGN^) (atom|block)
+	|	atom
+	|	block
+	;
+
+tree_
+	:	TREE_BEGIN^
+		treeRoot element+
+		RPAREN!
+	;
+
+/** matches EBNF blocks (and sets via block rule) */
+ebnf
+	:	block
+		(	QUESTION
+			-> ^(OPTIONAL[$start,"?"] block)
+		|	STAR
+			-> ^(CLOSURE[$start,"*"] block)
+		|	PLUS
+			-> ^(POSITIVE_CLOSURE[$start,"+"] block)
+		|	IMPLIES // syntactic predicate
+			// ignore for lexer rules in combined
+			-> {grammarType == Grammar.COMBINED && Rule.getRuleType(currentRuleName) == Grammar.LEXER}? ^(SYNPRED[$start,"=>"] block)
+			// create manually specified (...)=> predicate; convert to sempred
+			-> {createSynSemPredFromBlock($block.tree, SYN_SEMPRED)}
+		|	ROOT
+			-> ^(ROOT block)
+		|	BANG
+			-> ^(BANG block)
+		|
+			-> block
+		)
+	;
+
+range!
+	:	{Rule.getRuleType(currentRuleName) == Grammar.LEXER}? =>
+	 	c1=CHAR_LITERAL RANGE c2=CHAR_LITERAL
+		-> ^(CHAR_RANGE[$c1,".."] $c1 $c2)
+	|	// range elsewhere is an error
+		(	t=TOKEN_REF r=RANGE TOKEN_REF
+		|	t=STRING_LITERAL r=RANGE STRING_LITERAL
+		|	t=CHAR_LITERAL r=RANGE CHAR_LITERAL
+		)
+		{
+		ErrorManager.syntaxError(
+			ErrorManager.MSG_RANGE_OP_ILLEGAL,grammar,$r,null,null);
+		}
+		-> $t // have to generate something for surrounding code, just return first token
+	;
+
+terminal
+	:	cl=CHAR_LITERAL^ ( elementOptions[$cl.tree]! )? (ROOT^|BANG^)?
+
+	|	tr=TOKEN_REF^
+		( elementOptions[$tr.tree]! )?
+		( ARG_ACTION )? // Args are only valid for lexer rules
+		(ROOT^|BANG^)?
+
+	|	sl=STRING_LITERAL^ ( elementOptions[$sl.tree]! )? (ROOT^|BANG^)?
+
+	|	wi=WILDCARD (ROOT^|BANG^)?
+		{
+			if ( atTreeRoot )
+			{
+				ErrorManager.syntaxError(
+					ErrorManager.MSG_WILDCARD_AS_ROOT,grammar,$wi,null,null);
+			}
+		}
+	;
+
+elementOptions[GrammarAST terminalAST]
+	:	OPEN_ELEMENT_OPTION^ defaultNodeOption[terminalAST] CLOSE_ELEMENT_OPTION!
+	|	OPEN_ELEMENT_OPTION^ elementOption[terminalAST] (SEMI! elementOption[terminalAST])* CLOSE_ELEMENT_OPTION!
+	;
+
+defaultNodeOption[GrammarAST terminalAST]
+	:	elementOptionId
+		{terminalAST.setTerminalOption(grammar,Grammar.defaultTokenOption,$elementOptionId.qid);}
+	;
+
+elementOption[GrammarAST terminalAST]
+	:	id ASSIGN^
+		(	elementOptionId
+			{terminalAST.setTerminalOption(grammar,$id.text,$elementOptionId.qid);}
+		|	(t=STRING_LITERAL|t=DOUBLE_QUOTE_STRING_LITERAL|t=DOUBLE_ANGLE_STRING_LITERAL)
+			{terminalAST.setTerminalOption(grammar,$id.text,$t.text);}
+		)
+	;
+
+elementOptionId returns [String qid]
+@init{StringBuffer buf = new StringBuffer();}
+	:	i=id {buf.append($i.text);} ('.' i=id {buf.append("." + $i.text);})*
+		{$qid = buf.toString();}
+	;
+
+ebnfSuffix[GrammarAST elemAST, boolean inRewrite]
+@init
+{
+GrammarAST blkRoot=null;
+GrammarAST alt=null;
+GrammarAST save = currentBlockAST;
+}
+@after
+{
+currentBlockAST = save;
+}
+	:	(	-> BLOCK[$elemAST.getToken(), "BLOCK"]
+		)
+		{ blkRoot = (GrammarAST)$tree.getChild(0); currentBlockAST = blkRoot; }
+		(	// create alt
+			-> ^(ALT[$elemAST.getToken(), "ALT"] {$elemAST} EOA["<end-of-alt>"])
+		)
+		{
+			alt = (GrammarAST)$tree.getChild(0);
+			if ( !inRewrite )
+				prefixWithSynPred(alt);
+		}
+		(	QUESTION
+			-> OPTIONAL[$elemAST.getToken(),"?"]
+		|	STAR
+			-> CLOSURE[$elemAST.getToken(),"*"]
+		|	PLUS
+			-> POSITIVE_CLOSURE[$elemAST.getToken(),"+"]
+		)
+		-> ^($ebnfSuffix ^({blkRoot} {alt} EOB[$elemAST.getToken(), "<end-of-block>"]))
+	;
+
+notTerminal
+	:	CHAR_LITERAL
+	|	TOKEN_REF
+	|	STRING_LITERAL
+	;
+
+idList
+	:	id (COMMA! id)*
+	;
+
+id
+	:	TOKEN_REF
+		-> ID[$TOKEN_REF]
+	|	RULE_REF
+		-> ID[$RULE_REF]
+	;
+
+// R E W R I T E  S Y N T A X
+
+rewrite
+	:	rewrite_with_sempred*
+		REWRITE rewrite_alternative
+		-> ^(REWRITES rewrite_with_sempred* ^(REWRITE rewrite_alternative))
+	|
+	;
+
+rewrite_with_sempred
+	:	REWRITE^ SEMPRED rewrite_alternative
+	;
+
+rewrite_block
+	:	LPAREN
+		rewrite_alternative
+		RPAREN
+		-> ^(BLOCK[$LPAREN,"BLOCK"] rewrite_alternative EOB[$RPAREN,"<end-of-block>"])
+	;
+
+rewrite_alternative
+options{k=1;}
+	:	{grammar.buildTemplate()}? => rewrite_template
+
+	|	{grammar.buildAST()}? => ( rewrite_element )+
+		-> {!stream_rewrite_element.hasNext()}? ^(ALT[LT(1),"ALT"] EPSILON["epsilon"] EOA["<end-of-alt>"])
+		-> ^(ALT[LT(1),"ALT"] rewrite_element+ EOA["<end-of-alt>"])
+
+	|
+		-> ^(ALT[LT(1),"ALT"] EPSILON["epsilon"] EOA["<end-of-alt>"])
+	|	{grammar.buildAST()}? ETC
+	;
+
+rewrite_element
+	:	(	t=rewrite_atom
+			-> $t
+		)
+		(	subrule=ebnfSuffix[$t.tree,true]
+			-> $subrule
+		)?
+	|	rewrite_ebnf
+	|	(	tr=rewrite_tree
+			-> $tr
+		)
+		(	subrule=ebnfSuffix[$tr.tree,true]
+			-> $subrule
+		)?
+	;
+
+rewrite_atom
+	:	tr=TOKEN_REF^ elementOptions[$tr.tree]!? ARG_ACTION? // for imaginary nodes
+	|	RULE_REF
+	|	cl=CHAR_LITERAL elementOptions[$cl.tree]!?
+	|	sl=STRING_LITERAL elementOptions[$sl.tree]!?
+	|	DOLLAR! label // reference to a label in a rewrite rule
+	|	ACTION
+	;
+
+label
+	:	TOKEN_REF -> LABEL[$TOKEN_REF]
+	|	RULE_REF -> LABEL[$RULE_REF]
+	;
+
+rewrite_ebnf
+	:	b=rewrite_block
+		(	QUESTION
+			-> ^(OPTIONAL[$b.start,"?"] $b)
+		|	STAR
+			-> ^(CLOSURE[$b.start,"*"] $b)
+		|	PLUS
+			-> ^(POSITIVE_CLOSURE[$b.start,"+"] $b)
+		)
+	;
+
+rewrite_tree
+	:	TREE_BEGIN^
+			rewrite_atom rewrite_element*
+		RPAREN!
+	;
+
+/** Build a tree for a template rewrite:
+	  ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
+	where ARGLIST is always there even if no args exist.
+	ID can be "template" keyword.  If first child is ACTION then it's
+	an indirect template ref
+
+	-> foo(a={...}, b={...})
+	-> ({string-e})(a={...}, b={...})  // e evaluates to template name
+	-> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
+	-> {st-expr} // st-expr evaluates to ST
+ */
+public
+rewrite_template
+options{k=1;}
+	:	// -> template(a={...},...) "..."
+		{LT(1).getText().equals("template")}? => // inline
+		(	rewrite_template_head
+			-> rewrite_template_head
+		)
+		( st=DOUBLE_QUOTE_STRING_LITERAL | st=DOUBLE_ANGLE_STRING_LITERAL )
+		{ adaptor.addChild( $tree.getChild(0), adaptor.create($st) ); }
+
+	|	// -> foo(a={...}, ...)
+		rewrite_template_head
+
+	|	// -> ({expr})(a={...}, ...)
+		rewrite_indirect_template_head
+
+	|	// -> {...}
+		ACTION
+	;
+
+/** -> foo(a={...}, ...) */
+rewrite_template_head
+	:	id lp=LPAREN
+		rewrite_template_args
+		RPAREN
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] id rewrite_template_args)
+	;
+
+/** -> ({expr})(a={...}, ...) */
+rewrite_indirect_template_head
+	:	lp=LPAREN
+		ACTION
+		RPAREN
+		LPAREN rewrite_template_args RPAREN
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] ACTION rewrite_template_args)
+	;
+
+rewrite_template_args
+	:	rewrite_template_arg (COMMA rewrite_template_arg)*
+		-> ^(ARGLIST["ARGLIST"] rewrite_template_arg+)
+	|
+		-> ARGLIST["ARGLIST"]
+	;
+
+rewrite_template_arg
+	:	id a=ASSIGN ACTION
+		-> ^(ARG[$a,"ARG"] id ACTION)
+	;
+
+//////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////
+// L E X E R
+
+// get rid of warnings:
+fragment STRING_LITERAL : ;
+fragment FORCED_ACTION : ;
+fragment DOC_COMMENT : ;
+fragment SEMPRED : ;
+
+WS
+	:	(	' '
+		|	'\t'
+		|	('\r')? '\n'
+		)
+		{ $channel = HIDDEN; }
+	;
+
+COMMENT
+@init{List<Integer> type = new ArrayList<Integer>() {{ add(0); }};}
+	:	( SL_COMMENT | ML_COMMENT[type] {$type = type.get(0);} )
+		{
+			if ( $type != DOC_COMMENT )
+				$channel = HIDDEN;
+		}
+	;
+
+fragment
+SL_COMMENT
+	:	'//'
+		(	(' $ANTLR') => ' $ANTLR ' SRC (('\r')? '\n')? // src directive
+		|	~('\r'|'\n')* (('\r')? '\n')?
+		)
+	;
+
+fragment
+ML_COMMENT[List<Integer> type]
+	:	'/*'
+		{$type.set(0, (input.LA(1) == '*' && input.LA(2) != '/') ? DOC_COMMENT : ML_COMMENT);}
+		.*
+		'*/'
+	;
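+
+// For example (illustrative): "/** grammar doc */" is kept as DOC_COMMENT on the
+// default channel, while "// note" and "/* note */" are typed as plain comments
+// and moved to the HIDDEN channel by the COMMENT rule above.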
+
+OPEN_ELEMENT_OPTION
+	:	'<'
+	;
+
+CLOSE_ELEMENT_OPTION
+	:	'>'
+	;
+
+AMPERSAND : '@';
+
+COMMA : ',';
+
+QUESTION :	'?' ;
+
+TREE_BEGIN : '^(' ;
+
+LPAREN:	'(' ;
+
+RPAREN:	')' ;
+
+COLON :	':' ;
+
+STAR:	'*' ;
+
+PLUS:	'+' ;
+
+ASSIGN : '=' ;
+
+PLUS_ASSIGN : '+=' ;
+
+IMPLIES : '=>' ;
+
+REWRITE : '->' ;
+
+SEMI:	';' ;
+
+ROOT : '^' {hasASTOperator=true;} ;
+
+BANG : '!' {hasASTOperator=true;} ;
+
+OR	:	'|' ;
+
+WILDCARD : '.' ;
+
+ETC : '...' ;
+
+RANGE : '..' ;
+
+NOT :	'~' ;
+
+RCURLY:	'}'	;
+
+DOLLAR : '$' ;
+
+STRAY_BRACKET
+	:	']'
+	;
+
+CHAR_LITERAL
+	:	'\''
+		(	ESC
+		|	~('\\'|'\'')
+		)*
+		'\''
+		{
+			StringBuffer s = Grammar.getUnescapedStringFromGrammarStringLiteral($text);
+			if ( s.length() > 1 )
+			{
+				$type = STRING_LITERAL;
+			}
+		}
+	;
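+
+// For example (illustrative): 'a' and '\n' stay CHAR_LITERAL, but a quoted
+// literal whose unescaped form is longer than one character, such as 'abc',
+// is re-typed to STRING_LITERAL by the action above.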
+
+DOUBLE_QUOTE_STRING_LITERAL
+@init
+{
+	StringBuilder builder = new StringBuilder();
+}
+	:	'"'							{builder.append('"');}
+		(	('\\\"') => '\\' '"'	{builder.append('"');}
+		|	'\\' c=~'"'				{builder.append("\\" + (char)$c);}
+		|	c=~('\\'|'"')			{builder.append((char)$c);}
+		)*
+		'"'							{builder.append('"');}
+		{
+			setText(builder.toString());
+		}
+	;
+
+DOUBLE_ANGLE_STRING_LITERAL
+	:	'<<' .* '>>'
+	;
+
+fragment
+ESC
+	:	'\\'
+		(	// due to the way ESC is used, we don't need to handle the following character in different ways
+			/*'n'
+		|	'r'
+		|	't'
+		|	'b'
+		|	'f'
+		|	'"'
+		|	'\''
+		|	'\\'
+		|	'>'
+		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
+		|*/	. // unknown, leave as it is
+		)
+	;
+
+fragment
+DIGIT
+	:	'0'..'9'
+	;
+
+fragment
+XDIGIT
+	:	'0' .. '9'
+	|	'a' .. 'f'
+	|	'A' .. 'F'
+	;
+
+INT
+	:	('0'..'9')+
+	;
+
+ARG_ACTION
+@init {
+	List<String> text = new ArrayList<String>() {{ add(null); }};
+}
+	:	'['
+		NESTED_ARG_ACTION[text]
+		']'
+		{setText(text.get(0));}
+	;
+
+fragment
+NESTED_ARG_ACTION[List<String> text]
+@init {
+	$text.set(0, "");
+	StringBuilder builder = new StringBuilder();
+}
+	:	(	('\\]') => '\\' ']'		{builder.append("]");}
+		|	'\\' c=~(']')			{builder.append("\\" + (char)$c);}
+		|	ACTION_STRING_LITERAL	{builder.append($ACTION_STRING_LITERAL.text);}
+		|	ACTION_CHAR_LITERAL		{builder.append($ACTION_CHAR_LITERAL.text);}
+		|	c=~('\\'|'"'|'\''|']')	{builder.append((char)$c);}
+		)*
+		{
+			$text.set(0, builder.toString());
+		}
+	;
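+
+// For example (hypothetical argument list): the input  [int a, String b]  yields
+// an ARG_ACTION token whose text is  int a, String b  -- the surrounding brackets
+// are dropped and an escaped  \]  inside the brackets becomes a literal  ]  .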
+
+ACTION
+@init
+{
+	int actionLine = getLine();
+	int actionColumn = getCharPositionInLine();
+}
+	:	NESTED_ACTION
+		('?' {$type = SEMPRED;})?
+		{
+			String action = $text;
+			int n = 1; // num delimiter chars
+			if ( action.startsWith("{{") && action.endsWith("}}") )
+			{
+				$type = FORCED_ACTION;
+				n = 2;
+			}
+			action = action.substring(n,action.length()-n - ($type==SEMPRED ? 1 : 0));
+			setText(action);
+		}
+	;
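+
+// For example (illustrative): {x=3;} becomes an ACTION token with text  x=3; ,
+// {{x=3;}} becomes a FORCED_ACTION with the same text, and {a>0}? becomes a
+// SEMPRED whose text is  a>0  -- the delimiters (and trailing ?) are stripped
+// by the action above.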
+
+fragment
+NESTED_ACTION
+	:	'{'
+		(	NESTED_ACTION
+		|	ACTION_CHAR_LITERAL
+		|	('//' | '/*') => COMMENT
+		|	ACTION_STRING_LITERAL
+		|	ACTION_ESC
+		|	~('{'|'\''|'"'|'\\'|'}')
+		)*
+		'}'
+	;
+
+fragment
+ACTION_CHAR_LITERAL
+	:	'\''
+		(	ACTION_ESC
+		|	~('\\'|'\'')
+		)*
+		'\''
+	;
+
+fragment
+ACTION_STRING_LITERAL
+	:	'"'
+		(	ACTION_ESC
+		|	~('\\'|'"')
+		)*
+		'"'
+	;
+
+fragment
+ACTION_ESC
+	:	'\\\''
+	|	'\\\"'
+	|	'\\' ~('\''|'"')
+	;
+
+TOKEN_REF
+	:	'A'..'Z'
+		(	'a'..'z'|'A'..'Z'|'_'|'0'..'9'
+		)*
+	;
+
+TOKENS
+	:	'tokens' WS_LOOP '{'
+	;
+
+OPTIONS
+	:	'options' WS_LOOP '{'
+	;
+
+// we get a warning here when looking for options '{', but it works right
+RULE_REF
+@init
+{
+	int t=0;
+}
+	:	'a'..'z' ('a'..'z' | 'A'..'Z' | '_' | '0'..'9')*
+	;
+
+fragment
+WS_LOOP
+	:	(	WS
+		|	COMMENT
+		)*
+	;
+
+fragment
+WS_OPT
+	:	(WS)?
+	;
+
+/** Reset the file and line information; useful when the grammar
+ *  has been generated so that errors are shown relative to the
+ *  original file like the old C preprocessor used to do.
+ */
+fragment
+SRC
+	:	'src' ' ' file=ACTION_STRING_LITERAL ' ' line=INT
+		{
+			setFileName($file.text.substring(1,$file.text.length()-1));
+			input.setLine(Integer.parseInt($line.text) - 1);  // -1 because SL_COMMENT will increment the line no. KR
+		}
+	;
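+
+// For example (hypothetical file name and line number): a generated grammar
+// containing the directive  // $ANTLR src "Foo.g" 42  makes later errors report
+// against Foo.g; the line counter is set to 41 here so that the newline ending
+// the enclosing SL_COMMENT brings it back to 42.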
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRTreePrinter.g b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRTreePrinter.g
new file mode 100644
index 0000000..d4032fc
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRTreePrinter.g
@@ -0,0 +1,459 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2011 Terence Parr
+ All rights reserved.
+
+ Grammar conversion to ANTLR v3:
+ Copyright (c) 2011 Sam Harwell
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+	notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+	derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Print out a grammar (no pretty printing).
+ *
+ *  Terence Parr
+ *  University of San Francisco
+ *  August 19, 2003
+ */
+tree grammar ANTLRTreePrinter;
+
+options
+{
+	language=Java;
+	tokenVocab = ANTLR;
+	ASTLabelType = GrammarAST;
+}
+
+@header {
+package org.antlr.grammar.v3;
+import org.antlr.tool.*;
+import java.util.StringTokenizer;
+}
+
+@members {
+protected Grammar grammar;
+protected boolean showActions;
+protected StringBuilder buf = new StringBuilder(300);
+
+private ANTLRTreePrinter.block_return block(GrammarAST t, boolean forceParens) throws RecognitionException {
+    ANTLRTreePrinter other = new ANTLRTreePrinter(new CommonTreeNodeStream(t));
+    other.buf = buf;
+    return other.block(forceParens);
+}
+
+public final int countAltsForBlock(GrammarAST t) {
+    int n = 0;
+    for ( int i = 0; i < t.getChildCount(); i++ )
+    {
+        if ( t.getChild(i).getType() == ALT )
+            n++;
+    }
+
+    return n;
+}
+
+public void out(String s) {
+    buf.append(s);
+}
+
+@Override
+public void reportError(RecognitionException ex) {
+    Token token = null;
+    if (ex instanceof MismatchedTokenException) {
+        token = ((MismatchedTokenException)ex).token;
+    } else if (ex instanceof NoViableAltException) {
+        token = ((NoViableAltException)ex).token;
+    }
+
+    ErrorManager.syntaxError(
+        ErrorManager.MSG_SYNTAX_ERROR,
+        grammar,
+        token,
+        "antlr.print: " + ex.toString(),
+        ex );
+}
+
+/** Normalize a grammar print out by removing all double spaces
+ *  and leading/trailing whitespace.  For example, convert
+ *
+ *  ( A  |  B  |  C )*
+ *
+ *  to
+ *
+ *  ( A | B | C )*
+ */
+public static String normalize(String g) {
+    StringTokenizer st = new StringTokenizer(g, " ", false);
+    StringBuffer buf = new StringBuffer();
+    while ( st.hasMoreTokens() ) {
+        String w = st.nextToken();
+        buf.append(w);
+        buf.append(" ");
+    }
+    return buf.toString().trim();
+}
+}
+
+/** Call this to figure out how to print */
+public
+toString[Grammar g, boolean showActions] returns [String s=null]
+@init {
+	grammar = g;
+	this.showActions = showActions;
+}
+	:	(	grammar_
+		|	rule
+		|	alternative
+		|	element
+		|	single_rewrite
+		|	rewrite
+		|	EOR //{s="EOR";}
+		)
+		{return normalize(buf.toString());}
+	;
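+
+// A rough usage sketch (variable names are hypothetical): the entry point is
+// driven the same way the block() helper above drives a sub-walk, given a
+// GrammarAST t belonging to Grammar g:
+//
+//     ANTLRTreePrinter printer = new ANTLRTreePrinter(new CommonTreeNodeStream(t));
+//     String text = printer.toString(g, false);   // false => suppress actions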
+
+// --------------
+
+grammar_
+	:	^( LEXER_GRAMMAR grammarSpec["lexer " ] )
+	|	^( PARSER_GRAMMAR grammarSpec["parser "] )
+	|	^( TREE_GRAMMAR grammarSpec["tree "] )
+	|	^( COMBINED_GRAMMAR grammarSpec[""] )
+	;
+
+attrScope
+	:	^( 'scope' ID ruleAction* ACTION )
+	;
+
+grammarSpec[String gtype]
+	:	id=ID {out(gtype+"grammar "+$id.text);}
+		(cmt=DOC_COMMENT {out($cmt.text+"\n");} )?
+		(optionsSpec)? {out(";\n");}
+		(delegateGrammars)?
+		(tokensSpec)?
+		(attrScope)*
+		(actions)?
+		rules
+	;
+
+actions
+	:	( action )+
+	;
+
+action
+@init {
+	String scope=null, name=null;
+	String action=null;
+}
+	:	^(	AMPERSAND id1=ID
+			(	id2=ID a1=ACTION
+				{scope=$id1.text; name=$a1.text; action=$a1.text;}
+			|	a2=ACTION
+				{scope=null; name=$id1.text; action=$a2.text;}
+			)
+		)
+		{
+			if ( showActions )
+			{
+				out("@"+(scope!=null?scope+"::":"")+name+action);
+			}
+		}
+	;
+
+optionsSpec
+	:	^(	OPTIONS {out(" options {");}
+			(option {out("; ");})+
+			{out("} ");}
+		)
+	;
+
+option
+	:	^( ASSIGN id=ID {out($id.text+"=");} optionValue )
+	;
+
+optionValue
+	:	id=ID            {out($id.text);}
+	|	s=STRING_LITERAL {out($s.text);}
+	|	c=CHAR_LITERAL   {out($c.text);}
+	|	i=INT            {out($i.text);}
+//	|   charSet
+	;
+
+/*
+charSet
+	:   #( CHARSET charSetElement )
+	;
+
+charSetElement
+	:   c:CHAR_LITERAL {out(#c.getText());}
+	|   #( OR c1:CHAR_LITERAL c2:CHAR_LITERAL )
+	|   #( RANGE c3:CHAR_LITERAL c4:CHAR_LITERAL )
+	;
+*/
+
+delegateGrammars
+	:	^( 'import' ( ^(ASSIGN ID ID) | ID )+ )
+	;
+
+tokensSpec
+	:	^(TOKENS tokenSpec*)
+	;
+
+tokenSpec
+	:	TOKEN_REF
+	|	^( ASSIGN TOKEN_REF (STRING_LITERAL|CHAR_LITERAL) )
+	;
+
+rules
+	:	( rule | precRule )+
+	;
+
+rule
+	:	^(	RULE id=ID
+			(modifier)?
+			{out($id.text);}
+			^(ARG (arg=ARG_ACTION {out("["+$arg.text+"]");} )? )
+			^(RET (ret=ARG_ACTION {out(" returns ["+$ret.text+"]");} )? )
+			(throwsSpec)?
+			(optionsSpec)?
+			(ruleScopeSpec)?
+			(ruleAction)*
+			{out(" :");}
+			{
+				if ( input.LA(5) == NOT || input.LA(5) == ASSIGN )
+					out(" ");
+			}
+			b=block[false]
+			(exceptionGroup)?
+			EOR {out(";\n");}
+		)
+	;
+
+precRule
+	:	^(	PREC_RULE id=ID
+			(modifier)?
+			{out($id.text);}
+			^(ARG (arg=ARG_ACTION {out("["+$arg.text+"]");} )? )
+			^(RET (ret=ARG_ACTION {out(" returns ["+$ret.text+"]");} )? )
+			(throwsSpec)?
+			(optionsSpec)?
+			(ruleScopeSpec)?
+			(ruleAction)*
+			{out(" :");}
+			{
+				if ( input.LA(5) == NOT || input.LA(5) == ASSIGN )
+					out(" ");
+			}
+			b=block[false]
+			(exceptionGroup)?
+			EOR {out(";\n");}
+		)
+	;
+
+ruleAction
+	:	^(AMPERSAND id=ID a=ACTION )
+		{if ( showActions ) out("@"+$id.text+"{"+$a.text+"}");}
+	;
+
+modifier
+@init
+{out($modifier.start.getText()); out(" ");}
+	:	'protected'
+	|	'public'
+	|	'private'
+	|	'fragment'
+	;
+
+throwsSpec
+	:	^('throws' ID+)
+	;
+
+ruleScopeSpec
+	:	^( 'scope' ruleAction* (ACTION)? ( ID )* )
+	;
+
+block[boolean forceParens]
+@init
+{
+int numAlts = countAltsForBlock($start);
+}
+	:	^(	BLOCK
+			{
+				if ( forceParens||numAlts>1 )
+				{
+					//for ( Antlr.Runtime.Tree.Tree parent = $start.getParent(); parent != null && parent.getType() != RULE; parent = parent.getParent() )
+					//{
+					//	if ( parent.getType() == BLOCK && countAltsForBlock((GrammarAST)parent) > 1 )
+					//	{
+					//		out(" ");
+					//		break;
+					//	}
+					//}
+					out(" (");
+				}
+			}
+			(optionsSpec {out(" :");} )?
+			alternative rewrite ( {out("|");} alternative rewrite )*
+			EOB   {if ( forceParens||numAlts>1 ) out(")");}
+		 )
+	;
+
+alternative
+	:	^( ALT element* EOA )
+	;
+
+exceptionGroup
+	:	( exceptionHandler )+ (finallyClause)?
+	|	finallyClause
+	;
+
+exceptionHandler
+	:	^('catch' ARG_ACTION ACTION)
+	;
+
+finallyClause
+	:	^('finally' ACTION)
+	;
+
+rewrite
+	:	^(REWRITES single_rewrite+)
+	|	REWRITES
+	|
+	;
+
+single_rewrite
+	:	^(	REWRITE {out(" ->");}
+			(	SEMPRED {out(" {"+$SEMPRED.text+"}?");}
+			)?
+			(	alternative
+			|	rewrite_template
+			|	ETC {out("...");}
+			|	ACTION {out(" {"+$ACTION.text+"}");}
+			)
+		)
+	;
+
+rewrite_template
+	:	^(	TEMPLATE
+			(	id=ID {out(" "+$id.text);}
+			|	ind=ACTION {out(" ({"+$ind.text+"})");}
+			)
+			^(	ARGLIST
+				{out("(");}
+				(	^(	ARG arg=ID {out($arg.text+"=");}
+						a=ACTION   {out($a.text);}
+					)
+				)*
+				{out(")");}
+			)
+			(	DOUBLE_QUOTE_STRING_LITERAL {out(" "+$DOUBLE_QUOTE_STRING_LITERAL.text);}
+			|	DOUBLE_ANGLE_STRING_LITERAL {out(" "+$DOUBLE_ANGLE_STRING_LITERAL.text);}
+			)?
+		)
+	;
+
+element
+	:	^(ROOT element) {out("^");}
+	|	^(BANG element) {out("!");}
+	|	atom
+	|	^(NOT {out("~");} element)
+	|	^(RANGE atom {out("..");} atom)
+	|	^(CHAR_RANGE atom {out("..");} atom)
+	|	^(ASSIGN id=ID {out($id.text+"=");} element)
+	|	^(PLUS_ASSIGN id2=ID {out($id2.text+"+=");} element)
+	|	ebnf
+	|	tree_
+	|	^( SYNPRED block[true] ) {out("=>");}
+	|	a=ACTION  {if ( showActions ) {out("{"); out($a.text); out("}");}}
+	|	a2=FORCED_ACTION  {if ( showActions ) {out("{{"); out($a2.text); out("}}");}}
+	|	pred=SEMPRED
+		{
+			if ( showActions )
+			{
+				out("{");
+				out($pred.text);
+				out("}?");
+			}
+			else
+			{
+				out("{...}?");
+			}
+		}
+	|	spred=SYN_SEMPRED
+		{
+			String name = $spred.text;
+			GrammarAST predAST=grammar.getSyntacticPredicate(name);
+			block(predAST, true);
+			out("=>");
+		}
+	|	^(BACKTRACK_SEMPRED .*) // don't print anything (auto backtrack stuff)
+	|	gpred=GATED_SEMPRED
+		{
+		if ( showActions ) {out("{"); out($gpred.text); out("}? =>");}
+		else {out("{...}? =>");}
+		}
+	|	EPSILON
+	;
+
+ebnf
+	:	block[true] {out(" ");}
+	|	^( OPTIONAL block[true] ) {out("? ");}
+	|	^( CLOSURE block[true] )  {out("* ");}
+	|	^( POSITIVE_CLOSURE block[true] ) {out("+ ");}
+	;
+
+tree_
+	:	^(TREE_BEGIN {out(" ^(");} element (element)* {out(") ");} )
+	;
+
+atom
+@init
+{out(" ");}
+	:	(	^(	RULE_REF		{out($start.toString());}
+				(rarg=ARG_ACTION	{out("["+$rarg.toString()+"]");})?
+				(ast_suffix)?
+			)
+		|	^(	TOKEN_REF		{out($start.toString());}
+				(targ=ARG_ACTION	{out("["+$targ.toString()+"]");} )?
+				(ast_suffix)?
+			)
+		|	^(	CHAR_LITERAL	{out($start.toString());}
+				(ast_suffix)?
+			)
+		|	^(	STRING_LITERAL	{out($start.toString());}
+				(ast_suffix)?
+			)
+		|	^(	WILDCARD		{out($start.toString());}
+				(ast_suffix)?
+			)
+		)
+		{out(" ");}
+	|	LABEL {out(" $"+$LABEL.text);} // used in -> rewrites
+	|	^(DOT ID {out($ID.text+".");} atom) // scope override on rule
+	;
+
+ast_suffix
+	:	ROOT {out("^");}
+	|	BANG  {out("!");}
+	;
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3.g b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3.g
new file mode 100644
index 0000000..76c92e4
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3.g
@@ -0,0 +1,625 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2010 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** ANTLR v3 grammar written in ANTLR v3 with AST construction */
+grammar ANTLRv3;
+
+options {
+	language=Java;
+	output=AST;
+	ASTLabelType=CommonTree;
+}
+
+tokens {
+	DOC_COMMENT;
+	PARSER;	
+    LEXER;
+    RULE;
+    BLOCK;
+    OPTIONAL;
+    CLOSURE;
+    POSITIVE_CLOSURE;
+    SYNPRED;
+    RANGE;
+    CHAR_RANGE;
+    EPSILON;
+    ALT;
+    EOR;
+    EOB;
+    EOA; // end of alt
+    ID;
+    ARG;
+    ARGLIST;
+    RET='returns';
+    LEXER_GRAMMAR;
+    PARSER_GRAMMAR;
+    TREE_GRAMMAR;
+    COMBINED_GRAMMAR;
+    LABEL; // $x used in rewrite rules
+    TEMPLATE;
+    SCOPE='scope';
+    SEMPRED;
+    GATED_SEMPRED; // {p}? =>
+    SYN_SEMPRED; // (...) =>   it's a manually-specified synpred converted to sempred
+    BACKTRACK_SEMPRED; // auto backtracking mode syn pred converted to sempred
+    FRAGMENT='fragment';
+    TREE_BEGIN='^(';
+    ROOT='^';
+    BANG='!';
+    RANGE='..';
+    REWRITE='->';
+    AT='@';
+    LABEL_ASSIGN='=';
+    LIST_LABEL_ASSIGN='+=';
+}
+
+@parser::header
+{
+    package org.antlr.grammar.v3;
+}
+@lexer::header
+{
+    package org.antlr.grammar.v3;
+}
+
+@members {
+	int gtype;
+}
+
+grammarDef
+    :   DOC_COMMENT?
+    	(	'lexer'  {gtype=LEXER_GRAMMAR;}    // pure lexer
+    	|   'parser' {gtype=PARSER_GRAMMAR;}   // pure parser
+    	|   'tree'   {gtype=TREE_GRAMMAR;}     // a tree parser
+    	|		     {gtype=COMBINED_GRAMMAR;} // merged parser/lexer
+    	)
+    	g='grammar' id ';' optionsSpec? tokensSpec? attrScope* action*
+    	rule+
+    	EOF
+    	-> ^( {adaptor.create(gtype,$g)}
+    		  id DOC_COMMENT? optionsSpec? tokensSpec? attrScope* action* rule+
+    		)
+    ;
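+
+// For example (hypothetical grammar names): "grammar T;" is tagged COMBINED_GRAMMAR,
+// "lexer grammar L;" LEXER_GRAMMAR, "parser grammar P;" PARSER_GRAMMAR, and
+// "tree grammar W;" TREE_GRAMMAR; the chosen constant becomes the root of the
+// grammar's AST via the {adaptor.create(gtype,$g)} rewrite above.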
+
+tokensSpec
+	:	TOKENS tokenSpec+ '}' -> ^(TOKENS tokenSpec+)
+	;
+
+tokenSpec
+	:	TOKEN_REF
+		(	'=' (lit=STRING_LITERAL|lit=CHAR_LITERAL)	-> ^('=' TOKEN_REF $lit)
+		|												-> TOKEN_REF
+		)
+		';'
+	;
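+
+// For example (hypothetical token names):
+//
+//     tokens { PLUS = '+'; RETURN = 'return'; IMAGINARY; }
+//
+// aliases PLUS to a char literal, RETURN to a string literal, and declares
+// IMAGINARY with no literal at all.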
+
+attrScope
+	:	'scope' id ACTION -> ^('scope' id ACTION)
+	;
+
+/** Match stuff like @parser::members {int i;} */
+action
+	:	'@' (actionScopeName '::')? id ACTION -> ^('@' actionScopeName? id ACTION)
+	;
+
+/** Sometimes the scope names will collide with keywords; allow them as
+ *  ids for action scopes.
+ */
+actionScopeName
+	:	id
+	|	l='lexer'	-> ID[$l]
+    |   p='parser'	-> ID[$p]
+	;
+
+optionsSpec
+	:	OPTIONS (option ';')+ '}' -> ^(OPTIONS option+)
+	;
+
+option
+    :   id '=' optionValue -> ^('=' id optionValue)
+ 	;
+ 	
+optionValue
+    :   qid
+    |   STRING_LITERAL
+    |   CHAR_LITERAL
+    |   INT
+    |	s='*' -> STRING_LITERAL[$s]  // used for k=*
+    ;
+
+rule
+scope {
+	String name;
+}
+	:	DOC_COMMENT?
+		( modifier=('protected'|'public'|'private'|'fragment') )?
+		id {$rule::name = $id.text;}
+		'!'?
+		( arg=ARG_ACTION )?
+		( 'returns' rt=ARG_ACTION  )?
+		throwsSpec? optionsSpec? ruleScopeSpec? ruleAction*
+		':'	altList	';'
+		exceptionGroup?
+	    -> ^( RULE id {modifier!=null?adaptor.create(modifier):null} ^(ARG[$arg] $arg)? ^('returns' $rt)?
+	    	  throwsSpec? optionsSpec? ruleScopeSpec? ruleAction*
+	    	  altList
+	    	  exceptionGroup?
+	    	  EOR["EOR"]
+	    	)
+	;
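+
+// A sketch of the surface syntax this rule accepts (rule and attribute names are
+// hypothetical):
+//
+//     public expr[int x] returns [int y]
+//     options { backtrack = true; }
+//     @init { y = x; }
+//         :   INT { y += Integer.parseInt($INT.text); }
+//         ;
+//         catch [RecognitionException re] { reportError(re); }
+//
+// i.e. modifier, arguments, returns, rule options, @-actions, the alternatives,
+// and an optional exception group after the closing semicolon.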
+
+/** Match stuff like @init {int i;} */
+ruleAction
+	:	'@' id ACTION -> ^('@' id ACTION)
+	;
+
+throwsSpec
+	:	'throws' id ( ',' id )* -> ^('throws' id+)
+	;
+
+ruleScopeSpec
+	:	'scope' ACTION -> ^('scope' ACTION)
+	|	'scope' id (',' id)* ';' -> ^('scope' id+)
+	|	'scope' ACTION
+		'scope' id (',' id)* ';'
+		-> ^('scope' ACTION id+ )
+	;
+
+block
+    :   lp='('
+		( (opts=optionsSpec)? ':' )?
+		altpair ( '|' altpair )*
+        rp=')'
+        -> ^( BLOCK[$lp,"BLOCK"] optionsSpec? altpair+ EOB[$rp,"EOB"] )
+    ;
+
+altpair : alternative rewrite ;
+
+altList
+@init {
+	// must create root manually as it's used by invoked rules in real antlr tool.
+	// leave here to demonstrate use of {...} in rewrite rule
+	// it's really BLOCK[firstToken,"BLOCK"]; set line/col to previous ( or : token.
+    CommonTree blkRoot = (CommonTree)adaptor.create(BLOCK,input.LT(-1),"BLOCK");
+}
+    :   altpair ( '|' altpair )* -> ^( {blkRoot} altpair+ EOB["EOB"] )
+    ;
+
+alternative
+@init {
+	Token firstToken = input.LT(1);
+	Token prevToken = input.LT(-1); // either : or | I think
+}
+    :   element+ -> ^(ALT[firstToken,"ALT"] element+ EOA["EOA"])
+    |   -> ^(ALT[prevToken,"ALT"] EPSILON[prevToken,"EPSILON"] EOA["EOA"])
+    ;
+
+exceptionGroup
+	:	( exceptionHandler )+ ( finallyClause )?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    'catch' ARG_ACTION ACTION -> ^('catch' ARG_ACTION ACTION)
+    ;
+
+finallyClause
+    :    'finally' ACTION -> ^('finally' ACTION)
+    ;
+
+element
+	:	id (labelOp='='|labelOp='+=') atom
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] ^($labelOp id atom) EOA["EOA"]) EOB["EOB"]))
+		|				-> ^($labelOp id atom)
+		)
+	|	id (labelOp='='|labelOp='+=') block
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] ^($labelOp id block) EOA["EOA"]) EOB["EOB"]))
+		|				-> ^($labelOp id block)
+		)
+	|	atom
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] atom EOA["EOA"]) EOB["EOB"]) )
+		|				-> atom
+		)
+	|	ebnf
+	|   ACTION
+	|   SEMPRED ( g='=>' -> GATED_SEMPRED[$g] | -> SEMPRED )
+	|   treeSpec
+		(	ebnfSuffix	-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] treeSpec EOA["EOA"]) EOB["EOB"]) )
+		|				-> treeSpec
+		)
+	;
+
+atom:   terminal
+	|	range 
+		(	(op='^'|op='!')	-> ^($op range)
+		|					-> range
+		)
+    |	notSet
+		(	(op='^'|op='!')	-> ^($op notSet)
+		|					-> notSet
+		)
+    |   RULE_REF ARG_ACTION?
+		(	(op='^'|op='!')	-> ^($op RULE_REF ARG_ACTION?)
+		|					-> ^(RULE_REF ARG_ACTION?)
+		)
+    ;
+
+notSet
+	:	'~'
+		(	notTerminal elementOptions?	-> ^('~' notTerminal elementOptions?)
+		|	block elementOptions?		-> ^('~' block elementOptions?)
+		)
+	;
+
+notTerminal
+	:   CHAR_LITERAL
+	|	TOKEN_REF
+	|	STRING_LITERAL
+	;
+	
+elementOptions
+	:	'<' qid '>'					 -> ^(OPTIONS qid)
+	|	'<' option (';' option)* '>' -> ^(OPTIONS option+)
+	;
+
+elementOption
+	:	id '=' optionValue -> ^('=' id optionValue)
+	;
+	
+treeSpec
+	:	'^(' element ( element )+ ')' -> ^(TREE_BEGIN element+)
+	;
+
+range!
+	:	c1=CHAR_LITERAL RANGE c2=CHAR_LITERAL elementOptions?
+		-> ^(CHAR_RANGE[$c1,".."] $c1 $c2 elementOptions?)
+	;
+
+terminal
+    :   (	CHAR_LITERAL elementOptions?    	  -> ^(CHAR_LITERAL elementOptions?)
+	    	// Args are only valid for lexer rules
+		|   TOKEN_REF ARG_ACTION? elementOptions? -> ^(TOKEN_REF ARG_ACTION? elementOptions?)
+		|   STRING_LITERAL elementOptions?		  -> ^(STRING_LITERAL elementOptions?)
+		|   '.' elementOptions?		 			  -> ^('.' elementOptions?)
+		)
+		(	'^'							-> ^('^' $terminal)
+		|	'!' 						-> ^('!' $terminal)
+		)?
+	;
+
+/** Matches EBNF blocks (and token sets via block rule) */
+ebnf
+@init {
+    Token firstToken = input.LT(1);
+}
+@after {
+	$ebnf.tree.getToken().setLine(firstToken.getLine());
+	$ebnf.tree.getToken().setCharPositionInLine(firstToken.getCharPositionInLine());
+}
+	:	block
+		(	op='?'	-> ^(OPTIONAL[op] block)
+		|	op='*'	-> ^(CLOSURE[op] block)
+		|	op='+'	-> ^(POSITIVE_CLOSURE[op] block)
+		|   '=>'	// syntactic predicate
+					-> {gtype==COMBINED_GRAMMAR &&
+					    Character.isUpperCase($rule::name.charAt(0))}?
+					   // if lexer rule in combined, leave as pred for lexer
+					   ^(SYNPRED["=>"] block)
+					// in real antlr tool, text for SYN_SEMPRED is predname
+					-> SYN_SEMPRED
+        |			-> block
+		)
+	;
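+
+// For example (illustrative): a syntactic predicate written as  ( ID '=' )=> ...
+// inside an ordinary parser rule is collapsed to a SYN_SEMPRED node, while the
+// same construct inside an upper-case (lexer) rule of a combined grammar is kept
+// as ^(SYNPRED block) so the lexer half can still use it, per the predicate above.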
+
+ebnfSuffix
+@init {
+	Token op = input.LT(1);
+}
+	:	'?'	-> OPTIONAL[op]
+  	|	'*' -> CLOSURE[op]
+   	|	'+' -> POSITIVE_CLOSURE[op]
+	;
+	
+
+
+// R E W R I T E  S Y N T A X
+
+rewrite
+@init {
+	Token firstToken = input.LT(1);
+}
+	:	(rew+='->' preds+=SEMPRED predicated+=rewrite_alternative)*
+		rew2='->' last=rewrite_alternative
+        -> ^($rew $preds $predicated)* ^($rew2 $last)
+	|
+	;
+
+rewrite_alternative
+options {backtrack=true;}
+	:	rewrite_template
+	|	rewrite_tree_alternative
+   	|   /* empty rewrite */ -> ^(ALT["ALT"] EPSILON["EPSILON"] EOA["EOA"])
+	;
+	
+rewrite_tree_block
+    :   lp='(' rewrite_tree_alternative ')'
+    	-> ^(BLOCK[$lp,"BLOCK"] rewrite_tree_alternative EOB[$lp,"EOB"])
+    ;
+
+rewrite_tree_alternative
+    :	rewrite_tree_element+ -> ^(ALT["ALT"] rewrite_tree_element+ EOA["EOA"])
+    ;
+
+rewrite_tree_element
+	:	rewrite_tree_atom
+	|	rewrite_tree_atom ebnfSuffix
+		-> ^( ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] rewrite_tree_atom EOA["EOA"]) EOB["EOB"]))
+	|   rewrite_tree
+		(	ebnfSuffix
+			-> ^(ebnfSuffix ^(BLOCK["BLOCK"] ^(ALT["ALT"] rewrite_tree EOA["EOA"]) EOB["EOB"]))
+		|	-> rewrite_tree
+		)
+	|   rewrite_tree_ebnf
+	;
+
+rewrite_tree_atom
+    :   CHAR_LITERAL
+	|   TOKEN_REF ARG_ACTION? -> ^(TOKEN_REF ARG_ACTION?) // for imaginary nodes
+    |   RULE_REF
+	|   STRING_LITERAL
+	|   d='$' id -> LABEL[$d,$id.text] // reference to a label in a rewrite rule
+	|	ACTION
+	;
+
+rewrite_tree_ebnf
+@init {
+    Token firstToken = input.LT(1);
+}
+@after {
+	$rewrite_tree_ebnf.tree.getToken().setLine(firstToken.getLine());
+	$rewrite_tree_ebnf.tree.getToken().setCharPositionInLine(firstToken.getCharPositionInLine());
+}
+	:	rewrite_tree_block ebnfSuffix -> ^(ebnfSuffix rewrite_tree_block)
+	;
+	
+rewrite_tree
+	:	'^(' rewrite_tree_atom rewrite_tree_element* ')'
+		-> ^(TREE_BEGIN rewrite_tree_atom rewrite_tree_element* )
+	;
+
+/** Build a tree for a template rewrite:
+      ^(TEMPLATE (ID|ACTION) ^(ARGLIST ^(ARG ID ACTION) ...) )
+    where ARGLIST is always there even if no args exist.
+    ID can be "template" keyword.  If first child is ACTION then it's
+    an indirect template ref
+
+    -> foo(a={...}, b={...})
+    -> ({string-e})(a={...}, b={...})  // e evaluates to template name
+    -> {%{$ID.text}} // create literal template from string (done in ActionTranslator)
+	-> {st-expr} // st-expr evaluates to ST
+ */
+rewrite_template
+	:   // -> template(a={...},...) "..."    inline template
+		id lp='(' rewrite_template_args	')'
+		( str=DOUBLE_QUOTE_STRING_LITERAL | str=DOUBLE_ANGLE_STRING_LITERAL )
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] id rewrite_template_args $str)
+
+	|	// -> foo(a={...}, ...)
+		rewrite_template_ref
+
+	|	// -> ({expr})(a={...}, ...)
+		rewrite_indirect_template_head
+
+	|	// -> {...}
+		ACTION
+	;
+
+/** -> foo(a={...}, ...) */
+rewrite_template_ref
+	:	id lp='(' rewrite_template_args	')'
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] id rewrite_template_args)
+	;
+
+/** -> ({expr})(a={...}, ...) */
+rewrite_indirect_template_head
+	:	lp='(' ACTION ')' '(' rewrite_template_args ')'
+		-> ^(TEMPLATE[$lp,"TEMPLATE"] ACTION rewrite_template_args)
+	;
+
+rewrite_template_args
+	:	rewrite_template_arg (',' rewrite_template_arg)*
+		-> ^(ARGLIST rewrite_template_arg+)
+	|	-> ARGLIST
+	;
+
+rewrite_template_arg
+	:   id '=' ACTION -> ^(ARG[$id.start] id ACTION)
+	;
+
+qid :	id ('.' id)* ;
+	
+id	:	TOKEN_REF -> ID[$TOKEN_REF]
+	|	RULE_REF  -> ID[$RULE_REF]
+	;
+
+// L E X I C A L   R U L E S
+
+SL_COMMENT
+ 	:	'//'
+ 	 	(	' $ANTLR ' SRC // src directive
+ 		|	~('\r'|'\n')*
+		)
+		'\r'? '\n'
+		{$channel=HIDDEN;}
+	;
+
+ML_COMMENT
+	:	'/*' {if (input.LA(1)=='*') $type=DOC_COMMENT; else $channel=HIDDEN;} .* '*/'
+	;
+
+CHAR_LITERAL
+	:	'\'' LITERAL_CHAR '\''
+	;
+
+STRING_LITERAL
+	:	'\'' LITERAL_CHAR LITERAL_CHAR* '\''
+	;
+
+fragment
+LITERAL_CHAR
+	:	ESC
+	|	~('\''|'\\')
+	;
+
+DOUBLE_QUOTE_STRING_LITERAL
+	:	'"' (ESC | ~('\\'|'"'))* '"'
+	;
+
+DOUBLE_ANGLE_STRING_LITERAL
+	:	'<<' .* '>>'
+	;
+
+fragment
+ESC	:	'\\'
+		(	'n'
+		|	'r'
+		|	't'
+		|	'b'
+		|	'f'
+		|	'"'
+		|	'\''
+		|	'\\'
+		|	'>'
+		|	'u' XDIGIT XDIGIT XDIGIT XDIGIT
+		|	. // unknown, leave as it is
+		)
+	;
+
+fragment
+XDIGIT :
+		'0' .. '9'
+	|	'a' .. 'f'
+	|	'A' .. 'F'
+	;
+
+INT	:	'0'..'9'+
+	;
+
+ARG_ACTION
+	:	NESTED_ARG_ACTION
+	;
+
+fragment
+NESTED_ARG_ACTION :
+	'['
+	(	options {greedy=false; k=1;}
+	:	NESTED_ARG_ACTION
+	|	ACTION_STRING_LITERAL
+	|	ACTION_CHAR_LITERAL
+	|	.
+	)*
+	']'
+	//{setText(getText().substring(1, getText().length()-1));}
+	;
+
+ACTION
+	:	NESTED_ACTION ( '?' {$type = SEMPRED;} )?
+	;
+
+fragment
+NESTED_ACTION :
+	'{'
+	(	options {greedy=false; k=2;}
+	:	NESTED_ACTION
+	|	SL_COMMENT
+	|	ML_COMMENT
+	|	ACTION_STRING_LITERAL
+	|	ACTION_CHAR_LITERAL
+	|	.
+	)*
+	'}'
+   ;
+
+fragment
+ACTION_CHAR_LITERAL
+	:	'\'' (ACTION_ESC|~('\\'|'\'')) '\''
+	;
+
+fragment
+ACTION_STRING_LITERAL
+	:	'"' (ACTION_ESC|~('\\'|'"'))* '"'
+	;
+
+fragment
+ACTION_ESC
+	:	'\\\''
+	|	'\\' '"' // ANTLR doesn't like: '\\"'
+	|	'\\' ~('\''|'"')
+	;
+
+TOKEN_REF
+	:	'A'..'Z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+RULE_REF
+	:	'a'..'z' ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+	;
+
+/** Match the start of an options section.  Don't allow normal
+ *  action processing on the {...} as it's not an action.
+ */
+OPTIONS
+	:	'options' WS_LOOP '{'
+	;
+	
+TOKENS
+	:	'tokens' WS_LOOP '{'
+	;
+
+/** Reset the file and line information; useful when the grammar
+ *  has been generated so that errors are shown relative to the
+ *  original file like the old C preprocessor used to do.
+ */
+fragment
+SRC	:	'src' ' ' file=ACTION_STRING_LITERAL ' ' line=INT
+	;
+
+WS	:	(	' '
+		|	'\t'
+		|	'\r'? '\n'
+		)+
+		{$channel=HIDDEN;}
+	;
+
+fragment
+WS_LOOP
+	:	(	WS
+		|	SL_COMMENT
+		|	ML_COMMENT
+		)*
+	;
+
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3Tree.g b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3Tree.g
new file mode 100644
index 0000000..a660205
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/ANTLRv3Tree.g
@@ -0,0 +1,262 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2010 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** ANTLR v3 tree grammar to walk trees created by ANTLRv3.g */
+tree grammar ANTLRv3Tree;
+
+options {
+	language=Java;
+	tokenVocab = ANTLRv3;
+	ASTLabelType = CommonTree;
+}
+
+@header {
+package org.antlr.grammar.v3;
+}
+
+grammarDef
+    :   ^( grammarType ID DOC_COMMENT? optionsSpec? tokensSpec? attrScope* action* rule+ )
+    ;
+
+grammarType
+	:	LEXER_GRAMMAR
+    |	PARSER_GRAMMAR
+    |	TREE_GRAMMAR
+    |	COMBINED_GRAMMAR
+    ;
+
+tokensSpec
+	:	^(TOKENS tokenSpec+)
+	;
+
+tokenSpec
+	:	^('=' TOKEN_REF STRING_LITERAL)
+	|	^('=' TOKEN_REF CHAR_LITERAL)
+	|	TOKEN_REF
+	;
+
+attrScope
+	:	^('scope' ID ACTION)
+	;
+
+action
+	:	^('@' ID ID ACTION)
+	|	^('@' ID ACTION)
+	;
+
+optionsSpec
+	:	^(OPTIONS option+)
+	;
+
+option
+    :   qid // only allowed in element options
+    |	^('=' ID optionValue)
+ 	;
+ 	
+optionValue
+    :   ID
+    |   STRING_LITERAL
+    |   CHAR_LITERAL
+    |   INT
+    ;
+
+rule
+	:	^( RULE ID modifier? (^(ARG ARG_ACTION))? (^(RET ARG_ACTION))?
+	       throwsSpec? optionsSpec? ruleScopeSpec? ruleAction*
+	       altList
+	       exceptionGroup? EOR
+	     )
+	;
+
+modifier
+	:	'protected'|'public'|'private'|'fragment'
+	;
+
+/** Match stuff like @init {int i;} */
+ruleAction
+	:	^('@' ID ACTION)
+	;
+
+throwsSpec
+	:	^('throws' ID+)
+	;
+
+ruleScopeSpec
+	:	^('scope' ACTION)
+	|	^('scope' ACTION ID+)
+	|	^('scope' ID+)
+	;
+
+block
+    :   ^( BLOCK optionsSpec? (alternative rewrite)+ EOB )
+    ;
+
+altList
+    :   ^( BLOCK (alternative rewrite)+ EOB )
+    ;
+
+alternative
+    :   ^(ALT element+ EOA)
+    |   ^(ALT EPSILON EOA)
+    ;
+
+exceptionGroup
+	:	exceptionHandler+ finallyClause?
+	|	finallyClause
+    ;
+
+exceptionHandler
+    :    ^('catch' ARG_ACTION ACTION)
+    ;
+
+finallyClause
+    :    ^('finally' ACTION)
+    ;
+
+element
+	:	^(('='|'+=') ID block)
+	|	^(('='|'+=') ID atom)
+	|	atom
+	|	ebnf
+	|   ACTION
+	|   SEMPRED
+	|	GATED_SEMPRED
+	|   ^(TREE_BEGIN element+)
+	;
+
+atom:   ^(('^'|'!') atom)
+	|	^(CHAR_RANGE CHAR_LITERAL CHAR_LITERAL optionsSpec?)
+	|	^('~' notTerminal optionsSpec?)
+	|	^('~' block optionsSpec?)
+    |	^(RULE_REF ARG_ACTION)
+    |	RULE_REF
+    |   CHAR_LITERAL
+    |   ^(CHAR_LITERAL optionsSpec)
+    |	TOKEN_REF
+    |	^(TOKEN_REF optionsSpec)
+    |	^(TOKEN_REF ARG_ACTION optionsSpec)
+    |	^(TOKEN_REF ARG_ACTION)
+    |	STRING_LITERAL
+    |	^(STRING_LITERAL optionsSpec)
+    |	'.'
+    |	^('.' optionsSpec?)
+    ;
+
+/** Matches EBNF blocks (and token sets via block rule) */
+ebnf
+	:	^(SYNPRED block)
+	|	^(OPTIONAL block)
+  	|	^(CLOSURE block)
+   	|	^(POSITIVE_CLOSURE block)
+	|	SYN_SEMPRED
+	|	block
+	;
+
+notTerminal
+	:   CHAR_LITERAL
+	|	TOKEN_REF
+	|	STRING_LITERAL
+	;
+		
+// R E W R I T E  S Y N T A X
+
+rewrite
+	:	(^('->' SEMPRED rewrite_alternative))* ^('->' rewrite_alternative)
+	|
+	;
+
+rewrite_alternative
+	:	rewrite_template
+	|	rewrite_tree_alternative
+   	|   ^(ALT EPSILON EOA)
+	;
+	
+rewrite_tree_block
+    :   ^(BLOCK rewrite_tree_alternative EOB)
+    ;
+
+rewrite_tree_alternative
+    :	^(ALT rewrite_tree_element+ EOA)
+    ;
+
+rewrite_tree_element
+	:	rewrite_tree_atom
+	|	rewrite_tree
+	|   rewrite_tree_block
+	|   rewrite_tree_ebnf
+	;
+
+rewrite_tree_atom
+    :   CHAR_LITERAL
+	|   TOKEN_REF
+	|   ^(TOKEN_REF ARG_ACTION) // for imaginary nodes
+    |   RULE_REF
+	|   STRING_LITERAL
+	|   LABEL
+	|	ACTION
+	;
+
+rewrite_tree_ebnf
+	:	^(OPTIONAL rewrite_tree_block)
+  	|	^(CLOSURE rewrite_tree_block)
+   	|	^(POSITIVE_CLOSURE rewrite_tree_block)
+	;
+	
+rewrite_tree
+	:	^(TREE_BEGIN rewrite_tree_atom rewrite_tree_element* )
+	;
+
+rewrite_template
+	:   ^( TEMPLATE ID rewrite_template_args
+		   (DOUBLE_QUOTE_STRING_LITERAL | DOUBLE_ANGLE_STRING_LITERAL)
+		 )
+	|	rewrite_template_ref
+	|	rewrite_indirect_template_head
+	|	ACTION
+	;
+
+/** foo(a={...}, ...) */
+rewrite_template_ref
+	:	^(TEMPLATE ID rewrite_template_args)
+	;
+
+/** ({expr})(a={...}, ...) */
+rewrite_indirect_template_head
+	:	^(TEMPLATE ACTION rewrite_template_args)
+	;
+
+rewrite_template_args
+	:	^(ARGLIST rewrite_template_arg+)
+	|	ARGLIST
+	;
+
+rewrite_template_arg
+	:   ^(ARG ID ACTION)
+	;
+
+qid	:	ID ('.' ID)* ;
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/ActionAnalysis.g b/tool/src/main/antlr3/org/antlr/grammar/v3/ActionAnalysis.g
new file mode 100644
index 0000000..4edcc54
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/ActionAnalysis.g
@@ -0,0 +1,134 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2010 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** We need to set Rule.referencedPredefinedRuleAttributes before
+ *  code generation.  This filter looks at an action in the context of
+ *  its rule and outer alternative number and figures out which
+ *  rules have predefined properties referenced.  I need this so I can
+ *  remove unused labels.  It also tracks which labeled rules are
+ *  referenced by actions.
+ */
+lexer grammar ActionAnalysis;
+options {
+  language=Java;
+  filter=true;  // try all non-fragment rules in order specified
+}
+
+@header {
+package org.antlr.grammar.v3;
+import org.antlr.runtime.*;
+import org.antlr.tool.*;
+}
+
+@members {
+Rule enclosingRule;
+Grammar grammar;
+Token actionToken;
+int outerAltNum = 0;
+
+	public ActionAnalysis(Grammar grammar, String ruleName, GrammarAST actionAST)
+	{
+		this(new ANTLRStringStream(actionAST.token.getText()));
+		this.grammar = grammar;
+	    this.enclosingRule = grammar.getLocallyDefinedRule(ruleName);
+	    this.actionToken = actionAST.token;
+	    this.outerAltNum = actionAST.outerAltNum;
+	}
+
+public void analyze() {
+	// System.out.println("###\naction="+actionToken);
+	Token t;
+	do {
+		t = nextToken();
+	} while ( t.getType()!= Token.EOF );
+}
+}
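+
+// A rough usage sketch (rule name and AST variable are hypothetical): the tool
+// runs this filter once per embedded action, e.g.
+//
+//     ActionAnalysis analyzer = new ActionAnalysis(grammar, "expr", actionAST);
+//     analyzer.analyze();   // fires the X_Y / X / Y rules below, marking references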
+
+/**	$x.y	x is enclosing rule or rule ref or rule label
+ *			y is a return value, parameter, or predefined property.
+ */
+X_Y :	'$' x=ID '.' y=ID {enclosingRule!=null}?
+		{
+		AttributeScope scope = null;
+		String refdRuleName = null;
+		if ( $x.text.equals(enclosingRule.name) ) {
+			// ref to enclosing rule.
+			refdRuleName = $x.text;
+			scope = enclosingRule.getLocalAttributeScope($y.text);
+		}
+		else if ( enclosingRule.getRuleLabel($x.text)!=null ) {
+			// ref to rule label
+			Grammar.LabelElementPair pair = enclosingRule.getRuleLabel($x.text);
+			pair.actionReferencesLabel = true;
+			refdRuleName = pair.referencedRuleName;
+			Rule refdRule = grammar.getRule(refdRuleName);
+			if ( refdRule!=null ) {
+				scope = refdRule.getLocalAttributeScope($y.text);
+			}
+		}
+		else if ( enclosingRule.getRuleRefsInAlt(x.getText(), outerAltNum)!=null ) {
+			// ref to rule referenced in this alt
+			refdRuleName = $x.text;
+			Rule refdRule = grammar.getRule(refdRuleName);
+			if ( refdRule!=null ) {
+				scope = refdRule.getLocalAttributeScope($y.text);
+			}
+		}
+		if ( scope!=null &&
+			 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
+		{
+			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
+			//System.out.println("referenceRuleLabelPredefinedAttribute for "+refdRuleName);
+		}
+		}
+	;
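+
+// For example (hypothetical rule): in  a : e=expr {System.out.println($e.start);} ;
+// the reference to the predefined property  start  through rule label  e  marks the
+// label as action-referenced and records that rule expr needs its predefined
+// attributes, so the label is not optimized away.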
+
+/** $x	x is an isolated rule label.  Just record that the label was referenced */
+X	:	'$' x=ID {enclosingRule!=null && enclosingRule.getRuleLabel($x.text)!=null}?
+		{
+			Grammar.LabelElementPair pair = enclosingRule.getRuleLabel($x.text);
+			pair.actionReferencesLabel = true;
+		}
+	;
+	
+/** $y	y is a return value, parameter, or predefined property of current rule */
+Y	:	'$' ID {enclosingRule!=null && enclosingRule.getLocalAttributeScope($ID.text)!=null}?
+		{
+			AttributeScope scope = enclosingRule.getLocalAttributeScope($ID.text);
+			if ( scope!=null &&
+				 (scope.isPredefinedRuleScope||scope.isPredefinedLexerRuleScope) )
+			{
+				grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
+				//System.out.println("referenceRuleLabelPredefinedAttribute for "+$ID.text);
+			}
+		}
+	;
+	
+fragment
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+    ;
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/ActionTranslator.g b/tool/src/main/antlr3/org/antlr/grammar/v3/ActionTranslator.g
new file mode 100644
index 0000000..f4af2fa
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/ActionTranslator.g
@@ -0,0 +1,810 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2010 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+lexer grammar ActionTranslator;
+options {
+  language=Java;
+  filter=true;  // try all non-fragment rules in order specified
+  // output=template;  TODO: can we make tokens return templates somehow?
+}
+
+@header {
+package org.antlr.grammar.v3;
+import org.stringtemplate.v4.ST;
+import org.antlr.runtime.*;
+import org.antlr.tool.*;
+import org.antlr.codegen.*;
+
+import org.antlr.runtime.*;
+import java.util.List;
+import java.util.ArrayList;
+import org.antlr.grammar.v3.ANTLRParser;
+
+}
+
+@members {
+public List<Object> chunks = new ArrayList<Object>();
+Rule enclosingRule;
+int outerAltNum;
+Grammar grammar;
+CodeGenerator generator;
+Token actionToken;
+
+	public ActionTranslator(CodeGenerator generator,
+								 String ruleName,
+								 GrammarAST actionAST)
+	{
+		this(new ANTLRStringStream(actionAST.token.getText()));
+		this.generator = generator;
+		this.grammar = generator.grammar;
+	    this.enclosingRule = grammar.getLocallyDefinedRule(ruleName);
+	    this.actionToken = actionAST.token;
+	    this.outerAltNum = actionAST.outerAltNum;
+	}
+
+	public ActionTranslator(CodeGenerator generator,
+								 String ruleName,
+								 Token actionToken,
+								 int outerAltNum)
+	{
+		this(new ANTLRStringStream(actionToken.getText()));
+		this.generator = generator;
+		grammar = generator.grammar;
+	    this.enclosingRule = grammar.getRule(ruleName);
+	    this.actionToken = actionToken;
+		this.outerAltNum = outerAltNum;
+	}
+
+/** Return a list of strings and ST objects that
+ *  represent the translated action.
+ */
+public List<Object> translateToChunks() {
+	// System.out.println("###\naction="+action);
+	Token t;
+	do {
+		t = nextToken();
+	} while ( t.getType()!= Token.EOF );
+	return chunks;
+}
+
+public String translate() {
+	List<Object> theChunks = translateToChunks();
+	//System.out.println("chunks="+a.chunks);
+	StringBuilder buf = new StringBuilder();
+	for (int i = 0; i < theChunks.size(); i++) {
+		Object o = theChunks.get(i);
+		if ( o instanceof ST ) buf.append(((ST)o).render());
+		else buf.append(o);
+	}
+	//System.out.println("translated: "+buf.toString());
+	return buf.toString();
+}
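+
+// A rough usage sketch (rule name and AST variable are hypothetical): given the
+// current CodeGenerator and an ACTION GrammarAST, the embedded action is rewritten
+// into target code / template chunks:
+//
+//     ActionTranslator tr = new ActionTranslator(generator, "expr", actionAST);
+//     String code = tr.translate();        // or translateToChunks() for raw chunks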
+
+public List<Object> translateAction(String action) {
+	String rname = null;
+	if ( enclosingRule!=null ) {
+		rname = enclosingRule.name;
+	}
+	ActionTranslator translator =
+		new ActionTranslator(generator,
+								  rname,
+								  new CommonToken(ANTLRParser.ACTION,action),outerAltNum);
+    return translator.translateToChunks();
+}
+
+public boolean isTokenRefInAlt(String id) {
+    return enclosingRule.getTokenRefsInAlt(id, outerAltNum)!=null;
+}
+public boolean isRuleRefInAlt(String id) {
+    return enclosingRule.getRuleRefsInAlt(id, outerAltNum)!=null;
+}
+public Grammar.LabelElementPair getElementLabel(String id) {
+    return enclosingRule.getLabel(id);
+}
+
+public void checkElementRefUniqueness(String ref, boolean isToken) {
+		List<GrammarAST> refs = null;
+		if ( isToken ) {
+		    refs = enclosingRule.getTokenRefsInAlt(ref, outerAltNum);
+		}
+		else {
+		    refs = enclosingRule.getRuleRefsInAlt(ref, outerAltNum);
+		}
+		if ( refs!=null && refs.size()>1 ) {
+			ErrorManager.grammarError(ErrorManager.MSG_NONUNIQUE_REF,
+									  grammar,
+									  actionToken,
+									  ref);
+		}
+}
+
+/** For \$rulelabel.name, return the Attribute found for name.  It
+ *  will be a predefined property or a return value.
+ */
+public Attribute getRuleLabelAttribute(String ruleName, String attrName) {
+	Rule r = grammar.getRule(ruleName);
+	AttributeScope scope = r.getLocalAttributeScope(attrName);
+	if ( scope!=null && !scope.isParameterScope ) {
+		return scope.getAttribute(attrName);
+	}
+	return null;
+}
+
+AttributeScope resolveDynamicScope(String scopeName) {
+	if ( grammar.getGlobalScope(scopeName)!=null ) {
+		return grammar.getGlobalScope(scopeName);
+	}
+	Rule scopeRule = grammar.getRule(scopeName);
+	if ( scopeRule!=null ) {
+		return scopeRule.ruleScope;
+	}
+	return null; // not a valid dynamic scope
+}
+
+protected ST template(String name) {
+	ST st = generator.getTemplates().getInstanceOf(name);
+	chunks.add(st);
+	return st;
+}
+
+
+}
+
+/**	$x.y	x is enclosing rule, y is a return value, parameter, or
+ * 			predefined property.
+ *
+ * 			r[int i] returns [int j]
+ * 				:	{$r.i, $r.j, $r.start, $r.stop, $r.st, $r.tree}
+ * 				;
+ */
+SET_ENCLOSING_RULE_SCOPE_ATTR
+	:	'$' x=ID '.' y=ID WS? '=' expr=ATTR_VALUE_EXPR ';'
+							{enclosingRule!=null &&
+	                         $x.text.equals(enclosingRule.name) &&
+	                         enclosingRule.getLocalAttributeScope($y.text)!=null}?
+		//{System.out.println("found \$rule.attr");}
+		{
+		ST st = null;
+		AttributeScope scope = enclosingRule.getLocalAttributeScope($y.text);
+		if ( scope.isPredefinedRuleScope ) {
+			if ( $y.text.equals("st") || $y.text.equals("tree") ) {
+				st = template("ruleSetPropertyRef_"+$y.text);
+				grammar.referenceRuleLabelPredefinedAttribute($x.text);
+				st.add("scope", $x.text);
+				st.add("attr", $y.text);
+				st.add("expr", translateAction($expr.text));
+			} else {
+				ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+										  grammar,
+										  actionToken,
+										  $x.text,
+										  $y.text);
+			}
+		}
+	    else if ( scope.isPredefinedLexerRuleScope ) {
+	    	// this is a better message to emit than the previous one...
+			ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+									  grammar,
+									  actionToken,
+									  $x.text,
+									  $y.text);
+	    }
+		else if ( scope.isParameterScope ) {
+			st = template("parameterSetAttributeRef");
+			st.add("attr", scope.getAttribute($y.text));
+			st.add("expr", translateAction($expr.text));
+		}
+		else { // must be return value
+			st = template("returnSetAttributeRef");
+			st.add("ruleDescriptor", enclosingRule);
+			st.add("attr", scope.getAttribute($y.text));
+			st.add("expr", translateAction($expr.text));
+		}
+		}
+	;
+ENCLOSING_RULE_SCOPE_ATTR
+	:	'$' x=ID '.' y=ID	{enclosingRule!=null &&
+	                         $x.text.equals(enclosingRule.name) &&
+	                         enclosingRule.getLocalAttributeScope($y.text)!=null}?
+		//{System.out.println("found \$rule.attr");}
+		{
+		if ( isRuleRefInAlt($x.text)  ) {
+			ErrorManager.grammarError(ErrorManager.MSG_RULE_REF_AMBIG_WITH_RULE_IN_ALT,
+									  grammar,
+									  actionToken,
+									  $x.text);
+		}
+		ST st = null;
+		AttributeScope scope = enclosingRule.getLocalAttributeScope($y.text);
+		if ( scope.isPredefinedRuleScope ) {
+			st = template("rulePropertyRef_"+$y.text);
+			grammar.referenceRuleLabelPredefinedAttribute($x.text);
+			st.add("scope", $x.text);
+			st.add("attr", $y.text);
+		}
+	    else if ( scope.isPredefinedLexerRuleScope ) {
+	    	// perhaps not the most precise error message to use, but...
+			ErrorManager.grammarError(ErrorManager.MSG_RULE_HAS_NO_ARGS,
+									  grammar,
+									  actionToken,
+									  $x.text);
+	    }
+		else if ( scope.isParameterScope ) {
+			st = template("parameterAttributeRef");
+			st.add("attr", scope.getAttribute($y.text));
+		}
+		else { // must be return value
+			st = template("returnAttributeRef");
+			st.add("ruleDescriptor", enclosingRule);
+			st.add("attr", scope.getAttribute($y.text));
+		}
+		}
+	;
+
+/** Setting $tokenlabel.attr or $tokenref.attr where attr is a predefined property of a token is an error. */
+SET_TOKEN_SCOPE_ATTR
+	:	'$' x=ID '.' y=ID WS? '='
+							 {enclosingRule!=null && input.LA(1)!='=' &&
+	                         (enclosingRule.getTokenLabel($x.text)!=null||
+	                          isTokenRefInAlt($x.text)) &&
+	                         AttributeScope.tokenScope.getAttribute($y.text)!=null}?
+		//{System.out.println("found \$tokenlabel.attr or \$tokenref.attr");}
+		{
+		ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+								  grammar,
+								  actionToken,
+								  $x.text,
+								  $y.text);
+		}
+	;
+
+/** $tokenlabel.attr or $tokenref.attr where attr is a predefined property of a token.
+ *  If in a lexer grammar, only translate for strings and tokens (rule refs).
+ */
+TOKEN_SCOPE_ATTR
+	:	'$' x=ID '.' y=ID	{enclosingRule!=null &&
+	                         (enclosingRule.getTokenLabel($x.text)!=null||
+	                          isTokenRefInAlt($x.text)) &&
+	                         AttributeScope.tokenScope.getAttribute($y.text)!=null &&
+	                         (grammar.type!=Grammar.LEXER ||
+	                         getElementLabel($x.text).elementRef.token.getType()==ANTLRParser.TOKEN_REF ||
+	                         getElementLabel($x.text).elementRef.token.getType()==ANTLRParser.STRING_LITERAL)}?
+		// {System.out.println("found \$tokenlabel.attr or \$tokenref.attr");}
+		{
+		String label = $x.text;
+		if ( enclosingRule.getTokenLabel($x.text)==null ) {
+			// \$tokenref.attr  gotta get old label or compute new one
+			checkElementRefUniqueness($x.text, true);
+			label = enclosingRule.getElementLabel($x.text, outerAltNum, generator);
+			if ( label==null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+										  grammar,
+										  actionToken,
+										  "\$"+$x.text+"."+$y.text);
+				label = $x.text;
+			}
+		}
+		ST st = template("tokenLabelPropertyRef_"+$y.text);
+		st.add("scope", label);
+		st.add("attr", AttributeScope.tokenScope.getAttribute($y.text));
+		}
+	;
+
+/** Setting $rulelabel.attr or $ruleref.attr where attr is a predefined property is an error.
+ *  This must also fail if we try to access a local attribute's field, like $tree.scope = localObject;
+ *  that must be handled by LOCAL_ATTR below. ANTLR only concerns itself with the top-level scope
+ *  attributes declared in scope {}, parameters, return values, and the like.
+ */
+SET_RULE_SCOPE_ATTR
+@init {
+Grammar.LabelElementPair pair=null;
+String refdRuleName=null;
+}
+	:	'$' x=ID '.' y=ID WS? '=' {enclosingRule!=null && input.LA(1)!='='}?
+		{
+		pair = enclosingRule.getRuleLabel($x.text);
+		refdRuleName = $x.text;
+		if ( pair!=null ) {
+			refdRuleName = pair.referencedRuleName;
+		}
+		}
+		// supercomplicated because I can't exec the above action.
+		// This asserts that it's a label or a ref to a rule, and proceeds only if the
+		// attribute is valid for that rule's scope
+		{(enclosingRule.getRuleLabel($x.text)!=null || isRuleRefInAlt($x.text)) &&
+	      getRuleLabelAttribute(enclosingRule.getRuleLabel($x.text)!=null?enclosingRule.getRuleLabel($x.text).referencedRuleName:$x.text,$y.text)!=null}?
+		//{System.out.println("found set \$rulelabel.attr or \$ruleref.attr: "+$x.text+"."+$y.text);}
+		{
+		ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+								  grammar,
+								  actionToken,
+								  $x.text,
+								  $y.text);
+		}
+	;
+
+/** $rulelabel.attr or $ruleref.attr where attr is a predefined property */
+RULE_SCOPE_ATTR
+@init {
+Grammar.LabelElementPair pair=null;
+String refdRuleName=null;
+}
+	:	'$' x=ID '.' y=ID {enclosingRule!=null}?
+		{
+		pair = enclosingRule.getRuleLabel($x.text);
+		refdRuleName = $x.text;
+		if ( pair!=null ) {
+			refdRuleName = pair.referencedRuleName;
+		}
+		}
+		// supercomplicated because I can't exec the above action.
+		// This asserts that it's a label or a ref to a rule, and proceeds only if the
+		// attribute is valid for that rule's scope
+		{(enclosingRule.getRuleLabel($x.text)!=null || isRuleRefInAlt($x.text)) &&
+	      getRuleLabelAttribute(enclosingRule.getRuleLabel($x.text)!=null?enclosingRule.getRuleLabel($x.text).referencedRuleName:$x.text,$y.text)!=null}?
+		//{System.out.println("found \$rulelabel.attr or \$ruleref.attr: "+$x.text+"."+$y.text);}
+		{
+		String label = $x.text;
+		if ( pair==null ) {
+			// \$ruleref.attr  gotta get old label or compute new one
+			checkElementRefUniqueness($x.text, false);
+			label = enclosingRule.getElementLabel($x.text, outerAltNum, generator);
+			if ( label==null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+										  grammar,
+										  actionToken,
+										  "\$"+$x.text+"."+$y.text);
+				label = $x.text;
+			}
+		}
+		ST st;
+		Rule refdRule = grammar.getRule(refdRuleName);
+		AttributeScope scope = refdRule.getLocalAttributeScope($y.text);
+		if ( scope.isPredefinedRuleScope ) {
+			st = template("ruleLabelPropertyRef_"+$y.text);
+			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
+			st.add("scope", label);
+			st.add("attr", $y.text);
+		}
+		else if ( scope.isPredefinedLexerRuleScope ) {
+			st = template("lexerRuleLabelPropertyRef_"+$y.text);
+			grammar.referenceRuleLabelPredefinedAttribute(refdRuleName);
+			st.add("scope", label);
+			st.add("attr", $y.text);
+		}
+		else if ( scope.isParameterScope ) {
+			// TODO: error!
+		}
+		else {
+			st = template("ruleLabelRef");
+			st.add("referencedRule", refdRule);
+			st.add("scope", label);
+			st.add("attr", scope.getAttribute($y.text));
+		}
+		}
+	;
+
+
+/** $label	either a token label or token/rule list label like label+=expr */
+LABEL_REF
+	:	'$' ID {enclosingRule!=null &&
+	            getElementLabel($ID.text)!=null &&
+		        enclosingRule.getRuleLabel($ID.text)==null}?
+		// {System.out.println("found \$label");}
+		{
+		ST st;
+		Grammar.LabelElementPair pair = getElementLabel($ID.text);
+		if ( pair.type==Grammar.RULE_LIST_LABEL ||
+             pair.type==Grammar.TOKEN_LIST_LABEL ||
+             pair.type==Grammar.WILDCARD_TREE_LIST_LABEL )
+        {
+			st = template("listLabelRef");
+		}
+		else {
+			st = template("tokenLabelRef");
+		}
+		st.add("label", $ID.text);
+		}
+	;
+
+/** $tokenref in a non-lexer grammar */
+ISOLATED_TOKEN_REF
+	:	'$' ID	{grammar.type!=Grammar.LEXER && enclosingRule!=null && isTokenRefInAlt($ID.text)}?
+		//{System.out.println("found \$tokenref");}
+		{
+		String label = enclosingRule.getElementLabel($ID.text, outerAltNum, generator);
+		checkElementRefUniqueness($ID.text, true);
+		if ( label==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+									  grammar,
+									  actionToken,
+									  $ID.text);
+		}
+		else {
+			ST st = template("tokenLabelRef");
+			st.add("label", label);
+		}
+		}
+	;
+
+/** $lexerruleref from within the lexer */
+ISOLATED_LEXER_RULE_REF
+	:	'$' ID	{grammar.type==Grammar.LEXER &&
+	             enclosingRule!=null &&
+	             isRuleRefInAlt($ID.text)}?
+		//{System.out.println("found \$lexerruleref");}
+		{
+		String label = enclosingRule.getElementLabel($ID.text, outerAltNum, generator);
+		checkElementRefUniqueness($ID.text, false);
+		if ( label==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_FORWARD_ELEMENT_REF,
+									  grammar,
+									  actionToken,
+									  $ID.text);
+		}
+		else {
+			ST st = template("lexerRuleLabel");
+			st.add("label", label);
+		}
+		}
+	;
+
+/**  $y 	return value, parameter, predefined rule property, or token/rule
+ *          reference within enclosing rule's outermost alt.
+ *          y must be a "local" reference; i.e., it must be referring to
+ *          something defined within the enclosing rule.
+ *
+ * 			r[int i] returns [int j]
+ * 				:	{$i, $j, $start, $stop, $st, $tree}
+ *              ;
+ *
+ *	TODO: this might get the dynamic scope's elements too!
+ */
+SET_LOCAL_ATTR
+	:	'$' ID WS? '=' expr=ATTR_VALUE_EXPR ';' {enclosingRule!=null
+													&& enclosingRule.getLocalAttributeScope($ID.text)!=null
+													&& !enclosingRule.getLocalAttributeScope($ID.text).isPredefinedLexerRuleScope}?
+		//{System.out.println("found set \$localattr");}
+		{
+		ST st;
+		AttributeScope scope = enclosingRule.getLocalAttributeScope($ID.text);
+		if ( scope.isPredefinedRuleScope ) {
+			if ($ID.text.equals("tree") || $ID.text.equals("st")) {
+				st = template("ruleSetPropertyRef_"+$ID.text);
+				grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
+				st.add("scope", enclosingRule.name);
+				st.add("attr", $ID.text);
+				st.add("expr", translateAction($expr.text));
+			} else {
+				ErrorManager.grammarError(ErrorManager.MSG_WRITE_TO_READONLY_ATTR,
+										 grammar,
+										 actionToken,
+										 $ID.text,
+										 "");
+			}
+		}
+		else if ( scope.isParameterScope ) {
+			st = template("parameterSetAttributeRef");
+			st.add("attr", scope.getAttribute($ID.text));
+			st.add("expr", translateAction($expr.text));
+		}
+		else {
+			st = template("returnSetAttributeRef");
+			st.add("ruleDescriptor", enclosingRule);
+			st.add("attr", scope.getAttribute($ID.text));
+			st.add("expr", translateAction($expr.text));
+			}
+		}
+	;
+LOCAL_ATTR
+	:	'$' ID {enclosingRule!=null && enclosingRule.getLocalAttributeScope($ID.text)!=null}?
+		//{System.out.println("found \$localattr");}
+		{
+		ST st;
+		AttributeScope scope = enclosingRule.getLocalAttributeScope($ID.text);
+		if ( scope.isPredefinedRuleScope ) {
+			st = template("rulePropertyRef_"+$ID.text);
+			grammar.referenceRuleLabelPredefinedAttribute(enclosingRule.name);
+			st.add("scope", enclosingRule.name);
+			st.add("attr", $ID.text);
+		}
+		else if ( scope.isPredefinedLexerRuleScope ) {
+			st = template("lexerRulePropertyRef_"+$ID.text);
+			st.add("scope", enclosingRule.name);
+			st.add("attr", $ID.text);
+		}
+		else if ( scope.isParameterScope ) {
+			st = template("parameterAttributeRef");
+			st.add("attr", scope.getAttribute($ID.text));
+		}
+		else {
+			st = template("returnAttributeRef");
+			st.add("ruleDescriptor", enclosingRule);
+			st.add("attr", scope.getAttribute($ID.text));
+		}
+		}
+	;
+
+/**	$x::y	the only way to access the attributes within a dynamic scope
+ * 			regardless of whether or not you are in the defining rule.
+ *
+ * 			scope Symbols { List names; }
+ * 			r
+ * 			scope {int i;}
+ * 			scope Symbols;
+ * 				:	{$r::i=3;} s {$Symbols::names;}
+ * 				;
+ * 			s	:	{$r::i; $Symbols::names;}
+ * 				;
+ */
+SET_DYNAMIC_SCOPE_ATTR
+	:	'$' x=ID '::' y=ID WS? '=' expr=ATTR_VALUE_EXPR ';'
+						   {resolveDynamicScope($x.text)!=null &&
+						     resolveDynamicScope($x.text).getAttribute($y.text)!=null}?
+		//{System.out.println("found set \$scope::attr "+ $x.text + "::" + $y.text + " to " + $expr.text);}
+		{
+		AttributeScope scope = resolveDynamicScope($x.text);
+		if ( scope!=null ) {
+			ST st = template("scopeSetAttributeRef");
+			st.add("scope", $x.text);
+			st.add("attr",  scope.getAttribute($y.text));
+			st.add("expr",  translateAction($expr.text));
+		}
+		else {
+			// error: invalid dynamic attribute
+		}
+		}
+	;
+
+DYNAMIC_SCOPE_ATTR
+	:	'$' x=ID '::' y=ID
+						   {resolveDynamicScope($x.text)!=null &&
+						     resolveDynamicScope($x.text).getAttribute($y.text)!=null}?
+		//{System.out.println("found \$scope::attr "+ $x.text + "::" + $y.text);}
+		{
+		AttributeScope scope = resolveDynamicScope($x.text);
+		if ( scope!=null ) {
+			ST st = template("scopeAttributeRef");
+			st.add("scope", $x.text);
+			st.add("attr",  scope.getAttribute($y.text));
+		}
+		else {
+			// error: invalid dynamic attribute
+		}
+		}
+	;
+
+
+ERROR_SCOPED_XY
+	:	'$' x=ID '::' y=ID
+		{
+		chunks.add(getText());
+		generator.issueInvalidScopeError($x.text,$y.text,
+		                                 enclosingRule,actionToken,
+		                                 outerAltNum);		
+		}
+	;
+	
+/**		To access deeper (than top of stack) scopes, use the notation:
+ *
+ * 		$x[-1]::y previous (just under top of stack)
+ * 		$x[-i]::y top of stack - i where the '-' MUST BE PRESENT;
+ * 				  i.e., i cannot simply be negative without the '-' sign!
+ * 		$x[i]::y  absolute index i (0..size-1)
+ * 		$x[0]::y  is the absolute 0 indexed element (bottom of the stack)
+ */
+DYNAMIC_NEGATIVE_INDEXED_SCOPE_ATTR
+	:	'$' x=ID '[' '-' expr=SCOPE_INDEX_EXPR ']' '::' y=ID
+		// {System.out.println("found \$scope[-...]::attr");}
+		{
+		ST st = template("scopeAttributeRef");
+		st.add("scope",    $x.text);
+		st.add("attr",     resolveDynamicScope($x.text).getAttribute($y.text));
+		st.add("negIndex", $expr.text);
+		}		
+	;
+
+DYNAMIC_ABSOLUTE_INDEXED_SCOPE_ATTR
+	:	'$' x=ID '[' expr=SCOPE_INDEX_EXPR ']' '::' y=ID 
+		// {System.out.println("found \$scope[...]::attr");}
+		{
+		ST st = template("scopeAttributeRef");
+		st.add("scope", $x.text);
+		st.add("attr",  resolveDynamicScope($x.text).getAttribute($y.text));
+		st.add("index", $expr.text);
+		}		
+	;
+
+fragment
+SCOPE_INDEX_EXPR
+	:	(~']')+
+	;
+	
+/** $r		where r names a rule's dynamic scope or a global shared scope.
+ * 			Isolated $rulename is not allowed unless it has a dynamic scope *and*
+ * 			there is no reference to rulename in the enclosing alternative,
+ * 			which would be ambiguous.  See TestAttributes.testAmbiguousRuleRef()
+ */
+ISOLATED_DYNAMIC_SCOPE
+	:	'$' ID {resolveDynamicScope($ID.text)!=null}?
+		// {System.out.println("found isolated \$scope where scope is a dynamic scope");}
+		{
+		ST st = template("isolatedDynamicScopeRef");
+		st.add("scope", $ID.text);
+		}		
+	;
+	
+// antlr.g then codegen.g does these first two currently.
+// don't want to duplicate that code.
+
+/** %foo(a={},b={},...) ctor */
+TEMPLATE_INSTANCE
+	:	'%' ID '(' ( WS? ARG (',' WS? ARG)* WS? )? ')'
+		// {System.out.println("found \%foo(args)");}
+		{
+		String action = getText().substring(1,getText().length());
+		String ruleName = "<outside-of-rule>";
+		if ( enclosingRule!=null ) {
+			ruleName = enclosingRule.name;
+		}
+		ST st =
+			generator.translateTemplateConstructor(ruleName,
+												   outerAltNum,
+												   actionToken,
+												   action);
+		if ( st!=null ) {
+			chunks.add(st);
+		}
+		}
+	;
+
+/** %({name-expr})(a={},...) indirect template ctor reference */
+INDIRECT_TEMPLATE_INSTANCE
+	:	'%' '(' ACTION ')' '(' ( WS? ARG (',' WS? ARG)* WS? )? ')'
+		// {System.out.println("found \%({...})(args)");}
+		{
+		String action = getText().substring(1,getText().length());
+		ST st =
+			generator.translateTemplateConstructor(enclosingRule.name,
+												   outerAltNum,
+												   actionToken,
+												   action);
+		chunks.add(st);
+		}
+	;
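+
+// Example (hypothetical grammar fragment; the template name 'decl', the rule
+// label 'kind', and the attribute names are illustrative, not from any real
+// grammar): inside a user action, a constructor such as
+//     %decl(type={"int"}, name={$ID.text})
+// is matched by TEMPLATE_INSTANCE above, and the indirect form
+//     %({$kind.text+"Template"})(name={$ID.text})
+// by INDIRECT_TEMPLATE_INSTANCE; both pass the text after '%' to
+// generator.translateTemplateConstructor() for the enclosing rule and alt.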
+
+fragment
+ARG	:	ID '=' ACTION
+	;
+
+/**	%{expr}.y = z; set template attribute y of ST-typed expr to z */
+SET_EXPR_ATTRIBUTE
+	:	'%' a=ACTION '.' ID WS? '=' expr=ATTR_VALUE_EXPR ';'
+		// {System.out.println("found \%{expr}.y = z;");}
+		{
+		ST st = template("actionSetAttribute");
+		String action = $a.text;
+		action = action.substring(1,action.length()-1); // stuff inside {...}
+		st.add("st", translateAction(action));
+		st.add("attrName", $ID.text);
+		st.add("expr", translateAction($expr.text));
+		}
+	;
+	
+/*    %x.y = z; set template attribute y of x (this form always sets, never gets, the attr)
+ *              to z.  [Languages like Python that lack ';' must still use the
+ *              ';' here; the code generator is free to remove it during code gen.]
+ */
+SET_ATTRIBUTE
+	:	'%' x=ID '.' y=ID WS? '=' expr=ATTR_VALUE_EXPR ';'
+		// {System.out.println("found \%x.y = z;");}
+		{
+		ST st = template("actionSetAttribute");
+		st.add("st", $x.text);
+		st.add("attrName", $y.text);
+		st.add("expr", translateAction($expr.text));
+		}
+	;
+
+/** Don't allow an = as first char to prevent $x == 3; kind of stuff. */
+fragment
+ATTR_VALUE_EXPR
+	:	~'=' (~';')*
+	;
+	
+/** %{string-expr} anonymous template from string expr */
+TEMPLATE_EXPR
+	:	'%' a=ACTION
+		// {System.out.println("found \%{expr}");}
+		{
+		ST st = template("actionStringConstructor");
+		String action = $a.text;
+		action = action.substring(1,action.length()-1); // stuff inside {...}
+		st.add("stringExpr", translateAction(action));
+		}
+	;
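+
+// Example (hypothetical names; the label 'x', rule label 'r', and attribute
+// 'name' are illustrative): inside user actions,
+//     %x.name = $ID.text;          matches SET_ATTRIBUTE above,
+//     %{$r.st}.name = $ID.text;    matches SET_EXPR_ATTRIBUTE, and
+//     %{"hi "+$ID.text}            matches TEMPLATE_EXPR;
+// the first two are rendered with the actionSetAttribute template and the
+// last with actionStringConstructor.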
+	
+fragment
+ACTION
+	:	'{' (options {greedy=false;}:.)* '}'
+	;
+	
+ESC :   '\\' '$' {chunks.add("\$");}
+	|	'\\' '%' {chunks.add("\%");}
+	|	'\\' ~('$'|'%') {chunks.add(getText());}
+    ;       
+
+ERROR_XY
+	:	'$' x=ID '.' y=ID
+		{
+		chunks.add(getText());
+		generator.issueInvalidAttributeError($x.text,$y.text,
+		                                     enclosingRule,actionToken,
+		                                     outerAltNum);
+		}
+	;
+	
+ERROR_X
+	:	'$' x=ID
+		{
+		chunks.add(getText());
+		generator.issueInvalidAttributeError($x.text,
+		                                     enclosingRule,actionToken,
+		                                     outerAltNum);
+		}
+	;
+	
+UNKNOWN_SYNTAX
+	:	'$'
+		{
+		chunks.add(getText());
+		// shouldn't need an error here.  Just accept \$ if it doesn't look like anything
+		}
+	|	'%' (ID|'.'|'('|')'|','|'{'|'}'|'"')*
+		{
+		chunks.add(getText());
+		ErrorManager.grammarError(ErrorManager.MSG_INVALID_TEMPLATE_ACTION,
+								  grammar,
+								  actionToken,
+								  getText());
+		}
+	;
+
+TEXT:	~('$'|'%'|'\\')+ {chunks.add(getText());}
+	;
+	
+fragment
+ID  :   ('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'_'|'0'..'9')*
+    ;
+
+fragment
+INT :	'0'..'9'+
+	;
+
+fragment
+WS	:	(' '|'\t'|'\n'|'\r')+
+	;
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/AssignTokenTypesWalker.g b/tool/src/main/antlr3/org/antlr/grammar/v3/AssignTokenTypesWalker.g
new file mode 100644
index 0000000..15ed503
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/AssignTokenTypesWalker.g
@@ -0,0 +1,403 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2011 Terence Parr
+ All rights reserved.
+
+ Grammar conversion to ANTLR v3:
+ Copyright (c) 2011 Sam Harwell
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+	notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+	derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** [Warning: TJP says that this is probably out of date as of 11/19/2005,
+ *   but since it's probably still useful, I'll leave it in.  I don't have the
+ *   energy to update it at the moment.]
+ *
+ *  Compute the token types for all literals and rules etc.  There are
+ *  a few different cases to consider for grammar types and a few situations
+ *  within.
+ *
+ *  CASE 1 : pure parser grammar
+ *	a) Any reference to a token gets a token type.
+ *  b) The tokens section may alias a token name to a string or char
+ *
+ *  CASE 2 : pure lexer grammar
+ *  a) Import token vocabulary if available. Set token types for any new tokens
+ *     to values above last imported token type
+ *  b) token rule definitions get token types if not already defined
+ *  c) literals do NOT get token types
+ *
+ *  CASE 3 : merged parser / lexer grammar
+ *	a) Any char or string literal gets a token type in a parser rule
+ *  b) Any reference to a token gets a token type if not referencing
+ *     a fragment lexer rule
+ *  c) The tokens section may alias a token name to a string or char
+ *     which must add a rule to the lexer
+ *  d) token rule definitions get token types if not already defined
+ *  e) token rule definitions may also alias a token name to a literal.
+ *     E.g., Rule 'FOR : "for";' will alias FOR to "for" in the sense that
+ *     references to either in the parser grammar will yield the token type
+ *
+ *  What this pass does:
+ *
+ *  0. Collects basic info about the grammar like grammar name and type;
+ *     Oh, I have to go get the options in case they affect the token types.
+ *     E.g., tokenVocab option.
+ *     Imports any token vocab name/type pairs into a local hashtable.
+ *  1. Finds a list of all literals and token names.
+ *  2. Finds a list of all token name rule definitions;
+ *     no token rules implies pure parser.
+ *  3. Finds a list of all simple token rule defs of form "&lt;NAME&gt; : &lt;literal&gt;;"
+ *     and aliases them.
+ *  4. Walks the token names table and assigns types to any that are unassigned.
+ *  5. Walks aliases and assigns types to the referenced literals.
+ *  6. Walks literals, assigning types if untyped.
+ *  7. Informs the Grammar object of the type definitions such as:
+ *     g.defineToken(&lt;charliteral&gt;, ttype);
+ *     g.defineToken(&lt;stringliteral&gt;, ttype);
+ *     g.defineToken(&lt;tokenID&gt;, ttype);
+ *     where some of the ttype values will be the same for aliased tokens.
+ */
+tree grammar AssignTokenTypesWalker;
+
+options
+{
+	language=Java;
+	tokenVocab = ANTLR;
+	ASTLabelType = GrammarAST;
+}
+
+@header {
+package org.antlr.grammar.v3;
+
+import java.util.*;
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+import org.antlr.tool.*;
+
+import org.antlr.runtime.BitSet;
+}
+
+@members {
+protected Grammar grammar;
+protected String currentRuleName;
+
+protected static GrammarAST stringAlias;
+protected static GrammarAST charAlias;
+protected static GrammarAST stringAlias2;
+protected static GrammarAST charAlias2;
+
+@Override
+public void reportError(RecognitionException ex)
+{
+    Token token = null;
+    if (ex instanceof MismatchedTokenException) {
+        token = ((MismatchedTokenException)ex).token;
+    } else if (ex instanceof NoViableAltException) {
+        token = ((NoViableAltException)ex).token;
+    }
+
+    ErrorManager.syntaxError(
+        ErrorManager.MSG_SYNTAX_ERROR,
+        grammar,
+        token,
+        "assign.types: " + ex.toString(),
+        ex);
+}
+
+protected void initASTPatterns()
+{
+    TreeAdaptor adaptor = new ANTLRParser.grammar_Adaptor(null);
+
+    /*
+     * stringAlias = ^(BLOCK[] ^(ALT[] STRING_LITERAL[] EOA[]) EOB[])
+     */
+    stringAlias = (GrammarAST)adaptor.create( BLOCK, "BLOCK" );
+    {
+        GrammarAST alt = (GrammarAST)adaptor.create( ALT, "ALT" );
+        adaptor.addChild( alt, adaptor.create( STRING_LITERAL, "STRING_LITERAL" ) );
+        adaptor.addChild( alt, adaptor.create( EOA, "EOA" ) );
+        adaptor.addChild( stringAlias, alt );
+    }
+    adaptor.addChild( stringAlias, adaptor.create( EOB, "EOB" ) );
+
+    /*
+     * charAlias = ^(BLOCK[] ^(ALT[] CHAR_LITERAL[] EOA[]) EOB[])
+     */
+    charAlias = (GrammarAST)adaptor.create( BLOCK, "BLOCK" );
+    {
+        GrammarAST alt = (GrammarAST)adaptor.create( ALT, "ALT" );
+        adaptor.addChild( alt, adaptor.create( CHAR_LITERAL, "CHAR_LITERAL" ) );
+        adaptor.addChild( alt, adaptor.create( EOA, "EOA" ) );
+        adaptor.addChild( charAlias, alt );
+    }
+    adaptor.addChild( charAlias, adaptor.create( EOB, "EOB" ) );
+
+    /*
+     * stringAlias2 = ^(BLOCK[] ^(ALT[] STRING_LITERAL[] ACTION[] EOA[]) EOB[])
+     */
+    stringAlias2 = (GrammarAST)adaptor.create( BLOCK, "BLOCK" );
+    {
+        GrammarAST alt = (GrammarAST)adaptor.create( ALT, "ALT" );
+        adaptor.addChild( alt, adaptor.create( STRING_LITERAL, "STRING_LITERAL" ) );
+        adaptor.addChild( alt, adaptor.create( ACTION, "ACTION" ) );
+        adaptor.addChild( alt, adaptor.create( EOA, "EOA" ) );
+        adaptor.addChild( stringAlias2, alt );
+    }
+    adaptor.addChild( stringAlias2, adaptor.create( EOB, "EOB" ) );
+
+    /*
+     * charAlias = ^(BLOCK[] ^(ALT[] CHAR_LITERAL[] ACTION[] EOA[]) EOB[])
+     */
+    charAlias2 = (GrammarAST)adaptor.create( BLOCK, "BLOCK" );
+    {
+        GrammarAST alt = (GrammarAST)adaptor.create( ALT, "ALT" );
+        adaptor.addChild( alt, adaptor.create( CHAR_LITERAL, "CHAR_LITERAL" ) );
+        adaptor.addChild( alt, adaptor.create( ACTION, "ACTION" ) );
+        adaptor.addChild( alt, adaptor.create( EOA, "EOA" ) );
+        adaptor.addChild( charAlias2, alt );
+    }
+    adaptor.addChild( charAlias2, adaptor.create( EOB, "EOB" ) );
+}
+
+// Behavior moved to AssignTokenTypesBehavior
+protected void trackString(GrammarAST t) {}
+protected void trackToken( GrammarAST t ) {}
+protected void trackTokenRule( GrammarAST t, GrammarAST modifier, GrammarAST block ) {}
+protected void alias( GrammarAST t, GrammarAST s ) {}
+public void defineTokens( Grammar root ) {}
+protected void defineStringLiteralsFromDelegates() {}
+protected void assignStringTypes( Grammar root ) {}
+protected void aliasTokenIDsAndLiterals( Grammar root ) {}
+protected void assignTokenIDTypes( Grammar root ) {}
+protected void defineTokenNamesAndLiteralsInGrammar( Grammar root ) {}
+protected void init( Grammar root ) {}
+}
+
+public
+grammar_[Grammar g]
+@init
+{
+	if ( state.backtracking == 0 )
+		init($g);
+}
+	:	(	^( LEXER_GRAMMAR 	  grammarSpec )
+		|	^( PARSER_GRAMMAR   grammarSpec )
+		|	^( TREE_GRAMMAR     grammarSpec )
+		|	^( COMBINED_GRAMMAR grammarSpec )
+		)
+	;
+
+grammarSpec
+	:	id=ID
+		(cmt=DOC_COMMENT)?
+		(optionsSpec)?
+		(delegateGrammars)?
+		(tokensSpec)?
+		(attrScope)*
+		( ^(AMPERSAND .*) )* // skip actions
+		rules
+	;
+
+attrScope
+	:	^( 'scope' ID ( ^(AMPERSAND .*) )* ACTION )
+	;
+
+optionsSpec returns [Map<Object, Object> opts = new HashMap<Object, Object>()]
+	:	^( OPTIONS (option[$opts])+ )
+	;
+
+option[Map<Object, Object> opts]
+	:	^( ASSIGN ID optionValue )
+		{
+			String key = $ID.text;
+			$opts.put(key, $optionValue.value);
+			// check for grammar-level option to import vocabulary
+			if ( currentRuleName==null && key.equals("tokenVocab") )
+			{
+				grammar.importTokenVocabulary($ID,(String)$optionValue.value);
+			}
+		}
+	;
+
+optionValue returns [Object value=null]
+@init
+{
+	if ( state.backtracking == 0 )
+		$value = $start.getText();
+}
+	:	ID
+	|	STRING_LITERAL
+	|	CHAR_LITERAL
+	|	INT
+		{$value = Integer.parseInt($INT.text);}
+//  |   cs=charSet       {$value = $cs;} // return set AST in this case
+	;
+
+charSet
+	:	^( CHARSET charSetElement )
+	;
+
+charSetElement
+	:	CHAR_LITERAL
+	|	^( OR CHAR_LITERAL CHAR_LITERAL )
+	|	^( RANGE CHAR_LITERAL CHAR_LITERAL )
+	;
+
+delegateGrammars
+	:	^(	'import'
+			(	^(ASSIGN ID ID)
+			|	ID
+			)+
+		)
+	;
+
+tokensSpec
+	:	^(TOKENS tokenSpec*)
+	;
+
+tokenSpec
+	:	t=TOKEN_REF            {trackToken($t);}
+	|	^(	ASSIGN
+			t2=TOKEN_REF       {trackToken($t2);}
+			( s=STRING_LITERAL {trackString($s); alias($t2,$s);}
+			| c=CHAR_LITERAL   {trackString($c); alias($t2,$c);}
+			)
+		)
+	;
+
+rules
+	:	rule+
+	;
+
+rule
+	:	^(RULE ruleBody)
+	|	^(PREC_RULE ruleBody)
+	;
+
+ruleBody
+	:	id=ID {currentRuleName=$id.text;}
+		(m=modifier)?
+		^(ARG (ARG_ACTION)?)
+		^(RET (ARG_ACTION)?)
+		(throwsSpec)?
+		(optionsSpec)?
+		(ruleScopeSpec)?
+		( ^(AMPERSAND .*) )*
+		b=block
+		(exceptionGroup)?
+		EOR
+		{trackTokenRule($id,$m.start,$b.start);}
+	;
+
+modifier
+	:	'protected'
+	|	'public'
+	|	'private'
+	|	'fragment'
+	;
+
+throwsSpec
+	:	^('throws' ID+)
+	;
+
+ruleScopeSpec
+	:	^( 'scope' ( ^(AMPERSAND .*) )* (ACTION)? ( ID )* )
+	;
+
+block
+	:	^(	BLOCK
+			(optionsSpec)?
+			( alternative rewrite )+
+			EOB
+		)
+	;
+
+alternative
+	:	^( ALT (element)+ EOA )
+	;
+
+exceptionGroup
+	:	( exceptionHandler )+ (finallyClause)?
+	|	finallyClause
+	;
+
+exceptionHandler
+	:	^('catch' ARG_ACTION ACTION)
+	;
+
+finallyClause
+	:	^('finally' ACTION)
+	;
+
+rewrite
+	:	^(REWRITES ( ^(REWRITE .*) )* )
+	|
+	;
+
+element
+	:	^(ROOT element)
+	|	^(BANG element)
+	|	atom
+	|	^(NOT element)
+	|	^(RANGE atom atom)
+	|	^(CHAR_RANGE atom atom)
+	|	^(ASSIGN ID element)
+	|	^(PLUS_ASSIGN ID element)
+	|	ebnf
+	|	tree_
+	|	^( SYNPRED block )
+	|	FORCED_ACTION
+	|	ACTION
+	|	SEMPRED
+	|	SYN_SEMPRED
+	|	^(BACKTRACK_SEMPRED .*)
+	|	GATED_SEMPRED
+	|	EPSILON
+	;
+
+ebnf
+	:	block
+	|	^( OPTIONAL block )
+	|	^( CLOSURE block )
+	|	^( POSITIVE_CLOSURE block )
+	;
+
+tree_
+	:	^(TREE_BEGIN element+)
+	;
+
+atom
+	:	^( RULE_REF (ARG_ACTION)? )
+	|	^( t=TOKEN_REF (ARG_ACTION )? ) {trackToken($t);}
+	|	c=CHAR_LITERAL   {trackString($c);}
+	|	s=STRING_LITERAL {trackString($s);}
+	|	WILDCARD
+	|	^(DOT ID atom) // scope override on rule
+	;
+
+ast_suffix
+	:	ROOT
+	|	BANG
+	;
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/CodeGenTreeWalker.g b/tool/src/main/antlr3/org/antlr/grammar/v3/CodeGenTreeWalker.g
new file mode 100644
index 0000000..9c107b5
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/CodeGenTreeWalker.g
@@ -0,0 +1,1611 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2011 Terence Parr
+ All rights reserved.
+
+ Grammar conversion to ANTLR v3:
+ Copyright (c) 2011 Sam Harwell
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+	notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+	derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Walk a grammar and generate code by gradually building up
+ *  a bigger and bigger ST.
+ *
+ *  Terence Parr
+ *  University of San Francisco
+ *  June 15, 2004
+ */
+tree grammar CodeGenTreeWalker;
+
+options {
+	language=Java;
+	tokenVocab = ANTLR;
+	ASTLabelType=GrammarAST;
+}
+
+@header {
+package org.antlr.grammar.v3;
+
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+import org.antlr.tool.*;
+import org.antlr.codegen.*;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Collection;
+import org.antlr.runtime.BitSet;
+import org.antlr.runtime.DFA;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STGroup;
+}
+
+@members {
+protected static final int RULE_BLOCK_NESTING_LEVEL = 0;
+protected static final int OUTER_REWRITE_NESTING_LEVEL = 0;
+
+private String currentRuleName = null;
+protected int blockNestingLevel = 0;
+protected int rewriteBlockNestingLevel = 0;
+private int outerAltNum = 0;
+protected ST currentBlockST = null;
+protected boolean currentAltHasASTRewrite = false;
+protected int rewriteTreeNestingLevel = 0;
+protected HashSet<Object> rewriteRuleRefs = null;
+
+public String getCurrentRuleName() {
+    return currentRuleName;
+}
+
+public void setCurrentRuleName(String value) {
+    currentRuleName = value;
+}
+
+public int getOuterAltNum() {
+    return outerAltNum;
+}
+
+public void setOuterAltNum(int value) {
+    outerAltNum = value;
+}
+
+@Override
+public void reportError(RecognitionException ex) {
+    Token token = null;
+    if (ex instanceof MismatchedTokenException) {
+        token = ((MismatchedTokenException)ex).token;
+    } else if (ex instanceof NoViableAltException) {
+        token = ((NoViableAltException)ex).token;
+    }
+
+    ErrorManager.syntaxError(
+        ErrorManager.MSG_SYNTAX_ERROR,
+        grammar,
+        token,
+        "codegen: " + ex.toString(),
+        ex );
+}
+
+public final void reportError(String s) {
+    System.out.println("codegen: error: " + s);
+}
+
+protected CodeGenerator generator;
+protected Grammar grammar;
+protected STGroup templates;
+
+/** The overall lexer/parser template; simulate dynamically scoped
+ *  attributes by making this an instance var of the walker.
+ */
+protected ST recognizerST;
+
+protected ST outputFileST;
+protected ST headerFileST;
+
+protected String outputOption = "";
+
+protected final ST getWildcardST(GrammarAST elementAST, GrammarAST ast_suffix, String label) {
+    String name = "wildcard";
+    if (grammar.type == Grammar.LEXER) {
+        name = "wildcardChar";
+    }
+    return getTokenElementST(name, name, elementAST, ast_suffix, label);
+}
+
+protected final ST getRuleElementST( String name,
+                                          String ruleTargetName,
+                                          GrammarAST elementAST,
+                                          GrammarAST ast_suffix,
+                                          String label ) {
+	Rule r = grammar.getRule( currentRuleName );
+	String suffix = getSTSuffix(elementAST, ast_suffix, label);
+	if ( !r.isSynPred ) {
+		name += suffix;
+	}
+	// if we're building trees and there is no label, gen a label
+	// unless we're in a synpred rule.
+	if ( ( grammar.buildAST() || suffix.length() > 0 ) && label == null &&
+		 ( r == null || !r.isSynPred ) ) {
+		// we will need a label to do the AST or tracking, make one
+		label = generator.createUniqueLabel( ruleTargetName );
+		CommonToken labelTok = new CommonToken( ANTLRParser.ID, label );
+		grammar.defineRuleRefLabel( currentRuleName, labelTok, elementAST );
+	}
+
+	ST elementST = templates.getInstanceOf( name );
+	if ( label != null ) {
+		elementST.add( "label", label );
+	}
+
+
+	return elementST;
+}
+
+protected final ST getTokenElementST( String name,
+                                           String elementName,
+                                           GrammarAST elementAST,
+                                           GrammarAST ast_suffix,
+                                           String label ) {
+    boolean tryUnchecked = false;
+    if ("matchSet".equals(name) && elementAST.enclosingRuleName != null && elementAST.enclosingRuleName.length() > 0 && Rule.getRuleType(elementAST.enclosingRuleName) == Grammar.LEXER)
+    {
+        if ( ( elementAST.getParent().getType() == ANTLRLexer.ALT && elementAST.getParent().getParent().getParent().getType() == RULE && elementAST.getParent().getParent().getChildCount() == 2 )
+            || ( elementAST.getParent().getType() == ANTLRLexer.NOT && elementAST.getParent().getParent().getParent().getParent().getType() == RULE && elementAST.getParent().getParent().getParent().getChildCount() == 2 ) ) {
+            // single alt at the start of the rule needs to be checked
+        } else {
+            tryUnchecked = true;
+        }
+    }
+
+    String suffix = getSTSuffix( elementAST, ast_suffix, label );
+    // if we're building trees and there is no label, gen a label
+    // unless we're in a synpred rule.
+    Rule r = grammar.getRule( currentRuleName );
+    if ( ( grammar.buildAST() || suffix.length() > 0 ) && label == null &&
+         ( r == null || !r.isSynPred ) )
+    {
+        label = generator.createUniqueLabel( elementName );
+        CommonToken labelTok = new CommonToken( ANTLRParser.ID, label );
+        grammar.defineTokenRefLabel( currentRuleName, labelTok, elementAST );
+    }
+
+    ST elementST = null;
+    if ( tryUnchecked && templates.isDefined( name + "Unchecked" + suffix ) )
+        elementST = templates.getInstanceOf( name + "Unchecked" + suffix );
+    if ( elementST == null )
+        elementST = templates.getInstanceOf( name + suffix );
+
+    if ( label != null )
+    {
+        elementST.add( "label", label );
+    }
+    return elementST;
+}
+
+public final boolean isListLabel(String label) {
+    boolean hasListLabel = false;
+    if ( label != null ) {
+        Rule r = grammar.getRule( currentRuleName );
+        //String stName = null;
+        if ( r != null )
+        {
+            Grammar.LabelElementPair pair = r.getLabel( label );
+            if ( pair != null &&
+                 ( pair.type == Grammar.TOKEN_LIST_LABEL ||
+                  pair.type == Grammar.RULE_LIST_LABEL ||
+                  pair.type == Grammar.WILDCARD_TREE_LIST_LABEL ) )
+            {
+                hasListLabel = true;
+            }
+        }
+    }
+    return hasListLabel;
+}
+
+/** Return a non-empty template name suffix if the token is to be
+ *  tracked, added to a tree, or both.
+ */
+protected final String getSTSuffix(GrammarAST elementAST, GrammarAST ast_suffix, String label) {
+    if ( grammar.type == Grammar.LEXER )
+    {
+        return "";
+    }
+    // handle list label stuff; make element use "Track"
+
+    String operatorPart = "";
+    String rewritePart = "";
+    String listLabelPart = "";
+    Rule ruleDescr = grammar.getRule( currentRuleName );
+    if ( ast_suffix != null && !ruleDescr.isSynPred )
+    {
+        if ( ast_suffix.getType() == ANTLRParser.ROOT )
+        {
+            operatorPart = "RuleRoot";
+        }
+        else if ( ast_suffix.getType() == ANTLRParser.BANG )
+        {
+            operatorPart = "Bang";
+        }
+    }
+    if ( currentAltHasASTRewrite && elementAST.getType() != WILDCARD )
+    {
+        rewritePart = "Track";
+    }
+    if ( isListLabel( label ) )
+    {
+        listLabelPart = "AndListLabel";
+    }
+    String STsuffix = operatorPart + rewritePart + listLabelPart;
+    //JSystem.@out.println("suffix = "+STsuffix);
+
+    return STsuffix;
+}
+
+/** Convert rewrite AST lists to target labels list */
+protected final List<String> getTokenTypesAsTargetLabels(Collection<GrammarAST> refs)
+{
+    if ( refs == null || refs.size() == 0 )
+        return null;
+
+    List<String> labels = new ArrayList<String>( refs.size() );
+    for ( GrammarAST t : refs )
+    {
+        String label;
+        if ( t.getType() == ANTLRParser.RULE_REF || t.getType() == ANTLRParser.TOKEN_REF || t.getType() == ANTLRParser.LABEL)
+        {
+            label = t.getText();
+        }
+        else
+        {
+            // must be char or String literal
+            label = generator.getTokenTypeAsTargetLabel(grammar.getTokenType(t.getText()));
+        }
+        labels.add( label );
+    }
+    return labels;
+}
+
+public final void init( Grammar g ) {
+    this.grammar = g;
+    this.generator = grammar.getCodeGenerator();
+    this.templates = generator.getTemplates();
+}
+}
+
+public
+grammar_[Grammar g,
+		ST recognizerST,
+		ST outputFileST,
+		ST headerFileST]
+@init
+{
+	if ( state.backtracking == 0 )
+	{
+		init(g);
+		this.recognizerST = recognizerST;
+		this.outputFileST = outputFileST;
+		this.headerFileST = headerFileST;
+		String superClass = (String)g.getOption("superClass");
+		outputOption = (String)g.getOption("output");
+		if ( superClass!=null ) recognizerST.add("superClass", superClass);
+		if ( g.type!=Grammar.LEXER ) {
+		    Object lt = g.getOption("ASTLabelType");
+			if ( lt!=null ) recognizerST.add("ASTLabelType", lt);
+		}
+		if ( g.type==Grammar.TREE_PARSER && g.getOption("ASTLabelType")==null ) {
+			ErrorManager.grammarWarning(ErrorManager.MSG_MISSING_AST_TYPE_IN_TREE_GRAMMAR,
+									   g,
+									   null,
+									   g.name);
+		}
+		if ( g.type!=Grammar.TREE_PARSER ) {
+		    Object lt = g.getOption("TokenLabelType");
+			if ( lt!=null ) recognizerST.add("labelType", lt);
+		}
+		$recognizerST.add("numRules", grammar.getRules().size());
+		$outputFileST.add("numRules", grammar.getRules().size());
+		$headerFileST.add("numRules", grammar.getRules().size());
+	}
+}
+	:	(	^( LEXER_GRAMMAR grammarSpec )
+		|	^( PARSER_GRAMMAR grammarSpec )
+		|	^( TREE_GRAMMAR grammarSpec )
+		|	^( COMBINED_GRAMMAR grammarSpec )
+		)
+	;
+
+attrScope
+	:	^( 'scope' ID ( ^(AMPERSAND .*) )* ACTION )
+	;
+
+grammarSpec
+	:   name=ID
+		(	cmt=DOC_COMMENT
+			{
+				outputFileST.add("docComment", $cmt.text);
+				headerFileST.add("docComment", $cmt.text);
+			}
+		)?
+		{
+			recognizerST.add("name", grammar.getRecognizerName());
+			outputFileST.add("name", grammar.getRecognizerName());
+			headerFileST.add("name", grammar.getRecognizerName());
+			recognizerST.add("scopes", grammar.getGlobalScopes());
+			headerFileST.add("scopes", grammar.getGlobalScopes());
+		}
+		( ^(OPTIONS .*) )?
+		( ^(IMPORT .*) )?
+		( ^(TOKENS .*) )?
+		(attrScope)*
+		( ^(AMPERSAND .*) )*
+		rules[recognizerST]
+	;
+
+rules[ST recognizerST]
+@init
+{
+	String ruleName = ((GrammarAST)input.LT(1)).getChild(0).getText();
+	boolean generated = grammar.generateMethodForRule(ruleName);
+}
+	:	(	(	options {k=1;} :
+				{generated}? =>
+				rST=rule
+				{
+					if ( $rST.code != null )
+					{
+						recognizerST.add("rules", $rST.code);
+						outputFileST.add("rules", $rST.code);
+						headerFileST.add("rules", $rST.code);
+					}
+				}
+			|	^(RULE .*)
+			|	^(PREC_RULE .*) // ignore
+			)
+			{{
+				if ( input.LA(1) == RULE )
+				{
+					ruleName = ((GrammarAST)input.LT(1)).getChild(0).getText();
+					//System.Diagnostics.Debug.Assert( ruleName == ((GrammarAST)input.LT(1)).enclosingRuleName );
+					generated = grammar.generateMethodForRule(ruleName);
+				}
+			}}
+		)+
+	;
+
+rule returns [ST code=null]
+@init
+{
+	String initAction = null;
+	// get the dfa for the BLOCK
+	GrammarAST block2=(GrammarAST)$start.getFirstChildWithType(BLOCK);
+	org.antlr.analysis.DFA dfa = block2.getLookaheadDFA();
+	// init blockNestingLevel so it's block level RULE_BLOCK_NESTING_LEVEL
+	// for alts of rule
+	blockNestingLevel = RULE_BLOCK_NESTING_LEVEL-1;
+	Rule ruleDescr = grammar.getRule($start.getChild(0).getText());
+	currentRuleName = $start.getChild(0).getText();
+
+	// For syn preds, we don't want any AST code etc... in there.
+	// Save old templates ptr and restore later.  Base templates include Dbg.
+	STGroup saveGroup = templates;
+	if ( ruleDescr.isSynPred && generator.target.useBaseTemplatesForSynPredFragments() )
+	{
+		templates = generator.getBaseTemplates();
+	}
+
+	String description = "";
+}
+	:	^(	RULE id=ID
+			{assert currentRuleName == $id.text;}
+			(mod=modifier)?
+			^(ARG (ARG_ACTION)?)
+			^(RET (ARG_ACTION)?)
+			(throwsSpec)?
+			( ^(OPTIONS .*) )?
+			(ruleScopeSpec)?
+			( ^(AMPERSAND .*) )*
+			b=block["ruleBlock", dfa, null]
+			{
+				description =
+					grammar.grammarTreeToString((GrammarAST)$start.getFirstChildWithType(BLOCK),
+												false);
+				description =
+					generator.target.getTargetStringLiteralFromString(description);
+				$b.code.add("description", description);
+				// do not generate lexer rules in combined grammar
+				String stName = null;
+				if ( ruleDescr.isSynPred )
+				{
+					stName = "synpredRule";
+				}
+				else if ( grammar.type==Grammar.LEXER )
+				{
+					if ( currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) )
+					{
+						stName = "tokensRule";
+					}
+					else
+					{
+						stName = "lexerRule";
+					}
+				}
+				else
+				{
+					if ( !(grammar.type==Grammar.COMBINED &&
+						 Rule.getRuleType(currentRuleName) == Grammar.LEXER) )
+					{
+						stName = "rule";
+					}
+				}
+				$code = templates.getInstanceOf(stName);
+				if ( $code.getName().equals("/rule") )
+				{
+					$code.add("emptyRule", grammar.isEmptyRule(block2));
+				}
+				$code.add("ruleDescriptor", ruleDescr);
+				String memo = (String)grammar.getBlockOption($start,"memoize");
+				if ( memo==null )
+				{
+					memo = (String)grammar.getOption("memoize");
+				}
+				if ( memo!=null && memo.equals("true") &&
+					 (stName.equals("rule")||stName.equals("lexerRule")) )
+				{
+					$code.add("memoize", memo!=null && memo.equals("true"));
+				}
+			}
+
+			(exceptionGroup[$code])?
+			EOR
+		)
+		{
+			if ( $code!=null )
+			{
+				if ( grammar.type==Grammar.LEXER )
+				{
+					boolean naked =
+						currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ||
+						($mod.start!=null&&$mod.start.getText().equals(Grammar.FRAGMENT_RULE_MODIFIER));
+					$code.add("nakedBlock", naked);
+				}
+				else
+				{
+					description = grammar.grammarTreeToString($start,false);
+					description = generator.target.getTargetStringLiteralFromString(description);
+					$code.add("description", description);
+				}
+				Rule theRule = grammar.getRule(currentRuleName);
+				generator.translateActionAttributeReferencesForSingleScope(
+					theRule,
+					theRule.getActions()
+				);
+				$code.add("ruleName", currentRuleName);
+				$code.add("block", $b.code);
+				if ( initAction!=null )
+				{
+					$code.add("initAction", initAction);
+				}
+			}
+		}
+	;
+finally { templates = saveGroup; }
+
+modifier
+	:	'protected'
+	|	'public'
+	|	'private'
+	|	'fragment'
+	;
+
+throwsSpec
+	:	^('throws' ID+)
+	;
+
+ruleScopeSpec
+	:	^( 'scope' ( ^(AMPERSAND .*) )* (ACTION)? ( ID )* )
+	;
+
+block[String blockTemplateName, org.antlr.analysis.DFA dfa, GrammarAST label]
+	 returns [ST code=null]
+options { k=1; }
+@init
+{
+	int altNum = 0;
+
+	blockNestingLevel++;
+	if ( state.backtracking == 0 )
+	{
+		ST decision = null;
+		if ( $dfa != null )
+		{
+			$code = templates.getInstanceOf($blockTemplateName);
+			decision = generator.genLookaheadDecision(recognizerST,$dfa);
+			$code.add("decision", decision);
+			$code.add("decisionNumber", $dfa.getDecisionNumber());
+			$code.add("maxK",$dfa.getMaxLookaheadDepth());
+			$code.add("maxAlt",$dfa.getNumberOfAlts());
+		}
+		else
+		{
+			$code = templates.getInstanceOf($blockTemplateName+"SingleAlt");
+		}
+		$code.add("blockLevel", blockNestingLevel);
+		$code.add("enclosingBlockLevel", blockNestingLevel-1);
+		altNum = 1;
+		if ( this.blockNestingLevel==RULE_BLOCK_NESTING_LEVEL ) {
+			this.outerAltNum=1;
+		}
+	}
+}
+	:	{$start.getSetValue()!=null}? => setBlock
+		{
+			$code.add("alts",$setBlock.code);
+		}
+
+	|	^(  BLOCK
+			( ^(OPTIONS .*) )? // ignore
+			( alt=alternative[$label] rew=rewrite
+				{
+					if ( this.blockNestingLevel==RULE_BLOCK_NESTING_LEVEL )
+					{
+						this.outerAltNum++;
+					}
+					// add the rewrite code as just another element in the alt :)
+					// (unless it's an "etc" rewrite of the form
+					//  " -> ..." )
+					GrammarAST firstRewriteAST = $rew.start.findFirstType(REWRITE);
+					boolean etc =
+						$rew.start.getType()==REWRITES &&
+						firstRewriteAST.getChild(0)!=null &&
+						firstRewriteAST.getChild(0).getType()==ETC;
+					if ( $rew.code!=null && !etc )
+					{
+						$alt.code.add("rew", $rew.code);
+					}
+					// add this alt to the list of alts for this block
+					$code.add("alts",$alt.code);
+					$alt.code.add("altNum", altNum);
+					$alt.code.add("outerAlt", blockNestingLevel==RULE_BLOCK_NESTING_LEVEL);
+					altNum++;
+				}
+			)+
+			EOB
+		 )
+	;
+finally { blockNestingLevel--; }
+
+setBlock returns [ST code=null]
+@init
+{
+	ST setcode = null;
+	if ( state.backtracking == 0 )
+	{
+		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() )
+		{
+			Rule r = grammar.getRule(currentRuleName);
+			currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
+			if ( currentAltHasASTRewrite )
+			{
+				r.trackTokenReferenceInAlt($start, outerAltNum);
+			}
+		}
+	}
+}
+	:	^(s=BLOCK .*)
+		{
+			int i = ((CommonToken)$s.getToken()).getTokenIndex();
+			if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL )
+			{
+				setcode = getTokenElementST("matchRuleBlockSet", "set", $s, null, null);
+			}
+			else
+			{
+				setcode = getTokenElementST("matchSet", "set", $s, null, null);
+			}
+			setcode.add("elementIndex", i);
+			//if ( grammar.type!=Grammar.LEXER )
+			//{
+			//	generator.generateLocalFOLLOW($s,"set",currentRuleName,i);
+			//}
+			setcode.add("s",
+				generator.genSetExpr(templates,$s.getSetValue(),1,false));
+			ST altcode=templates.getInstanceOf("alt");
+			altcode.addAggr("elements.{el,line,pos}",
+								 setcode,
+								 $s.getLine(),
+								 $s.getCharPositionInLine() + 1
+								);
+			altcode.add("altNum", 1);
+			altcode.add("outerAlt", blockNestingLevel==RULE_BLOCK_NESTING_LEVEL);
+			if ( !currentAltHasASTRewrite && grammar.buildAST() )
+			{
+				altcode.add("autoAST", true);
+			}
+			altcode.add("treeLevel", rewriteTreeNestingLevel);
+			$code = altcode;
+		}
+	;
+
+setAlternative
+	:	^(ALT setElement+ EOA)
+	;
+
+exceptionGroup[ST ruleST]
+	:	( exceptionHandler[$ruleST] )+ (finallyClause[$ruleST])?
+	|	finallyClause[$ruleST]
+	;
+
+exceptionHandler[ST ruleST]
+	:	^('catch' ARG_ACTION ACTION)
+		{
+			List<? extends Object> chunks = generator.translateAction(currentRuleName,$ACTION);
+			$ruleST.addAggr("exceptions.{decl,action}",$ARG_ACTION.text,chunks);
+		}
+	;
+
+finallyClause[ST ruleST]
+	:	^('finally' ACTION)
+		{
+			List<? extends Object> chunks = generator.translateAction(currentRuleName,$ACTION);
+			$ruleST.add("finally",chunks);
+		}
+	;
+
+alternative[GrammarAST label] returns [ST code]
+@init
+{
+	if ( state.backtracking == 0 )
+	{
+		$code = templates.getInstanceOf("alt");
+		if ( blockNestingLevel==RULE_BLOCK_NESTING_LEVEL && grammar.buildAST() )
+		{
+			Rule r = grammar.getRule(currentRuleName);
+			currentAltHasASTRewrite = r.hasRewrite(outerAltNum);
+		}
+		String description = grammar.grammarTreeToString($start, false);
+		description = generator.target.getTargetStringLiteralFromString(description);
+		$code.add("description", description);
+		$code.add("treeLevel", rewriteTreeNestingLevel);
+		if ( !currentAltHasASTRewrite && grammar.buildAST() )
+		{
+			$code.add("autoAST", true);
+		}
+	}
+}
+	:	^(	a=ALT
+			(
+				e=element[$label,null]
+				{
+					if ($e.code != null)
+					{
+						$code.addAggr("elements.{el,line,pos}",
+										  $e.code,
+										  $e.start.getLine(),
+										  $e.start.getCharPositionInLine() + 1
+										 );
+					}
+				}
+			)+
+			EOA
+		)
+	;
+
+element[GrammarAST label, GrammarAST astSuffix] returns [ST code=null]
+options { k=1; }
+@init
+{
+	IntSet elements=null;
+	GrammarAST ast = null;
+}
+	:	^(ROOT e=element[$label,$ROOT])
+		{ $code = $e.code; }
+
+	|	^(BANG e=element[$label,$BANG])
+		{ $code = $e.code; }
+
+	|	^( n=NOT ne=notElement[$n, $label, $astSuffix] )
+		{ $code = $ne.code; }
+
+	|	^( ASSIGN alabel=ID e=element[$alabel,$astSuffix] )
+		{ $code = $e.code; }
+
+	|	^( PLUS_ASSIGN label2=ID e=element[$label2,$astSuffix] )
+		{ $code = $e.code; }
+
+	|	^(CHAR_RANGE a=CHAR_LITERAL b=CHAR_LITERAL)
+		{
+			$code = templates.getInstanceOf("charRangeRef");
+			String low = generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,$a.text);
+			String high = generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,$b.text);
+			$code.add("a", low);
+			$code.add("b", high);
+			if ( label!=null )
+			{
+				$code.add("label", $label.getText());
+			}
+		}
+
+	|	({((GrammarAST)input.LT(1)).getSetValue()==null}? (BLOCK|OPTIONAL|CLOSURE|POSITIVE_CLOSURE)) => /*{$start.getSetValue()==null}?*/ ebnf[$label]
+		{ $code = $ebnf.code; }
+
+	|	atom[null, $label, $astSuffix]
+		{ $code = $atom.code; }
+
+	|	tree_
+		{ $code = $tree_.code; }
+
+	|	element_action
+		{ $code = $element_action.code; }
+
+	|   (sp=SEMPRED|sp=GATED_SEMPRED)
+		{
+			$code = templates.getInstanceOf("validateSemanticPredicate");
+			$code.add("pred", generator.translateAction(currentRuleName,$sp));
+			String description = generator.target.getTargetStringLiteralFromString($sp.text);
+			$code.add("description", description);
+		}
+
+	|	SYN_SEMPRED // used only in lookahead; don't generate validating pred
+
+	|	^(SYNPRED .*)
+
+	|	^(BACKTRACK_SEMPRED .*)
+
+	|   EPSILON
+	;
+
+element_action returns [ST code=null]
+	:	act=ACTION
+		{
+			$code = templates.getInstanceOf("execAction");
+			$code.add("action", generator.translateAction(currentRuleName,$act));
+		}
+	|	act2=FORCED_ACTION
+		{
+			$code = templates.getInstanceOf("execForcedAction");
+			$code.add("action", generator.translateAction(currentRuleName,$act2));
+		}
+	;
+
+notElement[GrammarAST n, GrammarAST label, GrammarAST astSuffix] returns [ST code=null]
+@init
+{
+	IntSet elements=null;
+	String labelText = null;
+	if ( label!=null )
+	{
+		labelText = label.getText();
+	}
+}
+	:	(	assign_c=CHAR_LITERAL
+			{
+				int ttype=0;
+				if ( grammar.type==Grammar.LEXER )
+				{
+					ttype = Grammar.getCharValueFromGrammarCharLiteral($assign_c.text);
+				}
+				else
+				{
+					ttype = grammar.getTokenType($assign_c.text);
+				}
+				elements = grammar.complement(ttype);
+			}
+		|	assign_s=STRING_LITERAL
+			{
+				int ttype=0;
+				if ( grammar.type==Grammar.LEXER )
+				{
+					// TODO: error!
+				}
+				else
+				{
+					ttype = grammar.getTokenType($assign_s.text);
+				}
+				elements = grammar.complement(ttype);
+			}
+		|	assign_t=TOKEN_REF
+			{
+				int ttype = grammar.getTokenType($assign_t.text);
+				elements = grammar.complement(ttype);
+			}
+		|	^(assign_st=BLOCK .*)
+			{
+				elements = $assign_st.getSetValue();
+				elements = grammar.complement(elements);
+			}
+		)
+		{
+			$code = getTokenElementST("matchSet",
+									 "set",
+									 (GrammarAST)$n.getChild(0),
+									 astSuffix,
+									 labelText);
+			$code.add("s",generator.genSetExpr(templates,elements,1,false));
+			int i = ((CommonToken)n.getToken()).getTokenIndex();
+			$code.add("elementIndex", i);
+			if ( grammar.type!=Grammar.LEXER )
+			{
+				generator.generateLocalFOLLOW(n,"set",currentRuleName,i);
+			}
+		}
+	;
+
+ebnf[GrammarAST label] returns [ST code=null]
+@init
+{
+	org.antlr.analysis.DFA dfa=null;
+	GrammarAST b = (GrammarAST)$start.getChild(0);
+	GrammarAST eob = b.getLastChild(); // loops will use EOB DFA
+}
+	:	(	{ dfa = $start.getLookaheadDFA(); }
+			blk=block["block", dfa, $label]
+			{ $code = $blk.code; }
+		|	{ dfa = $start.getLookaheadDFA(); }
+			^( OPTIONAL blk=block["optionalBlock", dfa, $label] )
+			{ $code = $blk.code; }
+		|	{ dfa = eob.getLookaheadDFA(); }
+			^( CLOSURE blk=block["closureBlock", dfa, $label] )
+			{ $code = $blk.code; }
+		|	{ dfa = eob.getLookaheadDFA(); }
+			^( POSITIVE_CLOSURE blk=block["positiveClosureBlock", dfa, $label] )
+			{ $code = $blk.code; }
+		)
+		{
+			String description = grammar.grammarTreeToString($start, false);
+			description = generator.target.getTargetStringLiteralFromString(description);
+			$code.add("description", description);
+		}
+	;
+
+tree_ returns [ST code]
+@init
+{
+	rewriteTreeNestingLevel++;
+	GrammarAST rootSuffix = null;
+	if ( state.backtracking == 0 )
+	{
+		$code = templates.getInstanceOf("tree");
+		NFAState afterDOWN = (NFAState)$start.NFATreeDownState.transition(0).target;
+		LookaheadSet s = grammar.LOOK(afterDOWN);
+		if ( s.member(Label.UP) ) {
+			// nullable child list if we can see the UP as the next token
+			// we need an "if ( input.LA(1)==Token.DOWN )" gate around
+			// the child list.
+			$code.add("nullableChildList", "true");
+		}
+		$code.add("enclosingTreeLevel", rewriteTreeNestingLevel-1);
+		$code.add("treeLevel", rewriteTreeNestingLevel);
+		Rule r = grammar.getRule(currentRuleName);
+		if ( grammar.buildAST() && !r.hasRewrite(outerAltNum) ) {
+			rootSuffix = new GrammarAST(ROOT,"ROOT");
+		}
+	}
+}
+	:	^(	TREE_BEGIN
+			el=element[null,rootSuffix]
+			{
+				$code.addAggr("root.{el,line,pos}",
+								  $el.code,
+								  $el.start.getLine(),
+								  $el.start.getCharPositionInLine() + 1
+								  );
+			}
+			// push all the immediately-following actions out before children
+			// so actions aren't guarded by the "if (input.LA(1)==Token.DOWN)"
+			// guard in generated code.
+			(	(element_action) =>
+				act=element_action
+				{
+					$code.addAggr("actionsAfterRoot.{el,line,pos}",
+									  $act.code,
+									  $act.start.getLine(),
+									  $act.start.getCharPositionInLine() + 1
+									);
+				}
+			)*
+			(	 el=element[null,null]
+				 {
+				 $code.addAggr("children.{el,line,pos}",
+								  $el.code,
+								  $el.start.getLine(),
+								  $el.start.getCharPositionInLine() + 1
+								  );
+				 }
+			)*
+		)
+	;
+finally { rewriteTreeNestingLevel--; }
+
+atom[GrammarAST scope, GrammarAST label, GrammarAST astSuffix]
+	returns [ST code=null]
+@init
+{
+	String labelText=null;
+	if ( state.backtracking == 0 )
+	{
+		if ( label!=null )
+		{
+			labelText = label.getText();
+		}
+		if ( grammar.type!=Grammar.LEXER &&
+			 ($start.getType()==RULE_REF||$start.getType()==TOKEN_REF||
+			  $start.getType()==CHAR_LITERAL||$start.getType()==STRING_LITERAL) )
+		{
+			Rule encRule = grammar.getRule($start.enclosingRuleName);
+			if ( encRule!=null && encRule.hasRewrite(outerAltNum) && astSuffix!=null )
+			{
+				ErrorManager.grammarError(ErrorManager.MSG_AST_OP_IN_ALT_WITH_REWRITE,
+										  grammar,
+										  $start.getToken(),
+										  $start.enclosingRuleName,
+										  outerAltNum);
+				astSuffix = null;
+			}
+		}
+	}
+}
+	:   ^( r=RULE_REF (rarg=ARG_ACTION)? )
+		{
+			grammar.checkRuleReference(scope, $r, $rarg, currentRuleName);
+			String scopeName = null;
+			if ( scope!=null ) {
+				scopeName = scope.getText();
+			}
+			Rule rdef = grammar.getRule(scopeName, $r.text);
+			// don't insert label=r() if $label.attr not used, no ret value, ...
+			if ( !rdef.getHasReturnValue() ) {
+				labelText = null;
+			}
+			$code = getRuleElementST("ruleRef", $r.text, $r, astSuffix, labelText);
+			$code.add("rule", rdef);
+			if ( scope!=null ) { // scoped rule ref
+				Grammar scopeG = grammar.composite.getGrammar(scope.getText());
+				$code.add("scope", scopeG);
+			}
+			else if ( rdef.grammar != this.grammar ) { // nonlocal
+				// if rule definition is not in this grammar, it's nonlocal
+				List<Grammar> rdefDelegates = rdef.grammar.getDelegates();
+				if ( rdefDelegates.contains(this.grammar) ) {
+					$code.add("scope", rdef.grammar);
+				}
+				else {
+					// defining grammar is not a delegate, scope all the
+					// back to root, which has delegate methods for all
+					// rules.  Don't use scope if we are root.
+					if ( this.grammar != rdef.grammar.composite.delegateGrammarTreeRoot.grammar ) {
+						$code.add("scope",
+										  rdef.grammar.composite.delegateGrammarTreeRoot.grammar);
+					}
+				}
+			}
+
+			if ( $rarg!=null ) {
+				List<? extends Object> args = generator.translateAction(currentRuleName,$rarg);
+				$code.add("args", args);
+			}
+			int i = ((CommonToken)r.getToken()).getTokenIndex();
+			$code.add("elementIndex", i);
+			generator.generateLocalFOLLOW($r,$r.text,currentRuleName,i);
+			$r.code = $code;
+		}
+
+	|	^( t=TOKEN_REF (targ=ARG_ACTION)? )
+		{
+			if ( currentAltHasASTRewrite && $t.terminalOptions!=null &&
+				$t.terminalOptions.get(Grammar.defaultTokenOption)!=null )
+			{
+				ErrorManager.grammarError(ErrorManager.MSG_HETERO_ILLEGAL_IN_REWRITE_ALT,
+										grammar,
+										$t.getToken(),
+										$t.text);
+			}
+			grammar.checkRuleReference(scope, $t, $targ, currentRuleName);
+			if ( grammar.type==Grammar.LEXER )
+			{
+				if ( grammar.getTokenType($t.text)==Label.EOF )
+				{
+					$code = templates.getInstanceOf("lexerMatchEOF");
+				}
+				else
+				{
+					$code = templates.getInstanceOf("lexerRuleRef");
+					if ( isListLabel(labelText) )
+					{
+						$code = templates.getInstanceOf("lexerRuleRefAndListLabel");
+					}
+					String scopeName = null;
+					if ( scope!=null )
+					{
+						scopeName = scope.getText();
+					}
+					Rule rdef2 = grammar.getRule(scopeName, $t.text);
+					$code.add("rule", rdef2);
+					if ( scope!=null )
+					{ // scoped rule ref
+						Grammar scopeG = grammar.composite.getGrammar(scope.getText());
+						$code.add("scope", scopeG);
+					}
+					else if ( rdef2.grammar != this.grammar )
+					{ // nonlocal
+						// if rule definition is not in this grammar, it's nonlocal
+						$code.add("scope", rdef2.grammar);
+					}
+					if ( $targ!=null )
+					{
+						List<? extends Object> args = generator.translateAction(currentRuleName,$targ);
+						$code.add("args", args);
+					}
+				}
+				int i = ((CommonToken)$t.getToken()).getTokenIndex();
+				$code.add("elementIndex", i);
+				if ( label!=null )
+					$code.add("label", labelText);
+			}
+			else
+			{
+				$code = getTokenElementST("tokenRef", $t.text, $t, astSuffix, labelText);
+				String tokenLabel =
+					generator.getTokenTypeAsTargetLabel(grammar.getTokenType(t.getText()));
+				$code.add("token",tokenLabel);
+				if ( !currentAltHasASTRewrite && $t.terminalOptions!=null )
+				{
+					$code.add("terminalOptions", $t.terminalOptions);
+				}
+				int i = ((CommonToken)$t.getToken()).getTokenIndex();
+				$code.add("elementIndex", i);
+				generator.generateLocalFOLLOW($t,tokenLabel,currentRuleName,i);
+			}
+			$t.code = $code;
+		}
+
+	|	c=CHAR_LITERAL
+		{
+			if ( grammar.type==Grammar.LEXER )
+			{
+				$code = templates.getInstanceOf("charRef");
+				$code.add("char",
+				   generator.target.getTargetCharLiteralFromANTLRCharLiteral(generator,$c.text));
+				if ( label!=null )
+				{
+					$code.add("label", labelText);
+				}
+			}
+			else { // else it's a token type reference
+				$code = getTokenElementST("tokenRef", "char_literal", $c, astSuffix, labelText);
+				String tokenLabel = generator.getTokenTypeAsTargetLabel(grammar.getTokenType($c.text));
+				$code.add("token",tokenLabel);
+				if ( $c.terminalOptions!=null ) {
+					$code.add("terminalOptions",$c.terminalOptions);
+				}
+				int i = ((CommonToken)$c.getToken()).getTokenIndex();
+				$code.add("elementIndex", i);
+				generator.generateLocalFOLLOW($c,tokenLabel,currentRuleName,i);
+			}
+		}
+
+	|	s=STRING_LITERAL
+		{
+			int i = ((CommonToken)$s.getToken()).getTokenIndex();
+			if ( grammar.type==Grammar.LEXER )
+			{
+				$code = templates.getInstanceOf("lexerStringRef");
+				$code.add("string",
+					generator.target.getTargetStringLiteralFromANTLRStringLiteral(generator,$s.text));
+				$code.add("elementIndex", i);
+				if ( label!=null )
+				{
+					$code.add("label", labelText);
+				}
+			}
+			else
+			{
+				// else it's a token type reference
+				$code = getTokenElementST("tokenRef", "string_literal", $s, astSuffix, labelText);
+				String tokenLabel =
+					generator.getTokenTypeAsTargetLabel(grammar.getTokenType($s.text));
+				$code.add("token",tokenLabel);
+				if ( $s.terminalOptions!=null )
+				{
+					$code.add("terminalOptions",$s.terminalOptions);
+				}
+				$code.add("elementIndex", i);
+				generator.generateLocalFOLLOW($s,tokenLabel,currentRuleName,i);
+			}
+		}
+
+	|	w=WILDCARD
+		{
+			$code = getWildcardST($w,astSuffix,labelText);
+			$code.add("elementIndex", ((CommonToken)$w.getToken()).getTokenIndex());
+		}
+
+	|	^(DOT ID a=atom[$ID, label, astSuffix]) // scope override on rule or token
+		{ $code = $a.code; }
+
+	|	set[label,astSuffix]
+		{ $code = $set.code; }
+	;
+
+ast_suffix
+	:	ROOT
+	|	BANG
+	;
+
+set[GrammarAST label, GrammarAST astSuffix] returns [ST code=null]
+@init
+{
+	String labelText=null;
+	if ( $label!=null )
+	{
+		labelText = $label.getText();
+	}
+}
+	:	^(s=BLOCK .*) // only care that it's a BLOCK with setValue!=null
+		{
+			$code = getTokenElementST("matchSet", "set", $s, astSuffix, labelText);
+			int i = ((CommonToken)$s.getToken()).getTokenIndex();
+			$code.add("elementIndex", i);
+			if ( grammar.type!=Grammar.LEXER )
+			{
+				generator.generateLocalFOLLOW($s,"set",currentRuleName,i);
+			}
+			$code.add("s", generator.genSetExpr(templates,$s.getSetValue(),1,false));
+		}
+	;
+
+setElement
+	:	CHAR_LITERAL
+	|	TOKEN_REF
+	|	STRING_LITERAL
+	|	^(CHAR_RANGE CHAR_LITERAL CHAR_LITERAL)
+	;
+
+// REWRITE stuff
+
+rewrite returns [ST code=null]
+@init
+{
+	if ( state.backtracking == 0 )
+	{
+		if ( $start.getType()==REWRITES )
+		{
+			if ( generator.grammar.buildTemplate() )
+			{
+				$code = templates.getInstanceOf("rewriteTemplate");
+			}
+			else
+			{
+				$code = templates.getInstanceOf("rewriteCode");
+				$code.add("treeLevel", OUTER_REWRITE_NESTING_LEVEL);
+				$code.add("rewriteBlockLevel", OUTER_REWRITE_NESTING_LEVEL);
+				$code.add("referencedElementsDeep",
+								  getTokenTypesAsTargetLabels($start.rewriteRefsDeep));
+				Set<String> tokenLabels =
+					grammar.getLabels($start.rewriteRefsDeep, Grammar.TOKEN_LABEL);
+				Set<String> tokenListLabels =
+					grammar.getLabels($start.rewriteRefsDeep, Grammar.TOKEN_LIST_LABEL);
+				Set<String> ruleLabels =
+					grammar.getLabels($start.rewriteRefsDeep, Grammar.RULE_LABEL);
+				Set<String> ruleListLabels =
+					grammar.getLabels($start.rewriteRefsDeep, Grammar.RULE_LIST_LABEL);
+				Set<String> wildcardLabels =
+					grammar.getLabels($start.rewriteRefsDeep, Grammar.WILDCARD_TREE_LABEL);
+				Set<String> wildcardListLabels =
+					grammar.getLabels($start.rewriteRefsDeep, Grammar.WILDCARD_TREE_LIST_LABEL);
+				// just in case they ref $r for "previous value", make a stream
+				// from retval.tree
+				ST retvalST = templates.getInstanceOf("prevRuleRootRef");
+				ruleLabels.add(retvalST.render());
+				$code.add("referencedTokenLabels", tokenLabels);
+				$code.add("referencedTokenListLabels", tokenListLabels);
+				$code.add("referencedRuleLabels", ruleLabels);
+				$code.add("referencedRuleListLabels", ruleListLabels);
+				$code.add("referencedWildcardLabels", wildcardLabels);
+				$code.add("referencedWildcardListLabels", wildcardListLabels);
+			}
+		}
+		else
+		{
+				$code = templates.getInstanceOf("noRewrite");
+				$code.add("treeLevel", OUTER_REWRITE_NESTING_LEVEL);
+				$code.add("rewriteBlockLevel", OUTER_REWRITE_NESTING_LEVEL);
+		}
+	}
+}
+	:	^(	REWRITES
+			(
+				{rewriteRuleRefs = new HashSet<Object>();}
+				^( r=REWRITE (pred=SEMPRED)? alt=rewrite_alternative)
+				{
+					rewriteBlockNestingLevel = OUTER_REWRITE_NESTING_LEVEL;
+					List<? extends Object> predChunks = null;
+					if ( $pred!=null )
+					{
+						//predText = #pred.getText();
+						predChunks = generator.translateAction(currentRuleName,$pred);
+					}
+					String description =
+						grammar.grammarTreeToString($r,false);
+					description = generator.target.getTargetStringLiteralFromString(description);
+					$code.addAggr("alts.{pred,alt,description}",
+									  predChunks,
+									  alt,
+									  description);
+					pred=null;
+				}
+			)*
+		)
+	|
+	;
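
The rewrite rule above selects between the "rewriteTemplate" and "rewriteCode" templates and hands them the sets of referenced token, rule, and wildcard labels; at runtime the expanded rewriteCode region drives the ANTLR 3 rewrite streams. Below is a minimal, self-contained sketch of that runtime pattern only, not of the generated code itself; the token types, stream names, and the `-> ^(ASSIGN ID expr)` shape are illustrative assumptions.

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.RewriteRuleSubtreeStream;
    import org.antlr.runtime.tree.RewriteRuleTokenStream;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class RewriteStreamSketch {
        public static void main(String[] args) {
            // Hypothetical token types; real generated code uses the grammar's constants.
            final int ASSIGN = 10, ID = 11, EXPR = 12;
            TreeAdaptor adaptor = new CommonTreeAdaptor();

            // One stream per referenced token/rule label, as listed by the walker above.
            RewriteRuleTokenStream stream_ID = new RewriteRuleTokenStream(adaptor, "token ID");
            RewriteRuleSubtreeStream stream_expr = new RewriteRuleSubtreeStream(adaptor, "rule expr");

            // Filled while the alternative is matched...
            stream_ID.add(new CommonToken(ID, "x"));
            stream_expr.add(adaptor.create(new CommonToken(EXPR, "e")));

            // ...then replayed to build the new tree for a rewrite like -> ^(ASSIGN ID expr).
            Object root_0 = adaptor.nil();
            Object root_1 = adaptor.becomeRoot(adaptor.create(ASSIGN, "ASSIGN"), adaptor.nil());
            adaptor.addChild(root_1, stream_ID.nextNode());
            adaptor.addChild(root_1, stream_expr.nextTree());
            adaptor.addChild(root_0, root_1);

            System.out.println(((CommonTree) root_0).toStringTree()); // prints (ASSIGN x e)
        }
    }
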
+
+rewrite_block[String blockTemplateName] returns [ST code=null]
+@init
+{
+	rewriteBlockNestingLevel++;
+	ST save_currentBlockST = currentBlockST;
+	if ( state.backtracking == 0 )
+	{
+		$code = templates.getInstanceOf(blockTemplateName);
+		currentBlockST = $code;
+		$code.add("rewriteBlockLevel", rewriteBlockNestingLevel);
+	}
+}
+	:	^(	BLOCK
+			{
+				currentBlockST.add("referencedElementsDeep",
+					getTokenTypesAsTargetLabels($BLOCK.rewriteRefsDeep));
+				currentBlockST.add("referencedElements",
+					getTokenTypesAsTargetLabels($BLOCK.rewriteRefsShallow));
+			}
+			alt=rewrite_alternative
+			EOB
+		)
+		{
+			$code.add("alt", $alt.code);
+		}
+	;
+finally { rewriteBlockNestingLevel--; currentBlockST = save_currentBlockST; }
+
+rewrite_alternative returns [ST code=null]
+	:	{generator.grammar.buildAST()}?
+		^(	a=ALT {$code=templates.getInstanceOf("rewriteElementList");}
+			(	(
+					el=rewrite_element
+					{$code.addAggr("elements.{el,line,pos}",
+										$el.code,
+										$el.start.getLine(),
+										$el.start.getCharPositionInLine() + 1
+										);
+					}
+				)+
+			|	EPSILON
+				{$code.addAggr("elements.{el,line,pos}",
+								   templates.getInstanceOf("rewriteEmptyAlt"),
+								   $a.getLine(),
+								   $a.getCharPositionInLine() + 1
+								   );
+				}
+			)
+			EOA
+		 )
+
+	|	{generator.grammar.buildTemplate()}? rewrite_template
+		{ $code = $rewrite_template.code; }
+
+	|	// reproduce same input (only AST at moment)
+		ETC
+	;
+
+rewrite_element returns [ST code=null]
+@init
+{
+	IntSet elements=null;
+	GrammarAST ast = null;
+}
+	:	rewrite_atom[false]
+		{ $code = $rewrite_atom.code; }
+	|	rewrite_ebnf
+		{ $code = $rewrite_ebnf.code; }
+	|	rewrite_tree
+		{ $code = $rewrite_tree.code; }
+	;
+
+rewrite_ebnf returns [ST code=null]
+	:	^( OPTIONAL rewrite_block["rewriteOptionalBlock"] )
+		{ $code = $rewrite_block.code; }
+		{
+			String description = grammar.grammarTreeToString($start, false);
+			description = generator.target.getTargetStringLiteralFromString(description);
+			$code.add("description", description);
+		}
+	|	^( CLOSURE rewrite_block["rewriteClosureBlock"] )
+		{ $code = $rewrite_block.code; }
+		{
+			String description = grammar.grammarTreeToString($start, false);
+			description = generator.target.getTargetStringLiteralFromString(description);
+			$code.add("description", description);
+		}
+	|	^( POSITIVE_CLOSURE rewrite_block["rewritePositiveClosureBlock"] )
+		{ $code = $rewrite_block.code; }
+		{
+			String description = grammar.grammarTreeToString($start, false);
+			description = generator.target.getTargetStringLiteralFromString(description);
+			$code.add("description", description);
+		}
+	;
+
+rewrite_tree returns [ST code]
+@init
+{
+	rewriteTreeNestingLevel++;
+	if ( state.backtracking == 0 )
+	{
+		$code = templates.getInstanceOf("rewriteTree");
+		$code.add("treeLevel", rewriteTreeNestingLevel);
+		$code.add("enclosingTreeLevel", rewriteTreeNestingLevel-1);
+	}
+}
+	:	^(	TREE_BEGIN
+			r=rewrite_atom[true]
+			{
+				$code.addAggr("root.{el,line,pos}",
+								   $r.code,
+								   $r.start.getLine(),
+								   $r.start.getCharPositionInLine() + 1
+								  );
+			}
+			(
+			  el=rewrite_element
+			  {
+				$code.addAggr("children.{el,line,pos}",
+									$el.code,
+									$el.start.getLine(),
+									$el.start.getCharPositionInLine() + 1
+									);
+			  }
+			)*
+		)
+		{
+			String description = grammar.grammarTreeToString($start, false);
+			description = generator.target.getTargetStringLiteralFromString(description);
+			$code.add("description", description);
+		}
+	;
+finally { rewriteTreeNestingLevel--; }
+
+rewrite_atom[boolean isRoot] returns [ST code=null]
+	:   r=RULE_REF
+		{
+			String ruleRefName = $r.text;
+			String stName = "rewriteRuleRef";
+			if ( isRoot )
+			{
+				stName += "Root";
+			}
+			$code = templates.getInstanceOf(stName);
+			$code.add("rule", ruleRefName);
+			if ( grammar.getRule(ruleRefName)==null )
+			{
+				ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_RULE_REF,
+										  grammar,
+										  $r.getToken(),
+										  ruleRefName);
+				$code = new ST(""); // blank; no code gen
+			}
+			else if ( grammar.getRule(currentRuleName)
+						 .getRuleRefsInAlt(ruleRefName,outerAltNum)==null )
+			{
+				ErrorManager.grammarError(ErrorManager.MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS,
+										  grammar,
+										  $r.getToken(),
+										  ruleRefName);
+				$code = new ST(""); // blank; no code gen
+			}
+			else
+			{
+				// track all rule refs as we must copy 2nd ref to rule and beyond
+				if ( !rewriteRuleRefs.contains(ruleRefName) )
+				{
+					rewriteRuleRefs.add(ruleRefName);
+				}
+			}
+		}
+
+	|
+		(	^(tk=TOKEN_REF (arg=ARG_ACTION)?)
+		|	cl=CHAR_LITERAL
+		|	sl=STRING_LITERAL
+		)
+		{
+			GrammarAST term = $tk;
+			if (term == null) term = $cl;
+			if (term == null) term = $sl;
+			String tokenName = $start.getToken().getText();
+			String stName = "rewriteTokenRef";
+			Rule rule = grammar.getRule(currentRuleName);
+			Collection<String> tokenRefsInAlt = rule.getTokenRefsInAlt(outerAltNum);
+			boolean createNewNode = !tokenRefsInAlt.contains(tokenName) || $arg!=null;
+			if ( createNewNode )
+			{
+				stName = "rewriteImaginaryTokenRef";
+			}
+			if ( isRoot )
+			{
+				stName += "Root";
+			}
+			$code = templates.getInstanceOf(stName);
+			if (term.terminalOptions != null) {
+				$code.add("terminalOptions",term.terminalOptions);
+			}
+			if ( $arg!=null )
+			{
+				List<? extends Object> args = generator.translateAction(currentRuleName,$arg);
+				$code.add("args", args);
+			}
+			$code.add("elementIndex", ((CommonToken)$start.getToken()).getTokenIndex());
+			int ttype = grammar.getTokenType(tokenName);
+			String tok = generator.getTokenTypeAsTargetLabel(ttype);
+			$code.add("token", tok);
+			if ( grammar.getTokenType(tokenName)==Label.INVALID )
+			{
+				ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE,
+										  grammar,
+										  $start.getToken(),
+										  tokenName);
+				$code = new ST(""); // blank; no code gen
+			}
+		}
+
+	|	LABEL
+		{
+			String labelName = $LABEL.text;
+			Rule rule = grammar.getRule(currentRuleName);
+			Grammar.LabelElementPair pair = rule.getLabel(labelName);
+			if ( labelName.equals(currentRuleName) )
+			{
+				// special case; ref to old value via $ rule
+				if ( rule.hasRewrite(outerAltNum) &&
+					 rule.getRuleRefsInAlt(outerAltNum).contains(labelName) )
+				{
+					ErrorManager.grammarError(ErrorManager.MSG_RULE_REF_AMBIG_WITH_RULE_IN_ALT,
+											  grammar,
+											  $LABEL.getToken(),
+											  labelName);
+				}
+				ST labelST = templates.getInstanceOf("prevRuleRootRef");
+				$code = templates.getInstanceOf("rewriteRuleLabelRef"+(isRoot?"Root":""));
+				$code.add("label", labelST);
+			}
+			else if ( pair==null )
+			{
+				ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_LABEL_REF_IN_REWRITE,
+										  grammar,
+										  $LABEL.getToken(),
+										  labelName);
+				$code = new ST("");
+			}
+			else
+			{
+				String stName = null;
+				switch ( pair.type )
+				{
+				case Grammar.TOKEN_LABEL :
+					stName = "rewriteTokenLabelRef";
+					break;
+				case Grammar.WILDCARD_TREE_LABEL :
+					stName = "rewriteWildcardLabelRef";
+					break;
+				case Grammar.WILDCARD_TREE_LIST_LABEL:
+					stName = "rewriteRuleListLabelRef"; // acts like rule ref list for ref
+					break;
+				case Grammar.RULE_LABEL :
+					stName = "rewriteRuleLabelRef";
+					break;
+				case Grammar.TOKEN_LIST_LABEL :
+					stName = "rewriteTokenListLabelRef";
+					break;
+				case Grammar.RULE_LIST_LABEL :
+					stName = "rewriteRuleListLabelRef";
+					break;
+				}
+				if ( isRoot )
+				{
+					stName += "Root";
+				}
+				$code = templates.getInstanceOf(stName);
+				$code.add("label", labelName);
+			}
+		}
+
+	|	ACTION
+		{
+			// actions in rewrite rules yield a tree object
+			String actText = $ACTION.text;
+			List<? extends Object> chunks = generator.translateAction(currentRuleName,$ACTION);
+			$code = templates.getInstanceOf("rewriteNodeAction"+(isRoot?"Root":""));
+			$code.add("action", chunks);
+		}
+	;
+
+public
+rewrite_template returns [ST code=null]
+	:	^( ALT EPSILON EOA ) {$code=templates.getInstanceOf("rewriteEmptyTemplate");}
+	|	^(	TEMPLATE (id=ID|ind=ACTION)
+			{
+				if ( $id!=null && $id.text.equals("template") )
+				{
+						$code = templates.getInstanceOf("rewriteInlineTemplate");
+				}
+				else if ( $id!=null )
+				{
+						$code = templates.getInstanceOf("rewriteExternalTemplate");
+						$code.add("name", $id.text);
+				}
+				else if ( $ind!=null )
+				{ // must be \%({expr})(args)
+					$code = templates.getInstanceOf("rewriteIndirectTemplate");
+					List<? extends Object> chunks=generator.translateAction(currentRuleName,$ind);
+					$code.add("expr", chunks);
+				}
+			}
+			^(	ARGLIST
+				(	^( ARG arg=ID a=ACTION
+					{
+						// must set alt num here rather than in define.g
+						// because actions like \%foo(name={\$ID.text}) aren't
+						// broken up yet into trees.
+						$a.outerAltNum = this.outerAltNum;
+						List<? extends Object> chunks = generator.translateAction(currentRuleName,$a);
+						$code.addAggr("args.{name,value}", $arg.text, chunks);
+					}
+					)
+				)*
+			)
+			(	DOUBLE_QUOTE_STRING_LITERAL
+				{
+					String sl = $DOUBLE_QUOTE_STRING_LITERAL.text;
+					String t = sl.substring( 1, sl.length() - 1 ); // strip quotes
+					t = generator.target.getTargetStringLiteralFromString(t);
+					$code.add("template",t);
+				}
+			|	DOUBLE_ANGLE_STRING_LITERAL
+				{
+					String sl = $DOUBLE_ANGLE_STRING_LITERAL.text;
+					String t = sl.substring( 2, sl.length() - 2 ); // strip double angle quotes
+					t = generator.target.getTargetStringLiteralFromString(t);
+					$code.add("template",t);
+				}
+			)?
+		)
+
+	|	act=ACTION
+		{
+			// set alt num for same reason as ARGLIST above
+			$act.outerAltNum = this.outerAltNum;
+			$code=templates.getInstanceOf("rewriteAction");
+			$code.add("action",
+							  generator.translateAction(currentRuleName,$act));
+		}
+	;
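
That completes this code-generation tree walker. Its recurring pattern of templates.getInstanceOf(...) followed by add(...) and addAggr(...) calls is plain StringTemplate 4 usage against the code-generation template group. Below is a minimal, self-contained sketch of that API; the template text and attribute names are invented for illustration and are not the real ANTLR code-generation templates.

    import org.stringtemplate.v4.ST;

    public class TemplateSketch {
        public static void main(String[] args) {
            // Stand-in for a named template such as "tokenRef" fetched via getInstanceOf().
            ST code = new ST("<label>=match(<token>); // element <elementIndex>");
            code.add("label", "t1");
            code.add("token", "ID");
            code.add("elementIndex", 4);
            System.out.println(code.render()); // t1=match(ID); // element 4

            // addAggr() mirrors calls like $code.addAggr("alts.{pred,alt,description}", ...).
            ST alts = new ST("<alts:{a | [<a.description>]}; separator=\", \">");
            alts.addAggr("alts.{pred,alt,description}", "", "altCode1", "first alt");
            alts.addAggr("alts.{pred,alt,description}", "", "altCode2", "second alt");
            System.out.println(alts.render()); // [first alt], [second alt]
        }
    }
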
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/DefineGrammarItemsWalker.g b/tool/src/main/antlr3/org/antlr/grammar/v3/DefineGrammarItemsWalker.g
new file mode 100644
index 0000000..8fc2dd0
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/DefineGrammarItemsWalker.g
@@ -0,0 +1,701 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2011 Terence Parr
+ All rights reserved.
+
+ Grammar conversion to ANTLR v3:
+ Copyright (c) 2011 Sam Harwell
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+	notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+	derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+tree grammar DefineGrammarItemsWalker;
+
+options {
+	language=Java;
+	tokenVocab = ANTLR;
+	ASTLabelType = GrammarAST;
+}
+
+scope AttributeScopeActions {
+	HashMap<GrammarAST, GrammarAST> actions;
+}
+
+@header {
+package org.antlr.grammar.v3;
+import org.antlr.tool.*;
+import java.util.HashSet;
+import java.util.Set;
+}
+
+@members {
+protected Grammar grammar;
+protected GrammarAST root;
+protected String currentRuleName;
+protected GrammarAST currentRewriteBlock;
+protected GrammarAST currentRewriteRule;
+protected int outerAltNum = 0;
+protected int blockLevel = 0;
+
+public final int countAltsForRule( CommonTree t ) {
+    CommonTree block = (CommonTree)t.getFirstChildWithType(BLOCK);
+    int altCount = 0;
+    for (int i = 0; i < block.getChildCount(); i++) {
+        if (block.getChild(i).getType() == ALT)
+            altCount++;
+    }
+    return altCount;
+}
+
+protected final void finish() {
+    trimGrammar();
+}
+
+/** Remove any lexer rules from a COMBINED; already passed to lexer */
+protected final void trimGrammar() {
+    if ( grammar.type != Grammar.COMBINED ) {
+        return;
+    }
+    // form is (header ... ) ( grammar ID (scope ...) ... ( rule ... ) ( rule ... ) ... )
+    GrammarAST p = root;
+    // find the grammar spec
+    while ( !p.getText().equals( "grammar" ) ) {
+        p = p.getNextSibling();
+    }
+    for ( int i = 0; i < p.getChildCount(); i++ ) {
+        if ( p.getChild( i ).getType() != RULE )
+            continue;
+
+        String ruleName = p.getChild(i).getChild(0).getText();
+        //Console.Out.WriteLine( "rule " + ruleName + " prev=" + prev.getText() );
+        if (Rule.getRuleType(ruleName) == Grammar.LEXER) {
+            // remove lexer rule
+            p.deleteChild( i );
+            i--;
+        }
+    }
+    //Console.Out.WriteLine( "root after removal is: " + root.ToStringList() );
+}
+
+protected final void trackInlineAction( GrammarAST actionAST ) {
+    Rule r = grammar.getRule( currentRuleName );
+    if ( r != null ) {
+        r.trackInlineAction( actionAST );
+    }
+}
+}
+
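
Like the other tree grammars under org.antlr.grammar.v3, this file is generated into a Java tree parser that the tool drives over the grammar AST produced by the earlier parse phase. A minimal sketch of that invocation, assuming grammarTree and g come from those earlier phases (the tool normally performs this wiring itself):

    import org.antlr.grammar.v3.DefineGrammarItemsWalker;
    import org.antlr.runtime.RecognitionException;
    import org.antlr.runtime.tree.CommonTreeNodeStream;
    import org.antlr.tool.Grammar;
    import org.antlr.tool.GrammarAST;

    public class DefineItemsSketch {
        // Sketch only: grammarTree is the root GrammarAST, g the Grammar being populated.
        static void defineItems(GrammarAST grammarTree, Grammar g) throws RecognitionException {
            CommonTreeNodeStream nodes = new CommonTreeNodeStream(grammarTree);
            DefineGrammarItemsWalker walker = new DefineGrammarItemsWalker(nodes);
            walker.grammar_(g); // entry rule declared "public" below; fills rules, scopes, labels
        }
    }
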
+public
+grammar_[Grammar g]
+@init
+{
+grammar = $g;
+root = $start;
+}
+@after
+{
+finish();
+}
+	:	^( LEXER_GRAMMAR	{grammar.type = Grammar.LEXER;} 		grammarSpec )
+	|	^( PARSER_GRAMMAR	{grammar.type = Grammar.PARSER;}		grammarSpec )
+	|	^( TREE_GRAMMAR		{grammar.type = Grammar.TREE_PARSER;}	grammarSpec )
+	|	^( COMBINED_GRAMMAR	{grammar.type = Grammar.COMBINED;}		grammarSpec )
+	;
+
+attrScope
+scope AttributeScopeActions;
+@init
+{
+	$AttributeScopeActions::actions = new HashMap<GrammarAST, GrammarAST>();
+}
+	:	^( 'scope' name=ID attrScopeAction* attrs=ACTION )
+		{
+			AttributeScope scope = grammar.defineGlobalScope($name.text,$attrs.getToken());
+			scope.isDynamicGlobalScope = true;
+			scope.addAttributes($attrs.text, ';');
+			for (GrammarAST action : $AttributeScopeActions::actions.keySet())
+				scope.defineNamedAction(action, $AttributeScopeActions::actions.get(action));
+		}
+	;
+
+attrScopeAction
+	:	^(AMPERSAND ID ACTION)
+		{
+			$AttributeScopeActions::actions.put( $ID, $ACTION );
+		}
+	;
+
+grammarSpec
+	:	id=ID
+		(cmt=DOC_COMMENT)?
+		( optionsSpec )?
+		(delegateGrammars)?
+		(tokensSpec)?
+		(attrScope)*
+		(actions)?
+		rules
+	;
+
+actions
+	:	( action )+
+	;
+
+action
+@init
+{
+	String scope=null;
+	GrammarAST nameAST=null, actionAST=null;
+}
+	:	^(amp=AMPERSAND id1=ID
+			( id2=ID a1=ACTION
+			  {scope=$id1.text; nameAST=$id2; actionAST=$a1;}
+			| a2=ACTION
+			  {scope=null; nameAST=$id1; actionAST=$a2;}
+			)
+		 )
+		 {
+		 grammar.defineNamedAction($amp,scope,nameAST,actionAST);
+		 }
+	;
+
+optionsSpec
+	:	^(OPTIONS .*)
+	;
+
+delegateGrammars
+	:	^( 'import' ( ^(ASSIGN ID ID) | ID )+ )
+	;
+
+tokensSpec
+	:	^(TOKENS tokenSpec*)
+	;
+
+tokenSpec
+	:	t=TOKEN_REF
+	|	^(	ASSIGN
+			TOKEN_REF
+			(	STRING_LITERAL
+			|	CHAR_LITERAL
+			)
+		 )
+	;
+
+rules
+	:	(rule | ^(PREC_RULE .*))+
+	;
+
+rule
+@init
+{
+	String name=null;
+	Map<String, Object> opts=null;
+	Rule r = null;
+}
+	:		^( RULE id=ID {opts = $RULE.getBlockOptions();}
+			(modifier)?
+			^( ARG (args=ARG_ACTION)? )
+			^( RET (ret=ARG_ACTION)? )
+			(throwsSpec)?
+			(optionsSpec)?
+			{
+				name = $id.text;
+				currentRuleName = name;
+				if ( Rule.getRuleType(name) == Grammar.LEXER && grammar.type==Grammar.COMBINED )
+				{
+					// a merged grammar spec, track lexer rules and send to another grammar
+					grammar.defineLexerRuleFoundInParser($id.getToken(), $start);
+				}
+				else
+				{
+					int numAlts = countAltsForRule($start);
+					grammar.defineRule($id.getToken(), $modifier.mod, opts, $start, $args, numAlts);
+					r = grammar.getRule(name);
+					if ( $args!=null )
+					{
+						r.parameterScope = grammar.createParameterScope(name,$args.getToken());
+						r.parameterScope.addAttributes($args.text, ',');
+					}
+					if ( $ret!=null )
+					{
+						r.returnScope = grammar.createReturnScope(name,$ret.getToken());
+						r.returnScope.addAttributes($ret.text, ',');
+					}
+					if ( $throwsSpec.exceptions != null )
+					{
+						for (String exception : $throwsSpec.exceptions)
+							r.throwsSpec.add( exception );
+					}
+				}
+			}
+			(ruleScopeSpec[r])?
+			(ruleAction[r])*
+			{ this.blockLevel=0; }
+			b=block
+			(exceptionGroup)?
+			EOR
+			{
+				// copy rule options into the block AST, which is where
+				// the analysis will look for k option etc...
+				$b.start.setBlockOptions(opts);
+			}
+		)
+	;
+
+ruleAction[Rule r]
+	:	^(amp=AMPERSAND id=ID a=ACTION ) {if (r!=null) r.defineNamedAction($amp,$id,$a);}
+	;
+
+modifier returns [String mod]
+@init
+{
+	$mod = $start.getToken().getText();
+}
+	:	'protected'
+	|	'public'
+	|	'private'
+	|	'fragment'
+	;
+
+throwsSpec returns [HashSet<String> exceptions]
+@init
+{
+	$exceptions = new HashSet<String>();
+}
+	:	^('throws' (ID {$exceptions.add($ID.text);})+ )
+	;
+
+ruleScopeSpec[Rule r]
+scope AttributeScopeActions;
+@init
+{
+	$AttributeScopeActions::actions = new HashMap<GrammarAST, GrammarAST>();
+}
+	:	^(	'scope'
+			(	attrScopeAction* attrs=ACTION
+				{
+					r.ruleScope = grammar.createRuleScope(r.name,$attrs.getToken());
+					r.ruleScope.isDynamicRuleScope = true;
+					r.ruleScope.addAttributes($attrs.text, ';');
+					for (GrammarAST action : $AttributeScopeActions::actions.keySet())
+						r.ruleScope.defineNamedAction(action, $AttributeScopeActions::actions.get(action));
+				}
+			)?
+			(	uses=ID
+				{
+					if ( grammar.getGlobalScope($uses.text)==null ) {
+						ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE,
+												  grammar,
+												  $uses.getToken(),
+												  $uses.text);
+					}
+					else {
+						if ( r.useScopes==null ) {r.useScopes=new ArrayList<String>();}
+						r.useScopes.add($uses.text);
+					}
+				}
+			)*
+		)
+	;
+
+block
+@init
+{
+	// must run during backtracking
+	this.blockLevel++;
+	if ( blockLevel == 1 )
+		this.outerAltNum=1;
+}
+	:	^(	BLOCK
+			(optionsSpec)?
+			(blockAction)*
+			(	alternative rewrite
+				{{
+					if ( this.blockLevel == 1 )
+						this.outerAltNum++;
+				}}
+			)+
+			EOB
+		 )
+	;
+finally { blockLevel--; }
+
+// TODO: this does nothing now! subrules cannot have init actions. :(
+blockAction
+	:	^(amp=AMPERSAND id=ID a=ACTION ) // {r.defineAction(#amp,#id,#a);}
+	;
+
+alternative
+//@init
+//{
+//	if ( state.backtracking == 0 )
+//	{
+//		if ( grammar.type!=Grammar.LEXER && grammar.GetOption("output")!=null && blockLevel==1 )
+//		{
+//			GrammarAST aRewriteNode = $start.FindFirstType(REWRITE); // alt itself has rewrite?
+//			GrammarAST rewriteAST = (GrammarAST)$start.Parent.getChild($start.ChildIndex + 1);
+//			// we have a rewrite if alt uses it inside subrule or this alt has one
+//			// but don't count -> ... rewrites, which mean "do default auto construction"
+//			if ( aRewriteNode!=null||
+//				 (firstRewriteAST!=null &&
+//				  firstRewriteAST.getType()==REWRITE &&
+//				  firstRewriteAST.getChild(0)!=null &&
+//				  firstRewriteAST.getChild(0).getType()!=ETC) )
+//			{
+//				Rule r = grammar.getRule(currentRuleName);
+//				r.TrackAltsWithRewrites($start,this.outerAltNum);
+//			}
+//		}
+//	}
+//}
+	:	^( ALT (element)+ EOA )
+	;
+
+exceptionGroup
+	:	( exceptionHandler )+ (finallyClause)?
+	|	finallyClause
+	;
+
+exceptionHandler
+	:   ^('catch' ARG_ACTION ACTION) {trackInlineAction($ACTION);}
+	;
+
+finallyClause
+	:    ^('finally' ACTION) {trackInlineAction($ACTION);}
+	;
+
+element
+	:   ^(ROOT element)
+	|   ^(BANG element)
+	|   atom[null]
+	|   ^(NOT element)
+	|   ^(RANGE atom[null] atom[null])
+	|   ^(CHAR_RANGE atom[null] atom[null])
+	|	^(	ASSIGN id=ID el=element)
+			{
+				GrammarAST e = $el.start;
+				if ( e.getType()==ANTLRParser.ROOT || e.getType()==ANTLRParser.BANG )
+				{
+					e = (GrammarAST)e.getChild(0);
+				}
+				if ( e.getType()==RULE_REF)
+				{
+					grammar.defineRuleRefLabel(currentRuleName,$id.getToken(),e);
+				}
+				else if ( e.getType()==WILDCARD && grammar.type==Grammar.TREE_PARSER )
+				{
+					grammar.defineWildcardTreeLabel(currentRuleName,$id.getToken(),e);
+				}
+				else
+				{
+					grammar.defineTokenRefLabel(currentRuleName,$id.getToken(),e);
+				}
+			}
+	|	^(	PLUS_ASSIGN id2=ID a2=element
+			{
+				GrammarAST a = $a2.start;
+				if ( a.getType()==ANTLRParser.ROOT || a.getType()==ANTLRParser.BANG )
+				{
+					a = (GrammarAST)a.getChild(0);
+				}
+				if ( a.getType()==RULE_REF )
+				{
+					grammar.defineRuleListLabel(currentRuleName,$id2.getToken(),a);
+				}
+				else if ( a.getType() == WILDCARD && grammar.type == Grammar.TREE_PARSER )
+				{
+					grammar.defineWildcardTreeListLabel( currentRuleName, $id2.getToken(), a );
+				}
+				else
+				{
+					grammar.defineTokenListLabel(currentRuleName,$id2.getToken(),a);
+				}
+			}
+		 )
+	|   ebnf
+	|   tree_
+	|   ^( SYNPRED block )
+	|   act=ACTION
+		{
+			$act.outerAltNum = this.outerAltNum;
+			trackInlineAction($act);
+		}
+	|   act2=FORCED_ACTION
+		{
+			$act2.outerAltNum = this.outerAltNum;
+			trackInlineAction($act2);
+		}
+	|   SEMPRED
+		{
+			$SEMPRED.outerAltNum = this.outerAltNum;
+			trackInlineAction($SEMPRED);
+		}
+	|   SYN_SEMPRED
+	|   ^(BACKTRACK_SEMPRED .*)
+	|   GATED_SEMPRED
+		{
+			$GATED_SEMPRED.outerAltNum = this.outerAltNum;
+			trackInlineAction($GATED_SEMPRED);
+		}
+	|   EPSILON 
+	;
+
+ebnf
+	:	(dotLoop) => dotLoop // .* or .+
+	|	block
+	|	^( OPTIONAL block )
+	|	^( CLOSURE block )
+	|	^( POSITIVE_CLOSURE block )
+	;
+
+/** Track the .* and .+ idioms and make them nongreedy by default.
+ */
+dotLoop
+	:	(	^( CLOSURE dotBlock )
+		|	^( POSITIVE_CLOSURE dotBlock )
+		)
+		{
+			GrammarAST block = (GrammarAST)$start.getChild(0);
+			Map<String, Object> opts = new HashMap<String, Object>();
+			opts.put("greedy", "false");
+			if ( grammar.type!=Grammar.LEXER )
+			{
+				// parser grammars assume k=1 for .* loops
+				// otherwise they (the analysis?) look all the way to EOF!
+				opts.put("k", 1);
+			}
+			block.setOptions(grammar,opts);
+		}
+	;
+
+dotBlock
+	:	^( BLOCK ^( ALT WILDCARD EOA ) EOB )
+	;
+
+tree_
+	:	^(TREE_BEGIN element+)
+	;
+
+atom[GrammarAST scope_]
+	:	^( rr=RULE_REF (rarg=ARG_ACTION)? )
+		{
+			grammar.altReferencesRule( currentRuleName, $scope_, $rr, this.outerAltNum );
+			if ( $rarg != null )
+			{
+				$rarg.outerAltNum = this.outerAltNum;
+				trackInlineAction($rarg);
+			}
+		}
+	|	^( t=TOKEN_REF (targ=ARG_ACTION )? )
+		{
+			if ( $targ != null )
+			{
+				$targ.outerAltNum = this.outerAltNum;
+				trackInlineAction($targ);
+			}
+			if ( grammar.type == Grammar.LEXER )
+			{
+				grammar.altReferencesRule( currentRuleName, $scope_, $t, this.outerAltNum );
+			}
+			else
+			{
+				grammar.altReferencesTokenID( currentRuleName, $t, this.outerAltNum );
+			}
+		}
+	|	c=CHAR_LITERAL
+		{
+			if ( grammar.type != Grammar.LEXER )
+			{
+				Rule rule = grammar.getRule(currentRuleName);
+				if ( rule != null )
+					rule.trackTokenReferenceInAlt($c, outerAltNum);
+			}
+		}
+	|	s=STRING_LITERAL 
+		{
+			if ( grammar.type != Grammar.LEXER )
+			{
+				Rule rule = grammar.getRule(currentRuleName);
+				if ( rule!=null )
+					rule.trackTokenReferenceInAlt($s, outerAltNum);
+			}
+		}
+	|	WILDCARD
+	|	^(DOT ID atom[$ID]) // scope override on rule
+	;
+
+ast_suffix
+	:	ROOT
+	|	BANG
+	;
+
+rewrite
+@init
+{
+	// track top level REWRITES node, store stuff there
+	currentRewriteRule = $start; // has to execute during backtracking
+	if ( state.backtracking == 0 )
+	{
+		if ( grammar.buildAST() )
+			currentRewriteRule.rewriteRefsDeep = new HashSet<GrammarAST>();
+	}
+}
+	:	^(	REWRITES
+			(	^( REWRITE (pred=SEMPRED)? rewrite_alternative )
+				{
+					if ( $pred != null )
+					{
+						$pred.outerAltNum = this.outerAltNum;
+						trackInlineAction($pred);
+					}
+				}
+			)*
+		)
+		//{System.out.println("-> refs = "+currentRewriteRule.rewriteRefsDeep);}
+	|
+	;
+
+rewrite_block
+@init
+{
+	GrammarAST enclosingBlock = currentRewriteBlock;
+	if ( state.backtracking == 0 )
+	{
+		// don't do if guessing
+		currentRewriteBlock=$start; // pts to BLOCK node
+		currentRewriteBlock.rewriteRefsShallow = new HashSet<GrammarAST>();
+		currentRewriteBlock.rewriteRefsDeep = new HashSet<GrammarAST>();
+	}
+}
+	:   ^( BLOCK rewrite_alternative EOB )
+		//{System.out.println("atoms="+currentRewriteBlock.rewriteRefs);}
+		{
+			// copy the element refs in this block to the surrounding block
+			if ( enclosingBlock != null )
+			{
+				for (GrammarAST item : currentRewriteBlock.rewriteRefsShallow)
+					enclosingBlock.rewriteRefsDeep.add( item );
+			}
+			//currentRewriteBlock = enclosingBlock; // restore old BLOCK ptr
+		}
+	;
+finally { currentRewriteBlock = enclosingBlock; }
+
+rewrite_alternative
+	:	{grammar.buildAST()}? => ^( a=ALT ( ( rewrite_element )+ | EPSILON ) EOA )
+	|	{grammar.buildTemplate()}? => rewrite_template
+	|	ETC {this.blockLevel==1}? // only valid as outermost rewrite
+	;
+
+rewrite_element
+	:	rewrite_atom
+	|	rewrite_ebnf
+	|	rewrite_tree
+	;
+
+rewrite_ebnf
+	:	^( OPTIONAL rewrite_block )
+	|	^( CLOSURE rewrite_block )
+	|	^( POSITIVE_CLOSURE rewrite_block )
+	;
+
+rewrite_tree
+	:   ^(	TREE_BEGIN rewrite_atom ( rewrite_element )* )
+	;
+
+rewrite_atom
+@init
+{
+	if ( state.backtracking == 0 )
+	{
+		Rule r = grammar.getRule(currentRuleName);
+		Set<String> tokenRefsInAlt = r.getTokenRefsInAlt(outerAltNum);
+		boolean imaginary =
+			$start.getType()==TOKEN_REF &&
+			!tokenRefsInAlt.contains($start.getText());
+		if ( !imaginary && grammar.buildAST() &&
+			 ($start.getType()==RULE_REF ||
+			  $start.getType()==LABEL ||
+			  $start.getType()==TOKEN_REF ||
+			  $start.getType()==CHAR_LITERAL ||
+			  $start.getType()==STRING_LITERAL) )
+		{
+			// track per block and for entire rewrite rule
+			if ( currentRewriteBlock!=null )
+			{
+				currentRewriteBlock.rewriteRefsShallow.add($start);
+				currentRewriteBlock.rewriteRefsDeep.add($start);
+			}
+
+			//System.out.println("adding "+$start.getText()+" to "+currentRewriteRule.getText());
+			currentRewriteRule.rewriteRefsDeep.add($start);
+		}
+	}
+}
+	:	RULE_REF 
+	|	(	^(	TOKEN_REF
+				(	ARG_ACTION
+					{
+						$ARG_ACTION.outerAltNum = this.outerAltNum;
+						trackInlineAction($ARG_ACTION);
+					}
+				)?
+			)
+		|	CHAR_LITERAL
+		|	STRING_LITERAL
+		)
+	|	LABEL
+	|	ACTION
+		{
+			$ACTION.outerAltNum = this.outerAltNum;
+			trackInlineAction($ACTION);
+		}
+	;
+
+rewrite_template
+	:	^(	ALT EPSILON EOA )
+	|	^(	TEMPLATE (id=ID|ind=ACTION)
+			^( ARGLIST
+				(	^( ARG arg=ID a=ACTION )
+					{
+						$a.outerAltNum = this.outerAltNum;
+						trackInlineAction($a);
+					}
+				)*
+			)
+			{
+				if ( $ind!=null )
+				{
+					$ind.outerAltNum = this.outerAltNum;
+					trackInlineAction($ind);
+				}
+			}
+			(	DOUBLE_QUOTE_STRING_LITERAL
+			|	DOUBLE_ANGLE_STRING_LITERAL
+			)?
+		)
+	|	act=ACTION
+		{
+			$act.outerAltNum = this.outerAltNum;
+			trackInlineAction($act);
+		}
+	;
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/LeftRecursiveRuleWalker.g b/tool/src/main/antlr3/org/antlr/grammar/v3/LeftRecursiveRuleWalker.g
new file mode 100644
index 0000000..32deefc
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/LeftRecursiveRuleWalker.g
@@ -0,0 +1,285 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Grammar conversion to ANTLR v3:
+ * Copyright (c) 2011 Sam Harwell
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** Find left-recursive rules */
+tree grammar LeftRecursiveRuleWalker;
+
+options {
+	language=Java;
+	tokenVocab=ANTLR;
+    ASTLabelType=GrammarAST;
+}
+
+@header {
+package org.antlr.grammar.v3;
+
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+import org.antlr.tool.*;
+
+import org.antlr.runtime.BitSet;
+import org.antlr.runtime.DFA;
+}
+
+@members {
+protected Grammar grammar;
+private String ruleName;
+private int outerAlt; // which outer alt of rule?
+public int numAlts;  // how many alts for this rule total?
+
+@Override
+public void reportError(RecognitionException ex)
+{
+    Token token = null;
+    if (ex instanceof MismatchedTokenException)
+    {
+        token = ((MismatchedTokenException)ex).token;
+    }
+    else if (ex instanceof NoViableAltException)
+    {
+        token = ((NoViableAltException)ex).token;
+    }
+
+    ErrorManager.syntaxError(
+        ErrorManager.MSG_SYNTAX_ERROR,
+        grammar,
+        token,
+        "assign.types: " + ex.toString(),
+        ex);
+}
+
+public void setTokenPrec(GrammarAST t, int alt) {}
+public void binaryAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {}
+public void ternaryAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {}
+public void prefixAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {}
+public void suffixAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {}
+public void otherAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {}
+public void setReturnValues(GrammarAST t) {}
+}
+
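
The callbacks above (setTokenPrec, binaryAlt, ternaryAlt, prefixAlt, suffixAlt, otherAlt) are intentionally empty: this walker only classifies the shape of each outer alternative of a potentially left-recursive rule such as `e : e '*' e | e '+' e | INT ;`, and a subclass supplies the actual left-recursion handling. A hedged sketch of that override pattern follows; the subclass name and what it records are invented for illustration.

    import java.util.ArrayList;
    import java.util.List;

    import org.antlr.grammar.v3.LeftRecursiveRuleWalker;
    import org.antlr.runtime.tree.TreeNodeStream;
    import org.antlr.tool.GrammarAST;

    // Hypothetical subclass: records the operator token and alt number of every
    // binary alternative it is told about (earlier alts bind more tightly).
    // Usage sketch: new BinaryAltCollector(new CommonTreeNodeStream(ruleAST)).rec_rule(g)
    public class BinaryAltCollector extends LeftRecursiveRuleWalker {
        public final List<String> ops = new ArrayList<String>();

        public BinaryAltCollector(TreeNodeStream input) {
            super(input);
        }

        @Override
        public void setTokenPrec(GrammarAST t, int alt) {
            ops.add(t.getText() + " (alt " + alt + ")");
        }

        @Override
        public void binaryAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
            // A real analyzer would rebuild this alternative into a
            // precedence-climbing loop here.
        }
    }

Note that rec_rule below retypes a left-recursive RULE node to PREC_RULE, which is why the rules productions in the other walkers accept ^(PREC_RULE .*) and skip such rules.
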
+optionsSpec
+	:	^(OPTIONS option+)
+	;
+
+option
+	:	^(ASSIGN ID optionValue)
+	;
+
+optionValue
+	:	ID
+	|	STRING_LITERAL
+	|	CHAR_LITERAL
+	|	INT
+	;
+
+charSetElement
+	:	CHAR_LITERAL
+	|	^(OR CHAR_LITERAL CHAR_LITERAL)
+	|	^(RANGE CHAR_LITERAL CHAR_LITERAL)
+	;
+
+public
+rec_rule[Grammar g] returns [boolean isLeftRec]
+@init
+{
+	grammar = g;
+	outerAlt = 1;
+}
+	:	^(	r=RULE id=ID {ruleName=$id.getText();}
+			modifier?
+			^(ARG ARG_ACTION?)
+			^(RET ARG_ACTION?)
+			optionsSpec?
+			ruleScopeSpec?
+			(^(AMPERSAND .*))*
+			ruleBlock {$isLeftRec = $ruleBlock.isLeftRec;}
+			exceptionGroup?
+			EOR
+		)
+		{if ($ruleBlock.isLeftRec) $r.setType(PREC_RULE);}
+	;
+
+modifier
+	:	'protected'
+	|	'public'
+	|	'private'
+	|	'fragment'
+	;
+
+ruleScopeSpec
+ 	:	^('scope' ACTION? ID*)
+ 	;
+
+ruleBlock returns [boolean isLeftRec]
+@init{boolean lr=false; this.numAlts = $start.getChildCount();}
+	:	^(	BLOCK
+			optionsSpec?
+			(	outerAlternative
+				{if ($outerAlternative.isLeftRec) $isLeftRec = true;}
+				rewrite?
+				{outerAlt++;}
+			)+
+			EOB
+		)
+	;
+
+block
+    :   ^(  BLOCK
+            optionsSpec?
+            ( ^(ALT element+ EOA) rewrite? )+
+            EOB   
+         )
+    ;
+
+/** An alt is either a prefix, suffix, binary, or ternary operation, or "other". */
+outerAlternative returns [boolean isLeftRec]
+@init
+{
+GrammarAST rew = $start.getNextSibling();
+if (rew.getType() != REWRITES)
+	rew = null;
+}
+    :   (binaryMultipleOp)=> binaryMultipleOp
+                             {binaryAlt($start, rew, outerAlt); $isLeftRec=true;}
+    |   (binary)=>           binary       
+                             {binaryAlt($start, rew, outerAlt); $isLeftRec=true;}
+    |   (ternary)=>          ternary
+                             {ternaryAlt($start, rew, outerAlt); $isLeftRec=true;}
+    |   (prefix)=>           prefix
+                             {prefixAlt($start, rew, outerAlt);}
+    |   (suffix)=>           suffix
+                             {suffixAlt($start, rew, outerAlt); $isLeftRec=true;}
+    |   ^(ALT element+ EOA) // "other" case
+                             {otherAlt($start, rew, outerAlt);}
+    ;
+
+binary
+	:	^( ALT (^(BACKTRACK_SEMPRED .*))? recurseNoLabel op=token recurse EOA ) {setTokenPrec($op.t, outerAlt);}
+	;
+
+binaryMultipleOp
+	:	^( ALT (^(BACKTRACK_SEMPRED .*))? recurseNoLabel ^( BLOCK ( ^( ALT op=token EOA {setTokenPrec($op.t, outerAlt);} ) )+ EOB ) recurse EOA )
+	;
+
+ternary
+	:	^( ALT (^(BACKTRACK_SEMPRED .*))? recurseNoLabel op=token recurse token recurse EOA ) {setTokenPrec($op.t, outerAlt);}
+	;
+
+prefix : ^( ALT (^(BACKTRACK_SEMPRED .*))? {setTokenPrec((GrammarAST)input.LT(1), outerAlt);} ({!((CommonTree)input.LT(1)).getText().equals(ruleName)}? element)+ recurse EOA ) ;
+
+suffix : ^( ALT (^(BACKTRACK_SEMPRED .*))? recurseNoLabel {setTokenPrec((GrammarAST)input.LT(1), outerAlt);} element+  EOA ) ;
+
+recurse
+	:	^(ASSIGN ID recurseNoLabel)
+	|	^(PLUS_ASSIGN ID recurseNoLabel)
+	|	recurseNoLabel
+	;
+
+recurseNoLabel : {((CommonTree)input.LT(1)).getText().equals(ruleName)}? RULE_REF;
+
+/*
+elementNotRecursiveRule
+    :   {_t.findFirstType(RULE_REF)!=null && _t.findFirstType(RULE_REF).getText().equals(ruleName)}?
+        e:element
+    ;
+*/
+
+token returns [GrammarAST t=null]
+	:	^(ASSIGN ID s=token {$t = $s.t;})
+	|	^(PLUS_ASSIGN ID s=token {$t = $s.t;})
+	|	^(ROOT s=token {$t = $s.t;})
+	|	^(BANG s=token {$t = $s.t;})
+	|	a=CHAR_LITERAL      {$t = $a;}
+	|	b=STRING_LITERAL    {$t = $b;}
+	|	c=TOKEN_REF         {$t = $c;}
+	;
+
+exceptionGroup
+	:	exceptionHandler+ finallyClause?
+	|	finallyClause
+    ;
+
+exceptionHandler
+	:	^('catch' ARG_ACTION ACTION)
+	;
+
+finallyClause
+	:	^('finally' ACTION)
+	;
+
+rewrite
+	:	^(REWRITES ( ^( REWRITE SEMPRED? (^(ALT .*)|^(TEMPLATE .*)|ACTION|ETC) ) )* )
+	;
+
+element
+	:	^(ROOT element)
+	|	^(BANG element)
+	|	atom
+	|	^(NOT element)
+	|	^(RANGE atom atom)
+	|	^(ASSIGN ID element)
+	|	^(PLUS_ASSIGN ID element)
+	|	ebnf
+	|	tree_
+	|	^(SYNPRED block) 
+	|	FORCED_ACTION
+	|	ACTION
+	|	SEMPRED
+	|	SYN_SEMPRED
+	|	BACKTRACK_SEMPRED
+	|	GATED_SEMPRED
+	|	EPSILON 
+	;
+
+ebnf:   block
+    |   ^( OPTIONAL block ) 
+    |   ^( CLOSURE block )  
+    |   ^( POSITIVE_CLOSURE block ) 
+    ;
+
+tree_
+	:	^(TREE_BEGIN element+)
+	;
+
+atom
+	:	^(RULE_REF ARG_ACTION?)
+	|	^(TOKEN_REF ARG_ACTION?)
+	|	CHAR_LITERAL
+	|	STRING_LITERAL
+	|	WILDCARD
+	|	^(DOT ID atom) // scope override on rule
+	;
+
+ast_suffix
+	:	ROOT
+	|	BANG
+	;
diff --git a/tool/src/main/antlr3/org/antlr/grammar/v3/TreeToNFAConverter.g b/tool/src/main/antlr3/org/antlr/grammar/v3/TreeToNFAConverter.g
new file mode 100644
index 0000000..8568a79
--- /dev/null
+++ b/tool/src/main/antlr3/org/antlr/grammar/v3/TreeToNFAConverter.g
@@ -0,0 +1,856 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2011 Terence Parr
+ All rights reserved.
+
+ Grammar conversion to ANTLR v3:
+ Copyright (c) 2011 Sam Harwell
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+	notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+	derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Build an NFA from a tree representing an ANTLR grammar. */
+tree grammar TreeToNFAConverter;
+
+options {
+	language=Java;
+	tokenVocab = ANTLR;
+	ASTLabelType = GrammarAST;
+}
+
+@header {
+package org.antlr.grammar.v3;
+
+import org.antlr.analysis.*;
+import org.antlr.misc.*;
+import org.antlr.tool.*;
+
+import org.antlr.runtime.BitSet;
+import org.antlr.runtime.DFA;
+}
+
+@members {
+/** Factory used to create nodes and submachines */
+protected NFAFactory factory = null;
+
+/** Which NFA object are we filling in? */
+protected NFA nfa = null;
+
+/** Which grammar are we converting an NFA for? */
+protected Grammar grammar = null;
+
+protected String currentRuleName = null;
+
+protected int outerAltNum = 0;
+protected int blockLevel = 0;
+
+protected int inTest = 0;
+
+public TreeToNFAConverter(TreeNodeStream input, Grammar g, NFA nfa, NFAFactory factory) {
+    this(input);
+    this.grammar = g;
+    this.nfa = nfa;
+    this.factory = factory;
+}
+
+public final IntSet setRule(GrammarAST t) throws RecognitionException {
+    TreeToNFAConverter other = new TreeToNFAConverter( new CommonTreeNodeStream( t ), grammar, nfa, factory );
+
+    other.currentRuleName = currentRuleName;
+    other.outerAltNum = outerAltNum;
+    other.blockLevel = blockLevel;
+
+    return other.setRule();
+}
+
+public final int testBlockAsSet( GrammarAST t ) throws RecognitionException {
+    Rule r = grammar.getLocallyDefinedRule( currentRuleName );
+    if ( r.hasRewrite( outerAltNum ) )
+        return -1;
+
+    TreeToNFAConverter other = new TreeToNFAConverter( new CommonTreeNodeStream( t ), grammar, nfa, factory );
+
+    other.state.backtracking++;
+    other.currentRuleName = currentRuleName;
+    other.outerAltNum = outerAltNum;
+    other.blockLevel = blockLevel;
+
+    int result = other.testBlockAsSet();
+    if ( other.state.failed )
+        return -1;
+
+    return result;
+}
+
+public final int testSetRule( GrammarAST t ) throws RecognitionException {
+    TreeToNFAConverter other = new TreeToNFAConverter( new CommonTreeNodeStream( t ), grammar, nfa, factory );
+
+    other.state.backtracking++;
+    other.currentRuleName = currentRuleName;
+    other.outerAltNum = outerAltNum;
+    other.blockLevel = blockLevel;
+
+    int result = other.testSetRule();
+    if ( other.state.failed )
+        state.failed = true;
+
+    return result;
+}
+
+protected void addFollowTransition( String ruleName, NFAState following ) {
+    //System.Console.Out.WriteLine( "adding follow link to rule " + ruleName );
+    // find last link in FOLLOW chain emanating from rule
+    Rule r = grammar.getRule( ruleName );
+    NFAState end = r.stopState;
+    while ( end.transition( 1 ) != null )
+    {
+        end = (NFAState)end.transition( 1 ).target;
+    }
+    if ( end.transition( 0 ) != null )
+    {
+        // already points to a following node
+        // gotta add another node to keep edges to a max of 2
+        NFAState n = factory.newState();
+        Transition e = new Transition( Label.EPSILON, n );
+        end.addTransition( e );
+        end = n;
+    }
+    Transition followEdge = new Transition( Label.EPSILON, following );
+    end.addTransition( followEdge );
+}
+
+protected void finish() {
+    int numEntryPoints = factory.build_EOFStates( grammar.getRules() );
+    if ( numEntryPoints == 0 )
+    {
+        ErrorManager.grammarWarning( ErrorManager.MSG_NO_GRAMMAR_START_RULE,
+                                   grammar,
+                                   null,
+                                   grammar.name );
+    }
+}
+
+@Override
+public void reportError(RecognitionException ex) {
+    if ( inTest > 0 )
+        throw new IllegalStateException(ex);
+
+    Token token = null;
+    if ( ex instanceof MismatchedTokenException )
+    {
+        token = ( (MismatchedTokenException)ex ).token;
+    }
+    else if ( ex instanceof NoViableAltException )
+    {
+        token = ( (NoViableAltException)ex ).token;
+    }
+
+    ErrorManager.syntaxError(
+        ErrorManager.MSG_SYNTAX_ERROR,
+        grammar,
+        token,
+        "buildnfa: " + ex.toString(),
+        ex );
+}
+
+private boolean hasElementOptions(GrammarAST node) {
+    if (node == null)
+        throw new NullPointerException("node");
+    return node.terminalOptions != null && node.terminalOptions.size() > 0;
+}
+}
+
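
Unlike the previous walkers, this converter declares an explicit constructor taking the Grammar, the NFA being filled in, and the NFAFactory, so a driver looks roughly like the sketch below. It is assumption-laden: the NFA(Grammar) and NFAFactory(NFA) constructors are assumptions about org.antlr.analysis, and in practice the tool's Grammar class performs this wiring when it builds its NFAs.

    import org.antlr.analysis.NFA;
    import org.antlr.analysis.NFAFactory;
    import org.antlr.grammar.v3.TreeToNFAConverter;
    import org.antlr.runtime.RecognitionException;
    import org.antlr.runtime.tree.CommonTreeNodeStream;
    import org.antlr.tool.Grammar;
    import org.antlr.tool.GrammarAST;

    public class BuildNFASketch {
        // Sketch only: grammarTree and g come from the earlier definition phases.
        static NFA buildNFA(GrammarAST grammarTree, Grammar g) throws RecognitionException {
            NFA nfa = new NFA(g);                     // assumed constructor
            NFAFactory factory = new NFAFactory(nfa); // assumed constructor
            TreeToNFAConverter converter =
                new TreeToNFAConverter(new CommonTreeNodeStream(grammarTree), g, nfa, factory);
            converter.grammar_(); // entry rule below; its @after action calls finish()
            return nfa;
        }
    }
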
+public
+grammar_
+@after
+{
+	finish();
+}
+	:	(	^( LEXER_GRAMMAR grammarSpec )
+		|	^( PARSER_GRAMMAR grammarSpec )
+		|	^( TREE_GRAMMAR grammarSpec )
+		|	^( COMBINED_GRAMMAR grammarSpec )
+		)
+	;
+
+attrScope
+	:	^( 'scope' ID ( ^(AMPERSAND .*) )* ACTION )
+	;
+
+grammarSpec
+	:	ID
+		(cmt=DOC_COMMENT)?
+		( ^(OPTIONS .*) )?
+		( ^(IMPORT .*) )?
+		( ^(TOKENS .*) )?
+		(attrScope)*
+		( ^(AMPERSAND .*) )* // skip actions
+		rules
+	;
+
+rules
+	:	(rule | ^(PREC_RULE .*))+
+	;
+
+rule
+	:	^(	RULE id=ID
+			{
+				currentRuleName = $id.text;
+				factory.setCurrentRule(grammar.getLocallyDefinedRule(currentRuleName));
+			}
+			(modifier)?
+			^(ARG (ARG_ACTION)?)
+			^(RET (ARG_ACTION)?)
+			(throwsSpec)?
+			( ^(OPTIONS .*) )?
+			( ruleScopeSpec )?
+			( ^(AMPERSAND .*) )*
+			b=block
+			(exceptionGroup)?
+			EOR
+			{
+				StateCluster g = $b.g;
+				if ($b.start.getSetValue() != null)
+				{
+					// if block comes back as a set not BLOCK, make it
+					// a single ALT block
+					g = factory.build_AlternativeBlockFromSet(g);
+				}
+				if (Rule.getRuleType(currentRuleName) == Grammar.PARSER || grammar.type==Grammar.LEXER)
+				{
+					// attach start node to block for this rule
+					Rule thisR = grammar.getLocallyDefinedRule(currentRuleName);
+					NFAState start = thisR.startState;
+					start.associatedASTNode = $id;
+					start.addTransition(new Transition(Label.EPSILON, g.left));
+
+					// track decision if > 1 alts
+					if ( grammar.getNumberOfAltsForDecisionNFA(g.left)>1 )
+					{
+						g.left.setDescription(grammar.grammarTreeToString($start, false));
+						g.left.setDecisionASTNode($b.start);
+						int d = grammar.assignDecisionNumber( g.left );
+						grammar.setDecisionNFA( d, g.left );
+						grammar.setDecisionBlockAST(d, $b.start);
+					}
+
+					// hook to end of rule node
+					NFAState end = thisR.stopState;
+					g.right.addTransition(new Transition(Label.EPSILON,end));
+				}
+			}
+		)
+	;
+
+modifier
+	:	'protected'
+	|	'public'
+	|	'private'
+	|	'fragment'
+	;
+
+throwsSpec
+	:	^('throws' ID+)
+	;
+
+ruleScopeSpec
+	:	^( 'scope' ( ^(AMPERSAND .*) )* (ACTION)? ( ID )* )
+	;
+
+block returns [StateCluster g = null]
+@init
+{
+	List<StateCluster> alts = new ArrayList<StateCluster>();
+	this.blockLevel++;
+	if ( this.blockLevel==1 )
+		this.outerAltNum=1;
+}
+	:	{grammar.isValidSet(this,$start) &&
+		 !currentRuleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME)}? =>
+		set {$g = $set.g;}
+
+	|	^(	BLOCK ( ^(OPTIONS .*) )?
+			(	a=alternative rewrite
+				{
+					alts.add($a.g);
+				}
+				{{
+					if ( blockLevel == 1 )
+						outerAltNum++;
+				}}
+			)+
+			EOB
+		)
+		{$g = factory.build_AlternativeBlock(alts);}
+	;
+finally { blockLevel--; }
+
+alternative returns [StateCluster g=null]
+	:	^( ALT (e=element {$g = factory.build_AB($g,$e.g);} )+ EOA )
+		{
+			if ($g==null) { // if alt was a list of actions or whatever
+				$g = factory.build_Epsilon();
+			}
+			else {
+				factory.optimizeAlternative($g);
+			}
+		}
+	;
+
+exceptionGroup
+	:	( exceptionHandler )+ (finallyClause)?
+	|	finallyClause
+	;
+
+exceptionHandler
+	:    ^('catch' ARG_ACTION ACTION)
+	;
+
+finallyClause
+	:    ^('finally' ACTION)
+	;
+
+rewrite
+	:	^(	REWRITES
+			(
+				{
+					if ( grammar.getOption("output")==null )
+					{
+						ErrorManager.grammarError(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
+												  grammar, $start.getToken(), currentRuleName);
+					}
+				}
+				^(REWRITE .*)
+			)*
+		)
+	|
+	;
+
+element returns [StateCluster g=null]
+	:   ^(ROOT e=element {$g = $e.g;})
+	|   ^(BANG e=element {$g = $e.g;})
+	|	^(ASSIGN ID e=element {$g = $e.g;})
+	|	^(PLUS_ASSIGN ID e=element {$g = $e.g;})
+	|   ^(RANGE a=atom[null] b=atom[null])
+		{$g = factory.build_Range(grammar.getTokenType($a.text),
+								 grammar.getTokenType($b.text));}
+	|   ^(CHAR_RANGE c1=CHAR_LITERAL c2=CHAR_LITERAL)
+		{
+		if ( grammar.type==Grammar.LEXER ) {
+			$g = factory.build_CharRange($c1.text, $c2.text);
+		}
+		}
+	|   atom_or_notatom {$g = $atom_or_notatom.g;}
+	|   ebnf {$g = $ebnf.g;}
+	|   tree_ {$g = $tree_.g;}
+	|   ^( SYNPRED block )
+	|   ACTION {$g = factory.build_Action($ACTION);}
+	|   FORCED_ACTION {$g = factory.build_Action($FORCED_ACTION);}
+	|   pred=SEMPRED {$g = factory.build_SemanticPredicate($pred);}
+	|   spred=SYN_SEMPRED {$g = factory.build_SemanticPredicate($spred);}
+	|   ^(bpred=BACKTRACK_SEMPRED .*) {$g = factory.build_SemanticPredicate($bpred);}
+	|   gpred=GATED_SEMPRED {$g = factory.build_SemanticPredicate($gpred);}
+	|   EPSILON {$g = factory.build_Epsilon();}
+	;
+
+ebnf returns [StateCluster g=null]
+@init
+{
+	GrammarAST blk = $start;
+	if (blk.getType() != BLOCK) {
+		blk = (GrammarAST)blk.getChild(0);
+	}
+	GrammarAST eob = blk.getLastChild();
+}
+	:	{grammar.isValidSet(this,$start)}? => set {$g = $set.g;}
+
+	|	b=block
+		{
+			// track decision if > 1 alts
+			if ( grammar.getNumberOfAltsForDecisionNFA($b.g.left)>1 )
+			{
+				$b.g.left.setDescription(grammar.grammarTreeToString(blk, false));
+				$b.g.left.setDecisionASTNode(blk);
+				int d = grammar.assignDecisionNumber( $b.g.left );
+				grammar.setDecisionNFA( d, $b.g.left );
+				grammar.setDecisionBlockAST(d, blk);
+			}
+			$g = $b.g;
+		}
+	|	^( OPTIONAL b=block )
+		{
+			StateCluster bg = $b.g;
+			if ( blk.getSetValue()!=null )
+			{
+				// if block comes back SET not BLOCK, make it
+				// a single ALT block
+				bg = factory.build_AlternativeBlockFromSet(bg);
+			}
+			$g = factory.build_Aoptional(bg);
+			$g.left.setDescription(grammar.grammarTreeToString($start, false));
+			// there is always at least one alt even if block has just 1 alt
+			int d = grammar.assignDecisionNumber( $g.left );
+			grammar.setDecisionNFA(d, $g.left);
+			grammar.setDecisionBlockAST(d, blk);
+			$g.left.setDecisionASTNode($start);
+		}
+	|	^( CLOSURE b=block )
+		{
+			StateCluster bg = $b.g;
+			if ( blk.getSetValue()!=null )
+			{
+				bg = factory.build_AlternativeBlockFromSet(bg);
+			}
+			$g = factory.build_Astar(bg);
+			// track the loop back / exit decision point
+			bg.right.setDescription("()* loopback of "+grammar.grammarTreeToString($start, false));
+			int d = grammar.assignDecisionNumber( bg.right );
+			grammar.setDecisionNFA(d, bg.right);
+			grammar.setDecisionBlockAST(d, blk);
+			bg.right.setDecisionASTNode(eob);
+			// make block entry state also have same decision for interpreting grammar
+			NFAState altBlockState = (NFAState)$g.left.transition(0).target;
+			altBlockState.setDecisionASTNode($start);
+			altBlockState.setDecisionNumber(d);
+			$g.left.setDecisionNumber(d); // this is the bypass decision (2 alts)
+			$g.left.setDecisionASTNode($start);
+		}
+	|	^( POSITIVE_CLOSURE b=block )
+		{
+			StateCluster bg = $b.g;
+			if ( blk.getSetValue()!=null )
+			{
+				bg = factory.build_AlternativeBlockFromSet(bg);
+			}
+			$g = factory.build_Aplus(bg);
+			// don't make a decision on left edge, can reuse loop end decision
+			// track the loop back / exit decision point
+			bg.right.setDescription("()+ loopback of "+grammar.grammarTreeToString($start, false));
+			int d = grammar.assignDecisionNumber( bg.right );
+			grammar.setDecisionNFA(d, bg.right);
+			grammar.setDecisionBlockAST(d, blk);
+			bg.right.setDecisionASTNode(eob);
+			// make block entry state also have same decision for interpreting grammar
+			NFAState altBlockState = (NFAState)$g.left.transition(0).target;
+			altBlockState.setDecisionASTNode($start);
+			altBlockState.setDecisionNumber(d);
+		}
+	;
+
+tree_ returns [StateCluster g=null]
+@init
+{
+	StateCluster down=null, up=null;
+}
+	:	^(	TREE_BEGIN
+			e=element { $g = $e.g; }
+			{
+				down = factory.build_Atom(Label.DOWN, $e.start);
+				// TODO set following states for imaginary nodes?
+				//el.followingNFAState = down.right;
+				$g = factory.build_AB($g,down);
+			}
+			( e=element {$g = factory.build_AB($g,$e.g);} )*
+			{
+				up = factory.build_Atom(Label.UP, $e.start);
+				//el.followingNFAState = up.right;
+				$g = factory.build_AB($g,up);
+				// tree roots point at right edge of DOWN for LOOK computation later
+				$start.NFATreeDownState = down.left;
+			}
+		)
+	;
+
+atom_or_notatom returns [StateCluster g=null]
+	:	atom[null] {$g = $atom.g;}
+	|	^(	n=NOT
+			(	c=CHAR_LITERAL (ast1=ast_suffix)?
+				{
+					int ttype=0;
+					if ( grammar.type==Grammar.LEXER )
+					{
+						ttype = Grammar.getCharValueFromGrammarCharLiteral($c.text);
+					}
+					else
+					{
+						ttype = grammar.getTokenType($c.text);
+					}
+					IntSet notAtom = grammar.complement(ttype);
+					if ( notAtom.isNil() )
+					{
+						ErrorManager.grammarError(
+							ErrorManager.MSG_EMPTY_COMPLEMENT,
+							grammar,
+							$c.getToken(),
+							$c.text);
+					}
+					$g=factory.build_Set(notAtom,$n);
+				}
+			|	t=TOKEN_REF (ast3=ast_suffix)?
+				{
+					int ttype=0;
+					IntSet notAtom = null;
+					if ( grammar.type==Grammar.LEXER )
+					{
+						notAtom = grammar.getSetFromRule(this,$t.text);
+						if ( notAtom==null )
+						{
+							ErrorManager.grammarError(
+								ErrorManager.MSG_RULE_INVALID_SET,
+								grammar,
+								$t.getToken(),
+								$t.text);
+						}
+						else
+						{
+							notAtom = grammar.complement(notAtom);
+						}
+					}
+					else
+					{
+						ttype = grammar.getTokenType($t.text);
+						notAtom = grammar.complement(ttype);
+					}
+					if ( notAtom==null || notAtom.isNil() )
+					{
+						ErrorManager.grammarError(
+							ErrorManager.MSG_EMPTY_COMPLEMENT,
+							grammar,
+							$t.getToken(),
+							$t.text);
+					}
+					$g=factory.build_Set(notAtom,$n);
+				}
+			|	set {$g = $set.g;}
+				{
+					GrammarAST stNode = (GrammarAST)$n.getChild(0);
+					//IntSet notSet = grammar.complement(stNode.getSetValue());
+					// let code generator complement the sets
+					IntSet s = stNode.getSetValue();
+					stNode.setSetValue(s);
+					// let code gen do the complement again; here we compute
+					// for NFA construction
+					s = grammar.complement(s);
+					if ( s.isNil() )
+					{
+						ErrorManager.grammarError(
+							ErrorManager.MSG_EMPTY_COMPLEMENT,
+							grammar,
+							$n.getToken());
+					}
+					$g=factory.build_Set(s,$n);
+				}
+			)
+			{$n.followingNFAState = $g.right;}
+		)
+	;
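+
+// Illustrative effect of the NOT alternative above (hypothetical grammar text): in a
+// lexer, ~'a' builds the complement of 'a' over the character vocabulary, while in a
+// parser ~ID complements over the token types; a complement that leaves nothing is
+// reported as MSG_EMPTY_COMPLEMENT.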
+
+atom[String scopeName] returns [StateCluster g=null]
+	:	^( r=RULE_REF (rarg=ARG_ACTION)? (as1=ast_suffix)? )
+		{
+			NFAState start = grammar.getRuleStartState(scopeName,$r.text);
+			if ( start!=null )
+			{
+				Rule rr = grammar.getRule(scopeName,$r.text);
+				$g = factory.build_RuleRef(rr, start);
+				r.followingNFAState = $g.right;
+				r.NFAStartState = $g.left;
+				if ( $g.left.transition(0) instanceof RuleClosureTransition
+					&& grammar.type!=Grammar.LEXER )
+				{
+					addFollowTransition($r.text, $g.right);
+				}
+				// else rule ref got inlined to a set
+			}
+		}
+
+	|	^( t=TOKEN_REF  (targ=ARG_ACTION)? (as2=ast_suffix)? )
+		{
+			if ( grammar.type==Grammar.LEXER )
+			{
+				NFAState start = grammar.getRuleStartState(scopeName,$t.text);
+				if ( start!=null )
+				{
+					Rule rr = grammar.getRule(scopeName,t.getText());
+					$g = factory.build_RuleRef(rr, start);
+					t.NFAStartState = $g.left;
+					// don't add FOLLOW transitions in the lexer;
+					// only exact context should be used.
+				}
+			}
+			else
+			{
+				$g = factory.build_Atom(t);
+				t.followingNFAState = $g.right;
+			}
+		}
+
+	|	^( c=CHAR_LITERAL  (as3=ast_suffix)? )
+		{
+			if ( grammar.type==Grammar.LEXER )
+			{
+				$g = factory.build_CharLiteralAtom(c);
+			}
+			else
+			{
+				$g = factory.build_Atom(c);
+				c.followingNFAState = $g.right;
+			}
+		}
+
+	|	^( s=STRING_LITERAL  (as4=ast_suffix)? )
+		{
+			if ( grammar.type==Grammar.LEXER )
+			{
+				$g = factory.build_StringLiteralAtom(s);
+			}
+			else
+			{
+				$g = factory.build_Atom(s);
+				s.followingNFAState = $g.right;
+			}
+		}
+
+	|	^(	w=WILDCARD (as5=ast_suffix)? )
+			{
+				if ( nfa.grammar.type == Grammar.TREE_PARSER
+					&& (w.getChildIndex() > 0 || w.getParent().getChild(1).getType() == EOA) )
+				{
+					$g = factory.build_WildcardTree( $w );
+				}
+				else
+				{
+					$g = factory.build_Wildcard( $w );
+				}
+			}
+
+	|	^( DOT scope_=ID a=atom[$scope_.text] {$g = $a.g;} ) // scope override
+	;
+
+ast_suffix
+	:	ROOT
+	|	BANG
+	;
+
+set returns [StateCluster g=null]
+@init
+{
+	IntSet elements=new IntervalSet();
+	if ( state.backtracking == 0 )
+		$start.setSetValue(elements); // track set for use by code gen
+}
+	:	^( b=BLOCK
+		   (^(ALT ( ^(BACKTRACK_SEMPRED .*) )? setElement[elements] EOA))+
+		   EOB
+		 )
+		{
+		$g = factory.build_Set(elements,$b);
+		$b.followingNFAState = $g.right;
+		$b.setSetValue(elements); // track set value of this block
+		}
+		//{System.out.println("set elements="+elements.toString(grammar));}
+	;
+
+setRule returns [IntSet elements=new IntervalSet()]
+@init
+{
+	IntSet s=null;
+}
+	:	^( RULE id=ID (modifier)? ARG RET ( ^(OPTIONS .*) )? ( ruleScopeSpec )?
+			( ^(AMPERSAND .*) )*
+			^( BLOCK ( ^(OPTIONS .*) )?
+			   ( ^(ALT (BACKTRACK_SEMPRED)? setElement[elements] EOA) )+
+			   EOB
+			 )
+			(exceptionGroup)?
+			EOR
+		 )
+	;
+catch[RecognitionException re] { throw re; }
+
+setElement[IntSet elements]
+@init
+{
+	int ttype;
+	IntSet ns=null;
+}
+	:	c=CHAR_LITERAL
+		{
+			if ( grammar.type==Grammar.LEXER )
+			{
+				ttype = Grammar.getCharValueFromGrammarCharLiteral($c.text);
+			}
+			else
+			{
+				ttype = grammar.getTokenType($c.text);
+			}
+			if ( elements.member(ttype) )
+			{
+				ErrorManager.grammarError(
+					ErrorManager.MSG_DUPLICATE_SET_ENTRY,
+					grammar,
+					$c.getToken(),
+					$c.text);
+			}
+			elements.add(ttype);
+		}
+	|	t=TOKEN_REF
+		{
+			if ( grammar.type==Grammar.LEXER )
+			{
+				// recursively will invoke this rule to match elements in target rule ref
+				IntSet ruleSet = grammar.getSetFromRule(this,$t.text);
+				if ( ruleSet==null )
+				{
+					ErrorManager.grammarError(
+						ErrorManager.MSG_RULE_INVALID_SET,
+						grammar,
+						$t.getToken(),
+						$t.text);
+				}
+				else
+				{
+					elements.addAll(ruleSet);
+				}
+			}
+			else
+			{
+				ttype = grammar.getTokenType($t.text);
+				if ( elements.member(ttype) )
+				{
+					ErrorManager.grammarError(
+						ErrorManager.MSG_DUPLICATE_SET_ENTRY,
+						grammar,
+						$t.getToken(),
+						$t.text);
+				}
+				elements.add(ttype);
+			}
+		}
+
+	|	s=STRING_LITERAL
+		{
+			ttype = grammar.getTokenType($s.text);
+			if ( elements.member(ttype) )
+			{
+				ErrorManager.grammarError(
+					ErrorManager.MSG_DUPLICATE_SET_ENTRY,
+					grammar,
+					$s.getToken(),
+					$s.text);
+			}
+			elements.add(ttype);
+		}
+	|	^(CHAR_RANGE c1=CHAR_LITERAL c2=CHAR_LITERAL)
+		{
+			if ( grammar.type==Grammar.LEXER )
+			{
+				int a = Grammar.getCharValueFromGrammarCharLiteral($c1.text);
+				int b = Grammar.getCharValueFromGrammarCharLiteral($c2.text);
+				elements.addAll(IntervalSet.of(a,b));
+			}
+		}
+
+	|	gset=set
+		{
+			Transition setTrans = $gset.g.left.transition(0);
+			elements.addAll(setTrans.label.getSet());
+		}
+
+	|	^(	NOT {ns=new IntervalSet();}
+			setElement[ns]
+			{
+				IntSet not = grammar.complement(ns);
+				elements.addAll(not);
+			}
+		)
+	;
+
+/** Check to see if this block can be a set.  Can't have actions
+ *  etc...  Also can't be in a rule with a rewrite as we need
+ *  to track what's inside set for use in rewrite.
+ *
+ *  This should only be called from the helper function in TreeToNFAConverterHelper.cs
+ *  and from the rule testSetElement below.
+ */
+testBlockAsSet returns [int alts=0]
+options { backtrack = true; }
+@init
+{
+	inTest++;
+}
+	:	^(	BLOCK
+			(	^(ALT (BACKTRACK_SEMPRED)? testSetElement {{$alts += $testSetElement.alts;}} EOA)
+			)+
+			EOB
+		)
+	;
+catch[RecognitionException re] { throw re; }
+finally { inTest--; }
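+
+// Illustrative sketch (hypothetical subrule): a block such as ('0'..'9'|'a'..'f'|'A'..'F')
+// passes this test because every alternative is a single set element, whereas a block
+// whose alternatives carry actions or element options, or that sits in a rule with a
+// rewrite, cannot be collapsed into a set.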
+
+testSetRule returns [int alts=0]
+@init
+{
+	inTest++;
+}
+	:	^(	RULE id=ID (modifier)? ARG RET ( ^(OPTIONS .*) )? ( ruleScopeSpec )?
+			( ^(AMPERSAND .*) )*
+			^(	BLOCK
+				(	^(ALT (BACKTRACK_SEMPRED)? testSetElement {{$alts += $testSetElement.alts;}} EOA)
+				)+
+				EOB
+			)
+			(exceptionGroup)?
+			EOR
+		)
+	;
+catch[RecognitionException re] { throw re; }
+finally { inTest--; }
+
+/** Match just an element; no ast suffix etc.. */
+testSetElement returns [int alts=1]
+	:	c=CHAR_LITERAL {!hasElementOptions($c)}?
+	|	t=TOKEN_REF {!hasElementOptions($t)}?
+		{{
+			if ( grammar.type==Grammar.LEXER )
+			{
+				Rule rule = grammar.getRule($t.text);
+				if ( rule==null )
+				{
+					//throw new RecognitionException("invalid rule");
+					throw new RecognitionException();
+				}
+				// recursively will invoke this rule to match elements in target rule ref
+				$alts += testSetRule(rule.tree);
+			}
+		}}
+	|   {grammar.type!=Grammar.LEXER}? => s=STRING_LITERAL
+	|	^(CHAR_RANGE c1=CHAR_LITERAL c2=CHAR_LITERAL)
+		{{ $alts = IntervalSet.of( Grammar.getCharValueFromGrammarCharLiteral($c1.text), Grammar.getCharValueFromGrammarCharLiteral($c2.text) ).size(); }}
+	|   testBlockAsSet
+		{{ $alts = $testBlockAsSet.alts; }}
+	|   ^( NOT tse=testSetElement )
+		{{ $alts = grammar.getTokenTypes().size() - $tse.alts; }}
+	;
+catch[RecognitionException re] { throw re; }
diff --git a/tool/src/main/java/org/antlr/Tool.java b/tool/src/main/java/org/antlr/Tool.java
new file mode 100644
index 0000000..0d8709b
--- /dev/null
+++ b/tool/src/main/java/org/antlr/Tool.java
@@ -0,0 +1,1407 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr;
+
+import org.antlr.analysis.*;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.misc.Graph;
+import org.antlr.runtime.misc.Stats;
+import org.antlr.tool.*;
+import org.stringtemplate.v4.STGroup;
+
+import java.io.*;
+import java.util.*;
+
+/** The main ANTLR entry point.  Read a grammar and generate a parser. */
+public class Tool {
+
+    public final Properties antlrSettings = new Properties();
+
+	public final String VERSION;
+	{
+		String version = Tool.class.getPackage().getImplementationVersion();
+		VERSION = version != null ? version : "3.x";
+	}
+
+    public static final String UNINITIALIZED_DIR = "<unset-dir>";
+    private List<String> grammarFileNames = new ArrayList<String>();
+    private boolean generate_NFA_dot = false;
+    private boolean generate_DFA_dot = false;
+    private String outputDirectory = ".";
+    private boolean haveOutputDir = false;
+    private String inputDirectory = null;
+    private String parentGrammarDirectory;
+    private String grammarOutputDirectory;
+    private boolean haveInputDir = false;
+    private String libDirectory = ".";
+    private boolean debug = false;
+    private boolean trace = false;
+    private boolean profile = false;
+    private boolean report = false;
+    private boolean printGrammar = false;
+    private boolean depend = false;
+    private boolean forceAllFilesToOutputDir = false;
+    private boolean forceRelativeOutput = false;
+    protected boolean deleteTempLexer = true;
+    private boolean verbose = false;
+    /** Don't process grammar file if generated files are newer than grammar */
+    private boolean make = false;
+    private boolean showBanner = true;
+	private static boolean exitNow = false;
+	private static boolean return_dont_exit = false;
+
+
+	public String forcedLanguageOption; // -language L on command line
+
+    // The internal options are for my use on the command line during dev
+    //
+    public static boolean internalOption_PrintGrammarTree = false;
+    public static boolean internalOption_PrintDFA = false;
+    public static boolean internalOption_ShowNFAConfigsInDFA = false;
+    public static boolean internalOption_watchNFAConversion = false;
+
+    /**
+     * A list of dependency generators that are accumulated as (and if) the
+     * tool is required to sort the provided grammars into build dependency order.
+    protected Map<String, BuildDependencyGenerator> buildDependencyGenerators;
+     */
+
+    public static void main(String[] args) {
+        Tool antlr = new Tool(args);
+
+        if (!exitNow) {
+            antlr.process();
+			if ( return_dont_exit ) return;
+            if (ErrorManager.getNumErrors() > 0) {
+                System.exit(1);
+            }
+            System.exit(0);
+        }
+    }
+
+    /**
+     * Load the properties file org/antlr/antlr.properties and populate any
+     * variables that must be initialized from it, such as the version of ANTLR.
+     */
+    private void loadResources() {
+        InputStream in;
+        in = this.getClass().getResourceAsStream("antlr.properties");
+
+        // If we found the resource, then load it, otherwise revert to the
+        // defaults.
+        //
+        if (in != null) {
+            try {
+                // Load the resources into the map
+                //
+                antlrSettings.load(in);
+
+                // Set any variables that we need to populate from the resources
+                //
+//                VERSION = antlrSettings.getProperty("antlr.version");
+            } catch (Exception e) {
+                // Do nothing, just leave the defaults in place
+            }
+        }
+    }
+
+    public Tool() {
+        loadResources();
+    }
+
+	@SuppressWarnings("OverridableMethodCallInConstructor")
+    public Tool(String[] args) {
+        loadResources();
+
+        // Set all the options and pick up all the named grammar files
+        processArgs(args);
+    }
+
+    public void processArgs(String[] args) {
+
+        if (isVerbose()) {
+            ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
+            showBanner = false;
+        }
+
+        if (args == null || args.length == 0) {
+            help();
+            return;
+        }
+        for (int i = 0; i < args.length; i++) {
+            if (args[i].equals("-o") || args[i].equals("-fo")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing output directory with -fo/-o option; ignoring");
+                }
+                else {
+                    if (args[i].equals("-fo")) { // force output into dir
+                        setForceAllFilesToOutputDir(true);
+                    }
+                    i++;
+                    outputDirectory = args[i];
+                    if (outputDirectory.endsWith("/") ||
+                        outputDirectory.endsWith("\\")) {
+                        outputDirectory =
+                            outputDirectory.substring(0, getOutputDirectory().length() - 1);
+                    }
+                    File outDir = new File(outputDirectory);
+                    haveOutputDir = true;
+                    if (outDir.exists() && !outDir.isDirectory()) {
+                        ErrorManager.error(ErrorManager.MSG_OUTPUT_DIR_IS_FILE, outputDirectory);
+                        setLibDirectory(".");
+                    }
+                }
+            }
+			else if (args[i].equals("-lib")) {
+				if (i + 1 >= args.length) {
+					System.err.println("missing library directory with -lib option; ignoring");
+				}
+				else {
+					i++;
+					setLibDirectory(args[i]);
+					if (getLibraryDirectory().endsWith("/") ||
+						getLibraryDirectory().endsWith("\\")) {
+						setLibDirectory(getLibraryDirectory().substring(0, getLibraryDirectory().length() - 1));
+					}
+					File outDir = new File(getLibraryDirectory());
+					if (!outDir.exists()) {
+						ErrorManager.error(ErrorManager.MSG_DIR_NOT_FOUND, getLibraryDirectory());
+						setLibDirectory(".");
+					}
+				}
+			}
+			else if (args[i].equals("-language")) {
+				if (i + 1 >= args.length) {
+					System.err.println("missing language name; ignoring");
+				}
+				else {
+					i++;
+					forcedLanguageOption = args[i];
+				}
+			}
+            else if (args[i].equals("-nfa")) {
+                setGenerate_NFA_dot(true);
+            }
+            else if (args[i].equals("-dfa")) {
+                setGenerate_DFA_dot(true);
+            }
+            else if (args[i].equals("-debug")) {
+                setDebug(true);
+            }
+            else if (args[i].equals("-trace")) {
+                setTrace(true);
+            }
+            else if (args[i].equals("-report")) {
+                setReport(true);
+            }
+            else if (args[i].equals("-profile")) {
+                setProfile(true);
+            }
+            else if (args[i].equals("-print")) {
+                setPrintGrammar(true);
+            }
+            else if (args[i].equals("-depend")) {
+                setDepend(true);
+            }
+            else if (args[i].equals("-verbose")) {
+                setVerbose(true);
+            }
+            else if (args[i].equals("-version")) {
+                version();
+                exitNow = true;
+            }
+            else if (args[i].equals("-make")) {
+                setMake(true);
+            }
+            else if (args[i].equals("-message-format")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing output format with -message-format option; using default");
+                }
+                else {
+                    i++;
+                    ErrorManager.setFormat(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xgrtree")) {
+                internalOption_PrintGrammarTree = true; // print grammar tree
+            }
+            else if (args[i].equals("-Xdfa")) {
+                internalOption_PrintDFA = true;
+            }
+            else if (args[i].equals("-Xnoprune")) {
+                DFAOptimizer.PRUNE_EBNF_EXIT_BRANCHES = false;
+            }
+            else if (args[i].equals("-Xnocollapse")) {
+                DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES = false;
+            }
+            else if (args[i].equals("-Xdbgconversion")) {
+                NFAToDFAConverter.debug = true;
+            }
+            else if (args[i].equals("-Xmultithreaded")) {
+                NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION = false;
+            }
+            else if (args[i].equals("-Xnomergestopstates")) {
+                DFAOptimizer.MERGE_STOP_STATES = false;
+            }
+            else if (args[i].equals("-Xdfaverbose")) {
+                internalOption_ShowNFAConfigsInDFA = true;
+            }
+            else if (args[i].equals("-Xwatchconversion")) {
+                internalOption_watchNFAConversion = true;
+            }
+            else if (args[i].equals("-XdbgST")) {
+                CodeGenerator.LAUNCH_ST_INSPECTOR = true;
+				STGroup.trackCreationEvents = true;
+				return_dont_exit = true;
+            }
+            else if (args[i].equals("-Xmaxinlinedfastates")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing max inline dfa states -Xmaxinlinedfastates option; ignoring");
+                }
+                else {
+                    i++;
+                    CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xmaxswitchcaselabels")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing max switch case labels -Xmaxswitchcaselabels option; ignoring");
+                }
+                else {
+                    i++;
+                    CodeGenerator.MAX_SWITCH_CASE_LABELS = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xminswitchalts")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing min switch alternatives -Xminswitchalts option; ignoring");
+                }
+                else {
+                    i++;
+                    CodeGenerator.MIN_SWITCH_ALTS = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xm")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing max recursion with -Xm option; ignoring");
+                }
+                else {
+                    i++;
+                    NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xmaxdfaedges")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing max number of edges with -Xmaxdfaedges option; ignoring");
+                }
+                else {
+                    i++;
+                    DFA.MAX_STATE_TRANSITIONS_FOR_TABLE = Integer.parseInt(args[i]);
+                }
+            }
+            else if (args[i].equals("-Xconversiontimeout")) {
+                if (i + 1 >= args.length) {
+                    System.err.println("missing max time in ms -Xconversiontimeout option; ignoring");
+                }
+                else {
+                    i++;
+                    DFA.MAX_TIME_PER_DFA_CREATION = Integer.parseInt(args[i]);
+                }
+            }
+			else if (args[i].equals("-Xnfastates")) {
+				DecisionProbe.verbose = true;
+			}
+			else if (args[i].equals("-Xsavelexer")) {
+				deleteTempLexer = false;
+			}
+            else if (args[i].equals("-X")) {
+                Xhelp();
+            }
+            else {
+                if (args[i].charAt(0) != '-') {
+                    // Must be the grammar file
+                    addGrammarFile(args[i]);
+                }
+            }
+        }
+    }
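+
+    // Illustrative invocation (hypothetical paths) exercising the options parsed above:
+    //   java org.antlr.Tool -o build/gen -lib tokens -verbose -report grammar/Expr.g
+    // -o sets the output directory, -lib the directory searched for .tokens files and
+    // imported grammars, and grammar/Expr.g is added as a grammar file because it does
+    // not start with '-'.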
+
+    /*
+    protected void checkForInvalidArguments(String[] args, BitSet cmdLineArgValid) {
+    // check for invalid command line args
+    for (int a = 0; a < args.length; a++) {
+    if (!cmdLineArgValid.member(a)) {
+    System.err.println("invalid command-line argument: " + args[a] + "; ignored");
+    }
+    }
+    }
+     */
+
+    /**
+     * Checks to see if the list of outputFiles all exist, and have
+     * last-modified timestamps which are later than the last-modified
+     * timestamp of all the grammar files involved in building the output
+     * (imports must be checked). If these conditions hold, the method
+     * returns false; otherwise, it returns true.
+     *
+     * @param grammarFileName The grammar file we are checking
+     */
+    public boolean buildRequired(String grammarFileName)
+        throws IOException
+    {
+        BuildDependencyGenerator bd =
+            new BuildDependencyGenerator(this, grammarFileName);
+
+        List<File> outputFiles = bd.getGeneratedFileList();
+        List<File> inputFiles = bd.getDependenciesFileList();
+        // Note that input directory must be set to use buildRequired
+        File grammarFile;
+        if (haveInputDir) {
+            grammarFile = new File(inputDirectory, grammarFileName);
+        }
+        else {
+            grammarFile = new File(grammarFileName);
+        }
+        long grammarLastModified = grammarFile.lastModified();
+        for (File outputFile : outputFiles) {
+            if (!outputFile.exists() || grammarLastModified > outputFile.lastModified()) {
+                // One of the output files does not exist or is out of date, so we must build it
+				if (isVerbose()) {
+					if (!outputFile.exists()) {
+						System.out.println("Output file " + outputFile + " does not exist: must build " + grammarFile);
+					}
+					else {
+						System.out.println("Output file " + outputFile + " is not up-to-date: must build " + grammarFile);
+					}
+				}
+
+                return true;
+            }
+            // Check all of the imported grammars and see if any of these are younger
+            // than any of the output files.
+            if (inputFiles != null) {
+                for (File inputFile : inputFiles) {
+
+                    if (inputFile.lastModified() > outputFile.lastModified()) {
+                        // One of the imported grammar files has been updated so we must build
+						if (isVerbose()) {
+							System.out.println("Input file " + inputFile + " is newer than output: must rebuild " + grammarFile);
+						}
+
+                        return true;
+                    }
+                }
+            }
+        }
+        if (isVerbose()) {
+            System.out.println("Grammar " + grammarFile + " is up to date - build skipped");
+        }
+        return false;
+    }
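+
+    // Usage sketch (hypothetical file names): in -make mode, buildRequired("T.g") returns
+    // false when every generated file (e.g. TParser.java and T.tokens) is newer than T.g
+    // and all of its imported grammars, so process() skips the grammar; any missing or
+    // stale output file forces a rebuild.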
+
+    public void process() {
+        boolean exceptionWhenWritingLexerFile = false;
+        String lexerGrammarFileName;		// necessary at this scope to have access in the catch below
+
+        // Have to be tricky here when Maven or build tools call in and must new Tool()
+        // before setting options. The banner won't display that way!
+        if (isVerbose() && showBanner) {
+            ErrorManager.info("ANTLR Parser Generator  Version " + VERSION);
+            showBanner = false;
+        }
+
+        try {
+            sortGrammarFiles(); // update grammarFileNames
+        }
+        catch (Exception e) {
+            ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR,e);
+        }
+        catch (Error e) {
+            ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, e);
+        }
+
+        for (String grammarFileName : grammarFileNames) {
+            // If we are in make mode (to support build tools like Maven) and the
+            // file is already up to date, then we do not build it (and in verbose mode
+            // we will say so).
+            if (make) {
+                try {
+                    if ( !buildRequired(grammarFileName) ) continue;
+                }
+                catch (Exception e) {
+                    ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR,e);
+                }
+            }
+
+            if (isVerbose() && !isDepend()) {
+                System.out.println(grammarFileName);
+            }
+            try {
+                if (isDepend()) {
+                    BuildDependencyGenerator dep =
+                        new BuildDependencyGenerator(this, grammarFileName);
+                    /*
+                    List outputFiles = dep.getGeneratedFileList();
+                    List dependents = dep.getDependenciesFileList();
+                    System.out.println("output: "+outputFiles);
+                    System.out.println("dependents: "+dependents);
+                     */
+                    System.out.println(dep.getDependencies().render());
+                    continue;
+                }
+
+                Grammar rootGrammar = getRootGrammar(grammarFileName);
+                // we now have all grammars read in as ASTs
+                // (i.e., root and all delegates)
+				rootGrammar.composite.assignTokenTypes();
+				//rootGrammar.composite.translateLeftRecursiveRules();
+				rootGrammar.addRulesForSyntacticPredicates();
+				rootGrammar.composite.defineGrammarSymbols();
+                rootGrammar.composite.createNFAs();
+
+                generateRecognizer(rootGrammar);
+
+                if (isPrintGrammar()) {
+                    rootGrammar.printGrammar(System.out);
+                }
+
+                if (isReport()) {
+					GrammarReport2 greport = new GrammarReport2(rootGrammar);
+					System.out.print(greport.toString());
+//                    GrammarReport greport = new GrammarReport(rootGrammar);
+//                    System.out.println(greport.toString());
+//                    // print out a backtracking report too (that is not encoded into log)
+//                    System.out.println(greport.getBacktrackingReport());
+                }
+                if (isProfile()) {
+                    GrammarReport greport = new GrammarReport(rootGrammar);
+                    Stats.writeReport(GrammarReport.GRAMMAR_STATS_FILENAME,
+                                      greport.toNotifyString());
+                }
+
+                // now handle the lexer if one was created for a merged spec
+                String lexerGrammarStr = rootGrammar.getLexerGrammar();
+                //System.out.println("lexer rootGrammar:\n"+lexerGrammarStr);
+                if (rootGrammar.type == Grammar.COMBINED && lexerGrammarStr != null) {
+                    lexerGrammarFileName = rootGrammar.getImplicitlyGeneratedLexerFileName();
+                    try {
+                        Writer w = getOutputFile(rootGrammar, lexerGrammarFileName);
+                        w.write(lexerGrammarStr);
+                        w.close();
+                    }
+                    catch (IOException e) {
+                        // emit different error message when creating the implicit lexer fails
+                        // due to write permission error
+                        exceptionWhenWritingLexerFile = true;
+                        throw e;
+                    }
+                    try {
+                        StringReader sr = new StringReader(lexerGrammarStr);
+                        Grammar lexerGrammar = new Grammar(this);
+                        lexerGrammar.composite.watchNFAConversion = internalOption_watchNFAConversion;
+                        lexerGrammar.implicitLexer = true;
+                        //lexerGrammar.setTool(this);
+                        File lexerGrammarFullFile =
+                            new File(getFileDirectory(lexerGrammarFileName), lexerGrammarFileName);
+                        lexerGrammar.setFileName(lexerGrammarFullFile.toString());
+
+                        lexerGrammar.importTokenVocabulary(rootGrammar);
+                        lexerGrammar.parseAndBuildAST(sr);
+
+                        sr.close();
+
+                        lexerGrammar.composite.assignTokenTypes();
+						lexerGrammar.addRulesForSyntacticPredicates();
+                        lexerGrammar.composite.defineGrammarSymbols();
+                        lexerGrammar.composite.createNFAs();
+
+                        generateRecognizer(lexerGrammar);
+                    }
+                    finally {
+                        // make sure we clean up
+                        if (deleteTempLexer) {
+                            File outputDir = getOutputDirectory(lexerGrammarFileName);
+                            File outputFile = new File(outputDir, lexerGrammarFileName);
+                            outputFile.delete();
+                        }
+                    }
+                }
+            }
+            catch (IOException e) {
+                if (exceptionWhenWritingLexerFile) {
+                    ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, e);
+                }
+                else {
+                    ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE,
+                                       grammarFileName, e);
+                }
+            }
+            catch (Exception e) {
+                ErrorManager.error(ErrorManager.MSG_INTERNAL_ERROR, grammarFileName, e);
+            }
+            /*
+           finally {
+           System.out.println("creates="+ Interval.creates);
+           System.out.println("hits="+ Interval.hits);
+           System.out.println("misses="+ Interval.misses);
+           System.out.println("outOfRange="+ Interval.outOfRange);
+           }
+            */
+        }
+    }
+
+    public void sortGrammarFiles() throws IOException {
+        //System.out.println("Grammar names "+getGrammarFileNames());
+        Graph<String> g = new Graph<String>();
+        List<String> missingFiles = new ArrayList<String>();
+        for (String gfile : grammarFileNames) {
+            try {
+                GrammarSpelunker grammar = new GrammarSpelunker(inputDirectory, gfile);
+                grammar.parse();
+                String vocabName = grammar.getTokenVocab();
+                String grammarName = grammar.getGrammarName();
+                // Make all grammars depend on any tokenVocab options
+                if ( vocabName!=null ) g.addEdge(gfile, vocabName+CodeGenerator.VOCAB_FILE_EXTENSION);
+                // Make all generated tokens files depend on their grammars
+                g.addEdge(grammarName+CodeGenerator.VOCAB_FILE_EXTENSION, gfile);
+            }
+            catch (FileNotFoundException fnfe) {
+                ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE, gfile, fnfe);
+                missingFiles.add(gfile);
+            }
+        }
+        List<String> sorted = g.sort();
+        //System.out.println("sorted="+sorted);
+        grammarFileNames.clear(); // wipe so we can give new ordered list
+        for (int i = 0; i < sorted.size(); i++) {
+            String f = sorted.get(i);
+            if ( missingFiles.contains(f) ) continue;
+            if ( !(f.endsWith(".g") || f.endsWith(".g3")) ) continue;
+            grammarFileNames.add(f);
+        }
+        //System.out.println("new grammars="+grammarFileNames);
+    }
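+
+    // Ordering sketch (hypothetical grammars): if B.g declares options { tokenVocab=A; },
+    // one edge records that B.g depends on A.tokens and another that A.tokens depends on
+    // A.g, so the topological sort schedules A.g ahead of B.g; the rebuilt list then keeps
+    // only the .g/.g3 files that were actually found.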
+
+    /** Get a grammar mentioned on the command-line and any delegates */
+    public Grammar getRootGrammar(String grammarFileName)
+        throws IOException
+    {
+        //ST.setLintMode(true);
+        // grammars mentioned on command line are either roots or single grammars.
+        // create the necessary composite in case it's got delegates; even
+        // single grammar needs it to get token types.
+        CompositeGrammar composite = new CompositeGrammar();
+        Grammar grammar = new Grammar(this, grammarFileName, composite);
+        composite.setDelegationRoot(grammar);
+        FileReader fr;
+        File f;
+
+        if (haveInputDir) {
+            f = new File(inputDirectory, grammarFileName);
+        }
+        else {
+            f = new File(grammarFileName);
+        }
+
+        // Store the location of this grammar as if we import files, we can then
+        // search for imports in the same location as the original grammar as well as in
+        // the lib directory.
+        //
+        parentGrammarDirectory = f.getParent();
+
+        if (grammarFileName.lastIndexOf(File.separatorChar) == -1) {
+            grammarOutputDirectory = ".";
+        }
+        else {
+            grammarOutputDirectory = grammarFileName.substring(0, grammarFileName.lastIndexOf(File.separatorChar));
+        }
+        fr = new FileReader(f);
+        BufferedReader br = new BufferedReader(fr);
+        grammar.parseAndBuildAST(br);
+        composite.watchNFAConversion = internalOption_watchNFAConversion;
+        br.close();
+        fr.close();
+        return grammar;
+    }
+
+    /** Create NFA, DFA and generate code for grammar.
+     *  Create NFA for any delegates first.  Once all NFA are created,
+     *  it's ok to create DFA, which must check for left-recursion.  That check
+     *  is done by walking the full NFA, which therefore must be complete.
+     *  After all NFAs are created comes DFA conversion for the root grammar, then
+     *  code gen for the root grammar.  DFA and code gen for delegates come next.
+     */
+    protected void generateRecognizer(Grammar grammar) {
+        String language = (String) grammar.getOption("language");
+        if (language != null) {
+            CodeGenerator generator = new CodeGenerator(this, grammar, language);
+            grammar.setCodeGenerator(generator);
+            generator.setDebug(isDebug());
+            generator.setProfile(isProfile());
+            generator.setTrace(isTrace());
+
+            // generate NFA early in case of crash later (for debugging)
+            if (isGenerate_NFA_dot()) {
+                generateNFAs(grammar);
+            }
+
+            // GENERATE CODE
+            generator.genRecognizer();
+
+            if (isGenerate_DFA_dot()) {
+                generateDFAs(grammar);
+            }
+
+            List<Grammar> delegates = grammar.getDirectDelegates();
+            for (int i = 0; delegates != null && i < delegates.size(); i++) {
+                Grammar delegate = delegates.get(i);
+                if (delegate != grammar) { // already processing this one
+                    generateRecognizer(delegate);
+                }
+            }
+        }
+    }
+
+    public void generateDFAs(Grammar g) {
+        for (int d = 1; d <= g.getNumberOfDecisions(); d++) {
+            DFA dfa = g.getLookaheadDFA(d);
+            if (dfa == null) {
+                continue; // not there for some reason, ignore
+            }
+            DOTGenerator dotGenerator = new DOTGenerator(g);
+            String dot = dotGenerator.getDOT(dfa.startState);
+            String dotFileName = g.name + "." + "dec-" + d;
+            if (g.implicitLexer) {
+                dotFileName = g.name + Grammar.grammarTypeToFileNameSuffix[g.type] + "." + "dec-" + d;
+            }
+            try {
+                writeDOTFile(g, dotFileName, dot);
+            } catch (IOException ioe) {
+                ErrorManager.error(ErrorManager.MSG_CANNOT_GEN_DOT_FILE,
+                                   dotFileName,
+                                   ioe);
+            }
+        }
+    }
+
+    protected void generateNFAs(Grammar g) {
+        DOTGenerator dotGenerator = new DOTGenerator(g);
+        Collection<Rule> rules = new HashSet<Rule>(g.getAllImportedRules());
+        rules.addAll(g.getRules());
+
+        for (Rule r : rules) {
+            try {
+                String dot = dotGenerator.getDOT(r.startState);
+                if (dot != null) {
+                    writeDOTFile(g, r, dot);
+                }
+            } catch (IOException ioe) {
+                ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, ioe);
+            }
+        }
+    }
+
+    protected void writeDOTFile(Grammar g, Rule r, String dot) throws IOException {
+        writeDOTFile(g, r.grammar.name + "." + r.name, dot);
+    }
+
+    protected void writeDOTFile(Grammar g, String name, String dot) throws IOException {
+        Writer fw = getOutputFile(g, name + ".dot");
+        fw.write(dot);
+        fw.close();
+    }
+
+    private static void version() {
+        ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
+    }
+
+    private static void help() {
+        ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
+        System.err.println("usage: java org.antlr.Tool [args] file.g [file2.g file3.g ...]");
+        System.err.println("  -o outputDir          specify output directory where all output is generated");
+        System.err.println("  -fo outputDir         same as -o but force even files with relative paths to dir");
+        System.err.println("  -lib dir              specify location of token files");
+        System.err.println("  -depend               generate file dependencies");
+        System.err.println("  -report               print out a report about the grammar(s) processed");
+        System.err.println("  -print                print out the grammar without actions");
+        System.err.println("  -debug                generate a parser that emits debugging events");
+		System.err.println("  -profile              generate a parser that computes profiling information");
+		System.err.println("  -trace                generate a recognizer that traces rule entry/exit");
+        System.err.println("  -nfa                  generate an NFA for each rule");
+        System.err.println("  -dfa                  generate a DFA for each decision point");
+        System.err.println("  -message-format name  specify output style for messages");
+        System.err.println("  -verbose              generate ANTLR version and other information");
+        System.err.println("  -make                 only build if generated files older than grammar");
+		System.err.println("  -version              print the version of ANTLR and exit.");
+		System.err.println("  -language L           override language grammar option; generate L");
+        System.err.println("  -X                    display extended argument list");
+    }
+
+    private static void Xhelp() {
+        ErrorManager.info("ANTLR Parser Generator  Version " + new Tool().VERSION);
+        System.err.println("  -Xgrtree                print the grammar AST");
+        System.err.println("  -Xdfa                   print DFA as text ");
+        System.err.println("  -Xnoprune               test lookahead against EBNF block exit branches");
+        System.err.println("  -Xnocollapse            collapse incident edges into DFA states");
+		System.err.println("  -Xdbgconversion         dump lots of info during NFA conversion");
+		System.err.println("  -Xconversiontimeout     use to restrict NFA conversion exponentiality");
+        System.err.println("  -Xmultithreaded         run the analysis in 2 threads");
+        System.err.println("  -Xnomergestopstates     do not merge stop states");
+        System.err.println("  -Xdfaverbose            generate DFA states in DOT with NFA configs");
+        System.err.println("  -Xwatchconversion       print a message for each NFA before converting");
+        System.err.println("  -XdbgST                 put tags at start/stop of all templates in output");
+        System.err.println("  -Xnfastates             for nondeterminisms, list NFA states for each path");
+        System.err.println("  -Xm m                   max number of rule invocations during conversion           [" + NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK + "]");
+        System.err.println("  -Xmaxdfaedges m         max \"comfortable\" number of edges for single DFA state     [" + DFA.MAX_STATE_TRANSITIONS_FOR_TABLE + "]");
+        System.err.println("  -Xmaxinlinedfastates m  max DFA states before table used rather than inlining      [" + CodeGenerator.MADSI_DEFAULT +"]");
+        System.err.println("  -Xmaxswitchcaselabels m don't generate switch() statements for dfas bigger  than m [" + CodeGenerator.MSCL_DEFAULT +"]");
+		System.err.println("  -Xminswitchalts m       don't generate switch() statements for dfas smaller than m [" + CodeGenerator.MSA_DEFAULT + "]");
+		System.err.println("  -Xsavelexer             don't delete temporary lexers generated from combined grammars");
+    }
+
+    /**
+     * Set the threshold of case labels beyond which ANTLR will not instruct the target template
+     * to generate switch() { case xxx: ...
+     *
+     * @param maxSwitchCaseLabels Maximum number of case labels that ANTLR should allow the target code to generate
+     */
+    public void setMaxSwitchCaseLabels(int maxSwitchCaseLabels) {
+        CodeGenerator.MAX_SWITCH_CASE_LABELS = maxSwitchCaseLabels;
+    }
+
+    /**
+     * Set the threshold of the number of alts, below which ANTLR will not instruct the target
+     * template to use a switch statement.
+     *
+     * @param minSwitchAlts the minimum number of alts required to use a switch statement
+     */
+    public void setMinSwitchAlts(int minSwitchAlts) {
+        CodeGenerator.MIN_SWITCH_ALTS = minSwitchAlts;
+    }
+
+    /**
+     * Set the location (base directory) where output files should be produced
+     * by the ANTLR tool.
+     * @param outputDirectory
+     */
+    public void setOutputDirectory(String outputDirectory) {
+        haveOutputDir = true;
+        this.outputDirectory = outputDirectory;
+    }
+
+    /**
+     * Used by build tools to force the output files to always be
+     * relative to the base output directory, even though the tool
+     * had to set the output directory to an absolute path as it
+     * cannot rely on the working directory like command line invocation
+     * can.
+     *
+     * @param forceRelativeOutput true if output files should always be relative to the base output directory
+     */
+    public void setForceRelativeOutput(boolean forceRelativeOutput) {
+        this.forceRelativeOutput = forceRelativeOutput;
+    }
+
+    /**
+     * Set the base location of input files. Normally (when the tool is
+     * invoked from the command line), the inputDirectory is not set, but
+     * for build tools such as Maven, we need to be able to locate the input
+     * files relative to the base, as the working directory could be anywhere and
+     * changing working directories is not a valid concept for JVMs because of threading and
+     * so on. Setting the directory just means that the getFileDirectory() method will
+     * try to open files relative to this input directory.
+     *
+     * @param inputDirectory Input source base directory
+     */
+    public void setInputDirectory(String inputDirectory) {
+        this.inputDirectory = inputDirectory;
+        haveInputDir = true;
+    }
+
+    /** This method is used by all code generators to create new output
+     *  files. If the outputDir set by -o is not present it will be created.
+     *  The final filename is sensitive to the output directory and
+     *  the directory where the grammar file was found.  If -o is /tmp
+     *  and the original grammar file was foo/t.g then output files
+     *  go in /tmp/foo.
+     *
+     *  The output dir -o spec takes precedence if it's absolute.
+     *  E.g., if the grammar file dir is absolute the output dir is given
+     *  precedence. "-o /tmp /usr/lib/t.g" results in "/tmp/T.java" as
+     *  output (assuming t.g holds T.java).
+     *
+     *  If no -o is specified, then just write to the directory where the
+     *  grammar file was found.
+     *
+     *  If outputDirectory==null then write a String.
+     */
+    public Writer getOutputFile(Grammar g, String fileName) throws IOException {
+        if (getOutputDirectory() == null) {
+            return new StringWriter();
+        }
+        // output directory is a function of where the grammar file lives
+        // for subdir/T.g, you get subdir here.  Well, depends on -o etc...
+        // But, if this is a .tokens file, then we force the output to
+        // be the base output directory (or current directory if there is not a -o)
+        //
+        File outputDir;
+        if (fileName.endsWith(CodeGenerator.VOCAB_FILE_EXTENSION)) {
+            if (haveOutputDir) {
+                outputDir = new File(getOutputDirectory());
+            }
+            else {
+                outputDir = new File(".");
+            }
+        }
+        else {
+            outputDir = getOutputDirectory(g.getFileName());
+        }
+        File outputFile = new File(outputDir, fileName);
+
+        if (!outputDir.exists()) {
+            outputDir.mkdirs();
+        }
+        FileWriter fw = new FileWriter(outputFile);
+        return new BufferedWriter(fw);
+    }
+
+    /**
+     * Return the location where ANTLR will generate output files for a given file. This is a
+     * base directory and output files will be relative to here in some cases
+     * such as when -o option is used and input files are given relative
+     * to the input directory.
+     *
+     * @param fileNameWithPath path to input source
+     */
+    public File getOutputDirectory(String fileNameWithPath) {
+
+        File outputDir;
+        String fileDirectory;
+
+        // Some files are given to us without a PATH but should
+        // still be written to the output directory in the relative path of
+        // the output directory. The file directory is either the set of subdirectories
+        // or just the relative path recorded for the parent grammar. This means
+        // that when we write the tokens files, or the .java files for imported grammars,
+        // we will write them in the correct place.
+        //
+        if (fileNameWithPath.lastIndexOf(File.separatorChar) == -1) {
+
+            // No path is included in the file name, so make the file
+            // directory the same as the parent grammar (which might still be just "",
+            // but when it is not, we will write the file in the correct place).
+            //
+            fileDirectory = grammarOutputDirectory;
+
+        }
+        else {
+            fileDirectory = fileNameWithPath.substring(0, fileNameWithPath.lastIndexOf(File.separatorChar));
+        }
+        if (haveOutputDir) {
+            // -o /tmp /var/lib/t.g => /tmp/T.java
+            // -o subdir/output /usr/lib/t.g => subdir/output/T.java
+            // -o . /usr/lib/t.g => ./T.java
+            if ((fileDirectory != null && !forceRelativeOutput) &&
+                (new File(fileDirectory).isAbsolute() ||
+                 fileDirectory.startsWith("~")) || // isAbsolute doesn't count this :(
+                isForceAllFilesToOutputDir()) {
+                // somebody set the dir, it takes precedence; write new file there
+                outputDir = new File(getOutputDirectory());
+            }
+            else {
+                // -o /tmp subdir/t.g => /tmp/subdir/t.g
+                if (fileDirectory != null) {
+                    outputDir = new File(getOutputDirectory(), fileDirectory);
+                }
+                else {
+                    outputDir = new File(getOutputDirectory());
+                }
+            }
+        }
+        else {
+            // they didn't specify a -o dir so just write to location
+            // where grammar is, absolute or relative, this will only happen
+            // with command line invocation as build tools will always
+            // supply an output directory.
+            //
+            outputDir = new File(fileDirectory);
+        }
+        return outputDir;
+    }
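+
+    // Combined example (hypothetical paths): given "-o build/gen" and the grammar
+    // src/grammars/T.g, generated files land in build/gen/src/grammars; adding -fo (or
+    // using an absolute grammar path) forces them directly into build/gen, and with no
+    // -o at all they are written next to the grammar in src/grammars.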
+
+    /**
+     * Name a file from the -lib dir.  Imported grammars and .tokens files are looked up here.
+     *
+     * If we do not locate the file in the library directory, then we try
+     * the location of the originating grammar.
+     *
+     * @param fileName input name we are looking for
+     * @return Path to file that we think should be the import file
+     *
+     * @throws java.io.IOException
+     */
+    public String getLibraryFile(String fileName) throws IOException {
+
+        // First, see if we can find the file in the library directory
+        //
+        File f = new File(getLibraryDirectory() + File.separator + fileName);
+
+        if (f.exists()) {
+
+            // Found in the library directory
+            //
+            return f.getAbsolutePath();
+        }
+
+        // Need to assume it is in the same location as the input file. Note that
+        // this is only relevant for external build tools and when the input grammar
+        // was specified relative to the source directory (working directory if using
+        // the command line).
+        //
+        return parentGrammarDirectory + File.separator + fileName;
+    }
+
+    /** Return the directory containing the grammar file for this grammar.
+     *  Normally this is a relative path from the current directory.  People will
+     *  often do "java org.antlr.Tool grammars/*.g3"  So the file will be
+     *  "grammars/foo.g3" etc...  This method returns "grammars".
+     *
+     *  If we have been given a specific input directory as a base, then
+     *  we must find the directory relative to this directory, unless the
+     *  file name is given to us in absolute terms.
+     */
+    public String getFileDirectory(String fileName) {
+
+        File f;
+        if (haveInputDir && !fileName.startsWith(File.separator)) {
+            f = new File(inputDirectory, fileName);
+        }
+        else {
+            f = new File(fileName);
+        }
+        // And ask Java what the base directory of this location is
+        //
+        return f.getParent();
+    }
+
+    /** Return a File descriptor for vocab file.  Look in library or
+     *  in -o output path.  antlr -o foo T.g U.g where U needs T.tokens
+     *  won't work unless we look in foo too. If we do not find the
+     *  file in the lib directory then we must assume that the .tokens file
+     *  is going to be generated as part of this build and we have defined
+     *  .tokens files so that they ALWAYS are generated in the base output
+     *  directory, which means the current directory for the command line tool if there
+     *  was no output directory specified.
+     */
+    public File getImportedVocabFile(String vocabName) {
+
+        File f = new File(getLibraryDirectory(),
+                          File.separator +
+                          vocabName +
+                          CodeGenerator.VOCAB_FILE_EXTENSION);
+        if (f.exists()) {
+            return f;
+        }
+
+        // We did not find the vocab file in the lib directory, so we need
+        // to look for it in the output directory which is where .tokens
+        // files are generated (in the base, not relative to the input
+        // location.)
+        //
+        if (haveOutputDir) {
+            f = new File(getOutputDirectory(), vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
+        }
+        else {
+            f = new File(vocabName + CodeGenerator.VOCAB_FILE_EXTENSION);
+        }
+        return f;
+    }
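+
+    // Lookup sketch (hypothetical names): for "antlr -o out -lib lib T.g U.g" where U.g
+    // needs T.tokens, this first tries lib/T.tokens and, if that file does not exist,
+    // falls back to out/T.tokens, the base output directory where .tokens files are
+    // always generated.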
+
+    /** If the tool needs to panic/exit, how do we do that?
+     */
+    public void panic() {
+        throw new Error("ANTLR panic");
+    }
+
+    /** Return a time stamp string accurate to sec: yyyy-mm-dd hh:mm:ss
+     */
+    public static String getCurrentTimeStamp() {
+        GregorianCalendar calendar = new java.util.GregorianCalendar();
+        int y = calendar.get(Calendar.YEAR);
+        int m = calendar.get(Calendar.MONTH) + 1; // zero-based for months
+        int d = calendar.get(Calendar.DAY_OF_MONTH);
+        int h = calendar.get(Calendar.HOUR_OF_DAY);
+        int min = calendar.get(Calendar.MINUTE);
+        int sec = calendar.get(Calendar.SECOND);
+        String sy = String.valueOf(y);
+        String sm = m < 10 ? "0" + m : String.valueOf(m);
+        String sd = d < 10 ? "0" + d : String.valueOf(d);
+        String sh = h < 10 ? "0" + h : String.valueOf(h);
+        String smin = min < 10 ? "0" + min : String.valueOf(min);
+        String ssec = sec < 10 ? "0" + sec : String.valueOf(sec);
+        return new StringBuffer().append(sy).append("-").append(sm).append("-").append(sd).append(" ").append(sh).append(":").append(smin).append(":").append(ssec).toString();
+    }
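+
+    // For reference, the string built above corresponds to the SimpleDateFormat pattern
+    // "yyyy-MM-dd HH:mm:ss", e.g.:
+    //   new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new java.util.Date())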
+
+    /**
+     * Provide the List of all grammar file names that the ANTLR tool will
+     * process or has processed.
+     *
+     * @return the grammarFileNames
+     */
+    public List<String> getGrammarFileNames() {
+        return grammarFileNames;
+    }
+
+    /**
+     * Indicates whether ANTLR has generated or will generate a description of
+     * all the NFAs in <a href="http://www.graphviz.org">Dot format</a>
+     *
+     * @return the generate_NFA_dot
+     */
+    public boolean isGenerate_NFA_dot() {
+        return generate_NFA_dot;
+    }
+
+    /**
+     * Indicates whether ANTLR has generated or will generate a description of
+     * all the DFAs in <a href="http://www.graphviz.org">Dot format</a>
+     *
+     * @return the generate_DFA_dot
+     */
+    public boolean isGenerate_DFA_dot() {
+        return generate_DFA_dot;
+    }
+
+    /**
+     * Return the Path to the base output directory, where ANTLR
+     * will generate all the output files for the current language target as
+     * well as any ancillary files such as .tokens vocab files.
+     *
+     * @return the output Directory
+     */
+    public String getOutputDirectory() {
+        return outputDirectory;
+    }
+
+    /**
+     * Return the Path to the directory in which ANTLR will search for ancillary
+     * files such as .tokens vocab files and imported grammar files.
+     *
+     * @return the lib Directory
+     */
+    public String getLibraryDirectory() {
+        return libDirectory;
+    }
+
+    /**
+     * Indicate if ANTLR has generated, or will generate a debug version of the
+     * recognizer. Debug versions of a parser communicate with a debugger such
+     * as that contained in ANTLRWorks and at start up will 'hang' waiting for
+     * a connection on an IP port (49100 by default).
+     *
+     * @return the debug flag
+     */
+    public boolean isDebug() {
+        return debug;
+    }
+
+    /**
+     * Indicate whether ANTLR has generated, or will generate a version of the
+     * recognizer that prints trace messages on entry and exit of each rule.
+     *
+     * @return the trace flag
+     */
+    public boolean isTrace() {
+        return trace;
+    }
+
+    /**
+     * Indicates whether ANTLR has generated or will generate a version of the
+     * recognizer that gathers statistics about its execution, which it prints when
+     * it terminates.
+     *
+     * @return the profile
+     */
+    public boolean isProfile() {
+        return profile;
+    }
+
+    /**
+     * Indicates whether ANTLR has generated or will generate a report of various
+     * elements of the grammar analysis, once it has finished analyzing a grammar
+     * file.
+     *
+     * @return the report flag
+     */
+    public boolean isReport() {
+        return report;
+    }
+
+    /**
+     * Indicates whether ANTLR has printed, or will print, a version of the input grammar
+     * file(s) that is stripped of any action code embedded within.
+     *
+     * @return the printGrammar flag
+     */
+    public boolean isPrintGrammar() {
+        return printGrammar;
+    }
+
+    /**
+     * Indicates whether ANTLR has supplied, or will supply, a list of all the things
+     * that the input grammar depends upon and all the things that will be generated
+     * when that grammar is successfully analyzed.
+     *
+     * @return the depend flag
+     */
+    public boolean isDepend() {
+        return depend;
+    }
+
+    /**
+     * Indicates whether ANTLR will force all files to the output directory, even
+     * if the input files have relative paths from the input directory.
+     *
+     * @return the forceAllFilesToOutputDir flag
+     */
+    public boolean isForceAllFilesToOutputDir() {
+        return forceAllFilesToOutputDir;
+    }
+
+    /**
+     * Indicates whether ANTLR will be verbose when analyzing grammar files, such as
+     * displaying the names of the files it is generating and similar information.
+     *
+     * @return the verbose flag
+     */
+    public boolean isVerbose() {
+        return verbose;
+    }
+
+    /**
+     * Provide the current setting of the conversion timeout on DFA creation.
+     *
+     * @return DFA creation timeout value in milliseconds
+     */
+    public int getConversionTimeout() {
+        return DFA.MAX_TIME_PER_DFA_CREATION;
+    }
+
+    /**
+     * Returns the current setting of the message format descriptor
+     * @return Current message format
+     */
+    public String getMessageFormat() {
+        return ErrorManager.getMessageFormat().toString();
+    }
+
+    /**
+     * Returns the number of errors that the analysis/processing threw up.
+     * @return Error count
+     */
+    public int getNumErrors() {
+        return ErrorManager.getNumErrors();
+    }
+
+    /**
+     * Indicate whether the tool will analyze the dependencies of the provided grammar
+     * file list and ensure that grammars with dependencies are built
+     * after any of the other grammars in the list that they are dependent on. Setting
+     * this option also has the side effect that any grammars that are includes for other
+     * grammars in the list are excluded from individual analysis, which allows the caller
+     * to invoke the tool via org.antlr.tool -make *.g and not worry about the inclusion
+     * of grammars that are just includes for other grammars or what order the grammars
+     * appear on the command line.
+     *
+     * This option was coded to make life easier for tool integration (such as Maven) but
+     * may also be useful at the command line.
+     *
+     * @return true if the tool is currently configured to analyze and sort grammar files.
+     */
+    public boolean getMake() {
+        return make;
+    }
+
+    /**
+     * Set the message format to one of ANTLR, gnu, vs2005
+     *
+     * @param format
+     */
+    public void setMessageFormat(String format) {
+        ErrorManager.setFormat(format);
+    }
+
+    /** Provide the List of all grammar file names that the ANTLR tool should process.
+     *
+     * @param grammarFileNames The list of grammar files to process
+     */
+    public void setGrammarFileNames(List<String> grammarFileNames) {
+        this.grammarFileNames = grammarFileNames;
+    }
+
+    public void addGrammarFile(String grammarFileName) {
+        if (!grammarFileNames.contains(grammarFileName)) {
+            grammarFileNames.add(grammarFileName);
+        }
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a description of
+     * all the NFAs in <a href="http://www.graphviz.org">Dot format</a>
+     *
+     * @param generate_NFA_dot True to generate dot descriptions
+     */
+    public void setGenerate_NFA_dot(boolean generate_NFA_dot) {
+        this.generate_NFA_dot = generate_NFA_dot;
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a description of
+     * all the DFAs in <a href="http://www.graphviz.org">Dot format</a>
+     *
+     * @param generate_DFA_dot True to generate dot descriptions
+     */
+    public void setGenerate_DFA_dot(boolean generate_DFA_dot) {
+        this.generate_DFA_dot = generate_DFA_dot;
+    }
+
+    /**
+     * Set the Path to the directory in which ANTLR will search for ancillary
+     * files such as .tokens vocab files and imported grammar files.
+     *
+     * @param libDirectory the libDirectory to set
+     */
+    public void setLibDirectory(String libDirectory) {
+        this.libDirectory = libDirectory;
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a debug version of the
+     * recognizer. Debug versions of a parser communicate with a debugger such
+     * as that contained in ANTLRWorks and at startup will 'hang' waiting for
+     * a connection on an IP port (49100 by default).
+     *
+     * @param debug true to generate a debug mode parser
+     */
+    public void setDebug(boolean debug) {
+        this.debug = debug;
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a version of the
+     * recognizer that prints trace messages on entry and exit of each rule
+     *
+     * @param trace true to generate a tracing parser
+     */
+    public void setTrace(boolean trace) {
+        this.trace = trace;
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a version of the
+     * recognizer that gathers statistics about its execution, which it prints when
+     * it terminates.
+     *
+     * @param profile true to generate a profiling parser
+     */
+    public void setProfile(boolean profile) {
+        this.profile = profile;
+    }
+
+    /**
+     * Indicate whether ANTLR should generate a report of various
+     * elements of the grammar analysis, once it has finished analyzing a grammar
+     * file.
+     *
+     * @param report true to generate the analysis report
+     */
+    public void setReport(boolean report) {
+        this.report = report;
+    }
+
+    /**
+     * Indicate whether ANTLR should print a version of the input grammar
+     * file(s) that is stripped of any action code embedded within.
+     *
+     * @param printGrammar true to generate a stripped file
+     */
+    public void setPrintGrammar(boolean printGrammar) {
+        this.printGrammar = printGrammar;
+    }
+
+    /**
+     * Indicate whether ANTLR should supply a list of all the things
+     * that the input grammar depends upon and all the things that will be generated
+     * when that grammar is successfully analyzed.
+     *
+     * @param depend true to get depends set rather than process the grammar
+     */
+    public void setDepend(boolean depend) {
+        this.depend = depend;
+    }
+
+    /**
+     * Indicates whether ANTLR will force all files to the output directory, even
+     * if the input files have relative paths from the input directory.
+     *
+     * @param forceAllFilesToOutputDir true to force files to output directory
+     */
+    public void setForceAllFilesToOutputDir(boolean forceAllFilesToOutputDir) {
+        this.forceAllFilesToOutputDir = forceAllFilesToOutputDir;
+    }
+
+    /**
+     * Indicate whether ANTLR should be verbose when analyzing grammar files, such as
+     * displaying the names of the files it is generating and similar information.
+     *
+     * @param verbose true to be verbose
+     */
+    public void setVerbose(boolean verbose) {
+        this.verbose = verbose;
+    }
+
+    /**
+     * Indicate whether the tool should analyze the dependencies of the provided grammar
+     * file list and ensure that the grammars with dependencies are built
+     * after any of the other grammars in the list that they are dependent on. Setting
+     * this option also has the side effect that any grammars that are includes for other
+     * grammars in the list are excluded from individual analysis, which allows the caller
+     * to invoke the tool via org.antlr.tool -make *.g and not worry about the inclusion
+     * of grammars that are just includes for other grammars or what order the grammars
+     * appear on the command line.
+     *
+     * This option was coded to make life easier for tool integration (such as Maven) but
+     * may also be useful at the command line.
+     *
+     * @param make
+     */
+    public void setMake(boolean make) {
+        this.make = make;
+    }
+
+}
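As a usage illustration of the accessors and mutators in this class, a hypothetical embedding might configure the tool programmatically instead of via command-line flags. This is a sketch only: it assumes the Tool class lives in the org.antlr package as in the ANTLR 3 distribution, that a no-argument constructor, a setOutputDirectory(String) mutator, and the process() entry point used by the command-line driver exist elsewhere in the class, and the grammar file names are made up.

    import org.antlr.Tool;

    public class BuildGrammars {
        public static void main(String[] args) {
            Tool antlr = new Tool();                       // assumed no-arg constructor
            antlr.setOutputDirectory("build/generated-src"); // assumed mutator
            antlr.setLibDirectory("src/grammars/imports");
            antlr.setMake(true);              // sort by dependency, skip pure includes
            antlr.setMessageFormat("gnu");    // one of ANTLR, gnu, vs2005
            antlr.addGrammarFile("src/grammars/Expr.g");
            antlr.addGrammarFile("src/grammars/ExprWalker.g");
            antlr.process();                  // assumed driver entry point
            if (antlr.getNumErrors() > 0) {
                System.err.println(antlr.getNumErrors() + " error(s) reported");
            }
        }
    }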
diff --git a/tool/src/main/java/org/antlr/analysis/ActionLabel.java b/tool/src/main/java/org/antlr/analysis/ActionLabel.java
new file mode 100644
index 0000000..884d8d5
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/ActionLabel.java
@@ -0,0 +1,60 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarAST;
+
+public class ActionLabel extends Label {
+	public GrammarAST actionAST;
+	
+	public ActionLabel(GrammarAST actionAST) {
+		super(ACTION);
+		this.actionAST = actionAST;
+	}
+
+	@Override
+	public boolean isEpsilon() {
+		return true; // we are to be ignored by analysis 'cept for predicates
+	}
+
+	@Override
+	public boolean isAction() {
+		return true;
+	}
+
+	@Override
+	public String toString() {
+		return "{"+actionAST+"}";
+	}
+
+	@Override
+	public String toString(Grammar g) {
+		return toString();
+	}
+}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/AnalysisRecursionOverflowException.java b/tool/src/main/java/org/antlr/analysis/AnalysisRecursionOverflowException.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/analysis/AnalysisRecursionOverflowException.java
rename to tool/src/main/java/org/antlr/analysis/AnalysisRecursionOverflowException.java
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/AnalysisTimeoutException.java b/tool/src/main/java/org/antlr/analysis/AnalysisTimeoutException.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/analysis/AnalysisTimeoutException.java
rename to tool/src/main/java/org/antlr/analysis/AnalysisTimeoutException.java
diff --git a/tool/src/main/java/org/antlr/analysis/DFA.java b/tool/src/main/java/org/antlr/analysis/DFA.java
new file mode 100644
index 0000000..5cc1556
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/DFA.java
@@ -0,0 +1,1167 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.misc.IntSet;
+import org.antlr.misc.IntervalSet;
+import org.antlr.misc.Utils;
+import org.antlr.runtime.IntStream;
+import org.stringtemplate.v4.ST;
+import org.antlr.tool.*;
+
+import java.util.*;
+
+/** A DFA (converted from a grammar's NFA).
+ *  DFAs are used as prediction machines for alternative blocks in all kinds
+ *  of recognizers (lexers, parsers, tree walkers).
+ */
+public class DFA {
+	public static final int REACHABLE_UNKNOWN = -2;
+	public static final int REACHABLE_BUSY = -1; // in process of computing
+	public static final int REACHABLE_NO = 0;
+	public static final int REACHABLE_YES = 1;
+
+	public static final int CYCLIC_UNKNOWN = -2;
+	public static final int CYCLIC_BUSY = -1; // in process of computing
+	public static final int CYCLIC_DONE = 0;
+	
+	/** Prevent explosion of DFA states during conversion. The max number
+	 *  of states per alt in a single decision's DFA.
+	public static final int MAX_STATES_PER_ALT_IN_DFA = 450;
+	 */
+
+	/** Set to 0 to not terminate early (time in ms) */
+	public static int MAX_TIME_PER_DFA_CREATION = 1*1000;
+
+	/** How many edges can each DFA state have before a "special" state
+	 *  is created that uses IF expressions instead of a table?
+	 */
+	public static int MAX_STATE_TRANSITIONS_FOR_TABLE = 65534;
+
+	/** What's the start state for this DFA? */
+    public DFAState startState;
+
+	/** This DFA is being built for which decision? */
+	public int decisionNumber = 0;
+
+    /** From what NFAState did we create the DFA? */
+    public NFAState decisionNFAStartState;
+
+	/** The printable grammar fragment associated with this DFA */
+	public String description;
+
+	/** A set of all uniquely-numbered DFA states.  Maps hash of DFAState
+     *  to the actual DFAState object.  We use this to detect
+     *  existing DFA states.  Map&lt;DFAState,DFAState&gt;.  Use Map so
+	 *  we can get old state back (Set only allows you to see if it's there).
+	 *  Not used during fixed k lookahead as it's a waste to fill it with
+	 *  a dup of states array.
+     */
+    protected Map<DFAState, DFAState> uniqueStates = new HashMap<DFAState, DFAState>();
+
+	/** Maps the state number to the actual DFAState.  Use a Vector as it
+	 *  grows automatically when I set the ith element.  This contains all
+	 *  states, but the states are not unique.  s3 might be same as s1 so
+	 *  s3 &rarr; s1 in this table.  This is how cycles occur.  If fixed k,
+	 *  then these states will all be unique as states[i] always points
+	 *  at state i when no cycles exist.
+	 *
+	 *  This is managed in parallel with uniqueStates and simply provides
+	 *  a way to go from state number to DFAState rather than via a
+	 *  hash lookup.
+	 */
+	protected Vector<DFAState> states = new Vector<DFAState>();
+
+	/** Unique state numbers per DFA */
+	protected int stateCounter = 0;
+
+	/** count only new states not states that were rejected as already present */
+	protected int numberOfStates = 0;
+
+	/** User specified max fixed lookahead.  If 0, nothing specified.  -1
+	 *  implies we have not looked at the options table yet to set k.
+	 */
+	protected int user_k = -1;
+
+	/** While building the DFA, track max lookahead depth if not cyclic */
+	protected int max_k = -1;
+
+    /** Is this DFA reduced?  I.e., can all states lead to an accept state? */
+    protected boolean reduced = true;
+
+    /** Are there any loops in this DFA?
+	 *  Computed by doesStateReachAcceptState()
+	 */
+    protected boolean cyclic = false;
+
+	/** Track whether this DFA has at least one sem/syn pred encountered
+	 *  during a closure operation.  This is useful for deciding whether
+	 *  to retry a non-LL(*) with k=1.  If no pred, it will not work w/o
+	 *  a pred so don't bother.  It would just give another error message.
+	 */
+	public boolean predicateVisible = false;
+
+	public boolean hasPredicateBlockedByAction = false;
+
+	/** Each alt in an NFA derived from a grammar must have a DFA state that
+     *  predicts it lest the parser not know what to do.  Nondeterminisms can
+     *  lead to this situation (assuming no semantic predicates can resolve
+     *  the problem) and when for some reason, I cannot compute the lookahead
+     *  (which might arise from an error in the algorithm or from
+     *  left-recursion etc...).  This list starts out with all alts contained
+     *  and then in method doesStateReachAcceptState() I remove the alts I
+     *  know to be uniquely predicted.
+     */
+    protected List<Integer> unreachableAlts;
+
+	protected int nAlts = 0;
+
+	/** We only want one accept state per predicted alt; track here */
+	protected DFAState[] altToAcceptState;
+
+	/** Track whether an alt discovers recursion for each alt during
+	 *  NFA to DFA conversion; &gt;1 alt with recursion implies nonregular.
+	 */
+	public IntSet recursiveAltSet = new IntervalSet();
+
+	/** Which NFA are we converting (well, which piece of the NFA)? */
+    public NFA nfa;
+
+	protected NFAToDFAConverter nfaConverter;
+
+	/** This probe tells you a lot about a decision and is useful even
+	 *  when there is no error such as when a syntactic nondeterminism
+	 *  is solved via semantic predicates.  Perhaps a GUI would want
+	 *  the ability to show that.
+	 */
+	public DecisionProbe probe = new DecisionProbe(this);
+
+	/** Track absolute time of the conversion so we can have a failsafe:
+	 *  if it takes too long, then terminate.  Assume bugs are in the
+	 *  analysis engine.
+	 */
+	//protected long conversionStartTime;
+
+	/** Map an edge transition table to a unique set number; ordered so
+	 *  we can push into the output template as an ordered list of sets
+	 *  and then ref them from within the transition[][] table.  Like this
+	 *  for C# target:
+	 *     public static readonly DFA30_transition0 =
+	 *     	new short[] { 46, 46, -1, 46, 46, -1, -1, -1, -1, -1, -1, -1,...};
+	 *         public static readonly DFA30_transition1 =
+	 *     	new short[] { 21 };
+	 *      public static readonly short[][] DFA30_transition = {
+	 *     	  DFA30_transition0,
+	 *     	  DFA30_transition0,
+	 *     	  DFA30_transition1,
+	 *     	  ...
+	 *      };
+	 */
+	public Map<List<Integer>, Integer> edgeTransitionClassMap = new LinkedHashMap<List<Integer>, Integer>();
+
+	/** The unique edge transition class number; every time we see a new
+	 *  set of edges emanating from a state, we number it so we can reuse
+	 *  if it's ever seen again for another state.  For the Java grammar,
+	 *  some of the big edge transition tables are seen about 57 times.
+	 */
+	protected int edgeTransitionClass =0;
+
+	/* This DFA can be converted to a transition[state][char] table and
+	 * the following tables are filled by createStateTables upon request.
+	 * These are injected into the templates for code generation.
+	 * See March 25, 2006 entry for description:
+	 *   http://www.antlr.org/blog/antlr3/codegen.tml
+	 * Often using Vector as can't set ith position in a List and have
+	 * it extend list size; bizarre.
+	 */
+
+	/** List of special DFAState objects */
+	public List<DFAState> specialStates;
+	/** List of ST for special states. */
+	public List<ST> specialStateSTs;
+	public Vector<Integer> accept;
+	public Vector<Integer> eot;
+	public Vector<Integer> eof;
+	public Vector<Integer> min;
+	public Vector<Integer> max;
+	public Vector<Integer> special;
+	public Vector<Vector<Integer>> transition;
+	/** just the Vector&lt;Integer&gt; indicating which unique edge table is at
+	 *  position i.
+	 */
+	public Vector<Integer> transitionEdgeTables; // not used by java yet
+	protected int uniqueCompressedSpecialStateNum = 0;
+
+	/** Which generator to use if we're building state tables */
+	protected CodeGenerator generator = null;
+
+	protected DFA() {}
+
+	public DFA(int decisionNumber, NFAState decisionStartState) {
+		this.decisionNumber = decisionNumber;
+        this.decisionNFAStartState = decisionStartState;
+        nfa = decisionStartState.nfa;
+        nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
+        //setOptions( nfa.grammar.getDecisionOptions(getDecisionNumber()) );
+        initAltRelatedInfo();
+
+		//long start = System.currentTimeMillis();
+        nfaConverter = new NFAToDFAConverter(this);
+		try {
+			nfaConverter.convert();
+
+			// figure out if there are problems with decision
+			verify();
+
+			if ( !probe.isDeterministic() || probe.analysisOverflowed() ) {
+				probe.issueWarnings();
+			}
+
+			// must be after verify as it computes cyclic, needed by this routine
+			// should be after warnings because early termination or something
+			// will not allow the reset to operate properly in some cases.
+			resetStateNumbersToBeContiguous();
+
+			//long stop = System.currentTimeMillis();
+			//System.out.println("verify cost: "+(int)(stop-start)+" ms");
+		}
+//		catch (AnalysisTimeoutException at) {
+//			probe.reportAnalysisTimeout();
+//			if ( !okToRetryDFAWithK1() ) {
+//				probe.issueWarnings();
+//			}
+//		}
+		catch (NonLLStarDecisionException nonLL) {
+			probe.reportNonLLStarDecision(this);
+			// >1 alt recurses, k=* and no auto backtrack nor manual sem/syn
+			if ( !okToRetryDFAWithK1() ) {
+				probe.issueWarnings();
+			}
+		}
+    }
+
+	/** Walk all states and reset their numbers to be a contiguous sequence
+	 *  of integers starting from 0.  Only cyclic DFA can have unused positions
+	 *  in states list.  State i might be identical to a previous state j and
+	 *  will result in states[i] == states[j].  We don't want to waste a state
+	 *  number on this.  Useful mostly for code generation in tables.
+	 *
+	 *  At the start of this routine, states[i].stateNumber &lt;= i by definition.
+	 *  If states[50].stateNumber is 50 then a cycle during conversion may
+	 *  try to add state 103, but we find that an identical DFA state, named
+	 *  50, already exists, hence, states[103]==states[50] and both have
+	 *  stateNumber 50 as they point at same object.  Afterwards, the set
+	 *  of state numbers from all states should represent a contiguous range
+	 *  from 0..n-1 where n is the number of unique states.
+	 */
+	public void resetStateNumbersToBeContiguous() {
+		if ( getUserMaxLookahead()>0 ) {
+			// all numbers are unique already; no states are thrown out.
+			return;
+		}
+
+        // walk list of DFAState objects by state number,
+		// setting state numbers to 0..n-1
+		int snum=0;
+		for (int i = 0; i <= getMaxStateNumber(); i++) {
+			DFAState s = getState(i);
+            // some states are unused after creation most commonly due to cycles
+            // or conflict resolution.
+            if ( s==null ) {
+                continue;
+            }
+			// state i is mapped to DFAState with state number set to i originally
+			// so if it's less than i, then we renumbered it already; that
+			// happens when states have been merged or cycles occurred I think.
+			// states[50] will point to DFAState with s50 in it but
+			// states[103] might also point at this same DFAState.  Since
+			// 50 < 103 then it's already been renumbered as it points downwards.
+			boolean alreadyRenumbered = s.stateNumber<i;
+			if ( !alreadyRenumbered ) {
+				// state i is a valid state; reset its state number
+				s.stateNumber = snum; // rewrite state numbers to be 0..n-1
+				snum++;
+			}
+		}
+        if ( snum!=getNumberOfStates() ) {
+			ErrorManager.internalError("DFA "+decisionNumber+": "+
+				decisionNFAStartState.getDescription()+" num unique states "+getNumberOfStates()+
+				"!= num renumbered states "+snum);
+		}
+	}
+
+	// JAVA-SPECIFIC Accessors!!!!!  It is so impossible to get arrays
+	// or even consistently formatted strings acceptable to java that
+	// I am forced to build the individual char elements here
+
+	public List<? extends String> getJavaCompressedAccept() { return getRunLengthEncoding(accept); }
+	public List<? extends String> getJavaCompressedEOT() { return getRunLengthEncoding(eot); }
+	public List<? extends String> getJavaCompressedEOF() { return getRunLengthEncoding(eof); }
+	public List<? extends String> getJavaCompressedMin() { return getRunLengthEncoding(min); }
+	public List<? extends String> getJavaCompressedMax() { return getRunLengthEncoding(max); }
+	public List<? extends String> getJavaCompressedSpecial() { return getRunLengthEncoding(special); }
+	public List<List<? extends String>> getJavaCompressedTransition() {
+		if ( transition==null || transition.isEmpty() ) {
+			return null;
+		}
+		List<List<? extends String>> encoded = new ArrayList<List<? extends String>>(transition.size());
+		// walk Vector<Vector<FormattedInteger>> which is the transition[][] table
+		for (int i = 0; i < transition.size(); i++) {
+			Vector<Integer> transitionsForState = transition.elementAt(i);
+			encoded.add(getRunLengthEncoding(transitionsForState));
+		}
+		return encoded;
+	}
+
+	/** Compress the incoming data list so that runs of same number are
+	 *  encoded as number,value pair sequences.  3 -1 -1 -1 28 is encoded
+	 *  as 1 3 3 -1 1 28.  I am pretty sure this is the lossless compression
+	 *  that GIF files use.  Transition tables are heavily compressed by
+	 *  this technique.  I got the idea from JFlex http://jflex.de/
+	 *
+	 *  Return List&lt;String&gt; where each string is either \xyz for 8bit char
+	 *  and \uFFFF for 16bit.  Hideous and specific to Java, but it is the
+	 *  only target bad enough to need it.
+	 */
+	public List<? extends String> getRunLengthEncoding(List<Integer> data) {
+		if ( data==null || data.isEmpty() ) {
+			// for states with no transitions we want an empty string ""
+			// to hold its place in the transitions array.
+			List<String> empty = new ArrayList<String>();
+			empty.add("");
+			return empty;
+		}
+		int size = Math.max(2,data.size()/2);
+		List<String> encoded = new ArrayList<String>(size); // guess at size
+		// scan values looking for runs
+		int i = 0;
+		Integer emptyValue = Utils.integer(-1);
+		while ( i < data.size() ) {
+			Integer I = data.get(i);
+			if ( I==null ) {
+				I = emptyValue;
+			}
+			// count the length of the run of values equal to I
+			int n = 0;
+			for (int j = i; j < data.size(); j++) {
+				Integer v = data.get(j);
+				if ( v==null ) {
+					v = emptyValue;
+				}
+				if ( I.equals(v) ) {
+					n++;
+				}
+				else {
+					break;
+				}
+			}
+			encoded.add(generator.target.encodeIntAsCharEscape((char)n));
+			encoded.add(generator.target.encodeIntAsCharEscape((char)I.intValue()));
+			i+=n;
+		}
+		return encoded;
+	}
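The (count, value) scheme described in the Javadoc above can be sketched independently of the code generator. The stand-alone example below emits plain integer pairs rather than the target-specific character escapes produced by encodeIntAsCharEscape, and reproduces the documented example 3 -1 -1 -1 28 -> 1 3 3 -1 1 28; it is illustrative only and skips the null-to-(-1) handling of the real method.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class RunLengthSketch {
        public static List<Integer> encode(List<Integer> data) {
            List<Integer> out = new ArrayList<Integer>();
            int i = 0;
            while (i < data.size()) {
                int value = data.get(i);
                int n = 0;
                while (i + n < data.size() && data.get(i + n) == value) {
                    n++;                  // length of the current run
                }
                out.add(n);               // count first...
                out.add(value);           // ...then the repeated value
                i += n;
            }
            return out;
        }

        public static void main(String[] args) {
            // 3 -1 -1 -1 28  ->  [1, 3, 3, -1, 1, 28], matching the Javadoc example
            System.out.println(encode(Arrays.asList(3, -1, -1, -1, 28)));
        }
    }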
+
+	public void createStateTables(CodeGenerator generator) {
+		//System.out.println("createTables:\n"+this);
+		this.generator = generator;
+		description = getNFADecisionStartState().getDescription();
+		description =
+			generator.target.getTargetStringLiteralFromString(description);
+
+		// create all the tables
+		special = new Vector<Integer>(this.getNumberOfStates()); // Vector<short>
+		special.setSize(this.getNumberOfStates());
+		specialStates = new ArrayList<DFAState>();				// List<DFAState>
+		specialStateSTs = new ArrayList<ST>();				// List<ST>
+		accept = new Vector<Integer>(this.getNumberOfStates()); // Vector<int>
+		accept.setSize(this.getNumberOfStates());
+		eot = new Vector<Integer>(this.getNumberOfStates()); // Vector<int>
+		eot.setSize(this.getNumberOfStates());
+		eof = new Vector<Integer>(this.getNumberOfStates()); // Vector<int>
+		eof.setSize(this.getNumberOfStates());
+		min = new Vector<Integer>(this.getNumberOfStates()); // Vector<int>
+		min.setSize(this.getNumberOfStates());
+		max = new Vector<Integer>(this.getNumberOfStates()); // Vector<int>
+		max.setSize(this.getNumberOfStates());
+		transition = new Vector<Vector<Integer>>(this.getNumberOfStates()); // Vector<Vector<int>>
+		transition.setSize(this.getNumberOfStates());
+		transitionEdgeTables = new Vector<Integer>(this.getNumberOfStates()); // Vector<int>
+		transitionEdgeTables.setSize(this.getNumberOfStates());
+
+		// for each state in the DFA, fill relevant tables.
+		Iterator<DFAState> it;
+		if ( getUserMaxLookahead()>0 ) {
+			it = states.iterator();
+		}
+		else {
+			it = getUniqueStates().values().iterator();
+		}
+		while ( it.hasNext() ) {
+			DFAState s = it.next();
+			if ( s==null ) {
+				// ignore null states; some acyclic DFAs see this condition
+				// when inlining DFAs (due to lack of exit branch pruning?)
+				continue;
+			}
+			if ( s.isAcceptState() ) {
+				// can't compute min,max,special,transition on accepts
+				accept.set(s.stateNumber,
+						   Utils.integer(s.getUniquelyPredictedAlt()));
+			}
+			else {
+				createMinMaxTables(s);
+				createTransitionTableEntryForState(s);
+				createSpecialTable(s);
+				createEOTAndEOFTables(s);
+			}
+		}
+
+		// now that we have computed list of specialStates, gen code for 'em
+		for (int i = 0; i < specialStates.size(); i++) {
+			DFAState ss = specialStates.get(i);
+			ST stateST =
+				generator.generateSpecialState(ss);
+			specialStateSTs.add(stateST);
+		}
+
+		// check that the tables are not messed up by encode/decode
+		/*
+		testEncodeDecode(min);
+		testEncodeDecode(max);
+		testEncodeDecode(accept);
+		testEncodeDecode(special);
+		System.out.println("min="+min);
+		System.out.println("max="+max);
+		System.out.println("eot="+eot);
+		System.out.println("eof="+eof);
+		System.out.println("accept="+accept);
+		System.out.println("special="+special);
+		System.out.println("transition="+transition);
+		*/
+	}
+
+	/*
+	private void testEncodeDecode(List data) {
+		System.out.println("data="+data);
+		List encoded = getRunLengthEncoding(data);
+		StringBuffer buf = new StringBuffer();
+		for (int i = 0; i < encoded.size(); i++) {
+			String I = (String)encoded.get(i);
+			int v = 0;
+			if ( I.startsWith("\\u") ) {
+				v = Integer.parseInt(I.substring(2,I.length()), 16);
+			}
+			else {
+				v = Integer.parseInt(I.substring(1,I.length()), 8);
+			}
+			buf.append((char)v);
+		}
+		String encodedS = buf.toString();
+		short[] decoded = org.antlr.runtime.DFA.unpackEncodedString(encodedS);
+		//System.out.println("decoded:");
+		for (int i = 0; i < decoded.length; i++) {
+			short x = decoded[i];
+			if ( x!=((Integer)data.get(i)).intValue() ) {
+				System.err.println("problem with encoding");
+			}
+			//System.out.print(", "+x);
+		}
+		//System.out.println();
+	}
+	*/
+
+	protected void createMinMaxTables(DFAState s) {
+		int smin = Label.MAX_CHAR_VALUE + 1;
+		int smax = Label.MIN_ATOM_VALUE - 1;
+		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
+			Transition edge = s.transition(j);
+			Label label = edge.label;
+			if ( label.isAtom() ) {
+				if ( label.getAtom()>=Label.MIN_CHAR_VALUE ) {
+					if ( label.getAtom()<smin ) {
+						smin = label.getAtom();
+					}
+					if ( label.getAtom()>smax ) {
+						smax = label.getAtom();
+					}
+				}
+			}
+			else if ( label.isSet() ) {
+				IntervalSet labels = (IntervalSet)label.getSet();
+				int lmin = labels.getMinElement();
+				// if valid char (don't do EOF) and less than current min
+				if ( lmin<smin && lmin>=Label.MIN_CHAR_VALUE ) {
+					smin = labels.getMinElement();
+				}
+				if ( labels.getMaxElement()>smax ) {
+					smax = labels.getMaxElement();
+				}
+			}
+		}
+
+		if ( smax<0 ) {
+			// must be predicates or pure EOT transition; just zero out min, max
+			smin = Label.MIN_CHAR_VALUE;
+			smax = Label.MIN_CHAR_VALUE;
+		}
+
+		min.set(s.stateNumber, Utils.integer((char)smin));
+		max.set(s.stateNumber, Utils.integer((char)smax));
+
+		if ( smax<0 || smin>Label.MAX_CHAR_VALUE || smin<0 ) {
+			ErrorManager.internalError("messed up: min="+min+", max="+max);
+		}
+	}
+
+	protected void createTransitionTableEntryForState(DFAState s) {
+		/*
+		System.out.println("createTransitionTableEntryForState s"+s.stateNumber+
+			" dec "+s.dfa.decisionNumber+" cyclic="+s.dfa.isCyclic());
+			*/
+		int smax = max.get(s.stateNumber);
+		int smin = min.get(s.stateNumber);
+
+		Vector<Integer> stateTransitions = new Vector<Integer>(smax-smin+1);
+		stateTransitions.setSize(smax-smin+1);
+		transition.set(s.stateNumber, stateTransitions);
+		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
+			Transition edge = s.transition(j);
+			Label label = edge.label;
+			if ( label.isAtom() && label.getAtom()>=Label.MIN_CHAR_VALUE ) {
+				int labelIndex = label.getAtom()-smin; // offset from 0
+				stateTransitions.set(labelIndex,
+									 Utils.integer(edge.target.stateNumber));
+			}
+			else if ( label.isSet() ) {
+				IntervalSet labels = (IntervalSet)label.getSet();
+				int[] atoms = labels.toArray();
+				for (int a = 0; a < atoms.length; a++) {
+					// set the transition if the label is valid (don't do EOF)
+					if ( atoms[a]>=Label.MIN_CHAR_VALUE ) {
+						int labelIndex = atoms[a]-smin; // offset from 0
+						stateTransitions.set(labelIndex,
+											 Utils.integer(edge.target.stateNumber));
+					}
+				}
+			}
+		}
+		// track unique state transition tables so we can reuse
+		Integer edgeClass = edgeTransitionClassMap.get(stateTransitions);
+		if ( edgeClass!=null ) {
+			//System.out.println("we've seen this array before; size="+stateTransitions.size());
+			transitionEdgeTables.set(s.stateNumber, edgeClass);
+		}
+		else {
+			edgeClass = Utils.integer(edgeTransitionClass);
+			transitionEdgeTables.set(s.stateNumber, edgeClass);
+			edgeTransitionClassMap.put(stateTransitions, edgeClass);
+			edgeTransitionClass++;
+		}
+	}
+
+	/** Set up the EOT and EOF tables; we cannot put -1 min/max values so
+	 *  we need another way to test that in the DFA transition function.
+	 */
+	protected void createEOTAndEOFTables(DFAState s) {
+		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
+			Transition edge = s.transition(j);
+			Label label = edge.label;
+			if ( label.isAtom() ) {
+				if ( label.getAtom()==Label.EOT ) {
+					// eot[s] points to accept state
+					eot.set(s.stateNumber, Utils.integer(edge.target.stateNumber));
+				}
+				else if ( label.getAtom()==Label.EOF ) {
+					// eof[s] points to accept state
+					eof.set(s.stateNumber, Utils.integer(edge.target.stateNumber));
+				}
+			}
+			else if ( label.isSet() ) {
+				IntervalSet labels = (IntervalSet)label.getSet();
+				int[] atoms = labels.toArray();
+				for (int a = 0; a < atoms.length; a++) {
+					if ( atoms[a]==Label.EOT ) {
+						// eot[s] points to accept state
+						eot.set(s.stateNumber, Utils.integer(edge.target.stateNumber));
+					}
+					else if ( atoms[a]==Label.EOF ) {
+						eof.set(s.stateNumber, Utils.integer(edge.target.stateNumber));
+					}
+				}
+			}
+		}
+	}
+
+	protected void createSpecialTable(DFAState s) {
+		// number all special states from 0...n-1 instead of their usual numbers
+		boolean hasSemPred = false;
+
+		// TODO this code is very similar to canGenerateSwitch.  Refactor to share
+		for (int j = 0; j < s.getNumberOfTransitions(); j++) {
+			Transition edge = s.transition(j);
+			Label label = edge.label;
+			// can't do a switch if the edges have preds or are going to
+			// require gated predicates
+			if ( label.isSemanticPredicate() ||
+				 ((DFAState)edge.target).getGatedPredicatesInNFAConfigurations()!=null)
+			{
+				hasSemPred = true;
+				break;
+			}
+		}
+		// if has pred or too big for table, make it special
+		int smax = max.get(s.stateNumber);
+		int smin = min.get(s.stateNumber);
+		if ( hasSemPred || smax-smin>MAX_STATE_TRANSITIONS_FOR_TABLE ) {
+			special.set(s.stateNumber,
+						Utils.integer(uniqueCompressedSpecialStateNum));
+			uniqueCompressedSpecialStateNum++;
+			specialStates.add(s);
+		}
+		else {
+			special.set(s.stateNumber, Utils.integer(-1)); // not special
+		}
+	}
+
+	public int predict(IntStream input) {
+		Interpreter interp = new Interpreter(nfa.grammar, input);
+		return interp.predict(this);
+	}
+
+	/** Add a new DFA state to this DFA if not already present.
+     *  To force an acyclic, fixed maximum depth DFA, just always
+	 *  return the incoming state.  By not reusing old states,
+	 *  no cycles can be created.  If we're doing fixed k lookahead,
+	 *  don't update uniqueStates; just return the incoming state, which
+	 *  indicates it's a new state.
+     */
+    protected DFAState addState(DFAState d) {
+		if ( getUserMaxLookahead()>0 ) {
+			return d;
+		}
+		// does a DFA state exist already with everything the same
+		// except its state number?
+		DFAState existing = uniqueStates.get(d);
+		if ( existing != null ) {
+            /*
+            System.out.println("state "+d.stateNumber+" exists as state "+
+                existing.stateNumber);
+                */
+            // already there...get the existing DFA state
+			return existing;
+		}
+
+		// if not there, then add new state.
+		uniqueStates.put(d,d);
+        numberOfStates++;
+		return d;
+	}
+
+	public void removeState(DFAState d) {
+		DFAState it = uniqueStates.remove(d);
+		if ( it!=null ) {
+			numberOfStates--;
+		}
+	}
+
+	public Map<DFAState, DFAState> getUniqueStates() {
+		return uniqueStates;
+	}
+
+	/** What is the max state number ever created?  This may be beyond
+	 *  getNumberOfStates().
+	 */
+	public int getMaxStateNumber() {
+		return states.size()-1;
+	}
+
+	public DFAState getState(int stateNumber) {
+		return states.get(stateNumber);
+	}
+
+	public void setState(int stateNumber, DFAState d) {
+		states.set(stateNumber, d);
+	}
+
+	/** Is the DFA reduced?  I.e., does every state have a path to an accept
+     *  state?  If not, don't delete as we need to generate an error indicating
+     *  which paths are "dead ends".  Also tracks list of alts with no accept
+     *  state in the DFA.  Must call verify() first before this makes sense.
+     */
+    public boolean isReduced() {
+        return reduced;
+    }
+
+    /** Is this DFA cyclic?  That is, are there any loops?  If not, then
+     *  the DFA is essentially an LL(k) predictor for some fixed, max k value.
+     *  We can build a series of nested IF statements to match this.  In the
+     *  presence of cycles, we need to build a general DFA and interpret it
+     *  to distinguish between alternatives.
+     */
+    public boolean isCyclic() {
+        return cyclic && getUserMaxLookahead()==0;
+    }
+
+	public boolean isClassicDFA() {
+		return !isCyclic() &&
+			   !nfa.grammar.decisionsWhoseDFAsUsesSemPreds.contains(this) &&
+			   !nfa.grammar.decisionsWhoseDFAsUsesSynPreds.contains(this);
+	}
+
+	public boolean canInlineDecision() {
+		return !isCyclic() &&
+		    !probe.isNonLLStarDecision() &&
+			getNumberOfStates() < CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE;
+	}
+
+	/** Is this DFA derived from the NFA for the Tokens rule? */
+	public boolean isTokensRuleDecision() {
+		if ( nfa.grammar.type!=Grammar.LEXER ) {
+			return false;
+		}
+		NFAState nfaStart = getNFADecisionStartState();
+		Rule r = nfa.grammar.getLocallyDefinedRule(Grammar.ARTIFICIAL_TOKENS_RULENAME);
+		NFAState TokensRuleStart = r.startState;
+		NFAState TokensDecisionStart =
+			(NFAState)TokensRuleStart.transition[0].target;
+		return nfaStart == TokensDecisionStart;
+	}
+
+	/** The user may specify a max, acyclic lookahead for any decision.  No
+	 *  DFA cycles are created when this value, k, is greater than 0.
+	 *  If this decision has no k lookahead specified, then try the grammar.
+	 */
+	public int getUserMaxLookahead() {
+		if ( user_k>=0 ) { // cache for speed
+			return user_k;
+		}
+		user_k = nfa.grammar.getUserMaxLookahead(decisionNumber);
+		return user_k;
+	}
+
+	public boolean getAutoBacktrackMode() {
+		return nfa.grammar.getAutoBacktrackMode(decisionNumber);
+	}
+
+	public void setUserMaxLookahead(int k) {
+		this.user_k = k;
+	}
+
+	/** Return k if decision is LL(k) for some k else return max int
+     */
+	public int getMaxLookaheadDepth() {
+		if ( hasCycle() ) return Integer.MAX_VALUE;
+		// compute to be sure
+		return _getMaxLookaheadDepth(startState, 0);
+	}
+
+	int _getMaxLookaheadDepth(DFAState d, int depth) {
+		// not cyclic; don't worry about termination
+		// fail if pred edge.
+		int max = depth;
+		for (int i=0; i<d.getNumberOfTransitions(); i++) {
+			Transition t = d.transition(i);
+//			if ( t.isSemanticPredicate() ) return Integer.MAX_VALUE;
+			if ( !t.isSemanticPredicate() ) {
+				// if pure pred not gated, it must target stop state; don't count
+				DFAState edgeTarget = (DFAState)t.target;
+				int m = _getMaxLookaheadDepth(edgeTarget, depth+1);
+				max = Math.max(max, m);
+			}
+		}
+		return max;
+	}
+
+	/** Count all disambiguating syn preds (ignore synpred tests
+	 *  for gated edges, which occur for nonambig input sequences).
+	 *  E.g.,
+	 *  x  : (X)=&gt; (X|Y)
+	 *     | X
+	 *     ;
+	 *
+	 *  gives
+	 * 
+	 * .s0-X-&gt;.s1
+	 * .s0-Y&amp;&amp;{synpred1_t}?-&gt;:s2=&gt;1
+	 * .s1-{synpred1_t}?-&gt;:s2=&gt;1
+	 * .s1-{true}?-&gt;:s3=&gt;2
+	 */
+	public boolean hasSynPred() {
+		boolean has = _hasSynPred(startState, new HashSet<DFAState>());
+//		if ( !has ) {
+//			System.out.println("no synpred in dec "+decisionNumber);
+//			FASerializer serializer = new FASerializer(nfa.grammar);
+//			String result = serializer.serialize(startState);
+//			System.out.println(result);
+//		}
+		return has;
+	}
+
+	public boolean getHasSynPred() { return hasSynPred(); } // for ST	
+
+	boolean _hasSynPred(DFAState d, Set<DFAState> busy) {
+		busy.add(d);
+		for (int i=0; i<d.getNumberOfTransitions(); i++) {
+			Transition t = d.transition(i);
+			if ( t.isSemanticPredicate() ) {
+				SemanticContext ctx = t.label.getSemanticContext();
+//				if ( ctx.toString().indexOf("synpred")>=0 ) {
+//					System.out.println("has pred "+ctx.toString()+" "+ctx.isSyntacticPredicate());
+//					System.out.println(((SemanticContext.Predicate)ctx).predicateAST.token);
+//				}
+				if ( ctx.isSyntacticPredicate() ) return true;
+			}
+			DFAState edgeTarget = (DFAState)t.target;
+			if ( !busy.contains(edgeTarget) && _hasSynPred(edgeTarget, busy) ) return true;
+		}
+
+		return false;
+	}
+
+	public boolean hasSemPred() { // has user-defined sempred
+		boolean has = _hasSemPred(startState, new HashSet<DFAState>());
+		return has;
+	}
+
+	boolean _hasSemPred(DFAState d, Set<DFAState> busy) {
+		busy.add(d);
+		for (int i=0; i<d.getNumberOfTransitions(); i++) {
+			Transition t = d.transition(i);
+			if ( t.isSemanticPredicate() ) {
+				SemanticContext ctx = t.label.getSemanticContext();
+				if ( ctx.hasUserSemanticPredicate() ) return true;
+			}
+			DFAState edgeTarget = (DFAState)t.target;
+			if ( !busy.contains(edgeTarget) && _hasSemPred(edgeTarget, busy) ) return true;
+		}
+
+		return false;
+	}
+
+	/** Compute cyclic w/o relying on state computed during analysis. just check. */
+	public boolean hasCycle() {
+		boolean cyclic = _hasCycle(startState, new HashMap<DFAState, Integer>());
+		return cyclic;
+	}
+
+	boolean _hasCycle(DFAState d, Map<DFAState, Integer> busy) {
+		busy.put(d, CYCLIC_BUSY);
+		for (int i=0; i<d.getNumberOfTransitions(); i++) {
+			Transition t = d.transition(i);
+			DFAState target = (DFAState)t.target;
+			int cond = CYCLIC_UNKNOWN;
+			if ( busy.get(target)!=null ) cond = busy.get(target);
+			if ( cond==CYCLIC_BUSY ) return true;
+			if ( cond!=CYCLIC_DONE && _hasCycle(target, busy) ) return true;
+		}
+		busy.put(d, CYCLIC_DONE);
+		return false;
+	}
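The BUSY/DONE markers above implement the standard gray/black DFS coloring for cycle detection. As a stand-alone illustration on a toy graph (with made-up state names, independent of DFAState), a minimal sketch:

    import java.util.*;

    public class CycleSketch {
        static final int BUSY = -1, DONE = 0;

        static boolean hasCycle(String node, Map<String, List<String>> edges,
                                Map<String, Integer> mark) {
            mark.put(node, BUSY);                          // on the current DFS path
            for (String target : edges.getOrDefault(node, Collections.<String>emptyList())) {
                Integer m = mark.get(target);
                if (m != null && m == BUSY) return true;   // back edge -> cycle
                if (m == null && hasCycle(target, edges, mark)) return true;
            }
            mark.put(node, DONE);                          // fully explored, off the path
            return false;
        }

        public static void main(String[] args) {
            Map<String, List<String>> edges = new HashMap<String, List<String>>();
            edges.put("s0", Arrays.asList("s1"));
            edges.put("s1", Arrays.asList("s0"));          // s0 -> s1 -> s0 is a cycle
            System.out.println(hasCycle("s0", edges, new HashMap<String, Integer>()));
        }
    }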
+
+
+    /** Return a list of Integer alt numbers for which no lookahead could
+     *  be computed or for which no single DFA accept state predicts those
+     *  alts.  Must call verify() first before this makes sense.
+     */
+    public List<Integer> getUnreachableAlts() {
+        return unreachableAlts;
+    }
+
+	/** Once this DFA has been built, need to verify that:
+	 *
+	 *  1. it's reduced
+	 *  2. all alts have an accept state
+	 *
+	 *  Elsewhere, in the NFA converter, we need to verify that:
+	 *
+	 *  3. alts i and j have disjoint lookahead if no sem preds
+	 *  4. if sem preds, nondeterministic alts must be sufficiently covered
+	 *
+	 *  This is avoided if analysis bails out for any reason.
+	 */
+	public void verify() {
+		doesStateReachAcceptState(startState);
+	}
+
+    /** figure out if this state eventually reaches an accept state and
+     *  modify the instance variable 'reduced' to indicate if we find
+     *  at least one state that cannot reach an accept state.  This implies
+     *  that the overall DFA is not reduced.  This algorithm should be
+     *  linear in the number of DFA states.
+     *
+     *  The algorithm also tracks which alternatives have no accept state,
+     *  indicating a nondeterminism.
+	 *
+	 *  Also computes whether the DFA is cyclic.
+	 *
+     *  TODO: I call getUniquelyPredicatedAlt too much; cache predicted alt
+     */
+    protected boolean doesStateReachAcceptState(DFAState d) {
+		if ( d.isAcceptState() ) {
+            // accept states have no edges emanating from them so we can return
+            d.setAcceptStateReachable(REACHABLE_YES);
+            // this alt is uniquely predicted, remove from nondeterministic list
+            int predicts = d.getUniquelyPredictedAlt();
+            unreachableAlts.remove(Utils.integer(predicts));
+            return true;
+        }
+
+        // avoid infinite loops
+        d.setAcceptStateReachable(REACHABLE_BUSY);
+
+        boolean anEdgeReachesAcceptState = false;
+        // Visit every transition, track if at least one edge reaches stop state
+		// Cannot terminate when we know this state reaches stop state since
+		// all transitions must be traversed to set status of each DFA state.
+		for (int i=0; i<d.getNumberOfTransitions(); i++) {
+            Transition t = d.transition(i);
+            DFAState edgeTarget = (DFAState)t.target;
+            int targetStatus = edgeTarget.getAcceptStateReachable();
+            if ( targetStatus==REACHABLE_BUSY ) { // avoid cycles; they say nothing
+                cyclic = true;
+                continue;
+            }
+            if ( targetStatus==REACHABLE_YES ) { // avoid unnecessary work
+                anEdgeReachesAcceptState = true;
+                continue;
+            }
+            if ( targetStatus==REACHABLE_NO ) {  // avoid unnecessary work
+                continue;
+            }
+			// target must be REACHABLE_UNKNOWN (i.e., unvisited)
+            if ( doesStateReachAcceptState(edgeTarget) ) {
+                anEdgeReachesAcceptState = true;
+                // have to keep looking so don't break loop
+                // must cover all states even if we find a path for this state
+            }
+        }
+        if ( anEdgeReachesAcceptState ) {
+            d.setAcceptStateReachable(REACHABLE_YES);
+        }
+        else {
+            d.setAcceptStateReachable(REACHABLE_NO);
+			reduced = false;
+        }
+        return anEdgeReachesAcceptState;
+    }
+
+	/** Walk all accept states and find the manually-specified synpreds.
+	 *  Gated preds are not always hoisted
+	 *  I used to do this in the code generator, but that is too late.
+	 *  This converter tries to avoid computing DFA for decisions in
+	 *  syntactic predicates that are not ever used such as those
+	 *  created by autobacktrack mode.
+	 */
+	public void findAllGatedSynPredsUsedInDFAAcceptStates() {
+		int nAlts = getNumberOfAlts();
+		for (int i=1; i<=nAlts; i++) {
+			DFAState a = getAcceptState(i);
+			//System.out.println("alt "+i+": "+a);
+			if ( a!=null ) {
+				Set<? extends SemanticContext> synpreds = a.getGatedSyntacticPredicatesInNFAConfigurations();
+				if ( synpreds!=null ) {
+					// add all the predicates we find (should be just one, right?)
+					for (SemanticContext semctx : synpreds) {
+						// System.out.println("synpreds: "+semctx);
+						nfa.grammar.synPredUsedInDFA(this, semctx);
+					}
+				}
+			}
+		}
+	}
+
+	public NFAState getNFADecisionStartState() {
+        return decisionNFAStartState;
+    }
+
+	public DFAState getAcceptState(int alt) {
+		return altToAcceptState[alt];
+	}
+
+	public void setAcceptState(int alt, DFAState acceptState) {
+		altToAcceptState[alt] = acceptState;
+	}
+
+	public String getDescription() {
+		return description;
+	}
+
+	public int getDecisionNumber() {
+        return decisionNFAStartState.getDecisionNumber();
+    }
+
+	/** If this DFA failed to finish during construction, we might be
+	 *  able to retry with k=1 but we need to know whether it will
+	 *  potentially succeed.  Can only succeed if there is a predicate
+	 *  to resolve the issue.  Don't try if k=1 already as it would
+	 *  cycle forever.  Timeout can retry with k=1 even if no predicate
+	 *  if k!=1.
+	 */
+	public boolean okToRetryDFAWithK1() {
+		boolean nonLLStarOrOverflowAndPredicateVisible =
+			(probe.isNonLLStarDecision()||probe.analysisOverflowed()) &&
+		    predicateVisible; // auto backtrack or manual sem/syn
+		return getUserMaxLookahead()!=1 &&
+			 nonLLStarOrOverflowAndPredicateVisible;
+	}
+
+	public String getReasonForFailure() {
+		StringBuilder buf = new StringBuilder();
+		if ( probe.isNonLLStarDecision() ) {
+			buf.append("non-LL(*)");
+			if ( predicateVisible ) {
+				buf.append(" && predicate visible");
+			}
+		}
+		if ( probe.analysisOverflowed() ) {
+			buf.append("recursion overflow");
+			if ( predicateVisible ) {
+				buf.append(" && predicate visible");
+			}
+		}
+		buf.append("\n");
+		return buf.toString();
+	}
+
+	/** What GrammarAST node (derived from the grammar) is this DFA
+     *  associated with?  It will point to the start of a block or
+     *  the loop back of a (...)+ block etc...
+     */
+    public GrammarAST getDecisionASTNode() {
+        return decisionNFAStartState.associatedASTNode;
+    }
+
+    public boolean isGreedy() {
+		GrammarAST blockAST = nfa.grammar.getDecisionBlockAST(decisionNumber);
+		Object v = nfa.grammar.getBlockOption(blockAST,"greedy");
+		if ( v!=null && v.equals("false") ) {
+			return false;
+		}
+        return true;
+
+	}
+
+    public DFAState newState() {
+        DFAState n = new DFAState(this);
+        n.stateNumber = stateCounter;
+        stateCounter++;
+		states.setSize(n.stateNumber+1);
+		states.set(n.stateNumber, n); // track state num to state
+        return n;
+    }
+
+	public int getNumberOfStates() {
+		if ( getUserMaxLookahead()>0 ) {
+			// if using fixed lookahead then uniqueSets not set
+			return states.size();
+		}
+		return numberOfStates;
+	}
+
+	public int getNumberOfAlts() {
+		return nAlts;
+	}
+
+//	public boolean analysisTimedOut() {
+//		return probe.analysisTimedOut();
+//	}
+
+    protected void initAltRelatedInfo() {
+        unreachableAlts = new LinkedList<Integer>();
+        for (int i = 1; i <= nAlts; i++) {
+            unreachableAlts.add(Utils.integer(i));
+        }
+		altToAcceptState = new DFAState[nAlts+1];
+    }
+
+	@Override
+	public String toString() {
+		FASerializer serializer = new FASerializer(nfa.grammar);
+		if ( startState==null ) {
+			return "";
+		}
+		return serializer.serialize(startState, false);
+	}
+
+	/** EOT (end of token) is a label that indicates when the DFA conversion
+	 *  algorithm would "fall off the end of a lexer rule".  It normally
+	 *  means the default clause.  So for ('a'..'z')+ you would see a DFA
+	 *  with a state that has a..z and EOT emanating from it.  a..z would
+	 *  jump to a state predicting alt 1 and EOT would jump to a state
+	 *  predicting alt 2 (the exit loop branch).  EOT implies anything other
+	 *  than a..z.  If for some reason, the set is "all char" such as with
+	 *  the wildcard '.', then EOT cannot match anything.  For example,
+	 *
+	 *     BLOCK : '{' (.)* '}'
+	 *
+	 *  consumes all char until EOF when greedy=true.  When all edges are
+	 *  combined for the DFA state after matching '}', you will find that
+	 *  it is all char.  The EOT transition has nothing to match and is
+	 *  unreachable.  The findNewDFAStatesAndAddDFATransitions() method
+	 *  must know to ignore the EOT, so we simply remove it from the
+	 *  reachable labels.  Later analysis will find that the exit branch
+	 *  is not predicted by anything.  For greedy=false, we leave only
+	 *  the EOT label indicating that the DFA should stop immediately
+	 *  and predict the exit branch. The reachable labels are often a
+	 *  set of disjoint values like: [<EOT>, 42, {0..41, 43..65534}]
+	 *  due to DFA conversion so must construct a pure set to see if
+	 *  it is same as Label.ALLCHAR.
+	 *
+	 *  Only do this for Lexers.
+	 *
+	 *  If EOT coexists with ALLCHAR:
+	 *  1. If not greedy, modify the labels parameter to be EOT
+	 *  2. If greedy, remove EOT from the labels set
+	protected boolean reachableLabelsEOTCoexistsWithAllChar(OrderedHashSet labels)
+	{
+		Label eot = new Label(Label.EOT);
+		if ( !labels.containsKey(eot) ) {
+			return false;
+		}
+		System.out.println("### contains EOT");
+		boolean containsAllChar = false;
+		IntervalSet completeVocab = new IntervalSet();
+		int n = labels.size();
+		for (int i=0; i<n; i++) {
+			Label rl = (Label)labels.get(i);
+			if ( !rl.equals(eot) ) {
+				completeVocab.addAll(rl.getSet());
+			}
+		}
+		System.out.println("completeVocab="+completeVocab);
+		if ( completeVocab.equals(Label.ALLCHAR) ) {
+			System.out.println("all char");
+			containsAllChar = true;
+		}
+		return containsAllChar;
+	}
+	 */
+}
+
diff --git a/tool/src/main/java/org/antlr/analysis/DFAOptimizer.java b/tool/src/main/java/org/antlr/analysis/DFAOptimizer.java
new file mode 100644
index 0000000..ab7fba9
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/DFAOptimizer.java
@@ -0,0 +1,265 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.misc.Utils;
+import org.antlr.tool.Grammar;
+
+import java.util.HashSet;
+import java.util.Set;
+
+/** A module to perform optimizations on DFAs.
+ *
+ *  I could more easily (and more quickly) do some optimizations (such as
+ *  PRUNE_EBNF_EXIT_BRANCHES) during DFA construction, but then it
+ *  messes up the determinism checking.  For example, it looks like
+ *  loop exit branches are unreachable if you prune exit branches
+ *  during DFA construction and before determinism checks.
+ *
+ *  In general, ANTLR's NFA&rarr;DFA&rarr;codegen pipeline seems very robust
+ *  to me which I attribute to a uniform and consistent set of data
+ *  structures.  Regardless of what I want to "say"/implement, I do so
+ *  within the confines of, for example, a DFA.  The code generator
+ *  can then just generate code--it doesn't have to do much thinking.
+ *  Putting optimizations in the code gen code really starts to make
+ *  it a spaghetti factory (uh oh, now I'm hungry!).  The pipeline is
+ *  very testable; each stage has well defined input/output pairs.
+ *
+ *  ### Optimization: PRUNE_EBNF_EXIT_BRANCHES
+ *
+ *  There is no need to test EBNF block exit branches.  Not only is it
+ *  an unneeded computation, but counter-intuitively, you actually get
+ *  better errors. You can report an error at the missing or extra
+ *  token rather than as soon as you've figured out you will fail.
+ *
+ *  Imagine optional block "( DOT CLASS )? SEMI".  ANTLR generates:
+ *
+ *  int alt=0;
+ *  if ( input.LA(1)==DOT ) {
+ *      alt=1;
+ *  }
+ *  else if ( input.LA(1)==SEMI ) {
+ *      alt=2;
+ *  }
+ *
+ *  Clearly, since Parser.match() will ultimately find the error, we
+ *  do not want to report an error nor do we want to bother testing
+ *  lookahead against what follows the (...)?  We want to generate
+ *  simply "should I enter the subrule?":
+ *
+ *  int alt=2;
+ *  if ( input.LA(1)==DOT ) {
+ *      alt=1;
+ *  }
+ *
+ *  NOTE 1. Greedy loops cannot be optimized in this way.  For example,
+ *  "(greedy=false:'x'|.)* '\n'".  You specifically need the exit branch
+ *  to tell you when to terminate the loop as the same input actually
+ *  predicts one of the alts (i.e., staying in the loop).
+ *
+ *  NOTE 2.  I do not optimize cyclic DFAs at the moment as it doesn't
+ *  seem to work. ;)  I'll have to investigate later to see what work I
+ *  can do on cyclic DFAs to make them have fewer edges.  Might have
+ *  something to do with the EOT token.
+ *
+ *  ### Optimization: PRUNE_SUPERFLUOUS_EOT_EDGES
+ *
+ *  When a token is a subset of another such as the following rules, ANTLR
+ *  quietly assumes the first token to resolve the ambiguity.
+ *
+ *  EQ			: '=' ;
+ *  ASSIGNOP	: '=' | '+=' ;
+ *
+ *  It can yield states that have only a single edge on EOT to an accept
+ *  state.  This is a waste and messes up my code generation. ;)  If
+ *  Tokens rule DFA goes
+ *
+ * 		s0 -'='-&gt; s3 -EOT-&gt; s5 (accept)
+ *
+ *  then s5 should be pruned and s3 should be made an accept.  Do NOT do this
+ *  for keyword versus ID as the state with EOT edge emanating from it will
+ *  also have another edge.
+ *
+ *  ### Optimization: COLLAPSE_ALL_INCIDENT_EDGES
+ *
+ *  Done during DFA construction.  See method addTransition() in
+ *  NFAToDFAConverter.
+ *
+ *  ### Optimization: MERGE_STOP_STATES
+ *
+ *  Done during DFA construction.  See addDFAState() in NFAToDFAConverter.
+ */
+public class DFAOptimizer {
+	public static boolean PRUNE_EBNF_EXIT_BRANCHES = true;
+	public static boolean PRUNE_TOKENS_RULE_SUPERFLUOUS_EOT_EDGES = true;
+	public static boolean COLLAPSE_ALL_PARALLEL_EDGES = true;
+	public static boolean MERGE_STOP_STATES = true;
+
+	/** Used by DFA state machine generator to avoid infinite recursion
+	 *  resulting from cycles in the DFA.  This is a set of int state #s.
+	 *  This is a side-effect of calling optimize; can't clear after use
+	 *  because code gen needs it.
+	 */
+	protected Set<Integer> visited = new HashSet<Integer>();
+
+    protected Grammar grammar;
+
+    public DFAOptimizer(Grammar grammar) {
+		this.grammar = grammar;
+    }
+
+	public void optimize() {
+		// optimize each DFA in this grammar
+		for (int decisionNumber=1;
+			 decisionNumber<=grammar.getNumberOfDecisions();
+			 decisionNumber++)
+		{
+			DFA dfa = grammar.getLookaheadDFA(decisionNumber);
+			optimize(dfa);
+		}
+	}
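+
+	/* A minimal usage sketch (hypothetical driver code, not part of this
+	 *  class): once the grammar's lookahead DFAs have been built, the tool
+	 *  is expected to run the optimizer once over every decision.
+	 *
+	 *      DFAOptimizer optimizer = new DFAOptimizer(grammar);
+	 *      optimizer.optimize(); // visits decisions 1..grammar.getNumberOfDecisions()
+	 */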
+
+	protected void optimize(DFA dfa) {
+		if ( dfa==null ) {
+			return; // nothing to do
+		}
+		/*
+		System.out.println("Optimize DFA "+dfa.decisionNFAStartState.decisionNumber+
+						   " num states="+dfa.getNumberOfStates());
+		*/
+		//long start = System.currentTimeMillis();
+		if ( PRUNE_EBNF_EXIT_BRANCHES && dfa.canInlineDecision() ) {
+			visited.clear();
+			int decisionType =
+				dfa.getNFADecisionStartState().decisionStateType;
+			if ( dfa.isGreedy() &&
+				 (decisionType==NFAState.OPTIONAL_BLOCK_START ||
+				 decisionType==NFAState.LOOPBACK) )
+			{
+				optimizeExitBranches(dfa.startState);
+			}
+		}
+		// If the Tokens rule has syntactically ambiguous rules, try to prune
+		if ( PRUNE_TOKENS_RULE_SUPERFLUOUS_EOT_EDGES &&
+			 dfa.isTokensRuleDecision() &&
+			 dfa.probe.stateToSyntacticallyAmbiguousTokensRuleAltsMap.size()>0 )
+		{
+			visited.clear();
+			optimizeEOTBranches(dfa.startState);
+		}
+
+		/* ack...code gen needs this, cannot optimize
+		visited.clear();
+		unlinkUnneededStateData(dfa.startState);
+		*/
+		//long stop = System.currentTimeMillis();
+		//System.out.println("minimized in "+(int)(stop-start)+" ms");
+    }
+
+	protected void optimizeExitBranches(DFAState d) {
+		Integer sI = Utils.integer(d.stateNumber);
+		if ( visited.contains(sI) ) {
+			return; // already visited
+		}
+		visited.add(sI);
+		int nAlts = d.dfa.getNumberOfAlts();
+		for (int i = 0; i < d.getNumberOfTransitions(); i++) {
+			Transition edge = d.transition(i);
+			DFAState edgeTarget = ((DFAState)edge.target);
+			/*
+			System.out.println(d.stateNumber+"-"+
+							   edge.label.toString(d.dfa.nfa.grammar)+"->"+
+							   edgeTarget.stateNumber);
+			*/
+			// if target is an accept state and that alt is the exit alt
+			if ( edgeTarget.isAcceptState() &&
+				edgeTarget.getUniquelyPredictedAlt()==nAlts)
+			{
+				/*
+				System.out.println("ignoring transition "+i+" to max alt "+
+					d.dfa.getNumberOfAlts());
+				*/
+				d.removeTransition(i);
+				i--; // back up one so that i++ of loop iteration stays within bounds
+			}
+			optimizeExitBranches(edgeTarget);
+		}
+	}
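+
+	/* Worked example (a sketch of the class comment above): for the optional
+	 *  block "( DOT CLASS )? SEMI", the decision DFA has a DOT edge to an
+	 *  accept state predicting alt 1 and a SEMI edge to an accept state
+	 *  predicting alt 2, the exit branch (nAlts).  optimizeExitBranches()
+	 *  removes the SEMI transition, so the generated decision only tests
+	 *  input.LA(1)==DOT and otherwise defaults to the exit alternative.
+	 */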
+
+	protected void optimizeEOTBranches(DFAState d) {
+		Integer sI = Utils.integer(d.stateNumber);
+		if ( visited.contains(sI) ) {
+			return; // already visited
+		}
+		visited.add(sI);
+		for (int i = 0; i < d.getNumberOfTransitions(); i++) {
+			Transition edge = d.transition(i);
+			DFAState edgeTarget = ((DFAState)edge.target);
+			/*
+			System.out.println(d.stateNumber+"-"+
+							   edge.label.toString(d.dfa.nfa.grammar)+"->"+
+							   edgeTarget.stateNumber);
+			*/
+			// if only one edge coming out, it is EOT, and target is accept prune
+			if ( PRUNE_TOKENS_RULE_SUPERFLUOUS_EOT_EDGES &&
+				edgeTarget.isAcceptState() &&
+				d.getNumberOfTransitions()==1 &&
+				edge.label.isAtom() &&
+				edge.label.getAtom()==Label.EOT )
+			{
+				//System.out.println("state "+d+" can be pruned");
+				// remove the superfluous EOT edge
+				d.removeTransition(i);
+				d.setAcceptState(true); // make it an accept state
+				// force it to uniquely predict the originally predicted state
+				d.cachedUniquelyPredicatedAlt =
+					edgeTarget.getUniquelyPredictedAlt();
+				i--; // back up one so that i++ of loop iteration stays within bounds
+			}
+			optimizeEOTBranches(edgeTarget);
+		}
+	}
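+
+	/* Worked example (a sketch of the class comment above): given
+	 *  EQ : '=' ; and ASSIGNOP : '=' | '+=' ;, the Tokens rule DFA contains
+	 *  s0 -'='-> s3 -EOT-> s5 (accept, predicts EQ).  Because s3's only edge
+	 *  is an EOT atom leading to an accept state, optimizeEOTBranches()
+	 *  removes that edge, marks s3 as an accept state, and caches the alt
+	 *  predicted by s5 as s3's uniquely predicted alt.
+	 */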
+
+	/** Walk DFA states, unlinking the nfa configs and whatever else I
+	 *  can to reduce memory footprint.
+	protected void unlinkUnneededStateData(DFAState d) {
+		Integer sI = Utils.integer(d.stateNumber);
+		if ( visited.contains(sI) ) {
+			return; // already visited
+		}
+		visited.add(sI);
+		d.nfaConfigurations = null;
+		for (int i = 0; i < d.getNumberOfTransitions(); i++) {
+			Transition edge = (Transition) d.transition(i);
+			DFAState edgeTarget = ((DFAState)edge.target);
+			unlinkUnneededStateData(edgeTarget);
+		}
+	}
+	 */
+
+}
diff --git a/tool/src/main/java/org/antlr/analysis/DFAState.java b/tool/src/main/java/org/antlr/analysis/DFAState.java
new file mode 100644
index 0000000..541d81a
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/DFAState.java
@@ -0,0 +1,780 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.misc.IntSet;
+import org.antlr.misc.MultiMap;
+import org.antlr.misc.OrderedHashSet;
+import org.antlr.misc.Utils;
+import org.antlr.tool.Grammar;
+
+import java.util.*;
+
+/** A DFA state represents a set of possible NFA configurations.
+ *  As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
+ *  to keep track of all possible states the NFA can be in after
+ *  reading each input symbol.  That is to say, after reading
+ *  input a1a2..an, the DFA is in a state that represents the
+ *  subset T of the states of the NFA that are reachable from the
+ *  NFA's start state along some path labeled a1a2..an."
+ *  In conventional NFA&rarr;DFA conversion, therefore, the subset T
+ *  would be a bitset representing the set of states the
+ *  NFA could be in.  We need to track the alt predicted by each
+ *  state as well, however.  More importantly, we need to maintain
+ *  a stack of states, tracking the closure operations as they
+ *  jump from rule to rule, emulating rule invocations (method calls).
+ *  Recall that NFAs do not normally have a stack like a pushdown-machine
+ *  so I have to add one to simulate the proper lookahead sequences for
+ *  the underlying LL grammar from which the NFA was derived.
+ *
+ *  I use a list of NFAConfiguration objects.  An NFAConfiguration
+ *  is both a state (ala normal conversion) and an NFAContext describing
+ *  the chain of rules (if any) followed to arrive at that state.  There
+ *  is also the semantic context, which is the "set" of predicates found
+ *  on the path to this configuration.
+ *
+ *  A DFA state may have multiple references to a particular state,
+ *  but with different NFAContexts (with same or different alts)
+ *  meaning that state was reached via a different set of rule invocations.
+ */
+public class DFAState extends State {
+    public static final int INITIAL_NUM_TRANSITIONS = 4;
+	public static final int PREDICTED_ALT_UNSET = NFA.INVALID_ALT_NUMBER-1;
+
+    /** We are part of what DFA?  Use this ref to get access to the
+     *  context trees for an alt.
+     */
+    public DFA dfa;
+
+    /** Track the transitions emanating from this DFA state.  The List
+     *  elements are Transition objects.
+     */
+    protected List<Transition> transitions =
+		new ArrayList<Transition>(INITIAL_NUM_TRANSITIONS);
+
+	/** When doing an acyclic DFA, this is the number of lookahead symbols
+	 *  consumed to reach this state.  This value may be nonzero for most
+	 *  dfa states, but it is only a valid value if the user has specified
+	 *  a max fixed lookahead.
+	 */
+    protected int k;
+
+    /** The NFA&rarr;DFA algorithm may terminate leaving some states
+     *  without a path to an accept state, implying that upon certain
+     *  input, the decision is not deterministic--no decision about
+     *  predicting a unique alternative can be made.  Recall that an
+     *  accept state is one in which a unique alternative is predicted.
+     */
+    protected int acceptStateReachable = DFA.REACHABLE_UNKNOWN;
+
+    /** Rather than recheck every NFA configuration in a DFA state (after
+     *  resolving) in findNewDFAStatesAndAddDFATransitions just check
+     *  this boolean.  Saves a linear walk per DFA state creation.
+     *  Every little bit helps.
+     */
+    protected boolean resolvedWithPredicates = false;
+
+	/** If a closure operation finds that we tried to invoke the same
+	 *  rule too many times (stack would grow beyond a threshold), it
+	 *  marks the state as aborted and notifies the DecisionProbe.
+	 */
+	public boolean abortedDueToRecursionOverflow = false;
+
+	/** If we detect recursion on more than one alt, decision is non-LL(*),
+	 *  but try to isolate it to only those states whose closure operations
+	 *  detect recursion.  There may be other alts that are cool:
+	 *
+	 *  a : recur '.'
+	 *    | recur ';'
+	 *    | X Y  // LL(2) decision; don't abort and use k=1 plus backtracking
+	 *    | X Z
+	 *    ;
+	 *
+	 *  12/13/2007: Actually this has caused problems.  If k=*, must terminate
+	 *  and throw out entire DFA; retry with k=1.  Since recursive, do not
+	 *  attempt more closure ops as it may take forever.  Exception thrown
+	 *  now and we simply report the problem.  If synpreds exist, I'll retry
+	 *  with k=1.
+	 */
+	protected boolean abortedDueToMultipleRecursiveAlts = false;
+
+	/** Build up the hash code for this state as NFA configurations
+     *  are added to its monotonically increasing list of configurations.
+     */
+    protected int cachedHashCode;
+
+	protected int cachedUniquelyPredicatedAlt = PREDICTED_ALT_UNSET;
+
+	public int minAltInConfigurations=Integer.MAX_VALUE;
+
+	public boolean atLeastOneConfigurationHasAPredicate = false;
+
+	/** The set of NFA configurations (state,alt,context) for this DFA state */
+    public OrderedHashSet<NFAConfiguration> nfaConfigurations =
+		new OrderedHashSet<NFAConfiguration>();
+
+	public List<NFAConfiguration> configurationsWithLabeledEdges =
+		new ArrayList<NFAConfiguration>();
+
+	/** Used to prevent the closure operation from looping to itself and
+     *  hence looping forever.  Sensitive to the NFA state, the alt, and
+     *  the stack context.  This is just the nfa config set because we want to
+	 *  prevent closures only on states contributed by closure not reach
+	 *  operations.
+	 *
+	 *  Two configurations identical including semantic context are
+	 *  considered the same closure computation.  @see NFAToDFAConverter.closureBusy().
+     */
+	protected Set<NFAConfiguration> closureBusy = new HashSet<NFAConfiguration>();
+
+	/** As this state is constructed (i.e., as NFA states are added), we
+     *  can easily check for non-epsilon transitions because the only
+     *  transition that could be a valid label is transition(0).  When we
+     *  process this node eventually, we'll have to walk all states looking
+     *  for all possible transitions.  That is of the order: size(label space)
+     *  times size(nfa states), which can be pretty damn big.  It's better
+     *  to simply track possible labels.
+     */
+    protected OrderedHashSet<Label> reachableLabels;
+
+    public DFAState(DFA dfa) {
+        this.dfa = dfa;
+    }
+
+	public void reset() {
+		//nfaConfigurations = null; // getGatedPredicatesInNFAConfigurations needs
+		configurationsWithLabeledEdges = null;
+		closureBusy = null;
+		reachableLabels = null;
+	}
+
+	@Override
+	public Transition transition(int i) {
+        return transitions.get(i);
+    }
+
+	@Override
+    public int getNumberOfTransitions() {
+        return transitions.size();
+    }
+
+	@Override
+    public void addTransition(Transition t) {
+        transitions.add(t);
+    }
+
+	/** Add a transition from this state to target with label.  Return
+	 *  the transition number from 0..n-1.
+	 */
+    public int addTransition(DFAState target, Label label) {
+		transitions.add( new Transition(label, target) );
+		return transitions.size()-1;
+    }
+
+    public Transition getTransition(int trans) {
+        return transitions.get(trans);
+    }
+
+	public void removeTransition(int trans) {
+		transitions.remove(trans);
+	}
+
+    /** Add an NFA configuration to this DFA node.  Add uniquely
+     *  an NFA state/alt/syntactic&amp;semantic context (chain of invoking state(s)
+     *  and semantic predicate contexts).
+     *
+     *  I don't see how there could be two configurations with same
+     *  state|alt|synCtx and different semantic contexts because the
+     *  semantic contexts are computed along the path to a particular state
+     *  so those two configurations would have to have the same predicate.
+     *  Nonetheless, the addition of configurations is unique on all
+     *  configuration info.  I guess I'm saying that syntactic context
+     *  implies semantic context as the latter is computed according to the
+     *  former.
+     *
+     *  As we add configurations to this DFA state, track the set of all possible
+     *  transition labels so we can simply walk it later rather than doing a
+     *  loop over all possible labels in the NFA.
+     */
+    public void addNFAConfiguration(NFAState state, NFAConfiguration c) {
+		if ( nfaConfigurations.contains(c) ) {
+            return;
+        }
+
+        nfaConfigurations.add(c);
+
+		// track min alt rather than compute later
+		if ( c.alt < minAltInConfigurations ) {
+			minAltInConfigurations = c.alt;
+		}
+
+		if ( c.semanticContext!=SemanticContext.EMPTY_SEMANTIC_CONTEXT ) {
+			atLeastOneConfigurationHasAPredicate = true;
+		}
+
+		// update hashCode; for some reason using context.hashCode() also
+        // makes the GC take like 70% of the CPU and is slow!
+        cachedHashCode += c.state + c.alt;
+
+		// update reachableLabels
+		// We're adding an NFA state; check to see if it has a non-epsilon edge
+		if ( state.transition[0] != null ) {
+			Label label = state.transition[0].label;
+			if ( !(label.isEpsilon()||label.isSemanticPredicate()) ) {
+				// this NFA state has a non-epsilon edge, track for fast
+				// walking later when we do reach on this DFA state we're
+				// building.
+				configurationsWithLabeledEdges.add(c);
+				if ( state.transition[1] ==null ) {
+					// later we can check this to ignore o-A->o states in closure
+					c.singleAtomTransitionEmanating = true;
+				}
+				addReachableLabel(label);
+			}
+		}
+    }
+
+	public NFAConfiguration addNFAConfiguration(NFAState state,
+												int alt,
+												NFAContext context,
+												SemanticContext semanticContext)
+	{
+		NFAConfiguration c = new NFAConfiguration(state.stateNumber,
+												  alt,
+												  context,
+												  semanticContext);
+		addNFAConfiguration(state, c);
+		return c;
+	}
+
+	/** Add label uniquely and disjointly; intersection with
+     *  another set or int/char forces breaking up the set(s).
+     *
+     *  Example, if reachable list of labels is [a..z, {k,9}, 0..9],
+     *  the disjoint list will be [{a..j,l..z}, k, 9, 0..8].
+     *
+     *  As we add NFA configurations to a DFA state, we might as well track
+     *  the set of all possible transition labels to make the DFA conversion
+     *  more efficient.  W/o the reachable labels, we'd need to check the
+     *  whole vocabulary space (could be 0..\uFFFF)!  The problem is that
+     *  labels can be sets, which may overlap with int labels or other sets.
+     *  As we need a deterministic set of transitions from any
+     *  state in the DFA, we must make the reachable labels set disjoint.
+     *  This operation amounts to finding the character classes for this
+     *  DFA state, whereas tools like flex, which need to generate a
+     *  homogeneous DFA, must compute char classes across all states.
+     *  We are going to generate DFAs with heterogeneous states so we
+     *  only care that the set of transitions out of a single state are
+     *  unique. :)
+     *
+     *  The idea for adding a new set, t, is to look for overlap with the
+     *  elements of existing list s.  Upon overlap, replace
+     *  existing set s[i] with two new disjoint sets, s[i]-t and s[i]&amp;t.
+     *  (if s[i]-t is nil, don't add).  The remainder is t-s[i], which is
+     *  what you want to add to the set minus what was already there.  The
+     *  remainder must then be compared against the i+1..n elements in s
+     *  looking for another collision.  Each collision results in a smaller
+     *  and smaller remainder.  Stop when you run out of s elements or
+     *  remainder goes to nil.  If remainder is non nil when you run out of
+     *  s elements, then add remainder to the end.
+     *
+     *  Single element labels are treated as sets to make the code uniform.
+     */
+    protected void addReachableLabel(Label label) {
+		if ( reachableLabels==null ) {
+			reachableLabels = new OrderedHashSet<Label>();
+		}
+		/*
+		System.out.println("addReachableLabel to state "+dfa.decisionNumber+"."+stateNumber+": "+label.getSet().toString(dfa.nfa.grammar));
+		System.out.println("start of add to state "+dfa.decisionNumber+"."+stateNumber+": " +
+				"reachableLabels="+reachableLabels.toString());
+				*/
+		if ( reachableLabels.contains(label) ) { // exact label present
+            return;
+        }
+        IntSet t = label.getSet();
+        IntSet remainder = t; // remainder starts out as whole set to add
+        int n = reachableLabels.size(); // only look at initial elements
+        // walk the existing list looking for the collision
+        for (int i=0; i<n; i++) {
+			Label rl = reachableLabels.get(i);
+            /*
+			System.out.println("comparing ["+i+"]: "+label.toString(dfa.nfa.grammar)+" & "+
+                    rl.toString(dfa.nfa.grammar)+"="+
+                    intersection.toString(dfa.nfa.grammar));
+            */
+			if ( !Label.intersect(label, rl) ) {
+                continue;
+            }
+			//System.out.println(label+" collides with "+rl);
+
+			// For any (s_i, t) with s_i&t!=nil replace with (s_i-t, s_i&t)
+            // (ignoring s_i-t if nil; don't put in list)
+
+            // Replace existing s_i with intersection since we
+            // know that will always be a non nil character class
+			IntSet s_i = rl.getSet();
+			IntSet intersection = s_i.and(t);
+            reachableLabels.set(i, new Label(intersection));
+
+            // Compute s_i-t to see what is in current set and not in incoming
+            IntSet existingMinusNewElements = s_i.subtract(t);
+			//System.out.println(s_i+"-"+t+"="+existingMinusNewElements);
+            if ( !existingMinusNewElements.isNil() ) {
+                // found a new character class, add to the end (doesn't affect
+                // outer loop duration due to n computation a priori).
+                Label newLabel = new Label(existingMinusNewElements);
+                reachableLabels.add(newLabel);
+            }
+
+			/*
+            System.out.println("after collision, " +
+                    "reachableLabels="+reachableLabels.toString());
+					*/
+
+            // anything left to add to the reachableLabels?
+            remainder = t.subtract(s_i);
+            if ( remainder.isNil() ) {
+                break; // nothing left to add to set.  done!
+            }
+
+            t = remainder;
+        }
+        if ( !remainder.isNil() ) {
+			/*
+			System.out.println("before add remainder to state "+dfa.decisionNumber+"."+stateNumber+": " +
+					"reachableLabels="+reachableLabels.toString());
+			System.out.println("remainder state "+dfa.decisionNumber+"."+stateNumber+": "+remainder.toString(dfa.nfa.grammar));
+            */
+			Label newLabel = new Label(remainder);
+            reachableLabels.add(newLabel);
+        }
+		/*
+		System.out.println("#END of add to state "+dfa.decisionNumber+"."+stateNumber+": " +
+				"reachableLabels="+reachableLabels.toString());
+				*/
+    }
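+
+    /* A short illustrative trace of the disjoint-label maintenance above:
+     *  suppose reachableLabels is [ 'a'..'z', '0'..'9' ] and we add the
+     *  label 'k'.  'k' overlaps 'a'..'z', so that element is replaced by the
+     *  intersection {'k'} and the leftover {'a'..'j','l'..'z'} is appended;
+     *  the remainder 'k' minus 'a'..'z' is nil, so the walk stops.  The
+     *  result is the disjoint list [ {'k'}, '0'..'9', {'a'..'j','l'..'z'} ].
+     */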
+
+    public OrderedHashSet<Label> getReachableLabels() {
+        return reachableLabels;
+    }
+
+	public void setNFAConfigurations(OrderedHashSet<NFAConfiguration> configs) {
+		this.nfaConfigurations = configs;
+	}
+
+    /** A decent hash for a DFA state is the sum of the NFA state/alt pairs.
+     *  This is used when we add DFAState objects to the DFA.states Map and
+     *  when we compare DFA states.  Computed in addNFAConfiguration()
+     */
+	@Override
+    public int hashCode() {
+		if ( cachedHashCode==0 ) {
+			// LL(1) algorithm doesn't use NFA configurations, which
+			// dynamically compute hashcode; must have something; use super
+			return super.hashCode();
+		}
+		return cachedHashCode;
+    }
+
+    /** Two DFAStates are equal if their NFA configuration sets are the
+	 *  same. This method is used to see if a DFA state already exists.
+	 *
+     *  Because the number of alternatives and number of NFA configurations are
+     *  finite, there is a finite number of DFA states that can be processed.
+     *  This is necessary to show that the algorithm terminates.
+	 *
+	 *  Cannot test the DFA state numbers here because in DFA.addState we need
+	 *  to know if any other state exists that has this exact set of NFA
+	 *  configurations.  The DFAState state number is irrelevant.
+     */
+	@Override
+    public boolean equals(Object o) {
+		// compare set of NFA configurations in this set with other
+        DFAState other = (DFAState)o;
+		return this.nfaConfigurations.equals(other.nfaConfigurations);
+	}
+
+    /** Walk each configuration and if they are all the same alt, return
+     *  that alt else return NFA.INVALID_ALT_NUMBER.  Ignore resolved
+     *  configurations, but don't ignore resolveWithPredicate configs
+     *  because this state should not be an accept state.  We need to add
+     *  this to the work list and then have semantic predicate edges
+     *  emanating from it.
+     */
+    public int getUniquelyPredictedAlt() {
+		if ( cachedUniquelyPredicatedAlt!=PREDICTED_ALT_UNSET ) {
+			return cachedUniquelyPredicatedAlt;
+		}
+        int alt = NFA.INVALID_ALT_NUMBER;
+		int numConfigs = nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = nfaConfigurations.get(i);
+			// ignore anything we resolved; predicates will still result
+			// in transitions out of this state, so must count those
+			// configurations; i.e., don't ignore resolveWithPredicate configs
+			if ( configuration.resolved ) {
+				continue;
+			}
+			if ( alt==NFA.INVALID_ALT_NUMBER ) {
+				alt = configuration.alt; // found first nonresolved alt
+			}
+			else if ( configuration.alt!=alt ) {
+				return NFA.INVALID_ALT_NUMBER;
+			}
+		}
+		this.cachedUniquelyPredicatedAlt = alt;
+        return alt;
+    }
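+
+    /* Illustrative example: if the unresolved configurations in this state
+     *  are (s3, alt 1, [$]) and (s5, alt 1, [28 $]), the state uniquely
+     *  predicts alt 1 and that value is cached.  If a third unresolved
+     *  configuration predicted alt 2, this would return
+     *  NFA.INVALID_ALT_NUMBER instead.
+     */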
+
+	/** Return the uniquely mentioned alt from the NFA configurations;
+	 *  Ignore the resolved bit etc...  Return INVALID_ALT_NUMBER
+	 *  if there is more than one alt mentioned.
+	 */ 
+	public int getUniqueAlt() {
+		int alt = NFA.INVALID_ALT_NUMBER;
+		int numConfigs = nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = nfaConfigurations.get(i);
+			if ( alt==NFA.INVALID_ALT_NUMBER ) {
+				alt = configuration.alt; // found first alt
+			}
+			else if ( configuration.alt!=alt ) {
+				return NFA.INVALID_ALT_NUMBER;
+			}
+		}
+		return alt;
+	}
+
+	/** When more than one alternative can match the same input, the first
+	 *  alternative is chosen to resolve the conflict.  The other alts
+	 *  are "turned off" by setting the "resolved" flag in the NFA
+	 *  configurations.  Return the set of disabled alternatives.  For
+	 *
+	 *  a : A | A | A ;
+	 *
+	 *  this method returns {2,3} as disabled.  This does not mean that
+	 *  the alternative is totally unreachable, it just means that for this
+	 *  DFA state, that alt is disabled.  There may be other accept states
+	 *  for that alt.
+	 */
+	public Set<Integer> getDisabledAlternatives() {
+		Set<Integer> disabled = new LinkedHashSet<Integer>();
+		int numConfigs = nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = nfaConfigurations.get(i);
+			if ( configuration.resolved ) {
+				disabled.add(Utils.integer(configuration.alt));
+			}
+		}
+		return disabled;
+	}
+
+	protected Set<Integer> getNonDeterministicAlts() {
+		int user_k = dfa.getUserMaxLookahead();
+		if ( user_k>0 && user_k==k ) {
+			// if fixed lookahead, then more than 1 alt is a nondeterminism
+			// if we have hit the max lookahead
+			return getAltSet();
+		}
+		else if ( abortedDueToMultipleRecursiveAlts || abortedDueToRecursionOverflow ) {
+			// if we had to abort for non-LL(*) state assume all alts are a problem
+			return getAltSet();
+		}
+		else {
+			return getConflictingAlts();
+		}
+	}
+
+    /** Walk each NFA configuration in this DFA state looking for a conflict
+     *  where (s|i|ctx) and (s|j|ctx) exist, indicating that state s with
+     *  context conflicting ctx predicts alts i and j.  Return an Integer set
+	 *  of the alternative numbers that conflict.  Two contexts conflict if
+	 *  they are equal or one is a stack suffix of the other or one is
+	 *  the empty context.
+	 *
+     *  Use a hash table to record the lists of configs for each state
+	 *  as they are encountered.  We need only consider states for which
+	 *  there is more than one configuration.  The configurations' predicted
+	 *  alt must be different or must have different contexts to avoid a
+	 *  conflict.
+	 *
+	 *  Don't report conflicts for DFA states that have conflicting Tokens
+	 *  rule NFA states; they will be resolved in favor of the first rule.
+     */
+    protected Set<Integer> getConflictingAlts() {
+		// TODO this is called multiple times: cache result?
+		//System.out.println("getNondetAlts for DFA state "+stateNumber);
+ 		Set<Integer> nondeterministicAlts = new HashSet<Integer>();
+
+		// If only 1 NFA conf then no way it can be nondeterministic;
+		// save the overhead.  There are many o-a->o NFA transitions
+		// and so we save a hash map and iterator creation for each
+		// state.
+		int numConfigs = nfaConfigurations.size();
+		if ( numConfigs <=1 ) {
+			return null;
+		}
+
+		// First get a list of configurations for each state.
+		// Most of the time, each state will have one associated configuration.
+		MultiMap<Integer, NFAConfiguration> stateToConfigListMap =
+			new MultiMap<Integer, NFAConfiguration>();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = nfaConfigurations.get(i);
+			Integer stateI = Utils.integer(configuration.state);
+			stateToConfigListMap.map(stateI, configuration);
+		}
+		// potential conflicts are states with > 1 configuration and diff alts
+		Set<Integer> states = stateToConfigListMap.keySet();
+		int numPotentialConflicts = 0;
+		for (Integer stateI : states) {
+			boolean thisStateHasPotentialProblem = false;
+			List<NFAConfiguration> configsForState = stateToConfigListMap.get(stateI);
+			int alt=0;
+			int numConfigsForState = configsForState.size();
+			for (int i = 0; i < numConfigsForState && numConfigsForState>1 ; i++) {
+				NFAConfiguration c = configsForState.get(i);
+				if ( alt==0 ) {
+					alt = c.alt;
+				}
+				else if ( c.alt!=alt ) {
+					/*
+					System.out.println("potential conflict in state "+stateI+
+									   " configs: "+configsForState);
+					*/
+					// 11/28/2005: don't report closures that pinch back
+					// together in Tokens rule.  We want to silently resolve
+					// to the first token definition ala lex/flex by ignoring
+					// these conflicts.
+					// Also this ensures that lexers look for more and more
+					// characters (longest match) before resorting to predicates.
+					// TestSemanticPredicates.testLexerMatchesLongestThenTestPred()
+					// for example would terminate at state s1 and test predicate
+					// meaning input "ab" would test preds to decide what to
+					// do but it should match rule C w/o testing preds.
+					if ( dfa.nfa.grammar.type!=Grammar.LEXER ||
+						 !dfa.decisionNFAStartState.enclosingRule.name.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) )
+					{
+						numPotentialConflicts++;
+						thisStateHasPotentialProblem = true;
+					}
+				}
+			}
+			if ( !thisStateHasPotentialProblem ) {
+				// remove NFA state's configurations from
+				// further checking; no issues with it
+				// (can't remove as it's concurrent modification; set to null)
+				stateToConfigListMap.put(stateI, null);
+			}
+		}
+
+		// a fast check for potential issues; most states have none
+		if ( numPotentialConflicts==0 ) {
+			return null;
+		}
+
+		// we have a potential problem, so now go through config lists again
+		// looking for different alts (only states with potential issues
+		// are left in the states set).  Now we will check context.
+		// For example, the list of configs for NFA state 3 in some DFA
+		// state might be:
+		//   [3|2|[28 18 $], 3|1|[28 $], 3|1, 3|2]
+		// I want to create a map from context to alts looking for overlap:
+		//   [28 18 $] -> 2
+		//   [28 $] -> 1
+		//   [$] -> 1,2
+		// Indeed a conflict exists as same state 3, same context [$], predicts
+		// alts 1 and 2.
+		// walk each state with potential conflicting configurations
+		for (Integer stateI : states) {
+			List<NFAConfiguration> configsForState = stateToConfigListMap.get(stateI);
+			// compare each configuration pair s, t to ensure:
+			// s.ctx different than t.ctx if s.alt != t.alt
+			int numConfigsForState = 0;
+			if ( configsForState!=null ) {
+				numConfigsForState = configsForState.size();
+			}
+			for (int i = 0; i < numConfigsForState; i++) {
+				NFAConfiguration s = configsForState.get(i);
+				for (int j = i+1; j < numConfigsForState; j++) {
+					NFAConfiguration t = configsForState.get(j);
+					// conflicts means s.ctx==t.ctx or s.ctx is a stack
+					// suffix of t.ctx or vice versa (if alts differ).
+					// Also a conflict if s.ctx or t.ctx is empty
+					if ( s.alt != t.alt && s.context.conflictsWith(t.context) ) {
+						nondeterministicAlts.add(Utils.integer(s.alt));
+						nondeterministicAlts.add(Utils.integer(t.alt));
+					}
+				}
+			}
+		}
+
+		if ( nondeterministicAlts.isEmpty() ) {
+			return null;
+		}
+        return nondeterministicAlts;
+    }
+
+	/** Get the set of all alts mentioned by all NFA configurations in this
+	 *  DFA state.
+	 */
+	public Set<Integer> getAltSet() {
+		int numConfigs = nfaConfigurations.size();
+		Set<Integer> alts = new HashSet<Integer>();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = nfaConfigurations.get(i);
+			alts.add(Utils.integer(configuration.alt));
+		}
+		if ( alts.isEmpty() ) {
+			return null;
+		}
+		return alts;
+	}
+
+	public Set<? extends SemanticContext> getGatedSyntacticPredicatesInNFAConfigurations() {
+		int numConfigs = nfaConfigurations.size();
+		Set<SemanticContext> synpreds = new HashSet<SemanticContext>();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = nfaConfigurations.get(i);
+			SemanticContext gatedPredExpr =
+				configuration.semanticContext.getGatedPredicateContext();
+			// if this is a manual syn pred (gated and syn pred), add
+			if ( gatedPredExpr!=null &&
+				 configuration.semanticContext.isSyntacticPredicate() )
+			{
+				synpreds.add(configuration.semanticContext);
+			}
+		}
+		if ( synpreds.isEmpty() ) {
+			return null;
+		}
+		return synpreds;
+	}
+
+	/** For gated productions, we need an OR'd list of all predicates for the
+	 *  target of an edge so we can gate the edge based upon the predicates
+	 *  associated with taking that path (if any).
+	 *
+	 *  For syntactic predicates, we only want to generate predicate
+	 *  evaluations as it transitions to an accept state; waste to
+	 *  do it earlier.  So, only add gated preds derived from manually-
+	 *  specified syntactic predicates if this is an accept state.
+	 *
+	 *  Also, since configurations w/o gated predicates are like true
+	 *  gated predicates, finding a configuration whose alt has no gated
+	 *  predicate implies we should evaluate the predicate to true. This
+	 *  means the whole edge has to be ungated. Consider:
+	 *
+	 *	 X : ('a' | {p}?=&gt; 'a')
+	 *	   | 'a' 'b'
+	 *	   ;
+	 *
+	 *  Here, 'a' gets you from s0 to s1 but you can't test p because
+	 *  plain 'a' is ok.  It's also ok for starting alt 2.  Hence, you can't
+	 *  test p.  Even on the edge going to accept state for alt 1 of X, you
+	 *  can't test p.  You can get to the same place with and w/o the context.
+	 *  Therefore, it is never ok to test p in this situation. 
+	 *
+	 *  TODO: cache this as it's called a lot; or at least set bit if &gt;1 present in state
+	 */
+	public SemanticContext getGatedPredicatesInNFAConfigurations() {
+		SemanticContext unionOfPredicatesFromAllAlts = null;
+		int numConfigs = nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = nfaConfigurations.get(i);
+			SemanticContext gatedPredExpr =
+				configuration.semanticContext.getGatedPredicateContext();
+			if ( gatedPredExpr==null ) {
+				// if we ever find a configuration w/o a gated predicate
+				// (even if it's a nongated predicate), we cannot gate
+				// the incident edges.
+				return null;
+			}
+			else if ( acceptState || !configuration.semanticContext.isSyntacticPredicate() ) {
+				// at this point we have a gated predicate and, due to elseif,
+				// we know it's an accept or not a syn pred.  In this case,
+				// it's safe to add the gated predicate to the union.  We
+				// only want to add syn preds if it's an accept state.  Other
+				// gated preds can be used with edges leading to accept states.
+				if ( unionOfPredicatesFromAllAlts==null ) {
+					unionOfPredicatesFromAllAlts = gatedPredExpr;
+				}
+				else {
+					unionOfPredicatesFromAllAlts =
+						SemanticContext.or(unionOfPredicatesFromAllAlts,gatedPredExpr);
+				}
+			}
+		}
+		if ( unionOfPredicatesFromAllAlts instanceof SemanticContext.TruePredicate ) {
+			return null;
+		}
+		return unionOfPredicatesFromAllAlts;
+	}
+
+    /** Is an accept state reachable from this state? */
+    public int getAcceptStateReachable() {
+        return acceptStateReachable;
+    }
+
+    public void setAcceptStateReachable(int acceptStateReachable) {
+        this.acceptStateReachable = acceptStateReachable;
+    }
+
+    public boolean isResolvedWithPredicates() {
+        return resolvedWithPredicates;
+    }
+
+    /** Print all NFA states plus what alts they predict */
+	@Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder();
+        buf.append(stateNumber).append(":{");
+		for (int i = 0; i < nfaConfigurations.size(); i++) {
+			NFAConfiguration configuration = nfaConfigurations.get(i);
+			if ( i>0 ) {
+				buf.append(", ");
+			}
+			buf.append(configuration);
+		}
+        buf.append("}");
+        return buf.toString();
+    }
+
+	public int getLookaheadDepth() {
+		return k;
+	}
+
+	public void setLookaheadDepth(int k) {
+		this.k = k;
+		if ( k > dfa.max_k ) { // track max k for entire DFA
+			dfa.max_k = k;
+		}
+	}
+
+}
diff --git a/tool/src/main/java/org/antlr/analysis/DecisionProbe.java b/tool/src/main/java/org/antlr/analysis/DecisionProbe.java
new file mode 100644
index 0000000..625728b
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/DecisionProbe.java
@@ -0,0 +1,909 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.misc.MultiMap;
+import org.antlr.misc.Utils;
+import org.antlr.runtime.Token;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarAST;
+
+import java.util.*;
+
+/** Collection of information about what is wrong with a decision as
+ *  discovered while building the DFA predictor.
+ *
+ *  The information is collected during NFA&rarr;DFA conversion and, while
+ *  some of this is available elsewhere, it is nice to have it all tracked
+ *  in one spot so a great error message can be easily had.  I also like
+ *  the fact that this object tracks it all for later perusing to make an
+ *  excellent error message instead of lots of imprecise on-the-fly warnings
+ *  (during conversion).
+ *
+ *  A decision normally only has one problem; e.g., some input sequence
+ *  can be matched by multiple alternatives.  Unfortunately, some decisions
+ *  such as
+ *
+ *  a : ( A | B ) | ( A | B ) | A ;
+ *
+ *  have multiple problems.  So in general, you should approach a decision
+ *  as having multiple flaws each one uniquely identified by a DFAState.
+ *  For example, statesWithSyntacticallyAmbiguousAltsSet tracks the set of
+ *  all DFAStates where ANTLR has discovered a problem.  Recall that a decision
+ *  is represented internally with a DFA comprised of multiple states, each of
+ *  which could potentially have problems.
+ *
+ *  Because of this, you need to iterate over this list of DFA states.  You'll
+ *  note that most of the informational methods like
+ *  getSampleNonDeterministicInputSequence() require a DFAState.  This state
+ *  will be one of the iterated states from stateToSyntacticallyAmbiguousAltsSet.
+ *
+ *  This class is not thread safe due to shared use of visited maps etc...
+ *  Only one thread should really need to access one DecisionProbe anyway.
+ */
+public class DecisionProbe {
+	public DFA dfa;
+
+	/** Track all DFA states with nondeterministic alternatives.
+	 *  By reaching the same DFA state, a path through the NFA for some input
+	 *  is able to reach the same NFA state by starting at more than one
+	 *  alternative's left edge.  Later we may find that predicates
+	 *  resolve the issue, but we track the info anyway.
+	 *  Note that from the DFA state, you can ask for
+	 *  which alts are nondeterministic.
+	 */
+	protected Set<DFAState> statesWithSyntacticallyAmbiguousAltsSet = new HashSet<DFAState>();
+
+	/** Track just like stateToSyntacticallyAmbiguousAltsMap, but only
+	 *  for nondeterminisms that arise in the Tokens rule such as keyword vs
+	 *  ID rule.  The state maps to the list of Tokens rule alts that are
+	 *  in conflict.
+	 */
+	protected Map<DFAState, Set<Integer>> stateToSyntacticallyAmbiguousTokensRuleAltsMap =
+		new HashMap<DFAState, Set<Integer>>();
+
+	/** Was a syntactic ambiguity resolved with predicates?  Any DFA
+	 *  state that predicts more than one alternative, must be resolved
+	 *  with predicates or it should be reported to the user.
+	 */
+	protected Set<DFAState> statesResolvedWithSemanticPredicatesSet = new HashSet<DFAState>();
+
+	/** Track the predicates for each alt per DFA state;
+	 *  more than one DFA state might have syntactically ambig alt prediction.
+	 *  Maps DFA state to another map, mapping alt number to a
+	 *  SemanticContext (pred(s) to execute to resolve syntactic ambiguity).
+	 */
+	protected Map<DFAState, Map<Integer,SemanticContext>> stateToAltSetWithSemanticPredicatesMap =
+		new HashMap<DFAState, Map<Integer,SemanticContext>>();
+
+	/** Tracks alts insufficiently covered.
+	 *  For example, p1||true gets reduced to true and so leaves
+	 *  whole alt uncovered.  This maps DFA state to the set of alts
+	 */
+	protected Map<DFAState,Map<Integer, Set<Token>>> stateToIncompletelyCoveredAltsMap =
+		new HashMap<DFAState,Map<Integer, Set<Token>>>();
+
+	/** The set of states w/o emanating edges and w/o resolving sem preds. */
+	protected Set<DFAState> danglingStates = new HashSet<DFAState>();
+
+	/** The overall list of alts within the decision that have at least one
+	 *  conflicting input sequence.
+	 */
+	protected Set<Integer> altsWithProblem = new HashSet<Integer>();
+
+	/** If decision with &gt; 1 alt has recursion in &gt; 1 alt, it's (likely) nonregular
+	 *  lookahead.  The decision cannot be made with a DFA.
+	 *  The alts are stored in altsWithProblem.
+	 */
+	public boolean nonLLStarDecision = false;
+
+	/** Recursion is limited to a particular depth.  If that limit is exceeded
+	 *  the proposed new NFAConfiguration is recorded for the associated DFA state.
+	 */
+	protected MultiMap<Integer, NFAConfiguration> stateToRecursionOverflowConfigurationsMap =
+		new MultiMap<Integer, NFAConfiguration>();
+	/*
+	protected Map<Integer, List<NFAConfiguration>> stateToRecursionOverflowConfigurationsMap =
+		new HashMap<Integer, List<NFAConfiguration>>();
+		*/
+
+	/** Left recursion discovered.  The proposed new NFAConfiguration
+	 *  is recorded for the associated DFA state.
+	protected Map<Integer,List<NFAConfiguration>> stateToLeftRecursiveConfigurationsMap =
+		new HashMap<Integer,List<NFAConfiguration>>();
+	 */
+
+	/** Did ANTLR have to terminate early on the analysis of this decision? */
+	protected boolean timedOut = false;
+
+	/** Used to find paths through syntactically ambiguous DFA. If we've
+	 *  seen this state number before, what did we learn?
+	 */
+	protected Map<Integer, Integer> stateReachable;
+
+	public static final Integer REACHABLE_BUSY = Utils.integer(-1);
+	public static final Integer REACHABLE_NO = Utils.integer(0);
+	public static final Integer REACHABLE_YES = Utils.integer(1);
+
+	/** Used while finding a path through an NFA whose edge labels match
+	 *  an input sequence.  Tracks the input position
+	 *  we were at the last time at this node.  If same input position, then
+	 *  we'd have reached same state without consuming input...probably an
+	 *  infinite loop.  Stop.  Set&lt;String&gt;.  The strings look like
+	 *  stateNumber_labelIndex.
+	 */
+	protected Set<String> statesVisitedAtInputDepth;
+
+	protected Set<Integer> statesVisitedDuringSampleSequence;
+
+	public static boolean verbose = false;
+
+	public DecisionProbe(DFA dfa) {
+		this.dfa = dfa;
+	}
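+
+	/* A minimal sketch of how a probe is expected to be consulted
+	 *  (hypothetical driver code): NFA-to-DFA conversion records problems
+	 *  into dfa.probe as it runs, and afterwards the tool can ask the probe
+	 *  to report them:
+	 *
+	 *      DFA dfa = grammar.getLookaheadDFA(decision);
+	 *      if ( !dfa.probe.isDeterministic() ) {
+	 *          dfa.probe.issueWarnings();
+	 *      }
+	 */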
+
+	// I N F O R M A T I O N  A B O U T  D E C I S I O N
+
+	/** Return a string like "3:22: ( A {;} | B )" that describes this
+	 *  decision.
+	 */
+	public String getDescription() {
+		return dfa.getNFADecisionStartState().getDescription();
+	}
+
+	public boolean isReduced() {
+		return dfa.isReduced();
+	}
+
+	public boolean isCyclic() {
+		return dfa.isCyclic();
+	}
+
+	/** If no states are dead-ends, no alts are unreachable, there are
+	 *  no nondeterminisms unresolved by syn preds, all is ok with decision.
+	 */
+	public boolean isDeterministic() {
+		if ( danglingStates.isEmpty() &&
+			 statesWithSyntacticallyAmbiguousAltsSet.isEmpty() &&
+			 dfa.getUnreachableAlts().isEmpty() )
+		{
+			return true;
+		}
+
+		if ( statesWithSyntacticallyAmbiguousAltsSet.size()>0 ) {
+			for (DFAState d : statesWithSyntacticallyAmbiguousAltsSet) {
+				if ( !statesResolvedWithSemanticPredicatesSet.contains(d) ) {
+					return false;
+				}
+			}
+			// no syntactically ambig alts were left unresolved by predicates
+			return true;
+		}
+		return false;
+	}
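+
+	/* For example, following the logic above: a decision whose only
+	 *  syntactically ambiguous DFA state was resolved with semantic
+	 *  predicates is still reported as deterministic, even though the
+	 *  ambiguity set is non-empty.
+	 */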
+
+	/** Did the analysis complete its work? */
+//	public boolean analysisTimedOut() {
+//		return timedOut;
+//	}
+
+	/** Took too long to analyze a DFA */
+	public boolean analysisOverflowed() {
+		return stateToRecursionOverflowConfigurationsMap.size()>0;
+	}
+
+	/** Found recursion in &gt; 1 alt */
+	public boolean isNonLLStarDecision() {
+		return nonLLStarDecision;
+	}
+
+	/** How many states does the DFA predictor have? */
+	public int getNumberOfStates() {
+		return dfa.getNumberOfStates();
+	}
+
+	/** Get a list of all unreachable alternatives for this decision.  There
+	 *  may be multiple alternatives with ambiguous input sequences, but this
+	 *  is the overall list of unreachable alternatives (either due to
+	 *  conflict resolution or alts w/o accept states).
+	 */
+	public List<Integer> getUnreachableAlts() {
+		return dfa.getUnreachableAlts();
+	}
+
+	/** Return the set of states w/o emanating edges and w/o resolving sem preds.
+	 *  These states come about because the analysis algorithm had to
+	 *  terminate early to avoid infinite recursion for example (due to
+	 *  left recursion perhaps).
+	 */
+	public Set<DFAState> getDanglingStates() {
+		return danglingStates;
+	}
+
+    public Set<Integer> getNonDeterministicAlts() {
+        return altsWithProblem;
+	}
+
+	/** Return the sorted list of alts that conflict within a single state.
+	 *  Note that predicates may resolve the conflict.
+	 */
+	public List<Integer> getNonDeterministicAltsForState(DFAState targetState) {
+		Set<Integer> nondetAlts = targetState.getNonDeterministicAlts();
+		if ( nondetAlts==null ) {
+			return null;
+		}
+		List<Integer> sorted = new LinkedList<Integer>();
+		sorted.addAll(nondetAlts);
+		Collections.sort(sorted); // make sure it's 1, 2, ...
+		return sorted;
+	}
+
+	/** Return all DFA states in this DFA that have NFA configurations that
+	 *  conflict.  You must report a problem for each state in this set
+	 *  because each state represents a different input sequence.
+	 */
+	public Set<DFAState> getDFAStatesWithSyntacticallyAmbiguousAlts() {
+		return statesWithSyntacticallyAmbiguousAltsSet;
+	}
+
+	/** Which alts were specifically turned off to resolve nondeterminisms?
+	 *  This is different than the unreachable alts.  Disabled doesn't mean that
+	 *  the alternative is totally unreachable necessarily, it just means
+	 *  that for this DFA state, that alt is disabled.  There may be other
+	 *  accept states for that alt that make an alt reachable.
+	 */
+	public Set<Integer> getDisabledAlternatives(DFAState d) {
+		return d.getDisabledAlternatives();
+	}
+
+	/** If a recursion overflow is resolved with predicates, then we need
+	 *  to shut off the warning that would be generated.
+	 */
+	public void removeRecursiveOverflowState(DFAState d) {
+		Integer stateI = Utils.integer(d.stateNumber);
+		stateToRecursionOverflowConfigurationsMap.remove(stateI);
+	}
+
+	/** Return a List&lt;Label&gt; indicating an input sequence that can be matched
+	 *  from the start state of the DFA to the targetState (which is known
+	 *  to have a problem).
+	 */
+	public List<Label> getSampleNonDeterministicInputSequence(DFAState targetState) {
+		Set<DFAState> dfaStates = getDFAPathStatesToTarget(targetState);
+		statesVisitedDuringSampleSequence = new HashSet<Integer>();
+		List<Label> labels = new ArrayList<Label>(); // may access ith element; use array
+		if ( dfa==null || dfa.startState==null ) {
+			return labels;
+		}
+		getSampleInputSequenceUsingStateSet(dfa.startState,
+											targetState,
+											dfaStates,
+											labels);
+		return labels;
+	}
+
+	/** Given List&lt;Label&gt;, return a String with a useful representation
+	 *  of the associated input string.  One could show something different
+	 *  for lexers and parsers, for example.
+	 */
+	public String getInputSequenceDisplay(List<? extends Label> labels) {
+        Grammar g = dfa.nfa.grammar;
+		StringBuilder buf = new StringBuilder();
+		for (Iterator<? extends Label> it = labels.iterator(); it.hasNext();) {
+			Label label = it.next();
+			buf.append(label.toString(g));
+			if ( it.hasNext() && g.type!=Grammar.LEXER ) {
+				buf.append(' ');
+			}
+		}
+		return buf.toString();
+	}
+
+    /** Given an alternative associated with a nondeterministic DFA state,
+	 *  find the path of NFA states associated with the labels sequence.
+	 *  Useful for tracing where in the NFA a single input sequence can be
+	 *  matched.  For different alts, you should get different NFA paths.
+	 *
+	 *  The first NFA state for all NFA paths will be the same: the starting
+	 *  NFA state of the first nondeterministic alt.  Imagine (A|B|A|A):
+	 *
+	 * 	5-&gt;9-A-&gt;o
+	 *  |
+	 *  6-&gt;10-B-&gt;o
+	 *  |
+	 *  7-&gt;11-A-&gt;o
+	 *  |
+	 *  8-&gt;12-A-&gt;o
+	 *
+	 *  There are 3 nondeterministic alts.  The paths should be:
+	 *  5 9 ...
+	 *  5 6 7 11 ...
+	 *  5 6 7 8 12 ...
+	 *
+	 *  The NFA path matching the sample input sequence (labels) is computed
+	 *  using states 9, 11, and 12 rather than 5, 7, 8 because state 5, for
+	 *  example can get to all ambig paths.  Must isolate for each alt (hence,
+	 *  the extra state beginning each alt in my NFA structures).  Here,
+	 *  firstAlt=1.
+	 */
+	public List<? extends NFAState> getNFAPathStatesForAlt(int firstAlt,
+									   int alt,
+									   List<? extends Label> labels)
+	{
+		NFAState nfaStart = dfa.getNFADecisionStartState();
+		List<NFAState> path = new LinkedList<NFAState>();
+		// first add all NFA states leading up to altStart state
+		for (int a=firstAlt; a<=alt; a++) {
+			NFAState s =
+				dfa.nfa.grammar.getNFAStateForAltOfDecision(nfaStart,a);
+			path.add(s);
+		}
+
+		// add first state of actual alt
+		NFAState altStart = dfa.nfa.grammar.getNFAStateForAltOfDecision(nfaStart,alt);
+		NFAState isolatedAltStart = (NFAState)altStart.transition[0].target;
+		path.add(isolatedAltStart);
+
+		// add the actual path now
+		statesVisitedAtInputDepth = new HashSet<String>();
+		getNFAPath(isolatedAltStart,
+				   0,
+				   labels,
+				   path);
+        return path;
+	}
+
+	/** Each state in the DFA represents a different input sequence for an
+	 *  alt of the decision.  Given a DFA state, what is the semantic
+	 *  predicate context for a particular alt.
+	 */
+    public SemanticContext getSemanticContextForAlt(DFAState d, int alt) {
+		Map<Integer, SemanticContext> altToPredMap = stateToAltSetWithSemanticPredicatesMap.get(d);
+		if ( altToPredMap==null ) {
+			return null;
+		}
+		return altToPredMap.get(Utils.integer(alt));
+	}
+
+	/** At least one alt refs a sem or syn pred */
+	public boolean hasPredicate() {
+		return stateToAltSetWithSemanticPredicatesMap.size()>0;
+	}
+
+	public Set<DFAState> getNondeterministicStatesResolvedWithSemanticPredicate() {
+		return statesResolvedWithSemanticPredicatesSet;
+	}
+
+	/** For state d, return a map from each alt whose predicate context was
+	 *  insufficient to resolve a nondeterminism to the token locations that
+	 *  are reachable without a predicate.
+	 */
+	public Map<Integer, Set<Token>> getIncompletelyCoveredAlts(DFAState d) {
+		return stateToIncompletelyCoveredAltsMap.get(d);
+	}
+
+	public void issueWarnings() {
+		// NONREGULAR DUE TO RECURSION > 1 ALTS
+		// Issue this before aborted analysis, which might also occur
+		// if we take too long to terminate
+		if ( nonLLStarDecision && !dfa.getAutoBacktrackMode() ) {
+			ErrorManager.nonLLStarDecision(this);
+		}
+
+		issueRecursionWarnings();
+
+		// generate a separate message for each problem state in DFA
+		Set<DFAState> resolvedStates = getNondeterministicStatesResolvedWithSemanticPredicate();
+		Set<DFAState> problemStates = getDFAStatesWithSyntacticallyAmbiguousAlts();
+		if ( problemStates.size()>0 ) {
+			Iterator<DFAState> it =
+				problemStates.iterator();
+			while (	it.hasNext() && !dfa.nfa.grammar.NFAToDFAConversionExternallyAborted() ) {
+				DFAState d = it.next();
+				Map<Integer, Set<Token>> insufficientAltToLocations = getIncompletelyCoveredAlts(d);
+				if ( insufficientAltToLocations!=null && insufficientAltToLocations.size()>0 ) {
+					ErrorManager.insufficientPredicates(this,d,insufficientAltToLocations);
+				}
+				// don't report problem if resolved
+				if ( resolvedStates==null || !resolvedStates.contains(d) ) {
+					// first strip last alt from disableAlts if it's wildcard
+					// then don't print error if no more disable alts
+					Set<Integer> disabledAlts = getDisabledAlternatives(d);
+					stripWildCardAlts(disabledAlts);
+					if ( disabledAlts.size()>0 ) {
+						// nondeterminism; same input predicts multiple alts.
+						// but don't emit error if greedy=true explicitly set
+						boolean explicitlyGreedy = false;
+						GrammarAST blockAST =
+							d.dfa.nfa.grammar.getDecisionBlockAST(d.dfa.decisionNumber);
+						if ( blockAST!=null ) {
+							String greedyS = (String)blockAST.getBlockOption("greedy");
+							if ( greedyS!=null && greedyS.equals("true") ) explicitlyGreedy = true;
+						}
+						if ( !explicitlyGreedy) ErrorManager.nondeterminism(this,d);
+					}
+				}
+			}
+		}
+
+		Set<DFAState> danglingStates = getDanglingStates();
+		if ( danglingStates.size()>0 ) {
+			//System.err.println("no emanating edges for states: "+danglingStates);
+			for (DFAState d : danglingStates) {
+				ErrorManager.danglingState(this,d);
+			}
+		}
+
+		if ( !nonLLStarDecision ) {
+			List<Integer> unreachableAlts = dfa.getUnreachableAlts();
+			if ( unreachableAlts!=null && unreachableAlts.size()>0 ) {
+				// give different msg if it's an empty Tokens rule from delegate
+				boolean isInheritedTokensRule = false;
+				if ( dfa.isTokensRuleDecision() ) {
+					for (Integer altI : unreachableAlts) {
+						GrammarAST decAST = dfa.getDecisionASTNode();
+						GrammarAST altAST = (GrammarAST)decAST.getChild(altI-1);
+						GrammarAST delegatedTokensAlt =
+							(GrammarAST)altAST.getFirstChildWithType(ANTLRParser.DOT);
+						if ( delegatedTokensAlt !=null ) {
+							isInheritedTokensRule = true;
+							ErrorManager.grammarWarning(ErrorManager.MSG_IMPORTED_TOKENS_RULE_EMPTY,
+														dfa.nfa.grammar,
+														null,
+														dfa.nfa.grammar.name,
+														delegatedTokensAlt.getChild(0).getText());
+						}
+					}
+				}
+				if ( !isInheritedTokensRule ) {
+					ErrorManager.unreachableAlts(this,unreachableAlts);
+				}
+			}
+		}
+	}
+
+	/** Get the last disabled alt number and check in the grammar to see
+	 *  if that alt is a simple wildcard.  If so, treat like an else clause
+	 *  and don't emit the error.  Strip out the last alt if it's wildcard.
+	 */
+	protected void stripWildCardAlts(Set<Integer> disabledAlts) {
+		List<Integer> sortedDisableAlts = new ArrayList<Integer>(disabledAlts);
+		Collections.sort(sortedDisableAlts);
+		Integer lastAlt =
+			sortedDisableAlts.get(sortedDisableAlts.size()-1);
+		GrammarAST blockAST =
+			dfa.nfa.grammar.getDecisionBlockAST(dfa.decisionNumber);
+		//System.out.println("block with error = "+blockAST.toStringTree());
+		GrammarAST lastAltAST;
+		if ( blockAST.getChild(0).getType()==ANTLRParser.OPTIONS ) {
+			// if options, skip first child: ( options { ( = greedy false ) )
+			lastAltAST = (GrammarAST)blockAST.getChild(lastAlt.intValue());
+		}
+		else {
+			lastAltAST = (GrammarAST)blockAST.getChild(lastAlt -1);
+		}
+		//System.out.println("last alt is "+lastAltAST.toStringTree());
+		// if last alt looks like ( ALT . <end-of-alt> ) then wildcard
+		// Avoid looking at optional blocks etc... that have last alt
+		// as the EOB:
+		// ( BLOCK ( ALT 'else' statement <end-of-alt> ) <end-of-block> )
+		if ( lastAltAST.getType()!=ANTLRParser.EOB &&
+			 lastAltAST.getChild(0).getType()== ANTLRParser.WILDCARD &&
+			 lastAltAST.getChild(1).getType()== ANTLRParser.EOA )
+		{
+			//System.out.println("wildcard");
+			disabledAlts.remove(lastAlt);
+		}
+	}
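Concretely (hypothetical grammar, not taken from this codebase): in a rule such as stat : 'return' expr ';' | 'break' ';' | . ; the bare-wildcard last alt conflicts with the other alts and would normally appear among the disabled alts for the problem DFA state. Because it looks like a default/else clause, stripWildCardAlts() drops it, and if the wildcard was the only disabled alt, no nondeterminism warning is issued for the decision.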
+
+	protected void issueRecursionWarnings() {
+		// RECURSION OVERFLOW
+		Set<Integer> dfaStatesWithRecursionProblems =
+			stateToRecursionOverflowConfigurationsMap.keySet();
+		// now walk the truly unique (unaliased) list of DFA states with infinite recursion
+		// Goal: create a map from alt to map<target rule, set of NFAState call sites>,
+		// i.e., Map<Integer alt, Map<String target, Set<NFAState> call sites>>
+		Map<Integer, Map<String, Set<NFAState>>> altToTargetToCallSitesMap =
+			new HashMap<Integer, Map<String, Set<NFAState>>>();
+		// track a single problem DFA state for each alt
+		Map<Integer, DFAState> altToDFAState = new HashMap<Integer, DFAState>();
+		computeAltToProblemMaps(dfaStatesWithRecursionProblems,
+								stateToRecursionOverflowConfigurationsMap,
+								altToTargetToCallSitesMap, // output param
+								altToDFAState);            // output param
+
+		// walk each alt with recursion overflow problems and generate error
+		Set<Integer> alts = altToTargetToCallSitesMap.keySet();
+		List<Integer> sortedAlts = new ArrayList<Integer>(alts);
+		Collections.sort(sortedAlts);
+		for (Integer altI : sortedAlts) {
+			Map<String, Set<NFAState>> targetToCallSiteMap =
+				altToTargetToCallSitesMap.get(altI);
+			Set<String> targetRules = targetToCallSiteMap.keySet();
+			Collection<Set<NFAState>> callSiteStates = targetToCallSiteMap.values();
+			DFAState sampleBadState = altToDFAState.get(altI);
+			ErrorManager.recursionOverflow(this,
+										   sampleBadState,
+										   altI,
+										   targetRules,
+										   callSiteStates);
+		}
+	}
+
+	private void computeAltToProblemMaps(Set<Integer> dfaStatesUnaliased,
+										 Map<Integer, List<NFAConfiguration>> configurationsMap,
+										 Map<Integer, Map<String, Set<NFAState>>> altToTargetToCallSitesMap,
+										 Map<Integer, DFAState> altToDFAState)
+	{
+		for (Integer stateI : dfaStatesUnaliased) {
+			// walk this DFA's config list
+			List<? extends NFAConfiguration> configs = configurationsMap.get(stateI);
+			for (int i = 0; i < configs.size(); i++) {
+				NFAConfiguration c = configs.get(i);
+				NFAState ruleInvocationState = dfa.nfa.getState(c.state);
+				Transition transition0 = ruleInvocationState.transition[0];
+				RuleClosureTransition ref = (RuleClosureTransition)transition0;
+				String targetRule = ((NFAState) ref.target).enclosingRule.name;
+				Integer altI = Utils.integer(c.alt);
+				Map<String, Set<NFAState>> targetToCallSiteMap =
+					altToTargetToCallSitesMap.get(altI);
+				if ( targetToCallSiteMap==null ) {
+					targetToCallSiteMap = new HashMap<String, Set<NFAState>>();
+					altToTargetToCallSitesMap.put(altI, targetToCallSiteMap);
+				}
+				Set<NFAState> callSites =
+					targetToCallSiteMap.get(targetRule);
+				if ( callSites==null ) {
+					callSites = new HashSet<NFAState>();
+					targetToCallSiteMap.put(targetRule, callSites);
+				}
+				callSites.add(ruleInvocationState);
+				// track one problem DFA state per alt
+				if ( altToDFAState.get(altI)==null ) {
+					DFAState sampleBadState = dfa.getState(stateI);
+					altToDFAState.put(altI, sampleBadState);
+				}
+			}
+		}
+	}
+
+	private Set<Integer> getUnaliasedDFAStateSet(Set<Integer> dfaStatesWithRecursionProblems) {
+		Set<Integer> dfaStatesUnaliased = new HashSet<Integer>();
+		for (Integer stateI : dfaStatesWithRecursionProblems) {
+			DFAState d = dfa.getState(stateI);
+			dfaStatesUnaliased.add(Utils.integer(d.stateNumber));
+		}
+		return dfaStatesUnaliased;
+	}
+
+
+	// T R A C K I N G  M E T H O D S
+
+    /** Report the fact that DFA state d is not a state resolved with
+     *  predicates and yet it has no emanating edges.  Usually this
+     *  is a result of the closure/reach operations being unable to proceed.
+     */
+	public void reportDanglingState(DFAState d) {
+		danglingStates.add(d);
+	}
+
+//	public void reportAnalysisTimeout() {
+//		timedOut = true;
+//		dfa.nfa.grammar.setOfDFAWhoseAnalysisTimedOut.add(dfa);
+//	}
+
+	/** Report that at least 2 alts have recursive constructs.  There is
+	 *  no way to build a DFA so we terminated.
+	 */
+	public void reportNonLLStarDecision(DFA dfa) {
+		/*
+		System.out.println("non-LL(*) DFA "+dfa.decisionNumber+", alts: "+
+						   dfa.recursiveAltSet.toList());
+						   */
+		nonLLStarDecision = true;
+		dfa.nfa.grammar.numNonLLStar++;
+		altsWithProblem.addAll(dfa.recursiveAltSet.toList());
+	}
+
+	public void reportRecursionOverflow(DFAState d,
+										NFAConfiguration recursionNFAConfiguration)
+	{
+		// track the state number rather than the state itself, as d can change
+		// out from underneath us; a hash lookup on the mutated state would then fail
+
+		// left-recursion is detected in start state.  Since we can't
+		// call resolveNondeterminism() on the start state (it would
+		// not look k=1 to get min single token lookahead), we must
+		// prevent errors derived from this state.  Avoid start state
+		if ( d.stateNumber > 0 ) {
+			Integer stateI = Utils.integer(d.stateNumber);
+			stateToRecursionOverflowConfigurationsMap.map(stateI, recursionNFAConfiguration);
+		}
+	}
+
+	public void reportNondeterminism(DFAState d, Set<Integer> nondeterministicAlts) {
+		altsWithProblem.addAll(nondeterministicAlts); // track overall list
+		statesWithSyntacticallyAmbiguousAltsSet.add(d);
+		dfa.nfa.grammar.setOfNondeterministicDecisionNumbers.add(
+			Utils.integer(dfa.getDecisionNumber())
+		);
+	}
+
+	/** Currently the analysis reports issues between token definitions, but
+	 *  we don't print out warnings; instead we just pick the first token
+	 *  definition found in the grammar, a la lex/flex.
+	 */
+	public void reportLexerRuleNondeterminism(DFAState d, Set<Integer> nondeterministicAlts) {
+		stateToSyntacticallyAmbiguousTokensRuleAltsMap.put(d,nondeterministicAlts);
+	}
+
+	public void reportNondeterminismResolvedWithSemanticPredicate(DFAState d) {
+		// First, prevent a recursion warning on this state due to
+		// pred resolution
+		if ( d.abortedDueToRecursionOverflow ) {
+			d.dfa.probe.removeRecursiveOverflowState(d);
+		}
+		statesResolvedWithSemanticPredicatesSet.add(d);
+		//System.out.println("resolved with pred: "+d);
+		dfa.nfa.grammar.setOfNondeterministicDecisionNumbersResolvedWithPredicates.add(
+			Utils.integer(dfa.getDecisionNumber())
+		);
+	}
+
+	/** Report the list of predicates found for each alternative; copy
+	 *  the list because this set gets altered later by the method
+	 *  tryToResolveWithSemanticPredicates() while flagging NFA configurations
+	 *  in d as resolved.
+	 */
+	public void reportAltPredicateContext(DFAState d, Map<Integer, ? extends SemanticContext> altPredicateContext) {
+		Map<Integer, SemanticContext> copy = new HashMap<Integer, SemanticContext>();
+		copy.putAll(altPredicateContext);
+		stateToAltSetWithSemanticPredicatesMap.put(d,copy);
+	}
+
+	public void reportIncompletelyCoveredAlts(DFAState d,
+											  Map<Integer, Set<Token>> altToLocationsReachableWithoutPredicate)
+	{
+		stateToIncompletelyCoveredAltsMap.put(d, altToLocationsReachableWithoutPredicate);
+	}
+
+	// S U P P O R T
+
+	/** Given a start state and a target state, return true if start can reach
+	 *  target state.  Also, compute the set of DFA states
+	 *  that are on a path from start to target; return in states parameter.
+	 */
+	protected boolean reachesState(DFAState startState,
+								   DFAState targetState,
+								   Set<DFAState> states) {
+		if ( startState==targetState ) {
+			states.add(targetState);
+			//System.out.println("found target DFA state "+targetState.getStateNumber());
+			stateReachable.put(startState.stateNumber, REACHABLE_YES);
+			return true;
+		}
+
+		DFAState s = startState;
+		// avoid infinite loops
+		stateReachable.put(s.stateNumber, REACHABLE_BUSY);
+
+		// look for a path to targetState among transitions for this state
+		// stop when you find the first one; I'm pretty sure there is
+		// at most one path to any DFA state with conflicting predictions
+		for (int i=0; i<s.getNumberOfTransitions(); i++) {
+			Transition t = s.transition(i);
+			DFAState edgeTarget = (DFAState)t.target;
+			Integer targetStatus = stateReachable.get(edgeTarget.stateNumber);
+			if ( targetStatus==REACHABLE_BUSY ) { // avoid cycles; they say nothing
+				continue;
+			}
+			if ( targetStatus==REACHABLE_YES ) { // return success!
+				stateReachable.put(s.stateNumber, REACHABLE_YES);
+				return true;
+			}
+			if ( targetStatus==REACHABLE_NO ) { // try another transition
+				continue;
+			}
+			// if null, target must be REACHABLE_UNKNOWN (i.e., unvisited)
+			if ( reachesState(edgeTarget, targetState, states) ) {
+				states.add(s);
+				stateReachable.put(s.stateNumber, REACHABLE_YES);
+				return true;
+			}
+		}
+
+		stateReachable.put(s.stateNumber, REACHABLE_NO);
+		return false; // no path to targetState found.
+	}
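The walk above memoizes reachability per state (absent = unknown, REACHABLE_BUSY while on the recursion stack, then REACHABLE_YES or REACHABLE_NO), so cycles are cut off and each state is explored at most once. A minimal self-contained sketch of the same pattern over a plain adjacency list (simplified types, not the DFA classes used here):

    import java.util.*;

    /** Simplified sketch of the memoized reachability walk used by reachesState(). */
    class Reachability {
        enum Status { BUSY, YES, NO }                 // absent from memo means "unknown"
        final Map<Integer, List<Integer>> edges;      // state -> successor states
        final Map<Integer, Status> memo = new HashMap<Integer, Status>();

        Reachability(Map<Integer, List<Integer>> edges) { this.edges = edges; }

        /** True if target is reachable from s; states on a successful path are added to onPath. */
        boolean reaches(int s, int target, Set<Integer> onPath) {
            if (s == target) { onPath.add(s); memo.put(s, Status.YES); return true; }
            memo.put(s, Status.BUSY);                 // mark busy to break cycles
            for (int next : edges.getOrDefault(s, Collections.<Integer>emptyList())) {
                Status status = memo.get(next);
                if (status == Status.BUSY || status == Status.NO) continue;
                if (status == Status.YES || reaches(next, target, onPath)) {
                    onPath.add(s);
                    memo.put(s, Status.YES);
                    return true;
                }
            }
            memo.put(s, Status.NO);
            return false;
        }
    }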
+
+	protected Set<DFAState> getDFAPathStatesToTarget(DFAState targetState) {
+		Set<DFAState> dfaStates = new HashSet<DFAState>();
+		stateReachable = new HashMap<Integer, Integer>();
+		if ( dfa==null || dfa.startState==null ) {
+			return dfaStates;
+		}
+		// called for its side effect: fills dfaStates with the states on paths to targetState
+		reachesState(dfa.startState, targetState, dfaStates);
+		return dfaStates;
+	}
+
+	/** Given a start state and a final state, find a list of edge labels
+	 *  between the two ignoring epsilon.  Limit your scan to a set of states
+	 *  passed in.  This is used to show a sample input sequence that is
+	 *  nondeterministic with respect to this decision.  The resulting
+	 *  List&lt;Label&gt; is returned via the labels parameter.  The incoming states
+	 *  set must contain all states that lead from startState to targetState
+	 *  and no others, so this algorithm doesn't take a path that eventually
+	 *  leads to a state other than targetState.  Loops are not followed,
+	 *  which yields a short (possibly shortest) path.
+	 */
+	protected void getSampleInputSequenceUsingStateSet(State startState,
+													   State targetState,
+													   Set<DFAState> states,
+													   List<Label> labels)
+	{
+		statesVisitedDuringSampleSequence.add(startState.stateNumber);
+
+		// pick the first edge in states as the one to traverse
+		for (int i=0; i<startState.getNumberOfTransitions(); i++) {
+			Transition t = startState.transition(i);
+			DFAState edgeTarget = (DFAState)t.target;
+			if ( states.contains(edgeTarget) &&
+				 !statesVisitedDuringSampleSequence.contains(edgeTarget.stateNumber) )
+			{
+				labels.add(t.label); // traverse edge and track label
+				if ( edgeTarget!=targetState ) {
+					// get more labels if not at target
+					getSampleInputSequenceUsingStateSet(edgeTarget,
+														targetState,
+														states,
+														labels);
+				}
+				// done with this DFA state as we've found a good path to target
+				return;
+			}
+		}
+		labels.add(new Label(Label.EPSILON)); // indicate no input found
+		// this happens on a : {p1}? a | A ;
+		//ErrorManager.error(ErrorManager.MSG_CANNOT_COMPUTE_SAMPLE_INPUT_SEQ);
+	}
+
+	/** Given a sample input sequence, you usually would like to know the
+	 *  path taken through the NFA.  Return the list of NFA states visited
+	 *  while matching a list of labels.  This cannot use the usual
+	 *  interpreter, which does a deterministic walk.  We need to be able to
+	 *  take paths that are turned off during nondeterminism resolution. So,
+	 *  just do a depth-first walk traversing edges labeled with the current
+	 *  label.  Return true if a path was found emanating from state s.
+	 */
+	protected boolean getNFAPath(NFAState s,     // starting where?
+								 int labelIndex, // 0..labels.size()-1
+								 List<? extends Label> labels,    // input sequence
+								 List<? super NFAState> path)      // output list of NFA states
+	{
+		// track a visit to state s at input index labelIndex if not seen
+		String thisStateKey = getStateLabelIndexKey(s.stateNumber,labelIndex);
+		if ( statesVisitedAtInputDepth.contains(thisStateKey) ) {
+			/*
+			System.out.println("### already visited "+s.stateNumber+" previously at index "+
+						   labelIndex);
+			*/
+			return false;
+		}
+		statesVisitedAtInputDepth.add(thisStateKey);
+
+		/*
+		System.out.println("enter state "+s.stateNumber+" visited states: "+
+						   statesVisitedAtInputDepth);
+        */
+
+		// pick the first edge whose target is in states and whose
+		// label is labels[labelIndex]
+		for (int i=0; i<s.getNumberOfTransitions(); i++) {
+			Transition t = s.transition[i];
+			NFAState edgeTarget = (NFAState)t.target;
+			Label label = (Label)labels.get(labelIndex);
+			/*
+			System.out.println(s.stateNumber+"-"+
+							   t.label.toString(dfa.nfa.grammar)+"->"+
+							   edgeTarget.stateNumber+" =="+
+							   label.toString(dfa.nfa.grammar)+"?");
+			*/
+			if ( t.label.isEpsilon() || t.label.isSemanticPredicate() ) {
+				// nondeterministically backtrack down epsilon edges
+				path.add(edgeTarget);
+				boolean found =
+					getNFAPath(edgeTarget, labelIndex, labels, path);
+				if ( found ) {
+					statesVisitedAtInputDepth.remove(thisStateKey);
+					return true; // return to "calling" state
+				}
+				path.remove(path.size()-1); // remove; didn't work out
+				continue; // look at the next edge
+			}
+			if ( t.label.matches(label) ) {
+				path.add(edgeTarget);
+				/*
+				System.out.println("found label "+
+								   t.label.toString(dfa.nfa.grammar)+
+								   " at state "+s.stateNumber+"; labelIndex="+labelIndex);
+				*/
+				if ( labelIndex==labels.size()-1 ) {
+					// found last label; done!
+					statesVisitedAtInputDepth.remove(thisStateKey);
+					return true;
+				}
+				// otherwise try to match remaining input
+				boolean found =
+					getNFAPath(edgeTarget, labelIndex+1, labels, path);
+				if ( found ) {
+					statesVisitedAtInputDepth.remove(thisStateKey);
+					return true;
+				}
+				/*
+				System.out.println("backtrack; path from "+s.stateNumber+"->"+
+								   t.label.toString(dfa.nfa.grammar)+" didn't work");
+				*/
+				path.remove(path.size()-1); // remove; didn't work out
+				continue; // keep looking for a path for labels
+			}
+		}
+		//System.out.println("no epsilon or matching edge; removing "+thisStateKey);
+		// no edge was found matching label; that's ok, some other state will have it
+		statesVisitedAtInputDepth.remove(thisStateKey);
+		return false;
+	}
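getNFAPath() is a backtracking depth-first search whose visited set is keyed on the pair (state, input index), because the same NFA state may legitimately be revisited at a different input depth. A stripped-down, self-contained sketch of that pattern over a labeled graph (simplified types; a null label plays the role of an epsilon edge; this is not ANTLR's NFA):

    import java.util.*;

    /** Simplified sketch of the (state, input-index)-keyed backtracking walk in getNFAPath(). */
    class PathFinder {
        static class Edge {                              // a null label plays the role of epsilon
            final int target; final String label;
            Edge(int target, String label) { this.target = target; this.label = label; }
        }
        final Map<Integer, List<Edge>> edges;            // state -> outgoing edges
        final Set<String> busy = new HashSet<String>();  // "state_index" keys currently on the stack

        PathFinder(Map<Integer, List<Edge>> edges) { this.edges = edges; }

        boolean findPath(int s, int labelIndex, List<String> labels, List<Integer> path) {
            String key = s + "_" + labelIndex;
            if (!busy.add(key)) return false;            // already exploring s at this input depth
            try {
                for (Edge e : edges.getOrDefault(s, Collections.<Edge>emptyList())) {
                    path.add(e.target);
                    if (e.label == null) {               // epsilon edge: input position unchanged
                        if (findPath(e.target, labelIndex, labels, path)) return true;
                    } else if (e.label.equals(labels.get(labelIndex))) {
                        if (labelIndex == labels.size() - 1) return true;   // matched every label
                        if (findPath(e.target, labelIndex + 1, labels, path)) return true;
                    }
                    path.remove(path.size() - 1);        // backtrack; this edge didn't work out
                }
                return false;
            } finally {
                busy.remove(key);
            }
        }
    }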
+
+	protected String getStateLabelIndexKey(int s, int i) {
+		StringBuilder buf = new StringBuilder();
+		buf.append(s);
+		buf.append('_');
+		buf.append(i);
+		return buf.toString();
+	}
+
+	/** From an alt number associated with the artificial Tokens rule, return
+	 *  the name of the token that is associated with that alt.
+	 */
+	public String getTokenNameForTokensRuleAlt(int alt) {
+		NFAState decisionState = dfa.getNFADecisionStartState();
+		NFAState altState =
+			dfa.nfa.grammar.getNFAStateForAltOfDecision(decisionState,alt);
+		NFAState decisionLeft = (NFAState)altState.transition[0].target;
+		RuleClosureTransition ruleCallEdge =
+			(RuleClosureTransition)decisionLeft.transition[0];
+		NFAState ruleStartState = (NFAState)ruleCallEdge.target;
+		//System.out.println("alt = "+decisionLeft.getEnclosingRule());
+		return ruleStartState.enclosingRule.name;
+	}
+
+	public void reset() {
+		stateToRecursionOverflowConfigurationsMap.clear();
+	}
+}
diff --git a/tool/src/main/java/org/antlr/analysis/LL1Analyzer.java b/tool/src/main/java/org/antlr/analysis/LL1Analyzer.java
new file mode 100644
index 0000000..b5dd463
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/LL1Analyzer.java
@@ -0,0 +1,449 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.misc.IntSet;
+import org.antlr.misc.IntervalSet;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Rule;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/** Computes LL(1) lookahead information over the grammar NFA: FIRST, FOLLOW,
+ *  and LOOK sets, plus simple predicate detection used to decide whether a
+ *  decision can be handled with an LL(1)-plus-predicates strategy.
+ */
+public class LL1Analyzer {
+	/**	0	if we hit end of rule and invoker should keep going (epsilon) */
+	public static final int DETECT_PRED_EOR = 0;
+	/**	1	if we found a nonautobacktracking pred */
+	public static final int DETECT_PRED_FOUND = 1;
+	/**	2	if we didn't find such a pred */
+	public static final int DETECT_PRED_NOT_FOUND = 2;
+
+	public Grammar grammar;
+
+	/** Used during LOOK to detect computation cycles */
+	protected Set<NFAState> lookBusy = new HashSet<NFAState>();
+
+	public Map<NFAState, LookaheadSet> FIRSTCache = new HashMap<NFAState, LookaheadSet>();
+	public Map<Rule, LookaheadSet> FOLLOWCache = new HashMap<Rule, LookaheadSet>();
+
+	public LL1Analyzer(Grammar grammar) {
+		this.grammar = grammar;
+	}
+
+	/*
+	public void computeRuleFIRSTSets() {
+		if ( getNumberOfDecisions()==0 ) {
+			createNFAs();
+		}
+		for (Iterator it = getRules().iterator(); it.hasNext();) {
+			Rule r = (Rule)it.next();
+			if ( r.isSynPred ) {
+				continue;
+			}
+			LookaheadSet s = FIRST(r);
+			System.out.println("FIRST("+r.name+")="+s);
+		}
+	}
+	*/
+
+	/*
+	public Set<String> getOverriddenRulesWithDifferentFIRST() {
+		// walk every rule in this grammar and compare FIRST set with
+		// those in imported grammars.
+		Set<String> rules = new HashSet();
+		for (Iterator it = getRules().iterator(); it.hasNext();) {
+			Rule r = (Rule)it.next();
+			//System.out.println(r.name+" FIRST="+r.FIRST);
+			for (int i = 0; i < delegates.size(); i++) {
+				Grammar g = delegates.get(i);
+				Rule importedRule = g.getRule(r.name);
+				if ( importedRule != null ) { // exists in imported grammar
+					// System.out.println(r.name+" exists in imported grammar: FIRST="+importedRule.FIRST);
+					if ( !r.FIRST.equals(importedRule.FIRST) ) {
+						rules.add(r.name);
+					}
+				}
+			}
+		}
+		return rules;
+	}
+
+	public Set<Rule> getImportedRulesSensitiveToOverriddenRulesDueToLOOK() {
+		Set<String> diffFIRSTs = getOverriddenRulesWithDifferentFIRST();
+		Set<Rule> rules = new HashSet();
+		for (Iterator it = diffFIRSTs.iterator(); it.hasNext();) {
+			String r = (String) it.next();
+			for (int i = 0; i < delegates.size(); i++) {
+				Grammar g = delegates.get(i);
+				Set<Rule> callers = g.ruleSensitivity.get(r);
+				// somebody invokes rule whose FIRST changed in subgrammar?
+				if ( callers!=null ) {
+					rules.addAll(callers);
+					//System.out.println(g.name+" rules "+callers+" sensitive to "+r+"; dup 'em");
+				}
+			}
+		}
+		return rules;
+	}
+*/
+
+	/*
+	public LookaheadSet LOOK(Rule r) {
+		if ( r.FIRST==null ) {
+			r.FIRST = FIRST(r.startState);
+		}
+		return r.FIRST;
+	}
+*/
+
+	/** From an NFA state, s, find the set of all labels reachable from s.
+	 *  Used to compute follow sets for error recovery.  Never computes
+	 *  a FOLLOW operation.  FIRST stops at end of rules, returning EOR, unless
+	 *  invoked from another rule.  I.e., routine properly handles
+	 *
+	 *     a : b A ;
+	 *
+	 *  where b is nullable.
+	 *
+	 *  We record with EOR_TOKEN_TYPE if we hit the end of a rule so we can
+	 *  know at runtime (when these sets are used) to start walking up the
+	 *  follow chain to compute the real, correct follow set (as opposed to
+	 *  the FOLLOW, which is a superset).
+	 *
+	 *  This routine will only be used on parser and tree parser grammars.
+	 */
+	public LookaheadSet FIRST(NFAState s) {
+		//System.out.println("> FIRST("+s.enclosingRule.name+") in rule "+s.enclosingRule);
+		lookBusy.clear();
+		LookaheadSet look = _FIRST(s, false);
+		//System.out.println("< FIRST("+s.enclosingRule.name+") in rule "+s.enclosingRule+"="+look.toString(this.grammar));
+		return look;
+	}
+
+	public LookaheadSet FOLLOW(Rule r) {
+        //System.out.println("> FOLLOW("+r.name+") in rule "+r.startState.enclosingRule);
+		LookaheadSet f = FOLLOWCache.get(r);
+		if ( f!=null ) {
+			return f;
+		}
+		f = _FIRST(r.stopState, true);
+		FOLLOWCache.put(r, f);
+        //System.out.println("< FOLLOW("+r+") in rule "+r.startState.enclosingRule+"="+f.toString(this.grammar));
+		return f;
+	}
+
+	public LookaheadSet LOOK(NFAState s) {
+		if ( NFAToDFAConverter.debug ) {
+			System.out.println("> LOOK("+s+")");
+		}
+		lookBusy.clear();
+		LookaheadSet look = _FIRST(s, true);
+		// FOLLOW makes no sense (at the moment!) for lexical rules.
+		if ( grammar.type!=Grammar.LEXER && look.member(Label.EOR_TOKEN_TYPE) ) {
+			// avoid altering the cached FIRST result
+			LookaheadSet f = FOLLOW(s.enclosingRule);
+			f.orInPlace(look);
+			f.remove(Label.EOR_TOKEN_TYPE);
+			look = f;
+			//look.orInPlace(FOLLOW(s.enclosingRule));
+		}
+		else if ( grammar.type==Grammar.LEXER && look.member(Label.EOT) ) {
+			// if this has EOT, lookahead is all char (all char can follow rule)
+			//look = new LookaheadSet(Label.EOT);
+			look = new LookaheadSet(IntervalSet.COMPLETE_SET);
+		}
+		if ( NFAToDFAConverter.debug ) {
+			System.out.println("< LOOK("+s+")="+look.toString(grammar));
+		}
+		return look;
+	}
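To make the EOR handling concrete: for a hypothetical grammar with rules r : b A ; and b : B | ; (so b is nullable), falling off the end of b yields {B, EOR}; LOOK then unions in what can follow and drops EOR, so the lookahead at the reference to b ends up as {B, A}. The same set arithmetic can be reproduced with the LookaheadSet and Label classes defined later in this change (the token type values are made up, and the ANTLR tool classes are assumed to be on the classpath):

    import org.antlr.analysis.Label;
    import org.antlr.analysis.LookaheadSet;

    public class LookDemo {
        public static void main(String[] args) {
            int A = Label.MIN_TOKEN_TYPE;          // hypothetical token types
            int B = Label.MIN_TOKEN_TYPE + 1;

            LookaheadSet first = new LookaheadSet(B);                  // FIRST(b) = {B, EOR}
            first.orInPlace(new LookaheadSet(Label.EOR_TOKEN_TYPE));
            LookaheadSet follow = new LookaheadSet(A);                 // FOLLOW(b) = {A}

            LookaheadSet look = first;
            if (first.member(Label.EOR_TOKEN_TYPE)) {                  // b is nullable
                look = new LookaheadSet(follow);                       // copy; keep caches intact
                look.orInPlace(first);
                look.remove(Label.EOR_TOKEN_TYPE);
            }
            System.out.println(look.member(A) && look.member(B));      // true: look = {B, A}
        }
    }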
+
+	protected LookaheadSet _FIRST(NFAState s, boolean chaseFollowTransitions) {
+		/*
+		System.out.println("_LOOK("+s+") in rule "+s.enclosingRule);
+		if ( s.transition[0] instanceof RuleClosureTransition ) {
+			System.out.println("go to rule "+((NFAState)s.transition[0].target).enclosingRule);
+		}
+		*/
+		if ( !chaseFollowTransitions && s.isAcceptState() ) {
+			if ( grammar.type==Grammar.LEXER ) {
+				// FOLLOW makes no sense (at the moment!) for lexical rules.
+				// assume all char can follow
+				return new LookaheadSet(IntervalSet.COMPLETE_SET);
+			}
+			return new LookaheadSet(Label.EOR_TOKEN_TYPE);
+		}
+
+		if ( lookBusy.contains(s) ) {
+			// return a copy of an empty set; we may modify set inline
+			return new LookaheadSet();
+		}
+		lookBusy.add(s);
+
+		Transition transition0 = s.transition[0];
+		if ( transition0==null ) {
+			return null;
+		}
+
+		if ( transition0.label.isAtom() ) {
+			int atom = transition0.label.getAtom();
+			return new LookaheadSet(atom);
+		}
+		if ( transition0.label.isSet() ) {
+			IntSet sl = transition0.label.getSet();
+			return new LookaheadSet(sl);
+		}
+
+		// compute FIRST of transition 0
+		LookaheadSet tset = null;
+		// if transition 0 is a rule call and we don't want FOLLOW, check cache
+		if ( !chaseFollowTransitions && transition0 instanceof RuleClosureTransition ) {
+			tset = FIRSTCache.get((NFAState)transition0.target);
+		}
+
+		// if not in cache, must compute
+		if ( tset==null ) {
+			tset = _FIRST((NFAState)transition0.target, chaseFollowTransitions);
+			// save FIRST cache for transition 0 if rule call
+			if ( !chaseFollowTransitions && transition0 instanceof RuleClosureTransition ) {
+				FIRSTCache.put((NFAState)transition0.target, tset);
+			}
+		}
+
+        LookaheadSet tsetCached = tset; // tset is stored in cache. We can't return the same instance
+
+		// did we fall off the end?
+		if ( grammar.type!=Grammar.LEXER && tset.member(Label.EOR_TOKEN_TYPE) ) {
+			if ( transition0 instanceof RuleClosureTransition ) {
+				// we called a rule that found the end of the rule.
+				// That means the rule is nullable and we need to
+				// keep looking at what follows the rule ref.  E.g.,
+				// a : b A ; where b is nullable means that LOOK(a)
+				// should include A.
+				RuleClosureTransition ruleInvocationTrans =
+					(RuleClosureTransition)transition0;
+				// remove the EOR and get what follows
+				//tset.remove(Label.EOR_TOKEN_TYPE);
+				NFAState following = ruleInvocationTrans.followState;
+				LookaheadSet fset =	_FIRST(following, chaseFollowTransitions);
+				fset.orInPlace(tset); // tset cached; or into new set
+				fset.remove(Label.EOR_TOKEN_TYPE);
+				tset = fset;
+			}
+		}
+
+		Transition transition1 = s.transition[1];
+		if ( transition1!=null ) {
+			LookaheadSet tset1 =
+				_FIRST((NFAState)transition1.target, chaseFollowTransitions);
+			tset1.orInPlace(tset);
+			tset = tset1;
+		}
+
+		// never return a cached set; clone
+		return tset==tsetCached ? new LookaheadSet(tset) : tset;
+	}
+
+	/** Is there a non-syn-pred predicate visible from s that is not in
+	 *  the rule enclosing s?  This accounts for most predicate situations
+	 *  and lets ANTLR do a simple LL(1)+pred computation.
+	 *
+	 *  TODO: what about gated vs regular preds?
+	 */
+	public boolean detectConfoundingPredicates(NFAState s) {
+		lookBusy.clear();
+		Rule r = s.enclosingRule;
+		return _detectConfoundingPredicates(s, r, false) == DETECT_PRED_FOUND;
+	}
+
+	protected int _detectConfoundingPredicates(NFAState s,
+											   Rule enclosingRule,
+											   boolean chaseFollowTransitions)
+	{
+		//System.out.println("_detectNonAutobacktrackPredicates("+s+")");
+		if ( !chaseFollowTransitions && s.isAcceptState() ) {
+			if ( grammar.type==Grammar.LEXER ) {
+				// FOLLOW makes no sense (at the moment!) for lexical rules.
+				// assume all char can follow
+				return DETECT_PRED_NOT_FOUND;
+			}
+			return DETECT_PRED_EOR;
+		}
+
+		if ( lookBusy.contains(s) ) {
+			// return a copy of an empty set; we may modify set inline
+			return DETECT_PRED_NOT_FOUND;
+		}
+		lookBusy.add(s);
+
+		Transition transition0 = s.transition[0];
+		if ( transition0==null ) {
+			return DETECT_PRED_NOT_FOUND;
+		}
+
+		if ( !(transition0.label.isSemanticPredicate()||
+			   transition0.label.isEpsilon()) ) {
+			return DETECT_PRED_NOT_FOUND;
+		}
+
+		if ( transition0.label.isSemanticPredicate() ) {
+			//System.out.println("pred "+transition0.label);
+			SemanticContext ctx = transition0.label.getSemanticContext();
+			SemanticContext.Predicate p = (SemanticContext.Predicate)ctx;
+			if ( p.predicateAST.getType() != ANTLRParser.BACKTRACK_SEMPRED ) {
+				return DETECT_PRED_FOUND;
+			}
+		}
+		
+		/*
+		if ( transition0.label.isSemanticPredicate() ) {
+			System.out.println("pred "+transition0.label);
+			SemanticContext ctx = transition0.label.getSemanticContext();
+			SemanticContext.Predicate p = (SemanticContext.Predicate)ctx;
+			// if a non-syn-pred found not in enclosingRule, say we found one
+			if ( p.predicateAST.getType() != ANTLRParser.BACKTRACK_SEMPRED &&
+				 !p.predicateAST.enclosingRuleName.equals(enclosingRule.name) )
+			{
+				System.out.println("found pred "+p+" not in "+enclosingRule.name);
+				return DETECT_PRED_FOUND;
+			}
+		}
+		*/
+
+		int result = _detectConfoundingPredicates((NFAState)transition0.target,
+												  enclosingRule,
+												  chaseFollowTransitions);
+		if ( result == DETECT_PRED_FOUND ) {
+			return DETECT_PRED_FOUND;
+		}
+
+		if ( result == DETECT_PRED_EOR ) {
+			if ( transition0 instanceof RuleClosureTransition ) {
+				// we called a rule that found the end of the rule.
+				// That means the rule is nullable and we need to
+				// keep looking at what follows the rule ref.  E.g.,
+				// a : b A ; where b is nullable means that LOOK(a)
+				// should include A.
+				RuleClosureTransition ruleInvocationTrans =
+					(RuleClosureTransition)transition0;
+				NFAState following = ruleInvocationTrans.followState;
+				int afterRuleResult =
+					_detectConfoundingPredicates(following,
+												 enclosingRule,
+												 chaseFollowTransitions);
+				if ( afterRuleResult == DETECT_PRED_FOUND ) {
+					return DETECT_PRED_FOUND;
+				}
+			}
+		}
+
+		Transition transition1 = s.transition[1];
+		if ( transition1!=null ) {
+			int t1Result =
+				_detectConfoundingPredicates((NFAState)transition1.target,
+											 enclosingRule,
+											 chaseFollowTransitions);
+			if ( t1Result == DETECT_PRED_FOUND ) {
+				return DETECT_PRED_FOUND;
+			}
+		}
+
+		return DETECT_PRED_NOT_FOUND;
+	}
+
+	/** Return the predicate expression found via epsilon edges from s.  For
+	 *  now, do not look into other rules; keep it simple.  Include
+	 *  backtracking synpreds.
+	 */
+	public SemanticContext getPredicates(NFAState altStartState) {
+		lookBusy.clear();
+		return _getPredicates(altStartState, altStartState);
+	}
+
+	protected SemanticContext _getPredicates(NFAState s, NFAState altStartState) {
+		//System.out.println("_getPredicates("+s+")");
+		if ( s.isAcceptState() ) {
+			return null;
+		}
+
+		// avoid infinite loops from (..)* etc...
+		if ( lookBusy.contains(s) ) {
+			return null;
+		}
+		lookBusy.add(s);
+
+		Transition transition0 = s.transition[0];
+		// no transitions
+		if ( transition0==null ) {
+			return null;
+		}
+
+		// not a predicate and not even an epsilon
+		if ( !(transition0.label.isSemanticPredicate()||
+			   transition0.label.isEpsilon()) ) {
+			return null;
+		}
+
+		SemanticContext p = null;
+		SemanticContext p0;
+		SemanticContext p1 = null;
+		if ( transition0.label.isSemanticPredicate() ) {
+			//System.out.println("pred "+transition0.label);
+			p = transition0.label.getSemanticContext();
+			// ignore backtracking preds not on left edge for this decision
+			if ( ((SemanticContext.Predicate)p).predicateAST.getType() ==
+				  ANTLRParser.BACKTRACK_SEMPRED  &&
+				 s == altStartState.transition[0].target )
+			{
+				p = null; // don't count
+			}
+		}
+
+		// get preds from beyond this state
+		p0 = _getPredicates((NFAState)transition0.target, altStartState);
+
+		// get preds from other transition
+		Transition transition1 = s.transition[1];
+		if ( transition1!=null ) {
+			p1 = _getPredicates((NFAState)transition1.target, altStartState);
+		}
+
+		// combine: this predicate AND (preds straight ahead OR preds down the other transition)
+		return SemanticContext.and(p,SemanticContext.or(p0,p1));
+	}
+}
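For intuition, in a decision like ( {p1}? x | {p2}? y ) the per-alt calls to getPredicates() pick up p1 and p2 from the alts' left edges, and the recursion above combines whatever it finds as "this predicate AND (predicates straight ahead OR predicates down the other transition)". A tiny composition sketch using the SemanticContext operations referenced above (TruePredicate stands in for real grammar predicates, and null operands are tolerated, just as in the call above):

    SemanticContext p1 = new SemanticContext.TruePredicate();
    SemanticContext p2 = new SemanticContext.TruePredicate();
    SemanticContext joined = SemanticContext.and(p1, SemanticContext.or(p2, null));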
diff --git a/tool/src/main/java/org/antlr/analysis/LL1DFA.java b/tool/src/main/java/org/antlr/analysis/LL1DFA.java
new file mode 100644
index 0000000..55f0f8c
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/LL1DFA.java
@@ -0,0 +1,182 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.misc.IntervalSet;
+import org.antlr.misc.MultiMap;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/** A special DFA that is exactly LL(1) or LL(1) with backtracking mode
+ *  predicates to resolve edge set collisions.
+ */
+public class LL1DFA extends DFA {
+	/** From list of lookahead sets (one per alt in decision), create
+	 *  an LL(1) DFA.  One edge per set.
+	 *
+	 *  s0-{alt1}-&gt;:o=&gt;1
+	 *  | \
+	 *  |  -{alt2}-&gt;:o=&gt;2
+	 *  |
+	 *  ...
+	 */
+	@SuppressWarnings("OverridableMethodCallInConstructor")
+	public LL1DFA(int decisionNumber, NFAState decisionStartState, LookaheadSet[] altLook) {
+		DFAState s0 = newState();
+		startState = s0;
+		nfa = decisionStartState.nfa;
+		nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
+		this.decisionNumber = decisionNumber;
+		this.decisionNFAStartState = decisionStartState;
+		initAltRelatedInfo();
+		unreachableAlts = null;
+		for (int alt=1; alt<altLook.length; alt++) {
+			DFAState acceptAltState = newState();
+			acceptAltState.acceptState = true;
+			setAcceptState(alt, acceptAltState);
+			acceptAltState.k = 1;
+			acceptAltState.cachedUniquelyPredicatedAlt = alt;
+			Label e = getLabelForSet(altLook[alt].tokenTypeSet);
+			s0.addTransition(acceptAltState, e);
+		}
+	}
+
+	/** From a set of edgeset&rarr;list-of-alts mappings, create a DFA
+	 *  that uses syn preds for all |list-of-alts|&gt;1.
+	 */
+	@SuppressWarnings("OverridableMethodCallInConstructor")
+	public LL1DFA(int decisionNumber,
+				  NFAState decisionStartState,
+				  MultiMap<IntervalSet, Integer> edgeMap)
+	{
+		DFAState s0 = newState();
+		startState = s0;
+		nfa = decisionStartState.nfa;
+		nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
+		this.decisionNumber = decisionNumber;
+		this.decisionNFAStartState = decisionStartState;
+		initAltRelatedInfo();
+		unreachableAlts = null;
+		for (Map.Entry<IntervalSet, List<Integer>> entry : edgeMap.entrySet()) {
+			IntervalSet edge = entry.getKey();
+			List<Integer> alts = entry.getValue();
+			Collections.sort(alts); // make sure alts are attempted in order
+			//System.out.println(edge+" -> "+alts);
+			DFAState s = newState();
+			s.k = 1;
+			Label e = getLabelForSet(edge);
+			s0.addTransition(s, e);
+			if ( alts.size()==1 ) {
+				s.acceptState = true;
+				int alt = alts.get(0);
+				setAcceptState(alt, s);
+				s.cachedUniquelyPredicatedAlt = alt;
+			}
+			else {
+				// resolve with syntactic predicates.  Add edges from
+				// state s that test predicates.
+				s.resolvedWithPredicates = true;
+				for (int i = 0; i < alts.size(); i++) {
+					int alt = (int)alts.get(i);
+					s.cachedUniquelyPredicatedAlt =	NFA.INVALID_ALT_NUMBER;
+					DFAState predDFATarget = getAcceptState(alt);
+					if ( predDFATarget==null ) {
+						predDFATarget = newState(); // create if not there.
+						predDFATarget.acceptState = true;
+						predDFATarget.cachedUniquelyPredicatedAlt =	alt;
+						setAcceptState(alt, predDFATarget);
+					}
+					// add a transition to pred target from d
+					/*
+					int walkAlt =
+						decisionStartState.translateDisplayAltToWalkAlt(alt);
+					NFAState altLeftEdge = nfa.grammar.getNFAStateForAltOfDecision(decisionStartState, walkAlt);
+					NFAState altStartState = (NFAState)altLeftEdge.transition[0].target;
+					SemanticContext ctx = nfa.grammar.ll1Analyzer.getPredicates(altStartState);
+					System.out.println("sem ctx = "+ctx);
+					if ( ctx == null ) {
+						ctx = new SemanticContext.TruePredicate();
+					}
+					s.addTransition(predDFATarget, new Label(ctx));
+					*/
+					SemanticContext.Predicate synpred =
+						getSynPredForAlt(decisionStartState, alt);
+					if ( synpred == null ) {
+						synpred = new SemanticContext.TruePredicate();
+					}
+					s.addTransition(predDFATarget, new PredicateLabel(synpred));
+				}
+			}
+		}
+		//System.out.println("dfa for preds=\n"+this);
+	}
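For intuition, consider a hypothetical decision: if the edge map says the token set {ID} predicts alts [1, 3] while {INT} predicts only alt 2, the constructor adds an INT edge from s0 straight to an accept state for alt 2, whereas the ID edge goes to a state marked resolvedWithPredicates that carries one predicate edge per alt (the alt's synpred if one exists, otherwise a true predicate), attempted in sorted alt order.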
+
+	protected Label getLabelForSet(IntervalSet edgeSet) {
+		Label e;
+		int atom = edgeSet.getSingleElement();
+		if ( atom != Label.INVALID ) {
+			e = new Label(atom);
+		}
+		else {
+			e = new Label(edgeSet);
+		}
+		return e;
+	}
+
+	protected SemanticContext.Predicate getSynPredForAlt(NFAState decisionStartState,
+														 int alt)
+	{
+		int walkAlt =
+			decisionStartState.translateDisplayAltToWalkAlt(alt);
+		NFAState altLeftEdge =
+			nfa.grammar.getNFAStateForAltOfDecision(decisionStartState, walkAlt);
+		NFAState altStartState = (NFAState)altLeftEdge.transition[0].target;
+		//System.out.println("alt "+alt+" start state = "+altStartState.stateNumber);
+		if ( altStartState.transition[0].isSemanticPredicate() ) {
+			SemanticContext ctx = altStartState.transition[0].label.getSemanticContext();
+			if ( ctx.isSyntacticPredicate() ) {
+				SemanticContext.Predicate p = (SemanticContext.Predicate)ctx;
+				if ( p.predicateAST.getType() == ANTLRParser.BACKTRACK_SEMPRED ) {
+					/*
+					System.out.println("syn pred for alt "+walkAlt+" "+
+									   ((SemanticContext.Predicate)altStartState.transition[0].label.getSemanticContext()).predicateAST);
+					*/
+					if ( ctx.isSyntacticPredicate() ) {
+						nfa.grammar.synPredUsedInDFA(this, ctx);
+					}
+					return (SemanticContext.Predicate)altStartState.transition[0].label.getSemanticContext();
+				}
+			}
+		}
+		return null;
+	}
+}
diff --git a/tool/src/main/java/org/antlr/analysis/Label.java b/tool/src/main/java/org/antlr/analysis/Label.java
new file mode 100644
index 0000000..edd0c19
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/Label.java
@@ -0,0 +1,387 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.misc.IntSet;
+import org.antlr.misc.IntervalSet;
+import org.antlr.tool.Grammar;
+
+/** A state machine transition label.  A label can be a simple label such as
+ *  a token or character, a set of chars or tokens, an epsilon transition, a
+ *  semantic predicate (which implies an epsilon transition), or a tree of
+ *  predicates (in a DFA).  Special label types have to be &lt; 0 to avoid
+ *  conflict with char.
+ */
+public class Label implements Comparable<Label>, Cloneable {
+    public static final int INVALID = -7;
+
+	public static final int ACTION = -6;
+	
+	public static final int EPSILON = -5;
+
+    public static final String EPSILON_STR = "<EPSILON>";
+
+    /** label is a semantic predicate; implies label is epsilon also */
+    public static final int SEMPRED = -4;
+
+    /** label is a set of tokens or char */
+    public static final int SET = -3;
+
+    /** End of Token is like EOF for lexer rules.  It implies that no more
+     *  characters are available and that NFA conversion should terminate
+     *  for this path.  For example
+     *
+     *  A : 'a' 'b' | 'a' ;
+     *
+     *  yields a DFA predictor:
+     *
+     *  o-a-&gt;o-b-&gt;1   predict alt 1
+     *       |
+     *       |-EOT-&gt;o predict alt 2
+     *
+     *  To generate code for EOT, treat it as the "default" path, which
+     *  implies there is no way to mismatch a char for the state from
+     *  which the EOT emanates.
+     */
+    public static final int EOT = -2;
+
+    public static final int EOF = -1;
+
+	/** We have labels like EPSILON that are below 0; it's hard to
+	 *  store them in an array with negative index so use this
+	 *  constant as an index shift when accessing arrays based upon
+	 *  token type.  If real token type is i, then array index would be
+	 *  NUM_FAUX_LABELS + i.
+	 */
+	public static final int NUM_FAUX_LABELS = -INVALID;
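For example (illustrative only; numTokenTypes and tokenType are hypothetical values), a table indexed by label value would be sized and indexed like this:

    int[] countsByLabel = new int[Label.NUM_FAUX_LABELS + numTokenTypes];
    countsByLabel[Label.NUM_FAUX_LABELS + tokenType]++;    // real token type: index >= NUM_FAUX_LABELS
    countsByLabel[Label.NUM_FAUX_LABELS + Label.EOT]++;    // faux label (negative) still maps to index >= 0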
+
+    /** Anything at this value or larger can be considered a simple atom int
+     *  for easy comparison during analysis only; faux labels are not used
+	 *  during parse time for real token types or char values.
+     */
+    public static final int MIN_ATOM_VALUE = EOT;
+
+    // TODO: is 0 a valid unicode char? max is FFFF -1, right?
+    public static final int MIN_CHAR_VALUE = '\u0000';
+    public static final int MAX_CHAR_VALUE = '\uFFFF';
+
+	/** End of rule token type; imaginary token type used only for
+	 *  local, partial FOLLOW sets to indicate that the local FOLLOW
+	 *  hit the end of rule.  During error recovery, the local FOLLOW
+	 *  of a token reference may go beyond the end of the rule and have
+	 *  to use FOLLOW(rule).  I have to just shift the token types to 2..n
+	 *  rather than 1..n to accommodate this imaginary token in my bitsets.
+	 *  If I didn't use a bitset implementation for runtime sets, I wouldn't
+	 *  need this.  EOF is another candidate for a run time token type for
+	 *  parsers.  Follow sets are not computed for lexers so we do not have
+	 *  this issue.
+	 */
+	public static final int EOR_TOKEN_TYPE =
+		org.antlr.runtime.Token.EOR_TOKEN_TYPE;
+
+	public static final int DOWN = org.antlr.runtime.Token.DOWN;
+	public static final int UP = org.antlr.runtime.Token.UP;
+
+    /** tokens and char range overlap; tokens are MIN_TOKEN_TYPE..n */
+	public static final int MIN_TOKEN_TYPE =
+		org.antlr.runtime.Token.MIN_TOKEN_TYPE;
+
+    /** The wildcard '.' char atom implies all valid characters==UNICODE */
+    //public static final IntSet ALLCHAR = IntervalSet.of(MIN_CHAR_VALUE,MAX_CHAR_VALUE);
+
+    /** The token type or character value; or, signifies special label. */
+    protected int label;
+
+    /** A set of token types or character codes if label==SET */
+	// TODO: try IntervalSet for everything
+    protected IntSet labelSet;
+
+    public Label(int label) {
+        this.label = label;
+    }
+
+    /** Make a set label */
+    public Label(IntSet labelSet) {
+		if ( labelSet==null ) {
+			this.label = SET;
+			this.labelSet = IntervalSet.of(INVALID);
+			return;
+		}
+		int singleAtom = labelSet.getSingleElement();
+        if ( singleAtom!=INVALID ) {
+            // convert back to a single atomic element if |labelSet|==1
+            label = singleAtom;
+            return;
+        }
+        this.label = SET;
+        this.labelSet = labelSet;
+    }
+
+	@Override
+	public Object clone() {
+		Label l;
+		try {
+			l = (Label)super.clone();
+			l.label = this.label;
+            l.labelSet = new IntervalSet();
+			l.labelSet.addAll(this.labelSet);
+		}
+		catch (CloneNotSupportedException e) {
+			throw new InternalError();
+		}
+		return l;
+	}
+
+	public void add(Label a) {
+		if ( isAtom() ) {
+			labelSet = IntervalSet.of(label);
+			label=SET;
+			if ( a.isAtom() ) {
+				labelSet.add(a.getAtom());
+			}
+			else if ( a.isSet() ) {
+				labelSet.addAll(a.getSet());
+			}
+			else {
+				throw new IllegalStateException("can't add element to Label of type "+label);
+			}
+			return;
+		}
+		if ( isSet() ) {
+			if ( a.isAtom() ) {
+				labelSet.add(a.getAtom());
+			}
+			else if ( a.isSet() ) {
+				labelSet.addAll(a.getSet());
+			}
+			else {
+				throw new IllegalStateException("can't add element to Label of type "+label);
+			}
+			return;
+		}
+		throw new IllegalStateException("can't add element to Label of type "+label);
+	}
+
+    public boolean isAtom() {
+        return label>=MIN_ATOM_VALUE;
+    }
+
+    public boolean isEpsilon() {
+        return label==EPSILON;
+    }
+
+	public boolean isSemanticPredicate() {
+		return false;
+	}
+
+	public boolean isAction() {
+		return false;
+	}
+
+    public boolean isSet() {
+        return label==SET;
+    }
+
+    /** return the single atom label or INVALID if not a single atom */
+    public int getAtom() {
+        if ( isAtom() ) {
+            return label;
+        }
+        return INVALID;
+    }
+
+    public IntSet getSet() {
+        if ( label!=SET ) {
+            // convert single element to a set if they ask for it.
+            return IntervalSet.of(label);
+        }
+        return labelSet;
+    }
+
+    public void setSet(IntSet set) {
+        label=SET;
+        labelSet = set;
+    }
+
+    public SemanticContext getSemanticContext() {
+        return null;
+    }
+
+	public boolean matches(int atom) {
+		if ( label==atom ) {
+			return true; // handle the single atom case efficiently
+		}
+		if ( isSet() ) {
+			return labelSet.member(atom);
+		}
+		return false;
+	}
+
+	public boolean matches(IntSet set) {
+		if ( isAtom() ) {
+			return set.member(getAtom());
+		}
+		if ( isSet() ) {
+			// matches if intersection non-nil
+			return !getSet().and(set).isNil();
+		}
+		return false;
+	}
+
+
+	public boolean matches(Label other) {
+		if ( other.isSet() ) {
+			return matches(other.getSet());
+		}
+		if ( other.isAtom() ) {
+			return matches(other.getAtom());
+		}
+		return false;
+	}
+
+	@Override
+    public int hashCode() {
+        if (label==SET) {
+            return labelSet.hashCode();
+		}
+		else {
+			return label;
+		}
+	}
+
+	// TODO: do we care about comparing set {A} with atom A? Doesn't now.
+	@Override
+	public boolean equals(Object o) {
+		if ( o==null ) {
+			return false;
+		}
+		if ( this == o ) {
+			return true; // equals if same object
+		}
+		// labels must be the same even if epsilon or set or sempred etc...
+        if ( label!=((Label)o).label ) {
+            return false;
+        }
+		if ( label==SET ) {
+			return this.labelSet.equals(((Label)o).labelSet);
+		}
+		return true;  // label values are same, so true
+    }
+
+	@Override
+    public int compareTo(Label o) {
+        return this.label-o.label;
+    }
+
+    /** Predicates are lists of AST nodes from the NFA created from the
+     *  grammar, but the same predicate could be cut/paste into multiple
+     *  places in the grammar.  I must compare the text of all the
+     *  predicates to truly answer whether {p1,p2} .equals {p1,p2}.
+     *  Unfortunately, I cannot rely on the AST.equals() to work properly
+     *  so I must do a brute force O(n^2) nested traversal of the Set
+     *  doing a String compare.
+     *
+     *  At this point, Labels are not compared for equals when they are
+     *  predicates, but here's the code for future use.
+     */
+    /*
+    protected boolean predicatesEquals(Set others) {
+        Iterator iter = semanticContext.iterator();
+        while (iter.hasNext()) {
+            AST predAST = (AST) iter.next();
+            Iterator inner = semanticContext.iterator();
+            while (inner.hasNext()) {
+                AST otherPredAST = (AST) inner.next();
+                if ( !predAST.getText().equals(otherPredAST.getText()) ) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+      */
+
+	@Override
+    public String toString() {
+        switch (label) {
+            case SET :
+                return labelSet.toString();
+            default :
+                return String.valueOf(label);
+        }
+    }
+
+    public String toString(Grammar g) {
+        switch (label) {
+            case SET :
+                return labelSet.toString(g);
+            default :
+                return g.getTokenDisplayName(label);
+        }
+    }
+
+    /*
+    public String predicatesToString() {
+        if ( semanticContext==NFAConfiguration.DEFAULT_CLAUSE_SEMANTIC_CONTEXT ) {
+            return "!other preds";
+        }
+        StringBuffer buf = new StringBuffer();
+        Iterator iter = semanticContext.iterator();
+        while (iter.hasNext()) {
+            AST predAST = (AST) iter.next();
+            buf.append(predAST.getText());
+            if ( iter.hasNext() ) {
+                buf.append("&");
+            }
+        }
+        return buf.toString();
+    }
+    */
+
+	public static boolean intersect(Label label, Label edgeLabel) {
+		boolean hasIntersection = false;
+		boolean labelIsSet = label.isSet();
+		boolean edgeIsSet = edgeLabel.isSet();
+		if ( !labelIsSet && !edgeIsSet && edgeLabel.label==label.label ) {
+			hasIntersection = true;
+		}
+		else if ( labelIsSet && edgeIsSet &&
+				  !edgeLabel.getSet().and(label.getSet()).isNil() ) {
+			hasIntersection = true;
+		}
+		else if ( labelIsSet && !edgeIsSet &&
+				  label.getSet().member(edgeLabel.label) ) {
+			hasIntersection = true;
+		}
+		else if ( !labelIsSet && edgeIsSet &&
+				  edgeLabel.getSet().member(label.label) ) {
+			hasIntersection = true;
+		}
+		return hasIntersection;
+	}
+}
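A short usage sketch of the Label API defined above (the token type values are made up; the ANTLR tool classes are assumed to be on the classpath):

    import org.antlr.analysis.Label;
    import org.antlr.misc.IntervalSet;

    public class LabelDemo {
        public static void main(String[] args) {
            int T1 = Label.MIN_TOKEN_TYPE;          // hypothetical token types
            int T2 = Label.MIN_TOKEN_TYPE + 1;

            Label a = new Label(T1);                // single-atom label
            System.out.println(a.isAtom());         // true
            a.add(new Label(T2));                   // promotes a to a SET label {T1, T2}
            System.out.println(a.isSet());          // true
            System.out.println(a.matches(T2));      // true

            Label set = new Label(IntervalSet.of(T2));
            System.out.println(Label.intersect(a, set));   // true: {T1, T2} and {T2} overlap
        }
    }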
diff --git a/tool/src/main/java/org/antlr/analysis/LookaheadSet.java b/tool/src/main/java/org/antlr/analysis/LookaheadSet.java
new file mode 100644
index 0000000..4c4efb4
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/LookaheadSet.java
@@ -0,0 +1,112 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.misc.IntSet;
+import org.antlr.misc.IntervalSet;
+import org.antlr.tool.Grammar;
+
+/** An LL(1) lookahead set; contains a set of token types and a "hasEOF"
+ *  condition when the set contains EOF.  Since EOF is -1 everywhere and -1
+ *  cannot be stored in my BitSet, I set a condition here.  There may be other
+ *  reasons in the future to abstract a LookaheadSet over a raw BitSet.
+ */
+public class LookaheadSet {
+	public IntervalSet tokenTypeSet;
+
+	public LookaheadSet() {
+		tokenTypeSet = new IntervalSet();
+	}
+
+	public LookaheadSet(IntSet s) {
+		this();
+		tokenTypeSet.addAll(s);
+	}
+
+	public LookaheadSet(int atom) {
+		tokenTypeSet = IntervalSet.of(atom);
+	}
+
+    public LookaheadSet(LookaheadSet other) {
+        this();
+        this.tokenTypeSet.addAll(other.tokenTypeSet);
+    }
+
+    public void orInPlace(LookaheadSet other) {
+		this.tokenTypeSet.addAll(other.tokenTypeSet);
+	}
+
+	public LookaheadSet or(LookaheadSet other) {
+		return new LookaheadSet(tokenTypeSet.or(other.tokenTypeSet));
+	}
+
+	public LookaheadSet subtract(LookaheadSet other) {
+		return new LookaheadSet(this.tokenTypeSet.subtract(other.tokenTypeSet));
+	}
+
+	public boolean member(int a) {
+		return tokenTypeSet.member(a);
+	}
+
+	public LookaheadSet intersection(LookaheadSet s) {
+		IntSet i = this.tokenTypeSet.and(s.tokenTypeSet);
+		LookaheadSet intersection = new LookaheadSet(i);
+		return intersection;
+	}
+
+	public boolean isNil() {
+		return tokenTypeSet.isNil();
+	}
+
+	public void remove(int a) {
+		tokenTypeSet = tokenTypeSet.subtract(IntervalSet.of(a));
+	}
+
+	@Override
+	public int hashCode() {
+		return tokenTypeSet.hashCode();
+	}
+
+	@Override
+	public boolean equals(Object other) {
+		return tokenTypeSet.equals(((LookaheadSet)other).tokenTypeSet);
+	}
+
+	public String toString(Grammar g) {
+		if ( tokenTypeSet==null ) {
+			return "";
+		}
+		String r = tokenTypeSet.toString(g);
+		return r;
+	}
+
+	@Override
+	public String toString() {
+		return toString(null);
+	}
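+
+	/* Usage sketch (illustrative only; the token type values are made up):
+	 *
+	 *   LookaheadSet a = new LookaheadSet(5);   // {5}
+	 *   LookaheadSet b = new LookaheadSet(7);   // {7}
+	 *   a.orInPlace(b);                         // a is now {5, 7}
+	 *   a.member(5);                            // true
+	 *   a.subtract(b);                          // new set {5}
+	 */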
+}
diff --git a/tool/src/main/java/org/antlr/analysis/MachineProbe.java b/tool/src/main/java/org/antlr/analysis/MachineProbe.java
new file mode 100644
index 0000000..2c2dc7e
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/MachineProbe.java
@@ -0,0 +1,185 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.analysis;
+
+import org.antlr.misc.IntSet;
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+import org.antlr.tool.Grammar;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+public class MachineProbe {
+	DFA dfa;
+
+	public MachineProbe(DFA dfa) {
+		this.dfa = dfa;
+	}
+
+	List<DFAState> getAnyDFAPathToTarget(DFAState targetState) {
+		Set<DFAState> visited = new HashSet<DFAState>();
+		return getAnyDFAPathToTarget(dfa.startState, targetState, visited);
+	}
+
+	public List<DFAState> getAnyDFAPathToTarget(DFAState startState,
+			DFAState targetState, Set<DFAState> visited) {
+		List<DFAState> dfaStates = new ArrayList<DFAState>();
+		visited.add(startState);
+		if (startState.equals(targetState)) {
+			dfaStates.add(targetState);
+			return dfaStates;
+		}
+		// for (Edge e : startState.edges) { // walk edges looking for valid
+		// path
+		for (int i = 0; i < startState.getNumberOfTransitions(); i++) {
+			Transition e = startState.getTransition(i);
+			if (!visited.contains(e.target)) {
+				List<DFAState> path = getAnyDFAPathToTarget(
+						(DFAState) e.target, targetState, visited);
+				if (path != null) { // found path, we're done
+					dfaStates.add(startState);
+					dfaStates.addAll(path);
+					return dfaStates;
+				}
+			}
+		}
+		return null;
+	}
+
+	/** Return a list of edge labels from start state to targetState. */
+	public List<IntSet> getEdgeLabels(DFAState targetState) {
+		List<DFAState> dfaStates = getAnyDFAPathToTarget(targetState);
+		List<IntSet> labels = new ArrayList<IntSet>();
+		for (int i = 0; i < dfaStates.size() - 1; i++) {
+			DFAState d = dfaStates.get(i);
+			DFAState nextState = dfaStates.get(i + 1);
+			// walk looking for edge whose target is next dfa state
+			for (int j = 0; j < d.getNumberOfTransitions(); j++) {
+				Transition e = d.getTransition(j);
+				if (e.target.stateNumber == nextState.stateNumber) {
+					labels.add(e.label.getSet());
+				}
+			}
+		}
+		return labels;
+	}
+
+	/**
+	 * Given List&lt;IntSet&gt;, return a String with a useful representation of the
+	 * associated input string. One could show something different for lexers
+	 * and parsers, for example.
+	 */
+	public String getInputSequenceDisplay(Grammar g, List<IntSet> labels) {
+		List<String> tokens = new ArrayList<String>();
+		for (IntSet label : labels)
+			tokens.add(label.toString(g));
+		return tokens.toString();
+	}
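+
+	// Usage sketch (illustrative; "dfa" and "targetState" are assumed to come
+	// from an existing analysis of some decision):
+	//   MachineProbe probe = new MachineProbe(dfa);
+	//   List<IntSet> labels = probe.getEdgeLabels(targetState);
+	//   String input = probe.getInputSequenceDisplay(dfa.nfa.grammar, labels);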
+
+	/**
+	 * Given an alternative associated with a DFA state, return the list of
+	 * tokens (from grammar) associated with path through NFA following the
+	 * labels sequence. The nfaStates gives the set of NFA states associated
+	 * with alt that take us from start to stop. One of the NFA states in
+	 * nfaStates[i] will have an edge intersecting with labels[i].
+	 */
+	public List<Token> getGrammarLocationsForInputSequence(
+			List<Set<NFAState>> nfaStates, List<IntSet> labels) {
+		List<Token> tokens = new ArrayList<Token>();
+		for (int i = 0; i < nfaStates.size() - 1; i++) {
+			Set<NFAState> cur = nfaStates.get(i);
+			Set<NFAState> next = nfaStates.get(i + 1);
+			IntSet label = labels.get(i);
+			// find NFA state with edge whose label matches labels[i]
+			nfaConfigLoop: 
+			
+			for (NFAState p : cur) {
+				// walk p's transitions, looking for label
+				for (int j = 0; j < p.getNumberOfTransitions(); j++) {
+					Transition t = p.transition(j);
+					if (!t.isEpsilon() && !t.label.getSet().and(label).isNil()
+							&& next.contains(t.target)) {
+						if (p.associatedASTNode != null) {
+							Token oldtoken = p.associatedASTNode.token;
+							CommonToken token = new CommonToken(oldtoken
+									.getType(), oldtoken.getText());
+							token.setLine(oldtoken.getLine());
+							token.setCharPositionInLine(oldtoken.getCharPositionInLine());
+							tokens.add(token);
+							break nfaConfigLoop; // found path, move to next
+													// NFAState set
+						}
+					}
+				}
+			}
+		}
+		return tokens;
+	}
+
+	// /** Used to find paths through syntactically ambiguous DFA. If we've
+	// * seen statement number before, what did we learn?
+	// */
+	// protected Map<Integer, Integer> stateReachable;
+	//
+	// public Map<DFAState, Set<DFAState>> getReachSets(Collection<DFAState>
+	// targets) {
+	// Map<DFAState, Set<DFAState>> reaches = new HashMap<DFAState,
+	// Set<DFAState>>();
+	// // targets can reach themselves
+	// for (final DFAState d : targets) {
+	// reaches.put(d,new HashSet<DFAState>() {{add(d);}});
+	// }
+	//
+	// boolean changed = true;
+	// while ( changed ) {
+	// changed = false;
+	// for (DFAState d : dfa.states.values()) {
+	// if ( d.getNumberOfEdges()==0 ) continue;
+	// Set<DFAState> r = reaches.get(d);
+	// if ( r==null ) {
+	// r = new HashSet<DFAState>();
+	// reaches.put(d, r);
+	// }
+	// int before = r.size();
+	// // add all reaches from all edge targets
+	// for (Edge e : d.edges) {
+	// //if ( targets.contains(e.target) ) r.add(e.target);
+	// r.addAll( reaches.get(e.target) );
+	// }
+	// int after = r.size();
+	// if ( after>before) changed = true;
+	// }
+	// }
+	// return reaches;
+	// }
+
+}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NFA.java b/tool/src/main/java/org/antlr/analysis/NFA.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/analysis/NFA.java
rename to tool/src/main/java/org/antlr/analysis/NFA.java
diff --git a/tool/src/main/java/org/antlr/analysis/NFAConfiguration.java b/tool/src/main/java/org/antlr/analysis/NFAConfiguration.java
new file mode 100644
index 0000000..6ed9326
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/NFAConfiguration.java
@@ -0,0 +1,155 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.misc.Utils;
+
+/** An NFA state, predicted alt, and syntactic/semantic context.
+ *  The syntactic context is a pointer into the rule invocation
+ *  chain used to arrive at the state.  The semantic context is
+ *  the unordered set of semantic predicates encountered before reaching
+ *  an NFA state.
+ */
+public class NFAConfiguration {
+    /** The NFA state associated with this configuration */
+    public int state;
+
+    /** What alt is predicted by this configuration */
+    public int alt;
+
+    /** What is the stack of rule invocations that got us to state? */
+    public NFAContext context;
+
+    /** The set of semantic predicates associated with this NFA
+     *  configuration.  The predicates were found on the way to
+     *  the associated NFA state in this syntactic context.
+     *  Set&lt;AST&gt;: track nodes in grammar containing the predicate
+     *  for error messages and such (nice to know where the predicate
+     *  came from in case of duplicates etc...).  By using a set,
+     *  the equals() method will correctly show {pred1,pred2} as equals()
+     *  to {pred2,pred1}.
+     */
+    public SemanticContext semanticContext = SemanticContext.EMPTY_SEMANTIC_CONTEXT;
+
+    /** Indicate that this configuration has been resolved and no further
+     *  DFA processing should occur with it.  Essentially, this is used
+     *  as an "ignore" bit so that upon a set of nondeterministic configurations
+     *  such as (s|2) and (s|3), I can set (s|3) to resolved=true (and any
+     *  other configuration associated with alt 3).
+     */
+    protected boolean resolved;
+
+    /** This bit is used to indicate a semantic predicate will be
+     *  used to resolve the conflict.  Method
+     *  DFA.findNewDFAStatesAndAddDFATransitions will add edges for
+     *  the predicates after it performs the reach operation.  The
+     *  nondeterminism resolver sets this when it finds a set of
+     *  nondeterministic configurations (as it does for "resolved" field)
+     *  that have enough predicates to resolve the conflict.
+     */
+    protected boolean resolveWithPredicate;
+
+    /** Lots of NFA states have only epsilon edges (1 or 2).  We can
+     *  safely consider only n&gt;0 during closure.
+     */
+    protected int numberEpsilonTransitionsEmanatingFromState;
+
+    /** Indicates that the NFA state associated with this configuration
+     *  has exactly one transition and it's an atom (not epsilon etc...).
+     */
+    protected boolean singleAtomTransitionEmanating;
+
+	//protected boolean addedDuringClosure = true;
+
+	public NFAConfiguration(int state,
+                            int alt,
+                            NFAContext context,
+                            SemanticContext semanticContext)
+    {
+        this.state = state;
+        this.alt = alt;
+        this.context = context;
+        this.semanticContext = semanticContext;
+    }
+
+    /** An NFA configuration is equal to another if both have
+     *  the same state, they predict the same alternative, and their
+     *  syntactic/semantic contexts are the same.  I don't think
+     *  the state|alt|ctx could be the same and have two different
+     *  semantic contexts, but might as well define equals to be
+     *  everything.
+     */
+	@Override
+    public boolean equals(Object o) {
+		if ( o==null ) {
+			return false;
+		}
+        NFAConfiguration other = (NFAConfiguration)o;
+        return this.state==other.state &&
+               this.alt==other.alt &&
+               this.context.equals(other.context)&&
+               this.semanticContext.equals(other.semanticContext);
+    }
+
+	@Override
+    public int hashCode() {
+        int h = state + alt + context.hashCode();
+        return h;
+    }
+
+	@Override
+	public String toString() {
+		return toString(true);
+	}
+
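+	/** Render this configuration; e.g. (illustrative numbers) a configuration
+	 *  in NFA state 5 predicting alt 2 with call stack [8 $] prints as
+	 *  "5|2|[8 $]".  "|resolved" and "|resolveWithPredicate" are appended
+	 *  when those flags are set.
+	 */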
+	public String toString(boolean showAlt) {
+		StringBuilder buf = new StringBuilder();
+		buf.append(state);
+		if ( showAlt ) {
+			buf.append("|");
+			buf.append(alt);
+		}
+		if ( context.parent!=null ) {
+            buf.append("|");
+            buf.append(context);
+        }
+        if ( semanticContext!=null &&
+             semanticContext!=SemanticContext.EMPTY_SEMANTIC_CONTEXT ) {
+            buf.append("|");
+			String escQuote = Utils.replace(semanticContext.toString(), "\"", "\\\"");
+			buf.append(escQuote);
+        }
+        if ( resolved ) {
+            buf.append("|resolved");
+        }
+		if ( resolveWithPredicate ) {
+			buf.append("|resolveWithPredicate");
+		}
+		return buf.toString();
+    }
+}
diff --git a/tool/src/main/java/org/antlr/analysis/NFAContext.java b/tool/src/main/java/org/antlr/analysis/NFAContext.java
new file mode 100644
index 0000000..ac15f14
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/NFAContext.java
@@ -0,0 +1,297 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+/** A tree node for tracking the call chains for NFAs that invoke
+ *  other NFAs.  These trees only have to point upwards to their parents
+ *  so we can walk back up the tree (i.e., pop stuff off the stack).  We
+ *  never walk from the stack down through the children.
+ *
+ *  Each alt predicted in a decision has its own context tree,
+ *  representing all possible return nodes.  The initial stack has
+ *  EOF ("$") in it.  So, for m alternative productions, the lookahead
+ *  DFA will have m NFAContext trees.
+ *
+ *  To "push" a new context, just do "new NFAContext(context-parent, state)"
+ *  which will add itself to the parent.  The root is NFAContext(null, null).
+ *
+ *  The complete context for an NFA configuration is the set of invoking states
+ *  on the path from this node thru the parent pointers to the root.
+ */
+public class NFAContext {
+	/** This is similar to Bermudez's m constant in his LAR(m) where
+	 *  you bound the stack so your states don't explode.  The main difference
+	 *  is that I bound only recursion on the stack, not the simple stack size.
+	 *  This looser constraint will let the conversion roam further to find
+	 *  lookahead to resolve a decision.
+	 *
+	 *  Bermudez's m operates differently as it is his LR stack depth;
+	 *  I'm pretty sure it therefore includes all stack symbols.  Here I
+	 *  restrict the size of an NFA configuration to be finite because a
+	 *  stack component may mention the same NFA invocation state at
+	 *  most m times.  Hence, the number of DFA states will not grow forever.
+	 *  With recursive rules like
+	 *
+	 *    e : '(' e ')' | INT ;
+	 *
+	 *  you could chase your tail forever if somebody said "s : e '.' | e ';' ;"
+	 *  This constant prevents new states from being created after a stack gets
+	 *  "too big".  Actually (12/14/2007) I realize that this example is
+	 *  trapped by the non-LL(*) detector for recursion in &gt; 1 alt.  Here is
+	 *  an example that trips stack overflow:
+	 *
+	 *	  s : a Y | A A A A A X ; // force recursion past m=4
+	 *	  a : A a | Q;
+	 *
+	 *  If that were:
+	 *
+	 *	  s : a Y | A+ X ;
+	 *
+	 *  it could loop forever.
+	 *
+	 *  Imagine doing a depth-first search on the e DFA...as you chase an input
+	 *  sequence you can recurse to the same rule such as e above.  You'd have a
+	 *  chain of ((((.  When you get to some point, you have to give up.  The
+	 *  states in the chain will have longer and longer NFA config stacks.
+	 *  Must limit size.
+	 *
+	 *  max=0 implies you cannot ever jump to another rule during closure.
+	 *  max=1 implies you can make as many calls as you want--you just
+	 *        can't ever visit a state that is on your rule invocation stack.
+	 * 		  I.e., you cannot ever recurse.
+	 *  max=2 implies you are able to recurse once (i.e., call a rule twice
+	 *  	  from the same place).
+	 *
+	 *  This tracks recursion to a rule specific to an invocation site!
+	 *  It does not detect multiple calls to a rule from different rule
+	 *  invocation states.  We are guaranteed to terminate because the
+	 *  stack can only grow as big as the number of NFA states * max.
+	 *
+	 *  I noticed that the Java grammar didn't work with max=1, but did with
+	 *  max=4.  Let's set it to 4.  Recursion is sometimes needed to resolve some
+	 *  fixed lookahead decisions.
+	 */
+	public static int MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK = 4;
+
+    public NFAContext parent;
+
+    /** The NFA state that invoked another rule's start state is recorded
+     *  on the rule invocation context stack.
+     */
+    public NFAState invokingState;
+
+    /** Computing the hashCode is very expensive and closureBusy()
+     *  uses it to track when it's seen a state|ctx before to avoid
+     *  infinite loops.  As we add new contexts, record the hash code
+     *  as this.invokingState + parent.cachedHashCode.  Avoids walking
+     *  up the tree for every hashCode().  Note that this caching works
+     *  because a context is a monotonically growing tree of context nodes
+     *  and nothing on the stack is ever modified...ctx just grows
+     *  or shrinks.
+     */
+    protected int cachedHashCode;
+
+    public NFAContext(NFAContext parent, NFAState invokingState) {
+        this.parent = parent;
+        this.invokingState = invokingState;
+        if ( invokingState!=null ) {
+            this.cachedHashCode = invokingState.stateNumber;
+        }
+        if ( parent!=null ) {
+            this.cachedHashCode += parent.cachedHashCode;
+        }
+    }
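+
+    // Push/pop sketch (illustrative): "pushing" rule invocation state s onto
+    // an existing context ctx is simply "ctx = new NFAContext(ctx, s)";
+    // "popping" is "ctx = ctx.parent".  The empty/root context is
+    // new NFAContext(null, null).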
+
+	/** Two contexts are equals() if both have
+	 *  the same call stack; walk upwards to the root.
+	 *  Recall that the root sentinel node has no invokingState and no parent.
+	 *  Note that you may be comparing contexts in different alt trees.
+	 *
+	 *  The hashCode is now cheap as it's computed once upon each context
+	 *  push on the stack.  Use it to make equals() more efficient.
+	 */
+	@Override
+	public boolean equals(Object o) {
+		NFAContext other = ((NFAContext)o);
+		if ( this.cachedHashCode != other.cachedHashCode ) {
+			return false; // can't be same if hash is different
+		}
+		if ( this==other ) {
+			return true;
+		}
+		// System.out.println("comparing "+this+" with "+other);
+		NFAContext sp = this;
+		while ( sp.parent!=null && other.parent!=null ) {
+			if ( sp.invokingState != other.invokingState ) {
+				return false;
+			}
+			sp = sp.parent;
+			other = other.parent;
+		}
+		if ( !(sp.parent==null && other.parent==null) ) {
+			return false; // both pointers must be at their roots after walk
+		}
+		return true;
+	}
+
+	/** Two contexts conflict() if they are equals() or one is a stack suffix
+	 *  of the other.  For example, contexts [21 12 $] and [21 9 $] do not
+	 *  conflict, but [21 $] and [21 12 $] do conflict.  Note that I should
+	 *  probably not show the $ in this case.  There is a dummy node for each
+	 *  stack that just means empty; $ is just a marker.
+	 *
+	 *  This is used in relation to checking conflicts associated with a
+	 *  single NFA state's configurations within a single DFA state.
+	 *  If there are configurations s and t within a DFA state such that
+	 *  s.state=t.state &amp;&amp; s.alt != t.alt &amp;&amp; s.ctx conflicts t.ctx then
+	 *  the DFA state predicts more than a single alt--it's nondeterministic.
+	 *  Two contexts conflict if they are the same or if one is a suffix
+	 *  of the other.
+	 *
+	 *  When comparing contexts, if one context has a stack and the other
+	 *  does not then they should be considered the same context.  The only
+	 *  way for an NFA state p to have an empty context and a nonempty context
+	 *  is the case when closure falls off end of rule without a call stack
+	 *  and re-enters the rule with a context.  This resolves the issue I
+	 *  discussed with Sriram Srinivasan Feb 28, 2005 about not terminating
+	 *  fast enough upon nondeterminism.
+	 */
+	public boolean conflictsWith(NFAContext other) {
+		return this.suffix(other); // || this.equals(other);
+	}
+
+	/** [$] suffix any context
+	 *  [21 $] suffix [21 12 $]
+	 *  [21 12 $] suffix [21 $]
+	 *  [21 18 $] suffix [21 18 12 9 $]
+	 *  [21 18 12 9 $] suffix [21 18 $]
+	 *  [21 12 $] not suffix [21 9 $]
+	 *
+	 *  Example "[21 $] suffix [21 12 $]" means: rule r invoked current rule
+	 *  from state 21.  Rule s invoked rule r from state 12 which then invoked
+	 *  current rule also via state 21.  While the context prior to state 21
+	 *  is different, the fact that both contexts emanate from state 21 implies
+	 *  that they are now going to track perfectly together.  Once they
+	 *  converged on state 21, there is no way they can separate.  In other
+	 *  words, the prior stack state is not consulted when computing where to
+	 *  go in the closure operation.  x$ and xx$ are considered the same stack.
+	 *  If x is popped off then $ and x$ remain; they are now an empty and
+	 *  nonempty context comparison.  So, if one stack is a suffix of
+	 *  another, then it will still degenerate to the simple empty stack
+	 *  comparison case.
+	 */
+	protected boolean suffix(NFAContext other) {
+		NFAContext sp = this;
+		// if one of the contexts is empty, it never enters loop and returns true
+		while ( sp.parent!=null && other.parent!=null ) {
+			if ( sp.invokingState != other.invokingState ) {
+				return false;
+			}
+			sp = sp.parent;
+			other = other.parent;
+		}
+		//System.out.println("suffix");
+		return true;
+	}
+
+    /** Walk upwards to the root of the call stack context looking
+     *  for a particular invoking state.
+	public boolean contains(int state) {
+        NFAContext sp = this;
+		int n = 0; // track recursive invocations of state
+		System.out.println("this.context is "+sp);
+		while ( sp.parent!=null ) {
+            if ( sp.invokingState.stateNumber == state ) {
+				return true;
+            }
+            sp = sp.parent;
+        }
+        return false;
+    }
+	 */
+
+	/** Given an NFA state number, how many times has the NFA-to-DFA
+	 *  conversion pushed that state on the stack?  In other words,
+	 *  the NFA state must be a rule invocation state and this method
+	 *  tells you how many times you've been to this state.  If none,
+	 *  then you have not called the target rule from this state before
+	 *  (though another NFA state could have called that target rule).
+	 *  If n=1, then you've been to this state before during this
+	 *  DFA construction and are going to invoke that rule again.
+	 *
+	 *  Note that many NFA states can invoke rule r, but we ignore recursion
+	 *  unless you hit the same rule invocation state again.
+	 */
+	public int recursionDepthEmanatingFromState(int state) {
+		NFAContext sp = this;
+		int n = 0; // track recursive invocations of target from this state
+		//System.out.println("this.context is "+sp);
+		while ( sp.parent!=null ) {
+			if ( sp.invokingState.stateNumber == state ) {
+				n++;
+			}
+			sp = sp.parent;
+		}
+		return n;
+	}
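+	// (Illustrative: with a call stack printed as [21 18 21 $],
+	//  recursionDepthEmanatingFromState(21) returns 2 and
+	//  recursionDepthEmanatingFromState(18) returns 1.)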
+
+	@Override
+    public int hashCode() {
+        return cachedHashCode;
+        /*
+        int h = 0;
+        NFAContext sp = this;
+        while ( sp.parent!=null ) {
+            h += sp.invokingState.getStateNumber();
+            sp = sp.parent;
+        }
+        return h;
+        */
+    }
+
+	/** A context is empty if there is no parent, meaning nobody has pushed
+	 *  anything onto the call stack.
+	 */
+	public boolean isEmpty() {
+		return parent==null;
+	}
+
+	@Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder();
+        NFAContext sp = this;
+        buf.append("[");
+        while ( sp.parent!=null ) {
+            buf.append(sp.invokingState.stateNumber);
+            buf.append(" ");
+            sp = sp.parent;
+        }
+        buf.append("$]");
+        return buf.toString();
+    }
+}
diff --git a/tool/src/main/java/org/antlr/analysis/NFAConversionThread.java b/tool/src/main/java/org/antlr/analysis/NFAConversionThread.java
new file mode 100644
index 0000000..3690c94
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/NFAConversionThread.java
@@ -0,0 +1,66 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.misc.Barrier;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+
+/** Convert all decisions i..j inclusive in a thread */
+public class NFAConversionThread implements Runnable {
+	Grammar grammar;
+	int i, j;
+	Barrier barrier;
+	public NFAConversionThread(Grammar grammar,
+							   Barrier barrier,
+							   int i,
+							   int j)
+	{
+		this.grammar = grammar;
+		this.barrier = barrier;
+		this.i = i;
+		this.j = j;
+	}
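+
+	// Usage sketch (illustrative; "barrier" is a pre-built org.antlr.misc.Barrier
+	// shared with whoever waits for all workers):
+	//   Runnable worker = new NFAConversionThread(grammar, barrier, firstDecision, lastDecision);
+	//   new Thread(worker).start();  // converts decisions firstDecision..lastDecision, then waits on barrier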
+	@Override
+	public void run() {
+		for (int decision=i; decision<=j; decision++) {
+			NFAState decisionStartState = grammar.getDecisionNFAStartState(decision);
+			if ( decisionStartState.getNumberOfTransitions()>1 ) {
+				grammar.createLookaheadDFA(decision,true);
+			}
+		}
+		// now wait for others to finish
+		try {
+			barrier.waitForRelease();
+		}
+		catch(InterruptedException e) {
+			ErrorManager.internalError("what the hell? DFA interruptus", e);
+		}
+	}
+}
+
diff --git a/tool/src/main/java/org/antlr/analysis/NFAState.java b/tool/src/main/java/org/antlr/analysis/NFAState.java
new file mode 100644
index 0000000..4f27545
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/NFAState.java
@@ -0,0 +1,263 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.GrammarAST;
+import org.antlr.tool.Rule;
+
+/** A state within an NFA. At most 2 transitions emanate from any NFA state. */
+public class NFAState extends State {
+	// I need to distinguish between NFA decision states for (...)* and (...)+
+	// during NFA interpretation.
+	public static final int LOOPBACK = 1;
+	public static final int BLOCK_START = 2;
+	public static final int OPTIONAL_BLOCK_START = 3;
+	public static final int BYPASS = 4;
+	public static final int RIGHT_EDGE_OF_BLOCK = 5;
+
+	public static final int MAX_TRANSITIONS = 2;
+
+	/** How many transitions; 0, 1, or 2 transitions */
+	int numTransitions = 0;
+	public Transition[] transition = new Transition[MAX_TRANSITIONS];
+
+	/** For o-A-&gt;o type NFA transitions, record the label that leads to this
+	 *  state.  Useful for creating rich error messages when we find
+	 *  states insufficiently covered (with preds).
+	 */
+	public Label incidentEdgeLabel;
+
+	/** Which NFA are we in? */
+	public NFA nfa = null;
+
+	/** What's its decision number from 1..n? */
+	protected int decisionNumber = 0;
+
+	/** Subrules (...)* and (...)+ have more than one decision point in
+	 *  the NFA created for them.  They both have a loop-exit-or-stay-in
+	 *  decision node (the loop back node).  They both have a normal
+	 *  alternative block decision node at the left edge.  The (...)* is
+	 *  worse as it even has a bypass decision (2 alts: stay in or bypass)
+	 *  node at the extreme left edge.  This is not how they get generated
+	 *  in code, as a while-loop or whatever deals nicely with either.  For
+	 *  error messages (where I need to print the nondeterministic alts)
+	 *  and for interpretation, I need to use the single DFA that is created
+	 *  (for efficiency) but interpret the results differently depending
+	 *  on which of the 2 or 3 decision states uses the DFA.  For example,
+	 *  the DFA will always report alt n+1 as the exit branch for n real
+	 *  alts, so I need to translate that depending on the decision state.
+	 *
+	 *  If decisionNumber&gt;0 then this var tells you what kind of decision
+	 *  state it is.
+	 */
+	public int decisionStateType;
+
+	/** What rule do we live in? */
+	public Rule enclosingRule;
+
+	/** During debugging and for nondeterminism warnings, it's useful
+	 *  to know what relationship this node has to the original grammar.
+	 *  For example, "start of alt 1 of rule a".
+	 */
+	protected String description;
+
+	/** Associate this NFAState with the corresponding GrammarAST node
+	 *  from which this node was created.  This is useful not only for
+	 *  associating the eventual lookahead DFA with the associated
+	 *  Grammar position, but also for providing users with
+	 *  nondeterminism warnings.  Mainly used by decision states to
+	 *  report line:col info.  Could also be used to track line:col
+	 *  for elements such as token refs.
+	 */
+	public GrammarAST associatedASTNode;
+
+	/** Is this state the sole target of an EOT transition? */
+	protected boolean EOTTargetState = false;
+
+	/** Jean Bovet needs to know, in the GUI, which state pairs correspond
+	 *  to the start/stop of a block.
+	  */
+	public int endOfBlockStateNumber = State.INVALID_STATE_NUMBER;
+
+	public NFAState(NFA nfa) {
+		this.nfa = nfa;
+	}
+
+	@Override
+	public int getNumberOfTransitions() {
+		return numTransitions;
+	}
+
+	@Override
+	public void addTransition(Transition e) {
+		if ( e==null ) {
+			throw new IllegalArgumentException("You can't add a null transition");			
+		}
+		if ( numTransitions>=transition.length ) { // >= so we reject the add before indexing past the array
+			throw new IllegalArgumentException("You can only have "+transition.length+" transitions");
+		}
+		if ( e!=null ) {
+			transition[numTransitions] = e;
+			numTransitions++;
+			// Set the "back pointer" of the target state so that it
+			// knows about the label of the incoming edge.
+			Label label = e.label;
+			if ( label.isAtom() || label.isSet() ) {
+				if ( ((NFAState)e.target).incidentEdgeLabel!=null ) {
+					ErrorManager.internalError("Clobbered incident edge");
+				}
+				((NFAState)e.target).incidentEdgeLabel = e.label;
+			}
+		}
+	}
+
+	/** Used during optimization to reset a state to have the (single)
+	 *  transition another state has.
+	 */
+	public void setTransition0(Transition e) {
+		if ( e==null ) {
+			throw new IllegalArgumentException("You can't use a solitary null transition");
+		}
+		transition[0] = e;
+		transition[1] = null;
+		numTransitions = 1;
+	}
+
+	@Override
+	public Transition transition(int i) {
+		return transition[i];
+	}
+
+	/** The DFA decision for this NFA decision state always has
+	 *  an exit path for loops as n+1 for n alts in the loop.
+	 *  That is really useful for displaying nondeterministic alts
+	 *  and so on, but for walking the NFA to get a sequence of edge
+	 *  labels or for actually parsing, we need to get the real alt
+	 *  number.  The real alt number for exiting a loop is always 1
+	 *  as transition 0 points at the exit branch (we compute DFAs
+	 *  always for loops at the loopback state).
+	 *
+	 *  For walking/parsing the loopback state:
+	 * 		1 2 3 display alt (for human consumption)
+	 * 		2 3 1 walk alt
+	 *
+	 *  For walking the block start:
+	 * 		1 2 3 display alt
+	 * 		1 2 3
+	 *
+	 *  For walking the bypass state of a (...)* loop:
+	 * 		1 2 3 display alt
+	 * 		1 1 2 walk alt (all block alts map to entering the loop; the exit alt means take the bypass)
+	 *
+	 *  Non-loop EBNF blocks do not need to be translated; they are ignored by
+	 *  this method as decisionStateType==0.
+	 *
+	 *  Return same alt if we can't translate.
+	 */
+	public int translateDisplayAltToWalkAlt(int displayAlt) {
+		NFAState nfaStart = this;
+		if ( decisionNumber==0 || decisionStateType==0 ) {
+			return displayAlt;
+		}
+		int walkAlt = 0;
+		// find the NFA loopback state associated with this DFA
+		// and count number of alts (all alt numbers are computed
+		// based upon the loopback's NFA state).
+		/*
+		DFA dfa = nfa.grammar.getLookaheadDFA(decisionNumber);
+		if ( dfa==null ) {
+			ErrorManager.internalError("can't get DFA for decision "+decisionNumber);
+		}
+		*/
+		int nAlts = nfa.grammar.getNumberOfAltsForDecisionNFA(nfaStart);
+		switch ( nfaStart.decisionStateType ) {
+			case LOOPBACK :
+				walkAlt = displayAlt % nAlts + 1; // rotate right mod 1..3
+				break;
+			case BLOCK_START :
+			case OPTIONAL_BLOCK_START :
+				walkAlt = displayAlt; // identity transformation
+				break;
+			case BYPASS :
+				if ( displayAlt == nAlts ) {
+					walkAlt = 2; // bypass
+				}
+				else {
+					walkAlt = 1; // any non exit branch alt predicts entering
+				}
+				break;
+		}
+		return walkAlt;
+	}
+
+	// Setter/Getters
+
+	/** What AST node is associated with this NFAState?  When you
+	 *  set the AST node, I set the node to point back to this NFA state.
+	 */
+	public void setDecisionASTNode(GrammarAST decisionASTNode) {
+		decisionASTNode.setNFAStartState(this);
+		this.associatedASTNode = decisionASTNode;
+	}
+
+	public String getDescription() {
+		return description;
+	}
+
+	public void setDescription(String description) {
+		this.description = description;
+	}
+
+	public int getDecisionNumber() {
+		return decisionNumber;
+	}
+
+	public void setDecisionNumber(int decisionNumber) {
+		this.decisionNumber = decisionNumber;
+	}
+
+	public boolean isEOTTargetState() {
+		return EOTTargetState;
+	}
+
+	public void setEOTTargetState(boolean eot) {
+		EOTTargetState = eot;
+	}
+
+	public boolean isDecisionState() {
+		return decisionStateType>0;
+	}
+
+	@Override
+	public String toString() {
+		return String.valueOf(stateNumber);
+	}
+
+}
+
diff --git a/tool/src/main/java/org/antlr/analysis/NFAToDFAConverter.java b/tool/src/main/java/org/antlr/analysis/NFAToDFAConverter.java
new file mode 100644
index 0000000..f4b8538
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/NFAToDFAConverter.java
@@ -0,0 +1,1734 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.misc.OrderedHashSet;
+import org.antlr.misc.Utils;
+import org.antlr.runtime.Token;
+import org.antlr.tool.ErrorManager;
+
+import java.util.*;
+
+/** Code that embodies the NFA conversion to DFA. A new object is needed
+ *  per DFA (also required for thread safety if multiple conversions
+ *  are launched).
+ */
+public class NFAToDFAConverter {
+	/** A list of DFA states we still need to process during NFA conversion */
+	protected List<DFAState> work = new LinkedList<DFAState>();
+
+	/** While converting NFA, we must track states that
+	 *  reference other rules' NFAs so we know what to do
+	 *  at the end of a rule.  We need to know what context invoked
+	 *  this rule so we can know where to continue looking for NFA
+	 *  states.  I'm tracking a context tree (record of rule invocation
+	 *  stack trace) for each alternative that could be predicted.
+	 */
+	protected NFAContext[] contextTrees;
+
+	/** Which DFA are we converting? */
+	protected DFA dfa;
+
+	public static boolean debug = false;
+
+	/** Should ANTLR launch multiple threads to convert NFAs to DFAs?
+	 *  With a 2-CPU box, I note that it's about the same whether single- or
+	 *  multi-threaded.  Both CPU meters are going even when single-threaded
+	 *  so I assume the GC is killing us.  Could be the compiler.  When I
+	 *  run java -Xint mode, I get about 15% speed improvement with multiple
+	 *  threads.
+	 */
+	public static boolean SINGLE_THREADED_NFA_CONVERSION = true;
+
+	protected boolean computingStartState = false;
+
+	public NFAToDFAConverter(DFA dfa) {
+		this.dfa = dfa;
+		int nAlts = dfa.getNumberOfAlts();
+		initContextTrees(nAlts);
+	}
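+
+	// Usage sketch (illustrative; normally the DFA itself drives this):
+	//   NFAToDFAConverter conv = new NFAToDFAConverter(dfa);
+	//   conv.convert();   // populates dfa.startState and the rest of the DFA states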
+
+	public void convert() {
+		//dfa.conversionStartTime = System.currentTimeMillis();
+
+		// create the DFA start state
+		dfa.startState = computeStartState();
+
+		// while more DFA states to check, process them
+		while ( work.size()>0 &&
+				!dfa.nfa.grammar.NFAToDFAConversionExternallyAborted() )
+		{
+			DFAState d = work.get(0);
+			if ( dfa.nfa.grammar.composite.watchNFAConversion ) {
+				System.out.println("convert DFA state "+d.stateNumber+
+								   " ("+d.nfaConfigurations.size()+" nfa states)");
+			}
+			int k = dfa.getUserMaxLookahead();
+			if ( k>0 && k==d.getLookaheadDepth() ) {
+				// we've hit max lookahead, make this a stop state
+				//System.out.println("stop state @k="+k+" (terminated early)");
+				/*
+				List<Label> sampleInputLabels = d.dfa.probe.getSampleNonDeterministicInputSequence(d);
+				String input = d.dfa.probe.getInputSequenceDisplay(sampleInputLabels);
+				System.out.println("sample input: "+input);
+				 */
+				resolveNonDeterminisms(d);
+				// Check to see if we need to add any semantic predicate transitions
+				if ( d.isResolvedWithPredicates() ) {
+					addPredicateTransitions(d);
+				}
+				else {
+					d.setAcceptState(true); // must convert to accept state at k
+				}
+			}
+			else {
+				findNewDFAStatesAndAddDFATransitions(d);
+			}
+			work.remove(0); // done with it; remove from work list
+		}
+
+		// Find all manual syn preds (gated).  These are not discovered
+		// in tryToResolveWithSemanticPredicates because they are implicitly
+		// added to every edge by code gen, DOT generation etc...
+		dfa.findAllGatedSynPredsUsedInDFAAcceptStates();
+	}
+
+	/** From this first NFA state of a decision, create a DFA.
+	 *  Walk each alt in decision and compute closure from the start of that
+	 *  rule, making sure that the closure does not include other alts within
+	 *  that same decision.  The idea is to associate a specific alt number
+	 *  with the starting closure so we can trace the alt number for all states
+	 *  derived from this.  At a stop state in the DFA, we can return this alt
+	 *  number, indicating which alt is predicted.
+	 *
+	 *  If this DFA is derived from a loopback NFA state, then the first
+	 *  transition is actually the exit branch of the loop.  Rather than make
+	 *  this alternative one, let's make this alt n+1 where n is the number of
+	 *  alts in this block.  This is nice to keep the alts of the block 1..n;
+	 *  helps with error messages.
+	 *
+	 *  I handle nongreedy loops in findNewDFAStatesAndAddDFATransitions
+	 *  when the decision is nongreedy and an EOT transition appears.  The state
+	 *  with EOT emanating from it becomes the accept state.
+	 */
+	protected DFAState computeStartState() {
+		NFAState alt = dfa.decisionNFAStartState;
+		DFAState startState = dfa.newState();
+		computingStartState = true;
+		int i = 0;
+		int altNum = 1;
+		while ( alt!=null ) {
+			// find the set of NFA states reachable without consuming
+			// any input symbols for each alt.  Keep adding to same
+			// overall closure that will represent the DFA start state,
+			// but track the alt number
+			NFAContext initialContext = contextTrees[i];
+			// if first alt is derived from loopback/exit branch of loop,
+			// make alt=n+1 for n alts instead of 1
+			if ( i==0 &&
+				 dfa.getNFADecisionStartState().decisionStateType==NFAState.LOOPBACK )
+			{
+				int numAltsIncludingExitBranch = dfa.nfa.grammar
+					.getNumberOfAltsForDecisionNFA(dfa.decisionNFAStartState);
+				altNum = numAltsIncludingExitBranch;
+				closure((NFAState)alt.transition[0].target,
+						altNum,
+						initialContext,
+						SemanticContext.EMPTY_SEMANTIC_CONTEXT,
+						startState,
+						true
+				);
+				altNum = 1; // make next alt the first
+			}
+			else {
+				closure((NFAState)alt.transition[0].target,
+						altNum,
+						initialContext,
+						SemanticContext.EMPTY_SEMANTIC_CONTEXT,
+						startState,
+						true
+				);
+				altNum++;
+			}
+			i++;
+
+			// move to next alternative
+			if ( alt.transition[1] ==null ) {
+				break;
+			}
+			alt = (NFAState)alt.transition[1].target;
+		}
+
+		// now DFA start state has the complete closure for the decision
+		// but we have tracked which alt is associated with which
+		// NFA states.
+		dfa.addState(startState); // make sure dfa knows about this state
+		work.add(startState);
+		computingStartState = false;
+		return startState;
+	}
+
+	/** From this node, add a d--a--&gt;t transition for all
+	 *  labels 'a' where t is a DFA node created
+	 *  from the set of NFA states reachable from any NFA
+	 *  state in DFA state d.
+	 */
+	protected void findNewDFAStatesAndAddDFATransitions(DFAState d) {
+		//System.out.println("work on DFA state "+d);
+		OrderedHashSet<Label> labels = d.getReachableLabels();
+		//System.out.println("reachable labels="+labels);
+
+		/*
+		System.out.println("|reachable|/|nfaconfigs|="+
+				labels.size()+"/"+d.getNFAConfigurations().size()+"="+
+				labels.size()/(float)d.getNFAConfigurations().size());
+		*/
+
+		// normally EOT is the "default" clause and decisions just
+		// choose that last clause when nothing else matches.  DFA conversion
+		// continues searching for a unique sequence that predicts the
+		// various alts or until it finds EOT.  So this rule
+		//
+		// DUH : ('x'|'y')* "xy!";
+		//
+		// does not need a greedy indicator.  The following rule works fine too
+		//
+		// A : ('x')+ ;
+		//
+		// When the follow branch could match what is in the loop, by default,
+		// the nondeterminism is resolved in favor of the loop.  You don't
+		// get a warning because the only way to get this condition is if
+		// the DFA conversion hits the end of the token.  In that case,
+		// we're not *sure* what will happen next, but it could be anything.
+		// Anyway, EOT is the default case which means it will never be matched
+		// as resolution goes to the lowest alt number.  Exit branches are
+		// always alt n+1 for n alts in a block.
+		//
+		// When a loop is nongreedy and we find an EOT transition, the DFA
+		// state should become an accept state, predicting exit of loop.  It's
+		// just reversing the resolution of ambiguity.
+		// TODO: should this be done in the resolveAmbig method?
+		Label EOTLabel = new Label(Label.EOT);
+		boolean containsEOT = labels!=null && labels.contains(EOTLabel);
+		if ( !dfa.isGreedy() && containsEOT ) {
+			convertToEOTAcceptState(d);
+			return; // no more work to do on this accept state
+		}
+
+		// if in filter mode for lexer, want to match shortest not longest
+		// string so if we see an EOT edge emanating from this state, then
+		// convert this state to an accept state.  This only counts for
+		// The Tokens rule as all other decisions must continue to look for
+		// longest match.
+		// [Taking back out a few days later on Jan 17, 2006.  This could
+		//  be an option for the future, but this was the wrong solution for
+		//  filtering.]
+		/*
+		if ( dfa.nfa.grammar.type==Grammar.LEXER && containsEOT ) {
+			String filterOption = (String)dfa.nfa.grammar.getOption("filter");
+			boolean filterMode = filterOption!=null && filterOption.equals("true");
+			if ( filterMode && d.dfa.isTokensRuleDecision() ) {
+				DFAState t = reach(d, EOTLabel);
+				if ( t.getNFAConfigurations().size()>0 ) {
+					convertToEOTAcceptState(d);
+					//System.out.println("state "+d+" has EOT target "+t.stateNumber);
+					return;
+				}
+			}
+		}
+		*/
+
+		int numberOfEdgesEmanating = 0;
+		Map<Integer, Transition> targetToLabelMap = new HashMap<Integer, Transition>();
+		// for each label that could possibly emanate from NFAStates of d
+		int numLabels = 0;
+		if ( labels!=null ) {
+			numLabels = labels.size();
+		}
+		for (int i=0; i<numLabels; i++) {
+			Label label = labels.get(i);
+			DFAState t = reach(d, label);
+			if ( debug ) {
+				System.out.println("DFA state after reach "+label+" "+d+"-" +
+								   label.toString(dfa.nfa.grammar)+"->"+t);
+			}
+			if ( t==null ) {
+				// nothing was reached by label due to conflict resolution
+				// EOT also seems to be in here occasionally probably due
+				// to an end-of-rule state seeing it even though we'll pop
+				// an invoking state off the stack; don't bother to conflict
+				// as this label set is a covering approximation only.
+				continue;
+			}
+			//System.out.println("dfa.k="+dfa.getUserMaxLookahead());
+			if ( t.getUniqueAlt()==NFA.INVALID_ALT_NUMBER ) {
+				// Only compute closure if a unique alt number is not known.
+				// If a unique alternative is mentioned among all NFA
+				// configurations then there is no possibility of needing to look
+				// beyond this state; also no possibility of a nondeterminism.
+				// This optimization May 22, 2006 just dropped -Xint time
+				// for analysis of Java grammar from 11.5s to 2s!  Wow.
+				closure(t);  // add any NFA states reachable via epsilon
+			}
+
+			/*
+			System.out.println("DFA state after closure "+d+"-"+
+							   label.toString(dfa.nfa.grammar)+
+							   "->"+t);
+							   */
+
+			// add if not in DFA yet and then make d-label->t
+			DFAState targetState = addDFAStateToWorkList(t);
+
+			numberOfEdgesEmanating +=
+				addTransition(d, label, targetState, targetToLabelMap);
+
+			// lookahead of target must be one larger than d's k
+			// We are possibly setting the depth of a pre-existing state
+			// that is equal to one we just computed...not sure if that's
+			// ok.
+			targetState.setLookaheadDepth(d.getLookaheadDepth() + 1);
+		}
+
+		//System.out.println("DFA after reach / closures:\n"+dfa);
+		if ( !d.isResolvedWithPredicates() && numberOfEdgesEmanating==0 ) {
+			//System.out.println("dangling DFA state "+d+"\nAfter reach / closures:\n"+dfa);
+			// TODO: can fixed lookahead hit a dangling state case?
+			// TODO: yes, with left recursion
+			//System.err.println("dangling state alts: "+d.getAltSet());
+			dfa.probe.reportDanglingState(d);
+			// turn off all configurations except for those associated with
+			// min alt number; somebody has to win else some input will not
+			// predict any alt.
+			int minAlt = resolveByPickingMinAlt(d, null);
+			// force it to be an accept state
+			// don't call convertToAcceptState() which merges stop states.
+			// other states point at us; don't want them pointing to dead states
+			d.setAcceptState(true); // might be adding new accept state for alt
+			dfa.setAcceptState(minAlt, d);
+			//convertToAcceptState(d, minAlt); // force it to be an accept state
+		}
+
+		// Check to see if we need to add any semantic predicate transitions
+		// might have both token and predicated edges from d
+		if ( d.isResolvedWithPredicates() ) {
+			addPredicateTransitions(d);
+		}
+	}
+
+	/** Add a transition from state d to targetState with label in normal case.
+	 *  If COLLAPSE_ALL_PARALLEL_EDGES is set, however, try to merge all edges from
+	 *  d to targetState; this means merging their labels.  Another optimization
+	 *  is to reduce to a single EOT edge any set of edges from d to targetState
+	 *  where there exists an EOT state.  EOT is like the wildcard so don't
+	 *  bother to test any other edges.  Example:
+	 *
+	 *  NUM_INT
+	 *    : '1'..'9' ('0'..'9')* ('l'|'L')?
+     *    | '0' ('x'|'X') ('0'..'9'|'a'..'f'|'A'..'F')+ ('l'|'L')?
+     *    | '0' ('0'..'7')* ('l'|'L')?
+	 *    ;
+	 *
+	 *  The normal decision to predict alts 1, 2, 3 is:
+	 *
+	 *  if ( (input.LA(1)&gt;='1' &amp;&amp; input.LA(1)&lt;='9') ) {
+     *       alt7=1;
+     *  }
+     *  else if ( input.LA(1)=='0' ) {
+     *      if ( input.LA(2)=='X'||input.LA(2)=='x' ) {
+     *          alt7=2;
+     *      }
+     *      else if ( (input.LA(2)&gt;='0' &amp;&amp; input.LA(2)&lt;='7') ) {
+     *           alt7=3;
+     *      }
+     *      else if ( input.LA(2)=='L'||input.LA(2)=='l' ) {
+     *           alt7=3;
+     *      }
+     *      else {
+     *           alt7=3;
+     *      }
+     *  }
+     *  else error
+	 *
+     *  Clearly, alt 3 is predicted with extra work since it tests 0..7
+	 *  and [lL] before finally realizing that any character is actually
+	 *  ok at k=2.
+	 *
+	 *  A better decision is as follows:
+     *
+	 *  if ( (input.LA(1)&gt;='1' &amp;&amp; input.LA(1)&lt;='9') ) {
+	 *      alt7=1;
+	 *  }
+	 *  else if ( input.LA(1)=='0' ) {
+	 *      if ( input.LA(2)=='X'||input.LA(2)=='x' ) {
+	 *          alt7=2;
+	 *      }
+	 *      else {
+	 *          alt7=3;
+	 *      }
+	 *  }
+	 *
+	 *  The DFA originally has 3 edges going to the state that predicts alt 3,
+	 *  but upon seeing the EOT edge (the "else"-clause), this method
+	 *  replaces the old merged label (which would have (0..7|l|L)) with EOT.
+	 *  The code generator then leaves alt 3 predicted with a simple else-
+	 *  clause. :)
+	 *
+	 *  The only time the EOT optimization makes no sense is in the Tokens
+	 *  rule.  We want EOT to truly mean you have matched an entire token
+	 *  so don't bother actually rewinding to execute that rule unless there
+	 *  are actions in that rule.  For now, since I am not preventing
+	 *  backtracking from Tokens rule, I will simply allow the optimization.
+	 */
+	protected static int addTransition(DFAState d,
+									   Label label,
+									   DFAState targetState,
+									   Map<Integer, Transition> targetToLabelMap)
+	{
+		//System.out.println(d.stateNumber+"-"+label.toString(dfa.nfa.grammar)+"->"+targetState.stateNumber);
+		int n = 0;
+		if ( DFAOptimizer.COLLAPSE_ALL_PARALLEL_EDGES ) {
+			// track which targets we've hit
+			Integer tI = Utils.integer(targetState.stateNumber);
+			Transition oldTransition = targetToLabelMap.get(tI);
+			if ( oldTransition!=null ) {
+				//System.out.println("extra transition to "+tI+" upon "+label.toString(dfa.nfa.grammar));
+				// already seen state d to target transition, just add label
+				// to old label unless EOT
+				if ( label.getAtom()==Label.EOT ) {
+					// merge with EOT means old edge can go away
+					oldTransition.label = new Label(Label.EOT);
+				}
+				else {
+					// don't add anything to EOT, it's essentially the wildcard
+					if ( oldTransition.label.getAtom()!=Label.EOT ) {
+						// ok, not EOT, add in this label to old label
+						oldTransition.label.add(label);
+					}
+					//System.out.println("label updated to be "+oldTransition.label.toString(dfa.nfa.grammar));
+				}
+			}
+			else {
+				// make a transition from d to t upon 'a'
+				n = 1;
+				label = (Label)label.clone(); // clone in case we alter later
+				int transitionIndex = d.addTransition(targetState, label);
+				Transition trans = d.getTransition(transitionIndex);
+				// track target/transition pairs
+				targetToLabelMap.put(tI, trans);
+			}
+		}
+		else {
+			n = 1;
+			d.addTransition(targetState, label);
+		}
+		return n;
+	}
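+	// Illustrative note (hedged sketch, not from the original source; aLabel,
+	// bLabel, and map are hypothetical stand-ins): with
+	// COLLAPSE_ALL_PARALLEL_EDGES enabled, two successive calls
+	//     addTransition(d, aLabel, t, map);  // first edge d-a->t, returns 1
+	//     addTransition(d, bLabel, t, map);  // merges into d-{a,b}->t, returns 0
+	// leave a single merged edge from d to t; if either label is EOT, the
+	// merged edge collapses to a plain EOT edge as described above.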
+
+	/** For all NFA states (configurations) merged in d,
+	 *  compute the epsilon closure; that is, find all NFA states reachable
+	 *  from the NFA states in d via purely epsilon transitions.
+	 */
+	public void closure(DFAState d) {
+		if ( debug ) {
+			System.out.println("closure("+d+")");
+		}
+
+		List<NFAConfiguration> configs = new ArrayList<NFAConfiguration>();
+		// Because we are adding to the configurations in closure
+		// must clone initial list so we know when to stop doing closure
+		configs.addAll(d.nfaConfigurations);
+		// for each NFA configuration in d (abort if we detect non-LL(*) state)
+		int numConfigs = configs.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration c = configs.get(i);
+			if ( c.singleAtomTransitionEmanating ) {
+				continue; // ignore NFA states w/o epsilon transitions
+			}
+			//System.out.println("go do reach for NFA state "+c.state);
+			// figure out reachable NFA states from each of d's nfa states
+			// via epsilon transitions.
+			// Fill configsInClosure rather than altering d configs inline
+			closure(dfa.nfa.getState(c.state),
+					c.alt,
+					c.context,
+					c.semanticContext,
+					d,
+					false);
+		}
+		//System.out.println("after closure d="+d);
+		d.closureBusy = null; // whack all that memory used during closure
+	}
+
+	/** Where can we get from NFA state p traversing only epsilon transitions?
+	 *  Add new NFA states + context to DFA state d.  Also add semantic
+	 *  predicates to semantic context if collectPredicates is set.  We only
+	 *  collect predicates at hoisting depth 0, meaning before any token/char
+	 *  has been recognized.  This corresponds, during analysis, to the
+	 *  initial DFA start state construction closure() invocation.
+	 *
+	 *  There are four cases of interest (the last being the usual transition):
+	 *
+	 *   1. Traverse an edge that takes us to the start state of another
+	 *      rule, r.  We must push this state so that if the DFA
+	 *      conversion hits the end of rule r, then it knows to continue
+	 *      the conversion at the state following the state that "invoked" r. By
+	 *      construction, there is a single transition emanating from a rule
+	 *      ref node.
+	 *
+	 *   2. Reach an NFA state associated with the end of a rule, r, in the
+	 *      grammar from which it was built.  We must add an implicit (i.e.,
+	 *      don't actually add an epsilon transition) epsilon transition
+	 *      from r's end state to the NFA state following the NFA state
+	 *      that transitioned to rule r's start state.  Because there are
+	 *      many states that could reach r, the context for a rule invocation
+	 *      is part of a call tree not a simple stack.  When we fall off end
+	 *      of rule, "pop" a state off the call tree and add that state's
+	 *      "following" node to d's NFA configuration list.  The context
+	 *      for this new addition will be the new "stack top" in the call tree.
+	 *
+	 *   3. Like case 2, we reach an NFA state associated with the end of a
+	 *      rule, r, in the grammar from which NFA was built.  In this case,
+	 *      however, we realize that during this NFA&rarr;DFA conversion, no state
+	 *      invoked the current rule's NFA.  There is no choice but to add
+	 *      all NFA states that follow references to r's start state.  This is
+	 *      analogous to computing the FOLLOW(r) in the LL(k) world.  By
+	 *      construction, every rule stop state has a chain of nodes emanating
+	 *      from it that points to every possible following node.  This case
+	 *      is then conveniently handled by the 4th case.
+	 *
+	 *   4. Normal case.  If p can reach another NFA state q, then add
+	 *      q to d's configuration list, copying p's context for q's context.
+	 *      If there is a semantic predicate on the transition, then AND it
+	 *      with any existing semantic context.
+	 *
+	 *   Current state p is always added to d's configuration list as it's part
+	 *   of the closure as well.
+	 *
+	 *  When is a closure operation in a cycle condition?  While it is
+	 *  very possible to have the same NFA state mentioned twice
+	 *  within the same DFA state, there are two situations that
+	 *  would lead to nontermination of closure operation:
+	 *
+	 *  o   Whenever closure reaches a configuration where the same state
+	 *      with the same or a suffix context already exists.  This catches
+	 *      the IF-THEN-ELSE tail recursion cycle and things like
+	 *
+	 *      a : A a | B ;
+	 *
+	 *      the context will be $ (empty stack).
+	 *
+	 *      We have to check
+	 *      larger context stacks because of (...)+ loops.  For
+	 *      example, the context of a (...)+ can be nonempty if the
+	 *      surrounding rule is invoked by another rule:
+	 *
+	 *      a : b A | X ;
+	 *      b : (B|)+ ;  // nondeterministic by the way
+	 *
+	 *      The context of the (B|)+ loop is "invoked from item
+	 *      a : . b A ;" and then the empty alt of the loop can reach back
+	 *      to itself.  The context stack will have one "return
+	 *      address" element and so we must check for same state, same
+	 *      context for arbitrary context stacks.
+	 *
+	 *      Idea: If we've seen this configuration before during closure, stop.
+	 *      We also need to avoid reaching same state with conflicting context.
+	 *      Ultimately analysis would stop and we'd find the conflict, but we
+	 *      should stop the computation.  Previously I only checked for
+	 *      exact config.  Need to check for same state, suffix context
+	 *      not just exact context.
+	 *
+	 *  o   Whenever closure reaches a configuration where state p
+	 *      is present in its own context stack.  This means that
+	 *      p is a rule invocation state and the target rule has
+	 *      been called before.  NFAContext.MAX_RECURSIVE_INVOCATIONS
+	 *      (See the comment there also) determines how many times
+	 *      it's possible to recurse; clearly we cannot recurse forever.
+	 *      Some grammars such as the following actually require at
+	 *      least one recursive call to correctly compute the lookahead:
+	 *
+	 *      a : L ID R
+	 *        | b
+	 *        ;
+	 *      b : ID
+	 *        | L a R
+	 *        ;
+	 *
+	 *      Input L ID R is ambiguous but to figure this out, ANTLR
+	 *      needs to go a-&gt;b-&gt;a-&gt;b to find the L ID sequence.
+	 *
+	 *      Do not allow closure to add a configuration that would
+	 *      allow too much recursion.
+	 *
+	 *      This case also catches infinite left recursion.
+	 */
+	public void closure(NFAState p,
+						int alt,
+						NFAContext context,
+						SemanticContext semanticContext,
+						DFAState d,
+						boolean collectPredicates)
+	{
+		if ( debug ){
+			System.out.println("closure at "+p.enclosingRule.name+" state "+p.stateNumber+"|"+
+							   alt+" filling DFA state "+d.stateNumber+" with context "+context
+							   );
+		}
+
+//		if ( DFA.MAX_TIME_PER_DFA_CREATION>0 &&
+//			 System.currentTimeMillis() - d.dfa.conversionStartTime >=
+//			 DFA.MAX_TIME_PER_DFA_CREATION )
+//		{
+//			// bail way out; we've blown up somehow
+//			throw new AnalysisTimeoutException(d.dfa);
+//		}
+
+		NFAConfiguration proposedNFAConfiguration =
+				new NFAConfiguration(p.stateNumber,
+						alt,
+						context,
+						semanticContext);
+
+		// Avoid infinite recursion
+		if ( closureIsBusy(d, proposedNFAConfiguration) ) {
+			if ( debug ) {
+				System.out.println("avoid visiting exact closure computation NFA config: "+
+								   proposedNFAConfiguration+" in "+p.enclosingRule.name);
+				System.out.println("state is "+d.dfa.decisionNumber+"."+d.stateNumber);
+			}
+			return;
+		}
+
+		// set closure to be busy for this NFA configuration
+		d.closureBusy.add(proposedNFAConfiguration);
+
+		// p itself is always in closure
+		d.addNFAConfiguration(p, proposedNFAConfiguration);
+
+		// Case 1: are we a reference to another rule?
+		Transition transition0 = p.transition[0];
+		if ( transition0 instanceof RuleClosureTransition ) {
+			int depth = context.recursionDepthEmanatingFromState(p.stateNumber);
+			// Detect recursion by more than a single alt, which indicates
+			// that the decision's lookahead language is potentially non-regular; terminate
+			if ( depth == 1 && d.dfa.getUserMaxLookahead()==0 ) { // k=* only
+				d.dfa.recursiveAltSet.add(alt); // indicate that this alt is recursive
+				if ( d.dfa.recursiveAltSet.size()>1 ) {
+					//System.out.println("recursive alts: "+d.dfa.recursiveAltSet.toString());
+					d.abortedDueToMultipleRecursiveAlts = true;
+					throw new NonLLStarDecisionException(d.dfa);
+				}
+				/*
+				System.out.println("alt "+alt+" in rule "+p.enclosingRule+" dec "+d.dfa.decisionNumber+
+					" ctx: "+context);
+				System.out.println("d="+d);
+				*/
+			}
+			// Detect an attempt to recurse too high
+			// if this context has hit the max recursions for p.stateNumber,
+			// don't allow it to enter p.stateNumber again
+			if ( depth >= NFAContext.MAX_SAME_RULE_INVOCATIONS_PER_NFA_CONFIG_STACK ) {
+				/*
+				System.out.println("OVF state "+d);
+				System.out.println("proposed "+proposedNFAConfiguration);
+				*/
+				d.abortedDueToRecursionOverflow = true;
+				d.dfa.probe.reportRecursionOverflow(d, proposedNFAConfiguration);
+				if ( debug ) {
+					System.out.println("analysis overflow in closure("+d.stateNumber+")");
+				}
+				return;
+			}
+
+			// otherwise, it's cool to (re)enter target of this rule ref
+			RuleClosureTransition ref = (RuleClosureTransition)transition0;
+			// first create a new context and push onto call tree,
+			// recording the fact that we are invoking a rule and
+			// from which state (case 2 below will get the following state
+			// via the RuleClosureTransition emanating from the invoking state
+			// pushed on the stack).
+			// Reset the context to reflect the fact we invoked rule
+			NFAContext newContext = new NFAContext(context, p);
+			//System.out.println("invoking rule "+ref.rule.name);
+			// System.out.println(" context="+context);
+			// traverse epsilon edge to new rule
+			NFAState ruleTarget = (NFAState)ref.target;
+			closure(ruleTarget, alt, newContext, semanticContext, d, collectPredicates);
+		}
+		// Case 2: end of rule state, context (i.e., an invoker) exists
+		else if ( p.isAcceptState() && context.parent!=null ) {
+			NFAState whichStateInvokedRule = context.invokingState;
+			RuleClosureTransition edgeToRule =
+				(RuleClosureTransition)whichStateInvokedRule.transition[0];
+			NFAState continueState = edgeToRule.followState;
+			NFAContext newContext = context.parent; // "pop" invoking state
+			closure(continueState, alt, newContext, semanticContext, d, collectPredicates);
+		}
+		// Case 3: end of rule state, nobody invoked this rule (no context)
+		//    Fall thru to be handled by case 4 automagically.
+		// Case 4: ordinary NFA->DFA conversion case: simple epsilon transition
+		else {
+			// recurse down any epsilon transitions
+			if ( transition0!=null && transition0.isEpsilon() ) {
+				boolean collectPredicatesAfterAction = collectPredicates;
+				if ( transition0.isAction() && collectPredicates ) {
+					collectPredicatesAfterAction = false;
+					/*
+					if ( computingStartState ) {
+						System.out.println("found action during prediction closure "+((ActionLabel)transition0.label).actionAST.token);
+					}
+					 */
+				}
+				closure((NFAState)transition0.target,
+						alt,
+						context,
+						semanticContext,
+						d,
+						collectPredicatesAfterAction
+				);
+			}
+			else if ( transition0!=null && transition0.isSemanticPredicate() ) {
+                SemanticContext labelContext = transition0.label.getSemanticContext();
+                if ( computingStartState ) {
+                    if ( collectPredicates ) {
+                        // only indicate we can see a predicate if we're collecting preds
+                        // Could be computing start state & seen an action before this.
+                        dfa.predicateVisible = true;
+                    }
+                    else {
+                        // this state has a pred, but we can't see it.
+                        dfa.hasPredicateBlockedByAction = true;
+                        // System.out.println("found pred during prediction but blocked by action found previously");
+                    }
+                }
+                // continue closure here too, but add the sem pred to ctx
+                SemanticContext newSemanticContext = semanticContext;
+                if ( collectPredicates ) {
+                    // AND the previous semantic context with new pred
+                    // do not hoist syn preds from other rules; only get if in
+                    // starting state's rule (i.e., context is empty)
+                    int walkAlt =
+						dfa.decisionNFAStartState.translateDisplayAltToWalkAlt(alt);
+					NFAState altLeftEdge =
+						dfa.nfa.grammar.getNFAStateForAltOfDecision(dfa.decisionNFAStartState,walkAlt);
+					/*
+					System.out.println("state "+p.stateNumber+" alt "+alt+" walkAlt "+walkAlt+" trans to "+transition0.target);
+					System.out.println("DFA start state "+dfa.decisionNFAStartState.stateNumber);
+					System.out.println("alt left edge "+altLeftEdge.stateNumber+
+						", epsilon target "+
+						altLeftEdge.transition(0).target.stateNumber);
+					*/
+					if ( !labelContext.isSyntacticPredicate() ||
+						 p==altLeftEdge.transition[0].target )
+					{
+						//System.out.println("&"+labelContext+" enclosingRule="+p.enclosingRule);
+						newSemanticContext =
+							SemanticContext.and(semanticContext, labelContext);
+					}
+				}
+				closure((NFAState)transition0.target,
+						alt,
+						context,
+						newSemanticContext,
+						d,
+						collectPredicates);
+			}
+			Transition transition1 = p.transition[1];
+			if ( transition1!=null && transition1.isEpsilon() ) {
+				closure((NFAState)transition1.target,
+						alt,
+						context,
+						semanticContext,
+						d,
+						collectPredicates);
+			}
+		}
+
+		// don't remove "busy" flag as we want to prevent all
+		// references to same config of state|alt|ctx|semCtx even
+		// if resulting from another NFA state
+	}
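+	// Illustrative note (hedged sketch, not from the original source): for a
+	// hypothetical grammar fragment
+	//     a : b X ;
+	//     b : Y ;
+	// closure at the NFA state for ". b X" is case 1: the invoking state is
+	// pushed and closure descends into b's start state.  Later, when closure
+	// reaches b's stop state with that non-null context, case 2 pops the
+	// invoking state and continues at the state for "b . X".  Ordinary
+	// epsilon edges (and stop states with empty context) fall through to
+	// cases 3/4.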
+
+	/** A closure operation should abort if that computation has already
+	 *  been done or a computation with a conflicting context has already
+	 *  been done.  If proposed NFA config's state and alt are the same
+	 *  there is potentially a problem.  If the stack context is identical
+	 *  then clearly the exact same computation is proposed.  If a context
+	 *  is a suffix of the other, then again the computation is in an
+	 *  identical context.  ?$ and ??$ are considered the same stack.
+	 *  We could walk configurations linearly doing the comparison instead
+	 *  of a set for exact matches but it's much slower because you can't
+	 *  do a Set lookup.  I use exact match as ANTLR
+	 *  always detects the conflict later when checking for context suffixes...
+	 *  I check for left-recursive stuff and terminate before analysis to
+	 *  avoid need to do this more expensive computation.
+	 *
+	 *  12-31-2007: I had to use the loop again rather than simple
+	 *  closureBusy.contains(proposedNFAConfiguration) lookup.  The
+	 *  semantic context should not be considered when determining if
+	 *  a closure operation is busy.  I saw a FOLLOW closure operation
+	 *  spin until time out because the predicate context kept increasing
+	 *  in size even though it's same boolean value.  This seems faster also
+	 *  because I'm not doing String.equals on the preds all the time.
+	 *
+	 *  05-05-2008: Hmm...well, I think it was a mistake to remove the sem
+	 *  ctx check below...adding back in.  Coincides with report of ANTLR
+	 *  getting super slow: http://www.antlr.org:8888/browse/ANTLR-235
+	 *  This could be because it doesn't properly compute then resolve
+	 *  a predicate expression.  Seems to fix unit test:
+	 *  TestSemanticPredicates.testSemanticContextPreventsEarlyTerminationOfClosure()
+	 *  Changing back to Set from List.  Changed a large grammar from 8 minutes
+	 *  to 11 seconds.  Cool.  Closing ANTLR-235.
+	 */
+	public static boolean closureIsBusy(DFAState d,
+										NFAConfiguration proposedNFAConfiguration)
+	{
+		return d.closureBusy.contains(proposedNFAConfiguration);
+/*
+		int numConfigs = d.closureBusy.size();
+		// Check epsilon cycle (same state, same alt, same context)
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration c = (NFAConfiguration) d.closureBusy.get(i);
+			if ( proposedNFAConfiguration.state==c.state &&
+				 proposedNFAConfiguration.alt==c.alt &&
+				 proposedNFAConfiguration.semanticContext.equals(c.semanticContext) &&
+				 proposedNFAConfiguration.context.suffix(c.context) )
+			{
+				return true;
+			}
+		}
+		return false;
+		*/
+	}
+
+	/** Given the set of NFA states in DFA state d, find all NFA states
+	 *  reachable traversing label arcs.  By definition, there can be
+	 *  only one DFA state reachable by an atom from DFA state d so we must
+	 *  find and merge all NFA states reachable via label.  Return a new
+	 *  DFAState that has all of those NFA states with their context (i.e.,
+	 *  which alt do they predict and where to return to if they fall off
+	 *  end of a rule).
+	 *
+	 *  Because we cannot jump to another rule nor fall off the end of a rule
+	 *  via a non-epsilon transition, NFA states reachable from d have the
+	 *  same configuration as the NFA state in d.  So if NFA state 7 in d's
+	 *  configurations can reach NFA state 13 then 13 will be added to the
+	 *  new DFAState (labelDFATarget) with the same configuration as state
+	 *  7 had.
+	 *
+	 *  This method does not see EOT transitions off the end of token rule
+	 *  accept states if the rule was invoked by somebody.
+	 */
+	public DFAState reach(DFAState d, Label label) {
+		//System.out.println("reach "+label.toString(dfa.nfa.grammar)+" from "+d.stateNumber);
+		DFAState labelDFATarget = dfa.newState();
+
+		// for each NFA state in d with a labeled edge,
+		// add in target states for label
+		//System.out.println("size(d.state="+d.stateNumber+")="+d.nfaConfigurations.size());
+		//System.out.println("size(labeled edge states)="+d.configurationsWithLabeledEdges.size());
+		List<NFAConfiguration> configs = d.configurationsWithLabeledEdges;
+		int numConfigs = configs.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration c = configs.get(i);
+			if ( c.resolved || c.resolveWithPredicate ) {
+				continue; // the conflict resolver indicates we must leave alone
+			}
+			NFAState p = dfa.nfa.getState(c.state);
+			// by design of the grammar->NFA conversion, only transition 0
+			// may have a non-epsilon edge.
+			Transition edge = p.transition[0];
+			if ( edge==null || !c.singleAtomTransitionEmanating ) {
+				continue;
+			}
+			Label edgeLabel = edge.label;
+
+			// SPECIAL CASE
+			// if it's an EOT transition on end of lexer rule, but context
+			// stack is not empty, then don't see the EOT; the closure
+			// will have added in the proper states following the reference
+			// to this rule in the invoking rule.  In other words, if
+			// somebody called this rule, don't see the EOT emanating from
+			// this accept state.
+			if ( c.context.parent!=null && edgeLabel.label==Label.EOT )	{
+				continue;
+			}
+
+			// Labels not unique at this point (not until addReachableLabels)
+			// so try simple int label match before general set intersection
+			//System.out.println("comparing "+edgeLabel+" with "+label);
+			if ( Label.intersect(label, edgeLabel) ) {
+				// found a transition with label;
+				// add NFA target to (potentially) new DFA state
+				NFAConfiguration newC = labelDFATarget.addNFAConfiguration(
+					(NFAState)edge.target,
+					c.alt,
+					c.context,
+					c.semanticContext);
+			}
+		}
+		if ( labelDFATarget.nfaConfigurations.size()==0 ) {
+			// kill; it's empty
+			dfa.setState(labelDFATarget.stateNumber, null);
+			labelDFATarget = null;
+		}
+        return labelDFATarget;
+	}
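+	// Illustrative note (hedged sketch, not from the original source): using
+	// the javadoc's example, if d contains configuration (7|2|ctx) and NFA
+	// state 7 has an 'a' edge to NFA state 13, reach(d, 'a') returns a new
+	// DFA state containing (13|2|ctx); if no configuration in d can move on
+	// 'a', the empty state is discarded and null is returned.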
+
+	/** Walk the configurations of this DFA state d looking for the
+	 *  configuration, c, that has a transition on EOT.  State d should
+	 *  be converted to an accept state predicting the c.alt.  Blast
+	 *  d's current configuration set and make it just have config c.
+	 *
+	 *  TODO: can there be more than one config with EOT transition?
+	 *  That would mean that two NFA configurations could reach the
+	 *  end of the token with possibly different predicted alts.
+	 *  Seems like that would be rare or impossible.  Perhaps convert
+	 *  this routine to find all such configs and give error if &gt;1.
+	 */
+	protected void convertToEOTAcceptState(DFAState d) {
+		Label eot = new Label(Label.EOT);
+		int numConfigs = d.nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration c = d.nfaConfigurations.get(i);
+			if ( c.resolved || c.resolveWithPredicate ) {
+				continue; // the conflict resolver indicates we must leave alone
+			}
+			NFAState p = dfa.nfa.getState(c.state);
+			Transition edge = p.transition[0];
+			Label edgeLabel = edge.label;
+			if ( edgeLabel.equals(eot) ) {
+				//System.out.println("config with EOT: "+c);
+				d.setAcceptState(true);
+				//System.out.println("d goes from "+d);
+				d.nfaConfigurations.clear();
+				d.addNFAConfiguration(p,c.alt,c.context,c.semanticContext);
+				//System.out.println("to "+d);
+				return; // assume only one EOT transition
+			}
+		}
+	}
+
+	/** Add a new DFA state to the DFA if not already present.
+     *  If the DFA state uniquely predicts a single alternative, it
+     *  becomes a stop state; don't add to work list.  Further, if
+     *  there exists an NFA state predicted by &gt; 1 different alternatives
+     *  and with the same syn and sem context, the DFA is nondeterministic for
+     *  at least one input sequence reaching that NFA state.
+     */
+    protected DFAState addDFAStateToWorkList(DFAState d) {
+        DFAState existingState = dfa.addState(d);
+		if ( d != existingState ) {
+			// already there...use/return the existing DFA state.
+			// But also set the states[d.stateNumber] to the existing
+			// DFA state because the closureIsBusy must report
+			// infinite recursion on a state before it knows
+			// whether or not the state will already be
+			// found after closure on it finishes.  It could be
+			// referring to a state that will ultimately not make it
+			// into the reachable state space and the error
+			// reporting must be able to compute the path from
+			// start to the error state with infinite recursion
+			dfa.setState(d.stateNumber, existingState);
+			return existingState;
+		}
+
+		// if not there, then examine new state.
+
+		// resolve syntactic conflicts by choosing a single alt or
+        // by using semantic predicates if present.
+        resolveNonDeterminisms(d);
+
+        // If deterministic, don't add this state; it's an accept state
+        // Just return as a valid DFA state
+		int alt = d.getUniquelyPredictedAlt();
+		if ( alt!=NFA.INVALID_ALT_NUMBER ) { // uniquely predicts an alt?
+			d = convertToAcceptState(d, alt);
+			/*
+			System.out.println("convert to accept; DFA "+d.dfa.decisionNumber+" state "+d.stateNumber+" uniquely predicts alt "+
+				d.getUniquelyPredictedAlt());
+				*/
+		}
+		else {
+            // unresolved, add to work list to continue NFA conversion
+            work.add(d);
+        }
+        return d;
+    }
+
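+	/** Mark d as an accept state for alt.  If DFAOptimizer.MERGE_STOP_STATES
+	 *  is on and d is deterministic, has no recursion problems, and carries
+	 *  the same gated predicate context as an existing accept state for alt,
+	 *  reuse that existing state instead of d; otherwise register d as the
+	 *  (possibly additional) accept state for alt.
+	 */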
+	protected DFAState convertToAcceptState(DFAState d, int alt) {
+		// only merge stop states if they are deterministic, have no
+		// recursion problems, and only if they have the same gated pred
+		// context!
+		// Later, the error reporting may want to trace the path from
+		// the start state to the nondet state
+		if ( DFAOptimizer.MERGE_STOP_STATES &&
+			d.getNonDeterministicAlts()==null &&
+			!d.abortedDueToRecursionOverflow &&
+			!d.abortedDueToMultipleRecursiveAlts )
+		{
+			// check to see if we already have an accept state for this alt
+			// [must do this after we resolve nondeterminisms in general]
+			DFAState acceptStateForAlt = dfa.getAcceptState(alt);
+			if ( acceptStateForAlt!=null ) {
+				// we already have an accept state for alt;
+				// Are their gated sem pred contexts the same?
+				// For now we assume a braindead version: either both have no
+				// gated preds or they share exactly the same single gated pred.
+				// The equals() method is only defined on Predicate contexts not
+				// OR etc...
+				SemanticContext gatedPreds = d.getGatedPredicatesInNFAConfigurations();
+				SemanticContext existingStateGatedPreds =
+					acceptStateForAlt.getGatedPredicatesInNFAConfigurations();
+				if ( (gatedPreds==null && existingStateGatedPreds==null) ||
+				     ((gatedPreds!=null && existingStateGatedPreds!=null) &&
+					  gatedPreds.equals(existingStateGatedPreds)) )
+				{
+					// make this d.statenumber point at old DFA state
+					dfa.setState(d.stateNumber, acceptStateForAlt);
+					dfa.removeState(d);    // remove this state from unique DFA state set
+					d = acceptStateForAlt; // use old accept state; throw this one out
+					return d;
+				}
+				// else consider it a new accept state; fall through.
+			}
+		}
+		d.setAcceptState(true); // new accept state for alt
+		dfa.setAcceptState(alt, d);
+		return d;
+	}
+
+	/** If &gt; 1 NFA configurations within this DFA state have identical
+	 *  NFA state and context, but differ in their predicted
+	 *  alternative, then a single input sequence predicts multiple alts.
+	 *  TODO update for new context suffix stuff 3-9-2005
+	 *  The NFA decision is therefore syntactically indistinguishable
+	 *  from the left edge upon at least one input sequence.  We may
+	 *  terminate the NFA to DFA conversion for these paths since no
+	 *  paths emanating from those NFA states can possibly separate
+	 *  these conjoined twins once intertwined to make things
+	 *  deterministic (unless there are semantic predicates; see below).
+	 *
+	 *  Upon a nondeterministic set of NFA configurations, we should
+	 *  report a problem to the grammar designer and resolve the issue
+	 *  by arbitrarily picking the first alternative (this usually
+	 *  ends up producing the most natural behavior).  Pick the lowest
+	 *  alt number and just turn off all NFA configurations
+	 *  associated with the other alts. Rather than remove conflicting
+	 *  NFA configurations, I set the "resolved" bit so that future
+	 *  computations will ignore them.  In this way, we maintain the
+	 *  complete DFA state with all its configurations, but prevent
+	 *  future DFA conversion operations from pursuing undesirable
+	 *  paths.  Remember that we want to terminate DFA conversion as
+	 *  soon as we know the decision is deterministic *or*
+	 *  nondeterministic.
+	 *
+	 *  [BTW, I have convinced myself that there can be at most one
+	 *  set of nondeterministic configurations in a DFA state.  Only NFA
+	 *  configurations arising from the same input sequence can appear
+	 *  in a DFA state.  There is no way to have another complete set
+	 *  of nondeterministic NFA configurations without another input
+	 *  sequence, which would reach a different DFA state.  Therefore,
+	 *  the two nondeterministic NFA configuration sets cannot collide
+	 *  in the same DFA state.]
+	 *
+	 *  Consider DFA state {(s|1),(s|2),(s|3),(t|3),(v|4)} where (s|a)
+	 *  is state 's' and alternative 'a'.  Here, configuration set
+	 *  {(s|1),(s|2),(s|3)} predicts 3 different alts.  Configurations
+	 *  (s|2) and (s|3) are "resolved", leaving {(s|1),(t|3),(v|4)} as
+	 *  items that must still be considered by the DFA conversion
+	 *  algorithm in DFA.findNewDFAStatesAndAddDFATransitions().
+	 *
+	 *  Consider the following grammar where alts 1 and 2 are no
+	 *  problem because of the 2nd lookahead symbol.  Alts 3 and 4 are
+	 *  identical and will therefore reach the rule end NFA state but
+	 *  predicting 2 different alts (no amount of future lookahead
+	 *  will render them deterministic/separable):
+	 *
+	 *  a : A B
+	 *    | A C
+	 *    | A
+	 *    | A
+	 *    ;
+	 *
+	 *  Here is a (slightly reduced) NFA of this grammar:
+	 *
+	 *  (1)-A-&gt;(2)-B-&gt;(end)-EOF-&gt;(8)
+	 *   |              ^
+	 *  (2)-A-&gt;(3)-C----|
+	 *   |              ^
+	 *  (4)-A-&gt;(5)------|
+	 *   |              ^
+	 *  (6)-A-&gt;(7)------|
+	 *
+	 *  where (n) is NFA state n.  To begin DFA conversion, the start
+	 *  state is created:
+	 *
+	 *  {(1|1),(2|2),(4|3),(6|4)}
+	 *
+	 *  Upon A, all NFA configurations lead to new NFA states yielding
+	 *  new DFA state:
+	 *
+	 *  {(2|1),(3|2),(5|3),(7|4),(end|3),(end|4)}
+	 *
+	 *  where the configurations with state end in them are added
+	 *  during the epsilon closure operation.  State end predicts both
+	 *  alts 3 and 4.  An error is reported, the latter configuration is
+	 *  flagged as resolved leaving the DFA state as:
+	 *
+	 *  {(2|1),(3|2),(5|3),(7|4|resolved),(end|3),(end|4|resolved)}
+	 *
+	 *  As NFA configurations are added to a DFA state during its
+	 *  construction, the reachable set of labels is computed.  Here
+	 *  reachable is {B,C,EOF} because there is at least one NFA state
+	 *  in the DFA state that can transition upon those symbols.
+	 *
+	 *  The final DFA looks like:
+	 *
+	 *  {(1|1),(2|2),(4|3),(6|4)}
+	 *              |
+	 *              v
+	 *  {(2|1),(3|2),(5|3),(7|4),(end|3),(end|4)} -B-&gt; (end|1)
+	 *              |                        |
+	 *              C                        ----EOF-&gt; (8,3)
+	 *              |
+	 *              v
+	 *           (end|2)
+	 *
+	 *  Upon AB, alt 1 is predicted.  Upon AC, alt 2 is predicted.
+	 *  Upon A EOF, alt 3 is predicted.  Alt 4 is not a viable
+	 *  alternative.
+	 *
+	 *  The algorithm is essentially to walk all the configurations
+	 *  looking for a conflict of the form (s|i) and (s|j) for i!=j.
+	 *  Use a hash table to track state+context pairs for collisions
+	 *  so that we have O(n) to walk the n configurations looking for
+	 *  a conflict.  Upon every conflict, track the alt number so
+	 *  we have a list of all nondeterministically predicted alts. Also
+	 *  track the minimum alt.  Next go back over the configurations, setting
+	 *  the "resolved" bit for any that have an alt that is a member of
+	 *  the nondeterministic set.  This will effectively remove any alts
+	 *  but the one we want from future consideration.
+	 *
+	 *  See resolveWithSemanticPredicates()
+	 *
+	 *  AMBIGUOUS TOKENS
+	 *
+	 *  With keywords and ID tokens, there is an inherent ambiguity in that
+	 *  "int" can be matched by ID also.  Each lexer rule has an EOT
+	 *  transition emanating from it which is used whenever the end of
+	 *  a rule is reached and another token rule did not invoke it.  EOT
+	 *  is the only thing that can be seen next.  If two rules are identical
+	 *  like "int" and "int" then the 2nd def is unreachable and you'll get
+	 *  a warning.  We prevent a warning though for the keyword/ID issue as
+	 *  ID is still reachable.  This can be a bit weird.  A '+' rule followed by a
+	 *  '+'|'+=' rule will fail to match '+' for the 2nd rule.
+	 *
+	 *  If all NFA states in this DFA state are targets of EOT transitions,
+	 *  (and there is more than one state plus no unique alt is predicted)
+	 *  then DFA conversion will leave this state as a dead state as nothing
+	 *  can be reached from this state.  To resolve the ambiguity, just do
+	 *  what flex and friends do: pick the first rule (alt in this case) to
+	 *  win.  This means you should put keywords before the ID rule.
+	 *  If the DFA state has only one NFA state then there is no issue:
+	 *  it uniquely predicts one alt. :)  Problem
+	 *  states will look like this during conversion:
+	 *
+	 *  DFA 1:{9|1, 19|2, 14|3, 20|2, 23|2, 24|2, ...}-&lt;EOT&gt;-&gt;5:{41|3, 42|2}
+	 *
+	 *  Worse, when you have two identical literal rules, you will see 3 alts
+	 *  in the EOT state (one for ID and one each for the identical rules).
+	 */
+	public void resolveNonDeterminisms(DFAState d) {
+		if ( debug ) {
+			System.out.println("resolveNonDeterminisms "+d.toString());
+		}
+		boolean conflictingLexerRules = false;
+		Set<Integer> nondeterministicAlts = d.getNonDeterministicAlts();
+		if ( debug && nondeterministicAlts!=null ) {
+			System.out.println("nondet alts="+nondeterministicAlts);
+		}
+
+		// CHECK FOR AMBIGUOUS EOT (if |allAlts|>1 and EOT state, resolve)
+		// grab any config to see if EOT state; any other configs must
+		// transition on EOT to get to this DFA state as well so all
+		// states in d must be targets of EOT.  These are the end states
+		// created in NFAFactory.build_EOFState
+		NFAConfiguration anyConfig = d.nfaConfigurations.get(0);
+		NFAState anyState = dfa.nfa.getState(anyConfig.state);
+
+		// if d is a target of EOT and more than one alt is predicted,
+		// indicate that d is nondeterministic on all alts; otherwise
+		// it looks like the state has no problem
+		if ( anyState.isEOTTargetState() ) {
+			Set<Integer> allAlts = d.getAltSet();
+			// is more than 1 alt predicted?
+			if ( allAlts!=null && allAlts.size()>1 ) {
+				nondeterministicAlts = allAlts;
+				// track Tokens rule issues differently than other decisions
+				if ( d.dfa.isTokensRuleDecision() ) {
+					dfa.probe.reportLexerRuleNondeterminism(d,allAlts);
+					//System.out.println("Tokens rule DFA state "+d+" nondeterministic");
+					conflictingLexerRules = true;
+				}
+			}
+		}
+
+		// if no problems return unless we aborted work on d to avoid inf recursion
+		if ( !d.abortedDueToRecursionOverflow && nondeterministicAlts==null ) {
+			return; // no problems, return
+		}
+
+		// if we're not a conflicting lexer rule and we didn't abort, report ambig
+		// We should get a report for abort so don't give another
+		if ( !d.abortedDueToRecursionOverflow && !conflictingLexerRules ) {
+			// TODO: with k=x option set, this is called twice for same state
+			dfa.probe.reportNondeterminism(d, nondeterministicAlts);
+			// TODO: how to turn off when it's only the FOLLOW that is
+			// conflicting.  This used to shut off even alts i,j < n
+			// conflict warnings. :(
+		}
+
+		// ATTEMPT TO RESOLVE WITH SEMANTIC PREDICATES
+		boolean resolved =
+			tryToResolveWithSemanticPredicates(d, nondeterministicAlts);
+		if ( resolved ) {
+			if ( debug ) {
+				System.out.println("resolved DFA state "+d.stateNumber+" with pred");
+			}
+			d.resolvedWithPredicates = true;
+			dfa.probe.reportNondeterminismResolvedWithSemanticPredicate(d);
+			return;
+		}
+
+		// RESOLVE SYNTACTIC CONFLICT BY REMOVING ALL BUT ONE ALT
+		resolveByChoosingFirstAlt(d, nondeterministicAlts);
+
+		//System.out.println("state "+d.stateNumber+" resolved to alt "+winningAlt);
+	}
+
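+	/** Resolve a syntactic conflict by alt order: for greedy decisions pick
+	 *  the minimum (first) nondeterministic alt; for nongreedy decisions
+	 *  prefer the exit alt, but only when the exit alt is itself one of the
+	 *  nondeterministic alts.  Return the winning alt.
+	 */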
+	protected int resolveByChoosingFirstAlt(DFAState d, Set<Integer> nondeterministicAlts) {
+		int winningAlt;
+		if ( dfa.isGreedy() ) {
+			winningAlt = resolveByPickingMinAlt(d,nondeterministicAlts);
+		}
+		else {
+			// If nongreedy, the exit alt should win, but only if it's
+			// involved in the nondeterminism!
+			/*
+			System.out.println("resolving exit alt for decision="+
+				dfa.decisionNumber+" state="+d);
+			System.out.println("nondet="+nondeterministicAlts);
+			System.out.println("exit alt "+exitAlt);
+			*/
+			int exitAlt = dfa.getNumberOfAlts();
+			if ( nondeterministicAlts.contains(Utils.integer(exitAlt)) ) {
+				// if nongreedy and exit alt is one of those nondeterministic alts
+				// predicted, resolve in favor of what follows block
+				winningAlt = resolveByPickingExitAlt(d,nondeterministicAlts);
+			}
+			else {
+				winningAlt = resolveByPickingMinAlt(d,nondeterministicAlts);
+			}
+		}
+		return winningAlt;
+	}
+
+	/** Turn off all configurations associated with the
+	 *  set of incoming nondeterministic alts except the min alt number.
+	 *  There may be many alts among the configurations but only turn off
+	 *  the ones with problems (other than the min alt of course).
+	 *
+	 *  If nondeterministicAlts is null then turn off all configs 'cept those
+	 *  associated with the minimum alt.
+	 *
+	 *  Return the min alt found.
+	 */
+	protected int resolveByPickingMinAlt(DFAState d, Set<Integer> nondeterministicAlts) {
+		int min;
+		if ( nondeterministicAlts!=null ) {
+			min = getMinAlt(nondeterministicAlts);
+		}
+		else {
+			min = d.minAltInConfigurations;
+		}
+
+		turnOffOtherAlts(d, min, nondeterministicAlts);
+
+		return min;
+	}
+
+	/** Resolve state d by choosing exit alt, which is same value as the
+	 *  number of alternatives.  Return that exit alt.
+	 */
+	protected int resolveByPickingExitAlt(DFAState d, Set<Integer> nondeterministicAlts) {
+		int exitAlt = dfa.getNumberOfAlts();
+		turnOffOtherAlts(d, exitAlt, nondeterministicAlts);
+		return exitAlt;
+	}
+
+	/** turn off all states associated with alts other than the good one
+	 *  (as long as they are one of the nondeterministic ones)
+	 */
+	protected static void turnOffOtherAlts(DFAState d, int min, Set<Integer> nondeterministicAlts) {
+		int numConfigs = d.nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = d.nfaConfigurations.get(i);
+			if ( configuration.alt!=min ) {
+				if ( nondeterministicAlts==null ||
+					 nondeterministicAlts.contains(Utils.integer(configuration.alt)) )
+				{
+					configuration.resolved = true;
+				}
+			}
+		}
+	}
+
+	protected static int getMinAlt(Set<Integer> nondeterministicAlts) {
+		int min = Integer.MAX_VALUE;
+		for (Integer altI : nondeterministicAlts) {
+			int alt = altI;
+			if ( alt < min ) {
+				min = alt;
+			}
+		}
+		return min;
+	}
+
+	/** See if a set of nondeterministic alternatives can be disambiguated
+	 *  with the semantic predicate contexts of the alternatives.
+	 *
+	 *  Without semantic predicates, syntactic conflicts are resolved
+	 *  by simply choosing the first viable alternative.  In the
+	 *  presence of semantic predicates, you can resolve the issue by
+	 *  evaluating boolean expressions at run time.  During analysis,
+	 *  this amounts to suppressing grammar error messages to the
+	 *  developer.  NFA configurations are always marked as "to be
+	 *  resolved with predicates" so that
+	 *  DFA.findNewDFAStatesAndAddDFATransitions() will know to ignore
+	 *  these configurations and add predicate transitions to the DFA
+	 *  after adding token/char labels.
+	 *
+	 *  During analysis, we can simply make sure that for n
+	 *  ambiguously predicted alternatives there are at least n-1
+	 *  unique predicate sets.  The nth alternative can be predicted
+	 *  with "not" the "or" of all other predicates.  NFA configurations without
+	 *  predicates are assumed to have the default predicate of
+	 *  "true" from a user point of view.  When true is combined via || with
+	 *  another predicate, the predicate is a tautology and must be removed
+	 *  from consideration for disambiguation:
+	 *
+	 *  a : b | B ; // hoisting p1||true out of rule b, yields no predicate
+	 *  b : {p1}? B | B ;
+	 *
+	 *  This is done down in getPredicatesPerNonDeterministicAlt().
+	 */
+	protected boolean tryToResolveWithSemanticPredicates(DFAState d,
+														 Set<Integer> nondeterministicAlts)
+	{
+		Map<Integer, SemanticContext> altToPredMap =
+				getPredicatesPerNonDeterministicAlt(d, nondeterministicAlts);
+
+		if ( altToPredMap.isEmpty() ) {
+			return false;
+		}
+
+		//System.out.println("nondeterministic alts with predicates: "+altToPredMap);
+		dfa.probe.reportAltPredicateContext(d, altToPredMap);
+
+		if ( nondeterministicAlts.size()-altToPredMap.size()>1 ) {
+			// too few predicates to resolve; just return
+			return false;
+		}
+
+		// Handle case where 1 predicate is missing
+		// Case 1. Semantic predicates
+		// If the missing pred is on nth alt, !(union of other preds)==true
+		// so we can avoid that computation.  If naked alt is ith, then must
+		// test it with !(union) since semantic predicated alts are order
+		// independent
+		// Case 2: Syntactic predicates
+		// The naked alt is always assumed to be true as the order of
+		// alts is the order of precedence.  The naked alt will be a tautology
+		// anyway as it's !(union of other preds).  This implies
+		// that there is no such thing as noviable alt for synpred edges
+		// emanating from a DFA state.
+		if ( altToPredMap.size()==nondeterministicAlts.size()-1 ) {
+			// if there are n-1 predicates for n nondeterministic alts, can fix
+			org.antlr.misc.BitSet ndSet = org.antlr.misc.BitSet.of(nondeterministicAlts);
+			org.antlr.misc.BitSet predSet = org.antlr.misc.BitSet.of(altToPredMap);
+			int nakedAlt = ndSet.subtract(predSet).getSingleElement();
+			SemanticContext nakedAltPred;
+			if ( nakedAlt == max(nondeterministicAlts) ) {
+				// the naked alt is the last nondet alt and will be the default clause
+				nakedAltPred = new SemanticContext.TruePredicate();
+			}
+			else {
+				// pretend naked alternative is covered with !(union other preds)
+				// unless one of preds from other alts is a manually specified synpred
+				// since those have precedence same as alt order.  Missing synpred
+				// is true so that alt wins (or is at least attempted).
+				// Note: can't miss any preds on alts (can't be here) if auto backtrack
+				// since it prefixes all.
+				// In the LL(*) paper, I'll just have the algorithm emit a warning about uncovered
+				// pred
+				SemanticContext unionOfPredicatesFromAllAlts =
+					getUnionOfPredicates(altToPredMap);
+				//System.out.println("all predicates "+unionOfPredicatesFromAllAlts);
+				if ( unionOfPredicatesFromAllAlts.isSyntacticPredicate() ) {
+					nakedAltPred = new SemanticContext.TruePredicate();
+				}
+				else {
+					nakedAltPred =
+						SemanticContext.not(unionOfPredicatesFromAllAlts);
+				}
+			}
+
+			//System.out.println("covering naked alt="+nakedAlt+" with "+nakedAltPred);
+
+			altToPredMap.put(Utils.integer(nakedAlt), nakedAltPred);
+			// set all config with alt=nakedAlt to have the computed predicate
+			int numConfigs = d.nfaConfigurations.size();
+			for (int i = 0; i < numConfigs; i++) { // TODO: I don't think we need to do this; altToPredMap has it
+			 //7/27/10  theok, I removed it and it still seems to work with everything; leave in anyway just in case
+				NFAConfiguration configuration = d.nfaConfigurations.get(i);
+				if ( configuration.alt == nakedAlt ) {
+					configuration.semanticContext = nakedAltPred;
+				}
+			}
+		}
+
+		if ( altToPredMap.size()==nondeterministicAlts.size() ) {
+			// RESOLVE CONFLICT by picking one NFA configuration for each alt
+			// and setting its resolvedWithPredicate flag
+			// First, prevent a recursion warning on this state due to
+			// pred resolution
+			if ( d.abortedDueToRecursionOverflow ) {
+				d.dfa.probe.removeRecursiveOverflowState(d);
+			}
+			int numConfigs = d.nfaConfigurations.size();
+			//System.out.println("pred map="+altToPredMap);
+			for (int i = 0; i < numConfigs; i++) {
+				NFAConfiguration configuration = d.nfaConfigurations.get(i);
+				SemanticContext semCtx = altToPredMap.get(Utils.integer(configuration.alt));
+				if ( semCtx!=null ) {
+					// resolve (first found) with pred
+					// and remove alt from problem list
+					//System.out.println("c="+configuration);
+					configuration.resolveWithPredicate = true;
+					// altToPredMap has preds from all alts; store into "anointed" config
+					configuration.semanticContext = semCtx; // reset to combined
+					altToPredMap.remove(Utils.integer(configuration.alt));
+
+					// notify grammar that we've used the preds contained in semCtx
+					if ( semCtx.isSyntacticPredicate() ) {
+						dfa.nfa.grammar.synPredUsedInDFA(dfa, semCtx);
+					}
+				}
+				else if ( nondeterministicAlts.contains(Utils.integer(configuration.alt)) ) {
+					// resolve all configurations for nondeterministic alts
+					// for which there is no predicate context by turning it off
+					configuration.resolved = true;
+				}
+			}
+			return true;
+		}
+
+		return false;  // couldn't fix the problem with predicates
+	}
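+	// Illustrative note (hedged sketch, not from the original source): with
+	// nondeterministic alts {1,2,3} where alt 1 carries {p1}?, alt 2 carries
+	// {p2}?, and alt 3 is naked, the naked alt is covered automatically:
+	// with true when it is the maximum alt (it becomes the default clause),
+	// otherwise with !(p1||p2).  All three alts then resolve with predicate
+	// transitions instead of by picking the first alt.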
+
+	/** Return a mapping from nondeterministic alt to combined list of predicates.
+	 *  If both (s|i|semCtx1) and (t|i|semCtx2) exist, then the proper predicate
+	 *  for alt i is semCtx1||semCtx2 because you have arrived at this single
+	 *  DFA state via two NFA paths, both of which have semantic predicates.
+	 *  We ignore deterministic alts because syntax alone is sufficient
+	 *  to predict those.  Do not include their predicates.
+	 *
+	 *  Alts with no predicate are assumed to have {true}? pred.
+	 *
+	 *  When combining via || with "true", all predicates are removed from
+	 *  consideration since the expression will always be true and hence
+	 *  not tell us how to resolve anything.  So, if any NFA configuration
+	 *  in this DFA state does not have a semantic context, the alt cannot
+	 *  be resolved with a predicate.
+	 *
+	 *  If nonnull, incidentEdgeLabel tells us what NFA transition label
+	 *  we did a reach on to compute state d.  d may have insufficient
+	 *  preds, so we really want this for the error message.
+	 */
+	protected Map<Integer, SemanticContext> getPredicatesPerNonDeterministicAlt(DFAState d,
+																				Set<Integer> nondeterministicAlts)
+	{
+		// map alt to combined SemanticContext
+		Map<Integer, SemanticContext> altToPredicateContextMap =
+			new HashMap<Integer, SemanticContext>();
+		// init the alt to predicate set map
+		Map<Integer, OrderedHashSet<SemanticContext>> altToSetOfContextsMap =
+			new HashMap<Integer, OrderedHashSet<SemanticContext>>();
+		for (Integer altI : nondeterministicAlts) {
+			altToSetOfContextsMap.put(altI, new OrderedHashSet<SemanticContext>());
+		}
+
+		/*
+		List<Label> sampleInputLabels = d.dfa.probe.getSampleNonDeterministicInputSequence(d);
+		String input = d.dfa.probe.getInputSequenceDisplay(sampleInputLabels);
+		System.out.println("sample input: "+input);
+		*/
+
+		// for each configuration, create a unique set of predicates
+		// Also, track the alts with at least one uncovered configuration
+		// (one w/o a predicate); tracks tautologies like p1||true
+		Map<Integer, Set<Token>> altToLocationsReachableWithoutPredicate = new HashMap<Integer, Set<Token>>();
+		Set<Integer> nondetAltsWithUncoveredConfiguration = new HashSet<Integer>();
+		//System.out.println("configs="+d.nfaConfigurations);
+		//System.out.println("configs with preds?"+d.atLeastOneConfigurationHasAPredicate);
+		//System.out.println("configs with preds="+d.configurationsWithPredicateEdges);
+		int numConfigs = d.nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration configuration = d.nfaConfigurations.get(i);
+			Integer altI = Utils.integer(configuration.alt);
+			// if alt is nondeterministic, combine its predicates
+			if ( nondeterministicAlts.contains(altI) ) {
+				// if there is a predicate for this NFA configuration, OR in
+				if ( configuration.semanticContext !=
+					 SemanticContext.EMPTY_SEMANTIC_CONTEXT )
+				{
+					Set<SemanticContext> predSet = altToSetOfContextsMap.get(altI);
+					predSet.add(configuration.semanticContext);
+				}
+				else {
+					// if no predicate, but it's part of nondeterministic alt
+					// then at least one path exists not covered by a predicate.
+					// must remove predicate for this alt; track incomplete alts
+					nondetAltsWithUncoveredConfiguration.add(altI);
+					/*
+					NFAState s = dfa.nfa.getState(configuration.state);
+					System.out.println("###\ndec "+dfa.decisionNumber+" alt "+configuration.alt+
+									   " enclosing rule for nfa state not covered "+
+									   s.enclosingRule);
+					if ( s.associatedASTNode!=null ) {
+						System.out.println("token="+s.associatedASTNode.token);
+					}
+					System.out.println("nfa state="+s);
+
+					if ( s.incidentEdgeLabel!=null && Label.intersect(incidentEdgeLabel, s.incidentEdgeLabel) ) {
+						Set<Token> locations = altToLocationsReachableWithoutPredicate.get(altI);
+						if ( locations==null ) {
+							locations = new HashSet<Token>();
+							altToLocationsReachableWithoutPredicate.put(altI, locations);
+						}
+						locations.add(s.associatedASTNode.token);
+					}
+					*/
+				}
+			}
+		}
+
+		// For each alt, OR together all unique predicates associated with
+		// all configurations
+		// Also, track the list of incompletely covered alts: those alts
+		// with at least 1 predicate and at least one configuration w/o a
+		// predicate. We want this in order to report to the decision probe.
+		List<Integer> incompletelyCoveredAlts = new ArrayList<Integer>();
+		for (Integer altI : nondeterministicAlts) {
+			Set<SemanticContext> contextsForThisAlt = altToSetOfContextsMap.get(altI);
+			if ( nondetAltsWithUncoveredConfiguration.contains(altI) ) { // >= 1 config has no ctx
+				if ( contextsForThisAlt.size()>0 ) {    // && at least one pred
+					incompletelyCoveredAlts.add(altI);  // this alt incompletely covered
+				}
+				continue; // don't include; at least 1 config has no ctx
+			}
+			SemanticContext combinedContext = null;
+			for (SemanticContext ctx : contextsForThisAlt) {
+				combinedContext =
+						SemanticContext.or(combinedContext,ctx);
+			}
+			altToPredicateContextMap.put(altI, combinedContext);
+		}
+
+		if ( incompletelyCoveredAlts.size()>0 ) {
+			/*
+			System.out.println("prob in dec "+dfa.decisionNumber+" state="+d);
+			FASerializer serializer = new FASerializer(dfa.nfa.grammar);
+			String result = serializer.serialize(dfa.startState);
+			System.out.println("dfa: "+result);
+			System.out.println("incomplete alts: "+incompletelyCoveredAlts);
+			System.out.println("nondet="+nondeterministicAlts);
+			System.out.println("nondetAltsWithUncoveredConfiguration="+ nondetAltsWithUncoveredConfiguration);
+			System.out.println("altToCtxMap="+altToSetOfContextsMap);
+			System.out.println("altToPredicateContextMap="+altToPredicateContextMap);
+			*/
+			for (int i = 0; i < numConfigs; i++) {
+				NFAConfiguration configuration = d.nfaConfigurations.get(i);
+				Integer altI = Utils.integer(configuration.alt);
+				if ( incompletelyCoveredAlts.contains(altI) &&
+					 configuration.semanticContext == SemanticContext.EMPTY_SEMANTIC_CONTEXT )
+				{
+					NFAState s = dfa.nfa.getState(configuration.state);
+					/*
+					System.out.print("nondet config w/o context "+configuration+
+									 " incident "+(s.incidentEdgeLabel!=null?s.incidentEdgeLabel.toString(dfa.nfa.grammar):null));
+					if ( s.associatedASTNode!=null ) {
+						System.out.print(" token="+s.associatedASTNode.token);
+					}
+					else System.out.println();
+					*/
+                    // We want to report getting to an NFA state with an
+                    // incoming label, unless it's EOF, w/o a predicate.
+                    if ( s.incidentEdgeLabel!=null && s.incidentEdgeLabel.label != Label.EOF ) {
+                        if ( s.associatedASTNode==null || s.associatedASTNode.token==null ) {
+							ErrorManager.internalError("no AST/token for nonepsilon target w/o predicate");
+						}
+						else {
+							Set<Token> locations = altToLocationsReachableWithoutPredicate.get(altI);
+							if ( locations==null ) {
+								locations = new HashSet<Token>();
+								altToLocationsReachableWithoutPredicate.put(altI, locations);
+							}
+							locations.add(s.associatedASTNode.token);
+						}
+					}
+				}
+			}
+			dfa.probe.reportIncompletelyCoveredAlts(d,
+													altToLocationsReachableWithoutPredicate);
+		}
+
+		return altToPredicateContextMap;
+	}
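+	// Illustrative note (hedged sketch, not from the original source): if
+	// nondeterministic alt 1 appears in configurations (s|1|{p1}?) and
+	// (t|1|{p2}?), the returned map holds 1 -> p1||p2; if a third
+	// configuration (u|1) has no predicate, alt 1 is dropped from the map
+	// and reported as incompletely covered instead.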
+
+	/** OR together all predicates from the alts.  Note that the predicate
+	 *  for an alt could itself be a combination of predicates.
+	 */
+	protected static SemanticContext getUnionOfPredicates(Map<?, SemanticContext> altToPredMap) {
+		Iterator<SemanticContext> iter;
+		SemanticContext unionOfPredicatesFromAllAlts = null;
+		iter = altToPredMap.values().iterator();
+		while ( iter.hasNext() ) {
+			SemanticContext semCtx = iter.next();
+			if ( unionOfPredicatesFromAllAlts==null ) {
+				unionOfPredicatesFromAllAlts = semCtx;
+			}
+			else {
+				unionOfPredicatesFromAllAlts =
+						SemanticContext.or(unionOfPredicatesFromAllAlts,semCtx);
+			}
+		}
+		return unionOfPredicatesFromAllAlts;
+	}
+
+	/** For each NFA config in d, look for the "predicate required" flag set
+	 *  during nondeterminism resolution.
+	 *
+	 *  Add the predicate edges sorted by the alternative number; I'm fairly
+	 *  sure that I could walk the configs backwards so they are added to
+	 *  the predDFATarget in the right order, but it's best to make sure.
+	 *  Predicates succeed in the order they are specified.  Alt i wins
+	 *  over alt i+1 if both predicates are true.
+	 */
+	protected void addPredicateTransitions(DFAState d) {
+		List<NFAConfiguration> configsWithPreds = new ArrayList<NFAConfiguration>();
+		// get a list of all configs with predicates
+		int numConfigs = d.nfaConfigurations.size();
+		for (int i = 0; i < numConfigs; i++) {
+			NFAConfiguration c = d.nfaConfigurations.get(i);
+			if ( c.resolveWithPredicate ) {
+				configsWithPreds.add(c);
+			}
+		}
+		// Sort ascending according to alt; alt i has higher precedence than i+1
+		Collections.sort(configsWithPreds,
+			 new Comparator<NFAConfiguration>() {
+			@Override
+				 public int compare(NFAConfiguration a, NFAConfiguration b) {
+					 if ( a.alt < b.alt ) return -1;
+					 else if ( a.alt > b.alt ) return 1;
+					 return 0;
+				 }
+			 });
+		List<NFAConfiguration> predConfigsSortedByAlt = configsWithPreds;
+		// Now, we can add edges emanating from d for these preds in right order
+		for (int i = 0; i < predConfigsSortedByAlt.size(); i++) {
+			NFAConfiguration c = predConfigsSortedByAlt.get(i);
+			DFAState predDFATarget = d.dfa.getAcceptState(c.alt);
+			if ( predDFATarget==null ) {
+				predDFATarget = dfa.newState(); // create if not there.
+				// create a new DFA state that is a target of the predicate from d
+				predDFATarget.addNFAConfiguration(dfa.nfa.getState(c.state),
+												  c.alt,
+												  c.context,
+												  c.semanticContext);
+				predDFATarget.setAcceptState(true);
+				dfa.setAcceptState(c.alt, predDFATarget);
+				DFAState existingState = dfa.addState(predDFATarget);
+				if ( predDFATarget != existingState ) {
+					// already there...use/return the existing DFA state that
+					// is a target of this predicate.  Make this state number
+					// point at the existing state
+					dfa.setState(predDFATarget.stateNumber, existingState);
+					predDFATarget = existingState;
+				}
+			}
+			// add a transition to pred target from d
+			d.addTransition(predDFATarget, new PredicateLabel(c.semanticContext));
+		}
+	}
+
+	protected void initContextTrees(int numberOfAlts) {
+        contextTrees = new NFAContext[numberOfAlts];
+        for (int i = 0; i < contextTrees.length; i++) {
+            int alt = i+1;
+            // add a dummy root node so that an NFA configuration can
+            // always point at an NFAContext.  If a context refers to this
+            // node then it implies there is no call stack for
+            // that configuration
+            contextTrees[i] = new NFAContext(null, null);
+        }
+    }
+
+	public static int max(Set<Integer> s) {
+		if ( s==null ) {
+			return Integer.MIN_VALUE;
+		}
+		int i = 0;
+		int m = 0;
+		for (Integer value : s) {
+			i++;
+			Integer I = value;
+			if ( i==1 ) { // init m with first value
+				m = I;
+				continue;
+			}
+			if ( I>m ) {
+				m = I;
+			}
+		}
+		return m;
+	}
+}
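The javadoc on addPredicateTransitions above promises that predicate edges are added in ascending alternative order, so that when several predicates are true at once, alt i wins over alt i+1. A minimal standalone sketch of that ordering rule (the PredConfig class and pickAlt helper below are hypothetical stand-ins, not part of the tool):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;
    import java.util.function.BooleanSupplier;

    public class PredOrderSketch {
        // Hypothetical stand-in for an NFA configuration resolved with a predicate.
        static class PredConfig {
            final int alt;
            final BooleanSupplier pred;
            PredConfig(int alt, BooleanSupplier pred) { this.alt = alt; this.pred = pred; }
        }

        // Evaluate predicates in ascending alt order; the first true predicate wins,
        // which is exactly what sorting the configs before adding edges guarantees.
        static int pickAlt(List<PredConfig> configs) {
            List<PredConfig> sorted = new ArrayList<PredConfig>(configs);
            Collections.sort(sorted, new Comparator<PredConfig>() {
                @Override
                public int compare(PredConfig a, PredConfig b) {
                    return Integer.compare(a.alt, b.alt);
                }
            });
            for (PredConfig c : sorted) {
                if (c.pred.getAsBoolean()) return c.alt;
            }
            return -1; // no viable alternative
        }

        public static void main(String[] args) {
            List<PredConfig> configs = new ArrayList<PredConfig>();
            configs.add(new PredConfig(2, () -> true));
            configs.add(new PredConfig(1, () -> true));
            // Prints 1: alt 1 wins even though alt 2's predicate is also true.
            System.out.println(pickAlt(configs));
        }
    }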
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/NonLLStarDecisionException.java b/tool/src/main/java/org/antlr/analysis/NonLLStarDecisionException.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/analysis/NonLLStarDecisionException.java
rename to tool/src/main/java/org/antlr/analysis/NonLLStarDecisionException.java
diff --git a/tool/src/main/java/org/antlr/analysis/PredicateLabel.java b/tool/src/main/java/org/antlr/analysis/PredicateLabel.java
new file mode 100644
index 0000000..1aae6c7
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/PredicateLabel.java
@@ -0,0 +1,91 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarAST;
+
+public class PredicateLabel extends Label {
+	/** A tree of semantic predicates from the grammar AST if label==SEMPRED.
+	 *  In the NFA, labels will always be exactly one predicate, but the DFA
+	 *  may have to combine a bunch of them as it collects predicates from
+	 *  multiple NFA configurations into a single DFA state.
+	 */
+	protected SemanticContext semanticContext;
+	
+	/** Make a semantic predicate label */
+	public PredicateLabel(GrammarAST predicateASTNode) {
+		super(SEMPRED);
+		this.semanticContext = new SemanticContext.Predicate(predicateASTNode);
+	}
+
+	/** Make a semantic predicate label from an existing semantic context */
+	public PredicateLabel(SemanticContext semCtx) {
+		super(SEMPRED);
+		this.semanticContext = semCtx;
+	}
+
+	@Override
+	public int hashCode() {
+		return semanticContext.hashCode();
+	}
+
+	@Override
+	public boolean equals(Object o) {
+		if ( o==null ) {
+			return false;
+		}
+		if ( this == o ) {
+			return true; // equals if same object
+		}
+		if ( !(o instanceof PredicateLabel) ) {
+			return false;
+		}
+		return semanticContext.equals(((PredicateLabel)o).semanticContext);
+	}
+
+	@Override
+	public boolean isSemanticPredicate() {
+		return true;
+	}
+
+	@Override
+	public SemanticContext getSemanticContext() {
+		return semanticContext;
+	}
+
+	@Override
+	public String toString() {
+		return "{"+semanticContext+"}?";
+	}
+
+	@Override
+	public String toString(Grammar g) {
+		return toString();
+	}
+}
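PredicateLabel delegates both equals() and hashCode() to its SemanticContext, so two labels built from equal contexts collapse into one entry in hashed collections. A minimal sketch, assuming the org.antlr.analysis classes from this patch are on the classpath:

    import java.util.HashSet;
    import java.util.Set;

    import org.antlr.analysis.Label;
    import org.antlr.analysis.PredicateLabel;
    import org.antlr.analysis.SemanticContext;

    public class PredicateLabelSketch {
        public static void main(String[] args) {
            // Two distinct TruePredicate instances compare equal (same constant value),
            // so the labels wrapping them compare equal as well.
            PredicateLabel l1 = new PredicateLabel(new SemanticContext.TruePredicate());
            PredicateLabel l2 = new PredicateLabel(new SemanticContext.TruePredicate());

            Set<Label> labels = new HashSet<Label>();
            labels.add(l1);
            labels.add(l2);

            System.out.println(l1.equals(l2)); // true
            System.out.println(labels.size()); // 1: the labels merge
            System.out.println(l1);            // {true}?
        }
    }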
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/RuleClosureTransition.java b/tool/src/main/java/org/antlr/analysis/RuleClosureTransition.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/analysis/RuleClosureTransition.java
rename to tool/src/main/java/org/antlr/analysis/RuleClosureTransition.java
diff --git a/tool/src/main/java/org/antlr/analysis/SemanticContext.java b/tool/src/main/java/org/antlr/analysis/SemanticContext.java
new file mode 100644
index 0000000..4f43b5a
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/SemanticContext.java
@@ -0,0 +1,836 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarAST;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STGroup;
+
+import java.util.*;
+
+/** A binary tree structure used to record the semantic context in which
+ *  an NFA configuration is valid.  It's either a single predicate or
+ *  a tree representing an operation tree such as: p1&amp;&amp;p2 or p1||p2.
+ *
+ *  For NFA o-p1-&gt;o-p2-&gt;o, create tree AND(p1,p2).
+ *  For NFA (1)-p1-&gt;(2)
+ *           |       ^
+ *           |       |
+ *          (3)-p2----
+ *  we will have to combine p1 and p2 into DFA state as we will be
+ *  adding NFA configurations for state 2 with two predicates p1,p2.
+ *  So, set context for combined NFA config for state 2: OR(p1,p2).
+ *
+ *  I have scoped the AND, NOT, OR, and Predicate subclasses of
+ *  SemanticContext within the scope of this outer class.
+ *
+ *  July 7, 2006: TJP altered OR to be set of operands. the Binary tree
+ *  made it really hard to reduce complicated || sequences to their minimum.
+ *  Got huge repeated || conditions.
+ */
+public abstract class SemanticContext {
+	/** Create a default value for the semantic context shared among all
+	 *  NFAConfigurations that do not have an actual semantic context.
+	 *  This prevents lots of if!=null type checks all over; it represents
+	 *  just an empty set of predicates.
+	 */
+	public static final SemanticContext EMPTY_SEMANTIC_CONTEXT = new Predicate(Predicate.INVALID_PRED_VALUE);
+
+	/** Given a semantic context expression tree, return a tree with all
+	 *  nongated predicates set to true and then reduced.  So p&amp;&amp;(q||r) would
+	 *  return p&amp;&amp;r if q is nongated but p and r are gated.
+	 */
+	public abstract SemanticContext getGatedPredicateContext();
+
+	/** Generate an expression that will evaluate the semantic context,
+	 *  given a set of output templates.
+	 */
+	public abstract ST genExpr(CodeGenerator generator,
+										   STGroup templates,
+										   DFA dfa);
+
+	public abstract boolean hasUserSemanticPredicate(); // user-specified sempred {}? or {}?=>
+	public abstract boolean isSyntacticPredicate();
+
+	/** Notify the indicated grammar of any syn preds used within this context */
+	public void trackUseOfSyntacticPredicates(Grammar g) {
+	}
+
+	public static class Predicate extends SemanticContext {
+		/** The AST node in tree created from the grammar holding the predicate */
+		public GrammarAST predicateAST;
+
+		/** Is this a {...}?=&gt; gating predicate or a normal disambiguating {...}?
+		 *  If any predicate in the expression is gated, then the expression is
+		 *  considered gated.
+		 *
+		 *  The simple Predicate object's predicate AST's type is used to set
+		 *  gated to true if type==GATED_SEMPRED.
+		 */
+		protected boolean gated = false;
+
+		/** syntactic predicates are converted to semantic predicates
+		 *  but synpreds are generated slightly differently.
+		 */
+		protected boolean synpred = false;
+
+		public static final int INVALID_PRED_VALUE = -2;
+		public static final int FALSE_PRED = 0;
+		public static final int TRUE_PRED = ~0;
+
+		/** sometimes predicates are known to be true or false; we need
+		 *  a way to represent this without resorting to a target language
+		 *  value like true or TRUE.
+		 */
+		protected int constantValue = INVALID_PRED_VALUE;
+
+		public Predicate(int constantValue) {
+			predicateAST = new GrammarAST();
+			this.constantValue=constantValue;
+		}
+
+		public Predicate(GrammarAST predicate) {
+			this.predicateAST = predicate;
+			this.gated =
+				predicate.getType()==ANTLRParser.GATED_SEMPRED ||
+				predicate.getType()==ANTLRParser.SYN_SEMPRED ;
+			this.synpred =
+				predicate.getType()==ANTLRParser.SYN_SEMPRED ||
+				predicate.getType()==ANTLRParser.BACKTRACK_SEMPRED;
+		}
+
+		public Predicate(Predicate p) {
+			this.predicateAST = p.predicateAST;
+			this.gated = p.gated;
+			this.synpred = p.synpred;
+			this.constantValue = p.constantValue;
+		}
+
+		/** Two predicates are the same if they are literally the same
+		 *  text rather than the same node in the grammar's AST, or if
+		 *  they have the same constant value.
+		 *  As of July 2006 I'm not sure these are needed.
+		 */
+		@Override
+		public boolean equals(Object o) {
+			if ( !(o instanceof Predicate) ) {
+				return false;
+			}
+
+			Predicate other = (Predicate)o;
+			if (this.constantValue != other.constantValue){
+				return false;
+			}
+
+			if (this.constantValue != INVALID_PRED_VALUE){
+				return true;
+			}
+
+			return predicateAST.getText().equals(other.predicateAST.getText());
+		}
+
+		@Override
+		public int hashCode() {
+			if (constantValue != INVALID_PRED_VALUE){
+				return constantValue;
+			}
+
+			if ( predicateAST ==null ) {
+				return 0;
+			}
+
+			return predicateAST.getText().hashCode();
+		}
+
+		@Override
+		public ST genExpr(CodeGenerator generator,
+									  STGroup templates,
+									  DFA dfa)
+		{
+			ST eST;
+			if ( templates!=null ) {
+				if ( synpred ) {
+					eST = templates.getInstanceOf("evalSynPredicate");
+				}
+				else {
+					eST = templates.getInstanceOf("evalPredicate");
+					generator.grammar.decisionsWhoseDFAsUsesSemPreds.add(dfa);
+				}
+				String predEnclosingRuleName = predicateAST.enclosingRuleName;
+				/*
+				String decisionEnclosingRuleName =
+					dfa.getNFADecisionStartState().getEnclosingRule();
+				// if these rulenames are diff, then pred was hoisted out of rule
+				// Currently I don't warn you about this as it could be annoying.
+				// I do the translation anyway.
+				*/
+				//eST.add("pred", this.toString());
+				if ( generator!=null ) {
+					eST.add("pred",
+									 generator.translateAction(predEnclosingRuleName,predicateAST));
+				}
+			}
+			else {
+				eST = new ST("<pred>");
+				eST.add("pred", this.toString());
+				return eST;
+			}
+			if ( generator!=null ) {
+				String description =
+					generator.target.getTargetStringLiteralFromString(this.toString());
+				eST.add("description", description);
+			}
+			return eST;
+		}
+
+		@Override
+		public SemanticContext getGatedPredicateContext() {
+			if ( gated ) {
+				return this;
+			}
+			return null;
+		}
+
+		@Override
+		public boolean hasUserSemanticPredicate() { // user-specified sempred
+			return predicateAST !=null &&
+				   ( predicateAST.getType()==ANTLRParser.GATED_SEMPRED ||
+					 predicateAST.getType()==ANTLRParser.SEMPRED );
+		}
+
+		@Override
+		public boolean isSyntacticPredicate() {
+			return predicateAST !=null &&
+				( predicateAST.getType()==ANTLRParser.SYN_SEMPRED ||
+				  predicateAST.getType()==ANTLRParser.BACKTRACK_SEMPRED );
+		}
+
+		@Override
+		public void trackUseOfSyntacticPredicates(Grammar g) {
+			if ( synpred ) {
+				g.synPredNamesUsedInDFA.add(predicateAST.getText());
+			}
+		}
+
+		@Override
+		public String toString() {
+			if ( predicateAST ==null ) {
+				return "<nopred>";
+			}
+			return predicateAST.getText();
+		}
+	}
+
+	public static class TruePredicate extends Predicate {
+		public TruePredicate() {
+			super(TRUE_PRED);
+		}
+
+		@Override
+		public ST genExpr(CodeGenerator generator,
+									  STGroup templates,
+									  DFA dfa)
+		{
+			if ( templates!=null ) {
+				return templates.getInstanceOf("true_value");
+			}
+			return new ST("true");
+		}
+
+		@Override
+		public boolean hasUserSemanticPredicate() {
+			return false; // not user specified.
+		}
+
+		@Override
+		public String toString() {
+			return "true"; // not used for code gen, just DOT and print outs
+		}
+	}
+
+	public static class FalsePredicate extends Predicate {
+		public FalsePredicate() {
+			super(FALSE_PRED);
+		}
+
+		@Override
+		public ST genExpr(CodeGenerator generator,
+									  STGroup templates,
+									  DFA dfa)
+		{
+			if ( templates!=null ) {
+				return templates.getInstanceOf("false");
+			}
+			return new ST("false");
+		}
+
+		@Override
+		public boolean hasUserSemanticPredicate() {
+			return false; // not user specified.
+		}
+
+		@Override
+		public String toString() {
+			return "false"; // not used for code gen, just DOT and print outs
+		}
+	}
+
+	public static abstract class CommutativePredicate extends SemanticContext {
+		protected final Set<SemanticContext> operands = new HashSet<SemanticContext>();
+		protected int hashcode;
+
+		public CommutativePredicate(SemanticContext a, SemanticContext b) {
+			if (a.getClass() == this.getClass()){
+				CommutativePredicate predicate = (CommutativePredicate)a;
+				operands.addAll(predicate.operands);
+			} else {
+				operands.add(a);
+			}
+
+			if (b.getClass() == this.getClass()){
+				CommutativePredicate predicate = (CommutativePredicate)b;
+				operands.addAll(predicate.operands);
+			} else {
+				operands.add(b);
+			}
+
+			hashcode = calculateHashCode();
+		}
+
+		public CommutativePredicate(HashSet<SemanticContext> contexts){
+			for (SemanticContext context : contexts){
+				if (context.getClass() == this.getClass()){
+					CommutativePredicate predicate = (CommutativePredicate)context;
+					operands.addAll(predicate.operands);
+				} else {
+					operands.add(context);
+				}
+			}
+
+			hashcode = calculateHashCode();
+		}
+
+		@Override
+		public SemanticContext getGatedPredicateContext() {
+			SemanticContext result = null;
+			for (SemanticContext semctx : operands) {
+				SemanticContext gatedPred = semctx.getGatedPredicateContext();
+				if ( gatedPred!=null ) {
+					result = combinePredicates(result, gatedPred);
+				}
+			}
+			return result;
+		}
+
+		@Override
+		public boolean hasUserSemanticPredicate() {
+			for (SemanticContext semctx : operands) {
+				if ( semctx.hasUserSemanticPredicate() ) {
+					return true;
+				}
+			}
+			return false;
+		}
+
+		@Override
+		public boolean isSyntacticPredicate() {
+			for (SemanticContext semctx : operands) {
+				if ( semctx.isSyntacticPredicate() ) {
+					return true;
+				}
+			}
+			return false;
+		}
+
+		@Override
+		public void trackUseOfSyntacticPredicates(Grammar g) {
+			for (SemanticContext semctx : operands) {
+				semctx.trackUseOfSyntacticPredicates(g);
+			}
+		}
+
+		@Override
+		public boolean equals(Object obj) {
+			if (this == obj)
+				return true;
+
+			if (obj.getClass() == this.getClass()) {
+				CommutativePredicate commutative = (CommutativePredicate)obj;
+				Set<SemanticContext> otherOperands = commutative.operands;
+				if (operands.size() != otherOperands.size())
+					return false;
+
+				return operands.containsAll(otherOperands);
+			}
+
+			if (obj instanceof NOT)
+			{
+				NOT not = (NOT)obj;
+				if (not.ctx instanceof CommutativePredicate && not.ctx.getClass() != this.getClass()) {
+					Set<SemanticContext> otherOperands = ((CommutativePredicate)not.ctx).operands;
+					if (operands.size() != otherOperands.size())
+						return false;
+
+					ArrayList<SemanticContext> temp = new ArrayList<SemanticContext>(operands.size());
+					for (SemanticContext context : otherOperands) {
+						temp.add(not(context));
+					}
+
+					return operands.containsAll(temp);
+				}
+			}
+
+			return false;
+		}
+
+		@Override
+		public int hashCode(){
+			return hashcode;
+		}
+
+		@Override
+		public String toString() {
+			StringBuilder buf = new StringBuilder();
+			buf.append("(");
+			int i = 0;
+			for (SemanticContext semctx : operands) {
+				if ( i>0 ) {
+					buf.append(getOperandString());
+				}
+				buf.append(semctx.toString());
+				i++;
+			}
+			buf.append(")");
+			return buf.toString();
+		}
+
+		public abstract String getOperandString();
+
+		public abstract SemanticContext combinePredicates(SemanticContext left, SemanticContext right);
+
+		public abstract int calculateHashCode();
+	}
+
+	public static class AND extends CommutativePredicate {
+		public AND(SemanticContext a, SemanticContext b) {
+			super(a,b);
+		}
+
+		public AND(HashSet<SemanticContext> contexts) {
+			super(contexts);
+		}
+
+		@Override
+		public ST genExpr(CodeGenerator generator,
+									  STGroup templates,
+									  DFA dfa)
+		{
+			ST result = null;
+			for (SemanticContext operand : operands) {
+				if (result == null) {
+					result = operand.genExpr(generator, templates, dfa);
+					continue;
+				}
+
+				ST eST;
+				if ( templates!=null ) {
+					eST = templates.getInstanceOf("andPredicates");
+				}
+				else {
+					eST = new ST("(<left>&&<right>)");
+				}
+				eST.add("left", result);
+				eST.add("right", operand.genExpr(generator,templates,dfa));
+				result = eST;
+			}
+
+			return result;
+		}
+
+		@Override
+		public String getOperandString() {
+			return "&&";
+		}
+
+		@Override
+		public SemanticContext combinePredicates(SemanticContext left, SemanticContext right) {
+			return SemanticContext.and(left, right);
+		}
+
+		@Override
+		public int calculateHashCode() {
+			int hashcode = 0;
+			for (SemanticContext context : operands) {
+				hashcode = hashcode ^ context.hashCode();
+			}
+
+			return hashcode;
+		}
+	}
+
+	public static class OR extends CommutativePredicate {
+		public OR(SemanticContext a, SemanticContext b) {
+			super(a,b);
+		}
+
+		public OR(HashSet<SemanticContext> contexts) {
+			super(contexts);
+		}
+
+		@Override
+		public ST genExpr(CodeGenerator generator,
+									  STGroup templates,
+									  DFA dfa)
+		{
+			ST eST;
+			if ( templates!=null ) {
+				eST = templates.getInstanceOf("orPredicates");
+			}
+			else {
+				eST = new ST("(<operands; separator=\"||\">)");
+			}
+			for (SemanticContext semctx : operands) {
+				eST.add("operands", semctx.genExpr(generator,templates,dfa));
+			}
+			return eST;
+		}
+
+		@Override
+		public String getOperandString() {
+			return "||";
+		}
+
+		@Override
+		public SemanticContext combinePredicates(SemanticContext left, SemanticContext right) {
+			return SemanticContext.or(left, right);
+		}
+
+		@Override
+		public int calculateHashCode() {
+			int hashcode = 0;
+			for (SemanticContext context : operands) {
+				hashcode = ~hashcode ^ context.hashCode();
+			}
+
+			return hashcode;
+		}
+	}
+
+	public static class NOT extends SemanticContext {
+		protected SemanticContext ctx;
+		public NOT(SemanticContext ctx) {
+			this.ctx = ctx;
+		}
+
+		@Override
+		public ST genExpr(CodeGenerator generator,
+									  STGroup templates,
+									  DFA dfa)
+		{
+			ST eST;
+			if ( templates!=null ) {
+				eST = templates.getInstanceOf("notPredicate");
+			}
+			else {
+				eST = new ST("!(<pred>)");
+			}
+			eST.add("pred", ctx.genExpr(generator,templates,dfa));
+			return eST;
+		}
+
+		@Override
+		public SemanticContext getGatedPredicateContext() {
+			SemanticContext p = ctx.getGatedPredicateContext();
+			if ( p==null ) {
+				return null;
+			}
+			return new NOT(p);
+		}
+
+		@Override
+		public boolean hasUserSemanticPredicate() {
+			return ctx.hasUserSemanticPredicate();
+		}
+
+		@Override
+		public boolean isSyntacticPredicate() {
+			return ctx.isSyntacticPredicate();
+		}
+
+		@Override
+		public void trackUseOfSyntacticPredicates(Grammar g) {
+			ctx.trackUseOfSyntacticPredicates(g);
+		}
+
+		@Override
+		public boolean equals(Object object) {
+			if ( !(object instanceof NOT) ) {
+				return false;
+			}
+			return this.ctx.equals(((NOT)object).ctx);
+		}
+
+		@Override
+		public int hashCode() {
+			return ~ctx.hashCode();
+		}
+
+		@Override
+		public String toString() {
+			return "!("+ctx+")";
+		}
+	}
+
+	public static SemanticContext and(SemanticContext a, SemanticContext b) {
+		//System.out.println("AND: "+a+"&&"+b);
+		if (a instanceof FalsePredicate || b instanceof FalsePredicate)
+			return new FalsePredicate();
+
+		SemanticContext[] terms = factorOr(a, b);
+		SemanticContext commonTerms = terms[0];
+		a = terms[1];
+		b = terms[2];
+
+		boolean factored = commonTerms != null && commonTerms != EMPTY_SEMANTIC_CONTEXT && !(commonTerms instanceof TruePredicate);
+		if (factored) {
+			return or(commonTerms, and(a, b));
+		}
+		
+		//System.Console.Out.WriteLine( "AND: " + a + "&&" + b );
+		if (a instanceof FalsePredicate || b instanceof FalsePredicate)
+			return new FalsePredicate();
+
+		if ( a==EMPTY_SEMANTIC_CONTEXT || a==null ) {
+			return b;
+		}
+		if ( b==EMPTY_SEMANTIC_CONTEXT || b==null ) {
+			return a;
+		}
+
+		if (a instanceof TruePredicate)
+			return b;
+
+		if (b instanceof TruePredicate)
+			return a;
+
+		//// Factoring takes care of this case
+		//if (a.Equals(b))
+		//    return a;
+
+		//System.out.println("## have to AND");
+		AND result = new AND(a,b);
+		if (result.operands.size() == 1) {
+			return result.operands.iterator().next();
+		}
+
+		return result;
+	}
+
+	public static SemanticContext or(SemanticContext a, SemanticContext b) {
+		//System.out.println("OR: "+a+"||"+b);
+		if (a instanceof TruePredicate || b instanceof TruePredicate)
+			return new TruePredicate();
+
+		SemanticContext[] terms = factorAnd(a, b);
+		SemanticContext commonTerms = terms[0];
+		a = terms[1];
+		b = terms[2];
+		boolean factored = commonTerms != null && commonTerms != EMPTY_SEMANTIC_CONTEXT && !(commonTerms instanceof FalsePredicate);
+		if (factored) {
+			return and(commonTerms, or(a, b));
+		}
+
+		if ( a==EMPTY_SEMANTIC_CONTEXT || a==null || a instanceof FalsePredicate ) {
+			return b;
+		}
+
+		if ( b==EMPTY_SEMANTIC_CONTEXT || b==null || b instanceof FalsePredicate ) {
+			return a;
+		}
+
+		if ( a instanceof TruePredicate || b instanceof TruePredicate || commonTerms instanceof TruePredicate ) {
+			return new TruePredicate();
+		}
+
+		//// Factoring takes care of this case
+		//if (a.equals(b))
+		//    return a;
+
+		if ( a instanceof NOT ) {
+			NOT n = (NOT)a;
+			// check for !p||p
+			if ( n.ctx.equals(b) ) {
+				return new TruePredicate();
+			}
+		}
+		else if ( b instanceof NOT ) {
+			NOT n = (NOT)b;
+			// check for p||!p
+			if ( n.ctx.equals(a) ) {
+				return new TruePredicate();
+			}
+		}
+
+		//System.out.println("## have to OR");
+		OR result = new OR(a,b);
+		if (result.operands.size() == 1)
+			return result.operands.iterator().next();
+
+		return result;
+	}
+
+	public static SemanticContext not(SemanticContext a) {
+		if (a instanceof NOT) {
+			return ((NOT)a).ctx;
+		}
+
+		if (a instanceof TruePredicate)
+			return new FalsePredicate();
+		else if (a instanceof FalsePredicate)
+			return new TruePredicate();
+
+		return new NOT(a);
+	}
+
+	// Factor so (a && b) == (result && a && b)
+	public static SemanticContext[] factorAnd(SemanticContext a, SemanticContext b)
+	{
+		if (a == EMPTY_SEMANTIC_CONTEXT || a == null || a instanceof FalsePredicate)
+			return new SemanticContext[] { EMPTY_SEMANTIC_CONTEXT, a, b };
+		if (b == EMPTY_SEMANTIC_CONTEXT || b == null || b instanceof FalsePredicate)
+			return new SemanticContext[] { EMPTY_SEMANTIC_CONTEXT, a, b };
+
+		if (a instanceof TruePredicate || b instanceof TruePredicate)
+		{
+			return new SemanticContext[] { new TruePredicate(), EMPTY_SEMANTIC_CONTEXT, EMPTY_SEMANTIC_CONTEXT };
+		}
+
+		HashSet<SemanticContext> opsA = new HashSet<SemanticContext>(getAndOperands(a));
+		HashSet<SemanticContext> opsB = new HashSet<SemanticContext>(getAndOperands(b));
+
+		HashSet<SemanticContext> result = new HashSet<SemanticContext>(opsA);
+		result.retainAll(opsB);
+		if (result.isEmpty())
+			return new SemanticContext[] { EMPTY_SEMANTIC_CONTEXT, a, b };
+
+		opsA.removeAll(result);
+		if (opsA.isEmpty())
+			a = new TruePredicate();
+		else if (opsA.size() == 1)
+			a = opsA.iterator().next();
+		else
+			a = new AND(opsA);
+
+		opsB.removeAll(result);
+		if (opsB.isEmpty())
+			b = new TruePredicate();
+		else if (opsB.size() == 1)
+			b = opsB.iterator().next();
+		else
+			b = new AND(opsB);
+
+		if (result.size() == 1)
+			return new SemanticContext[] { result.iterator().next(), a, b };
+
+		return new SemanticContext[] { new AND(result), a, b };
+	}
+
+	// Factor so (a || b) == (result || a || b)
+	public static SemanticContext[] factorOr(SemanticContext a, SemanticContext b)
+	{
+		HashSet<SemanticContext> opsA = new HashSet<SemanticContext>(getOrOperands(a));
+		HashSet<SemanticContext> opsB = new HashSet<SemanticContext>(getOrOperands(b));
+
+		HashSet<SemanticContext> result = new HashSet<SemanticContext>(opsA);
+		result.retainAll(opsB);
+		if (result.isEmpty())
+			return new SemanticContext[] { EMPTY_SEMANTIC_CONTEXT, a, b };
+
+		opsA.removeAll(result);
+		if (opsA.isEmpty())
+			a = new FalsePredicate();
+		else if (opsA.size() == 1)
+			a = opsA.iterator().next();
+		else
+			a = new OR(opsA);
+
+		opsB.removeAll(result);
+		if (opsB.isEmpty())
+			b = new FalsePredicate();
+		else if (opsB.size() == 1)
+			b = opsB.iterator().next();
+		else
+			b = new OR(opsB);
+
+		if (result.size() == 1)
+			return new SemanticContext[] { result.iterator().next(), a, b };
+
+		return new SemanticContext[] { new OR(result), a, b };
+	}
+
+	public static Collection<SemanticContext> getAndOperands(SemanticContext context)
+	{
+		if (context instanceof AND)
+			return ((AND)context).operands;
+
+		if (context instanceof NOT) {
+			Collection<SemanticContext> operands = getOrOperands(((NOT)context).ctx);
+			List<SemanticContext> result = new ArrayList<SemanticContext>(operands.size());
+			for (SemanticContext operand : operands) {
+				result.add(not(operand));
+			}
+			return result;
+		}
+
+		ArrayList<SemanticContext> result = new ArrayList<SemanticContext>();
+		result.add(context);
+		return result;
+	}
+
+	public static Collection<SemanticContext> getOrOperands(SemanticContext context)
+	{
+		if (context instanceof OR)
+			return ((OR)context).operands;
+
+		if (context instanceof NOT) {
+			Collection<SemanticContext> operands = getAndOperands(((NOT)context).ctx);
+			List<SemanticContext> result = new ArrayList<SemanticContext>(operands.size());
+			for (SemanticContext operand : operands) {
+				result.add(not(operand));
+			}
+			return result;
+		}
+
+		ArrayList<SemanticContext> result = new ArrayList<SemanticContext>();
+		result.add(context);
+		return result;
+	}
+}
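The static and()/or()/not() helpers above fold constants, treat the empty context as "no predicate", and recognize p||!p before ever building an AND or OR node. A small sketch of those identities, assuming the classes above are available; the Predicate(int) constructor is used here only to get an opaque placeholder predicate:

    import org.antlr.analysis.SemanticContext;

    public class SemanticContextSketch {
        public static void main(String[] args) {
            // A placeholder predicate; the constant value 42 has no meaning here, it just
            // yields a context that is neither true, false, nor the empty context.
            SemanticContext p = new SemanticContext.Predicate(42);

            // !true folds to false
            System.out.println(SemanticContext.not(new SemanticContext.TruePredicate())); // false

            // p || !p folds to true (handled explicitly in or())
            System.out.println(SemanticContext.or(p, SemanticContext.not(p)));            // true

            // AND with a true predicate is the identity: the same p comes back
            SemanticContext t = new SemanticContext.TruePredicate();
            System.out.println(SemanticContext.and(p, t) == p);                           // true
        }
    }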
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/State.java b/tool/src/main/java/org/antlr/analysis/State.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/analysis/State.java
rename to tool/src/main/java/org/antlr/analysis/State.java
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/analysis/StateCluster.java b/tool/src/main/java/org/antlr/analysis/StateCluster.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/analysis/StateCluster.java
rename to tool/src/main/java/org/antlr/analysis/StateCluster.java
diff --git a/tool/src/main/java/org/antlr/analysis/Transition.java b/tool/src/main/java/org/antlr/analysis/Transition.java
new file mode 100644
index 0000000..7438a81
--- /dev/null
+++ b/tool/src/main/java/org/antlr/analysis/Transition.java
@@ -0,0 +1,87 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.analysis;
+
+/** A generic transition between any two state machine states.  It defines
+ *  some special labels that indicate things like epsilon transitions and
+ *  that the label is actually a set of labels or a semantic predicate.
+ *  This is a one way link.  It emanates from a state (usually via a list of
+ *  transitions) and has a label/target pair.  I have abstracted the notion
+ *  of a Label to handle the various kinds of things it can be.
+ */
+public class Transition implements Comparable<Transition> {
+    /** What label must be consumed to transition to target */
+    public Label label;
+
+    /** The target of this transition */
+    public State target;
+
+    public Transition(Label label, State target) {
+        this.label = label;
+        this.target = target;
+    }
+
+    public Transition(int label, State target) {
+        this.label = new Label(label);
+        this.target = target;
+    }
+
+	public boolean isEpsilon() {
+		return label.isEpsilon();
+	}
+
+	public boolean isAction() {
+		return label.isAction();
+	}
+
+    public boolean isSemanticPredicate() {
+        return label.isSemanticPredicate();
+    }
+
+	@Override
+    public int hashCode() {
+        return label.hashCode() + target.stateNumber;
+    }
+
+	@Override
+    public boolean equals(Object o) {
+        Transition other = (Transition)o;
+        return this.label.equals(other.label) &&
+               this.target.equals(other.target);
+    }
+
+	@Override
+    public int compareTo(Transition other) {
+        return this.label.compareTo(other.label);
+    }
+
+	@Override
+    public String toString() {
+        return label+"->"+target.stateNumber;
+    }
+}
diff --git a/tool/src/main/java/org/antlr/codegen/ACyclicDFACodeGenerator.java b/tool/src/main/java/org/antlr/codegen/ACyclicDFACodeGenerator.java
new file mode 100644
index 0000000..93adb61
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/ACyclicDFACodeGenerator.java
@@ -0,0 +1,191 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.analysis.*;
+import org.antlr.misc.Utils;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STGroup;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class ACyclicDFACodeGenerator {
+	protected CodeGenerator parentGenerator;
+
+	public ACyclicDFACodeGenerator(CodeGenerator parent) {
+		this.parentGenerator = parent;
+	}
+
+	public ST genFixedLookaheadDecision(STGroup templates,
+													DFA dfa)
+	{
+		return walkFixedDFAGeneratingStateMachine(templates, dfa, dfa.startState, 1);
+	}
+
+	protected ST walkFixedDFAGeneratingStateMachine(
+			STGroup templates,
+			DFA dfa,
+			DFAState s,
+			int k)
+	{
+		//System.out.println("walk "+s.stateNumber+" in dfa for decision "+dfa.decisionNumber);
+		if ( s.isAcceptState() ) {
+			ST dfaST = templates.getInstanceOf("dfaAcceptState");
+			dfaST.add("alt", Utils.integer(s.getUniquelyPredictedAlt()));
+			return dfaST;
+		}
+
+		// the default templates for generating a state and its edges
+		// can be an if-then-else structure or a switch
+		String dfaStateName = "dfaState";
+		String dfaLoopbackStateName = "dfaLoopbackState";
+		String dfaOptionalBlockStateName = "dfaOptionalBlockState";
+		String dfaEdgeName = "dfaEdge";
+		if ( parentGenerator.canGenerateSwitch(s) ) {
+			dfaStateName = "dfaStateSwitch";
+			dfaLoopbackStateName = "dfaLoopbackStateSwitch";
+			dfaOptionalBlockStateName = "dfaOptionalBlockStateSwitch";
+			dfaEdgeName = "dfaEdgeSwitch";
+		}
+
+		ST dfaST = templates.getInstanceOf(dfaStateName);
+		if ( dfa.getNFADecisionStartState().decisionStateType==NFAState.LOOPBACK ) {
+			dfaST = templates.getInstanceOf(dfaLoopbackStateName);
+		}
+		else if ( dfa.getNFADecisionStartState().decisionStateType==NFAState.OPTIONAL_BLOCK_START ) {
+			dfaST = templates.getInstanceOf(dfaOptionalBlockStateName);
+		}
+		dfaST.add("k", Utils.integer(k));
+		dfaST.add("stateNumber", Utils.integer(s.stateNumber));
+		dfaST.add("semPredState", s.isResolvedWithPredicates());
+		/*
+		String description = dfa.getNFADecisionStartState().getDescription();
+		description = parentGenerator.target.getTargetStringLiteralFromString(description);
+		//System.out.println("DFA: "+description+" associated with AST "+dfa.getNFADecisionStartState());
+		if ( description!=null ) {
+			dfaST.add("description", description);
+		}
+		*/
+		int EOTPredicts = NFA.INVALID_ALT_NUMBER;
+		DFAState EOTTarget = null;
+		//System.out.println("DFA state "+s.stateNumber);
+		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+			Transition edge = s.transition(i);
+			//System.out.println("edge "+s.stateNumber+"-"+edge.label.toString()+"->"+edge.target.stateNumber);
+			if ( edge.label.getAtom()==Label.EOT ) {
+				// don't generate a real edge for EOT; track which alt EOT predicts
+				// and generate that prediction in the else clause as the default case
+				EOTTarget = (DFAState)edge.target;
+				EOTPredicts = EOTTarget.getUniquelyPredictedAlt();
+				/*
+				System.out.println("DFA s"+s.stateNumber+" EOT goes to s"+
+								   edge.target.stateNumber+" predicates alt "+
+								   EOTPredicts);
+				*/
+				continue;
+			}
+			ST edgeST = templates.getInstanceOf(dfaEdgeName);
+			// If the template wants all the label values delineated, do that
+			if ( edgeST.impl.formalArguments.get("labels")!=null ) {
+				List<Integer> labels = edge.label.getSet().toList();
+				List<String> targetLabels = new ArrayList<String>(labels.size());
+				for (int j = 0; j < labels.size(); j++) {
+					Integer vI = labels.get(j);
+					String label =
+						parentGenerator.getTokenTypeAsTargetLabel(vI);
+					targetLabels.add(label); // rewrite List element to be name
+				}
+				edgeST.add("labels", targetLabels);
+			}
+			else { // else create an expression to evaluate (the general case)
+				edgeST.add("labelExpr",
+									parentGenerator.genLabelExpr(templates,edge,k));
+			}
+
+			// stick in any gated predicates for any edge if not already a pred
+			if ( !edge.label.isSemanticPredicate() ) {
+				DFAState target = (DFAState)edge.target;
+				SemanticContext preds =
+					target.getGatedPredicatesInNFAConfigurations();
+				if ( preds!=null ) {
+					//System.out.println("preds="+target.getGatedPredicatesInNFAConfigurations());
+					ST predST = preds.genExpr(parentGenerator,
+														  parentGenerator.getTemplates(),
+														  dfa);
+					edgeST.add("predicates", predST);
+				}
+			}
+
+			ST targetST =
+				walkFixedDFAGeneratingStateMachine(templates,
+												   dfa,
+												   (DFAState)edge.target,
+												   k+1);
+			edgeST.add("targetState", targetST);
+			dfaST.add("edges", edgeST);
+			/*
+			System.out.println("back to DFA "+
+							   dfa.decisionNumber+"."+s.stateNumber);
+							   */
+		}
+
+		// HANDLE EOT EDGE
+		if ( EOTPredicts!=NFA.INVALID_ALT_NUMBER ) {
+			// EOT uniquely predicts an alt
+			dfaST.add("eotPredictsAlt", Utils.integer(EOTPredicts));
+		}
+		else if ( EOTTarget!=null && EOTTarget.getNumberOfTransitions()>0 ) {
+			// EOT state has transitions so must split on predicates.
+			// Generate predicate else-if clauses and then generate
+			// NoViableAlt exception as else clause.
+			// Note: these predicates emanate from the EOT target state
+			// rather than the current DFAState s so the error message
+			// might be slightly misleading if you are looking at the
+			// state number.  Predicates emanating from EOT targets are
+			// hoisted up to the state that has the EOT edge.
+			for (int i = 0; i < EOTTarget.getNumberOfTransitions(); i++) {
+				Transition predEdge = EOTTarget.transition(i);
+				ST edgeST = templates.getInstanceOf(dfaEdgeName);
+				edgeST.add("labelExpr",
+									parentGenerator.genSemanticPredicateExpr(templates,predEdge));
+				// the target must be an accept state
+				//System.out.println("EOT edge");
+				ST targetST =
+					walkFixedDFAGeneratingStateMachine(templates,
+													   dfa,
+													   (DFAState)predEdge.target,
+													   k+1);
+				edgeST.add("targetState", targetST);
+				dfaST.add("edges", edgeST);
+			}
+		}
+		return dfaST;
+	}
+}
+
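walkFixedDFAGeneratingStateMachine above is a depth-first walk of an acyclic DFA: accept states become "predict alt N" leaves, edges become nested conditionals, and the templates supply the concrete syntax. The toy sketch below shows the same recursion shape with hypothetical ToyState/ToyEdge classes and plain string concatenation instead of StringTemplate:

    import java.util.ArrayList;
    import java.util.List;

    public class FixedDfaWalkSketch {
        static class ToyEdge {
            final char label; final ToyState target;
            ToyEdge(char label, ToyState target) { this.label = label; this.target = target; }
        }
        static class ToyState {
            final int acceptAlt; // > 0 means this is an accept state predicting that alt
            final List<ToyEdge> edges = new ArrayList<ToyEdge>();
            ToyState(int acceptAlt) { this.acceptAlt = acceptAlt; }
        }

        // Recursively emit nested if/else text; k is the lookahead depth, as in the real walker.
        static String walk(ToyState s, int k) {
            if (s.acceptAlt > 0) return "alt = " + s.acceptAlt + ";";
            StringBuilder buf = new StringBuilder();
            for (int i = 0; i < s.edges.size(); i++) {
                ToyEdge e = s.edges.get(i);
                buf.append(i == 0 ? "if" : "else if")
                   .append(" (LA(").append(k).append(") == '").append(e.label).append("') { ")
                   .append(walk(e.target, k + 1)).append(" } ");
            }
            buf.append("else { throw new RuntimeException(\"NoViableAlt\"); }");
            return buf.toString();
        }

        public static void main(String[] args) {
            // decision: 'a' 'b' predicts alt 1, 'a' 'c' predicts alt 2
            ToyState alt1 = new ToyState(1), alt2 = new ToyState(2);
            ToyState afterA = new ToyState(0);
            afterA.edges.add(new ToyEdge('b', alt1));
            afterA.edges.add(new ToyEdge('c', alt2));
            ToyState start = new ToyState(0);
            start.edges.add(new ToyEdge('a', afterA));
            System.out.println(walk(start, 1));
        }
    }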
diff --git a/tool/src/main/java/org/antlr/codegen/ActionScriptTarget.java b/tool/src/main/java/org/antlr/codegen/ActionScriptTarget.java
new file mode 100644
index 0000000..ace1be3
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/ActionScriptTarget.java
@@ -0,0 +1,137 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.stringtemplate.v4.ST;
+import org.antlr.tool.Grammar;
+
+public class ActionScriptTarget extends Target {
+
+	@Override
+    public String getTargetCharLiteralFromANTLRCharLiteral(
+            CodeGenerator generator,
+            String literal) {
+
+        int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+        return String.valueOf(c);
+    }
+
+	@Override
+    public String getTokenTypeAsTargetLabel(CodeGenerator generator,
+                                            int ttype) {
+        // use ints for predefined types;
+        // <invalid> <EOR> <DOWN> <UP>
+        if (ttype >= 0 && ttype <= 3) {
+            return String.valueOf(ttype);
+        }
+
+        String name = generator.grammar.getTokenDisplayName(ttype);
+
+        // If name is a literal, return the token type instead
+        if (name.charAt(0) == '\'') {
+            return String.valueOf(ttype);
+        }
+
+        return name;
+    }
+
+    /**
+     * ActionScript doesn't support Unicode string literals for characters that are considered
+     * "illegal" or that fall in the surrogate range; for example, U+FFFF and U+D800 will not
+     * encode properly.  To keep things as compact as possible we use the following encoding:
+     * if the int is at most 255, we encode it as a two-digit hex escape;
+     * if the int is between 256 and 0x7fff, we use a single unicode literal with that value;
+     * if the int is above 0x7fff, we use a unicode literal of 0x80hh, where hh is the high-order
+     * byte, followed by \xll, where ll is the low-order byte of the 16-bit number.
+     *
+     * Ideally this should be improved at a future date.  The most optimal way to encode this
+     * may be a compressed AMF encoding that is embedded using an Embed tag in ActionScript.
+     *
+     * @param v the character value to encode
+     */
+	@Override
+    public String encodeIntAsCharEscape(int v) {
+        // encode as hex
+        if ( v<=255 ) {
+			return "\\x"+ Integer.toHexString(v|0x100).substring(1,3);
+		}
+        if (v <= 0x7fff) {
+            String hex = Integer.toHexString(v|0x10000).substring(1,5);
+		    return "\\u"+hex;
+        }
+        if (v > 0xffff) {
+            System.err.println("Warning: character literal out of range for ActionScript target " + v);
+            return "";
+        }
+        StringBuilder buf = new StringBuilder("\\u80");
+        buf.append(Integer.toHexString((v >> 8) | 0x100).substring(1, 3)); // high-order bits
+        buf.append("\\x");
+        buf.append(Integer.toHexString((v & 0xff) | 0x100).substring(1, 3)); // low-order bits
+        return buf.toString();
+    }
+
+    /** Convert a long to two 32-bit numbers separated by a comma.
+     *  ActionScript does not support 64-bit numbers, so we need to break
+     *  the number into two 32-bit literals to give to the BitSet.  A number like
+     *  0xHHHHHHHHLLLLLLLL is broken into the following string:
+     *  "0xLLLLLLLL, 0xHHHHHHHH"
+     *  Note that the low-order bits come first, followed by the high-order bits.
+     *  This is to match how the BitSet constructor works, where the bits are
+     *  passed in 32-bit chunks with the low-order bits coming first.
+	 */
+	@Override
+	public String getTarget64BitStringFromValue(long word) {
+		StringBuffer buf = new StringBuffer(22); // enough for the two "0x", "," and " "
+		buf.append("0x");
+        writeHexWithPadding(buf, Integer.toHexString((int)(word & 0x00000000ffffffffL)));
+        buf.append(", 0x");
+        writeHexWithPadding(buf, Integer.toHexString((int)(word >> 32)));
+
+        return buf.toString();
+	}
+
+    private void writeHexWithPadding(StringBuffer buf, String digits) {
+        digits = digits.toUpperCase();
+        int padding = 8 - digits.length();
+        // pad left with zeros
+        for (int i = 1; i <= padding; i++) {
+            buf.append('0');
+        }
+        buf.append(digits);
+    }
+
+    protected ST chooseWhereCyclicDFAsGo(Tool tool,
+                                                     CodeGenerator generator,
+                                                     Grammar grammar,
+                                                     ST recognizerST,
+                                                     ST cyclicDFAST) {
+        return recognizerST;
+    }
+}
+
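getTarget64BitStringFromValue above emits the low 32-bit word first and the high word second so the generated literal matches the chunk order the BitSet constructor expects. A standalone sketch of the same split, useful for checking the arithmetic but not part of the Target API:

    public class Bit64SplitSketch {
        // Mirror of the splitting scheme above: "0xLLLLLLLL, 0xHHHHHHHH", low word first.
        static String split(long word) {
            long low = word & 0xFFFFFFFFL;
            long high = (word >>> 32) & 0xFFFFFFFFL;
            return String.format("0x%08X, 0x%08X", low, high);
        }

        public static void main(String[] args) {
            System.out.println(split(0x1122334455667788L)); // 0x55667788, 0x11223344
            System.out.println(split(0xFFFFFFFFL));         // 0xFFFFFFFF, 0x00000000
        }
    }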
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/codegen/CSharp2Target.java b/tool/src/main/java/org/antlr/codegen/CSharp2Target.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/codegen/CSharp2Target.java
rename to tool/src/main/java/org/antlr/codegen/CSharp2Target.java
diff --git a/tool/src/main/java/org/antlr/codegen/CSharp3Target.java b/tool/src/main/java/org/antlr/codegen/CSharp3Target.java
new file mode 100644
index 0000000..643a0e8
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/CSharp3Target.java
@@ -0,0 +1,145 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2010 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.tool.Grammar;
+import org.stringtemplate.v4.AttributeRenderer;
+import org.stringtemplate.v4.ST;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Map;
+
+public class CSharp3Target extends Target {
+    private static final HashSet<String> _languageKeywords = new HashSet<String>()
+        {{
+            add("abstract"); add("event"); add("new"); add("struct");
+            add("as"); add("explicit"); add("null"); add("switch");
+            add("base"); add("extern"); add("object"); add("this");
+            add("bool"); add("false"); add("operator"); add("throw");
+            add("break"); add("finally"); add("out"); add("true");
+            add("byte"); add("fixed"); add("override"); add("try");
+            add("case"); add("float"); add("params"); add("typeof");
+            add("catch"); add("for"); add("private"); add("uint");
+            add("char"); add("foreach"); add("protected"); add("ulong");
+            add("checked"); add("goto"); add("public"); add("unchecked");
+            add("class"); add("if"); add("readonly"); add("unsafe");
+            add("const"); add("implicit"); add("ref"); add("ushort");
+            add("continue"); add("in"); add("return"); add("using");
+            add("decimal"); add("int"); add("sbyte"); add("virtual");
+            add("default"); add("interface"); add("sealed"); add("volatile");
+            add("delegate"); add("internal"); add("short"); add("void");
+            add("do"); add("is"); add("sizeof"); add("while");
+            add("double"); add("lock"); add("stackalloc");
+            add("else"); add("long"); add("static");
+            add("enum"); add("namespace"); add("string");
+        }};
+
+    @Override
+    public boolean useBaseTemplatesForSynPredFragments() {
+        return false;
+    }
+
+    @Override
+    public String encodeIntAsCharEscape(int v) {
+        return "\\x" + Integer.toHexString(v).toUpperCase();
+    }
+
+    @Override
+    public String getTarget64BitStringFromValue(long word) {
+        return "0x" + Long.toHexString(word).toUpperCase();
+    }
+
+    @Override
+    protected void genRecognizerFile(Tool tool, CodeGenerator generator, Grammar grammar, ST outputFileST) throws IOException
+    {
+        if (!grammar.getGrammarIsRoot())
+        {
+            Grammar rootGrammar = grammar.composite.getRootGrammar();
+            String actionScope = grammar.getDefaultActionScope(grammar.type);
+            Map<String, Object> actions = rootGrammar.getActions().get(actionScope);
+            Object rootNamespace = actions != null ? actions.get("namespace") : null;
+            if (actions != null && rootNamespace != null)
+            {
+                actions = grammar.getActions().get(actionScope);
+                if (actions == null)
+                {
+                    actions = new HashMap<String, Object>();
+                    grammar.getActions().put(actionScope, actions);
+                }
+
+                actions.put("namespace", rootNamespace);
+            }
+        }
+
+        generator.getTemplates().registerRenderer(String.class, new StringRenderer(generator, this));
+        super.genRecognizerFile(tool, generator, grammar, outputFileST);
+    }
+
+    public static class StringRenderer implements AttributeRenderer
+    {
+        private final CodeGenerator _generator;
+        private final CSharp3Target _target;
+
+        public StringRenderer(CodeGenerator generator, CSharp3Target target)
+        {
+            _generator = generator;
+            _target = target;
+        }
+
+		@Override
+        public String toString(Object obj, String formatName, Locale locale)
+        {
+            String value = (String)obj;
+            if (value == null || formatName == null)
+                return value;
+
+            if (formatName.equals("id")) {
+                if (_languageKeywords.contains(value))
+                    return "@" + value;
+
+                return value;
+            } else if (formatName.equals("cap")) {
+                return Character.toUpperCase(value.charAt(0)) + value.substring(1);
+            } else if (formatName.equals("string")) {
+                return _target.getTargetStringLiteralFromString(value, true);
+            } else {
+                throw new IllegalArgumentException("Unsupported format name: '" + formatName + "'");
+            }
+        }
+    }
+}
+
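The StringRenderer above is registered for String attributes, so the C#3 templates can presumably request formats by name: "id" prefixes reserved words with '@', "cap" uppercases the first character, and "string" defers to the target's string-literal escaping. A tiny standalone illustration of the first two formats (keyword set abbreviated; this is not the renderer itself):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class CSharpIdFormatSketch {
        // Abbreviated stand-in for the _languageKeywords set above.
        static final Set<String> KEYWORDS =
            new HashSet<String>(Arrays.asList("class", "event", "namespace", "int", "object"));

        static String id(String value)  { return KEYWORDS.contains(value) ? "@" + value : value; }
        static String cap(String value) { return Character.toUpperCase(value.charAt(0)) + value.substring(1); }

        public static void main(String[] args) {
            System.out.println(id("class"));   // @class -- a rule named 'class' stays legal C#
            System.out.println(id("expr"));    // expr
            System.out.println(cap("parser")); // Parser
        }
    }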
diff --git a/tool/src/main/java/org/antlr/codegen/CTarget.java b/tool/src/main/java/org/antlr/codegen/CTarget.java
new file mode 100644
index 0000000..9ecae9a
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/CTarget.java
@@ -0,0 +1,330 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.stringtemplate.v4.ST;
+import org.antlr.tool.Grammar;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class CTarget extends Target {
+
+    List<String> strings = new ArrayList<String>();
+
+    @Override
+    protected void genRecognizerFile(Tool tool,
+            CodeGenerator generator,
+            Grammar grammar,
+            ST outputFileST)
+            throws IOException {
+
+        // Before we write this, and cause it to generate its string,
+        // we need to add all the string literals that we are going to match
+        //
+        outputFileST.add("literals", strings);
+        String fileName = generator.getRecognizerFileName(grammar.name, grammar.type);
+        generator.write(outputFileST, fileName);
+    }
+
+    @Override
+    protected void genRecognizerHeaderFile(Tool tool,
+            CodeGenerator generator,
+            Grammar grammar,
+            ST headerFileST,
+            String extName)
+            throws IOException {
+        // Pick up the file name we are generating. This method will return a
+        // file suffixed with .c, so we must substring and add the extName
+        // to it, as we cannot assign into strings in Java.
+        //
+        String fileName = generator.getRecognizerFileName(grammar.name, grammar.type);
+        fileName = fileName.substring(0, fileName.length() - 2) + extName;
+
+        generator.write(headerFileST, fileName);
+    }
+
+    protected ST chooseWhereCyclicDFAsGo(Tool tool,
+            CodeGenerator generator,
+            Grammar grammar,
+            ST recognizerST,
+            ST cyclicDFAST) {
+        return recognizerST;
+    }
+
+    /** Is scope in @scope::name {action} valid for this kind of grammar?
+     *  Targets like C++ may want to allow new scopes like headerfile or
+     *  some such.  The action names themselves are not policed at the
+     *  moment so targets can add template actions w/o having to recompile
+     *  ANTLR.
+     */
+    @Override
+    public boolean isValidActionScope(int grammarType, String scope) {
+        switch (grammarType) {
+            case Grammar.LEXER:
+                if (scope.equals("lexer")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                break;
+            case Grammar.PARSER:
+                if (scope.equals("parser")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                break;
+            case Grammar.COMBINED:
+                if (scope.equals("parser")) {
+                    return true;
+                }
+                if (scope.equals("lexer")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                break;
+            case Grammar.TREE_PARSER:
+                if (scope.equals("treeparser")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                break;
+        }
+        return false;
+    }
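+
+    // For illustration (a hedged sketch of grammar usage, not something this
+    // class emits): the scopes accepted above are written in a C-target grammar
+    // as, e.g.,
+    //   @parser::includes { #include "MyHelpers.h" }
+    //   @lexer::header    { /* text placed near the top of the generated lexer */ }
+    // where MyHelpers.h is a hypothetical user-supplied header.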
+
+    @Override
+    public String getTargetCharLiteralFromANTLRCharLiteral(
+            CodeGenerator generator,
+            String literal) {
+
+        if (literal.startsWith("'\\u")) {
+            literal = "0x" + literal.substring(3, 7);
+        } else {
+            int c = literal.charAt(1);
+
+            if (c < 32 || c > 127) {
+                literal = "0x" + Integer.toHexString(c);
+            }
+        }
+
+        return literal;
+    }
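+
+    // Worked examples (a sketch of the mapping above; values traced by hand, not
+    // verified against generated output):
+    //   'a'      -> 'a'      (printable ASCII passes through unchanged)
+    //   '\u00E9' -> 0x00E9   (a Unicode escape becomes a hex constant)
+    //   a raw character outside 32..127 at position 1 -> 0x<lower-case hex value>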
+
+    /** Convert from an ANTLR string literal found in a grammar file to
+     *  an equivalent string literal in the C target.
+     *  Because we must support Unicode character sets and have chosen
+     *  to have the lexer match UTF32 characters, we must encode
+     *  string matches as 32 bit character arrays. Here we
+     *  produce the C array and cater for the case where the
+     *  lexer has been given a string such as 'xyz\n'.
+     */
+    @Override
+    public String getTargetStringLiteralFromANTLRStringLiteral(
+            CodeGenerator generator,
+            String literal) {
+        int index;
+        String bytes;
+        StringBuilder buf = new StringBuilder();
+
+        buf.append("{ ");
+
+        // We need to lose any escaped characters of the form \x and just
+        // replace them with their actual values, as well as lose the
+        // surrounding quote marks.
+        //
+        for (int i = 1; i < literal.length() - 1; i++) {
+            buf.append("0x");
+
+            if (literal.charAt(i) == '\\') {
+                i++; // Assume that there is a next character; if there is not, this
+                // just yields an invalid string, which mirrors the (invalid) input.
+                switch (literal.charAt(i)) {
+                    case 'u':
+                    case 'U':
+                        buf.append(literal.substring(i + 1, i + 5));  // Already a hex string
+                        i = i + 5;                                // Move to next string/char/escape
+                        break;
+
+                    case 'n':
+                    case 'N':
+
+                        buf.append("0A");
+                        break;
+
+                    case 'r':
+                    case 'R':
+
+                        buf.append("0D");
+                        break;
+
+                    case 't':
+                    case 'T':
+
+                        buf.append("09");
+                        break;
+
+                    case 'b':
+                    case 'B':
+
+                        buf.append("08");
+                        break;
+
+                    case 'f':
+                    case 'F':
+
+                        buf.append("0C");
+                        break;
+
+                    default:
+
+                        // Anything else is what it is!
+                        //
+                        buf.append(Integer.toHexString((int) literal.charAt(i)).toUpperCase());
+                        break;
+                }
+            } else {
+                buf.append(Integer.toHexString((int) literal.charAt(i)).toUpperCase());
+            }
+            buf.append(", ");
+        }
+        buf.append(" ANTLR3_STRING_TERMINATOR}");
+
+        bytes = buf.toString();
+        index = strings.indexOf(bytes);
+
+        if (index == -1) {
+            strings.add(bytes);
+            index = strings.indexOf(bytes);
+        }
+
+        String strref = "lit_" + String.valueOf(index + 1);
+
+        return strref;
+    }
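+
+    // Worked example (a sketch of the conversion above, traced by hand rather than
+    // taken from generated output): the grammar literal 'ab\n' would become roughly
+    //   { 0x61, 0x62, 0x0A,  ANTLR3_STRING_TERMINATOR}
+    // stored once in the literals table and referenced as lit_1 (or a later index
+    // if other literals were added first).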
+
+    /**
+     * Overrides the standard grammar analysis so we can prepare the analyzer
+     * a little differently from the other targets.
+     *
+     * In particular we want to influence the way the code generator makes assumptions about
+     * switches vs. ifs vs. table-driven DFAs. In general, C code should be generated that
+     * makes minimum use of tables and maximum use of large switch statements. This
+     * allows the optimizers to generate very efficient code; it can reduce object code size
+     * by about 30% and give about a 20% performance improvement over not doing this. Hence,
+     * for the C target only, we change the defaults here, but only if they are still set to the
+     * defaults.
+     *
+     * @param generator An instance of the generic code generator class.
+     * @param grammar The grammar that we are currently analyzing
+     */
+    @Override
+    protected void performGrammarAnalysis(CodeGenerator generator, Grammar grammar) {
+
+        // Check to see if the maximum inline DFA states is still set to
+        // the default size. If it is then whack it all the way up to the maximum that
+        // we can sensibly get away with.
+        //
+        if (CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE == CodeGenerator.MADSI_DEFAULT ) {
+
+            CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = 65535;
+        }
+
+        // Check to see if the maximum switch size is still set to the default
+        // and bring it up much higher if it is. Modern C compilers can handle
+        // much bigger switch statements than, say, Java can, and if anyone finds a compiler
+        // that cannot deal with such big switches, all they need do is generate the
+        // code with a reduced -Xmaxswitchcaselabels nnn
+        //
+        if  (CodeGenerator.MAX_SWITCH_CASE_LABELS == CodeGenerator.MSCL_DEFAULT) {
+
+            CodeGenerator.MAX_SWITCH_CASE_LABELS = 3000;
+        }
+
+        // Check to see if the number of transitions considered a minimum for using
+        // a switch is still at the default. Because a switch is still generally faster than
+        // an if even with small sets, and given that the optimizer will do the best thing with it
+        // anyway, we simply want to generate a switch for any number of states.
+        //
+        if (CodeGenerator.MIN_SWITCH_ALTS == CodeGenerator.MSA_DEFAULT) {
+
+            CodeGenerator.MIN_SWITCH_ALTS = 1;
+        }
+
+        // Now we allow the superclass implementation to do whatever it feels it
+        // must do.
+        //
+        super.performGrammarAnalysis(generator, grammar);
+    }
+}
+
diff --git a/tool/src/main/java/org/antlr/codegen/CodeGenerator.java b/tool/src/main/java/org/antlr/codegen/CodeGenerator.java
new file mode 100644
index 0000000..b279bd5
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/CodeGenerator.java
@@ -0,0 +1,1350 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+
+import org.antlr.Tool;
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.*;
+import org.antlr.grammar.v3.ANTLRLexer;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.grammar.v3.ActionTranslator;
+import org.antlr.grammar.v3.CodeGenTreeWalker;
+import org.antlr.misc.BitSet;
+import org.antlr.misc.*;
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.CommonTreeNodeStream;
+import org.antlr.tool.*;
+import org.stringtemplate.v4.*;
+
+import java.io.IOException;
+import java.io.Writer;
+import java.util.*;
+
+/** ANTLR's code generator.
+ *
+ *  Generate recognizers derived from grammars.  Language independence
+ *  achieved through the use of STGroup objects.  All output
+ *  strings are completely encapsulated in the group files such as Java.stg.
+ *  Some computations are done that are unused by a particular language.
+ *  This generator just computes and sets the values into the templates;
+ *  the templates are free to use or not use the information.
+ *
+ *  To make a new code generation target, define X.stg for language X
+ *  by copying from an existing Y.stg most closely related to your language;
+ *  e.g., to do CSharp.stg copy Java.stg.  The template group file has a
+ *  bunch of templates that are needed by the code generator.  You can add
+ *  a new target w/o even recompiling ANTLR itself.  The language=X option
+ *  in a grammar file dictates which templates get loaded/used.
+ *
+ *  Some languages like C need both parser files and header files.  Java needs
+ *  to have a separate file for the cyclic DFA as ANTLR generates bytecodes
+ *  directly (which cannot be in the generated parser Java file).  To facilitate
+ *  this, the cyclic DFA can live in the same file as the recognizer, but the
+ *  header and output files must be separate; the recognizer itself goes in the
+ *  output file.
+ */
+public class CodeGenerator {
+	/** When generating SWITCH statements, some targets might need to limit
+	 *  the size (based upon the number of case labels).  Generally, this
+	 *  limit will be hit only for lexers where a wildcard in a UNICODE
+	 *  vocabulary environment would generate a SWITCH with 65000 labels.
+	 */
+	public final static int MSCL_DEFAULT = 300;
+	public static int MAX_SWITCH_CASE_LABELS = MSCL_DEFAULT;
+	public final static int MSA_DEFAULT = 3;
+	public static int MIN_SWITCH_ALTS = MSA_DEFAULT;
+	public boolean GENERATE_SWITCHES_WHEN_POSSIBLE = true;
+	public static boolean LAUNCH_ST_INSPECTOR = false;
+	public final static int MADSI_DEFAULT = 60; // do lots of states inline (needed for expression rules)
+	public static int MAX_ACYCLIC_DFA_STATES_INLINE = MADSI_DEFAULT;
+
+	public static String classpathTemplateRootDirectoryName =
+		"org/antlr/codegen/templates";
+
+	/** Which grammar are we generating code for?  Each generator
+	 *  is attached to a specific grammar.
+	 */
+	public Grammar grammar;
+
+	/** What language are we generating? */
+	protected String language;
+
+	/** The target specifies how to write out files and do other language
+	 *  specific actions.
+	 */
+	public Target target = null;
+
+	/** Where are the templates this generator should use to generate code? */
+	protected STGroup templates;
+
+	/** The basic output templates without AST or templates stuff; this will be
+	 *  the templates loaded for the language such as Java.stg *and* the Dbg
+	 *  stuff if turned on.  This is used for generating syntactic predicates.
+	 */
+	protected STGroup baseTemplates;
+
+	protected ST recognizerST;
+	protected ST outputFileST;
+	protected ST headerFileST;
+
+	/** Used to create unique labels */
+	protected int uniqueLabelNumber = 1;
+
+	/** A reference to the ANTLR tool so we can learn about output directories
+	 *  and such.
+	 */
+	protected Tool tool;
+
+	/** Generate debugging event method calls */
+	protected boolean debug;
+
+	/** Create a Tracer object and make the recognizer invoke this. */
+	protected boolean trace;
+
+	/** Track runtime parsing information about decisions etc...
+	 *  This requires the debugging event mechanism to work.
+	 */
+	protected boolean profile;
+
+	protected int lineWidth = 72;
+
+	/** I have factored out the generation of acyclic DFAs to a separate class */
+	public ACyclicDFACodeGenerator acyclicDFAGenerator =
+		new ACyclicDFACodeGenerator(this);
+
+	/** I have factored out the generation of cyclic DFAs to a separate class */
+	/*
+	public CyclicDFACodeGenerator cyclicDFAGenerator =
+		new CyclicDFACodeGenerator(this);
+		*/
+
+	public static final String VOCAB_FILE_EXTENSION = ".tokens";
+	protected final static String vocabFilePattern =
+		"<tokens:{it|<it.name>=<it.type>\n}>" +
+		"<literals:{it|<it.name>=<it.type>\n}>";
+
+	public CodeGenerator(Tool tool, Grammar grammar, String language) {
+		this.tool = tool;
+		this.grammar = grammar;
+		this.language = language;
+		target = loadLanguageTarget(language);
+	}
+
+	public static Target loadLanguageTarget(String language) {
+		Target target = null;
+		String targetName = "org.antlr.codegen."+language+"Target";
+		try {
+			Class<? extends Target> c = Class.forName(targetName).asSubclass(Target.class);
+			target = (Target)c.newInstance();
+		}
+		catch (ClassNotFoundException cnfe) {
+			target = new Target(); // use default
+		}
+		catch (InstantiationException ie) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_CREATE_TARGET_GENERATOR,
+							   targetName,
+							   ie);
+		}
+		catch (IllegalAccessException cnfe) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_CREATE_TARGET_GENERATOR,
+							   targetName,
+							   cnfe);
+		}
+		return target;
+	}
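+
+	// For example (a sketch of the lookup above): language "C" resolves to
+	// org.antlr.codegen.CTarget and "Cpp" to org.antlr.codegen.CppTarget, while an
+	// unrecognized language name falls back to the generic Target defaults.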
+
+	/** load the main language.stg template group file */
+	public void loadTemplates(String language) {
+		String langDir = classpathTemplateRootDirectoryName+"/"+language;
+		STGroup coreTemplates = new ToolSTGroupFile(langDir+"/"+language+".stg");
+		baseTemplates = coreTemplates;
+
+		// dynamically add subgroups that act like filters to apply to
+		// their supergroup.  E.g., Java:Dbg:AST:ASTParser::ASTDbg.
+		String outputOption = (String)grammar.getOption("output");
+		if ( outputOption!=null && outputOption.equals("AST") ) {
+			if ( debug && grammar.type!=Grammar.LEXER ) {
+				STGroup dbgTemplates = new ToolSTGroupFile(langDir+"/Dbg.stg");
+				dbgTemplates.importTemplates(coreTemplates);
+				baseTemplates = dbgTemplates;
+				STGroup astTemplates = new ToolSTGroupFile(langDir+"/AST.stg");
+				astTemplates.importTemplates(dbgTemplates);
+				STGroup astParserTemplates;
+				if ( grammar.type==Grammar.TREE_PARSER ) {
+					astParserTemplates = new ToolSTGroupFile(langDir+"/ASTTreeParser.stg");
+					astParserTemplates.importTemplates(astTemplates);
+				}
+				else {
+					astParserTemplates = new ToolSTGroupFile(langDir+"/ASTParser.stg");
+					astParserTemplates.importTemplates(astTemplates);
+				}
+				STGroup astDbgTemplates = new ToolSTGroupFile(langDir+"/ASTDbg.stg");
+				astDbgTemplates.importTemplates(astParserTemplates);
+				templates = astDbgTemplates;
+				dbgTemplates.iterateAcrossValues = true; // ST v3 compatibility with Maps
+				astDbgTemplates.iterateAcrossValues = true;
+				astParserTemplates.iterateAcrossValues = true;
+			}
+			else {
+				STGroup astTemplates = new ToolSTGroupFile(langDir+"/AST.stg");
+				astTemplates.importTemplates(coreTemplates);
+				STGroup astParserTemplates;
+				if ( grammar.type==Grammar.TREE_PARSER ) {
+					astParserTemplates = new ToolSTGroupFile(langDir+"/ASTTreeParser.stg");
+					astParserTemplates.importTemplates(astTemplates);
+				}
+				else {
+					astParserTemplates = new ToolSTGroupFile(langDir+"/ASTParser.stg");
+					astParserTemplates.importTemplates(astTemplates);
+				}
+				templates = astParserTemplates;
+				astTemplates.iterateAcrossValues = true; // ST v3 compatibility with Maps
+				astParserTemplates.iterateAcrossValues = true;
+			}
+		}
+		else if ( outputOption!=null && outputOption.equals("template") ) {
+			if ( debug && grammar.type!=Grammar.LEXER ) {
+				STGroup dbgTemplates = new ToolSTGroupFile(langDir+"/Dbg.stg");
+				dbgTemplates.importTemplates(coreTemplates);
+				baseTemplates = dbgTemplates;
+				STGroup stTemplates = new ToolSTGroupFile(langDir+"/ST.stg");
+				stTemplates.importTemplates(dbgTemplates);
+				templates = stTemplates;
+				dbgTemplates.iterateAcrossValues = true;
+			}
+			else {
+				STGroup stTemplates = new ToolSTGroupFile(langDir+"/ST.stg");
+				stTemplates.importTemplates(coreTemplates);
+				templates = stTemplates;
+			}
+			templates.iterateAcrossValues = true; // ST v3 compatibility with Maps
+		}
+		else if ( debug && grammar.type!=Grammar.LEXER ) {
+			STGroup dbgTemplates = new ToolSTGroupFile(langDir+"/Dbg.stg");
+			dbgTemplates.importTemplates(coreTemplates);
+			templates = dbgTemplates;
+			baseTemplates = templates;
+			baseTemplates.iterateAcrossValues = true; // ST v3 compatibility with Maps
+		}
+		else {
+			templates = coreTemplates;
+			coreTemplates.iterateAcrossValues = true; // ST v3 compatibility with Maps
+		}
+	}
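+
+	// For example (reading the branches above, not an exhaustive list): a debug
+	// build of a Java parser grammar with output=AST loads Java/Java.stg, Java/Dbg.stg,
+	// Java/AST.stg, Java/ASTParser.stg and Java/ASTDbg.stg, each layered on the
+	// previous one via importTemplates().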
+
+	/** Given the grammar to which we are attached, walk the AST associated
+	 *  with that grammar to create NFAs.  Then create the DFAs for all
+	 *  decision points in the grammar by converting the NFAs to DFAs.
+	 *  Finally, walk the AST again to generate code.
+	 *
+	 *  Either 1 or 2 files are written:
+	 *
+	 * 		recognizer: the main parser/lexer/treewalker item
+	 * 		header file: language like C/C++ need extern definitions
+	 *
+	 *  The target, such as JavaTarget, dictates which files get written.
+	 */
+	public ST genRecognizer() {
+		//System.out.println("### generate "+grammar.name+" recognizer");
+		// LOAD OUTPUT TEMPLATES
+		loadTemplates(language);
+		if ( templates==null ) {
+			return null;
+		}
+
+		// CREATE NFA FROM GRAMMAR, CREATE DFA FROM NFA
+		if ( ErrorManager.doNotAttemptAnalysis() ) {
+			return null;
+		}
+		target.performGrammarAnalysis(this, grammar);
+
+
+		// some grammar analysis errors will not yield reliable DFA
+		if ( ErrorManager.doNotAttemptCodeGen() ) {
+			return null;
+		}
+
+		// OPTIMIZE DFA
+		DFAOptimizer optimizer = new DFAOptimizer(grammar);
+		optimizer.optimize();
+
+		// OUTPUT FILE (contains recognizerST)
+		outputFileST = templates.getInstanceOf("outputFile");
+
+		// HEADER FILE
+		if ( templates.isDefined("headerFile") ) {
+			headerFileST = templates.getInstanceOf("headerFile");
+		}
+		else {
+			// create a dummy to avoid null-checks all over code generator
+			headerFileST = new ST(templates,"xyz");
+			headerFileST.add("cyclicDFAs", (Object)null); // it normally sees this from outputFile
+			//headerFileST.impl.name = "dummy-header-file";
+		}
+
+		boolean filterMode = grammar.getOption("filter")!=null &&
+							  grammar.getOption("filter").equals("true");
+        boolean canBacktrack = grammar.getSyntacticPredicates()!=null ||
+                               grammar.composite.getRootGrammar().atLeastOneBacktrackOption ||
+                               filterMode;
+
+        // TODO: move this down further because generating the recognizer
+		// alters the model with info on who uses predefined properties etc...
+		// The actions here might refer to something.
+
+		// The only two possible output files are available at this point.
+		// Verify action scopes are ok for target and dump actions into output
+		// Templates can say <actions.parser.header> for example.
+		Map<String, Map<String, Object>> actions = grammar.getActions();
+		verifyActionScopesOkForTarget(actions);
+		// translate $x::y references
+		translateActionAttributeReferences(actions);
+
+        ST gateST = templates.getInstanceOf("actionGate");
+        if ( filterMode ) {
+            // if filtering, we need to set actions to execute at backtracking
+            // level 1 not 0.
+            gateST = templates.getInstanceOf("filteringActionGate");
+        }
+        grammar.setSynPredGateIfNotAlready(gateST);
+
+        headerFileST.add("actions", actions);
+		outputFileST.add("actions", actions);
+
+		headerFileST.add("buildTemplate", grammar.buildTemplate());
+		outputFileST.add("buildTemplate", grammar.buildTemplate());
+		headerFileST.add("buildAST", grammar.buildAST());
+		outputFileST.add("buildAST", grammar.buildAST());
+
+		outputFileST.add("rewriteMode", grammar.rewriteMode());
+		headerFileST.add("rewriteMode", grammar.rewriteMode());
+
+		outputFileST.add("backtracking", canBacktrack);
+		headerFileST.add("backtracking", canBacktrack);
+		// turn on memoize attribute at grammar level so we can create ruleMemo.
+		// each rule has memoize attr that hides this one, indicating whether
+		// it needs to save results
+		String memoize = (String)grammar.getOption("memoize");
+		outputFileST.add("memoize",
+						 (grammar.atLeastOneRuleMemoizes ||
+						  memoize != null && memoize.equals("true") &&
+						  canBacktrack));
+		headerFileST.add("memoize",
+						 (grammar.atLeastOneRuleMemoizes ||
+						  memoize != null && memoize.equals("true") &&
+						  canBacktrack));
+
+
+		outputFileST.add("trace", trace);
+		headerFileST.add("trace", trace);
+
+		outputFileST.add("profile", profile);
+		headerFileST.add("profile", profile);
+
+		// RECOGNIZER
+		if ( grammar.type==Grammar.LEXER ) {
+			recognizerST = templates.getInstanceOf("lexer");
+			outputFileST.add("LEXER", true);
+			headerFileST.add("LEXER", true);
+			recognizerST.add("filterMode",
+							 filterMode);
+		}
+		else if ( grammar.type==Grammar.PARSER ||
+			grammar.type==Grammar.COMBINED )
+		{
+			recognizerST = templates.getInstanceOf("parser");
+			outputFileST.add("PARSER", true);
+			headerFileST.add("PARSER", true);
+		}
+		else {
+			recognizerST = templates.getInstanceOf("treeParser");
+			outputFileST.add("TREE_PARSER", true);
+			headerFileST.add("TREE_PARSER", true);
+            recognizerST.add("filterMode",
+							 filterMode);
+		}
+		outputFileST.add("recognizer", recognizerST);
+		headerFileST.add("recognizer", recognizerST);
+		outputFileST.add("actionScope",
+						 grammar.getDefaultActionScope(grammar.type));
+		headerFileST.add("actionScope",
+						 grammar.getDefaultActionScope(grammar.type));
+
+		String targetAppropriateFileNameString =
+			target.getTargetStringLiteralFromString(grammar.getFileName());
+		outputFileST.add("fileName", targetAppropriateFileNameString);
+		headerFileST.add("fileName", targetAppropriateFileNameString);
+		outputFileST.add("ANTLRVersion", tool.VERSION);
+		headerFileST.add("ANTLRVersion", tool.VERSION);
+		outputFileST.add("generatedTimestamp", Tool.getCurrentTimeStamp());
+		headerFileST.add("generatedTimestamp", Tool.getCurrentTimeStamp());
+
+		// GENERATE RECOGNIZER
+		// Walk the AST holding the input grammar, this time generating code
+		// Decisions are generated by using the precomputed DFAs
+		// Fill in the various templates with data
+		CodeGenTreeWalker gen = new CodeGenTreeWalker(new CommonTreeNodeStream(grammar.getGrammarTree()));
+		try {
+			gen.grammar_(
+						grammar,
+						recognizerST,
+						outputFileST,
+						headerFileST);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+							   re);
+		}
+
+		genTokenTypeConstants(recognizerST);
+		genTokenTypeConstants(outputFileST);
+		genTokenTypeConstants(headerFileST);
+
+		if ( grammar.type!=Grammar.LEXER ) {
+			genTokenTypeNames(recognizerST);
+			genTokenTypeNames(outputFileST);
+			genTokenTypeNames(headerFileST);
+		}
+
+		// Now that we know what synpreds are used, we can set into template
+		Set<String> synpredNames = null;
+		if ( grammar.synPredNamesUsedInDFA.size()>0 ) {
+			synpredNames = grammar.synPredNamesUsedInDFA;
+		}
+		outputFileST.add("synpreds", synpredNames);
+		headerFileST.add("synpreds", synpredNames);
+
+		// all recognizers can see Grammar object
+		recognizerST.add("grammar", grammar);
+
+		// do not render templates to disk if errors occurred
+		if ( ErrorManager.getErrorState().errors > 0 ) {
+			return null;
+		}
+
+		if (LAUNCH_ST_INSPECTOR) {
+			outputFileST.inspect();
+			if ( templates.isDefined("headerFile") ) headerFileST.inspect();
+		}
+
+		// WRITE FILES
+		try {
+			target.genRecognizerFile(tool,this,grammar,outputFileST);
+			if ( templates.isDefined("headerFile") ) {
+				ST extST = templates.getInstanceOf("headerFileExtension");
+				target.genRecognizerHeaderFile(tool,this,grammar,headerFileST,extST.render());
+			}
+			// write out the vocab interchange file; used by antlr,
+			// does not change per target
+			ST tokenVocabSerialization = genTokenVocabOutput();
+			String vocabFileName = getVocabFileName();
+			if ( vocabFileName!=null ) {
+				write(tokenVocabSerialization, vocabFileName);
+			}
+			//System.out.println(outputFileST.getDOTForDependencyGraph(false));
+		}
+		catch (IOException ioe) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, ioe);
+		}
+		/*
+		System.out.println("num obj.prop refs: "+ ASTExpr.totalObjPropRefs);
+		System.out.println("num reflection lookups: "+ ASTExpr.totalReflectionLookups);
+		*/
+
+		return outputFileST;
+	}
+
+	/** Some targets will have extra scopes; C++, for example, may have
+	 *  '@headerfile:name {action}' or something similar.  Make sure the
+	 *  target likes the scopes in the action table.
+	 */
+	protected void verifyActionScopesOkForTarget(Map<String, Map<String, Object>> actions) {
+		for (Map.Entry<String, Map<String, Object>> entry : actions.entrySet()) {
+			String scope = entry.getKey();
+			if ( !target.isValidActionScope(grammar.type, scope) ) {
+				// get any action from the scope to get error location
+				Map<String, Object> scopeActions = entry.getValue();
+				GrammarAST actionAST =
+					(GrammarAST)scopeActions.values().iterator().next();
+				ErrorManager.grammarError(
+					ErrorManager.MSG_INVALID_ACTION_SCOPE,grammar,
+					actionAST.getToken(),scope,
+					grammar.getGrammarTypeString());
+			}
+		}
+	}
+
+	/** Actions may reference $x::y attributes, call translateAction on
+	 *  each action and replace that action in the Map.
+	 */
+	protected void translateActionAttributeReferences(Map<String, Map<String, Object>> actions) {
+		for (Map.Entry<String, Map<String, Object>> entry : actions.entrySet()) {
+			Map<String, Object> scopeActions = entry.getValue();
+			translateActionAttributeReferencesForSingleScope(null,scopeActions);
+		}
+	}
+
+	/** Use for translating rule @init{...} actions that have no scope */
+	public void translateActionAttributeReferencesForSingleScope(
+		Rule r,
+		Map<String, Object> scopeActions)
+	{
+		String ruleName=null;
+		if ( r!=null ) {
+			ruleName = r.name;
+		}
+		for (Map.Entry<String, Object> entry : scopeActions.entrySet()) {
+			String name = entry.getKey();
+			GrammarAST actionAST = (GrammarAST)entry.getValue();
+			List<?> chunks = translateAction(ruleName,actionAST);
+			scopeActions.put(name, chunks); // replace with translation
+		}
+	}
+
+	/** Error recovery in ANTLR recognizers.
+	 *
+	 *  Based upon original ideas:
+	 *
+	 *  Algorithms + Data Structures = Programs by Niklaus Wirth
+	 *
+	 *  and
+	 *
+	 *  A note on error recovery in recursive descent parsers:
+	 *  http://portal.acm.org/citation.cfm?id=947902.947905
+	 *
+	 *  Later, Josef Grosch had some good ideas:
+	 *  Efficient and Comfortable Error Recovery in Recursive Descent Parsers:
+	 *  ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+	 *
+	 *  Like Grosch I implemented local FOLLOW sets that are combined at run-time
+	 *  upon error to avoid parsing overhead.
+	 */
+	public void generateLocalFOLLOW(GrammarAST referencedElementNode,
+									String referencedElementName,
+									String enclosingRuleName,
+									int elementIndex)
+	{
+		/*
+		System.out.println("compute FOLLOW "+grammar.name+"."+referencedElementNode.toString()+
+						 " for "+referencedElementName+"#"+elementIndex +" in "+
+						 enclosingRuleName+
+						 " line="+referencedElementNode.getLine());
+						 */
+		NFAState followingNFAState = referencedElementNode.followingNFAState;
+		LookaheadSet follow = null;
+		if ( followingNFAState!=null ) {
+			// compute follow for this element and, as side-effect, track
+			// the rule LOOK sensitivity.
+			follow = grammar.FIRST(followingNFAState);
+		}
+
+		if ( follow==null ) {
+			ErrorManager.internalError("no follow state or cannot compute follow");
+			follow = new LookaheadSet();
+		}
+		if ( follow.member(Label.EOF) ) {
+			// TODO: can we just remove?  Seems needed here:
+			// compilation_unit : global_statement* EOF
+			// Actually I guess we resync to EOF regardless
+			follow.remove(Label.EOF);
+		}
+		//System.out.println(" "+follow);
+
+        List<Integer> tokenTypeList;
+        long[] words;
+		if ( follow.tokenTypeSet==null ) {
+			words = new long[1];
+            tokenTypeList = new ArrayList<Integer>();
+        }
+		else {
+			BitSet bits = BitSet.of(follow.tokenTypeSet);
+			words = bits.toPackedArray();
+            tokenTypeList = follow.tokenTypeSet.toList();
+        }
+		// use the target to convert to hex strings (typically)
+		String[] wordStrings = new String[words.length];
+		for (int j = 0; j < words.length; j++) {
+			long w = words[j];
+			wordStrings[j] = target.getTarget64BitStringFromValue(w);
+		}
+		recognizerST.addAggr("bitsets.{name,inName,bits,tokenTypes,tokenIndex}",
+							 referencedElementName,
+							 enclosingRuleName,
+							 wordStrings,
+							 tokenTypeList,
+							 Utils.integer(elementIndex));
+		outputFileST.addAggr("bitsets.{name,inName,bits,tokenTypes,tokenIndex}",
+							 referencedElementName,
+							 enclosingRuleName,
+							 wordStrings,
+							 tokenTypeList,
+							 Utils.integer(elementIndex));
+		headerFileST.addAggr("bitsets.{name,inName,bits,tokenTypes,tokenIndex}",
+							 referencedElementName,
+							 enclosingRuleName,
+							 wordStrings,
+							 tokenTypeList,
+							 Utils.integer(elementIndex));
+	}
+
+	// L O O K A H E A D  D E C I S I O N  G E N E R A T I O N
+
+	/** Generate code that computes the predicted alt given a DFA.  The
+	 *  recognizerST can be either the main generated recognizerTemplate
+	 *  for storage in the main parser file or a separate file.  It's up to
+	 *  the code that ultimately invokes the codegen.g grammar rule.
+	 *
+	 *  Regardless, the output file and header file get a copy of the DFAs.
+	 */
+	public ST genLookaheadDecision(ST recognizerST,
+								   DFA dfa)
+	{
+		ST decisionST;
+		// If we are doing inline DFA and this one is acyclic and LL(*)
+		// I have to check for is-non-LL(*) because if non-LL(*) the cyclic
+		// check is not done by DFA.verify(); that is, verify() avoids
+		// doesStateReachAcceptState() if non-LL(*)
+		if ( dfa.canInlineDecision() ) {
+			decisionST =
+				acyclicDFAGenerator.genFixedLookaheadDecision(getTemplates(), dfa);
+		}
+		else {
+			// generate any kind of DFA here (cyclic or acyclic)
+			dfa.createStateTables(this);
+			outputFileST.add("cyclicDFAs", dfa);
+			headerFileST.add("cyclicDFAs", dfa);
+			decisionST = templates.getInstanceOf("dfaDecision");
+			String description = dfa.getNFADecisionStartState().getDescription();
+			description = target.getTargetStringLiteralFromString(description);
+			if ( description!=null ) {
+				decisionST.add("description", description);
+			}
+			decisionST.add("decisionNumber",
+						   Utils.integer(dfa.getDecisionNumber()));
+		}
+		return decisionST;
+	}
+
+	/** A special state is huge (too big for state tables) or has a predicated
+	 *  edge.  Generate a simple if-then-else.  Cannot be an accept state as
+	 *  they have no emanating edges.  Don't worry about switch vs if-then-else
+	 *  because if you get here, the state is super complicated and needs an
+	 *  if-then-else.  This is used by the new DFA scheme created June 2006.
+	 */
+	public ST generateSpecialState(DFAState s) {
+		ST stateST;
+		stateST = templates.getInstanceOf("cyclicDFAState");
+		stateST.add("needErrorClause", true);
+		stateST.add("semPredState",
+					s.isResolvedWithPredicates());
+		stateST.add("stateNumber", s.stateNumber);
+		stateST.add("decisionNumber", s.dfa.decisionNumber);
+
+		boolean foundGatedPred = false;
+		ST eotST = null;
+		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+			Transition edge = s.transition(i);
+			ST edgeST;
+			if ( edge.label.getAtom()==Label.EOT ) {
+				// this is the default clause; it has to be held until last
+				edgeST = templates.getInstanceOf("eotDFAEdge");
+				stateST.remove("needErrorClause");
+				eotST = edgeST;
+			}
+			else {
+				edgeST = templates.getInstanceOf("cyclicDFAEdge");
+				ST exprST =
+					genLabelExpr(templates,edge,1);
+				edgeST.add("labelExpr", exprST);
+			}
+			edgeST.add("edgeNumber", Utils.integer(i + 1));
+			edgeST.add("targetStateNumber",
+					   Utils.integer(edge.target.stateNumber));
+			// stick in any gated predicates for any edge if not already a pred
+			if ( !edge.label.isSemanticPredicate() ) {
+				DFAState t = (DFAState)edge.target;
+				SemanticContext preds =	t.getGatedPredicatesInNFAConfigurations();
+				if ( preds!=null ) {
+					foundGatedPred = true;
+					ST predST = preds.genExpr(this,
+														  getTemplates(),
+														  t.dfa);
+					edgeST.add("predicates", predST.render());
+				}
+			}
+			if ( edge.label.getAtom()!=Label.EOT ) {
+				stateST.add("edges", edgeST);
+			}
+		}
+		if ( foundGatedPred ) {
+			// state has >= 1 edge with a gated pred (syn or sem)
+			// must rewind input first, set flag.
+			stateST.add("semPredState", foundGatedPred);
+		}
+		if ( eotST!=null ) {
+			stateST.add("edges", eotST);
+		}
+		return stateST;
+	}
+
+	/** Generate an expression for traversing an edge. */
+	protected ST genLabelExpr(STGroup templates,
+										  Transition edge,
+										  int k)
+	{
+		Label label = edge.label;
+		if ( label.isSemanticPredicate() ) {
+			return genSemanticPredicateExpr(templates, edge);
+		}
+		if ( label.isSet() ) {
+			return genSetExpr(templates, label.getSet(), k, true);
+		}
+		// must be simple label
+		ST eST = templates.getInstanceOf("lookaheadTest");
+		eST.add("atom", getTokenTypeAsTargetLabel(label.getAtom()));
+		eST.add("atomAsInt", Utils.integer(label.getAtom()));
+		eST.add("k", Utils.integer(k));
+		return eST;
+	}
+
+	protected ST genSemanticPredicateExpr(STGroup templates,
+													  Transition edge)
+	{
+		DFA dfa = ((DFAState)edge.target).dfa; // which DFA are we in
+		Label label = edge.label;
+		SemanticContext semCtx = label.getSemanticContext();
+		return semCtx.genExpr(this,templates,dfa);
+	}
+
+	/** For intervals such as [3..3, 30..35], generate an expression that
+	 *  tests the lookahead similar to LA(1)==3 || (LA(1)&gt;=30&amp;&amp;LA(1)&lt;=35)
+	 */
+	public ST genSetExpr(STGroup templates,
+									 IntSet set,
+									 int k,
+									 boolean partOfDFA)
+	{
+		if ( !(set instanceof IntervalSet) ) {
+			throw new IllegalArgumentException("unable to generate expressions for non IntervalSet objects");
+		}
+		IntervalSet iset = (IntervalSet)set;
+		if ( iset.getIntervals()==null || iset.getIntervals().isEmpty() ) {
+			ST emptyST = new ST(templates, "");
+			emptyST.impl.name = "empty-set-expr";
+			return emptyST;
+		}
+		String testSTName = "lookaheadTest";
+		String testRangeSTName = "lookaheadRangeTest";
+		String testSetSTName = "lookaheadSetTest";
+		String varSTName = "lookaheadVarName";
+		if ( !partOfDFA ) {
+			testSTName = "isolatedLookaheadTest";
+			testRangeSTName = "isolatedLookaheadRangeTest";
+			testSetSTName = "isolatedLookaheadSetTest";
+			varSTName = "isolatedLookaheadVarName";
+		}
+		ST setST = templates.getInstanceOf("setTest");
+		// If the SetTest template exists, separate the ranges:
+		// flatten the small ones into one list and test that as a set,
+		// and leave the larger ones as range tests.
+		if ( templates.isDefined(testSetSTName) ) {
+			// Flatten the IntervalSet into a list of integers.
+			ST sST = templates.getInstanceOf(testSetSTName);
+			Iterator<Interval> iter = iset.getIntervals().iterator();
+			int rangeNumber = 1;
+			while (iter.hasNext()) {
+				Interval I = iter.next();
+				int a = I.a;
+				int b = I.b;
+				// Not flattening the large ranges helps us avoid making a
+				// set that contains 90% of Unicode when we could just use
+				// a simple range like (LA(1)>=123 && LA(1)<=65535).
+				// This flattens all ranges of length 4 or less.
+				if (b - a < 4) {
+					for (int i = a; i <= b; i++) {
+						sST.add("values", getTokenTypeAsTargetLabel(i));
+						sST.add("valuesAsInt", Utils.integer(i));
+					}
+				} else {
+					ST eST = templates.getInstanceOf(testRangeSTName);
+					eST.add("lower", getTokenTypeAsTargetLabel(a));
+					eST.add("lowerAsInt", Utils.integer(a));
+					eST.add("upper", getTokenTypeAsTargetLabel(b));
+					eST.add("upperAsInt", Utils.integer(b));
+					eST.add("rangeNumber", Utils.integer(rangeNumber));
+					eST.add("k", Utils.integer(k));
+					setST.add("ranges", eST);
+					rangeNumber++;
+				}
+			}
+			sST.add("k", Utils.integer(k));
+			setST.add("ranges", sST);
+			return setST;
+		}
+		Iterator<Interval> iter = iset.getIntervals().iterator();
+		int rangeNumber = 1;
+		while (iter.hasNext()) {
+			Interval I = iter.next();
+			int a = I.a;
+			int b = I.b;
+			ST eST;
+			if ( a==b ) {
+				eST = templates.getInstanceOf(testSTName);
+				eST.add("atom", getTokenTypeAsTargetLabel(a));
+				eST.add("atomAsInt", Utils.integer(a));
+				//eST.add("k",Utils.integer(k));
+			}
+			else {
+				eST = templates.getInstanceOf(testRangeSTName);
+				eST.add("lower", getTokenTypeAsTargetLabel(a));
+				eST.add("lowerAsInt", Utils.integer(a));
+				eST.add("upper", getTokenTypeAsTargetLabel(b));
+				eST.add("upperAsInt", Utils.integer(b));
+				eST.add("rangeNumber", Utils.integer(rangeNumber));
+			}
+			eST.add("k", Utils.integer(k));
+			setST.add("ranges", eST);
+			rangeNumber++;
+		}
+		return setST;
+	}
+
+	// T O K E N  D E F I N I T I O N  G E N E R A T I O N
+
+	/** Set the tokens and literals attributes in the incoming
+	 *  code template.  This is not the token vocab interchange file, but
+	 *  rather a list of token type IDs needed by the recognizer.
+	 */
+	protected void genTokenTypeConstants(ST code) {
+		// make constants for the token types
+		for (String tokenID : grammar.getTokenIDs()) {
+			int tokenType = grammar.getTokenType(tokenID);
+			if ( tokenType==Label.EOF ||
+				 tokenType>=Label.MIN_TOKEN_TYPE )
+			{
+				// don't do FAUX labels 'cept EOF
+				code.addAggr("tokens.{name,type}", tokenID, Utils.integer(tokenType));
+			}
+		}
+	}
+
+	/** Generate a token names table that maps token type to a printable
+	 *  name: either the label like INT or the literal like "begin".
+	 */
+	protected void genTokenTypeNames(ST code) {
+		for (int t=Label.MIN_TOKEN_TYPE; t<=grammar.getMaxTokenType(); t++) {
+			String tokenName = grammar.getTokenDisplayName(t);
+			if ( tokenName!=null ) {
+				tokenName=target.getTargetStringLiteralFromString(tokenName, true);
+				code.add("tokenNames", tokenName);
+			}
+		}
+	}
+
+	/** Get a meaningful name for a token type useful during code generation.
+	 *  Literals without associated names are converted to the string equivalent
+	 *  of their integer values. Used to generate x==ID and x==34 type comparisons
+	 *  etc...  Essentially we are looking for the most obvious way to refer
+	 *  to a token type in the generated code.  If in the lexer, return the
+	 *  char literal translated to the target language.  For example, ttype=10
+	 *  will yield '\n' from the getTokenDisplayName method.  That must
+	 *  be converted to the target language's literals.  For most C-derived
+	 *  languages no translation is needed.
+	 */
+	public String getTokenTypeAsTargetLabel(int ttype) {
+		if ( grammar.type==Grammar.LEXER ) {
+			String name = grammar.getTokenDisplayName(ttype);
+			return target.getTargetCharLiteralFromANTLRCharLiteral(this,name);
+		}
+		return target.getTokenTypeAsTargetLabel(this,ttype);
+	}
+
+	/** Generate a token vocab file with all the token names/types.  For example:
+	 *  ID=7
+	 *  FOR=8
+	 *  'for'=8
+	 *
+	 *  This is independent of the target language; used by antlr internally
+	 */
+	protected ST genTokenVocabOutput() {
+		ST vocabFileST = new ST(vocabFilePattern);
+		vocabFileST.add("literals",(Object)null); // "define" literals arg
+		vocabFileST.add("tokens",(Object)null);
+		vocabFileST.impl.name = "vocab-file";
+		// make constants for the token names
+		for (String tokenID : grammar.getTokenIDs()) {
+			int tokenType = grammar.getTokenType(tokenID);
+			if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
+				vocabFileST.addAggr("tokens.{name,type}", tokenID, Utils.integer(tokenType));
+			}
+		}
+
+		// now dump the strings
+		for (String literal : grammar.getStringLiterals()) {
+			int tokenType = grammar.getTokenType(literal);
+			if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
+				vocabFileST.addAggr("tokens.{name,type}", literal, Utils.integer(tokenType));
+			}
+		}
+
+		return vocabFileST;
+	}
+
+	public List<? extends Object> translateAction(String ruleName,
+								GrammarAST actionTree)
+	{
+		if ( actionTree.getType()==ANTLRParser.ARG_ACTION ) {
+			return translateArgAction(ruleName, actionTree);
+		}
+		ActionTranslator translator = new ActionTranslator(this,ruleName,actionTree);
+		List<Object> chunks = translator.translateToChunks();
+		chunks = target.postProcessAction(chunks, actionTree.token);
+		return chunks;
+	}
+
+	/** Translate an action like [3,"foo",a[3]] and return a List of the
+	 *  translated actions.  Because actions are themselves translated to a list
+	 *  of chunks, they must be concatenated into a List&lt;ST&gt;.  Don't translate
+	 *  to strings early as we need to eval templates in context.
+	 */
+	public List<ST> translateArgAction(String ruleName,
+										   GrammarAST actionTree)
+	{
+		String actionText = actionTree.token.getText();
+		List<String> args = getListOfArgumentsFromAction(actionText,',');
+		List<ST> translatedArgs = new ArrayList<ST>();
+		for (String arg : args) {
+			if ( arg!=null ) {
+				Token actionToken =
+					new CommonToken(ANTLRParser.ACTION,arg);
+				ActionTranslator translator =
+					new ActionTranslator(this,ruleName,
+											  actionToken,
+											  actionTree.outerAltNum);
+				List<Object> chunks = translator.translateToChunks();
+				chunks = target.postProcessAction(chunks, actionToken);
+				ST catST = new ST(templates, "<chunks>");
+				catST.add("chunks", chunks);
+				translatedArgs.add(catST);
+			}
+		}
+		if ( translatedArgs.isEmpty() ) {
+			return null;
+		}
+		return translatedArgs;
+	}
+
+	public static List<String> getListOfArgumentsFromAction(String actionText,
+															int separatorChar)
+	{
+		List<String> args = new ArrayList<String>();
+		getListOfArgumentsFromAction(actionText, 0, -1, separatorChar, args);
+		return args;
+	}
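+
+	// For example (a hedged sketch of the splitting performed below, traced by hand
+	// rather than taken from a test): splitting the action text
+	//   a[3],"foo",bar(1,2)
+	// on ',' yields the three arguments  a[3]   "foo"   bar(1,2)  because separator
+	// characters inside brackets, parentheses and string literals are skipped.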
+
+	/** Given an arg action like
+	 *
+	 *  [x, (*a).foo(21,33), 3.2+1, '\n',
+	 *  "a,oo\nick", {bl, "fdkj"eck}, ["cat\n,", x, 43]]
+	 *
+	 *  convert to a list of arguments.  Allow nested square brackets etc...
+	 *  Set separatorChar to ';' or ',' or whatever you want.
+	 */
+	public static int getListOfArgumentsFromAction(String actionText,
+												   int start,
+												   int targetChar,
+												   int separatorChar,
+												   List<String> args)
+	{
+		if ( actionText==null ) {
+			return -1;
+		}
+		actionText = actionText.replaceAll("//.*\n", "");
+		int n = actionText.length();
+		//System.out.println("actionText@"+start+"->"+(char)targetChar+"="+actionText.substring(start,n));
+		int p = start;
+		int last = p;
+		while ( p<n && actionText.charAt(p)!=targetChar ) {
+			int c = actionText.charAt(p);
+			switch ( c ) {
+				case '\'' :
+					p++;
+					while ( p<n && actionText.charAt(p)!='\'' ) {
+						if ( actionText.charAt(p)=='\\' && (p+1)<n &&
+							 actionText.charAt(p+1)=='\'' )
+						{
+							p++; // skip escaped quote
+						}
+						p++;
+					}
+					p++;
+					break;
+				case '"' :
+					p++;
+					while ( p<n && actionText.charAt(p)!='\"' ) {
+						if ( actionText.charAt(p)=='\\' && (p+1)<n &&
+							 actionText.charAt(p+1)=='\"' )
+						{
+							p++; // skip escaped quote
+						}
+						p++;
+					}
+					p++;
+					break;
+				case '(' :
+					p = getListOfArgumentsFromAction(actionText,p+1,')',separatorChar,args);
+					break;
+				case '{' :
+					p = getListOfArgumentsFromAction(actionText,p+1,'}',separatorChar,args);
+					break;
+				case '<' :
+					if ( actionText.indexOf('>',p+1)>=p ) {
+						// do we see a matching '>' ahead?  if so, hope it's a generic
+						// and not a less-than followed by an expression with a greater-than
+						p = getListOfArgumentsFromAction(actionText,p+1,'>',separatorChar,args);
+					}
+					else {
+						p++; // treat as normal char
+					}
+					break;
+				case '[' :
+					p = getListOfArgumentsFromAction(actionText,p+1,']',separatorChar,args);
+					break;
+				default :
+					if ( c==separatorChar && targetChar==-1 ) {
+						String arg = actionText.substring(last, p);
+						//System.out.println("arg="+arg);
+						args.add(arg.trim());
+						last = p+1;
+					}
+					p++;
+					break;
+			}
+		}
+		if ( targetChar==-1 && p<=n ) {
+			String arg = actionText.substring(last, p).trim();
+			//System.out.println("arg="+arg);
+			if ( arg.length()>0 ) {
+				args.add(arg.trim());
+			}
+		}
+		p++;
+		return p;
+	}
+
+	/** Given a template constructor action like %foo(a={...}) in
+	 *  an action, translate it to the appropriate template constructor
+	 *  from the templateLib. This translates a *piece* of the action.
+	 */
+	public ST translateTemplateConstructor(String ruleName,
+													   int outerAltNum,
+													   Token actionToken,
+													   String templateActionText)
+	{
+		// first, parse with antlr.g
+		//System.out.println("translate template: "+templateActionText);
+		ANTLRLexer lexer = new ANTLRLexer(new ANTLRStringStream(templateActionText));
+		lexer.setFileName(grammar.getFileName());
+		ANTLRParser parser = ANTLRParser.createParser(new CommonTokenStream(lexer));
+		parser.setFileName(grammar.getFileName());
+		ANTLRParser.rewrite_template_return parseResult = null;
+		try {
+			parseResult = parser.rewrite_template();
+		}
+		catch (RecognitionException re) {
+			ErrorManager.grammarError(ErrorManager.MSG_INVALID_TEMPLATE_ACTION,
+										  grammar,
+										  actionToken,
+										  templateActionText);
+		}
+		catch (Exception tse) {
+			ErrorManager.internalError("can't parse template action",tse);
+		}
+		GrammarAST rewriteTree = parseResult.getTree();
+
+		// then translate via codegen.g
+		CodeGenTreeWalker gen = new CodeGenTreeWalker(new CommonTreeNodeStream(rewriteTree));
+		gen.init(grammar);
+		gen.setCurrentRuleName(ruleName);
+		gen.setOuterAltNum(outerAltNum);
+		ST st = null;
+		try {
+			st = gen.rewrite_template();
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+							   re);
+		}
+		return st;
+	}
+
+
+	public void issueInvalidScopeError(String x,
+									   String y,
+									   Rule enclosingRule,
+									   Token actionToken,
+									   int outerAltNum)
+	{
+		//System.out.println("error $"+x+"::"+y);
+		Rule r = grammar.getRule(x);
+		AttributeScope scope = grammar.getGlobalScope(x);
+		if ( scope==null ) {
+			if ( r!=null ) {
+				scope = r.ruleScope; // if not global, might be rule scope
+			}
+		}
+		if ( scope==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE,
+										  grammar,
+										  actionToken,
+										  x);
+		}
+		else if ( scope.getAttribute(y)==null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE,
+										  grammar,
+										  actionToken,
+										  x,
+										  y);
+		}
+	}
+
+	public void issueInvalidAttributeError(String x,
+										   String y,
+										   Rule enclosingRule,
+										   Token actionToken,
+										   int outerAltNum)
+	{
+		//System.out.println("error $"+x+"."+y);
+		if ( enclosingRule==null ) {
+			// action not in a rule
+			ErrorManager.grammarError(ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE,
+										  grammar,
+										  actionToken,
+										  x,
+										  y);
+			return;
+		}
+
+		// action is in a rule
+		Grammar.LabelElementPair label = enclosingRule.getRuleLabel(x);
+
+		if ( label!=null || enclosingRule.getRuleRefsInAlt(x, outerAltNum)!=null ) {
+			// $rulelabel.attr or $ruleref.attr; must be unknown attr
+			String refdRuleName = x;
+			if ( label!=null ) {
+				refdRuleName = enclosingRule.getRuleLabel(x).referencedRuleName;
+			}
+			Rule refdRule = grammar.getRule(refdRuleName);
+			AttributeScope scope = refdRule.getAttributeScope(y);
+			if ( scope==null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_RULE_ATTRIBUTE,
+										  grammar,
+										  actionToken,
+										  refdRuleName,
+										  y);
+			}
+			else if ( scope.isParameterScope ) {
+				ErrorManager.grammarError(ErrorManager.MSG_INVALID_RULE_PARAMETER_REF,
+										  grammar,
+										  actionToken,
+										  refdRuleName,
+										  y);
+			}
+			else if ( scope.isDynamicRuleScope ) {
+				ErrorManager.grammarError(ErrorManager.MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF,
+										  grammar,
+										  actionToken,
+										  refdRuleName,
+										  y);
+			}
+		}
+
+	}
+
+	public void issueInvalidAttributeError(String x,
+										   Rule enclosingRule,
+										   Token actionToken,
+										   int outerAltNum)
+	{
+		//System.out.println("error $"+x);
+		if ( enclosingRule==null ) {
+			// action not in a rule
+			ErrorManager.grammarError(ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE,
+										  grammar,
+										  actionToken,
+										  x);
+			return;
+		}
+
+		// action is in a rule
+		Grammar.LabelElementPair label = enclosingRule.getRuleLabel(x);
+		AttributeScope scope = enclosingRule.getAttributeScope(x);
+
+		if ( label!=null ||
+			 enclosingRule.getRuleRefsInAlt(x, outerAltNum)!=null ||
+			 enclosingRule.name.equals(x) )
+		{
+			ErrorManager.grammarError(ErrorManager.MSG_ISOLATED_RULE_SCOPE,
+										  grammar,
+										  actionToken,
+										  x);
+		}
+		else if ( scope!=null && scope.isDynamicRuleScope ) {
+			ErrorManager.grammarError(ErrorManager.MSG_ISOLATED_RULE_ATTRIBUTE,
+										  grammar,
+										  actionToken,
+										  x);
+		}
+		else {
+			ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE,
+									  grammar,
+									  actionToken,
+									  x);
+		}
+	}
+
+	// M I S C
+
+	public STGroup getTemplates() {
+		return templates;
+	}
+
+	public STGroup getBaseTemplates() {
+		return baseTemplates;
+	}
+
+	public void setDebug(boolean debug) {
+		this.debug = debug;
+	}
+
+	public void setTrace(boolean trace) {
+		this.trace = trace;
+	}
+
+	public void setProfile(boolean profile) {
+		this.profile = profile;
+		if ( profile ) {
+			setDebug(true); // requires debug events
+		}
+	}
+
+	public ST getRecognizerST() {
+		return outputFileST;
+	}
+
+	/** Generate TParser.java and TLexer.java from T.g if combined, else
+	 *  just use T.java as output regardless of type.
+	 */
+	public String getRecognizerFileName(String name, int type) {
+		ST extST = templates.getInstanceOf("codeFileExtension");
+		String recognizerName = grammar.getRecognizerName();
+		return recognizerName+extST.render();
+		/*
+		String suffix = "";
+		if ( type==Grammar.COMBINED ||
+			 (type==Grammar.LEXER && !grammar.implicitLexer) )
+		{
+			suffix = Grammar.grammarTypeToFileNameSuffix[type];
+		}
+		return name+suffix+extST.toString();
+		*/
+	}
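+
+	// For example (a sketch, assuming Grammar.getRecognizerName() appends the usual
+	// Parser/Lexer suffix for combined grammars): a combined grammar T with
+	// language=Java would yield TParser.java here, and its implicit lexer TLexer.java.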
+
+	/** What is the name of the vocab file generated for this grammar?
+	 *  Returns null if no .tokens file should be generated.
+	 */
+	public String getVocabFileName() {
+		if ( grammar.isBuiltFromString() ) {
+			return null;
+		}
+		return grammar.name+VOCAB_FILE_EXTENSION;
+	}
+
+	public void write(ST code, String fileName) throws IOException {
+		//long start = System.currentTimeMillis();
+		Writer w = tool.getOutputFile(grammar, fileName);
+		// Write the output to a StringWriter
+		STWriter wr = new AutoIndentWriter(w);
+		wr.setLineWidth(lineWidth);
+		code.write(wr);
+		w.close();
+		//long stop = System.currentTimeMillis();
+		//System.out.println("render time for "+fileName+": "+(int)(stop-start)+"ms");
+	}
+
+	/** You can generate a switch rather than if-then-else for a DFA state
+	 *  if there are no semantic predicates and the number of edge label
+	 *  values is small enough; e.g., don't generate a switch for a state
+	 *  containing an edge label such as 20..52330 (the resulting byte codes
+	 *  would probably overflow the 65k method size limit).
+	 */
+	protected boolean canGenerateSwitch(DFAState s) {
+		if ( !GENERATE_SWITCHES_WHEN_POSSIBLE ) {
+			return false;
+		}
+		int size = 0;
+		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+			Transition edge = s.transition(i);
+			if ( edge.label.isSemanticPredicate() ) {
+				return false;
+			}
+			// can't do a switch if the edges are going to require predicates
+			if ( edge.label.getAtom()==Label.EOT ) {
+				int EOTPredicts = ((DFAState)edge.target).getUniquelyPredictedAlt();
+				if ( EOTPredicts==NFA.INVALID_ALT_NUMBER ) {
+					// EOT target has to be a predicate then; no unique alt
+					return false;
+				}
+			}
+			// if target is a state with gated preds, we need to use preds on
+			// this edge then to reach it.
+			if ( ((DFAState)edge.target).getGatedPredicatesInNFAConfigurations()!=null ) {
+				return false;
+			}
+			size += edge.label.getSet().size();
+		}
+		if ( s.getNumberOfTransitions()<MIN_SWITCH_ALTS ||
+			 size>MAX_SWITCH_CASE_LABELS ) {
+			return false;
+		}
+		return true;
+	}
+
+	/** Create a label to track a token / rule reference's result.
+	 *  Technically, this is a place where I break model-view separation
+	 *  as I am creating a variable name that could be invalid in a
+	 *  target language; however, label ::= &lt;ID&gt;&lt;INT&gt; is probably ok in
+	 *  all languages we care about.
+	 */
+	public String createUniqueLabel(String name) {
+		return new StringBuffer()
+			.append(name).append(uniqueLabelNumber++).toString();
+	}
+}
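// Editor's note: the switch-versus-if decision in canGenerateSwitch() above comes down to two
// thresholds once the predicate checks pass. The standalone sketch below is illustrative only
// (not part of this change); the class name, constant values, and inputs are made-up stand-ins
// for CodeGenerator.MIN_SWITCH_ALTS and MAX_SWITCH_CASE_LABELS.
class SwitchHeuristicSketch {
    // Hypothetical stand-ins for the real MIN_SWITCH_ALTS / MAX_SWITCH_CASE_LABELS defaults.
    static final int MIN_SWITCH_ALTS = 3;
    static final int MAX_SWITCH_CASE_LABELS = 300;

    /** Mirrors only the final size test; the real method also rejects states with
     *  semantic predicates, ambiguous EOT edges, and gated-predicate targets. */
    static boolean worthASwitch(int numTransitions, int totalLabelValues) {
        return numTransitions >= MIN_SWITCH_ALTS
            && totalLabelValues <= MAX_SWITCH_CASE_LABELS;
    }

    public static void main(String[] args) {
        System.out.println(worthASwitch(5, 40));     // true: small multi-way decision
        System.out.println(worthASwitch(2, 40));     // false: too few alternatives
        System.out.println(worthASwitch(5, 52311));  // false: a label range like 20..52330 is too wide
    }
}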
diff --git a/tool/src/main/java/org/antlr/codegen/CppTarget.java b/tool/src/main/java/org/antlr/codegen/CppTarget.java
new file mode 100755
index 0000000..eef580f
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/CppTarget.java
@@ -0,0 +1,376 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.tool.Grammar;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.misc.Aggregate;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class CppTarget extends Target {
+    @Override
+    public boolean useBaseTemplatesForSynPredFragments() {
+        return false;
+    }
+
+    ArrayList<String> strings = new ArrayList<String>();
+
+    @Override
+    protected void genRecognizerFile(Tool tool,
+            CodeGenerator generator,
+            Grammar grammar,
+            ST outputFileST)
+            throws IOException {
+
+        // Before we write this, and cause it to generate its string,
+        // we need to add all the string literals that we are going to match
+        //
+        outputFileST.add("literals", strings);
+        String fileName = generator.getRecognizerFileName(grammar.name, grammar.type);
+        generator.write(outputFileST, fileName);
+    }
+
+    @Override
+    protected void genRecognizerHeaderFile(Tool tool,
+            CodeGenerator generator,
+            Grammar grammar,
+            ST headerFileST,
+            String extName)
+            throws IOException {
+
+		// It's better to remove the EOF token, as it would already be defined everywhere in C;
+		// we define it later as "EOF_TOKEN" instead of "EOF".
+		List<?> tokens = (List<?>)headerFileST.getAttribute("tokens");
+		for( int i = 0; i < tokens.size(); ++i )
+		{
+			boolean can_break = false;
+			Object tok = tokens.get(i);
+			if( tok instanceof Aggregate )
+			{
+				Aggregate atok = (Aggregate) tok;
+				for (Map.Entry<String, Object> pairs : atok.properties.entrySet()) {
+					if( pairs.getValue().equals("EOF") )
+					{
+						tokens.remove(i);
+						can_break = true;
+						break;
+					}
+				}
+			}
+
+			if( can_break )
+				break;
+		}
+
+		// Pick up the file name we are generating. This method will return a
+		// file suffixed with .c, so we must substring and add the extName
+		// to it as we cannot assign into strings in Java.
+        ///
+        String fileName = generator.getRecognizerFileName(grammar.name, grammar.type);
+        fileName = fileName.substring(0, fileName.length() - 4) + extName;
+
+        generator.write(headerFileST, fileName);
+    }
+
+    protected ST chooseWhereCyclicDFAsGo(Tool tool,
+            CodeGenerator generator,
+            Grammar grammar,
+            ST recognizerST,
+            ST cyclicDFAST) {
+        return recognizerST;
+    }
+
+    /** Is scope in @scope::name {action} valid for this kind of grammar?
+     *  Targets like C++ may want to allow new scopes like headerfile or
+     *  some such.  The action names themselves are not policed at the
+     *  moment so targets can add template actions w/o having to recompile
+     *  ANTLR.
+     */
+    @Override
+    public boolean isValidActionScope(int grammarType, String scope) {
+        switch (grammarType) {
+            case Grammar.LEXER:
+                if (scope.equals("lexer")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                if (scope.equals("namespace")) {
+				    return true;
+				}
+
+                break;
+            case Grammar.PARSER:
+                if (scope.equals("parser")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+				if (scope.equals("namespace")) {
+					return true;
+				}
+
+                break;
+            case Grammar.COMBINED:
+                if (scope.equals("parser")) {
+                    return true;
+                }
+                if (scope.equals("lexer")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                if (scope.equals("namespace")) {
+				    return true;
+				}
+
+                break;
+            case Grammar.TREE_PARSER:
+                if (scope.equals("treeparser")) {
+                    return true;
+                }
+                if (scope.equals("header")) {
+                    return true;
+                }
+                if (scope.equals("includes")) {
+                    return true;
+                }
+                if (scope.equals("preincludes")) {
+                    return true;
+                }
+                if (scope.equals("overrides")) {
+                    return true;
+                }
+                if (scope.equals("namespace")) {
+				    return true;
+				}
+				break;
+        }
+        return false;
+    }
+
+    @Override
+    public String getTargetCharLiteralFromANTLRCharLiteral(
+            CodeGenerator generator,
+            String literal) {
+
+        if (literal.startsWith("'\\u")) {
+            literal = "0x" + literal.substring(3, 7);
+        } else {
+            int c = literal.charAt(1);
+
+            if (c < 32 || c > 127) {
+                literal = "0x" + Integer.toHexString(c);
+            }
+        }
+
+        return literal;
+    }
+
+    /** Convert from an ANTLR string literal found in a grammar file to
+     *  an equivalent string literal in the C target.
+     *  Because we must support Unicode character sets and have chosen
+     *  to have the lexer match UTF32 characters, we must encode
+     *  string matches as 32-bit character arrays. Here we
+     *  must produce the C array and cater for the case where the
+     *  lexer has been encoded with a string such as 'xyz\n'.
+     */
+    @Override
+    public String getTargetStringLiteralFromANTLRStringLiteral(
+            CodeGenerator generator,
+            String literal) {
+        int index;
+        String bytes;
+        StringBuffer buf = new StringBuffer();
+
+        buf.append("{ ");
+
+        // We need to lose any escaped characters of the form \x and just
+        // replace them with their actual values as well as lose the surrounding
+        // quote marks.
+        //
+        for (int i = 1; i < literal.length() - 1; i++) {
+            buf.append("0x");
+
+            if (literal.charAt(i) == '\\') {
+                i++; // Assume there is a next character; if not, this just yields an
+                     // invalid string, which mirrors the invalid input anyway
+                switch (literal.charAt(i)) {
+                    case 'u':
+                    case 'U':
+                        buf.append(literal.substring(i + 1, i + 5));  // Already a hex string
+                        i = i + 5;                                // Move to next string/char/escape
+                        break;
+
+                    case 'n':
+                    case 'N':
+
+                        buf.append("0A");
+                        break;
+
+                    case 'r':
+                    case 'R':
+
+                        buf.append("0D");
+                        break;
+
+                    case 't':
+                    case 'T':
+
+                        buf.append("09");
+                        break;
+
+                    case 'b':
+                    case 'B':
+
+                        buf.append("08");
+                        break;
+
+                    case 'f':
+                    case 'F':
+
+                        buf.append("0C");
+                        break;
+
+                    default:
+
+                        // Anything else is what it is!
+                        //
+                        buf.append(Integer.toHexString((int) literal.charAt(i)).toUpperCase());
+                        break;
+                }
+            } else {
+                buf.append(Integer.toHexString((int) literal.charAt(i)).toUpperCase());
+            }
+            buf.append(", ");
+        }
+        buf.append(" antlr3::ANTLR_STRING_TERMINATOR}");
+
+        bytes = buf.toString();
+        index = strings.indexOf(bytes);
+
+        if (index == -1) {
+            strings.add(bytes);
+            index = strings.indexOf(bytes);
+        }
+
+        String strref = "lit_" + String.valueOf(index + 1);
+
+        return strref;
+    }
+
+    /**
+     * Overrides the standard grammar analysis so we can prepare the analyser
+     * a little differently from the other targets.
+     *
+     * In particular we want to influence the way the code generator chooses between
+     * switches, ifs, and table-driven DFAs. In general, C code should be generated that
+     * has the minimum use of tables and the maximum use of large switch statements. This
+     * allows the optimizers to generate very efficient code; it can reduce object code size
+     * by about 30% and give about a 20% performance improvement over not doing this. Hence,
+     * for the C target only, we change the defaults here, but only if they are still set to
+     * their default values.
+     *
+     * @param generator An instance of the generic code generator class.
+     * @param grammar The grammar that we are currently analyzing
+     */
+    @Override
+    protected void performGrammarAnalysis(CodeGenerator generator, Grammar grammar) {
+
+        // Check to see if the maximum inline DFA states is still set to
+        // the default size. If it is then whack it all the way up to the maximum that
+        // we can sensibly get away with.
+        //
+        if (CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE == CodeGenerator.MADSI_DEFAULT ) {
+
+            CodeGenerator.MAX_ACYCLIC_DFA_STATES_INLINE = 65535;
+        }
+
+        // Check to see if the maximum switch size is still set to the default
+        // and bring it up much higher if it is. Modern C compilers can handle
+        // much bigger switch statements than say Java can and if anyone finds a compiler
+        // that cannot deal with such big switches, all they need do is regenerate the
+        // code with a reduced -Xmaxswitchcaselabels nnn
+        //
+        if  (CodeGenerator.MAX_SWITCH_CASE_LABELS == CodeGenerator.MSCL_DEFAULT) {
+
+            CodeGenerator.MAX_SWITCH_CASE_LABELS = 3000;
+        }
+
+        // Check to see if the number of transitions considered a minimum for using
+        // a switch is still at the default. Because a switch is still generally faster than
+        // an if even with small sets, and given that the optimizer will do the best thing with it
+        // anyway, we simply want to generate a switch for any number of states.
+        //
+        if (CodeGenerator.MIN_SWITCH_ALTS == CodeGenerator.MSA_DEFAULT) {
+
+            CodeGenerator.MIN_SWITCH_ALTS = 1;
+        }
+
+        // Now we allow the superclass implementation to do whatever it feels it
+        // must do.
+        //
+        super.performGrammarAnalysis(generator, grammar);
+    }
+}
+
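// Editor's note: getTargetStringLiteralFromANTLRStringLiteral() above rewrites a grammar string
// literal into a 32-bit character array and hands back a lit_N reference. The following rough
// standalone sketch (illustrative only, not part of this change) shows the non-escape path; the
// real code also folds \n, \t, \uXXXX and the other escapes.
class CppLiteralSketch {
    /** Simplified conversion: strip the surrounding quotes and emit each character
     *  as an uppercase hex value; escape sequences are ignored in this sketch. */
    static String toCArray(String antlrLiteral) {
        StringBuilder buf = new StringBuilder("{ ");
        for (int i = 1; i < antlrLiteral.length() - 1; i++) {
            buf.append("0x")
               .append(Integer.toHexString(antlrLiteral.charAt(i)).toUpperCase())
               .append(", ");
        }
        return buf.append(" antlr3::ANTLR_STRING_TERMINATOR}").toString();
    }

    public static void main(String[] args) {
        // 'xyz' -> { 0x78, 0x79, 0x7A,  antlr3::ANTLR_STRING_TERMINATOR}
        System.out.println(toCArray("'xyz'"));
    }
}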
diff --git a/tool/src/main/java/org/antlr/codegen/DelphiTarget.java b/tool/src/main/java/org/antlr/codegen/DelphiTarget.java
new file mode 100644
index 0000000..2e1d19a
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/DelphiTarget.java
@@ -0,0 +1,150 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.analysis.Label;
+import org.antlr.misc.Utils;
+import org.stringtemplate.v4.ST;
+import org.antlr.tool.Grammar;
+
+public class DelphiTarget extends Target 
+{
+  public DelphiTarget() { 
+    targetCharValueEscape['\n'] = "'#10'";    
+    targetCharValueEscape['\r'] = "'#13'";    
+    targetCharValueEscape['\t'] = "'#9'";   
+    targetCharValueEscape['\b'] = "\\b";    
+    targetCharValueEscape['\f'] = "\\f";    
+    targetCharValueEscape['\\'] = "\\";   
+    targetCharValueEscape['\''] = "''";   
+    targetCharValueEscape['"'] = "'";
+  } 
+
+  protected ST chooseWhereCyclicDFAsGo(Tool tool,
+                           CodeGenerator generator,
+                           Grammar grammar,
+                           ST recognizerST,
+                           ST cyclicDFAST)
+  {
+    return recognizerST;
+  }
+
+	@Override
+  public String encodeIntAsCharEscape(int v)
+  {
+    if (v <= 127)
+    {
+      String hex1 = Integer.toHexString(v | 0x10000).substring(3, 5);
+      return "'#$" + hex1 + "'";
+    }
+    String hex = Integer.toHexString(v | 0x10000).substring(1, 5);
+    return "'#$" + hex + "'";
+  }
+  
+	@Override
+  public String getTargetCharLiteralFromANTLRCharLiteral(
+    CodeGenerator generator,
+    String literal)
+  {
+    StringBuilder buf = new StringBuilder();
+    int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+    if ( c<Label.MIN_CHAR_VALUE ) {
+      return "0";
+    }
+    // normal char
+    buf.append(c);
+
+    return buf.toString();
+  } 
+
+	@Override
+  public String getTargetStringLiteralFromString(String s, boolean quoted) {
+    if ( s==null ) {
+      return null;
+    }
+    StringBuilder buf = new StringBuilder();
+    if ( quoted ) {
+      buf.append('\'');
+    }
+    for (int i=0; i<s.length(); i++) {
+      int c = s.charAt(i);
+      if ( c!='"' && // don't escape double quotes in strings for Delphi
+         c<targetCharValueEscape.length &&
+         targetCharValueEscape[c]!=null )
+      {
+        buf.append(targetCharValueEscape[c]);
+      }
+      else {
+        buf.append((char)c);
+      }
+      if ((i & 127) == 127)
+      {
+        // Concatenate string literals because Delphi doesn't support literals over 255 characters,
+        // and the code editor doesn't support lines over 1023 characters
+        buf.append("\' + \r\n  \'");
+      }
+    }
+    if ( quoted ) {
+      buf.append('\'');
+    }
+    return buf.toString();
+  }
+
+	@Override
+  public String getTargetStringLiteralFromANTLRStringLiteral(
+    CodeGenerator generator,
+    String literal)
+  {
+    literal = Utils.replace(literal,"\\\'","''"); // \' to ' to normalize
+    literal = Utils.replace(literal,"\\r\\n","'#13#10'"); 
+    literal = Utils.replace(literal,"\\r","'#13'"); 
+    literal = Utils.replace(literal,"\\n","'#10'"); 
+    StringBuilder buf = new StringBuilder(literal);
+    buf.setCharAt(0,'\'');
+    buf.setCharAt(literal.length()-1,'\'');
+    return buf.toString();
+  }
+   
+	@Override
+  public String getTarget64BitStringFromValue(long word) {
+    int numHexDigits = 8*2;
+    StringBuilder buf = new StringBuilder(numHexDigits+2);
+    buf.append("$");
+    String digits = Long.toHexString(word);
+    digits = digits.toUpperCase();
+    int padding = numHexDigits - digits.length();
+    // pad left with zeros
+    for (int i=1; i<=padding; i++) {
+      buf.append('0');
+    }
+    buf.append(digits);
+    return buf.toString();
+  }
+
+}
\ No newline at end of file
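// Editor's note: DelphiTarget.encodeIntAsCharEscape() above uses an OR with 0x10000 to zero-pad,
// so values up to 127 come out as two hex digits and larger values as four. A minimal check of
// that arithmetic (illustrative only, not part of this change; inputs chosen arbitrarily):
class DelphiEscapeSketch {
    // Same arithmetic as DelphiTarget.encodeIntAsCharEscape.
    static String encode(int v) {
        if (v <= 127) {
            return "'#$" + Integer.toHexString(v | 0x10000).substring(3, 5) + "'";
        }
        return "'#$" + Integer.toHexString(v | 0x10000).substring(1, 5) + "'";
    }

    public static void main(String[] args) {
        System.out.println(encode(10));   // '#$0a'   (line feed, two hex digits)
        System.out.println(encode(300));  // '#$012c' (above 127, four hex digits)
    }
}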
diff --git a/tool/src/main/java/org/antlr/codegen/JavaScriptTarget.java b/tool/src/main/java/org/antlr/codegen/JavaScriptTarget.java
new file mode 100755
index 0000000..2b6c99e
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/JavaScriptTarget.java
@@ -0,0 +1,76 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.codegen;
+
+public class JavaScriptTarget extends Target {
+    /** Convert an int to a JavaScript Unicode character literal.
+     *
+     *  The current JavaScript spec (ECMA-262) doesn't provide for octal
+     *  notation in String literals, although some implementations support it.
+     *  This method overrides the parent class so that characters will always
+     *  be encoded as Unicode literals (e.g. \u0011).
+     */
+	@Override
+    public String encodeIntAsCharEscape(int v) {
+        String hex = Integer.toHexString(v|0x10000).substring(1,5);
+        return "\\u"+hex;
+    }
+
+    /** Convert a long to two 32-bit numbers separated by a comma.
+     *  JavaScript does not support 64-bit numbers, so we need to break
+     *  the number into two 32-bit literals to give to the BitSet.  A number like
+     *  0xHHHHHHHHLLLLLLLL is broken into the following string:
+     *  "0xLLLLLLLL, 0xHHHHHHHH"
+     *  Note that the low order bits are first, followed by the high order bits.
+     *  This is to match how the BitSet constructor works, where the bits are
+     *  passed in in 32-bit chunks with low-order bits coming first.
+     *
+     *  Note: stole the following two methods from the ActionScript target.
+     */
+	@Override
+    public String getTarget64BitStringFromValue(long word) {
+        StringBuffer buf = new StringBuffer(22); // enough for the two "0x", "," and " "
+        buf.append("0x");
+        writeHexWithPadding(buf, Integer.toHexString((int)(word & 0x00000000ffffffffL)));
+        buf.append(", 0x");
+        writeHexWithPadding(buf, Integer.toHexString((int)(word >> 32)));
+
+        return buf.toString();
+    }
+
+    private void writeHexWithPadding(StringBuffer buf, String digits) {
+        digits = digits.toUpperCase();
+        int padding = 8 - digits.length();
+        // pad left with zeros
+        for (int i=1; i<=padding; i++) {
+            buf.append('0');
+        }
+        buf.append(digits);
+    }
+}
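// Editor's note: getTarget64BitStringFromValue() above emits the low 32 bits first and the high
// 32 bits second, to match how the BitSet constructor consumes its chunks. A small sketch of the
// same split (illustrative only, not part of this change; the sample value is arbitrary):
class JsBitSplitSketch {
    static String split(long word) {
        // Low-order half first, then high-order half, both zero-padded to 8 hex digits.
        return String.format("0x%08X, 0x%08X",
                (int) (word & 0x00000000ffffffffL),
                (int) (word >> 32));
    }

    public static void main(String[] args) {
        // Prints "0x55667788, 0x11223344"
        System.out.println(split(0x1122334455667788L));
    }
}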
diff --git a/tool/src/main/java/org/antlr/codegen/JavaTarget.java b/tool/src/main/java/org/antlr/codegen/JavaTarget.java
new file mode 100644
index 0000000..a73f27c
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/JavaTarget.java
@@ -0,0 +1,65 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Rule;
+import org.stringtemplate.v4.ST;
+
+import java.util.Set;
+
+public class JavaTarget extends Target {
+    @Override
+    public boolean useBaseTemplatesForSynPredFragments() {
+        return false;
+    }
+
+	protected ST chooseWhereCyclicDFAsGo(Tool tool,
+										 CodeGenerator generator,
+										 Grammar grammar,
+										 ST recognizerST,
+										 ST cyclicDFAST)
+	{
+		return recognizerST;
+	}
+
+	@Override
+	protected void performGrammarAnalysis(CodeGenerator generator, Grammar grammar) {
+		super.performGrammarAnalysis(generator, grammar);
+		for (Rule rule : grammar.getRules()) {
+			rule.throwsSpec.add("RecognitionException");
+		}
+		Set<? extends Rule> delegatedRules = grammar.getDelegatedRules();
+		if ( delegatedRules!=null ) {
+			for (Rule rule : delegatedRules) {
+				rule.throwsSpec.add("RecognitionException");
+			}
+		}
+	}
+}
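// Editor's note: the net effect of JavaTarget.performGrammarAnalysis() above is that every rule
// (including delegated rules) carries RecognitionException in its throws spec. Roughly, a
// generated rule method ends up shaped like the sketch below (illustrative only, not part of
// this change; the class and rule names are made up).
abstract class GeneratedParserShape {
    // "expr" is a hypothetical rule name; the throws clause comes from rule.throwsSpec above.
    public abstract void expr() throws org.antlr.runtime.RecognitionException;
}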
diff --git a/tool/src/main/java/org/antlr/codegen/ObjCTarget.java b/tool/src/main/java/org/antlr/codegen/ObjCTarget.java
new file mode 100644
index 0000000..c20f76e
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/ObjCTarget.java
@@ -0,0 +1,114 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr and Alan Condit
+ *  Copyright (c) 2006 Kay Roepke (Objective-C runtime)
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.misc.Utils;
+import org.stringtemplate.v4.ST;
+import org.antlr.tool.Grammar;
+
+import java.io.IOException;
+
+public class ObjCTarget extends Target {
+	@Override
+	protected void genRecognizerHeaderFile(Tool tool,
+										   CodeGenerator generator,
+										   Grammar grammar,
+										   ST headerFileST,
+										   String extName)
+	throws IOException
+	{
+		generator.write(headerFileST, grammar.name + Grammar.grammarTypeToFileNameSuffix[grammar.type] + extName);
+	}
+
+	@Override
+	public String getTargetCharLiteralFromANTLRCharLiteral(CodeGenerator generator,
+														   String literal)
+	{
+		if  (literal.startsWith("'\\u") ) {
+			literal = "0x" +literal.substring(3, 7);
+		} else	{
+			int c = literal.charAt(1); // TJP
+			if  (c < 32 || c > 127) {
+				literal  =  "0x" + Integer.toHexString(c);
+			}
+		}
+
+		return literal;
+	}
+
+	/** Convert from an ANTLR string literal found in a grammar file to
+	*  an equivalent string literal in the target language.  For Java, this
+	*  is the translation 'a\n"' &rarr; "a\n\"".  Expect single quotes
+	*  around the incoming literal.  Just flip the quotes and replace
+	*  double quotes with \"
+	*/
+	@Override
+	public String getTargetStringLiteralFromANTLRStringLiteral(CodeGenerator generator,
+															   String literal)
+	{
+		literal = Utils.replace(literal,"\"","\\\"");
+		StringBuilder buf = new StringBuilder(literal);
+		buf.setCharAt(0,'"');
+		buf.setCharAt(literal.length()-1,'"');
+		buf.insert(0,'@');
+		return buf.toString();
+	}
+
+	/** If we have a label, prefix it with the recognizer's name */
+	@Override
+	public String getTokenTypeAsTargetLabel(CodeGenerator generator, int ttype) {
+		String name = generator.grammar.getTokenDisplayName(ttype);
+		// If name is a literal, return the token type instead
+		if ( name.charAt(0)=='\'' ) {
+			return String.valueOf(ttype);
+		}
+		return name;
+		//return generator.grammar.name + Grammar.grammarTypeToFileNameSuffix[generator.grammar.type] + "_" + name;
+		//return super.getTokenTypeAsTargetLabel(generator, ttype);
+		//return this.getTokenTextAndTypeAsTargetLabel(generator, null, ttype);
+	}
+
+	/** Target must be able to override the labels used for token types. Sometimes also depends on the token text.*/
+	public String getTokenTextAndTypeAsTargetLabel(CodeGenerator generator, String text, int tokenType) {
+		String name = generator.grammar.getTokenDisplayName(tokenType);
+		// If name is a literal, return the token type instead
+		if ( name.charAt(0)=='\'' ) {
+			return String.valueOf(tokenType);
+		}
+		String textEquivalent = text == null ? name : text;
+		if (textEquivalent.charAt(0) >= '0' && textEquivalent.charAt(0) <= '9') {
+			return textEquivalent;
+		} else {
+			return generator.grammar.name + Grammar.grammarTypeToFileNameSuffix[generator.grammar.type] + "_" + textEquivalent;
+		}
+	}
+
+}
+
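// Editor's note: ObjCTarget.getTargetStringLiteralFromANTLRStringLiteral() above escapes embedded
// double quotes, flips the surrounding single quotes to double quotes, and prefixes @ to form an
// NSString-style literal. A quick sketch of the same transformation (illustrative only, not part
// of this change; the sample input is arbitrary):
class ObjCLiteralSketch {
    static String toObjC(String antlrLiteral) {
        String escaped = antlrLiteral.replace("\"", "\\\"");
        StringBuilder buf = new StringBuilder(escaped);
        buf.setCharAt(0, '"');
        buf.setCharAt(escaped.length() - 1, '"');
        return buf.insert(0, '@').toString();
    }

    public static void main(String[] args) {
        // 'say "hi"'  ->  @"say \"hi\""
        System.out.println(toObjC("'say \"hi\"'"));
    }
}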
diff --git a/tool/src/main/java/org/antlr/codegen/Perl5Target.java b/tool/src/main/java/org/antlr/codegen/Perl5Target.java
new file mode 100644
index 0000000..6b9ba16
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/Perl5Target.java
@@ -0,0 +1,94 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.analysis.Label;
+import org.antlr.tool.AttributeScope;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.RuleLabelScope;
+
+public class Perl5Target extends Target {
+    public Perl5Target() {
+        targetCharValueEscape['$'] = "\\$";
+        targetCharValueEscape['@'] = "\\@";
+        targetCharValueEscape['%'] = "\\%";
+        AttributeScope.tokenScope.addAttribute("self", null);
+        RuleLabelScope.predefinedLexerRulePropertiesScope.addAttribute("self", null);
+    }
+
+	@Override
+    public String getTargetCharLiteralFromANTLRCharLiteral(final CodeGenerator generator,
+                                                           final String literal) {
+        final StringBuffer buf = new StringBuffer(10);
+
+        final int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+        if (c < Label.MIN_CHAR_VALUE) {
+            buf.append("\\x{0000}");
+        } else if (c < targetCharValueEscape.length &&
+                targetCharValueEscape[c] != null) {
+            buf.append(targetCharValueEscape[c]);
+        } else if (Character.UnicodeBlock.of((char) c) ==
+                Character.UnicodeBlock.BASIC_LATIN &&
+                !Character.isISOControl((char) c)) {
+            // normal char
+            buf.append((char) c);
+        } else {
+            // must be something unprintable...use \\uXXXX
+            // turn on the bit above max "\\uFFFF" value so that we pad with zeros
+            // then only take last 4 digits
+            String hex = Integer.toHexString(c | 0x10000).toUpperCase().substring(1, 5);
+            buf.append("\\x{");
+            buf.append(hex);
+            buf.append("}");
+        }
+
+        if (buf.indexOf("\\") == -1) {
+            // no need for interpolation, use single quotes
+            buf.insert(0, '\'');
+            buf.append('\'');
+        } else {
+            // need string interpolation
+            buf.insert(0, '\"');
+            buf.append('\"');
+        }
+
+        return buf.toString();
+    }
+
+	@Override
+    public String encodeIntAsCharEscape(final int v) {
+        final int intValue;
+        if ((v & 0x8000) == 0) {
+            intValue = v;
+        } else {
+            intValue = -(0x10000 - v);
+        }
+
+        return String.valueOf(intValue);
+    }
+}
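// Editor's note: Perl5Target.encodeIntAsCharEscape() above reinterprets the 16-bit value as a
// signed number (presumably because the generated Perl tables use signed values). A minimal
// check of that sign handling (illustrative only, not part of this change; inputs arbitrary):
class PerlEscapeSketch {
    // Same sign handling as Perl5Target.encodeIntAsCharEscape: treat the value as signed 16-bit.
    static String encode(int v) {
        return String.valueOf((v & 0x8000) == 0 ? v : -(0x10000 - v));
    }

    public static void main(String[] args) {
        System.out.println(encode(65));      // "65"
        System.out.println(encode(0xFFFF));  // "-1"  (all-ones pattern read as signed)
    }
}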
diff --git a/tool/src/main/java/org/antlr/codegen/Python3Target.java b/tool/src/main/java/org/antlr/codegen/Python3Target.java
new file mode 100644
index 0000000..42bdc43
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/Python3Target.java
@@ -0,0 +1,227 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+
+Please excuse my obvious lack of Java experience. The code here is probably
+full of WTFs - though IMHO Java is the Real WTF(TM) here...
+
+ */
+
+package org.antlr.codegen;
+
+import org.antlr.runtime.Token;
+import org.antlr.tool.Grammar;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class Python3Target extends Target {
+    @Override
+    public boolean useBaseTemplatesForSynPredFragments() {
+        return false;
+    }
+
+    /** Target must be able to override the labels used for token types */
+	@Override
+    public String getTokenTypeAsTargetLabel(CodeGenerator generator,
+					    int ttype) {
+	// use ints for predefined types;
+	// <invalid> <EOR> <DOWN> <UP>
+	if ( ttype >= 0 && ttype <= 3 ) {
+	    return String.valueOf(ttype);
+	}
+
+	String name = generator.grammar.getTokenDisplayName(ttype);
+
+	// If name is a literal, return the token type instead
+	if ( name.charAt(0)=='\'' ) {
+	    return String.valueOf(ttype);
+	}
+
+	return name;
+    }
+
+	@Override
+    public String getTargetCharLiteralFromANTLRCharLiteral(
+            CodeGenerator generator,
+            String literal) {
+	int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+	return String.valueOf(c);
+    }
+
+    private List<String> splitLines(String text) {
+		ArrayList<String> l = new ArrayList<String>();
+		int idx = 0;
+
+		while ( true ) {
+			int eol = text.indexOf("\n", idx);
+			if ( eol == -1 ) {
+				l.add(text.substring(idx));
+				break;
+			}
+			else {
+				l.add(text.substring(idx, eol+1));
+				idx = eol+1;
+			}
+		}
+
+		return l;
+    }
+
+	@Override
+    public List<Object> postProcessAction(List<Object> chunks, Token actionToken) {
+		/* TODO
+		   - check for and report TAB usage
+		 */
+
+		//System.out.println("\n*** Action at " + actionToken.getLine() + ":" + actionToken.getColumn());
+
+		/* First I create a new list of chunks. String chunks are split into
+		   lines and some whitespace may be added at the beginning.
+
+		   As a result I get a list of chunks
+		   - where the first line starts at column 0
+		   - where every LF is at the end of a string chunk
+		*/
+
+		List<Object> nChunks = new ArrayList<Object>();
+		for (int i = 0; i < chunks.size(); i++) {
+			Object chunk = chunks.get(i);
+
+			if ( chunk instanceof String ) {
+				String text = (String)chunks.get(i);
+				if ( nChunks.isEmpty() && actionToken.getCharPositionInLine() >= 0 ) {
+					// first chunk and some 'virtual' WS at beginning
+					// prepend to this chunk
+
+					String ws = "";
+					for ( int j = 0 ; j < actionToken.getCharPositionInLine() ; j++ ) {
+						ws += " ";
+					}
+					text = ws + text;
+				}
+
+				nChunks.addAll(splitLines(text));
+			}
+			else {
+				if ( nChunks.isEmpty() && actionToken.getCharPositionInLine() >= 0 ) {
+					// first chunk and some 'virtual' WS at beginning
+					// add as a chunk of its own
+
+					String ws = "";
+					for ( int j = 0 ; j <= actionToken.getCharPositionInLine() ; j++ ) {
+						ws += " ";
+					}
+					nChunks.add(ws);
+				}
+
+				nChunks.add(chunk);
+			}
+		}
+
+		int lineNo = actionToken.getLine();
+		int col = 0;
+
+		// strip trailing empty lines
+		int lastChunk = nChunks.size() - 1;
+		while ( lastChunk > 0
+				&& nChunks.get(lastChunk) instanceof String
+				&& ((String)nChunks.get(lastChunk)).trim().length() == 0 )
+			lastChunk--;
+
+		// strip leading empty lines
+		int firstChunk = 0;
+		while ( firstChunk <= lastChunk
+				&& nChunks.get(firstChunk) instanceof String
+				&& ((String)nChunks.get(firstChunk)).trim().length() == 0
+				&& ((String)nChunks.get(firstChunk)).endsWith("\n") ) {
+			lineNo++;
+			firstChunk++;
+		}
+
+		int indent = -1;
+		for ( int i = firstChunk ; i <= lastChunk ; i++ ) {
+			Object chunk = nChunks.get(i);
+
+			//System.out.println(lineNo + ":" + col + " " + quote(chunk.toString()));
+
+			if ( chunk instanceof String ) {
+				String text = (String)chunk;
+
+				if ( col == 0 ) {
+					if ( indent == -1 ) {
+						// first non-blank line
+						// count number of leading whitespaces
+
+						indent = 0;
+						for ( int j = 0; j < text.length(); j++ ) {
+							if ( !Character.isWhitespace(text.charAt(j)) )
+								break;
+			
+							indent++;
+						}
+					}
+
+					if ( text.length() >= indent ) {
+						int j;
+						for ( j = 0; j < indent ; j++ ) {
+							if ( !Character.isWhitespace(text.charAt(j)) ) {
+								// should do real error reporting here...
+								System.err.println("Warning: badly indented line " + lineNo + " in action:");
+								System.err.println(text);
+								break;
+							}
+						}
+
+						nChunks.set(i, text.substring(j));
+					}
+					else if ( text.trim().length() > 0 ) {
+						// should do real error reporting here...
+						System.err.println("Warning: badly indented line " + lineNo + " in action:");
+						System.err.println(text);
+					}
+				}
+
+				if ( text.endsWith("\n") ) {
+					lineNo++;
+					col = 0;
+				}
+				else {
+					col += text.length();
+				}
+			}
+			else {
+				// not really correct, but all I need is col to increment...
+				col += 1;
+			}
+		}
+
+		return nChunks;
+    }
+}
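// Editor's note: postProcessAction() above determines the indentation of the first non-blank
// line of an embedded action and strips that much leading whitespace from every line, presumably
// so the action body can be re-indented cleanly into the generated Python. A rough standalone
// sketch of that re-indentation idea (illustrative only, not part of this change; it ignores the
// chunk handling, and the helper name is made up):
class ReindentSketch {
    /** Strip the indentation of the first non-blank line from every line. */
    static String reindent(String action) {
        String[] lines = action.split("\n", -1);
        int indent = -1;
        StringBuilder out = new StringBuilder();
        for (String line : lines) {
            if (indent == -1 && line.trim().length() > 0) {
                indent = line.length() - line.replaceAll("^\\s+", "").length();
            }
            if (indent > 0 && line.length() >= indent && line.substring(0, indent).trim().isEmpty()) {
                line = line.substring(indent);
            }
            out.append(line).append('\n');
        }
        return out.toString();
    }

    public static void main(String[] args) {
        // The four leading spaces are removed from both lines.
        System.out.print(reindent("    x = 1\n    print(x)\n"));
    }
}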
diff --git a/tool/src/main/java/org/antlr/codegen/PythonTarget.java b/tool/src/main/java/org/antlr/codegen/PythonTarget.java
new file mode 100644
index 0000000..9df18c2
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/PythonTarget.java
@@ -0,0 +1,226 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+
+Please excuse my obvious lack of Java experience. The code here is probably
+full of WTFs - though IMHO Java is the Real WTF(TM) here...
+
+ */
+
+package org.antlr.codegen;
+
+import org.antlr.runtime.Token;
+import org.antlr.tool.Grammar;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class PythonTarget extends Target {
+    @Override
+    public boolean useBaseTemplatesForSynPredFragments() {
+        return false;
+    }
+    /** Target must be able to override the labels used for token types */
+	@Override
+    public String getTokenTypeAsTargetLabel(CodeGenerator generator,
+					    int ttype) {
+	// use ints for predefined types;
+	// <invalid> <EOR> <DOWN> <UP>
+	if ( ttype >= 0 && ttype <= 3 ) {
+	    return String.valueOf(ttype);
+	}
+
+	String name = generator.grammar.getTokenDisplayName(ttype);
+
+	// If name is a literal, return the token type instead
+	if ( name.charAt(0)=='\'' ) {
+	    return String.valueOf(ttype);
+	}
+
+	return name;
+    }
+
+	@Override
+    public String getTargetCharLiteralFromANTLRCharLiteral(
+            CodeGenerator generator,
+            String literal) {
+	int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+	return String.valueOf(c);
+    }
+
+    private List<String> splitLines(String text) {
+		ArrayList<String> l = new ArrayList<String>();
+		int idx = 0;
+
+		while ( true ) {
+			int eol = text.indexOf("\n", idx);
+			if ( eol == -1 ) {
+				l.add(text.substring(idx));
+				break;
+			}
+			else {
+				l.add(text.substring(idx, eol+1));
+				idx = eol+1;
+			}
+		}
+
+		return l;
+    }
+
+	@Override
+    public List<Object> postProcessAction(List<Object> chunks, Token actionToken) {
+		/* TODO
+		   - check for and report TAB usage
+		 */
+
+		//System.out.println("\n*** Action at " + actionToken.getLine() + ":" + actionToken.getColumn());
+
+		/* First I create a new list of chunks. String chunks are split into
+		   lines and some whitespace may be added at the beginning.
+
+		   As a result I get a list of chunks
+		   - where the first line starts at column 0
+		   - where every LF is at the end of a string chunk
+		*/
+
+		List<Object> nChunks = new ArrayList<Object>();
+		for (int i = 0; i < chunks.size(); i++) {
+			Object chunk = chunks.get(i);
+
+			if ( chunk instanceof String ) {
+				String text = (String)chunks.get(i);
+				if ( nChunks.isEmpty() && actionToken.getCharPositionInLine() >= 0 ) {
+					// first chunk and some 'virtual' WS at beginning
+					// prepend to this chunk
+
+					String ws = "";
+					for ( int j = 0 ; j < actionToken.getCharPositionInLine() ; j++ ) {
+						ws += " ";
+					}
+					text = ws + text;
+				}
+
+				nChunks.addAll(splitLines(text));
+			}
+			else {
+				if ( nChunks.isEmpty() && actionToken.getCharPositionInLine() >= 0 ) {
+					// first chunk and some 'virtual' WS at beginning
+					// add as a chunk of its own
+
+					String ws = "";
+					for ( int j = 0 ; j <= actionToken.getCharPositionInLine() ; j++ ) {
+						ws += " ";
+					}
+					nChunks.add(ws);
+				}
+
+				nChunks.add(chunk);
+			}
+		}
+
+		int lineNo = actionToken.getLine();
+		int col = 0;
+
+		// strip trailing empty lines
+		int lastChunk = nChunks.size() - 1;
+		while ( lastChunk > 0
+				&& nChunks.get(lastChunk) instanceof String
+				&& ((String)nChunks.get(lastChunk)).trim().length() == 0 )
+			lastChunk--;
+
+		// strip leading empty lines
+		int firstChunk = 0;
+		while ( firstChunk <= lastChunk
+				&& nChunks.get(firstChunk) instanceof String
+				&& ((String)nChunks.get(firstChunk)).trim().length() == 0
+				&& ((String)nChunks.get(firstChunk)).endsWith("\n") ) {
+			lineNo++;
+			firstChunk++;
+		}
+
+		int indent = -1;
+		for ( int i = firstChunk ; i <= lastChunk ; i++ ) {
+			Object chunk = nChunks.get(i);
+
+			//System.out.println(lineNo + ":" + col + " " + quote(chunk.toString()));
+
+			if ( chunk instanceof String ) {
+				String text = (String)chunk;
+
+				if ( col == 0 ) {
+					if ( indent == -1 ) {
+						// first non-blank line
+						// count number of leading whitespaces
+
+						indent = 0;
+						for ( int j = 0; j < text.length(); j++ ) {
+							if ( !Character.isWhitespace(text.charAt(j)) )
+								break;
+			
+							indent++;
+						}
+					}
+
+					if ( text.length() >= indent ) {
+						int j;
+						for ( j = 0; j < indent ; j++ ) {
+							if ( !Character.isWhitespace(text.charAt(j)) ) {
+								// should do real error reporting here...
+								System.err.println("Warning: badly indented line " + lineNo + " in action:");
+								System.err.println(text);
+								break;
+							}
+						}
+
+						nChunks.set(i, text.substring(j));
+					}
+					else if ( text.trim().length() > 0 ) {
+						// should do real error reporting here...
+						System.err.println("Warning: badly indented line " + lineNo + " in action:");
+						System.err.println(text);
+					}
+				}
+
+				if ( text.endsWith("\n") ) {
+					lineNo++;
+					col = 0;
+				}
+				else {
+					col += text.length();
+				}
+			}
+			else {
+				// not really correct, but all I need is col to increment...
+				col += 1;
+			}
+		}
+
+		return nChunks;
+    }
+}
diff --git a/tool/src/main/java/org/antlr/codegen/RubyTarget.java b/tool/src/main/java/org/antlr/codegen/RubyTarget.java
new file mode 100644
index 0000000..cb2107f
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/RubyTarget.java
@@ -0,0 +1,489 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2010 Kyle Yetter
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.tool.Grammar;
+import org.stringtemplate.v4.AttributeRenderer;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STGroup;
+
+import java.io.IOException;
+import java.util.*;
+
+public class RubyTarget extends Target
+{
+    /** A set of ruby keywords which are used to escape labels and method names
+     *  which will cause parse errors in the ruby source
+     */
+    public static final Set<String> rubyKeywords =
+    new HashSet<String>() {
+        {
+        	add( "alias" );     add( "END" );     add( "retry" );
+        	add( "and" );       add( "ensure" );  add( "return" );
+        	add( "BEGIN" );     add( "false" );   add( "self" );
+        	add( "begin" );     add( "for" );     add( "super" );
+        	add( "break" );     add( "if" );      add( "then" );
+        	add( "case" );      add( "in" );      add( "true" );
+        	add( "class" );     add( "module" );  add( "undef" );
+        	add( "def" );       add( "next" );    add( "unless" );
+        	add( "defined?" );  add( "nil" );     add( "until" );
+        	add( "do" );        add( "not" );     add( "when" );
+        	add( "else" );      add( "or" );      add( "while" );
+        	add( "elsif" );     add( "redo" );    add( "yield" );
+        	add( "end" );       add( "rescue" );
+        }
+    };
+
+    public static Map<String, Map<String, Object>> sharedActionBlocks = new HashMap<String, Map<String, Object>>();
+
+    public class RubyRenderer implements AttributeRenderer
+    {
+    	protected String[] rubyCharValueEscape = new String[256];
+
+    	public RubyRenderer() {
+    		for ( int i = 0; i < 16; i++ ) {
+    			rubyCharValueEscape[ i ] = "\\x0" + Integer.toHexString( i );
+    		}
+    		for ( int i = 16; i < 32; i++ ) {
+    			rubyCharValueEscape[ i ] = "\\x" + Integer.toHexString( i );
+    		}
+    		for ( char i = 32; i < 127; i++ ) {
+    			rubyCharValueEscape[ i ] = Character.toString( i );
+    		}
+    		for ( int i = 127; i < 256; i++ ) {
+    			rubyCharValueEscape[ i ] = "\\x" + Integer.toHexString( i );
+    		}
+
+    		rubyCharValueEscape['\n'] = "\\n";
+    		rubyCharValueEscape['\r'] = "\\r";
+    		rubyCharValueEscape['\t'] = "\\t";
+    		rubyCharValueEscape['\b'] = "\\b";
+    		rubyCharValueEscape['\f'] = "\\f";
+    		rubyCharValueEscape['\\'] = "\\\\";
+    		rubyCharValueEscape['"'] = "\\\"";
+    	}
+
+		@Override
+        public String toString( Object o, String formatName, Locale locale ) {
+			if ( formatName==null ) {
+				return o.toString();
+			}
+			
+            String idString = o.toString();
+
+            if ( idString.length() == 0 ) return idString;
+
+            if ( formatName.equals( "snakecase" ) ) {
+                return snakecase( idString );
+            } else if ( formatName.equals( "camelcase" ) ) {
+                return camelcase( idString );
+            } else if ( formatName.equals( "subcamelcase" ) ) {
+                return subcamelcase( idString );
+            } else if ( formatName.equals( "constant" ) ) {
+                return constantcase( idString );
+            } else if ( formatName.equals( "platform" ) ) {
+                return platform( idString );
+            } else if ( formatName.equals( "lexerRule" ) ) {
+                return lexerRule( idString );
+            } else if ( formatName.equals( "constantPath" ) ) {
+            	return constantPath( idString );
+            } else if ( formatName.equals( "rubyString" ) ) {
+                return rubyString( idString );
+            } else if ( formatName.equals( "label" ) ) {
+                return label( idString );
+            } else if ( formatName.equals( "symbol" ) ) {
+                return symbol( idString );
+            } else {
+                throw new IllegalArgumentException( "Unsupported format name" );
+            }
+        }
+
+        /** Given an input string, which is presumed
+         * to contain a word that may potentially be camelcased,
+         * convert it to snake_case underscore style.
+         *
+         * algorithm --
+         *   iterate through the string with a sliding window 3 chars wide
+         *
+         * example -- aGUIWhatNot
+         *   c   c+1 c+2  action
+         *   a   G        &lt;&lt; 'a' &lt;&lt; '_'  // a lower-upper word edge
+         *   G   U   I    &lt;&lt; 'g'
+         *   U   I   W    &lt;&lt; 'w'
+         *   I   W   h    &lt;&lt; 'i' &lt;&lt; '_'  // the last character in an acronym run of uppers
+         *   W   h        &lt;&lt; 'w'
+         *   ... and so on
+         */
+        private String snakecase( String value ) {
+            StringBuilder output_buffer = new StringBuilder();
+            int l = value.length();
+            int cliff = l - 1;
+            char cur;
+            char next;
+            char peek;
+
+            if ( value.length() == 0 ) return value;
+            if ( l == 1 ) return value.toLowerCase();
+
+            for ( int i = 0; i < cliff; i++ ) {
+                cur  = value.charAt( i );
+                next = value.charAt( i + 1 );
+
+                if ( Character.isLetter( cur ) ) {
+                    output_buffer.append( Character.toLowerCase( cur ) );
+
+                    if ( Character.isDigit( next ) || Character.isWhitespace( next ) ) {
+                        output_buffer.append( '_' );
+                    } else if ( Character.isLowerCase( cur ) && Character.isUpperCase( next ) ) {
+                        // at camelcase word edge
+                        output_buffer.append( '_' );
+                    } else if ( ( i < cliff - 1 ) && Character.isUpperCase( cur ) && Character.isUpperCase( next ) ) {
+                        // cur is part of an acronym
+
+                        peek = value.charAt( i + 2 );
+                        if ( Character.isLowerCase( peek ) ) {
+                            /* if next is the start of a word (indicated when peek is lowercase)
+                               then the acronym must be completed by appending an underscore */
+                            output_buffer.append( '_' );
+                        }
+                    }
+                } else if ( Character.isDigit( cur ) ) {
+                    output_buffer.append( cur );
+                    if ( Character.isLetter( next ) ) {
+                        output_buffer.append( '_' );
+                    }
+                } else if ( Character.isWhitespace( cur ) ) {
+                    // do nothing
+                } else {
+                    output_buffer.append( cur );
+                }
+
+            }
+
+            cur  = value.charAt( cliff );
+            if ( ! Character.isWhitespace( cur ) ) {
+                output_buffer.append( Character.toLowerCase( cur ) );
+            }
+
+            return output_buffer.toString();
+        }
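+        /* Editor's note: an illustrative sketch of the conversions the method
+         * above is expected to produce (hand-derived, not part of this patch),
+         * assuming a RubyRenderer instance named renderer:
+         *
+         *   renderer.toString( "aGUIWhatNot", "snakecase", null );  // "a_gui_what_not"
+         *   renderer.toString( "ParserRule",  "snakecase", null );  // "parser_rule"
+         *   renderer.toString( "ID",          "snakecase", null );  // "id"
+         */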
+
+        private String constantcase( String value ) {
+            return snakecase( value ).toUpperCase();
+        }
+
+        private String platform( String value ) {
+            return ( "__" + value + "__" );
+        }
+
+        private String symbol( String value ) {
+            if ( value.matches( "[a-zA-Z_]\\w*[\\?\\!\\=]?" ) ) {
+                return ( ":" + value );
+            } else {
+                return ( "%s(" + value + ")" );
+            }
+        }
+
+        private String lexerRule( String value ) {
+            // System.out.print( "lexerRule( \"" + value + "\") => " );
+            if ( value.equals( "Tokens" ) ) {
+                // System.out.println( "\"token!\"" );
+                return "token!";
+            } else {
+                // String result = snakecase( value ) + "!";
+                // System.out.println( "\"" + result + "\"" );
+                return ( snakecase( value ) + "!" );
+            }
+        }
+
+        private String constantPath( String value ) {
+            return value.replaceAll( "\\.", "::" );
+        }
+
+        private String rubyString( String value ) {
+        	StringBuilder output_buffer = new StringBuilder();
+        	int len = value.length();
+
+        	output_buffer.append( '"' );
+        	for ( int i = 0; i < len; i++ ) {
+        		output_buffer.append( rubyCharValueEscape[ value.charAt( i ) ] );
+        	}
+        	output_buffer.append( '"' );
+        	return output_buffer.toString();
+        }
+
+        private String camelcase( String value ) {
+            StringBuilder output_buffer = new StringBuilder();
+            int cliff = value.length();
+            char cur;
+            char next;
+            boolean at_edge = true;
+
+            if ( value.length() == 0 ) return value;
+            if ( cliff == 1 ) return value.toUpperCase();
+
+            for ( int i = 0; i < cliff; i++ ) {
+                cur  = value.charAt( i );
+
+                if ( Character.isWhitespace( cur ) ) {
+                    at_edge = true;
+                    continue;
+                } else if ( cur == '_' ) {
+                    at_edge = true;
+                    continue;
+                } else if ( Character.isDigit( cur ) ) {
+                    output_buffer.append( cur );
+                    at_edge = true;
+                    continue;
+                }
+
+                if ( at_edge ) {
+                    output_buffer.append( Character.toUpperCase( cur ) );
+                    if ( Character.isLetter( cur ) ) at_edge = false;
+                } else {
+                    output_buffer.append( cur );
+                }
+            }
+
+            return output_buffer.toString();
+        }
+
+        private String label( String value ) {
+            if ( rubyKeywords.contains( value ) ) {
+                return platform( value );
+            } else if ( Character.isUpperCase( value.charAt( 0 ) ) &&
+                        ( !value.equals( "FILE" ) ) &&
+                        ( !value.equals( "LINE" ) ) ) {
+                return platform( value );
+            } else if ( value.equals( "FILE" ) ) {
+                return "_FILE_";
+            } else if ( value.equals( "LINE" ) ) {
+                return "_LINE_";
+            } else {
+                return value;
+            }
+        }
+
+        private String subcamelcase( String value ) {
+            value = camelcase( value );
+            if ( value.length() == 0 )
+                return value;
+            Character head = Character.toLowerCase( value.charAt( 0 ) );
+            String tail = value.substring( 1 );
+            return head.toString().concat( tail );
+        }
+    }
+
+	@Override
+    protected void genRecognizerFile(
+    		Tool tool,
+    		CodeGenerator generator,
+    		Grammar grammar,
+    		ST outputFileST
+    ) throws IOException
+    {
+        /*
+            Below is an experimental attempt at providing a few named action blocks
+            that are printed in both lexer and parser files from combined grammars.
+            ANTLR appears to first generate a parser, then generate an independent lexer,
+            and then generate code from that. It keeps the combo/parser grammar object
+            and the lexer grammar object, as well as their respective code generator and
+            target instances, completely independent. So, while a bit hack-ish, this is
+            a solution that should work without having to modify Terence Parr's
+            core tool code.
+
+            - sharedActionBlocks is a class variable containing a hash map
+            - if this method is called with a combo grammar, and the action map
+              in the grammar contains an entry for the named scope "all",
+              add an entry to sharedActionBlocks mapping the grammar name to
+              the "all" action map.
+            - if this method is called with an `implicit lexer'
+              (one that's extracted from a combo grammar), check to see if
+              there's an entry in sharedActionBlocks for the lexer's grammar name.
+            - if there is an action map entry, place it in the lexer's action map
+            - the recognizerFile template has code to place the
+              "all" actions appropriately
+
+            problems:
+              - This solution assumes that the parser will be generated
+                before the lexer. If that changes at some point, this will
+                not work.
+              - I have not investigated how this works with delegation yet
+
+            Kyle Yetter - March 25, 2010
+        */
+
+        if ( grammar.type == Grammar.COMBINED ) {
+            Map<String, Map<String, Object>> actions = grammar.getActions();
+            if ( actions.containsKey( "all" ) ) {
+                sharedActionBlocks.put( grammar.name, actions.get( "all" ) );
+            }
+        } else if ( grammar.implicitLexer ) {
+            if ( sharedActionBlocks.containsKey( grammar.name ) ) {
+                Map<String, Map<String, Object>> actions = grammar.getActions();
+                actions.put( "all", sharedActionBlocks.get( grammar.name ) );
+            }
+        }
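+        /* Editor's note: a hypothetical sketch, not part of this patch.  Given a
+         * combined grammar with an action in the "all" scope, e.g.
+         *
+         *     grammar T;
+         *     @all::header { require 'set' }
+         *     r  : ID ;
+         *     ID : 'a'..'z'+ ;
+         *
+         * the code above stashes the "all" action map under the grammar name when
+         * the parser is generated, then copies it into the implicit lexer's action
+         * map, so the recognizerFile template can emit it in both output files.
+         */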
+
+        STGroup group = generator.getTemplates();
+        RubyRenderer renderer = new RubyRenderer();
+        try {
+            group.registerRenderer( Class.forName( "java.lang.String" ), renderer );
+        } catch ( ClassNotFoundException e ) {
+            // this shouldn't happen
+            System.err.println( "ClassNotFoundException: " + e.getMessage() );
+            e.printStackTrace( System.err );
+        }
+        String fileName =
+            generator.getRecognizerFileName( grammar.name, grammar.type );
+        generator.write( outputFileST, fileName );
+    }
+
+	@Override
+    public String getTargetCharLiteralFromANTLRCharLiteral(
+        CodeGenerator generator,
+        String literal
+    )
+    {
+        int code_point = 0;
+        literal = literal.substring( 1, literal.length() - 1 );
+
+        if ( literal.charAt( 0 ) == '\\' ) {
+            switch ( literal.charAt( 1 ) ) {
+                case    '\\':
+                case    '"':
+                case    '\'':
+                    code_point = literal.codePointAt( 1 );
+                    break;
+                case    'n':
+                    code_point = 10;
+                    break;
+                case    'r':
+                    code_point = 13;
+                    break;
+                case    't':
+                    code_point = 9;
+                    break;
+                case    'b':
+                    code_point = 8;
+                    break;
+                case    'f':
+                    code_point = 12;
+                    break;
+                case    'u':    // Assume unnnn
+                    code_point = Integer.parseInt( literal.substring( 2 ), 16 );
+                    break;
+                default:
+                    System.out.println( "1: hey you didn't account for this: \"" + literal + "\"" );
+                    break;
+            }
+        } else if ( literal.length() == 1 ) {
+            code_point = literal.codePointAt( 0 );
+        } else {
+            System.out.println( "2: hey you didn't account for this: \"" + literal + "\"" );
+        }
+
+        return ( "0x" + Integer.toHexString( code_point ) );
+    }
+
+	@Override
+    public int getMaxCharValue( CodeGenerator generator )
+    {
+        // Versions before 1.9 do not support unicode
+        return 0xFF;
+    }
+
+	@Override
+    public String getTokenTypeAsTargetLabel( CodeGenerator generator, int ttype )
+    {
+        String name = generator.grammar.getTokenDisplayName( ttype );
+        // If name is a literal, return the token type instead
+        if ( name.charAt( 0 )=='\'' ) {
+            return generator.grammar.computeTokenNameFromLiteral( ttype, name );
+        }
+        return name;
+    }
+
+	@Override
+    public boolean isValidActionScope( int grammarType, String scope ) {
+        if ( scope.equals( "all" ) )       {
+            return true;
+        }
+        if ( scope.equals( "token" ) )     {
+            return true;
+        }
+        if ( scope.equals( "module" ) )    {
+            return true;
+        }
+        if ( scope.equals( "overrides" ) ) {
+            return true;
+        }
+
+        switch ( grammarType ) {
+        case Grammar.LEXER:
+            if ( scope.equals( "lexer" ) ) {
+                return true;
+            }
+            break;
+        case Grammar.PARSER:
+            if ( scope.equals( "parser" ) ) {
+                return true;
+            }
+            break;
+        case Grammar.COMBINED:
+            if ( scope.equals( "parser" ) ) {
+                return true;
+            }
+            if ( scope.equals( "lexer" ) ) {
+                return true;
+            }
+            break;
+        case Grammar.TREE_PARSER:
+            if ( scope.equals( "treeparser" ) ) {
+                return true;
+            }
+            break;
+        }
+        return false;
+    }
+
+	@Override
+    public String encodeIntAsCharEscape( final int v ) {
+        final int intValue;
+
+        if ( v == 65535 ) {
+            intValue = -1;
+        } else {
+            intValue = v;
+        }
+
+        return String.valueOf( intValue );
+    }
+}
diff --git a/tool/src/main/java/org/antlr/codegen/Target.java b/tool/src/main/java/org/antlr/codegen/Target.java
new file mode 100644
index 0000000..0a88326
--- /dev/null
+++ b/tool/src/main/java/org/antlr/codegen/Target.java
@@ -0,0 +1,364 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.codegen;
+
+import org.antlr.Tool;
+import org.antlr.analysis.Label;
+import org.antlr.runtime.Token;
+import org.stringtemplate.v4.ST;
+import org.antlr.tool.Grammar;
+
+import java.io.IOException;
+import java.util.List;
+
+/** The code generator for ANTLR can usually be retargeted just by providing
+ *  a new X.stg file for language X; however, sometimes the files that must
+ *  be generated vary enough that some X-specific functionality is required.
+ *  For example, in C, you must generate header files whereas in Java you do not.
+ *  Other languages may want to keep DFA separate from the main
+ *  generated recognizer file.
+ *
+ *  The notion of a Code Generator target abstracts out the creation
+ *  of the various files.  As new language targets get added to the ANTLR
+ *  system, this target class may have to be altered to handle more
+ *  functionality.  Eventually, just about all language generation issues
+ *  will be expressible in terms of these methods.
+ *
+ *  If an org.antlr.codegen.XTarget class exists, it is used; otherwise the
+ *  Target base class is used.  I am using a superclass rather than an
+ *  interface for this target concept because I can add functionality
+ *  later without breaking previously written targets (extra interface
+ *  methods would force adding dummy functions to all code generator
+ *  target classes).
+ *
+ */
+public class Target {
+
+	/** For pure strings of Java 16-bit unicode chars, how can we display
+	 *  them in the target language as literals?  Useful for dumping
+	 *  predicates and such that may refer to chars that need to be escaped
+	 *  when represented as strings.  Also, templates need to be escaped so
+	 *  that the target language can hold them as a string.
+	 *
+	 *  I have defined (via the constructor) the set of typical escapes,
+	 *  but your Target subclass is free to alter the translated chars or
+	 *  add more definitions.  This is nonstatic so each target can have
+	 *  a different set in memory at the same time.
+	 */
+	protected String[] targetCharValueEscape = new String[255];
+
+	public Target() {
+		targetCharValueEscape['\n'] = "\\n";
+		targetCharValueEscape['\r'] = "\\r";
+		targetCharValueEscape['\t'] = "\\t";
+		targetCharValueEscape['\b'] = "\\b";
+		targetCharValueEscape['\f'] = "\\f";
+		targetCharValueEscape['\\'] = "\\\\";
+		targetCharValueEscape['\''] = "\\'";
+		targetCharValueEscape['"'] = "\\\"";
+	}
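+	/* Editor's note: an illustrative sketch, not part of this patch.  A target
+	 * for a hypothetical language "Foo" is picked up by class name
+	 * (org.antlr.codegen.FooTarget) and only needs to override what differs:
+	 *
+	 *   package org.antlr.codegen;
+	 *   public class FooTarget extends Target {
+	 *       @Override
+	 *       public int getMaxCharValue(CodeGenerator generator) {
+	 *           return 0xFF; // assume Foo's runtime is 8-bit only
+	 *       }
+	 *   }
+	 */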
+
+    public boolean useBaseTemplatesForSynPredFragments() {
+        return true;
+    }
+
+	protected void genRecognizerFile(Tool tool,
+									 CodeGenerator generator,
+									 Grammar grammar,
+									 ST outputFileST)
+		throws IOException
+	{
+		String fileName =
+			generator.getRecognizerFileName(grammar.name, grammar.type);
+		generator.write(outputFileST, fileName);
+	}
+
+	protected void genRecognizerHeaderFile(Tool tool,
+										   CodeGenerator generator,
+										   Grammar grammar,
+										   ST headerFileST,
+										   String extName) // e.g., ".h"
+		throws IOException
+	{
+		// no header file by default
+	}
+
+	protected void performGrammarAnalysis(CodeGenerator generator,
+										  Grammar grammar)
+	{
+		// Build NFAs from the grammar AST
+		grammar.buildNFA();
+
+		// Create the DFA predictors for each decision
+		grammar.createLookaheadDFAs();
+	}
+
+	/** Is scope in @scope::name {action} valid for this kind of grammar?
+	 *  Targets like C++ may want to allow new scopes like headerfile or
+	 *  some such.  The action names themselves are not policed at the
+	 *  moment so targets can add template actions w/o having to recompile
+	 *  ANTLR.
+	 */
+	public boolean isValidActionScope(int grammarType, String scope) {
+		switch (grammarType) {
+			case Grammar.LEXER :
+				if ( scope.equals("lexer") ) {return true;}
+				break;
+			case Grammar.PARSER :
+				if ( scope.equals("parser") ) {return true;}
+				break;
+			case Grammar.COMBINED :
+				if ( scope.equals("parser") ) {return true;}
+				if ( scope.equals("lexer") ) {return true;}
+				break;
+			case Grammar.TREE_PARSER :
+				if ( scope.equals("treeparser") ) {return true;}
+				break;
+		}
+		return false;
+	}
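+	/* Editor's note: an illustrative (hypothetical) grammar fragment, not part
+	 * of this patch.  For a combined grammar, named actions such as
+	 *
+	 *   @parser::members { int count = 0; }
+	 *   @lexer::header   { import java.util.*; }
+	 *
+	 * use the "parser" and "lexer" scopes accepted above, while a scope such as
+	 * @treeparser::members would be rejected for that grammar type.
+	 */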
+
+	/** Target must be able to override the labels used for token types */
+	public String getTokenTypeAsTargetLabel(CodeGenerator generator, int ttype) {
+		String name = generator.grammar.getTokenDisplayName(ttype);
+		// If name is a literal, return the token type instead
+		if ( name.charAt(0)=='\'' ) {
+			return String.valueOf(ttype);
+		}
+		return name;
+	}
+
+	/** Convert from an ANTLR char literal found in a grammar file to
+	 *  an equivalent char literal in the target language.  For most
+	 *  languages, this means leaving 'x' as 'x'.  Actually, we need
+	 *  to escape '\u000A' so that it doesn't get converted to \n by
+	 *  the compiler.  Convert the literal to the char value and then
+	 *  to an appropriate target char literal.
+	 *
+	 *  Expect single quotes around the incoming literal.
+	 */
+	public String getTargetCharLiteralFromANTLRCharLiteral(
+		CodeGenerator generator,
+		String literal)
+	{
+		StringBuilder buf = new StringBuilder();
+		buf.append('\'');
+		int c = Grammar.getCharValueFromGrammarCharLiteral(literal);
+		if ( c<Label.MIN_CHAR_VALUE ) {
+			return "'\u0000'";
+		}
+		if ( c<targetCharValueEscape.length &&
+			 targetCharValueEscape[c]!=null )
+		{
+			buf.append(targetCharValueEscape[c]);
+		}
+		else if ( Character.UnicodeBlock.of((char)c)==
+				  Character.UnicodeBlock.BASIC_LATIN &&
+				  !Character.isISOControl((char)c) )
+		{
+			// normal char
+			buf.append((char)c);
+		}
+		else {
+			// must be something unprintable...use \\uXXXX
+			// turn on the bit above max "\\uFFFF" value so that we pad with zeros
+			// then only take last 4 digits
+			String hex = Integer.toHexString(c|0x10000).toUpperCase().substring(1,5);
+			buf.append("\\u");
+			buf.append(hex);
+		}
+
+		buf.append('\'');
+		return buf.toString();
+	}
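+	/* Editor's note (illustrative, hand-derived, not part of this patch),
+	 * assuming Grammar.getCharValueFromGrammarCharLiteral decodes the ANTLR
+	 * escapes as usual:
+	 *
+	 *   'a'       ->  'a'        (printable BASIC_LATIN char, kept as-is)
+	 *   '\n'      ->  '\n'       (mapped through targetCharValueEscape)
+	 *   '\u20AC'  ->  '\u20AC'   (outside BASIC_LATIN, emitted as \\uXXXX)
+	 */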
+
+	/** Convert from an ANTLR string literal found in a grammar file to
+	 *  an equivalent string literal in the target language.  For Java, this
+	 *  is the translation 'a\n"' &rarr; "a\n\"".  Expect single quotes
+	 *  around the incoming literal.  Just flip the quotes and replace
+	 *  double quotes with \"
+     * 
+     *  Note that we have decided to allow people to use '\"' without
+     *  penalty, so we must build the target string in a loop as Utils.replace
+     *  cannot handle both \" and " without a lot of messing around.
+     * 
+	 */
+	public String getTargetStringLiteralFromANTLRStringLiteral(
+		CodeGenerator generator,
+		String literal)
+	{
+        StringBuilder sb = new StringBuilder();
+        StringBuilder is = new StringBuilder(literal);
+        
+        // Opening quote
+        //
+        sb.append('"');
+        
+        for (int i = 1; i < is.length() -1; i++) {
+            if  (is.charAt(i) == '\\') {
+                // Anything escaped is what it is! We assume that
+                // people know how to escape characters correctly. However,
+                // we catch anything that does not need an escape in Java
+                // (which is what the default implementation deals with) and
+                // remove the escape. The C target does this, for instance.
+                //
+                switch (is.charAt(i+1)) {
+                    // Pass through any escapes that Java also needs
+                    //
+                    case    '"':
+                    case    'n':
+                    case    'r':
+                    case    't':
+                    case    'b':
+                    case    'f':
+                    case    '\\':
+                    case    'u':    // Assume unnnn
+                        sb.append('\\');    // Pass the escape through
+                        break;
+                    default:
+                        // Remove the escape by virtue of not adding it here
+                        // Thus \' becomes ' and so on
+                        //
+                        break;
+                }
+                
+                // Go past the \ character
+                //
+                i++;
+            } else {
+                // Characters that don't need \ in ANTLR 'strings' but do in Java
+                //
+                if (is.charAt(i) == '"') {
+                    // We need to escape " in Java
+                    //
+                    sb.append('\\');
+                }
+            }
+            // Add in the next character, which may have been escaped
+            //
+            sb.append(is.charAt(i));   
+        }
+        
+        // Append closing " and return
+        //
+        sb.append('"');
+        
+		return sb.toString();
+	}
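+	/* Editor's note (illustrative, hand-derived, not part of this patch).
+	 * Given ANTLR literals as they appear in a grammar:
+	 *
+	 *   'hi"there'  ->  "hi\"there"   (a bare " is escaped for Java)
+	 *   'it\'s'     ->  "it's"        (the unneeded \' escape is dropped)
+	 *   'a\nb'      ->  "a\nb"        (escapes Java also needs pass through)
+	 */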
+
+	/** Given a random string of Java unicode chars, return a new string with
+	 *  optionally appropriate quote characters for target language and possibly
+	 *  with some escaped characters.  For example, if the incoming string has
+	 *  actual newline characters, the output of this method would convert them
+	 *  to the two char sequence \n for Java, C, C++, ...  The new string has
+	 *  double-quotes around it as well.  Example String in memory:
+	 *
+	 *     a"[newlinechar]b'c[carriagereturnchar]d[tab]e\f
+	 *
+	 *  would be converted to the valid Java string:
+	 *
+	 *     "a\"\nb'c\rd\te\\f"
+	 *
+	 *  or
+	 *
+	 *     a\"\nb'c\rd\te\\f
+	 *
+	 *  depending on the quoted arg.
+	 */
+	public String getTargetStringLiteralFromString(String s, boolean quoted) {
+		if ( s==null ) {
+			return null;
+		}
+
+		StringBuilder buf = new StringBuilder();
+		if ( quoted ) {
+			buf.append('"');
+		}
+		for (int i=0; i<s.length(); i++) {
+			int c = s.charAt(i);
+			if ( c!='\'' && // don't escape single quotes in strings for java
+				 c<targetCharValueEscape.length &&
+				 targetCharValueEscape[c]!=null )
+			{
+				buf.append(targetCharValueEscape[c]);
+			}
+			else {
+				buf.append((char)c);
+			}
+		}
+		if ( quoted ) {
+			buf.append('"');
+		}
+		return buf.toString();
+	}
+
+	public String getTargetStringLiteralFromString(String s) {
+		return getTargetStringLiteralFromString(s, false);
+	}
+
+	/** Convert long to 0xNNNNNNNNNNNNNNNN by default for spitting out
+	 *  with bitsets.  I.e., convert bytes to hex string.
+	 */
+	public String getTarget64BitStringFromValue(long word) {
+		int numHexDigits = 8*2;
+		StringBuilder buf = new StringBuilder(numHexDigits+2);
+		buf.append("0x");
+		String digits = Long.toHexString(word);
+		digits = digits.toUpperCase();
+		int padding = numHexDigits - digits.length();
+		// pad left with zeros
+		for (int i=1; i<=padding; i++) {
+			buf.append('0');
+		}
+		buf.append(digits);
+		return buf.toString();
+	}
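+	/* Editor's note (illustrative, not part of this patch):
+	 *   getTarget64BitStringFromValue(255L)  ->  "0x00000000000000FF"
+	 * i.e. 16 hex digits, zero-padded on the left. */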
+
+	public String encodeIntAsCharEscape(int v) {
+		if ( v<=127 ) {
+			return "\\"+Integer.toOctalString(v);
+		}
+		String hex = Integer.toHexString(v|0x10000).substring(1,5);
+		return "\\u"+hex;
+	}
+
+	/** Some targets only support ASCII or 8-bit chars/strings.  For example,
+	 *  C++ will probably want to return 0xFF here.
+	 */
+	public int getMaxCharValue(CodeGenerator generator) {
+		return Label.MAX_CHAR_VALUE;
+	}
+
+	/** Give target a chance to do some postprocessing on actions.
+	 *  Python, for example, will have to fix the indentation.
+	 */
+	public List<Object> postProcessAction(List<Object> chunks, Token actionToken) {
+		return chunks;
+	}
+
+}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/Barrier.java b/tool/src/main/java/org/antlr/misc/Barrier.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/misc/Barrier.java
rename to tool/src/main/java/org/antlr/misc/Barrier.java
diff --git a/tool/src/main/java/org/antlr/misc/BitSet.java b/tool/src/main/java/org/antlr/misc/BitSet.java
new file mode 100644
index 0000000..c85b923
--- /dev/null
+++ b/tool/src/main/java/org/antlr/misc/BitSet.java
@@ -0,0 +1,581 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.misc;
+
+import org.antlr.analysis.Label;
+import org.antlr.tool.Grammar;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**A BitSet to replace java.util.BitSet.
+ *
+ * Primary differences are that most set operators return new sets
+ * as opposed to oring and anding "in place".  Further, a number of
+ * operations were added.  I cannot contain a BitSet because there
+ * is no way to access the internal bits (which I need for speed)
+ * and, because it is final, I cannot subclass to add functionality.
+ * Consider defining set degree.  Without access to the bits, I must
+ * call a method n times to test the ith bit...ack!
+ *
+ * Also seems like or() from util is wrong when size of incoming set is bigger
+ * than this.bits.length.
+ *
+ * @author Terence Parr
+ */
+public class BitSet implements IntSet, Cloneable {
+    protected final static int BITS = 64;    // number of bits / long
+    protected final static int LOG_BITS = 6; // 2^6 == 64
+
+    /* We will often need to do a mod operation (i mod nbits).  It
+     * turns out that, for powers of two, this mod operation is the
+     * same as (i & (nbits-1)).  Since mod is slow, we use a
+     * precomputed mod mask to do the mod instead.
+     */
+    protected final static int MOD_MASK = BITS - 1;
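+    /* Editor's note (illustrative, not part of this patch): with BITS = 64 and
+     * MOD_MASK = 63, an element such as 70 lands in word 70 >> 6 == 1 at bit
+     * position 70 & 63 == 6, the same result as 70 % 64 but without a division. */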
+
+    /** The actual data bits */
+    protected long bits[];
+
+    /** Construct a bitset of size one word (64 bits) */
+    public BitSet() {
+        this(BITS);
+    }
+
+    /** Construction from a static array of longs */
+    public BitSet(long[] bits_) {
+        bits = bits_;
+    }
+
+    /** Construct a bitset given the size
+     * @param nbits The size of the bitset in bits
+     */
+    public BitSet(int nbits) {
+        bits = new long[((nbits - 1) >> LOG_BITS) + 1];
+    }
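+    /* Editor's note: a small usage sketch (illustrative, not part of this patch):
+     *
+     *   BitSet s = BitSet.of(3);   // starts with a single 64-bit word
+     *   s.add(70);                 // grows to a second word automatically
+     *   boolean in = s.member(70); // true
+     *   int[] elems = s.toArray(); // { 3, 70 }
+     */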
+
+    /** or this element into this set (grow as necessary to accommodate) */
+	@Override
+    public void add(int el) {
+        //System.out.println("add("+el+")");
+        int n = wordNumber(el);
+        //System.out.println("word number is "+n);
+        //System.out.println("bits.length "+bits.length);
+        if (n >= bits.length) {
+            growToInclude(el);
+        }
+        bits[n] |= bitMask(el);
+    }
+
+	@Override
+    public void addAll(IntSet set) {
+        if ( set instanceof BitSet ) {
+            this.orInPlace((BitSet)set);
+        }
+		else if ( set instanceof IntervalSet ) {
+			IntervalSet other = (IntervalSet)set;
+			// walk set and add each interval
+			for (Interval I : other.intervals) {
+				this.orInPlace(BitSet.range(I.a,I.b));
+			}
+		}
+		else {
+			throw new IllegalArgumentException("can't add "+
+											   set.getClass().getName()+
+											   " to BitSet");
+		}
+    }
+
+	public void addAll(int[] elements) {
+		if ( elements==null ) {
+			return;
+		}
+		for (int i = 0; i < elements.length; i++) {
+			int e = elements[i];
+			add(e);
+		}
+	}
+
+	public void addAll(Iterable<Integer> elements) {
+		if ( elements==null ) {
+			return;
+		}
+		for (Integer element : elements) {
+			add(element);
+		}
+		/*
+		int n = elements.size();
+		for (int i = 0; i < n; i++) {
+			Object o = elements.get(i);
+			if ( !(o instanceof Integer) ) {
+				throw new IllegalArgumentException();
+			}
+			Integer eI = (Integer)o;
+			add(eI.intValue());
+		}
+		 */
+	}
+
+	@Override
+    public IntSet and(IntSet a) {
+        BitSet s = (BitSet)this.clone();
+        s.andInPlace((BitSet)a);
+        return s;
+    }
+
+    public void andInPlace(BitSet a) {
+        int min = Math.min(bits.length, a.bits.length);
+        for (int i = min - 1; i >= 0; i--) {
+            bits[i] &= a.bits[i];
+        }
+        // clear all bits in this not present in a (if this bigger than a).
+        for (int i = min; i < bits.length; i++) {
+            bits[i] = 0;
+        }
+    }
+
+    private static long bitMask(int bitNumber) {
+        int bitPosition = bitNumber & MOD_MASK; // bitNumber mod BITS
+        return 1L << bitPosition;
+    }
+
+    public void clear() {
+        for (int i = bits.length - 1; i >= 0; i--) {
+            bits[i] = 0;
+        }
+    }
+
+    public void clear(int el) {
+        int n = wordNumber(el);
+        if (n >= bits.length) {	// grow as necessary to accommodate
+            growToInclude(el);
+        }
+        bits[n] &= ~bitMask(el);
+    }
+
+	@Override
+    public Object clone() {
+        BitSet s;
+        try {
+            s = (BitSet)super.clone();
+            s.bits = new long[bits.length];
+            System.arraycopy(bits, 0, s.bits, 0, bits.length);
+        }
+        catch (CloneNotSupportedException e) {
+            throw new InternalError();
+        }
+        return s;
+    }
+
+	@Override
+    public int size() {
+        int deg = 0;
+        for (int i = bits.length - 1; i >= 0; i--) {
+            long word = bits[i];
+            if (word != 0L) {
+                for (int bit = BITS - 1; bit >= 0; bit--) {
+                    if ((word & (1L << bit)) != 0) {
+                        deg++;
+                    }
+                }
+            }
+        }
+        return deg;
+    }
+
+	@Override
+    public boolean equals(Object other) {
+        if ( other == null || !(other instanceof BitSet) ) {
+            return false;
+        }
+
+        BitSet otherSet = (BitSet)other;
+
+        int n = Math.min(this.bits.length, otherSet.bits.length);
+
+        // for any bits in common, compare
+        for (int i=0; i<n; i++) {
+            if (this.bits[i] != otherSet.bits[i]) {
+                return false;
+            }
+        }
+
+        // make sure any extra bits are off
+
+        if (this.bits.length > n) {
+            for (int i = n; i<this.bits.length; i++) {
+                if (this.bits[i] != 0) {
+                    return false;
+                }
+            }
+        }
+        else if (otherSet.bits.length > n) {
+            for (int i = n; i<otherSet.bits.length; i++) {
+                if (otherSet.bits[i] != 0) {
+                    return false;
+                }
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * Grows the set to a larger number of bits.
+     * @param bit element that must fit in set
+     */
+    public void growToInclude(int bit) {
+        int newSize = Math.max(bits.length << 1, numWordsToHold(bit));
+        long newbits[] = new long[newSize];
+        System.arraycopy(bits, 0, newbits, 0, bits.length);
+        bits = newbits;
+    }
+
+	@Override
+    public boolean member(int el) {
+        int n = wordNumber(el);
+        if (n >= bits.length) return false;
+        return (bits[n] & bitMask(el)) != 0;
+    }
+
+    /** Get the first element you find and return it.  Return Label.INVALID
+     *  otherwise.
+     */
+	@Override
+    public int getSingleElement() {
+        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+            if (member(i)) {
+                return i;
+            }
+        }
+        return Label.INVALID;
+    }
+
+	@Override
+    public boolean isNil() {
+        for (int i = bits.length - 1; i >= 0; i--) {
+            if (bits[i] != 0) return false;
+        }
+        return true;
+    }
+
+    public IntSet complement() {
+        BitSet s = (BitSet)this.clone();
+        s.notInPlace();
+        return s;
+    }
+
+	@Override
+    public IntSet complement(IntSet set) {
+		if ( set==null ) {
+			return this.complement();
+		}
+        return set.subtract(this);
+    }
+
+    public void notInPlace() {
+        for (int i = bits.length - 1; i >= 0; i--) {
+            bits[i] = ~bits[i];
+        }
+    }
+
+    /** complement bits in the range 0..maxBit. */
+    public void notInPlace(int maxBit) {
+        notInPlace(0, maxBit);
+    }
+
+    /** complement bits in the range minBit..maxBit.*/
+    public void notInPlace(int minBit, int maxBit) {
+        // make sure that we have room for maxBit
+        growToInclude(maxBit);
+        for (int i = minBit; i <= maxBit; i++) {
+            int n = wordNumber(i);
+            bits[n] ^= bitMask(i);
+        }
+    }
+
+    private int numWordsToHold(int el) {
+        return (el >> LOG_BITS) + 1;
+    }
+
+    public static BitSet of(int el) {
+        BitSet s = new BitSet(el + 1);
+        s.add(el);
+        return s;
+    }
+
+    public static BitSet of(Collection<? extends Integer> elements) {
+        BitSet s = new BitSet();
+        for (Integer el : elements) {
+            s.add(el);
+        }
+        return s;
+    }
+
+	public static BitSet of(IntSet set) {
+		if ( set==null ) {
+			return null;
+		}
+
+		if ( set instanceof BitSet ) {
+			return (BitSet)set;
+		}
+		if ( set instanceof IntervalSet ) {
+			BitSet s = new BitSet();
+			s.addAll(set);
+			return s;
+		}
+		throw new IllegalArgumentException("can't create BitSet from "+set.getClass().getName());
+	}
+
+    public static BitSet of(Map<? extends Integer, ?> elements) {
+        return BitSet.of(elements.keySet());
+    }
+
+	public static BitSet range(int a, int b) {
+		BitSet s = new BitSet(b + 1);
+		for (int i = a; i <= b; i++) {
+			int n = wordNumber(i);
+			s.bits[n] |= bitMask(i);
+		}
+		return s;
+	}
+
+    /** return this | a in a new set */
+	@Override
+    public IntSet or(IntSet a) {
+		if ( a==null ) {
+			return this;
+		}
+        BitSet s = (BitSet)this.clone();
+        s.orInPlace((BitSet)a);
+        return s;
+    }
+
+    public void orInPlace(BitSet a) {
+		if ( a==null ) {
+			return;
+		}
+        // If this is smaller than a, grow this first
+        if (a.bits.length > bits.length) {
+            setSize(a.bits.length);
+        }
+        int min = Math.min(bits.length, a.bits.length);
+        for (int i = min - 1; i >= 0; i--) {
+            bits[i] |= a.bits[i];
+        }
+    }
+
+    // remove this element from this set
+	@Override
+    public void remove(int el) {
+        int n = wordNumber(el);
+        if (n >= bits.length) {
+            growToInclude(el);
+        }
+        bits[n] &= ~bitMask(el);
+    }
+
+    /**
+     * Sets the size of a set.
+     * @param nwords how many words the new set should be
+     */
+    private void setSize(int nwords) {
+        long newbits[] = new long[nwords];
+        int n = Math.min(nwords, bits.length);
+        System.arraycopy(bits, 0, newbits, 0, n);
+        bits = newbits;
+    }
+
+    public int numBits() {
+        return bits.length << LOG_BITS; // num words * bits per word
+    }
+
+    /** return how much space is being used by the bits array not
+     *  how many actually have member bits on.
+     */
+    public int lengthInLongWords() {
+        return bits.length;
+    }
+
+    /**Is this contained within a? */
+    public boolean subset(BitSet a) {
+        if (a == null) return false;
+        return this.and(a).equals(this);
+    }
+
+    /**Subtract the elements of 'a' from 'this' in-place.
+     * Basically, just turn off all bits of 'this' that are in 'a'.
+     */
+    public void subtractInPlace(BitSet a) {
+        if (a == null) return;
+        // for all words of 'a', turn off corresponding bits of 'this'
+        for (int i = 0; i < bits.length && i < a.bits.length; i++) {
+            bits[i] &= ~a.bits[i];
+        }
+    }
+
+	@Override
+    public IntSet subtract(IntSet a) {
+        if (a == null || !(a instanceof BitSet)) return null;
+
+        BitSet s = (BitSet)this.clone();
+        s.subtractInPlace((BitSet)a);
+        return s;
+    }
+
+	@Override
+	public List<Integer> toList() {
+		throw new NoSuchMethodError("BitSet.toList() unimplemented");
+	}
+
+    public int[] toArray() {
+        int[] elems = new int[size()];
+        int en = 0;
+        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+            if (member(i)) {
+                elems[en++] = i;
+            }
+        }
+        return elems;
+    }
+
+    public long[] toPackedArray() {
+        return bits;
+    }
+
+	@Override
+    public String toString() {
+        return toString(null);
+    }
+
+    /** Transform a bit set into a string by formatting each element as an
+     * integer, or as its token display name when a Grammar is supplied.
+     * @return A comma-separated list of values
+     */
+	@Override
+    public String toString(Grammar g) {
+        StringBuilder buf = new StringBuilder();
+        String separator = ",";
+		boolean havePrintedAnElement = false;
+		buf.append('{');
+
+        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+            if (member(i)) {
+                if (i > 0 && havePrintedAnElement ) {
+                    buf.append(separator);
+                }
+                if ( g!=null ) {
+                    buf.append(g.getTokenDisplayName(i));
+                }
+                else {
+                    buf.append(i);
+                }
+				havePrintedAnElement = true;
+            }
+        }
+		buf.append('}');
+        return buf.toString();
+    }
+
+    /**Create a string representation where, instead of integer elements, the
+     * ith element of the vocabulary list is displayed.
+     * @param separator The string to put in between elements
+     * @param vocabulary Display strings indexed by element value; elements with
+     *        no entry fall back to a character constant
+     * @return A separator-delimited list of values.
+     */
+    public String toString(String separator, List<String> vocabulary) {
+        if (vocabulary == null) {
+            return toString(null);
+        }
+        String str = "";
+        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+            if (member(i)) {
+                if (str.length() > 0) {
+                    str += separator;
+                }
+                if (i >= vocabulary.size()) {
+                    str += "'" + (char)i + "'";
+                }
+                else if (vocabulary.get(i) == null) {
+                    str += "'" + (char)i + "'";
+                }
+                else {
+                    str += vocabulary.get(i);
+                }
+            }
+        }
+        return str;
+    }
+
+    /**
+     * Dump a comma-separated list of the words making up the bit set.
+     * Split each 64 bit number into two more manageable 32 bit numbers.
+     * This generates a comma-separated list of C++-like unsigned long constants.
+     */
+    public String toStringOfHalfWords() {
+        StringBuilder s = new StringBuilder();
+        for (int i = 0; i < bits.length; i++) {
+            if (i != 0) s.append(", ");
+            long tmp = bits[i];
+            tmp &= 0xFFFFFFFFL;
+            s.append(tmp);
+			s.append("UL");
+            s.append(", ");
+            tmp = bits[i] >>> 32;
+            tmp &= 0xFFFFFFFFL;
+			s.append(tmp);
+			s.append("UL");
+        }
+		return s.toString();
+    }
+
+    /**
+     * Dump a comma-separated list of the words making up the bit set.
+     * This generates a comma-separated list of Java-like long int constants.
+     */
+    public String toStringOfWords() {
+		StringBuilder s = new StringBuilder();
+        for (int i = 0; i < bits.length; i++) {
+            if (i != 0) s.append(", ");
+            s.append(bits[i]);
+			s.append("L");
+        }
+        return s.toString();
+    }
+
+    public String toStringWithRanges() {
+        return toString();
+    }
+
+    private static int wordNumber(int bit) {
+        return bit >> LOG_BITS; // bit / BITS
+    }
+}
diff --git a/tool/src/main/java/org/antlr/misc/Graph.java b/tool/src/main/java/org/antlr/misc/Graph.java
new file mode 100644
index 0000000..5df5ac1
--- /dev/null
+++ b/tool/src/main/java/org/antlr/misc/Graph.java
@@ -0,0 +1,107 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.misc;
+
+import java.util.*;
+
+/** A generic graph with edges; each node has a single Object payload.
+ *  This is only used to topologically sort a list of file dependencies
+ *  at the moment.
+ */
+public class Graph<T> {
+
+    public static class Node<T> {
+        T payload;
+        List<Node<T>> edges; // points at which nodes?
+
+        public Node(T payload) { this.payload = payload; }
+
+        public void addEdge(Node<T> n) {
+            if ( edges==null ) edges = new ArrayList<Node<T>>();
+            if ( !edges.contains(n) ) edges.add(n);
+        }
+
+		@Override
+        public String toString() { return payload.toString(); }
+    }
+
+    /** Map from node payload to node containing it */
+    protected Map<T,Node<T>> nodes = new HashMap<T,Node<T>>();
+
+    public void addEdge(T a, T b) {
+        //System.out.println("add edge "+a+" to "+b);
+        Node<T> a_node = getNode(a);
+        Node<T> b_node = getNode(b);
+        a_node.addEdge(b_node);
+    }
+
+    protected Node<T> getNode(T a) {
+        Node<T> existing = nodes.get(a);
+        if ( existing!=null ) return existing;
+        Node<T> n = new Node<T>(a);
+        nodes.put(a, n);
+        return n;
+    }
+
+    /** DFS-based topological sort.  A valid sort is the reverse of
+     *  the post-order DFS traversal.  Amazingly simple but true.
+     *  For sorting, I'm not following convention here since ANTLR
+     *  needs the opposite.  Here's what I assume for sorting:
+     *
+     *    If there exists an edge u &rarr; v then u depends on v and v
+     *    must happen before u.
+     *
+     *  So if this gives nonreversed postorder traversal, I get the order
+     *  I want.
+     */
+    public List<T> sort() {
+        Set<Node<T>> visited = new OrderedHashSet<Node<T>>();
+        ArrayList<T> sorted = new ArrayList<T>();
+        while ( visited.size() < nodes.size() ) {
+            // pick any unvisited node, n
+            Node<T> n = null;
+            for (Node<T> tNode : nodes.values()) {
+                n = tNode;
+                if ( !visited.contains(n) ) break;
+            }
+            DFS(n, visited, sorted);
+        }
+        return sorted;
+    }
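+    /* Editor's note: an illustrative sketch (not part of this patch), using
+     * hypothetical file names and the "u depends on v" edge convention above:
+     *
+     *   Graph<String> g = new Graph<String>();
+     *   g.addEdge("Java.g", "Common.g");    // Java.g depends on Common.g
+     *   g.addEdge("Common.g", "Lexer.g");   // Common.g depends on Lexer.g
+     *   List<String> order = g.sort();      // [Lexer.g, Common.g, Java.g]
+     *
+     * Dependencies come out before the files that need them.
+     */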
+
+    public void DFS(Node<T> n, Set<Node<T>> visited, ArrayList<T> sorted) {
+        if ( visited.contains(n) ) return;
+        visited.add(n);
+        if ( n.edges!=null ) {
+            for (Node<T> target : n.edges) {
+                DFS(target, visited, sorted);
+            }
+        }
+        sorted.add(n.payload);
+    }
+}
\ No newline at end of file
diff --git a/tool/src/main/java/org/antlr/misc/IntArrayList.java b/tool/src/main/java/org/antlr/misc/IntArrayList.java
new file mode 100644
index 0000000..f1ee077
--- /dev/null
+++ b/tool/src/main/java/org/antlr/misc/IntArrayList.java
@@ -0,0 +1,158 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.misc;
+
+import java.util.AbstractList;
+
+/** An ArrayList based upon int members.  Not quite a real implementation of a
+ *  modifiable list as I don't do, for example, add(index,element).
+ *  TODO: unused?
+ */
+public class IntArrayList extends AbstractList<Integer> implements Cloneable {
+	private static final int DEFAULT_CAPACITY = 10;
+	protected int n = 0;
+	protected int[] elements = null;
+
+	public IntArrayList() {
+		this(DEFAULT_CAPACITY);
+	}
+
+	public IntArrayList(int initialCapacity) {
+		elements = new int[initialCapacity];
+	}
+
+	/** Set the ith element.  Unlike ArrayList.set, this also sets the size to i when i is at or past the current size. */
+	public int set(int i, int newValue) {
+		if ( i>=n ) {
+			setSize(i); // unlike definition of set in ArrayList, set size
+		}
+		int v = elements[i];
+		elements[i] = newValue;
+		return v;
+	}
+
+	public boolean add(int o) {
+		if ( n>=elements.length ) {
+			grow();
+		}
+		elements[n] = o;
+		n++;
+		return true;
+	}
+
+	public void setSize(int newSize) {
+		if ( newSize>=elements.length ) {
+            ensureCapacity(newSize);
+		}
+		n = newSize;
+	}
+
+	protected void grow() {
+		ensureCapacity((elements.length * 3)/2 + 1);
+	}
+
+	public boolean contains(int v) {
+		for (int i = 0; i < n; i++) {
+			int element = elements[i];
+			if ( element == v ) {
+				return true;
+			}
+		}
+		return false;
+	}
+
+	public void ensureCapacity(int newCapacity) {
+		int oldCapacity = elements.length;
+		if (n>=oldCapacity) {
+			int oldData[] = elements;
+			elements = new int[newCapacity];
+			System.arraycopy(oldData, 0, elements, 0, n);
+		}
+	}
+
+	@Override
+	public Integer get(int i) {
+		return Utils.integer(element(i));
+	}
+
+	public int element(int i) {
+		return elements[i];
+	}
+
+	public int[] elements() {
+		int[] a = new int[n];
+		System.arraycopy(elements, 0, a, 0, n);
+		return a;
+	}
+
+	@Override
+	public int size() {
+		return n;
+	}
+
+    public int capacity() {
+        return elements.length;
+    }
+
+	@Override
+	public boolean equals(Object o) {
+        if ( o==null ) {
+            return false;
+        }
+        IntArrayList other = (IntArrayList)o;
+        if ( this.size()!=other.size() ) {
+            return false;
+        }
+		for (int i = 0; i < n; i++) {
+			if ( elements[i] != other.elements[i] ) {
+				return false;
+			}
+		}
+		return true;
+	}
+
+	@Override
+    public Object clone() throws CloneNotSupportedException {
+		IntArrayList a = (IntArrayList)super.clone();
+        a.n = this.n;
+        a.elements = new int[this.elements.length]; // give the clone its own backing array
+        System.arraycopy(this.elements, 0, a.elements, 0, this.elements.length);
+        return a;
+    }
+
+	@Override
+	public String toString() {
+		StringBuilder buf = new StringBuilder();
+		for (int i = 0; i < n; i++) {
+			if ( i>0 ) {
+				buf.append(", ");
+			}
+			buf.append(elements[i]);
+		}
+		return buf.toString();
+	}
+}
diff --git a/tool/src/main/java/org/antlr/misc/IntSet.java b/tool/src/main/java/org/antlr/misc/IntSet.java
new file mode 100644
index 0000000..1551dd3
--- /dev/null
+++ b/tool/src/main/java/org/antlr/misc/IntSet.java
@@ -0,0 +1,86 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.misc;
+
+import org.antlr.tool.Grammar;
+
+import java.util.List;
+
+/** A generic set of ints that has an efficient implementation, BitSet,
+ *  which is a compressed bitset and is useful for ints that
+ *  are small, for example less than 500 or so, and w/o many ranges.  For
+ *  ranges with large values like unicode char sets, this is not very efficient.
+ *  Consider using IntervalSet.  Not all methods in IntervalSet are implemented.
+ *
+ *  @see org.antlr.misc.BitSet
+ *  @see org.antlr.misc.IntervalSet
+ */
+public interface IntSet {
+    /** Add an element to the set */
+    void add(int el);
+
+    /** Add all elements from incoming set to this set.  Can limit
+     *  to set of its own type.
+     */
+    void addAll(IntSet set);
+
+    /** Return the intersection of this set with the argument, creating
+     *  a new set.
+     */
+    IntSet and(IntSet a);
+
+    IntSet complement(IntSet elements);
+
+    IntSet or(IntSet a);
+
+    IntSet subtract(IntSet a);
+
+    /** Return the size of this set (not the underlying implementation's
+     *  allocated memory size, for example).
+     */
+    int size();
+
+    boolean isNil();
+
+	@Override
+    boolean equals(Object obj);
+
+    int getSingleElement();
+
+    boolean member(int el);
+
+    /** remove this element from this set */
+    void remove(int el);
+
+    List<Integer> toList();
+
+	@Override
+    String toString();
+
+    String toString(Grammar g);
+}
diff --git a/tool/src/main/java/org/antlr/misc/Interval.java b/tool/src/main/java/org/antlr/misc/Interval.java
new file mode 100644
index 0000000..3671fee
--- /dev/null
+++ b/tool/src/main/java/org/antlr/misc/Interval.java
@@ -0,0 +1,144 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.misc;
+
+/** An immutable inclusive interval a..b */
+public class Interval {
+	public static final int INTERVAL_POOL_MAX_VALUE = 1000;
+
+	static Interval[] cache = new Interval[INTERVAL_POOL_MAX_VALUE+1];
+
+	public int a;
+	public int b;
+
+	public static int creates = 0;
+	public static int misses = 0;
+	public static int hits = 0;
+	public static int outOfRange = 0;
+
+	public Interval(int a, int b) { this.a=a; this.b=b; }
+
+	/** Interval objects are used read-only, so single-value intervals (a==b)
+	 *  are shared up to some max size, using an array as a perfect hash.
+	 *  Return the shared object for 0..INTERVAL_POOL_MAX_VALUE, or a new
+	 *  Interval object with a..a in it otherwise.  On Java.g, 218623
+	 *  IntervalSets have a..a (a set with 1 element).
+	 */
+	public static Interval create(int a, int b) {
+		//return new Interval(a,b);
+		// cache just a..a
+		if ( a!=b || a<0 || a>INTERVAL_POOL_MAX_VALUE ) {
+			return new Interval(a,b);
+		}
+		if ( cache[a]==null ) {
+			cache[a] = new Interval(a,a);
+		}
+		return cache[a];
+	}
+
+	@Override
+	public boolean equals(Object o) {
+		if ( o==null ) {
+			return false;
+		}
+		Interval other = (Interval)o;
+		return this.a==other.a && this.b==other.b;
+	}
+
+	/** Does this start completely before other? Disjoint */
+	public boolean startsBeforeDisjoint(Interval other) {
+		return this.a<other.a && this.b<other.a;
+	}
+
+	/** Does this start at or before other? Nondisjoint */
+	public boolean startsBeforeNonDisjoint(Interval other) {
+		return this.a<=other.a && this.b>=other.a;
+	}
+
+	/** Does this start after other starts (this.a &gt; other.a)? May or may not be disjoint */
+	public boolean startsAfter(Interval other) { return this.a>other.a; }
+
+	/** Does this start completely after other? Disjoint */
+	public boolean startsAfterDisjoint(Interval other) {
+		return this.a>other.b;
+	}
+
+	/** Does this start after other? NonDisjoint */
+	public boolean startsAfterNonDisjoint(Interval other) {
+		return this.a>other.a && this.a<=other.b; // this.b>=other.b implied
+	}
+
+	/** Are both ranges disjoint? I.e., no overlap? */
+	public boolean disjoint(Interval other) {
+		return startsBeforeDisjoint(other) || startsAfterDisjoint(other);
+	}
+
+	/** Are two intervals adjacent such as 0..41 and 42..42? */
+	public boolean adjacent(Interval other) {
+		return this.a == other.b+1 || this.b == other.a-1;
+	}
+
+	public boolean properlyContains(Interval other) {
+		return other.a >= this.a && other.b <= this.b;
+	}
+
+	/** Return the interval computed from combining this and other */
+	public Interval union(Interval other) {
+		return Interval.create(Math.min(a,other.a), Math.max(b,other.b));
+	}
+
+	/** Return the interval in common between this and o */
+	public Interval intersection(Interval other) {
+		return Interval.create(Math.max(a,other.a), Math.min(b,other.b));
+	}
+
+	/** Return the interval with elements from this not in other;
+	 *  other must not be totally enclosed (properly contained)
+	 *  within this, which would result in two disjoint intervals
+	 *  instead of the single one returned by this method.
+	 */
+	public Interval differenceNotProperlyContained(Interval other) {
+		Interval diff = null;
+		// other.a to left of this.a (or same)
+		if ( other.startsBeforeNonDisjoint(this) ) {
+			diff = Interval.create(Math.max(this.a,other.b+1),
+								   this.b);
+		}
+
+		// other.a to right of this.a
+		else if ( other.startsAfterNonDisjoint(this) ) {
+			diff = Interval.create(this.a, other.a-1);
+		}
+		return diff;
+	}
+
+	@Override
+	public String toString() {
+		return a+".."+b;
+	}
+}
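
A small sketch of the interval pool and the set-algebra helpers above (illustrative only; the demo class name is made up, and the classes added by this patch are assumed to be on the classpath):

import org.antlr.misc.Interval;

public class IntervalDemo {
    public static void main(String[] args) {
        // Single-value intervals in 0..INTERVAL_POOL_MAX_VALUE are shared
        Interval a = Interval.create(3, 3);
        Interval b = Interval.create(3, 3);
        System.out.println(a == b);              // true: same pooled object

        Interval x = Interval.create(1, 5);
        Interval y = Interval.create(4, 9);
        System.out.println(x.disjoint(y));       // false: 4..5 overlaps
        System.out.println(x.union(y));          // 1..9
        System.out.println(x.intersection(y));   // 4..5
    }
}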
diff --git a/tool/src/main/java/org/antlr/misc/IntervalSet.java b/tool/src/main/java/org/antlr/misc/IntervalSet.java
new file mode 100644
index 0000000..15ec943
--- /dev/null
+++ b/tool/src/main/java/org/antlr/misc/IntervalSet.java
@@ -0,0 +1,710 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.misc;
+
+import org.antlr.analysis.Label;
+import org.antlr.tool.Grammar;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+
+/** A set of integers that relies on ranges being common to do
+ *  "run-length-encoded" like compression (if you view an IntSet like
+ *  a BitSet with runs of 0s and 1s).  Only ranges are recorded so that
+ *  a few ints up near value 1000 don't cause massive bitsets, just two
+ *  integer intervals.
+ *
+ *  Element values may be negative.  Useful for sets containing EPSILON and EOF.
+ *
+ *  0..9 char range is index pair ['\u0030','\u0039'].
+ *  Multiple ranges are encoded with multiple index pairs.  Isolated
+ *  elements are encoded with an index pair where both intervals are the same.
+ *
+ *  The ranges are ordered and disjoint so that 2..6 appears before 101..103.
+ */
+public class IntervalSet implements IntSet {
+	public static final IntervalSet COMPLETE_SET = IntervalSet.of(0,Label.MAX_CHAR_VALUE);
+
+	/** The list of sorted, disjoint intervals. */
+    protected List<Interval> intervals;
+
+	/** Create a set with no elements */
+    public IntervalSet() {
+        intervals = new ArrayList<Interval>(2); // most sets are 1 or 2 elements
+    }
+
+	public IntervalSet(List<Interval> intervals) {
+		this.intervals = intervals;
+	}
+
+	/** Create a set with a single element, a. */
+    public static IntervalSet of(int a) {
+		IntervalSet s = new IntervalSet();
+        s.add(a);
+        return s;
+    }
+
+    /** Create a set with all ints within range [a..b] (inclusive) */
+    public static IntervalSet of(int a, int b) {
+        IntervalSet s = new IntervalSet();
+        s.add(a,b);
+        return s;
+    }
+
+    /** Add a single element to the set.  An isolated element is stored
+     *  as a range el..el.
+     */
+	@Override
+    public void add(int el) {
+        add(el,el);
+    }
+
+    /** Add interval; i.e., add all integers from a to b to set.
+     *  If b&lt;a, do nothing.
+     *  Keep list in sorted order (by left range value).
+     *  If ranges overlap or are adjacent, combine them.  For example,
+     *  if this is {1..5, 10..20}, adding 7..8 yields {1..5, 7..8, 10..20},
+     *  adding 6..7 yields {1..7, 10..20}, and adding 4..8 yields {1..8, 10..20}.
+     */
+    public void add(int a, int b) {
+        add(Interval.create(a,b));
+    }
+
+	// copy-on-write style: the pooled a..a intervals are never mutated, so they (and sets built from them) can be cached safely
+	protected void add(Interval addition) {
+		//System.out.println("add "+addition+" to "+intervals.toString());
+		if ( addition.b<addition.a ) {
+			return;
+		}
+		// find position in list
+		// Use iterators as we modify list in place
+		for (ListIterator<Interval> iter = intervals.listIterator(); iter.hasNext();) {
+			Interval r = iter.next();
+			if ( addition.equals(r) ) {
+				return;
+			}
+			if ( addition.adjacent(r) || !addition.disjoint(r) ) {
+				// next to each other, make a single larger interval
+				Interval bigger = addition.union(r);
+				iter.set(bigger);
+				// make sure we didn't just create an interval that
+				// should be merged with next interval in list
+				while ( iter.hasNext() ) {
+					Interval next = iter.next();
+					if ( !bigger.adjacent(next) && bigger.disjoint(next) ) {
+						break;
+					}
+
+					// if we bump up against or overlap next, merge
+					iter.remove();   // remove this one
+					iter.previous(); // move backwards to what we just set
+					iter.set(bigger.union(next)); // set to 3 merged ones
+					iter.next(); // first call to next after previous duplicates the result
+				}
+				return;
+			}
+			if ( addition.startsBeforeDisjoint(r) ) {
+				// insert before r
+				iter.previous();
+				iter.add(addition);
+				return;
+			}
+			// if disjoint and after r, a future iteration will handle it
+		}
+		// ok, must be after last interval (and disjoint from last interval)
+		// just add it
+		intervals.add(addition);
+	}
+
+	/*
+	protected void add(Interval addition) {
+        //System.out.println("add "+addition+" to "+intervals.toString());
+        if ( addition.b<addition.a ) {
+            return;
+        }
+        // find position in list
+        //for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
+		int n = intervals.size();
+		for (int i=0; i<n; i++) {
+			Interval r = (Interval)intervals.get(i);
+            if ( addition.equals(r) ) {
+                return;
+            }
+            if ( addition.adjacent(r) || !addition.disjoint(r) ) {
+                // next to each other, make a single larger interval
+                Interval bigger = addition.union(r);
+				intervals.set(i, bigger);
+                // make sure we didn't just create an interval that
+                // should be merged with next interval in list
+				if ( (i+1)<n ) {
+					i++;
+					Interval next = (Interval)intervals.get(i);
+                    if ( bigger.adjacent(next)||!bigger.disjoint(next) ) {
+                        // if we bump up against or overlap next, merge
+						intervals.remove(i); // remove next one
+						i--;
+						intervals.set(i, bigger.union(next)); // set to 3 merged ones
+                    }
+                }
+                return;
+            }
+            if ( addition.startsBeforeDisjoint(r) ) {
+                // insert before r
+				intervals.add(i, addition);
+                return;
+            }
+            // if disjoint and after r, a future iteration will handle it
+        }
+        // ok, must be after last interval (and disjoint from last interval)
+        // just add it
+        intervals.add(addition);
+    }
+*/
+
+	@Override
+	public void addAll(IntSet set) {
+		if ( set==null ) {
+			return;
+		}
+        if ( !(set instanceof IntervalSet) ) {
+            throw new IllegalArgumentException("can't add non IntSet ("+
+											   set.getClass().getName()+
+											   ") to IntervalSet");
+        }
+        IntervalSet other = (IntervalSet)set;
+        // walk set and add each interval
+		int n = other.intervals.size();
+		for (int i = 0; i < n; i++) {
+			Interval I = other.intervals.get(i);
+			this.add(I.a,I.b);
+		}
+    }
+
+    public IntervalSet complement(int minElement, int maxElement) {
+        return this.complement(IntervalSet.of(minElement,maxElement));
+    }
+
+    /** Given the set of possible values (rather than, say UNICODE or MAXINT),
+     *  return a new set containing all elements in vocabulary, but not in
+     *  this.  The computation is (vocabulary - this).
+     *
+     *  'this' is assumed to be either a subset or equal to vocabulary.
+     */
+	@Override
+    public IntervalSet complement(IntSet vocabulary) {
+        if ( vocabulary==null ) {
+            return null; // nothing in common with null set
+        }
+		if ( !(vocabulary instanceof IntervalSet ) ) {
+			throw new IllegalArgumentException("can't complement with non IntervalSet ("+
+											   vocabulary.getClass().getName()+")");
+		}
+		IntervalSet vocabularyIS = ((IntervalSet)vocabulary);
+		int maxElement = vocabularyIS.getMaxElement();
+
+		IntervalSet compl = new IntervalSet();
+		int n = intervals.size();
+		if ( n ==0 ) {
+			return compl;
+		}
+		Interval first = intervals.get(0);
+		// add a range from 0 to first.a constrained to vocab
+		if ( first.a > 0 ) {
+			IntervalSet s = IntervalSet.of(0, first.a-1);
+			IntervalSet a = s.and(vocabularyIS);
+			compl.addAll(a);
+		}
+		for (int i=1; i<n; i++) { // from 2nd interval .. nth
+			Interval previous = intervals.get(i-1);
+			Interval current = intervals.get(i);
+			IntervalSet s = IntervalSet.of(previous.b+1, current.a-1);
+			IntervalSet a = s.and(vocabularyIS);
+			compl.addAll(a);
+		}
+		Interval last = intervals.get(n -1);
+		// add a range from last.b to maxElement constrained to vocab
+		if ( last.b < maxElement ) {
+			IntervalSet s = IntervalSet.of(last.b+1, maxElement);
+			IntervalSet a = s.and(vocabularyIS);
+			compl.addAll(a);
+		}
+		return compl;
+    }
+
+	/** Compute this-other via this&amp;~other.
+	 *  Return a new set containing all elements in this but not in other.
+	 *  other is assumed to be a subset of this;
+     *  anything that is in other but not in this will be ignored.
+	 */
+	@Override
+	public IntervalSet subtract(IntSet other) {
+		// assume the whole unicode range here for the complement
+		// because it doesn't matter.  Anything beyond the max of this' set
+		// will be ignored since we are doing this & ~other.  The intersection
+		// will be empty.  The only problem would be when this' set max value
+		// goes beyond MAX_CHAR_VALUE, but hopefully the constant MAX_CHAR_VALUE
+		// will prevent this.
+		return this.and(((IntervalSet)other).complement(COMPLETE_SET));
+	}
+
+	/** return a new set containing all elements in this but not in other.
+     *  Intervals may have to be broken up when ranges in this overlap
+     *  with ranges in other.  other is assumed to be a subset of this;
+     *  anything that is in other but not in this will be ignored.
+	 *
+	 *  Keep around, but 10-20-2005, I decided to make complement work w/o
+	 *  subtract and so then subtract can simply be a&~b
+	 *
+    public IntSet subtract(IntSet other) {
+        if ( other==null || !(other instanceof IntervalSet) ) {
+            return null; // nothing in common with null set
+        }
+
+        IntervalSet diff = new IntervalSet();
+
+        // iterate down both interval lists
+        ListIterator thisIter = this.intervals.listIterator();
+        ListIterator otherIter = ((IntervalSet)other).intervals.listIterator();
+        Interval mine=null;
+        Interval theirs=null;
+        if ( thisIter.hasNext() ) {
+            mine = (Interval)thisIter.next();
+        }
+        if ( otherIter.hasNext() ) {
+            theirs = (Interval)otherIter.next();
+        }
+        while ( mine!=null ) {
+            //System.out.println("mine="+mine+", theirs="+theirs);
+            // CASE 1: nothing in theirs removes a chunk from mine
+            if ( theirs==null || mine.disjoint(theirs) ) {
+                // SUBCASE 1a: finished traversing theirs; keep adding mine now
+                if ( theirs==null ) {
+                    // add everything in mine to difference since theirs done
+                    diff.add(mine);
+                    mine = null;
+                    if ( thisIter.hasNext() ) {
+                        mine = (Interval)thisIter.next();
+                    }
+                }
+                else {
+                    // SUBCASE 1b: mine is completely to the left of theirs
+                    // so we can add to difference; move mine, but not theirs
+                    if ( mine.startsBeforeDisjoint(theirs) ) {
+                        diff.add(mine);
+                        mine = null;
+                        if ( thisIter.hasNext() ) {
+                            mine = (Interval)thisIter.next();
+                        }
+                    }
+                    // SUBCASE 1c: theirs is completely to the left of mine
+                    else {
+                        // keep looking in theirs
+                        theirs = null;
+                        if ( otherIter.hasNext() ) {
+                            theirs = (Interval)otherIter.next();
+                        }
+                    }
+                }
+            }
+            else {
+                // CASE 2: theirs breaks mine into two chunks
+                if ( mine.properlyContains(theirs) ) {
+                    // must add two intervals: stuff to left and stuff to right
+                    diff.add(mine.a, theirs.a-1);
+                    // don't actually add stuff to right yet as next 'theirs'
+                    // might overlap with it
+                    // The stuff to the right might overlap with next "theirs".
+                    // so it is considered next
+                    Interval right = new Interval(theirs.b+1, mine.b);
+                    mine = right;
+                    // move theirs forward
+                    theirs = null;
+                    if ( otherIter.hasNext() ) {
+                        theirs = (Interval)otherIter.next();
+                    }
+                }
+
+                // CASE 3: theirs covers mine; nothing to add to diff
+                else if ( theirs.properlyContains(mine) ) {
+                    // nothing to add, theirs forces removal totally of mine
+                    // just move mine looking for an overlapping interval
+                    mine = null;
+                    if ( thisIter.hasNext() ) {
+                        mine = (Interval)thisIter.next();
+                    }
+                }
+
+                // CASE 4: non proper overlap
+                else {
+                    // overlap, but not properly contained
+                    diff.add(mine.differenceNotProperlyContained(theirs));
+                    // update iterators
+                    boolean moveTheirs = true;
+                    if ( mine.startsBeforeNonDisjoint(theirs) ||
+                         theirs.b > mine.b )
+                    {
+                        // uh oh, right of theirs extends past right of mine
+                        // therefore could overlap with next of mine so don't
+                        // move theirs iterator yet
+                        moveTheirs = false;
+                    }
+                    // always move mine
+                    mine = null;
+                    if ( thisIter.hasNext() ) {
+                        mine = (Interval)thisIter.next();
+                    }
+                    if ( moveTheirs ) {
+                        theirs = null;
+                        if ( otherIter.hasNext() ) {
+                            theirs = (Interval)otherIter.next();
+                        }
+                    }
+                }
+            }
+        }
+        return diff;
+    }
+	 */
+
+    /** Return the union of this set and a as a new set. */
+	@Override
+	public IntSet or(IntSet a) {
+		IntervalSet o = new IntervalSet();
+		o.addAll(this);
+		o.addAll(a);
+		//throw new NoSuchMethodError();
+		return o;
+	}
+
+    /** Return a new set with the intersection of this set with other.  Because
+     *  the intervals are sorted, we can use an iterator for each list and
+     *  just walk them together.  This is roughly O(min(n,m)) for interval
+     *  list lengths n and m.
+     */
+	@Override
+	public IntervalSet and(IntSet other) {
+		if ( other==null ) { //|| !(other instanceof IntervalSet) ) {
+			return null; // nothing in common with null set
+		}
+
+		List<Interval> myIntervals = this.intervals;
+		List<Interval> theirIntervals = ((IntervalSet)other).intervals;
+		IntervalSet intersection = null;
+		int mySize = myIntervals.size();
+		int theirSize = theirIntervals.size();
+		int i = 0;
+		int j = 0;
+		// iterate down both interval lists looking for nondisjoint intervals
+		while ( i<mySize && j<theirSize ) {
+			Interval mine = myIntervals.get(i);
+			Interval theirs = theirIntervals.get(j);
+			//System.out.println("mine="+mine+" and theirs="+theirs);
+			if ( mine.startsBeforeDisjoint(theirs) ) {
+				// move this iterator looking for interval that might overlap
+				i++;
+			}
+			else if ( theirs.startsBeforeDisjoint(mine) ) {
+				// move other iterator looking for interval that might overlap
+				j++;
+			}
+			else if ( mine.properlyContains(theirs) ) {
+				// overlap, add intersection, get next theirs
+				if ( intersection==null ) {
+					intersection = new IntervalSet();
+				}
+				intersection.add(mine.intersection(theirs));
+				j++;
+			}
+			else if ( theirs.properlyContains(mine) ) {
+				// overlap, add intersection, get next mine
+				if ( intersection==null ) {
+					intersection = new IntervalSet();
+				}
+				intersection.add(mine.intersection(theirs));
+				i++;
+			}
+			else if ( !mine.disjoint(theirs) ) {
+				// overlap, add intersection
+				if ( intersection==null ) {
+					intersection = new IntervalSet();
+				}
+				intersection.add(mine.intersection(theirs));
+				// Move the iterator of lower range [a..b], but not
+				// the upper range as it may contain elements that will collide
+				// with the next iterator. So, if mine=[0..115] and
+				// theirs=[115..200], then intersection is 115 and move mine
+				// but not theirs as theirs may collide with the next range
+				// in thisIter.
+				// advance only the iterator whose range ends first
+				if ( mine.startsAfterNonDisjoint(theirs) ) {
+					j++;
+				}
+				else if ( theirs.startsAfterNonDisjoint(mine) ) {
+					i++;
+				}
+			}
+		}
+		if ( intersection==null ) {
+			return new IntervalSet();
+		}
+		return intersection;
+	}
+
+    /** Is el in any range of this set? */
+	@Override
+    public boolean member(int el) {
+		int n = intervals.size();
+		for (int i = 0; i < n; i++) {
+			Interval I = intervals.get(i);
+			int a = I.a;
+			int b = I.b;
+			if ( el<a ) {
+				break; // list is sorted and el is before this interval; not here
+			}
+			if ( el>=a && el<=b ) {
+				return true; // found in this interval
+			}
+		}
+		return false;
+/*
+		for (ListIterator iter = intervals.listIterator(); iter.hasNext();) {
+            Interval I = (Interval) iter.next();
+            if ( el<I.a ) {
+                break; // list is sorted and el is before this interval; not here
+            }
+            if ( el>=I.a && el<=I.b ) {
+                return true; // found in this interval
+            }
+        }
+        return false;
+        */
+    }
+
+    /** return true if this set has no members */
+	@Override
+    public boolean isNil() {
+        return intervals==null || intervals.isEmpty();
+    }
+
+    /** If this set is a single integer, return it; otherwise return Label.INVALID. */
+	@Override
+    public int getSingleElement() {
+        if ( intervals!=null && intervals.size()==1 ) {
+            Interval I = intervals.get(0);
+            if ( I.a == I.b ) {
+                return I.a;
+            }
+        }
+        return Label.INVALID;
+    }
+
+	public int getMaxElement() {
+		if ( isNil() ) {
+			return Label.INVALID;
+		}
+		Interval last = intervals.get(intervals.size()-1);
+		return last.b;
+	}
+
+	/** Return minimum element &gt;= 0 */
+	public int getMinElement() {
+		if ( isNil() ) {
+			return Label.INVALID;
+		}
+		int n = intervals.size();
+		for (int i = 0; i < n; i++) {
+			Interval I = intervals.get(i);
+			int a = I.a;
+			int b = I.b;
+			for (int v=a; v<=b; v++) {
+				if ( v>=0 ) return v;
+			}
+		}
+		return Label.INVALID;
+	}
+
+    /** Return a list of Interval objects. */
+    public List<Interval> getIntervals() {
+        return intervals;
+    }
+
+    /** Are two IntervalSets equal?  Because all intervals are sorted
+     *  and disjoint, equals is a simple linear walk over both lists
+     *  to make sure they are the same.  Interval.equals() is used
+     *  by the List.equals() method to check the ranges.
+     */
+	@Override
+    public boolean equals(Object obj) {
+        if ( !(obj instanceof IntervalSet) ) {
+            return false;
+        }
+        IntervalSet other = (IntervalSet)obj;
+        return this.intervals.equals(other.intervals);
+    }
+
+	@Override
+    public String toString() {
+        return toString(null);
+    }
+
+	@Override
+    public String toString(Grammar g) {
+        StringBuilder buf = new StringBuilder();
+		if ( this.intervals==null || this.intervals.isEmpty() ) {
+			return "{}";
+		}
+        if ( this.intervals.size()>1 ) {
+            buf.append("{");
+        }
+        Iterator<Interval> iter = this.intervals.iterator();
+        while (iter.hasNext()) {
+            Interval I = iter.next();
+            int a = I.a;
+            int b = I.b;
+            if ( a==b ) {
+                if ( g!=null ) {
+                    buf.append(g.getTokenDisplayName(a));
+                }
+                else {
+                    buf.append(a);
+                }
+            }
+            else {
+                if ( g!=null ) {
+                    buf.append(g.getTokenDisplayName(a)).append("..").append(g.getTokenDisplayName(b));
+                }
+                else {
+                    buf.append(a).append("..").append(b);
+                }
+            }
+            if ( iter.hasNext() ) {
+                buf.append(", ");
+            }
+        }
+        if ( this.intervals.size()>1 ) {
+            buf.append("}");
+        }
+        return buf.toString();
+    }
+
+	@Override
+    public int size() {
+		int n = 0;
+		int numIntervals = intervals.size();
+		if ( numIntervals==1 ) {
+			Interval firstInterval = this.intervals.get(0);
+			return firstInterval.b-firstInterval.a+1;
+		}
+		for (int i = 0; i < numIntervals; i++) {
+			Interval I = intervals.get(i);
+			n += (I.b-I.a+1);
+		}
+		return n;
+    }
+
+	@Override
+    public List<Integer> toList() {
+		List<Integer> values = new ArrayList<Integer>();
+		int n = intervals.size();
+		for (int i = 0; i < n; i++) {
+			Interval I = intervals.get(i);
+			int a = I.a;
+			int b = I.b;
+			for (int v=a; v<=b; v++) {
+				values.add(Utils.integer(v));
+			}
+		}
+		return values;
+    }
+
+	/** Get the ith element of ordered set.  Used only by RandomPhrase so
+	 *  don't bother to implement if you're not doing that for a new
+	 *  ANTLR code gen target.
+	 */
+	public int get(int i) {
+		int n = intervals.size();
+		int index = 0;
+		for (int j = 0; j < n; j++) {
+			Interval I = intervals.get(j);
+			int a = I.a;
+			int b = I.b;
+			for (int v=a; v<=b; v++) {
+				if ( index==i ) {
+					return v;
+				}
+				index++;
+			}
+		}
+		return -1;
+	}
+
+	public int[] toArray() {
+		int[] values = new int[size()];
+		int n = intervals.size();
+		int j = 0;
+		for (int i = 0; i < n; i++) {
+			Interval I = intervals.get(i);
+			int a = I.a;
+			int b = I.b;
+			for (int v=a; v<=b; v++) {
+				values[j] = v;
+				j++;
+			}
+		}
+		return values;
+	}
+
+	public org.antlr.runtime.BitSet toRuntimeBitSet() {
+		org.antlr.runtime.BitSet s =
+			new org.antlr.runtime.BitSet(getMaxElement()+1);
+		int n = intervals.size();
+		for (int i = 0; i < n; i++) {
+			Interval I = intervals.get(i);
+			int a = I.a;
+			int b = I.b;
+			for (int v=a; v<=b; v++) {
+				s.add(v);
+			}
+		}
+		return s;
+	}
+
+	@Override
+	public void remove(int el) {
+        throw new NoSuchMethodError("IntervalSet.remove() unimplemented");
+    }
+
+	/*
+	protected void finalize() throws Throwable {
+		super.finalize();
+		System.out.println("size "+intervals.size()+" "+size());
+	}
+	*/
+}
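
A short sketch of the merge-on-add, complement, and intersection behavior defined above (illustrative; assumes the tool classes from this patch, including org.antlr.analysis.Label, are on the classpath):

import org.antlr.misc.IntervalSet;

public class IntervalSetDemo {
    public static void main(String[] args) {
        IntervalSet s = IntervalSet.of(1, 5);
        s.add(10, 20);
        s.add(6, 7);                              // adjacent to 1..5, so the ranges merge
        System.out.println(s);                    // {1..7, 10..20}

        IntervalSet vocab = IntervalSet.of(0, 30);
        System.out.println(s.complement(vocab));  // {0, 8..9, 21..30}
        System.out.println(s.and(IntervalSet.of(4, 12)));  // {4..7, 10..12}
    }
}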
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/MultiMap.java b/tool/src/main/java/org/antlr/misc/MultiMap.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/misc/MultiMap.java
rename to tool/src/main/java/org/antlr/misc/MultiMap.java
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/misc/MutableInteger.java b/tool/src/main/java/org/antlr/misc/MutableInteger.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/misc/MutableInteger.java
rename to tool/src/main/java/org/antlr/misc/MutableInteger.java
diff --git a/tool/src/main/java/org/antlr/misc/OrderedHashSet.java b/tool/src/main/java/org/antlr/misc/OrderedHashSet.java
new file mode 100644
index 0000000..1dbe93f
--- /dev/null
+++ b/tool/src/main/java/org/antlr/misc/OrderedHashSet.java
@@ -0,0 +1,119 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.misc;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+
+/** A hash set that remembers the order in which the elements were added.
+ *  You can alter the ith element with set(i,value) too :)  Unique list.
+ *  I need the replace/set-element-i functionality, so this subclasses
+ *  LinkedHashSet and tracks elements in a parallel list.
+ */
+public class OrderedHashSet<T> extends LinkedHashSet<T> {
+    /** Track the elements as they are added to the set */
+    protected List<T> elements = new ArrayList<T>();
+
+    public T get(int i) {
+        return elements.get(i);
+    }
+
+    /** Replace an existing value with a new value; updates the element
+     *  list and the hash table, but not the key as that has not changed.
+     */
+    public T set(int i, T value) {
+        T oldElement = elements.get(i);
+        elements.set(i,value); // update list
+        super.remove(oldElement); // now update the set: remove/add
+        super.add(value);
+        return oldElement;
+    }
+
+    /** Add a value to the list; also keep it in the hash set for consistency.
+     *  The key is the object itself.  Good for, say, asking whether a certain
+     *  string is in a list of strings.
+     */
+	@Override
+    public boolean add(T value) {
+        boolean result = super.add(value);
+		if ( result ) {  // only track if new element not in set
+			elements.add(value);
+		}
+		return result;
+    }
+
+	@Override
+    public boolean remove(Object o) {
+		throw new UnsupportedOperationException();
+		/*
+		elements.remove(o);
+        return super.remove(o);
+        */
+    }
+
+	@Override
+    public void clear() {
+        elements.clear();
+        super.clear();
+    }
+
+    /** Return the List holding the elements.  Note that you are
+     *  NOT getting a copy, so don't write to the list.
+     */
+    public List<T> elements() {
+        return elements;
+    }
+
+	@Override
+	public Iterator<T> iterator() {
+		return elements.iterator();
+	}
+
+	@Override
+	public Object[] toArray() {
+		return elements.toArray();
+	}
+
+	@Override
+    public int size() {
+		/*
+		if ( elements.size()!=super.size() ) {
+			ErrorManager.internalError("OrderedHashSet: elements and set size differs; "+
+									   elements.size()+"!="+super.size());
+        }
+        */
+        return elements.size();
+    }
+
+	@Override
+    public String toString() {
+        return elements.toString();
+    }
+}
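
A minimal sketch of the insertion-order and set(i,value) semantics above (illustrative demo class name; assumes the classes added by this patch are on the classpath):

import org.antlr.misc.OrderedHashSet;

public class OrderedHashSetDemo {
    public static void main(String[] args) {
        OrderedHashSet<String> names = new OrderedHashSet<String>();
        names.add("b");
        names.add("a");
        names.add("b");                    // duplicate: ignored, order preserved
        System.out.println(names);         // [b, a]
        names.set(1, "c");                 // replaces element 1 in both list and set
        System.out.println(names.get(1));  // c
        System.out.println(names.size());  // 2
    }
}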
diff --git a/tool/src/main/java/org/antlr/misc/Utils.java b/tool/src/main/java/org/antlr/misc/Utils.java
new file mode 100644
index 0000000..1e3939d
--- /dev/null
+++ b/tool/src/main/java/org/antlr/misc/Utils.java
@@ -0,0 +1,89 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.misc;
+
+public class Utils {
+	public static final int INTEGER_POOL_MAX_VALUE = 1000;
+	static Integer[] ints = new Integer[INTEGER_POOL_MAX_VALUE+1];
+
+	/** Integer objects are immutable, so Integers with the same value are
+	 *  shared up to some max size, using an array as a perfect hash.
+	 *  Return the shared object for 0..INTEGER_POOL_MAX_VALUE, or a new
+	 *  Integer object with x in it otherwise.
+	 */
+	public static Integer integer(int x) {
+		if ( x<0 || x>INTEGER_POOL_MAX_VALUE ) {
+			return x;
+		}
+		if ( ints[x]==null ) {
+			ints[x] = x;
+		}
+		return ints[x];
+	}
+
+	/** Given a source string, src,
+		a string to replace, replacee,
+		and a string to replace with, replacer,
+		return a new string w/ the replacing done.
+		You can use replacer==null to remove replacee from the string.
+
+		This should be faster than Java's String.replaceAll as that one
+		uses regex (I only want to play with strings anyway).
+	*/
+	public static String replace(String src, String replacee, String replacer) {
+		StringBuilder result = new StringBuilder(src.length() + 50);
+		int startIndex = 0;
+		int endIndex = src.indexOf(replacee);
+		while(endIndex != -1) {
+			result.append(src.substring(startIndex,endIndex));
+			if ( replacer!=null ) {
+				result.append(replacer);
+			}
+			startIndex = endIndex + replacee.length();
+			endIndex = src.indexOf(replacee,startIndex);
+		}
+		result.append(src.substring(startIndex,src.length()));
+		return result.toString();
+	}
+
+//	/** mimic struct; like a non-iterable map. */
+//	public static class Struct {
+//		public Map<String,Object> fields = new HashMap<String,Object>();
+//
+//		@Override
+//		public String toString() { return fields.toString(); }
+//	}
+//
+//	public static Struct struct(String propNames, Object... values) {
+//		String[] props = propNames.split(",");
+//		int i=0;
+//		Struct s = new Struct();
+//		for (String p : props) s.fields.put(p, values[i++]);
+//		return s;
+//	}
+}
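
A quick sketch of the Integer pool and the non-regex replace helper above (illustrative demo class name; assumes the classes added by this patch are on the classpath):

import org.antlr.misc.Utils;

public class UtilsDemo {
    public static void main(String[] args) {
        // Values 0..INTEGER_POOL_MAX_VALUE come back as shared Integer objects
        System.out.println(Utils.integer(42) == Utils.integer(42));  // true

        // Plain string scanning, no regex; a null replacer deletes the replacee
        System.out.println(Utils.replace("a.b.c", ".", "/"));   // a/b/c
        System.out.println(Utils.replace("a.b.c", ".", null));  // abc
    }
}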
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/ANTLRErrorListener.java b/tool/src/main/java/org/antlr/tool/ANTLRErrorListener.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/tool/ANTLRErrorListener.java
rename to tool/src/main/java/org/antlr/tool/ANTLRErrorListener.java
diff --git a/tool/src/main/java/org/antlr/tool/AssignTokenTypesBehavior.java b/tool/src/main/java/org/antlr/tool/AssignTokenTypesBehavior.java
new file mode 100644
index 0000000..cc7c0f4
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/AssignTokenTypesBehavior.java
@@ -0,0 +1,308 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.Label;
+import org.antlr.grammar.v3.AssignTokenTypesWalker;
+import org.antlr.misc.Utils;
+
+import java.util.*;
+
+/** Holds the functionality moved out of the assign.types.g grammar file. */
+public class AssignTokenTypesBehavior extends AssignTokenTypesWalker {
+	protected static final Integer UNASSIGNED = Utils.integer(-1);
+	protected static final Integer UNASSIGNED_IN_PARSER_RULE = Utils.integer(-2);
+
+	protected Map<String,Integer> stringLiterals = new TreeMap<String, Integer>();
+	protected Map<String,Integer> tokens = new TreeMap<String, Integer>();
+	protected Map<String,String> aliases = new TreeMap<String, String>();
+	protected Map<String,String> aliasesReverseIndex = new HashMap<String,String>();
+
+	/** Track actual lexer rule defs so we don't get repeated token defs in
+	 *  generated lexer.
+	 */
+	protected Set<String> tokenRuleDefs = new HashSet<String>();
+
+	public AssignTokenTypesBehavior() {
+		super(null);
+	}
+
+    @Override
+	protected void init(Grammar g) {
+		this.grammar = g;
+		currentRuleName = null;
+		if ( stringAlias==null ) {
+			// only init once; can't statically init since we need astFactory
+			initASTPatterns();
+		}
+	}
+
+	/** Track string literals (could be in tokens{} section) */
+    @Override
+	protected void trackString(GrammarAST t) {
+		// if lexer, don't allow aliasing in tokens section
+		if ( currentRuleName==null && grammar.type==Grammar.LEXER ) {
+			ErrorManager.grammarError(ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER,
+									  grammar,
+									  t.token,
+									  t.getText());
+			return;
+		}
+		// in a plain parser grammar rule, cannot reference literals
+		// (unless defined previously via tokenVocab option)
+		// don't warn until we hit root grammar as may be defined there.
+		if ( grammar.getGrammarIsRoot() &&
+			 grammar.type==Grammar.PARSER &&
+			 grammar.getTokenType(t.getText())== Label.INVALID )
+		{
+			ErrorManager.grammarError(ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE,
+									  grammar,
+									  t.token,
+									  t.getText());
+		}
+		// Don't record literals for lexers; they are things to match, not tokens
+		if ( grammar.type==Grammar.LEXER ) {
+			return;
+		}
+		// otherwise add literal to token types if referenced from parser rule
+		// or in the tokens{} section
+		if ( (currentRuleName==null ||
+			  Character.isLowerCase(currentRuleName.charAt(0))) &&
+																grammar.getTokenType(t.getText())==Label.INVALID )
+		{
+			stringLiterals.put(t.getText(), UNASSIGNED_IN_PARSER_RULE);
+		}
+	}
+
+    @Override
+	protected void trackToken(GrammarAST t) {
+		// imported token names might exist, only add if new
+		// Might have ';'=4 in vocab import and SEMI=';'. Avoid
+		// setting to UNASSIGNED if we have loaded ';'/SEMI
+		if ( grammar.getTokenType(t.getText())==Label.INVALID &&
+			 tokens.get(t.getText())==null )
+		{
+			tokens.put(t.getText(), UNASSIGNED);
+		}
+	}
+
+    @Override
+	protected void trackTokenRule(GrammarAST t,
+								  GrammarAST modifier,
+								  GrammarAST block)
+	{
+		// imported token names might exist, only add if new
+		if ( grammar.type==Grammar.LEXER || grammar.type==Grammar.COMBINED ) {
+			if ( !Character.isUpperCase(t.getText().charAt(0)) ) {
+				return;
+			}
+			if ( t.getText().equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ) {
+				// don't add Tokens rule
+				return;
+			}
+
+			// track all lexer rules so we can look for token refs w/o
+			// associated lexer rules.
+			grammar.composite.lexerRules.add(t.getText());
+
+			int existing = grammar.getTokenType(t.getText());
+			if ( existing==Label.INVALID ) {
+				tokens.put(t.getText(), UNASSIGNED);
+			}
+			// look for "<TOKEN> : <literal> ;" pattern
+			// (can have optional action last)
+			if ( block.hasSameTreeStructure(charAlias) ||
+				 block.hasSameTreeStructure(stringAlias) ||
+				 block.hasSameTreeStructure(charAlias2) ||
+				 block.hasSameTreeStructure(stringAlias2) )
+			{
+				tokenRuleDefs.add(t.getText());
+				/*
+			Grammar parent = grammar.composite.getDelegator(grammar);
+			boolean importedByParserOrCombined =
+				parent!=null &&
+				(parent.type==Grammar.LEXER||parent.type==Grammar.PARSER);
+				*/
+				if ( grammar.type==Grammar.COMBINED || grammar.type==Grammar.LEXER ) {
+					// only call this rule an alias if combined or lexer
+					alias(t, (GrammarAST)block.getChild(0).getChild(0));
+				}
+			}
+		}
+		// else error
+	}
+
+    @Override
+	protected void alias(GrammarAST t, GrammarAST s) {
+		String tokenID = t.getText();
+		String literal = s.getText();
+		String prevAliasLiteralID = aliasesReverseIndex.get(literal);
+		if ( prevAliasLiteralID!=null ) { // we've seen this literal before
+			if ( tokenID.equals(prevAliasLiteralID) ) {
+				// duplicate but identical alias; might be tokens {A='a'} and
+				// lexer rule A : 'a' ;  Is ok, just return
+				return;
+			}
+
+			// give error unless both are rules (ok if one is in tokens section)
+			if ( !(tokenRuleDefs.contains(tokenID) && tokenRuleDefs.contains(prevAliasLiteralID)) )
+			{
+				// don't allow alias if A='a' in tokens section and B : 'a'; is rule.
+				// Allow if both are rules.  Will get DFA nondeterminism error later.
+				ErrorManager.grammarError(ErrorManager.MSG_TOKEN_ALIAS_CONFLICT,
+										  grammar,
+										  t.token,
+										  tokenID+"="+literal,
+										  prevAliasLiteralID);
+			}
+			return; // don't do the alias
+		}
+		int existingLiteralType = grammar.getTokenType(literal);
+		if ( existingLiteralType !=Label.INVALID ) {
+			// we've seen this before from a tokenVocab most likely
+			// don't assign a new token type; use existingLiteralType.
+			tokens.put(tokenID, existingLiteralType);
+		}
+		String prevAliasTokenID = aliases.get(tokenID);
+		if ( prevAliasTokenID!=null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_TOKEN_ALIAS_REASSIGNMENT,
+									  grammar,
+									  t.token,
+									  tokenID+"="+literal,
+									  prevAliasTokenID);
+			return; // don't do the alias
+		}
+		aliases.put(tokenID, literal);
+		aliasesReverseIndex.put(literal, tokenID);
+	}
+
+    @Override
+	public void defineTokens(Grammar root) {
+/*
+	System.out.println("stringLiterals="+stringLiterals);
+	System.out.println("tokens="+tokens);
+	System.out.println("aliases="+aliases);
+	System.out.println("aliasesReverseIndex="+aliasesReverseIndex);
+*/
+
+		assignTokenIDTypes(root);
+
+		aliasTokenIDsAndLiterals(root);
+
+		assignStringTypes(root);
+
+/*
+	System.out.println("stringLiterals="+stringLiterals);
+	System.out.println("tokens="+tokens);
+	System.out.println("aliases="+aliases);
+*/
+		defineTokenNamesAndLiteralsInGrammar(root);
+	}
+
+/*
+protected void defineStringLiteralsFromDelegates() {
+	 if ( grammar.getGrammarIsMaster() && grammar.type==Grammar.COMBINED ) {
+		 List<Grammar> delegates = grammar.getDelegates();
+		 System.out.println("delegates in master combined: "+delegates);
+		 for (int i = 0; i < delegates.size(); i++) {
+			 Grammar d = (Grammar) delegates.get(i);
+			 Set<String> literals = d.getStringLiterals();
+			 for (Iterator it = literals.iterator(); it.hasNext();) {
+				 String literal = (String) it.next();
+				 System.out.println("literal "+literal);
+				 int ttype = grammar.getTokenType(literal);
+				 grammar.defineLexerRuleForStringLiteral(literal, ttype);
+			 }
+		 }
+	 }
+}
+*/
+
+    @Override
+	protected void assignStringTypes(Grammar root) {
+		// walk string literals assigning types to unassigned ones
+		for (Map.Entry<String, Integer> entry : stringLiterals.entrySet()) {
+			String lit = entry.getKey();
+			Integer oldTypeI = entry.getValue();
+			int oldType = oldTypeI;
+			if ( oldType<Label.MIN_TOKEN_TYPE ) {
+				Integer typeI = Utils.integer(root.getNewTokenType());
+				stringLiterals.put(lit, typeI);
+				// if string referenced in combined grammar parser rule,
+				// automatically define in the generated lexer
+				root.defineLexerRuleForStringLiteral(lit, typeI);
+			}
+		}
+	}
+
+    @Override
+	protected void aliasTokenIDsAndLiterals(Grammar root) {
+		if ( root.type==Grammar.LEXER ) {
+			return; // strings/chars are never token types in LEXER
+		}
+		// walk aliases if any and assign types to aliased literals if literal
+		// was referenced
+		for (Map.Entry<String, String> entry : aliases.entrySet()) {
+			String tokenID = entry.getKey();
+			String literal = entry.getValue();
+			if ( literal.charAt(0)=='\'' && stringLiterals.get(literal)!=null ) {
+				stringLiterals.put(literal, tokens.get(tokenID));
+				// an alias still means you need a lexer rule for it
+				Integer typeI = tokens.get(tokenID);
+				if ( !tokenRuleDefs.contains(tokenID) ) {
+					root.defineLexerRuleForAliasedStringLiteral(tokenID, literal, typeI);
+				}
+			}
+		}
+	}
+
+    @Override
+	protected void assignTokenIDTypes(Grammar root) {
+		// walk token names, assigning values if unassigned
+		for (Map.Entry<String, Integer> entry : tokens.entrySet()) {
+			String tokenID = entry.getKey();
+			if ( entry.getValue()==UNASSIGNED ) {
+				tokens.put(tokenID, Utils.integer(root.getNewTokenType()));
+			}
+		}
+	}
+
+    @Override
+	protected void defineTokenNamesAndLiteralsInGrammar(Grammar root) {
+		for (Map.Entry<String, Integer> entry : tokens.entrySet()) {
+			int ttype = entry.getValue();
+			root.defineToken(entry.getKey(), ttype);
+		}
+		for (Map.Entry<String, Integer> entry : stringLiterals.entrySet()) {
+			String lit = entry.getKey();
+			int ttype = entry.getValue();
+			root.defineToken(lit, ttype);
+		}
+	}
+
+}
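
The alias() bookkeeping above is easiest to see in isolation.  The following standalone simplification (hypothetical; it does not use the ANTLR Grammar/GrammarAST machinery or ErrorManager) shows how the literal-to-token reverse index detects a second token claiming an already-aliased literal:

import java.util.HashMap;
import java.util.Map;

public class AliasIndexDemo {
    // tokenID -> literal, and literal -> first tokenID that claimed it
    static Map<String, String> aliases = new HashMap<String, String>();
    static Map<String, String> aliasesReverseIndex = new HashMap<String, String>();

    static void alias(String tokenID, String literal) {
        String prev = aliasesReverseIndex.get(literal);
        if (prev != null) {
            if (!prev.equals(tokenID)) {
                System.out.println("conflict: " + tokenID + "=" + literal + " vs " + prev);
            }
            return; // identical duplicate or conflict; don't re-record
        }
        aliases.put(tokenID, literal);
        aliasesReverseIndex.put(literal, tokenID);
    }

    public static void main(String[] args) {
        alias("SEMI", "';'");
        alias("SEMI", "';'");  // same alias twice: silently accepted
        alias("END", "';'");   // prints a conflict, mirroring MSG_TOKEN_ALIAS_CONFLICT
    }
}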
diff --git a/tool/src/main/java/org/antlr/tool/Attribute.java b/tool/src/main/java/org/antlr/tool/Attribute.java
new file mode 100644
index 0000000..f0432fa
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/Attribute.java
@@ -0,0 +1,135 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.tool;
+
+/** Track the names of attributes defined in arg lists, return values,
+ *  scope blocks, etc.
+ */
+public class Attribute {
+	/** The entire declaration such as "String foo;" */
+	public String decl;
+
+	/** The type; might be empty such as for Python which has no static typing */
+	public String type;
+
+	/** The name of the attribute "foo" */
+	public String name;
+
+	/** The optional attribute initialization expression */
+	public String initValue;
+
+	public Attribute(String decl) {
+		extractAttribute(decl);
+	}
+
+	public Attribute(String name, String decl) {
+		this.name = name;
+		this.decl = decl;
+	}
+
+	/** For decls like "String foo" or "char *foo32[3]" compute the ID
+	 *  and type declarations.  Also handle "int x=3" and 'T t = new T("foo")'
+	 *  but if the separator is ',' you cannot use ',' in the initvalue.
+	 *  AttributeScope.addAttributes takes care of the separation so we are
+	 *  free here to use from '=' to end of string as the expression.
+	 *
+	 *  Set name, type, initvalue, and full decl instance vars.
+	 */
+	protected void extractAttribute(String decl) {
+		if ( decl==null ) {
+			return;
+		}
+		boolean inID = false;
+		int start = -1;
+		int rightEdgeOfDeclarator = decl.length()-1;
+		int equalsIndex = decl.indexOf('=');
+		if ( equalsIndex>0 ) {
+			// everything after the '=' is the init value
+			this.initValue = decl.substring(equalsIndex+1,decl.length());
+			rightEdgeOfDeclarator = equalsIndex-1;
+		}
+		// walk backwards looking for start of an ID
+		for (int i=rightEdgeOfDeclarator; i>=0; i--) {
+			// if we haven't found the end yet, keep going
+			if ( !inID && Character.isLetterOrDigit(decl.charAt(i)) ) {
+			    inID = true;
+			}
+			else if ( inID &&
+				      !(Character.isLetterOrDigit(decl.charAt(i))||
+				       decl.charAt(i)=='_') ) {
+				start = i+1;
+				break;
+			}
+		}
+		if ( start<0 && inID ) {
+			start = 0;
+		}
+		if ( start<0 ) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_FIND_ATTRIBUTE_NAME_IN_DECL,decl);
+		}
+		// walk forwards looking for end of an ID
+		int stop=-1;
+		for (int i=start; i<=rightEdgeOfDeclarator; i++) {
+			// if we haven't found the end yet, keep going
+			if ( !(Character.isLetterOrDigit(decl.charAt(i))||
+				decl.charAt(i)=='_') )
+			{
+				stop = i;
+				break;
+			}
+			if ( i==rightEdgeOfDeclarator ) {
+				stop = i+1;
+			}
+		}
+
+		// the name is the last ID
+		this.name = decl.substring(start,stop);
+
+		// the type is the decl minus the ID (could be empty)
+		this.type = decl.substring(0,start);
+		if ( stop<=rightEdgeOfDeclarator ) {
+			this.type += decl.substring(stop,rightEdgeOfDeclarator+1);
+		}
+		this.type = type.trim();
+		if ( this.type.length()==0 ) {
+			this.type = null;
+		}
+
+		this.decl = decl;
+	}
+
+	@Override
+	public String toString() {
+		if ( initValue!=null ) {
+			return type+" "+name+"="+initValue;
+		}
+		return type+" "+name;
+	}
+}
+
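A small sketch of extractAttribute() on typical declarations (illustrative demo class name; assumes the tool jar built from this patch is on the classpath):

import org.antlr.tool.Attribute;

public class AttributeDemo {
    public static void main(String[] args) {
        Attribute a = new Attribute("int x=3");
        System.out.println(a.name);       // x
        System.out.println(a.type);       // int
        System.out.println(a.initValue);  // 3

        Attribute b = new Attribute("String foo");
        System.out.println(b.name);       // foo
        System.out.println(b.type);       // String
        System.out.println(b.initValue);  // null
    }
}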
diff --git a/tool/src/main/java/org/antlr/tool/AttributeScope.java b/tool/src/main/java/org/antlr/tool/AttributeScope.java
new file mode 100644
index 0000000..ba7c69b
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/AttributeScope.java
@@ -0,0 +1,198 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.tool;
+
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.runtime.Token;
+
+import java.util.*;
+
+/** Track the attributes within a scope.  A named scope has just its list
+ *  of attributes.  Each rule has potentially 3 scopes: return values,
+ *  parameters, and an implicitly-named scope (i.e., a scope defined in a rule).
+ *  Implicitly-defined scopes are named after the rule; rules and scopes then
+ *  must live in the same name space--no collisions allowed.
+ */
+public class AttributeScope {
+
+	/** All token scopes (token labels) share the same fixed scope
+	 *  of predefined attributes.  I keep this out of the runtime.Token
+	 *  object to avoid a runtime space burden.
+	 */
+	public static final AttributeScope tokenScope = new AttributeScope("Token",null);
+
+	static {
+		tokenScope.addAttribute("text", null);
+		tokenScope.addAttribute("type", null);
+		tokenScope.addAttribute("line", null);
+		tokenScope.addAttribute("index", null);
+		tokenScope.addAttribute("pos", null);
+		tokenScope.addAttribute("channel", null);
+		tokenScope.addAttribute("tree", null);
+		tokenScope.addAttribute("int", null);
+	}
+
+	/** The input token this scope is associated with (for error handling). */
+	public Token derivedFromToken;
+
+	public Grammar grammar;
+
+	/** The scope name */
+	private String name;
+
+	/** Not a rule scope, but visible to all rules "scope symbols { ...}" */
+	public boolean isDynamicGlobalScope;
+
+	/** Visible to all rules, but defined in rule "scope { int i; }" */
+	public boolean isDynamicRuleScope;
+
+	public boolean isParameterScope;
+
+	public boolean isReturnScope;
+
+	public boolean isPredefinedRuleScope;
+
+	public boolean isPredefinedLexerRuleScope;
+
+	/** The list of Attribute objects */
+	protected LinkedHashMap<String,Attribute> attributes = new LinkedHashMap<String, Attribute>();
+
+	/* Placeholder for compatibility with the CSharp3 target. */
+	public LinkedHashMap<String, GrammarAST> actions = new LinkedHashMap<String, GrammarAST>();
+
+	public AttributeScope(String name, Token derivedFromToken) {
+		this(null,name,derivedFromToken);
+	}
+
+	public AttributeScope(Grammar grammar, String name, Token derivedFromToken) {
+		this.grammar = grammar;
+		this.name = name;
+		this.derivedFromToken = derivedFromToken;
+	}
+
+	public String getName() {
+		if ( isParameterScope ) {
+			return name+"_parameter";
+		}
+		else if ( isReturnScope ) {
+			return name+"_return";
+		}
+		return name;
+	}
+
+	/** From a chunk of text holding the definitions of the attributes,
+	 *  pull them apart and create an Attribute for each one.  Add to
+	 *  the list of attributes for this scope.  Pass in the character
+	 *  that terminates a definition such as ',' or ';'.  For example,
+	 *
+	 *  scope symbols {
+	 *  	int n;
+	 *  	List names;
+	 *  }
+	 *
+	 *  would pass in definitions equal to the text in between {...} and
+	 *  separator=';'.  It results in two Attribute objects.
+	 */
+	public void addAttributes(String definitions, int separator) {
+		List<String> attrs = new ArrayList<String>();
+		CodeGenerator.getListOfArgumentsFromAction(definitions,0,-1,separator,attrs);
+		for (String a : attrs) {
+			Attribute attr = new Attribute(a);
+			if ( !isReturnScope && attr.initValue!=null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_ARG_INIT_VALUES_ILLEGAL,
+										  grammar,
+										  derivedFromToken,
+										  attr.name);
+				attr.initValue=null; // wipe it out
+			}
+			attributes.put(attr.name, attr);
+		}
+	}
+
+	public void addAttribute(String name, String decl) {
+		attributes.put(name, new Attribute(name,decl));
+	}
+
+	/** Given @scope::name {action} define it for this attribute scope. Later,
+	 *  the code generator will ask for the actions table.
+	 */
+	public final void defineNamedAction(GrammarAST nameAST, GrammarAST actionAST)
+	{
+		String actionName = nameAST.getText();
+		GrammarAST a = actions.get(actionName);
+		if (a != null) {
+			ErrorManager.grammarError(ErrorManager.MSG_ACTION_REDEFINITION,
+									  grammar,
+									  nameAST.getToken(),
+									  nameAST.getText());
+		} else {
+			actions.put(actionName, actionAST);
+		}
+	}
+
+	public Attribute getAttribute(String name) {
+		return attributes.get(name);
+	}
+
+	/** Used by templates to get all attributes */
+	public List<Attribute> getAttributes() {
+		List<Attribute> a = new ArrayList<Attribute>();
+		a.addAll(attributes.values());
+		return a;
+	}
+
+	/** Return the set of keys that collide from
+	 *  this and other.
+	 */
+	public Set<String> intersection(AttributeScope other) {
+		if ( other==null || other.size()==0 || size()==0 ) {
+			return null;
+		}
+		Set<String> inter = new HashSet<String>();
+		Set<String> thisKeys = attributes.keySet();
+		for (String key : thisKeys) {
+			if ( other.attributes.get(key)!=null ) {
+				inter.add(key);
+			}
+		}
+		if ( inter.isEmpty() ) {
+			return null;
+		}
+		return inter;
+	}
+
+	public int size() {
+		return attributes==null?0:attributes.size();
+	}
+
+	@Override
+	public String toString() {
+		return (isDynamicGlobalScope?"global ":"")+getName()+":"+attributes;
+	}
+}
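
The addAttributes() contract above is easiest to see with a concrete call. The following is an illustrative sketch only (the class name and values are invented for the example, not part of the sources); it feeds the text between the braces of a "scope symbols { int n; List names; }" block to a scope with ';' as the separator, assuming the ANTLR tool classes are on the classpath.

    import org.antlr.tool.AttributeScope;

    public class AttributeScopeSketch {
        public static void main(String[] args) {
            // a dynamic global scope, as in "scope symbols { int n; List names; }"
            AttributeScope symbols = new AttributeScope("symbols", null);
            symbols.isDynamicGlobalScope = true;

            // text between {...}, with ';' terminating each definition;
            // per the Javadoc above this yields two Attribute objects
            symbols.addAttributes("int n; List names;", ';');

            System.out.println(symbols.size());            // 2
            System.out.println(symbols.getAttribute("n"));  // the parsed "int n" attribute
            System.out.println(symbols);                    // e.g. global symbols:{n=..., names=...}
        }
    }
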
diff --git a/tool/src/main/java/org/antlr/tool/BuildDependencyGenerator.java b/tool/src/main/java/org/antlr/tool/BuildDependencyGenerator.java
new file mode 100644
index 0000000..77dee7e
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/BuildDependencyGenerator.java
@@ -0,0 +1,236 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.misc.Utils;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STGroup;
+
+import java.io.*;
+import java.util.ArrayList;
+import java.util.List;
+
+/** Given a grammar file, show the dependencies on .tokens etc...
+ *  Using ST, emit a simple "make compatible" list of dependencies.
+ *  For example, combined grammar T.g (no token import) generates:
+ *
+ *		TParser.java : T.g
+ * 		T.tokens : T.g
+ * 		T__g : T.g
+ *
+ *  For tree grammar TP with import of T.tokens:
+ *
+ * 		TP.g : T.tokens
+ * 		TP.java : TP.g
+ *
+ *  If "-lib libdir" is used on command-line with -depend, then include the
+ *  path like
+ *
+ * 		TP.g : libdir/T.tokens
+ *
+ *  Pay attention to -o as well:
+ *
+ * 		outputdir/TParser.java : T.g
+ *
+ *  So this output shows what the grammar depends on *and* what it generates.
+ *
+ *  Operate on one grammar file at a time.  If given a list of .g files on the
+ *  command line with -depend, just emit the dependencies.  The grammars
+ *  may depend on each other, but the order doesn't matter.  Build tools,
+ *  reading in this output, will know how to organize it.
+ *
+ *  This is a wee bit slow probably because the code generator has to load
+ *  all of its template files in order to figure out the file extension
+ *  for the generated recognizer.
+ *
+ *  This code was obvious until I removed the redundant "./" from the front of files
+ *  and had to escape spaces in filenames :(
+ */
+public class BuildDependencyGenerator {
+    protected String grammarFileName;
+    protected String tokenVocab;
+    protected Tool tool;
+    protected Grammar grammar;
+    protected CodeGenerator generator;
+    protected STGroup templates;
+
+    public BuildDependencyGenerator(Tool tool, String grammarFileName)
+            throws IOException {
+        this.tool = tool;
+        this.grammarFileName = grammarFileName;
+        grammar = tool.getRootGrammar(grammarFileName);
+        String language = (String) grammar.getOption("language");
+        generator = new CodeGenerator(tool, grammar, language);
+        generator.loadTemplates(language);
+    }
+
+    /** From T.g return a list of File objects that
+     *  name files ANTLR will emit from T.g.
+     */
+    public List<File> getGeneratedFileList() {
+        List<File> files = new ArrayList<File>();
+        File outputDir = tool.getOutputDirectory(grammarFileName);
+        if (outputDir.getName().equals(".")) {
+            outputDir = outputDir.getParentFile();
+        } else if (outputDir.getName().indexOf(' ') >= 0) { // has spaces?
+            String escSpaces = Utils.replace(outputDir.toString(),
+                    " ",
+                    "\\ ");
+            outputDir = new File(escSpaces);
+        }
+        // add generated recognizer; e.g., TParser.java
+        String recognizer =
+                generator.getRecognizerFileName(grammar.name, grammar.type);
+        files.add(new File(outputDir, recognizer));
+        // add output vocab file; e.g., T.tokens. This is always generated to
+        // the base output directory, which will be just . if there is no -o option
+        //
+        files.add(new File(tool.getOutputDirectory(), generator.getVocabFileName()));
+        // are we generating a .h file?
+        ST headerExtST = null;
+        ST extST = generator.getTemplates().getInstanceOf("codeFileExtension");
+        if (generator.getTemplates().isDefined("headerFile")) {
+            headerExtST = generator.getTemplates().getInstanceOf("headerFileExtension");
+            String suffix = Grammar.grammarTypeToFileNameSuffix[grammar.type];
+            String fileName = grammar.name + suffix + headerExtST.render();
+            files.add(new File(outputDir, fileName));
+        }
+        if (grammar.type == Grammar.COMBINED) {
+            // add autogenerated lexer; e.g., TLexer.java TLexer.h TLexer.tokens
+            // don't add T__.g (just a temp file)
+            
+            String suffix = Grammar.grammarTypeToFileNameSuffix[Grammar.LEXER];
+            String lexer = grammar.name + suffix + extST.render();
+            files.add(new File(outputDir, lexer));
+
+            // TLexer.h
+            if (headerExtST != null) {
+                String header = grammar.name + suffix + headerExtST.render();
+                files.add(new File(outputDir, header));
+            }
+        // for combined, don't generate TLexer.tokens
+        }
+
+        // handle generated files for imported grammars
+        List<Grammar> imports =
+                grammar.composite.getDelegates(grammar.composite.getRootGrammar());
+        for (Grammar g : imports) {
+            outputDir = tool.getOutputDirectory(g.getFileName());
+            String fname = groomQualifiedFileName(outputDir.toString(), g.getRecognizerName() + extST.render());
+            files.add(new File(fname));
+        }
+
+        if (files.isEmpty()) {
+            return null;
+        }
+        return files;
+    }
+
+    /**
+     * Return a list of File objects that name files ANTLR will read
+     * to process T.g. These can be .tokens files (if the grammar uses the
+     * tokenVocab option) as well as any imported grammar files.
+     */
+    public List<File> getDependenciesFileList() {
+        // Find all the things other than imported grammars
+        List<File> files = getNonImportDependenciesFileList();
+
+        // Handle imported grammars
+        List<Grammar> imports =
+                grammar.composite.getDelegates(grammar.composite.getRootGrammar());
+        for (Grammar g : imports) {
+            String libdir = tool.getLibraryDirectory();
+            String fileName = groomQualifiedFileName(libdir, g.fileName);
+            files.add(new File(fileName));
+        }
+
+        if (files.isEmpty()) {
+            return null;
+        }
+        return files;
+    }
+
+    /**
+     * Return a list of File objects that name files ANTLR will read
+     * to process T.g. These can only be .tokens files, and only
+     * if the grammar uses the tokenVocab option.
+     *
+     * @return List of dependencies other than imported grammars
+     */
+    public List<File> getNonImportDependenciesFileList() {
+        List<File> files = new ArrayList<File>();
+
+        // handle token vocabulary loads
+        tokenVocab = (String) grammar.getOption("tokenVocab");
+        if (tokenVocab != null) {
+
+            File vocabFile = tool.getImportedVocabFile(tokenVocab);
+            files.add(vocabFile);
+        }
+
+        return files;
+    }
+
+    public ST getDependencies() {
+        loadDependencyTemplates();
+        ST dependenciesST = templates.getInstanceOf("dependencies");
+        dependenciesST.add("in", getDependenciesFileList());
+        dependenciesST.add("out", getGeneratedFileList());
+        dependenciesST.add("grammarFileName", grammar.fileName);
+        return dependenciesST;
+    }
+
+    public void loadDependencyTemplates() {
+        if (templates != null) return;
+        String fileName = "org/antlr/tool/templates/depend.stg";
+        templates = new ToolSTGroupFile(fileName);
+    }
+
+    public String getTokenVocab() {
+        return tokenVocab;
+    }
+
+    public CodeGenerator getGenerator() {
+        return generator;
+    }    
+
+    public String groomQualifiedFileName(String outputDir, String fileName) {
+        if (outputDir.equals(".")) {
+            return fileName;
+        } else if (outputDir.indexOf(' ') >= 0) { // has spaces?
+            String escSpaces = Utils.replace(outputDir.toString(),
+                    " ",
+                    "\\ ");
+            return escSpaces + File.separator + fileName;
+        } else {
+            return outputDir + File.separator + fileName;
+        }
+    }
+}
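
Programmatically, the generator mirrors what the -depend command-line option does. The sketch below is illustrative only; it assumes a T.g grammar file in the working directory and the Tool(String[]) constructor used by the command-line driver, and the class name is made up for the example.

    import org.antlr.Tool;
    import org.antlr.tool.BuildDependencyGenerator;

    public class DependSketch {
        public static void main(String[] args) throws Exception {
            Tool tool = new Tool(new String[] { "-depend", "T.g" });
            BuildDependencyGenerator dep =
                new BuildDependencyGenerator(tool, "T.g");
            // renders make-style lines such as:
            //   TParser.java : T.g
            //   T.tokens : T.g
            System.out.println(dep.getDependencies().render());
        }
    }
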
diff --git a/tool/src/main/java/org/antlr/tool/CompositeGrammar.java b/tool/src/main/java/org/antlr/tool/CompositeGrammar.java
new file mode 100644
index 0000000..9f1863e
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/CompositeGrammar.java
@@ -0,0 +1,542 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.Label;
+import org.antlr.analysis.NFAState;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.grammar.v3.AssignTokenTypesWalker;
+import org.antlr.misc.Utils;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.tree.CommonTreeNodeStream;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Vector;
+
+/** A tree of component (delegate) grammars.
+ *
+ *  Rules defined in delegates are "inherited" like multi-inheritance
+ *  so you can override them.  All token types must be consistent across
+ *  rules from all delegate grammars, so they must be stored here in one
+ *  central place.
+ *
+ *  We have to start out assuming a composite grammar situation as we can't
+ *  look into the grammar files a priori to see if there is a delegate
+ *  statement.  Because of this, and to avoid duplicating token type tracking
+ *  in each grammar, even single noncomposite grammars use one of these objects
+ *  to track token types.
+ */
+public class CompositeGrammar {
+	public static final int MIN_RULE_INDEX = 1;
+
+	public CompositeGrammarTree delegateGrammarTreeRoot;
+
+	/** Used during getRuleReferenceClosure to detect computation cycles */
+	protected Set<NFAState> refClosureBusy = new HashSet<NFAState>();
+
+	/** Used to assign state numbers; all grammars in a composite share a common
+	 *  NFA space.  This tracks the state-number-to-state mapping.
+	 */
+	public int stateCounter = 0;
+
+	/** The NFA states in the NFA built from rules across grammars in composite.
+	 *  Maps state number to NFAState object.
+	 *  This is a Vector instead of a List because I need to be able to grow
+	 *  this properly.  After talking to Josh Bloch, Collections guy at Sun,
+	 *  I decided this was the easiest solution.
+	 */
+	protected Vector<NFAState> numberToStateList = new Vector<NFAState>(1000);
+
+	/** Token names and literal tokens like "void" are uniquely indexed,
+	 *  with -1 implying EOF.  Characters are different; they go from
+	 *  -1 (EOF) to \uFFFE.  For example, 0 could be a binary byte you
+	 *  want to lex.  Labels of DFA/NFA transitions can be both tokens
+	 *  and characters.  I use negative numbers for bookkeeping labels
+	 *  like EPSILON. Char/String literals and token types overlap in the same
+	 *  space, however.
+	 */
+	protected int maxTokenType = Label.MIN_TOKEN_TYPE-1;
+
+	/** Map token like ID (but not literals like "while") to its token type */
+	public Map<String, Integer> tokenIDToTypeMap = new LinkedHashMap<String, Integer>();
+
+	/** Map token literals like "while" to their token types.  It may be that
+	 *  WHILE="while"=35, in which case both tokenIDToTypeMap and this
+	 *  field will have entries mapped to 35.
+	 */
+	public Map<String, Integer> stringLiteralToTypeMap = new LinkedHashMap<String, Integer>();
+	/** Reverse index for stringLiteralToTypeMap */
+	public Vector<String> typeToStringLiteralList = new Vector<String>();
+
+	/** Map a token type to its token name.
+	 *  Must subtract MIN_TOKEN_TYPE from index.
+	 */
+	public Vector<String> typeToTokenList = new Vector<String>();
+
+	/** If combined or lexer grammar, track the rules.
+	 * 	Track lexer rules so we can warn about undefined tokens.
+	 *  This is the combined set of lexer rules from all lexer grammars
+	 *  seen in all imports.
+	 */
+	protected Set<String> lexerRules = new HashSet<String>();
+
+	/** Rules are uniquely labeled from 1..n among all grammars */
+	protected int ruleIndex = MIN_RULE_INDEX;
+
+	/** Map a rule index to its name; use a Vector on purpose as new
+	 *  collections stuff won't let me setSize and make it grow.  :(
+	 *  I need a specific guaranteed index, which the Collections stuff
+	 *  won't let me have.
+	 */
+	protected Vector<Rule> ruleIndexToRuleList = new Vector<Rule>();
+
+	public boolean watchNFAConversion = false;
+
+	protected void initTokenSymbolTables() {
+		// the faux token types take the first NUM_FAUX_LABELS positions;
+		// then we must have room for the predefined runtime token types
+		// like DOWN/UP used for tree parsing.
+		typeToTokenList.setSize(Label.NUM_FAUX_LABELS+Label.MIN_TOKEN_TYPE-1);
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.INVALID, "<INVALID>");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOT, "<EOT>");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SEMPRED, "<SEMPRED>");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.SET, "<SET>");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EPSILON, Label.EPSILON_STR);
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOF, "EOF");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.EOR_TOKEN_TYPE-1, "<EOR>");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.DOWN-1, "DOWN");
+		typeToTokenList.set(Label.NUM_FAUX_LABELS+Label.UP-1, "UP");
+		tokenIDToTypeMap.put("<INVALID>", Utils.integer(Label.INVALID));
+		tokenIDToTypeMap.put("<EOT>", Utils.integer(Label.EOT));
+		tokenIDToTypeMap.put("<SEMPRED>", Utils.integer(Label.SEMPRED));
+		tokenIDToTypeMap.put("<SET>", Utils.integer(Label.SET));
+		tokenIDToTypeMap.put("<EPSILON>", Utils.integer(Label.EPSILON));
+		tokenIDToTypeMap.put("EOF", Utils.integer(Label.EOF));
+		tokenIDToTypeMap.put("<EOR>", Utils.integer(Label.EOR_TOKEN_TYPE));
+		tokenIDToTypeMap.put("DOWN", Utils.integer(Label.DOWN));
+		tokenIDToTypeMap.put("UP", Utils.integer(Label.UP));
+	}
+
+	@SuppressWarnings("OverridableMethodCallInConstructor")
+	public CompositeGrammar() {
+		initTokenSymbolTables();
+	}
+
+	@SuppressWarnings("OverridableMethodCallInConstructor")
+	public CompositeGrammar(Grammar g) {
+		this();
+		setDelegationRoot(g);
+	}
+
+	public void setDelegationRoot(Grammar root) {
+		delegateGrammarTreeRoot = new CompositeGrammarTree(root);
+		root.compositeTreeNode = delegateGrammarTreeRoot;
+	}
+
+	public Rule getRule(String ruleName) {
+		return delegateGrammarTreeRoot.getRule(ruleName);
+	}
+
+	public Object getOption(String key) {
+		return delegateGrammarTreeRoot.getOption(key);
+	}
+
+	/** Add delegate grammar as child of delegator */
+	public void addGrammar(Grammar delegator, Grammar delegate) {
+		if ( delegator.compositeTreeNode==null ) {
+			delegator.compositeTreeNode = new CompositeGrammarTree(delegator);
+		}
+		delegator.compositeTreeNode.addChild(new CompositeGrammarTree(delegate));
+
+		/*// find delegator in tree so we can add a child to it
+		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(delegator);
+		t.addChild();
+		*/
+		// make sure new grammar shares this composite
+		delegate.composite = this;
+	}
+
+	/** Get the parent (delegator) of grammar g */
+	public Grammar getDelegator(Grammar g) {
+		CompositeGrammarTree me = delegateGrammarTreeRoot.findNode(g);
+		if ( me==null ) {
+			return null; // not found
+		}
+		if ( me.parent!=null ) {
+			return me.parent.grammar;
+		}
+		return null;
+	}
+
+	/** Get list of all delegates from all grammars in the delegate subtree of g.
+	 *  The grammars are in delegation tree preorder.  Don't include g itself
+	 *  in the list as it is not a delegate of itself.
+	 */
+	public List<Grammar> getDelegates(Grammar g) {
+		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(g);
+		if ( t==null ) {
+			return null; // no delegates
+		}
+		List<Grammar> grammars = t.getPostOrderedGrammarList();
+		grammars.remove(grammars.size()-1); // remove g (last one)
+		return grammars;
+	}
+
+	public List<Grammar> getDirectDelegates(Grammar g) {
+		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(g);
+		List<CompositeGrammarTree> children = t.children;
+		if ( children==null ) {
+			return null;
+		}
+		List<Grammar> grammars = new ArrayList<Grammar>();
+		for (int i = 0; children!=null && i < children.size(); i++) {
+			CompositeGrammarTree child = children.get(i);
+			grammars.add(child.grammar);
+		}
+		return grammars;
+	}
+
+	/** Get delegates below direct delegates of g */
+	public List<Grammar> getIndirectDelegates(Grammar g) {
+		List<Grammar> direct = getDirectDelegates(g);
+		List<Grammar> delegates = getDelegates(g);
+		if ( direct!=null ) {
+			delegates.removeAll(direct);
+		}
+		return delegates;
+	}
+
+	/** Return the list of delegator grammars from the root down to g.
+	 *  Order is root, ..., g.parent.  (g not included).
+	 */
+	public List<Grammar> getDelegators(Grammar g) {
+		if ( g==delegateGrammarTreeRoot.grammar ) {
+			return null;
+		}
+		List<Grammar> grammars = new ArrayList<Grammar>();
+		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(g);
+		// walk backwards to root, collecting grammars
+		CompositeGrammarTree p = t.parent;
+		while ( p!=null ) {
+			grammars.add(0, p.grammar); // add to head so in order later
+			p = p.parent;
+		}
+		return grammars;
+	}
+
+	/** Get set of rules for grammar g that need to have manual delegation
+	 *  methods.  This is the list of rules collected from all direct/indirect
+	 *  delegates minus rules overridden in grammar g.
+	 *
+	 *  This returns null except for the delegate root because it is the only
+	 *  one that has to have a complete grammar rule interface.  The delegates
+	 *  should not be instantiated directly for use as parsers (you can create
+	 *  them to pass to the root parser's ctor as arguments).
+	 */
+	public Set<? extends Rule> getDelegatedRules(Grammar g) {
+		if ( g!=delegateGrammarTreeRoot.grammar ) {
+			return null;
+		}
+		Set<? extends Rule> rules = getAllImportedRules(g);
+		for (Iterator<? extends Rule> it = rules.iterator(); it.hasNext();) {
+			Rule r = it.next();
+			Rule localRule = g.getLocallyDefinedRule(r.name);
+			// if locally defined or it's not local but synpred, don't make
+			// a delegation method
+			if ( localRule!=null || r.isSynPred ) {
+				it.remove(); // kill overridden rules
+			}
+		}
+		return rules;
+	}
+
+	/** Get all rule definitions from all direct/indirect delegate grammars
+	 *  of g.
+	 */
+	public Set<? extends Rule> getAllImportedRules(Grammar g) {
+		Set<String> ruleNames = new HashSet<String>();
+		Set<Rule> rules = new HashSet<Rule>();
+		CompositeGrammarTree subtreeRoot = delegateGrammarTreeRoot.findNode(g);
+
+		List<Grammar> grammars = subtreeRoot.getPreOrderedGrammarList();
+		// walk all grammars preorder, priority given to grammar listed first.
+		for (int i = 0; i < grammars.size(); i++) {
+			Grammar delegate = grammars.get(i);
+			// for each rule in delegate, add to rules if no rule with that
+			// name has been seen.  (can't use removeAll; wrong hashcode/equals on Rule)
+			for (Rule r : delegate.getRules()) {
+				if ( !ruleNames.contains(r.name) ) {
+					ruleNames.add(r.name); // track that we've seen this
+					rules.add(r);
+				}
+			}
+		}
+		return rules;
+	}
+
+	public Grammar getRootGrammar() {
+		if ( delegateGrammarTreeRoot==null ) {
+			return null;
+		}
+		return delegateGrammarTreeRoot.grammar;
+	}
+
+	public Grammar getGrammar(String grammarName) {
+		CompositeGrammarTree t = delegateGrammarTreeRoot.findNode(grammarName);
+		if ( t!=null ) {
+			return t.grammar;
+		}
+		return null;
+	}
+
+	// NFA spans multiple grammars, must handle here
+
+	public int getNewNFAStateNumber() {
+		return stateCounter++;
+	}
+
+	public void addState(NFAState state) {
+		numberToStateList.setSize(state.stateNumber+1); // make sure we have room
+		numberToStateList.set(state.stateNumber, state);
+	}
+
+	public NFAState getState(int s) {
+		return numberToStateList.get(s);
+	}
+
+	public void assignTokenTypes() throws RecognitionException {
+		// ASSIGN TOKEN TYPES for all delegates (same walker)
+		//System.out.println("### assign types");
+		AssignTokenTypesWalker ttypesWalker = new AssignTokenTypesBehavior();
+		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = grammars.get(i);
+			ttypesWalker.setTreeNodeStream(new CommonTreeNodeStream(g.getGrammarTree()));
+			try {
+				//System.out.println("    walking "+g.name);
+				ttypesWalker.grammar_(g);
+			}
+			catch (RecognitionException re) {
+				ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+								   re);
+			}
+		}
+		// the walker has filled literals, tokens, and alias tables.
+		// now tell it to define them in the root grammar
+		ttypesWalker.defineTokens(delegateGrammarTreeRoot.grammar);
+	}
+
+	public void translateLeftRecursiveRules() {
+		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = grammars.get(i);
+			if ( !(g.type==Grammar.PARSER || g.type==Grammar.COMBINED) ) continue;
+			for (GrammarAST r : g.grammarTree.findAllType(ANTLRParser.RULE)) {
+				if ( !Character.isUpperCase(r.getChild(0).getText().charAt(0)) ) {
+					if ( LeftRecursiveRuleAnalyzer.hasImmediateRecursiveRuleRefs(r, r.enclosingRuleName) ) {
+						g.translateLeftRecursiveRule(r);
+					}
+				}
+			}
+		}
+	}
+
+	public void defineGrammarSymbols() {
+		delegateGrammarTreeRoot.trimLexerImportsIntoCombined();
+		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = grammars.get(i);
+			g.defineGrammarSymbols();
+		}
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = grammars.get(i);
+			g.checkNameSpaceAndActions();
+		}
+		minimizeRuleSet();
+	}
+
+	public void createNFAs() {
+		if ( ErrorManager.doNotAttemptAnalysis() ) {
+			return;
+		}
+		List<Grammar> grammars = delegateGrammarTreeRoot.getPostOrderedGrammarList();
+		//System.out.println("### createNFAs for composite; grammars: "+names);
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = grammars.get(i);
+			g.createRuleStartAndStopNFAStates();
+		}
+		for (int i = 0; grammars!=null && i < grammars.size(); i++) {
+			Grammar g = grammars.get(i);
+			g.buildNFA();
+		}
+	}
+
+	public void minimizeRuleSet() {
+		Set<String> ruleDefs = new HashSet<String>();
+		_minimizeRuleSet(ruleDefs, delegateGrammarTreeRoot);
+	}
+
+	public void _minimizeRuleSet(Set<String> ruleDefs,
+								 CompositeGrammarTree p) {
+		Set<String> localRuleDefs = new HashSet<String>();
+		Set<String> overrides = new HashSet<String>();
+		// compute set of non-overridden rules for this delegate
+		for (Rule r : p.grammar.getRules()) {
+			if ( !ruleDefs.contains(r.name) ) {
+				localRuleDefs.add(r.name);
+			}
+			else if ( !r.name.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ) {
+				// record any overridden rule 'cept tokens rule
+				overrides.add(r.name);
+			}
+		}
+		//System.out.println("rule defs for "+p.grammar.name+": "+localRuleDefs);
+		//System.out.println("overridden rule for "+p.grammar.name+": "+overrides);
+		p.grammar.overriddenRules = overrides;
+
+		// make set of all rules defined thus far walking delegation tree.
+		// the same rule in two delegates resolves in favor of the first found
+		// in the tree; therefore the second must not be included
+		ruleDefs.addAll(localRuleDefs);
+
+		// pass larger set of defined rules to delegates
+		if ( p.children!=null ) {
+			for (CompositeGrammarTree delegate : p.children) {
+				_minimizeRuleSet(ruleDefs, delegate);
+			}
+		}
+	}
+
+	/*
+	public void minimizeRuleSet() {
+		Set<Rule> refs = _minimizeRuleSet(delegateGrammarTreeRoot);
+		System.out.println("all rule refs: "+refs);
+	}
+
+	public Set<Rule> _minimizeRuleSet(CompositeGrammarTree p) {
+		Set<Rule> refs = new HashSet<Rule>();
+		for (GrammarAST refAST : p.grammar.ruleRefs) {
+			System.out.println("ref "+refAST.getText()+": "+refAST.NFAStartState+
+							   " enclosing rule: "+refAST.NFAStartState.enclosingRule+
+							   " invoking rule: "+((NFAState)refAST.NFAStartState.transition[0].target).enclosingRule);
+			refs.add(((NFAState)refAST.NFAStartState.transition[0].target).enclosingRule);
+		}
+
+		if ( p.children!=null ) {
+			for (CompositeGrammarTree delegate : p.children) {
+				Set<Rule> delegateRuleRefs = _minimizeRuleSet(delegate);
+				refs.addAll(delegateRuleRefs);
+			}
+		}
+
+		return refs;
+	}
+	*/
+
+	/*
+	public void oldminimizeRuleSet() {
+		// first walk to remove all overridden rules
+		Set<String> ruleDefs = new HashSet<String>();
+		Set<String> ruleRefs = new HashSet<String>();
+		for (GrammarAST refAST : delegateGrammarTreeRoot.grammar.ruleRefs) {
+			String rname = refAST.getText();
+			ruleRefs.add(rname);
+		}
+		_minimizeRuleSet(ruleDefs,
+						 ruleRefs,
+						 delegateGrammarTreeRoot);
+		System.out.println("overall rule defs: "+ruleDefs);
+	}
+
+	public void _minimizeRuleSet(Set<String> ruleDefs,
+								 Set<String> ruleRefs,
+								 CompositeGrammarTree p) {
+		Set<String> localRuleDefs = new HashSet<String>();
+		for (Rule r : p.grammar.getRules()) {
+			if ( !ruleDefs.contains(r.name) ) {
+				localRuleDefs.add(r.name);
+				ruleDefs.add(r.name);
+			}
+		}
+		System.out.println("rule defs for "+p.grammar.name+": "+localRuleDefs);
+
+		// remove locally-defined rules not in ref set
+		// find intersection of local rules and references from delegator
+		// that is set of rules needed by delegator
+		Set<String> localRuleDefsSatisfyingRefsFromBelow = new HashSet<String>();
+		for (String r : ruleRefs) {
+			if ( localRuleDefs.contains(r) ) {
+				localRuleDefsSatisfyingRefsFromBelow.add(r);
+			}
+		}
+
+		// now get list of refs from localRuleDefsSatisfyingRefsFromBelow.
+		// Those rules are also allowed in this delegate
+		for (GrammarAST refAST : p.grammar.ruleRefs) {
+			if ( localRuleDefsSatisfyingRefsFromBelow.contains(refAST.enclosingRuleName) ) {
+				// found rule ref within needed rule
+			}
+		}
+
+		// remove rule refs not in the new rule def set
+
+		// walk all children, adding rules not already defined
+		if ( p.children!=null ) {
+			for (CompositeGrammarTree delegate : p.children) {
+				_minimizeRuleSet(ruleDefs, ruleRefs, delegate);
+			}
+		}
+	}
+	*/
+
+	/*
+	public void trackNFAStatesThatHaveLabeledEdge(Label label,
+												  NFAState stateWithLabeledEdge)
+	{
+		Set<NFAState> states = typeToNFAStatesWithEdgeOfTypeMap.get(label);
+		if ( states==null ) {
+			states = new HashSet<NFAState>();
+			typeToNFAStatesWithEdgeOfTypeMap.put(label, states);
+		}
+		states.add(stateWithLabeledEdge);
+	}
+
+	public Map<Label, Set<NFAState>> getTypeToNFAStatesWithEdgeOfTypeMap() {
+		return typeToNFAStatesWithEdgeOfTypeMap;
+	}
+
+	public Set<NFAState> getStatesWithEdge(Label label) {
+		return typeToNFAStatesWithEdgeOfTypeMap.get(label);
+	}
+*/
+}
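
As the class comment says, even a grammar without an import statement is wrapped in a CompositeGrammar. A small read-only sketch, assuming the Grammar(String) convenience constructor that the tool's own tests use (class name invented for the example):

    import org.antlr.tool.CompositeGrammar;
    import org.antlr.tool.Grammar;

    public class CompositeSketch {
        public static void main(String[] args) throws Exception {
            Grammar g = new Grammar(
                "parser grammar T;\n" +
                "a : A | B ;\n");
            CompositeGrammar composite = g.composite;

            // the delegation tree root is the grammar itself...
            System.out.println(composite.getRootGrammar() == g);  // true
            // ...and with no imports there are no delegates
            System.out.println(composite.getDelegates(g));        // []
            // token types for the whole composite live in one shared table
            System.out.println(composite.tokenIDToTypeMap.keySet());
        }
    }
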
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/CompositeGrammarTree.java b/tool/src/main/java/org/antlr/tool/CompositeGrammarTree.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/tool/CompositeGrammarTree.java
rename to tool/src/main/java/org/antlr/tool/CompositeGrammarTree.java
diff --git a/tool/src/main/java/org/antlr/tool/DOTGenerator.java b/tool/src/main/java/org/antlr/tool/DOTGenerator.java
new file mode 100644
index 0000000..f0180c4
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/DOTGenerator.java
@@ -0,0 +1,402 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.Tool;
+import org.antlr.analysis.*;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.misc.Utils;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STGroup;
+import org.stringtemplate.v4.STGroupFile;
+
+import java.util.*;
+
+/** The DOT (part of graphviz) generation aspect. */
+public class DOTGenerator {
+	public static final boolean STRIP_NONREDUCED_STATES = false;
+
+	protected String arrowhead="normal";
+	protected String rankdir="LR";
+
+	/** Library of output templates; use {@code <attrname>} format */
+    public static STGroup stlib = new STGroupFile("org/antlr/tool/templates/dot/dot.stg");
+
+    /** To prevent infinite recursion when walking state machines, record
+     *  which states we've visited.  Make a new set every time you start
+     *  walking in case you reuse this object.
+     */
+    protected Set<Object> markedStates = null;
+
+    protected Grammar grammar;
+
+    /** This aspect is associated with a grammar */
+	public DOTGenerator(Grammar grammar) {
+		this.grammar = grammar;
+	}
+
+    /** Return a String containing a DOT description that, when displayed,
+     *  will show the incoming state machine visually.  All nodes reachable
+     *  from startState will be included.
+     */
+    public String getDOT(State startState) {
+		if ( startState==null ) {
+			return null;
+		}
+		// The output DOT graph for visualization
+		ST dot;
+		markedStates = new HashSet<Object>();
+        if ( startState instanceof DFAState ) {
+            dot = stlib.getInstanceOf("dfa");
+			dot.add("startState",
+					Utils.integer(startState.stateNumber));
+			dot.add("useBox",
+					Tool.internalOption_ShowNFAConfigsInDFA);
+			walkCreatingDFADOT(dot, (DFAState)startState);
+        }
+        else {
+            dot = stlib.getInstanceOf("nfa");
+			dot.add("startState",
+					Utils.integer(startState.stateNumber));
+			walkRuleNFACreatingDOT(dot, startState);
+        }
+		dot.add("rankdir", rankdir);
+        return dot.render();
+    }
+
+    /** Return a String containing a DOT description that, when displayed,
+     *  will show the incoming state machine visually.  All nodes reachable
+     *  from startState will be included.
+    public String getRuleNFADOT(State startState) {
+        // The output DOT graph for visualization
+        ST dot = stlib.getInstanceOf("nfa");
+
+        markedStates = new HashSet();
+        dot.add("startState",
+                Utils.integer(startState.stateNumber));
+        walkRuleNFACreatingDOT(dot, startState);
+        return dot.toString();
+    }
+	 */
+
+    /** Do a depth-first walk of the state machine graph and
+     *  fill a DOT description template.  Keep filling the
+     *  states and edges attributes.
+     */
+    protected void walkCreatingDFADOT(ST dot,
+									  DFAState s)
+    {
+		if ( markedStates.contains(Utils.integer(s.stateNumber)) ) {
+			return; // already visited this node
+        }
+
+		markedStates.add(Utils.integer(s.stateNumber)); // mark this node as completed.
+
+        // first add this node
+        ST st;
+        if ( s.isAcceptState() ) {
+            st = stlib.getInstanceOf("stopstate");
+        }
+        else {
+            st = stlib.getInstanceOf("state");
+        }
+        st.add("name", getStateLabel(s));
+        dot.add("states", st);
+
+        // make a DOT edge for each transition
+		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+			Transition edge = s.transition(i);
+			/*
+			System.out.println("dfa "+s.dfa.decisionNumber+
+				" edge from s"+s.stateNumber+" ["+i+"] of "+s.getNumberOfTransitions());
+			*/
+			if ( STRIP_NONREDUCED_STATES ) {
+				if ( edge.target instanceof DFAState &&
+					((DFAState)edge.target).getAcceptStateReachable()!=DFA.REACHABLE_YES )
+				{
+					continue; // don't generate nodes for terminal states
+				}
+			}
+			st = stlib.getInstanceOf("edge");
+			st.add("label", getEdgeLabel(edge));
+			st.add("src", getStateLabel(s));
+            st.add("target", getStateLabel(edge.target));
+			st.add("arrowhead", arrowhead);
+            dot.add("edges", st);
+            walkCreatingDFADOT(dot, (DFAState)edge.target); // keep walkin'
+        }
+    }
+
+    /** Do a depth-first walk of the state machine graph and
+     *  fill a DOT description template.  Keep filling the
+     *  states and edges attributes.  We know this is an NFA
+     *  for a rule so don't traverse edges to other rules and
+     *  don't go past rule end state.
+     */
+    protected void walkRuleNFACreatingDOT(ST dot,
+                                          State s)
+    {
+        if ( markedStates.contains(s) ) {
+            return; // already visited this node
+        }
+
+        markedStates.add(s); // mark this node as completed.
+
+        // first add this node
+        ST stateST;
+        if ( s.isAcceptState() ) {
+            stateST = stlib.getInstanceOf("stopstate");
+        }
+        else {
+            stateST = stlib.getInstanceOf("state");
+        }
+        stateST.add("name", getStateLabel(s));
+        dot.add("states", stateST);
+
+        if ( s.isAcceptState() )  {
+            return; // don't go past end of rule node to the follow states
+        }
+
+        // special case: if decision point, then line up the alt start states
+        // unless it's an end of block
+		if ( ((NFAState)s).isDecisionState() ) {
+			GrammarAST n = ((NFAState)s).associatedASTNode;
+			if ( n!=null && n.getType()!=ANTLRParser.EOB ) {
+				ST rankST = stlib.getInstanceOf("decision-rank");
+				NFAState alt = (NFAState)s;
+				while ( alt!=null ) {
+					rankST.add("states", getStateLabel(alt));
+					if ( alt.transition[1] !=null ) {
+						alt = (NFAState)alt.transition[1].target;
+					}
+					else {
+						alt=null;
+					}
+				}
+				dot.add("decisionRanks", rankST);
+			}
+		}
+
+        // make a DOT edge for each transition
+		ST edgeST;
+		for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+            Transition edge = s.transition(i);
+            if ( edge instanceof RuleClosureTransition ) {
+                RuleClosureTransition rr = ((RuleClosureTransition)edge);
+                // don't jump to other rules, but display edge to follow node
+                edgeST = stlib.getInstanceOf("edge");
+				if ( rr.rule.grammar != grammar ) {
+					edgeST.add("label", "<" + rr.rule.grammar.name + "." + rr.rule.name + ">");
+				}
+				else {
+					edgeST.add("label", "<" + rr.rule.name + ">");
+				}
+				edgeST.add("src", getStateLabel(s));
+				edgeST.add("target", getStateLabel(rr.followState));
+				edgeST.add("arrowhead", arrowhead);
+                dot.add("edges", edgeST);
+				walkRuleNFACreatingDOT(dot, rr.followState);
+                continue;
+            }
+			if ( edge.isAction() ) {
+				edgeST = stlib.getInstanceOf("action-edge");
+			}
+			else if ( edge.isEpsilon() ) {
+				edgeST = stlib.getInstanceOf("epsilon-edge");
+			}
+			else {
+				edgeST = stlib.getInstanceOf("edge");
+			}
+			edgeST.add("label", getEdgeLabel(edge));
+            edgeST.add("src", getStateLabel(s));
+			edgeST.add("target", getStateLabel(edge.target));
+			edgeST.add("arrowhead", arrowhead);
+            dot.add("edges", edgeST);
+            walkRuleNFACreatingDOT(dot, edge.target); // keep walkin'
+        }
+    }
+
+    /*
+	public void writeDOTFilesForAllRuleNFAs() throws IOException {
+        Collection rules = grammar.getRules();
+        for (Iterator itr = rules.iterator(); itr.hasNext();) {
+			Grammar.Rule r = (Grammar.Rule) itr.next();
+            String ruleName = r.name;
+            writeDOTFile(
+                    ruleName,
+                    getRuleNFADOT(grammar.getRuleStartState(ruleName)));
+        }
+    }
+    */
+
+    /*
+	public void writeDOTFilesForAllDecisionDFAs() throws IOException {
+        // for debugging, create a DOT file for each decision in
+        // a directory named for the grammar.
+        File grammarDir = new File(grammar.name+"_DFAs");
+        grammarDir.mkdirs();
+        List decisionList = grammar.getDecisionNFAStartStateList();
+        if ( decisionList==null ) {
+            return;
+        }
+        int i = 1;
+        Iterator iter = decisionList.iterator();
+        while (iter.hasNext()) {
+            NFAState decisionState = (NFAState)iter.next();
+            DFA dfa = decisionState.getDecisionASTNode().getLookaheadDFA();
+            if ( dfa!=null ) {
+                String dot = getDOT( dfa.startState );
+                writeDOTFile(grammarDir+"/dec-"+i, dot);
+            }
+            i++;
+        }
+    }
+    */
+
+    /** Fix edge strings so they print out in DOT properly;
+	 *  generate any gated predicates on edge too.
+	 */
+    protected String getEdgeLabel(Transition edge) {
+		String label = edge.label.toString(grammar);
+		label = Utils.replace(label,"\\", "\\\\");
+		label = Utils.replace(label,"\"", "\\\"");
+		label = Utils.replace(label,"\n", "\\\\n");
+		label = Utils.replace(label,"\r", "");
+		if ( label.equals(Label.EPSILON_STR) ) {
+            label = "e";
+        }
+		State target = edge.target;
+		if ( !edge.isSemanticPredicate() && target instanceof DFAState ) {
+			// look for gated predicates; don't add gated to simple sempred edges
+			SemanticContext preds =
+				((DFAState)target).getGatedPredicatesInNFAConfigurations();
+			if ( preds!=null ) {
+				String predsStr;
+				predsStr = "&&{"+
+					preds.genExpr(grammar.generator,
+								  grammar.generator.getTemplates(), null).render()
+					+"}?";
+				label += predsStr;
+			}
+		}
+        return label;
+    }
+
+    protected String getStateLabel(State s) {
+        if ( s==null ) {
+            return "null";
+        }
+        String stateLabel = String.valueOf(s.stateNumber);
+		if ( s instanceof DFAState ) {
+            StringBuilder buf = new StringBuilder(250);
+			buf.append('s');
+			buf.append(s.stateNumber);
+			if ( Tool.internalOption_ShowNFAConfigsInDFA ) {
+				if ( s instanceof DFAState ) {
+					if ( ((DFAState)s).abortedDueToRecursionOverflow ) {
+						buf.append("\\n");
+						buf.append("abortedDueToRecursionOverflow");
+					}
+				}
+				Set<Integer> alts = ((DFAState)s).getAltSet();
+				if ( alts!=null ) {
+					buf.append("\\n");
+					// separate alts
+					List<Integer> altList = new ArrayList<Integer>();
+					altList.addAll(alts);
+					Collections.sort(altList);
+					Set<NFAConfiguration> configurations = ((DFAState) s).nfaConfigurations;
+					for (int altIndex = 0; altIndex < altList.size(); altIndex++) {
+						Integer altI = altList.get(altIndex);
+						int alt = altI;
+						if ( altIndex>0 ) {
+							buf.append("\\n");
+						}
+						buf.append("alt");
+						buf.append(alt);
+						buf.append(':');
+						// get a list of configs for just this alt
+						// it will help us print better later
+						List<NFAConfiguration> configsInAlt = new ArrayList<NFAConfiguration>();
+						for (NFAConfiguration c : configurations) {
+							if ( c.alt!=alt ) continue;
+							configsInAlt.add(c);
+						}
+						int n = 0;
+						for (int cIndex = 0; cIndex < configsInAlt.size(); cIndex++) {
+							NFAConfiguration c = configsInAlt.get(cIndex);
+							n++;
+							buf.append(c.toString(false));
+							if ( (cIndex+1)<configsInAlt.size() ) {
+								buf.append(", ");
+							}
+							if ( n%5==0 && (configsInAlt.size()-cIndex)>3 ) {
+								buf.append("\\n");
+							}
+						}
+					}
+				}
+			}
+            stateLabel = buf.toString();
+        }
+		if ( (s instanceof NFAState) && ((NFAState)s).isDecisionState() ) {
+			stateLabel = stateLabel+",d="+
+					((NFAState)s).getDecisionNumber();
+			if ( ((NFAState)s).endOfBlockStateNumber!=State.INVALID_STATE_NUMBER ) {
+				stateLabel += ",eob="+((NFAState)s).endOfBlockStateNumber;
+			}
+		}
+		else if ( (s instanceof NFAState) &&
+			((NFAState)s).endOfBlockStateNumber!=State.INVALID_STATE_NUMBER)
+		{
+			NFAState n = ((NFAState)s);
+			stateLabel = stateLabel+",eob="+n.endOfBlockStateNumber;
+		}
+        else if ( s instanceof DFAState && ((DFAState)s).isAcceptState() ) {
+            stateLabel = stateLabel+
+                    "=>"+((DFAState)s).getUniquelyPredictedAlt();
+        }
+        return '"'+stateLabel+'"';
+    }
+
+	public String getArrowheadType() {
+		return arrowhead;
+	}
+
+	public void setArrowheadType(String arrowhead) {
+		this.arrowhead = arrowhead;
+	}
+
+	public String getRankdir() {
+		return rankdir;
+	}
+
+	public void setRankdir(String rankdir) {
+		this.rankdir = rankdir;
+	}
+}
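
Generating DOT takes only a grammar and a start state; the result is plain text that graphviz can render. An illustrative sketch, assuming the same Grammar(String) constructor as above plus Grammar.getRuleStartState(String), which the commented-out helpers in this file also rely on (class name invented for the example):

    import org.antlr.tool.DOTGenerator;
    import org.antlr.tool.Grammar;

    public class DotSketch {
        public static void main(String[] args) throws Exception {
            Grammar g = new Grammar(
                "parser grammar T;\n" +
                "a : A | B ;\n");
            g.composite.createNFAs();     // build the rule NFAs first

            DOTGenerator dotter = new DOTGenerator(g);
            String dot = dotter.getDOT(g.getRuleStartState("a"));
            System.out.println(dot);      // digraph with one node per NFA state of rule a
        }
    }
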
diff --git a/tool/src/main/java/org/antlr/tool/ErrorManager.java b/tool/src/main/java/org/antlr/tool/ErrorManager.java
new file mode 100644
index 0000000..82e69e8
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/ErrorManager.java
@@ -0,0 +1,951 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.Tool;
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.NFAState;
+import org.antlr.misc.BitSet;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STErrorListener;
+import org.stringtemplate.v4.STGroup;
+import org.stringtemplate.v4.STGroupFile;
+import org.stringtemplate.v4.misc.STMessage;
+
+import java.lang.reflect.Field;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.WeakHashMap;
+
+/** Defines all the errors ANTLR can generate, both for the tool and for
+ *  issues with a grammar.
+ *
+ *  Here is a list of language names:
+ *
+ *  http://ftp.ics.uci.edu/pub/ietf/http/related/iso639.txt
+ *
+ *  Here is a list of country names:
+ *
+ *  http://www.chemie.fu-berlin.de/diverse/doc/ISO_3166.html
+ *
+ *  I use constants not strings to identify messages as the compiler will
+ *  find any errors/mismatches rather than leaving a mistyped string in
+ *  the code to be found randomly in the future.  Further, Intellij can
+ *  do field name expansion to save me some typing.  I have to map
+ *  int constants to template names, however, which could introduce a mismatch.
+ *  Someone could provide a .stg file that had a template name wrong.  When
+ *  I load the group, then, I must verify that all messages are there.
+ *
+ *  This is essentially the functionality of the resource bundle stuff Java
+ *  has, but I don't want to load a property file--I want to load a template
+ *  group file and this is so simple, why mess with their junk.
+ *
+ *  I use the default Locale as defined by Java to compute a group file name
+ *  in the org/antlr/tool/templates/messages dir called en_US.stg and so on.
+ *
+ *  Normally we want to use the default locale, but often a message file will
+ *  not exist for it, so we must fall back on the US locale.
+ *
+ *  During initialization of this class, all errors go straight to System.err.
+ *  There is no way around this.  If I have not set up the error system, how
+ *  can I do errors properly?  For example, if the string template group file
+ *  full of messages has an error, how could I print to anything but System.err?
+ *
+ *  TODO: how to map locale to a file encoding for the stringtemplate group file?
+ *  ST knows how to pay attention to the default encoding so it
+ *  should probably just work unless a GUI sets the locale to some Chinese
+ *  variation but System.getProperty("file.encoding") is US.  Hmm...
+ *
+ *  TODO: get antlr.g etc.. parsing errors to come here.
+ */
+public class ErrorManager {
+	// TOOL ERRORS
+	// file errors
+	public static final int MSG_CANNOT_WRITE_FILE = 1;
+	public static final int MSG_CANNOT_CLOSE_FILE = 2;
+	public static final int MSG_CANNOT_FIND_TOKENS_FILE = 3;
+	public static final int MSG_ERROR_READING_TOKENS_FILE = 4;
+	public static final int MSG_DIR_NOT_FOUND = 5;
+	public static final int MSG_OUTPUT_DIR_IS_FILE = 6;
+	public static final int MSG_CANNOT_OPEN_FILE = 7;
+	public static final int MSG_FILE_AND_GRAMMAR_NAME_DIFFER = 8;
+	public static final int MSG_FILENAME_EXTENSION_ERROR = 9;
+
+	public static final int MSG_INTERNAL_ERROR = 10;
+	public static final int MSG_INTERNAL_WARNING = 11;
+	public static final int MSG_ERROR_CREATING_ARTIFICIAL_RULE = 12;
+	public static final int MSG_TOKENS_FILE_SYNTAX_ERROR = 13;
+	public static final int MSG_CANNOT_GEN_DOT_FILE = 14;
+	public static final int MSG_BAD_AST_STRUCTURE = 15;
+	public static final int MSG_BAD_ACTION_AST_STRUCTURE = 16;
+
+	// code gen errors
+	public static final int MSG_MISSING_CODE_GEN_TEMPLATES = 20;
+	public static final int MSG_MISSING_CYCLIC_DFA_CODE_GEN_TEMPLATES = 21;
+	public static final int MSG_CODE_GEN_TEMPLATES_INCOMPLETE = 22;
+	public static final int MSG_CANNOT_CREATE_TARGET_GENERATOR = 23;
+	//public static final int MSG_CANNOT_COMPUTE_SAMPLE_INPUT_SEQ = 24;
+	public static final int MSG_STRING_TEMPLATE_ERROR = 24;
+
+	// GRAMMAR ERRORS
+	public static final int MSG_SYNTAX_ERROR = 100;
+	public static final int MSG_RULE_REDEFINITION = 101;
+	public static final int MSG_LEXER_RULES_NOT_ALLOWED = 102;
+	public static final int MSG_PARSER_RULES_NOT_ALLOWED = 103;
+	public static final int MSG_CANNOT_FIND_ATTRIBUTE_NAME_IN_DECL = 104;
+	public static final int MSG_NO_TOKEN_DEFINITION = 105;
+	public static final int MSG_UNDEFINED_RULE_REF = 106;
+	public static final int MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE = 107;
+	public static final int MSG_CANNOT_ALIAS_TOKENS_IN_LEXER = 108;
+	public static final int MSG_ATTRIBUTE_REF_NOT_IN_RULE = 111;
+	public static final int MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF = 112;
+	public static final int MSG_UNKNOWN_ATTRIBUTE_IN_SCOPE = 113;
+	public static final int MSG_UNKNOWN_SIMPLE_ATTRIBUTE = 114;
+	public static final int MSG_INVALID_RULE_PARAMETER_REF = 115;
+	public static final int MSG_UNKNOWN_RULE_ATTRIBUTE = 116;
+	public static final int MSG_ISOLATED_RULE_SCOPE = 117;
+	public static final int MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE = 118;
+	public static final int MSG_LABEL_CONFLICTS_WITH_RULE = 119;
+	public static final int MSG_LABEL_CONFLICTS_WITH_TOKEN = 120;
+	public static final int MSG_LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE = 121;
+	public static final int MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL = 122;
+	public static final int MSG_ATTRIBUTE_CONFLICTS_WITH_RULE = 123;
+	public static final int MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL = 124;
+	public static final int MSG_LABEL_TYPE_CONFLICT = 125;
+	public static final int MSG_ARG_RETVAL_CONFLICT = 126;
+	public static final int MSG_NONUNIQUE_REF = 127;
+	public static final int MSG_FORWARD_ELEMENT_REF = 128;
+	public static final int MSG_MISSING_RULE_ARGS = 129;
+	public static final int MSG_RULE_HAS_NO_ARGS = 130;
+	public static final int MSG_ARGS_ON_TOKEN_REF = 131;
+	public static final int MSG_RULE_REF_AMBIG_WITH_RULE_IN_ALT = 132;
+	public static final int MSG_ILLEGAL_OPTION = 133;
+	public static final int MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT = 134;
+	public static final int MSG_UNDEFINED_TOKEN_REF_IN_REWRITE = 135;
+	public static final int MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS = 136;
+	public static final int MSG_UNDEFINED_LABEL_REF_IN_REWRITE = 137;
+	public static final int MSG_NO_GRAMMAR_START_RULE = 138;
+	public static final int MSG_EMPTY_COMPLEMENT = 139;
+	public static final int MSG_UNKNOWN_DYNAMIC_SCOPE = 140;
+	public static final int MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE = 141;
+	public static final int MSG_ISOLATED_RULE_ATTRIBUTE = 142;
+	public static final int MSG_INVALID_ACTION_SCOPE = 143;
+	public static final int MSG_ACTION_REDEFINITION = 144;
+	public static final int MSG_DOUBLE_QUOTES_ILLEGAL = 145;
+	public static final int MSG_INVALID_TEMPLATE_ACTION = 146;
+	public static final int MSG_MISSING_ATTRIBUTE_NAME = 147;
+	public static final int MSG_ARG_INIT_VALUES_ILLEGAL = 148;
+	public static final int MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION = 149;
+	public static final int MSG_NO_RULES = 150;
+	public static final int MSG_WRITE_TO_READONLY_ATTR = 151;
+	public static final int MSG_MISSING_AST_TYPE_IN_TREE_GRAMMAR = 152;
+	public static final int MSG_REWRITE_FOR_MULTI_ELEMENT_ALT = 153;
+	public static final int MSG_RULE_INVALID_SET = 154;
+	public static final int MSG_HETERO_ILLEGAL_IN_REWRITE_ALT = 155;
+	public static final int MSG_NO_SUCH_GRAMMAR_SCOPE = 156;
+	public static final int MSG_NO_SUCH_RULE_IN_SCOPE = 157;
+	public static final int MSG_TOKEN_ALIAS_CONFLICT = 158;
+	public static final int MSG_TOKEN_ALIAS_REASSIGNMENT = 159;
+	public static final int MSG_TOKEN_VOCAB_IN_DELEGATE = 160;
+	public static final int MSG_INVALID_IMPORT = 161;
+	public static final int MSG_IMPORTED_TOKENS_RULE_EMPTY = 162;
+	public static final int MSG_IMPORT_NAME_CLASH = 163;
+	public static final int MSG_AST_OP_WITH_NON_AST_OUTPUT_OPTION = 164;
+	public static final int MSG_AST_OP_IN_ALT_WITH_REWRITE = 165;
+    public static final int MSG_WILDCARD_AS_ROOT = 166;
+    public static final int MSG_CONFLICTING_OPTION_IN_TREE_FILTER = 167;
+	public static final int MSG_ILLEGAL_OPTION_VALUE = 168;
+	public static final int MSG_ALL_OPS_NEED_SAME_ASSOC = 169;
+	public static final int MSG_RANGE_OP_ILLEGAL = 170;
+
+	// GRAMMAR WARNINGS
+	public static final int MSG_GRAMMAR_NONDETERMINISM = 200; // A predicts alts 1,2
+	public static final int MSG_UNREACHABLE_ALTS = 201;       // nothing predicts alt i
+	public static final int MSG_DANGLING_STATE = 202;         // no edges out of state
+	public static final int MSG_INSUFFICIENT_PREDICATES = 203;
+	public static final int MSG_DUPLICATE_SET_ENTRY = 204;    // (A|A)
+	public static final int MSG_ANALYSIS_ABORTED = 205;
+	public static final int MSG_RECURSION_OVERLOW = 206;
+	public static final int MSG_LEFT_RECURSION = 207;
+	public static final int MSG_UNREACHABLE_TOKENS = 208; // nothing predicts token
+	public static final int MSG_TOKEN_NONDETERMINISM = 209; // alts of Tokens rule
+	public static final int MSG_LEFT_RECURSION_CYCLES = 210;
+	public static final int MSG_NONREGULAR_DECISION = 211;
+
+
+    // Dependency sorting errors
+    //
+    public static final int MSG_CIRCULAR_DEPENDENCY = 212; // t1.g -> t2.g -> t3.g ->t1.g
+
+	public static final int MAX_MESSAGE_NUMBER = 212;
+
+	/** Do not perform analysis if one of these happens */
+	public static final BitSet ERRORS_FORCING_NO_ANALYSIS = new BitSet() {
+		{
+			add(MSG_RULE_REDEFINITION);
+			add(MSG_UNDEFINED_RULE_REF);
+			add(MSG_LEFT_RECURSION_CYCLES);
+			add(MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION);
+			add(MSG_NO_RULES);
+			add(MSG_NO_SUCH_GRAMMAR_SCOPE);
+			add(MSG_NO_SUCH_RULE_IN_SCOPE);
+			add(MSG_LEXER_RULES_NOT_ALLOWED);
+            add(MSG_WILDCARD_AS_ROOT);
+            add(MSG_CIRCULAR_DEPENDENCY);
+            // TODO: ...
+		}
+	};
+
+	/** Do not do code gen if one of these happens */
+	public static final BitSet ERRORS_FORCING_NO_CODEGEN = new BitSet() {
+		{
+			add(MSG_NONREGULAR_DECISION);
+			add(MSG_RECURSION_OVERLOW);
+			add(MSG_UNREACHABLE_ALTS);
+			add(MSG_FILE_AND_GRAMMAR_NAME_DIFFER);
+			add(MSG_INVALID_IMPORT);
+			add(MSG_AST_OP_WITH_NON_AST_OUTPUT_OPTION);
+            add(MSG_CIRCULAR_DEPENDENCY);
+			// TODO: ...
+		}
+	};
+
+	/** Only one error can be emitted for any entry in this table.
+	 *  Map&lt;String,Set&gt; where the key is a method name like danglingState.
+	 *  The set holds whatever keys that method derives from its input (e.g., a DFA decision and alt set).
+	 */
+	public static final Map<String, Set<String>> emitSingleError = new HashMap<String, Set<String>>() {
+		{
+			put("danglingState", new HashSet<String>());
+		}
+	};
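+	/* Illustrative note, not part of the upstream source: danglingState() below
+	 * consults this table so that each (decision, alt set) pair is reported at
+	 * most once, roughly
+	 *
+	 *   Set<String> seen = emitSingleError.get("danglingState");
+	 *   String key = d.dfa.decisionNumber + "|" + d.getAltSet();
+	 *   if ( !seen.contains(key) ) { getErrorListener().error(msg); seen.add(key); }
+	 */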
+
+	/** Messages should be sensitive to the locale. */
+	private static Locale locale;
+	private static String formatName;
+
+	/** Each thread might need its own error listener; e.g., a GUI with
+	 *  multiple window frames holding multiple grammars.
+	 */
+	private static final Map<Thread, ANTLRErrorListener> threadToListenerMap = new WeakHashMap<Thread, ANTLRErrorListener>();
+
+	public static class ErrorState {
+		public int errors;
+		public int warnings;
+		public int infos;
+		/** Track all msgIDs; we use them to abort later if necessary.
+		 *  Also used in Message to find out what type of message it is via getMessageType().
+		 */
+		public BitSet errorMsgIDs = new BitSet();
+		public BitSet warningMsgIDs = new BitSet();
+		// TODO: figure out how to do info messages. these do not have IDs...kr
+		//public BitSet infoMsgIDs = new BitSet();
+	}
+
+	/** Track the number of errors regardless of the listener but track
+	 *  per thread.
+	 */
+	private static final Map<Thread, ErrorState> threadToErrorStateMap = new WeakHashMap<Thread, ErrorState>();
+
+	/** Each thread has its own ptr to a Tool object, which knows how
+	 *  to panic, for example.  In a GUI, the thread might just throw an Error
+	 *  to exit rather than the suicide System.exit.
+	 */
+	private static final Map<Thread, Tool> threadToToolMap = new WeakHashMap<Thread, Tool>();
+
+	/** The group of templates that represent all possible ANTLR errors. */
+	private static STGroup messages;
+	/** The group of templates that represent the current message format. */
+	private static STGroup format;
+
+	/** From a msgID how can I get the name of the template that describes
+	 *  the error or warning?
+	 */
+	private static final String[] idToMessageTemplateName = new String[MAX_MESSAGE_NUMBER+1];
+
+	static ANTLRErrorListener theDefaultErrorListener = new ANTLRErrorListener() {
+		@Override
+		public void info(String msg) {
+			if (formatWantsSingleLineMessage()) {
+				msg = msg.replaceAll("\n", " ");
+			}
+			System.err.println(msg);
+		}
+
+		@Override
+		public void error(Message msg) {
+			String outputMsg = msg.toString();
+			if (formatWantsSingleLineMessage()) {
+				outputMsg = outputMsg.replaceAll("\n", " ");
+			}
+			System.err.println(outputMsg);
+		}
+
+		@Override
+		public void warning(Message msg) {
+			String outputMsg = msg.toString();
+			if (formatWantsSingleLineMessage()) {
+				outputMsg = outputMsg.replaceAll("\n", " ");
+			}
+			System.err.println(outputMsg);
+		}
+
+		@Override
+		public void error(ToolMessage msg) {
+			String outputMsg = msg.toString();
+			if (formatWantsSingleLineMessage()) {
+				outputMsg = outputMsg.replaceAll("\n", " ");
+			}
+			System.err.println(outputMsg);
+		}
+	};
+
+	/** Handle all ST error listeners here (code gen, Grammar, and this class
+	 *  use templates).
+	 */
+	static STErrorListener initSTListener =
+		new STErrorListener() {
+			@Override
+			public void compileTimeError(STMessage msg) {
+				System.err.println("ErrorManager init error: "+msg);
+			}
+
+			@Override
+			public void runTimeError(STMessage msg) {
+				System.err.println("ErrorManager init error: "+msg);
+			}
+
+			@Override
+			public void IOError(STMessage msg) {
+				System.err.println("ErrorManager init error: "+msg);
+			}
+
+			@Override
+			public void internalError(STMessage msg) {
+				System.err.println("ErrorManager init error: "+msg);
+			}
+
+		};
+
+	/** During verification of the messages group file, don't gen errors.
+	 *  I'll handle them here.  This is used only after the file has loaded OK
+	 *  and only for the messages STG.
+	 */
+	static STErrorListener blankSTListener =
+		new STErrorListener() {
+			@Override public void compileTimeError(STMessage msg) {			}
+			@Override public void runTimeError(STMessage msg) {			}
+			@Override public void IOError(STMessage msg) {			}
+			@Override public void internalError(STMessage msg) {			}
+		};
+
+	/** Errors during initialization related to ST must all go to System.err.
+	 */
+	static STErrorListener theDefaultSTListener =
+		new STErrorListener() {
+			@Override
+			public void compileTimeError(STMessage msg) {
+				ErrorManager.error(ErrorManager.MSG_STRING_TEMPLATE_ERROR, msg.toString(), msg.cause);
+			}
+
+			@Override
+			public void runTimeError(STMessage msg) {
+				switch (msg.error) {
+				case NO_SUCH_ATTRIBUTE:
+				case NO_SUCH_ATTRIBUTE_PASS_THROUGH:
+				case NO_SUCH_PROPERTY:
+					ErrorManager.warning(ErrorManager.MSG_STRING_TEMPLATE_ERROR, msg.toString());
+					return;
+
+				default:
+					ErrorManager.error(ErrorManager.MSG_STRING_TEMPLATE_ERROR, msg.toString(), msg.cause);
+					return;
+				}
+			}
+
+			@Override
+			public void IOError(STMessage msg) {
+				ErrorManager.error(ErrorManager.MSG_STRING_TEMPLATE_ERROR, msg.toString(), msg.cause);
+			}
+
+			@Override
+			public void internalError(STMessage msg) {
+				ErrorManager.error(ErrorManager.MSG_STRING_TEMPLATE_ERROR, msg.toString(), msg.cause);
+			}
+		};
+
+	// make sure that this class is ready to use after loading
+	static {
+		initIdToMessageNameMapping();
+		// it is inefficient to set the default locale here if another
+		// piece of code is going to set the locale, but that would
+		// require that a user call an init() function or something.  I prefer
+		// that this class be ready to go when loaded as I'm absentminded ;)
+		setLocale(Locale.getDefault());
+		// try to load the message format group
+		// the user might have specified one on the command line
+		// if not, or if the user has given an illegal value, we will fall back to "antlr"
+		setFormat("antlr");
+	}
+
+    public static STErrorListener getSTErrorListener() {
+		return theDefaultSTListener;
+	}
+
+	/** We really only need a single locale for the entire running ANTLR code
+	 *  in a single VM.  Only pay attention to the language, not the country,
+	 *  so that Canadian French and European French speakers all get the same
+	 *  template file, fr.stg.  Just easier this way.
+	 */
+	public static void setLocale(Locale locale) {
+		ErrorManager.locale = locale;
+		String language = locale.getLanguage();
+		String fileName = "org/antlr/tool/templates/messages/languages/"+language+".stg";
+		try {
+			messages = new STGroupFile(fileName);
+		}
+		catch (IllegalArgumentException iae) {
+			if ( language.equals(Locale.US.getLanguage()) ) {
+				rawError("ANTLR installation corrupted; cannot find English messages file "+fileName);
+				panic();
+			}
+			else {
+				setLocale(Locale.US); // recurse on this rule, trying the US locale
+			}
+		}
+
+		messages.setListener(blankSTListener);
+		boolean messagesOK = verifyMessages();
+		if ( !messagesOK && language.equals(Locale.US.getLanguage()) ) {
+			rawError("ANTLR installation corrupted; English messages file "+language+".stg incomplete");
+			panic();
+		}
+		else if ( !messagesOK ) {
+			setLocale(Locale.US); // try US to see if that will work
+		}
+	}
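+	/* Illustrative sketch, not part of the upstream source: callers pick a locale
+	 * and ANTLR loads the matching messages group, falling back to US English when
+	 * the language-specific .stg file is missing or incomplete, e.g.
+	 *
+	 *   ErrorManager.setLocale(Locale.FRENCH); // loads .../languages/fr.stg if present
+	 */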
+
+	/** The format gets reset from the Tool if the user supplied a command-line option to that effect;
+	 *  otherwise we just use the default "antlr".
+	 */
+	public static void setFormat(String formatName) {
+		ErrorManager.formatName = formatName;
+		String fileName = "org/antlr/tool/templates/messages/formats/"+formatName+".stg";
+		format = new STGroupFile(fileName);
+		format.setListener(initSTListener);
+		if ( !format.isDefined("message") ) { // pick random msg to load
+			if ( formatName.equals("antlr") ) {
+				rawError("ANTLR installation corrupted; cannot find ANTLR messages format file "+fileName);
+				panic();
+			}
+			else {
+				rawError("no such message format file "+fileName+" retrying with default ANTLR format");
+				setFormat("antlr"); // recurse on this rule, trying the default message format
+				return;
+			}
+		}
+
+		format.setListener(blankSTListener);
+		boolean formatOK = verifyFormat();
+		if ( !formatOK && formatName.equals("antlr") ) {
+			rawError("ANTLR installation corrupted; ANTLR messages format file "+formatName+".stg incomplete");
+			panic();
+		}
+		else if ( !formatOK ) {
+			setFormat("antlr"); // recurse on this rule, trying the default message format
+		}
+	}
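+	/* Illustrative sketch, not part of the upstream source: the Tool can switch the
+	 * message layout independently of the message texts; the "gnu" name below is
+	 * only an example of an alternate formats .stg file, e.g.
+	 *
+	 *   ErrorManager.setFormat("gnu");   // loads .../formats/gnu.stg if present
+	 *   ErrorManager.setFormat("antlr"); // back to the default layout
+	 */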
+
+	/** Encodes the error handling found in setLocale, but does not trigger
+	 *  panics, which would make GUI tools die if ANTLR's installation was
+	 *  a bit screwy.  Duplicated code...ick.
+	public static Locale getLocaleForValidMessages(Locale locale) {
+		ErrorManager.locale = locale;
+		String language = locale.getLanguage();
+		String fileName = "org/antlr/tool/templates/messages/"+language+".stg";
+		ClassLoader cl = Thread.currentThread().getContextClassLoader();
+		InputStream is = cl.getResourceAsStream(fileName);
+		if ( is==null && language.equals(Locale.US.getLanguage()) ) {
+			return null;
+		}
+		else if ( is==null ) {
+			return getLocaleForValidMessages(Locale.US); // recurse on this rule, trying the US locale
+		}
+
+		boolean messagesOK = verifyMessages();
+		if ( !messagesOK && language.equals(Locale.US.getLanguage()) ) {
+			return null;
+		}
+		else if ( !messagesOK ) {
+			return getLocaleForValidMessages(Locale.US); // try US to see if that will work
+		}
+		return true;
+	}
+	 */
+
+	/** In general, you'll want all errors to go to a single spot.
+	 *  However, in a GUI, you might have two frames up with two
+	 *  different grammars.  Two threads might launch to process the
+	 *  grammars--you would want errors to go to different objects
+	 *  depending on the thread.  I store a single listener per
+	 *  thread.
+	 */
+	public static void setErrorListener(ANTLRErrorListener listener) {
+		threadToListenerMap.put(Thread.currentThread(), listener);
+	}
+
+    public static void removeErrorListener() {
+        threadToListenerMap.remove(Thread.currentThread());
+    }
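+	/* Illustrative sketch, not part of the upstream source: a GUI thread that
+	 * processes one grammar can install its own listener and remove it when done,
+	 * so errors from concurrently processed grammars don't interleave.  The
+	 * myLog and processGrammar names below are placeholders:
+	 *
+	 *   ErrorManager.setErrorListener(new ANTLRErrorListener() {
+	 *       @Override public void info(String msg)       { myLog.add(msg); }
+	 *       @Override public void error(Message msg)     { myLog.add(msg.toString()); }
+	 *       @Override public void warning(Message msg)   { myLog.add(msg.toString()); }
+	 *       @Override public void error(ToolMessage msg) { myLog.add(msg.toString()); }
+	 *   });
+	 *   try { processGrammar(); }
+	 *   finally { ErrorManager.removeErrorListener(); }
+	 */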
+
+	public static void setTool(Tool tool) {
+		threadToToolMap.put(Thread.currentThread(), tool);
+	}
+
+	/** Given a message ID, return a ST that somebody can fill
+	 *  with data.  We need to convert the int ID to the name of a template
+	 *  in the messages ST group.
+	 */
+	public static ST getMessage(int msgID) {
+        String msgName = idToMessageTemplateName[msgID];
+		return messages.getInstanceOf(msgName);
+	}
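+	/* Illustrative note, not part of the upstream source: the template name is the
+	 * MSG_ constant's name without its prefix (see initIdToMessageNameMapping()),
+	 * so e.g. MSG_NO_RULES (150) resolves to the template "NO_RULES" in the
+	 * locale's .stg file:
+	 *
+	 *   ST msgST = ErrorManager.getMessage(ErrorManager.MSG_NO_RULES);
+	 */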
+	public static String getMessageType(int msgID) {
+		if (getErrorState().warningMsgIDs.member(msgID)) {
+			return messages.getInstanceOf("warning").render();
+		}
+		else if (getErrorState().errorMsgIDs.member(msgID)) {
+			return messages.getInstanceOf("error").render();
+		}
+		assertTrue(false, "Assertion failed! Message ID " + msgID + " created but is not present in errorMsgIDs or warningMsgIDs.");
+		return "";
+	}
+
+	/** Return a ST that refers to the current format used for
+	 * emitting messages.
+	 */
+	public static ST getLocationFormat() {
+		return format.getInstanceOf("location");
+	}
+	public static ST getReportFormat() {
+		return format.getInstanceOf("report");
+	}
+	public static ST getMessageFormat() {
+		return format.getInstanceOf("message");
+	}
+	public static boolean formatWantsSingleLineMessage() {
+		return format.getInstanceOf("wantsSingleLineMessage").render().equals("true");
+	}
+
+	public static ANTLRErrorListener getErrorListener() {
+		ANTLRErrorListener el =
+			threadToListenerMap.get(Thread.currentThread());
+		if ( el==null ) {
+			return theDefaultErrorListener;
+		}
+		return el;
+	}
+
+	public static ErrorState getErrorState() {
+		ErrorState ec =
+			threadToErrorStateMap.get(Thread.currentThread());
+		if ( ec==null ) {
+			ec = new ErrorState();
+			threadToErrorStateMap.put(Thread.currentThread(), ec);
+		}
+		return ec;
+	}
+
+	public static int getNumErrors() {
+		return getErrorState().errors;
+	}
+
+	public static void resetErrorState() {
+        threadToListenerMap.clear();
+        ErrorState ec = new ErrorState();
+		threadToErrorStateMap.put(Thread.currentThread(), ec);
+	}
+
+	public static void info(String msg) {
+		getErrorState().infos++;
+		getErrorListener().info(msg);
+	}
+
+	public static void error(int msgID) {
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(new ToolMessage(msgID));
+	}
+
+	public static void error(int msgID, Throwable e) {
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(new ToolMessage(msgID,e));
+	}
+
+	public static void error(int msgID, Object arg) {
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(new ToolMessage(msgID, arg));
+	}
+
+	public static void error(int msgID, Object arg, Object arg2) {
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(new ToolMessage(msgID, arg, arg2));
+	}
+
+	public static void error(int msgID, Object arg, Throwable e) {
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(new ToolMessage(msgID, arg, e));
+	}
+
+	public static void warning(int msgID, Object arg) {
+		getErrorState().warnings++;
+		getErrorState().warningMsgIDs.add(msgID);
+		getErrorListener().warning(new ToolMessage(msgID, arg));
+	}
+
+	public static void nondeterminism(DecisionProbe probe,
+									  DFAState d)
+	{
+		getErrorState().warnings++;
+		Message msg = new GrammarNonDeterminismMessage(probe,d);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+
+	public static void danglingState(DecisionProbe probe,
+									 DFAState d)
+	{
+		getErrorState().errors++;
+		Message msg = new GrammarDanglingStateMessage(probe,d);
+		getErrorState().errorMsgIDs.add(msg.msgID);
+		Set<String> seen = emitSingleError.get("danglingState");
+		if ( !seen.contains(d.dfa.decisionNumber+"|"+d.getAltSet()) ) {
+			getErrorListener().error(msg);
+			// we've seen this decision and this alt set; never again
+			seen.add(d.dfa.decisionNumber+"|"+d.getAltSet());
+		}
+	}
+
+	public static void analysisAborted(DecisionProbe probe)
+	{
+		getErrorState().warnings++;
+		Message msg = new GrammarAnalysisAbortedMessage(probe);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+
+	public static void unreachableAlts(DecisionProbe probe,
+									   List<Integer> alts)
+	{
+		getErrorState().errors++;
+		Message msg = new GrammarUnreachableAltsMessage(probe,alts);
+		getErrorState().errorMsgIDs.add(msg.msgID);
+		getErrorListener().error(msg);
+	}
+
+	public static void insufficientPredicates(DecisionProbe probe,
+											  DFAState d,
+											  Map<Integer, Set<Token>> altToUncoveredLocations)
+	{
+		getErrorState().warnings++;
+		Message msg = new GrammarInsufficientPredicatesMessage(probe,d,altToUncoveredLocations);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+
+	public static void nonLLStarDecision(DecisionProbe probe) {
+		getErrorState().errors++;
+		Message msg = new NonRegularDecisionMessage(probe, probe.getNonDeterministicAlts());
+		getErrorState().errorMsgIDs.add(msg.msgID);
+		getErrorListener().error(msg);
+	}
+
+	public static void recursionOverflow(DecisionProbe probe,
+										 DFAState sampleBadState,
+										 int alt,
+										 Collection<String> targetRules,
+										 Collection<? extends Collection<? extends NFAState>> callSiteStates)
+	{
+		getErrorState().errors++;
+		Message msg = new RecursionOverflowMessage(probe,sampleBadState, alt,
+										 targetRules, callSiteStates);
+		getErrorState().errorMsgIDs.add(msg.msgID);
+		getErrorListener().error(msg);
+	}
+
+	/*
+	// TODO: we can remove I think.  All detected now with cycles check.
+	public static void leftRecursion(DecisionProbe probe,
+									 int alt,
+									 Collection targetRules,
+									 Collection callSiteStates)
+	{
+		getErrorState().warnings++;
+		Message msg = new LeftRecursionMessage(probe, alt, targetRules, callSiteStates);
+		getErrorState().warningMsgIDs.add(msg.msgID);
+		getErrorListener().warning(msg);
+	}
+	*/
+
+	public static void leftRecursionCycles(Collection<? extends Set<? extends Rule>> cycles) {
+		getErrorState().errors++;
+		Message msg = new LeftRecursionCyclesMessage(cycles);
+		getErrorState().errorMsgIDs.add(msg.msgID);
+		getErrorListener().error(msg);
+	}
+
+	public static void grammarError(int msgID,
+									Grammar g,
+									Token token,
+									Object arg,
+									Object arg2)
+	{
+		getErrorState().errors++;
+		Message msg = new GrammarSemanticsMessage(msgID,g,token,arg,arg2);
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(msg);
+	}
+
+	public static void grammarError(int msgID,
+									Grammar g,
+									Token token,
+									Object arg)
+	{
+		grammarError(msgID,g,token,arg,null);
+	}
+
+	public static void grammarError(int msgID,
+									Grammar g,
+									Token token)
+	{
+		grammarError(msgID,g,token,null,null);
+	}
+
+	public static void grammarWarning(int msgID,
+									  Grammar g,
+									  Token token,
+									  Object arg,
+									  Object arg2)
+	{
+		getErrorState().warnings++;
+		Message msg = new GrammarSemanticsMessage(msgID,g,token,arg,arg2);
+		getErrorState().warningMsgIDs.add(msgID);
+		getErrorListener().warning(msg);
+	}
+
+	public static void grammarWarning(int msgID,
+									  Grammar g,
+									  Token token,
+									  Object arg)
+	{
+		grammarWarning(msgID,g,token,arg,null);
+	}
+
+	public static void grammarWarning(int msgID,
+									  Grammar g,
+									  Token token)
+	{
+		grammarWarning(msgID,g,token,null,null);
+	}
+
+	public static void syntaxError(int msgID,
+								   Grammar grammar,
+								   Token token,
+								   Object arg,
+								   RecognitionException re)
+	{
+		getErrorState().errors++;
+		getErrorState().errorMsgIDs.add(msgID);
+		getErrorListener().error(
+			new GrammarSyntaxMessage(msgID,grammar,token,arg,re)
+		);
+	}
+
+	public static void internalError(Object error, Throwable e) {
+		StackTraceElement location = getLastNonErrorManagerCodeLocation(e);
+		String msg = "Exception "+e+"@"+location+": "+error;
+		error(MSG_INTERNAL_ERROR, msg);
+	}
+
+	public static void internalError(Object error) {
+		StackTraceElement location =
+			getLastNonErrorManagerCodeLocation(new Exception());
+		String msg = location+": "+error;
+		error(MSG_INTERNAL_ERROR, msg);
+	}
+
+	public static boolean doNotAttemptAnalysis() {
+		return !getErrorState().errorMsgIDs.and(ERRORS_FORCING_NO_ANALYSIS).isNil();
+	}
+
+	public static boolean doNotAttemptCodeGen() {
+		return doNotAttemptAnalysis() ||
+			   !getErrorState().errorMsgIDs.and(ERRORS_FORCING_NO_CODEGEN).isNil();
+	}
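+	/* Illustrative sketch, not part of the upstream source: a driver would typically
+	 * consult these gates between phases, e.g.
+	 *
+	 *   if ( ErrorManager.doNotAttemptAnalysis() ) return; // skip NFA-to-DFA work
+	 *   // ... run analysis ...
+	 *   if ( ErrorManager.doNotAttemptCodeGen() ) return;  // skip emitting code
+	 */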
+
+	/** Return first non ErrorManager code location for generating messages */
+	private static StackTraceElement getLastNonErrorManagerCodeLocation(Throwable e) {
+		StackTraceElement[] stack = e.getStackTrace();
+		int i = 0;
+		for (; i < stack.length; i++) {
+			StackTraceElement t = stack[i];
+			if ( t.toString().indexOf("ErrorManager")<0 ) {
+				break;
+			}
+		}
+		StackTraceElement location = stack[i];
+		return location;
+	}
+
+	// A S S E R T I O N  C O D E
+
+	public static void assertTrue(boolean condition, String message) {
+		if ( !condition ) {
+			internalError(message);
+		}
+	}
+
+	// S U P P O R T  C O D E
+
+	protected static boolean initIdToMessageNameMapping() {
+		// make sure a message exists, even if it's just to indicate a problem
+		for (int i = 0; i < idToMessageTemplateName.length; i++) {
+			idToMessageTemplateName[i] = "INVALID MESSAGE ID: "+i;
+		}
+		// get list of fields and use it to fill in idToMessageTemplateName mapping
+		Field[] fields = ErrorManager.class.getFields();
+		for (int i = 0; i < fields.length; i++) {
+			Field f = fields[i];
+			String fieldName = f.getName();
+			if ( !fieldName.startsWith("MSG_") ) {
+				continue;
+			}
+			String templateName =
+				fieldName.substring("MSG_".length(),fieldName.length());
+			int msgID;
+			try {
+				// get the constant value from this class object
+				msgID = f.getInt(ErrorManager.class);
+			}
+			catch (IllegalAccessException iae) {
+				System.err.println("cannot get const value for "+f.getName());
+				continue;
+			}
+			if ( fieldName.startsWith("MSG_") ) {
+                idToMessageTemplateName[msgID] = templateName;
+			}
+		}
+		return true;
+	}
+
+	/** Use reflection to find list of MSG_ fields and then verify a
+	 *  template exists for each one from the locale's group.
+	 */
+	protected static boolean verifyMessages() {
+		boolean ok = true;
+		Field[] fields = ErrorManager.class.getFields();
+		for (int i = 0; i < fields.length; i++) {
+			Field f = fields[i];
+			String fieldName = f.getName();
+			String templateName =
+				fieldName.substring("MSG_".length(),fieldName.length());
+			if ( fieldName.startsWith("MSG_") ) {
+				if ( !messages.isDefined(templateName) ) {
+					System.err.println("Message "+templateName+" in locale "+
+									   locale+" not found");
+					ok = false;
+				}
+			}
+		}
+		// check for special templates
+		if (!messages.isDefined("warning")) {
+			System.err.println("Message template 'warning' not found in locale "+ locale);
+			ok = false;
+		}
+		if (!messages.isDefined("error")) {
+			System.err.println("Message template 'error' not found in locale "+ locale);
+			ok = false;
+		}
+		return ok;
+	}
+
+	/** Verify the message format template group */
+	protected static boolean verifyFormat() {
+		boolean ok = true;
+		if (!format.isDefined("location")) {
+			System.err.println("Format template 'location' not found in " + formatName);
+			ok = false;
+		}
+		if (!format.isDefined("message")) {
+			System.err.println("Format template 'message' not found in " + formatName);
+			ok = false;
+		}
+		if (!format.isDefined("report")) {
+			System.err.println("Format template 'report' not found in " + formatName);
+			ok = false;
+		}
+		return ok;
+	}
+
+	/** If there are errors during ErrorManager init, we have no choice
+	 *  but to go to System.err.
+	 */
+	static void rawError(String msg) {
+		System.err.println(msg);
+	}
+
+	static void rawError(String msg, Throwable e) {
+		rawError(msg);
+		e.printStackTrace(System.err);
+	}
+
+	/** I *think* this will allow Tool subclasses to exit gracefully
+	 *  for GUIs etc...
+	 */
+	public static void panic() {
+		Tool tool = threadToToolMap.get(Thread.currentThread());
+		if ( tool==null ) {
+			// no tool registered, exit
+			throw new Error("ANTLR ErrorManager panic");
+		}
+		else {
+			tool.panic();
+		}
+	}
+}
diff --git a/tool/src/main/java/org/antlr/tool/FASerializer.java b/tool/src/main/java/org/antlr/tool/FASerializer.java
new file mode 100644
index 0000000..4e601d8
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/FASerializer.java
@@ -0,0 +1,217 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.*;
+import org.antlr.misc.Utils;
+
+import java.util.*;
+
+/** An aspect of FA (finite automata) that knows how to dump them to serialized
+ *  strings.
+ */
+public class FASerializer {
+    /** To prevent infinite recursion when walking state machines, record
+     *  which states we've visited.  Make a new set every time you start
+     *  walking in case you reuse this object.  Multiple threads will trash
+     *  this shared variable.  Use a different FASerializer per thread.
+     */
+    protected Set<State> markedStates;
+
+    /** Each state we walk will get a new state number for serialization
+     *  purposes.  This is the variable that tracks state numbers.
+     */
+    protected int stateCounter = 0;
+
+    /** Rather than add a new instance variable to NFA and DFA just for
+     *  serializing machines, map old state numbers to new state numbers
+     *  by a State object &rarr; Integer new state number HashMap.
+     */
+    protected Map<State, Integer> stateNumberTranslator;
+
+    protected Grammar grammar;
+
+    /** This aspect is associated with a grammar; used to get token names */
+    public FASerializer(Grammar grammar) {
+        this.grammar = grammar;
+    }
+
+	public String serialize(State s) {
+		if ( s==null ) {
+			return "<no automaton>";
+		}
+		return serialize(s, true);
+	}
+
+	/** Return a string representation of a state machine.  Two identical
+     *  NFAs or DFAs will have identical serialized representations.  The
+     *  state numbers inside the states are not used; instead, a new number
+     *  is computed, and because the serialization walks the two
+     *  machines using the same specific algorithm, the state numbers
+     *  will be identical.  Accept states are distinguished from regular
+     *  states.
+     */
+    public String serialize(State s, boolean renumber) {
+        markedStates = new HashSet<State>();
+        stateCounter = 0;
+		if ( renumber ) {
+			stateNumberTranslator = new HashMap<State, Integer>();
+        	walkFANormalizingStateNumbers(s);
+		}
+		List<String> lines = new ArrayList<String>();
+        if ( s.getNumberOfTransitions()>0 ) {
+			walkSerializingFA(lines, s);
+		}
+		else {
+			// special case: s0 is an accept
+			String s0 = getStateString(0, s);
+			lines.add(s0+"\n");
+		}
+        StringBuilder buf = new StringBuilder(0);
+        // sort lines to normalize; makes states come out ordered
+        // and then ordered by edge labels then by target state number :)
+        Collections.sort(lines);
+        for (int i = 0; i < lines.size(); i++) {
+            String line = lines.get(i);
+            buf.append(line);
+        }
+        return buf.toString();
+    }
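+    /* Illustrative note, not part of the upstream source: given getStateString()
+     * below, serialized lines look roughly like
+     *
+     *   .s0-ID->.s1
+     *   .s1->:s2=>1
+     *
+     * i.e. ".sN" for ordinary states, ":sN=>alt" for DFA accept states, "->" for
+     * epsilon edges, and the edge label between dashes otherwise.  The labels
+     * shown here are made up for illustration.
+     */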
+
+    /** In stateNumberTranslator, get a map from State to new, normalized
+     *  state number.  Used by walkSerializingFA to make sure any two
+     *  identical state machines will serialize the same way.
+     */
+    protected void walkFANormalizingStateNumbers(State s) {
+		if ( s==null ) {
+			ErrorManager.internalError("null state s");
+			return;
+		}
+        if ( stateNumberTranslator.get(s)!=null ) {
+            return; // already did this state
+        }
+        // assign a new state number for this node if there isn't one
+        stateNumberTranslator.put(s, Utils.integer(stateCounter));
+        stateCounter++;
+
+        // visit nodes pointed to by each transition;
+        for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+            Transition edge = s.transition(i);
+            walkFANormalizingStateNumbers(edge.target); // keep walkin'
+            // if this transition is a rule reference, the node "following" this state
+            // will not be found and appear to be not in graph.  Must explicitly jump
+            // to it, but don't "draw" an edge.
+            if ( edge instanceof RuleClosureTransition ) {
+				walkFANormalizingStateNumbers(((RuleClosureTransition) edge).followState);
+            }
+        }
+    }
+
+    protected void walkSerializingFA(List<String> lines, State s) {
+        if ( markedStates.contains(s) ) {
+            return; // already visited this node
+        }
+
+        markedStates.add(s); // mark this node as completed.
+
+		int normalizedStateNumber = s.stateNumber;
+		if ( stateNumberTranslator!=null ) {
+	        Integer normalizedStateNumberI = stateNumberTranslator.get(s);
+			normalizedStateNumber = normalizedStateNumberI;
+		}
+
+		String stateStr = getStateString(normalizedStateNumber, s);
+
+        // depth first walk each transition, printing its edge first
+        for (int i = 0; i < s.getNumberOfTransitions(); i++) {
+            Transition edge = s.transition(i);
+            StringBuilder buf = new StringBuilder();
+            buf.append(stateStr);
+			if ( edge.isAction() ) {
+				buf.append("-{}->");
+			}
+			else if ( edge.isEpsilon() ) {
+				buf.append("->");
+			}
+			else if ( edge.isSemanticPredicate() ) {
+				buf.append("-{").append(edge.label.getSemanticContext()).append("}?->");
+			}
+			else {
+				String predsStr = "";
+				if ( edge.target instanceof DFAState ) {
+					// look for gated predicates; don't add gated to simple sempred edges
+					SemanticContext preds =
+						((DFAState)edge.target).getGatedPredicatesInNFAConfigurations();
+					if ( preds!=null ) {
+						predsStr = "&&{"+
+							preds.genExpr(grammar.generator,
+									   	  grammar.generator.getTemplates(), null).render()
+							+"}?";
+					}
+				}
+				buf.append("-").append(edge.label.toString(grammar)).append(predsStr).append("->");
+			}
+
+			int normalizedTargetStateNumber = edge.target.stateNumber;
+			if ( stateNumberTranslator!=null ) {
+				Integer normalizedTargetStateNumberI =
+					stateNumberTranslator.get(edge.target);
+				normalizedTargetStateNumber = normalizedTargetStateNumberI;
+			}
+			buf.append(getStateString(normalizedTargetStateNumber, edge.target));
+            buf.append("\n");
+            lines.add(buf.toString());
+
+            // walk this transition
+            walkSerializingFA(lines, edge.target);
+
+            // if this transition is a rule reference, the node "following" this state
+            // will not be found and appear to be not in graph.  Must explicitly jump
+            // to it, but don't "draw" an edge.
+            if ( edge instanceof RuleClosureTransition ) {
+				walkSerializingFA(lines, ((RuleClosureTransition) edge).followState);
+            }
+        }
+
+    }
+
+    private String getStateString(int n, State s) {
+        String stateStr = ".s"+n;
+        if ( s.isAcceptState() ) {
+            if ( s instanceof DFAState ) {
+                stateStr = ":s"+n+"=>"+((DFAState)s).getUniquelyPredictedAlt();
+            }
+            else {
+                stateStr = ":s"+n;
+            }
+        }
+        return stateStr;
+    }
+
+
+}
diff --git a/tool/src/main/java/org/antlr/tool/Grammar.java b/tool/src/main/java/org/antlr/tool/Grammar.java
new file mode 100644
index 0000000..667ebb2
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/Grammar.java
@@ -0,0 +1,3214 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.Tool;
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.LL1Analyzer;
+import org.antlr.analysis.LL1DFA;
+import org.antlr.analysis.Label;
+import org.antlr.analysis.LookaheadSet;
+import org.antlr.analysis.NFA;
+import org.antlr.analysis.NFAConversionThread;
+import org.antlr.analysis.NFAState;
+import org.antlr.analysis.NFAToDFAConverter;
+import org.antlr.analysis.SemanticContext;
+import org.antlr.analysis.Transition;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.codegen.Target;
+import org.antlr.grammar.v3.ANTLRLexer;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.grammar.v3.ANTLRTreePrinter;
+import org.antlr.grammar.v3.ActionAnalysis;
+import org.antlr.grammar.v3.DefineGrammarItemsWalker;
+import org.antlr.grammar.v3.TreeToNFAConverter;
+import org.antlr.misc.Barrier;
+import org.antlr.misc.IntSet;
+import org.antlr.misc.IntervalSet;
+import org.antlr.misc.MultiMap;
+import org.antlr.misc.OrderedHashSet;
+import org.antlr.misc.Utils;
+import org.antlr.runtime.ANTLRReaderStream;
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.CommonTokenStream;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.CommonTreeNodeStream;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STGroup;
+import org.stringtemplate.v4.STGroupString;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.Reader;
+import java.io.StreamTokenizer;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Vector;
+
+/** Represents a grammar in memory. */
+public class Grammar {
+	public static final String SYNPRED_RULE_PREFIX = "synpred";
+
+	public static final String GRAMMAR_FILE_EXTENSION = ".g";
+
+	/** used for generating lexer temp files */
+	public static final String LEXER_GRAMMAR_FILE_EXTENSION = ".g";
+
+	public static final int INITIAL_DECISION_LIST_SIZE = 300;
+	public static final int INVALID_RULE_INDEX = -1;
+
+	// the various kinds of labels. t=type, id=ID, types+=type ids+=ID
+	public static final int RULE_LABEL = 1;
+	public static final int TOKEN_LABEL = 2;
+	public static final int RULE_LIST_LABEL = 3;
+	public static final int TOKEN_LIST_LABEL = 4;
+    public static final int CHAR_LABEL = 5; // used in lexer for x='a'
+    public static final int WILDCARD_TREE_LABEL = 6; // Used in tree grammar x=.
+    public static final int WILDCARD_TREE_LIST_LABEL = 7; // Used in tree grammar x+=.
+
+
+    public static String[] LabelTypeToString =
+		{"<invalid>", "rule", "token", "rule-list", "token-list", "char", "wildcard-tree", "wildcard-tree-list"};
+
+	public static final String ARTIFICIAL_TOKENS_RULENAME = "Tokens";
+	public static final String FRAGMENT_RULE_MODIFIER = "fragment";
+
+	public static final String SYNPREDGATE_ACTION_NAME = "synpredgate";
+
+	/** When converting ANTLR char and string literals, here is the
+	 *  value set of escape chars.
+	 */
+	public static int ANTLRLiteralEscapedCharValue[] = new int[255];
+
+	/** Given a char, we need to be able to show it as an ANTLR literal.
+	 */
+	public static String ANTLRLiteralCharValueEscape[] = new String[255];
+
+	static {
+		ANTLRLiteralEscapedCharValue['n'] = '\n';
+		ANTLRLiteralEscapedCharValue['r'] = '\r';
+		ANTLRLiteralEscapedCharValue['t'] = '\t';
+		ANTLRLiteralEscapedCharValue['b'] = '\b';
+		ANTLRLiteralEscapedCharValue['f'] = '\f';
+		ANTLRLiteralEscapedCharValue['\\'] = '\\';
+		ANTLRLiteralEscapedCharValue['\''] = '\'';
+		ANTLRLiteralEscapedCharValue['"'] = '"';
+		ANTLRLiteralCharValueEscape['\n'] = "\\n";
+		ANTLRLiteralCharValueEscape['\r'] = "\\r";
+		ANTLRLiteralCharValueEscape['\t'] = "\\t";
+		ANTLRLiteralCharValueEscape['\b'] = "\\b";
+		ANTLRLiteralCharValueEscape['\f'] = "\\f";
+		ANTLRLiteralCharValueEscape['\\'] = "\\\\";
+		ANTLRLiteralCharValueEscape['\''] = "\\'";
+	}
+
+	public static final int LEXER = 1;
+	public static final int PARSER = 2;
+	public static final int TREE_PARSER = 3;
+	public static final int COMBINED = 4;
+	public static final String[] grammarTypeToString = new String[] {
+		"<invalid>",
+		"lexer",
+		"parser",
+		"tree",
+		"combined"
+	};
+
+	public static final String[] grammarTypeToFileNameSuffix = new String[] {
+		"<invalid>",
+		"Lexer",
+		"Parser",
+		"", // no suffix for tree grammars
+		"Parser" // if combined grammar, gen Parser and Lexer will be done later
+	};
+
+	/** Set of valid imports.  E.g., can only import a tree parser into
+	 *  another tree parser.  Maps delegate to set of delegator grammar types.
+	 *  validDelegations.get(LEXER) gives list of the kinds of delegators
+	 *  that can import lexers.
+	 */
+	public static MultiMap<Integer,Integer> validDelegations =
+		new MultiMap<Integer,Integer>() {
+			{
+				map(LEXER, LEXER);
+				map(LEXER, PARSER);
+				map(LEXER, COMBINED);
+
+				map(PARSER, PARSER);
+				map(PARSER, COMBINED);
+
+				map(TREE_PARSER, TREE_PARSER);
+
+				// TODO: allow COMBINED
+				// map(COMBINED, COMBINED);
+			}
+		};
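+	/* Illustrative note, not part of the upstream source: per the table above,
+	 * "parser grammar P; import L;" is legal when L is a lexer or parser grammar,
+	 * while a tree grammar may only be imported into another tree grammar.
+	 */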
+
+	/** This is the buffer of *all* tokens found in the grammar file
+	 *  including whitespace tokens etc...  I use this to extract
+	 *  lexer rules from combined grammars.
+	 */
+	public CommonTokenStream tokenBuffer;
+	public static final String IGNORE_STRING_IN_GRAMMAR_FILE_NAME = "__";
+	public static final String AUTO_GENERATED_TOKEN_NAME_PREFIX = "T__";
+
+	public static class Decision {
+		public Grammar grammar;
+		public int decision;
+		public NFAState startState;
+		public GrammarAST blockAST;
+		public DFA dfa;
+	}
+
+	public class LabelElementPair {
+		public Token label;
+		public GrammarAST elementRef;
+		public String referencedRuleName;
+		/** Has an action referenced the label?  Set by ActionAnalysis.g
+		 *  Currently only set for rule labels.
+		 */
+		public boolean actionReferencesLabel;
+		public int type; // in {RULE_LABEL,TOKEN_LABEL,RULE_LIST_LABEL,TOKEN_LIST_LABEL}
+		public LabelElementPair(Token label, GrammarAST elementRef) {
+			this.label = label;
+			this.elementRef = elementRef;
+			this.referencedRuleName = elementRef.getText();
+		}
+		public Rule getReferencedRule() {
+			return getRule(referencedRuleName);
+		}
+		@Override
+		public String toString() {
+			return elementRef.toString();
+		}
+	}
+
+	/** What name did the user provide for this grammar? */
+	public String name;
+
+	/** What type of grammar is this: lexer, parser, tree walker */
+	public int type;
+
+	/** A list of options specified at the grammar level such as language=Java.
+	 *  The value can be an AST for complicated values such as character sets.
+	 *  There may be code generator specific options in here.  I do no
+	 *  interpretation of the key/value pairs...they are simply available for
+	 *  whoever wants them.
+	 */
+	protected Map<String, Object> options;
+
+	public static final Set<String> legalLexerOptions =
+			new HashSet<String>() {
+				{
+				add("language"); add("tokenVocab");
+				add("TokenLabelType");
+				add("superClass");
+				add("filter");
+				add("k");
+				add("backtrack");
+				add("memoize");
+				}
+			};
+
+	public static final Set<String> legalParserOptions =
+			new HashSet<String>() {
+				{
+				add("language"); add("tokenVocab");
+				add("output"); add("rewrite"); add("ASTLabelType");
+				add("TokenLabelType");
+				add("superClass");
+				add("k");
+				add("backtrack");
+				add("memoize");
+				}
+			};
+
+    public static final Set<String> legalTreeParserOptions =
+        new HashSet<String>() {
+            {
+                add("language"); add("tokenVocab");
+                add("output"); add("rewrite"); add("ASTLabelType");
+                add("TokenLabelType");
+                add("superClass");
+                add("k");
+                add("backtrack");
+                add("memoize");
+                add("filter");
+            }
+        };
+
+	public static final Set<String> doNotCopyOptionsToLexer =
+		new HashSet<String>() {
+			{
+				add("output"); add("ASTLabelType"); add("superClass");
+				add("k"); add("backtrack"); add("memoize"); add("rewrite");
+			}
+		};
+
+	public static final Map<String, String> defaultOptions =
+			new HashMap<String, String>() {
+				{
+					put("language","Java");
+				}
+			};
+
+	public static final Set<String> legalBlockOptions =
+			new HashSet<String>() {{add("k"); add("greedy"); add("backtrack"); add("memoize");}};
+
+	/** What are the default options for a subrule? */
+	public static final Map<String, String> defaultBlockOptions =
+			new HashMap<String, String>() {{put("greedy","true");}};
+
+	public static final Map<String, String> defaultLexerBlockOptions =
+			new HashMap<String, String>() {{put("greedy","true");}};
+
+	// Token options are here to avoid contaminating Token object in runtime
+
+	/** Legal options for terminal refs like ID&lt;node=MyVarNode&gt; */
+	public static final Set<String> legalTokenOptions =
+		new HashSet<String>() {
+			{
+				add(defaultTokenOption);
+				add("type");
+				add("text");
+				add("assoc");
+			}
+		};
+
+	public static final String defaultTokenOption = "node";
+
+	/** Is there a global fixed lookahead set for this grammar?
+	 *  If 0, nothing specified.  -1 implies we have not looked at
+	 *  the options table yet to set k.
+	 */
+	protected int global_k = -1;
+
+	/** Map a scope to a map of name:action pairs.
+	 *  Map&lt;String, Map&lt;String,GrammarAST&gt;&gt;
+	 *  The code generator will use this to fill holes in the output files.
+	 *  I track the AST node for the action in case I need the line number
+	 *  for errors.
+	 */
+	private Map<String, Map<String, Object>> actions =
+		new HashMap<String, Map<String, Object>>();
+
+	/** The NFA that represents the grammar with edges labelled with tokens
+	 *  or epsilon.  It is more suitable to analysis than an AST representation.
+	 */
+	public NFA nfa;
+
+	protected NFAFactory factory;
+
+	/** If this grammar is part of a larger composite grammar via delegate
+	 *  statement, then this points at the composite.  The composite holds
+	 *  a global list of rules, token types, decision numbers, etc...
+	 */
+	public CompositeGrammar composite;
+
+	/** A pointer back into grammar tree.  Needed so we can add delegates. */
+	public CompositeGrammarTree compositeTreeNode;
+
+	/** If this is a delegate of another grammar, this is the label used
+	 *  as an instance var by that grammar to point at this grammar. null
+	 *  if no label was specified in the delegate statement.
+	 */
+	public String label;
+
+	/** TODO: hook this to the charVocabulary option */
+	protected IntSet charVocabulary = null;
+
+	/** For ANTLRWorks, we want to be able to map a line:col to a specific
+	 *  decision DFA so it can display DFA.
+	 */
+	Map<String, DFA> lineColumnToLookaheadDFAMap = new HashMap<String, DFA>();
+
+	public Tool tool;
+
+	/** The unique set of all rule references in any rule; set of tree node
+	 *  objects so two refs to same rule can exist but at different line/position.
+	 */
+	protected Set<GrammarAST> ruleRefs = new HashSet<GrammarAST>();
+
+	protected Set<GrammarAST> scopedRuleRefs = new HashSet<GrammarAST>();
+
+	/** The unique set of all token ID references in any rule */
+	protected Set<Token> tokenIDRefs = new HashSet<Token>();
+
+	/** Be able to assign a number to every decision in grammar;
+	 *  decisions in 1..n
+	 */
+	protected int decisionCount = 0;
+
+	/** A list of all rules that are in any left-recursive cycle.  There
+	 *  could be multiple cycles, but this is a flat list of all problematic
+	 *  rules. This is stuff we couldn't refactor to a precedence rule.
+	 */
+	protected Set<Rule> leftRecursiveRules;
+
+	/** An external tool can request that DFA analysis abort prematurely.  We stop
+	 *  at DFA granularity; DFA construction is also limited by size and computation
+	 *  time as a failsafe.
+	 */
+	protected boolean externalAnalysisAbort;
+
+	public int numNonLLStar = 0; // hack to track for -report
+
+	/** When we read in a grammar, we track the list of syntactic predicates
+	 *  and build faux rules for them later.  See my blog entry Dec 2, 2005:
+	 *  http://www.antlr.org/blog/antlr3/lookahead.tml
+	 *  This maps the name (we make up) for a pred to the AST grammar fragment.
+	 */
+	protected LinkedHashMap<String, GrammarAST> nameToSynpredASTMap;
+
+	/** Each left-recursive precedence rule must define a precedence array
+	 *  for binary operators like:
+	 *
+	 *  	static int[] e_prec = new int[tokenNames.length];
+	 *  	static {
+   	 *  		e_prec[75] = 1;
+	 *  	}
+	 *  We track these and push them into the parser later; this is computed
+	 *  early, when we look for prec rules.
+	 */
+	public List<String> precRuleInitCodeBlocks = new ArrayList<String>();
+
+    /** At least one rule has memoize=true */
+    public boolean atLeastOneRuleMemoizes;
+
+    /** At least one backtrack=true in rule or decision or grammar. */
+    public boolean atLeastOneBacktrackOption;
+
+	/** Was this created from a COMBINED grammar? */
+	public boolean implicitLexer;
+
+	/** Map a rule name to its Rule object */
+	protected LinkedHashMap<String,Rule> nameToRuleMap = new LinkedHashMap<String,Rule>();
+
+	/** If this grammar is a delegate, some rules might be overridden; don't
+	 *  want to gen code for them.
+	 */
+	public Set<String> overriddenRules = new HashSet<String>();
+
+	/** The list of all rules referenced in this grammar, not defined here,
+	 *  and defined in a delegate grammar.  Not all of these will be generated
+	 *  in the recognizer for this file; only those that are affected by rule
+	 *  definitions in this grammar.  I am not sure the Java target will need
+	 *  this, but I'm leaving it in case other targets need it.
+	 *  see NameSpaceChecker.lookForReferencesToUndefinedSymbols()
+	 */
+	protected Set<Rule> delegatedRuleReferences = new HashSet<Rule>();
+
+	/** The ANTLRParser tracks lexer rules when reading combined grammars
+	 *  so we can build the Tokens rule.
+	 */
+	public List<String> lexerRuleNamesInCombined = new ArrayList<String>();
+
+	/** Track the scopes defined outside of rules and the scopes associated
+	 *  with all rules (even if empty).
+	 */
+	protected Map<String, AttributeScope> scopes = new HashMap<String, AttributeScope>();
+
+	/** An AST that records entire input grammar with all rules.  A simple
+	 *  grammar with one rule, "grammar t; a : A | B ;", looks like:
+	 * ( grammar t ( rule a ( BLOCK ( ALT A ) ( ALT B ) ) &lt;end-of-rule&gt; ) )
+	 */
+	protected GrammarAST grammarTree = null;
+
+	/** Each subrule/rule is a decision point and we must track them so we
+	 *  can go back later and build DFA predictors for them.  This includes
+	 *  all the rules, subrules, optional blocks, ()+, ()* etc...
+	 */
+	protected Vector<Decision> indexToDecision =
+		new Vector<Decision>(INITIAL_DECISION_LIST_SIZE);
+
+	/** If non-null, this is the code generator we will use to generate
+	 *  recognizers in the target language.
+	 */
+	protected CodeGenerator generator;
+
+	public NameSpaceChecker nameSpaceChecker = new NameSpaceChecker(this);
+
+	public LL1Analyzer ll1Analyzer = new LL1Analyzer(this);
+
+	/** For merged lexer/parsers, we must construct a separate lexer spec.
+	 *  This is the template for lexer; put the literals first then the
+	 *  regular rules.  We don't need to specify a token vocab import as
+	 *  I make the new grammar import from the old all in memory; don't want
+	 *  to force it to read from the disk.  The lexer grammar will have the same
+	 *  name as the original grammar but will live in a different file.  A combined
+	 *  grammar Foo.g will have FooParser.java generated plus a Foo__.g that is
+	 *  again named Foo inside.  It will however generate FooLexer.java,
+	 *  as it's a lexer grammar.  A bit odd, but autogenerated.  Can tweak
+	 *  later if we want.
+	 */
+	protected String lexerGrammarTemplate =
+			"grammar(name, options, imports, actionNames, actions, literals, rules) ::= <<\n" +
+			"lexer grammar <name>;\n" +
+			"<if(options)>" +
+			"options {\n" +
+			"  <options:{it | <it.name>=<it.value>;<\\n>}>\n" +
+			"}<\\n>\n" +
+			"<endif>\n" +
+			"<if(imports)>import <imports; separator=\", \">;<endif>\n" +
+			"<actionNames,actions:{n,a|@<n> {<a>\\}\n}>\n" +
+			"<literals:{it | <it.ruleName> : <it.literal> ;\n}>\n" +
+			"<rules>\n" +
+			">>\n";
+	protected ST lexerGrammarST;
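+	/* Illustrative note, not part of the upstream source: for a combined grammar
+	 * "grammar Foo;", the rendered template yields a lexer grammar along the lines of
+	 *
+	 *   lexer grammar Foo;
+	 *   options { ... copied options ... }
+	 *   T__10 : '+' ;   // an extracted literal
+	 *   ID : ... ;      // an extracted lexer rule
+	 *
+	 * which is written to Foo__.g; the rule names and bodies here are made up.
+	 */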
+
+	/** What file name holds this grammar? */
+	protected String fileName;
+
+	/** How long in ms did it take to build DFAs for this grammar?
+	 *  If this grammar is a combined grammar, it only records time for
+	 *  the parser grammar component.  This only records the time to
+	 *  do the LL(*) work; NFA&rarr;DFA conversion.
+	 */
+	public long DFACreationWallClockTimeInMS;
+
+	public int numberOfSemanticPredicates = 0;
+	public int numberOfManualLookaheadOptions = 0;
+	public Set<Integer> setOfNondeterministicDecisionNumbers = new HashSet<Integer>();
+	public Set<Integer> setOfNondeterministicDecisionNumbersResolvedWithPredicates =
+		new HashSet<Integer>();
+
+	/** Track decisions with syn preds specified for reporting.
+	 *  This is the set of BLOCK type AST nodes.
+	 */
+	public Set<GrammarAST> blocksWithSynPreds = new HashSet<GrammarAST>();
+
+	/** Track decisions that actually use the syn preds in the DFA.
+	 *  Computed during NFA to DFA conversion.
+	 */
+	public Set<DFA> decisionsWhoseDFAsUsesSynPreds = new HashSet<DFA>();
+
+	/** Track names of preds so we can avoid generating preds that aren't used.
+	 *  Computed during NFA to DFA conversion.  Just walk accept states
+	 *  and look for synpreds because that is the only state target whose
+	 *  incident edges can have synpreds.  Same is true for
+	 *  decisionsWhoseDFAsUsesSynPreds.
+	 */
+	public Set<String> synPredNamesUsedInDFA = new HashSet<String>();
+
+	/** Track decisions with syn preds specified for reporting.
+	 *  This is the set of BLOCK type AST nodes.
+	 */
+	public Set<GrammarAST> blocksWithSemPreds = new HashSet<GrammarAST>();
+
+	/** Track decisions that actually use the syn preds in the DFA. */
+	public Set<DFA> decisionsWhoseDFAsUsesSemPreds = new HashSet<DFA>();
+
+	protected boolean allDecisionDFACreated = false;
+
+	/** We need a way to detect when a lexer grammar is autogenerated from
+	 *  another grammar or we are just sending in a string representing a
+	 *  grammar.  We don't want to generate a .tokens file, for example,
+	 *  in such cases.
+	 */
+	protected boolean builtFromString = false;
+
+	/** Factored out the sanity checking code; delegate to it. */
+	GrammarSanity sanity = new GrammarSanity(this);
+
+	/** Useful for asking questions about target during analysis */
+	Target target;
+
+	/** Create a grammar from file name.  */
+	public Grammar(Tool tool, String fileName, CompositeGrammar composite) {
+		this.composite = composite;
+		setTool(tool);
+		setFileName(fileName);
+		// ensure we have the composite set to something
+		if ( composite.delegateGrammarTreeRoot==null ) {
+			composite.setDelegationRoot(this);
+		}
+		STGroup lexerGrammarSTG = new STGroupString(lexerGrammarTemplate);
+		lexerGrammarST = lexerGrammarSTG.getInstanceOf("grammar");
+		target = CodeGenerator.loadLanguageTarget((String) getOption("language"));
+	}
+
+	/** Useful for when you are sure that you are not part of a composite
+	 *  already.  Used in Interp/RandomPhrase and testing.
+	 */
+	public Grammar() { this((Tool)null); }
+
+	public Grammar(Tool tool) {
+		setTool(tool);
+		builtFromString = true;
+		composite = new CompositeGrammar(this);
+		STGroup lexerGrammarSTG = new STGroupString(lexerGrammarTemplate);
+		lexerGrammarST = lexerGrammarSTG.getInstanceOf("grammar");
+		target = CodeGenerator.loadLanguageTarget((String)getOption("language"));
+	}
+
+	/** Used for testing; only useful on noncomposite grammars.*/
+	public Grammar(String grammarString)
+			throws RecognitionException
+	{
+		this(null, grammarString);
+	}
+
+	/** Used for testing and Interp/RandomPhrase.  Only useful on
+	 *  noncomposite grammars.
+	 */
+	public Grammar(Tool tool, String grammarString)
+		throws RecognitionException
+	{
+		this(tool);
+		setFileName("<string>");
+		StringReader r = new StringReader(grammarString);
+		parseAndBuildAST(r);
+		composite.assignTokenTypes();
+		//composite.translateLeftRecursiveRules();
+		addRulesForSyntacticPredicates();
+		composite.defineGrammarSymbols();
+		//composite.createNFAs();
+		checkNameSpaceAndActions();
+	}
+
+	public void setFileName(String fileName) {
+		this.fileName = fileName;
+	}
+
+	public String getFileName() {
+		return fileName;
+	}
+
+	public void setName(String name) {
+		if ( name==null ) {
+			return;
+		}
+		// don't error check autogenerated files (those with '__' in them)
+		String saneFile = fileName.replace('\\', '/');
+		int lastSlash = saneFile.lastIndexOf('/');
+		String onlyFileName = saneFile.substring(lastSlash+1, fileName.length());
+		if ( !builtFromString ) {
+			int lastDot = onlyFileName.lastIndexOf('.');
+			String onlyFileNameNoSuffix;
+			if ( lastDot < 0 ) {
+				ErrorManager.error(ErrorManager.MSG_FILENAME_EXTENSION_ERROR, fileName);
+				onlyFileNameNoSuffix = onlyFileName+GRAMMAR_FILE_EXTENSION;
+			}
+			else {
+				onlyFileNameNoSuffix = onlyFileName.substring(0,lastDot);
+			}
+			if ( !name.equals(onlyFileNameNoSuffix) ) {
+				ErrorManager.error(ErrorManager.MSG_FILE_AND_GRAMMAR_NAME_DIFFER,
+								   name,
+								   fileName);
+			}
+		}
+		this.name = name;
+	}
+
+	public void setGrammarContent(String grammarString) throws RecognitionException {
+		StringReader r = new StringReader(grammarString);
+		parseAndBuildAST(r);
+		composite.assignTokenTypes();
+		composite.defineGrammarSymbols();
+	}
+
+	public void parseAndBuildAST()
+		throws IOException
+	{
+		FileReader fr;
+		BufferedReader br = null;
+		try {
+			fr = new FileReader(fileName);
+			br = new BufferedReader(fr);
+			parseAndBuildAST(br);
+			br.close();
+			br = null;
+		}
+		finally {
+			if ( br!=null ) {
+				br.close();
+			}
+		}
+	}
+
+	public void parseAndBuildAST(Reader r) {
+		// BUILD AST FROM GRAMMAR
+		ANTLRLexer lexer;
+		try {
+			lexer = new ANTLRLexer(new ANTLRReaderStream(r));
+		} catch (IOException e) {
+			ErrorManager.internalError("unexpected stream error from parsing "+fileName, e);
+			return;
+		}
+
+		lexer.setFileName(this.getFileName());
+		tokenBuffer = new CommonTokenStream(lexer);
+		ANTLRParser parser = ANTLRParser.createParser(tokenBuffer);
+		parser.setFileName(this.getFileName());
+		ANTLRParser.grammar__return result = null;
+		try {
+			result = parser.grammar_(this);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.internalError("unexpected parser recognition error from "+fileName, re);
+		}
+
+        dealWithTreeFilterMode(); // tree grammar and filter=true?
+
+        if ( lexer.hasASTOperator && !buildAST() ) {
+			Object value = getOption("output");
+			if ( value == null ) {
+				ErrorManager.grammarWarning(ErrorManager.MSG_REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION,
+										    this, null);
+				setOption("output", "AST", null);
+			}
+			else {
+				ErrorManager.grammarError(ErrorManager.MSG_AST_OP_WITH_NON_AST_OUTPUT_OPTION,
+										  this, null, value);
+			}
+		}
+
+		setGrammarTree(result.getTree());
+
+		//if ( grammarTree!=null ) System.out.println("grammar tree: "+grammarTree.toStringTree());
+
+		grammarTree.setUnknownTokenBoundaries();
+
+		setFileName(lexer.getFileName()); // the lexer #src might change name
+		if ( grammarTree.findFirstType(ANTLRParser.RULE)==null ) {
+			ErrorManager.error(ErrorManager.MSG_NO_RULES, getFileName());
+		}
+	}
+
+    protected void dealWithTreeFilterMode() {
+        Object filterMode = (String)getOption("filter");
+        if ( type==TREE_PARSER && filterMode!=null && filterMode.toString().equals("true") ) {
+            // check for conflicting options
+            // filter => backtrack=true
+            // filter&&output=AST => rewrite=true
+            // filter&&output!=AST => error
+            // any deviation from valid option set is an error
+            Object backtrack = (String)getOption("backtrack");
+            Object output = getOption("output");
+            Object rewrite = getOption("rewrite");
+            if ( backtrack!=null && !backtrack.toString().equals("true") ) {
+                ErrorManager.error(ErrorManager.MSG_CONFLICTING_OPTION_IN_TREE_FILTER,
+                                   "backtrack", backtrack);
+            }
+            if ( output!=null && !output.toString().equals("AST") ) {
+                ErrorManager.error(ErrorManager.MSG_CONFLICTING_OPTION_IN_TREE_FILTER,
+                                   "output", output);
+                setOption("output", "", null);
+            }
+            if ( rewrite!=null && !rewrite.toString().equals("true") ) {
+                ErrorManager.error(ErrorManager.MSG_CONFLICTING_OPTION_IN_TREE_FILTER,
+                                   "rewrite", rewrite);
+            }
+            // set options properly
+            setOption("backtrack", "true", null);
+            if ( output!=null && output.toString().equals("AST") ) {
+                setOption("rewrite", "true", null);
+            }
+            // @synpredgate set to state.backtracking==1 by code gen when filter=true
+            // superClass set in template target::treeParser
+        }
+    }
+
+	public void translateLeftRecursiveRule(GrammarAST ruleAST) {
+		//System.out.println(ruleAST.toStringTree());
+		CommonTreeNodeStream input = new CommonTreeNodeStream(ruleAST);
+		LeftRecursiveRuleAnalyzer leftRecursiveRuleWalker =
+			new LeftRecursiveRuleAnalyzer(input, this, ruleAST.enclosingRuleName);
+		boolean isLeftRec = false;
+		try {
+			//System.out.println("TESTING "+ruleAST.enclosingRuleName);
+			isLeftRec = leftRecursiveRuleWalker.rec_rule(this);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE, re);
+		}
+		if ( !isLeftRec ) return;
+		List<String> rules = new ArrayList<String>();
+		rules.add( leftRecursiveRuleWalker.getArtificialPrecStartRule() ) ;
+		rules.add( leftRecursiveRuleWalker.getArtificialOpPrecRule() );
+		rules.add( leftRecursiveRuleWalker.getArtificialPrimaryRule() );
+		for (String r : rules) {
+			GrammarAST t = parseArtificialRule(r);
+			addRule(grammarTree, t);
+			//System.out.println(t.toStringTree());
+		}
+
+		//precRuleInitCodeBlocks.add( precRuleWalker.getOpPrecJavaCode() );
+	}
+
+	public void defineGrammarSymbols() {
+		if ( Tool.internalOption_PrintGrammarTree ) {
+			System.out.println(grammarTree.toStringList());
+		}
+
+		// DEFINE RULES
+		//System.out.println("### define "+name+" rules");
+		DefineGrammarItemsWalker defineItemsWalker = new DefineGrammarItemsWalker(new CommonTreeNodeStream(getGrammarTree()));
+		try {
+			defineItemsWalker.grammar_(this);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+							   re);
+		}
+	}
+
+	/** ANALYZE ACTIONS, LOOKING FOR LABEL AND ATTR REFS, sanity check */
+	public void checkNameSpaceAndActions() {
+		examineAllExecutableActions();
+		checkAllRulesForUselessLabels();
+
+		nameSpaceChecker.checkConflicts();
+	}
+
+	/** Many imports are illegal such as lexer into a tree grammar */
+	public boolean validImport(Grammar delegate) {
+		List<Integer> validDelegators = validDelegations.get(delegate.type);
+		return validDelegators!=null && validDelegators.contains(this.type);
+	}
+
+	/** If the grammar is a combined grammar, return the text of the implicit
+	 *  lexer grammar.
+	 */
+	public String getLexerGrammar() {
+		if ( lexerGrammarST.getAttribute("literals")==null &&
+			 lexerGrammarST.getAttribute("rules")==null )
+		{
+			// if no rules, return nothing
+			return null;
+		}
+		lexerGrammarST.add("name", name);
+		// if there are any actions set for lexer, pass them in
+		if ( getActions().get("lexer")!=null ) {
+			lexerGrammarST.add("actionNames",
+										getActions().get("lexer").keySet());
+			lexerGrammarST.add("actions",
+										getActions().get("lexer").values());
+		}
+		// make sure generated grammar has the same options
+		if ( options!=null ) {
+			for (String optionName : options.keySet()) {
+				if ( !doNotCopyOptionsToLexer.contains(optionName) ) {
+					Object value = options.get(optionName);
+					lexerGrammarST.addAggr("options.{name,value}", optionName, value);
+				}
+			}
+		}
+		return lexerGrammarST.render();
+	}
+
+	public String getImplicitlyGeneratedLexerFileName() {
+		return name+
+			   IGNORE_STRING_IN_GRAMMAR_FILE_NAME +
+			   LEXER_GRAMMAR_FILE_EXTENSION;
+	}
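+
+	// For example (illustrative): with the usual constant values ("__" separator,
+	// ".g" extension), a combined grammar named "T" produces the implicit lexer
+	// grammar file name "T__.g".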
+
+	/** Get the name of the generated recognizer; may or may not be same
+	 *  as grammar name.
+	 *  Recognizer is TParser and TLexer from T if combined, else
+	 *  just use T regardless of grammar type.
+	 */
+	public String getRecognizerName() {
+		String suffix = "";
+		List<Grammar> grammarsFromRootToMe = composite.getDelegators(this);
+		//System.out.println("grammarsFromRootToMe="+grammarsFromRootToMe);
+		String qualifiedName = name;
+		if ( grammarsFromRootToMe!=null ) {
+			StringBuilder buf = new StringBuilder();
+			for (Grammar g : grammarsFromRootToMe) {
+				buf.append(g.name);
+				buf.append('_');
+			}
+			buf.append(name);
+			qualifiedName = buf.toString();
+		}
+		if ( type==Grammar.COMBINED ||
+			 (type==Grammar.LEXER && implicitLexer) )
+		{
+			suffix = Grammar.grammarTypeToFileNameSuffix[type];
+		}
+		return qualifiedName+suffix;
+	}
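+
+	// Illustrative examples (grammar names hypothetical): a combined grammar "T"
+	// gets the suffix for its type, yielding "TParser"; a parser grammar "S"
+	// imported by root grammar "M" is prefixed with its delegator chain,
+	// yielding "M_S".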
+
+	/** Parse a rule we add artificially that is a list of the other lexer
+	 *  rules like this: "Tokens : ID | INT | SEMI ;"  nextToken() will invoke
+	 *  this to set the current token.  Add char literals before
+	 *  the rule references.
+	 *
+	 *  If in filter mode, we want every alt to backtrack and we need to
+	 *  do k=1 to force the "first token def wins" rule.  Otherwise, the
+	 *  longest-match rule comes into play with LL(*).
+	 *
+	 *  The ANTLRParser antlr.g file now invokes this when parsing a lexer
+	 *  grammar, which I think is proper even though it peeks at the info
+	 *  that later phases will (re)compute.  It gets a list of lexer rules
+	 *  and builds a string representing the rule; then it creates a parser
+	 *  and adds the resulting tree to the grammar's tree.
+	 */
+	public GrammarAST addArtificialMatchTokensRule(GrammarAST grammarAST,
+												   List<String> ruleNames,
+												   List<String> delegateNames,
+												   boolean filterMode) {
+		ST matchTokenRuleST;
+		if ( filterMode ) {
+			matchTokenRuleST = new ST(
+					ARTIFICIAL_TOKENS_RULENAME+
+					" options {k=1; backtrack=true;} : <rules; separator=\"|\">;");
+		}
+		else {
+			matchTokenRuleST = new ST(
+					ARTIFICIAL_TOKENS_RULENAME+" : <rules; separator=\"|\">;");
+		}
+
+		// Now add token rule references
+		for (int i = 0; i < ruleNames.size(); i++) {
+			String rname = ruleNames.get(i);
+			matchTokenRuleST.add("rules", rname);
+		}
+		for (int i = 0; i < delegateNames.size(); i++) {
+			String dname = delegateNames.get(i);
+			matchTokenRuleST.add("rules", dname+".Tokens");
+		}
+		//System.out.println("tokens rule: "+matchTokenRuleST.toString());
+		GrammarAST r = parseArtificialRule(matchTokenRuleST.render());
+		addRule(grammarAST, r);
+		//addRule((GrammarAST)parser.getAST());
+		//return (GrammarAST)parser.getAST();
+		return r;
+	}
+
+	public GrammarAST parseArtificialRule(String ruleText) {
+		ANTLRLexer lexer = new ANTLRLexer(new ANTLRStringStream(ruleText));
+		ANTLRParser parser = ANTLRParser.createParser(new CommonTokenStream(lexer));
+		parser.setGrammar(this);
+		parser.setGrammarType(this.type);
+		try {
+			ANTLRParser.rule_return result = parser.rule();
+			return result.getTree();
+		}
+		catch (Exception e) {
+			ErrorManager.error(ErrorManager.MSG_ERROR_CREATING_ARTIFICIAL_RULE,
+							   e);
+			return null;
+		}
+	}
+
+	public void addRule(GrammarAST grammarTree, GrammarAST t) {
+		GrammarAST p = null;
+		for (int i = 0; i < grammarTree.getChildCount(); i++ ) {
+			p = (GrammarAST)grammarTree.getChild(i);
+			if (p == null || p.getType() == ANTLRParser.RULE || p.getType() == ANTLRParser.PREC_RULE) {
+				break;
+			}
+		}
+
+		if (p != null) {
+			grammarTree.addChild(t);
+		}
+	}
+
+	/** For any syntactic predicates, we need to define rules for them; they will get
+	 *  defined automatically like any other rule. :)
+	 */
+	protected List<? extends GrammarAST> getArtificialRulesForSyntacticPredicates(LinkedHashMap<String,GrammarAST> nameToSynpredASTMap)
+	{
+		List<GrammarAST> rules = new ArrayList<GrammarAST>();
+		if ( nameToSynpredASTMap==null ) {
+			return rules;
+		}
+		boolean isLexer = grammarTree.getType()==ANTLRParser.LEXER_GRAMMAR;
+		for (Map.Entry<String, GrammarAST> entry : nameToSynpredASTMap.entrySet()) {
+			String synpredName = entry.getKey();
+			GrammarAST fragmentAST = entry.getValue();
+			GrammarAST ruleAST =
+				ANTLRParser.createSimpleRuleAST(synpredName,
+												fragmentAST,
+												isLexer);
+			rules.add(ruleAST);
+		}
+		return rules;
+	}
+
+	public void addRulesForSyntacticPredicates() {
+		// Get syn pred rules and add to existing tree
+		List<? extends GrammarAST> synpredRules =
+			getArtificialRulesForSyntacticPredicates(nameToSynpredASTMap);
+		for (int i = 0; i < synpredRules.size(); i++) {
+			GrammarAST rAST = (GrammarAST) synpredRules.get(i);
+			grammarTree.addChild(rAST);
+		}
+	}
+
+	/** Walk the list of options, altering this Grammar object according
+	 *  to any I recognize.
+	protected void processOptions() {
+		Iterator optionNames = options.keySet().iterator();
+		while (optionNames.hasNext()) {
+			String optionName = (String) optionNames.next();
+			Object value = options.get(optionName);
+			if ( optionName.equals("tokenVocab") ) {
+
+			}
+		}
+	}
+	 */
+
+	/** Define all the rule begin/end NFAStates to solve forward reference
+	 *  issues.  Critical for composite grammars too.
+	 *  This is normally called on all root/delegates manually and then
+	 *  buildNFA() is called afterwards because the NFA construction needs
+	 *  to see rule start/stop states from potentially every grammar. Has
+	 *  to have these created a priori.  Testing routines will often
+	 *  just call buildNFA(), which forces a call to this method if not
+	 *  done already. Works ONLY for single noncomposite grammars.
+	 */
+	public void createRuleStartAndStopNFAStates() {
+		//System.out.println("### createRuleStartAndStopNFAStates "+getGrammarTypeString()+" grammar "+name+" NFAs");
+		if ( nfa!=null ) {
+			return;
+		}
+		nfa = new NFA(this);
+		factory = new NFAFactory(nfa);
+
+		Collection<Rule> rules = getRules();
+		for (Rule r : rules) {
+			String ruleName = r.name;
+			NFAState ruleBeginState = factory.newState();
+			ruleBeginState.setDescription("rule "+ruleName+" start");
+			ruleBeginState.enclosingRule = r;
+			r.startState = ruleBeginState;
+			NFAState ruleEndState = factory.newState();
+			ruleEndState.setDescription("rule "+ruleName+" end");
+			ruleEndState.setAcceptState(true);
+			ruleEndState.enclosingRule = r;
+			r.stopState = ruleEndState;
+		}
+	}
+
+	public void buildNFA() {
+		if ( nfa==null ) {
+			createRuleStartAndStopNFAStates();
+		}
+		if ( nfa.complete ) {
+			// don't let it create more than once; has side-effects
+			return;
+		}
+		//System.out.println("### build "+getGrammarTypeString()+" grammar "+name+" NFAs");
+		if ( getRules().isEmpty() ) {
+			return;
+		}
+
+		CommonTreeNodeStream input = new CommonTreeNodeStream(getGrammarTree());
+		TreeToNFAConverter nfaBuilder = new TreeToNFAConverter(input, this, nfa, factory);
+		try {
+			nfaBuilder.grammar_();
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
+							   name,
+							   re);
+		}
+		nfa.complete = true;
+	}
+
+	/** For each decision in this grammar, compute a single DFA using the
+	 *  NFA states associated with the decision.  The DFA construction
+	 *  determines whether or not the alternatives in the decision are
+	 *  separable using a regular lookahead language.
+	 *
+	 *  Store the lookahead DFAs in the AST created from the user's grammar
+	 *  so the code generator or whoever can easily access it.
+	 *
+	 *  This is a separate method because you might want to create a
+	 *  Grammar without doing the expensive analysis.
+	 */
+	public void createLookaheadDFAs() {
+		createLookaheadDFAs(true);
+	}
+
+	public void createLookaheadDFAs(boolean wackTempStructures) {
+		if ( nfa==null ) {
+			buildNFA();
+		}
+
+		// CHECK FOR LEFT RECURSION; Make sure we can actually do analysis
+		checkAllRulesForLeftRecursion();
+
+		/*
+		// was there a severe problem while sniffing the grammar?
+		if ( ErrorManager.doNotAttemptAnalysis() ) {
+			return;
+		}
+		*/
+
+		long start = System.currentTimeMillis();
+
+		//System.out.println("### create DFAs");
+		int numDecisions = getNumberOfDecisions();
+		if ( NFAToDFAConverter.SINGLE_THREADED_NFA_CONVERSION ) {
+			for (int decision=1; decision<=numDecisions; decision++) {
+				NFAState decisionStartState = getDecisionNFAStartState(decision);
+				if ( leftRecursiveRules.contains(decisionStartState.enclosingRule) ) {
+					// don't bother to process decisions within left recursive rules.
+					if ( composite.watchNFAConversion ) {
+						System.out.println("ignoring decision "+decision+
+										   " within left-recursive rule "+decisionStartState.enclosingRule.name);
+					}
+					continue;
+				}
+				if ( !externalAnalysisAbort && decisionStartState.getNumberOfTransitions()>1 ) {
+					Rule r = decisionStartState.enclosingRule;
+					if ( r.isSynPred && !synPredNamesUsedInDFA.contains(r.name) ) {
+						continue;
+					}
+					DFA dfa = null;
+					// if k=* or k=1, try LL(1)
+					if ( getUserMaxLookahead(decision)==0 ||
+						 getUserMaxLookahead(decision)==1 )
+					{
+						dfa = createLL_1_LookaheadDFA(decision);
+					}
+					if ( dfa==null ) {
+						if ( composite.watchNFAConversion ) {
+							System.out.println("decision "+decision+
+											   " not suitable for LL(1)-optimized DFA analysis");
+						}
+						dfa = createLookaheadDFA(decision, wackTempStructures);
+					}
+					if ( dfa.startState==null ) {
+						// something went wrong; wipe out DFA
+						setLookaheadDFA(decision, null);
+					}
+					if ( Tool.internalOption_PrintDFA ) {
+						System.out.println("DFA d="+decision);
+						FASerializer serializer = new FASerializer(nfa.grammar);
+						String result = serializer.serialize(dfa.startState);
+						System.out.println(result);
+					}
+				}
+			}
+		}
+		else {
+			ErrorManager.info("two-threaded DFA conversion");
+			// create a barrier for the 2 DFA conversion threads plus this main creation thread
+			Barrier barrier = new Barrier(3);
+			// assume 2 CPU for now
+			int midpoint = numDecisions/2;
+			NFAConversionThread t1 =
+				new NFAConversionThread(this, barrier, 1, midpoint);
+			new Thread(t1).start();
+			if ( midpoint == (numDecisions/2) ) {
+				midpoint++;
+			}
+			NFAConversionThread t2 =
+				new NFAConversionThread(this, barrier, midpoint, numDecisions);
+			new Thread(t2).start();
+			// wait for these two threads to finish
+			try {
+				barrier.waitForRelease();
+			}
+			catch(InterruptedException e) {
+				ErrorManager.internalError("what the hell? DFA interruptus", e);
+			}
+		}
+
+		long stop = System.currentTimeMillis();
+		DFACreationWallClockTimeInMS = stop - start;
+
+		// indicate that we've finished building DFA (even if #decisions==0)
+		allDecisionDFACreated = true;
+	}
+
+	public DFA createLL_1_LookaheadDFA(int decision) {
+		Decision d = getDecision(decision);
+		String enclosingRule = d.startState.enclosingRule.name;
+		Rule r = d.startState.enclosingRule;
+		NFAState decisionStartState = getDecisionNFAStartState(decision);
+
+		if ( composite.watchNFAConversion ) {
+			System.out.println("--------------------\nattempting LL(1) DFA (d="
+							   +decisionStartState.getDecisionNumber()+") for "+
+							   decisionStartState.getDescription());
+		}
+
+		if ( r.isSynPred && !synPredNamesUsedInDFA.contains(enclosingRule) ) {
+			return null;
+		}
+
+		// compute lookahead for each alt
+		int numAlts = getNumberOfAltsForDecisionNFA(decisionStartState);
+		LookaheadSet[] altLook = new LookaheadSet[numAlts+1];
+		for (int alt = 1; alt <= numAlts; alt++) {
+			int walkAlt =
+				decisionStartState.translateDisplayAltToWalkAlt(alt);
+			NFAState altLeftEdge = getNFAStateForAltOfDecision(decisionStartState, walkAlt);
+			NFAState altStartState = (NFAState)altLeftEdge.transition[0].target;
+			//System.out.println("alt "+alt+" start state = "+altStartState.stateNumber);
+			altLook[alt] = ll1Analyzer.LOOK(altStartState);
+			//System.out.println("alt "+alt+": "+altLook[alt].toString(this));
+		}
+
+		// compare alt i with alt j for disjointness
+		boolean decisionIsLL_1 = true;
+outer:
+		for (int i = 1; i <= numAlts; i++) {
+			for (int j = i+1; j <= numAlts; j++) {
+				/*
+				System.out.println("compare "+i+", "+j+": "+
+								   altLook[i].toString(this)+" with "+
+								   altLook[j].toString(this));
+				*/
+				LookaheadSet collision = altLook[i].intersection(altLook[j]);
+				if ( !collision.isNil() ) {
+					//System.out.println("collision (non-LL(1)): "+collision.toString(this));
+					decisionIsLL_1 = false;
+					break outer;
+				}
+			}
+		}
+
+		boolean foundConfoundingPredicate =
+			ll1Analyzer.detectConfoundingPredicates(decisionStartState);
+		if ( decisionIsLL_1 && !foundConfoundingPredicate ) {
+			// build an LL(1) optimized DFA with edge for each altLook[i]
+			if ( NFAToDFAConverter.debug ) {
+				System.out.println("decision "+decision+" is simple LL(1)");
+			}
+			DFA lookaheadDFA = new LL1DFA(decision, decisionStartState, altLook);
+			setLookaheadDFA(decision, lookaheadDFA);
+			updateLineColumnToLookaheadDFAMap(lookaheadDFA);
+			return lookaheadDFA;
+		}
+
+		// not LL(1) but perhaps we can solve with simplified predicate search
+		// even if k=1 set manually, only resolve here if we have preds; i.e.,
+		// don't resolve etc...
+
+		/*
+		SemanticContext visiblePredicates =
+			ll1Analyzer.getPredicates(decisionStartState);
+		boolean foundConfoundingPredicate =
+			ll1Analyzer.detectConfoundingPredicates(decisionStartState);
+			*/
+
+		// exit if not forced k=1 or we found a predicate situation we
+		// can't handle: predicates in rules invoked from this decision.
+		if ( getUserMaxLookahead(decision)!=1 || // not manually set to k=1
+			 !getAutoBacktrackMode(decision) ||
+			 foundConfoundingPredicate )
+		{
+			//System.out.println("trying LL(*)");
+			return null;
+		}
+
+		List<IntervalSet> edges = new ArrayList<IntervalSet>();
+		for (int i = 1; i < altLook.length; i++) {
+			LookaheadSet s = altLook[i];
+			edges.add(s.tokenTypeSet);
+		}
+		List<IntervalSet> disjoint = makeEdgeSetsDisjoint(edges);
+		//System.out.println("disjoint="+disjoint);
+
+		MultiMap<IntervalSet, Integer> edgeMap = new MultiMap<IntervalSet, Integer>();
+		for (int i = 0; i < disjoint.size(); i++) {
+			IntervalSet ds = disjoint.get(i);
+			for (int alt = 1; alt < altLook.length; alt++) {
+				LookaheadSet look = altLook[alt];
+				if ( !ds.and(look.tokenTypeSet).isNil() ) {
+					edgeMap.map(ds, alt);
+				}
+			}
+		}
+		//System.out.println("edge map: "+edgeMap);
+
+		// TODO: how do we know we covered stuff?
+
+		// build an LL(1) optimized DFA with edge for each altLook[i]
+		DFA lookaheadDFA = new LL1DFA(decision, decisionStartState, edgeMap);
+		setLookaheadDFA(decision, lookaheadDFA);
+
+		// create map from line:col to decision DFA (for ANTLRWorks)
+		updateLineColumnToLookaheadDFAMap(lookaheadDFA);
+
+		return lookaheadDFA;
+	}
+
+	private void updateLineColumnToLookaheadDFAMap(DFA lookaheadDFA) {
+		GrammarAST decisionAST = nfa.grammar.getDecisionBlockAST(lookaheadDFA.decisionNumber);
+		int line = decisionAST.getLine();
+		int col = decisionAST.getCharPositionInLine();
+		lineColumnToLookaheadDFAMap.put(new StringBuffer().append(line).append(":")
+										.append(col).toString(), lookaheadDFA);
+	}
+
+	protected List<IntervalSet> makeEdgeSetsDisjoint(List<IntervalSet> edges) {
+		OrderedHashSet<IntervalSet> disjointSets = new OrderedHashSet<IntervalSet>();
+		// walk each incoming edge label/set and add to disjoint set
+		int numEdges = edges.size();
+		for (int e = 0; e < numEdges; e++) {
+			IntervalSet t = edges.get(e);
+			if ( disjointSets.contains(t) ) { // exact set present
+				continue;
+			}
+
+			// compare t with set i for disjointness
+			IntervalSet remainder = t; // remainder starts out as whole set to add
+			int numDisjointElements = disjointSets.size();
+			for (int i = 0; i < numDisjointElements; i++) {
+				IntervalSet s_i = disjointSets.get(i);
+
+				if ( t.and(s_i).isNil() ) { // nothing in common
+					continue;
+				}
+				//System.out.println(label+" collides with "+rl);
+
+				// For any (s_i, t) with s_i&t!=nil replace with (s_i-t, s_i&t)
+				// (ignoring s_i-t if nil; don't put in list)
+
+				// Replace existing s_i with intersection since we
+				// know that will always be a non nil character class
+				IntervalSet intersection = s_i.and(t);
+				disjointSets.set(i, intersection);
+
+				// Compute s_i-t to see what is in current set and not in incoming
+				IntervalSet existingMinusNewElements = s_i.subtract(t);
+				//System.out.println(s_i+"-"+t+"="+existingMinusNewElements);
+				if ( !existingMinusNewElements.isNil() ) {
+					// found a new character class, add to the end (doesn't affect
+					// outer loop duration due to n computation a priori).
+					disjointSets.add(existingMinusNewElements);
+				}
+
+				// anything left to add to the reachableLabels?
+				remainder = t.subtract(s_i);
+				if ( remainder.isNil() ) {
+					break; // nothing left to add to set.  done!
+				}
+
+				t = remainder;
+			}
+			if ( !remainder.isNil() ) {
+				disjointSets.add(remainder);
+			}
+		}
+		return disjointSets.elements();
+	}
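+
+	// Worked example (illustrative): given edges [{1..5}, {3..8}], the first pass
+	// records {1..5}; the second splits it against {3..8} into the intersection
+	// {3..5} plus the leftover {1..2}, then adds the remainder {6..8}.  The result
+	// [{3..5}, {1..2}, {6..8}] covers the same labels with pairwise-disjoint sets.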
+
+	public DFA createLookaheadDFA(int decision, boolean wackTempStructures) {
+		Decision d = getDecision(decision);
+		String enclosingRule = d.startState.enclosingRule.name;
+		Rule r = d.startState.enclosingRule;
+
+		//System.out.println("createLookaheadDFA(): "+enclosingRule+" dec "+decision+"; synprednames prev used "+synPredNamesUsedInDFA);
+		NFAState decisionStartState = getDecisionNFAStartState(decision);
+		long startDFA=0,stopDFA;
+		if ( composite.watchNFAConversion ) {
+			System.out.println("--------------------\nbuilding lookahead DFA (d="
+							   +decisionStartState.getDecisionNumber()+") for "+
+							   decisionStartState.getDescription());
+			startDFA = System.currentTimeMillis();
+		}
+
+		DFA lookaheadDFA = new DFA(decision, decisionStartState);
+		// Retry to create a simpler DFA if analysis failed (non-LL(*),
+		// recursion overflow, or time out).
+		boolean failed =
+			lookaheadDFA.probe.isNonLLStarDecision() ||
+			lookaheadDFA.probe.analysisOverflowed();
+		if ( failed && lookaheadDFA.okToRetryDFAWithK1() ) {
+			// set k=1 option and try again.
+			// First, clean up tracking stuff
+			decisionsWhoseDFAsUsesSynPreds.remove(lookaheadDFA);
+			// TODO: clean up synPredNamesUsedInDFA also (harder)
+			d.blockAST.setBlockOption(this, "k", Utils.integer(1));
+			if ( composite.watchNFAConversion ) {
+				System.out.print("trying decision "+decision+
+								 " again with k=1; reason: "+
+								 lookaheadDFA.getReasonForFailure());
+			}
+			lookaheadDFA = null; // make sure other memory is "free" before redoing
+			lookaheadDFA = new DFA(decision, decisionStartState);
+		}
+
+		setLookaheadDFA(decision, lookaheadDFA);
+
+		if ( wackTempStructures ) {
+			for (DFAState s : lookaheadDFA.getUniqueStates().values()) {
+				s.reset();
+			}
+		}
+
+		// create map from line:col to decision DFA (for ANTLRWorks)
+		updateLineColumnToLookaheadDFAMap(lookaheadDFA);
+
+		if ( composite.watchNFAConversion ) {
+			stopDFA = System.currentTimeMillis();
+			System.out.println("cost: "+lookaheadDFA.getNumberOfStates()+
+							   " states, "+(int)(stopDFA-startDFA)+" ms");
+		}
+		//System.out.println("after create DFA; synPredNamesUsedInDFA="+synPredNamesUsedInDFA);
+		return lookaheadDFA;
+	}
+
+	/** Terminate DFA creation (grammar analysis).
+	 */
+	public void externallyAbortNFAToDFAConversion() {
+		externalAnalysisAbort = true;
+	}
+
+	public boolean NFAToDFAConversionExternallyAborted() {
+		return externalAnalysisAbort;
+	}
+
+	/** Return a new unique integer in the token type space */
+	public int getNewTokenType() {
+		composite.maxTokenType++;
+		return composite.maxTokenType;
+	}
+
+	/** Define a token at a particular token type value.  Blast an
+	 *  old value with a new one.  This is called during normal grammar processing
+	 *  and during import vocab operations to set tokens with specific values.
+	 */
+	public void defineToken(String text, int tokenType) {
+		//System.out.println("defineToken("+text+", "+tokenType+")");
+		if ( composite.tokenIDToTypeMap.get(text)!=null ) {
+			// already defined?  Must be predefined one like EOF;
+			// do nothing
+			return;
+		}
+		// the index in the typeToTokenList table is actually shifted to
+		// hold faux labels as you cannot have negative indices.
+		if ( text.charAt(0)=='\'' ) {
+			composite.stringLiteralToTypeMap.put(text, Utils.integer(tokenType));
+			// track in reverse index too
+			if ( tokenType>=composite.typeToStringLiteralList.size() ) {
+				composite.typeToStringLiteralList.setSize(tokenType+1);
+			}
+			composite.typeToStringLiteralList.set(tokenType, text);
+		}
+		else { // must be a label like ID
+			composite.tokenIDToTypeMap.put(text, Utils.integer(tokenType));
+		}
+		int index = Label.NUM_FAUX_LABELS+tokenType-1;
+		//System.out.println("defining "+name+" token "+text+" at type="+tokenType+", index="+index);
+		composite.maxTokenType = Math.max(composite.maxTokenType, tokenType);
+		if ( index>=composite.typeToTokenList.size() ) {
+			composite.typeToTokenList.setSize(index+1);
+		}
+		String prevToken = composite.typeToTokenList.get(index);
+		if ( prevToken==null || prevToken.charAt(0)=='\'' ) {
+			// only record if nothing there before or if thing before was a literal
+			composite.typeToTokenList.set(index, text);
+		}
+	}
+
+	/** Define a new rule.  A new rule index is created by incrementing
+	 *  ruleIndex.
+	 */
+	public void defineRule(Token ruleToken,
+						   String modifier,
+						   Map<String, Object> options,
+						   GrammarAST tree,
+						   GrammarAST argActionAST,
+						   int numAlts)
+	{
+		String ruleName = ruleToken.getText();
+		if ( getLocallyDefinedRule(ruleName)!=null ) {
+			ErrorManager.grammarError(ErrorManager.MSG_RULE_REDEFINITION,
+									  this, ruleToken, ruleName);
+			return;
+		}
+
+		if ( (type==Grammar.PARSER||type==Grammar.TREE_PARSER) &&
+			 Character.isUpperCase(ruleName.charAt(0)) )
+		{
+			ErrorManager.grammarError(ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED,
+									  this, ruleToken, ruleName);
+			return;
+		}
+
+		Rule r = new Rule(this, ruleName, composite.ruleIndex, numAlts);
+		/*
+		System.out.println("defineRule("+ruleName+",modifier="+modifier+
+						   "): index="+r.index+", nalts="+numAlts);
+		*/
+		r.modifier = modifier;
+		nameToRuleMap.put(ruleName, r);
+		setRuleAST(ruleName, tree);
+		r.setOptions(options, ruleToken);
+		r.argActionAST = argActionAST;
+		composite.ruleIndexToRuleList.setSize(composite.ruleIndex+1);
+		composite.ruleIndexToRuleList.set(composite.ruleIndex, r);
+		composite.ruleIndex++;
+		if ( ruleName.startsWith(SYNPRED_RULE_PREFIX) ) {
+			r.isSynPred = true;
+		}
+	}
+
+	/** Define a new predicate and get back its name for use in building
+	 *  a semantic predicate reference to the syn pred.
+	 */
+	public String defineSyntacticPredicate(GrammarAST blockAST,
+										   String currentRuleName)
+	{
+		if ( nameToSynpredASTMap==null ) {
+			nameToSynpredASTMap = new LinkedHashMap<String, GrammarAST>();
+		}
+		String predName =
+			SYNPRED_RULE_PREFIX+(nameToSynpredASTMap.size() + 1)+"_"+name;
+		blockAST.setTreeEnclosingRuleNameDeeply(predName);
+		nameToSynpredASTMap.put(predName, blockAST);
+		return predName;
+	}
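+
+	// For example (illustrative): the first synpred defined in a grammar named "T"
+	// gets the name SYNPRED_RULE_PREFIX + "1_T" (i.e. "synpred1_T" if the prefix
+	// constant is "synpred").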
+
+	public LinkedHashMap<String, GrammarAST> getSyntacticPredicates() {
+		return nameToSynpredASTMap;
+	}
+
+	public GrammarAST getSyntacticPredicate(String name) {
+		if ( nameToSynpredASTMap==null ) {
+			return null;
+		}
+		return nameToSynpredASTMap.get(name);
+	}
+
+	public void synPredUsedInDFA(DFA dfa, SemanticContext semCtx) {
+		decisionsWhoseDFAsUsesSynPreds.add(dfa);
+		semCtx.trackUseOfSyntacticPredicates(this); // walk ctx looking for preds
+	}
+
+	/*
+	public Set<Rule> getRuleNamesVisitedDuringLOOK() {
+		return rulesSensitiveToOtherRules;
+	}
+	*/
+
+	/** Given @scope::name {action} define it for this grammar.  Later,
+	 *  the code generator will ask for the actions table.  For composite
+     *  grammars, make sure header action propagates down to all delegates.
+	 */
+	public void defineNamedAction(GrammarAST ampersandAST,
+								  String scope,
+								  GrammarAST nameAST,
+								  GrammarAST actionAST)
+	{
+		if ( scope==null ) {
+			scope = getDefaultActionScope(type);
+		}
+		//System.out.println("Grammar "+name+" define @"+scope+"::"+nameAST.getText()+"{"+actionAST.getText()+"}");
+		String actionName = nameAST.getText();
+		Map<String, Object> scopeActions = getActions().get(scope);
+		if ( scopeActions==null ) {
+			scopeActions = new HashMap<String, Object>();
+			getActions().put(scope, scopeActions);
+		}
+		Object a = scopeActions.get(actionName);
+		if ( a!=null ) {
+			ErrorManager.grammarError(
+				ErrorManager.MSG_ACTION_REDEFINITION,this,
+				nameAST.getToken(),nameAST.getText());
+		}
+		else {
+			scopeActions.put(actionName,actionAST);
+		}
+        // propagate header regardless of scope (lexer, parser, ...)?
+        if ( this==composite.getRootGrammar() && actionName.equals("header") ) {
+            List<Grammar> allgrammars = composite.getRootGrammar().getDelegates();
+            for (Grammar delegate : allgrammars) {
+				if ( target.isValidActionScope(delegate.type, scope) ) {
+					//System.out.println("propogate to "+delegate.name);
+                	delegate.defineNamedAction(ampersandAST, scope, nameAST, actionAST);
+				}
+            }
+        }
+    }
+
+    public void setSynPredGateIfNotAlready(ST gateST) {
+        String scope = getDefaultActionScope(type);
+        Map<String, Object> actionsForGrammarScope = getActions().get(scope);
+        // if no synpredgate action set by user then set
+        if ( (actionsForGrammarScope==null ||
+             !actionsForGrammarScope.containsKey(Grammar.SYNPREDGATE_ACTION_NAME)) )
+        {
+            if ( actionsForGrammarScope==null ) {
+                actionsForGrammarScope=new HashMap<String, Object>();
+                getActions().put(scope, actionsForGrammarScope);
+            }
+            actionsForGrammarScope.put(Grammar.SYNPREDGATE_ACTION_NAME,
+                                       gateST);
+        }
+    }
+
+	public Map<String, Map<String, Object>> getActions() {
+		return actions;
+	}
+
+	/** Given a grammar type, what should be the default action scope?
+	 *  If I say @members in a COMBINED grammar, for example, the
+	 *  default scope should be "parser".
+	 */
+	public String getDefaultActionScope(int grammarType) {
+		switch (grammarType) {
+			case Grammar.LEXER :
+				return "lexer";
+			case Grammar.PARSER :
+			case Grammar.COMBINED :
+				return "parser";
+			case Grammar.TREE_PARSER :
+				return "treeparser";
+		}
+		return null;
+	}
+
+	public void defineLexerRuleFoundInParser(Token ruleToken,
+											 GrammarAST ruleAST)
+	{
+//		System.out.println("rule tree is:\n"+ruleAST.toStringTree());
+		/*
+		String ruleText = tokenBuffer.toOriginalString(ruleAST.ruleStartTokenIndex,
+											   ruleAST.ruleStopTokenIndex);
+		*/
+		// first, create the text of the rule
+		StringBuilder buf = new StringBuilder();
+		buf.append("// $ANTLR src \"");
+		buf.append(getFileName());
+		buf.append("\" ");
+		buf.append(ruleAST.getLine());
+		buf.append("\n");
+		for (int i=ruleAST.getTokenStartIndex();
+			 i<=ruleAST.getTokenStopIndex() && i<tokenBuffer.size();
+			 i++)
+		{
+			CommonToken t = (CommonToken)tokenBuffer.get(i);
+			// undo the text deletions done by the lexer (ugh)
+			if ( t.getType()==ANTLRParser.BLOCK ) {
+				buf.append("(");
+			}
+			else if ( t.getType()==ANTLRParser.ACTION ) {
+				buf.append("{");
+				buf.append(t.getText());
+				buf.append("}");
+			}
+			else if ( t.getType()==ANTLRParser.SEMPRED ||
+					  t.getType()==ANTLRParser.SYN_SEMPRED ||
+					  t.getType()==ANTLRParser.GATED_SEMPRED ||
+					  t.getType()==ANTLRParser.BACKTRACK_SEMPRED )
+			{
+				buf.append("{");
+				buf.append(t.getText());
+				buf.append("}?");
+			}
+			else if ( t.getType()==ANTLRParser.ARG_ACTION ) {
+				buf.append("[");
+				buf.append(t.getText());
+				buf.append("]");
+			}
+			else {
+				buf.append(t.getText());
+			}
+		}
+		String ruleText = buf.toString();
+		//System.out.println("[["+ruleText+"]]");
+		// now put the rule into the lexer grammar template
+		if ( getGrammarIsRoot() ) { // don't build lexers for delegates
+			lexerGrammarST.add("rules", ruleText);
+		}
+		// track this lexer rule's name
+		composite.lexerRules.add(ruleToken.getText());
+	}
+
+	/** If someone does PLUS='+' in the parser, must make sure we get
+	 *  "PLUS : '+' ;" in lexer not "T73 : '+';"
+	 */
+	public void defineLexerRuleForAliasedStringLiteral(String tokenID,
+													   String literal,
+													   int tokenType)
+	{
+		if ( getGrammarIsRoot() ) { // don't build lexers for delegates
+			//System.out.println("defineLexerRuleForAliasedStringLiteral: "+literal+" "+tokenType);
+			lexerGrammarST.addAggr("literals.{ruleName,type,literal}",
+										tokenID,
+										Utils.integer(tokenType),
+										literal);
+		}
+		// track this lexer rule's name
+		composite.lexerRules.add(tokenID);
+	}
+
+	public void defineLexerRuleForStringLiteral(String literal, int tokenType) {
+		//System.out.println("defineLexerRuleForStringLiteral: "+literal+" "+tokenType);
+		// compute new token name like T237 and define it as having tokenType
+		String tokenID = computeTokenNameFromLiteral(tokenType,literal);
+		defineToken(tokenID, tokenType);
+		// tell implicit lexer to define a rule to match the literal
+		if ( getGrammarIsRoot() ) { // don't build lexers for delegates
+			lexerGrammarST.addAggr("literals.{ruleName,type,literal}",
+										tokenID,
+										Utils.integer(tokenType),
+										literal);
+		}
+	}
+
+	public Rule getLocallyDefinedRule(String ruleName) {
+		Rule r = nameToRuleMap.get(ruleName);
+		return r;
+	}
+
+	public Rule getRule(String ruleName) {
+		Rule r = composite.getRule(ruleName);
+		/*
+		if ( r!=null && r.grammar != this ) {
+			System.out.println(name+".getRule("+ruleName+")="+r);
+		}
+		*/
+		return r;
+	}
+
+	public Rule getRule(String scopeName, String ruleName) {
+		if ( scopeName!=null ) { // scope override
+			Grammar scope = composite.getGrammar(scopeName);
+			if ( scope==null ) {
+				return null;
+			}
+			return scope.getLocallyDefinedRule(ruleName);
+		}
+		return getRule(ruleName);
+	}
+
+	public int getRuleIndex(String scopeName, String ruleName) {
+		Rule r = getRule(scopeName, ruleName);
+		if ( r!=null ) {
+			return r.index;
+		}
+		return INVALID_RULE_INDEX;
+	}
+
+	public int getRuleIndex(String ruleName) {
+		return getRuleIndex(null, ruleName);
+	}
+
+	public String getRuleName(int ruleIndex) {
+		Rule r = composite.ruleIndexToRuleList.get(ruleIndex);
+		if ( r!=null ) {
+			return r.name;
+		}
+		return null;
+	}
+
+	/** Should codegen.g gen rule for ruleName?
+	 * 	If synpred, only gen if used in a DFA.
+	 *  If regular rule, only gen if not overridden in delegator.
+	 *  Always gen Tokens rule though.
+	 */
+	public boolean generateMethodForRule(String ruleName) {
+		if ( ruleName.equals(ARTIFICIAL_TOKENS_RULENAME) ) {
+			// always generate Tokens rule to satisfy lexer interface
+			// but it may have no alternatives.
+			return true;
+		}
+		if ( overriddenRules.contains(ruleName) ) {
+			// don't generate any overridden rules
+			return false;
+		}
+		// generate if non-synpred or synpred used in a DFA
+		Rule r = getLocallyDefinedRule(ruleName);
+		return !r.isSynPred ||
+			   (r.isSynPred&&synPredNamesUsedInDFA.contains(ruleName));
+	}
+
+	public AttributeScope defineGlobalScope(String name, Token scopeAction) {
+		AttributeScope scope = new AttributeScope(this, name, scopeAction);
+		scopes.put(name,scope);
+		return scope;
+	}
+
+	public AttributeScope createReturnScope(String ruleName, Token retAction) {
+		AttributeScope scope = new AttributeScope(this, ruleName, retAction);
+		scope.isReturnScope = true;
+		return scope;
+	}
+
+	public AttributeScope createRuleScope(String ruleName, Token scopeAction) {
+		AttributeScope scope = new AttributeScope(this, ruleName, scopeAction);
+		scope.isDynamicRuleScope = true;
+		return scope;
+	}
+
+	public AttributeScope createParameterScope(String ruleName, Token argAction) {
+		AttributeScope scope = new AttributeScope(this, ruleName, argAction);
+		scope.isParameterScope = true;
+		return scope;
+	}
+
+	/** Get a global scope */
+	public AttributeScope getGlobalScope(String name) {
+		return scopes.get(name);
+	}
+
+	public Map<String, AttributeScope> getGlobalScopes() {
+		return scopes;
+	}
+
+	/** Define a label defined in a rule r; check the validity then ask the
+	 *  Rule object to actually define it.
+	 */
+	protected void defineLabel(Rule r, Token label, GrammarAST element, int type) {
+		boolean err = nameSpaceChecker.checkForLabelTypeMismatch(r, label, type);
+		if ( err ) {
+			return;
+		}
+		r.defineLabel(label, element, type);
+	}
+
+	public void defineTokenRefLabel(String ruleName,
+									Token label,
+									GrammarAST tokenRef)
+	{
+		Rule r = getLocallyDefinedRule(ruleName);
+		if ( r!=null ) {
+			if ( type==LEXER &&
+				 (tokenRef.getType()==ANTLRParser.CHAR_LITERAL||
+				  tokenRef.getType()==ANTLRParser.BLOCK||
+				  tokenRef.getType()==ANTLRParser.NOT||
+				  tokenRef.getType()==ANTLRParser.CHAR_RANGE||
+				  tokenRef.getType()==ANTLRParser.WILDCARD))
+			{
+				defineLabel(r, label, tokenRef, CHAR_LABEL);
+			}
+            else {
+				defineLabel(r, label, tokenRef, TOKEN_LABEL);
+			}
+		}
+	}
+
+    public void defineWildcardTreeLabel(String ruleName,
+                                           Token label,
+                                           GrammarAST tokenRef)
+    {
+        Rule r = getLocallyDefinedRule(ruleName);
+        if ( r!=null ) {
+            defineLabel(r, label, tokenRef, WILDCARD_TREE_LABEL);
+        }
+    }
+
+    public void defineWildcardTreeListLabel(String ruleName,
+                                           Token label,
+                                           GrammarAST tokenRef)
+    {
+        Rule r = getLocallyDefinedRule(ruleName);
+        if ( r!=null ) {
+            defineLabel(r, label, tokenRef, WILDCARD_TREE_LIST_LABEL);
+        }
+    }
+
+    public void defineRuleRefLabel(String ruleName,
+								   Token label,
+								   GrammarAST ruleRef)
+	{
+		Rule r = getLocallyDefinedRule(ruleName);
+		if ( r!=null ) {
+			defineLabel(r, label, ruleRef, RULE_LABEL);
+		}
+	}
+
+	public void defineTokenListLabel(String ruleName,
+									 Token label,
+									 GrammarAST element)
+	{
+		Rule r = getLocallyDefinedRule(ruleName);
+		if ( r!=null ) {
+			defineLabel(r, label, element, TOKEN_LIST_LABEL);
+		}
+	}
+
+	public void defineRuleListLabel(String ruleName,
+									Token label,
+									GrammarAST element)
+	{
+		Rule r = getLocallyDefinedRule(ruleName);
+		if ( r!=null ) {
+			if ( !r.getHasMultipleReturnValues() ) {
+				ErrorManager.grammarError(
+					ErrorManager.MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT,this,
+					label,label.getText());
+			}
+			defineLabel(r, label, element, RULE_LIST_LABEL);
+		}
+	}
+
+	/** Given a set of all rewrite elements on right of -&gt;, filter for
+	 *  label types such as Grammar.TOKEN_LABEL, Grammar.TOKEN_LIST_LABEL, ...
+	 *  Return the set of label names that match the requested label type.
+	 */
+	public Set<String> getLabels(Set<GrammarAST> rewriteElements, int labelType) {
+		Set<String> labels = new HashSet<String>();
+		for (GrammarAST el : rewriteElements) {
+			if ( el.getType()==ANTLRParser.LABEL ) {
+				String labelName = el.getText();
+				Rule enclosingRule = getLocallyDefinedRule(el.enclosingRuleName);
+				if ( enclosingRule==null ) continue;
+				LabelElementPair pair = enclosingRule.getLabel(labelName);
+                /*
+                // if tree grammar and we have a wildcard, only notice it
+                // when looking for rule labels not token label. x=. should
+                // look like a rule ref since could be subtree.
+                if ( type==TREE_PARSER && pair!=null &&
+                     pair.elementRef.getType()==ANTLRParser.WILDCARD )
+                {
+                    if ( labelType==WILDCARD_TREE_LABEL ) {
+                        labels.add(labelName);
+                        continue;
+                    }
+                    else continue;
+                }
+                 */
+                // if valid label and type is what we're looking for
+				// and not ref to old value val $rule, add to list
+				if ( pair!=null && pair.type==labelType &&
+					 !labelName.equals(el.enclosingRuleName) )
+				{
+					labels.add(labelName);
+				}
+			}
+		}
+		return labels;
+	}
+
+	/** Before generating code, we examine all actions that can have
+	 *  $x.y and $y stuff in them because some code generation depends on
+	 *  Rule.referencedPredefinedRuleAttributes.  I need to remove unused
+	 *  rule labels for example.
+	 */
+	protected void examineAllExecutableActions() {
+		Collection<Rule> rules = getRules();
+		for (Rule r : rules) {
+			// walk all actions within the rule elements, args, and exceptions
+			List<GrammarAST> actions = r.getInlineActions();
+			for (int i = 0; i < actions.size(); i++) {
+				GrammarAST actionAST = actions.get(i);
+				ActionAnalysis sniffer =
+					new ActionAnalysis(this, r.name, actionAST);
+				sniffer.analyze();
+			}
+			// walk any named actions like @init, @after
+			Collection<? extends Object> namedActions = r.getActions().values();
+			for (Object namedAction : namedActions) {
+				GrammarAST actionAST = (GrammarAST)namedAction;
+				ActionAnalysis sniffer =
+					new ActionAnalysis(this, r.name, actionAST);
+				sniffer.analyze();
+			}
+		}
+	}
+
+	/** Remove all labels on rule refs whose target rules have no return value.
+	 *  Do this for all rules in grammar.
+	 */
+	public void checkAllRulesForUselessLabels() {
+		if ( type==LEXER ) {
+			return;
+		}
+		Set<String> rules = nameToRuleMap.keySet();
+		for (String ruleName : rules) {
+			Rule r = getRule(ruleName);
+			removeUselessLabels(r.getRuleLabels());
+			removeUselessLabels(r.getRuleListLabels());
+		}
+	}
+
+	/** A label on a rule is useless if the rule has no return value, no
+	 *  tree or template output, and it is not referenced in an action.
+	 */
+	protected void removeUselessLabels(Map<String, LabelElementPair> ruleToElementLabelPairMap) {
+		if ( ruleToElementLabelPairMap==null ) {
+			return;
+		}
+		Collection<LabelElementPair> labels = ruleToElementLabelPairMap.values();
+		List<String> kill = new ArrayList<String>();
+		for (LabelElementPair pair : labels) {
+			Rule refdRule = getRule(pair.elementRef.getText());
+			if ( refdRule!=null && !refdRule.getHasReturnValue() && !pair.actionReferencesLabel ) {
+				//System.out.println(pair.label.getText()+" is useless");
+				kill.add(pair.label.getText());
+			}
+		}
+		for (int i = 0; i < kill.size(); i++) {
+			String labelToKill = kill.get(i);
+			// System.out.println("kill "+labelToKill);
+			ruleToElementLabelPairMap.remove(labelToKill);
+		}
+	}
+
+	/** Track a rule reference within an outermost alt of a rule.  Used
+	 *  at the moment to decide if $ruleref refers to a unique rule ref in
+	 *  the alt.  Rewrite rules force tracking of all rule AST results.
+	 *
+	 *  This data is also used to verify that all rules have been defined.
+	 */
+	public void altReferencesRule(String enclosingRuleName,
+								  GrammarAST refScopeAST,
+								  GrammarAST refAST,
+								  int outerAltNum)
+	{
+		/* Do nothing for now; not sure need; track S.x as x
+		String scope = null;
+		Grammar scopeG = null;
+		if ( refScopeAST!=null ) {
+			if ( !scopedRuleRefs.contains(refScopeAST) ) {
+				scopedRuleRefs.add(refScopeAST);
+			}
+			scope = refScopeAST.getText();
+		}
+		*/
+		Rule r = getRule(enclosingRuleName);
+		if ( r==null ) {
+			return; // no error here; see NameSpaceChecker
+		}
+		r.trackRuleReferenceInAlt(refAST, outerAltNum);
+		Token refToken = refAST.getToken();
+		if ( !ruleRefs.contains(refAST) ) {
+			ruleRefs.add(refAST);
+		}
+	}
+
+	/** Track a token reference within an outermost alt of a rule.  Used
+	 *  to decide if $tokenref refers to a unique token ref in
+	 *  the alt. Does not track literals!
+	 *
+	 *  Rewrite rules force tracking of all tokens.
+	 */
+	public void altReferencesTokenID(String ruleName, GrammarAST refAST, int outerAltNum) {
+		Rule r = getLocallyDefinedRule(ruleName);
+		if ( r==null ) {
+			return;
+		}
+		r.trackTokenReferenceInAlt(refAST, outerAltNum);
+		if ( !tokenIDRefs.contains(refAST.getToken()) ) {
+			tokenIDRefs.add(refAST.getToken());
+		}
+	}
+
+	/** To yield smaller, more readable code, track which rules have their
+	 *  predefined attributes accessed.  If the rule has no user-defined
+	 *  return values, then don't generate the return value scope classes
+	 *  etc...  Make the rule have void return value.  Don't track for lexer
+	 *  rules.
+	 */
+	public void referenceRuleLabelPredefinedAttribute(String ruleName) {
+		Rule r = getRule(ruleName);
+		if ( r!=null && type!=LEXER ) {
+			// indicate that an action ref'd an attr unless it's in a lexer
+			// so that $ID.text refs don't force lexer rules to define
+			// return values...Token objects are created by the caller instead.
+			r.referencedPredefinedRuleAttributes = true;
+		}
+	}
+
+	public List<? extends Collection<? extends Rule>> checkAllRulesForLeftRecursion() {
+		return sanity.checkAllRulesForLeftRecursion();
+	}
+
+	/** Return a list of left-recursive rules; no analysis can be done
+	 *  successfully on these.  Useful to skip these rules then and also
+	 *  for ANTLRWorks to highlight them.
+	 */
+	public Set<Rule> getLeftRecursiveRules() {
+		if ( nfa==null ) {
+			buildNFA();
+		}
+		if ( leftRecursiveRules!=null ) {
+			return leftRecursiveRules;
+		}
+		sanity.checkAllRulesForLeftRecursion();
+		return leftRecursiveRules;
+	}
+
+	public void checkRuleReference(GrammarAST scopeAST,
+								   GrammarAST refAST,
+								   GrammarAST argsAST,
+								   String currentRuleName)
+	{
+		sanity.checkRuleReference(scopeAST, refAST, argsAST, currentRuleName);
+	}
+
+	/** Rules like "a : ;" and "a : {...} ;" should not generate
+	 *  try/catch blocks for RecognitionException.  To detect this
+	 *  it's probably ok to just look for any reference to an atom
+	 *  that can match some input.  Without that, the rule is unlikely to have
+	 *  anything else.
+	 */
+	public boolean isEmptyRule(GrammarAST block) {
+		BitSet nonEmptyTerminals = new BitSet();
+		nonEmptyTerminals.set(ANTLRParser.TOKEN_REF);
+		nonEmptyTerminals.set(ANTLRParser.STRING_LITERAL);
+		nonEmptyTerminals.set(ANTLRParser.CHAR_LITERAL);
+		nonEmptyTerminals.set(ANTLRParser.WILDCARD);
+		nonEmptyTerminals.set(ANTLRParser.RULE_REF);
+		return findFirstTypeOutsideRewrite(block, nonEmptyTerminals) == null;
+	}
+
+	protected GrammarAST findFirstTypeOutsideRewrite(GrammarAST block, BitSet types) {
+		ArrayList<GrammarAST> worklist = new ArrayList<GrammarAST>();
+		worklist.add(block);
+		while (!worklist.isEmpty()) {
+			GrammarAST current = worklist.remove(worklist.size() - 1);
+			if (current.getType() == ANTLRParser.REWRITE) {
+				continue;
+			}
+
+			if (current.getType() >= 0 && types.get(current.getType())) {
+				return current;
+			}
+
+			worklist.addAll(Arrays.asList(current.getChildrenAsArray()));
+		}
+
+		return null;
+	}
+
+	public boolean isAtomTokenType(int ttype) {
+		return ttype == ANTLRParser.WILDCARD||
+			   ttype == ANTLRParser.CHAR_LITERAL||
+			   ttype == ANTLRParser.CHAR_RANGE||
+			   ttype == ANTLRParser.STRING_LITERAL||
+			   ttype == ANTLRParser.NOT||
+			   (type != LEXER && ttype == ANTLRParser.TOKEN_REF);
+	}
+
+	public int getTokenType(String tokenName) {
+		Integer I;
+		if ( tokenName.charAt(0)=='\'') {
+			I = composite.stringLiteralToTypeMap.get(tokenName);
+		}
+		else { // must be a label like ID
+			I = composite.tokenIDToTypeMap.get(tokenName);
+		}
+		int i = (I!=null)? I :Label.INVALID;
+		//System.out.println("grammar type "+type+" "+tokenName+"->"+i);
+		return i;
+	}
+
+	/** Get the list of tokens that are IDs like BLOCK and LPAREN */
+	public Set<String> getTokenIDs() {
+		return composite.tokenIDToTypeMap.keySet();
+	}
+
+	/** Return an ordered integer list of token types that have no
+	 *  corresponding token ID like INT or KEYWORD_BEGIN; for stuff
+	 *  like 'begin'.
+	 */
+	public Collection<Integer> getTokenTypesWithoutID() {
+		List<Integer> types = new ArrayList<Integer>();
+		for (int t =Label.MIN_TOKEN_TYPE; t<=getMaxTokenType(); t++) {
+			String name = getTokenDisplayName(t);
+			if ( name.charAt(0)=='\'' ) {
+				types.add(Utils.integer(t));
+			}
+		}
+		return types;
+	}
+
+	/** Get a list of all token IDs and literals that have an associated
+	 *  token type.
+	 */
+	public Set<String> getTokenDisplayNames() {
+		Set<String> names = new HashSet<String>();
+		for (int t =Label.MIN_TOKEN_TYPE; t <=getMaxTokenType(); t++) {
+			names.add(getTokenDisplayName(t));
+		}
+		return names;
+	}
+
+	/** Given a literal like (the 3 char sequence with single quotes) 'a',
+	 *  return the int value of 'a'. Convert escape sequences here also.
+	 *  ANTLR's antlr.g parser does not convert escape sequences.
+	 *
+	 *  11/26/2005: I changed literals to always be '...' even for strings.
+	 *  This routine still works though.
+	 */
+	public static int getCharValueFromGrammarCharLiteral(String literal) {
+		switch ( literal.length() ) {
+			case 3 :
+				// 'x'
+				return literal.charAt(1); // no escape char
+			case 4 :
+				// '\x'  (antlr lexer will catch invalid char)
+				if ( Character.isDigit(literal.charAt(2)) ) {
+					ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,
+									   "invalid char literal: "+literal);
+					return -1;
+				}
+				int escChar = literal.charAt(2);
+				int charVal = ANTLRLiteralEscapedCharValue[escChar];
+				if ( charVal==0 ) {
+					// Unnecessary escapes like '\{' should just yield {
+					return escChar;
+				}
+				return charVal;
+			case 8 :
+				// '\u1234'
+				String unicodeChars = literal.substring(3,literal.length()-1);
+				return Integer.parseInt(unicodeChars, 16);
+			default :
+				ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,
+								   "invalid char literal: "+literal);
+				return -1;
+		}
+	}
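+
+	// Illustrative values, assuming the usual entries in ANTLRLiteralEscapedCharValue:
+	//   the 3-char literal 'a'       -> 97  (no escape)
+	//   the 4-char literal '\n'      -> 10  (escaped newline)
+	//   the 8-char literal '\u0041'  -> 65  ('A')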
+
+	/** ANTLR does not convert escape sequences during the parse phase because
+	 *  it could not know how to print String/char literals back out when
+	 *  printing grammars etc...  Someone in China might use the real unicode
+	 *  char in a literal as it will display on their screen; when printing
+	 *  back out, I could not know whether to display or use a unicode escape.
+	 *
+	 *  This routine converts a string literal with possible escape sequences
+	 *  into a pure string of 16-bit char values.  Escapes and unicode \u0000
+	 *  specs are converted to pure chars.  The result is returned in a buffer; people may
+	 *  want to walk/manipulate further.
+	 *
+	 *  The NFA construction routine must know the actual char values.
+	 */
+	public static StringBuffer getUnescapedStringFromGrammarStringLiteral(String literal) {
+		//System.out.println("escape: ["+literal+"]");
+		StringBuffer buf = new StringBuffer();
+		int last = literal.length()-1; // skip quotes on outside
+		for (int i=1; i<last; i++) {
+			char c = literal.charAt(i);
+			if ( c=='\\' ) {
+				i++;
+				c = literal.charAt(i);
+				if ( Character.toUpperCase(c)=='U' ) {
+					// \u0000
+					i++;
+					String unicodeChars = literal.substring(i,i+4);
+					// parse the unicode 16 bit hex value
+					int val = Integer.parseInt(unicodeChars, 16);
+					i+=4-1; // loop will inc by 1; only jump 3 then
+					buf.append((char)val);
+				}
+				else if ( Character.isDigit(c) ) {
+					ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,
+									   "invalid char literal: "+literal);
+					buf.append("\\").append(c);
+				}
+				else {
+					buf.append((char)ANTLRLiteralEscapedCharValue[c]); // normal \x escape
+				}
+			}
+			else {
+				buf.append(c); // simple char x
+			}
+		}
+		//System.out.println("string: ["+buf.toString()+"]");
+		return buf;
+	}
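+
+	// Illustrative behavior (sketch only; again assumes the escape table maps 'n' to newline):
+	//   getUnescapedStringFromGrammarStringLiteral("'a\\nb'")     -> "a" + newline + "b"
+	//   getUnescapedStringFromGrammarStringLiteral("'\\u0041BC'") -> "ABC"
+	// The surrounding single quotes are stripped and each escape collapses to one char.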
+
+	/** Pull your token definitions from an existing grammar in memory.
+	 *  You must use the Grammar() ctor, then this method, then setGrammarContent()
+	 *  to make this work.  This was useful primarily for testing and
+	 *  interpreting grammars until I added import grammar functionality.
+	 *  When you import a grammar you implicitly import its vocabulary as well
+	 *  and keep the same token type values.
+	 *
+	 *  Returns the max token type found.
+	 */
+	public int importTokenVocabulary(Grammar importFromGr) {
+		Set<String> importedTokenIDs = importFromGr.getTokenIDs();
+		for (String tokenID : importedTokenIDs) {
+			int tokenType = importFromGr.getTokenType(tokenID);
+			composite.maxTokenType = Math.max(composite.maxTokenType,tokenType);
+			if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
+				//System.out.println("import token from grammar "+tokenID+"="+tokenType);
+				defineToken(tokenID, tokenType);
+			}
+		}
+		return composite.maxTokenType; // return max found
+	}
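+
+	// Sketch of the call sequence the javadoc above describes (illustrative only,
+	// not upstream code; assumes some Grammar "lexerG" is already built in memory):
+	//   Grammar g = new Grammar();
+	//   g.importTokenVocabulary(lexerG);   // copy token IDs/types >= MIN_TOKEN_TYPE
+	//   g.setGrammarContent(...);          // then load this grammar's own text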
+
+	/** Import the rules/tokens of a delegate grammar. All delegate grammars are
+	 *  read during the ctor of first Grammar created.
+	 *
+	 *  Do not create NFA here because NFA construction needs to hook up with
+	 *  overridden rules in delegation root grammar.
+	 */
+	public void importGrammar(GrammarAST grammarNameAST, String label) {
+		String grammarName = grammarNameAST.getText();
+		//System.out.println("import "+gfile.getName());
+		String gname = grammarName + GRAMMAR_FILE_EXTENSION;
+		BufferedReader br = null;
+		try {
+			String fullName = tool.getLibraryFile(gname);
+			FileReader fr = new FileReader(fullName);
+			br = new BufferedReader(fr);
+			Grammar delegateGrammar;
+			delegateGrammar = new Grammar(tool, gname, composite);
+			delegateGrammar.label = label;
+
+			addDelegateGrammar(delegateGrammar);
+
+			delegateGrammar.parseAndBuildAST(br);
+			delegateGrammar.addRulesForSyntacticPredicates();
+			if ( !validImport(delegateGrammar) ) {
+				ErrorManager.grammarError(ErrorManager.MSG_INVALID_IMPORT,
+										  this,
+										  grammarNameAST.token,
+										  this,
+										  delegateGrammar);
+				return;
+			}
+			if ( this.type==COMBINED &&
+				 (delegateGrammar.name.equals(this.name+grammarTypeToFileNameSuffix[LEXER])||
+				  delegateGrammar.name.equals(this.name+grammarTypeToFileNameSuffix[PARSER])) )
+			{
+				ErrorManager.grammarError(ErrorManager.MSG_IMPORT_NAME_CLASH,
+										  this,
+										  grammarNameAST.token,
+										  this,
+										  delegateGrammar);
+				return;
+			}
+			if ( delegateGrammar.grammarTree!=null ) {
+				// we have a valid grammar
+				// deal with combined grammars
+				if ( delegateGrammar.type == LEXER && this.type == COMBINED ) {
+					// ooops, we wasted some effort; tell lexer to read it in
+					// later
+					lexerGrammarST.add("imports", grammarName);
+					// but, this parser grammar will need the vocab
+					// so add to composite anyway so we suck in the tokens later
+				}
+			}
+			//System.out.println("Got grammar:\n"+delegateGrammar);
+		}
+		catch (IOException ioe) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_OPEN_FILE,
+							   gname,
+							   ioe);
+		}
+		finally {
+			if ( br!=null ) {
+				try {
+					br.close();
+				}
+				catch (IOException ioe) {
+					ErrorManager.error(ErrorManager.MSG_CANNOT_CLOSE_FILE,
+									   gname,
+									   ioe);
+				}
+			}
+		}
+	}
+
+	/** add new delegate to composite tree */
+	protected void addDelegateGrammar(Grammar delegateGrammar) {
+		CompositeGrammarTree t = composite.delegateGrammarTreeRoot.findNode(this);
+		t.addChild(new CompositeGrammarTree(delegateGrammar));
+		// make sure new grammar shares this composite
+		delegateGrammar.composite = this.composite;
+	}
+
+	/** Load a vocab file &lt;vocabName&gt;.tokens and return max token type found. */
+	public int importTokenVocabulary(GrammarAST tokenVocabOptionAST,
+									 String vocabName)
+	{
+		if ( !getGrammarIsRoot() ) {
+			ErrorManager.grammarWarning(ErrorManager.MSG_TOKEN_VOCAB_IN_DELEGATE,
+										this,
+										tokenVocabOptionAST.token,
+										name);
+			return composite.maxTokenType;
+		}
+
+		File fullFile = tool.getImportedVocabFile(vocabName);
+		try {
+			FileReader fr = new FileReader(fullFile);
+			BufferedReader br = new BufferedReader(fr);
+			StreamTokenizer tokenizer = new StreamTokenizer(br);
+			tokenizer.parseNumbers();
+			tokenizer.wordChars('_', '_');
+			tokenizer.eolIsSignificant(true);
+			tokenizer.slashSlashComments(true);
+			tokenizer.slashStarComments(true);
+			tokenizer.ordinaryChar('=');
+			tokenizer.quoteChar('\'');
+			tokenizer.whitespaceChars(' ',' ');
+			tokenizer.whitespaceChars('\t','\t');
+			int lineNum = 1;
+			int token = tokenizer.nextToken();
+			while (token != StreamTokenizer.TT_EOF) {
+				String tokenID;
+				if ( token == StreamTokenizer.TT_WORD ) {
+					tokenID = tokenizer.sval;
+				}
+				else if ( token == '\'' ) {
+					tokenID = "'"+tokenizer.sval+"'";
+				}
+				else {
+					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
+									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
+									   Utils.integer(lineNum));
+					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {}
+					token = tokenizer.nextToken();
+					continue;
+				}
+				token = tokenizer.nextToken();
+				if ( token != '=' ) {
+					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
+									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
+									   Utils.integer(lineNum));
+					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {}
+					token = tokenizer.nextToken();
+					continue;
+				}
+				token = tokenizer.nextToken(); // skip '='
+				if ( token != StreamTokenizer.TT_NUMBER ) {
+					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
+									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
+									   Utils.integer(lineNum));
+					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {}
+					token = tokenizer.nextToken();
+					continue;
+				}
+				int tokenType = (int)tokenizer.nval;
+				token = tokenizer.nextToken();
+				//System.out.println("import "+tokenID+"="+tokenType);
+				composite.maxTokenType = Math.max(composite.maxTokenType,tokenType);
+				defineToken(tokenID, tokenType);
+				lineNum++;
+				if ( token != StreamTokenizer.TT_EOL ) {
+					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
+									   vocabName+CodeGenerator.VOCAB_FILE_EXTENSION,
+									   Utils.integer(lineNum));
+					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {}
+					token = tokenizer.nextToken();
+					continue;
+				}
+				token = tokenizer.nextToken(); // skip newline
+			}
+			br.close();
+		}
+		catch (FileNotFoundException fnfe) {
+			ErrorManager.error(ErrorManager.MSG_CANNOT_FIND_TOKENS_FILE,
+							   fullFile);
+		}
+		catch (IOException ioe) {
+			ErrorManager.error(ErrorManager.MSG_ERROR_READING_TOKENS_FILE,
+							   fullFile,
+							   ioe);
+		}
+		catch (Exception e) {
+			ErrorManager.error(ErrorManager.MSG_ERROR_READING_TOKENS_FILE,
+							   fullFile,
+							   e);
+		}
+		return composite.maxTokenType;
+	}
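+
+	// Illustrative .tokens file contents accepted by the loader above (a sketch
+	// derived from the tokenizer setup: one NAME=type or 'literal'=type per line):
+	//   ID=4
+	//   INT=5
+	//   'begin'=6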
+
+	/** Given a token type, get a meaningful name for it such as the ID
+	 *  or string literal.  If this is a lexer and the ttype is in the
+	 *  char vocabulary, compute an ANTLR-valid (possibly escaped) char literal.
+	 */
+	public String getTokenDisplayName(int ttype) {
+		String tokenName;
+		int index;
+		// inside any target's char range and is lexer grammar?
+		if ( this.type==LEXER &&
+			 ttype >= Label.MIN_CHAR_VALUE && ttype <= Label.MAX_CHAR_VALUE )
+		{
+			return getANTLRCharLiteralForChar(ttype);
+		}
+		// faux label?
+		else if ( ttype<0 ) {
+			tokenName = composite.typeToTokenList.get(Label.NUM_FAUX_LABELS+ttype);
+		}
+		else {
+			// compute index in typeToTokenList for ttype
+			index = ttype-1; // normalize to 0..n-1
+			index += Label.NUM_FAUX_LABELS;     // jump over faux tokens
+
+			if ( index<composite.typeToTokenList.size() ) {
+				tokenName = composite.typeToTokenList.get(index);
+				if ( tokenName!=null &&
+					 tokenName.startsWith(AUTO_GENERATED_TOKEN_NAME_PREFIX) )
+				{
+					tokenName = composite.typeToStringLiteralList.get(ttype);
+				}
+			}
+			else {
+				tokenName = String.valueOf(ttype);
+			}
+		}
+		//System.out.println("getTokenDisplayName ttype="+ttype+", index="+index+", name="+tokenName);
+		return tokenName;
+	}
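+
+	// Illustrative results (sketch, using hypothetical token assignments):
+	//   lexer grammar, ttype within the char range -> an escaped char literal like 'a'
+	//   parser grammar, ttype 4 defined as ID      -> "ID"
+	//   auto-generated name backed by a literal    -> the literal, e.g. 'begin'
+	//   ttype beyond the known token list          -> the type number as a string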
+
+	/** Get the list of ANTLR String literals */
+	public Set<String> getStringLiterals() {
+		return composite.stringLiteralToTypeMap.keySet();
+	}
+
+	public String getGrammarTypeString() {
+		return grammarTypeToString[type];
+	}
+
+	public int getGrammarMaxLookahead() {
+		if ( global_k>=0 ) {
+			return global_k;
+		}
+		Object k = getOption("k");
+		if ( k==null ) {
+			global_k = 0;
+		}
+		else if (k instanceof Integer) {
+			Integer kI = (Integer)k;
+			global_k = kI;
+		}
+		else {
+			// must be String "*"
+			if ( k.equals("*") ) {  // this is the default anyway
+				global_k = 0;
+			}
+		}
+		return global_k;
+	}
+
+	/** Save the option key/value pair and process it; return the key
+	 *  or null if invalid option.
+	 */
+	public String setOption(String key, Object value, Token optionsStartToken) {
+		if ( legalOption(key) ) {
+			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
+									  this,
+									  optionsStartToken,
+									  key);
+			return null;
+		}
+		if ( !optionIsValid(key, value) ) {
+			return null;
+		}
+        if ( key.equals("backtrack") && value.toString().equals("true") ) {
+            composite.getRootGrammar().atLeastOneBacktrackOption = true;
+        }
+        if ( options==null ) {
+			options = new HashMap<String, Object>();
+		}
+		options.put(key, value);
+		return key;
+	}
+
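+	/** Note the inverted sense: this returns true when the option is NOT in the
+	 *  legal option set for this grammar type, which is why setOption() reports
+	 *  MSG_ILLEGAL_OPTION when this method returns true.
+	 */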
+	public boolean legalOption(String key) {
+		switch ( type ) {
+			case LEXER :
+				return !legalLexerOptions.contains(key);
+			case PARSER :
+				return !legalParserOptions.contains(key);
+			case TREE_PARSER :
+				return !legalTreeParserOptions.contains(key);
+			default :
+				return !legalParserOptions.contains(key);
+		}
+	}
+
+	public void setOptions(Map<String, Object> options, Token optionsStartToken) {
+		if ( options==null ) {
+			this.options = null;
+			return;
+		}
+		Set<String> keys = options.keySet();
+		for (Iterator<String> it = keys.iterator(); it.hasNext();) {
+			String optionName = it.next();
+			Object optionValue = options.get(optionName);
+			String stored=setOption(optionName, optionValue, optionsStartToken);
+			if ( stored==null ) {
+				it.remove();
+			}
+		}
+	}
+
+	public Object getOption(String key) {
+		return composite.getOption(key);
+	}
+
+	public Object getLocallyDefinedOption(String key) {
+		Object value = null;
+		if ( options!=null ) {
+			value = options.get(key);
+		}
+		if ( value==null ) {
+			value = defaultOptions.get(key);
+		}
+		return value;
+	}
+
+	public Object getBlockOption(GrammarAST blockAST, String key) {
+		String v = (String)blockAST.getBlockOption(key);
+		if ( v!=null ) {
+			return v;
+		}
+		if ( type==Grammar.LEXER ) {
+			return defaultLexerBlockOptions.get(key);
+		}
+		return defaultBlockOptions.get(key);
+	}
+
+	public int getUserMaxLookahead(int decision) {
+		int user_k = 0;
+		GrammarAST blockAST = nfa.grammar.getDecisionBlockAST(decision);
+		Object k = blockAST.getBlockOption("k");
+		if ( k==null ) {
+			user_k = nfa.grammar.getGrammarMaxLookahead();
+			return user_k;
+		}
+		if (k instanceof Integer) {
+			Integer kI = (Integer)k;
+			user_k = kI;
+		}
+		else {
+			// must be String "*"
+			if ( k.equals("*") ) {
+				user_k = 0;
+			}
+		}
+		return user_k;
+	}
+
+	public boolean getAutoBacktrackMode(int decision) {
+		NFAState decisionNFAStartState = getDecisionNFAStartState(decision);
+		String autoBacktrack =
+			(String)getBlockOption(decisionNFAStartState.associatedASTNode, "backtrack");
+
+		if ( autoBacktrack==null ) {
+			autoBacktrack = (String)nfa.grammar.getOption("backtrack");
+		}
+		return autoBacktrack!=null&&autoBacktrack.equals("true");
+	}
+
+	public boolean optionIsValid(String key, Object value) {
+		return true;
+	}
+
+	public boolean buildAST() {
+		String outputType = (String)getOption("output");
+		if ( outputType!=null ) {
+			return outputType.toString().equals("AST");
+		}
+		return false;
+	}
+
+	public boolean rewriteMode() {
+		Object outputType = getOption("rewrite");
+		if ( outputType!=null ) {
+			return outputType.toString().equals("true");
+		}
+		return false;
+	}
+
+	public boolean isBuiltFromString() {
+		return builtFromString;
+	}
+
+	public boolean buildTemplate() {
+		String outputType = (String)getOption("output");
+		if ( outputType!=null ) {
+			return outputType.toString().equals("template");
+		}
+		return false;
+	}
+
+	public Collection<Rule> getRules() {
+		return nameToRuleMap.values();
+	}
+
+	/** Get the set of Rules that need to have manual delegations
+	 *  like "void rule() { importedGrammar.rule(); }"
+	 *
+	 *  If this grammar is master, get list of all rule definitions from all
+	 *  delegate grammars.  Only master has complete interface from combined
+	 *  grammars...we will generate delegates as helper objects.
+	 *
+	 *  Composite grammars that are not the root/master do not have complete
+	 *  interfaces.  It is not my intention that people use subcomposites.
+	 *  Only the outermost grammar should be used from outside code.  The
+	 *  other grammar components are specifically generated to work only
+	 *  with the master/root.
+	 *
+	 *  delegatedRules = imported - overridden
+	 */
+	public Set<? extends Rule> getDelegatedRules() {
+		return composite.getDelegatedRules(this);
+	}
+
+	/** Get set of all rules imported from all delegate grammars even if
+	 *  indirectly delegated.
+	 */
+	public Set<? extends Rule> getAllImportedRules() {
+		return composite.getAllImportedRules(this);
+	}
+
+	/** Get list of all delegates from all grammars directly or indirectly
+	 *  imported into this grammar.
+	 */
+	public List<Grammar> getDelegates() {
+		return composite.getDelegates(this);
+	}
+
+	public boolean getHasDelegates() {
+	   return !getDelegates().isEmpty();
+	}
+
+	public List<String> getDelegateNames() {
+		// compute delegates:{Grammar g | return g.name;}
+		List<String> names = new ArrayList<String>();
+		List<Grammar> delegates = composite.getDelegates(this);
+		if ( delegates!=null ) {
+			for (Grammar g : delegates) {
+				names.add(g.name);
+			}
+		}
+		return names;
+	}
+
+	public List<Grammar> getDirectDelegates() {
+		return composite.getDirectDelegates(this);
+	}
+
+	/** Get delegates below direct delegates */
+	public List<Grammar> getIndirectDelegates() {
+		return composite.getIndirectDelegates(this);
+	}
+
+	/** Get list of all delegators.  This amounts to the grammars on the path
+	 *  to the root of the delegation tree.
+	 */
+	public List<Grammar> getDelegators() {
+		return composite.getDelegators(this);
+	}
+
+	/** Who's my direct parent grammar? */
+	public Grammar getDelegator() {
+		return composite.getDelegator(this);
+	}
+
+	public Set<Rule> getDelegatedRuleReferences() {
+		return delegatedRuleReferences;
+	}
+
+	public boolean getGrammarIsRoot() {
+		return composite.delegateGrammarTreeRoot.grammar == this;
+	}
+
+	public void setRuleAST(String ruleName, GrammarAST t) {
+		Rule r = getLocallyDefinedRule(ruleName);
+		if ( r!=null ) {
+			r.tree = t;
+			r.EORNode = t.getLastChild();
+		}
+	}
+
+	public NFAState getRuleStartState(String ruleName) {
+		return getRuleStartState(null, ruleName);
+	}
+
+	public NFAState getRuleStartState(String scopeName, String ruleName) {
+		Rule r = getRule(scopeName, ruleName);
+		if ( r!=null ) {
+			//System.out.println("getRuleStartState("+scopeName+", "+ruleName+")="+r.startState);
+			return r.startState;
+		}
+		//System.out.println("getRuleStartState("+scopeName+", "+ruleName+")=null");
+		return null;
+	}
+
+	public String getRuleModifier(String ruleName) {
+		Rule r = getRule(ruleName);
+		if ( r!=null ) {
+			return r.modifier;
+		}
+		return null;
+	}
+
+	public NFAState getRuleStopState(String ruleName) {
+		Rule r = getRule(ruleName);
+		if ( r!=null ) {
+			return r.stopState;
+		}
+		return null;
+	}
+
+	public int assignDecisionNumber(NFAState state) {
+		decisionCount++;
+		state.setDecisionNumber(decisionCount);
+		return decisionCount;
+	}
+
+	protected Decision getDecision(int decision) {
+		int index = decision-1;
+		if ( index >= indexToDecision.size() ) {
+			return null;
+		}
+		Decision d = indexToDecision.get(index);
+		return d;
+	}
+
+	public List<Decision> getDecisions() {
+		return indexToDecision;
+	}
+
+	protected Decision createDecision(int decision) {
+		int index = decision-1;
+		if ( index < indexToDecision.size() ) {
+			return getDecision(decision); // don't recreate
+		}
+		Decision d = new Decision();
+		d.decision = decision;
+		d.grammar = this;
+		indexToDecision.setSize(getNumberOfDecisions());
+		indexToDecision.set(index, d);
+		return d;
+	}
+
+	public List<NFAState> getDecisionNFAStartStateList() {
+		List<NFAState> states = new ArrayList<NFAState>(100);
+		for (int d = 0; d < indexToDecision.size(); d++) {
+			Decision dec = indexToDecision.get(d);
+			states.add(dec.startState);
+		}
+		return states;
+	}
+
+	public NFAState getDecisionNFAStartState(int decision) {
+		Decision d = getDecision(decision);
+		if ( d==null ) {
+			return null;
+		}
+		return d.startState;
+	}
+
+	public DFA getLookaheadDFA(int decision) {
+		Decision d = getDecision(decision);
+		if ( d==null ) {
+			return null;
+		}
+		return d.dfa;
+	}
+
+	public GrammarAST getDecisionBlockAST(int decision) {
+		Decision d = getDecision(decision);
+		if ( d==null ) {
+			return null;
+		}
+		return d.blockAST;
+	}
+
+	/** Returns a list of column numbers for all decisions
+	 *  on a particular line so ANTLRWorks can choose the decision
+	 *  depending on the location of the cursor (otherwise,
+	 *  ANTLRWorks has to give the *exact* location, which
+	 *  is not easy from the user's point of view).
+	 *
+	 *  This is not particularly fast as it walks the entire line:col&rarr;DFA map
+	 *  looking for a prefix of "line:".
+	 */
+	public List<Integer> getLookaheadDFAColumnsForLineInFile(int line) {
+		String prefix = line+":";
+		List<Integer> columns = new ArrayList<Integer>();
+		for (String key : lineColumnToLookaheadDFAMap.keySet()) {
+			if(key.startsWith(prefix)) {
+				columns.add(Integer.valueOf(key.substring(prefix.length())));
+			}
+		}
+		return columns;
+	}
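+
+	// Illustrative example (sketch): if lineColumnToLookaheadDFAMap contains keys
+	// "12:5" and "12:40", then getLookaheadDFAColumnsForLineInFile(12) returns
+	// [5, 40] (in the map's iteration order).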
+
+	/** Useful for ANTLRWorks to map position in file to the DFA for display */
+	public DFA getLookaheadDFAFromPositionInFile(int line, int col) {
+		return lineColumnToLookaheadDFAMap.get(
+			new StringBuffer().append(line).append(":").append(col).toString());
+	}
+
+	public Map<String, DFA> getLineColumnToLookaheadDFAMap() {
+		return lineColumnToLookaheadDFAMap;
+	}
+
+	/*
+	public void setDecisionOptions(int decision, Map options) {
+		Decision d = createDecision(decision);
+		d.options = options;
+	}
+
+	public void setDecisionOption(int decision, String name, Object value) {
+		Decision d = getDecision(decision);
+		if ( d!=null ) {
+			if ( d.options==null ) {
+				d.options = new HashMap();
+			}
+			d.options.put(name,value);
+		}
+	}
+
+	public Map getDecisionOptions(int decision) {
+		Decision d = getDecision(decision);
+		if ( d==null ) {
+			return null;
+		}
+		return d.options;
+    }
+    */
+
+	public int getNumberOfDecisions() {
+		return decisionCount;
+	}
+
+	public int getNumberOfCyclicDecisions() {
+		int n = 0;
+		for (int i=1; i<=getNumberOfDecisions(); i++) {
+			Decision d = getDecision(i);
+			if ( d.dfa!=null && d.dfa.isCyclic() ) {
+				n++;
+			}
+		}
+		return n;
+	}
+
+	/** Set the lookahead DFA for a particular decision.  This means
+	 *  that the appropriate AST node must be updated to have the new lookahead
+	 *  DFA.  This method could be used to properly set the DFAs without
+	 *  using the createLookaheadDFAs() method.  You could do this:
+	 *
+	 *    Grammar g = new Grammar("...");
+	 *    g.setLookaheadDFA(1, dfa1);
+	 *    g.setLookaheadDFA(2, dfa2);
+	 *    ...
+	 */
+	public void setLookaheadDFA(int decision, DFA lookaheadDFA) {
+		Decision d = createDecision(decision);
+		d.dfa = lookaheadDFA;
+		GrammarAST ast = d.startState.associatedASTNode;
+		ast.setLookaheadDFA(lookaheadDFA);
+	}
+
+	public void setDecisionNFA(int decision, NFAState state) {
+		Decision d = createDecision(decision);
+		d.startState = state;
+	}
+
+	public void setDecisionBlockAST(int decision, GrammarAST blockAST) {
+		//System.out.println("setDecisionBlockAST("+decision+", "+blockAST.token);
+		Decision d = createDecision(decision);
+		d.blockAST = blockAST;
+	}
+
+	public boolean allDecisionDFAHaveBeenCreated() {
+		return allDecisionDFACreated;
+	}
+
+	/** How many token types have been allocated so far? */
+	public int getMaxTokenType() {
+		return composite.maxTokenType;
+	}
+
+	/** What is the max char value possible for this grammar's target?  Use
+	 *  unicode max if no target defined.
+	 */
+	public int getMaxCharValue() {
+		if ( generator!=null ) {
+			return generator.target.getMaxCharValue(generator);
+		}
+		else {
+			return Label.MAX_CHAR_VALUE;
+		}
+	}
+
+	/** Return a set of all possible token or char types for this grammar */
+	public IntSet getTokenTypes() {
+		if ( type==LEXER ) {
+			return getAllCharValues();
+		}
+		return IntervalSet.of(Label.MIN_TOKEN_TYPE, getMaxTokenType());
+	}
+
+	/** If there is a char vocabulary, use it; else return min to max char
+	 *  as defined by the target.  If no target, use max unicode char value.
+	 */
+	public IntSet getAllCharValues() {
+		if ( charVocabulary!=null ) {
+			return charVocabulary;
+		}
+		IntSet allChar = IntervalSet.of(Label.MIN_CHAR_VALUE, getMaxCharValue());
+		return allChar;
+	}
+
+	/** Return a string representing the escaped char for code c.  E.g., If c
+	 *  has value 0x100, you will get "\u0100".  ASCII gets the usual
+	 *  char (non-hex) representation.  Control characters are spit out
+	 *  as unicode.  While this is specially set up for returning Java strings,
+	 *  it can be used by any language target that has the same syntax. :)
+	 *
+	 *  11/26/2005: I changed this to use double quotes, consistent with antlr.g
+	 *  12/09/2005: I changed so everything is single quotes
+	 */
+	public static String getANTLRCharLiteralForChar(int c) {
+		if ( c<Label.MIN_CHAR_VALUE ) {
+			ErrorManager.internalError("invalid char value "+c);
+			return "'<INVALID>'";
+		}
+		if ( c<ANTLRLiteralCharValueEscape.length && ANTLRLiteralCharValueEscape[c]!=null ) {
+			return '\''+ANTLRLiteralCharValueEscape[c]+'\'';
+		}
+		if ( Character.UnicodeBlock.of((char)c)==Character.UnicodeBlock.BASIC_LATIN &&
+			 !Character.isISOControl((char)c) ) {
+			if ( c=='\\' ) {
+				return "'\\\\'";
+			}
+			if ( c=='\'') {
+				return "'\\''";
+			}
+			return '\''+Character.toString((char)c)+'\'';
+		}
+		// turn on the bit above max "\uFFFF" value so that we pad with zeros
+		// then only take last 4 digits
+		String hex = Integer.toHexString(c|0x10000).toUpperCase().substring(1,5);
+		String unicodeStr = "'\\u"+hex+"'";
+		return unicodeStr;
+	}
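+
+	// Illustrative results of the method above (sketch; the newline case assumes the
+	// ANTLRLiteralCharValueEscape table covers '\n'):
+	//   getANTLRCharLiteralForChar('a')   -> "'a'"        (plain quoted char)
+	//   getANTLRCharLiteralForChar('\n')  -> "'\\n'"      (escape-table form)
+	//   getANTLRCharLiteralForChar(0x100) -> "'\\u0100'"  (zero-padded unicode escape)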
+
+	/** For lexer grammars, return everything in unicode not in set.
+	 *  For parser and tree grammars, return everything in token space
+	 *  from MIN_TOKEN_TYPE to last valid token type or char value.
+	 */
+	public IntSet complement(IntSet set) {
+		//System.out.println("complement "+set.toString(this));
+		//System.out.println("vocabulary "+getTokenTypes().toString(this));
+		IntSet c = set.complement(getTokenTypes());
+		//System.out.println("result="+c.toString(this));
+		return c;
+	}
+
+	public IntSet complement(int atom) {
+		return complement(IntervalSet.of(atom));
+	}
+
+	/** Given set tree like ( SET A B ), check that A and B
+	 *  are both valid sets themselves, else we must treat it like a BLOCK
+	 */
+	public boolean isValidSet(TreeToNFAConverter nfabuilder, GrammarAST t) {
+		boolean valid;
+		try {
+			//System.out.println("parse BLOCK as set tree: "+t.toStringTree());
+			int alts = nfabuilder.testBlockAsSet(t);
+			valid = alts > 1;
+		}
+		catch (RecognitionException re) {
+			// The rule did not parse as a set; ignore the exception and treat as invalid
+			valid = false;
+		}
+		//System.out.println("valid? "+valid);
+		return valid;
+	}
+
+	/** Get the set equivalent (if any) of the indicated rule from this
+	 *  grammar.  Mostly used in the lexer to do ~T for some fragment rule
+	 *  T.  If the rule AST has a SET use that.  If the rule is a single char
+	 *  convert it to a set and return.  If rule is not a simple set (w/o actions)
+	 *  then return null.
+	 *  Rules have AST form:
+	 *
+	 *		^( RULE ID modifier ARG RET SCOPE block EOR )
+	 */
+	public IntSet getSetFromRule(TreeToNFAConverter nfabuilder, String ruleName)
+		throws RecognitionException
+	{
+		Rule r = getRule(ruleName);
+		if ( r==null ) {
+			return null;
+		}
+		IntSet elements;
+		//System.out.println("parsed tree: "+r.tree.toStringTree());
+		elements = nfabuilder.setRule(r.tree);
+		//System.out.println("elements="+elements);
+		return elements;
+	}
+
+	/** Decisions are linked together with transition(1).  Count how
+	 *  many there are.  This is here rather than in NFAState because
+	 *  a grammar decides how NFAs are put together to form a decision.
+	 */
+	public int getNumberOfAltsForDecisionNFA(NFAState decisionState) {
+		if ( decisionState==null ) {
+			return 0;
+		}
+		int n = 1;
+		NFAState p = decisionState;
+		while ( p.transition[1] !=null ) {
+			n++;
+			p = (NFAState)p.transition[1].target;
+		}
+		return n;
+	}
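+
+	// Illustrative count (sketch): for a decision built from (A | B | C), the alt
+	// start states are chained via transition[1], so this walk sees two extra hops
+	// and returns 3, matching the linking convention noted in the javadoc above.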
+
+	/** Get the ith alternative (1..n) from a decision; return null when
+	 *  an invalid alt is requested.  I must count in to find the right
+	 *  alternative number.  For (A|B), you get NFA structure (roughly):
+	 *
+	 *  o-&gt;o-A-&gt;o
+	 *  |
+	 *  o-&gt;o-B-&gt;o
+	 *
+	 *  This routine returns the leftmost state for each alt.  So alt=1 returns
+	 *  the upper-left-most state in this structure.
+	 */
+	public NFAState getNFAStateForAltOfDecision(NFAState decisionState, int alt) {
+		if ( decisionState==null || alt<=0 ) {
+			return null;
+		}
+		int n = 1;
+		NFAState p = decisionState;
+		while ( p!=null ) {
+			if ( n==alt ) {
+				return p;
+			}
+			n++;
+			Transition next = p.transition[1];
+			p = null;
+			if ( next!=null ) {
+				p = (NFAState)next.target;
+			}
+		}
+		return null;
+	}
+
+	/*
+	public void computeRuleFOLLOWSets() {
+		if ( getNumberOfDecisions()==0 ) {
+			createNFAs();
+		}
+		for (Iterator it = getRules().iterator(); it.hasNext();) {
+			Rule r = (Rule)it.next();
+			if ( r.isSynPred ) {
+				continue;
+			}
+			LookaheadSet s = ll1Analyzer.FOLLOW(r);
+			System.out.println("FOLLOW("+r.name+")="+s);
+		}
+	}
+	*/
+
+	public LookaheadSet FIRST(NFAState s) {
+		return ll1Analyzer.FIRST(s);
+	}
+
+	public LookaheadSet LOOK(NFAState s) {
+		return ll1Analyzer.LOOK(s);
+	}
+
+	public void setCodeGenerator(CodeGenerator generator) {
+		this.generator = generator;
+	}
+
+	public CodeGenerator getCodeGenerator() {
+		return generator;
+	}
+
+	public GrammarAST getGrammarTree() {
+		return grammarTree;
+	}
+
+	public void setGrammarTree(GrammarAST value) {
+		grammarTree = value;
+	}
+
+	public Tool getTool() {
+		return tool;
+	}
+
+	public void setTool(Tool tool) {
+		this.tool = tool;
+	}
+
+	/** given a token type and the text of the literal, come up with a
+	 *  decent token type label.  For now it's just T&lt;type&gt;.  Actually,
+	 *  if there is an aliased name from tokens like PLUS='+', use it.
+	 */
+	public String computeTokenNameFromLiteral(int tokenType, String literal) {
+		return AUTO_GENERATED_TOKEN_NAME_PREFIX +tokenType;
+	}
+
+	@Override
+	public String toString() {
+	//	return "FFFFFFFFFFFFFF";
+		return grammarTreeToString(grammarTree);
+	}
+
+	public String grammarTreeToString(GrammarAST t) {
+		return grammarTreeToString(t, true);
+	}
+
+	public String grammarTreeToString(GrammarAST t, boolean showActions) {
+		String s;
+		try {
+			s = t.getLine()+":"+(t.getCharPositionInLine()+1)+": ";
+			s += new ANTLRTreePrinter(new CommonTreeNodeStream(t)).toString(this, showActions);
+		}
+		catch (Exception e) {
+			s = "<invalid or missing tree structure>";
+		}
+		return s;
+	}
+
+	public void printGrammar(PrintStream output) {
+		ANTLRTreePrinter printer = new ANTLRTreePrinter(new CommonTreeNodeStream(getGrammarTree()));
+		try {
+			String g = printer.toString(this, false);
+			output.println(g);
+		}
+		catch (RecognitionException re) {
+			ErrorManager.error(ErrorManager.MSG_SYNTAX_ERROR,re);
+		}
+	}
+
+}
diff --git a/tool/src/main/java/org/antlr/tool/GrammarAST.java b/tool/src/main/java/org/antlr/tool/GrammarAST.java
new file mode 100644
index 0000000..fdddb11
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarAST.java
@@ -0,0 +1,572 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.NFAState;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.misc.IntSet;
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.CommonTree;
+import org.antlr.runtime.tree.Tree;
+import org.stringtemplate.v4.ST;
+
+import java.util.*;
+
+/** Grammars are first converted to ASTs using this class and then are
+ *  converted to NFAs via a tree walker.
+ *
+ *  The reader may notice that I have made a very non-OO decision in this
+ *  class to track variables for many different kinds of nodes.  It wastes
+ *  space for nodes that don't need the values and OO principles cry out
+ *  for a new class type for each kind of node in my tree.  I am doing this
+ *  on purpose for a variety of reasons.  I don't like using the type
+ *  system for different node types; it yields too many damn class files
+ *  which I hate.  Perhaps if I put them all in one file.  Most importantly
+ *  though I hate all the type casting that would have to go on.  I would
+ *  have all sorts of extra work to do.  Ick.  Anyway, I'm doing all this
+ *  on purpose, not out of ignorance. ;)
+ */
+public class GrammarAST extends CommonTree {
+	static int count = 0;
+
+	public int ID = ++count;
+
+	private String textOverride;
+
+    public String enclosingRuleName;
+
+    /** If this is a decision node, what is the lookahead DFA? */
+    public DFA lookaheadDFA = null;
+
+    /** What NFA start state was built from this node? */
+    public NFAState NFAStartState = null;
+
+	/** This is used for TREE_BEGIN nodes to point into
+	 *  the NFA.  TREE_BEGINs point at left edge of DOWN for LOOK computation
+     *  purposes (Nullable tree child list needs special code gen when matching).
+	 */
+	public NFAState NFATreeDownState = null;
+
+	/** Rule ref nodes, token refs, set, and NOT set refs need to track their
+	 *  location in the generated NFA so that local FOLLOW sets can be
+	 *  computed during code gen for automatic error recovery.
+	 */
+	public NFAState followingNFAState = null;
+
+	/** If this is a SET node, what are the elements? */
+    protected IntSet setValue = null;
+
+    /** If this is a BLOCK node, track options here */
+    protected Map<String,Object> blockOptions;
+
+	/** If this is a BLOCK node for a rewrite rule, track referenced
+	 *  elements here.  Don't track elements in nested subrules.
+	 */
+	public Set<GrammarAST> rewriteRefsShallow;
+
+	/*	If REWRITE node, track EVERY element and label ref to right of ->
+	 *  for this rewrite rule.  There could be multiple of these per
+	 *  rule:
+	 *
+	 *     a : ( ... -> ... | ... -> ... ) -> ... ;
+	 *
+	 *  We may need a list of all refs to do definitions for whole rewrite
+	 *  later.
+	 *
+	 *  If BLOCK then tracks every element at that level and below.
+	 */
+	public Set<GrammarAST> rewriteRefsDeep;
+
+	public Map<String,Object> terminalOptions;
+
+	/** if this is an ACTION node, this is the outermost enclosing
+	 *  alt num in rule.  For actions, define.g sets these (used to
+	 *  be codegen.g).  We need these set so we can examine actions
+	 *  early, before code gen, for refs to rule predefined properties
+	 *  and rule labels.  For most part define.g sets outerAltNum, but
+	 *  codegen.g does the ones for %foo(a={$ID.text}) type refs as
+	 *  the {$ID...} is not seen as an action until code gen pulls apart.
+	 */
+	public int outerAltNum;
+
+	/** if this is a TOKEN_REF or RULE_REF node, this is the code ST
+	 *  generated for this node.  We need to update it later to add
+	 *  a label if someone does $tokenref or $ruleref in an action.
+	 */
+	public ST code;
+
+    /**
+     * Return the options set on this BLOCK node, or null if none were set.
+     */
+    public Map<String, Object> getBlockOptions() {
+        return blockOptions;
+    }
+
+    /**
+     * Replace the options map for this BLOCK node.
+     * @param blockOptions the option key/value pairs to associate with this block
+     */
+    public void setBlockOptions(Map<String, Object> blockOptions) {
+        this.blockOptions = blockOptions;
+    }
+
+	public GrammarAST() {}
+
+	@SuppressWarnings("OverridableMethodCallInConstructor")
+	public GrammarAST(int t, String txt) {
+		initialize(t,txt);
+	}
+
+	@SuppressWarnings("OverridableMethodCallInConstructor")
+	public GrammarAST(Token token) {
+		initialize(token);
+	}
+
+	public void initialize(int i, String s) {
+        token = new CommonToken(i,s);
+		token.setTokenIndex(-1);
+    }
+
+    public void initialize(Tree ast) {
+		GrammarAST t = ((GrammarAST)ast);
+		this.startIndex = t.startIndex;
+		this.stopIndex = t.stopIndex;
+		this.token = t.token;
+		this.enclosingRuleName = t.enclosingRuleName;
+		this.setValue = t.setValue;
+		this.blockOptions = t.blockOptions;
+		this.outerAltNum = t.outerAltNum;
+	}
+
+    public void initialize(Token token) {
+        this.token = token;
+		if ( token!=null ) {
+			startIndex = token.getTokenIndex();
+			stopIndex = startIndex;
+		}
+    }
+
+    public DFA getLookaheadDFA() {
+        return lookaheadDFA;
+    }
+
+    public void setLookaheadDFA(DFA lookaheadDFA) {
+        this.lookaheadDFA = lookaheadDFA;
+    }
+
+    public NFAState getNFAStartState() {
+        return NFAStartState;
+    }
+
+    public void setNFAStartState(NFAState nfaStartState) {
+		this.NFAStartState = nfaStartState;
+	}
+
+	/** Save the option key/value pair and process it; return the key
+	 *  or null if invalid option.
+	 */
+	public String setBlockOption(Grammar grammar, String key, Object value) {
+		if ( blockOptions == null ) {
+			blockOptions = new HashMap<String, Object>();
+		}
+		return setOption(blockOptions, Grammar.legalBlockOptions, grammar, key, value);
+	}
+
+	public String setTerminalOption(Grammar grammar, String key, Object value) {
+		if ( terminalOptions == null ) {
+			terminalOptions = new HashMap<String,Object>();
+		}
+		return setOption(terminalOptions, Grammar.legalTokenOptions, grammar, key, value);
+	}
+
+	public String setOption(Map<String, Object> options, Set<String> legalOptions, Grammar grammar, String key, Object value) {
+		if ( !legalOptions.contains(key) ) {
+			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
+									  grammar,
+									  token,
+									  key);
+			return null;
+		}
+		if ( value instanceof String ) {
+			String vs = (String)value;
+			if ( vs.charAt(0)=='"' ) {
+				value = vs.substring(1,vs.length()-1); // strip quotes
+            }
+        }
+		if ( key.equals("k") ) {
+			grammar.numberOfManualLookaheadOptions++;
+		}
+        if ( key.equals("backtrack") && value.toString().equals("true") ) {
+            grammar.composite.getRootGrammar().atLeastOneBacktrackOption = true;
+        }
+        options.put(key, value);
+		return key;
+    }
+
+    public Object getBlockOption(String key) {
+		Object value = null;
+		if ( blockOptions != null ) {
+			value = blockOptions.get(key);
+		}
+		return value;
+	}
+
+    public void setOptions(Grammar grammar, Map<String, Object> options) {
+		if ( options==null ) {
+			this.blockOptions = null;
+			return;
+		}
+		String[] keys = options.keySet().toArray(new String[options.size()]);
+		for (String optionName : keys) {
+			String stored= setBlockOption(grammar, optionName, options.get(optionName));
+			if ( stored==null ) {
+				options.remove(optionName);
+			}
+		}
+    }
+
+    @Override
+    public String getText() {
+		if ( textOverride!=null ) return textOverride;
+        if ( token!=null ) {
+            return token.getText();
+        }
+        return "";
+    }
+
+	public void setType(int type) {
+		token.setType(type);
+	}
+
+	public void setText(String text) {
+		textOverride = text; // don't alter the token as others might see it
+	}
+
+    @Override
+    public int getType() {
+        if ( token!=null ) {
+            return token.getType();
+        }
+        return -1;
+    }
+
+    @Override
+    public int getLine() {
+		int line=0;
+        if ( token!=null ) {
+            line = token.getLine();
+        }
+		if ( line==0 ) {
+			Tree child = getChild(0);
+			if ( child!=null ) {
+				line = child.getLine();
+			}
+		}
+        return line;
+    }
+
+    @Override
+    public int getCharPositionInLine(){
+		int col=0;
+        if ( token!=null ) {
+            col = token.getCharPositionInLine();
+        }
+		if ( col==0 ) {
+			Tree child = getChild(0);
+			if ( child!=null ) {
+				col = child.getCharPositionInLine();
+			}
+		}
+        return col;
+    }
+
+    public void setLine(int line) {
+        token.setLine(line);
+    }
+
+    public void setCharPositionInLine(int value){
+        token.setCharPositionInLine(value);
+    }
+
+ 	public IntSet getSetValue() {
+        return setValue;
+    }
+
+    public void setSetValue(IntSet setValue) {
+        this.setValue = setValue;
+    }
+
+    public GrammarAST getLastChild() {
+        if (getChildCount() == 0)
+            return null;
+        return (GrammarAST)getChild(getChildCount() - 1);
+    }
+
+    public GrammarAST getNextSibling() {
+        return (GrammarAST)getParent().getChild(getChildIndex() + 1);
+    }
+
+    public GrammarAST getLastSibling() {
+        Tree parent = getParent();
+        if ( parent==null ) {
+            return null;
+        }
+        return (GrammarAST)parent.getChild(parent.getChildCount() - 1);
+    }
+
+    public GrammarAST[] getChildrenAsArray() {
+		List<? extends Object> children = getChildren();
+		if (children == null) {
+			return new GrammarAST[0];
+		}
+
+        return children.toArray(new GrammarAST[children.size()]);
+    }
+
+    private static final GrammarAST DescendantDownNode = new GrammarAST(Token.DOWN, "DOWN");
+    private static final GrammarAST DescendantUpNode = new GrammarAST(Token.UP, "UP");
+
+    public static List<Tree> descendants(Tree root){
+        return descendants(root, false);
+    }
+
+    public static List<Tree> descendants(Tree root, boolean insertDownUpNodes){
+        List<Tree> result = new ArrayList<Tree>();
+        int count = root.getChildCount();
+
+        if (insertDownUpNodes){
+            result.add(root);
+            result.add(DescendantDownNode);
+
+            for (int i = 0 ; i < count ; i++){
+                Tree child = root.getChild(i);
+                for (Tree subchild : descendants(child, true))
+                    result.add(subchild);
+            }
+
+            result.add(DescendantUpNode);
+        }else{
+            result.add(root);
+            for (int i = 0 ; i < count ; i++){
+                Tree child = root.getChild(i);
+                for (Tree subchild : descendants(child, false))
+                    result.add(subchild);
+            }
+        }
+
+        return result;
+    }
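+
+    // Illustrative flattening (sketch): for a tree ^(A B C) with leaves B and C,
+    // descendants(root, true) yields [A, DOWN, B, DOWN, UP, C, DOWN, UP, UP];
+    // note that, as written, even leaf nodes get their own DOWN/UP pair.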
+
+	public GrammarAST findFirstType(int ttype) {
+		// check this node (the root) first
+		if ( this.getType()==ttype ) {
+			return this;
+		}
+		// else check children
+		List<Tree> descendants = descendants(this);
+		for (Tree child : descendants) {
+			if ( child.getType()==ttype ) {
+				return (GrammarAST)child;
+			}
+		}
+		return null;
+	}
+
+	public List<GrammarAST> findAllType(int ttype) {
+		List<GrammarAST> nodes = new ArrayList<GrammarAST>();
+		_findAllType(ttype, nodes);
+		return nodes;
+	}
+
+	public void _findAllType(int ttype, List<GrammarAST> nodes) {
+		// check this node (the root) first
+		if ( this.getType()==ttype ) nodes.add(this);
+		// check children
+		for (int i = 0; i < getChildCount(); i++){
+			GrammarAST child = (GrammarAST)getChild(i);
+			child._findAllType(ttype, nodes);
+		}
+	}
+
+    /** Make nodes unique based upon Token so we can add them to a Set; if
+	 *  not a GrammarAST, check type.
+	 */
+	@Override
+	public boolean equals(Object ast) {
+		if ( this == ast ) {
+			return true;
+		}
+		if ( !(ast instanceof GrammarAST) ) {
+			return this.getType() == ((Tree)ast).getType();
+		}
+		GrammarAST t = (GrammarAST)ast;
+		return token.getLine() == t.getLine() &&
+			   token.getCharPositionInLine() == t.getCharPositionInLine();
+	}
+
+    /** Make nodes unique based upon Token so we can add them to a Set; if
+	 *  not a GrammarAST, check type.
+	 */
+    @Override
+    public int hashCode(){
+        if (token == null)
+            return 0;
+
+        return token.hashCode();
+    }
+
+	/** See if tree has exact token types and structure; no text */
+	public boolean hasSameTreeStructure(Tree other) {
+		// check roots first.
+		if (this.getType() != other.getType()) return false;
+		// if roots match, do full list match test on children.
+		Iterator<Tree> thisDescendants = descendants(this, true).iterator();
+		Iterator<Tree> otherDescendants = descendants(other, true).iterator();
+		while (thisDescendants.hasNext()) {
+			if (!otherDescendants.hasNext())
+				return false;
+			if (thisDescendants.next().getType() != otherDescendants.next().getType())
+				return false;
+		}
+		return !otherDescendants.hasNext();
+	}
+
+	public static GrammarAST dup(Tree t) {
+		if ( t==null ) {
+			return null;
+		}
+		GrammarAST dup_t = new GrammarAST();
+		dup_t.initialize(t);
+		return dup_t;
+	}
+
+    @Override
+    public Tree dupNode(){
+        return dup(this);
+    }
+
+	/** Duplicate a tree, assuming this is a root node of a tree--
+	 * duplicate that node and what's below; ignore siblings of root node.
+	 */
+	public static GrammarAST dupTreeNoActions(GrammarAST t, GrammarAST parent) {
+		if ( t==null ) {
+			return null;
+		}
+		GrammarAST result = (GrammarAST)t.dupNode();
+		for (GrammarAST subchild : getChildrenForDupTree(t)) {
+			result.addChild(dupTreeNoActions(subchild, result));
+		}
+		return result;
+	}
+
+	private static List<GrammarAST> getChildrenForDupTree(GrammarAST t) {
+		List<GrammarAST> result = new ArrayList<GrammarAST>();
+		for (int i = 0; i < t.getChildCount(); i++){
+			GrammarAST child = (GrammarAST)t.getChild(i);
+			int ttype = child.getType();
+			if (ttype == ANTLRParser.REWRITES || ttype == ANTLRParser.REWRITE || ttype==ANTLRParser.ACTION) {
+				continue;
+			}
+
+			if (ttype == ANTLRParser.BANG || ttype == ANTLRParser.ROOT) {
+				for (GrammarAST subchild : getChildrenForDupTree(child))
+					result.add(subchild);
+			} else {
+				result.add(child);
+			}
+		}
+		if ( result.size()==1 && result.get(0).getType()==ANTLRParser.EOA &&
+			 t.getType()==ANTLRParser.ALT )
+		{
+			// can't have an empty alt, insert epsilon
+			result.add(0, new GrammarAST(ANTLRParser.EPSILON, "epsilon"));
+		}
+
+		return result;
+	}
+
+	public static GrammarAST dupTree(GrammarAST t) {
+		if ( t==null ) {
+			return null;
+		}
+		GrammarAST root = dup(t);		// make copy of root
+		// copy all children of root.
+		for (int i= 0; i < t.getChildCount(); i++) {
+			GrammarAST child = (GrammarAST)t.getChild(i);
+			root.addChild(dupTree(child));
+		}
+		return root;
+	}
+
+	public void setTreeEnclosingRuleNameDeeply(String rname) {
+		enclosingRuleName = rname;
+		if (getChildCount() == 0) return;
+		for (Object child : getChildren()) {
+			if (!(child instanceof GrammarAST)) {
+				continue;
+			}
+			GrammarAST grammarAST = (GrammarAST)child;
+			grammarAST.setTreeEnclosingRuleNameDeeply(rname);
+		}
+	}
+
+	public String toStringList() {
+		String result = toStringTree();
+		if (this.getNextSibling() != null) {
+			result += ' ' + getNextSibling().toStringList();
+		}
+
+		return result;
+	}
+
+	/** Track start/stop token for subtree root created for a rule.
+	 *  Only works with Tree nodes.  For rules that match nothing,
+	 *  it seems like this will yield start=i and stop=i-1 in a nil node.
+	 *  Might be useful info, so I won't force it to be i..i.
+	 */
+	public void setTokenBoundaries(Token startToken, Token stopToken) {
+		if ( startToken!=null ) startIndex = startToken.getTokenIndex();
+		if ( stopToken!=null ) stopIndex = stopToken.getTokenIndex();
+	}
+
+	public GrammarAST getBlockALT(int i) {
+		if ( this.getType()!=ANTLRParser.BLOCK ) return null;
+		int alts = 0;
+		for (int j =0 ; j < getChildCount(); j++) {
+			if (getChild(j).getType() == ANTLRParser.ALT) {
+				alts++;
+			}
+			if (alts == i) {
+				return (GrammarAST)getChild(j);
+			}
+		}
+		return null;
+	}
+}
diff --git a/tool/src/main/java/org/antlr/tool/GrammarAnalysisAbortedMessage.java b/tool/src/main/java/org/antlr/tool/GrammarAnalysisAbortedMessage.java
new file mode 100644
index 0000000..55f5c91
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarAnalysisAbortedMessage.java
@@ -0,0 +1,60 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.DecisionProbe;
+import org.stringtemplate.v4.ST;
+
+/** Reports the condition that ANTLR's LL(*) analysis engine terminated
+ *  early.
+ */
+public class GrammarAnalysisAbortedMessage extends Message {
+	public DecisionProbe probe;
+
+	public GrammarAnalysisAbortedMessage(DecisionProbe probe) {
+		super(ErrorManager.MSG_ANALYSIS_ABORTED);
+		this.probe = probe;
+	}
+
+	@Override
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getCharPositionInLine();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+		ST st = getMessageTemplate();
+		st.add("enclosingRule",
+						probe.dfa.getNFADecisionStartState().enclosingRule.name);
+
+		return super.toString(st);
+	}
+
+}
diff --git a/tool/src/main/java/org/antlr/tool/GrammarDanglingStateMessage.java b/tool/src/main/java/org/antlr/tool/GrammarDanglingStateMessage.java
new file mode 100644
index 0000000..3cca533
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarDanglingStateMessage.java
@@ -0,0 +1,75 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.Label;
+import org.stringtemplate.v4.ST;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/** Reports a potential parsing issue with a decision; the decision is
+ *  nondeterministic in some way.
+ */
+public class GrammarDanglingStateMessage extends Message {
+	public DecisionProbe probe;
+	public DFAState problemState;
+
+	public GrammarDanglingStateMessage(DecisionProbe probe,
+									   DFAState problemState)
+	{
+		super(ErrorManager.MSG_DANGLING_STATE);
+		this.probe = probe;
+		this.problemState = problemState;
+	}
+
+	@Override
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getCharPositionInLine();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+		List<Label> labels = probe.getSampleNonDeterministicInputSequence(problemState);
+		String input = probe.getInputSequenceDisplay(labels);
+		ST st = getMessageTemplate();
+		List<Integer> alts = new ArrayList<Integer>();
+		alts.addAll(problemState.getAltSet());
+		Collections.sort(alts);
+		st.add("danglingAlts", alts);
+		st.add("input", input);
+
+		return super.toString(st);
+	}
+
+}
diff --git a/tool/src/main/java/org/antlr/tool/GrammarInsufficientPredicatesMessage.java b/tool/src/main/java/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
new file mode 100644
index 0000000..43a4518
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarInsufficientPredicatesMessage.java
@@ -0,0 +1,90 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.Label;
+import org.antlr.runtime.Token;
+import org.stringtemplate.v4.ST;
+
+import java.util.*;
+
+public class GrammarInsufficientPredicatesMessage extends Message {
+	public DecisionProbe probe;
+    public Map<Integer, Set<Token>> altToLocations;
+	public DFAState problemState;
+
+	public GrammarInsufficientPredicatesMessage(DecisionProbe probe,
+												DFAState problemState,
+												Map<Integer, Set<Token>> altToLocations)
+	{
+		super(ErrorManager.MSG_INSUFFICIENT_PREDICATES);
+		this.probe = probe;
+		this.problemState = problemState;
+		this.altToLocations = altToLocations;
+	}
+
+	@Override
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getCharPositionInLine();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+		ST st = getMessageTemplate();
+		// convert to string key to avoid 3.1 ST bug
+		Map<String, Set<Token>> altToLocationsWithStringKey = new LinkedHashMap<String, Set<Token>>();
+		List<Integer> alts = new ArrayList<Integer>();
+		alts.addAll(altToLocations.keySet());
+		Collections.sort(alts);
+		for (Integer altI : alts) {
+			altToLocationsWithStringKey.put(altI.toString(), altToLocations.get(altI));
+			/*
+			List<String> tokens = new ArrayList<String>();
+			for (Token t : altToLocations.get(altI)) {
+				tokens.add(t.toString());
+			}
+			Collections.sort(tokens);
+			System.out.println("tokens=\n"+tokens);
+			*/
+		}
+		st.add("altToLocations", altToLocationsWithStringKey);
+
+		List<Label> sampleInputLabels = problemState.dfa.probe.getSampleNonDeterministicInputSequence(problemState);
+		String input = problemState.dfa.probe.getInputSequenceDisplay(sampleInputLabels);
+		st.add("upon", input);
+
+		st.add("hasPredicateBlockedByAction", problemState.dfa.hasPredicateBlockedByAction);
+
+		return super.toString(st);
+	}
+
+}
diff --git a/tool/src/main/java/org/antlr/tool/GrammarNonDeterminismMessage.java b/tool/src/main/java/org/antlr/tool/GrammarNonDeterminismMessage.java
new file mode 100644
index 0000000..d61258c
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarNonDeterminismMessage.java
@@ -0,0 +1,127 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.Label;
+import org.antlr.analysis.NFAState;
+import org.stringtemplate.v4.ST;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+/** Reports a potential parsing issue with a decision; the decision is
+ *  nondeterministic in some way.
+ */
+public class GrammarNonDeterminismMessage extends Message {
+	public DecisionProbe probe;
+    public DFAState problemState;
+
+	public GrammarNonDeterminismMessage(DecisionProbe probe,
+										DFAState problemState)
+	{
+		super(ErrorManager.MSG_GRAMMAR_NONDETERMINISM);
+		this.probe = probe;
+		this.problemState = problemState;
+		// flip msg ID if alts are actually token refs in Tokens rule
+		if ( probe.dfa.isTokensRuleDecision() ) {
+			setMessageID(ErrorManager.MSG_TOKEN_NONDETERMINISM);
+		}
+	}
+
+	@Override
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getCharPositionInLine();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+
+		ST st = getMessageTemplate();
+		// Now fill template with information about problemState
+		List<Label> labels = probe.getSampleNonDeterministicInputSequence(problemState);
+		String input = probe.getInputSequenceDisplay(labels);
+		st.add("input", input);
+
+		if ( probe.dfa.isTokensRuleDecision() ) {
+			Set<Integer> disabledAlts = probe.getDisabledAlternatives(problemState);
+			for (Integer altI : disabledAlts) {
+				String tokenName =
+					probe.getTokenNameForTokensRuleAlt(altI);
+				// reset the line/col to the token definition (pick last one)
+				NFAState ruleStart =
+					probe.dfa.nfa.grammar.getRuleStartState(tokenName);
+				line = ruleStart.associatedASTNode.getLine();
+				column = ruleStart.associatedASTNode.getCharPositionInLine();
+				st.add("disabled", tokenName);
+			}
+		}
+		else {
+			st.add("disabled", probe.getDisabledAlternatives(problemState));
+		}
+
+		List<Integer> nondetAlts = probe.getNonDeterministicAltsForState(problemState);
+		NFAState nfaStart = probe.dfa.getNFADecisionStartState();
+		// all state paths have to begin with same NFA state
+		int firstAlt = 0;
+		if ( nondetAlts!=null ) {
+			for (Integer displayAltI : nondetAlts) {
+				if ( DecisionProbe.verbose ) {
+					int tracePathAlt =
+						nfaStart.translateDisplayAltToWalkAlt(displayAltI);
+					if ( firstAlt == 0 ) {
+						firstAlt = tracePathAlt;
+					}
+					List<? extends NFAState> path =
+						probe.getNFAPathStatesForAlt(firstAlt,
+													 tracePathAlt,
+													 labels);
+					st.addAggr("paths.{alt, states}", displayAltI, path);
+				}
+				else {
+					if ( probe.dfa.isTokensRuleDecision() ) {
+						// alts are token rules, convert to the names instead of numbers
+						String tokenName =
+							probe.getTokenNameForTokensRuleAlt(displayAltI);
+						st.add("conflictingTokens", tokenName);
+					}
+					else {
+						st.add("conflictingAlts", displayAltI);
+					}
+				}
+			}
+		}
+		st.add("hasPredicateBlockedByAction", problemState.dfa.hasPredicateBlockedByAction);
+		return super.toString(st);
+	}
+
+}
diff --git a/tool/src/main/java/org/antlr/tool/GrammarReport.java b/tool/src/main/java/org/antlr/tool/GrammarReport.java
new file mode 100644
index 0000000..416fbf9
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarReport.java
@@ -0,0 +1,483 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.DFA;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.misc.Utils;
+import org.antlr.runtime.misc.Stats;
+
+import java.lang.reflect.Field;
+import java.util.*;
+
+public class GrammarReport {
+	/** Because I may change the stats, I need to track version for later
+	 *  computations to be consistent.
+	 */
+	public static final String Version = "5";
+	public static final String GRAMMAR_STATS_FILENAME = "grammar.stats";
+
+	public static class ReportData {
+		String version;
+		String gname;
+		String gtype;
+		String language;
+		int numRules;
+		int numOuterProductions;
+		int numberOfDecisionsInRealRules;
+		int numberOfDecisions;
+		int numberOfCyclicDecisions;
+		int numberOfFixedKDecisions;
+		int numLL1;
+		int mink;
+		int maxk;
+		double avgk;
+		int numTokens;
+		long DFACreationWallClockTimeInMS;
+		int numberOfSemanticPredicates;
+		int numberOfManualLookaheadOptions; // TODO: verify
+		int numNonLLStarDecisions;
+		int numNondeterministicDecisions;
+		int numNondeterministicDecisionNumbersResolvedWithPredicates;
+		int errors;
+		int warnings;
+		int infos;
+		//int num_synpreds;
+		int blocksWithSynPreds;
+		int decisionsWhoseDFAsUsesSynPreds;
+		int blocksWithSemPreds;
+		int decisionsWhoseDFAsUsesSemPreds;
+		String output;
+		String grammarLevelk;
+		String grammarLevelBacktrack;
+	}
+
+	public static final String newline = System.getProperty("line.separator");
+
+	public Grammar grammar;
+
+	public GrammarReport(Grammar grammar) {
+		this.grammar = grammar;
+	}
+
+	public static ReportData getReportData(Grammar g) {
+		ReportData data = new ReportData();
+		data.version = Version;
+		data.gname = g.name;
+
+		data.gtype = g.getGrammarTypeString();
+
+		data.language = (String) g.getOption("language");
+		data.output = (String) g.getOption("output");
+		if ( data.output==null ) {
+			data.output = "none";
+		}
+
+		String k = (String) g.getOption("k");
+		if ( k==null ) {
+			k = "none";
+		}
+		data.grammarLevelk = k;
+
+		String backtrack = (String) g.getOption("backtrack");
+		if ( backtrack==null ) {
+			backtrack = "false";
+		}
+		data.grammarLevelBacktrack = backtrack;
+
+		int totalNonSynPredProductions = 0;
+		int totalNonSynPredRules = 0;
+		Collection<Rule> rules = g.getRules();
+		for (Rule r : rules) {
+			if ( !r.name.toUpperCase()
+				.startsWith(Grammar.SYNPRED_RULE_PREFIX.toUpperCase()) )
+			{
+				totalNonSynPredProductions += r.numberOfAlts;
+				totalNonSynPredRules++;
+			}
+		}
+
+		data.numRules = totalNonSynPredRules;
+		data.numOuterProductions = totalNonSynPredProductions;
+
+		int numACyclicDecisions =
+			g.getNumberOfDecisions()- g.getNumberOfCyclicDecisions();
+		List<Integer> depths = new ArrayList<Integer>();
+		int[] acyclicDFAStates = new int[numACyclicDecisions];
+		int[] cyclicDFAStates = new int[g.getNumberOfCyclicDecisions()];
+		int acyclicIndex = 0;
+		int cyclicIndex = 0;
+		int numLL1 = 0;
+		int blocksWithSynPreds = 0;
+		int dfaWithSynPred = 0;
+		int numDecisions = 0;
+		int numCyclicDecisions = 0;
+		for (int i=1; i<= g.getNumberOfDecisions(); i++) {
+			Grammar.Decision d = g.getDecision(i);
+			if( d.dfa==null ) {
+				//System.out.println("dec "+d.decision+" has no AST");
+				continue;
+			}
+			Rule r = d.dfa.decisionNFAStartState.enclosingRule;
+			if ( r.name.toUpperCase()
+				.startsWith(Grammar.SYNPRED_RULE_PREFIX.toUpperCase()) )
+			{
+				//System.out.println("dec "+d.decision+" is a synpred");
+				continue;
+			}
+
+			numDecisions++;
+			if ( blockHasSynPred(d.blockAST) ) blocksWithSynPreds++;
+			//if ( g.decisionsWhoseDFAsUsesSynPreds.contains(d.dfa) ) dfaWithSynPred++;
+			if ( d.dfa.hasSynPred() ) dfaWithSynPred++;
+			
+//			NFAState decisionStartState = grammar.getDecisionNFAStartState(d.decision);
+//			int nalts = grammar.getNumberOfAltsForDecisionNFA(decisionStartState);
+//			for (int alt = 1; alt <= nalts; alt++) {
+//				int walkAlt =
+//					decisionStartState.translateDisplayAltToWalkAlt(alt);
+//				NFAState altLeftEdge = grammar.getNFAStateForAltOfDecision(decisionStartState, walkAlt);
+//			}
+//			int nalts = grammar.getNumberOfAltsForDecisionNFA(d.dfa.decisionNFAStartState);
+//			for (int a=1; a<nalts; a++) {
+//				NFAState altStart =
+//					grammar.getNFAStateForAltOfDecision(d.dfa.decisionNFAStartState, a);
+//			}
+			if ( !d.dfa.isCyclic() ) {
+				if ( d.dfa.isClassicDFA() ) {
+					int maxk = d.dfa.getMaxLookaheadDepth();
+					//System.out.println("decision "+d.dfa.decisionNumber+" k="+maxk);
+					if ( maxk==1 ) numLL1++;
+					depths.add( maxk );
+				}
+				else {
+					acyclicDFAStates[acyclicIndex] = d.dfa.getNumberOfStates();
+					acyclicIndex++;
+				}
+			}
+			else {
+				//System.out.println("CYCLIC decision "+d.dfa.decisionNumber);
+				numCyclicDecisions++;
+				cyclicDFAStates[cyclicIndex] = d.dfa.getNumberOfStates();
+				cyclicIndex++;
+			}
+		}
+
+		data.numLL1 = numLL1;
+		data.numberOfFixedKDecisions = depths.size();
+		data.mink = Stats.min(depths);
+		data.maxk = Stats.max(depths);
+		data.avgk = Stats.avg(depths);
+
+		data.numberOfDecisionsInRealRules = numDecisions;
+		data.numberOfDecisions = g.getNumberOfDecisions();
+		data.numberOfCyclicDecisions = numCyclicDecisions;
+
+//		Map synpreds = grammar.getSyntacticPredicates();
+//		int num_synpreds = synpreds!=null ? synpreds.size() : 0;
+//		data.num_synpreds = num_synpreds;
+		data.blocksWithSynPreds = blocksWithSynPreds;
+		data.decisionsWhoseDFAsUsesSynPreds = dfaWithSynPred;
+
+//
+//		data. = Stats.stddev(depths);
+//
+//		data. = Stats.min(acyclicDFAStates);
+//
+//		data. = Stats.max(acyclicDFAStates);
+//
+//		data. = Stats.avg(acyclicDFAStates);
+//
+//		data. = Stats.stddev(acyclicDFAStates);
+//
+//		data. = Stats.sum(acyclicDFAStates);
+//
+//		data. = Stats.min(cyclicDFAStates);
+//
+//		data. = Stats.max(cyclicDFAStates);
+//
+//		data. = Stats.avg(cyclicDFAStates);
+//
+//		data. = Stats.stddev(cyclicDFAStates);
+//
+//		data. = Stats.sum(cyclicDFAStates);
+
+		data.numTokens = g.getTokenTypes().size();
+
+		data.DFACreationWallClockTimeInMS = g.DFACreationWallClockTimeInMS;
+
+		// includes real predicates and, I think, the preds generated inside synpreds; those should be stripped out
+		data.numberOfSemanticPredicates = g.numberOfSemanticPredicates;
+
+		data.numberOfManualLookaheadOptions = g.numberOfManualLookaheadOptions;
+
+		data.numNonLLStarDecisions = g.numNonLLStar;
+		data.numNondeterministicDecisions = g.setOfNondeterministicDecisionNumbers.size();
+		data.numNondeterministicDecisionNumbersResolvedWithPredicates =
+			g.setOfNondeterministicDecisionNumbersResolvedWithPredicates.size();
+
+		data.errors = ErrorManager.getErrorState().errors;
+		data.warnings = ErrorManager.getErrorState().warnings;
+		data.infos = ErrorManager.getErrorState().infos;
+
+		data.blocksWithSemPreds = g.blocksWithSemPreds.size();
+
+		data.decisionsWhoseDFAsUsesSemPreds = g.decisionsWhoseDFAsUsesSemPreds.size();
+
+		return data;
+	}
+	
+	/** Create a single-line stats report about this grammar suitable to
+	 *  send to the notify page at antlr.org
+	 */
+	public String toNotifyString() {
+		StringBuilder buf = new StringBuilder();
+		ReportData data = getReportData(grammar);
+		Field[] fields = ReportData.class.getDeclaredFields();
+		int i = 0;
+		for (Field f : fields) {
+			try {
+				Object v = f.get(data);
+				String s = v!=null ? v.toString() : "null";
+				if (i>0) buf.append('\t');
+				buf.append(s);
+			}
+			catch (Exception e) {
+				ErrorManager.internalError("Can't get data", e);
+			}
+			i++;
+		}
+		return buf.toString();
+	}
+
+	public String getBacktrackingReport() {
+		StringBuilder buf = new StringBuilder();
+		buf.append("Backtracking report:");
+		buf.append(newline);
+		buf.append("Number of decisions that backtrack: ");
+		buf.append(grammar.decisionsWhoseDFAsUsesSynPreds.size());
+		buf.append(newline);
+		buf.append(getDFALocations(grammar.decisionsWhoseDFAsUsesSynPreds));
+		return buf.toString();
+	}
+
+	protected String getDFALocations(Set<DFA> dfas) {
+		Set<Integer> decisions = new HashSet<Integer>();
+		StringBuilder buf = new StringBuilder();
+		for (DFA dfa : dfas) {
+			// if we aborted a DFA and redid with k=1, the backtracking set may hold two DFAs for the same decision; report each decision only once
+			if ( decisions.contains(Utils.integer(dfa.decisionNumber)) ) {
+				continue;
+			}
+			decisions.add(Utils.integer(dfa.decisionNumber));
+			buf.append("Rule ");
+			buf.append(dfa.decisionNFAStartState.enclosingRule.name);
+			buf.append(" decision ");
+			buf.append(dfa.decisionNumber);
+			buf.append(" location ");
+			GrammarAST decisionAST =
+				dfa.decisionNFAStartState.associatedASTNode;
+			buf.append(decisionAST.getLine());
+			buf.append(":");
+			buf.append(decisionAST.getCharPositionInLine());
+			buf.append(newline);
+		}
+		return buf.toString();
+	}
+
+	/** Given a stats line suitable for sending to the antlr.org site,
+	 *  return a human-readable version.  Return null if there is a
+	 *  problem with the data.
+	 */
+	@Override
+	public String toString() {
+		return toString(toNotifyString());
+	}
+
+	protected static ReportData decodeReportData(String dataS) {
+		ReportData data = new ReportData();
+		StringTokenizer st = new StringTokenizer(dataS, "\t");
+		Field[] fields = ReportData.class.getDeclaredFields();
+		for (Field f : fields) {
+			String v = st.nextToken();
+			try {
+				if ( f.getType() == String.class ) {
+					f.set(data, v);
+				}
+				else if ( f.getType() == double.class ) {
+					f.set(data, Double.valueOf(v));					
+				}
+				else {
+					f.set(data, Integer.valueOf(v));					
+				}
+			}
+			catch (Exception e) {
+				ErrorManager.internalError("Can't get data", e);
+			}
+		}
+		return data;
+	}
+
+	public static String toString(String notifyDataLine) {
+		ReportData data = decodeReportData(notifyDataLine);
+		if ( data ==null ) {
+			return null;
+		}
+		StringBuilder buf = new StringBuilder();
+		buf.append("ANTLR Grammar Report; Stats Version ");
+		buf.append(data.version);
+		buf.append('\n');
+		buf.append("Grammar: ");
+		buf.append(data.gname);
+		buf.append('\n');
+		buf.append("Type: ");
+		buf.append(data.gtype);
+		buf.append('\n');
+		buf.append("Target language: ");
+		buf.append(data.language);
+		buf.append('\n');
+		buf.append("Output: ");
+		buf.append(data.output);
+		buf.append('\n');
+		buf.append("Grammar option k: ");
+		buf.append(data.grammarLevelk);
+		buf.append('\n');
+		buf.append("Grammar option backtrack: ");
+		buf.append(data.grammarLevelBacktrack);
+		buf.append('\n');
+		buf.append("Rules: ");
+		buf.append(data.numRules);
+		buf.append('\n');
+		buf.append("Outer productions: ");
+		buf.append(data.numOuterProductions);
+		buf.append('\n');
+		buf.append("Decisions: ");
+		buf.append(data.numberOfDecisions);
+		buf.append('\n');
+		buf.append("Decisions (ignoring decisions in synpreds): ");
+		buf.append(data.numberOfDecisionsInRealRules);
+		buf.append('\n');
+		buf.append("Fixed k DFA decisions: ");
+		buf.append(data.numberOfFixedKDecisions);
+		buf.append('\n');
+		buf.append("Cyclic DFA decisions: ");
+		buf.append(data.numberOfCyclicDecisions);
+		buf.append('\n');
+		buf.append("LL(1) decisions: "); buf.append(data.numLL1);
+		buf.append('\n');
+		buf.append("Min fixed k: "); buf.append(data.mink);
+		buf.append('\n');
+		buf.append("Max fixed k: "); buf.append(data.maxk);
+		buf.append('\n');
+		buf.append("Average fixed k: "); buf.append(data.avgk);
+		buf.append('\n');
+//		buf.append("Standard deviation of fixed k: "); buf.append(fields[12]);
+//		buf.append('\n');
+//		buf.append("Min acyclic DFA states: "); buf.append(fields[13]);
+//		buf.append('\n');
+//		buf.append("Max acyclic DFA states: "); buf.append(fields[14]);
+//		buf.append('\n');
+//		buf.append("Average acyclic DFA states: "); buf.append(fields[15]);
+//		buf.append('\n');
+//		buf.append("Standard deviation of acyclic DFA states: "); buf.append(fields[16]);
+//		buf.append('\n');
+//		buf.append("Total acyclic DFA states: "); buf.append(fields[17]);
+//		buf.append('\n');
+//		buf.append("Min cyclic DFA states: "); buf.append(fields[18]);
+//		buf.append('\n');
+//		buf.append("Max cyclic DFA states: "); buf.append(fields[19]);
+//		buf.append('\n');
+//		buf.append("Average cyclic DFA states: "); buf.append(fields[20]);
+//		buf.append('\n');
+//		buf.append("Standard deviation of cyclic DFA states: "); buf.append(fields[21]);
+//		buf.append('\n');
+//		buf.append("Total cyclic DFA states: "); buf.append(fields[22]);
+//		buf.append('\n');
+		buf.append("DFA creation time in ms: ");
+		buf.append(data.DFACreationWallClockTimeInMS);
+		buf.append('\n');
+
+//		buf.append("Number of syntactic predicates available (including synpred rules): ");
+//		buf.append(data.num_synpreds);
+//		buf.append('\n');
+		buf.append("Decisions with available syntactic predicates (ignoring synpred rules): ");
+		buf.append(data.blocksWithSynPreds);
+		buf.append('\n');
+		buf.append("Decision DFAs using syntactic predicates (ignoring synpred rules): ");
+		buf.append(data.decisionsWhoseDFAsUsesSynPreds);
+		buf.append('\n');
+
+		buf.append("Number of semantic predicates found: ");
+		buf.append(data.numberOfSemanticPredicates);
+		buf.append('\n');
+		buf.append("Decisions with semantic predicates: ");
+		buf.append(data.blocksWithSemPreds);
+		buf.append('\n');
+		buf.append("Decision DFAs using semantic predicates: ");
+		buf.append(data.decisionsWhoseDFAsUsesSemPreds);
+		buf.append('\n');
+
+		buf.append("Number of (likely) non-LL(*) decisions: ");
+		buf.append(data.numNonLLStarDecisions);
+		buf.append('\n');
+		buf.append("Number of nondeterministic decisions: ");
+		buf.append(data.numNondeterministicDecisions);
+		buf.append('\n');
+		buf.append("Number of nondeterministic decisions resolved with predicates: ");
+		buf.append(data.numNondeterministicDecisionNumbersResolvedWithPredicates);
+		buf.append('\n');
+
+		buf.append("Number of manual or forced fixed lookahead k=value options: ");
+		buf.append(data.numberOfManualLookaheadOptions);
+		buf.append('\n');
+
+		buf.append("Vocabulary size: ");
+		buf.append(data.numTokens);
+		buf.append('\n');
+		buf.append("Number of errors: ");
+		buf.append(data.errors);
+		buf.append('\n');
+		buf.append("Number of warnings: ");
+		buf.append(data.warnings);
+		buf.append('\n');
+		buf.append("Number of infos: ");
+		buf.append(data.infos);
+		buf.append('\n');
+		return buf.toString();
+	}
+
+	public static boolean blockHasSynPred(GrammarAST blockAST) {
+		GrammarAST c1 = blockAST.findFirstType(ANTLRParser.SYN_SEMPRED);
+		GrammarAST c2 = blockAST.findFirstType(ANTLRParser.BACKTRACK_SEMPRED);
+		if ( c1!=null || c2!=null ) return true;
+//		System.out.println(blockAST.enclosingRuleName+
+//						   " "+blockAST.getLine()+":"+blockAST.getColumn()+" no preds AST="+blockAST.toStringTree());
+		return false;
+	}
+
+}
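
The notify line that toNotifyString() emits and decodeReportData() parses is simply the ReportData fields, taken in declaration order, joined with tabs and read back by field type. A minimal standalone sketch of that round trip, using a hypothetical two-field data class instead of the real ReportData, might look like this:

    import java.lang.reflect.Field;
    import java.util.StringTokenizer;

    // Hypothetical stand-in for ReportData with just two fields; shows the
    // tab-separated, field-order encoding used by toNotifyString()/decodeReportData().
    public class NotifyLineDemo {
        public static class Data {
            public String gname;
            public int numRules;
        }

        // Encode: walk the declared fields in order, joining their values with tabs.
        static String encode(Data d) throws Exception {
            StringBuilder buf = new StringBuilder();
            Field[] fields = Data.class.getDeclaredFields();
            for (int i = 0; i < fields.length; i++) {
                if (i > 0) buf.append('\t');
                Object v = fields[i].get(d);
                buf.append(v != null ? v.toString() : "null");
            }
            return buf.toString();
        }

        // Decode: walk the same field list in the same order, parsing by field type.
        static Data decode(String line) throws Exception {
            Data d = new Data();
            StringTokenizer st = new StringTokenizer(line, "\t");
            for (Field f : Data.class.getDeclaredFields()) {
                String v = st.nextToken();
                if (f.getType() == String.class) f.set(d, v);
                else f.set(d, Integer.valueOf(v));
            }
            return d;
        }

        public static void main(String[] args) throws Exception {
            Data d = new Data();
            d.gname = "Java";
            d.numRules = 42;
            String line = encode(d);          // "Java\t42"
            Data back = decode(line);
            System.out.println(back.gname + " has " + back.numRules + " rules");
        }
    }

The scheme relies on writer and reader seeing the same fields in the same order, which appears to be why GrammarReport carries a Version string.
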
diff --git a/tool/src/main/java/org/antlr/tool/GrammarReport2.java b/tool/src/main/java/org/antlr/tool/GrammarReport2.java
new file mode 100644
index 0000000..409273d
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarReport2.java
@@ -0,0 +1,94 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.tool;
+
+/** Simplifying report dramatically for LL(*) paper.  Old results were
+ *  wrong anyway it seems.  We need:
+ *
+ * 		percent decisions that potentially backtrack
+ *  	histogram of regular lookahead depth (int k or *)
+ */
+public class GrammarReport2 {
+	public static final String newline = System.getProperty("line.separator");
+
+	public Grammar root;
+
+	public GrammarReport2(Grammar rootGrammar) {
+		this.root = rootGrammar;
+	}
+
+	@Override
+	public String toString() {
+		StringBuilder buf = new StringBuilder();
+		stats(root, buf);
+		CompositeGrammar composite = root.composite;
+		for (Grammar g : composite.getDelegates(root)) {
+			stats(g, buf);
+		}
+		return buf.toString();
+	}
+
+	void stats(Grammar g, StringBuilder buf) {
+		int numDec = g.getNumberOfDecisions();
+		for (int decision=1; decision<=numDec; decision++) {
+			Grammar.Decision d = g.getDecision(decision);
+			if ( d.dfa==null ) { // unused decisions in auto synpreds
+				//System.err.println("no decision "+decision+" dfa for "+d.blockAST.toStringTree());
+				continue;
+			}
+			int k = d.dfa.getMaxLookaheadDepth();
+			Rule enclosingRule = d.dfa.decisionNFAStartState.enclosingRule;
+			if ( enclosingRule.isSynPred ) continue; // don't count synpred rules
+			buf.append(g.name).append(".").append(enclosingRule.name).append(":");
+			GrammarAST decisionAST =
+				d.dfa.decisionNFAStartState.associatedASTNode;
+			buf.append(decisionAST.getLine());
+			buf.append(":");
+			buf.append(decisionAST.getCharPositionInLine());
+			buf.append(" decision ").append(decision).append(":");
+			
+			if ( d.dfa.isCyclic() ) buf.append(" cyclic");
+			if ( k!=Integer.MAX_VALUE ) buf.append(" k=").append(k); // fixed, no sempreds
+			if ( d.dfa.hasSynPred() ) buf.append(" backtracks"); // isolated synpred not gated
+			if ( d.dfa.hasSemPred() ) buf.append(" sempred"); // user-defined sempred
+//			else {
+//				buf.append("undefined");
+//				FASerializer serializer = new FASerializer(g);
+//				String result = serializer.serialize(d.dfa.startState);
+//				System.err.println(result);
+//			}
+			nl(buf);
+		}
+	}
+
+	void nl(StringBuilder buf) {
+		buf.append(newline);
+	}
+}
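
Each line that stats() writes has the shape "<grammar>.<rule>:<line>:<col> decision <n>:" followed by zero or more of " cyclic", " k=<depth>", " backtracks" and " sempred". A hypothetical consumer of such lines, assuming exactly that shape and using made-up sample data, could compute the two figures the class comment asks for (a lookahead-depth histogram and the share of decisions that potentially backtrack):

    import java.util.Map;
    import java.util.TreeMap;

    // Hypothetical consumer of GrammarReport2 output; the sample lines below are
    // made up but follow the shape produced by stats() above.
    public class Report2Summary {
        public static void main(String[] args) {
            String[] lines = {
                "Java.compilationUnit:12:4 decision 1: k=1",
                "Java.type:88:2 decision 9: cyclic",
                "Java.expression:310:8 decision 57: k=2 backtracks",
            };
            Map<String, Integer> kHistogram = new TreeMap<String, Integer>();
            int backtracking = 0;
            for (String line : lines) {
                String k = "*";                        // unbounded unless a fixed k was reported
                for (String word : line.split(" ")) {
                    if (word.startsWith("k=")) k = word.substring(2);
                }
                Integer old = kHistogram.get(k);
                kHistogram.put(k, old == null ? 1 : old + 1);
                if (line.contains(" backtracks")) backtracking++;
            }
            System.out.println("lookahead depth histogram: " + kHistogram);
            System.out.println("decisions that potentially backtrack: "
                               + (100.0 * backtracking / lines.length) + "%");
        }
    }
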
diff --git a/tool/src/main/java/org/antlr/tool/GrammarSanity.java b/tool/src/main/java/org/antlr/tool/GrammarSanity.java
new file mode 100644
index 0000000..5678950
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarSanity.java
@@ -0,0 +1,326 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.NFAState;
+import org.antlr.analysis.RuleClosureTransition;
+import org.antlr.analysis.Transition;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.runtime.tree.Tree;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+/** Factor out routines that check sanity of rules, alts, grammars, etc.. */
+public class GrammarSanity {
+	/** The checkForLeftRecursion method needs to track what rules it has
+	 *  visited to track infinite recursion.
+	 */
+	protected Set<Rule> visitedDuringRecursionCheck = null;
+
+	protected Grammar grammar;
+	public GrammarSanity(Grammar grammar) {
+		this.grammar = grammar;
+	}
+
+	/** Check all rules for infinite left recursion before analysis. Return list
+	 *  of troublesome rule cycles.  This method has two side-effects: it notifies
+	 *  the error manager that we have problems and it sets the list of
+	 *  recursive rules that we should ignore during analysis.
+	 */
+	public List<Set<Rule>> checkAllRulesForLeftRecursion() {
+		grammar.buildNFA(); // make sure we have NFAs
+		grammar.leftRecursiveRules = new HashSet<Rule>();
+		List<Set<Rule>> listOfRecursiveCycles = new ArrayList<Set<Rule>>();
+		for (int i = 0; i < grammar.composite.ruleIndexToRuleList.size(); i++) {
+			Rule r = grammar.composite.ruleIndexToRuleList.elementAt(i);
+			if ( r!=null ) {
+				visitedDuringRecursionCheck = new HashSet<Rule>();
+				visitedDuringRecursionCheck.add(r);
+				Set<NFAState> visitedStates = new HashSet<NFAState>();
+				traceStatesLookingForLeftRecursion(r.startState,
+												   visitedStates,
+												   listOfRecursiveCycles);
+			}
+		}
+		if ( listOfRecursiveCycles.size()>0 ) {
+			ErrorManager.leftRecursionCycles(listOfRecursiveCycles);
+		}
+		return listOfRecursiveCycles;
+	}
+
+	/** From state s, look for any transition to a rule that is currently
+	 *  being traced.  When tracing r, visitedDuringRecursionCheck has r
+	 *  initially.  If you reach an accept state, return but notify the
+	 *  invoking rule that it is nullable, which implies that the invoking
+	 *  rule must look at the follow transition for that invoking state.
+	 *  The visitedStates tracks visited states within a single rule so
+	 *  we can avoid epsilon-loop-induced infinite recursion here.  Keep
+	 *  filling the cycles in listOfRecursiveCycles and also, as a
+	 *  side-effect, set leftRecursiveRules.
+	 */
+	protected boolean traceStatesLookingForLeftRecursion(NFAState s,
+														 Set<NFAState> visitedStates,
+														 List<Set<Rule>> listOfRecursiveCycles)
+	{
+		if ( s.isAcceptState() ) {
+			// this rule must be nullable!
+			// At least one epsilon edge reached accept state
+			return true;
+		}
+		if ( visitedStates.contains(s) ) {
+			// within same rule, we've hit same state; quit looping
+			return false;
+		}
+		visitedStates.add(s);
+		boolean stateReachesAcceptState = false;
+		Transition t0 = s.transition[0];
+		if ( t0 instanceof RuleClosureTransition ) {
+			RuleClosureTransition refTrans = (RuleClosureTransition)t0;
+			Rule refRuleDef = refTrans.rule;
+			//String targetRuleName = ((NFAState)t0.target).getEnclosingRule();
+			if ( visitedDuringRecursionCheck.contains(refRuleDef) ) {
+				// record left-recursive rule, but don't go back in
+				grammar.leftRecursiveRules.add(refRuleDef);
+				/*
+				System.out.println("already visited "+refRuleDef+", calling from "+
+								   s.enclosingRule);
+								   */
+				addRulesToCycle(refRuleDef,
+								s.enclosingRule,
+								listOfRecursiveCycles);
+			}
+			else {
+				// must visit if not already visited; send new visitedStates set
+				visitedDuringRecursionCheck.add(refRuleDef);
+				boolean callReachedAcceptState =
+					traceStatesLookingForLeftRecursion((NFAState)t0.target,
+													   new HashSet<NFAState>(),
+													   listOfRecursiveCycles);
+				// we're back from visiting that rule
+				visitedDuringRecursionCheck.remove(refRuleDef);
+				// must keep going in this rule then
+				if ( callReachedAcceptState ) {
+					NFAState followingState =
+						((RuleClosureTransition) t0).followState;
+					stateReachesAcceptState |=
+						traceStatesLookingForLeftRecursion(followingState,
+														   visitedStates,
+														   listOfRecursiveCycles);
+				}
+			}
+		}
+		else if ( t0.label.isEpsilon() || t0.label.isSemanticPredicate() ) {
+			stateReachesAcceptState |=
+				traceStatesLookingForLeftRecursion((NFAState)t0.target, visitedStates, listOfRecursiveCycles);
+		}
+		// else it has a labeled edge
+
+		// now do the other transition if it exists
+		Transition t1 = s.transition[1];
+		if ( t1!=null ) {
+			stateReachesAcceptState |=
+				traceStatesLookingForLeftRecursion((NFAState)t1.target,
+												   visitedStates,
+												   listOfRecursiveCycles);
+		}
+		return stateReachesAcceptState;
+	}
+
+	/** enclosingRule calls targetRule: find the cycle containing
+	 *  the target and add the caller.  Find the cycle containing the caller
+	 *  and add the target.  If no cycle contains either, create a new
+	 *  cycle.  listOfRecursiveCycles is a List&lt;Set&lt;Rule&gt;&gt; holding the
+	 *  cycles (sets of rules) found so far.
+	 */
+	protected void addRulesToCycle(Rule targetRule,
+								   Rule enclosingRule,
+								   List<Set<Rule>> listOfRecursiveCycles)
+	{
+		boolean foundCycle = false;
+		for (int i = 0; i < listOfRecursiveCycles.size(); i++) {
+			Set<Rule> rulesInCycle = listOfRecursiveCycles.get(i);
+			// ensure both rules are in same cycle
+			if ( rulesInCycle.contains(targetRule) ) {
+				rulesInCycle.add(enclosingRule);
+				foundCycle = true;
+			}
+			if ( rulesInCycle.contains(enclosingRule) ) {
+				rulesInCycle.add(targetRule);
+				foundCycle = true;
+			}
+		}
+		if ( !foundCycle ) {
+			Set<Rule> cycle = new HashSet<Rule>();
+			cycle.add(targetRule);
+			cycle.add(enclosingRule);
+			listOfRecursiveCycles.add(cycle);
+		}
+	}
+
+	public void checkRuleReference(GrammarAST scopeAST,
+								   GrammarAST refAST,
+								   GrammarAST argsAST,
+								   String currentRuleName)
+	{
+		Rule r = grammar.getRule(refAST.getText());
+		if ( refAST.getType()==ANTLRParser.RULE_REF ) {
+			if ( argsAST!=null ) {
+				// rule[args]; ref has args
+                if ( r!=null && r.argActionAST==null ) {
+					// but rule def has no args
+					ErrorManager.grammarError(
+						ErrorManager.MSG_RULE_HAS_NO_ARGS,
+						grammar,
+						argsAST.getToken(),
+						r.name);
+				}
+			}
+			else {
+				// rule ref has no args
+				if ( r!=null && r.argActionAST!=null ) {
+					// but rule def has args
+					ErrorManager.grammarError(
+						ErrorManager.MSG_MISSING_RULE_ARGS,
+						grammar,
+						refAST.getToken(),
+						r.name);
+				}
+			}
+		}
+		else if ( refAST.getType()==ANTLRParser.TOKEN_REF ) {
+			if ( grammar.type!=Grammar.LEXER ) {
+				if ( argsAST!=null ) {
+					// args on a token ref not in a lexer rule
+					ErrorManager.grammarError(
+						ErrorManager.MSG_ARGS_ON_TOKEN_REF,
+						grammar,
+						refAST.getToken(),
+						refAST.getText());
+				}
+				return; // ignore token refs in nonlexers
+			}
+			if ( argsAST!=null ) {
+				// tokenRef[args]; ref has args
+				if ( r!=null && r.argActionAST==null ) {
+					// but token rule def has no args
+					ErrorManager.grammarError(
+						ErrorManager.MSG_RULE_HAS_NO_ARGS,
+						grammar,
+						argsAST.getToken(),
+						r.name);
+				}
+			}
+			else {
+				// token ref has no args
+				if ( r!=null && r.argActionAST!=null ) {
+					// but token rule def has args
+					ErrorManager.grammarError(
+						ErrorManager.MSG_MISSING_RULE_ARGS,
+						grammar,
+						refAST.getToken(),
+						r.name);
+				}
+			}
+		}
+	}
+
+	/** Rules in tree grammar that use -&gt; rewrites and are spitting out
+	 *  templates via output=template and then use rewrite=true must only
+	 *  use -&gt; on alts that are simple nodes or trees or single rule refs
+	 *  that match either nodes or trees.  The altAST is the ALT node
+	 *  for an ALT.  Verify that its first child is simple.  Must be either
+	 *  ( ALT ^( A B ) &lt;end-of-alt&gt; ) or ( ALT A &lt;end-of-alt&gt; ) or
+	 *  other element.
+	 *
+	 *  Ignore predicates in front and labels.
+	 */
+	public void ensureAltIsSimpleNodeOrTree(GrammarAST altAST,
+											GrammarAST elementAST,
+											int outerAltNum)
+	{
+		if ( isValidSimpleElementNode(elementAST) ) {
+			GrammarAST next = elementAST.getNextSibling();
+			if ( !isNextNonActionElementEOA(next)) {
+				ErrorManager.grammarWarning(ErrorManager.MSG_REWRITE_FOR_MULTI_ELEMENT_ALT,
+											grammar,
+											next.token,
+											outerAltNum);
+			}
+			return;
+		}
+		switch ( elementAST.getType() ) {
+			case ANTLRParser.ASSIGN :		// labels ok on non-rule refs
+			case ANTLRParser.PLUS_ASSIGN :
+				if ( isValidSimpleElementNode(elementAST.getChild(1)) ) {
+					return;
+				}
+				break;
+			case ANTLRParser.ACTION :		// skip past actions
+			case ANTLRParser.SEMPRED :
+			case ANTLRParser.SYN_SEMPRED :
+			case ANTLRParser.BACKTRACK_SEMPRED :
+			case ANTLRParser.GATED_SEMPRED :
+				ensureAltIsSimpleNodeOrTree(altAST,
+											elementAST.getNextSibling(),
+											outerAltNum);
+				return;
+		}
+		ErrorManager.grammarWarning(ErrorManager.MSG_REWRITE_FOR_MULTI_ELEMENT_ALT,
+									grammar,
+									elementAST.token,
+									outerAltNum);
+	}
+
+	protected boolean isValidSimpleElementNode(Tree t) {
+		switch ( t.getType() ) {
+			case ANTLRParser.TREE_BEGIN :
+			case ANTLRParser.TOKEN_REF :
+			case ANTLRParser.CHAR_LITERAL :
+			case ANTLRParser.STRING_LITERAL :
+			case ANTLRParser.WILDCARD :
+				return true;
+			default :
+				return false;
+		}
+	}
+
+	protected boolean isNextNonActionElementEOA(GrammarAST t) {
+		while ( t.getType()==ANTLRParser.ACTION ||
+				t.getType()==ANTLRParser.SEMPRED )
+		{
+			t = t.getNextSibling();
+		}
+		if ( t.getType()==ANTLRParser.EOA ) {
+			return true;
+		}
+		return false;
+	}
+}
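
The Javadoc on addRulesToCycle() above describes how a newly discovered caller/target pair is folded into the cycles found so far: join any cycle that already contains either rule, otherwise start a new one. A self-contained sketch of just that merging step, using rule names in place of Rule objects, behaves as follows:

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Stand-alone sketch of the cycle-merging step in addRulesToCycle(),
    // operating on rule names instead of Rule objects.
    public class LeftRecursionCycles {
        static void addToCycle(String target, String enclosing, List<Set<String>> cycles) {
            boolean found = false;
            for (Set<String> cycle : cycles) {
                if (cycle.contains(target))    { cycle.add(enclosing); found = true; }
                if (cycle.contains(enclosing)) { cycle.add(target);    found = true; }
            }
            if (!found) {                      // neither rule seen before: start a new cycle
                Set<String> cycle = new HashSet<String>();
                cycle.add(target);
                cycle.add(enclosing);
                cycles.add(cycle);
            }
        }

        public static void main(String[] args) {
            List<Set<String>> cycles = new ArrayList<Set<String>>();
            addToCycle("b", "a", cycles);      // a calls b at its left edge
            addToCycle("a", "b", cycles);      // b calls a at its left edge
            addToCycle("c", "c", cycles);      // c is directly left-recursive
            System.out.println(cycles);        // e.g. [[a, b], [c]]
        }
    }

With these calls, the mutually left-recursive pair a/b ends up in a single cycle and the directly left-recursive rule c in its own.
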
diff --git a/tool/src/main/java/org/antlr/tool/GrammarSemanticsMessage.java b/tool/src/main/java/org/antlr/tool/GrammarSemanticsMessage.java
new file mode 100644
index 0000000..dc1ae63
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarSemanticsMessage.java
@@ -0,0 +1,89 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.runtime.Token;
+import org.stringtemplate.v4.ST;
+
+/** A problem with the symbols and/or meaning of a grammar such as rule
+ *  redefinition.
+ */
+public class GrammarSemanticsMessage extends Message {
+	public Grammar g;
+	/** Most of the time, we'll have a token such as an undefined rule ref
+	 *  and so this will be set.
+	 */
+	public Token offendingToken;
+
+	public GrammarSemanticsMessage(int msgID,
+						  Grammar g,
+						  Token offendingToken)
+	{
+		this(msgID,g,offendingToken,null,null);
+	}
+
+	public GrammarSemanticsMessage(int msgID,
+						  Grammar g,
+						  Token offendingToken,
+						  Object arg)
+	{
+		this(msgID,g,offendingToken,arg,null);
+	}
+
+	public GrammarSemanticsMessage(int msgID,
+						  Grammar g,
+						  Token offendingToken,
+						  Object arg,
+						  Object arg2)
+	{
+		super(msgID,arg,arg2);
+		this.g = g;
+		this.offendingToken = offendingToken;
+	}
+
+	@Override
+	public String toString() {
+		line = 0;
+		column = 0;
+		if ( offendingToken!=null ) {
+			line = offendingToken.getLine();
+			column = offendingToken.getCharPositionInLine();
+		}
+		if ( g!=null ) {
+			file = g.getFileName();
+		}
+		ST st = getMessageTemplate();
+		if ( arg!=null ) {
+			st.add("arg", arg);
+		}
+		if ( arg2!=null ) {
+			st.add("arg2", arg2);
+		}
+		return super.toString(st);
+	}
+}
diff --git a/tool/src/main/java/org/antlr/tool/GrammarSerializerFoo.java b/tool/src/main/java/org/antlr/tool/GrammarSerializerFoo.java
new file mode 100644
index 0000000..d66f51e
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarSerializerFoo.java
@@ -0,0 +1,217 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.tool;
+
+import org.antlr.runtime.SerializedGrammar;
+
+import java.io.*;
+
+/** Serialize a grammar into a highly compressed form with
+ *  only the info needed to recognize sentences.
+ *  FORMAT:
+ *
+ *  file ::= $ANTLR&lt;version:byte&gt;&lt;grammartype:byte&gt;&lt;name:string&gt;;&lt;numRules:short&gt;&lt;rules&gt;
+ *  rule ::= R&lt;rulename:string&gt;;B&lt;nalts:short&gt;&lt;alts&gt;.
+ *  alt  ::= A&lt;elems&gt;;
+ *  elem ::= t&lt;tokentype:short&gt; | r&lt;ruleIndex:short&gt; | -&lt;char:uchar&gt;&lt;char:uchar&gt; | ~&lt;tokentype&gt; | w
+ */
+public class GrammarSerializerFoo {
+    protected DataOutputStream out;
+    protected String filename;
+    protected Grammar g;
+
+    protected ByteArrayOutputStream altBuf;
+    protected int numElementsInAlt = 0;
+
+    public GrammarSerializerFoo(Grammar g) {
+        this.g = g;
+    }
+
+    public void open(String filename) throws IOException {
+        this.filename = filename;
+        FileOutputStream fos = new FileOutputStream(filename);
+        BufferedOutputStream bos = new BufferedOutputStream(fos);
+        out = new DataOutputStream(bos);
+        writeString(out, SerializedGrammar.COOKIE);
+        out.writeByte(SerializedGrammar.FORMAT_VERSION);
+    }
+
+    public void close() throws IOException {
+        if ( out!=null ) out.close();
+        out = null;
+    }
+
+
+    // WRITE
+
+    public void grammar(int grammarTokenType, String name) {
+        try {
+            /*
+            switch ( grammarTokenType ) {
+                case ANTLRParser.LEXER_GRAMMAR : out.writeByte('l'); break;
+                case ANTLRParser.PARSER_GRAMMAR : out.writeByte('p'); break;
+                case ANTLRParser.TREE_GRAMMAR: out.writeByte('t'); break;
+                case ANTLRParser.COMBINED_GRAMMAR : out.writeByte('c'); break;
+            }
+            writeString(out, name);
+            */
+            out.writeShort(g.getRules().size());
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void rule(String name) {
+        try {
+            out.writeByte('R');
+            writeString(out, name);
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void endRule() {
+        try {
+            out.writeByte('.');
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void block(int nalts) {
+        try {
+            out.writeByte('B');
+            out.writeShort(nalts);
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void alt(GrammarAST alt) {
+        numElementsInAlt = 0;
+        try {
+            out.writeByte('A');
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+        //streams.push(out);
+        //altBuf = new ByteArrayOutputStream();
+        //out = new DataOutputStream(altBuf);
+    }
+
+    public void endAlt() {
+        try {
+            //out.flush();
+            //out = (DataOutputStream)streams.pop(); // restore previous stream
+            out.writeByte(';');
+            //out.writeShort(numElementsInAlt);
+            //out.write(altBuf.toByteArray());
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void ruleRef(GrammarAST t) {
+        numElementsInAlt++;
+        try {
+            out.writeByte('r');
+            out.writeShort(g.getRuleIndex(t.getText()));
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void token(GrammarAST t) {
+        numElementsInAlt++;
+        try {
+            out.writeByte('t');
+            int ttype = g.getTokenType(t.getText());
+            out.writeShort(ttype);
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void charLiteral(GrammarAST t) {
+        numElementsInAlt++;
+        try {
+            if ( g.type!=Grammar.LEXER ) {
+                out.writeByte('t');
+                int ttype = g.getTokenType(t.getText());
+                out.writeShort(ttype);
+            }
+            // else lexer???
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void wildcard(GrammarAST t) {
+        numElementsInAlt++;
+        try {
+            out.writeByte('w');
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void range() { // must be char range
+        numElementsInAlt++;
+        try {
+            out.writeByte('-');
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void not() {
+        try {
+            out.writeByte('~');
+        }
+        catch (IOException ioe) {
+            ErrorManager.error(ErrorManager.MSG_CANNOT_WRITE_FILE, filename);
+        }
+    }
+
+    public void writeString(DataOutputStream out, String s) throws IOException {
+        out.writeBytes(s);
+        out.writeByte(';');
+    }
+}
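
The FORMAT comment at the top of GrammarSerializerFoo spells out the byte layout the writer methods produce. Purely as an illustration (not part of the tool), the same layout written by hand for a hypothetical one-alternative rule made of two token references, with made-up token types 4 and 5, looks like this:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Hand-rolled emission of the byte layout documented in the FORMAT comment,
    // for a hypothetical rule "r : ID ';' ;" with made-up token types 4 and 5.
    public class SerializedRuleSketch {
        static void writeString(DataOutputStream out, String s) throws IOException {
            out.writeBytes(s);       // same convention as GrammarSerializerFoo.writeString()
            out.writeByte(';');
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);

            out.writeByte('R');      // rule ::= R<rulename:string>;B<nalts:short><alts>.
            writeString(out, "r");
            out.writeByte('B');
            out.writeShort(1);       // one alternative
            out.writeByte('A');      // alt ::= A<elems>;
            out.writeByte('t');      // elem ::= t<tokentype:short>, here the ID token
            out.writeShort(4);
            out.writeByte('t');      // the ';' token
            out.writeShort(5);
            out.writeByte(';');      // end of alt
            out.writeByte('.');      // end of rule
            out.close();

            System.out.println(bytes.size() + " bytes for one rule");
        }
    }
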
diff --git a/tool/src/main/java/org/antlr/tool/GrammarSpelunker.java b/tool/src/main/java/org/antlr/tool/GrammarSpelunker.java
new file mode 100644
index 0000000..ddde27f
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarSpelunker.java
@@ -0,0 +1,250 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import java.io.*;
+import java.util.ArrayList;
+import java.util.List;
+
+/** Load a grammar file and scan it just until we learn a few items
+ *  of interest.  Currently: name, type, imports, tokenVocab, language option.
+ *
+ *  The Scanner (at the bottom of this class) converts the grammar to a stream like:
+ *
+ *   grammar Java ; options { backtrack true ; memoize true ; }
+ *   import JavaDecl JavaAnnotations JavaExpr ;
+ *   ... : ...
+ *
+ *  First ':' or '@' indicates we can stop looking for imports/options.
+ *
+ *  Then we just grab interesting grammar properties.
+ */
+public class GrammarSpelunker {
+    protected String grammarFileName;
+    protected String token;
+    protected Scanner scanner;
+
+    // grammar info / properties
+    protected String grammarModifier;
+    protected String grammarName;
+    protected String tokenVocab;
+    protected String language = "Java"; // default
+    protected String inputDirectory;
+    protected List<String> importedGrammars;
+
+    public GrammarSpelunker(String inputDirectory, String grammarFileName) {
+        this.inputDirectory = inputDirectory;
+        this.grammarFileName = grammarFileName;
+    }
+
+    void consume() throws IOException { token = scanner.nextToken(); }
+
+    protected void match(String expecting) throws IOException {
+        //System.out.println("match "+expecting+"; is "+token);
+        if ( token.equals(expecting) ) consume();
+        else throw new Error("Error parsing "+grammarFileName+": '"+token+
+                             "' not expected '"+expecting+"'");
+    }
+
+    public void parse() throws IOException {
+        Reader r = new FileReader((inputDirectory != null ? inputDirectory + File.separator : "") + grammarFileName);
+        BufferedReader br = new BufferedReader(r);
+        try {
+            scanner = new Scanner(br);
+            consume();
+            grammarHeader();
+            // scan until imports or options
+            while ( token!=null && !token.equals("@") && !token.equals(":") &&
+                    !token.equals("import") && !token.equals("options") )
+            {
+                consume();
+            }
+            if ( token.equals("options") ) options();
+            // scan until options or first rule
+            while ( token!=null && !token.equals("@") && !token.equals(":") &&
+                    !token.equals("import") )
+            {
+                consume();
+            }
+            if ( token.equals("import") ) imports();
+            // ignore rest of input; close up shop
+        }
+        finally {
+            if ( br!=null ) br.close();
+        }
+    }
+
+    protected void grammarHeader() throws IOException {
+        if ( token==null ) return;
+        if ( token.equals("tree") || token.equals("parser") || token.equals("lexer") ) {
+            grammarModifier=token;
+            consume();
+        }
+        match("grammar");
+        grammarName = token;
+        consume(); // move beyond name
+    }
+
+    // looks like "options { backtrack true ; tokenVocab MyTokens ; }"
+    protected void options() throws IOException {
+        match("options");
+        match("{");
+        while ( token!=null && !token.equals("}") ) {
+            String name = token;
+            consume();
+            String value = token;
+            consume();
+            match(";");
+            if ( name.equals("tokenVocab") ) tokenVocab = value;
+            if ( name.equals("language") ) language = value;
+        }
+        match("}");
+    }
+
+    // looks like "import JavaDecl JavaAnnotations JavaExpr ;"
+    protected void imports() throws IOException {
+        match("import");
+        importedGrammars = new ArrayList<String>();
+        while ( token!=null && !token.equals(";") ) {
+            importedGrammars.add(token);
+            consume();
+        }
+        match(";");
+        if ( importedGrammars.isEmpty() ) importedGrammars = null;
+    }
+
+    public String getGrammarModifier() { return grammarModifier; }
+
+    public String getGrammarName() { return grammarName; }
+
+    public String getTokenVocab() { return tokenVocab; }
+
+    public String getLanguage() { return language; }
+
+    public List<String> getImportedGrammars() { return importedGrammars; }
+
+    /** Strip comments and then return a stream of words and the
+     *  tokens {';', ':', '{', '}', '@'}
+     */
+    public static class Scanner {
+        public static final int EOF = -1;
+        Reader input;
+        int c;
+
+        public Scanner(Reader input) throws IOException {
+            this.input = input;
+            consume();
+        }
+
+        boolean isDIGIT() { return c>='0'&&c<='9'; }
+        boolean isID_START() { return c>='a'&&c<='z' || c>='A'&&c<='Z'; }
+        boolean isID_LETTER() { return isID_START() || c>='0'&&c<='9' || c=='_'; }
+        
+        void consume() throws IOException { c = input.read(); }
+
+        public String nextToken() throws IOException {
+            while ( c!=EOF ) {
+                //System.out.println("check "+(char)c);
+                switch ( c ) {
+                    case ';' : consume(); return ";";
+                    case '{' : consume(); return "{";
+                    case '}' : consume(); return "}";
+                    case ':' : consume(); return ":";
+                    case '@' : consume(); return "@";
+                    case '/' : COMMENT(); break;
+                    case '\'': return STRING();
+                    default:
+                        if ( isID_START() ) return ID();
+                        else if ( isDIGIT() ) return INT();
+                        consume(); // ignore anything else
+                }
+            }
+            return null;
+        }
+
+        /** ID : LETTER+ ; // an ID is a sequence of &gt;=1 letters */
+        String ID() throws IOException {
+            StringBuilder buf = new StringBuilder();
+            while ( c!=EOF && isID_LETTER() ) { buf.append((char)c); consume(); }
+            return buf.toString();
+        }
+
+        String INT() throws IOException {
+            StringBuilder buf = new StringBuilder();
+            while ( c!=EOF && isDIGIT() ) { buf.append((char)c); consume(); }
+            return buf.toString();
+        }
+
+        String STRING() throws IOException {
+            StringBuilder buf = new StringBuilder();
+            consume();
+            while ( c!=EOF && c!='\'' ) {
+                if ( c=='\\' ) {
+                    buf.append((char)c);
+                    consume();
+                }
+                buf.append((char)c);
+                consume();
+            }
+            consume(); // scan past '
+            return buf.toString();
+        }
+
+        void COMMENT() throws IOException {
+            if ( c=='/' ) {
+                consume();
+                if ( c=='*' ) {
+                    consume();
+        scarf:
+                    while ( true ) {
+                        if ( c=='*' ) {
+                            consume();
+                            if ( c=='/' ) { consume(); break scarf; }
+                        }
+                        else {
+                            while ( c!=EOF && c!='*' ) consume();
+                        }
+                    }
+                }
+                else if ( c=='/' ) {
+                    while ( c!=EOF && c!='\n' ) consume();
+                }
+            }
+        }
+    }
+
+    /** Tester; Give grammar filename as arg */
+    public static void main(String[] args) throws IOException {
+        GrammarSpelunker g = new GrammarSpelunker(".", args[0]);
+        g.parse();
+        System.out.println(g.grammarModifier+" grammar "+g.grammarName);
+        System.out.println("language="+g.language);
+        System.out.println("tokenVocab="+g.tokenVocab);
+        System.out.println("imports="+g.importedGrammars);
+    }
+}
diff --git a/tool/src/main/java/org/antlr/tool/GrammarSyntaxMessage.java b/tool/src/main/java/org/antlr/tool/GrammarSyntaxMessage.java
new file mode 100644
index 0000000..0950777
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarSyntaxMessage.java
@@ -0,0 +1,82 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.runtime.RecognitionException;
+import org.antlr.runtime.Token;
+import org.stringtemplate.v4.ST;
+
+/** A problem with the syntax of your ANTLR grammar such as
+ *  "The '{' came as a complete surprise to me at this point in your program"
+ */
+public class GrammarSyntaxMessage extends Message {
+	public Grammar g;
+	/** Most of the time, we'll have a token and so this will be set. */
+	public Token offendingToken;
+	public RecognitionException exception;
+
+	public GrammarSyntaxMessage(int msgID,
+								Grammar grammar,
+								Token offendingToken,
+								RecognitionException exception)
+	{
+		this(msgID,grammar,offendingToken,null,exception);
+	}
+
+	public GrammarSyntaxMessage(int msgID,
+								Grammar grammar,
+								Token offendingToken,
+								Object arg,
+								RecognitionException exception)
+	{
+		super(msgID, arg, null);
+		this.offendingToken = offendingToken;
+		this.exception = exception;
+		this.g = grammar;
+	}
+
+	@Override
+	public String toString() {
+		line = 0;
+		column = 0;
+		if ( offendingToken!=null ) {
+			line = offendingToken.getLine();
+			column = offendingToken.getCharPositionInLine();
+		}
+		// TODO: actually set the right Grammar instance to get the filename
+		// TODO: have to update all v2 grammar files for this. or use errormanager and tool to get the current grammar
+		if (g != null) {
+			file = g.getFileName();
+		}
+		ST st = getMessageTemplate();
+		if ( arg!=null ) {
+			st.add("arg", arg);
+		}
+		return super.toString(st);
+	}
+}
diff --git a/tool/src/main/java/org/antlr/tool/GrammarUnreachableAltsMessage.java b/tool/src/main/java/org/antlr/tool/GrammarUnreachableAltsMessage.java
new file mode 100644
index 0000000..30ed750
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/GrammarUnreachableAltsMessage.java
@@ -0,0 +1,89 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.NFAState;
+import org.stringtemplate.v4.ST;
+
+import java.util.List;
+
+/** Reports that one or more alternatives of a decision are unreachable;
+ *  no input sequence can ever select them.
+ */
+public class GrammarUnreachableAltsMessage extends Message {
+	public DecisionProbe probe;
+    public List<Integer> alts;
+
+	public GrammarUnreachableAltsMessage(DecisionProbe probe,
+										 List<Integer> alts)
+	{
+		super(ErrorManager.MSG_UNREACHABLE_ALTS);
+		this.probe = probe;
+		this.alts = alts;
+		// flip msg ID if alts are actually token refs in Tokens rule
+		if ( probe.dfa.isTokensRuleDecision() ) {
+			setMessageID(ErrorManager.MSG_UNREACHABLE_TOKENS);
+		}
+	}
+
+	@Override
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getCharPositionInLine();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+
+		ST st = getMessageTemplate();
+
+		if ( probe.dfa.isTokensRuleDecision() ) {
+			// alts are token rules, convert to the names instead of numbers
+			for (int i = 0; i < alts.size(); i++) {
+				Integer altI = alts.get(i);
+				String tokenName =
+					probe.getTokenNameForTokensRuleAlt(altI);
+				// reset the line/col to the token definition
+				NFAState ruleStart =
+					probe.dfa.nfa.grammar.getRuleStartState(tokenName);
+				line = ruleStart.associatedASTNode.getLine();
+				column = ruleStart.associatedASTNode.getCharPositionInLine();
+				st.add("tokens", tokenName);
+			}
+		}
+		else {
+			// regular alt numbers, show the alts
+			st.add("alts", alts);
+		}
+
+		return super.toString(st);
+	}
+
+}
diff --git a/tool/src/main/java/org/antlr/tool/Interp.java b/tool/src/main/java/org/antlr/tool/Interp.java
new file mode 100644
index 0000000..71a572d
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/Interp.java
@@ -0,0 +1,131 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.Tool;
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.ParseTree;
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+/** Interpret any ANTLR grammar:
+ *
+ *  java Interp file.g tokens-to-ignore start-rule input-file
+ *
+ *  java Interp C.g 'WS COMMENT' program t.c
+ *
+ *  where the WS and COMMENT are the names of tokens you want to have
+ *  the parser ignore.
+ */
+public class Interp {
+    public static class FilteringTokenStream extends CommonTokenStream {
+        public FilteringTokenStream(TokenSource src) { super(src); }
+        Set<Integer> hide = new HashSet<Integer>();
+		@Override
+        protected void sync(int i) {
+            super.sync(i);
+            if ( hide.contains(get(i).getType()) ) get(i).setChannel(Token.HIDDEN_CHANNEL);
+        }
+        public void setTokenTypeChannel(int ttype, int channel) {
+            hide.add(ttype);
+        }
+    }
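+
+    /* Minimal usage sketch (mirrors main() below): route WS and COMMENT
+     * tokens to the hidden channel so the parse engine never sees them.
+     *
+     *   FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
+     *   tokens.setTokenTypeChannel(lexer.getTokenType("WS"), Token.HIDDEN_CHANNEL);
+     *   tokens.setTokenTypeChannel(lexer.getTokenType("COMMENT"), Token.HIDDEN_CHANNEL);
+     *
+     * Note: sync() always moves hidden types to Token.HIDDEN_CHANNEL; the
+     * channel argument of setTokenTypeChannel() is currently ignored.
+     */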
+
+	// args: grammar file, tokens to ignore, start rule, input file
+	public static void main(String[] args) throws Exception {
+		if ( args.length!=4 ) {
+			System.err.println("java Interp file.g tokens-to-ignore start-rule input-file");
+			return;
+		}
+		String grammarFileName = args[0];
+		String ignoreTokens = args[1];
+		String startRule = args[2];
+		String inputFileName = args[3];
+
+		// TODO: using wrong constructor now
+		Tool tool = new Tool();
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar parser = new Grammar(tool, grammarFileName, composite);
+		composite.setDelegationRoot(parser);
+		FileReader fr = new FileReader(grammarFileName);
+		BufferedReader br = new BufferedReader(fr);
+		parser.parseAndBuildAST(br);
+		br.close();
+
+		parser.composite.assignTokenTypes();
+		parser.composite.defineGrammarSymbols();
+		parser.composite.createNFAs();
+
+		List<? extends Collection<? extends Rule>> leftRecursiveRules = parser.checkAllRulesForLeftRecursion();
+		if ( leftRecursiveRules.size()>0 ) {
+			return;
+		}
+
+		if ( parser.getRule(startRule)==null ) {
+			System.out.println("undefined start rule "+startRule);
+			return;
+		}
+
+		String lexerGrammarText = parser.getLexerGrammar();
+		Grammar lexer = new Grammar(tool);
+		lexer.importTokenVocabulary(parser);
+		lexer.fileName = grammarFileName;
+		lexer.setTool(tool);
+		if ( lexerGrammarText!=null ) {
+			lexer.setGrammarContent(lexerGrammarText);
+		}
+		else {
+			System.err.println("no lexer grammar found in "+grammarFileName);
+		}
+		lexer.composite.createNFAs();
+		
+		CharStream input =
+			new ANTLRFileStream(inputFileName);
+		Interpreter lexEngine = new Interpreter(lexer, input);
+		FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
+		StringTokenizer tk = new StringTokenizer(ignoreTokens, " ");
+		while ( tk.hasMoreTokens() ) {
+			String tokenName = tk.nextToken();
+			tokens.setTokenTypeChannel(lexer.getTokenType(tokenName), 99);
+		}
+
+		if ( parser.getRule(startRule)==null ) {
+			System.err.println("Rule "+startRule+" does not exist in "+grammarFileName);
+			return;
+		}
+		Interpreter parseEngine = new Interpreter(parser, tokens);
+		ParseTree t = parseEngine.parse(startRule);
+		System.out.println(t.toStringTree());
+	}
+}
diff --git a/tool/src/main/java/org/antlr/tool/Interpreter.java b/tool/src/main/java/org/antlr/tool/Interpreter.java
new file mode 100644
index 0000000..298af5a
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/Interpreter.java
@@ -0,0 +1,456 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.*;
+import org.antlr.misc.IntervalSet;
+import org.antlr.runtime.*;
+import org.antlr.runtime.debug.BlankDebugEventListener;
+import org.antlr.runtime.debug.DebugEventListener;
+import org.antlr.runtime.debug.ParseTreeBuilder;
+import org.antlr.runtime.tree.ParseTree;
+
+import java.util.List;
+import java.util.Stack;
+
+/** The recognition interpreter/engine for grammars.  Separated
+ *  out of Grammar as it's related, but technically not a Grammar function.
+ *  You create an interpreter for a grammar and an input stream.  This object
+ *  can act as a TokenSource so that you can hook up two grammars (via
+ *  a CommonTokenStream) to lex/parse.  Being a token source only makes sense
+ *  for a lexer grammar of course.
+ */
+public class Interpreter implements TokenSource {
+	protected Grammar grammar;
+	protected IntStream input;
+
+	/** A lexer listener that just creates token objects as they
+	 *  are matched.  scan() uses this listener to get a single object.
+	 *  To get a stream of tokens, you must call scan() multiple times,
+	 *  recording the token object result after each call.
+	 */
+	class LexerActionGetTokenType extends BlankDebugEventListener {
+		public CommonToken token;
+		Grammar g;
+		public LexerActionGetTokenType(Grammar g) {
+			this.g = g;
+		}
+
+		@Override
+		public void exitRule(String grammarFileName, String ruleName) {
+			if ( !ruleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ){
+				int type = g.getTokenType(ruleName);
+				int channel = Token.DEFAULT_CHANNEL;
+				token = new CommonToken((CharStream)input,type,channel,0,0);
+			}
+		}
+	}
+
+	public Interpreter(Grammar grammar, IntStream input) {
+		this.grammar = grammar;
+		this.input = input;
+	}
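+
+	/* Usage sketch (see org.antlr.tool.Interp for the full version): a
+	 * lexer-grammar Interpreter acts as the TokenSource feeding a
+	 * parser-grammar Interpreter through a CommonTokenStream.
+	 *
+	 *   CharStream input = new ANTLRFileStream(inputFileName);
+	 *   Interpreter lexEngine = new Interpreter(lexerGrammar, input);
+	 *   CommonTokenStream tokens = new CommonTokenStream(lexEngine);
+	 *   Interpreter parseEngine = new Interpreter(parserGrammar, tokens);
+	 *   ParseTree t = parseEngine.parse(startRuleName);
+	 *
+	 * lexerGrammar, parserGrammar, and startRuleName are placeholders for
+	 * whatever Grammar objects and start rule the caller has built.
+	 */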
+
+	@Override
+	public Token nextToken() {
+		if ( grammar.type!=Grammar.LEXER ) {
+			return null;
+		}
+		if ( input.LA(1)==CharStream.EOF ) {
+            return new CommonToken((CharStream)input,Token.EOF,Token.DEFAULT_CHANNEL,input.index(),input.index());
+		}
+		int start = input.index();
+		int charPos = ((CharStream)input).getCharPositionInLine();
+		CommonToken token = null;
+		loop:
+		while (input.LA(1)!=CharStream.EOF) {
+			try {
+				token = scan(Grammar.ARTIFICIAL_TOKENS_RULENAME, null);
+				break;
+			}
+			catch (RecognitionException re) {
+				// report a problem and try for another
+				reportScanError(re);
+				continue loop;
+			}
+		}
+		// the scan can only set type
+		// we must set the line, and other junk here to make it a complete token
+		int stop = input.index()-1;
+		if ( token==null ) {
+            return new CommonToken((CharStream)input,Token.EOF,Token.DEFAULT_CHANNEL,start,start);
+		}
+		token.setLine(((CharStream)input).getLine());
+		token.setStartIndex(start);
+		token.setStopIndex(stop);
+		token.setCharPositionInLine(charPos);
+		return token;
+	}
+
+	/** For a given input char stream, try to match against the NFA
+	 *  starting at startRule.  This is a deterministic parse even though
+	 *  it is using an NFA because it uses DFAs at each decision point to
+	 *  predict which alternative will succeed.  This is exactly what the
+	 *  generated parser will do.
+	 *
+	 *  This only does lexer grammars.
+	 *
+	 *  The CommonToken-returning scan(...) overloads below return the token
+	 *  whose type corresponds to the final rule end state.
+	 */
+	public void scan(String startRule,
+					 DebugEventListener actions,
+					 List<NFAState> visitedStates)
+		throws RecognitionException
+	{
+		if ( grammar.type!=Grammar.LEXER ) {
+			return;
+		}
+
+		//System.out.println("scan("+startRule+",'"+in.substring(in.index(),in.size()-1)+"')");
+		// Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet
+		if ( grammar.getRuleStartState(startRule)==null ) {
+			grammar.buildNFA();
+		}
+
+		if ( !grammar.allDecisionDFAHaveBeenCreated() ) {
+			// Create the DFA predictors for each decision
+			grammar.createLookaheadDFAs();
+		}
+
+		// do the parse
+		Stack<NFAState> ruleInvocationStack = new Stack<NFAState>();
+		NFAState start = grammar.getRuleStartState(startRule);
+		NFAState stop = grammar.getRuleStopState(startRule);
+		parseEngine(startRule, start, stop, input, ruleInvocationStack,
+					actions, visitedStates);
+	}
+
+	public CommonToken scan(String startRule)
+		throws RecognitionException
+	{
+		return scan(startRule, null);
+	}
+
+	public CommonToken scan(String startRule,
+							List<NFAState> visitedStates)
+		throws RecognitionException
+	{
+		LexerActionGetTokenType actions = new LexerActionGetTokenType(grammar);
+		scan(startRule, actions, visitedStates);
+		return actions.token;
+	}
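+
+	/* Sketch of matching a single token with a lexer-grammar Interpreter;
+	 * the rule name "ID" is just an assumed token rule in the grammar.
+	 *
+	 *   Interpreter lexEngine = new Interpreter(lexerGrammar, input);
+	 *   CommonToken t = lexEngine.scan("ID");  // type set by exitRule()
+	 */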
+
+	public void parse(String startRule,
+					  DebugEventListener actions,
+					  List<NFAState> visitedStates)
+		throws RecognitionException
+	{
+		//System.out.println("parse("+startRule+")");
+		// Build NFAs/DFAs from the grammar AST if NFAs haven't been built yet
+		if ( grammar.getRuleStartState(startRule)==null ) {
+			grammar.buildNFA();
+		}
+		if ( !grammar.allDecisionDFAHaveBeenCreated() ) {
+			// Create the DFA predictors for each decision
+			grammar.createLookaheadDFAs();
+		}
+		// do the parse
+		Stack<NFAState> ruleInvocationStack = new Stack<NFAState>();
+		NFAState start = grammar.getRuleStartState(startRule);
+		NFAState stop = grammar.getRuleStopState(startRule);
+		parseEngine(startRule, start, stop, input, ruleInvocationStack,
+					actions, visitedStates);
+	}
+
+	public ParseTree parse(String startRule)
+		throws RecognitionException
+	{
+		return parse(startRule, null);
+	}
+
+	public ParseTree parse(String startRule, List<NFAState> visitedStates)
+		throws RecognitionException
+	{
+		ParseTreeBuilder actions = new ParseTreeBuilder(grammar.name);
+		try {
+			parse(startRule, actions, visitedStates);
+		}
+		catch (RecognitionException re) {
+			// Errors are tracked via the ANTLRDebugInterface
+			// Exceptions are used just to blast out of the parse engine
+			// The error will be in the parse tree.
+		}
+		return actions.getTree();
+	}
+
+	/** Fill a list of all NFA states visited during the parse */
+	protected void parseEngine(String startRule,
+							   NFAState start,
+							   NFAState stop,
+							   IntStream input,
+							   Stack<NFAState> ruleInvocationStack,
+							   DebugEventListener actions,
+							   List<NFAState> visitedStates)
+		throws RecognitionException
+	{
+		NFAState s = start;
+		if ( actions!=null ) {
+			actions.enterRule(s.nfa.grammar.getFileName(), start.enclosingRule.name);
+		}
+		int t = input.LA(1);
+		while ( s!=stop ) {
+			if ( visitedStates!=null ) {
+				visitedStates.add(s);
+			}
+			/*
+			System.out.println("parse state "+s.stateNumber+" input="+
+				s.nfa.grammar.getTokenDisplayName(t));
+				*/
+			// CASE 1: decision state
+			if ( s.getDecisionNumber()>0 && s.nfa.grammar.getNumberOfAltsForDecisionNFA(s)>1 ) {
+				// decision point, must predict and jump to alt
+				DFA dfa = s.nfa.grammar.getLookaheadDFA(s.getDecisionNumber());
+				/*
+				if ( s.nfa.grammar.type!=Grammar.LEXER ) {
+					System.out.println("decision: "+
+								   dfa.getNFADecisionStartState().getDescription()+
+								   " input="+s.nfa.grammar.getTokenDisplayName(t));
+				}
+				*/
+				int m = input.mark();
+				int predictedAlt = predict(dfa);
+				if ( predictedAlt == NFA.INVALID_ALT_NUMBER ) {
+					String description = dfa.getNFADecisionStartState().getDescription();
+					NoViableAltException nvae =
+						new NoViableAltException(description,
+													  dfa.getDecisionNumber(),
+													  s.stateNumber,
+													  input);
+					if ( actions!=null ) {
+						actions.recognitionException(nvae);
+					}
+					input.consume(); // recover
+					throw nvae;
+				}
+				input.rewind(m);
+				int parseAlt =
+					s.translateDisplayAltToWalkAlt(predictedAlt);
+				/*
+				if ( s.nfa.grammar.type!=Grammar.LEXER ) {
+					System.out.println("predicted alt "+predictedAlt+", parseAlt "+
+									   parseAlt);
+				}
+				*/
+				NFAState alt;
+				if ( parseAlt > s.nfa.grammar.getNumberOfAltsForDecisionNFA(s) ) {
+					// implied branch of loop etc...
+					alt = s.nfa.grammar.nfa.getState( s.endOfBlockStateNumber );
+				}
+				else {
+					alt = s.nfa.grammar.getNFAStateForAltOfDecision(s, parseAlt);
+				}
+				s = (NFAState)alt.transition[0].target;
+				continue;
+			}
+
+			// CASE 2: finished matching a rule
+			if ( s.isAcceptState() ) { // end of rule node
+				if ( actions!=null ) {
+					actions.exitRule(s.nfa.grammar.getFileName(), s.enclosingRule.name);
+				}
+				if ( ruleInvocationStack.empty() ) {
+					// done parsing.  Hit the start state.
+					//System.out.println("stack empty in stop state for "+s.getEnclosingRule());
+					break;
+				}
+				// pop invoking state off the stack to know where to return to
+				NFAState invokingState = ruleInvocationStack.pop();
+				RuleClosureTransition invokingTransition =
+						(RuleClosureTransition)invokingState.transition[0];
+				// move to node after state that invoked this rule
+				s = invokingTransition.followState;
+				continue;
+			}
+
+			Transition trans = s.transition[0];
+			Label label = trans.label;
+			if ( label.isSemanticPredicate() ) {
+				FailedPredicateException fpe =
+					new FailedPredicateException(input,
+												 s.enclosingRule.name,
+												 "can't deal with predicates yet");
+				if ( actions!=null ) {
+					actions.recognitionException(fpe);
+				}
+			}
+
+			// CASE 3: epsilon transition
+			if ( label.isEpsilon() ) {
+				// CASE 3a: rule invocation state
+				if ( trans instanceof RuleClosureTransition ) {
+					ruleInvocationStack.push(s);
+					s = (NFAState)trans.target;
+					//System.out.println("call "+s.enclosingRule.name+" from "+s.nfa.grammar.getFileName());
+					if ( actions!=null ) {
+						actions.enterRule(s.nfa.grammar.getFileName(), s.enclosingRule.name);
+					}
+					// could be jumping to new grammar, make sure DFA created
+					if ( !s.nfa.grammar.allDecisionDFAHaveBeenCreated() ) {
+						s.nfa.grammar.createLookaheadDFAs();
+					}
+				}
+				// CASE 3b: plain old epsilon transition, just move
+				else {
+					s = (NFAState)trans.target;
+				}
+			}
+
+			// CASE 4: match label on transition
+			else if ( label.matches(t) ) {
+				if ( actions!=null ) {
+					if ( s.nfa.grammar.type == Grammar.PARSER ||
+						 s.nfa.grammar.type == Grammar.COMBINED )
+					{
+						actions.consumeToken(((TokenStream)input).LT(1));
+					}
+				}
+				s = (NFAState)s.transition[0].target;
+				input.consume();
+				t = input.LA(1);
+			}
+
+			// CASE 5: error condition; label is inconsistent with input
+			else {
+				if ( label.isAtom() ) {
+					MismatchedTokenException mte =
+						new MismatchedTokenException(label.getAtom(), input);
+					if ( actions!=null ) {
+						actions.recognitionException(mte);
+					}
+					input.consume(); // recover
+					throw mte;
+				}
+				else if ( label.isSet() ) {
+					MismatchedSetException mse =
+						new MismatchedSetException(((IntervalSet)label.getSet()).toRuntimeBitSet(),
+												   input);
+					if ( actions!=null ) {
+						actions.recognitionException(mse);
+					}
+					input.consume(); // recover
+					throw mse;
+				}
+				else if ( label.isSemanticPredicate() ) {
+					FailedPredicateException fpe =
+						new FailedPredicateException(input,
+													 s.enclosingRule.name,
+													 label.getSemanticContext().toString());
+					if ( actions!=null ) {
+						actions.recognitionException(fpe);
+					}
+					input.consume(); // recover
+					throw fpe;
+				}
+				else {
+					throw new RecognitionException(input); // unknown error
+				}
+			}
+		}
+		//System.out.println("hit stop state for "+stop.getEnclosingRule());
+		if ( actions!=null ) {
+			actions.exitRule(s.nfa.grammar.getFileName(), stop.enclosingRule.name);
+		}
+	}
+
+	/** Given an input stream, return the unique alternative predicted by
+	 *  matching the input.  Upon error, return NFA.INVALID_ALT_NUMBER
+	 *  The first symbol of lookahead is presumed to be primed; that is,
+	 *  input.lookahead(1) must point at the input symbol you want to start
+	 *  predicting with.
+	 */
+	public int predict(DFA dfa) {
+		DFAState s = dfa.startState;
+		int c = input.LA(1);
+		Transition eotTransition = null;
+	dfaLoop:
+		while ( !s.isAcceptState() ) {
+			/*
+			System.out.println("DFA.predict("+s.getStateNumber()+", "+
+					dfa.getNFA().getGrammar().getTokenName(c)+")");
+			*/
+			// for each edge of s, look for intersection with current char
+			for (int i=0; i<s.getNumberOfTransitions(); i++) {
+				Transition t = s.transition(i);
+				// special case: EOT matches any char
+				if ( t.label.matches(c) ) {
+					// take transition i
+					s = (DFAState)t.target;
+					input.consume();
+					c = input.LA(1);
+					continue dfaLoop;
+				}
+				if ( t.label.getAtom()==Label.EOT ) {
+					eotTransition = t;
+				}
+			}
+			if ( eotTransition!=null ) {
+				s = (DFAState)eotTransition.target;
+				continue dfaLoop;
+			}
+			/*
+			ErrorManager.error(ErrorManager.MSG_NO_VIABLE_DFA_ALT,
+							   s,
+							   dfa.nfa.grammar.getTokenName(c));
+			*/
+			return NFA.INVALID_ALT_NUMBER;
+		}
+		// woohoo!  We know which alt to predict
+		// nothing emanates from a stop state; must terminate anyway
+		/*
+		System.out.println("DFA stop state "+s.getStateNumber()+" predicts "+
+				s.getUniquelyPredictedAlt());
+		*/
+		return s.getUniquelyPredictedAlt();
+	}
+
+	public void reportScanError(RecognitionException re) {
+		CharStream cs = (CharStream)input;
+		// print as good a message as we can, given that we do not have
+		// a Lexer object and, hence, cannot call the routine to get a
+		// decent error message.
+		System.err.println("problem matching token at "+
+			cs.getLine()+":"+cs.getCharPositionInLine()+" "+re);
+	}
+
+	@Override
+	public String getSourceName() {
+		return input.getSourceName();
+	}
+
+}
diff --git a/tool/src/main/java/org/antlr/tool/LeftRecursionCyclesMessage.java b/tool/src/main/java/org/antlr/tool/LeftRecursionCyclesMessage.java
new file mode 100644
index 0000000..98e969c
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/LeftRecursionCyclesMessage.java
@@ -0,0 +1,52 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.stringtemplate.v4.ST;
+
+import java.util.Collection;
+
+/** Similar to LeftRecursionMessage except this is used for announcing
+ *  cycles found by walking rules without decisions; the other msg is
+ *  invoked when a decision DFA construction finds a problem in closure.
+ */
+public class LeftRecursionCyclesMessage extends Message {
+	public Collection<? extends Collection<? extends Rule>> cycles;
+
+	public LeftRecursionCyclesMessage(Collection<? extends Collection<? extends Rule>> cycles) {
+		super(ErrorManager.MSG_LEFT_RECURSION_CYCLES);
+		this.cycles = cycles;
+	}
+
+	@Override
+	public String toString() {
+		ST st = getMessageTemplate();
+		st.add("listOfCycles", cycles);
+		return super.toString(st);
+	}
+}
diff --git a/tool/src/main/java/org/antlr/tool/LeftRecursiveRuleAnalyzer.java b/tool/src/main/java/org/antlr/tool/LeftRecursiveRuleAnalyzer.java
new file mode 100644
index 0000000..61e1891
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/LeftRecursiveRuleAnalyzer.java
@@ -0,0 +1,353 @@
+package org.antlr.tool;
+
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.grammar.v3.*;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.CommonTreeNodeStream;
+import org.antlr.runtime.tree.TreeNodeStream;
+import org.stringtemplate.v4.*;
+
+import java.util.*;
+
+/** Collects the alternatives of an immediately left-recursive rule and
+ *  rewrites them into equivalent precedence-climbing rules (a start rule,
+ *  an operator-precedence rule, and a primary rule).
+ */
+public class LeftRecursiveRuleAnalyzer extends LeftRecursiveRuleWalker {
+	public static enum ASSOC { left, right };
+
+	public Grammar g;
+	public CodeGenerator generator;
+	public String ruleName;
+	Map<Integer, Integer> tokenToPrec = new HashMap<Integer, Integer>();
+	public LinkedHashMap<Integer, String> binaryAlts = new LinkedHashMap<Integer, String>();
+	public LinkedHashMap<Integer, String> ternaryAlts = new LinkedHashMap<Integer, String>();
+	public LinkedHashMap<Integer, String> suffixAlts = new LinkedHashMap<Integer, String>();
+	public List<String> prefixAlts = new ArrayList<String>();
+	public List<String> otherAlts = new ArrayList<String>();
+
+	public GrammarAST retvals;
+
+	public STGroup recRuleTemplates;
+	public String language;
+
+	public Map<Integer, ASSOC> altAssociativity = new HashMap<Integer, ASSOC>();
+
+	public LeftRecursiveRuleAnalyzer(TreeNodeStream input, Grammar g, String ruleName) {
+		super(input);
+		this.g = g;
+		this.ruleName = ruleName;
+		language = (String)g.getOption("language");
+		generator = new CodeGenerator(g.tool, g, language);
+		generator.loadTemplates(language);
+		loadPrecRuleTemplates();
+	}
+
+	public void loadPrecRuleTemplates() {
+		recRuleTemplates =
+			new ToolSTGroupFile(CodeGenerator.classpathTemplateRootDirectoryName+
+							"/LeftRecursiveRules.stg");
+		if ( !recRuleTemplates.isDefined("recRuleName") ) {
+			ErrorManager.error(ErrorManager.MSG_MISSING_CODE_GEN_TEMPLATES,
+							   "PrecRules");
+			return;
+		}
+	}
+
+	@Override
+	public void setReturnValues(GrammarAST t) {
+		System.out.println(t);
+		retvals = t;
+	}
+
+	@Override
+	public void setTokenPrec(GrammarAST t, int alt) {
+		int ttype = g.getTokenType(t.getText());
+		tokenToPrec.put(ttype, alt);
+		ASSOC assoc = ASSOC.left;
+		if ( t.terminalOptions!=null ) {
+			String a = (String)t.terminalOptions.get("assoc");
+			if ( a!=null ) {
+				if ( a.equals(ASSOC.right.toString()) ) {
+					assoc = ASSOC.right;
+				}
+				else {
+					ErrorManager.error(ErrorManager.MSG_ILLEGAL_OPTION_VALUE, "assoc", assoc);
+				}
+			}
+		}
+
+		if ( altAssociativity.get(alt)!=null && altAssociativity.get(alt)!=assoc ) {
+			ErrorManager.error(ErrorManager.MSG_ALL_OPS_NEED_SAME_ASSOC, alt);
+		}
+		altAssociativity.put(alt, assoc);
+
+		//System.out.println("op " + alt + ": " + t.getText()+", assoc="+assoc);
+	}
+
+	@Override
+	public void binaryAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
+		altTree = GrammarAST.dupTree(altTree);
+		rewriteTree = GrammarAST.dupTree(rewriteTree);
+
+		stripSynPred(altTree);
+		stripLeftRecursion(altTree);
+
+		// rewrite e to be e_[rec_arg]
+		int nextPrec = nextPrecedence(alt);
+		ST refST = recRuleTemplates.getInstanceOf("recRuleRef");
+		refST.add("ruleName", ruleName);
+		refST.add("arg", nextPrec);
+		altTree = replaceRuleRefs(altTree, refST.render());
+
+		String altText = text(altTree);
+		altText = altText.trim();
+		altText += "{}"; // append empty action to prevent pred hoisting
+		ST nameST = recRuleTemplates.getInstanceOf("recRuleName");
+		nameST.add("ruleName", ruleName);
+		rewriteTree = replaceRuleRefs(rewriteTree, "$" + nameST.render());
+		String rewriteText = text(rewriteTree);
+		binaryAlts.put(alt, altText + (rewriteText != null ? " " + rewriteText : ""));
+		//System.out.println("binaryAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
+	}
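+
+	/* Worked example (the exact rewritten text comes from the
+	 * LeftRecursiveRules.stg templates): given
+	 *
+	 *   e : e '*' e | INT ;
+	 *
+	 * the first alternative is a binary alt.  The leading recursive "e" is
+	 * stripped, the trailing "e" is rewritten to a parameterized reference
+	 * rendered by recRuleRef (something like e[nextPrec]), and an empty
+	 * action "{}" is appended, yielding roughly:  '*' e[nextPrec] {}
+	 */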
+
+	/** Convert e ? e : e  &rarr;  ? e : e_[nextPrec] */
+	@Override
+	public void ternaryAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
+		altTree = GrammarAST.dupTree(altTree);
+		rewriteTree = GrammarAST.dupTree(rewriteTree);
+
+		stripSynPred(altTree);
+		stripLeftRecursion(altTree);
+
+		int nextPrec = nextPrecedence(alt);
+		ST refST = recRuleTemplates.getInstanceOf("recRuleRef");
+		refST.add("ruleName", ruleName);
+		refST.add("arg", nextPrec);
+		altTree = replaceLastRuleRef(altTree, refST.render());
+
+		String altText = text(altTree);
+		altText = altText.trim();
+		altText += "{}"; // append empty action to prevent pred hoisting
+		ST nameST = recRuleTemplates.getInstanceOf("recRuleName");
+		nameST.add("ruleName", ruleName);
+		rewriteTree = replaceRuleRefs(rewriteTree, "$" + nameST.render());
+		String rewriteText = text(rewriteTree);
+		ternaryAlts.put(alt, altText + (rewriteText != null ? " " + rewriteText : ""));
+		//System.out.println("ternaryAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
+	}
+
+	@Override
+	public void prefixAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
+		altTree = GrammarAST.dupTree(altTree);
+		rewriteTree = GrammarAST.dupTree(rewriteTree);
+
+		stripSynPred(altTree);
+
+		int nextPrec = precedence(alt);
+		// rewrite e to be e_[rec_arg]
+		ST refST = recRuleTemplates.getInstanceOf("recRuleRef");
+		refST.add("ruleName", ruleName);
+		refST.add("arg", nextPrec);
+		altTree = replaceRuleRefs(altTree, refST.render());
+		String altText = text(altTree);
+		altText = altText.trim();
+		altText += "{}"; // append empty action to prevent pred hoisting
+
+		ST nameST = recRuleTemplates.getInstanceOf("recRuleName");
+		nameST.add("ruleName", ruleName);
+		rewriteTree = replaceRuleRefs(rewriteTree, nameST.render());
+		String rewriteText = text(rewriteTree);
+
+		prefixAlts.add(altText + (rewriteText != null ? " " + rewriteText : ""));
+		//System.out.println("prefixAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
+	}
+
+	@Override
+	public void suffixAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
+		altTree = GrammarAST.dupTree(altTree);
+		rewriteTree = GrammarAST.dupTree(rewriteTree);
+		stripSynPred(altTree);
+		stripLeftRecursion(altTree);
+		ST nameST = recRuleTemplates.getInstanceOf("recRuleName");
+		nameST.add("ruleName", ruleName);
+		rewriteTree = replaceRuleRefs(rewriteTree, "$" + nameST.render());
+		String rewriteText = text(rewriteTree);
+		String altText = text(altTree);
+		altText = altText.trim();
+		suffixAlts.put(alt, altText + (rewriteText != null ? " " + rewriteText : ""));
+//		System.out.println("suffixAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
+	}
+
+	@Override
+	public void otherAlt(GrammarAST altTree, GrammarAST rewriteTree, int alt) {
+		altTree = GrammarAST.dupTree(altTree);
+		rewriteTree = GrammarAST.dupTree(rewriteTree);
+		stripSynPred(altTree);
+		stripLeftRecursion(altTree);
+		String altText = text(altTree);
+
+		String rewriteText = text(rewriteTree);
+		otherAlts.add(altText + (rewriteText != null ? " " + rewriteText : ""));
+		//System.out.println("otherAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
+	}
+
+	// --------- get transformed rules ----------------
+
+	public String getArtificialPrecStartRule() {
+		ST ruleST = recRuleTemplates.getInstanceOf("recRuleStart");
+		ruleST.add("ruleName", ruleName);
+		ruleST.add("minPrec", 0);
+		ruleST.add("userRetvals", retvals);
+		fillRetValAssignments(ruleST, "recRuleName");
+
+		System.out.println("start: " + ruleST);
+		return ruleST.render();
+	}
+
+	public String getArtificialOpPrecRule() {
+		ST ruleST = recRuleTemplates.getInstanceOf("recRule");
+		ruleST.add("ruleName", ruleName);
+		ruleST.add("buildAST", grammar.buildAST());
+		ST argDefST =
+			generator.getTemplates().getInstanceOf("recRuleDefArg");
+		ruleST.add("precArgDef", argDefST);
+		ST ruleArgST =
+			generator.getTemplates().getInstanceOf("recRuleArg");
+		ruleST.add("argName", ruleArgST);
+		ST setResultST =
+			generator.getTemplates().getInstanceOf("recRuleSetResultAction");
+		ruleST.add("setResultAction", setResultST);
+		ruleST.add("userRetvals", retvals);
+		fillRetValAssignments(ruleST, "recPrimaryName");
+
+		LinkedHashMap<Integer, String> opPrecRuleAlts = new LinkedHashMap<Integer, String>();
+		opPrecRuleAlts.putAll(binaryAlts);
+		opPrecRuleAlts.putAll(ternaryAlts);
+		opPrecRuleAlts.putAll(suffixAlts);
+		for (Map.Entry<Integer, String> entry : opPrecRuleAlts.entrySet()) {
+			int alt = entry.getKey();
+			String altText = entry.getValue();
+			ST altST = recRuleTemplates.getInstanceOf("recRuleAlt");
+			ST predST =
+				generator.getTemplates().getInstanceOf("recRuleAltPredicate");
+			predST.add("opPrec", precedence(alt));
+			predST.add("ruleName", ruleName);
+			altST.add("pred", predST);
+			altST.add("alt", altText);
+			ruleST.add("alts", altST);
+		}
+
+		System.out.println(ruleST);
+
+		return ruleST.render();
+	}
+
+	public String getArtificialPrimaryRule() {
+		ST ruleST = recRuleTemplates.getInstanceOf("recPrimaryRule");
+		ruleST.add("ruleName", ruleName);
+		ruleST.add("alts", prefixAlts);
+		ruleST.add("alts", otherAlts);
+		ruleST.add("userRetvals", retvals);
+		System.out.println(ruleST);
+		return ruleST.render();
+	}
+
+	public GrammarAST replaceRuleRefs(GrammarAST t, String name) {
+		if ( t==null ) return null;
+		for (GrammarAST rref : t.findAllType(RULE_REF)) {
+			if ( rref.getText().equals(ruleName) ) rref.setText(name);
+		}
+		return t;
+	}
+
+	public static boolean hasImmediateRecursiveRuleRefs(GrammarAST t, String ruleName) {
+		if ( t==null ) return false;
+		for (GrammarAST rref : t.findAllType(RULE_REF)) {
+			if ( rref.getText().equals(ruleName) ) return true;
+		}
+		return false;
+	}
+
+	public GrammarAST replaceLastRuleRef(GrammarAST t, String name) {
+		if ( t==null ) return null;
+		GrammarAST last = null;
+		for (GrammarAST rref : t.findAllType(RULE_REF)) { last = rref; }
+		if ( last !=null && last.getText().equals(ruleName) ) last.setText(name);
+		return t;
+	}
+
+	public void stripSynPred(GrammarAST altAST) {
+		GrammarAST t = (GrammarAST)altAST.getChild(0);
+		if ( t.getType()==ANTLRParser.BACKTRACK_SEMPRED ||
+			 t.getType()==ANTLRParser.SYNPRED ||
+			 t.getType()==ANTLRParser.SYN_SEMPRED )
+		{
+			altAST.deleteChild(0); // kill it
+		}
+	}
+
+	public void stripLeftRecursion(GrammarAST altAST) {
+		GrammarAST rref = (GrammarAST)altAST.getChild(0);
+		if ( rref.getType()== ANTLRParser.RULE_REF &&
+			 rref.getText().equals(ruleName))
+		{
+			// remove rule ref
+			altAST.deleteChild(0);
+			// reset index so it prints properly
+			GrammarAST newFirstChild = (GrammarAST) altAST.getChild(0);
+			altAST.setTokenStartIndex(newFirstChild.getTokenStartIndex());
+		}
+	}
+
+	public String text(GrammarAST t) {
+		if ( t==null ) return null;
+		try {
+			return new ANTLRTreePrinter(new CommonTreeNodeStream(t)).toString(grammar, true);
+		}
+		catch (Exception e) {
+			ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE, e);
+		}
+		return null;
+	}
+
+	public int precedence(int alt) {
+		return numAlts-alt+1;
+	}
+
+	public int nextPrecedence(int alt) {
+		int p = precedence(alt);
+		if ( altAssociativity.get(alt)==ASSOC.left ) p++;
+		return p;
+	}
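+
+	/* Worked example: with numAlts == 4, alternative 1 (listed first, so it
+	 * binds tightest) gets precedence 4 and alternative 4 gets precedence 1.
+	 * For a left-associative alternative nextPrecedence() returns
+	 * precedence(alt)+1, so the recursive reference on the right cannot
+	 * re-match an operator of the same precedence level; that is what makes
+	 * the operator associate to the left.
+	 */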
+
+	public void fillRetValAssignments(ST ruleST, String srcName) {
+		if ( retvals==null ) return;
+
+		// complicated since we must be target-independent
+		for (String name : getNamesFromArgAction(retvals.token)) {
+			ST setRetValST =
+				generator.getTemplates().getInstanceOf("recRuleSetReturnAction");
+			ST ruleNameST = recRuleTemplates.getInstanceOf(srcName);
+			ruleNameST.add("ruleName", ruleName);
+			setRetValST.add("src", ruleNameST);
+			setRetValST.add("name", name);
+			ruleST.add("userRetvalAssignments",setRetValST);
+		}
+	}
+
+	public Collection<String> getNamesFromArgAction(Token t) {
+		AttributeScope returnScope = grammar.createReturnScope("",t);
+		returnScope.addAttributes(t.getText(), ',');
+		return returnScope.attributes.keySet();
+	}
+
+	@Override
+	public String toString() {
+		return "PrecRuleOperatorCollector{" +
+			   "binaryAlts=" + binaryAlts +
+			   ", rec=" + tokenToPrec +
+			   ", ternaryAlts=" + ternaryAlts +
+			   ", suffixAlts=" + suffixAlts +
+			   ", prefixAlts=" + prefixAlts +
+			   ", otherAlts=" + otherAlts +
+			   '}';
+	}
+}
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/Message.java b/tool/src/main/java/org/antlr/tool/Message.java
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/tool/Message.java
rename to tool/src/main/java/org/antlr/tool/Message.java
diff --git a/tool/src/main/java/org/antlr/tool/NFAFactory.java b/tool/src/main/java/org/antlr/tool/NFAFactory.java
new file mode 100644
index 0000000..93db958
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/NFAFactory.java
@@ -0,0 +1,730 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.*;
+import org.antlr.misc.IntSet;
+import org.antlr.misc.IntervalSet;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+/** Routines to construct StateClusters from EBNF grammar constructs.
+ *  No optimization is done to remove unnecessary epsilon edges.
+ *
+ *  TODO: add an optimization that reduces number of states and transitions
+ *  will help with speed of conversion and make it easier to view NFA.  For
+ *  example, o-A-&gt;o--&gt;o-B-&gt;o should be o-A-&gt;o-B-&gt;o
+ */
+public class NFAFactory {
+	/** This factory is attached to a specific NFA that it is building.
+     *  The NFA will be filled up with states and transitions.
+     */
+	NFA nfa = null;
+
+    public Rule getCurrentRule() {
+        return currentRule;
+    }
+
+    public void setCurrentRule(Rule currentRule) {
+        this.currentRule = currentRule;
+    }
+
+	Rule currentRule = null;
+
+	public NFAFactory(NFA nfa) {
+        nfa.setFactory(this);
+		this.nfa = nfa;
+	}
+
+    public NFAState newState() {
+        NFAState n = new NFAState(nfa);
+        int state = nfa.getNewNFAStateNumber();
+        n.stateNumber = state;
+        nfa.addState(n);
+		n.enclosingRule = currentRule;
+		return n;
+    }
+
+	/** Optimize an alternative (list of grammar elements).
+	 *
+	 *  Walk the chain of elements (which can be complicated loop blocks...)
+	 *  and throw away any epsilon transitions used to link up simple elements.
+	 *
+	 *  This only removes 195 states from java.g's NFA, but every little
+	 *  bit helps.  Perhaps I can improve in the future.
+	 */
+	public void optimizeAlternative(StateCluster alt) {
+		NFAState s = alt.left;
+		while ( s!=alt.right ) {
+			// if it's a block element, jump over it and continue
+			if ( s.endOfBlockStateNumber!=State.INVALID_STATE_NUMBER ) {
+				s = nfa.getState(s.endOfBlockStateNumber);
+				continue;
+			}
+			Transition t = s.transition[0];
+			if ( t instanceof RuleClosureTransition ) {
+				s = ((RuleClosureTransition) t).followState;
+				continue;
+			}
+			if ( t.label.isEpsilon() && !t.label.isAction() && s.getNumberOfTransitions()==1 ) {
+				// bypass epsilon transition and point to what the epsilon's
+				// target points to unless that epsilon transition points to
+				// a block or loop etc..  Also don't collapse epsilons that
+				// point at the last node of the alt. Don't collapse action edges
+				NFAState epsilonTarget = (NFAState)t.target;
+				if ( epsilonTarget.endOfBlockStateNumber==State.INVALID_STATE_NUMBER &&
+					 epsilonTarget.transition[0] !=null )
+				{
+					s.setTransition0(epsilonTarget.transition[0]);
+					/*
+					System.out.println("### opt "+s.stateNumber+"->"+
+									   epsilonTarget.transition(0).target.stateNumber);
+					*/
+				}
+			}
+			s = (NFAState)t.target;
+		}
+	}
+
+	/** From label A build Graph o-A-&gt;o */
+	public StateCluster build_Atom(int label, GrammarAST associatedAST) {
+		NFAState left = newState();
+		NFAState right = newState();
+		left.associatedASTNode = associatedAST;
+		right.associatedASTNode = associatedAST;
+		transitionBetweenStates(left, right, label);
+		StateCluster g = new StateCluster(left, right);
+		return g;
+	}
+
+	public StateCluster build_Atom(GrammarAST atomAST) {
+		int tokenType = nfa.grammar.getTokenType(atomAST.getText());
+		return build_Atom(tokenType, atomAST);
+	}
+
+	/** From set build single edge graph o-&gt;o-set-&gt;o.  To conform to
+     *  what an alt block looks like, must have extra state on left.
+     */
+	public StateCluster build_Set(IntSet set, GrammarAST associatedAST) {
+        NFAState left = newState();
+        NFAState right = newState();
+		left.associatedASTNode = associatedAST;
+		right.associatedASTNode = associatedAST;
+		Label label = new Label(set);
+		Transition e = new Transition(label,right);
+        left.addTransition(e);
+		StateCluster g = new StateCluster(left, right);
+        return g;
+	}
+
+    /** Can only complement block of simple alts; can complement build_Set()
+     *  result, that is.  Get set and complement, replace old with complement.
+    public StateCluster build_AlternativeBlockComplement(StateCluster blk) {
+        State s0 = blk.left;
+        IntSet set = getCollapsedBlockAsSet(s0);
+        if ( set!=null ) {
+            // if set is available, then structure known and blk is a set
+            set = nfa.grammar.complement(set);
+            Label label = s0.transition(0).target.transition(0).label;
+            label.setSet(set);
+        }
+        return blk;
+    }
+	 */
+
+    public StateCluster build_Range(int a, int b) {
+        NFAState left = newState();
+        NFAState right = newState();
+		Label label = new Label(IntervalSet.of(a, b));
+		Transition e = new Transition(label,right);
+        left.addTransition(e);
+        StateCluster g = new StateCluster(left, right);
+        return g;
+    }
+
+	/** From char 'c' build StateCluster o-intValue(c)-&gt;o
+	 */
+	public StateCluster build_CharLiteralAtom(GrammarAST charLiteralAST) {
+        int c = Grammar.getCharValueFromGrammarCharLiteral(charLiteralAST.getText());
+		return build_Atom(c, charLiteralAST);
+	}
+
+	/** From char 'c' build StateCluster o-intValue(c)-&gt;o
+	 *  can include unicode escapes like '\u0024' later.  Accepts
+	 *  actual 16-bit unicode now, of course, by default.
+     *  TODO not supplemental char clean!
+	 */
+	public StateCluster build_CharRange(String a, String b) {
+		int from = Grammar.getCharValueFromGrammarCharLiteral(a);
+		int to = Grammar.getCharValueFromGrammarCharLiteral(b);
+		return build_Range(from, to);
+	}
+
+    /** For a non-lexer, just build a simple token reference atom.
+     *  For a lexer, a string is a sequence of char to match.  That is,
+     *  "fog" is treated as 'f' 'o' 'g' not as a single transition in
+     *  the DFA.  Machine== o-'f'-&gt;o-'o'-&gt;o-'g'-&gt;o and has n+1 states
+     *  for n characters.
+     */
+    public StateCluster build_StringLiteralAtom(GrammarAST stringLiteralAST) {
+        if ( nfa.grammar.type==Grammar.LEXER ) {
+			StringBuffer chars =
+				Grammar.getUnescapedStringFromGrammarStringLiteral(stringLiteralAST.getText());
+            NFAState first = newState();
+            NFAState last = null;
+            NFAState prev = first;
+            for (int i=0; i<chars.length(); i++) {
+                int c = chars.charAt(i);
+                NFAState next = newState();
+                transitionBetweenStates(prev, next, c);
+                prev = last = next;
+            }
+            return  new StateCluster(first, last);
+        }
+
+        // a simple token reference in non-Lexers
+        int tokenType = nfa.grammar.getTokenType(stringLiteralAST.getText());
+		return build_Atom(tokenType, stringLiteralAST);
+    }
+
+    /** For reference to rule r, build
+     *
+     *  o-e-&gt;(r)  o
+     *
+     *  where (r) is the start of rule r and the trailing o is not linked
+     *  to from rule ref state directly (it's done thru the transition(0)
+     *  RuleClosureTransition.
+     *
+     *  If the rule r is just a list of tokens, its block will be just
+     *  a set on an edge o-&gt;o-&gt;o-set-&gt;o-&gt;o-&gt;o; we could inline it rather than doing
+     *  the rule reference, but I'm not doing this yet as I'm not sure
+     *  it would help much in the NFA&rarr;DFA construction.
+     *
+     *  TODO add to codegen: collapse alt blks that are sets into single matchSet
+     */
+    public StateCluster build_RuleRef(Rule refDef, NFAState ruleStart) {
+        //System.out.println("building ref to rule "+nfa.grammar.name+"."+refDef.name);
+        NFAState left = newState();
+        // left.setDescription("ref to "+ruleStart.getDescription());
+        NFAState right = newState();
+        // right.setDescription("NFAState following ref to "+ruleStart.getDescription());
+        Transition e = new RuleClosureTransition(refDef,ruleStart,right);
+        left.addTransition(e);
+        StateCluster g = new StateCluster(left, right);
+        return g;
+    }
+
+    /** From an empty alternative build StateCluster o-e-&gt;o */
+    public StateCluster build_Epsilon() {
+        NFAState left = newState();
+        NFAState right = newState();
+        transitionBetweenStates(left, right, Label.EPSILON);
+        StateCluster g = new StateCluster(left, right);
+        return g;
+    }
+
+	/** Build what amounts to an epsilon transition with a semantic
+	 *  predicate action.  The pred is a pointer into the AST of
+	 *  the SEMPRED token.
+	 */
+	public StateCluster build_SemanticPredicate(GrammarAST pred) {
+		// don't count syn preds
+		if ( !pred.getText().toUpperCase()
+				.startsWith(Grammar.SYNPRED_RULE_PREFIX.toUpperCase()) )
+		{
+			nfa.grammar.numberOfSemanticPredicates++;
+		}
+		NFAState left = newState();
+		NFAState right = newState();
+		Transition e = new Transition(new PredicateLabel(pred), right);
+		left.addTransition(e);
+		StateCluster g = new StateCluster(left, right);
+		return g;
+	}
+
+	/** Build what amounts to an epsilon transition with an action.
+	 *  The action goes into NFA though it is ignored during analysis.
+	 *  It slows things down a bit, but I must ignore predicates after
+	 *  having seen an action (5-5-2008).
+	 */
+	public StateCluster build_Action(GrammarAST action) {
+		NFAState left = newState();
+		NFAState right = newState();
+		Transition e = new Transition(new ActionLabel(action), right);
+		left.addTransition(e);
+		return new StateCluster(left, right);
+	}
+
+	/** add an EOF transition to any rule end NFAState that points to nothing
+     *  (i.e., for all those rules not invoked by another rule).  These
+     *  are start symbols then.
+	 *
+	 *  Return the number of grammar entry points; i.e., how many rules are
+	 *  not invoked by another rule (they can only be invoked from outside).
+	 *  These are the start rules.
+     */
+    public int build_EOFStates(Collection<Rule> rules) {
+		int numberUnInvokedRules = 0;
+        for (Rule r : rules) {
+			NFAState endNFAState = r.stopState;
+            // Is this rule a start symbol?  (no follow links)
+			if ( endNFAState.transition[0] ==null ) {
+				// if so, then don't let algorithm fall off the end of
+				// the rule, make it hit EOF/EOT.
+				build_EOFState(endNFAState);
+				// track how many rules are not invoked by another rule
+				numberUnInvokedRules++;
+			}
+        }
+		return numberUnInvokedRules;
+    }
+
+    /** set up an NFA NFAState that will yield eof tokens or,
+     *  in the case of a lexer grammar, an EOT token when the conversion
+     *  hits the end of a rule.
+     */
+    private void build_EOFState(NFAState endNFAState) {
+		NFAState end = newState();
+        int label = Label.EOF;
+        if ( nfa.grammar.type==Grammar.LEXER ) {
+            label = Label.EOT;
+			end.setEOTTargetState(true);
+        }
+		/*
+		System.out.println("build "+nfa.grammar.getTokenDisplayName(label)+
+						   " loop on end of state "+endNFAState.getDescription()+
+						   " to state "+end.stateNumber);
+		*/
+		Transition toEnd = new Transition(label, end);
+		endNFAState.addTransition(toEnd);
+	}
+
+    /** From A B build A-e-&gt;B (that is, build an epsilon arc from right
+     *  of A to left of B).
+     *
+     *  As a convenience, return B if A is null or return A if B is null.
+     */
+    public StateCluster build_AB(StateCluster A, StateCluster B) {
+        if ( A==null ) {
+            return B;
+        }
+        if ( B==null ) {
+            return A;
+        }
+		transitionBetweenStates(A.right, B.left, Label.EPSILON);
+		StateCluster g = new StateCluster(A.left, B.right);
+        return g;
+    }
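
To make the epsilon-concatenation idea above concrete, here is a minimal standalone sketch in plain Java. The State, Cluster, and atom names are simplified stand-ins invented for illustration; they are not the ANTLR NFAState/StateCluster API.

    import java.util.ArrayList;
    import java.util.List;

    class EpsilonConcatSketch {
        static final int EPSILON = -1;
        static int stateCount = 0;

        static class State {
            final int number;
            final List<int[]> edges = new ArrayList<>();  // {label, target state number}
            State(int number) { this.number = number; }
        }

        static class Cluster {
            final State left, right;
            Cluster(State left, State right) { this.left = left; this.right = right; }
        }

        static State newState() { return new State(stateCount++); }

        // Single-label atom: o-label->o
        static Cluster atom(int label) {
            State left = newState(), right = newState();
            left.edges.add(new int[]{label, right.number});
            return new Cluster(left, right);
        }

        // A B: add an epsilon edge from A.right to B.left; keep A.left/B.right as the new ends.
        static Cluster buildAB(Cluster a, Cluster b) {
            if (a == null) return b;
            if (b == null) return a;
            a.right.edges.add(new int[]{EPSILON, b.left.number});
            return new Cluster(a.left, b.right);
        }

        public static void main(String[] args) {
            Cluster a = atom('a');
            Cluster b = atom('b');
            Cluster ab = buildAB(a, b);
            System.out.println("epsilon edge: state " + a.right.number + " -> state " + b.left.number);
            System.out.println("result cluster: (" + ab.left.number + ", " + ab.right.number + ")");
        }
    }
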
+
+	/** From a set ('a'|'b') build
+     *
+     *  o-e-&gt;o-'a'..'b'-&gt;o (a new start NFAState with an epsilon edge into the set edge)
+	 */
+	public StateCluster build_AlternativeBlockFromSet(StateCluster set) {
+		if ( set==null ) {
+			return null;
+		}
+
+		// single alt, no decision, just return only alt state cluster
+		NFAState startOfAlt = newState(); // must have this no matter what
+		transitionBetweenStates(startOfAlt, set.left, Label.EPSILON);
+
+		return new StateCluster(startOfAlt,set.right);
+	}
+
+	/** From A|B|..|Z alternative block build
+     *
+     *  o-&gt;o-A-&gt;o-&gt;o (last NFAState is blockEndNFAState pointed to by all alts)
+     *  |          ^
+     *  o-&gt;o-B-&gt;o--|
+     *  |          |
+     *  ...        |
+     *  |          |
+     *  o-&gt;o-Z-&gt;o--|
+     *
+     *  So every alternative gets a begin NFAState connected by epsilon,
+     *  and every alt's right side points at a block end NFAState.  There is a
+     *  new NFAState in the StateCluster for each alt plus one for the
+     *  end NFAState.
+     *
+     *  Special case: only one alternative: don't make a block with alt
+     *  begin/end.
+     *
+     *  Special case: if just a list of tokens/chars/sets, then collapse
+     *  to a single-edge o-set-&gt;o graph.
+     *
+     *  Set alt number (1..n) in the left-Transition NFAState.
+     */
+    public StateCluster build_AlternativeBlock(List<StateCluster> alternativeStateClusters)
+    {
+        StateCluster result;
+        if ( alternativeStateClusters==null || alternativeStateClusters.isEmpty() ) {
+            return null;
+        }
+
+		// single alt case
+		if ( alternativeStateClusters.size()==1 ) {
+			// single alt, no decision, just return only alt state cluster
+			StateCluster g = alternativeStateClusters.get(0);
+			NFAState startOfAlt = newState(); // must have this no matter what
+			transitionBetweenStates(startOfAlt, g.left, Label.EPSILON);
+
+			//System.out.println("### opt saved start/stop end in (...)");
+			return new StateCluster(startOfAlt,g.right);
+		}
+
+		// even if we can collapse for lookahead purposes, we will still
+        // need to predict the alts of this subrule in case there are actions
+        // etc...  This is the decision that is pointed to from the AST node
+        // (always)
+        NFAState prevAlternative = null; // tracks prev so we can link to next alt
+        NFAState firstAlt = null;
+        NFAState blockEndNFAState = newState();
+        blockEndNFAState.setDescription("end block");
+        int altNum = 1;
+        for (StateCluster g : alternativeStateClusters) {
+            // add begin NFAState for this alt connected by epsilon
+            NFAState left = newState();
+            left.setDescription("alt "+altNum+" of ()");
+			transitionBetweenStates(left, g.left, Label.EPSILON);
+			transitionBetweenStates(g.right, blockEndNFAState, Label.EPSILON);
+			// Are we the first alternative?
+			if ( firstAlt==null ) {
+				firstAlt = left; // track extreme left node of StateCluster
+			}
+			else {
+				// if not first alternative, must link to this alt from previous
+				transitionBetweenStates(prevAlternative, left, Label.EPSILON);
+			}
+			prevAlternative = left;
+			altNum++;
+		}
+
+		// return StateCluster representing the entire block
+		// Points to first alt NFAState on left, block end on right
+		result = new StateCluster(firstAlt, blockEndNFAState);
+
+		firstAlt.decisionStateType = NFAState.BLOCK_START;
+
+		// set EOB markers for Jean
+		firstAlt.endOfBlockStateNumber = blockEndNFAState.stateNumber;
+
+		return result;
+    }
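
A small illustrative sketch of the alt-chaining pattern used above: the first begin state is remembered as the block start, each subsequent begin state is linked from the previous one by epsilon, and every alt's right side feeds a shared block end. State names here are plain strings for printing, not ANTLR types.

    import java.util.ArrayList;
    import java.util.List;

    class AltChainSketch {
        public static void main(String[] args) {
            String[] altClusters = {"A", "B", "C"};   // one StateCluster per alternative
            List<String> epsilonEdges = new ArrayList<>();
            String firstAlt = null;                   // extreme left node of the block
            String prevAlternative = null;            // tracks prev so we can link to next alt
            int altNum = 1;
            for (String g : altClusters) {
                String left = "begin" + altNum;                   // new begin state for this alt
                epsilonEdges.add(left + " -e-> " + g + ".left");  // into the alt's cluster
                epsilonEdges.add(g + ".right -e-> blockEnd");     // out to the shared end state
                if (firstAlt == null) {
                    firstAlt = left;
                } else {
                    epsilonEdges.add(prevAlternative + " -e-> " + left);  // chain previous alt to this one
                }
                prevAlternative = left;
                altNum++;
            }
            System.out.println("block start = " + firstAlt + ", block end = blockEnd");
            epsilonEdges.forEach(System.out::println);
        }
    }
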
+
+    /** From (A)? build either:
+     *
+	 *  o--A-&gt;o
+	 *  |     ^
+	 *  o----&gt;|
+     *
+     *  or, if A is a block, just add an empty alt to the end of the block
+     */
+    public StateCluster build_Aoptional(StateCluster A) {
+        StateCluster g;
+        int n = nfa.grammar.getNumberOfAltsForDecisionNFA(A.left);
+        if ( n==1 ) {
+            // no decision, just wrap in an optional path
+			//NFAState decisionState = newState();
+			NFAState decisionState = A.left; // reuse left edge
+			decisionState.setDescription("only alt of ()? block");
+			NFAState emptyAlt = newState();
+            emptyAlt.setDescription("epsilon path of ()? block");
+            NFAState blockEndNFAState;
+			blockEndNFAState = newState();
+			transitionBetweenStates(A.right, blockEndNFAState, Label.EPSILON);
+			blockEndNFAState.setDescription("end ()? block");
+            //transitionBetweenStates(decisionState, A.left, Label.EPSILON);
+            transitionBetweenStates(decisionState, emptyAlt, Label.EPSILON);
+            transitionBetweenStates(emptyAlt, blockEndNFAState, Label.EPSILON);
+
+			// set EOB markers for Jean
+			decisionState.endOfBlockStateNumber = blockEndNFAState.stateNumber;
+			blockEndNFAState.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
+
+            g = new StateCluster(decisionState, blockEndNFAState);
+        }
+        else {
+            // a decision block, add an empty alt
+            NFAState lastRealAlt =
+                    nfa.grammar.getNFAStateForAltOfDecision(A.left, n);
+            NFAState emptyAlt = newState();
+            emptyAlt.setDescription("epsilon path of ()? block");
+            transitionBetweenStates(lastRealAlt, emptyAlt, Label.EPSILON);
+            transitionBetweenStates(emptyAlt, A.right, Label.EPSILON);
+
+			// set EOB markers for Jean (I think this is redundant here)
+			A.left.endOfBlockStateNumber = A.right.stateNumber;
+			A.right.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
+
+            g = A; // return same block, but now with optional last path
+        }
+		g.left.decisionStateType = NFAState.OPTIONAL_BLOCK_START;
+
+        return g;
+    }
+
+    /** From (A)+ build
+	 *
+     *     |---|    (Transition 2 from A.right points at alt 1)
+	 *     v   |    (follow of loop is Transition 1)
+     *  o-&gt;o-A-o-&gt;o
+     *
+     *  Meaning that the last NFAState in A points back to A's left Transition NFAState
+     *  and we add a new begin/end NFAState.  A can be single alternative or
+     *  multiple.
+	 *
+	 *  During analysis we'll call the follow link (transition 1) alt n+1 for
+	 *  an n-alt A block.
+     */
+    public StateCluster build_Aplus(StateCluster A) {
+        NFAState left = newState();
+        NFAState blockEndNFAState = newState();
+		blockEndNFAState.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
+
+		// don't reuse A.right as loopback if it's right edge of another block
+		if ( A.right.decisionStateType == NFAState.RIGHT_EDGE_OF_BLOCK ) {
+			// nested A* so make another tail node to be the loop back
+			// instead of the usual A.right which is the EOB for inner loop
+			NFAState extraRightEdge = newState();
+			transitionBetweenStates(A.right, extraRightEdge, Label.EPSILON);
+			A.right = extraRightEdge;
+		}
+
+        transitionBetweenStates(A.right, blockEndNFAState, Label.EPSILON); // follow is Transition 1
+		// turn A's block end into a loopback (acts like alt 2)
+		transitionBetweenStates(A.right, A.left, Label.EPSILON); // loop back Transition 2
+		transitionBetweenStates(left, A.left, Label.EPSILON);
+		
+		A.right.decisionStateType = NFAState.LOOPBACK;
+		A.left.decisionStateType = NFAState.BLOCK_START;
+
+		// set EOB markers for Jean
+		A.left.endOfBlockStateNumber = A.right.stateNumber;
+
+        StateCluster g = new StateCluster(left, blockEndNFAState);
+        return g;
+    }
+
+    /** From (A)* build
+     *
+	 *     |---|
+	 *     v   |
+	 *  o-&gt;o-A-o--o (Transition 2 from block end points at alt 1; follow is Transition 1)
+     *  |         ^
+     *  o---------| (optional branch is 2nd alt of optional block containing A+)
+     *
+     *  Meaning that the last (end) NFAState in A points back to A's
+     *  left side NFAState and we add 3 new NFAStates (the
+     *  optional branch is built just like an optional subrule).
+     *  See the Aplus() method for more on the loop back Transition.
+	 *  The new node on right edge is set to RIGHT_EDGE_OF_CLOSURE so we
+	 *  can detect nested (A*)* loops and insert an extra node.  Previously,
+	 *  two blocks shared same EOB node.
+     *
+     *  There are 2 or 3 decision points in an A*.  If A is not a block (i.e.,
+     *  it only has one alt), then there are two decisions: the optional bypass
+     *  and then loopback.  If A is a block of alts, then there are three
+     *  decisions: bypass, loopback, and A's decision point.
+     *
+     *  Note that the optional bypass must be outside the loop as (A|B)* is
+     *  not the same thing as (A|B|)+.
+     *
+     *  This is an accurate NFA representation of the meaning of (A)*, but
+     *  for generating code, I don't need a DFA for the optional branch by
+     *  virtue of how I generate code.  The exit-loopback-branch decision
+     *  is sufficient to let me make an appropriate enter, exit, loop
+     *  determination.  See codegen.g
+     */
+    public StateCluster build_Astar(StateCluster A) {
+		NFAState bypassDecisionState = newState();
+		bypassDecisionState.setDescription("enter loop path of ()* block");
+        NFAState optionalAlt = newState();
+        optionalAlt.setDescription("epsilon path of ()* block");
+        NFAState blockEndNFAState = newState();
+		blockEndNFAState.decisionStateType = NFAState.RIGHT_EDGE_OF_BLOCK;
+
+		// don't reuse A.right as loopback if it's right edge of another block
+		if ( A.right.decisionStateType == NFAState.RIGHT_EDGE_OF_BLOCK ) {
+			// nested A* so make another tail node to be the loop back
+			// instead of the usual A.right which is the EOB for inner loop
+			NFAState extraRightEdge = newState();
+			transitionBetweenStates(A.right, extraRightEdge, Label.EPSILON);
+			A.right = extraRightEdge;
+		}
+
+		// convert A's end block to loopback
+		A.right.setDescription("()* loopback");
+		// Transition 1 to actual block of stuff
+        transitionBetweenStates(bypassDecisionState, A.left, Label.EPSILON);
+        // Transition 2 optional to bypass
+        transitionBetweenStates(bypassDecisionState, optionalAlt, Label.EPSILON);
+		transitionBetweenStates(optionalAlt, blockEndNFAState, Label.EPSILON);
+        // Transition 1 of end block exits
+        transitionBetweenStates(A.right, blockEndNFAState, Label.EPSILON);
+        // Transition 2 of end block loops
+        transitionBetweenStates(A.right, A.left, Label.EPSILON);
+
+		bypassDecisionState.decisionStateType = NFAState.BYPASS;
+		A.left.decisionStateType = NFAState.BLOCK_START;
+		A.right.decisionStateType = NFAState.LOOPBACK;
+
+		// set EOB markers for Jean
+		A.left.endOfBlockStateNumber = A.right.stateNumber;
+		bypassDecisionState.endOfBlockStateNumber = blockEndNFAState.stateNumber;
+
+        StateCluster g = new StateCluster(bypassDecisionState, blockEndNFAState);
+        return g;
+    }
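
The two loop builders differ only in which epsilon edges they add. The sketch below simply enumerates those edges for comparison; the state names are illustrative labels, not ANTLR objects.

    import java.util.Arrays;
    import java.util.List;

    class LoopSkeletonSketch {
        public static void main(String[] args) {
            // (A)+ : a new entry state and block end; A.right becomes the loopback decision.
            List<String> plus = Arrays.asList(
                "left     -e-> A.left    (enter the block)",
                "A.right  -e-> blockEnd  (transition 1: exit / follow)",
                "A.right  -e-> A.left    (transition 2: loop back)");

            // (A)* : same loop plus a bypass decision and an empty (optional) alternative.
            List<String> star = Arrays.asList(
                "bypass   -e-> A.left    (transition 1: enter the block)",
                "bypass   -e-> optional  (transition 2: take the empty path)",
                "optional -e-> blockEnd",
                "A.right  -e-> blockEnd  (transition 1: exit / follow)",
                "A.right  -e-> A.left    (transition 2: loop back)");

            System.out.println("(A)+ epsilon edges:");
            plus.forEach(e -> System.out.println("  " + e));
            System.out.println("(A)* epsilon edges:");
            star.forEach(e -> System.out.println("  " + e));
        }
    }
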
+
+    /** Build an NFA predictor for special rule called Tokens manually that
+     *  predicts which token will succeed.  The refs to the rules are not
+     *  RuleRefTransitions as I want DFA conversion to stop at the EOT
+     *  transition on the end of each token, rather than return to Tokens rule.
+     *  If I used normal build_alternativeBlock for this, the RuleRefTransitions
+     *  would save return address when jumping away from Tokens rule.
+     *
+     *  All I do here is build n new states for n rules with an epsilon
+     *  edge to the rule start states and then to the next state in the
+     *  list:
+     *
+     *   o->(A)  (a state links to start of A and to next in list)
+     *   |
+     *   o->(B)
+     *   |
+     *   ...
+     *   |
+     *   o->(Z)
+	 *
+	 *  This is the NFA created for the artificial rule created in
+	 *  Grammar.addArtificialMatchTokensRule().
+	 *
+	 *  11/28/2005: removed so we can use normal rule construction for Tokens.
+    public NFAState build_ArtificialMatchTokensRuleNFA() {
+        int altNum = 1;
+        NFAState firstAlt = null; // the start state for the "rule"
+        NFAState prevAlternative = null;
+        Iterator iter = nfa.grammar.getRules().iterator();
+		// TODO: add a single decision node/state for good description
+        while (iter.hasNext()) {
+			Rule r = (Rule) iter.next();
+            String ruleName = r.name;
+			String modifier = nfa.grammar.getRuleModifier(ruleName);
+            if ( ruleName.equals(Grammar.ARTIFICIAL_TOKENS_RULENAME) ||
+				 (modifier!=null &&
+				  modifier.equals(Grammar.FRAGMENT_RULE_MODIFIER)) )
+			{
+                continue; // don't loop to yourself or do nontoken rules
+            }
+            NFAState ruleStartState = nfa.grammar.getRuleStartState(ruleName);
+            NFAState left = newState();
+            left.setDescription("alt "+altNum+" of artificial rule "+Grammar.ARTIFICIAL_TOKENS_RULENAME);
+            transitionBetweenStates(left, ruleStartState, Label.EPSILON);
+            // Are we the first alternative?
+            if ( firstAlt==null ) {
+                firstAlt = left; // track extreme top left node as rule start
+            }
+            else {
+                // if not first alternative, must link to this alt from previous
+                transitionBetweenStates(prevAlternative, left, Label.EPSILON);
+            }
+            prevAlternative = left;
+            altNum++;
+        }
+		firstAlt.decisionStateType = NFAState.BLOCK_START;
+
+        return firstAlt;
+    }
+	 */
+
+    /** Build an atom with all possible values in its label */
+    public StateCluster build_Wildcard(GrammarAST associatedAST) {
+        NFAState left = newState();
+        NFAState right = newState();
+        left.associatedASTNode = associatedAST;
+        right.associatedASTNode = associatedAST;
+        Label label = new Label(nfa.grammar.getTokenTypes()); // char or tokens
+        Transition e = new Transition(label,right);
+        left.addTransition(e);
+        StateCluster g = new StateCluster(left, right);
+        return g;
+    }
+
+    /** Build a subrule matching ^(. .*) (any tree or node). Let's use
+     *  (^(. .+) | .) to be safe.
+     */
+    public StateCluster build_WildcardTree(GrammarAST associatedAST) {
+        StateCluster wildRoot = build_Wildcard(associatedAST);
+
+        StateCluster down = build_Atom(Label.DOWN, associatedAST);
+        wildRoot = build_AB(wildRoot,down); // hook in; . DOWN
+
+        // make .+
+        StateCluster wildChildren = build_Wildcard(associatedAST);
+        wildChildren = build_Aplus(wildChildren);
+        wildRoot = build_AB(wildRoot,wildChildren); // hook in; . DOWN .+
+
+        StateCluster up = build_Atom(Label.UP, associatedAST);
+        wildRoot = build_AB(wildRoot,up); // hook in; . DOWN .+ UP
+
+        // make optional . alt
+        StateCluster optionalNodeAlt = build_Wildcard(associatedAST);
+
+        List<StateCluster> alts = new ArrayList<StateCluster>();
+        alts.add(wildRoot);
+        alts.add(optionalNodeAlt);
+        StateCluster blk = build_AlternativeBlock(alts);
+
+        return blk;
+    }
+
+    /** Given a collapsed block of alts (a set of atoms), pull out
+     *  the set and return it.
+     */
+    protected IntSet getCollapsedBlockAsSet(State blk) {
+        State s0 = blk;
+        if ( s0!=null && s0.transition(0)!=null ) {
+            State s1 = s0.transition(0).target;
+            if ( s1!=null && s1.transition(0)!=null ) {
+                Label label = s1.transition(0).label;
+                if ( label.isSet() ) {
+                    return label.getSet();
+                }
+            }
+        }
+        return null;
+    }
+
+	private void transitionBetweenStates(NFAState a, NFAState b, int label) {
+		Transition e = new Transition(label,b);
+		a.addTransition(e);
+	}
+}
diff --git a/tool/src/main/java/org/antlr/tool/NameSpaceChecker.java b/tool/src/main/java/org/antlr/tool/NameSpaceChecker.java
new file mode 100644
index 0000000..8a926b5
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/NameSpaceChecker.java
@@ -0,0 +1,262 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.Label;
+import org.antlr.runtime.Token;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+public class NameSpaceChecker {
+	protected Grammar grammar;
+
+	public NameSpaceChecker(Grammar grammar) {
+		this.grammar = grammar;
+	}
+
+	public void checkConflicts() {
+		for (int i = CompositeGrammar.MIN_RULE_INDEX; i < grammar.composite.ruleIndexToRuleList.size(); i++) {
+			Rule r = grammar.composite.ruleIndexToRuleList.elementAt(i);
+			if ( r==null ) {
+				continue;
+			}
+			// walk all labels for Rule r
+			if ( r.labelNameSpace!=null ) {
+				for (Grammar.LabelElementPair pair : r.labelNameSpace.values()) {
+					checkForLabelConflict(r, pair.label);
+				}
+			}
+			// walk rule scope attributes for Rule r
+			if ( r.ruleScope!=null ) {
+				List<Attribute> attributes = r.ruleScope.getAttributes();
+				for (int j = 0; j < attributes.size(); j++) {
+					Attribute attribute = attributes.get(j);
+					checkForRuleScopeAttributeConflict(r, attribute);
+				}
+			}
+			checkForRuleDefinitionProblems(r);
+			checkForRuleArgumentAndReturnValueConflicts(r);
+		}
+		// check all global scopes against tokens
+		for (AttributeScope scope : grammar.getGlobalScopes().values()) {
+			checkForGlobalScopeTokenConflict(scope);
+		}
+		// check for missing rule, tokens
+		lookForReferencesToUndefinedSymbols();
+	}
+
+	protected void checkForRuleArgumentAndReturnValueConflicts(Rule r) {
+		if ( r.returnScope!=null ) {
+			Set<String> conflictingKeys = r.returnScope.intersection(r.parameterScope);
+			if (conflictingKeys!=null) {
+				for (String key : conflictingKeys) {
+					ErrorManager.grammarError(
+						ErrorManager.MSG_ARG_RETVAL_CONFLICT,
+						grammar,
+						r.tree.getToken(),
+						key,
+						r.name);
+				}
+			}
+		}
+	}
+
+	protected void checkForRuleDefinitionProblems(Rule r) {
+		String ruleName = r.name;
+		Token ruleToken = r.tree.getToken();
+		int msgID = 0;
+		if ( (grammar.type==Grammar.PARSER||grammar.type==Grammar.TREE_PARSER) &&
+			 Character.isUpperCase(ruleName.charAt(0)) )
+		{
+			msgID = ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED;
+        }
+        else if ( grammar.type==Grammar.LEXER &&
+			      Character.isLowerCase(ruleName.charAt(0)) &&
+			      !r.isSynPred )
+		{
+			msgID = ErrorManager.MSG_PARSER_RULES_NOT_ALLOWED;
+        }
+		else if ( grammar.getGlobalScope(ruleName)!=null ) {
+			msgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		}
+		if ( msgID!=0 ) {
+			ErrorManager.grammarError(msgID, grammar, ruleToken, ruleName);
+		}
+	}
+
+	/** If ref to undefined rule, give error at first occurrence.
+	 * 
+	 *  Give error if you cannot find the scope override on a rule reference.
+	 *
+	 *  If you ref ID in a combined grammar and don't define ID as a lexer rule,
+	 *  it is an error.
+	 */
+	protected void lookForReferencesToUndefinedSymbols() {
+		// for each rule ref, ask if there is a rule definition
+		for (GrammarAST refAST : grammar.ruleRefs) {
+			Token tok = refAST.token;
+			String ruleName = tok.getText();
+			Rule localRule = grammar.getLocallyDefinedRule(ruleName);
+			Rule rule = grammar.getRule(ruleName);
+			if ( localRule==null && rule!=null ) { // imported rule?
+				grammar.delegatedRuleReferences.add(rule);
+				rule.imported = true;
+			}
+			if ( rule==null && grammar.getTokenType(ruleName)!=Label.EOF ) {
+				ErrorManager.grammarError(ErrorManager.MSG_UNDEFINED_RULE_REF,
+										  grammar,
+										  tok,
+										  ruleName);
+			}
+        }
+		if ( grammar.type==Grammar.COMBINED ) {
+			// if we're a combined grammar, we know which token IDs have no
+			// associated lexer rule.
+			for (Token tok : grammar.tokenIDRefs) {
+				String tokenID = tok.getText();
+				if ( !grammar.composite.lexerRules.contains(tokenID) &&
+					 grammar.getTokenType(tokenID)!=Label.EOF )
+				{
+					ErrorManager.grammarWarning(ErrorManager.MSG_NO_TOKEN_DEFINITION,
+												grammar,
+												tok,
+												tokenID);
+				}
+			}
+		}
+		// check scopes and scoped rule refs
+		for (GrammarAST scopeAST : grammar.scopedRuleRefs) { // ^(DOT ID atom)
+			Grammar scopeG = grammar.composite.getGrammar(scopeAST.getText());
+			GrammarAST refAST = (GrammarAST)scopeAST.getChild(1);
+			String ruleName = refAST.getText();
+			if ( scopeG==null ) {
+				ErrorManager.grammarError(ErrorManager.MSG_NO_SUCH_GRAMMAR_SCOPE,
+										  grammar,
+										  scopeAST.getToken(),
+										  scopeAST.getText(),
+										  ruleName);
+			}
+			else {
+				Rule rule = grammar.getRule(scopeG.name, ruleName);
+				if ( rule==null ) {
+					ErrorManager.grammarError(ErrorManager.MSG_NO_SUCH_RULE_IN_SCOPE,
+											  grammar,
+											  scopeAST.getToken(),
+											  scopeAST.getText(),
+											  ruleName);
+				}
+			}
+		}
+	}
+
+	protected void checkForGlobalScopeTokenConflict(AttributeScope scope) {
+		if ( grammar.getTokenType(scope.getName())!=Label.INVALID ) {
+			ErrorManager.grammarError(ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE,
+									  grammar, null, scope.getName());
+		}
+	}
+
+	/** Check for collision of a rule-scope dynamic attribute with:
+	 *  arg, return value, rule name itself.  Labels are checked elsewhere.
+	 */
+	public void checkForRuleScopeAttributeConflict(Rule r, Attribute attribute) {
+		int msgID = 0;
+		Object arg2 = null;
+		String attrName = attribute.name;
+		if ( r.name.equals(attrName) ) {
+			msgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE;
+			arg2 = r.name;
+		}
+		else if ( (r.returnScope!=null&&r.returnScope.getAttribute(attrName)!=null) ||
+				  (r.parameterScope!=null&&r.parameterScope.getAttribute(attrName)!=null) )
+		{
+			msgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
+			arg2 = r.name;
+		}
+		if ( msgID!=0 ) {
+			ErrorManager.grammarError(msgID,grammar,r.tree.getToken(),attrName,arg2);
+		}
+	}
+
+	/** Make sure a label doesn't conflict with another symbol.
+	 *  Labels must not conflict with: rules, tokens, scope names,
+	 *  return values, parameters, and rule-scope dynamic attributes
+	 *  defined in the surrounding rule.
+	 */
+	protected void checkForLabelConflict(Rule r, Token label) {
+		int msgID = 0;
+		Object arg2 = null;
+		if ( grammar.getGlobalScope(label.getText())!=null ) {
+			msgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		}
+		else if ( grammar.getRule(label.getText())!=null ) {
+			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE;
+		}
+		else if ( grammar.getTokenType(label.getText())!=Label.INVALID ) {
+			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_TOKEN;
+		}
+		else if ( r.ruleScope!=null && r.ruleScope.getAttribute(label.getText())!=null ) {
+			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE;
+			arg2 = r.name;
+		}
+		else if ( (r.returnScope!=null&&r.returnScope.getAttribute(label.getText())!=null) ||
+				  (r.parameterScope!=null&&r.parameterScope.getAttribute(label.getText())!=null) )
+		{
+			msgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
+			arg2 = r.name;
+		}
+		if ( msgID!=0 ) {
+			ErrorManager.grammarError(msgID,grammar,label,label.getText(),arg2);
+		}
+	}
+
+	/** If type of previous label differs from new label's type, that's an error.
+	 */
+	public boolean checkForLabelTypeMismatch(Rule r, Token label, int type) {
+		Grammar.LabelElementPair prevLabelPair =
+			r.labelNameSpace.get(label.getText());
+		if ( prevLabelPair!=null ) {
+			// label already defined; if same type, no problem
+			if ( prevLabelPair.type != type ) {
+				String typeMismatchExpr =
+					Grammar.LabelTypeToString[type]+"!="+
+					Grammar.LabelTypeToString[prevLabelPair.type];
+				ErrorManager.grammarError(
+					ErrorManager.MSG_LABEL_TYPE_CONFLICT,
+					grammar,
+					label,
+					label.getText(),
+					typeMismatchExpr);
+				return true;
+			}
+		}
+		return false;
+	}
+}
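
As a rough standalone illustration of the label-namespace bookkeeping in this checker, the sketch below keeps one map from label name to kind and reports a conflict when the same name reappears with a different kind, in the spirit of checkForLabelTypeMismatch. The LabelKind enum and the demo labels are invented for the example and are not ANTLR types.

    import java.util.HashMap;
    import java.util.Map;

    class LabelNamespaceSketch {
        enum LabelKind { TOKEN_LABEL, RULE_LABEL, TOKEN_LIST_LABEL }

        private final Map<String, LabelKind> labelNameSpace = new HashMap<>();

        // Returns true (a "type mismatch" error) if the label was already
        // defined with a different kind; otherwise records the label.
        boolean define(String label, LabelKind kind) {
            LabelKind prev = labelNameSpace.get(label);
            if (prev != null && prev != kind) {
                System.err.println("label type conflict: " + label + " is "
                    + kind + " but was previously " + prev);
                return true;
            }
            labelNameSpace.put(label, kind);
            return false;
        }

        public static void main(String[] args) {
            LabelNamespaceSketch ns = new LabelNamespaceSketch();
            ns.define("x", LabelKind.TOKEN_LABEL);   // ok
            ns.define("x", LabelKind.TOKEN_LABEL);   // ok: same kind
            ns.define("x", LabelKind.RULE_LABEL);    // reported: kind differs
        }
    }
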
diff --git a/tool/src/main/java/org/antlr/tool/NonRegularDecisionMessage.java b/tool/src/main/java/org/antlr/tool/NonRegularDecisionMessage.java
new file mode 100644
index 0000000..2529a06
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/NonRegularDecisionMessage.java
@@ -0,0 +1,70 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.DecisionProbe;
+import org.stringtemplate.v4.ST;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+/** More than a single alternative recurses, so this decision is not regular. */
+public class NonRegularDecisionMessage extends Message {
+	public DecisionProbe probe;
+	public Set<Integer> altsWithRecursion;
+
+	public NonRegularDecisionMessage(DecisionProbe probe, Set<Integer> altsWithRecursion) {
+		super(ErrorManager.MSG_NONREGULAR_DECISION);
+		this.probe = probe;
+		this.altsWithRecursion = altsWithRecursion;
+	}
+
+	@Override
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getCharPositionInLine();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+
+		ST st = getMessageTemplate();
+		String ruleName = probe.dfa.getNFADecisionStartState().enclosingRule.name;
+		st.add("ruleName", ruleName);
+		List<Integer> sortedAlts = new ArrayList<Integer>();
+		sortedAlts.addAll(altsWithRecursion);
+		Collections.sort(sortedAlts); // make sure it's 1, 2, ...
+		st.add("alts", sortedAlts);
+
+		return super.toString(st);
+	}
+
+}
diff --git a/tool/src/main/java/org/antlr/tool/RandomPhrase.java b/tool/src/main/java/org/antlr/tool/RandomPhrase.java
new file mode 100644
index 0000000..bf154f3
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/RandomPhrase.java
@@ -0,0 +1,230 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.Tool;
+import org.antlr.analysis.Label;
+import org.antlr.analysis.NFAState;
+import org.antlr.analysis.RuleClosureTransition;
+import org.antlr.analysis.Transition;
+import org.antlr.misc.IntervalSet;
+import org.antlr.misc.Utils;
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Random;
+import java.util.Stack;
+
+/** Generate a random phrase given a grammar.
+ *  Usage:
+ *     java org.antlr.tool.RandomPhrase grammarFile.g startRule [seed]
+ *
+ *  For example:
+ *     java org.antlr.tool.RandomPhrase simple.g program 342
+ *
+ *  The seed acts like a unique identifier so you can get the same random
+ *  phrase back during unit testing, for example.
+ *
+ *  If you do not specify a seed, the current time in milliseconds is used,
+ *  guaranteeing that you'll never see that seed again.
+ *
+ *  NOTE: this does not work well for large grammars...it tends to recurse
+ *  too much and build really long strings.  I need throttle control; later.
+ */
+public class RandomPhrase {
+	public static final boolean debug = false;
+
+	protected static Random random;
+
+	/** An experimental method to generate random phrases for a given
+	 *  grammar and start rule.  Appends the generated token types to tokenTypes.
+	 */
+	protected static void randomPhrase(Grammar g, List<Integer> tokenTypes, String startRule) {
+		NFAState state = g.getRuleStartState(startRule);
+		NFAState stopState = g.getRuleStopState(startRule);
+
+		Stack<NFAState> ruleInvocationStack = new Stack<NFAState>();
+		while ( true ) {
+			if ( state==stopState && ruleInvocationStack.isEmpty() ) {
+				break;
+			}
+			if ( debug ) System.out.println("state "+state);
+			if ( state.getNumberOfTransitions()==0 ) {
+				if ( debug ) System.out.println("dangling state: "+state);
+				return;
+			}
+			// end of rule node
+			if ( state.isAcceptState() ) {
+				NFAState invokingState = ruleInvocationStack.pop();
+				if ( debug ) System.out.println("pop invoking state "+invokingState);
+				//System.out.println("leave "+state.enclosingRule.name);
+				RuleClosureTransition invokingTransition =
+					(RuleClosureTransition)invokingState.transition[0];
+				// move to node after state that invoked this rule
+				state = invokingTransition.followState;
+				continue;
+			}
+			if ( state.getNumberOfTransitions()==1 ) {
+				// no branching, just take this path
+				Transition t0 = state.transition[0];
+				if ( t0 instanceof RuleClosureTransition ) {
+					ruleInvocationStack.push(state);
+					if ( debug ) System.out.println("push state "+state);
+					//System.out.println("call "+((RuleClosureTransition)t0).rule.name);
+					//System.out.println("stack depth="+ruleInvocationStack.size());
+				}
+				else if ( t0.label.isSet() || t0.label.isAtom() ) {
+					tokenTypes.add( getTokenType(t0.label) );
+				}
+				state = (NFAState)t0.target;
+				continue;
+			}
+
+			int decisionNumber = state.getDecisionNumber();
+			if ( decisionNumber==0 ) {
+				System.out.println("weird: no decision number but a choice node");
+				continue;
+			}
+			// decision point, pick ith alternative randomly
+			int n = g.getNumberOfAltsForDecisionNFA(state);
+			int randomAlt = random.nextInt(n) + 1;
+			if ( debug ) System.out.println("randomAlt="+randomAlt);
+			NFAState altStartState =
+				g.getNFAStateForAltOfDecision(state, randomAlt);
+			Transition t = altStartState.transition[0];
+			state = (NFAState)t.target;
+		}
+	}
+
+	protected static Integer getTokenType(Label label) {
+		if ( label.isSet() ) {
+			// pick random element of set
+			IntervalSet typeSet = (IntervalSet)label.getSet();
+			int randomIndex = random.nextInt(typeSet.size());
+			return typeSet.get(randomIndex);
+		}
+		else {
+			return Utils.integer(label.getAtom());
+		}
+		//System.out.println(t0.label.toString(g));
+	}
+
+	/** Used to generate random strings */
+	public static void main(String[] args) {
+		if ( args.length < 2 ) {
+			System.err.println("usage: java org.antlr.tool.RandomPhrase grammarfile startrule [seed]");
+			return;
+		}
+		String grammarFileName = args[0];
+		String startRule = args[1];
+		long seed = System.currentTimeMillis(); // use time-based seed unless specified
+		if ( args.length==3 ) {
+			String seedStr = args[2];
+			seed = Long.parseLong(seedStr);
+		}
+		try {
+			random = new Random(seed);
+
+			CompositeGrammar composite = new CompositeGrammar();
+			Tool tool = new Tool();
+			Grammar parser = new Grammar(tool, grammarFileName, composite);
+			composite.setDelegationRoot(parser);
+
+			FileReader fr = new FileReader(grammarFileName);
+			BufferedReader br = new BufferedReader(fr);
+			parser.parseAndBuildAST(br);
+			br.close();
+
+			parser.composite.assignTokenTypes();
+			parser.composite.defineGrammarSymbols();
+			parser.composite.createNFAs();
+
+			List<? extends Collection<? extends Rule>> leftRecursiveRules = parser.checkAllRulesForLeftRecursion();
+			if ( leftRecursiveRules.size()>0 ) {
+				return;
+			}
+
+			if ( parser.getRule(startRule)==null ) {
+				System.out.println("undefined start rule "+startRule);
+				return;
+			}
+
+			String lexerGrammarText = parser.getLexerGrammar();
+			Grammar lexer = new Grammar(tool);
+			lexer.importTokenVocabulary(parser);
+			lexer.fileName = grammarFileName;
+			if ( lexerGrammarText!=null ) {
+				lexer.setGrammarContent(lexerGrammarText);
+			}
+			else {
+				System.err.println("no lexer grammar found in "+grammarFileName);
+			}
+			lexer.buildNFA();
+			leftRecursiveRules = lexer.checkAllRulesForLeftRecursion();
+			if ( leftRecursiveRules.size()>0 ) {
+				return;
+			}
+			//System.out.println("lexer:\n"+lexer);
+
+			List<Integer> tokenTypes = new ArrayList<Integer>(100);
+			randomPhrase(parser, tokenTypes, startRule);
+			System.out.println("token types="+tokenTypes);
+			for (int i = 0; i < tokenTypes.size(); i++) {
+				Integer ttypeI = tokenTypes.get(i);
+				int ttype = ttypeI;
+				String ttypeDisplayName = parser.getTokenDisplayName(ttype);
+				if ( Character.isUpperCase(ttypeDisplayName.charAt(0)) ) {
+					List<Integer> charsInToken = new ArrayList<Integer>(10);
+					randomPhrase(lexer, charsInToken, ttypeDisplayName);
+					System.out.print(" ");
+					for (int j = 0; j < charsInToken.size(); j++) {
+						Integer cI = charsInToken.get(j);
+						System.out.print((char)cI.intValue());
+					}
+				}
+				else { // it's a literal
+					String literal =
+						ttypeDisplayName.substring(1,ttypeDisplayName.length()-1);
+					System.out.print(" "+literal);
+				}
+			}
+			System.out.println();
+		}
+		catch (Error er) {
+			System.err.println("Error walking "+grammarFileName+" rule "+startRule+" seed "+seed);
+			er.printStackTrace(System.err);
+		}
+		catch (Exception e) {
+			System.err.println("Exception walking "+grammarFileName+" rule "+startRule+" seed "+seed);
+			e.printStackTrace(System.err);
+		}
+	}
+}
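
The reproducibility point in the class comment (same seed, same phrase) comes down to seeding java.util.Random and drawing alternatives with nextInt(n)+1. The toy example below shows just that; its hard-coded alternatives are invented for illustration and have nothing to do with the ANTLR Grammar API.

    import java.util.Random;

    class SeededPhraseSketch {
        public static void main(String[] args) {
            long seed = args.length > 0 ? Long.parseLong(args[0]) : System.currentTimeMillis();
            Random random = new Random(seed);

            String[][] decisions = {
                {"the", "a"},             // decision 1: 2 alts
                {"cat", "dog", "parser"}, // decision 2: 3 alts
                {"sleeps", "runs"}        // decision 3: 2 alts
            };

            StringBuilder phrase = new StringBuilder();
            for (String[] alts : decisions) {
                int n = alts.length;
                int randomAlt = random.nextInt(n) + 1;     // alts are numbered 1..n
                phrase.append(alts[randomAlt - 1]).append(' ');
            }
            System.out.println("seed " + seed + ": " + phrase.toString().trim());
        }
    }
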
diff --git a/tool/src/main/java/org/antlr/tool/RecursionOverflowMessage.java b/tool/src/main/java/org/antlr/tool/RecursionOverflowMessage.java
new file mode 100644
index 0000000..fdb3557
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/RecursionOverflowMessage.java
@@ -0,0 +1,86 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.DFAState;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.Label;
+import org.antlr.analysis.NFAState;
+import org.stringtemplate.v4.ST;
+
+import java.util.Collection;
+import java.util.List;
+
+/** Indicates recursion overflow.  A DFA state tried to add an NFA configuration
+ *  with NFA state p that was mentioned in its stack context too many times.
+ */
+public class RecursionOverflowMessage extends Message {
+	public DecisionProbe probe;
+	public DFAState sampleBadState;
+	public int alt;
+	public Collection<String> targetRules;
+	public Collection<? extends Collection<? extends NFAState>> callSiteStates;
+
+	public RecursionOverflowMessage(DecisionProbe probe,
+									DFAState sampleBadState,
+									int alt,
+									Collection<String> targetRules,
+									Collection<? extends Collection<? extends NFAState>> callSiteStates)
+	{
+		super(ErrorManager.MSG_RECURSION_OVERLOW);
+		this.probe = probe;
+		this.sampleBadState = sampleBadState;
+		this.alt = alt;
+		this.targetRules = targetRules;
+		this.callSiteStates = callSiteStates;
+	}
+
+	@Override
+	public String toString() {
+		GrammarAST decisionASTNode = probe.dfa.getDecisionASTNode();
+		line = decisionASTNode.getLine();
+		column = decisionASTNode.getCharPositionInLine();
+		String fileName = probe.dfa.nfa.grammar.getFileName();
+		if ( fileName!=null ) {
+			file = fileName;
+		}
+
+		ST st = getMessageTemplate();
+		st.add("targetRules", targetRules);
+		st.add("alt", alt);
+		st.add("callSiteStates", callSiteStates);
+
+		List<Label> labels =
+			probe.getSampleNonDeterministicInputSequence(sampleBadState);
+		String input = probe.getInputSequenceDisplay(labels);
+		st.add("input", input);
+
+		return super.toString(st);
+	}
+
+}
diff --git a/tool/src/main/java/org/antlr/tool/Rule.java b/tool/src/main/java/org/antlr/tool/Rule.java
new file mode 100644
index 0000000..915c215
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/Rule.java
@@ -0,0 +1,585 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.analysis.NFAState;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+
+import java.util.*;
+
+/** Combine the info associated with a rule. */
+public class Rule {
+	public static final boolean supportsLabelOptimization;
+	static {
+		supportsLabelOptimization = false;
+	}
+
+	public String name;
+	public int index;
+	public String modifier;
+	public NFAState startState;
+	public NFAState stopState;
+
+	/** This rule's options */
+	protected Map<String, Object> options;
+
+	public static final Set<String> legalOptions =
+			new HashSet<String>() {
+                {
+                    add("k"); add("greedy"); add("memoize");
+                    add("backtrack");
+                }
+            };
+
+	/** The AST representing the whole rule */
+	public GrammarAST tree;
+
+	/** To which grammar does this belong? */
+	public Grammar grammar;
+
+	/** For convenience, track the argument def AST action node if any */
+	public GrammarAST argActionAST;
+
+	public GrammarAST EORNode;
+
+	/** The return values of a rule and predefined rule attributes */
+	public AttributeScope returnScope;
+
+	public AttributeScope parameterScope;
+
+	/** the attributes defined with "scope {...}" inside a rule */
+	public AttributeScope ruleScope;
+
+	/** A list of scope names (String) used by this rule */
+	public List<String> useScopes;
+
+    /** Exceptions that this rule can throw */
+    public Set<String> throwsSpec;
+
+    /** A list of all LabelElementPair attached to tokens like id=ID */
+    public LinkedHashMap<String, Grammar.LabelElementPair> tokenLabels;
+
+    /** A list of all LabelElementPair attached to tokens like x=. in tree grammar */
+    public LinkedHashMap<String, Grammar.LabelElementPair> wildcardTreeLabels;
+
+    /** A list of all LabelElementPair attached to tokens like x+=. in tree grammar */
+    public LinkedHashMap<String, Grammar.LabelElementPair> wildcardTreeListLabels;
+
+	/** A list of all LabelElementPair attached to single char literals like x='a' */
+	public LinkedHashMap<String, Grammar.LabelElementPair> charLabels;
+
+	/** A list of all LabelElementPair attached to rule references like f=field */
+	public LinkedHashMap<String, Grammar.LabelElementPair> ruleLabels;
+
+	/** A list of all Token list LabelElementPair like ids+=ID */
+	public LinkedHashMap<String, Grammar.LabelElementPair> tokenListLabels;
+
+	/** A list of all rule ref list LabelElementPair like ids+=expr */
+	public LinkedHashMap<String, Grammar.LabelElementPair> ruleListLabels;
+
+	/** All labels go in here (plus being split per the above lists) to
+	 *  catch dup label and label type mismatches.
+	 */
+	protected Map<String, Grammar.LabelElementPair> labelNameSpace =
+		new HashMap<String, Grammar.LabelElementPair>();
+
+	/** Map a name to an action for this rule.  Currently init is the only
+	 *  one we use, but we can add more in the future.
+	 *  The code generator will use this to fill holes in the rule template.
+	 *  I track the AST node for the action in case I need the line number
+	 *  for errors.  A better name is probably namedActions, but I don't
+	 *  want everyone to have to change their code gen templates now.
+	 */
+	protected Map<String, Object> actions =
+		new HashMap<String, Object>();
+
+	/** Track all executable actions other than named actions like @init.
+	 *  Also tracks exception handlers, predicates, and rewrites.
+	 *  We need to examine these actions before code generation so
+	 *  that we can detect refs to $rule.attr etc...
+	 */
+	protected List<GrammarAST> inlineActions = new ArrayList<GrammarAST>();
+
+	public int numberOfAlts;
+
+	/** Each alt has a Map&lt;tokenRefName,List&lt;tokenRefAST&gt;&gt;; range 1..numberOfAlts.
+	 *  So, if there are 3 ID refs in a rule's alt number 2, you'll have
+	 *  altToTokenRef[2].get("ID").size()==3.  This is used to see if $ID is ok.
+	 *  There must be only one ID reference in the alt for $ID to be ok in
+	 *  an action--must be unique.
+	 *
+	 *  This also tracks '+' and "int" literal token references
+	 *  (if not in LEXER).
+	 *
+	 *  Rewrite rules force tracking of all tokens.
+	 */
+	protected Map<String, List<GrammarAST>>[] altToTokenRefMap;
+
+	/** Each alt has a Map&lt;ruleRefName,List&lt;ruleRefAST&gt;&gt;; range 1..numberOfAlts
+	 *  So, if there are 3 expr refs in a rule's alt number 2, you'll have
+	 *  altToRuleRef[2].get("expr").size()==3.  This is used to see if $expr is ok.
+	 *  There must be only one expr reference in the alt for $expr to be ok in
+	 *  an action--must be unique.
+	 *
+	 *  Rewrite rules force tracking of all rule result ASTs. 1..n
+	 */
+	protected Map<String, List<GrammarAST>>[] altToRuleRefMap;
+
+	/** Do not generate start, stop etc... in a return value struct unless
+	 *  somebody references $r.start somewhere.
+	 */
+	public boolean referencedPredefinedRuleAttributes = false;
+
+	public boolean isSynPred = false;
+
+	public boolean imported = false;
+
+	@SuppressWarnings("unchecked")
+	public Rule(Grammar grammar,
+				String ruleName,
+				int ruleIndex,
+				int numberOfAlts)
+	{
+		this.name = ruleName;
+		this.index = ruleIndex;
+		this.numberOfAlts = numberOfAlts;
+		this.grammar = grammar;
+		throwsSpec = new HashSet<String>();
+		altToTokenRefMap = (Map<String, List<GrammarAST>>[])new Map<?, ?>[numberOfAlts+1];
+		altToRuleRefMap = (Map<String, List<GrammarAST>>[])new Map<?, ?>[numberOfAlts+1];
+		for (int alt=1; alt<=numberOfAlts; alt++) {
+			altToTokenRefMap[alt] = new HashMap<String, List<GrammarAST>>();
+			altToRuleRefMap[alt] = new HashMap<String, List<GrammarAST>>();
+		}
+	}
+
+	public static int getRuleType(String ruleName){
+		if (ruleName == null || ruleName.length() == 0)
+			throw new IllegalArgumentException("The specified rule name is not valid.");
+		return Character.isUpperCase(ruleName.charAt(0)) ? Grammar.LEXER : Grammar.PARSER;
+	}
+
+	public void defineLabel(Token label, GrammarAST elementRef, int type) {
+		Grammar.LabelElementPair pair = grammar.new LabelElementPair(label,elementRef);
+		pair.type = type;
+		labelNameSpace.put(label.getText(), pair);
+		switch ( type ) {
+            case Grammar.TOKEN_LABEL :
+                if ( tokenLabels==null ) tokenLabels = new LinkedHashMap<String, Grammar.LabelElementPair>();
+                tokenLabels.put(label.getText(), pair);
+                break;
+            case Grammar.WILDCARD_TREE_LABEL :
+                if ( wildcardTreeLabels==null ) wildcardTreeLabels = new LinkedHashMap<String, Grammar.LabelElementPair>();
+                wildcardTreeLabels.put(label.getText(), pair);
+                break;
+            case Grammar.WILDCARD_TREE_LIST_LABEL :
+                if ( wildcardTreeListLabels==null ) wildcardTreeListLabels = new LinkedHashMap<String, Grammar.LabelElementPair>();
+                wildcardTreeListLabels.put(label.getText(), pair);
+                break;
+			case Grammar.RULE_LABEL :
+				if ( ruleLabels==null ) ruleLabels = new LinkedHashMap<String, Grammar.LabelElementPair>();
+				ruleLabels.put(label.getText(), pair);
+				break;
+			case Grammar.TOKEN_LIST_LABEL :
+				if ( tokenListLabels==null ) tokenListLabels = new LinkedHashMap<String, Grammar.LabelElementPair>();
+				tokenListLabels.put(label.getText(), pair);
+				break;
+			case Grammar.RULE_LIST_LABEL :
+				if ( ruleListLabels==null ) ruleListLabels = new LinkedHashMap<String, Grammar.LabelElementPair>();
+				ruleListLabels.put(label.getText(), pair);
+				break;
+			case Grammar.CHAR_LABEL :
+				if ( charLabels==null ) charLabels = new LinkedHashMap<String, Grammar.LabelElementPair>();
+				charLabels.put(label.getText(), pair);
+				break;
+		}
+	}
+
+	public Grammar.LabelElementPair getLabel(String name) {
+		return labelNameSpace.get(name);
+	}
+
+	public Grammar.LabelElementPair getTokenLabel(String name) {
+		Grammar.LabelElementPair pair = null;
+		if ( tokenLabels!=null ) {
+			return tokenLabels.get(name);
+		}
+		return pair;
+	}
+
+	public Map<String, Grammar.LabelElementPair> getRuleLabels() {
+		return ruleLabels;
+	}
+
+	public Map<String, Grammar.LabelElementPair> getRuleListLabels() {
+		return ruleListLabels;
+	}
+
+	public Grammar.LabelElementPair getRuleLabel(String name) {
+		Grammar.LabelElementPair pair = null;
+		if ( ruleLabels!=null ) {
+			return ruleLabels.get(name);
+		}
+		return pair;
+	}
+
+	public Grammar.LabelElementPair getTokenListLabel(String name) {
+		Grammar.LabelElementPair pair = null;
+		if ( tokenListLabels!=null ) {
+			return tokenListLabels.get(name);
+		}
+		return pair;
+	}
+
+	public Grammar.LabelElementPair getRuleListLabel(String name) {
+		Grammar.LabelElementPair pair = null;
+		if ( ruleListLabels!=null ) {
+			return ruleListLabels.get(name);
+		}
+		return pair;
+	}
+
+	/** Track a token ID or literal like '+' and "void" as having been referenced
+	 *  somewhere within the alts (not rewrite sections) of a rule.
+	 *
+	 *  This differs from Grammar.altReferencesTokenID(), which tracks all
+	 *  token IDs to check for token IDs without corresponding lexer rules.
+	 */
+	public void trackTokenReferenceInAlt(GrammarAST refAST, int outerAltNum) {
+		List<GrammarAST> refs = altToTokenRefMap[outerAltNum].get(refAST.getText());
+		if ( refs==null ) {
+			refs = new ArrayList<GrammarAST>();
+			altToTokenRefMap[outerAltNum].put(refAST.getText(), refs);
+		}
+		refs.add(refAST);
+	}
+
+	public List<GrammarAST> getTokenRefsInAlt(String ref, int outerAltNum) {
+		if ( altToTokenRefMap[outerAltNum]!=null ) {
+			List<GrammarAST> tokenRefASTs = altToTokenRefMap[outerAltNum].get(ref);
+			return tokenRefASTs;
+		}
+		return null;
+	}
+
+	public void trackRuleReferenceInAlt(GrammarAST refAST, int outerAltNum) {
+		List<GrammarAST> refs = altToRuleRefMap[outerAltNum].get(refAST.getText());
+		if ( refs==null ) {
+			refs = new ArrayList<GrammarAST>();
+			altToRuleRefMap[outerAltNum].put(refAST.getText(), refs);
+		}
+		refs.add(refAST);
+	}
+
+	public List<GrammarAST> getRuleRefsInAlt(String ref, int outerAltNum) {
+		if ( altToRuleRefMap[outerAltNum]!=null ) {
+			List<GrammarAST> ruleRefASTs = altToRuleRefMap[outerAltNum].get(ref);
+			return ruleRefASTs;
+		}
+		return null;
+	}
+
+	public Set<String> getTokenRefsInAlt(int altNum) {
+		return altToTokenRefMap[altNum].keySet();
+	}
+
+	/** For use with rewrite rules, we must track all tokens matched on the
+	 *  left-hand-side; so we need Lists.  This is a unique list of all
+	 *  token types for which the rule needs a list of tokens.  This
+	 *  is called from the rule template not directly by the code generator.
+	 */
+	public Set<String> getAllTokenRefsInAltsWithRewrites() {
+		String output = (String)grammar.getOption("output");
+		Set<String> tokens = new HashSet<String>();
+		if ( output==null || !output.equals("AST") ) {
+			// return nothing if not generating trees; i.e., don't do for templates
+			return tokens;
+		}
+		//System.out.println("blk "+tree.findFirstType(ANTLRParser.BLOCK).toStringTree());
+		for (int i = 1; i <= numberOfAlts; i++) {
+			if ( hasRewrite(i) ) {
+				Map<String, List<GrammarAST>> m = altToTokenRefMap[i];
+				for (String tokenName : m.keySet()) {
+					// convert token name like ID to ID, "void" to 31
+					int ttype = grammar.getTokenType(tokenName);
+					String label = grammar.generator.getTokenTypeAsTargetLabel(ttype);
+					tokens.add(label);
+				}
+			}
+		}
+		return tokens;
+	}
+
+	public Set<String> getRuleRefsInAlt(int outerAltNum) {
+		return altToRuleRefMap[outerAltNum].keySet();
+	}
+
+	/** For use with rewrite rules, we must track all rule AST results on the
+	 *  left-hand-side; so we need Lists.  This is a unique list of all
+	 *  rule results for which the rule needs a list of results.
+	 */
+	public Set<String> getAllRuleRefsInAltsWithRewrites() {
+		Set<String> rules = new HashSet<String>();
+		for (int i = 1; i <= numberOfAlts; i++) {
+			if ( hasRewrite(i) ) {
+				Map<String, ?> m = altToRuleRefMap[i];
+				rules.addAll(m.keySet());
+			}
+		}
+		return rules;
+	}
+
+	public List<GrammarAST> getInlineActions() {
+		return inlineActions;
+	}
+
+	public boolean hasRewrite(int i) {
+		GrammarAST blk = tree.findFirstType(ANTLRParser.BLOCK);
+		GrammarAST alt = blk.getBlockALT(i);
+		GrammarAST rew = alt.getNextSibling();
+		if ( rew!=null && rew.getType()==ANTLRParser.REWRITES ) return true;
+		if ( alt.findFirstType(ANTLRParser.REWRITES)!=null ) return true;
+		return false;
+	}
+
+	/** Return the scope containing name */
+	public AttributeScope getAttributeScope(String name) {
+		AttributeScope scope = getLocalAttributeScope(name);
+		if ( scope!=null ) {
+			return scope;
+		}
+		if ( ruleScope!=null && ruleScope.getAttribute(name)!=null ) {
+			scope = ruleScope;
+		}
+		return scope;
+	}
+
+	/** Get the arg, return value, or predefined property for this rule */
+	public AttributeScope getLocalAttributeScope(String name) {
+		AttributeScope scope = null;
+		if ( returnScope!=null && returnScope.getAttribute(name)!=null ) {
+			scope = returnScope;
+		}
+		else if ( parameterScope!=null && parameterScope.getAttribute(name)!=null ) {
+			scope = parameterScope;
+		}
+		else {
+			AttributeScope rulePropertiesScope =
+				RuleLabelScope.grammarTypeToRulePropertiesScope[grammar.type];
+			if ( rulePropertiesScope.getAttribute(name)!=null ) {
+				scope = rulePropertiesScope;
+			}
+		}
+		return scope;
+	}
+
+	/** For references to tokens by name rather than by label, such as $ID,
+	 *  we need to get the existing label for the ID ref or create a new
+	 *  one.
+	 */
+	public String getElementLabel(String refdSymbol,
+								  int outerAltNum,
+								  CodeGenerator generator)
+	{
+		GrammarAST uniqueRefAST;
+		if ( grammar.type != Grammar.LEXER &&
+			 Character.isUpperCase(refdSymbol.charAt(0)) )
+		{
+			// symbol is a token
+			List<GrammarAST> tokenRefs = getTokenRefsInAlt(refdSymbol, outerAltNum);
+			uniqueRefAST = tokenRefs.get(0);
+		}
+		else {
+			// symbol is a rule
+			List<GrammarAST> ruleRefs = getRuleRefsInAlt(refdSymbol, outerAltNum);
+			uniqueRefAST = ruleRefs.get(0);
+		}
+		if ( uniqueRefAST.code==null ) {
+			// no code?  must not have gen'd yet; forward ref
+			return null;
+		}
+		String labelName;
+		String existingLabelName =
+			(String)uniqueRefAST.code.getAttribute("label");
+		// reuse any label or list label if it exists
+		if ( existingLabelName!=null ) {
+			labelName = existingLabelName;
+		}
+		else {
+			// else create new label
+			labelName = generator.createUniqueLabel(refdSymbol);
+			CommonToken label = new CommonToken(ANTLRParser.ID, labelName);
+			if ( grammar.type != Grammar.LEXER &&
+				 Character.isUpperCase(refdSymbol.charAt(0)) )
+			{
+				grammar.defineTokenRefLabel(name, label, uniqueRefAST);
+			}
+			else {
+				grammar.defineRuleRefLabel(name, label, uniqueRefAST);
+			}
+			uniqueRefAST.code.add("label", labelName);
+		}
+		return labelName;
+	}
+
+	/** If a rule has no user-defined return values and nobody references
+	 *  its start/stop (predefined attributes), then there is no need to
+	 *  define a struct; otherwise, for now, we assume a struct.  A rule also
+	 *  has multiple return values if you are building trees or templates.
+	 */
+	public boolean getHasMultipleReturnValues() {
+		return
+			referencedPredefinedRuleAttributes || grammar.buildAST() ||
+			grammar.buildTemplate() ||
+			(returnScope!=null && returnScope.attributes.size()>1);
+	}
+
+	public boolean getHasSingleReturnValue() {
+		return
+			!(referencedPredefinedRuleAttributes || grammar.buildAST() ||
+			  grammar.buildTemplate()) &&
+			(returnScope!=null && returnScope.attributes.size()==1);
+	}
+
+	public boolean getHasReturnValue() {
+		return
+			referencedPredefinedRuleAttributes || grammar.buildAST() ||
+			grammar.buildTemplate() ||
+			(returnScope!=null && returnScope.attributes.size()>0);
+	}
+
+	public String getSingleValueReturnType() {
+		if ( returnScope!=null && returnScope.attributes.size()==1 ) {
+			return returnScope.attributes.values().iterator().next().type;
+		}
+		return null;
+	}
+
+	public String getSingleValueReturnName() {
+		if ( returnScope!=null && returnScope.attributes.size()==1 ) {
+			return returnScope.attributes.values().iterator().next().name;
+		}
+		return null;
+	}
+
+	/** Given @scope::name {action} define it for this grammar.  Later,
+	 *  the code generator will ask for the actions table.
+	 */
+	public void defineNamedAction(GrammarAST ampersandAST,
+								  GrammarAST nameAST,
+								  GrammarAST actionAST)
+	{
+		//System.out.println("rule @"+nameAST.getText()+"{"+actionAST.getText()+"}");
+		String actionName = nameAST.getText();
+		GrammarAST a = (GrammarAST)actions.get(actionName);
+		if ( a!=null ) {
+			ErrorManager.grammarError(
+				ErrorManager.MSG_ACTION_REDEFINITION,grammar,
+				nameAST.getToken(),nameAST.getText());
+		}
+		else {
+			actions.put(actionName,actionAST);
+		}
+	}
+
+	public void trackInlineAction(GrammarAST actionAST) {
+		inlineActions.add(actionAST);
+	}
+
+	public Map<String, Object> getActions() {
+		return actions;
+	}
+
+	public void setActions(Map<String, Object> actions) {
+		this.actions = actions;
+	}
+
+	/** Save the option key/value pair and process it; return the key,
+	 *  or null if the option is invalid.
+	 */
+	public String setOption(String key, Object value, Token optionsStartToken) {
+		if ( !legalOptions.contains(key) ) {
+			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
+									  grammar,
+									  optionsStartToken,
+									  key);
+			return null;
+		}
+		if ( options==null ) {
+			options = new HashMap<String, Object>();
+		}
+        if ( key.equals("memoize") && value.toString().equals("true") ) {
+			grammar.composite.getRootGrammar().atLeastOneRuleMemoizes = true;
+        }
+        if ( key.equals("backtrack") && value.toString().equals("true") ) {
+            grammar.composite.getRootGrammar().atLeastOneBacktrackOption = true;
+        }
+		if ( key.equals("k") ) {
+			grammar.numberOfManualLookaheadOptions++;
+		}
+		 options.put(key, value);
+		return key;
+	}
+
+	public void setOptions(Map<String, Object> options, Token optionsStartToken) {
+		if ( options==null ) {
+			this.options = null;
+			return;
+		}
+		Set<String> keys = options.keySet();
+		for (Iterator<String> it = keys.iterator(); it.hasNext();) {
+			String optionName = it.next();
+			Object optionValue = options.get(optionName);
+			String stored=setOption(optionName, optionValue, optionsStartToken);
+			if ( stored==null ) {
+				it.remove();
+			}
+		}
+	}
+
+	/** Used during grammar imports to see if sets of rules intersect... This
+	 *  method and hashCode use the String name as the key for Rule objects.
+	public boolean equals(Object other) {
+		return this.name.equals(((Rule)other).name);
+	}
+	 */
+
+	/** Used during grammar imports to see if sets of rules intersect...
+	public int hashCode() {
+		return name.hashCode();
+	}
+	 */
+
+	@Override
+	public String toString() { // used for testing
+		return "["+grammar.name+"."+name+",index="+index+",line="+tree.getToken().getLine()+"]";
+	}
+}
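
The option-handling methods above (setOption/setOptions) follow a simple
whitelist-and-store pattern.  A standalone sketch of that pattern, with a
hypothetical class name and only a few option names standing in for the
rule's legal options:

    import java.util.*;

    class RuleOptionsSketch {
        static final Set<String> LEGAL =
            new HashSet<String>(Arrays.asList("k", "memoize", "backtrack", "greedy"));
        final Map<String, Object> options = new HashMap<String, Object>();

        /** Store the pair and return the key, or null if the option is illegal. */
        String setOption(String key, Object value) {
            if ( !LEGAL.contains(key) ) {
                System.err.println("illegal option: " + key); // stands in for ErrorManager.grammarError(...)
                return null;
            }
            options.put(key, value);
            return key;
        }

        public static void main(String[] args) {
            RuleOptionsSketch r = new RuleOptionsSketch();
            r.setOption("backtrack", "true"); // stored
            r.setOption("bogus", "x");        // rejected and reported
            System.out.println(r.options);    // {backtrack=true}
        }
    }
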
diff --git a/tool/src/main/java/org/antlr/tool/RuleLabelScope.java b/tool/src/main/java/org/antlr/tool/RuleLabelScope.java
new file mode 100644
index 0000000..ab45f8d
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/RuleLabelScope.java
@@ -0,0 +1,101 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.antlr.runtime.Token;
+
+public class RuleLabelScope extends AttributeScope {
+	/** Rules have a predefined set of attributes as well as
+	 *  the return values; 'text' must be computed on the fly, though.
+	 */
+	public static AttributeScope predefinedRulePropertiesScope =
+		new AttributeScope("RulePredefined",null) {{
+			addAttribute("text", null);
+			addAttribute("start", null);
+			addAttribute("stop", null);
+			addAttribute("tree", null);
+			addAttribute("st", null);
+			isPredefinedRuleScope = true;
+		}};
+
+	public static AttributeScope predefinedTreeRulePropertiesScope =
+		new AttributeScope("RulePredefined",null) {{
+			addAttribute("text", null);
+			addAttribute("start", null); // note: no stop; not meaningful
+			addAttribute("tree", null);
+			addAttribute("st", null);
+			isPredefinedRuleScope = true;
+		}};
+
+	public static AttributeScope predefinedLexerRulePropertiesScope =
+		new AttributeScope("LexerRulePredefined",null) {{
+			addAttribute("text", null);
+			addAttribute("type", null);
+			addAttribute("line", null);
+			addAttribute("index", null);
+			addAttribute("pos", null);
+			addAttribute("channel", null);
+			addAttribute("start", null);
+			addAttribute("stop", null);
+			addAttribute("int", null);
+			isPredefinedLexerRuleScope = true;
+		}};
+
+	public static AttributeScope[] grammarTypeToRulePropertiesScope =
+		{
+			null,
+			predefinedLexerRulePropertiesScope,	// LEXER
+			predefinedRulePropertiesScope,		// PARSER
+			predefinedTreeRulePropertiesScope,	// TREE_PARSER
+			predefinedRulePropertiesScope,		// COMBINED
+		};
+
+	public Rule referencedRule;
+
+	public RuleLabelScope(Rule referencedRule, Token actionToken) {
+		super("ref_"+referencedRule.name,actionToken);
+		this.referencedRule = referencedRule;
+	}
+
+	/** If you label a rule reference, you can access that rule's
+	 *  return values as well as any predefined attributes.
+	 */
+	@Override
+	public Attribute getAttribute(String name) {
+		AttributeScope rulePropertiesScope =
+			RuleLabelScope.grammarTypeToRulePropertiesScope[grammar.type];
+		if ( rulePropertiesScope.getAttribute(name)!=null ) {
+			return rulePropertiesScope.getAttribute(name);
+		}
+
+		if ( referencedRule.returnScope!=null ) {
+			return referencedRule.returnScope.getAttribute(name);
+		}
+		return null;
+	}
+}
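
getAttribute() above resolves a name against the predefined rule properties
first and only then against the referenced rule's return values.  A minimal
sketch of that precedence using plain collections instead of AttributeScope
(names and types below are illustrative only):

    import java.util.*;

    class LabelLookupSketch {
        static final Set<String> PREDEFINED =
            new HashSet<String>(Arrays.asList("text", "start", "stop", "tree", "st"));

        static String resolve(String name, Map<String, String> returnValues) {
            if ( PREDEFINED.contains(name) ) return "predefined:" + name;
            if ( returnValues.containsKey(name) ) return "returnValue:" + returnValues.get(name);
            return null; // unknown attribute
        }

        public static void main(String[] args) {
            Map<String, String> rets = new HashMap<String, String>();
            rets.put("count", "int");
            System.out.println(resolve("text", rets));  // predefined:text
            System.out.println(resolve("count", rets)); // returnValue:int
            System.out.println(resolve("oops", rets));  // null
        }
    }
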
diff --git a/tool/src/main/java/org/antlr/tool/Strip.java b/tool/src/main/java/org/antlr/tool/Strip.java
new file mode 100644
index 0000000..f411090
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/Strip.java
@@ -0,0 +1,280 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.tool;
+
+import org.antlr.grammar.v3.ANTLRv3Lexer;
+import org.antlr.grammar.v3.ANTLRv3Parser;
+import org.antlr.runtime.*;
+import org.antlr.runtime.tree.CommonTree;
+import org.antlr.runtime.tree.TreeAdaptor;
+import org.antlr.runtime.tree.TreeWizard;
+
+import java.util.List;
+
+/** A basic action stripper. */
+public class Strip {
+    protected String filename;
+    protected TokenRewriteStream tokens;
+    protected boolean tree_option = false;
+    protected String[] args;
+
+    public static void main(String[] args) throws Exception {
+        Strip s = new Strip(args);
+        s.parseAndRewrite();
+        System.out.println(s.tokens);
+    }
+
+    public Strip(String[] args) { this.args = args; }
+
+    public TokenRewriteStream getTokenStream() { return tokens; }
+
+    public void parseAndRewrite() throws Exception {
+        processArgs(args);
+        CharStream input;
+        if ( filename!=null ) input = new ANTLRFileStream(filename);
+        else input = new ANTLRInputStream(System.in);
+        // BUILD AST
+        ANTLRv3Lexer lex = new ANTLRv3Lexer(input);
+        tokens = new TokenRewriteStream(lex);
+        ANTLRv3Parser g = new ANTLRv3Parser(tokens);
+        ANTLRv3Parser.grammarDef_return r = g.grammarDef();
+        CommonTree t = r.getTree();
+        if (tree_option) System.out.println(t.toStringTree());
+        rewrite(g.getTreeAdaptor(),t,g.getTokenNames());
+    }
+
+    public void rewrite(TreeAdaptor adaptor, CommonTree t, String[] tokenNames) throws Exception {
+        TreeWizard wiz = new TreeWizard(adaptor, tokenNames);
+
+        // ACTIONS STUFF
+        wiz.visit(t, ANTLRv3Parser.ACTION,
+           new TreeWizard.Visitor() {
+			@Override
+               public void visit(Object t) { ACTION(tokens, (CommonTree)t); }
+           });
+
+        wiz.visit(t, ANTLRv3Parser.AT,  // ^('@' id ACTION) rule actions
+            new TreeWizard.Visitor() {
+			@Override
+              public void visit(Object t) {
+                  CommonTree a = (CommonTree)t;
+                  CommonTree action = null;
+                  if ( a.getChildCount()==2 ) action = (CommonTree)a.getChild(1);
+                  else if ( a.getChildCount()==3 ) action = (CommonTree)a.getChild(2);
+                  if ( action!=null && action.getType()==ANTLRv3Parser.ACTION ) {
+                      tokens.delete(a.getTokenStartIndex(),
+                                    a.getTokenStopIndex());
+                      killTrailingNewline(tokens, action.getTokenStopIndex());
+                  }
+              }
+            });
+        wiz.visit(t, ANTLRv3Parser.ARG, // wipe rule arguments
+                  new TreeWizard.Visitor() {
+			@Override
+              public void visit(Object t) {
+                  CommonTree a = (CommonTree)t;
+                  a = (CommonTree)a.getChild(0);
+                  tokens.delete(a.token.getTokenIndex());
+                  killTrailingNewline(tokens, a.token.getTokenIndex());
+              }
+            });
+        wiz.visit(t, ANTLRv3Parser.RET, // wipe rule return declarations
+            new TreeWizard.Visitor() {
+			@Override
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    CommonTree ret = (CommonTree)a.getChild(0);
+                    tokens.delete(a.token.getTokenIndex(),
+                                  ret.token.getTokenIndex());
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.SEMPRED, // comment out semantic predicates
+            new TreeWizard.Visitor() {
+			@Override
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    tokens.replace(a.token.getTokenIndex(), "/*"+a.getText()+"*/");
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.GATED_SEMPRED, // comment out semantic predicates
+            new TreeWizard.Visitor() {
+			@Override
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    String text = tokens.toString(a.getTokenStartIndex(),
+                                                  a.getTokenStopIndex());
+                    tokens.replace(a.getTokenStartIndex(),
+                                   a.getTokenStopIndex(),
+                                   "/*"+text+"*/");
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.SCOPE, // comment scope specs
+            new TreeWizard.Visitor() {
+			@Override
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    tokens.delete(a.getTokenStartIndex(),
+                                  a.getTokenStopIndex());
+                    killTrailingNewline(tokens, a.getTokenStopIndex());
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.ARG_ACTION, // args r[x,y] -> ^(r [x,y])
+            new TreeWizard.Visitor() {
+			@Override
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    if ( a.getParent().getType()==ANTLRv3Parser.RULE_REF ) {
+                        tokens.delete(a.getTokenStartIndex(),
+                                      a.getTokenStopIndex());
+                    }
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.LABEL_ASSIGN, // ^('=' id ^(RULE_REF [arg])), ...
+            new TreeWizard.Visitor() {
+			@Override
+                public void visit(Object t) {
+                    CommonTree a = (CommonTree)t;
+                    if ( !a.hasAncestor(ANTLRv3Parser.OPTIONS) ) { // avoid options
+                        CommonTree child = (CommonTree)a.getChild(0);
+                        tokens.delete(a.token.getTokenIndex());     // kill "id="
+                        tokens.delete(child.token.getTokenIndex());
+                    }
+                }
+            });
+        wiz.visit(t, ANTLRv3Parser.LIST_LABEL_ASSIGN, // ^('+=' id ^(RULE_REF [arg])), ...
+            new TreeWizard.Visitor() {
+			@Override
+              public void visit(Object t) {
+                  CommonTree a = (CommonTree)t;
+                  CommonTree child = (CommonTree)a.getChild(0);
+                  tokens.delete(a.token.getTokenIndex());     // kill "id+="
+                  tokens.delete(child.token.getTokenIndex());
+              }
+            });
+
+
+        // AST STUFF
+        wiz.visit(t, ANTLRv3Parser.REWRITE,
+            new TreeWizard.Visitor() {
+			@Override
+              public void visit(Object t) {
+                  CommonTree a = (CommonTree)t;
+                  CommonTree child = (CommonTree)a.getChild(0);
+                  int stop = child.getTokenStopIndex();
+                  if ( child.getType()==ANTLRv3Parser.SEMPRED ) {
+                      CommonTree rew = (CommonTree)a.getChild(1);
+                      stop = rew.getTokenStopIndex();
+                  }
+                  tokens.delete(a.token.getTokenIndex(), stop);
+                  killTrailingNewline(tokens, stop);
+              }
+            });
+        wiz.visit(t, ANTLRv3Parser.ROOT,
+           new TreeWizard.Visitor() {
+			@Override
+               public void visit(Object t) {
+                   tokens.delete(((CommonTree)t).token.getTokenIndex());
+               }
+           });
+        wiz.visit(t, ANTLRv3Parser.BANG,
+           new TreeWizard.Visitor() {
+			@Override
+               public void visit(Object t) {
+                   tokens.delete(((CommonTree)t).token.getTokenIndex());
+               }
+           });
+    }
+
+    public static void ACTION(TokenRewriteStream tokens, CommonTree t) {
+        CommonTree parent = (CommonTree)t.getParent();
+        int ptype = parent.getType();
+        if ( ptype==ANTLRv3Parser.SCOPE || // we have special rules for these
+             ptype==ANTLRv3Parser.AT )
+        {
+            return;
+        }
+        //System.out.println("ACTION: "+t.getText());
+        CommonTree root = (CommonTree)t.getAncestor(ANTLRv3Parser.RULE);
+        if ( root!=null ) {
+            CommonTree rule = (CommonTree)root.getChild(0);
+            //System.out.println("rule: "+rule);
+            if ( !Character.isUpperCase(rule.getText().charAt(0)) ) {
+                tokens.delete(t.getTokenStartIndex(),t.getTokenStopIndex());
+                killTrailingNewline(tokens, t.token.getTokenIndex());
+            }
+        }
+    }
+
+    private static void killTrailingNewline(TokenRewriteStream tokens, int index) {
+        List<? extends Token> all = tokens.getTokens();
+        Token tok = all.get(index);
+        Token after = all.get(index+1);
+        String ws = after.getText();
+        if ( ws.startsWith("\n") ) {
+            //System.out.println("killing WS after action");
+            if ( ws.length()>1 ) {
+                int space = ws.indexOf(' ');
+                int tab = ws.indexOf('\t');
+                if ( ws.startsWith("\n") &&
+                     (space>=0 || tab>=0) )
+                {
+                    return; // do nothing if \n + indent
+                }
+                // otherwise kill all \n
+                ws = ws.replaceAll("\n", "");
+                tokens.replace(after.getTokenIndex(), ws);
+            }
+            else {
+                tokens.delete(after.getTokenIndex());
+            }
+        }
+    }
+
+    public void processArgs(String[] args) {
+        if ( args==null || args.length==0 ) {
+            help();
+            return;
+        }
+        for (int i = 0; i < args.length; i++) {
+            if (args[i].equals("-tree")) tree_option = true;
+            else if (args[i].charAt(0) != '-') {
+                // Must be the grammar file
+                filename = args[i];
+            }
+        }
+    }
+
+    private static void help() {
+        System.err.println("usage: java org.antlr.tool.Strip [args] file.g");
+        System.err.println("  -tree      print out ANTLR grammar AST");
+    }
+
+}
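
Besides the command line shown in main() above, Strip can be driven
programmatically.  A minimal usage sketch ("MyGrammar.g" is a placeholder
file name):

    import org.antlr.tool.Strip;

    public class StripDemo {
        public static void main(String[] args) throws Exception {
            Strip s = new Strip(new String[] { "MyGrammar.g" });
            s.parseAndRewrite();                // parse the grammar and queue the edits
            // TokenRewriteStream renders the original text with the queued deletions applied
            System.out.println(s.getTokenStream().toString());
        }
    }
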
diff --git a/tool/src/main/java/org/antlr/tool/ToolMessage.java b/tool/src/main/java/org/antlr/tool/ToolMessage.java
new file mode 100644
index 0000000..070049d
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/ToolMessage.java
@@ -0,0 +1,76 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.stringtemplate.v4.ST;
+
+/** A generic message from the tool, such as "file not found" type errors.
+ *  Unlike grammar errors, which may be rather complex, there is no reason
+ *  to create a special object for each error.
+ *
+ *  Sometimes you need to pass in a filename or something to say it is "bad".
+ *  Allow a generic object to be passed in so the string template can deal
+ *  with just printing it or pulling a property out of it.
+ *
+ *  TODO: what to do with exceptions?  Do we want a stack trace for internal errors?
+ */
+public class ToolMessage extends Message {
+
+	public ToolMessage(int msgID) {
+		super(msgID, null, null);
+	}
+	public ToolMessage(int msgID, Object arg) {
+		super(msgID, arg, null);
+	}
+	public ToolMessage(int msgID, Throwable e) {
+		super(msgID);
+		this.e = e;
+	}
+	public ToolMessage(int msgID, Object arg, Object arg2) {
+		super(msgID, arg, arg2);
+	}
+	public ToolMessage(int msgID, Object arg, Throwable e) {
+		super(msgID,arg,null);
+		this.e = e;
+	}
+	@Override
+	public String toString() {
+		ST st = getMessageTemplate();
+		if ( arg!=null ) {
+			st.add("arg", arg);
+		}
+		if ( arg2!=null ) {
+			st.add("arg2", arg2);
+		}
+		if ( e!=null ) {
+			st.add("exception", e);
+			st.add("stackTrace", e.getStackTrace());
+		}
+		return super.toString(st);
+	}
+}
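
toString() above just pours arg/arg2/exception into a message template and
renders it.  A small sketch of that fill-and-render pattern using an inline
StringTemplate instead of the ErrorManager-provided one (template text and
values are made up):

    import org.stringtemplate.v4.ST;

    public class ToolMessageSketch {
        public static void main(String[] args) {
            ST st = new ST("error: cannot open <arg> (<arg2>)");
            st.add("arg", "Foo.g");
            st.add("arg2", "permission denied");
            System.out.println(st.render()); // error: cannot open Foo.g (permission denied)
        }
    }
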
diff --git a/tool/src/main/java/org/antlr/tool/ToolSTGroupFile.java b/tool/src/main/java/org/antlr/tool/ToolSTGroupFile.java
new file mode 100644
index 0000000..d20f813
--- /dev/null
+++ b/tool/src/main/java/org/antlr/tool/ToolSTGroupFile.java
@@ -0,0 +1,47 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2012 Terence Parr
+ *  Copyright (c) 2012 Sam Harwell
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.tool;
+
+import org.stringtemplate.v4.STErrorListener;
+import org.stringtemplate.v4.STGroupFile;
+
+/**
+ * This extension of {@link STGroupFile} automatically sets the group's
+ * {@link STErrorListener} to {@link ErrorManager#getSTErrorListener()}.
+ *
+ * @author Sam Harwell
+ */
+public class ToolSTGroupFile extends STGroupFile {
+
+	public ToolSTGroupFile(String fileName) {
+		super(fileName);
+		setListener(ErrorManager.getSTErrorListener());
+	}
+
+}
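
A self-contained sketch of loading templates through ToolSTGroupFile so that
template errors flow to the tool's ErrorManager; the tiny group file written
to a temp location exists only to make the example runnable:

    import java.io.File;
    import java.io.FileWriter;
    import org.antlr.tool.ToolSTGroupFile;
    import org.stringtemplate.v4.ST;

    public class GroupFileSketch {
        public static void main(String[] args) throws Exception {
            File f = File.createTempFile("demo", ".stg");
            FileWriter w = new FileWriter(f);
            w.write("hello(name) ::= \"hello <name>\"\n");
            w.close();
            ToolSTGroupFile group = new ToolSTGroupFile(f.getAbsolutePath());
            ST st = group.getInstanceOf("hello");
            st.add("name", "world");
            System.out.println(st.render()); // hello world
        }
    }
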
diff --git a/antlr-3.4/tool/src/main/java/org/antlr/tool/serialize.g b/tool/src/main/java/org/antlr/tool/serialize.g
similarity index 100%
rename from antlr-3.4/tool/src/main/java/org/antlr/tool/serialize.g
rename to tool/src/main/java/org/antlr/tool/serialize.g
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/antlr.properties b/tool/src/main/resources/org/antlr/antlr.properties
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/antlr.properties
rename to tool/src/main/resources/org/antlr/antlr.properties
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/AST.stg
new file mode 100644
index 0000000..44d6832
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/AST.stg
@@ -0,0 +1,405 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+import org.antlr.runtime.tree.*;<\n>
+<endif>
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+protected var adaptor:TreeAdaptor = new CommonTreeAdaptor();<\n>
+override public function set treeAdaptor(adaptor:TreeAdaptor):void {
+    this.adaptor = adaptor;
+    <grammar.directDelegates:{g|<g:delegateName()>.treeAdaptor = this.adaptor;}>
+}
+override public function get treeAdaptor():TreeAdaptor {
+    return adaptor;
+}
+>>
+
+@returnScope.ruleReturnMembers() ::= <<
+<ASTLabelType> tree;
+public function get tree():Object { return tree; }
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+var root_0:<ASTLabelType> = null;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
+  ruleDescriptor.wildcardTreeListLabels]:{it |var <it.label.text>_tree:<ASTLabelType>=null;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{it |var <it.label.text>_tree:<ASTLabelType>=null;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{it |var stream_<it>:RewriteRule<rewriteElementType>Stream=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{it |var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  these should be turned off if doing rewrites.  This must be a "mode"
+ *  because a rule could have both rewrite and AST construction within the
+ *  same alternative block.
+ */
+@alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = <ASTLabelType>(adaptor.nil());<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule.name>.add(<label>.tree);
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule>.add(<label>.tree);
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+    referencedWildcardLabels,
+    referencedWildcardListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements: <referencedElementsDeep; separator=", ">
+// token labels: <referencedTokenLabels; separator=", ">
+// rule labels: <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels: <referencedRuleListLabels; separator=", ">
+// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {<\n>
+<endif>
+<prevRuleRootRef()>.tree = root_0;
+<rewriteCodeLabels()>
+root_0 = <ASTLabelType>(adaptor.nil());
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.tree = <ASTLabelType>(adaptor.rulePostProcessing(root_0));
+input.replaceChildren(adaptor.getParent(retval.start),
+                      adaptor.getChildIndex(retval.start),
+                      adaptor.getChildIndex(_last),
+                      retval.tree);
+<endif>
+<endif>
+<! if parser or tree-parser && rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.tree = root_0;
+<else>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.tree = root_0;
+<endif>
+<endif>
+<if(backtracking)>
+}
+<endif>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{it |var stream_<it>:RewriteRule<rewriteElementType>Stream=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{it |var stream_<it>:RewriteRule<rewriteElementType>Stream=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedWildcardLabels
+    :{it |var  stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
+    separator="\n"
+>
+<referencedWildcardListLabels
+    :{it |var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{it |var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.tree:null);};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{it |var stream_<it>:RewriteRuleSubtreeStream=new RewriteRuleSubtreeStream(adaptor,"token <it>",list_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+ *  list rather than the shallow list used by other blocks.
+ */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if ( <referencedElementsDeep:{el | stream_<el>.hasNext}; separator="||"> ) {
+    <alt>
+}
+<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.hasNext}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+if ( !(<referencedElements:{el | stream_<el>.hasNext}; separator="||">) ) {
+    throw new RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.hasNext}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>) {
+    <a.alt>
+}<\n>
+<else>
+{
+    <a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+var root_<treeLevel>:<ASTLabelType> = <ASTLabelType>(adaptor.nil());
+<root:rewriteElement()>
+<children:rewriteElement()>
+adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,terminalOptions,args) ::= <<
+adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>));<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,terminalOptions,args) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>));<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,terminalOptions,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, <createImaginaryNode(token,terminalOptions,args)>);<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,terminalOptions,elementIndex) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<createImaginaryNode(token,terminalOptions,args)>, root_<treeLevel>));<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean the previous value.  I am reusing the
+ *  variable 'tree' sitting in the retval struct to hold the value of root_0
+ *  right before I set it during rewrites.  The assignment will be to
+ *  retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>));<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+adaptor.addChild(root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<action>, root_<treeLevel>));<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>));<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>));<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());<\n>
+>>
+
+
+createImaginaryNode(tokenType,terminalOptions,args) ::= <<
+<if(terminalOptions.node)>
+<! new MethodNode(IDLabel, args) !>
+new <terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+<ASTLabelType>(adaptor.create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>))
+<endif>
+>>
+
+createRewriteNodeFromElement(token,terminalOptions,args) ::= <<
+<if(terminalOptions.node)>
+new <terminalOptions.node>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+adaptor.create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.nextNode()
+<endif>
+<endif>
+>>
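
The tokenRefTrack/ruleRefTrack and rewrite templates above buffer matched
elements in RewriteRule*Stream objects while matching the left-hand side and
then drain them while building the replacement tree.  A rough, hand-written
Java approximation of that flow (these templates target ActionScript, but the
Java runtime API has the same shape; the token type and texts are
illustrative):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.RewriteRuleTokenStream;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class RewriteSketch {
        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            int ID = 4; // illustrative token type
            RewriteRuleTokenStream stream_ID =
                new RewriteRuleTokenStream(adaptor, "token ID");

            // "track" phase: tokens matched on the left-hand side are buffered
            stream_ID.add(new CommonToken(ID, "x"));
            stream_ID.add(new CommonToken(ID, "y"));

            // "rewrite" phase: drain the stream into a fresh nil root
            Object root_0 = adaptor.nil();
            while ( stream_ID.hasNext() ) {
                adaptor.addChild(root_0, stream_ID.nextNode());
            }
            System.out.println(adaptor.getChildCount(root_0)); // 2
        }
    }
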
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTParser.stg
new file mode 100644
index 0000000..72326b1
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTParser.stg
@@ -0,0 +1,189 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+
+@rule.setErrorReturnValue() ::= <<
+retval.tree = <ASTLabelType>(adaptor.errorNode(input, Token(retval.start), input.LT(-1), re));
+<! trace("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex,terminalOptions) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+root_0 = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_0));
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,terminalOptions,elementIndex) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// The match-set templates are interesting in that they use an argument list
+// to pass code to the default matchSet; that is another possible way to alter
+// inherited code.  I don't use the region mechanism because I need to pass
+// different chunks depending on the operator.  I don't like encoding the
+// operator in the template name, since the number of templates gets large,
+// but this is the most flexible approach.  The alternative would be to have
+// the code generator call matchSet and then add root code or rule-root code
+// plus list-label code, and so on.  Those combinations might require more
+// complicated code rather than simply appended code.  Investigate that
+// refactoring when there is more time.
+
+matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <%
+<super.matchSet(postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <createNodeFromToken(...)>);}, ...)>
+%>
+
+matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,terminalOptions,postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
+<if(label)>
+<label>=<labelType>(input.LT(1));<\n>
+<endif>
+<super.matchSet(postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = <ASTLabelType>(adaptor.becomeRoot(<createNodeFromToken(...)>, root_0));},...)>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <label>.tree);
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = <ASTLabelType>(adaptor.becomeRoot(<label>.tree, root_0));
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+// WILDCARD AST
+
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <ASTLabelType>(adaptor.create(<label>));
+adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <ASTLabelType>(adaptor.create(<label>));
+root_0 = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_0));
+<if(backtracking)>}<endif>
+>>
+
+createNodeFromToken(label,terminalOptions) ::= <<
+<if(terminalOptions.node)>
+new <terminalOptions.node>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+<ASTLabelType>(adaptor.create(<label>))
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = <ASTLabelType>(adaptor.rulePostProcessing(root_0));
+adaptor.setTokenBoundaries(retval.tree, Token(retval.start), Token(retval.stop));
+<if(backtracking)>}<endif>
+>>
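
The auto-construction templates above boil down to three adaptor operations:
create a node per token, add plain elements as children of the current root,
and make a ^-suffixed element the new root.  A hand-built Java illustration of
that sequence for an input like "a '+'^ b" (token types and texts are
illustrative; this is not generated code):

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class AutoAstSketch {
        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            Object root_0 = adaptor.nil();

            Object a  = adaptor.create(new CommonToken(5, "a"));
            Object op = adaptor.create(new CommonToken(6, "+"));
            Object b  = adaptor.create(new CommonToken(5, "b"));

            adaptor.addChild(root_0, a);             // a    -> child
            root_0 = adaptor.becomeRoot(op, root_0); // '+'^ -> becomes the root
            adaptor.addChild(root_0, b);             // b    -> child of '+'

            System.out.println(((CommonTree)root_0).toStringTree()); // (+ a b)
        }
    }
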
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTTreeParser.stg
new file mode 100644
index 0000000..4f26b1b
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ASTTreeParser.stg
@@ -0,0 +1,295 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+var _first_0:<ASTLabelType> = null;
+var _last:<ASTLabelType> = null;<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(rewriteMode)>
+retval.tree = <ASTLabelType>(_first_0);
+if ( adaptor.getParent(retval.tree)!=null && adaptor.isNil( adaptor.getParent(retval.tree) ) )
+    retval.tree = <ASTLabelType>(adaptor.getParent(retval.tree));
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+{
+var _save_last_<treeLevel>:<ASTLabelType> = _last;
+var _first_<treeLevel>:<ASTLabelType> = null;
+<if(!rewriteMode)>
+var root_<treeLevel>:<ASTLabelType> = <ASTLabelType>(adaptor.nil());
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+<if(root.el.rule)>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
+<else>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==TokenConstants.DOWN ) {
+    matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+>>
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) except that it also
+ *  sets _last.
+ */
+tokenRefBang(token,label,elementIndex,terminalOptions) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<label>);
+<else>
+<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
+<endif><\n>
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<label>);
+<else>
+<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
+<endif><\n>
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_<treeLevel>));
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.dupTree(<label>);
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+// SET AST
+
+matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<label>);
+<else>
+<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
+<endif><\n>
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>\}<endif>
+<endif>
+}, ...
+)>
+>>
+
+matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<matchSet(...)>
+<noRewrite(...)> <! set return tree !>
+>>
+
+matchSetBang(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<label>);
+<else>
+<label>_tree = <ASTLabelType>(adaptor.dupNode(<label>));
+<endif><\n>
+root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<label>_tree, root_<treeLevel>));
+<if(backtracking)>\}<endif>
+<endif>
+}, ...
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+<if(!rewriteMode)>
+adaptor.addChild(root_<treeLevel>, <label>.tree);
+<else> <! rewrite mode !>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>.tree;
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_<treeLevel> = <ASTLabelType>(adaptor.becomeRoot(<label>.tree, root_<treeLevel>));
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRefRuleRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = <ASTLabelType>(input.LT(1));
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,terminalOptions,scope) ::= <<
+<if(terminalOptions.node)>
+new <terminalOptions.node>(stream_<token>.nextNode())
+<else>
+stream_<token>.nextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = <ASTLabelType>(adaptor.rulePostProcessing(root_0));
+<if(backtracking)>}<endif>
+<endif>
+>>
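+
+// Editor's note (illustrative sketch, assuming the default ASTLabelType of
+// "Object" and backtracking enabled): the ruleCleanUp() template above would
+// typically expand to something like
+//
+//     if ( this.state.backtracking==0 ) {
+//         retval.tree = Object(adaptor.rulePostProcessing(root_0));
+//     }
+//
+// i.e. the finished child list in root_0 is post-processed by the adaptor and
+// stored on the rule's return scope, gated by the synpred/backtracking check.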
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ActionScript.stg b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ActionScript.stg
new file mode 100644
index 0000000..1fdeadc
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ActionScript/ActionScript.stg
@@ -0,0 +1,1317 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2010 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+asTypeInitMap ::= [
+	"int":"0",
+	"uint":"0",
+	"Number":"0.0",
+	"Boolean":"false",
+	default:"null" // anything other than an atomic type
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+package<if(actions.(actionScope).package)> <actions.(actionScope).package><endif> {
+    <actions.(actionScope).header>
+    <@imports>
+import org.antlr.runtime.*;
+<if(TREE_PARSER)>
+    import org.antlr.runtime.tree.*;
+<endif>
+    <@end>
+
+    <docComment>
+    <recognizer>
+}
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="Token",
+      superClass="Lexer") ::= <<
+public class <grammar.recognizerName> extends <if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else><@superClassName><superClass><@end><endif> {
+    <tokens:{it |public static const <it.name>:int=<it.type>;}; separator="\n">
+    <scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}>
+    <actions.lexer.members>
+
+    // delegates
+    <grammar.delegates:
+         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
+    <last(grammar.delegators):{g|public var gParent:<g.recognizerName>;}>
+
+    public function <grammar.recognizerName>(<grammar.delegators:{g|<g:delegateName()>:<g.recognizerName>, }>input:CharStream = null, state:RecognizerSharedState = null) {
+        super(input, state);
+        <cyclicDFAs:cyclicDFACtor()>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        this.state.ruleMemo = new Array(<numRules>+1);<\n> <! index from 1..n !>
+<endif>
+<endif>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>this, input, this.state);}; separator="\n">
+        <grammar.delegators:
+         {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+    }
+    public override function get grammarFileName():String { return "<fileName>"; }
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator="\n\n">
+
+    <synpreds:{p | <lexerSynpred(p)>}>
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+}
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+public override function nextToken():Token {
+    while (true) {
+        if ( input.LA(1)==CharStreamConstants.EOF ) {
+            return TokenConstants.EOF_TOKEN;
+        }
+        this.state.token = null;
+	    this.state.channel = TokenConstants.DEFAULT_CHANNEL;
+        this.state.tokenStartCharIndex = input.index;
+        this.state.tokenStartCharPositionInLine = input.charPositionInLine;
+        this.state.tokenStartLine = input.line;
+	    this.state.text = null;
+        try {
+            var m:int = input.mark();
+            this.state.backtracking=1; <! means we won't throw slow exception !>
+            this.state.failed=false;
+            mTokens();
+            this.state.backtracking=0;
+            <! mTokens backtracks with synpred at backtracking==2
+               and we set the synpredgate to allow actions at level 1. !>
+            if ( this.state.failed ) {
+                input.rewindTo(m);
+                input.consume(); <! advance one char and try again !>
+            }
+            else {
+                emit();
+                return this.state.token;
+            }
+        }
+        catch (re:RecognitionException) {
+            // shouldn't happen in backtracking mode, but...
+            reportError(re);
+            recover(re);
+        }
+    }
+    // Not reached - For ActionScript compiler
+    throw new Error();
+}
+
+public override function memoize(input:IntStream,
+		ruleIndex:int,
+		ruleStartIndex:int):void
+{
+if ( this.state.backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
+}
+
+public override function alreadyParsedRule(input:IntStream, ruleIndex:int):Boolean {
+if ( this.state.backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
+return false;
+}
+>>
+
+actionGate() ::= "this.state.backtracking==0"
+
+filteringActionGate() ::= "this.state.backtracking==1"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              labelType, members, rewriteElementType,
+              filterMode, ASTLabelType="Object") ::= <<
+public class <grammar.recognizerName> extends <if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else><@superClassName><superClass><@end><endif> {
+<if(grammar.grammarIsRoot)>
+    public static const tokenNames:Array = [
+        "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
+    ];<\n>
+<endif>
+    <tokens:{it |public static const <it.name>:int=<it.type>;}; separator="\n">
+
+    // delegates
+    <grammar.delegates: {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public var <g:delegateName()>:<g.recognizerName>;}; separator="\n">
+    <last(grammar.delegators):{g|public var gParent:<g.recognizerName>;}>
+
+    <scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}>
+    <@members>
+   <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+    public function <grammar.recognizerName>(<grammar.delegators:{g|<g:delegateName()>:<g.recognizerName>, }>input:<inputStreamType>, state:RecognizerSharedState = null) {
+        super(input, state);
+        <cyclicDFAs:cyclicDFACtor()>
+        <parserCtorBody()>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>this, input, this.state);}; separator="\n">
+        <grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+    }
+    <@end>
+
+    public override function get tokenNames():Array { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; }
+    public override function get grammarFileName():String { return "<fileName>"; }
+
+    <members>
+
+    <rules; separator="\n\n">
+
+    <! generate rule/method definitions for imported rules so they
+       appear to be defined in this recognizer. !>
+       // Delegated rules
+    <grammar.delegatedRules:{ruleDescriptor|
+        public function <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope()>):<returnType()> { <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); \}}; separator="\n">
+
+    <synpreds:{p | <synpred(p)>}>
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+    <bitsets:{it | <bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                            words64=it.bits)>}>
+}
+>>
+
+parserCtorBody() ::= <<
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = new Array(<length(grammar.allImportedRules)>+1);<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType="Object", superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
+<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, "TokenStream", superClass,
+              labelType, members, "Token",
+              false, ASTLabelType)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object",
+           superClass="TreeParser", members={<actions.treeparser.members>}) ::= <<
+<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, "TreeNodeStream", superClass,
+              labelType, members, "Node",
+              filterMode, ASTLabelType)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  or parameters, etc., just give the simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>
+public final function <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope()>):void {
+    <ruleLabelDefs()>
+<if(trace)>
+    traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+    try {
+        <block>
+    }
+    finally {
+        traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    }
+<else>
+    <block>
+<endif>
+}
+// $ANTLR end <ruleName>
+>>
+
+synpred(name) ::= <<
+public final function <name>():Boolean {
+    this.state.backtracking++;
+    <@start()>
+    var start:int = input.mark();
+    try {
+        <name>_fragment(); // can never throw exception
+    } catch (re:RecognitionException) {
+        trace("impossible: "+re);
+    }
+    var success:Boolean = !this.state.failed;
+    input.rewindTo(start);
+    <@stop()>
+    this.state.backtracking--;
+    this.state.failed=false;
+    return success;
+}<\n>
+>>
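+
+// Editor's note (illustrative): a generated synpred method such as a
+// hypothetical synpred1_Expr() is invoked from DFA edge predicates via the
+// evalSynPredicate template further below, e.g.
+//
+//     if ( (LA3_0==LPAREN) && (synpred1_Expr()) ) { alt3=1; }
+//
+// The method speculatively runs synpred1_Expr_fragment() with backtracking
+// incremented, then rewinds the input and reports success or failure without
+// consuming anything. All names shown here are hypothetical.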
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( this.state.backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (this.state.failed) return <ruleReturnValue()>;<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (this.state.backtracking>0) {this.state.failed=true; return <ruleReturnValue()>;}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+// $ANTLR start <ruleName>
+// <fileName>:<description>
+public final function <ruleName>(<ruleDescriptor.parameterScope:parameterScope()>):<returnType()> {
+    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try {
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+    }
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    catch (re:RecognitionException) {
+        reportError(re);
+        recoverStream(input,re);
+        <@setErrorReturnValue()>
+    }<\n>
+<endif>
+<endif>
+<endif>
+    finally {
+        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+    }
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+// $ANTLR end <ruleName>
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>) {
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+var retval:<returnType()> = new <returnType()>();
+retval.start = input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+var <a.name>:<a.type> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+var <ruleDescriptor.name>_StartIndex:int = input.index;
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{it |<it>_stack.push(new Object());}; separator="\n">
+<ruleDescriptor.ruleScope:{it |<it.name>_stack.push(new Object());}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{it |<it>_stack.pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{it |<it.name>_stack.pop();}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it |var <it.label.text>:<labelType>=null;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it |var list_<it.label.text>:Array=null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|var <ll.label.text>:RuleReturnScope = null;}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it |var <it.label.text>:<labelType>=null;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{it |var <it.label.text>:int;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it |var list_<it.label.text>:Array=null;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = input.LT(-1);<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( this.state.backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start <ruleName>
+public final function m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>):void {
+    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    try {
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        var _type:int = <ruleName>;
+        var _channel:int = DEFAULT_TOKEN_CHANNEL;
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        this.state.type = _type;
+        this.state.channel = _channel;
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    }
+    finally {
+        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <ruleScopeCleanUp()>
+        <memoize()>
+    }
+}
+// $ANTLR end <ruleName>
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+public override function mTokens():void {
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var alt<decisionNumber>:int=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) {
+    <alts:{a | <altSwitchCase(i, a)>}>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var alt<decisionNumber>:int=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) {
+    <alts:{a | <altSwitchCase(i, a)>}>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var cnt<decisionNumber>:int=0;
+<decls>
+<@preloop()>
+loop<decisionNumber>:
+do {
+    var alt<decisionNumber>:int=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+	<alts:{a | <altSwitchCase(i, a)>}>
+	default :
+	    if ( cnt<decisionNumber> >= 1 ) break loop<decisionNumber>;
+	    <ruleBacktrackFailure()>
+            throw new EarlyExitException(<decisionNumber>, input);
+            <! Need to add support for earlyExitException debug hook !>
+    }
+    cnt<decisionNumber>++;
+} while (true);
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+loop<decisionNumber>:
+do {
+    var alt<decisionNumber>:int=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+	<alts:{a | <altSwitchCase(i, a)>}>
+	default :
+	    break loop<decisionNumber>;
+    }
+} while (true);
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase(altNum, alt) ::= <<
+case <altNum> :
+    <@prealt()>
+    <alt>
+    break;<\n>
+>>
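+
+// Editor's note (illustrative): together with block()/ruleBlock() and a
+// decision, altSwitchCase produces code of roughly this shape for a
+// hypothetical decision 3 with two alternatives:
+//
+//     var alt3:int=2;
+//     // ... decision code sets alt3 ...
+//     switch (alt3) {
+//         case 1 :
+//             // code for alternative 1
+//             break;
+//         case 2 :
+//             // code for alternative 2
+//             break;
+//     }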
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+{
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+}
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element(e) ::= <<
+<@prematch()>
+<e.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)><label>=<labelType>(<endif>matchStream(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>)<if(label)>)<endif>; <checkRuleBacktrackFailure()>
+>>
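+
+// Editor's note (illustrative): for a hypothetical labelled token reference
+// id=ID at element index 4 inside rule atom, the tokenRef template above
+// expands to roughly
+//
+//     id=Token(matchStream(input,ID,FOLLOW_ID_in_atom4)); if (this.state.failed) return retval;
+//
+// (the trailing failure check is only emitted when backtracking is enabled).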
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(token,label,elementIndex,terminalOptions)>
+<listLabel(label, label)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label>==null) list_<label>=new Array();
+list_<label>.push(<elem>);<\n>
+>>
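+
+// Editor's note (illustrative): for a hypothetical list label ids+=ID, the
+// tokenRefAndListLabel template combines the token match above with listLabel,
+// producing roughly
+//
+//     ids=Token(matchStream(input,ID,FOLLOW_ID_in_decl7)); if (this.state.failed) return retval;
+//     if (list_ids==null) list_ids=new Array();
+//     list_ids.push(ids);
+//
+// where decl and the element index 7 are made-up names for this sketch.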
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= input.LA(1);<\n>
+<else>
+<label>=<labelType>(input.LT(1));<\n>
+<endif>
+<endif>
+if ( <s> ) {
+    input.consume();
+    <postmatchCode>
+<if(!LEXER)>
+    this.state.errorRecovery=false;
+<endif>
+    <if(backtracking)>this.state.failed=false;<endif>
+}
+else {
+    <ruleBacktrackFailure()>
+    <@mismatchedSetException()>
+<if(LEXER)>
+    throw recover(new MismatchedSetException(null,input));<\n>
+<else>
+    throw new MismatchedSetException(null,input);
+    <! use following code to make it recover inline; remove throw mse;
+    recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+    !>
+<endif>
+}<\n>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(label, label)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex="0") ::= <<
+<if(label)>
+var <label>Start:int = charIndex;
+matchString(<string>); <checkRuleBacktrackFailure()>
+var <label>StartLine<elementIndex>:int = line;
+var <label>StartCharPos<elementIndex>:int = charPositionInLine;
+<label> = CommonToken.createFromStream(input, TokenConstants.INVALID_TOKEN_TYPE, TokenConstants.DEFAULT_CHANNEL, <label>Start, charIndex-1);
+<label>.line = <label>StartLine<elementIndex>;
+<label>.charPositionInLine = <label>StartCharPos<elementIndex>;
+<else>
+matchString(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)>
+<label>=<labelType>(input.LT(1));<\n>
+<endif>
+matchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<wildcard(...)>
+<listLabel(label, label)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+matchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(label, elementIndex)>
+<listLabel(label, label)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.  The 'rule' argument was the
+ *  target rule name, but now is type Rule, whose toString is
+ *  same: the rule name.  Now though you can access full rule
+ *  descriptor stuff.
+ *
+ * GMS: Note:  do not use post-decrement operator!  ASC produces bad code for exceptions in this case.
+ *      See: https://bugs.adobe.com/jira/browse/ASC-3625
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+pushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+state._fsp = state._fsp - 1;
+<checkRuleBacktrackFailure()>
+>>
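+
+// Editor's note (illustrative): for a hypothetical rule reference e=expr at
+// element index 5 inside rule stat, the ruleRef template above expands to
+// roughly
+//
+//     pushFollow(FOLLOW_expr_in_stat5);
+//     e=expr();
+//     state._fsp = state._fsp - 1;
+//     if (this.state.failed) return retval;
+//
+// Note the explicit "state._fsp = state._fsp - 1" rather than a post-decrement,
+// per the ASC code-generation bug referenced in the comment above.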
+
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(rule,label,elementIndex,args,scope)>
+<listLabel(label, label)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument was the target rule name, but now
+ *  is type Rule, whose toString is same: the rule name.
+ *  Now though you can access full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+var <label>Start<elementIndex>:int = charIndex;
+var <label>StartLine<elementIndex>:int = line;
+var <label>StartCharPos<elementIndex>:int = charPositionInLine;
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = CommonToken.createFromStream(input, TokenConstants.INVALID_TOKEN_TYPE, TokenConstants.DEFAULT_CHANNEL, <label>Start<elementIndex>, charIndex-1);
+<label>.line = <label>StartLine<elementIndex>;
+<label>.charPositionInLine = <label>StartCharPos<elementIndex>;
+<else>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(rule,label,args,elementIndex,scope)>
+<listLabel(label, label)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+var <label>Start<elementIndex>:int = charIndex;
+var <label>StartLine<elementIndex>:int = line;
+var <label>StartCharPos<elementIndex>:int = charPositionInLine;
+match(EOF); <checkRuleBacktrackFailure()>
+var <label>:<labelType> = CommonToken.createFromStream(input, EOF, TokenConstants.DEFAULT_CHANNEL, <label>Start<elementIndex>, charIndex-1);
+<label>.line = <label>StartLine<elementIndex>;
+<label>.charPositionInLine = <label>StartCharPos<elementIndex>;
+<else>
+match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "var <recRuleArg()>:int"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
+recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
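+
+// Editor's note (illustrative): these helpers support ANTLR's left-recursion
+// rewriting. For a hypothetical operator alternative with precedence 3, the
+// generated rule takes a "var _p:int" parameter and gates that alternative with
+// a predicate expanded from recRuleAltPredicate, roughly
+//
+//     _p <= 3
+//
+// so the alternative is only viable when the operator binds at least as
+// tightly as the current precedence context.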
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==TokenConstants.DOWN ) {
+    matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+matchStream(input, TokenConstants.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+matchStream(input, TokenConstants.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
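+
+// Editor's note (illustrative): for a tree-grammar pattern ^(ASSIGN ID expr)
+// (names hypothetical), the tree() template above expands to roughly
+//
+//     // match the ASSIGN root node
+//     matchStream(input, TokenConstants.DOWN, null); if (this.state.failed) return retval;
+//     // match ID, then the expr subtree
+//     matchStream(input, TokenConstants.UP, null); if (this.state.failed) return retval;
+//
+// with the DOWN/UP pair wrapped in "if ( input.LA(1)==TokenConstants.DOWN )"
+// when the child list is nullable (i.e. all children are optional).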
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(pred,description)>) ) {
+    <ruleBacktrackFailure()>
+    throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
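+
+// Editor's note (illustrative): a semantic predicate such as {size>0}? inside
+// a hypothetical rule "block" would expand to roughly
+//
+//     if ( !((size>0)) ) {
+//         throw new FailedPredicateException(input, "block", "size>0");
+//     }
+//
+// with the backtracking-failure bookkeeping emitted inside the if, before the
+// throw, when backtracking is enabled.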
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber>:int = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else {
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    throw new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+    <! Need to add hook for noViableAltException() !>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and closer to what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber>:int = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber>:int = input.LA(<k>);<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else {
+    alt<decisionNumber>=<eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
+    <targetState>
+}
+>>
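+
+// Editor's note (illustrative): dfaState/dfaEdge plus the lookahead tests
+// defined below combine into if-then-else chains of roughly this shape for a
+// hypothetical decision 2, state 0:
+//
+//     var LA2_0:int = input.LA(1);
+//     if ( (LA2_0==ID) ) {
+//         alt2=1;
+//     }
+//     else if ( (LA2_0==INT) ) {
+//         alt2=2;
+//     }
+//     else {
+//         throw new NoViableAltException("...", 2, 0, input);
+//     }
+//
+// ID and INT stand in for token-type constants; the generated code actually
+// compares against the integer token types.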
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+<edges; separator="\n">
+default:
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    throw new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);<\n>
+    <! Need to add hook for noViableAltException !>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+    <edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+default:
+    alt<decisionNumber>=<eotPredictsAlt>;
+    break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{it |case <it>:}; separator="\n">
+    {
+    <targetState>
+    }
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = dfa<decisionNumber>.predict(input);
+>>
+
+cyclicDFACtor(dfa) ::= <<
+
+dfa<dfa.decisionNumber> = new DFA(this, <dfa.decisionNumber>,
+            "<dfa.description>",
+            DFA<dfa.decisionNumber>_eot, DFA<dfa.decisionNumber>_eof, DFA<dfa.decisionNumber>_min,
+            DFA<dfa.decisionNumber>_max, DFA<dfa.decisionNumber>_accept, DFA<dfa.decisionNumber>_special,
+            DFA<dfa.decisionNumber>_transition<if(dfa.specialStateSTs)>, DFA<dfa.decisionNumber>_specialStateTransition<endif>);
+
+>>
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+
+private const DFA<dfa.decisionNumber>_eot:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedEOT; wrap="\"+\n    \"">");
+private const DFA<dfa.decisionNumber>_eof:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedEOF; wrap="\"+\n    \"">");
+private const DFA<dfa.decisionNumber>_min:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedMin; wrap="\"+\n    \"">", true);
+private const DFA<dfa.decisionNumber>_max:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedMax; wrap="\"+\n    \"">", true);
+private const DFA<dfa.decisionNumber>_accept:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedAccept; wrap="\"+\n    \"">");
+private const DFA<dfa.decisionNumber>_special:Array =
+    DFA.unpackEncodedString("<dfa.javaCompressedSpecial; wrap="\"+\n    \"">");
+private const DFA<dfa.decisionNumber>_transition:Array = [
+        <dfa.javaCompressedTransition:{s|DFA.unpackEncodedString("<s; wrap="\"+\n\"">")}; separator=",\n">
+];
+<if(dfa.specialStateSTs)>
+    private function DFA<dfa.decisionNumber>_specialStateTransition(dfa:DFA, s:int, _input:IntStream):int {
+        <if(LEXER)>
+        var input:IntStream = _input;
+        <endif>
+        <if(PARSER)>
+        var input:TokenStream = TokenStream(_input);
+        <endif>
+        <if(TREE_PARSER)>
+        var input:TreeNodeStream = TreeNodeStream(_input);
+        <endif>
+    	var _s:int = s;
+        switch ( s ) {
+        <dfa.specialStateSTs:{state |
+        case <i0> : <! compressed special state numbers 0..n-1 !>
+            <state>}; separator="\n">
+        }
+<if(backtracking)>
+        if (this.state.backtracking>0) {this.state.failed=true; return -1;}<\n>
+<endif>
+        throw dfa.error(new NoViableAltException(dfa.description, <dfa.decisionNumber>, _s, input));
+    }<\n>
+<endif>
+
+protected var dfa<dfa.decisionNumber>:DFA;  // initialized in constructor
+
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber>:int = input.LA(1);<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+var index<decisionNumber>_<stateNumber>:int = input.index;
+input.rewind();<\n>
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.seek(index<decisionNumber>_<stateNumber>);<\n>
+<endif>
+if ( s>=0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+orPredicates(operands) ::= "(<operands; separator=\"||\">)"
+
+notPredicate(pred) ::= "!(<evalPredicate(pred,{})>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atomAsInt>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atomAsInt>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
+(LA<decisionNumber>_<stateNumber> >= <lowerAsInt> && LA<decisionNumber>_<stateNumber> \<= <upperAsInt>)
+%>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>) >= <lowerAsInt> && input.LA(<k>) \<= <upperAsInt>)"
+
+setTest(ranges) ::= <<
+<ranges; separator="||">
+>>
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected var <scope.name>_stack:Array = new Array();<\n>
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected var <scope.name>_stack:Array = new Array();<\n>
+<endif>
+>>
+
+returnStructName() ::= "<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope"
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnStructName()>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Generate the ActionScript type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<returnStructName()>
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+delegateName(d) ::= <<
+<if(d.label)><d.label><else>g<d.name><endif>
+>>
+
+/** Using a type to init value map, try to init a type; if not in table
+ *  must be an object, default value is "null".
+ */
+initValue(typeName) ::= <<
+<asTypeInitMap.(typeName)>
+>>
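+
+// Editor's note (illustrative): initValue consults asTypeInitMap defined at the
+// top of this group, so initValue("int") and initValue("uint") yield "0",
+// initValue("Boolean") yields "false", initValue("Number") yields "0.0", and
+// any unlisted (object) type, such as a hypothetical MyNode, falls through to
+// the default of "null".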
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+var <label.label.text>:<ruleLabelType(referencedRule=label.referencedRule)> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+public static class <returnType()> extends <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope {
+    <scope.attributes:{it |public <it.decl>;}; separator="\n">
+    <@ruleReturnMembers()>
+};
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{it |<it.name>:<it.type>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+ <if(negIndex)>
+ <scope>_stack[<scope>_stack.length-<negIndex>-1].<attr.name>
+ <else>
+ <if(index)>
+ <scope>_stack[<index>].<attr.name>
+ <else>
+ <scope>_stack[<scope>_stack.length-1].<attr.name>
+ <endif>
+ <endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+<scope>_stack[<scope>_stack.length-<negIndex>-1].<attr.name> =<expr>;
+<else>
+<if(index)>
+<scope>_stack[<index>].<attr.name> =<expr>;
+<else>
+<scope>_stack[<scope>_stack.length-1].<attr.name> =<expr>;
+<endif>
+<endif>
+>>
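+
+// Editor's note (illustrative): for a dynamically scoped attribute reference
+// such as $function::name (a hypothetical scope "function" with attribute
+// "name"), scopeAttributeRef expands to roughly
+//
+//     function_stack[function_stack.length-1].name
+//
+// while the indexed and negative-index forms address an explicit stack slot or
+// a frame counted back from the top, respectively.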
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+(<scope>!=null?<scope>.values.<attr.name>:<initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.values.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.values.<attr.name> =<expr>;
+<else>
+<attr.name> =<expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.text:null)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.type:0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.line:0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.charPositionInLine:0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.channel:0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.tokenIndex:0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int(<scope>.text):0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?<labelType>(<scope>.start):null)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?<labelType>(<scope>.stop):null)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?<ASTLabelType>(<scope>.tree):null)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+(<scope>!=null?(input.tokenStream.toStringWithRange(
+  input.treeAdaptor.getTokenStartIndex(<scope>.start),
+  input.treeAdaptor.getTokenStopIndex(<scope>.start))):null)
+<else>
+(<scope>!=null?input.toStringWithTokenRange(<scope>.start,<scope>.stop):null)
+<endif>
+>>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?<scope>.st:null)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::=
+    "(<scope>!=null?<scope>.type:0)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::=
+    "(<scope>!=null?<scope>.lien:0)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::=
+    "(<scope>!=null?<scope>.charPositionInLine:0)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::=
+    "(<scope>!=null?<scope>.channel:0)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::=
+    "(<scope>!=null?<scope>.tokenIndex:0)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::=
+    "(<scope>!=null?<scope>.text:null)"
+lexerRuleLabelPropertyRef_int(scope,attr) ::=
+    "(<scope>!=null?int(<scope>.text):0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "<labelType>(retval.start)"
+rulePropertyRef_stop(scope,attr) ::= "<labelType>(retval.stop)"
+rulePropertyRef_tree(scope,attr) ::= "<ASTLabelType>(retval.tree)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.tokenStream.toStringWithRange(
+  input.treeAdaptor.getTokenStartIndex(retval.start),
+  input.treeAdaptor.getTokenStopIndex(retval.start))
+<else>
+input.toStringWithTokenRange(retval.start,input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "text"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(charIndex-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "int(<scope>.text)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {
+  <action>
+}
+<else>
+<action>
+<endif>
+>>
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+public static const <name>:BitSet = new BitSet([<words64:{it |<it>};separator=", ">]);<\n>
+>>
+
+codeFileExtension() ::= ".as"
+
+true_value() ::= "true"
+false_value() ::= "false"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/AST.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/AST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/C/AST.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/ASTDbg.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/ASTDbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/C/ASTDbg.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/ASTParser.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/ASTParser.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/C/ASTParser.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/ASTTreeParser.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/ASTTreeParser.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/C/ASTTreeParser.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/C/C.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/C.stg
new file mode 100644
index 0000000..63646a7
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/C/C.stg
@@ -0,0 +1,3251 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+ http://www.temporal-wave.com
+ http://www.linkedin.com/in/jimidle
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*
+ * This code generating template and the associated C runtime was produced by:
+ * Jim Idle jimi|hereisanat|idle|dotgoeshere|ws.
+ * If it causes the destruction of the Universe, it will be pretty cool so long as
+ * I am in a different one at the time.
+ */
+cTypeInitMap ::= [
+	"int"		    : "0",              // Integers     start out being 0
+	"long"		    : "0",              // Longs        start out being 0
+	"float"		    : "0.0",           // Floats       start out being 0
+	"double"	    : "0.0",           // Doubles      start out being 0
+	"ANTLR3_BOOLEAN"    : "ANTLR3_FALSE",   // Booleans     start out being Antlr C for false
+	"byte"		    : "0",              // Bytes        start out being 0
+	"short"		    : "0",              // Shorts       start out being 0
+	"char"		    : "0"              // Chars        start out being 0
+]
+
+leadIn(type) ::=
+<<
+/** \file
+ *  This <type> file was generated by $ANTLR version <ANTLRVersion>
+ *
+ *     -  From the grammar source file : <fileName>
+ *     -                            On : <generatedTimestamp>
+<if(LEXER)>
+ *     -                 for the lexer : <name>Lexer
+<endif>
+<if(PARSER)>
+ *     -                for the parser : <name>Parser
+<endif>
+<if(TREE_PARSER)>
+ *     -           for the tree parser : <name>TreeParser
+<endif>
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * C language generator and runtime by Jim Idle, jimi|hereisanat|idle|dotgoeshere|ws.
+ *
+ *
+>>
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment,
+            recognizer,
+            name,
+            tokens,
+            tokenNames,
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            buildAST,
+            rewriteMode,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            superClass,
+            literals
+            ) ::=
+<<
+<leadIn("C source")>
+*/
+// [The "BSD license"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+<if(actions.(actionScope).header)>
+
+/* =============================================================================
+ * This is what the grammar programmer asked us to put at the top of every file.
+ */
+<actions.(actionScope).header>
+/* End of Header action.
+ * =============================================================================
+ */
+<endif>
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#include    "<name>.h"
+<actions.(actionScope).postinclude>
+/* ----------------------------------------- */
+
+<docComment>
+
+<if(literals)>
+/** String literals used by <name> that we must do things like MATCHS() with.
+ *  C will normally just lay down 8 bit characters, and you can use L"xxx" to
+ *  get wchar_t, but wchar_t is 16 bits on Windows, which is not UTF32 and so
+ *  we perform this little trick of defining the literals as arrays of UINT32
+ *  and passing in the address of these.
+ */
+<literals:{it | static ANTLR3_UCHAR	lit_<i>[]  = <it>;}; separator="\n">
+
+<endif>
+
+
+
+
+/* MACROS that hide the C interface implementations from the
+ * generated code, which makes it a little more understandable to the human eye.
+ * I am very much against using C pre-processor macros for function calls and bits
+ * of code as you cannot see what is happening when single stepping in debuggers
+ * and so on. The exception (in my book at least) is for generated code, where you are
+ * not maintaining it, but may wish to read and understand it. If you single step it, you know that input()
+ * hides some indirect calls, but is always referring to the input stream. This is
+ * probably more readable than ctx->input->istream->input(snarfle0->blarg) and allows me to rejig
+ * the runtime interfaces without changing the generated code too often, without
+ * confusing the reader of the generated output, who may not wish to know the gory
+ * details of the interface inheritance.
+ */
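+
+/* For illustration (taken from the definitions that follow): LA(n) reads the
+ * n-th lookahead symbol from the recognizer's input stream and CONSUME()
+ * advances that stream by one symbol; only these short names appear in the
+ * generated rule functions below.
+ */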
+
+#define		CTX	ctx
+
+/* Aids in accessing scopes for grammar programmers
+ */
+#undef	SCOPE_TYPE
+#undef	SCOPE_STACK
+#undef	SCOPE_TOP
+#define	SCOPE_TYPE(scope)   p<name>_##scope##_SCOPE
+#define SCOPE_STACK(scope)  p<name>_##scope##Stack
+#define	SCOPE_TOP(scope)    ctx->p<name>_##scope##Top
+#define	SCOPE_SIZE(scope)		ctx->p<name>_##scope##Stack_limit
+#define SCOPE_INSTANCE(scope, i)	(ctx->SCOPE_STACK(scope)->get(ctx->SCOPE_STACK(scope),i))
+
+<if(LEXER)>
+
+/* Macros for accessing things in a lexer
+ */
+#undef	    LEXER
+#undef	    RECOGNIZER
+#undef	    RULEMEMO
+#undef	    GETCHARINDEX
+#undef	    GETLINE
+#undef	    GETCHARPOSITIONINLINE
+#undef	    EMIT
+#undef	    EMITNEW
+#undef	    MATCHC
+#undef	    MATCHS
+#undef	    MATCHRANGE
+#undef	    LTOKEN
+#undef	    HASFAILED
+#undef	    FAILEDFLAG
+#undef	    INPUT
+#undef	    STRSTREAM
+#undef	    LA
+#undef	    HASEXCEPTION
+#undef	    EXCEPTION
+#undef	    CONSTRUCTEX
+#undef	    CONSUME
+#undef	    LRECOVER
+#undef	    MARK
+#undef	    REWIND
+#undef	    REWINDLAST
+#undef	    BACKTRACKING
+#undef		MATCHANY
+#undef		MEMOIZE
+#undef		HAVEPARSEDRULE
+#undef		GETTEXT
+#undef		INDEX
+#undef		SEEK
+#undef		PUSHSTREAM
+#undef		POPSTREAM
+#undef		SETTEXT
+#undef		SETTEXT8
+
+#define	    LEXER					ctx->pLexer
+#define	    RECOGNIZER			    LEXER->rec
+#define		LEXSTATE				RECOGNIZER->state
+#define		TOKSOURCE				LEXSTATE->tokSource
+#define	    GETCHARINDEX()			LEXER->getCharIndex(LEXER)
+#define	    GETLINE()				LEXER->getLine(LEXER)
+#define	    GETTEXT()				LEXER->getText(LEXER)
+#define	    GETCHARPOSITIONINLINE() LEXER->getCharPositionInLine(LEXER)
+#define	    EMIT()					LEXSTATE->type = _type; LEXER->emit(LEXER)
+#define	    EMITNEW(t)				LEXER->emitNew(LEXER, t)
+#define	    MATCHC(c)				LEXER->matchc(LEXER, c)
+#define	    MATCHS(s)				LEXER->matchs(LEXER, s)
+#define	    MATCHRANGE(c1,c2)	    LEXER->matchRange(LEXER, c1, c2)
+#define	    MATCHANY()				LEXER->matchAny(LEXER)
+#define	    LTOKEN  				LEXSTATE->token
+#define	    HASFAILED()				(LEXSTATE->failed == ANTLR3_TRUE)
+#define	    BACKTRACKING			LEXSTATE->backtracking
+#define	    FAILEDFLAG				LEXSTATE->failed
+#define	    INPUT					LEXER->input
+#define	    STRSTREAM				INPUT
+#define		ISTREAM					INPUT->istream
+#define		INDEX()					ISTREAM->index(ISTREAM)
+#define		SEEK(n)					ISTREAM->seek(ISTREAM, n)
+#define	    EOF_TOKEN				&(LEXSTATE->tokSource->eofToken)
+#define	    HASEXCEPTION()			(LEXSTATE->error == ANTLR3_TRUE)
+#define	    EXCEPTION				LEXSTATE->exception
+#define	    CONSTRUCTEX()			RECOGNIZER->exConstruct(RECOGNIZER)
+#define	    LRECOVER()				LEXER->recover(LEXER)
+#define	    MARK()					ISTREAM->mark(ISTREAM)
+#define	    REWIND(m)				ISTREAM->rewind(ISTREAM, m)
+#define	    REWINDLAST()			ISTREAM->rewindLast(ISTREAM)
+#define		MEMOIZE(ri,si)			RECOGNIZER->memoize(RECOGNIZER, ri, si)
+#define		HAVEPARSEDRULE(r)		RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
+#define		PUSHSTREAM(str)			LEXER->pushCharStream(LEXER, str)
+#define		POPSTREAM()				LEXER->popCharStream(LEXER)
+#define		SETTEXT(str)			LEXSTATE->text = str
+#define		SKIP()					LEXSTATE->token = &(TOKSOURCE->skipToken)
+#define		USER1					LEXSTATE->user1
+#define		USER2					LEXSTATE->user2
+#define		USER3					LEXSTATE->user3
+#define		CUSTOM					LEXSTATE->custom
+#define		RULEMEMO				LEXSTATE->ruleMemo
+#define		DBG						RECOGNIZER->debugger
+
+/* If we have been told we can rely on the standard 8 bit or UTF16 input
+ * stream, then we can define our macros to use the direct pointers
+ * in the input object, which is much faster than indirect calls. This
+ * is really only significant to lexers with a lot of fragment rules (which
+ * do not place LA(1) in a temporary at the moment) and even then
+ * only if there is a lot of input (order of say 1M or so).
+ */
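+/* For example (an illustrative build line; the flag names come from the
+ * conditional below), compiling the generated translation unit with
+ *
+ *     cc -DANTLR3_INLINE_INPUT_8BIT -c <name>.c
+ *
+ * selects the direct-pointer forms of LA() and CONSUME() instead of the
+ * indirect istream calls.
+ */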
+#if	defined(ANTLR3_INLINE_INPUT_8BIT) || defined(ANTLR3_INLINE_INPUT_UTF16)
+
+# ifdef	ANTLR3_INLINE_INPUT_8BIT
+
+/* 8 bit character set */
+
+#  define	    NEXTCHAR	((pANTLR3_UINT8)(INPUT->nextChar))
+#  define	    DATAP	((pANTLR3_UINT8)(INPUT->data))
+
+# else
+
+#  define	    NEXTCHAR	((pANTLR3_UINT16)(INPUT->nextChar))
+#  define	    DATAP	((pANTLR3_UINT16)(INPUT->data))
+
+# endif
+
+# define	    LA(n) ((NEXTCHAR + n) > (DATAP + INPUT->sizeBuf) ? ANTLR3_CHARSTREAM_EOF : (ANTLR3_UCHAR)(*(NEXTCHAR + n - 1)))
+# define            CONSUME()                                           \\
+{                                                                       \\
+    if        (NEXTCHAR \< (DATAP + INPUT->sizeBuf))                     \\
+    {                                                                   \\
+        INPUT->charPositionInLine++;                                    \\
+        if  ((ANTLR3_UCHAR)(*NEXTCHAR) == INPUT->newlineChar)           \\
+        {                                                               \\
+            INPUT->line++;                                              \\
+            INPUT->charPositionInLine        = 0;                       \\
+            INPUT->currentLine                = (void *)(NEXTCHAR + 1); \\
+        }                                                               \\
+        INPUT->nextChar = (void *)(NEXTCHAR + 1);                       \\
+    }                                                                   \\
+}
+
+#else
+
+// Pick up the input character by calling the input stream implementation.
+//
+#define	    CONSUME()   INPUT->istream->consume(INPUT->istream)
+#define	    LA(n)       INPUT->istream->_LA(INPUT->istream, n)
+
+#endif
+<endif>
+
+<if(PARSER)>
+/* Macros for accessing things in the parser
+ */
+
+#undef	    PARSER
+#undef	    RECOGNIZER
+#undef	    HAVEPARSEDRULE
+#undef		MEMOIZE
+#undef	    INPUT
+#undef	    STRSTREAM
+#undef	    HASEXCEPTION
+#undef	    EXCEPTION
+#undef	    MATCHT
+#undef	    MATCHANYT
+#undef	    FOLLOWSTACK
+#undef	    FOLLOWPUSH
+#undef	    FOLLOWPOP
+#undef	    PRECOVER
+#undef	    PREPORTERROR
+#undef	    LA
+#undef	    LT
+#undef	    CONSTRUCTEX
+#undef	    CONSUME
+#undef	    MARK
+#undef	    REWIND
+#undef	    REWINDLAST
+#undef	    PERRORRECOVERY
+#undef	    HASFAILED
+#undef	    FAILEDFLAG
+#undef	    RECOVERFROMMISMATCHEDSET
+#undef	    RECOVERFROMMISMATCHEDELEMENT
+#undef		INDEX
+#undef      ADAPTOR
+#undef		SEEK
+#undef	    RULEMEMO
+#undef		DBG
+
+#define	    PARSER				ctx->pParser
+#define	    RECOGNIZER				PARSER->rec
+#define	    PSRSTATE				RECOGNIZER->state
+#define	    HAVEPARSEDRULE(r)			RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
+#define	    MEMOIZE(ri,si)			RECOGNIZER->memoize(RECOGNIZER, ri, si)
+#define	    INPUT				PARSER->tstream
+#define	    STRSTREAM				INPUT
+#define	    ISTREAM				INPUT->istream
+#define	    INDEX()				ISTREAM->index(INPUT->istream)
+#define	    HASEXCEPTION()			(PSRSTATE->error == ANTLR3_TRUE)
+#define	    EXCEPTION				PSRSTATE->exception
+#define	    MATCHT(t, fs)			RECOGNIZER->match(RECOGNIZER, t, fs)
+#define	    MATCHANYT()				RECOGNIZER->matchAny(RECOGNIZER)
+#define	    FOLLOWSTACK				PSRSTATE->following
+#ifdef  SKIP_FOLLOW_SETS
+#define	    FOLLOWPUSH(x)
+#define	    FOLLOWPOP()
+#else
+#define	    FOLLOWPUSH(x)			FOLLOWSTACK->push(FOLLOWSTACK, ((void *)(&(x))), NULL)
+#define	    FOLLOWPOP()				FOLLOWSTACK->pop(FOLLOWSTACK)
+#endif
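+/* Note: SKIP_FOLLOW_SETS is a compile-time switch; building the generated .c
+ * with, for example, -DSKIP_FOLLOW_SETS compiles FOLLOWPUSH()/FOLLOWPOP() away
+ * to nothing, so the follow-set stack used for error recovery is not maintained.
+ */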
+#define	    PRECOVER()				RECOGNIZER->recover(RECOGNIZER)
+#define	    PREPORTERROR()			RECOGNIZER->reportError(RECOGNIZER)
+#define	    LA(n)				INPUT->istream->_LA(ISTREAM, n)
+#define	    LT(n)				INPUT->_LT(INPUT, n)
+#define	    CONSTRUCTEX()			RECOGNIZER->exConstruct(RECOGNIZER)
+#define	    CONSUME()				ISTREAM->consume(ISTREAM)
+#define	    MARK()				ISTREAM->mark(ISTREAM)
+#define	    REWIND(m)				ISTREAM->rewind(ISTREAM, m)
+#define	    REWINDLAST()			ISTREAM->rewindLast(ISTREAM)
+#define	    SEEK(n)				ISTREAM->seek(ISTREAM, n)
+#define	    PERRORRECOVERY			PSRSTATE->errorRecovery
+#define	    FAILEDFLAG				PSRSTATE->failed
+#define	    HASFAILED()				(FAILEDFLAG == ANTLR3_TRUE)
+#define	    BACKTRACKING			PSRSTATE->backtracking
+#define	    RECOVERFROMMISMATCHEDSET(s)		RECOGNIZER->recoverFromMismatchedSet(RECOGNIZER, s)
+#define	    RECOVERFROMMISMATCHEDELEMENT(e)	RECOGNIZER->recoverFromMismatchedElement(RECOGNIZER, e)
+#define     ADAPTOR                         ctx->adaptor
+#define		RULEMEMO						PSRSTATE->ruleMemo
+#define		DBG								RECOGNIZER->debugger
+
+<endif>
+
+<if(TREE_PARSER)>
+/* Macros for accessing things in the tree parser
+ */
+
+#undef	    PARSER
+#undef	    RECOGNIZER
+#undef	    HAVEPARSEDRULE
+#undef	    INPUT
+#undef	    STRSTREAM
+#undef	    HASEXCEPTION
+#undef	    EXCEPTION
+#undef	    MATCHT
+#undef	    MATCHANYT
+#undef	    FOLLOWSTACK
+#undef	    FOLLOWPUSH
+#undef	    FOLLOWPOP
+#undef	    PRECOVER
+#undef	    PREPORTERROR
+#undef	    LA
+#undef	    LT
+#undef	    CONSTRUCTEX
+#undef	    CONSUME
+#undef	    MARK
+#undef	    REWIND
+#undef	    REWINDLAST
+#undef	    PERRORRECOVERY
+#undef	    HASFAILED
+#undef	    FAILEDFLAG
+#undef	    RECOVERFROMMISMATCHEDSET
+#undef	    RECOVERFROMMISMATCHEDELEMENT
+#undef	    BACKTRACKING
+#undef      ADAPTOR
+#undef	    RULEMEMO
+#undef		SEEK
+#undef		INDEX
+#undef		DBG
+
+#define	    PARSER							ctx->pTreeParser
+#define	    RECOGNIZER						PARSER->rec
+#define		PSRSTATE						RECOGNIZER->state
+#define	    HAVEPARSEDRULE(r)				RECOGNIZER->alreadyParsedRule(RECOGNIZER, r)
+#define	    INPUT							PARSER->ctnstream
+#define		ISTREAM							INPUT->tnstream->istream
+#define	    STRSTREAM						INPUT->tnstream
+#define	    HASEXCEPTION()					(PSRSTATE->error == ANTLR3_TRUE)
+#define	    EXCEPTION						PSRSTATE->exception
+#define	    MATCHT(t, fs)					RECOGNIZER->match(RECOGNIZER, t, fs)
+#define	    MATCHANYT()						RECOGNIZER->matchAny(RECOGNIZER)
+#define	    FOLLOWSTACK					    PSRSTATE->following
+#define	    FOLLOWPUSH(x)					FOLLOWSTACK->push(FOLLOWSTACK, ((void *)(&(x))), NULL)
+#define	    FOLLOWPOP()						FOLLOWSTACK->pop(FOLLOWSTACK)
+#define	    PRECOVER()						RECOGNIZER->recover(RECOGNIZER)
+#define	    PREPORTERROR()					RECOGNIZER->reportError(RECOGNIZER)
+#define	    LA(n)							ISTREAM->_LA(ISTREAM, n)
+#define	    LT(n)							INPUT->tnstream->_LT(INPUT->tnstream, n)
+#define	    CONSTRUCTEX()					RECOGNIZER->exConstruct(RECOGNIZER)
+#define	    CONSUME()						ISTREAM->consume(ISTREAM)
+#define	    MARK()							ISTREAM->mark(ISTREAM)
+#define	    REWIND(m)						ISTREAM->rewind(ISTREAM, m)
+#define	    REWINDLAST()					ISTREAM->rewindLast(ISTREAM)
+#define	    PERRORRECOVERY					PSRSTATE->errorRecovery
+#define	    FAILEDFLAG						PSRSTATE->failed
+#define	    HASFAILED()						(FAILEDFLAG == ANTLR3_TRUE)
+#define	    BACKTRACKING					PSRSTATE->backtracking
+#define	    RECOVERFROMMISMATCHEDSET(s)		RECOGNIZER->recoverFromMismatchedSet(RECOGNIZER, s)
+#define	    RECOVERFROMMISMATCHEDELEMENT(e)	RECOGNIZER->recoverFromMismatchedElement(RECOGNIZER, e)
+#define     ADAPTOR                         INPUT->adaptor
+#define		RULEMEMO						PSRSTATE->ruleMemo
+#define		SEEK(n)							ISTREAM->seek(ISTREAM, n)
+#define		INDEX()							ISTREAM->index(ISTREAM)
+#define		DBG								RECOGNIZER->debugger
+
+
+<endif>
+
+#define		TOKTEXT(tok, txt)				tok, (pANTLR3_UINT8)txt
+
+/* The 4 tokens defined below may well clash with your own #defines or token types. If so
+ * then for the present you must use different names for your defines as these are hard coded
+ * in the code generator. It would be better not to use such names internally, and maybe
+ * we can change this in a forthcoming release. I deliberately do not #undef these
+ * here as this will at least give you a redefined error somewhere if they clash.
+ */
+#define	    UP	    ANTLR3_TOKEN_UP
+#define	    DOWN    ANTLR3_TOKEN_DOWN
+#define	    EOR	    ANTLR3_TOKEN_EOR
+#define	    INVALID ANTLR3_TOKEN_INVALID
+
+
+/* =============================================================================
+ * Functions to create and destroy scopes. First come the rule scopes, followed
+ * by the global declared scopes.
+ */
+
+<rules: {r |<if(r.ruleDescriptor.ruleScope)>
+<ruleAttributeScopeFuncDecl(scope=r.ruleDescriptor.ruleScope)>
+<ruleAttributeScopeFuncs(scope=r.ruleDescriptor.ruleScope)>
+<endif>}>
+
+<recognizer.scopes:{it | <if(it.isDynamicGlobalScope)>
+<globalAttributeScopeFuncDecl(it)>
+<globalAttributeScopeFuncs(it)>
+<endif>}>
+
+/* ============================================================================= */
+
+/* =============================================================================
+ * Start of recognizer
+ */
+
+<recognizer>
+
+/* End of code
+ * =============================================================================
+ */
+
+>>
+headerFileExtension() ::= ".h"
+
+headerFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment,
+            recognizer,
+            name,
+            tokens,
+            tokenNames,
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            buildAST,
+            rewriteMode,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+			superClass,
+            literals
+        ) ::=
+<<
+<leadIn("C header")>
+<if(PARSER)>
+ * The parser <mainName()>
+<endif>
+<if(LEXER)>
+ * The lexer <mainName()>
+<endif>
+<if(TREE_PARSER)>
+ * The tree parser <mainName()>
+<endif>
+has the callable functions (rules) shown below,
+ * which will invoke the code for the associated rule in the source grammar
+ * assuming that the input stream is pointing to a token/text stream that could begin
+ * this rule.
+ *
+ * For instance if you call the first (topmost) rule in a parser grammar, you will
+ * get the results of a full parse, but calling a rule half way through the grammar will
+ * allow you to pass part of a full token stream to the parser, such as for syntax checking
+ * in editors and so on.
+ *
+ * The parser entry points are called indirectly (by function pointer to function) via
+ * a parser context typedef p<name>, which is returned from a call to <name>New().
+ *
+<if(LEXER)>
+ * As this is a generated lexer, it is unlikely you will call it 'manually'. However
+ * the methods are provided anyway.
+ *
+<endif>
+ * The methods in p<name> are  as follows:
+ *
+ * <rules:{r | <if(!r.ruleDescriptor.isSynPred)> - <headerReturnType(ruleDescriptor=r.ruleDescriptor,...)>      p<name>-><r.ruleDescriptor.name>(p<name>)<endif>}; separator="\n * ">
+ *
+ * The return type for any particular rule is of course determined by the source
+ * grammar file.
+ */
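+<if(PARSER)>
+/* A minimal usage sketch (illustrative only: "input.txt", MyLexer and startRule
+ * are placeholders for your own input, companion lexer and top-level rule, and
+ * the stream constructors assumed here are those of the ANTLR 3.5 C runtime):
+ *
+ *   pANTLR3_INPUT_STREAM        input   = antlr3FileStreamNew((pANTLR3_UINT8)"input.txt", ANTLR3_ENC_8BIT);
+ *   pMyLexer                    lxr     = MyLexerNew(input);
+ *   pANTLR3_COMMON_TOKEN_STREAM tstream = antlr3CommonTokenStreamSourceNew(ANTLR3_SIZE_HINT, TOKENSOURCE(lxr));
+ *   p<name>                     psr     = <name>New(tstream);
+ *
+ *   psr->startRule(psr);
+ *
+ *   psr->free(psr); tstream->free(tstream); lxr->free(lxr); input->close(input);
+ */
+<endif>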
+// [The "BSD license"]
+// Copyright (c) 2005-2009 Jim Idle, Temporal Wave LLC
+// http://www.temporal-wave.com
+// http://www.linkedin.com/in/jimidle
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef	_<name>_H
+#define _<name>_H
+<actions.(actionScope).preincludes>
+/* =============================================================================
+ * Standard antlr3 C runtime definitions
+ */
+#include    \<antlr3.h>
+
+/* End of standard antlr 3 runtime definitions
+ * =============================================================================
+ */
+<actions.(actionScope).includes>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Forward declare the context typedef so that we can use it before it is
+// properly defined. Delegators and delegates (from import statements) are
+// interdependent and their context structures contain pointers to each other;
+// C only allows such things to be declared if you pre-declare the typedef.
+//
+typedef struct <name>_Ctx_struct <name>, * p<name>;
+
+<if(recognizer.grammar.delegates)>
+// Include delegate definition header files
+//
+<recognizer.grammar.delegates: {g|#include	\<<g.recognizerName>.h>}; separator="\n">
+
+<endif>
+
+
+<actions.(actionScope).header>
+
+#ifdef	ANTLR3_WINDOWS
+// Disable: Unreferenced parameter,							- Rules with parameters that are not used
+//          constant conditional,							- ANTLR realizes that a prediction is always true (synpred usually)
+//          initialized but unused variable					- tree rewrite variables declared but not needed
+//          Unreferenced local variable						- lexer rule declares but does not always use _type
+//          potentially uninitialized variable used		- retval always returned from a rule
+//			unreferenced local function has been removed	- usually getTokenNames or freeScope, they can go without warnings
+//
+// These are only really displayed at warning level /W4 but that is the code ideal I am aiming at
+// and the codegen must generate some of these warnings by necessity, apart from 4100, which is
+// usually generated when a parser rule is given a parameter that it does not use. Mostly though
+// this is a matter of orthogonality hence I disable that one.
+//
+#pragma warning( disable : 4100 )
+#pragma warning( disable : 4101 )
+#pragma warning( disable : 4127 )
+#pragma warning( disable : 4189 )
+#pragma warning( disable : 4505 )
+#pragma warning( disable : 4701 )
+#endif
+<if(backtracking)>
+
+/* ========================
+ * BACKTRACKING IS ENABLED
+ * ========================
+ */
+<endif>
+
+<rules:{r |<headerReturnScope(ruleDescriptor=r.ruleDescriptor,...)>}>
+
+<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeDecl(it)><endif>}>
+<rules:{r |<ruleAttributeScopeDecl(scope=r.ruleDescriptor.ruleScope)>}>
+<if(recognizer.grammar.delegators)>
+// Include delegator definition header files
+//
+<recognizer.grammar.delegators: {g|#include	\<<g.recognizerName>.h>}; separator="\n">
+
+<endif>
+
+/** Context tracking structure for <mainName()>
+ */
+struct <name>_Ctx_struct
+{
+    /** Built in ANTLR3 context tracker contains all the generic elements
+     *  required for context tracking.
+     */
+<if(PARSER)>
+    pANTLR3_PARSER   pParser;
+<endif>
+<if(LEXER)>
+    pANTLR3_LEXER    pLexer;
+<endif>
+<if(TREE_PARSER)>
+    pANTLR3_TREE_PARSER	    pTreeParser;
+<endif>
+
+<if(recognizer.grammar.delegates)>
+	<recognizer.grammar.delegates:
+         {g|p<g.recognizerName>	<g:delegateName()>;}; separator="\n">
+<endif>
+<if(recognizer.grammar.delegators)>
+	<recognizer.grammar.delegators:
+         {g|p<g.recognizerName>	<g:delegateName()>;}; separator="\n">
+<endif>
+<scopes:{it | <if(it.isDynamicGlobalScope)>
+    <globalAttributeScopeDef(it)>
+<endif>}; separator="\n\n">
+<rules: {r |<if(r.ruleDescriptor.ruleScope)>
+    <ruleAttributeScopeDef(scope=r.ruleDescriptor.ruleScope)>
+<endif>}>
+
+<if(LEXER)>
+    <rules:{r | <if(!r.ruleDescriptor.isSynPred)><headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*m<r.ruleDescriptor.name>)	(struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);<endif>}; separator="\n">
+<endif>
+<if(!LEXER)>
+    <rules:{r | <headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*<r.ruleDescriptor.name>)	(struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+<recognizer.grammar.delegatedRules:{ruleDescriptor|
+    <headerReturnType(ruleDescriptor)> (*<ruleDescriptor.name>)(struct <name>_Ctx_struct * ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
+<endif>
+
+    const char * (*getGrammarFileName)();
+    void            (*reset)  (struct <name>_Ctx_struct * ctx);
+    void	    (*free)   (struct <name>_Ctx_struct * ctx);
+    <@members>
+    <@end>
+    <actions.(actionScope).context>
+};
+
+// Function prototypes for the constructor functions that external translation units
+// such as delegators and delegates may wish to call.
+//
+ANTLR3_API p<name> <name>New         (<inputType()> instream<recognizer.grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>);
+ANTLR3_API p<name> <name>NewSSD      (<inputType()> instream, pANTLR3_RECOGNIZER_SHARED_STATE state<recognizer.grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>);
+<if(!recognizer.grammar.grammarIsRoot)>
+extern pANTLR3_UINT8   <recognizer.grammar.composite.rootGrammar.recognizerName>TokenNames[];
+<endif>
+
+
+/** Symbolic definitions of all the tokens that the <grammarType()> will work with.
+ * \{
+ *
+ * Antlr will define EOF, but we can't use that as it is too common in
+ * C header files and that would be confusing. There is no way to filter this out at the moment
+ * so we just undef it here for now. That isn't the value we get back from C recognizers
+ * anyway. We are looking for ANTLR3_TOKEN_EOF.
+ */
+#ifdef	EOF
+#undef	EOF
+#endif
+#ifdef	Tokens
+#undef	Tokens
+#endif
+<tokens:{it | #define <it.name>      <it.type>}; separator="\n">
+#ifdef	EOF
+#undef	EOF
+#define	EOF	ANTLR3_TOKEN_EOF
+#endif
+
+#ifndef TOKENSOURCE
+#define TOKENSOURCE(lxr) lxr->pLexer->rec->state->tokSource
+#endif
+
+/* End of token definitions for <name>
+ * =============================================================================
+ */
+/** \} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+/* END - Note:Keep extra line feed to satisfy UNIX systems */
+
+>>
+
+inputType() ::=<<
+<if(LEXER)>
+pANTLR3_INPUT_STREAM
+<endif>
+<if(PARSER)>
+pANTLR3_COMMON_TOKEN_STREAM
+<endif>
+<if(TREE_PARSER)>
+pANTLR3_COMMON_TREE_NODE_STREAM
+<endif>
+>>
+
+grammarType() ::= <<
+<if(PARSER)>
+parser
+<endif>
+<if(LEXER)>
+lexer
+<endif>
+<if(TREE_PARSER)>
+tree parser
+<endif>
+>>
+
+mainName() ::= <<
+<if(PARSER)>
+<name>
+<endif>
+<if(LEXER)>
+<name>
+<endif>
+<if(TREE_PARSER)>
+<name>
+<endif>
+>>
+
+headerReturnScope(ruleDescriptor) ::= "<returnScope(...)>"
+
+headerReturnType(ruleDescriptor) ::= <<
+<if(LEXER)>
+<if(!ruleDescriptor.isSynPred)>
+ void
+<else>
+ <returnType()>
+<endif>
+<else>
+ <returnType()>
+<endif>
+>>
+
+// Produce the lexer output
+//
+lexer(  grammar,
+		name,
+        tokens,
+        scopes,
+        rules,
+        numRules,
+        filterMode,
+        superClass,
+        labelType="pANTLR3_COMMON_TOKEN") ::= <<
+
+<if(filterMode)>
+/* Forward declare implementation function for ANTLR3_TOKEN_SOURCE interface when
+ * this is a filter mode lexer.
+ */
+static pANTLR3_COMMON_TOKEN <name>NextToken   (pANTLR3_TOKEN_SOURCE toksource);
+
+/* Override the normal MEMOIZE and HAVEPARSEDRULE macros as this is a filtering
+ * lexer. In filter mode, the memoizing and backtracking are gated at BACKTRACKING > 1 rather
+ * than just BACKTRACKING. In some cases this might generate code akin to:
+ *   if (BACKTRACKING) if (BACKTRACKING > 1) memoize.
+ * However, I assume that the C compilers/optimizers are smart enough to work this one out
+ * these days - Jim
+ */
+#undef		MEMOIZE
+#define		MEMOIZE(ri,si)			if (BACKTRACKING>1) { RECOGNIZER->memoize(RECOGNIZER, ri, si) }
+#undef		HAVEPARSEDRULE
+#define		HAVEPARSEDRULE(r)		if (BACKTRACKING>1) { RECOGNIZER->alreadyParsedRule(RECOGNIZER, r) }
+<endif>
+
+/* Forward declare the locally static matching functions we have generated and any predicate functions.
+ */
+<rules:{r | static ANTLR3_INLINE <headerReturnType(ruleDescriptor=r.ruleDescriptor)>	<if(!r.ruleDescriptor.isSynPred)>m<endif><r.ruleDescriptor.name>    (p<name> ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
+static void	<name>Free(p<name> ctx);
+
+/* =========================================================================
+ * Lexer matching rules end.
+ * =========================================================================
+ */
+
+<scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}>
+
+<actions.lexer.members>
+
+static void
+<name>Free  (p<name> ctx)
+{
+<if(memoize)>
+	if	(RULEMEMO != NULL)
+	{
+		RULEMEMO->free(RULEMEMO);
+		RULEMEMO = NULL;
+	}
+<endif>
+<if(grammar.directDelegates)>
+	// Free the lexers that we delegated functions to.
+	// NULL the state so we only free it once.
+	//
+	<grammar.directDelegates:
+         {g|ctx-><g:delegateName()>->pLexer->rec->state = NULL;
+         ctx-><g:delegateName()>->free(ctx-><g:delegateName()>);}; separator="\n">
+<endif>
+    LEXER->free(LEXER);
+
+    ANTLR3_FREE(ctx);
+}
+
+static void
+<name>Reset (p<name> ctx)
+{
+    RECOGNIZER->reset(RECOGNIZER);
+}
+
+/** \brief Name of the grammar file that generated this code
+ */
+static const char fileName[] = "<fileName>";
+
+/** \brief Return the name of the grammar file that generated this code.
+ */
+static const char * getGrammarFileName()
+{
+	return fileName;
+}
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+
+/** \brief Create a new lexer called <name>
+ *
+ * \param[in]    instream Pointer to an initialized input stream
+ * \return
+ *     - Success p<name> initialized for the lex start
+ *     - Fail NULL
+ */
+ANTLR3_API p<name> <name>New
+(<inputType()> instream<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
+{
+	// See if we can create a new lexer with the standard constructor
+	//
+	return <name>NewSSD(instream, NULL<grammar.delegators:{g|, <g:delegateName()>}>);
+}
+
+/** \brief Create a new lexer called <name>
+ *
+ * \param[in]    instream Pointer to an initialized input stream
+ * \param[in]    state Previously created shared recognizer state
+ * \return
+ *     - Success p<name> initialized for the lex start
+ *     - Fail NULL
+ */
+ANTLR3_API p<name> <name>NewSSD
+(pANTLR3_INPUT_STREAM instream, pANTLR3_RECOGNIZER_SHARED_STATE state<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
+{
+    p<name> ctx; // Context structure we will build and return
+
+    ctx = (p<name>) ANTLR3_CALLOC(1, sizeof(<name>));
+
+    if  (ctx == NULL)
+    {
+        // Failed to allocate memory for lexer context
+        return  NULL;
+    }
+
+    /* -------------------------------------------------------------------
+     * Memory for basic structure is allocated, now to fill in
+     * in base ANTLR3 structures. We initialize the function pointers
+     * for the standard ANTLR3 lexer function set, but upon return
+     * from here, the programmer may set the pointers to provide custom
+     * implementations of each function.
+     *
+     * We don't use the macros defined in <name>.h here so you can get a sense
+     * of what goes where.
+     */
+
+    /* Create a base lexer, using the supplied input stream
+     */
+    ctx->pLexer	= antlr3LexerNewStream(ANTLR3_SIZE_HINT, instream, state);
+
+    /* Check that we allocated the memory correctly
+     */
+    if	(ctx->pLexer == NULL)
+    {
+		ANTLR3_FREE(ctx);
+		return  NULL;
+    }
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+    // Create a LIST for recording rule memos.
+    //
+    ctx->pLexer->rec->ruleMemo    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */
+<endif>
+<endif>
+
+    /* Install the implementation of our <name> interface
+     */
+    <rules:{r | <if(!r.ruleDescriptor.isSynPred)>ctx->m<r.ruleDescriptor.name>	= m<r.ruleDescriptor.name>;<endif>}; separator="\n">
+
+    /** When the nextToken() call is made to this lexer's pANTLR3_TOKEN_SOURCE
+     *  it will call mTokens() in this generated code, and will pass it the ctx
+     * pointer of this lexer, not the context of the base lexer, so store that now.
+     */
+    ctx->pLexer->ctx	    = ctx;
+
+    /** Install the token matching function
+     */
+    ctx->pLexer->mTokens = (void (*) (void *))(mTokens);
+
+    ctx->getGrammarFileName	= getGrammarFileName;
+    ctx->free		= <name>Free;
+    ctx->reset          = <name>Reset;
+
+<if(grammar.directDelegates)>
+	// Initialize the lexers that we are going to delegate some
+	// functions to.
+	//
+	<grammar.directDelegates:
+         {g|ctx-><g:delegateName()> = <g.recognizerName>NewSSD(instream, ctx->pLexer->rec->state, ctx<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+<endif>
+<if(grammar.delegators)>
+	// Install the pointers back to lexers that will delegate us to perform certain functions
+	// for them.
+	//
+	<grammar.delegators:
+         {g|ctx-><g:delegateName()>			= <g:delegateName()>;}; separator="\n">
+<endif>
+<if(filterMode)>
+    /* We have filter mode turned on, so install the filtering nextToken function
+     */
+    ctx->pLexer->rec->state->tokSource->nextToken = <name>NextToken;
+<endif>
+	 <actions.lexer.apifuncs>
+
+    /* Return the newly built lexer to the caller
+     */
+    return  ctx;
+}
+<if(cyclicDFAs)>
+
+/* =========================================================================
+ * DFA tables for the lexer
+ */
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+/* =========================================================================
+ * End of DFA tables for the lexer
+ */
+<endif>
+
+/* =========================================================================
+ * Functions to match the lexer grammar defined tokens from the input stream
+ */
+
+<rules; separator="\n\n">
+
+/* =========================================================================
+ * Lexer matching rules end.
+ * =========================================================================
+ */
+<if(synpreds)>
+
+/* =========================================================================
+ * Lexer syntactic predicates
+ */
+<synpreds:{p | <lexerSynpred(predname=p)>}>
+/* =========================================================================
+ * Lexer syntactic predicates end.
+ * =========================================================================
+ */
+<endif>
+
+/* End of Lexer code
+ * ================================================
+ * ================================================
+ */
+
+>>
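+
+/** Usage sketch for the lexer built by the template above: tokens are normally
+ *  pulled by a pANTLR3_COMMON_TOKEN_STREAM, but they can also be fetched one at
+ *  a time from the token source (here lxr stands for a p<name> returned by
+ *  <name>New()):
+ *
+ *      pANTLR3_TOKEN_SOURCE src = TOKENSOURCE(lxr);
+ *      pANTLR3_COMMON_TOKEN tok = src->nextToken(src);
+ */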
+
+
+filteringNextToken() ::= <<
+/** An override of the lexer's nextToken() method that backtracks over mTokens() looking
+ *  for matches in lexer filterMode.  No error is reported on a failed match; just rewind, consume
+ *  a character and then try again.  BACKTRACKING needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at BACKTRACKING==1.
+ */
+static pANTLR3_COMMON_TOKEN
+<name>NextToken(pANTLR3_TOKEN_SOURCE toksource)
+{
+    pANTLR3_LEXER   lexer;
+	pANTLR3_RECOGNIZER_SHARED_STATE state;
+
+    lexer   = (pANTLR3_LEXER)(toksource->super);
+    state	= lexer->rec->state;
+
+    /* Get rid of any previous token (the token factory takes care of
+     * any deallocation when this token is finally used up).
+     */
+    state		->token	    = NULL;
+    state		->error	    = ANTLR3_FALSE;	    /* Start out without an exception	*/
+    state		->failed    = ANTLR3_FALSE;
+
+    /* Record the start of the token in our input stream.
+     */
+    state->tokenStartCharIndex			= lexer->input->istream->index(lexer->input->istream);
+    state->tokenStartCharPositionInLine	= lexer->input->getCharPositionInLine(lexer->input);
+    state->tokenStartLine				= lexer->input->getLine(lexer->input);
+    state->text							= NULL;
+
+    /* Now call the matching rules and see if we can generate a new token
+     */
+    for	(;;)
+    {
+		if  (lexer->input->istream->_LA(lexer->input->istream, 1) == ANTLR3_CHARSTREAM_EOF)
+		{
+			/* Reached the end of the stream, nothing more to do.
+			 */
+			pANTLR3_COMMON_TOKEN    teof = &(toksource->eofToken);
+
+			teof->setStartIndex (teof, lexer->getCharIndex(lexer));
+			teof->setStopIndex  (teof, lexer->getCharIndex(lexer));
+			teof->setLine		(teof, lexer->getLine(lexer));
+			return  teof;
+		}
+
+		state->token		= NULL;
+		state->error		= ANTLR3_FALSE;	    /* Start out without an exception	*/
+
+		{
+			ANTLR3_MARKER   m;
+
+			m						= lexer->input->istream->mark(lexer->input->istream);
+			state->backtracking		= 1;				/* No exceptions */
+			state->failed			= ANTLR3_FALSE;
+
+			/* Call the generated lexer, see if it can get a new token together.
+			 */
+			lexer->mTokens(lexer->ctx);
+    		state->backtracking	= 0;
+
+    		<! mTokens backtracks with synpred at BACKTRACKING==2
+				and we set the synpredgate to allow actions at level 1. !>
+
+			if	(state->failed == ANTLR3_TRUE)
+			{
+				lexer->input->istream->rewind(lexer->input->istream, m);
+				lexer->input->istream->consume(lexer->input->istream); <! advance one char and try again !>
+			}
+			else
+			{
+				lexer->emit(lexer);					/* Assemble the token and emit it to the stream */
+				return	state->token;
+			}
+		}
+    }
+}
+>>
+
+actionGate() ::= "BACKTRACKING==0"
+
+filteringActionGate() ::= "BACKTRACKING==1"
+
+/** How to generate a parser */
+genericParser(  grammar,
+				name,
+                scopes,
+                tokens,
+                tokenNames,
+                rules,
+                numRules,
+                bitsets,
+                inputStreamType,
+                superClass,
+                labelType,
+				members,
+				rewriteElementType, filterMode,
+                ASTLabelType="pANTLR3_BASE_TREE"
+              ) ::= <<
+
+
+<if(grammar.grammarIsRoot)>
+/** \brief Table of all token names in symbolic order, mainly used for
+ *         error reporting.
+ */
+pANTLR3_UINT8   <name>TokenNames[<length(tokenNames)>+4]
+     = {
+        (pANTLR3_UINT8) "\<invalid>",       /* String to print to indicate an invalid token */
+        (pANTLR3_UINT8) "\<EOR>",
+        (pANTLR3_UINT8) "\<DOWN>",
+        (pANTLR3_UINT8) "\<UP>",
+        <tokenNames:{it |(pANTLR3_UINT8) <it>}; separator=",\n">
+       };
+<endif>
+
+    <@members>
+
+    <@end>
+<rules:{r |<ruleAttributeScopeFuncMacro(scope=r.ruleDescriptor.ruleScope)>}>
+<scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScopeFuncMacro(it)><endif>}>
+
+// Forward declare the locally static matching functions we have generated.
+//
+<rules:{r | static <headerReturnType(ruleDescriptor=r.ruleDescriptor)>	<r.ruleDescriptor.name>    (p<name> ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
+static void	<name>Free(p<name> ctx);
+static void     <name>Reset (p<name> ctx);
+
+<if(!LEXER)>
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+<if(recognizer.grammar.delegatedRules)>
+// Delegated rules
+//
+<recognizer.grammar.delegatedRules:{ruleDescriptor|static <headerReturnType(ruleDescriptor)> <ruleDescriptor.name>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
+
+<endif>
+<endif>
+
+/* For use in tree output where we are accumulating rule labels via label += ruleRef
+ * we need a function that knows how to free a return scope when the list is destroyed.
+ * We cannot just use ANTLR3_FREE because in debug tracking mode, this is a macro.
+ */
+static	void ANTLR3_CDECL freeScope(void * scope)
+{
+    ANTLR3_FREE(scope);
+}
+
+/** \brief Name of the grammar file that generated this code
+ */
+static const char fileName[] = "<fileName>";
+
+/** \brief Return the name of the grammar file that generated this code.
+ */
+static const char * getGrammarFileName()
+{
+	return fileName;
+}
+/** \brief Create a new <name> parser and return a context for it.
+ *
+ * \param[in] instream Pointer to an input stream interface.
+ *
+ * \return Pointer to new parser context upon success.
+ */
+ANTLR3_API p<name>
+<name>New   (<inputStreamType> instream<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
+{
+	// See if we can create a new parser with the standard constructor
+	//
+	return <name>NewSSD(instream, NULL<grammar.delegators:{g|, <g:delegateName()>}>);
+}
+
+/** \brief Create a new <name> parser and return a context for it.
+ *
+ * \param[in] instream Pointer to an input stream interface.
+ *
+ * \return Pointer to new parser context upon success.
+ */
+ANTLR3_API p<name>
+<name>NewSSD   (<inputStreamType> instream, pANTLR3_RECOGNIZER_SHARED_STATE state<grammar.delegators:{g|, p<g.recognizerName> <g:delegateName()>}>)
+{
+    p<name> ctx;	    /* Context structure we will build and return   */
+
+    ctx	= (p<name>) ANTLR3_CALLOC(1, sizeof(<name>));
+
+    if	(ctx == NULL)
+    {
+		// Failed to allocate memory for parser context
+		//
+        return  NULL;
+    }
+
+    /* -------------------------------------------------------------------
+     * Memory for basic structure is allocated, now to fill in
+     * the base ANTLR3 structures. We initialize the function pointers
+     * for the standard ANTLR3 parser function set, but upon return
+     * from here, the programmer may set the pointers to provide custom
+     * implementations of each function.
+     *
+     * We don't use the macros defined in <name>.h here, in order that you can get a sense
+     * of what goes where.
+     */
+
+<if(PARSER)>
+    /* Create a base parser/recognizer, using the supplied token stream
+     */
+    ctx->pParser	    = antlr3ParserNewStream(ANTLR3_SIZE_HINT, instream->tstream, state);
+<endif>
+<if(TREE_PARSER)>
+    /* Create a base Tree parser/recognizer, using the supplied tree node stream
+     */
+    ctx->pTreeParser		= antlr3TreeParserNewStream(ANTLR3_SIZE_HINT, instream, state);
+<endif>
+
+    /* Install the implementation of our <name> interface
+     */
+    <rules:{r | ctx-><r.ruleDescriptor.name>	= <r.ruleDescriptor.name>;}; separator="\n">
+<if(grammar.delegatedRules)>
+	// Install the delegated methods so that they appear to be a part of this
+	// parser
+	//
+    <grammar.delegatedRules:{ruleDescriptor | ctx-><ruleDescriptor.name>	= <ruleDescriptor.name>;}; separator="\n">
+<endif>
+
+    ctx->free			= <name>Free;
+    ctx->reset			= <name>Reset;
+    ctx->getGrammarFileName	= getGrammarFileName;
+
+    /* Install the scope pushing methods.
+     */
+    <rules: {r |<if(r.ruleDescriptor.ruleScope)>
+<ruleAttributeScope(scope=r.ruleDescriptor.ruleScope)><\n>
+<endif>}>
+    <recognizer.scopes:{it |<if(it.isDynamicGlobalScope)>
+<globalAttributeScope(it)><\n>
+<endif>}>
+    <@apifuncs>
+
+    <@end>
+<if(grammar.directDelegates)>
+	// Initialize the parsers that we are going to delegate some
+	// functions to.
+	//
+	<grammar.directDelegates:
+         {g|ctx-><g:delegateName()> = <g.recognizerName>NewSSD(instream, PSRSTATE, ctx<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+<endif>
+<if(grammar.delegators)>
+	// Install the pointers back to parsers that will delegate us to perform certain functions
+	// for them.
+	//
+	<grammar.delegators:
+         {g|ctx-><g:delegateName()>			= <g:delegateName()>;}; separator="\n">
+<endif>
+    <actions.parser.apifuncs>
+    <actions.treeparser.apifuncs>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+    /* Create a LIST for recording rule memos.
+     */
+     RULEMEMO    = antlr3IntTrieNew(15);	/* 16 bit depth is enough for 32768 rules! */<\n>
+<endif>
+<endif>
+    /* Install the token table
+     */
+    PSRSTATE->tokenNames   = <grammar.composite.rootGrammar.recognizerName>TokenNames;
+
+    <@debugStuff()>
+
+    /* Return the newly built parser to the caller
+     */
+    return  ctx;
+}
+
+static void
+<name>Reset (p<name> ctx)
+{
+    RECOGNIZER->reset(RECOGNIZER);
+}
+
+/** Free the parser resources
+ */
+ static void
+ <name>Free(p<name> ctx)
+ {
+    /* Free any scope memory
+     */
+    <rules: {r |<if(r.ruleDescriptor.ruleScope)><ruleAttributeScopeFree(scope=r.ruleDescriptor.ruleScope)><\n><endif>}>
+    <recognizer.scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScopeFree(it)><\n><endif>}>
+
+    <@cleanup>
+    <@end>
+<if(grammar.directDelegates)>
+	// Free the parsers that we delegated functions to.
+	// NULL the state so we only free it once.
+	//
+	<grammar.directDelegates:
+         {g| ctx-><g:delegateName()>-><if(TREE_PARSER)>pTreeParser<else>pParser<endif>->rec->state = NULL;
+         ctx-><g:delegateName()>->free(ctx-><g:delegateName()>);}; separator="\n">
+<endif>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+	if	(RULEMEMO != NULL)
+	{
+		RULEMEMO->free(RULEMEMO);
+		RULEMEMO = NULL;
+	}
+<endif>
+<endif>
+	// Free this parser
+	//
+<if(TREE_PARSER)>
+    ctx->pTreeParser->free(ctx->pTreeParser);<\n>
+<else>
+    ctx->pParser->free(ctx->pParser);<\n>
+<endif>
+
+    ANTLR3_FREE(ctx);
+
+    /* Everything is released, so we can return
+     */
+    return;
+ }
+
+/** Return token names used by this <grammarType()>
+ *
+ * The returned pointer is the base of the token names table; the token
+ * number is used as the index into it.
+ *
+ * \return Pointer to first char * in the table.
+ */
+static pANTLR3_UINT8    *getTokenNames()
+{
+        return <grammar.composite.rootGrammar.recognizerName>TokenNames;
+}
+
+    <members>
+
+/* Declare the bitsets
+ */
+<bitsets:{it | <bitsetDeclare(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits)>}>
+
+
+<if(cyclicDFAs)>
+
+/* =========================================================================
+ * DFA tables for the parser
+ */
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+/* =========================================================================
+ * End of DFA tables for the parser
+ */
+<endif>
+
+/* ==============================================
+ * Parsing rules
+ */
+<rules; separator="\n\n">
+<if(grammar.delegatedRules)>
+	// Delegated methods that appear to be a part of this
+	// parser
+	//
+<grammar.delegatedRules:{ruleDescriptor|
+    <returnType()> <ruleDescriptor.name>(p<name> ctx<if(ruleDescriptor.parameterScope.attributes)>, <endif><ruleDescriptor.parameterScope:parameterScope()>)
+    \{
+        <if(ruleDescriptor.hasReturnValue)>return <endif>ctx-><ruleDescriptor.grammar:delegateName()>-><ruleDescriptor.name>(ctx-><ruleDescriptor.grammar:delegateName()><if(ruleDescriptor.parameterScope.attributes)>, <endif><ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">);
+	\}}; separator="\n">
+
+<endif>
+/* End of parsing rules
+ * ==============================================
+ */
+
+/* ==============================================
+ * Syntactic predicates
+ */
+<synpreds:{p | <synpred(predname=p)>}>
+/* End of syntactic predicates
+ * ==============================================
+ */
+
+
+
+
+
+>>
+
+parser(	grammar,
+		name,
+		scopes,
+		tokens,
+		tokenNames,
+		rules,
+		numRules,
+		bitsets,
+		ASTLabelType,
+		superClass="Parser",
+		labelType="pANTLR3_COMMON_TOKEN",
+		members={<actions.parser.members>}
+		) ::= <<
+<genericParser(inputStreamType="pANTLR3_COMMON_TOKEN_STREAM", rewriteElementType="TOKEN", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(	grammar,
+			name,
+			scopes,
+			tokens,
+			tokenNames,
+			globalAction,
+			rules,
+			numRules,
+			bitsets,
+			filterMode,
+			labelType={<ASTLabelType>},
+			ASTLabelType="pANTLR3_BASE_TREE",
+			superClass="TreeParser",
+			members={<actions.treeparser.members>}
+			) ::= <<
+<genericParser(inputStreamType="pANTLR3_COMMON_TREE_NODE_STREAM", rewriteElementType="NODE", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc., just give the simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>
+static void <ruleName>_fragment(p<name> ctx <ruleDescriptor.parameterScope:parameterScope()>)
+{
+	<ruleLabelDefs()>
+	<ruleLabelInitializations()>
+<if(trace)>
+    ANTLR3_PRINTF("enter <ruleName> %d failed = %d, backtracking = %d\\n",LT(1),failed,BACKTRACKING);
+    <block>
+    ANTLR3_PRINTF("exit <ruleName> %d, failed = %d, backtracking = %d\\n",LT(1),failed,BACKTRACKING);
+
+<else>
+    <block>
+<endif>
+<ruleCleanUp()>
+}
+// $ANTLR end <ruleName>
+>>
+
+synpred(predname) ::= <<
+static ANTLR3_BOOLEAN <predname>(p<name> ctx)
+{
+    ANTLR3_MARKER   start;
+    ANTLR3_BOOLEAN  success;
+
+    BACKTRACKING++;
+    <@start()>
+    start	= MARK();
+    <predname>_fragment(ctx);	    // can never throw exception
+    success	= !(FAILEDFLAG);
+    REWIND(start);
+    <@stop()>
+    BACKTRACKING--;
+    FAILEDFLAG	= ANTLR3_FALSE;
+    return success;
+}<\n>
+>>
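+
+/** In the generated decision code a predicate produced by synpred() above is
+ *  called as a plain boolean guard, for example (the name is illustrative):
+ *
+ *      if (synpred3_<name>(ctx)) { ... }
+ *
+ *  It marks the input, runs the corresponding _fragment rule, rewinds and
+ *  returns whether the speculative match succeeded.
+ */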
+
+lexerSynpred(predname) ::= <<
+<synpred(predname)>
+>>
+
+ruleMemoization(rname) ::= <<
+<if(memoize)>
+if ( (BACKTRACKING>0) && (HAVEPARSEDRULE(<ruleDescriptor.index>)) )
+{
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!ruleDescriptor.isSynPred)>
+	retval.start = 0;<\n>
+<endif>
+<endif>
+    <(ruleDescriptor.actions.after):execAfter()>
+    <finalCode(finalBlock=finally)>
+<if(!ruleDescriptor.isSynPred)>
+    <scopeClean()><\n>
+<endif>
+    return <ruleReturnValue()>;
+}
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+if  (HASEXCEPTION())
+{
+    goto rule<ruleDescriptor.name>Ex;
+}
+<if(backtracking)>
+if (HASFAILED())
+{
+    <scopeClean()>
+    <@debugClean()>
+    return <ruleReturnValue()>;
+}
+<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>
+if (BACKTRACKING>0)
+{
+    FAILEDFLAG = <true_value()>;
+    <scopeClean()>
+    return <ruleReturnValue()>;
+}
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+/**
+ * $ANTLR start <ruleName>
+ * <fileName>:<description>
+ */
+static <returnType()>
+<ruleName>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>)
+{
+    <if(trace)>ANTLR3_PRINTF("enter <ruleName> %s failed=%d, backtracking=%d\n", LT(1), BACKTRACKING);<endif>
+    <ruleDeclarations()>
+    <ruleDescriptor.actions.declarations>
+    <ruleLabelDefs()>
+    <ruleInitializations()>
+    <ruleDescriptor.actions.init>
+    <ruleMemoization(rname=ruleName)>
+    <ruleLabelInitializations()>
+    <@preamble()>
+    {
+        <block>
+    }
+
+    <ruleCleanUp()>
+<if(exceptions)>
+    if	(HASEXCEPTION())
+    {
+	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+    }
+    else
+    {
+	<(ruleDescriptor.actions.after):execAfter()>
+    }
+<else>
+    <if(!emptyRule)>
+        <if(actions.(actionScope).rulecatch)>
+            <actions.(actionScope).rulecatch>
+        <else>
+            if (HASEXCEPTION())
+            {
+                PREPORTERROR();
+                PRECOVER();
+                <@setErrorReturnValue()>
+            }
+            <if(ruleDescriptor.actions.after)>
+            else
+            {
+                <(ruleDescriptor.actions.after):execAfter()>
+            }<\n>
+            <endif>
+        <endif>
+    <endif>
+<endif>
+
+    <if(trace)>ANTLR3_PRINTF("exit <ruleName> %d failed=%s backtracking=%s\n", LT(1), failed, BACKTRACKING);<endif>
+    <memoize()>
+<if(finally)>
+    <finalCode(finalBlock=finally)>
+<endif>
+    <scopeClean()>
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+/* $ANTLR end <ruleName> */
+>>
+
+finalCode(finalBlock) ::= <<
+{
+    <finalBlock>
+}
+
+>>
+
+catch(decl,action) ::= <<
+/* catch(decl,action)
+ */
+{
+    <e.action>
+}
+
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> retval;<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name>;
+}>
+<endif>
+<if(memoize)>
+ANTLR3_MARKER <ruleDescriptor.name>_StartIndex;
+<endif>
+>>
+
+ruleInitializations() ::= <<
+/* Initialize rule variables
+ */
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.returnScope.attributes:{ a |
+<if(a.initValue)>retval.<a.name> = <a.initValue>;<endif>
+}>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<if(a.initValue)><a.name> = <a.initValue>;<endif>
+}>
+<endif>
+<if(memoize)>
+<ruleDescriptor.name>_StartIndex = INDEX();<\n>
+<endif>
+<ruleDescriptor.useScopes:{it |<scopeTop(it)> = <scopePush(it)>;}; separator="\n">
+<ruleDescriptor.ruleScope:{it |<scopeTop(it.name)> = <scopePush(it.name)>;}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{it |<labelType>    <it.label.text>;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{it |pANTLR3_VECTOR    list_<it.label.text>;}; separator="\n"
+>
+<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
+    :ruleLabelDef(); separator="\n"
+>
+>>
+
+ruleLabelInitializations() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{it |<it.label.text>       = NULL;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{it |list_<it.label.text>     = NULL;}; separator="\n"
+>
+<[ruleDescriptor.ruleLabels,ruleDescriptor.ruleListLabels]
+    :ruleLabelInitVal(); separator="\n"
+>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!ruleDescriptor.isSynPred)>
+retval.start = LT(1); retval.stop = retval.start;<\n>
+<endif>
+<endif>
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it |<labelType> <it.label.text>;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{it |ANTLR3_UINT32 <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it |pANTLR3_INT_TRIE list_<it.label.text>;}; separator="\n"
+>
+>>
+
+lexerRuleLabelInit() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it |<it.label.text> = NULL;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it |list_<it.label.text> = antlr3IntTrieNew(31);}; separator="\n"
+>
+>>
+
+lexerRuleLabelFree() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it |<it.label.text> = NULL;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it |list_<it.label.text>->free(list_<it.label.text>);}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+%>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( BACKTRACKING>0 ) { MEMOIZE(<ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+
+// This is where rules clean up and exit
+//
+goto rule<ruleDescriptor.name>Ex; /* Prevent compiler warnings */
+rule<ruleDescriptor.name>Ex: ;
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+<if(!ruleDescriptor.isSynPred)>
+retval.stop = LT(-1);<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+scopeClean() ::= <<
+<ruleDescriptor.useScopes:{it |<scopePop(it)>}; separator="\n">
+<ruleDescriptor.ruleScope:{it |<scopePop(it.name)>}; separator="\n">
+
+>>
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules, which do not produce tokens.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+//   Comes from: <block.description>
+/** \brief Lexer rule generated by ANTLR3
+ *
+ * $ANTLR start <ruleName>
+ *
+ * Looks to match the characters that constitute the token <ruleName>
+ * from the attached input stream.
+ *
+ *
+ * \remark
+ *  - lexer->error == ANTLR3_TRUE if an exception was thrown.
+ */
+static ANTLR3_INLINE
+void m<ruleName>(p<name> ctx<if(ruleDescriptor.parameterScope)>, <endif><ruleDescriptor.parameterScope:parameterScope()>)
+{
+	ANTLR3_UINT32	_type;
+    <ruleDeclarations()>
+    <ruleDescriptor.actions.declarations>
+    <lexerRuleLabelDefs()>
+    <if(trace)>System.out.println("enter <ruleName> '"+(char)LA(1)+"' line="+GETLINE()+":"+GETCHARPOSITIONINLINE()+" failed="+failed+" backtracking="+BACKTRACKING);<endif>
+
+<if(nakedBlock)>
+    <ruleMemoization(rname=ruleName)>
+    <lexerRuleLabelInit()>
+    <ruleDescriptor.actions.init>
+
+    <block><\n>
+<else>
+    <ruleMemoization(rname=ruleName)>
+    <lexerRuleLabelInit()>
+    _type	    = <ruleName>;
+
+    <ruleDescriptor.actions.init>
+
+    <block>
+	LEXSTATE->type = _type;
+<endif>
+    <if(trace)> ANTLR3_FPRINTF(stderr, "exit <ruleName> '%c' line=%d:%d failed = %d, backtracking =%d\n",LA(1),GETLINE(),GETCHARPOSITIONINLINE(),failed,BACKTRACKING);<endif>
+    <ruleCleanUp()>
+    <lexerRuleLabelFree()>
+    <(ruleDescriptor.actions.after):execAfter()>
+    <memoize>
+}
+// $ANTLR end <ruleName>
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+/** This is the entry point into the lexer from an object that
+ *  wants to generate the next token, such as a pCOMMON_TOKEN_STREAM
+ */
+static void
+mTokens(p<name> ctx)
+{
+    <block><\n>
+
+    goto ruleTokensEx; /* Prevent compiler warnings */
+ruleTokensEx: ;
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+
+// <fileName>:<description>
+{
+    int alt<decisionNumber>=<maxAlt>;
+    <decls>
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    <@prebranch()>
+    switch (alt<decisionNumber>)
+    {
+	<alts:{a | <altSwitchCase(i,a)>}>
+    }
+    <@postbranch()>
+}
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+{
+    //  <fileName>:<description>
+
+    ANTLR3_UINT32 alt<decisionNumber>;
+
+    alt<decisionNumber>=<maxAlt>;
+
+    <decls>
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>)
+    {
+	<alts:{a | <altSwitchCase(i,a)>}>
+    }
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+{
+    int cnt<decisionNumber>=0;
+    <decls>
+    <@preloop()>
+
+    for (;;)
+    {
+        int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	<decision>
+	<@postdecision()>
+	switch (alt<decisionNumber>)
+	{
+	    <alts:{a | <altSwitchCase(i,a)>}>
+	    default:
+
+		if ( cnt<decisionNumber> >= 1 )
+		{
+		    goto loop<decisionNumber>;
+		}
+		<ruleBacktrackFailure()>
+		<earlyExitEx()>
+		<@earlyExitException()>
+		goto rule<ruleDescriptor.name>Ex;
+	}
+	cnt<decisionNumber>++;
+    }
+    loop<decisionNumber>: ;	/* Jump to here if this rule does not match */
+    <@postloop()>
+}
+>>
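
positiveClosureBlock wraps the same decision-and-switch inside a for(;;) loop, counts iterations in cntN, and raises an early-exit error only when the very first iteration fails; the default case jumps out through loopN once at least one element has matched. A standalone C sketch of that control flow, with a hypothetical digit-matching body (not generated output):

    /* Sketch of a (...)+ loop: keep matching while the element predicts,
     * and report an error only if the first iteration fails. */
    #include <stdio.h>

    static int matchDigitsPlus(const char *input)
    {
        int cnt1 = 0;       /* how many iterations of the (...)+ body matched */
        int pos  = 0;

        for (;;)
        {
            int alt1 = 2;                           /* 2 = exit branch */
            if (input[pos] >= '0' && input[pos] <= '9')
                alt1 = 1;                           /* 1 = loop body   */

            switch (alt1)
            {
                case 1:
                    pos++;                          /* "match" one digit */
                    break;
                default:
                    if (cnt1 >= 1)
                        goto loop1;                 /* matched at least once: done */
                    fprintf(stderr, "early exit: expected at least one digit\n");
                    return -1;
            }
            cnt1++;
        }
    loop1:
        return pos;                                 /* number of digits consumed */
    }

    int main(void)
    {
        printf("%d\n", matchDigitsPlus("123abc"));  /* 3  */
        printf("%d\n", matchDigitsPlus("abc"));     /* -1 */
        return 0;
    }
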
+
+earlyExitEx() ::= <<
+/* mismatchedSetEx()
+ */
+CONSTRUCTEX();
+EXCEPTION->type = ANTLR3_EARLY_EXIT_EXCEPTION;
+EXCEPTION->name = (void *)ANTLR3_EARLY_EXIT_NAME;
+<\n>
+>>
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+
+// <fileName>:<description>
+<decls>
+
+<@preloop()>
+for (;;)
+{
+    int alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>)
+    {
+	<alts:{a | <altSwitchCase(i,a)>}>
+	default:
+	    goto loop<decisionNumber>;	/* break out of the loop */
+	    break;
+    }
+}
+loop<decisionNumber>: ; /* Jump out to here if this rule does not match */
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by antlr before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase(altNum,alt) ::= <<
+case <altNum>:
+    <@prealt()>
+    <alt>
+    break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+{
+    <@declarations()>
+    <@initializations()>
+    <elements:element()>
+    <rew>
+    <@cleanup()>
+}
+>>
+
+// E L E M E N T S
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+/** Dump the elements one per line */
+element(e) ::= <<
+<@prematch()>
+<e.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)><label> = (<labelType>)<endif> MATCHT(<token>, &FOLLOW_<token>_in_<ruleName><elementIndex>);
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label> == NULL)
+{
+    list_<label>=ctx->vectors->newVector(ctx->vectors);
+}
+list_<label>->add(list_<label>, <elem>, NULL);
+>>
+
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = LA(1);<\n>
+<endif>
+MATCHC(<char>);
+<checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = LA(1);<\n>
+<endif>
+MATCHRANGE(<a>, <b>);
+<checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= LA(1);<\n>
+<else>
+<label>=(<labelType>)LT(1);<\n>
+<endif>
+<endif>
+if ( <s> )
+{
+    CONSUME();
+    <postmatchCode>
+<if(!LEXER)>
+    PERRORRECOVERY=ANTLR3_FALSE;
+<endif>
+    <if(backtracking)>FAILEDFLAG=ANTLR3_FALSE;<\n><endif>
+}
+else
+{
+    <ruleBacktrackFailure()>
+    <mismatchedSetEx()>
+    <@mismatchedSetException()>
+<if(LEXER)>
+    LRECOVER();
+<else>
+    RECOVERFROMMISMATCHEDSET(&FOLLOW_set_in_<ruleName><elementIndex>);
+<endif>
+    goto rule<ruleDescriptor.name>Ex;
+}<\n>
+>>
+
+mismatchedSetEx() ::= <<
+CONSTRUCTEX();
+EXCEPTION->type         = ANTLR3_MISMATCHED_SET_EXCEPTION;
+EXCEPTION->name         = (void *)ANTLR3_MISMATCHED_SET_NAME;
+<if(PARSER)>
+EXCEPTION->expectingSet = &FOLLOW_set_in_<ruleName><elementIndex>;
+<endif>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex) ::= <<
+<if(label)>
+<label>Start = GETCHARINDEX();
+MATCHS(<string>);
+<checkRuleBacktrackFailure()>
+<label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
+<label>->setType(<label>, ANTLR3_TOKEN_INVALID);
+<label>->setStartIndex(<label>, <label>Start);
+<label>->setStopIndex(<label>, GETCHARINDEX()-1);
+<label>->input = INPUT->tnstream->istream;
+<else>
+MATCHS(<string>);
+<checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)>
+<label>=(<labelType>)LT(1);<\n>
+<endif>
+MATCHANYT();
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = LA(1);<\n>
+<endif>
+MATCHANY();
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it, possibly with arguments
+ *  and a return value or values. The 'rule' argument used to be the
+ *  target rule name; it is now of type Rule, whose toString() is still
+ *  the rule name, but the full rule descriptor is also accessible.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+FOLLOWPUSH(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif><if(scope)>ctx-><scope:delegateName()>-><endif><rule.name>(ctx<if(scope)>-><scope:delegateName()><endif><if(args)>, <args; separator=", "><endif>);<\n>
+FOLLOWPOP();
+<checkRuleBacktrackFailure()>
+>>
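
ruleRef brackets every nested rule invocation with FOLLOWPUSH/FOLLOWPOP so the runtime knows which tokens may legally follow the call and can resynchronize during error recovery. A standalone C sketch of the follow-set stack idea, using a hypothetical bitmask representation rather than the runtime's bitset machinery:

    /* Sketch of the follow-set stack that brackets a rule invocation: push the
     * set of tokens that may legally follow the call, invoke the rule, pop.
     * Error recovery can then skip tokens until one in the combined sets. */
    #include <stdio.h>

    #define MAX_DEPTH 32

    typedef unsigned long long FollowSet;    /* one bit per token type (<64 types) */

    static FollowSet followStack[MAX_DEPTH];
    static int       followSP = 0;

    static void followPush(FollowSet f) { followStack[followSP++] = f; }
    static void followPop(void)         { followSP--; }

    /* During recovery: is this token in any follow set currently on the stack? */
    static int inFollow(int tokenType)
    {
        for (int i = 0; i < followSP; i++)
            if (followStack[i] & (1ULL << tokenType))
                return 1;
        return 0;
    }

    static void expr(void) { /* nested rule body would go here */ }

    int main(void)
    {
        enum { TOK_SEMI = 3, TOK_RPAREN = 4 };

        followPush(1ULL << TOK_SEMI);          /* FOLLOW(expr) at this call site */
        expr();
        followPop();

        printf("%d\n", inFollow(TOK_RPAREN));  /* 0: nothing pushed right now */
        return 0;
    }
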
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference.
+ *  The 'rule' argument used to be the target rule name; it is now of
+ *  type Rule, whose toString() is still the rule name, but the full
+ *  rule descriptor is also accessible.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+/* <description> */
+<if(label)>
+{
+    ANTLR3_MARKER <label>Start<elementIndex> = GETCHARINDEX();
+    <if(scope)>ctx-><scope:delegateName()>-><endif>m<rule.name>(ctx<if(scope)>-><scope:delegateName()><endif> <if(args)>, <endif><args; separator=", ">);
+    <checkRuleBacktrackFailure()>
+    <label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
+    <label>->setType(<label>, ANTLR3_TOKEN_INVALID);
+    <label>->setStartIndex(<label>, <label>Start<elementIndex>);
+    <label>->setStopIndex(<label>, GETCHARINDEX()-1);
+    <label>->input = INPUT;
+}
+<else>
+<if(scope)>ctx-><scope:delegateName()>-><endif>m<rule.name>(ctx<if(scope)>-><scope:delegateName()><endif> <if(args)>, <endif><args; separator=", ">);
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+{
+    ANTLR3_UINT32 <label>Start<elementIndex>;
+    <labelType> <label>;
+    <label>Start<elementIndex> = GETCHARINDEX();
+    MATCHC(ANTLR3_CHARSTREAM_EOF);
+    <checkRuleBacktrackFailure()>
+    <label> = LEXSTATE->tokFactory->newToken(LEXSTATE->tokFactory);
+    <label>->setType(<label>, ANTLR3_TOKEN_EOF);
+    <label>->setStartIndex(<label>, <label>Start<elementIndex>);
+    <label>->setStopIndex(<label>, GETCHARINDEX()-1);
+    <label>->input = INPUT->tnstream->istream;
+}
+<else>
+    MATCHC(ANTLR3_CHARSTREAM_EOF);
+    <checkRuleBacktrackFailure()>
+    <endif>
+>>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "int <recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
+recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( LA(1)==ANTLR3_TOKEN_DOWN ) {
+    MATCHT(ANTLR3_TOKEN_DOWN, NULL);
+    <checkRuleBacktrackFailure()>
+    <children:element()>
+    MATCHT(ANTLR3_TOKEN_UP, NULL);
+    <checkRuleBacktrackFailure()>
+}
+<else>
+MATCHT(ANTLR3_TOKEN_DOWN, NULL);
+<checkRuleBacktrackFailure()>
+<children:element()>
+MATCHT(ANTLR3_TOKEN_UP, NULL);
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) )
+{
+    <ruleBacktrackFailure()>
+    <newFPE(...)>
+}
+>>
+
+newFPE() ::= <<
+    CONSTRUCTEX();
+    EXCEPTION->type         = ANTLR3_FAILED_PREDICATE_EXCEPTION;
+    EXCEPTION->message      = (void *)"<description>";
+    EXCEPTION->ruleName	 = (void *)"<ruleName>";
+    <\n>
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+
+{
+    int LA<decisionNumber>_<stateNumber> = LA(<k>);
+    <edges; separator="\nelse ">
+    else
+    {
+<if(eotPredictsAlt)>
+        alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+        <ruleBacktrackFailure()>
+
+        <newNVException()>
+        goto rule<ruleDescriptor.name>Ex;
+
+<endif>
+    }
+}
+>>
+
+newNVException() ::= <<
+CONSTRUCTEX();
+EXCEPTION->type         = ANTLR3_NO_VIABLE_ALT_EXCEPTION;
+EXCEPTION->message      = (void *)"<description>";
+EXCEPTION->decisionNum  = <decisionNumber>;
+EXCEPTION->state        = <stateNumber>;
+<@noViableAltException()>
+<\n>
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more like what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+{
+    int LA<decisionNumber>_<stateNumber> = LA(<k>);
+    <edges; separator="\nelse ">
+}
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+
+dfaLoopbackStateDecls()::= <<
+ANTLR3_UINT32   LA<decisionNumber>_<stateNumber>;
+>>
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+{
+   /* dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState)
+    */
+    int LA<decisionNumber>_<stateNumber> = LA(<k>);
+    <edges; separator="\nelse "><\n>
+    <if(eotPredictsAlt)>
+    <if(!edges)>
+	alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+	<else>
+    else
+    {
+	alt<decisionNumber>=<eotPredictsAlt>;
+    }<\n>
+    <endif>
+    <endif>
+}
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( <if(predicates)>(<predicates>) && <endif>(<labelExpr>))
+{
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( LA(<k>) )
+{
+<edges; separator="\n">
+
+default:
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    <newNVException()>
+    goto rule<ruleDescriptor.name>Ex;<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( LA(<k>) )
+{
+    <edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( LA(<k>) )
+{
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+default:
+    alt<decisionNumber>=<eotPredictsAlt>;
+    break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{it |case <it>:}; separator="\n">
+	{
+		<targetState>
+	}
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = cdfa<decisionNumber>.predict(ctx, RECOGNIZER, ISTREAM, &cdfa<decisionNumber>);
+<checkRuleBacktrackFailure()>
+>>
+
+/* Dump DFA tables as statically initialized arrays of shorts (16 bits)/characters (8 bits),
+ * which are then used to statically initialize the dfa structure. This means that there
+ * is no runtime initialization whatsoever, other than anything the C compiler might
+ * need to generate. In general the C compiler will lay out memory such that no
+ * runtime code is required.
+ */
+cyclicDFA(dfa) ::= <<
+/** Static dfa state tables for Cyclic dfa:
+ *    <dfa.description>
+ */
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] =
+    {
+	<dfa.eot; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] =
+    {
+	<dfa.eof; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] =
+    {
+	<dfa.min; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] =
+    {
+	<dfa.max; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] =
+    {
+	<dfa.accept; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] =
+    {
+	<dfa.special; wrap="\n", separator=", ", null="-1">
+    };
+
+/** Used when there is no transition table entry for a particular state */
+#define dfa<dfa.decisionNumber>_T_empty	    NULL
+
+<dfa.edgeTransitionClassMap.keys:{ table |
+static const ANTLR3_INT32 dfa<dfa.decisionNumber>_T<i0>[] =
+    {
+	<table; separator=", ", wrap="\n", null="-1">
+    \};}; null = "">
+
+/* Transition tables are a table of sub tables, with some tables
+ * reused for efficiency.
+ */
+static const ANTLR3_INT32 * const dfa<dfa.decisionNumber>_transitions[] =
+{
+    <dfa.transitionEdgeTables:{xref|dfa<dfa.decisionNumber>_T<xref>}; separator=", ", wrap="\n", null="_empty">
+};
+
+<if(dfa.specialStateSTs)>
+static ANTLR3_INT32 dfa<dfa.decisionNumber>_sst(p<name> ctx, pANTLR3_BASE_RECOGNIZER recognizer, pANTLR3_INT_STREAM is, pANTLR3_CYCLIC_DFA dfa, ANTLR3_INT32 s)
+{
+    ANTLR3_INT32    _s;
+
+    _s	    = s;
+    switch  (s)
+    {
+    <dfa.specialStateSTs:{state |
+    case <i0>:
+
+	<state>}; separator="\n">
+    }
+<if(backtracking)>
+    if (BACKTRACKING > 0)
+    {
+	FAILEDFLAG = ANTLR3_TRUE;
+	return	-1;
+    }
+<endif>
+
+    CONSTRUCTEX();
+    EXCEPTION->type         = ANTLR3_NO_VIABLE_ALT_EXCEPTION;
+    EXCEPTION->message      = (void *)"<dfa.description>";
+    EXCEPTION->decisionNum  = <dfa.decisionNumber>;
+    EXCEPTION->state        = _s;
+    <@noViableAltException()>
+    return -1;
+}
+<endif>
+
+<@errorMethod()>
+
+/* Declare tracking structure for Cyclic DFA <dfa.decisionNumber>
+ */
+static
+ANTLR3_CYCLIC_DFA cdfa<dfa.decisionNumber>
+    =	{
+	    <dfa.decisionNumber>,		    /* Decision number of this dfa	    */
+	    /* Which decision this represents:   */
+	    (const pANTLR3_UCHAR)"<dfa.description>",
+<if(dfa.specialStateSTs)>
+	    (CDFA_SPECIAL_FUNC) dfa<dfa.decisionNumber>_sst,
+<else>
+	    (CDFA_SPECIAL_FUNC) antlr3dfaspecialStateTransition,	/* Default special state transition function	*/
+<endif>
+
+	    antlr3dfaspecialTransition,		/* DFA specialTransition is currently just a default function in the runtime */
+	    antlr3dfapredict,			/* DFA simulator function is in the runtime */
+	    dfa<dfa.decisionNumber>_eot,	    /* EOT table			    */
+	    dfa<dfa.decisionNumber>_eof,	    /* EOF table			    */
+	    dfa<dfa.decisionNumber>_min,	    /* Minimum tokens for each state    */
+	    dfa<dfa.decisionNumber>_max,	    /* Maximum tokens for each state    */
+	    dfa<dfa.decisionNumber>_accept,	/* Accept table			    */
+	    dfa<dfa.decisionNumber>_special,	/* Special transition states	    */
+	    dfa<dfa.decisionNumber>_transitions	/* Table of transition tables	    */
+
+	};
+/* End of Cyclic DFA <dfa.decisionNumber>
+ * ---------------------
+ */
+>>
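
The cyclicDFA template emits the per-decision eot/eof/min/max/accept/special arrays plus a table of transition rows, all statically initialized, with a small struct tying them together; prediction is then a table walk over successive lookahead symbols. A standalone C sketch of that table-driven walk, with tiny hand-written tables (not generated output):

    /* Standalone sketch of table-driven DFA prediction over statically
     * initialized arrays. The toy DFA predicts alt 1 for input starting
     * "ab" and alt 2 for 'a' followed by another letter in 'a'..'c'. */
    #include <stdio.h>

    #define NUM_STATES 4

    static const int dfa_min[NUM_STATES]    = { 'a', 'a', -1, -1 };
    static const int dfa_max[NUM_STATES]    = { 'a', 'c', -1, -1 };
    static const int dfa_accept[NUM_STATES] = {  -1,  -1,  1,  2 };  /* >0: predicted alt */

    /* transition row per state, indexed by (lookahead - min) */
    static const int dfa_T0[] = { 1 };           /* 'a' -> state 1          */
    static const int dfa_T1[] = { 3, 2, 3 };     /* 'a'->3, 'b'->2, 'c'->3  */

    static const int *const dfa_transitions[NUM_STATES] = { dfa_T0, dfa_T1, NULL, NULL };

    static int dfaPredict(const char *input)
    {
        int s = 0;                              /* start state */
        for (;;)
        {
            if (dfa_accept[s] > 0)
                return dfa_accept[s];           /* reached an accept state */

            int c = *input++;
            if (c < dfa_min[s] || c > dfa_max[s])
                return -1;                      /* no viable alternative */
            s = dfa_transitions[s][c - dfa_min[s]];
        }
    }

    int main(void)
    {
        printf("%d\n", dfaPredict("ab"));   /* 1  */
        printf("%d\n", dfaPredict("ac"));   /* 2  */
        printf("%d\n", dfaPredict("x"));    /* -1 */
        return 0;
    }
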
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+{
+    ANTLR3_UINT32 LA<decisionNumber>_<stateNumber>;<\n>
+    ANTLR3_MARKER index<decisionNumber>_<stateNumber>;<\n>
+
+	LA<decisionNumber>_<stateNumber> = LA(1);<\n>
+    <if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+    index<decisionNumber>_<stateNumber> = INDEX();<\n>
+    REWINDLAST();<\n>
+    <endif>
+    s = -1;
+    <edges; separator="\nelse ">
+	<if(semPredState)> <! return input cursor to state before we rewound !>
+	SEEK(index<decisionNumber>_<stateNumber>);<\n>
+	<endif>
+    if ( s>=0 )
+    {
+	return s;
+    }
+}
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( <if(predicates)>(<predicates>) && <endif>(<labelExpr>) )
+{
+    s = <targetStateNumber>;
+}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+ s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "( (<left>) && (<right>) )"
+
+orPredicates(operands) ::= "(<operands:{o|(<o>)}; separator=\"||\">)"
+
+notPredicate(pred) ::= "!( <evalPredicate(pred,{})> )"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "<pred>(ctx)"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "LA(<k>) == <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
+((LA<decisionNumber>_<stateNumber> >= <lower>) && (LA<decisionNumber>_<stateNumber> \<= <upper>))
+%>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "((LA(<k>) >= <lower>) && (LA(<k>) \<= <upper>))"
+
+setTest(ranges) ::= "<ranges; separator=\" || \">"
+
+// A T T R I B U T E S
+
+makeScopeSet() ::= <<
+/* makeScopeSet()
+ */
+/** Definition of the <scope.name> scope variable tracking
+ *  structure. An instance of this structure is created by calling
+ *  <name>_<scope.name>Push().
+ */
+typedef struct  <scopeStruct(sname=scope.name,...)>_struct
+{
+    /** Function that the user may provide to be called when the
+     *  scope is destroyed (so you can free pANTLR3_HASH_TABLES and so on)
+     *
+     * \param frame Pointer to an instance of this typedef/struct
+     */
+    void    (ANTLR3_CDECL *free)	(struct <scopeStruct(sname=scope.name,...)>_struct * frame);
+
+    /* =============================================================================
+     * Programmer defined variables...
+     */
+    <scope.attributes:{it |<it.decl>;}; separator="\n">
+
+    /* End of programmer defined variables
+     * =============================================================================
+     */
+}
+    <scopeStruct(sname=scope.name,...)>, * <scopeType(sname=scope.name,...)>;
+
+>>
+
+globalAttributeScopeDecl(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScopeDecl(scope)
+ */
+<makeScopeSet(...)>
+<endif>
+>>
+
+ruleAttributeScopeDecl(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeDecl(scope)
+ */
+<makeScopeSet(...)>
+<endif>
+>>
+
+globalAttributeScopeFuncDecl(scope) ::=
+<<
+/* globalAttributeScopeFuncDecl(scope)
+ */
+<if(scope.attributes)>
+/* -----------------------------------------------------------------------------
+ * Function declaration for creating a <name>_<scope.name> scope set
+ */
+static <scopeType(sname=scope.name,...)>   <scopePushName(sname=scope.name,...)>(p<name> ctx);
+static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope);
+/* ----------------------------------------------------------------------------- */
+
+<endif>
+>>
+
+globalAttributeScopeFuncMacro(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScopeFuncMacro(scope)
+ */
+/** Function  for popping the top value from a <scopeStack(sname=scope.name)>
+ */
+void
+<scopePopName(sname=scope.name,...)>(p<name> ctx)
+{
+    // First see if the user defined a function they want to be called when a
+    // scope is popped/freed.
+    //
+	// If the user supplied the scope entries with a free function, then call it first
+	//
+    if	(SCOPE_TOP(<scope.name>)->free != NULL)
+	{
+        SCOPE_TOP(<scope.name>)->free(SCOPE_TOP(<scope.name>));
+	}
+
+    // Now we decrement the scope's upper limit bound. We do not actually pop the scope as
+    // we want to reuse scope entries if we do continuous pushes and pops. Most scopes don't
+    // nest too far, so we don't want to keep freeing and allocating them
+    //
+    ctx-><scopeStack(sname=scope.name,...)>_limit--;
+    SCOPE_TOP(<scope.name>) = (<scopeType(sname=scope.name)>)(ctx-><scopeStack(sname=scope.name,...)>->get(ctx-><scopeStack(sname=scope.name,...)>, ctx-><scopeStack(sname=scope.name,...)>_limit - 1));
+}
+<endif>
+>>
+
+ruleAttributeScopeFuncDecl(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeFuncDecl(scope)
+ */
+/* -----------------------------------------------------------------------------
+ * Function declarations for creating a <name>_<scope.name> scope set
+ */
+static <scopeType(sname=scope.name,...)>   <scopePushName(sname=scope.name,...)>(p<name> ctx);
+static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope);
+/* ----------------------------------------------------------------------------- */
+
+<endif>
+>>
+
+ruleAttributeScopeFuncMacro(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeFuncMacro(scope)
+ */
+/** Function for popping the top value from a <scopeStack(sname=scope.name,...)>
+ */
+void
+<scopePopName(sname=scope.name,...)>(p<name> ctx)
+{
+    // First see if the user defined a function they want to be called when a
+    // scope is popped/freed.
+    //
+	// If the user supplied the scope entries with a free function, then call it first
+	//
+    if	(SCOPE_TOP(<scope.name>)->free != NULL)
+	{
+        SCOPE_TOP(<scope.name>)->free(SCOPE_TOP(<scope.name>));
+	}
+
+    // Now we decrement the scope's upper limit bound. We do not actually pop the scope as
+    // we want to reuse scope entries if we do continuous pushes and pops. Most scopes don't
+    // nest too far, so we don't want to keep freeing and allocating them
+    //
+    ctx-><scopeStack(sname=scope.name,...)>_limit--;
+    SCOPE_TOP(<scope.name>) = (<scopeType(sname=scope.name)>)(ctx-><scopeStack(sname=scope.name,...)>->get(ctx-><scopeStack(sname=scope.name,...)>, ctx-><scopeStack(sname=scope.name,...)>_limit - 1));
+}
+
+<endif>
+>>
+
+globalAttributeScopeDef(scope) ::=
+<<
+/* globalAttributeScopeDef(scope)
+ */
+<if(scope.attributes)>
+/** Pointer to the  <scope.name> stack for use by <scopePushName(sname=scope.name)>()
+ *  and <scopePopName(sname=scope.name,...)>()
+ */
+pANTLR3_STACK <scopeStack(sname=scope.name)>;
+ANTLR3_UINT32 <scopeStack(sname=scope.name)>_limit;
+/** Pointer to the top of the stack for the global scope <scopeStack(sname=scope.name)>
+ */
+<scopeType(sname=scope.name,...)>    (*<scopePushName(sname=scope.name,...)>)(struct <name>_Ctx_struct * ctx);
+<scopeType(sname=scope.name,...)>    <scopeTopDecl(sname=scope.name,...)>;
+
+<endif>
+>>
+
+ruleAttributeScopeDef(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeDef(scope)
+ */
+/** Pointer to the  <scope.name> stack for use by <scopePushName(sname=scope.name)>()
+ *  and <scopePopName(sname=scope.name,...)>()
+ */
+pANTLR3_STACK <scopeStack(sname=scope.name,...)>;
+ANTLR3_UINT32 <scopeStack(sname=scope.name,...)>_limit;
+<scopeType(sname=scope.name,...)>   (*<scopePushName(sname=scope.name,...)>)(struct <name>_Ctx_struct * ctx);
+<scopeType(sname=scope.name,...)>   <scopeTopDecl(sname=scope.name,...)>;
+
+<endif>
+>>
+
+globalAttributeScopeFuncs(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScopeFuncs(scope)
+ */
+<attributeFuncs(scope)>
+<endif>
+>>
+
+ruleAttributeScopeFuncs(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeFuncs(scope)
+ */
+<attributeFuncs(scope)>
+<endif>
+>>
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScope(scope)
+ */
+ctx-><scopePushName(sname=scope.name,...)>     = <scopePushName(sname=scope.name,...)>;
+ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(0);
+ctx-><scopeStack(sname=scope.name,...)>_limit    = 0;
+<scopeTop(sname=scope.name,...)>      = NULL;
+<endif>
+>>
+
+ruleAttributeScope(scope) ::=
+<<
+<if(scope.attributes)>
+/* ruleAttributeScope(scope)
+ */
+ctx-><scopePushName(sname=scope.name,...)>     = <scopePushName(sname=scope.name,...)>;
+ctx-><scopeStack(sname=scope.name,...)>    = antlr3StackNew(0);
+ctx-><scopeStack(sname=scope.name,...)>_limit    = 0;
+<scopeTop(sname=scope.name,...)>      = NULL;
+<endif>
+>>
+globalAttributeScopeFree(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScopeFree(scope)
+ */
+ctx-><scopeStack(sname=scope.name,...)>->free(ctx-><scopeStack(sname=scope.name,...)>);
+<endif>
+>>
+
+ruleAttributeScopeFree(scope) ::=
+<<
+<if(scope.attributes)>
+/* ruleAttributeScopeFree(scope)
+ */
+ctx-><scopeStack(sname=scope.name,...)>->free(ctx-><scopeStack(sname=scope.name,...)>);
+<endif>
+>>
+
+scopeTopDecl(sname) ::= <<
+p<name>_<sname>Top
+>>
+
+scopeTop(sname) ::= <<
+ctx-><scopeTopDecl(sname=sname,...)>
+>>
+
+scopePop(sname) ::= <<
+<scopePopName(sname=sname,...)>(ctx);
+>>
+
+scopePush(sname) ::= <<
+p<name>_<sname>Push(ctx)
+>>
+
+scopePopName(sname) ::= <<
+p<name>_<sname>Pop
+>>
+
+scopePushName(sname) ::= <<
+p<name>_<sname>Push
+>>
+
+scopeType(sname) ::= <<
+p<name>_<sname>_SCOPE
+>>
+
+scopeStruct(sname) ::= <<
+<name>_<sname>_SCOPE
+>>
+
+scopeStack(sname) ::= <<
+p<name>_<sname>Stack
+>>
+
+attributeFuncs(scope) ::= <<
+<if(scope.attributes)>
+/* attributeFuncs(scope)
+ */
+
+static void ANTLR3_CDECL <scope.name>Free(<scopeType(sname=scope.name)> scope)
+{
+    ANTLR3_FREE(scope);
+}
+
+/** \brief Allocate initial memory for a <name> <scope.name> scope variable stack entry and
+ *         add it to the top of the stack.
+ *
+ * \remark
+ * By default the structure is freed with ANTLR3_FREE(), but you can use
+ * the \@init action to install a pointer to a custom free() routine by
+ * adding the code:
+ * \code
+ *   <scopeTop(sname=scope.name)>->free = myroutine;
+ * \endcode
+ *
+ * With lots of comments of course! The routine should be declared in
+ * \@members { } as:
+ * \code
+ *   void ANTLR3_CDECL myfunc( <scopeType(sname=scope.name)> ptr).
+ * \endcode
+ *
+ * It should perform any custom freeing that you need (call ANTLR3_FREE, not free()).
+ * NB: It should not free the pointer it is given, which is the scope stack entry itself
+ * and will be freed by the function that calls your custom free routine.
+ *
+ */
+static <scopeType(sname=scope.name)>
+<scopePushName(sname=scope.name)>(p<name> ctx)
+{
+    /* Pointer used to create a new set of attributes
+     */
+    <scopeType(sname=scope.name)>      newAttributes;
+
+    /* Allocate the memory for a new structure if we need one.
+     */
+    if (ctx-><scopeStack(sname=scope.name)>->size(ctx-><scopeStack(sname=scope.name)>) > ctx-><scopeStack(sname=scope.name)>_limit)
+    {
+        // The current limit value was less than the number of scopes available on the stack so
+        // we can just reuse one. Our limit tracks the stack count, so the index of the entry we want
+        // is one less than that, or conveniently, the current value of limit.
+        //
+        newAttributes = (<scopeType(sname=scope.name)>)ctx-><scopeStack(sname=scope.name)>->get(ctx-><scopeStack(sname=scope.name)>, ctx-><scopeStack(sname=scope.name)>_limit);
+    }
+    else
+    {
+        // Need a new allocation
+        //
+        newAttributes = (<scopeType(sname=scope.name)>) ANTLR3_MALLOC(sizeof(<scopeStruct(sname=scope.name)>));
+        if  (newAttributes != NULL)
+        {
+            /* Standard ANTLR3 library implementation
+             */
+            ctx-><scopeStack(sname=scope.name)>->push(ctx-><scopeStack(sname=scope.name)>, newAttributes, (void (*)(void *))<scope.name>Free);
+        }
+    }
+
+    // Blank out any previous free pointer; the user might or might not install a new one.
+    //
+    newAttributes->free = NULL;
+
+    // Indicate the position in the available stack that the current level is at
+    //
+    ctx-><scopeStack(sname=scope.name)>_limit++;
+
+	/* Return value is the pointer to the new entry, which may be used locally
+	 * without de-referencing via the context.
+     */
+    return  newAttributes;
+}<\n>
+
+<endif>
+>>
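
As the comments in the push/pop code above explain, popping a dynamic scope only decrements the stack's limit counter, and a later push reuses the entry that is still sitting on the underlying stack, so repeated push/pop cycles do not keep calling malloc/free. A standalone C sketch of that reuse pattern, with hypothetical types in place of the runtime's stack API:

    /* Sketch of scope-stack reuse: entries above the live "limit" stay
     * allocated so a later push can hand them back without another malloc. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Scope { int value; } Scope;

    #define MAX_SCOPES 16

    static Scope *entries[MAX_SCOPES];   /* everything ever allocated      */
    static int    allocated = 0;         /* how many entries exist         */
    static int    limit     = 0;         /* how many are currently "live"  */

    static Scope *scopePush(void)
    {
        Scope *s;
        if (allocated > limit)
            s = entries[limit];                  /* reuse a retired entry    */
        else {
            s = malloc(sizeof(Scope));           /* first time at this depth */
            entries[allocated++] = s;
        }
        limit++;
        return s;
    }

    static void scopePop(void)
    {
        limit--;                                 /* entry stays allocated    */
    }

    int main(void)
    {
        Scope *a = scopePush();
        scopePop();
        Scope *b = scopePush();                  /* same storage as 'a'      */
        printf("reused: %d\n", a == b);          /* prints: reused: 1        */
        scopePop();
        for (int i = 0; i < allocated; i++) free(entries[i]);
        return 0;
    }
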
+returnStructName(r) ::= "<r.name>_return"
+
+returnType() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+<else>
+ANTLR3_BOOLEAN
+<endif>
+%>
+
+/** Generate the C type associated with a single or multiple return
+ *  value(s).
+ */
+ruleLabelType(referencedRule) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.grammar.recognizerName>_<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+%>
+
+delegateName(d) ::= <<
+<if(d.label)><d.label><else>g<it.name><endif>
+>>
+
+/** Using a type-to-init-value map, try to initialize a type; if the type is not
+ *  in the table it must be an object, so the default value is "0".
+ */
+initValue(typeName) ::= <<
+ = <cTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label  */
+ruleLabelDef(label) ::= <<
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text>;
+#undef	RETURN_TYPE_<label.label.text>
+#define	RETURN_TYPE_<label.label.text> <ruleLabelType(referencedRule=label.referencedRule)><\n>
+>>
+/**  Rule label default value */
+ruleLabelInitVal(label) ::= <<
+>>
+
+ASTLabelType() ::= "<if(recognizer.ASTLabelType)><recognizer.ASTLabelType><else>pANTLR3_BASE_TREE<endif>"
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+typedef struct <ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>_struct
+{
+<if(!TREE_PARSER)>
+    /** Generic return elements for ANTLR3 rules that are not in tree parsers or returning trees
+     */
+    pANTLR3_COMMON_TOKEN    start;
+    pANTLR3_COMMON_TOKEN    stop;
+<else>
+    <recognizer.ASTLabelType>       start;
+    <recognizer.ASTLabelType>       stop;
+<endif>
+    <@ruleReturnMembers()>
+    <ruleDescriptor.returnScope.attributes:{it |<it.type> <it.name>;}; separator="\n">
+}
+    <ruleDescriptor.grammar.recognizerName>_<ruleDescriptor:returnStructName()>;<\n><\n>
+<endif>
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{it |<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name>=<expr>;"
+
+/** Note that the scopeAttributeRef does not have access to the
+ * grammar name directly
+ */
+scopeAttributeRef(scope,attr,index,negIndex) ::= <%
+<if(negIndex)>
+	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get( ctx->SCOPE_STACK(<scope>), ctx->SCOPE_STACK(<scope>)->size(ctx->SCOPE_STACK(<scope>)) - <negIndex> - 1) ))-><attr.name>
+<else>
+<if(index)>
+	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get(ctx->SCOPE_STACK(<scope>), (ANTLR3_UINT32)<index> ) ))-><attr.name>
+<else>
+	(SCOPE_TOP(<scope>))-><attr.name>
+<endif>
+<endif>
+%>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
+<if(negIndex)>
+	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get( ctx->SCOPE_STACK(<scope>), ctx->SCOPE_STACK(<scope>)->size(ctx->SCOPE_STACK(<scope>)) - <negIndex> - 1) ))-><attr.name> = <expr>;
+<else>
+<if(index)>
+	((SCOPE_TYPE(<scope>))(ctx->SCOPE_STACK(<scope>)->get(ctx->SCOPE_STACK(<scope>), (ANTLR3_UINT32)<index> ) ))-><attr.name> = <expr>;
+<else>
+	(SCOPE_TOP(<scope>))-><attr.name>=<expr>;
+<endif>
+<endif>
+%>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "ctx->SCOPE_STACK(<scope>)"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<scope>.<attr.name>
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>=<expr>;
+<else>
+<attr.name>=<expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+//
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText(<scope>))"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>->getType(<scope>))"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>->getLine(<scope>))"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>->getCharPositionInLine(<scope>))"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>->getChannel(<scope>))"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>->getTokenIndex(<scope>))"
+tokenLabelPropertyRef_tree(scope,attr) ::= "(<scope>->tree)"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>->getText(<scope>)->toInt32(<scope>->getText(<scope>)))"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>.start)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>.stop)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>.tree)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+(STRSTREAM->toStringSS(STRSTREAM, <scope>.start, <scope>.start))
+<else>
+(STRSTREAM->toStringTT(STRSTREAM, <scope>.start, <scope>.stop))
+<endif>
+>>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>->getType(<scope>))"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>->getLine(<scope>))"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>->getCharPositionInLine(<scope>))"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>->getChannel(<scope>))"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>->getTokenIndex(<scope>))"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText(<scope>))"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "retval.start"
+rulePropertyRef_stop(scope,attr) ::= "retval.stop"
+rulePropertyRef_tree(scope,attr) ::= "retval.tree"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+INPUT->toStringSS(INPUT, ADAPTOR->getTokenStartIndex(ADAPTOR, retval.start), ADAPTOR->getTokenStopIndex(ADAPTOR, retval.start))
+<else>
+STRSTREAM->toStringTT(STRSTREAM, retval.start, LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "LEXER->getText(LEXER)"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "LEXSTATE->tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "LEXSTATE->tokenStartCharPositionInLine"
+lexerRulePropertyRef_channel(scope,attr) ::= "LEXSTATE->channel"
+lexerRulePropertyRef_start(scope,attr) ::= "LEXSTATE->tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(LEXER->getCharIndex(LEXER)-1)"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_int(scope,attr) ::= "LEXER->getText(LEXER)->toInt32(LEXER->getText(LEXER))"
+
+
+// setting $st and $tree is allowed in local rule. everything else is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree=<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st=<expr>;"
+
+
+/** How to deal with an @after action for C targets. Because we cannot rely on
+ *  any garbage collection, the @after code is executed even in backtracking
+ *  mode. This must be documented clearly.
+ */
+execAfter(action) ::= <<
+{
+    <action>
+}
+>>
+
+/** How to execute an action (when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if ( <actions.(actionScope).synpredgate> )
+{
+    <action>
+}
+<else>
+if ( BACKTRACKING == 0 )
+{
+    <action>
+}
+<endif>
+<else>
+{
+    <action>
+}
+<endif>
+>>
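
execAction gates user actions so that side effects run only on the real parse: when backtracking is enabled, the action body is wrapped in a check of the synpred gate or of BACKTRACKING == 0. A standalone C sketch of that gating, using a plain global in place of the runtime macros:

    /* Sketch of gating embedded actions on backtracking depth: during a
     * syntactic-predicate trial run (depth > 0) side effects are skipped. */
    #include <stdio.h>

    static int backtracking = 0;   /* >0 while speculatively parsing */

    static void rule_with_action(void)
    {
        /* ... matching code would run here in both modes ... */

        if (backtracking == 0)
        {
            /* user action: only execute side effects on the real parse */
            printf("action executed\n");
        }
    }

    int main(void)
    {
        backtracking = 1;          /* speculative attempt: action suppressed */
        rule_with_action();

        backtracking = 0;          /* real parse: action runs               */
        rule_with_action();
        return 0;
    }
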
+
+// M I S C (properties, etc...)
+
+bitsetDeclare(name, words64) ::= <<
+
+/** Bitset defining follow set for error recovery in rule state: <name>  */
+static	ANTLR3_BITWORD <name>_bits[]	= { <words64:{it |ANTLR3_UINT64_LIT(<it>)}; separator=", "> };
+static  ANTLR3_BITSET_LIST <name>	= { <name>_bits, <length(words64)>	};
+>>
+
+bitset(name, words64) ::= <<
+antlr3BitsetSetAPI(&<name>);<\n>
+>>
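
bitsetDeclare packs each follow set into 64-bit ANTLR3_BITWORD words so that membership during error recovery is a word index plus a mask test. A standalone C sketch of such a bitset, with made-up token types (not generated output):

    /* Sketch of a follow-set bitset: one bit per token type, stored in
     * 64-bit words; membership is a word index plus a mask test. */
    #include <stdio.h>
    #include <stdint.h>

    #define FOLLOW_WORDS 2                       /* enough for token types 0..127 */

    /* follow set containing token types 3, 10 and 70 */
    static const uint64_t FOLLOW_expr_bits[FOLLOW_WORDS] = {
        (1ULL << 3) | (1ULL << 10),              /* word 0: types 0..63   */
        (1ULL << (70 - 64))                      /* word 1: types 64..127 */
    };

    static int bitsetMember(const uint64_t *bits, int nwords, int tokenType)
    {
        int word = tokenType / 64;
        if (word >= nwords)
            return 0;
        return (int)((bits[word] >> (tokenType % 64)) & 1ULL);
    }

    int main(void)
    {
        printf("%d\n", bitsetMember(FOLLOW_expr_bits, FOLLOW_WORDS, 10));  /* 1 */
        printf("%d\n", bitsetMember(FOLLOW_expr_bits, FOLLOW_WORDS, 70));  /* 1 */
        printf("%d\n", bitsetMember(FOLLOW_expr_bits, FOLLOW_WORDS, 11));  /* 0 */
        return 0;
    }
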
+
+codeFileExtension() ::= ".c"
+
+true_value() ::= "ANTLR3_TRUE"
+false_value() ::= "ANTLR3_FALSE"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/C/Dbg.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/C/Dbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/C/Dbg.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/AST.stg
new file mode 100644
index 0000000..a94ed7c
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/AST.stg
@@ -0,0 +1,430 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2007-2008 Johannes Luber
+ * Copyright (c) 2005-2007 Kunle Odutola
+ * Copyright (c) 2011 Sam Harwell
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+
+<if(!TREE_PARSER)>
+<! tree parser would already have imported !>
+using Antlr.Runtime.Tree;
+using RewriteRuleITokenStream = Antlr.Runtime.Tree.RewriteRuleTokenStream;
+<endif>
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+parserCtorBody() ::= <%
+<super.parserCtorBody()><\n>
+TreeAdaptor = 
+<if(actions.(actionScope).treeAdaptorInitializer)>
+	<actions.(actionScope).treeAdaptorInitializer>
+<else>
+	new <actions.(actionScope).treeAdaptorType; null="CommonTreeAdaptor">()
+<endif>
+;
+%>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+private <treeAdaptorType()> adaptor;
+
+public <treeAdaptorType()> TreeAdaptor
+{
+	get
+	{
+		return adaptor;
+	}
+
+	set
+	{
+		this.adaptor = value;
+		<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
+	}
+}
+>>
+
+treeAdaptorType() ::= <<
+<actions.(actionScope).treeAdaptorType; null="ITreeAdaptor">
+>>
+
+ruleReturnBaseType() ::= <%
+Ast<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope\<<ASTLabelType>, <labelType>>
+%>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> root_0 = default(<ASTLabelType>);<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+	:{it|<ASTLabelType> <it.label.text>_tree = default(<ASTLabelType>);}; separator="\n">
+<ruleDescriptor.tokenListLabels:{it|<ASTLabelType> <it.label.text>_tree = default(<ASTLabelType>);}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+	:{it|RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+@alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = (<ASTLabelType>)adaptor.Nil();
+<endif>
+<endif>
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<token>.Add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefTrack(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<token>.Add(<label>);
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<rule.name>.Add(<label>.Tree);
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<rule>.Add(<label>.Tree);
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	referencedWildcardLabels,
+	referencedWildcardListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::= <<
+<\n>{
+// AST REWRITE
+// elements: <referencedElementsDeep; separator=", ">
+// token labels: <referencedTokenLabels; separator=", ">
+// rule labels: <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels: <referencedRuleListLabels; separator=", ">
+// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<if(backtracking)>
+if (<actions.(actionScope).synpredgate>) {
+<endif>
+<prevRuleRootRef()>.Tree = root_0;
+<rewriteCodeLabels()>
+root_0 = (<ASTLabelType>)adaptor.Nil();
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER&&rewriteMode)>
+<prevRuleRootRef()>.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+if (<prevRuleRootRef()>.Tree != null)
+	input.ReplaceChildren(adaptor.GetParent(retval.Start), adaptor.GetChildIndex(retval.Start), adaptor.GetChildIndex(_last), retval.Tree);
+<endif>
+<! if parser or tree-parser && rewrite!=true, we need to set result !>
+<if(!TREE_PARSER||!rewriteMode)>
+<prevRuleRootRef()>.Tree = root_0;
+<endif>
+<if(backtracking)>
+}
+<endif>
+}
+
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{it|RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{it|RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
+    separator="\n"
+>
+<referencedWildcardLabels
+	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
+	separator="\n"
+>
+<referencedWildcardListLabels
+	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
+	separator="\n"
+>
+<referencedRuleLabels
+    :{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.Tree:null);};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"token <it>",list_<it>);};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather than the shallow list used by other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in the immediate block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if (<referencedElementsDeep:{el | stream_<el>.HasNext}; separator="||">)
+{
+	<alt>
+}
+<referencedElementsDeep:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in the immediate block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.HasNext}; separator="||"> )
+{
+	<alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in the immediate block; no nested blocks
+	description) ::=
+<<
+if (!(<referencedElements:{el | stream_<el>.HasNext}; separator="||">))
+{
+	throw new RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.HasNext}; separator="||"> )
+{
+	<alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>)
+{
+	<a.alt>
+}
+<else>
+{
+	<a.alt>
+}
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.Nil();
+<root:rewriteElement()>
+<children:rewriteElement()>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <%
+<@pregen()>
+DebugLocation(<e.line>, <e.pos>);<\n>
+<e.el>
+%>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,args,terminalOptions={}) ::= <<
+adaptor.AddChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,args,terminalOptions={}) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,elementIndex,terminalOptions={}) ::= <<
+adaptor.AddChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,elementIndex,terminalOptions={}) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>);<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean the previous value.  I am reusing the
+ *  variable 'tree' sitting in the retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assignment will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<rule>.NextTree());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<rule>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+adaptor.AddChild(root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<action>, root_<treeLevel>);<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+createImaginaryNode(tokenType,args,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+<! new MethodNode(IDLabel, args) !>
+new <terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+(<ASTLabelType>)adaptor.Create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
+<endif>
+%>
+
+createRewriteNodeFromElement(token,args,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+new <terminalOptions.node>(stream_<token>.NextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+adaptor.Create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.NextNode()
+<endif>
+<endif>
+%>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTDbg.stg
new file mode 100644
index 0000000..e7b2904
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTDbg.stg
@@ -0,0 +1,94 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2007-2008 Johannes Luber
+ * Copyright (c) 2005-2007 Kunle Odutola
+ * Copyright (c) 2005 Terence Parr
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : Java by code generator.
+ */
+
+parserMembers() ::= <<
+	protected DebugTreeAdaptor adaptor;
+
+	public ITreeAdaptor TreeAdaptor
+	{
+		get
+		{
+			return adaptor;
+		}
+		set
+		{
+<if(grammar.grammarIsRoot)>
+			this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
+<else>
+			this.adaptor = (DebugTreeAdaptor)adaptor; // delegator sends dbg adaptor
+<endif><\n>
+			<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
+		}
+	}<\n>
+>>
+
+parserCtorBody() ::= <<
+<super.parserCtorBody()>
+>>
+
+createListenerAndHandshake() ::= <<
+DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, <if(TREE_PARSER)>input.TreeAdaptor<else>adaptor<endif> );
+DebugListener = proxy;
+<inputStreamType> = new Debug<inputStreamType>( input, proxy );
+try
+{
+	proxy.Handshake();
+}
+catch ( IOException ioe )
+{
+	ReportError( ioe );
+}
+>>
+
+@ctorForRootGrammar.finally() ::= <<
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;
+proxy.TreeAdaptor = adap;
+>>
+
+@ctorForProfilingRootGrammar.finally() ::=<<
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;
+>>
+
+@ctorForPredefinedListener.superClassRef() ::= ": base( input, dbg )"
+
+@ctorForPredefinedListener.finally() ::=<<
+<if(grammar.grammarIsRoot)><! don't create new adaptor for delegates !>
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;<\n>
+<endif>
+>>
+
+//@rewriteElement.pregen() ::= "dbg.Location( <e.line>, <e.pos> );"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTParser.stg
new file mode 100644
index 0000000..1585287
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTParser.stg
@@ -0,0 +1,192 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2007-2008 Johannes Luber
+ * Copyright (c) 2005-2007 Kunle Odutola
+ * Copyright (c) 2011 Sam Harwell
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
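For a concrete picture of the auto-build dimension handled below, consider a rule shaped like `assign : ID '='^ expr ';'! ;` with `output=AST`: each matched element is added as a child of `root_0`, `^` promotes its node to the new root via `BecomeRoot`, and `!` simply omits the `AddChild` call. The hand-written sketch below mirrors only the adaptor call sequence of the generated code; the token types, token texts, and fixed input are invented, and it assumes the ANTLR3 C# runtime is referenced.

```csharp
using System;
using Antlr.Runtime;
using Antlr.Runtime.Tree;

class AutoBuildSketch
{
    const int ID = 4, ASSIGN = 5, INT = 6;   // hypothetical token types

    static void Main()
    {
        ITreeAdaptor adaptor = new CommonTreeAdaptor();
        object root_0 = adaptor.Nil();                    // every rule starts with a nil root

        // ID        -> tokenRef: create node, AddChild(root_0, ...)
        adaptor.AddChild(root_0, adaptor.Create(new CommonToken(ID, "x")));

        // '='^      -> tokenRefRuleRoot: create node, BecomeRoot(..., root_0)
        root_0 = adaptor.BecomeRoot(adaptor.Create(new CommonToken(ASSIGN, "=")), root_0);

        // expr (here a single INT) -> ruleRef: AddChild(root_0, subrule.Tree)
        adaptor.AddChild(root_0, adaptor.Create(new CommonToken(INT, "1")));

        // ';'!      -> tokenRefBang: matched, but no AddChild at all

        object result = adaptor.RulePostProcessing(root_0);       // ruleCleanUp()
        Console.WriteLine(((CommonTree)result).ToStringTree());   // (= x 1)
    }
}
```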
+
+@rule.setErrorReturnValue() ::= <<
+retval.Tree = (<ASTLabelType>)adaptor.ErrorNode(input, retval.Start, input.LT(-1), re);
+<! System.out.WriteLine("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if (state.backtracking == 0) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <<
+<super.matchSet(postmatchCode={<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>adaptor.AddChild(root_0, <createNodeFromToken(...)>);}, ...)>
+>>
+
+matchRuleBlockSet(s,label,elementIndex,postmatchCode,treeLevel="0",terminalOptions={}) ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,elementIndex,debug,terminalOptions={}) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);
+<endif>
+<super.matchSet(postmatchCode={<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<createNodeFromToken(...)>, root_0);}, ...)>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>adaptor.AddChild(root_0, <label>.Tree);
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_0);
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
+>>
+
+// WILDCARD AST
+
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(token=[],...)>"
+
+wildcardRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.wildcard(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+>>
+
+createNodeFromToken(label,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>)
+<else>
+(<ASTLabelType>)adaptor.Create(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>)
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+adaptor.SetTokenBoundaries(retval.Tree, retval.Start, retval.Stop);
+<if(backtracking)>}<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTTreeParser.stg
new file mode 100644
index 0000000..316282f
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ASTTreeParser.stg
@@ -0,0 +1,380 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2007-2008 Johannes Luber
+ * Copyright (c) 2005-2007 Kunle Odutola
+ * Copyright (c) 2011 Sam Harwell
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<if(!ruleDescriptor.isSynPred)>
+<ASTLabelType> _first_0 = default(<ASTLabelType>);
+<ASTLabelType> _last = default(<ASTLabelType>);
+<endif>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel=false, treeLevel=false) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<if(rewriteMode)>
+retval.Tree = (<ASTLabelType>)_first_0;
+if (adaptor.GetParent(retval.Tree)!=null && adaptor.IsNil(adaptor.GetParent(retval.Tree)))
+    retval.Tree = (<ASTLabelType>)adaptor.GetParent(retval.Tree);
+<endif>
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+{
+<ASTLabelType> _save_last_<treeLevel> = _last;
+<ASTLabelType> _first_<treeLevel> = default(<ASTLabelType>);
+<if(!rewriteMode)>
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.Nil();
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
+<if(root.el.rule)>
+if (_first_<enclosingTreeLevel> == null) _first_<enclosingTreeLevel> = <root.el.label>.Tree;
+<else>
+if (_first_<enclosingTreeLevel> == null) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if (input.LA(1) == TokenTypes.Down) {
+    Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}
+<else>
+<super.tree(...)>
+<endif>
+>>
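The `tree()` template above matches one subtree in the flattened node stream a tree parser consumes: the root node, then `DOWN`, the children, then `UP`, with the `DOWN`/`UP` pair skipped when a nullable child list is absent. The sketch below walks that flattened shape directly with the runtime's `CommonTreeNodeStream` instead of a generated tree parser; the `PLUS`/`INT` token types are invented, and it assumes the runtime's `TokenTypes.Down`/`TokenTypes.Up`/`TokenTypes.EndOfFile` constants carry their usual values.

```csharp
using System;
using Antlr.Runtime;
using Antlr.Runtime.Tree;

class TreeStreamSketch
{
    const int PLUS = 4, INT = 5;   // hypothetical token types

    static void Main()
    {
        // Build ^(PLUS 1 2) by hand.
        ITreeAdaptor adaptor = new CommonTreeAdaptor();
        object plus = adaptor.Create(new CommonToken(PLUS, "+"));
        adaptor.AddChild(plus, adaptor.Create(new CommonToken(INT, "1")));
        adaptor.AddChild(plus, adaptor.Create(new CommonToken(INT, "2")));

        // A tree parser sees the flattened stream: + DOWN 1 2 UP EOF
        CommonTreeNodeStream nodes = new CommonTreeNodeStream(plus);
        while (nodes.LA(1) != TokenTypes.EndOfFile)        // EOF (-1) ends the stream
        {
            int type = nodes.LA(1);
            if (type == TokenTypes.Down)      Console.WriteLine("DOWN");
            else if (type == TokenTypes.Up)   Console.WriteLine("UP");
            else Console.WriteLine(((CommonTree)nodes.LT(1)).Text);
            nodes.Consume();
        }
    }
}
```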
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<else>
+<super.tokenRefBang(...)>
+<endif>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<endif><\n>
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
+if (_first_<treeLevel> == null) _first_<treeLevel> = <label>;
+<endif>
+<else>
+<super.tokenRef(...)>
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<tokenRef(...)>
+<listLabelElem(elem=label,...)>
+<else>
+<super.tokenRefAndListLabel(...)>
+<endif>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+<else>
+<super.tokenRefRuleRoot(...)>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<tokenRefRuleRoot(...)>
+<listLabelElem(elem=label,...)>
+<else>
+<super.tokenRefRuleRootAndListLabel(...)>
+<endif>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.DupTree(<label>);
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
+if (_first_<treeLevel> == null) _first_<treeLevel> = <label>;
+<endif>
+<else>
+<super.wildcard(...)>
+<endif>
+>>
+
+// SET AST
+
+matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<endif><\n>
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>\}<endif>
+<endif>
+}, ...
+)>
+<else>
+<super.matchSet(...)>
+<endif>
+>>
+
+matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<matchSet(...)>
+<noRewrite(...)> <! set return tree !>
+<else>
+<super.matchRuleBlockSet(...)>
+<endif>
+>>
+
+matchSetBang(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(...)>
+<else>
+<super.matchSetBang(...)>
+<endif>
+>>
+
+matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>\}<endif>
+<endif>
+}, ...
+)>
+<else>
+<super.matchSetRuleRoot(...)>
+<endif>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
+<if(!rewriteMode)>
+adaptor.AddChild(root_<treeLevel>, <label>.Tree);
+<else> <! rewrite mode !>
+if (_first_<treeLevel> == null) _first_<treeLevel> = <label>.Tree;
+<endif>
+<else>
+<super.ruleRef(...)>
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<ruleRef(...)>
+<listLabelElem(elem={<label>.Tree},...)>
+<else>
+<super.ruleRefAndListLabel(...)>
+<endif>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_<treeLevel>);
+<endif>
+<else>
+<super.ruleRefRuleRoot(...)>
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<ruleRefRuleRoot(...)>
+<listLabelElem(elem={<label>.Tree},...)>
+<else>
+<super.ruleRefRuleRootAndListLabel(...)>
+<endif>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrack(...)>
+<else>
+<super.ruleRefTrack(...)>
+<endif>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrackAndListLabel(...)>
+<else>
+<super.ruleRefTrackAndListLabel(...)>
+<endif>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRuleRootTrack(...)>

+<else>
+<super.ruleRefRuleRootTrack(...)>
+<endif>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+<else>
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+<endif>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change NextToken to NextNode.
+ */
+createRewriteNodeFromElement(token,terminalOptions,args) ::= <%
+<if(terminalOptions.node)>
+new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif>stream_<token>.NextNode())
+<else>
+stream_<token>.NextNode()
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!ruleDescriptor.isSynPred)>
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+<if(backtracking)>}<endif>
+<endif>
+<endif>
+>>
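`ruleCleanUp()` hands the accumulated `root_0` to `adaptor.RulePostProcessing`, which collapses the flat nil root every rule builds into: a nil root with exactly one child becomes that child, while a nil root with several children stays a nil-rooted list. A small sketch of both cases against the runtime's `CommonTreeAdaptor`, with an invented `ID` token type:

```csharp
using System;
using Antlr.Runtime;
using Antlr.Runtime.Tree;

class RulePostProcessingSketch
{
    const int ID = 4;   // hypothetical token type

    static void Main()
    {
        ITreeAdaptor adaptor = new CommonTreeAdaptor();

        // Single child under the nil root: the child itself is returned.
        object root = adaptor.Nil();
        adaptor.AddChild(root, adaptor.Create(new CommonToken(ID, "a")));
        CommonTree single = (CommonTree)adaptor.RulePostProcessing(root);
        Console.WriteLine(single.ToStringTree());                        // a

        // Several children: the nil root survives as a flat list.
        root = adaptor.Nil();
        adaptor.AddChild(root, adaptor.Create(new CommonToken(ID, "a")));
        adaptor.AddChild(root, adaptor.Create(new CommonToken(ID, "b")));
        CommonTree list = (CommonTree)adaptor.RulePostProcessing(root);
        Console.WriteLine(adaptor.IsNil(list) + " " + list.ToStringTree());  // True a b
    }
}
```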
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/CSharp2.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/CSharp2.stg
new file mode 100644
index 0000000..8f15ef2
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/CSharp2.stg
@@ -0,0 +1,1772 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2007-2008 Johannes Luber
+ * Copyright (c) 2005-2007 Kunle Odutola
+ * Copyright (c) 2011 Sam Harwell
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+csharpVisibilityMap ::= [
+	"private":"private",
+	"protected":"protected",
+	"public":"public",
+	"fragment":"private",
+	default:"private"
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(	LEXER,PARSER,TREE_PARSER, actionScope, actions,
+			docComment, recognizer,
+			name, tokens, tokenNames, rules, cyclicDFAs,
+			bitsets, buildTemplate, buildAST, rewriteMode, profile,
+			backtracking, synpreds, memoize, numRules,
+			fileName, ANTLRVersion, generatedTimestamp, trace,
+			scopes, superClass, literals) ::=
+<<
+//------------------------------------------------------------------------------
+// \<auto-generated>
+//     This code was generated by a tool.
+//     ANTLR Version: <ANTLRVersion>
+//
+//     Changes to this file may cause incorrect behavior and will be lost if
+//     the code is regenerated.
+// \</auto-generated>
+//------------------------------------------------------------------------------
+
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<if(trace)>
+#define ANTLR_TRACE
+<endif>
+<@debugPreprocessor()>
+// The variable 'variable' is assigned but its value is never used.
+#pragma warning disable 168, 219
+// Unreachable code detected.
+#pragma warning disable 162
+// Missing XML comment for publicly visible type or member 'Type_or_Member'
+#pragma warning disable 1591
+
+<actions.(actionScope).header>
+
+<@imports>
+using System.Collections.Generic;
+using Antlr.Runtime;
+using Antlr.Runtime.Misc;
+<if(TREE_PARSER)>
+using Antlr.Runtime.Tree;
+using RewriteRuleITokenStream = Antlr.Runtime.Tree.RewriteRuleTokenStream;
+<endif>
+using ConditionalAttribute = System.Diagnostics.ConditionalAttribute;
+<@end>
+<if(actions.(actionScope).namespace)>
+namespace <actions.(actionScope).namespace>
+{
+<endif>
+<docComment>
+<recognizer>
+<if(actions.(actionScope).namespace)>
+
+} // namespace <actions.(actionScope).namespace>
+<endif>
+>>
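For orientation, the header portion of `outputFile` expands to roughly the file below for a hypothetical grammar `T` whose `@header` action sets a namespace; the version, timestamp, namespace name and class body shown here are placeholders rather than output of an actual run.

```csharp
//------------------------------------------------------------------------------
// <auto-generated>
//     This code was generated by a tool.
//     ANTLR Version: 3.5.2
//
//     Changes to this file may cause incorrect behavior and will be lost if
//     the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------

// $ANTLR 3.5.2 T.g (timestamp inserted by the tool)

// The variable 'variable' is assigned but its value is never used.
#pragma warning disable 168, 219
// Unreachable code detected.
#pragma warning disable 162
// Missing XML comment for publicly visible type or member 'Type_or_Member'
#pragma warning disable 1591

using System.Collections.Generic;
using Antlr.Runtime;
using Antlr.Runtime.Misc;
using ConditionalAttribute = System.Diagnostics.ConditionalAttribute;

namespace My.Grammar
{
    // <docComment> and <recognizer> (e.g. the generated TParser class) expand here.
}
```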
+
+lexerInputStreamType() ::= <<
+<actions.(actionScope).inputStreamType; null="ICharStream">
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="CommonToken",
+      superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Lexer<endif>}) ::= <<
+[System.CodeDom.Compiler.GeneratedCode("ANTLR", "<ANTLRVersion>")]
+[System.CLSCompliant(false)]
+<parserModifier(grammar=grammar, actions=actions)> partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
+{
+	<tokens:{it|public const int <it.name; format="id">=<it.type>;}; separator="\n">
+	<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+	<actions.lexer.members>
+
+    // delegates
+    <grammar.delegates:
+         {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
+    <last(grammar.delegators):{g|private <g.recognizerName> gParent;}>
+
+	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>()<! needed by subclasses !>
+	{
+		OnCreated();
+	}
+
+	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<lexerInputStreamType()> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
+		: this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
+	{
+	}
+
+	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<lexerInputStreamType()> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+		: base(input, state)
+	{
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+		state.ruleMemo = new System.Collections.Generic.Dictionary\<int, int>[<numRules>+1];<\n><! index from 1..n !>
+<endif>
+<endif>
+		<grammar.directDelegates:
+		 {g|<g:delegateName()> = new <g.recognizerName>(input, this.state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+		<grammar.delegators:
+		 {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+		<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+
+		OnCreated();
+	}
+	public override string GrammarFileName { get { return "<fileName>"; } }
+
+	private static readonly bool[] decisionCanBacktrack = new bool[0];
+
+<if(grammar.hasDelegates)>
+	public override <lexerInputStreamType()> CharStream
+	{
+		get
+		{
+			return base.CharStream;
+		}
+		set
+		{
+			base.CharStream = value;
+			<grammar.directDelegates:
+			 {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+			<grammar.delegators:
+			 {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+			<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+		}
+	}
+
+<if(grammar.delegates)>
+	public override void SetState(RecognizerSharedState state)
+	{
+		base.SetState(state);
+		<grammar.delegates:{g|<g:delegateName()>.SetState(state);}; separator="\n">
+	}
+<endif>
+
+<endif>
+<if(filterMode)>
+	<filteringNextToken()>
+<endif>
+
+	[Conditional("ANTLR_TRACE")]
+	protected virtual void OnCreated() {}
+	[Conditional("ANTLR_TRACE")]
+	protected virtual void EnterRule(string ruleName, int ruleIndex) {}
+	[Conditional("ANTLR_TRACE")]
+	protected virtual void LeaveRule(string ruleName, int ruleIndex) {}
+
+    <rules; separator="\n">
+
+	<insertLexerSynpreds(synpreds)>
+
+	#region DFA
+	<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+
+	protected override void InitDFAs()
+	{
+		base.InitDFAs();
+		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this<if(dfa.specialStateSTs)>, SpecialStateTransition<dfa.decisionNumber><endif>);}; separator="\n">
+	}
+
+	<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+	#endregion
+
+}
+>>
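Once ANTLR has been run over a grammar `T` with this target, the generated lexer is driven like any other ANTLR3 C# lexer. A minimal sketch, assuming a generated class named `TLexer` (the name depends on your grammar) and that `TokenTypes.EndOfFile` is the runtime's -1 end-of-file token type:

```csharp
using System;
using Antlr.Runtime;

class LexerDriverSketch
{
    static void Main()
    {
        // TLexer is the hypothetical class ANTLR generates from grammar T.
        var lexer = new TLexer(new ANTLRStringStream("x = 1;"));

        // NextToken() keeps producing tokens until the end-of-file token type.
        for (IToken t = lexer.NextToken(); t.Type != TokenTypes.EndOfFile; t = lexer.NextToken())
        {
            Console.WriteLine("type={0} text='{1}'", t.Type, t.Text);
        }
    }
}
```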
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+public override IToken NextToken()
+{
+	while (true)
+	{
+		if (input.LA(1) == CharStreamConstants.EndOfFile)
+		{
+			IToken eof = new CommonToken((ICharStream)input, CharStreamConstants.EndOfFile, TokenChannels.Default, input.Index, input.Index);
+			eof.Line = Line;
+			eof.CharPositionInLine = CharPositionInLine;
+			return eof;
+		}
+		state.token = null;
+		state.channel = TokenChannels.Default;
+		state.tokenStartCharIndex = input.Index;
+		state.tokenStartCharPositionInLine = input.CharPositionInLine;
+		state.tokenStartLine = input.Line;
+		state.text = null;
+		try
+		{
+			int m = input.Mark();
+			state.backtracking=1;<! means we won't throw slow exception !>
+			state.failed=false;
+			mTokens();
+			state.backtracking=0;
+			<! mTokens backtracks with synpred at backtracking==2
+			   and we set the synpredgate to allow actions at level 1. !>
+			if (state.failed)
+			{
+				input.Rewind(m);
+				input.Consume();<! advance one char and try again !>
+			}
+			else
+			{
+				Emit();
+				return state.token;
+			}
+		}
+		catch (RecognitionException re)
+		{
+			// shouldn't happen in backtracking mode, but...
+			ReportError(re);
+			Recover(re);
+		}
+	}
+}
+
+public override void Memoize(IIntStream input, int ruleIndex, int ruleStartIndex)
+{
+	if (state.backtracking > 1)
+		base.Memoize(input, ruleIndex, ruleStartIndex);
+}
+
+public override bool AlreadyParsedRule(IIntStream input, int ruleIndex)
+{
+	if (state.backtracking > 1)
+		return base.AlreadyParsedRule(input, ruleIndex);
+
+	return false;
+}
+>>
+
+actionGate() ::= "state.backtracking == 0"
+
+filteringActionGate() ::= "state.backtracking == 1"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              labelType, members, rewriteElementType,
+              filterMode, ASTLabelType="object") ::= <<
+[System.CodeDom.Compiler.GeneratedCode("ANTLR", "<ANTLRVersion>")]
+[System.CLSCompliant(false)]
+<parserModifier(grammar=grammar, actions=actions)> partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
+{
+<if(grammar.grammarIsRoot)>
+	internal static readonly string[] tokenNames = new string[] {
+		"\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
+	};
+<endif>
+	<tokens:{it|public const int <it.name; format="id">=<it.type>;}; separator="\n">
+
+<if(grammar.delegates)>
+	// delegates
+	<grammar.delegates:
+		 {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
+<endif>
+<if(grammar.delegators)>
+	// delegators
+	<grammar.delegators:
+		 {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
+	<last(grammar.delegators):{g|private <g.recognizerName> gParent;}>
+<endif>
+
+<if(grammar.delegates)>
+	public override void SetState(RecognizerSharedState state)
+	{
+		base.SetState(state);
+		<grammar.delegates:{g|<g:delegateName()>.SetState(state);}; separator="\n">
+	}
+
+<if(TREE_PARSER)>
+	public override void SetTreeNodeStream(ITreeNodeStream input)
+	{
+		base.SetTreeNodeStream(input);
+		<grammar.delegates:{g|<g:delegateName()>.SetTreeNodeStream(input);}; separator="\n">
+	}
+<endif>
+<endif>
+
+	<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+	<@members()>
+
+	public override string[] TokenNames { get { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; } }
+	public override string GrammarFileName { get { return "<fileName>"; } }
+
+	<members>
+
+	[Conditional("ANTLR_TRACE")]
+	protected virtual void OnCreated() {}
+	[Conditional("ANTLR_TRACE")]
+	protected virtual void EnterRule(string ruleName, int ruleIndex) {}
+	[Conditional("ANTLR_TRACE")]
+	protected virtual void LeaveRule(string ruleName, int ruleIndex) {}
+
+	#region Rules
+	<rules; separator="\n">
+	#endregion Rules
+
+<if(grammar.delegatedRules)>
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+	#region Delegated rules
+<grammar.delegatedRules:{ruleDescriptor|
+	<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> <returnType(ruleDescriptor)> <ruleDescriptor.name; format="id">(<ruleDescriptor.parameterScope:parameterScope()>) <!throws RecognitionException !>{ <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name; format="id">(<if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope.attributes:{a|<a.name; format="id">}; separator=", "><endif>); \}}; separator="\n">
+	#endregion Delegated rules
+<endif>
+
+	<insertSynpreds(synpreds)>
+
+<if(cyclicDFAs)>
+	#region DFA
+	<cyclicDFAs:{dfa | private DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+
+	protected override void InitDFAs()
+	{
+		base.InitDFAs();
+		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>( this<if(dfa.specialStateSTs)>, SpecialStateTransition<dfa.decisionNumber><endif> );}; separator="\n">
+	}
+
+	<cyclicDFAs:cyclicDFA()><! dump tables for all DFA !>
+	#endregion DFA
+<endif>
+
+<if(bitsets)>
+	#region Follow sets
+	private static class Follow
+	{
+		<bitsets:{it|<bitset(name={_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>}; separator="\n">
+	}
+	#endregion Follow sets
+<endif>
+}
+>>
+
+@genericParser.members() ::= <<
+#if ANTLR_DEBUG
+private static readonly bool[] decisionCanBacktrack =
+	new bool[]
+	{
+		false, // invalid decision
+		<grammar.decisions:{d | <d.dfa.hasSynPred>}; wrap="\n", separator=", ">
+	};
+#else
+private static readonly bool[] decisionCanBacktrack = new bool[0];
+#endif
+<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<inputStreamType> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+	: this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
+{
+}
+<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<inputStreamType> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+	: base(input, state)
+{
+<if(grammar.directDelegates)>
+	<grammar.directDelegates:
+	 {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+<endif>
+<if(grammar.indirectDelegates)>
+	<grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+<endif>
+<if(grammar.delegators)>
+	<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+<endif>
+	<parserCtorBody()>
+	OnCreated();
+}
+>>
+
+// imported grammars are 'public' (can't be internal because their return scope classes must be accessible)
+parserModifier(grammar, actions) ::= <<
+<if(grammar.grammarIsRoot)><actions.(actionScope).modifier; null="public"><else>public<endif>
+>>
+
+parserCtorBody() ::= <<
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = new System.Collections.Generic.Dictionary\<int, int>[<length(grammar.allImportedRules)>+1];<\n><! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
+       ASTLabelType="object", superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Parser<endif>}, labelType="IToken",
+       members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="ITokenStream", rewriteElementType="IToken", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="object",
+           superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Tree.<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif><endif>},
+           members={<actions.treeparser.members>}) ::= <<
+<genericParser(inputStreamType="ITreeNodeStream", rewriteElementType="Node", ...)>
+>>
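End to end, the classes these templates produce are used in the usual ANTLR3 way. A hedged sketch, assuming a grammar `T` with `output=AST;` and a start rule `prog`; `TLexer`, `TParser`, `prog()`, the nested `prog_return` scope and the optional `TWalker` tree grammar are all names the tool would derive from that hypothetical grammar:

```csharp
using System;
using Antlr.Runtime;
using Antlr.Runtime.Tree;

class ParserDriverSketch
{
    static void Main()
    {
        var lexer  = new TLexer(new ANTLRStringStream("x = 1;"));   // generated from grammar T
        var tokens = new CommonTokenStream(lexer);
        var parser = new TParser(tokens);                           // generated from grammar T

        TParser.prog_return r = parser.prog();        // start rule; returns the AST rule scope
        CommonTree tree = (CommonTree)r.Tree;         // retval.Tree, set by ruleCleanUp()
        Console.WriteLine(tree.ToStringTree());

        // A generated tree grammar (hypothetical TWalker) would then consume the AST:
        // new TWalker(new CommonTreeNodeStream(tree)).prog();
    }
}
```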
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc..., just give simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+[Conditional("ANTLR_TRACE")]
+protected virtual void EnterRule_<ruleName>_fragment() {}
+[Conditional("ANTLR_TRACE")]
+protected virtual void LeaveRule_<ruleName>_fragment() {}
+
+// $ANTLR start <ruleName>
+<ruleModifier(grammar,ruleDescriptor)> void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope()>)
+{
+	<ruleLabelDefs()>
+	EnterRule_<ruleName>_fragment();
+	EnterRule("<ruleName>_fragment", <ruleDescriptor.index>);
+	TraceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+	try
+	{
+		<block>
+	}
+	finally
+	{
+		TraceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+		LeaveRule("<ruleName>_fragment", <ruleDescriptor.index>);
+		LeaveRule_<ruleName>_fragment();
+	}
+}
+// $ANTLR end <ruleName>
+>>
+
+insertLexerSynpreds(synpreds) ::= <<
+<insertSynpreds(synpreds)>
+>>
+
+insertSynpreds(synpreds) ::= <<
+<if(synpreds)>
+#region Synpreds
+private bool EvaluatePredicate(System.Action fragment)
+{
+	bool success = false;
+	state.backtracking++;
+	<@start()>
+	try { DebugBeginBacktrack(state.backtracking);
+	int start = input.Mark();
+	try
+	{
+		fragment();
+	}
+	catch ( RecognitionException re )
+	{
+		System.Console.Error.WriteLine("impossible: "+re);
+	}
+	success = !state.failed;
+	input.Rewind(start);
+	} finally { DebugEndBacktrack(state.backtracking, success); }
+	<@stop()>
+	state.backtracking--;
+	state.failed=false;
+	return success;
+}
+#endregion Synpreds
+<endif>
+>>
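`EvaluatePredicate` above is the whole backtracking mechanism: bump `state.backtracking`, mark the input, run the synpred fragment (which records failure in `state.failed` instead of throwing while backtracking), then rewind and report whether the fragment got through. The toy below reproduces that mark/speculate/rewind discipline without the ANTLR runtime, purely to make the control flow concrete; it is an analogy, not generated code.

```csharp
using System;

class BacktrackSketch
{
    static string input = "ab(";
    static int pos;
    static bool failed;
    static int backtracking;

    // A "fragment": consumes input and records failure instead of throwing,
    // the way a synpred fragment behaves when state.backtracking > 0.
    static void Synpred1_fragment()
    {
        MatchChar('a');
        MatchChar('(');          // will fail on "ab("
    }

    static void MatchChar(char c)
    {
        if (failed) return;
        if (pos < input.Length && input[pos] == c) pos++;
        else failed = true;      // ruleBacktrackFailure(): flag, no exception
    }

    // Mirrors EvaluatePredicate: mark, speculate, rewind, report success.
    static bool EvaluatePredicate(Action fragment)
    {
        backtracking++;
        int start = pos;         // input.Mark()
        fragment();
        bool success = !failed;
        pos = start;             // input.Rewind(start)
        backtracking--;
        failed = false;
        return success;
    }

    static void Main()
    {
        Console.WriteLine(EvaluatePredicate(Synpred1_fragment));   // False
        Console.WriteLine("pos after speculation: " + pos);        // 0 (input untouched)
    }
}
```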
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if (state.backtracking > 0 && AlreadyParsedRule(input, <ruleDescriptor.index>)) { <returnFromRule()> }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.failed) <returnFromRule()><endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.backtracking>0) {state.failed=true; <returnFromRule()>}<endif>
+>>
+
+ruleWrapperMap ::= [
+	"bottomup":{<ruleWrapperBottomup()>},
+	"topdown":{<ruleWrapperTopdown()>},
+	default:""
+]
+
+ruleWrapperBottomup() ::= <<
+<if(TREE_PARSER && filterMode)>
+protected override <if(buildAST)>IAstRuleReturnScope<else>void<endif> Bottomup() { return bottomup(); }
+<endif>
+>>
+
+ruleWrapperTopdown() ::= <<
+<if(TREE_PARSER && filterMode)>
+protected override <if(buildAST)>IAstRuleReturnScope<else>void<endif> Topdown() { return topdown(); }
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(ruleDescriptor.returnScope)>
+
+[Conditional("ANTLR_TRACE")]
+protected virtual void EnterRule_<ruleName>() {}
+[Conditional("ANTLR_TRACE")]
+protected virtual void LeaveRule_<ruleName>() {}
+<ruleWrapperMap.(ruleName)>
+// $ANTLR start "<ruleName>"
+// <fileName>:<description>
+[GrammarRule("<ruleName>")]
+<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> <returnType(ruleDescriptor)> <ruleName; format="id">(<ruleDescriptor.parameterScope:parameterScope()>)
+{
+	EnterRule_<ruleName>();
+	EnterRule("<ruleName>", <ruleDescriptor.index>);
+	TraceIn("<ruleName>", <ruleDescriptor.index>);
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+	try { DebugEnterRule(GrammarFileName, "<ruleName>");
+	DebugLocation(<ruleDescriptor.tree.line>, <ruleDescriptor.EORNode.charPositionInLine>);
+	<@preamble()>
+	try
+	{
+		<ruleMemoization(name=ruleName)>
+		<block>
+		<ruleCleanUp()>
+		<(ruleDescriptor.actions.after):execAction()>
+	}
+<if(exceptions)>
+	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+	<actions.(actionScope).rulecatch>
+<else>
+	catch (RecognitionException re)
+	{
+		ReportError(re);
+		Recover(input,re);
+	<@setErrorReturnValue()>
+	}
+<endif>
+<endif>
+<endif>
+	finally
+	{
+		TraceOut("<ruleName>", <ruleDescriptor.index>);
+		LeaveRule("<ruleName>", <ruleDescriptor.index>);
+		LeaveRule_<ruleName>();
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+    }
+ 	DebugLocation(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);
+	} finally { DebugExitRule(GrammarFileName, "<ruleName>"); }
+	<@postamble()>
+	<returnFromRule()><\n>
+}
+// $ANTLR end "<ruleName>"
+>>
+
+// imported grammars need to have internal rules
+ruleModifier(grammar,ruleDescriptor) ::= <<
+<if(grammar.grammarIsRoot)><csharpVisibilityMap.(ruleDescriptor.modifier); null="private"><else>internal<endif>
+>>
+
+// imported grammars need to have public return scopes
+returnScopeModifier(grammar,ruleDescriptor) ::= <<
+<if(grammar.grammarIsRoot)><csharpVisibilityMap.(ruleDescriptor.modifier); null="private"><else>public<endif>
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>)
+{
+	<e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType(ruleDescriptor)> retval = new <returnType(ruleDescriptor)>();
+retval.Start = (<labelType>)input.LT(1);
+<elseif(ruleDescriptor.returnScope)>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name; format="id"> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+int <ruleDescriptor.name>_StartIndex = input.Index;
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{it|<it>_stack.Push(new <it>_scope());<it>_scopeInit(<it>_stack.Peek());}; separator="\n">
+<ruleDescriptor.ruleScope:{it|<it.name>_stack.Push(new <it.name>_scope());<it.name>_scopeInit(<it.name>_stack.Peek());}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{it|<it>_scopeAfter(<it>_stack.Peek());<it>_stack.Pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{it|<it.name>_scopeAfter(<it.name>_stack.Peek());<it.name>_stack.Pop();}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it|<labelType> <it.label.text> = default(<labelType>);}; separator="\n"
+>
+<ruleDescriptor.tokenListLabels
+    :{it|List\<<labelType>\> list_<it.label.text> = null;}; separator="\n"
+>
+<[ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it|List\<<ASTLabelType>\> list_<it.label.text> = null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
+<ruleDescriptor.ruleListLabels:ruleLabelDef(); separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it|<labelType> <it.label.text> = default(<labelType>);}; separator="\n"
+>
+<[ruleDescriptor.charListLabels,
+  ruleDescriptor.charLabels]
+	:{it|int <it.label.text> = 0;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it|List\<<labelType>\> list_<it.label.text> = null;}; separator="\n"
+>
+<ruleDescriptor.charListLabels:{it|List\<int> list_<it.label.text> = null;}; separator="\n"
+>
+>>
+
+returnFromRule() ::= <%
+return
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<! This comment is a hack to make sure the following
+   single space appears in the output. !> <ruleDescriptor.singleValueReturnName>
+<else>
+<!!> retval
+<endif>
+<endif>
+<endif>
+;
+%>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.Stop = (<labelType>)input.LT(-1);
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if (state.backtracking > 0) { Memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+
+[Conditional("ANTLR_TRACE")]
+protected virtual void EnterRule_<ruleName>() {}
+[Conditional("ANTLR_TRACE")]
+protected virtual void LeaveRule_<ruleName>() {}
+
+// $ANTLR start "<ruleName>"
+[GrammarRule("<ruleName>")]
+<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>)
+{
+	EnterRule_<ruleName>();
+	EnterRule("<ruleName>", <ruleDescriptor.index>);
+	TraceIn("<ruleName>", <ruleDescriptor.index>);
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+		try
+		{
+<if(nakedBlock)>
+		<ruleMemoization(name=ruleName)>
+		<lexerRuleLabelDefs()>
+		<ruleDescriptor.actions.init>
+		<block>
+<else>
+		int _type = <ruleName>;
+		int _channel = DefaultTokenChannel;
+		<ruleMemoization(name=ruleName)>
+		<lexerRuleLabelDefs()>
+		<ruleDescriptor.actions.init>
+		<block>
+		<ruleCleanUp()>
+		state.type = _type;
+		state.channel = _channel;
+		<(ruleDescriptor.actions.after):execAction()>
+<endif>
+	}
+	finally
+	{
+		TraceOut("<ruleName>", <ruleDescriptor.index>);
+		LeaveRule("<ruleName>", <ruleDescriptor.index>);
+		LeaveRule_<ruleName>();
+        <ruleScopeCleanUp()>
+        <memoize()>
+    }
+}
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+
+public override void mTokens()
+{
+	<block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+try { DebugEnterSubRule(<decisionNumber>);
+try { DebugEnterDecision(<decisionNumber>, decisionCanBacktrack[<decisionNumber>]);
+<decision>
+} finally { DebugExitDecision(<decisionNumber>); }
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>)
+{
+<alts:{a|<altSwitchCase(i,a)>}>
+}
+} finally { DebugExitSubRule(<decisionNumber>); }
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+try { DebugEnterDecision(<decisionNumber>, decisionCanBacktrack[<decisionNumber>]);
+<decision>
+} finally { DebugExitDecision(<decisionNumber>); }
+<@postdecision()>
+switch (alt<decisionNumber>)
+{
+<alts:{a|<altSwitchCase(i,a)>}>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+DebugEnterAlt(1);
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+DebugEnterAlt(1);
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int cnt<decisionNumber>=0;
+<decls>
+<@preloop()>
+try { DebugEnterSubRule(<decisionNumber>);
+while (true)
+{
+	int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	try { DebugEnterDecision(<decisionNumber>, decisionCanBacktrack[<decisionNumber>]);
+	<decision>
+	} finally { DebugExitDecision(<decisionNumber>); }
+	<@postdecision()>
+	switch (alt<decisionNumber>)
+	{
+	<alts:{a|<altSwitchCase(i,a)>}>
+	default:
+		if (cnt<decisionNumber> >= 1)
+			goto loop<decisionNumber>;
+
+		<ruleBacktrackFailure()>
+		EarlyExitException eee<decisionNumber> = new EarlyExitException( <decisionNumber>, input );
+		DebugRecognitionException(eee<decisionNumber>);
+		<@earlyExitException()>
+		throw eee<decisionNumber>;
+	}
+	cnt<decisionNumber>++;
+}
+loop<decisionNumber>:
+	;
+
+} finally { DebugExitSubRule(<decisionNumber>); }
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+try { DebugEnterSubRule(<decisionNumber>);
+while (true)
+{
+	int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	try { DebugEnterDecision(<decisionNumber>, decisionCanBacktrack[<decisionNumber>]);
+	<decision>
+	} finally { DebugExitDecision(<decisionNumber>); }
+	<@postdecision()>
+	switch ( alt<decisionNumber> )
+	{
+	<alts:{a|<altSwitchCase(i,a)>}>
+	default:
+		goto loop<decisionNumber>;
+	}
+}
+
+loop<decisionNumber>:
+	;
+
+} finally { DebugExitSubRule(<decisionNumber>); }
+<@postloop()>
+>>
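`positiveClosureBlock` and `closureBlock` emit the same control-flow skeleton: an endless `while` wrapped around a decision `switch`, whose `default` case exits via `goto` to a label placed just after the loop (C# has no labeled break); the `(..)+` form additionally counts iterations so it can raise `EarlyExitException` when the first pass falls through. A self-contained toy showing just that loop shape, here consuming a run of digits from a string:

```csharp
using System;

class ClosureLoopSketch
{
    static void Main()
    {
        string input = "123abc";
        int pos = 0;
        int cnt1 = 0;                       // only needed for the (..)+ form

        while (true)
        {
            int alt1 = 2;                   // default: no viable alternative
            if (pos < input.Length && char.IsDigit(input[pos]))
                alt1 = 1;                   // the decision predicts alternative 1

            switch (alt1)
            {
            case 1:
                Console.WriteLine("matched " + input[pos]);
                pos++;
                break;
            default:
                // (..)*: just leave.  (..)+: leave only if cnt1 >= 1, otherwise
                // this is where EarlyExitException would be thrown.
                goto loop1;
            }
            cnt1++;
        }
        loop1:
            ;

        Console.WriteLine("consumed " + cnt1 + " digits, stopped at index " + pos);
    }
}
```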
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by ANTLR before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase(altNum,alt) ::= <<
+case <altNum>:
+	<@prealt()>
+	DebugEnterAlt(<altNum>);
+	<alt>
+	break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+{
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+}
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element(it) ::= <%
+<@prematch()>
+DebugLocation(<it.line>, <it.pos>);<\n>
+<it.el><\n>
+%>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(label)><label>=(<labelType>)<endif>Match(input,<token>,Follow._<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+listLabelElem(label,elem,elemType) ::= <<
+if (list_<label>==null) list_<label>=new List\<<elemType; null={<labelType>}>\>();
+list_<label>.Add(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+Match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="",terminalOptions={}) ::= <<
+<if(label)>
+<matchSetLabel()>
+<endif>
+if (<s>)
+{
+	input.Consume();
+	<postmatchCode>
+	<if(!LEXER)>state.errorRecovery=false;<endif><if(backtracking)>state.failed=false;<endif>
+}
+else
+{
+	<ruleBacktrackFailure()>
+	MismatchedSetException mse = new MismatchedSetException(null,input);
+	DebugRecognitionException(mse);
+	<@mismatchedSetException()>
+<if(LEXER)>
+	Recover(mse);
+	throw mse;
+<else>
+	throw mse;
+	<! use following code to make it recover inline; remove throw mse;
+	recoverFromMismatchedSet(input,mse,Follow._set_in_<ruleName><elementIndex>);
+	!>
+<endif>
+}<\n>
+>>
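`matchSet` is why set matches need no generated Follow lookup per element: the `<s>` argument is an inline interval test over `input.LA(1)`, the token is consumed directly on success, and only the failure path builds a `MismatchedSetException`. A toy version of the same inline interval test over a plain string, with no runtime types involved:

```csharp
using System;

class InlineSetSketch
{
    static void Main()
    {
        string input = "_x1-";
        int pos = 0;

        // Stands in for: if ( (input.LA(1)>='0' && input.LA(1)<='9')
        //                  || (input.LA(1)>='a' && input.LA(1)<='z')
        //                  || input.LA(1)=='_' ) { input.Consume(); ... } else { throw ... }
        while (pos < input.Length)
        {
            char la = input[pos];
            if ((la >= '0' && la <= '9') || (la >= 'a' && la <= 'z') || la == '_')
            {
                Console.WriteLine("in set: " + la);
                pos++;                                                 // input.Consume()
            }
            else
            {
                Console.WriteLine("mismatched set at '" + la + "'");   // MismatchedSetException
                break;
            }
        }
    }
}
```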
+
+matchSetUnchecked(s,label,elementIndex,postmatchCode=false) ::= <%
+<if(label)>
+<matchSetLabel()><\n>
+<endif>
+input.Consume();<\n>
+<if(postmatchCode)>
+<postmatchCode><\n>
+<endif>
+<if(!LEXER)>state.errorRecovery=false;<endif><if(backtracking)>state.failed=false;<endif>
+%>
+
+matchSetLabel() ::= <%
+<if(LEXER)>
+<label>= input.LA(1);
+<else>
+<label>=(<labelType>)input.LT(1);
+<endif>
+%>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex) ::= <%
+<if(label)>
+int <label>Start = CharIndex;<\n>
+Match(<string>); <checkRuleBacktrackFailure()><\n>
+int <label>StartLine<elementIndex> = Line;<\n>
+int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
+<label> = new <labelType>(input, TokenTypes.Invalid, TokenChannels.Default, <label>Start, CharIndex-1);<\n>
+<label>.Line = <label>StartLine<elementIndex>;<\n>
+<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
+<else>
+Match(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+%>
+
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+MatchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<wildcard(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.  The 'rule' argument was the
+ *  target rule name, but is now of type Rule, whose toString is
+ *  the same: the rule name.  Now, though, you can access the full rule
+ *  descriptor stuff.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+PushFollow(Follow._<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name; format="id">(<args; separator=", ">);
+PopFollow();
+<checkRuleBacktrackFailure()>
+>>
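+
+/* Rough sketch of an expanded rule reference e=expr inside rule "stat"
+ * (rule, label, and follow-set names are hypothetical; the backtracking
+ * check expands to nothing when backtracking is off):
+ *
+ *   PushFollow(Follow._expr_in_stat42);
+ *   e = expr();
+ *   PopFollow();
+ */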
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabelElem(elem=label,elemType={<ASTLabelType>},...)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument was the target rule name, but is now
+ *  of type Rule, whose toString is the same: the rule name.
+ *  Now, though, you can access the full rule descriptor stuff.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <%
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;<\n>
+int <label>StartLine<elementIndex> = Line;<\n>
+int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()><\n>
+<label> = new <labelType>(input, TokenTypes.Invalid, TokenChannels.Default, <label>Start<elementIndex>, CharIndex-1);<\n>
+<label>.Line = <label>StartLine<elementIndex>;<\n>
+<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
+<else>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+%>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <%
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;<\n>
+int <label>StartLine<elementIndex> = Line;<\n>
+int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
+Match(EOF); <checkRuleBacktrackFailure()><\n>
+<labelType> <label> = new <labelType>(input, EOF, TokenChannels.Default, <label>Start<elementIndex>, CharIndex-1);<\n>
+<label>.Line = <label>StartLine<elementIndex>;<\n>
+<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
+<else>
+Match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+%>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "int <recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
+recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if (input.LA(1) == TokenTypes.Down)
+{
+	Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+	<children:element()>
+	Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
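+
+/* Rough sketch of the expansion for ^(PLUS expr expr) in a tree parser
+ * (token, rule, and follow-set names are hypothetical; DebugLocation and
+ * PushFollow/PopFollow calls around the child rules are omitted):
+ *
+ *   Match(input, PLUS, Follow._PLUS_in_expr10);
+ *   Match(input, TokenTypes.Down, null);
+ *   expr();
+ *   expr();
+ *   Match(input, TokenTypes.Up, null);
+ */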
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if (!(<evalPredicate(...)>))
+{
+	<ruleBacktrackFailure()>
+	throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
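+
+/* Rough sketch of an expanded validating predicate {x > 0}? in rule "expr"
+ * (the predicate text and rule name are hypothetical; backtracking
+ * bookkeeping is omitted):
+ *
+ *   if (!((x > 0)))
+ *   {
+ *       throw new FailedPredicateException(input, "expr", "x > 0");
+ *   }
+ */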
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<k> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+<if((!isTrue.(last(edges).labelExpr)) && (!last(edges).predicates))>
+else
+{
+<if(eotPredictsAlt)>
+	alt<decisionNumber> = <eotPredictsAlt>;
+<else>
+	<ruleBacktrackFailure()>
+	NoViableAltException nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input, <k>);
+	DebugRecognitionException(nvae);
+	<@noViableAltException()>
+	throw nvae;
+<endif>
+}
+<endif>
+>>
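+
+/* Rough sketch of a fixed-lookahead decision rendered as if-then-else
+ * (decision/state numbers and token names are hypothetical):
+ *
+ *   int LA5_1 = input.LA(1);
+ *   if ((LA5_1 == ID))
+ *   {
+ *       alt5 = 1;
+ *   }
+ *   else if ((LA5_1 == INT))
+ *   {
+ *       alt5 = 2;
+ *   }
+ *   else
+ *   {
+ *       NoViableAltException nvae = new NoViableAltException("", 5, 0, input, 1);
+ *       DebugRecognitionException(nvae);
+ *       throw nvae;
+ *   }
+ */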
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and closer to what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<k> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<k> = input.LA(<k>);<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber> = <eotPredictsAlt>;<! if no edges, don't gen ELSE !>
+<else>
+else
+{
+	alt<decisionNumber> = <eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ((<labelExpr>)<if(predicates)> && (<predicates>)<endif>)
+{
+	<targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch (input.LA(<k>))
+{
+<edges; separator="\n">
+default:
+<if(eotPredictsAlt)>
+	alt<decisionNumber>=<eotPredictsAlt>;
+	break;<\n>
+<else>
+	{
+		<ruleBacktrackFailure()>
+		NoViableAltException nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input, <k>);
+		DebugRecognitionException(nvae);
+		<@noViableAltException()>
+		throw nvae;
+	}
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch (input.LA(<k>))
+{
+<edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch (input.LA(<k>))
+{
+<edges; separator="\n">
+<if(eotPredictsAlt)>
+default:
+	alt<decisionNumber>=<eotPredictsAlt>;
+	break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{it|case <it>:}; separator="\n">
+	{
+	<targetState>
+	}
+	break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+try
+{
+	alt<decisionNumber> = dfa<decisionNumber>.Predict(input);
+}
+catch (NoViableAltException nvae)
+{
+	DebugRecognitionException(nvae);
+	throw;
+}
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+private class DFA<dfa.decisionNumber> : DFA
+{
+	private const string DFA<dfa.decisionNumber>_eotS =
+		"<dfa.javaCompressedEOT; wrap="\"+\n\t\t\"">";
+	private const string DFA<dfa.decisionNumber>_eofS =
+		"<dfa.javaCompressedEOF; wrap="\"+\n\t\t\"">";
+	private const string DFA<dfa.decisionNumber>_minS =
+		"<dfa.javaCompressedMin; wrap="\"+\n\t\t\"">";
+	private const string DFA<dfa.decisionNumber>_maxS =
+		"<dfa.javaCompressedMax; wrap="\"+\n\t\t\"">";
+	private const string DFA<dfa.decisionNumber>_acceptS =
+		"<dfa.javaCompressedAccept; wrap="\"+\n\t\t\"">";
+	private const string DFA<dfa.decisionNumber>_specialS =
+		"<dfa.javaCompressedSpecial; wrap="\"+\n\t\t\"">}>";
+	private static readonly string[] DFA<dfa.decisionNumber>_transitionS =
+		{
+			<dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
+		};
+
+	private static readonly short[] DFA<dfa.decisionNumber>_eot = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eotS);
+	private static readonly short[] DFA<dfa.decisionNumber>_eof = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eofS);
+	private static readonly char[] DFA<dfa.decisionNumber>_min = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
+	private static readonly char[] DFA<dfa.decisionNumber>_max = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
+	private static readonly short[] DFA<dfa.decisionNumber>_accept = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
+	private static readonly short[] DFA<dfa.decisionNumber>_special = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_specialS);
+	private static readonly short[][] DFA<dfa.decisionNumber>_transition;
+
+	static DFA<dfa.decisionNumber>()
+	{
+		int numStates = DFA<dfa.decisionNumber>_transitionS.Length;
+		DFA<dfa.decisionNumber>_transition = new short[numStates][];
+		for ( int i=0; i \< numStates; i++ )
+		{
+			DFA<dfa.decisionNumber>_transition[i] = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_transitionS[i]);
+		}
+	}
+
+	public DFA<dfa.decisionNumber>( BaseRecognizer recognizer<if(dfa.specialStateSTs)>, SpecialStateTransitionHandler specialStateTransition<endif> )
+<if(dfa.specialStateSTs)>
+		: base(specialStateTransition)
+<endif>
+	{
+		this.recognizer = recognizer;
+		this.decisionNumber = <dfa.decisionNumber>;
+		this.eot = DFA<dfa.decisionNumber>_eot;
+		this.eof = DFA<dfa.decisionNumber>_eof;
+		this.min = DFA<dfa.decisionNumber>_min;
+		this.max = DFA<dfa.decisionNumber>_max;
+		this.accept = DFA<dfa.decisionNumber>_accept;
+		this.special = DFA<dfa.decisionNumber>_special;
+		this.transition = DFA<dfa.decisionNumber>_transition;
+	}
+
+	public override string Description { get { return "<dfa.description>"; } }
+
+	public override void Error(NoViableAltException nvae)
+	{
+		DebugRecognitionException(nvae);
+	}
+}<\n>
+<if(dfa.specialStateSTs)>
+private int SpecialStateTransition<dfa.decisionNumber>(DFA dfa, int s, IIntStream _input)<! throws NoViableAltException!>
+{
+	<if(LEXER)>
+	IIntStream input = _input;
+	<endif>
+	<if(PARSER)>
+	ITokenStream input = (ITokenStream)_input;
+	<endif>
+	<if(TREE_PARSER)>
+	ITreeNodeStream input = (ITreeNodeStream)_input;
+	<endif>
+	int _s = s;
+	s = -1;
+	<! pull these outside the switch cases to save space on locals !>
+	int LA<dfa.decisionNumber>_1 = input.LA(1);
+	int index<dfa.decisionNumber>_1 = input.Index;
+	switch (_s)
+	{
+	<dfa.specialStateSTs:{state |case <i0>:<! compressed special state numbers 0..n-1 !>
+	<state>}; separator="\n">
+
+	default:
+		break;
+	}
+
+	if (s >= 0)
+		return s;
+
+<if(backtracking)>
+	if (state.backtracking > 0) {state.failed=true; return -1;}
+<endif>
+	NoViableAltException nvae = new NoViableAltException(dfa.Description, <dfa.decisionNumber>, _s, input);
+	dfa.Error(nvae);
+	throw nvae;
+}
+<endif>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+{
+<if(semPredState)>
+	<! get next lookahead symbol to test edges, then rewind !>
+	input.Rewind();
+<endif>
+	<edges; separator="\nelse ">
+<if(semPredState)>
+	<! return input cursor to state before we rewound !>
+	input.Seek(index<decisionNumber>_1);
+<endif>
+	break;
+}
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ((<labelExpr>)<if(predicates)> && (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+orPredicates(operands) ::= "(<operands; separator=\"||\">)"
+
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "EvaluatePredicate(<pred>_fragment)"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<k>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
+(LA<decisionNumber>_<k><ge()><lower> && LA<decisionNumber>_<k><le()><upper>)
+%>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)<ge()><lower> && input.LA(<k>)<le()><upper>)"
+
+le() ::= "\<="
+ge() ::= ">="
+
+setTest(ranges) ::= <<
+<ranges; separator="||">
+>>
+
+// A T T R I B U T E S
+
+attributeScope(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+protected sealed partial class <scope.name>_scope
+{
+	<scope.attributes:{it|public <it.decl>;}; separator="\n">
+}
+<if(scope.actions.scopeinit)>
+protected void <scope.name>_scopeInit( <scope.name>_scope scope )
+{
+	<scope.actions.scopeinit>
+}
+<else>
+protected virtual void <scope.name>_scopeInit( <scope.name>_scope scope ) {}
+<endif>
+<if(scope.actions.scopeafter)>
+protected void <scope.name>_scopeAfter( <scope.name>_scope scope )
+{
+	<scope.actions.scopeafter>
+}
+<else>
+protected virtual void <scope.name>_scopeAfter( <scope.name>_scope scope ) {}
+<endif>
+protected readonly ListStack\<<scope.name>_scope> <scope.name>_stack = new ListStack\<<scope.name>_scope>();
+<endif>
+<endif>
+>>
+
+globalAttributeScope(scope) ::= <<
+<attributeScope(...)>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<attributeScope(...)>
+>>
+
+returnStructName(it) ::= "<it.name>_return"
+
+returnType(ruleDescriptor) ::= <%
+<if(ruleDescriptor.returnScope.attributes && ruleDescriptor.hasMultipleReturnValues)>
+	<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
+<elseif(ruleDescriptor.hasMultipleReturnValues)>
+	<ruleReturnBaseType()>
+<elseif(ruleDescriptor.hasSingleReturnValue)>
+	<ruleDescriptor.singleValueReturnType>
+<else>
+	void
+<endif>
+%>
+
+/** Generate the C# type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+	<ruleReturnBaseType()>
+<elseif(referencedRule.hasSingleReturnValue)>
+	<referencedRule.singleValueReturnType>
+<else>
+	void
+<endif>
+%>
+
+delegateName(it) ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
+/** Using a type-to-init-value map, try to init a type; if it is not in the
+ *  table it must be an object, so the default value is "null".
+ */
+initValue(typeName) ::= <<
+default(<typeName>)
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <%
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;
+%>
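+
+/* Rough sketch of expanded rule-label declarations (label names are
+ * hypothetical; assumes the default IToken label type, one referenced rule
+ * with multiple return values and one with a single int return value):
+ *
+ *   ParserRuleReturnScope<IToken> e = default(ParserRuleReturnScope<IToken>);
+ *   int n = default(int);
+ */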
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(scope.attributes && ruleDescriptor.hasMultipleReturnValues)>
+<returnScopeModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> sealed partial class <ruleDescriptor:returnStructName()> : <ruleReturnBaseType()><@ruleReturnInterfaces()>
+{
+	<scope.attributes:{it|public <it.decl>;}; separator="\n">
+	<@ruleReturnMembers()>
+}
+<endif>
+>>
+
+ruleReturnBaseType() ::= <%
+<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope\<<labelType>>
+%>
+
+@returnScope.ruleReturnMembers() ::= <<
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{it|<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= <<
+<attr.name; format="id">
+>>
+
+parameterSetAttributeRef(attr,expr) ::= <<
+<attr.name; format="id"> =<expr>;
+>>
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <%
+<if(negIndex)>
+<scope>_stack[<scope>_stack.Count - <negIndex> - 1].<attr.name; format="id">
+<else>
+<if(index)>
+<scope>_stack[<index>].<attr.name; format="id">
+<else>
+<scope>_stack.Peek().<attr.name; format="id">
+<endif>
+<endif>
+%>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
+<if(negIndex)>
+<scope>_stack[<scope>_stack.Count - <negIndex> - 1].<attr.name; format="id"> = <expr>;
+<else>
+<if(index)>
+<scope>_stack[<index>].<attr.name; format="id"> = <expr>;
+<else>
+<scope>_stack.Peek().<attr.name; format="id"> = <expr>;
+<endif>
+<endif>
+%>
+
+/** $x is either a global scope or x is a rule with a dynamic scope; refers
+ *  to the stack itself, not the top of the stack.  This is useful for predicates
+ *  like {$function.Count>0 && $function::name.Equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+(<scope>!=null?((<returnType(referencedRule)>)<scope>).<attr.name; format="id">:<initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+%>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name; format="id">
+<else>
+<attr.name; format="id">
+<endif>
+%>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name; format="id"> =<expr>;
+<else>
+<attr.name; format="id"> =<expr>;
+<endif>
+%>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.Text:default(string))"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.Type:0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.Line:0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.CharPositionInLine:0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.Channel:0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.TokenIndex:0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int.Parse(<scope>.Text):0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.Start):default(<labelType>))"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.Stop):default(<labelType>))"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?((<ASTLabelType>)<scope>.Tree):default(<ASTLabelType>))"
+ruleLabelPropertyRef_text(scope,attr) ::= <%
+<if(TREE_PARSER)>
+(<scope>!=null?(input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(<scope>.Start),
+  input.TreeAdaptor.GetTokenStopIndex(<scope>.Start))):default(string))
+<else>
+(<scope>!=null?input.ToString(<scope>.Start,<scope>.Stop):default(string))
+<endif>
+%>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?<scope>.Template:null)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::=
+    "(<scope>!=null?<scope>.Type:0)"
+
+lexerRuleLabelPropertyRef_line(scope,attr) ::=
+    "(<scope>!=null?<scope>.Line:0)"
+
+lexerRuleLabelPropertyRef_pos(scope,attr) ::=
+    "(<scope>!=null?<scope>.CharPositionInLine:-1)"
+
+lexerRuleLabelPropertyRef_channel(scope,attr) ::=
+    "(<scope>!=null?<scope>.Channel:0)"
+
+lexerRuleLabelPropertyRef_index(scope,attr) ::=
+    "(<scope>!=null?<scope>.TokenIndex:0)"
+
+lexerRuleLabelPropertyRef_text(scope,attr) ::=
+    "(<scope>!=null?<scope>.Text:default(string))"
+
+lexerRuleLabelPropertyRef_int(scope,attr) ::=
+    "(<scope>!=null?int.Parse(<scope>.Text):0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "retval.Start"
+rulePropertyRef_stop(scope,attr) ::= "retval.Stop"
+rulePropertyRef_tree(scope,attr) ::= "retval.Tree"
+rulePropertyRef_text(scope,attr) ::= <%
+<if(TREE_PARSER)>
+input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(retval.Start),
+  input.TreeAdaptor.GetTokenStopIndex(retval.Start))
+<else>
+input.ToString(retval.Start,input.LT(-1))
+<endif>
+%>
+rulePropertyRef_st(scope,attr) ::= "retval.Template"
+
+lexerRulePropertyRef_text(scope,attr) ::= "Text"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "int.Parse(<scope>.Text)"
+
+// Setting $st and $tree is allowed in the local rule; everything else
+// is flagged as an error.
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.Tree = <expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.Template =<expr>;"
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <%
+<if(backtracking)>
+if (<actions.(actionScope).synpredgate>)<\n>
+{<\n>
+<@indentedAction()><\n>
+}
+<else>
+<action>
+<endif>
+%>
+
+@execAction.indentedAction() ::= <<
+	<action>
+>>
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+public static readonly BitSet <name> = new BitSet(new ulong[]{<words64:{it|<it>UL};separator=",">});
+>>
+
+codeFileExtension() ::= ".cs"
+
+true_value() ::= "true"
+false_value() ::= "false"
+
+isTrue ::= [
+	"true" : true,
+	default : false
+]
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/Dbg.stg
new file mode 100644
index 0000000..b537ff1
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/Dbg.stg
@@ -0,0 +1,313 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2007-2008 Johannes Luber
+ * Copyright (c) 2005-2007 Kunle Odutola
+ * Copyright (c) 2011 Sam Harwell
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/** Template overrides to add debugging to normal Java output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+
+@outputFile.debugPreprocessor() ::= "#define ANTLR_DEBUG"
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+using Antlr.Runtime.Debug;
+using IOException = System.IO.IOException;
+>>
+
+@genericParser.members() ::= <<
+<if(grammar.grammarIsRoot)>
+public static readonly string[] ruleNames =
+	new string[]
+	{
+		"invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n	", separator=", ">
+	};<\n>
+<endif>
+<if(grammar.grammarIsRoot)><! grammar imports other grammar(s) !>
+int ruleLevel = 0;
+public virtual int RuleLevel { get { return ruleLevel; } }
+public virtual void IncRuleLevel() { ruleLevel++; }
+public virtual void DecRuleLevel() { ruleLevel--; }
+<if(profile)>
+<ctorForProfilingRootGrammar()>
+<else>
+<ctorForRootGrammar()>
+<endif>
+<ctorForPredefinedListener()>
+<else><! imported grammar !>
+public int RuleLevel { get { return <grammar.delegators:{g| <g:delegateName()>}>.RuleLevel; } }
+public void IncRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.IncRuleLevel(); }
+public void DecRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.DecRuleLevel(); }
+<ctorForDelegateGrammar()>
+<endif>
+<if(profile)>
+public override bool AlreadyParsedRule( IIntStream input, int ruleIndex )
+{
+	int stopIndex = GetRuleMemoization(ruleIndex, input.Index);
+	((Profiler)dbg).ExamineRuleMemoization(input, ruleIndex, stopIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+	return base.AlreadyParsedRule(input, ruleIndex);
+}<\n>
+public override void Memoize( IIntStream input, int ruleIndex, int ruleStartIndex )
+{
+	((Profiler)dbg).Memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+	base.Memoize(input, ruleIndex, ruleStartIndex);
+}<\n>
+<endif>
+protected virtual bool EvalPredicate( bool result, string predicate )
+{
+	dbg.SemanticPredicate( result, predicate );
+	return result;
+}<\n>
+>>
+
+ctorForRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<! Same except we add port number and profile stuff if root grammar !>
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input )
+	: this( input, DebugEventSocketProxy.DefaultDebuggerPort, new RecognizerSharedState() )
+{
+}
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, int port, RecognizerSharedState state )
+	: base( input, state )
+{
+	<createListenerAndHandshake()>
+	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>( input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
+	<parserCtorBody()>
+	<@finally()>
+}<\n>
+>>
+
+ctorForProfilingRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input )
+	: this( input, new Profiler(null), new RecognizerSharedState() )
+{
+}
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state )
+	: base( input, dbg, state )
+{
+	Profiler p = (Profiler)dbg;
+	p.setParser(this);
+	<grammar.directDelegates:
+	 {g|<g:delegateName()> = new <g.recognizerName>( input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
+	<parserCtorBody()>
+	<@finally()>
+}
+<\n>
+>>
+
+/** Basically we don't want to set any dbg listeners, as the root will have it. */
+ctorForDelegateGrammar() ::= <<
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
+	: base( input, dbg, state )
+{
+	<grammar.directDelegates:
+	 {g|<g:delegateName()> = new <g.recognizerName>( input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
+	<parserCtorBody()>
+}<\n>
+>>
+
+ctorForPredefinedListener() ::= <<
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg )
+	<@superClassRef>: base( input, dbg, new RecognizerSharedState() )<@end>
+{
+<if(profile)>
+	Profiler p = (Profiler)dbg;
+	p.setParser(this);
+<endif>
+	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+	<parserCtorBody()>
+	<@finally()>
+}<\n>
+>>
+
+createListenerAndHandshake() ::= <<
+<if(TREE_PARSER)>
+DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, input.TreeAdaptor );<\n>
+<else>
+DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, null );<\n>
+<endif>
+DebugListener = proxy;
+try
+{
+	proxy.Handshake();
+}
+catch ( IOException ioe )
+{
+	ReportError( ioe );
+}
+>>
+
+@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+/*
+ * Many of the following rules were merged into CSharp2.stg.
+ */
+
+@rule.preamble() ::= <<
+if (RuleLevel == 0)
+	DebugListener.Commence();
+IncRuleLevel();
+>>
+//@rule.preamble() ::= <<
+//try
+//{
+//	dbg.EnterRule( GrammarFileName, "<ruleName>" );
+//	if ( RuleLevel == 0 )
+//	{
+//		dbg.Commence();
+//	}
+//	IncRuleLevel();
+//	dbg.Location( <ruleDescriptor.tree.line>, <ruleDescriptor.tree.charPositionInLine> );<\n>
+//>>
+
+@rule.postamble() ::= <<
+DecRuleLevel();
+if (RuleLevel == 0)
+	DebugListener.Terminate();
+>>
+//@rule.postamble() ::= <<
+//dbg.Location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);<\n>
+//}
+//finally
+//{
+//	dbg.ExitRule( GrammarFileName, "<ruleName>" );
+//	DecRuleLevel();
+//	if ( RuleLevel == 0 )
+//	{
+//		dbg.Terminate();
+//	}
+//}<\n>
+//>>
+
+//@insertSynpreds.start() ::= "dbg.BeginBacktrack( state.backtracking );"
+//@insertSynpreds.stop() ::= "dbg.EndBacktrack( state.backtracking, success );"
+
+// Common debug event triggers used by region overrides below
+
+//enterSubRule() ::= <<
+//try
+//{
+//	dbg.EnterSubRule( <decisionNumber> );<\n>
+//>>
+
+//exitSubRule() ::= <<
+//}
+//finally
+//{
+//	dbg.ExitSubRule( <decisionNumber> );
+//}<\n>
+//>>
+
+//enterDecision() ::= <<
+//try
+//{
+//	dbg.EnterDecision( <decisionNumber> );<\n>
+//>>
+
+//exitDecision() ::= <<
+//}
+//finally
+//{
+//	dbg.ExitDecision( <decisionNumber> );
+//}<\n>
+//>>
+
+//enterAlt(n) ::= "dbg.EnterAlt( <n> );<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+//@block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+//@block.postdecision() ::= "<exitDecision()>"
+
+//@block.postbranch() ::= "<exitSubRule()>"
+
+//@ruleBlock.predecision() ::= "<enterDecision()>"
+
+//@ruleBlock.postdecision() ::= "<exitDecision()>"
+
+//@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+//@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+//@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+//@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+//@positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+//@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+//@positiveClosureBlock.earlyExitException() ::=
+//	"dbg.RecognitionException( eee<decisionNumber> );<\n>"
+
+//@closureBlock.preloop() ::= "<enterSubRule()>"
+
+//@closureBlock.postloop() ::= "<exitSubRule()>"
+
+//@closureBlock.predecision() ::= "<enterDecision()>"
+
+//@closureBlock.postdecision() ::= "<exitDecision()>"
+
+//@altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+//@element.prematch() ::=
+//	"dbg.Location( <it.line>, <it.pos> );"
+
+//@matchSet.mismatchedSetException() ::=
+//	"dbg.RecognitionException( mse );"
+
+//@dfaState.noViableAltException() ::= "dbg.RecognitionException( nvae );"
+
+//@dfaStateSwitch.noViableAltException() ::= "dbg.RecognitionException( nvae );"
+
+//dfaDecision(decisionNumber,description) ::= <<
+//try
+//{
+//	isCyclicDecision = true;
+//	<super.dfaDecision(...)>
+//}
+//catch ( NoViableAltException nvae )
+//{
+//	dbg.RecognitionException( nvae );
+//	throw nvae;
+//}
+//>>
+
+//@cyclicDFA.errorMethod() ::= <<
+//public override void Error( NoViableAltException nvae )
+//{
+//	((DebugParser)recognizer).dbg.RecognitionException( nvae );
+//}
+//>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+EvalPredicate(<pred>, "<description>")
+>>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ST.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/CSharp2/ST.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/AST.stg
new file mode 100644
index 0000000..d1d765a
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/AST.stg
@@ -0,0 +1,428 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+
+<if(!TREE_PARSER)>
+<! tree parser would already have imported !>
+using Antlr.Runtime.Tree;
+using RewriteRuleITokenStream = Antlr.Runtime.Tree.RewriteRuleTokenStream;
+<endif>
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+<parserMembers()>
+>>
+
+parserCtorBody() ::= <<
+<super.parserCtorBody()>
+<treeAdaptorType()> treeAdaptor = default(<treeAdaptorType()>);
+CreateTreeAdaptor(ref treeAdaptor);
+TreeAdaptor = treeAdaptor<if(!actions.(actionScope).treeAdaptorType)> ?? new CommonTreeAdaptor()<endif>;
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+// Implement this function in your helper file to use a custom tree adaptor
+partial void CreateTreeAdaptor(ref <treeAdaptorType()> adaptor);
+
+private <treeAdaptorType()> adaptor;
+
+public <treeAdaptorType()> TreeAdaptor
+{
+	get
+	{
+		return adaptor;
+	}
+
+	set
+	{
+		this.adaptor = value;
+		<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
+	}
+}
+>>
+
+treeAdaptorType() ::= <<
+<actions.(actionScope).treeAdaptorType; null="ITreeAdaptor">
+>>
+
+ruleReturnBaseType() ::= <%
+Ast<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope\<<ASTLabelType>, <labelType>>
+%>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> root_0 = default(<ASTLabelType>);<\n>
+>>
+
+ruleLabelDefs(ruleDescriptor, labelType, ASTLabelType, rewriteElementType) ::= <%
+<super.ruleLabelDefs(...)>
+<if(!ruleDescriptor.isSynPred)>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+	:{it|<\n><ASTLabelType> <it.label.text>_tree = default(<ASTLabelType>);}>
+<ruleDescriptor.tokenListLabels:{it|<\n><ASTLabelType> <it.label.text>_tree = default(<ASTLabelType>);}>
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+	:{it|<\n>RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}>
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+	:{it|<\n>RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}>
+<endif>
+%>
+
+/** When doing auto AST construction, we must define some variables;
+ *  these should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+@alt.declarations() ::= <<
+<if(autoAST && outerAlt && !rewriteMode && !ruleDescriptor.isSynPred)>
+root_0 = (<ASTLabelType>)adaptor.Nil();
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<token>.Add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefTrack(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<token>.Add(<label>);
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<rule.name>.Add(<label>.Tree);
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>stream_<rule>.Add(<label>.Tree);
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	referencedWildcardLabels,
+	referencedWildcardListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::= <<
+<\n>{
+// AST REWRITE
+// elements: <referencedElementsDeep; separator=", ">
+// token labels: <referencedTokenLabels; separator=", ">
+// rule labels: <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels: <referencedRuleListLabels; separator=", ">
+// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<if(backtracking)>
+if (<actions.(actionScope).synpredgate>) {
+<endif>
+<prevRuleRootRef()>.Tree = root_0;
+<rewriteCodeLabels()>
+root_0 = (<ASTLabelType>)adaptor.Nil();
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER&&rewriteMode)>
+<prevRuleRootRef()>.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+if (<prevRuleRootRef()>.Tree != null)
+	input.ReplaceChildren(adaptor.GetParent(retval.Start), adaptor.GetChildIndex(retval.Start), adaptor.GetChildIndex(_last), retval.Tree);
+<endif>
+<! if parser or tree-parser && rewrite!=true, we need to set result !>
+<if(!TREE_PARSER||!rewriteMode)>
+<prevRuleRootRef()>.Tree = root_0;
+<endif>
+<if(backtracking)>
+}
+<endif>
+}
+
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+	:{it|RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
+	separator="\n"
+>
+<referencedTokenListLabels
+	:{it|RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
+	separator="\n"
+>
+<referencedWildcardLabels
+	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
+	separator="\n"
+>
+<referencedWildcardListLabels
+	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
+	separator="\n"
+>
+<referencedRuleLabels
+	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.Tree:null);};
+	separator="\n"
+>
+<referencedRuleListLabels
+	:{it|RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"token <it>",list_<it>);};
+	separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather than the shallow list like other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if (<referencedElementsDeep:{el | stream_<el>.HasNext}; separator="||">)
+{
+	<alt>
+}
+<referencedElementsDeep:{el | stream_<el>.Reset();<\n>}>
+>>
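+
+/* Rough sketch of an expanded optional rewrite block such as -> ^(STAT (expr)?)
+ * (token, rule, and stream names are hypothetical):
+ *
+ *   if (stream_expr.HasNext)
+ *   {
+ *       adaptor.AddChild(root_1, stream_expr.NextTree());
+ *   }
+ *   stream_expr.Reset();
+ */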
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.HasNext}; separator="||"> )
+{
+	<alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+if (!(<referencedElements:{el | stream_<el>.HasNext}; separator="||">))
+{
+	throw new RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.HasNext}; separator="||"> )
+{
+	<alt>
+}
+<referencedElements:{el | stream_<el>.Reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>)
+{
+	<a.alt>
+}
+<else>
+{
+	<a.alt>
+}
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.Nil();
+<root:rewriteElement()>
+<children:rewriteElement()>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <%
+<@pregen()>
+DebugLocation(<e.line>, <e.pos>);<\n>
+<e.el>
+%>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,args,terminalOptions={}) ::= <<
+adaptor.AddChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextNode());<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,args,terminalOptions={}) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,elementIndex,terminalOptions={}) ::= <<
+adaptor.AddChild(root_<treeLevel>, <createImaginaryNode(tokenType=token, ...)>);<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,elementIndex,terminalOptions={}) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<createImaginaryNode(tokenType=token, ...)>, root_<treeLevel>);<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean the previous value.  I am reusing the
+ *  variable 'tree' sitting in the retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assignment will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<rule>.NextTree());<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<rule>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+adaptor.AddChild(root_<treeLevel>, <action>);<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<action>, root_<treeLevel>);<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(stream_<label>.NextNode(), root_<treeLevel>);<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+adaptor.AddChild(root_<treeLevel>, stream_<label>.NextTree());<\n>
+>>
+
+createImaginaryNode(tokenType,args,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+<! new MethodNode(IDLabel, args) !>
+new <terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+(<ASTLabelType>)adaptor.Create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
+<endif>
+%>
+
+createRewriteNodeFromElement(token,args,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+new <terminalOptions.node>(stream_<token>.NextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+adaptor.Create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.NextNode()
+<endif>
+<endif>
+%>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTDbg.stg
new file mode 100644
index 0000000..b4b245a
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTDbg.stg
@@ -0,0 +1,98 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : Java by code generator.
+ */
+
+parserMembers() ::= <<
+	// Implement this function in your helper file to use a custom tree adaptor
+	partial void InitializeTreeAdaptor();
+	protected DebugTreeAdaptor adaptor;
+
+	public ITreeAdaptor TreeAdaptor
+	{
+		get
+		{
+			return adaptor;
+		}
+		set
+		{
+<if(grammar.grammarIsRoot)>
+			this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
+<else>
+			this.adaptor = (DebugTreeAdaptor)adaptor; // delegator sends dbg adaptor
+<endif><\n>
+			<grammar.directDelegates:{g|<g:delegateName()>.TreeAdaptor = this.adaptor;}>
+		}
+	}<\n>
+>>
+
+parserCtorBody() ::= <<
+<super.parserCtorBody()>
+>>
+
+createListenerAndHandshake() ::= <<
+DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, <if(TREE_PARSER)>input.TreeAdaptor<else>adaptor<endif> );
+DebugListener = proxy;
+<inputStreamType> = new Debug<inputStreamType>( input, proxy );
+try
+{
+	proxy.Handshake();
+}
+catch ( IOException ioe )
+{
+	ReportError( ioe );
+}
+>>
+
+@ctorForRootGrammar.finally() ::= <<
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;
+proxy.TreeAdaptor = adap;
+>>
+
+@ctorForProfilingRootGrammar.finally() ::=<<
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;
+>>
+
+@ctorForPredefinedListener.superClassRef() ::= ": base( input, dbg )"
+
+@ctorForPredefinedListener.finally() ::=<<
+<if(grammar.grammarIsRoot)><! don't create new adaptor for delegates !>
+ITreeAdaptor adap = new CommonTreeAdaptor();
+TreeAdaptor = adap;<\n>
+<endif>
+>>
+
+//@rewriteElement.pregen() ::= "dbg.Location( <e.line>, <e.pos> );"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTParser.stg
new file mode 100644
index 0000000..b97d44b
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTParser.stg
@@ -0,0 +1,203 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+
+@rule.setErrorReturnValue() ::= <<
+retval.Tree = (<ASTLabelType>)adaptor.ErrorNode(input, retval.Start, input.LT(-1), re);
+<! System.out.WriteLine("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <%
+<super.tokenRef(...)>
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)><\n>if (state.backtracking == 0) {<endif>
+<\n><label>_tree = <createNodeFromToken(...)>;
+<\n>adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)><\n>}<endif>
+<endif>
+%>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex,terminalOptions={}) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <%
+<super.tokenRef(...)>
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)><\n>if (<actions.(actionScope).synpredgate>) {<endif>
+<\n><label>_tree = <createNodeFromToken(...)>;
+<\n>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)><\n>}<endif>
+<endif>
+%>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+// SET AST
+
+// The match-set templates are interesting in that they use an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region mechanism because I need to pass
+// different chunks depending on the operator.  I don't like encoding the
+// operator in the template name, since the number of templates gets large,
+// but this is the most flexible approach--as opposed to having the code
+// generator call matchSet and then add root code or rule-root code plus
+// list-label code plus ...  Those combinations might require complicated
+// code rather than code that is simply tacked on.  Investigate that
+// refactoring when I have more time.
+
+matchSet(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <<
+<super.matchSet(postmatchCode={<if(!ruleDescriptor.isSynPred)><if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>adaptor.AddChild(root_0, <createNodeFromToken(...)>);<endif>}, ...)>
+>>
+
+matchRuleBlockSet(s,label,elementIndex,postmatchCode,treeLevel="0",terminalOptions={}) ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,elementIndex,debug,terminalOptions={}) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);
+<endif>
+<super.matchSet(postmatchCode={<if(!ruleDescriptor.isSynPred)><if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<createNodeFromToken(...)>, root_0);<endif>}, ...)>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <%
+<super.ruleRef(...)>
+<if(!ruleDescriptor.isSynPred)>
+<\n><if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>adaptor.AddChild(root_0, <label>.Tree);
+<endif>
+%>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_0);
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabelElem(elem={<label>.Tree},elemType=ASTLabelType,...)>
+>>
+
+// WILDCARD AST
+
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.wildcard(...)>
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+adaptor.AddChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(token=[],...)>"
+
+wildcardRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.wildcard(...)>
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.Create(<label>);
+root_0 = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+createNodeFromToken(label,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>)
+<else>
+(<ASTLabelType>)adaptor.Create(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>)
+<endif>
+%>
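+
+// For example, with a hypothetical custom node class MyNode supplied via the
+// node terminal option, the expansion is
+//
+//   new MyNode(x)
+//
+// whereas a plain terminal falls back to
+//
+//   (CommonTree)adaptor.Create(x)
+//
+// with the optional type/text options passed through as extra arguments.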
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+adaptor.SetTokenBoundaries(retval.Tree, retval.Start, retval.Stop);
+<if(backtracking)>}<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTTreeParser.stg
new file mode 100644
index 0000000..c5529b2
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ASTTreeParser.stg
@@ -0,0 +1,377 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<if(!ruleDescriptor.isSynPred)>
+<ASTLabelType> _first_0 = default(<ASTLabelType>);
+<ASTLabelType> _last = default(<ASTLabelType>);
+<endif>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel=false, treeLevel=false) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<if(rewriteMode)>
+retval.Tree = (<ASTLabelType>)_first_0;
+if (adaptor.GetParent(retval.Tree)!=null && adaptor.IsNil(adaptor.GetParent(retval.Tree)))
+	retval.Tree = (<ASTLabelType>)adaptor.GetParent(retval.Tree);
+<endif>
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+{
+<ASTLabelType> _save_last_<treeLevel> = _last;
+<ASTLabelType> _first_<treeLevel> = default(<ASTLabelType>);
+<if(!rewriteMode)>
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.Nil();
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
+<if(root.el.rule)>
+if (_first_<enclosingTreeLevel> == null) _first_<enclosingTreeLevel> = <root.el.label>.Tree;
+<else>
+if (_first_<enclosingTreeLevel> == null) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if (input.LA(1) == TokenTypes.Down) {
+	Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+	<children:element()>
+	Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+adaptor.AddChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}
+<else>
+<super.tree(...)>
+<endif>
+>>
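+
+// For illustration, matching ^(PLUS expr expr) in a tree grammar with
+// output=AST comes out roughly as (tree level and names are illustrative):
+//
+//   _last = (CommonTree)input.LT(1);
+//   {
+//       CommonTree _save_last_1 = _last;
+//       CommonTree _first_1 = default(CommonTree);
+//       CommonTree root_1 = (CommonTree)adaptor.Nil();
+//       // ... match the PLUS root and hang it off root_1 ...
+//       Match(input, TokenTypes.Down, null);
+//       // ... match the two expr children, adding them to root_1 ...
+//       Match(input, TokenTypes.Up, null);
+//       adaptor.AddChild(root_0, root_1);
+//       _last = _save_last_1;
+//   }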
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) except that we also
+ *  set _last
+ */
+tokenRefBang(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<else>
+<super.tokenRefBang(...)>
+<endif>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<endif><\n>
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
+if (_first_<treeLevel> == null) _first_<treeLevel> = <label>;
+<endif>
+<else>
+<super.tokenRef(...)>
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<tokenRef(...)>
+<listLabelElem(elem=label,...)>
+<else>
+<super.tokenRefAndListLabel(...)>
+<endif>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+<else>
+<super.tokenRefRuleRoot(...)>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<tokenRefRuleRoot(...)>
+<listLabelElem(elem=label,...)>
+<else>
+<super.tokenRefRuleRootAndListLabel(...)>
+<endif>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.DupTree(<label>);
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
+if (_first_<treeLevel> == null) _first_<treeLevel> = <label>;
+<endif>
+<else>
+<super.wildcard(...)>
+<endif>
+>>
+
+// SET AST
+
+matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<endif><\n>
+adaptor.AddChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>\}<endif>
+<endif>
+}, ...
+)>
+<else>
+<super.matchSet(...)>
+<endif>
+>>
+
+matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<matchSet(...)>
+<noRewrite(...)> <! set return tree !>
+<else>
+<super.matchRuleBlockSet(...)>
+<endif>
+>>
+
+matchSetBang(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(...)>
+<else>
+<super.matchSetBang(...)>
+<endif>
+>>
+
+matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.DupNode(<if(terminalOptions.type)><terminalOptions.type>,<endif><label><if(terminalOptions.text)>,<terminalOptions.text; format="string"><endif>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>\}<endif>
+<endif>
+}, ...
+)>
+<else>
+<super.matchSetRuleRoot(...)>
+<endif>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>)<endif>
+<if(!rewriteMode)>
+adaptor.AddChild(root_<treeLevel>, <label>.Tree);
+<else> <! rewrite mode !>
+if (_first_<treeLevel> == null) _first_<treeLevel> = <label>.Tree;
+<endif>
+<else>
+<super.ruleRef(...)>
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<ruleRef(...)>
+<listLabelElem(elem={<label>.Tree},...)>
+<else>
+<super.ruleRefAndListLabel(...)>
+<endif>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.BecomeRoot(<label>.Tree, root_<treeLevel>);
+<endif>
+<else>
+<super.ruleRefRuleRoot(...)>
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<ruleRefRuleRoot(...)>
+<listLabelElem(elem={<label>.Tree},...)>
+<else>
+<super.ruleRefRuleRootAndListLabel(...)>
+<endif>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrackAndListLabel(...)>
+<else>
+<super.ruleRefTrackAndListLabel(...)>
+<endif>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRootTrack(...)>
+<else>
+<super.ruleRefRuleRootTrack(...)>
+<endif>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+<else>
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+<endif>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change NextToken to NextNode.
+ */
+createRewriteNodeFromElement(token,terminalOptions,args) ::= <%
+<if(terminalOptions.node)>
+new <terminalOptions.node>(<if(terminalOptions.type)><terminalOptions.type>,<endif>stream_<token>.NextNode())
+<else>
+stream_<token>.NextNode()
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!ruleDescriptor.isSynPred)>
+<if(!rewriteMode)>
+<if(backtracking)>if (<actions.(actionScope).synpredgate>) {<endif>
+retval.Tree = (<ASTLabelType>)adaptor.RulePostProcessing(root_0);
+<if(backtracking)>}<endif>
+<endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/CSharp3.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/CSharp3.stg
new file mode 100644
index 0000000..c9229e1
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/CSharp3.stg
@@ -0,0 +1,1749 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+csharpVisibilityMap ::= [
+	"private":"private",
+	"protected":"protected",
+	"public":"public",
+	"fragment":"private",
+	default:"private"
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(	LEXER,PARSER,TREE_PARSER, actionScope, actions,
+			docComment, recognizer,
+			name, tokens, tokenNames, rules, cyclicDFAs,
+			bitsets, buildTemplate, buildAST, rewriteMode, profile,
+			backtracking, synpreds, memoize, numRules,
+			fileName, ANTLRVersion, generatedTimestamp, trace,
+			scopes, superClass, literals) ::=
+<<
+//------------------------------------------------------------------------------
+// \<auto-generated>
+//     This code was generated by a tool.
+//     ANTLR Version: <ANTLRVersion>
+//
+//     Changes to this file may cause incorrect behavior and will be lost if
+//     the code is regenerated.
+// \</auto-generated>
+//------------------------------------------------------------------------------
+
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+// The variable 'variable' is assigned but its value is never used.
+#pragma warning disable 219
+// Unreachable code detected.
+#pragma warning disable 162
+// Missing XML comment for publicly visible type or member 'Type_or_Member'
+#pragma warning disable 1591
+// CLS compliance checking will not be performed on 'type' because it is not visible from outside this assembly.
+#pragma warning disable 3019
+
+<actions.(actionScope).header>
+
+<@imports>
+using System.Collections.Generic;
+using Antlr.Runtime;
+using Antlr.Runtime.Misc;
+<if(TREE_PARSER)>
+using Antlr.Runtime.Tree;
+using RewriteRuleITokenStream = Antlr.Runtime.Tree.RewriteRuleTokenStream;
+<endif>
+<@end>
+<if(actions.(actionScope).namespace)>
+namespace <actions.(actionScope).namespace>
+{
+<endif>
+<docComment>
+<recognizer>
+<if(actions.(actionScope).namespace)>
+
+} // namespace <actions.(actionScope).namespace>
+<endif>
+>>
+
+lexerInputStreamType() ::= <<
+<actions.(actionScope).inputStreamType; null="ICharStream">
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="CommonToken",
+	  superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Lexer<endif>},
+	  rewriteElementType={}, ASTLabelType={}) ::= <<
+[System.CodeDom.Compiler.GeneratedCode("ANTLR", "<ANTLRVersion>")]
+[System.CLSCompliant(false)]
+<parserModifier(grammar=grammar, actions=actions)> partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
+{
+	<tokens:{it|public const int <it.name; format="id">=<it.type>;}; separator="\n">
+	<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+	<actions.lexer.members>
+
+	// delegates
+	<grammar.delegates:
+		 {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
+	// delegators
+	<grammar.delegators:
+		 {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
+	<last(grammar.delegators):{g|private <g.recognizerName> gParent;}>
+
+	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>()<! needed by subclasses !>
+	{
+		OnCreated();
+	}
+
+	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<lexerInputStreamType()> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
+		: this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
+	{
+	}
+
+	<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<lexerInputStreamType()> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+		: base(input, state)
+	{
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+		state.ruleMemo = new System.Collections.Generic.Dictionary\<int, int>[<numRules>+1];<\n><! index from 1..n !>
+<endif>
+<endif>
+		<grammar.directDelegates:
+		 {g|<g:delegateName()> = new <g.recognizerName>(input, this.state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+		<grammar.delegators:
+		 {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+		<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+
+		OnCreated();
+	}
+	public override string GrammarFileName { get { return "<fileName>"; } }
+
+<if(grammar.hasDelegates)>
+	public override <lexerInputStreamType()> CharStream
+	{
+		get
+		{
+			return base.CharStream;
+		}
+		set
+		{
+			base.CharStream = value;
+			<grammar.directDelegates:
+			 {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+			<grammar.delegators:
+			 {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+			<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+		}
+	}
+
+<if(grammar.delegates)>
+	public override void SetState(RecognizerSharedState state)
+	{
+		base.SetState(state);
+		<grammar.delegates:{g|<g:delegateName()>.SetState(state);}; separator="\n">
+	}
+<endif>
+
+<endif>
+<if(filterMode)>
+	<filteringNextToken()>
+<endif>
+
+
+	partial void OnCreated();
+	partial void EnterRule(string ruleName, int ruleIndex);
+	partial void LeaveRule(string ruleName, int ruleIndex);
+
+	<rules; separator="\n">
+
+	<insertLexerSynpreds(synpreds)>
+
+	#region DFA
+	<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+
+	protected override void InitDFAs()
+	{
+		base.InitDFAs();
+		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this<if(dfa.specialStateSTs)>, SpecialStateTransition<dfa.decisionNumber><endif>);}; separator="\n">
+	}
+
+	<cyclicDFAs:cyclicDFA()><! dump tables for all DFA !>
+	#endregion
+
+}
+>>
+
+/** An override of Lexer.NextToken() that backtracks over mTokens() looking
+ *  for matches.  No error is reported on a failed match; we just rewind,
+ *  consume one character, and try again.  backtracking needs to be set as well.
+ *  Rule memoization happens only at levels above 1 because we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+public override IToken NextToken()
+{
+	while (true)
+	{
+		if (input.LA(1) == CharStreamConstants.EndOfFile)
+		{
+			IToken eof = new CommonToken((ICharStream)input, CharStreamConstants.EndOfFile, TokenChannels.Default, input.Index, input.Index);
+			eof.Line = Line;
+			eof.CharPositionInLine = CharPositionInLine;
+			return eof;
+		}
+		state.token = null;
+		state.channel = TokenChannels.Default;
+		state.tokenStartCharIndex = input.Index;
+		state.tokenStartCharPositionInLine = input.CharPositionInLine;
+		state.tokenStartLine = input.Line;
+		state.text = null;
+		try
+		{
+			int m = input.Mark();
+			state.backtracking=1;<! means we won't throw slow exception !>
+			state.failed=false;
+			mTokens();
+			state.backtracking=0;
+			<! mTokens backtracks with synpred at backtracking==2
+			   and we set the synpredgate to allow actions at level 1. !>
+			if (state.failed)
+			{
+				input.Rewind(m);
+				input.Consume();<! advance one char and try again !>
+			}
+			else
+			{
+				Emit();
+				return state.token;
+			}
+		}
+		catch (RecognitionException re)
+		{
+			// shouldn't happen in backtracking mode, but...
+			ReportError(re);
+			Recover(re);
+		}
+	}
+}
+
+public override void Memoize(IIntStream input, int ruleIndex, int ruleStartIndex)
+{
+	if (state.backtracking > 1)
+		base.Memoize(input, ruleIndex, ruleStartIndex);
+}
+
+public override bool AlreadyParsedRule(IIntStream input, int ruleIndex)
+{
+	if (state.backtracking > 1)
+		return base.AlreadyParsedRule(input, ruleIndex);
+
+	return false;
+}
+>>
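+
+// The override above is only emitted for lexer grammars that turn on filter
+// mode, e.g. (hypothetical grammar):
+//
+//   lexer grammar FuzzyLexer;
+//   options { filter = true; }
+//
+// Each mTokens() attempt then runs at backtracking level 1, so a failed match
+// just rewinds and consumes one character instead of reporting an error.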
+
+actionGate() ::= "state.backtracking == 0"
+
+filteringActionGate() ::= "state.backtracking == 1"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              labelType, members, rewriteElementType,
+              filterMode, ASTLabelType="object") ::= <<
+[System.CodeDom.Compiler.GeneratedCode("ANTLR", "<ANTLRVersion>")]
+[System.CLSCompliant(false)]
+<parserModifier(grammar=grammar, actions=actions)> partial class <grammar.recognizerName> : <@superClassName><superClass><@end>
+{
+<if(grammar.grammarIsRoot)>
+	internal static readonly string[] tokenNames = new string[] {
+		"\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
+	};
+<endif>
+	<tokens:{it|public const int <it.name; format="id">=<it.type>;}; separator="\n">
+
+<if(grammar.delegates)>
+	// delegates
+	<grammar.delegates:
+		 {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
+<endif>
+<if(grammar.delegators)>
+	// delegators
+	<grammar.delegators:
+		 {g|private <g.recognizerName> <g:delegateName()>;}; separator="\n">
+	<last(grammar.delegators):{g|private <g.recognizerName> gParent;}>
+<endif>
+
+<if(grammar.delegates)>
+	public override void SetState(RecognizerSharedState state)
+	{
+		base.SetState(state);
+		<grammar.delegates:{g|<g:delegateName()>.SetState(state);}; separator="\n">
+	}
+
+<if(TREE_PARSER)>
+	public override void SetTreeNodeStream(ITreeNodeStream input)
+	{
+		base.SetTreeNodeStream(input);
+		<grammar.delegates:{g|<g:delegateName()>.SetTreeNodeStream(input);}; separator="\n">
+	}
+<endif>
+<endif>
+
+	<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+	<@members()>
+
+	public override string[] TokenNames { get { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; } }
+	public override string GrammarFileName { get { return "<fileName>"; } }
+
+	<members>
+
+	partial void OnCreated();
+	partial void EnterRule(string ruleName, int ruleIndex);
+	partial void LeaveRule(string ruleName, int ruleIndex);
+
+	#region Rules
+	<rules; separator="\n">
+	#endregion Rules
+
+<if(grammar.delegatedRules)>
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+	#region Delegated rules
+<grammar.delegatedRules:{ruleDescriptor|
+	<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> <returnType(ruleDescriptor)> <ruleDescriptor.name; format="id">(<ruleDescriptor.parameterScope:parameterScope()>)<!throws RecognitionException !>{ <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name; format="id">(<if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope.attributes:{a|<a.name; format="id">}; separator=", "><endif>); \}}; separator="\n">
+	#endregion Delegated rules
+<endif>
+
+	<insertSynpreds(synpreds)>
+
+<if(cyclicDFAs)>
+	#region DFA
+	<cyclicDFAs:{dfa | private DFA<dfa.decisionNumber> dfa<dfa.decisionNumber>;}; separator="\n">
+
+	protected override void InitDFAs()
+	{
+		base.InitDFAs();
+		<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>( this<if(dfa.specialStateSTs)>, SpecialStateTransition<dfa.decisionNumber><endif> );}; separator="\n">
+	}
+
+	<cyclicDFAs:cyclicDFA()><! dump tables for all DFA !>
+	#endregion DFA
+<endif>
+
+<if(bitsets)>
+	#region Follow sets
+	private static class Follow
+	{
+		<bitsets:{it|<bitset(name={_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>}; separator="\n">
+	}
+	#endregion Follow sets
+<endif>
+}
+>>
+
+@genericParser.members() ::= <<
+<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<inputStreamType> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+	: this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
+{
+}
+<actions.(actionScope).ctorModifier; null="public"> <grammar.recognizerName>(<inputStreamType> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+	: base(input, state)
+{
+<if(grammar.directDelegates)>
+	<grammar.directDelegates:
+		{g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+<endif>
+<if(grammar.indirectDelegates)>
+	<grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+<endif>
+<if(grammar.delegators)>
+	<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+<endif>
+	<parserCtorBody()>
+	OnCreated();
+}
+>>
+
+// imported grammars are 'public' (can't be internal because their return scope classes must be accessible)
+parserModifier(grammar, actions) ::= <<
+<if(grammar.grammarIsRoot)><actions.(actionScope).modifier; null="public"><else>public<endif>
+>>
+
+parserCtorBody() ::= <<
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = new System.Collections.Generic.Dictionary\<int, int>[<length(grammar.allImportedRules)>+1];<\n><! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
+       ASTLabelType="object", superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Parser<endif>}, labelType="IToken",
+       members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="ITokenStream", rewriteElementType="IToken", filterMode=false, ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="object",
+           superClass={<if(actions.(actionScope).superClass)><actions.(actionScope).superClass><else>Antlr.Runtime.Tree.<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif><endif>},
+           members={<actions.treeparser.members>}) ::= <<
+<genericParser(inputStreamType="ITreeNodeStream", rewriteElementType="Node", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values,
+ *  parameters, etc., we just generate the simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+
+partial void EnterRule_<ruleName>_fragment();
+partial void LeaveRule_<ruleName>_fragment();
+
+// $ANTLR start <ruleName>
+<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope()>)
+{
+	<ruleLabelDefs(...)>
+	EnterRule_<ruleName>_fragment();
+	EnterRule("<ruleName>_fragment", <ruleDescriptor.index>);
+	TraceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+	try
+	{
+		<block>
+	}
+	finally
+	{
+		TraceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+		LeaveRule("<ruleName>_fragment", <ruleDescriptor.index>);
+		LeaveRule_<ruleName>_fragment();
+	}
+}
+// $ANTLR end <ruleName>
+>>
+
+insertLexerSynpreds(synpreds) ::= <<
+<insertSynpreds(synpreds)>
+>>
+
+insertSynpreds(synpreds) ::= <<
+<if(synpreds)>
+#region Synpreds
+private bool EvaluatePredicate(System.Action fragment)
+{
+	bool success = false;
+	state.backtracking++;
+	<@start()>
+	try { DebugBeginBacktrack(state.backtracking);
+	int start = input.Mark();
+	try
+	{
+		fragment();
+	}
+	catch ( RecognitionException re )
+	{
+		System.Console.Error.WriteLine("impossible: "+re);
+	}
+	success = !state.failed;
+	input.Rewind(start);
+	} finally { DebugEndBacktrack(state.backtracking, success); }
+	<@stop()>
+	state.backtracking--;
+	state.failed=false;
+	return success;
+}
+#endregion Synpreds
+<endif>
+>>
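+
+// A generated syntactic predicate method would wrap its *_fragment rule (see
+// synpredRule above) in EvaluatePredicate, along the lines of this
+// hypothetical sketch:
+//
+//   private bool synpred1_MyGrammar()
+//   {
+//       return EvaluatePredicate(synpred1_MyGrammar_fragment);
+//   }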
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if (state.backtracking > 0 && AlreadyParsedRule(input, <ruleDescriptor.index>)) { <returnFromRule()> }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.failed) <returnFromRule()><endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.backtracking>0) {state.failed=true; <returnFromRule()>}<endif>
+>>
+
+ruleWrapperMap ::= [
+	"bottomup":{<ruleWrapperBottomup()>},
+	"topdown":{<ruleWrapperTopdown()>},
+	default:""
+]
+
+ruleWrapperBottomup() ::= <<
+<if(TREE_PARSER && filterMode)>
+protected override <if(buildAST)>IAstRuleReturnScope<else>void<endif> Bottomup() { <if(buildAST)>return <endif>bottomup(); }
+<endif>
+>>
+
+ruleWrapperTopdown() ::= <<
+<if(TREE_PARSER && filterMode)>
+protected override <if(buildAST)>IAstRuleReturnScope<else>void<endif> Topdown() { <if(buildAST)>return <endif>topdown(); }
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(ruleDescriptor.returnScope)>
+partial void EnterRule_<ruleName>();
+partial void LeaveRule_<ruleName>();
+<ruleWrapperMap.(ruleName)>
+// $ANTLR start "<ruleName>"
+// <fileName>:<description>
+[GrammarRule("<ruleName>")]
+<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> <returnType(ruleDescriptor)> <ruleName; format="id">(<ruleDescriptor.parameterScope:parameterScope()>)
+{
+	EnterRule_<ruleName>();
+	EnterRule("<ruleName>", <ruleDescriptor.index>);
+	TraceIn("<ruleName>", <ruleDescriptor.index>);
+	<ruleScopeSetUp()>
+	<ruleDeclarations()>
+	<ruleLabelDefs(...)>
+	<ruleDescriptor.actions.init>
+	try { DebugEnterRule(GrammarFileName, "<ruleName>");
+	DebugLocation(<ruleDescriptor.tree.line>, <ruleDescriptor.EORNode.charPositionInLine>);
+	<@preamble()>
+	try
+	{
+		<ruleMemoization(name=ruleName)>
+		<block>
+		<ruleCleanUp()>
+		<(ruleDescriptor.actions.after):execAction()>
+	}
+<if(exceptions)>
+	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+	<actions.(actionScope).rulecatch>
+<else>
+	catch (RecognitionException re)
+	{
+		ReportError(re);
+		Recover(input,re);
+		<@setErrorReturnValue()>
+	}
+<endif>
+<endif>
+<endif>
+	finally
+	{
+		TraceOut("<ruleName>", <ruleDescriptor.index>);
+		LeaveRule("<ruleName>", <ruleDescriptor.index>);
+		LeaveRule_<ruleName>();
+		<memoize()>
+		<ruleScopeCleanUp()>
+		<finally>
+	}
+	DebugLocation(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);
+	} finally { DebugExitRule(GrammarFileName, "<ruleName>"); }
+	<@postamble()>
+	<returnFromRule()><\n>
+}
+// $ANTLR end "<ruleName>"
+>>
+
+// imported grammars need to have internal rules
+ruleModifier(grammar,ruleDescriptor) ::= <<
+<if(grammar.grammarIsRoot)><csharpVisibilityMap.(ruleDescriptor.modifier); null="private"><else>internal<endif>
+>>
+
+// imported grammars need to have public return scopes
+returnScopeModifier(grammar,ruleDescriptor) ::= <<
+<if(grammar.grammarIsRoot)><csharpVisibilityMap.(ruleDescriptor.modifier); null="private"><else>public<endif>
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>)
+{
+	<e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType(ruleDescriptor)> retval = new <returnType(ruleDescriptor)>(<if(ruleDescriptor.returnScope.attributes)>this<endif>);
+retval.Start = (<labelType>)input.LT(1);
+<elseif(ruleDescriptor.returnScope)>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name; format="id"> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+int <ruleDescriptor.name>_StartIndex = input.Index;
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{it|<it>_stack.Push(new <it>_scope(this));<it>_scopeInit(<it>_stack.Peek());}; separator="\n">
+<ruleDescriptor.ruleScope:{it|<it.name>_stack.Push(new <it.name>_scope(this));<it.name>_scopeInit(<it.name>_stack.Peek());}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{it|<it>_scopeAfter(<it>_stack.Peek());<it>_stack.Pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{it|<it.name>_scopeAfter(<it.name>_stack.Peek());<it.name>_stack.Pop();}; separator="\n">
+>>
+
+ruleLabelDefs(ruleDescriptor, labelType, ASTLabelType, rewriteElementType) ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+	:{it|<labelType> <it.label.text> = default(<labelType>);}; separator="\n"
+>
+<ruleDescriptor.tokenListLabels
+	:{it|List\<<labelType>\> list_<it.label.text> = null;}; separator="\n"
+>
+<[ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+	:{it|List\<<ASTLabelType>\> list_<it.label.text> = null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
+<ruleDescriptor.ruleListLabels:ruleLabelDef(); separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+	:{it|<labelType> <it.label.text> = default(<labelType>);}; separator="\n"
+>
+<[ruleDescriptor.charListLabels,
+  ruleDescriptor.charLabels]
+	:{it|int <it.label.text> = 0;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+	:{it|List\<<labelType>\> list_<it.label.text> = null;}; separator="\n"
+>
+<ruleDescriptor.charListLabels:{it|List\<int> list_<it.label.text> = null;}; separator="\n"
+>
+>>
+
+returnFromRule() ::= <%
+return
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<! This comment is a hack to make sure the following
+   single space appears in the output. !> <ruleDescriptor.singleValueReturnName>
+<else>
+<!!> retval
+<endif>
+<endif>
+<endif>
+;
+%>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.Stop = (<labelType>)input.LT(-1);
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if (state.backtracking > 0) { Memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+
+partial void EnterRule_<ruleName>();
+partial void LeaveRule_<ruleName>();
+
+// $ANTLR start "<ruleName>"
+[GrammarRule("<ruleName>")]
+<ruleModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>)
+{
+	EnterRule_<ruleName>();
+	EnterRule("<ruleName>", <ruleDescriptor.index>);
+	TraceIn("<ruleName>", <ruleDescriptor.index>);
+	<ruleScopeSetUp()>
+	<ruleDeclarations()>
+	try
+	{
+<if(nakedBlock)>
+		<ruleMemoization(name=ruleName)>
+		<lexerRuleLabelDefs()>
+		<ruleDescriptor.actions.init>
+		<block>
+<else>
+		int _type = <ruleName>;
+		int _channel = DefaultTokenChannel;
+		<ruleMemoization(name=ruleName)>
+		<lexerRuleLabelDefs()>
+		<ruleDescriptor.actions.init>
+		<block>
+		<ruleCleanUp()>
+		state.type = _type;
+		state.channel = _channel;
+		<(ruleDescriptor.actions.after):execAction()>
+<endif>
+	}
+	finally
+	{
+		TraceOut("<ruleName>", <ruleDescriptor.index>);
+		LeaveRule("<ruleName>", <ruleDescriptor.index>);
+		LeaveRule_<ruleName>();
+		<ruleScopeCleanUp()>
+		<memoize()>
+	}
+}
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+
+public override void mTokens()
+{
+	<block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+try { DebugEnterSubRule(<decisionNumber>);
+try { DebugEnterDecision(<decisionNumber>, false<!<decision.dfa.hasSynPred>!>);
+<decision>
+} finally { DebugExitDecision(<decisionNumber>); }
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>)
+{
+<alts:{a|<altSwitchCase(i,a)>}>
+}
+} finally { DebugExitSubRule(<decisionNumber>); }
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+try { DebugEnterDecision(<decisionNumber>, false<!<decision.dfa.hasSynPred>!>);
+<decision>
+} finally { DebugExitDecision(<decisionNumber>); }
+<@postdecision()>
+switch (alt<decisionNumber>)
+{
+<alts:{a|<altSwitchCase(i,a)>}>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+DebugEnterAlt(1);
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+DebugEnterAlt(1);
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int cnt<decisionNumber>=0;
+<decls>
+<@preloop()>
+try { DebugEnterSubRule(<decisionNumber>);
+while (true)
+{
+	int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	try { DebugEnterDecision(<decisionNumber>, false<!<decision.dfa.hasSynPred>!>);
+	<decision>
+	} finally { DebugExitDecision(<decisionNumber>); }
+	<@postdecision()>
+	switch (alt<decisionNumber>)
+	{
+	<alts:{a|<altSwitchCase(i,a)>}>
+	default:
+		if (cnt<decisionNumber> >= 1)
+			goto loop<decisionNumber>;
+
+		<ruleBacktrackFailure()>
+		EarlyExitException eee<decisionNumber> = new EarlyExitException( <decisionNumber>, input );
+		DebugRecognitionException(eee<decisionNumber>);
+		<@earlyExitException()>
+		throw eee<decisionNumber>;
+	}
+	cnt<decisionNumber>++;
+}
+loop<decisionNumber>:
+	;
+
+} finally { DebugExitSubRule(<decisionNumber>); }
+<@postloop()>
+>>
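+
+// For illustration, a one-or-more subrule such as ( ID )+ produces a loop of
+// roughly this shape (decision number is illustrative; Debug* wrappers elided):
+//
+//   int cnt3 = 0;
+//   while (true)
+//   {
+//       int alt3 = 2;
+//       int LA3_0 = input.LA(1);
+//       if ((LA3_0 == ID)) { alt3 = 1; }
+//       switch (alt3)
+//       {
+//       case 1:
+//           // ... match ID ...
+//           break;
+//       default:
+//           if (cnt3 >= 1)
+//               goto loop3;
+//           EarlyExitException eee3 = new EarlyExitException(3, input);
+//           throw eee3;
+//       }
+//       cnt3++;
+//   }
+//   loop3: ;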
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+try { DebugEnterSubRule(<decisionNumber>);
+while (true)
+{
+	int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	try { DebugEnterDecision(<decisionNumber>, false<!<decision.dfa.hasSynPred>!>);
+	<decision>
+	} finally { DebugExitDecision(<decisionNumber>); }
+	<@postdecision()>
+	switch ( alt<decisionNumber> )
+	{
+	<alts:{a|<altSwitchCase(i,a)>}>
+	default:
+		goto loop<decisionNumber>;
+	}
+}
+
+loop<decisionNumber>:
+	;
+
+} finally { DebugExitSubRule(<decisionNumber>); }
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation,
+ *  so we can just use the normal block template.
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase(altNum,alt) ::= <<
+case <altNum>:
+	<@prealt()>
+	DebugEnterAlt(<altNum>);
+	<alt>
+	break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+{
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+}
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element(it) ::= <%
+<@prematch()>
+DebugLocation(<it.line>, <it.pos>);<\n>
+<it.el><\n>
+%>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(label)><label>=(<labelType>)<endif>Match(input,<token>,Follow._<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+listLabel(label,elem) ::= <<
+#error The listLabel template should not be used with this target.<\n>
+>>
+
+listLabelElem(label,elem,elemType) ::= <<
+if (list_<label>==null) list_<label>=new List\<<elemType; null={<labelType>}>\>();
+list_<label>.Add(<elem>);<\n>
+>>
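+
+// For example, ids+=ID in a parser rule grows a list of IToken, roughly:
+//
+//   if (list_ids == null) list_ids = new List<IToken>();
+//   list_ids.Add(ids);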
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+Match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="",terminalOptions={}) ::= <<
+<if(label)>
+<matchSetLabel()>
+<endif>
+if (<s>)
+{
+	input.Consume();
+	<postmatchCode>
+	<if(!LEXER)>state.errorRecovery=false;<endif><if(backtracking)>state.failed=false;<endif>
+}
+else
+{
+	<ruleBacktrackFailure()>
+	MismatchedSetException mse = new MismatchedSetException(null,input);
+	DebugRecognitionException(mse);
+	<@mismatchedSetException()>
+<if(LEXER)>
+	Recover(mse);
+	throw mse;
+<else>
+	throw mse;
+	<! use following code to make it recover inline; remove throw mse;
+	recoverFromMismatchedSet(input,mse,Follow._set_in_<ruleName><elementIndex>);
+	!>
+<endif>
+}<\n>
+>>
+
+matchSetUnchecked(s,label,elementIndex,postmatchCode=false) ::= <%
+<if(label)>
+<matchSetLabel()><\n>
+<endif>
+input.Consume();<\n>
+<if(postmatchCode)>
+<postmatchCode><\n>
+<endif>
+<if(!LEXER)>state.errorRecovery=false;<endif><if(backtracking)>state.failed=false;<endif>
+%>
+
+matchSetLabel() ::= <%
+<if(LEXER)>
+<label>= input.LA(1);
+<else>
+<label>=(<labelType>)input.LT(1);
+<endif>
+%>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex) ::= <%
+<if(label)>
+int <label>Start = CharIndex;<\n>
+Match(<string>); <checkRuleBacktrackFailure()><\n>
+int <label>StartLine<elementIndex> = Line;<\n>
+int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
+<label> = new <labelType>(input, TokenTypes.Invalid, TokenChannels.Default, <label>Start, CharIndex-1);<\n>
+<label>.Line = <label>StartLine<elementIndex>;<\n>
+<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
+<else>
+Match(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+%>
+
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+MatchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<wildcard(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = input.LA(1);<\n>
+<endif>
+MatchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** Match a rule reference by invoking it, possibly with arguments
+ *  and one or more return values.  The 'rule' argument used to be the
+ *  target rule name, but is now of type Rule, whose toString() is the
+ *  same: the rule name.  Now, though, you can access the full rule
+ *  descriptor.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+PushFollow(Follow._<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name; format="id">(<args; separator=", ">);
+PopFollow();
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabelElem(elem=label,elemType={<ASTLabelType>},...)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument used to be the target rule name, but is now
+ *  of type Rule, whose toString() is the same: the rule name.
+ *  Now, though, you can access the full rule descriptor.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <%
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;<\n>
+int <label>StartLine<elementIndex> = Line;<\n>
+int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()><\n>
+<label> = new <labelType>(input, TokenTypes.Invalid, TokenChannels.Default, <label>Start<elementIndex>, CharIndex-1);<\n>
+<label>.Line = <label>StartLine<elementIndex>;<\n>
+<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
+<else>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+%>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabelElem(elem=label,elemType=labelType,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <%
+<if(label)>
+int <label>Start<elementIndex> = CharIndex;<\n>
+int <label>StartLine<elementIndex> = Line;<\n>
+int <label>StartCharPos<elementIndex> = CharPositionInLine;<\n>
+Match(EOF); <checkRuleBacktrackFailure()><\n>
+<labelType> <label> = new <labelType>(input, EOF, TokenChannels.Default, <label>Start<elementIndex>, CharIndex-1);<\n>
+<label>.Line = <label>StartLine<elementIndex>;<\n>
+<label>.CharPositionInLine = <label>StartCharPos<elementIndex>;
+<else>
+Match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+%>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "int <recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if (input.LA(1) == TokenTypes.Down)
+{
+	Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+	<children:element()>
+	Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+}
+<else>
+Match(input, TokenTypes.Down, null); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(input, TokenTypes.Up, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if (!(<evalPredicate(...)>))
+{
+	<ruleBacktrackFailure()>
+	throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<k> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else
+{
+<if(eotPredictsAlt)>
+	alt<decisionNumber> = <eotPredictsAlt>;
+<else>
+	<ruleBacktrackFailure()>
+	NoViableAltException nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input, <k>);
+	DebugRecognitionException(nvae);
+	<@noViableAltException()>
+	throw nvae;
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  This delays error detection, but it
+ *  is faster, smaller, and closer to what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<k> = input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<k> = input.LA(<k>);<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber> = <eotPredictsAlt>;<! if no edges, don't gen ELSE !>
+<else>
+else
+{
+	alt<decisionNumber> = <eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ((<labelExpr>)<if(predicates)> && (<predicates>)<endif>)
+{
+	<targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch (input.LA(<k>))
+{
+<edges; separator="\n">
+default:
+<if(eotPredictsAlt)>
+	alt<decisionNumber>=<eotPredictsAlt>;
+	break;<\n>
+<else>
+	{
+		<ruleBacktrackFailure()>
+		NoViableAltException nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input, <k>);
+		DebugRecognitionException(nvae);
+		<@noViableAltException()>
+		throw nvae;
+	}
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch (input.LA(<k>))
+{
+<edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch (input.LA(<k>))
+{
+<edges; separator="\n">
+<if(eotPredictsAlt)>
+default:
+	alt<decisionNumber>=<eotPredictsAlt>;
+	break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{it|case <it>:}; separator="\n">
+	{
+	<targetState>
+	}
+	break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+try
+{
+	alt<decisionNumber> = dfa<decisionNumber>.Predict(input);
+}
+catch (NoViableAltException nvae)
+{
+	DebugRecognitionException(nvae);
+	throw;
+}
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * The compiler puts them back together at compile time thankfully.
+ * This scheme is inherited from the Java target, which cannot handle
+ * large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+private class DFA<dfa.decisionNumber> : DFA
+{
+	private const string DFA<dfa.decisionNumber>_eotS =
+		"<dfa.javaCompressedEOT; wrap="\"+\n\t\t\"">";
+	private const string DFA<dfa.decisionNumber>_eofS =
+		"<dfa.javaCompressedEOF; wrap="\"+\n\t\t\"">";
+	private const string DFA<dfa.decisionNumber>_minS =
+		"<dfa.javaCompressedMin; wrap="\"+\n\t\t\"">";
+	private const string DFA<dfa.decisionNumber>_maxS =
+		"<dfa.javaCompressedMax; wrap="\"+\n\t\t\"">";
+	private const string DFA<dfa.decisionNumber>_acceptS =
+		"<dfa.javaCompressedAccept; wrap="\"+\n\t\t\"">";
+	private const string DFA<dfa.decisionNumber>_specialS =
+		"<dfa.javaCompressedSpecial; wrap="\"+\n\t\t\"">}>";
+	private static readonly string[] DFA<dfa.decisionNumber>_transitionS =
+		{
+			<dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
+		};
+
+	private static readonly short[] DFA<dfa.decisionNumber>_eot = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eotS);
+	private static readonly short[] DFA<dfa.decisionNumber>_eof = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_eofS);
+	private static readonly char[] DFA<dfa.decisionNumber>_min = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
+	private static readonly char[] DFA<dfa.decisionNumber>_max = DFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
+	private static readonly short[] DFA<dfa.decisionNumber>_accept = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
+	private static readonly short[] DFA<dfa.decisionNumber>_special = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_specialS);
+	private static readonly short[][] DFA<dfa.decisionNumber>_transition;
+
+	static DFA<dfa.decisionNumber>()
+	{
+		int numStates = DFA<dfa.decisionNumber>_transitionS.Length;
+		DFA<dfa.decisionNumber>_transition = new short[numStates][];
+		for ( int i=0; i \< numStates; i++ )
+		{
+			DFA<dfa.decisionNumber>_transition[i] = DFA.UnpackEncodedString(DFA<dfa.decisionNumber>_transitionS[i]);
+		}
+	}
+
+	public DFA<dfa.decisionNumber>( BaseRecognizer recognizer<if(dfa.specialStateSTs)>, SpecialStateTransitionHandler specialStateTransition<endif> )
+<if(dfa.specialStateSTs)>
+		: base(specialStateTransition)
+<endif>
+	{
+		this.recognizer = recognizer;
+		this.decisionNumber = <dfa.decisionNumber>;
+		this.eot = DFA<dfa.decisionNumber>_eot;
+		this.eof = DFA<dfa.decisionNumber>_eof;
+		this.min = DFA<dfa.decisionNumber>_min;
+		this.max = DFA<dfa.decisionNumber>_max;
+		this.accept = DFA<dfa.decisionNumber>_accept;
+		this.special = DFA<dfa.decisionNumber>_special;
+		this.transition = DFA<dfa.decisionNumber>_transition;
+	}
+
+	public override string Description { get { return "<dfa.description>"; } }
+
+	public override void Error(NoViableAltException nvae)
+	{
+		DebugRecognitionException(nvae);
+	}
+}<\n>
+<if(dfa.specialStateSTs)>
+private int SpecialStateTransition<dfa.decisionNumber>(DFA dfa, int s, IIntStream _input)<! throws NoViableAltException!>
+{
+	<if(LEXER)>
+	IIntStream input = _input;
+	<endif>
+	<if(PARSER)>
+	ITokenStream input = (ITokenStream)_input;
+	<endif>
+	<if(TREE_PARSER)>
+	ITreeNodeStream input = (ITreeNodeStream)_input;
+	<endif>
+	int _s = s;
+	s = -1;
+	<! pull these outside the switch cases to save space on locals !>
+	int LA<dfa.decisionNumber>_1 = input.LA(1);
+	int index<dfa.decisionNumber>_1 = input.Index;
+	switch (_s)
+	{
+	<dfa.specialStateSTs:{state |case <i0>:<! compressed special state numbers 0..n-1 !>
+	<state>}; separator="\n">
+
+	default:
+		break;
+	}
+
+	if (s >= 0)
+		return s;
+
+<if(backtracking)>
+	if (state.backtracking > 0) {state.failed=true; return -1;}
+<endif>
+	NoViableAltException nvae = new NoViableAltException(dfa.Description, <dfa.decisionNumber>, _s, input);
+	dfa.Error(nvae);
+	throw nvae;
+}
+<endif>
+>>
+
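+/* Illustrative sketch only: DFA.UnpackEncodedString is assumed here to mirror
+ * the Java runtime's run-length scheme, i.e. the packed string is read as
+ * (count, value) character pairs.  A minimal stand-alone C# rendering of that
+ * decode, with a made-up input:
+ *
+ *   string packed = "\u0002\u0001\u0001\uffff";       // 2 x 1, then 1 x -1
+ *   var unpacked = new System.Collections.Generic.List<short>();
+ *   for (int i = 0; i < packed.Length; i += 2)
+ *   {
+ *       char count = packed[i];
+ *       short value = unchecked((short)packed[i + 1]); // '\uffff' maps to -1
+ *       for (int j = 0; j < count; j++)
+ *           unpacked.Add(value);
+ *   }
+ *   // unpacked now holds { 1, 1, -1 }
+ */
+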
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+{
+<if(semPredState)>
+	<! get next lookahead symbol to test edges, then rewind !>
+	input.Rewind();
+<endif>
+	<edges; separator="\nelse ">
+<if(semPredState)>
+	<! return input cursor to state before we rewound !>
+	input.Seek(index<decisionNumber>_1);
+<endif>
+	break;
+}
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ((<labelExpr>)<if(predicates)> && (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+orPredicates(operands) ::= "(<operands; separator=\"||\">)"
+
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "EvaluatePredicate(<pred>_fragment)"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<k>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
+(LA<decisionNumber>_<k><ge()><lower> && LA<decisionNumber>_<k><le()><upper>)
+%>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>)<ge()><lower> && input.LA(<k>)<le()><upper>)"
+
+le() ::= "\<="
+ge() ::= ">="
+
+setTest(ranges) ::= <<
+<ranges; separator="||">
+>>
+
+// A T T R I B U T E S
+
+attributeScope(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+protected sealed partial class <scope.name>_scope
+{
+	<scope.attributes:{it|public <it.decl>;}; separator="\n">
+
+	public <scope.name>_scope(<grammar.recognizerName> grammar) { OnCreated(grammar); }
+	partial void OnCreated(<grammar.recognizerName> grammar);
+}
+<if(scope.actions.scopeinit)>
+protected void <scope.name>_scopeInit( <scope.name>_scope scope )
+{
+	<scope.actions.scopeinit>
+}
+<else>
+partial void <scope.name>_scopeInit( <scope.name>_scope scope );
+<endif>
+<if(scope.actions.scopeafter)>
+protected void <scope.name>_scopeAfter( <scope.name>_scope scope )
+{
+	<scope.actions.scopeafter>
+}
+<else>
+partial void <scope.name>_scopeAfter( <scope.name>_scope scope );
+<endif>
+protected readonly ListStack\<<scope.name>_scope> <scope.name>_stack = new ListStack\<<scope.name>_scope>();
+<endif>
+<endif>
+>>
+
+globalAttributeScope(scope) ::= <<
+<attributeScope(...)>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<attributeScope(...)>
+>>
+
+returnStructName(it) ::= "<it.name>_return"
+
+returnType(ruleDescriptor) ::= <%
+<if(ruleDescriptor.returnScope.attributes && ruleDescriptor.hasMultipleReturnValues)>
+	<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
+<elseif(ruleDescriptor.hasMultipleReturnValues)>
+	<ruleReturnBaseType()>
+<elseif(ruleDescriptor.hasSingleReturnValue)>
+	<ruleDescriptor.singleValueReturnType>
+<else>
+	void
+<endif>
+%>
+
+/** Generate the C# type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+	<ruleReturnBaseType()>
+<elseif(referencedRule.hasSingleReturnValue)>
+	<referencedRule.singleValueReturnType>
+<else>
+	void
+<endif>
+%>
+
+delegateName(it) ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
+/** Using a type-to-init-value map, try to init a type; if it is not in the
+ *  table it must be an object, whose default value is "null".
+ */
+initValue(typeName) ::= <<
+default(<typeName>)
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <%
+<ruleLabelType(label.referencedRule)> <label.label.text> = <initValue(ruleLabelType(label.referencedRule))>;
+%>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(scope.attributes && ruleDescriptor.hasMultipleReturnValues)>
+<returnScopeModifier(grammar=grammar,ruleDescriptor=ruleDescriptor)> sealed partial class <ruleDescriptor:returnStructName()> : <ruleReturnBaseType()><@ruleReturnInterfaces()>
+{
+	<scope.attributes:{it|public <it.decl>;}; separator="\n">
+	<@ruleReturnMembers()>
+}
+<endif>
+>>
+
+ruleReturnBaseType() ::= <%
+<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope\<<labelType>>
+%>
+
+@returnScope.ruleReturnMembers() ::= <<
+public <ruleDescriptor:returnStructName()>(<grammar.recognizerName> grammar) {OnCreated(grammar);}
+partial void OnCreated(<grammar.recognizerName> grammar);
+>>
+
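+/* Illustrative sketch only: for a hypothetical rule "expr returns [int value]"
+ * in a parser named MyParser, built with AST output (so the rule has multiple
+ * return values), the returnScope/ruleReturnBaseType/ruleReturnMembers
+ * templates above are expected to expand to C# along these lines, taking
+ * <labelType> to be IToken and the scope modifier to be public:
+ *
+ *   public sealed partial class expr_return : ParserRuleReturnScope<IToken>
+ *   {
+ *       public int value;
+ *       public expr_return(MyParser grammar) { OnCreated(grammar); }
+ *       partial void OnCreated(MyParser grammar);
+ *   }
+ */
+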
+parameterScope(scope) ::= <<
+<scope.attributes:{it|<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= <<
+<attr.name; format="id">
+>>
+
+parameterSetAttributeRef(attr,expr) ::= <<
+<attr.name; format="id"> =<expr>;
+>>
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <%
+<if(negIndex)>
+<scope>_stack[<scope>_stack.Count - <negIndex> - 1].<attr.name; format="id">
+<else>
+<if(index)>
+<scope>_stack[<index>].<attr.name; format="id">
+<else>
+<scope>_stack.Peek().<attr.name; format="id">
+<endif>
+<endif>
+%>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
+<if(negIndex)>
+<scope>_stack[<scope>_stack.Count - <negIndex> - 1].<attr.name; format="id"> = <expr>;
+<else>
+<if(index)>
+<scope>_stack[<index>].<attr.name; format="id"> = <expr>;
+<else>
+<scope>_stack.Peek().<attr.name; format="id"> = <expr>;
+<endif>
+<endif>
+%>
+
+/** $x is either a global scope or x is a rule with a dynamic scope; this refers
+ *  to the stack itself, not the top of the stack.  This is useful for predicates
+ *  like {$function.Count>0 && $function::name.Equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+(<scope>!=null?((<returnType(referencedRule)>)<scope>).<attr.name; format="id">:<initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+%>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name; format="id">
+<else>
+<attr.name; format="id">
+<endif>
+%>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name; format="id"> =<expr>;
+<else>
+<attr.name; format="id"> =<expr>;
+<endif>
+%>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.Text:default(string))"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.Type:0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.Line:0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.CharPositionInLine:0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.Channel:0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.TokenIndex:0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?int.Parse(<scope>.Text):0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.Start):default(<labelType>))"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?((<labelType>)<scope>.Stop):default(<labelType>))"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?((<ASTLabelType>)<scope>.Tree):default(<ASTLabelType>))"
+ruleLabelPropertyRef_text(scope,attr) ::= <%
+<if(TREE_PARSER)>
+(<scope>!=null?(input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(<scope>.Start),
+  input.TreeAdaptor.GetTokenStopIndex(<scope>.Start))):default(string))
+<else>
+(<scope>!=null?input.ToString(<scope>.Start,<scope>.Stop):default(string))
+<endif>
+%>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?<scope>.Template:null)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::=
+    "(<scope>!=null?<scope>.Type:0)"
+
+lexerRuleLabelPropertyRef_line(scope,attr) ::=
+    "(<scope>!=null?<scope>.Line:0)"
+
+lexerRuleLabelPropertyRef_pos(scope,attr) ::=
+    "(<scope>!=null?<scope>.CharPositionInLine:-1)"
+
+lexerRuleLabelPropertyRef_channel(scope,attr) ::=
+    "(<scope>!=null?<scope>.Channel:0)"
+
+lexerRuleLabelPropertyRef_index(scope,attr) ::=
+    "(<scope>!=null?<scope>.TokenIndex:0)"
+
+lexerRuleLabelPropertyRef_text(scope,attr) ::=
+    "(<scope>!=null?<scope>.Text:default(string))"
+
+lexerRuleLabelPropertyRef_int(scope,attr) ::=
+    "(<scope>!=null?int.Parse(<scope>.Text):0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "retval.Start"
+rulePropertyRef_stop(scope,attr) ::= "retval.Stop"
+rulePropertyRef_tree(scope,attr) ::= "retval.Tree"
+rulePropertyRef_text(scope,attr) ::= <%
+<if(TREE_PARSER)>
+input.TokenStream.ToString(
+  input.TreeAdaptor.GetTokenStartIndex(retval.Start),
+  input.TreeAdaptor.GetTokenStopIndex(retval.Start))
+<else>
+input.ToString(retval.Start,input.LT(-1))
+<endif>
+%>
+rulePropertyRef_st(scope,attr) ::= "retval.Template"
+
+lexerRulePropertyRef_text(scope,attr) ::= "Text"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "int.Parse(<scope>.Text)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.Tree = <expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.Template =<expr>;"
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <%
+<if(backtracking)>
+if (<actions.(actionScope).synpredgate>)<\n>
+{<\n>
+<@indentedAction()><\n>
+}
+<else>
+<action>
+<endif>
+%>
+
+@execAction.indentedAction() ::= <<
+	<action>
+>>
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+public static readonly BitSet <name> = new BitSet(new ulong[]{<words64:{it|<it>UL};separator=",">});
+>>
+
+codeFileExtension() ::= ".cs"
+
+true_value() ::= "true"
+false_value() ::= "false"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/Dbg.stg
new file mode 100644
index 0000000..c250e44
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/Dbg.stg
@@ -0,0 +1,312 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2005-2008 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2008-2009 Sam Harwell, Pixel Mine, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/** Template overrides to add debugging to the normal C# output;
+ *  if ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+using Antlr.Runtime.Debug;
+using IOException = System.IO.IOException;
+>>
+
+@genericParser.members() ::= <<
+<if(grammar.grammarIsRoot)>
+public static readonly string[] ruleNames =
+	new string[]
+	{
+		"invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n	", separator=", ">
+	};<\n>
+<endif>
+<if(grammar.grammarIsRoot)><! grammar imports other grammar(s) !>
+int ruleLevel = 0;
+public virtual int RuleLevel { get { return ruleLevel; } }
+public virtual void IncRuleLevel() { ruleLevel++; }
+public virtual void DecRuleLevel() { ruleLevel--; }
+<if(profile)>
+<ctorForProfilingRootGrammar()>
+<else>
+<ctorForRootGrammar()>
+<endif>
+<ctorForPredefinedListener()>
+<else><! imported grammar !>
+public int RuleLevel { get { return <grammar.delegators:{g| <g:delegateName()>}>.RuleLevel; } }
+public void IncRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.IncRuleLevel(); }
+public void DecRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.DecRuleLevel(); }
+<ctorForDelegateGrammar()>
+<endif>
+<if(profile)>
+public override bool AlreadyParsedRule( IIntStream input, int ruleIndex )
+{
+	int stopIndex = GetRuleMemoization(ruleIndex, input.Index);
+	((Profiler)dbg).ExamineRuleMemoization(input, ruleIndex, stopIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+	return base.AlreadyParsedRule(input, ruleIndex);
+}<\n>
+public override void Memoize( IIntStream input, int ruleIndex, int ruleStartIndex )
+{
+	((Profiler)dbg).Memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+	base.Memoize(input, ruleIndex, ruleStartIndex);
+}<\n>
+<endif>
+protected virtual bool EvalPredicate( bool result, string predicate )
+{
+	dbg.SemanticPredicate( result, predicate );
+	return result;
+}<\n>
+>>
+
+ctorForRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<! Same except we add port number and profile stuff if root grammar !>
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input )
+	: this( input, DebugEventSocketProxy.DefaultDebuggerPort, new RecognizerSharedState() )
+{
+}
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, int port, RecognizerSharedState state )
+	: base( input, state )
+{
+	<createListenerAndHandshake()>
+	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>( input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
+	<parserCtorBody()>
+	<@finally()>
+}<\n>
+>>
+
+ctorForProfilingRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input )
+	: this( input, new Profiler(null), new RecognizerSharedState() )
+{
+}
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state )
+	: base( input, dbg, state )
+{
+	Profiler p = (Profiler)dbg;
+	p.setParser(this);
+	<grammar.directDelegates:
+	 {g|<g:delegateName()> = new <g.recognizerName>( input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
+	<parserCtorBody()>
+	<@finally()>
+}
+<\n>
+>>
+
+/** Basically we don't want to set any dbg listeners as the root will have it. */
+ctorForDelegateGrammar() ::= <<
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}> )
+	: base( input, dbg, state )
+{
+	<grammar.directDelegates:
+	 {g|<g:delegateName()> = new <g.recognizerName>( input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}> );}; separator="\n">
+	<parserCtorBody()>
+}<\n>
+>>
+
+ctorForPredefinedListener() ::= <<
+<actions.(actionScope).ctorModifier; null="public"> <name>( <inputStreamType> input, IDebugEventListener dbg )
+	<@superClassRef>: base( input, dbg, new RecognizerSharedState() )<@end>
+{
+<if(profile)>
+	Profiler p = (Profiler)dbg;
+	p.setParser(this);
+<endif>
+	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+	<parserCtorBody()>
+	<@finally()>
+}<\n>
+>>
+
+createListenerAndHandshake() ::= <<
+<if(TREE_PARSER)>
+DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, input.TreeAdaptor );<\n>
+<else>
+DebugEventSocketProxy proxy = new DebugEventSocketProxy( this, port, null );<\n>
+<endif>
+DebugListener = proxy;
+try
+{
+	proxy.Handshake();
+}
+catch ( IOException ioe )
+{
+	ReportError( ioe );
+}
+>>
+
+@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+/*
+ * Many of the following rules were merged into CSharp3.stg.
+ */
+
+@rule.preamble() ::= <<
+if (RuleLevel == 0)
+	DebugListener.Commence();
+IncRuleLevel();
+>>
+//@rule.preamble() ::= <<
+//try
+//{
+//	dbg.EnterRule( GrammarFileName, "<ruleName>" );
+//	if ( RuleLevel == 0 )
+//	{
+//		dbg.Commence();
+//	}
+//	IncRuleLevel();
+//	dbg.Location( <ruleDescriptor.tree.line>, <ruleDescriptor.tree.charPositionInLine> );<\n>
+//>>
+
+@rule.postamble() ::= <<
+DecRuleLevel();
+if (RuleLevel == 0)
+	DebugListener.Terminate();
+>>
+//@rule.postamble() ::= <<
+//dbg.Location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);<\n>
+//}
+//finally
+//{
+//	dbg.ExitRule( GrammarFileName, "<ruleName>" );
+//	DecRuleLevel();
+//	if ( RuleLevel == 0 )
+//	{
+//		dbg.Terminate();
+//	}
+//}<\n>
+//>>
+
+//@insertSynpreds.start() ::= "dbg.BeginBacktrack( state.backtracking );"
+//@insertSynpreds.stop() ::= "dbg.EndBacktrack( state.backtracking, success );"
+
+// Common debug event triggers used by region overrides below
+
+//enterSubRule() ::= <<
+//try
+//{
+//	dbg.EnterSubRule( <decisionNumber> );<\n>
+//>>
+
+//exitSubRule() ::= <<
+//}
+//finally
+//{
+//	dbg.ExitSubRule( <decisionNumber> );
+//}<\n>
+//>>
+
+//enterDecision() ::= <<
+//try
+//{
+//	dbg.EnterDecision( <decisionNumber> );<\n>
+//>>
+
+//exitDecision() ::= <<
+//}
+//finally
+//{
+//	dbg.ExitDecision( <decisionNumber> );
+//}<\n>
+//>>
+
+//enterAlt(n) ::= "dbg.EnterAlt( <n> );<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+//@block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+//@block.postdecision() ::= "<exitDecision()>"
+
+//@block.postbranch() ::= "<exitSubRule()>"
+
+//@ruleBlock.predecision() ::= "<enterDecision()>"
+
+//@ruleBlock.postdecision() ::= "<exitDecision()>"
+
+//@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+//@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+//@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+//@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+//@positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+//@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+//@positiveClosureBlock.earlyExitException() ::=
+//	"dbg.RecognitionException( eee<decisionNumber> );<\n>"
+
+//@closureBlock.preloop() ::= "<enterSubRule()>"
+
+//@closureBlock.postloop() ::= "<exitSubRule()>"
+
+//@closureBlock.predecision() ::= "<enterDecision()>"
+
+//@closureBlock.postdecision() ::= "<exitDecision()>"
+
+//@altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+//@element.prematch() ::=
+//	"dbg.Location( <it.line>, <it.pos> );"
+
+//@matchSet.mismatchedSetException() ::=
+//	"dbg.RecognitionException( mse );"
+
+//@dfaState.noViableAltException() ::= "dbg.RecognitionException( nvae );"
+
+//@dfaStateSwitch.noViableAltException() ::= "dbg.RecognitionException( nvae );"
+
+//dfaDecision(decisionNumber,description) ::= <<
+//try
+//{
+//	isCyclicDecision = true;
+//	<super.dfaDecision(...)>
+//}
+//catch ( NoViableAltException nvae )
+//{
+//	dbg.RecognitionException( nvae );
+//	throw nvae;
+//}
+//>>
+
+//@cyclicDFA.errorMethod() ::= <<
+//public override void Error( NoViableAltException nvae )
+//{
+//	((DebugParser)recognizer).dbg.RecognitionException( nvae );
+//}
+//>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+EvalPredicate(<pred>, "<description>")
+>>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ST.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/CSharp3/ST.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Cpp/Cpp.stg b/tool/src/main/resources/org/antlr/codegen/templates/Cpp/Cpp.stg
new file mode 100755
index 0000000..d94024b
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Cpp/Cpp.stg
@@ -0,0 +1,2597 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2009 Gokulakannan Somasundaram,
+ 
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*
+ * This code-generating template and the associated Cpp runtime were produced by:
+ * Gokulakannan Somasundaram ( heavy lifting from C Run-time by Jim Idle )
+ */
+cTypeInitMap ::= [
+	"int"		    : "0",              // Integers     start out being 0
+	"long"		    : "0",              // Longs        start out being 0
+	"float"		    : "0.0",           // Floats       start out being 0
+	"double"	    : "0.0",           // Doubles      start out being 0
+	"bool"    	    : "false",   // Booleans     start out being Antlr C for false
+	"byte"		    : "0",              // Bytes        start out being 0
+	"short"		    : "0",              // Shorts       start out being 0
+	"char"		    : "0"              // Chars        start out being 0
+]
+
+leadIn(type) ::=
+<<
+/** \file
+ *  This <type> file was generated by $ANTLR version <ANTLRVersion>
+ *
+ *     -  From the grammar source file : <fileName>
+ *     -                            On : <generatedTimestamp>
+<if(LEXER)>
+ *     -                 for the lexer : <name>Lexer
+<endif>
+<if(PARSER)>
+ *     -                for the parser : <name>Parser
+<endif>
+<if(TREE_PARSER)>
+ *     -           for the tree parser : <name>TreeParser
+<endif>
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * C++ language generator and runtime by Gokulakannan Somasundaram ( heavy lifting from C Run-time by Jim Idle )
+ *
+ *
+>>
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment,
+            recognizer,
+            name,
+            tokens,
+            tokenNames,
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            buildAST,
+            rewriteMode,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            superClass,
+            literals
+            ) ::=
+<<
+<leadIn("C++ source")>
+*/
+// [The "BSD license"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram, ElectronDB
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+<if(actions.(actionScope).header)>
+
+/* =============================================================================
+ * This is what the grammar programmer asked us to put at the top of every file.
+ */
+<actions.(actionScope).header>
+/* End of Header action.
+ * =============================================================================
+ */
+<endif>
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#include    "<name>.hpp"
+<if(trace)>
+#include \<iostream>
+<endif>
+<if(recognizer.grammar.delegators)>
+// Include delegator definition header files
+//
+<recognizer.grammar.delegators: {g|#include "<g.recognizerName>.hpp" }; separator="\n">
+<endif>
+
+<actions.(actionScope).postinclude>
+/* ----------------------------------------- */
+
+<docComment>
+
+<if(literals)>
+
+<beginNamespace(actions)>
+
+/** String literals used by <name> that we must do things like MATCHS() with.
+ *  C will normally just lay down 8 bit characters, and you can use L"xxx" to
+ *  get wchar_t, but wchar_t is 16 bits on Windows, which is not UTF32 and so
+ *  we perform this little trick of defining the literals as arrays of UINT32
+ *  and passing in the address of these.
+ */
+<literals:{it | static ANTLR_UCHAR	lit_<i>[]  = <it>;}; separator="\n">
+
+<endNamespace(actions)>
+
+<endif>
+
+/* ============================================================================= */
+
+/* =============================================================================
+ * Start of recognizer
+ */
+
+<recognizer>
+
+/* End of code
+ * =============================================================================
+ */
+
+>>
+headerFileExtension() ::= ".hpp"
+
+beginNamespace(actions) ::= <%
+	<if(actions.(actionScope).namespace)>
+	namespace <actions.(actionScope).namespace> {
+	<endif>
+%>
+
+endNamespace(actions) ::= <%
+	<if(actions.(actionScope).namespace)>
+	}
+	<endif>
+%>
+
+
+headerFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment,
+            recognizer,
+            name,
+            tokens,
+            tokenNames,
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            buildAST,
+            rewriteMode,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            superClass,
+            literals
+        ) ::=
+<<
+<leadIn("C++ header")>
+<if(PARSER)>
+ * The parser <mainName()> has the callable functions (rules) shown below,
+<endif>
+<if(LEXER)>
+ * The lexer <mainName()> has the callable functions (rules) shown below,
+<endif>
+<if(TREE_PARSER)>
+ * The tree parser <mainName()> has the callable functions (rules) shown below,
+<endif>
+ * which will invoke the code for the associated rule in the source grammar
+ * assuming that the input stream is pointing to a token/text stream that could begin
+ * this rule.
+ *
+ * For instance if you call the first (topmost) rule in a parser grammar, you will
+ * get the results of a full parse, but calling a rule half way through the grammar will
+ * allow you to pass part of a full token stream to the parser, such as for syntax checking
+ * in editors and so on.
+ *
+ */
+// [The "BSD license"]
+// Copyright (c) 2005-2009 Gokulakannan Somasundaram. 
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+//    derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef	_<name>_H
+#define _<name>_H
+<actions.(actionScope).preincludes>
+/* =============================================================================
+ * Standard antlr3 C++ runtime definitions
+ */
+#include \<antlr3.hpp>
+
+/* End of standard antlr 3 runtime definitions
+ * =============================================================================
+ */
+
+<actions.(actionScope).includes>
+
+<if(recognizer.grammar.delegates)>
+// Include delegate definition header files
+//
+<recognizer.grammar.delegates: {g|#include	"<g.recognizerName>.hpp"}; separator="\n">
+
+<endif>
+
+
+<actions.(actionScope).header>
+
+#ifdef	WIN32
+// Disable: Unreferenced parameter,							- Rules with parameters that are not used
+//          constant conditional,							- ANTLR realizes that a prediction is always true (synpred usually)
+//          initialized but unused variable					- tree rewrite variables declared but not needed
+//          Unreferenced local variable						- lexer rule declares but does not always use _type
+//          potentially uninitialized variable used		- retval always returned from a rule
+//			unreferenced local function has been removed	- usually getTokenNames or freeScope, they can go without warnings
+//
+// These are only really displayed at warning level /W4 but that is the code ideal I am aiming at
+// and the codegen must generate some of these warnings by necessity, apart from 4100, which is
+// usually generated when a parser rule is given a parameter that it does not use. Mostly though
+// this is a matter of orthogonality hence I disable that one.
+//
+#pragma warning( disable : 4100 )
+#pragma warning( disable : 4101 )
+#pragma warning( disable : 4127 )
+#pragma warning( disable : 4189 )
+#pragma warning( disable : 4505 )
+#pragma warning( disable : 4701 )
+#endif
+<if(backtracking)>
+
+/* ========================
+ * BACKTRACKING IS ENABLED
+ * ========================
+ */
+<endif>
+
+<beginNamespace(actions)>
+
+<if(recognizer.grammar.delegators)>
+// Include delegator definition classes
+//
+<recognizer.grammar.delegators: {g|class <g.recognizerName>; }; separator="\n">
+<endif>
+
+<actions.(actionScope).traits>
+typedef <name>Traits <name>ImplTraits;
+
+<rules:{r | <if(r.ruleDescriptor.isSynPred)> struct <r.ruleDescriptor.name> {\}; <endif>}; separator="\n">
+
+class <name>Tokens
+{
+public:
+	/** Symbolic definitions of all the tokens that the <grammarType()> will work with.
+	 *
+	 * Antlr will define EOF, but we can't use that as it is too common in
+	 * C header files and that would be confusing. There is no way to filter this out at the moment
+	 * so we just undef it here for now. That isn't the value we get back from C recognizers
+	 * anyway. We are looking for ANTLR_TOKEN_EOF.
+	 */
+	enum Tokens 
+	{
+		EOF_TOKEN = <name>ImplTraits::CommonTokenType::TOKEN_EOF
+		<tokens:{it | , <it.name> = <it.type> }; separator="\n">
+	};
+
+};
+
+/** Context tracking structure for <mainName()>
+ */
+class <name> : public <componentBaseType()>, public <name>Tokens
+{
+public:
+	typedef <name>ImplTraits ImplTraits;
+	typedef <name> ComponentType;
+	typedef ComponentType::StreamType StreamType;
+	typedef <componentBaseType()> BaseType;
+	typedef ImplTraits::RecognizerSharedStateType\<StreamType> RecognizerSharedStateType;
+	typedef StreamType InputType;
+<if(recognizer.filterMode)>
+	static const bool IsFiltered = true;
+<else>
+	static const bool IsFiltered = false;
+<endif>
+	
+	<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeDecl(it)><endif>}>
+	<rules:{r | <if(r.ruleDescriptor.ruleScope)><ruleAttributeScopeDecl(scope=r.ruleDescriptor.ruleScope)><endif>}>
+	
+private:	
+<if(recognizer.grammar.delegates)>
+	<recognizer.grammar.delegates:
+         {g|<g.recognizerName>*	 m_<g:delegateName()>;}; separator="\n">
+<endif>
+<if(recognizer.grammar.delegators)>
+	<recognizer.grammar.delegators:
+         {g|<g.recognizerName>*	 m_<g:delegateName()>;}; separator="\n">
+<endif>
+<scopes:{it | <if(it.isDynamicGlobalScope)>
+    <globalAttributeScopeDef(it)>
+<endif>}; separator="\n\n">
+<rules: {r |<if(r.ruleDescriptor.ruleScope)>
+    <ruleAttributeScopeDef(scope=r.ruleDescriptor.ruleScope)>
+<endif>}>
+    <@members>
+    <@end>
+
+public:
+    <name>(InputType* instream<recognizer.grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>);
+    <name>(InputType* instream, RecognizerSharedStateType* state<recognizer.grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>);
+    
+    void init(InputType* instream <recognizer.grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}> );
+
+    <actions.(actionScope).context>
+
+<if(LEXER)>
+<if(recognizer.filterMode)>
+    void  memoize(ANTLR_MARKER  ruleIndex, ANTLR_MARKER  ruleParseStart);
+    bool  alreadyParsedRule(ANTLR_MARKER	ruleIndex);
+    <filteringNextToken()>
+<endif>    
+    <rules:{r | <if(!r.ruleDescriptor.isSynPred)><headerReturnType(ruleDescriptor=r.ruleDescriptor)>  m<r.ruleDescriptor.name>( <r.ruleDescriptor.parameterScope:parameterScope()>);<endif>}; separator="\n">
+    <rules:{r | <if(r.ruleDescriptor.isSynPred)>  <headerReturnType(ruleDescriptor=r.ruleDescriptor)> msynpred( antlr3::ClassForwarder\< <r.ruleDescriptor.name> > <r.ruleDescriptor.parameterScope:parameterScope()>);
+    void m<r.ruleDescriptor.name>_fragment (<r.ruleDescriptor.parameterScope:parameterScope()>);<endif>}; separator="\n">
+<endif>
+<if(!LEXER)>
+    <rules:{r | <headerReturnScope(ruleDescriptor=r.ruleDescriptor)>}>
+    <rules:{r | <if(!r.ruleDescriptor.isSynPred)> <headerReturnType(ruleDescriptor=r.ruleDescriptor)> <r.ruleDescriptor.name> (<r.ruleDescriptor.parameterScope:parameterScope()>); <endif>}; separator="\n">
+    <rules:{r | <if(r.ruleDescriptor.isSynPred)>  <headerReturnType(ruleDescriptor=r.ruleDescriptor)> msynpred( antlr3::ClassForwarder\< <r.ruleDescriptor.name> > <r.ruleDescriptor.parameterScope:parameterScope()>);
+    void m<r.ruleDescriptor.name>_fragment (<r.ruleDescriptor.parameterScope:parameterScope()>);<endif>}; separator="\n">
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+<recognizer.grammar.delegatedRules:{ruleDescriptor|
+    <headerReturnType(ruleDescriptor)> <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope()>);}; separator="\n">
+<endif>
+
+    const char *    getGrammarFileName();
+    void            reset();
+    ~<name>();
+
+};
+
+// Function prototypes for the constructor functions that external translation units
+// such as delegators and delegates may wish to call.
+//
+<if(!recognizer.grammar.grammarIsRoot)>
+extern ANTLR_UINT8*   <recognizer.grammar.composite.rootGrammar.recognizerName>TokenNames[];
+<endif>
+
+
+/* End of token definitions for <name>
+ * =============================================================================
+ */
+
+<endNamespace(actions)>
+
+#endif
+
+/* END - Note:Keep extra line feed to satisfy UNIX systems */
+
+>>
+
+grammarType() ::= <%
+<if(PARSER)>
+parser
+<endif>
+<if(LEXER)>
+lexer
+<endif>
+<if(TREE_PARSER)>
+tree parser
+<endif>
+%>
+
+componentType() ::= <<
+<if(PARSER)>
+<name>ImplTraits::ParserType
+<endif>
+<if(LEXER)>
+<name>ImplTraits::LexerType
+<endif>
+<if(TREE_PARSER)>
+<name>ImplTraits::TreeParserType
+<endif>
+>>
+
+componentBaseType() ::= <%
+<if(PARSER)>
+<name>ImplTraits::BaseParserType
+<endif>
+<if(LEXER)>
+<name>ImplTraits::BaseLexerType
+<endif>
+<if(TREE_PARSER)>
+<name>ImplTraits::BaseTreeParserType
+<endif>
+%>
+
+streamType() ::= <<
+<if(PARSER)>
+<name>ImplTraits::ParserType::StreamType
+<endif>
+<if(LEXER)>
+<name>ImplTraits::LexerType::StreamType
+<endif>
+<if(TREE_PARSER)>
+<name>ImplTraits::TreeParserType::StreamType
+<endif>
+>>
+
+
+mainName() ::= <%
+<if(PARSER)>
+<name>
+<endif>
+<if(LEXER)>
+<name>
+<endif>
+<if(TREE_PARSER)>
+<name>
+<endif>
+%>
+
+headerReturnScope(ruleDescriptor) ::= "<returnScope(scope=ruleDescriptor.returnScope)>"
+
+headerReturnType(ruleDescriptor) ::= <%
+<if(LEXER)>
+<if(!ruleDescriptor.isSynPred)>
+ void
+<else>
+ <returnType()>
+<endif>
+<else>
+ <returnType()>
+<endif>
+%>
+
+// Produce the lexer output
+//
+lexer(  grammar,
+	name,
+        tokens,
+        scopes,
+        rules,
+        numRules,
+        filterMode,
+        superClass,
+        labelType="ImplTraits::CommonTokenType*") ::= <<
+
+using namespace antlr3;
+
+<beginNamespace(actions)>
+
+<if(filterMode)>
+
+/* Override the normal MEMOIZE and HAVEALREADYPARSED macros as this is a filtering
+ * lexer. In filter mode, the memoizing and backtracking are gated at BACKTRACKING > 1 rather
+ * than just BACKTRACKING. In some cases this might generate code akin to:
+ *   if (BACKTRACKING) if (BACKTRACKING > 1) memoize.
+ */
+void	<name>::memoize(ANTLR_MARKER  ruleIndex, ANTLR_MARKER  ruleParseStart)
+{
+	BaseType* base = this;
+	if ( this->get_backtracking()>1 ) 
+		base->memoize( ruleIndex, ruleParseStart );
+
+}
+
+bool	<name>::alreadyParsedRule(ANTLR_MARKER	ruleIndex)
+{
+	BaseType* base = this;
+	if ( this->get_backtracking() > 1 )
+		return base->haveParsedRule(ruleIndex);
+	return false;
+}
+
+<endif>
+
+/* =========================================================================
+ * Lexer matching rules end.
+ * =========================================================================
+ */
+
+<scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}>
+
+<actions.lexer.members>
+
+<name>::~<name>()
+{
+<if(memoize)>
+	RuleMemoType* rulememo = this->getRuleMemo();
+	if(rulememo != NULL)
+	{
+		delete rulememo;
+		this->setRuleMemo(NULL);
+	}
+<endif>
+<if(grammar.directDelegates)>
+	// Free the lexers that we delegated functions to.
+	// NULL the state so we only free it once.
+	//
+	<grammar.directDelegates:
+         {g| m_<g:delegateName()>->set_lexstate(NULL);
+         delete m_<g:delegateName()>; }; separator="\n">
+<endif>
+}
+
+void
+<name>::reset()
+{
+    this->get_rec()->reset();
+}
+
+/** \brief Name of the grammar file that generated this code
+ */
+static const char fileName[] = "<fileName>";
+
+/** \brief Return the name of the grammar file that generated this code.
+ */
+const char* <name>::getGrammarFileName()
+{
+	return fileName;
+}
+
+/** \brief Create a new lexer called <name>
+ *
+ * \param[in]    instream Pointer to an initialized input stream
+ * \return
+ *     - Success p<name> initialized for the lex start
+ *     - Fail NULL
+ */
+<name>::<name>(StreamType* instream<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>)
+:<name>ImplTraits::BaseLexerType(ANTLR_SIZE_HINT, instream, NULL)
+{
+	// See if we can create a new lexer with the standard constructor
+	//
+	this->init(instream <grammar.delegators:{g|, <g:delegateName()>}>);
+}
+
+/** \brief Create a new lexer called <name>
+ *
+ * \param[in]    instream Pointer to an initialized input stream
+ * \param[in]    state Previously created shared recognizer state
+ * \return
+ *     - Success p<name> initialized for the lex start
+ *     - Fail NULL
+ */
+<name>::<name>(StreamType* instream, RecognizerSharedStateType* state<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>)
+:<name>ImplTraits::BaseLexerType(ANTLR_SIZE_HINT, instream, state)
+{
+	this->init(instream <grammar.delegators:{g|, <g:delegateName()>} >);
+}
+
+void <name>::init(StreamType* instream<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>} >)
+{
+    /* -------------------------------------------------------------------
+     * Memory for basic structure is allocated, now to fill in
+     * the base ANTLR3 structures. We initialize the function pointers
+     * for the standard ANTLR3 lexer function set, but upon return
+     * from here, the programmer may set the pointers to provide custom
+     * implementations of each function.
+     *
+     * We don't use the macros defined in <name>.h here so you can get a sense
+     * of what goes where.
+     */
+
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+    // Create a LIST for recording rule memos.
+    //
+    this->setRuleMemo( new IntTrie(15) );	/* 16 bit depth is enough for 32768 rules! */
+<endif>
+<endif>
+
+<if(grammar.directDelegates)>
+	// Initialize the lexers that we are going to delegate some
+	// functions to.
+	//
+	<grammar.directDelegates:
+         {g|m_<g:delegateName()> = new <g.recognizerName>(instream, this->get_lexstate(), this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+<endif>
+<if(grammar.delegators)>
+	// Install the pointers back to lexers that will delegate us to perform certain functions
+	// for them.
+	//
+	<grammar.delegators:
+         {g| m_<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+<endif>
+}
+
+<if(cyclicDFAs)>
+
+/* =========================================================================
+ * DFA tables for the lexer
+ */
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+/* =========================================================================
+ * End of DFA tables for the lexer
+ */
+<endif>
+
+/* =========================================================================
+ * Functions to match the lexer grammar defined tokens from the input stream
+ */
+
+<rules; separator="\n\n">
+
+/* =========================================================================
+ * Lexer matching rules end.
+ * =========================================================================
+ */
+<if(synpreds)>
+
+/* =========================================================================
+ * Lexer syntactic predicates
+ */
+<synpreds:{p | <lexerSynpred(predname=p)>}>
+/* =========================================================================
+ * Lexer syntactic predicates end.
+ * =========================================================================
+ */
+<endif>
+
+/* End of Lexer code
+ * ================================================
+ * ================================================
+ */
+ 
+<endNamespace(actions)>
+
+>>
+
+
+filteringNextToken() ::= <<
+<name>ImplTraits::CommonTokenType*
+<name>ImplTraits::TokenSourceType::nextToken()
+{
+    LexerType*   lexer;
+    typename LexerType::RecognizerSharedStateType* state;
+
+    lexer   = this->get_super();
+    state	= lexer->get_lexstate();
+
+    /* Get rid of any previous token (token factory takes care of
+     * any deallocation when this token is finally used up).
+     */
+    state->set_token_present( false );
+    state->set_error( false );	    /* Start out without an exception	*/
+    state->set_failedflag(false);
+
+    /* Record the start of the token in our input stream.
+     */
+    state->set_tokenStartCharIndex( lexer->index() );
+    state->set_tokenStartCharPositionInLine( lexer->getCharPositionInLine() );
+    state->set_tokenStartLine( lexer->getLine() );
+    state->set_text("");
+
+    /* Now call the matching rules and see if we can generate a new token
+     */
+    for	(;;)
+    {
+		if (lexer->LA(1) == ANTLR_CHARSTREAM_EOF)
+		{
+			/* Reached the end of the stream, nothing more to do.
+			 */
+			CommonTokenType&    teof = m_eofToken;
+
+			teof.set_startIndex(lexer->getCharIndex());
+			teof.set_stopIndex(lexer->getCharIndex());
+			teof.setLine(lexer->getLine());
+			return  &teof;
+		}
+
+		state->set_token_present(false);
+		state->set_error(false);	    /* Start out without an exception	*/
+
+		{
+			ANTLR_MARKER   m;
+
+			m	= this->get_istream()->mark();
+			state->set_backtracking(1);				/* No exceptions */
+			state->set_failedflag(false);
+
+			/* Call the generated lexer, see if it can get a new token together.
+			 */
+			lexer->mTokens();
+    			state->set_backtracking(0);
+
+    		<! mTokens backtracks with synpred at BACKTRACKING==2
+				and we set the synpredgate to allow actions at level 1. !>
+
+			if(state->get_failed())
+			{
+				lexer->rewind(m);
+				lexer->consume(); <! advance one char and try again !>
+			}
+			else
+			{
+				lexer->emit();					/* Assemble the token and emit it to the stream */
+				TokenType& tok = state->get_token();
+				return &tok;
+			}
+		}
+    }
+}
+>>
+
+actionGate() ::= "this->get_backtracking()==0"
+
+filteringActionGate() ::= "this->get_backtracking()==1"
+
+/** How to generate a parser */
+genericParser(  grammar, name, scopes, tokens, tokenNames, rules, numRules,
+                bitsets, inputStreamType, superClass,
+                labelType, members,	rewriteElementType,
+                filterMode, ASTLabelType="ImplTraits::TreeType*") ::= <<
+
+using namespace antlr3;
+<if(grammar.grammarIsRoot)>
+/** \brief Table of all token names in symbolic order, mainly used for
+ *         error reporting.
+ */
+ANTLR_UINT8* <name>TokenNames[<length(tokenNames)>+4]
+     = {
+        (ANTLR_UINT8*) "\<invalid>",       /* String to print to indicate an invalid token */
+        (ANTLR_UINT8*) "\<EOR>",
+        (ANTLR_UINT8*) "\<DOWN>",
+        (ANTLR_UINT8*) "\<UP>",
+        <tokenNames:{it |(ANTLR_UINT8*) <it>}; separator=",\n">
+       };
+<endif>
+
+    <@members>
+
+    <@end>
+
+/** \brief Name of the grammar file that generated this code
+ */
+static const char fileName[] = "<fileName>";
+
+/** \brief Return the name of the grammar file that generated this code.
+ */
+const char* <name>::getGrammarFileName()
+{
+	return fileName;
+}
+/** \brief Create a new <name> parser and return a context for it.
+ *
+ * \param[in] instream Pointer to an input stream interface.
+ *
+ * \return Pointer to new parser context upon success.
+ */
+<name>::<name>( StreamType* instream<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>)
+<constructorInitializerType("NULL")>
+{
+	// See if we can create a new parser with the standard constructor
+	//
+	this->init(instream<grammar.delegators:{g|, <g:delegateName()>}>);
+}
+
+/** \brief Create a new <name> parser and return a context for it.
+ *
+ * \param[in] instream Pointer to an input stream interface.
+ *
+ * \return Pointer to new parser context upon success.
+ */
+<name>::<name>( StreamType* instream, RecognizerSharedStateType* state<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>)
+<constructorInitializerType("state")>
+{
+	this->init(instream <grammar.delegators:{g|, <g:delegateName()>}>);
+}
+
+void <name>::init(StreamType* instream<grammar.delegators:{g|, <g.recognizerName>* <g:delegateName()>}>)
+{
+    <actions.parser.apifuncs>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+    /* Create a LIST for recording rule memos.
+     */
+     typedef RecognizerSharedStateType::RuleMemoType RuleMemoType;
+     this->setRuleMemo( new RuleMemoType(15) );	/* 16 bit depth is enough for 32768 rules! */<\n>
+<endif>
+<endif>
+<if(grammar.directDelegates)>
+	// Initialize the lexers that we are going to delegate some
+	// functions to.
+	//
+	<grammar.directDelegates:
+	      	{g|m_<g:delegateName()> = new <g.recognizerName>(instream, this->get_psrstate(), this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+	<endif>
+        <if(grammar.delegators)>
+     	// Install the pointers back to lexers that will delegate us to perform certain functions
+     	// for them.
+     	//
+     		<grammar.delegators: {g| m_<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+	<endif>
+	/* Install the token table
+	*/
+	this->get_psrstate()->set_tokenNames( <grammar.composite.rootGrammar.recognizerName>TokenNames );
+
+	<@debugStuff()>
+
+}
+
+void
+<name>::reset()
+{
+    this->get_rec()->reset();
+}
+
+/** Free the parser resources
+ */
+<name>::~<name>()
+ {
+    <@cleanup>
+    <@end>
+<if(grammar.directDelegates)>
+	// Free the parsers that we delegated functions to.
+	// NULL the state so we only free it once.
+	//
+	<grammar.directDelegates:
+         {g| m_<g:delegateName()>->set_psrstate( NULL );
+         delete m_<g:delegateName()>;}; separator="\n">
+<endif>
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+	if(this->getRuleMemo() != NULL)
+	{
+		delete this->getRuleMemo();
+		this->setRuleMemo(NULL);
+	}
+<endif>
+<endif>
+}
+
+/** Return token names used by this <grammarType()>
+ *
+ * The returned pointer addresses the token names table; the token
+ * number is used as the index into it.
+ *
+ * \return Pointer to first char * in the table.
+ */
+static ANTLR_UINT8**	getTokenNames()
+{
+        return <grammar.composite.rootGrammar.recognizerName>TokenNames;
+}
+
+    <members>
+
+/* Declare the bitsets
+ */
+<bitsets:{it | <bitsetDeclare(bitsetname={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits, traits={<name>ImplTraits} )>}>
+
+
+<if(cyclicDFAs)>
+
+/* =========================================================================
+ * DFA tables for the parser
+ */
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+/* =========================================================================
+ * End of DFA tables for the parser
+ */
+<endif>
+
+/* ==============================================
+ * Parsing rules
+ */
+<rules; separator="\n\n">
+<if(grammar.delegatedRules)>
+	// Delegated methods that appear to be a part of this
+	// parser
+	//
+<grammar.delegatedRules:{ruleDescriptor|
+    <returnType()> <name>::<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope()>)
+    {
+        <if(ruleDescriptor.hasReturnValue)>return <endif>m_<ruleDescriptor.grammar:delegateName()>-><ruleDescriptor.name>(<if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", "><endif>);
+    \}}; separator="\n">
+
+<endif>
+/* End of parsing rules
+ * ==============================================
+ */
+
+/* ==============================================
+ * Syntactic predicates
+ */
+<synpreds:{p | <synpred(predname=p)>}>
+/* End of syntactic predicates
+ * ==============================================
+ */
+
+>>
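
The genericParser template emits two constructors, one taking only the input stream and one also taking an existing shared state, and both funnel into a single init() routine that wires up delegates and installs the token-name table. The two-constructor/init pattern it encodes looks like the following sketch, using stand-in types rather than the generated ones.

    #include <iostream>

    struct TokenStream { };   // stand-in for the generated StreamType
    struct SharedState { };   // stand-in for RecognizerSharedStateType

    // Hypothetical grammar name "Demo"; both construction paths run the same init().
    class DemoParser {
    public:
        explicit DemoParser(TokenStream* in) : DemoParser(in, nullptr) {}
        DemoParser(TokenStream* in, SharedState* state) : input_(in), state_(state) { init(); }
    private:
        void init() {
            // shared setup lives here: token-name table, delegate wiring, memo table
            std::cout << "init: stream=" << input_
                      << " state=" << (state_ ? "supplied" : "fresh") << '\n';
        }
        TokenStream* input_;
        SharedState* state_;
    };

    int main() {
        TokenStream ts;
        DemoParser p(&ts);
    }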
+
+constructorInitializerType(rec_state) ::=<<
+<if(PARSER)>
+    :ImplTraits::BaseParserType(ANTLR_SIZE_HINT, instream, <rec_state>)
+<endif>
+<if(TREE_PARSER)>
+    :ImplTraits::BaseTreeParserType(ANTLR_SIZE_HINT, instream, <rec_state>)
+<endif>
+>>
+
+parser(	grammar,
+		name,
+		scopes,
+		tokens,
+		tokenNames,
+		rules,
+		numRules,
+		bitsets,
+		ASTLabelType,
+		superClass="Parser",
+		labelType="ImplTraits::CommonTokenType*",
+		members={<actions.parser.members>}
+		) ::= <<
+<beginNamespace(actions)>
+<genericParser(inputStreamType="CommonTokenStreamType*", rewriteElementType="Token", filterMode=false, ...)>
+<endNamespace(actions)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(	grammar,
+			name,
+			scopes,
+			tokens,
+			tokenNames,
+			globalAction,
+			rules,
+			numRules,
+			bitsets,
+			filterMode,
+			labelType={<ASTLabelType>},
+			ASTLabelType="ImplTraits::TreeType*",
+			superClass="TreeParser",
+			members={<actions.treeparser.members>}
+			) ::= <<
+<beginNamespace(actions)>
+<genericParser(inputStreamType="CommonTreeNodeStream*", rewriteElementType="Node", ...)>
+<endNamespace(actions)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc., we generate the simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>
+void <name>::m<ruleName>_fragment( <ruleDescriptor.parameterScope:parameterScope()> )
+{
+	<ruleLabelDefs()>
+	<ruleLabelInitializations()>
+<if(trace)>
+    ANTLR_PRINTF("enter <ruleName> %d failed = %d, backtracking = %d\\n", this->LT(1),failed,this->get_backtracking() );
+    <block>
+    ANTLR_PRINTF("exit <ruleName> %d, failed = %d, backtracking = %d\\n", this->LT(1),failed,this->get_backtracking());
+
+<else>
+    <block>
+<endif>
+
+goto rule<ruleDescriptor.name>Ex; /* Prevent compiler warnings */
+rule<ruleDescriptor.name>Ex: ;
+}
+// $ANTLR end <ruleName>
+>>
+
+synpred(predname) ::= <<
+
+bool <name>::msynpred( antlr3::ClassForwarder\< <predname> >  )
+{
+    ANTLR_MARKER   start;
+    bool  success;
+    
+    this->inc_backtracking();
+    <@start()>
+    start	= this->mark();
+    this->m<predname>_fragment();	    // can never throw exception
+    success	= !( this->get_failedflag() );
+    this->rewind(start);
+    <@stop()>
+    this->dec_backtracking();
+    this->set_failedflag(false);
+    return success;
+}<\n>
+>>
+
+lexerSynpred(predname) ::= <<
+<synpred(predname)>
+>>
+
+ruleMemoization(rname) ::= <<
+<if(memoize)>
+if ( (this->get_backtracking()>0) && (this->haveParsedRule(<ruleDescriptor.index>)) )
+{
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!ruleDescriptor.isSynPred)>
+	retval.start = 0;<\n>
+<endif>
+<endif>
+    <(ruleDescriptor.actions.after):execAfter()>
+    <finalCode(finalBlock=finally)>
+<if(!ruleDescriptor.isSynPred)>
+    <scopeClean()><\n>
+<endif>
+    return <ruleReturnValue()>;
+}
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+if  (this->hasException())
+{
+    goto rule<ruleDescriptor.name>Ex;
+}
+<if(backtracking)>
+if (this->hasFailed())
+{
+    <scopeClean()>
+    <@debugClean()>
+    return <ruleReturnValue()>;
+}
+<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>
+if (this->get_backtracking()>0)
+{
+    this->set_failedflag( true );
+    <scopeClean()>
+    return <ruleReturnValue()>;
+}
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+/**
+ * $ANTLR start <ruleName>
+ * <fileName>:<description>
+ */
+<returnType()>
+<name>::<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>)
+{
+    <if(trace)>ANTLR_PRINTF("enter <ruleName> %s failed=%d, backtracking=%d\n", this->LT(1), this->get_backtracking() );<endif>
+    <ruleDeclarations()>
+    <ruleDescriptor.actions.declarations>
+    <ruleLabelDefs()>
+    <ruleInitializations()>
+    <ruleDescriptor.actions.init>
+    <ruleMemoization(rname=ruleName)>
+    <ruleLabelInitializations()>
+    
+    <if(actions.(actionScope).rulecatch)>
+    	try {
+    <else> 
+    <if(exceptions)>
+        try {
+    <endif>
+    <endif>
+    <@preamble()>
+    {
+        <block>
+    }
+    <ruleCleanUp()>
+   
+<if(exceptions)>
+    <(ruleDescriptor.actions.after):execAfter()>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+    <if(!emptyRule)>
+    	if (this->hasException())
+        {
+            this->preporterror();
+            this->precover();
+            <@setErrorReturnValue()>
+        }
+        <if(ruleDescriptor.actions.after)>
+        else
+        {
+            <(ruleDescriptor.actions.after):execAfter()>
+        }<\n>
+        <endif>
+	<if(actions.(actionScope).rulecatch)>
+	    } <actions.(actionScope).rulecatch>
+        <endif>
+    <endif>
+<endif>
+
+    <if(trace)>ANTLR_PRINTF("exit <ruleName> %d failed=%s backtracking=%s\n", this->LT(1), failed, this->get_backtracking() );<endif>
+    <memoize()>
+<if(finally)>
+    <finalCode(finalBlock=finally)>
+<endif>
+    <scopeClean()>
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+/* $ANTLR end <ruleName> */
+>>
+
+finalCode(finalBlock) ::= <<
+{
+    <finalBlock>
+}
+
+>>
+
+catch(decl,action) ::= <<
+/* catch(decl,action)
+ */
+} catch (<e.decl>) {
+   <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> retval(this);<\n>
+<else>
+<if(PARSER)>
+    <name>ImplTraits::RuleReturnValueType _antlr_rule_exit(this);
+<endif>  
+<if(ruleDescriptor.returnScope)>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name>;
+}>
+<endif>
+<endif>
+<if(memoize)>
+ANTLR_MARKER <ruleDescriptor.name>_StartIndex;
+<endif>
+>>
+
+ruleInitializations() ::= <<
+/* Initialize rule variables
+ */
+<if(ruleDescriptor.returnScope)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.returnScope.attributes:{ a | <if(a.initValue)>retval.<a.name> = <a.initValue>;<endif> }>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a | <if(a.initValue)><a.name> = <a.initValue>;<endif> }>
+<endif>
+<endif>
+<if(memoize)>
+<ruleDescriptor.name>_StartIndex = this->index();<\n>
+<endif>
+<ruleDescriptor.useScopes:{it | m_<it>_stack.push(<it>Scope()); }; separator="\n">
+<ruleDescriptor.ruleScope:{it | m_<it.name>_stack.push(<it.name>Scope()); }; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it |<labelType> <it.label.text> = NULL;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it |ImplTraits::TokenPtrsListType list_<it.label.text>;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
+>>
+
+ruleLabelInitializations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!ruleDescriptor.isSynPred)>
+retval.call_start_placeholder();
+<endif>
+<endif>
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it |<labelType> <it.label.text> = NULL;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{it |ANTLR_UINT32 <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it | ImplTraits::IntTrieType<CommonTokenType>* list_<it.label.text>;}; separator="\n"
+>
+>>
+
+lexerRuleLabelInit() ::= <<
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it |list_<it.label.text> = new ImplTraits::IntTrieType<CommonTokenType>(31);}; separator="\n"
+>
+>>
+
+lexerRuleLabelFree() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it |<it.label.text> = NULL;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it | delete list_<it.label.text>;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+%>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( this->get_backtracking() > 0 ) { this->memoize(<ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+
+// This is where rules clean up and exit
+//
+goto rule<ruleDescriptor.name>Ex; /* Prevent compiler warnings */
+rule<ruleDescriptor.name>Ex: ;
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+<if(!ruleDescriptor.isSynPred)>
+retval.call_stop_placeholder();<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+scopeClean() ::= <<
+<ruleDescriptor.useScopes:{it | m_<it>_stack.pop(); }; separator="\n">
+<ruleDescriptor.ruleScope:{it | m_<it.name>_stack.pop(); }; separator="\n">
+
+>>
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules, which do not produce tokens.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+//   Comes from: <block.description>
+/** \brief Lexer rule generated by ANTLR3
+ *
+ * $ANTLR start <ruleName>
+ *
+ * Looks to match the characters that constitute the token <ruleName>
+ * from the attached input stream.
+ *
+ *
+ * \remark
+ *  - lexer->error == true if an exception was thrown.
+ */
+void <name>::m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>)
+{
+    ANTLR_UINT32	_type;
+    <ruleDeclarations()>
+    <ruleDescriptor.actions.declarations>
+    <lexerRuleLabelDefs()>
+    <if(trace)>
+    std::cout \<\< "enter <ruleName> '" \<\< (char)this->LA(1)
+              \<\< "' line=" \<\< this->getLine() \<\< ":" \<\< this->getCharPositionInLine()
+              \<\< " failed=" \<\< this->get_failedflag() \<\< " backtracking=" \<\< this->get_backtracking() \<\< std::endl;
+    <endif>
+
+<if(nakedBlock)>
+    <ruleMemoization(rname=ruleName)>
+    <lexerRuleLabelInit()>
+    <ruleDescriptor.actions.init>
+
+    <block><\n>
+<else>
+    <ruleMemoization(rname=ruleName)>
+    <lexerRuleLabelInit()>
+    _type	    = <ruleName>;
+
+    <ruleDescriptor.actions.init>
+
+    <block>
+	this->get_lexstate()->set_type(_type);
+<endif>
+    <if(trace)>
+    std::cout \<\< "exit <ruleName> '" \<\< (char)this->LA(1)
+              \<\< "' line=" \<\< this->getLine() \<\< ":" \<\< this->getCharPositionInLine()
+              \<\< " failed=" \<\< this->get_failedflag() \<\< " backtracking=" \<\< this->get_backtracking() \<\< std::endl;
+    <endif>
+    <ruleCleanUp()>
+    <lexerRuleLabelFree()>
+    <(ruleDescriptor.actions.after):execAfter()>
+    <memoize>
+}
+// $ANTLR end <ruleName>
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+/** This is the entry point into the lexer from an object that
+ *  wants to generate the next token, such as a CommonTokenStream
+ */
+void
+<name>::mTokens()
+{
+    <block><\n>
+
+    goto ruleTokensEx; /* Prevent compiler warnings */
+ruleTokensEx: ;
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+
+// <fileName>:<description>
+{
+    int alt<decisionNumber>=<maxAlt>;
+    <decls>
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    <@prebranch()>
+    switch (alt<decisionNumber>)
+    {
+	<alts:{a | <altSwitchCase(i,a)>}>
+    }
+    <@postbranch()>
+}
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+{
+    //  <fileName>:<description>
+
+    ANTLR_UINT32 alt<decisionNumber>;
+
+    alt<decisionNumber>=<maxAlt>;
+
+    <decls>
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>)
+    {
+	<alts:{a | <altSwitchCase(i,a)>}>
+    }
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+{
+    int cnt<decisionNumber>=0;
+    <decls>
+    <@preloop()>
+
+    for (;;)
+    {
+        int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	<decision>
+	<@postdecision()>
+	switch (alt<decisionNumber>)
+	{
+	    <alts:{a | <altSwitchCase(i,a)>}>
+	    default:
+
+		if ( cnt<decisionNumber> >= 1 )
+		{
+		    goto loop<decisionNumber>;
+		}
+		<ruleBacktrackFailure()>
+		<earlyExitEx()>
+		<@earlyExitException()>
+		goto rule<ruleDescriptor.name>Ex;
+	}
+	cnt<decisionNumber>++;
+    }
+    loop<decisionNumber>: ;	/* Jump to here if this rule does not match */
+    <@postloop()>
+}
+>>
+
+earlyExitEx() ::= <<
+/* mismatchedSetEx()
+ */
+new ANTLR_Exception\< <name>ImplTraits, EARLY_EXIT_EXCEPTION, StreamType>( this->get_rec(), "" );
+<\n>
+>>
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+
+// <fileName>:<description>
+<decls>
+
+<@preloop()>
+for (;;)
+{
+    int alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>)
+    {
+	<alts:{a | <altSwitchCase(i,a)>}>
+	default:
+	    goto loop<decisionNumber>;	/* break out of the loop */
+	    break;
+    }
+}
+loop<decisionNumber>: ; /* Jump out to here if this rule does not match */
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) by antlr before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase(altNum,alt) ::= <<
+case <altNum>:
+    <@prealt()>
+    <alt>
+    break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+{
+    <@declarations()>
+    <@initializations()>
+    <elements:element()>
+    <rew>
+    <@cleanup()>
+}
+>>
+
+// E L E M E N T S
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+/** Dump the elements one per line */
+element(e) ::= <<
+<@prematch()>
+<e.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)><label> = <endif> this->matchToken(<token>, &FOLLOW_<token>_in_<ruleName><elementIndex>);
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+ list_<label>.push_back(<elem>);
+>>
+
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = this->LA(1);<\n>
+<endif>
+ this->matchc(<char>);
+<checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = this->LA(1);<\n>
+<endif>
+this->matchRange(<a>, <b>);
+<checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= this->LA(1);<\n>
+<else>
+<label>=(<labelType>) this->LT(1);<\n>
+<endif>
+<endif>
+if ( <s> )
+{
+    this->consume();
+    <postmatchCode>
+<if(!LEXER)>
+    this->set_perror_recovery(false);
+<endif>
+    <if(backtracking)> this->set_failedflag(false); <\n><endif>
+}
+else
+{
+    <ruleBacktrackFailure()>
+    <mismatchedSetEx()>
+    <@mismatchedSetException()>
+<if(LEXER)>
+    this->recover();
+<else>
+<! use following code to make it recover inline;
+    this->recoverFromMismatchedSet(&FOLLOW_set_in_<ruleName><elementIndex>);
+!>
+<endif>
+    goto rule<ruleDescriptor.name>Ex;
+}<\n>
+>>
+
+mismatchedSetEx() ::= <<
+new ANTLR_Exception\< <name>ImplTraits, MISMATCHED_SET_EXCEPTION, StreamType>( this->get_rec(), "" );
+<if(PARSER)>
+this->get_exception()->set_expectingSet(NULL);
+<! use following code to make it recover inline;
+this->get_exception()->set_expectingSet( &FOLLOW_set_in_<ruleName><elementIndex> );
+!>
+<endif>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex) ::= <<
+<if(label)>
+ANTLR_MARKER <label>Start = this->getCharIndex();
+ANTLR_UINT32 <label>StartLine<elementIndex> = this->getLine();
+ANTLR_UINT32 <label>StartCharPos<elementIndex> = this->getCharPositionInLine();
+this->matchs(<string>);
+<checkRuleBacktrackFailure()>
+<label> = new CommonTokenType;
+<label>->set_type( CommonTokenType::TOKEN_INVALID );
+<label>->set_startIndex( <label>Start);
+<label>->set_stopIndex( this->getCharIndex()-1);
+<label>->set_input( this->get_input() );
+<label>->set_line( <label>StartLine<elementIndex> );
+<label>->set_charPositionInLine( <label>StartCharPos<elementIndex> );
+<else>
+this->matchs(<string>);
+<checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)>
+<label>=(<labelType>)this->LT(1);<\n>
+<endif>
+this->matchAnyToken();
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = this->LA(1);<\n>
+<endif>
+this->matchAny();
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values. The 'rule' argument was the
+ *  target rule name, but is now of type Rule, whose toString is
+ *  the same: the rule name.  Now, though, you can access the full rule
+ *  descriptor stuff.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+this->followPush(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif><if(scope)>m_<scope:delegateName()>-><endif><rule.name>(<if(args)><args; separator=", "><endif>);<\n>
+this->followPop();
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference
+ *  The 'rule' argument was the target rule name, but is now
+ *  of type Rule, whose toString is the same: the rule name.
+ *  Now, though, you can access the full rule descriptor.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+/* <description> */
+<if(label)>
+{
+    ANTLR_MARKER <label>Start<elementIndex> = this->getCharIndex();
+    ANTLR_UINT32 <label>StartLine<elementIndex> = this->getLine();
+    ANTLR_UINT32 <label>StartCharPos<elementIndex> = this->getCharPositionInLine();
+    <if(scope)>m_<scope:delegateName()>-><endif>m<rule.name>(<if(scope)>m_<scope:delegateName()><endif> <if(args)>, <endif><args; separator=", ">);
+    <checkRuleBacktrackFailure()>
+    <label> = new CommonTokenType();
+    <label>->set_type( CommonTokenType::TOKEN_INVALID);
+    <label>->set_startIndex( <label>Start<elementIndex> );
+    <label>->set_stopIndex( this->getCharIndex()-1 );
+    <label>->set_input( this->get_input() );
+    <label>->set_line( <label>StartLine<elementIndex> );
+    <label>->set_charPositionInLine( <label>StartCharPos<elementIndex> );
+}
+<else>
+<if(scope)>m_<scope:delegateName()>-><endif>m<rule.name>(<args; separator=", ">);
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+{
+    ANTLR_UINT32 <label>Start<elementIndex>;
+    ANTLR_UINT32 <label>StartLine<elementIndex> = this->getLine();
+    ANTLR_UINT32 <label>StartCharPos<elementIndex> = this->getCharPositionInLine();
+    <labelType> <label>;
+    <label>Start<elementIndex> = this->getCharIndex();
+    this->matchc(ANTLR_CHARSTREAM_EOF);
+    <checkRuleBacktrackFailure()>
+    <label> = new CommonTokenType();
+    <label>->set_type( CommonTokenType::TOKEN_EOF );
+    <label>->set_startIndex(<label>Start<elementIndex>);
+    <label>->set_stopIndex(this->getCharIndex()-1);
+    <label>->set_input( this->get_input() );
+    <label>->set_line( <label>StartLine<elementIndex> );
+    <label>->set_charPositionInLine( <label>StartCharPos<elementIndex> );
+}
+<else>
+    this->matchc(ANTLR_CHARSTREAM_EOF);
+    <checkRuleBacktrackFailure()>
+    <endif>
+>>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "int <recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
+recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( this->LA(1)== CommonTokenType::TOKEN_DOWN ) {
+    this->matchToken(CommonTokenType::TOKEN_DOWN, NULL);
+    <checkRuleBacktrackFailure()>
+    <children:element()>
+    this->matchToken(CommonTokenType::TOKEN_UP, NULL);
+    <checkRuleBacktrackFailure()>
+}
+<else>
+this->matchToken(CommonTokenType::TOKEN_DOWN, NULL);
+<checkRuleBacktrackFailure()>
+<children:element()>
+this->matchToken(CommonTokenType::TOKEN_UP, NULL);
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) )
+{
+    <ruleBacktrackFailure()>
+    <newFPE(...)>
+}
+>>
+
+newFPE() ::= <<
+	ExceptionBaseType* ex = new ANTLR_Exception\< <name>ImplTraits, FAILED_PREDICATE_EXCEPTION, StreamType>( this->get_rec(), "<description>" );
+    ex->set_ruleName( "<ruleName>" );
+    <\n>
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+
+{
+    int LA<decisionNumber>_<stateNumber> = this->LA(<k>);
+    <edges; separator="\nelse ">
+    else
+    {
+<if(eotPredictsAlt)>
+        alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+        <ruleBacktrackFailure()>
+
+        <newNVException()>
+        goto rule<ruleDescriptor.name>Ex;
+
+<endif>
+    }
+}
+>>
+
+newNVException() ::= <<
+ExceptionBaseType* ex = new ANTLR_Exception\< <name>ImplTraits, NO_VIABLE_ALT_EXCEPTION, StreamType>( this->get_rec(), "<description>" );
+ex->set_decisionNum( <decisionNumber> );
+ex->set_state( <stateNumber> );
+<@noViableAltException()>
+<\n>
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and closer to what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+{
+    int LA<decisionNumber>_<stateNumber> = this->LA(<k>);
+    <edges; separator="\nelse ">
+}
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+
+dfaLoopbackStateDecls()::= <<
+ANTLR_UINT32   LA<decisionNumber>_<stateNumber>;
+>>
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+{
+   /* dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState)
+    */
+    int LA<decisionNumber>_<stateNumber> = this->LA(<k>);
+    <edges; separator="\nelse "><\n>
+    <if(eotPredictsAlt)>
+    <if(!edges)>
+	alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+	<else>
+    else
+    {
+	alt<decisionNumber>=<eotPredictsAlt>;
+    }<\n>
+    <endif>
+    <endif>
+}
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>)<if(predicates)> && (<predicates>)<endif>)
+{
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( this->LA(<k>) )
+{
+<edges; separator="\n">
+
+default:
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    <newNVException()>
+    goto rule<ruleDescriptor.name>Ex;<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( this->LA(<k>) )
+{
+    <edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( this->LA(<k>) )
+{
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+default:
+    alt<decisionNumber>=<eotPredictsAlt>;
+    break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{it |case <it>:}; separator="\n">
+	{
+		<targetState>
+	}
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = cdfa<decisionNumber>.predict(this, this->get_rec(), this->get_istream(), cdfa<decisionNumber> );
+<checkRuleBacktrackFailure()>
+>>
+
+/* Dump DFA tables as statically initialized constant arrays,
+ * which are then used to statically initialize the dfa structure, which means that there
+ * is no runtime initialization whatsoever, other than anything the C compiler might
+ * need to generate. In general the C compiler will lay out memory such that there is no
+ * runtime code required.
+ */
+cyclicDFA(dfa) ::= <<
+/** Static dfa state tables for Cyclic dfa:
+ *    <dfa.description>
+ */
+static const ANTLR_INT32 dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] =
+    {
+	<dfa.eot; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR_INT32 dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] =
+    {
+	<dfa.eof; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR_INT32 dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] =
+    {
+	<dfa.min; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR_INT32 dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] =
+    {
+	<dfa.max; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR_INT32 dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] =
+    {
+	<dfa.accept; wrap="\n", separator=", ", null="-1">
+    };
+static const ANTLR_INT32 dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] =
+    {
+	<dfa.special; wrap="\n", separator=", ", null="-1">
+    };
+
+/** Used when there is no transition table entry for a particular state */
+static const ANTLR_INT32* dfa<dfa.decisionNumber>_T_empty	 =   NULL;
+
+<dfa.edgeTransitionClassMap.keys:{ table |
+static const ANTLR_INT32 dfa<dfa.decisionNumber>_T<i0>[] =
+    {
+	<table; separator=", ", wrap="\n", null="-1">
+    \};<\n>}; null = "">
+
+/* Transition tables are a table of sub tables, with some tables
+ * reused for efficiency.
+ */
+static const ANTLR_INT32 * const dfa<dfa.decisionNumber>_transitions[] =
+{
+    <dfa.transitionEdgeTables:{xref|dfa<dfa.decisionNumber>_T<xref>}; separator=", ", wrap="\n", null="NULL">
+};
+
+<@errorMethod()>
+
+/* Declare tracking structure for Cyclic DFA <dfa.decisionNumber>
+ */
+class <name>CyclicDFA<dfa.decisionNumber> : public CyclicDFA\< <name>ImplTraits, <name> >, public <name>Tokens
+{
+public:
+	typedef CyclicDFA\< <name>ImplTraits, <name> >  BaseType;
+	typedef BaseType::ContextType CtxType;
+	
+private:
+<if(dfa.specialStateSTs)>
+	// To maintain C-target compatibility, we need to make some of the ctx functions look like member functions
+	CtxType*	m_ctx; 
+<endif>	
+
+public:
+	<name>CyclicDFA<dfa.decisionNumber>( ANTLR_INT32	decisionNumber
+					, const ANTLR_UCHAR*	description
+					, const ANTLR_INT32* const	eot
+					, const ANTLR_INT32* const	eof
+					, const ANTLR_INT32* const	min
+					, const ANTLR_INT32* const	max
+					, const ANTLR_INT32* const	accept
+					, const ANTLR_INT32* const	special
+					, const ANTLR_INT32* const *const	transition)
+					:BaseType( decisionNumber, description, eot, eof, min, max, accept,
+								special, transition )
+	{
+	<if(dfa.specialStateSTs)>
+		m_ctx = NULL;
+	<endif>		
+	}
+
+    <if(dfa.specialStateSTs)>
+    ANTLR_UINT32 LA(ANTLR_INT32 i)
+    {
+        return m_ctx->LA(i);
+    }
+
+    <if(PARSER)>
+    const CtxType::CommonTokenType*  LT(ANTLR_INT32 k)
+    {
+        return m_ctx->LT(k);
+    }
+    <endif>
+    <if(synpreds)>
+    template\<typename PredType>
+    bool msynpred( PredType pred )
+    {
+        return m_ctx->msynpred(pred);
+    }
+    <endif>
+        
+	ANTLR_INT32  specialStateTransition(CtxType * ctx, RecognizerType* recognizer, IntStreamType* is, ANTLR_INT32 s)
+	{
+	    ANTLR_INT32    _s;
+		
+	    m_ctx = ctx;
+	    _s	    = s;
+	    switch  (s)
+	    {
+	    <dfa.specialStateSTs:{state |
+	    case <i0>:
+
+		<state>}; separator="\n">
+	    }
+	<if(backtracking)>
+	    if ( ctx->get_backtracking() > 0)
+	    {
+		 ctx->set_failedflag( true );
+		return	-1;
+	    }
+	<endif>
+	    ExceptionBaseType* ex = new ANTLR_Exception\< <name>ImplTraits, NO_VIABLE_ALT_EXCEPTION, StreamType>( recognizer, "<dfa.description>" );
+	    ex->set_decisionNum( <dfa.decisionNumber> );
+	    ex->set_state(_s);
+	    <@noViableAltException()>
+	    return -1;
+	}
+	<endif>
+};
+ 
+static <name>CyclicDFA<dfa.decisionNumber>  cdfa<dfa.decisionNumber>(
+	    <dfa.decisionNumber>,		    /* Decision number of this dfa	    */
+	    /* Which decision this represents:   */
+	    (const ANTLR_UCHAR*)"<dfa.description>",
+	    dfa<dfa.decisionNumber>_eot,	    /* EOT table			    */
+	    dfa<dfa.decisionNumber>_eof,	    /* EOF table			    */
+	    dfa<dfa.decisionNumber>_min,	    /* Minimum tokens for each state    */
+	    dfa<dfa.decisionNumber>_max,	    /* Maximum tokens for each state    */
+	    dfa<dfa.decisionNumber>_accept,	/* Accept table			    */
+	    dfa<dfa.decisionNumber>_special,	/* Special transition states	    */
+	    dfa<dfa.decisionNumber>_transitions	/* Table of transition tables	    */
+
+	);
+
+
+/* End of Cyclic DFA <dfa.decisionNumber>
+ * ---------------------
+ */
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+{
+    ANTLR_UINT32 LA<decisionNumber>_<stateNumber>;<\n>
+    ANTLR_MARKER index<decisionNumber>_<stateNumber>;<\n>
+
+    LA<decisionNumber>_<stateNumber> = ctx->LA(1);<\n>
+    <if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+    index<decisionNumber>_<stateNumber> = ctx->index();<\n>
+    ctx->rewindLast();<\n>
+    <endif>
+    s = -1;
+    <edges; separator="\nelse ">
+	<if(semPredState)> <! return input cursor to state before we rewound !>
+	ctx->seek(index<decisionNumber>_<stateNumber>);<\n>
+	<endif>
+    if ( s>=0 )
+    {
+	return s;
+    }
+}
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif> )
+{
+    s = <targetStateNumber>;
+}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+ s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "( (<left>) && (<right>) )"
+
+orPredicates(operands) ::= "(<operands:{o|(<o>)}; separator=\"||\">)"
+
+notPredicate(pred) ::= "!( <evalPredicate(pred,{})> )"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "this->msynpred( antlr3::ClassForwarder\<<pred>>() )"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "this->LA(<k>) == <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
+((LA<decisionNumber>_<stateNumber> >= <lower>) && (LA<decisionNumber>_<stateNumber> \<= <upper>))
+%>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "((this->LA(<k>) >= <lower>) && (this->LA(<k>) \<= <upper>))"
+
+setTest(ranges) ::= "<ranges; separator=\" || \">"
+
+// A T T R I B U T E S
+
+makeScopeSet() ::= <<
+/* makeScopeSet()
+ */
+/** Definition of the <scope.name> scope variable tracking
+ *  structure. An instance of this structure is created by pushing onto
+ *  the m_<scope.name>_stack member (see ruleInitializations()).
+ */
+struct  <scopeStruct(sname=scope.name,...)>
+{
+    /* =============================================================================
+     * Programmer defined variables...
+     */
+    <scope.attributes:{it |<it.decl>;}; separator="\n">
+
+    /* End of programmer defined variables
+     * =============================================================================
+     */
+};
+
+>>
+
+globalAttributeScopeDecl(scope) ::= <<
+<if(scope.attributes)>
+/* globalAttributeScopeDecl(scope)
+ */
+<makeScopeSet(...)>
+<endif>
+>>
+
+ruleAttributeScopeDecl(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeDecl(scope)
+ */
+<makeScopeSet(...)>
+<endif>
+>>
+
+globalAttributeScopeDef(scope) ::=
+<<
+/* globalAttributeScopeDef(scope)
+ */
+<if(scope.attributes)>
+
+StackType\< <scopeStruct(sname=scope.name)> > <scopeStack(sname=scope.name)>;
+
+<endif>
+>>
+
+ruleAttributeScopeDef(scope) ::= <<
+<if(scope.attributes)>
+/* ruleAttributeScopeDef(scope)
+ */
+StackType\< <scopeStruct(sname=scope.name)> >  <scopeStack(sname=scope.name,...)>;
+
+<endif>
+>>
+
+scopeStruct(sname) ::= <<
+<sname>Scope
+>>
+
+scopeStack(sname) ::= <<
+m_<sname>_stack
+>>
+
+returnStructName(r) ::= "<r.name>_return"
+
+returnType() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.grammar.recognizerName>::<ruleDescriptor:returnStructName()>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+<else>
+bool
+<endif>
+%>
+
+/** Generate the C type associated with a single or multiple return
+ *  value(s).
+ */
+ruleLabelType(referencedRule) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+%>
+
+delegateName(d) ::= <<
+<if(d.label)><d.label><else>g<d.name><endif>
+>>
+
+/** Using the type-to-initial-value map, initialize a value of the given type;
+ *  if the type is not in the table it must be an object, whose default value is "0".
+ */
+initValue(typeName) ::= <<
+ = <cTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label  */
+ruleLabelDef(label) ::= <<
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text>;
+>>
+/**  Rule label default value */
+ruleLabelInitVal(label) ::= <<
+>>
+
+ASTLabelType() ::= "<if(recognizer.ASTLabelType)><recognizer.ASTLabelType><else>ImplTraits::TreeType*<endif>"
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+struct <ruleDescriptor:returnStructName()> : public <name>ImplTraits::RuleReturnValueType
+{
+public:
+    typedef <name>ImplTraits::RuleReturnValueType BaseType;
+    <ruleDescriptor:returnStructName()>()
+        : BaseType()
+        <if(scope)>, <scope.attributes:{it | <it.name>() }; separator=","><endif>
+        { init(); }
+    <ruleDescriptor:returnStructName()>( BaseParserType* parser )
+        : BaseType(parser)
+        <if(scope)>, <scope.attributes:{it | <it.name>() }; separator=","><endif>
+        { init(); }
+    <ruleDescriptor:returnStructName()>( const <ruleDescriptor:returnStructName()>& other )
+        : BaseType(other)
+    <if(scope)>, <scope.attributes:{it | <it.name>(other.<it.name>) }; separator=", "><endif>
+    { copy(other); }
+    ~<ruleDescriptor:returnStructName()>()
+    {
+        <@ruleReturnMembersDelete()>
+    }
+
+    <ruleDescriptor:returnStructName()>&
+    operator=( const <ruleDescriptor:returnStructName()>& other )
+    {
+        BaseType::operator=( other );
+        <if(scope)><scope.attributes:{it | <it.name> = other.<it.name>; }; separator="\n"><endif>
+        copy(other);    
+    	return *this;
+    }
+    <@ruleReturnMembers()>
+    void init() { <@ruleReturnMembersInit()> }
+    void copy( const <ruleDescriptor:returnStructName()>& other) { <@ruleReturnMembersCopy()> }    
+<else>
+struct <ruleDescriptor:returnStructName()>
+{
+public:
+    <name>ImplTraits::<recognizer.ASTLabelType>       start;
+    <name>ImplTraits::<recognizer.ASTLabelType>       stop;
+    <ruleDescriptor:returnStructName()>( const <ruleDescriptor:returnStructName()>& other )
+    <if(scope.attributes)>
+    <scope.attributes:{it | <it.name>(other.<it.name>) }; separator=",">
+    <endif>
+    {
+     	start = other.start;
+     	stop  = other.stop;
+    }
+    
+    <ruleDescriptor:returnStructName()>&
+    operator=( const <ruleDescriptor:returnStructName()>& other )
+    {
+     	start = other.start;
+     	stop  = other.stop;
+
+    	<scope.attributes:{it | <it.name> = other.<it.name>; }; separator="\n">
+    	return *this;
+    }
+<endif>
+    <if(scope)><scope.attributes:{it |<it.type> <it.name>;}; separator="\n"><endif>
+};
+
+<endif>
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{it |<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name>=<expr>;"
+
+/** Note that the scopeAttributeRef does not have access to the
+ * grammar name directly
+ */
+scopeAttributeRef(scope,attr,index,negIndex) ::= <%
+<if(negIndex)>
+  m_<scope>_stack.at( m_<scope>_stack.size()-<negIndex>-1).<attr.name>
+<else>
+<if(index)>
+  m_<scope>_stack.at(<index>).<attr.name>
+<else>
+  m_<scope>_stack.peek().<attr.name>
+<endif>
+<endif>
+%>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
+<if(negIndex)>
+  m_<scope>_stack.at( m_<scope>_stack.size()-<negIndex>-1).<attr.name> = <expr>;
+<else>
+<if(index)>
+  m_<scope>_stack.at(<index>).<attr.name> = <expr>;
+<else>
+  m_<scope>_stack.peek().<attr.name> =<expr>;
+<endif>
+<endif>
+%>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<scope>.<attr.name>
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>=<expr>;
+<else>
+<attr.name>=<expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+//
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText())"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>->get_type())"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>->get_line())"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>->get_charPositionInLine())"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>->get_channel())"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>->get_tokenIndex())"
+tokenLabelPropertyRef_tree(scope,attr) ::= "(<scope>->get_tree())"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<name>ImplTraits::ConvertToInt32(<scope>->getText()))"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>.start)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>.stop)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>.tree)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+(this->get_strstream()->toStringSS(<scope>.start, <scope>.start))
+<else>
+(this->get_strstream()->toStringTT(<scope>.start, <scope>.stop))
+<endif>
+>>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>->get_type())"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>->get_line())"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>->get_charPositionInLine())"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>->get_channel())"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>->get_tokenIndex())"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>->getText())"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "retval.start"
+rulePropertyRef_stop(scope,attr) ::= "retval.stop"
+rulePropertyRef_tree(scope,attr) ::= "retval.tree"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+this->get_input()->toStringSS( this->get_adaptor()->getTokenStartIndex(retval.start), this->get_adaptor()->getTokenStopIndex(retval.start))
+<else>
+this->get_strstream()->toStringTT(retval.start, this->LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "this->getText()"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "this->get_state()->get_tokenStartLine()"
+lexerRulePropertyRef_pos(scope,attr) ::= "this->get_state()->get_tokenStartCharPositionInLine()"
+lexerRulePropertyRef_channel(scope,attr) ::= "this->get_state()->get_channel()"
+lexerRulePropertyRef_start(scope,attr) ::= "this->get_state()->get_tokenStartCharIndex()"
+lexerRulePropertyRef_stop(scope,attr) ::= "(this->getCharIndex()-1)"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_int(scope,attr) ::= "(<name>ImplTraits::ConvertToInt32(<scope>->getText()))"
+
+
+// setting $st and $tree is allowed in local rule. everything else is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree=<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st=<expr>;"
+
+
+/** How to deal with an @after for the C/C++ targets. Because we cannot rely on
+ *  any garbage collection, after code is executed even in backtracking
+ *  mode. Must be documented clearly.
+ */
+execAfter(action) ::= <<
+{
+    <action>
+}
+>>
+
+/** How to execute an action (when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if ( <actions.(actionScope).synpredgate> )
+{
+    <action>
+}
+<else>
+if ( BACKTRACKING == 0 )
+{
+    <action>
+}
+<endif>
+<else>
+{
+    <action>
+}
+<endif>
+>>
+
+// M I S C (properties, etc...)
+
+bitsetDeclare(bitsetname, words64, traits) ::= <<
+
+/** Bitset defining follow set for error recovery in rule state: <name>  */
+static	ANTLR_BITWORD <bitsetname>_bits[]	= { <words64:{it |ANTLR_UINT64_LIT(<it>)}; separator=", "> };
+static  <traits>::BitsetListType <bitsetname>( <bitsetname>_bits, <length(words64)> );
+>>
+
+codeFileExtension() ::= ".cpp"
+
+true_value() ::= "true"
+false_value() ::= "false"
+
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Delphi/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/AST.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Delphi/AST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Delphi/AST.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTParser.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTParser.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTParser.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTTreeParser.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTTreeParser.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Delphi/ASTTreeParser.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Delphi/Delphi.stg b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/Delphi.stg
new file mode 100644
index 0000000..7390fde
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Delphi/Delphi.stg
@@ -0,0 +1,1805 @@
+/* [The "BSD license"]
+ Copyright (c) 2008 Erik van Bilsen
+ Copyright (c) 2007-2008 Johannes Luber
+ Copyright (c) 2005-2007 Kunle Odutola
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+group Delphi;
+
+csharpTypeInitMap ::= [
+  "int":"0",
+  "uint":"0",
+  "long":"0",
+  "ulong":"0",
+  "float":"0.0",
+  "double":"0.0",
+  "bool":"False",
+  "byte":"0",
+  "sbyte":"0",
+  "short":"0",
+  "ushort":"0",
+  "char":"#0",
+  "string":"''",
+  "String":"''",
+  default:"nil" // anything other than an atomic type
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ *  LEXER (Boolean): should we generate lexer code?
+ *  PARSER (Boolean): should we generate parser code?
+ *  TREE_PARSER (Boolean): should we generate tree parser code?
+ *  actionScope (String): 'lexer', 'parser', 'tree_parser' or custom scope
+ *  actions (HashMap):
+ *  docComment (String): document comment
+ *  recognizer (Object): recognizer class generator
+ *  name (String): name of grammar
+ *  tokens (HashMap<name: String, type: Integer>):
+ *  tokenNames:
+ *  rules:
+ *  cyclicDFAs:
+ *  bitsets:
+ *  buildTemplate (Boolean): should we generate a string template?
+ *  buildAST (Boolean): should we generate an AST?
+ *  rewriteMode (Boolean): are we rewriting nodes?
+ *  profile (Boolean):
+ *  backtracking (Boolean): backtracking mode?
+ *  synpreds (): syntactic predicates
+ *  memoize (Boolean): should we memoize?
+ *  numRules (Integer): number of rules
+ *  fileName (String): fully qualified name of original .g file
+ *  ANTLRVersion (String): ANTLR version in Major.Minor.Build format
+ *  generatedTimestamp (String): date/time when the file is generated
+ *  trace (Boolean): should we trace input/output?
+ *  scopes:
+ *  superClass (String): name of base class, or empty string
+ *  literals:
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+     bitsets, buildTemplate, buildAST, rewriteMode, profile,
+     backtracking, synpreds, memoize, numRules,
+     fileName, ANTLRVersion, generatedTimestamp, trace,
+     scopes, superClass, literals) ::=
+<<
+unit <name>;
+
+{$HINTS OFF}
+
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<actions.(actionScope).header>
+
+interface
+
+<@imports>
+uses<\n>
+<@end>
+  <actions.(actionScope).usesInterface>
+<if(TREE_PARSER)>
+  Antlr.Runtime.Tree,<\n>
+<endif>
+  Antlr.Runtime,
+  Antlr.Runtime.Collections,
+  Antlr.Runtime.Tools;
+
+<docComment>
+<recognizer>
+>>
+
+/** Generates source code for the lexer class
+ * grammar (Grammar object)
+ */
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
+      filterMode, superClass="Lexer") ::= <<
+type
+  I<grammar.recognizerName> = interface(I<@superClassName><superClass><@end>)
+  end;
+
+  T<grammar.recognizerName> = class(T<@superClassName><superClass><@end>, I<grammar.recognizerName>)
+  strict private
+    FCnt: array [0..<grammar.numberOfDecisions>] of Byte;
+    FLA: array [0..<grammar.numberOfDecisions>, 0..255] of Integer;
+    FException: ERecognitionException;
+    procedure InitializeCyclicDFAs;
+  <cyclicDFAs:cyclicDFADeclaration()>
+  public
+    const
+      <tokens:{<it.name> = <it.type>;}; separator="\n">
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+  strict private
+    <actions.(actionScope).memberDeclarations>
+  public
+    // delegates
+    <grammar.delegates: {g|<g:delegateName()>: I<superClass>; {<g.recognizerName>}}; separator="\n">
+  public
+    // delegators
+    <grammar.delegators: {g|<g:delegateName()>: Pointer; {<g.recognizerName>}}; separator="\n">
+    <last(grammar.delegators):{g|gParent: Pointer; {<g.recognizerName>}}>
+  protected
+    { IBaseRecognizer }
+    function GetGrammarFileName: String; override;
+<if(filterMode)>
+    function AlreadyParsedRule(const Input: IIntStream;
+      const RuleIndex: Integer): Boolean; override;
+    procedure Memoize(const Input: IIntStream; const RuleIndex,
+      RuleStartIndex: Integer); override;
+  protected
+    { ILexer }
+    function NextToken: IToken; override;<\n>
+<endif>
+  protected
+    { ILexer }
+    procedure DoTokens; override;
+  public
+    constructor Create; overload;
+    constructor Create(const AInput: ICharStream<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
+    constructor Create(const AInput: ICharStream; const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
+
+    <rules: {r | <if(!r.ruleDescriptor.isSynPred)><lexerRuleDeclaration(r)><endif>}>
+    <synpreds:{p | <lexerSynpredDeclaration(p)>}; separator="\n">
+  end;
+
+implementation
+
+uses
+  <grammar.delegates: {g|<g.recognizerName>,}; separator="\n">
+  <grammar.delegators: {g|<g.recognizerName>,}; separator="\n">
+  <actions.(actionScope).usesImplementation>
+  SysUtils,
+  StrUtils,
+  Math;
+
+{ T<grammar.recognizerName> }
+
+constructor T<grammar.recognizerName>.Create;
+begin
+  InitializeCyclicDFAs;
+end;
+
+constructor T<grammar.recognizerName>.Create(const AInput: ICharStream<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
+begin
+  Create(AInput, nil<grammar.delegators:{g|, A<g:delegateName()>}>);
+end;
+
+constructor T<grammar.recognizerName>.Create(const AInput: ICharStream; const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
+begin
+  inherited Create(AInput, AState);
+  InitializeCyclicDFAs; { TODO: Necessary in Delphi??? Not removed yet. }
+  <if(memoize)>
+  <if(grammar.grammarIsRoot)>
+  State.RuleMemoCount := <numRules>+1;<\n> <! index from 1..n !>
+  <endif>
+  <endif>
+  <grammar.directDelegates:
+   {g|<g:delegateName()> := T<g.recognizerName>.Create(AInput, State<trunc(g.delegators):{p|, <p:delegateName()>}>, Self);}; separator="\n">
+  <grammar.delegators:
+   {g|<g:delegateName()> := Pointer(A<g:delegateName()>);}; separator="\n">
+  <last(grammar.delegators):{g|gParent := Pointer(A<g:delegateName()>);}>
+  <actions.(actionScope).memberInitializations>
+end;
+<actions.(actionScope).memberImplementations>
+function T<grammar.recognizerName>.GetGrammarFileName: String;
+begin
+  Result := '<fileName>';
+end;
+
+<if(filterMode)>
+<filteringNextToken()>
+<endif>
+
+<rules; separator="\n\n">
+<synpreds:{p | <lexerSynpred(p)>}>
+
+procedure T<grammar.recognizerName>.InitializeCyclicDFAs;
+begin
+  <cyclicDFAs:{dfa | FDFA<dfa.decisionNumber> := TDFA<dfa.decisionNumber>.Create(Self<@debugAddition()>);}; separator="\n">
+  <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>FDFA<dfa.decisionNumber>.SpecialStateTransitionHandler := DFA<dfa.decisionNumber>_SpecialStateTransition;<endif>}; separator="\n">
+end;
+
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+end.>>
+
+lexerRuleDeclaration(rule) ::= <<
+procedure m<rule.ruleName>(<rule.ruleDescriptor.parameterScope:parameterScope(scope=rule)>);<\n>
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error is reported when a match attempt fails; just rewind,
+ *  consume a token and then try again.  The backtracking option needs to be set as well.
+ *
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+function T<grammar.recognizerName>.NextToken: IToken;
+var
+  M: Integer;
+begin
+  while (True) do
+  begin
+    if (Input.LA(1) = Integer(cscEOF)) then
+      Exit(TToken.EOF_TOKEN);
+
+    State.Token := nil;
+    State.Channel := TToken.DEFAULT_CHANNEL;
+    State.TokenStartCharIndex := Input.Index;
+    State.TokenStartCharPositionInLine := Input.CharPositionInLine;
+    State.TokenStartLine := Input.Line;
+    State.Text := '';
+    try
+      M := Input.Mark();
+      State.Backtracking := 1; <! means we won't throw slow exception !>
+      State.Failed := False;
+      mTokens();
+      State.Backtracking := 0;
+<!
+      mTokens backtracks with synpred at backtracking==2
+            and we set the synpredgate to allow actions at level 1.
+!>
+      if (State.Failed) then
+      begin
+        Input.Rewind(M);
+        Input.Consume; <! // advance one char and try again !>
+      end
+      else
+      begin
+        Emit;
+        Exit(State.Token);
+      end;
+    except
+      on RE: ERecognitionException do
+      begin
+        // shouldn't happen in backtracking mode, but...
+        ReportError(RE);
+        Recover(RE);
+      end;
+    end;
+  end;
+end;
+
+function T<grammar.recognizerName>.AlreadyParsedRule(const Input: IIntStream;
+  const RuleIndex: Integer): Boolean;
+begin
+  if (State.Backtracking > 1) then
+    Result := inherited AlreadyParsedRule(Input, RuleIndex)
+  else
+    Result := False;
+end;
+
+procedure T<grammar.recognizerName>.Memoize(const Input: IIntStream; const RuleIndex,
+  RuleStartIndex: Integer);
+begin
+  if (State.Backtracking > 1) then
+    inherited Memoize(Input, RuleIndex, RuleStartIndex);
+end;
+
+>>
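
The template above implements the filter-mode loop described in its doc comment: mark the input, try mTokens() at backtracking level 1, rewind and consume one character on failure, emit a token on success. A rough, self-contained Java sketch of that shape, using stand-in types rather than the actual runtime API:

    // Illustrative sketch only; CharStream and the field names are hypothetical stand-ins.
    interface CharStream {
        int LA(int i);          // lookahead character; -1 means EOF
        int mark();             // remember the current position
        void rewind(int marker);
        void consume();         // advance one character
    }

    abstract class FilteringLexerSketch {
        protected CharStream input;
        protected int backtracking;
        protected boolean failed;
        protected Object token;             // token produced by emit()

        protected abstract void mTokens();  // generated rule dispatcher
        protected abstract void emit();     // builds this.token

        public Object nextToken() {
            while (true) {
                if (input.LA(1) == -1) return null;   // EOF sentinel
                token = null;
                int m = input.mark();
                backtracking = 1;            // suppress error reporting while trying
                failed = false;
                mTokens();
                backtracking = 0;
                if (failed) {
                    input.rewind(m);
                    input.consume();         // skip one character and try again
                } else {
                    emit();
                    return token;
                }
            }
        }
    }
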
+
+filteringActionGate() ::= "(State.Backtracking = 1)"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass, filterMode,
+              ASTLabelType="ANTLRInterface", labelType, members, rewriteElementType) ::= <<
+type
+  <rules: {r | <genericParserRuleReturnType(rule=r, ruleDescriptor=r.ruleDescriptor)>}>
+  I<grammar.recognizerName> = interface(I<@superClassName><superClass><@end>)
+    <rules: {r | <genericParserRuleInterface(rule=r, ruleDescriptor=r.ruleDescriptor)>}>
+  end;
+
+  T<grammar.recognizerName> = class(T<@superClassName><superClass><@end>, I<grammar.recognizerName>)
+<if(grammar.grammarIsRoot)>
+  public
+    const
+      TOKEN_NAMES: array [0..<length(tokenNames)>+3] of String = (
+        '\<invalid>',
+        '\<EOR>',
+        '\<DOWN>',
+        '\<UP>',
+        <tokenNames; separator=",\n">);<\n>
+<endif>
+  public
+    const
+      <tokens:{<it.name> = <it.type>;}; separator="\n">
+  public
+    // delegates
+    <grammar.delegates: {g|<g:delegateName()>: I<superClass>; {<g.recognizerName>}}; separator="\n">
+  public
+    // delegators
+    <grammar.delegators: {g|<g:delegateName()>: Pointer; {<g.recognizerName>}}; separator="\n">
+    <last(grammar.delegators):{g|gParent: Pointer; {<g.recognizerName>}}>
+
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeDeclaration(scope=it)><endif>}>
+<@members>
+    <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+  public
+    constructor Create(const AInput: <inputStreamType><grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
+    constructor Create(const AInput: <inputStreamType>; const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>); overload;
+<@end>
+  protected
+    { IBaseRecognizer }
+    function GetTokenNames: TStringArray; override;
+    function GetGrammarFileName: String; override;
+  strict private
+    <actions.(actionScope).memberDeclarations>
+  <rules: {r | <genericParserRuleDeclaration(rule=r, ruleDescriptor=r.ruleDescriptor)>}>
+
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+    <grammar.delegatedRules:{ruleDescriptor| <delegatedRuleDeclaration(ruleDescriptor)>}>
+
+    <synpreds:{p | <synpredDeclaration(p)>}; separator="\n">
+  <cyclicDFAs:cyclicDFADeclaration()>
+  strict private
+    FException: ERecognitionException;
+    FLA: array [0..<grammar.numberOfDecisions>, 0..255] of Integer;
+    FCnt: array [0..<grammar.numberOfDecisions>] of Byte;
+    procedure InitializeCyclicDFAs;
+<if(bitsets)>
+  public
+    class var
+      <bitsets:bitsetDecl(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>})>
+  public
+    class procedure InitializeBitsets; static;<\n>
+<endif>
+  end;
+
+implementation
+
+uses
+  <grammar.delegates: {g|<g.recognizerName>,}; separator="\n">
+  <grammar.delegators: {g|<g.recognizerName>,}; separator="\n">
+  <actions.(actionScope).usesImplementation>
+  SysUtils,
+  StrUtils,
+  Math;
+
+{ T<grammar.recognizerName> }
+
+constructor T<grammar.recognizerName>.Create(const AInput: <inputStreamType><grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
+begin
+  Create(AInput, TRecognizerSharedState.Create<grammar.delegators:{g|, A<g:delegateName()>}>);
+end;
+
+constructor T<grammar.recognizerName>.Create(const AInput: <inputStreamType>;
+  const AState: IRecognizerSharedState<grammar.delegators:{g|; const A<g:delegateName()>: IBaseRecognizer{<g.recognizerName>}}>);
+begin
+  inherited Create(AInput, AState);
+  <@membersConstructor>
+  <@end>
+  <parserCtorBody()>
+  <grammar.directDelegates:{g|<g:delegateName()> := T<g.recognizerName>.Create(Input, State<trunc(g.delegators):{p|, <p:delegateName()>}>, Self);}; separator="\n">
+  <grammar.indirectDelegates:{g | <g:delegateName()> := <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+  <last(grammar.delegators):{g|gParent := Pointer(A<g:delegateName()>);}>
+  <rules: {r | <ruleAttributeScopeInit(scope=r.ruleDescriptor.ruleScope)>}>
+  <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+  <actions.(actionScope).memberInitializations>
+end;
+<actions.(actionScope).memberImplementations>
+
+<grammar.delegatedRules:{ruleDescriptor| <delegatedRuleImplementation(ruleDescriptor)>}; separator="\n">
+procedure T<grammar.recognizerName>.InitializeCyclicDFAs;
+begin
+  <cyclicDFAs:{dfa | FDFA<dfa.decisionNumber> := TDFA<dfa.decisionNumber>.Create(Self);}; separator="\n">
+  <cyclicDFAs:{dfa | <if(dfa.specialStateSTs)>FDFA<dfa.decisionNumber>.SpecialStateTransitionHandler := DFA<dfa.decisionNumber>_SpecialStateTransition;<endif>}; separator="\n">
+end;
+
+<if(bitsets)>
+class procedure T<grammar.recognizerName>.InitializeBitsets;
+begin
+  <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>
+end;
+<endif>
+
+<@membersImplementation>
+ <@end>
+
+function T<grammar.recognizerName>.GetTokenNames: TStringArray;
+var
+  I: Integer;
+begin
+  SetLength(Result,Length(T<grammar.composite.rootGrammar.recognizerName>.TOKEN_NAMES));
+  for I := 0 to Length(T<grammar.composite.rootGrammar.recognizerName>.TOKEN_NAMES) - 1 do
+    Result[I] := T<grammar.composite.rootGrammar.recognizerName>.TOKEN_NAMES[I];
+end;
+
+function T<grammar.recognizerName>.GetGrammarFileName: String;
+begin
+  Result := '<fileName>';
+end;
+
+<rules; separator="\n\n">
+<synpreds:{p | <synpred(p)>}>
+
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+<if(bitsets)>
+initialization
+  T<grammar.recognizerName>.InitializeBitsets;<\n>
+<endif>
+end.>>
+
+delegatedRuleDeclaration(ruleDescriptor) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+function <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): I<returnType()>;<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+function <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): <returnType()>;<\n>
+<else>
+procedure <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);<\n>
+<endif>
+<endif>
+>>
+
+delegatedRuleImplementation(ruleDescriptor) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+function T<grammar.recognizerName>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): I<returnType()>;<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+function T<grammar.recognizerName>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): <returnType()>;<\n>
+<else>
+procedure T<grammar.recognizerName>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);<\n>
+<endif>
+<endif>
+begin
+  <if(ruleDescriptor.hasReturnValue)>Result :=<endif> T<ruleDescriptor.grammar.recognizerName>(<ruleDescriptor.grammar:delegateName()>.Implementor).<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">);
+end;
+
+>>
+
+parserCtorBody() ::= <<
+InitializeCyclicDFAs;
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+State.RuleMemoCount := <length(grammar.allImportedRules)>+1;<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators: {g|<g:delegateName()> := Pointer(A<g:delegateName()>);}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="ITokenStream", rewriteElementType="Token", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="object", superClass="TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
+<genericParser(inputStreamType="ITreeNodeStream", rewriteElementType="Node", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  Since they never have return values
+ *  or parameters, just generate the simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start "<ruleName>"
+procedure T<grammar.recognizerName>.<ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);
+var
+  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
+  <ruleLabelDefVars()>
+begin
+  <ruleLabelDefs()>
+<if(trace)>
+  TraceIn('<ruleName>_fragment', <ruleDescriptor.index>);
+  try
+    <block>
+  finally
+    TraceOut('<ruleName>_fragment', <ruleDescriptor.index>);
+  end;
+<else>
+  <block>
+<endif>
+end;
+// $ANTLR end "<ruleName>"
+>>
+
+synpredDecls(name) ::= <<
+SynPredPointer <name>;<\n>
+>>
+
+synpred(name) ::= <<
+
+function T<grammar.recognizerName>.<name>: Boolean;
+var
+  Start: Integer;
+  Success: Boolean;
+begin
+  State.Backtracking := State.Backtracking + 1;
+  <@start()>
+  Start := Input.Mark;
+  try
+    <name>_fragment(); // can never throw exception
+  except
+    on RE: ERecognitionException do
+      WriteLn('Impossible: ' + RE.ToString);
+  end;
+  Success := not State.Failed;
+  Input.Rewind(Start);
+  <@stop()>
+  State.Backtracking := State.Backtracking - 1;
+  State.Failed := False;
+  Result := Success;
+end;<\n>
+>>
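
The generated predicate above follows a mark/try/rewind pattern: raise the backtracking level, run the _fragment rule with errors suppressed, record whether it failed, then restore the input position and flags. A hedged Java sketch of that shape (all names are hypothetical):

    // Illustrative sketch only, not the runtime implementation.
    abstract class SynpredSketch {
        protected int backtracking;
        protected boolean failed;

        protected abstract int mark();                // remember the input position
        protected abstract void rewind(int marker);   // restore it
        protected abstract void synpred1_fragment();  // the generated _fragment rule

        public boolean synpred1() {
            backtracking++;
            int start = mark();
            try {
                synpred1_fragment();          // errors are suppressed while backtracking
            } catch (RuntimeException re) {
                System.err.println("Impossible: " + re);
            }
            boolean success = !failed;
            rewind(start);
            backtracking--;
            failed = false;                   // predicate evaluation leaves no failure behind
            return success;
        }
    }
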
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+lexerSynpredDeclaration(name) ::= <<
+function <name>: Boolean;
+procedure <name>_fragment;
+>>
+
+synpredDeclaration(name) ::= <<
+function <name>: Boolean;
+procedure <name>_fragment;
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ((State.Backtracking > 0) and AlreadyParsedRule(Input, <ruleDescriptor.index>)) then
+  Exit(<ruleReturnValue()>);
+<endif>
+>>
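
ruleMemoization() above, together with memoize() further down, caches per rule whether parsing already succeeded or failed at a given start index, so backtracking does not reparse the same input region. A rough Java sketch of such a memo table, assuming a (ruleIndex, startIndex) -> stopIndex layout; the constants and names are illustrative:

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative sketch of a rule memoization table; not the runtime implementation.
    class RuleMemoSketch {
        static final int MEMO_RULE_FAILED = -2;
        static final int MEMO_RULE_UNKNOWN = -1;

        // one map per rule: start index -> stop index (or FAILED marker)
        private final Map<Integer, Map<Integer, Integer>> ruleMemo = new HashMap<>();

        int getRuleMemoization(int ruleIndex, int startIndex) {
            Map<Integer, Integer> m = ruleMemo.get(ruleIndex);
            if (m == null) return MEMO_RULE_UNKNOWN;
            return m.getOrDefault(startIndex, MEMO_RULE_UNKNOWN);
        }

        boolean alreadyParsedRule(int ruleIndex, int startIndex) {
            return getRuleMemoization(ruleIndex, startIndex) != MEMO_RULE_UNKNOWN;
        }

        void memoize(int ruleIndex, int ruleStartIndex, int stopIndex, boolean failed) {
            ruleMemo.computeIfAbsent(ruleIndex, k -> new HashMap<>())
                    .put(ruleStartIndex, failed ? MEMO_RULE_FAILED : stopIndex);
        }
    }
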
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)><\n>if (State.Failed) then Exit(<ruleReturnValue()>);<\n><endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (State.Backtracking > 0) then
+begin
+  State.Failed := True;
+  Exit(<ruleReturnValue()>);
+end;<endif>
+>>
+
+genericParserRuleDeclaration(rule, ruleDescriptor) ::= <<
+<if(ruleDescriptor.isSynPred)>
+<else>
+<ruleAttributeScopeDeclaration(scope=ruleDescriptor.ruleScope)>
+<returnScopeDeclaration(scope=ruleDescriptor.returnScope)>
+public
+<if(ruleDescriptor.hasMultipleReturnValues)>
+  function <rule.ruleName>: I<returnType()>;<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+  function <rule.ruleName>: <returnType()>;<\n>
+<else>
+  procedure <rule.ruleName>;<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+genericParserRuleInterface(rule, ruleDescriptor) ::= <<
+<if(ruleDescriptor.isSynPred)>
+<else>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+function <rule.ruleName>: I<returnType()>;<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+function <rule.ruleName>: <returnType()>;<\n>
+<else>
+procedure <rule.ruleName>;<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+genericParserRuleReturnType(rule, ruleDescriptor) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(ruleDescriptor.isSynPred)>
+<else>
+I<returnType()> = interface(I<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope)
+end;<\n>
+<endif>
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// $ANTLR start "<ruleName>"
+(* <fileName>:<description> *)
+<if(ruleDescriptor.hasMultipleReturnValues)>
+function T<grammar.recognizerName>.<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): I<returnType()>;
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+function T<grammar.recognizerName>.<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>): <returnType()>;
+<else>
+procedure T<grammar.recognizerName>.<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);
+<endif>
+<endif>
+
+var
+<ruleDescriptor.actions.vars>
+  Locals: TLocalStorage;
+<if(ruleDescriptor.hasMultipleReturnValues)>
+  RetVal: I<returnType()>;<\n>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+  RetVal: <returnType()>;<\n>
+<else>
+<endif>
+<endif>
+  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
+  <ruleDeclarationVars()>
+  <ruleLabelDefVars()>
+begin
+  Locals.Initialize;
+  try
+    <if(trace)>TraceIn('<ruleName>', <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try
+      try
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+<if(exceptions)>
+        <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+        <actions.(actionScope).rulecatch>
+<else>
+      except
+        on RE: ERecognitionException do
+        begin
+          ReportError(RE);
+          Recover(Input,RE);
+          <@setErrorReturnValue()>
+        end;<\n>
+<endif>
+<endif>
+<endif>
+      end;
+    finally
+      <if(trace)>TraceOut('<ruleName>', <ruleDescriptor.index>);<endif>
+      <memoize()>
+      <ruleScopeCleanUp()>
+      <finally>
+    end;
+    <@postamble()>
+  finally
+    Locals.Finalize;
+  end;
+  Exit(<ruleReturnValue()>);
+end;
+// $ANTLR end "<ruleName>"
+>>
+
+catch(decl,action) ::= <<
+catch (<decl>)
+{
+    <action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+RetVal := T<returnType()>.Create;
+RetVal.Start := Input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.name> := <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+<ruleDescriptor.name>_StartIndex := Input.Index();
+<endif>
+>>
+
+ruleDeclarationVars() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.name>: <a.type>;
+}>
+<endif>
+<if(memoize)>
+<ruleDescriptor.name>_StartIndex: Integer;
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{<it>Stack.Push(T<it>Scope.Create);}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>Stack.Push(T<it.name>Scope.Create);}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{<it>Stack.Pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>Stack.Pop;}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]:{<it.label.text> := nil;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{list_<it.label.text> := nil;}; separator="\n">
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|<ll.label.text> := nil;}; separator="\n">
+>>
+
+ruleLabelDefVars() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]:{<it.label.text>: I<labelType>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{list_<it.label.text>: IList\<IANTLRInterface\>;}; separator="\n">
+<ruleDescriptor.ruleLabels:ruleLabelDefVar(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|<ll.label.text>: <ruleLabelType(referencedRule=ll.referencedRule)>;}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<it.label.text> := nil;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{List_<it.label.text> := nil;}; separator="\n"
+>
+>>
+
+lexerRuleLabelDefDeclarations() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<it.label.text>: I<labelType>;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{int <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{List_<it.label.text>: IList;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+RetVal
+<endif>
+<else>
+<! nil !>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+RetVal.Stop := Input.LT(-1);
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if (State.Backtracking > 0) then
+  Memoize(Input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex);
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start "<ruleName>"
+<ruleDescriptor.parameterScope>
+procedure T<grammar.recognizerName>.m<ruleName>(<ruleDescriptor.parameterScope:parameterScope(scope=it)>);
+var
+  <ruleDescriptor.actions.vars>
+  Locals: TLocalStorage;
+  TokenType, Channel: Integer;
+  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
+  <lexerRuleLabelDefDeclarations()>
+begin
+  Locals.Initialize;
+  try
+    <ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+    <if(trace)>TraceIn('<ruleName>', <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    try
+<if(nakedBlock)>
+      <ruleMemoization(name=ruleName)>
+      <lexerRuleLabelDefs()>
+      <ruleDescriptor.actions.init>
+      <block><\n>
+<else>
+      TokenType := <ruleName>;
+      Channel := DEFAULT_TOKEN_CHANNEL;
+      <ruleMemoization(name=ruleName)>
+      <lexerRuleLabelDefs()>
+      <ruleDescriptor.actions.init>
+      <block>
+      <ruleCleanUp()>
+      State.TokenType := TokenType;
+      State.Channel := Channel;
+      <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    finally
+      <if(trace)>TraceOut('<ruleName>', <ruleDescriptor.index>);<endif>
+      <ruleScopeCleanUp()>
+      <memoize()>
+    end;
+  finally
+    Locals.Finalize;
+  end;
+end;
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+procedure T<grammar.recognizerName>.mTokens;
+var
+  Alt: array [0..<grammar.numberOfDecisions>] of Integer;
+begin
+  <block>
+end;
+
+procedure T<grammar.recognizerName>.DoTokens;
+begin
+  mTokens;
+end;
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+(* <fileName>:<description> *)
+Alt[<decisionNumber>] := <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+case Alt[<decisionNumber>] of
+  <alts:altSwitchCase()>
+end;
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+(* <fileName>:<description> *)
+Alt[<decisionNumber>] := <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+case Alt[<decisionNumber>] of
+  <alts:altSwitchCase()>
+end;
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+(* <fileName>:<description> *)
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+(* <fileName>:<description> *)
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+(* <fileName>:<description> *)
+FCnt[<decisionNumber>] := 0;
+<decls>
+<@preloop()>
+while (True) do
+begin
+  Alt[<decisionNumber>] := <maxAlt>;
+  <@predecision()>
+  <decision>
+  <@postdecision()>
+  case Alt[<decisionNumber>] of
+    <alts:altSwitchCase()>
+  else
+    begin
+      if (FCnt[<decisionNumber>] >= 1) then
+        Break;
+      <ruleBacktrackFailure()>
+      raise EEarlyExitException.Create(<decisionNumber>, Input);
+      <@earlyExitException()>
+    end;
+  end;
+  Inc(FCnt[<decisionNumber>]);
+end;
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+(* <fileName>:<description> *)
+<decls>
+<@preloop()>
+while (True) do
+begin
+  Alt[<decisionNumber>] := <maxAlt>;
+  <@predecision()>
+  <decision>
+  <@postdecision()>
+  case Alt[<decisionNumber>] of
+    <alts:altSwitchCase()>
+  else
+    Break;
+  end;
+end;
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation,
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+<i>:
+  <@prealt()>
+  <it><\n>
+>>
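
As the comment above says, a decision first predicts an alternative (via a DFA or inline lookahead tests) and a plain switch then jumps to the code for that alternative. A minimal Java rendering of that shape, with hypothetical names:

    // Illustrative sketch: the predict-then-switch structure the block templates generate.
    abstract class DecisionSketch {
        interface DFA { int predict(); }    // returns the predicted alternative number
        protected DFA dfa3;                 // one DFA object per decision (name illustrative)

        protected abstract void matchAlt1();
        protected abstract void matchAlt2();

        void decision3() {
            int alt3 = dfa3.predict();      // which alternative does the lookahead select?
            switch (alt3) {
                case 1: matchAlt1(); break;
                case 2: matchAlt2(); break;
                default: throw new IllegalStateException("no viable alternative");
            }
        }
    }
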
+
+/** An alternative is just a list of elements; at the outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+(* <fileName>:<description> *)
+begin
+  <@declarations()>
+  <elements:element()>
+  <rew>
+  <@cleanup()>
+end;
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)><label> := <endif>Match(Input, <token>, FOLLOW_<token>_in_<ruleName><elementIndex>)<if(label)> as I<labelType><endif>;<\n><checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label> = nil) then list_<label> := TList\<IANTLRInterface\>.Create;
+list_<label>.Add(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> := Input.LA(1);<\n>
+<endif>
+Match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> := Input.LA(1);<\n>
+<endif>
+MatchRange(<a>, <b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label> := Input.LA(1);<\n>
+<else>
+<label> := Input.LT(1) as I<labelType>;<\n>
+<endif>
+<endif>
+if (<s>) then
+begin
+  Input.Consume;
+  <postmatchCode>
+  <if(!LEXER)>
+  State.ErrorRecovery := False;<endif>
+  <if(backtracking)>State.Failed := False;<endif>
+end
+else
+begin
+  <ruleBacktrackFailure()>
+  FException := EMismatchedSetException.Create(nil, Input);
+  <@mismatchedSetException()>
+<if(LEXER)>
+  Recover(FException);
+  raise FException;<\n>
+<else>
+  raise FException;
+  <! use following code to make it recover inline; remove throw mse;
+  RecoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+  !>
+<endif>
+end;<\n>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex) ::= <<
+<if(label)>
+Locals.AsInteger['<label>Start'] := CharIndex;
+Match(<string>); <checkRuleBacktrackFailure()>
+<label> := TCommonToken.Create(Input, TToken.INVALID_TOKEN_TYPE, TToken.DEFAULT_CHANNEL, Locals.AsInteger['<label>Start'], CharIndex-1);
+<else>
+Match(<string>); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label> := Input.LT(1) as I<labelType>;<\n>
+<endif>
+MatchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> := Input.LA(1);<\n>
+<endif>
+MatchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it, possibly with arguments
+ *  and a return value or values.  The 'rule' argument used to be the
+ *  target rule name, but is now of type Rule, whose toString is the
+ *  same: the rule name.  It also gives access to the full rule
+ *  descriptor.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+PushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)>
+<label> := <if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+<else>
+<if(scope)>T<scope.recognizerName>(IANTLRObject(<scope:delegateName()>).Implementor).<endif><rule.name>(<args; separator=", ">);<\n>
+<endif>
+State.FollowingStackPointer := State.FollowingStackPointer - 1;
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument used to be the target rule name, but is now
+ *  of type Rule, whose toString is the same: the rule name.
+ *  It also gives access to the full rule descriptor.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+Locals.AsInteger['<label>Start<elementIndex>'] := CharIndex;
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> := TCommonToken.Create(Input, TToken.INVALID_TOKEN_TYPE, TToken.DEFAULT_CHANNEL,
+  Locals.AsInteger['<label>Start<elementIndex>'], CharIndex - 1);
+<else>
+<if(scope)>(<scope:delegateName()>.Implementor as T<scope.recognizerName>).<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+Locals.AsInteger['<label>Start<elementIndex>'] := CharIndex;
+Match(EOF); <checkRuleBacktrackFailure()>
+Locals['<label>'] := TCommonToken.Create(Input, EOF, TToken.DEFAULT_CHANNEL, Locals.AsInteger['<label>Start<elementIndex>'], CharIndex-1);
+<else>
+Match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if (Input.LA(1) = TToken.DOWN) then
+begin
+  Match(Input, TToken.DOWN, nil); <checkRuleBacktrackFailure()>
+  <children:element()>
+  Match(Input, TToken.UP, nil); <checkRuleBacktrackFailure()>
+end;
+<else>
+Match(Input, TToken.DOWN, nil); <checkRuleBacktrackFailure()>
+<children:element()>
+Match(Input, TToken.UP, nil);<\n><checkRuleBacktrackFailure()>
+<endif>
+>>
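
Tree patterns are matched by walking a flattened node stream in which a node's children are bracketed by imaginary DOWN and UP tokens; when the child list is nullable, the bracket is matched only if children are actually present. A hedged Java sketch of the idea (constants and types are stand-ins):

    // Illustrative sketch only; token-type constants and helpers are hypothetical.
    abstract class TreeMatchSketch {
        static final int DOWN = 2, UP = 3, PLUS = 10;

        protected abstract int LA(int i);               // lookahead node type in the stream
        protected abstract void match(int tokenType);   // consume one node of the given type
        protected abstract void expr();                 // a child rule

        // match ^(PLUS expr expr)
        void plusTree() {
            match(PLUS);          // match the root node
            match(DOWN);          // descend into its children
            expr();
            expr();
            match(UP);            // climb back out
        }

        // with a nullable child list, only bracket when children exist
        void maybeChildren(int rootType) {
            match(rootType);
            if (LA(1) == DOWN) {
                match(DOWN);
                // ... children ...
                match(UP);
            }
        }
    }
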
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if (not (<evalPredicate(...)>)) then
+begin
+  <ruleBacktrackFailure()>
+  raise EFailedPredicateException.Create(Input, '<ruleName>', '<description>');
+end;<\n>
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+FLA[<decisionNumber>,<stateNumber>] := Input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else
+begin
+<if(eotPredictsAlt)>
+  Alt[<decisionNumber>] := <eotPredictsAlt>;<\n>
+<else>
+  <ruleBacktrackFailure()>
+  raise ENoViableAltException.Create('<description>', <decisionNumber>, <stateNumber>, Input);<\n>
+<endif>
+end;
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+FLA[<decisionNumber>,<stateNumber>] := Input.LA(<k>);<\n>
+<edges; separator="\nelse ">;
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+FLA[<decisionNumber>,<stateNumber>] := Input.LA(<k>);
+<edges; separator="\nelse ">;<\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+Alt[<decisionNumber>] := <eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else
+begin
+  Alt[<decisionNumber>] := <eotPredictsAlt>;
+end;<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "Alt[<decisionNumber>] := <alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ((<labelExpr>)<if(predicates)> and (<predicates>)<endif>) then
+begin
+  <targetState>
+end <! no ; here !>
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+case Input.LA(<k>) of
+  <edges; separator="\n">
+else
+  begin
+<if(eotPredictsAlt)>
+    Alt[<decisionNumber>] := <eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    <@noViableAltException()>
+    raise ENoViableAltException.Create('<description>', <decisionNumber>, <stateNumber>, Input);<\n>
+<endif>
+  end;
+end;<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+case Input.LA(<k>) of
+  <edges; separator="\n">
+end;<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+case Input.LA(<k>) of
+  <edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+else
+  Alt[<decisionNumber>] := <eotPredictsAlt>;<\n>
+<endif>
+end;<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{<it>}; separator=",\n">:
+  begin
+    <targetState>
+  end;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+Alt[<decisionNumber>] := FDFA<decisionNumber>.Predict(Input);
+>>
+
+/* Dump DFA tables.
+ */
+cyclicDFADeclaration(dfa) ::= <<
+strict protected
+  type
+    TDFA<dfa.decisionNumber> = class(TDFA)
+    protected
+      { IDFA }
+      function Description: String; override;
+    public
+      constructor Create(const ARecognizer: IBaseRecognizer);
+    end;
+  var
+    FDFA<dfa.decisionNumber>: IDFA;
+<if(dfa.specialStateSTs)>
+strict protected
+  function DFA<dfa.decisionNumber>_SpecialStateTransition(const DFA: IDFA; S: Integer;
+    const AInput: IIntStream): Integer;<endif>
+>>
+
+cyclicDFA(dfa) ::= <<
+{ T<grammar.recognizerName>.TDFA<dfa.decisionNumber> }
+
+constructor T<grammar.recognizerName>.TDFA<dfa.decisionNumber>.Create(const ARecognizer: IBaseRecognizer);
+const
+  DFA<dfa.decisionNumber>_EOT = '<dfa.javaCompressedEOT; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_EOF = '<dfa.javaCompressedEOF; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_MIN = '<dfa.javaCompressedMin; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_MAX = '<dfa.javaCompressedMax; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_ACCEPT = '<dfa.javaCompressedAccept; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_SPECIAL = '<dfa.javaCompressedSpecial; wrap="'+\n    '">';
+  DFA<dfa.decisionNumber>_TRANSITION: array [0..<length(dfa.javaCompressedTransition)>-1] of String = (
+    <dfa.javaCompressedTransition:{s|'<s; wrap="'+\n'">'}; separator=",\n">);
+begin
+  inherited Create;
+  Recognizer := ARecognizer;
+  DecisionNumber := <dfa.decisionNumber>;
+  EOT := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_EOT);
+  EOF := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_EOF);
+  Min := TDFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_MIN);
+  Max := TDFA.UnpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_MAX);
+  Accept := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_ACCEPT);
+  Special := TDFA.UnpackEncodedString(DFA<dfa.decisionNumber>_SPECIAL);
+  Transition := TDFA.UnpackEncodedStringArray(DFA<dfa.decisionNumber>_TRANSITION);
+end;
+
+function T<grammar.recognizerName>.TDFA<dfa.decisionNumber>.Description: String;
+begin
+  Result := '<dfa.description>';
+end;<\n>
+<if(dfa.specialStateSTs)>
+function T<grammar.recognizerName>.DFA<dfa.decisionNumber>_SpecialStateTransition(const DFA: IDFA; S: Integer;
+  const AInput: IIntStream): Integer;
+var
+  Locals: TLocalStorage;
+  <if(LEXER)>
+  Input: IIntStream;
+  <endif>
+  <if(PARSER)>
+  Input: ITokenStream;
+  <endif>
+  <if(TREE_PARSER)>
+  Input: ITreeNodeStream;
+  <endif>
+  _S: Integer;
+  NVAE: ENoViableAltException;
+begin
+  Result := -1;
+  Locals.Initialize;
+  try
+    <if(LEXER)>
+    Input := AInput;
+    <endif>
+    <if(PARSER)>
+    Input := AInput as ITokenStream;
+    <endif>
+    <if(TREE_PARSER)>
+    Input := AInput as ITreeNodeStream;
+    <endif>
+    _S := S;
+    case S of
+      <dfa.specialStateSTs:{state | <i0>: begin<! compressed special state numbers 0..n-1 !>
+     <state> <\n>   end;}; separator="\n">
+    end;
+<if(backtracking)>
+    if (State.Backtracking > 0) then
+    begin
+      State.Failed := True;
+      Exit(-1);
+    end;<\n>
+<endif>
+    NVAE := ENoViableAltException.Create(DFA.Description, <dfa.decisionNumber>, _S, Input);
+    DFA.Error(NVAE);
+    raise NVAE;
+  finally
+    Locals.Finalize;
+  end;
+end;<\n>
+<endif>
+>>
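
The DFA tables above are emitted as packed strings and expanded once in the constructor; the packing is essentially run-length encoding, with each pair of characters holding a repeat count and a value. A small Java sketch of that decoding idea, not the exact runtime implementation:

    // Illustrative sketch: run-length decoding of a packed table string.
    class PackedTableSketch {
        static short[] unpack(String encoded) {
            int size = 0;
            for (int i = 0; i < encoded.length(); i += 2) size += encoded.charAt(i);
            short[] out = new short[size];
            int p = 0;
            for (int i = 0; i < encoded.length(); i += 2) {
                char count = encoded.charAt(i);
                char value = encoded.charAt(i + 1);
                for (int j = 0; j < count; j++) out[p++] = (short) value;
            }
            return out;
        }

        public static void main(String[] args) {
            short[] t = unpack("\u0003\u0007");   // three copies of the value 7
            System.out.println(t.length + " entries, first = " + t[0]);
        }
    }
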
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+FLA[<decisionNumber>,<stateNumber>] := Input.LA(1);<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+Locals.AsInteger['index<decisionNumber>_<stateNumber>'] := Input.Index;
+Input.Rewind;<\n>
+<endif>
+S := -1;
+<edges; separator="\nelse ">;
+<if(semPredState)> <! return input cursor to state before we rewound !>
+Input.Seek(Locals.AsInteger['index<decisionNumber>_<stateNumber>']);<\n>
+<endif>
+if (S >= 0) then
+  Exit(S);
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ((<labelExpr>)<if(predicates)> and (<predicates>)<endif>) then
+  S := <targetStateNumber>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+S := <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "((<left>) and (<right>))"
+
+orPredicates(operands) ::= "(<operands:{o|(<o>)}; separator=\" or \">)"
+
+notPredicate(pred) ::= "not (<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "FLA[<decisionNumber>,<stateNumber>] = <atomAsInt>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "Input.LA(<k>) = <atomAsInt>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+((FLA[<decisionNumber>,<stateNumber>] \>= <lowerAsInt>) and (FLA[<decisionNumber>,<stateNumber>] \<= <upperAsInt>))
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(Input.LA(<k>) \>= <lowerAsInt>) and (Input.LA(<k>) \<= <upperAsInt>)"
+
+setTest(ranges) ::= "<ranges; separator=\") or (\">"
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+<scope.name>Stack := TStackList\<I<scope.name>Scope\>.Create;<\n>
+<endif>
+>>
+
+globalAttributeScopeDeclaration(scope) ::= <<
+<if(scope.attributes)>
+strict protected
+  type
+    I<scope.name>Scope = interface(IANTLRObject)
+    end;
+    T<scope.name>Scope = class(TANTLRObject, I<scope.name>Scope)
+    protected
+      <scope.attributes:{<it.name>: <it.type>;}; separator="\n">
+    end;
+strict protected
+  <scope.name>Stack: IStackList\<I<scope.name>Scope\>;
+<endif>
+>>
+
+ruleAttributeScopeDeclaration(scope) ::= <<
+<if(scope.attributes)>
+strict protected
+  type
+    I<scope.name>Scope = interface(IANTLRObject)
+    end;
+    T<scope.name>Scope = class(TANTLRObject, I<scope.name>Scope)
+    protected
+      <scope.attributes:{<it.name>: <it.type>;}; separator="\n">
+    end;
+strict protected
+  <scope.name>Stack: IStackList\<I<scope.name>Scope\>;
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<! protected Stack <scope.name>Stack = new Stack();<\n> !>
+>>
+
+ruleAttributeScopeInit(scope) ::= <<
+<if(scope)>
+<scope.name>Stack := TStackList\<I<scope.name>Scope\>.Create;<\n>
+<endif>
+>>
+
+returnStructName() ::= "<it.name>_return"
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor:returnStructName()>
+<! I<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope !>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+<! Pointer/void !>
+<endif>
+<endif>
+>>
+
+/** Generate the Delphi type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+I<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
+/** Using the type-to-init-value map, look up the default for a type; anything
+ *  not in the table must be an object reference, whose default value is "nil".
+ */
+initValue(typeName) ::= <<
+<csharpTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<label.label.text> := <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+
+ruleLabelDefVar(label) ::= <<
+<label.label.text>: <ruleLabelType(referencedRule=label.referencedRule)>;
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+{ T<ruleDescriptor:returnStructName()> }
+
+<scope.attributes:{public <it.decl>;}; separator="\n">
+<@ruleReturnMembers()>
+<endif>
+>>
+
+returnScopeDeclaration(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+public
+  type
+    T<ruleDescriptor:returnStructName()> = class(T<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope, I<ruleDescriptor:returnStructName()>)
+    <scope.attributes:{public <it.decl>;}; separator="\n">
+    <@ruleReturnMembers()>
+    end;
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> := <expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+(<scope>Stack[<scope>Stack.Count-<negIndex>-1] as T<scope>Scope).<attr.name>
+<else>
+<if(index)>
+(<scope>Stack[<index>] as T<scope>Scope).<attr.name>
+<else>
+(<scope>Stack.Peek.Implementor as T<scope>Scope).<attr.name>
+<endif>
+<endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+(<scope>Stack[<scope>Stack.Count-<negIndex>-1] as T<scope>Scope).<attr.name> := <expr>;<\n>
+<else>
+<if(index)>
+(<scope>Stack[<index>] as T<scope>Scope).<attr.name> := <expr>;<\n>
+<else>
+(<scope>Stack.Peek.Implementor as T<scope>Scope).<attr.name> := <expr>;<\n>
+<endif>
+<endif>
+>>
+
+/** $x is either a global scope or x is a rule with a dynamic scope; this refers
+ *  to the stack itself, not the top of the stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>Stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+(IfThen(Assigned(<scope>),Def(<scope>).<attr.name>,<initValue(attr.type)>))
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+RetVal.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+RetVal.<attr.name> := <expr>;
+<else>
+<attr.name> := <expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(Def(<scope>).Text)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(Def(<scope>).TokenType)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(Def(<scope>).Line)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(Def(<scope>).CharPositionInLine)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(Def(<scope>).Channel)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(Def(<scope>).TokenIndex)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(StrToIntDef(Def(<scope>).Text,0))"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(IfThen(Assigned(<scope>), Def(<scope>).Start, nil) as I<labelType>)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(Def(<scope>).Stop as I<labelType>)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(Def(Def(<scope>).Tree as I<ASTLabelType>))"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+IfThen(Assigned(<scope>), Input.TokenStream.ToString(
+  Input.TreeAdaptor.GetTokenStartIndex(Def(<scope>).Start),
+  Input.TreeAdaptor.GetTokenStopIndex(Def(<scope>).Start)), '')
+<else>
+IfThen(Assigned(<scope>), Input.ToString(
+  (Def(<scope>).Start) as IToken,(Def(<scope>).Stop) as IToken), '')
+<endif>
+>>
+ruleLabelPropertyRef_st(scope,attr) ::= "((<scope> != null) ? <scope>.ST : null)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "(Def(<scope>).TokenType)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "(Def(<scope>).Line)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(IfThen(Assigned(<scope>),Def(<scope>).CharPositionInLine,-1))"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(Def(<scope>).Channel)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "(Def(<scope>).TokenIndex)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "(Def(<scope>).Text)"
+lexerRuleLabelPropertyRef_int(scope,attr) ::= "(StrToIntDef(Def(<scope>).Text,0))"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "(RetVal.Start as I<labelType>)"
+rulePropertyRef_stop(scope,attr) ::= "(RetVal.Stop as I<labelType>)"
+rulePropertyRef_tree(scope,attr) ::= "(RetVal.Tree as I<ASTLabelType>)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+Input.TokenStream.ToString(
+  Input.TreeAdaptor.GetTokenStartIndex(RetVal.Start),
+  Input.TreeAdaptor.GetTokenStopIndex(RetVal.Start))
+<else>
+Input.ToString(RetVal.Start as IToken,Input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "RetVal.ST"
+
+lexerRulePropertyRef_text(scope,attr) ::= "Text"
+lexerRulePropertyRef_type(scope,attr) ::= "TokenType"
+lexerRulePropertyRef_line(scope,attr) ::= "State.TokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "State.TokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "Channel"
+lexerRulePropertyRef_start(scope,attr) ::= "State.TokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(CharIndex-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "StrToInt(<scope>.Text)"
+
+// Setting $st and $tree is allowed in a local rule; everything else
+// is flagged as an error.
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "RetVal.Tree := <expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "RetVal.ST := <expr>;"
+
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if (<actions.(actionScope).synpredgate>) then
+begin
+  <action>
+end;
+<else>
+if (State.Backtracking = 0) then
+begin
+  <action>
+end;<\n>
+<endif>
+<else>
+<action>
+<endif>
+>>
+
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+<name> := TBitSet.Create([<words64:{<it>};separator=",">]);<\n>
+>>
+
+bitsetDecl(name) ::= <<
+<name>: IBitSet;<\n>
+>>
+
+codeFileExtension() ::= ".pas"
+
+true() ::= "True"
+false() ::= "False"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Java/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/AST.stg
new file mode 100644
index 0000000..7956218
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/AST.stg
@@ -0,0 +1,416 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+import org.antlr.runtime.tree.*;<\n>
+<endif>
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+	<parserMembers()>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+parserMembers() ::= <<
+protected TreeAdaptor adaptor = new CommonTreeAdaptor();
+
+public void setTreeAdaptor(TreeAdaptor adaptor) {
+	this.adaptor = adaptor;
+	<grammar.directDelegates:{g|<g:delegateName()>.setTreeAdaptor(this.adaptor);}>
+}
+public TreeAdaptor getTreeAdaptor() {
+	return adaptor;
+}
+>>
+
+@returnScope.ruleReturnMembers() ::= <<
+<ASTLabelType> tree;
+@Override
+public <ASTLabelType> getTree() { return tree; }
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<ASTLabelType> root_0 = null;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<if(!ruleDescriptor.isSynPred)>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
+  ruleDescriptor.wildcardTreeListLabels]:{it | <ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{it | <ASTLabelType> <it.label.text>_tree=null;}; separator="\n">
+<if(ruleDescriptor.supportsLabelOptimization)>
+<ruleDescriptor.allTokenRefsInRewrites
+	:{it | RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInRewrites
+	:{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
+<else>
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+	:{it | RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>");}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+	:{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>");}; separator="\n">
+<endif>
+<endif>
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+@alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode && !ruleDescriptor.isSynPred)>
+root_0 = (<ASTLabelType>)adaptor.nil();<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label, ...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<token>.add(<label>);<\n>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label, ...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule.name>.add(<label>.getTree());
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(label, {<label>.getTree()})>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>stream_<rule>.add(<label>.getTree());
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(label, {<label>.getTree()})>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	referencedWildcardLabels,
+	referencedWildcardListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements: <referencedElementsDeep; separator=", ">
+// token labels: <referencedTokenLabels; separator=", ">
+// rule labels: <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels: <referencedRuleListLabels; separator=", ">
+// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {
+<endif>
+<prevRuleRootRef()>.tree = root_0;
+<rewriteCodeLabels()>
+root_0 = (<ASTLabelType>)adaptor.nil();
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
+input.replaceChildren(adaptor.getParent(retval.start),
+					  adaptor.getChildIndex(retval.start),
+					  adaptor.getChildIndex(_last),
+					  retval.tree);
+<endif>
+<endif>
+<! if parser or tree-parser && rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.tree = root_0;
+<else>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.tree = root_0;
+<endif>
+<endif>
+<if(backtracking)>
+}
+<endif>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+	:{it | RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>",<it>);};
+	separator="\n"
+>
+<referencedTokenListLabels
+	:{it | RewriteRule<rewriteElementType>Stream stream_<it>=new RewriteRule<rewriteElementType>Stream(adaptor,"token <it>", list_<it>);};
+	separator="\n"
+>
+<referencedWildcardLabels
+	:{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",<it>);};
+	separator="\n"
+>
+<referencedWildcardListLabels
+	:{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"wildcard <it>",list_<it>);};
+	separator="\n"
+>
+<referencedRuleLabels
+	:{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"rule <it>",<it>!=null?<it>.getTree():null);};
+	separator="\n"
+>
+<referencedRuleListLabels
+	:{it | RewriteRuleSubtreeStream stream_<it>=new RewriteRuleSubtreeStream(adaptor,"token <it>",list_<it>);};
+	separator="\n"
+>
+>>
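
The rewriteCode()/rewriteCodeLabels() pair above is easier to read next to the runtime classes it drives. Below is a hedged, self-contained sketch of the shape of code these templates emit for a rewrite such as -> ^(ASSIGN ID expr); the token types and element names are made up, only the org.antlr.runtime classes and calls are real.

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeAdaptor;
    import org.antlr.runtime.tree.RewriteRuleSubtreeStream;
    import org.antlr.runtime.tree.RewriteRuleTokenStream;
    import org.antlr.runtime.tree.TreeAdaptor;

    public class RewriteShape {
        static final int ASSIGN = 4, ID = 5, INT = 6;   // made-up token types

        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            // rewriteCodeLabels(): one stream per referenced element, seeded while matching.
            RewriteRuleTokenStream stream_ID = new RewriteRuleTokenStream(adaptor, "token ID");
            RewriteRuleSubtreeStream stream_expr = new RewriteRuleSubtreeStream(adaptor, "rule expr");
            stream_ID.add(new CommonToken(ID, "x"));                     // as if ID had been matched
            stream_expr.add(adaptor.create(new CommonToken(INT, "1")));  // as if expr had returned a tree
            // rewriteCode()/rewriteTree(): build ^(ASSIGN ID expr) under a nil root.
            Object root_0 = adaptor.nil();
            Object root_1 = adaptor.nil();
            root_1 = adaptor.becomeRoot(adaptor.create(ASSIGN, "ASSIGN"), root_1);
            adaptor.addChild(root_1, stream_ID.nextNode());
            adaptor.addChild(root_1, stream_expr.nextTree());
            adaptor.addChild(root_0, root_1);
            CommonTree result = (CommonTree) adaptor.rulePostProcessing(root_0);
            System.out.println(result.toStringTree());                   // prints (ASSIGN x 1)
        }
    }
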
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather than the shallow list used by other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in the immediate block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if ( <referencedElementsDeep:{el | stream_<el>.hasNext()}; separator="||"> ) {
+	<alt>
+}
+<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in the immediate block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | stream_<el>.hasNext()}; separator="||"> ) {
+	<alt>
+}
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in the immediate block; no nested blocks
+	description) ::=
+<<
+if ( !(<referencedElements:{el | stream_<el>.hasNext()}; separator="||">) ) {
+	throw new RewriteEarlyExitException();
+}
+while ( <referencedElements:{el | stream_<el>.hasNext()}; separator="||"> ) {
+	<alt>
+}
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>) {
+	<a.alt>
+}<\n>
+<else>
+{
+	<a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = null;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.nil();
+<root:rewriteElement()>
+<children:rewriteElement()>
+adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,args,terminalOptions={}) ::= <<
+adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>);
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode());
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,args,terminalOptions={}) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>);
+>>
+
+rewriteImaginaryTokenRef(args,token,elementIndex,terminalOptions={}) ::= <<
+adaptor.addChild(root_<treeLevel>, <createImaginaryNode(token,args,terminalOptions)>);
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,elementIndex,terminalOptions={}) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<createImaginaryNode(token,args,terminalOptions)>, root_<treeLevel>);
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean the previous value.  I am reusing the
+ *  variable 'tree' sitting in the retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assignment will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree());
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>);
+>>
+
+rewriteNodeAction(action) ::= <<
+adaptor.addChild(root_<treeLevel>, <action>);
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<action>, root_<treeLevel>);
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>);
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree());
+>>
+
+
+createImaginaryNode(tokenType,args,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+<! new MethodNode(IDLabel, args) !>
+new <terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+(<ASTLabelType>)adaptor.create(<tokenType>, <args; separator=", "><if(!args)>"<tokenType>"<endif>)
+<endif>
+%>
+
+createRewriteNodeFromElement(token,args,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+new <terminalOptions.node>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+adaptor.create(<token>, <args; separator=", ">)
+<else>
+stream_<token>.nextNode()
+<endif>
+<endif>
+%>
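
Both factory templates above branch on the node=... terminal option: when it is present, the generated code calls new <terminalOptions.node>(...) instead of going through adaptor.create(...). A hedged sketch of what such a user-supplied node class could look like follows; VarNode and the VARDEF type are assumptions, while CommonTree and CommonToken are real runtime classes.

    import org.antlr.runtime.CommonToken;
    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.CommonTree;

    // A custom AST node a grammar might select with a rewrite like -> ^(VARDEF<VarNode> ID).
    public class VarNode extends CommonTree {
        // Used when the node wraps a real matched token (createRewriteNodeFromElement path).
        public VarNode(Token t) {
            super(t);
        }

        // Used for imaginary tokens, where createImaginaryNode() passes the token type.
        public VarNode(int tokenType) {
            super(new CommonToken(tokenType, "VARDEF"));
        }

        @Override
        public String toString() {
            return "VarNode(" + getText() + ")";
        }
    }
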
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTDbg.stg
new file mode 100644
index 0000000..6ff493c
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTDbg.stg
@@ -0,0 +1,85 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : Java by the code generator.
+ */
+
+parserMembers() ::= <<
+	protected DebugTreeAdaptor adaptor;
+	public void setTreeAdaptor(TreeAdaptor adaptor) {
+<if(grammar.grammarIsRoot)>
+		this.adaptor = new DebugTreeAdaptor(dbg,adaptor);
+<else>
+		this.adaptor = (DebugTreeAdaptor)adaptor; // delegator sends dbg adaptor
+<endif>
+		<grammar.directDelegates:{g|<g:delegateName()>.setTreeAdaptor(this.adaptor);}>
+	}
+	public TreeAdaptor getTreeAdaptor() {
+		return adaptor;
+	}
+>>
+
+parserCtorBody() ::= <<
+<super.parserCtorBody()>
+>>
+
+createListenerAndHandshake() ::= <<
+DebugEventSocketProxy proxy =
+	new DebugEventSocketProxy(this,port,<if(TREE_PARSER)>input.getTreeAdaptor()<else>adaptor<endif>);
+setDebugListener(proxy);
+set<inputStreamType>(new Debug<inputStreamType>(input,proxy));
+try {
+	proxy.handshake();
+}
+catch (IOException ioe) {
+	reportError(ioe);
+}
+>>
+
+@ctorForRootGrammar.finally() ::= <<
+TreeAdaptor adap = new CommonTreeAdaptor();
+setTreeAdaptor(adap);
+proxy.setTreeAdaptor(adap);
+>>
+
+@ctorForProfilingRootGrammar.finally() ::=<<
+TreeAdaptor adap = new CommonTreeAdaptor();
+setTreeAdaptor(adap);
+>>
+
+@ctorForPredefinedListener.superClassRef() ::= "super(input, dbg);"
+
+@ctorForPredefinedListener.finally() ::=<<
+<if(grammar.grammarIsRoot)> <! don't create new adaptor for delegates !>
+TreeAdaptor adap = new CommonTreeAdaptor();
+setTreeAdaptor(adap);
+<endif>
+>>
+
+@rewriteElement.pregen() ::= "dbg.location(<e.line>,<e.pos>);"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTParser.stg
new file mode 100644
index 0000000..7df6e42
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTParser.stg
@@ -0,0 +1,199 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+
+@rule.setErrorReturnValue() ::= <<
+retval.tree = (<ASTLabelType>)adaptor.errorNode(input, retval.start, input.LT(-1), re);<!
+ System.out.println("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.tokenRef(...)>
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex,terminalOptions={}) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.tokenRef(...)>
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label, ...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label, ...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label, ...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like encoding
+// the operator in the template name, as the number of templates gets
+// large, but this is the most flexible approach--as opposed to having
+// the code generator call matchSet and then add root code or rule-root code
+// plus list-label code plus ...  The combinations might require complicated
+// code rather than just added-on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <%
+<super.matchSet(postmatchCode={<if(!ruleDescriptor.isSynPred)><if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <createNodeFromToken(...)>);<endif>}, ...)>
+%>
+
+matchRuleBlockSet(s,label,elementIndex,postmatchCode,treeLevel="0",terminalOptions={}) ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,elementIndex,debug,terminalOptions={}) ::= <<
+<if(label)>
+<label>=<castToLabelType("input.LT(1)")>;
+<endif>
+<super.matchSet(postmatchCode={<if(!ruleDescriptor.isSynPred)><if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = (<ASTLabelType>)adaptor.becomeRoot(<createNodeFromToken(...)>, root_0);<endif>},...)>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>adaptor.addChild(root_0, <label>.getTree());
+<endif>
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>.getTree(), root_0);
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(label, {<label>.getTree()})>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(label, {<label>.getTree()})>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(label, {<label>.getTree()})>
+>>
+
+// WILDCARD AST
+
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.wildcard(...)>
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
+adaptor.addChild(root_0, <label>_tree);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(token=[],...)>"
+
+wildcardRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.wildcard(...)>
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.create(<label>);
+root_0 = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_0);
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+createNodeFromToken(label,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+new <terminalOptions.node>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+(<ASTLabelType>)adaptor.create(<label>)
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+retval.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
+adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
+<if(backtracking)>}<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTTreeParser.stg
new file mode 100644
index 0000000..cbc26ea
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/ASTTreeParser.stg
@@ -0,0 +1,373 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+<if(!ruleDescriptor.isSynPred)>
+<ASTLabelType> _first_0 = null;
+<ASTLabelType> _last = null;<\n>
+<endif>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel=false, treeLevel=false) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(rewriteMode)>
+retval.tree = _first_0;
+if ( adaptor.getParent(retval.tree)!=null && adaptor.isNil( adaptor.getParent(retval.tree) ) )
+	retval.tree = (<ASTLabelType>)adaptor.getParent(retval.tree);
+<endif>
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+	 enclosingTreeLevel, treeLevel) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+{
+<ASTLabelType> _save_last_<treeLevel> = _last;
+<ASTLabelType> _first_<treeLevel> = null;
+<if(!rewriteMode)>
+<ASTLabelType> root_<treeLevel> = (<ASTLabelType>)adaptor.nil();
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+<if(root.el.rule)>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = (<ASTLabelType>)<root.el.label>.getTree();
+<elseif(root.el.label)>
+if ( _first_<enclosingTreeLevel>==null ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==Token.DOWN ) {
+	match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+	<children:element()>
+	match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>);
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+<else>
+<super.tree(...)>
+<endif>
+>>
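
The tree() template above is what consumes the imaginary DOWN/UP navigation tokens a tree node stream emits around each subtree. A hedged end-to-end sketch follows, assuming hypothetical generated classes ExprLexer, ExprParser and ExprWalker sharing a rule prog; CommonTreeNodeStream and the other runtime classes are real API.

    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.CommonTokenStream;
    import org.antlr.runtime.tree.CommonTree;
    import org.antlr.runtime.tree.CommonTreeNodeStream;

    public class WalkDemo {
        public static void main(String[] args) throws Exception {
            // ExprLexer, ExprParser, ExprWalker and rule prog are hypothetical generated classes.
            CommonTokenStream tokens =
                new CommonTokenStream(new ExprLexer(new ANTLRStringStream("a = 1;")));
            ExprParser parser = new ExprParser(tokens);
            CommonTree ast = (CommonTree) parser.prog().getTree();
            // The node stream flattens the AST, inserting DOWN/UP tokens that the
            // match(input, Token.DOWN/UP, null) calls in tree() consume.
            CommonTreeNodeStream nodes = new CommonTreeNodeStream(ast);
            nodes.setTokenStream(tokens);
            ExprWalker walker = new ExprWalker(nodes);
            walker.prog();
        }
    }
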
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) except that it also
+ *  sets _last
+ */
+tokenRefBang(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<else>
+<super.tokenRefBang(...)>
+<endif>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+<else>
+<super.tokenRef(...)>
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+<else>
+<super.tokenRefAndListLabel(...)>
+<endif>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>}<endif>
+<endif>
+<else>
+<super.tokenRefRuleRoot(...)>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+<else>
+<super.tokenRefRuleRootAndListLabel(...)>
+<endif>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType>)adaptor.dupTree(<label>);
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = <label>;
+<endif>
+<else>
+<super.wildcard(...)>
+<endif>
+>>
+
+// SET AST
+
+matchSet(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+adaptor.addChild(root_<treeLevel>, <label>_tree);
+<if(backtracking)>\}<endif>
+<endif>
+}, ...
+)>
+<else>
+<super.matchSet(...)>
+<endif>
+>>
+
+matchRuleBlockSet(s,label,elementIndex,postmatchCode,treeLevel="0",terminalOptions={}) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<matchSet(...)>
+<noRewrite(...)> <! set return tree !>
+<else>
+<super.matchRuleBlockSet(...)>
+<endif>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.matchSet(...)>
+<else>
+<super.matchSetBang(...)>
+<endif>
+>>
+
+matchSetRuleRoot(s,label,elementIndex,debug,terminalOptions={}) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = new <terminalOptions.node>(<label>);
+<else>
+<label>_tree = (<ASTLabelType>)adaptor.dupNode(<label>);
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>_tree, root_<treeLevel>);
+<if(backtracking)>\}<endif>
+<endif>
+}, ...
+)>
+<else>
+<super.matchSetRuleRoot(...)>
+<endif>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+<if(!rewriteMode)>
+adaptor.addChild(root_<treeLevel>, <label>.getTree());
+<else> <! rewrite mode !>
+if ( _first_<treeLevel>==null ) _first_<treeLevel> = (<ASTLabelType>)<label>.getTree();
+<endif>
+<else>
+<super.ruleRef(...)>
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<ruleRef(...)>
+<listLabel(label, {<label>.getTree()})>
+<else>
+<super.ruleRefAndListLabel(...)>
+<endif>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>root_<treeLevel> = (<ASTLabelType>)adaptor.becomeRoot(<label>.getTree(), root_<treeLevel>);
+<endif>
+<else>
+<super.ruleRefRuleRoot(...)>
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<ruleRefRuleRoot(...)>
+<listLabel(label, {<label>.getTree()})>
+<else>
+<super.ruleRefRuleRootAndListLabel(...)>
+<endif>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefTrackAndListLabel(...)>
+<else>
+<super.ruleRefTrackAndListLabel(...)>
+<endif>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRuleRootTrack(...)>
+<else>
+<super.ruleRefRuleRootTrack(...)>
+<endif>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+_last = (<ASTLabelType>)input.LT(1);
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+<else>
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+<endif>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,args,terminalOptions={}) ::= <<
+<if(terminalOptions.node)>
+new <terminalOptions.node>(stream_<token>.nextNode())
+<else>
+stream_<token>.nextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!ruleDescriptor.isSynPred)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = (<ASTLabelType>)adaptor.rulePostProcessing(root_0);
+<if(backtracking)>}<endif>
+<endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Java/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/Dbg.stg
new file mode 100644
index 0000000..e422ed8
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/Dbg.stg
@@ -0,0 +1,264 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal Java output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+@outputFile.imports() ::= <<
+<@super.imports()>
+import org.antlr.runtime.debug.*;
+import java.io.IOException;
+>>
+
+@genericParser.members() ::= <<
+<if(grammar.grammarIsRoot)>
+	public static final String[] ruleNames = new String[] {
+		"invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n\t\t", separator=", ">
+	};<\n>
+<endif>
+	public static final boolean[] decisionCanBacktrack = new boolean[] {
+		false, // invalid decision
+		<grammar.decisions:{d | <d.dfa.hasSynPred; null="false">}; wrap="\n    ", separator=", ">
+	};<\n>
+<if(grammar.grammarIsRoot)> <! grammar imports other grammar(s) !>
+	public int ruleLevel = 0;
+	public int getRuleLevel() { return ruleLevel; }
+	public void incRuleLevel() { ruleLevel++; }
+	public void decRuleLevel() { ruleLevel--; }
+<if(profile)>
+	<ctorForProfilingRootGrammar()>
+<else>
+	<ctorForRootGrammar()>
+<endif>
+	<ctorForPredefinedListener()>
+<else><! imported grammar !>
+	public int getRuleLevel() { return <grammar.delegators:{g| <g:delegateName()>}>.getRuleLevel(); }
+	public void incRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.incRuleLevel(); }
+	public void decRuleLevel() { <grammar.delegators:{g| <g:delegateName()>}>.decRuleLevel(); }
+	<ctorForDelegateGrammar()>
+<endif>
+<if(profile)>
+	public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+		int stopIndex = getRuleMemoization(ruleIndex, input.index());
+		((Profiler)dbg).examineRuleMemoization(input, ruleIndex, stopIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+		return super.alreadyParsedRule(input, ruleIndex);
+	}
+
+	@Override
+	public void memoize(IntStream input,
+						int ruleIndex,
+						int ruleStartIndex)
+	{
+		((Profiler)dbg).memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+		super.memoize(input, ruleIndex, ruleStartIndex);
+	}<\n>
+<endif>
+	protected boolean evalPredicate(boolean result, String predicate) {
+		dbg.semanticPredicate(result, predicate);
+		return result;
+	}<\n>
+>>
+
+ctorForRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<! Same except we add port number and profile stuff if root grammar !>
+public <name>(<inputStreamType> input) {
+	this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT, new RecognizerSharedState());
+}
+public <name>(<inputStreamType> input, int port, RecognizerSharedState state) {
+	super(input, state);
+	<parserCtorBody()>
+	<createListenerAndHandshake()>
+	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+	<@finally()>
+}<\n>
+>>
+
+ctorForProfilingRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+public <name>(<inputStreamType> input) {
+	this(input, new Profiler(null), new RecognizerSharedState());
+}
+public <name>(<inputStreamType> input, DebugEventListener dbg, RecognizerSharedState state) {
+	super(input, dbg, state);
+	Profiler p = (Profiler)dbg;
+	p.setParser(this);
+	<parserCtorBody()>
+	<grammar.directDelegates:
+		{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+	<@finally()>
+}
+<\n>
+>>
+
+/** Basically we don't want to set any dbg listeners as the root will have it. */
+ctorForDelegateGrammar() ::= <<
+public <name>(<inputStreamType> input, DebugEventListener dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+	super(input, dbg, state);
+	<parserCtorBody()>
+	<grammar.directDelegates:
+		{g|<g:delegateName()> = new <g.recognizerName>(input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+}<\n>
+>>
+
+ctorForPredefinedListener() ::= <<
+public <name>(<inputStreamType> input, DebugEventListener dbg) {
+	<@superClassRef>super(input, dbg, new RecognizerSharedState());<@end>
+<if(profile)>
+	Profiler p = (Profiler)dbg;
+	p.setParser(this);
+<endif>
+	<parserCtorBody()>
+	<grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+	<@finally()>
+}<\n>
+>>
+
+createListenerAndHandshake() ::= <<
+<if(TREE_PARSER)>
+DebugEventSocketProxy proxy =
+	new DebugEventSocketProxy(this, port, input.getTreeAdaptor());<\n>
+<else>
+DebugEventSocketProxy proxy =
+	new DebugEventSocketProxy(this, port, null);<\n>
+<endif>
+setDebugListener(proxy);
+try {
+	proxy.handshake();
+}
+catch (IOException ioe) {
+	reportError(ioe);
+}
+>>
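
createListenerAndHandshake() is what makes a -debug build block at construction time: the proxy opens a socket and handshake() waits for a remote debugger such as ANTLRWorks to attach. A hedged sketch of driving such a parser follows; ExprLexer, ExprParser and rule prog are hypothetical generated names.

    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.CommonTokenStream;

    public class DebugDemo {
        public static void main(String[] args) throws Exception {
            // Classes generated with "java org.antlr.Tool -debug Expr.g" (hypothetical grammar).
            ExprLexer lexer = new ExprLexer(new ANTLRStringStream("a = 1;"));
            // The one-argument constructor from ctorForRootGrammar() uses
            // DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT and blocks in handshake()
            // until a debugger connects; parsing events then stream over the socket.
            ExprParser parser = new ExprParser(new CommonTokenStream(lexer));
            parser.prog();
        }
    }
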
+
+@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+@rule.preamble() ::= <<
+try { dbg.enterRule(getGrammarFileName(), "<ruleName>");
+if ( getRuleLevel()==0 ) {dbg.commence();}
+incRuleLevel();
+dbg.location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.charPositionInLine>);<\n>
+>>
+
+@rule.postamble() ::= <<
+dbg.location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>);<\n>
+}
+finally {
+	dbg.exitRule(getGrammarFileName(), "<ruleName>");
+	decRuleLevel();
+	if ( getRuleLevel()==0 ) {dbg.terminate();}
+}<\n>
+>>
+
+@synpred.start() ::= "dbg.beginBacktrack(state.backtracking);"
+
+@synpred.stop() ::= "dbg.endBacktrack(state.backtracking, success);"
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+	"try { dbg.enterSubRule(<decisionNumber>);<\n>"
+
+exitSubRule() ::=
+	"} finally {dbg.exitSubRule(<decisionNumber>);}<\n>"
+
+enterDecision() ::=
+	"try { dbg.enterDecision(<decisionNumber>, decisionCanBacktrack[<decisionNumber>]);<\n>"
+
+exitDecision() ::=
+	"} finally {dbg.exitDecision(<decisionNumber>);}<\n>"
+
+enterAlt(n) ::= "dbg.enterAlt(<n>);<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+@block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+@block.postdecision() ::= "<exitDecision()>"
+
+@block.postbranch() ::= "<exitSubRule()>"
+
+@ruleBlock.predecision() ::= "<enterDecision()>"
+
+@ruleBlock.postdecision() ::= "<exitDecision()>"
+
+@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+@positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+@positiveClosureBlock.earlyExitException() ::=
+	"dbg.recognitionException(eee);<\n>"
+
+@closureBlock.preloop() ::= "<enterSubRule()>"
+
+@closureBlock.postloop() ::= "<exitSubRule()>"
+
+@closureBlock.predecision() ::= "<enterDecision()>"
+
+@closureBlock.postdecision() ::= "<exitDecision()>"
+
+@altSwitchCase.prealt() ::= "<enterAlt(altNum)>" // altNum is arg of altSwitchCase
+
+@element.prematch() ::=
+	"dbg.location(<e.line>,<e.pos>);" // e is arg of element
+
+@matchSet.mismatchedSetException() ::=
+	"dbg.recognitionException(mse);"
+
+@dfaState.noViableAltException() ::= "dbg.recognitionException(nvae);"
+
+@dfaStateSwitch.noViableAltException() ::= "dbg.recognitionException(nvae);"
+
+dfaDecision(decisionNumber,description) ::= <<
+try {
+	isCyclicDecision = true;
+	<super.dfaDecision(...)>
+}
+catch (NoViableAltException nvae) {
+	dbg.recognitionException(nvae);
+	throw nvae;
+}
+>>
+
+@cyclicDFA.errorMethod() ::= <<
+public void error(NoViableAltException nvae) {
+	dbg.recognitionException(nvae);
+}
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+evalPredicate(<pred>,"<description>")
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Java/Java.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/Java.stg
new file mode 100644
index 0000000..c4cc8d4
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/Java.stg
@@ -0,0 +1,1482 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2010 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+javaTypeInitMap ::= [
+	"int":"0",
+	"long":"0",
+	"float":"0.0f",
+	"double":"0.0",
+	"boolean":"false",
+	"byte":"0",
+	"short":"0",
+	"char":"0",
+	default:"null" // anything other than an atomic type
+]
+
+// System.Boolean.ToString() returns "True" and "False", but the proper C# literals are "true" and "false"
+// The Java version of Boolean returns "true" and "false", so they map to themselves here.
+booleanLiteral ::= [
+	"True":"true",
+	"False":"false",
+	"true":"true",
+	"false":"false",
+	default:"false"
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+<actions.(actionScope).header>
+
+<@imports>
+import org.antlr.runtime.*;
+<if(TREE_PARSER)>
+import org.antlr.runtime.tree.*;
+<endif>
+import java.util.Stack;
+import java.util.List;
+import java.util.ArrayList;
+<if(backtracking)>
+import java.util.Map;
+import java.util.HashMap;
+<endif>
+<@end>
+
+<docComment>
+@SuppressWarnings("all")
+<recognizer>
+
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="CommonToken",
+      superClass="Lexer") ::= <<
+public class <grammar.recognizerName> extends <@superClassName><superClass><@end> {
+	<tokens:{it | public static final int <it.name>=<it.type>;}; separator="\n">
+	<scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}>
+	<actions.lexer.members>
+
+	// delegates
+	<grammar.delegates:
+		{g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+	// delegators
+	<grammar.delegators:
+		{g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+	<last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+	public <superClass>[] getDelegates() {
+		return new <superClass>[] {<grammar.delegates: {g|<g:delegateName()>}; separator = ", ">};
+	}
+
+	public <grammar.recognizerName>() {} <! needed by subclasses !>
+	public <grammar.recognizerName>(CharStream input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+		this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>);
+	}
+	public <grammar.recognizerName>(CharStream input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+		super(input,state);
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+		state.ruleMemo = new HashMap[<numRules>+1];<\n><! index from 1..n !>
+<endif>
+<endif>
+		<grammar.directDelegates:
+			{g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+		<grammar.delegators:
+			{g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+		<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+	}
+	@Override public String getGrammarFileName() { return "<fileName>"; }
+
+<if(filterMode)>
+	<filteringNextToken()>
+<endif>
+	<rules; separator="\n\n">
+
+	<synpreds:{p | <lexerSynpred(p)>}>
+
+	<cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
+	<cyclicDFAs:cyclicDFA(); separator="\n\n"><! dump tables for all DFA !>
+
+}
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error is reported upon a mismatch; just rewind, consume
+ *  a character and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+@Override
+public Token nextToken() {
+	while (true) {
+		if ( input.LA(1)==CharStream.EOF ) {
+			Token eof = new CommonToken(input,Token.EOF,
+										Token.DEFAULT_CHANNEL,
+										input.index(),input.index());
+			eof.setLine(getLine());
+			eof.setCharPositionInLine(getCharPositionInLine());
+			return eof;
+		}
+		state.token = null;
+	state.channel = Token.DEFAULT_CHANNEL;
+		state.tokenStartCharIndex = input.index();
+		state.tokenStartCharPositionInLine = input.getCharPositionInLine();
+		state.tokenStartLine = input.getLine();
+	state.text = null;
+		try {
+			int m = input.mark();
+			state.backtracking=1; <! means we won't throw slow exception !>
+			state.failed=false;
+			mTokens();
+			state.backtracking=0;
+			<! mTokens backtracks with synpred at backtracking==2
+				and we set the synpredgate to allow actions at level 1. !>
+			if ( state.failed ) {
+				input.rewind(m);
+				input.consume(); <! advance one char and try again !>
+			}
+			else {
+				emit();
+				return state.token;
+			}
+		}
+		catch (RecognitionException re) {
+			// shouldn't happen in backtracking mode, but...
+			reportError(re);
+			recover(re);
+		}
+	}
+}
+
+@Override
+public void memoize(IntStream input,
+		int ruleIndex,
+		int ruleStartIndex)
+{
+if ( state.backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
+}
+
+@Override
+public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+if ( state.backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
+return false;
+}
+>>
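
In filter mode the loop above silently drops input that matches no lexer rule, which is what makes "fuzzy" lexing work. A hedged usage sketch follows, assuming a hypothetical lexer FuzzyJava generated from a grammar with options { filter=true; }.

    import org.antlr.runtime.ANTLRStringStream;
    import org.antlr.runtime.Token;

    public class FuzzyDemo {
        public static void main(String[] args) {
            // FuzzyJava is a hypothetical filter-mode lexer; unmatched input is skipped
            // by the nextToken() override above rather than reported as an error.
            FuzzyJava lexer = new FuzzyJava(new ANTLRStringStream("class T { int x; }"));
            for (Token t = lexer.nextToken(); t.getType() != Token.EOF; t = lexer.nextToken()) {
                System.out.println(t);
            }
        }
    }
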
+
+actionGate() ::= "state.backtracking==0"
+
+filteringActionGate() ::= "state.backtracking==1"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              labelType, members, rewriteElementType,
+              filterMode, ASTLabelType="Object") ::= <<
+public class <grammar.recognizerName> extends <@superClassName><superClass><@end> {
+<if(grammar.grammarIsRoot)>
+	public static final String[] tokenNames = new String[] {
+		"\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ", wrap="\n\t\t">
+	};
+<endif>
+	<tokens:{it |public static final int <it.name>=<it.type>;}; separator="\n">
+
+	// delegates
+	<grammar.delegates: {g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+	public <superClass>[] getDelegates() {
+		return new <superClass>[] {<grammar.delegates: {g|<g:delegateName()>}; separator = ", ">};
+	}
+
+	// delegators
+	<grammar.delegators:
+		{g|public <g.recognizerName> <g:delegateName()>;}; separator="\n">
+	<last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
+	<scopes:{it |<if(it.isDynamicGlobalScope)><globalAttributeScope(it)><endif>}>
+
+<@members>
+	<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+	public <grammar.recognizerName>(<inputStreamType> input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+		this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>);
+	}
+	public <grammar.recognizerName>(<inputStreamType> input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+		super(input, state);
+		<parserCtorBody()>
+		<grammar.directDelegates:
+			{g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+		<grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+		<last(grammar.delegators):{g|gParent = <g:delegateName()>;}>
+	}
+<@end>
+
+	@Override public String[] getTokenNames() { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; }
+	@Override public String getGrammarFileName() { return "<fileName>"; }
+
+	<members>
+
+	<rules; separator="\n\n">
+
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+	// Delegated rules
+<grammar.delegatedRules:{ruleDescriptor|
+	public <returnType(ruleDescriptor)> <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope()>) throws <ruleDescriptor.throwsSpec; separator=", "> { <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", "><endif>); \}}; separator="\n">
+
+	<synpreds:{p | <synpred(p)>}>
+
+	<cyclicDFAs:{dfa | protected DFA<dfa.decisionNumber> dfa<dfa.decisionNumber> = new DFA<dfa.decisionNumber>(this);}; separator="\n">
+	<cyclicDFAs:cyclicDFA(); separator="\n\n"><! dump tables for all DFA !>
+
+	<bitsets:{it | <bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+							words64=it.bits)>}; separator="\n">
+}
+>>
+
+parserCtorBody() ::= <<
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = new HashMap[<length(grammar.allImportedRules)>+1];<\n><! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+	{g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
+       ASTLabelType="Object", superClass="Parser", labelType="Token",
+       members={<actions.parser.members>}) ::= <<
+<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, "TokenStream", superClass,
+              labelType, members, "Token",
+              false, ASTLabelType)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object",
+           superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
+           members={<actions.treeparser.members>}
+           ) ::= <<
+<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, "TreeNodeStream", superClass,
+              labelType, members, "Node",
+              filterMode, ASTLabelType)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  or parameters, etc., just generate the simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>
+public final void <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope()>) throws <ruleDescriptor.throwsSpec:{x|<x>}; separator=", "> {
+	<ruleLabelDefs()>
+<if(trace)>
+	traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+	try {
+		<block>
+	}
+	finally {
+		traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+	}
+<else>
+	<block>
+<endif>
+}
+// $ANTLR end <ruleName>
+>>
+
+synpred(name) ::= <<
+public final boolean <name>() {
+	state.backtracking++;
+	<@start()>
+	int start = input.mark();
+	try {
+		<name>_fragment(); // can never throw exception
+	} catch (RecognitionException re) {
+		System.err.println("impossible: "+re);
+	}
+	boolean success = !state.failed;
+	input.rewind(start);
+	<@stop()>
+	state.backtracking--;
+	state.failed=false;
+	return success;
+}<\n>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( state.backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { <returnStatement(({<ruleReturnValue()>}))> }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.failed) <returnStatement(({<ruleReturnValue()>}))><endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.backtracking>0) {state.failed=true; <returnStatement(({<ruleReturnValue()>}))>}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// $ANTLR start "<ruleName>"
+// <fileName>:<description>
+<if(isPredefinedRewriteRule.(ruleName) && filterMode && buildAST)>
+@Override
+<endif>
+public final <returnType(ruleDescriptor)> <ruleName>(<ruleDescriptor.parameterScope:parameterScope()>) throws <ruleDescriptor.throwsSpec:{x|<x>}; separator=", "> {
+	<if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+	<ruleScopeSetUp()>
+	<ruleDeclarations()>
+	<ruleLabelDefs()>
+	<ruleDescriptor.actions.init>
+	<@preamble()>
+	try {
+		<ruleMemoization(name=ruleName)>
+		<block>
+		<ruleCleanUp()>
+		<(ruleDescriptor.actions.after):execAction()>
+	}
+<if(exceptions)>
+	<exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+	<actions.(actionScope).rulecatch>
+<else>
+	catch (RecognitionException re) {
+		reportError(re);
+		recover(input,re);
+		<@setErrorReturnValue()>
+	}
+<endif>
+<endif>
+<endif>
+	finally {
+		// do for sure before leaving
+		<if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+		<memoize()>
+		<ruleScopeCleanUp()>
+		<finally>
+	}
+	<@postamble()>
+	<returnStatement(({<ruleReturnValue()>}), false)>
+}
+// $ANTLR end "<ruleName>"
+>>
+
+returnStatement(returnValue, force=true) ::= <%
+<if(!isEmptyString.(returnValue))>
+	return <returnValue>;
+<elseif(force)>
+	return;
+<endif>
+%>
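+/* For illustration: <returnStatement(({<ruleReturnValue()>}))> renders
+ * "return retval;" in a rule with multiple return values, "return x;" in a
+ * hypothetical rule with a single return value named x, and (because force
+ * defaults to true) a bare "return;" in a void rule, which is what the early
+ * exits in ruleMemoization/checkRuleBacktrackFailure/ruleBacktrackFailure
+ * need.  The rule() template passes force=false so nothing is emitted at the
+ * end of a void rule body.
+ */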
+
+catch(decl,action) ::= <<
+catch (<e.decl>) {
+	<e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType(ruleDescriptor)> retval = new <returnType(ruleDescriptor)>();
+retval.start = input.LT(1);
+<elseif(ruleDescriptor.returnScope)>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+int <ruleDescriptor.name>_StartIndex = input.index();
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{it |<it>_stack.push(new <it>_scope());}; separator="\n">
+<ruleDescriptor.ruleScope:{it |<it.name>_stack.push(new <it.name>_scope());}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{it |<it>_stack.pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{it |<it.name>_stack.pop();}; separator="\n">
+>>
+
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+	:{it |<labelType> <it.label.text>=null;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+	:{it |List\<Object> list_<it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|RuleReturnScope <ll.label.text> = null;}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+	:{it |<labelType> <it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{it |int <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+	:{it |List\<Object> list_<it.label.text>=null;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = input.LT(-1);
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( state.backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start "<ruleName>"
+public final void m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>) throws RecognitionException {
+	<if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+	<ruleScopeSetUp()>
+	<ruleDeclarations()>
+	try {
+<if(nakedBlock)>
+		<ruleMemoization(name=ruleName)>
+		<lexerRuleLabelDefs()>
+		<ruleDescriptor.actions.init>
+		<block>
+<else>
+		int _type = <ruleName>;
+		int _channel = DEFAULT_TOKEN_CHANNEL;
+		<ruleMemoization(name=ruleName)>
+		<lexerRuleLabelDefs()>
+		<ruleDescriptor.actions.init>
+		<block>
+		<ruleCleanUp()>
+		state.type = _type;
+		state.channel = _channel;
+		<(ruleDescriptor.actions.after):execAction()>
+<endif>
+	}
+	finally {
+		// do for sure before leaving
+		<if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+		<ruleScopeCleanUp()>
+		<memoize()>
+	}
+}
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+@Override
+public void mTokens() throws RecognitionException {
+	<block>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) {
+	<alts:{a | <altSwitchCase(i,a)>}>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) {
+	<alts:{a | <altSwitchCase(i,a)>}>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+int cnt<decisionNumber>=0;
+<decls>
+<@preloop()>
+loop<decisionNumber>:
+while (true) {
+	int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	<decision>
+	<@postdecision()>
+	switch (alt<decisionNumber>) {
+	<alts:{a | <altSwitchCase(i,a)>}>
+	default :
+		if ( cnt<decisionNumber> >= 1 ) break loop<decisionNumber>;
+		<ruleBacktrackFailure()>
+		EarlyExitException eee = new EarlyExitException(<decisionNumber>, input);
+		<@earlyExitException()>
+		throw eee;
+	}
+	cnt<decisionNumber>++;
+}
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+loop<decisionNumber>:
+while (true) {
+	int alt<decisionNumber>=<maxAlt>;
+	<@predecision()>
+	<decision>
+	<@postdecision()>
+	switch (alt<decisionNumber>) {
+	<alts:{a | <altSwitchCase(i,a)>}>
+	default :
+		break loop<decisionNumber>;
+	}
+}
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation,
+ *  so we can just use the normal block template.
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase(altNum,alt) ::= <<
+case <altNum> :
+	<@prealt()>
+	<alt>
+	break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+{
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+}
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element(e) ::= <<
+<@prematch()>
+<e.el>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(label)><label>=(<labelType>)<endif>match(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(token,label,elementIndex,terminalOptions)>
+<listLabel(label, label)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label>==null) list_<label>=new ArrayList\<Object>();
+list_<label>.add(<elem>);
+>>
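+/* For illustration, ids+=ID inside a hypothetical parser rule decl (element
+ * index 42, default labelType Token) renders roughly:
+ *
+ *   ids=(Token)match(input,ID,FOLLOW_ID_in_decl42);
+ *   if (list_ids==null) list_ids=new ArrayList<Object>();
+ *   list_ids.add(ids);
+ */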
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = input.LA(1);
+<endif>
+match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = input.LA(1);
+<endif>
+matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="",terminalOptions={}) ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= input.LA(1);
+<else>
+<label>=<castToLabelType("input.LT(1)")>;
+<endif>
+<endif>
+if ( <s> ) {
+	input.consume();
+	<postmatchCode>
+<if(!LEXER)>
+	state.errorRecovery=false;
+<endif>
+	<if(backtracking)>state.failed=false;<endif>
+}
+else {
+	<ruleBacktrackFailure()>
+	MismatchedSetException mse = new MismatchedSetException(null,input);
+	<@mismatchedSetException()>
+<if(LEXER)>
+	recover(mse);
+	throw mse;
+<else>
+	throw mse;
+	<! use following code to make it recover inline; remove throw mse;
+	recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>);
+	!>
+<endif>
+}
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(label, label)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex="0") ::= <<
+<if(label)>
+int <label>Start = getCharIndex();
+match(<string>); <checkRuleBacktrackFailure()>
+int <label>StartLine<elementIndex> = getLine();
+int <label>StartCharPos<elementIndex> = getCharPositionInLine();
+<label> = new <labelType>(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1);
+<label>.setLine(<label>StartLine<elementIndex>);
+<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>);
+<else>
+match(<string>); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(label)>
+<label>=<castToLabelType("input.LT(1)")>;
+<endif>
+matchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<wildcard(...)>
+<listLabel(label, label)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = input.LA(1);
+<endif>
+matchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(label, elementIndex)>
+<listLabel(label, label)>
+>>
+
+/** Match a rule reference by invoking it, possibly with arguments
+ *  and a return value or values.  The 'rule' argument used to be the
+ *  target rule name, but is now of type Rule, whose toString is
+ *  the same: the rule name.  Now, though, you can access the full rule
+ *  descriptor.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+pushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);
+state._fsp--;
+<checkRuleBacktrackFailure()>
+>>
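+/* For illustration, a reference e=expr at element index 3 inside a
+ * hypothetical rule stat (no grammar delegation, backtracking off) renders
+ * roughly:
+ *
+ *   pushFollow(FOLLOW_expr_in_stat3);
+ *   e=expr();
+ *   state._fsp--;
+ */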
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(rule,label,elementIndex,args,scope)>
+<listLabel(label, label)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument used to be the target rule name, but is now
+ *  of type Rule, whose toString is the same: the rule name.
+ *  Now, though, you can access the full rule descriptor.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = getCharIndex();
+int <label>StartLine<elementIndex> = getLine();
+int <label>StartCharPos<elementIndex> = getCharPositionInLine();
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = new <labelType>(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
+<label>.setLine(<label>StartLine<elementIndex>);
+<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>);
+<else>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(rule,label,args,elementIndex,scope)>
+<listLabel(label, label)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = getCharIndex();
+int <label>StartLine<elementIndex> = getLine();
+int <label>StartCharPos<elementIndex> = getCharPositionInLine();
+match(EOF); <checkRuleBacktrackFailure()>
+<labelType> <label> = new <labelType>(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
+<label>.setLine(<label>StartLine<elementIndex>);
+<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>);
+<else>
+match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "int <recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
+recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==Token.DOWN ) {
+	match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+	<children:element()>
+	match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(pred,description)>) ) {
+	<ruleBacktrackFailure()>
+	throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);
+<edges; separator="\nelse ">
+<if((!isTrue.(last(edges).labelExpr)) && (!last(edges).predicates))>
+else {
+<if(eotPredictsAlt)>
+	alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+	<ruleBacktrackFailure()>
+	<(nvaExceptionWrapperMap.(k))({NoViableAltException nvae =
+	new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);
+<@noViableAltException()>
+throw nvae;})>
+<endif>
+}
+<endif>
+>>
+
+nvaExceptionWrapperMap ::= [
+	"1":"wrapNvaExceptionForK1",
+	"2":"wrapNvaExceptionForK2",
+	default:"wrapNvaExceptionForKN"
+]
+
+wrapNvaExceptionForK1(exceptionCode) ::= <<
+<exceptionCode>
+>>
+
+wrapNvaExceptionForK2(exceptionCode) ::= <<
+int nvaeMark = input.mark();
+try {
+	input.consume();
+	<exceptionCode>
+} finally {
+	input.rewind(nvaeMark);
+}
+>>
+
+wrapNvaExceptionForKN(exceptionCode) ::= <<
+int nvaeMark = input.mark();
+try {
+	for (int nvaeConsume = 0; nvaeConsume \< <k> - 1; nvaeConsume++) {
+		input.consume();
+	}
+	<exceptionCode>
+} finally {
+	input.rewind(nvaeMark);
+}
+>>
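+/* The dfaState/dfaStateSwitch templates invoke these wrappers indirectly via
+ * <(nvaExceptionWrapperMap.(k))({...})>: the map selects a wrapper by the
+ * lookahead depth k, and the wrapper receives the NoViableAltException
+ * construction code as its exceptionCode argument.  For k>=2 the input is
+ * first advanced to the offending symbol (and rewound in the finally block)
+ * so the exception reports the right position.
+ */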
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection, but this
+ *  is faster, smaller, and closer to what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets, then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* at the end of a rule,
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(<k>);
+<edges; separator="\nelse ">
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else {
+	alt<decisionNumber>=<eotPredictsAlt>;
+}
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
+	<targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+<edges; separator="\n">
+default:
+<if(eotPredictsAlt)>
+	alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+	<ruleBacktrackFailure()>
+	<(nvaExceptionWrapperMap.(k))({NoViableAltException nvae =
+	new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input);
+<@noViableAltException()>
+throw nvae;})>
+<endif>
+}
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+	<edges; separator="\n">
+}
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( input.LA(<k>) ) {
+<edges; separator="\n">
+<if(eotPredictsAlt)>
+default:
+	alt<decisionNumber>=<eotPredictsAlt>;
+	break;
+<endif>
+}
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{it |case <it>:}; separator="\n">
+	{
+	<targetState>
+	}
+	break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = dfa<decisionNumber>.predict(input);
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+static final String DFA<dfa.decisionNumber>_eotS =
+	"<dfa.javaCompressedEOT; wrap="\"+\n\t\"">";
+static final String DFA<dfa.decisionNumber>_eofS =
+	"<dfa.javaCompressedEOF; wrap="\"+\n\t\"">";
+static final String DFA<dfa.decisionNumber>_minS =
+	"<dfa.javaCompressedMin; wrap="\"+\n\t\"">";
+static final String DFA<dfa.decisionNumber>_maxS =
+	"<dfa.javaCompressedMax; wrap="\"+\n\t\"">";
+static final String DFA<dfa.decisionNumber>_acceptS =
+	"<dfa.javaCompressedAccept; wrap="\"+\n\t\"">";
+static final String DFA<dfa.decisionNumber>_specialS =
+	"<dfa.javaCompressedSpecial; wrap="\"+\n\t\"">}>";
+static final String[] DFA<dfa.decisionNumber>_transitionS = {
+		<dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
+};
+
+static final short[] DFA<dfa.decisionNumber>_eot = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eotS);
+static final short[] DFA<dfa.decisionNumber>_eof = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eofS);
+static final char[] DFA<dfa.decisionNumber>_min = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS);
+static final char[] DFA<dfa.decisionNumber>_max = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS);
+static final short[] DFA<dfa.decisionNumber>_accept = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_acceptS);
+static final short[] DFA<dfa.decisionNumber>_special = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_specialS);
+static final short[][] DFA<dfa.decisionNumber>_transition;
+
+static {
+	int numStates = DFA<dfa.decisionNumber>_transitionS.length;
+	DFA<dfa.decisionNumber>_transition = new short[numStates][];
+	for (int i=0; i\<numStates; i++) {
+		DFA<dfa.decisionNumber>_transition[i] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_transitionS[i]);
+	}
+}
+
+protected class DFA<dfa.decisionNumber> extends DFA {
+
+	public DFA<dfa.decisionNumber>(BaseRecognizer recognizer) {
+		this.recognizer = recognizer;
+		this.decisionNumber = <dfa.decisionNumber>;
+		this.eot = DFA<dfa.decisionNumber>_eot;
+		this.eof = DFA<dfa.decisionNumber>_eof;
+		this.min = DFA<dfa.decisionNumber>_min;
+		this.max = DFA<dfa.decisionNumber>_max;
+		this.accept = DFA<dfa.decisionNumber>_accept;
+		this.special = DFA<dfa.decisionNumber>_special;
+		this.transition = DFA<dfa.decisionNumber>_transition;
+	}
+	@Override
+	public String getDescription() {
+		return "<dfa.description>";
+	}
+	<@errorMethod()>
+<if(dfa.specialStateSTs)>
+	@Override
+	public int specialStateTransition(int s, IntStream _input) throws NoViableAltException {
+		<if(LEXER)>
+		IntStream input = _input;
+		<endif>
+		<if(PARSER)>
+		TokenStream input = (TokenStream)_input;
+		<endif>
+		<if(TREE_PARSER)>
+		TreeNodeStream input = (TreeNodeStream)_input;
+		<endif>
+		int _s = s;
+		switch ( s ) {
+		<dfa.specialStateSTs:{state |
+		case <i0> : <! compressed special state numbers 0..n-1 !>
+			<state>}; separator="\n">
+		}
+<if(backtracking)>
+		if (state.backtracking>0) {state.failed=true; return -1;}
+<endif>
+		NoViableAltException nvae =
+			new NoViableAltException(getDescription(), <dfa.decisionNumber>, _s, input);
+		error(nvae);
+		throw nvae;
+	}
+<endif>
+}
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+int LA<decisionNumber>_<stateNumber> = input.LA(1);
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+int index<decisionNumber>_<stateNumber> = input.index();
+input.rewind();
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.seek(index<decisionNumber>_<stateNumber>);
+<endif>
+if ( s>=0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+orPredicates(operands) ::= "(<operands; separator=\"||\">)"
+
+notPredicate(pred) ::= "!(<evalPredicate(pred,{})>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
+(LA<decisionNumber>_<stateNumber> >= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
+%>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>) >= <lower> && input.LA(<k>) \<= <upper>)"
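+/* For illustration, in a lexer decision over the range 'a'..'z' at depth k=1,
+ * lookaheadRangeTest renders
+ *   (LA5_0 >= 'a' && LA5_0 <= 'z')
+ * for a hypothetical decision 5, state 0, where LA5_0 caches input.LA(1),
+ * while isolatedLookaheadRangeTest asks the stream directly:
+ *   (input.LA(1) >= 'a' && input.LA(1) <= 'z')
+ */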
+
+setTest(ranges) ::= <<
+<ranges; separator="||">
+>>
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected static class <scope.name>_scope {
+	<scope.attributes:{it |<it.decl>;}; separator="\n">
+}
+protected Stack\<<scope.name>_scope> <scope.name>_stack = new Stack\<<scope.name>_scope>();
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+protected static class <scope.name>_scope {
+	<scope.attributes:{it |<it.decl>;}; separator="\n">
+}
+protected Stack\<<scope.name>_scope> <scope.name>_stack = new Stack\<<scope.name>_scope>();
+<endif>
+<endif>
+>>
+
+returnStructName(r) ::= "<r.name>_return"
+
+returnType(ruleDescriptor) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
+<elseif(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+%>
+
+/** Generate the Java type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+<returnScopeBaseType()>
+<elseif(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+%>
+
+delegateName(d) ::= <<
+<if(d.label)><d.label><else>g<d.name><endif>
+>>
+
+/** Using a type-to-init-value map, try to init a type; if it's not in the
+ *  table it must be an object, so the default value is "null".
+ */
+initValue(typeName) ::= <<
+<javaTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <%
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> =
+ <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;
+%>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+public static class <ruleDescriptor:returnStructName()> extends <returnScopeBaseType()> {
+	<if(scope)><scope.attributes:{it |public <it.decl>;}; separator="\n"><endif>
+	<@ruleReturnMembers()>
+};
+<endif>
+>>
+
+returnScopeBaseType() ::= <%
+<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope
+%>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{it |<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <%
+<if(negIndex)>
+<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1).<attr.name>
+<else>
+<if(index)>
+<scope>_stack.elementAt(<index>).<attr.name>
+<else>
+<scope>_stack.peek().<attr.name>
+<endif>
+<endif>
+%>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
+<if(negIndex)>
+<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1).<attr.name> =<expr>;
+<else>
+<if(index)>
+<scope>_stack.elementAt(<index>).<attr.name> =<expr>;
+<else>
+<scope>_stack.peek().<attr.name> =<expr>;
+<endif>
+<endif>
+%>
+
+/** $x is either a global scope or x is a rule with a dynamic scope; refers
+ *  to the stack itself, not the top of the stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+(<scope>!=null?((<returnType(referencedRule)>)<scope>).<attr.name>:<initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+%>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+%>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> =<expr>;
+<else>
+<attr.name> =<expr>;
+<endif>
+%>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=null?<scope>.getText():null)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=null?<scope>.getType():0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=null?<scope>.getLine():0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=null?<scope>.getCharPositionInLine():0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=null?<scope>.getChannel():0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=null?<scope>.getTokenIndex():0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=null?Integer.valueOf(<scope>.getText()):0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=null?(<castToLabelType({<scope>.start})>):null)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=null?(<castToLabelType({<scope>.stop})>):null)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=null?((<ASTLabelType>)<scope>.getTree()):null)"
+ruleLabelPropertyRef_text(scope,attr) ::= <%
+<if(TREE_PARSER)>
+(<scope>!=null?(input.getTokenStream().toString(
+  input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
+  input.getTreeAdaptor().getTokenStopIndex(<scope>.start))):null)
+<else>
+(<scope>!=null?input.toString(<scope>.start,<scope>.stop):null)
+<endif>
+%>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=null?((StringTemplate)<scope>.getTemplate()):null)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::=
+	"(<scope>!=null?<scope>.getType():0)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::=
+	"(<scope>!=null?<scope>.getLine():0)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::=
+	"(<scope>!=null?<scope>.getCharPositionInLine():-1)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::=
+	"(<scope>!=null?<scope>.getChannel():0)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::=
+	"(<scope>!=null?<scope>.getTokenIndex():0)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::=
+	"(<scope>!=null?<scope>.getText():null)"
+lexerRuleLabelPropertyRef_int(scope,attr) ::=
+	"(<scope>!=null?Integer.valueOf(<scope>.getText()):0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "(<castToLabelType(\"retval.start\")>)"
+rulePropertyRef_stop(scope,attr) ::= "(<castToLabelType(\"retval.stop\")>)"
+rulePropertyRef_tree(scope,attr) ::= "retval.tree"
+rulePropertyRef_text(scope,attr) ::= <%
+<if(TREE_PARSER)>
+input.getTokenStream().toString(
+  input.getTreeAdaptor().getTokenStartIndex(retval.start),
+  input.getTreeAdaptor().getTokenStopIndex(retval.start))
+<else>
+input.toString(retval.start,input.LT(-1))
+<endif>
+%>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "getText()"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(getCharIndex()-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "Integer.valueOf(<scope>.getText())"
+
+// setting $st and $tree is allowed in a local rule; everything else
+// is flagged as an error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <%
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {
+  <action>
+}
+<else>
+<action>
+<endif>
+%>
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+public static final BitSet <name> = new BitSet(new long[]{<words64:{it |<it>L};separator=",">});
+>>
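+/* For illustration, a FOLLOW set whose only member is token type 4 renders
+ * roughly (hypothetical set name; words64 carries the 64-bit words of the set):
+ *
+ *   public static final BitSet FOLLOW_ID_in_decl42 = new BitSet(new long[]{0x0000000000000010L});
+ */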
+
+codeFileExtension() ::= ".java"
+
+true_value() ::= "true"
+false_value() ::= "false"
+
+isEmptyString ::= [
+	"" : true,
+	default : false
+]
+
+isTrue ::= [
+	"true" : true,
+	default : false
+]
+
+isDefaultLabelType ::= [
+	"Token" : true,
+	default : false
+]
+
+isPredefinedRewriteRule ::= [
+	"topdown" : true,
+	"bottomup" : true,
+	default : false
+]
+
+castToLabelType(value) ::= <%
+<if(!isDefaultLabelType.(labelType))>
+(<labelType>)
+<endif>
+<value>
+%>
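+
+/* The maps above act as predicates: ST has no string comparison, so
+ * isDefaultLabelType.("Token") yields true and any other key falls through to
+ * false.  castToLabelType therefore emits a bare expression when labelType is
+ * the default Token (castToLabelType("input.LT(1)") renders input.LT(1)) and
+ * prefixes a cast otherwise, e.g. (CommonToken)input.LT(1) for a grammar that
+ * sets TokenLabelType=CommonToken.
+ */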
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Java/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Java/ST.stg
new file mode 100644
index 0000000..3b5323b
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Java/ST.stg
@@ -0,0 +1,159 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template subgroup to add template rewrite output
+ *  If debugging, then you'll also get STDbg.stg loaded.
+ */
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+import org.antlr.stringtemplate.*;
+import org.antlr.stringtemplate.language.*;
+import java.util.HashMap;
+>>
+
+/** Add this to each rule's return value struct */
+@returnScope.ruleReturnMembers() ::= <<
+public StringTemplate st;
+public Object getTemplate() { return st; }
+public String toString() { return st==null?null:st.toString(); }
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+	protected StringTemplateGroup templateLib =
+	  new StringTemplateGroup("<name>Templates", AngleBracketTemplateLexer.class);
+
+	public void setTemplateLib(StringTemplateGroup templateLib) {
+	  this.templateLib = templateLib;
+	}
+	public StringTemplateGroup getTemplateLib() {
+	  return templateLib;
+	}
+	/** allows convenient multi-value initialization:
+	 *  "new STAttrMap().put(...).put(...)"
+	 */
+	@SuppressWarnings("serial")
+	public static class STAttrMap extends HashMap\<String, Object> {
+		public STAttrMap put(String attrName, Object value) {
+			super.put(attrName, value);
+			return this;
+		}
+	}
+>>
+
+/** x+=rule when output=template */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(rule,label,elementIndex,args,scope)>
+<listLabel(label, {<label>.getTemplate()})>
+>>
+
+rewriteTemplate(alts) ::= <<
+
+// TEMPLATE REWRITE
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {
+  <alts:rewriteTemplateAlt(); separator="else ">
+  <if(rewriteMode)><replaceTextInLine()><endif>
+}
+<else>
+<alts:rewriteTemplateAlt(); separator="else ">
+<if(rewriteMode)><replaceTextInLine()><endif>
+<endif>
+>>
+
+replaceTextInLine() ::= <<
+<if(TREE_PARSER)>
+((TokenRewriteStream)input.getTokenStream()).replace(
+  input.getTreeAdaptor().getTokenStartIndex(retval.start),
+  input.getTreeAdaptor().getTokenStopIndex(retval.start),
+  retval.st);
+<else>
+((TokenRewriteStream)input).replace(
+  ((Token)retval.start).getTokenIndex(),
+  input.LT(-1).getTokenIndex(),
+  retval.st);
+<endif>
+>>
+
+rewriteTemplateAlt(alt) ::= <<
+// <alt.description>
+<if(alt.pred)>
+if (<alt.pred>) {
+	retval.st = <alt.alt>;
+}<\n>
+<else>
+{
+	retval.st = <alt.alt>;
+}<\n>
+<endif>
+>>
+
+rewriteEmptyTemplate(alts) ::= <<
+null;
+>>
+
+/** Invoke a template with a set of attribute name/value pairs.
+ *  Set the value of the rule's template *after* having set
+ *  the attributes because the rule's template might be used as
+ *  an attribute to build a bigger template; you get a self-embedded
+ *  template.
+ */
+rewriteExternalTemplate(name,args) ::= <%
+templateLib.getInstanceOf("<name>"<if(args)>,
+  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+  <endif>)
+%>
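+/* For illustration, a template rewrite such as -> decl(type={typeArg}, name={nameArg})
+ * (hypothetical template and attribute names) renders roughly:
+ *
+ *   retval.st = templateLib.getInstanceOf("decl",
+ *       new STAttrMap().put("type", typeArg).put("name", nameArg));
+ *
+ * with the "retval.st = ..." wrapper coming from rewriteTemplateAlt above.
+ */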
+
+/** expr is a string expression that says what template to load */
+rewriteIndirectTemplate(expr,args) ::= <%
+templateLib.getInstanceOf(<expr><if(args)>,
+  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+  <endif>)
+%>
+
+/** Invoke an inline template with a set of attribute name/value pairs */
+rewriteInlineTemplate(args, template) ::= <%
+new StringTemplate(templateLib, "<template>"<if(args)>,
+  new STAttrMap()<args:{a | .put("<a.name>", <a.value>)}>
+  <endif>)
+%>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<action>
+>>
+
+/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
+actionSetAttribute(st,attrName,expr) ::= <<
+(<st>).setAttribute("<attrName>",<expr>);
+>>
+
+/** Translate %{stringExpr} */
+actionStringConstructor(stringExpr) ::= <<
+new StringTemplate(templateLib,<stringExpr>)
+>>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/AST.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/AST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/JavaScript/AST.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTParser.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTParser.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTParser.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTTreeParser.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTTreeParser.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/JavaScript/ASTTreeParser.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/JavaScript.stg b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/JavaScript.stg
new file mode 100755
index 0000000..73cbaf4
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/JavaScript/JavaScript.stg
@@ -0,0 +1,1333 @@
+group JavaScript;
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+       bitsets, buildTemplate, buildAST, rewriteMode, profile,
+       backtracking, synpreds, memoize, numRules,
+       fileName, ANTLRVersion, generatedTimestamp, trace,
+       scopes, superClass, literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+<actions.(actionScope).header>
+
+<@imports>
+<if(TREE_PARSER)>
+<endif>
+<@end>
+
+<docComment>
+<recognizer>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
+      filterMode, superClass="org.antlr.runtime.Lexer") ::= <<
+var <grammar.recognizerName> = function(input, state<grammar.delegators:{g|, <g:delegateName()>}>) {
+// alternate constructor @todo
+// public <grammar.recognizerName>(CharStream input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>)
+// public <grammar.recognizerName>(CharStream input, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+    if (!state) {
+        state = new org.antlr.runtime.RecognizerSharedState();
+    }
+
+    (function(){
+        <actions.lexer.members>
+    }).call(this);
+
+    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this);}; separator="\n">
+    <grammar.recognizerName>.superclass.constructor.call(this, input, state);
+    <if(memoize)>
+    <if(grammar.grammarIsRoot)>
+    this.state.ruleMemo = {};
+    <endif>
+    <endif>
+
+    <grammar.directDelegates:
+       {g|this.<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+    <grammar.delegators:
+       {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+    <last(grammar.delegators):{g|this.gParent = this.<g:delegateName()>;}>
+
+    <actions.lexer.init>
+};
+
+org.antlr.lang.augmentObject(<grammar.recognizerName>, {
+    <tokens:{<it.name>: <it.type>}; separator=",\n">
+});
+
+(function(){
+var HIDDEN = org.antlr.runtime.Token.HIDDEN_CHANNEL,
+    EOF = org.antlr.runtime.Token.EOF;
+org.antlr.lang.extend(<grammar.recognizerName>, <@superClassName><superClass><@end>, {
+    <tokens:{<it.name> : <it.type>,}; separator="\n">
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+    getGrammarFileName: function() { return "<fileName>"; }
+});
+org.antlr.lang.augmentObject(<grammar.recognizerName>.prototype, {
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator=",\n\n">
+
+    <synpreds:{p | <lexerSynpred(p)>}; separator=",\n">
+}, true); // important to pass true to overwrite default implementations
+
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+})();
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token, and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1, as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+nextToken: function() {
+    while (true) {
+        if ( this.input.LA(1)==org.antlr.runtime.CharStream.EOF ) {
+            return org.antlr.runtime.Token.EOF_TOKEN;
+        }
+        this.state.token = null;
+        this.state.channel = org.antlr.runtime.Token.DEFAULT_CHANNEL;
+        this.state.tokenStartCharIndex = this.input.index();
+        this.state.tokenStartCharPositionInLine = this.input.getCharPositionInLine();
+        this.state.tokenStartLine = this.input.getLine();
+        this.state.text = null;
+        try {
+            var m = this.input.mark();
+            this.state.backtracking=1; <! means we won't throw slow exception !>
+            this.state.failed=false;
+            this.mTokens();
+            this.state.backtracking=0;
+            <! mTokens backtracks with synpred at backtracking==2
+               and we set the synpredgate to allow actions at level 1. !>
+            if ( this.state.failed ) {
+                this.input.rewind(m);
+                this.input.consume(); <! advance one char and try again !>
+            }
+            else {
+                this.emit();
+                return this.state.token;
+            }
+        }
+        catch (re) {
+            // shouldn't happen in backtracking mode, but...
+            if (re instanceof org.antlr.runtime.RecognitionException) {
+                this.reportError(re);
+                this.recover(re);
+            } else {
+                throw re;
+            }
+        }
+    }
+},
+
+memoize: function(input, ruleIndex, ruleStartIndex) {
+    if (this.state.backtracking>1) {
+        <grammar.recognizerName>.superclass.prototype.memoize.call(this, input, ruleIndex, ruleStartIndex);
+    }
+},
+
+alreadyParsedRule: function(input, ruleIndex) {
+    if (this.state.backtracking>1) {
+        return <grammar.recognizerName>.superclass.prototype.alreadyParsedRule.call(this, input, ruleIndex);
+    }
+    return false;
+},
+
+
+>>
+
+actionGate() ::= "this.state.backtracking===0"
+
+filteringActionGate() ::= "this.state.backtracking===1"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              ASTLabelType="Object", labelType, members, rewriteElementType) ::= <<
+<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+var <grammar.recognizerName> = function(input, state<grammar.delegators:{g|, <g:delegateName()>}>) {
+    if (!state) {
+        state = new org.antlr.runtime.RecognizerSharedState();
+    }
+
+    (function(){
+        <members>
+    }).call(this);
+
+    <grammar.recognizerName>.superclass.constructor.call(this, input, state);
+
+    <cyclicDFAs:{dfa | this.dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this);}; separator="\n">
+
+        <parserCtorBody()>
+        <grammar.directDelegates:
+         {g|this.<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this);}; separator="\n">
+         <grammar.indirectDelegates:{g | this.<g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>;}; separator="\n">
+         <last(grammar.delegators):{g|this.gParent = this.<g:delegateName()>;}>
+
+    /* @todo only create adaptor if output=AST */
+    this.adaptor = new org.antlr.runtime.tree.CommonTreeAdaptor();<\n>
+};
+
+org.antlr.lang.augmentObject(<grammar.recognizerName>, {
+    <tokens:{<it.name>: <it.type>}; separator=",\n">
+});
+
+(function(){
+// public class variables
+var <tokens:{<it.name>= <it.type>}; separator=",\n    ">;
+<if(TREE_PARSER)>
+var UP = org.antlr.runtime.Token.UP,
+    DOWN = org.antlr.runtime.Token.DOWN;
+<endif>
+
+
+// public instance methods/vars
+org.antlr.lang.extend(<grammar.recognizerName>, org.antlr.runtime.<@superClassName><superClass><@end>, {
+    <@members>
+    <@end>
+    <scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+
+    getTokenNames: function() { return <grammar.composite.rootGrammar.recognizerName>.tokenNames; },
+    getGrammarFileName: function() { return "<fileName>"; }
+});
+org.antlr.lang.augmentObject(<grammar.recognizerName>.prototype, {
+
+    <rules; separator=",\n\n">
+
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+<grammar.delegatedRules:{ruleDescriptor|
+    , <ruleDescriptor.name>: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) \{ <if(ruleDescriptor.hasReturnValue)>return <endif>this.<ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">); \}}>
+
+
+
+    <synpreds:{p | <synpred(p)>}; separator=",\n">
+
+}, true); // important to pass true to overwrite default implementations
+
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+// public class variables
+org.antlr.lang.augmentObject(<grammar.recognizerName>, {
+<if(grammar.grammarIsRoot)>
+    tokenNames: ["\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">],<\n>
+<endif>
+    <bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits); separator=",\n">
+});
+
+})();
+>>
+
+parserCtorBody() ::= <<
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = {};<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType="Object", superClass="Parser", labelType="Token", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="TokenStream", rewriteElementType="Token", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="var", superClass="tree.TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
+<genericParser(inputStreamType="TreeNodeStream", rewriteElementType="Node", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  or parameters, etc., just generate the simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start "<ruleName>"
+<ruleName>_fragment: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) {
+<if(trace)>
+    this.traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+    try {
+        <block>
+    }
+    finally {
+        this.traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    }
+<else>
+    <block>
+<endif>
+},
+// $ANTLR end "<ruleName>"
+>>
+
+synpred(name) ::= <<
+<name>: function() {
+    this.state.backtracking++;
+    <@start()>
+    var start = this.input.mark();
+    try {
+        this.<name>_fragment(); // can never throw exception
+    } catch (re) {
+        alert("impossible: "+re.toString());
+    }
+    var success = !this.state.failed;
+    this.input.rewind(start);
+    <@stop()>
+    this.state.backtracking--;
+    this.state.failed=false;
+    return success;
+}
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( this.state.backtracking>0 && this.alreadyParsedRule(this.input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (this.state.failed) return <ruleReturnValue()>;<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (this.state.backtracking>0) {this.state.failed=true; return <ruleReturnValue()>;}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// <fileName>:<description>
+// $ANTLR start "<ruleName>"
+<ruleDescriptor.actions.decorate>
+<ruleName>: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>) {
+    <if(trace)>this.traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try {
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+    }
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    catch (re) {
+        if (re instanceof org.antlr.runtime.RecognitionException) {
+            this.reportError(re);
+            this.recover(this.input,re);
+            <@setErrorReturnValue()>
+        } else {
+            throw re;
+        }
+    }<\n>
+<endif>
+<endif>
+<endif>
+    finally {
+        <if(trace)>this.traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+    }
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>) {
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+var retval = new <returnType()>();
+retval.start = this.input.LT(1);<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+var <a.name> = <if(a.initValue)><a.initValue><else>null<endif>;
+}>
+<endif>
+<if(memoize)>
+var <ruleDescriptor.name>_StartIndex = this.input.index();
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{this.<it>_stack.push({});}; separator="\n">
+<ruleDescriptor.ruleScope:{this.<it.name>_stack.push({});}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{this.<it>_stack.pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{this.<it.name>_stack.pop();}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{var <it.label.text> = null;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{var list_<it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|var <ll.label.text> = null;}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{var <it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{var <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{var list_<it.label.text>=null;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = this.input.LT(-1);<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( this.state.backtracking>0 ) { this.memoize(this.input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start <ruleName>
+m<ruleName>: function(<ruleDescriptor.parameterScope:parameterScope(scope=it)>)  {
+    <if(trace)>this.traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    try {
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        var _type = this.<ruleName>;
+        var _channel = org.antlr.runtime.BaseRecognizer.DEFAULT_TOKEN_CHANNEL;
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        this.state.type = _type;
+        this.state.channel = _channel;
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    }
+    finally {
+        <if(trace)>this.traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <ruleScopeCleanUp()>
+        <memoize()>
+    }
+},
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+mTokens: function() {
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var cnt<decisionNumber>=0;
+<decls>
+<@preloop()>
+loop<decisionNumber>:
+do {
+    var alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+    default :
+        if ( cnt<decisionNumber> >= 1 ) {
+            break loop<decisionNumber>;
+        }
+        <ruleBacktrackFailure()>
+            var eee = new org.antlr.runtime.EarlyExitException(<decisionNumber>, this.input);
+            <@earlyExitException()>
+            throw eee;
+    }
+    cnt<decisionNumber>++;
+} while (true);
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+loop<decisionNumber>:
+do {
+    var alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+    default :
+        break loop<decisionNumber>;
+    }
+} while (true);
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation,
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i> :
+    <@prealt()>
+    <it>
+    break;<\n>
+>>
+
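+/* Editor's note: an illustrative sketch (names and numbers are hypothetical) of how
+ * block()/altSwitchCase() fit together for a two-alternative subrule with decision
+ * number 3; the real <decision> code comes from the DFA templates further below:
+ *
+ *   var alt3 = 2;                              // maxAlt
+ *   var LA3_0 = this.input.LA(1);              // <decision>: fixed-DFA if/else
+ *   if ( LA3_0 == ID ) {                       // ID is an assumed token type
+ *       alt3 = 1;
+ *   }
+ *   switch (alt3) {                            // one altSwitchCase() per alternative
+ *       case 1 :
+ *           // ...elements of alternative 1...
+ *           break;
+ *       case 2 :
+ *           // ...elements of alternative 2...
+ *           break;
+ *   }
+ */
+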
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+<! (function() { /* @todo4 (do we really need a new scope?) */ !>
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+<! }).call(this); !>
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)><label>=<endif>this.match(this.input,<token>,<grammar.recognizerName>.FOLLOW_<token>_in_<ruleName><elementIndex>); <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (org.antlr.lang.isNull(list_<label>)) list_<label> = [];
+list_<label>.push(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = this.input.LA(1);<\n>
+<endif>
+this.match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = this.input.LA(1);<\n>
+<endif>
+this.matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= this.input.LA(1);<\n>
+<else>
+<label>=this.input.LT(1);<\n>
+<endif>
+<endif>
+if ( <s> ) {
+    this.input.consume();
+    <postmatchCode>
+<if(!LEXER)>
+    this.state.errorRecovery=false;
+<endif>
+    <if(backtracking)>this.state.failed=false;<endif>
+}
+else {
+    <ruleBacktrackFailure()>
+    var mse = new org.antlr.runtime.MismatchedSetException(null,this.input);
+    <@mismatchedSetException()>
+<if(LEXER)>
+    this.recover(mse);
+    throw mse;
+<else>
+    throw mse;
+    <! use following code to make it recover inline; remove throw mse;
+    this.recoverFromMismatchedSet(this.input,mse,<grammar.recognizerName>.FOLLOW_set_in_<ruleName><elementIndex>);
+    !>
+<endif>
+}<\n>
+>>
+
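+/* Editor's note: a hedged illustration of the inline "interval test" that <s> expands
+ * to in matchSet() above, for a hypothetical lexer set such as ('0'..'9'|'_'); the
+ * numeric character codes and the recovery shape are assumptions:
+ *
+ *   if ( (this.input.LA(1) >= 48 && this.input.LA(1) <= 57) || this.input.LA(1) == 95 ) {
+ *       this.input.consume();
+ *   } else {
+ *       var mse = new org.antlr.runtime.MismatchedSetException(null, this.input);
+ *       this.recover(mse);
+ *       throw mse;
+ *   }
+ */
+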
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex) ::= <<
+<if(label)>
+var <label>Start = this.getCharIndex();
+this.match(<string>); <checkRuleBacktrackFailure()>
+var <label> = new org.antlr.runtime.CommonToken(this.input, org.antlr.runtime.Token.INVALID_TOKEN_TYPE, org.antlr.runtime.Token.DEFAULT_CHANNEL, <label>Start, this.getCharIndex()-1);
+<else>
+this.match(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label>=this.input.LT(1);<\n>
+<endif>
+this.matchAny(this.input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = this.input.LA(1);<\n>
+<endif>
+this.matchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+this.pushFollow(<grammar.recognizerName>.FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)><label>=<endif>this.<if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">);<\n>
+this.state._fsp--;
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+var <label>Start<elementIndex> = this.getCharIndex();
+this.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = new org.antlr.runtime.CommonToken(this.input, org.antlr.runtime.Token.INVALID_TOKEN_TYPE, org.antlr.runtime.Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, this.getCharIndex()-1);
+<else>
+this.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+var <label>Start<elementIndex> = this.getCharIndex();
+this.match(this.EOF); <checkRuleBacktrackFailure()>
+var <label> = new org.antlr.runtime.CommonToken(this.input, this.EOF, org.antlr.runtime.Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, this.getCharIndex()-1);
+<else>
+this.match(this.EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "int <recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
+recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( this.input.LA(1)==org.antlr.runtime.Token.DOWN ) {
+    this.match(this.input, org.antlr.runtime.Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    this.match(this.input, org.antlr.runtime.Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+this.match(this.input, org.antlr.runtime.Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+this.match(this.input, org.antlr.runtime.Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) {
+    <ruleBacktrackFailure()>
+    throw new org.antlr.runtime.FailedPredicateException(this.input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber> = this.input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+else {
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    var nvae =
+        new org.antlr.runtime.NoViableAltException("<description>", <decisionNumber>, <stateNumber>, this.input);<\n>
+    <@noViableAltException()>
+    throw nvae;<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and more what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber> = this.input.LA(<k>);<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber> = this.input.LA(<k>);<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else {
+    alt<decisionNumber>=<eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( this.input.LA(<k>) ) {
+<edges; separator="\n">
+default:
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    var nvae =
+        new org.antlr.runtime.NoViableAltException("<description>", <decisionNumber>, <stateNumber>, this.input);<\n>
+    <@noViableAltException()>
+    throw nvae;<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( this.input.LA(<k>) ) {
+    <edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( this.input.LA(<k>) ) {
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+default:
+    alt<decisionNumber>=<eotPredictsAlt>;
+    break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{case <it>:}; separator="\n">
+    <targetState>
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = this.dfa<decisionNumber>.predict(this.input);
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
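+/* Editor's note: a self-contained sketch of run-length decoding in the same spirit as
+ * the packed tables below; it assumes the packed string is a sequence of
+ * (count, value) character pairs and is NOT the runtime's exact unpack routine:
+ *
+ *   function unpackRLE(packed) {
+ *       var out = [];
+ *       for (var i = 0; i < packed.length; i += 2) {
+ *           var n = packed.charCodeAt(i);      // repeat count
+ *           var v = packed.charCodeAt(i + 1);  // value to repeat
+ *           for (var j = 0; j < n; j++) { out.push(v); }
+ *       }
+ *       return out;
+ *   }
+ *
+ *   // e.g. unpackRLE("\u0003\u0001\u0002\u0005") yields [1, 1, 1, 5, 5]
+ */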
+cyclicDFA(dfa) ::= <<
+org.antlr.lang.augmentObject(<grammar.recognizerName>, {
+    DFA<dfa.decisionNumber>_eotS:
+        "<dfa.javaCompressedEOT; wrap="\"+\n    \"">",
+    DFA<dfa.decisionNumber>_eofS:
+        "<dfa.javaCompressedEOF; wrap="\"+\n    \"">",
+    DFA<dfa.decisionNumber>_minS:
+        "<dfa.javaCompressedMin; wrap="\"+\n    \"">",
+    DFA<dfa.decisionNumber>_maxS:
+        "<dfa.javaCompressedMax; wrap="\"+\n    \"">",
+    DFA<dfa.decisionNumber>_acceptS:
+        "<dfa.javaCompressedAccept; wrap="\"+\n    \"">",
+    DFA<dfa.decisionNumber>_specialS:
+        "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>",
+    DFA<dfa.decisionNumber>_transitionS: [
+            <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
+    ]
+});
+
+org.antlr.lang.augmentObject(<grammar.recognizerName>, {
+    DFA<dfa.decisionNumber>_eot:
+        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_eotS),
+    DFA<dfa.decisionNumber>_eof:
+        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_eofS),
+    DFA<dfa.decisionNumber>_min:
+        org.antlr.runtime.DFA.unpackEncodedStringToUnsignedChars(<grammar.recognizerName>.DFA<dfa.decisionNumber>_minS),
+    DFA<dfa.decisionNumber>_max:
+        org.antlr.runtime.DFA.unpackEncodedStringToUnsignedChars(<grammar.recognizerName>.DFA<dfa.decisionNumber>_maxS),
+    DFA<dfa.decisionNumber>_accept:
+        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_acceptS),
+    DFA<dfa.decisionNumber>_special:
+        org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_specialS),
+    DFA<dfa.decisionNumber>_transition: (function() {
+        var a = [],
+            i,
+            numStates = <grammar.recognizerName>.DFA<dfa.decisionNumber>_transitionS.length;
+        for (i=0; i\<numStates; i++) {
+            a.push(org.antlr.runtime.DFA.unpackEncodedString(<grammar.recognizerName>.DFA<dfa.decisionNumber>_transitionS[i]));
+        }
+        return a;
+    })()
+});
+
+<grammar.recognizerName>.DFA<dfa.decisionNumber> = function(recognizer) {
+    this.recognizer = recognizer;
+    this.decisionNumber = <dfa.decisionNumber>;
+    this.eot = <grammar.recognizerName>.DFA<dfa.decisionNumber>_eot;
+    this.eof = <grammar.recognizerName>.DFA<dfa.decisionNumber>_eof;
+    this.min = <grammar.recognizerName>.DFA<dfa.decisionNumber>_min;
+    this.max = <grammar.recognizerName>.DFA<dfa.decisionNumber>_max;
+    this.accept = <grammar.recognizerName>.DFA<dfa.decisionNumber>_accept;
+    this.special = <grammar.recognizerName>.DFA<dfa.decisionNumber>_special;
+    this.transition = <grammar.recognizerName>.DFA<dfa.decisionNumber>_transition;
+};
+
+org.antlr.lang.extend(<grammar.recognizerName>.DFA<dfa.decisionNumber>, org.antlr.runtime.DFA, {
+    getDescription: function() {
+        return "<dfa.description>";
+    },
+    <@errorMethod()>
+<if(dfa.specialStateSTs)>
+    specialStateTransition: function(s, input) {
+        var _s = s;
+        /* bind to recognizer so semantic predicates can be evaluated */
+        var retval = (function(s, input) {
+            switch ( s ) {
+            <dfa.specialStateSTs:{state |
+            case <i0> : <! compressed special state numbers 0..n-1 !>
+                <state>}; separator="\n">
+            }
+        }).call(this.recognizer, s, input);
+        if (!org.antlr.lang.isUndefined(retval)) {
+            return retval;
+        }
+<if(backtracking)>
+        if (this.recognizer.state.backtracking>0) {this.recognizer.state.failed=true; return -1;}<\n>
+<endif>
+        var nvae =
+            new org.antlr.runtime.NoViableAltException(this.getDescription(), <dfa.decisionNumber>, _s, input);
+        this.error(nvae);
+        throw nvae;
+    },<\n>
+<endif>
+    dummy: null
+});<\n>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+var LA<decisionNumber>_<stateNumber> = input.LA(1);<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+var index<decisionNumber>_<stateNumber> = input.index();
+input.rewind();<\n>
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.seek(index<decisionNumber>_<stateNumber>);<\n>
+<endif>
+if ( s>=0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+orPredicates(operands) ::= "(<operands; separator=\"||\">)"
+
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "this.<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "this.input.LA(<k>)==<atom>"
+
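+/* Editor's note: illustrative contrast, with hypothetical decision/state/token numbers:
+ *   lookaheadTest reuses a cached lookahead variable:   LA5_1 == 27
+ *   isolatedLookaheadTest asks the stream directly:     this.input.LA(2) == 27
+ */
+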
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(LA<decisionNumber>_<stateNumber>\>=<lower> && LA<decisionNumber>_<stateNumber>\<=<upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(this.input.LA(<k>)\>=<lower> && this.input.LA(<k>)\<=<upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\"||\">"
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+<scope.name>_stack: [],<\n>
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+<scope.name>_stack: [],<\n>
+<endif>
+>>
+
+returnStructName() ::= "<it.name>_return"
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.grammar.recognizerName>.<ruleDescriptor:returnStructName()>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Generate the Java type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.grammar.recognizerName>.<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+delegateName() ::= <<
+<if(it.label)><it.label><else>g<it.name><endif>
+>>
+
+/** Using a type-to-init-value map, try to initialize a type; if the type is not
+ *  in the table it must be an object, so the default value is "null".
+ */
+initValue(typeName) ::= <<
+null
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<!<ruleLabelType(referencedRule=label.referencedRule)>!> var <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+// inline static return class
+<ruleDescriptor:returnStructName()>: (function() {
+    <returnType()> = function(){};
+    org.antlr.lang.extend(<returnType()>,
+                      org.antlr.runtime.<if(TREE_PARSER)>tree.Tree<else>Parser<endif>RuleReturnScope,
+    {
+        <@ruleReturnMembers()>
+    });
+    return;
+})(),
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{<it.decl>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+(this.<scope>_stack[this.<scope>_stack.length-<negIndex>-1]).<attr.name>
+<else>
+<if(index)>
+(this.<scope>_stack[<index>]).<attr.name>
+<else>
+org.antlr.lang.array.peek(this.<scope>_stack).<attr.name>
+<endif>
+<endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+(this.<scope>_stack[this.<scope>_stack.length-<negIndex>-1]).<attr.name> =<expr>;
+<else>
+<if(index)>
+(this.<scope>_stack[<index>]).<attr.name> =<expr>;
+<else>
+org.antlr.lang.array.peek(this.<scope>_stack).<attr.name> =<expr>;
+<endif>
+<endif>
+>>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "this.<scope>_stack"
+
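+/* Editor's note: a hedged sketch of roughly what the dynamic-scope templates translate
+ * to for a hypothetical rule scope "function" with an attribute "name" (assuming the
+ * user action code is written in JavaScript):
+ *
+ *   this.function_stack.push({});                                  // ruleScopeSetUp()
+ *   org.antlr.lang.array.peek(this.function_stack).name = "foo";   // $function::name = "foo";
+ *   // the predicate from the isolatedDynamicScopeRef comment above:
+ *   if (this.function_stack.length > 0 &&
+ *       org.antlr.lang.array.peek(this.function_stack).name == "foo") {
+ *       // ...predicate-gated code...
+ *   }
+ *   this.function_stack.pop();                                     // ruleScopeCleanUp()
+ */
+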
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+(<scope>!==null?<scope>.<attr.name>:<initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> =<expr>;
+<else>
+<attr.name> =<expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>?<scope>.getText():null)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>?<scope>.getType():0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>?<scope>.getLine():0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>?<scope>.getCharPositionInLine():0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>?<scope>.getChannel():0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>?<scope>.getTokenIndex():0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>?parseInt(<scope>.getText(), 10):0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>?<scope>.start:null)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>?<scope>.stop:null)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>?<scope>.tree:null)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+(<scope>?(this.input.getTokenStream().toString(
+  this.input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
+  this.input.getTreeAdaptor().getTokenStopIndex(<scope>.start))):null)
+<else>
+(<scope>?this.input.toString(<scope>.start,<scope>.stop):null)
+<endif>
+>>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>?<scope>.getType():0)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>?<scope>.getLine():0)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>?<scope>.getCharPositionInLine():-1)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>?<scope>.getChannel():0)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>?<scope>.getTokenIndex():0)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>?<scope>.getText():0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "(retval.start)"
+rulePropertyRef_stop(scope,attr) ::= "(retval.stop)"
+rulePropertyRef_tree(scope,attr) ::= "(retval.tree)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+this.input.getTokenStream().toString(
+  this.input.getTreeAdaptor().getTokenStartIndex(retval.start),
+  this.input.getTreeAdaptor().getTokenStopIndex(retval.start))
+<else>
+this.input.toString(retval.start,this.input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "this.getText()"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "this.state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "this.state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "this.state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(this.getCharIndex()-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "parseInt(<scope>.getText(),10)"
+
+// Setting $st and $tree is allowed in a local rule; everything else
+// is flagged as an error.
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
+
+
+/** How to execute an action */
+execAction(action) ::= <<
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {
+  <action>
+}
+<else>
+<action>
+<endif>
+>>
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+<! @todo overflow issue !>
+<name>: new org.antlr.runtime.BitSet([<words64:{<it>};separator=",">])
+>>
+
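+/* Editor's note: the overflow @todo above is about precision: JavaScript numbers are
+ * IEEE-754 doubles, so integer literals above 2^53 silently lose low bits, e.g.
+ *
+ *   0x20000000000001 === 0x20000000000000   // true: 2^53 + 1 is not representable
+ *
+ * which means 64-bit follow-set words emitted as plain numeric literals can be inexact.
+ */
+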
+codeFileExtension() ::= ".js"
+
+true() ::= "true"
+false() ::= "false"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/LeftRecursiveRules.stg b/tool/src/main/resources/org/antlr/codegen/templates/LeftRecursiveRules.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/LeftRecursiveRules.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/LeftRecursiveRules.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ObjC/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/AST.stg
new file mode 100644
index 0000000..5ecadd7
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/AST.stg
@@ -0,0 +1,563 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2006, 2007 Kay Roepke 2010 Alan Condit
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+@genericParserHeaderFile.memVars() ::= <<
+/* AST parserHeaderFile.memVars */
+NSInteger ruleLevel;
+NSArray *ruleNames;
+<@super.memVars()>  /* AST super.memVars */
+<parserMemVars()>   /* AST parserMemVars */
+>>
+
+@genericParserHeaderFile.properties() ::= <<
+/* AST parserHeaderFile.properties */
+<@super.properties()>  /* AST super.properties */
+<parserProperties()>   /* AST parserproperties */
+>>
+
+@genericParserHeaderFile.methodsDecl() ::= <<
+/* AST parserHeaderFile.methodsDecl */
+<@super.methodsDecl()>  /* AST super.methodsDecl */
+<parserMethodsDecl()>   /* AST parsermethodsDecl */
+>>
+
+@genericParser.synthesize() ::= <<
+/* AST genericParser.synthesize */
+<@super.synthesize()>
+<parserSynthesize()>
+>>
+
+@genericParser.methods() ::= <<
+/* AST genericParser.methods */
+<@super.methods()>
+<parserMethods()>
+>>
+
+/* additional init code for tree support */
+@genericParser.init() ::= <<
+/* AST genericParser.init */
+<@super.init()>
+[self setTreeAdaptor:[[CommonTreeAdaptor newTreeAdaptor] retain]];
+>>
+
+@genericParser.dealloc() ::= <<
+/* AST genericParser.dealloc */
+[self setTreeAdaptor:nil];
+<@super.dealloc()>
+>>
+
+/* Add an adaptor property that knows how to build trees */
+parserMemVars() ::= <<
+/* AST parserMemVars */
+id\<TreeAdaptor> treeAdaptor;
+>>
+
+/* Add an adaptor property that knows how to build trees */
+parserProperties() ::= <<
+/* AST parserProperties */
+@property (retain, getter=getTreeAdaptor, setter=setTreeAdaptor:) id\<TreeAdaptor> treeAdaptor;
+>>
+
+/** Declaration of additional tree support methods - go in interface of parserHeaderFile() */
+parserMethodsDecl() ::= <<
+/* AST parserMethodsDecl */
+- (id\<TreeAdaptor>) getTreeAdaptor;
+- (void) setTreeAdaptor:(id\<TreeAdaptor>)theTreeAdaptor;
+>>
+
+/* Add an adaptor property that knows how to build trees */
+parserSynthesize() ::= <<
+/* AST parserProperties */
+@synthesize treeAdaptor;
+>>
+
+/** Definition of additional tree support methods - go in implementation of genericParser() */
+parserMethods() ::= <<
+/* AST parserMethods */
+- (id\<TreeAdaptor>) getTreeAdaptor
+{
+	return treeAdaptor;
+}
+
+- (void) setTreeAdaptor:(id\<TreeAdaptor>)aTreeAdaptor
+{
+	if (aTreeAdaptor != treeAdaptor) {
+		treeAdaptor = aTreeAdaptor;
+	}
+}
+>>
+
+/** additional memVars for return scopes */
+@returnScopeInterface.memVars() ::= <<
+/* AST returnScopeInterface.memVars */
+<recognizer.ASTLabelType; null="CommonTree"> *tree;
+>>
+
+/** the interface of returnScope properties */
+@returnScopeInterface.properties() ::= <<
+/* AST returnScopeInterface.properties */
+@property (retain, getter=getTree, setter=setTree:) <recognizer.ASTLabelType; null="CommonTree"> *tree;
+>>
+
+/** the interface of returnScope methodsDecl */
+@returnScopeInterface.methodsDecl() ::= <<
+/* AST returnScopeInterface.methodsDecl */
+- (<recognizer.ASTLabelType; null="CommonTree"> *)getTree;<\n>
+- (void) setTree:(<recognizer.ASTLabelType; null="CommonTree"> *)aTree;<\n>
+>>
+
+/** the implementation of returnScope synthesize */
+@returnScopeImplementation.synthesize() ::= <<
+/* AST returnScope.synthesize */
+@synthesize tree;
+>>
+
+/** the implementation of returnScope methods */
+@returnScopeImplementation.methods() ::= <<
+/* AST returnScope.methods */
+- (<ASTLabelType> *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(<ASTLabelType> *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    self.tree = nil;
+    [super dealloc];
+}
+
+>>
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+/* AST ruleDeclarations */
+<super.ruleDeclarations()>
+<ASTLabelType> *root_0 = nil;<\n>
+>>
+
+ruleLabelDefs() ::= <<
+/* AST ruleLabelDefs */
+<super.ruleLabelDefs()>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
+  ruleDescriptor.wildcardTreeListLabels]:{it | <ASTLabelType> *<it.label.text>_tree=nil;}; separator="\n">
+<ruleDescriptor.tokenListLabels:{it | <ASTLabelType> *<it.label.text>_tree = nil;}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites:{it | RewriteRuleTokenStream *stream_<it> =
+    [[RewriteRule<rewriteElementType>Stream newRewriteRule<rewriteElementType>Stream:treeAdaptor
+                                                     description:@"token <it>"] retain];}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites:{it | RewriteRuleSubtreeStream *stream_<it> =
+    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+                                                        description:@"rule <it>"] retain];}; separator="\n">
+>>
+
+ruleCleanUp() ::= <<
+/* AST ruleCleanUp */
+<super.ruleCleanUp()>
+<[ruleDescriptor.allTokenRefsInAltsWithRewrites,ruleDescriptor.allRuleRefsInAltsWithRewrites]:{it | [stream_<it> release];}; separator="\n">
+<!
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(backtracking)>if ( state.backtracking == 0 ) {<\n>
+<endif>
+    [<prevRuleRootRef()> setTree:(<ASTLabelType> *)[treeAdaptor rulePostProcessing:root_0]];<\n>
+    [treeAdaptor setTokenBoundaries:[<prevRuleRootRef()> getTree]
+                               From:[<prevRuleRootRef()> getStart]
+                                 To:[<prevRuleRootRef()> getStop]];<\n>
+<if(backtracking)>}<\n>
+<endif>
+<endif>
+[root_0 release];
+!>
+>>
+
+rewriteCodeLabelsCleanUp() ::= <<
+/* AST rewriteCodeLabelsCleanUp */
+ <referencedTokenLabels:{it | [stream_<it> release];}; separator="\n">
+ <referencedTokenListLabels:{it | [stream_<it> release];}; separator="\n">
+ <referencedRuleLabels:{it | [stream_<it> release];}; separator="\n">
+ <referencedRuleListLabels:{it | [stream_<it> release];}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  these should be turned off if doing rewrites.  This must be a "mode",
+ *  as a rule could have both a rewrite and AST construction within the same
+ *  alternative block.
+ */
+@alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = (<ASTLabelType> *)[[[treeAdaptor class] newEmptyTree] retain];<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex) ::= <<
+<! <super.tokenRef(...)> !>
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) <endif>
+    [stream_<token> addElement:<label>];<\n>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex) ::= <<
+<! <super.tokenRef(...)> !>
+<tokenRefBang(...)>
+<if(backtracking)>
+if ( !<actions.(actionScope).synpredgate> ) <endif>
+    [stream_<token> addElement:<label>];<\n>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+[stream_<rule.name> addElement:[<label> getTree]];
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(elem={[<label> getTree]},...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<! <super.ruleRefRuleRoot(...)> !>
+<ruleRefRuleRoot(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+    [stream_<rule.name> addElement:[<label> getTree]];<\n>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(elem={[<label> getTree]},...)>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+	alts, description,
+	referencedElementsDeep, // ALL referenced elements to right of ->
+	referencedTokenLabels,
+	referencedTokenListLabels,
+	referencedRuleLabels,
+	referencedRuleListLabels,
+	referencedWildcardLabels,
+	referencedWildcardListLabels,
+	rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+
+// AST REWRITE
+// elements: <referencedElementsDeep; separator=", ">
+// token labels: <referencedTokenLabels; separator=", ">
+// rule labels: <referencedRuleLabels; separator=", ">
+// token list labels: <referencedTokenListLabels; separator=", ">
+// rule list labels: <referencedRuleListLabels; separator=", ">
+// wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {<\n>
+<endif>
+<prevRuleRootRef()>.tree = root_0;<\n>
+<rewriteCodeLabels()>
+root_0 = (<ASTLabelType> *)[[[treeAdaptor class] newEmptyTree] retain];<\n>
+<alts:rewriteAlt(); separator="else ">
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.tree = (<ASTLabelType> *)[treeAdaptor rulePostProcessing:root_0];
+[input replaceChildren:[treeAdaptor getParent:retval.start]
+                  From:[treeAdaptor getChildIndex:retval.start]
+                    To:[treeAdaptor getChildIndex:_last]
+                  With:retval.tree];
+<endif>
+<endif>
+<! if parser or tree-parser && rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.tree = root_0;<\n>
+<else>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.tree = root_0;<\n>
+<endif>
+<endif>
+<if(backtracking)>
+}
+<endif>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{it | RewriteRule<rewriteElementType>Stream *stream_<it> =
+    [[RewriteRule<rewriteElementType>Stream newRewriteRule<rewriteElementType>Stream:treeAdaptor description:@"token <it>" element:<it>] retain];};
+    separator="\n"
+>
+<referencedTokenListLabels:{it | RewriteRule<rewriteElementType>Stream *stream_<it> =
+    [[RewriteRule<rewriteElementType>Stream newRewriteRule<rewriteElementType>Stream:treeAdaptor
+        description:@"token <it>" elements:list_<it>] retain];};
+    separator="\n"
+>
+<referencedWildcardLabels:{it | RewriteRuleSubtreeStream *stream_<it> =
+    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+        description:@"wildcard <it>" element:<it>] retain];};
+    separator="\n"
+>
+<referencedWildcardListLabels:{it | RewriteRuleSubtreeStream *stream_<it> =
+    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+        description:@"wildcard <it>" elements:list_<it>] retain];};
+    separator="\n"
+>
+<referencedRuleLabels:{it | RewriteRuleSubtreeStream *stream_<it> =
+    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+        description:@"token <it>" element:<it>!=nil?[<it> getTree]:nil] retain];};
+    separator="\n"
+>
+<referencedRuleListLabels:{it | RewriteRuleSubtreeStream *stream_<it> =
+    [[RewriteRuleSubtreeStream newRewriteRuleSubtreeStream:treeAdaptor
+        description:@"token <it>" elements:list_<it>] retain];};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note that it uses the deep
+  *  referenced-element list rather than the shallow list used by other blocks.
+  */
+rewriteOptionalBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+if ( <referencedElementsDeep:{el | [stream_<el> hasNext]}; separator="||"> ) {
+	<alt>
+}
+<referencedElementsDeep:{el | [stream_<el> reset];<\n>}>
+>>
+
+rewriteClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+while ( <referencedElements:{el | [stream_<el> hasNext]}; separator="||"> ) {
+    <alt>
+}
+<referencedElements:{el | [stream_<el> reset];<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+	alt,rewriteBlockLevel,
+	referencedElementsDeep, // all nested refs
+	referencedElements, // elements in immediately block; no nested blocks
+	description) ::=
+<<
+// <fileName>:<description>
+{
+if ( !(<referencedElements:{el | [stream_<el> hasNext]}; separator=" || ">) ) {
+    @throw [RewriteEarlyExitException newException];
+}
+while ( <referencedElements:{el | [stream_<el> hasNext]}; separator=" || "> ) {
+    <alt>
+}
+<referencedElements:{el | [stream_<el> reset];<\n>}>
+}
+>>
+
+rewriteAlt(a) ::= <<
+// <a.description>
+<if(a.pred)>
+if (<a.pred>) {
+    <a.alt>
+}<\n>
+<else>
+{
+    <a.alt>
+}<\n>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = nil;"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+// <fileName>:<description>
+{
+    <ASTLabelType> *root_<treeLevel> = (<ASTLabelType> *)[[[treeAdaptor class] newEmptyTree] retain];
+    <root:rewriteElement()>
+    <children:rewriteElement()>
+    [treeAdaptor addChild:root_<treeLevel> toTree:root_<enclosingTreeLevel>];
+}<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen()>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,terminalOptions,args) ::= <<
+ // TODO: args: <args; separator=", ">
+[treeAdaptor addChild:<createRewriteNodeFromElement(...)> toTree:root_<treeLevel>];<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+[treeAdaptor addChild:[stream_<label> nextNode] toTree:root_<treeLevel>];<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+[treeAdaptor addChild:[stream_<label> nextNode] toTree:root_<treeLevel>];<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:[stream_<label> nextNode] old:root_<treeLevel>];<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,terminalOptions,args) ::= <<
+root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<createRewriteNodeFromElement(...)> old:root_<treeLevel>];<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,terminalOptions,elementIndex) ::= <<
+[treeAdaptor addChild:<createImaginaryNode(tokenType=token, ...)> toTree:root_<treeLevel>];<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,terminalOptions,elementIndex) ::= <<
+root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<createImaginaryNode(tokenType=token, ...)> old:root_<treeLevel>];<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+root_0 = <action>;<\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean the previous value.  I am reusing the
+ *  variable 'tree' sitting in the retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assignment will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+[treeAdaptor addChild:[stream_<rule> nextTree] toTree:root_<treeLevel>];<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:(id\<Tree>)[stream_<rule> nextNode] old:root_<treeLevel>];<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+[treeAdaptor addChild:<action> toTree:root_<treeLevel>];<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<action> old:root_<treeLevel>];<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+[treeAdaptor addChild:[stream_<label> nextTree] toTree:root_<treeLevel>];<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+[treeAdaptor addChild:[stream_<label> nextTree] toTree:root_<treeLevel>];<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:[stream_<label> nextNode] old:root_<treeLevel>];<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:[stream_<label> nextNode] old:root_<treeLevel>];<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+[treeAdaptor addChild:[stream_<label> nextTree] toTree:root_<treeLevel>];<\n>
+>>
+
+createImaginaryNode(tokenType,terminalOptions,args) ::= <<
+<if(terminalOptions.node)>
+    [<terminalOptions.node> new<terminalOptions.node>:<tokenType> <if(args)>, <args; separator=", "><endif>]
+<else>
+    <if(args)>
+        [[treeAdaptor createTree:<tokenType> <if(first(args))>FromToken:<first(args)><endif> <if(first(rest(args)))>Text:<first(rest(args))><else>Text:@"<tokenType>"<endif>] retain]
+    <else>
+        [[treeAdaptor createTree:<tokenType> Text:@"<tokenType>"] retain]
+    <endif>
+<endif>
+>>
+
+createRewriteNodeFromElement(token,terminalOptions,args) ::= <<
+<if(terminalOptions.node)>
+    [<terminalOptions.node> new<terminalOptions.node>:[stream_<token> nextToken]<if(args)>, <args; separator=", "><endif>]
+<else>
+    <if(args)> <! must create new node from old !>
+        [[treeAdaptor createTree:<token> Text:<first(rest(args))> <args; separator=", ">] retain]
+    <else>
+        [stream_<token> nextNode]
+    <endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTDbg.stg
new file mode 100644
index 0000000..210cfcd
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTDbg.stg
@@ -0,0 +1,93 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2006 Kay Roepke 2010 Alan Condit
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Template overrides to add debugging to AST stuff.  Dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : Java by code generator.
+ */
+parserMembers() ::= <<
+DebugTreeAdaptor *adaptor = [DebugTreeAdaptor newTreeAdaptor:(id)dbg Adaptor:[CommonTreeAdaptor newTreeAdaptor]];
+
+// fix this
+- (void) setTreeAdaptor:(id\<TreeAdaptor>)anAdaptor
+{
+    adaptor = [DebugTreeAdaptor newTreeAdaptor:dbg Adaptor:anAdaptor];
+<if(grammar.grammarIsRoot)>
+    adaptor = [DebugTreeAdaptor newTreeAdaptor:adaptor withDBG:dbg];
+<else>
+    adaptor = (DebugTreeAdaptor *)adaptor; // delegator sends dbg adaptor
+<endif><\n>
+    <grammar.directDelegates:{g|[<g:delegateName()> setTreeAdaptor:adaptor];}>
+}
+
+- (id\<TreeAdaptor>)getTreeAdaptor
+{
+    return adaptor;
+}<\n>
+>>
+
+parserCtorBody() ::= <<
+<super.parserCtorBody()>
+>>
+
+createListenerAndHandshake() ::= <<
+DebugEventSocketProxy proxy =
+    [DebugEventSocketProxy newDebugEventSocketProxy:self, port, <if(TREE_PARSER)>[input getTreeAdaptor]<else>adaptor<endif>];
+[self setDebugListener:proxy];
+[self set<inputStreamType>:[Debug<inputStreamType> newDebug<inputStreamType>:input with:proxy]];
+try {
+    [proxy handshake];
+}
+@catch (IOException *ioe) {
+    [self reportError:ioe];
+}
+>>
+
+@ctorForRootGrammar.finally() ::= <<
+CommonTreeAdaptor *adap = [CommonTreeAdaptor newTreeAdaptor];
+[self setTreeAdaptor:adap];
+[proxy setTreeAdaptor:adap];
+>>
+
+@ctorForProfilingRootGrammar.finally() ::=<<
+CommonTreeAdaptor *adap = [CommonTreeAdaptor newTreeAdaptor];
+[self setTreeAdaptor:adap];
+[proxy setTreeAdaptor:adap];
+>>
+
+@ctorForPredefinedListener.superClassRef() ::= @"super(input, dbg);"
+
+@ctorForPredefinedListener.finally() ::=<<
+<if(grammar.grammarIsRoot)> <! don't create new adaptor for delegates !>
+CommonTreeAdaptor *adap = [CommonTreeAdaptor newTreeAdaptor];
+[self setTreeAdaptor:adap];<\n>
+<endif>
+>>
+
+@treeParserHeaderFile.superClassName ::= "DebugTreeParser"
+
+@rewriteElement.pregen() ::= "[debugListener locationLine:<e.line> column:<e.pos>];"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTParser.stg
new file mode 100644
index 0000000..4eed274
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTParser.stg
@@ -0,0 +1,210 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2007 Kay Roepke 2010 Alan Condit
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+@rule.setErrorReturnValue() ::= <<
+/* ASTParser rule.setErrorReturnValue */
+retval.tree = (<ASTLabelType> *)[treeAdaptor errorNode:input From:retval.start To:[input LT:-1] Exception:re];
+<! System.out.println("<ruleName> returns "+((CommonTree)retval.tree).toStringTree()); !>
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token, label, elementIndex, terminalOptions) ::= <<
+/* ASTParser tokenRef */
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+[treeAdaptor addChild:<label>_tree  toTree:root_0];
+<if(backtracking)>}<endif>
+>>
+
+/* ID! and output=AST (same as plain tokenRef) */
+/* ASTParser tokenRefBang */
+tokenRefBang(token,label,elementIndex,terminalOptions) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
+<super.tokenRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = <createNodeFromToken(...)>;
+root_0 = (<ASTLabelType> *)[treeAdaptor becomeRoot:<label>_tree old:root_0];
+<if(backtracking)>}<endif>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+/* ASTParser tokenRefBangAndListLabel */
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+/* ASTParser tokenRefAndListLabel */
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,terminalOptions,elementIndex) ::= <<
+/* ASTParser tokenRefRuleRootAndListLabel */
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// The match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like encoding the
+// operator in the template name, since the number of templates gets
+// large, but it is the most flexible approach--as opposed to having the
+// code generator call matchSet and then tack on root code or rule-root
+// code plus list-label code and so on.  Those combinations might require
+// more complicated code than simply appending.  Investigate that
+// refactoring when I have more time.
+
+matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <%
+/* ASTParser matchSet */
+<super.matchSet(postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+    [treeAdaptor addChild:<createNodeFromToken(...)> toTree:root_0 ];}, ...)>
+%>
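+
+/* [Editor's sketch - a minimal standalone illustration of the "pass code via an
+ * argument" idea described above; the group names and templates are hypothetical
+ * and not part of ANTLR.]
+ *
+ *   base.stg:  matchSet(s, postmatchCode) ::= "match <s>; <postmatchCode>"
+ *   sub.stg:   import "base.stg"
+ *              matchSet(s, postmatchCode) ::= "<super.matchSet(postmatchCode={addChild(<s>)}, ...)>"
+ *
+ * Rendering sub's matchSet with s = "A|B" yields:  match A|B; addChild(A|B)
+ */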
+
+matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
+/* ASTParser matchRuleBlockSet */
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,terminalOptions, postmatchCode) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
+/* ASTParser matchSetRuleRoot */
+<if(label)>
+<label>=(<labelType> *)[input LT:1]; /* matchSetRuleRoot */<\n>
+<endif>
+<super.matchSet(postmatchCode={<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+root_0 = (<ASTLabelType> *)[treeAdaptor becomeRoot:<createNodeFromToken(...)> old:root_0];}, ...)>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+/* ASTParser ruleRef */
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+[treeAdaptor addChild:[<label> getTree] toTree:root_0];
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+/* ASTParser ruleRefRuleRoot */
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+root_0 = (<ASTLabelType> *)[treeAdaptor becomeRoot:[<label> getTree] old:root_0];
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+/* ASTParser ruleRefAndListLabel */
+<ruleRef(...)>
+<listLabel(elem = {[<label> getTree]},...)>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+/* ASTParser ruleRefBangAndListLabel */
+<ruleRefBang(...)>
+<listLabel(elem = {[<label> getTree]},...)>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+/* ASTParser ruleRefRuleRootAndListLabel */
+<ruleRefRuleRoot(...)>
+<listLabel(elem = {[<label> getTree]},...)>
+>>
+
+// WILDCARD AST
+
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+/* ASTParser wildcard */
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+    [treeAdaptor addChild:[[treeAdaptor create:<label>] retain] toTree:root_0];
+<if(backtracking)>}<endif>
+>>
+
+wildcardBang(token,label,elementIndex,terminalOptions) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
+/* ASTParser wildcardRuleRoot */
+<super.wildcard(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+    <label>_tree = [[treeAdaptor create:<label>] retain];
+    root_0 = (<ASTLabelType> *)[treeAdaptor becomeRoot:<label>_tree old:root_0];
+<if(backtracking)>}<endif>
+>>
+
+createNodeFromToken(label,terminalOptions) ::= <<
+/* ASTParser createNodeFromToken */
+<if(terminalOptions.node)>
+[<terminalOptions.node> new<terminalOptions.node>:<label>] <! new MethodNode(IDLabel) !>
+<else>
+(<ASTLabelType> *)[[treeAdaptor create:<label>] retain]
+<endif>
+>>
+
+// straight from java cleanup ///
+ruleCleanUp() ::= <<
+/* ASTParser ruleCleanUp */
+<super.ruleCleanUp()>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+    retval.tree = (<ASTLabelType> *)[treeAdaptor rulePostProcessing:root_0];
+    [treeAdaptor setTokenBoundaries:retval.tree From:retval.start To:retval.stopToken];
+<if(backtracking)>}<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTTreeParser.stg
new file mode 100644
index 0000000..9eb45dd
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ASTTreeParser.stg
@@ -0,0 +1,365 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2007 Kay Roepke 2010 Alan Condit
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+
+/* additional memVars for return scopes */
+@returnScopeInterface.memVars() ::= <<
+/* ASTTreeParser returnScopeInterface.memVars */
+<recognizer.ASTLabelType; null="CommonTree"> *tree;
+>>
+
+/** the interface of returnScope methodsDecl */
+@returnScopeInterface.methodsDecl() ::= <<
+/* ASTTreeParser returnScopeInterface.methodsDecl */
+- (<recognizer.ASTLabelType; null="CommonTree"> *)getTree;
+- (void) setTree:(<recognizer.ASTLabelType; null="CommonTree"> *)aTree;<\n>
+>>
+
+/** the implementation of returnScope methods */
+@returnScope.methods() ::= <<
+/* ASTTreeParser returnScope.methods */
+- (<ASTLabelType> *)getTree
+{
+    return tree;
+}
+
+- (void) setTree:(<ASTLabelType> *)aTree
+{
+    if (tree != aTree) {
+        if (tree != nil) [tree release];
+        if (aTree != nil) [aTree retain];
+        tree = aTree;
+    }
+}
+
+- (void) dealloc
+{
+    [self setTree:nil];
+    [super dealloc];
+}
+
+@synthesize tree;
+>>
+
+@returnScopeProperties() ::= <<
+@property (retain) <recognizer.ASTLabelType; null="CommonTree"> *tree;
+>>
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+/* ASTTreeParser ruleDeclarations */
+<super.ruleDeclarations()>
+<ASTLabelType> *_first_0 = nil;
+<ASTLabelType> *_last = nil;<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+/* ASTTreeParser noRewrite */
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(rewriteMode)>
+retval.tree = (<ASTLabelType> *)_first_0;
+if ( [treeAdaptor getParent:retval.tree] != nil && [treeAdaptor isNil:[treeAdaptor getParent:retval.tree]] )
+    retval.tree = (<ASTLabelType> *)[treeAdaptor getParent:retval.tree];
+<endif>
+<if(backtracking)>}<endif>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+/* ASTTreeParser tree */
+_last = (<ASTLabelType> *)[input LT:1];
+{
+<ASTLabelType> *_save_last_<treeLevel> = _last;
+<ASTLabelType> *_first_<treeLevel> = nil;
+<if(!rewriteMode)>
+<ASTLabelType> *root_<treeLevel> = [[[treeAdaptor class] newEmptyTree] retain];
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+<if(root.el.rule)>
+if ( _first_<enclosingTreeLevel>==nil ) _first_<enclosingTreeLevel> = <root.el.label>.tree;
+<else>
+if ( _first_<enclosingTreeLevel>==nil ) _first_<enclosingTreeLevel> = <root.el.label>;
+<endif>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( [input LA:1] == TokenTypeDOWN ) {
+    [self match:input TokenType:TokenTypeDOWN Follow:nil]; <checkRuleBacktrackFailure()>
+    <children:element()>
+    [self match:input TokenType:TokenTypeUP Follow:nil]; <checkRuleBacktrackFailure()>
+}
+<else>
+[self match:input TokenType:TokenTypeDOWN Follow:nil]; <checkRuleBacktrackFailure()>
+<children:element()>
+[self match:input TokenType:TokenTypeUP Follow:nil]; <checkRuleBacktrackFailure()>
+<endif>
+<if(!rewriteMode)>
+[treeAdaptor addChild:root_<treeLevel> toTree:root_<enclosingTreeLevel>];
+<endif>
+_last = _save_last_<treeLevel>;
+}<\n>
+>>
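+
+/* [Editor's sketch - not part of the upstream template group.]  For a tree
+ * pattern such as ^(PLUS e1 e2) matched at treeLevel 1 inside level 0, with
+ * rewriteMode and backtracking off (illustrative assumptions), the template
+ * above lays the generated code out roughly as:
+ *
+ *   _last = (CommonTree *)[input LT:1];
+ *   {
+ *   CommonTree *_save_last_1 = _last;
+ *   CommonTree *_first_1 = nil;
+ *   CommonTree *root_1 = [[[treeAdaptor class] newEmptyTree] retain];
+ *   ... match the PLUS root ...
+ *   [self match:input TokenType:TokenTypeDOWN Follow:nil];
+ *   ... match children e1 and e2 ...
+ *   [self match:input TokenType:TokenTypeUP Follow:nil];
+ *   [treeAdaptor addChild:root_1 toTree:root_0];
+ *   _last = _save_last_1;
+ *   }
+ */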
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) 'cept add
+ *  setting of _last
+ */
+tokenRefBang(token,label,elementIndex,terminalOptions) ::= <<
+/* ASTTreeParser tokenRefBang */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+/* ASTTreeParser tokenRef */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+    <label>_tree = [<terminalOptions.node> new<terminalOptions.node>:<label>];
+<else>
+    <label>_tree = (<ASTLabelType> *)[treeAdaptor dupNode:<label>];
+<endif><\n>
+    [treeAdaptor addChild:<label>_tree toTree:root_<treeLevel>];
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel>==nil ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+/* ASTTreeParser tokenRefAndListLabel */
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions) ::= <<
+/* ASTTreeParser tokenRefRuleRoot */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = [<terminalOptions.node> new<terminalOptions.node>:<label>];
+<else>
+<label>_tree = (<ASTLabelType> *)[treeAdaptor dupNode:<label>];
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<label>_tree old:root_<treeLevel>];
+<if(backtracking)>}<endif>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+/* ASTTreeParser tokenRefRuleRootAndListLabel */
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+/* ASTTreeParser wildcard */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<label>_tree = (<ASTLabelType> *)[adaptor dupTree:<label>];
+[adaptor addChild:<label>_tree toTree:root_<treeLevel>];
+<if(backtracking)>}<endif>
+<else> <! rewrite mode !>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> )<endif>
+if ( _first_<treeLevel> == nil ) _first_<treeLevel> = <label>;
+<endif>
+>>
+
+// SET AST
+
+matchSet(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
+/* ASTTreeParser matchSet */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = [<terminalOptions.node> new<terminalOptions.node>:<label>];
+<else>
+<label>_tree = (<ASTLabelType> *)[adaptor dupNode:<label>];
+<endif><\n>
+[adaptor addChild:<label>_tree toTree:root_<treeLevel>];
+<if(backtracking)>\}<endif>
+<endif>
+}, ...
+)>
+>>
+
+matchRuleBlockSet(s,label,terminalOptions,elementIndex,postmatchCode,treeLevel="0") ::= <<
+/* ASTTreeParser matchRuleBlockSet */
+<matchSet(...)>
+<noRewrite(...)> <! set return tree !>
+>>
+
+matchSetBang(s,label,terminalOptions,elementIndex,postmatchCode) ::= <<
+/* ASTTreeParser matchSetBang */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,terminalOptions,elementIndex,debug) ::= <<
+/* ASTTreeParser matchSetRuleRoot */
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<endif>
+<if(terminalOptions.node)>
+<label>_tree = [<terminalOptions.node> new<terminalOptions.node>:<label>];
+<else>
+<label>_tree = (<ASTLabelType> *)[adaptor dupNode:<label>];
+<endif><\n>
+root_<treeLevel> = (<ASTLabelType> *)[adaptor becomeRoot:<label>_tree old:root_<treeLevel>];
+<if(backtracking)>\}<endif>
+<endif>
+}, ...
+)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+/* ASTTreeParser ruleRef */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.ruleRef(...)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) <endif>
+<if(!rewriteMode)>
+    [treeAdaptor addChild:<label>.tree toTree:root_<treeLevel>];
+<else> <! rewrite mode !>
+if ( _first_<treeLevel> == nil ) _first_<treeLevel> = <label>.tree;
+<endif>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+/* ASTTreeParser ruleRefAndListLabel */
+<ruleRef(...)>
+<! <listLabel(elem = "["+label+" getTree]",...)> !>
+<listLabel(elem = {[<label> getTree]},...)>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+/* ASTTreeParser ruleRefRuleRoot */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<if(backtracking)>if ( state.backtracking == 0 ) <endif>
+root_<treeLevel> = (<ASTLabelType> *)[treeAdaptor becomeRoot:<label>.tree old:root_<treeLevel>];
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+/* ASTTreeParser ruleRefRuleRootAndListLabel */
+<ruleRefRuleRoot(...)>
+<listLabel(elem = {[<label> getTree]},...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+/* ASTTreeParser ruleRefTrack */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+/* ASTTreeParser ruleRefTrackAndListLabel */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+/* ASTTreeParser ruleRefRuleRootTrack */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.ruleRefRuleRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+/* ASTTreeParser ruleRefRuleRootTrackAndListLabel */
+_last = (<ASTLabelType> *)[input LT:1];
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,terminalOptions,scope) ::= <<
+/* ASTTreeParser createRewriteNodeFromElement */
+<if(terminalOptions.node)>
+<! new <terminalOptions.node>(stream_<token>.nextNode()) !>
+[<terminalOptions.node> new<terminalOptions.node>:[stream_<token> nextNode]]
+<else>
+<! stream_<token>.nextNode() !>
+[stream_<token> nextNode]
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+/* ASTTreeParser ruleCleanUp */
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<if(backtracking)>if ( <actions.(actionScope).synpredgate> ) {<\n><endif>
+retval.tree = (<ASTLabelType> *)[treeAdaptor rulePostProcessing:root_0];
+<if(backtracking)>}<endif>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ObjC/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/Dbg.stg
new file mode 100644
index 0000000..5bad893
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/Dbg.stg
@@ -0,0 +1,228 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2006 Kay Roepke 2010 Alan Condit
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal Objective-C output.
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+@headerFile.imports() ::= <<
+<@super.imports()>
+#import \<ANTLR/Debug.h>
+>>
+
+@parserHeaderFile.memVars() ::= <<
+NSInteger ruleLevel;
+NSArray *ruleNames;
+>>
+
+@parserHeaderFile.methodsDecl() ::= <<
+-(BOOL) evalPredicate:(NSString *)predicate matched:(BOOL)result;<\n>
+>>
+
+@genericParser.methods() ::= <<
+<if(grammar.grammarIsRoot)>
+AMutableArray *ruleNames = [AMutableArray arrayWithArray:{
+    @"invalidRule", <grammar.allImportedRules:{rST | @"<rST.name>"}; wrap=@"\n    ", separator=", ">
+};<\n>
+<endif>
+<if(grammar.grammarIsRoot)> <! grammar imports other grammar(s) !>
+    ruleLevel = 0;
+- (NSInteger) getRuleLevel { return ruleLevel; }
+- (void) incRuleLevel { ruleLevel++; }
+- (void) decRuleLevel { ruleLevel--; }
+<if(profile)>
+    <ctorForProfilingRootGrammar()>
+<else>
+    <ctorForRootGrammar()>
+<endif>
+<ctorForPredefinedListener()>
+<else> <! imported grammar !>
+- (NSInteger) getRuleLevel
+{
+    return <grammar.delegators:{g| <g:delegateName()>}>.getRuleLevel();
+}<\n>
+
+- (void) incRuleLevel
+{
+    <grammar.delegators:{g| <g:delegateName()>}>.incRuleLevel();
+}<\n>
+- (void) decRuleLevel
+{
+    <grammar.delegators:{g| <g:delegateName()>}>.decRuleLevel();
+}<\n>
+    <ctorForDelegateGrammar()>
+<endif>
+<if(profile)>
+- (BOOL) alreadyParsedRule:(id<IntStream>) input Index:(NSInteger) ruleIndex
+{
+    [(Profiler)dbg examineRuleMemoization:input, ruleIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames objectAtIndex:ruleIndex];
+    return super.alreadyParsedRule(input, ruleIndex);
+}<\n>
+- (void) memoize:(id<IntStream>)input RuleIndex:(NSInteger)ruleIndex StartIndex:(NSInteger)ruleStartIndex
+{
+    [((Profiler)dbg) memoize:input RuleIndex:ruleIndex StartIndex:ruleStartIndex [<grammar.composite.rootGrammar.recognizerName> ruleNames[ruleIndex]];
+    [super memoize:input RuleIndex:ruleIndex StartIndex:ruleStartIndex];
+}<\n>
+<endif>
+- (BOOL) evalPredicate:(BOOL)result Pred:(NSString *)predicate
+{
+    [dbg semanticPredicate:result Pred:predicate];
+    return result;
+}<\n>
+>>
+
+@genericParser.init() ::= <<
+ruleNames = [NSArray arrayWithObjects:<rules:{rST | @"<rST.ruleName>"}; separator=", ", wrap="\n	">, nil];<\n>
+>>
+
+@genericParser.dealloc() ::= <<
+[ruleNames release];<\n>
+>>
+
+@genericParser.methods() ::= <<
+-(BOOL) evalPredicate:(NSString *)predicate matched:(BOOL)result
+{
+	[debugListener semanticPredicate:predicate matched:result];
+	return result;
+}<\n>
+>>
+
+/* bug: can't use <@super.superClassName()> */
+@parserHeaderFile.superClassName() ::= "Debug<if(TREE_PARSER)>Tree<endif>Parser"
+
+@rule.preamble() ::= <<
+@try { [debugListener enterRule:@"<ruleName>"];
+if ( ruleLevel==0 ) [debugListener commence];
+ruleLevel++;
+[debugListener locationLine:<ruleDescriptor.tree.line> column:<ruleDescriptor.tree.column>];<\n>
+>>
+
+@rule.postamble() ::= <<
+[debugListener locationLine:<ruleDescriptor.EORNode.line> column:<ruleDescriptor.EORNode.column>];<\n>
+}
+@finally {
+    [debugListener exitRule:@"<ruleName>"];
+    ruleLevel--;
+    if ( ruleLevel==0 ) [debugListener terminate];
+}<\n>
+>>
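+
+/* [Editor's sketch - not part of the upstream template group.]  For a rule named
+ * expr whose definition starts at line 12, column 4 and whose EOR node sits at
+ * line 14, column 1 (illustrative positions), the preamble/postamble pair above
+ * wraps the generated rule body roughly like this:
+ *
+ *   @try { [debugListener enterRule:@"expr"];
+ *   if ( ruleLevel==0 ) [debugListener commence];
+ *   ruleLevel++;
+ *   [debugListener locationLine:12 column:4];
+ *       ... normal rule body ...
+ *   [debugListener locationLine:14 column:1];
+ *   }
+ *   @finally {
+ *       [debugListener exitRule:@"expr"];
+ *       ruleLevel--;
+ *       if ( ruleLevel==0 ) [debugListener terminate];
+ *   }
+ */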
+
+/* these are handled in the runtime for now.
+ * stinks, but that's the easiest way to avoid having to generate two
+ * methods for each synpred
+
+@synpred.start() ::= "[debugListener beginBacktrack:state.backtracking];"
+
+@synpred.stop() ::= "[debugListener endBacktrack:state.backtracking wasSuccessful:success];"
+
+ */
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+    "@try { [debugListener enterSubRule:<decisionNumber>];<\n>"
+
+exitSubRule() ::=
+    "} @finally { [debugListener exitSubRule:<decisionNumber>]; }<\n>"
+
+enterDecision() ::=
+    "@try { [debugListener enterDecision:<decisionNumber>];<\n>"
+
+exitDecision() ::=
+    "} @finally { [debugListener exitDecision:<decisionNumber>]; }<\n>"
+
+enterAlt(n) ::= "[debugListener enterAlt:<n>];<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+@block.predecision() ::= "<enterSubRule()><enterDecision()>"
+
+@block.postdecision() ::= "<exitDecision()>"
+
+@block.postbranch() ::= "<exitSubRule()>"
+
+@ruleBlock.predecision() ::= "<enterDecision()>"
+
+@ruleBlock.postdecision() ::= "<exitDecision()>"
+
+@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@positiveClosureBlock.preloop() ::= "<enterSubRule()>"
+
+@positiveClosureBlock.postloop() ::= "<exitSubRule()>"
+
+@positiveClosureBlock.predecision() ::= "<enterDecision()>"
+
+@positiveClosureBlock.postdecision() ::= "<exitDecision()>"
+
+@positiveClosureBlock.earlyExitException() ::=
+    "[debugListener recognitionException:eee];<\n>"
+
+@closureBlock.preloop() ::= "<enterSubRule()>"
+
+@closureBlock.postloop() ::= "<exitSubRule()>"
+
+@closureBlock.predecision() ::= "<enterDecision()>"
+
+@closureBlock.postdecision() ::= "<exitDecision()>"
+
+@altSwitchCase.prealt() ::= "<enterAlt(n=i)>"
+
+@element.prematch() ::=
+    "[debugListener locationLine:<it.line> column:<it.pos>];"
+
+@matchSet.mismatchedSetException() ::=
+    "[debugListener recognitionException:mse];"
+
+@dfaState.noViableAltException() ::= "[debugListener recognitionException:nvae];"
+
+@dfaStateSwitch.noViableAltException() ::= "[debugListener recognitionException:nvae];"
+
+dfaDecision(decisionNumber,description) ::= <<
+@try {
+    // isCyclicDecision is only necessary for the Profiler, which I haven't done yet.
+    // isCyclicDecision = YES;
+    <super.dfaDecision(...)>
+}
+@catch (NoViableAltException *nvae) {
+    [debugListener recognitionException:nvae];
+    @throw nvae;
+}
+>>
+
+@cyclicDFA.errorMethod() ::= <<
+-(void) error:(NoViableAltException *)nvae
+{
+    [[recognizer debugListener] recognitionException:nvae];
+}
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+[self evalPredicate:@"<description>" matched:<pred>];
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ObjC.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ObjC.stg
new file mode 100644
index 0000000..369310a
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ObjC.stg
@@ -0,0 +1,2145 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2006, 2007 Kay Roepke 2010 Alan Condit
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*
+ *  Template group file for the Objective C code generator.
+ *  Heavily based on Java.stg
+ *
+ *  Written by Kay Roepke <kroepke(at)classdump.org>
+ *  Modified by Alan Condit <acondit(at)ipns.com>
+ *
+ *  This file is part of ANTLR and subject to the same license as ANTLR itself.
+ */
+
+objcTypeInitMap ::= [
+    "int"           : "0",              // Integers     start out being 0
+    "long"          : "0",              // Longs        start out being 0
+    "float"         : "0.0",            // Floats       start out being 0
+    "double"        : "0.0",            // Doubles      start out being 0
+    "BOOL"          : "NO",             // Booleans     start out being Antlr ObjC for false
+    "byte"          : "0",              // Bytes        start out being 0
+    "short"         : "0",              // Shorts       start out being 0
+    "char"          : "0",              // Chars        start out being 0
+    "id"            : "nil",            // ids          start out being nil
+    default         : "nil"             // anything other than an atomic type
+]
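+
+/* [Editor's sketch - a hypothetical helper showing how a group dictionary like
+ * this is consulted; the template name is illustrative and not part of ANTLR.]
+ *
+ *   initValueSketch(typeName) ::= "<objcTypeInitMap.(typeName)>"
+ *
+ * initValueSketch("BOOL") renders "NO", while an unknown type such as "MyNode *"
+ * falls through to the default entry and renders "nil".
+ */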
+
+// Map boolean literal spellings: .NET-style "True"/"False" are normalized to
+// "true"/"false", and Java-style "true"/"false" map to the ObjC literals YES and NO.
+booleanLiteral ::= [
+	"True":"true",
+	"False":"false",
+	"true":"YES",
+	"false":"NO",
+	default:"NO"
+]
+
+
+className() ::= "<name><!<if(LEXER)>Lexer<else><if(TREE_PARSER)>Tree<endif>Parser<endif>!>"
+leadIn(type) ::=
+<<
+/** \file
+ *  This <type> file was generated by $ANTLR version <ANTLRVersion>
+ *
+ *     -  From the grammar source file : <fileName>
+ *     -                            On : <generatedTimestamp>
+<if(LEXER)>
+ *     -                 for the lexer : <name>Lexer
+<endif>
+<if(PARSER)>
+ *     -                for the parser : <name>Parser
+<endif>
+<if(TREE_PARSER)>
+ *     -           for the tree parser : <name>TreeParser
+<endif>
+ *
+ * Editing it, at least manually, is not wise.
+ *
+ * ObjC language generator and runtime by Alan Condit, acondit|hereisanat|ipns|dotgoeshere|com.
+ *
+ *
+>>
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment,
+            recognizer,
+            name,
+            tokens,
+            tokenNames,
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            buildAST,
+            rewriteMode,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            superClass,
+            literals
+            ) ::=
+<<
+<leadIn("OBJC source")>
+*/
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<! <if(actions.(actionScope).header)>
+/* =============================================================================
+ * This is what the grammar programmer asked us to put at the top of every file.
+ */
+<actions.(actionScope).header>
+/* End of Header action.
+ * =============================================================================
+ */
+<endif> !>
+
+/* -----------------------------------------
+ * Include the ANTLR3 generated header file.
+ */
+#import "<name><!<if(LEXER)>Lexer<else><if(TREE_PARSER)>Tree<endif>Parser<endif>!>.h"
+<actions.(actionScope).postinclude>
+/* ----------------------------------------- */
+
+<docComment>
+
+<if(literals)>
+/** String literals used by <name> that we must do things like MATCHS() with.
+ *  C will normally just lay down 8 bit characters, and you can use L"xxx" to
+ *  get wchar_t, but wchar_t is 16 bits on Windows, which is not UTF32 and so
+ *  we perform this little trick of defining the literals as arrays of UINT32
+ *  and passing in the address of these.
+ */
+<literals:{it | static ANTLR3_UCHAR  lit_<i>[]  = <it>;}; separator="\n">
+
+<endif>
+
+/* ============================================================================= */
+/* =============================================================================
+ * Start of recognizer
+ */
+<recognizer>
+>>
+headerFileExtension() ::= ".h"
+
+headerFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment,
+            recognizer,
+            name,
+            tokens,
+            tokenNames,
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            buildAST,
+            rewriteMode,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            superClass,
+            literals
+          ) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<@imports>
+<actions.(actionScope).preincludes>
+/* =============================================================================
+ * Standard antlr OBJC runtime definitions
+ */
+#import \<Cocoa/Cocoa.h>
+#import \<ANTLR/ANTLR.h>
+/* End of standard antlr3 runtime definitions
+ * =============================================================================
+ */
+<actions.(actionScope).includes>
+<@end>
+
+<if(LEXER)>
+<lexerHeaderFile(...)>
+<endif>
+<if(PARSER)>
+<parserHeaderFile(...)>
+<endif>
+<if(TREE_PARSER)>
+<treeParserHeaderFile(...)>
+<endif>
+<docComment>
+>>
+
+lexerHeaderFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment,
+            recognizer,
+            name,
+            tokens,
+            tokenNames,
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            superClass="Lexer"
+            ) ::=
+<<
+
+<if(actions.(actionScope).header)>
+/* =============================================================================
+ * This is what the grammar programmer asked us to put at the top of every file.
+ */
+<actions.(actionScope).header>
+/* End of Header action.
+ * =============================================================================
+ */
+<endif>
+
+/* Start cyclicDFAInterface */
+<cyclicDFAs:cyclicDFAInterface()>
+
+#pragma mark Rule return scopes Interface start
+<rules:{rule |
+<rule.ruleDescriptor:{ruleDescriptor | <returnScopeInterface(scope=ruleDescriptor.returnScope)>}>}>
+#pragma mark Rule return scopes Interface end
+#pragma mark Tokens
+#ifndef TOKENLISTAlreadyDefined
+#define TOKENLISTAlreadyDefined 1
+#ifdef EOF
+#undef EOF
+#endif
+<tokens:{it | #define <it.name> <it.type>}; separator="\n">
+#endif
+/* interface lexer class */
+@interface <className()> <@superClassName>: <superClass><@end> { // line 283
+<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> *dfa<dfa.decisionNumber>;}; separator="\n">
+<synpreds:{pred | SEL <pred>Selector;}; separator="\n">
+/* ObjC start of actions.lexer.memVars */
+<actions.lexer.memVars>
+/* ObjC end of actions.lexer.memVars */
+}
++ (void) initialize;
++ (<className()> *)new<className()>WithCharStream:(id\<CharStream>)anInput;
+/* ObjC start actions.lexer.methodsDecl */
+<actions.lexer.methodsDecl>
+/* ObjC end actions.lexer.methodsDecl */
+<rules:{rule |
+- (<rule.ruleDescriptor:{ruleDescriptor|<returnType()>}>) <if(!rule.ruleDescriptor.isSynPred)>m<rule.ruleName><else><rule.ruleName>_fragment<endif> <if(rule.ruleDescriptor.parameterScope)><rule.ruleDescriptor.parameterScope:parameterScope()><endif>; }; separator="\n"><\n>
+@end /* end of <className()> interface */<\n>
+>>
+
+headerReturnScope(ruleDescriptor) ::= "<returnScopeInterface(...)>"
+headerReturnType(ruleDescriptor) ::= <<
+<if(LEXER)>
+<if(!r.ruleDescriptor.isSynPred)>
+ void
+<else>
+ <ruleDescriptor:returnType()>
+<endif>
+<else>
+ <ruleDescriptor:returnType()>
+<endif>
+>>
+// Produce the lexer output
+lexer(  grammar,
+        name,
+        tokens,
+        scopes,
+        rules,
+        numRules,
+        filterMode,
+        labelType="CommonToken",
+        superClass="Lexer"
+        ) ::= <<
+<cyclicDFAs:cyclicDFA()>
+
+/** As per Terence: No returns for lexer rules! */
+<!
+#pragma mark Rule return scopes start
+<rules:{rule | <rule.ruleDescriptor:{ruleDescriptor | 
+<returnScopeImplementation(scope=ruleDescriptor.returnScope)>}>
+}>
+#pragma mark Rule return scopes end
+!>
+@implementation <grammar.recognizerName> // line 330
+
++ (void) initialize
+{
+    [BaseRecognizer setGrammarFileName:@"<fileName>"];
+}
+
++ (NSString *) tokenNameForType:(NSInteger)aTokenType
+{
+    return [[self getTokenNames] objectAtIndex:aTokenType];
+}
+
++ (<grammar.recognizerName> *)new<grammar.recognizerName>WithCharStream:(id\<CharStream>)anInput
+{
+    return [[<grammar.recognizerName> alloc] initWithCharStream:anInput];
+}
+
+- (id) initWithCharStream:(id\<CharStream>)anInput
+{
+    self = [super initWithCharStream:anInput State:[RecognizerSharedState newRecognizerSharedStateWithRuleLen:<numRules>+1]];
+    if ( self != nil ) {
+<if(memoize)>
+        if ( state.ruleMemo == nil ) {
+            state.ruleMemo = [[RuleStack newRuleStackWithSize:<numRules>+1] retain];
+        }
+        if ( [state.ruleMemo count] == 0 ) {
+            // initialize the memoization cache - the indices are 1-based in the runtime code!
+            <! [state.ruleMemo addObject:[NSNull null]];     /* dummy entry to ensure 1-basedness. */ !>
+            for (NSInteger i = 0; i \< <numRules>; i++) {
+                [state.ruleMemo addObject:[HashRule newHashRuleWithLen:17]];
+            }
+        }
+<endif>
+        <synpreds:{pred | <lexerSynpred(name=pred)>};separator="\n">
+        <cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = [DFA<dfa.decisionNumber> newDFA<dfa.decisionNumber>WithRecognizer:self];}; separator="\n">
+        <actions.lexer.init>
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    <cyclicDFAs:{dfa | [dfa<dfa.decisionNumber> release];}; separator="\n">
+<actions.lexer.dealloc>
+    [super dealloc];
+}
+
+/* ObjC Start of actions.lexer.methods */
+<actions.lexer.methods>
+/* ObjC end of actions.lexer.methods */
+/* ObjC start methods() */
+<@methods()>
+/* ObjC end methods() */
+
+<if(actions.lexer.reset)>
+- (void) reset
+{
+    <actions.lexer.reset>
+    [super reset];
+}
+<endif>
+
+<if(filterMode)>
+<filteringNextToken()>
+<endif>
+/* Start of Rules */
+<rules; separator="\n">
+
+@end /* end of <grammar.recognizerName> implementation line 397 */
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error is reported on failure; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+- (id\<Token>) nextToken
+{
+    while (YES) {
+        if ( [input LA:1] == CharStreamEOF ) {
+            return [<labelType> eofToken];
+        }
+        state.token = nil;
+        state.channel = TokenChannelDefault;
+        state.tokenStartCharIndex = input.index;
+        state.tokenStartCharPositionInLine = input.charPositionInLine;
+        state.tokenStartLine = input.line;
+        state.text = nil;
+        @try {
+            NSInteger m = [input mark];
+            state.backtracking = 1; /* means we won't throw slow exception */
+            state.failed = NO;
+            [self mTokens];
+            state.backtracking = 0;
+            /* mTokens backtracks with synpred at backtracking==2
+               and we set the synpredgate to allow actions at level 1. */
+            if ( state.failed ) {
+                [input rewind:m];
+                [input consume]; /* advance one char and try again */
+            } else {
+                [self emit];
+                return state.token;
+            }
+        }
+        @catch (RecognitionException *re) {
+            // shouldn't happen in backtracking mode, but...
+            [self reportError:re];
+            [self recover:re];
+        }
+    }
+}
+
+- (void)memoize:(id\<IntStream\>)anInput
+      RuleIndex:(NSInteger)ruleIndex
+     StartIndex:(NSInteger)ruleStartIndex
+{
+    if ( state.backtracking > 1 ) [super memoize:anInput RuleIndex:ruleIndex StartIndex:ruleStartIndex];
+}
+
+- (BOOL)alreadyParsedRule:(id\<IntStream\>)anInput RuleIndex:(NSInteger)ruleIndex
+{
+    if ( state.backtracking > 1 ) return [super alreadyParsedRule:anInput RuleIndex:ruleIndex];
+    return NO;
+}
+>>
+
+actionGate() ::= "state.backtracking == 0"
+
+filteringActionGate() ::= "state.backtracking == 1"
+
+parserHeaderFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment,
+            recognizer,
+            name,
+            tokens,
+            tokenNames,
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            literals,
+            superClass="Parser"
+            ) ::= <<
+/* parserHeaderFile */
+<genericParserHeaderFile(inputStreamType="id\<TokenStream>",...)>
+>>
+
+treeParserHeaderFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment,
+            recognizer,
+            name,
+            tokens,
+            tokenNames,
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            literals,
+            superClass="TreeParser"
+            ) ::= <<
+/* treeParserHeaderFile */
+<genericParserHeaderFile(inputStreamType="id\<TreeNodeStream>",...)>
+>>
+
+genericParserHeaderFile( LEXER,
+            PARSER,
+            TREE_PARSER,
+            actionScope,
+            actions,
+            docComment,
+            recognizer,
+            name,
+            tokens,
+            tokenNames,
+            rules,
+            cyclicDFAs,
+            bitsets,
+            buildTemplate,
+            profile,
+            backtracking,
+            synpreds,
+            memoize,
+            numRules,
+            fileName,
+            ANTLRVersion,
+            generatedTimestamp,
+            trace,
+            scopes,
+            superClass,
+            literals,
+            inputStreamType
+            ) ::=
+<<
+<if(actions.(actionScope).header)>
+/* =============================================================================
+ * This is what the grammar programmer asked us to put at the top of every file.
+ */
+<actions.(actionScope).header>
+/* End of Header action.
+ * =============================================================================
+ */
+<endif>
+
+#ifndef ANTLR3TokenTypeAlreadyDefined
+#define ANTLR3TokenTypeAlreadyDefined
+typedef enum {
+    ANTLR_EOF = -1,
+    INVALID,
+    EOR,
+    DOWN,
+    UP,
+    MIN
+} ANTLR3TokenType;
+#endif
+
+<cyclicDFAs:cyclicDFAInterface()>
+#pragma mark Tokens
+#ifndef TOKENLISTAlreadyDefined
+#define TOKENLISTAlreadyDefined 1
+#ifdef EOF
+#undef EOF
+#endif
+<tokens:{it | #define <it.name> <it.type>}; separator="\n">
+#endif
+#pragma mark Dynamic Global Scopes globalAttributeScopeInterface
+<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeInterface(scope=it)><endif>}>
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeInterface
+<rules:{rule |
+<rule.ruleDescriptor:{ ruleDescriptor | <ruleAttributeScopeInterface(scope=ruleDescriptor.ruleScope)>}>}>
+#pragma mark Rule Return Scopes returnScopeInterface
+<rules:{rule |<rule.ruleDescriptor:{ ruleDescriptor | <returnScopeInterface(scope=ruleDescriptor.returnScope)>}>}>
+
+/* Interface grammar class */
+@interface <className()> <@superClassName> : <superClass><@end> { /* line 572 */
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeDecl
+<rules:{rule | <rule.ruleDescriptor.ruleScope:ruleAttributeScopeDecl(scope=rule.ruleDescriptor.ruleScope)>}>
+#pragma mark Dynamic Global Rule Scopes globalAttributeScopeMemVar
+<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeMemVar(scope=it)><endif>}><\n>
+/* ObjC start of actions.(actionScope).memVars */
+<actions.(actionScope).memVars>
+/* ObjC end of actions.(actionScope).memVars */
+/* ObjC start of memVars */
+<@memVars()>
+/* ObjC end of memVars */
+
+<cyclicDFAs:{dfa | DFA<dfa.decisionNumber> *dfa<dfa.decisionNumber>;}; separator="\n">
+<synpreds:{pred | SEL <pred>Selector;}; separator="\n">
+ }
+
+/* ObjC start of actions.(actionScope).properties */
+<actions.(actionScope).properties>
+/* ObjC end of actions.(actionScope).properties */
+/* ObjC start of properties */
+<@properties()>
+/* ObjC end of properties */
+
++ (void) initialize;
++ (<className()> *) new<className()>:(<inputStreamType>)aStream;
+/* ObjC start of actions.(actionScope).methodsDecl */
+<actions.(actionScope).methodsDecl>
+/* ObjC end of actions.(actionScope).methodsDecl */
+
+/* ObjC start of methodsDecl */
+<@methodsDecl()>
+/* ObjC end of methodsDecl */
+
+<rules:{rule |
+- (<rule.ruleDescriptor:{ruleDescriptor|<returnType()>}>)<if(!rule.ruleDescriptor.isSynPred)><rule.ruleName><else><rule.ruleName>_fragment<endif><if(rule.ruleDescriptor.parameterScope)><rule.ruleDescriptor.parameterScope:parameterScope()><endif>; }; separator="\n"><\n>
+
+@end /* end of <className()> interface */<\n>
+>>
+
+parser( grammar,
+        name,
+        scopes,
+        tokens,
+        tokenNames,
+        rules,
+        numRules,
+        bitsets,
+        ASTLabelType="CommonTree",
+        superClass="Parser",
+        labelType="CommonToken",
+        members={<actions.parser.members>}
+        ) ::= <<
+<genericParser(inputStreamType="id\<TokenStream>", rewriteElementType="Token", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser( grammar,
+        name,
+        scopes,
+        tokens,
+        tokenNames,
+        globalAction,
+        rules,
+        numRules,
+        bitsets,
+        filterMode,
+        labelType={<ASTLabelType>},
+        ASTLabelType="CommonTree",
+        superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
+        members={<actions.treeparser.members>}
+        ) ::= <<
+<genericParser(inputStreamType="id\<TreeNodeStream>", rewriteElementType="Node", ...)>
+>>
+
+/** How to generate a parser */
+genericParser(  grammar,
+        name,
+        scopes,
+        tokens,
+        tokenNames,
+        rules,
+        numRules,
+        cyclicDFAs,          // parser init -- initializes the DFAs
+        bitsets,
+        labelType,
+        ASTLabelType,
+        superClass,
+        members,
+        filterMode,
+        rewriteElementType,
+        inputStreamType
+        ) ::= <<
+<cyclicDFAs:cyclicDFA()>
+
+#pragma mark Bitsets
+<bitsets:{it | <bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>}>
+
+#pragma mark Dynamic Global globalAttributeScopeImplementation
+<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeImplementation(scope=it)><endif>}>
+
+#pragma mark Dynamic Rule Scopes ruleAttributeScopeImplementation
+<rules:{rule |
+<rule.ruleDescriptor:{ ruleDescriptor | <ruleAttributeScopeImplementation(scope=ruleDescriptor.ruleScope)>}>}>
+
+#pragma mark Rule Return Scopes returnScopeImplementation
+<rules:{rule | <rule.ruleDescriptor:{ ruleDescriptor | <returnScopeImplementation(scope=ruleDescriptor.returnScope)>}>}>
+
+@implementation <grammar.recognizerName>  // line 637
+
+/* ObjC start of ruleAttributeScope */
+#pragma mark Dynamic Rule Scopes ruleAttributeScope
+<rules:{rule | <rule.ruleDescriptor.ruleScope:ruleAttributeScope()>}>
+/* ObjC end of ruleAttributeScope */
+#pragma mark global Attribute Scopes globalAttributeScope
+/* ObjC start globalAttributeScope */
+<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScope()><endif>}>
+/* ObjC end globalAttributeScope */
+/* ObjC start actions.(actionScope).synthesize */
+<actions.(actionScope).synthesize>
+/* ObjC start synthesize() */
+<@synthesize()>
+
++ (void) initialize
+{
+    #pragma mark Bitsets
+    <bitsets:{it | <bitsetInit(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>}, words64=it.bits)>}>
+    [BaseRecognizer setTokenNames:[[AMutableArray arrayWithObjects:@"\<invalid>", @"\<EOR>", @"\<DOWN>", @"\<UP>", <tokenNames:{it | @<it>}; separator=", ", wrap="\n ">, nil] retain]];
+    [BaseRecognizer setGrammarFileName:@"<fileName>"];
+    <synpreds:{pred | <synpred(pred)>}>
+}
+
++ (<grammar.recognizerName> *)new<grammar.recognizerName>:(<inputStreamType>)aStream
+{
+<if(PARSER)>
+    return [[<grammar.recognizerName> alloc] initWithTokenStream:aStream];
+<else><! TREE_PARSER !>
+    return [[<grammar.recognizerName> alloc] initWithStream:aStream];
+<endif>
+}
+
+<if(PARSER)>
+- (id) initWithTokenStream:(<inputStreamType>)aStream
+{
+    self = [super initWithTokenStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:<numRules>+1] retain]];
+    if ( self != nil ) {
+<else><! TREE_PARSER !>
+- (id) initWithStream:(<inputStreamType>)aStream
+{
+    self = [super initWithStream:aStream State:[[RecognizerSharedState newRecognizerSharedStateWithRuleLen:<numRules>+1] retain]];
+    if ( self != nil ) {
+<endif>
+        <! <parserCtorBody()> !>
+        <cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = [DFA<dfa.decisionNumber> newDFA<dfa.decisionNumber>WithRecognizer:self];}; separator="\n">
+        <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeInit(scope=it)><endif>}>
+        <rules:{rule | <rule.ruleDescriptor.ruleScope:ruleAttributeScopeInit()>}>
+        /* start of actions-actionScope-init */
+        <actions.(actionScope).init>
+        /* start of init */
+        <@init()>
+    }
+    return self;
+}
+
+- (void) dealloc
+{
+    <cyclicDFAs:{dfa | [dfa<dfa.decisionNumber> release];}; separator="\n">
+    <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeDealloc(scope=it)><endif>}>
+    <actions.(actionScope).dealloc>
+    <@dealloc()>
+    [super dealloc];
+}
+
+/* ObjC start actions.(actionScope).methods */
+<actions.(actionScope).methods>
+/* ObjC end actions.(actionScope).methods */
+/* ObjC start methods() */
+<@methods()>
+/* ObjC end methods() */
+/* ObjC start rules */
+<rules; separator="\n">
+/* ObjC end rules */
+
+@end /* end of <grammar.recognizerName> implementation line 692 */<\n>
+>>
+
+parserCtorBody() ::= <<
+<if(memoize)> /* parserCtorBody */
+<if(grammar.grammarIsRoot)>
+state.ruleMemo = [[RuleStack newRuleStack:<numRules>+1] retain];<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>;}; separator="\n">
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc., just give the simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>_fragment
+- (void) <ruleName>_fragment
+{
+    <ruleLabelDefs()>
+    <if(trace)>
+        [self traceIn:\@"<ruleName>_fragment" Index:<ruleDescriptor.index>];
+    @try {
+        <block>
+    }
+    @finally {
+        [self traceOut:\@"<ruleName>_fragment" Index:<ruleDescriptor.index>];
+    }
+<else>
+    <block>
+<endif>
+} // $ANTLR end <ruleName>_fragment
+>>
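+
+/* [Editor's sketch - not part of the upstream template group.]  For a syntactic
+ * predicate named synpred1_T with trace off (an illustrative name), the template
+ * above generates roughly:
+ *
+ *   // $ANTLR start synpred1_T_fragment
+ *   - (void) synpred1_T_fragment
+ *   {
+ *       ... label declarations and the predicate's block ...
+ *   } // $ANTLR end synpred1_T_fragment
+ *
+ * and synpred() below registers the selector used to invoke it:
+ *
+ *   SEL synpred1_TSelector = @selector(synpred1_T_fragment);
+ */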
+
+synpred(name) ::= <<
+SEL <name>Selector = @selector(<name>_fragment);
+<! // $ANTLR start <name>
+- (BOOL) <name>
+{
+    state.backtracking++;
+    <@start()>
+    NSInteger start = [input mark];
+    @try {
+        [self <name>_fragment]; // can never throw exception
+    }
+    @catch (RecognitionException *re) {
+        NSLog(@"impossible: %@\n", re.name);
+    }
+    BOOL success = (state.failed == NO);
+    [input rewind:start];
+    <@stop()>
+    state.backtracking--;
+    state.failed=NO;
+    return success;
+} // $ANTLR end <name> <\n> !>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( state.backtracking > 0 && [self alreadyParsedRule:input RuleIndex:<ruleDescriptor.index>] ) { return <ruleReturnValue()>; }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if (backtracking)>if ( state.failed ) return <ruleReturnValue()>;<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if ( state.backtracking > 0 ) { state.failed = YES; return <ruleReturnValue()>; }<\n><endif>
+>>
+
+/** How to generate code for a rule.
+ *  The return type aggregates are declared in the header file (headerFile template)
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+
+/*
+ * $ANTLR start <ruleName>
+ * <fileName>:<description>
+ */
+- (<returnType()>) <ruleName><ruleDescriptor.parameterScope:parameterScope()>
+{
+    <if(trace)>[self traceIn:\@"<ruleName>" Index:<ruleDescriptor.index>];<endif>
+    <if(trace)>NSLog(@"enter <ruleName> %@ failed=%@ backtracking=%d", [input LT:1], (state.failed==YES)?@"YES":@"NO", state.backtracking);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    @try {
+        <ruleMemoization(name=ruleName)>
+        <ruleLabelDefs()>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+    }
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else><if(!emptyRule)><if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    @catch (RecognitionException *re) {
+        [self reportError:re];
+        [self recover:input Exception:re];
+        <@setErrorReturnValue()>
+    }<\n>
+<endif><endif><endif>
+    @finally {
+        <if(trace)>[self traceOut:@"<ruleName>" Index:<ruleDescriptor.index>];<endif>
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+    }
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+/* $ANTLR end <ruleName> */
+>>
+
+finalCode(finalBlock) ::= <<
+{
+    <finalBlock>
+}
+>>
+
+catch(decl,action) ::= <<
+@catch (<e.decl>) {
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+/* ruleDeclarations */
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<returnType()> retval = [<ruleDescriptor:returnStructName()> new<ruleDescriptor:returnStructName()>];
+[retval setStart:[input LT:1]];<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.type> <a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+NSInteger <ruleDescriptor.name>_StartIndex = input.index;
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+/* ruleScopeSetUp */
+<ruleDescriptor.useScopes:{it | [<it>_stack push:[<it>_Scope new<it>_Scope]];}>
+<ruleDescriptor.ruleScope:{it | [<it.name>_stack push:[<it.name>_Scope new<it.name>_Scope]];}>
+>>
+
+ruleScopeCleanUp() ::= <<
+/* ruleScopeCleanUp */
+<ruleDescriptor.useScopes:{it | [<it>_stack pop];}; separator="\n">
+<ruleDescriptor.ruleScope:{it | [<it.name>_stack pop];}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <%
+/* ruleLabelDefs entry */<"\n">
+<[ruleDescriptor.tokenLabels, ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it |<labelType> *<it.label.text> = nil;}; separator="\n"><"\n">
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it | AMutableArray *list_<it.label.text> = nil;}; separator="\n"><"\n">
+<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n"><"\n">
+<ruleDescriptor.ruleListLabels:{ll|ParserRuleReturnScope *<ll.label.text> = nil;}; separator="\n"><"\n">
+%>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it |<labelType> *<it.label.text>=nil;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{it |NSInteger <it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]:{it |AMutableArray *list_<it.label.text>=nil; }; separator="\n">
+>>
+
+ruleReturnValue() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+/* token+rule list labels */
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{it |[list_<it.label.text> release];}; separator="\n">
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+[retval setStop:[input LT:-1]];<\n>
+<endif><endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if (state.backtracking > 0) [self memoize:input RuleIndex:<ruleDescriptor.index> StartIndex:<ruleDescriptor.name>_StartIndex];
+<endif><endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName, nakedBlock, ruleDescriptor, block, memoize) ::= <<
+// $ANTLR start "<ruleName>"
+- (void) m<ruleName><if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope:parameterScope(scope=it)><endif>
+{
+    //<if(trace)>[self traceIn:\@"<ruleName>" Index:<ruleDescriptor.index>];<endif>
+    <if(trace)>NSLog(@"enter <ruleName> %C line=%d:%d failed=%@ backtracking=%d",
+        [input LA:1],
+        self.line,
+        self.charPositionInLine,
+        (state.failed==YES) ? @"YES" : @"NO",
+        state.backtracking);
+    <endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    @try {
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        NSInteger _type = <ruleName>;
+        NSInteger _channel = TokenChannelDefault;
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        state.type = _type;
+        state.channel = _channel;
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    }
+    @finally {
+        //<if(trace)>[self traceOut:[NSString stringWithFormat:@"<ruleName> %d\n", <ruleDescriptor.index>]];<endif>
+        <if(trace)>NSLog(@"exit <ruleName> %C line=%d:%d failed=%@ backtracking=%d",
+                    [input LA:1], self.line, self.charPositionInLine,
+                    (state.failed==YES) ? @"YES" : @"NO", state.backtracking);<endif>
+        <ruleScopeCleanUp()>
+        <memoize()>
+    }
+    return;
+}
+/* $ANTLR end "<ruleName>" */
+>>
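+
+/* A rough, hypothetical expansion of lexerRule for a lexer rule named ID
+ * (non-fragment, no trace, no memoization); the rule name and the elided body
+ * are made up, and real output adds scope setup/cleanup where declared:
+ *
+ *     - (void) mID
+ *     {
+ *         @try {
+ *             NSInteger _type = ID;
+ *             NSInteger _channel = TokenChannelDefault;
+ *             // ... code generated from the rule's block ...
+ *             state.type = _type;
+ *             state.channel = _channel;
+ *         }
+ *         @finally {
+ *             // scope cleanup and memoization go here when enabled
+ *         }
+ *         return;
+ *     }
+ */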
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+- (void) mTokens
+{
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description> // block
+NSInteger alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch (alt<decisionNumber>) {
+    <alts:{a | <altSwitchCase(i, a)>}>
+}
+<@postbranch()>
+>>
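+
+/* A rough, hypothetical expansion of block (with altSwitchCase, defined below)
+ * for a two-alternative subrule predicted by decision 3; the decision number
+ * and the elided element code are made up:
+ *
+ *     NSInteger alt3 = 2;
+ *     // ... decision code sets alt3 from the lookahead ...
+ *     switch (alt3) {
+ *         case 1 : ;
+ *             // ... elements of alternative 1 ...
+ *             break;
+ *         case 2 : ;
+ *             // ... elements of alternative 2 ...
+ *             break;
+ *     }
+ */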
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description> //ruleblock
+NSInteger alt<decisionNumber>=<maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch (alt<decisionNumber>) {
+    <alts:{a | <altSwitchCase(i, a)>}>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description> // ruleBlockSingleAlt
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description> // blockSingleAlt
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description> // positiveClosureBlock
+NSInteger cnt<decisionNumber> = 0;
+<decls>
+<@preloop()>
+do {
+    NSInteger alt<decisionNumber> = <maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+        <alts:{a | <altSwitchCase(i, a)>}>
+        default :
+            if ( cnt<decisionNumber> >= 1 )
+                goto loop<decisionNumber>;
+            <ruleBacktrackFailure()>
+            EarlyExitException *eee =
+                [EarlyExitException newException:input decisionNumber:<decisionNumber>];
+            <@earlyExitException()>
+            @throw eee;
+    }
+    cnt<decisionNumber>++;
+} while (YES);
+loop<decisionNumber>: ;
+<@postloop()>
+>>
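+
+/* A rough, hypothetical expansion of positiveClosureBlock for decision 4 with
+ * a single repeated alternative (numbers and the elided match code are made
+ * up; the backtracking-failure branch is omitted):
+ *
+ *     NSInteger cnt4 = 0;
+ *     do {
+ *         NSInteger alt4 = 2;
+ *         // ... decision code sets alt4 ...
+ *         switch (alt4) {
+ *             case 1 : ;
+ *                 // ... match one more iteration ...
+ *                 break;
+ *             default :
+ *                 if ( cnt4 >= 1 )
+ *                     goto loop4;
+ *                 EarlyExitException *eee =
+ *                     [EarlyExitException newException:input decisionNumber:4];
+ *                 @throw eee;
+ *         }
+ *         cnt4++;
+ *     } while (YES);
+ *     loop4: ;
+ */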
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 0 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+<decls>
+<@preloop()>
+do {
+    NSInteger alt<decisionNumber>=<maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch (alt<decisionNumber>) {
+        <alts:{a | <altSwitchCase(i, a)>}>
+        default :
+            goto loop<decisionNumber>;
+    }
+} while (YES);
+loop<decisionNumber>: ;
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation,
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase(altNum, alt) ::= <<
+case <altNum> : ;
+    <@prealt()>
+    <alt>
+    break;<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description> // alt
+{
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+}
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element(e) ::= << <@prematch()><\n><e.el><\n> >>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)><label>=(<labelType> *)<endif>[self match:input TokenType:<token> Follow:FOLLOW_<token>_in_<ruleName><elementIndex>]; <checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label> == nil) list_<label> = [[AMutableArray arrayWithCapacity:5] retain];
+[list_<label> addObject:<elem>];<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>NSInteger <label> = [input LA:1];<\n><endif>
+[self matchChar:<char>]; <checkRuleBacktrackFailure()><\n>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)><label> = [input LA:1];<\n><endif>
+[self matchRangeFromChar:<a> to:<b>]; <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label> = [input LA:1];<\n>
+<else>
+<label> = (<labelType> *)[input LT:1]; /* matchSet */<\n>
+<endif><endif>
+if (<s>) {
+    [input consume];
+    <postmatchCode>
+<if(!LEXER)>
+    [state setIsErrorRecovery:NO];
+<endif>
+    <if(backtracking)>state.failed = NO;<\n><endif>
+} else {
+    <ruleBacktrackFailure()>
+    MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+    <@mismatchedSetException()>
+<if(LEXER)>
+<if(label)>
+    mse.c = <label>;
+<endif>
+    [self recover:mse];
+    @throw mse;
+<else>
+    @throw mse;
+    <! use following code to make it recover inline; remove throw mse;
+    [self recoverFromMismatchedSet:input exception:mse follow:FOLLOW_set_in_<ruleName><elementIndex>]; !>
+<endif>
+}<\n>
+>>
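+
+/* A rough, hypothetical expansion of matchSet in a parser, with no label, for
+ * a set that the decision code rendered as a range test (token names are made
+ * up; the backtracking branch is omitted):
+ *
+ *     if ( ([input LA:1] >= PLUS && [input LA:1] <= MINUS) ) {
+ *         [input consume];
+ *         [state setIsErrorRecovery:NO];
+ *     } else {
+ *         MismatchedSetException *mse = [MismatchedSetException newException:nil stream:input];
+ *         @throw mse;
+ *     }
+ */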
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex="0") ::= <<
+<if(label)>
+NSInteger <label>Start = input.index;
+[self matchString:<string>]; <checkRuleBacktrackFailure()>
+NSInteger <label>StartLine<elementIndex> = self.line;
+NSInteger <label>StartCharPos<elementIndex> = self.charPositionInLine;
+<label> = [[<labelType> newToken:input Type:TokenTypeInvalid Channel:TokenChannelDefault Start:<label>Start Stop:input.index] retain];
+[self setLine:<label>StartLine<elementIndex>];
+[self setCharPositionInLine:<label>StartCharPos<elementIndex>];
+<else>
+[self matchString:<string>]; <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)>
+<label> = (<labelType> *)[input LT:1];<\n>
+<endif>
+[self matchAny:input]; <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+NSInteger <label> = [input LA:1];<\n>
+<endif>
+[self matchAny]; <checkRuleBacktrackFailure()><\n>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.  The 'rule' argument was the
+ *  target rule name, but is now of type Rule, whose toString is
+ *  the same: the rule name.  Now, though, you can access the full
+ *  rule descriptor.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+/* ruleRef */
+[self pushFollow:FOLLOW_<rule.name>_in_<ruleName><elementIndex>];
+<if(label)><label> = <endif>[self <if(scope)><scope:delegateName()>.<endif><rule.name><if(args)>:<first(args)> <rest(args):{ a | arg<i>:<rest(args)>}; separator=" "><endif>];<\n>
+[self popFollow];
+<checkRuleBacktrackFailure()><\n>
+>>
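+
+/* A rough, hypothetical expansion of ruleRef for a labeled reference to rule
+ * "expr" from rule "stat" at element index 7 (the FOLLOW set name and all
+ * rule/label names are made up):
+ *
+ *     [self pushFollow:FOLLOW_expr_in_stat7];
+ *     e = [self expr];
+ *     [self popFollow];
+ */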
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument was the target rule name, but is now
+ *  of type Rule, whose toString is the same: the rule name.
+ *  Now, though, you can access the full rule descriptor.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+NSInteger <label>Start<elementIndex> = input.index;
+[self m<rule.name><if(args)>:<args; separator=" :"><endif>]; <checkRuleBacktrackFailure()><\n>
+<label> = [[<labelType> newToken:input Type:TokenTypeInvalid Channel:TokenChannelDefault Start:<label>Start<elementIndex> Stop:input.index-1] retain];
+<label>.line = self.line;
+<else>
+[self <if(scope)><scope:delegateName()>.<endif>m<rule.name><if(args)>:<args; separator=" :"><endif>]; <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+NSInteger <label>Start<elementIndex> = input.index;
+[self matchChar:CharStreamEOF]; <checkRuleBacktrackFailure()><\n>
+<labelType> <label> = [[<labelType> newToken:input Type:TokenTypeEOF Channel:TokenChannelDefault Start:<label>Start<elementIndex> Stop:input.index-1] retain];
+<label>.line = self.line;
+<else>
+[self matchChar:CharStreamEOF]; <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "int <recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
+recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( [input LA:1] == DOWN ) {
+    [self match:input TokenType:DOWN Follow:nil]; <checkRuleBacktrackFailure()>
+    <children:element()>
+    [self match:input TokenType:UP Follow:nil]; <checkRuleBacktrackFailure()>
+}
+<else>
+    [self match:input TokenType:DOWN Follow:nil]; <checkRuleBacktrackFailure()>
+    <children:element()>
+    [self match:input TokenType:UP Follow:nil]; <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) {
+    <ruleBacktrackFailure()>
+    @throw [FailedPredicateException newException:@"<ruleName>" predicate:@"<description>" stream:input];
+}
+>>
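+
+/* A rough, hypothetical expansion of validateSemanticPredicate for a predicate
+ * {x > 0}? in rule "stat" (the rule name and predicate text are made up; the
+ * backtracking-failure branch is omitted):
+ *
+ *     if ( !((x > 0)) ) {
+ *         @throw [FailedPredicateException newException:@"stat" predicate:@"x > 0" stream:input];
+ *     }
+ */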
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+NSInteger LA<decisionNumber>_<stateNumber> = [input LA:<k>];<\n>
+<edges; separator="\nelse ">
+else {
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    NoViableAltException *nvae = [NoViableAltException newException:<decisionNumber> state:<stateNumber> stream:input];
+    nvae.c = LA<decisionNumber>_<stateNumber>;
+    <@noViableAltException()>
+    @throw nvae;<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection but this
+ *  is faster, smaller, and closer to what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+NSInteger LA<decisionNumber>_<stateNumber> = [input LA:<k>];<\n>
+<edges; separator="\nelse ">
+>>
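+
+/* A rough, hypothetical expansion of dfaOptionalBlockState for an (ID)? block
+ * at decision 5, state 0 (the token name and numbers are made up).  Note there
+ * is no else/error branch; falling through simply skips the optional element:
+ *
+ *     NSInteger LA5_0 = [input LA:1];
+ *     if ( (LA5_0 == ID) ) {
+ *         alt5 = 1;
+ *     }
+ */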
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+NSInteger LA<decisionNumber>_<stateNumber> = [input LA:<k>];
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else {
+    alt<decisionNumber> = <eotPredictsAlt>;
+}<\n>
+<endif><endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
+    <targetState>
+}
+>>
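+
+/* A rough, hypothetical expansion of dfaEdge with a gating predicate attached
+ * (the lookahead variable, token name, and predicate are made up):
+ *
+ *     if ( (LA2_1 == ID) && ((inDecl)) ) {
+ *         alt2 = 2;
+ *     }
+ */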
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+unichar charLA<decisionNumber> = [input LA:<k>];
+switch (charLA<decisionNumber>) {
+    <edges; separator="\n"><\n>
+default: ;
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    NoViableAltException *nvae = [NoViableAltException newException:<decisionNumber> state:<stateNumber> stream:input];
+    nvae.c = charLA<decisionNumber>;
+    <@noViableAltException()>
+    @throw nvae;<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ([input LA:<k>]) { // dfaOptionalBlockStateSwitch
+    <edges; separator="\n"><\n>
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ([input LA:<k>]) { // dfaLoopbackStateSwitch
+    <edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+default:
+    alt<decisionNumber> = <eotPredictsAlt>;
+    break;<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+<labels:{it | case <it>: ;}; separator="\n">
+    {
+    <targetState>
+    }
+    break;
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = [dfa<decisionNumber> predict:input];
+>>
+
+/** Used in headerFile */
+cyclicDFAInterface(dfa) ::= <<
+#pragma mark Cyclic DFA interface start DFA<dfa.decisionNumber>
+@interface DFA<dfa.decisionNumber> : DFA {
+}
++ (DFA<dfa.decisionNumber> *) newDFA<dfa.decisionNumber>WithRecognizer:(BaseRecognizer *)theRecognizer;
+- initWithRecognizer:(BaseRecognizer *)recognizer;
+@end /* end of DFA<dfa.decisionNumber> interface  */<\n>
+#pragma mark Cyclic DFA interface end DFA<dfa.decisionNumber><\n>
+>>
+
+/** Used in lexer/parser implementation files */
+/* The Java target dumps DFA tables as run-length-encoded strings of octal
+ * values: it can't use hex as the compiler translates them before compilation,
+ * the strings are split into multiple, concatenated strings that Java puts
+ * back together at compile time, and Java cannot handle large static arrays,
+ * so it is stuck with that encode/decode approach.  This ObjC target emits
+ * the tables below as plain static C arrays instead.  See the analysis and
+ * runtime DFA for the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+#pragma mark Cyclic DFA implementation start DFA<dfa.decisionNumber>
+
+@implementation DFA<dfa.decisionNumber>
+const static NSInteger dfa<dfa.decisionNumber>_eot[<dfa.numberOfStates>] =
+    {<dfa.eot; wrap="\n     ", separator=",", null="-1">};
+const static NSInteger dfa<dfa.decisionNumber>_eof[<dfa.numberOfStates>] =
+    {<dfa.eof; wrap="\n     ", separator=",", null="-1">};
+const static unichar dfa<dfa.decisionNumber>_min[<dfa.numberOfStates>] =
+    {<dfa.min; wrap="\n     ", separator=",", null="-1">};
+const static unichar dfa<dfa.decisionNumber>_max[<dfa.numberOfStates>] =
+    {<dfa.max; wrap="\n     ", separator=",", null="-1">};
+const static NSInteger dfa<dfa.decisionNumber>_accept[<dfa.numberOfStates>] =
+    {<dfa.accept; wrap="\n     ", separator=",", null="-1">};
+const static NSInteger dfa<dfa.decisionNumber>_special[<dfa.numberOfStates>] =
+    {<dfa.special; wrap="\n     ", separator=",", null="-1">};
+
+/** Used when there is no transition table entry for a particular state */
+#define dfa<dfa.decisionNumber>_T_empty	    nil
+
+<dfa.edgeTransitionClassMap.keys:{ table |
+const static NSInteger dfa<dfa.decisionNumber>_T<i0>[] =
+{
+     <table; separator=", ", wrap="\n ", null="-1">
+\};
+}; null="">
+
+const static NSInteger *dfa<dfa.decisionNumber>_transition[] =
+{
+    <dfa.transitionEdgeTables:{whichTable|dfa<dfa.decisionNumber>_T<whichTable>}; separator=", ", wrap="\n", null="nil">
+};
+
++ (DFA<dfa.decisionNumber> *) newDFA<dfa.decisionNumber>WithRecognizer:(BaseRecognizer *)aRecognizer
+{
+    return [[[DFA<dfa.decisionNumber> alloc] initWithRecognizer:aRecognizer] retain];
+}
+
+- (id) initWithRecognizer:(BaseRecognizer *) theRecognizer
+{
+    self = [super initWithRecognizer:theRecognizer];
+    if ( self != nil ) {
+        decisionNumber = <dfa.decisionNumber>;
+        eot = dfa<dfa.decisionNumber>_eot;
+        eof = dfa<dfa.decisionNumber>_eof;
+        min = dfa<dfa.decisionNumber>_min;
+        max = dfa<dfa.decisionNumber>_max;
+        accept = dfa<dfa.decisionNumber>_accept;
+        special = dfa<dfa.decisionNumber>_special;
+        transition = dfa<dfa.decisionNumber>_transition;
+    }
+    return self;
+}
+
+<if(dfa.specialStateSTs)>
+/* start dfa.specialStateSTs */
+- (NSInteger) specialStateTransition:(NSInteger)s Stream:(id\<IntStream\>)anInput
+{
+<if(LEXER)>
+    id\<IntStream\> input = anInput;<\n>
+<endif>
+<if(PARSER)>
+    id\<TokenStream\> input = (id\<TokenStream\>)anInput;<\n>
+<endif>
+<if(TREE_PARSER)>
+    id\<TreeNodeStream\> input = (id\<TreeNodeStream\>)anInput;<\n>
+<endif>
+    switch (s) {
+        <dfa.specialStateSTs:{state |
+        case <i0> : ;<! compressed special state numbers 0..n-1 !>
+            <state>}; separator="\n">
+    }
+<if(backtracking)>
+    if ( [recognizer getBacktrackingLevel] > 0 ) { [recognizer setFailed:YES]; return -1; }<\n>
+<endif>
+    NoViableAltException *nvae = [NoViableAltException newException:<dfa.decisionNumber> state:s stream:recognizer.input];
+    // nvae.c = s;
+    /* [self error:nvae]; */ <! for debugger - do later !>
+    @throw nvae;
+}<\n>
+/* end dfa.specialStateSTs */
+<endif>
+
+- (void) dealloc
+{
+    //free(transition);
+    [super dealloc];
+}
+
+- (NSString *) description
+{
+    return @"<dfa.description>";
+}
+
+<@errorMethod()>
+
+@end /* end DFA<dfa.decisionNumber> implementation */<\n>
+#pragma mark Cyclic DFA implementation end DFA<dfa.decisionNumber>
+<\n>
+>>
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber, stateNumber, edges, needErrorClause, semPredState) ::= <<
+/* cyclicDFAState */
+NSInteger LA<decisionNumber>_<stateNumber> = [input LA:1];<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+NSInteger index<decisionNumber>_<stateNumber> = input.index;
+[input rewind];<\n>
+<endif>
+s = -1;
+<edges; separator="\nelse ">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+[input seek:index<decisionNumber>_<stateNumber>];<\n>
+<endif>
+if ( s >= 0 )
+    return s;
+ break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+/* cyclicDFAEdge */
+if (<labelExpr><if(predicates)> && (<predicates>)<endif>) { s = <targetStateNumber>;}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n> /* eotDFAEdge */
+>>
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+orPredicates(operands) ::= "(<operands; separator=\"||\">)"
+
+notPredicate(pred) ::= "!(<evalPredicate(pred, {})>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+/*
+ * evalSynPredicate(pred,description) ::= "<pred>()"
+ *
+ * synpreds are broken in cyclic DFA special states
+ *  Damn! For now, work around with using the selectors directly, and by providing a trampoline evalSynPred method in
+ *  DFA
+ */
+/* evalSynPredicate(pred,description) ::= "[self evaluateSyntacticPredicate:<pred>Selector stream:input]" */
+evalSynPredicate(pred,description) ::= "[self evaluateSyntacticPredicate:@selector(<pred>_fragment)]"
+/* evalSynPredicate(pred,description) ::= "[recognizer <pred>]" */
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "[input LA:<k>] == <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
+(LA<decisionNumber>_<stateNumber> >= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
+%>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(([input LA:<k>] >= <lower>) && ([input LA:<k>] \<= <upper>))"
+
+setTest(ranges) ::= <%
+<ranges; separator="||">
+%>
+
+// A T T R I B U T E S
+
+memVars(scope) ::= <% <scope.attributes:{a|<a.type> <a.name>;<\n>}; separator="\n"> %>
+
+properties(scope) ::= <%
+<scope.attributes:{a|@property (assign, getter=get<a.name>, setter=set<a.name>:) <a.type> <a.name>;<\n>}; separator="\n">
+%>
+
+methodsDecl(scope) ::= <%
+<scope.attributes:{a|- (<a.type>)get<a.name>;<\n>- (void)set<a.name>:(<a.type>)aVal;<\n>}; separator="\n">
+%>
+
+synthesize(scope) ::= <% <scope.attributes:{a|@synthesize <a.name>;}; separator="\n"> %>
+
+methods(scope) ::= <%
+<scope.attributes:{a|
+- (<a.type>)get<a.name> { return( <a.name> ); \}<\n>
+- (void)set<a.name>:(<a.type>)aVal { <a.name> = aVal; \}<\n>}; separator="\n">
+%>
+
+globalAttributeScopeInterface(scope) ::= <%
+/* globalAttributeScopeInterface */<\n>
+@interface <scope.name>_Scope : SymbolsScope {<\n>
+<if(scope.attributes)>
+<memVars(scope)>
+<endif>
+}<\n>
+<if(scope.attributes)>
+/* start of globalAttributeScopeInterface properties */<\n>
+<properties(scope)>
+/* end globalAttributeScopeInterface properties */<\n>
+<endif>
+
++ (<scope.name>_Scope *)new<scope.name>_Scope;<\n>
+- (id) init;<\n>
+<if(scope.attributes)>
+/* start of globalAttributeScopeInterface methodsDecl */<\n>
+<methodsDecl(scope)>
+/* End of globalAttributeScopeInterface methodsDecl */<\n>
+<endif>
+@end /* end of <scope.name>_Scope interface */<\n>
+%>
+
+globalAttributeScopeMemVar(scope) ::= <%
+/* globalAttributeScopeMemVar */<\n>
+SymbolStack *<scope.name>_stack;<\n>
+<scope.name>_Scope *<scope.name>_scope;<\n>
+%>
+
+globalAttributeScopeImplementation(scope) ::= <%
+@implementation <scope.name>_Scope  /* globalAttributeScopeImplementation */<\n>
+<if(scope.attributes)>
+/* start of synthesize -- OBJC-Line 1750 */<\n>
+<synthesize(scope)><\n>
+<endif>
+<\n>
++ (<scope.name>_Scope *)new<scope.name>_Scope<\n>
+{<\n>
+    return [[<scope.name>_Scope alloc] init];<\n>
+}<\n>
+<\n>
+- (id) init<\n>
+{<\n>
+    self = [super init];<\n>
+    return self;<\n>
+}<\n>
+<\n>
+<if(scope.attributes)>
+/* start of iterate get and set functions */<\n>
+<methods(scope)><\n>
+/* End of iterate get and set functions */<\n>
+<endif>
+@end /* end of <scope.name>_Scope implementation */<\n><\n>
+%>
+
+globalAttributeScopeInit(scope) ::= <<
+/* globalAttributeScopeInit */<\n>
+<scope.name>_scope = [<scope.name>_Scope new<scope.name>_Scope];<\n>
+<scope.name>_stack = [SymbolStack newSymbolStackWithLen:30];<\n>
+>>
+
+globalAttributeScopeDealloc(scope) ::= <% [<scope.name>_stack release];<\n> %>
+
+globalAttributeScope(scope) ::= <%
+<if(scope.name)>
+static SymbolStack *<scope.name>_stack;<\n>
+<endif>
+%>
+
+ruleAttributeScopeMemVar(scope) ::= <%
+/* ObjC ruleAttributeScopeMemVar */<\n>
+<if(scope.attributes)>
+<scope.name>_Scope *<scope.name>_scope; /* ObjC ruleAttributeScopeMemVar */<\n>
+<endif>
+%>
+
+ruleAttributeScopeInterface(scope) ::= <%
+<if(scope.attributes)>
+/* start of ruleAttributeScopeInterface */<\n>
+@interface <scope.name>_Scope : SymbolsScope {<\n>
+    <memVars(scope)><\n>
+}<\n>
+<\n>
+/* start property declarations */<\n>
+<properties(scope)><\n>
+/* start method declarations */<\n>
++ (<scope.name>_Scope *)new<scope.name>_Scope;<\n>
+- (id) init;<\n>
+<methodsDecl(scope)><\n>
+@end /* end of ruleAttributeScopeInterface */<\n><\n>
+<endif>
+%>
+
+ruleAttributeScopeImplementation(scope) ::= <%
+<if(scope.attributes)>
+@implementation <scope.name>_Scope  /* start of ruleAttributeScopeImplementation */<\n>
+<synthesize(scope)><\n>
+<\n>
++ (<scope.name>_Scope *)new<scope.name>_Scope<\n>
+{<\n>
+    return [[<scope.name>_Scope alloc] init];<\n>
+}<\n>
+<\n>
+- (id) init<\n>
+{<\n>
+    self = [super init];<\n>
+    return self;<\n>
+}<\n>
+<\n>
+/* start of <scope.name>_Scope get and set functions */<\n>
+<methods(scope)><\n>
+/* End of <scope.name>_Scope get and set functions */<\n>
+@end /* end of ruleAttributeScopeImplementation */<\n><\n>
+<endif>
+%>
+
+ruleAttributeScopeInit(scope) ::= <%
+/* ruleAttributeScopeInit */<\n>
+<scope.name>_scope = [<scope.name>_Scope new<scope.name>_Scope];<\n>
+<scope.name>_stack = [SymbolStack newSymbolStackWithLen:30];<\n>
+%>
+
+ruleAttributeScopeDealloc(scope) ::= <% [<scope.name>_Scope release];<\n> %>
+
+ruleAttributeScope(scope) ::= <%
+<if(scope.attributes)>
+/* ruleAttributeScope */<\n>
+static SymbolStack *<scope.name>_stack;<\n>
+static <scope.name>_Scope *<scope.name>_scope;
+<endif>
+%>
+
+ruleAttributeScopeDecl(scope) ::= <%
+/* ruleAttributeScopeDecl */<\n>
+<if(scope.attributes)>
+<scope.name>_Scope *<scope.name>_scope;<\n>
+<endif>
+%>
+
+returnStructName(r) ::= "<className()>_<r.name>_return"
+
+returnType() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor:returnStructName()> *
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+<else>
+void
+<endif>
+%>
+
+/** Generate the Objective-C type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+<className()>_<referencedRule.name>_return *<else>
+<if(referencedRule.hasSingleReturnValue)><referencedRule.singleValueReturnType><else>
+void<endif>
+<endif>
+%>
+
+delegateName(d) ::= <% <if(d.label)><d.label><else>g<d.name><endif> %>
+
+/** Using the type-to-init-value map, try to init a type; if the type is not
+ *  in the table it must be an object, whose default value is "null".
+ */
+initValue(typeName) ::= <% <objcTypeInitMap.(typeName)> %>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <%
+<ruleLabelType(referencedRule=label.referencedRule)> <label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+%>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScopeInterface(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+/* returnScopeInterface <ruleDescriptor:returnStructName()> */
+@interface <ruleDescriptor:returnStructName()> : <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope { /* returnScopeInterface line 1838 */
+<@memVars()> /* ObjC start of memVars() */<\n>
+<if(scope.attributes)>
+<memVars(scope)><\n>
+<endif>
+}
+/* start property declarations */
+<@properties()><\n>
+<if(scope.attributes)>
+<properties(scope)><\n>
+<endif>
+/* start of method declarations */<\n>
++ (<ruleDescriptor:returnStructName()> *)new<ruleDescriptor:returnStructName()>;
+/* this is start of set and get methods */
+<@methodsDecl()>  /* methodsDecl */<\n>
+<if(scope.attributes)>
+/* start of iterated get and set functions */<\n>
+<methodsDecl(scope)><\n>
+<endif>
+@end /* end of returnScopeInterface interface */<\n>
+<endif>
+>>
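+
+/* A rough, hypothetical expansion of returnScopeInterface for a parser rule
+ * "expr" in a grammar whose generated class is SimpleParser, with one declared
+ * return attribute "NSInteger value" (all of those names are made up):
+ *
+ *     @interface SimpleParser_expr_return : ParserRuleReturnScope {
+ *         NSInteger value;
+ *     }
+ *     @property (assign, getter=getvalue, setter=setvalue:) NSInteger value;
+ *     + (SimpleParser_expr_return *)newSimpleParser_expr_return;
+ *     - (NSInteger)getvalue;
+ *     - (void)setvalue:(NSInteger)aVal;
+ *     @end
+ */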
+
+returnScopeImplementation(scope) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+@implementation <ruleDescriptor:returnStructName()> /* returnScopeImplementation */<\n>
+<@synthesize()> /* start of synthesize -- OBJC-Line 1837 */<\n>
+<if(scope.attributes)>
+    <synthesize(scope)><\n>
+<endif>
++ (<ruleDescriptor:returnStructName()> *)new<ruleDescriptor:returnStructName()><\n>
+{<\n>
+    return [[[<ruleDescriptor:returnStructName()> alloc] init] retain];<\n>
+}<\n>
+<\n>
+- (id) init<\n>
+{<\n>
+    self = [super init];<\n>
+    return self;<\n>
+}<\n>
+<\n>
+<@methods()><\n>
+<if(scope.attributes)>
+/* start of iterate get and set functions */<\n>
+<methods(scope)><\n>
+/* End of iterate get and set functions */<\n>
+<endif>
+<actions.(actionScope).ruleReturnMethods>
+<@ruleReturnMembers()><\n>
+@end /* end of returnScope implementation */<\n><\n>
+<endif>
+%>
+
+parameterScope(scope) ::= <<
+<! <scope.attributes:{it | :(<it.type>)<it.name>}; separator=" "> !>
+<first(scope.attributes):{ a | :(<a.type>)<a.name>}> <rest(scope.attributes):{ a | arg<i>:(<a.type>)<a.name> }; separator=" ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>;"
+
+/** Note that the scopeAttributeRef does not have access to the
+ * grammar name directly
+ */
+scopeAttributeRef(scope,attr,index,negIndex) ::= <%
+<if(negIndex)>
+((<scope>_Scope *)[<scope>_stack objectAtIndex:([<scope>_stack size]-<negIndex>-1)]).<attr.name>
+<else>
+<if(index)>
+((<scope>_Scope *)[<scope>_stack objectAtIndex:<index>]).<attr.name>
+<else>
+((<scope>_Scope *)[<scope>_stack peek]).<attr.name>
+<endif>
+<endif>
+%>
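+
+/* Rough, hypothetical expansions of scopeAttributeRef for a dynamic scope
+ * named Symbols with an attribute "names" (scope and attribute names are
+ * made up):
+ *
+ *     ((Symbols_Scope *)[Symbols_stack peek]).names            // $Symbols::names
+ *     ((Symbols_Scope *)[Symbols_stack objectAtIndex:0]).names // $Symbols[0]::names
+ */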
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
+/* scopeSetAttributeRef */
+<if(negIndex)>
+((<scope>_Scope *)[<scope>_stack objectAtIndex:([<scope>_stack size]-<negIndex>-1)]).<attr.name> = <expr>;
+<else>
+<if(index)>
+((<scope>_Scope *)[<scope>_stack objectAtIndex:<index>]).<attr.name> = <expr>;
+<else>
+((<scope>_Scope *)[<scope>_stack peek]).<attr.name> = <expr>;
+<endif>
+<endif>
+%>
+
+scopeAttributeRefStack() ::= <<
+/* scopeAttributeRefStack */
+<if(negIndex)>
+((<scope>_Scope *)[<scope>_stack objectAtIndex:[<scope>_stack count]-<negIndex>-1]).<attr.name> = <expr>;
+<else>
+<if(index)>
+((<scope>_Scope *)[<scope>_stack objectAtIndex:<index>]).<attr.name> = <expr>;
+<else>
+((<scope>_Scope *)[<scope>_stack peek]).<attr.name> = <expr>;
+<endif>
+<endif>
+>>
+
+/** $x is either a global scope or x is a rule with a dynamic scope; refers
+ *  to the stack itself, not the top of the stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+(<scope>!=nil?<scope>.<attr.name>:<initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>  /* added to returnAttributeRef */<\n>
+<else>
+<attr.name><\n>
+<endif>
+%>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+ retval.<attr.name> =<expr>; /* added to returnSetAttributeRef */<\n>
+<else>
+<attr.name> = <expr>;<\n>
+<endif>
+%>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+/* not sure the next are the right approach; and they are evaluated early; */
+/* they cannot see TREE_PARSER or PARSER attributes for example. :( */
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(<scope>!=nil?<scope>.text:nil)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(<scope>!=nil?<scope>.type:0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(<scope>!=nil?<scope>.line:0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=nil?<scope>.charPositionInLine:0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=nil?<scope>.channel:0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(<scope>!=nil?[<scope> getTokenIndex]:0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(<scope>!=nil?[<scope>.text integerValue]:0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(<scope>!=nil?((<labelType> *)<scope>.start):nil)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(<scope>!=nil?((<labelType> *)<scope>.stopToken):nil)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(<scope>!=nil?((<ASTLabelType> *)<scope>.tree):nil)"
+ruleLabelPropertyRef_text(scope,attr) ::= <%
+<if(TREE_PARSER)>
+(<scope>!=nil?[[input getTokenStream] toStringFromStart:[[input getTreeAdaptor] getTokenStartIndex:[<scope> getStart]]
+         ToEnd:[[input getTreeAdaptor] getTokenStopIndex:[<scope> getStart]]]:0)
+<else>
+(<scope>!=nil?[input toStringFromToken:[<scope> getStart] ToToken:[<scope> getStop]]:0)
+<endif>
+%>
+ruleLabelPropertyRef_st(scope,attr) ::= "(<scope>!=nil?[<scope> st]:nil)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "(<scope>!=nil?<scope>.type:0)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "(<scope>!=nil?<scope>.line:0)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "(<scope>!=nil?<scope>.charPositionInLine:-1)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "(<scope>!=nil?<scope>.channel:0)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "(<scope>!=nil?[<scope> getTokenIndex]:0)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "(<scope>!=nil?<scope>.text:nil)"
+lexerRuleLabelPropertyRef_int(scope,attr) ::="(<scope>!=nil?[<scope>.text integerValue]:0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "((<labelType> *)retval.start)"
+rulePropertyRef_stop(scope,attr) ::= "((<labelType> *)retval.stopToken)"
+rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType> *)retval.tree)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+[[input getTokenStream] toStringFromStart:[[input getTreeAdaptor] getTokenStartIndex:retval.start.token.startIndex]
+                                    ToEnd:[[input getTreeAdaptor] getTokenStopIndex:retval.start.token.stopIndex]]
+<else>
+[input toStringFromToken:retval.start ToToken:[input LT:-1]]
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+/* hideous: find a way to cut down on the number of templates to support read/write access */
+/* TODO: also, which ones are valid to write to? ask Ter */
+lexerRuleSetPropertyRef_text(scope,attr,expr) ::= "state.text = <expr>;"
+lexerRuleSetPropertyRef_type(scope,attr,expr) ::= "_type"
+lexerRuleSetPropertyRef_line(scope,attr,expr) ::= "state.tokenStartLine"
+lexerRuleSetPropertyRef_pos(scope,attr,expr) ::= "state.tokenStartCharPositionInLine"
+lexerRuleSetPropertyRef_index(scope,attr,expr) ::= "-1" /* undefined token index in lexer */
+lexerRuleSetPropertyRef_channel(scope,attr,expr) ::= "state.channel=<expr>;"
+lexerRuleSetPropertyRef_start(scope,attr,expr) ::= "state.tokenStartCharIndex"
+lexerRuleSetPropertyRef_stop(scope,attr,expr) ::= "(input.index-1)"
+
+
+lexerRulePropertyRef_text(scope,attr) ::= "self.text"
+lexerRulePropertyRef_type(scope,attr) ::= "state.type"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(input.index-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "[<scope>.text integerValue]"
+
+// Setting $st and $tree is allowed in the local rule; everything else
+// is flagged as an error.
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.start =<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;" /* "<\n>#error StringTemplates are unsupported<\n>" */
+
+
+/** How to execute an action */
+execAction(action) ::= <<
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {
+    <action>
+}
+<else>
+<action>
+<endif>
+>>
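+
+/* A rough, hypothetical expansion of execAction with backtracking on, assuming
+ * the synpred gate renders as a backtracking check and the action is a user
+ * NSLog statement (both are made up):
+ *
+ *     if ( state.backtracking == 0 ) {
+ *         NSLog(@"matched a declaration");
+ *     }
+ */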
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+static ANTLRBitSet *<name>;
+static const unsigned long long <name>_data[] = { <words64:{it | <it>LL};separator=", ">};<\n>
+>>
+
+bitsetInit(name, words64) ::= <<
+<name> = [[ANTLRBitSet newBitSetWithBits:(const unsigned long long *)<name>_data Count:(NSUInteger)<length(words64)>] retain];<\n>
+>>
+
+codeFileExtension() ::= ".m"
+
+true_value() ::= "YES"
+false_value() ::= "NO"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ST.stg
new file mode 100644
index 0000000..398d34f
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ST.stg
@@ -0,0 +1,371 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template subgroup to add template rewrite output
+ *  If debugging, then you'll also get STDbg.stg loaded.
+ */
+
+@returnScopeInterface.memVars() ::= <<
+<@super.memVars()>
+/* ST returnInterface.memVars */
+ST *st;
+>>
+
+@returnScopeInterface.properties() ::= <<
+<@super.properties()>
+/* ST returnScope.properties */
+@property (retain, getter=getST, setter=setST:) ST *st;
+>>
+
+@returnScopeInterface.methodsDecl() ::= <<
+<@super.methodsDecl()>
+/* ST AST returnScopeInterface.methodsDecl */
+- (id) getTemplate;
+- (NSString *) toString;
+- (NSString *) description;
+>>
+
+@returnScopeInterface() ::= <<
+/* ST returnScopeInterface */
+@interface <returnScopeInterface.name> : ReturnScope {
+<returnScopeInterface.memVars()>
+}
+<returnScopeInterface.properties()>
+
+<returnScopeInterface.methods()>
+@end
+>>
+
+@returnScopeImplementation.synthesize() ::= <<
+<@super.synthesize()>
+/* ST returnScope.synthesize */
+@synthesize st;
+>>
+
+@returnScopeImplementation.methods() ::= <<
+<@super.methods()>
+/* ST returnScope.methods */
+- (id) getTemplate { return st; }
+- (NSString *) toString { return st==nil?nil:[st toString]; }
+- (NSString *) description { return [self toString]; }
+>>
+
+@returnScopeImplementation() ::= <<
+@implementation <returnScopeImplementation.name>
+<returnScopeImplementation.synthesize()>
+
+<returnScopeImplementation.methods()>
+@end
+>>
+
+/** Add this to each rule's return value struct */
+@returnScope.ruleReturnMembers() ::= <<
+<@super.ruleReturnMembers()>
+/* ST returnScope.ruleReturnMembers -- empty */
+>>
+
+@genericParserHeaderFile.memVars() ::= <<
+<@super.memVars()>
+/* ST genericParserHeaderFile.memVars -- empty now */
+STGroup *templateLib; /* ST -- really a part of STAttrMap */
+>>
+
+@genericParserHeaderFile.properties() ::= <<
+<@super.properties()>
+/* ST genericParser.properties */
+@property (retain, getter=getTemplateLib, setter=setTemplateLib:) STGroup *templateLib;
+>>
+
+@genericParserHeaderFile.methodsDecl() ::= <<
+<@super.methodsDecl()>
+/* ST genericParser.methodsDecl */
+- init;
+- (STGroup *) getTemplateLib;
+- (void) setTemplateLib:(STGroup *)aTemplateLib;
+@end
+>>
+
+@genericParser.synthesize() ::= <<
+<@super.synthesize()>
+/* ST genericParserImplementation.synthesize */
+@synthesize templateLib;
+>>
+
+@genericParser.methods() ::= <<
+<@super.methods()>
+/* ST genericParser.methods */
+
+- (STGroup *)getTemplateLib
+{
+    return templateLib;
+}
+
+- (void) setTemplateLib:(STGroup *)aTemplateLib
+{
+    templateLib = aTemplateLib;
+}
+
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+STGroup *templateLib = [STGroup  newSTGroup:@"<name>Templates"];
+
+- (STGroup *) getTemplateLib
+{
+  return templateLib;
+}
+
+- (void) setTemplateLib:(STGroup *) aTemplateLib
+{
+  templateLib = aTemplateLib;
+}
+
+/** allows convenient multi-value initialization:
+ *  "new STAttrMap().put(...).put(...)"
+ */
+/* REPLACE THIS STATIC CLASS
+static class STAttrMap extends HashMap {
+- (STAttrMap *) setObject:(id)aValue forKey:(NSString *)attrName
+{
+    [super setObject:aValue forKey:attrName];
+    return self;
+}
+- (STAttrMap *) setObjectWithInt:(NSInteger)value forKey:(NSString *)attrName
+{
+    [super setObject:[NSNumber numberWithInteger:value] forKey:attrName];
+    return self;
+  }
+}
+*/
+>>
+
+@STAttrMap() ::= <<
+/* -------- ST start STAttrMap ------------ */
+<@STAttrMap.interface()>
+<@STAttrMap.implementation()>
+/* -------- ST end STAttrMap ------------ */
+>>
+
+@STAttrMap.interface() ::= <<
+/* -------- ST start STAttrMap.interface ------------ */
+@interface STAttrMap : HashMap {
+/*    <@STAttrMap.memVars()> */
+    STGroup *templateLib;
+}
+
+/*    <@STAttrMap.properties()> */
+@property (retain, getter=getTemplateLib, setter=setTemplateLib:) STGroup *templateLib;
+/* <@STAttrMap.methodsDecl()> */
+- (id) init;
+- (STAttrMap *) setObject:(id)value forKey:(NSString *)attrName;
+- (STAttrMap *) setObjectWithInt:(NSInteger)value forKey:(NSString *)attrName;
+- (void) setTemplateLib:(STGroup *)aTemplateLib;
+- (STGroup *) getTemplateLib;
+@end
+/* -------- ST end STAttrMap.interface ------------ */
+>>
+
+@STAttrMap.implementation() ::= <<
+/* -------- ST start STAttrMap.implementation ------------ */
+/** allows convenient multi-value initialization:
+ *  "new STAttrMap().put(...).put(...)"
+ */
+@implementation STAttrMap
+@synthesize templateLib;
+
+<@STAttrMap.methods()>
+@end
+/* -------- ST end STAttrMap.implementation ------------ */
+>>
+
+@STAttrMap.memVars() ::= <<
+/* -------- ST start STAttrMap.memVars ------------ */
+    STGroup *templateLib;
+/* -------- ST end STAttrMap.memVars ------------ */
+>>
+
+@STAttrMap.properties() ::= <<
+/* -------- ST start STAttrMap.properties ------------ */
+@property (retain, getter=getTemplateLib, setter=setTemplateLib:) STGroup *templateLib;
+/* -------- ST end STAttrMap.properties ------------ */
+>>
+
+@STAttrMap.methodsDecl() ::= <<
+/* -------- ST start STAttrMap.methodsDecl ------------ */
+- (id) init;
+- (STAttrMap *) setObject:(id)value forKey:(NSString *)attrName;
+- (STAttrMap *) setObjectWithInt:(NSInteger)value forKey:(NSString *)attrName;
+- (void) setTemplateLib:(STGroup *)aTemplateLib;
+- (STGroup *) getTemplateLib;
+/* -------- ST end STAttrMap.methodsDecl ------------ */
+>>
+
+@STAttrMap.methods() ::= <<
+/* -------- ST start STAttrMap.methods ------------ */
+- (id) init
+{
+    self = [super initWithLen:16];
+    if ( self != nil ) {
+        templateLib = [STGroup newSTGroup:@"<name>Templates"];
+    }
+    return self;
+}
+
+- (STAttrMap *) setObject:(id)aValue forKey:(NSString *)aAttrName
+{
+    [super setObject:aValue forKey:aAttrName];
+    return self;
+}
+
+- (STAttrMap *) setObjectWithInt:(NSInteger)aValue forKey:(NSString *)aAttrName
+{
+    [super setObject:[NSNumber numberWithInteger:aValue] forKey:aAttrName];
+    return self;
+}
+- (void) setTemplateLib:(STGroup *)aTemplateLib
+{
+    templateLib = aTemplateLib;
+}
+
+- (STGroup *)getTemplateLib
+{
+    return templateLib;
+}
+/* -------- ST end STAttrMap.methods ------------ */
+>>
+
+/** x+=rule when output=template */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+/* ST ruleRefAndListLabel */
+<ruleRef(...)>
+<listLabel(elem={[<label> getTemplate]},...)>
+>>
+
+rewriteTemplate(alts) ::= <<
+/* -------- ST start rewriteTemplate ------------ */
+// TEMPLATE REWRITE
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {
+    <alts:rewriteTemplateAlt(); separator="else ">
+    <if(rewriteMode)><replaceTextInLine()><endif>
+}
+<else>
+<alts:rewriteTemplateAlt(); separator="else ">
+<if(rewriteMode)><replaceTextInLine()><endif>
+<endif>
+/* -------- ST end rewriteTemplate ------------ */
+>>
+
+replaceTextInLine() ::= <<
+/* -------- ST start replaceTextInLine ------------ */
+<if(TREE_PARSER)>
+[[(TokenRewriteStream *)input getTokenStream]
+    replaceFromIndex:[[input getTreeAdaptor] getTokenStartIndex:retval.start]
+             ToIndex:[[input getTreeAdaptor] getTokenStopIndex:retval.start]
+                Text:retval.st];
+<else>
+[(TokenRewriteStream *)input
+        replaceFromIndex:[((CommonToken *)retval.start) getTokenIndex]
+                 ToIndex:[[input LT:-1] getTokenIndex]
+                    Text:retval.st];
+<endif>
+/* -------- ST end replaceTextInLine ------------ */
+>>
+
+rewriteTemplateAlt() ::= <<
+/* -------- ST start rewriteTemplateAlt ------------ */
+/* ST <it.description> */
+<if(it.pred)>
+if (<it.pred>) {
+    retval.st = <it.alt>;
+}<\n>
+<else>
+{
+    retval.st = <it.alt>;
+}<\n>
+<endif>
+/* -------- ST end rewriteTemplateAlt ------------ */
+>>
+
+rewriteEmptyTemplate(alts) ::= <<
+nil;
+>>
+
+/** Invoke a template with a set of attribute name/value pairs.
+ *  Set the value of the rule's template *after* having set
+ *  the attributes because the rule's template might be used as
+ *  an attribute to build a bigger template; you get a self-embedded
+ *  template.
+ */
+rewriteExternalTemplate(name,args) ::= <<
+/* -------- ST start rewriteExternalTemplate ------------ */
+[templateLib getInstanceOf:@"<name>"
+<if(args)>[[STAttrMap newSTAttrMap] <args:{a | setObject:<a.value> forKey:@"<a.name>"]}><endif>]
+/* -------- ST end rewriteExternalTemplate ------------ */
+>>
+
+/** expr is a string expression that says what template to load */
+rewriteIndirectTemplate(expr,args) ::= <<
+/* -------- ST start rewriteIndirectTemplate ------------ */
+[templateLib getInstanceOf:<expr>
+<if(args)> [[STAttrMap newSTAttrMap]<args:{a | setObject:<a.value> forKey:@"<a.name>"]}>]
+<else>]<endif>
+/* -------- ST end rewriteIndirectTemplate ------------ */
+>>
+
+/** Invoke an inline template with a set of attribute name/value pairs */
+rewriteInlineTemplate(args, template) ::= <<
+/* -------- ST start rewriteInlineTemplate ------------ */
+STGroup *templateLib;
+templateLib.templates = [STAttrMap newSTAttrMap];
+<if(args)> [templateLib.templates <args:{a | setObject:<a.value> forKey:@"<a.name>"];}><endif>
+[ST newST:templateLib template:@"<template>"];
+/* -------- ST end rewriteInlineTemplate ------------ */
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+/* -------- ST start rewriteAction ------------ */
+<action>
+/* -------- ST end rewriteAction ------------ */
+>>
+
+/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
+actionSetAttribute(st,attrName,expr) ::= <<
+/* -------- ST start actionSetAttribute ------------ */
+[[ST attributes] setObject:<expr> forKey:@"<attrName>"];
+<![<st> setAttribute:<expr> name:@"<attrName>"];!>
+/* -------- ST end actionSetAttribute ------------ */
+>>
+
+/** Translate %{stringExpr} */
+actionStringConstructor(stringExpr) ::= <<
+/* -------- ST start actionStringConstructor ------------ */
+[ST newSTWithTemplate:<stringExpr>]
+/* -------- ST end actionStringConstructor ------------ */
+>>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ST4ObjC.stg b/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ST4ObjC.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/ObjC/ST4ObjC.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/ObjC/ST4ObjC.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Perl5/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Perl5/ASTTreeParser.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Perl5/ASTTreeParser.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Perl5/ASTTreeParser.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Perl5/Perl5.stg b/tool/src/main/resources/org/antlr/codegen/templates/Perl5/Perl5.stg
new file mode 100644
index 0000000..1eb08ee
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Perl5/Perl5.stg
@@ -0,0 +1,1373 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ Copyright (c) 2007-2008 Ronald Blaschke
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+group Perl5;
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals) ::=
+<<
+# $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+<actions.(actionScope).header>
+
+<@imports>
+<if(TREE_PARSER)>
+<endif>
+<if(backtracking)>
+<endif>
+<@end>
+
+<docComment>
+<recognizer>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="Token",
+      filterMode, superClass="ANTLR::Runtime::Lexer")  ::= <<
+package <name>;
+
+use Carp;
+use English qw( -no_match_vars ) ;
+use Readonly;
+use Switch;
+
+use ANTLR::Runtime::BaseRecognizer;
+use ANTLR::Runtime::DFA;
+use ANTLR::Runtime::NoViableAltException;
+
+use Moose;
+
+extends 'ANTLR::Runtime::Lexer';
+
+Readonly my $HIDDEN => ANTLR::Runtime::BaseRecognizer->HIDDEN;
+sub HIDDEN { $HIDDEN }
+
+use constant {
+    <tokens:{ <it.name> => <it.type>, }; separator="\n">
+};
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+<actions.lexer.members>
+
+sub BUILD {
+    my ($self, $arg_ref) = @_;
+
+    $self->init_dfas();
+}
+
+sub get_grammar_file_name {
+    return "<fileName>";
+}
+
+<if(filterMode)>
+<filteringNextToken()>
+<endif>
+<rules; separator="\n\n">
+
+<synpreds:{p | <lexerSynpred(p)>}>
+
+<cyclicDFAs:{dfa | has 'dfa<dfa.decisionNumber>';}; separator="\n">
+
+sub init_dfas {
+    my ($self) = @_;
+
+    <cyclicDFAs:{dfa |
+    $self->dfa<dfa.decisionNumber>(<name>::DFA<dfa.decisionNumber>->new({ recognizer => $self }));
+    }; separator="\n">
+
+    return;
+}
+
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+no Moose;
+__PACKAGE__->meta->make_immutable();
+1;
+
+>>
+
+perlTypeInitMap ::= [
+	"$":"undef",
+	"@":"()",
+	"%":"()",
+	default:"undef"
+]
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error is reported upon failure; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+public Token nextToken() {
+    while (true) {
+        if ( input.LA(1)==CharStream.EOF ) {
+            return Token.EOF_TOKEN;
+        }
+        token = null;
+	channel = Token.DEFAULT_CHANNEL;
+        tokenStartCharIndex = input.index();
+        tokenStartCharPositionInLine = input.getCharPositionInLine();
+        tokenStartLine = input.getLine();
+	text = null;
+        try {
+            int m = input.mark();
+            backtracking=1; <! means we won't throw slow exception !>
+            failed=false;
+            mTokens();
+            backtracking=0;
+            <! mTokens backtracks with synpred at backtracking==2
+               and we set the synpredgate to allow actions at level 1. !>
+            if ( failed ) {
+                input.rewind(m);
+                input.consume(); <! advance one char and try again !>
+            }
+            else {
+                emit();
+                return token;
+            }
+        }
+        catch (RecognitionException re) {
+            // shouldn't happen in backtracking mode, but...
+            reportError(re);
+            recover(re);
+        }
+    }
+}
+
+public void memoize(IntStream input,
+		int ruleIndex,
+		int ruleStartIndex)
+{
+if ( backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
+}
+
+public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+if ( backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
+return false;
+}
+>>
+
+actionGate() ::= "$self->state->backtracking==0"
+
+filteringActionGate() ::= "backtracking==1"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass, filterMode,
+              ASTLabelType="Object", labelType, members) ::= <<
+package <name>;
+
+use English qw( -no_match_vars ) ;
+use Readonly;
+use Switch;
+use Carp;
+use ANTLR::Runtime::BitSet;
+
+use Moose;
+
+extends '<@superClassName><superClass><@end>';
+
+Readonly my $token_names => [
+    "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
+];
+
+use constant {
+<tokens:{ <it.name> => <it.type>, }; separator="\n">
+};
+
+<bitsets:bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits)>
+
+<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+<@members>
+<! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+
+sub BUILD {
+    my ($self, $arg_ref) = @_;
+
+<if(backtracking)>
+    $self->state->rule_memo({});<\n>
+<endif>
+}
+<@end>
+
+sub get_token_names {
+    return $token_names;
+}
+
+sub get_grammar_file_name {
+    return "<fileName>";
+}
+
+<members>
+
+<rules; separator="\n\n">
+
+<synpreds:{p | <synpred(p)>}>
+
+<cyclicDFAs:{dfa | dfa<dfa.decisionNumber> = __PACKAGE__::DFA<dfa.decisionNumber>->new($self);}; separator="\n">
+<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+no Moose;
+__PACKAGE__->meta->make_immutable();
+1;
+__END__
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType, superClass="ANTLR::Runtime::Parser", labelType="ANTLR::Runtime::Token", members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="ANTLR::Runtime::TokenStream", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="ANTLR::Runtime::TreeParser", members={<actions.treeparser.members>}, filterMode) ::= <<
+<genericParser(inputStreamType="TreeNodeStream", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values,
+ *  parameters, etc., just generate the simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+# $ANTLR start <ruleName>
+sub <ruleName>_fragment {
+# <ruleDescriptor.parameterScope:parameterScope(scope=it)>
+
+<if(trace)>
+    $self->traceIn("<ruleName>_fragment", <ruleDescriptor.index>);
+    eval {
+        <block>
+    };
+    $self->traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    if ($EVAL_ERROR) {
+        croak $EVAL_ERROR;
+    }
+<else>
+    <block>
+<endif>
+}
+# $ANTLR end <ruleName>
+>>
+
+synpred(name) ::= <<
+public final boolean <name>() {
+    backtracking++;
+    <@start()>
+    int start = input.mark();
+    try {
+        <name>_fragment(); // can never throw exception
+    } catch (RecognitionException re) {
+        System.err.println("impossible: "+re);
+    }
+    boolean success = !failed;
+    input.rewind(start);
+    <@stop()>
+    backtracking--;
+    failed=false;
+    return success;
+}<\n>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()>; }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>
+if ($self->state->failed) {
+    return <ruleReturnValue()>;
+}
+<endif>
+>>
+
+/** This rule has failed; exit indicating failure during backtracking */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (backtracking>0) {failed=true; return <ruleReturnValue()>;}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+# $ANTLR start <ruleName>
+# <fileName>:<description>
+sub <ruleName>() {
+    my ($self, <ruleDescriptor.parameterScope:parameterScope(scope=it)>) = @_;
+    <if(trace)>$self->traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    eval {
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+    };
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    my $exception = $EVAL_ERROR;
+    if (ref $exception && $exception->isa('ANTLR::Runtime::RecognitionException')) {
+        $self->report_error($exception);
+        $self->recover($self->input, $exception);
+        $exception = undef;
+    }<\n>
+<endif>
+<endif>
+<endif>
+    <if(trace)>$self->traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+    <memoize()>
+    <ruleScopeCleanUp()>
+    <finally>
+    if ($exception) {
+        croak $exception;
+        #$exception->rethrow();
+    }
+    <@postamble()>
+    return <ruleReturnValue()>;
+}
+# $ANTLR end <ruleName>
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>) {
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+my $retval = <returnType()>->new();
+$retval->set_start($self->input->LT(1));<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+my $<a.name> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>;
+}>
+<endif>
+<if(memoize)>
+my $<ruleDescriptor.name>_start_index = $self->input->index();
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.push(new <it>_scope());}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.push(new <it.name>_scope());}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{<it>_stack.pop();}; separator="\n">
+<ruleDescriptor.ruleScope:{<it.name>_stack.pop();}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels]
+    :{my $<it.label.text> = undef;}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]
+    :{List list_<it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(label=it); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|RuleReturnScope <ll.label.text> = null;}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{<labelType> <it.label.text>=null;}; separator="\n"
+>
+<ruleDescriptor.charLabels:{my $<it.label.text>;}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{List list_<it.label.text>=null;}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+$<ruleDescriptor.singleValueReturnName>
+<else>
+$retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+$retval->set_stop($self->input->LT(-1));<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex); }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+# $ANTLR start <ruleName>
+sub m_<ruleName> {
+# <ruleDescriptor.parameterScope:parameterScope(scope=it)>
+    my ($self) = @_;
+    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>);<endif>
+    <ruleDeclarations()>
+    eval {
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        my $_type = <ruleName>;
+        my $_channel = $self->DEFAULT_TOKEN_CHANNEL;
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        $self->state->type($_type);
+        $self->state->channel($_channel);
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    };
+    <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+    <memoize()>
+
+    if ($EVAL_ERROR) {
+        croak $EVAL_ERROR;
+    }
+}
+# $ANTLR end <ruleName>
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+sub m_tokens {
+    my ($self) = @_;
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+my $alt<decisionNumber> = <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+switch ($alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+my $alt<decisionNumber> = <maxAlt>;
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+switch ($alt<decisionNumber>) {
+    <alts:altSwitchCase()>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+my $cnt<decisionNumber> = 0;
+<decls>
+<@preloop()>
+LOOP<decisionNumber>:
+while (1) {
+    my $alt<decisionNumber> = <maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch ($alt<decisionNumber>) {
+	    <alts:altSwitchCase()>
+	    else {
+	        if ( $cnt<decisionNumber> >= 1 ) { last LOOP<decisionNumber> }
+	        <ruleBacktrackFailure()>
+            my $eee =
+                ANTLR::Runtime::EarlyExitException->new(<decisionNumber>, $self->input);
+            <@earlyExitException()>
+            croak $eee;
+        }
+    }
+    ++$cnt<decisionNumber>;
+}
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@preloop()>
+LOOP<decisionNumber>:
+while (1) {
+    my $alt<decisionNumber> = <maxAlt>;
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    switch ($alt<decisionNumber>) {
+	    <alts:altSwitchCase()>
+	    else { last LOOP<decisionNumber> }
+    }
+}
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation,
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase() ::= <<
+case <i> {
+    <@prealt()>
+    <it>
+}<\n>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+# <fileName>:<description>
+{
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+}
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element() ::= <<
+<@prematch()>
+<it.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)>$<label> =<endif>$self->match($self->input, <token>, $FOLLOW_<token>_in_<ruleName><elementIndex>);
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label>==null) list_<label>=new ArrayList();
+list_<label>.add(<elem>);<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = $self->input->LA(1);<\n>
+<endif>
+$self->match(<char>); <checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = $self->input->LA(1);<\n>
+<endif>
+$self->match_range(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= $self->input->LA(1);<\n>
+<else>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+<endif>
+if ( <s> ) {
+    $self->input->consume();
+    <postmatchCode>
+<if(!LEXER)>
+    $self->state->error_recovery(0);
+<endif>
+    <if(backtracking)>failed=false;<endif>
+}
+else {
+    <ruleBacktrackFailure()>
+    my $mse =
+        ANTLR::Runtime::MismatchedSetException->new(undef, $self->input);
+    <@mismatchedSetException()>
+<if(LEXER)>
+    $self->recover($mse);
+    $mse->throw();
+<else>
+    $mse->throw();
+    <! use following code to make it recover inline; remove throw mse;
+    $self->recoverFromMismatchedSet($self->input, $mse, $FOLLOW_set_in_<ruleName><elementIndex>);
+    !>
+<endif>
+}<\n>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex) ::= <<
+<if(label)>
+int <label>Start = getCharIndex();
+$self->match(<string>); <checkRuleBacktrackFailure()>
+<labelType> <label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1);
+<else>
+$self->match(<string>); <checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(label,elementIndex) ::= <<
+<if(label)>
+<label>=(<labelType>)input.LT(1);<\n>
+<endif>
+matchAny(input); <checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(label,elementIndex) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = $self->input->LA(1);<\n>
+<endif>
+matchAny(); <checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it possibly with arguments
+ *  and a return value or values.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+$self->push_follow($FOLLOW_<rule.name>_in_<ruleName><elementIndex>);
+<if(label)>
+$<label> = $self-><rule.name>(<args; separator=", ">);<\n>
+<else>
+$self-><rule.name>(<args; separator=", ">);<\n>
+<endif>
+$self->state->_fsp($self->state->_fsp - 1);
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument used to be the target rule name, but is now
+ *  of type Rule, whose toString is the same: the rule name.
+ *  Now, though, you can access the full rule descriptor.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = getCharIndex();
+$self->m_<rule>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<label> = new CommonToken(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
+<else>
+$self->m_<rule.name>(<args; separator=", ">); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+int <label>Start<elementIndex> = getCharIndex();
+match(EOF); <checkRuleBacktrackFailure()>
+<labelType> <label> = new CommonToken(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1);
+<else>
+match(EOF); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==Token.DOWN ) {
+    match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+    <children:element()>
+    match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+}
+<else>
+match(input, Token.DOWN, null); <checkRuleBacktrackFailure()>
+<children:element()>
+match(input, Token.UP, null); <checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) {
+    <ruleBacktrackFailure()>
+    throw new FailedPredicateException(input, "<ruleName>", "<description>");
+}
+>>
+
+// F i x e d  D F A  (if-then-else)
+
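+// Note: the edge lists in the DFA state templates below are joined with
+// separator="\nels"; the first edge renders as "if (...)" and each later
+// edge's leading "if" becomes "elsif", giving a Perl if/elsif chain, with an
+// optional trailing "else" for the no-match case.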
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+my $LA<decisionNumber>_<stateNumber> = $self->input->LA(<k>);<\n>
+<edges; separator="\nels">
+else {
+<if(eotPredictsAlt)>
+    $alt<decisionNumber> = <eotPredictsAlt>;
+<else>
+    <ruleBacktrackFailure()>
+    my $nvae =
+        ANTLR::Runtime::NoViableAltException->new({
+            grammar_decision_description => "<description>",
+            decision_number => <decisionNumber>,
+            state_number => <stateNumber>,
+            input => $self->input,
+        });<\n>
+    <@noViableAltException()>
+    croak $nvae;<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection, but this
+ *  is faster, smaller, and closer to what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+my $LA<decisionNumber>_<stateNumber> = $self->input->LA(<k>);<\n>
+<edges; separator="\nels">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+my $LA<decisionNumber>_<stateNumber> = $self->input->LA(<k>);<\n>
+<edges; separator="\nels"><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+$alt<decisionNumber> = <eotPredictsAlt>; <! if no edges, don't gen ELSE !>
+<else>
+else {
+    $alt<decisionNumber> = <eotPredictsAlt>;
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "$alt<decisionNumber> = <alt>;"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( $self->input->LA(<k>) ) {
+    <edges; separator="\n">
+    else {
+    <if(eotPredictsAlt)>
+        $alt<decisionNumber> = <eotPredictsAlt>;
+    <else>
+        <ruleBacktrackFailure()>
+        my $nvae =
+            ANTLR::Runtime::NoViableAltException->new({
+                grammar_decision_description => "<description>",
+                decision_number => <decisionNumber>,
+                state_number => <stateNumber>,
+                input => $self->input,
+            });<\n>
+        <@noViableAltException()>
+        croak $nvae;<\n>
+    <endif>
+    }
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( $self->input->LA(<k>) ) {
+    <edges; separator="\n">
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+switch ( $self->input->LA(<k>) ) {
+<edges; separator="\n"><\n>
+<if(eotPredictsAlt)>
+else { $alt<decisionNumber> = <eotPredictsAlt> }<\n>
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+case [<labels:{ <it> }; separator=", ">] { <targetState> }
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+$alt<decisionNumber> = $self->dfa<decisionNumber>->predict($self->input);
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+Readonly my $DFA<dfa.decisionNumber>_eot => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedEOT; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_eof => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedEOF; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_min => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedMin; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_max => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedMax; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_accept => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedAccept; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_special => ANTLR::Runtime::DFA->unpack_rle([ <dfa.javaCompressedSpecial; separator=", "> ]);
+Readonly my $DFA<dfa.decisionNumber>_transition => [ <dfa.javaCompressedTransition:{s|ANTLR::Runtime::DFA->unpack_rle([ <s; separator=", "> ])}; separator=", "> ];
+
+{
+package <name>::DFA<dfa.decisionNumber>;
+use ANTLR::Runtime::Class;
+
+use strict;
+use warnings;
+
+extends 'ANTLR::Runtime::DFA';
+
+sub BUILD {
+    my $self = shift;
+    my $param_ref = __PACKAGE__->unpack_params(@_, {
+        spec => [
+            {
+                name => 'recognizer',
+                isa  => 'ANTLR::Runtime::BaseRecognizer'
+            },
+        ]
+    });
+
+    $self->recognizer($param_ref->{recognizer});
+    $self->decision_number(<dfa.decisionNumber>);
+    $self->eot($DFA<dfa.decisionNumber>_eot);
+    $self->eof($DFA<dfa.decisionNumber>_eof);
+    $self->min($DFA<dfa.decisionNumber>_min);
+    $self->max($DFA<dfa.decisionNumber>_max);
+    $self->accept($DFA<dfa.decisionNumber>_accept);
+    $self->special($DFA<dfa.decisionNumber>_special);
+    $self->transition($DFA<dfa.decisionNumber>_transition);
+}
+
+sub get_description {
+    return "<dfa.description>";
+}
+
+<@errorMethod()>
+
+<if(dfa.specialStateSTs)>
+sub special_state_transition {
+    my ($self, $param_ref) = unpack_params(@_, {
+        spec => [
+            {
+                name => 's',
+                type => SCALAR,
+            },
+            {
+                name => 'input',
+                isa  => 'ANTLR::Runtime::IntStream',
+            }
+        ]
+    });
+    my $s = $param_ref->{s};
+    my $input = $param_ref->{input};
+
+    switch ($s) {
+        <dfa.specialStateSTs:{state |
+        case <i0> \{ <! compressed special state numbers 0..n-1 !>
+            <state>}; separator="\n">
+        }
+    }
+
+<if(backtracking)>
+    if ($self->state->backtracking > 0) {
+        $self->state->failed = 1;
+        return -1;
+    }<\n>
+<endif>
+
+    my $nvae =
+        ANTLR::Runtime::NoViableAltException->new({
+            grammar_decision_description => $self->get_description(),
+            decision_number => <dfa.decisionNumber>,
+            state_number => $s,
+            input => $input,
+        });<\n>
+    $self->error($nvae);
+    $nvae->throw();
+    }<\n>
+<endif>
+}<\n>
+>>
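+
+/* The template above packs its DFA tables with ANTLR::Runtime::DFA->unpack_rle.
+ * For illustration, a minimal Python sketch of the run-length-decoding idea,
+ * assuming the packed form is a flat list of (count, value) pairs; the exact
+ * input format expected by the runtime's unpack_rle may differ.
+ *
+ *   def unpack_rle(packed):
+ *       out = []
+ *       for i in range(0, len(packed), 2):
+ *           count, value = packed[i], packed[i + 1]
+ *           out.extend([value] * count)
+ *       return out
+ *
+ *   # unpack_rle([3, 7, 1, -1]) -> [7, 7, 7, -1]
+ */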
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+my $input = $self->input;
+my $LA<decisionNumber>_<stateNumber> = $input->LA(1);<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+my $index<decisionNumber>_<stateNumber> = $input->index();
+$input->rewind();<\n>
+<endif>
+s = -1;
+<edges; separator="\nels">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.seek(index<decisionNumber>_<stateNumber>);<\n>
+<endif>
+if ( s>=0 ) return s;
+break;
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>;}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber>;<\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left> && <right>)"
+
+orPredicates(operands) ::= "(<operands; separator=\"||\">)"
+
+notPredicate(pred) ::= "!(<evalPredicate(...)>)"
+
+evalPredicate(pred,description) ::= "<pred>"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "$LA<decisionNumber>_<stateNumber> eq <atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "$self->input->LA(<k>) eq <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+($LA<decisionNumber>_<stateNumber> ge <lower> && $LA<decisionNumber>_<stateNumber> le <upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "($self->input->LA(<k>) ge <lower> && $self->input->LA(<k>) le <upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\" || \">"
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected static class <scope.name>_scope {
+    <scope.attributes:{<it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+protected static class <scope.name>_scope {
+    <scope.attributes:{<it.decl>;}; separator="\n">
+}
+protected Stack <scope.name>_stack = new Stack();<\n>
+<endif>
+>>
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor.name>_return
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Generate the type associated with a single or multiple return
+ *  values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+void
+<endif>
+<endif>
+>>
+
+/** Using the type-to-init-value map, try to initialize a type; if it is not
+ *  in the table it must be an object, so the default value is "undef".
+ */
+initValue(typeName) ::= <<
+<if(typeName)>
+<perlTypeInitMap.(typeName)>
+<else>
+undef
+<endif>
+>>
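+
+/* For illustration, the lookup-with-default behaviour above restated as a
+ * small Python sketch; the dictionary mirrors perlTypeInitMap and the
+ * function name is made up for the example.
+ *
+ *   PERL_TYPE_INIT_MAP = {"$": "undef", "@": "()", "%": "()"}
+ *
+ *   def init_value(type_name):
+ *       # Missing or unknown type names fall back to "undef".
+ *       if not type_name:
+ *           return "undef"
+ *       return PERL_TYPE_INIT_MAP.get(type_name, "undef")
+ *
+ *   # init_value("@") == "()",  init_value("MyObject") == "undef"
+ */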
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+my $<label.label.text> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))>;<\n>
+>>
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+{
+    package <returnType()>;
+    use ANTLR::Runtime::Class;
+
+    extends 'ANTLR::Runtime::<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope';
+
+    <scope.attributes:{public <it.decl>;}; separator="\n">
+    <@ruleReturnMembers()>
+}
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{$<it.name>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "$<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "$<attr.name> =<expr>;"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name>
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name>
+<else>
+((<scope>_scope)<scope>_stack.peek()).<attr.name>
+<endif>
+<endif>
+>>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <<
+<if(negIndex)>
+((<scope>_scope)<scope>_stack.elementAt(<scope>_stack.size()-<negIndex>-1)).<attr.name> =<expr>;
+<else>
+<if(index)>
+((<scope>_scope)<scope>_stack.elementAt(<index>)).<attr.name> =<expr>;
+<else>
+((<scope>_scope)<scope>_stack.peek()).<attr.name> =<expr>;
+<endif>
+<endif>
+>>
+
+/** $x is either a global scope or x is a rule with a dynamic scope; refers
+ *  to the stack itself, not the top of the stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+$<scope>.<attr.name>
+<else>
+$<scope>
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+$<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> =<expr>;
+<else>
+$<attr.name> =<expr>;
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "$<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "$<scope>->get_text()"
+tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
+tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
+tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
+tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
+tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "((<labelType>)<scope>.start)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "((<labelType>)<scope>.stop)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)<scope>.tree)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.getTokenStream().toString(
+  input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
+  input.getTreeAdaptor().getTokenStopIndex(<scope>.start))
+<else>
+substr($self->input, $<scope>->start, $<scope>->stop)
+<endif>
+>>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "<scope>.st"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "$<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "<scope>.getType()"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "<scope>.getLine()"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "<scope>.getCharPositionInLine()"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "<scope>.getChannel()"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "<scope>.getTokenIndex()"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "<scope>.getText()"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "((<labelType>)retval.start)"
+rulePropertyRef_stop(scope,attr) ::= "((<labelType>)retval.stop)"
+rulePropertyRef_tree(scope,attr) ::= "((<ASTLabelType>)retval.tree)"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.getTokenStream().toString(
+  input.getTreeAdaptor().getTokenStartIndex(retval.start),
+  input.getTreeAdaptor().getTokenStopIndex(retval.start))
+<else>
+input.toString(retval.start,input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "getText()"
+lexerRulePropertyRef_type(scope,attr) ::= "$_type"
+lexerRulePropertyRef_line(scope,attr) ::= "tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "$_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(getCharIndex()-1)"
+lexerRulePropertyRef_self(scope,attr) ::= "$self"
+
+// Setting $st and $tree is allowed in a local rule; everything else
+// is flagged as an error.
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>;"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>;"
+
+
+/** How to execute an action */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if ( <actions.(actionScope).synpredgate> ) {
+  <action>
+}
+<else>
+if ( backtracking==0 ) {
+  <action>
+}
+<endif>
+<else>
+<action>
+<endif>
+>>
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+Readonly my $<name> => ANTLR::Runtime::BitSet->new({ words64 => [ <words64:{'<it>'};separator=", "> ] });<\n>
+>>
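+
+/* The follow sets above are stored as lists of 64-bit words.  For
+ * illustration, a minimal Python sketch of how such a words64 encoding
+ * answers a membership query; the function name is made up, and the real
+ * ANTLR::Runtime::BitSet exposes its own API.
+ *
+ *   def bitset_member(words64, token_type):
+ *       # Bit i of word w covers token type w*64 + i.
+ *       word, bit = divmod(token_type, 64)
+ *       return word < len(words64) and (words64[word] >> bit) & 1 == 1
+ *
+ *   # A set holding token types 4 and 70:
+ *   #   bitset_member([1 << 4, 1 << 6], 4)  -> True
+ *   #   bitset_member([1 << 4, 1 << 6], 70) -> True
+ *   #   bitset_member([1 << 4, 1 << 6], 5)  -> False
+ */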
+
+codeFileExtension() ::= ".pm"
+
+true() ::= "1"
+false() ::= "0"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/AST.stg
new file mode 100644
index 0000000..f007330
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/AST.stg
@@ -0,0 +1,452 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+finishedBacktracking(block) ::= <<
+<if(backtracking)>
+if <actions.(actionScope).synpredgate>:
+    <block>
+<else>
+<block>
+<endif>
+>>
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+from antlr3.tree import *<\n>
+<endif>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+@genericParser.init() ::= <<
+self._adaptor = None
+self.adaptor = CommonTreeAdaptor()
+<@super.init()>
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+<astAccessor()>
+>>
+
+astAccessor() ::= <<
+def getTreeAdaptor(self):
+    return self._adaptor
+
+def setTreeAdaptor(self, adaptor):
+    self._adaptor = adaptor
+    <grammar.directDelegates:{g|self.<g:delegateName()>.adaptor = adaptor}; separator="\n">
+
+adaptor = property(getTreeAdaptor, setTreeAdaptor)
+>>
+
+@returnScope.ruleReturnInit() ::= <<
+self.tree = None
+>>
+
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+root_0 = None<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
+  ruleDescriptor.wildcardTreeListLabels]
+    :{it | <it.label.text>_tree = None}; separator="\n">
+<ruleDescriptor.tokenListLabels:{it | <it.label.text>_tree = None}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{it | stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>")}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{it | stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "rule <it>")}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  these should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+@alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = self._adaptor.nil()<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<finishedBacktracking({stream_<token>.add(<label>)})>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)>
+<finishedBacktracking({stream_<token>.add(<label>)})>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({stream_<rule.name>.add(<label>.tree)})>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<finishedBacktracking({stream_<rule.name>.add(<label>.tree)})>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+        alts, description,
+        referencedElementsDeep, // ALL referenced elements to right of ->
+        referencedTokenLabels,
+        referencedTokenListLabels,
+        referencedRuleLabels,
+        referencedRuleListLabels,
+        referencedWildcardLabels,
+        referencedWildcardListLabels,
+        rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+# AST Rewrite
+# elements: <referencedElementsDeep; separator=", ">
+# token labels: <referencedTokenLabels; separator=", ">
+# rule labels: <referencedRuleLabels; separator=", ">
+# token list labels: <referencedTokenListLabels; separator=", ">
+# rule list labels: <referencedRuleListLabels; separator=", ">
+# wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<finishedBacktracking({
+<prevRuleRootRef()>.tree = root_0
+<rewriteCodeLabels()>
+root_0 = self._adaptor.nil()
+<first(alts):rewriteAltFirst(); anchor>
+
+<rest(alts):{a | el<rewriteAltRest(a)>}; anchor, separator="\n\n">
+
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.tree = self._adaptor.rulePostProcessing(root_0)
+self.input.replaceChildren(
+    self._adaptor.getParent(retval.start),
+    self._adaptor.getChildIndex(retval.start),
+    self._adaptor.getChildIndex(_last),
+    retval.tree
+    )<\n>
+<endif>
+<endif>
+
+<! if parser or tree-parser and rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.tree = root_0<\n>
+<else>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.tree = root_0<\n>
+<endif>
+<endif>
+})>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{it | stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>", <it>)};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{it | stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>", list_<it>)};
+    separator="\n"
+>
+<referencedWildcardLabels
+    :{it | stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "wildcard <it>", <it>)};
+    separator="\n"
+>
+<referencedWildcardListLabels
+    :{it | stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "wildcard <it>", list_<it>)};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{it |
+if <it> is not None:
+    stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "rule <it>", <it>.tree)
+else:
+    stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "token <it>", None)
+};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{it| stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "token <it>", list_<it>)};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deeply
+  *  referenced element list rather than the shallow one used by other blocks.
+  */
+rewriteOptionalBlock(
+        alt,rewriteBlockLevel,
+        referencedElementsDeep, // all nested refs
+        referencedElements, // elements in the immediate block; no nested blocks
+        description) ::=
+<<
+# <fileName>:<description>
+if <referencedElementsDeep:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+        alt,rewriteBlockLevel,
+        referencedElementsDeep, // all nested refs
+        referencedElements, // elements in the immediate block; no nested blocks
+        description) ::=
+<<
+# <fileName>:<description>
+while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+        alt,rewriteBlockLevel,
+        referencedElementsDeep, // all nested refs
+        referencedElements, // elements in the immediate block; no nested blocks
+        description) ::=
+<<
+# <fileName>:<description>
+if not (<referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">):
+    raise RewriteEarlyExitException()
+
+while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElements:{el | stream_<el>.reset()<\n>}>
+>>
+
+rewriteAltRest(a) ::= <<
+<if(a.pred)>if <a.pred>:
+    # <a.description>
+    <a.alt>
+<else>se: <! little hack to get if .. elif .. else block right !>
+    # <a.description>
+    <a.alt>
+<endif>
+>>
+
+rewriteAltFirst(a) ::= <<
+<if(a.pred)>
+if <a.pred>:
+    # <a.description>
+    <a.alt>
+<else>
+# <a.description>
+<a.alt>
+<endif>
+>>
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = None"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+# <fileName>:<description>
+root_<treeLevel> = self._adaptor.nil()
+<root:rewriteElement()>
+<children:rewriteElement()>
+self._adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>)<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen><@end>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,args,terminalOptions={}) ::= <<
+self._adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>)<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode())<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode())<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,args,terminalOptions={}) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>)<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,elementIndex,terminalOptions={}) ::= <<
+self._adaptor.addChild(root_<treeLevel>, <createImaginaryNode(token, args, terminalOptions)>)<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,elementIndex,terminalOptions={}) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(<createImaginaryNode(token, args, terminalOptions)>, root_<treeLevel>)<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<!FIXME(96,untested)!>
+root_0 = <action><\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean the previous value.  I am reusing the
+ *  variable 'tree' sitting in the retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assignment will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree())<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+self._adaptor.addChild(root_<treeLevel>, <action>)<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(<action>, root_<treeLevel>)<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
+>>
+
+createImaginaryNode(tokenType,args,terminalOptions={}) ::= <<
+<if(terminalOptions.node)>
+<! new MethodNode(IDLabel, args) !>
+<terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+<if (!args)>self._adaptor.createFromType(<tokenType>, "<tokenType>")
+<else>self._adaptor.create(<tokenType>, <args; separator=", ">)
+<endif>
+<endif>
+>>
+
+//<! need to call different adaptor.create*() methods depending on argument count !>
+//<if (!args)>self._adaptor.createFromType(<tokenType>, "<tokenType>")
+//<else><if (!rest(args))>self._adaptor.createFromType(<tokenType>, <first(args)>)
+//<else><if (!rest(rest(args)))>self._adaptor.createFromToken(<tokenType>, <first(args)>, <first(rest(args))>)
+//<endif>
+//<endif>
+//<endif>
+
+
+createRewriteNodeFromElement(token,args,terminalOptions={}) ::= <<
+<if(terminalOptions.node)>
+<terminalOptions.node>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+<! need to call different adaptor.create*() methods depending on argument count !>
+<if (!args)>self._adaptor.createFromType(<token>, "<token>")
+<else><if (!rest(args))>self._adaptor.createFromToken(<token>, <first(args)>)
+<else><if (!rest(rest(args)))>self._adaptor.createFromToken(<token>, <first(args)>, <first(rest(args))>)
+<endif>
+<endif>
+<endif>
+<else>
+stream_<token>.nextNode()
+<endif>
+<endif>
+>>
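+
+/* For illustration, the branch structure of createRewriteNodeFromElement
+ * restated as a rough Python sketch.  The function and parameter names are
+ * made up; the adaptor and stream calls are the ones already used in this
+ * group.
+ *
+ *   def create_rewrite_node(stream, adaptor, token_type, args=(), node_class=None):
+ *       if node_class is not None:               # terminalOptions.node
+ *           return node_class(stream.nextToken(), *args)
+ *       if args:                                 # ID[args]: build a fresh node
+ *           return adaptor.createFromToken(token_type, *args)
+ *       return stream.nextNode()                 # plain ID: reuse the stream's node
+ */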
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTDbg.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTDbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Python/ASTDbg.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTParser.stg
new file mode 100644
index 0000000..7b4bc03
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTParser.stg
@@ -0,0 +1,199 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+
+finishedBacktracking(block) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>
+if <actions.(actionScope).synpredgate>:
+    <block>
+<else>
+<block>
+<endif>
+<endif>
+>>
+
+@ruleBody.setErrorReturnValue() ::= <<
+retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.tokenRef(...)>
+<finishedBacktracking({
+<label>_tree = <createNodeFromToken(...)>
+self._adaptor.addChild(root_0, <label>_tree)
+})>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex,terminalOptions={}) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.tokenRef(...)>
+<finishedBacktracking({
+<label>_tree = <createNodeFromToken(...)>
+root_0 = self._adaptor.becomeRoot(<label>_tree, root_0)
+})>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated code
+// rather than just added-on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <%
+<super.matchSet(postmatchCode={<finishedBacktracking({self._adaptor.addChild(root_0, <createNodeFromToken(...)>)})>}, ...)>
+%>
+
+matchRuleBlockSet(s,label,elementIndex,postmatchCode,treeLevel="0",terminalOptions={}) ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,elementIndex,debug,terminalOptions={}) ::= <<
+<if(label)>
+<label> = self.input.LT(1)<\n>
+<endif>
+<super.matchSet(postmatchCode={<finishedBacktracking({root_0 = self._adaptor.becomeRoot(<createNodeFromToken(...)>, root_0)})>}, ...)>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({self._adaptor.addChild(root_0, <label>.tree)})>
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({root_0 = self._adaptor.becomeRoot(<label>.tree, root_0)})>
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+// WILDCARD AST
+
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.wildcard(...)>
+<finishedBacktracking({
+<label>_tree = self._adaptor.createWithPayload(<label>)
+self._adaptor.addChild(root_0, <label>_tree)
+})>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.wildcard(...)>
+<finishedBacktracking({
+<label>_tree = self._adaptor.createWithPayload(<label>)
+root_0 = self._adaptor.becomeRoot(<label>_tree, root_0)
+})>
+>>
+
+createNodeFromToken(label,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+<terminalOptions.node>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+self._adaptor.createWithPayload(<label>)
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<finishedBacktracking({
+retval.tree = self._adaptor.rulePostProcessing(root_0)
+self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+})>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTTreeParser.stg
new file mode 100644
index 0000000..996341b
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/ASTTreeParser.stg
@@ -0,0 +1,311 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+
+finishedBacktracking(block) ::= <<
+<if(backtracking)>
+if <actions.(actionScope).synpredgate>:
+    <block>
+<else>
+<block>
+<endif>
+>>
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+_first_0 = None
+_last = None<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<finishedBacktracking({
+<if(rewriteMode)>
+retval.tree = _first_0
+if self._adaptor.getParent(retval.tree) is not None and self._adaptor.isNil(self._adaptor.getParent(retval.tree)):
+    retval.tree = self._adaptor.getParent(retval.tree)
+<endif>
+})>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = self.input.LT(1)
+_save_last_<treeLevel> = _last
+_first_<treeLevel> = None
+<if(!rewriteMode)>
+root_<treeLevel> = self._adaptor.nil()<\n>
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<finishedBacktracking({
+<if(root.el.rule)>
+if _first_<enclosingTreeLevel> is None:
+    _first_<enclosingTreeLevel> = <root.el.label>.tree<\n>
+<else>
+if _first_<enclosingTreeLevel> is None:
+    _first_<enclosingTreeLevel> = <root.el.label><\n>
+<endif>
+})>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if self.input.LA(1) == DOWN:
+    self.match(self.input, DOWN, None)
+    <children:element()>
+    self.match(self.input, UP, None)
+
+<else>
+self.match(self.input, DOWN, None)
+<children:element()>
+self.match(self.input, UP, None)<\n>
+<endif>
+<if(!rewriteMode)>
+self._adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>)<\n>
+<endif>
+_last = _save_last_<treeLevel>
+
+>>
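+
+/* Illustrative sketch only (plain Python, not generated code): a pattern
+ * ^(root children) is matched against the flattened node stream as
+ * root, DOWN, children..., UP.  The stream/helper names are hypothetical;
+ * DOWN and UP stand for the imaginary navigation tokens.
+ *
+ *     DOWN, UP = 2, 3
+ *
+ *     def match_tree(stream, match_root, match_children):
+ *         match_root(stream)
+ *         if stream.LA(1) == DOWN:      # a leaf node has no DOWN...UP block
+ *             stream.consume()          # DOWN
+ *             match_children(stream)
+ *             stream.consume()          # UP
+ */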
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) except that it also
+ *  sets _last
+ */
+tokenRefBang(token,label,elementIndex,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(terminalOptions.node)>
+<label>_tree = <terminalOptions.node>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+self._adaptor.addChild(root_<treeLevel>, <label>_tree)
+})>
+<else> <! rewrite mode !>
+<finishedBacktracking({
+if _first_<treeLevel> is None:
+    _first_<treeLevel> = <label><\n>
+})>
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(terminalOptions.node)>
+<label>_tree = <terminalOptions.node>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+root_<treeLevel> = self._adaptor.becomeRoot(<label>_tree, root_<treeLevel>)
+})>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+<label>_tree = self._adaptor.dupTree(<label>)
+self._adaptor.addChild(root_<treeLevel>, <label>_tree)
+})>
+<else> <! rewrite mode !>
+<finishedBacktracking({
+if _first_<treeLevel> is None:
+    _first_<treeLevel> = <label>
+})>
+<endif>
+>>
+
+// SET AST
+matchSet(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(terminalOptions.node)>
+<label>_tree = <terminalOptions.node>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+self._adaptor.addChild(root_<treeLevel>, <label>_tree)
+})>
+<endif>
+}, ...)>
+>>
+
+matchRuleBlockSet(s,label,elementIndex,postmatchCode,treeLevel="0",terminalOptions={}) ::= <<
+<matchSet(...)>
+<noRewrite(...)> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,elementIndex,debug,terminalOptions={}) ::= <<
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(terminalOptions.node)>
+<label>_tree = <terminalOptions.node>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+root_<treeLevel> = self._adaptor.becomeRoot(<label>_tree, root_<treeLevel>)
+})>
+<endif>
+}, ...)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRef(...)>
+<finishedBacktracking({
+<if(!rewriteMode)>
+self._adaptor.addChild(root_<treeLevel>, <label>.tree)
+<else> <! rewrite mode !>
+if _first_<treeLevel> is None:
+    _first_<treeLevel> = <label>.tree<\n>
+<endif>
+})>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+root_<treeLevel> = self._adaptor.becomeRoot(<label>.tree, root_<treeLevel>)
+})>
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefRuleRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,scope,terminalOptions={}) ::= <<
+<if(terminalOptions.node)>
+<terminalOptions.node>(stream_<token>.nextNode())
+<else>
+stream_<token>.nextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<finishedBacktracking({
+retval.tree = self._adaptor.rulePostProcessing(root_0)
+})>
+<endif>
+>>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/Dbg.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/Dbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Python/Dbg.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python/Python.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/Python.stg
new file mode 100644
index 0000000..cffdf86
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python/Python.stg
@@ -0,0 +1,1481 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2006 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** The API version of the runtime that recognizers generated by this runtime
+ *  need.
+ */
+apiVersion() ::= "1"
+
+// System.Boolean.ToString() returns "True" and "False", but the proper C# literals are "true" and "false"
+// The Java version of Boolean returns "true" and "false", so they map to themselves here.
+booleanLiteral ::= [
+	       "True":"true",
+	       "False":"false",
+	       "true":"true",
+	       "false":"false",
+	       default:"false"
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+           bitsets, buildTemplate, buildAST, rewriteMode, profile,
+           backtracking, synpreds, memoize, numRules,
+           fileName, ANTLRVersion, generatedTimestamp, trace,
+           scopes, superClass, literals) ::=
+<<
+# $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<@imports>
+import sys
+from antlr3 import *
+<if(TREE_PARSER)>
+from antlr3.tree import *<\n>
+<endif>
+from antlr3.compat import set, frozenset
+<@end>
+
+<actions.(actionScope).header>
+
+<! <docComment> !>
+
+# for convenience in actions
+HIDDEN = BaseRecognizer.HIDDEN
+
+# token types
+<tokens:{it | <it.name>=<it.type>}; separator="\n">
+
+<recognizer>
+
+<if(actions.(actionScope).main)>
+<actions.(actionScope).main>
+<else>
+def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
+<if(LEXER)>
+    from antlr3.main import LexerMain
+    main = LexerMain(<recognizer.name>)<\n>
+<endif>
+<if(PARSER)>
+    from antlr3.main import ParserMain
+    main = ParserMain("<recognizer.grammar.name>Lexer", <recognizer.name>)<\n>
+<endif>
+<if(TREE_PARSER)>
+    from antlr3.main import WalkerMain
+    main = WalkerMain(<recognizer.name>)<\n>
+<endif>
+    main.stdin = stdin
+    main.stdout = stdout
+    main.stderr = stderr
+    main.execute(argv)<\n>
+<endif>
+
+<actions.(actionScope).footer>
+
+if __name__ == '__main__':
+    main(sys.argv)
+
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, filterMode,
+      labelType="CommonToken", superClass="Lexer") ::= <<
+<grammar.directDelegates:
+ {g|from <g.recognizerName> import <g.recognizerName>}; separator="\n">
+
+class <grammar.recognizerName>(<@superClassName><superClass><@end>):
+    <scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+
+    grammarFileName = "<fileName>"
+    api_version = <apiVersion()>
+
+    def __init__(self<grammar.delegators:{g|, <g:delegateName()>}>, input=None, state=None):
+        if state is None:
+            state = RecognizerSharedState()
+        super(<grammar.recognizerName>, self).__init__(input, state)
+
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        self._state.ruleMemo = {}
+<endif>
+<endif>
+
+        <grammar.directDelegates:
+         {g|self.<g:delegateName()> = <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>self, input, state)}; separator="\n">
+        <grammar.directDelegates:
+         {g|<g.delegates:{h|self.<h:delegateName()> = self.<g:delegateName()>.<h:delegateName()>}; separator="\n">}; separator="\n">
+        <grammar.delegators:
+         {g|self.<g:delegateName()> = <g:delegateName()>}; separator="\n">
+        <last(grammar.delegators):
+    	 {g|self.gParent = <g:delegateName()>}; separator="\n">
+        self.delegates = [<grammar.delegates: {g|self.<g:delegateName()>}; separator = ", ">]
+
+        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; separator="\n">
+
+        <actions.lexer.init>
+
+
+    <actions.lexer.members>
+
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator="\n\n">
+
+    <synpreds:{p | <lexerSynpred(p)>}>
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be reported on a failed match; just rewind,
+ *  consume one character and then try again.  backtracking needs to be set
+ *  as well.  Make rule memoization happen only at levels above 1, as we
+ *  start mTokens at backtracking==1.
+ */
+filteringNextToken() ::= <<
+def nextToken(self):
+    while True:
+        if self.input.LA(1) == EOF:
+            return self.makeEOFToken()
+
+        self._state.token = None
+        self._state.channel = DEFAULT_CHANNEL
+        self._state.tokenStartCharIndex = self.input.index()
+        self._state.tokenStartCharPositionInLine = self.input.charPositionInLine
+        self._state.tokenStartLine = self.input.line
+        self._state._text = None
+        try:
+            m = self.input.mark()
+            try:
+                # means we won't throw slow exception
+                self._state.backtracking = 1
+                try:
+                    self.mTokens()
+                finally:
+                    self._state.backtracking = 0
+
+            except BacktrackingFailed:
+                # mTokens backtracks with synpred at backtracking==2
+                # and we set the synpredgate to allow actions at level 1.
+                self.input.rewind(m)
+                self.input.consume() # advance one char and try again
+
+            else:
+                self.emit()
+                return self._state.token
+
+        except RecognitionException, re:
+            # shouldn't happen in backtracking mode, but...
+            self.reportError(re)
+            self.recover(re)
+
+
+def memoize(self, input, ruleIndex, ruleStartIndex, success):
+    if self._state.backtracking > 1:
+        # is Lexer always superclass?
+        super(<grammar.recognizerName>, self).memoize(input, ruleIndex, ruleStartIndex, success)
+
+
+def alreadyParsedRule(self, input, ruleIndex):
+    if self._state.backtracking > 1:
+        return super(<grammar.recognizerName>, self).alreadyParsedRule(input, ruleIndex)
+    return False
+
+
+>>
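+
+/* Illustrative sketch only (plain Python, not generated code): the
+ * filter-mode nextToken() above boils down to mark / try / rewind /
+ * consume-one-char-and-retry.  The scanner API and MatchFailed exception
+ * are hypothetical stand-ins.
+ *
+ *     class MatchFailed(Exception):
+ *         pass
+ *
+ *     def next_token(stream, try_match):
+ *         while not stream.at_eof():
+ *             pos = stream.mark()
+ *             try:
+ *                 return try_match(stream)   # like mTokens() with backtracking on
+ *             except MatchFailed:
+ *                 stream.rewind(pos)         # undo the partial match
+ *                 stream.consume()           # skip one char and try again
+ *         return None                        # end of input
+ */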
+
+actionGate() ::= "self._state.backtracking == 0"
+
+filteringActionGate() ::= "self._state.backtracking == 1"
+
+/** How to generate a parser */
+
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass, labelType, members,
+	      rewriteElementType, filterMode, init, ASTLabelType="Object") ::= <<
+<if(grammar.grammarIsRoot)>
+# token names
+tokenNames = [
+    "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>",
+    <tokenNames; wrap, separator=", ">
+]<\n>
+<else>
+from <grammar.composite.rootGrammar.recognizerName> import tokenNames<\n>
+<endif>
+<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScopeClass(scope=it)><endif>}>
+
+<grammar.directDelegates:
+ {g|from <g.recognizerName> import <g.recognizerName>}; separator="\n">
+
+<rules:{it|<ruleAttributeScopeClass(scope=it.ruleDescriptor.ruleScope)>}>
+
+class <grammar.recognizerName>(<@superClassName><superClass><@end>):
+    grammarFileName = "<fileName>"
+    api_version = <apiVersion()>
+    tokenNames = tokenNames
+
+    def __init__(self<grammar.delegators:{g|, <g:delegateName()>}>, input, state=None, *args, **kwargs):
+        if state is None:
+            state = RecognizerSharedState()
+
+        <@args()>
+        super(<grammar.recognizerName>, self).__init__(input, state, *args, **kwargs)
+
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        self._state.ruleMemo = {}
+<endif>
+<endif>
+
+        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; separator="\n">
+
+        <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeStack(scope=it)><endif>}>
+	<rules:{it | <ruleAttributeScopeStack(scope=it.ruleDescriptor.ruleScope)>}>
+
+        <init>
+
+        <grammar.delegators:
+         {g|self.<g:delegateName()> = <g:delegateName()>}; separator="\n">
+        <grammar.directDelegates:
+         {g|self.<g:delegateName()> = <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>self, input, state)}; separator="\n">
+        <grammar.directDelegates:
+         {g|<g.delegates:{h|self.<h:delegateName()> = self.<g:delegateName()>.<h:delegateName()>}; separator="\n">}; separator="\n">
+        <last(grammar.delegators):
+    	 {g|self.gParent = self.<g:delegateName()>}; separator="\n">
+        self.delegates = [<grammar.delegates: {g|self.<g:delegateName()>}; separator = ", ">]
+
+	<@init><@end>
+
+
+    <@members><@end>
+
+    <members>
+
+    <rules; separator="\n\n">
+
+    <! generate rule/method definitions for imported rules so they
+       appear to be defined in this recognizer. !>
+    <grammar.delegatedRules:{ruleDescriptor| <delegateRule(ruleDescriptor)> }; separator="\n">
+
+    <synpreds:{p | <synpred(p)>}>
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+    <bitsets:{it | FOLLOW_<it.name>_in_<it.inName><it.tokenIndex> = frozenset([<it.tokenTypes:{it | <it>};separator=", ">])<\n>}>
+
+>>
+
+delegateRule(ruleDescriptor) ::= <<
+def <ruleDescriptor.name>(self, <ruleDescriptor.parameterScope:parameterScope()>):
+<\ >   <if(ruleDescriptor.hasReturnValue)>return <endif>self.<ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", "><endif>)
+
+
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
+       ASTLabelType="Object", superClass="Parser", labelType="Token",
+       members={<actions.parser.members>},
+       init={<actions.parser.init>}
+       ) ::= <<
+<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, "TokenStream", superClass,
+              labelType, members, "Token",
+              false, init, ASTLabelType)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object",
+           superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
+           members={<actions.treeparser.members>},
+	   init={<actions.treeparser.init>}
+           ) ::= <<
+<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, "TreeNodeStream", superClass,
+              labelType, members, "Node",
+              filterMode, init, ASTLabelType)>
+>>
+
+/** A simpler version of the rule template, specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  or parameters etc., just generate the simplest possible method.  Don't do
+ *  any of the normal memoization work in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+# $ANTLR start "<ruleName>"
+def <ruleName>_fragment(self, <ruleDescriptor.parameterScope:parameterScope()>):
+    <ruleLabelDefs()>
+<if(trace)>
+    self.traceIn("<ruleName>_fragment", <ruleDescriptor.index>)
+    try:
+        <block>
+
+    finally:
+        self.traceOut("<ruleName>_fragment", <ruleDescriptor.index>)
+
+<else>
+    <block>
+<endif>
+# $ANTLR end "<ruleName>"
+
+
+>>
+
+synpred(name) ::= <<
+def <name>(self):
+    self._state.backtracking += 1
+    <@start()>
+    start = self.input.mark()
+    try:
+        self.<name>_fragment()
+    except BacktrackingFailed:
+        success = False
+    else:
+        success = True
+    self.input.rewind(start)
+    <@stop()>
+    self._state.backtracking -= 1
+    return success
+
+
+>>
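+
+/* Illustrative sketch only (plain Python, not generated code): a syntactic
+ * predicate is a speculative parse -- mark the input, run the fragment with
+ * backtracking enabled, report success or failure, and rewind so no input is
+ * consumed.  parse_fragment and the stream API are hypothetical stand-ins.
+ *
+ *     class BacktrackingFailed(Exception):   # stand-in for the runtime exception
+ *         pass
+ *
+ *     def synpred(stream, parse_fragment):
+ *         start = stream.mark()
+ *         try:
+ *             parse_fragment(stream)         # raises BacktrackingFailed on mismatch
+ *         except BacktrackingFailed:
+ *             ok = False
+ *         else:
+ *             ok = True
+ *         stream.rewind(start)               # a predicate never consumes input
+ *         return ok
+ */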
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if self._state.backtracking > 0 and self.alreadyParsedRule(self.input, <ruleDescriptor.index>):
+    # for cached failed rules, alreadyParsedRule will raise an exception
+    success = True
+    return <ruleReturnValue()>
+
+<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>
+if self._state.backtracking > 0:
+    raise BacktrackingFailed
+
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+# $ANTLR start "<ruleName>"
+# <fileName>:<description>
+<ruleDescriptor.actions.decorate>
+def <ruleName>(self, <ruleDescriptor.parameterScope:parameterScope()>):
+<if(trace)>
+    self.traceIn("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    <@body><ruleBody()><@end>
+    <@postamble()>
+    return <ruleReturnValue()>
+
+# $ANTLR end "<ruleName>"
+>>
+
+ruleBody() ::= <<
+<if(memoize)>
+<if(backtracking)>
+success = False<\n>
+<endif>
+<endif>
+try:
+    try:
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+
+<if(memoize)>
+<if(backtracking)>
+        success = True<\n>
+<endif>
+<endif>
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    except RecognitionException, re:
+        self.reportError(re)
+        self.recover(self.input, re)
+        <@setErrorReturnValue()>
+
+<endif>
+<else>
+    finally:
+        pass
+
+<endif>
+<endif>
+finally:
+<if(trace)>
+    self.traceOut("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+    <memoize()>
+    <ruleScopeCleanUp()>
+    <finally>
+    pass
+>>
+
+catch(decl,action) ::= <<
+except <e.decl>:
+    <e.action>
+
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval = self.<ruleDescriptor.name>_return()
+retval.start = self.input.LT(1)<\n>
+<elseif(ruleDescriptor.returnScope)>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.name> = <if(a.initValue)><a.initValue><else>None<endif>
+}>
+<endif>
+<if(memoize)>
+<ruleDescriptor.name>_StartIndex = self.input.index()
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{it | self.<it>_stack.append(<it>_scope())}; separator="\n">
+<ruleDescriptor.ruleScope:{it | self.<it.name>_stack.append(<it.name>_scope())}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{it | self.<it>_stack.pop()}; separator="\n">
+<ruleDescriptor.ruleScope:{it | self.<it.name>_stack.pop()}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it | <it.label.text> = None}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,
+  ruleDescriptor.wildcardTreeListLabels]
+    :{it | list_<it.label.text> = None}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
+<ruleDescriptor.ruleListLabels:{it | <it.label.text> = None}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it | <it.label.text> = None}; separator="\n"
+>
+<ruleDescriptor.charLabels:{it | <it.label.text> = None}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it | list_<it.label.text> = None}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = self.input.LT(-1)<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if self._state.backtracking > 0:
+    self.memoize(self.input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex, success)
+
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+# $ANTLR start "<ruleName>"
+def m<ruleName>(self, <ruleDescriptor.parameterScope:parameterScope()>):
+<if(trace)>
+    self.traceIn("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+<if(memoize)>
+<if(backtracking)>
+    success = False<\n>
+<endif>
+<endif>
+    try:
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        _type = <ruleName>
+        _channel = DEFAULT_CHANNEL
+
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        self._state.type = _type
+        self._state.channel = _channel
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+<if(memoize)>
+<if(backtracking)>
+        success = True<\n>
+<endif>
+<endif>
+
+    finally:
+<if(trace)>
+        self.traceOut("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+	<ruleScopeCleanUp()>
+        <memoize()>
+        pass
+
+# $ANTLR end "<ruleName>"
+
+
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+def mTokens(self):
+    <block><\n>
+
+
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+alt<decisionNumber> = <maxAlt>
+<decls>
+<@body><blockBody()><@end>
+>>
+
+blockBody() ::= <<
+<@predecision()>
+<@decision><decision><@end>
+<@postdecision()>
+<@prebranch()>
+<alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+alt<decisionNumber> = <maxAlt>
+<decls>
+<@predecision()>
+<@decision><decision><@end>
+<@postdecision()>
+<alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+cnt<decisionNumber> = 0
+<decls>
+<@preloop()>
+<@loopBody>
+<positiveClosureBlockLoop()>
+<@end>
+<@postloop()>
+>>
+
+positiveClosureBlockLoop() ::= <<
+while True: #loop<decisionNumber>
+    alt<decisionNumber> = <maxAlt>
+    <@predecision()>
+    <@decisionBody><decision><@end>
+    <@postdecision()>
+    <alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
+    else:
+        if cnt<decisionNumber> >= 1:
+            break #loop<decisionNumber>
+
+        <ruleBacktrackFailure()>
+        eee = EarlyExitException(<decisionNumber>, self.input)
+        <@earlyExitException()>
+        raise eee
+
+    cnt<decisionNumber> += 1
+>>
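+
+/* Illustrative sketch only (plain Python, not generated code): a (..)+
+ * subrule keeps matching while the decision predicts another element and
+ * raises an early-exit error if nothing matched at all.  The helper names
+ * are hypothetical.
+ *
+ *     class EarlyExit(Exception):
+ *         pass
+ *
+ *     def match_one_or_more(stream, predicts_element, match_element):
+ *         count = 0
+ *         while predicts_element(stream):   # the loop decision
+ *             match_element(stream)
+ *             count += 1
+ *         if count < 1:                     # (..)+ requires at least one match
+ *             raise EarlyExit()
+ */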
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@preloop()>
+<@loopBody>
+<closureBlockLoop()>
+<@end>
+<@postloop()>
+>>
+
+closureBlockLoop() ::= <<
+while True: #loop<decisionNumber>
+    alt<decisionNumber> = <maxAlt>
+    <@predecision()>
+    <@decisionBody><decision><@end>
+    <@postdecision()>
+    <alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
+    else:
+        break #loop<decisionNumber>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation,
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase(altNum,alt) ::= <<
+if alt<decisionNumber> == <altNum>:
+    <@prealt()>
+    <alt>
+>>
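+
+/* Illustrative sketch only (plain Python, not generated code): Python has no
+ * switch statement, so the predicted alternative number is dispatched with an
+ * if/elif chain.  A two-alternative decision expands to roughly this shape;
+ * the predict/match helpers are hypothetical.
+ *
+ *     def rule_body(stream, predict_alt, match_alt1, match_alt2):
+ *         alt = predict_alt(stream)    # DFA or lookahead test picks 1 or 2
+ *         if alt == 1:
+ *             match_alt1(stream)       # code for alternative 1
+ *         elif alt == 2:
+ *             match_alt2(stream)       # code for alternative 2
+ */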
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt, treeLevel,rew) ::= <<
+# <fileName>:<description>
+pass <! so empty alternatives are a valid block !>
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element(e) ::= <<
+<@prematch()>
+<e.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(label)><label> = <endif>self.match(self.input, <token>, self.FOLLOW_<token>_in_<ruleName><elementIndex>)
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(token,label,elementIndex,terminalOptions)>
+<listLabel(label, label)>
+>>
+
+listLabel(label, elem) ::= <<
+if list_<label> is None:
+    list_<label> = []
+list_<label>.append(<elem>)<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = self.input.LA(1)<\n>
+<endif>
+self.match(<char>)
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = self.input.LA(1)<\n>
+<endif>
+self.matchRange(<a>, <b>)
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="",terminalOptions={}) ::= <<
+<if(label)>
+<label> = self.input.LT(1)<\n>
+<endif>
+if <s>:
+    self.input.consume()
+    <postmatchCode>
+<if(!LEXER)>
+    self._state.errorRecovery = False<\n>
+<endif>
+
+else:
+    <ruleBacktrackFailure()>
+    mse = MismatchedSetException(None, self.input)
+    <@mismatchedSetException()>
+<if(LEXER)>
+    self.recover(mse)
+    raise mse
+<else>
+    raise mse
+    <! use following code to make it recover inline; remove throw mse;
+    self.recoverFromMismatchedSet(
+        self.input, mse, self.FOLLOW_set_in_<ruleName><elementIndex>
+        )
+    !>
+<endif>
+<\n>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(label, label)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex="0") ::= <<
+<if(label)>
+<label>Start = self.getCharIndex()
+self.match(<string>)
+<label>StartLine<elementIndex> = self.getLine()
+<label>StartCharPos<elementIndex> = self.getCharPositionInLine()
+<label> = <labelType>(input=self.input, type=INVALID_TOKEN_TYPE, channel=DEFAULT_CHANNEL, start=<label>Start, stop=self.getCharIndex()-1)
+<label>.setLine(<label>StartLine<elementIndex>)
+<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
+<else>
+self.match(<string>)
+<endif>
+>>
+
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(label)>
+<label> = self.input.LT(1)<\n>
+<endif>
+self.matchAny(self.input)
+>>
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<wildcard(...)>
+<listLabel(label,label)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = self.input.LA(1)<\n>
+<endif>
+self.matchAny()
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(label, elementIndex)>
+<listLabel(label, label)>
+>>
+
+/** Match a rule reference by invoking it, possibly with arguments
+ *  and a return value or values.  The 'rule' argument used to be the
+ *  target rule name but is now of type Rule, whose toString is still
+ *  the rule name; the full rule descriptor is now accessible as well.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+self._state.following.append(self.FOLLOW_<rule.name>_in_<ruleName><elementIndex>)
+<if(label)><label> = <endif>self.<if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">)<\n>
+self._state.following.pop()
+>>
+
+/** ids+=rule */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(rule,label,elementIndex,args,scope)>
+<listLabel(label, label)>
+>>
+
+/** A lexer rule reference.
+ *  The 'rule' argument used to be the target rule name but is now of
+ *  type Rule, whose toString is still the rule name; the full rule
+ *  descriptor is now accessible as well.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+<label>Start<elementIndex> = self.getCharIndex()
+self.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
+<label>StartLine<elementIndex> = self.getLine()
+<label>StartCharPos<elementIndex> = self.getCharPositionInLine()
+<label> = <labelType>(
+    input=self.input,
+    type=INVALID_TOKEN_TYPE,
+    channel=DEFAULT_CHANNEL,
+    start=<label>Start<elementIndex>,
+    stop=self.getCharIndex()-1)
+<label>.setLine(<label>StartLine<elementIndex>)
+<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
+<else>
+self.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(rule,label,args,elementIndex,scope)>
+<listLabel(label, label)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+<label>Start<elementIndex> = self.getCharIndex()
+<label>StartLine<elementIndex> = self.getLine()
+<label>StartCharPos<elementIndex> = self.getCharPositionInLine()
+self.match(EOF)
+<label> = <labelType>(input=self.input, type=EOF, channel=DEFAULT_CHANNEL, start=<label>Start<elementIndex>, stop=self.getCharIndex()-1)
+<label>.setLine(<label>StartLine<elementIndex>)
+<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
+<else>
+self.match(EOF)
+<endif>
+>>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "<recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName, opPrec) ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0 = $<ruleName>_primary.tree"
+recRuleSetReturnAction(src, name)     ::= "$<name> = $<src>.<name>"
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if self.input.LA(1) == DOWN:
+    self.match(self.input, DOWN, None)
+    <children:element()>
+    self.match(self.input, UP, None)
+
+<else>
+self.match(self.input, DOWN, None)
+<children:element()>
+self.match(self.input, UP, None)
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if not (<evalPredicate(pred, description)>):
+    <ruleBacktrackFailure()>
+    raise FailedPredicateException(self.input, "<ruleName>", "<description>")
+
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
+<edges; separator="\nel">
+else:
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>
+<else>
+    <ruleBacktrackFailure()>
+    nvae = NoViableAltException("<description>", <decisionNumber>, <stateNumber>, self.input)<\n>
+    <@noViableAltException()>
+    raise nvae<\n>
+<endif>
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  It delays error detection, but this
+ *  is faster, smaller, and more like what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
+<edges; separator="\nel">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
+<edges; separator="\nel"><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber> = <eotPredictsAlt> <! if no edges, don't gen ELSE !>
+<else>
+else:
+    alt<decisionNumber> = <eotPredictsAlt>
+<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if (<labelExpr>) <if(predicates)>and (<predicates>)<endif>:
+    <targetState>
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+<!
+  FIXME: this is one of the few occasions where I miss a switch statement
+  in Python. ATM this is implemented as a chain of if .. elif ..
+  This may be replaced by a faster dictionary lookup, once I find a solution
+  for the cases where an edge is not a plain dfaAcceptState.
+!>
+LA<decisionNumber> = self.input.LA(<k>)
+<edges; separator="\nel">
+else:
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>
+<else>
+    <ruleBacktrackFailure()>
+    nvae = NoViableAltException("<description>", <decisionNumber>, <stateNumber>, self.input)<\n>
+    <@noViableAltException()>
+    raise nvae<\n>
+<endif>
+
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber> = self.input.LA(<k>)
+<edges; separator="\nel">
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber> = self.input.LA(<k>)
+<edges; separator="\nel">
+<if(eotPredictsAlt)>
+else:
+    alt<decisionNumber> = <eotPredictsAlt>
+<endif>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+if <labels:{it | LA<decisionNumber> == <it>}; separator=" or ">:
+    <targetState>
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = self.dfa<decisionNumber>.predict(self.input)
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+# lookup tables for DFA #<dfa.decisionNumber>
+
+DFA<dfa.decisionNumber>_eot = DFA.unpack(
+    u"<dfa.javaCompressedEOT; wrap="\"\n    u\"">"
+    )
+
+DFA<dfa.decisionNumber>_eof = DFA.unpack(
+    u"<dfa.javaCompressedEOF; wrap="\"\n    u\"">"
+    )
+
+DFA<dfa.decisionNumber>_min = DFA.unpack(
+    u"<dfa.javaCompressedMin; wrap="\"\n    u\"">"
+    )
+
+DFA<dfa.decisionNumber>_max = DFA.unpack(
+    u"<dfa.javaCompressedMax; wrap="\"\n    u\"">"
+    )
+
+DFA<dfa.decisionNumber>_accept = DFA.unpack(
+    u"<dfa.javaCompressedAccept; wrap="\"\n    u\"">"
+    )
+
+DFA<dfa.decisionNumber>_special = DFA.unpack(
+    u"<dfa.javaCompressedSpecial; wrap="\"\n    u\"">"
+    )
+
+
+DFA<dfa.decisionNumber>_transition = [
+    <dfa.javaCompressedTransition:{s|DFA.unpack(u"<s; wrap="\"\nu\"">")}; separator=",\n">
+]
+
+# class definition for DFA #<dfa.decisionNumber>
+
+class DFA<dfa.decisionNumber>(DFA):
+    pass
+
+    <@errorMethod()>
+
+<if(dfa.specialStateSTs)>
+    def specialStateTransition(self_, s, input):
+        # convince pylint that my self_ magic is ok ;)
+        # pylint: disable-msg=E0213
+
+        # pretend we are a member of the recognizer
+        # thus semantic predicates can be evaluated
+        self = self_.recognizer
+
+        _s = s
+
+        <dfa.specialStateSTs:{state | if s == <i0>: <! compressed special state numbers 0..n-1 !>
+    <state>}; separator="\nel">
+
+<if(backtracking)>
+        if self._state.backtracking > 0:
+            raise BacktrackingFailed
+
+<endif>
+        nvae = NoViableAltException(self_.getDescription(), <dfa.decisionNumber>, _s, input)
+        self_.error(nvae)
+        raise nvae<\n>
+<endif>
+
+>>
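+
+/* Illustrative sketch only: the packed table strings above are run-length
+ * encoded.  This is NOT the runtime's DFA.unpack implementation -- just a
+ * minimal decoder assuming a simple (count, value) character-pair encoding,
+ * to show the general idea.
+ *
+ *     def unpack_rle(packed):
+ *         # "\3\5\2\7" -> [5, 5, 5, 7, 7]: 3 copies of 5, then 2 copies of 7
+ *         codes = [ord(c) for c in packed]
+ *         out = []
+ *         for count, value in zip(codes[0::2], codes[1::2]):
+ *             out.extend([value] * count)
+ *         return out
+ */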
+
+cyclicDFAInit(dfa) ::= <<
+self.dfa<dfa.decisionNumber> = self.DFA<dfa.decisionNumber>(
+    self, <dfa.decisionNumber>,
+    eot = self.DFA<dfa.decisionNumber>_eot,
+    eof = self.DFA<dfa.decisionNumber>_eof,
+    min = self.DFA<dfa.decisionNumber>_min,
+    max = self.DFA<dfa.decisionNumber>_max,
+    accept = self.DFA<dfa.decisionNumber>_accept,
+    special = self.DFA<dfa.decisionNumber>_special,
+    transition = self.DFA<dfa.decisionNumber>_transition
+    )<\n>
+>>
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = input.LA(1)<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+index<decisionNumber>_<stateNumber> = input.index()
+input.rewind()<\n>
+<endif>
+s = -1
+<edges; separator="\nel">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.seek(index<decisionNumber>_<stateNumber>)<\n>
+<endif>
+if s >= 0:
+    return s
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if (<labelExpr>)<if(predicates)> and (<predicates>)<endif>:
+    s = <targetStateNumber><\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+se:
+    s = <targetStateNumber><\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "((<left>) and (<right>))"
+
+orPredicates(operands) ::= "(<operands; separator=\" or \">)"
+
+notPredicate(pred) ::= "not (<evalPredicate(pred, {})>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "self.<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "self.input.LA(<k>) == <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
+(<lower> \<= LA<decisionNumber>_<stateNumber> \<= <upper>)
+%>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(<lower> \<= self.input.LA(<k>) \<= <upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\" or \">"
+
+// A T T R I B U T E S
+
+globalAttributeScopeClass(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+class <scope.name>_scope(object):
+    def __init__(self):
+        <scope.attributes:{it | self.<it.decl> = None}; separator="\n">
+
+<endif>
+<endif>
+>>
+
+globalAttributeScopeStack(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+self.<scope.name>_stack = []<\n>
+<endif>
+<endif>
+>>
+
+ruleAttributeScopeClass(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+class <scope.name>_scope(object):
+    def __init__(self):
+        <scope.attributes:{it | self.<it.decl> = None}; separator="\n">
+
+<endif>
+<endif>
+>>
+
+ruleAttributeScopeStack(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+self.<scope.name>_stack = []<\n>
+<endif>
+<endif>
+>>
+
+delegateName(d) ::= <<
+<if(d.label)><d.label><else>g<d.name><endif>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<label.label.text> = None
+>>
+
+returnStructName(r) ::= "<r.name>_return"
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+class <ruleDescriptor:returnStructName()>(<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope):
+    def __init__(self):
+        super(<grammar.recognizerName>.<ruleDescriptor:returnStructName()>, self).__init__()
+
+        <if(scope)><scope.attributes:{it | self.<it.decl> = None}; separator="\n"><endif>
+        <@ruleReturnInit()>
+
+
+    <@ruleReturnMembers()>
+
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<if(scope)><scope.attributes:{it | <it.decl>}; separator=", "><endif>
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <%
+<if(negIndex)>
+self.<scope>_stack[-<negIndex>].<attr.name>
+<else>
+<if(index)>
+self.<scope>_stack[<index>].<attr.name>
+<else>
+self.<scope>_stack[-1].<attr.name>
+<endif>
+<endif>
+%>
+
+/* not applying patch because of bug in action parser!
+
+<if(negIndex)>
+((len(self.<scope>_stack) - <negIndex> - 1) >= 0 and [self.<scope>_stack[-<negIndex>].<attr.name>] or [None])[0]
+<else>
+<if(index)>
+((<index> \< len(self.<scope>_stack)) and [self.<scope>_stack[<index>].<attr.name>] or [None])[0]
+<else>
+((len(self.<scope>_stack) > 0) and [self.<scope>_stack[-1].<attr.name>] or [None])[0]
+<endif>
+<endif>
+
+*/
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
+<if(negIndex)>
+<!FIXME: this seems not to be used by ActionTranslator...!>
+self.<scope>_stack[-<negIndex>].<attr.name> = <expr>
+<else>
+<if(index)>
+<!FIXME: this seems not to be used by ActionTranslator...!>
+self.<scope>_stack[<index>].<attr.name> = <expr>
+<else>
+self.<scope>_stack[-1].<attr.name> = <expr>
+<endif>
+<endif>
+%>
+
+/** $x is either global scope or x is rule with dynamic scope; refers
+ *  to stack itself not top of stack.  This is useful for predicates
+ *  like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "self.<scope>_stack"
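+
+/* Illustrative sketch only (plain Python, not generated code): dynamic scopes
+ * are ordinary lists used as stacks -- push a scope object on rule entry,
+ * read fields off the top with [-1], pop on exit.  The function/name scope
+ * here is hypothetical.
+ *
+ *     class function_scope(object):
+ *         def __init__(self):
+ *             self.name = None
+ *
+ *     function_stack = []                        # $function is this whole stack
+ *
+ *     function_stack.append(function_scope())    # entering rule "function"
+ *     function_stack[-1].name = "foo"            # $function::name = "foo"
+ *     in_foo = bool(function_stack) and function_stack[-1].name == "foo"
+ *     function_stack.pop()                       # leaving the rule
+ */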
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+((<scope> is not None) and [<scope>.<attr.name>] or [None])[0]
+<else>
+<scope>
+<endif>
+%>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+%>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> = <expr>
+<else>
+<attr.name> = <expr>
+<endif>
+%>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach; and they are evaluated early;
+// they cannot see TREE_PARSER or PARSER attributes for example. :(
+
+tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.text"
+tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.type"
+tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.line"
+tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.charPositionInLine"
+tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.channel"
+tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.index"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "<scope>.start"
+ruleLabelPropertyRef_stop(scope,attr) ::= "<scope>.stop"
+ruleLabelPropertyRef_tree(scope,attr) ::= "<scope>.tree"
+ruleLabelPropertyRef_text(scope,attr) ::= <%
+<if(TREE_PARSER)>
+((<scope> is not None) and [self.input.getTokenStream().toString(
+    self.input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
+    self.input.getTreeAdaptor().getTokenStopIndex(<scope>.start)
+    )] or [None])[0]
+<else>
+((<scope> is not None) and [self.input.toString(<scope>.start,<scope>.stop)] or [None])[0]
+<endif>
+%>
+ruleLabelPropertyRef_st(scope,attr) ::= "((<scope> is not None) and [<scope>.st] or [None])[0]"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "((<scope> is not None) and [<scope>.type] or [0])[0]"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "((<scope> is not None) and [<scope>.line] or [0])[0]"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "((<scope> is not None) and [<scope>.charPositionInLine] or [0])[0]"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "((<scope> is not None) and [<scope>.channel] or [0])[0]"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "((<scope> is not None) and [<scope>.index] or [0])[0]"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "((<scope> is not None) and [<scope>.text] or [None])[0]"
+lexerRuleLabelPropertyRef_int(scope,attr) ::= "((<scope> is not None) and [int(<scope>.text)] or [0])[0]"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "retval.start"
+rulePropertyRef_stop(scope,attr) ::= "retval.stop" //mmm... or input.LT(-1)??
+rulePropertyRef_tree(scope,attr) ::= "retval.tree"
+rulePropertyRef_text(scope,attr) ::= "self.input.toString(retval.start, self.input.LT(-1))"
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "self.text"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "self._state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "self._state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "self._state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(self.getCharIndex()-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "int(<scope>.text)"
+
+// setting $st and $tree is allowed in local rule. everything else
+// is flagged as error
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>"
+
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if <actions.(actionScope).synpredgate>:
+    pass
+    <action>
+
+<else>
+if <actions.(actionScope).synpredgate>:
+    pass
+    <action>
+
+<endif>
+<else>
+#action start
+<action>
+#action end
+<endif>
+>>
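+
+// Illustration (the action body is invented): with backtracking enabled, a
+// user action {x = 1} passes through execAction above and comes out gated on
+// the synpred gate, which is typically self._state.backtracking == 0 (see
+// actionGate()), roughly:
+//
+//   if self._state.backtracking == 0:
+//       pass
+//       x = 1
+//
+// so user actions are suppressed while a syntactic predicate is speculating.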
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+
+// M I S C (properties, etc...)
+
+codeFileExtension() ::= ".py"
+
+true_value() ::= "True"
+false_value() ::= "False"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python/ST.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Python/ST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Python/ST.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python3/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python3/AST.stg
new file mode 100644
index 0000000..6d4b68e
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python3/AST.stg
@@ -0,0 +1,452 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2012 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+finishedBacktracking(block) ::= <<
+<if(backtracking)>
+if <actions.(actionScope).synpredgate>:
+    <block>
+<else>
+<block>
+<endif>
+>>
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+<if(!TREE_PARSER)><! tree parser would already have imported !>
+from antlr3.tree import *<\n>
+<endif>
+>>
+
+/** Add an adaptor property that knows how to build trees */
+@genericParser.init() ::= <<
+self._adaptor = None
+self.adaptor = CommonTreeAdaptor()
+<@super.init()>
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+<astAccessor()>
+>>
+
+astAccessor() ::= <<
+def getTreeAdaptor(self):
+    return self._adaptor
+
+def setTreeAdaptor(self, adaptor):
+    self._adaptor = adaptor
+    <grammar.directDelegates:{g|self.<g:delegateName()>.adaptor = adaptor}; separator="\n">
+
+adaptor = property(getTreeAdaptor, setTreeAdaptor)
+>>
+
+@returnScope.ruleReturnInit() ::= <<
+self.tree = None
+>>
+
+
+/** Add a variable to track rule's return AST */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+root_0 = None<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<super.ruleLabelDefs()>
+<[ruleDescriptor.tokenLabels,ruleDescriptor.wildcardTreeLabels,
+  ruleDescriptor.wildcardTreeListLabels]
+    :{it | <it.label.text>_tree = None}; separator="\n">
+<ruleDescriptor.tokenListLabels:{it | <it.label.text>_tree = None}; separator="\n">
+<ruleDescriptor.allTokenRefsInAltsWithRewrites
+    :{it | stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>")}; separator="\n">
+<ruleDescriptor.allRuleRefsInAltsWithRewrites
+    :{it | stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "rule <it>")}; separator="\n">
+>>
+
+/** When doing auto AST construction, we must define some variables;
+ *  These should be turned off if doing rewrites.  This must be a "mode"
+ *  as a rule could have both rewrite and AST within the same alternative
+ *  block.
+ */
+@alt.declarations() ::= <<
+<if(autoAST)>
+<if(outerAlt)>
+<if(!rewriteMode)>
+root_0 = self._adaptor.nil()<\n>
+<endif>
+<endif>
+<endif>
+>>
+
+
+// T r a c k i n g  R u l e  E l e m e n t s
+
+/** ID and track it for use in a rewrite rule */
+tokenRefTrack(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)> <! Track implies no auto AST construction!>
+<finishedBacktracking({stream_<token>.add(<label>)})>
+>>
+
+/** ids+=ID and track it for use in a rewrite rule; adds to ids *and*
+ *  to the tracking list stream_ID for use in the rewrite.
+ */
+tokenRefTrackAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) track for rewrite */
+tokenRefRuleRootTrack(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)>
+<finishedBacktracking({stream_<token>.add(<label>)})>
+>>
+
+/** Match ^(label+=TOKEN ...) track for rewrite */
+tokenRefRuleRootTrackAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRootTrack(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({stream_<rule.name>.add(<label>.tree)})>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefTrack(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<finishedBacktracking({stream_<rule.name>.add(<label>.tree)})>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRootTrack(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+// R e w r i t e
+
+rewriteCode(
+        alts, description,
+        referencedElementsDeep, // ALL referenced elements to right of ->
+        referencedTokenLabels,
+        referencedTokenListLabels,
+        referencedRuleLabels,
+        referencedRuleListLabels,
+        referencedWildcardLabels,
+        referencedWildcardListLabels,
+        rewriteBlockLevel, enclosingTreeLevel, treeLevel) ::=
+<<
+# AST Rewrite
+# elements: <referencedElementsDeep; separator=", ">
+# token labels: <referencedTokenLabels; separator=", ">
+# rule labels: <referencedRuleLabels; separator=", ">
+# token list labels: <referencedTokenListLabels; separator=", ">
+# rule list labels: <referencedRuleListLabels; separator=", ">
+# wildcard labels: <[referencedWildcardLabels,referencedWildcardListLabels]; separator=", ">
+<finishedBacktracking({
+<prevRuleRootRef()>.tree = root_0
+<rewriteCodeLabels()>
+root_0 = self._adaptor.nil()
+<first(alts):rewriteAltFirst(); anchor>
+
+<rest(alts):{a | el<rewriteAltRest(a)>}; anchor, separator="\n\n">
+
+<! if tree parser and rewrite=true !>
+<if(TREE_PARSER)>
+<if(rewriteMode)>
+<prevRuleRootRef()>.tree = self._adaptor.rulePostProcessing(root_0)
+self.input.replaceChildren(
+    self._adaptor.getParent(retval.start),
+    self._adaptor.getChildIndex(retval.start),
+    self._adaptor.getChildIndex(_last),
+    retval.tree
+    )<\n>
+<endif>
+<endif>
+
+<! if parser or tree-parser and rewrite!=true, we need to set result !>
+<if(!TREE_PARSER)>
+<prevRuleRootRef()>.tree = root_0<\n>
+<else>
+<if(!rewriteMode)>
+<prevRuleRootRef()>.tree = root_0<\n>
+<endif>
+<endif>
+})>
+>>
+
+rewriteCodeLabels() ::= <<
+<referencedTokenLabels
+    :{it | stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>", <it>)};
+    separator="\n"
+>
+<referencedTokenListLabels
+    :{it | stream_<it> = RewriteRule<rewriteElementType>Stream(self._adaptor, "token <it>", list_<it>)};
+    separator="\n"
+>
+<referencedWildcardLabels
+    :{it | stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "wildcard <it>", <it>)};
+    separator="\n"
+>
+<referencedWildcardListLabels
+    :{it | stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "wildcard <it>", list_<it>)};
+    separator="\n"
+>
+<referencedRuleLabels
+    :{it |
+if <it> is not None:
+    stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "rule <it>", <it>.tree)
+else:
+    stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "token <it>", None)
+};
+    separator="\n"
+>
+<referencedRuleListLabels
+    :{it| stream_<it> = RewriteRuleSubtreeStream(self._adaptor, "token <it>", list_<it>)};
+    separator="\n"
+>
+>>
+
+/** Generate code for an optional rewrite block; note it uses the deep ref'd element
+  *  list rather than the shallow list used by other blocks.
+  */
+rewriteOptionalBlock(
+        alt,rewriteBlockLevel,
+        referencedElementsDeep, // all nested refs
+        referencedElements, // elements in the immediate block; no nested blocks
+        description) ::=
+<<
+# <fileName>:<description>
+if <referencedElementsDeep:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElementsDeep:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewriteClosureBlock(
+        alt,rewriteBlockLevel,
+        referencedElementsDeep, // all nested refs
+        referencedElements, // elements in the immediate block; no nested blocks
+        description) ::=
+<<
+# <fileName>:<description>
+while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElements:{el | stream_<el>.reset();<\n>}>
+>>
+
+rewritePositiveClosureBlock(
+        alt,rewriteBlockLevel,
+        referencedElementsDeep, // all nested refs
+        referencedElements, // elements in the immediate block; no nested blocks
+        description) ::=
+<<
+# <fileName>:<description>
+if not (<referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">):
+    raise RewriteEarlyExitException()
+
+while <referencedElements:{el | stream_<el>.hasNext()}; separator=" or ">:
+    <alt>
+
+<referencedElements:{el | stream_<el>.reset()<\n>}>
+>>
+
+rewriteAltRest(a) ::= <<
+<if(a.pred)>if <a.pred>:
+    # <a.description>
+    <a.alt>
+<else>se: <! little hack to get if .. elif .. else block right !>
+    # <a.description>
+    <a.alt>
+<endif>
+>>
+
+rewriteAltFirst(a) ::= <<
+<if(a.pred)>
+if <a.pred>:
+    # <a.description>
+    <a.alt>
+<else>
+# <a.description>
+<a.alt>
+<endif>
+>>
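+
+// How the two templates above fit together: rewriteCode emits
+// "el<rewriteAltRest(a)>" for every alternative after the first, so the
+// literal "el" prefix combines with the leading "if"/"se:" produced here to
+// spell "elif"/"else:".  For two predicated alternatives plus a default, the
+// generated Python reads roughly:
+//
+//   if pred1:
+//       # alt 1
+//       ...
+//   elif pred2:
+//       # alt 2
+//       ...
+//   else:
+//       # last alt
+//       ...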
+
+/** For empty rewrites: "r : ... -> ;" */
+rewriteEmptyAlt() ::= "root_0 = None"
+
+rewriteTree(root,children,description,enclosingTreeLevel,treeLevel) ::= <<
+# <fileName>:<description>
+root_<treeLevel> = self._adaptor.nil()
+<root:rewriteElement()>
+<children:rewriteElement()>
+self._adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>)<\n>
+>>
+
+rewriteElementList(elements) ::= "<elements:rewriteElement()>"
+
+rewriteElement(e) ::= <<
+<@pregen><@end>
+<e.el>
+>>
+
+/** Gen ID or ID[args] */
+rewriteTokenRef(token,elementIndex,args,terminalOptions={}) ::= <<
+self._adaptor.addChild(root_<treeLevel>, <createRewriteNodeFromElement(...)>)<\n>
+>>
+
+/** Gen $label ... where defined via label=ID */
+rewriteTokenLabelRef(label,elementIndex) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode())<\n>
+>>
+
+/** Gen $label ... where defined via label+=ID */
+rewriteTokenListLabelRef(label,elementIndex) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextNode())<\n>
+>>
+
+/** Gen ^($label ...) */
+rewriteTokenLabelRefRoot(label,elementIndex) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+/** Gen ^($label ...) where label+=... */
+rewriteTokenListLabelRefRoot ::= rewriteTokenLabelRefRoot
+
+/** Gen ^(ID ...) or ^(ID[args] ...) */
+rewriteTokenRefRoot(token,elementIndex,args,terminalOptions={}) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(<createRewriteNodeFromElement(...)>, root_<treeLevel>)<\n>
+>>
+
+rewriteImaginaryTokenRef(args,token,elementIndex,terminalOptions={}) ::= <<
+self._adaptor.addChild(root_<treeLevel>, <createImaginaryNode(token, args, terminalOptions)>)<\n>
+>>
+
+rewriteImaginaryTokenRefRoot(args,token,elementIndex,terminalOptions={}) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(<createImaginaryNode(token, args, terminalOptions)>, root_<treeLevel>)<\n>
+>>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<!FIXME(96,untested)!>
+root_0 = <action><\n>
+>>
+
+/** What is the name of the previous value of this rule's root tree?  This
+ *  lets us refer to $rule to mean the previous value.  I am reusing the
+ *  variable 'tree' sitting in retval struct to hold the value of root_0 right
+ *  before I set it during rewrites.  The assign will be to retval.tree.
+ */
+prevRuleRootRef() ::= "retval"
+
+rewriteRuleRef(rule) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<rule>.nextTree())<\n>
+>>
+
+rewriteRuleRefRoot(rule) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<rule>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+rewriteNodeAction(action) ::= <<
+self._adaptor.addChild(root_<treeLevel>, <action>)<\n>
+>>
+
+rewriteNodeActionRoot(action) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(<action>, root_<treeLevel>)<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel=rule */
+rewriteRuleLabelRef(label) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
+>>
+
+/** Gen $ruleLabel ... where defined via ruleLabel+=rule */
+rewriteRuleListLabelRef(label) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel=rule */
+rewriteRuleLabelRefRoot(label) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+/** Gen ^($ruleLabel ...) where ruleLabel+=rule */
+rewriteRuleListLabelRefRoot(label) ::= <<
+root_<treeLevel> = self._adaptor.becomeRoot(stream_<label>.nextNode(), root_<treeLevel>)<\n>
+>>
+
+rewriteWildcardLabelRef(label) ::= <<
+self._adaptor.addChild(root_<treeLevel>, stream_<label>.nextTree())<\n>
+>>
+
+createImaginaryNode(tokenType,args,terminalOptions={}) ::= <<
+<if(terminalOptions.node)>
+<! new MethodNode(IDLabel, args) !>
+<terminalOptions.node>(<tokenType><if(args)>, <args; separator=", "><endif>)
+<else>
+<if (!args)>self._adaptor.createFromType(<tokenType>, "<tokenType>")
+<else>self._adaptor.create(<tokenType>, <args; separator=", ">)
+<endif>
+<endif>
+>>
+
+//<! need to call different adaptor.create*() methods depending on argument count !>
+//<if (!args)>self._adaptor.createFromType(<tokenType>, "<tokenType>")
+//<else><if (!rest(args))>self._adaptor.createFromType(<tokenType>, <first(args)>)
+//<else><if (!rest(rest(args)))>self._adaptor.createFromToken(<tokenType>, <first(args)>, <first(rest(args))>)
+//<endif>
+//<endif>
+//<endif>
+
+
+createRewriteNodeFromElement(token,args,terminalOptions={}) ::= <<
+<if(terminalOptions.node)>
+<terminalOptions.node>(stream_<token>.nextToken()<if(args)>, <args; separator=", "><endif>)
+<else>
+<if(args)> <! must create new node from old !>
+<! need to call different adaptor.create*() methods depending on argument count !>
+<if (!args)>self._adaptor.createFromType(<token>, "<token>")
+<else><if (!rest(args))>self._adaptor.createFromToken(<token>, <first(args)>)
+<else><if (!rest(rest(args)))>self._adaptor.createFromToken(<token>, <first(args)>, <first(rest(args))>)
+<endif>
+<endif>
+<endif>
+<else>
+stream_<token>.nextNode()
+<endif>
+<endif>
+>>
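+
+// Worked example (token name and argument invented): for a rewrite element
+// ID["x"] the template above takes the <if(args)> branch and emits
+// self._adaptor.createFromToken(ID, "x"); with no arguments it emits
+// stream_ID.nextNode(), i.e. the next buffered node for that token.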
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python3/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python3/ASTDbg.stg
new file mode 100644
index 0000000..187db39
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python3/ASTDbg.stg
@@ -0,0 +1,59 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2012 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Template overrides to add debugging to AST stuff.  The dynamic inheritance
+ *  hierarchy is set up as ASTDbg : AST : Dbg : Python3 by the code generator.
+ */
+group ASTDbg;
+
+astAccessor() ::= <<
+def setTreeAdaptor(self, adaptor):
+<if(grammar.grammarIsRoot)>
+    self._adaptor = DebugTreeAdaptor(self.dbg, adaptor)
+<else>
+    self._adaptor = adaptor # delegator sends dbg adaptor
+<endif>
+    <grammar.directDelegates:{g|<g:delegateName()>.setTreeAdaptor(self._adaptor)}>
+
+def getTreeAdaptor(self):
+    return self._adaptor
+
+adaptor = property(getTreeAdaptor, setTreeAdaptor)<\n>
+>>
+
+createListenerAndHandshake() ::= <<
+proxy = DebugEventSocketProxy(self, adaptor=<if(TREE_PARSER)>self.input.getTreeAdaptor()<else>self._adaptor<endif>,
+                              debug=debug_socket, port=port)
+self.setDebugListener(proxy)
+self.adaptor.setDebugListener(proxy)
+self.input.setDebugListener(proxy)
+#self.set<inputStreamType>(Debug<inputStreamType>(self.input, proxy))
+proxy.handshake()
+>>
+
+@rewriteElement.pregen() ::= "self._dbg.location(<e.line>, <e.pos>)"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python3/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python3/ASTParser.stg
new file mode 100644
index 0000000..cf1ab0f
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python3/ASTParser.stg
@@ -0,0 +1,199 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2012 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during normal parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  The situation is not too bad as rewrite (->) usage makes ^ and !
+ *  invalid. There is no huge explosion of combinations.
+ */
+
+finishedBacktracking(block) ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(backtracking)>
+if <actions.(actionScope).synpredgate>:
+    <block>
+<else>
+<block>
+<endif>
+<endif>
+>>
+
+@ruleBody.setErrorReturnValue() ::= <<
+retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
+>>
+
+// TOKEN AST STUFF
+
+/** ID and output=AST */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.tokenRef(...)>
+<finishedBacktracking({
+<label>_tree = <createNodeFromToken(...)>
+self._adaptor.addChild(root_0, <label>_tree)
+})>
+>>
+
+/** ID! and output=AST (same as plain tokenRef) */
+tokenRefBang(token,label,elementIndex,terminalOptions={}) ::= "<super.tokenRef(...)>"
+
+/** ID^ and output=AST */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.tokenRef(...)>
+<finishedBacktracking({
+<label>_tree = <createNodeFromToken(...)>
+root_0 = self._adaptor.becomeRoot(<label>_tree, root_0)
+})>
+>>
+
+/** ids+=ID! and output=AST */
+tokenRefBangAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefBang(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** label+=TOKEN when output=AST but not rewrite alt */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match label+=TOKEN^ when output=AST but not rewrite alt */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+// SET AST
+
+// the match set stuff is interesting in that it uses an argument list
+// to pass code to the default matchSet; another possible way to alter
+// inherited code.  I don't use the region stuff because I need to pass
+// different chunks depending on the operator.  I don't like making
+// the template name have the operator as the number of templates gets
+// large but this is the most flexible--this is as opposed to having
+// the code generator call matchSet then add root code or ruleroot code
+// plus list label plus ...  The combinations might require complicated
+// rather than just added on code.  Investigate that refactoring when
+// I have more time.
+
+matchSet(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <%
+<super.matchSet(postmatchCode={<finishedBacktracking({self._adaptor.addChild(root_0, <createNodeFromToken(...)>)})>}, ...)>
+%>
+
+matchRuleBlockSet(s,label,elementIndex,postmatchCode,treeLevel="0",terminalOptions={}) ::= <<
+<matchSet(...)>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= "<super.matchSet(...)>"
+
+// note there is no matchSetTrack because -> rewrites force sets to be
+// plain old blocks of alts: (A|B|...|C)
+
+matchSetRuleRoot(s,label,elementIndex,debug,terminalOptions={}) ::= <<
+<if(label)>
+<label> = self.input.LT(1)<\n>
+<endif>
+<super.matchSet(postmatchCode={<finishedBacktracking({root_0 = self._adaptor.becomeRoot(<createNodeFromToken(...)>, root_0)})>}, ...)>
+>>
+
+// RULE REF AST
+
+/** rule when output=AST */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({self._adaptor.addChild(root_0, <label>.tree)})>
+>>
+
+/** rule! is same as normal rule ref */
+ruleRefBang(rule,label,elementIndex,args,scope) ::= "<super.ruleRef(...)>"
+
+/** rule^ */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+<super.ruleRef(...)>
+<finishedBacktracking({root_0 = self._adaptor.becomeRoot(<label>.tree, root_0)})>
+>>
+
+/** x+=rule when output=AST */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** x+=rule! when output=AST is a rule ref with list addition */
+ruleRefBangAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefBang(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** x+=rule^ */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+// WILDCARD AST
+
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.wildcard(...)>
+<finishedBacktracking({
+<label>_tree = self._adaptor.createWithPayload(<label>)
+self._adaptor.addChild(root_0, <label>_tree)
+})>
+>>
+
+wildcardBang(label,elementIndex) ::= "<super.wildcard(...)>"
+
+wildcardRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+<super.wildcard(...)>
+<finishedBacktracking({
+<label>_tree = self._adaptor.createWithPayload(<label>)
+root_0 = self._adaptor.becomeRoot(<label>_tree, root_0)
+})>
+>>
+
+createNodeFromToken(label,terminalOptions={}) ::= <%
+<if(terminalOptions.node)>
+<terminalOptions.node>(<label>) <! new MethodNode(IDLabel) !>
+<else>
+self._adaptor.createWithPayload(<label>)
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<finishedBacktracking({
+retval.tree = self._adaptor.rulePostProcessing(root_0)
+self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
+})>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python3/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python3/ASTTreeParser.stg
new file mode 100644
index 0000000..0a8d268
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python3/ASTTreeParser.stg
@@ -0,0 +1,311 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2012 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** Templates for building ASTs during tree parsing.
+ *
+ *  Deal with many combinations.  Dimensions are:
+ *  Auto build or rewrite
+ *    no label, label, list label  (label/no-label handled together)
+ *    child, root
+ *    token, set, rule, wildcard
+ *
+ *  Each combination has its own template except that label/no label
+ *  is combined into tokenRef, ruleRef, ...
+ */
+
+finishedBacktracking(block) ::= <<
+<if(backtracking)>
+if <actions.(actionScope).synpredgate>:
+    <block>
+<else>
+<block>
+<endif>
+>>
+
+/** Add a variable to track last element matched */
+ruleDeclarations() ::= <<
+<super.ruleDeclarations()>
+_first_0 = None
+_last = None<\n>
+>>
+
+/** What to emit when there is no rewrite rule.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= <<
+<finishedBacktracking({
+<if(rewriteMode)>
+retval.tree = _first_0
+if self._adaptor.getParent(retval.tree) is not None and self._adaptor.isNil(self._adaptor.getParent(retval.tree)):
+    retval.tree = self._adaptor.getParent(retval.tree)
+<endif>
+})>
+>>
+
+/** match ^(root children) in tree parser; override here to
+ *  add tree construction actions.
+ */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+_last = self.input.LT(1)
+_save_last_<treeLevel> = _last
+_first_<treeLevel> = None
+<if(!rewriteMode)>
+root_<treeLevel> = self._adaptor.nil()<\n>
+<endif>
+<root:element()>
+<if(rewriteMode)>
+<finishedBacktracking({
+<if(root.el.rule)>
+if _first_<enclosingTreeLevel> is None:
+    _first_<enclosingTreeLevel> = <root.el.label>.tree<\n>
+<else>
+if _first_<enclosingTreeLevel> is None:
+    _first_<enclosingTreeLevel> = <root.el.label><\n>
+<endif>
+})>
+<endif>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if self.input.LA(1) == DOWN:
+    self.match(self.input, DOWN, None)
+    <children:element()>
+    self.match(self.input, UP, None)
+
+<else>
+self.match(self.input, DOWN, None)
+<children:element()>
+self.match(self.input, UP, None)<\n>
+<endif>
+<if(!rewriteMode)>
+self._adaptor.addChild(root_<enclosingTreeLevel>, root_<treeLevel>)<\n>
+<endif>
+_last = _save_last_<treeLevel>
+
+>>
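+
+// Sketch of the generated shape (details elided): for a tree pattern
+// ^(PLUS expr expr) the template above yields roughly
+//
+//   _last = self.input.LT(1)
+//   _save_last_1 = _last
+//   _first_1 = None
+//   root_1 = self._adaptor.nil()
+//   # ... match the PLUS root, becomeRoot into root_1 ...
+//   self.match(self.input, DOWN, None)
+//   # ... match the two expr children, adding to root_1 ...
+//   self.match(self.input, UP, None)
+//   self._adaptor.addChild(root_0, root_1)
+//   _last = _save_last_1
+//
+// (in rewriteMode the root_<treeLevel> construction and the addChild call are
+// omitted; _first_<enclosingTreeLevel> records the first matched element for
+// noRewrite() instead).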
+
+// TOKEN AST STUFF
+
+/** ID! and output=AST (same as plain tokenRef) except that it also
+ *  sets _last
+ */
+tokenRefBang(token,label,elementIndex,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.tokenRef(...)>
+>>
+
+/** ID auto construct */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(terminalOptions.node)>
+<label>_tree = <terminalOptions.node>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+self._adaptor.addChild(root_<treeLevel>, <label>_tree)
+})>
+<else> <! rewrite mode !>
+<finishedBacktracking({
+if _first_<treeLevel> is None:
+    _first_<treeLevel> = <label><\n>
+})>
+<endif>
+>>
+
+/** label+=TOKEN auto construct */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** ^(ID ...) auto construct */
+tokenRefRuleRoot(token,label,elementIndex,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.tokenRef(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(terminalOptions.node)>
+<label>_tree = <terminalOptions.node>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+root_<treeLevel> = self._adaptor.becomeRoot(<label>_tree, root_<treeLevel>)
+})>
+<endif>
+>>
+
+/** Match ^(label+=TOKEN ...) auto construct */
+tokenRefRuleRootAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRefRuleRoot(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard and auto dup the node/subtree */
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.wildcard(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+<label>_tree = self._adaptor.dupTree(<label>)
+self._adaptor.addChild(root_<treeLevel>, <label>_tree)
+})>
+<else> <! rewrite mode !>
+<finishedBacktracking({
+if _first_<treeLevel> is None:
+    _first_<treeLevel> = <label>
+})>
+<endif>
+>>
+
+// SET AST
+matchSet(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(terminalOptions.node)>
+<label>_tree = <terminalOptions.node>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+self._adaptor.addChild(root_<treeLevel>, <label>_tree)
+})>
+<endif>
+}, ...)>
+>>
+
+matchRuleBlockSet(s,label,elementIndex,postmatchCode,treeLevel="0",terminalOptions={}) ::= <<
+<matchSet(...)>
+<noRewrite(...)> <! set return tree !>
+>>
+
+matchSetBang(s,label,elementIndex,postmatchCode,terminalOptions={}) ::= <<
+_last = self.input.LT(1)
+<super.matchSet(...)>
+>>
+
+matchSetRuleRoot(s,label,elementIndex,debug,terminalOptions={}) ::= <<
+<super.matchSet(postmatchCode={
+<if(!rewriteMode)>
+<finishedBacktracking({
+<if(terminalOptions.node)>
+<label>_tree = <terminalOptions.node>(<label>)
+<else>
+<label>_tree = self._adaptor.dupNode(<label>)
+<endif><\n>
+root_<treeLevel> = self._adaptor.becomeRoot(<label>_tree, root_<treeLevel>)
+})>
+<endif>
+}, ...)>
+>>
+
+// RULE REF AST
+
+/** rule auto construct */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRef(...)>
+<finishedBacktracking({
+<if(!rewriteMode)>
+self._adaptor.addChild(root_<treeLevel>, <label>.tree)
+<else> <! rewrite mode !>
+if _first_<treeLevel> is None:
+    _first_<treeLevel> = <label>.tree<\n>
+<endif>
+})>
+>>
+
+/** x+=rule auto construct */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** ^(rule ...) auto construct */
+ruleRefRuleRoot(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRef(...)>
+<if(!rewriteMode)>
+<finishedBacktracking({
+root_<treeLevel> = self._adaptor.becomeRoot(<label>.tree, root_<treeLevel>)
+})>
+<endif>
+>>
+
+/** ^(x+=rule ...) auto construct */
+ruleRefRuleRootAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRefRuleRoot(...)>
+<listLabel(label, {<label>.tree})>
+>>
+
+/** rule when output=AST and tracking for rewrite */
+ruleRefTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefTrack(...)>
+>>
+
+/** x+=rule when output=AST and tracking for rewrite */
+ruleRefTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefTrackAndListLabel(...)>
+>>
+
+/** ^(rule ...) rewrite */
+ruleRefRuleRootTrack(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefRuleRootTrack(...)>
+>>
+
+/** ^(x+=rule ...) rewrite */
+ruleRefRuleRootTrackAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+_last = self.input.LT(1)
+<super.ruleRefRuleRootTrackAndListLabel(...)>
+>>
+
+/** Streams for token refs are tree nodes now; override to
+ *  change nextToken to nextNode.
+ */
+createRewriteNodeFromElement(token,scope,terminalOptions={}) ::= <<
+<if(terminalOptions.node)>
+<terminalOptions.node>(stream_<token>.nextNode())
+<else>
+stream_<token>.nextNode()
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<super.ruleCleanUp()>
+<if(!rewriteMode)>
+<finishedBacktracking({
+retval.tree = self._adaptor.rulePostProcessing(root_0)
+})>
+<endif>
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python3/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python3/Dbg.stg
new file mode 100644
index 0000000..4892cf4
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python3/Dbg.stg
@@ -0,0 +1,325 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2012 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template overrides to add debugging to normal Python output;
+ *  If ASTs are built, then you'll also get ASTDbg.stg loaded.
+ */
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+from antlr3.debug import *
+>>
+
+@genericParser.args() ::= <<
+debug_socket = kwargs.pop('debug_socket', None)
+port = kwargs.pop('port', None)
+>>
+
+@genericParser.init() ::= <<
+self.ruleLevel = 0
+
+if self._dbg is None:
+    <createListenerAndHandshake()>
+
+>>
+
+createListenerAndHandshake() ::= <<
+<if(TREE_PARSER)>
+proxy = DebugEventSocketProxy(self, adaptor=self.input.getTreeAdaptor(),
+                              debug=debug_socket, port=port)<\n>
+<else>
+proxy = DebugEventSocketProxy(self, debug=debug_socket, port=port)<\n>
+<endif>
+self.setDebugListener(proxy)
+proxy.handshake()
+
+>>
+
+@genericParser.members() ::= <<
+<if(grammar.grammarIsRoot)>
+ruleNames = [
+    "invalidRule", <grammar.allImportedRules:{rST | "<rST.name>"}; wrap="\n    ", separator=", ">
+    ]<\n>
+<endif>
+decisionCanBacktrack = [
+    False, # invalid decision
+    <grammar.decisions:{d | <if(d.dfa.hasSynPred)>True<else>False<endif>}; wrap="\n    ", separator=", ">
+    ]
+<if(grammar.grammarIsRoot)> <! grammar imports other grammar(s) !>
+def getRuleLevel(self):
+    return self.ruleLevel
+
+def incRuleLevel(self):
+    self.ruleLevel += 1
+
+def decRuleLevel(self):
+    self.ruleLevel -= 1
+
+<if(profile)>
+    <ctorForProfilingRootGrammar()>
+<else>
+    <ctorForRootGrammar()>
+<endif>
+<ctorForPredefinedListener()>
+<else> <! imported grammar !>
+def getRuleLevel(self):
+    return <grammar.delegators:{g| <g:delegateName()>}>.getRuleLevel()
+
+def incRuleLevel(self):
+    <grammar.delegators:{g| <g:delegateName()>}>.incRuleLevel()
+
+def decRuleLevel(self):
+    <grammar.delegators:{g| <g:delegateName()>}>.decRuleLevel()
+
+<ctorForDelegateGrammar()>
+<endif>
+<if(profile)>
+FIXME(2)
+public boolean alreadyParsedRule(IntStream input, int ruleIndex) {
+    ((Profiler)self._dbg).examineRuleMemoization(input, ruleIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+    return super.alreadyParsedRule(input, ruleIndex);
+}<\n>
+FIXME(3)
+public void memoize(IntStream input,
+                    int ruleIndex,
+                    int ruleStartIndex)
+{
+    ((Profiler)self._dbg).memoize(input, ruleIndex, ruleStartIndex, <grammar.composite.rootGrammar.recognizerName>.ruleNames[ruleIndex]);
+    super.memoize(input, ruleIndex, ruleStartIndex);
+}<\n>
+<endif>
+def evalPredicate(self, result, predicate):
+    self._dbg.semanticPredicate(result, predicate)
+    return result
+<\n>
+>>
+
+ctorForRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+<! Same except we add port number and profile stuff if root grammar !>
+<!
+public <name>(<inputStreamType> input) {
+    this(input, DebugEventSocketProxy.DEFAULT_DEBUGGER_PORT, new RecognizerSharedState());
+}
+public <name>(<inputStreamType> input, int port, RecognizerSharedState state) {
+    super(input, state);
+    <parserCtorBody()>
+    <createListenerAndHandshake()>
+    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, self._dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}<\n>
+!>
+>>
+
+ctorForProfilingRootGrammar() ::= <<
+<! bug: can't use <@super.members()> cut-n-paste instead !>
+public <name>(<inputStreamType> input) {
+    this(input, new Profiler(null), new RecognizerSharedState());
+}
+public <name>(<inputStreamType> input, DebugEventListener self.dbg, RecognizerSharedState state) {
+    super(input, self.dbg, state);
+    Profiler p = (Profiler)self.dbg;
+    p.setParser(this);
+    <parserCtorBody()>
+    <grammar.directDelegates:
+     {g|<g:delegateName()> = new <g.recognizerName>(input, self.dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}
+<\n>
+>>
+
+/** Basically we don't want to set any dbg listeners, as the root will have it. */
+ctorForDelegateGrammar() ::= <<
+<!
+public <name>(<inputStreamType> input, DebugEventListener self.dbg, RecognizerSharedState state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+    super(input, dbg, state);
+    <parserCtorBody()>
+    <grammar.directDelegates:
+     {g|<g:delegateName()> = new <g.recognizerName>(input, this, this.state<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+}<\n>
+!>
+>>
+
+ctorForPredefinedListener() ::= <<
+<!
+public <name>(<inputStreamType> input, DebugEventListener dbg) {
+    <@superClassRef>super(input, dbg, new RecognizerSharedState());<@end>
+<if(profile)>
+    Profiler p = (Profiler)dbg;
+    p.setParser(this);
+<endif>
+    <parserCtorBody()>
+    <grammar.directDelegates:{g|<g:delegateName()> = new <g.recognizerName>(input, self._dbg, this.state, this<grammar.delegators:{g|, <g:delegateName()>}>);}; separator="\n">
+    <@finally()>
+}<\n>
+!>
+>>
+
+@genericParser.superClassName() ::= "Debug<@super.superClassName()>"
+
+@rule.body() ::= <<
+try:
+    self._dbg.enterRule(self.getGrammarFileName(), "<ruleName>")
+    if self.getRuleLevel() == 0:
+        self._dbg.commence()
+    self.incRuleLevel()
+    <! ST uses zero-based columns, we want one-based !>
+    self._dbg.location(<ruleDescriptor.tree.line>, <ruleDescriptor.tree.charPositionInLine>+1)
+
+    <@super.body()>
+
+    self._dbg.location(<ruleDescriptor.EORNode.line>, <ruleDescriptor.EORNode.charPositionInLine>+1)
+finally:
+    self._dbg.exitRule(self.getGrammarFileName(), "<ruleName>")
+    self.decRuleLevel()
+    if self.getRuleLevel() == 0:
+         self._dbg.terminate()
+
+>>
+
+@synpred.start() ::= "self._dbg.beginBacktrack(self._state.backtracking)"
+
+@synpred.stop() ::= "self._dbg.endBacktrack(self._state.backtracking, success)"
+
+// Common debug event triggers used by region overrides below
+
+enterSubRule() ::=
+    "try { self._dbg.enterSubRule(<decisionNumber>);<\n>"
+
+exitSubRule() ::=
+    "} finally {self._dbg.exitSubRule(<decisionNumber>);}<\n>"
+
+enterDecision() ::=
+    "try { self._dbg.enterDecision(<decisionNumber>);<\n>"
+
+exitDecision() ::=
+    "} finally {self._dbg.exitDecision(<decisionNumber>);}<\n>"
+
+enterAlt(n) ::= "self._dbg.enterAlt(<n>)<\n>"
+
+// Region overrides that tell various constructs to add debugging triggers
+
+@block.body() ::= <<
+try:
+    self._dbg.enterSubRule(<decisionNumber>)
+    <@super.body()>
+finally:
+    self._dbg.exitSubRule(<decisionNumber>)
+>>
+
+@blockBody.decision() ::= <<
+try:
+    self._dbg.enterDecision(
+        <decisionNumber>, self.decisionCanBacktrack[<decisionNumber>])
+    <@super.decision()>
+finally:
+    self._dbg.exitDecision(<decisionNumber>)
+>>
+
+@ruleBlock.decision() ::= <<
+try:
+    self._dbg.enterDecision(
+        <decisionNumber>, self.decisionCanBacktrack[<decisionNumber>])
+    <@super.decision()>
+finally:
+    self._dbg.exitDecision(<decisionNumber>)
+>>
+
+@ruleBlockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@blockSingleAlt.prealt() ::= "<enterAlt(n=\"1\")>"
+
+@positiveClosureBlock.loopBody() ::= <<
+try:
+    self._dbg.enterSubRule(<decisionNumber>)
+    <@super.loopBody()>
+finally:
+    self._dbg.exitSubRule(<decisionNumber>)<\n>
+>>
+
+@positiveClosureBlockLoop.decisionBody() ::= <<
+try:
+    self._dbg.enterDecision(
+        <decisionNumber>, self.decisionCanBacktrack[<decisionNumber>])
+    <@super.decisionBody()>
+finally:
+    self._dbg.exitDecision(<decisionNumber>)
+>>
+
+@positiveClosureBlockLoop.earlyExitException() ::=
+    "self._dbg.recognitionException(eee)<\n>"
+
+@closureBlock.loopBody() ::= <<
+try:
+    self._dbg.enterSubRule(<decisionNumber>)
+    <@super.loopBody()>
+finally:
+    self._dbg.exitSubRule(<decisionNumber>)<\n>
+>>
+
+@closureBlockLoop.decisionBody() ::= <<
+try:
+    self._dbg.enterDecision(
+        <decisionNumber>, self.decisionCanBacktrack[<decisionNumber>])
+    <@super.decisionBody()>
+finally:
+    self._dbg.exitDecision(<decisionNumber>)
+>>
+
+@altSwitchCase.prealt() ::= "<enterAlt(altNum)>"
+
+@element.prematch() ::=
+    "self._dbg.location(<e.line>, <e.pos>)"
+
+@matchSet.mismatchedSetException() ::=
+    "self._dbg.recognitionException(mse)"
+
+@dfaState.noViableAltException() ::= "self._dbg.recognitionException(nvae)"
+
+@dfaStateSwitch.noViableAltException() ::= "self._dbg.recognitionException(nvae)"
+
+dfaDecision(decisionNumber,description) ::= <<
+try:
+    self.isCyclicDecision = True
+    <super.dfaDecision(...)>
+
+except NoViableAltException as nvae:
+    self._dbg.recognitionException(nvae)
+    raise
+
+>>
+
+@cyclicDFA.errorMethod() ::= <<
+def error(self, nvae):
+    self._dbg.recognitionException(nvae)
+
+>>
+
+/** Force predicate validation to trigger an event */
+evalPredicate(pred,description) ::= <<
+self.evalPredicate(<pred>,"<description>")
+>>
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python3/Python3.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python3/Python3.stg
new file mode 100644
index 0000000..ad2e2ab
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python3/Python3.stg
@@ -0,0 +1,1499 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2012 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/** The API version of the runtime that recognizers generated by this runtime
+ *  need.
+ */
+apiVersion() ::= "1"
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+           bitsets, buildTemplate, buildAST, rewriteMode, profile,
+           backtracking, synpreds, memoize, numRules,
+           fileName, ANTLRVersion, generatedTimestamp, trace,
+           scopes, superClass, literals) ::=
+<<
+# $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+
+<@imports>
+import sys
+from antlr3 import *
+<if(TREE_PARSER)>
+from antlr3.tree import *<\n>
+<endif>
+<@end>
+
+<actions.(actionScope).header>
+
+<! <docComment> !>
+
+# for convenience in actions
+HIDDEN = BaseRecognizer.HIDDEN
+
+# token types
+<tokens:{it | <it.name>=<it.type>}; separator="\n">
+
+# token names
+tokenNamesMap = {
+    0: "\<invalid>", 1: "\<EOR>", 2: "\<DOWN>", 3: "\<UP>",
+    <tokens:{it | <it.type>: "<it.name>"}; wrap, separator=", ">
+}
+Token.registerTokenNamesMap(tokenNamesMap)
+
+<recognizer>
+
+<if(actions.(actionScope).main)>
+<actions.(actionScope).main>
+<else>
+def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
+<if(LEXER)>
+    from antlr3.main import LexerMain
+    main = LexerMain(<recognizer.name>)<\n>
+<endif>
+<if(PARSER)>
+    from antlr3.main import ParserMain
+    main = ParserMain("<recognizer.grammar.name>Lexer", <recognizer.name>)<\n>
+<endif>
+<if(TREE_PARSER)>
+    from antlr3.main import WalkerMain
+    main = WalkerMain(<recognizer.name>)<\n>
+<endif>
+    main.stdin = stdin
+    main.stdout = stdout
+    main.stderr = stderr
+    main.execute(argv)<\n>
+<endif>
+
+<actions.(actionScope).footer>
+
+if __name__ == '__main__':
+    main(sys.argv)
+
+>>
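+
+// Resulting module layout (illustrative, grammar name T and token types are
+// hypothetical): a parser grammar produces a TParser.py shaped like
+//
+//   import sys
+//   from antlr3 import *
+//   HIDDEN = BaseRecognizer.HIDDEN
+//   ID=4
+//   INT=5
+//   # ... tokenNamesMap registration, then the recognizer class ...
+//   class TParser(Parser):
+//       ...
+//   if __name__ == '__main__':
+//       main(sys.argv)
+//
+// with argument handling delegated to the antlr3.main helpers.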
+
+lexer(grammar, name, tokens, scopes, rules, numRules, filterMode,
+      labelType="CommonToken", superClass="Lexer") ::= <<
+<if(grammar.directDelegates)>
+# path hack to allow absolute import of related grammars.
+from os.path import dirname
+__path__ = [dirname(__file__)]
+del dirname
+
+<grammar.directDelegates:
+ {g|from .<g.recognizerName> import <g.recognizerName>}; separator="\n">
+<endif>
+
+class <grammar.recognizerName>(<@superClassName><superClass><@end>):
+    <scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>
+
+    grammarFileName = "<fileName>"
+    api_version = <apiVersion()>
+
+    def __init__(self<grammar.delegators:{g|, <g:delegateName()>}>, input=None, state=None):
+        if state is None:
+            state = RecognizerSharedState()
+        super().__init__(input, state)
+
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        self._state.ruleMemo = {}
+<endif>
+<endif>
+
+        <grammar.directDelegates:
+         {g|self.<g:delegateName()> = <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>self, input, state)}; separator="\n">
+        <grammar.directDelegates:
+         {g|<g.delegates:{h|self.<h:delegateName()> = self.<g:delegateName()>.<h:delegateName()>}; separator="\n">}; separator="\n">
+        <grammar.delegators:
+         {g|self.<g:delegateName()> = <g:delegateName()>}; separator="\n">
+        <last(grammar.delegators):
+         {g|self.gParent = <g:delegateName()>}; separator="\n">
+        self.delegates = [<grammar.delegates: {g|self.<g:delegateName()>}; separator = ", ">]
+
+        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; separator="\n">
+
+        <actions.lexer.init>
+
+
+    <actions.lexer.members>
+
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator="\n\n">
+
+    <synpreds:{p | <lexerSynpred(p)>}>
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error is reported when nothing matches; just rewind,
+ *  consume a character and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+def nextToken(self):
+    while True:
+        if self.input.LA(1) == EOF:
+            return self.makeEOFToken()
+
+        self._state.token = None
+        self._state.channel = DEFAULT_CHANNEL
+        self._state.tokenStartCharIndex = self.input.index()
+        self._state.tokenStartCharPositionInLine = self.input.charPositionInLine
+        self._state.tokenStartLine = self.input.line
+        self._state._text = None
+        try:
+            m = self.input.mark()
+            try:
+                # means we won't throw slow exception
+                self._state.backtracking = 1
+                try:
+                    self.mTokens()
+                finally:
+                    self._state.backtracking = 0
+
+            except BacktrackingFailed:
+                # mTokens backtracks with synpred at backtracking==2
+                # and we set the synpredgate to allow actions at level 1.
+                self.input.rewind(m)
+                self.input.consume() # advance one char and try again
+
+            else:
+                self.emit()
+                return self._state.token
+
+        except RecognitionException as re:
+            # shouldn't happen in backtracking mode, but...
+            self.reportError(re)
+            self.recover(re)
+
+
+def memoize(self, input, ruleIndex, ruleStartIndex, success):
+    if self._state.backtracking > 1:
+        # is Lexer always superclass?
+        super().memoize(input, ruleIndex, ruleStartIndex, success)
+
+
+def alreadyParsedRule(self, input, ruleIndex):
+    if self._state.backtracking > 1:
+        return super().alreadyParsedRule(input, ruleIndex)
+    return False
+
+
+>>
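+
+// Usage sketch: filter mode is enabled in a lexer grammar with
+// "options { filter=true; }", for example (grammar invented):
+//
+//   lexer grammar FuzzyT;
+//   options { filter=true; }
+//   ID : ('a'..'z' | 'A'..'Z')+ ;
+//
+// The generated lexer then carries the nextToken()/memoize()/
+// alreadyParsedRule() overrides above, so input that matches no rule is
+// skipped one character at a time instead of raising an error.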
+
+actionGate() ::= "self._state.backtracking == 0"
+
+filteringActionGate() ::= "self._state.backtracking == 1"
+
+/** How to generate a parser */
+
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass, labelType, members,
+              rewriteElementType, filterMode, init, ASTLabelType="Object") ::= <<
+# token names
+tokenNames = [
+    "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>",
+    <tokenNames; wrap, separator=", ">
+]
+
+<scopes:{it|<if(it.isDynamicGlobalScope)><globalAttributeScopeClass(scope=it)><endif>}>
+
+<if(grammar.directDelegates)>
+# path hack to allow absolute import of related grammars.
+from os.path import dirname
+__path__ = [dirname(__file__)]
+del dirname
+
+<grammar.directDelegates:
+ {g|from .<g.recognizerName> import <g.recognizerName>}; separator="\n">
+<endif>
+
+<rules:{it|<ruleAttributeScopeClass(scope=it.ruleDescriptor.ruleScope)>}>
+
+class <grammar.recognizerName>(<@superClassName><superClass><@end>):
+    grammarFileName = "<fileName>"
+    api_version = <apiVersion()>
+    tokenNames = tokenNames
+
+    def __init__(self<grammar.delegators:{g|, <g:delegateName()>}>, input, state=None, *args, **kwargs):
+        if state is None:
+            state = RecognizerSharedState()
+
+        <@args()>
+        super().__init__(input, state, *args, **kwargs)
+
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        self._state.ruleMemo = {}
+<endif>
+<endif>
+
+        <cyclicDFAs:{dfa | <cyclicDFAInit(dfa)>}; separator="\n">
+
+        <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeStack(scope=it)><endif>}>
+        <rules:{it | <ruleAttributeScopeStack(scope=it.ruleDescriptor.ruleScope)>}>
+
+        <init>
+
+        <grammar.delegators:
+         {g|self.<g:delegateName()> = <g:delegateName()>}; separator="\n">
+        <grammar.directDelegates:
+         {g|self.<g:delegateName()> = <g.recognizerName>(<trunc(g.delegators):{p|<p:delegateName()>, }>self, input, state)}; separator="\n">
+        <grammar.directDelegates:
+         {g|<g.delegates:{h|self.<h:delegateName()> = self.<g:delegateName()>.<h:delegateName()>}; separator="\n">}; separator="\n">
+        <last(grammar.delegators):
+         {g|self.gParent = self.<g:delegateName()>}; separator="\n">
+        self.delegates = [<grammar.delegates: {g|self.<g:delegateName()>}; separator = ", ">]
+
+        <@init><@end>
+
+
+    <@members><@end>
+
+    <members>
+
+    <rules; separator="\n\n">
+
+    <! generate rule/method definitions for imported rules so they
+       appear to be defined in this recognizer. !>
+    <grammar.delegatedRules:{ruleDescriptor| <delegateRule(ruleDescriptor)> }; separator="\n">
+
+    <synpreds:{p | <synpred(p)>}>
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+    <bitsets:{it | FOLLOW_<it.name>_in_<it.inName><it.tokenIndex> = frozenset([<it.tokenTypes:{it | <it>};separator=", ">])<\n>}>
+
+>>
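
For orientation, the genericParser template above expands to a class skeleton roughly like the following Python, here for a hypothetical root grammar T with no imported grammars (TParser, T.g, the token names and the api_version value are all illustrative and not taken from this change; Parser and RecognizerSharedState come from the antlr3 runtime):

    # illustrative sketch only -- shape of the generated module for grammar T
    tokenNames = [
        "<invalid>", "<EOR>", "<DOWN>", "<UP>", "ID", "INT", "WS"
    ]

    class TParser(Parser):
        grammarFileName = "T.g"
        api_version = 1              # filled in by the apiVersion() template
        tokenNames = tokenNames

        def __init__(self, input, state=None, *args, **kwargs):
            if state is None:
                state = RecognizerSharedState()
            super().__init__(input, state, *args, **kwargs)
            self._state.ruleMemo = {}   # only emitted when memoize is enabled
            self.delegates = []         # no delegate grammars in this sketch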
+
+delegateRule(ruleDescriptor) ::= <<
+def <ruleDescriptor.name>(self, <ruleDescriptor.parameterScope:parameterScope()>):
+<\ >   <if(ruleDescriptor.hasReturnValue)>return <endif>self.<ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<if(ruleDescriptor.parameterScope)><ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", "><endif>)
+
+
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
+       ASTLabelType="Object", superClass="Parser", labelType="Token",
+       members={<actions.parser.members>},
+       init={<actions.parser.init>}
+       ) ::= <<
+<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, "TokenStream", superClass,
+              labelType, members, "Token",
+              false, init, ASTLabelType)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object",
+           superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
+           members={<actions.treeparser.members>},
+           init={<actions.treeparser.init>}
+           ) ::= <<
+<genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, "TreeNodeStream", superClass,
+              labelType, members, "Node",
+              filterMode, init, ASTLabelType)>
+>>
+
+/** A simpler version of the rule template, specific to the imaginary
+ *  rules created for syntactic predicates.  Since they never have return
+ *  values or parameters, generate the simplest possible method, and skip
+ *  the normal memoization machinery; it would be wasted here.
+ *  Because predicates cannot be inlined into the invoking rule, they need
+ *  to be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+# $ANTLR start "<ruleName>"
+def <ruleName>_fragment(self, <ruleDescriptor.parameterScope:parameterScope()>):
+    <ruleLabelDefs()>
+<if(trace)>
+    self.traceIn("<ruleName>_fragment", <ruleDescriptor.index>)
+    try:
+        <block>
+
+    finally:
+        self.traceOut("<ruleName>_fragment", <ruleDescriptor.index>)
+
+<else>
+    <block>
+<endif>
+# $ANTLR end "<ruleName>"
+
+
+>>
+
+synpred(name) ::= <<
+def <name>(self):
+    self._state.backtracking += 1
+    <@start()>
+    start = self.input.mark()
+    try:
+        self.<name>_fragment()
+    except BacktrackingFailed:
+        success = False
+    else:
+        success = True
+    self.input.rewind(start)
+    <@stop()>
+    self._state.backtracking -= 1
+    return success
+
+
+>>
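
Expanded for a hypothetical predicate named synpred1_T, the synpred template above produces a method along these lines inside the generated recognizer (synpred1_T_fragment is the companion method emitted by synpredRule; BacktrackingFailed comes from the antlr3 runtime):

    def synpred1_T(self):
        # speculate: bump the backtracking level and remember where we are
        self._state.backtracking += 1
        start = self.input.mark()
        try:
            self.synpred1_T_fragment()   # attempt to match the predicate
        except BacktrackingFailed:
            success = False
        else:
            success = True
        self.input.rewind(start)         # always restore the input position
        self._state.backtracking -= 1
        return success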
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if self._state.backtracking > 0 and self.alreadyParsedRule(self.input, <ruleDescriptor.index>):
+    # for cached failed rules, alreadyParsedRule will raise an exception
+    success = True
+    return <ruleReturnValue()>
+
+<endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>
+if self._state.backtracking > 0:
+    raise BacktrackingFailed
+
+<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+# $ANTLR start "<ruleName>"
+# <fileName>:<description>
+<ruleDescriptor.actions.decorate>
+def <ruleName>(self, <ruleDescriptor.parameterScope:parameterScope()>):
+<if(trace)>
+    self.traceIn("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    <@body><ruleBody()><@end>
+    <@postamble()>
+    return <ruleReturnValue()>
+
+# $ANTLR end "<ruleName>"
+>>
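
As a concrete sketch, a parameterless parser rule expr with a single return value named value would expand roughly as below; expr, value and the elided block are hypothetical, and the body shows the ruleBody() skeleton with memoization and tracing turned off:

    # $ANTLR start "expr"
    def expr(self):
        value = None                     # from ruleDeclarations()
        try:
            try:
                # <block>: code for the rule's alternatives goes here
                pass
            except RecognitionException as re:
                # default rulecatch: report and recover
                self.reportError(re)
                self.recover(self.input, re)
        finally:
            pass                         # memoize()/scope clean-up slots
        return value                     # ruleReturnValue()
    # $ANTLR end "expr"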
+
+ruleBody() ::= <<
+<if(memoize)>
+<if(backtracking)>
+success = False<\n>
+<endif>
+<endif>
+try:
+    try:
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+
+<if(memoize)>
+<if(backtracking)>
+        success = True<\n>
+<endif>
+<endif>
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    except RecognitionException as re:
+        self.reportError(re)
+        self.recover(self.input, re)
+        <@setErrorReturnValue()>
+
+<endif>
+<else>
+    finally:
+        pass
+
+<endif>
+<endif>
+finally:
+<if(trace)>
+    self.traceOut("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+    <memoize()>
+    <ruleScopeCleanUp()>
+    <finally>
+    pass
+>>
+
+catch(decl,action) ::= <<
+except <e.decl>:
+    <e.action>
+
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval = self.<ruleDescriptor.name>_return()
+retval.start = self.input.LT(1)<\n>
+<elseif(ruleDescriptor.returnScope)>
+<ruleDescriptor.returnScope.attributes:{ a |
+<a.name> = <if(a.initValue)><a.initValue><else>None<endif>
+}>
+<endif>
+<if(memoize)>
+<ruleDescriptor.name>_StartIndex = self.input.index()
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{it | self.<it>_stack.append(<it>_scope())}; separator="\n">
+<ruleDescriptor.ruleScope:{it | self.<it.name>_stack.append(<it.name>_scope())}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{it | self.<it>_stack.pop()}; separator="\n">
+<ruleDescriptor.ruleScope:{it | self.<it.name>_stack.pop()}; separator="\n">
+>>
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it | <it.label.text> = None}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,
+  ruleDescriptor.wildcardTreeListLabels]
+    :{it | list_<it.label.text> = None}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
+<ruleDescriptor.ruleListLabels:{it | <it.label.text> = None}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it | <it.label.text> = None}; separator="\n"
+>
+<ruleDescriptor.charLabels:{it | <it.label.text> = None}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it | list_<it.label.text> = None}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+%>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = self.input.LT(-1)<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if self._state.backtracking > 0:
+    self.memoize(self.input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex, success)
+
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+# $ANTLR start "<ruleName>"
+def m<ruleName>(self, <ruleDescriptor.parameterScope:parameterScope()>):
+<if(trace)>
+    self.traceIn("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+<if(memoize)>
+<if(backtracking)>
+    success = False<\n>
+<endif>
+<endif>
+    try:
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block><\n>
+<else>
+        _type = <ruleName>
+        _channel = DEFAULT_CHANNEL
+
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        <block>
+        <ruleCleanUp()>
+        self._state.type = _type
+        self._state.channel = _channel
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+<if(memoize)>
+<if(backtracking)>
+        success = True<\n>
+<endif>
+<endif>
+
+    finally:
+<if(trace)>
+        self.traceOut("<ruleName>", <ruleDescriptor.index>)<\n>
+<endif>
+        <ruleScopeCleanUp()>
+        <memoize()>
+        pass
+
+# $ANTLR end "<ruleName>"
+
+
+>>
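
For a hypothetical non-fragment lexer rule ID (again with memoization and tracing off), the lexerRule template yields roughly:

    # $ANTLR start "ID"
    def mID(self):
        try:
            _type = ID                   # token type constant for this rule
            _channel = DEFAULT_CHANNEL

            # <block>: match the characters making up an ID token
            pass

            self._state.type = _type     # publish type/channel for emit()
            self._state.channel = _channel
        finally:
            pass
    # $ANTLR end "ID"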
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+def mTokens(self):
+    <block><\n>
+
+
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+alt<decisionNumber> = <maxAlt>
+<decls>
+<@body><blockBody()><@end>
+>>
+
+blockBody() ::= <<
+<@predecision()>
+<@decision><decision><@end>
+<@postdecision()>
+<@prebranch()>
+<alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+alt<decisionNumber> = <maxAlt>
+<decls>
+<@predecision()>
+<@decision><decision><@end>
+<@postdecision()>
+<alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+cnt<decisionNumber> = 0
+<decls>
+<@preloop()>
+<@loopBody>
+<positiveClosureBlockLoop()>
+<@end>
+<@postloop()>
+>>
+
+positiveClosureBlockLoop() ::= <<
+while True: #loop<decisionNumber>
+    alt<decisionNumber> = <maxAlt>
+    <@predecision()>
+    <@decisionBody><decision><@end>
+    <@postdecision()>
+    <alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
+    else:
+        if cnt<decisionNumber> >= 1:
+            break #loop<decisionNumber>
+
+        <ruleBacktrackFailure()>
+        eee = EarlyExitException(<decisionNumber>, self.input)
+        <@earlyExitException()>
+        raise eee
+
+    cnt<decisionNumber> += 1
+>>
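
Filled in for a hypothetical decision number 3 with a single real alternative (so maxAlt is 2), the (...)+ loop above renders roughly as:

    cnt3 = 0
    while True: #loop3
        alt3 = 2
        # the decision code assigns alt3 here, e.g. from self.input.LA(1)
        if alt3 == 1:
            # match one more iteration of the subrule
            pass
        else:
            if cnt3 >= 1:
                break #loop3             # already matched at least once: done

            if self._state.backtracking > 0:
                raise BacktrackingFailed # from ruleBacktrackFailure()
            eee = EarlyExitException(3, self.input)
            raise eee

        cnt3 += 1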
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 1 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# <fileName>:<description>
+<decls>
+<@preloop()>
+<@loopBody>
+<closureBlockLoop()>
+<@end>
+<@postloop()>
+>>
+
+closureBlockLoop() ::= <<
+while True: #loop<decisionNumber>
+    alt<decisionNumber> = <maxAlt>
+    <@predecision()>
+    <@decisionBody><decision><@end>
+    <@postdecision()>
+    <alts:{a | <altSwitchCase(i, a)>}; separator="\nel">
+    else:
+        break #loop<decisionNumber>
+>>
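
The (...)* variant differs only in its exit branch, which breaks unconditionally; for the same hypothetical decision 3:

    while True: #loop3
        alt3 = 2
        # the decision code assigns alt3 here
        if alt3 == 1:
            # match one more iteration of the subrule
            pass
        else:
            break #loop3                 # zero occurrences are fine: just exit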
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation,
+ *  so we can just use the normal block template.
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase(altNum,alt) ::= <<
+if alt<decisionNumber> == <altNum>:
+    <@prealt()>
+    <alt>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt, treeLevel,rew) ::= <<
+# <fileName>:<description>
+pass <! so empty alternatives are a valid block !>
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element(e) ::= <<
+<@prematch()>
+<e.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(label)><label> = <endif>self.match(self.input, <token>, self.FOLLOW_<token>_in_<ruleName><elementIndex>)
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<tokenRef(token,label,elementIndex,terminalOptions)>
+<listLabel(label, label)>
+>>
+
+listLabel(label, elem) ::= <<
+if list_<label> is None:
+    list_<label> = []
+list_<label>.append(<elem>)<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = self.input.LA(1)<\n>
+<endif>
+self.match(<char>)
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = self.input.LA(1)<\n>
+<endif>
+self.matchRange(<a>, <b>)
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,postmatchCode="",terminalOptions={}) ::= <<
+<if(label)>
+<label> = self.input.LT(1)<\n>
+<endif>
+if <s>:
+    self.input.consume()
+    <postmatchCode>
+<if(!LEXER)>
+    self._state.errorRecovery = False<\n>
+<endif>
+
+else:
+    <ruleBacktrackFailure()>
+    mse = MismatchedSetException(None, self.input)
+    <@mismatchedSetException()>
+<if(LEXER)>
+    self.recover(mse)
+    raise mse
+<else>
+    raise mse
+    <! use the following code to make it recover inline; remove the "raise mse" above;
+    self.recoverFromMismatchedSet(
+        self.input, mse, self.FOLLOW_set_in_<ruleName><elementIndex>
+        )
+    !>
+<endif>
+<\n>
+>>
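
In a parser rule, a hypothetical set test covering the digit characters would therefore expand to something like the following (the optional label and the disabled inline-recovery variant are omitted; MismatchedSetException and BacktrackingFailed come from the antlr3 runtime):

    if (48 <= self.input.LA(1) <= 57):    # hypothetical set: '0'..'9'
        self.input.consume()
        self._state.errorRecovery = False # parser only; skipped in the lexer
    else:
        if self._state.backtracking > 0:
            raise BacktrackingFailed      # from ruleBacktrackFailure()
        mse = MismatchedSetException(None, self.input)
        raise mse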
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(label, label)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex="0") ::= <<
+<if(label)>
+<label>Start = self.getCharIndex()
+self.match(<string>)
+<label>StartLine<elementIndex> = self.getLine()
+<label>StartCharPos<elementIndex> = self.getCharPositionInLine()
+<label> = <labelType>(input=self.input, type=INVALID_TOKEN_TYPE, channel=DEFAULT_CHANNEL, start=<label>Start, stop=self.getCharIndex()-1)
+<label>.line = <label>StartLine<elementIndex>
+<label>.charPositionInLine = <label>StartCharPos<elementIndex>
+<else>
+self.match(<string>)
+<endif>
+>>
+
+wildcard(token,label,elementIndex,terminalOptions={}) ::= <<
+<if(label)>
+<label> = self.input.LT(1)<\n>
+<endif>
+self.matchAny()
+>>
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions={}) ::= <<
+<wildcard(...)>
+<listLabel(label,label)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = self.input.LA(1)<\n>
+<endif>
+self.matchAny()
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(label, elementIndex)>
+<listLabel(label, label)>
+>>
+
+/** Match a rule reference by invoking it, possibly with arguments
+ *  and one or more return values. The 'rule' argument used to be the
+ *  target rule name; it is now of type Rule, whose toString() is still
+ *  the rule name, but the full rule descriptor is accessible as well.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+self._state.following.append(self.FOLLOW_<rule.name>_in_<ruleName><elementIndex>)
+<if(label)><label> = <endif>self.<if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">)<\n>
+self._state.following.pop()
+>>
+
+/** ids+=rule */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(rule,label,elementIndex,args,scope)>
+<listLabel(label, label)>
+>>
+
+/** A lexer rule reference.
+ *  The 'rule' argument used to be the target rule name; it is now of
+ *  type Rule, whose toString() is still the rule name, but the full
+ *  rule descriptor is accessible as well.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+<label>Start<elementIndex> = self.getCharIndex()
+self.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
+<label>StartLine<elementIndex> = self.getLine()
+<label>StartCharPos<elementIndex> = self.getCharPositionInLine()
+<label> = <labelType>(
+    input=self.input,
+    type=INVALID_TOKEN_TYPE,
+    channel=DEFAULT_CHANNEL,
+    start=<label>Start<elementIndex>,
+    stop=self.getCharIndex()-1)
+<label>.line = <label>StartLine<elementIndex>
+<label>.charPositionInLine = <label>StartCharPos<elementIndex>
+<else>
+self.<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(rule,label,args,elementIndex,scope)>
+<listLabel(label, label)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+<label>Start<elementIndex> = self.getCharIndex()
+<label>StartLine<elementIndex> = self.getLine()
+<label>StartCharPos<elementIndex> = self.getCharPositionInLine()
+self.match(EOF)
+<label> = <labelType>(input=self.input, type=EOF, channel=DEFAULT_CHANNEL, start=<label>Start<elementIndex>, stop=self.getCharIndex()-1)
+<label>.line = <label>StartLine<elementIndex>
+<label>.charPositionInLine = <label>StartCharPos<elementIndex>
+<else>
+self.match(EOF)
+<endif>
+>>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "<recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName, opPrec) ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0 = $<ruleName>_primary.tree"
+recRuleSetReturnAction(src, name)     ::= "$<name> = $<src>.<name>"
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if self.input.LA(1) == DOWN:
+    self.match(self.input, DOWN, None)
+    <children:element()>
+    self.match(self.input, UP, None)
+
+<else>
+self.match(self.input, DOWN, None)
+<children:element()>
+self.match(self.input, UP, None)
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if not (<evalPredicate(pred, description)>):
+    <ruleBacktrackFailure()>
+    raise FailedPredicateException(self.input, "<ruleName>", "<description>")
+
+>>
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
+<edges; separator="\nel">
+else:
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>
+<else>
+    <ruleBacktrackFailure()>
+    nvae = NoViableAltException("<description>", <decisionNumber>, <stateNumber>, self.input)<\n>
+    <@noViableAltException()>
+    raise nvae<\n>
+<endif>
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  This delays error detection, but it is
+ *  faster, smaller, and closer to what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
+<edges; separator="\nel">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* at the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = self.input.LA(<k>)<\n>
+<edges; separator="\nel"><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber> = <eotPredictsAlt> <! if no edges, don't gen ELSE !>
+<else>
+else:
+    alt<decisionNumber> = <eotPredictsAlt>
+<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber> = <alt>"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  enter the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if (<labelExpr>) <if(predicates)>and (<predicates>)<endif>:
+    <targetState>
+>>
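
Because the edges are joined with the "\nel" separator, each dfaEdge after the first becomes an elif, and dfaState supplies the trailing else; a hypothetical decision 5, state 0, switching on two token types, comes out roughly as:

    LA5_0 = self.input.LA(1)
    if (LA5_0 == 10):                     # hypothetical token type
        alt5 = 1
    elif (LA5_0 == 11):                   # "el" + "if (...)" -> "elif (...)"
        alt5 = 2
    else:
        if self._state.backtracking > 0:
            raise BacktrackingFailed
        nvae = NoViableAltException("description", 5, 0, self.input)
        raise nvae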
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+<!
+  FIXME: this is one of the few occasions where I miss a switch statement
+  in Python. At the moment this is implemented as a chain of if .. elif ..
+  It may be replaced by a faster dictionary lookup once I find a solution
+  for the cases where an edge is not a plain dfaAcceptState.
+!>
+LA<decisionNumber> = self.input.LA(<k>)
+<edges; separator="\nel">
+else:
+<if(eotPredictsAlt)>
+    alt<decisionNumber> = <eotPredictsAlt>
+<else>
+    <ruleBacktrackFailure()>
+    nvae = NoViableAltException("<description>", <decisionNumber>, <stateNumber>, self.input)<\n>
+    <@noViableAltException()>
+    raise nvae<\n>
+<endif>
+
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber> = self.input.LA(<k>)
+<edges; separator="\nel">
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+LA<decisionNumber> = self.input.LA(<k>)
+<edges; separator="\nel">
+<if(eotPredictsAlt)>
+else:
+    alt<decisionNumber> = <eotPredictsAlt>
+<endif>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+if LA<decisionNumber> in {<labels; separator=", ">}:
+    <targetState>
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = self.dfa<decisionNumber>.predict(self.input)
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+# lookup tables for DFA #<dfa.decisionNumber>
+
+DFA<dfa.decisionNumber>_eot = DFA.unpack(
+    "<dfa.javaCompressedEOT; wrap="\"\n    \"">"
+    )
+
+DFA<dfa.decisionNumber>_eof = DFA.unpack(
+    "<dfa.javaCompressedEOF; wrap="\"\n    \"">"
+    )
+
+DFA<dfa.decisionNumber>_min = DFA.unpack(
+    "<dfa.javaCompressedMin; wrap="\"\n    \"">"
+    )
+
+DFA<dfa.decisionNumber>_max = DFA.unpack(
+    "<dfa.javaCompressedMax; wrap="\"\n    \"">"
+    )
+
+DFA<dfa.decisionNumber>_accept = DFA.unpack(
+    "<dfa.javaCompressedAccept; wrap="\"\n    \"">"
+    )
+
+DFA<dfa.decisionNumber>_special = DFA.unpack(
+    "<dfa.javaCompressedSpecial; wrap="\"\n    \"">"
+    )
+
+
+DFA<dfa.decisionNumber>_transition = [
+    <dfa.javaCompressedTransition:{s|DFA.unpack("<s; wrap="\"\n\"">")}; separator=",\n">
+]
+
+# class definition for DFA #<dfa.decisionNumber>
+
+class DFA<dfa.decisionNumber>(DFA):
+    pass
+
+    <@errorMethod()>
+
+<if(dfa.specialStateSTs)>
+    def specialStateTransition(self_, s, input):
+        # convince pylint that my self_ magic is ok ;)
+        # pylint: disable-msg=E0213
+
+        # pretend we are a member of the recognizer
+        # thus semantic predicates can be evaluated
+        self = self_.recognizer
+
+        _s = s
+
+        <dfa.specialStateSTs:{state | if s == <i0>: <! compressed special state numbers 0..n-1 !>
+    <state>}; separator="\nel">
+
+<if(backtracking)>
+        if self._state.backtracking > 0:
+            raise BacktrackingFailed
+
+<endif>
+        nvae = NoViableAltException(self_.getDescription(), <dfa.decisionNumber>, _s, input)
+        self_.error(nvae)
+        raise nvae<\n>
+<endif>
+
+>>
+
+cyclicDFAInit(dfa) ::= <<
+self.dfa<dfa.decisionNumber> = self.DFA<dfa.decisionNumber>(
+    self, <dfa.decisionNumber>,
+    eot = self.DFA<dfa.decisionNumber>_eot,
+    eof = self.DFA<dfa.decisionNumber>_eof,
+    min = self.DFA<dfa.decisionNumber>_min,
+    max = self.DFA<dfa.decisionNumber>_max,
+    accept = self.DFA<dfa.decisionNumber>_accept,
+    special = self.DFA<dfa.decisionNumber>_special,
+    transition = self.DFA<dfa.decisionNumber>_transition
+    )<\n>
+>>
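
Taken together, a cyclic DFA for a hypothetical decision 7 shows up in the generated recognizer as class-level lookup tables, a nested DFA class, and an instance created in __init__; trimmed to its shape (the packed table strings are elided):

    # class-level, emitted by cyclicDFA():
    DFA7_eot = DFA.unpack("...")          # "..." stands for the packed table
    DFA7_eof = DFA.unpack("...")
    # min/max/accept/special/transition tables are built the same way

    class DFA7(DFA):
        pass                              # specialStateTransition only if needed

    # inside __init__, emitted by cyclicDFAInit():
    self.dfa7 = self.DFA7(
        self, 7,
        eot=self.DFA7_eot, eof=self.DFA7_eof,
        min=self.DFA7_min, max=self.DFA7_max,
        accept=self.DFA7_accept, special=self.DFA7_special,
        transition=self.DFA7_transition)

    # a rule then predicts with it, as emitted by dfaDecision():
    # alt7 = self.dfa7.predict(self.input)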
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+LA<decisionNumber>_<stateNumber> = input.LA(1)<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+index<decisionNumber>_<stateNumber> = input.index()
+input.rewind()<\n>
+<endif>
+s = -1
+<edges; separator="\nel">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.seek(index<decisionNumber>_<stateNumber>)<\n>
+<endif>
+if s >= 0:
+    return s
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if (<labelExpr>)<if(predicates)> and (<predicates>)<endif>:
+    s = <targetStateNumber><\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.  (The body begins with "se:" so that,
+ *  joined with the "\nel" separator used between edges, it renders as
+ *  the final "else:" clause.)
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+se:
+    s = <targetStateNumber><\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "((<left>) and (<right>))"
+
+orPredicates(operands) ::= "(<operands; separator=\" or \">)"
+
+notPredicate(pred) ::= "not (<evalPredicate(pred, {})>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "self.<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber> == <atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "self.input.LA(<k>) == <atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <%
+(<lower> \<= LA<decisionNumber>_<stateNumber> \<= <upper>)
+%>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(<lower> \<= self.input.LA(<k>) \<= <upper>)"
+
+lookaheadSetTest(values,k,valuesAsInt) ::= <%
+LA<decisionNumber>_<stateNumber> in {<values; separator=", ">}
+%>
+
+isolatedLookaheadSetTest(values,k,valuesAsInt) ::= <%
+self.input.LA(<k>) in {<values; separator=", ">}
+%>
+
+lookaheadVarName(k) ::= "LA<decisionNumber>_<stateNumber>"
+isolatedLookaheadVarName(k) ::= "self.input.LA(<k>)"
+
+setTest(ranges) ::= "<ranges; separator=\" or \">"
+
+// A T T R I B U T E S
+
+globalAttributeScopeClass(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+class <scope.name>_scope(object):
+    def __init__(self):
+        <scope.attributes:{it | self.<it.decl> = None}; separator="\n">
+
+<endif>
+<endif>
+>>
+
+globalAttributeScopeStack(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+self.<scope.name>_stack = []<\n>
+<endif>
+<endif>
+>>
+
+ruleAttributeScopeClass(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+class <scope.name>_scope(object):
+    def __init__(self):
+        <scope.attributes:{it | self.<it.decl> = None}; separator="\n">
+
+<endif>
+<endif>
+>>
+
+ruleAttributeScopeStack(scope) ::= <<
+<if(scope)>
+<if(scope.attributes)>
+self.<scope.name>_stack = []<\n>
+<endif>
+<endif>
+>>
+
+delegateName(d) ::= <<
+<if(d.label)><d.label><else>g<d.name><endif>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+<label.label.text> = None
+>>
+
+returnStructName(r) ::= "<r.name>_return"
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+class <ruleDescriptor:returnStructName()>(<if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope):
+    def __init__(self):
+        super().__init__()
+
+        <if(scope)><scope.attributes:{it | self.<it.decl> = None}; separator="\n"><endif>
+        <@ruleReturnInit()>
+
+
+    <@ruleReturnMembers()>
+
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<if(scope)><scope.attributes:{it | <it.decl>}; separator=", "><endif>
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <%
+<if(negIndex)>
+self.<scope>_stack[-<negIndex>].<attr.name>
+<else>
+<if(index)>
+self.<scope>_stack[<index>].<attr.name>
+<else>
+self.<scope>_stack[-1].<attr.name>
+<endif>
+<endif>
+%>
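
In other words, a dynamic-scope reference in an action such as $Symbols::names (Symbols and names are hypothetical here) becomes a plain list access on the scope stack, and the matching assignment form from scopeSetAttributeRef below writes through the same path:

    # reading $Symbols::names -- top of the Symbols scope stack
    names_here = self.Symbols_stack[-1].names

    # writing $Symbols::names = [] (handled by scopeSetAttributeRef below)
    self.Symbols_stack[-1].names = []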
+
+/* not applying patch because of bug in action parser!
+
+<if(negIndex)>
+((len(self.<scope>_stack) - <negIndex> - 1) >= 0 and [self.<scope>_stack[-<negIndex>].<attr.name>] or [None])[0]
+<else>
+<if(index)>
+((<index> \< len(self.<scope>_stack)) and [self.<scope>_stack[<index>].<attr.name>] or [None])[0]
+<else>
+((len(self.<scope>_stack) > 0) and [self.<scope>_stack[-1].<attr.name>] or [None])[0]
+<endif>
+<endif>
+
+*/
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
+<if(negIndex)>
+<!FIXME: this seems not to be used by ActionTranslator...!>
+self.<scope>_stack[-<negIndex>].<attr.name> = <expr>
+<else>
+<if(index)>
+<!FIXME: this seems not to be used by ActionTranslator...!>
+self.<scope>_stack[<index>].<attr.name> = <expr>
+<else>
+self.<scope>_stack[-1].<attr.name> = <expr>
+<endif>
+<endif>
+%>
+
+/** $x is either a global scope or a rule x with a dynamic scope; this refers
+ *  to the stack itself, not the top of the stack.  That is useful for
+ *  predicates like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "self.<scope>_stack"
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+((<scope> is not None) and [<scope>.<attr.name>] or [None])[0]
+<else>
+<scope>
+<endif>
+%>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+%>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> = <expr>
+<else>
+<attr.name> = <expr>
+<endif>
+%>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the following are the right approach; they are also evaluated early,
+// so they cannot see TREE_PARSER or PARSER attributes, for example. :(
+
+tokenLabelPropertyRef_text(scope,attr) ::= "<scope>.text"
+tokenLabelPropertyRef_type(scope,attr) ::= "<scope>.type"
+tokenLabelPropertyRef_line(scope,attr) ::= "<scope>.line"
+tokenLabelPropertyRef_pos(scope,attr) ::= "<scope>.charPositionInLine"
+tokenLabelPropertyRef_channel(scope,attr) ::= "<scope>.channel"
+tokenLabelPropertyRef_index(scope,attr) ::= "<scope>.index"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "<scope>.start"
+ruleLabelPropertyRef_stop(scope,attr) ::= "<scope>.stop"
+ruleLabelPropertyRef_tree(scope,attr) ::= "<scope>.tree"
+ruleLabelPropertyRef_text(scope,attr) ::= <%
+<if(TREE_PARSER)>
+((<scope> is not None) and [self.input.getTokenStream().toString(
+    self.input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
+    self.input.getTreeAdaptor().getTokenStopIndex(<scope>.start)
+    )] or [None])[0]
+<else>
+((<scope> is not None) and [self.input.toString(<scope>.start,<scope>.stop)] or [None])[0]
+<endif>
+%>
+ruleLabelPropertyRef_st(scope,attr) ::= "((<scope> is not None) and [<scope>.st] or [None])[0]"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::= "((<scope> is not None) and [<scope>.type] or [0])[0]"
+lexerRuleLabelPropertyRef_line(scope,attr) ::= "((<scope> is not None) and [<scope>.line] or [0])[0]"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::= "((<scope> is not None) and [<scope>.charPositionInLine] or [0])[0]"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::= "((<scope> is not None) and [<scope>.channel] or [0])[0]"
+lexerRuleLabelPropertyRef_index(scope,attr) ::= "((<scope> is not None) and [<scope>.index] or [0])[0]"
+lexerRuleLabelPropertyRef_text(scope,attr) ::= "((<scope> is not None) and [<scope>.text] or [None])[0]"
+lexerRuleLabelPropertyRef_int(scope,attr) ::= "((<scope> is not None) and [int(<scope>.text)] or [0])[0]"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "retval.start"
+rulePropertyRef_stop(scope,attr) ::= "retval.stop" //mmm... or input.LT(-1)??
+rulePropertyRef_tree(scope,attr) ::= "retval.tree"
+rulePropertyRef_text(scope,attr) ::= "self.input.toString(retval.start, self.input.LT(-1))"
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "self.text"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "self._state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "self._state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "self._state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(self.getCharIndex()-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "int(<scope>.text)"
+
+// Setting $st and $tree is allowed in the local rule; everything else
+// is flagged as an error.
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree = <expr>"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st = <expr>"
+
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+<if(actions.(actionScope).synpredgate)>
+if <actions.(actionScope).synpredgate>:
+    pass
+    <action>
+
+<else>
+if <actions.(actionScope).synpredgate>:
+    pass
+    <action>
+
+<endif>
+<else>
+#action start
+<action>
+#action end
+<endif>
+>>
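
With backtracking enabled, a user action such as print(ids) (a hypothetical action referencing a hypothetical ids variable) is therefore wrapped in the synpred gate, which normally evaluates to the actionGate()/filteringActionGate() expressions defined near the top of this file; without backtracking the action is emitted bare between the #action markers:

    # backtracking on: the action only runs when not in a speculative parse
    if self._state.backtracking == 0:
        pass
        print(ids)

    # backtracking off:
    #action start
    print(ids)
    #action end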
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+
+// M I S C (properties, etc...)
+
+codeFileExtension() ::= ".py"
+
+true_value() ::= "True"
+false_value() ::= "False"
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Python3/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Python3/ST.stg
new file mode 100644
index 0000000..718a1bd
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Python3/ST.stg
@@ -0,0 +1,171 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2005-2012 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/** Template subgroup to add template rewrite output
+ *  If debugging, then you'll also get STDbg.stg loaded.
+ */
+
+@outputFile.imports() ::= <<
+<@super.imports()>
+import stringtemplate3
+>>
+
+/** Add this to each rule's return value struct */
+@returnScope.ruleReturnInit() ::= <<
+self.st = None
+>>
+
+@returnScope.ruleReturnMembers() ::= <<
+def getTemplate(self):
+    return self.st
+
+def toString(self):
+    if self.st is not None:
+        return self.st.toString()
+    return None
+__str__ = toString
+
+>>
+
+@genericParser.init() ::= <<
+<@super.init()>
+self.templateLib = stringtemplate3.StringTemplateGroup(
+    '<name>Templates', lexer='angle-bracket'
+    )
+
+>>
+
+@genericParser.members() ::= <<
+<@super.members()>
+def setTemplateLib(self, templateLib):
+    self.templateLib = templateLib
+
+def getTemplateLib(self):
+    return self.templateLib
+
+>>
+
+/** x+=rule when output=template */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(label, {<label>.st})>
+>>
+
+rewriteTemplate(alts) ::= <<
+# TEMPLATE REWRITE
+<if(backtracking)>
+if <actions.(actionScope).synpredgate>:
+    <first(alts):rewriteTemplateAltFirst()>
+    <rest(alts):{it | el<rewriteTemplateAlt(it)>}>
+    <if(rewriteMode)><replaceTextInLine()><endif>
+
+<else>
+<first(alts):rewriteTemplateAltFirst()>
+<rest(alts):{it | el<rewriteTemplateAlt(it)>}>
+<if(rewriteMode)><replaceTextInLine()><endif>
+<endif>
+>>
+
+replaceTextInLine() ::= <<
+<if(TREE_PARSER)>
+self.input.getTokenStream().replace(
+    self.input.getTreeAdaptor().getTokenStartIndex(retval.start),
+    self.input.getTreeAdaptor().getTokenStopIndex(retval.start),
+    retval.st
+    )
+<else>
+self.input.replace(
+    retval.start.getTokenIndex(),
+    self.input.LT(-1).getTokenIndex(),
+    retval.st
+    )
+<endif>
+>>
+
+rewriteTemplateAltFirst(alt) ::= <<
+<if(alt.pred)>
+if <alt.pred>:
+    # <alt.description>
+    retval.st = <alt.alt>
+<\n>
+<else>
+# <alt.description>
+retval.st = <alt.alt>
+<\n>
+<endif>
+>>
+
+rewriteTemplateAlt(alt) ::= <<
+<if(alt.pred)>if <alt.pred>:
+    # <alt.description>
+    retval.st = <alt.alt>
+<\n>
+<else>se:
+    # <alt.description>
+    retval.st = <alt.alt>
+<\n>
+<endif>
+>>
+
+rewriteEmptyTemplate(alts) ::= <<
+None
+>>
+
+/** Invoke a template with a set of attribute name/value pairs.
+ *  Set the value of the rule's template *after* having set
+ *  the attributes because the rule's template might be used as
+ *  an attribute to build a bigger template; you get a self-embedded
+ *  template.
+ */
+rewriteExternalTemplate(name,args) ::= <%
+self.templateLib.getInstanceOf("<name>"<if(args)>, attributes={<args:{a | "<a.name>": <a.value>}; separator=", ">}<endif>)
+%>
+
+/** expr is a string expression that says what template to load */
+rewriteIndirectTemplate(expr,args) ::= <%
+self.templateLib.getInstanceOf(<expr><if(args)>, attributes={<args:{a | "<a.name>": <a.value>}; separator=", ">}<endif>)
+%>
+
+/** Invoke an inline template with a set of attribute name/value pairs */
+rewriteInlineTemplate(args, template) ::= <%
+stringtemplate3.StringTemplate("<template>", group=self.templateLib<if(args)>, attributes={<args:{a | "<a.name>": <a.value>}; separator=", ">}<endif>)
+%>
+
+/** plain -> {foo} action */
+rewriteAction(action) ::= <<
+<action>
+>>
+
+/** An action has %st.attrName=expr; or %{st}.attrName=expr; */
+actionSetAttribute(st,attrName,expr) ::= <<
+(<st>)["<attrName>"] = <expr>
+>>
+
+/** Translate %{stringExpr} */
+actionStringConstructor(stringExpr) ::= <<
+stringtemplate3.StringTemplate(<stringExpr>, group=self.templateLib)
+>>
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/AST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Ruby/AST.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/AST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Ruby/AST.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTDbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTDbg.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTDbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTDbg.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTParser.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTParser.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTParser.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTTreeParser.stg b/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTTreeParser.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTTreeParser.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Ruby/ASTTreeParser.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Dbg.stg b/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Dbg.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Dbg.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Ruby/Dbg.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Ruby.stg b/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Ruby.stg
new file mode 100644
index 0000000..d743623
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Ruby/Ruby.stg
@@ -0,0 +1,1477 @@
+/******************************************************************************
+ *********************  M A J O R   C O M P O N E N T S  **********************
+ ******************************************************************************/
+
+// Map boolean literal spellings onto the Ruby literals "true" and "false":
+// "True"/"False" (e.g. as produced by .NET's Boolean.ToString()) are lowercased,
+// while "true"/"false" already match the Ruby spelling and map to themselves.
+booleanLiteral ::= [
+	"True":"true",
+	"False":"false",
+	"true":"true",
+	"false":"false",
+	default:"false"
+]
+
+/** The overall file structure of a recognizer; stores methods
+  * for rules and cyclic DFAs plus support code.
+  */
+outputFile(LEXER, PARSER, TREE_PARSER, actionScope, actions, docComment, recognizer, name,
+  tokens, tokenNames, rules, cyclicDFAs, bitsets, buildTemplate, buildAST, rewriteMode,
+  profile, backtracking, synpreds, memoize, numRules, fileName, ANTLRVersion, generatedTimestamp,
+  trace, scopes, superClass, literals) ::=
+<<
+#!/usr/bin/env ruby
+#
+# <fileName>
+# --
+# Generated using ANTLR version: <ANTLRVersion>
+# Ruby runtime library version: <runtimeLibraryVersion()>
+# Input grammar file: <fileName>
+# Generated at: <generatedTimestamp>
+#
+
+# ~~~\> start load path setup
+this_directory = File.expand_path( File.dirname( __FILE__ ) )
+$LOAD_PATH.unshift( this_directory ) unless $LOAD_PATH.include?( this_directory )
+
+antlr_load_failed = proc do
+  load_path = $LOAD_PATH.map { |dir| '  - ' \<\< dir }.join( $/ )
+  raise LoadError, \<\<-END.strip!
+
+Failed to load the ANTLR3 runtime library (version <runtimeLibraryVersion()>):
+
+Ensure the library has been installed on your system and is available
+on the load path. If rubygems is available on your system, this can
+be done with the command:
+
+  gem install antlr3
+
+Current load path:
+#{ load_path }
+
+  END
+end
+
+defined?( ANTLR3 ) or begin
+
+  # 1: try to load the ruby antlr3 runtime library from the system path
+  require 'antlr3'
+
+rescue LoadError
+
+  # 2: try to load rubygems if it isn't already loaded
+  defined?( Gem ) or begin
+    require 'rubygems'
+  rescue LoadError
+    antlr_load_failed.call
+  end
+
+  # 3: try to activate the antlr3 gem
+  begin
+    Gem.activate( 'antlr3', '~> <runtimeLibraryVersion()>' )
+  rescue Gem::LoadError
+    antlr_load_failed.call
+  end
+
+  require 'antlr3'
+
+end
+# \<~~~ end load path setup
+
+<placeAction(scope="all", name="header")>
+<placeAction(scope=actionScope,name="header")>
+
+<if(recognizer.grammar.grammarIsRoot)>
+<rootGrammarOutputFile()>
+<else>
+<delegateGrammarOutputFile()>
+<endif>
+
+<placeAction(scope=actionScope,name="footer")>
+<placeAction(scope="all", name="footer")>
+
+<if(actions.(actionScope).main)>
+if __FILE__ == $0 and ARGV.first != '--'
+  <placeAction(scope=actionScope,name="main")>
+end
+<endif>
+>>
+
+tokenDataModule() ::= <<
+# TokenData defines all of the token type integer values
+# as constants, which will be included in all
+# ANTLR-generated recognizers.
+const_defined?( :TokenData ) or TokenData = ANTLR3::TokenScheme.new
+
+module TokenData
+<if(tokens)>
+
+  # define the token constants
+  define_tokens( <tokens:{it | :<it.name> => <it.type>}; anchor, wrap="\n", separator=", "> )
+
+<endif>
+<if(tokenNames)>
+
+  # register the proper human-readable name or literal value
+  # for each token type
+  #
+  # this is necessary because anonymous tokens, which are
+  # created from literal values in the grammar, do not
+  # have descriptive names
+  register_names( <tokenNames:{it | <it>}; separator=", ", anchor, wrap="\n"> )
+
+<endif>
+
+  <placeAction(scope="token",name="scheme")>
+  <placeAction(scope="token",name="members")>
+end<\n>
+>>
+
+rootGrammarOutputFile() ::= <<
+module <recognizer.grammar.name>
+  <placeAction(scope="module",name="head")>
+  <tokenDataModule()>
+  <recognizer>
+  <placeAction(scope="module",name="foot")>
+end
+>>
+
+delegateGrammarOutputFile() ::= <<
+require '<recognizer.grammar.delegator.recognizerName>'
+
+<delegateGrammarModuleHead(gram=recognizer.grammar.delegator)>
+  <recognizer>
+<delegateGrammarModuleTail(gram=recognizer.grammar.delegator)>
+>>
+
+delegateGrammarModuleHead(gram) ::= <<
+<if(gram.grammarIsRoot)>
+module <gram.name>
+<else>
+<delegateGrammarModuleHead(gram=gram.delegator)><\n>
+class <gram.name>
+<endif>
+>>
+
+delegateGrammarModuleTail(gram) ::= <<
+<if(gram.grammarIsRoot)>
+end # module <gram.name>
+<else>
+end # class <gram.name>
+<delegateGrammarModuleTail(gram=gram.delegator)><\n>
+<endif>
+>>
+/* * * * * * * * * * R E C O G N I Z E R   C L A S S E S * * * * * * * * * */
+
+parser(
+  grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
+  ASTLabelType="Object", superClass="ANTLR3::Parser", labelType="ANTLR3::Token",
+  members={<actions.parser.members>}
+) ::= <<
+<if(grammar.grammarIsRoot)><autoloadDelegates()><endif>
+
+class <if(grammar.grammarIsRoot)>Parser<else><grammar.name><endif> \< <superClass>
+  <parserBody(inputStreamType="ANTLR3::TokenStream", rewriteElementType="Token", actionScope="parser", ...)>
+end # class <if(grammar.grammarIsRoot)>Parser<else><grammar.name><endif> \< <superClass>
+<if(!actions.(actionScope).main)>
+
+at_exit { <if(grammar.grammarIsRoot)>Parser<else><grammar.name><endif>.main( ARGV ) } if __FILE__ == $0
+<endif>
+>>
+
+/** How to generate a tree parser; same as parser except the
+  * input stream is a different type.
+  */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules, numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object", superClass="ANTLR3::TreeParser", members={<actions.treeparser.members>}) ::= <<
+<if(grammar.grammarIsRoot)><autoloadDelegates()><endif>
+
+class <if(grammar.grammarIsRoot)>TreeParser<else><grammar.name><endif> \< <superClass>
+  <parserBody(inputStreamType="TreeNodeStream", rewriteElementType="Node", actionScope="treeparser", ...)>
+end # class <if(grammar.grammarIsRoot)>TreeParser<else><grammar.name><endif> \< <superClass>
+<if(!actions.(actionScope).main)>
+
+at_exit { <if(grammar.grammarIsRoot)>TreeParser<else><grammar.name><endif>.main( ARGV ) } if __FILE__ == $0
+<endif>
+>>
+
+parserBody(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, inputStreamType, superClass, filterMode, labelType, members, rewriteElementType, actionScope, ASTLabelType="Object") ::= <<
+@grammar_home = <grammar.name>
+<if(!grammar.grammarIsRoot)><autoloadDelegates()><\n><endif>
+<@mixins()>
+
+RULE_METHODS = [ <rules:{r|:<r.ruleName>}; separator=", ", wrap="\n", anchor> ].freeze
+
+<scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeClass()><\n><endif>}>
+<rules:{it | <ruleAttributeScopeClass(.ruleDescriptor.ruleScope)>}>
+<if(grammar.delegators)>
+masters( <grammar.delegators:{d|:<d.name>}; separator=", "> )<\n>
+<endif>
+<if(grammar.directDelegates)>
+imports( <grammar.directDelegates:{d|:<d.name>}; separator=", "> )<\n>
+<endif>
+
+include TokenData
+
+begin
+  generated_using( "<fileName>", "<ANTLRVersion>", "<runtimeLibraryVersion()>" )
+rescue NoMethodError => error
+  # ignore
+end
+
+<if(!grammar.grammarIsRoot)>
+require '<grammar.composite.rootGrammar.recognizerName>'
+include <grammar.composite.rootGrammar.name>::TokenData<\n><\n>
+<endif>
+<parserConstructor()>
+<@additionalMembers()>
+<members>
+# - - - - - - - - - - - - Rules - - - - - - - - - - - - -
+<rules:{it | <it><\n>}>
+
+<if(grammar.delegatedRules)>
+# - - - - - - - - - - Delegated Rules - - - - - - - - - - -
+<grammar.delegatedRules:{ruleDescriptor|<delegateRule(ruleDescriptor)><\n>}>
+<endif>
+<if(cyclicDFAs)>
+# - - - - - - - - - - DFA definitions - - - - - - - - - - -
+<cyclicDFAs:{it | <cyclicDFA(it)>}>
+
+private
+
+def initialize_dfas
+  super rescue nil
+  <cyclicDFAs:{it | <cyclicDFAInit(it)>}>
+end
+
+<endif>
+<bitsets:{it | TOKENS_FOLLOWING_<it.name>_IN_<it.inName>_<it.tokenIndex> = Set[ <it.tokenTypes:{it | <it>}; separator=", "> ]<\n>}>
+>>
+
+parserConstructor() ::= <<
+def initialize( <grammar.delegators:{g|<g:delegateName()>, }>input, options = {} )
+  super( input, options )
+<if(memoize)><if(grammar.grammarIsRoot)>
+  @state.rule_memory = {}
+<endif><endif>
+  <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeStack()><\n><endif>}><rules:{it | <ruleAttributeScopeStack(.ruleDescriptor.ruleScope)>}>
+  <placeAction(scope=actionScope,name="init")>
+  <grammar.delegators:{g|@<g:delegateName()> = <g:delegateName()><\n>}><grammar.directDelegates:{g|@<g:delegateName()> = <newDelegate(g)><\n>}><last(grammar.delegators):{g|@parent = @<g:delegateName()><\n>}><@init()>
+end
+>>
+
+
+/* * * * * * * * * * * * * R U L E   M E T H O D S * * * * * * * * * * * * */
+
+/** A simpler version of the rule template, specific to the
+  * imaginary rules created for syntactic predicates.  Since they
+  * never have return values or parameters, generate the simplest
+  * possible method and skip the normal memoization machinery; it
+  * would be wasted here. Because predicates cannot be inlined into
+  * the invoking rule, they need to be in a rule by themselves.
+  */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::= <<
+#
+# syntactic predicate <ruleName>
+#
+# (in <fileName>)
+# <description>
+#
+# This is an imaginary rule inserted by ANTLR to
+# implement a syntactic predicate decision
+#
+def <ruleName><if(ruleDescriptor.parameterScope)>( <ruleDescriptor.parameterScope:parameterScope()> )<endif>
+  <traceIn()><ruleLabelDefs()>
+  <block>
+ensure
+  <traceOut()>
+end
+>>
+
+
+/** How to generate code for a rule.  This includes any return
+  * type data aggregates required for multiple return values.
+  */
+rule(ruleName, ruleDescriptor, block, emptyRule, description, exceptions, finally, memoize) ::= <<
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+#
+# parser rule <ruleName>
+#
+# (in <fileName>)
+# <description>
+#
+def <ruleName><if(ruleDescriptor.parameterScope)>( <ruleDescriptor.parameterScope:parameterScope()> )<endif>
+  <traceIn()><ruleScopeSetUp()><ruleDeclarations()><ruleLabelDefs()><action(name="init", code=ruleDescriptor.actions.init)>
+  <@body><ruleBody()><@end>
+
+  return <ruleReturnValue()>
+end
+<if(ruleDescriptor.modifier)>
+
+<ruleDescriptor.modifier> :<ruleName> rescue nil<\n>
+<endif>
+>>
+
+delegateRule(ruleDescriptor) ::= <<
+# delegated rule <ruleDescriptor.name>
+def <ruleDescriptor.name><if(ruleDescriptor.parameterScope)>( <ruleDescriptor.parameterScope:parameterScope()> )<endif>
+  <methodCall(del=ruleDescriptor.grammar, n=ruleDescriptor.name, args={<ruleDescriptor.parameterScope.attributes:{it | <it.name>}>})>
+end
+>>
+// HELPERS
+
+recognizerClassName() ::= <<
+<if(TREE_PARSER)>TreeParser<elseif(PARSER)>Parser<else>Lexer<endif>
+>>
+
+initializeDirectDelegate() ::= <<
+@<g:delegateName()> = <g.name>::<recognizerClassName()>.new(
+  <trunc(g.delegators):{p|<p:delegateName()>, }>self, input, options.merge( :state => @state )
+)
+>>
+
+initializeDelegator() ::= <<
+@<g:delegateName()> = <g:delegateName()>
+>>
+
+altSwitchCase(altNum,alt) ::= <<
+when <altNum>
+  <@prealt()>
+  <alt>
+>>
+
+blockBody() ::= <<
+<@decision><decision><@end>
+case alt_<decisionNumber>
+<alts:{a | <altSwitchCase(i,a)>}; separator="\n">
+end
+>>
+
+catch(decl, action) ::= <<
+# - - - - - - @catch <e.decl> - - - - - -
+rescue <e.decl>
+  <e.action><\n>
+>>
+
+closureBlockLoop() ::= <<
+while true # decision <decisionNumber>
+  alt_<decisionNumber> = <maxAlt>
+  <@decisionBody><decision><@end>
+  case alt_<decisionNumber>
+  <alts:{a | <altSwitchCase(i,a)>}; separator="\n">
+  else
+    break # out of loop for decision <decisionNumber>
+  end
+end # loop for decision <decisionNumber>
+>>
+
+delegateName(d) ::= <<
+<if(d.label)><d.label; format="label"><else><d.name; format="snakecase"><endif>
+>>
+
+element(e) ::= <<
+<e.el><\n>
+>>
+
+execForcedAction(action) ::= "<action>"
+
+globalAttributeScopeClass(scope) ::= <<
+<if(scope.attributes)>@@<scope.name> = Scope( <scope.attributes:{it | <it.decl; format="rubyString">}; separator=", "> )<\n><endif>
+>>
+
+globalAttributeScopeStack(scope) ::= <<
+<if(scope.attributes)>@<scope.name>_stack = []<\n><endif>
+>>
+
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+parameterScope(scope) ::= <<
+<scope.attributes:{it | <it.decl>}; separator=", ">
+>>
+
+positiveClosureBlockLoop() ::= <<
+match_count_<decisionNumber> = 0
+while true
+  alt_<decisionNumber> = <maxAlt>
+  <@decisionBody><decision><@end>
+  case alt_<decisionNumber>
+  <alts:{a | <altSwitchCase(i,a)>}; separator="\n">
+  else
+    match_count_<decisionNumber> > 0 and break
+    <ruleBacktrackFailure()>
+    eee = EarlyExit(<decisionNumber>)
+    <@earlyExitException()><\n>
+    raise eee
+  end
+  match_count_<decisionNumber> += 1
+end<\n>
+>>
+
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor:returnStructName(r=it)> = define_return_scope <scope.attributes:{it | :<it.decl>}; separator=", ">
+<endif>
+>>
+
+returnStructName(r) ::= "<r.name; format=\"camelcase\">ReturnValue"
+
+ruleAttributeScopeClass ::= globalAttributeScopeClass
+ruleAttributeScopeStack ::= globalAttributeScopeStack
+
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>
+@state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )<\n>
+<endif>
+>>
+
+ruleBody() ::= <<
+<if(memoize)><if(backtracking)>
+success = false # flag used for memoization<\n>
+<endif><endif>
+begin
+  <ruleMemoization(ruleName)><block><ruleCleanUp()><(ruleDescriptor.actions.after):execAction()>
+<if(memoize)><if(backtracking)>
+  success = true<\n>
+<endif><endif>
+<if(exceptions)>
+  <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+
+# - - - - - - - - @rulecatch - - - - - - - -
+<actions.(actionScope).rulecatch>
+<else>
+rescue ANTLR3::Error::RecognitionError => re
+  report_error(re)
+  recover(re)
+  <@setErrorReturnValue()>
+<endif>
+<endif>
+<endif>
+
+ensure
+  <traceOut()><memoize()><ruleScopeCleanUp()><finally>
+end
+>>
+
+ruleReturnValue() ::= <%
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+return_value
+<endif>
+<endif>
+<endif>
+%>
+
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+return_value = <returnStructName(r=ruleDescriptor)>.new
+
+# $rule.start = the first token seen before matching
+return_value.start = @input.look<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{a|<a.name> = <if(a.initValue)><a.initValue><else>nil<endif><\n>}>
+<endif>
+<if(memoize)>
+<ruleDescriptor.name>_start_index = @input.index<\n>
+<endif>
+>>
+
+ruleLabelDef(label) ::= <<
+<label.label.text; format="label"> = nil<\n>
+>>
+
+ruleLabelDefs() ::= <<
+<[
+    ruleDescriptor.tokenLabels,
+    ruleDescriptor.tokenListLabels,
+    ruleDescriptor.wildcardTreeLabels,
+    ruleDescriptor.wildcardTreeListLabels,
+    ruleDescriptor.ruleLabels,
+    ruleDescriptor.ruleListLabels
+ ]:
+ {<it.label.text; format="label"> = nil<\n>}
+><[
+    ruleDescriptor.tokenListLabels,
+    ruleDescriptor.ruleListLabels,
+    ruleDescriptor.wildcardTreeListLabels
+  ]:
+  {list_of_<it.label.text; format="label"> = []<\n>}
+>
+>>
+
+/* * * * * * * * * * * * * R U L E   H E L P E R S * * * * * * * * * * * * */
+
+traceIn() ::= <<
+<if(trace)>
+trace_in( __method__, <ruleDescriptor.index> )<\n>
+<else>
+# -> uncomment the next line to manually enable rule tracing
+# trace_in( __method__, <ruleDescriptor.index> )<\n>
+<endif>
+>>
+
+traceOut() ::= <<
+<if(trace)>
+trace_out( __method__, <ruleDescriptor.index> )<\n>
+<else>
+# -> uncomment the next line to manually enable rule tracing
+# trace_out( __method__, <ruleDescriptor.index> )<\n>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+# - - - - - - - rule clean up - - - - - - - -
+return_value.stop = @input.look( -1 )<\n>
+<endif>
+<endif>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+# rule memoization
+if @state.backtracking > 0 and already_parsed_rule?( __method__ )
+  success = true
+  return <ruleReturnValue()>
+end<\n>
+<endif>
+>>
+
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{it | @<it>_stack.push( @@<it>.new )<\n>}><ruleDescriptor.ruleScope:{it | @<it.name>_stack.push( @@<it.name>.new )<\n>}>
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{it | @<it>_stack.pop<\n>}><ruleDescriptor.ruleScope:{it | @<it.name>_stack.pop<\n>}>
+>>
+
+memoize() ::= <<
+<if(memoize)><if(backtracking)>
+memoize( __method__, <ruleDescriptor.name>_start_index, success ) if @state.backtracking > 0<\n>
+<endif><endif>
+>>
+
+/** helper template to format a ruby method call */
+methodCall(n, del, args) ::= <<
+<if(del)>@<del:delegateName()>.<endif><n><if(args)>( <args; separator=", "> )<endif>
+>>
+
+/* * * * * * * * * * * * * L E X E R   P A R T S * * * * * * * * * * * * * */
+
+actionGate() ::= "@state.backtracking == 0"
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# at line <description>
+alt_<decisionNumber> = <maxAlt>
+<decls>
+<@body><blockBody()><@end>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# at line <description>
+alt_<decisionNumber> = <maxAlt>
+<decls>
+<@decision><decision><@end>
+case alt_<decisionNumber>
+<alts:{a | <altSwitchCase(i,a)>}; separator="\n">
+end
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+<decls>
+<@prealt()>
+<alts>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+# at line <description>
+<decls>
+<@prealt()>
+<alts>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# at line <description>
+<decls>
+<@loopBody>
+<positiveClosureBlockLoop()>
+<@end>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 0 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+# at line <description>
+<decls>
+<@loopBody>
+<closureBlockLoop()>
+<@end>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code
+  * generation, so we can just use the normal block template
+  */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** An alternative is just a list of elements; at outermost
+  * level
+  */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+# at line <description>
+<elements:element()><rew>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)><label; format="label"> = <endif>match( <token>, TOKENS_FOLLOWING_<token>_IN_<ruleName>_<elementIndex> )
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(...)>
+<addToList(elem={<label; format="label">},...)>
+>>
+
+/* TRY THIS:
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+list_of_<label; format="label"> << match( <token>, TOKENS_FOLLOWING_<token>_IN_<ruleName>_<elementIndex> )
+>>
+*/
+
+addToList(label,elem) ::= <<
+list_of_<label; format="label"> \<\< <elem><\n>
+>>
+
+listLabel ::= addToList
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,terminalOptions,postmatchCode) ::= <<
+<if(label)>
+<label; format="label"> = @input.look<\n>
+<endif>
+if <s>
+  @input.consume
+  <postmatchCode>
+<if(!LEXER)>
+  @state.error_recovery = false<\n>
+<endif>
+else
+  <ruleBacktrackFailure()>
+  mse = MismatchedSet( nil )
+  <@mismatchedSetException()>
+<if(LEXER)>
+  recover mse
+  raise mse<\n>
+<else>
+  raise mse<\n>
+<endif>
+end
+<\n>
+>>
+
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<addToList(elem={<label; format="label">},...)>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)>
+<label; format="label"> = @input.look<\n>
+<endif>
+match_any
+>>
+
+/* TRY THIS:
+wildcard(label,elementIndex) ::= <<
+<if(label)><label; format="label"> = <endif>match_any
+>>
+*/
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<wildcard(...)>
+<addToList(elem={<label; format="label">},...)>
+>>
+
+
+/** Match a rule reference by invoking it possibly with
+  * arguments and a return value or values.
+  */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+@state.following.push( TOKENS_FOLLOWING_<rule.name>_IN_<ruleName>_<elementIndex> )
+<if(label)><label; format="label"> = <endif><methodCall(del=scope, n={<rule.name>}, args=args)>
+@state.following.pop
+>>
+
+/** ids+=ID */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<addToList(elem={<label; format="label">},...)>
+>>
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList, enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if @input.peek == DOWN
+  match( DOWN, nil )
+  <children:element()>
+  match( UP, nil )
+end
+<else>
+match( DOWN, nil )
+<children:element()>
+match( UP, nil )
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when
+  * it is also hoisted into a prediction expression).
+  */
+validateSemanticPredicate(pred,description) ::= <<
+<if(backtracking)>
+unless ( <evalPredicate(...)> )
+  <ruleBacktrackFailure()>
+  raise FailedPredicate( "<ruleName>", "<description>" )
+end
+<else>
+raise FailedPredicate( "<ruleName>", "<description>" ) unless ( <evalPredicate(...)> )
+<endif>
+>>
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+look_<decisionNumber>_<stateNumber> = @input.peek( <k> )<\n>
+<edges; separator="\nels">
+else
+<if(eotPredictsAlt)>
+  alt_<decisionNumber> = <eotPredictsAlt><\n>
+<else>
+<if(backtracking)>
+  <ruleBacktrackFailure()><\n>
+<endif>
+<@noViableAltException>
+  raise NoViableAlternative( "<description>", <decisionNumber>, <stateNumber> )<\n>
+<@end>
+<endif>
+end
+>>
+
+/** Same as a normal DFA state except that we don't examine the
+  * look for the bypass alternative.  It delays error
+  * detection, but this is faster, smaller, and more what people
+  * expect.  For (X)? people expect "if ( LA(1)==X ) match(X);"
+  * and that's it.  If this is a semPredState, don't force a look
+  * lookup; the predicates might not need it.
+  */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+look_<decisionNumber>_<stateNumber> = @input.peek( <k> )<\n>
+<edges; separator="\nels">
+end
+>>
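+
+/* Illustrative sketch (not part of the template): for an optional subrule
+ * (X)?, the state above is expected to emit Ruby along these lines (the token
+ * name and the decision/state numbers are hypothetical):
+ *
+ *   look_5_0 = @input.peek( 1 )
+ *   if ( look_5_0 == X )
+ *     alt_5 = 1
+ *   end
+ *
+ * There is no error branch: when the look does not predict X, the optional
+ * block is simply bypassed.
+ */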
+
+
+/** A DFA state that is actually the loopback decision of a
+  * closure loop.  If end-of-token (EOT) predicts any of the
+  * targets, then it should act like a default clause (i.e., no
+  * error can be generated).  This is used only in the lexer so
+  * that, for ('a')* at the end of a rule, anything other than 'a'
+  * predicts exiting.  If this is a semPredState, don't force a
+  * look lookup; the predicates might not need it.
+  */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+look_<decisionNumber>_<stateNumber> = @input.peek( <k> )<\n>
+<edges; separator="\nels"><\n>
+<if(eotPredictsAlt)>
+else
+  alt_<decisionNumber> = <eotPredictsAlt><\n>
+<endif>
+end
+>>
+
+
+/** An accept state indicates a unique alternative has been
+  * predicted
+  */
+dfaAcceptState(alt) ::= "alt_<decisionNumber> = <alt>"
+
+/** A simple edge with an expression.  If the expression is
+  * satisfied, enter the target state.  To handle gated
+  * productions, we may have to evaluate some predicates for
+  * this edge.
+  */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( <labelExpr> )<if(predicates)> and ( <predicates> )<endif>
+  <targetState>
+>>
+
+
+/** A DFA state where a SWITCH may be generated.  The code
+  * generator decides if this is possible:
+  * CodeGenerator.canGenerateSwitch().
+  */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+case look_<decisionNumber> = @input.peek( <k> )
+<edges; separator="\n">
+else
+<if(eotPredictsAlt)>
+  alt_<decisionNumber> = <eotPredictsAlt><\n>
+<else>
+<if(backtracking)>
+  <ruleBacktrackFailure()><\n>
+<endif>
+<@noViableAltException>
+  raise NoViableAlternative( "<description>", <decisionNumber>, <stateNumber> )<\n>
+<@end>
+<endif>
+end
+>>
+
+
+dfaOptionalBlockStateSwitch(k, edges, eotPredictsAlt, description, stateNumber, semPredState) ::= <<
+case look_<decisionNumber> = @input.peek( <k> )
+<edges; separator="\n">
+end
+>>
+
+dfaLoopbackStateSwitch(k, edges, eotPredictsAlt, description, stateNumber, semPredState) ::= <<
+case look_<decisionNumber> = @input.peek( <k> )
+<edges; separator="\n">
+<if(eotPredictsAlt)>
+else
+  alt_<decisionNumber> = <eotPredictsAlt>
+<endif>
+end
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+when <labels:{it | <it>}; separator=", "> then <targetState>
+>>
+
+/** The code to initiate execution of a cyclic DFA; this is used
+  * in the rule to predict an alt just like the fixed DFA case.
+  * The <name> attribute is inherited via the parser, lexer, ...
+  */
+dfaDecision(decisionNumber, description) ::= <<
+alt_<decisionNumber> = @dfa<decisionNumber>.predict( @input )
+>>
+
+/** Generate the tables and support code needed for the DFAState
+  * object argument.  Unless there is a semantic predicate (or a
+  * syn pred, which becomes a sem pred), all states should be
+  * encoded in the state tables.  Consequently, the
+  * cyclicDFAState, cyclicDFAEdge, and eotDFAEdge templates are not
+  * used except for special DFA states that cannot be encoded as
+  * a transition table.
+  */
+cyclicDFA(dfa) ::= <<
+class DFA<dfa.decisionNumber> \< ANTLR3::DFA
+  EOT = unpack( <dfa.javaCompressedEOT; anchor, separator=", ", wrap="\n"> )
+  EOF = unpack( <dfa.javaCompressedEOF; anchor, separator=", ", wrap="\n"> )
+  MIN = unpack( <dfa.javaCompressedMin; anchor, separator=", ", wrap="\n"> )
+  MAX = unpack( <dfa.javaCompressedMax; anchor, separator=", ", wrap="\n"> )
+  ACCEPT = unpack( <dfa.javaCompressedAccept; anchor, separator=", ", wrap="\n"> )
+  SPECIAL = unpack( <dfa.javaCompressedSpecial; anchor, separator=", ", wrap="\n"> )
+  TRANSITION = [
+    <dfa.javaCompressedTransition:{s|unpack( <s; wrap="\n", anchor, separator=", "> )}; separator=",\n">
+  ].freeze
+
+  ( 0 ... MIN.length ).zip( MIN, MAX ) do | i, a, z |
+    if a \> 0 and z \< 0
+      MAX[ i ] %= 0x10000
+    end
+  end
+
+  @decision = <dfa.decisionNumber>
+
+  <@errorMethod()>
+<if(dfa.description)>
+
+  def description
+    \<\<-'__dfa_description__'.strip!
+      <dfa.description>
+    __dfa_description__
+  end<\n>
+<endif>
+end<\n>
+>>
+
+
+specialStateTransitionMethod(dfa) ::= <<
+def special_state_transition_for_dfa<dfa.decisionNumber>(s, input)
+  case s
+  <dfa.specialStateSTs:{state|when <i0>
+  <state>}; separator="\n">
+  end
+<if(backtracking)>
+  @state.backtracking > 0 and raise ANTLR3::Error::BacktrackingFailed<\n>
+<endif>
+  nva = ANTLR3::Error::NoViableAlternative.new( @dfa<dfa.decisionNumber>.description, <dfa.decisionNumber>, s, input )
+  @dfa<dfa.decisionNumber>.error( nva )
+  raise nva
+end
+>>
+
+cyclicDFASynpred( name ) ::= <<
+def <name>() @recognizer.<name> end<\n>
+>>
+
+cyclicDFAInit(dfa) ::= <<
+<if(dfa.specialStateSTs)>
+@dfa<dfa.decisionNumber> = DFA<dfa.decisionNumber>.new( self, <dfa.decisionNumber> ) do |s|
+  case s
+  <dfa.specialStateSTs:{state|when <i0>
+  <state>}; separator="\n">
+  end
+
+  if s \< 0
+<if(backtracking)>
+    @state.backtracking > 0 and raise ANTLR3::Error::BacktrackingFailed<\n>
+<endif>
+    nva = ANTLR3::Error::NoViableAlternative.new( @dfa<dfa.decisionNumber>.description, <dfa.decisionNumber>, s, input )
+    @dfa<dfa.decisionNumber>.error( nva )
+    raise nva
+  end
+
+  s
+end<\n>
+<else>
+@dfa<dfa.decisionNumber> = DFA<dfa.decisionNumber>.new( self, <dfa.decisionNumber> )<\n>
+<endif>
+>>
+
+
+/** A special state in a cyclic DFA; special means it has a
+  * semantic predicate or a huge set of symbols to check.
+  */
+cyclicDFAState(decisionNumber, stateNumber, edges, needErrorClause, semPredState) ::= <<
+look_<decisionNumber>_<stateNumber> = @input.peek
+<if(semPredState)>
+index_<decisionNumber>_<stateNumber> = @input.index
+@input.rewind( @input.last_marker, false )<\n>
+<endif>
+s = -1
+<edges; separator="els">end
+<if(semPredState)> <! return input cursor to state before we rewound !>
+@input.seek( index_<decisionNumber>_<stateNumber> )<\n>
+<endif>
+>>
+
+/** Just like a fixed DFA edge, test the look and indicate
+  * what state to jump to next if successful.  Again, this is
+  * for special states.
+  */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( <labelExpr> )<if(predicates)> and ( <predicates> )<endif>
+  s = <targetStateNumber><\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any
+  * char; always jump to the target.
+  */
+eotDFAEdge(targetStateNumber, edgeNumber, predicates) ::= <<
+e
+  s = <targetStateNumber><\n>
+>>
+
+andPredicates(left,right) ::= "( <left> ) and ( <right> )"
+
+orPredicates(operands) ::= "<operands:{o|( <o> )}; separator=\" or \">"
+
+notPredicate(pred) ::= "not ( <pred> )"
+
+evalPredicate(pred,description) ::= "( <pred> )"
+
+evalSynPredicate(pred,description) ::= <<
+syntactic_predicate?( :<pred:{it | <it>}> )
+>>
+
+lookaheadTest(atom, k, atomAsInt) ::= "look_<decisionNumber>_<stateNumber> == <atom>"
+
+/** Sometimes a look test cannot assume that LA(k) is in a
+  * temp variable somewhere.  Must ask for the look
+  * directly.
+  */
+isolatedLookaheadTest(atom, k, atomAsInt) ::= "@input.peek(<k>) == <atom>"
+
+lookaheadRangeTest(lower, upper, k, rangeNumber, lowerAsInt, upperAsInt) ::= <<
+look_<decisionNumber>_<stateNumber>.between?( <lower>, <upper> )
+>>
+
+isolatedLookaheadRangeTest(lower, upper, k, rangeNumber, lowerAsInt, upperAsInt) ::= <<
+@input.peek( <k> ).between?( <lower>, <upper> )
+>>
+
+setTest(ranges) ::= <<
+<ranges; separator=" || ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> = <expr>"
+
+scopeAttributeRef(scope, attr, index, negIndex) ::= <<
+<if(negIndex)>
+@<scope>_stack[ -<negIndex> ].<attr.name>
+<else>
+<if(index)>
+@<scope>_stack[ <index> ].<attr.name>
+<else>
+@<scope>_stack.last.<attr.name>
+<endif>
+<endif>
+>>
+
+
+scopeSetAttributeRef(scope, attr, expr, index, negIndex) ::= <<
+<if(negIndex)>
+@<scope>_stack[ -<negIndex> ].<attr.name> = <expr>
+<else>
+<if(index)>
+@<scope>_stack[ <index> ].<attr.name> = <expr>
+<else>
+@<scope>_stack.last.<attr.name> = <expr>
+<endif>
+<endif>
+>>
+
+
+/** $x is either a global scope or x is a rule with a dynamic scope;
+  * this refers to the stack itself, not the top of the stack.  This is useful for
+  * predicates like {$function.size()>0 &&
+  * $function::name.equals("foo")}?
+  */
+isolatedDynamicScopeRef(scope) ::= "@<scope>_stack"
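+
+/* Illustrative sketch (not part of the template): given a rule `function`
+ * with a dynamic scope attribute `name` (names hypothetical), the scope
+ * reference templates above translate roughly as:
+ *
+ *   # $function::name          ->  @function_stack.last.name
+ *   # $function::name = "foo"  ->  @function_stack.last.name = "foo"
+ *   # $function (the stack)    ->  @function_stack
+ */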
+
+/** reference an attribute of a rule; it might only have a single
+  * return value
+  */
+ruleLabelRef(referencedRule, scope, attr) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+( <scope; format="label">.nil? ? nil : <scope; format="label">.<attr.name> )
+<else>
+<scope; format="label">
+<endif>
+>>
+
+returnAttributeRef(ruleDescriptor, attr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+return_value.<attr.name>
+<else>
+<attr.name>
+<endif>
+>>
+
+returnSetAttributeRef(ruleDescriptor, attr, expr) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+return_value.<attr.name> = <expr>
+<else>
+<attr.name> = <expr>
+<endif>
+>>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label; format=\"label\">"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_of_<label; format=\"label\">"
+
+tokenLabelPropertyRef_text(scope, attr) ::= "<scope; format=\"label\">.text"
+tokenLabelPropertyRef_type(scope, attr) ::= "<scope; format=\"label\">.type"
+tokenLabelPropertyRef_line(scope, attr) ::= "<scope; format=\"label\">.line"
+tokenLabelPropertyRef_pos(scope, attr) ::= "<scope; format=\"label\">.column"
+tokenLabelPropertyRef_channel(scope, attr) ::= "<scope; format=\"label\">.channel"
+tokenLabelPropertyRef_index(scope, attr) ::= "<scope; format=\"label\">.index"
+tokenLabelPropertyRef_tree(scope, attr) ::= "tree_for_<scope>"
+
+ruleLabelPropertyRef_start(scope, attr) ::= "<scope; format=\"label\">.start"
+ruleLabelPropertyRef_stop(scope, attr) ::= "<scope; format=\"label\">.stop"
+ruleLabelPropertyRef_tree(scope, attr) ::= "<scope; format=\"label\">.tree"
+
+ruleLabelPropertyRef_text(scope, attr) ::= <<
+<if(TREE_PARSER)>
+(
+  @input.token_stream.to_s(
+    @input.tree_adaptor.token_start_index( <scope; format="label">.start ),
+    @input.tree_adaptor.token_stop_index( <scope; format="label">.start )
+  ) if <scope; format="label">
+)
+<else>
+( <scope; format="label"> && @input.to_s( <scope; format="label">.start, <scope; format="label">.stop ) )
+<endif>
+>>
+ruleLabelPropertyRef_st(scope, attr) ::= "( <scope; format=\"label\"> && <scope; format=\"label\">.template )"
+
+/******************************************************************************
+ *****************  L E X E R - O N L Y   T E M P L A T E S  ******************
+ ******************************************************************************/
+
+lexerSynpred(name) ::= ""
+
+lexer(grammar, name, tokens, scopes, rules, numRules, labelType="ANTLR3::Token", filterMode, superClass="ANTLR3::Lexer") ::= <<
+<if(grammar.grammarIsRoot)><autoloadDelegates()><endif>
+
+class <if(grammar.delegator)><grammar.name><else>Lexer<endif> \< <superClass>
+  @grammar_home = <grammar.name>
+<if(!grammar.grammarIsRoot)>
+  <autoloadDelegates()><\n>
+<endif>
+  include TokenData
+<if(filterMode)>
+  include ANTLR3::FilterMode<\n>
+<endif>
+  <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScopeClass()><\n><endif>}>
+
+  begin
+    generated_using( "<fileName>", "<ANTLRVersion>", "<runtimeLibraryVersion()>" )
+  rescue NoMethodError => error
+    # ignore
+  end
+
+  RULE_NAMES   = [ <trunc(rules):{r|"<r.ruleName>"}; separator=", ", wrap="\n", anchor> ].freeze
+  RULE_METHODS = [ <trunc(rules):{r|:<r.ruleName; format="lexerRule">}; separator=", ", wrap="\n", anchor> ].freeze
+
+<if(grammar.delegators)>
+  masters( <grammar.delegators:{d|:<d.name>}; separator=", "> )<\n>
+<endif>
+<if(grammar.directDelegates)>
+  imports( <grammar.directDelegates:{d|:<d.name>}; separator=", "> )<\n>
+<endif>
+
+  def initialize( <grammar.delegators:{g|<g:delegateName()>, }>input=nil, options = {} )
+    super( input, options )
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+    @state.rule_memory = {}<\n>
+<endif>
+<endif>
+    <grammar.delegators:{g|@<g:delegateName()> = <g:delegateName()><\n>}><grammar.directDelegates:{g|@<g:delegateName()> = <newDelegate(g)><\n>}><last(grammar.delegators):{g|@parent = @<g:delegateName()><\n>}><placeAction(scope="lexer",name="init")>
+  end
+
+  <placeAction(scope="lexer",name="members")>
+
+  # - - - - - - - - - - - lexer rules - - - - - - - - - - - -
+  <rules:{it | <it><\n>}>
+<if(grammar.delegatedRules)>
+
+  # - - - - - - - - - - delegated rules - - - - - - - - - - -
+  <grammar.delegatedRules:{ruleDescriptor|<delegateLexerRule(ruleDescriptor)><\n><\n>}>
+<endif>
+<if(cyclicDFAs)>
+
+  # - - - - - - - - - - DFA definitions - - - - - - - - - - -
+  <cyclicDFAs:cyclicDFA()>
+
+  private
+
+  def initialize_dfas
+    super rescue nil
+    <cyclicDFAs:cyclicDFAInit()>
+  end
+
+<endif>
+end # class <if(grammar.delegator)><grammar.name><else>Lexer<endif> \< <superClass>
+<if(!actions.(actionScope).main)>
+
+at_exit { <if(grammar.delegator)><grammar.name><else>Lexer<endif>.main( ARGV ) } if __FILE__ == $0
+<endif>
+>>
+
+
+lexerRuleLabelDefs() ::= <<
+<if([ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.ruleLabels,ruleDescriptor.charLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels])>
+# - - - - label initialization - - - -
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,ruleDescriptor.ruleLabels,ruleDescriptor.charLabels]:{it | <it.label.text; format="label"> = nil<\n>}>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels]:{it | list_of_<it.label.text; format="label"> = [] unless defined?(list_of_<it.label.text; format="label">)<\n>}>
+<endif>
+>>
+
+
+/** How to generate a rule in the lexer; naked blocks are used
+  * for fragment rules.
+  */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+# lexer rule <ruleName; format="lexerRule"> (<ruleName>)
+# (in <fileName>)
+def <ruleName; format="lexerRule"><if(ruleDescriptor.parameterScope)>( <ruleDescriptor.parameterScope:parameterScope()> )<endif>
+  <traceIn()><ruleScopeSetUp()><ruleDeclarations()><if(memoize)>
+<if(backtracking)>
+
+  # backtracking success
+  success = false<\n>
+<endif>
+<endif>
+<if(nakedBlock)>
+  <ruleMemoization({<ruleName; format="lexerRule">})><lexerRuleLabelDefs()><action(name="init", code=ruleDescriptor.actions.init)>
+
+  # - - - - main rule block - - - -
+  <block>
+<else>
+
+  type = <ruleName>
+  channel = ANTLR3::DEFAULT_CHANNEL
+  <ruleMemoization(ruleName)><lexerRuleLabelDefs()><action(name="init", code=ruleDescriptor.actions.init)>
+
+  # - - - - main rule block - - - -
+  <block>
+  <ruleCleanUp()>
+
+  @state.type = type
+  @state.channel = channel
+<(ruleDescriptor.actions.after):execAction()>
+<endif>
+<if(memoize)><if(backtracking)>
+  success = false<\n>
+<endif><endif>
+ensure
+  <traceOut()><ruleScopeCleanUp()><memoize()>
+end
+<! <if(ruleDescriptor.modifier)>
+
+<ruleDescriptor.modifier> :<ruleName; format="lexerRule"><\n>
+<endif> !>
+>>
+
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label; format=\"label\">"
+lexerRuleLabelPropertyRef_line(scope, attr) ::= "<scope; format=\"label\">.line"
+lexerRuleLabelPropertyRef_type(scope, attr) ::= "<scope; format=\"label\">.type"
+lexerRuleLabelPropertyRef_pos(scope, attr) ::= "<scope; format=\"label\">.column"
+lexerRuleLabelPropertyRef_channel(scope, attr) ::= "<scope; format=\"label\">.channel"
+lexerRuleLabelPropertyRef_index(scope, attr) ::= "<scope; format=\"label\">.index"
+lexerRuleLabelPropertyRef_text(scope, attr) ::= "<scope; format=\"label\">.text"
+
+
+/** How to generate code for the implicitly-defined lexer
+  * grammar rule that chooses between lexer rules.
+  */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+# main rule used to study the input at the current position,
+# and choose the proper lexer rule to call in order to
+# fetch the next token
+#
+# usually, you don't make direct calls to this method,
+# but instead use the next_token method, which will
+# build and emit the actual next token
+def <ruleName; format="lexerRule">
+  <block>
+end
+>>
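+
+/* Illustrative sketch (not part of the template): as noted above, the
+ * generated tokens rule is normally driven through next_token rather than
+ * called directly.  A rough driver loop might look like this (lexer
+ * construction is omitted, and the end-of-input check is an assumption
+ * about the runtime):
+ *
+ *   loop do
+ *     token = lexer.next_token    # builds and emits the next token
+ *     break if token.nil? or token.type == ANTLR3::EOF
+ *     # ... use token.type, token.text, etc. ...
+ *   end
+ */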
+
+lexerRulePropertyRef_text(scope, attr) ::= "self.text"
+lexerRulePropertyRef_type(scope, attr) ::= "type"
+lexerRulePropertyRef_line(scope, attr) ::= "@state.token_start_line"
+lexerRulePropertyRef_pos(scope, attr)  ::= "@state.token_start_column"
+
+/** Undefined, but present for consistency with Token
+  * attributes; set to -1
+  */
+lexerRulePropertyRef_index(scope, attr) ::= "-1"
+lexerRulePropertyRef_channel(scope, attr) ::= "channel"
+lexerRulePropertyRef_start(scope, attr) ::= "@state.token_start_position"
+lexerRulePropertyRef_stop(scope, attr) ::= "( self.character_index - 1 )"
+
+/** A lexer rule reference */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+<label; format="label">_start_<elementIndex> = self.character_index
+<methodCall(n={<rule.name; format="lexerRule">},del=scope,args=args)>
+<label; format="label"> = create_token do |t|
+  t.input   = @input
+  t.type    = ANTLR3::INVALID_TOKEN_TYPE
+  t.channel = ANTLR3::DEFAULT_CHANNEL
+  t.start   = <label; format="label">_start_<elementIndex>
+  t.stop    = self.character_index - 1
+end
+<else>
+<methodCall(n={<rule.name; format="lexerRule">}, del=scope, args=args)>
+<endif>
+>>
+
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<addToList(elem={<label; format="label">},...)>
+>>
+
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label; format="label"> = @input.peek<\n>
+<endif>
+match_any
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<addToList(elem={<label; format="label">},...)>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label; format="label"> = @input.peek<\n>
+<endif>
+match( <char> )
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label; format="label"> = @input.peek<\n>
+<endif>
+match_range( <a>, <b> )
+>>
+
+filteringNextToken() ::= ""
+filteringActionGate() ::= "@state.backtracking == 1"
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex) ::= <<
+<if(label)>
+<label; format="label">_start = self.character_index
+match( <string> )
+<label; format="label"> = create_token do |t|
+  t.input   = @input
+  t.type    = ANTLR3::INVALID_TOKEN_TYPE
+  t.channel = ANTLR3::DEFAULT_CHANNEL
+  t.start   = <label; format="label">_start
+  t.stop    = character_index - 1
+end
+<else>
+match( <string> )
+<endif>
+>>
+
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+<label; format="label">_start_<elementIndex> = character_index
+match( ANTLR3::EOF )
+<label; format="label"> = create_token do |t|
+  t.input   = @input
+  t.type    = ANTLR3::INVALID_TOKEN_TYPE
+  t.channel = ANTLR3::DEFAULT_CHANNEL
+  t.start   = <label; format="label">_start_<elementIndex>
+  t.stop    = character_index - 1
+end<\n>
+<else>
+match( ANTLR3::EOF )<\n>
+<endif>
+>>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "int <recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
+recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
+
+/** $start in parser rule */
+rulePropertyRef_start(scope, attr) ::= "return_value.start"
+
+/** $stop in parser rule */
+rulePropertyRef_stop(scope, attr) ::= "return_value.stop"
+
+/** $tree in parser rule */
+rulePropertyRef_tree(scope, attr) ::= "return_value.tree"
+
+/** $text in parser rule */
+rulePropertyRef_text(scope, attr) ::= "@input.to_s( return_value.start, @input.look( -1 ) )"
+
+/** $template in parser rule */
+rulePropertyRef_st(scope, attr) ::= "return_value.template"
+
+ruleSetPropertyRef_tree(scope, attr, expr) ::= "return_value.tree = <expr>"
+
+ruleSetPropertyRef_st(scope, attr, expr) ::= "return_value.template = <expr>"
+
+/** How to execute an action */
+execAction(action) ::= <<
+<if(backtracking)>
+# syntactic predicate action gate test
+if <actions.(actionScope).synpredgate>
+  # --> action
+  <action>
+  # \<-- action
+end
+<else>
+# --> action
+<action>
+# \<-- action
+<endif>
+>>
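+
+/* Illustrative sketch (not part of the template): with backtracking enabled
+ * and the gate assumed to be filled in from actionGate() above, a user action
+ * such as { puts "matched" } is expected to come out roughly as:
+ *
+ *   # syntactic predicate action gate test
+ *   if @state.backtracking == 0
+ *     # --> action
+ *     puts "matched"
+ *     # <-- action
+ *   end
+ */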
+
+codeFileExtension() ::= ".rb"
+
+true()  ::= "true"
+false() ::= "false"
+
+action(name, code) ::= <<
+<if(code)>
+# - - - - @<name> action - - - -
+<code><\n>
+<endif>
+>>
+
+autoloadDelegates() ::= <<
+<if(grammar.directDelegates)>
+<grammar.directDelegates:{it | autoload :<it.name>, "<it.recognizerName>"<\n>}>
+<endif>
+>>
+
+delegateLexerRule(ruleDescriptor) ::= <<
+# delegated lexer rule <ruleDescriptor.name; format="lexerRule"> (<ruleDescriptor.name> in the grammar)
+def <ruleDescriptor.name; format="lexerRule"><if(ruleDescriptor.parameterScope)>( <ruleDescriptor.parameterScope:parameterScope()> )<endif>
+  <methodCall(del=ruleDescriptor.grammar, n={<ruleDescriptor.name; format="lexerRule">}, args=ruleDescriptor.parameterScope.attributes)>
+end
+>>
+
+rootClassName() ::= <<
+<if(grammar.grammarIsRoot)><grammar.name><else><grammar.composite.rootGrammar.name><endif>::<if(TREE_PARSER)>TreeParser<elseif(PARSER)>Parser<else>Lexer<endif>
+>>
+
+grammarClassName() ::= <<
+<gram.name>::<if(TREE_PARSER)>TreeParser<elseif(PARSER)>Parser<else>Lexer<endif>
+>>
+
+newDelegate(gram) ::= <<
+<gram.name>.new( <trunc(gram.delegators):{p|<p:delegateName()>, }>
+  self, @input, :state => @state<@delegateOptions()>
+)
+>>
+
+placeAction(scope, name) ::= <<
+<if(actions.(scope).(name))>
+# - - - - - - begin action @<scope>::<name> - - - - - -
+<if(fileName)># <fileName><\n><endif>
+<actions.(scope).(name)>
+# - - - - - - end action @<scope>::<name> - - - - - - -<\n>
+<endif>
+>>
+
+runtimeLibraryVersion() ::= "1.8.1"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ST.stg b/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ST.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/codegen/templates/Ruby/ST.stg
rename to tool/src/main/resources/org/antlr/codegen/templates/Ruby/ST.stg
diff --git a/tool/src/main/resources/org/antlr/codegen/templates/Scala/Scala.stg b/tool/src/main/resources/org/antlr/codegen/templates/Scala/Scala.stg
new file mode 100644
index 0000000..bfd7c44
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/codegen/templates/Scala/Scala.stg
@@ -0,0 +1,1385 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2010 Matthew Lloyd
+ http://linkedin.com/in/matthewl
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+scalaTypeInitMap ::= [
+	"Int":"0",
+	"Long":"0",
+	"Float":"0.0f",
+	"Double":"0.0",
+	"Boolean":"false",
+	"Byte":"0",
+	"Short":"0",
+	"Char":"0",
+	default:"null" // anything other than an atomic type
+]
+
+/** The overall file structure of a recognizer; stores methods for rules
+ *  and cyclic DFAs plus support code.
+ */
+outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
+           docComment, recognizer,
+           name, tokens, tokenNames, rules, cyclicDFAs,
+	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
+	   backtracking, synpreds, memoize, numRules,
+	   fileName, ANTLRVersion, generatedTimestamp, trace,
+	   scopes, superClass, literals) ::=
+<<
+// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
+<actions.(actionScope).header>
+
+<@imports>
+import org.antlr.runtime._
+<if(TREE_PARSER)>
+import org.antlr.runtime.tree._
+<endif>
+<@end>
+
+<docComment>
+<recognizer>
+>>
+
+lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="CommonToken",
+      superClass="Lexer") ::= <<
+object <grammar.recognizerName> {
+    <tokens:{it | val <it.name> = <it.type>}; separator="\n">
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+}
+
+class <grammar.recognizerName>(input: CharStream, state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>: RecognizerSharedState) extends <@superClassName><superClass><@end>(input, state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>) {
+    import <grammar.recognizerName>._
+    <actions.lexer.members>
+
+    // delegates
+    <grammar.delegates:
+         {g|<g.recognizerName> <g:delegateName()>}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|<g.recognizerName> <g:delegateName()>}; separator="\n">
+    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
+    <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScope()><endif>}>
+
+    def this(input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>: CharStream) =
+        this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
+
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+        state.ruleMemo = new Array[java.util.Map[_,_]](<numRules>+1)<\n> <! index from 1..n !>
+<endif>
+<endif>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this)}; separator="\n">
+        <grammar.delegators:
+         {g|this.<g:delegateName()> = <g:delegateName()>}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>}>
+
+    override def getGrammarFileName = "<fileName>"
+
+<if(filterMode)>
+    <filteringNextToken()>
+<endif>
+    <rules; separator="\n\n">
+
+    <synpreds:{p | <lexerSynpred(p)>}>
+    <cyclicDFAs:{dfa | private val dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this)}; separator="\n">
+}
+>>
+
+/** An override of Lexer.nextToken() that backtracks over mTokens() looking
+ *  for matches.  No error can be generated upon error; just rewind, consume
+ *  a token and then try again.  backtracking needs to be set as well.
+ *  Make rule memoization happen only at levels above 1 as we start mTokens
+ *  at backtracking==1.
+ */
+filteringNextToken() ::= <<
+override def nextToken(): Token = {
+    while (true) {
+        if ( input.LA(1)==CharStream.EOF ) {
+            val eof: Token = new CommonToken(input.asInstanceOf[CharStream], Token.EOF,
+                                        Token.DEFAULT_CHANNEL,
+                                        input.index(),input.index())
+            eof.setLine(getLine())
+            eof.setCharPositionInLine(getCharPositionInLine())
+            return eof
+        }
+        state.token = null
+        state.channel = Token.DEFAULT_CHANNEL
+        state.tokenStartCharIndex = input.index()
+        state.tokenStartCharPositionInLine = input.getCharPositionInLine()
+        state.tokenStartLine = input.getLine()
+        state.text = null
+        try {
+            val m = input.mark()
+            state.backtracking=1 <! means we won't throw slow exception !>
+            state.failed=false
+            mTokens()
+            state.backtracking=0
+            <! mTokens backtracks with synpred at backtracking==2
+               and we set the synpredgate to allow actions at level 1. !>
+            if ( state.failed ) {
+                input.rewind(m)
+                input.consume() <! advance one char and try again !>
+            }
+            else {
+                emit()
+                return state.token
+            }
+        }
+        catch {
+            case re: RecognitionException =>
+            // shouldn't happen in backtracking mode, but...
+            reportError(re)
+            recover(re)
+        }
+    }
+}
+
+override def memoize(input: IntStream,
+		ruleIndex: Int,
+		ruleStartIndex: Int) = {
+if ( state.backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex)
+}
+
+override def alreadyParsedRule(input: IntStream, ruleIndex: Int): Boolean = {
+if ( state.backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex)
+return false
+}
+>>
+
+actionGate() ::= "state.backtracking==0"
+
+filteringActionGate() ::= "state.backtracking==1"
+
+/** How to generate a parser */
+genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
+              bitsets, inputStreamType, superClass,
+              labelType, members, rewriteElementType,
+              filterMode, ASTLabelType="Object") ::= <<
+object <grammar.recognizerName> {
+<if(grammar.grammarIsRoot)>
+    val tokenNames = Array(
+        "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
+    )<\n>
+<endif>
+
+    <tokens:{it | val <it.name> = <it.type>}; separator="\n">
+
+    <cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>
+
+    <bitsets:{it | <bitset(name={FOLLOW_<it.name>_in_<it.inName><it.tokenIndex>},
+                    words64=it.bits)>}>
+}
+
+class <grammar.recognizerName>(input: <inputStreamType>, state<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>: RecognizerSharedState) extends <@superClassName><superClass><@end>(input, state) {
+    import <grammar.recognizerName>._
+    // delegates
+    <grammar.delegates:
+         {g|public <g.recognizerName> <g:delegateName()>}; separator="\n">
+    // delegators
+    <grammar.delegators:
+         {g|public <g.recognizerName> <g:delegateName()>}; separator="\n">
+    <last(grammar.delegators):{g|public <g.recognizerName> gParent;}>
+
+    <scopes:{it | <if(it.isDynamicGlobalScope)><globalAttributeScope()><endif>}>
+
+    <@members>
+    <! WARNING. bug in ST: this is cut-n-paste into Dbg.stg !>
+    def this(input<grammar.delegators:{g|, <g.recognizerName> <g:delegateName()>}>: <inputStreamType>) =
+        this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)
+
+        <parserCtorBody()>
+        <grammar.directDelegates:
+         {g|<g:delegateName()> = new <g.recognizerName>(input, state<trunc(g.delegators):{p|, <p:delegateName()>}>, this)}; separator="\n">
+        <grammar.indirectDelegates:{g | <g:delegateName()> = <g.delegator:delegateName()>.<g:delegateName()>}; separator="\n">
+        <last(grammar.delegators):{g|gParent = <g:delegateName()>}>
+    <@end>
+
+    override def getTokenNames: Array[String] = tokenNames
+    override def getGrammarFileName = "<fileName>"
+
+    <members>
+
+    <rules; separator="\n\n">
+
+<! generate rule/method definitions for imported rules so they
+   appear to be defined in this recognizer. !>
+    // Delegated rules
+<grammar.delegatedRules:{ruleDescriptor|
+    @throws(classOf[RecognitionException])
+    def <ruleDescriptor.name>(<ruleDescriptor.parameterScope:parameterScope()>): <returnType()> = \{ <if(ruleDescriptor.hasReturnValue)>return <endif><ruleDescriptor.grammar:delegateName()>.<ruleDescriptor.name>(<ruleDescriptor.parameterScope.attributes:{a|<a.name>}; separator=", ">) \}}; separator="\n">
+
+    <synpreds:{p | <synpred(p)>}>
+
+    <cyclicDFAs:{dfa | private val dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this)}; separator="\n">
+}
+>>
+
+parserCtorBody() ::= <<
+<if(memoize)>
+<if(grammar.grammarIsRoot)>
+this.state.ruleMemo = new Array[java.util.Map[_,_]](<length(grammar.allImportedRules)>+1)<\n> <! index from 1..n !>
+<endif>
+<endif>
+<grammar.delegators:
+ {g|this.<g:delegateName()> = <g:delegateName()>}; separator="\n">
+>>
+
+parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
+       ASTLabelType="Object", superClass="Parser", labelType="Token",
+       members={<actions.parser.members>}) ::= <<
+<genericParser(inputStreamType="TokenStream", rewriteElementType="Token", ...)>
+>>
+
+/** How to generate a tree parser; same as parser except the input
+ *  stream is a different type.
+ */
+treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
+           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object",
+           superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
+           members={<actions.treeparser.members>}
+           ) ::= <<
+<genericParser(inputStreamType="TreeNodeStream", rewriteElementType="Node", ...)>
+>>
+
+/** A simpler version of a rule template that is specific to the imaginary
+ *  rules created for syntactic predicates.  As they never have return values
+ *  nor parameters etc., just give the simplest possible method.  Don't do
+ *  any of the normal memoization stuff in here either; it's a waste.
+ *  As predicates cannot be inlined into the invoking rule, they need to
+ *  be in a rule by themselves.
+ */
+synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
+<<
+// $ANTLR start <ruleName>
+@throws(classOf[RecognitionException])
+def <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope()>): Unit = {
+    <ruleLabelDefs()>
+<if(trace)>
+    traceIn("<ruleName>_fragment", <ruleDescriptor.index>)
+    try {
+        <block>
+    }
+    finally {
+        traceOut("<ruleName>_fragment", <ruleDescriptor.index>);
+    }
+<else>
+    <block>
+<endif>
+}
+// $ANTLR end <ruleName>
+>>
+
+synpred(name) ::= <<
+final def <name>(): Boolean = {
+    state.backtracking+=1
+    <@start()>
+    val start = input.mark()
+    try {
+        <name>_fragment() // can never throw exception
+    } catch {
+        case re: RecognitionException =>
+        System.err.println("impossible: "+re)
+    }
+    val success = !state.failed
+    input.rewind(start)
+    <@stop()>
+    state.backtracking-=1
+    state.failed=false
+    success
+}<\n>
+>>
+
+lexerSynpred(name) ::= <<
+<synpred(name)>
+>>
+
+ruleMemoization(name) ::= <<
+<if(memoize)>
+if ( state.backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()> }
+<endif>
+>>
+
+/** How to test for failure and return from rule */
+checkRuleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.failed) return <ruleReturnValue()><endif>
+>>
+
+/** This rule has failed, exit indicating failure during backtrack */
+ruleBacktrackFailure() ::= <<
+<if(backtracking)>if (state.backtracking>0) {state.failed=true; return <ruleReturnValue()>}<endif>
+>>
+
+/** How to generate code for a rule.  This includes any return type
+ *  data aggregates required for multiple return values.
+ */
+rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
+<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
+<returnScope(scope=ruleDescriptor.returnScope)>
+
+// $ANTLR start "<ruleName>"
+// <fileName>:<description>
+@throws(classOf[RecognitionException])
+final def <ruleName>(<ruleDescriptor.parameterScope:parameterScope()>): <returnType()> = {
+    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>)<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    <ruleLabelDefs()>
+    <ruleDescriptor.actions.init>
+    <@preamble()>
+    try {
+        <ruleMemoization(name=ruleName)>
+        <block>
+        <ruleCleanUp()>
+        <(ruleDescriptor.actions.after):execAction()>
+    }
+<if(exceptions)>
+    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
+<else>
+<if(!emptyRule)>
+<if(actions.(actionScope).rulecatch)>
+    <actions.(actionScope).rulecatch>
+<else>
+    catch {
+        case re: RecognitionException =>
+        reportError(re)
+        recover(input,re)
+	<@setErrorReturnValue()>
+    }<\n>
+<endif>
+<endif>
+<endif>
+    finally {
+        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>);<endif>
+        <memoize()>
+        <ruleScopeCleanUp()>
+        <finally>
+    }
+    <@postamble()>
+    return <ruleReturnValue()>
+}
+// $ANTLR end "<ruleName>"
+>>
+
+catch(decl,action) ::= <<
+catch (<e.decl>) {
+    <e.action>
+}
+>>
+
+ruleDeclarations() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+val retval = new <returnType()>()
+retval.start = input.LT(1)<\n>
+<else>
+<ruleDescriptor.returnScope.attributes:{ a |
+var <a.name>: <a.type> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>
+}>
+<endif>
+<if(memoize)>
+val <ruleDescriptor.name>_StartIndex = input.index()
+<endif>
+>>
+
+ruleScopeSetUp() ::= <<
+<ruleDescriptor.useScopes:{it | <it>_stack.push(new <it>_scope())}; separator="\n">
+<ruleDescriptor.ruleScope:{it | <it.name>_stack.push(new <it.name>_scope())}; separator="\n">
+>>
+
+ruleScopeCleanUp() ::= <<
+<ruleDescriptor.useScopes:{it | <it>_stack.pop()}; separator="\n">
+<ruleDescriptor.ruleScope:{it | <it.name>_stack.pop()}; separator="\n">
+>>
+
+
+ruleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
+  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it | var <it.label.text>: <labelType> = null}; separator="\n"
+>
+<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
+    :{it | var list_<it.label.text>: java.util.List=null}; separator="\n"
+>
+<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
+<ruleDescriptor.ruleListLabels:{ll|var <ll.label.text>: RuleReturnScope = null}; separator="\n">
+>>
+
+lexerRuleLabelDefs() ::= <<
+<[ruleDescriptor.tokenLabels,
+  ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleLabels]
+    :{it | var <it.label.text>: <labelType>=null}; separator="\n"
+>
+<ruleDescriptor.charLabels:{it | var <it.label.text>: Int = 0}; separator="\n">
+<[ruleDescriptor.tokenListLabels,
+  ruleDescriptor.ruleListLabels]
+    :{it | var list_<it.label.text>: java.util.List=null}; separator="\n"
+>
+>>
+
+ruleReturnValue() ::= <<
+<if(!ruleDescriptor.isSynPred)>
+<if(ruleDescriptor.hasReturnValue)>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnName>
+<else>
+retval
+<endif>
+<endif>
+<endif>
+>>
+
+ruleCleanUp() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<if(!TREE_PARSER)>
+retval.stop = input.LT(-1)<\n>
+<endif>
+<endif>
+>>
+
+memoize() ::= <<
+<if(memoize)>
+<if(backtracking)>
+if ( state.backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex) }
+<endif>
+<endif>
+>>
+
+/** How to generate a rule in the lexer; naked blocks are used for
+ *  fragment rules.
+ */
+lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
+// $ANTLR start "<ruleName>"
+@throws(classOf[RecognitionException])
+final def m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>): Unit = {
+    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>)<endif>
+    <ruleScopeSetUp()>
+    <ruleDeclarations()>
+    try {
+<if(nakedBlock)>
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        try <block><\n>
+<else>
+        var _type = <ruleName>
+        var _channel = BaseRecognizer.DEFAULT_TOKEN_CHANNEL
+        <ruleMemoization(name=ruleName)>
+        <lexerRuleLabelDefs()>
+        <ruleDescriptor.actions.init>
+        try <block>
+        <ruleCleanUp()>
+        state.`type` = _type
+        state.channel = _channel
+        <(ruleDescriptor.actions.after):execAction()>
+<endif>
+    }
+    finally {
+        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>)<endif>
+        <ruleScopeCleanUp()>
+        <memoize()>
+    }
+}
+// $ANTLR end "<ruleName>"
+>>
+
+/** How to generate code for the implicitly-defined lexer grammar rule
+ *  that chooses between lexer rules.
+ */
+tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
+@throws(classOf[RecognitionException])
+def mTokens(): Unit = {
+    <block><\n>
+}
+>>
+
+// S U B R U L E S
+
+/** A (...) subrule with multiple alternatives */
+block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var alt<decisionNumber> = <maxAlt>
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+<@prebranch()>
+alt<decisionNumber> match {
+    <alts:{a | <altSwitchCase(i,a)>}>
+    case _ =>
+}
+<@postbranch()>
+>>
+
+/** A rule block with multiple alternatives */
+ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var alt<decisionNumber> = <maxAlt>
+<decls>
+<@predecision()>
+<decision>
+<@postdecision()>
+alt<decisionNumber> match {
+    <alts:{a | <altSwitchCase(i,a)>}>
+    case _ =>
+}
+>>
+
+ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A special case of a (...) subrule with a single alternative */
+blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@prealt()>
+<alts>
+<@postalt()>
+>>
+
+/** A (..)+ block with 1 or more alternatives */
+positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+var cnt<decisionNumber>: Int = 0
+<decls>
+<@preloop()>
+var loop<decisionNumber>_quitflag = false
+while (!loop<decisionNumber>_quitflag) {
+    var alt<decisionNumber>:Int = <maxAlt>
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    alt<decisionNumber> match {
+        <alts:{a | <altSwitchCase(i,a)>}>
+	case _ =>
+	    if ( cnt<decisionNumber> >= 1 ) loop<decisionNumber>_quitflag = true
+	    else {
+	    <ruleBacktrackFailure()>
+            val eee = new EarlyExitException(<decisionNumber>, input)
+            <@earlyExitException()>
+            throw eee
+      }
+    }
+    cnt<decisionNumber>+=1
+}
+<@postloop()>
+>>
+
+positiveClosureBlockSingleAlt ::= positiveClosureBlock
+
+/** A (..)* block with 0 or more alternatives */
+closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
+// <fileName>:<description>
+<decls>
+<@preloop()>
+var loop<decisionNumber>_quitflag = false
+while (!loop<decisionNumber>_quitflag) {
+    var alt<decisionNumber>:Int = <maxAlt>
+    <@predecision()>
+    <decision>
+    <@postdecision()>
+    alt<decisionNumber> match {
+        <alts:{a | <altSwitchCase(i,a)>}>
+	case _ => loop<decisionNumber>_quitflag = true
+    }
+}
+<@postloop()>
+>>
+
+closureBlockSingleAlt ::= closureBlock
+
+/** Optional blocks (x)? are translated to (x|) before code generation
+ *  so we can just use the normal block template
+ */
+optionalBlock ::= block
+
+optionalBlockSingleAlt ::= block
+
+/** A case in a switch that jumps to an alternative given the alternative
+ *  number.  A DFA predicts the alternative and then a simple switch
+ *  does the jump to the code that actually matches that alternative.
+ */
+altSwitchCase(altNum, alt) ::= <<
+case <altNum> =>
+    <@prealt()>
+    <alt>
+>>
+
+/** An alternative is just a list of elements; at outermost level */
+alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
+// <fileName>:<description>
+{
+<@declarations()>
+<elements:element()>
+<rew>
+<@cleanup()>
+}
+>>
+
+/** What to emit when there is no rewrite.  For auto build
+ *  mode, does nothing.
+ */
+noRewrite(rewriteBlockLevel, treeLevel) ::= ""
+
+// E L E M E N T S
+
+/** Dump the elements one per line */
+element(e) ::= <<
+<@prematch()>
+<e.el><\n>
+>>
+
+/** match a token optionally with a label in front */
+tokenRef(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)><label>=<endif>`match`(input,<token>,FOLLOW_<token>_in_<ruleName><elementIndex>)<if(label)>.asInstanceOf[<labelType>]<endif>
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=ID */
+tokenRefAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<tokenRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+listLabel(label,elem) ::= <<
+if (list_<label>==null) list_<label>=new java.util.ArrayList()
+list_<label>.add(<elem>)<\n>
+>>
+
+/** match a character */
+charRef(char,label) ::= <<
+<if(label)>
+<label> = input.LA(1)<\n>
+<endif>
+`match`(<char>)
+<checkRuleBacktrackFailure()>
+>>
+
+/** match a character range */
+charRangeRef(a,b,label) ::= <<
+<if(label)>
+<label> = input.LA(1)<\n>
+<endif>
+matchRange(<a>,<b>); <checkRuleBacktrackFailure()>
+>>
+
+/** For now, sets are interval tests and must be tested inline */
+matchSet(s,label,elementIndex,terminalOptions,postmatchCode="") ::= <<
+<if(label)>
+<if(LEXER)>
+<label>= input.LA(1)<\n>
+<else>
+<label>=input.LT(1).asInstanceOf[<labelType>]<\n>
+<endif>
+<endif>
+if ( <s> ) {
+    input.consume()
+    <postmatchCode>
+<if(!LEXER)>
+    state.errorRecovery=false<\n>
+<endif>
+    <if(backtracking)>state.failed=false<endif>
+}
+else {
+    <ruleBacktrackFailure()>
+    val mse = new MismatchedSetException(null,input)
+    <@mismatchedSetException()>
+<if(LEXER)>
+    recover(mse)
+    throw mse
+<else>
+    throw mse
+    <! use following code to make it recover inline; remove throw mse;
+    recoverFromMismatchedSet(input,mse,FOLLOW_set_in_<ruleName><elementIndex>)
+    !>
+<endif>
+}<\n>
+>>
+
+matchRuleBlockSet ::= matchSet
+
+matchSetAndListLabel(s,label,elementIndex,postmatchCode) ::= <<
+<matchSet(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a string literal */
+lexerStringRef(string,label,elementIndex="0") ::= <<
+<if(label)>
+val <label>Start = getCharIndex()
+`match`(<string>)
+<checkRuleBacktrackFailure()>
+val <label>StartLine<elementIndex> = getLine()
+val <label>StartCharPos<elementIndex> = getCharPositionInLine()
+<label> = new <labelType>(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start, getCharIndex()-1)
+<label>.setLine(<label>StartLine<elementIndex>)
+<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
+<else>
+`match`(<string>)
+<checkRuleBacktrackFailure()><\n>
+<endif>
+>>
+
+wildcard(token,label,elementIndex,terminalOptions) ::= <<
+<if(label)>
+<label>=input.LT(1).asInstanceOf[<labelType>]<\n>
+<endif>
+matchAny(input)
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardAndListLabel(token,label,elementIndex,terminalOptions) ::= <<
+<wildcard(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match . wildcard in lexer */
+wildcardChar(label, elementIndex) ::= <<
+<if(label)>
+<label> = input.LA(1)<\n>
+<endif>
+matchAny()
+<checkRuleBacktrackFailure()>
+>>
+
+wildcardCharListLabel(label, elementIndex) ::= <<
+<wildcardChar(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** Match a rule reference by invoking it, possibly with arguments
+ *  and a return value or values.  The 'rule' argument used to be the
+ *  target rule name; it is now of type Rule, whose toString is still
+ *  the rule name, but the full rule descriptor is also accessible
+ *  through it.
+ */
+ruleRef(rule,label,elementIndex,args,scope) ::= <<
+pushFollow(FOLLOW_<rule.name>_in_<ruleName><elementIndex>)
+<if(label)><label>=<endif><if(scope)><scope:delegateName()>.<endif><rule.name>(<args; separator=", ">)<\n>
+state._fsp-=1
+<checkRuleBacktrackFailure()>
+>>
+
+/** ids+=r */
+ruleRefAndListLabel(rule,label,elementIndex,args,scope) ::= <<
+<ruleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** A lexer rule reference.
+ *
+ *  The 'rule' argument used to be the target rule name; it is now of
+ *  type Rule, whose toString is still the rule name, but the full
+ *  rule descriptor is also accessible through it.
+ */
+lexerRuleRef(rule,label,args,elementIndex,scope) ::= <<
+<if(label)>
+val <label>Start<elementIndex> = getCharIndex()
+val <label>StartLine<elementIndex> = getLine()
+val <label>StartCharPos<elementIndex> = getCharPositionInLine()
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
+<checkRuleBacktrackFailure()>
+<label> = new <labelType>(input, Token.INVALID_TOKEN_TYPE, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1)
+<label>.setLine(<label>StartLine<elementIndex>)
+<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
+<else>
+<if(scope)><scope:delegateName()>.<endif>m<rule.name>(<args; separator=", ">)
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** i+=INT in lexer */
+lexerRuleRefAndListLabel(rule,label,args,elementIndex,scope) ::= <<
+<lexerRuleRef(...)>
+<listLabel(elem=label,...)>
+>>
+
+/** EOF in the lexer */
+lexerMatchEOF(label,elementIndex) ::= <<
+<if(label)>
+val <label>Start<elementIndex> = getCharIndex()
+val <label>StartLine<elementIndex> = getLine()
+val <label>StartCharPos<elementIndex> = getCharPositionInLine()
+`match`(EOF)
+<checkRuleBacktrackFailure()>
+val <label> = new <labelType>(input, EOF, Token.DEFAULT_CHANNEL, <label>Start<elementIndex>, getCharIndex()-1)
+<label>.setLine(<label>StartLine<elementIndex>)
+<label>.setCharPositionInLine(<label>StartCharPos<elementIndex>)
+<else>
+`match`(EOF)
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+// used for left-recursive rules
+recRuleDefArg()                       ::= "int <recRuleArg()>"
+recRuleArg()                          ::= "_p"
+recRuleAltPredicate(ruleName,opPrec)  ::= "<recRuleArg()> \<= <opPrec>"
+recRuleSetResultAction()              ::= "root_0=$<ruleName>_primary.tree;"
+recRuleSetReturnAction(src,name)      ::= "$<name>=$<src>.<name>;"
+
+/** match ^(root children) in tree parser */
+tree(root, actionsAfterRoot, children, nullableChildList,
+     enclosingTreeLevel, treeLevel) ::= <<
+<root:element()>
+<actionsAfterRoot:element()>
+<if(nullableChildList)>
+if ( input.LA(1)==Token.DOWN ) {
+    `match`(input, Token.DOWN, null)
+    <checkRuleBacktrackFailure()>
+    <children:element()>
+    `match`(input, Token.UP, null)
+    <checkRuleBacktrackFailure()>
+}
+<else>
+`match`(input, Token.DOWN, null)
+<checkRuleBacktrackFailure()>
+<children:element()>
+`match`(input, Token.UP, null)
+<checkRuleBacktrackFailure()>
+<endif>
+>>
+
+/** Every predicate is used as a validating predicate (even when it is
+ *  also hoisted into a prediction expression).
+ */
+validateSemanticPredicate(pred,description) ::= <<
+if ( !(<evalPredicate(...)>) ) {
+    <ruleBacktrackFailure()>
+    throw new FailedPredicateException(input, "<ruleName>", "<description>")
+}
+>>
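+
+/* Illustrative sketch: a semantic predicate {x>0}? in a hypothetical rule `r`
+ * comes out of the template above roughly as
+ *
+ *     if ( !((x>0)) ) {
+ *         // ...ruleBacktrackFailure() expansion when backtracking...
+ *         throw new FailedPredicateException(input, "r", "x>0")
+ *     }
+ */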
+
+// F i x e d  D F A  (if-then-else)
+
+dfaState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+val LA<decisionNumber>_<stateNumber> = input.LA(<k>)<\n>
+<edges; separator="\nelse ">
+else {
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>
+<else>
+    <ruleBacktrackFailure()>
+    val nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input)<\n>
+    <@noViableAltException()>
+    throw nvae<\n>
+<endif>
+}
+>>
+
+/** Same as a normal DFA state except that we don't examine lookahead
+ *  for the bypass alternative.  This delays error detection, but it
+ *  is faster, smaller, and closer to what people expect.  For (X)? people
+ *  expect "if ( LA(1)==X ) match(X);" and that's it.
+ */
+dfaOptionalBlockState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+val LA<decisionNumber>_<stateNumber> = input.LA(<k>)<\n>
+<edges; separator="\nelse ">
+>>
+
+/** A DFA state that is actually the loopback decision of a closure
+ *  loop.  If end-of-token (EOT) predicts any of the targets then it
+ *  should act like a default clause (i.e., no error can be generated).
+ *  This is used only in the lexer so that for ('a')* on the end of a rule
+ *  anything other than 'a' predicts exiting.
+ */
+dfaLoopbackState(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+val LA<decisionNumber>_<stateNumber> = input.LA(<k>)<\n>
+<edges; separator="\nelse "><\n>
+<if(eotPredictsAlt)>
+<if(!edges)>
+alt<decisionNumber>=<eotPredictsAlt> <! if no edges, don't gen ELSE !>
+<else>
+else {
+    alt<decisionNumber>=<eotPredictsAlt>
+}<\n>
+<endif>
+<endif>
+>>
+
+/** An accept state indicates a unique alternative has been predicted */
+dfaAcceptState(alt) ::= "alt<decisionNumber>=<alt>"
+
+/** A simple edge with an expression.  If the expression is satisfied,
+ *  transition to the target state.  To handle gated productions, we may
+ *  have to evaluate some predicates for this edge.
+ */
+dfaEdge(labelExpr, targetState, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {
+    <targetState>
+}
+>>
+
+// F i x e d  D F A  (switch case)
+
+/** A DFA state where a SWITCH may be generated.  The code generator
+ *  decides if this is possible: CodeGenerator.canGenerateSwitch().
+ */
+dfaStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+input.LA(<k>) match {
+<edges; separator="\n">
+case _ =>
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>
+<else>
+    <ruleBacktrackFailure()>
+    val nvae = new NoViableAltException("<description>", <decisionNumber>, <stateNumber>, input)<\n>
+    <@noViableAltException()>
+    throw nvae<\n>
+<endif>
+}<\n>
+>>
+
+dfaOptionalBlockStateSwitch(k,edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+input.LA(<k>) match {
+    <edges; separator="\n">
+    case _ =>
+}<\n>
+>>
+
+dfaLoopbackStateSwitch(k, edges,eotPredictsAlt,description,stateNumber,semPredState) ::= <<
+input.LA(<k>) match {
+<edges; separator="\n"><\n>
+case _ =>
+<if(eotPredictsAlt)>
+    alt<decisionNumber>=<eotPredictsAlt>;
+<endif>
+}<\n>
+>>
+
+dfaEdgeSwitch(labels, targetState) ::= <<
+case <labels:{it | <it>}; separator=" | "> =>
+    {
+    <targetState>
+    }
+>>
+
+// C y c l i c  D F A
+
+/** The code to initiate execution of a cyclic DFA; this is used
+ *  in the rule to predict an alt just like the fixed DFA case.
+ *  The <name> attribute is inherited via the parser, lexer, ...
+ */
+dfaDecision(decisionNumber,description) ::= <<
+alt<decisionNumber> = dfa<decisionNumber>.predict(input)
+>>
+
+/* Dump DFA tables as run-length-encoded Strings of octal values.
+ * Can't use hex as compiler translates them before compilation.
+ * These strings are split into multiple, concatenated strings.
+ * Java puts them back together at compile time thankfully.
+ * Java cannot handle large static arrays, so we're stuck with this
+ * encode/decode approach.  See analysis and runtime DFA for
+ * the encoding methods.
+ */
+cyclicDFA(dfa) ::= <<
+val DFA<dfa.decisionNumber>_eotS =
+    "<dfa.javaCompressedEOT; wrap="\"+\n    \"">"
+val DFA<dfa.decisionNumber>_eofS =
+    "<dfa.javaCompressedEOF; wrap="\"+\n    \"">"
+val DFA<dfa.decisionNumber>_minS =
+    "<dfa.javaCompressedMin; wrap="\"+\n    \"">"
+val DFA<dfa.decisionNumber>_maxS =
+    "<dfa.javaCompressedMax; wrap="\"+\n    \"">"
+val DFA<dfa.decisionNumber>_acceptS =
+    "<dfa.javaCompressedAccept; wrap="\"+\n    \"">"
+val DFA<dfa.decisionNumber>_specialS =
+    "<dfa.javaCompressedSpecial; wrap="\"+\n    \"">}>"
+val DFA<dfa.decisionNumber>_transitionS: Array[String] = Array(
+        <dfa.javaCompressedTransition:{s|"<s; wrap="\"+\n\"">"}; separator=",\n">
+)
+
+val DFA<dfa.decisionNumber>_eot: Array[Short] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eotS)
+val DFA<dfa.decisionNumber>_eof: Array[Short] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_eofS)
+val DFA<dfa.decisionNumber>_min: Array[Char] = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_minS)
+val DFA<dfa.decisionNumber>_max: Array[Char] = DFA.unpackEncodedStringToUnsignedChars(DFA<dfa.decisionNumber>_maxS)
+val DFA<dfa.decisionNumber>_accept: Array[Short] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_acceptS)
+val DFA<dfa.decisionNumber>_special: Array[Short] = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_specialS)
+val DFA<dfa.decisionNumber>_transition = new Array[Array[Short]](DFA<dfa.decisionNumber>_transitionS.length)
+
+for (i \<- DFA<dfa.decisionNumber>_transition.indices) {
+    DFA<dfa.decisionNumber>_transition(i) = DFA.unpackEncodedString(DFA<dfa.decisionNumber>_transitionS(i))
+}
+
+class DFA<dfa.decisionNumber> extends DFA {
+
+    def this(recognizer: BaseRecognizer) = {
+        this()
+        this.recognizer = recognizer
+        this.decisionNumber = <dfa.decisionNumber>
+        this.eot = DFA<dfa.decisionNumber>_eot
+        this.eof = DFA<dfa.decisionNumber>_eof
+        this.min = DFA<dfa.decisionNumber>_min
+        this.max = DFA<dfa.decisionNumber>_max
+        this.accept = DFA<dfa.decisionNumber>_accept
+        this.special = DFA<dfa.decisionNumber>_special
+        this.transition = DFA<dfa.decisionNumber>_transition
+    }
+    override def getDescription = "<dfa.description>"
+    <@errorMethod()>
+<if(dfa.specialStateSTs)>
+    @throws(classOf[NoViableAltException])
+    override def specialStateTransition(s: Int, _input: IntStream):Int = {
+        <if(LEXER)>
+        val input = _input
+        <endif>
+        <if(PARSER)>
+        val input = _input.asInstanceOf[TokenStream]
+        <endif>
+        <if(TREE_PARSER)>
+        val input = _input.asInstanceOf[TreeNodeStream]
+        <endif>
+        val _s = s
+        s match {
+        <dfa.specialStateSTs:{state |
+        case <i0> => <! compressed special state numbers 0..n-1 !>
+            <state>}; separator="\n">
+        case _ =>
+        }
+<if(backtracking)>
+        if (state.backtracking>0) {state.failed=true; return -1}<\n>
+<endif>
+        val nvae = new NoViableAltException(getDescription(), <dfa.decisionNumber>, _s, input)
+        error(nvae)
+        throw nvae
+    }<\n>
+<endif>
+}<\n>
+>>
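+
+/* Note on the packed strings above (sketch only; see the runtime DFA class for
+ * the authoritative decoding): each *_S string is a sequence of
+ * (run-length, value) character pairs, and DFA.unpackEncodedString expands them
+ * back into short arrays when the class is initialized.  For example, a packed
+ * "\2\5\3\uffff" would expand to [5, 5, -1, -1, -1].
+ */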
+
+/** A state in a cyclic DFA; it's a special state and part of a big switch on
+ *  state.
+ */
+cyclicDFAState(decisionNumber,stateNumber,edges,needErrorClause,semPredState) ::= <<
+val LA<decisionNumber>_<stateNumber>: Int = input.LA(1)<\n>
+<if(semPredState)> <! get next lookahead symbol to test edges, then rewind !>
+val index<decisionNumber>_<stateNumber>: Int = input.index()
+input.rewind()<\n>
+<endif>
+s = -1
+<edges; separator="\nelse ">
+<if(semPredState)> <! return input cursor to state before we rewound !>
+input.seek(index<decisionNumber>_<stateNumber>)<\n>
+<endif>
+if ( s>=0 ) return s
+>>
+
+/** Just like a fixed DFA edge, test the lookahead and indicate what
+ *  state to jump to next if successful.
+ */
+cyclicDFAEdge(labelExpr, targetStateNumber, edgeNumber, predicates) ::= <<
+if ( (<labelExpr>) <if(predicates)>&& (<predicates>)<endif>) {s = <targetStateNumber>}<\n>
+>>
+
+/** An edge pointing at end-of-token; essentially matches any char;
+ *  always jump to the target.
+ */
+eotDFAEdge(targetStateNumber,edgeNumber, predicates) ::= <<
+s = <targetStateNumber><\n>
+>>
+
+
+// D F A  E X P R E S S I O N S
+
+andPredicates(left,right) ::= "(<left>&&<right>)"
+
+orPredicates(operands) ::= "(<operands; separator=\"||\">)"
+
+notPredicate(pred) ::= "!(<evalPredicate(pred,\"\")>)"
+
+evalPredicate(pred,description) ::= "(<pred>)"
+
+evalSynPredicate(pred,description) ::= "<pred>()"
+
+lookaheadTest(atom,k,atomAsInt) ::= "LA<decisionNumber>_<stateNumber>==<atom>"
+
+/** Sometimes a lookahead test cannot assume that LA(k) is in a temp variable
+ *  somewhere.  Must ask for the lookahead directly.
+ */
+isolatedLookaheadTest(atom,k,atomAsInt) ::= "input.LA(<k>)==<atom>"
+
+lookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= <<
+(LA<decisionNumber>_<stateNumber> >= <lower> && LA<decisionNumber>_<stateNumber> \<= <upper>)
+>>
+
+isolatedLookaheadRangeTest(lower,upper,k,rangeNumber,lowerAsInt,upperAsInt) ::= "(input.LA(<k>) >=<lower> && input.LA(<k>) \<= <upper>)"
+
+setTest(ranges) ::= "<ranges; separator=\"||\">"
+
+// A T T R I B U T E S
+
+globalAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+class <scope.name>_scope {
+    <scope.attributes:{it | var <it.name>: <it.type> = _}; separator="\n">
+}
+val <scope.name>_stack = new collection.mutable.Stack[<scope.name>_scope]<\n>
+<endif>
+>>
+
+ruleAttributeScope(scope) ::= <<
+<if(scope.attributes)>
+class <scope.name>_scope {
+    <scope.attributes:{it | var <it.name>: <it.type> = _}; separator="\n">
+}
+val <scope.name>_stack = new collection.mutable.Stack[<scope.name>_scope]<\n>
+<endif>
+>>
+
+returnStructName(r) ::= "<r.name>_return"
+
+returnType() ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+<ruleDescriptor:returnStructName()>
+<else>
+<if(ruleDescriptor.hasSingleReturnValue)>
+<ruleDescriptor.singleValueReturnType>
+<else>
+Unit
+<endif>
+<endif>
+>>
+
+/** Generate the Scala type associated with a single return value or
+ *  multiple return values.
+ */
+ruleLabelType(referencedRule) ::= <<
+<if(referencedRule.hasMultipleReturnValues)>
+<referencedRule.name>_return
+<else>
+<if(referencedRule.hasSingleReturnValue)>
+<referencedRule.singleValueReturnType>
+<else>
+Unit
+<endif>
+<endif>
+>>
+
+delegateName(d) ::= <<
+<if(d.label)><d.label><else>g<d.name><endif>
+>>
+
+/** Using the type-to-init-value map, pick an initial value for a type; a type
+ *  not in the table must be an object, so the default value is "null".
+ */
+initValue(typeName) ::= <<
+<scalaTypeInitMap.(typeName)>
+>>
+
+/** Define a rule label including default value */
+ruleLabelDef(label) ::= <<
+var <label.label.text>: <ruleLabelType(referencedRule=label.referencedRule)> = <initValue(typeName=ruleLabelType(referencedRule=label.referencedRule))><\n>
+>>
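+
+/* Illustrative sketch: for a hypothetical label `e` on a rule whose single
+ * return type is Int, ruleLabelDef above would render roughly as
+ *
+ *     var e: Int = 0
+ *
+ * assuming scalaTypeInitMap (defined earlier in this file, outside this hunk)
+ * maps Int to 0; per the initValue comment, types not in the map fall back
+ * to "null".
+ */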
+
+/** Define a return struct for a rule if the code needs to access its
+ *  start/stop tokens, tree stuff, attributes, ...  Leave a hole for
+ *  subgroups to stick in members.
+ *  TODO(matthewlloyd): make this static
+ */
+returnScope(scope) ::= <<
+<if(ruleDescriptor.hasMultipleReturnValues)>
+final class <ruleDescriptor:returnStructName()> extends <if(TREE_PARSER)>Tree<else>Parser<endif>RuleReturnScope {
+    <scope.attributes:{it | var <it.name>: <it.type> = _}; separator="\n">
+    <@ruleReturnMembers()>
+}
+<endif>
+>>
+
+parameterScope(scope) ::= <<
+<scope.attributes:{it | <it.name>: <it.type>}; separator=", ">
+>>
+
+parameterAttributeRef(attr) ::= "<attr.name>"
+parameterSetAttributeRef(attr,expr) ::= "<attr.name> =<expr>"
+
+scopeAttributeRef(scope,attr,index,negIndex) ::= <%
+<if(negIndex)>
+<scope>_stack(<scope>_stack.size-<negIndex>-1).<attr.name>
+<else>
+<if(index)>
+<scope>_stack(<index>).<attr.name>
+<else>
+<scope>_stack.top.<attr.name>
+<endif>
+<endif>
+%>
+
+scopeSetAttributeRef(scope,attr,expr,index,negIndex) ::= <%
+<if(negIndex)>
+<scope>_stack(<scope>_stack.size-<negIndex>-1).<attr.name> = <expr>
+<else>
+<if(index)>
+<scope>_stack(<index>).<attr.name> = <expr>
+<else>
+<scope>_stack.top.<attr.name> = <expr>
+<endif>
+<endif>
+%>
+
+/** $x where either x is a global scope or x is a rule with a dynamic scope;
+ *  this refers to the stack itself, not the top of the stack.  It is useful
+ *  for predicates like {$function.size()>0 && $function::name.equals("foo")}?
+ */
+isolatedDynamicScopeRef(scope) ::= "<scope>_stack"
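+
+/* Illustrative sketch: with a hypothetical dynamic scope `function`, the
+ * templates above translate references roughly as
+ *
+ *     $function        ->  function_stack          (the stack itself)
+ *     $function::name  ->  function_stack.top.name (top of stack)
+ *
+ * while indexed references go through the index/negIndex branches of
+ * scopeAttributeRef above.
+ */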
+
+/** reference an attribute of rule; might only have single return value */
+ruleLabelRef(referencedRule,scope,attr) ::= <%
+<if(referencedRule.hasMultipleReturnValues)>
+(if (<scope>!=null) <scope>.<attr.name> else <initValue(attr.type)>)
+<else>
+<scope>
+<endif>
+%>
+
+returnAttributeRef(ruleDescriptor,attr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name>
+<else>
+<attr.name>
+<endif>
+%>
+
+returnSetAttributeRef(ruleDescriptor,attr,expr) ::= <%
+<if(ruleDescriptor.hasMultipleReturnValues)>
+retval.<attr.name> =<expr>
+<else>
+<attr.name> =<expr>
+<endif>
+%>
+
+/** How to translate $tokenLabel */
+tokenLabelRef(label) ::= "<label>"
+
+/** ids+=ID {$ids} or e+=expr {$e} */
+listLabelRef(label) ::= "list_<label>"
+
+
+// not sure the next are the right approach
+
+tokenLabelPropertyRef_text(scope,attr) ::= "(if (<scope>!=null) <scope>.getText() else null)"
+tokenLabelPropertyRef_type(scope,attr) ::= "(if (<scope>!=null) <scope>.getType() else 0)"
+tokenLabelPropertyRef_line(scope,attr) ::= "(if (<scope>!=null) <scope>.getLine() else 0)"
+tokenLabelPropertyRef_pos(scope,attr) ::= "(if (<scope>!=null) <scope>.getCharPositionInLine() else 0)"
+tokenLabelPropertyRef_channel(scope,attr) ::= "(if (<scope>!=null) <scope>.getChannel() else 0)"
+tokenLabelPropertyRef_index(scope,attr) ::= "(if (<scope>!=null) <scope>.getTokenIndex() else 0)"
+tokenLabelPropertyRef_tree(scope,attr) ::= "<scope>_tree"
+tokenLabelPropertyRef_int(scope,attr) ::= "(if (<scope>!=null) Integer.valueOf(<scope>.getText()) else 0)"
+
+ruleLabelPropertyRef_start(scope,attr) ::= "(if (<scope>!=null) <scope>.start.asInstanceOf[<labelType>] else null)"
+ruleLabelPropertyRef_stop(scope,attr) ::= "(if (<scope>!=null) <scope>.stop.asInstanceOf[<labelType>] else null)"
+ruleLabelPropertyRef_tree(scope,attr) ::= "(if (<scope>!=null) <scope>.tree.asInstanceOf[<ASTLabelType>] else null)"
+ruleLabelPropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+(if (<scope>!=null) (input.getTokenStream().toString(
+  input.getTreeAdaptor().getTokenStartIndex(<scope>.start),
+  input.getTreeAdaptor().getTokenStopIndex(<scope>.start))) else null)
+<else>
+(if (<scope>!=null) input.toString(<scope>.start,<scope>.stop) else null)
+<endif>
+>>
+
+ruleLabelPropertyRef_st(scope,attr) ::= "(if (<scope>!=null) <scope>.st else null)"
+
+/** Isolated $RULE ref ok in lexer as it's a Token */
+lexerRuleLabel(label) ::= "<label>"
+
+lexerRuleLabelPropertyRef_type(scope,attr) ::=
+    "(if (<scope>!=null) <scope>.getType() else 0)"
+lexerRuleLabelPropertyRef_line(scope,attr) ::=
+    "(if (<scope>!=null) <scope>.getLine() else 0)"
+lexerRuleLabelPropertyRef_pos(scope,attr) ::=
+    "(if (<scope>!=null) <scope>.getCharPositionInLine() else -1)"
+lexerRuleLabelPropertyRef_channel(scope,attr) ::=
+    "(if (<scope>!=null) <scope>.getChannel() else 0)"
+lexerRuleLabelPropertyRef_index(scope,attr) ::=
+    "(if (<scope>!=null) <scope>.getTokenIndex() else 0)"
+lexerRuleLabelPropertyRef_text(scope,attr) ::=
+    "(if (<scope>!=null) <scope>.getText() else null)"
+lexerRuleLabelPropertyRef_int(scope,attr) ::=
+    "(if (<scope>!=null) Integer.valueOf(<scope>.getText()) else 0)"
+
+// Somebody may ref $template or $tree or $stop within a rule:
+rulePropertyRef_start(scope,attr) ::= "(retval.start.asInstanceOf[<labelType>])"
+rulePropertyRef_stop(scope,attr) ::= "(retval.stop.asInstanceOf[<labelType>])"
+rulePropertyRef_tree(scope,attr) ::= "(retval.tree.asInstanceOf[<ASTLabelType>])"
+rulePropertyRef_text(scope,attr) ::= <<
+<if(TREE_PARSER)>
+input.getTokenStream().toString(
+  input.getTreeAdaptor().getTokenStartIndex(retval.start),
+  input.getTreeAdaptor().getTokenStopIndex(retval.start))
+<else>
+input.toString(retval.start,input.LT(-1))
+<endif>
+>>
+rulePropertyRef_st(scope,attr) ::= "retval.st"
+
+lexerRulePropertyRef_text(scope,attr) ::= "getText()"
+lexerRulePropertyRef_type(scope,attr) ::= "_type"
+lexerRulePropertyRef_line(scope,attr) ::= "state.tokenStartLine"
+lexerRulePropertyRef_pos(scope,attr) ::= "state.tokenStartCharPositionInLine"
+lexerRulePropertyRef_index(scope,attr) ::= "-1" // undefined token index in lexer
+lexerRulePropertyRef_channel(scope,attr) ::= "_channel"
+lexerRulePropertyRef_start(scope,attr) ::= "state.tokenStartCharIndex"
+lexerRulePropertyRef_stop(scope,attr) ::= "(getCharIndex()-1)"
+lexerRulePropertyRef_int(scope,attr) ::= "Integer.valueOf(<scope>.getText())"
+
+// Setting $st and $tree is allowed in the local rule; everything else
+// is flagged as an error.
+ruleSetPropertyRef_tree(scope,attr,expr) ::= "retval.tree =<expr>"
+ruleSetPropertyRef_st(scope,attr,expr) ::= "retval.st =<expr>"
+
+/** How to execute an action (only when not backtracking) */
+execAction(action) ::= <<
+<if(backtracking)>
+if ( <actions.(actionScope).synpredgate> ) {
+  <action>
+}
+<else>
+<action>
+<endif>
+>>
+
+/** How to always execute an action even when backtracking */
+execForcedAction(action) ::= "<action>"
+
+// M I S C (properties, etc...)
+
+bitset(name, words64) ::= <<
+val <name> = new BitSet(Array[Long](<words64:{it | <it>L};separator=",">))<\n>
+>>
+
+codeFileExtension() ::= ".scala"
+
+true_value() ::= "true"
+false_value() ::= "false"
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/depend.stg b/tool/src/main/resources/org/antlr/tool/templates/depend.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/depend.stg
rename to tool/src/main/resources/org/antlr/tool/templates/depend.stg
diff --git a/tool/src/main/resources/org/antlr/tool/templates/dot/dot.stg b/tool/src/main/resources/org/antlr/tool/templates/dot/dot.stg
new file mode 100644
index 0000000..efcac2c
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/tool/templates/dot/dot.stg
@@ -0,0 +1,73 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Conversion to C#:
+ * Copyright (c) 2011 Sam Harwell, Tunnel Vision Laboratories, LLC.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dfa(decisionRanks,states,edges,rankdir,startState,useBox) ::= <<
+digraph NFA {
+<if(rankdir)>rankdir=<rankdir>;<endif>
+<decisionRanks; separator="\n">
+<states; separator="\n">
+<edges; separator="\n">
+}
+>>
+
+nfa(decisionRanks,states,edges,rankdir,startState) ::= <<
+digraph NFA {
+rankdir=LR;
+<decisionRanks; separator="\n">
+<states; separator="\n">
+<edges; separator="\n">
+}
+>>
+
+decision-rank(states) ::= <<
+{rank=same; rankdir=TB; <states; separator="; ">}
+>>
+
+edge(src,target,label,arrowhead) ::= <<
+<src> -> <target> [fontsize=11, fontname="Courier", arrowsize=.7, label = "<label>"<if(arrowhead)>, arrowhead = <arrowhead><endif>];
+>>
+
+action-edge(src,target,label,arrowhead) ::= <<
+<src> -> <target> [fontsize=11, fontname="Courier", arrowsize=.7, label = "<label>"<if(arrowhead)>, arrowhead = <arrowhead><endif>];
+>>
+
+epsilon-edge(src,target,label,arrowhead) ::= <<
+<src> -> <target> [fontname="Times-Italic", label = "e"];
+>>
+
+state(name,useBox) ::= <<
+node [fontsize=11, shape = <if(useBox)>box<else>circle, fixedsize=true, width=.4<endif>]; <name>
+>>
+
+stopstate(name,useBox) ::= <<
+node [fontsize=11, shape = <if(useBox)>polygon,sides=4,peripheries=2<else>doublecircle, fixedsize=true, width=.6<endif>]; <name>
+>>
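+
+/* Illustrative sketch: a trivial one-edge machine pushed through the templates
+ * above yields DOT text roughly like
+ *
+ *     digraph NFA {
+ *     rankdir=LR;
+ *     node [fontsize=11, shape = circle, fixedsize=true, width=.4]; s0
+ *     node [fontsize=11, shape = doublecircle, fixedsize=true, width=.6]; s1
+ *     s0 -> s1 [fontsize=11, fontname="Courier", arrowsize=.7, label = "'a'"];
+ *     }
+ *
+ * with state names and edge labels depending on the grammar.
+ */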
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/messages/formats/antlr.stg b/tool/src/main/resources/org/antlr/tool/templates/messages/formats/antlr.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/messages/formats/antlr.stg
rename to tool/src/main/resources/org/antlr/tool/templates/messages/formats/antlr.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/messages/formats/gnu.stg b/tool/src/main/resources/org/antlr/tool/templates/messages/formats/gnu.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/messages/formats/gnu.stg
rename to tool/src/main/resources/org/antlr/tool/templates/messages/formats/gnu.stg
diff --git a/antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/messages/formats/vs2005.stg b/tool/src/main/resources/org/antlr/tool/templates/messages/formats/vs2005.stg
similarity index 100%
rename from antlr-3.4/tool/src/main/resources/org/antlr/tool/templates/messages/formats/vs2005.stg
rename to tool/src/main/resources/org/antlr/tool/templates/messages/formats/vs2005.stg
diff --git a/tool/src/main/resources/org/antlr/tool/templates/messages/languages/en.stg b/tool/src/main/resources/org/antlr/tool/templates/messages/languages/en.stg
new file mode 100644
index 0000000..b5e42d5
--- /dev/null
+++ b/tool/src/main/resources/org/antlr/tool/templates/messages/languages/en.stg
@@ -0,0 +1,300 @@
+/*
+ [The "BSD license"]
+ Copyright (c) 2010 Terence Parr
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/*
+ New style messages. This file contains only the message text in English; it has
+ no information about the file, line, or column where a problem occurred.
+ The location framing and message ids are taken from the formats directory.
+														Kay Roepke
+*/
+group en_US;
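+
+/*
+ Illustrative sketch: a message body below supplies only the English text; the
+ severity/location framing is added by whichever template set in the formats
+ directory is active.  So UNDEFINED_RULE_REF with arg "foo", reported against a
+ hypothetical grammar T.g, would surface roughly as
+
+     error(nnn): T.g:3:10: reference to undefined rule: foo
+
+ with the numeric id and the location coming from the tool, not from this file.
+*/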
+
+// TOOL ERRORS
+// file errors
+CANNOT_WRITE_FILE(arg,exception,stackTrace) ::= <<
+cannot write file <arg>: <exception>
+<stackTrace; separator="\n">
+>>
+CANNOT_CLOSE_FILE(arg,exception,stackTrace) ::= "cannot close file <arg>: <exception>"
+CANNOT_FIND_TOKENS_FILE(arg) ::= "cannot find tokens file <arg>"
+ERROR_READING_TOKENS_FILE(arg,exception,stackTrace) ::= <<
+problem reading token vocabulary file <arg>: <exception>
+<stackTrace; separator="\n">
+>>
+DIR_NOT_FOUND(arg,exception,stackTrace) ::= "directory not found: <arg>"
+OUTPUT_DIR_IS_FILE(arg,exception,stackTrace) ::= "output directory is a file: <arg>"
+CANNOT_OPEN_FILE(arg,exception,stackTrace) ::= "cannot find or open file: <arg><if(exception)>; reason: <exception><endif>"
+CIRCULAR_DEPENDENCY() ::= "your grammars contain a circular dependency and cannot be sorted into a valid build order."
+
+INTERNAL_ERROR(arg,arg2,exception,stackTrace) ::= <<
+internal error: <arg> <arg2><if(exception)>: <exception><endif>
+<stackTrace; separator="\n">
+>>
+INTERNAL_WARNING(arg) ::= "internal warning: <arg>"
+ERROR_CREATING_ARTIFICIAL_RULE(arg,exception,stackTrace) ::= <<
+problems creating lexer rule listing all tokens: <exception>
+<stackTrace; separator="\n">
+>>
+TOKENS_FILE_SYNTAX_ERROR(arg,arg2) ::=
+	"problems parsing token vocabulary file <arg> on line <arg2>"
+CANNOT_GEN_DOT_FILE(arg,exception,stackTrace) ::=
+	"cannot write DFA DOT file <arg>: <exception>"
+BAD_ACTION_AST_STRUCTURE(exception,stackTrace) ::=
+	"bad internal tree structure for action '<arg>': <exception>"
+BAD_AST_STRUCTURE(arg,exception,stackTrace) ::= <<
+bad internal tree structure '<arg>': <exception>
+<stackTrace; separator="\n">
+>>
+FILE_AND_GRAMMAR_NAME_DIFFER(arg,arg2) ::=
+  "file <arg2> contains grammar <arg>; names must be identical"
+FILENAME_EXTENSION_ERROR(arg) ::=
+  "file <arg> must end in a file extension, normally .g"
+
+// code gen errors
+MISSING_CODE_GEN_TEMPLATES(arg) ::=
+	"cannot find code generation templates <arg>.stg"
+MISSING_CYCLIC_DFA_CODE_GEN_TEMPLATES() ::=
+	"cannot find code generation cyclic DFA templates for language <arg>"
+CODE_GEN_TEMPLATES_INCOMPLETE(arg) ::=
+	"at least one code generation template missing for language <arg>"
+CANNOT_CREATE_TARGET_GENERATOR(arg,exception,stackTrace) ::=
+	"cannot create target <arg> code generator: <exception>"
+STRING_TEMPLATE_ERROR(arg,exception,stackTrace) ::=
+	"template error: <arg>"
+CANNOT_COMPUTE_SAMPLE_INPUT_SEQ() ::=
+	"cannot generate a sample input sequence from lookahead DFA"
+
+// grammar interpretation errors
+/*
+NO_VIABLE_DFA_ALT(arg,arg2) ::=
+	"no viable transition from state <arg> on <arg2> while interpreting DFA"
+*/
+
+// GRAMMAR ERRORS
+SYNTAX_ERROR(arg) ::= "syntax error: <arg>"
+RULE_REDEFINITION(arg) ::=
+	"rule <arg> redefinition"
+LEXER_RULES_NOT_ALLOWED(arg) ::=
+	"lexer rule <arg> not allowed in parser"
+PARSER_RULES_NOT_ALLOWED(arg) ::=
+	"parser rule <arg> not allowed in lexer"
+CANNOT_FIND_ATTRIBUTE_NAME_IN_DECL(arg) ::=
+	"cannot find an attribute name in attribute declaration"
+NO_TOKEN_DEFINITION(arg) ::=
+	"no lexer rule corresponding to token: <arg>"
+UNDEFINED_RULE_REF(arg) ::=
+	"reference to undefined rule: <arg>"
+LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE(arg) ::=
+	"literal has no associated lexer rule: <arg>"
+CANNOT_ALIAS_TOKENS_IN_LEXER(arg) ::=
+	"literals are illegal in lexer tokens{} section: <arg>"
+ATTRIBUTE_REF_NOT_IN_RULE(arg,arg2) ::=
+	"reference to attribute outside of a rule: <arg><if(arg2)>.<arg2><endif>"
+UNKNOWN_ATTRIBUTE_IN_SCOPE(arg,arg2) ::=
+	"unknown attribute for <arg>: <arg2>"
+UNKNOWN_RULE_ATTRIBUTE(arg,arg2) ::=
+	"unknown attribute for rule <arg>: <arg2>"
+UNKNOWN_SIMPLE_ATTRIBUTE(arg,args2) ::=
+	"attribute is not a token, parameter, or return value: <arg>"
+ISOLATED_RULE_SCOPE(arg) ::=
+	"missing attribute access on rule scope: <arg>"
+INVALID_RULE_PARAMETER_REF(arg,arg2) ::=
+	"cannot access rule <arg>'s parameter: <arg2>"
+INVALID_RULE_SCOPE_ATTRIBUTE_REF(arg,arg2) ::=
+	"cannot access rule <arg>'s dynamically-scoped attribute: <arg2>"
+SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE(arg) ::=
+	"symbol <arg> conflicts with global dynamic scope with same name"
+WRITE_TO_READONLY_ATTR(arg,arg2,arg3) ::=
+	"cannot write to read only attribute: $<arg><if(arg2)>.<arg2><endif>"
+LABEL_CONFLICTS_WITH_RULE(arg) ::=
+	"label <arg> conflicts with rule with same name"
+LABEL_CONFLICTS_WITH_TOKEN(arg) ::=
+	"label <arg> conflicts with token with same name"
+LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE(arg,arg2) ::=
+	"label <arg> conflicts with rule <arg2>'s dynamically-scoped attribute with same name"
+LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL(arg,arg2) ::=
+	"label <arg> conflicts with rule <arg2>'s return value or parameter with same name"
+ATTRIBUTE_CONFLICTS_WITH_RULE(arg,arg2) ::=
+	"rule <arg2>'s dynamically-scoped attribute <arg> conflicts with the rule name"
+ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL(arg,arg2) ::=
+	"rule <arg2>'s dynamically-scoped attribute <arg> conflicts with<arg2>'s return value or parameter with same name"
+LABEL_TYPE_CONFLICT(arg,arg2) ::=
+	"label <arg> type mismatch with previous definition: <arg2>"
+ARG_RETVAL_CONFLICT(arg,arg2) ::=
+	"rule <arg2>'s argument <arg> conflicts a return value with same name"
+NONUNIQUE_REF(arg) ::=
+	"<arg> is a non-unique reference"
+FORWARD_ELEMENT_REF(arg) ::=
+	"illegal forward reference: <arg>"
+MISSING_RULE_ARGS(arg) ::=
+	"missing parameter(s) on rule reference: <arg>"
+RULE_HAS_NO_ARGS(arg) ::=
+	"rule <arg> has no defined parameters"
+ARGS_ON_TOKEN_REF(arg) ::=
+	"token reference <arg> may not have parameters"
+ILLEGAL_OPTION(arg) ::=
+	"illegal option <arg>"
+LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT(arg) ::=
+	"rule '+=' list labels are not allowed w/o output option: <arg>"
+UNDEFINED_TOKEN_REF_IN_REWRITE(arg) ::=
+  "reference to undefined token in rewrite rule: <arg>"
+REWRITE_ELEMENT_NOT_PRESENT_ON_LHS(arg) ::=
+  "reference to rewrite element <arg> without reference on left of ->"
+UNDEFINED_LABEL_REF_IN_REWRITE(arg) ::=
+  "reference to undefined label in rewrite rule: $<arg>"
+NO_GRAMMAR_START_RULE (arg) ::=
+  "grammar <arg>: no start rule (no rule can obviously be followed by EOF)"
+EMPTY_COMPLEMENT(arg) ::= <<
+<if(arg)>
+set complement ~<arg> is empty
+<else>
+set complement is empty
+<endif>
+>>
+UNKNOWN_DYNAMIC_SCOPE(arg) ::=
+  "unknown dynamic scope: <arg>"
+UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE(arg,arg2) ::=
+  "unknown dynamically-scoped attribute for scope <arg>: <arg2>"
+RULE_REF_AMBIG_WITH_RULE_IN_ALT(arg) ::=
+  "reference $<arg> is ambiguous; rule <arg> is enclosing rule and referenced in the production (assuming enclosing rule)"
+ISOLATED_RULE_ATTRIBUTE(arg) ::=
+  "reference to locally-defined rule scope attribute without rule name: <arg>"
+INVALID_ACTION_SCOPE(arg,arg2) ::=
+  "unknown or invalid action scope for <arg2> grammar: <arg>"
+ACTION_REDEFINITION(arg) ::=
+  "redefinition of <arg> action"
+DOUBLE_QUOTES_ILLEGAL(arg) ::=
+  "string literals must use single quotes (such as \'begin\'): <arg>"
+INVALID_TEMPLATE_ACTION(arg) ::=
+  "invalid StringTemplate % shorthand syntax: '<arg>'"
+MISSING_ATTRIBUTE_NAME() ::=
+  "missing attribute name on $ reference"
+ARG_INIT_VALUES_ILLEGAL(arg) ::=
+  "rule parameters may not have init values: <arg>"
+REWRITE_OR_OP_WITH_NO_OUTPUT_OPTION(arg) ::=
+  "<if(arg)>rule <arg> uses <endif>rewrite syntax or operator with no output option; setting output=AST"
+AST_OP_WITH_NON_AST_OUTPUT_OPTION(arg) ::=
+  "AST operator with non-AST output option: <arg>"
+NO_RULES(arg) ::= "grammar file <arg> has no rules"
+MISSING_AST_TYPE_IN_TREE_GRAMMAR(arg) ::=
+  "tree grammar <arg> has no ASTLabelType option"
+REWRITE_FOR_MULTI_ELEMENT_ALT(arg) ::=
+  "with rewrite=true, alt <arg> not simple node or obvious tree element; text attribute for rule not guaranteed to be correct"
+RULE_INVALID_SET(arg) ::=
+  "Cannot complement rule <arg>; not a simple set or element"
+HETERO_ILLEGAL_IN_REWRITE_ALT(arg) ::=
+  "alts with rewrites can't use heterogeneous types left of ->"
+NO_SUCH_GRAMMAR_SCOPE(arg,arg2) ::=
+  "reference to undefined grammar in rule reference: <arg>.<arg2>"
+NO_SUCH_RULE_IN_SCOPE(arg,arg2) ::=
+  "rule <arg2> is not defined in grammar <arg>"
+TOKEN_ALIAS_CONFLICT(arg,arg2) ::=
+  "cannot alias <arg>; string already assigned to <arg2>"
+TOKEN_ALIAS_REASSIGNMENT(arg,arg2) ::=
+  "cannot alias <arg>; token name already assigned to <arg2>"
+TOKEN_VOCAB_IN_DELEGATE(arg,arg2) ::=
+  "tokenVocab option ignored in imported grammar <arg>"
+INVALID_IMPORT(arg,arg2) ::=
+  "<arg.grammarTypeString> grammar <arg.name> cannot import <arg2.grammarTypeString> grammar <arg2.name>"
+IMPORTED_TOKENS_RULE_EMPTY(arg,arg2) ::=
+  "no lexer rules contributed to <arg> from imported grammar <arg2>"
+IMPORT_NAME_CLASH(arg,arg2) ::=
+  "combined grammar <arg.name> and imported <arg2.grammarTypeString> grammar <arg2.name> both generate <arg2.recognizerName>; import ignored"
+AST_OP_IN_ALT_WITH_REWRITE(arg,arg2) ::=
+  "rule <arg> alt <arg2> uses rewrite syntax and also an AST operator"
+WILDCARD_AS_ROOT(arg) ::= "Wildcard invalid as root; wildcard can itself be a tree"
+CONFLICTING_OPTION_IN_TREE_FILTER(arg,arg2) ::= "option <arg>=<arg2> conflicts with tree grammar filter mode"
+ILLEGAL_OPTION_VALUE(arg, arg2) ::= "value '<arg2>' invalid for option <arg>"
+ALL_OPS_NEED_SAME_ASSOC(arg) ::= "all operators of alt <alt> of left-recursive rule must have same associativity"
+RANGE_OP_ILLEGAL(arg) ::= "the .. range operator isn't allowed in parser rules"
+
+// GRAMMAR WARNINGS
+
+GRAMMAR_NONDETERMINISM(input,conflictingAlts,paths,disabled,hasPredicateBlockedByAction) ::=
+<<
+<if(paths)>
+Decision can match input such as "<input>" using multiple alternatives:
+<paths:{ it |  alt <it.alt> via NFA path <it.states; separator=","><\n>}>
+<else>
+Decision can match input such as "<input>" using multiple alternatives: <conflictingAlts; separator=", ">
+<endif>
+<if(disabled)><\n>As a result, alternative(s) <disabled; separator=","> were disabled for that input<endif><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
+>>
+
+DANGLING_STATE(danglingAlts,input) ::= <<
+the decision cannot distinguish between alternative(s) <danglingAlts; separator=","> for input such as "<input>"
+>>
+
+UNREACHABLE_ALTS(alts) ::= <<
+The following alternatives can never be matched: <alts; separator=","><\n>
+>>
+
+INSUFFICIENT_PREDICATES(upon,altToLocations,hasPredicateBlockedByAction) ::= <<
+Input such as "<upon>" is insufficiently covered with predicates at locations: <altToLocations.keys:{alt|alt <alt>: <altToLocations.(alt):{loc| line <loc.line>:<loc.column> at <loc.text>}; separator=", ">}; separator=", "><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
+>>
+
+DUPLICATE_SET_ENTRY(arg) ::=
+	"duplicate token type <arg> when collapsing subrule into set"
+
+ANALYSIS_ABORTED(enclosingRule) ::= <<
+ANTLR could not analyze this decision in rule <enclosingRule>; often this is because of recursive rule references visible from the left edge of alternatives.  ANTLR will re-analyze the decision with a fixed lookahead of k=1.  Consider using "options {k=1;}" for that decision and possibly adding a syntactic predicate.
+>>
+
+RECURSION_OVERLOW(alt,input,targetRules,callSiteStates) ::= <<
+Alternative <alt>: after matching input such as <input> decision cannot predict what comes next due to recursion overflow <targetRules,callSiteStates:{t,c|to <t> from <c:{s|<s.enclosingRule.name>};separator=", ">}; separator=" and ">
+>>
+
+LEFT_RECURSION(targetRules,alt,callSiteStates) ::= <<
+Alternative <alt> discovers infinite left-recursion <targetRules,callSiteStates:{t,c|to <t> from <c:{s|<s.enclosingRule>};separator=", ">}; separator=" and ">
+>>
+
+UNREACHABLE_TOKENS(tokens) ::= <<
+The following token definitions can never be matched because prior tokens match the same input: <tokens; separator=",">
+>>
+
+TOKEN_NONDETERMINISM(input,conflictingTokens,paths,disabled,hasPredicateBlockedByAction) ::=
+<<
+<if(paths)>
+Decision can match input such as "<input>" using multiple alternatives:
+<paths:{ it | alt <it.alt> via NFA path <it.states; separator=","><\n>}>
+<else>
+Multiple token rules can match input such as "<input>": <conflictingTokens; separator=", "><\n>
+<endif>
+<if(disabled)><\n>As a result, token(s) <disabled; separator=","> were disabled for that input<endif><if(hasPredicateBlockedByAction)><\n>Semantic predicates were present but were hidden by actions.<endif>
+>>
+
+LEFT_RECURSION_CYCLES(listOfCycles) ::= <<
+The following sets of rules are mutually left-recursive <listOfCycles:{c| [<c:{r|<r.name>}; separator=", ">]}; separator=" and ">
+>>
+
+NONREGULAR_DECISION(ruleName,alts) ::= <<
+[fatal] rule <ruleName> has non-LL(*) decision due to recursive rule invocations reachable from alts <alts; separator=",">.  Resolve by left-factoring or using syntactic predicates or using backtrack=true option.
+>>
+
+/* l10n for message levels */
+warning() ::= "warning"
+error() ::= "error"
diff --git a/tool/src/test/java/org/antlr/test/BaseTest.java b/tool/src/test/java/org/antlr/test/BaseTest.java
new file mode 100644
index 0000000..12c88ac
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/BaseTest.java
@@ -0,0 +1,979 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+
+import org.antlr.Tool;
+import org.antlr.analysis.Label;
+import org.antlr.runtime.CommonTokenStream;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.TokenSource;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STGroup;
+import org.antlr.tool.ANTLRErrorListener;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.GrammarSemanticsMessage;
+import org.antlr.tool.Message;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.rules.TestRule;
+import org.junit.rules.TestWatcher;
+import org.junit.runner.Description;
+
+import javax.tools.*;
+import java.io.*;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.*;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import static org.junit.Assert.*;
+
+public abstract class BaseTest {
+	// -J-Dorg.antlr.test.BaseTest.level=FINE
+	private static final Logger LOGGER = Logger.getLogger(BaseTest.class.getName());
+
+	public static final String newline = System.getProperty("line.separator");
+
+	public static final String jikes = null;//"/usr/bin/jikes";
+	public static final String pathSep = System.getProperty("path.separator");
+
+	public static final boolean TEST_IN_SAME_PROCESS = Boolean.parseBoolean(System.getProperty("antlr.testinprocess"));
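+	// Set -Dantlr.testinprocess=true to have execClass() below load the generated
+	// recognizer through a URLClassLoader and invoke its main() reflectively,
+	// instead of forking a separate "java" process.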
+
+   /**
+    * When running from Maven, the junit tests are run via the surefire plugin. It sets the
+    * classpath for the test environment into the following property. We need to pick this up
+    * for the junit tests that are going to generate and try to run code.
+    */
+    public static final String SUREFIRE_CLASSPATH = System.getProperty("surefire.test.class.path", "");
+
+    /**
+     * Build up the full classpath we need, including the surefire path (if present)
+     */
+    public static final String CLASSPATH = System.getProperty("java.class.path") + (SUREFIRE_CLASSPATH.equals("") ? "" : pathSep + SUREFIRE_CLASSPATH);
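+    // Sketch of the effect: under surefire this becomes java.class.path + pathSep +
+    // surefire.test.class.path, so the javac/java subprocesses spawned by compile() and
+    // execClass() see the same classes as the test JVM; outside Maven it is just java.class.path.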
+
+	public String tmpdir = null;
+
+	/** If an error occurs during parser execution, stderr is stored here, since we
+     *  can't return both stdout and stderr.  This doesn't trap errors from running antlr.
+     */
+	protected String stderrDuringParse;
+
+	@Rule
+	public final TestRule testWatcher = new TestWatcher() {
+
+		@Override
+		protected void succeeded(Description description) {
+			// remove tmpdir if no error.
+			eraseTempDir();
+		}
+
+	};
+
+    @Before
+	public void setUp() throws Exception {
+        // new output dir for each test
+        tmpdir = new File(System.getProperty("java.io.tmpdir"),
+						  "antlr-"+getClass().getName()+"-"+
+						  System.currentTimeMillis()).getAbsolutePath();
+        ErrorManager.resetErrorState();
+        STGroup.defaultGroup = new STGroup();
+    }
+
+    protected Tool newTool(String[] args) {
+		Tool tool = new Tool(args);
+		tool.setOutputDirectory(tmpdir);
+		return tool;
+	}
+
+	protected Tool newTool() {
+		Tool tool = new Tool();
+		tool.setOutputDirectory(tmpdir);
+		return tool;
+	}
+
+	protected boolean compile(String fileName) {
+		String classpathOption = "-classpath";
+
+		String[] args = new String[] {
+					"javac", "-d", tmpdir,
+					classpathOption, tmpdir+pathSep+CLASSPATH,
+					tmpdir+"/"+fileName
+		};
+		String cmdLine = "javac" +" -d "+tmpdir+" "+classpathOption+" "+tmpdir+pathSep+CLASSPATH+" "+fileName;
+		//System.out.println("compile: "+cmdLine);
+
+
+		File f = new File(tmpdir, fileName);
+		JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
+
+		StandardJavaFileManager fileManager =
+			compiler.getStandardFileManager(null, null, null);
+
+		Iterable<? extends JavaFileObject> compilationUnits =
+			fileManager.getJavaFileObjectsFromFiles(Arrays.asList(f));
+
+		Iterable<String> compileOptions =
+			Arrays.asList(new String[]{"-d", tmpdir, "-cp", tmpdir+pathSep+CLASSPATH} );
+
+		JavaCompiler.CompilationTask task =
+			compiler.getTask(null, fileManager, null, compileOptions, null,
+							 compilationUnits);
+		boolean ok = task.call();
+
+		try {
+			fileManager.close();
+		}
+		catch (IOException ioe) {
+			ioe.printStackTrace(System.err);
+		}
+		return ok;
+	}
+
+	/** Return true if all is ok, no errors */
+	protected boolean antlr(String fileName, String grammarFileName, String grammarStr, boolean debug) {
+		boolean allIsWell = true;
+		mkdir(tmpdir);
+		writeFile(tmpdir, fileName, grammarStr);
+		try {
+			final List<String> options = new ArrayList<String>();
+			if ( debug ) {
+				options.add("-debug");
+			}
+			options.add("-o");
+			options.add(tmpdir);
+			options.add("-lib");
+			options.add(tmpdir);
+			options.add(new File(tmpdir,grammarFileName).toString());
+			final String[] optionsA = new String[options.size()];
+			options.toArray(optionsA);
+			/*
+			final ErrorQueue equeue = new ErrorQueue();
+			ErrorManager.setErrorListener(equeue);
+			*/
+			Tool antlr = newTool(optionsA);
+			antlr.process();
+			ANTLRErrorListener listener = ErrorManager.getErrorListener();
+			if ( listener instanceof ErrorQueue ) {
+				ErrorQueue equeue = (ErrorQueue)listener;
+				if ( equeue.errors.size()>0 ) {
+					allIsWell = false;
+					System.err.println("antlr reports errors from "+options);
+					for (int i = 0; i < equeue.errors.size(); i++) {
+						Message msg = equeue.errors.get(i);
+						System.err.println(msg);
+					}
+                    System.out.println("!!!\ngrammar:");
+                    System.out.println(grammarStr);
+                    System.out.println("###");
+                }
+			}
+		}
+		catch (Exception e) {
+			allIsWell = false;
+			System.err.println("problems building grammar: "+e);
+			e.printStackTrace(System.err);
+		}
+		return allIsWell;
+	}
+
+	protected String execLexer(String grammarFileName,
+							   String grammarStr,
+							   String lexerName,
+							   String input,
+							   boolean debug)
+	{
+		boolean compiled = rawGenerateAndBuildRecognizer(grammarFileName,
+									  grammarStr,
+									  null,
+									  lexerName,
+									  debug);
+		Assert.assertTrue(compiled);
+
+		writeFile(tmpdir, "input", input);
+		return rawExecRecognizer(null,
+								 null,
+								 lexerName,
+								 null,
+								 null,
+								 false,
+								 false,
+								 false,
+								 debug);
+	}
+
+	protected String execParser(String grammarFileName,
+								String grammarStr,
+								String parserName,
+								String lexerName,
+								String startRuleName,
+								String input, boolean debug)
+	{
+		boolean compiled = rawGenerateAndBuildRecognizer(grammarFileName,
+									  grammarStr,
+									  parserName,
+									  lexerName,
+									  debug);
+		Assert.assertTrue(compiled);
+
+		writeFile(tmpdir, "input", input);
+		boolean parserBuildsTrees =
+			grammarStr.indexOf("output=AST")>=0 ||
+			grammarStr.indexOf("output = AST")>=0;
+		boolean parserBuildsTemplate =
+			grammarStr.indexOf("output=template")>=0 ||
+			grammarStr.indexOf("output = template")>=0;
+		return rawExecRecognizer(parserName,
+								 null,
+								 lexerName,
+								 startRuleName,
+								 null,
+								 parserBuildsTrees,
+								 parserBuildsTemplate,
+								 false,
+								 debug);
+	}
+
+	protected String execTreeParser(String parserGrammarFileName,
+									String parserGrammarStr,
+									String parserName,
+									String treeParserGrammarFileName,
+									String treeParserGrammarStr,
+									String treeParserName,
+									String lexerName,
+									String parserStartRuleName,
+									String treeParserStartRuleName,
+									String input)
+	{
+		return execTreeParser(parserGrammarFileName,
+							  parserGrammarStr,
+							  parserName,
+							  treeParserGrammarFileName,
+							  treeParserGrammarStr,
+							  treeParserName,
+							  lexerName,
+							  parserStartRuleName,
+							  treeParserStartRuleName,
+							  input,
+							  false);
+	}
+
+	protected String execTreeParser(String parserGrammarFileName,
+									String parserGrammarStr,
+									String parserName,
+									String treeParserGrammarFileName,
+									String treeParserGrammarStr,
+									String treeParserName,
+									String lexerName,
+									String parserStartRuleName,
+									String treeParserStartRuleName,
+									String input,
+									boolean debug)
+	{
+		// build the parser
+		boolean compiled = rawGenerateAndBuildRecognizer(parserGrammarFileName,
+									  parserGrammarStr,
+									  parserName,
+									  lexerName,
+									  debug);
+		Assert.assertTrue(compiled);
+
+		// build the tree parser
+		compiled = rawGenerateAndBuildRecognizer(treeParserGrammarFileName,
+									  treeParserGrammarStr,
+									  treeParserName,
+									  lexerName,
+									  debug);
+		Assert.assertTrue(compiled);
+
+		writeFile(tmpdir, "input", input);
+
+		boolean parserBuildsTrees =
+			parserGrammarStr.indexOf("output=AST")>=0 ||
+			parserGrammarStr.indexOf("output = AST")>=0;
+		boolean treeParserBuildsTrees =
+			treeParserGrammarStr.indexOf("output=AST")>=0 ||
+			treeParserGrammarStr.indexOf("output = AST")>=0;
+		boolean parserBuildsTemplate =
+			parserGrammarStr.indexOf("output=template")>=0 ||
+			parserGrammarStr.indexOf("output = template")>=0;
+
+		return rawExecRecognizer(parserName,
+								 treeParserName,
+								 lexerName,
+								 parserStartRuleName,
+								 treeParserStartRuleName,
+								 parserBuildsTrees,
+								 parserBuildsTemplate,
+								 treeParserBuildsTrees,
+								 debug);
+	}
+
+	/** Return true if all is well */
+	protected boolean rawGenerateAndBuildRecognizer(String grammarFileName,
+													String grammarStr,
+													String parserName,
+													String lexerName,
+													boolean debug)
+	{
+		//System.out.println(grammarStr);
+		boolean allIsWell =
+			antlr(grammarFileName, grammarFileName, grammarStr, debug);
+		if (!allIsWell) {
+			return false;
+		}
+
+		if ( lexerName!=null ) {
+			boolean ok;
+			if ( parserName!=null ) {
+				ok = compile(parserName+".java");
+				if ( !ok ) { allIsWell = false; }
+			}
+			ok = compile(lexerName+".java");
+			if ( !ok ) { allIsWell = false; }
+		}
+		else {
+			boolean ok = compile(parserName+".java");
+			if ( !ok ) { allIsWell = false; }
+		}
+		return allIsWell;
+	}
+
+	protected String rawExecRecognizer(String parserName,
+									   String treeParserName,
+									   String lexerName,
+									   String parserStartRuleName,
+									   String treeParserStartRuleName,
+									   boolean parserBuildsTrees,
+									   boolean parserBuildsTemplate,
+									   boolean treeParserBuildsTrees,
+									   boolean debug)
+	{
+        this.stderrDuringParse = null;
+		writeRecognizerAndCompile(parserName, treeParserName, lexerName, parserStartRuleName, treeParserStartRuleName, parserBuildsTrees, parserBuildsTemplate, treeParserBuildsTrees, debug);
+
+		return execRecognizer();
+	}
+
+	public String execRecognizer() {
+		return execClass("Test");
+	}
+
+	public String execClass(String className) {
+		if (TEST_IN_SAME_PROCESS) {
+			try {
+				ClassLoader loader = new URLClassLoader(new URL[] { new File(tmpdir).toURI().toURL() }, ClassLoader.getSystemClassLoader());
+                final Class<?> mainClass = (Class<?>)loader.loadClass(className);
+				final Method mainMethod = mainClass.getDeclaredMethod("main", String[].class);
+				PipedInputStream stdoutIn = new PipedInputStream();
+				PipedInputStream stderrIn = new PipedInputStream();
+				PipedOutputStream stdoutOut = new PipedOutputStream(stdoutIn);
+				PipedOutputStream stderrOut = new PipedOutputStream(stderrIn);
+				String inputFile = new File(tmpdir, "input").getAbsolutePath();
+				StreamVacuum stdoutVacuum = new StreamVacuum(stdoutIn, inputFile);
+				StreamVacuum stderrVacuum = new StreamVacuum(stderrIn, inputFile);
+
+				PrintStream originalOut = System.out;
+				System.setOut(new PrintStream(stdoutOut));
+				try {
+					PrintStream originalErr = System.err;
+					try {
+						System.setErr(new PrintStream(stderrOut));
+						stdoutVacuum.start();
+						stderrVacuum.start();
+						mainMethod.invoke(null, (Object)new String[] { inputFile });
+					}
+					finally {
+						System.setErr(originalErr);
+					}
+				}
+				finally {
+					System.setOut(originalOut);
+				}
+
+				stdoutOut.close();
+				stderrOut.close();
+				stdoutVacuum.join();
+				stderrVacuum.join();
+				String output = stdoutVacuum.toString();
+				if ( stderrVacuum.toString().length()>0 ) {
+					this.stderrDuringParse = stderrVacuum.toString();
+					System.err.println("exec stderrVacuum: "+ stderrVacuum);
+				}
+				return output;
+			} catch (MalformedURLException ex) {
+				LOGGER.log(Level.SEVERE, null, ex);
+			} catch (IOException ex) {
+				LOGGER.log(Level.SEVERE, null, ex);
+			} catch (InterruptedException ex) {
+				LOGGER.log(Level.SEVERE, null, ex);
+			} catch (IllegalAccessException ex) {
+				LOGGER.log(Level.SEVERE, null, ex);
+			} catch (IllegalArgumentException ex) {
+				LOGGER.log(Level.SEVERE, null, ex);
+			} catch (InvocationTargetException ex) {
+				LOGGER.log(Level.SEVERE, null, ex);
+			} catch (NoSuchMethodException ex) {
+				LOGGER.log(Level.SEVERE, null, ex);
+			} catch (SecurityException ex) {
+				LOGGER.log(Level.SEVERE, null, ex);
+			} catch (ClassNotFoundException ex) {
+				LOGGER.log(Level.SEVERE, null, ex);
+			}
+		}
+
+		try {
+			String inputFile = new File(tmpdir, "input").getAbsolutePath();
+			String[] args = new String[] {
+				"java", "-classpath", tmpdir+pathSep+CLASSPATH,
+				className, inputFile
+			};
+			//String cmdLine = "java -classpath "+CLASSPATH+pathSep+tmpdir+" Test " + new File(tmpdir, "input").getAbsolutePath();
+			//System.out.println("execParser: "+cmdLine);
+			Process process =
+				Runtime.getRuntime().exec(args, null, new File(tmpdir));
+			StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream(), inputFile);
+			StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream(), inputFile);
+			stdoutVacuum.start();
+			stderrVacuum.start();
+			process.waitFor();
+			stdoutVacuum.join();
+			stderrVacuum.join();
+			String output;
+			output = stdoutVacuum.toString();
+			if ( stderrVacuum.toString().length()>0 ) {
+				this.stderrDuringParse = stderrVacuum.toString();
+				System.err.println("exec stderrVacuum: "+ stderrVacuum);
+			}
+			return output;
+		}
+		catch (Exception e) {
+			System.err.println("can't exec recognizer");
+			e.printStackTrace(System.err);
+		}
+		return null;
+	}
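When TEST_IN_SAME_PROCESS is set, the generated Test class is loaded through a URLClassLoader and its main() is invoked reflectively while System.out/System.err are temporarily swapped for piped streams; otherwise a separate java process is spawned and its streams are drained. A minimal sketch of the same capture-and-restore idea, simplified with a ByteArrayOutputStream rather than pipes:

    PrintStream originalOut = System.out;
    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    System.setOut(new PrintStream(captured));
    try {
        System.out.println("hello");        // stands in for mainMethod.invoke(...)
    } finally {
        System.setOut(originalOut);         // always restore the real stream
    }
    String output = captured.toString();    // "hello\n"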
+
+	public void writeRecognizerAndCompile(String parserName, String treeParserName, String lexerName, String parserStartRuleName, String treeParserStartRuleName, boolean parserBuildsTrees, boolean parserBuildsTemplate, boolean treeParserBuildsTrees, boolean debug) {
+		if ( treeParserBuildsTrees && parserBuildsTrees ) {
+			writeTreeAndTreeTestFile(parserName,
+									 treeParserName,
+									 lexerName,
+									 parserStartRuleName,
+									 treeParserStartRuleName,
+									 debug);
+		}
+		else if ( parserBuildsTrees ) {
+			writeTreeTestFile(parserName,
+							  treeParserName,
+							  lexerName,
+							  parserStartRuleName,
+							  treeParserStartRuleName,
+							  debug);
+		}
+		else if ( parserBuildsTemplate ) {
+			writeTemplateTestFile(parserName,
+								  lexerName,
+								  parserStartRuleName,
+								  debug);
+		}
+		else if ( parserName==null ) {
+			writeLexerTestFile(lexerName, debug);
+		}
+		else {
+			writeTestFile(parserName,
+						  lexerName,
+						  parserStartRuleName,
+						  debug);
+		}
+
+		compile("Test.java");
+	}
+
+	protected void checkGrammarSemanticsError(ErrorQueue equeue,
+											  GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		/*
+				System.out.println(equeue.infos);
+				System.out.println(equeue.warnings);
+				System.out.println(equeue.errors);
+				assertTrue("number of errors mismatch", n, equeue.errors.size());
+						   */
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			Message m = equeue.errors.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertNotNull("no error; "+expectedMessage.msgID+" expected", foundMsg);
+		assertTrue("error is not a GrammarSemanticsMessage",
+				   foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+		if ( equeue.size()!=1 ) {
+			System.err.println(equeue);
+		}
+	}
+
+	protected void checkGrammarSemanticsWarning(ErrorQueue equeue,
+												GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.warnings.size(); i++) {
+			Message m = equeue.warnings.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertNotNull("no warning; "+expectedMessage.msgID+" expected", foundMsg);
+		assertTrue("warning is not a GrammarSemanticsMessage",
+				   foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+	}
+
+    protected void checkError(ErrorQueue equeue,
+                              Message expectedMessage)
+        throws Exception
+    {
+        //System.out.println("errors="+equeue);
+        Message foundMsg = null;
+        for (int i = 0; i < equeue.errors.size(); i++) {
+            Message m = equeue.errors.get(i);
+            if (m.msgID==expectedMessage.msgID ) {
+                foundMsg = m;
+            }
+        }
+        assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size()>0);
+        assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1);
+        assertNotNull("couldn't find expected error: "+expectedMessage.msgID, foundMsg);
+        /*
+        assertTrue("error is not a GrammarSemanticsMessage",
+                   foundMsg instanceof GrammarSemanticsMessage);
+         */
+        assertEquals(expectedMessage.arg, foundMsg.arg);
+        assertEquals(expectedMessage.arg2, foundMsg.arg2);
+        ErrorManager.resetErrorState(); // wack errors for next test
+    }
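Usage mirrors testInvalidArguments further down in this diff: build the expected message, then hand it to checkError together with the queue that collected the tool's output:

    int expectedMsgID = ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE;
    GrammarSemanticsMessage expectedMessage =
        new GrammarSemanticsMessage(expectedMsgID, g, null, "x");
    checkError(equeue, expectedMessage);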
+
+    public static class StreamVacuum implements Runnable {
+		StringBuffer buf = new StringBuffer();
+		BufferedReader in;
+		Thread sucker;
+		String inputFile;
+		public StreamVacuum(InputStream in, String inputFile) {
+			this.in = new BufferedReader( new InputStreamReader(in) );
+			this.inputFile = inputFile;
+		}
+		public void start() {
+			sucker = new Thread(this);
+			sucker.start();
+		}
+		@Override
+		public void run() {
+			try {
+				String line = in.readLine();
+				while (line!=null) {
+					if (line.startsWith(inputFile))
+						line = line.substring(inputFile.length()+1);
+					buf.append(line);
+					buf.append('\n');
+					line = in.readLine();
+				}
+			}
+			catch (IOException ioe) {
+				System.err.println("can't read output from process");
+			}
+		}
+		/** wait for the thread to finish */
+		public void join() throws InterruptedException {
+			sucker.join();
+		}
+		@Override
+		public String toString() {
+			return buf.toString();
+		}
+	}
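StreamVacuum drains a stream on its own thread; running one vacuum per stream keeps a child process from blocking when an OS pipe buffer fills, and the inputFile prefix is stripped so expected output stays independent of the temp-directory path. A sketch of the pattern inside a test method declared throws Exception (the command line is illustrative only):

    Process p = Runtime.getRuntime().exec(new String[] { "java", "-version" });
    StreamVacuum out = new StreamVacuum(p.getInputStream(), "unused-prefix");
    StreamVacuum err = new StreamVacuum(p.getErrorStream(), "unused-prefix");
    out.start();
    err.start();
    p.waitFor();
    out.join();
    err.join();
    System.out.println(err.toString());   // "java -version" reports on stderr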
+
+    public static class FilteringTokenStream extends CommonTokenStream {
+        public FilteringTokenStream(TokenSource src) { super(src); }
+        Set<Integer> hide = new HashSet<Integer>();
+		@Override
+        protected void sync(int i) {
+            super.sync(i);
+            if ( hide.contains(get(i).getType()) ) get(i).setChannel(Token.HIDDEN_CHANNEL);
+        }
+        public void setTokenTypeChannel(int ttype, int channel) {
+            hide.add(ttype);
+        }
+    }
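FilteringTokenStream moves the registered token types onto the hidden channel as the stream syncs; note that, as written, sync() always uses Token.HIDDEN_CHANNEL and ignores the channel passed to setTokenTypeChannel. A sketch with a hypothetical generated lexer TLexer that defines a WS token type:

    TLexer lex = new TLexer(new ANTLRStringStream("a b c"));
    FilteringTokenStream tokens = new FilteringTokenStream(lex);
    tokens.setTokenTypeChannel(TLexer.WS, Token.HIDDEN_CHANNEL);
    // a parser reading this stream now sees only the default-channel tokens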
+
+	protected void writeFile(String dir, String fileName, String content) {
+		try {
+			File f = new File(dir, fileName);
+			FileWriter w = new FileWriter(f);
+			BufferedWriter bw = new BufferedWriter(w);
+			bw.write(content);
+			bw.close();
+			w.close();
+		}
+		catch (IOException ioe) {
+			System.err.println("can't write file");
+			ioe.printStackTrace(System.err);
+		}
+	}
+
+	protected void mkdir(String dir) {
+		File f = new File(dir);
+		f.mkdirs();
+	}
+
+	protected void writeTestFile(String parserName,
+								 String lexerName,
+								 String parserStartRuleName,
+								 boolean debug)
+	{
+		ST outputFileST = new ST(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.runtime.tree.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        <lexerName> lex = new <lexerName>(input);\n" +
+			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
+			"        <createParser>\n"+
+			"        parser.<parserStartRuleName>();\n" +
+			"    }\n" +
+			"}"
+			);
+		ST createParserST =
+			new ST(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        <parserName> parser = new <parserName>(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			createParserST =
+				new ST(
+				"        <parserName> parser = new <parserName>(tokens);\n");
+		}
+		outputFileST.add("createParser", createParserST);
+		outputFileST.add("parserName", parserName);
+		outputFileST.add("lexerName", lexerName);
+		outputFileST.add("parserStartRuleName", parserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.render());
+	}
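This writer and the ones that follow assemble Test.java from StringTemplate templates (the v4 ST class, judging by add()/render()): <lexerName>-style placeholders are filled with add(), render() produces the source text, and an attribute value may itself be another ST, which is how the createParser fragment is spliced in. In miniature:

    ST st = new ST("public class <className> { <body> }");
    st.add("className", "Test");
    st.add("body", new ST("public static void main(String[] a) { }"));
    System.out.println(st.render());
    // public class Test { public static void main(String[] a) { } }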
+
+	protected void writeLexerTestFile(String lexerName, boolean debug) {
+		ST outputFileST = new ST(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.runtime.tree.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        <lexerName> lex = new <lexerName>(input);\n" +
+			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
+			"        System.out.println(tokens);\n" +
+			"    }\n" +
+			"}"
+			);
+		outputFileST.add("lexerName", lexerName);
+		writeFile(tmpdir, "Test.java", outputFileST.render());
+	}
+
+	protected void writeTreeTestFile(String parserName,
+									 String treeParserName,
+									 String lexerName,
+									 String parserStartRuleName,
+									 String treeParserStartRuleName,
+									 boolean debug)
+	{
+		ST outputFileST = new ST(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.runtime.tree.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        <lexerName> lex = new <lexerName>(input);\n" +
+			"        TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" +
+			"        <createParser>\n"+
+			"        <parserName>.<parserStartRuleName>_return r = parser.<parserStartRuleName>();\n" +
+			"        <if(!treeParserStartRuleName)>\n" +
+			"        if ( r.tree!=null ) {\n" +
+			"            System.out.println(((Tree)r.tree).toStringTree());\n" +
+			"            ((CommonTree)r.tree).sanityCheckParentAndChildIndexes();\n" +
+			"		 }\n" +
+			"        <else>\n" +
+			"        CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" +
+			"        nodes.setTokenStream(tokens);\n" +
+			"        <treeParserName> walker = new <treeParserName>(nodes);\n" +
+			"        walker.<treeParserStartRuleName>();\n" +
+			"        <endif>\n" +
+			"    }\n" +
+			"}"
+			);
+		ST createParserST =
+			new ST(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        <parserName> parser = new <parserName>(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			createParserST =
+				new ST(
+				"        <parserName> parser = new <parserName>(tokens);\n");
+		}
+		outputFileST.add("createParser", createParserST);
+		outputFileST.add("parserName", parserName);
+		outputFileST.add("treeParserName", treeParserName);
+		outputFileST.add("lexerName", lexerName);
+		outputFileST.add("parserStartRuleName", parserStartRuleName);
+		outputFileST.add("treeParserStartRuleName", treeParserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.render());
+	}
+
+	/** Parser creates trees and so does the tree parser */
+	protected void writeTreeAndTreeTestFile(String parserName,
+											String treeParserName,
+											String lexerName,
+											String parserStartRuleName,
+											String treeParserStartRuleName,
+											boolean debug)
+	{
+		ST outputFileST = new ST(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.runtime.tree.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        <lexerName> lex = new <lexerName>(input);\n" +
+			"        TokenRewriteStream tokens = new TokenRewriteStream(lex);\n" +
+			"        <createParser>\n"+
+			"        <parserName>.<parserStartRuleName>_return r = parser.<parserStartRuleName>();\n" +
+			"        ((CommonTree)r.tree).sanityCheckParentAndChildIndexes();\n" +
+			"        CommonTreeNodeStream nodes = new CommonTreeNodeStream((Tree)r.tree);\n" +
+			"        nodes.setTokenStream(tokens);\n" +
+			"        <treeParserName> walker = new <treeParserName>(nodes);\n" +
+			"        <treeParserName>.<treeParserStartRuleName>_return r2 = walker.<treeParserStartRuleName>();\n" +
+			"		 CommonTree rt = ((CommonTree)r2.tree);\n" +
+			"		 if ( rt!=null ) System.out.println(((CommonTree)r2.tree).toStringTree());\n" +
+			"    }\n" +
+			"}"
+			);
+		ST createParserST =
+			new ST(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        <parserName> parser = new <parserName>(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			createParserST =
+				new ST(
+				"        <parserName> parser = new <parserName>(tokens);\n");
+		}
+		outputFileST.add("createParser", createParserST);
+		outputFileST.add("parserName", parserName);
+		outputFileST.add("treeParserName", treeParserName);
+		outputFileST.add("lexerName", lexerName);
+		outputFileST.add("parserStartRuleName", parserStartRuleName);
+		outputFileST.add("treeParserStartRuleName", treeParserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.render());
+	}
+
+	protected void writeTemplateTestFile(String parserName,
+										 String lexerName,
+										 String parserStartRuleName,
+										 boolean debug)
+	{
+		ST outputFileST = new ST(
+			"import org.antlr.runtime.*;\n" +
+			"import org.antlr.stringtemplate.*;\n" +
+			"import org.antlr.stringtemplate.language.*;\n" +
+			"import org.antlr.runtime.debug.*;\n" +
+			"import java.io.*;\n" +
+			"\n" +
+			"class Profiler2 extends Profiler {\n" +
+			"    public void terminate() { ; }\n" +
+			"}\n"+
+			"public class Test {\n" +
+			"    static String templates = \"group T; foo(x,y) ::= \\\"\\<x> \\<y>\\\"\";\n" +
+			"    static StringTemplateGroup group ="+
+			"    		new StringTemplateGroup(new StringReader(templates)," +
+			"					AngleBracketTemplateLexer.class);"+
+			"    public static void main(String[] args) throws Exception {\n" +
+			"        CharStream input = new ANTLRFileStream(args[0]);\n" +
+			"        <lexerName> lex = new <lexerName>(input);\n" +
+			"        CommonTokenStream tokens = new CommonTokenStream(lex);\n" +
+			"        <createParser>\n"+
+			"		 parser.setTemplateLib(group);\n"+
+			"        <parserName>.<parserStartRuleName>_return r = parser.<parserStartRuleName>();\n" +
+			"        if ( r.st!=null )\n" +
+			"            System.out.print(r.st.toString());\n" +
+			"	 	 else\n" +
+			"            System.out.print(\"\");\n" +
+			"    }\n" +
+			"}"
+			);
+		ST createParserST =
+			new ST(
+			"        Profiler2 profiler = new Profiler2();\n"+
+			"        <parserName> parser = new <parserName>(tokens,profiler);\n" +
+			"        profiler.setParser(parser);\n");
+		if ( !debug ) {
+			createParserST =
+				new ST(
+				"        <parserName> parser = new <parserName>(tokens);\n");
+		}
+		outputFileST.add("createParser", createParserST);
+		outputFileST.add("parserName", parserName);
+		outputFileST.add("lexerName", lexerName);
+		outputFileST.add("parserStartRuleName", parserStartRuleName);
+		writeFile(tmpdir, "Test.java", outputFileST.render());
+	}
+
+    protected void eraseFiles(final String filesEndingWith) {
+        File tmpdirF = new File(tmpdir);
+        String[] files = tmpdirF.list();
+        for(int i = 0; files!=null && i < files.length; i++) {
+            if ( files[i].endsWith(filesEndingWith) ) {
+                new File(tmpdir+"/"+files[i]).delete();
+            }
+        }
+    }
+
+    protected void eraseFiles() {
+        File tmpdirF = new File(tmpdir);
+        String[] files = tmpdirF.list();
+        for(int i = 0; files!=null && i < files.length; i++) {
+            new File(tmpdir+"/"+files[i]).delete();
+        }
+    }
+
+    protected void eraseTempDir() {
+        File tmpdirF = new File(tmpdir);
+        if ( tmpdirF.exists() ) {
+            eraseFiles();
+            tmpdirF.delete();
+        }
+    }
+
+	public String getFirstLineOfException() {
+		if ( this.stderrDuringParse ==null ) {
+			return null;
+		}
+		String[] lines = this.stderrDuringParse.split("\n");
+		String prefix="Exception in thread \"main\" ";
+		return lines[0].substring(prefix.length(),lines[0].length());
+	}
+
+	public <T> List<T> realElements(List<T> elements) {
+		List<T> n = new ArrayList<T>();
+		for (int i = Label.NUM_FAUX_LABELS+Label.MIN_TOKEN_TYPE - 1; i < elements.size(); i++) {
+			T o = elements.get(i);
+			if ( o!=null ) {
+				n.add(o);
+			}
+		}
+		return n;
+	}
+
+	public List<String> realElements(Map<String, Integer> elements) {
+		List<String> n = new ArrayList<String>();
+		for (Map.Entry<String, Integer> entry : elements.entrySet()) {
+			String tokenID = entry.getKey();
+			if ( entry.getValue() >= Label.MIN_TOKEN_TYPE ) {
+				n.add(tokenID+"="+entry.getValue());
+			}
+		}
+		Collections.sort(n);
+		return n;
+	}
+
+    public String sortLinesInString(String s) {
+        String lines[] = s.split("\n");
+        Arrays.sort(lines);
+        List<String> linesL = Arrays.asList(lines);
+        StringBuilder buf = new StringBuilder();
+        for (String l : linesL) {
+            buf.append(l);
+            buf.append('\n');
+        }
+        return buf.toString();
+    }
+
+    /**
+     * When looking at a result set that consists of a Map/HashTable
+     * we cannot rely on the output order, as the hashing algorithm or other aspects
+     * of the implementation may be different on different JDKs or platforms. Hence
+     * we take the Map, convert the keys to a List, sort them and Stringify the Map, which is a
+     * bit of a hack, but guarantees that we get the same order on all systems. We assume that
+     * the keys are strings.
+     *
+     * @param m The Map that contains keys we wish to return in sorted order
+     * @return A string that represents all the keys in sorted order.
+     */
+    public <K, V> String sortMapToString(Map<K, V> m) {
+
+        System.out.println("Map toString looks like: " + m.toString());
+        // Pass in crap, and get nothing back
+        //
+        if  (m == null) {
+            return null;
+        }
+
+        // Sort the keys in the Map
+        //
+        TreeMap<K, V> nset = new TreeMap<K, V>(m);
+
+        System.out.println("Tree map looks like: " + nset.toString());
+        return nset.toString();
+    }
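Copying the map into a TreeMap sorts entries by key, so the rendered string is deterministic regardless of the source map's iteration order:

    Map<String, Integer> m = new HashMap<String, Integer>();
    m.put("b", 2);
    m.put("a", 1);
    // HashMap iteration order is unspecified; the TreeMap view is sorted by key
    String stable = new TreeMap<String, Integer>(m).toString();   // "{a=1, b=2}"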
+}
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/DebugTestAutoAST.java b/tool/src/test/java/org/antlr/test/DebugTestAutoAST.java
similarity index 100%
rename from antlr-3.4/tool/src/test/java/org/antlr/test/DebugTestAutoAST.java
rename to tool/src/test/java/org/antlr/test/DebugTestAutoAST.java
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/DebugTestCompositeGrammars.java b/tool/src/test/java/org/antlr/test/DebugTestCompositeGrammars.java
similarity index 100%
rename from antlr-3.4/tool/src/test/java/org/antlr/test/DebugTestCompositeGrammars.java
rename to tool/src/test/java/org/antlr/test/DebugTestCompositeGrammars.java
diff --git a/antlr-3.4/tool/src/test/java/org/antlr/test/DebugTestRewriteAST.java b/tool/src/test/java/org/antlr/test/DebugTestRewriteAST.java
similarity index 100%
rename from antlr-3.4/tool/src/test/java/org/antlr/test/DebugTestRewriteAST.java
rename to tool/src/test/java/org/antlr/test/DebugTestRewriteAST.java
diff --git a/tool/src/test/java/org/antlr/test/ErrorQueue.java b/tool/src/test/java/org/antlr/test/ErrorQueue.java
new file mode 100644
index 0000000..8546f3c
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/ErrorQueue.java
@@ -0,0 +1,73 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.tool.ANTLRErrorListener;
+import org.antlr.tool.Message;
+import org.antlr.tool.ToolMessage;
+
+import java.util.LinkedList;
+import java.util.List;
+
+public class ErrorQueue implements ANTLRErrorListener {
+	List<String> infos = new LinkedList<String>();
+	List<Message> errors = new LinkedList<Message>();
+	List<Message> warnings = new LinkedList<Message>();
+
+	@Override
+	public void info(String msg) {
+		infos.add(msg);
+	}
+
+	@Override
+	public void error(Message msg) {
+		errors.add(msg);
+	}
+
+	@Override
+	public void warning(Message msg) {
+		warnings.add(msg);
+	}
+
+	@Override
+	public void error(ToolMessage msg) {
+		errors.add(msg);
+	}
+
+	public int size() {
+		return infos.size() + errors.size() + warnings.size();
+	}
+
+	@Override
+	public String toString() {
+		return "infos: "+infos+
+			"errors: "+errors+
+			"warnings: "+warnings;
+	}
+}
+
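ErrorQueue is the listener the tests install on ErrorManager so that tool messages can be asserted on instead of printed; the typical pattern, as used throughout TestAttributes below, is:

    ErrorQueue equeue = new ErrorQueue();
    ErrorManager.setErrorListener(equeue);
    Grammar g = new Grammar(
        "parser grammar t;\n" +
        "a : A ;");
    assertEquals("unexpected errors: " + equeue, 0, equeue.errors.size());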
diff --git a/tool/src/test/java/org/antlr/test/TestASTConstruction.java b/tool/src/test/java/org/antlr/test/TestASTConstruction.java
new file mode 100644
index 0000000..8a652a2
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestASTConstruction.java
@@ -0,0 +1,374 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.tool.Grammar;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestASTConstruction extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestASTConstruction() {
+    }
+
+	@Test public void testA() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : A;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT A <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testEmptyAlt() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : ;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT epsilon <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testNakeRulePlusInLexer() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"A : B+;\n" +
+				"B : 'a';");
+		String expecting =
+			"(rule A ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT B <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("A").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRulePlus() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (b)+;\n" +
+				"b : B;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testNakedRulePlus() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : b+;\n" +
+				"b : B;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRuleOptional() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (b)?;\n" +
+				"b : B;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (? (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testNakedRuleOptional() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : b?;\n" +
+				"b : B;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (? (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRuleStar() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : (b)*;\n" +
+				"b : B;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testNakedRuleStar() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar P;\n"+
+				"a : b*;\n" +
+				"b : B;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT b <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testCharStar() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : 'a'*;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT 'a' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testCharStarInLexer() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"B : 'b'*;");
+		String expecting =
+			"(rule B ARG RET scope (BLOCK (ALT (* (BLOCK (ALT 'b' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("B").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testStringStar() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : 'while'*;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT 'while' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testStringStarInLexer() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"B : 'while'*;");
+		String expecting =
+			"(rule B ARG RET scope (BLOCK (ALT (* (BLOCK (ALT 'while' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("B").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testCharPlus() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : 'a'+;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT 'a' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testCharPlusInLexer() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"B : 'b'+;");
+		String expecting =
+			"(rule B ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT 'b' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("B").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testCharOptional() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : 'a'?;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (? (BLOCK (ALT 'a' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testCharOptionalInLexer() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"B : 'b'?;");
+		String expecting =
+			"(rule B ARG RET scope (BLOCK (ALT (? (BLOCK (ALT 'b' <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("B").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testCharRangePlus() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar P;\n"+
+				"ID : 'a'..'z'+;");
+		String expecting =
+			"(rule ID ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT (.. 'a' 'z') <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("ID").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=ID;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (= x ID) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testLabelOfOptional() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=ID?;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (? (BLOCK (ALT (= x ID) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testLabelOfClosure() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=ID*;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT (= x ID) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRuleLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=b;\n" +
+				"b : ID;\n");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (= x b) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testSetLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=(A|B);\n");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (= x (BLOCK (ALT A <end-of-alt>) (ALT B <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testNotSetLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=~(A|B);\n");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (= x (~ (BLOCK (ALT A <end-of-alt>) (ALT B <end-of-alt>) <end-of-block>))) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testNotSetListLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x+=~(A|B);\n");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (+= x (~ (BLOCK (ALT A <end-of-alt>) (ALT B <end-of-alt>) <end-of-block>))) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testNotSetListLabelInLoop() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x+=~(A|B)+;\n");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT (+= x (~ (BLOCK (ALT A <end-of-alt>) (ALT B <end-of-alt>) <end-of-block>))) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRuleLabelOfPositiveClosure() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x=b+;\n" +
+				"b : ID;\n");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT (= x b) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testListLabelOfClosure() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x+=ID*;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT (+= x ID) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testListLabelOfClosure2() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n"+
+				"a : x+='int'*;");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (* (BLOCK (ALT (+= x 'int') <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRuleListLabelOfPositiveClosure() throws Exception {
+		Grammar g = new Grammar(
+				"grammar P;\n" +
+				"options {output=AST;}\n"+
+				"a : x+=b+;\n" +
+				"b : ID;\n");
+		String expecting =
+			"(rule a ARG RET scope (BLOCK (ALT (+ (BLOCK (ALT (+= x b) <end-of-alt>) <end-of-block>)) <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("a").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRootTokenInStarLoop() throws Exception {
+		Grammar g = new Grammar(
+				"grammar Expr;\n" +
+				"options { output=AST; backtrack=true; }\n" +
+				"a : ('*'^)* ;\n");  // bug: the synpred had nothing in it
+		String expecting =
+			"(rule synpred1_Expr ARG RET scope (BLOCK (ALT '*' <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("synpred1_Expr").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testActionInStarLoop() throws Exception {
+		Grammar g = new Grammar(
+				"grammar Expr;\n" +
+				"options { backtrack=true; }\n" +
+				"a : ({blort} 'x')* ;\n");  // bug: the synpred had nothing in it
+		String expecting =
+			"(rule synpred1_Expr ARG RET scope (BLOCK (ALT 'x' <end-of-alt>) <end-of-block>) <end-of-rule>)";
+		String found = g.getRule("synpred1_Expr").tree.toStringTree();
+		assertEquals(expecting, found);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestAttributes.java b/tool/src/test/java/org/antlr/test/TestAttributes.java
new file mode 100644
index 0000000..acef5f2
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestAttributes.java
@@ -0,0 +1,3120 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.grammar.v3.ActionTranslator;
+import org.antlr.runtime.CommonToken;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STGroup;
+import org.antlr.tool.*;
+import org.junit.Test;
+
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.*;
+
+/** Check the $x, $x.y attributes.  For checking the actual
+ *  translation, assume the Java target.  This is still a great test
+ *  for the semantics of the $x.y stuff regardless of the target.
+ */
+public class TestAttributes extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestAttributes() {
+	}
+
+	@Test public void testEscapedLessThanInAction() throws Exception {
+		Grammar g = new Grammar();
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		String action = "i<3; '<xmltag>'";
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),0);
+		String expecting = action;
+		String rawTranslation =
+			translator.translate();
+		STGroup templates =
+			new STGroup();
+		ST actionST = new ST(templates, "<action>");
+		actionST.add("action", rawTranslation);
+		String found = actionST.render();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testEscaped$InAction() throws Exception {
+		String action = "int \\$n; \"\\$in string\\$\"";
+		String expecting = "int $n; \"$in string$\"";
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"@members {"+action+"}\n"+
+				"a[User u, int i]\n" +
+				"        : {"+action+"}\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"a",
+				new CommonToken(ANTLRParser.ACTION,action),0);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testArguments() throws Exception {
+		String action = "$i; $i.x; $u; $u.x";
+		String expecting = "i; i.x; u; u.x";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[User u, int i]\n" +
+				"        : {"+action+"}\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testComplicatedArgParsing() throws Exception {
+		String action = "x, (*a).foo(21,33), 3.2+1, '\\n', "+
+			"\"a,oo\\nick\", {bl, \"fdkj\"eck}";
+		String expecting = "x, (*a).foo(21,33), 3.2+1, '\\n', \"a,oo\\nick\", {bl, \"fdkj\"eck}";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[User u, int i]\n" +
+				"        : A a["+action+"] B\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =	translator.translate();
+		assertEquals(expecting, rawTranslation);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testBracketArgParsing() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[String[\\] ick, int i]\n" +
+				"        : A \n"+
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		Rule r = g.getRule("a");
+		AttributeScope parameters = r.parameterScope;
+		List<Attribute> attrs = parameters.getAttributes();
+		assertEquals("attribute mismatch","String[] ick",attrs.get(0).decl.toString());
+		assertEquals("parameter name mismatch","ick",attrs.get(0).name);
+		assertEquals("declarator mismatch", "String[]", attrs.get(0).type);
+
+		assertEquals("attribute mismatch","int i",attrs.get(1).decl.toString());
+		assertEquals("parameter name mismatch","i",attrs.get(1).name);
+		assertEquals("declarator mismatch", "int", attrs.get(1).type);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testStringArgParsing() throws Exception {
+		String action = "34, '{', \"it's<\", '\"', \"\\\"\", 19";
+		String expecting = "34, '{', \"it's<\", '\"', \"\\\"\", 19";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[User u, int i]\n" +
+				"        : A a["+action+"] B\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =	translator.translate();
+		assertEquals(expecting, rawTranslation);
+
+		List<String> expectArgs = new ArrayList<String>() {
+			{add("34");}
+			{add("'{'");}
+			{add("\"it's<\"");}
+			{add("'\"'");}
+			{add("\"\\\"\"");} // that's "\""
+			{add("19");}
+		};
+		List<String> actualArgs = CodeGenerator.getListOfArgumentsFromAction(action, ',');
+		assertEquals("args mismatch", expectArgs, actualArgs);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testComplicatedSingleArgParsing() throws Exception {
+		String action = "(*a).foo(21,33,\",\")";
+		String expecting = "(*a).foo(21,33,\",\")";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[User u, int i]\n" +
+				"        : A a["+action+"] B\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =	translator.translate();
+		assertEquals(expecting, rawTranslation);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testArgWithLT() throws Exception {
+		String action = "34<50";
+		String expecting = "34<50";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[boolean b]\n" +
+				"        : A a["+action+"] B\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		assertEquals(expecting, rawTranslation);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testGenericsAsArgumentDefinition() throws Exception {
+		String action = "$foo.get(\"ick\");";
+		String expecting = "foo.get(\"ick\");";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String grammar =
+			"parser grammar T;\n"+
+				"a[HashMap<String,String> foo]\n" +
+				"        : {"+action+"}\n" +
+				"        ;";
+		Grammar g = new Grammar(grammar);
+		Rule ra = g.getRule("a");
+		List<Attribute> attrs = ra.parameterScope.getAttributes();
+		assertEquals("attribute mismatch","HashMap<String,String> foo",attrs.get(0).decl.toString());
+		assertEquals("parameter name mismatch","foo",attrs.get(0).name);
+		assertEquals("declarator mismatch", "HashMap<String,String>", attrs.get(0).type);
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testGenericsAsArgumentDefinition2() throws Exception {
+		String action = "$foo.get(\"ick\"); x=3;";
+		String expecting = "foo.get(\"ick\"); x=3;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String grammar =
+			"parser grammar T;\n"+
+				"a[HashMap<String,String> foo, int x, List<String> duh]\n" +
+				"        : {"+action+"}\n" +
+				"        ;";
+		Grammar g = new Grammar(grammar);
+		Rule ra = g.getRule("a");
+		List<Attribute> attrs = ra.parameterScope.getAttributes();
+
+		assertEquals("attribute mismatch","HashMap<String,String> foo",attrs.get(0).decl.toString().trim());
+		assertEquals("parameter name mismatch","foo",attrs.get(0).name);
+		assertEquals("declarator mismatch", "HashMap<String,String>", attrs.get(0).type);
+
+		assertEquals("attribute mismatch","int x",attrs.get(1).decl.toString().trim());
+		assertEquals("parameter name mismatch","x",attrs.get(1).name);
+		assertEquals("declarator mismatch", "int", attrs.get(1).type);
+
+		assertEquals("attribute mismatch","List<String> duh",attrs.get(2).decl.toString().trim());
+		assertEquals("parameter name mismatch","duh",attrs.get(2).name);
+		assertEquals("declarator mismatch", "List<String>", attrs.get(2).type);
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testGenericsAsReturnValue() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String grammar =
+			"parser grammar T;\n"+
+				"a returns [HashMap<String,String> foo] : ;\n";
+		Grammar g = new Grammar(grammar);
+		Rule ra = g.getRule("a");
+		List<Attribute> attrs = ra.returnScope.getAttributes();
+		assertEquals("attribute mismatch","HashMap<String,String> foo",attrs.get(0).decl.toString());
+		assertEquals("parameter name mismatch","foo",attrs.get(0).name);
+		assertEquals("declarator mismatch", "HashMap<String,String>", attrs.get(0).type);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testComplicatedArgParsingWithTranslation() throws Exception {
+		String action = "x, $A.text+\"3242\", (*$A).foo(21,33), 3.2+1, '\\n', "+
+			"\"a,oo\\nick\", {bl, \"fdkj\"eck}";
+		String expecting = "x, (A1!=null?A1.getText():null)+\"3242\", (*A1).foo(21,33), 3.2+1, '\\n', \"a,oo\\nick\", {bl, \"fdkj\"eck}";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// now check in actual grammar.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[User u, int i]\n" +
+				"        : A a["+action+"] B\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/** $x.start refs are checked during translation not before so ANTLR misses
+	 the fact that rule r has refs to predefined attributes if the ref is after
+	 the def of the method or self-referential.  Actually would be ok if I didn't
+	 convert actions to strings; keep as templates.
+	 June 9, 2006: made action translation leave templates not strings
+	 */
+	@Test public void testRefToReturnValueBeforeRefToPredefinedAttr() throws Exception {
+		String action = "$x.foo";
+		String expecting = "(x!=null?((t.b_return)x).foo:0)";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a : x=b {"+action+"} ;\n" +
+				"b returns [int foo] : B {$b.start} ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRuleLabelBeforeRefToPredefinedAttr() throws Exception {
+		// As of Mar 2007, I'm removing unused labels.  Unfortunately,
+		// the action is not seen until code gen.  Can't see $x.text
+		// before stripping unused labels.  We really need to translate
+		// actions first so code gen logic can use info.
+		String action = "$x.text";
+		String expecting = "(x!=null?input.toString(x.start,x.stop):null)";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a : x=b {###"+action+"!!!} ;\n" +
+				"b : B ;\n");
+		Tool antlr = newTool();
+
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testInvalidArguments() throws Exception {
+		String action = "$x";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[User u, int i]\n" +
+				"        : {"+action+"}\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator = new ActionTranslator(generator,
+			"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE;
+		Object expectedArg = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testReturnValue() throws Exception {
+		String action = "$x.i";
+		String expecting = "x";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a returns [int i]\n" +
+				"        : 'a'\n" +
+				"        ;\n" +
+				"b : x=a {"+action+"} ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"b",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found =	translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testActionNotMovedToSynPred() throws Exception {
+		String action = "$b = true;";
+		String expecting = "retval.b = true;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"options {output=AST;}\n" + // push b into retval struct
+			"a returns [boolean b]\n" +
+			"options {backtrack=true;}\n" +
+			"   : 'a' {"+action+"}\n" +
+			"   | 'a'\n" +
+			"   ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"a",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testReturnValueWithNumber() throws Exception {
+		String action = "$x.i1";
+		String expecting = "x";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a returns [int i1]\n" +
+				"        : 'a'\n" +
+				"        ;\n" +
+				"b : x=a {"+action+"} ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"b",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testReturnValues() throws Exception {
+		String action = "$i; $i.x; $u; $u.x";
+		String expecting = "retval.i; retval.i.x; retval.u; retval.u.x";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a returns [User u, int i]\n" +
+				"        : {"+action+"}\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	/* regression test for ANTLR-46 */
+	@Test public void testReturnWithMultipleRuleRefs() throws Exception {
+		String action1 = "$obj = $rule2.obj;";
+		String action2 = "$obj = $rule3.obj;";
+		String expecting1 = "obj = rule21;";
+		String expecting2 = "obj = rule32;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+				"rule1 returns [ Object obj ]\n" +
+				":	rule2 { "+action1+" }\n" +
+				"|	rule3 { "+action2+" }\n" +
+				";\n"+
+				"rule2 returns [ Object obj ]\n"+
+				":	foo='foo' { $obj = $foo.text; }\n"+
+				";\n"+
+				"rule3 returns [ Object obj ]\n"+
+				":	bar='bar' { $obj = $bar.text; }\n"+
+				";");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		int i = 0;
+		String action = action1;
+		String expecting = expecting1;
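+		// Translate both alternatives: the first pass checks action1 against
+		// alt 1, then the variables are swapped so the second pass checks
+		// action2 against alt 2.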
+		do {
+			ActionTranslator translator = new ActionTranslator(generator,"rule1",
+				new CommonToken(ANTLRParser.ACTION,action),i+1);
+			String found = translator.translate();
+			assertEquals(expecting, found);
+			action = action2;
+			expecting = expecting2;
+		} while (i++ < 1);
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testInvalidReturnValues() throws Exception {
+		String action = "$x";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a returns [User u, int i]\n" +
+				"        : {"+action+"}\n" +
+				"        ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE;
+		Object expectedArg = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testTokenLabels() throws Exception {
+		String action = "$id; $f; $id.text; $id.getText(); $id.dork " +
+			"$id.type; $id.line; $id.pos; " +
+			"$id.channel; $id.index;";
+		String expecting = "id; f; (id!=null?id.getText():null); id.getText(); id.dork (id!=null?id.getType():0); (id!=null?id.getLine():0); (id!=null?id.getCharPositionInLine():0); (id!=null?id.getChannel():0); (id!=null?id.getTokenIndex():0);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a : id=ID f=FLOAT {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRuleLabels() throws Exception {
+		String action = "$r.x; $r.start;\n $r.stop;\n $r.tree; $a.x; $a.stop;";
+		String expecting = "(r!=null?((t.a_return)r).x:0); (r!=null?(r.start):null);" + newline +
+			"\t\t\t (r!=null?(r.stop):null);" + newline +
+			"\t\t\t (r!=null?((Object)r.getTree()):null); (r!=null?((t.a_return)r).x:0); (r!=null?(r.stop):null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a {###"+action+"!!!}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testAmbiguRuleRef() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a : A a {$a.text} | B ;");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		// error(132): <string>:2:9: reference $a is ambiguous; rule a is enclosing rule and referenced in the production
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+	}
+
+	@Test public void testRuleLabelsWithSpecialToken() throws Exception {
+		String action = "$r.x; $r.start; $r.stop; $r.tree; $a.x; $a.stop;";
+		String expecting = "(r!=null?((t.a_return)r).x:0); (r!=null?((MYTOKEN)r.start):null); (r!=null?((MYTOKEN)r.stop):null); (r!=null?((Object)r.getTree()):null); (r!=null?((t.a_return)r).x:0); (r!=null?((MYTOKEN)r.stop):null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"options {TokenLabelType=MYTOKEN;}\n"+
+				"a returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a {###"+action+"!!!}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testForwardRefRuleLabels() throws Exception {
+		String action = "$r.x; $r.start; $r.stop; $r.tree; $a.x; $a.tree;";
+		String expecting = "(r!=null?((t.a_return)r).x:0); (r!=null?(r.start):null); (r!=null?(r.stop):null); (r!=null?((Object)r.getTree()):null); (r!=null?((t.a_return)r).x:0); (r!=null?((Object)r.getTree()):null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"b : r=a {###"+action+"!!!}\n" +
+				"  ;\n" +
+				"a returns [int x]\n" +
+				"  : ;\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testInvalidRuleLabelAccessesParameter() throws Exception {
+		String action = "$r.z";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[int z] returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a[3] {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_INVALID_RULE_PARAMETER_REF;
+		Object expectedArg = "a";
+		Object expectedArg2 = "z";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testInvalidRuleLabelAccessesScopeAttribute() throws Exception {
+		String action = "$r.n";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a\n" +
+				"scope { int n; }\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a[3] {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF;
+		Object expectedArg = "a";
+		Object expectedArg2 = "n";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testInvalidRuleAttribute() throws Exception {
+		String action = "$r.blort";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[int z] returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a[3] {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_RULE_ATTRIBUTE;
+		Object expectedArg = "a";
+		Object expectedArg2 = "blort";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testMissingRuleAttribute() throws Exception {
+		String action = "$r";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a[int z] returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a[3] {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
+		Object expectedArg = "r";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testMissingUnlabeledRuleAttribute() throws Exception {
+		String action = "$a";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a returns [int x]:\n" +
+				"  ;\n"+
+				"b : a {"+action+"}\n" +
+				"  ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
+		Object expectedArg = "a";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testNonDynamicAttributeOutsideRule() throws Exception {
+		String action = "public void foo() { $x; }";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"@members {'+action+'}\n" +
+				"a : ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator = new ActionTranslator(generator,
+			null,
+			new CommonToken(ANTLRParser.ACTION,action),0);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE;
+		Object expectedArg = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testNonDynamicAttributeOutsideRule2() throws Exception {
+		String action = "public void foo() { $x.y; }";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"@members {'+action+'}\n" +
+				"a : ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator = new ActionTranslator(generator,
+			null,
+			new CommonToken(ANTLRParser.ACTION,action),0);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE;
+		Object expectedArg = "x";
+		Object expectedArg2 = "y";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// D Y N A M I C A L L Y  S C O P E D  A T T R I B U T E S
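+	// These tests exercise $Scope::attr references: the translator maps them
+	// onto a generated <Scope>_stack, e.g. $Symbols::names becomes
+	// Symbols_stack.peek().names, and indexed forms such as $Symbols[-1]::names
+	// go through Symbols_stack.elementAt(...), as the expected strings show.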
+
+	@Test public void testBasicGlobalScope() throws Exception {
+		String action = "$Symbols::names.add($id.text);";
+		String expecting = "Symbols_stack.peek().names.add((id!=null?id.getText():null));";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"  List names;\n" +
+				"}\n" +
+				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testUnknownGlobalScope() throws Exception {
+		String action = "$Symbols::names.add($id.text);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+
+		assertEquals("unexpected errors: "+equeue, 2, equeue.errors.size());
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE;
+		Object expectedArg = "Symbols";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testIndexedGlobalScope() throws Exception {
+		String action = "$Symbols[-1]::names.add($id.text);";
+		String expecting =
+			"Symbols_stack.elementAt(Symbols_stack.size()-1-1).names.add((id!=null?id.getText():null));";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"  List names;\n" +
+				"}\n" +
+				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void test0IndexedGlobalScope() throws Exception {
+		String action = "$Symbols[0]::names.add($id.text);";
+		String expecting =
+			"Symbols_stack.elementAt(0).names.add((id!=null?id.getText():null));";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"  List names;\n" +
+				"}\n" +
+				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testAbsoluteIndexedGlobalScope() throws Exception {
+		String action = "$Symbols[3]::names.add($id.text);";
+		String expecting =
+			"Symbols_stack.elementAt(3).names.add((id!=null?id.getText():null));";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"  List names;\n" +
+				"}\n" +
+				"a scope Symbols; : (id=ID ';' {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testScopeAndAttributeWithUnderscore() throws Exception {
+		String action = "$foo_bar::a_b;";
+		String expecting = "foo_bar_stack.peek().a_b;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope foo_bar {\n" +
+				"  int a_b;\n" +
+				"}\n" +
+				"a scope foo_bar; : (ID {"+action+"} )+\n" +
+				"  ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testSharedGlobalScope() throws Exception {
+		String action = "$Symbols::x;";
+		String expecting = "Symbols_stack.peek().x;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  String x;\n" +
+				"}\n" +
+				"a\n"+
+				"scope { int y; }\n"+
+				"scope Symbols;\n" +
+				" : b {"+action+"}\n" +
+				" ;\n" +
+				"b : ID {$Symbols::x=$ID.text} ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testGlobalScopeOutsideRule() throws Exception {
+		String action = "public void foo() {$Symbols::names.add('foo');}";
+		String expecting = "public void foo() {Symbols_stack.peek().names.add('foo');}";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"  List names;\n" +
+				"}\n" +
+				"@members {'+action+'}\n" +
+				"a : \n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRuleScopeOutsideRule() throws Exception {
+		String action = "public void foo() {$a::name;}";
+		String expecting = "public void foo() {a_stack.peek().name;}";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"@members {"+action+"}\n" +
+				"a\n" +
+				"scope { String name; }\n" +
+				"  : {foo();}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+			null,
+			new CommonToken(ANTLRParser.ACTION,action),0);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testBasicRuleScope() throws Exception {
+		String action = "$a::n;";
+		String expecting = "a_stack.peek().n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testUnqualifiedRuleScopeAccessInsideRule() throws Exception {
+		String action = "$n;";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_ATTRIBUTE;
+		Object expectedArg = "n";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg,
+				expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testIsolatedDynamicRuleScopeRef() throws Exception {
+		String action = "$a;"; // refers to stack not top of stack
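+		// ($a alone names the whole a_stack; $a::n would peek at the top frame)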
+		String expecting = "a_stack;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : b ;\n" +
+				"b : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testDynamicRuleScopeRefInSubrule() throws Exception {
+		String action = "$a::n;";
+		String expecting = "a_stack.peek().n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  float n;\n" +
+				"} : b ;\n" +
+				"b : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testIsolatedGlobalScopeRef() throws Exception {
+		String action = "$Symbols;";
+		String expecting = "Symbols_stack;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  String x;\n" +
+				"}\n" +
+				"a\n"+
+				"scope { int y; }\n"+
+				"scope Symbols;\n" +
+				" : b {"+action+"}\n" +
+				" ;\n" +
+				"b : ID {$Symbols::x=$ID.text} ;\n" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRuleScopeFromAnotherRule() throws Exception {
+		String action = "$a::n;"; // must be qualified
+		String expecting = "a_stack.peek().n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  boolean n;\n" +
+				"} : b\n" +
+				"  ;\n" +
+				"b : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testFullyQualifiedRefToCurrentRuleParameter() throws Exception {
+		String action = "$a.i;";
+		String expecting = "i;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a[int i]: {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testFullyQualifiedRefToCurrentRuleRetVal() throws Exception {
+		String action = "$a.i;";
+		String expecting = "retval.i;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a returns [int i, int j]: {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testSetFullyQualifiedRefToCurrentRuleRetVal() throws Exception {
+		String action = "$a.i = 1;";
+		String expecting = "retval.i = 1;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a returns [int i, int j]: {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testIsolatedRefToCurrentRule() throws Exception {
+		String action = "$a;";
+		String expecting = "";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : 'a' {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
+		Object expectedArg = "a";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg,
+				expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testIsolatedRefToRule() throws Exception {
+		String action = "$x;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : x=b {"+action+"}\n" +
+				"  ;\n" +
+				"b : 'b' ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		int expectedMsgID = ErrorManager.MSG_ISOLATED_RULE_SCOPE;
+		Object expectedArg = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	/*  I think these have to be errors; $a.x makes no sense.
+	@Test public void testFullyQualifiedRefToLabelInCurrentRule() throws Exception {
+			String action = "$a.x;";
+			String expecting = "x;";
+
+			ErrorQueue equeue = new ErrorQueue();
+			ErrorManager.setErrorListener(equeue);
+			Grammar g = new Grammar(
+				"grammar t;\n"+
+					"a : x='a' {"+action+"}\n" +
+					"  ;\n");
+			Tool antlr = newTool();
+			CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+			g.setCodeGenerator(generator);
+			generator.genRecognizer(); // forces load of templates
+			ActionTranslator translator = new ActionTranslator(generator,"a",
+															   new CommonToken(ANTLRParser.ACTION,action),1);
+			String rawTranslation =
+				translator.translate();
+			STGroup templates =
+				new STGroup();
+			ST actionST = new ST(templates, rawTranslation);
+			String found = actionST.render();
+			assertEquals(expecting, found);
+
+			assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		}
+
+	@Test public void testFullyQualifiedRefToListLabelInCurrentRule() throws Exception {
+		String action = "$a.x;"; // must be qualified
+		String expecting = "list_x;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : x+='a' {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+														   new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+*/
+	@Test public void testFullyQualifiedRefToTemplateAttributeInCurrentRule() throws Exception {
+		String action = "$a.st;"; // can be qualified
+		String expecting = "retval.st;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+				"options {output=template;}\n"+
+				"a : (A->{$A.text}) {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRuleRefWhenRuleHasScope() throws Exception {
+		String action = "$b.start;";
+		String expecting = "(b1!=null?(b1.start):null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+				"a : b {###"+action+"!!!} ;\n" +
+				"b\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : 'b' \n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testDynamicScopeRefOkEvenThoughRuleRefExists() throws Exception {
+		String action = "$b::n;";
+		String expecting = "b_stack.peek().n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+				"s : b ;\n"+
+				"b\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : '(' b ')' {"+action+"}\n" + // refers to current invocation's n
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRefToTemplateAttributeForCurrentRule() throws Exception {
+		String action = "$st=null;";
+		String expecting = "retval.st =null;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+				"options {output=template;}\n"+
+				"a : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRefToTextAttributeForCurrentRule() throws Exception {
+		String action = "$text";
+		String expecting = "input.toString(retval.start,input.LT(-1))";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+				"options {output=template;}\n"+
+				"a : {###"+action+"!!!}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRefToStartAttributeForCurrentRule() throws Exception {
+		String action = "$start;";
+		String expecting = "(retval.start);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+				"a : {###"+action+"!!!}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testTokenLabelFromMultipleAlts() throws Exception {
+		String action = "$ID.text;"; // must be qualified
+		String action2 = "$INT.text;"; // must be qualified
+		String expecting = "(ID1!=null?ID1.getText():null);";
+		String expecting2 = "(INT2!=null?INT2.getText():null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID {"+action+"}\n" +
+				"  | INT {"+action2+"}\n" +
+				"  ;\n" +
+				"ID : 'a';\n" +
+				"INT : '0';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		translator = new ActionTranslator(generator,
+			"a",
+			new CommonToken(ANTLRParser.ACTION,action2),2);
+		found = translator.translate();
+		assertEquals(expecting2, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRuleLabelFromMultipleAlts() throws Exception {
+		String action = "$b.text;"; // must be qualified
+		String action2 = "$c.text;"; // must be qualified
+		String expecting = "(b1!=null?input.toString(b1.start,b1.stop):null);";
+		String expecting2 = "(c2!=null?input.toString(c2.start,c2.stop):null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : b {###"+action+"!!!}\n" +
+				"  | c {^^^"+action2+"&&&}\n" +
+				"  ;\n" +
+				"b : 'a';\n" +
+				"c : '0';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+		found = code.substring(code.indexOf("^^^")+3,code.indexOf("&&&"));
+		assertEquals(expecting2, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testUnknownDynamicAttribute() throws Exception {
+		String action = "$a::x";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : {"+action+"}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"a",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE;
+		Object expectedArg = "a";
+		Object expectedArg2 = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testUnknownGlobalDynamicAttribute() throws Exception {
+		String action = "$Symbols::x";
+		String expecting = action;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"scope Symbols {\n" +
+				"  int n;\n" +
+				"}\n" +
+				"a : {'+action+'}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"a",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE;
+		Object expectedArg = "Symbols";
+		Object expectedArg2 = "x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testUnqualifiedRuleScopeAttribute() throws Exception {
+		String action = "$n;"; // must be qualified
+		String expecting = "$n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : b\n" +
+				"  ;\n" +
+				"b : {'+action+'}\n" +
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"b",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE;
+		Object expectedArg = "n";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testRuleAndTokenLabelTypeMismatch() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : id='foo' id=b\n" +
+				"  ;\n" +
+				"b : ;\n");
+		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
+		Object expectedArg = "id";
+		Object expectedArg2 = "rule!=token";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testListAndTokenLabelTypeMismatch() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ids+='a' ids='b'\n" +
+				"  ;\n" +
+				"b : ;\n");
+		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
+		Object expectedArg = "ids";
+		Object expectedArg2 = "token!=token-list";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testListAndRuleLabelTypeMismatch() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+				"options {output=AST;}\n"+
+				"a : bs+=b bs=b\n" +
+				"  ;\n" +
+				"b : 'b';\n");
+		int expectedMsgID = ErrorManager.MSG_LABEL_TYPE_CONFLICT;
+		Object expectedArg = "bs";
+		Object expectedArg2 = "rule!=rule-list";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testArgReturnValueMismatch() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a[int i] returns [int x, int i]\n" +
+				"  : \n" +
+				"  ;\n" +
+				"b : ;\n");
+		int expectedMsgID = ErrorManager.MSG_ARG_RETVAL_CONFLICT;
+		Object expectedArg = "i";
+		Object expectedArg2 = "a";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testSimplePlusEqualLabel() throws Exception {
+		String action = "$ids.size();"; // must be qualified
+		String expecting = "list_ids.size();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"a : ids+=ID ( COMMA ids+=ID {"+action+"})* ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"a",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testPlusEqualStringLabel() throws Exception {
+		String action = "$ids.size();"; // must be qualified
+		String expecting = "list_ids.size();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ids+='if' ( ',' ids+=ID {"+action+"})* ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"a",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testPlusEqualSetLabel() throws Exception {
+		String action = "$ids.size();"; // must be qualified
+		String expecting = "list_ids.size();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ids+=('a'|'b') ( ',' ids+=ID {"+action+"})* ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"a",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testPlusEqualWildcardLabel() throws Exception {
+		String action = "$ids.size();"; // must be qualified
+		String expecting = "list_ids.size();";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ids+=. ( ',' ids+=ID {"+action+"})* ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"a",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testImplicitTokenLabel() throws Exception {
+		String action = "$ID; $ID.text; $ID.getText()";
+		String expecting = "ID1; (ID1!=null?ID1.getText():null); ID1.getText()";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID {"+action+"} ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"a",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testImplicitRuleLabel() throws Exception {
+		String action = "$r.start;";
+		String expecting = "(r1!=null?(r1.start):null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r {###"+action+"!!!} ;" +
+				"r : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testReuseExistingLabelWithImplicitRuleLabel() throws Exception {
+		String action = "$r.start;";
+		String expecting = "(x!=null?(x.start):null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : x=r {###"+action+"!!!} ;" +
+				"r : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testReuseExistingListLabelWithImplicitRuleLabel() throws Exception {
+		String action = "$r.start;";
+		String expecting = "(x!=null?(x.start):null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"options {output=AST;}\n" +
+				"a : x+=r {###"+action+"!!!} ;" +
+				"r : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testReuseExistingLabelWithImplicitTokenLabel() throws Exception {
+		String action = "$ID.text;";
+		String expecting = "(x!=null?x.getText():null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : x=ID {"+action+"} ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testReuseExistingListLabelWithImplicitTokenLabel() throws Exception {
+		String action = "$ID.text;";
+		String expecting = "(x!=null?x.getText():null);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : x+=ID {"+action+"} ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRuleLabelWithoutOutputOption() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar T;\n"+
+				"s : x+=a ;" +
+				"a : 'a';\n"+
+				"b : 'b';\n"+
+				"WS : ' '|'\n';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_LIST_LABEL_INVALID_UNLESS_RETVAL_STRUCT;
+		Object expectedArg = "x";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testRuleLabelOnTwoDifferentRulesAST() throws Exception {
+		String grammar =
+			"grammar T;\n"+
+				"options {output=AST;}\n"+
+				"s : x+=a x+=b {System.out.println($x);} ;" +
+				"a : 'a';\n"+
+				"b : 'b';\n"+
+				"WS : (' '|'\\n') {skip();};\n";
+		String expecting = "[a, b]\na b\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+			"s", "a b", false);
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRuleLabelOnTwoDifferentRulesTemplate() throws Exception {
+		String grammar =
+			"grammar T;\n"+
+				"options {output=template;}\n"+
+				"s : x+=a x+=b {System.out.println($x);} ;" +
+				"a : 'a' -> {%{\"hi\"}} ;\n"+
+				"b : 'b' -> {%{\"mom\"}} ;\n"+
+				"WS : (' '|'\\n') {skip();};\n";
+		String expecting = "[hi, mom]\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+			"s", "a b", false);
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testMissingArgs() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r ;" +
+				"r[int i] : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_MISSING_RULE_ARGS;
+		Object expectedArg = "r";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testArgsWhenNoneDefined() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r[32,34] ;" +
+				"r : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_RULE_HAS_NO_ARGS;
+		Object expectedArg = "r";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testReturnInitValue() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r ;\n" +
+				"r returns [int x=0] : 'a' {$x = 4;} ;\n");
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		Rule r = g.getRule("r");
+		AttributeScope retScope = r.returnScope;
+		List<Attribute> parameters = retScope.getAttributes();
+		assertNotNull("missing return action", parameters);
+		assertEquals(1, parameters.size());
+		String found = parameters.get(0).toString();
+		String expecting = "int x=0";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testMultipleReturnInitValue() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r ;\n" +
+				"r returns [int x=0, int y, String s=new String(\"foo\")] : 'a' {$x = 4;} ;\n");
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		Rule r = g.getRule("r");
+		AttributeScope retScope = r.returnScope;
+		List<Attribute> parameters = retScope.getAttributes();
+		assertNotNull("missing return action", parameters);
+		assertEquals(3, parameters.size());
+		assertEquals("int x=0", parameters.get(0).toString());
+		assertEquals("int y", parameters.get(1).toString());
+		assertEquals("String s=new String(\"foo\")", parameters.get(2).toString());
+	}
+
+	@Test public void testCStyleReturnInitValue() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r ;\n" +
+				"r returns [int (*x)()=NULL] : 'a' ;\n");
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		Rule r = g.getRule("r");
+		AttributeScope retScope = r.returnScope;
+		List<Attribute> parameters = retScope.getAttributes();
+		assertNotNull("missing return action", parameters);
+		assertEquals(1, parameters.size());
+		String found = parameters.get(0).toString();
+		String expecting = "int (*)() x=NULL";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testArgsWithInitValues() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : r[32,34] ;" +
+				"r[int x, int y=3] : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_ARG_INIT_VALUES_ILLEGAL;
+		Object expectedArg = "y";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testArgsOnToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID[32,34] ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_ARGS_ON_TOKEN_REF;
+		Object expectedArg = "ID";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testArgsOnTokenInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'z' ID[32,34] ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_RULE_HAS_NO_ARGS;
+		Object expectedArg = "ID";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testLabelOnRuleRefInLexer() throws Exception {
+		String action = "$i.text";
+		String expecting = "(i!=null?i.getText():null)";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'z' i=ID {"+action+"};" +
+				"fragment ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"R",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRefToRuleRefInLexer() throws Exception {
+		String action = "$ID.text";
+		String expecting = "(ID1!=null?ID1.getText():null)";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'z' ID {"+action+"};" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"R",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testRefToRuleRefInLexerNoAttribute() throws Exception {
+		String action = "$ID";
+		String expecting = "ID1";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'z' ID {"+action+"};" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"R",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testCharLabelInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : x='z' ;\n");
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testCharListLabelInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : x+='z' ;\n");
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testWildcardCharLabelInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : x=. ;\n");
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testWildcardCharListLabelInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : x+=. ;\n");
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testMissingArgsInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"A : R ;" +
+				"R[int i] : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_MISSING_RULE_ARGS;
+		Object expectedArg = "R";
+		Object expectedArg2 = null;
+		// getting a second error @1:12, probably from nextToken
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testLexerRulePropertyRefs() throws Exception {
+		String action = "$text $type $line $pos $channel $index $start $stop";
+		String expecting = "getText() _type state.tokenStartLine state.tokenStartCharPositionInLine _channel -1 state.tokenStartCharIndex (getCharIndex()-1)";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'r' {"+action+"};\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"R",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testLexerLabelRefs() throws Exception {
+		String action = "$a $b.text $c $d.text";
+		String expecting = "a (b!=null?b.getText():null) c (d!=null?d.getText():null)";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : a='c' b='hi' c=. d=DUH {"+action+"};\n" +
+				"DUH : 'd' ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"R",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testSettingLexerRulePropertyRefs() throws Exception {
+		String action = "$text $type=1 $line=1 $pos=1 $channel=1 $index";
+		String expecting = "getText() _type=1 state.tokenStartLine=1 state.tokenStartCharPositionInLine=1 _channel=1 -1";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+				"R : 'r' {"+action+"};\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"R",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testArgsOnTokenInLexerRuleOfCombined() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : R;\n" +
+				"R : 'z' ID[32] ;\n" +
+				"ID : 'a';\n");
+
+		String lexerGrammarStr = g.getLexerGrammar();
+		StringReader sr = new StringReader(lexerGrammarStr);
+		Grammar lexerGrammar = new Grammar();
+		lexerGrammar.setFileName("<internally-generated-lexer>");
+		lexerGrammar.importTokenVocabulary(g);
+		lexerGrammar.parseAndBuildAST(sr);
+		lexerGrammar.defineGrammarSymbols();
+		lexerGrammar.checkNameSpaceAndActions();
+		sr.close();
+
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, lexerGrammar, "Java");
+		lexerGrammar.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_RULE_HAS_NO_ARGS;
+		Object expectedArg = "ID";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, lexerGrammar, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testMissingArgsOnTokenInLexerRuleOfCombined() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : R;\n" +
+				"R : 'z' ID ;\n" +
+				"ID[int i] : 'a';\n");
+
+		String lexerGrammarStr = g.getLexerGrammar();
+		StringReader sr = new StringReader(lexerGrammarStr);
+		Grammar lexerGrammar = new Grammar();
+		lexerGrammar.setFileName("<internally-generated-lexer>");
+		lexerGrammar.importTokenVocabulary(g);
+		lexerGrammar.parseAndBuildAST(sr);
+		lexerGrammar.defineGrammarSymbols();
+		lexerGrammar.checkNameSpaceAndActions();
+		sr.close();
+
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, lexerGrammar, "Java");
+		lexerGrammar.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_MISSING_RULE_ARGS;
+		Object expectedArg = "ID";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, lexerGrammar, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	// T R E E S
+
+	@Test public void testTokenLabelTreeProperty() throws Exception {
+		String action = "$id.tree;";
+		String expecting = "id_tree;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : id=ID {"+action+"} ;\n" +
+				"ID : 'a';\n");
+
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+				"a",
+				new CommonToken(ANTLRParser.ACTION,action),1);
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testTokenRefTreeProperty() throws Exception {
+		String action = "$ID.tree;";
+		String expecting = "ID1_tree;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID {"+action+"} ;" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		ActionTranslator translator = new ActionTranslator(generator,"a",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testAmbiguousTokenRef() throws Exception {
+		String action = "$ID;";
+		String expecting = "";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID ID {"+action+"};" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_NONUNIQUE_REF;
+		Object expectedArg = "ID";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testAmbiguousTokenRefWithProp() throws Exception {
+		String action = "$ID.text;";
+		String expecting = "";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+				"a : ID ID {"+action+"};" +
+				"ID : 'a';\n");
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_NONUNIQUE_REF;
+		Object expectedArg = "ID";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testRuleRefWithDynamicScope() throws Exception {
+		String action = "$field::x = $field.st;";
+		String expecting = "field_stack.peek().x = retval.st;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+				"field\n" +
+				"scope { ST x; }\n" +
+				"    :   'y' {"+action+"}\n" +
+				"    ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+			"field",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testAssignToOwnRulenameAttr() throws Exception {
+		String action = "$rule.tree = null;";
+		String expecting = "retval.tree = null;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+				"rule\n" +
+				"    : 'y' {" + action +"}\n" +
+				"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+			"rule",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testAssignToOwnParamAttr() throws Exception {
+		String action = "$rule.i = 42; $i = 23;";
+		String expecting = "i = 42; i = 23;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+				"rule[int i]\n" +
+				"    : 'y' {" + action +"}\n" +
+				"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+			"rule",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testIllegalAssignToOwnRulenameAttr() throws Exception {
+		String action = "$rule.stop = 0;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+				"rule\n" +
+				"    : 'y' {" + action +"}\n" +
+				"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+			"rule",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		Object expectedArg = "rule";
+		Object expectedArg2 = "stop";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testIllegalAssignToLocalAttr() throws Exception {
+		String action = "$tree = null; $st = null; $start = 0; $stop = 0; $text = 0;";
+		String expecting = "retval.tree = null; retval.st = null;   ";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+				"rule\n" +
+				"    : 'y' {" + action +"}\n" +
+				"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+			"rule",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		ArrayList<Message> expectedErrors = new ArrayList<Message>(3);
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, "start", "");
+		expectedErrors.add(expectedMessage);
+		GrammarSemanticsMessage expectedMessage2 =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, "stop", "");
+		expectedErrors.add(expectedMessage2);
+		GrammarSemanticsMessage expectedMessage3 =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, "text", "");
+		expectedErrors.add(expectedMessage3);
+		checkErrors(equeue, expectedErrors);
+
+		STGroup templates =
+			new STGroup();
+		ST actionST = new ST(templates, rawTranslation);
+		String found = actionST.render();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testIllegalAssignRuleRefAttr() throws Exception {
+		String action = "$other.tree = null;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+				"options { output = AST;}" +
+				"otherrule\n" +
+				"    : 'y' ;" +
+				"rule\n" +
+				"    : other=otherrule {" + action +"}\n" +
+				"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+			"rule",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		Object expectedArg = "other";
+		Object expectedArg2 = "tree";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testIllegalAssignTokenRefAttr() throws Exception {
+		String action = "$ID.text = \"test\";";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+				"ID\n" +
+				"    : 'y' ;" +
+				"rule\n" +
+				"    : ID {" + action +"}\n" +
+				"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+			"rule",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		Object expectedArg = "ID";
+		Object expectedArg2 = "text";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testAssignToTreeNodeAttribute() throws Exception {
+		String action = "$tree.scope = localScope;";
+		String expecting = "retval.tree.scope = localScope;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+				"options { output=AST; }" +
+				"rule\n" +
+				"@init {\n" +
+				"   Scope localScope=null;\n" +
+				"}\n" +
+				"@after {\n" +
+				"   ###$tree.scope = localScope;!!!\n" +
+				"}\n" +
+				"   : 'a' -> ^('a')\n" +
+				";");
+		Tool antlr = newTool();
+
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+	}
+
+	@Test public void testDoNotTranslateAttributeCompare() throws Exception {
+		String action = "$a.line == $b.line";
+		String expecting = "(a!=null?a.getLine():0) == (b!=null?b.getLine():0)";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"lexer grammar a;\n" +
+				"RULE:\n" +
+				"     a=ID b=ID {" + action + "}" +
+				"    ;\n" +
+				"ID : 'id';"
+		);
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+		ActionTranslator translator = new ActionTranslator(generator,
+			"RULE",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testDoNotTranslateScopeAttributeCompare() throws Exception {
+		String action = "if ($rule::foo == \"foo\" || 1) { System.out.println(\"ouch\"); }";
+		String expecting = "if (rule_stack.peek().foo == \"foo\" || 1) { System.out.println(\"ouch\"); }";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+				"rule\n" +
+				"scope {\n" +
+				"   String foo;" +
+				"} :\n" +
+				"     twoIDs" +
+				"    ;\n" +
+				"twoIDs:\n" +
+				"    ID ID {" + action + "}\n" +
+				"    ;\n" +
+				"ID : 'id';"
+		);
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+		ActionTranslator translator = new ActionTranslator(generator,
+			"twoIDs",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		// check that we didn't use scopeSetAttributeRef in translation!
+		boolean foundScopeSetAttributeRef = false;
+		for (int i = 0; i < translator.chunks.size(); i++) {
+			Object chunk = translator.chunks.get(i);
+			if (chunk instanceof ST) {
+				if (((ST)chunk).getName().equals("/scopeSetAttributeRef")) {
+					foundScopeSetAttributeRef = true;
+				}
+			}
+		}
+		assertFalse("action translator used scopeSetAttributeRef template in comparison!", foundScopeSetAttributeRef);
+		STGroup templates =
+			new STGroup();
+		ST actionST = new ST(templates, rawTranslation);
+		String found = actionST.render();
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testTreeRuleStopAttributeIsInvalid() throws Exception {
+		String action = "$r.x; $r.start; $r.stop";
+		String expecting = "(r!=null?((t.a_return)r).x:0); (r!=null?((CommonTree)r.start):null); $r.stop";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar t;\n" +
+				"options {ASTLabelType=CommonTree;}\n"+
+				"a returns [int x]\n" +
+				"  :\n" +
+				"  ;\n"+
+				"b : r=a {###"+action+"!!!}\n" +
+				"  ;");
+		System.out.println(g.toString());
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		int expectedMsgID = ErrorManager.MSG_UNKNOWN_RULE_ATTRIBUTE;
+		Object expectedArg = "a";
+		Object expectedArg2 = "stop";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		System.out.println("equeue:"+equeue);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testRefToTextAttributeForCurrentTreeRule() throws Exception {
+		String action = "$text";
+		String expecting = "input.getTokenStream().toString(" +
+			"input.getTreeAdaptor().getTokenStartIndex(retval.start)," +
+			"input.getTreeAdaptor().getTokenStopIndex(retval.start))";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar t;\n" +
+				"options {ASTLabelType=CommonTree;}\n" +
+				"a : {###"+action+"!!!}\n" +
+				"  ;\n");
+
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // codegen phase sets some vars we need
+		ST codeST = generator.getRecognizerST();
+		String code = codeST.render();
+		String found = code.substring(code.indexOf("###")+3,code.indexOf("!!!"));
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testTypeOfGuardedAttributeRefIsCorrect() throws Exception {
+		String action = "int x = $b::n;";
+		String expecting = "int x = b_stack.peek().n;";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+				"s : b ;\n"+
+				"b\n" +
+				"scope {\n" +
+				"  int n;\n" +
+				"} : '(' b ')' {"+action+"}\n" + // refers to current invocation's n
+				"  ;\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator, "b",
+			new CommonToken(ANTLRParser.ACTION,action),1);
+		String found = translator.translate();
+		assertEquals(expecting, found);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// S U P P O R T
+
+	protected void checkError(ErrorQueue equeue,
+							  GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		/*
+		System.out.println(equeue.infos);
+		System.out.println(equeue.warnings);
+		System.out.println(equeue.errors);
+		*/
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			Message m = equeue.errors.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size() > 0);
+		assertNotNull("couldn't find expected error: "+expectedMessage.msgID+" in "+equeue, foundMsg);
+		assertTrue("error is not a GrammarSemanticsMessage",
+			foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+		assertEquals(expectedMessage.arg2, foundMsg.arg2);
+	}
+
+	/** Allow checking for multiple errors in one test */
+	protected void checkErrors(ErrorQueue equeue,
+							   ArrayList<Message> expectedMessages)
+		throws Exception
+	{
+		ArrayList<Boolean> messageExpected = new ArrayList<Boolean>(equeue.errors.size());
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			Message m = equeue.errors.get(i);
+			boolean foundMsg = false;
+			for (int j = 0; j < expectedMessages.size(); j++) {
+				Message em = expectedMessages.get(j);
+				if (m.msgID==em.msgID && m.arg.equals(em.arg) && m.arg2.equals(em.arg2)) {
+					foundMsg = true;
+				}
+			}
+			if (foundMsg) {
+				messageExpected.add(i, Boolean.TRUE);
+			} else
+				messageExpected.add(i, Boolean.FALSE);
+		}
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			assertTrue("unexpected error:" + equeue.errors.get(i), messageExpected.get(i));
+		}
+	}
+}
diff --git a/tool/src/test/java/org/antlr/test/TestAutoAST.java b/tool/src/test/java/org/antlr/test/TestAutoAST.java
new file mode 100644
index 0000000..14aeed0
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestAutoAST.java
@@ -0,0 +1,822 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.junit.Ignore;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestAutoAST extends BaseTest {
+	protected boolean debug = false;
+
+	@Test public void testTokenList() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testTokenListInSingleAltBlock() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : (ID INT) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testSimpleRootAtOuterLevel() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID^ INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc 34", debug);
+		assertEquals("(abc 34)\n", found);
+	}
+
+	@Test public void testSimpleRootAtOuterLevelReverse() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT ID^ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34 abc", debug);
+		assertEquals("(abc 34)\n", found);
+	}
+
+	@Test public void testBang() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT! ID! INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34 dag 4532", debug);
+		assertEquals("abc 4532\n", found);
+	}
+
+	@Test public void testOptionalThenRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ( ID INT )? ID^ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a 1 b", debug);
+		assertEquals("(b a 1)\n", found);
+	}
+
+	@Test public void testLabeledStringRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : v='void'^ ID ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void foo;", debug);
+		assertEquals("(void foo ;)\n", found);
+	}
+
+	@Test public void testWildcard() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : v='void'^ . ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void foo;", debug);
+		assertEquals("(void foo ;)\n", found);
+	}
+
+	@Test public void testWildcardRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : v='void' .^ ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void foo;", debug);
+		assertEquals("(foo void ;)\n", found);
+	}
+
+	@Test public void testWildcardRootWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : v='void' x=.^ ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void foo;", debug);
+		assertEquals("(foo void ;)\n", found);
+	}
+
+	@Test public void testWildcardRootWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : v='void' x=.^ ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void foo;", debug);
+		assertEquals("(foo void ;)\n", found);
+	}
+
+	@Test public void testWildcardBangWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : v='void' x=.! ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void foo;", debug);
+		assertEquals("void ;\n", found);
+	}
+
+	@Test public void testRootRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID^ INT^ ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a 34 c", debug);
+		assertEquals("(34 a c)\n", found);
+	}
+
+	@Test public void testRootRoot2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT^ ID^ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a 34 c", debug);
+		assertEquals("(c (34 a))\n", found);
+	}
+
+	@Test public void testRootThenRootInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID^ (INT '*'^ ID)+ ;\n" +
+			"ID  : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a 34 * b 9 * c", debug);
+		assertEquals("(* (* (a 34) b 9) c)\n", found);
+	}
+
+	@Test public void testNestedSubrule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'void' (({;}ID|INT) ID | 'null' ) ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "void a b;", debug);
+		assertEquals("void a b ;\n", found);
+	}
+
+	@Test public void testInvokeRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a  : type ID ;\n" +
+			"type : {;}'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "int a", debug);
+		assertEquals("int a\n", found);
+	}
+
+	@Test public void testInvokeRuleAsRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a  : type^ ID ;\n" +
+			"type : {;}'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "int a", debug);
+		assertEquals("(int a)\n", found);
+	}
+
+	@Test public void testInvokeRuleAsRootWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a  : x=type^ ID ;\n" +
+			"type : {;}'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "int a", debug);
+		assertEquals("(int a)\n", found);
+	}
+
+	@Test public void testInvokeRuleAsRootWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a  : x+=type^ ID ;\n" +
+			"type : {;}'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "int a", debug);
+		assertEquals("(int a)\n", found);
+	}
+
+	@Test public void testRuleRootInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ('+'^ ID)* ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a+b+c+d", debug);
+		assertEquals("(+ (+ (+ a b) c) d)\n", found);
+	}
+
+	@Test public void testRuleInvocationRuleRootInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID (op^ ID)* ;\n" +
+			"op : {;}'+' | '-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a+b+c-d", debug);
+		assertEquals("(- (+ (+ a b) c) d)\n", found);
+	}
+
+	@Test public void testTailRecursion() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"s : a ;\n" +
+			"a : atom ('exp'^ a)? ;\n" +
+			"atom : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "s", "3 exp 4 exp 5", debug);
+		assertEquals("(exp 3 (exp 4 5))\n", found);
+	}
+
+	@Test public void testSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID|INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testSetRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ('+' | '-')^ ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "+abc", debug);
+		assertEquals("(+ abc)\n", found);
+	}
+
+	@Test public void testSetRootWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=('+' | '-')^ ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "+abc", debug);
+		assertEquals("(+ abc)\n", found);
+	}
+
+	@Test public void testSetAsRuleRootInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID (('+'|'-')^ ID)* ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a+b-c", debug);
+		assertEquals("(- (+ a b) c)\n", found);
+	}
+
+	@Test public void testNotSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ~ID '+' INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34+2", debug);
+		assertEquals("34 + 2\n", found);
+	}
+
+	@Test public void testNotSetWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=~ID '+' INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34+2", debug);
+		assertEquals("34 + 2\n", found);
+	}
+
+	@Test public void testNotSetWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=~ID '+' INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34+2", debug);
+		assertEquals("34 + 2\n", found);
+	}
+
+	@Test public void testNotSetRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ~'+'^ INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34 55", debug);
+		assertEquals("(34 55)\n", found);
+	}
+
+	@Test public void testNotSetRootWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ~'+'^ INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34 55", debug);
+		assertEquals("(34 55)\n", found);
+	}
+
+	@Test public void testNotSetRootWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ~'+'^ INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "34 55", debug);
+		assertEquals("(34 55)\n", found);
+	}
+
+	@Test public void testNotSetRuleRootInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT (~INT^ INT)* ;\n" +
+			"blort : '+' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "3+4+5", debug);
+		assertEquals("(+ (+ 3 4) 5)\n", found);
+	}
+
+	@Test public void testTokenLabelReuse() throws Exception {
+		// check for compilation problem due to multiple defines
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : id=ID id=ID {System.out.print(\"2nd id=\"+$id.text+';');} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		assertEquals("2nd id=b;a b\n", found);
+	}
+
+	@Test public void testTokenLabelReuse2() throws Exception {
+		// check for compilation problem due to multiple defines
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : id=ID id=ID^ {System.out.print(\"2nd id=\"+$id.text+';');} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		assertEquals("2nd id=b;(b a)\n", found);
+	}
+
+	@Test public void testTokenListLabelReuse() throws Exception {
+		// check for compilation problem due to multiple defines
+		// make sure ids has both ID tokens
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ids+=ID ids+=ID {System.out.print(\"id list=\"+$ids+';');} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		String expecting = "id list=[[@0,0:0='a',<4>,1:0], [@2,2:2='b',<4>,1:2]];a b\n";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testTokenListLabelReuse2() throws Exception {
+		// check for compilation problem due to multiple defines
+		// make sure ids has both ID tokens
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ids+=ID^ ids+=ID {System.out.print(\"id list=\"+$ids+';');} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		String expecting = "id list=[[@0,0:0='a',<4>,1:0], [@2,2:2='b',<4>,1:2]];(a b)\n";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testTokenListLabelRuleRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : id+=ID^ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	@Test public void testTokenListLabelBang() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : id+=ID! ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a", debug);
+		assertEquals("", found);
+	}
+
+	@Test public void testRuleListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x+=b x+=b {" +
+			"Tree t=(Tree)$x.get(1);" +
+			"System.out.print(\"2nd x=\"+t.toStringTree()+';');} ;\n" +
+			"b : ID;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		assertEquals("2nd x=b;a b\n", found);
+	}
+
+	@Test public void testRuleListLabelRuleRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ( x+=b^ )+ {" +
+			"System.out.print(\"x=\"+((CommonTree)$x.get(1)).toStringTree()+';');} ;\n" +
+			"b : ID;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		assertEquals("x=(b a);(b a)\n", found);
+	}
+
+	@Test public void testRuleListLabelBang() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x+=b! x+=b {" +
+			"System.out.print(\"1st x=\"+((CommonTree)$x.get(0)).toStringTree()+';');} ;\n" +
+			"b : ID;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b", debug);
+		assertEquals("1st x=a;b\n", found);
+	}
+
+	@Test public void testComplicatedMelange() throws Exception {
+		// check for compilation problem
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : A b=B b=B c+=C c+=C D {String s = $D.text;} ;\n" +
+			"A : 'a' ;\n" +
+			"B : 'b' ;\n" +
+			"C : 'c' ;\n" +
+			"D : 'd' ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a b b c c d", debug);
+		assertEquals("a b b c c d\n", found);
+	}
+
+	@Test public void testReturnValueWithAST() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID b {System.out.println($b.i);} ;\n" +
+			"b returns [int i] : INT {$i=Integer.parseInt($INT.text);} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc 34", debug);
+		assertEquals("34\nabc 34\n", found);
+	}
+
+	@Test public void testSetLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options { output=AST; }\n" +
+			"r : (INT|ID)+ ; \n" +
+			"ID : 'a'..'z' + ;\n" +
+			"INT : '0'..'9' +;\n" +
+			"WS: (' ' | '\\n' | '\\t')+ {$channel = HIDDEN;};\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "r", "abc 34 d", debug);
+		assertEquals("abc 34 d\n", found);
+	}
+
+	@Test public void testExtraTokenInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"decl : type^ ID '='! INT ';'! ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "int 34 x=1;", debug);
+		assertEquals("line 1:4 extraneous input '34' expecting ID\n", this.stderrDuringParse);
+		assertEquals("(int x 1)\n", found); // tree gets correct x and 1 tokens
+	}
+
+	@Test public void testMissingIDInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"tokens {EXPR;}\n" +
+			"decl : type^ ID '='! INT ';'! ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "int =1;", debug);
+		assertEquals("line 1:4 missing ID at '='\n", this.stderrDuringParse);
+		assertEquals("(int <missing ID> 1)\n", found); // tree gets invented ID token
+	}
+
+	@Test public void testMissingSetInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"tokens {EXPR;}\n" +
+			"decl : type^ ID '='! INT ';'! ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "x=1;", debug);
+		assertEquals("line 1:0 mismatched input 'x' expecting set null\n", this.stderrDuringParse);
+		assertEquals("(<error: x> x 1)\n", found); // tree gets invented ID token
+	}
+
+	@Test public void testMissingTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" + // follow is EOF
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc", debug);
+		assertEquals("line 1:3 missing INT at '<EOF>'\n", this.stderrDuringParse);
+		assertEquals("abc <missing INT>\n", found);
+	}
+
+	@Test public void testMissingTokenGivesErrorNodeInInvokedRule() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b ;\n" +
+			"b : ID INT ;\n" + // follow should see EOF
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc", debug);
+		assertEquals("line 1:3 mismatched input '<EOF>' expecting INT\n", this.stderrDuringParse);
+		assertEquals("<mismatched token: [@1,3:3='<EOF>',<-1>,1:3], resync=abc>\n", found);
+	}
+
+	@Test public void testExtraTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b c ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc ick 34", debug);
+		assertEquals("line 1:4 extraneous input 'ick' expecting INT\n", this.stderrDuringParse);
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testMissingFirstTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "34", debug);
+		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
+		assertEquals("<missing ID> 34\n", found);
+	}
+
+	@Test public void testMissingFirstTokenGivesErrorNode2() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b c ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "34", debug);
+		// finds an error at the first token, 34, and re-syncs.
+		// re-synchronizing does not consume a token because 34 follows
+		// ref to rule b (start of c). It then matches 34 in c.
+		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
+		assertEquals("<missing ID> 34\n", found);
+	}
+
+	@Test public void testNoViableAltGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b | c ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"S : '*' ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "*", debug);
+		assertEquals("line 1:0 no viable alternative at input '*'\n", this.stderrDuringParse);
+		assertEquals("<unexpected: [@0,0:0='*',<6>,1:0], resync=*>\n", found);
+	}
+
+
+	// S U P P O R T
+
+	public void _test() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a :  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "abc 34", debug);
+		assertEquals("\n", found);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestBufferedTreeNodeStream.java b/tool/src/test/java/org/antlr/test/TestBufferedTreeNodeStream.java
new file mode 100644
index 0000000..58e5b4b
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestBufferedTreeNodeStream.java
@@ -0,0 +1,77 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.tree.BufferedTreeNodeStream;
+import org.antlr.runtime.tree.CommonTree;
+import org.antlr.runtime.tree.Tree;
+import org.antlr.runtime.tree.TreeNodeStream;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestBufferedTreeNodeStream extends TestTreeNodeStream {
+    // inherits tests; these methods make it use a new buffer
+
+	@Override
+	public TreeNodeStream newStream(Object t) {
+		return new BufferedTreeNodeStream(t);
+	}
+
+	@Override
+    public String toTokenTypeString(TreeNodeStream stream) {
+        return ((BufferedTreeNodeStream)stream).toTokenTypeString();
+    }
+
+    @Test public void testSeek() throws Exception {
+        // ^(101 ^(102 103 ^(106 107) ) 104 105)
+        // stream has 7 real + 6 nav nodes
+        // Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+        Tree r0 = new CommonTree(new CommonToken(101));
+        Tree r1 = new CommonTree(new CommonToken(102));
+        r0.addChild(r1);
+        r1.addChild(new CommonTree(new CommonToken(103)));
+        Tree r2 = new CommonTree(new CommonToken(106));
+        r2.addChild(new CommonTree(new CommonToken(107)));
+        r1.addChild(r2);
+        r0.addChild(new CommonTree(new CommonToken(104)));
+        r0.addChild(new CommonTree(new CommonToken(105)));
+
+        TreeNodeStream stream = newStream(r0);
+        stream.consume(); // consume 101
+        stream.consume(); // consume DN
+        stream.consume(); // consume 102
+        stream.seek(7);   // seek to 107
+        assertEquals(107, ((Tree)stream.LT(1)).getType());
+        stream.consume(); // consume 107
+        stream.consume(); // consume UP
+        stream.consume(); // consume UP
+        assertEquals(104, ((Tree)stream.LT(1)).getType());
+    }
+}
diff --git a/tool/src/test/java/org/antlr/test/TestCharDFAConversion.java b/tool/src/test/java/org/antlr/test/TestCharDFAConversion.java
new file mode 100644
index 0000000..c1af910
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestCharDFAConversion.java
@@ -0,0 +1,550 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.DFAOptimizer;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.tool.*;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.junit.Assert.*;
+
+public class TestCharDFAConversion extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestCharDFAConversion() {
+	}
+
+	// R A N G E S  &  S E T S
+
+	@Test public void testSimpleRangeVersusChar() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a'..'z' '@' | 'k' '$' ;");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'k'->.s1\n" +
+			".s0-{'a'..'j', 'l'..'z'}->:s2=>1\n" +
+			".s1-'$'->:s3=>2\n" +
+			".s1-'@'->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testRangeWithDisjointSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a'..'z' '@'\n" +
+			"  | ('k'|'9'|'p') '$'\n" +
+			"  ;\n");
+		g.createLookaheadDFAs();
+		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'}
+		String expecting =
+			".s0-'9'->:s3=>2\n" +
+			".s0-{'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>1\n" +
+			".s0-{'k', 'p'}->.s1\n" +
+			".s1-'$'->:s3=>2\n" +
+			".s1-'@'->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testDisjointSetCollidingWithTwoRanges() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : ('a'..'z'|'0'..'9') '@'\n" +
+			"  | ('k'|'9'|'p') '$'\n" +
+			"  ;\n");
+		g.createLookaheadDFAs(false);
+		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
+		// into 0..8
+		String expecting =
+			".s0-{'0'..'8', 'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>1\n" +
+			".s0-{'9', 'k', 'p'}->.s1\n" +
+			".s1-'$'->:s3=>2\n" +
+			".s1-'@'->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testDisjointSetCollidingWithTwoRangesCharsFirst() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : ('k'|'9'|'p') '$'\n" +
+			"  | ('a'..'z'|'0'..'9') '@'\n" +
+			"  ;\n");
+		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
+		// into 0..8
+		String expecting =
+			".s0-{'0'..'8', 'a'..'j', 'l'..'o', 'q'..'z'}->:s3=>2\n" +
+			".s0-{'9', 'k', 'p'}->.s1\n" +
+			".s1-'$'->:s2=>1\n" +
+			".s1-'@'->:s3=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testDisjointSetCollidingWithTwoRangesAsSeparateAlts() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a'..'z' '@'\n" +
+			"  | 'k' '$'\n" +
+			"  | '9' '$'\n" +
+			"  | 'p' '$'\n" +
+			"  | '0'..'9' '@'\n" +
+			"  ;\n");
+		// must break up a..z into {'a'..'j', 'l'..'o', 'q'..'z'} and 0..9
+		// into 0..8
+		String expecting =
+			".s0-'0'..'8'->:s8=>5\n" +
+			".s0-'9'->.s6\n" +
+			".s0-'k'->.s1\n" +
+			".s0-'p'->.s4\n" +
+			".s0-{'a'..'j', 'l'..'o', 'q'..'z'}->:s2=>1\n" +
+			".s1-'$'->:s3=>2\n" +
+			".s1-'@'->:s2=>1\n" +
+			".s4-'$'->:s5=>4\n" +
+			".s4-'@'->:s2=>1\n" +
+			".s6-'$'->:s7=>3\n" +
+			".s6-'@'->:s8=>5\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testKeywordVersusID() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"IF : 'if' ;\n" + // choose this over ID
+			"ID : ('a'..'z')+ ;\n");
+		String expecting =
+			".s0-'a'..'z'->:s2=>1\n" +
+			".s0-<EOT>->:s1=>2\n";
+		checkDecision(g, 1, expecting, null);
+		expecting =
+			".s0-'i'->.s1\n" +
+			".s0-{'a'..'h', 'j'..'z'}->:s4=>2\n" +
+			".s1-'f'->.s2\n" +
+			".s1-<EOT>->:s4=>2\n" +
+			".s2-'a'..'z'->:s4=>2\n" +
+			".s2-<EOT>->:s3=>1\n";
+		checkDecision(g, 2, expecting, null);
+	}
+
+	@Test public void testIdenticalRules() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a' ;\n" +
+			"B : 'a' ;\n"); // can't reach this
+		String expecting =
+			".s0-'a'->.s1\n" +
+			".s1-<EOT>->:s2=>1\n";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		checkDecision(g, 1, expecting, new int[] {2});
+
+		assertEquals("unexpected number of expected problems",
+				    1, equeue.size());
+		Message msg = equeue.errors.get(0);
+		assertTrue("warning must be an unreachable alt",
+				    msg instanceof GrammarUnreachableAltsMessage);
+		GrammarUnreachableAltsMessage u = (GrammarUnreachableAltsMessage)msg;
+		assertEquals("[2]", u.alts.toString());
+
+	}
+
+	@Test public void testAdjacentNotCharLoops() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : (~'r')+ ;\n" +
+			"B : (~'s')+ ;\n");
+		String expecting =
+			".s0-'r'->:s3=>2\n" +
+			".s0-'s'->:s2=>1\n" +
+			".s0-{'\\u0000'..'q', 't'..'\\uFFFF'}->.s1\n" +
+			".s1-'r'->:s3=>2\n" +
+			".s1-<EOT>->:s2=>1\n" +
+			".s1-{'\\u0000'..'q', 't'..'\\uFFFF'}->.s1\n";
+		checkDecision(g, 3, expecting, null);
+	}
+
+	@Test public void testNonAdjacentNotCharLoops() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : (~'r')+ ;\n" +
+			"B : (~'t')+ ;\n");
+		String expecting =
+			".s0-'r'->:s3=>2\n" +
+			".s0-'t'->:s2=>1\n" +
+			".s0-{'\\u0000'..'q', 's', 'u'..'\\uFFFF'}->.s1\n" +
+			".s1-'r'->:s3=>2\n" +
+			".s1-<EOT>->:s2=>1\n" +
+			".s1-{'\\u0000'..'q', 's', 'u'..'\\uFFFF'}->.s1\n";
+		checkDecision(g, 3, expecting, null);
+	}
+
+	@Test public void testLoopsWithOptimizedOutExitBranches() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'x'* ~'x'+ ;\n");
+		String expecting =
+			".s0-'x'->:s1=>1\n" +
+			".s0-{'\\u0000'..'w', 'y'..'\\uFFFF'}->:s2=>2\n";
+		checkDecision(g, 1, expecting, null);
+
+		// The optimizer yanks out all exit branches from EBNF blocks
+		// This is ok because we've already verified there are no problems
+		// with the enter/exit decision
+		DFAOptimizer optimizer = new DFAOptimizer(g);
+		optimizer.optimize();
+		FASerializer serializer = new FASerializer(g);
+		DFA dfa = g.getLookaheadDFA(1);
+		String result = serializer.serialize(dfa.startState);
+		expecting = ".s0-'x'->:s1=>1\n";
+		assertEquals(expecting, result);
+	}
+
+	// N O N G R E E D Y
+
+	@Test public void testNonGreedy() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"CMT : '/*' ( options {greedy=false;} : . )* '*/' ;");
+		String expecting =
+			".s0-'*'->.s1\n" +
+			".s0-{'\\u0000'..')', '+'..'\\uFFFF'}->:s3=>1\n" +
+			".s1-'/'->:s2=>2\n" +
+			".s1-{'\\u0000'..'.', '0'..'\\uFFFF'}->:s3=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNonGreedyWildcardStar() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"SLCMT : '//' ( options {greedy=false;} : . )* '\n' ;");
+		String expecting =
+			".s0-'\\n'->:s1=>2\n" +
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNonGreedyByDefaultWildcardStar() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"SLCMT : '//' .* '\n' ;");
+		String expecting =
+			".s0-'\\n'->:s1=>2\n" +
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNonGreedyWildcardPlus() throws Exception {
+		// same DFA as nongreedy .* but code gen checks number of
+		// iterations at runtime
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"SLCMT : '//' ( options {greedy=false;} : . )+ '\n' ;");
+		String expecting =
+			".s0-'\\n'->:s1=>2\n" +
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNonGreedyByDefaultWildcardPlus() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"SLCMT : '//' .+ '\n' ;");
+		String expecting =
+			".s0-'\\n'->:s1=>2\n" +
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNonGreedyByDefaultWildcardPlusWithParens() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"SLCMT : '//' (.)+ '\n' ;");
+		String expecting =
+			".s0-'\\n'->:s1=>2\n" +
+			".s0-{'\\u0000'..'\\t', '\\u000B'..'\\uFFFF'}->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNonWildcardNonGreedy() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"DUH : (options {greedy=false;}:'x'|'y')* 'xy' ;");
+		String expecting =
+			".s0-'x'->.s1\n" +
+			".s0-'y'->:s4=>2\n" +
+			".s1-'x'->:s3=>1\n" +
+			".s1-'y'->:s2=>3\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNonWildcardEOTMakesItWorkWithoutNonGreedyOption() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"DUH : ('x'|'y')* 'xy' ;");
+		String expecting =
+			".s0-'x'->.s1\n" +
+			".s0-'y'->:s4=>1\n" +
+			".s1-'x'->:s4=>1\n" +
+			".s1-'y'->.s2\n" +
+			".s2-'x'..'y'->:s4=>1\n" +
+			".s2-<EOT>->:s3=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testAltConflictsWithLoopThenExit() throws Exception {
+		// \" predicts alt 1, but wildcard then " can predict exit also
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"STRING : '\"' (options {greedy=false;}: '\\\\\"' | .)* '\"' ;\n"
+		);
+		String expecting =
+			".s0-'\"'->:s1=>3\n" +
+				".s0-'\\\\'->.s2\n" +
+				".s0-{'\\u0000'..'!', '#'..'[', ']'..'\\uFFFF'}->:s4=>2\n" +
+				".s2-'\"'->:s3=>1\n" +
+				".s2-{'\\u0000'..'!', '#'..'\\uFFFF'}->:s4=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNonGreedyLoopThatNeverLoops() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"DUH : (options {greedy=false;}:'x')+ ;"); // loop never matched
+		String expecting =
+			":s0=>2\n";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		checkDecision(g, 1, expecting, new int[] {1});
+
+		assertEquals("unexpected number of expected problems",
+				    1, equeue.size());
+		Message msg = equeue.errors.get(0);
+		assertTrue("warning must be an unreachable alt",
+				   msg instanceof GrammarUnreachableAltsMessage);
+		GrammarUnreachableAltsMessage u = (GrammarUnreachableAltsMessage)msg;
+		assertEquals("[1]", u.alts.toString());
+	}
+
+	@Test public void testRecursive() throws Exception {
+		// this is cool because the 3rd alt includes !(all other possibilities)
+		Grammar g = new Grammar(
+			"lexer grammar duh;\n" +
+			"SUBTEMPLATE\n" +
+			"        :       '{'\n" +
+			"                ( SUBTEMPLATE\n" +
+			"                | ESC\n" +
+			"                | ~('}'|'\\\\'|'{')\n" +
+			"                )*\n" +
+			"                '}'\n" +
+			"        ;\n" +
+			"fragment\n" +
+			"ESC     :       '\\\\' . ;");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'\\\\'->:s2=>2\n" +
+			".s0-'{'->:s1=>1\n" +
+			".s0-'}'->:s4=>4\n" +
+			".s0-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFF'}->:s3=>3\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testRecursive2() throws Exception {
+		// this is also cool because it resolves \\ to the ESC alt; the
+		// resulting DFA is just less efficient
+		Grammar g = new Grammar(
+			"lexer grammar duh;\n" +
+			"SUBTEMPLATE\n" +
+			"        :       '{'\n" +
+			"                ( SUBTEMPLATE\n" +
+			"                | ESC\n" +
+			"                | ~('}'|'{')\n" +
+			"                )*\n" +
+			"                '}'\n" +
+			"        ;\n" +
+			"fragment\n" +
+			"ESC     :       '\\\\' . ;");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'\\\\'->.s3\n" +
+			".s0-'{'->:s2=>1\n" +
+			".s0-'}'->:s1=>4\n" +
+			".s0-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFF'}->:s5=>3\n" +
+			".s3-'\\\\'->:s8=>2\n" +
+			".s3-'{'->:s7=>2\n" +
+			".s3-'}'->.s4\n" +
+			".s3-{'\\u0000'..'[', ']'..'z', '|', '~'..'\\uFFFF'}->:s6=>2\n" +
+			".s4-'\\u0000'..'\\uFFFF'->:s6=>2\n" +
+			".s4-<EOT>->:s5=>3\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNotFragmentInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"A : 'a' | ~B {;} ;\n" +
+			"fragment B : 'a' ;\n");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'a'->:s1=>1\n" +
+			".s0-{'\\u0000'..'`', 'b'..'\\uFFFF'}->:s2=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNotSetFragmentInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"A : B | ~B {;} ;\n" +
+			"fragment B : 'a'|'b' ;\n");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'a'..'b'->:s1=>1\n" +
+			".s0-{'\\u0000'..'`', 'c'..'\\uFFFF'}->:s2=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNotTokenInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"A : 'x' ('a' | ~B {;}) ;\n" +
+			"B : 'a' ;\n");
+		g.createLookaheadDFAs();
+		String expecting =
+			".s0-'a'->:s1=>1\n" +
+			".s0-{'\\u0000'..'`', 'b'..'\\uFFFF'}->:s2=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNotComplicatedSetRuleInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"A : B | ~B {;} ;\n" +
+			"fragment B : 'a'|'b'|'c'..'e'|C ;\n" +
+			"fragment C : 'f' ;\n"); // has to seen from B to C
+		String expecting =
+			".s0-'a'..'f'->:s1=>1\n" +
+			".s0-{'\\u0000'..'`', 'g'..'\\uFFFF'}->:s2=>2\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testNotSetWithRuleInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"T : ~('a' | B) | 'a';\n" +
+			"fragment\n" +
+			"B : 'b' ;\n" +
+			"C : ~'x'{;} ;"); // force Tokens to not collapse T|C
+		String expecting =
+			".s0-'b'->:s3=>2\n" +
+			".s0-'x'->:s2=>1\n" +
+			".s0-{'\\u0000'..'a', 'c'..'w', 'y'..'\\uFFFF'}->.s1\n" +
+			".s1-<EOT>->:s2=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testSetCallsRuleWithNot() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar A;\n" +
+			"T : ~'x' ;\n" +
+			"S : 'x' (T | 'x') ;\n");
+		String expecting =
+			".s0-'x'->:s2=>2\n" +
+			".s0-{'\\u0000'..'w', 'y'..'\\uFFFF'}->:s1=>1\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	@Test public void testSynPredInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar T;\n"+
+			"LT:  '<' ' '*\n" +
+			"  |  ('<' IDENT) => '<' IDENT '>'\n" + // this was causing syntax error
+			"  ;\n" +
+			"IDENT:    'a'+;\n");
+		// basically, the Tokens rule should not do the set compression test
+		String expecting =
+			".s0-'<'->:s1=>1\n" +
+			".s0-'a'->:s2=>2\n";
+		checkDecision(g, 4, expecting, null); // 4 is Tokens rule
+	}
+
+	// S U P P O R T
+
+	public void _template() throws Exception {
+		Grammar g = new Grammar(
+			"grammar T;\n"+
+			"a : A | B;");
+		String expecting =
+			"\n";
+		checkDecision(g, 1, expecting, null);
+	}
+
+	protected void checkDecision(Grammar g,
+								 int decision,
+								 String expecting,
+								 int[] expectingUnreachableAlts)
+		throws Exception
+	{
+
+		// mimic actions of org.antlr.Tool first time for grammar g
+		if ( g.getCodeGenerator()==null ) {
+			CodeGenerator generator = new CodeGenerator(null, g, "Java");
+			g.setCodeGenerator(generator);
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+
+		DFA dfa = g.getLookaheadDFA(decision);
+		assertNotNull("unknown decision #"+decision, dfa);
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(dfa.startState);
+		//System.out.print(result);
+		List<Integer> nonDetAlts = dfa.getUnreachableAlts();
+		//System.out.println("alts w/o predict state="+nonDetAlts);
+
+		// first make sure nondeterministic alts are as expected
+		if ( expectingUnreachableAlts==null ) {
+			if ( nonDetAlts!=null && !nonDetAlts.isEmpty() ) {
+				System.err.println("nondeterministic alts (should be empty): "+nonDetAlts);
+			}
+			assertEquals("unreachable alts mismatch", 0, nonDetAlts!=null?nonDetAlts.size():0);
+		}
+		else {
+			for (int i=0; i<expectingUnreachableAlts.length; i++) {
+				assertTrue("unreachable alts mismatch",
+						   nonDetAlts!=null?nonDetAlts.contains(new Integer(expectingUnreachableAlts[i])):false);
+			}
+		}
+		assertEquals(expecting, result);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestCommonTokenStream.java b/tool/src/test/java/org/antlr/test/TestCommonTokenStream.java
new file mode 100644
index 0000000..3a85a1f
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestCommonTokenStream.java
@@ -0,0 +1,227 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.test;
+
+import org.antlr.runtime.*;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Interpreter;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/** This actually tests the new (12/4/09) buffered but on-demand fetching stream. */
+public class TestCommonTokenStream extends BaseTest {
+    @Test public void testFirstToken() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar t;\n"+
+            "ID : 'a'..'z'+;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';';\n" +
+            "ASSIGN : '=';\n" +
+            "PLUS : '+';\n" +
+            "MULT : '*';\n" +
+            "WS : ' '+;\n");
+        // Tokens: 012345678901234567
+        // Input:  x = 3 * 0 + 2 * 0;
+        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
+        Interpreter lexEngine = new Interpreter(g, input);
+        BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
+
+        String result = tokens.LT(1).getText();
+        String expecting = "x";
+        assertEquals(expecting, result);
+    }
+
+    @Test public void test2ndToken() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar t;\n"+
+            "ID : 'a'..'z'+;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';';\n" +
+            "ASSIGN : '=';\n" +
+            "PLUS : '+';\n" +
+            "MULT : '*';\n" +
+            "WS : ' '+;\n");
+        // Tokens: 012345678901234567
+        // Input:  x = 3 * 0 + 2 * 0;
+        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
+        Interpreter lexEngine = new Interpreter(g, input);
+        BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
+
+        String result = tokens.LT(2).getText();
+        String expecting = " ";
+        assertEquals(expecting, result);
+    }
+
+    @Test public void testCompleteBuffer() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar t;\n"+
+            "ID : 'a'..'z'+;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';';\n" +
+            "ASSIGN : '=';\n" +
+            "PLUS : '+';\n" +
+            "MULT : '*';\n" +
+            "WS : ' '+;\n");
+        // Tokens: 012345678901234567
+        // Input:  x = 3 * 0 + 2 * 0;
+        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
+        Interpreter lexEngine = new Interpreter(g, input);
+        BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
+
+        int i = 1;
+        Token t = tokens.LT(i);
+        while ( t.getType()!=Token.EOF ) {
+            i++;
+            t = tokens.LT(i);
+        }
+        tokens.LT(i++); // push it past end
+        tokens.LT(i++);
+
+        String result = tokens.toString();
+        String expecting = "x = 3 * 0 + 2 * 0;";
+        assertEquals(expecting, result);
+    }
+
+    @Test public void testCompleteBufferAfterConsuming() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar t;\n"+
+            "ID : 'a'..'z'+;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';';\n" +
+            "ASSIGN : '=';\n" +
+            "PLUS : '+';\n" +
+            "MULT : '*';\n" +
+            "WS : ' '+;\n");
+        // Tokens: 012345678901234567
+        // Input:  x = 3 * 0 + 2 * 0;
+        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
+        Interpreter lexEngine = new Interpreter(g, input);
+        BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
+
+        Token t = tokens.LT(1);
+        while ( t.getType()!=Token.EOF ) {
+            tokens.consume();
+            t = tokens.LT(1);
+        }
+        tokens.consume();
+        tokens.LT(1); // push it past end
+        tokens.consume();
+        tokens.LT(1);
+
+        String result = tokens.toString();
+        String expecting = "x = 3 * 0 + 2 * 0;";
+        assertEquals(expecting, result);
+    }
+
+    @Test public void testLookback() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar t;\n"+
+            "ID : 'a'..'z'+;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';';\n" +
+            "ASSIGN : '=';\n" +
+            "PLUS : '+';\n" +
+            "MULT : '*';\n" +
+            "WS : ' '+;\n");
+        // Tokens: 012345678901234567
+        // Input:  x = 3 * 0 + 2 * 0;
+        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
+        Interpreter lexEngine = new Interpreter(g, input);
+        BufferedTokenStream tokens = new BufferedTokenStream(lexEngine);
+
+        tokens.consume(); // get x into buffer
+        Token t = tokens.LT(-1);
+        assertEquals("x", t.getText());
+
+        tokens.consume();
+        tokens.consume(); // consume '='
+        t = tokens.LT(-3);
+        assertEquals("x", t.getText());
+        t = tokens.LT(-2);
+        assertEquals(" ", t.getText());
+        t = tokens.LT(-1);
+        assertEquals("=", t.getText());
+    }
+
+    @Test public void testOffChannel() throws Exception {
+        TokenSource lexer = // simulate input " x =34  ;\n"
+            new TokenSource() {
+                int i = 0;
+                Token[] tokens = {
+                    new CommonToken(1," "),
+                    new CommonToken(1,"x"),
+                    new CommonToken(1," "),
+                    new CommonToken(1,"="),
+                    new CommonToken(1,"34"),
+                    new CommonToken(1," "),
+                    new CommonToken(1," "),
+                    new CommonToken(1,";"),
+                    new CommonToken(1,"\n"),
+                    new CommonToken(Token.EOF,"")
+                };
+                {
+                    tokens[0].setChannel(Lexer.HIDDEN);
+                    tokens[2].setChannel(Lexer.HIDDEN);
+                    tokens[5].setChannel(Lexer.HIDDEN);
+                    tokens[6].setChannel(Lexer.HIDDEN);
+                    tokens[8].setChannel(Lexer.HIDDEN);
+                }
+			@Override
+                public Token nextToken() {
+                    return tokens[i++];
+                }
+			@Override
+                public String getSourceName() { return "test"; }
+            };
+
+        CommonTokenStream tokens = new CommonTokenStream(lexer);
+
+        assertEquals("x", tokens.LT(1).getText()); // must skip first off channel token
+        tokens.consume();
+        assertEquals("=", tokens.LT(1).getText());
+        assertEquals("x", tokens.LT(-1).getText());
+
+        tokens.consume();
+        assertEquals("34", tokens.LT(1).getText());
+        assertEquals("=", tokens.LT(-1).getText());
+
+        tokens.consume();
+        assertEquals(";", tokens.LT(1).getText());
+        assertEquals("34", tokens.LT(-1).getText());
+
+        tokens.consume();
+        assertEquals(Token.EOF, tokens.LA(1));
+        assertEquals(";", tokens.LT(-1).getText());
+
+        assertEquals("34", tokens.LT(-2).getText());
+        assertEquals("=", tokens.LT(-3).getText());
+        assertEquals("x", tokens.LT(-4).getText());
+    }
+}
\ No newline at end of file
diff --git a/tool/src/test/java/org/antlr/test/TestCompositeGrammars.java b/tool/src/test/java/org/antlr/test/TestCompositeGrammars.java
new file mode 100644
index 0000000..cad4add
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestCompositeGrammars.java
@@ -0,0 +1,975 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.tool.*;
+import org.junit.Test;
+
+import java.io.File;
+
+import static org.junit.Assert.*;
+
+public class TestCompositeGrammars extends BaseTest {
+	protected boolean debug = false;
+
+	@Test public void testWildcardStillWorks() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String grammar =
+			"parser grammar S;\n" +
+			"a : B . C ;\n"; // not qualified ID
+		Grammar g = new Grammar(grammar);
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testDelegatorInvokesDelegateRule() throws Exception {
+		String slave =
+			"parser grammar S;\n" +
+			"a : B {System.out.println(\"S.a\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : a ;\n" +
+			"B : 'b' ;" + // defines B from inherited token space
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "b", debug);
+		assertEquals("S.a\n", found);
+	}
+
+	@Test public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception {
+		// must generate something like:
+		// public int a(int x) throws RecognitionException { return gS.a(x); }
+		// in M.
+		String slave =
+			"parser grammar S;\n" +
+			"a[int x] returns [int y] : B {System.out.print(\"S.a\"); $y=1000;} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : label=a[3] {System.out.println($label.y);} ;\n" +
+			"B : 'b' ;" + // defines B from inherited token space
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "b", debug);
+		assertEquals("S.a1000\n", found);
+	}
+
+	@Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception {
+		// must generate a delegation method in M that forwards to gS.a()
+		// and returns the delegate rule's return struct so $a.text works.
+		String slave =
+			"parser grammar S;\n" +
+			"a : B {System.out.print(\"S.a\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : a {System.out.println($a.text);} ;\n" +
+			"B : 'b' ;" + // defines B from inherited token space
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "b", debug);
+		assertEquals("S.ab\n", found);
+	}
+
+	@Test public void testDelegatorAccessesDelegateMembers() throws Exception {
+		String slave =
+			"parser grammar S;\n" +
+			"@members {\n" +
+			"  public void foo() {System.out.println(\"foo\");}\n" +
+			"}\n" +
+			"a : B ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +		// uses no rules from the import
+			"import S;\n" +
+			"s : 'b' {gS.foo();} ;\n" + // gS is import pointer
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "b", debug);
+		assertEquals("foo\n", found);
+	}
+
+	@Test public void testDelegatorInvokesFirstVersionOfDelegateRule() throws Exception {
+		String slave =
+			"parser grammar S;\n" +
+			"a : b {System.out.println(\"S.a\");} ;\n" +
+			"b : B ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String slave2 =
+			"parser grammar T;\n" +
+			"a : B {System.out.println(\"T.a\");} ;\n"; // hidden by S.a
+		writeFile(tmpdir, "T.g", slave2);
+		String master =
+			"grammar M;\n" +
+			"import S,T;\n" +
+			"s : a ;\n" +
+			"B : 'b' ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "b", debug);
+		assertEquals("S.a\n", found);
+	}
+
+	@Test public void testDelegatesSeeSameTokenType() throws Exception {
+		String slave =
+			"parser grammar S;\n" + // A, B, C token type order
+			"tokens { A; B; C; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String slave2 =
+			"parser grammar T;\n" +
+			"tokens { C; B; A; }\n" + // reverse order
+			"y : A {System.out.println(\"T.y\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave2);
+		// The lexer will create rules to match letters a, b, c.
+		// The associated token types A, B, C must have the same value
+		// in all imported parsers.  Since ANTLR regenerates all imports
+		// for use with the delegator M, it can generate the same token type
+		// mapping in each parser:
+		// public static final int C=6;
+		// public static final int EOF=-1;
+		// public static final int B=5;
+		// public static final int WS=7;
+		// public static final int A=4;
+
+		String master =
+			"grammar M;\n" +
+			"import S,T;\n" +
+			"s : x y ;\n" + // matches AA, which should be "aa"
+			"B : 'b' ;\n" + // another order: B, A, C
+			"A : 'a' ;\n" +
+			"C : 'c' ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "s", "aa", debug);
+		assertEquals("S.x\n" +
+					 "T.y\n", found);
+	}
+
+	@Test public void testDelegatesSeeSameTokenType2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" + // A, B, C token type order
+			"tokens { A; B; C; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String slave2 =
+			"parser grammar T;\n" +
+			"tokens { C; B; A; }\n" + // reverse order
+			"y : A {System.out.println(\"T.y\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave2);
+
+		String master =
+			"grammar M;\n" +
+			"import S,T;\n" +
+			"s : x y ;\n" + // matches AA, which should be "aa"
+			"B : 'b' ;\n" + // another order: B, A, C
+			"A : 'a' ;\n" +
+			"C : 'c' ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, WS=7]";
+		String expectedStringLiteralToTypeMap = "{}";
+		String expectedTypeToTokenList = "[A, B, C, WS]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testCombinedImportsCombined() throws Exception {
+		// for now, we don't allow combined to import combined
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"grammar S;\n" + // A, B, C token type order
+			"tokens { A; B; C; }\n" +
+			"x : 'x' INT {System.out.println(\"S.x\");} ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : x INT ;\n";
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+		String expectedError = "error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: combined grammar M cannot import combined grammar S";
+		assertEquals("unexpected errors: "+equeue, expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
+	}
+
+	@Test public void testSameStringTwoNames() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			"tokens { A='a'; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String slave2 =
+			"parser grammar T;\n" +
+			"tokens { X='a'; }\n" +
+			"y : X {System.out.println(\"T.y\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave2);
+
+		String master =
+			"grammar M;\n" +
+			"import S,T;\n" +
+			"s : x y ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[A=4, WS=5, X=6]";
+		String expectedStringLiteralToTypeMap = "{'a'=4}";
+		String expectedTypeToTokenList = "[A, WS, X]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		Object expectedArg = "X='a'";
+		Object expectedArg2 = "A";
+		int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_CONFLICT;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+
+		String expectedError =
+			"error(158): T.g:2:10: cannot alias X='a'; string already assigned to A";
+		assertEquals(expectedError, equeue.errors.get(0).toString());
+	}
+
+	@Test public void testSameNameTwoStrings() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			"tokens { A='a'; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String slave2 =
+			"parser grammar T;\n" +
+			"tokens { A='x'; }\n" +
+			"y : A {System.out.println(\"T.y\");} ;\n";
+		
+		writeFile(tmpdir, "T.g", slave2);
+
+		String master =
+			"grammar M;\n" +
+			"import S,T;\n" +
+			"s : x y ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[A=4, T__6=6, WS=5]";
+		String expectedStringLiteralToTypeMap = "{'a'=4, 'x'=6}";
+		String expectedTypeToTokenList = "[A, WS, T__6]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, sortMapToString(g.composite.stringLiteralToTypeMap));
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		Object expectedArg = "A='x'";
+		Object expectedArg2 = "'a'";
+		int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_REASSIGNMENT;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+
+		String expectedError =
+			"error(159): T.g:2:10: cannot alias A='x'; token name already assigned to 'a'";
+		assertEquals(expectedError, equeue.errors.get(0).toString());
+	}
+
+	@Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			"options {tokenVocab=whatever;}\n" +
+			"tokens { A='a'; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : x ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		Object expectedArg = "S";
+		int expectedMsgID = ErrorManager.MSG_TOKEN_VOCAB_IN_DELEGATE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsWarning(equeue, expectedMessage);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals("unexpected errors: "+equeue, 1, equeue.warnings.size());
+
+		String expectedError =
+			"warning(160): S.g:2:10: tokenVocab option ignored in imported grammar S";
+		assertEquals(expectedError, equeue.warnings.get(0).toString());
+	}
+
+	@Test public void testImportedTokenVocabWorksInRoot() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			"tokens { A='a'; }\n" +
+			"x : A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		String tokens =
+			"A=99\n";
+		writeFile(tmpdir, "Test.tokens", tokens);
+
+		String master =
+			"grammar M;\n" +
+			"options {tokenVocab=Test;}\n" +
+			"import S;\n" +
+			"s : x ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[A=99, WS=101]";
+		String expectedStringLiteralToTypeMap = "{'a'=100}";
+		String expectedTypeToTokenList = "[A, 'a', WS]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testSyntaxErrorsInImportsNotThrownOut() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			"options {toke\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : x ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		// whole bunch of errors from bad S.g file
+		assertEquals("unexpected errors: "+equeue, 5, equeue.errors.size());
+	}
+
+	@Test public void testSyntaxErrorsInImportsNotThrownOut2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar S;\n" +
+			": A {System.out.println(\"S.x\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"s : x ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		// whole bunch of errors from bad S.g file
+		assertEquals("unexpected errors: "+equeue, 3, equeue.errors.size());
+	}
+
+	@Test public void testDelegatorRuleOverridesDelegate() throws Exception {
+		String slave =
+			"parser grammar S;\n" +
+			"a : b {System.out.println(\"S.a\");} ;\n" +
+			"b : B ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"b : 'b'|'c' ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "a", "c", debug);
+		assertEquals("S.a\n", found);
+	}
+
+	@Test public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception {
+		String slave =
+			"parser grammar JavaDecl;\n" +
+			"type : 'int' ;\n" +
+			"decl : type ID ';'\n" +
+			"     | type ID init ';' {System.out.println(\"JavaDecl: \"+$decl.text);}\n" +
+			"     ;\n" +
+			"init : '=' INT ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "JavaDecl.g", slave);
+		String master =
+			"grammar Java;\n" +
+			"import JavaDecl;\n" +
+			"prog : decl ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"\n" +
+			"ID  : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		// for float to work in decl, type must be overridden
+		String found = execParser("Java.g", master, "JavaParser", "JavaLexer",
+								  "prog", "float x = 3;", debug);
+		assertEquals("JavaDecl: floatx=3;\n", found);
+	}
+
+    @Test public void testDelegatorRuleOverridesDelegates() throws Exception {
+        String slave =
+            "parser grammar S;\n" +
+            "a : b {System.out.println(\"S.a\");} ;\n" +
+            "b : B ;\n" ;
+        mkdir(tmpdir);
+        writeFile(tmpdir, "S.g", slave);
+
+        String slave2 =
+            "parser grammar T;\n" +
+            "tokens { A='x'; }\n" +
+            "b : B {System.out.println(\"T.b\");} ;\n";
+        writeFile(tmpdir, "T.g", slave2);
+
+        String master =
+            "grammar M;\n" +
+            "import S, T;\n" +
+            "b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" +
+            "WS : (' '|'\\n') {skip();} ;\n" ;
+        String found = execParser("M.g", master, "MParser", "MLexer",
+                                  "a", "c", debug);
+        assertEquals("M.b\n" +
+                     "S.a\n", found);
+    }
+
+	// LEXER INHERITANCE
+
+	@Test public void testLexerDelegatorInvokesDelegateRule() throws Exception {
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'a' {System.out.println(\"S.A\");} ;\n" +
+			"C : 'c' ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"lexer grammar M;\n" +
+			"import S;\n" +
+			"B : 'b' ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execLexer("M.g", master, "M", "abc", debug);
+		assertEquals("S.A\nabc\n", found);
+	}
+
+	@Test public void testLexerDelegatorRuleOverridesDelegate() throws Exception {
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'a' {System.out.println(\"S.A\");} ;\n" +
+			"B : 'b' {System.out.println(\"S.B\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"lexer grammar M;\n" +
+			"import S;\n" +
+			"A : 'a' B {System.out.println(\"M.A\");} ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execLexer("M.g", master, "M", "ab", debug);
+		assertEquals("S.B\n" +
+					 "M.A\n" +
+					 "ab\n", found);
+	}
+
+	@Test public void testLexerDelegatorRuleOverridesDelegateLeavingNoRules() throws Exception {
+		// M's Tokens rule has nothing left to predict from S.  Should
+		// the S.Tokens alt be omitted in this case?
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'a' {System.out.println(\"S.A\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"lexer grammar M;\n" +
+			"import S;\n" +
+			"A : 'a' {System.out.println(\"M.A\");} ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		writeFile(tmpdir, "/M.g", master);
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		composite.assignTokenTypes();
+		composite.defineGrammarSymbols();
+		composite.createNFAs();
+		g.createLookaheadDFAs(false);
+
+		// predict only alts from M not S
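+		// (serialized-DFA notation below: ".s0-'a'->.s1" is a transition on 'a'
+		// from state s0 to s1; ":s3=>2" marks an accept state predicting alt 2)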
+		String expectingDFA =
+			".s0-'a'->.s1\n" +
+			".s0-{'\\n', ' '}->:s3=>2\n" +
+			".s1-<EOT>->:s2=>1\n";
+		org.antlr.analysis.DFA dfa = g.getLookaheadDFA(1);
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(dfa.startState);
+		assertEquals(expectingDFA, result);
+
+		// must not be an "unreachable alt: Tokens" error
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testInvalidImportMechanism() throws Exception {
+		// a tree grammar cannot import a lexer grammar; expect exactly
+		// one error and no warnings
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'a' {System.out.println(\"S.A\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"tree grammar M;\n" +
+			"import S;\n" +
+			"a : A ;";
+		writeFile(tmpdir, "/M.g", master);
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
+
+		String expectedError =
+			"error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: tree grammar M cannot import lexer grammar S";
+		assertEquals(expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
+	}
+
+	@Test public void testSyntacticPredicateRulesAreNotInherited() throws Exception {
+		// if this compiles, it means that synpred1_S is defined in S.java
+		// but not MParser.java.  MParser has its own synpred1_M which must
+		// be separate to compile.
+		String slave =
+			"parser grammar S;\n" +
+			"a : 'a' {System.out.println(\"S.a1\");}\n" +
+			"  | 'a' {System.out.println(\"S.a2\");}\n" +
+			"  ;\n" +
+			"b : 'x' | 'y' {;} ;\n"; // preds generated but not needed in DFA here
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"options {backtrack=true;}\n" +
+			"import S;\n" +
+			"start : a b ;\n" +
+			"nonsense : 'q' | 'q' {;} ;" + // forces def of preds here in M
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "start", "ax", debug);
+		assertEquals("S.a1\n", found);
+	}
+
+	@Test public void testKeywordVSIDGivesNoWarning() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'abc' {System.out.println(\"S.A\");} ;\n" +
+			"ID : 'a'..'z'+ ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"a : A {System.out.println(\"M.a\");} ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		String found = execParser("M.g", master, "MParser", "MLexer",
+								  "a", "abc", debug);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size());
+
+		assertEquals("S.A\nM.a\n", found);
+	}
+
+	@Test public void testWarningForUndefinedToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"lexer grammar S;\n" +
+			"A : 'abc' {System.out.println(\"S.A\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"a : ABC A {System.out.println(\"M.a\");} ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		// A is defined in S, but M should still see it and not give a warning.
+		// The only problem is ABC.
+
+		rawGenerateAndBuildRecognizer("M.g", master, "MParser", "MLexer", debug);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+		assertEquals("unexpected warnings: "+equeue, 1, equeue.warnings.size());
+
+		String expectedError =
+			"warning(105): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+File.separator+"M.g:3:5: no lexer rule corresponding to token: ABC";
+		assertEquals(expectedError, equeue.warnings.get(0).toString().replaceFirst("\\-[0-9]+",""));
+	}
+
+	/** Make sure that M can import S that imports T. */
+	@Test public void test3LevelImport() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar T;\n" +
+			"a : T ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave);
+		String slave2 =
+			"parser grammar S;\n" + // A, B, C token type order
+			"import T;\n" +
+			"a : S ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave2);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"a : M ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+		g.composite.defineGrammarSymbols();
+
+		String expectedTokenIDToTypeMap = "[M=4, S=5, T=6]";
+		String expectedStringLiteralToTypeMap = "{}";
+		String expectedTypeToTokenList = "[M, S, T]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		boolean ok =
+			rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, ok);
+	}
+
+	@Test public void testBigTreeOfImports() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar T;\n" +
+			"x : T ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave);
+		slave =
+			"parser grammar S;\n" +
+			"import T;\n" +
+			"y : S ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+
+		slave =
+			"parser grammar C;\n" +
+			"i : C ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "C.g", slave);
+		slave =
+			"parser grammar B;\n" +
+			"j : B ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "B.g", slave);
+		slave =
+			"parser grammar A;\n" +
+			"import B,C;\n" +
+			"k : A ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "A.g", slave);
+
+		String master =
+			"grammar M;\n" +
+			"import S,A;\n" +
+			"a : M ;\n" ;
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+		g.composite.defineGrammarSymbols();
+
+		String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, M=7, S=8, T=9]";
+		String expectedStringLiteralToTypeMap = "{}";
+		String expectedTypeToTokenList = "[A, B, C, M, S, T]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		boolean ok =
+			rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, ok);
+	}
+
+	@Test public void testRulesVisibleThroughMultilevelImport() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String slave =
+			"parser grammar T;\n" +
+			"x : T ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.g", slave);
+		String slave2 =
+			"parser grammar S;\n" + // A, B, C token type order
+			"import T;\n" +
+			"a : S ;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave2);
+
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"a : M x ;\n" ; // x MUST BE VISIBLE TO M
+		writeFile(tmpdir, "M.g", master);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+		g.composite.defineGrammarSymbols();
+
+		String expectedTokenIDToTypeMap = "[M=4, S=5, T=6]";
+		String expectedStringLiteralToTypeMap = "{}";
+		String expectedTypeToTokenList = "[M, S, T]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testNestedComposite() throws Exception {
+		// Wasn't compiling. http://www.antlr.org/jira/browse/ANTLR-438
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String gstr =
+			"lexer grammar L;\n" +
+			"T1: '1';\n" +
+			"T2: '2';\n" +
+			"T3: '3';\n" +
+			"T4: '4';\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "L.g", gstr);
+		gstr =
+			"parser grammar G1;\n" +
+			"s: a | b;\n" +
+			"a: T1;\n" +
+			"b: T2;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "G1.g", gstr);
+
+		gstr =
+			"parser grammar G2;\n" +
+			"import G1;\n" +
+			"a: T3;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "G2.g", gstr);
+		String G3str =
+			"grammar G3;\n" +
+			"import G2;\n" +
+			"b: T4;\n" ;
+		mkdir(tmpdir);
+		writeFile(tmpdir, "G3.g", G3str);
+
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/G3.g",composite);
+		composite.setDelegationRoot(g);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+		g.composite.defineGrammarSymbols();
+
+		String expectedTokenIDToTypeMap = "[T1=4, T2=5, T3=6, T4=7]";
+		String expectedStringLiteralToTypeMap = "{}";
+		String expectedTypeToTokenList = "[T1, T2, T3, T4]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+
+		boolean ok =
+			rawGenerateAndBuildRecognizer("G3.g", G3str, "G3Parser", null, false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, ok);
+	}
+
+	@Test public void testHeadersPropogatedCorrectlyToImportedGrammars() throws Exception {
+		String slave =
+			"parser grammar S;\n" +
+			"a : B {System.out.print(\"S.a\");} ;\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "S.g", slave);
+		String master =
+			"grammar M;\n" +
+			"import S;\n" +
+			"@header{package mypackage;}\n" +
+			"@lexer::header{package mypackage;}\n" +
+			"s : a ;\n" +
+			"B : 'b' ;" + // defines B from inherited token space
+			"WS : (' '|'\\n') {skip();} ;\n" ;
+		boolean ok = antlr("M.g", "M.g", master, debug);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, ok);
+	}
+
+}
\ No newline at end of file
diff --git a/tool/src/test/java/org/antlr/test/TestDFAConversion.java b/tool/src/test/java/org/antlr/test/TestDFAConversion.java
new file mode 100644
index 0000000..b30bd20
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestDFAConversion.java
@@ -0,0 +1,1789 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.Label;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.misc.BitSet;
+import org.antlr.tool.*;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.junit.Assert.*;
+
+public class TestDFAConversion extends BaseTest {
+
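+	// Notation note (added for clarity): the "expecting" strings below are
+	// serialized lookahead DFAs as produced by FASerializer in these tests:
+	// ".s0-A->.s1" is a transition on token A from state s0 to s1, and
+	// ":s2=>1" marks an accept state that predicts alternative 1.
+	// As used throughout, checkDecision(grammar, decision#, expectedDFA,
+	// unreachableAlts, nonDetAlts, ambigInput, danglingAlts, numWarnings)
+	// verifies both the DFA and the warnings reported for that decision.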
+	@Test public void testA() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A C | B;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testAB_or_AC() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A B | A C;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testAB_or_AC_k2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"options {k=2;}\n"+
+			"a : A B | A C;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testAB_or_AC_k1() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"options {k=1;}\n"+
+			"a : A B | A C;");
+		String expecting =
+			".s0-A->:s1=>1\n";
+		int[] unreachableAlts = new int[] {2};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "A" ;
+		int[] danglingAlts = new int[] {2};
+		int numWarnings = 2; // ambig upon A
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testselfRecurseNonDet() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : A a X | A a Y;");
+		List<Integer> altsWithRecursion = Arrays.asList(1, 2);
+		assertNonLLStar(g, altsWithRecursion);
+	}
+
+	@Test public void testRecursionOverflow() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a Y | A A A A A X ;\n" + // force recursion past m=4
+			"a : A a | Q;");
+		List<String> expectedTargetRules = Arrays.asList("a");
+		int expectedAlt = 1;
+		assertRecursionOverflow(g, expectedTargetRules, expectedAlt);
+	}
+
+	@Test public void testRecursionOverflow2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a Y | A+ X ;\n" + // force recursion past m=4
+			"a : A a | Q;");
+		List<String> expectedTargetRules = Arrays.asList("a");
+		int expectedAlt = 1;
+		assertRecursionOverflow(g, expectedTargetRules, expectedAlt);
+	}
+
+	@Test public void testRecursionOverflowWithPredOk() throws Exception {
+		// overflows with k=*, but resolves with pred
+		// no warnings/errors
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : (a Y)=> a Y | A A A A A X ;\n" + // force recursion past m=4
+			"a : A a | Q;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-Q&&{synpred1_t}?->:s11=>1\n" +
+			".s1-A->.s2\n" +
+			".s1-Q&&{synpred1_t}?->:s10=>1\n" +
+			".s2-A->.s3\n" +
+			".s2-Q&&{synpred1_t}?->:s9=>1\n" +
+			".s3-A->.s4\n" +
+			".s3-Q&&{synpred1_t}?->:s8=>1\n" +
+			".s4-A->.s5\n" +
+			".s4-Q&&{synpred1_t}?->:s6=>1\n" +
+			".s5-{synpred1_t}?->:s6=>1\n" +
+			".s5-{true}?->:s7=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testRecursionOverflowWithPredOk2() throws Exception {
+		// must predict Z w/o predicate
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : (a Y)=> a Y | A A A A A X | Z;\n" + // force recursion past m=4
+			"a : A a | Q;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-Q&&{synpred1_t}?->:s11=>1\n" +
+			".s0-Z->:s12=>3\n" +
+			".s1-A->.s2\n" +
+			".s1-Q&&{synpred1_t}?->:s10=>1\n" +
+			".s2-A->.s3\n" +
+			".s2-Q&&{synpred1_t}?->:s9=>1\n" +
+			".s3-A->.s4\n" +
+			".s3-Q&&{synpred1_t}?->:s8=>1\n" +
+			".s4-A->.s5\n" +
+			".s4-Q&&{synpred1_t}?->:s6=>1\n" +
+			".s5-{synpred1_t}?->:s6=>1\n" +
+			".s5-{true}?->:s7=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testCannotSeePastRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		List<Integer> altsWithRecursion = Arrays.asList(1, 2);
+		assertNonLLStar(g, altsWithRecursion);
+	}
+
+	@Test public void testSynPredResolvesRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : (y X)=> y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		String expecting =
+			".s0-B->.s4\n" +
+			".s0-L->.s1\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{synpred1_t}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
+		/* There are 2 nondet errors, but checkDecision only checks the first one :(
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSynPredMissingInMiddle() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : (A)=> X\n" +
+			"    | X\n" +  // assume missing synpred is true also
+			"	 | (C)=> X" +
+			"    ;\n");
+		String expecting =
+			".s0-X->.s1\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{synpred2_t}?->:s4=>3\n" +
+			".s1-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testAutoBacktrackAndPredMissingInMiddle() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"x   : (A)=> X\n" +
+			"    | X\n" +  // assume missing synpred is true also
+			"	 | (C)=> X" +
+			"    ;\n");
+		String expecting =
+			".s0-X->.s1\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +  // gen code should have this as (A)=>
+			".s1-{synpred2_t}?->:s3=>2\n" + // gen code should have this as (X)=>
+			".s1-{synpred3_t}?->:s4=>3\n"; // gen code should have this as (C)=>
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSemPredResolvesRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : {p}? y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		String expecting =
+			".s0-B->.s4\n" +
+			".s0-L->.s1\n" +
+			".s1-{p}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{p}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSemPredResolvesRecursion2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x\n" +
+			"options {k=1;}\n" +
+			"   : {p}? y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		String expecting =
+			".s0-B->.s4\n" +
+			".s0-L->.s1\n" +
+			".s1-{p}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{p}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSemPredResolvesRecursion3() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x\n" +
+			"options {k=2;}\n" + // just makes bigger DFA
+			"   : {p}? y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		String expecting =
+			".s0-B->.s6\n" +
+			".s0-L->.s1\n" +
+			".s1-B->.s5\n" +
+			".s1-L->.s2\n" +
+			".s2-{p}?->:s3=>1\n" +
+			".s2-{true}?->:s4=>2\n" +
+			".s5-{p}?->:s3=>1\n" +
+			".s5-{true}?->:s4=>2\n" +
+			".s6-X->:s3=>1\n" +
+			".s6-Y->:s4=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSynPredResolvesRecursion2() throws Exception {
+		// k=* fails and it retries/succeeds with k=1 silently
+		// because of predicate
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"statement\n" +
+			"    :     (reference ASSIGN)=> reference ASSIGN expr\n" +
+			"    |     expr\n" +
+			"    ;\n" +
+			"expr:     reference\n" +
+			"    |     INT\n" +
+			"    |     FLOAT\n" +
+			"    ;\n" +
+			"reference\n" +
+			"    :     ID L argument_list R\n" +
+			"    ;\n" +
+			"argument_list\n" +
+			"    :     expr COMMA expr\n" +
+			"    ;");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s0-{FLOAT, INT}->:s3=>2\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSynPredResolvesRecursion3() throws Exception {
+		// No errors with k=1; don't try k=* first
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"statement\n" +
+			"options {k=1;}\n" +
+			"    :     (reference ASSIGN)=> reference ASSIGN expr\n" +
+			"    |     expr\n" +
+			"    ;\n" +
+			"expr:     reference\n" +
+			"    |     INT\n" +
+			"    |     FLOAT\n" +
+			"    ;\n" +
+			"reference\n" +
+			"    :     ID L argument_list R\n" +
+			"    ;\n" +
+			"argument_list\n" +
+			"    :     expr COMMA expr\n" +
+			"    ;");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s0-{FLOAT, INT}->:s3=>2\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSynPredResolvesRecursion4() throws Exception {
+		// No errors with k=2; don't try k=* first
+		// Should be ok like k=1, except with a bigger DFA
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"statement\n" +
+			"options {k=2;}\n" +
+			"    :     (reference ASSIGN)=> reference ASSIGN expr\n" +
+			"    |     expr\n" +
+			"    ;\n" +
+			"expr:     reference\n" +
+			"    |     INT\n" +
+			"    |     FLOAT\n" +
+			"    ;\n" +
+			"reference\n" +
+			"    :     ID L argument_list R\n" +
+			"    ;\n" +
+			"argument_list\n" +
+			"    :     expr COMMA expr\n" +
+			"    ;");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s0-{FLOAT, INT}->:s4=>2\n" +
+			".s1-L->.s2\n" +
+			".s2-{synpred1_t}?->:s3=>1\n" +
+			".s2-{true}?->:s4=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testSynPredResolvesRecursionInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A :     (B ';')=> B ';'\n" +
+			"  |     B '.'\n" +
+			"  ;\n" +
+			"fragment\n" +
+			"B :     '(' B ')'\n" +
+			"  |     'x'\n" +
+			"  ;\n");
+		String expecting =
+			".s0-'('->.s1\n" +
+			".s0-'x'->.s4\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{synpred1_t}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testAutoBacktrackResolvesRecursionInLexer() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"options {backtrack=true;}\n"+
+			"A :     B ';'\n" +
+			"  |     B '.'\n" +
+			"  ;\n" +
+			"fragment\n" +
+			"B :     '(' B ')'\n" +
+			"  |     'x'\n" +
+			"  ;\n");
+		String expecting =
+			".s0-'('->.s1\n" +
+			".s0-'x'->.s4\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{synpred1_t}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testAutoBacktrackResolvesRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"x   : y X\n" +
+			"    | y Y\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		String expecting =
+			".s0-B->.s4\n" +
+			".s0-L->.s1\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n" +
+			".s4-{synpred1_t}?->:s2=>1\n" +
+			".s4-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testselfRecurseNonDet2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : P a P | P;");
+		// nondeterministic from left edge
+		String expecting =
+			".s0-P->.s1\n" +
+			".s1-EOF->:s3=>2\n"+
+			".s1-P->:s2=>1\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "P P";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testIndirectRecursionLoop() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : b X ;\n"+
+			"b : a B ;\n");
+
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		Set<Rule> leftRecursive = g.getLeftRecursiveRules();
+		Set<String> expectedRules =
+			new HashSet<String>() {{add("a"); add("b");}};
+		assertEquals(expectedRules, ruleNames(leftRecursive));
+
+		assertEquals(1, equeue.errors.size());
+		Message msg = equeue.errors.get(0);
+		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
+				    msg instanceof LeftRecursionCyclesMessage);
+		LeftRecursionCyclesMessage cyclesMsg = (LeftRecursionCyclesMessage)msg;
+
+		// cycle of [a, b]
+		Collection<? extends Collection<? extends Rule>> result = cyclesMsg.cycles;
+		Set<String> expecting = new HashSet<String>() {{add("a"); add("b");}};
+		assertEquals(expecting, ruleNames2(result));
+	}
+
+	@Test public void testIndirectRecursionLoop2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : i b X ;\n"+ // should see through i
+			"b : a B ;\n" +
+			"i : ;\n");
+
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		Set<Rule> leftRecursive = g.getLeftRecursiveRules();
+		Set<String> expectedRules =
+			new HashSet<String>() {{add("a"); add("b");}};
+		assertEquals(expectedRules, ruleNames(leftRecursive));
+
+		assertEquals(1, equeue.errors.size());
+		Message msg = equeue.errors.get(0);
+		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
+				    msg instanceof LeftRecursionCyclesMessage);
+		LeftRecursionCyclesMessage cyclesMsg = (LeftRecursionCyclesMessage)msg;
+
+		// cycle of [a, b]
+		Collection<? extends Collection<? extends Rule>> result = cyclesMsg.cycles;
+		Set<String> expecting = new HashSet<String>() {{add("a"); add("b");}};
+		assertEquals(expecting, ruleNames2(result));
+	}
+
+	@Test public void testIndirectRecursionLoop3() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : i b X ;\n"+ // should see through i
+			"b : a B ;\n" +
+			"i : ;\n" +
+			"d : e ;\n" +
+			"e : d ;\n");
+
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		Set<Rule> leftRecursive = g.getLeftRecursiveRules();
+		Set<String> expectedRules =
+			new HashSet<String>() {{add("a"); add("b"); add("e"); add("d");}};
+		assertEquals(expectedRules, ruleNames(leftRecursive));
+
+		assertEquals(1, equeue.errors.size());
+		Message msg = equeue.errors.get(0);
+		assertTrue("expecting left recursion cycles; found "+msg.getClass().getName(),
+				    msg instanceof LeftRecursionCyclesMessage);
+		LeftRecursionCyclesMessage cyclesMsg = (LeftRecursionCyclesMessage)msg;
+
+		// cycle of [a, b]
+		Collection<? extends Collection<? extends Rule>> result = cyclesMsg.cycles;
+		Set<String> expecting = new HashSet<String>() {{add("a"); add("b"); add("d"); add("e");}};
+		assertEquals(expecting, ruleNames2(result));
+	}
+
+	@Test public void testifThenElse() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : IF s (E s)? | B;\n" +
+			"slist: s SEMI ;");
+		String expecting =
+			".s0-E->:s1=>1\n" +
+			".s0-SEMI->:s2=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "E";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+		expecting =
+			".s0-B->:s2=>2\n" +
+			".s0-IF->:s1=>1\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testifThenElseChecksStackSuffixConflict() throws Exception {
+		// if you don't check stack soon enough, this finds E B not just E
+		// as ambig input
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"slist: s SEMI ;\n"+
+			"s : IF s el | B;\n" +
+			"el: (E s)? ;\n");
+		String expecting =
+			".s0-E->:s1=>1\n" +
+			".s0-SEMI->:s2=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "E";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 2, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+		expecting =
+			".s0-B->:s2=>2\n" +
+			".s0-IF->:s1=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+    @Test
+    public void testInvokeRule() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b A\n" +
+			"  | b B\n" +
+			"  | C\n" +
+			"  ;\n" +
+			"b : X\n" +
+			"  ;\n");
+		String expecting =
+			".s0-C->:s4=>3\n" +
+            ".s0-X->.s1\n" +
+            ".s1-A->:s2=>1\n" +
+            ".s1-B->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test
+    public void testDoubleInvokeRuleLeftEdge() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b X\n" +
+			"  | b Y\n" +
+			"  ;\n" +
+			"b : c B\n" +
+			"  | c\n" +
+			"  ;\n" +
+			"c : C ;\n");
+		String expecting =
+			".s0-C->.s1\n" +
+			".s1-B->.s2\n" +
+			".s1-X->:s3=>1\n" +
+			".s1-Y->:s4=>2\n" +
+			".s2-X->:s3=>1\n" +
+			".s2-Y->:s4=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+		expecting =
+			".s0-C->.s1\n" +
+            ".s1-B->:s2=>1\n" +
+            ".s1-X..Y->:s3=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testimmediateTailRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : A a | A B;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-A->:s3=>1\n" +
+			".s1-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test
+    public void testAStar_immediateTailRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : A a | ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
+		int[] unreachableAlts = null; // without
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testNoStartRule() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A a | X;"); // single rule 'a' refers to itself; no start rule
+
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		Message msg = equeue.warnings.get(0);
+		assertTrue("expecting no start rules; found "+msg.getClass().getName(),
+				   msg instanceof GrammarSemanticsMessage);
+	}
+
+	@Test
+    public void testAStar_immediateTailRecursion2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : A a | A ;");
+		String expecting =
+			".s0-A->.s1\n" +
+            ".s1-A->:s2=>1\n" +
+            ".s1-EOF->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testimmediateLeftRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : a A | B;");
+		Set<Rule> leftRecursive = g.getLeftRecursiveRules();
+		Set<String> expectedRules = new HashSet<String>() {{add("a");}};
+		assertEquals(expectedRules, ruleNames(leftRecursive));
+	}
+
+	@Test public void testIndirectLeftRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : b | A ;\n" +
+			"b : c ;\n" +
+			"c : a | C ;\n");
+		Set<Rule> leftRecursive = g.getLeftRecursiveRules();
+		Set<String> expectedRules = new HashSet<String>() {{add("a"); add("b"); add("c");}};
+		assertEquals(expectedRules, ruleNames(leftRecursive));
+	}
+
+	@Test public void testLeftRecursionInMultipleCycles() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+				"s : a x ;\n" +
+				"a : b | A ;\n" +
+				"b : c ;\n" +
+				"c : a | C ;\n" +
+				"x : y | X ;\n" +
+				"y : x ;\n");
+		Set<Rule> leftRecursive = g.getLeftRecursiveRules();
+		Set<String> expectedRules =
+			new HashSet<String>() {{add("a"); add("b"); add("c"); add("x"); add("y");}};
+		assertEquals(expectedRules, ruleNames(leftRecursive));
+	}
+
+	@Test public void testCycleInsideRuleDoesNotForceInfiniteRecursion() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a ;\n" +
+			"a : (A|)+ B;\n");
+		// before I added a visitedStates thing, it was possible to loop
+		// forever inside of a rule if there was an epsilon loop.
+		Set<Rule> leftRecursive = g.getLeftRecursiveRules();
+		Set<Rule> expectedRules = new HashSet<Rule>();
+		assertEquals(expectedRules, leftRecursive);
+	}
+
+	// L O O P S
+
+	@Test public void testAStar() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A )* ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testAorBorCStar() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A | B | C )* ;");
+		String expecting =
+			".s0-A..C->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testAPlus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A )+ ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
+	}
+
+	@Test public void testAPlusNonGreedyWhenDeterministic() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (options {greedy=false;}:A)+ ;\n");
+		// should look the same as A+ since no ambiguity
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testAPlusNonGreedyWhenNonDeterministic() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (options {greedy=false;}:A)+ A+ ;\n");
+		// nongreedy loop always chooses to exit on A, so alt 1 (stay in loop) is unreachable
+		String expecting =
+			".s0-A->:s1=>2\n"; // always chooses to exit
+		int[] unreachableAlts = new int[] {1};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "A";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testAPlusGreedyWhenNonDeterministic() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (options {greedy=true;}:A)+ A+ ;\n");
+		// greedy loop always enters on A, so the exit alt (2) is unreachable
+		String expecting =
+			".s0-A->:s1=>1\n"; // always chooses to enter loop upon A
+		// greedy=true turns off one of the warnings; the loop can never exit on A now
+		int[] unreachableAlts = new int[] {2};
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testAorBorCPlus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A | B | C )+ ;");
+		String expecting =
+			".s0-A..C->:s1=>1\n" +
+			".s0-EOF->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testAOptional() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A )? B ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
+	}
+
+	@Test public void testAorBorCOptional() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ( A | B | C )? Z ;");
+		String expecting =
+			".s0-A..C->:s1=>1\n" +
+			".s0-Z->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback decision
+	}
+
+	// A R B I T R A R Y  L O O K A H E A D
+
+	@Test
+    public void testAStarBOrAStarC() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A)* B | (A)* C;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback
+		expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-C->:s2=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback
+		expecting =
+			".s0-A->.s1\n" +
+            ".s0-B->:s2=>1\n" +
+            ".s0-C->:s3=>2\n" +
+            ".s1-A->.s1\n" +
+            ".s1-B->:s2=>1\n" +
+            ".s1-C->:s3=>2\n";
+		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule block
+	}
+
+	@Test
+    public void testAStarBOrAPlusC() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A)* B | (A)+ C;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback
+		expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-C->:s2=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback
+		expecting =
+			".s0-A->.s1\n" +
+            ".s0-B->:s2=>1\n" +
+            ".s1-A->.s1\n" +
+            ".s1-B->:s2=>1\n" +
+            ".s1-C->:s3=>2\n";
+		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule block
+	}
+
+
+    @Test
+    public void testAOrBPlusOrAPlus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A|B)* X | (A)+ Y;");
+		String expecting =
+			".s0-A..B->:s1=>1\n" +
+			".s0-X->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0); // loopback (A|B)*
+		expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-Y->:s2=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0); // loopback (A)+
+		expecting =
+			".s0-A->.s1\n" +
+            ".s0-B..X->:s2=>1\n" +
+            ".s1-A->.s1\n" +
+            ".s1-B..X->:s2=>1\n" +
+            ".s1-Y->:s3=>2\n";
+		checkDecision(g, 3, expecting, null, null, null, null, 0); // rule
+	}
+
+	@Test public void testLoopbackAndExit() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A|B)+ B;");
+		String expecting =
+			".s0-A->:s3=>1\n" +
+			".s0-B->.s1\n" +
+			".s1-A..B->:s3=>1\n" +
+			".s1-EOF->:s2=>2\n"; // sees A|B as a set
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testOptionalAltAndBypass() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A|B)? B;");
+		String expecting =
+			".s0-A->:s2=>1\n" +
+			".s0-B->.s1\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-EOF->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	// R E S O L V E  S Y N  C O N F L I C T S
+
+	@Test public void testResolveLL1ByChoosingFirst() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A C | A C;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-C->:s2=>1\n";
+		int[] unreachableAlts = new int[] {2};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "A C";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testResolveLL2ByChoosingFirst() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A B | A B;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n";
+		int[] unreachableAlts = new int[] {2};
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "A B";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testResolveLL2MixAlt() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A B | A C | A B | Z;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-Z->:s4=>4\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n";
+		int[] unreachableAlts = new int[] {3};
+		int[] nonDetAlts = new int[] {1,3};
+		String ambigInput = "A B";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testIndirectIFThenElseStyleAmbig() throws Exception {
+		// the (c)+ loopback is ambig because it could match "CASE"
+		// by entering the loop, or by falling out, ignoring (s)*, and
+		// falling back into the (cg)* loop, which starts over and
+		// calls cg again.  Either choice allows it to get back to
+		// the same node.  The software catches it as:
+		// "avoid infinite closure computation emanating from alt 1
+		// of ():27|2|[8 $]" where state 27 is the first alt of (c)+
+		// and 8 is the first alt of the (cg)* loop.
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"s : stat ;\n" +
+			"stat : LCURLY ( cg )* RCURLY | E SEMI  ;\n" +
+			"cg : (c)+ (stat)* ;\n" +
+			"c : CASE E ;\n");
+		String expecting =
+			".s0-CASE->:s2=>1\n" +
+			".s0-E..RCURLY->:s1=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "CASE";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 3, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	// S E T S
+
+	@Test public void testComplement() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ~(A | B | C) | C {;} ;\n" +
+			"b : X Y Z ;");
+		String expecting =
+			".s0-C->:s2=>2\n" +
+			".s0-X..Z->:s1=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testComplementToken() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ~C | C {;} ;\n" +
+			"b : X Y Z ;");
+		String expecting =
+			".s0-C->:s2=>2\n" +
+			".s0-X..Z->:s1=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testComplementChar() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : ~'x' | 'x' {;} ;\n");
+		String expecting =
+			".s0-'x'->:s2=>2\n" +
+			".s0-{'\\u0000'..'w', 'y'..'\\uFFFF'}->:s1=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testComplementCharSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : ~(' '|'\t'|'x'|'y') | 'x';\n" + // collapse into single set
+			"B : 'y' ;");
+		String expecting =
+			".s0-'y'->:s2=>2\n" +
+			".s0-{'\\u0000'..'\\b', '\\n'..'\\u001F', '!'..'x', 'z'..'\\uFFFF'}->:s1=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testNoSetCollapseWithActions() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A | B {foo}) | C;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testRuleAltsSetCollapse() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A | B | C ;"
+		);
+		String expecting = // still looks like block
+			"(grammar t (rule a ARG RET scope (BLOCK (ALT A <end-of-alt>) (ALT B <end-of-alt>) (ALT C <end-of-alt>) <end-of-block>) <end-of-rule>))";
+		assertEquals(expecting, g.getGrammarTree().toStringTree());
+	}
+
+	@Test public void testTokensRuleAltsDoNotCollapse() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';" +
+			"B : 'b';\n"
+		);
+		String expecting =
+			".s0-'a'->:s1=>1\n" +
+			".s0-'b'->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testMultipleSequenceCollision() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"a : (A{;}|B)\n" +
+			"  | (A{;}|B)\n" +
+			"  | A\n" +
+			"  ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>1\n"; // not optimized because states are nondet
+		int[] unreachableAlts = new int[] {2,3};
+		int[] nonDetAlts = new int[] {1,2,3};
+		String ambigInput = "A";
+		int[] danglingAlts = null;
+		int numWarnings = 3;
+		checkDecision(g, 3, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+		/* There are 2 nondet errors, but the checkDecision only checks first one :(
+		The "B" conflicting input is not checked except by virtue of the
+		result DFA.
+<string>:2:5: Decision can match input such as "A" using multiple alternatives:
+alt 1 via NFA path 7,2,3
+alt 2 via NFA path 14,9,10
+alt 3 via NFA path 16,17
+As a result, alternative(s) 2,3 were disabled for that input,
+<string>:2:5: Decision can match input such as "B" using multiple alternatives:
+alt 1 via NFA path 7,8,4,5
+alt 2 via NFA path 14,15,11,12
+As a result, alternative(s) 2 were disabled for that input
+<string>:2:5: The following alternatives are unreachable: 2,3
+*/
+	}
+
+	@Test public void testMultipleAltsSameSequenceCollision() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"a : type ID \n" +
+			"  | type ID\n" +
+			"  | type ID\n" +
+			"  | type ID\n" +
+			"  ;\n" +
+			"\n" +
+			"type : I | F;");
+		// nondeterministic from left edge; no stop state
+		String expecting =
+			".s0-F..I->.s1\n" +
+			".s1-ID->:s2=>1\n";
+		int[] unreachableAlts = new int[] {2,3,4};
+		int[] nonDetAlts = new int[] {1,2,3,4};
+		String ambigInput = "F..I ID";
+		int[] danglingAlts = null;
+		int numWarnings = 2;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testFollowReturnsToLoopReenteringSameRule() throws Exception {
+		// D07 can be matched in the (...)? or fall out of esc back into the (...)*
+		// loop in sl.  Note that D07 is matched by ~(R|SLASH).  There is no good
+		// way to write that grammar, I guess.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"sl : L ( esc | ~(R|SLASH) )* R ;\n" +
+			"\n" +
+			"esc : SLASH ( N | D03 (D07)? ) ;");
+		String expecting =
+			".s0-D03..N->:s2=>2\n" +
+			".s0-R->:s3=>3\n" +
+			".s0-SLASH->:s1=>1\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "D07";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testTokenCallsAnotherOnLeftEdge() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"F   :   I '.'\n" +
+			"    ;\n" +
+			"I   :   '0'\n" +
+			"    ;\n"
+		);
+		String expecting =
+			".s0-'0'->.s1\n" +
+			".s1-'.'->:s3=>1\n" +
+			".s1-<EOT>->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
+
+	@Test public void testSelfRecursionAmbigAlts() throws Exception {
+		// ambiguous grammar for "L ID R" (alts 1,2 of a)
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : a;\n" +
+			"a   :   L ID R\n" +
+			"    |   L a R\n" + // disabled for L ID R
+			"    |   b\n" +
+			"    ;\n" +
+			"\n" +
+			"b   :   ID\n" +
+			"    ;\n");
+		String expecting =
+			".s0-ID->:s5=>3\n" +
+			".s0-L->.s1\n" +
+			".s1-ID->.s2\n" +
+			".s1-L->:s4=>2\n" +
+			".s2-R->:s3=>1\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "L ID R";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testIndirectRecursionAmbigAlts() throws Exception {
+		// ambiguous grammar for "L ID R" (alts 1,2 of a)
+		// This was derived from the Java grammar on 12/4/2004, when it
+		// was not handling unaryExpression properly.  I traced it to an
+		// incorrect closure-busy condition.  It thought that the trace
+		// of a->b->a->b again for "L ID" was an infinite loop, but actually
+		// the repeated call to b only happens *after* an L has been matched.
+		// I added a check of what the initial stack looks like and it
+		// seems to work now.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s   :   a ;\n" +
+			"a   :   L ID R\n" +
+			"    |   b\n" +
+			"    ;\n" +
+			"\n" +
+			"b   :   ID\n" +
+			"    |   L a R\n" +
+			"    ;");
+		String expecting =
+			".s0-ID->:s4=>2\n" +
+			".s0-L->.s1\n" +
+			".s1-ID->.s2\n" +
+			".s1-L->:s4=>2\n" +
+			".s2-R->:s3=>1\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "L ID R";
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testTailRecursionInvokedFromArbitraryLookaheadDecision() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b X\n" +
+			"  | b Y\n" +
+			"  ;\n" +
+			"\n" +
+			"b : A\n" +
+			"  | A b\n" +
+			"  ;\n");
+		List<Integer> altsWithRecursion = Arrays.asList(1, 2);
+		assertNonLLStar(g, altsWithRecursion);
+	}
+
+	@Test public void testWildcardStarK1AndNonGreedyByDefaultInParser() throws Exception {
+		// no error because .* assumes it should finish when it sees R
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"s : A block EOF ;\n" +
+			"block : L .* R ;");
+		String expecting =
+			".s0-A..L->:s2=>1\n" +
+			".s0-R->:s1=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testWildcardPlusK1AndNonGreedyByDefaultInParser() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n" +
+			"s : A block EOF ;\n" +
+			"block : L .+ R ;");
+		String expecting =
+			".s0-A..L->:s2=>1\n" +
+			".s0-R->:s1=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+	}
+
+	@Test public void testGatedSynPred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : (X)=> X\n" +
+			"    | Y\n" +
+			"    ;\n");
+		String expecting =
+			".s0-X&&{synpred1_t}?->:s1=>1\n" + // does not hoist; it gates edges
+			".s0-Y->:s2=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+
+		Set<String> preds = g.synPredNamesUsedInDFA;
+		Set<String> expectedPreds = new HashSet<String>() {{add("synpred1_t");}};
+		assertEquals("predicate names not recorded properly in grammar", expectedPreds, preds);
+	}
+
+	@Test public void testHoistedGatedSynPred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : (X)=> X\n" +
+			"    | X\n" +
+			"    ;\n");
+		String expecting =
+			".s0-X->.s1\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" + // hoists into decision
+			".s1-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+
+		Set<String> preds = g.synPredNamesUsedInDFA;
+		Set<String> expectedPreds = new HashSet<String>() {{add("synpred1_t");}};
+		assertEquals("predicate names not recorded properly in grammar", expectedPreds, preds);
+	}
+
+	@Test public void testHoistedGatedSynPred2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : (X)=> (X|Y)\n" +
+			"    | X\n" +
+			"    ;\n");
+		String expecting =
+			".s0-X->.s1\n" +
+			".s0-Y&&{synpred1_t}?->:s2=>1\n" +
+			".s1-{synpred1_t}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+
+		Set<String> preds = g.synPredNamesUsedInDFA;
+		Set<String> expectedPreds = new HashSet<String>() {{add("synpred1_t");}};
+		assertEquals("predicate names not recorded properly in grammar", expectedPreds, preds);
+	}
+
+	@Test public void testGreedyGetsNoErrorForAmbig() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : IF s (options {greedy=true;} : E s)? | B;\n" +
+			"slist: s SEMI ;");
+		String expecting =
+			".s0-E->:s1=>1\n" +
+			".s0-SEMI->:s2=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, danglingAlts, numWarnings);
+		expecting =
+			".s0-B->:s2=>2\n" +
+			".s0-IF->:s1=>1\n";
+		checkDecision(g, 2, expecting, null, null, null, null, 0);
+	}
+
+	@Test public void testGreedyNonLLStarStillGetsError() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"x   : ( options {greedy=true;}\n" +
+			"	   : y X\n" +
+			"      | y Y\n" +
+			"	   )\n" +
+			"    ;\n" +
+			"y   : L y R\n" +
+			"    | B\n" +
+			"    ;");
+		List<Integer> altsWithRecursion = Arrays.asList(1, 2);
+		assertNonLLStar(g, altsWithRecursion);
+	}
+
+	@Test public void testGreedyRecOverflowStillGetsError() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"s : (options {greedy=true;} : a Y | A A A A A X) ;\n" + // force recursion past m=4
+			"a : A a | Q;");
+		List<String> expectedTargetRules = Arrays.asList("a");
+		int expectedAlt = 1;
+		assertRecursionOverflow(g, expectedTargetRules, expectedAlt);
+	}
+
+
+	// Check state table creation
+
+	@Test public void testCyclicTableCreation() throws Exception {
+		// NOTE: this test only builds the grammar; it never calls checkDecision,
+		// so the expecting string below is not actually verified.
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A+ X | A+ Y ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+	}
+
+
+	// S U P P O R T
+
+	public void _template() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A | B;");
+		String expecting =
+			"\n";
+		checkDecision(g, 1, expecting, null, null, null, null, 0);
+	}
+
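+	/** Build g's NFA and lookahead DFAs if needed, then assert that a fatal
+	 *  non-LL(*) (NonRegularDecisionMessage) error was reported and that its
+	 *  recursive alternatives match expectedBadAlts.
+	 */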
+	protected void assertNonLLStar(Grammar g, List<Integer> expectedBadAlts) {
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// mimic actions of org.antlr.Tool first time for grammar g
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+		NonRegularDecisionMessage msg = getNonRegularDecisionMessage(equeue.errors);
+		assertTrue("expected fatal non-LL(*) msg", msg!=null);
+		List<Integer> alts = new ArrayList<Integer>(msg.altsWithRecursion);
+		Collections.sort(alts);
+		assertEquals(expectedBadAlts,alts);
+	}
+
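+	/** Build g's NFA and lookahead DFAs if needed, then assert that a
+	 *  RecursionOverflowMessage was reported for the expected target rules
+	 *  and alternative.
+	 */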
+	protected void assertRecursionOverflow(Grammar g,
+										   List<String> expectedTargetRules,
+										   int expectedAlt) {
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// mimic actions of org.antlr.Tool first time for grammar g
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+		RecursionOverflowMessage msg = getRecursionOverflowMessage(equeue.errors);
+		assertTrue("missing expected recursion overflow msg: "+msg, msg!=null);
+		assertEquals("target rules mismatch",
+					 expectedTargetRules.toString(), msg.targetRules.toString());
+		assertEquals("mismatched alt", expectedAlt, msg.alt);
+	}
+
+    @Test
+    public void testWildcardInTreeGrammar() throws Exception {
+        Grammar g = new Grammar(
+            "tree grammar t;\n" +
+            "a : A B | A . ;\n");
+        String expecting =
+            ".s0-A->.s1\n" +
+            ".s1-A->:s3=>2\n" +
+            ".s1-B->:s2=>1\n";
+        int[] unreachableAlts = null;
+        int[] nonDetAlts = new int[] {1,2};
+        String ambigInput = null;
+        int[] danglingAlts = null;
+        int numWarnings = 1;
+        checkDecision(g, 1, expecting, unreachableAlts,
+                      nonDetAlts, ambigInput, danglingAlts, numWarnings);
+    }
+
+    @Test
+    public void testWildcardInTreeGrammar2() throws Exception {
+        Grammar g = new Grammar(
+            "tree grammar t;\n" +
+            "a : ^(A X Y) | ^(A . .) ;\n");
+        String expecting =
+            ".s0-A->.s1\n" +
+            ".s1-DOWN->.s2\n" +
+            ".s2-X->.s3\n" +
+            ".s2-{A, Y}->:s6=>2\n" +
+            ".s3-Y->.s4\n" +
+            ".s3-{DOWN, A..X}->:s6=>2\n" +
+            ".s4-DOWN->:s6=>2\n" +
+            ".s4-UP->:s5=>1\n";
+        int[] unreachableAlts = null;
+        int[] nonDetAlts = new int[] {1,2};
+        String ambigInput = null;
+        int[] danglingAlts = null;
+        int numWarnings = 1;
+        checkDecision(g, 1, expecting, unreachableAlts,
+                      nonDetAlts, ambigInput, danglingAlts, numWarnings);
+    }
+
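+    /** Analyze the given decision of g and compare the serialized lookahead
+     *  DFA against 'expecting'.  Also verifies the number of warnings issued
+     *  and, when the corresponding parameters are non-null, the unreachable
+     *  alternatives, the sample ambiguous input, and the nondeterministic
+     *  alternatives reported for that decision.
+     */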
+    protected void checkDecision(Grammar g,
+								 int decision,
+								 String expecting,
+								 int[] expectingUnreachableAlts,
+								 int[] expectingNonDetAlts,
+								 String expectingAmbigInput,
+								 int[] expectingDanglingAlts,
+								 int expectingNumWarnings)
+		throws Exception
+	{
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		// mimic actions of org.antlr.Tool first time for grammar g
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
+		g.setCodeGenerator(generator);
+
+		if ( equeue.size()!=expectingNumWarnings ) {
+			System.err.println("Warnings issued: "+equeue);
+		}
+
+		assertEquals("unexpected number of problems",
+				   expectingNumWarnings, equeue.size());
+
+		DFA dfa = g.getLookaheadDFA(decision);
+		assertNotNull("no DFA for decision "+decision, dfa);
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(dfa.startState);
+
+		List<Integer> unreachableAlts = dfa.getUnreachableAlts();
+
+		// make sure unreachable alts are as expected
+		if ( expectingUnreachableAlts!=null ) {
+			BitSet s = new BitSet();
+			s.addAll(expectingUnreachableAlts);
+			BitSet s2 = new BitSet();
+			s2.addAll(unreachableAlts);
+			assertEquals("unreachable alts mismatch", s, s2);
+		}
+		else {
+			assertEquals("number of unreachable alts", 0,
+						 unreachableAlts!=null?unreachableAlts.size():0);
+		}
+
+		// check conflicting input
+		if ( expectingAmbigInput!=null ) {
+			// first, find nondet message
+			Message msg = equeue.warnings.get(0);
+			assertTrue("expecting nondeterminism; found "+msg.getClass().getName(),
+					    msg instanceof GrammarNonDeterminismMessage);
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			List<Label> labels =
+				nondetMsg.probe.getSampleNonDeterministicInputSequence(nondetMsg.problemState);
+			String input = nondetMsg.probe.getInputSequenceDisplay(labels);
+			assertEquals(expectingAmbigInput, input);
+		}
+
+		// check nondet alts
+		if ( expectingNonDetAlts!=null ) {
+			RecursionOverflowMessage recMsg = null;
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			List<Integer> nonDetAlts = null;
+			if ( nondetMsg!=null ) {
+				nonDetAlts =
+					nondetMsg.probe.getNonDeterministicAltsForState(nondetMsg.problemState);
+			}
+			else {
+				recMsg = getRecursionOverflowMessage(equeue.warnings);
+				if ( recMsg!=null ) {
+					//nonDetAlts = new ArrayList(recMsg.alts);
+				}
+			}
+			// compare nonDetAlts with expectingNonDetAlts
+			BitSet s = new BitSet();
+			s.addAll(expectingNonDetAlts);
+			BitSet s2 = new BitSet();
+			s2.addAll(nonDetAlts);
+			assertEquals("nondet alts mismatch", s, s2);
+			assertTrue("found no nondet alts; expecting: "+
+					    str(expectingNonDetAlts),
+					    nondetMsg!=null||recMsg!=null);
+		}
+		else {
+			// not expecting any nondet alts, make sure there are none
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			assertNull("found nondet alts, but expecting none", nondetMsg);
+		}
+
+		assertEquals(expecting, result);
+	}
+
+	protected GrammarNonDeterminismMessage getNonDeterminismMessage(List<Message> warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = warnings.get(i);
+			if ( m instanceof GrammarNonDeterminismMessage ) {
+				return (GrammarNonDeterminismMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected NonRegularDecisionMessage getNonRegularDecisionMessage(List<Message> errors) {
+		for (int i = 0; i < errors.size(); i++) {
+			Message m = errors.get(i);
+			if ( m instanceof NonRegularDecisionMessage ) {
+				return (NonRegularDecisionMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected RecursionOverflowMessage getRecursionOverflowMessage(List<Message> warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = warnings.get(i);
+			if ( m instanceof RecursionOverflowMessage ) {
+				return (RecursionOverflowMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected LeftRecursionCyclesMessage getLeftRecursionCyclesMessage(List<Message> warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = warnings.get(i);
+			if ( m instanceof LeftRecursionCyclesMessage ) {
+				return (LeftRecursionCyclesMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected GrammarDanglingStateMessage getDanglingStateMessage(List<Message> warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = warnings.get(i);
+			if ( m instanceof GrammarDanglingStateMessage ) {
+				return (GrammarDanglingStateMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected String str(int[] elements) {
+		StringBuilder buf = new StringBuilder();
+		for (int i = 0; i < elements.length; i++) {
+			if ( i>0 ) {
+				buf.append(", ");
+			}
+			int element = elements[i];
+			buf.append(element);
+		}
+		return buf.toString();
+	}
+
+	protected Set<String> ruleNames(Collection<? extends Rule> rules) {
+		Set<String> x = new HashSet<String>();
+		for (Rule r : rules) {
+			x.add(r.name);
+		}
+		return x;
+	}
+
+	protected Set<String> ruleNames2(Collection<? extends Collection<? extends Rule>> rules) {
+		Set<String> x = new HashSet<String>();
+		for (Collection<? extends Rule> s : rules) {
+			x.addAll(ruleNames(s));
+		}
+		return x;
+	}
+}
diff --git a/tool/src/test/java/org/antlr/test/TestDFAMatching.java b/tool/src/test/java/org/antlr/test/TestDFAMatching.java
new file mode 100644
index 0000000..6b8dc7a
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestDFAMatching.java
@@ -0,0 +1,104 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.NFA;
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.tool.Grammar;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestDFAMatching extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestDFAMatching() {
+    }
+
+    @Test public void testSimpleAltCharTest() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : {;}'a' | 'b' | 'c';"); // {;} action keeps the alts from collapsing into one set
+		g.buildNFA();
+		g.createLookaheadDFAs(false);
+        DFA dfa = g.getLookaheadDFA(1);
+        checkPrediction(dfa,"a",1);
+        checkPrediction(dfa,"b",2);
+        checkPrediction(dfa,"c",3);
+        checkPrediction(dfa,"d", NFA.INVALID_ALT_NUMBER);
+    }
+
+    @Test public void testSets() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : {;}'a'..'z' | ';' | '0'..'9' ;");
+		g.buildNFA();
+        g.createLookaheadDFAs(false);
+        DFA dfa = g.getLookaheadDFA(1);
+        checkPrediction(dfa,"a",1);
+        checkPrediction(dfa,"q",1);
+        checkPrediction(dfa,"z",1);
+        checkPrediction(dfa,";",2);
+        checkPrediction(dfa,"9",3);
+    }
+
+    @Test public void testFiniteCommonLeftPrefixes() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : 'a' 'b' | 'a' 'c' | 'd' 'e' ;");
+		g.buildNFA();
+        g.createLookaheadDFAs(false);
+        DFA dfa = g.getLookaheadDFA(1);
+        checkPrediction(dfa,"ab",1);
+        checkPrediction(dfa,"ac",2);
+        checkPrediction(dfa,"de",3);
+        checkPrediction(dfa,"q", NFA.INVALID_ALT_NUMBER);
+    }
+
+    @Test public void testSimpleLoops() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : (DIGIT)+ '.' DIGIT | (DIGIT)+ ;\n" +
+                "fragment DIGIT : '0'..'9' ;\n");
+		g.buildNFA();
+        g.createLookaheadDFAs(false);
+        DFA dfa = g.getLookaheadDFA(3);
+        checkPrediction(dfa,"32",2);
+        checkPrediction(dfa,"999.2",1);
+        checkPrediction(dfa,".2", NFA.INVALID_ALT_NUMBER);
+    }
+
+    protected void checkPrediction(DFA dfa, String input, int expected)
+        throws Exception
+    {
+        ANTLRStringStream stream = new ANTLRStringStream(input);
+        assertEquals(expected, dfa.predict(stream));
+    }
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestFastQueue.java b/tool/src/test/java/org/antlr/test/TestFastQueue.java
new file mode 100644
index 0000000..fce3019
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestFastQueue.java
@@ -0,0 +1,132 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.runtime.misc.FastQueue;
+import org.junit.Test;
+
+import java.util.NoSuchElementException;
+
+import static org.junit.Assert.*;
+
+public class TestFastQueue {
+    @Test public void testQueueNoRemove() throws Exception {
+        FastQueue<String> q = new FastQueue<String>();
+        q.add("a");
+        q.add("b");
+        q.add("c");
+        q.add("d");
+        q.add("e");
+        String expecting = "a b c d e";
+        String found = q.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testQueueThenRemoveAll() throws Exception {
+        FastQueue<String> q = new FastQueue<String>();
+        q.add("a");
+        q.add("b");
+        q.add("c");
+        q.add("d");
+        q.add("e");
+        StringBuilder buf = new StringBuilder();
+        while ( q.size()>0 ) {
+            String o = q.remove();
+            buf.append(o);
+            if ( q.size()>0 ) buf.append(" ");
+        }
+        assertEquals("queue should be empty", 0, q.size());
+        String expecting = "a b c d e";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testQueueThenRemoveOneByOne() throws Exception {
+        StringBuilder buf = new StringBuilder();
+        FastQueue<String> q = new FastQueue<String>();
+        q.add("a");
+        buf.append(q.remove());
+        q.add("b");
+        buf.append(q.remove());
+        q.add("c");
+        buf.append(q.remove());
+        q.add("d");
+        buf.append(q.remove());
+        q.add("e");
+        buf.append(q.remove());
+        assertEquals("queue should be empty", 0, q.size());
+        String expecting = "abcde";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    // E r r o r s
+
+    @Test public void testGetFromEmptyQueue() throws Exception {
+        FastQueue<String> q = new FastQueue<String>();
+        String msg = null;
+        try { q.remove(); }
+        catch (NoSuchElementException nsee) {
+            msg = nsee.getMessage();
+        }
+        String expecting = "queue index 0 > last index -1";
+        String found = msg;
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testGetFromEmptyQueueAfterSomeAdds() throws Exception {
+        FastQueue<String> q = new FastQueue<String>();
+        q.add("a");
+        q.add("b");
+        q.remove();
+        q.remove();
+        String msg = null;
+        try { q.remove(); }
+        catch (NoSuchElementException nsee) {
+            msg = nsee.getMessage();
+        }
+        String expecting = "queue index 0 > last index -1";
+        String found = msg;
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testGetFromEmptyQueueAfterClear() throws Exception {
+        FastQueue<String> q = new FastQueue<String>();
+        q.add("a");
+        q.add("b");
+        q.clear();
+        String msg = null;
+        try { q.remove(); }
+        catch (NoSuchElementException nsee) {
+            msg = nsee.getMessage();
+        }
+        String expecting = "queue index 0 > last index -1";
+        String found = msg;
+        assertEquals(expecting, found);
+    }
+}
diff --git a/tool/src/test/java/org/antlr/test/TestHeteroAST.java b/tool/src/test/java/org/antlr/test/TestHeteroAST.java
new file mode 100644
index 0000000..a9f694f
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestHeteroAST.java
@@ -0,0 +1,546 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/** Test hetero trees in parsers and tree parsers */
+public class TestHeteroAST extends BaseTest {
+	protected boolean debug = false;
+
+	// PARSERS -- AUTO AST
+
+    @Test public void testToken() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "@members {static class V extends CommonTree {\n" +
+            "  public V(Token t) { token=t;}\n" +
+            "  public String toString() { return token.getText()+\"<V>\";}\n" +
+            "}\n" +
+            "}\n"+
+            "a : ID<V> ;\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                    "a", "a", debug);
+        assertEquals("a<V>\n", found);
+    }
+
+	@Test public void testTokenCommonTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID<CommonTree> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+					"a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+    @Test public void testTokenWithQualifiedType() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "@members {static class V extends CommonTree {\n" +
+            "  public V(Token t) { token=t;}\n" +
+            "  public String toString() { return token.getText()+\"<V>\";}\n" +
+            "}\n" +
+            "}\n"+
+            "a : ID<TParser.V> ;\n"+ // TParser.V is a qualified name
+            "ID : 'a'..'z'+ ;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                    "a", "a", debug);
+        assertEquals("a<V>\n", found);
+    }
+
+	@Test public void testNamedType() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID<node=V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+					"a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+
+	@Test public void testTokenWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : x=ID<V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+	@Test public void testTokenWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : x+=ID<V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+	@Test public void testTokenRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID<V>^ ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+	@Test public void testTokenRootWithListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : x+=ID<V>^ ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+	@Test public void testString() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : 'begin'<V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "begin", debug);
+		assertEquals("begin<V>\n", found);
+	}
+
+	@Test public void testStringRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : 'begin'<V>^ ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "begin", debug);
+		assertEquals("begin<V>\n", found);
+	}
+
+	// PARSERS -- REWRITE AST
+
+	@Test public void testRewriteToken() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ID<V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a<V>\n", found);
+	}
+
+	@Test public void testRewriteTokenWithArgs() throws Exception {
+		// an arg to ID<V>[42,19,30] means you're constructing a node not associated
+		// with the ID token, so you must pass in a token manually
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {\n" +
+			"static class V extends CommonTree {\n" +
+			"  public int x,y,z;\n"+
+			"  public V(int ttype, int x, int y, int z) { this.x=x; this.y=y; this.z=z; token=new CommonToken(ttype,\"\"); }\n" +
+			"  public V(int ttype, Token t, int x) { token=t; this.x=x;}\n" +
+			"  public String toString() { return (token!=null?token.getText():\"\")+\"<V>;\"+x+y+z;}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ID<V>[42,19,30] ID<V>[$ID,99] ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("<V>;421930 a<V>;9900\n", found);
+	}
+
+	@Test public void testRewriteTokenRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID INT -> ^(ID<V> INT) ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 2", debug);
+		assertEquals("(a<V> 2)\n", found);
+	}
+
+	@Test public void testRewriteString() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : 'begin' -> 'begin'<V> ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "begin", debug);
+		assertEquals("begin<V>\n", found);
+	}
+
+	@Test public void testRewriteStringRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"@members {static class V extends CommonTree {\n" +
+			"  public V(Token t) { token=t;}\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : 'begin' INT -> ^('begin'<V> INT) ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "begin 2", debug);
+		assertEquals("(begin<V> 2)\n", found);
+	}
+
+    @Test public void testRewriteRuleResults() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "tokens {LIST;}\n" +
+            "@members {\n" +
+            "static class V extends CommonTree {\n" +
+            "  public V(Token t) { token=t;}\n" +
+            "  public String toString() { return token.getText()+\"<V>\";}\n" +
+            "}\n" +
+            "static class W extends CommonTree {\n" +
+            "  public W(int tokenType, String txt) { super(new CommonToken(tokenType,txt)); }\n" +
+            "  public W(Token t) { token=t;}\n" +
+            "  public String toString() { return token.getText()+\"<W>\";}\n" +
+            "}\n" +
+            "}\n"+
+            "a : id (',' id)* -> ^(LIST<W>[\"LIST\"] id+);\n" +
+            "id : ID -> ID<V>;\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                    "a", "a,b,c", debug);
+        assertEquals("(LIST<W> a<V> b<V> c<V>)\n", found);
+    }
+
+    @Test public void testCopySemanticsWithHetero() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "@members {\n" +
+            "static class V extends CommonTree {\n" +
+            "  public V(Token t) { token=t;}\n" +  // for 'int'<V>
+            "  public V(V node) { super(node); }\n\n" + // for dupNode
+            "  public Tree dupNode() { return new V(this); }\n" + // for dup'ing type
+            "  public String toString() { return token.getText()+\"<V>\";}\n" +
+            "}\n" +
+            "}\n" +
+            "a : type ID (',' ID)* ';' -> ^(type ID)+;\n" +
+            "type : 'int'<V> ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+        String found = execParser("T.g", grammar, "TParser", "TLexer",
+                    "a", "int a, b, c;", debug);
+        assertEquals("(int<V> a) (int<V> b) (int<V> c)\n", found);
+    }
+
+    // TREE PARSERS -- REWRITE AST
+
+	@Test public void testTreeParserRewriteFlatList() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"@members {\n" +
+			"static class V extends CommonTree {\n" +
+			"  public V(Object t) { super((CommonTree)t); }\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"static class W extends CommonTree {\n" +
+			"  public W(Object t) { super((CommonTree)t); }\n" +
+			"  public String toString() { return token.getText()+\"<W>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID INT -> INT<V> ID<W>\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("34<V> abc<W>\n", found);
+	}
+
+	@Test public void testTreeParserRewriteTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"@members {\n" +
+			"static class V extends CommonTree {\n" +
+			"  public V(Object t) { super((CommonTree)t); }\n" +
+			"  public String toString() { return token.getText()+\"<V>\";}\n" +
+			"}\n" +
+			"static class W extends CommonTree {\n" +
+			"  public W(Object t) { super((CommonTree)t); }\n" +
+			"  public String toString() { return token.getText()+\"<W>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID INT -> ^(INT<V> ID<W>)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(34<V> abc<W>)\n", found);
+	}
+
+	@Test public void testTreeParserRewriteImaginary() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"tokens { ROOT; }\n" +
+			"@members {\n" +
+			"class V extends CommonTree {\n" +
+			"  public V(int tokenType) { super(new CommonToken(tokenType)); }\n" +
+			"  public String toString() { return tokenNames[token.getType()]+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ROOT<V> ID\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("ROOT<V> abc\n", found);
+	}
+
+	@Test public void testTreeParserRewriteImaginaryWithArgs() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"tokens { ROOT; }\n" +
+			"@members {\n" +
+			"class V extends CommonTree {\n" +
+			"  public int x;\n" +
+			"  public V(int tokenType, int x) { super(new CommonToken(tokenType)); this.x=x;}\n" +
+			"  public String toString() { return tokenNames[token.getType()]+\"<V>;\"+x;}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ROOT<V>[42] ID\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("ROOT<V>;42 abc\n", found);
+	}
+
+	@Test public void testTreeParserRewriteImaginaryRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"tokens { ROOT; }\n" +
+			"@members {\n" +
+			"class V extends CommonTree {\n" +
+			"  public V(int tokenType) { super(new CommonToken(tokenType)); }\n" +
+			"  public String toString() { return tokenNames[token.getType()]+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ^(ROOT<V> ID)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("(ROOT<V> abc)\n", found);
+	}
+
+	@Test public void testTreeParserRewriteImaginaryFromReal() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"tokens { ROOT; }\n" +
+			"@members {\n" +
+			"class V extends CommonTree {\n" +
+			"  public V(int tokenType) { super(new CommonToken(tokenType)); }\n" +
+			"  public V(int tokenType, Object tree) { super((CommonTree)tree); token.setType(tokenType); }\n" +
+			"  public String toString() { return tokenNames[token.getType()]+\"<V>@\"+token.getLine();}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID -> ROOT<V>[$ID]\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("ROOT<V>@1\n", found); // at line 1; shows copy of ID's stuff
+	}
+
+	@Test public void testTreeParserAutoHeteroAST() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ';' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"tokens { ROOT; }\n" +
+			"@members {\n" +
+			"class V extends CommonTree {\n" +
+			"  public V(CommonTree t) { super(t); }\n" + // NEEDS SPECIAL CTOR
+			"  public String toString() { return super.toString()+\"<V>\";}\n" +
+			"}\n" +
+			"}\n"+
+			"a : ID<V> ';'<V>\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc;");
+		assertEquals("abc<V> ;<V>\n", found);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestInterpretedLexing.java b/tool/src/test/java/org/antlr/test/TestInterpretedLexing.java
new file mode 100644
index 0000000..6a3ab1e
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestInterpretedLexing.java
@@ -0,0 +1,182 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.CommonTokenStream;
+import org.antlr.runtime.Token;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Interpreter;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestInterpretedLexing extends BaseTest {
+
+	/*
+	static class Tracer implements ANTLRDebugInterface {
+		Grammar g;
+		public DebugActions(Grammar g) {
+			this.g = g;
+		}
+		public void enterRule(String ruleName) {
+			System.out.println("enterRule("+ruleName+")");
+		}
+
+		public void exitRule(String ruleName) {
+			System.out.println("exitRule("+ruleName+")");
+		}
+
+		public void matchElement(int type) {
+			System.out.println("matchElement("+g.getTokenName(type)+")");
+		}
+
+		public void mismatchedElement(MismatchedTokenException e) {
+			System.out.println(e);
+			e.printStackTrace(System.out);
+		}
+
+		public void mismatchedSet(MismatchedSetException e) {
+			System.out.println(e);
+			e.printStackTrace(System.out);
+		}
+
+		public void noViableAlt(NoViableAltException e) {
+			System.out.println(e);
+			e.printStackTrace(System.out);
+		}
+	}
+    */
+
+    /** Public default constructor used by TestRig */
+    public TestInterpretedLexing() {
+    }
+
+	@Test public void testSimpleAltCharTest() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : 'a' | 'b' | 'c';");
+		final int Atype = g.getTokenType("A");
+		Interpreter engine = new Interpreter(g, new ANTLRStringStream("a"));
+		Token result = engine.scan("A");
+		assertEquals(Atype, result.getType());
+		engine = new Interpreter(g, new ANTLRStringStream("b"));
+		result = engine.scan("A");
+		assertEquals(Atype, result.getType());
+		engine = new Interpreter(g, new ANTLRStringStream("c"));
+		result = engine.scan("A");
+		assertEquals(Atype, result.getType());
+    }
+
+    @Test public void testSingleRuleRef() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : 'a' B 'c' ;\n" +
+                "B : 'b' ;\n");
+		final int Atype = g.getTokenType("A");
+		Interpreter engine = new Interpreter(g, new ANTLRStringStream("abc"));
+		Token result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+    }
+
+    @Test public void testSimpleLoop() throws Exception {
+        Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "INT : (DIGIT)+ ;\n"+
+				"fragment DIGIT : '0'..'9';\n");
+		final int INTtype = g.getTokenType("INT");
+		Interpreter engine = new Interpreter(g, new ANTLRStringStream("12x")); // should ignore the x
+		Token result = engine.scan("INT");
+		assertEquals(result.getType(), INTtype);
+		engine = new Interpreter(g, new ANTLRStringStream("1234"));
+		result = engine.scan("INT");
+		assertEquals(result.getType(), INTtype);
+    }
+
+    @Test public void testMultAltLoop() throws Exception {
+		Grammar g = new Grammar(
+                "lexer grammar t;\n"+
+                "A : ('0'..'9'|'a'|'b')+ ;\n");
+		final int Atype = g.getTokenType("A");
+		Interpreter engine = new Interpreter(g, new ANTLRStringStream("a"));
+		Token result = engine.scan("A");
+		assertEquals(Atype, result.getType());
+		engine = new Interpreter(g, new ANTLRStringStream("1234"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+        engine = new Interpreter(g, new ANTLRStringStream("aaa"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+        engine = new Interpreter(g, new ANTLRStringStream("aaaa9"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+        engine = new Interpreter(g, new ANTLRStringStream("b"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+        engine = new Interpreter(g, new ANTLRStringStream("baa"));
+		result = engine.scan("A");
+		assertEquals(result.getType(), Atype);
+    }
+
+	@Test public void testSimpleLoops() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar t;\n"+
+				"A : ('0'..'9')+ '.' ('0'..'9')* | ('0'..'9')+ ;\n");
+		final int Atype = g.getTokenType("A");
+		CharStream input = new ANTLRStringStream("1234.5");
+		Interpreter engine = new Interpreter(g, input);
+		Token result = engine.scan("A");
+		assertEquals(Atype, result.getType());
+	}
+
+	@Test public void testTokensRules() throws Exception {
+		Grammar pg = new Grammar(
+			"parser grammar p;\n"+
+			"a : (INT|FLOAT|WS)+;\n");
+		Grammar g = new Grammar();
+		g.importTokenVocabulary(pg);
+		g.setFileName("<string>");
+		g.setGrammarContent(
+			"lexer grammar t;\n"+
+			"INT : (DIGIT)+ ;\n"+
+			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
+			"fragment DIGIT : '0'..'9';\n" +
+			"WS : (' ')+ {channel=99;};\n");
+		CharStream input = new ANTLRStringStream("123 139.52");
+		Interpreter lexEngine = new Interpreter(g, input);
+
+		CommonTokenStream tokens = new CommonTokenStream(lexEngine);
+        tokens.LT(5); // make sure it grabs all tokens
+		String result = tokens.toString();
+		//System.out.println(result);
+		String expecting = "123 139.52";
+		assertEquals(expecting, result);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestInterpretedParsing.java b/tool/src/test/java/org/antlr/test/TestInterpretedParsing.java
new file mode 100644
index 0000000..1e2e910
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestInterpretedParsing.java
@@ -0,0 +1,184 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.tree.ParseTree;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Interpreter;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestInterpretedParsing extends BaseTest {
+    /** Public default constructor used by TestRig */
+    public TestInterpretedParsing() {
+    }
+
+    @Test public void testSimpleParse() throws Exception {
+        Grammar pg = new Grammar(
+            "parser grammar p;\n"+
+            "prog : WHILE ID LCURLY (assign)* RCURLY EOF;\n" +
+            "assign : ID ASSIGN expr SEMI ;\n" +
+			"expr : INT | FLOAT | ID ;\n");
+		Grammar g = new Grammar();
+		g.importTokenVocabulary(pg);
+		g.setFileName(Grammar.IGNORE_STRING_IN_GRAMMAR_FILE_NAME +"string");
+		g.setGrammarContent(
+			"lexer grammar t;\n"+
+			"WHILE : 'while';\n"+
+			"LCURLY : '{';\n"+
+			"RCURLY : '}';\n"+
+			"ASSIGN : '=';\n"+
+			"SEMI : ';';\n"+
+			"ID : ('a'..'z')+ ;\n"+
+			"INT : (DIGIT)+ ;\n"+
+			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
+			"fragment DIGIT : '0'..'9';\n" +
+			"WS : (' ')+ ;\n");
+		CharStream input = new ANTLRStringStream("while x { i=1; y=3.42; z=y; }");
+		Interpreter lexEngine = new Interpreter(g, input);
+
+		FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
+		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
+		//System.out.println("tokens="+tokens.toString());
+		Interpreter parseEngine = new Interpreter(pg, tokens);
+		ParseTree t = parseEngine.parse("prog");
+		String result = t.toStringTree();
+		String expecting =
+			"(<grammar p> (prog while x { (assign i = (expr 1) ;) (assign y = (expr 3.42) ;) (assign z = (expr y) ;) } <EOF>))";
+		assertEquals(expecting, result);
+	}
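+	// Note on the pattern above: the interpreted lexer defines WS with no channel
+	// action, so the test pushes WS tokens onto channel 99 through the
+	// FilteringTokenStream helper before handing the stream to the parser interpreter.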
+
+	@Test public void testMismatchedTokenError() throws Exception {
+		Grammar pg = new Grammar(
+			"parser grammar p;\n"+
+			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
+			"assign : ID ASSIGN expr SEMI ;\n" +
+			"expr : INT | FLOAT | ID ;\n");
+		Grammar g = new Grammar();
+		g.setFileName(Grammar.IGNORE_STRING_IN_GRAMMAR_FILE_NAME +"string");
+		g.importTokenVocabulary(pg);
+		g.setGrammarContent(
+			"lexer grammar t;\n"+
+			"WHILE : 'while';\n"+
+			"LCURLY : '{';\n"+
+			"RCURLY : '}';\n"+
+			"ASSIGN : '=';\n"+
+			"SEMI : ';';\n"+
+			"ID : ('a'..'z')+ ;\n"+
+			"INT : (DIGIT)+ ;\n"+
+			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
+			"fragment DIGIT : '0'..'9';\n" +
+			"WS : (' ')+ ;\n");
+		CharStream input = new ANTLRStringStream("while x { i=1 y=3.42; z=y; }");
+		Interpreter lexEngine = new Interpreter(g, input);
+
+		FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
+		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
+		//System.out.println("tokens="+tokens.toString());
+		Interpreter parseEngine = new Interpreter(pg, tokens);
+		ParseTree t = parseEngine.parse("prog");
+		String result = t.toStringTree();
+		String expecting =
+			"(<grammar p> (prog while x { (assign i = (expr 1) MismatchedTokenException(6!=10))))";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testMismatchedSetError() throws Exception {
+		Grammar pg = new Grammar(
+			"parser grammar p;\n"+
+			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
+			"assign : ID ASSIGN expr SEMI ;\n" +
+			"expr : INT | FLOAT | ID ;\n");
+		Grammar g = new Grammar();
+		g.importTokenVocabulary(pg);
+		g.setFileName("<string>");
+		g.setGrammarContent(
+			"lexer grammar t;\n"+
+			"WHILE : 'while';\n"+
+			"LCURLY : '{';\n"+
+			"RCURLY : '}';\n"+
+			"ASSIGN : '=';\n"+
+			"SEMI : ';';\n"+
+			"ID : ('a'..'z')+ ;\n"+
+			"INT : (DIGIT)+ ;\n"+
+			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
+			"fragment DIGIT : '0'..'9';\n" +
+			"WS : (' ')+ ;\n");
+		CharStream input = new ANTLRStringStream("while x { i=; y=3.42; z=y; }");
+		Interpreter lexEngine = new Interpreter(g, input);
+
+		FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
+		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
+		//System.out.println("tokens="+tokens.toString());
+		Interpreter parseEngine = new Interpreter(pg, tokens);
+		ParseTree t = parseEngine.parse("prog");
+		String result = t.toStringTree();
+		String expecting =
+			"(<grammar p> (prog while x { (assign i = (expr MismatchedSetException(10!={5,6,7})))))";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testNoViableAltError() throws Exception {
+		Grammar pg = new Grammar(
+			"parser grammar p;\n"+
+			"prog : WHILE ID LCURLY (assign)* RCURLY;\n" +
+			"assign : ID ASSIGN expr SEMI ;\n" +
+			"expr : {;}INT | FLOAT | ID ;\n");
+		Grammar g = new Grammar();
+		g.importTokenVocabulary(pg);
+		g.setFileName("<string>");
+		g.setGrammarContent(
+			"lexer grammar t;\n"+
+			"WHILE : 'while';\n"+
+			"LCURLY : '{';\n"+
+			"RCURLY : '}';\n"+
+			"ASSIGN : '=';\n"+
+			"SEMI : ';';\n"+
+			"ID : ('a'..'z')+ ;\n"+
+			"INT : (DIGIT)+ ;\n"+
+			"FLOAT : (DIGIT)+ '.' (DIGIT)* ;\n"+
+			"fragment DIGIT : '0'..'9';\n" +
+			"WS : (' ')+ ;\n");
+		CharStream input = new ANTLRStringStream("while x { i=; y=3.42; z=y; }");
+		Interpreter lexEngine = new Interpreter(g, input);
+
+		FilteringTokenStream tokens = new FilteringTokenStream(lexEngine);
+		tokens.setTokenTypeChannel(g.getTokenType("WS"), 99);
+		//System.out.println("tokens="+tokens.toString());
+		Interpreter parseEngine = new Interpreter(pg, tokens);
+		ParseTree t = parseEngine.parse("prog");
+		String result = t.toStringTree();
+		String expecting =
+			"(<grammar p> (prog while x { (assign i = (expr NoViableAltException(10@[4:1: expr : ( INT | FLOAT | ID );])))))";
+		assertEquals(expecting, result);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestIntervalSet.java b/tool/src/test/java/org/antlr/test/TestIntervalSet.java
new file mode 100644
index 0000000..0e92a52
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestIntervalSet.java
@@ -0,0 +1,405 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.analysis.Label;
+import org.antlr.misc.IntervalSet;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestIntervalSet extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestIntervalSet() {
+    }
+
+    @Test public void testSingleElement() throws Exception {
+        IntervalSet s = IntervalSet.of(99);
+        String expecting = "99";
+        assertEquals(s.toString(), expecting);
+    }
+
+    @Test public void testIsolatedElements() throws Exception {
+        IntervalSet s = new IntervalSet();
+        s.add(1);
+        s.add('z');
+        s.add('\uFFF0');
+        String expecting = "{1, 122, 65520}";
+        assertEquals(s.toString(), expecting);
+    }
+
+    @Test public void testMixedRangesAndElements() throws Exception {
+        IntervalSet s = new IntervalSet();
+        s.add(1);
+        s.add('a','z');
+        s.add('0','9');
+        String expecting = "{1, 48..57, 97..122}";
+        assertEquals(s.toString(), expecting);
+    }
+
+    @Test public void testSimpleAnd() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(13,15);
+        String expecting = "13..15";
+        String result = (s.and(s2)).toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testRangeAndIsolatedElement() throws Exception {
+        IntervalSet s = IntervalSet.of('a','z');
+        IntervalSet s2 = IntervalSet.of('d');
+        String expecting = "100";
+        String result = (s.and(s2)).toString();
+        assertEquals(result, expecting);
+    }
+
+	@Test public void testEmptyIntersection() throws Exception {
+		IntervalSet s = IntervalSet.of('a','z');
+		IntervalSet s2 = IntervalSet.of('0','9');
+		String expecting = "{}";
+		String result = (s.and(s2)).toString();
+		assertEquals(result, expecting);
+	}
+
+	@Test public void testEmptyIntersectionSingleElements() throws Exception {
+		IntervalSet s = IntervalSet.of('a');
+		IntervalSet s2 = IntervalSet.of('d');
+		String expecting = "{}";
+		String result = (s.and(s2)).toString();
+		assertEquals(result, expecting);
+	}
+
+    @Test public void testNotSingleElement() throws Exception {
+        IntervalSet vocabulary = IntervalSet.of(1,1000);
+        vocabulary.add(2000,3000);
+        IntervalSet s = IntervalSet.of(50,50);
+        String expecting = "{1..49, 51..1000, 2000..3000}";
+        String result = (s.complement(vocabulary)).toString();
+        assertEquals(result, expecting);
+    }
+
+	@Test public void testNotSet() throws Exception {
+		IntervalSet vocabulary = IntervalSet.of(1,1000);
+		IntervalSet s = IntervalSet.of(50,60);
+		s.add(5);
+		s.add(250,300);
+		String expecting = "{1..4, 6..49, 61..249, 301..1000}";
+		String result = (s.complement(vocabulary)).toString();
+		assertEquals(result, expecting);
+	}
+
+	@Test public void testNotEqualSet() throws Exception {
+		IntervalSet vocabulary = IntervalSet.of(1,1000);
+		IntervalSet s = IntervalSet.of(1,1000);
+		String expecting = "{}";
+		String result = (s.complement(vocabulary)).toString();
+		assertEquals(result, expecting);
+	}
+
+	@Test public void testNotSetEdgeElement() throws Exception {
+		IntervalSet vocabulary = IntervalSet.of(1,2);
+		IntervalSet s = IntervalSet.of(1);
+		String expecting = "2";
+		String result = (s.complement(vocabulary)).toString();
+		assertEquals(result, expecting);
+	}
+
+    @Test public void testNotSetFragmentedVocabulary() throws Exception {
+        IntervalSet vocabulary = IntervalSet.of(1,255);
+        vocabulary.add(1000,2000);
+        vocabulary.add(9999);
+        IntervalSet s = IntervalSet.of(50,60);
+        s.add(3);
+        s.add(250,300);
+        s.add(10000); // this is outside range of vocab and should be ignored
+        String expecting = "{1..2, 4..49, 61..249, 1000..2000, 9999}";
+        String result = (s.complement(vocabulary)).toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testSubtractOfCompletelyContainedRange() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(12,15);
+        String expecting = "{10..11, 16..20}";
+        String result = (s.subtract(s2)).toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testSubtractOfOverlappingRangeFromLeft() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(5,11);
+        String expecting = "12..20";
+        String result = (s.subtract(s2)).toString();
+        assertEquals(result, expecting);
+
+        IntervalSet s3 = IntervalSet.of(5,10);
+        expecting = "11..20";
+        result = (s.subtract(s3)).toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testSubtractOfOverlappingRangeFromRight() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(15,25);
+        String expecting = "10..14";
+        String result = (s.subtract(s2)).toString();
+        assertEquals(result, expecting);
+
+        IntervalSet s3 = IntervalSet.of(20,25);
+        expecting = "10..19";
+        result = (s.subtract(s3)).toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testSubtractOfCompletelyCoveredRange() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(1,25);
+        String expecting = "{}";
+        String result = (s.subtract(s2)).toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testSubtractOfRangeSpanningMultipleRanges() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        s.add(30,40);
+        s.add(50,60); // s has 3 ranges now: 10..20, 30..40, 50..60
+        IntervalSet s2 = IntervalSet.of(5,55); // covers one and touches 2nd range
+        String expecting = "56..60";
+        String result = (s.subtract(s2)).toString();
+        assertEquals(result, expecting);
+
+        IntervalSet s3 = IntervalSet.of(15,55); // touches both
+        expecting = "{10..14, 56..60}";
+        result = (s.subtract(s3)).toString();
+        assertEquals(result, expecting);
+    }
+
+	/** The following was broken:
+	 	{0..113, 115..65534}-{0..115, 117..65534}=116..65534
+	 */
+	@Test public void testSubtractOfWackyRange() throws Exception {
+		IntervalSet s = IntervalSet.of(0,113);
+		s.add(115,200);
+		IntervalSet s2 = IntervalSet.of(0,115);
+		s2.add(117,200);
+		String expecting = "116";
+		String result = (s.subtract(s2)).toString();
+		assertEquals(result, expecting);
+	}
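+	// Worked through by hand: {0..113, 115..200} - {0..115, 117..200} removes all of
+	// 0..113 and 115, and removes 117..200 from 115..200, leaving only 116.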
+
+    @Test public void testSimpleEquals() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(10,20);
+        Boolean expecting = true;
+        Boolean result = s.equals(s2);
+        assertEquals(result, expecting);
+
+        IntervalSet s3 = IntervalSet.of(15,55);
+        expecting = false;
+        result = s.equals(s3);
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testEquals() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        s.add(2);
+        s.add(499,501);
+        IntervalSet s2 = IntervalSet.of(10,20);
+        s2.add(2);
+        s2.add(499,501);
+        Boolean expecting = true;
+        Boolean result = s.equals(s2);
+        assertEquals(result, expecting);
+
+        IntervalSet s3 = IntervalSet.of(10,20);
+        s3.add(2);
+        expecting = false;
+        result = s.equals(s3);
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testSingleElementMinusDisjointSet() throws Exception {
+        IntervalSet s = IntervalSet.of(15,15);
+        IntervalSet s2 = IntervalSet.of(1,5);
+        s2.add(10,20);
+        String expecting = "{}"; // 15 - {1..5, 10..20} = {}
+        String result = s.subtract(s2).toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testMembership() throws Exception {
+        IntervalSet s = IntervalSet.of(15,15);
+        s.add(50,60);
+        assertTrue(!s.member(0));
+        assertTrue(!s.member(20));
+        assertTrue(!s.member(100));
+        assertTrue(s.member(15));
+        assertTrue(s.member(55));
+        assertTrue(s.member(50));
+        assertTrue(s.member(60));
+    }
+
+    // {2,15,18} & 10..20
+    @Test public void testIntersectionWithTwoContainedElements() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(2,2);
+        s2.add(15);
+        s2.add(18);
+        String expecting = "{15, 18}";
+        String result = (s.and(s2)).toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testIntersectionWithTwoContainedElementsReversed() throws Exception {
+        IntervalSet s = IntervalSet.of(10,20);
+        IntervalSet s2 = IntervalSet.of(2,2);
+        s2.add(15);
+        s2.add(18);
+        String expecting = "{15, 18}";
+        String result = (s2.and(s)).toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testComplement() throws Exception {
+        IntervalSet s = IntervalSet.of(100,100);
+        s.add(101,101);
+        IntervalSet s2 = IntervalSet.of(100,102);
+        String expecting = "102";
+        String result = (s.complement(s2)).toString();
+        assertEquals(result, expecting);
+    }
+
+	@Test public void testComplement2() throws Exception {
+		IntervalSet s = IntervalSet.of(100,101);
+		IntervalSet s2 = IntervalSet.of(100,102);
+		String expecting = "102";
+		String result = (s.complement(s2)).toString();
+		assertEquals(result, expecting);
+	}
+
+	@Test public void testComplement3() throws Exception {
+		IntervalSet s = IntervalSet.of(1,96);
+		s.add(99,Label.MAX_CHAR_VALUE);
+		String expecting = "97..98";
+		String result = (s.complement(1,Label.MAX_CHAR_VALUE)).toString();
+		assertEquals(result, expecting);
+	}
+
+    @Test public void testMergeOfRangesAndSingleValues() throws Exception {
+        // {0..41, 42, 43..65534}
+        IntervalSet s = IntervalSet.of(0,41);
+        s.add(42);
+        s.add(43,65534);
+        String expecting = "0..65534";
+        String result = s.toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testMergeOfRangesAndSingleValuesReverse() throws Exception {
+        IntervalSet s = IntervalSet.of(43,65534);
+        s.add(42);
+        s.add(0,41);
+        String expecting = "0..65534";
+        String result = s.toString();
+        assertEquals(result, expecting);
+    }
+
+    @Test public void testMergeWhereAdditionMergesTwoExistingIntervals() throws Exception {
+        // 42, 10, {0..9, 11..41, 43..65534}
+        IntervalSet s = IntervalSet.of(42);
+        s.add(10);
+        s.add(0,9);
+        s.add(43,65534);
+        s.add(11,41);
+        String expecting = "0..65534";
+        String result = s.toString();
+        assertEquals(result, expecting);
+    }
+
+	/**
+	 * This case is responsible for antlr/antlr4#153.
+	 * https://github.com/antlr/antlr4/issues/153
+	 * <p>
+	 * Resolution back-ported from V4.</p>
+	 */
+	@Test public void testMergeWhereAdditionMergesThreeExistingIntervals() throws Exception {
+		IntervalSet s = new IntervalSet();
+		s.add(0);
+		s.add(3);
+		s.add(5);
+		s.add(0, 7);
+		String expecting = "0..7";
+		String result = s.toString();
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testMergeWithDoubleOverlap() throws Exception {
+		IntervalSet s = IntervalSet.of(1,10);
+		s.add(20,30);
+		s.add(5,25); // overlaps two!
+		String expecting = "1..30";
+		String result = s.toString();
+		assertEquals(result, expecting);
+	}
+
+	@Test public void testSize() throws Exception {
+		IntervalSet s = IntervalSet.of(20,30);
+		s.add(50,55);
+		s.add(5,19);
+		String expecting = "32";
+		String result = String.valueOf(s.size());
+		assertEquals(result, expecting);
+	}
+
+	@Test public void testToList() throws Exception {
+		IntervalSet s = IntervalSet.of(20,25);
+		s.add(50,55);
+		s.add(5,5);
+		String expecting = "[5, 20, 21, 22, 23, 24, 25, 50, 51, 52, 53, 54, 55]";
+		String result = String.valueOf(s.toList());
+		assertEquals(result, expecting);
+	}
+
+	/** The following was broken:
+	    {'\u0000'..'s', 'u'..'\uFFFE'} &amp; {'\u0000'..'q', 's'..'\uFFFE'}=
+	    {'\u0000'..'q', 's'}!!!! broken...
+	 	'q' is 113 ascii
+	 	'u' is 117
+	*/
+	@Test public void testNotRIntersectionNotT() throws Exception {
+		IntervalSet s = IntervalSet.of(0,'s');
+		s.add('u',200);
+		IntervalSet s2 = IntervalSet.of(0,'q');
+		s2.add('s',200);
+		String expecting = "{0..113, 115, 117..200}";
+		String result = (s.and(s2)).toString();
+		assertEquals(result, expecting);
+	}
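+	// Character arithmetic for the case above: 'q'=113, 's'=115, 'u'=117; 114 is absent
+	// from the second operand and 116 from the first, so both drop out of the
+	// intersection, giving {0..113, 115, 117..200}.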
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestJavaCodeGeneration.java b/tool/src/test/java/org/antlr/test/TestJavaCodeGeneration.java
new file mode 100644
index 0000000..5a189cd
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestJavaCodeGeneration.java
@@ -0,0 +1,161 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/** General code generation testing; compilation and/or execution.
+ *  These tests are more about avoiding duplicate var definitions
+ *  etc... than testing a particular ANTLR feature.
+ */
+public class TestJavaCodeGeneration extends BaseTest {
+	@Test public void testDupVarDefForPinchedState() {
+		// s0->s2 and s0->s3->s1 pinch back into s1.
+		// LA3_1, the s1 state variable for DFA 3, used to be defined twice in the same
+		// scope; wrapping each definition in its own curly braces fixes the duplicate.
+		String grammar =
+			"grammar T;\n" +
+			"a : (| A | B) X Y\n" +
+			"  | (| A | B) X Z\n" +
+			"  ;\n" ;
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, "TParser", null, false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testLabeledNotSetsInLexer() {
+		// d must be an int
+		String grammar =
+			"lexer grammar T;\n" +
+			"A : d=~('x'|'y') e='0'..'9'\n" +
+			"  ; \n" ;
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, null, "T", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testLabeledSetsInLexer() {
+		// d must be an int
+		String grammar =
+			"grammar T;\n" +
+			"a : A ;\n" +
+			"A : d=('x'|'y') {System.out.println((char)$d);}\n" +
+			"  ; \n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", false);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testLabeledRangeInLexer() {
+		// d must be an int
+		String grammar =
+			"grammar T;\n" +
+			"a : A;\n" +
+			"A : d='a'..'z' {System.out.println((char)$d);} \n" +
+			"  ; \n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", false);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testLabeledWildcardInLexer() {
+		// d must be an int
+		String grammar =
+			"grammar T;\n" +
+			"a : A;\n" +
+			"A : d=. {System.out.println((char)$d);}\n" +
+			"  ; \n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", false);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testSynpredWithPlusLoop() {
+		String grammar =
+			"grammar T; \n" +
+			"a : (('x'+)=> 'x'+)?;\n";
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, "TParser", "TLexer", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testDoubleQuoteEscape() {
+		String grammar =
+			"lexer grammar T; \n" +
+			"A : '\\\\\"';\n" +          // this is A : '\\"', which should give "\\\"" at Java level;
+            "B : '\\\"';\n" +            // this is B: '\"', which should give "\"" at Java level;
+            "C : '\\'\\'';\n" +          // this is C: '\'\'', which should give "''" at Java level
+            "D : '\\k';\n";              // this is D: '\k', which should give just "k" at Java level;
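+		// Two unescaping passes are at work above: javac unescapes the Java string
+		// literals into the grammar text, and ANTLR then resolves the grammar-level
+		// escapes when it generates the lexer's match calls.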
+
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, null, "T", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testBlankRuleGetsNoException() {
+		String grammar =
+			"grammar T;\n" +
+			"a : sync (ID sync)* ;\n" +
+			"sync : ;\n" +
+			"ID : 'a'..'z'+;\n";
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, "TParser", "TLexer", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+	/**
+	 * This is a regression test for antlr/antlr3#20: StackOverflow error when
+	 * compiling grammar with backtracking.
+	 * https://github.com/antlr/antlr3/issues/20
+	 */
+	@Test
+	public void testSemanticPredicateAnalysisStackOverflow() throws Exception {
+		String grammar =
+			"grammar T;\n"
+			+ "\n"
+			+ "options {\n"
+			+ "  backtrack=true;\n"
+			+ "}\n"
+			+ "\n"
+			+ "main : ('x'*)*;\n";
+		boolean success = rawGenerateAndBuildRecognizer("T.g", grammar, "TParser", "TLexer", false);
+		assertTrue(success);
+	}
+}
diff --git a/tool/src/test/java/org/antlr/test/TestLeftRecursion.java b/tool/src/test/java/org/antlr/test/TestLeftRecursion.java
new file mode 100644
index 0000000..bf2f798
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestLeftRecursion.java
@@ -0,0 +1,386 @@
+package org.antlr.test;
+
+import org.junit.Ignore;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/** */
+@Ignore
+public class TestLeftRecursion extends BaseTest {
+	protected boolean debug = false;
+
+	@Test public void testSimple() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : a {System.out.println($a.text);} ;\n" +
+			"a : a ID\n" +
+			"  | ID" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "s", "a b c", debug);
+		String expecting = "abc\n";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testSemPred() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : a {System.out.println($a.text);} ;\n" +
+			"a : a {true}? ID\n" +
+			"  | ID" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "s", "a b c", debug);
+		String expecting = "abc\n";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testTernaryExpr() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"e : e '*'^ e" +
+			"  | e '+'^ e" +
+			"  | e '?'<assoc=right>^ e ':'! e" +
+			"  | e '='<assoc=right>^ e" +
+			"  | ID" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String[] tests = {
+			"a",			"a",
+			"a+b",			"(+ a b)",
+			"a*b",			"(* a b)",
+			"a?b:c",		"(? a b c)",
+			"a=b=c",		"(= a (= b c))",
+			"a?b+c:d",		"(? a (+ b c) d)",
+			"a?b=c:d",		"(? a (= b c) d)",
+			"a? b?c:d : e",	"(? a (? b c d) e)",
+			"a?b: c?d:e",	"(? a b (? c d e))",
+		};
+		runTests(grammar, tests, "e");
+	}
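+	// In these grammars, a trailing '^' makes the token the root of the subtree, '!'
+	// drops the token from the AST, and <assoc=right> forces right associativity when
+	// the left recursion is eliminated.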
+
+	@Test public void testDeclarationsUsingASTOperators() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"declarator\n" +
+			"        : declarator '['^ e ']'!\n" +
+			"        | declarator '['^ ']'!\n" +
+			"        | declarator '('^ ')'!\n" +
+			"        | '*'^ declarator\n" + // binds less tight than suffixes
+			"        | '('! declarator ')'!\n" +
+			"        | ID\n" +
+			"        ;\n" +
+			"e : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String[] tests = {
+			"a",		"a",
+			"*a",		"(* a)",
+			"**a",		"(* (* a))",
+			"a[3]",		"([ a 3)",
+			"b[]",		"([ b)",
+			"(a)",		"a",
+			"a[]()",	"(( ([ a))",
+			"a[][]",	"([ ([ a))",
+			"*a[]",		"(* ([ a))",
+			"(*a)[]",	"([ (* a))",
+		};
+		runTests(grammar, tests, "declarator");
+	}
+
+	@Test public void testDeclarationsUsingRewriteOperators() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"declarator\n" +
+			"        : declarator '[' e ']' -> ^('[' declarator e)\n" +
+			"        | declarator '[' ']' -> ^('[' declarator)\n" +
+			"        | declarator '(' ')' -> ^('(' declarator)\n" +
+			"        | '*' declarator -> ^('*' declarator) \n" + // binds less tight than suffixes
+			"        | '(' declarator ')' -> declarator\n" +
+			"        | ID -> ID\n" +
+			"        ;\n" +
+			"e : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String[] tests = {
+			"a",		"a",
+			"*a",		"(* a)",
+			"**a",		"(* (* a))",
+			"a[3]",		"([ a 3)",
+			"b[]",		"([ b)",
+			"(a)",		"a",
+			"a[]()",	"(( ([ a))",
+			"a[][]",	"([ ([ a))",
+			"*a[]",		"(* ([ a))",
+			"(*a)[]",	"([ (* a))",
+		};
+		runTests(grammar, tests, "declarator");
+	}
+
+	@Test public void testExpressionsUsingASTOperators() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"e : e '.'^ ID\n" +
+			"  | e '.'^ 'this'\n" +
+			"  | '-'^ e\n" +
+			"  | e '*'^ e\n" +
+			"  | e ('+'^|'-'^) e\n" +
+			"  | INT\n" +
+			"  | ID\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String[] tests = {
+			"a",		"a",
+			"1",		"1",
+			"a+1",		"(+ a 1)",
+			"a*1",		"(* a 1)",
+			"a.b",		"(. a b)",
+			"a.this",	"(. a this)",
+			"a-b+c",	"(+ (- a b) c)",
+			"a+b*c",	"(+ a (* b c))",
+			"a.b+1",	"(+ (. a b) 1)",
+			"-a",		"(- a)",
+			"-a+b",		"(+ (- a) b)",
+			"-a.b",		"(- (. a b))",
+		};
+		runTests(grammar, tests, "e");
+	}
+
+	@Test public void testExpressionsUsingRewriteOperators() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"e : e '.' ID 				-> ^('.' e ID)\n" +
+			"  | e '.' 'this' 			-> ^('.' e 'this')\n" +
+			"  | '-' e 					-> ^('-' e)\n" +
+			"  | e '*' b=e 				-> ^('*' e $b)\n" +
+			"  | e (op='+'|op='-') b=e	-> ^($op e $b)\n" +
+			"  | INT 					-> INT\n" +
+			"  | ID 					-> ID\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String[] tests = {
+			"a",		"a",
+			"1",		"1",
+			"a+1",		"(+ a 1)",
+			"a*1",		"(* a 1)",
+			"a.b",		"(. a b)",
+			"a.this",	"(. a this)",
+			"a+b*c",	"(+ a (* b c))",
+			"a.b+1",	"(+ (. a b) 1)",
+			"-a",		"(- a)",
+			"-a+b",		"(+ (- a) b)",
+			"-a.b",		"(- (. a b))",
+		};
+		runTests(grammar, tests, "e");
+	}
+
+	@Test public void testExpressionAssociativity() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"e\n" +
+			"  : e '.'^ ID\n" +
+			"  | '-'^ e\n" +
+			"  | e '^'<assoc=right>^ e\n" +
+			"  | e '*'^ e\n" +
+			"  | e ('+'^|'-'^) e\n" +
+			"  | e ('='<assoc=right>^ |'+='<assoc=right>^) e\n" +
+			"  | INT\n" +
+			"  | ID\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String[] tests = {
+			"a",		"a",
+			"1",		"1",
+			"a+1",		"(+ a 1)",
+			"a*1",		"(* a 1)",
+			"a.b",		"(. a b)",
+			"a-b+c",	"(+ (- a b) c)",
+
+			"a+b*c",	"(+ a (* b c))",
+			"a.b+1",	"(+ (. a b) 1)",
+			"-a",		"(- a)",
+			"-a+b",		"(+ (- a) b)",
+			"-a.b",		"(- (. a b))",
+			"a^b^c",	"(^ a (^ b c))",
+			"a=b=c",	"(= a (= b c))",
+			"a=b=c+d.e","(= a (= b (+ c (. d e))))",
+		};
+		runTests(grammar, tests, "e");
+	}
+
+	@Test public void testJavaExpressions() throws Exception {
+		// Generates about 7k of bytecode for the generated e_ rule,
+		// well within the 64k-per-method limit; e_primary compiles
+		// to about 2k of bytecode.
+		// This grammar is a simplified subset of real Java.
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"expressionList\n" +
+			"    :   e (','! e)*\n" +
+			"    ;\n" +
+			"e   :   '('! e ')'!\n" +
+			"    |   'this' \n" +
+			"    |   'super'\n" +
+			"    |   INT\n" +
+			"    |   ID\n" +
+			"    |   type '.'^ 'class'\n" +
+			"    |   e '.'^ ID\n" +
+			"    |   e '.'^ 'this'\n" +
+			"    |   e '.'^ 'super' '('^ expressionList? ')'!\n" +
+			"    |   e '.'^ 'new'^ ID '('! expressionList? ')'!\n" +
+			"	 |	 'new'^ type ( '(' expressionList? ')'! | (options {k=1;}:'[' e ']'!)+)\n" + // ugly; simplified
+			"    |   e '['^ e ']'!\n" +
+			"    |   '('^ type ')'! e\n" +
+			"    |   e ('++'^ | '--'^)\n" +
+			"    |   e '('^ expressionList? ')'!\n" +
+			"    |   ('+'^|'-'^|'++'^|'--'^) e\n" +
+			"    |   ('~'^|'!'^) e\n" +
+			"    |   e ('*'^|'/'^|'%'^) e\n" +
+			"    |   e ('+'^|'-'^) e\n" +
+			"    |   e ('<'^ '<' | '>'^ '>' '>' | '>'^ '>') e\n" +
+			"    |   e ('<='^ | '>='^ | '>'^ | '<'^) e\n" +
+			"    |   e 'instanceof'^ e\n" +
+			"    |   e ('=='^ | '!='^) e\n" +
+			"    |   e '&'^ e\n" +
+			"    |   e '^'<assoc=right>^ e\n" +
+			"    |   e '|'^ e\n" +
+			"    |   e '&&'^ e\n" +
+			"    |   e '||'^ e\n" +
+			"    |   e '?' e ':' e\n" +
+			"    |   e ('='<assoc=right>^\n" +
+			"          |'+='<assoc=right>^\n" +
+			"          |'-='<assoc=right>^\n" +
+			"          |'*='<assoc=right>^\n" +
+			"          |'/='<assoc=right>^\n" +
+			"          |'&='<assoc=right>^\n" +
+			"          |'|='<assoc=right>^\n" +
+			"          |'^='<assoc=right>^\n" +
+			"          |'>>='<assoc=right>^\n" +
+			"          |'>>>='<assoc=right>^\n" +
+			"          |'<<='<assoc=right>^\n" +
+			"          |'%='<assoc=right>^) e\n" +
+			"    ;\n" +
+			"type: ID \n" +
+			"    | ID '['^ ']'!\n" +
+			"    | 'int'\n" +
+			"	 | 'int' '['^ ']'! \n" +
+			"    ;\n" +
+			"ID : ('a'..'z'|'A'..'Z'|'_'|'$')+;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String[] tests = {
+			"a",		"a",
+			"1",		"1",
+			"a+1",		"(+ a 1)",
+			"a*1",		"(* a 1)",
+			"a.b",		"(. a b)",
+			"a-b+c",	"(+ (- a b) c)",
+
+			"a+b*c",	"(+ a (* b c))",
+			"a.b+1",	"(+ (. a b) 1)",
+			"-a",		"(- a)",
+			"-a+b",		"(+ (- a) b)",
+			"-a.b",		"(- (. a b))",
+			"a^b^c",	"(^ a (^ b c))",
+			"a=b=c",	"(= a (= b c))",
+			"a=b=c+d.e","(= a (= b (+ c (. d e))))",
+			"a|b&c",	"(| a (& b c))",
+			"(a|b)&c",	"(& (| a b) c)",
+			"a > b",	"(> a b)",
+			"a >> b",	"(> a b)",  // text is from one token
+			"a < b",	"(< a b)",
+
+			"(T)x",							"(( T x)",
+			"new A().b",					"(. (new A () b)",
+			"(T)t.f()",						"(( (( T (. t f)))",
+			"a.f(x)==T.c",					"(== (( (. a f) x) (. T c))",
+			"a.f().g(x,1)",					"(( (. (( (. a f)) g) x 1)",
+			"new T[((n-1) * x) + 1]",		"(new T [ (+ (* (- n 1) x) 1))",
+		};
+		runTests(grammar, tests, "e");
+	}
+
+	@Test public void testReturnValueAndActions() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : e {System.out.println($e.v);} ;\n" +
+			"e returns [int v, List<String> ignored]\n" +
+			"  : e '*' b=e {$v *= $b.v;}\n" +
+			"  | e '+' b=e {$v += $b.v;}\n" +
+			"  | INT {$v = $INT.int;}\n" +
+			"  ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String[] tests = {
+			"4",			"4",
+			"1+2",			"3",
+		};
+		runTests(grammar, tests, "s");
+	}
+
+	@Test public void testReturnValueAndActionsAndASTs() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"s : e {System.out.print(\"v=\"+$e.v+\", \");} ;\n" +
+			"e returns [int v, List<String> ignored]\n" +
+			"  : e '*'^ b=e {$v *= $b.v;}\n" +
+			"  | e '+'^ b=e {$v += $b.v;}\n" +
+			"  | INT {$v = $INT.int;}\n" +
+			"  ;\n" +
+			"INT : '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {skip();} ;\n";
+		String[] tests = {
+			"4",			"v=4, 4",
+			"1+2",			"v=3, (+ 1 2)",
+		};
+		runTests(grammar, tests, "s");
+	}
+
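+	/** Runs input/expected pairs: tests[i] is the input and tests[i+1] the expected
+	 *  output (a trailing newline is appended before comparing). */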
+	public void runTests(String grammar, String[] tests, String startRule) {
+		rawGenerateAndBuildRecognizer("T.g", grammar, "TParser", "TLexer", debug);
+		boolean parserBuildsTrees =
+			grammar.indexOf("output=AST")>=0 ||
+			grammar.indexOf("output = AST")>=0;
+		writeRecognizerAndCompile("TParser",
+										 null,
+										 "TLexer",
+										 startRule,
+										 null,
+										 parserBuildsTrees,
+										 false,
+										 false,
+										 debug);
+
+		for (int i=0; i<tests.length; i+=2) {
+			String test = tests[i];
+			String expecting = tests[i+1]+"\n";
+			writeFile(tmpdir, "input", test);
+			String found = execRecognizer();
+			System.out.print(test+" -> "+found);
+			assertEquals(expecting, found);
+		}
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestLexer.java b/tool/src/test/java/org/antlr/test/TestLexer.java
new file mode 100644
index 0000000..2e3265d
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestLexer.java
@@ -0,0 +1,256 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.stringtemplate.v4.ST;
+import org.antlr.tool.Grammar;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestLexer extends BaseTest {
+	protected boolean debug = false;
+
+	/** Public default constructor used by TestRig */
+	public TestLexer() {
+	}
+
+	@Test public void testSetText() throws Exception {
+		// setText in the lexer action must replace the matched text, so the parser
+		// sees a real tab character rather than the two-character input \t.
+		String grammar =
+			"grammar P;\n"+
+			"a : A {System.out.println(input);} ;\n"+
+			"A : '\\\\' 't' {setText(\"\t\");} ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "\\t", debug);
+		assertEquals("\t\n", found);
+	}
+
+	@Test public void testRefToRuleDoesNotSetTokenNorEmitAnother() throws Exception {
+		// this must return A not I to the parser; calling a nonfragment rule
+		// from a nonfragment rule does not set the overall token.
+		String grammar =
+			"grammar P;\n"+
+			"a : A EOF {System.out.println(input);} ;\n"+
+			"A : '-' I ;\n" +
+			"I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "-34", debug);
+		assertEquals("-34\n", found);
+	}
+
+	@Test public void testRefToRuleDoesNotSetChannel() throws Exception {
+		// this must set channel of A to HIDDEN.  $channel is local to rule
+		// like $type.
+		String grammar =
+			"grammar P;\n"+
+			"a : A EOF {System.out.println($A.text+\", channel=\"+$A.channel);} ;\n"+
+			"A : '-' WS I ;\n" +
+			"I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "- 34", debug);
+		assertEquals("- 34, channel=0\n", found);
+	}
+
+	@Test public void testWeCanSetType() throws Exception {
+		String grammar =
+			"grammar P;\n"+
+			"tokens {X;}\n" +
+			"a : X EOF {System.out.println(input);} ;\n"+
+			"A : '-' I {$type = X;} ;\n" +
+			"I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "-34", debug);
+		assertEquals("-34\n", found);
+	}
+
+	@Test public void testRefToFragment() throws Exception {
+		// this must return A not I to the parser; referencing a fragment rule
+		// does not emit a separate token nor set the overall token type.
+		String grammar =
+			"grammar P;\n"+
+			"a : A {System.out.println(input);} ;\n"+
+			"A : '-' I ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "-34", debug);
+		assertEquals("-34\n", found);
+	}
+
+	@Test public void testMultipleRefToFragment() throws Exception {
+		// this must return a single A token to the parser; multiple references to a
+		// fragment rule do not emit separate tokens nor set the overall token type.
+		String grammar =
+			"grammar P;\n"+
+			"a : A EOF {System.out.println(input);} ;\n"+
+			"A : I '.' I ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "3.14159", debug);
+		assertEquals("3.14159\n", found);
+	}
+
+	@Test public void testLabelInSubrule() throws Exception {
+		// can we see v outside?
+		String grammar =
+			"grammar P;\n"+
+			"a : A EOF ;\n"+
+			"A : 'hi' WS (v=I)? {$channel=0; System.out.println($v.text);} ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "hi 342", debug);
+		assertEquals("342\n", found);
+	}
+
+	@Test public void testRefToTokenInLexer() throws Exception {
+		String grammar =
+			"grammar P;\n"+
+			"a : A EOF ;\n"+
+			"A : I {System.out.println($I.text);} ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "342", debug);
+		assertEquals("342\n", found);
+	}
+
+	@Test public void testListLabelInLexer() throws Exception {
+		String grammar =
+			"grammar P;\n"+
+			"a : A ;\n"+
+			"A : i+=I+ {for (Object t : $i) System.out.print(\" \"+((Token)t).getText());} ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "33 297", debug);
+		assertEquals(" 33 297\n", found);
+	}
+
+	@Test public void testDupListRefInLexer() throws Exception {
+		String grammar =
+			"grammar P;\n"+
+			"a : A ;\n"+
+			"A : i+=I WS i+=I {$channel=0; for (Object t : $i) System.out.print(\" \"+((Token)t).getText());} ;\n" +
+			"fragment I : '0'..'9'+ ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+				    "a", "33 297", debug);
+		assertEquals(" 33 297\n", found);
+	}
+
+	@Test public void testCharLabelInLexer() {
+		String grammar =
+			"grammar T;\n" +
+			"a : B ;\n" +
+			"B : x='a' {System.out.println((char)$x);} ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	@Test public void testRepeatedLabelInLexer() {
+		String grammar =
+			"lexer grammar T;\n" +
+			"B : x='a' x='b' ;\n" ;
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, null, "T", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRepeatedRuleLabelInLexer() {
+		String grammar =
+			"lexer grammar T;\n" +
+			"B : x=A x=A ;\n" +
+			"fragment A : 'a' ;\n" ;
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, null, "T", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testIsolatedEOTEdge() {
+		String grammar =
+			"lexer grammar T;\n" +
+			"QUOTED_CONTENT \n" +
+			"        : 'q' (~'q')* (('x' 'q') )* 'q' ; \n";
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, null, "T", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testEscapedLiterals() {
+		/* Grammar:
+			A : '\"' ;  should match a single double-quote: "
+			B : '\\\"' ; should match input \"
+		*/
+		String grammar =
+			"lexer grammar T;\n" +
+			"A : '\\\"' ;\n" +
+			"B : '\\\\\\\"' ;\n" ; // '\\\"'
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, null, "T", false);
+		boolean expecting = true; // should be ok
+		assertEquals(expecting, found);
+	}
+
+    @Test public void testNewlineLiterals() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar T;\n" +
+            "A : '\\n\\n' ;\n"  // ANTLR sees '\n\n'
+        );
+        String expecting = "match(\"\\n\\n\")";
+
+        Tool antlr = newTool();
+        antlr.setOutputDirectory(null); // write to /dev/null
+        CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+        g.setCodeGenerator(generator);
+        generator.genRecognizer(); // codegen phase sets some vars we need
+        ST codeST = generator.getRecognizerST();
+        String code = codeST.render();
+        int m = code.indexOf("match(\"");
+        String found = code.substring(m,m+expecting.length());
+
+        assertEquals(expecting, found);
+    }
+}
diff --git a/tool/src/test/java/org/antlr/test/TestMessages.java b/tool/src/test/java/org/antlr/test/TestMessages.java
new file mode 100644
index 0000000..bede62d
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestMessages.java
@@ -0,0 +1,79 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.grammar.v3.ActionTranslator;
+import org.antlr.runtime.CommonToken;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarSemanticsMessage;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestMessages extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestMessages() {
+	}
+
+
+	@Test public void testMessageStringificationIsConsistent() throws Exception {
+		String action = "$other.tree = null;";
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar a;\n" +
+			"options { output = AST;}" +
+			"otherrule\n" +
+			"    : 'y' ;" +
+			"rule\n" +
+			"    : other=otherrule {" + action +"}\n" +
+			"    ;");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+																	"rule",
+																	new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+
+		int expectedMsgID = ErrorManager.MSG_WRITE_TO_READONLY_ATTR;
+		Object expectedArg = "other";
+		Object expectedArg2 = "tree";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		String expectedMessageString = expectedMessage.toString();
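+		// toString() is invoked twice on purpose: the assertion checks that message
+		// stringification is stable across calls, not that it matches a hard-coded literal.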
+		assertEquals(expectedMessageString, expectedMessage.toString());
+	}
+}
diff --git a/tool/src/test/java/org/antlr/test/TestNFAConstruction.java b/tool/src/test/java/org/antlr/test/TestNFAConstruction.java
new file mode 100644
index 0000000..05909f5
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestNFAConstruction.java
@@ -0,0 +1,1206 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.analysis.State;
+import org.antlr.tool.FASerializer;
+import org.antlr.tool.Grammar;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestNFAConstruction extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestNFAConstruction() {
+	}
+
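+	// Reading the expected strings below: ".sN" denotes an NFA state and ":sN" an
+	// accept state; a bare "->" is an epsilon edge, "-A->" a transition on token A,
+	// and "-{}->" an action edge.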
+	@Test public void testA() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-A->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAB() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A B ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-A->.s3\n" +
+			".s3-B->.s4\n" +
+			".s4->:s5\n" +
+			":s5-EOF->.s6\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAorB() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A | B {;} ;");
+		/* expecting (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5,end)
+										|                            ^
+									   (6)--Ep-->(7)--B-->(8)--------|
+				 */
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s7\n" +
+			".s10->.s4\n" +
+			".s2-A->.s3\n" +
+			".s3->.s4\n" +
+			".s4->:s5\n" +
+			".s7->.s8\n" +
+			".s8-B->.s9\n" +
+			".s9-{}->.s10\n" +
+			":s5-EOF->.s6\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testRangeOrRange() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ('a'..'c' 'h' | 'q' 'j'..'l') ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10-'q'->.s11\n" +
+			".s11-'j'..'l'->.s12\n" +
+			".s12->.s6\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3-'a'..'c'->.s4\n" +
+			".s4-'h'->.s5\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s10\n" +
+			":s7-<EOT>->.s8\n";
+		checkRule(g, "A", expecting);
+	}
+
+	@Test public void testRange() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : 'a'..'c' ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-'a'..'c'->.s3\n" +
+			".s3->:s4\n" +
+			":s4-<EOT>->.s5\n";
+		checkRule(g, "A", expecting);
+	}
+
+	@Test public void testCharSetInParser() throws Exception {
+		Grammar g = new Grammar(
+			"grammar P;\n"+
+			"a : A|'b' ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-A..'b'->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testABorCD() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A B | C D;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s8\n" +
+			".s10-D->.s11\n" +
+			".s11->.s5\n" +
+			".s2-A->.s3\n" +
+			".s3-B->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s9\n" +
+			".s9-C->.s10\n" +
+			":s6-EOF->.s7\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testbA() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b A ;\n"+
+			"b : B ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4->.s5\n" +
+			".s5-B->.s6\n" +
+			".s6->:s7\n" +
+			".s8-A->.s9\n" +
+			".s9->:s10\n" +
+			":s10-EOF->.s11\n" +
+			":s7->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testbA_bC() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b A ;\n"+
+			"b : B ;\n"+
+			"c : b C;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s12->.s13\n" +
+			".s13-C->.s14\n" +
+			".s14->:s15\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4->.s5\n" +
+			".s5-B->.s6\n" +
+			".s6->:s7\n" +
+			".s8-A->.s9\n" +
+			".s9->:s10\n" +
+			":s10-EOF->.s11\n" +
+			":s15-EOF->.s16\n" +
+			":s7->.s12\n" +
+			":s7->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAorEpsilon() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A | ;");
+		/* expecting (0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5,end)
+										|                            ^
+									   (6)--Ep-->(7)--Ep-->(8)-------|
+				 */
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s7\n" +
+			".s2-A->.s3\n" +
+			".s3->.s4\n" +
+			".s4->:s5\n" +
+			".s7->.s8\n" +
+			".s8->.s9\n" +
+			".s9->.s4\n" +
+			":s5-EOF->.s6\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAOptional() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A)?;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s8\n" +
+			".s3-A->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s5\n" +
+			":s6-EOF->.s7\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testNakedAoptional() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A?;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s8\n" +
+			".s3-A->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s5\n" +
+			":s6-EOF->.s7\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAorBthenC() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A | B) C;");
+		/* expecting
+
+				(0)--Ep-->(1)--Ep-->(2)--A-->(3)--Ep-->(4)--Ep-->(5)--C-->(6)--Ep-->(7,end)
+						   |                            ^
+						  (8)--Ep-->(9)--B-->(10)-------|
+				 */
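+		// Note: unlike the surrounding tests, this one builds the Grammar and sketches the
+		// intended NFA in the comment above but never defines an expecting string or calls
+		// checkRule, so it currently exercises nothing beyond grammar construction.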
+	}
+
+	@Test public void testAplus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A)+;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testNakedAplus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A+;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAplusNonGreedy() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : (options {greedy=false;}:'0'..'9')+ ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-'0'..'9'->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			":s7-<EOT>->.s8\n";
+		checkRule(g, "A", expecting);
+	}
+
+	@Test public void testAorBplus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A | B{action})+ ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s11-B->.s12\n" +
+			".s12-{}->.s13\n" +
+			".s13->.s6\n" +
+			".s2->.s3\n" +
+			".s3->.s10\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAorBorEmptyPlus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A | B | )+ ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s10->.s13\n" +
+			".s11-B->.s12\n" +
+			".s12->.s6\n" +
+			".s13->.s14\n" +
+			".s14->.s15\n" +
+			".s15->.s6\n" +
+			".s2->.s3\n" +
+			".s3->.s10\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAStar() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A)*;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testNestedAstar() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A*)*;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->:s11\n" +
+			".s13->.s8\n" +
+			".s14->.s10\n" +
+			".s2->.s14\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4->.s13\n" +
+			".s4->.s5\n" +
+			".s5->.s6\n" +
+			".s6-A->.s7\n" +
+			".s7->.s5\n" +
+			".s7->.s8\n" +
+			".s8->.s9\n" +
+			".s9->.s10\n" +
+			".s9->.s3\n" +
+			":s11-EOF->.s12\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testPlusNestedInStar() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A+)*;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->:s11\n" +
+			".s13->.s10\n" +
+			".s2->.s13\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4->.s5\n" +
+			".s5->.s6\n" +
+			".s6-A->.s7\n" +
+			".s7->.s5\n" +
+			".s7->.s8\n" +
+			".s8->.s9\n" +
+			".s9->.s10\n" +
+			".s9->.s3\n" +
+			":s11-EOF->.s12\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testStarNestedInPlus() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A*)+;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->:s11\n" +
+			".s13->.s8\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4->.s13\n" +
+			".s4->.s5\n" +
+			".s5->.s6\n" +
+			".s6-A->.s7\n" +
+			".s7->.s5\n" +
+			".s7->.s8\n" +
+			".s8->.s9\n" +
+			".s9->.s10\n" +
+			".s9->.s3\n" +
+			":s11-EOF->.s12\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testNakedAstar() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A*;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAorBstar() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : (A | B{action})* ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s11-B->.s12\n" +
+			".s12-{}->.s13\n" +
+			".s13->.s6\n" +
+			".s14->.s7\n" +
+			".s2->.s14\n" +
+			".s2->.s3\n" +
+			".s3->.s10\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAorBOptionalSubrule() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : ( A | B )? ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s8\n" +
+			".s3-A..B->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s5\n" +
+			":s6-EOF->.s7\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testPredicatedAorB() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A | {p2}? B ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s8\n" +
+			".s10-B->.s11\n" +
+			".s11->.s5\n" +
+			".s2-{p1}?->.s3\n" +
+			".s3-A->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s9\n" +
+			".s9-{p2}?->.s10\n" +
+			":s6-EOF->.s7\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testMultiplePredicates() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? {p1a}? A | {p2}? B | {p3} b;\n" +
+			"b : {p4}? B ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s9\n" +
+			".s10-{p2}?->.s11\n" +
+			".s11-B->.s12\n" +
+			".s12->.s6\n" +
+			".s13->.s14\n" +
+			".s14-{}->.s15\n" +
+			".s15->.s16\n" +
+			".s16->.s17\n" +
+			".s17->.s18\n" +
+			".s18-{p4}?->.s19\n" +
+			".s19-B->.s20\n" +
+			".s2-{p1}?->.s3\n" +
+			".s20->:s21\n" +
+			".s22->.s6\n" +
+			".s3-{p1a}?->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s10\n" +
+			".s9->.s13\n" +
+			":s21->.s22\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testSets() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : ( A | B )+ ;\n" +
+			"b : ( A | B{;} )+ ;\n" +
+			"c : (A|B) (A|B) ;\n" +
+			"d : ( A | B )* ;\n" +
+			"e : ( A | B )? ;");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-A..B->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+		expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s11-B->.s12\n" +
+			".s12-{}->.s13\n" +
+			".s13->.s6\n" +
+			".s2->.s3\n" +
+			".s3->.s10\n" +
+			".s3->.s4\n" +
+			".s4-A->.s5\n" +
+			".s5->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "b", expecting);
+		expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-A..B->.s3\n" +
+			".s3-A..B->.s4\n" +
+			".s4->:s5\n" +
+			":s5-EOF->.s6\n";
+		checkRule(g, "c", expecting);
+		expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-A..B->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "d", expecting);
+		expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s8\n" +
+			".s3-A..B->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s5\n" +
+			":s6-EOF->.s7\n";
+		checkRule(g, "e", expecting);
+	}
+
+	@Test public void testNotSet() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"tokens { A; B; C; }\n"+
+			"a : ~A ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-B..C->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+
+		String expectingGrammarStr =
+			"1:8: parser grammar P;\n" +
+			"a : ~ A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	@Test public void testNotSingletonBlockSet() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"tokens { A; B; C; }\n"+
+			"a : ~(A) ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-B..C->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+
+		String expectingGrammarStr =
+			"1:8: parser grammar P;\n" +
+			"a : ~ ( A ) ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	@Test public void testNotCharSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ~'3' ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s3\n" +
+			".s3->:s4\n" +
+			":s4-<EOT>->.s5\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+			"A : ~ '3' ;\n"+
+			"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	@Test public void testNotBlockSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ~('3'|'b') ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s3\n" +
+			".s3->:s4\n" +
+			":s4-<EOT>->.s5\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+			"A : ~ ( '3' | 'b' ) ;\n" +
+			"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	@Test public void testNotSetLoop() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ~('3')* ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-<EOT>->.s8\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+			"A : (~ ( '3' ) )* ;\n" +
+			"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	@Test public void testNotBlockSetLoop() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ~('3'|'b')* ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-<EOT>->.s8\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+			"A : (~ ( '3' | 'b' ) )* ;\n" +
+			"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	@Test public void testSetsInCombinedGrammarSentToLexer() throws Exception {
+		// not sure this belongs in this test suite, but whatever.
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"A : '{' ~('}')* '}';\n");
+		String result = g.getLexerGrammar();
+		String expecting =
+			"lexer grammar t;" +newline +
+			"// $ANTLR src \"<string>\" 2"+newline+
+			"A : '{' ~('}')* '}';";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testLabeledNotSet() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"tokens { A; B; C; }\n"+
+			"a : t=~A ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-B..C->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+
+		String expectingGrammarStr =
+			"1:8: parser grammar P;\n" +
+			"a : t=~ A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	@Test public void testLabeledNotCharSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : t=~'3' ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-{'\\u0000'..'2', '4'..'\\uFFFF'}->.s3\n" +
+			".s3->:s4\n" +
+			":s4-<EOT>->.s5\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+			"A : t=~ '3' ;\n"+
+			"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	@Test public void testLabeledNotBlockSet() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : t=~('3'|'b') ;\n");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-{'\\u0000'..'2', '4'..'a', 'c'..'\\uFFFF'}->.s3\n" +
+			".s3->:s4\n" +
+			":s4-<EOT>->.s5\n";
+		checkRule(g, "A", expecting);
+
+		String expectingGrammarStr =
+			"1:7: lexer grammar P;\n" +
+			"A : t=~ ( '3' | 'b' ) ;\n" +
+			"Tokens : A ;";
+		assertEquals(expectingGrammarStr, g.toString());
+	}
+
+	@Test public void testEscapedCharLiteral() throws Exception {
+		Grammar g = new Grammar(
+			"grammar P;\n"+
+			"a : '\\n';");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-'\\n'->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testEscapedStringLiteral() throws Exception {
+		Grammar g = new Grammar(
+			"grammar P;\n"+
+			"a : 'a\\nb\\u0030c\\'';");
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-'a\\nb\\u0030c\\''->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	// AUTO BACKTRACKING STUFF
+
+	@Test public void testAutoBacktracking_RuleBlock() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : 'a'{;}|'b';"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s9\n" +
+			".s10-'b'->.s11\n" +
+			".s11->.s6\n" +
+			".s2-{synpred1_t}?->.s3\n" +
+			".s3-'a'->.s4\n" +
+			".s4-{}->.s5\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s10\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_RuleSetBlock() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : 'a'|'b';"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-'a'..'b'->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_SimpleBlock() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'{;}|'b') ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s11-'b'->.s12\n" +
+			".s12->.s7\n" +
+			".s2->.s10\n" +
+			".s2->.s3\n" +
+			".s3-{synpred1_t}?->.s4\n" +
+			".s4-'a'->.s5\n" +
+			".s5-{}->.s6\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_SetBlock() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'|'b') ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2-'a'..'b'->.s3\n" +
+			".s3->:s4\n" +
+			":s4-EOF->.s5\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_StarBlock() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'{;}|'b')* ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s12->.s13\n" +
+			".s13-{synpred2_t}?->.s14\n" +
+			".s14-'b'->.s15\n" +
+			".s15->.s8\n" +
+			".s16->.s9\n" +
+			".s2->.s16\n" +
+			".s2->.s3\n" +
+			".s3->.s12\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1_t}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6-{}->.s7\n" +
+			".s7->.s8\n" +
+			".s8->.s3\n" +
+			".s8->.s9\n" +
+			".s9->:s10\n" +
+			":s10-EOF->.s11\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_StarSetBlock_IgnoresPreds() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'|'b')* ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3->.s4\n" +
+			".s4-'a'..'b'->.s5\n" +
+			".s5->.s3\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_StarSetBlock() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'|'b'{;})* ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s11->.s12\n" +
+			".s12-{synpred2_t}?->.s13\n" +
+			".s13-'b'->.s14\n" +
+			".s14-{}->.s15\n" +
+			".s15->.s7\n" +
+			".s16->.s8\n" +
+			".s2->.s16\n" +
+			".s2->.s3\n" +
+			".s3->.s11\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1_t}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6->.s7\n" +
+			".s7->.s3\n" +
+			".s7->.s8\n" +
+			".s8->:s9\n" +
+			":s9-EOF->.s10\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_StarBlock1Alt() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a')* ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s7\n" +
+			".s2->.s10\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1_t}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_PlusBlock() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'{;}|'b')+ ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s12->.s13\n" +
+			".s13-{synpred2_t}?->.s14\n" +
+			".s14-'b'->.s15\n" +
+			".s15->.s8\n" +
+			".s2->.s3\n" +
+			".s3->.s12\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1_t}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6-{}->.s7\n" +
+			".s7->.s8\n" +
+			".s8->.s3\n" +
+			".s8->.s9\n" +
+			".s9->:s10\n" +
+			":s10-EOF->.s11\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_PlusSetBlock() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'|'b'{;})+ ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s11->.s12\n" +
+			".s12-{synpred2_t}?->.s13\n" +
+			".s13-'b'->.s14\n" +
+			".s14-{}->.s15\n" +
+			".s15->.s7\n" +
+			".s2->.s3\n" +
+			".s3->.s11\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1_t}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6->.s7\n" +
+			".s7->.s3\n" +
+			".s7->.s8\n" +
+			".s8->:s9\n" +
+			":s9-EOF->.s10\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_PlusBlock1Alt() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a')+ ;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s3->.s4\n" +
+			".s4-{synpred1_t}?->.s5\n" +
+			".s5-'a'->.s6\n" +
+			".s6->.s3\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_OptionalBlock2Alts() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a'{;}|'b')?;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s10->.s11\n" +
+			".s10->.s14\n" +
+			".s11-{synpred2_t}?->.s12\n" +
+			".s12-'b'->.s13\n" +
+			".s13->.s7\n" +
+			".s14->.s7\n" +
+			".s2->.s10\n" +
+			".s2->.s3\n" +
+			".s3-{synpred1_t}?->.s4\n" +
+			".s4-'a'->.s5\n" +
+			".s5-{}->.s6\n" +
+			".s6->.s7\n" +
+			".s7->:s8\n" +
+			":s8-EOF->.s9\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_OptionalBlock1Alt() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a')?;"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s2->.s3\n" +
+			".s2->.s9\n" +
+			".s3-{synpred1_t}?->.s4\n" +
+			".s4-'a'->.s5\n" +
+			".s5->.s6\n" +
+			".s6->:s7\n" +
+			".s9->.s6\n" +
+			":s7-EOF->.s8\n";
+		checkRule(g, "a", expecting);
+	}
+
+	@Test public void testAutoBacktracking_ExistingPred() throws Exception {
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {backtrack=true;}\n"+
+			"a : ('a')=> 'a' | 'b';"
+		);
+		String expecting =
+			".s0->.s1\n" +
+			".s1->.s2\n" +
+			".s1->.s8\n" +
+			".s10->.s5\n" +
+			".s2-{synpred1_t}?->.s3\n" +
+			".s3-'a'->.s4\n" +
+			".s4->.s5\n" +
+			".s5->:s6\n" +
+			".s8->.s9\n" +
+			".s9-'b'->.s10\n" +
+			":s6-EOF->.s7\n";
+		checkRule(g, "a", expecting);
+	}
+
+	private void checkRule(Grammar g, String rule, String expecting)
+	{
+		g.buildNFA();
+		State startState = g.getRuleStartState(rule);
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(startState);
+
+		//System.out.print(result);
+		assertEquals(expecting, result);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestRewriteAST.java b/tool/src/test/java/org/antlr/test/TestRewriteAST.java
new file mode 100644
index 0000000..7cd8341
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestRewriteAST.java
@@ -0,0 +1,1472 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarSemanticsMessage;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestRewriteAST extends BaseTest {
+	protected boolean debug = false;
+
+	@Test public void testDelete() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("", found);
+	}
+
+	@Test public void testSingleToken() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> ID;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testSingleTokenToNewNode() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> ID[\"x\"];\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testSingleTokenToNewNodeRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> ^(ID[\"x\"] INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("(x INT)\n", found);
+	}
+
+	@Test public void testSingleTokenToNewNode2() throws Exception {
+		// Allow creation of new nodes w/o args.
+		String grammar =
+			"grammar TT;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> ID[ ];\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("TT.g", grammar, "TTParser", "TTLexer",
+				    "a", "abc", debug);
+		assertEquals("ID\n", found);
+	}
+
+	@Test public void testSingleCharLiteral() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'c' -> 'c';\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "c", debug);
+		assertEquals("c\n", found);
+	}
+
+	@Test public void testSingleStringLiteral() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'ick' -> 'ick';\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "ick", debug);
+		assertEquals("ick\n", found);
+	}
+
+	@Test public void testSingleRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b -> b;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testReorderTokens() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> INT ID;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("34 abc\n", found);
+	}
+
+	@Test public void testReorderTokenAndRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b INT -> INT b;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("34 abc\n", found);
+	}
+
+	@Test public void testTokenTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(INT ID);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("(34 abc)\n", found);
+	}
+
+	@Test public void testTokenTreeAfterOtherStuff() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'void' ID INT -> 'void' ^(INT ID);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "void abc 34", debug);
+		assertEquals("void (34 abc)\n", found);
+	}
+
+	@Test public void testNestedTokenTreeWithOuterLoop() throws Exception {
+		// verify that ID and INT both iterate over outer index variable
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {DUH;}\n" +
+			"a : ID INT ID INT -> ^( DUH ID ^( DUH INT) )+ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 1 b 2", debug);
+		assertEquals("(DUH a (DUH 1)) (DUH b (DUH 2))\n", found);
+	}
+
+	@Test public void testOptionalSingleToken() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> ID? ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testClosureSingleToken() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ID -> ID* ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	@Test public void testPositiveClosureSingleToken() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ID -> ID+ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	@Test public void testOptionalSingleRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b -> b?;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testClosureSingleRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b b -> b*;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	@Test public void testClosureOfLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x+=b x+=b -> $x*;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	@Test public void testOptionalLabelNoListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : (x=ID)? -> $x?;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	@Test public void testPositiveClosureSingleRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b b -> b+;\n" +
+			"b : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	@Test public void testSinglePredicateT() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> {true}? ID -> ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testSinglePredicateF() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID -> {false}? ID -> ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc", debug);
+		assertEquals("", found);
+	}
+
+	@Test public void testMultiplePredicate() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> {false}? ID\n" +
+			"           -> {true}? INT\n" +
+			"           -> \n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 2", debug);
+		assertEquals("2\n", found);
+	}
+
+	@Test public void testMultiplePredicateTrees() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> {false}? ^(ID INT)\n" +
+			"           -> {true}? ^(INT ID)\n" +
+			"           -> ID\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 2", debug);
+		assertEquals("(2 a)\n", found);
+	}
+
+	@Test public void testSimpleTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : op INT -> ^(op INT);\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "-34", debug);
+		assertEquals("(- 34)\n", found);
+	}
+
+	@Test public void testSimpleTree2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : op INT -> ^(INT op);\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "+ 34", debug);
+		assertEquals("(34 +)\n", found);
+	}
+
+
+	@Test public void testNestedTrees() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'var' (ID ':' type ';')+ -> ^('var' ^(':' ID type)+) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "var a:int; b:float;", debug);
+		assertEquals("(var (: a int) (: b float))\n", found);
+	}
+
+	@Test public void testImaginaryTokenCopy() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {VAR;}\n" +
+			"a : ID (',' ID)*-> ^(VAR ID)+ ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a,b,c", debug);
+		assertEquals("(VAR a) (VAR b) (VAR c)\n", found);
+	}
+
+	@Test public void testTokenUnreferencedOnLeftButDefined() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {VAR;}\n" +
+			"a : b -> ID ;\n" +
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("ID\n", found);
+	}
+
+	@Test public void testImaginaryTokenCopySetText() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {VAR;}\n" +
+			"a : ID (',' ID)*-> ^(VAR[\"var\"] ID)+ ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a,b,c", debug);
+		assertEquals("(var a) (var b) (var c)\n", found);
+	}
+
+	@Test public void testImaginaryTokenNoCopyFromToken() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "{a b c}", debug);
+		assertEquals("({ a b c)\n", found);
+	}
+
+	@Test public void testImaginaryTokenNoCopyFromTokenSetText() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : lc='{' ID+ '}' -> ^(BLOCK[$lc,\"block\"] ID+) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "{a b c}", debug);
+		assertEquals("(block a b c)\n", found);
+	}
+
+	@Test public void testMixedRewriteAndAutoAST() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : b b^ ;\n" + // 2nd b matches only an INT; can make it root
+			"b : ID INT -> INT ID\n" +
+			"  | INT\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 1 2", debug);
+		assertEquals("(2 1 a)\n", found);
+	}
+
+	@Test public void testSubruleWithRewrite() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : b b ;\n" +
+			"b : (ID INT -> INT ID | INT INT -> INT+ )\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a 1 2 3", debug);
+		assertEquals("1 a 2 3\n", found);
+	}
+
+	@Test public void testSubruleWithRewrite2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {TYPE;}\n" +
+			"a : b b ;\n" +
+			"b : 'int'\n" +
+			"    ( ID -> ^(TYPE 'int' ID)\n" +
+			"    | ID '=' INT -> ^(TYPE 'int' ID INT)\n" +
+			"    )\n" +
+			"    ';'\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a; int b=3;", debug);
+		assertEquals("(TYPE int a) (TYPE int b 3)\n", found);
+	}
+
+	@Test public void testNestedRewriteShutsOffAutoAST() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : b b ;\n" +
+			"b : ID ( ID (last=ID -> $last)+ ) ';'\n" + // get last ID
+			"  | INT\n" + // should still get auto AST construction
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b c d; 42", debug);
+		assertEquals("d 42\n", found);
+	}
+
+	@Test public void testRewriteActions() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : atom -> ^({adaptor.create(INT,\"9\")} atom) ;\n" +
+			"atom : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "3", debug);
+		assertEquals("(9 3)\n", found);
+	}
+
+	@Test public void testRewriteActions2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : atom -> {adaptor.create(INT,\"9\")} atom ;\n" +
+			"atom : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "3", debug);
+		assertEquals("9 3\n", found);
+	}
+
+	@Test public void testRefToOldValue() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : (atom -> atom) (op='+' r=atom -> ^($op $a $r) )* ;\n" +
+			"atom : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "3+4+5", debug);
+		assertEquals("(+ (+ 3 4) 5)\n", found);
+	}
+
+	@Test public void testCopySemanticsForRules() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : atom -> ^(atom atom) ;\n" + // NOT CYCLE! (dup atom)
+			"atom : INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "3", debug);
+		assertEquals("(3 3)\n", found);
+	}
+
+	@Test public void testCopySemanticsForRules2() throws Exception {
+		// copy type as a root for each invocation of (...)+ in rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : type ID (',' ID)* ';' -> ^(type ID)+ ;\n" +
+			"type : 'int' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a,b,c;", debug);
+		assertEquals("(int a) (int b) (int c)\n", found);
+	}
+
+	@Test public void testCopySemanticsForRules3() throws Exception {
+		// copy type *and* modifier even though it's optional
+		// for each invocation of (...)+ in rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ;\n" +
+			"type : 'int' ;\n" +
+			"modifier : 'public' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "public int a,b,c;", debug);
+		assertEquals("(int public a) (int public b) (int public c)\n", found);
+	}
+
+	@Test public void testCopySemanticsForRules3Double() throws Exception {
+		// copy type *and* modifier even though it's optional
+		// for each invocation of (...)+ in rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ^(type modifier? ID)+ ;\n" +
+			"type : 'int' ;\n" +
+			"modifier : 'public' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "public int a,b,c;", debug);
+		assertEquals("(int public a) (int public b) (int public c) (int public a) (int public b) (int public c)\n", found);
+	}
+
+	@Test public void testCopySemanticsForRules4() throws Exception {
+		// copy type *and* modifier even though it's optional
+		// for each invocation of (...)+ in rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {MOD;}\n" +
+			"a : modifier? type ID (',' ID)* ';' -> ^(type ^(MOD modifier)? ID)+ ;\n" +
+			"type : 'int' ;\n" +
+			"modifier : 'public' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "public int a,b,c;", debug);
+		assertEquals("(int (MOD public) a) (int (MOD public) b) (int (MOD public) c)\n", found);
+	}
+
+	@Test public void testCopySemanticsLists() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {MOD;}\n" +
+			"a : ID (',' ID)* ';' -> ID+ ID+ ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a,b,c;", debug);
+		assertEquals("a b c a b c\n", found);
+	}
+
+	@Test public void testCopyRuleLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=b -> $x $x;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a a\n", found);
+	}
+
+	@Test public void testCopyRuleLabel2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=b -> ^($x $x);\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("(a a)\n", found);
+	}
+
+	@Test public void testQueueingOfTokens() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'int' ID (',' ID)* ';' -> ^('int' ID+) ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a,b,c;", debug);
+		assertEquals("(int a b c)\n", found);
+	}
+
+	@Test public void testCopyOfTokens() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'int' ID ';' -> 'int' ID 'int' ID ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a;", debug);
+		assertEquals("int a int a\n", found);
+	}
+
+	@Test public void testTokenCopyInLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'int' ID (',' ID)* ';' -> ^('int' ID)+ ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a,b,c;", debug);
+		assertEquals("(int a) (int b) (int c)\n", found);
+	}
+
+	@Test public void testTokenCopyInLoopAgainstTwoOthers() throws Exception {
+		// must smear 'int' copies across as root of multiple trees
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'int' ID ':' INT (',' ID ':' INT)* ';' -> ^('int' ID INT)+ ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "int a:1,b:2,c:3;", debug);
+		assertEquals("(int a 1) (int b 2) (int c 3)\n", found);
+	}
+
+	@Test public void testListRefdOneAtATime() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID+ -> ID ID ID ;\n" + // works if 3 input IDs
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b c", debug);
+		assertEquals("a b c\n", found);
+	}
+
+	@Test public void testSplitListWithLabels() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {VAR;}\n"+
+			"a : first=ID others+=ID* -> $first VAR $others+ ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b c", debug);
+		assertEquals("a VAR b c\n", found);
+	}
+
+	@Test public void testComplicatedMelange() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : A A b=B B b=B c+=C C c+=C D {String s=$D.text;} -> A+ B+ C+ D ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"A : 'a' ;\n" +
+			"B : 'b' ;\n" +
+			"C : 'c' ;\n" +
+			"D : 'd' ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a a b b b c c c d", debug);
+		assertEquals("a a b b b c c c d\n", found);
+	}
+
+	@Test public void testRuleLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=b -> $x;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	@Test public void testAmbiguousRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID a -> a | INT ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT: '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testWeirdRuleRef() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID a -> $a | INT ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT: '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		// $a is ambig; is it previous root or ref to a ref in alt?
+		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());		
+	}
+
+	@Test public void testRuleListLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x+=b x+=b -> $x+;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	@Test public void testRuleListLabel2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x+=b x+=b -> $x $x*;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	@Test public void testOptional() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=b (y=b)? -> $x $y?;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	@Test public void testOptional2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=ID (y=b)? -> $x $y?;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	@Test public void testOptional3() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x=ID (y=b)? -> ($x $y)?;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	@Test public void testOptional4() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x+=ID (y=b)? -> ($x $y)?;\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("a b\n", found);
+	}
+
+	@Test public void testOptional5() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : ID -> ID? ;\n"+ // match an ID to optional ID
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a", debug);
+		assertEquals("a\n", found);
+	}
+
+	@Test public void testArbitraryExprType() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : x+=b x+=b -> {new CommonTree()};\n"+
+			"b : ID ;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b", debug);
+		assertEquals("", found);
+	}
+
+	@Test public void testSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options { output = AST; } \n" +
+			"a: (INT|ID)+ -> INT+ ID+ ;\n" +
+			"INT: '0'..'9'+;\n" +
+			"ID : 'a'..'z'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "2 a 34 de", debug);
+		assertEquals("2 34 a de\n", found);
+	}
+
+	@Test public void testSet2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options { output = AST; } \n" +
+			"a: (INT|ID) -> INT? ID? ;\n" +
+			"INT: '0'..'9'+;\n" +
+			"ID : 'a'..'z'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "2", debug);
+		assertEquals("2\n", found);
+	}
+
+	@Test public void testSetWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options { output = AST; } \n" +
+			"a : x=(INT|ID) -> $x ;\n" +
+			"INT: '0'..'9'+;\n" +
+			"ID : 'a'..'z'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "2", debug);
+		assertEquals("2\n", found);
+	}
+
+	@Test public void testRewriteAction() throws Exception {
+		String grammar =
+			"grammar T; \n" +
+			"options { output = AST; }\n" +
+			"tokens { FLOAT; }\n" +
+			"r\n" +
+			"    : INT -> {new CommonTree(new CommonToken(FLOAT,$INT.text+\".0\"))} \n" +
+			"    ; \n" +
+			"INT : '0'..'9'+; \n" +
+			"WS: (' ' | '\\n' | '\\t')+ {$channel = HIDDEN;}; \n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "r", "25", debug);
+		assertEquals("25.0\n", found);
+	}
+
+	@Test public void testOptionalSubruleWithoutRealElements() throws Exception {
+		// copy type *and* modifier even though it's optional
+		// for each invocation of (...)+ in rewrite
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;} \n" +
+			"tokens {PARMS;} \n" +
+			"\n" +
+			"modulo \n" +
+			" : 'modulo' ID ('(' parms+ ')')? -> ^('modulo' ID ^(PARMS parms+)?) \n" +
+			" ; \n" +
+			"parms : '#'|ID; \n" +
+			"ID : ('a'..'z' | 'A'..'Z')+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "modulo", "modulo abc (x y #)", debug);
+		assertEquals("(modulo abc (PARMS x y #))\n", found);
+	}
+
+	// C A R D I N A L I T Y  I S S U E S
+
+	@Test public void testCardinality() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {BLOCK;}\n" +
+			"a : ID ID INT INT INT -> (ID INT)+;\n"+
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+; \n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "a b 3 4 5", debug);
+		String expecting =
+			"org.antlr.runtime.tree.RewriteCardinalityException: token ID";
+		String found = getFirstLineOfException();
+		assertEquals(expecting, found);
+	}
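+	// Explanatory aside (not part of the original test): in the rewrite
+	// (ID INT)+, the generated loop keeps emitting ID/INT pairs while any of
+	// the referenced element streams still has elements.  With two IDs but
+	// three INTs, the ID stream runs dry on the third iteration, so the tree
+	// builder throws RewriteCardinalityException for token ID, whose first
+	// line is what getFirstLineOfException() returns above.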
+
+	@Test public void testCardinality2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID+ -> ID ID ID ;\n" + // only 2 input IDs
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		execParser("T.g", grammar, "TParser", "TLexer",
+				   "a", "a b", debug);
+		String expecting =
+			"org.antlr.runtime.tree.RewriteCardinalityException: token ID";
+		String found = getFirstLineOfException();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testCardinality3() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID? INT -> ID INT ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		execParser("T.g", grammar, "TParser", "TLexer",
+				   "a", "3", debug);
+		String expecting =
+			"org.antlr.runtime.tree.RewriteEmptyStreamException: token ID";
+		String found = getFirstLineOfException();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testLoopCardinality() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID? INT -> ID+ INT ;\n" +
+			"op : '+'|'-' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		execParser("T.g", grammar, "TParser", "TLexer",
+				   "a", "3", debug);
+		String expecting =
+			"org.antlr.runtime.tree.RewriteEarlyExitException";
+		String found = getFirstLineOfException();
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testWildcard() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID c=. -> $c;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "abc 34", debug);
+		assertEquals("34\n", found);
+	}
+
+	// E R R O R S
+
+	@Test public void testUnknownRule() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> ugh ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_RULE_REF;
+		Object expectedArg = "ugh";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
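+	// Added note on the pattern shared by this and the following error tests:
+	// the grammar is analyzed in-process, the output directory is set to null
+	// so no code is written to disk, and the ErrorQueue registered with
+	// ErrorManager captures the GrammarSemanticsMessage that checkError()
+	// compares against the expected message id and arguments.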
+
+	@Test public void testKnownRuleButNotInLHS() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> b ;\n" +
+			"b : 'b' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_REWRITE_ELEMENT_NOT_PRESENT_ON_LHS;
+		Object expectedArg = "b";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testUnknownToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> ICK ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE;
+		Object expectedArg = "ICK";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testUnknownLabel() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> $foo ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_LABEL_REF_IN_REWRITE;
+		Object expectedArg = "foo";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testUnknownCharLiteralToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> 'a' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE;
+		Object expectedArg = "'a'";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testUnknownStringLiteralToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT -> 'foo' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		Grammar g = new Grammar(grammar);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_TOKEN_REF_IN_REWRITE;
+		Object expectedArg = "'foo'";
+		Object expectedArg2 = null;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testExtraTokenInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"tokens {EXPR;}\n" +
+			"decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "int 34 x=1;", debug);
+		assertEquals("line 1:4 extraneous input '34' expecting ID\n", this.stderrDuringParse);
+		assertEquals("(EXPR int x 1)\n", found); // tree gets correct x and 1 tokens
+	}
+
+	@Test public void testMissingIDInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"tokens {EXPR;}\n" +
+			"decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "int =1;", debug);
+		assertEquals("line 1:4 missing ID at '='\n", this.stderrDuringParse);
+		assertEquals("(EXPR int <missing ID> 1)\n", found); // tree gets invented ID token
+	}
+
+	@Test public void testMissingSetInSimpleDecl() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"tokens {EXPR;}\n" +
+			"decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;\n" +
+			"type : 'int' | 'float' ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "decl", "x=1;", debug);
+		assertEquals("line 1:0 mismatched input 'x' expecting set null\n", this.stderrDuringParse);
+		assertEquals("(EXPR <error: x> x 1)\n", found); // tree gets invented ID token
+	}
+
+	@Test public void testMissingTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc", debug);
+		assertEquals("line 1:3 missing INT at '<EOF>'\n", this.stderrDuringParse);
+		// doesn't do in-line recovery for sets (yet?)
+		assertEquals("abc <missing INT>\n", found);
+	}
+
+	@Test public void testExtraTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b c -> b c;\n" +
+			"b : ID -> ID ;\n" +
+			"c : INT -> INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "abc ick 34", debug);
+		assertEquals("line 1:4 extraneous input 'ick' expecting INT\n", this.stderrDuringParse);
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testMissingFirstTokenGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "34", debug);
+		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
+		assertEquals("<missing ID> 34\n", found);
+	}
+
+	@Test public void testMissingFirstTokenGivesErrorNode2() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b c -> b c;\n" +
+			"b : ID -> ID ;\n" +
+			"c : INT -> INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "34", debug);
+		// finds an error at the first token, 34, and re-syncs.
+		// re-synchronizing does not consume a token because 34 follows
+		// ref to rule b (start of c). It then matches 34 in c.
+		assertEquals("line 1:0 missing ID at '34'\n", this.stderrDuringParse);
+		assertEquals("<missing ID> 34\n", found);
+	}
+
+	@Test public void testNoViableAltGivesErrorNode() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"options {output=AST;}\n" +
+			"a : b -> b | c -> c;\n" +
+			"b : ID -> ID ;\n" +
+			"c : INT -> INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"S : '*' ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+								  "a", "*", debug);
+		// the '*' token matches neither alternative of rule a, so the parser
+		// reports no viable alternative and the resulting tree is a single
+		// error node wrapping the unexpected token consumed during resync.
+		assertEquals("line 1:0 no viable alternative at input '*'\n", this.stderrDuringParse);
+		assertEquals("<unexpected: [@0,0:0='*',<6>,1:0], resync=*>\n", found);
+	}
+
+	@Test public void testRewriteEmptyRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {IMAGINARY;}\n" +
+			"a : empty EOF! ;\n" +
+			"empty : -> IMAGINARY;\n" +
+			"WS : ' ';\n";
+		String result = execParser("T.g", grammar, "TParser", "TLexer",
+				   "a", "", debug);
+		assertEquals("IMAGINARY\n", result);
+		assertNull(stderrDuringParse);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestRewriteTemplates.java b/tool/src/test/java/org/antlr/test/TestRewriteTemplates.java
new file mode 100644
index 0000000..af0e1de
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestRewriteTemplates.java
@@ -0,0 +1,320 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.tool.*;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestRewriteTemplates extends BaseTest {
+	protected boolean debug = false;
+
+	@Test public void testDelete() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("", found);
+	}
+
+	@Test public void testAction() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> {new StringTemplate($ID.text)} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testEmbeddedLiteralConstructor() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> {%{$ID.text}} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testInlineTemplate() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> template(x={$ID},y={$INT}) <<x:<x.text>, y:<y.text>;>> ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("x:abc, y:34;\n", found);
+	}
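+	// Added note: the inline rewrite above names an anonymous template and
+	// passes attributes in its argument list, with the template body between
+	// << and >>:
+	//   a : ID INT -> template(x={$ID},y={$INT}) <<x:<x.text>, y:<y.text>;>> ;
+	// Inside the body, <x> and <y> refer to those attributes; since the token
+	// objects are passed in, <x.text> and <y.text> yield the matched text.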
+
+	@Test public void testNamedTemplate() throws Exception {
+		// the support code adds a template group to its output Test.java
+		// that defines template foo.
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> foo(x={$ID.text},y={$INT.text}) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testIndirectTemplate() throws Exception {
+		// the support code adds a template group to its output Test.java
+		// that defines template foo.
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> ({\"foo\"})(x={$ID.text},y={$INT.text}) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testInlineTemplateInvokingLib() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> template(x={$ID.text},y={$INT.text}) \"<foo(...)>\" ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testPredicatedAlts() throws Exception {
+		// the support code adds a template group to its output Test.java
+		// that defines template foo.
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : ID INT -> {false}? foo(x={$ID.text},y={$INT.text})\n" +
+			"           -> foo(x={\"hi\"}, y={$ID.text})\n" +
+			"  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("hi abc\n", found);
+	}
+
+	@Test public void testTemplateReturn() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : b {System.out.println($b.st);} ;\n" +
+			"b : ID INT -> foo(x={$ID.text},y={$INT.text}) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testReturnValueWithTemplate() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a : b {System.out.println($b.i);} ;\n" +
+			"b returns [int i] : ID INT {$i=8;} ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("8\n", found);
+	}
+
+	@Test public void testTemplateRefToDynamicAttributes() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a scope {String id;} : ID {$a::id=$ID.text;} b\n" +
+			"	{System.out.println($b.st.toString());}\n" +
+			"   ;\n" +
+			"b : INT -> foo(x={$a::id}) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abc 34", debug);
+		assertEquals("abc \n", found);
+	}
+
+	// tests for rewriting templates in tree parsers
+
+	@Test public void testSingleNode() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template;}\n" +
+			"s : a {System.out.println($a.st);} ;\n" +
+			"a : ID -> template(x={$ID.text}) <<|<x>|>> ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc");
+		assertEquals("|abc|\n", found);
+	}
+
+	@Test public void testSingleNodeRewriteMode() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
+			"s : a {System.out.println(input.getTokenStream().toString(0,0));} ;\n" +
+			"a : ID -> template(x={$ID.text}) <<|<x>|>> ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc");
+		assertEquals("|abc|\n", found);
+	}
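+	// Added note (a reading of rewrite mode, not asserted by the original
+	// comments): with output=template and rewrite=true in a tree grammar, the
+	// template produced by a rewrite is written back over the matched token
+	// range of the underlying token stream, so printing
+	// input.getTokenStream().toString(0,0) above yields "|abc|" rather than
+	// the original text "abc".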
+
+	@Test public void testRewriteRuleAndRewriteModeOnSimpleElements() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
+			"a: ^(A B) -> {ick}\n" +
+			" | y+=INT -> {ick}\n" +
+			" | x=ID -> {ick}\n" +
+			" | BLORT -> {ick}\n" +
+			" ;\n"
+		);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
+	}
+
+	@Test public void testRewriteRuleAndRewriteModeIgnoreActionsPredicates() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
+			"a: {action} {action2} x=A -> {ick}\n" +
+			" | {pred1}? y+=B -> {ick}\n" +
+			" | C {action} -> {ick}\n" +
+			" | {pred2}?=> z+=D -> {ick}\n" +
+			" | (E)=> ^(F G) -> {ick}\n" +
+			" ;\n"
+		);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
+	}
+
+	@Test public void testRewriteRuleAndRewriteModeNotSimple() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
+			"a  : ID+ -> {ick}\n" +
+			"   | INT INT -> {ick}\n" +
+			"   ;\n"
+		);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
+	}
+
+	@Test public void testRewriteRuleAndRewriteModeRefRule() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"tree grammar TP;\n"+
+			"options {ASTLabelType=CommonTree; output=template; rewrite=true;}\n" +
+			"a  : b+ -> {ick}\n" +
+			"   | b b A -> {ick}\n" +
+			"   ;\n" +
+			"b  : B ;\n"
+		);
+		Tool antlr = newTool();
+		antlr.setOutputDirectory(null); // write to /dev/null
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer();
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestSemanticPredicateEvaluation.java b/tool/src/test/java/org/antlr/test/TestSemanticPredicateEvaluation.java
new file mode 100644
index 0000000..6da7f31
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestSemanticPredicateEvaluation.java
@@ -0,0 +1,241 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestSemanticPredicateEvaluation extends BaseTest {
+	@Test public void testSimpleCyclicDFAWithPredicate() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"a : {false}? 'x'* 'y' {System.out.println(\"alt1\");}\n" +
+			"  | {true}?  'x'* 'y' {System.out.println(\"alt2\");}\n" +
+			"  ;\n" ;
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "xxxy", false);
+		assertEquals("alt2\n", found);
+	}
+
+	@Test public void testSimpleCyclicDFAWithInstanceVarPredicate() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"@members {boolean v=true;}\n" +
+			"a : {false}? 'x'* 'y' {System.out.println(\"alt1\");}\n" +
+			"  | {v}?     'x'* 'y' {System.out.println(\"alt2\");}\n" +
+			"  ;\n" ;
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "xxxy", false);
+		assertEquals("alt2\n", found);
+	}
+
+	@Test public void testPredicateValidation() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"@members {\n" +
+			"public void reportError(RecognitionException e) {\n" +
+			"    System.out.println(\"error: \"+e.toString());\n" +
+			"}\n" +
+			"}\n" +
+			"\n" +
+			"a : {false}? 'x'\n" +
+			"  ;\n" ;
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "x", false);
+		assertEquals("error: FailedPredicateException(a,{false}?)\n", found);
+	}
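+	// Added note: a plain {..}? predicate that is not needed for prediction is
+	// still validated at parse time; when it fails, the generated parser throws
+	// FailedPredicateException, which the overridden reportError above prints.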
+
+	@Test public void testLexerPreds() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=false;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : {p}? 'a'  {System.out.println(\"token 1\");} ;\n" +
+			"B : {!p}? 'a' {System.out.println(\"token 2\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "a", false);
+		// "a" is ambig; can match both A, B.  Pred says match 2
+		assertEquals("token 2\n", found);
+	}
+
+	@Test public void testLexerPreds2() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=true;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : {p}? 'a' {System.out.println(\"token 1\");} ;\n" +
+			"B : ('a'|'b')+ {System.out.println(\"token 2\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "a", false);
+		// "a" is ambig; can match both A, B.  Pred says match 1
+		assertEquals("token 1\n", found);
+	}
+
+	@Test public void testLexerPredInExitBranch() throws Exception {
+		// p says it's ok to exit; it has precedence over the !p loopback branch
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=true;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : ('a' {System.out.print(\"1\");})*\n" +
+			"    {p}?\n" +
+			"    ('a' {System.out.print(\"2\");})* ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aaa", false);
+		assertEquals("222\n", found);
+	}
+
+	@Test public void testLexerPredInExitBranch2() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=true;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : ({p}? 'a' {System.out.print(\"1\");})*\n" +
+			"    ('a' {System.out.print(\"2\");})* ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aaa", false);
+		assertEquals("111\n", found);
+	}
+
+	@Test public void testLexerPredInExitBranch3() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=true;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : ({p}? 'a' {System.out.print(\"1\");} | )\n" +
+			"    ('a' {System.out.print(\"2\");})* ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aaa", false);
+		assertEquals("122\n", found);
+	}
+
+	@Test public void testLexerPredInExitBranch4() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"a : (A|B)+ ;\n" +
+			"A @init {int n=0;} : ({n<2}? 'a' {System.out.print(n++);})+\n" +
+			"    ('a' {System.out.print(\"x\");})* ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aaaaa", false);
+		assertEquals("01xxx\n", found);
+	}
+
+	@Test public void testLexerPredsInCyclicDFA() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=false;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : {p}? ('a')+ 'x'  {System.out.println(\"token 1\");} ;\n" +
+			"B :      ('a')+ 'x' {System.out.println(\"token 2\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aax", false);
+		assertEquals("token 2\n", found);
+	}
+
+	@Test public void testLexerPredsInCyclicDFA2() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"@lexer::members {boolean p=false;}\n" +
+			"a : (A|B)+ ;\n" +
+			"A : {p}? ('a')+ 'x' ('y')? {System.out.println(\"token 1\");} ;\n" +
+			"B :      ('a')+ 'x' {System.out.println(\"token 2\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aax", false);
+		assertEquals("token 2\n", found);
+	}
+
+	@Test public void testGatedPred() throws Exception {
+		String grammar =
+			"grammar foo;" +
+			"a : (A|B)+ ;\n" +
+			"A : {true}?=> 'a' {System.out.println(\"token 1\");} ;\n" +
+			"B : {false}?=>('a'|'b')+ {System.out.println(\"token 2\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aa", false);
+		// "a" is ambig; can match both A, B.  Pred says match A twice
+		assertEquals("token 1\ntoken 1\n", found);
+	}
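+	// Added note: {p}? is a plain (disambiguating/validating) semantic
+	// predicate, while {p}?=> is a gated predicate that is hoisted into the
+	// prediction decision and switches its alternative off entirely when
+	// false, as the gated-pred tests here demonstrate.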
+
+	@Test public void testGatedPred2() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"@lexer::members {boolean sig=false;}\n"+
+			"a : (A|B)+ ;\n" +
+			"A : 'a' {System.out.print(\"A\"); sig=true;} ;\n" +
+			"B : 'b' ;\n" +
+			"C : {sig}?=> ('a'|'b') {System.out.print(\"C\");} ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aa", false);
+		assertEquals("AC\n", found);
+	}
+
+	@Test public void testPredWithActionTranslation() throws Exception {
+		String grammar =
+			"grammar foo;\n" +
+			"a : b[2] ;\n" +
+			"b[int i]\n" +
+			"  : {$i==1}?   'a' {System.out.println(\"alt 1\");}\n" +
+			"  | {$b.i==2}? 'a' {System.out.println(\"alt 2\");}\n" +
+			"  ;\n";
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "aa", false);
+		assertEquals("alt 2\n", found);
+	}
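+	// Added note: predicate expressions go through the same action translator
+	// as other actions, so both $i and $b.i above resolve to rule b's int
+	// parameter; invoked as b[2], only the second alternative's predicate holds.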
+
+	@Test public void testPredicatesOnEOTTarget() throws Exception {
+		String grammar =
+			"grammar foo; \n" +
+			"@lexer::members {boolean p=true, q=false;}" +
+			"a : B ;\n" +
+			"A: '</'; \n" +
+			"B: {p}? '<!' {System.out.println(\"B\");};\n" +
+			"C: {q}? '<' {System.out.println(\"C\");}; \n" +
+			"D: '<';\n" ;
+		String found = execParser("foo.g", grammar, "fooParser", "fooLexer",
+				    "a", "<!", false);
+		assertEquals("B\n", found);
+	}
+
+
+	// S U P P O R T
+
+	public void _test() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a :  ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {channel=99;} ;\n";
+		String found = execParser("t.g", grammar, "T", "TLexer",
+				    "a", "abc 34", false);
+		assertEquals("\n", found);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestSemanticPredicates.java b/tool/src/test/java/org/antlr/test/TestSemanticPredicates.java
new file mode 100644
index 0000000..0ad540c
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestSemanticPredicates.java
@@ -0,0 +1,935 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.analysis.DFA;
+import org.antlr.analysis.DecisionProbe;
+import org.antlr.analysis.Label;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.misc.BitSet;
+import org.antlr.runtime.Token;
+import org.antlr.tool.*;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.junit.Assert.*;
+
+public class TestSemanticPredicates extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestSemanticPredicates() {
+	}
+
+	@Test public void testPredsButSyntaxResolves() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A | {p2}? B ;");
+		String expecting =
+			".s0-A->:s1=>1\n" +
+			".s0-B->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
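+	// Added note on the notation used in the expecting strings throughout this
+	// class: each line is one serialized DFA edge.  ".s0-A->:s1=>1" means state
+	// s0 transitions on token A to accept state s1 predicting alt 1, ".s1"
+	// (without a colon) is an ordinary state, "{p1}?" edges test a hoisted
+	// predicate, and "&&{...}?" marks a gated predicate attached to a token edge.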
+
+	@Test public void testLL_1_Pred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A | {p2}? A ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testLL_1_Pred_forced_k_1() throws Exception {
+		// should stop just like before w/o k set.
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a options {k=1;} : {p1}? A | {p2}? A ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testLL_2_Pred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A B | {p2}? A B ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->.s2\n" +
+			".s2-{p1}?->:s3=>1\n" +
+			".s2-{p2}?->:s4=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testPredicatedLoop() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : ( {p1}? A | {p2}? A )+;");
+		String expecting =                   // loop back
+			".s0-A->.s2\n" +
+			".s0-EOF->:s1=>3\n" +
+			".s2-{p1}?->:s3=>1\n" +
+			".s2-{p2}?->:s4=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testPredicatedToStayInLoop() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : ( {p1}? A )+ (A)+;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";       // loop back
+        checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testAndPredicates() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? {p1a}? A | {p2}? A ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{(p1a&&p1)}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test
+    public void testOrPredicates() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | {p2}? A ;\n" +
+			"b : {p1}? A | {p1a}? A ;");
+		String expecting =
+			".s0-A->.s1\n" +
+            ".s1-{(p1a||p1)}?->:s2=>1\n" +
+            ".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testIgnoresHoistingDepthGreaterThanZero() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : A {p1}? | A {p2}?;");
+		String expecting =
+			".s0-A->:s1=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "A", null, null, 2, false);
+	}
+
+	@Test public void testIgnoresPredsHiddenByActions() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {a1} {p1}? A | {a2} {p2}? A ;");
+		String expecting =
+			".s0-A->:s1=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "A", null, null, 2, true);
+	}
+
+	@Test public void testIgnoresPredsHiddenByActionsOneAlt() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A | {a2} {p2}? A ;"); // ok since 1 pred visible
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null,
+					  null, null, null, null, 0, true);
+	}
+
+	/*
+	@Test public void testIncompleteSemanticHoistedContextk2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b | A B;\n" +
+			"b : {p1}? A B | A B ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "A B", new int[] {1}, null, 3);
+	}	
+	 */
+
+	@Test public void testHoist2() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | c ;\n" +
+			"b : {p1}? A ;\n" +
+			"c : {p2}? A ;\n");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testHoistCorrectContext() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | {p2}? ID ;\n" +
+			"b : {p1}? ID | INT ;\n");
+		String expecting =  // only tests after ID, not INT :)
+			".s0-ID->.s1\n" +
+			".s0-INT->:s2=>1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testDefaultPredNakedAltIsLast() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | ID ;\n" +
+			"b : {p1}? ID | INT ;\n");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s0-INT->:s2=>1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testDefaultPredNakedAltNotLast() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : ID | b ;\n" +
+			"b : {p1}? ID | INT ;\n");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s0-INT->:s3=>2\n" +
+			".s1-{!(p1)}?->:s2=>1\n" +
+			".s1-{p1}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testLeftRecursivePred() throws Exception {
+		// No analysis is possible, but it is probably good to fail.  Not sure we
+		// really want left-recursion even if it is guarded with a pred.
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"s : a ;\n" +
+			"a : {p1}? a | ID ;\n");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{true}?->:s3=>2\n";
+
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
+		g.setCodeGenerator(generator);
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+
+		DFA dfa = g.getLookaheadDFA(1);
+		assertEquals(null, dfa); // can't analyze.
+
+		/*
+		String result = serializer.serialize(dfa.startState);
+		assertEquals(expecting, result);
+		*/
+
+		assertEquals("unexpected number of expected problems", 1, equeue.size());
+		Message msg = equeue.errors.get(0);
+		assertTrue("warning must be a left recursion msg",
+				    msg instanceof LeftRecursionCyclesMessage);
+	}
+
+	@Test public void testIgnorePredFromLL2AltLastAltIsDefaultTrue() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A B | A C | {p2}? A | {p3}? A | A ;\n");
+		// two situations of note:
+		// 1. A B syntax is enough to predict that alt, so p1 is not used
+		//    to distinguish it from alts 2..5
+		// 2. Alts 3, 4, 5 are nondeterministic upon A.  p2, p3, and the
+		//    complement of p2||p3 are sufficient to resolve the conflict. Do
+		//    not include alt 1's p1 pred in the "complement of other alts"
+		//    because it is not considered nondeterministic with alts 3..5
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n" +
+			".s1-{p2}?->:s4=>3\n" +
+			".s1-{p3}?->:s5=>4\n" +
+			".s1-{true}?->:s6=>5\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testIgnorePredFromLL2AltPredUnionNeeded() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : {p1}? A B | A C | {p2}? A | A | {p3}? A ;\n");
+		// two situations of note:
+		// 1. A B syntax is enough to predict that alt, so p1 is not used
+		//    to distinguish it from alts 2..5
+		// 2. Alts 3, 4, 5 are nondeterministic upon A.  p2, p3, and the
+		//    complement of p2||p3 are sufficient to resolve the conflict. Do
+		//    not include alt 1's p1 pred in the "complement of other alts"
+		//    because it is not considered nondeterministic with alts 3..5
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n" +
+			".s1-C->:s3=>2\n" +
+			".s1-{!((p3||p2))}?->:s5=>4\n" +
+			".s1-{p2}?->:s4=>3\n" +
+			".s1-{p3}?->:s6=>5\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testPredGets2SymbolSyntacticContext() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | A B | C ;\n" +
+			"b : {p1}? A B ;\n");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-C->:s5=>3\n" +
+			".s1-B->.s2\n" +
+			".s2-{p1}?->:s3=>1\n" +
+			".s2-{true}?->:s4=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testMatchesLongestThenTestPred() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"a : b | c ;\n" +
+			"b : {p}? A ;\n" +
+			"c : {q}? (A|B)+ ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-B->:s3=>2\n" +
+			".s1-{p}?->:s2=>1\n" +
+			".s1-{q}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testPredsUsedAfterRecursionOverflow() throws Exception {
+		// analysis must bail out due to the non-LL(*) nature (recursion overflow);
+		// it retries with k=1 (using the LL(*) algorithm rather than the
+		// optimized version, since the decision has preds)
+		Grammar g = new Grammar(
+			"parser grammar P;\n"+
+			"s : {p1}? e '.' | {p2}? e ':' ;\n" +
+			"e : '(' e ')' | INT ;\n");
+		String expecting =
+			".s0-'('->.s1\n" +
+			".s0-INT->.s4\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n" +
+			".s4-{p1}?->:s2=>1\n" +
+			".s4-{p2}?->:s3=>2\n";
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
+		g.setCodeGenerator(generator);
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+
+		assertEquals("unexpected number of expected problems", 0, equeue.size());
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testPredsUsedAfterK2FailsNoRecursionOverflow() throws Exception {
+		// analysis must bail out due to non-LL(*) nature (ovf)
+		// it retries with k=1 (using the LL(*) algorithm rather than the
+		// optimized version, since the decision has preds)
+		Grammar g = new Grammar(
+			"grammar P;\n" +
+			"options {k=2;}\n"+
+			"s : {p1}? e '.' | {p2}? e ':' ;\n" +
+			"e : '(' e ')' | INT ;\n");
+		String expecting =
+			".s0-'('->.s1\n" +
+			".s0-INT->.s6\n" +
+			".s1-'('->.s2\n" +
+			".s1-INT->.s5\n" +
+			".s2-{p1}?->:s3=>1\n" +
+			".s2-{p2}?->:s4=>2\n" +
+			".s5-{p1}?->:s3=>1\n" +
+			".s5-{p2}?->:s4=>2\n" +
+			".s6-'.'->:s3=>1\n" +
+			".s6-':'->:s4=>2\n";
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
+		g.setCodeGenerator(generator);
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+
+		assertEquals("unexpected number of expected problems", 0, equeue.size());
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testLexerMatchesLongestThenTestPred() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"B : {p}? 'a' ;\n" +
+			"C : {q}? ('a'|'b')+ ;");
+		String expecting =
+			".s0-'a'->.s1\n" +
+			".s0-'b'->:s4=>2\n" +
+			".s1-'a'..'b'->:s4=>2\n" +
+			".s1-<EOT>->.s2\n" +
+			".s2-{p}?->:s3=>1\n" +
+			".s2-{q}?->:s4=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testLexerMatchesLongestMinusPred() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"B : 'a' ;\n" +
+			"C : ('a'|'b')+ ;");
+		String expecting =
+			".s0-'a'->.s1\n" +
+			".s0-'b'->:s3=>2\n" +
+			".s1-'a'..'b'->:s3=>2\n" +
+			".s1-<EOT>->:s2=>1\n";
+		checkDecision(g, 2, expecting, null, null, null, null, null, 0, false);
+	}
+
+    @Test
+    public void testGatedPred() throws Exception {
+		// gated preds are present on all arcs in predictor
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"B : {p}? => 'a' ;\n" +
+			"C : {q}? => ('a'|'b')+ ;");
+		String expecting =
+			".s0-'a'&&{(q||p)}?->.s1\n" +
+            ".s0-'b'&&{q}?->:s4=>2\n" +
+            ".s1-'a'..'b'&&{q}?->:s4=>2\n" +
+            ".s1-<EOT>&&{(q||p)}?->.s2\n" +
+            ".s2-{p}?->:s3=>1\n" +
+            ".s2-{q}?->:s4=>2\n";
+		checkDecision(g, 2, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testGatedPredHoistsAndCanBeInStopState() throws Exception {
+		// I found a bug where merging stop states made us throw away
+		// a stop state with a gated pred!
+		Grammar g = new Grammar(
+			"grammar u;\n" +
+			"a : b+ ;\n" +
+			"b : 'x' | {p}?=> 'y' ;");
+		String expecting =
+			".s0-'x'->:s2=>1\n" +
+			".s0-'y'&&{p}?->:s3=>1\n" +
+			".s0-EOF->:s1=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test
+    public void testGatedPredInCyclicDFA() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : {p}?=> ('a')+ 'x' ;\n" +
+			"B : {q}?=> ('a'|'b')+ 'x' ;");
+		String expecting =
+			".s0-'a'&&{(q||p)}?->.s1\n" +
+            ".s0-'b'&&{q}?->:s5=>2\n" +
+            ".s1-'a'&&{(q||p)}?->.s1\n" +
+            ".s1-'b'&&{q}?->:s5=>2\n" +
+            ".s1-'x'&&{(q||p)}?->.s2\n" +
+            ".s2-<EOT>&&{(q||p)}?->.s3\n" +
+            ".s3-{p}?->:s4=>1\n" +
+            ".s3-{q}?->:s5=>2\n";
+		checkDecision(g, 3, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testGatedPredNotActuallyUsedOnEdges() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar P;\n"+
+			"A : ('a' | {p}?=> 'a')\n" +
+			"  | 'a' 'b'\n" +
+			"  ;");
+		String expecting1 =
+			".s0-'a'->.s1\n" +
+			".s1-{!(p)}?->:s2=>1\n" +  	// Used to disambig subrule
+			".s1-{p}?->:s3=>2\n";
+		// rule A decision can't test p from s0->1 because 'a' is valid
+		// for alt1 *and* alt2 w/o p.  Can't test p from s1 to s3 because
+		// we might have passed the first alt of subrule.  The same state
+		// is listed in s2 in 2 different configurations: one with and one
+		// w/o p.  Can't test therefore.  p||true == true.
+		String expecting2 =
+			".s0-'a'->.s1\n" +
+			".s1-'b'->:s2=>2\n" +
+			".s1-<EOT>->:s3=>1\n";
+		checkDecision(g, 1, expecting1, null, null, null, null, null, 0, false);
+		checkDecision(g, 2, expecting2, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testGatedPredDoesNotForceAllToBeGated() throws Exception {
+		Grammar g = new Grammar(
+			"grammar w;\n" +
+			"a : b | c ;\n" +
+			"b : {p}? B ;\n" +
+			"c : {q}?=> d ;\n" +
+			"d : {r}? C ;\n");
+		String expecting =
+			".s0-B->:s1=>1\n" +
+			".s0-C&&{q}?->:s2=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testGatedPredDoesNotForceAllToBeGated2() throws Exception {
+		Grammar g = new Grammar(
+			"grammar w;\n" +
+			"a : b | c ;\n" +
+			"b : {p}? B ;\n" +
+			"c : {q}?=> d ;\n" +
+			"d : {r}?=> C\n" +
+			"  | B\n" +
+			"  ;\n");
+		String expecting =
+			".s0-B->.s1\n" +
+			".s0-C&&{(r&&q)}?->:s3=>2\n" +
+			".s1-{p}?->:s2=>1\n" +
+			".s1-{q}?->:s3=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	@Test public void testORGatedPred() throws Exception {
+		Grammar g = new Grammar(
+			"grammar w;\n" +
+			"a : b | c ;\n" +
+			"b : {p}? B ;\n" +
+			"c : {q}?=> d ;\n" +
+			"d : {r}?=> C\n" +
+			"  | {s}?=> B\n" +
+			"  ;\n");
+		String expecting =
+			".s0-B->.s1\n" +
+			".s0-C&&{(r&&q)}?->:s3=>2\n" +
+			".s1-{(s&&q)}?->:s3=>2\n" +
+			".s1-{p}?->:s2=>1\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
+
+	/** The following grammar should yield an error that rule 'a' has
+	 *  insufficient semantic info pulled from 'b'.
+	 */
+	@Test public void testIncompleteSemanticHoistedContext() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b | B;\n" +
+			"b : {p1}? B | B ;");
+		String expecting =
+			".s0-B->:s1=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "B", new int[] {1}, null, 3, false);
+	}
+
+	@Test public void testIncompleteSemanticHoistedContextk2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b | A B;\n" +
+			"b : {p1}? A B | A B ;");
+		String expecting =
+			".s0-A->.s1\n" +
+			".s1-B->:s2=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "A B", new int[] {1}, null, 3, false);
+	}
+
+	@Test public void testIncompleteSemanticHoistedContextInFOLLOW() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"options {k=1;}\n" + // limit to k=1 because it's LL(2); force pred hoist
+			"a : A? ;\n" + // need FOLLOW
+			"b : X a {p1}? A | Y a A ;"); // only one A is covered
+		String expecting =
+			".s0-A->:s1=>1\n"; // s0-EOF->s2 branch pruned during optimization
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "A", new int[] {2}, null, 3, false);
+	}
+
+	@Test public void testIncompleteSemanticHoistedContextInFOLLOWk2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A B)? ;\n" + // need FOLLOW
+			"b : X a {p1}? A B | Y a A B | Z a ;"); // only first alt is covered
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-EOF->:s3=>2\n" +
+			".s1-B->:s2=>1\n";
+		checkDecision(g, 1, expecting, null,
+					  new int[] {1,2}, "A B", new int[] {2}, null, 2, false);
+	}
+
+	@Test public void testIncompleteSemanticHoistedContextInFOLLOWDueToHiddenPred() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : (A B)? ;\n" + // need FOLLOW
+			"b : X a {p1}? A B | Y a {a1} {p2}? A B | Z a ;"); // only first alt is covered
+		String expecting =
+			".s0-A->.s1\n" +
+			".s0-EOF->:s3=>2\n" +
+			".s1-B->:s2=>1\n";
+		checkDecision(g, 1, expecting, null,
+					  new int[] {1,2}, "A B", new int[] {2}, null, 2, true);
+	}
+
+	/** The following grammar should yield an error that rule 'a' has
+	 *  insufficient semantic info pulled from 'b'.  This is the same
+	 *  as the previous case except that the D prevents the B path from
+	 *  "pinching" together into a single NFA state.
+	 *
+	 *  This test also demonstrates that just because B D could predict
+	 *  alt 1 in rule 'a', it is unnecessary to continue NFA&rarr;DFA
+	 *  conversion to include an edge for D.  Alt 1 is the only possible
+	 *  prediction because we resolve the ambiguity by choosing alt 1.
+	 */
+	@Test public void testIncompleteSemanticHoistedContext2() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : b | B;\n" +
+			"b : {p1}? B | B D ;");
+		String expecting =
+			".s0-B->:s1=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2},
+					  new int[] {1,2}, "B", new int[] {1},
+					  null, 3, false);
+	}
+
+	@Test public void testTooFewSemanticPredicates() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : {p1}? A | A | A ;");
+		String expecting =
+			".s0-A->:s1=>1\n";
+		checkDecision(g, 1, expecting, new int[] {2,3},
+					  new int[] {1,2,3}, "A",
+					  null, null, 2, false);
+	}
+
+	@Test public void testPredWithK1() throws Exception {
+		Grammar g = new Grammar(
+			"\tlexer grammar TLexer;\n" +
+			"A\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"  : {p1}? ('x')+ '.'\n" +
+			"  | {p2}? ('x')+ '.'\n" +
+			"  ;\n");
+		String expecting =
+			".s0-'x'->.s1\n" +
+			".s1-{p1}?->:s2=>1\n" +
+			".s1-{p2}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] insufficientPredAlts = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 3, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, insufficientPredAlts,
+					  danglingAlts, numWarnings, false);
+	}
+
+	@Test public void testPredWithArbitraryLookahead() throws Exception {
+		Grammar g = new Grammar(
+			"\tlexer grammar TLexer;\n" +
+			"A : {p1}? ('x')+ '.'\n" +
+			"  | {p2}? ('x')+ '.'\n" +
+			"  ;\n");
+		String expecting =
+			".s0-'x'->.s1\n" +
+			".s1-'.'->.s2\n" +
+			".s1-'x'->.s1\n" +
+			".s2-{p1}?->:s3=>1\n" +
+			".s2-{p2}?->:s4=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] insufficientPredAlts = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 3, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, insufficientPredAlts,
+					  danglingAlts, numWarnings, false);
+	}
+
+	/** For a DFA state with lots of configurations that have the same
+	 *  predicate, don't just OR them all together as it's a waste to
+	 *  test a||a||b||a||a etc...  ANTLR makes a unique set and THEN
+	 *  OR's them together.
+	 */
+	@Test public void testUniquePredicateOR() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar v;\n" +
+			"\n" +
+			"a : {a}? b\n" +
+			"  | {b}? b\n" +
+			"  ;\n" +
+			"\n" +
+			"b : {c}? (X)+ ;\n" +
+			"\n" +
+			"c : a\n" +
+			"  | b\n" +
+			"  ;\n");
+		String expecting =
+			".s0-X->.s1\n" +
+			".s1-{((b||a)&&c)}?->:s2=>1\n" +
+			".s1-{c}?->:s3=>2\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = null;
+		String ambigInput = null;
+		int[] insufficientPredAlts = null;
+		int[] danglingAlts = null;
+		int numWarnings = 0;
+		checkDecision(g, 3, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, insufficientPredAlts,
+					  danglingAlts, numWarnings, false);
+	}
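+	// Sketch of the reduction the test above relies on: a configuration set
+	// carrying predicates {a}?, {a}?, {b}? is first reduced to the unique set
+	// {a, b} and only then OR'd, giving (a||b) rather than (a||a||b); combined
+	// with the {c}? hoisted from rule b, that is why the expected edge reads
+	// {((b||a)&&c)}?.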
+
+	@Test public void testSemanticContextPreventsEarlyTerminationOfClosure() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar T;\n" +
+			"a : loop SEMI | ID SEMI\n" +
+			"  ;\n" +
+			"loop\n" +
+			"    : {while}? ID\n" +
+			"    | {do}? ID\n" +
+			"    | {for}? ID\n" +
+			"    ;");
+		String expecting =
+			".s0-ID->.s1\n" +
+			".s1-SEMI->.s2\n" +
+			".s2-{(for||do||while)}?->:s3=>1\n" +
+			".s2-{true}?->:s4=>2\n";
+		checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);
+	}
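+	// Reading the expected DFA above: alt 2 (ID SEMI) carries no predicate of
+	// its own, so its resolving edge is the trivially true {true}?, while alt 1
+	// requires the OR of the three predicates hoisted out of rule loop.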
+
+	// S U P P O R T
+
+	public void _template() throws Exception {
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : A | B;");
+		String expecting =
+			"\n";
+		int[] unreachableAlts = null;
+		int[] nonDetAlts = new int[] {1,2};
+		String ambigInput = "L ID R";
+		int[] insufficientPredAlts = new int[] {1};
+		int[] danglingAlts = null;
+		int numWarnings = 1;
+		checkDecision(g, 1, expecting, unreachableAlts,
+					  nonDetAlts, ambigInput, insufficientPredAlts,
+					  danglingAlts, numWarnings, false);
+	}
+
+	protected void checkDecision(Grammar g,
+								 int decision,
+								 String expecting,
+								 int[] expectingUnreachableAlts,
+								 int[] expectingNonDetAlts,
+								 String expectingAmbigInput,
+								 int[] expectingInsufficientPredAlts,
+								 int[] expectingDanglingAlts,
+								 int expectingNumWarnings,
+								 boolean hasPredHiddenByAction)
+		throws Exception
+	{
+		DecisionProbe.verbose=true; // make sure we get all error info
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		CodeGenerator generator = new CodeGenerator(newTool(), g, "Java");
+		g.setCodeGenerator(generator);
+		// mimic actions of org.antlr.Tool first time for grammar g
+		if ( g.getNumberOfDecisions()==0 ) {
+			g.buildNFA();
+			g.createLookaheadDFAs(false);
+		}
+
+		if ( equeue.size()!=expectingNumWarnings ) {
+			System.err.println("Warnings issued: "+equeue);
+		}
+
+		assertEquals("unexpected number of problems",
+					 expectingNumWarnings, equeue.size());
+
+		DFA dfa = g.getLookaheadDFA(decision);
+		FASerializer serializer = new FASerializer(g);
+		String result = serializer.serialize(dfa.startState);
+		//System.out.print(result);
+		List<Integer> unreachableAlts = dfa.getUnreachableAlts();
+
+		// make sure unreachable alts are as expected
+		if ( expectingUnreachableAlts!=null ) {
+			BitSet s = new BitSet();
+			s.addAll(expectingUnreachableAlts);
+			BitSet s2 = new BitSet();
+			s2.addAll(unreachableAlts);
+			assertEquals("unreachable alts mismatch", s, s2);
+		}
+		else {
+			assertEquals("unreachable alts mismatch", 0,
+						 unreachableAlts!=null?unreachableAlts.size():0);
+		}
+
+		// check conflicting input
+		if ( expectingAmbigInput!=null ) {
+			// first, find nondet message
+			Message msg = getNonDeterminismMessage(equeue.warnings);
+			assertNotNull("no nondeterminism warning?", msg);
+			assertTrue("expecting nondeterminism; found "+msg.getClass().getName(),
+			msg instanceof GrammarNonDeterminismMessage);
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			List<Label> labels =
+				nondetMsg.probe.getSampleNonDeterministicInputSequence(nondetMsg.problemState);
+			String input = nondetMsg.probe.getInputSequenceDisplay(labels);
+			assertEquals(expectingAmbigInput, input);
+		}
+
+		// check nondet alts
+		if ( expectingNonDetAlts!=null ) {
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			assertNotNull("found no nondet alts; expecting: "+
+										str(expectingNonDetAlts), nondetMsg);
+			List<Integer> nonDetAlts =
+				nondetMsg.probe.getNonDeterministicAltsForState(nondetMsg.problemState);
+			// compare nonDetAlts with expectingNonDetAlts
+			BitSet s = new BitSet();
+			s.addAll(expectingNonDetAlts);
+			BitSet s2 = new BitSet();
+			s2.addAll(nonDetAlts);
+			assertEquals("nondet alts mismatch", s, s2);
+			assertEquals("hasPredHiddenByAction mismatch", hasPredHiddenByAction,
+						 nondetMsg.problemState.dfa.hasPredicateBlockedByAction);
+		}
+		else {
+			// not expecting any nondet alts, make sure there are none
+			GrammarNonDeterminismMessage nondetMsg =
+				getNonDeterminismMessage(equeue.warnings);
+			assertNull("found nondet alts, but expecting none", nondetMsg);
+		}
+
+		if ( expectingInsufficientPredAlts!=null ) {
+			GrammarInsufficientPredicatesMessage insuffPredMsg =
+				getGrammarInsufficientPredicatesMessage(equeue.warnings);
+			assertNotNull("found no GrammarInsufficientPredicatesMessage; expecting insufficiently covered alts: "+
+										str(expectingInsufficientPredAlts), insuffPredMsg);
+			Map<Integer, Set<Token>> locations = insuffPredMsg.altToLocations;
+			Set<Integer> actualAlts = locations.keySet();
+			BitSet s = new BitSet();
+			s.addAll(expectingInsufficientPredAlts);
+			BitSet s2 = new BitSet();
+			s2.addAll(actualAlts);
+			assertEquals("insufficiently covered alts mismatch", s, s2);
+			assertEquals("hasPredHiddenByAction mismatch", hasPredHiddenByAction,
+						 insuffPredMsg.problemState.dfa.hasPredicateBlockedByAction);
+		}
+		else {
+			// not expecting any insufficiently covered alts; make sure there are none
+			GrammarInsufficientPredicatesMessage insuffPredMsg =
+				getGrammarInsufficientPredicatesMessage(equeue.warnings);
+			if ( insuffPredMsg!=null ) {
+				System.out.println(equeue.warnings);
+			}
+			assertNull("found insufficiently covered alts, but expecting none", insuffPredMsg);
+		}
+
+		assertEquals(expecting, result);
+	}
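+	// Illustrative call (mirrors the tests above, not an extra check): verify
+	// that decision 1 of grammar g yields the given DFA with no unreachable
+	// alts, no nondeterminism and no warnings:
+	//   checkDecision(g, 1, expecting, null, null, null, null, null, 0, false);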
+
+	protected GrammarNonDeterminismMessage getNonDeterminismMessage(List<? extends Message> warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = warnings.get(i);
+			if ( m instanceof GrammarNonDeterminismMessage ) {
+				return (GrammarNonDeterminismMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected GrammarInsufficientPredicatesMessage getGrammarInsufficientPredicatesMessage(List<? extends Message> warnings) {
+		for (int i = 0; i < warnings.size(); i++) {
+			Message m = warnings.get(i);
+			if ( m instanceof GrammarInsufficientPredicatesMessage ) {
+				return (GrammarInsufficientPredicatesMessage)m;
+			}
+		}
+		return null;
+	}
+
+	protected String str(int[] elements) {
+		StringBuilder buf = new StringBuilder();
+		for (int i = 0; i < elements.length; i++) {
+			if ( i>0 ) {
+				buf.append(", ");
+			}
+			int element = elements[i];
+			buf.append(element);
+		}
+		return buf.toString();
+	}
+}
diff --git a/tool/src/test/java/org/antlr/test/TestSets.java b/tool/src/test/java/org/antlr/test/TestSets.java
new file mode 100644
index 0000000..8a57af2
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestSets.java
@@ -0,0 +1,291 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/** Test the set stuff in lexer and parser */
+public class TestSets extends BaseTest {
+	protected boolean debug = false;
+
+	/** Public default constructor used by TestRig */
+	public TestSets() {
+	}
+
+	@Test public void testSeqDoesNotBecomeSet() throws Exception {
+		// the sequence '3' '4' inside fragment B prevents C's alternatives from
+		// collapsing into a set; '34' must still come back as a single C token.
+		String grammar =
+			"grammar P;\n" +
+			"a : C {System.out.println(input);} ;\n" +
+			"fragment A : '1' | '2';\n" +
+			"fragment B : '3' '4';\n" +
+			"C : A | B;\n";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+								  "a", "34", debug);
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testParserSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : t=('x'|'y') {System.out.println($t.text);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testParserNotSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : t=~('x'|'y') 'z' {System.out.println($t.text);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "zz", debug);
+		assertEquals("z\n", found);
+	}
+
+	@Test public void testParserNotToken() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : ~'x' 'z' {System.out.println(input);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "zz", debug);
+		assertEquals("zz\n", found);
+	}
+
+	@Test public void testParserNotTokenWithLabel() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : t=~'x' 'z' {System.out.println($t.text);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "zz", debug);
+		assertEquals("z\n", found);
+	}
+
+	@Test public void testRuleAsSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a @after {System.out.println(input);} : 'a' | 'b' |'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "b", debug);
+		assertEquals("b\n", found);
+	}
+
+	@Test public void testRuleAsSetAST() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'a' | 'b' |'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "b", debug);
+		assertEquals("b\n", found);
+	}
+
+	@Test public void testNotChar() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ~'b' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testOptionalSingleElement() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A? 'c' {System.out.println(input);} ;\n" +
+			"A : 'b' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "bc", debug);
+		assertEquals("bc\n", found);
+	}
+
+	@Test public void testOptionalLexerSingleElement() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : 'b'? 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "bc", debug);
+		assertEquals("bc\n", found);
+	}
+
+	@Test public void testStarLexerSingleElement() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : 'b'* 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "bbbbc", debug);
+		assertEquals("bbbbc\n", found);
+		found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "c", debug);
+		assertEquals("c\n", found);
+	}
+
+	@Test public void testPlusLexerSingleElement() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : 'b'+ 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "bbbbc", debug);
+		assertEquals("bbbbc\n", found);
+	}
+
+	@Test public void testOptionalSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : ('a'|'b')? 'c' {System.out.println(input);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "ac", debug);
+		assertEquals("ac\n", found);
+	}
+
+	@Test public void testStarSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : ('a'|'b')* 'c' {System.out.println(input);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abaac", debug);
+		assertEquals("abaac\n", found);
+	}
+
+	@Test public void testPlusSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : ('a'|'b')+ 'c' {System.out.println(input);} ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abaac", debug);
+		assertEquals("abaac\n", found);
+	}
+
+	@Test public void testLexerOptionalSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : ('a'|'b')? 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "ac", debug);
+		assertEquals("ac\n", found);
+	}
+
+	@Test public void testLexerStarSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : ('a'|'b')* 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abaac", debug);
+		assertEquals("abaac\n", found);
+	}
+
+	@Test public void testLexerPlusSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println(input);} ;\n" +
+			"A : ('a'|'b')+ 'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "abaac", debug);
+		assertEquals("abaac\n", found);
+	}
+
+	@Test public void testNotCharSet() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ~('b'|'c') ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testNotCharSetWithLabel() throws Exception {
+		// This doesn't work in lexer yet.
+		// Generates: h=input.LA(1); but h is defined as a Token
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : h=~('b'|'c') ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testNotCharSetWithRuleRef() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ~('a'|B) ;\n" +
+			"B : 'b' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testNotCharSetWithRuleRef2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ~('a'|B) ;\n" +
+			"B : 'b'|'c' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testNotCharSetWithRuleRef3() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ('a'|B) ;\n" +
+			"fragment\n" +
+			"B : ~('a'|'c') ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+	@Test public void testNotCharSetWithRuleRef4() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : A {System.out.println($A.text);} ;\n" +
+			"A : ('a'|B) ;\n" +
+			"fragment\n" +
+			"B : ~('a'|C) ;\n" +
+			"fragment\n" +
+			"C : 'c'|'d' ;\n ";
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+								  "a", "x", debug);
+		assertEquals("x\n", found);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestSymbolDefinitions.java b/tool/src/test/java/org/antlr/test/TestSymbolDefinitions.java
new file mode 100644
index 0000000..43bf93e
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestSymbolDefinitions.java
@@ -0,0 +1,914 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.analysis.Label;
+import org.antlr.codegen.CodeGenerator;
+import org.stringtemplate.v4.ST;
+import org.antlr.tool.*;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.junit.Assert.*;
+
+public class TestSymbolDefinitions extends BaseTest {
+
+	/** Public default constructor used by TestRig */
+	public TestSymbolDefinitions() {
+	}
+
+	@Test public void testParserSimpleTokens() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"a : A | B;\n" +
+				"b : C ;");
+		String rules = "a, b";
+		String tokenNames = "A, B, C";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	@Test public void testParserTokensSection() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar t;\n" +
+				"tokens {\n" +
+				"  C;\n" +
+				"  D;" +
+				"}\n"+
+				"a : A | B;\n" +
+				"b : C ;");
+		String rules = "a, b";
+		String tokenNames = "A, B, C, D";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	@Test public void testLexerTokensSection() throws Exception {
+		Grammar g = new Grammar(
+				"lexer grammar t;\n" +
+				"tokens {\n" +
+				"  C;\n" +
+				"  D;" +
+				"}\n"+
+				"A : 'a';\n" +
+				"C : 'c' ;");
+		String rules = "A, C, Tokens";
+		String tokenNames = "A, C, D";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	@Test public void testTokensSectionWithAssignmentSection() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"tokens {\n" +
+				"  C='c';\n" +
+				"  D;" +
+				"}\n"+
+				"a : A | B;\n" +
+				"b : C ;");
+		String rules = "a, b";
+		String tokenNames = "A, B, C, D, 'c'";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	@Test public void testCombinedGrammarLiterals() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a : 'begin' b 'end';\n" +
+				"b : C ';' ;\n" +
+				"ID : 'a' ;\n" +
+				"FOO : 'foo' ;\n" +  // "foo" is not a token name
+				"C : 'c' ;\n");        // nor is 'c'
+		String rules = "a, b";
+		String tokenNames = "C, FOO, ID, 'begin', 'end', ';'";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	@Test public void testLiteralInParserAndLexer() throws Exception {
+		// 'x' is token and char in lexer rule
+		Grammar g = new Grammar(
+				"grammar t;\n" +
+				"a : 'x' E ; \n" +
+				"E: 'x' '0' ;\n");
+		String literals = "['x']";
+		String foundLiterals = g.getStringLiterals().toString();
+		assertEquals(literals, foundLiterals);
+
+		String implicitLexer =
+			"lexer grammar t;" + newline +
+			"T__5 : 'x' ;" + newline +
+			"" + newline +
+			"// $ANTLR src \"<string>\" 3" + newline +
+			"E: 'x' '0' ;";
+		assertEquals(implicitLexer, g.getLexerGrammar());
+	}
+
+	@Test public void testCombinedGrammarWithRefToLiteralButNoTokenIDRef() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a : 'a' ;\n" +
+				"A : 'a' ;\n");
+		String rules = "a";
+		String tokenNames = "A, 'a'";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	@Test public void testSetDoesNotMissTokenAliases() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a : 'a'|'b' ;\n" +
+				"A : 'a' ;\n" +
+				"B : 'b' ;\n");
+		String rules = "a";
+		String tokenNames = "A, 'a', B, 'b'";
+		checkSymbols(g, rules, tokenNames);
+	}
+
+	@Test public void testSimplePlusEqualLabel() throws Exception {
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"a : ids+=ID ( COMMA ids+=ID )* ;\n");
+		String rule = "a";
+		String tokenLabels = "ids";
+		String ruleLabels = null;
+		checkPlusEqualsLabels(g, rule, tokenLabels, ruleLabels);
+	}
+
+	@Test public void testMixedPlusEqualLabel() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"options {output=AST;}\n" +
+				"a : id+=ID ( ',' e+=expr )* ;\n" +
+				"expr : 'e';\n" +
+				"ID : 'a';\n");
+		String rule = "a";
+		String tokenLabels = "id";
+		String ruleLabels = "e";
+		checkPlusEqualsLabels(g, rule, tokenLabels, ruleLabels);
+	}
+
+	// T E S T  L I T E R A L  E S C A P E S
+
+	@Test public void testParserCharLiteralWithEscape() throws Exception {
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a : '\\n';\n");
+		Set<String> literals = g.getStringLiterals();
+		// must store literals how they appear in the antlr grammar
+		assertEquals("'\\n'", literals.toArray()[0]);
+	}
+
+	@Test public void testTokenInTokensSectionAndTokenRuleDef() throws Exception {
+		// B is aliased in the tokens section and also defined as a lexer rule;
+		// the parser must still see '}' as token B.
+		String grammar =
+			"grammar P;\n" +
+			"tokens { B='}'; }\n"+
+			"a : A B {System.out.println(input);} ;\n"+
+			"A : 'a' ;\n" +
+			"B : '}' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+								  "a", "a}", false);
+		assertEquals("a}\n", found);
+	}
+
+	@Test public void testTokenInTokensSectionAndTokenRuleDef2() throws Exception {
+		// same as above, but the parser references the literal '}' directly and
+		// rule B carries an action; '}' must still come back as token B.
+		String grammar =
+			"grammar P;\n" +
+			"tokens { B='}'; }\n"+
+			"a : A '}' {System.out.println(input);} ;\n"+
+			"A : 'a' ;\n" +
+			"B : '}' {/* */} ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;";
+		String found = execParser("P.g", grammar, "PParser", "PLexer",
+								  "a", "a}", false);
+		assertEquals("a}\n", found);
+	}
+
+
+	@Test public void testRefToRuleWithNoReturnValue() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+
+		String grammarStr =
+			"grammar P;\n" +
+			"a : x=b ;\n" +
+			"b : B ;\n" +
+			"B : 'b' ;\n";
+		Grammar g = new Grammar(grammarStr);
+
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		ST recogST = generator.genRecognizer();
+		String code = recogST.render();
+		assertTrue("not expecting label", code.indexOf("x=b();")<0);
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	// T E S T  E R R O R S
+
+	@Test public void testParserStringLiterals() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"a : 'begin' b ;\n" +
+				"b : C ;");
+		Object expectedArg = "'begin'";
+		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testParserCharLiterals() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"a : '(' b ;\n" +
+				"b : C ;");
+		Object expectedArg = "'('";
+		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testEmptyNotChar() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"grammar foo;\n" +
+				"a : (~'x')+ ;\n");
+		g.buildNFA();
+		Object expectedArg = "'x'";
+		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testEmptyNotToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"grammar foo;\n" +
+				"a : (~A)+ ;\n");
+		g.buildNFA();
+		Object expectedArg = "A";
+		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testEmptyNotSet() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+				"grammar foo;\n" +
+				"a : (~(A|B))+ ;\n");
+		g.buildNFA();
+		Object expectedArg = null;
+		int expectedMsgID = ErrorManager.MSG_EMPTY_COMPLEMENT;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testStringLiteralInParserTokensSection() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"parser grammar t;\n" +
+				"tokens {\n" +
+				"  B='begin';\n" +
+				"}\n"+
+				"a : A B;\n" +
+				"b : C ;");
+		Object expectedArg = "'begin'";
+		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testCharLiteralInParserTokensSection() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"parser grammar t;\n" +
+				"tokens {\n" +
+				"  B='(';\n" +
+				"}\n"+
+				"a : A B;\n" +
+				"b : C ;");
+		Object expectedArg = "'('";
+		int expectedMsgID = ErrorManager.MSG_LITERAL_NOT_ASSOCIATED_WITH_LEXER_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testCharLiteralInLexerTokensSection() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"lexer grammar t;\n" +
+				"tokens {\n" +
+				"  B='(';\n" +
+				"}\n"+
+				"ID : 'a';\n");
+		Object expectedArg = "'('";
+		int expectedMsgID = ErrorManager.MSG_CANNOT_ALIAS_TOKENS_IN_LEXER;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testRuleRedefinition() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"a : A | B;\n" +
+				"a : C ;");
+
+		Object expectedArg = "a";
+		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testLexerRuleRedefinition() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"lexer grammar t;\n"+
+				"ID : 'a' ;\n" +
+				"ID : 'd' ;");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testCombinedRuleRedefinition() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"x : ID ;\n" +
+				"ID : 'a' ;\n" +
+				"x : ID ID ;");
+
+		Object expectedArg = "x";
+		int expectedMsgID = ErrorManager.MSG_RULE_REDEFINITION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testUndefinedToken() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"x : ID ;");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_NO_TOKEN_DEFINITION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsWarning(equeue, expectedMessage);
+	}
+
+	@Test public void testUndefinedTokenOkInParser() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"x : ID ;");
+		assertEquals("should not be an error", 0, equeue.errors.size());
+	}
+
+	@Test public void testUndefinedRule() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"x : r ;");
+
+		Object expectedArg = "r";
+		int expectedMsgID = ErrorManager.MSG_UNDEFINED_RULE_REF;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testLexerRuleInParser() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"parser grammar t;\n"+
+				"X : ;");
+
+		Object expectedArg = "X";
+		int expectedMsgID = ErrorManager.MSG_LEXER_RULES_NOT_ALLOWED;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testParserRuleInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"lexer grammar t;\n"+
+				"a : ;");
+
+		Object expectedArg = "a";
+		int expectedMsgID = ErrorManager.MSG_PARSER_RULES_NOT_ALLOWED;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testRuleScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"scope a {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"a : \n" +
+			"  ;\n");
+
+		Object expectedArg = "a";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testTokenRuleScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"scope ID {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"ID : 'a'\n" +
+			"  ;\n");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testTokenScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"grammar t;\n"+
+			"tokens { ID; }\n"+
+			"scope ID {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"a : \n" +
+			"  ;\n");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testTokenRuleScopeConflictInLexerGrammar() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"scope ID {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"ID : 'a'\n" +
+			"  ;\n");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testTokenLabelScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"scope s {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"a : s=ID \n" +
+			"  ;\n");
+
+		Object expectedArg = "s";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testRuleLabelScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"scope s {\n" +
+			"  int n;\n" +
+			"}\n" +
+			"a : s=b \n" +
+			"  ;\n" +
+			"b : ;\n");
+
+		Object expectedArg = "s";
+		int expectedMsgID = ErrorManager.MSG_SYMBOL_CONFLICTS_WITH_GLOBAL_SCOPE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testLabelAndRuleNameConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : c=b \n" +
+			"  ;\n" +
+			"b : ;\n" +
+			"c : ;\n");
+
+		Object expectedArg = "c";
+		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testLabelAndTokenNameConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a : ID=b \n" +
+			"  ;\n" +
+			"b : ID ;\n" +
+			"c : ;\n");
+
+		Object expectedArg = "ID";
+		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_TOKEN;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testLabelAndArgConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[int i] returns [int x]: i=ID \n" +
+			"  ;\n");
+
+		Object expectedArg = "i";
+		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testLabelAndParameterConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[int i] returns [int x]: x=ID \n" +
+			"  ;\n");
+
+		Object expectedArg = "x";
+		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_ARG_RETVAL;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testLabelRuleScopeConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a\n" +
+			"scope {" +
+			"  int n;" +
+			"}\n" +
+			"  : n=ID\n" +
+			"  ;\n");
+
+		Object expectedArg = "n";
+		Object expectedArg2 = "a";
+		int expectedMsgID = ErrorManager.MSG_LABEL_CONFLICTS_WITH_RULE_SCOPE_ATTRIBUTE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testRuleScopeArgConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a[int n]\n" +
+			"scope {" +
+			"  int n;" +
+			"}\n" +
+			"  : \n" +
+			"  ;\n");
+
+		Object expectedArg = "n";
+		Object expectedArg2 = "a";
+		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testRuleScopeReturnValueConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a returns [int n]\n" +
+			"scope {" +
+			"  int n;" +
+			"}\n" +
+			"  : \n" +
+			"  ;\n");
+
+		Object expectedArg = "n";
+		Object expectedArg2 = "a";
+		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE_ARG_RETVAL;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testRuleScopeRuleNameConflict() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+			"parser grammar t;\n"+
+			"a\n" +
+			"scope {" +
+			"  int a;" +
+			"}\n" +
+			"  : \n" +
+			"  ;\n");
+
+		Object expectedArg = "a";
+		Object expectedArg2 = null;
+		int expectedMsgID = ErrorManager.MSG_ATTRIBUTE_CONFLICTS_WITH_RULE;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testBadGrammarOption() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Tool antlr = newTool();
+		Grammar g = new Grammar(antlr,
+								"grammar t;\n"+
+								"options {foo=3; language=Java;}\n" +
+								"a : 'a';\n");
+
+		Object expectedArg = "foo";
+		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testBadRuleOption() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a\n"+
+				"options {k=3; tokenVocab=blort;}\n" +
+				"  : 'a';\n");
+
+		Object expectedArg = "tokenVocab";
+		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testBadSubRuleOption() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue); // unique listener per thread
+		Grammar g = new Grammar(
+				"grammar t;\n"+
+				"a : ( options {k=3; language=Java;}\n" +
+				"    : 'a'\n" +
+				"    | 'b'\n" +
+				"    )\n" +
+				"  ;\n");
+		Object expectedArg = "language";
+		int expectedMsgID = ErrorManager.MSG_ILLEGAL_OPTION;
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkGrammarSemanticsError(equeue, expectedMessage);
+	}
+
+	@Test public void testTokenVocabStringUsedInLexer() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String tokens =
+			"';'=4\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.tokens", tokens);
+
+		String importer =
+			"lexer grammar B; \n" +
+			"options\t{tokenVocab=T;} \n" +
+			"SEMI:';' ; \n" ;
+		writeFile(tmpdir, "B.g", importer);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/B.g",composite);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[SEMI=4]";
+		String expectedStringLiteralToTypeMap = "{';'=4}";
+		String expectedTypeToTokenList = "[SEMI]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	@Test public void testTokenVocabStringUsedInCombined() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		String tokens =
+			"';'=4\n";
+		mkdir(tmpdir);
+		writeFile(tmpdir, "T.tokens", tokens);
+
+		String importer =
+			"grammar B; \n" +
+			"options\t{tokenVocab=T;} \n" +
+			"SEMI:';' ; \n" ;
+		writeFile(tmpdir, "B.g", importer);
+		Tool antlr = newTool(new String[] {"-lib", tmpdir});
+		CompositeGrammar composite = new CompositeGrammar();
+		Grammar g = new Grammar(antlr,tmpdir+"/B.g",composite);
+		g.parseAndBuildAST();
+		g.composite.assignTokenTypes();
+
+		String expectedTokenIDToTypeMap = "[SEMI=4]";
+		String expectedStringLiteralToTypeMap = "{';'=4}";
+		String expectedTypeToTokenList = "[SEMI]";
+
+		assertEquals(expectedTokenIDToTypeMap,
+					 realElements(g.composite.tokenIDToTypeMap).toString());
+		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
+		assertEquals(expectedTypeToTokenList,
+					 realElements(g.composite.typeToTokenList).toString());
+
+		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
+	}
+
+	protected void checkPlusEqualsLabels(Grammar g,
+										 String ruleName,
+										 String tokenLabelsStr,
+										 String ruleLabelsStr)
+		throws Exception
+	{
+		// make sure expected += labels are there
+		Rule r = g.getRule(ruleName);
+		StringTokenizer st = new StringTokenizer(tokenLabelsStr, ", ");
+		Set<String> tokenLabels = null;
+		while ( st.hasMoreTokens() ) {
+			if ( tokenLabels==null ) {
+				tokenLabels = new HashSet<String>();
+			}
+			String labelName = st.nextToken();
+			tokenLabels.add(labelName);
+		}
+		Set<String> ruleLabels = null;
+		if ( ruleLabelsStr!=null ) {
+			st = new StringTokenizer(ruleLabelsStr, ", ");
+			ruleLabels = new HashSet<String>();
+			while ( st.hasMoreTokens() ) {
+				String labelName = st.nextToken();
+				ruleLabels.add(labelName);
+			}
+		}
+		assertTrue("token += labels mismatch; "+tokenLabels+"!="+r.tokenListLabels,
+				   (tokenLabels!=null && r.tokenListLabels!=null) ||
+				   (tokenLabels==null && r.tokenListLabels==null));
+		assertTrue("rule += labels mismatch; "+ruleLabels+"!="+r.ruleListLabels,
+				   (ruleLabels!=null && r.ruleListLabels!=null) ||
+				   (ruleLabels==null && r.ruleListLabels==null));
+		if ( tokenLabels!=null ) {
+			assertEquals(tokenLabels, r.tokenListLabels.keySet());
+		}
+		if ( ruleLabels!=null ) {
+			assertEquals(ruleLabels, r.ruleListLabels.keySet());
+		}
+	}
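+	// Illustrative call (mirrors testSimplePlusEqualLabel above): rule 'a' is
+	// expected to collect the += token label "ids" and no += rule labels:
+	//   checkPlusEqualsLabels(g, "a", "ids", null);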
+
+	protected void checkSymbols(Grammar g,
+								String rulesStr,
+								String tokensStr)
+		throws Exception
+	{
+		Set<String> tokens = g.getTokenDisplayNames();
+
+		// make sure expected tokens are there
+		StringTokenizer st = new StringTokenizer(tokensStr, ", ");
+		while ( st.hasMoreTokens() ) {
+			String tokenName = st.nextToken();
+			assertTrue("token "+tokenName+" expected",
+					   g.getTokenType(tokenName)!=Label.INVALID);
+			tokens.remove(tokenName);
+		}
+		// make sure there are not any others (other than <EOF> etc...)
+		for (String tokenName : tokens) {
+			assertTrue("unexpected token name "+tokenName,
+					   g.getTokenType(tokenName)<Label.MIN_TOKEN_TYPE);
+		}
+
+		// make sure all expected rules are there
+		st = new StringTokenizer(rulesStr, ", ");
+		int n = 0;
+		while ( st.hasMoreTokens() ) {
+			String ruleName = st.nextToken();
+			assertNotNull("rule "+ruleName+" expected", g.getRule(ruleName));
+			n++;
+		}
+		Collection<Rule> rules = g.getRules();
+		//System.out.println("rules="+rules);
+		// make sure there are no extra rules
+		assertEquals("number of rules mismatch; expecting "+n+"; found "+rules.size(), n, rules.size());
+
+	}
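+	// Illustrative call (mirrors testParserSimpleTokens above): grammar g must
+	// define exactly rules a and b and exactly tokens A, B and C:
+	//   checkSymbols(g, "a, b", "A, B, C");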
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestSyntacticPredicateEvaluation.java b/tool/src/test/java/org/antlr/test/TestSyntacticPredicateEvaluation.java
new file mode 100644
index 0000000..894ffa3
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestSyntacticPredicateEvaluation.java
@@ -0,0 +1,422 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestSyntacticPredicateEvaluation extends BaseTest {
+	@Test public void testTwoPredsWithNakedAlt() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : (a ';')+ ;\n" +
+			"a\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"  : (b '.')=> b '.' {System.out.println(\"alt 1\");}\n" +
+			"  | (b)=> b {System.out.println(\"alt 2\");}\n" +
+			"  | c       {System.out.println(\"alt 3\");}\n" +
+			"  ;\n" +
+			"b\n" +
+			"@init {System.out.println(\"enter b\");}\n" +
+			"   : '(' 'x' ')' ;\n" +
+			"c\n" +
+			"@init {System.out.println(\"enter c\");}\n" +
+			"   : '(' c ')' | 'x' ;\n" +
+			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
+			"   ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "(x) ;", false);
+		String expecting =
+			"enter b\n" +
+			"enter b\n" +
+			"enter b\n" +
+			"alt 2\n";
+		assertEquals(expecting, found);
+
+		found = execParser("T.g", grammar, "TParser", "TLexer",
+			    "a", "(x). ;", false);
+		expecting =
+			"enter b\n" +
+			"enter b\n" +
+			"alt 1\n";
+		assertEquals(expecting, found);
+
+		found = execParser("T.g", grammar, "TParser", "TLexer",
+			    "a", "((x)) ;", false);
+		expecting =
+			"enter b\n" +
+			"enter b\n" +
+			"enter c\n" +
+			"enter c\n" +
+			"enter c\n" +
+			"alt 3\n";
+		assertEquals(expecting, found);
+	}
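+	// Reading the first trace above: for "(x) ;" rule b is entered once while
+	// evaluating the (b '.')=> predicate, once for the (b)=> predicate, and once
+	// more when alt 2 finally matches, hence "enter b" three times before "alt 2".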
+
+	@Test public void testTwoPredsWithNakedAltNotLast() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : (a ';')+ ;\n" +
+			"a\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"  : (b '.')=> b '.' {System.out.println(\"alt 1\");}\n" +
+			"  | c       {System.out.println(\"alt 2\");}\n" +
+			"  | (b)=> b {System.out.println(\"alt 3\");}\n" +
+			"  ;\n" +
+			"b\n" +
+			"@init {System.out.println(\"enter b\");}\n" +
+			"   : '(' 'x' ')' ;\n" +
+			"c\n" +
+			"@init {System.out.println(\"enter c\");}\n" +
+			"   : '(' c ')' | 'x' ;\n" +
+			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
+			"   ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "(x) ;", false);
+		String expecting =
+			"enter b\n" +
+			"enter c\n" +
+			"enter c\n" +
+			"alt 2\n";
+		assertEquals(expecting, found);
+
+		found = execParser("T.g", grammar, "TParser", "TLexer",
+			    "a", "(x). ;", false);
+		expecting =
+			"enter b\n" +
+			"enter b\n" +
+			"alt 1\n";
+		assertEquals(expecting, found);
+
+		found = execParser("T.g", grammar, "TParser", "TLexer",
+			    "a", "((x)) ;", false);
+		expecting =
+			"enter b\n" +
+			"enter c\n" +
+			"enter c\n" +
+			"enter c\n" +
+			"alt 2\n";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testLexerPred() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : A ;\n" +
+			"A options {k=1;}\n" + // force backtracking
+			"  : (B '.')=>B '.' {System.out.println(\"alt1\");}\n" +
+			"  | B {System.out.println(\"alt2\");}" +
+			"  ;\n" +
+			"fragment\n" +
+			"B : 'x'+ ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "s", "xxx", false);
+
+		assertEquals("alt2\n", found);
+
+		found = execParser("T.g", grammar, "TParser", "TLexer",
+			    "s", "xxx.", false);
+
+		assertEquals("alt1\n", found);
+	}
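+	// Interpretation: with k=1 the token rule A cannot choose an alt on lookahead
+	// alone, so the (B '.')=> predicate is evaluated by backtracking; "xxx" fails
+	// it (no trailing '.') and falls through to alt2, while "xxx." passes it and
+	// selects alt1.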
+
+	@Test public void testLexerWithPredLongerThanAlt() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : A ;\n" +
+			"A options {k=1;}\n" + // force backtracking
+			"  : (B '.')=>B {System.out.println(\"alt1\");}\n" +
+			"  | B {System.out.println(\"alt2\");}" +
+			"  ;\n" +
+			"D : '.' {System.out.println(\"D\");} ;\n" +
+			"fragment\n" +
+			"B : 'x'+ ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "s", "xxx", false);
+
+		assertEquals("alt2\n", found);
+
+		found = execParser("T.g", grammar, "TParser", "TLexer",
+			    "s", "xxx.", false);
+
+		assertEquals("alt1\nD\n", found);
+	}
+
+	@Test public void testLexerPredCyclicPrediction() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : A ;\n" +
+			"A : (B)=>(B|'y'+) {System.out.println(\"alt1\");}\n" +
+			"  | B {System.out.println(\"alt2\");}\n" +
+			"  | 'y'+ ';'" +
+			"  ;\n" +
+			"fragment\n" +
+			"B : 'x'+ ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "s", "xxx", false);
+
+		assertEquals("alt1\n", found);
+	}
+
+	@Test public void testLexerPredCyclicPrediction2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : A ;\n" +
+			"A : (B '.')=>(B|'y'+) {System.out.println(\"alt1\");}\n" +
+			"  | B {System.out.println(\"alt2\");}\n" +
+			"  | 'y'+ ';'" +
+			"  ;\n" +
+			"fragment\n" +
+			"B : 'x'+ ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "s", "xxx", false);
+		assertEquals("alt2\n", found);
+	}
+
+	@Test public void testSimpleNestedPred() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : (expr ';')+ ;\n" +
+			"expr\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"@init {System.out.println(\"enter expr \"+input.LT(1).getText());}\n" +
+			"  : (atom 'x') => atom 'x'\n" +
+			"  | atom\n" +
+			";\n" +
+			"atom\n" +
+			"@init {System.out.println(\"enter atom \"+input.LT(1).getText());}\n" +
+			"   : '(' expr ')'\n" +
+			"   | INT\n" +
+			"   ;\n" +
+			"INT: '0'..'9'+ ;\n" +
+			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
+			"   ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "s", "(34)x;", false);
+		String expecting =
+			"enter expr (\n" +
+			"enter atom (\n" +
+			"enter expr 34\n" +
+			"enter atom 34\n" +
+			"enter atom 34\n" +
+			"enter atom (\n" +
+			"enter expr 34\n" +
+			"enter atom 34\n" +
+			"enter atom 34\n";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testTripleNestedPredInLexer() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"s : (.)+ {System.out.println(\"done\");} ;\n" +
+			"EXPR\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"@init {System.out.println(\"enter expr \"+(char)input.LT(1));}\n" +
+			"  : (ATOM 'x') => ATOM 'x' {System.out.println(\"ATOM x\");}\n" +
+			"  | ATOM {System.out.println(\"ATOM \"+$ATOM.text);}\n" +
+			";\n" +
+			"fragment ATOM\n" +
+			"@init {System.out.println(\"enter atom \"+(char)input.LT(1));}\n" +
+			"   : '(' EXPR ')'\n" +
+			"   | INT\n" +
+			"   ;\n" +
+			"fragment INT: '0'..'9'+ ;\n" +
+			"fragment WS : (' '|'\\n')+ \n" +
+			"   ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "s", "((34)x)x", false);
+		String expecting = // has no memoization
+			"enter expr (\n" +
+			"enter atom (\n" +
+			"enter expr (\n" +
+			"enter atom (\n" +
+			"enter expr 3\n" +
+			"enter atom 3\n" +
+			"enter atom 3\n" +
+			"enter atom (\n" +
+			"enter expr 3\n" +
+			"enter atom 3\n" +
+			"enter atom 3\n" +
+			"enter atom (\n" +
+			"enter expr (\n" +
+			"enter atom (\n" +
+			"enter expr 3\n" +
+			"enter atom 3\n" +
+			"enter atom 3\n" +
+			"enter atom (\n" +
+			"enter expr 3\n" +
+			"enter atom 3\n" +
+			"enter atom 3\n" +
+			"ATOM 34\n" +
+			"ATOM x\n" +
+			"ATOM x\n" +
+			"done\n";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testTreeParserWithSynPred() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT+ (PERIOD|SEMI);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"PERIOD : '.' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n" +
+			"options {k=1; backtrack=true; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID INT+ PERIOD {System.out.print(\"alt 1\");}"+
+			"  | ID INT+ SEMI   {System.out.print(\"alt 2\");}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 2 3;");
+		assertEquals("alt 2\n", found);
+	}
+
+	@Test public void testTreeParserWithNestedSynPred() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT+ (PERIOD|SEMI);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"PERIOD : '.' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		// backtracks in a and b due to k=1
+		String treeGrammar =
+			"tree grammar TP;\n" +
+			"options {k=1; backtrack=true; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID b {System.out.print(\" a:alt 1\");}"+
+			"  | ID INT+ SEMI   {System.out.print(\" a:alt 2\");}\n" +
+			"  ;\n" +
+			"b : INT PERIOD  {System.out.print(\"b:alt 1\");}" + // choose this alt for just one INT
+			"  | INT+ PERIOD {System.out.print(\"b:alt 2\");}" +
+			"  ;";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 2 3.");
+		assertEquals("b:alt 2 a:alt 1\n", found);
+	}
+
+	@Test public void testSynPredWithOutputTemplate() throws Exception {
+		// really just seeing if it will compile
+		String grammar =
+			"grammar T;\n" +
+			"options {output=template;}\n" +
+			"a\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"  : ('x'+ 'y')=> 'x'+ 'y' -> template(a={$text}) <<1:<a>;>>\n" +
+			"  | 'x'+ 'z' -> template(a={$text}) <<2:<a>;>>\n"+
+			"  ;\n" +
+			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
+			"   ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "xxxy", false);
+
+		assertEquals("1:xxxy;\n", found);
+	}
+
+	@Test public void testSynPredWithOutputAST() throws Exception {
+		// really just seeing if it will compile
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a\n" +
+			"options {\n" +
+			"  k=1;\n" +
+			"}\n" +
+			"  : ('x'+ 'y')=> 'x'+ 'y'\n" +
+			"  | 'x'+ 'z'\n"+
+			"  ;\n" +
+			"WS : (' '|'\\n')+ {$channel=HIDDEN;}\n" +
+			"   ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "xxxy", false);
+
+		assertEquals("x x x y\n", found);
+	}
+
+	@Test public void testOptionalBlockWithSynPred() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+				"\n" +
+				"a : ( (b)=> b {System.out.println(\"b\");})? b ;\n" +
+				"b : 'x' ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "xx", false);
+		assertEquals("b\n", found);
+		found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "x", false);
+		assertEquals("", found);
+	}
+
+	@Test public void testSynPredK2() throws Exception {
+		// all manually specified syn predicates are gated (i.e., forced
+		// to execute).
+		String grammar =
+			"grammar T;\n" +
+				"\n" +
+				"a : (b)=> b {System.out.println(\"alt1\");} | 'a' 'c' ;\n" +
+				"b : 'a' 'b' ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "ab", false);
+
+		assertEquals("alt1\n", found);
+	}
+
+	@Test public void testSynPredKStar() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+				"\n" +
+				"a : (b)=> b {System.out.println(\"alt1\");} | 'a'+ 'c' ;\n" +
+				"b : 'a'+ 'b' ;\n" ;
+		String found = execParser("T.g", grammar, "TParser", "TLexer",
+				    "a", "aaab", false);
+
+		assertEquals("alt1\n", found);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestSyntaxErrors.java b/tool/src/test/java/org/antlr/test/TestSyntaxErrors.java
new file mode 100644
index 0000000..9e391e5
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestSyntaxErrors.java
@@ -0,0 +1,165 @@
+/*
+ * [The "BSD license"]
+ * Copyright (c) 2011 Terence Parr
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.antlr.test;
+
+import org.antlr.tool.ErrorManager;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+import java.io.File;
+
+/** Test runtime parse errors. */
+public class TestSyntaxErrors extends BaseTest {
+	@Test public void testLL2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : 'a' 'b'" +
+			"  | 'a' 'c'" +
+			";\n" +
+			"q : 'e' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "ae", false);
+		String expecting = "line 1:1 no viable alternative at input 'e'\n";
+		String result = stderrDuringParse;
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testLL3() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : 'a' 'b'* 'c'" +
+			"  | 'a' 'b' 'd'" +
+			"  ;\n" +
+			"q : 'e' ;\n";
+		System.out.println(grammar);
+		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "abe", false);
+		String expecting = "line 1:2 no viable alternative at input 'e'\n";
+		String result = stderrDuringParse;
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testLLStar() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : 'a'+ 'b'" +
+			"  | 'a'+ 'c'" +
+			";\n" +
+			"q : 'e' ;\n";
+		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "aaae", false);
+		String expecting = "line 1:3 no viable alternative at input 'e'\n";
+		String result = stderrDuringParse;
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testSynPred() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"a : (e '.')=> e '.'" +
+			"  | (e ';')=> e ';'" +
+			"  | 'z'" +
+			"  ;\n" +
+			"e : '(' e ')'" +
+			"  | 'i'" +
+			"  ;\n";
+		System.out.println(grammar);
+		String found = execParser("T.g", grammar, "TParser", "TLexer", "a", "((i))z", false);
+		String expecting = "line 1:1 no viable alternative at input '('\n";
+		String result = stderrDuringParse;
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testLL1ErrorInfo() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"start : animal (AND acClass)? service EOF;\n" +
+			"animal : (DOG | CAT );\n" +
+			"service : (HARDWARE | SOFTWARE) ;\n" +
+			"AND : 'and';\n" +
+			"DOG : 'dog';\n" +
+			"CAT : 'cat';\n" +
+			"HARDWARE: 'hardware';\n" +
+			"SOFTWARE: 'software';\n" +
+			"WS : ' ' {skip();} ;" +
+			"acClass\n" +
+			"@init\n" +
+			"{ System.out.println(computeContextSensitiveRuleFOLLOW().toString(tokenNames)); }\n" +
+			"  : ;\n";
+		String result = execParser("T.g", grammar, "TParser", "TLexer", "start", "dog and software", false);
+		String expecting = "{HARDWARE,SOFTWARE}\n";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testStrayBracketRecovery() {
+		String grammar =
+			"grammar T;\n" +
+			"options {output = AST;}\n" +
+			"tokens{NODE;}\n" +
+			"s : a=ID INT -> ^(NODE[$a]] INT);\n" +
+			"ID: 'a'..'z'+;\n" +
+			"INT: '0'..'9'+;\n";
+
+		ErrorQueue errorQueue = new ErrorQueue();
+		ErrorManager.setErrorListener(errorQueue);
+
+		boolean found =
+			rawGenerateAndBuildRecognizer(
+				"T.g", grammar, "TParser", "TLexer", false);
+
+		assertFalse(found);
+		assertEquals(
+			"[error(100): :4:27: syntax error: antlr: dangling ']'? make sure to escape with \\]]",
+			errorQueue.errors.toString());
+	}
+
+	/**
+	 * This is a regression test for antlr/antlr3#61.
+	 * https://github.com/antlr/antlr3/issues/61
+	 */
+	@Test public void testMissingAttributeAccessPreventsCodeGeneration() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {\n" +
+			"    backtrack = true; \n" +
+			"}\n" +
+			"// if b is rule ref, gens bad void x=null code\n" +
+			"a : x=b {Object o = $x; System.out.println(\"alt1\");}\n" +
+			"  | y=b\n" +
+			"  ;\n" +
+			"\n" +
+			"b : 'a' ;\n" ;
+
+		ErrorQueue errorQueue = new ErrorQueue();
+		ErrorManager.setErrorListener(errorQueue);
+		boolean success = rawGenerateAndBuildRecognizer("T.g", grammar, "TParser", "TLexer", false);
+		assertFalse(success);
+		assertEquals(
+			"[error(117): "+tmpdir.toString()+File.separatorChar+"T.g:6:9: missing attribute access on rule scope: x]",
+			errorQueue.errors.toString());
+	}
+}
diff --git a/tool/src/test/java/org/antlr/test/TestTemplates.java b/tool/src/test/java/org/antlr/test/TestTemplates.java
new file mode 100644
index 0000000..fd41c7a
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTemplates.java
@@ -0,0 +1,376 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.grammar.v3.ANTLRParser;
+import org.antlr.grammar.v3.ActionTranslator;
+import org.antlr.runtime.CommonToken;
+import org.stringtemplate.v4.ST;
+import org.stringtemplate.v4.STGroup;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarSemanticsMessage;
+import org.antlr.tool.Message;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/** Test templates in actions; %... shorthands */
+public class TestTemplates extends BaseTest {
+	private static final String LINE_SEP = System.getProperty("line.separator");
+
+	@Test
+    public void testTemplateConstructor() throws Exception {
+		String action = "x = %foo(name={$ID.text});";
+		String expecting = "x = templateLib.getInstanceOf(\"foo\"," +
+			"new STAttrMap().put(\"name\", (ID1!=null?ID1.getText():null)));";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+										"a",
+										new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		STGroup templates =
+			new STGroup();
+		ST actionST = new ST(templates, rawTranslation);
+		String found = actionST.render();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	@Test
+    public void testTemplateConstructorNoArgs() throws Exception {
+		String action = "x = %foo();";
+		String expecting = "x = templateLib.getInstanceOf(\"foo\");";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+										"a",
+										new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		STGroup templates =
+			new STGroup();
+		ST actionST = new ST(templates, rawTranslation);
+		String found = actionST.render();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	@Test
+    public void testIndirectTemplateConstructor() throws Exception {
+		String action = "x = %({\"foo\"})(name={$ID.text});";
+		String expecting = "x = templateLib.getInstanceOf(\"foo\"," +
+			"new STAttrMap().put(\"name\", (ID1!=null?ID1.getText():null)));";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+										"a",
+										new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		STGroup templates =
+			new STGroup();
+		ST actionST = new ST(templates, rawTranslation);
+		String found = actionST.render();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testStringConstructor() throws Exception {
+		String action = "x = %{$ID.text};";
+		String expecting = "x = new StringTemplate(templateLib,(ID1!=null?ID1.getText():null));";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+																	 "a",
+																	 new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		STGroup templates =
+			new STGroup();
+		ST actionST = new ST(templates, rawTranslation);
+		String found = actionST.render();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testSetAttr() throws Exception {
+		String action = "%x.y = z;";
+		String expecting = "(x).setAttribute(\"y\", z);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator =
+			new ActionTranslator(generator,
+										"a",
+										new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		STGroup templates =
+			new STGroup();
+		ST actionST = new ST(templates, rawTranslation);
+		String found = actionST.render();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testSetAttrOfExpr() throws Exception {
+		String action = "%{foo($ID.text).getST()}.y = z;";
+		String expecting = "(foo((ID1!=null?ID1.getText():null)).getST()).setAttribute(\"y\", z);";
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+		ActionTranslator translator = new ActionTranslator(generator,
+																	 "a",
+																	 new CommonToken(ANTLRParser.ACTION,action),1);
+		String rawTranslation =
+			translator.translate();
+		STGroup templates =
+			new STGroup();
+		ST actionST = new ST(templates, rawTranslation);
+		String found = actionST.render();
+
+		assertNoErrors(equeue);
+
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testSetAttrOfExprInMembers() throws Exception {
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"@members {\n" +
+			"%code.instr = o;" + // must not get null ptr!
+			"}\n" +
+			"a : ID\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		assertNoErrors(equeue);
+	}
+
+	@Test public void testCannotHaveSpaceBeforeDot() throws Exception {
+		String action = "%x .y = z;";
+		String expecting = null;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		int expectedMsgID = ErrorManager.MSG_INVALID_TEMPLATE_ACTION;
+		Object expectedArg = "%x";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	@Test public void testCannotHaveSpaceAfterDot() throws Exception {
+		String action = "%x. y = z;";
+		String expecting = null;
+
+		ErrorQueue equeue = new ErrorQueue();
+		ErrorManager.setErrorListener(equeue);
+		Grammar g = new Grammar(
+			"grammar t;\n" +
+			"options {\n" +
+			"    output=template;\n" +
+			"}\n" +
+			"\n" +
+			"a : ID {"+action+"}\n" +
+			"  ;\n" +
+			"\n" +
+			"ID : 'a';\n");
+		Tool antlr = newTool();
+		CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+		g.setCodeGenerator(generator);
+		generator.genRecognizer(); // forces load of templates
+
+		int expectedMsgID = ErrorManager.MSG_INVALID_TEMPLATE_ACTION;
+		Object expectedArg = "%x.";
+		GrammarSemanticsMessage expectedMessage =
+			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
+		checkError(equeue, expectedMessage);
+	}
+
+	protected void checkError(ErrorQueue equeue,
+							  GrammarSemanticsMessage expectedMessage)
+		throws Exception
+	{
+		/*
+		System.out.println(equeue.infos);
+		System.out.println(equeue.warnings);
+		System.out.println(equeue.errors);
+		*/
+		Message foundMsg = null;
+		for (int i = 0; i < equeue.errors.size(); i++) {
+			Message m = equeue.errors.get(i);
+			if (m.msgID==expectedMessage.msgID ) {
+				foundMsg = m;
+			}
+		}
+		assertTrue("no error; "+expectedMessage.msgID+" expected", equeue.errors.size()>0);
+		assertTrue("too many errors; "+equeue.errors, equeue.errors.size()<=1);
+		assertTrue("couldn't find expected error: "+expectedMessage.msgID, foundMsg!=null);
+		assertTrue("error is not a GrammarSemanticsMessage",
+				   foundMsg instanceof GrammarSemanticsMessage);
+		assertEquals(expectedMessage.arg, foundMsg.arg);
+		assertEquals(expectedMessage.arg2, foundMsg.arg2);
+	}
+
+	// S U P P O R T
+	private void assertNoErrors(ErrorQueue equeue) {
+		assertTrue("unexpected errors: "+equeue, equeue.errors.isEmpty());
+	}
+}
diff --git a/tool/src/test/java/org/antlr/test/TestTokenRewriteStream.java b/tool/src/test/java/org/antlr/test/TestTokenRewriteStream.java
new file mode 100644
index 0000000..81a736b
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTokenRewriteStream.java
@@ -0,0 +1,810 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.TokenRewriteStream;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.Interpreter;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestTokenRewriteStream extends BaseTest {
+
+    /** Public default constructor used by TestRig */
+    public TestTokenRewriteStream() {
+    }
+
+	@Test public void testInsertBeforeIndex0() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.insertBefore(0, "0");
+		String result = tokens.toString();
+		String expecting = "0abc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testInsertAfterLastIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.insertAfter(2, "x");
+		String result = tokens.toString();
+		String expecting = "abcx";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void test2InsertBeforeAfterMiddleIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(1, "x");
+		tokens.insertAfter(1, "x");
+		String result = tokens.toString();
+		String expecting = "axbxc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceIndex0() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(0, "x");
+		String result = tokens.toString();
+		String expecting = "xbc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceLastIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, "x");
+		String result = tokens.toString();
+		String expecting = "abx";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceMiddleIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(1, "x");
+		String result = tokens.toString();
+		String expecting = "axc";
+		assertEquals(expecting, result);
+	}
+
+    @Test public void testToStringStartStop() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar t;\n"+
+            "ID : 'a'..'z'+;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';';\n" +
+            "MUL : '*';\n" +
+            "ASSIGN : '=';\n" +
+            "WS : ' '+;\n");
+        // Tokens: 0123456789
+        // Input:  x = 3 * 0;
+        CharStream input = new ANTLRStringStream("x = 3 * 0;");
+        Interpreter lexEngine = new Interpreter(g, input);
+        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+        tokens.fill();
+        tokens.replace(4, 8, "0"); // replace 3 * 0 with 0
+
+        String result = tokens.toOriginalString();
+        String expecting = "x = 3 * 0;";
+        assertEquals(expecting, result);
+
+        result = tokens.toString();
+        expecting = "x = 0;";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(0,9);
+        expecting = "x = 0;";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(4,8);
+        expecting = "0";
+        assertEquals(expecting, result);
+    }
+
+    @Test public void testToStringStartStop2() throws Exception {
+        Grammar g = new Grammar(
+            "lexer grammar t;\n"+
+            "ID : 'a'..'z'+;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';';\n" +
+            "ASSIGN : '=';\n" +
+            "PLUS : '+';\n" +
+            "MULT : '*';\n" +
+            "WS : ' '+;\n");
+        // Tokens: 012345678901234567
+        // Input:  x = 3 * 0 + 2 * 0;
+        CharStream input = new ANTLRStringStream("x = 3 * 0 + 2 * 0;");
+        Interpreter lexEngine = new Interpreter(g, input);
+        TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+        tokens.fill();
+
+        String result = tokens.toOriginalString();
+        String expecting = "x = 3 * 0 + 2 * 0;";
+        assertEquals(expecting, result);
+
+        tokens.replace(4, 8, "0"); // replace 3 * 0 with 0
+        result = tokens.toString();
+        expecting = "x = 0 + 2 * 0;";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(0,17);
+        expecting = "x = 0 + 2 * 0;";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(4,8);
+        expecting = "0";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(0,8);
+        expecting = "x = 0";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(12,16);
+        expecting = "2 * 0";
+        assertEquals(expecting, result);
+
+        tokens.insertAfter(17, "// comment");
+        result = tokens.toString(12,18);
+        expecting = "2 * 0;// comment";
+        assertEquals(expecting, result);
+
+        result = tokens.toString(0,8); // try again after insert at end
+        expecting = "x = 0";
+        assertEquals(expecting, result);
+    }
+
+
+    @Test public void test2ReplaceMiddleIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(1, "x");
+		tokens.replace(1, "y");
+		String result = tokens.toString();
+		String expecting = "ayc";
+		assertEquals(expecting, result);
+	}
+
+    @Test public void test2ReplaceMiddleIndex1InsertBefore() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+        tokens.insertBefore(0, "_");
+        tokens.replace(1, "x");
+		tokens.replace(1, "y");
+		String result = tokens.toString();
+		String expecting = "_ayc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceThenDeleteMiddleIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(1, "x");
+		tokens.delete(1);
+		String result = tokens.toString();
+		String expecting = "ac";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testInsertInPriorReplace() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(0, 2, "x");
+		tokens.insertBefore(1, "0");
+		Exception exc = null;
+		try {
+			tokens.toString();
+		}
+		catch (IllegalArgumentException iae) {
+			exc = iae;
+		}
+		String expecting = "insert op <InsertBeforeOp@[@1,1:1='b',<5>,1:1]:\"0\"> within boundaries of previous <ReplaceOp@[@0,0:0='a',<4>,1:0]..[@2,2:2='c',<6>,1:2]:\"x\">";
+		assertNotNull(exc);
+		assertEquals(expecting, exc.getMessage());
+	}
+
+	@Test public void testInsertThenReplaceSameIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(0, "0");
+		tokens.replace(0, "x"); // supercedes insert at 0
+		String result = tokens.toString();
+		String expecting = "0xbc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void test2InsertMiddleIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(1, "x");
+		tokens.insertBefore(1, "y");
+		String result = tokens.toString();
+		String expecting = "ayxbc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void test2InsertThenReplaceIndex0() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(0, "x");
+		tokens.insertBefore(0, "y");
+		tokens.replace(0, "z");
+		String result = tokens.toString();
+		String expecting = "yxzbc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceThenInsertBeforeLastIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, "x");
+		tokens.insertBefore(2, "y");
+		String result = tokens.toString();
+		String expecting = "abyx";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testInsertThenReplaceLastIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(2, "y");
+		tokens.replace(2, "x");
+		String result = tokens.toString();
+		String expecting = "abyx";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceThenInsertAfterLastIndex() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, "x");
+		tokens.insertAfter(2, "y");
+		String result = tokens.toString();
+		String expecting = "abxy";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceRangeThenInsertAtLeftEdge() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, 4, "x");
+		tokens.insertBefore(2, "y");
+		String result = tokens.toString();
+		String expecting = "abyxba";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceRangeThenInsertAtRightEdge() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, 4, "x");
+		tokens.insertBefore(4, "y"); // no effect; within range of a replace
+		Exception exc = null;
+		try {
+			tokens.toString();
+		}
+		catch (IllegalArgumentException iae) {
+			exc = iae;
+		}
+		String expecting = "insert op <InsertBeforeOp@[@4,4:4='c',<6>,1:4]:\"y\"> within boundaries of previous <ReplaceOp@[@2,2:2='c',<6>,1:2]..[@4,4:4='c',<6>,1:4]:\"x\">";
+		assertNotNull(exc);
+		assertEquals(expecting, exc.getMessage());
+	}
+
+	@Test public void testReplaceRangeThenInsertAfterRightEdge() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, 4, "x");
+		tokens.insertAfter(4, "y");
+		String result = tokens.toString();
+		String expecting = "abxyba";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceAll() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(0, 6, "x");
+		String result = tokens.toString();
+		String expecting = "x";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceSubsetThenFetch() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, 4, "xyz");
+		String result = tokens.toString(0,6);
+		String expecting = "abxyzba";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testReplaceThenReplaceSuperset() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, 4, "xyz");
+		tokens.replace(3, 5, "foo"); // overlaps, error
+		Exception exc = null;
+		try {
+			tokens.toString();
+		}
+		catch (IllegalArgumentException iae) {
+			exc = iae;
+		}
+		String expecting = "replace op boundaries of <ReplaceOp@[@3,3:3='c',<6>,1:3]..[@5,5:5='b',<5>,1:5]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<6>,1:2]..[@4,4:4='c',<6>,1:4]:\"xyz\">";
+		assertNotNull(exc);
+		assertEquals(expecting, exc.getMessage());
+	}
+
+	@Test public void testReplaceThenReplaceLowerIndexedSuperset() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcccba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, 4, "xyz");
+		tokens.replace(1, 3, "foo"); // overlap, error
+		Exception exc = null;
+		try {
+			tokens.toString();
+		}
+		catch (IllegalArgumentException iae) {
+			exc = iae;
+		}
+		String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<5>,1:1]..[@3,3:3='c',<6>,1:3]:\"foo\"> overlap with previous <ReplaceOp@[@2,2:2='c',<6>,1:2]..[@4,4:4='c',<6>,1:4]:\"xyz\">";
+		assertNotNull(exc);
+		assertEquals(expecting, exc.getMessage());
+	}
+
+	@Test public void testReplaceSingleMiddleThenOverlappingSuperset() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcba");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, 2, "xyz");
+		tokens.replace(0, 3, "foo");
+		String result = tokens.toString();
+		String expecting = "fooa";
+		assertEquals(expecting, result);
+	}
+
+	// June 2, 2008 I rewrote core of rewrite engine; just adding lots more tests here
+
+	@Test public void testCombineInserts() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(0, "x");
+		tokens.insertBefore(0, "y");
+		String result = tokens.toString();
+		String expecting = "yxabc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testCombine3Inserts() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(1, "x");
+		tokens.insertBefore(0, "y");
+		tokens.insertBefore(1, "z");
+		String result = tokens.toString();
+		String expecting = "yazxbc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testCombineInsertOnLeftWithReplace() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(0, 2, "foo");
+		tokens.insertBefore(0, "z"); // combine with left edge of rewrite
+		String result = tokens.toString();
+		String expecting = "zfoo";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testCombineInsertOnLeftWithDelete() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.delete(0, 2);
+		tokens.insertBefore(0, "z"); // combine with left edge of rewrite
+		String result = tokens.toString();
+		String expecting = "z"; // make sure combo is not znull
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testDisjointInserts() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(1, "x");
+		tokens.insertBefore(2, "y");
+		tokens.insertBefore(0, "z");
+		String result = tokens.toString();
+		String expecting = "zaxbyc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testOverlappingReplace() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(1, 2, "foo");
+		tokens.replace(0, 3, "bar"); // wipes prior nested replace
+		String result = tokens.toString();
+		String expecting = "bar";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testOverlappingReplace2() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(0, 3, "bar");
+		tokens.replace(1, 2, "foo"); // cannot split earlier replace
+		Exception exc = null;
+		try {
+			tokens.toString();
+		}
+		catch (IllegalArgumentException iae) {
+			exc = iae;
+		}
+		String expecting = "replace op boundaries of <ReplaceOp@[@1,1:1='b',<5>,1:1]..[@2,2:2='c',<6>,1:2]:\"foo\"> overlap with previous <ReplaceOp@[@0,0:0='a',<4>,1:0]..[@3,3:3='c',<6>,1:3]:\"bar\">";
+		assertNotNull(exc);
+		assertEquals(expecting, exc.getMessage());
+	}
+
+	@Test public void testOverlappingReplace3() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(1, 2, "foo");
+		tokens.replace(0, 2, "bar"); // wipes prior nested replace
+		String result = tokens.toString();
+		String expecting = "barc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testOverlappingReplace4() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(1, 2, "foo");
+		tokens.replace(1, 3, "bar"); // wipes prior nested replace
+		String result = tokens.toString();
+		String expecting = "abar";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testDropIdenticalReplace() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(1, 2, "foo");
+		tokens.replace(1, 2, "foo"); // drop previous, identical
+		String result = tokens.toString();
+		String expecting = "afooc";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testDropPrevCoveredInsert() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(1, "foo");
+		tokens.replace(1, 2, "foo"); // kill prev insert
+		String result = tokens.toString();
+		String expecting = "afoofoo";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testLeaveAloneDisjointInsert() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(1, "x");
+		tokens.replace(2, 3, "foo");
+		String result = tokens.toString();
+		String expecting = "axbfoo";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testLeaveAloneDisjointInsert2() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abcc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.replace(2, 3, "foo");
+		tokens.insertBefore(1, "x");
+		String result = tokens.toString();
+		String expecting = "axbfoo";
+		assertEquals(expecting, result);
+	}
+
+	@Test public void testInsertBeforeTokenThenDeleteThatToken() throws Exception {
+		Grammar g = new Grammar(
+			"lexer grammar t;\n"+
+			"A : 'a';\n" +
+			"B : 'b';\n" +
+			"C : 'c';\n");
+		CharStream input = new ANTLRStringStream("abc");
+		Interpreter lexEngine = new Interpreter(g, input);
+		TokenRewriteStream tokens = new TokenRewriteStream(lexEngine);
+		tokens.fill();
+		tokens.insertBefore(2, "y");
+		tokens.delete(2);
+		String result = tokens.toString();
+		String expecting = "aby";
+		assertEquals(expecting, result);
+	}
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestTopologicalSort.java b/tool/src/test/java/org/antlr/test/TestTopologicalSort.java
new file mode 100644
index 0000000..62cfebd
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTopologicalSort.java
@@ -0,0 +1,115 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.misc.Graph;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.junit.Assert.*;
+
+/** Test topo sort in GraphNode. */
+public class TestTopologicalSort extends BaseTest {
+    @Test
+    public void testFairlyLargeGraph() throws Exception {
+        Graph<String> g = new Graph<String>();
+        g.addEdge("C", "F");
+        g.addEdge("C", "G");
+        g.addEdge("C", "A");
+        g.addEdge("C", "B");
+        g.addEdge("A", "D");
+        g.addEdge("A", "E");
+        g.addEdge("B", "E");
+        g.addEdge("D", "E");
+        g.addEdge("D", "F");
+        g.addEdge("F", "H");
+        g.addEdge("E", "F");
+
+        String expecting = "[H, F, E, D, G, A, B, C]";
+        List<String> nodes = g.sort();
+        String result = nodes.toString();
+        assertEquals(expecting, result);
+    }
+
+    @Test
+    public void testCyclicGraph() throws Exception {
+        Graph<String> g = new Graph<String>();
+        g.addEdge("A", "B");
+        g.addEdge("B", "C");
+        g.addEdge("C", "A");
+        g.addEdge("C", "D");
+
+        String expecting = "[D, C, B, A]";
+        List<String> nodes = g.sort();
+        String result = nodes.toString();
+        assertEquals(expecting, result);
+    }
+
+    @Test
+    public void testRepeatedEdges() throws Exception {
+        Graph<String> g = new Graph<String>();
+        g.addEdge("A", "B");
+        g.addEdge("B", "C");
+        g.addEdge("A", "B"); // dup
+        g.addEdge("C", "D");
+
+        String expecting = "[D, C, B, A]";
+        List<String> nodes = g.sort();
+        String result = nodes.toString();
+        assertEquals(expecting, result);
+    }
+
+    @Test
+    public void testSimpleTokenDependence() throws Exception {
+        Graph<String> g = new Graph<String>();
+        g.addEdge("Java.g", "MyJava.tokens"); // Java feeds off manual token file
+        g.addEdge("Java.tokens", "Java.g");        
+        g.addEdge("Def.g", "Java.tokens");    // walkers feed off generated tokens
+        g.addEdge("Ref.g", "Java.tokens");
+
+        String expecting = "[MyJava.tokens, Java.g, Java.tokens, Ref.g, Def.g]";
+        List<String> nodes = g.sort();
+        String result = nodes.toString();
+        assertEquals(expecting, result);
+    }
+
+    @Test
+    public void testParserLexerCombo() throws Exception {
+        Graph<String> g = new Graph<String>();
+        g.addEdge("JavaLexer.tokens", "JavaLexer.g");
+        g.addEdge("JavaParser.g", "JavaLexer.tokens");
+        g.addEdge("Def.g", "JavaLexer.tokens");
+        g.addEdge("Ref.g", "JavaLexer.tokens");
+
+        String expecting = "[JavaLexer.g, JavaLexer.tokens, JavaParser.g, Ref.g, Def.g]";
+        List<String> nodes = g.sort();
+        String result = nodes.toString();
+        assertEquals(expecting, result);
+    }
+}
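
The tests above pin Graph.sort() to fixed orderings; the exact position of independent nodes depends on the tool's traversal order. As a rough, hypothetical illustration of the underlying idea only (not the org.antlr.misc.Graph implementation), a depth-first, post-order topological sort over string nodes can be sketched like this:

    import java.util.*;

    // Hypothetical sketch: dependencies are emitted before the nodes that need them.
    class TopoSortSketch {
        // edge a -> b means "a depends on b"
        static List<String> sort(Map<String, List<String>> edges) {
            List<String> out = new ArrayList<String>();
            Set<String> visited = new HashSet<String>();
            Set<String> all = new TreeSet<String>(edges.keySet());
            for (List<String> targets : edges.values()) all.addAll(targets);
            for (String n : all) dfs(n, edges, visited, out);
            return out;
        }

        static void dfs(String n, Map<String, List<String>> edges,
                        Set<String> visited, List<String> out) {
            if (!visited.add(n)) return;                    // already emitted; also breaks cycles
            for (String t : edges.getOrDefault(n, Collections.<String>emptyList())) {
                dfs(t, edges, visited, out);
            }
            out.add(n);                                     // post-order: after all dependencies
        }

        public static void main(String[] args) {
            Map<String, List<String>> g = new LinkedHashMap<String, List<String>>();
            g.put("A", Arrays.asList("B"));
            g.put("B", Arrays.asList("C"));
            g.put("C", Arrays.asList("D"));
            System.out.println(sort(g));                    // prints [D, C, B, A]
        }
    }
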
diff --git a/tool/src/test/java/org/antlr/test/TestTreeGrammarRewriteAST.java b/tool/src/test/java/org/antlr/test/TestTreeGrammarRewriteAST.java
new file mode 100644
index 0000000..746fddc
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTreeGrammarRewriteAST.java
@@ -0,0 +1,1121 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.Tool;
+import org.antlr.codegen.CodeGenerator;
+import org.antlr.runtime.RecognitionException;
+import org.antlr.tool.ErrorManager;
+import org.antlr.tool.Grammar;
+import org.antlr.tool.GrammarSyntaxMessage;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/** Tree rewrites in tree parsers are basically identical to rewrites
+ *  in a normal grammar except that the atomic element is a node not
+ *  a Token.  Tests here ensure that duplication of nodes occurs properly
+ *  and that basic rewrite functionality works.
+ */
+public class TestTreeGrammarRewriteAST extends BaseTest {
+	protected boolean debug = false;
+
+	@Test public void testFlatList() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID INT -> INT ID\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("34 abc\n", found);
+	}
+
+	@Test public void testSimpleTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID INT) -> ^(INT ID)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(34 abc)\n", found);
+	}
+
+	@Test public void testNonImaginaryWithCtor() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : INT ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : INT -> INT[\"99\"]\n" + // make new INT node
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "34");
+		assertEquals("99\n", found);
+	}
+
+	@Test public void testCombinedRewriteAndAuto() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT) | INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID INT) -> ^(INT ID) | INT\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(34 abc)\n", found);
+
+		found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+							   treeGrammar, "TP", "TLexer", "a", "a", "34");
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testAvoidDup() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID -> ^(ID ID)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("(abc abc)\n", found);
+	}
+
+	@Test public void testLoop() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID+ INT+ -> (^(ID INT))+ ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : (^(ID INT))+ -> INT+ ID+\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a b c 3 4 5");
+		assertEquals("3 4 5 a b c\n", found);
+	}
+
+	@Test public void testAutoDup() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID \n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testAutoDupRule() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : b c ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 1");
+		assertEquals("a 1\n", found);
+	}
+
+    @Test public void testAutoWildcard() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "a : ID . \n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+        assertEquals("abc 34\n", found);
+    }
+
+    @Test public void testNoWildcardAsRootError() throws Exception {
+        ErrorQueue equeue = new ErrorQueue();
+        ErrorManager.setErrorListener(equeue);
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST;}\n" +
+            "a : ^(. INT) \n" +
+            "  ;\n";
+
+        Grammar g = new Grammar(treeGrammar);
+        Tool antlr = newTool();
+        antlr.setOutputDirectory(null); // write to /dev/null
+        CodeGenerator generator = new CodeGenerator(antlr, g, "Java");
+        g.setCodeGenerator(generator);
+        generator.genRecognizer();
+
+        assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
+
+        int expectedMsgID = ErrorManager.MSG_WILDCARD_AS_ROOT;
+        Object expectedArg = null;
+        RecognitionException expectedExc = null;
+        GrammarSyntaxMessage expectedMessage =
+            new GrammarSyntaxMessage(expectedMsgID, g, null, expectedArg, expectedExc);
+
+        checkError(equeue, expectedMessage);        
+    }
+
+    @Test public void testAutoWildcard2() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID INT);\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "a : ^(ID .) \n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+        assertEquals("(abc 34)\n", found);
+    }
+
+    @Test public void testAutoWildcardWithLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "a : ID c=. \n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+        assertEquals("abc 34\n", found);
+    }
+
+    @Test public void testAutoWildcardWithListLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "a : ID c+=. \n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+        assertEquals("abc 34\n", found);
+    }
+
+    @Test public void testAutoDupMultiple() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID ID INT\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a b 3");
+		assertEquals("a b 3\n", found);
+	}
+
+	@Test public void testAutoDupTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID INT)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupTree2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT INT -> ^(ID INT INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID b b)\n" +
+			"  ;\n" +
+			"b : INT ;";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3 4");
+		assertEquals("(a 3 4)\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithLabels() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(x=ID y=INT)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithListLabels() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(x+=ID y+=INT)\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithRuleRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(b INT) ;\n" +
+			"b : ID ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithRuleRootAndLabels() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(x=b INT) ;\n" +
+			"b : ID ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithRuleRootAndListLabels() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(x+=b y+=c) ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a 3");
+		assertEquals("(a 3)\n", found);
+	}
+
+	@Test public void testAutoDupNestedTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=ID y=ID INT -> ^($x ^($y INT));\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID ^(ID INT))\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a b 3");
+		assertEquals("(a (b 3))\n", found);
+	}
+
+	@Test public void testAutoDupTreeWithSubruleInside() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"tokens {OP;}\n" +
+			"a : (x=ID|x=INT) -> ^(OP $x) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(OP (b|c)) ;\n" +
+			"b : ID ;\n" +
+			"c : INT ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "a");
+		assertEquals("(OP a)\n", found);
+	}
+
+	@Test public void testDelete() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ID -> \n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("", found);
+	}
+
+	@Test public void testSetMatchNoRewrite() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : b INT\n" +
+			"  ;\n" +
+			"b : ID | INT ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("abc 34\n", found);
+	}
+
+	@Test public void testSetOptionalMatchNoRewrite() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : (ID|INT)? INT ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("abc 34\n", found);
+	}
+
+
+	@Test public void testSetMatchNoRewriteLevel2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=ID INT -> ^($x INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(ID (ID | INT) ) ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(abc 34)\n", found);
+	}
+
+	@Test public void testSetMatchNoRewriteLevel2Root() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=ID INT -> ^($x INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^((ID | INT) INT) ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(abc 34)\n", found);
+	}
+
+
+	// REWRITE MODE
+
+	@Test public void testRewriteModeCombinedRewriteAndAuto() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT) | INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"a : ^(ID INT) -> ^(ID[\"ick\"] INT)\n" +
+			"  | INT\n" + // leaves it alone, returning $a.start
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("(ick 34)\n", found);
+
+		found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+							   treeGrammar, "TP", "TLexer", "a", "a", "34");
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testRewriteModeFlatTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ID INT | INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"s : ID a ;\n" +
+			"a : INT -> INT[\"1\"]\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("abc 1\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleFlatTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ID INT | INT ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"s : a ;\n" +
+			"a : b ;\n" +
+			"b : ID INT -> INT ID\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("34 abc\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"s : a ;\n" +
+			"a : b ;\n" + // a.tree must become b.tree
+			"b : ^(ID INT) -> INT\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleTree2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"tokens { X; }\n" +
+			"s : a* b ;\n" + // only b contributes to tree, but it's after a*; s.tree = b.tree
+			"a : X ;\n" +
+			"b : ^(ID INT) -> INT\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleTree3() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'boo' ID INT -> 'boo' ^(ID INT) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"tokens { X; }\n" +
+			"s : 'boo' a* b ;\n" + // don't reset s.tree to b.tree due to 'boo'
+			"a : X ;\n" +
+			"b : ^(ID INT) -> INT\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "boo abc 34");
+		assertEquals("boo 34\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleTree4() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"tokens { X; }\n" +
+			"s : ^('boo' a* b) ;\n" + // don't reset s.tree to b.tree due to 'boo'
+			"a : X ;\n" +
+			"b : ^(ID INT) -> INT\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "boo abc 34");
+		assertEquals("(boo 34)\n", found);
+	}
+
+	@Test public void testRewriteModeChainRuleTree5() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"tokens { X; }\n" +
+			"s : ^(a b) ;\n" + // s.tree is a.tree
+			"a : 'boo' ;\n" +
+			"b : ^(ID INT) -> INT\n"+
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "boo abc 34");
+		assertEquals("(boo 34)\n", found);
+	}
+
+    @Test public void testRewriteOfRuleRef() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ID INT | INT ;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : a -> a ;\n" +
+            "a : ID INT -> ID INT ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("abc 34\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefRoot() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT INT -> ^(INT ^(ID INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(a ^(ID INT)) -> a ;\n" +
+            "a : INT ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 12 34");
+        // emits whole tree when you ref the root since I can't know whether
+        // you want the children or not.  You might be returning a whole new
+        // tree.  Hmm...still seems weird.  oh well.
+        assertEquals("(12 (abc 34))\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefRootLabeled() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT INT -> ^(INT ^(ID INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(label=a ^(ID INT)) -> a ;\n" +
+            "a : INT ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 12 34");
+        // emits whole tree when you ref the root since I can't know whether
+        // you want the children or not.  You might be returning a whole new
+        // tree.  Hmm...still seems weird.  oh well.
+        assertEquals("(12 (abc 34))\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefRootListLabeled() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT INT -> ^(INT ^(ID INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(label+=a ^(ID INT)) -> a ;\n" +
+            "a : INT ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 12 34");
+        // emits whole tree when you ref the root since I can't know whether
+        // you want the children or not.  You might be returning a whole new
+        // tree.  Hmm...still seems weird.  oh well.
+        assertEquals("(12 (abc 34))\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefChild() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID ^(INT INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(ID a) -> a ;\n" +
+            "a : ^(INT INT) ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("(34 34)\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID ^(INT INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(ID label=a) -> a ;\n" +
+            "a : ^(INT INT) ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("(34 34)\n", found);
+    }
+
+    @Test public void testRewriteOfRuleRefListLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID ^(INT INT));\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(ID label+=a) -> a ;\n" +
+            "a : ^(INT INT) ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("(34 34)\n", found);
+    }
+
+    @Test public void testRewriteModeWithPredicatedRewrites() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID[\"root\"] ^(ID INT)) | INT -> ^(ID[\"root\"] INT) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+			"s : ^(ID a) {System.out.println(\"altered tree=\"+$s.start.toStringTree());};\n" +
+			"a : ^(ID INT) -> {true}? ^(ID[\"ick\"] INT)\n" +
+			"              -> INT\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("altered tree=(root (ick 34))\n" +
+					 "(root (ick 34))\n", found);
+	}
+
+    @Test public void testWildcardSingleNode() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID[\"root\"] INT);\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "s : ^(ID c=.) -> $c\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("34\n", found);
+    }
+
+    @Test public void testWildcardUnlabeledSingleNode() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID INT -> ^(ID INT);\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "s : ^(ID .) -> ID\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+        assertEquals("abc\n", found);
+    }
+
+    @Test public void testWildcardGrabsSubtree() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID x=INT y=INT z=INT -> ^(ID[\"root\"] ^($x $y $z));\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "s : ^(ID c=.) -> $c\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 1 2 3");
+        assertEquals("(1 2 3)\n", found);
+    }
+
+    @Test public void testWildcardGrabsSubtree2() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID x=INT y=INT z=INT -> ID ^($x $y $z);\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "s : ID c=. -> $c\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "abc 1 2 3");
+        assertEquals("(1 2 3)\n", found);
+    }
+
+    @Test public void testWildcardListLabel() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : INT INT INT ;\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+            "s : (c+=.)+ -> $c+\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "1 2 3");
+        assertEquals("1 2 3\n", found);
+    }
+
+    @Test public void testWildcardListLabel2() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST; ASTLabelType=CommonTree;}\n" +
+            "a  : x=INT y=INT z=INT -> ^($x ^($y $z) ^($y $z));\n"+
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP;\n"+
+            "options {output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}\n" +
+            "s : ^(INT (c+=.)+) -> $c+\n" +
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                                      treeGrammar, "TP", "TLexer", "a", "s", "1 2 3");
+        assertEquals("(2 3) (2 3)\n", found);
+    }
+
+	@Test public void testRuleResultAsRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID '=' INT -> ^('=' ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"COLON : ':' ;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n"+
+			"options {output=AST; rewrite=true; ASTLabelType=CommonTree; tokenVocab=T;}\n" +
+			"a : ^(eq e1=ID e2=.) -> ^(eq $e2 $e1) ;\n" +
+			"eq : '=' | ':' {;} ;\n";  // bug in set match, doesn't add to tree!! booh. force nonset.
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+									  treeGrammar, "TP", "TLexer", "a", "a", "abc = 34");
+		assertEquals("(= 34 abc)\n", found);
+	}
+
+}
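
All of these tests go through the execTreeParser() helper in BaseTest, which generates, compiles, and runs both the combined grammar and the tree grammar. As a non-authoritative sketch of what that harness automates, the standard ANTLR 3 wiring looks roughly like this, assuming T.g and TP.g above have already been processed by the ANTLR tool so that the generated TLexer, TParser, and TP classes exist (RunTreeRewrite is a hypothetical driver name):

    import org.antlr.runtime.*;
    import org.antlr.runtime.tree.*;

    public class RunTreeRewrite {
        public static void main(String[] args) throws Exception {
            CharStream input = new ANTLRStringStream("abc 34");
            TLexer lexer = new TLexer(input);                  // generated from T.g
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            TParser parser = new TParser(tokens);              // generated from T.g
            TParser.a_return r = parser.a();                   // parse rule 'a', builds the AST
            CommonTree tree = (CommonTree)r.getTree();

            CommonTreeNodeStream nodes = new CommonTreeNodeStream(tree);
            nodes.setTokenStream(tokens);                      // tree parser can refer back to the tokens
            TP walker = new TP(nodes);                         // generated from TP.g
            TP.a_return r2 = walker.a();                       // rewrite pass over the AST
            System.out.println(((CommonTree)r2.getTree()).toStringTree());
        }
    }
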
diff --git a/tool/src/test/java/org/antlr/test/TestTreeIterator.java b/tool/src/test/java/org/antlr/test/TestTreeIterator.java
new file mode 100644
index 0000000..0803bc3
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTreeIterator.java
@@ -0,0 +1,132 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.runtime.tree.*;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestTreeIterator {
+    static final String[] tokens = new String[] {
+        "<invalid>", "<EOR>", "<DOWN>", "<UP>", "A", "B", "C", "D", "E", "F", "G"
+    };
+
+    @Test public void testNode() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("A");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testFlatAB() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(nil A B)");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "nil DOWN A B UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testAB() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(A B)");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A DOWN B UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testABC() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(A B C)");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A DOWN B C UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testVerticalList() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(A (B C))");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A DOWN B DOWN C UP UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testComplex() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(A (B (C D E) F) G)");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    @Test public void testReset() {
+        TreeAdaptor adaptor = new CommonTreeAdaptor();
+        TreeWizard wiz = new TreeWizard(adaptor, tokens);
+        CommonTree t = (CommonTree)wiz.create("(A (B (C D E) F) G)");
+        TreeIterator it = new TreeIterator(t);
+        StringBuffer buf = toString(it);
+        String expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF";
+        String found = buf.toString();
+        assertEquals(expecting, found);
+
+        it.reset();
+        buf = toString(it);
+        expecting = "A DOWN B DOWN C DOWN D E UP F UP G UP EOF";
+        found = buf.toString();
+        assertEquals(expecting, found);
+    }
+
+    protected static StringBuffer toString(TreeIterator it) {
+        StringBuffer buf = new StringBuffer();
+        while ( it.hasNext() ) {
+            CommonTree n = (CommonTree)it.next();
+            buf.append(n);
+            if ( it.hasNext() ) buf.append(" ");
+        }
+        return buf;
+    }
+}
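
The expected strings above show that TreeIterator interleaves the navigation nodes DOWN and UP with the real nodes and finishes with EOF. A small, hedged sketch (using only the calls these tests already rely on) of turning those markers back into nesting depth while walking a tree:

    import org.antlr.runtime.Token;
    import org.antlr.runtime.tree.*;

    // Illustrative only: DOWN/UP adjust depth, other nodes are printed at their depth.
    public class TreeDepthSketch {
        public static void main(String[] args) {
            TreeAdaptor adaptor = new CommonTreeAdaptor();
            TreeWizard wiz = new TreeWizard(adaptor, new String[] {
                "<invalid>", "<EOR>", "<DOWN>", "<UP>", "A", "B", "C", "D"
            });
            CommonTree t = (CommonTree)wiz.create("(A (B C) D)");
            TreeIterator it = new TreeIterator(t);
            int depth = 0;
            while (it.hasNext()) {
                CommonTree n = (CommonTree)it.next();
                if (n.getType() == Token.DOWN)         depth++;
                else if (n.getType() == Token.UP)      depth--;
                else if (n.getType() != Token.EOF)     System.out.println(depth + ": " + n);
            }
            // prints: 0: A, 1: B, 2: C, 1: D
        }
    }
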
diff --git a/tool/src/test/java/org/antlr/test/TestTreeNodeStream.java b/tool/src/test/java/org/antlr/test/TestTreeNodeStream.java
new file mode 100644
index 0000000..f0ddcd1
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTreeNodeStream.java
@@ -0,0 +1,378 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.*;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/** Test the tree node stream. */
+public class TestTreeNodeStream extends BaseTest {
+
+	/** Build new stream; lets us override to test other streams. */
+	public TreeNodeStream newStream(Object t) {
+		return new CommonTreeNodeStream(t);
+	}
+
+    public String toTokenTypeString(TreeNodeStream stream) {
+        return ((CommonTreeNodeStream)stream).toTokenTypeString();
+    }
+
+	@Test public void testSingleNode() throws Exception {
+		Tree t = new CommonTree(new CommonToken(101));
+
+		TreeNodeStream stream = newStream(t);
+		String expecting = " 101";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101";
+		found = toTokenTypeString(stream);
+		assertEquals(expecting, found);
+	}
+
+	@Test public void test4Nodes() throws Exception {
+		// ^(101 ^(102 103) 104)
+		Tree t = new CommonTree(new CommonToken(101));
+		t.addChild(new CommonTree(new CommonToken(102)));
+		t.getChild(0).addChild(new CommonTree(new CommonToken(103)));
+		t.addChild(new CommonTree(new CommonToken(104)));
+
+		TreeNodeStream stream = newStream(t);
+		String expecting = " 101 102 103 104";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101 2 102 2 103 3 104 3";
+		found = toTokenTypeString(stream);
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testList() throws Exception {
+		Tree root = new CommonTree((Token)null);
+
+		Tree t = new CommonTree(new CommonToken(101));
+		t.addChild(new CommonTree(new CommonToken(102)));
+		t.getChild(0).addChild(new CommonTree(new CommonToken(103)));
+		t.addChild(new CommonTree(new CommonToken(104)));
+
+		Tree u = new CommonTree(new CommonToken(105));
+
+		root.addChild(t);
+		root.addChild(u);
+
+		TreeNodeStream stream = newStream(root);
+		String expecting = " 101 102 103 104 105";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101 2 102 2 103 3 104 3 105";
+		found = toTokenTypeString(stream);
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testFlatList() throws Exception {
+		Tree root = new CommonTree((Token)null);
+
+		root.addChild(new CommonTree(new CommonToken(101)));
+		root.addChild(new CommonTree(new CommonToken(102)));
+		root.addChild(new CommonTree(new CommonToken(103)));
+
+		TreeNodeStream stream = newStream(root);
+		String expecting = " 101 102 103";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101 102 103";
+		found = toTokenTypeString(stream);
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testListWithOneNode() throws Exception {
+		Tree root = new CommonTree((Token)null);
+
+		root.addChild(new CommonTree(new CommonToken(101)));
+
+		TreeNodeStream stream = newStream(root);
+		String expecting = " 101";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101";
+		found = toTokenTypeString(stream);
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testAoverB() throws Exception {
+		Tree t = new CommonTree(new CommonToken(101));
+		t.addChild(new CommonTree(new CommonToken(102)));
+
+		TreeNodeStream stream = newStream(t);
+		String expecting = " 101 102";
+		String found = toNodesOnlyString(stream);
+		assertEquals(expecting, found);
+
+		expecting = " 101 2 102 3";
+		found = toTokenTypeString(stream);
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testLT() throws Exception {
+		// ^(101 ^(102 103) 104)
+		Tree t = new CommonTree(new CommonToken(101));
+		t.addChild(new CommonTree(new CommonToken(102)));
+		t.getChild(0).addChild(new CommonTree(new CommonToken(103)));
+		t.addChild(new CommonTree(new CommonToken(104)));
+
+		TreeNodeStream stream = newStream(t);
+		assertEquals(101, ((Tree)stream.LT(1)).getType());
+		assertEquals(Token.DOWN, ((Tree)stream.LT(2)).getType());
+		assertEquals(102, ((Tree)stream.LT(3)).getType());
+		assertEquals(Token.DOWN, ((Tree)stream.LT(4)).getType());
+		assertEquals(103, ((Tree)stream.LT(5)).getType());
+		assertEquals(Token.UP, ((Tree)stream.LT(6)).getType());
+		assertEquals(104, ((Tree)stream.LT(7)).getType());
+		assertEquals(Token.UP, ((Tree)stream.LT(8)).getType());
+		assertEquals(Token.EOF, ((Tree)stream.LT(9)).getType());
+		// check way ahead
+		assertEquals(Token.EOF, ((Tree)stream.LT(100)).getType());
+	}
+
+	@Test public void testMarkRewindEntire() throws Exception {
+		// ^(101 ^(102 103 ^(106 107) ) 104 105)
+		// stream has 7 real + 6 nav nodes
+		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+		Tree r0 = new CommonTree(new CommonToken(101));
+		Tree r1 = new CommonTree(new CommonToken(102));
+		r0.addChild(r1);
+		r1.addChild(new CommonTree(new CommonToken(103)));
+		Tree r2 = new CommonTree(new CommonToken(106));
+		r2.addChild(new CommonTree(new CommonToken(107)));
+		r1.addChild(r2);
+		r0.addChild(new CommonTree(new CommonToken(104)));
+		r0.addChild(new CommonTree(new CommonToken(105)));
+
+		TreeNodeStream stream = newStream(r0);
+		int m = stream.mark(); // MARK
+		for (int k=1; k<=13; k++) { // consume til end
+			stream.LT(1);
+			stream.consume();
+		}
+		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
+		stream.rewind(m);      // REWIND
+
+		// consume til end again :)
+		for (int k=1; k<=13; k++) { // consume til end
+			stream.LT(1);
+			stream.consume();
+		}
+		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
+	}
+
+	@Test public void testMarkRewindInMiddle() throws Exception {
+		// ^(101 ^(102 103 ^(106 107) ) 104 105)
+		// stream has 7 real + 6 nav nodes
+		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+		Tree r0 = new CommonTree(new CommonToken(101));
+		Tree r1 = new CommonTree(new CommonToken(102));
+		r0.addChild(r1);
+		r1.addChild(new CommonTree(new CommonToken(103)));
+		Tree r2 = new CommonTree(new CommonToken(106));
+		r2.addChild(new CommonTree(new CommonToken(107)));
+		r1.addChild(r2);
+		r0.addChild(new CommonTree(new CommonToken(104)));
+		r0.addChild(new CommonTree(new CommonToken(105)));
+
+		TreeNodeStream stream = newStream(r0);
+		for (int k=1; k<=7; k++) { // consume til middle
+			//System.out.println(((Tree)stream.LT(1)).getType());
+			stream.consume();
+		}
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+		stream.mark(); // MARK
+		stream.consume(); // consume 107
+		stream.consume(); // consume UP
+		stream.consume(); // consume UP
+		stream.consume(); // consume 104
+		stream.rewind(); // REWIND
+        stream.mark();   // keep saving nodes though
+
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(104, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		// now we're past rewind position
+		assertEquals(105, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.UP, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.EOF, ((Tree)stream.LT(1)).getType());
+		assertEquals(Token.UP, ((Tree)stream.LT(-1)).getType());
+	}
+
+	@Test public void testMarkRewindNested() throws Exception {
+		// ^(101 ^(102 103 ^(106 107) ) 104 105)
+		// stream has 7 real + 6 nav nodes
+		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+		Tree r0 = new CommonTree(new CommonToken(101));
+		Tree r1 = new CommonTree(new CommonToken(102));
+		r0.addChild(r1);
+		r1.addChild(new CommonTree(new CommonToken(103)));
+		Tree r2 = new CommonTree(new CommonToken(106));
+		r2.addChild(new CommonTree(new CommonToken(107)));
+		r1.addChild(r2);
+		r0.addChild(new CommonTree(new CommonToken(104)));
+		r0.addChild(new CommonTree(new CommonToken(105)));
+
+		TreeNodeStream stream = newStream(r0);
+		int m = stream.mark(); // MARK at start
+		stream.consume(); // consume 101
+		stream.consume(); // consume DN
+		int m2 = stream.mark(); // MARK on 102
+		stream.consume(); // consume 102
+		stream.consume(); // consume DN
+		stream.consume(); // consume 103
+		stream.consume(); // consume 106
+		stream.rewind(m2);      // REWIND to 102
+		assertEquals(102, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		// stop at 103 and rewind to start
+		stream.rewind(m); // REWIND to 101
+		assertEquals(101, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(102, ((Tree)stream.LT(1)).getType());
+		stream.consume();
+		assertEquals(Token.DOWN, ((Tree)stream.LT(1)).getType());
+	}
+
+	@Test public void testSeekFromStart() throws Exception {
+		// ^(101 ^(102 103 ^(106 107) ) 104 105)
+		// stream has 7 real + 6 nav nodes
+		// Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+		Tree r0 = new CommonTree(new CommonToken(101));
+		Tree r1 = new CommonTree(new CommonToken(102));
+		r0.addChild(r1);
+		r1.addChild(new CommonTree(new CommonToken(103)));
+		Tree r2 = new CommonTree(new CommonToken(106));
+		r2.addChild(new CommonTree(new CommonToken(107)));
+		r1.addChild(r2);
+		r0.addChild(new CommonTree(new CommonToken(104)));
+		r0.addChild(new CommonTree(new CommonToken(105)));
+
+		TreeNodeStream stream = newStream(r0);
+		stream.seek(7);   // seek to 107
+		assertEquals(107, ((Tree)stream.LT(1)).getType());
+		stream.consume(); // consume 107
+		stream.consume(); // consume UP
+		stream.consume(); // consume UP
+		assertEquals(104, ((Tree)stream.LT(1)).getType());
+	}
+
+    @Test public void testReset() throws Exception {
+        // ^(101 ^(102 103 ^(106 107) ) 104 105)
+        // stream has 7 real + 6 nav nodes
+        // Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
+        Tree r0 = new CommonTree(new CommonToken(101));
+        Tree r1 = new CommonTree(new CommonToken(102));
+        r0.addChild(r1);
+        r1.addChild(new CommonTree(new CommonToken(103)));
+        Tree r2 = new CommonTree(new CommonToken(106));
+        r2.addChild(new CommonTree(new CommonToken(107)));
+        r1.addChild(r2);
+        r0.addChild(new CommonTree(new CommonToken(104)));
+        r0.addChild(new CommonTree(new CommonToken(105)));
+
+        TreeNodeStream stream = newStream(r0);
+        String v = toNodesOnlyString(stream); // scan all
+        stream.reset();
+        String v2 = toNodesOnlyString(stream); // scan all
+        assertEquals(v, v2);
+    }
+
+	@Test public void testDeepTree() throws Exception {
+		// ^(10 100 101 ^(20 ^(30 40 ^(50 ^(60 70))) ^(80 90)))
+		// stream has 11 real + 12 nav nodes
+		int n = 9;
+		CommonTree[] nodes = new CommonTree[n];
+		for (int i=0; i< n; i++) {
+			nodes[i] = new CommonTree(new CommonToken((i+1)*10));
+		}
+		Tree g = nodes[0];
+		Tree rules = nodes[1];
+		Tree rule1 = nodes[2];
+		Tree id = nodes[3];
+		Tree block = nodes[4];
+		Tree alt = nodes[5];
+		Tree s = nodes[6];
+		Tree rule2 = nodes[7];
+		Tree id2 = nodes[8];
+		g.addChild(new CommonTree(new CommonToken(100)));
+		g.addChild(new CommonTree(new CommonToken(101)));
+		g.addChild(rules);
+		rules.addChild(rule1);
+		rule1.addChild(id);
+		rule1.addChild(block);
+		block.addChild(alt);
+		alt.addChild(s);
+		rules.addChild(rule2);
+		rule2.addChild(id2);
+
+		TreeNodeStream stream = newStream(g);
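+		// In the expected string below, 2 and 3 are Token.DOWN and Token.UP,
+		// the navigation nodes emitted around each nested subtree.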
+		String expecting = " 10 2 100 101 20 2 30 2 40 50 2 60 2 70 3 3 3 80 2 90 3 3 3";
+		String found = toTokenTypeString(stream);
+		assertEquals(expecting, found);
+	}
+
+	public String toNodesOnlyString(TreeNodeStream nodes) {
+		TreeAdaptor adaptor = nodes.getTreeAdaptor();
+		StringBuilder buf = new StringBuilder();
+		Object o = nodes.LT(1);
+		int type = adaptor.getType(o);
+		while ( o!=null && type!=Token.EOF ) {
+			if ( !(type==Token.DOWN||type==Token.UP) ) {
+				buf.append(" ");
+				buf.append(type);
+			}
+			nodes.consume();
+			o = nodes.LT(1);
+			type = adaptor.getType(o);
+		}
+		return buf.toString();
+	}
+}
diff --git a/tool/src/test/java/org/antlr/test/TestTreeParsing.java b/tool/src/test/java/org/antlr/test/TestTreeParsing.java
new file mode 100644
index 0000000..4e81ebb
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTreeParsing.java
@@ -0,0 +1,342 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestTreeParsing extends BaseTest {
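+	// Each test pairs a parser grammar "T" (output=AST) with a tree grammar
+	// "TP" and checks the tree parser's printed output via execTreeParser().
+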
+	@Test public void testFlatList() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ID INT\n" +
+			"    {System.out.println($ID+\", \"+$INT);}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("abc, 34\n", found);
+	}
+
+	@Test public void testSimpleTree() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT -> ^(ID INT);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ^(ID INT)\n" +
+			"    {System.out.println($ID+\", \"+$INT);}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc 34");
+		assertEquals("abc, 34\n", found);
+	}
+
+	@Test public void testFlatVsTreeDecision() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b c ;\n" +
+			"b : ID INT -> ^(ID INT);\n" +
+			"c : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : b b ;\n" +
+			"b : ID INT    {System.out.print($ID+\" \"+$INT);}\n" +
+			"  | ^(ID INT) {System.out.print(\"^(\"+$ID+\" \"+$INT+')');}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 b 2");
+		assertEquals("^(a 1)b 2\n", found);
+	}
+
+	@Test public void testFlatVsTreeDecision2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : b c ;\n" +
+			"b : ID INT+ -> ^(ID INT+);\n" +
+			"c : ID INT+;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : b b ;\n" +
+			"b : ID INT+    {System.out.print($ID+\" \"+$INT);}\n" +
+			"  | ^(x=ID (y=INT)+) {System.out.print(\"^(\"+$x+' '+$y+')');}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a",
+				    "a 1 2 3 b 4 5");
+		assertEquals("^(a 3)b 5\n", found);
+	}
+
+	@Test public void testCyclicDFALookahead() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT+ PERIOD;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"PERIOD : '.' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ID INT+ PERIOD {System.out.print(\"alt 1\");}"+
+			"  | ID INT+ SEMI   {System.out.print(\"alt 2\");}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "a 1 2 3.");
+		assertEquals("alt 1\n", found);
+	}
+
+	@Test public void testTemplateOutput() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP;\n" +
+			"options {output=template; ASTLabelType=CommonTree;}\n" +
+			"s : a {System.out.println($a.st);};\n" +
+			"a : ID INT -> {new StringTemplate($INT.text)}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
+		assertEquals("34\n", found);
+	}
+
+	@Test public void testNullableChildList() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT? -> ^(ID INT?);\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ^(ID INT?)\n" +
+			"    {System.out.println($ID);}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc");
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testNullableChildList2() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : ID INT? SEMI -> ^(ID INT?) SEMI ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ^(ID INT?) SEMI\n" +
+			"    {System.out.println($ID);}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc;");
+		assertEquals("abc\n", found);
+	}
+
+	@Test public void testNullableChildList3() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=ID INT? (y=ID)? SEMI -> ^($x INT? $y?) SEMI ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a : ^(ID INT? b) SEMI\n" +
+			"    {System.out.println($ID+\", \"+$b.text);}\n" +
+			"  ;\n"+
+			"b : ID? ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc def;");
+		assertEquals("abc, def\n", found);
+	}
+
+	@Test public void testActionsAfterRoot() throws Exception {
+		String grammar =
+			"grammar T;\n" +
+			"options {output=AST;}\n" +
+			"a : x=ID INT? SEMI -> ^($x INT?) ;\n" +
+			"ID : 'a'..'z'+ ;\n" +
+			"INT : '0'..'9'+;\n" +
+			"SEMI : ';' ;\n"+
+			"WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+		String treeGrammar =
+			"tree grammar TP; options {ASTLabelType=CommonTree;}\n" +
+			"a @init {int x=0;} : ^(ID {x=1;} {x=2;} INT?)\n" +
+			"    {System.out.println($ID+\", \"+x);}\n" +
+			"  ;\n";
+
+		String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+				    treeGrammar, "TP", "TLexer", "a", "a", "abc;");
+		assertEquals("abc, 2\n", found);
+	}
+
+    @Test public void testWildcardLookahead() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID '+'^ INT;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';' ;\n"+
+            "PERIOD : '.' ;\n"+
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
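+        // In a tree grammar, '.' is a wildcard that matches any single tree node.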
+        String treeGrammar =
+            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
+            "a : ^('+' . INT) {System.out.print(\"alt 1\");}"+
+            "  ;\n";
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
+        assertEquals("alt 1\n", found);
+    }
+
+    @Test public void testWildcardLookahead2() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID '+'^ INT;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';' ;\n"+
+            "PERIOD : '.' ;\n"+
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
+            "a : ^('+' . INT) {System.out.print(\"alt 1\");}"+
+            "  | ^('+' . .)   {System.out.print(\"alt 2\");}\n" +
+            "  ;\n";
+
+        // AMBIG upon '+' DOWN INT UP etc.. but so what.
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
+        assertEquals("alt 1\n", found);
+    }
+
+    @Test public void testWildcardLookahead3() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID '+'^ INT;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';' ;\n"+
+            "PERIOD : '.' ;\n"+
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
+            "a : ^('+' ID INT) {System.out.print(\"alt 1\");}"+
+            "  | ^('+' . .)   {System.out.print(\"alt 2\");}\n" +
+            "  ;\n";
+
+        // AMBIG upon '+' DOWN INT UP etc.. but so what.
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
+        assertEquals("alt 1\n", found);
+    }
+
+    @Test public void testWildcardPlusLookahead() throws Exception {
+        String grammar =
+            "grammar T;\n" +
+            "options {output=AST;}\n" +
+            "a : ID '+'^ INT;\n" +
+            "ID : 'a'..'z'+ ;\n" +
+            "INT : '0'..'9'+;\n" +
+            "SEMI : ';' ;\n"+
+            "PERIOD : '.' ;\n"+
+            "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";
+
+        String treeGrammar =
+            "tree grammar TP; options {tokenVocab=T; ASTLabelType=CommonTree;}\n" +
+            "a : ^('+' INT INT ) {System.out.print(\"alt 1\");}"+
+            "  | ^('+' .+)   {System.out.print(\"alt 2\");}\n" +
+            "  ;\n";
+
+        // AMBIG upon '+' DOWN INT UP etc.. but so what.
+
+        String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
+                    treeGrammar, "TP", "TLexer", "a", "a", "a + 2");
+        assertEquals("alt 2\n", found);
+    }
+
+}
diff --git a/tool/src/test/java/org/antlr/test/TestTreeWizard.java b/tool/src/test/java/org/antlr/test/TestTreeWizard.java
new file mode 100644
index 0000000..dc52f64
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTreeWizard.java
@@ -0,0 +1,412 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.runtime.tree.CommonTree;
+import org.antlr.runtime.tree.CommonTreeAdaptor;
+import org.antlr.runtime.tree.TreeAdaptor;
+import org.antlr.runtime.tree.TreeWizard;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.*;
+
+public class TestTreeWizard extends BaseTest {
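+	// Token names indexed by token type: 5="A", 6="B", 7="C", 8="D", 9="E",
+	// 10="ID", 11="VAR"; the first five entries (types 0-4) are unused placeholders.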
+	protected static final String[] tokens =
+		new String[] {"", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR"};
+	protected static final TreeAdaptor adaptor = new CommonTreeAdaptor();
+
+	@Test public void testSingleNode() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("ID");
+		String found = t.toStringTree();
+		String expecting = "ID";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testSingleNodeWithArg() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("ID[foo]");
+		String found = t.toStringTree();
+		String expecting = "foo";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testSingleNodeTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A)");
+		String found = t.toStringTree();
+		String expecting = "A";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testSingleLevelTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C D)");
+		String found = t.toStringTree();
+		String expecting = "(A B C D)";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testListTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(nil A B C)");
+		String found = t.toStringTree();
+		String expecting = "A B C";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testInvalidListTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("A B C");
+		assertNull(t);
+	}
+
+	@Test public void testDoubleLevelTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A (B C) (B D) E)");
+		String found = t.toStringTree();
+		String expecting = "(A (B C) (B D) E)";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testSingleNodeIndex() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("ID");
+		Map<Integer, List<Object>> m = wiz.index(t);
+		String found = m.toString();
+		String expecting = "{10=[ID]}";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testNoRepeatsIndex() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C D)");
+		Map<Integer, List<Object>> m = wiz.index(t);
+		String found = sortMapToString(m);
+        String expecting = "{5=[A], 6=[B], 7=[C], 8=[D]}";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRepeatsIndex() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		Map<Integer, List<Object>> m = wiz.index(t);
+		String found =  sortMapToString(m);
+        String expecting = "{5=[A, A], 6=[B, B, B], 7=[C], 8=[D, D]}";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testNoRepeatsVisit() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C D)");
+		final List<Object> elements = new ArrayList<Object>();
+		wiz.visit(t, wiz.getTokenType("B"), new TreeWizard.Visitor() {
+			@Override
+			public void visit(Object t) {
+				elements.add(t);
+			}
+		});
+		String found = elements.toString();
+		String expecting = "[B]";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testNoRepeatsVisit2() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		final List<Object> elements = new ArrayList<Object>();
+		wiz.visit(t, wiz.getTokenType("C"),
+					   new TreeWizard.Visitor() {
+							@Override
+							public void visit(Object t) {
+								elements.add(t);
+							}
+					   });
+		String found = elements.toString();
+		String expecting = "[C]";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRepeatsVisit() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		final List<Object> elements = new ArrayList<Object>();
+		wiz.visit(t, wiz.getTokenType("B"),
+					   new TreeWizard.Visitor() {
+							@Override
+							public void visit(Object t) {
+								elements.add(t);
+							}
+					   });
+		String found = elements.toString();
+		String expecting = "[B, B, B]";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRepeatsVisit2() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		final List<Object> elements = new ArrayList<Object>();
+		wiz.visit(t, wiz.getTokenType("A"),
+					   new TreeWizard.Visitor() {
+							@Override
+							public void visit(Object t) {
+								elements.add(t);
+							}
+					   });
+		String found = elements.toString();
+		String expecting = "[A, A]";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRepeatsVisitWithContext() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		final List<String> elements = new ArrayList<String>();
+		wiz.visit(t, wiz.getTokenType("B"),
+		   new TreeWizard.ContextVisitor() {
+			   @Override
+			   public void visit(Object t, Object parent, int childIndex, Map<String, Object> labels) {
+				   elements.add(adaptor.getText(t)+"@"+
+								(parent!=null?adaptor.getText(parent):"nil")+
+								"["+childIndex+"]");
+			   }
+		   });
+		String found = elements.toString();
+		String expecting = "[B@A[0], B@A[1], B@A[2]]";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testRepeatsVisitWithNullParentAndContext() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B (A C B) B D D)");
+		final List<String> elements = new ArrayList<String>();
+		wiz.visit(t, wiz.getTokenType("A"),
+		   new TreeWizard.ContextVisitor() {
+			   @Override
+			   public void visit(Object t, Object parent, int childIndex, Map<String, Object> labels) {
+				   elements.add(adaptor.getText(t)+"@"+
+								(parent!=null?adaptor.getText(parent):"nil")+
+								"["+childIndex+"]");
+			   }
+		   });
+		String found = elements.toString();
+		String expecting = "[A@nil[0], A@A[1]]";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testVisitPattern() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C (A B) D)");
+		final List<Object> elements = new ArrayList<Object>();
+		wiz.visit(t, "(A B)",
+					   new TreeWizard.Visitor() {
+							@Override
+							public void visit(Object t) {
+								elements.add(t);
+							}
+					   });
+		String found = elements.toString();
+		String expecting = "[A]"; // shouldn't match overall root, just (A B)
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testVisitPatternMultiple() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C (A B) (D (A B)))");
+		final List<String> elements = new ArrayList<String>();
+		wiz.visit(t, "(A B)",
+					   new TreeWizard.ContextVisitor() {
+						   @Override
+						   public void visit(Object t, Object parent, int childIndex, Map<String, Object> labels) {
+							   elements.add(adaptor.getText(t)+"@"+
+											(parent!=null?adaptor.getText(parent):"nil")+
+											"["+childIndex+"]");
+						   }
+					   });
+		String found = elements.toString();
+		String expecting = "[A@A[2], A@D[0]]"; // shouldn't match overall root, just (A B)
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testVisitPatternMultipleWithLabels() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))");
+		final List<String> elements = new ArrayList<String>();
+		wiz.visit(t, "(%a:A %b:B)",
+					   new TreeWizard.ContextVisitor() {
+						   @Override
+						   public void visit(Object t, Object parent, int childIndex, Map<String, Object> labels) {
+							   elements.add(adaptor.getText(t)+"@"+
+											(parent!=null?adaptor.getText(parent):"nil")+
+											"["+childIndex+"]"+labels.get("a")+"&"+labels.get("b"));
+						   }
+					   });
+		String found = elements.toString();
+		String expecting = "[foo@A[2]foo&bar, big@D[0]big&dog]";
+		assertEquals(expecting, found);
+	}
+
+	@Test public void testParse() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C)");
+		boolean valid = wiz.parse(t, "(A B C)");
+		assertTrue(valid);
+	}
+
+	@Test public void testParseSingleNode() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("A");
+		boolean valid = wiz.parse(t, "A");
+		assertTrue(valid);
+	}
+
+	@Test public void testParseFlatTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(nil A B C)");
+		boolean valid = wiz.parse(t, "(nil A B C)");
+		assertTrue(valid);
+	}
+
+	@Test public void testWildcard() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C)");
+		boolean valid = wiz.parse(t, "(A . .)");
+		assertTrue(valid);
+	}
+
+	@Test public void testParseWithText() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B[foo] C[bar])");
+		// C pattern has no text arg so despite [bar] in t, no need
+		// to match text--check structure only.
+		boolean valid = wiz.parse(t, "(A B[foo] C)");
+		assertTrue(valid);
+	}
+
+	@Test public void testParseWithText2() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B[T__32] (C (D E[a])))");
+		// The pattern does not match this tree (text and structure differ);
+		// verify that a failed parse leaves the tree itself unchanged.
+		wiz.parse(t, "(A B[foo] C)");
+		assertEquals("(A T__32 (C (D a)))", t.toStringTree());
+	}
+
+	@Test public void testParseWithTextFails() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C)");
+		boolean valid = wiz.parse(t, "(A[foo] B C)");
+		assertTrue(!valid); // fails
+	}
+
+	@Test public void testParseLabels() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C)");
+		Map<String, Object> labels = new HashMap<String, Object>();
+		boolean valid = wiz.parse(t, "(%a:A %b:B %c:C)", labels);
+		assertTrue(valid);
+		assertEquals("A", labels.get("a").toString());
+		assertEquals("B", labels.get("b").toString());
+		assertEquals("C", labels.get("c").toString());
+	}
+
+	@Test public void testParseWithWildcardLabels() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C)");
+		Map<String, Object> labels = new HashMap<String, Object>();
+		boolean valid = wiz.parse(t, "(A %b:. %c:.)", labels);
+		assertTrue(valid);
+		assertEquals("B", labels.get("b").toString());
+		assertEquals("C", labels.get("c").toString());
+	}
+
+	@Test public void testParseLabelsAndTestText() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B[foo] C)");
+		Map<String, Object> labels = new HashMap<String, Object>();
+		boolean valid = wiz.parse(t, "(%a:A %b:B[foo] %c:C)", labels);
+		assertTrue(valid);
+		assertEquals("A", labels.get("a").toString());
+		assertEquals("foo", labels.get("b").toString());
+		assertEquals("C", labels.get("c").toString());
+	}
+
+	@Test public void testParseLabelsInNestedTree() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A (B C) (D E))");
+		Map<String, Object> labels = new HashMap<String, Object>();
+		boolean valid = wiz.parse(t, "(%a:A (%b:B %c:C) (%d:D %e:E) )", labels);
+		assertTrue(valid);
+		assertEquals("A", labels.get("a").toString());
+		assertEquals("B", labels.get("b").toString());
+		assertEquals("C", labels.get("c").toString());
+		assertEquals("D", labels.get("d").toString());
+		assertEquals("E", labels.get("e").toString());
+	}
+
+	@Test public void testEquals() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t1 = (CommonTree)wiz.create("(A B C)");
+		CommonTree t2 = (CommonTree)wiz.create("(A B C)");
+		boolean same = TreeWizard.equals(t1, t2, adaptor);
+		assertTrue(same);
+	}
+
+	@Test public void testEqualsWithText() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t1 = (CommonTree)wiz.create("(A B[foo] C)");
+		CommonTree t2 = (CommonTree)wiz.create("(A B[foo] C)");
+		boolean same = TreeWizard.equals(t1, t2, adaptor);
+		assertTrue(same);
+	}
+	
+	@Test public void testEqualsWithMismatchedText() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t1 = (CommonTree)wiz.create("(A B[foo] C)");
+		CommonTree t2 = (CommonTree)wiz.create("(A B C)");
+		boolean same = TreeWizard.equals(t1, t2, adaptor);
+		assertTrue(!same);
+	}
+
+	@Test public void testFindPattern() throws Exception {
+		TreeWizard wiz = new TreeWizard(adaptor, tokens);
+		CommonTree t = (CommonTree)wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))");
+		final List<? extends Object> subtrees = wiz.find(t, "(A B)");
+		List<? extends Object> elements = subtrees;
+		String found = elements.toString();
+		String expecting = "[foo, big]";
+		assertEquals(expecting, found);
+	}
+	
+}
\ No newline at end of file
diff --git a/tool/src/test/java/org/antlr/test/TestTrees.java b/tool/src/test/java/org/antlr/test/TestTrees.java
new file mode 100644
index 0000000..0dd54b4
--- /dev/null
+++ b/tool/src/test/java/org/antlr/test/TestTrees.java
@@ -0,0 +1,405 @@
+/*
+ * [The "BSD license"]
+ *  Copyright (c) 2010 Terence Parr
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ *  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ *  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.antlr.test;
+
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+import org.antlr.runtime.tree.CommonTree;
+import org.antlr.runtime.tree.CommonTreeAdaptor;
+import org.antlr.runtime.tree.Tree;
+import org.antlr.runtime.tree.TreeAdaptor;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestTrees extends BaseTest {
+	TreeAdaptor adaptor = new CommonTreeAdaptor();
+	protected boolean debug = false;
+
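+	// CommonTree subclass carrying an extra int payload, used below with the
+	// adaptor to exercise user-defined (heterogeneous) node types.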
+	static class V extends CommonTree {
+		public int x;
+		public V(Token t) { this.token = t;}
+		public V(int ttype, int x) { this.x=x; token=new CommonToken(ttype); }
+		public V(int ttype, Token t, int x) { token=t; this.x=x;}
+		@Override
+		public String toString() { return (token!=null?token.getText():"")+"<V>";}
+	}
+
+	@Test public void testSingleNode() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(101));
+		assertNull(t.parent);
+		assertEquals(-1, t.childIndex);
+	}
+
+	@Test public void testTwoChildrenOfNilRoot() throws Exception {
+		CommonTree root_0 = (CommonTree)adaptor.nil();
+		CommonTree t = new V(101, 2);
+		CommonTree u = new V(new CommonToken(102,"102"));
+		adaptor.addChild(root_0, t);
+		adaptor.addChild(root_0, u);
+		assertNull(root_0.parent);
+		assertEquals(-1, root_0.childIndex);
+		assertEquals(0, t.childIndex);
+		assertEquals(1, u.childIndex);
+	}
+
+	@Test public void test4Nodes() throws Exception {
+		// ^(101 ^(102 103) 104)
+		CommonTree r0 = new CommonTree(new CommonToken(101));
+		r0.addChild(new CommonTree(new CommonToken(102)));
+		r0.getChild(0).addChild(new CommonTree(new CommonToken(103)));
+		r0.addChild(new CommonTree(new CommonToken(104)));
+
+		assertNull(r0.parent);
+		assertEquals(-1, r0.childIndex);
+	}
+
+	@Test public void testList() throws Exception {
+		// ^(nil 101 102 103)
+		CommonTree r0 = new CommonTree((Token)null);
+		CommonTree c0, c1, c2;
+		r0.addChild(c0=new CommonTree(new CommonToken(101)));
+		r0.addChild(c1=new CommonTree(new CommonToken(102)));
+		r0.addChild(c2=new CommonTree(new CommonToken(103)));
+
+		assertNull(r0.parent);
+		assertEquals(-1, r0.childIndex);
+		assertEquals(r0, c0.parent);
+		assertEquals(0, c0.childIndex);
+		assertEquals(r0, c1.parent);
+		assertEquals(1, c1.childIndex);		
+		assertEquals(r0, c2.parent);
+		assertEquals(2, c2.childIndex);
+	}
+
+	@Test public void testList2() throws Exception {
+		// Add child ^(nil 101 102 103) to root 5
+		// should pull 101 102 103 directly to become 5's child list
+		CommonTree root = new CommonTree(new CommonToken(5));
+
+		// child tree
+		CommonTree r0 = new CommonTree((Token)null);
+		CommonTree c0, c1, c2;
+		r0.addChild(c0=new CommonTree(new CommonToken(101)));
+		r0.addChild(c1=new CommonTree(new CommonToken(102)));
+		r0.addChild(c2=new CommonTree(new CommonToken(103)));
+
+		root.addChild(r0);
+
+		assertNull(root.parent);
+		assertEquals(-1, root.childIndex);
+		// check children of root all point at root
+		assertEquals(root, c0.parent);
+		assertEquals(0, c0.childIndex);
+		assertEquals(root, c1.parent);
+		assertEquals(1, c1.childIndex);
+		assertEquals(root, c2.parent);
+		assertEquals(2, c2.childIndex);
+	}
+
+	@Test public void testAddListToExistChildren() throws Exception {
+		// Add child ^(nil 101 102 103) to root ^(5 6)
+		// should add 101 102 103 to end of 5's child list
+		CommonTree root = new CommonTree(new CommonToken(5));
+		root.addChild(new CommonTree(new CommonToken(6)));
+
+		// child tree
+		CommonTree r0 = new CommonTree((Token)null);
+		CommonTree c0, c1, c2;
+		r0.addChild(c0=new CommonTree(new CommonToken(101)));
+		r0.addChild(c1=new CommonTree(new CommonToken(102)));
+		r0.addChild(c2=new CommonTree(new CommonToken(103)));
+
+		root.addChild(r0);
+
+		assertNull(root.parent);
+		assertEquals(-1, root.childIndex);
+		// check children of root all point at root
+		assertEquals(root, c0.parent);
+		assertEquals(1, c0.childIndex);
+		assertEquals(root, c1.parent);
+		assertEquals(2, c1.childIndex);
+		assertEquals(root, c2.parent);
+		assertEquals(3, c2.childIndex);
+	}
+
+	@Test public void testDupTree() throws Exception {
+		// ^(101 ^(102 103 ^(106 107) ) 104 105)
+		CommonTree r0 = new CommonTree(new CommonToken(101));
+		CommonTree r1 = new CommonTree(new CommonToken(102));
+		r0.addChild(r1);
+		r1.addChild(new CommonTree(new CommonToken(103)));
+		Tree r2 = new CommonTree(new CommonToken(106));
+		r2.addChild(new CommonTree(new CommonToken(107)));
+		r1.addChild(r2);
+		r0.addChild(new CommonTree(new CommonToken(104)));
+		r0.addChild(new CommonTree(new CommonToken(105)));
+
+		CommonTree dup = (CommonTree)(new CommonTreeAdaptor()).dupTree(r0);
+
+		assertNull(dup.parent);
+		assertEquals(-1, dup.childIndex);
+		dup.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testBecomeRoot() throws Exception {
+		// 5 becomes new root of ^(nil 101 102 103)
+		CommonTree newRoot = new CommonTree(new CommonToken(5));
+
+		CommonTree oldRoot = new CommonTree((Token)null);
+		oldRoot.addChild(new CommonTree(new CommonToken(101)));
+		oldRoot.addChild(new CommonTree(new CommonToken(102)));
+		oldRoot.addChild(new CommonTree(new CommonToken(103)));
+
+		TreeAdaptor adaptor = new CommonTreeAdaptor();
+		adaptor.becomeRoot(newRoot, oldRoot);
+		newRoot.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testBecomeRoot2() throws Exception {
+		// 5 becomes new root of ^(101 102 103)
+		CommonTree newRoot = new CommonTree(new CommonToken(5));
+
+		CommonTree oldRoot = new CommonTree(new CommonToken(101));
+		oldRoot.addChild(new CommonTree(new CommonToken(102)));
+		oldRoot.addChild(new CommonTree(new CommonToken(103)));
+
+		TreeAdaptor adaptor = new CommonTreeAdaptor();
+		adaptor.becomeRoot(newRoot, oldRoot);
+		newRoot.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testBecomeRoot3() throws Exception {
+		// ^(nil 5) becomes new root of ^(nil 101 102 103)
+		CommonTree newRoot = new CommonTree((Token)null);
+		newRoot.addChild(new CommonTree(new CommonToken(5)));
+
+		CommonTree oldRoot = new CommonTree((Token)null);
+		oldRoot.addChild(new CommonTree(new CommonToken(101)));
+		oldRoot.addChild(new CommonTree(new CommonToken(102)));
+		oldRoot.addChild(new CommonTree(new CommonToken(103)));
+
+		TreeAdaptor adaptor = new CommonTreeAdaptor();
+		adaptor.becomeRoot(newRoot, oldRoot);
+		newRoot.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testBecomeRoot5() throws Exception {
+		// ^(nil 5) becomes new root of ^(101 102 103)
+		CommonTree newRoot = new CommonTree((Token)null);
+		newRoot.addChild(new CommonTree(new CommonToken(5)));
+
+		CommonTree oldRoot = new CommonTree(new CommonToken(101));
+		oldRoot.addChild(new CommonTree(new CommonToken(102)));
+		oldRoot.addChild(new CommonTree(new CommonToken(103)));
+
+		TreeAdaptor adaptor = new CommonTreeAdaptor();
+		adaptor.becomeRoot(newRoot, oldRoot);
+		newRoot.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testBecomeRoot6() throws Exception {
+		// emulates construction of ^(5 6)
+		CommonTree root_0 = (CommonTree)adaptor.nil();
+		CommonTree root_1 = (CommonTree)adaptor.nil();
+		root_1 = (CommonTree)adaptor.becomeRoot(new CommonTree(new CommonToken(5)), root_1);
+
+		adaptor.addChild(root_1, new CommonTree(new CommonToken(6)));
+
+		adaptor.addChild(root_0, root_1);
+
+		root_0.sanityCheckParentAndChildIndexes();
+	}
+
+	// Test replaceChildren
+
+	@Test(expected = IllegalArgumentException.class)
+	public void testReplaceWithNoChildren() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(101));
+		CommonTree newChild = new CommonTree(new CommonToken(5));
+		t.replaceChildren(0, 0, newChild);
+	}
+
+	@Test public void testReplaceWithOneChildren() throws Exception {
+		// assume token type 99 and use text
+		CommonTree t = new CommonTree(new CommonToken(99,"a"));
+		CommonTree c0 = new CommonTree(new CommonToken(99, "b"));
+		t.addChild(c0);
+
+		CommonTree newChild = new CommonTree(new CommonToken(99, "c"));
+		t.replaceChildren(0, 0, newChild);
+		String expecting = "(a c)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testReplaceInMiddle() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c"))); // index 1
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+		t.replaceChildren(1, 1, newChild);
+		String expecting = "(a b x d)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testReplaceAtLeft() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b"))); // index 0
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+		t.replaceChildren(0, 0, newChild);
+		String expecting = "(a x c d)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testReplaceAtRight() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d"))); // index 2
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+		t.replaceChildren(2, 2, newChild);
+		String expecting = "(a b c x)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testReplaceOneWithTwoAtLeft() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChildren = (CommonTree)adaptor.nil();
+		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
+		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
+
+		t.replaceChildren(0, 0, newChildren);
+		String expecting = "(a x y c d)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testReplaceOneWithTwoAtRight() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChildren = (CommonTree)adaptor.nil();
+		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
+		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
+
+		t.replaceChildren(2, 2, newChildren);
+		String expecting = "(a b c x y)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testReplaceOneWithTwoInMiddle() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChildren = (CommonTree)adaptor.nil();
+		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
+		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
+
+		t.replaceChildren(1, 1, newChildren);
+		String expecting = "(a b x y d)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testReplaceTwoWithOneAtLeft() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+
+		t.replaceChildren(0, 1, newChild);
+		String expecting = "(a x d)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testReplaceTwoWithOneAtRight() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+
+		t.replaceChildren(1, 2, newChild);
+		String expecting = "(a b x)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testReplaceAllWithOne() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChild = new CommonTree(new CommonToken(99,"x"));
+
+		t.replaceChildren(0, 2, newChild);
+		String expecting = "(a x)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+
+	@Test public void testReplaceAllWithTwo() throws Exception {
+		CommonTree t = new CommonTree(new CommonToken(99, "a"));
+		t.addChild(new CommonTree(new CommonToken(99, "b")));
+		t.addChild(new CommonTree(new CommonToken(99, "c")));
+		t.addChild(new CommonTree(new CommonToken(99, "d")));
+
+		CommonTree newChildren = (CommonTree)adaptor.nil();
+		newChildren.addChild(new CommonTree(new CommonToken(99,"x")));
+		newChildren.addChild(new CommonTree(new CommonToken(99,"y")));
+
+		t.replaceChildren(0, 2, newChildren);
+		String expecting = "(a x y)";
+		assertEquals(expecting, t.toStringTree());
+		t.sanityCheckParentAndChildIndexes();
+	}
+}